1/* typhoon.c: A Linux Ethernet device driver for 3Com 3CR990 family of NICs */
2/*
3 Written 2002-2004 by David Dillow <dave@thedillows.org>
4 Based on code written 1998-2000 by Donald Becker <becker@scyld.com> and
5 Linux 2.2.x driver by David P. McLean <davidpmclean@yahoo.com>.
6
7 This software may be used and distributed according to the terms of
8 the GNU General Public License (GPL), incorporated herein by reference.
9 Drivers based on or derived from this code fall under the GPL and must
10 retain the authorship, copyright and license notice. This file is not
11 a complete program and may only be used when the entire operating
12 system is licensed under the GPL.
13
14 This software is available on a public web site. It may enable
15 cryptographic capabilities of the 3Com hardware, and may be
16 exported from the United States under License Exception "TSU"
17 pursuant to 15 C.F.R. Section 740.13(e).
18
19 This work was funded by the National Library of Medicine under
20 the Department of Energy project number 0274DD06D1 and NLM project
21 number Y1-LM-2015-01.
22
23 This driver is designed for the 3Com 3CR990 Family of cards with the
24 3XP Processor. It has been tested on x86 and sparc64.
25
26 KNOWN ISSUES:
27 *) The current firmware always strips the VLAN tag off, even if
28 we tell it not to. You should filter VLANs at the switch
29 as a workaround (good practice in any event) until we can
30 get this fixed.
31 *) Cannot DMA Rx packets to a 2 byte aligned address. Also firmware
32 issue. Hopefully 3Com will fix it.
33 *) Waiting for a command response takes 8ms due to non-preemptable
34 polling. Only significant for getting stats and creating
 35	SAs, but an ugly wart nevertheless.
36
37 TODO:
38 *) Doesn't do IPSEC offloading. Yet. Keep yer pants on, it's coming.
39 *) Add more support for ethtool (especially for NIC stats)
40 *) Allow disabling of RX checksum offloading
41 *) Fix MAC changing to work while the interface is up
42 (Need to put commands on the TX ring, which changes
43 the locking)
44 *) Add in FCS to {rx,tx}_bytes, since the hardware doesn't. See
45 http://oss.sgi.com/cgi-bin/mesg.cgi?a=netdev&i=20031215152211.7003fe8e.rddunlap%40osdl.org
46*/
47
48/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
49 * Setting to > 1518 effectively disables this feature.
50 */
51static int rx_copybreak = 200;
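/* Note: typhoon_rx() below implements this scheme -- frames shorter than
 * rx_copybreak are copied into a freshly allocated skb so the full PKT_BUF_SZ
 * receive buffer can be handed straight back to the NIC, while larger frames
 * are passed up in the original buffer and a replacement buffer is allocated
 * and mapped in its place.
 */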
52
53/* Should we use MMIO or Port IO?
54 * 0: Port IO
55 * 1: MMIO
56 * 2: Try MMIO, fallback to Port IO
57 */
58static unsigned int use_mmio = 2;
59
60/* end user-configurable values */
61
62/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
63 */
64static const int multicast_filter_limit = 32;
65
66/* Operational parameters that are set at compile time. */
67
68/* Keep the ring sizes a power of two for compile efficiency.
69 * The compiler will convert <unsigned>'%'<2^N> into a bit mask.
70 * Making the Tx ring too large decreases the effectiveness of channel
71 * bonding and packet priority.
72 * There are no ill effects from too-large receive rings.
73 *
74 * We don't currently use the Hi Tx ring so, don't make it very big.
75 *
76 * Beware that if we start using the Hi Tx ring, we will need to change
77 * typhoon_num_free_tx() and typhoon_tx_complete() to account for that.
78 */
79#define TXHI_ENTRIES 2
80#define TXLO_ENTRIES 128
81#define RX_ENTRIES 32
82#define COMMAND_ENTRIES 16
83#define RESPONSE_ENTRIES 32
84
85#define COMMAND_RING_SIZE (COMMAND_ENTRIES * sizeof(struct cmd_desc))
86#define RESPONSE_RING_SIZE (RESPONSE_ENTRIES * sizeof(struct resp_desc))
87
88/* The 3XP will preload and remove 64 entries from the free buffer
 89 * list, and we need one entry to keep the ring from wrapping, so
90 * to keep this a power of two, we use 128 entries.
91 */
92#define RXFREE_ENTRIES 128
93#define RXENT_ENTRIES (RXFREE_ENTRIES - 1)
94
95/* Operational parameters that usually are not changed. */
96
97/* Time in jiffies before concluding the transmitter is hung. */
98#define TX_TIMEOUT (2*HZ)
99
100#define PKT_BUF_SZ 1536
101#define FIRMWARE_NAME		"3com/typhoon.bin"
102
103#define pr_fmt(fmt) KBUILD_MODNAME " " fmt
104
105#include <linux/module.h>
106#include <linux/kernel.h>
107#include <linux/sched.h>
108#include <linux/string.h>
109#include <linux/timer.h>
110#include <linux/errno.h>
111#include <linux/ioport.h>
112#include <linux/slab.h>
113#include <linux/interrupt.h>
114#include <linux/pci.h>
115#include <linux/netdevice.h>
116#include <linux/etherdevice.h>
117#include <linux/skbuff.h>
118#include <linux/mm.h>
119#include <linux/init.h>
120#include <linux/delay.h>
121#include <linux/ethtool.h>
122#include <linux/if_vlan.h>
123#include <linux/crc32.h>
124#include <linux/bitops.h>
125#include <asm/processor.h>
126#include <asm/io.h>
127#include <asm/uaccess.h>
128#include <linux/in6.h>
129#include <linux/dma-mapping.h>
130#include <linux/firmware.h>
131#include <generated/utsrelease.h>
132
133#include "typhoon.h"
134
135MODULE_AUTHOR("David Dillow <dave@thedillows.org>");
136MODULE_VERSION(UTS_RELEASE);
137MODULE_LICENSE("GPL");
138MODULE_FIRMWARE(FIRMWARE_NAME);
139MODULE_DESCRIPTION("3Com Typhoon Family (3C990, 3CR990, and variants)");
140MODULE_PARM_DESC(rx_copybreak, "Packets smaller than this are copied and "
141 "the buffer given back to the NIC. Default "
142 "is 200.");
143MODULE_PARM_DESC(use_mmio, "Use MMIO (1) or PIO(0) to access the NIC. "
144 "Default is to try MMIO and fallback to PIO.");
145module_param(rx_copybreak, int, 0);
146module_param(use_mmio, int, 0);
147
148#if defined(NETIF_F_TSO) && MAX_SKB_FRAGS > 32
149#warning Typhoon only supports 32 entries in its SG list for TSO, disabling TSO
150#undef NETIF_F_TSO
151#endif
152
153#if TXLO_ENTRIES <= (2 * MAX_SKB_FRAGS)
154#error TX ring too small!
155#endif
156
157struct typhoon_card_info {
158 const char *name;
159 const int capabilities;
160};
161
162#define TYPHOON_CRYPTO_NONE 0x00
163#define TYPHOON_CRYPTO_DES 0x01
164#define TYPHOON_CRYPTO_3DES 0x02
165#define TYPHOON_CRYPTO_VARIABLE 0x04
166#define TYPHOON_FIBER 0x08
167#define TYPHOON_WAKEUP_NEEDS_RESET 0x10
168
169enum typhoon_cards {
170 TYPHOON_TX = 0, TYPHOON_TX95, TYPHOON_TX97, TYPHOON_SVR,
171 TYPHOON_SVR95, TYPHOON_SVR97, TYPHOON_TXM, TYPHOON_BSVR,
172 TYPHOON_FX95, TYPHOON_FX97, TYPHOON_FX95SVR, TYPHOON_FX97SVR,
173 TYPHOON_FXM,
174};
175
176/* directly indexed by enum typhoon_cards, above */
177static struct typhoon_card_info typhoon_card_info[] __devinitdata = {
178 { "3Com Typhoon (3C990-TX)",
179 TYPHOON_CRYPTO_NONE},
180 { "3Com Typhoon (3CR990-TX-95)",
181 TYPHOON_CRYPTO_DES},
182 { "3Com Typhoon (3CR990-TX-97)",
183 TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES},
184 { "3Com Typhoon (3C990SVR)",
185 TYPHOON_CRYPTO_NONE},
186 { "3Com Typhoon (3CR990SVR95)",
187 TYPHOON_CRYPTO_DES},
188 { "3Com Typhoon (3CR990SVR97)",
189 TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES},
190 { "3Com Typhoon2 (3C990B-TX-M)",
191 TYPHOON_CRYPTO_VARIABLE},
192 { "3Com Typhoon2 (3C990BSVR)",
193 TYPHOON_CRYPTO_VARIABLE},
194 { "3Com Typhoon (3CR990-FX-95)",
195 TYPHOON_CRYPTO_DES | TYPHOON_FIBER},
196 { "3Com Typhoon (3CR990-FX-97)",
197 TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES | TYPHOON_FIBER},
198 { "3Com Typhoon (3CR990-FX-95 Server)",
199 TYPHOON_CRYPTO_DES | TYPHOON_FIBER},
200 { "3Com Typhoon (3CR990-FX-97 Server)",
201 TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES | TYPHOON_FIBER},
202 { "3Com Typhoon2 (3C990B-FX-97)",
203 TYPHOON_CRYPTO_VARIABLE | TYPHOON_FIBER},
204};
205
206/* Notes on the new subsystem numbering scheme:
207 * bits 0-1 indicate crypto capabilities: (0) variable, (1) DES, or (2) 3DES
208 * bit 4 indicates if this card has secured firmware (we don't support it)
209 * bit 8 indicates if this is a (0) copper or (1) fiber card
210 * bits 12-16 indicate card type: (0) client and (1) server
211 */
212static DEFINE_PCI_DEVICE_TABLE(typhoon_pci_tbl) = {
213 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990,
 214	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_TX },
215 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_TX_95,
216 PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_TX95 },
217 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_TX_97,
218 PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_TX97 },
219 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
220 PCI_ANY_ID, 0x1000, 0, 0, TYPHOON_TXM },
221 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
222 PCI_ANY_ID, 0x1102, 0, 0, TYPHOON_FXM },
223 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
224 PCI_ANY_ID, 0x2000, 0, 0, TYPHOON_BSVR },
225 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
226 PCI_ANY_ID, 0x1101, 0, 0, TYPHOON_FX95 },
227 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
228 PCI_ANY_ID, 0x1102, 0, 0, TYPHOON_FX97 },
229 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
230 PCI_ANY_ID, 0x2101, 0, 0, TYPHOON_FX95SVR },
231 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
232 PCI_ANY_ID, 0x2102, 0, 0, TYPHOON_FX97SVR },
233 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR95,
234 PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR95 },
235 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR97,
236 PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR97 },
237 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR,
238 PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR },
239 { 0, }
240};
241MODULE_DEVICE_TABLE(pci, typhoon_pci_tbl);
242
243/* Define the shared memory area
244 * Align everything the 3XP will normally be using.
245 * We'll need to move/align txHi if we start using that ring.
246 */
247#define __3xp_aligned ____cacheline_aligned
248struct typhoon_shared {
249 struct typhoon_interface iface;
250 struct typhoon_indexes indexes __3xp_aligned;
251 struct tx_desc txLo[TXLO_ENTRIES] __3xp_aligned;
252 struct rx_desc rxLo[RX_ENTRIES] __3xp_aligned;
253 struct rx_desc rxHi[RX_ENTRIES] __3xp_aligned;
254 struct cmd_desc cmd[COMMAND_ENTRIES] __3xp_aligned;
255 struct resp_desc resp[RESPONSE_ENTRIES] __3xp_aligned;
256 struct rx_free rxBuff[RXFREE_ENTRIES] __3xp_aligned;
257 u32 zeroWord;
258 struct tx_desc txHi[TXHI_ENTRIES];
259} __attribute__ ((packed));
260
261struct rxbuff_ent {
262 struct sk_buff *skb;
263 dma_addr_t dma_addr;
264};
265
266struct typhoon {
267 /* Tx cache line section */
 268	struct transmit_ring 	txLoRing	____cacheline_aligned;
269 struct pci_dev * tx_pdev;
270 void __iomem *tx_ioaddr;
271 u32 txlo_dma_addr;
272
273 /* Irq/Rx cache line section */
274 void __iomem *ioaddr ____cacheline_aligned;
275 struct typhoon_indexes *indexes;
276 u8 awaiting_resp;
277 u8 duplex;
278 u8 speed;
279 u8 card_state;
280 struct basic_ring rxLoRing;
281 struct pci_dev * pdev;
282 struct net_device * dev;
 283	struct napi_struct	napi;
284 spinlock_t state_lock;
285 struct vlan_group * vlgrp;
286 struct basic_ring rxHiRing;
287 struct basic_ring rxBuffRing;
288 struct rxbuff_ent rxbuffers[RXENT_ENTRIES];
289
290 /* general section */
291 spinlock_t command_lock ____cacheline_aligned;
292 struct basic_ring cmdRing;
293 struct basic_ring respRing;
294 struct net_device_stats stats;
295 struct net_device_stats stats_saved;
296 struct typhoon_shared * shared;
297 dma_addr_t shared_dma;
298 __le16 xcvr_select;
299 __le16 wol_events;
300 __le32 offload;
301
302 /* unused stuff (future use) */
303 int capabilities;
304 struct transmit_ring txHiRing;
305};
306
307enum completion_wait_values {
308 NoWait = 0, WaitNoSleep, WaitSleep,
309};
310
311/* These are the values for the typhoon.card_state variable.
312 * These determine where the statistics will come from in get_stats().
313 * The sleep image does not support the statistics we need.
314 */
315enum state_values {
316 Sleeping = 0, Running,
317};
318
319/* PCI writes are not guaranteed to be posted in order, but outstanding writes
320 * cannot pass a read, so this forces current writes to post.
321 */
322#define typhoon_post_pci_writes(x) \
323 do { if(likely(use_mmio)) ioread32(x+TYPHOON_REG_HEARTBEAT); } while(0)
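/* Note: the dummy ioread32() above relies on the PCI rule that a read cannot
 * pass previously posted writes, so it flushes them to the card. With port
 * I/O (use_mmio == 0) writes are not posted, so no flush read is needed.
 */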
324
325/* We'll wait up to six seconds for a reset, and half a second normally.
326 */
327#define TYPHOON_UDELAY 50
328#define TYPHOON_RESET_TIMEOUT_SLEEP (6 * HZ)
329#define TYPHOON_RESET_TIMEOUT_NOSLEEP ((6 * 1000000) / TYPHOON_UDELAY)
330#define TYPHOON_WAIT_TIMEOUT ((1000000 / 2) / TYPHOON_UDELAY)
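/* Note: TYPHOON_WAIT_TIMEOUT works out to (1000000 / 2) / 50 = 10000 polls of
 * TYPHOON_UDELAY (50us) each, i.e. the half second mentioned above, and
 * TYPHOON_RESET_TIMEOUT_NOSLEEP to 120000 polls, roughly six seconds.
 */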
331
332#if defined(NETIF_F_TSO)
333#define skb_tso_size(x)		(skb_shinfo(x)->gso_size)
334#define TSO_NUM_DESCRIPTORS 2
335#define TSO_OFFLOAD_ON TYPHOON_OFFLOAD_TCP_SEGMENT
336#else
337#define NETIF_F_TSO 0
338#define skb_tso_size(x) 0
339#define TSO_NUM_DESCRIPTORS 0
340#define TSO_OFFLOAD_ON 0
341#endif
342
343static inline void
344typhoon_inc_index(u32 *index, const int count, const int num_entries)
345{
 346	/* Increment a ring index -- we can use this for all rings except
 347	 * the Rx rings, as they use different-sized descriptors;
348 * otherwise, everything is the same size as a cmd_desc
349 */
350 *index += count * sizeof(struct cmd_desc);
351 *index %= num_entries * sizeof(struct cmd_desc);
352}
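/* Illustrative note (not in the original source): the index is a byte offset
 * into the ring, so assuming the 16-byte cmd_desc layout from typhoon.h and
 * COMMAND_ENTRIES == 16, the modulo above reduces to a simple mask:
 *
 *	*index = (*index + count * sizeof(struct cmd_desc)) &
 *		 (COMMAND_ENTRIES * sizeof(struct cmd_desc) - 1);
 */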
353
354static inline void
355typhoon_inc_cmd_index(u32 *index, const int count)
356{
357 typhoon_inc_index(index, count, COMMAND_ENTRIES);
358}
359
360static inline void
361typhoon_inc_resp_index(u32 *index, const int count)
362{
363 typhoon_inc_index(index, count, RESPONSE_ENTRIES);
364}
365
366static inline void
367typhoon_inc_rxfree_index(u32 *index, const int count)
368{
369 typhoon_inc_index(index, count, RXFREE_ENTRIES);
370}
371
372static inline void
373typhoon_inc_tx_index(u32 *index, const int count)
374{
 375	/* if we start using the Hi Tx ring, this needs updating */
376 typhoon_inc_index(index, count, TXLO_ENTRIES);
377}
378
379static inline void
380typhoon_inc_rx_index(u32 *index, const int count)
381{
382 /* sizeof(struct rx_desc) != sizeof(struct cmd_desc) */
383 *index += count * sizeof(struct rx_desc);
384 *index %= RX_ENTRIES * sizeof(struct rx_desc);
385}
386
387static int
388typhoon_reset(void __iomem *ioaddr, int wait_type)
389{
390 int i, err = 0;
391 int timeout;
392
393 if(wait_type == WaitNoSleep)
394 timeout = TYPHOON_RESET_TIMEOUT_NOSLEEP;
395 else
396 timeout = TYPHOON_RESET_TIMEOUT_SLEEP;
397
398 iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
399 iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);
400
401 iowrite32(TYPHOON_RESET_ALL, ioaddr + TYPHOON_REG_SOFT_RESET);
402 typhoon_post_pci_writes(ioaddr);
403 udelay(1);
404 iowrite32(TYPHOON_RESET_NONE, ioaddr + TYPHOON_REG_SOFT_RESET);
405
406 if(wait_type != NoWait) {
407 for(i = 0; i < timeout; i++) {
408 if(ioread32(ioaddr + TYPHOON_REG_STATUS) ==
409 TYPHOON_STATUS_WAITING_FOR_HOST)
410 goto out;
411
412 if(wait_type == WaitSleep)
413 schedule_timeout_uninterruptible(1);
414 else
415 udelay(TYPHOON_UDELAY);
416 }
417
418 err = -ETIMEDOUT;
419 }
420
421out:
422 iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
423 iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);
424
425 /* The 3XP seems to need a little extra time to complete the load
426 * of the sleep image before we can reliably boot it. Failure to
427 * do this occasionally results in a hung adapter after boot in
428 * typhoon_init_one() while trying to read the MAC address or
429 * putting the card to sleep. 3Com's driver waits 5ms, but
430 * that seems to be overkill. However, if we can sleep, we might
431 * as well give it that much time. Otherwise, we'll give it 500us,
 432	 * which should be enough (I've seen it work well at 100us, but still
433 * saw occasional problems.)
434 */
435 if(wait_type == WaitSleep)
436 msleep(5);
437 else
438 udelay(500);
439 return err;
440}
441
442static int
443typhoon_wait_status(void __iomem *ioaddr, u32 wait_value)
444{
445 int i, err = 0;
446
447 for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
448 if(ioread32(ioaddr + TYPHOON_REG_STATUS) == wait_value)
449 goto out;
450 udelay(TYPHOON_UDELAY);
451 }
452
453 err = -ETIMEDOUT;
454
455out:
456 return err;
457}
458
459static inline void
460typhoon_media_status(struct net_device *dev, struct resp_desc *resp)
461{
462 if(resp->parm1 & TYPHOON_MEDIA_STAT_NO_LINK)
463 netif_carrier_off(dev);
464 else
465 netif_carrier_on(dev);
466}
467
468static inline void
469typhoon_hello(struct typhoon *tp)
470{
471 struct basic_ring *ring = &tp->cmdRing;
472 struct cmd_desc *cmd;
473
474 /* We only get a hello request if we've not sent anything to the
475 * card in a long while. If the lock is held, then we're in the
476 * process of issuing a command, so we don't need to respond.
477 */
478 if(spin_trylock(&tp->command_lock)) {
479 cmd = (struct cmd_desc *)(ring->ringBase + ring->lastWrite);
480 typhoon_inc_cmd_index(&ring->lastWrite, 1);
481
482 INIT_COMMAND_NO_RESPONSE(cmd, TYPHOON_CMD_HELLO_RESP);
 483		wmb();
484 iowrite32(ring->lastWrite, tp->ioaddr + TYPHOON_REG_CMD_READY);
485 spin_unlock(&tp->command_lock);
486 }
487}
488
489static int
490typhoon_process_response(struct typhoon *tp, int resp_size,
491 struct resp_desc *resp_save)
492{
493 struct typhoon_indexes *indexes = tp->indexes;
494 struct resp_desc *resp;
495 u8 *base = tp->respRing.ringBase;
496 int count, len, wrap_len;
497 u32 cleared;
498 u32 ready;
499
500 cleared = le32_to_cpu(indexes->respCleared);
501 ready = le32_to_cpu(indexes->respReady);
502 while(cleared != ready) {
503 resp = (struct resp_desc *)(base + cleared);
504 count = resp->numDesc + 1;
505 if(resp_save && resp->seqNo) {
506 if(count > resp_size) {
507 resp_save->flags = TYPHOON_RESP_ERROR;
508 goto cleanup;
509 }
510
511 wrap_len = 0;
512 len = count * sizeof(*resp);
513 if(unlikely(cleared + len > RESPONSE_RING_SIZE)) {
514 wrap_len = cleared + len - RESPONSE_RING_SIZE;
515 len = RESPONSE_RING_SIZE - cleared;
516 }
517
518 memcpy(resp_save, resp, len);
519 if(unlikely(wrap_len)) {
520 resp_save += len / sizeof(*resp);
521 memcpy(resp_save, base, wrap_len);
522 }
523
524 resp_save = NULL;
525 } else if(resp->cmd == TYPHOON_CMD_READ_MEDIA_STATUS) {
526 typhoon_media_status(tp->dev, resp);
527 } else if(resp->cmd == TYPHOON_CMD_HELLO_RESP) {
528 typhoon_hello(tp);
529 } else {
530 netdev_err(tp->dev,
531 "dumping unexpected response 0x%04x:%d:0x%02x:0x%04x:%08x:%08x\n",
532 le16_to_cpu(resp->cmd),
533 resp->numDesc, resp->flags,
534 le16_to_cpu(resp->parm1),
535 le32_to_cpu(resp->parm2),
536 le32_to_cpu(resp->parm3));
537 }
538
539cleanup:
540 typhoon_inc_resp_index(&cleared, count);
541 }
542
543 indexes->respCleared = cpu_to_le32(cleared);
544 wmb();
545 return (resp_save == NULL);
546}
547
548static inline int
549typhoon_num_free(int lastWrite, int lastRead, int ringSize)
550{
551 /* this works for all descriptors but rx_desc, as they are a
552 * different size than the cmd_desc -- everyone else is the same
553 */
554 lastWrite /= sizeof(struct cmd_desc);
555 lastRead /= sizeof(struct cmd_desc);
556 return (ringSize + lastRead - lastWrite - 1) % ringSize;
557}
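/* Worked example (not in the original source): the byte offsets are first
 * converted to slot numbers, and one slot is always left unused so a full
 * ring can be told apart from an empty one.  With ringSize == 16, lastRead
 * at slot 2 and lastWrite at slot 5: (16 + 2 - 5 - 1) % 16 == 12 free slots.
 */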
558
559static inline int
560typhoon_num_free_cmd(struct typhoon *tp)
561{
562 int lastWrite = tp->cmdRing.lastWrite;
563 int cmdCleared = le32_to_cpu(tp->indexes->cmdCleared);
564
565 return typhoon_num_free(lastWrite, cmdCleared, COMMAND_ENTRIES);
566}
567
568static inline int
569typhoon_num_free_resp(struct typhoon *tp)
570{
571 int respReady = le32_to_cpu(tp->indexes->respReady);
572 int respCleared = le32_to_cpu(tp->indexes->respCleared);
573
574 return typhoon_num_free(respReady, respCleared, RESPONSE_ENTRIES);
575}
576
577static inline int
578typhoon_num_free_tx(struct transmit_ring *ring)
579{
580 /* if we start using the Hi Tx ring, this needs updating */
581 return typhoon_num_free(ring->lastWrite, ring->lastRead, TXLO_ENTRIES);
582}
583
584static int
585typhoon_issue_command(struct typhoon *tp, int num_cmd, struct cmd_desc *cmd,
586 int num_resp, struct resp_desc *resp)
587{
588 struct typhoon_indexes *indexes = tp->indexes;
589 struct basic_ring *ring = &tp->cmdRing;
590 struct resp_desc local_resp;
591 int i, err = 0;
592 int got_resp;
593 int freeCmd, freeResp;
594 int len, wrap_len;
595
596 spin_lock(&tp->command_lock);
597
598 freeCmd = typhoon_num_free_cmd(tp);
599 freeResp = typhoon_num_free_resp(tp);
600
601 if(freeCmd < num_cmd || freeResp < num_resp) {
602 netdev_err(tp->dev, "no descs for cmd, had (needed) %d (%d) cmd, %d (%d) resp\n",
603 freeCmd, num_cmd, freeResp, num_resp);
604 err = -ENOMEM;
605 goto out;
606 }
607
608 if(cmd->flags & TYPHOON_CMD_RESPOND) {
609 /* If we're expecting a response, but the caller hasn't given
610 * us a place to put it, we'll provide one.
611 */
612 tp->awaiting_resp = 1;
613 if(resp == NULL) {
614 resp = &local_resp;
615 num_resp = 1;
616 }
617 }
618
619 wrap_len = 0;
620 len = num_cmd * sizeof(*cmd);
621 if(unlikely(ring->lastWrite + len > COMMAND_RING_SIZE)) {
622 wrap_len = ring->lastWrite + len - COMMAND_RING_SIZE;
623 len = COMMAND_RING_SIZE - ring->lastWrite;
624 }
625
626 memcpy(ring->ringBase + ring->lastWrite, cmd, len);
627 if(unlikely(wrap_len)) {
628 struct cmd_desc *wrap_ptr = cmd;
629 wrap_ptr += len / sizeof(*cmd);
630 memcpy(ring->ringBase, wrap_ptr, wrap_len);
631 }
632
633 typhoon_inc_cmd_index(&ring->lastWrite, num_cmd);
634
 635	/* "I feel a presence... another warrior is on the mesa."
636 */
637 wmb();
638 iowrite32(ring->lastWrite, tp->ioaddr + TYPHOON_REG_CMD_READY);
639 typhoon_post_pci_writes(tp->ioaddr);
640
641 if((cmd->flags & TYPHOON_CMD_RESPOND) == 0)
642 goto out;
643
644 /* Ugh. We'll be here about 8ms, spinning our thumbs, unable to
645 * preempt or do anything other than take interrupts. So, don't
646 * wait for a response unless you have to.
647 *
648 * I've thought about trying to sleep here, but we're called
649 * from many contexts that don't allow that. Also, given the way
650 * 3Com has implemented irq coalescing, we would likely timeout --
651 * this has been observed in real life!
652 *
653 * The big killer is we have to wait to get stats from the card,
654 * though we could go to a periodic refresh of those if we don't
655 * mind them getting somewhat stale. The rest of the waiting
656 * commands occur during open/close/suspend/resume, so they aren't
657 * time critical. Creating SAs in the future will also have to
658 * wait here.
659 */
660 got_resp = 0;
661 for(i = 0; i < TYPHOON_WAIT_TIMEOUT && !got_resp; i++) {
662 if(indexes->respCleared != indexes->respReady)
663 got_resp = typhoon_process_response(tp, num_resp,
664 resp);
665 udelay(TYPHOON_UDELAY);
666 }
667
668 if(!got_resp) {
669 err = -ETIMEDOUT;
670 goto out;
671 }
672
673 /* Collect the error response even if we don't care about the
674 * rest of the response
675 */
676 if(resp->flags & TYPHOON_RESP_ERROR)
677 err = -EIO;
678
679out:
680 if(tp->awaiting_resp) {
681 tp->awaiting_resp = 0;
682 smp_wmb();
683
684 /* Ugh. If a response was added to the ring between
685 * the call to typhoon_process_response() and the clearing
686 * of tp->awaiting_resp, we could have missed the interrupt
687 * and it could hang in the ring an indeterminate amount of
688 * time. So, check for it, and interrupt ourselves if this
689 * is the case.
690 */
691 if(indexes->respCleared != indexes->respReady)
692 iowrite32(1, tp->ioaddr + TYPHOON_REG_SELF_INTERRUPT);
693 }
694
695 spin_unlock(&tp->command_lock);
696 return err;
697}
698
699static void
700typhoon_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
701{
702 struct typhoon *tp = netdev_priv(dev);
703 struct cmd_desc xp_cmd;
704 int err;
705
706 spin_lock_bh(&tp->state_lock);
707 if(!tp->vlgrp != !grp) {
708 /* We've either been turned on for the first time, or we've
709 * been turned off. Update the 3XP.
710 */
711 if(grp)
712 tp->offload |= TYPHOON_OFFLOAD_VLAN;
713 else
714 tp->offload &= ~TYPHOON_OFFLOAD_VLAN;
715
716 /* If the interface is up, the runtime is running -- and we
717 * must be up for the vlan core to call us.
718 *
719 * Do the command outside of the spin lock, as it is slow.
720 */
721 INIT_COMMAND_WITH_RESPONSE(&xp_cmd,
722 TYPHOON_CMD_SET_OFFLOAD_TASKS);
723 xp_cmd.parm2 = tp->offload;
724 xp_cmd.parm3 = tp->offload;
725 spin_unlock_bh(&tp->state_lock);
726 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
727 if(err < 0)
 728			netdev_err(tp->dev, "vlan offload error %d\n", -err);
729 spin_lock_bh(&tp->state_lock);
730 }
731
732 /* now make the change visible */
733 tp->vlgrp = grp;
734 spin_unlock_bh(&tp->state_lock);
735}
736
737static inline void
738typhoon_tso_fill(struct sk_buff *skb, struct transmit_ring *txRing,
739 u32 ring_dma)
740{
741 struct tcpopt_desc *tcpd;
742 u32 tcpd_offset = ring_dma;
743
744 tcpd = (struct tcpopt_desc *) (txRing->ringBase + txRing->lastWrite);
745 tcpd_offset += txRing->lastWrite;
746 tcpd_offset += offsetof(struct tcpopt_desc, bytesTx);
747 typhoon_inc_tx_index(&txRing->lastWrite, 1);
748
749 tcpd->flags = TYPHOON_OPT_DESC | TYPHOON_OPT_TCP_SEG;
750 tcpd->numDesc = 1;
751 tcpd->mss_flags = cpu_to_le16(skb_tso_size(skb));
752 tcpd->mss_flags |= TYPHOON_TSO_FIRST | TYPHOON_TSO_LAST;
753 tcpd->respAddrLo = cpu_to_le32(tcpd_offset);
754 tcpd->bytesTx = cpu_to_le32(skb->len);
755 tcpd->status = 0;
756}
757
758static netdev_tx_t
759typhoon_start_tx(struct sk_buff *skb, struct net_device *dev)
760{
761 struct typhoon *tp = netdev_priv(dev);
762 struct transmit_ring *txRing;
763 struct tx_desc *txd, *first_txd;
764 dma_addr_t skb_dma;
765 int numDesc;
766
767 /* we have two rings to choose from, but we only use txLo for now
768 * If we start using the Hi ring as well, we'll need to update
769 * typhoon_stop_runtime(), typhoon_interrupt(), typhoon_num_free_tx(),
 770	 * and TXHI_ENTRIES to match, as well as update the TSO code below
771 * to get the right DMA address
772 */
773 txRing = &tp->txLoRing;
774
775 /* We need one descriptor for each fragment of the sk_buff, plus the
776 * one for the ->data area of it.
777 *
778 * The docs say a maximum of 16 fragment descriptors per TCP option
779 * descriptor, then make a new packet descriptor and option descriptor
780 * for the next 16 fragments. The engineers say just an option
781 * descriptor is needed. I've tested up to 26 fragments with a single
782 * packet descriptor/option descriptor combo, so I use that for now.
783 *
784 * If problems develop with TSO, check this first.
785 */
786 numDesc = skb_shinfo(skb)->nr_frags + 1;
 787	if (skb_is_gso(skb))
788 numDesc++;
789
790 /* When checking for free space in the ring, we need to also
791 * account for the initial Tx descriptor, and we always must leave
792 * at least one descriptor unused in the ring so that it doesn't
793 * wrap and look empty.
794 *
795 * The only time we should loop here is when we hit the race
796 * between marking the queue awake and updating the cleared index.
797 * Just loop and it will appear. This comes from the acenic driver.
798 */
799 while(unlikely(typhoon_num_free_tx(txRing) < (numDesc + 2)))
800 smp_rmb();
801
802 first_txd = (struct tx_desc *) (txRing->ringBase + txRing->lastWrite);
803 typhoon_inc_tx_index(&txRing->lastWrite, 1);
804
805 first_txd->flags = TYPHOON_TX_DESC | TYPHOON_DESC_VALID;
806 first_txd->numDesc = 0;
807 first_txd->len = 0;
 808	first_txd->tx_addr = (u64)((unsigned long) skb);
809 first_txd->processFlags = 0;
810
 811	if(skb->ip_summed == CHECKSUM_PARTIAL) {
812 /* The 3XP will figure out if this is UDP/TCP */
813 first_txd->processFlags |= TYPHOON_TX_PF_TCP_CHKSUM;
814 first_txd->processFlags |= TYPHOON_TX_PF_UDP_CHKSUM;
815 first_txd->processFlags |= TYPHOON_TX_PF_IP_CHKSUM;
816 }
817
818 if(vlan_tx_tag_present(skb)) {
819 first_txd->processFlags |=
820 TYPHOON_TX_PF_INSERT_VLAN | TYPHOON_TX_PF_VLAN_PRIORITY;
821 first_txd->processFlags |=
 822			cpu_to_le32(ntohs(vlan_tx_tag_get(skb)) <<
823 TYPHOON_TX_PF_VLAN_TAG_SHIFT);
824 }
825
 826	if (skb_is_gso(skb)) {
827 first_txd->processFlags |= TYPHOON_TX_PF_TCP_SEGMENT;
828 first_txd->numDesc++;
829
830 typhoon_tso_fill(skb, txRing, tp->txlo_dma_addr);
831 }
832
833 txd = (struct tx_desc *) (txRing->ringBase + txRing->lastWrite);
834 typhoon_inc_tx_index(&txRing->lastWrite, 1);
835
836 /* No need to worry about padding packet -- the firmware pads
837 * it with zeros to ETH_ZLEN for us.
838 */
839 if(skb_shinfo(skb)->nr_frags == 0) {
840 skb_dma = pci_map_single(tp->tx_pdev, skb->data, skb->len,
841 PCI_DMA_TODEVICE);
842 txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
843 txd->len = cpu_to_le16(skb->len);
844 txd->frag.addr = cpu_to_le32(skb_dma);
845 txd->frag.addrHi = 0;
846 first_txd->numDesc++;
847 } else {
848 int i, len;
849
850 len = skb_headlen(skb);
851 skb_dma = pci_map_single(tp->tx_pdev, skb->data, len,
852 PCI_DMA_TODEVICE);
853 txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
854 txd->len = cpu_to_le16(len);
855 txd->frag.addr = cpu_to_le32(skb_dma);
856 txd->frag.addrHi = 0;
857 first_txd->numDesc++;
858
859 for(i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
860 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
861 void *frag_addr;
862
863 txd = (struct tx_desc *) (txRing->ringBase +
864 txRing->lastWrite);
865 typhoon_inc_tx_index(&txRing->lastWrite, 1);
866
867 len = frag->size;
868 frag_addr = (void *) page_address(frag->page) +
869 frag->page_offset;
870 skb_dma = pci_map_single(tp->tx_pdev, frag_addr, len,
871 PCI_DMA_TODEVICE);
872 txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
873 txd->len = cpu_to_le16(len);
874 txd->frag.addr = cpu_to_le32(skb_dma);
875 txd->frag.addrHi = 0;
876 first_txd->numDesc++;
877 }
878 }
879
880 /* Kick the 3XP
881 */
882 wmb();
883 iowrite32(txRing->lastWrite, tp->tx_ioaddr + txRing->writeRegister);
884
885 dev->trans_start = jiffies;
886
887 /* If we don't have room to put the worst case packet on the
888 * queue, then we must stop the queue. We need 2 extra
889 * descriptors -- one to prevent ring wrap, and one for the
890 * Tx header.
891 */
892 numDesc = MAX_SKB_FRAGS + TSO_NUM_DESCRIPTORS + 1;
893
894 if(typhoon_num_free_tx(txRing) < (numDesc + 2)) {
895 netif_stop_queue(dev);
896
 897		/* A Tx complete IRQ could have gotten in between, making
898 * the ring free again. Only need to recheck here, since
899 * Tx is serialized.
900 */
901 if(typhoon_num_free_tx(txRing) >= (numDesc + 2))
902 netif_wake_queue(dev);
903 }
904
 905	return NETDEV_TX_OK;
906}
907
908static void
909typhoon_set_rx_mode(struct net_device *dev)
910{
911 struct typhoon *tp = netdev_priv(dev);
912 struct cmd_desc xp_cmd;
913 u32 mc_filter[2];
 914	__le16 filter;
915
916 filter = TYPHOON_RX_FILTER_DIRECTED | TYPHOON_RX_FILTER_BROADCAST;
917 if(dev->flags & IFF_PROMISC) {
 918		filter |= TYPHOON_RX_FILTER_PROMISCOUS;
 919	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
920 (dev->flags & IFF_ALLMULTI)) {
921 /* Too many to match, or accept all multicasts. */
922 filter |= TYPHOON_RX_FILTER_ALL_MCAST;
 923	} else if (!netdev_mc_empty(dev)) {
 924		struct dev_mc_list *mclist;
925
926 memset(mc_filter, 0, sizeof(mc_filter));
 927		netdev_for_each_mc_addr(mclist, dev) {
928 int bit = ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3f;
929 mc_filter[bit >> 5] |= 1 << (bit & 0x1f);
930 }
931
932 INIT_COMMAND_NO_RESPONSE(&xp_cmd,
933 TYPHOON_CMD_SET_MULTICAST_HASH);
934 xp_cmd.parm1 = TYPHOON_MCAST_HASH_SET;
935 xp_cmd.parm2 = cpu_to_le32(mc_filter[0]);
936 xp_cmd.parm3 = cpu_to_le32(mc_filter[1]);
937 typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
938
939 filter |= TYPHOON_RX_FILTER_MCAST_HASH;
940 }
941
942 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_RX_FILTER);
943 xp_cmd.parm1 = filter;
944 typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
945}
946
947static int
948typhoon_do_get_stats(struct typhoon *tp)
949{
950 struct net_device_stats *stats = &tp->stats;
951 struct net_device_stats *saved = &tp->stats_saved;
952 struct cmd_desc xp_cmd;
953 struct resp_desc xp_resp[7];
954 struct stats_resp *s = (struct stats_resp *) xp_resp;
955 int err;
956
957 INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_STATS);
958 err = typhoon_issue_command(tp, 1, &xp_cmd, 7, xp_resp);
959 if(err < 0)
960 return err;
961
 962	/* 3Com's Linux driver uses txMultipleCollisions as its
963 * collisions value, but there is some other collision info as well...
964 *
965 * The extra status reported would be a good candidate for
966 * ethtool_ops->get_{strings,stats}()
967 */
968 stats->tx_packets = le32_to_cpu(s->txPackets);
 969	stats->tx_bytes = le64_to_cpu(s->txBytes);
970 stats->tx_errors = le32_to_cpu(s->txCarrierLost);
971 stats->tx_carrier_errors = le32_to_cpu(s->txCarrierLost);
972 stats->collisions = le32_to_cpu(s->txMultipleCollisions);
973 stats->rx_packets = le32_to_cpu(s->rxPacketsGood);
 974	stats->rx_bytes = le64_to_cpu(s->rxBytesGood);
975 stats->rx_fifo_errors = le32_to_cpu(s->rxFifoOverruns);
976 stats->rx_errors = le32_to_cpu(s->rxFifoOverruns) +
977 le32_to_cpu(s->BadSSD) + le32_to_cpu(s->rxCrcErrors);
978 stats->rx_crc_errors = le32_to_cpu(s->rxCrcErrors);
979 stats->rx_length_errors = le32_to_cpu(s->rxOversized);
980 tp->speed = (s->linkStatus & TYPHOON_LINK_100MBPS) ?
981 SPEED_100 : SPEED_10;
982 tp->duplex = (s->linkStatus & TYPHOON_LINK_FULL_DUPLEX) ?
983 DUPLEX_FULL : DUPLEX_HALF;
984
985 /* add in the saved statistics
986 */
987 stats->tx_packets += saved->tx_packets;
988 stats->tx_bytes += saved->tx_bytes;
989 stats->tx_errors += saved->tx_errors;
990 stats->collisions += saved->collisions;
991 stats->rx_packets += saved->rx_packets;
992 stats->rx_bytes += saved->rx_bytes;
993 stats->rx_fifo_errors += saved->rx_fifo_errors;
994 stats->rx_errors += saved->rx_errors;
995 stats->rx_crc_errors += saved->rx_crc_errors;
996 stats->rx_length_errors += saved->rx_length_errors;
997
998 return 0;
999}
1000
1001static struct net_device_stats *
1002typhoon_get_stats(struct net_device *dev)
1003{
1004 struct typhoon *tp = netdev_priv(dev);
1005 struct net_device_stats *stats = &tp->stats;
1006 struct net_device_stats *saved = &tp->stats_saved;
1007
1008 smp_rmb();
1009 if(tp->card_state == Sleeping)
1010 return saved;
1011
1012 if(typhoon_do_get_stats(tp) < 0) {
 1013		netdev_err(dev, "error getting stats\n");
1014 return saved;
1015 }
1016
1017 return stats;
1018}
1019
1020static int
1021typhoon_set_mac_address(struct net_device *dev, void *addr)
1022{
1023 struct sockaddr *saddr = (struct sockaddr *) addr;
1024
1025 if(netif_running(dev))
1026 return -EBUSY;
1027
1028 memcpy(dev->dev_addr, saddr->sa_data, dev->addr_len);
1029 return 0;
1030}
1031
1032static void
1033typhoon_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1034{
1035 struct typhoon *tp = netdev_priv(dev);
1036 struct pci_dev *pci_dev = tp->pdev;
1037 struct cmd_desc xp_cmd;
1038 struct resp_desc xp_resp[3];
1039
1040 smp_rmb();
1041 if(tp->card_state == Sleeping) {
1042 strcpy(info->fw_version, "Sleep image");
1043 } else {
1044 INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS);
1045 if(typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp) < 0) {
1046 strcpy(info->fw_version, "Unknown runtime");
1047 } else {
 1048			u32 sleep_ver = le32_to_cpu(xp_resp[0].parm2);
 1049			snprintf(info->fw_version, 32, "%02x.%03x.%03x",
 1050				sleep_ver >> 24, (sleep_ver >> 12) & 0xfff,
1051 sleep_ver & 0xfff);
1052 }
1053 }
1054
1055 strcpy(info->driver, KBUILD_MODNAME);
1056 strcpy(info->version, UTS_RELEASE);
1057 strcpy(info->bus_info, pci_name(pci_dev));
1058}
1059
1060static int
1061typhoon_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1062{
1063 struct typhoon *tp = netdev_priv(dev);
1064
1065 cmd->supported = SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
1066 SUPPORTED_Autoneg;
1067
1068 switch (tp->xcvr_select) {
1069 case TYPHOON_XCVR_10HALF:
1070 cmd->advertising = ADVERTISED_10baseT_Half;
1071 break;
1072 case TYPHOON_XCVR_10FULL:
1073 cmd->advertising = ADVERTISED_10baseT_Full;
1074 break;
1075 case TYPHOON_XCVR_100HALF:
1076 cmd->advertising = ADVERTISED_100baseT_Half;
1077 break;
1078 case TYPHOON_XCVR_100FULL:
1079 cmd->advertising = ADVERTISED_100baseT_Full;
1080 break;
1081 case TYPHOON_XCVR_AUTONEG:
1082 cmd->advertising = ADVERTISED_10baseT_Half |
1083 ADVERTISED_10baseT_Full |
1084 ADVERTISED_100baseT_Half |
1085 ADVERTISED_100baseT_Full |
1086 ADVERTISED_Autoneg;
1087 break;
1088 }
1089
1090 if(tp->capabilities & TYPHOON_FIBER) {
1091 cmd->supported |= SUPPORTED_FIBRE;
1092 cmd->advertising |= ADVERTISED_FIBRE;
1093 cmd->port = PORT_FIBRE;
1094 } else {
1095 cmd->supported |= SUPPORTED_10baseT_Half |
1096 SUPPORTED_10baseT_Full |
1097 SUPPORTED_TP;
1098 cmd->advertising |= ADVERTISED_TP;
1099 cmd->port = PORT_TP;
1100 }
1101
1102 /* need to get stats to make these link speed/duplex valid */
1103 typhoon_do_get_stats(tp);
1104 cmd->speed = tp->speed;
1105 cmd->duplex = tp->duplex;
1106 cmd->phy_address = 0;
1107 cmd->transceiver = XCVR_INTERNAL;
1108 if(tp->xcvr_select == TYPHOON_XCVR_AUTONEG)
1109 cmd->autoneg = AUTONEG_ENABLE;
1110 else
1111 cmd->autoneg = AUTONEG_DISABLE;
1112 cmd->maxtxpkt = 1;
1113 cmd->maxrxpkt = 1;
1114
1115 return 0;
1116}
1117
1118static int
1119typhoon_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1120{
1121 struct typhoon *tp = netdev_priv(dev);
1122 struct cmd_desc xp_cmd;
 1123	__le16 xcvr;
1124 int err;
1125
1126 err = -EINVAL;
1127 if(cmd->autoneg == AUTONEG_ENABLE) {
1128 xcvr = TYPHOON_XCVR_AUTONEG;
1129 } else {
1130 if(cmd->duplex == DUPLEX_HALF) {
1131 if(cmd->speed == SPEED_10)
1132 xcvr = TYPHOON_XCVR_10HALF;
1133 else if(cmd->speed == SPEED_100)
1134 xcvr = TYPHOON_XCVR_100HALF;
1135 else
1136 goto out;
1137 } else if(cmd->duplex == DUPLEX_FULL) {
1138 if(cmd->speed == SPEED_10)
1139 xcvr = TYPHOON_XCVR_10FULL;
1140 else if(cmd->speed == SPEED_100)
1141 xcvr = TYPHOON_XCVR_100FULL;
1142 else
1143 goto out;
1144 } else
1145 goto out;
1146 }
1147
1148 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_XCVR_SELECT);
 1149	xp_cmd.parm1 = xcvr;
1150 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1151 if(err < 0)
1152 goto out;
1153
1154 tp->xcvr_select = xcvr;
1155 if(cmd->autoneg == AUTONEG_ENABLE) {
1156 tp->speed = 0xff; /* invalid */
1157 tp->duplex = 0xff; /* invalid */
1158 } else {
1159 tp->speed = cmd->speed;
1160 tp->duplex = cmd->duplex;
1161 }
1162
1163out:
1164 return err;
1165}
1166
1167static void
1168typhoon_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1169{
1170 struct typhoon *tp = netdev_priv(dev);
1171
1172 wol->supported = WAKE_PHY | WAKE_MAGIC;
1173 wol->wolopts = 0;
1174 if(tp->wol_events & TYPHOON_WAKE_LINK_EVENT)
1175 wol->wolopts |= WAKE_PHY;
1176 if(tp->wol_events & TYPHOON_WAKE_MAGIC_PKT)
1177 wol->wolopts |= WAKE_MAGIC;
1178 memset(&wol->sopass, 0, sizeof(wol->sopass));
1179}
1180
1181static int
1182typhoon_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1183{
1184 struct typhoon *tp = netdev_priv(dev);
1185
1186 if(wol->wolopts & ~(WAKE_PHY | WAKE_MAGIC))
1187 return -EINVAL;
1188
1189 tp->wol_events = 0;
1190 if(wol->wolopts & WAKE_PHY)
1191 tp->wol_events |= TYPHOON_WAKE_LINK_EVENT;
1192 if(wol->wolopts & WAKE_MAGIC)
1193 tp->wol_events |= TYPHOON_WAKE_MAGIC_PKT;
1194
1195 return 0;
1196}
1197
1198static u32
1199typhoon_get_rx_csum(struct net_device *dev)
1200{
1201 /* For now, we don't allow turning off RX checksums.
1202 */
1203 return 1;
1204}
1205
1206static void
1207typhoon_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
1208{
1209 ering->rx_max_pending = RXENT_ENTRIES;
1210 ering->rx_mini_max_pending = 0;
1211 ering->rx_jumbo_max_pending = 0;
1212 ering->tx_max_pending = TXLO_ENTRIES - 1;
1213
1214 ering->rx_pending = RXENT_ENTRIES;
1215 ering->rx_mini_pending = 0;
1216 ering->rx_jumbo_pending = 0;
1217 ering->tx_pending = TXLO_ENTRIES - 1;
1218}
1219
1220static const struct ethtool_ops typhoon_ethtool_ops = {
1221 .get_settings = typhoon_get_settings,
1222 .set_settings = typhoon_set_settings,
1223 .get_drvinfo = typhoon_get_drvinfo,
1224 .get_wol = typhoon_get_wol,
1225 .set_wol = typhoon_set_wol,
1226 .get_link = ethtool_op_get_link,
1227 .get_rx_csum = typhoon_get_rx_csum,
 1228	.set_tx_csum		= ethtool_op_set_tx_csum,
 1229	.set_sg			= ethtool_op_set_sg,
1230 .set_tso = ethtool_op_set_tso,
1231 .get_ringparam = typhoon_get_ringparam,
1232};
1233
1234static int
1235typhoon_wait_interrupt(void __iomem *ioaddr)
1236{
1237 int i, err = 0;
1238
1239 for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
1240 if(ioread32(ioaddr + TYPHOON_REG_INTR_STATUS) &
1241 TYPHOON_INTR_BOOTCMD)
1242 goto out;
1243 udelay(TYPHOON_UDELAY);
1244 }
1245
1246 err = -ETIMEDOUT;
1247
1248out:
1249 iowrite32(TYPHOON_INTR_BOOTCMD, ioaddr + TYPHOON_REG_INTR_STATUS);
1250 return err;
1251}
1252
1253#define shared_offset(x) offsetof(struct typhoon_shared, x)
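/* Note: shared_offset(x) is the byte offset of member x within struct
 * typhoon_shared, so tp->shared_dma + shared_offset(txLo) below is the bus
 * address of the txLo ring inside the single shared DMA allocation.
 */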
1254
1255static void
1256typhoon_init_interface(struct typhoon *tp)
1257{
1258 struct typhoon_interface *iface = &tp->shared->iface;
1259 dma_addr_t shared_dma;
1260
1261 memset(tp->shared, 0, sizeof(struct typhoon_shared));
1262
1263 /* The *Hi members of iface are all init'd to zero by the memset().
1264 */
1265 shared_dma = tp->shared_dma + shared_offset(indexes);
1266 iface->ringIndex = cpu_to_le32(shared_dma);
1267
1268 shared_dma = tp->shared_dma + shared_offset(txLo);
1269 iface->txLoAddr = cpu_to_le32(shared_dma);
1270 iface->txLoSize = cpu_to_le32(TXLO_ENTRIES * sizeof(struct tx_desc));
1271
1272 shared_dma = tp->shared_dma + shared_offset(txHi);
1273 iface->txHiAddr = cpu_to_le32(shared_dma);
1274 iface->txHiSize = cpu_to_le32(TXHI_ENTRIES * sizeof(struct tx_desc));
1275
1276 shared_dma = tp->shared_dma + shared_offset(rxBuff);
1277 iface->rxBuffAddr = cpu_to_le32(shared_dma);
1278 iface->rxBuffSize = cpu_to_le32(RXFREE_ENTRIES *
1279 sizeof(struct rx_free));
1280
1281 shared_dma = tp->shared_dma + shared_offset(rxLo);
1282 iface->rxLoAddr = cpu_to_le32(shared_dma);
1283 iface->rxLoSize = cpu_to_le32(RX_ENTRIES * sizeof(struct rx_desc));
1284
1285 shared_dma = tp->shared_dma + shared_offset(rxHi);
1286 iface->rxHiAddr = cpu_to_le32(shared_dma);
1287 iface->rxHiSize = cpu_to_le32(RX_ENTRIES * sizeof(struct rx_desc));
1288
1289 shared_dma = tp->shared_dma + shared_offset(cmd);
1290 iface->cmdAddr = cpu_to_le32(shared_dma);
1291 iface->cmdSize = cpu_to_le32(COMMAND_RING_SIZE);
1292
1293 shared_dma = tp->shared_dma + shared_offset(resp);
1294 iface->respAddr = cpu_to_le32(shared_dma);
1295 iface->respSize = cpu_to_le32(RESPONSE_RING_SIZE);
1296
1297 shared_dma = tp->shared_dma + shared_offset(zeroWord);
1298 iface->zeroAddr = cpu_to_le32(shared_dma);
1299
1300 tp->indexes = &tp->shared->indexes;
1301 tp->txLoRing.ringBase = (u8 *) tp->shared->txLo;
1302 tp->txHiRing.ringBase = (u8 *) tp->shared->txHi;
1303 tp->rxLoRing.ringBase = (u8 *) tp->shared->rxLo;
1304 tp->rxHiRing.ringBase = (u8 *) tp->shared->rxHi;
1305 tp->rxBuffRing.ringBase = (u8 *) tp->shared->rxBuff;
1306 tp->cmdRing.ringBase = (u8 *) tp->shared->cmd;
1307 tp->respRing.ringBase = (u8 *) tp->shared->resp;
1308
1309 tp->txLoRing.writeRegister = TYPHOON_REG_TX_LO_READY;
1310 tp->txHiRing.writeRegister = TYPHOON_REG_TX_HI_READY;
1311
 1312	tp->txlo_dma_addr = le32_to_cpu(iface->txLoAddr);
 1313	tp->card_state = Sleeping;
1314
1315 tp->offload = TYPHOON_OFFLOAD_IP_CHKSUM | TYPHOON_OFFLOAD_TCP_CHKSUM;
1316 tp->offload |= TYPHOON_OFFLOAD_UDP_CHKSUM | TSO_OFFLOAD_ON;
1317
1318 spin_lock_init(&tp->command_lock);
1319 spin_lock_init(&tp->state_lock);
1320
1321 /* Force the writes to the shared memory area out before continuing. */
1322 wmb();
1323}
1324
1325static void
1326typhoon_init_rings(struct typhoon *tp)
1327{
1328 memset(tp->indexes, 0, sizeof(struct typhoon_indexes));
1329
1330 tp->txLoRing.lastWrite = 0;
1331 tp->txHiRing.lastWrite = 0;
1332 tp->rxLoRing.lastWrite = 0;
1333 tp->rxHiRing.lastWrite = 0;
1334 tp->rxBuffRing.lastWrite = 0;
1335 tp->cmdRing.lastWrite = 0;
1336 tp->cmdRing.lastWrite = 0;
1337
1338 tp->txLoRing.lastRead = 0;
1339 tp->txHiRing.lastRead = 0;
1340}
1341
1342static const struct firmware *typhoon_fw;
1343
1344static int
1345typhoon_request_firmware(struct typhoon *tp)
1346{
1347 const struct typhoon_file_header *fHdr;
1348 const struct typhoon_section_header *sHdr;
1349 const u8 *image_data;
1350 u32 numSections;
1351 u32 section_len;
1352 u32 remaining;
1353 int err;
1354
1355 if (typhoon_fw)
1356 return 0;
1357
1358 err = request_firmware(&typhoon_fw, FIRMWARE_NAME, &tp->pdev->dev);
1359 if (err) {
1360 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
1361 FIRMWARE_NAME);
1362 return err;
1363 }
1364
1365 image_data = (u8 *) typhoon_fw->data;
1366 remaining = typhoon_fw->size;
1367 if (remaining < sizeof(struct typhoon_file_header))
1368 goto invalid_fw;
 1369
1370 fHdr = (struct typhoon_file_header *) image_data;
1371 if (memcmp(fHdr->tag, "TYPHOON", 8))
1372 goto invalid_fw;
1373
1374 numSections = le32_to_cpu(fHdr->numSections);
1375 image_data += sizeof(struct typhoon_file_header);
1376 remaining -= sizeof(struct typhoon_file_header);
1377
1378 while (numSections--) {
1379 if (remaining < sizeof(struct typhoon_section_header))
1380 goto invalid_fw;
1381
1382 sHdr = (struct typhoon_section_header *) image_data;
1383 image_data += sizeof(struct typhoon_section_header);
1384 section_len = le32_to_cpu(sHdr->len);
1385
1386 if (remaining < section_len)
1387 goto invalid_fw;
1388
1389 image_data += section_len;
1390 remaining -= section_len;
1391 }
1392
1393 return 0;
 1394
1395invalid_fw:
 1396	netdev_err(tp->dev, "Invalid firmware image\n");
1397 release_firmware(typhoon_fw);
1398 typhoon_fw = NULL;
 1399	return -EINVAL;
1400}
1401
1402static int
1403typhoon_download_firmware(struct typhoon *tp)
1404{
1405 void __iomem *ioaddr = tp->ioaddr;
1406 struct pci_dev *pdev = tp->pdev;
1407 const struct typhoon_file_header *fHdr;
1408 const struct typhoon_section_header *sHdr;
1409 const u8 *image_data;
1410 void *dpage;
1411 dma_addr_t dpage_dma;
 1412	__sum16 csum;
1413 u32 irqEnabled;
1414 u32 irqMasked;
1415 u32 numSections;
1416 u32 section_len;
 1417	u32 len;
1418 u32 load_addr;
1419 u32 hmac;
1420 int i;
1421 int err;
1422
 1423	image_data = (u8 *) typhoon_fw->data;
 1424	fHdr = (struct typhoon_file_header *) image_data;
 1425
1426 /* Cannot just map the firmware image using pci_map_single() as
1427 * the firmware is vmalloc()'d and may not be physically contiguous,
1428 * so we allocate some consistent memory to copy the sections into.
1429 */
 1430	err = -ENOMEM;
1431 dpage = pci_alloc_consistent(pdev, PAGE_SIZE, &dpage_dma);
1432 if(!dpage) {
 1433		netdev_err(tp->dev, "no DMA mem for firmware\n");
1434 goto err_out;
1435 }
1436
1437 irqEnabled = ioread32(ioaddr + TYPHOON_REG_INTR_ENABLE);
1438 iowrite32(irqEnabled | TYPHOON_INTR_BOOTCMD,
1439 ioaddr + TYPHOON_REG_INTR_ENABLE);
1440 irqMasked = ioread32(ioaddr + TYPHOON_REG_INTR_MASK);
1441 iowrite32(irqMasked | TYPHOON_INTR_BOOTCMD,
1442 ioaddr + TYPHOON_REG_INTR_MASK);
1443
1444 err = -ETIMEDOUT;
1445 if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
 1446		netdev_err(tp->dev, "card ready timeout\n");
1447 goto err_out_irq;
1448 }
1449
1450 numSections = le32_to_cpu(fHdr->numSections);
1451 load_addr = le32_to_cpu(fHdr->startAddr);
1452
1453 iowrite32(TYPHOON_INTR_BOOTCMD, ioaddr + TYPHOON_REG_INTR_STATUS);
1454 iowrite32(load_addr, ioaddr + TYPHOON_REG_DOWNLOAD_BOOT_ADDR);
1455 hmac = le32_to_cpu(fHdr->hmacDigest[0]);
1456 iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_0);
1457 hmac = le32_to_cpu(fHdr->hmacDigest[1]);
1458 iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_1);
1459 hmac = le32_to_cpu(fHdr->hmacDigest[2]);
1460 iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_2);
1461 hmac = le32_to_cpu(fHdr->hmacDigest[3]);
1462 iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_3);
1463 hmac = le32_to_cpu(fHdr->hmacDigest[4]);
1464 iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_4);
1465 typhoon_post_pci_writes(ioaddr);
1466 iowrite32(TYPHOON_BOOTCMD_RUNTIME_IMAGE, ioaddr + TYPHOON_REG_COMMAND);
1467
1468 image_data += sizeof(struct typhoon_file_header);
1469
1470 /* The ioread32() in typhoon_wait_interrupt() will force the
1471 * last write to the command register to post, so
1472 * we don't need a typhoon_post_pci_writes() after it.
1473 */
1474 for(i = 0; i < numSections; i++) {
1475 sHdr = (struct typhoon_section_header *) image_data;
1476 image_data += sizeof(struct typhoon_section_header);
1477 load_addr = le32_to_cpu(sHdr->startAddr);
1478 section_len = le32_to_cpu(sHdr->len);
1479
1480 while(section_len) {
1481 len = min_t(u32, section_len, PAGE_SIZE);
 1482
1483 if(typhoon_wait_interrupt(ioaddr) < 0 ||
1484 ioread32(ioaddr + TYPHOON_REG_STATUS) !=
1485 TYPHOON_STATUS_WAITING_FOR_SEGMENT) {
 1486				netdev_err(tp->dev, "segment ready timeout\n");
1487 goto err_out_irq;
1488 }
 1489
 1490			/* Do a pseudo IPv4 checksum on the data -- first
1491 * need to convert each u16 to cpu order before
1492 * summing. Fortunately, due to the properties of
1493 * the checksum, we can do this once, at the end.
1494 */
1495 csum = csum_fold(csum_partial_copy_nocheck(image_data,
1496 dpage, len,
1497 0));
1498
1499 iowrite32(len, ioaddr + TYPHOON_REG_BOOT_LENGTH);
1500 iowrite32(le16_to_cpu((__force __le16)csum),
1501 ioaddr + TYPHOON_REG_BOOT_CHECKSUM);
1502 iowrite32(load_addr,
1503 ioaddr + TYPHOON_REG_BOOT_DEST_ADDR);
1504 iowrite32(0, ioaddr + TYPHOON_REG_BOOT_DATA_HI);
1505 iowrite32(dpage_dma, ioaddr + TYPHOON_REG_BOOT_DATA_LO);
1506 typhoon_post_pci_writes(ioaddr);
1507 iowrite32(TYPHOON_BOOTCMD_SEG_AVAILABLE,
 1508					ioaddr + TYPHOON_REG_COMMAND);
1509
1510 image_data += len;
1511 load_addr += len;
1512 section_len -= len;
1513 }
1514 }
1515
1516 if(typhoon_wait_interrupt(ioaddr) < 0 ||
1517 ioread32(ioaddr + TYPHOON_REG_STATUS) !=
1518 TYPHOON_STATUS_WAITING_FOR_SEGMENT) {
 1519		netdev_err(tp->dev, "final segment ready timeout\n");
1520 goto err_out_irq;
1521 }
1522
1523 iowrite32(TYPHOON_BOOTCMD_DNLD_COMPLETE, ioaddr + TYPHOON_REG_COMMAND);
1524
1525 if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_BOOT) < 0) {
1526 netdev_err(tp->dev, "boot ready timeout, status 0x%0x\n",
1527 ioread32(ioaddr + TYPHOON_REG_STATUS));
1528 goto err_out_irq;
1529 }
1530
1531 err = 0;
1532
1533err_out_irq:
1534 iowrite32(irqMasked, ioaddr + TYPHOON_REG_INTR_MASK);
1535 iowrite32(irqEnabled, ioaddr + TYPHOON_REG_INTR_ENABLE);
1536
 1537	pci_free_consistent(pdev, PAGE_SIZE, dpage, dpage_dma);
1538
1539err_out:
1540 return err;
1541}
1542
1543static int
1544typhoon_boot_3XP(struct typhoon *tp, u32 initial_status)
1545{
1546 void __iomem *ioaddr = tp->ioaddr;
1547
1548 if(typhoon_wait_status(ioaddr, initial_status) < 0) {
 1549		netdev_err(tp->dev, "boot ready timeout\n");
1550 goto out_timeout;
1551 }
1552
1553 iowrite32(0, ioaddr + TYPHOON_REG_BOOT_RECORD_ADDR_HI);
1554 iowrite32(tp->shared_dma, ioaddr + TYPHOON_REG_BOOT_RECORD_ADDR_LO);
1555 typhoon_post_pci_writes(ioaddr);
1556 iowrite32(TYPHOON_BOOTCMD_REG_BOOT_RECORD,
1557 ioaddr + TYPHOON_REG_COMMAND);
1558
1559 if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_RUNNING) < 0) {
1560 netdev_err(tp->dev, "boot finish timeout (status 0x%x)\n",
1561 ioread32(ioaddr + TYPHOON_REG_STATUS));
1562 goto out_timeout;
1563 }
1564
1565 /* Clear the Transmit and Command ready registers
1566 */
1567 iowrite32(0, ioaddr + TYPHOON_REG_TX_HI_READY);
1568 iowrite32(0, ioaddr + TYPHOON_REG_CMD_READY);
1569 iowrite32(0, ioaddr + TYPHOON_REG_TX_LO_READY);
1570 typhoon_post_pci_writes(ioaddr);
1571 iowrite32(TYPHOON_BOOTCMD_BOOT, ioaddr + TYPHOON_REG_COMMAND);
1572
1573 return 0;
1574
1575out_timeout:
1576 return -ETIMEDOUT;
1577}
1578
1579static u32
1580typhoon_clean_tx(struct typhoon *tp, struct transmit_ring *txRing,
 1581			volatile __le32 * index)
1582{
1583 u32 lastRead = txRing->lastRead;
1584 struct tx_desc *tx;
1585 dma_addr_t skb_dma;
1586 int dma_len;
1587 int type;
1588
1589 while(lastRead != le32_to_cpu(*index)) {
1590 tx = (struct tx_desc *) (txRing->ringBase + lastRead);
1591 type = tx->flags & TYPHOON_TYPE_MASK;
1592
1593 if(type == TYPHOON_TX_DESC) {
1594 /* This tx_desc describes a packet.
1595 */
 1596			unsigned long ptr = tx->tx_addr;
1597 struct sk_buff *skb = (struct sk_buff *) ptr;
1598 dev_kfree_skb_irq(skb);
1599 } else if(type == TYPHOON_FRAG_DESC) {
1600 /* This tx_desc describes a memory mapping. Free it.
1601 */
 1602			skb_dma = (dma_addr_t) le32_to_cpu(tx->frag.addr);
1603 dma_len = le16_to_cpu(tx->len);
1604 pci_unmap_single(tp->pdev, skb_dma, dma_len,
1605 PCI_DMA_TODEVICE);
1606 }
1607
1608 tx->flags = 0;
1609 typhoon_inc_tx_index(&lastRead, 1);
1610 }
1611
1612 return lastRead;
1613}
1614
1615static void
1616typhoon_tx_complete(struct typhoon *tp, struct transmit_ring *txRing,
 1617			volatile __le32 * index)
1618{
1619 u32 lastRead;
1620 int numDesc = MAX_SKB_FRAGS + 1;
1621
1622 /* This will need changing if we start to use the Hi Tx ring. */
1623 lastRead = typhoon_clean_tx(tp, txRing, index);
1624 if(netif_queue_stopped(tp->dev) && typhoon_num_free(txRing->lastWrite,
1625 lastRead, TXLO_ENTRIES) > (numDesc + 2))
1626 netif_wake_queue(tp->dev);
1627
1628 txRing->lastRead = lastRead;
1629 smp_wmb();
1630}
1631
1632static void
1633typhoon_recycle_rx_skb(struct typhoon *tp, u32 idx)
1634{
1635 struct typhoon_indexes *indexes = tp->indexes;
1636 struct rxbuff_ent *rxb = &tp->rxbuffers[idx];
1637 struct basic_ring *ring = &tp->rxBuffRing;
1638 struct rx_free *r;
1639
1640 if((ring->lastWrite + sizeof(*r)) % (RXFREE_ENTRIES * sizeof(*r)) ==
 1641				le32_to_cpu(indexes->rxBuffCleared)) {
1642 /* no room in ring, just drop the skb
1643 */
1644 dev_kfree_skb_any(rxb->skb);
1645 rxb->skb = NULL;
1646 return;
1647 }
1648
1649 r = (struct rx_free *) (ring->ringBase + ring->lastWrite);
1650 typhoon_inc_rxfree_index(&ring->lastWrite, 1);
1651 r->virtAddr = idx;
1652 r->physAddr = cpu_to_le32(rxb->dma_addr);
1653
1654 /* Tell the card about it */
1655 wmb();
1656 indexes->rxBuffReady = cpu_to_le32(ring->lastWrite);
1657}
1658
1659static int
1660typhoon_alloc_rx_skb(struct typhoon *tp, u32 idx)
1661{
1662 struct typhoon_indexes *indexes = tp->indexes;
1663 struct rxbuff_ent *rxb = &tp->rxbuffers[idx];
1664 struct basic_ring *ring = &tp->rxBuffRing;
1665 struct rx_free *r;
1666 struct sk_buff *skb;
1667 dma_addr_t dma_addr;
1668
1669 rxb->skb = NULL;
1670
1671 if((ring->lastWrite + sizeof(*r)) % (RXFREE_ENTRIES * sizeof(*r)) ==
 1672				le32_to_cpu(indexes->rxBuffCleared))
1673 return -ENOMEM;
1674
1675 skb = dev_alloc_skb(PKT_BUF_SZ);
1676 if(!skb)
1677 return -ENOMEM;
1678
1679#if 0
 1680	/* Please, 3com, fix the firmware to allow DMA to an unaligned
1681 * address! Pretty please?
1682 */
1683 skb_reserve(skb, 2);
1684#endif
1685
1686 skb->dev = tp->dev;
 1687	dma_addr = pci_map_single(tp->pdev, skb->data,
1688 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
1689
1690 /* Since no card does 64 bit DAC, the high bits will never
1691 * change from zero.
1692 */
1693 r = (struct rx_free *) (ring->ringBase + ring->lastWrite);
1694 typhoon_inc_rxfree_index(&ring->lastWrite, 1);
1695 r->virtAddr = idx;
1696 r->physAddr = cpu_to_le32(dma_addr);
1697 rxb->skb = skb;
1698 rxb->dma_addr = dma_addr;
1699
1700 /* Tell the card about it */
1701 wmb();
1702 indexes->rxBuffReady = cpu_to_le32(ring->lastWrite);
1703 return 0;
1704}
1705
1706static int
1707typhoon_rx(struct typhoon *tp, struct basic_ring *rxRing, volatile __le32 * ready,
1708 volatile __le32 * cleared, int budget)
1709{
1710 struct rx_desc *rx;
1711 struct sk_buff *skb, *new_skb;
1712 struct rxbuff_ent *rxb;
1713 dma_addr_t dma_addr;
1714 u32 local_ready;
1715 u32 rxaddr;
1716 int pkt_len;
1717 u32 idx;
1718 __le32 csum_bits;
1719 int received;
1720
1721 received = 0;
1722 local_ready = le32_to_cpu(*ready);
1723 rxaddr = le32_to_cpu(*cleared);
1724 while(rxaddr != local_ready && budget > 0) {
1725 rx = (struct rx_desc *) (rxRing->ringBase + rxaddr);
1726 idx = rx->addr;
1727 rxb = &tp->rxbuffers[idx];
1728 skb = rxb->skb;
1729 dma_addr = rxb->dma_addr;
1730
1731 typhoon_inc_rx_index(&rxaddr, 1);
1732
1733 if(rx->flags & TYPHOON_RX_ERROR) {
1734 typhoon_recycle_rx_skb(tp, idx);
1735 continue;
1736 }
1737
1738 pkt_len = le16_to_cpu(rx->frameLen);
1739
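/* Copybreak: small frames are copied into a freshly allocated skb
 * (reserving 2 bytes so the IP header lands on an aligned boundary) and
 * the original buffer is recycled straight back to the NIC; larger
 * frames are passed up as-is and a replacement buffer is allocated.
 */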
1740 if(pkt_len < rx_copybreak &&
1741 (new_skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
1742 skb_reserve(new_skb, 2);
1743 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr,
1744 PKT_BUF_SZ,
1745 PCI_DMA_FROMDEVICE);
1746 skb_copy_to_linear_data(new_skb, skb->data, pkt_len);
1747 pci_dma_sync_single_for_device(tp->pdev, dma_addr,
1748 PKT_BUF_SZ,
1749 PCI_DMA_FROMDEVICE);
1750 skb_put(new_skb, pkt_len);
1751 typhoon_recycle_rx_skb(tp, idx);
1752 } else {
1753 new_skb = skb;
1754 skb_put(new_skb, pkt_len);
1755 pci_unmap_single(tp->pdev, dma_addr, PKT_BUF_SZ,
1756 PCI_DMA_FROMDEVICE);
1757 typhoon_alloc_rx_skb(tp, idx);
1758 }
1759 new_skb->protocol = eth_type_trans(new_skb, tp->dev);
1760 csum_bits = rx->rxStatus & (TYPHOON_RX_IP_CHK_GOOD |
1761 TYPHOON_RX_UDP_CHK_GOOD | TYPHOON_RX_TCP_CHK_GOOD);
1762 if(csum_bits ==
1763 (TYPHOON_RX_IP_CHK_GOOD | TYPHOON_RX_TCP_CHK_GOOD) ||
1764 csum_bits ==
1765 (TYPHOON_RX_IP_CHK_GOOD | TYPHOON_RX_UDP_CHK_GOOD)) {
1766 new_skb->ip_summed = CHECKSUM_UNNECESSARY;
1767 } else
1768 new_skb->ip_summed = CHECKSUM_NONE;
1769
1770 spin_lock(&tp->state_lock);
1771 if(tp->vlgrp != NULL && rx->rxStatus & TYPHOON_RX_VLAN)
1772 vlan_hwaccel_receive_skb(new_skb, tp->vlgrp,
1773 ntohl(rx->vlanTag) & 0xffff);
1774 else
1775 netif_receive_skb(new_skb);
1776 spin_unlock(&tp->state_lock);
1777
1778 received++;
1779 budget--;
1780 }
1781 *cleared = cpu_to_le32(rxaddr);
1782
1783 return received;
1784}
1785
1786static void
1787typhoon_fill_free_ring(struct typhoon *tp)
1788{
1789 u32 i;
1790
1791 for(i = 0; i < RXENT_ENTRIES; i++) {
1792 struct rxbuff_ent *rxb = &tp->rxbuffers[i];
1793 if(rxb->skb)
1794 continue;
1795 if(typhoon_alloc_rx_skb(tp, i) < 0)
1796 break;
1797 }
1798}
1799
1800static int
1801typhoon_poll(struct napi_struct *napi, int budget)
1802{
1803 struct typhoon *tp = container_of(napi, struct typhoon, napi);
1804 struct typhoon_indexes *indexes = tp->indexes;
1805 int work_done;
1806
1807 rmb();
1808 if(!tp->awaiting_resp && indexes->respReady != indexes->respCleared)
1809 typhoon_process_response(tp, 0, NULL);
1810
1811 if(le32_to_cpu(indexes->txLoCleared) != tp->txLoRing.lastRead)
1812 typhoon_tx_complete(tp, &tp->txLoRing, &indexes->txLoCleared);
1813
1814 work_done = 0;
1815
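/* The high priority ring is serviced first with the full budget; the
 * low priority ring then gets whatever budget remains. This keeps
 * work_done from ever exceeding budget, which the napi_complete()
 * test further down depends on.
 */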
1816 if(indexes->rxHiCleared != indexes->rxHiReady) {
1817 work_done += typhoon_rx(tp, &tp->rxHiRing, &indexes->rxHiReady,
1818 &indexes->rxHiCleared, budget);
1819 }
1820
1821 if(indexes->rxLoCleared != indexes->rxLoReady) {
1822 work_done += typhoon_rx(tp, &tp->rxLoRing, &indexes->rxLoReady,
1823 &indexes->rxLoCleared, budget - work_done);
1824 }
1825
1826 if(le32_to_cpu(indexes->rxBuffCleared) == tp->rxBuffRing.lastWrite) {
1827 /* rxBuff ring is empty, try to fill it. */
1828 typhoon_fill_free_ring(tp);
1829 }
1830
1831 if (work_done < budget) {
1832 napi_complete(napi);
1833 iowrite32(TYPHOON_INTR_NONE,
1834 tp->ioaddr + TYPHOON_REG_INTR_MASK);
1835 typhoon_post_pci_writes(tp->ioaddr);
1836 }
1837
1838 return work_done;
1839}
1840
1841static irqreturn_t
1842typhoon_interrupt(int irq, void *dev_instance)
1843{
1844 struct net_device *dev = dev_instance;
1845 struct typhoon *tp = netdev_priv(dev);
1846 void __iomem *ioaddr = tp->ioaddr;
1847 u32 intr_status;
1848
1849 intr_status = ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
1850 if(!(intr_status & TYPHOON_INTR_HOST_INT))
1851 return IRQ_NONE;
1852
1853 iowrite32(intr_status, ioaddr + TYPHOON_REG_INTR_STATUS);
1854
1855 if (napi_schedule_prep(&tp->napi)) {
1856 iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
1857 typhoon_post_pci_writes(ioaddr);
1858 __napi_schedule(&tp->napi);
1859 } else {
1860 netdev_err(dev, "Error, poll already scheduled\n");
1861 }
1862 return IRQ_HANDLED;
1863}
1864
1865static void
1866typhoon_free_rx_rings(struct typhoon *tp)
1867{
1868 u32 i;
1869
1870 for(i = 0; i < RXENT_ENTRIES; i++) {
1871 struct rxbuff_ent *rxb = &tp->rxbuffers[i];
1872 if(rxb->skb) {
1873 pci_unmap_single(tp->pdev, rxb->dma_addr, PKT_BUF_SZ,
1874 PCI_DMA_FROMDEVICE);
1875 dev_kfree_skb(rxb->skb);
1876 rxb->skb = NULL;
1877 }
1878 }
1879}
1880
1881static int
1882typhoon_sleep(struct typhoon *tp, pci_power_t state, __le16 events)
1883{
1884 struct pci_dev *pdev = tp->pdev;
1885 void __iomem *ioaddr = tp->ioaddr;
1886 struct cmd_desc xp_cmd;
1887 int err;
1888
1889 INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_ENABLE_WAKE_EVENTS);
1890 xp_cmd.parm1 = events;
1891 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1892 if(err < 0) {
1893 netdev_err(tp->dev, "typhoon_sleep(): wake events cmd err %d\n",
1894 err);
1895 return err;
1896 }
1897
1898 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_GOTO_SLEEP);
1899 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1900 if(err < 0) {
1901 netdev_err(tp->dev, "typhoon_sleep(): sleep cmd err %d\n", err);
1902 return err;
1903 }
1904
1905 if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_SLEEPING) < 0)
1906 return -ETIMEDOUT;
1907
1908 /* Since we cannot monitor the status of the link while sleeping,
1909 * tell the world it went away.
1910 */
1911 netif_carrier_off(tp->dev);
1912
1913 pci_enable_wake(tp->pdev, state, 1);
1914 pci_disable_device(pdev);
1915 return pci_set_power_state(pdev, state);
1916}
1917
1918static int
1919typhoon_wakeup(struct typhoon *tp, int wait_type)
1920{
1921 struct pci_dev *pdev = tp->pdev;
1922 void __iomem *ioaddr = tp->ioaddr;
1923
1924 pci_set_power_state(pdev, PCI_D0);
1925 pci_restore_state(pdev);
1926
1927 /* Post 2.x.x versions of the Sleep Image require a reset before
1928 * we can download the Runtime Image. But let's not make users of
1929 * the old firmware pay for the reset.
1930 */
1931 iowrite32(TYPHOON_BOOTCMD_WAKEUP, ioaddr + TYPHOON_REG_COMMAND);
1932 if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_HOST) < 0 ||
1933 (tp->capabilities & TYPHOON_WAKEUP_NEEDS_RESET))
1934 return typhoon_reset(ioaddr, wait_type);
1935
1936 return 0;
1937}
1938
1939static int
1940typhoon_start_runtime(struct typhoon *tp)
1941{
1942 struct net_device *dev = tp->dev;
1943 void __iomem *ioaddr = tp->ioaddr;
1944 struct cmd_desc xp_cmd;
1945 int err;
1946
1947 typhoon_init_rings(tp);
1948 typhoon_fill_free_ring(tp);
1949
1950 err = typhoon_download_firmware(tp);
1951 if(err < 0) {
1952 netdev_err(tp->dev, "cannot load runtime on 3XP\n");
1953 goto error_out;
1954 }
1955
1956 if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_BOOT) < 0) {
1957 netdev_err(tp->dev, "cannot boot 3XP\n");
1958 err = -EIO;
1959 goto error_out;
1960 }
1961
1962 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAX_PKT_SIZE);
1963 xp_cmd.parm1 = cpu_to_le16(PKT_BUF_SZ);
1964 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1965 if(err < 0)
1966 goto error_out;
1967
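/* The station address is split across two command parameters: parm1
 * takes the first two bytes and parm2 the remaining four. dev_addr is
 * stored in network byte order, hence the ntohs()/ntohl() before
 * converting to the little-endian layout the 3XP commands appear to
 * expect.
 */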
1968 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAC_ADDRESS);
1969 xp_cmd.parm1 = cpu_to_le16(ntohs(*(__be16 *)&dev->dev_addr[0]));
1970 xp_cmd.parm2 = cpu_to_le32(ntohl(*(__be32 *)&dev->dev_addr[2]));
1971 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1972 if(err < 0)
1973 goto error_out;
1974
1975 /* Disable IRQ coalescing -- we can reenable it when 3Com gives
1976 * us some more information on how to control it.
1977 */
1978 INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_IRQ_COALESCE_CTRL);
1979 xp_cmd.parm1 = 0;
1980 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1981 if(err < 0)
1982 goto error_out;
1983
1984 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_XCVR_SELECT);
1985 xp_cmd.parm1 = tp->xcvr_select;
1986 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1987 if(err < 0)
1988 goto error_out;
1989
1990 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_VLAN_TYPE_WRITE);
1991 xp_cmd.parm1 = cpu_to_le16(ETH_P_8021Q);
1992 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1993 if(err < 0)
1994 goto error_out;
1995
1996 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_OFFLOAD_TASKS);
1997 spin_lock_bh(&tp->state_lock);
1998 xp_cmd.parm2 = tp->offload;
1999 xp_cmd.parm3 = tp->offload;
2000 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
2001 spin_unlock_bh(&tp->state_lock);
2002 if(err < 0)
2003 goto error_out;
2004
2005 typhoon_set_rx_mode(dev);
2006
2007 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_TX_ENABLE);
2008 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
2009 if(err < 0)
2010 goto error_out;
2011
2012 INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_RX_ENABLE);
2013 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
2014 if(err < 0)
2015 goto error_out;
2016
2017 tp->card_state = Running;
2018 smp_wmb();
2019
2020 iowrite32(TYPHOON_INTR_ENABLE_ALL, ioaddr + TYPHOON_REG_INTR_ENABLE);
2021 iowrite32(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_MASK);
2022 typhoon_post_pci_writes(ioaddr);
2023
2024 return 0;
2025
2026error_out:
2027 typhoon_reset(ioaddr, WaitNoSleep);
2028 typhoon_free_rx_rings(tp);
2029 typhoon_init_rings(tp);
2030 return err;
2031}
2032
2033static int
2034typhoon_stop_runtime(struct typhoon *tp, int wait_type)
2035{
2036 struct typhoon_indexes *indexes = tp->indexes;
2037 struct transmit_ring *txLo = &tp->txLoRing;
2038 void __iomem *ioaddr = tp->ioaddr;
2039 struct cmd_desc xp_cmd;
2040 int i;
2041
2042 /* Disable interrupts early, since we can't schedule a poll
2043 * when called with !netif_running(). This will be posted
2044 * when we force the posting of the command.
2045 */
2046 iowrite32(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_ENABLE);
2047
2048 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_RX_DISABLE);
2049 typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
2050
2051 /* Wait 1/2 sec for any outstanding transmits to occur
2052 * We'll cleanup after the reset if this times out.
2053 */
2054 for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
2055 if(indexes->txLoCleared == cpu_to_le32(txLo->lastWrite))
2056 break;
2057 udelay(TYPHOON_UDELAY);
2058 }
2059
2060 if(i == TYPHOON_WAIT_TIMEOUT)
2061 netdev_err(tp->dev, "halt timed out waiting for Tx to complete\n");
2062
2063 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_TX_DISABLE);
2064 typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
2065
2066 /* save the statistics so when we bring the interface up again,
2067 * the values reported to userspace are correct.
2068 */
2069 tp->card_state = Sleeping;
2070 smp_wmb();
2071 typhoon_do_get_stats(tp);
2072 memcpy(&tp->stats_saved, &tp->stats, sizeof(struct net_device_stats));
2073
2074 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_HALT);
2075 typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
2076
2077 if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_HALTED) < 0)
2078 netdev_err(tp->dev, "timed out waiting for 3XP to halt\n");
2079
2080 if(typhoon_reset(ioaddr, wait_type) < 0) {
2081 netdev_err(tp->dev, "unable to reset 3XP\n");
2082 return -ETIMEDOUT;
2083 }
2084
2085 /* cleanup any outstanding Tx packets */
2086 if(indexes->txLoCleared != cpu_to_le32(txLo->lastWrite)) {
2087 indexes->txLoCleared = cpu_to_le32(txLo->lastWrite);
2088 typhoon_clean_tx(tp, &tp->txLoRing, &indexes->txLoCleared);
2089 }
2090
2091 return 0;
2092}
2093
2094static void
2095typhoon_tx_timeout(struct net_device *dev)
2096{
2097 struct typhoon *tp = netdev_priv(dev);
2098
2099 if(typhoon_reset(tp->ioaddr, WaitNoSleep) < 0) {
2100 netdev_warn(dev, "could not reset in tx timeout\n");
2101 goto truly_dead;
2102 }
2103
2104 /* If we ever start using the Hi ring, it will need cleaning too */
2105 typhoon_clean_tx(tp, &tp->txLoRing, &tp->indexes->txLoCleared);
2106 typhoon_free_rx_rings(tp);
2107
2108 if(typhoon_start_runtime(tp) < 0) {
2109 netdev_err(dev, "could not start runtime in tx timeout\n");
2110 goto truly_dead;
2111 }
2112
2113 netif_wake_queue(dev);
2114 return;
2115
2116truly_dead:
2117 /* Reset the hardware, and turn off carrier to avoid more timeouts */
2118 typhoon_reset(tp->ioaddr, NoWait);
2119 netif_carrier_off(dev);
2120}
2121
2122static int
2123typhoon_open(struct net_device *dev)
2124{
2125 struct typhoon *tp = netdev_priv(dev);
2126 int err;
2127
2128 err = typhoon_request_firmware(tp);
2129 if (err)
2130 goto out;
2131
2132 err = typhoon_wakeup(tp, WaitSleep);
2133 if(err < 0) {
2134 netdev_err(dev, "unable to wakeup device\n");
2135 goto out_sleep;
2136 }
2137
2138 err = request_irq(dev->irq, typhoon_interrupt, IRQF_SHARED,
2139 dev->name, dev);
2140 if(err < 0)
2141 goto out_sleep;
2142
2143 napi_enable(&tp->napi);
2144
2145 err = typhoon_start_runtime(tp);
2146 if(err < 0) {
2147 napi_disable(&tp->napi);
2148 goto out_irq;
2149 }
2150
2151 netif_start_queue(dev);
2152 return 0;
2153
2154out_irq:
2155 free_irq(dev->irq, dev);
2156
2157out_sleep:
2158 if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
2159 netdev_err(dev, "unable to reboot into sleep img\n");
2160 typhoon_reset(tp->ioaddr, NoWait);
2161 goto out;
2162 }
2163
2164 if(typhoon_sleep(tp, PCI_D3hot, 0) < 0)
2165 netdev_err(dev, "unable to go back to sleep\n");
2166
2167out:
2168 return err;
2169}
2170
2171static int
2172typhoon_close(struct net_device *dev)
2173{
2174 struct typhoon *tp = netdev_priv(dev);
2175
2176 netif_stop_queue(dev);
2177 napi_disable(&tp->napi);
2178
2179 if(typhoon_stop_runtime(tp, WaitSleep) < 0)
2180 netdev_err(dev, "unable to stop runtime\n");
2181
2182 /* Make sure there is no irq handler running on a different CPU. */
2183 free_irq(dev->irq, dev);
2184
2185 typhoon_free_rx_rings(tp);
2186 typhoon_init_rings(tp);
2187
2188 if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0)
2189 netdev_err(dev, "unable to boot sleep image\n");
2190
2191 if(typhoon_sleep(tp, PCI_D3hot, 0) < 0)
2192 netdev_err(dev, "unable to put card to sleep\n");
2193
2194 return 0;
2195}
2196
2197#ifdef CONFIG_PM
2198static int
2199typhoon_resume(struct pci_dev *pdev)
2200{
2201 struct net_device *dev = pci_get_drvdata(pdev);
2202 struct typhoon *tp = netdev_priv(dev);
2203
2204 /* If we're down, resume when the interface is brought back up.
2205 */
2206 if(!netif_running(dev))
2207 return 0;
2208
2209 if(typhoon_wakeup(tp, WaitNoSleep) < 0) {
2210 netdev_err(dev, "critical: could not wake up in resume\n");
2211 goto reset;
2212 }
2213
2214 if(typhoon_start_runtime(tp) < 0) {
2215 netdev_err(dev, "critical: could not start runtime in resume\n");
2216 goto reset;
2217 }
2218
2219 netif_device_attach(dev);
2220 return 0;
2221
2222reset:
2223 typhoon_reset(tp->ioaddr, NoWait);
2224 return -EBUSY;
2225}
2226
2227static int
2228typhoon_suspend(struct pci_dev *pdev, pm_message_t state)
2229{
2230 struct net_device *dev = pci_get_drvdata(pdev);
2231 struct typhoon *tp = netdev_priv(dev);
2232 struct cmd_desc xp_cmd;
2233
2234 /* If we're down, we're already suspended.
2235 */
2236 if(!netif_running(dev))
2237 return 0;
2238
2239 spin_lock_bh(&tp->state_lock);
2240 if(tp->vlgrp && tp->wol_events & TYPHOON_WAKE_MAGIC_PKT) {
2241 spin_unlock_bh(&tp->state_lock);
2242 netdev_err(dev, "cannot do WAKE_MAGIC with VLANS\n");
2243 return -EBUSY;
2244 }
2245 spin_unlock_bh(&tp->state_lock);
2246
2247 netif_device_detach(dev);
2248
2249 if(typhoon_stop_runtime(tp, WaitNoSleep) < 0) {
2250 netdev_err(dev, "unable to stop runtime\n");
2251 goto need_resume;
2252 }
2253
2254 typhoon_free_rx_rings(tp);
2255 typhoon_init_rings(tp);
2256
2257 if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
2258 netdev_err(dev, "unable to boot sleep image\n");
2259 goto need_resume;
2260 }
2261
2262 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAC_ADDRESS);
2263 xp_cmd.parm1 = cpu_to_le16(ntohs(*(__be16 *)&dev->dev_addr[0]));
2264 xp_cmd.parm2 = cpu_to_le32(ntohl(*(__be32 *)&dev->dev_addr[2]));
2265 if(typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL) < 0) {
2266 netdev_err(dev, "unable to set mac address in suspend\n");
2267 goto need_resume;
2268 }
2269
2270 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_RX_FILTER);
2271 xp_cmd.parm1 = TYPHOON_RX_FILTER_DIRECTED | TYPHOON_RX_FILTER_BROADCAST;
2272 if(typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL) < 0) {
2273 netdev_err(dev, "unable to set rx filter in suspend\n");
2274 goto need_resume;
2275 }
2276
2277 if(typhoon_sleep(tp, pci_choose_state(pdev, state), tp->wol_events) < 0) {
2278 netdev_err(dev, "unable to put card to sleep\n");
2279 goto need_resume;
2280 }
2281
2282 return 0;
2283
2284need_resume:
2285 typhoon_resume(pdev);
2286 return -EBUSY;
2287}
2288#endif
2289
2290static int __devinit
2291typhoon_test_mmio(struct pci_dev *pdev)
2292{
2293 void __iomem *ioaddr = pci_iomap(pdev, 1, 128);
2294 int mode = 0;
2295 u32 val;
2296
2297 if(!ioaddr)
2298 goto out;
2299
2300 if(ioread32(ioaddr + TYPHOON_REG_STATUS) !=
2301 TYPHOON_STATUS_WAITING_FOR_HOST)
2302 goto out_unmap;
2303
2304 iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
2305 iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);
2306 iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_ENABLE);
2307
2308 /* Ok, see if we can change our interrupt status register by
2309 * sending ourselves an interrupt. If so, then MMIO works.
2310 * The 50usec delay is arbitrary -- it could probably be smaller.
2311 */
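/* If the self-interrupt bit becomes visible in INTR_STATUS through the
 * memory-mapped window, reads and writes are reaching the card and MMIO
 * (mode 1) is reported; otherwise the caller falls back to port IO.
 */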
2312 val = ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
2313 if((val & TYPHOON_INTR_SELF) == 0) {
2314 iowrite32(1, ioaddr + TYPHOON_REG_SELF_INTERRUPT);
2315 ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
2316 udelay(50);
2317 val = ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
2318 if(val & TYPHOON_INTR_SELF)
2319 mode = 1;
2320 }
2321
2322 iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
2323 iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);
2324 iowrite32(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_ENABLE);
2325 ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
2326
2327out_unmap:
2328 pci_iounmap(pdev, ioaddr);
2329
2330out:
2331 if(!mode)
2332 pr_info("%s: falling back to port IO\n", pci_name(pdev));
2333 return mode;
2334}
2335
2336static const struct net_device_ops typhoon_netdev_ops = {
2337 .ndo_open = typhoon_open,
2338 .ndo_stop = typhoon_close,
2339 .ndo_start_xmit = typhoon_start_tx,
2340 .ndo_set_multicast_list = typhoon_set_rx_mode,
2341 .ndo_tx_timeout = typhoon_tx_timeout,
2342 .ndo_get_stats = typhoon_get_stats,
2343 .ndo_validate_addr = eth_validate_addr,
2344 .ndo_set_mac_address = typhoon_set_mac_address,
2345 .ndo_change_mtu = eth_change_mtu,
2346 .ndo_vlan_rx_register = typhoon_vlan_rx_register,
2347};
2348
2349static int __devinit
2350typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2351{
2352 struct net_device *dev;
2353 struct typhoon *tp;
2354 int card_id = (int) ent->driver_data;
2355 void __iomem *ioaddr;
2356 void *shared;
2357 dma_addr_t shared_dma;
2358 struct cmd_desc xp_cmd;
2359 struct resp_desc xp_resp[3];
2360 int err = 0;
2361 const char *err_msg;
2362
2363 dev = alloc_etherdev(sizeof(*tp));
2364 if(dev == NULL) {
2365 err_msg = "unable to alloc new net device";
2366 err = -ENOMEM;
2367 goto error_out;
2368 }
2369 SET_NETDEV_DEV(dev, &pdev->dev);
2370
2371 err = pci_enable_device(pdev);
2372 if(err < 0) {
2373 err_msg = "unable to enable device";
2374 goto error_out_dev;
2375 }
2376
2377 err = pci_set_mwi(pdev);
2378 if(err < 0) {
2379 err_msg = "unable to set MWI";
2380 goto error_out_disable;
2381 }
2382
2383 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2384 if(err < 0) {
2385 err_msg = "No usable DMA configuration";
2386 goto error_out_mwi;
2387 }
2388
2389 /* sanity checks on IO and MMIO BARs
2390 */
2391 if(!(pci_resource_flags(pdev, 0) & IORESOURCE_IO)) {
2392 err_msg = "region #0 not a PCI IO resource, aborting";
2393 err = -ENODEV;
2394 goto error_out_mwi;
2395 }
2396 if(pci_resource_len(pdev, 0) < 128) {
2397 err_msg = "Invalid PCI IO region size, aborting";
2398 err = -ENODEV;
2399 goto error_out_mwi;
2400 }
2401 if(!(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
2402 err_msg = "region #1 not a PCI MMIO resource, aborting";
2403 err = -ENODEV;
2404 goto error_out_mwi;
2405 }
2406 if(pci_resource_len(pdev, 1) < 128) {
2407 err_msg = "Invalid PCI MMIO region size, aborting";
2408 err = -ENODEV;
2409 goto error_out_mwi;
2410 }
2411
2412 err = pci_request_regions(pdev, KBUILD_MODNAME);
2413 if(err < 0) {
2414 err_msg = "could not request regions";
2415 goto error_out_mwi;
2416 }
2417
2418 /* map our registers
2419 */
2420 if(use_mmio != 0 && use_mmio != 1)
2421 use_mmio = typhoon_test_mmio(pdev);
2422
2423 ioaddr = pci_iomap(pdev, use_mmio, 128);
2424 if (!ioaddr) {
2425 err_msg = "cannot remap registers, aborting";
2426 err = -EIO;
2427 goto error_out_regions;
2428 }
2429
2430 /* allocate pci dma space for rx and tx descriptor rings
2431 */
2432 shared = pci_alloc_consistent(pdev, sizeof(struct typhoon_shared),
2433 &shared_dma);
2434 if(!shared) {
2435 err_msg = "could not allocate DMA memory";
2436 err = -ENOMEM;
2437 goto error_out_remap;
2438 }
2439
2440 dev->irq = pdev->irq;
2441 tp = netdev_priv(dev);
2442 tp->shared = (struct typhoon_shared *) shared;
2443 tp->shared_dma = shared_dma;
2444 tp->pdev = pdev;
2445 tp->tx_pdev = pdev;
2446 tp->ioaddr = ioaddr;
2447 tp->tx_ioaddr = ioaddr;
2448 tp->dev = dev;
2449
2450 /* Init sequence:
2451 * 1) Reset the adapter to clear any bad juju
2452 * 2) Reload the sleep image
2453 * 3) Boot the sleep image
2454 * 4) Get the hardware address.
2455 * 5) Put the card to sleep.
2456 */
2457 if (typhoon_reset(ioaddr, WaitSleep) < 0) {
2458 err_msg = "could not reset 3XP";
2459 err = -EIO;
2460 goto error_out_dma;
2461 }
2462
2463 /* Now that we've reset the 3XP and are sure it's not going to
2464 * write all over memory, enable bus mastering, and save our
2465 * state for resuming after a suspend.
2466 */
2467 pci_set_master(pdev);
2468 pci_save_state(pdev);
2469
2470 typhoon_init_interface(tp);
2471 typhoon_init_rings(tp);
2472
2473 if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
2474 err_msg = "cannot boot 3XP sleep image";
2475 err = -EIO;
2476 goto error_out_reset;
2477 }
2478
2479 INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_MAC_ADDRESS);
2480 if(typhoon_issue_command(tp, 1, &xp_cmd, 1, xp_resp) < 0) {
2481 err_msg = "cannot read MAC address";
2482 err = -EIO;
2483 goto error_out_reset;
2484 }
2485
2486 *(__be16 *)&dev->dev_addr[0] = htons(le16_to_cpu(xp_resp[0].parm1));
2487 *(__be32 *)&dev->dev_addr[2] = htonl(le32_to_cpu(xp_resp[0].parm2));
2488
2489 if(!is_valid_ether_addr(dev->dev_addr)) {
2490 err_msg = "Could not obtain valid ethernet address, aborting";
2491 goto error_out_reset;
2492 }
2493
2494 /* Read the Sleep Image version last, so the response is valid
2495 * later when we print out the version reported.
2496 */
2497 INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS);
2498 if(typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp) < 0) {
2499 err_msg = "Could not get Sleep Image version";
2500 goto error_out_reset;
2501 }
2502
2503 tp->capabilities = typhoon_card_info[card_id].capabilities;
2504 tp->xcvr_select = TYPHOON_XCVR_AUTONEG;
2505
2506 /* Typhoon 1.0 Sleep Images return one response descriptor to the
2507 * READ_VERSIONS command. Those versions are OK after waking up
2508 * from sleep without needing a reset. Typhoon 1.1+ Sleep Images
2509 * seem to need a little extra help to get started. Since we don't
2510 * know how to nudge it along, just kick it.
2511 */
2512 if(xp_resp[0].numDesc != 0)
2513 tp->capabilities |= TYPHOON_WAKEUP_NEEDS_RESET;
2514
2515 if(typhoon_sleep(tp, PCI_D3hot, 0) < 0) {
2516 err_msg = "cannot put adapter to sleep";
2517 err = -EIO;
2518 goto error_out_reset;
2519 }
2520
2521 /* The chip-specific entries in the device structure. */
2522 dev->netdev_ops = &typhoon_netdev_ops;
2523 netif_napi_add(dev, &tp->napi, typhoon_poll, 16);
2524 dev->watchdog_timeo = TX_TIMEOUT;
2525
2526 SET_ETHTOOL_OPS(dev, &typhoon_ethtool_ops);
2527
2528 /* We can handle scatter gather, up to 16 entries, and
2529 * we can do IP checksumming (only version 4, doh...)
2530 */
2531 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
2532 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
2533 dev->features |= NETIF_F_TSO;
2534
2535 if(register_netdev(dev) < 0) {
2536 err_msg = "unable to register netdev";
2537 goto error_out_reset;
2538 }
2539
2540 pci_set_drvdata(pdev, dev);
2541
2542 netdev_info(dev, "%s at %s 0x%llx, %pM\n",
2543 typhoon_card_info[card_id].name,
2544 use_mmio ? "MMIO" : "IO",
2545 (unsigned long long)pci_resource_start(pdev, use_mmio),
2546 dev->dev_addr);
2547
2548 /* xp_resp still contains the response to the READ_VERSIONS command.
2549 * For debugging, let the user know what version he has.
2550 */
2551 if(xp_resp[0].numDesc == 0) {
2552 /* This is the Typhoon 1.0 type Sleep Image, last 16 bits
2553 * of version are Month/Day of build.
2554 */
2555 u16 monthday = le32_to_cpu(xp_resp[0].parm2) & 0xffff;
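/* Worked example: a parm2 ending in 0x0c1f gives monthday >> 8 == 12
 * and monthday & 0xff == 31, printed below as "12/31/2000".
 */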
2556 netdev_info(dev, "Typhoon 1.0 Sleep Image built %02u/%02u/2000\n",
2557 monthday >> 8, monthday & 0xff);
2558 } else if(xp_resp[0].numDesc == 2) {
2559 /* This is the Typhoon 1.1+ type Sleep Image
2560 */
2561 u32 sleep_ver = le32_to_cpu(xp_resp[0].parm2);
2562 u8 *ver_string = (u8 *) &xp_resp[1];
2563 ver_string[25] = 0;
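/* Worked example: sleep_ver == 0x03001000 decodes to "03.001.000" with
 * the %02x.%03x.%03x format below; ver_string is the version text
 * carried in the extra response descriptors, NUL terminated here.
 */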
2564 netdev_info(dev, "Typhoon 1.1+ Sleep Image version %02x.%03x.%03x %s\n",
2565 sleep_ver >> 24, (sleep_ver >> 12) & 0xfff,
2566 sleep_ver & 0xfff, ver_string);
2567 } else {
2568 netdev_warn(dev, "Unknown Sleep Image version (%u:%04x)\n",
2569 xp_resp[0].numDesc, le32_to_cpu(xp_resp[0].parm2));
2570 }
2571
2572 return 0;
2573
2574error_out_reset:
2575 typhoon_reset(ioaddr, NoWait);
2576
2577error_out_dma:
2578 pci_free_consistent(pdev, sizeof(struct typhoon_shared),
2579 shared, shared_dma);
2580error_out_remap:
2581 pci_iounmap(pdev, ioaddr);
2582error_out_regions:
2583 pci_release_regions(pdev);
2584error_out_mwi:
2585 pci_clear_mwi(pdev);
2586error_out_disable:
2587 pci_disable_device(pdev);
2588error_out_dev:
2589 free_netdev(dev);
2590error_out:
2591 pr_err("%s: %s\n", pci_name(pdev), err_msg);
2592 return err;
2593}
2594
2595static void __devexit
2596typhoon_remove_one(struct pci_dev *pdev)
2597{
2598 struct net_device *dev = pci_get_drvdata(pdev);
2599 struct typhoon *tp = netdev_priv(dev);
2600
2601 unregister_netdev(dev);
2602 pci_set_power_state(pdev, PCI_D0);
2603 pci_restore_state(pdev);
2604 typhoon_reset(tp->ioaddr, NoWait);
2605 pci_iounmap(pdev, tp->ioaddr);
2606 pci_free_consistent(pdev, sizeof(struct typhoon_shared),
2607 tp->shared, tp->shared_dma);
2608 pci_release_regions(pdev);
2609 pci_clear_mwi(pdev);
2610 pci_disable_device(pdev);
2611 pci_set_drvdata(pdev, NULL);
2612 free_netdev(dev);
2613}
2614
2615static struct pci_driver typhoon_driver = {
2616 .name = KBUILD_MODNAME,
2617 .id_table = typhoon_pci_tbl,
2618 .probe = typhoon_init_one,
2619 .remove = __devexit_p(typhoon_remove_one),
2620#ifdef CONFIG_PM
2621 .suspend = typhoon_suspend,
2622 .resume = typhoon_resume,
2623#endif
2624};
2625
2626static int __init
2627typhoon_init(void)
2628{
2629 return pci_register_driver(&typhoon_driver);
2630}
2631
2632static void __exit
2633typhoon_cleanup(void)
2634{
2635 if (typhoon_fw)
2636 release_firmware(typhoon_fw);
2637 pci_unregister_driver(&typhoon_driver);
2638}
2639
2640module_init(typhoon_init);
2641module_exit(typhoon_cleanup);