1/* typhoon.c: A Linux Ethernet device driver for 3Com 3CR990 family of NICs */
2/*
3 Written 2002-2004 by David Dillow <dave@thedillows.org>
4 Based on code written 1998-2000 by Donald Becker <becker@scyld.com> and
5 Linux 2.2.x driver by David P. McLean <davidpmclean@yahoo.com>.
6
7 This software may be used and distributed according to the terms of
8 the GNU General Public License (GPL), incorporated herein by reference.
9 Drivers based on or derived from this code fall under the GPL and must
10 retain the authorship, copyright and license notice. This file is not
11 a complete program and may only be used when the entire operating
12 system is licensed under the GPL.
13
14 This software is available on a public web site. It may enable
15 cryptographic capabilities of the 3Com hardware, and may be
16 exported from the United States under License Exception "TSU"
17 pursuant to 15 C.F.R. Section 740.13(e).
18
19 This work was funded by the National Library of Medicine under
20 the Department of Energy project number 0274DD06D1 and NLM project
21 number Y1-LM-2015-01.
22
23 This driver is designed for the 3Com 3CR990 Family of cards with the
24 3XP Processor. It has been tested on x86 and sparc64.
25
26 KNOWN ISSUES:
27 *) The current firmware always strips the VLAN tag off, even if
28 we tell it not to. You should filter VLANs at the switch
29 as a workaround (good practice in any event) until we can
30 get this fixed.
31 *) Cannot DMA Rx packets to a 2 byte aligned address. Also firmware
32 issue. Hopefully 3Com will fix it.
33 *) Waiting for a command response takes 8ms due to non-preemptable
34 polling. Only significant for getting stats and creating
 35 SAs, but an ugly wart nevertheless.
36
37 TODO:
38 *) Doesn't do IPSEC offloading. Yet. Keep yer pants on, it's coming.
39 *) Add more support for ethtool (especially for NIC stats)
40 *) Allow disabling of RX checksum offloading
41 *) Fix MAC changing to work while the interface is up
42 (Need to put commands on the TX ring, which changes
43 the locking)
44 *) Add in FCS to {rx,tx}_bytes, since the hardware doesn't. See
45 http://oss.sgi.com/cgi-bin/mesg.cgi?a=netdev&i=20031215152211.7003fe8e.rddunlap%40osdl.org
46*/
47
48/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
49 * Setting to > 1518 effectively disables this feature.
50 */
51static int rx_copybreak = 200;
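/* Example with the default of 200: a 60 byte TCP ACK is copied into a
 * freshly allocated skb and its receive buffer is handed straight back
 * to the 3XP, while a full 1500 byte frame is passed up the stack in
 * the buffer it arrived in (see typhoon_rx()).
 */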
52
53/* Should we use MMIO or Port IO?
54 * 0: Port IO
55 * 1: MMIO
56 * 2: Try MMIO, fallback to Port IO
57 */
58static unsigned int use_mmio = 2;
59
60/* end user-configurable values */
61
62/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
63 */
64static const int multicast_filter_limit = 32;
65
66/* Operational parameters that are set at compile time. */
67
68/* Keep the ring sizes a power of two for compile efficiency.
69 * The compiler will convert <unsigned>'%'<2^N> into a bit mask.
70 * Making the Tx ring too large decreases the effectiveness of channel
71 * bonding and packet priority.
72 * There are no ill effects from too-large receive rings.
73 *
 74 * We don't currently use the Hi Tx ring, so don't make it very big.
75 *
76 * Beware that if we start using the Hi Tx ring, we will need to change
77 * typhoon_num_free_tx() and typhoon_tx_complete() to account for that.
78 */
79#define TXHI_ENTRIES 2
80#define TXLO_ENTRIES 128
81#define RX_ENTRIES 32
82#define COMMAND_ENTRIES 16
83#define RESPONSE_ENTRIES 32
84
85#define COMMAND_RING_SIZE (COMMAND_ENTRIES * sizeof(struct cmd_desc))
86#define RESPONSE_RING_SIZE (RESPONSE_ENTRIES * sizeof(struct resp_desc))
87
88/* The 3XP will preload and remove 64 entries from the free buffer
 89 * list, and we need one entry to keep the ring from wrapping, so
90 * to keep this a power of two, we use 128 entries.
91 */
92#define RXFREE_ENTRIES 128
93#define RXENT_ENTRIES (RXFREE_ENTRIES - 1)
94
95/* Operational parameters that usually are not changed. */
96
97/* Time in jiffies before concluding the transmitter is hung. */
98#define TX_TIMEOUT (2*HZ)
99
100#define PKT_BUF_SZ 1536
 101#define FIRMWARE_NAME "3com/typhoon.bin"
 102
103#define pr_fmt(fmt) KBUILD_MODNAME " " fmt
104
105#include <linux/module.h>
106#include <linux/kernel.h>
 107#include <linux/sched.h>
108#include <linux/string.h>
109#include <linux/timer.h>
110#include <linux/errno.h>
111#include <linux/ioport.h>
112#include <linux/slab.h>
113#include <linux/interrupt.h>
114#include <linux/pci.h>
115#include <linux/netdevice.h>
116#include <linux/etherdevice.h>
117#include <linux/skbuff.h>
 118#include <linux/mm.h>
119#include <linux/init.h>
120#include <linux/delay.h>
121#include <linux/ethtool.h>
122#include <linux/if_vlan.h>
123#include <linux/crc32.h>
124#include <linux/bitops.h>
125#include <asm/processor.h>
126#include <asm/io.h>
127#include <asm/uaccess.h>
128#include <linux/in6.h>
 129#include <linux/dma-mapping.h>
 130#include <linux/firmware.h>
 131#include <generated/utsrelease.h>
132
133#include "typhoon.h"
 134
 135MODULE_AUTHOR("David Dillow <dave@thedillows.org>");
 136MODULE_VERSION(UTS_RELEASE);
 137MODULE_LICENSE("GPL");
 138MODULE_FIRMWARE(FIRMWARE_NAME);
139MODULE_DESCRIPTION("3Com Typhoon Family (3C990, 3CR990, and variants)");
140MODULE_PARM_DESC(rx_copybreak, "Packets smaller than this are copied and "
141 "the buffer given back to the NIC. Default "
142 "is 200.");
143MODULE_PARM_DESC(use_mmio, "Use MMIO (1) or PIO(0) to access the NIC. "
144 "Default is to try MMIO and fallback to PIO.");
145module_param(rx_copybreak, int, 0);
146module_param(use_mmio, int, 0);
147
148#if defined(NETIF_F_TSO) && MAX_SKB_FRAGS > 32
149#warning Typhoon only supports 32 entries in its SG list for TSO, disabling TSO
150#undef NETIF_F_TSO
151#endif
152
153#if TXLO_ENTRIES <= (2 * MAX_SKB_FRAGS)
154#error TX ring too small!
155#endif
156
157struct typhoon_card_info {
158 const char *name;
159 const int capabilities;
160};
161
162#define TYPHOON_CRYPTO_NONE 0x00
163#define TYPHOON_CRYPTO_DES 0x01
164#define TYPHOON_CRYPTO_3DES 0x02
165#define TYPHOON_CRYPTO_VARIABLE 0x04
166#define TYPHOON_FIBER 0x08
167#define TYPHOON_WAKEUP_NEEDS_RESET 0x10
168
169enum typhoon_cards {
170 TYPHOON_TX = 0, TYPHOON_TX95, TYPHOON_TX97, TYPHOON_SVR,
171 TYPHOON_SVR95, TYPHOON_SVR97, TYPHOON_TXM, TYPHOON_BSVR,
172 TYPHOON_FX95, TYPHOON_FX97, TYPHOON_FX95SVR, TYPHOON_FX97SVR,
173 TYPHOON_FXM,
174};
175
176/* directly indexed by enum typhoon_cards, above */
177static struct typhoon_card_info typhoon_card_info[] __devinitdata = {
178 { "3Com Typhoon (3C990-TX)",
179 TYPHOON_CRYPTO_NONE},
180 { "3Com Typhoon (3CR990-TX-95)",
181 TYPHOON_CRYPTO_DES},
182 { "3Com Typhoon (3CR990-TX-97)",
183 TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES},
184 { "3Com Typhoon (3C990SVR)",
185 TYPHOON_CRYPTO_NONE},
186 { "3Com Typhoon (3CR990SVR95)",
187 TYPHOON_CRYPTO_DES},
188 { "3Com Typhoon (3CR990SVR97)",
189 TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES},
190 { "3Com Typhoon2 (3C990B-TX-M)",
191 TYPHOON_CRYPTO_VARIABLE},
192 { "3Com Typhoon2 (3C990BSVR)",
193 TYPHOON_CRYPTO_VARIABLE},
194 { "3Com Typhoon (3CR990-FX-95)",
195 TYPHOON_CRYPTO_DES | TYPHOON_FIBER},
196 { "3Com Typhoon (3CR990-FX-97)",
197 TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES | TYPHOON_FIBER},
198 { "3Com Typhoon (3CR990-FX-95 Server)",
199 TYPHOON_CRYPTO_DES | TYPHOON_FIBER},
200 { "3Com Typhoon (3CR990-FX-97 Server)",
201 TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES | TYPHOON_FIBER},
202 { "3Com Typhoon2 (3C990B-FX-97)",
203 TYPHOON_CRYPTO_VARIABLE | TYPHOON_FIBER},
204};
205
206/* Notes on the new subsystem numbering scheme:
 207 * bits 0-1 indicate crypto capabilities: (0) variable, (1) DES, or (2) 3DES
208 * bit 4 indicates if this card has secured firmware (we don't support it)
209 * bit 8 indicates if this is a (0) copper or (1) fiber card
210 * bits 12-16 indicate card type: (0) client and (1) server
211 */
212static DEFINE_PCI_DEVICE_TABLE(typhoon_pci_tbl) = {
213 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990,
214 PCI_ANY_ID, PCI_ANY_ID, 0, 0,TYPHOON_TX },
215 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_TX_95,
216 PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_TX95 },
217 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_TX_97,
218 PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_TX97 },
219 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
220 PCI_ANY_ID, 0x1000, 0, 0, TYPHOON_TXM },
221 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
222 PCI_ANY_ID, 0x1102, 0, 0, TYPHOON_FXM },
223 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
224 PCI_ANY_ID, 0x2000, 0, 0, TYPHOON_BSVR },
225 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
226 PCI_ANY_ID, 0x1101, 0, 0, TYPHOON_FX95 },
227 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
228 PCI_ANY_ID, 0x1102, 0, 0, TYPHOON_FX97 },
229 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
230 PCI_ANY_ID, 0x2101, 0, 0, TYPHOON_FX95SVR },
231 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
232 PCI_ANY_ID, 0x2102, 0, 0, TYPHOON_FX97SVR },
233 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR95,
234 PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR95 },
235 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR97,
236 PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR97 },
237 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR,
238 PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR },
239 { 0, }
240};
241MODULE_DEVICE_TABLE(pci, typhoon_pci_tbl);
242
243/* Define the shared memory area
244 * Align everything the 3XP will normally be using.
245 * We'll need to move/align txHi if we start using that ring.
246 */
247#define __3xp_aligned ____cacheline_aligned
248struct typhoon_shared {
249 struct typhoon_interface iface;
250 struct typhoon_indexes indexes __3xp_aligned;
251 struct tx_desc txLo[TXLO_ENTRIES] __3xp_aligned;
252 struct rx_desc rxLo[RX_ENTRIES] __3xp_aligned;
253 struct rx_desc rxHi[RX_ENTRIES] __3xp_aligned;
254 struct cmd_desc cmd[COMMAND_ENTRIES] __3xp_aligned;
255 struct resp_desc resp[RESPONSE_ENTRIES] __3xp_aligned;
256 struct rx_free rxBuff[RXFREE_ENTRIES] __3xp_aligned;
257 u32 zeroWord;
258 struct tx_desc txHi[TXHI_ENTRIES];
259} __attribute__ ((packed));
260
261struct rxbuff_ent {
262 struct sk_buff *skb;
263 dma_addr_t dma_addr;
264};
265
266struct typhoon {
267 /* Tx cache line section */
 268 struct transmit_ring txLoRing ____cacheline_aligned;
269 struct pci_dev * tx_pdev;
270 void __iomem *tx_ioaddr;
271 u32 txlo_dma_addr;
272
273 /* Irq/Rx cache line section */
274 void __iomem *ioaddr ____cacheline_aligned;
275 struct typhoon_indexes *indexes;
276 u8 awaiting_resp;
277 u8 duplex;
278 u8 speed;
279 u8 card_state;
280 struct basic_ring rxLoRing;
281 struct pci_dev * pdev;
282 struct net_device * dev;
 283 struct napi_struct napi;
284 spinlock_t state_lock;
285 struct vlan_group * vlgrp;
286 struct basic_ring rxHiRing;
287 struct basic_ring rxBuffRing;
288 struct rxbuff_ent rxbuffers[RXENT_ENTRIES];
289
290 /* general section */
291 spinlock_t command_lock ____cacheline_aligned;
292 struct basic_ring cmdRing;
293 struct basic_ring respRing;
294 struct net_device_stats stats;
295 struct net_device_stats stats_saved;
296 struct typhoon_shared * shared;
297 dma_addr_t shared_dma;
298 __le16 xcvr_select;
299 __le16 wol_events;
300 __le32 offload;
301
302 /* unused stuff (future use) */
303 int capabilities;
304 struct transmit_ring txHiRing;
305};
306
307enum completion_wait_values {
308 NoWait = 0, WaitNoSleep, WaitSleep,
309};
310
311/* These are the values for the typhoon.card_state variable.
312 * These determine where the statistics will come from in get_stats().
313 * The sleep image does not support the statistics we need.
314 */
315enum state_values {
316 Sleeping = 0, Running,
317};
318
319/* PCI writes are not guaranteed to be posted in order, but outstanding writes
320 * cannot pass a read, so this forces current writes to post.
321 */
322#define typhoon_post_pci_writes(x) \
323 do { if(likely(use_mmio)) ioread32(x+TYPHOON_REG_HEARTBEAT); } while(0)
324
325/* We'll wait up to six seconds for a reset, and half a second normally.
326 */
327#define TYPHOON_UDELAY 50
328#define TYPHOON_RESET_TIMEOUT_SLEEP (6 * HZ)
329#define TYPHOON_RESET_TIMEOUT_NOSLEEP ((6 * 1000000) / TYPHOON_UDELAY)
330#define TYPHOON_WAIT_TIMEOUT ((1000000 / 2) / TYPHOON_UDELAY)
331
 332#if defined(NETIF_F_TSO)
 333#define skb_tso_size(x) (skb_shinfo(x)->gso_size)
334#define TSO_NUM_DESCRIPTORS 2
335#define TSO_OFFLOAD_ON TYPHOON_OFFLOAD_TCP_SEGMENT
336#else
337#define NETIF_F_TSO 0
338#define skb_tso_size(x) 0
339#define TSO_NUM_DESCRIPTORS 0
340#define TSO_OFFLOAD_ON 0
341#endif
342
343static inline void
344typhoon_inc_index(u32 *index, const int count, const int num_entries)
345{
 346 /* Increment a ring index -- we can use this for all rings except
 347 * the Rx rings, as they use different size descriptors;
 348 * otherwise, everything is the same size as a cmd_desc
349 */
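	/* With the 16 byte cmd_desc from typhoon.h and power-of-two entry
	 * counts, the '%' below compiles down to a simple mask -- e.g. the
	 * 128 entry Tx Lo ring is 2048 bytes, so the modulo becomes '& 0x7ff'.
	 */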
350 *index += count * sizeof(struct cmd_desc);
351 *index %= num_entries * sizeof(struct cmd_desc);
352}
353
354static inline void
355typhoon_inc_cmd_index(u32 *index, const int count)
356{
357 typhoon_inc_index(index, count, COMMAND_ENTRIES);
358}
359
360static inline void
361typhoon_inc_resp_index(u32 *index, const int count)
362{
363 typhoon_inc_index(index, count, RESPONSE_ENTRIES);
364}
365
366static inline void
367typhoon_inc_rxfree_index(u32 *index, const int count)
368{
369 typhoon_inc_index(index, count, RXFREE_ENTRIES);
370}
371
372static inline void
373typhoon_inc_tx_index(u32 *index, const int count)
374{
 375 /* if we start using the Hi Tx ring, this needs updating */
376 typhoon_inc_index(index, count, TXLO_ENTRIES);
377}
378
379static inline void
380typhoon_inc_rx_index(u32 *index, const int count)
381{
382 /* sizeof(struct rx_desc) != sizeof(struct cmd_desc) */
383 *index += count * sizeof(struct rx_desc);
384 *index %= RX_ENTRIES * sizeof(struct rx_desc);
385}
386
387static int
388typhoon_reset(void __iomem *ioaddr, int wait_type)
389{
390 int i, err = 0;
391 int timeout;
392
393 if(wait_type == WaitNoSleep)
394 timeout = TYPHOON_RESET_TIMEOUT_NOSLEEP;
395 else
396 timeout = TYPHOON_RESET_TIMEOUT_SLEEP;
397
398 iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
399 iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);
400
401 iowrite32(TYPHOON_RESET_ALL, ioaddr + TYPHOON_REG_SOFT_RESET);
402 typhoon_post_pci_writes(ioaddr);
403 udelay(1);
404 iowrite32(TYPHOON_RESET_NONE, ioaddr + TYPHOON_REG_SOFT_RESET);
405
406 if(wait_type != NoWait) {
407 for(i = 0; i < timeout; i++) {
408 if(ioread32(ioaddr + TYPHOON_REG_STATUS) ==
409 TYPHOON_STATUS_WAITING_FOR_HOST)
410 goto out;
411
412 if(wait_type == WaitSleep)
413 schedule_timeout_uninterruptible(1);
414 else
415 udelay(TYPHOON_UDELAY);
416 }
417
418 err = -ETIMEDOUT;
419 }
420
421out:
422 iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
423 iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);
424
425 /* The 3XP seems to need a little extra time to complete the load
426 * of the sleep image before we can reliably boot it. Failure to
427 * do this occasionally results in a hung adapter after boot in
428 * typhoon_init_one() while trying to read the MAC address or
429 * putting the card to sleep. 3Com's driver waits 5ms, but
430 * that seems to be overkill. However, if we can sleep, we might
431 * as well give it that much time. Otherwise, we'll give it 500us,
 432 * which should be enough (I've seen it work well at 100us, but still
 433 * saw occasional problems).
434 */
435 if(wait_type == WaitSleep)
436 msleep(5);
437 else
438 udelay(500);
439 return err;
440}
441
442static int
443typhoon_wait_status(void __iomem *ioaddr, u32 wait_value)
444{
445 int i, err = 0;
446
447 for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
448 if(ioread32(ioaddr + TYPHOON_REG_STATUS) == wait_value)
449 goto out;
450 udelay(TYPHOON_UDELAY);
451 }
452
453 err = -ETIMEDOUT;
454
455out:
456 return err;
457}
458
459static inline void
460typhoon_media_status(struct net_device *dev, struct resp_desc *resp)
461{
462 if(resp->parm1 & TYPHOON_MEDIA_STAT_NO_LINK)
463 netif_carrier_off(dev);
464 else
465 netif_carrier_on(dev);
466}
467
468static inline void
469typhoon_hello(struct typhoon *tp)
470{
471 struct basic_ring *ring = &tp->cmdRing;
472 struct cmd_desc *cmd;
473
474 /* We only get a hello request if we've not sent anything to the
475 * card in a long while. If the lock is held, then we're in the
476 * process of issuing a command, so we don't need to respond.
477 */
478 if(spin_trylock(&tp->command_lock)) {
479 cmd = (struct cmd_desc *)(ring->ringBase + ring->lastWrite);
480 typhoon_inc_cmd_index(&ring->lastWrite, 1);
481
482 INIT_COMMAND_NO_RESPONSE(cmd, TYPHOON_CMD_HELLO_RESP);
483 smp_wmb();
484 iowrite32(ring->lastWrite, tp->ioaddr + TYPHOON_REG_CMD_READY);
485 spin_unlock(&tp->command_lock);
486 }
487}
488
489static int
490typhoon_process_response(struct typhoon *tp, int resp_size,
491 struct resp_desc *resp_save)
492{
493 struct typhoon_indexes *indexes = tp->indexes;
494 struct resp_desc *resp;
495 u8 *base = tp->respRing.ringBase;
496 int count, len, wrap_len;
497 u32 cleared;
498 u32 ready;
499
500 cleared = le32_to_cpu(indexes->respCleared);
501 ready = le32_to_cpu(indexes->respReady);
502 while(cleared != ready) {
503 resp = (struct resp_desc *)(base + cleared);
504 count = resp->numDesc + 1;
505 if(resp_save && resp->seqNo) {
506 if(count > resp_size) {
507 resp_save->flags = TYPHOON_RESP_ERROR;
508 goto cleanup;
509 }
510
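			/* The response may wrap past the end of the ring;
			 * copy it out to the caller in at most two pieces.
			 */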
511 wrap_len = 0;
512 len = count * sizeof(*resp);
513 if(unlikely(cleared + len > RESPONSE_RING_SIZE)) {
514 wrap_len = cleared + len - RESPONSE_RING_SIZE;
515 len = RESPONSE_RING_SIZE - cleared;
516 }
517
518 memcpy(resp_save, resp, len);
519 if(unlikely(wrap_len)) {
520 resp_save += len / sizeof(*resp);
521 memcpy(resp_save, base, wrap_len);
522 }
523
524 resp_save = NULL;
525 } else if(resp->cmd == TYPHOON_CMD_READ_MEDIA_STATUS) {
526 typhoon_media_status(tp->dev, resp);
527 } else if(resp->cmd == TYPHOON_CMD_HELLO_RESP) {
528 typhoon_hello(tp);
529 } else {
530 netdev_err(tp->dev,
531 "dumping unexpected response 0x%04x:%d:0x%02x:0x%04x:%08x:%08x\n",
532 le16_to_cpu(resp->cmd),
533 resp->numDesc, resp->flags,
534 le16_to_cpu(resp->parm1),
535 le32_to_cpu(resp->parm2),
536 le32_to_cpu(resp->parm3));
537 }
538
539cleanup:
540 typhoon_inc_resp_index(&cleared, count);
541 }
542
543 indexes->respCleared = cpu_to_le32(cleared);
544 wmb();
545 return (resp_save == NULL);
546}
547
548static inline int
549typhoon_num_free(int lastWrite, int lastRead, int ringSize)
550{
551 /* this works for all descriptors but rx_desc, as they are a
552 * different size than the cmd_desc -- everyone else is the same
553 */
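	/* One slot is always sacrificed: with lastWrite == lastRead the ring
	 * is empty and this returns ringSize - 1, so a completely full ring
	 * can never be mistaken for an empty one.
	 */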
554 lastWrite /= sizeof(struct cmd_desc);
555 lastRead /= sizeof(struct cmd_desc);
556 return (ringSize + lastRead - lastWrite - 1) % ringSize;
557}
558
559static inline int
560typhoon_num_free_cmd(struct typhoon *tp)
561{
562 int lastWrite = tp->cmdRing.lastWrite;
563 int cmdCleared = le32_to_cpu(tp->indexes->cmdCleared);
564
565 return typhoon_num_free(lastWrite, cmdCleared, COMMAND_ENTRIES);
566}
567
568static inline int
569typhoon_num_free_resp(struct typhoon *tp)
570{
571 int respReady = le32_to_cpu(tp->indexes->respReady);
572 int respCleared = le32_to_cpu(tp->indexes->respCleared);
573
574 return typhoon_num_free(respReady, respCleared, RESPONSE_ENTRIES);
575}
576
577static inline int
578typhoon_num_free_tx(struct transmit_ring *ring)
579{
580 /* if we start using the Hi Tx ring, this needs updating */
581 return typhoon_num_free(ring->lastWrite, ring->lastRead, TXLO_ENTRIES);
582}
583
584static int
585typhoon_issue_command(struct typhoon *tp, int num_cmd, struct cmd_desc *cmd,
586 int num_resp, struct resp_desc *resp)
587{
588 struct typhoon_indexes *indexes = tp->indexes;
589 struct basic_ring *ring = &tp->cmdRing;
590 struct resp_desc local_resp;
591 int i, err = 0;
592 int got_resp;
593 int freeCmd, freeResp;
594 int len, wrap_len;
595
596 spin_lock(&tp->command_lock);
597
598 freeCmd = typhoon_num_free_cmd(tp);
599 freeResp = typhoon_num_free_resp(tp);
600
601 if(freeCmd < num_cmd || freeResp < num_resp) {
602 netdev_err(tp->dev, "no descs for cmd, had (needed) %d (%d) cmd, %d (%d) resp\n",
603 freeCmd, num_cmd, freeResp, num_resp);
604 err = -ENOMEM;
605 goto out;
606 }
607
608 if(cmd->flags & TYPHOON_CMD_RESPOND) {
609 /* If we're expecting a response, but the caller hasn't given
610 * us a place to put it, we'll provide one.
611 */
612 tp->awaiting_resp = 1;
613 if(resp == NULL) {
614 resp = &local_resp;
615 num_resp = 1;
616 }
617 }
618
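	/* Copy the command(s) into the ring; if the block runs past the end
	 * of the ring, split the copy in two and wrap the remainder back to
	 * the start.
	 */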
619 wrap_len = 0;
620 len = num_cmd * sizeof(*cmd);
621 if(unlikely(ring->lastWrite + len > COMMAND_RING_SIZE)) {
622 wrap_len = ring->lastWrite + len - COMMAND_RING_SIZE;
623 len = COMMAND_RING_SIZE - ring->lastWrite;
624 }
625
626 memcpy(ring->ringBase + ring->lastWrite, cmd, len);
627 if(unlikely(wrap_len)) {
628 struct cmd_desc *wrap_ptr = cmd;
629 wrap_ptr += len / sizeof(*cmd);
630 memcpy(ring->ringBase, wrap_ptr, wrap_len);
631 }
632
633 typhoon_inc_cmd_index(&ring->lastWrite, num_cmd);
634
59c51591 635 /* "I feel a presence... another warrior is on the mesa."
1da177e4
LT
636 */
637 wmb();
638 iowrite32(ring->lastWrite, tp->ioaddr + TYPHOON_REG_CMD_READY);
639 typhoon_post_pci_writes(tp->ioaddr);
640
641 if((cmd->flags & TYPHOON_CMD_RESPOND) == 0)
642 goto out;
643
644 /* Ugh. We'll be here about 8ms, spinning our thumbs, unable to
645 * preempt or do anything other than take interrupts. So, don't
646 * wait for a response unless you have to.
647 *
648 * I've thought about trying to sleep here, but we're called
649 * from many contexts that don't allow that. Also, given the way
650 * 3Com has implemented irq coalescing, we would likely timeout --
651 * this has been observed in real life!
652 *
653 * The big killer is we have to wait to get stats from the card,
654 * though we could go to a periodic refresh of those if we don't
655 * mind them getting somewhat stale. The rest of the waiting
656 * commands occur during open/close/suspend/resume, so they aren't
657 * time critical. Creating SAs in the future will also have to
658 * wait here.
659 */
660 got_resp = 0;
661 for(i = 0; i < TYPHOON_WAIT_TIMEOUT && !got_resp; i++) {
662 if(indexes->respCleared != indexes->respReady)
663 got_resp = typhoon_process_response(tp, num_resp,
664 resp);
665 udelay(TYPHOON_UDELAY);
666 }
667
668 if(!got_resp) {
669 err = -ETIMEDOUT;
670 goto out;
671 }
672
673 /* Collect the error response even if we don't care about the
674 * rest of the response
675 */
676 if(resp->flags & TYPHOON_RESP_ERROR)
677 err = -EIO;
678
679out:
680 if(tp->awaiting_resp) {
681 tp->awaiting_resp = 0;
682 smp_wmb();
683
684 /* Ugh. If a response was added to the ring between
685 * the call to typhoon_process_response() and the clearing
686 * of tp->awaiting_resp, we could have missed the interrupt
687 * and it could hang in the ring an indeterminate amount of
688 * time. So, check for it, and interrupt ourselves if this
689 * is the case.
690 */
691 if(indexes->respCleared != indexes->respReady)
692 iowrite32(1, tp->ioaddr + TYPHOON_REG_SELF_INTERRUPT);
693 }
694
695 spin_unlock(&tp->command_lock);
696 return err;
697}
698
699static void
700typhoon_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
701{
702 struct typhoon *tp = netdev_priv(dev);
703 struct cmd_desc xp_cmd;
704 int err;
705
706 spin_lock_bh(&tp->state_lock);
707 if(!tp->vlgrp != !grp) {
708 /* We've either been turned on for the first time, or we've
709 * been turned off. Update the 3XP.
710 */
711 if(grp)
712 tp->offload |= TYPHOON_OFFLOAD_VLAN;
713 else
714 tp->offload &= ~TYPHOON_OFFLOAD_VLAN;
715
716 /* If the interface is up, the runtime is running -- and we
717 * must be up for the vlan core to call us.
718 *
719 * Do the command outside of the spin lock, as it is slow.
720 */
721 INIT_COMMAND_WITH_RESPONSE(&xp_cmd,
722 TYPHOON_CMD_SET_OFFLOAD_TASKS);
723 xp_cmd.parm2 = tp->offload;
724 xp_cmd.parm3 = tp->offload;
725 spin_unlock_bh(&tp->state_lock);
726 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
727 if(err < 0)
0bc88e4a 728 netdev_err(tp->dev, "vlan offload error %d\n", -err);
1da177e4
LT
729 spin_lock_bh(&tp->state_lock);
730 }
731
732 /* now make the change visible */
733 tp->vlgrp = grp;
734 spin_unlock_bh(&tp->state_lock);
735}
736
737static inline void
738typhoon_tso_fill(struct sk_buff *skb, struct transmit_ring *txRing,
739 u32 ring_dma)
740{
741 struct tcpopt_desc *tcpd;
742 u32 tcpd_offset = ring_dma;
743
744 tcpd = (struct tcpopt_desc *) (txRing->ringBase + txRing->lastWrite);
745 tcpd_offset += txRing->lastWrite;
746 tcpd_offset += offsetof(struct tcpopt_desc, bytesTx);
747 typhoon_inc_tx_index(&txRing->lastWrite, 1);
748
749 tcpd->flags = TYPHOON_OPT_DESC | TYPHOON_OPT_TCP_SEG;
750 tcpd->numDesc = 1;
751 tcpd->mss_flags = cpu_to_le16(skb_tso_size(skb));
752 tcpd->mss_flags |= TYPHOON_TSO_FIRST | TYPHOON_TSO_LAST;
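	/* tcpd_offset was computed above to point at this descriptor's own
	 * bytesTx/status words inside the Tx ring, so any TSO status the
	 * 3XP chooses to write back lands in the ring entry itself rather
	 * than in a separate response buffer.
	 */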
753 tcpd->respAddrLo = cpu_to_le32(tcpd_offset);
754 tcpd->bytesTx = cpu_to_le32(skb->len);
755 tcpd->status = 0;
756}
757
 758static netdev_tx_t
759typhoon_start_tx(struct sk_buff *skb, struct net_device *dev)
760{
761 struct typhoon *tp = netdev_priv(dev);
762 struct transmit_ring *txRing;
763 struct tx_desc *txd, *first_txd;
764 dma_addr_t skb_dma;
765 int numDesc;
766
767 /* we have two rings to choose from, but we only use txLo for now
768 * If we start using the Hi ring as well, we'll need to update
769 * typhoon_stop_runtime(), typhoon_interrupt(), typhoon_num_free_tx(),
 770 * and TXHI_ENTRIES to match, as well as update the TSO code below
771 * to get the right DMA address
772 */
773 txRing = &tp->txLoRing;
774
775 /* We need one descriptor for each fragment of the sk_buff, plus the
776 * one for the ->data area of it.
777 *
778 * The docs say a maximum of 16 fragment descriptors per TCP option
779 * descriptor, then make a new packet descriptor and option descriptor
780 * for the next 16 fragments. The engineers say just an option
781 * descriptor is needed. I've tested up to 26 fragments with a single
782 * packet descriptor/option descriptor combo, so I use that for now.
783 *
784 * If problems develop with TSO, check this first.
785 */
786 numDesc = skb_shinfo(skb)->nr_frags + 1;
 787 if (skb_is_gso(skb))
788 numDesc++;
789
790 /* When checking for free space in the ring, we need to also
791 * account for the initial Tx descriptor, and we always must leave
792 * at least one descriptor unused in the ring so that it doesn't
793 * wrap and look empty.
794 *
795 * The only time we should loop here is when we hit the race
796 * between marking the queue awake and updating the cleared index.
797 * Just loop and it will appear. This comes from the acenic driver.
798 */
799 while(unlikely(typhoon_num_free_tx(txRing) < (numDesc + 2)))
800 smp_rmb();
801
802 first_txd = (struct tx_desc *) (txRing->ringBase + txRing->lastWrite);
803 typhoon_inc_tx_index(&txRing->lastWrite, 1);
804
805 first_txd->flags = TYPHOON_TX_DESC | TYPHOON_DESC_VALID;
806 first_txd->numDesc = 0;
807 first_txd->len = 0;
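	/* The first descriptor carries no DMA address of its own; stash the
	 * skb pointer in tx_addr so typhoon_clean_tx() can recover it and
	 * free the skb once the 3XP reports the packet complete.
	 */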
 808 first_txd->tx_addr = (u64)((unsigned long) skb);
809 first_txd->processFlags = 0;
810
 811 if(skb->ip_summed == CHECKSUM_PARTIAL) {
812 /* The 3XP will figure out if this is UDP/TCP */
813 first_txd->processFlags |= TYPHOON_TX_PF_TCP_CHKSUM;
814 first_txd->processFlags |= TYPHOON_TX_PF_UDP_CHKSUM;
815 first_txd->processFlags |= TYPHOON_TX_PF_IP_CHKSUM;
816 }
817
818 if(vlan_tx_tag_present(skb)) {
819 first_txd->processFlags |=
820 TYPHOON_TX_PF_INSERT_VLAN | TYPHOON_TX_PF_VLAN_PRIORITY;
821 first_txd->processFlags |=
 822 cpu_to_le32(ntohs(vlan_tx_tag_get(skb)) <<
823 TYPHOON_TX_PF_VLAN_TAG_SHIFT);
824 }
825
 826 if (skb_is_gso(skb)) {
827 first_txd->processFlags |= TYPHOON_TX_PF_TCP_SEGMENT;
828 first_txd->numDesc++;
829
830 typhoon_tso_fill(skb, txRing, tp->txlo_dma_addr);
831 }
832
833 txd = (struct tx_desc *) (txRing->ringBase + txRing->lastWrite);
834 typhoon_inc_tx_index(&txRing->lastWrite, 1);
835
836 /* No need to worry about padding packet -- the firmware pads
837 * it with zeros to ETH_ZLEN for us.
838 */
839 if(skb_shinfo(skb)->nr_frags == 0) {
840 skb_dma = pci_map_single(tp->tx_pdev, skb->data, skb->len,
841 PCI_DMA_TODEVICE);
842 txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
843 txd->len = cpu_to_le16(skb->len);
844 txd->frag.addr = cpu_to_le32(skb_dma);
845 txd->frag.addrHi = 0;
846 first_txd->numDesc++;
847 } else {
848 int i, len;
849
850 len = skb_headlen(skb);
851 skb_dma = pci_map_single(tp->tx_pdev, skb->data, len,
852 PCI_DMA_TODEVICE);
853 txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
854 txd->len = cpu_to_le16(len);
855 txd->frag.addr = cpu_to_le32(skb_dma);
856 txd->frag.addrHi = 0;
857 first_txd->numDesc++;
858
859 for(i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
860 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
861 void *frag_addr;
862
863 txd = (struct tx_desc *) (txRing->ringBase +
864 txRing->lastWrite);
865 typhoon_inc_tx_index(&txRing->lastWrite, 1);
866
867 len = frag->size;
868 frag_addr = (void *) page_address(frag->page) +
869 frag->page_offset;
870 skb_dma = pci_map_single(tp->tx_pdev, frag_addr, len,
871 PCI_DMA_TODEVICE);
872 txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
873 txd->len = cpu_to_le16(len);
874 txd->frag.addr = cpu_to_le32(skb_dma);
875 txd->frag.addrHi = 0;
876 first_txd->numDesc++;
877 }
878 }
879
880 /* Kick the 3XP
881 */
882 wmb();
883 iowrite32(txRing->lastWrite, tp->tx_ioaddr + txRing->writeRegister);
884
885 dev->trans_start = jiffies;
886
887 /* If we don't have room to put the worst case packet on the
888 * queue, then we must stop the queue. We need 2 extra
889 * descriptors -- one to prevent ring wrap, and one for the
890 * Tx header.
891 */
892 numDesc = MAX_SKB_FRAGS + TSO_NUM_DESCRIPTORS + 1;
893
894 if(typhoon_num_free_tx(txRing) < (numDesc + 2)) {
895 netif_stop_queue(dev);
896
 897 /* A Tx complete IRQ could have gotten in between, making
898 * the ring free again. Only need to recheck here, since
899 * Tx is serialized.
900 */
901 if(typhoon_num_free_tx(txRing) >= (numDesc + 2))
902 netif_wake_queue(dev);
903 }
904
 905 return NETDEV_TX_OK;
906}
907
908static void
909typhoon_set_rx_mode(struct net_device *dev)
910{
911 struct typhoon *tp = netdev_priv(dev);
912 struct cmd_desc xp_cmd;
913 u32 mc_filter[2];
 914 __le16 filter;
915
916 filter = TYPHOON_RX_FILTER_DIRECTED | TYPHOON_RX_FILTER_BROADCAST;
917 if(dev->flags & IFF_PROMISC) {
 918 filter |= TYPHOON_RX_FILTER_PROMISCOUS;
 919 } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
920 (dev->flags & IFF_ALLMULTI)) {
921 /* Too many to match, or accept all multicasts. */
922 filter |= TYPHOON_RX_FILTER_ALL_MCAST;
 923 } else if (!netdev_mc_empty(dev)) {
 924 struct dev_mc_list *mclist;
925
926 memset(mc_filter, 0, sizeof(mc_filter));
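		/* Hash each address with ether_crc(); the low 6 bits of the
		 * CRC select one of the 64 bits in the two-word filter that
		 * is handed to the 3XP below.
		 */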
 927 netdev_for_each_mc_addr(mclist, dev) {
928 int bit = ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3f;
929 mc_filter[bit >> 5] |= 1 << (bit & 0x1f);
930 }
931
932 INIT_COMMAND_NO_RESPONSE(&xp_cmd,
933 TYPHOON_CMD_SET_MULTICAST_HASH);
934 xp_cmd.parm1 = TYPHOON_MCAST_HASH_SET;
935 xp_cmd.parm2 = cpu_to_le32(mc_filter[0]);
936 xp_cmd.parm3 = cpu_to_le32(mc_filter[1]);
937 typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
938
939 filter |= TYPHOON_RX_FILTER_MCAST_HASH;
940 }
941
942 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_RX_FILTER);
943 xp_cmd.parm1 = filter;
944 typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
945}
946
947static int
948typhoon_do_get_stats(struct typhoon *tp)
949{
950 struct net_device_stats *stats = &tp->stats;
951 struct net_device_stats *saved = &tp->stats_saved;
952 struct cmd_desc xp_cmd;
953 struct resp_desc xp_resp[7];
954 struct stats_resp *s = (struct stats_resp *) xp_resp;
955 int err;
956
957 INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_STATS);
958 err = typhoon_issue_command(tp, 1, &xp_cmd, 7, xp_resp);
959 if(err < 0)
960 return err;
961
 962 /* 3Com's Linux driver uses txMultipleCollisions as its
963 * collisions value, but there is some other collision info as well...
964 *
965 * The extra status reported would be a good candidate for
966 * ethtool_ops->get_{strings,stats}()
967 */
968 stats->tx_packets = le32_to_cpu(s->txPackets);
 969 stats->tx_bytes = le64_to_cpu(s->txBytes);
970 stats->tx_errors = le32_to_cpu(s->txCarrierLost);
971 stats->tx_carrier_errors = le32_to_cpu(s->txCarrierLost);
972 stats->collisions = le32_to_cpu(s->txMultipleCollisions);
973 stats->rx_packets = le32_to_cpu(s->rxPacketsGood);
 974 stats->rx_bytes = le64_to_cpu(s->rxBytesGood);
975 stats->rx_fifo_errors = le32_to_cpu(s->rxFifoOverruns);
976 stats->rx_errors = le32_to_cpu(s->rxFifoOverruns) +
977 le32_to_cpu(s->BadSSD) + le32_to_cpu(s->rxCrcErrors);
978 stats->rx_crc_errors = le32_to_cpu(s->rxCrcErrors);
979 stats->rx_length_errors = le32_to_cpu(s->rxOversized);
980 tp->speed = (s->linkStatus & TYPHOON_LINK_100MBPS) ?
981 SPEED_100 : SPEED_10;
982 tp->duplex = (s->linkStatus & TYPHOON_LINK_FULL_DUPLEX) ?
983 DUPLEX_FULL : DUPLEX_HALF;
984
985 /* add in the saved statistics
986 */
987 stats->tx_packets += saved->tx_packets;
988 stats->tx_bytes += saved->tx_bytes;
989 stats->tx_errors += saved->tx_errors;
990 stats->collisions += saved->collisions;
991 stats->rx_packets += saved->rx_packets;
992 stats->rx_bytes += saved->rx_bytes;
993 stats->rx_fifo_errors += saved->rx_fifo_errors;
994 stats->rx_errors += saved->rx_errors;
995 stats->rx_crc_errors += saved->rx_crc_errors;
996 stats->rx_length_errors += saved->rx_length_errors;
997
998 return 0;
999}
1000
1001static struct net_device_stats *
1002typhoon_get_stats(struct net_device *dev)
1003{
1004 struct typhoon *tp = netdev_priv(dev);
1005 struct net_device_stats *stats = &tp->stats;
1006 struct net_device_stats *saved = &tp->stats_saved;
1007
1008 smp_rmb();
1009 if(tp->card_state == Sleeping)
1010 return saved;
1011
1012 if(typhoon_do_get_stats(tp) < 0) {
 1013 netdev_err(dev, "error getting stats\n");
1014 return saved;
1015 }
1016
1017 return stats;
1018}
1019
1020static int
1021typhoon_set_mac_address(struct net_device *dev, void *addr)
1022{
1023 struct sockaddr *saddr = (struct sockaddr *) addr;
1024
1025 if(netif_running(dev))
1026 return -EBUSY;
1027
1028 memcpy(dev->dev_addr, saddr->sa_data, dev->addr_len);
1029 return 0;
1030}
1031
1032static void
1033typhoon_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1034{
1035 struct typhoon *tp = netdev_priv(dev);
1036 struct pci_dev *pci_dev = tp->pdev;
1037 struct cmd_desc xp_cmd;
1038 struct resp_desc xp_resp[3];
1039
1040 smp_rmb();
1041 if(tp->card_state == Sleeping) {
1042 strcpy(info->fw_version, "Sleep image");
1043 } else {
1044 INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS);
1045 if(typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp) < 0) {
1046 strcpy(info->fw_version, "Unknown runtime");
1047 } else {
 1048 u32 sleep_ver = le32_to_cpu(xp_resp[0].parm2);
 1049 snprintf(info->fw_version, 32, "%02x.%03x.%03x",
 1050 sleep_ver >> 24, (sleep_ver >> 12) & 0xfff,
1051 sleep_ver & 0xfff);
1052 }
1053 }
1054
1055 strcpy(info->driver, KBUILD_MODNAME);
1056 strcpy(info->version, UTS_RELEASE);
1057 strcpy(info->bus_info, pci_name(pci_dev));
1058}
1059
1060static int
1061typhoon_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1062{
1063 struct typhoon *tp = netdev_priv(dev);
1064
1065 cmd->supported = SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
1066 SUPPORTED_Autoneg;
1067
1068 switch (tp->xcvr_select) {
1069 case TYPHOON_XCVR_10HALF:
1070 cmd->advertising = ADVERTISED_10baseT_Half;
1071 break;
1072 case TYPHOON_XCVR_10FULL:
1073 cmd->advertising = ADVERTISED_10baseT_Full;
1074 break;
1075 case TYPHOON_XCVR_100HALF:
1076 cmd->advertising = ADVERTISED_100baseT_Half;
1077 break;
1078 case TYPHOON_XCVR_100FULL:
1079 cmd->advertising = ADVERTISED_100baseT_Full;
1080 break;
1081 case TYPHOON_XCVR_AUTONEG:
1082 cmd->advertising = ADVERTISED_10baseT_Half |
1083 ADVERTISED_10baseT_Full |
1084 ADVERTISED_100baseT_Half |
1085 ADVERTISED_100baseT_Full |
1086 ADVERTISED_Autoneg;
1087 break;
1088 }
1089
1090 if(tp->capabilities & TYPHOON_FIBER) {
1091 cmd->supported |= SUPPORTED_FIBRE;
1092 cmd->advertising |= ADVERTISED_FIBRE;
1093 cmd->port = PORT_FIBRE;
1094 } else {
1095 cmd->supported |= SUPPORTED_10baseT_Half |
1096 SUPPORTED_10baseT_Full |
1097 SUPPORTED_TP;
1098 cmd->advertising |= ADVERTISED_TP;
1099 cmd->port = PORT_TP;
1100 }
1101
1102 /* need to get stats to make these link speed/duplex valid */
1103 typhoon_do_get_stats(tp);
1104 cmd->speed = tp->speed;
1105 cmd->duplex = tp->duplex;
1106 cmd->phy_address = 0;
1107 cmd->transceiver = XCVR_INTERNAL;
1108 if(tp->xcvr_select == TYPHOON_XCVR_AUTONEG)
1109 cmd->autoneg = AUTONEG_ENABLE;
1110 else
1111 cmd->autoneg = AUTONEG_DISABLE;
1112 cmd->maxtxpkt = 1;
1113 cmd->maxrxpkt = 1;
1114
1115 return 0;
1116}
1117
1118static int
1119typhoon_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1120{
1121 struct typhoon *tp = netdev_priv(dev);
1122 struct cmd_desc xp_cmd;
 1123 __le16 xcvr;
1124 int err;
1125
1126 err = -EINVAL;
1127 if(cmd->autoneg == AUTONEG_ENABLE) {
1128 xcvr = TYPHOON_XCVR_AUTONEG;
1129 } else {
1130 if(cmd->duplex == DUPLEX_HALF) {
1131 if(cmd->speed == SPEED_10)
1132 xcvr = TYPHOON_XCVR_10HALF;
1133 else if(cmd->speed == SPEED_100)
1134 xcvr = TYPHOON_XCVR_100HALF;
1135 else
1136 goto out;
1137 } else if(cmd->duplex == DUPLEX_FULL) {
1138 if(cmd->speed == SPEED_10)
1139 xcvr = TYPHOON_XCVR_10FULL;
1140 else if(cmd->speed == SPEED_100)
1141 xcvr = TYPHOON_XCVR_100FULL;
1142 else
1143 goto out;
1144 } else
1145 goto out;
1146 }
1147
1148 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_XCVR_SELECT);
 1149 xp_cmd.parm1 = xcvr;
1150 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1151 if(err < 0)
1152 goto out;
1153
1154 tp->xcvr_select = xcvr;
1155 if(cmd->autoneg == AUTONEG_ENABLE) {
1156 tp->speed = 0xff; /* invalid */
1157 tp->duplex = 0xff; /* invalid */
1158 } else {
1159 tp->speed = cmd->speed;
1160 tp->duplex = cmd->duplex;
1161 }
1162
1163out:
1164 return err;
1165}
1166
1167static void
1168typhoon_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1169{
1170 struct typhoon *tp = netdev_priv(dev);
1171
1172 wol->supported = WAKE_PHY | WAKE_MAGIC;
1173 wol->wolopts = 0;
1174 if(tp->wol_events & TYPHOON_WAKE_LINK_EVENT)
1175 wol->wolopts |= WAKE_PHY;
1176 if(tp->wol_events & TYPHOON_WAKE_MAGIC_PKT)
1177 wol->wolopts |= WAKE_MAGIC;
1178 memset(&wol->sopass, 0, sizeof(wol->sopass));
1179}
1180
1181static int
1182typhoon_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1183{
1184 struct typhoon *tp = netdev_priv(dev);
1185
1186 if(wol->wolopts & ~(WAKE_PHY | WAKE_MAGIC))
1187 return -EINVAL;
1188
1189 tp->wol_events = 0;
1190 if(wol->wolopts & WAKE_PHY)
1191 tp->wol_events |= TYPHOON_WAKE_LINK_EVENT;
1192 if(wol->wolopts & WAKE_MAGIC)
1193 tp->wol_events |= TYPHOON_WAKE_MAGIC_PKT;
1194
1195 return 0;
1196}
1197
1198static u32
1199typhoon_get_rx_csum(struct net_device *dev)
1200{
1201 /* For now, we don't allow turning off RX checksums.
1202 */
1203 return 1;
1204}
1205
1206static void
1207typhoon_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
1208{
1209 ering->rx_max_pending = RXENT_ENTRIES;
1210 ering->rx_mini_max_pending = 0;
1211 ering->rx_jumbo_max_pending = 0;
1212 ering->tx_max_pending = TXLO_ENTRIES - 1;
1213
1214 ering->rx_pending = RXENT_ENTRIES;
1215 ering->rx_mini_pending = 0;
1216 ering->rx_jumbo_pending = 0;
1217 ering->tx_pending = TXLO_ENTRIES - 1;
1218}
1219
1220static const struct ethtool_ops typhoon_ethtool_ops = {
1221 .get_settings = typhoon_get_settings,
1222 .set_settings = typhoon_set_settings,
1223 .get_drvinfo = typhoon_get_drvinfo,
1224 .get_wol = typhoon_get_wol,
1225 .set_wol = typhoon_set_wol,
1226 .get_link = ethtool_op_get_link,
1227 .get_rx_csum = typhoon_get_rx_csum,
 1228 .set_tx_csum = ethtool_op_set_tx_csum,
 1229 .set_sg = ethtool_op_set_sg,
1230 .set_tso = ethtool_op_set_tso,
1231 .get_ringparam = typhoon_get_ringparam,
1232};
1233
1234static int
1235typhoon_wait_interrupt(void __iomem *ioaddr)
1236{
1237 int i, err = 0;
1238
1239 for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
1240 if(ioread32(ioaddr + TYPHOON_REG_INTR_STATUS) &
1241 TYPHOON_INTR_BOOTCMD)
1242 goto out;
1243 udelay(TYPHOON_UDELAY);
1244 }
1245
1246 err = -ETIMEDOUT;
1247
1248out:
1249 iowrite32(TYPHOON_INTR_BOOTCMD, ioaddr + TYPHOON_REG_INTR_STATUS);
1250 return err;
1251}
1252
1253#define shared_offset(x) offsetof(struct typhoon_shared, x)
1254
1255static void
1256typhoon_init_interface(struct typhoon *tp)
1257{
1258 struct typhoon_interface *iface = &tp->shared->iface;
1259 dma_addr_t shared_dma;
1260
1261 memset(tp->shared, 0, sizeof(struct typhoon_shared));
1262
1263 /* The *Hi members of iface are all init'd to zero by the memset().
1264 */
1265 shared_dma = tp->shared_dma + shared_offset(indexes);
1266 iface->ringIndex = cpu_to_le32(shared_dma);
1267
1268 shared_dma = tp->shared_dma + shared_offset(txLo);
1269 iface->txLoAddr = cpu_to_le32(shared_dma);
1270 iface->txLoSize = cpu_to_le32(TXLO_ENTRIES * sizeof(struct tx_desc));
1271
1272 shared_dma = tp->shared_dma + shared_offset(txHi);
1273 iface->txHiAddr = cpu_to_le32(shared_dma);
1274 iface->txHiSize = cpu_to_le32(TXHI_ENTRIES * sizeof(struct tx_desc));
1275
1276 shared_dma = tp->shared_dma + shared_offset(rxBuff);
1277 iface->rxBuffAddr = cpu_to_le32(shared_dma);
1278 iface->rxBuffSize = cpu_to_le32(RXFREE_ENTRIES *
1279 sizeof(struct rx_free));
1280
1281 shared_dma = tp->shared_dma + shared_offset(rxLo);
1282 iface->rxLoAddr = cpu_to_le32(shared_dma);
1283 iface->rxLoSize = cpu_to_le32(RX_ENTRIES * sizeof(struct rx_desc));
1284
1285 shared_dma = tp->shared_dma + shared_offset(rxHi);
1286 iface->rxHiAddr = cpu_to_le32(shared_dma);
1287 iface->rxHiSize = cpu_to_le32(RX_ENTRIES * sizeof(struct rx_desc));
1288
1289 shared_dma = tp->shared_dma + shared_offset(cmd);
1290 iface->cmdAddr = cpu_to_le32(shared_dma);
1291 iface->cmdSize = cpu_to_le32(COMMAND_RING_SIZE);
1292
1293 shared_dma = tp->shared_dma + shared_offset(resp);
1294 iface->respAddr = cpu_to_le32(shared_dma);
1295 iface->respSize = cpu_to_le32(RESPONSE_RING_SIZE);
1296
1297 shared_dma = tp->shared_dma + shared_offset(zeroWord);
1298 iface->zeroAddr = cpu_to_le32(shared_dma);
1299
1300 tp->indexes = &tp->shared->indexes;
1301 tp->txLoRing.ringBase = (u8 *) tp->shared->txLo;
1302 tp->txHiRing.ringBase = (u8 *) tp->shared->txHi;
1303 tp->rxLoRing.ringBase = (u8 *) tp->shared->rxLo;
1304 tp->rxHiRing.ringBase = (u8 *) tp->shared->rxHi;
1305 tp->rxBuffRing.ringBase = (u8 *) tp->shared->rxBuff;
1306 tp->cmdRing.ringBase = (u8 *) tp->shared->cmd;
1307 tp->respRing.ringBase = (u8 *) tp->shared->resp;
1308
1309 tp->txLoRing.writeRegister = TYPHOON_REG_TX_LO_READY;
1310 tp->txHiRing.writeRegister = TYPHOON_REG_TX_HI_READY;
1311
 1312 tp->txlo_dma_addr = le32_to_cpu(iface->txLoAddr);
1313 tp->card_state = Sleeping;
1314 smp_wmb();
1315
1316 tp->offload = TYPHOON_OFFLOAD_IP_CHKSUM | TYPHOON_OFFLOAD_TCP_CHKSUM;
1317 tp->offload |= TYPHOON_OFFLOAD_UDP_CHKSUM | TSO_OFFLOAD_ON;
1318
1319 spin_lock_init(&tp->command_lock);
1320 spin_lock_init(&tp->state_lock);
1321}
1322
1323static void
1324typhoon_init_rings(struct typhoon *tp)
1325{
1326 memset(tp->indexes, 0, sizeof(struct typhoon_indexes));
1327
1328 tp->txLoRing.lastWrite = 0;
1329 tp->txHiRing.lastWrite = 0;
1330 tp->rxLoRing.lastWrite = 0;
1331 tp->rxHiRing.lastWrite = 0;
1332 tp->rxBuffRing.lastWrite = 0;
1333 tp->cmdRing.lastWrite = 0;
 1334 tp->respRing.lastWrite = 0;
1335
1336 tp->txLoRing.lastRead = 0;
1337 tp->txHiRing.lastRead = 0;
1338}
1339
1340static const struct firmware *typhoon_fw;
1341
1342static int
1343typhoon_request_firmware(struct typhoon *tp)
1344{
1345 const struct typhoon_file_header *fHdr;
1346 const struct typhoon_section_header *sHdr;
1347 const u8 *image_data;
1348 u32 numSections;
1349 u32 section_len;
1350 u32 remaining;
1351 int err;
1352
1353 if (typhoon_fw)
1354 return 0;
1355
1356 err = request_firmware(&typhoon_fw, FIRMWARE_NAME, &tp->pdev->dev);
1357 if (err) {
1358 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
1359 FIRMWARE_NAME);
1360 return err;
1361 }
1362
1363 image_data = (u8 *) typhoon_fw->data;
1364 remaining = typhoon_fw->size;
1365 if (remaining < sizeof(struct typhoon_file_header))
1366 goto invalid_fw;
 1367
1368 fHdr = (struct typhoon_file_header *) image_data;
1369 if (memcmp(fHdr->tag, "TYPHOON", 8))
1370 goto invalid_fw;
1371
1372 numSections = le32_to_cpu(fHdr->numSections);
1373 image_data += sizeof(struct typhoon_file_header);
1374 remaining -= sizeof(struct typhoon_file_header);
1375
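	/* Walk every section header and make sure each section fits inside
	 * the firmware blob before typhoon_download_firmware() later streams
	 * the same sections to the 3XP.
	 */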
1376 while (numSections--) {
1377 if (remaining < sizeof(struct typhoon_section_header))
1378 goto invalid_fw;
1379
1380 sHdr = (struct typhoon_section_header *) image_data;
1381 image_data += sizeof(struct typhoon_section_header);
1382 section_len = le32_to_cpu(sHdr->len);
1383
1384 if (remaining < section_len)
1385 goto invalid_fw;
1386
1387 image_data += section_len;
1388 remaining -= section_len;
1389 }
1390
1391 return 0;
 1392
 1393invalid_fw:
 1394 netdev_err(tp->dev, "Invalid firmware image\n");
1395 release_firmware(typhoon_fw);
1396 typhoon_fw = NULL;
 1397 return -EINVAL;
1398}
1399
1400static int
1401typhoon_download_firmware(struct typhoon *tp)
1402{
1403 void __iomem *ioaddr = tp->ioaddr;
1404 struct pci_dev *pdev = tp->pdev;
1405 const struct typhoon_file_header *fHdr;
1406 const struct typhoon_section_header *sHdr;
1407 const u8 *image_data;
1408 void *dpage;
1409 dma_addr_t dpage_dma;
 1410 __sum16 csum;
1411 u32 irqEnabled;
1412 u32 irqMasked;
1413 u32 numSections;
1414 u32 section_len;
 1415 u32 len;
1416 u32 load_addr;
1417 u32 hmac;
1418 int i;
1419 int err;
1420
 1421 image_data = (u8 *) typhoon_fw->data;
 1422 fHdr = (struct typhoon_file_header *) image_data;
 1423
1424 /* Cannot just map the firmware image using pci_map_single() as
1425 * the firmware is vmalloc()'d and may not be physically contiguous,
1426 * so we allocate some consistent memory to copy the sections into.
1427 */
 1428 err = -ENOMEM;
1429 dpage = pci_alloc_consistent(pdev, PAGE_SIZE, &dpage_dma);
1430 if(!dpage) {
 1431 netdev_err(tp->dev, "no DMA mem for firmware\n");
1432 goto err_out;
1433 }
1434
1435 irqEnabled = ioread32(ioaddr + TYPHOON_REG_INTR_ENABLE);
1436 iowrite32(irqEnabled | TYPHOON_INTR_BOOTCMD,
1437 ioaddr + TYPHOON_REG_INTR_ENABLE);
1438 irqMasked = ioread32(ioaddr + TYPHOON_REG_INTR_MASK);
1439 iowrite32(irqMasked | TYPHOON_INTR_BOOTCMD,
1440 ioaddr + TYPHOON_REG_INTR_MASK);
1441
1442 err = -ETIMEDOUT;
1443 if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
 1444 netdev_err(tp->dev, "card ready timeout\n");
1445 goto err_out_irq;
1446 }
1447
1448 numSections = le32_to_cpu(fHdr->numSections);
1449 load_addr = le32_to_cpu(fHdr->startAddr);
1450
1451 iowrite32(TYPHOON_INTR_BOOTCMD, ioaddr + TYPHOON_REG_INTR_STATUS);
1452 iowrite32(load_addr, ioaddr + TYPHOON_REG_DOWNLOAD_BOOT_ADDR);
1453 hmac = le32_to_cpu(fHdr->hmacDigest[0]);
1454 iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_0);
1455 hmac = le32_to_cpu(fHdr->hmacDigest[1]);
1456 iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_1);
1457 hmac = le32_to_cpu(fHdr->hmacDigest[2]);
1458 iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_2);
1459 hmac = le32_to_cpu(fHdr->hmacDigest[3]);
1460 iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_3);
1461 hmac = le32_to_cpu(fHdr->hmacDigest[4]);
1462 iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_4);
1463 typhoon_post_pci_writes(ioaddr);
1464 iowrite32(TYPHOON_BOOTCMD_RUNTIME_IMAGE, ioaddr + TYPHOON_REG_COMMAND);
1465
1466 image_data += sizeof(struct typhoon_file_header);
1467
1468 /* The ioread32() in typhoon_wait_interrupt() will force the
1469 * last write to the command register to post, so
1470 * we don't need a typhoon_post_pci_writes() after it.
1471 */
1472 for(i = 0; i < numSections; i++) {
1473 sHdr = (struct typhoon_section_header *) image_data;
1474 image_data += sizeof(struct typhoon_section_header);
1475 load_addr = le32_to_cpu(sHdr->startAddr);
1476 section_len = le32_to_cpu(sHdr->len);
1477
1478 while(section_len) {
1479 len = min_t(u32, section_len, PAGE_SIZE);
 1480
1481 if(typhoon_wait_interrupt(ioaddr) < 0 ||
1482 ioread32(ioaddr + TYPHOON_REG_STATUS) !=
1483 TYPHOON_STATUS_WAITING_FOR_SEGMENT) {
 1484 netdev_err(tp->dev, "segment ready timeout\n");
1485 goto err_out_irq;
1486 }
 1487
 1488 /* Do a pseudo IPv4 checksum on the data -- first
1489 * need to convert each u16 to cpu order before
1490 * summing. Fortunately, due to the properties of
1491 * the checksum, we can do this once, at the end.
1492 */
1493 csum = csum_fold(csum_partial_copy_nocheck(image_data,
1494 dpage, len,
1495 0));
1496
1497 iowrite32(len, ioaddr + TYPHOON_REG_BOOT_LENGTH);
1498 iowrite32(le16_to_cpu((__force __le16)csum),
1499 ioaddr + TYPHOON_REG_BOOT_CHECKSUM);
1500 iowrite32(load_addr,
1501 ioaddr + TYPHOON_REG_BOOT_DEST_ADDR);
1502 iowrite32(0, ioaddr + TYPHOON_REG_BOOT_DATA_HI);
1503 iowrite32(dpage_dma, ioaddr + TYPHOON_REG_BOOT_DATA_LO);
1504 typhoon_post_pci_writes(ioaddr);
1505 iowrite32(TYPHOON_BOOTCMD_SEG_AVAILABLE,
 1506 ioaddr + TYPHOON_REG_COMMAND);
1507
1508 image_data += len;
1509 load_addr += len;
1510 section_len -= len;
1511 }
1512 }
1513
1514 if(typhoon_wait_interrupt(ioaddr) < 0 ||
1515 ioread32(ioaddr + TYPHOON_REG_STATUS) !=
1516 TYPHOON_STATUS_WAITING_FOR_SEGMENT) {
 1517 netdev_err(tp->dev, "final segment ready timeout\n");
1518 goto err_out_irq;
1519 }
1520
1521 iowrite32(TYPHOON_BOOTCMD_DNLD_COMPLETE, ioaddr + TYPHOON_REG_COMMAND);
1522
1523 if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_BOOT) < 0) {
1524 netdev_err(tp->dev, "boot ready timeout, status 0x%0x\n",
1525 ioread32(ioaddr + TYPHOON_REG_STATUS));
1526 goto err_out_irq;
1527 }
1528
1529 err = 0;
1530
1531err_out_irq:
1532 iowrite32(irqMasked, ioaddr + TYPHOON_REG_INTR_MASK);
1533 iowrite32(irqEnabled, ioaddr + TYPHOON_REG_INTR_ENABLE);
1534
 1535 pci_free_consistent(pdev, PAGE_SIZE, dpage, dpage_dma);
1536
1537err_out:
1538 return err;
1539}
1540
1541static int
1542typhoon_boot_3XP(struct typhoon *tp, u32 initial_status)
1543{
1544 void __iomem *ioaddr = tp->ioaddr;
1545
1546 if(typhoon_wait_status(ioaddr, initial_status) < 0) {
 1547 netdev_err(tp->dev, "boot ready timeout\n");
1548 goto out_timeout;
1549 }
1550
1551 iowrite32(0, ioaddr + TYPHOON_REG_BOOT_RECORD_ADDR_HI);
1552 iowrite32(tp->shared_dma, ioaddr + TYPHOON_REG_BOOT_RECORD_ADDR_LO);
1553 typhoon_post_pci_writes(ioaddr);
1554 iowrite32(TYPHOON_BOOTCMD_REG_BOOT_RECORD,
1555 ioaddr + TYPHOON_REG_COMMAND);
1556
1557 if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_RUNNING) < 0) {
1558 netdev_err(tp->dev, "boot finish timeout (status 0x%x)\n",
1559 ioread32(ioaddr + TYPHOON_REG_STATUS));
1560 goto out_timeout;
1561 }
1562
1563 /* Clear the Transmit and Command ready registers
1564 */
1565 iowrite32(0, ioaddr + TYPHOON_REG_TX_HI_READY);
1566 iowrite32(0, ioaddr + TYPHOON_REG_CMD_READY);
1567 iowrite32(0, ioaddr + TYPHOON_REG_TX_LO_READY);
1568 typhoon_post_pci_writes(ioaddr);
1569 iowrite32(TYPHOON_BOOTCMD_BOOT, ioaddr + TYPHOON_REG_COMMAND);
1570
1571 return 0;
1572
1573out_timeout:
1574 return -ETIMEDOUT;
1575}
1576
1577static u32
1578typhoon_clean_tx(struct typhoon *tp, struct transmit_ring *txRing,
 1579 volatile __le32 * index)
1580{
1581 u32 lastRead = txRing->lastRead;
1582 struct tx_desc *tx;
1583 dma_addr_t skb_dma;
1584 int dma_len;
1585 int type;
1586
1587 while(lastRead != le32_to_cpu(*index)) {
1588 tx = (struct tx_desc *) (txRing->ringBase + lastRead);
1589 type = tx->flags & TYPHOON_TYPE_MASK;
1590
1591 if(type == TYPHOON_TX_DESC) {
1592 /* This tx_desc describes a packet.
1593 */
 1594 unsigned long ptr = tx->tx_addr;
1595 struct sk_buff *skb = (struct sk_buff *) ptr;
1596 dev_kfree_skb_irq(skb);
1597 } else if(type == TYPHOON_FRAG_DESC) {
1598 /* This tx_desc describes a memory mapping. Free it.
1599 */
 1600 skb_dma = (dma_addr_t) le32_to_cpu(tx->frag.addr);
1601 dma_len = le16_to_cpu(tx->len);
1602 pci_unmap_single(tp->pdev, skb_dma, dma_len,
1603 PCI_DMA_TODEVICE);
1604 }
1605
1606 tx->flags = 0;
1607 typhoon_inc_tx_index(&lastRead, 1);
1608 }
1609
1610 return lastRead;
1611}
1612
1613static void
1614typhoon_tx_complete(struct typhoon *tp, struct transmit_ring *txRing,
 1615 volatile __le32 * index)
1616{
1617 u32 lastRead;
1618 int numDesc = MAX_SKB_FRAGS + 1;
1619
1620 /* This will need changing if we start to use the Hi Tx ring. */
1621 lastRead = typhoon_clean_tx(tp, txRing, index);
1622 if(netif_queue_stopped(tp->dev) && typhoon_num_free(txRing->lastWrite,
1623 lastRead, TXLO_ENTRIES) > (numDesc + 2))
1624 netif_wake_queue(tp->dev);
1625
1626 txRing->lastRead = lastRead;
1627 smp_wmb();
1628}
1629
1630static void
1631typhoon_recycle_rx_skb(struct typhoon *tp, u32 idx)
1632{
1633 struct typhoon_indexes *indexes = tp->indexes;
1634 struct rxbuff_ent *rxb = &tp->rxbuffers[idx];
1635 struct basic_ring *ring = &tp->rxBuffRing;
1636 struct rx_free *r;
1637
1638 if((ring->lastWrite + sizeof(*r)) % (RXFREE_ENTRIES * sizeof(*r)) ==
 1639 le32_to_cpu(indexes->rxBuffCleared)) {
1640 /* no room in ring, just drop the skb
1641 */
1642 dev_kfree_skb_any(rxb->skb);
1643 rxb->skb = NULL;
1644 return;
1645 }
1646
1647 r = (struct rx_free *) (ring->ringBase + ring->lastWrite);
1648 typhoon_inc_rxfree_index(&ring->lastWrite, 1);
1649 r->virtAddr = idx;
1650 r->physAddr = cpu_to_le32(rxb->dma_addr);
1651
1652 /* Tell the card about it */
1653 wmb();
1654 indexes->rxBuffReady = cpu_to_le32(ring->lastWrite);
1655}
1656
1657static int
1658typhoon_alloc_rx_skb(struct typhoon *tp, u32 idx)
1659{
1660 struct typhoon_indexes *indexes = tp->indexes;
1661 struct rxbuff_ent *rxb = &tp->rxbuffers[idx];
1662 struct basic_ring *ring = &tp->rxBuffRing;
1663 struct rx_free *r;
1664 struct sk_buff *skb;
1665 dma_addr_t dma_addr;
1666
1667 rxb->skb = NULL;
1668
1669 if((ring->lastWrite + sizeof(*r)) % (RXFREE_ENTRIES * sizeof(*r)) ==
 1670 le32_to_cpu(indexes->rxBuffCleared))
1671 return -ENOMEM;
1672
1673 skb = dev_alloc_skb(PKT_BUF_SZ);
1674 if(!skb)
1675 return -ENOMEM;
1676
1677#if 0
 1678 /* Please, 3com, fix the firmware to allow DMA to an unaligned
1679 * address! Pretty please?
1680 */
1681 skb_reserve(skb, 2);
1682#endif
1683
1684 skb->dev = tp->dev;
 1685 dma_addr = pci_map_single(tp->pdev, skb->data,
1686 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
1687
1688 /* Since no card does 64 bit DAC, the high bits will never
1689 * change from zero.
1690 */
1691 r = (struct rx_free *) (ring->ringBase + ring->lastWrite);
1692 typhoon_inc_rxfree_index(&ring->lastWrite, 1);
1693 r->virtAddr = idx;
1694 r->physAddr = cpu_to_le32(dma_addr);
1695 rxb->skb = skb;
1696 rxb->dma_addr = dma_addr;
1697
1698 /* Tell the card about it */
1699 wmb();
1700 indexes->rxBuffReady = cpu_to_le32(ring->lastWrite);
1701 return 0;
1702}
1703
1704static int
1705typhoon_rx(struct typhoon *tp, struct basic_ring *rxRing, volatile __le32 * ready,
1706 volatile __le32 * cleared, int budget)
1707{
1708 struct rx_desc *rx;
1709 struct sk_buff *skb, *new_skb;
1710 struct rxbuff_ent *rxb;
1711 dma_addr_t dma_addr;
1712 u32 local_ready;
1713 u32 rxaddr;
1714 int pkt_len;
1715 u32 idx;
03a710ff 1716 __le32 csum_bits;
1717 int received;
1718
1719 received = 0;
1720 local_ready = le32_to_cpu(*ready);
1721 rxaddr = le32_to_cpu(*cleared);
1722 while(rxaddr != local_ready && budget > 0) {
1723 rx = (struct rx_desc *) (rxRing->ringBase + rxaddr);
1724 idx = rx->addr;
1725 rxb = &tp->rxbuffers[idx];
1726 skb = rxb->skb;
1727 dma_addr = rxb->dma_addr;
1728
1729 typhoon_inc_rx_index(&rxaddr, 1);
1730
1731 if(rx->flags & TYPHOON_RX_ERROR) {
1732 typhoon_recycle_rx_skb(tp, idx);
1733 continue;
1734 }
1735
1736 pkt_len = le16_to_cpu(rx->frameLen);
1737
1738 if(pkt_len < rx_copybreak &&
1739 (new_skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
1740 skb_reserve(new_skb, 2);
1741 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr,
1742 PKT_BUF_SZ,
1743 PCI_DMA_FROMDEVICE);
8c7b7faa 1744 skb_copy_to_linear_data(new_skb, skb->data, pkt_len);
1745 pci_dma_sync_single_for_device(tp->pdev, dma_addr,
1746 PKT_BUF_SZ,
1747 PCI_DMA_FROMDEVICE);
1748 skb_put(new_skb, pkt_len);
1749 typhoon_recycle_rx_skb(tp, idx);
1750 } else {
1751 new_skb = skb;
1752 skb_put(new_skb, pkt_len);
1753 pci_unmap_single(tp->pdev, dma_addr, PKT_BUF_SZ,
1754 PCI_DMA_FROMDEVICE);
1755 typhoon_alloc_rx_skb(tp, idx);
1756 }
1757 new_skb->protocol = eth_type_trans(new_skb, tp->dev);
1758 csum_bits = rx->rxStatus & (TYPHOON_RX_IP_CHK_GOOD |
1759 TYPHOON_RX_UDP_CHK_GOOD | TYPHOON_RX_TCP_CHK_GOOD);
1760 if(csum_bits ==
1761 (TYPHOON_RX_IP_CHK_GOOD | TYPHOON_RX_TCP_CHK_GOOD) ||
1762 csum_bits ==
1763 (TYPHOON_RX_IP_CHK_GOOD | TYPHOON_RX_UDP_CHK_GOOD)) {
1764 new_skb->ip_summed = CHECKSUM_UNNECESSARY;
1765 } else
1766 new_skb->ip_summed = CHECKSUM_NONE;
1767
1768 spin_lock(&tp->state_lock);
1769 if(tp->vlgrp != NULL && rx->rxStatus & TYPHOON_RX_VLAN)
1770 vlan_hwaccel_receive_skb(new_skb, tp->vlgrp,
1771 ntohl(rx->vlanTag) & 0xffff);
1772 else
1773 netif_receive_skb(new_skb);
1774 spin_unlock(&tp->state_lock);
1775
1776 received++;
1777 budget--;
1778 }
1779 *cleared = cpu_to_le32(rxaddr);
1780
1781 return received;
1782}
1783
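/* Top up the Rx free ring by allocating a buffer for every slot that
 * does not currently own an skb, stopping early if an allocation fails
 * or the free ring fills up.
 */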
1784static void
1785typhoon_fill_free_ring(struct typhoon *tp)
1786{
1787 u32 i;
1788
1789 for(i = 0; i < RXENT_ENTRIES; i++) {
1790 struct rxbuff_ent *rxb = &tp->rxbuffers[i];
1791 if(rxb->skb)
1792 continue;
1793 if(typhoon_alloc_rx_skb(tp, i) < 0)
1794 break;
1795 }
1796}
1797
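/* NAPI poll handler: pick up any pending command responses and Tx
 * completions, service the high- and low-priority Rx rings within
 * budget, refill the Rx free ring if it has drained, and re-enable
 * interrupts once all outstanding work is done.
 */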
1798static int
bea3348e 1799typhoon_poll(struct napi_struct *napi, int budget)
1da177e4 1800{
bea3348e 1801 struct typhoon *tp = container_of(napi, struct typhoon, napi);
1da177e4 1802 struct typhoon_indexes *indexes = tp->indexes;
bea3348e 1803 int work_done;
1804
1805 rmb();
1806 if(!tp->awaiting_resp && indexes->respReady != indexes->respCleared)
1807 typhoon_process_response(tp, 0, NULL);
1808
1809 if(le32_to_cpu(indexes->txLoCleared) != tp->txLoRing.lastRead)
1810 typhoon_tx_complete(tp, &tp->txLoRing, &indexes->txLoCleared);
1811
1da177e4 1812 work_done = 0;
1813
1814 if(indexes->rxHiCleared != indexes->rxHiReady) {
bea3348e 1815 work_done += typhoon_rx(tp, &tp->rxHiRing, &indexes->rxHiReady,
1da177e4 1816 &indexes->rxHiCleared, budget);
1817 }
1818
1819 if(indexes->rxLoCleared != indexes->rxLoReady) {
1820 work_done += typhoon_rx(tp, &tp->rxLoRing, &indexes->rxLoReady,
bea3348e 1821 &indexes->rxLoCleared, budget - work_done);
1822 }
1823
1824 if(le32_to_cpu(indexes->rxBuffCleared) == tp->rxBuffRing.lastWrite) {
1825 /* rxBuff ring is empty, try to fill it. */
1826 typhoon_fill_free_ring(tp);
1827 }
1828
bea3348e 1829 if (work_done < budget) {
288379f0 1830 napi_complete(napi);
1831 iowrite32(TYPHOON_INTR_NONE,
1832 tp->ioaddr + TYPHOON_REG_INTR_MASK);
1833 typhoon_post_pci_writes(tp->ioaddr);
1834 }
1835
bea3348e 1836 return work_done;
1837}
1838
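/* Interrupt handler: acknowledge and mask the interrupt, then defer
 * the real work to the NAPI poll routine.
 */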
1839static irqreturn_t
7d12e780 1840typhoon_interrupt(int irq, void *dev_instance)
1da177e4 1841{
06efcad0 1842 struct net_device *dev = dev_instance;
8f15ea42 1843 struct typhoon *tp = netdev_priv(dev);
1844 void __iomem *ioaddr = tp->ioaddr;
1845 u32 intr_status;
1846
1847 intr_status = ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
1848 if(!(intr_status & TYPHOON_INTR_HOST_INT))
1849 return IRQ_NONE;
1850
1851 iowrite32(intr_status, ioaddr + TYPHOON_REG_INTR_STATUS);
1852
288379f0 1853 if (napi_schedule_prep(&tp->napi)) {
1854 iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
1855 typhoon_post_pci_writes(ioaddr);
288379f0 1856 __napi_schedule(&tp->napi);
1da177e4 1857 } else {
0bc88e4a 1858 netdev_err(dev, "Error, poll already scheduled\n");
1859 }
1860 return IRQ_HANDLED;
1861}
1862
1863static void
1864typhoon_free_rx_rings(struct typhoon *tp)
1865{
1866 u32 i;
1867
1868 for(i = 0; i < RXENT_ENTRIES; i++) {
1869 struct rxbuff_ent *rxb = &tp->rxbuffers[i];
1870 if(rxb->skb) {
1871 pci_unmap_single(tp->pdev, rxb->dma_addr, PKT_BUF_SZ,
1872 PCI_DMA_FROMDEVICE);
1873 dev_kfree_skb(rxb->skb);
1874 rxb->skb = NULL;
1875 }
1876 }
1877}
1878
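/* Arm the requested wake events, command the 3XP to go to sleep, and
 * move the PCI device into the given low-power state. The carrier is
 * reported down since the link cannot be monitored while asleep.
 */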
1879static int
03a710ff 1880typhoon_sleep(struct typhoon *tp, pci_power_t state, __le16 events)
1881{
1882 struct pci_dev *pdev = tp->pdev;
1883 void __iomem *ioaddr = tp->ioaddr;
1884 struct cmd_desc xp_cmd;
1885 int err;
1886
1887 INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_ENABLE_WAKE_EVENTS);
1888 xp_cmd.parm1 = events;
1889 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1890 if(err < 0) {
1891 netdev_err(tp->dev, "typhoon_sleep(): wake events cmd err %d\n",
1892 err);
1893 return err;
1894 }
1895
1896 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_GOTO_SLEEP);
1897 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1898 if(err < 0) {
0bc88e4a 1899 netdev_err(tp->dev, "typhoon_sleep(): sleep cmd err %d\n", err);
1900 return err;
1901 }
1902
1903 if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_SLEEPING) < 0)
1904 return -ETIMEDOUT;
1905
1906 /* Since we cannot monitor the status of the link while sleeping,
1907 * tell the world it went away.
1908 */
1909 netif_carrier_off(tp->dev);
1910
2a569579 1911 pci_enable_wake(tp->pdev, state, 1);
1da177e4 1912 pci_disable_device(pdev);
2a569579 1913 return pci_set_power_state(pdev, state);
1914}
1915
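/* Bring the device back to D0, restore its PCI configuration and wake
 * the 3XP. Newer Sleep Images need a full reset before the runtime
 * image can be downloaded again.
 */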
1916static int
1917typhoon_wakeup(struct typhoon *tp, int wait_type)
1918{
1919 struct pci_dev *pdev = tp->pdev;
1920 void __iomem *ioaddr = tp->ioaddr;
1921
1922 pci_set_power_state(pdev, PCI_D0);
1923 pci_restore_state(pdev);
1924
1925 /* Post 2.x.x versions of the Sleep Image require a reset before
1926 * we can download the Runtime Image. But let's not make users of
1927 * the old firmware pay for the reset.
1928 */
1929 iowrite32(TYPHOON_BOOTCMD_WAKEUP, ioaddr + TYPHOON_REG_COMMAND);
1930 if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_HOST) < 0 ||
1931 (tp->capabilities & TYPHOON_WAKEUP_NEEDS_RESET))
1932 return typhoon_reset(ioaddr, wait_type);
1933
1934 return 0;
1935}
1936
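/* Full bring-up of the adapter: initialize and fill the rings, download
 * and boot the runtime firmware, then configure packet size, MAC
 * address, interrupt coalescing, transceiver, VLAN type and offloads
 * before enabling Rx, Tx and interrupts. On any failure the adapter is
 * reset and the rings are reinitialized.
 */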
1937static int
1938typhoon_start_runtime(struct typhoon *tp)
1939{
1940 struct net_device *dev = tp->dev;
1941 void __iomem *ioaddr = tp->ioaddr;
1942 struct cmd_desc xp_cmd;
1943 int err;
1944
1945 typhoon_init_rings(tp);
1946 typhoon_fill_free_ring(tp);
1947
1948 err = typhoon_download_firmware(tp);
1949 if(err < 0) {
0bc88e4a 1950 netdev_err(tp->dev, "cannot load runtime on 3XP\n");
1951 goto error_out;
1952 }
1953
1954 if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_BOOT) < 0) {
0bc88e4a 1955 netdev_err(tp->dev, "cannot boot 3XP\n");
1956 err = -EIO;
1957 goto error_out;
1958 }
1959
1960 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAX_PKT_SIZE);
1961 xp_cmd.parm1 = cpu_to_le16(PKT_BUF_SZ);
1962 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1963 if(err < 0)
1964 goto error_out;
1965
1966 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAC_ADDRESS);
1967 xp_cmd.parm1 = cpu_to_le16(ntohs(*(__be16 *)&dev->dev_addr[0]));
1968 xp_cmd.parm2 = cpu_to_le32(ntohl(*(__be32 *)&dev->dev_addr[2]));
1969 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1970 if(err < 0)
1971 goto error_out;
1972
1973 /* Disable IRQ coalescing -- we can reenable it when 3Com gives
1974 * us some more information on how to control it.
1975 */
1976 INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_IRQ_COALESCE_CTRL);
1977 xp_cmd.parm1 = 0;
1978 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1979 if(err < 0)
1980 goto error_out;
1981
1982 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_XCVR_SELECT);
1983 xp_cmd.parm1 = tp->xcvr_select;
1984 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1985 if(err < 0)
1986 goto error_out;
1987
1988 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_VLAN_TYPE_WRITE);
649aa95d 1989 xp_cmd.parm1 = cpu_to_le16(ETH_P_8021Q);
1990 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1991 if(err < 0)
1992 goto error_out;
1993
1994 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_OFFLOAD_TASKS);
1995 spin_lock_bh(&tp->state_lock);
1996 xp_cmd.parm2 = tp->offload;
1997 xp_cmd.parm3 = tp->offload;
1998 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1999 spin_unlock_bh(&tp->state_lock);
2000 if(err < 0)
2001 goto error_out;
2002
2003 typhoon_set_rx_mode(dev);
2004
2005 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_TX_ENABLE);
2006 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
2007 if(err < 0)
2008 goto error_out;
2009
2010 INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_RX_ENABLE);
2011 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
2012 if(err < 0)
2013 goto error_out;
2014
2015 tp->card_state = Running;
2016 smp_wmb();
2017
2018 iowrite32(TYPHOON_INTR_ENABLE_ALL, ioaddr + TYPHOON_REG_INTR_ENABLE);
2019 iowrite32(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_MASK);
2020 typhoon_post_pci_writes(ioaddr);
2021
2022 return 0;
2023
2024error_out:
2025 typhoon_reset(ioaddr, WaitNoSleep);
2026 typhoon_free_rx_rings(tp);
2027 typhoon_init_rings(tp);
2028 return err;
2029}
2030
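/* Orderly shutdown of the running firmware: disable Rx, give pending
 * Tx up to half a second to drain, snapshot the statistics, then halt
 * and reset the 3XP and reclaim any Tx packets still outstanding.
 */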
2031static int
2032typhoon_stop_runtime(struct typhoon *tp, int wait_type)
2033{
2034 struct typhoon_indexes *indexes = tp->indexes;
2035 struct transmit_ring *txLo = &tp->txLoRing;
2036 void __iomem *ioaddr = tp->ioaddr;
2037 struct cmd_desc xp_cmd;
2038 int i;
2039
2040 /* Disable interrupts early, since we can't schedule a poll
2041 * when called with !netif_running(). This will be posted
2042 * when we force the posting of the command.
2043 */
2044 iowrite32(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_ENABLE);
2045
2046 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_RX_DISABLE);
2047 typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
2048
2049	/* Wait 1/2 sec for any outstanding transmits to complete.
2050	 * We'll clean up after the reset if this times out.
2051 */
2052 for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
2053 if(indexes->txLoCleared == cpu_to_le32(txLo->lastWrite))
2054 break;
2055 udelay(TYPHOON_UDELAY);
2056 }
2057
2058 if(i == TYPHOON_WAIT_TIMEOUT)
0bc88e4a 2059 netdev_err(tp->dev, "halt timed out waiting for Tx to complete\n");
2060
2061 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_TX_DISABLE);
2062 typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
2063
2064 /* save the statistics so when we bring the interface up again,
2065 * the values reported to userspace are correct.
2066 */
2067 tp->card_state = Sleeping;
2068 smp_wmb();
2069 typhoon_do_get_stats(tp);
2070 memcpy(&tp->stats_saved, &tp->stats, sizeof(struct net_device_stats));
2071
2072 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_HALT);
2073 typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
2074
2075 if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_HALTED) < 0)
0bc88e4a 2076 netdev_err(tp->dev, "timed out waiting for 3XP to halt\n");
2077
2078 if(typhoon_reset(ioaddr, wait_type) < 0) {
0bc88e4a 2079 netdev_err(tp->dev, "unable to reset 3XP\n");
2080 return -ETIMEDOUT;
2081 }
2082
2083 /* cleanup any outstanding Tx packets */
2084 if(indexes->txLoCleared != cpu_to_le32(txLo->lastWrite)) {
2085 indexes->txLoCleared = cpu_to_le32(txLo->lastWrite);
2086 typhoon_clean_tx(tp, &tp->txLoRing, &indexes->txLoCleared);
2087 }
2088
2089 return 0;
2090}
2091
2092static void
2093typhoon_tx_timeout(struct net_device *dev)
2094{
2095 struct typhoon *tp = netdev_priv(dev);
2096
2097 if(typhoon_reset(tp->ioaddr, WaitNoSleep) < 0) {
0bc88e4a 2098 netdev_warn(dev, "could not reset in tx timeout\n");
2099 goto truely_dead;
2100 }
2101
2102 /* If we ever start using the Hi ring, it will need cleaning too */
2103 typhoon_clean_tx(tp, &tp->txLoRing, &tp->indexes->txLoCleared);
2104 typhoon_free_rx_rings(tp);
2105
2106 if(typhoon_start_runtime(tp) < 0) {
0bc88e4a 2107 netdev_err(dev, "could not start runtime in tx timeout\n");
2108 goto truely_dead;
2109 }
2110
2111 netif_wake_queue(dev);
2112 return;
2113
2114truely_dead:
2115 /* Reset the hardware, and turn off carrier to avoid more timeouts */
2116 typhoon_reset(tp->ioaddr, NoWait);
2117 netif_carrier_off(dev);
2118}
2119
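/* Bring the interface up: request the firmware image, wake the adapter,
 * install the (shared) interrupt handler, enable NAPI and start the
 * runtime. On failure, drop back to the sleep image and put the card
 * to sleep again.
 */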
2120static int
2121typhoon_open(struct net_device *dev)
2122{
2123 struct typhoon *tp = netdev_priv(dev);
2124 int err;
2125
2126 err = typhoon_request_firmware(tp);
2127 if (err)
2128 goto out;
2129
2130 err = typhoon_wakeup(tp, WaitSleep);
2131 if(err < 0) {
0bc88e4a 2132 netdev_err(dev, "unable to wakeup device\n");
2133 goto out_sleep;
2134 }
2135
aa36ab8e 2136 err = request_irq(dev->irq, typhoon_interrupt, IRQF_SHARED,
2137 dev->name, dev);
2138 if(err < 0)
2139 goto out_sleep;
2140
2141 napi_enable(&tp->napi);
2142
1da177e4 2143 err = typhoon_start_runtime(tp);
2144 if(err < 0) {
2145 napi_disable(&tp->napi);
1da177e4 2146 goto out_irq;
bea3348e 2147 }
2148
2149 netif_start_queue(dev);
2150 return 0;
2151
2152out_irq:
2153 free_irq(dev->irq, dev);
2154
2155out_sleep:
2156 if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
0bc88e4a 2157 netdev_err(dev, "unable to reboot into sleep img\n");
2158 typhoon_reset(tp->ioaddr, NoWait);
2159 goto out;
2160 }
2161
6aa20a22 2162 if(typhoon_sleep(tp, PCI_D3hot, 0) < 0)
0bc88e4a 2163 netdev_err(dev, "unable to go back to sleep\n");
2164
2165out:
2166 return err;
2167}
2168
2169static int
2170typhoon_close(struct net_device *dev)
2171{
2172 struct typhoon *tp = netdev_priv(dev);
2173
2174 netif_stop_queue(dev);
bea3348e 2175 napi_disable(&tp->napi);
2176
2177 if(typhoon_stop_runtime(tp, WaitSleep) < 0)
0bc88e4a 2178 netdev_err(dev, "unable to stop runtime\n");
2179
2180 /* Make sure there is no irq handler running on a different CPU. */
2181 free_irq(dev->irq, dev);
2182
2183 typhoon_free_rx_rings(tp);
2184 typhoon_init_rings(tp);
2185
2186 if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0)
0bc88e4a 2187 netdev_err(dev, "unable to boot sleep image\n");
2188
2189 if(typhoon_sleep(tp, PCI_D3hot, 0) < 0)
0bc88e4a 2190 netdev_err(dev, "unable to put card to sleep\n");
2191
2192 return 0;
2193}
2194
2195#ifdef CONFIG_PM
2196static int
2197typhoon_resume(struct pci_dev *pdev)
2198{
2199 struct net_device *dev = pci_get_drvdata(pdev);
2200 struct typhoon *tp = netdev_priv(dev);
2201
2202	/* If the interface is down, we'll resume when it is brought back up.
2203 */
2204 if(!netif_running(dev))
2205 return 0;
2206
2207 if(typhoon_wakeup(tp, WaitNoSleep) < 0) {
0bc88e4a 2208 netdev_err(dev, "critical: could not wake up in resume\n");
2209 goto reset;
2210 }
2211
2212 if(typhoon_start_runtime(tp) < 0) {
0bc88e4a 2213 netdev_err(dev, "critical: could not start runtime in resume\n");
2214 goto reset;
2215 }
2216
2217 netif_device_attach(dev);
2218 return 0;
2219
2220reset:
2221 typhoon_reset(tp->ioaddr, NoWait);
2222 return -EBUSY;
2223}
2224
2225static int
2226typhoon_suspend(struct pci_dev *pdev, pm_message_t state)
2227{
2228 struct net_device *dev = pci_get_drvdata(pdev);
2229 struct typhoon *tp = netdev_priv(dev);
2230 struct cmd_desc xp_cmd;
2231
2232 /* If we're down, we're already suspended.
2233 */
2234 if(!netif_running(dev))
2235 return 0;
2236
2237 spin_lock_bh(&tp->state_lock);
2238 if(tp->vlgrp && tp->wol_events & TYPHOON_WAKE_MAGIC_PKT) {
2239 spin_unlock_bh(&tp->state_lock);
0bc88e4a 2240 netdev_err(dev, "cannot do WAKE_MAGIC with VLANS\n");
2241 return -EBUSY;
2242 }
2243 spin_unlock_bh(&tp->state_lock);
2244
2245 netif_device_detach(dev);
2246
2247 if(typhoon_stop_runtime(tp, WaitNoSleep) < 0) {
0bc88e4a 2248 netdev_err(dev, "unable to stop runtime\n");
2249 goto need_resume;
2250 }
2251
2252 typhoon_free_rx_rings(tp);
2253 typhoon_init_rings(tp);
2254
2255 if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
0bc88e4a 2256 netdev_err(dev, "unable to boot sleep image\n");
2257 goto need_resume;
2258 }
2259
2260 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAC_ADDRESS);
2261 xp_cmd.parm1 = cpu_to_le16(ntohs(*(__be16 *)&dev->dev_addr[0]));
2262 xp_cmd.parm2 = cpu_to_le32(ntohl(*(__be32 *)&dev->dev_addr[2]));
1da177e4 2263 if(typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL) < 0) {
0bc88e4a 2264 netdev_err(dev, "unable to set mac address in suspend\n");
2265 goto need_resume;
2266 }
2267
2268 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_RX_FILTER);
2269 xp_cmd.parm1 = TYPHOON_RX_FILTER_DIRECTED | TYPHOON_RX_FILTER_BROADCAST;
2270 if(typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL) < 0) {
0bc88e4a 2271 netdev_err(dev, "unable to set rx filter in suspend\n");
2272 goto need_resume;
2273 }
2274
2a569579 2275 if(typhoon_sleep(tp, pci_choose_state(pdev, state), tp->wol_events) < 0) {
0bc88e4a 2276 netdev_err(dev, "unable to put card to sleep\n");
2277 goto need_resume;
2278 }
2279
2280 return 0;
2281
2282need_resume:
2283 typhoon_resume(pdev);
2284 return -EBUSY;
2285}
2286#endif
2287
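/* Decide whether MMIO is usable by asking the card to raise a self
 * interrupt through its memory-mapped registers and checking that the
 * interrupt status register actually changes. Returns 1 to use MMIO,
 * 0 to fall back to port IO.
 */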
2288static int __devinit
2289typhoon_test_mmio(struct pci_dev *pdev)
2290{
2291 void __iomem *ioaddr = pci_iomap(pdev, 1, 128);
2292 int mode = 0;
2293 u32 val;
2294
2295 if(!ioaddr)
2296 goto out;
2297
2298 if(ioread32(ioaddr + TYPHOON_REG_STATUS) !=
2299 TYPHOON_STATUS_WAITING_FOR_HOST)
2300 goto out_unmap;
2301
2302 iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
2303 iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);
2304 iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_ENABLE);
2305
2306 /* Ok, see if we can change our interrupt status register by
2307 * sending ourselves an interrupt. If so, then MMIO works.
2308 * The 50usec delay is arbitrary -- it could probably be smaller.
2309 */
2310 val = ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
2311 if((val & TYPHOON_INTR_SELF) == 0) {
2312 iowrite32(1, ioaddr + TYPHOON_REG_SELF_INTERRUPT);
2313 ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
2314 udelay(50);
2315 val = ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
2316 if(val & TYPHOON_INTR_SELF)
2317 mode = 1;
2318 }
2319
2320 iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
2321 iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);
2322 iowrite32(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_ENABLE);
2323 ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
2324
2325out_unmap:
2326 pci_iounmap(pdev, ioaddr);
2327
2328out:
2329 if(!mode)
0bc88e4a 2330 pr_info("%s: falling back to port IO\n", pci_name(pdev));
2331 return mode;
2332}
2333
2334static const struct net_device_ops typhoon_netdev_ops = {
2335 .ndo_open = typhoon_open,
2336 .ndo_stop = typhoon_close,
2337 .ndo_start_xmit = typhoon_start_tx,
2338 .ndo_set_multicast_list = typhoon_set_rx_mode,
2339 .ndo_tx_timeout = typhoon_tx_timeout,
2340 .ndo_get_stats = typhoon_get_stats,
2341 .ndo_validate_addr = eth_validate_addr,
2342 .ndo_set_mac_address = typhoon_set_mac_address,
2343 .ndo_change_mtu = eth_change_mtu,
2344 .ndo_vlan_rx_register = typhoon_vlan_rx_register,
2345};
2346
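/* PCI probe: allocate the net device, validate and map the IO/MMIO
 * BARs, allocate the DMA-coherent shared area, reset the 3XP and boot
 * its sleep image to read the MAC address and firmware version, then
 * put the card back to sleep until the interface is opened and finally
 * register the net device.
 */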
2347static int __devinit
2348typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2349{
2350 struct net_device *dev;
2351 struct typhoon *tp;
2352 int card_id = (int) ent->driver_data;
2353 void __iomem *ioaddr;
2354 void *shared;
2355 dma_addr_t shared_dma;
2356 struct cmd_desc xp_cmd;
2357 struct resp_desc xp_resp[3];
1da177e4 2358 int err = 0;
0bc88e4a 2359 const char *err_msg;
2360
2361 dev = alloc_etherdev(sizeof(*tp));
2362 if(dev == NULL) {
0bc88e4a 2363 err_msg = "unable to alloc new net device";
2364 err = -ENOMEM;
2365 goto error_out;
2366 }
2367 SET_NETDEV_DEV(dev, &pdev->dev);
2368
2369 err = pci_enable_device(pdev);
2370 if(err < 0) {
0bc88e4a 2371 err_msg = "unable to enable device";
2372 goto error_out_dev;
2373 }
2374
2375 err = pci_set_mwi(pdev);
2376 if(err < 0) {
0bc88e4a 2377 err_msg = "unable to set MWI";
2378 goto error_out_disable;
2379 }
2380
284901a9 2381 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
1da177e4 2382 if(err < 0) {
0bc88e4a 2383 err_msg = "No usable DMA configuration";
2384 goto error_out_mwi;
2385 }
2386
2387 /* sanity checks on IO and MMIO BARs
2388 */
2389 if(!(pci_resource_flags(pdev, 0) & IORESOURCE_IO)) {
0bc88e4a 2390		err_msg = "region #0 not a PCI IO resource, aborting";
2391 err = -ENODEV;
2392 goto error_out_mwi;
2393 }
2394 if(pci_resource_len(pdev, 0) < 128) {
0bc88e4a 2395 err_msg = "Invalid PCI IO region size, aborting";
2396 err = -ENODEV;
2397 goto error_out_mwi;
2398 }
2399 if(!(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
0bc88e4a 2400 err_msg = "region #1 not a PCI MMIO resource, aborting";
2401 err = -ENODEV;
2402 goto error_out_mwi;
2403 }
2404 if(pci_resource_len(pdev, 1) < 128) {
0bc88e4a 2405 err_msg = "Invalid PCI MMIO region size, aborting";
2406 err = -ENODEV;
2407 goto error_out_mwi;
2408 }
2409
0bc88e4a 2410 err = pci_request_regions(pdev, KBUILD_MODNAME);
1da177e4 2411 if(err < 0) {
0bc88e4a 2412 err_msg = "could not request regions";
2413 goto error_out_mwi;
2414 }
2415
2416 /* map our registers
2417 */
2418 if(use_mmio != 0 && use_mmio != 1)
2419 use_mmio = typhoon_test_mmio(pdev);
2420
2421 ioaddr = pci_iomap(pdev, use_mmio, 128);
2422 if (!ioaddr) {
0bc88e4a 2423 err_msg = "cannot remap registers, aborting";
2424 err = -EIO;
2425 goto error_out_regions;
2426 }
2427
2428 /* allocate pci dma space for rx and tx descriptor rings
2429 */
2430 shared = pci_alloc_consistent(pdev, sizeof(struct typhoon_shared),
2431 &shared_dma);
2432 if(!shared) {
0bc88e4a 2433 err_msg = "could not allocate DMA memory";
2434 err = -ENOMEM;
2435 goto error_out_remap;
2436 }
2437
2438 dev->irq = pdev->irq;
2439 tp = netdev_priv(dev);
2440 tp->shared = (struct typhoon_shared *) shared;
2441 tp->shared_dma = shared_dma;
2442 tp->pdev = pdev;
2443 tp->tx_pdev = pdev;
2444 tp->ioaddr = ioaddr;
2445 tp->tx_ioaddr = ioaddr;
2446 tp->dev = dev;
2447
2448 /* Init sequence:
2449 * 1) Reset the adapter to clear any bad juju
2450 * 2) Reload the sleep image
2451 * 3) Boot the sleep image
2452 * 4) Get the hardware address.
2453 * 5) Put the card to sleep.
2454 */
2455 if (typhoon_reset(ioaddr, WaitSleep) < 0) {
0bc88e4a 2456 err_msg = "could not reset 3XP";
2457 err = -EIO;
2458 goto error_out_dma;
2459 }
2460
2461 /* Now that we've reset the 3XP and are sure it's not going to
2462 * write all over memory, enable bus mastering, and save our
2463 * state for resuming after a suspend.
2464 */
2465 pci_set_master(pdev);
2466 pci_save_state(pdev);
2467
2468 typhoon_init_interface(tp);
2469 typhoon_init_rings(tp);
2470
2471 if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
0bc88e4a 2472 err_msg = "cannot boot 3XP sleep image";
2473 err = -EIO;
2474 goto error_out_reset;
2475 }
2476
2477 INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_MAC_ADDRESS);
2478 if(typhoon_issue_command(tp, 1, &xp_cmd, 1, xp_resp) < 0) {
0bc88e4a 2479 err_msg = "cannot read MAC address";
2480 err = -EIO;
2481 goto error_out_reset;
2482 }
2483
2484 *(__be16 *)&dev->dev_addr[0] = htons(le16_to_cpu(xp_resp[0].parm1));
2485 *(__be32 *)&dev->dev_addr[2] = htonl(le32_to_cpu(xp_resp[0].parm2));
2486
2487 if(!is_valid_ether_addr(dev->dev_addr)) {
0bc88e4a 2488 err_msg = "Could not obtain valid ethernet address, aborting";
2489 goto error_out_reset;
2490 }
2491
2492 /* Read the Sleep Image version last, so the response is valid
2493 * later when we print out the version reported.
2494 */
2495 INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS);
2496 if(typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp) < 0) {
0bc88e4a 2497 err_msg = "Could not get Sleep Image version";
2498 goto error_out_reset;
2499 }
2500
2501 tp->capabilities = typhoon_card_info[card_id].capabilities;
2502 tp->xcvr_select = TYPHOON_XCVR_AUTONEG;
2503
2504 /* Typhoon 1.0 Sleep Images return one response descriptor to the
2505 * READ_VERSIONS command. Those versions are OK after waking up
2506 * from sleep without needing a reset. Typhoon 1.1+ Sleep Images
2507 * seem to need a little extra help to get started. Since we don't
2508 * know how to nudge it along, just kick it.
2509 */
2510 if(xp_resp[0].numDesc != 0)
2511 tp->capabilities |= TYPHOON_WAKEUP_NEEDS_RESET;
2512
2513 if(typhoon_sleep(tp, PCI_D3hot, 0) < 0) {
0bc88e4a 2514 err_msg = "cannot put adapter to sleep";
2515 err = -EIO;
2516 goto error_out_reset;
2517 }
2518
2519 /* The chip-specific entries in the device structure. */
8bdd5553 2520 dev->netdev_ops = &typhoon_netdev_ops;
bea3348e 2521 netif_napi_add(dev, &tp->napi, typhoon_poll, 16);
1da177e4 2522 dev->watchdog_timeo = TX_TIMEOUT;
25805dcf 2523
2524 SET_ETHTOOL_OPS(dev, &typhoon_ethtool_ops);
2525
2526 /* We can handle scatter gather, up to 16 entries, and
2527 * we can do IP checksumming (only version 4, doh...)
2528 */
2529 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
2530 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
2531 dev->features |= NETIF_F_TSO;
2532
2533 if(register_netdev(dev) < 0) {
2534 err_msg = "unable to register netdev";
1da177e4 2535 goto error_out_reset;
0bc88e4a 2536 }
2537
2538 pci_set_drvdata(pdev, dev);
2539
2540 netdev_info(dev, "%s at %s 0x%llx, %pM\n",
2541 typhoon_card_info[card_id].name,
2542 use_mmio ? "MMIO" : "IO",
2543 (unsigned long long)pci_resource_start(pdev, use_mmio),
2544 dev->dev_addr);
2545
2546 /* xp_resp still contains the response to the READ_VERSIONS command.
2547 * For debugging, let the user know what version he has.
2548 */
2549 if(xp_resp[0].numDesc == 0) {
2550		/* This is the Typhoon 1.0 type Sleep Image; the last 16 bits
2551		 * of the version are the month/day of the build.
2552 */
2553 u16 monthday = le32_to_cpu(xp_resp[0].parm2) & 0xffff;
2554 netdev_info(dev, "Typhoon 1.0 Sleep Image built %02u/%02u/2000\n",
2555 monthday >> 8, monthday & 0xff);
2556 } else if(xp_resp[0].numDesc == 2) {
2557 /* This is the Typhoon 1.1+ type Sleep Image
2558 */
2559 u32 sleep_ver = le32_to_cpu(xp_resp[0].parm2);
2560 u8 *ver_string = (u8 *) &xp_resp[1];
2561 ver_string[25] = 0;
2562 netdev_info(dev, "Typhoon 1.1+ Sleep Image version %02x.%03x.%03x %s\n",
2563 sleep_ver >> 24, (sleep_ver >> 12) & 0xfff,
2564 sleep_ver & 0xfff, ver_string);
1da177e4 2565 } else {
2566 netdev_warn(dev, "Unknown Sleep Image version (%u:%04x)\n",
2567 xp_resp[0].numDesc, le32_to_cpu(xp_resp[0].parm2));
1da177e4 2568 }
6aa20a22 2569
2570 return 0;
2571
2572error_out_reset:
2573 typhoon_reset(ioaddr, NoWait);
2574
2575error_out_dma:
2576 pci_free_consistent(pdev, sizeof(struct typhoon_shared),
2577 shared, shared_dma);
2578error_out_remap:
2579 pci_iounmap(pdev, ioaddr);
2580error_out_regions:
2581 pci_release_regions(pdev);
2582error_out_mwi:
2583 pci_clear_mwi(pdev);
2584error_out_disable:
2585 pci_disable_device(pdev);
2586error_out_dev:
2587 free_netdev(dev);
2588error_out:
0bc88e4a 2589 pr_err("%s: %s\n", pci_name(pdev), err_msg);
2590 return err;
2591}
2592
2593static void __devexit
2594typhoon_remove_one(struct pci_dev *pdev)
2595{
2596 struct net_device *dev = pci_get_drvdata(pdev);
2597 struct typhoon *tp = netdev_priv(dev);
2598
2599 unregister_netdev(dev);
2600 pci_set_power_state(pdev, PCI_D0);
2601 pci_restore_state(pdev);
2602 typhoon_reset(tp->ioaddr, NoWait);
2603 pci_iounmap(pdev, tp->ioaddr);
2604 pci_free_consistent(pdev, sizeof(struct typhoon_shared),
2605 tp->shared, tp->shared_dma);
2606 pci_release_regions(pdev);
2607 pci_clear_mwi(pdev);
2608 pci_disable_device(pdev);
2609 pci_set_drvdata(pdev, NULL);
2610 free_netdev(dev);
2611}
2612
2613static struct pci_driver typhoon_driver = {
0bc88e4a 2614 .name = KBUILD_MODNAME,
2615 .id_table = typhoon_pci_tbl,
2616 .probe = typhoon_init_one,
2617 .remove = __devexit_p(typhoon_remove_one),
2618#ifdef CONFIG_PM
2619 .suspend = typhoon_suspend,
2620 .resume = typhoon_resume,
2621#endif
2622};
2623
2624static int __init
2625typhoon_init(void)
2626{
29917620 2627 return pci_register_driver(&typhoon_driver);
2628}
2629
2630static void __exit
2631typhoon_cleanup(void)
2632{
a8c9a53c 2633 if (typhoon_fw)
b775a750 2634 release_firmware(typhoon_fw);
2635 pci_unregister_driver(&typhoon_driver);
2636}
2637
2638module_init(typhoon_init);
2639module_exit(typhoon_cleanup);