qlge: removing unreachable block of code
drivers/net/qlge/qlge_main.c
1/*
2 * QLogic qlge NIC HBA Driver
3 * Copyright (c) 2003-2008 QLogic Corporation
4 * See LICENSE.qlge for copyright and licensing details.
5 * Author: Linux qlge network device driver by
6 * Ron Mercer <ron.mercer@qlogic.com>
7 */
8#include <linux/kernel.h>
9#include <linux/init.h>
10#include <linux/types.h>
11#include <linux/module.h>
12#include <linux/list.h>
13#include <linux/pci.h>
14#include <linux/dma-mapping.h>
15#include <linux/pagemap.h>
16#include <linux/sched.h>
17#include <linux/slab.h>
18#include <linux/dmapool.h>
19#include <linux/mempool.h>
20#include <linux/spinlock.h>
21#include <linux/kthread.h>
22#include <linux/interrupt.h>
23#include <linux/errno.h>
24#include <linux/ioport.h>
25#include <linux/in.h>
26#include <linux/ip.h>
27#include <linux/ipv6.h>
28#include <net/ipv6.h>
29#include <linux/tcp.h>
30#include <linux/udp.h>
31#include <linux/if_arp.h>
32#include <linux/if_ether.h>
33#include <linux/netdevice.h>
34#include <linux/etherdevice.h>
35#include <linux/ethtool.h>
36#include <linux/skbuff.h>
37#include <linux/if_vlan.h>
38#include <linux/delay.h>
39#include <linux/mm.h>
40#include <linux/vmalloc.h>
41#include <net/ip6_checksum.h>
42
43#include "qlge.h"
44
45char qlge_driver_name[] = DRV_NAME;
46const char qlge_driver_version[] = DRV_VERSION;
47
48MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
49MODULE_DESCRIPTION(DRV_STRING " ");
50MODULE_LICENSE("GPL");
51MODULE_VERSION(DRV_VERSION);
52
53static const u32 default_msg =
54 NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
55/* NETIF_MSG_TIMER | */
56 NETIF_MSG_IFDOWN |
57 NETIF_MSG_IFUP |
58 NETIF_MSG_RX_ERR |
59 NETIF_MSG_TX_ERR |
60/* NETIF_MSG_TX_QUEUED | */
61/* NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
62/* NETIF_MSG_PKTDATA | */
63 NETIF_MSG_HW | NETIF_MSG_WOL | 0;
64
65static int debug = 0x00007fff; /* defaults above */
66module_param(debug, int, 0);
67MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
68
69#define MSIX_IRQ 0
70#define MSI_IRQ 1
71#define LEG_IRQ 2
72static int qlge_irq_type = MSIX_IRQ;
73module_param(qlge_irq_type, int, MSIX_IRQ);
74MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
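/* Note: the third argument to module_param() is the sysfs permission
 * mask, not a default value; the default for qlge_irq_type comes from
 * its initializer above. Passing MSIX_IRQ (0) here also means the
 * parameter is not exposed under /sys/module/qlge/parameters/.
 */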
75
76static int qlge_mpi_coredump;
77module_param(qlge_mpi_coredump, int, 0);
78MODULE_PARM_DESC(qlge_mpi_coredump,
79 "Option to enable MPI firmware dump. "
80 "Default is OFF - Do Not allocate memory. ");
81
82static int qlge_force_coredump;
83module_param(qlge_force_coredump, int, 0);
84MODULE_PARM_DESC(qlge_force_coredump,
85 "Option to allow force of firmware core dump. "
86 "Default is OFF - Do not allow.");
87
88static DEFINE_PCI_DEVICE_TABLE(qlge_pci_tbl) = {
89 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
90 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
91 /* required last entry */
92 {0,}
93};
94
95MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
96
97/* This hardware semaphore causes exclusive access to
98 * resources shared between the NIC driver, MPI firmware,
99 * FCOE firmware and the FC driver.
100 */
101static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
102{
103 u32 sem_bits = 0;
104
105 switch (sem_mask) {
106 case SEM_XGMAC0_MASK:
107 sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
108 break;
109 case SEM_XGMAC1_MASK:
110 sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
111 break;
112 case SEM_ICB_MASK:
113 sem_bits = SEM_SET << SEM_ICB_SHIFT;
114 break;
115 case SEM_MAC_ADDR_MASK:
116 sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
117 break;
118 case SEM_FLASH_MASK:
119 sem_bits = SEM_SET << SEM_FLASH_SHIFT;
120 break;
121 case SEM_PROBE_MASK:
122 sem_bits = SEM_SET << SEM_PROBE_SHIFT;
123 break;
124 case SEM_RT_IDX_MASK:
125 sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
126 break;
127 case SEM_PROC_REG_MASK:
128 sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
129 break;
130 default:
131 QPRINTK(qdev, PROBE, ALERT, "Bad Semaphore mask!.\n");
132 return -EINVAL;
133 }
134
135 ql_write32(qdev, SEM, sem_bits | sem_mask);
136 return !(ql_read32(qdev, SEM) & sem_bits);
137}
138
139int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
140{
141 unsigned int wait_count = 30;
142 do {
143 if (!ql_sem_trylock(qdev, sem_mask))
144 return 0;
145 udelay(100);
146 } while (--wait_count);
147 return -ETIMEDOUT;
148}
149
150void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
151{
152 ql_write32(qdev, SEM, sem_mask);
153 ql_read32(qdev, SEM); /* flush */
154}
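/* Illustrative usage of the two helpers above (this is the same pattern
 * the flash and MAC-address routines later in this file follow):
 *
 *	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
 *		return -ETIMEDOUT;
 *	...access the resource guarded by SEM_FLASH_MASK...
 *	ql_sem_unlock(qdev, SEM_FLASH_MASK);
 */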
155
156/* This function waits for a specific bit to come ready
157 * in a given register. It is used mostly by the initialize
158 * process, but is also used in kernel thread API such as
159 * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
160 */
161int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
162{
163 u32 temp;
164 int count = UDELAY_COUNT;
165
166 while (count) {
167 temp = ql_read32(qdev, reg);
168
169 /* check for errors */
170 if (temp & err_bit) {
171 QPRINTK(qdev, PROBE, ALERT,
172 "register 0x%.08x access error, value = 0x%.08x!.\n",
173 reg, temp);
174 return -EIO;
175 } else if (temp & bit)
176 return 0;
177 udelay(UDELAY_DELAY);
178 count--;
179 }
180 QPRINTK(qdev, PROBE, ALERT,
181 "Timed out waiting for reg %x to come ready.\n", reg);
182 return -ETIMEDOUT;
183}
184
185/* The CFG register is used to download TX and RX control blocks
186 * to the chip. This function waits for an operation to complete.
187 */
188static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
189{
190 int count = UDELAY_COUNT;
191 u32 temp;
192
193 while (count) {
194 temp = ql_read32(qdev, CFG);
195 if (temp & CFG_LE)
196 return -EIO;
197 if (!(temp & bit))
198 return 0;
199 udelay(UDELAY_DELAY);
200 count--;
201 }
202 return -ETIMEDOUT;
203}
204
205
206/* Used to issue init control blocks to hw. Maps control block,
207 * sets address, triggers download, waits for completion.
208 */
209int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
210 u16 q_id)
211{
212 u64 map;
213 int status = 0;
214 int direction;
215 u32 mask;
216 u32 value;
217
218 direction =
219 (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
220 PCI_DMA_FROMDEVICE;
221
222 map = pci_map_single(qdev->pdev, ptr, size, direction);
223 if (pci_dma_mapping_error(qdev->pdev, map)) {
224 QPRINTK(qdev, IFUP, ERR, "Couldn't map DMA area.\n");
225 return -ENOMEM;
226 }
227
228 status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
229 if (status)
230 return status;
231
232 status = ql_wait_cfg(qdev, bit);
233 if (status) {
234 QPRINTK(qdev, IFUP, ERR,
235 "Timed out waiting for CFG to come ready.\n");
236 goto exit;
237 }
238
239 ql_write32(qdev, ICB_L, (u32) map);
240 ql_write32(qdev, ICB_H, (u32) (map >> 32));
241
242 mask = CFG_Q_MASK | (bit << 16);
243 value = bit | (q_id << CFG_Q_SHIFT);
244 ql_write32(qdev, CFG, (mask | value));
245
246 /*
247 * Wait for the bit to clear after signaling hw.
248 */
249 status = ql_wait_cfg(qdev, bit);
250exit:
251 ql_sem_unlock(qdev, SEM_ICB_MASK); /* does flush too */
252 pci_unmap_single(qdev->pdev, map, size, direction);
253 return status;
254}
255
256/* Get a specific MAC address from the CAM. Used for debug and reg dump. */
257int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
258 u32 *value)
259{
260 u32 offset = 0;
261 int status;
262
263 switch (type) {
264 case MAC_ADDR_TYPE_MULTI_MAC:
265 case MAC_ADDR_TYPE_CAM_MAC:
266 {
267 status =
268 ql_wait_reg_rdy(qdev,
269 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
270 if (status)
271 goto exit;
272 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
273 (index << MAC_ADDR_IDX_SHIFT) | /* index */
274 MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
275 status =
276 ql_wait_reg_rdy(qdev,
277 MAC_ADDR_IDX, MAC_ADDR_MR, 0);
278 if (status)
279 goto exit;
280 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
281 status =
282 ql_wait_reg_rdy(qdev,
283 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
284 if (status)
285 goto exit;
286 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
287 (index << MAC_ADDR_IDX_SHIFT) | /* index */
288 MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
289 status =
290 ql_wait_reg_rdy(qdev,
291 MAC_ADDR_IDX, MAC_ADDR_MR, 0);
292 if (status)
293 goto exit;
294 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
295 if (type == MAC_ADDR_TYPE_CAM_MAC) {
296 status =
297 ql_wait_reg_rdy(qdev,
298 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
299 if (status)
300 goto exit;
301 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
302 (index << MAC_ADDR_IDX_SHIFT) | /* index */
303 MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
304 status =
305 ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
306 MAC_ADDR_MR, 0);
307 if (status)
308 goto exit;
309 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
310 }
311 break;
312 }
313 case MAC_ADDR_TYPE_VLAN:
314 case MAC_ADDR_TYPE_MULTI_FLTR:
315 default:
316 QPRINTK(qdev, IFUP, CRIT,
317 "Address type %d not yet supported.\n", type);
318 status = -EPERM;
319 }
320exit:
321 return status;
322}
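/* Note: a MAC entry is read back as two 32-bit words (the lower and
 * upper halves of the address); a MAC_ADDR_TYPE_CAM_MAC entry carries a
 * third word holding the CAM output/routing bits that
 * ql_set_mac_addr_reg() below programs, which is why 'offset' above is
 * bumped across consecutive reads.
 */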
323
324/* Set up a MAC, multicast or VLAN address for the
325 * inbound frame matching.
326 */
327static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
328 u16 index)
329{
330 u32 offset = 0;
331 int status = 0;
332
c4e84bde
RM
333 switch (type) {
334 case MAC_ADDR_TYPE_MULTI_MAC:
335 {
336 u32 upper = (addr[0] << 8) | addr[1];
337 u32 lower = (addr[2] << 24) | (addr[3] << 16) |
338 (addr[4] << 8) | (addr[5]);
339
340 status =
341 ql_wait_reg_rdy(qdev,
342 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
343 if (status)
344 goto exit;
345 ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
346 (index << MAC_ADDR_IDX_SHIFT) |
347 type | MAC_ADDR_E);
348 ql_write32(qdev, MAC_ADDR_DATA, lower);
349 status =
350 ql_wait_reg_rdy(qdev,
351 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
352 if (status)
353 goto exit;
354 ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
355 (index << MAC_ADDR_IDX_SHIFT) |
356 type | MAC_ADDR_E);
357
358 ql_write32(qdev, MAC_ADDR_DATA, upper);
359 status =
360 ql_wait_reg_rdy(qdev,
361 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
362 if (status)
363 goto exit;
364 break;
365 }
366 case MAC_ADDR_TYPE_CAM_MAC:
367 {
368 u32 cam_output;
369 u32 upper = (addr[0] << 8) | addr[1];
370 u32 lower =
371 (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
372 (addr[5]);
373
374 QPRINTK(qdev, IFUP, DEBUG,
375 "Adding %s address %pM"
376 " at index %d in the CAM.\n",
377 ((type ==
378 MAC_ADDR_TYPE_MULTI_MAC) ? "MULTICAST" :
379 "UNICAST"), addr, index);
380
381 status =
382 ql_wait_reg_rdy(qdev,
383 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
384 if (status)
385 goto exit;
386 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
387 (index << MAC_ADDR_IDX_SHIFT) | /* index */
388 type); /* type */
389 ql_write32(qdev, MAC_ADDR_DATA, lower);
390 status =
391 ql_wait_reg_rdy(qdev,
392 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
393 if (status)
394 goto exit;
395 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
396 (index << MAC_ADDR_IDX_SHIFT) | /* index */
397 type); /* type */
398 ql_write32(qdev, MAC_ADDR_DATA, upper);
399 status =
400 ql_wait_reg_rdy(qdev,
401 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
402 if (status)
403 goto exit;
404 ql_write32(qdev, MAC_ADDR_IDX, (offset) | /* offset */
405 (index << MAC_ADDR_IDX_SHIFT) | /* index */
406 type); /* type */
407 /* This field should also include the queue id
408 and possibly the function id. Right now we hardcode
409 the route field to NIC core.
410 */
411 cam_output = (CAM_OUT_ROUTE_NIC |
412 (qdev->
413 func << CAM_OUT_FUNC_SHIFT) |
414 (0 << CAM_OUT_CQ_ID_SHIFT));
415 if (qdev->vlgrp)
416 cam_output |= CAM_OUT_RV;
417 /* route to NIC core */
418 ql_write32(qdev, MAC_ADDR_DATA, cam_output);
419 break;
420 }
421 case MAC_ADDR_TYPE_VLAN:
422 {
423 u32 enable_bit = *((u32 *) &addr[0]);
424 /* For VLAN, the addr actually holds a bit that
425 * either enables or disables the vlan id we are
426 * addressing. It's either MAC_ADDR_E on or off.
427 * That's bit-27 we're talking about.
428 */
429 QPRINTK(qdev, IFUP, INFO, "%s VLAN ID %d %s the CAM.\n",
430 (enable_bit ? "Adding" : "Removing"),
431 index, (enable_bit ? "to" : "from"));
432
433 status =
434 ql_wait_reg_rdy(qdev,
435 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
436 if (status)
437 goto exit;
438 ql_write32(qdev, MAC_ADDR_IDX, offset | /* offset */
439 (index << MAC_ADDR_IDX_SHIFT) | /* index */
440 type | /* type */
441 enable_bit); /* enable/disable */
442 break;
443 }
444 case MAC_ADDR_TYPE_MULTI_FLTR:
445 default:
446 QPRINTK(qdev, IFUP, CRIT,
447 "Address type %d not yet supported.\n", type);
448 status = -EPERM;
449 }
450exit:
451 return status;
452}
453
454/* Set or clear MAC address in hardware. We sometimes
455 * have to clear it to prevent wrong frame routing
456 * especially in a bonding environment.
457 */
458static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
459{
460 int status;
461 char zero_mac_addr[ETH_ALEN];
462 char *addr;
463
464 if (set) {
465 addr = &qdev->ndev->dev_addr[0];
466 QPRINTK(qdev, IFUP, DEBUG,
467 "Set Mac addr %pM\n", addr);
468 } else {
469 memset(zero_mac_addr, 0, ETH_ALEN);
470 addr = &zero_mac_addr[0];
471 QPRINTK(qdev, IFUP, DEBUG,
472 "Clearing MAC address on %s\n",
473 qdev->ndev->name);
474 }
475 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
476 if (status)
477 return status;
478 status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
479 MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
480 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
481 if (status)
482 QPRINTK(qdev, IFUP, ERR, "Failed to init mac "
483 "address.\n");
484 return status;
485}
486
487void ql_link_on(struct ql_adapter *qdev)
488{
489 QPRINTK(qdev, LINK, ERR, "%s: Link is up.\n",
490 qdev->ndev->name);
491 netif_carrier_on(qdev->ndev);
492 ql_set_mac_addr(qdev, 1);
493}
494
495void ql_link_off(struct ql_adapter *qdev)
496{
497 QPRINTK(qdev, LINK, ERR, "%s: Link is down.\n",
498 qdev->ndev->name);
499 netif_carrier_off(qdev->ndev);
500 ql_set_mac_addr(qdev, 0);
501}
502
503/* Get a specific frame routing value from the CAM.
504 * Used for debug and reg dump.
505 */
506int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
507{
508 int status = 0;
509
510 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
511 if (status)
512 goto exit;
513
514 ql_write32(qdev, RT_IDX,
515 RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
516 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
517 if (status)
518 goto exit;
519 *value = ql_read32(qdev, RT_DATA);
520exit:
521 return status;
522}
523
524/* The NIC function for this chip has 16 routing indexes. Each one can be used
525 * to route different frame types to various inbound queues. We send broadcast/
526 * multicast/error frames to the default queue for slow handling,
527 * and CAM hit/RSS frames to the fast handling queues.
528 */
529static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
530 int enable)
531{
532 int status = -EINVAL; /* Return error if no mask match. */
533 u32 value = 0;
534
535 QPRINTK(qdev, IFUP, DEBUG,
536 "%s %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s mask %s the routing reg.\n",
537 (enable ? "Adding" : "Removing"),
538 ((index == RT_IDX_ALL_ERR_SLOT) ? "MAC ERROR/ALL ERROR" : ""),
539 ((index == RT_IDX_IP_CSUM_ERR_SLOT) ? "IP CSUM ERROR" : ""),
540 ((index ==
541 RT_IDX_TCP_UDP_CSUM_ERR_SLOT) ? "TCP/UDP CSUM ERROR" : ""),
542 ((index == RT_IDX_BCAST_SLOT) ? "BROADCAST" : ""),
543 ((index == RT_IDX_MCAST_MATCH_SLOT) ? "MULTICAST MATCH" : ""),
544 ((index == RT_IDX_ALLMULTI_SLOT) ? "ALL MULTICAST MATCH" : ""),
545 ((index == RT_IDX_UNUSED6_SLOT) ? "UNUSED6" : ""),
546 ((index == RT_IDX_UNUSED7_SLOT) ? "UNUSED7" : ""),
547 ((index == RT_IDX_RSS_MATCH_SLOT) ? "RSS ALL/IPV4 MATCH" : ""),
548 ((index == RT_IDX_RSS_IPV6_SLOT) ? "RSS IPV6" : ""),
549 ((index == RT_IDX_RSS_TCP4_SLOT) ? "RSS TCP4" : ""),
550 ((index == RT_IDX_RSS_TCP6_SLOT) ? "RSS TCP6" : ""),
551 ((index == RT_IDX_CAM_HIT_SLOT) ? "CAM HIT" : ""),
552 ((index == RT_IDX_UNUSED013) ? "UNUSED13" : ""),
553 ((index == RT_IDX_UNUSED014) ? "UNUSED14" : ""),
554 ((index == RT_IDX_PROMISCUOUS_SLOT) ? "PROMISCUOUS" : ""),
555 (enable ? "to" : "from"));
556
557 switch (mask) {
558 case RT_IDX_CAM_HIT:
559 {
560 value = RT_IDX_DST_CAM_Q | /* dest */
561 RT_IDX_TYPE_NICQ | /* type */
562 (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
563 break;
564 }
565 case RT_IDX_VALID: /* Promiscuous Mode frames. */
566 {
567 value = RT_IDX_DST_DFLT_Q | /* dest */
568 RT_IDX_TYPE_NICQ | /* type */
569 (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
570 break;
571 }
572 case RT_IDX_ERR: /* Pass up MAC,IP,TCP/UDP error frames. */
573 {
574 value = RT_IDX_DST_DFLT_Q | /* dest */
575 RT_IDX_TYPE_NICQ | /* type */
576 (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
577 break;
578 }
579 case RT_IDX_BCAST: /* Pass up Broadcast frames to default Q. */
580 {
581 value = RT_IDX_DST_DFLT_Q | /* dest */
582 RT_IDX_TYPE_NICQ | /* type */
583 (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
584 break;
585 }
586 case RT_IDX_MCAST: /* Pass up All Multicast frames. */
587 {
588 value = RT_IDX_DST_DFLT_Q | /* dest */
589 RT_IDX_TYPE_NICQ | /* type */
590 (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
591 break;
592 }
593 case RT_IDX_MCAST_MATCH: /* Pass up matched Multicast frames. */
594 {
595 value = RT_IDX_DST_DFLT_Q | /* dest */
596 RT_IDX_TYPE_NICQ | /* type */
597 (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
598 break;
599 }
600 case RT_IDX_RSS_MATCH: /* Pass up matched RSS frames. */
601 {
602 value = RT_IDX_DST_RSS | /* dest */
603 RT_IDX_TYPE_NICQ | /* type */
604 (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
605 break;
606 }
607 case 0: /* Clear the E-bit on an entry. */
608 {
609 value = RT_IDX_DST_DFLT_Q | /* dest */
610 RT_IDX_TYPE_NICQ | /* type */
611 (index << RT_IDX_IDX_SHIFT);/* index */
612 break;
613 }
614 default:
615 QPRINTK(qdev, IFUP, ERR, "Mask type %d not yet supported.\n",
616 mask);
617 status = -EPERM;
618 goto exit;
619 }
620
621 if (value) {
622 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
623 if (status)
624 goto exit;
625 value |= (enable ? RT_IDX_E : 0);
626 ql_write32(qdev, RT_IDX, value);
627 ql_write32(qdev, RT_DATA, enable ? mask : 0);
628 }
629exit:
630 return status;
631}
632
633static void ql_enable_interrupts(struct ql_adapter *qdev)
634{
635 ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
636}
637
638static void ql_disable_interrupts(struct ql_adapter *qdev)
639{
640 ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
641}
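/* Note: INTR_EN appears to follow the same write convention as CFG and
 * STS elsewhere in this file - the upper 16 bits select which low-order
 * bits to update - so (INTR_EN_EI << 16) | INTR_EN_EI sets the EI bit
 * and (INTR_EN_EI << 16) alone clears it.
 */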
642
643/* If we're running with multiple MSI-X vectors then we enable on the fly.
644 * Otherwise, we may have multiple outstanding workers and don't want to
645 * enable until the last one finishes. In this case, the irq_cnt gets
646 * incremented every time we queue a worker and decremented every time
647 * a worker finishes. Once it hits zero we enable the interrupt.
648 */
649u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
650{
651 u32 var = 0;
652 unsigned long hw_flags = 0;
653 struct intr_context *ctx = qdev->intr_context + intr;
654
655 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
656 /* Always enable if we're MSIX multi interrupts and
657 * it's not the default (zeroeth) interrupt.
658 */
659 ql_write32(qdev, INTR_EN,
660 ctx->intr_en_mask);
661 var = ql_read32(qdev, STS);
662 return var;
663 }
664
665 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
666 if (atomic_dec_and_test(&ctx->irq_cnt)) {
667 ql_write32(qdev, INTR_EN,
668 ctx->intr_en_mask);
669 var = ql_read32(qdev, STS);
670 }
671 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
672 return var;
673}
674
675static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
676{
677 u32 var = 0;
678 struct intr_context *ctx;
679
680 /* HW disables for us if we're MSIX multi interrupts and
681 * it's not the default (zeroeth) interrupt.
682 */
683 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
684 return 0;
685
686 ctx = qdev->intr_context + intr;
687 spin_lock(&qdev->hw_lock);
688 if (!atomic_read(&ctx->irq_cnt)) {
689 ql_write32(qdev, INTR_EN,
690 ctx->intr_dis_mask);
691 var = ql_read32(qdev, STS);
692 }
693 atomic_inc(&ctx->irq_cnt);
694 spin_unlock(&qdev->hw_lock);
695 return var;
696}
697
698static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
699{
700 int i;
701 for (i = 0; i < qdev->intr_count; i++) {
702 /* The enable call does a atomic_dec_and_test
703 * and enables only if the result is zero.
704 * So we precharge it here.
705 */
706 if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
707 i == 0))
708 atomic_set(&qdev->intr_context[i].irq_cnt, 1);
709 ql_enable_completion_interrupt(qdev, i);
710 }
711
712}
713
714static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
715{
716 int status, i;
717 u16 csum = 0;
718 __le16 *flash = (__le16 *)&qdev->flash;
719
720 status = strncmp((char *)&qdev->flash, str, 4);
721 if (status) {
722 QPRINTK(qdev, IFUP, ERR, "Invalid flash signature.\n");
723 return status;
724 }
725
726 for (i = 0; i < size; i++)
727 csum += le16_to_cpu(*flash++);
728
729 if (csum)
730 QPRINTK(qdev, IFUP, ERR,
731 "Invalid flash checksum, csum = 0x%.04x.\n", csum);
732
733 return csum;
734}
735
736static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
737{
738 int status = 0;
739 /* wait for reg to come ready */
740 status = ql_wait_reg_rdy(qdev,
741 FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
742 if (status)
743 goto exit;
744 /* set up for reg read */
745 ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
746 /* wait for reg to come ready */
747 status = ql_wait_reg_rdy(qdev,
748 FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
749 if (status)
750 goto exit;
751 /* This data is stored on flash as an array of
752 * __le32. Since ql_read32() returns cpu endian
753 * we need to swap it back.
754 */
755 *data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
756exit:
757 return status;
758}
759
760static int ql_get_8000_flash_params(struct ql_adapter *qdev)
761{
762 u32 i, size;
763 int status;
764 __le32 *p = (__le32 *)&qdev->flash;
765 u32 offset;
766 u8 mac_addr[6];
767
768 /* Get flash offset for function and adjust
769 * for dword access.
770 */
771 if (!qdev->port)
772 offset = FUNC0_FLASH_OFFSET / sizeof(u32);
773 else
774 offset = FUNC1_FLASH_OFFSET / sizeof(u32);
775
776 if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
777 return -ETIMEDOUT;
778
779 size = sizeof(struct flash_params_8000) / sizeof(u32);
780 for (i = 0; i < size; i++, p++) {
781 status = ql_read_flash_word(qdev, i+offset, p);
782 if (status) {
783 QPRINTK(qdev, IFUP, ERR, "Error reading flash.\n");
784 goto exit;
785 }
786 }
787
788 status = ql_validate_flash(qdev,
789 sizeof(struct flash_params_8000) / sizeof(u16),
790 "8000");
791 if (status) {
792 QPRINTK(qdev, IFUP, ERR, "Invalid flash.\n");
793 status = -EINVAL;
794 goto exit;
795 }
796
797 /* Extract either manufacturer or BOFM modified
798 * MAC address.
799 */
800 if (qdev->flash.flash_params_8000.data_type1 == 2)
801 memcpy(mac_addr,
802 qdev->flash.flash_params_8000.mac_addr1,
803 qdev->ndev->addr_len);
804 else
805 memcpy(mac_addr,
806 qdev->flash.flash_params_8000.mac_addr,
807 qdev->ndev->addr_len);
808
809 if (!is_valid_ether_addr(mac_addr)) {
810 QPRINTK(qdev, IFUP, ERR, "Invalid MAC address.\n");
811 status = -EINVAL;
812 goto exit;
813 }
814
815 memcpy(qdev->ndev->dev_addr,
816 mac_addr,
817 qdev->ndev->addr_len);
818
819exit:
820 ql_sem_unlock(qdev, SEM_FLASH_MASK);
821 return status;
822}
823
824static int ql_get_8012_flash_params(struct ql_adapter *qdev)
825{
826 int i;
827 int status;
828 __le32 *p = (__le32 *)&qdev->flash;
829 u32 offset = 0;
830 u32 size = sizeof(struct flash_params_8012) / sizeof(u32);
831
832 /* Second function's parameters follow the first
833 * function's.
834 */
835 if (qdev->port)
836 offset = size;
837
838 if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
839 return -ETIMEDOUT;
840
841 for (i = 0; i < size; i++, p++) {
842 status = ql_read_flash_word(qdev, i+offset, p);
843 if (status) {
844 QPRINTK(qdev, IFUP, ERR, "Error reading flash.\n");
845 goto exit;
846 }
847
848 }
849
850 status = ql_validate_flash(qdev,
851 sizeof(struct flash_params_8012) / sizeof(u16),
852 "8012");
853 if (status) {
854 QPRINTK(qdev, IFUP, ERR, "Invalid flash.\n");
855 status = -EINVAL;
856 goto exit;
857 }
858
859 if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
860 status = -EINVAL;
861 goto exit;
862 }
863
864 memcpy(qdev->ndev->dev_addr,
865 qdev->flash.flash_params_8012.mac_addr,
866 qdev->ndev->addr_len);
867
868exit:
869 ql_sem_unlock(qdev, SEM_FLASH_MASK);
870 return status;
871}
872
873/* xgmac registers are located behind the xgmac_addr and xgmac_data
874 * register pair. Each read/write requires us to wait for the ready
875 * bit before reading/writing the data.
876 */
877static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
878{
879 int status;
880 /* wait for reg to come ready */
881 status = ql_wait_reg_rdy(qdev,
882 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
883 if (status)
884 return status;
885 /* write the data to the data reg */
886 ql_write32(qdev, XGMAC_DATA, data);
887 /* trigger the write */
888 ql_write32(qdev, XGMAC_ADDR, reg);
889 return status;
890}
891
892/* xgmac registers are located behind the xgmac_addr and xgmac_data
893 * register pair. Each read/write requires us to wait for the ready
894 * bit before reading/writing the data.
895 */
896int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
897{
898 int status = 0;
899 /* wait for reg to come ready */
900 status = ql_wait_reg_rdy(qdev,
901 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
902 if (status)
903 goto exit;
904 /* set up for reg read */
905 ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
906 /* wait for reg to come ready */
907 status = ql_wait_reg_rdy(qdev,
908 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
909 if (status)
910 goto exit;
911 /* get the data */
912 *data = ql_read32(qdev, XGMAC_DATA);
913exit:
914 return status;
915}
916
917/* This is used for reading the 64-bit statistics regs. */
918int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
919{
920 int status = 0;
921 u32 hi = 0;
922 u32 lo = 0;
923
924 status = ql_read_xgmac_reg(qdev, reg, &lo);
925 if (status)
926 goto exit;
927
928 status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
929 if (status)
930 goto exit;
931
932 *data = (u64) lo | ((u64) hi << 32);
933
934exit:
935 return status;
936}
937
938static int ql_8000_port_initialize(struct ql_adapter *qdev)
939{
940 int status;
941 /*
942 * Get MPI firmware version for driver banner
943 * and ethtool info.
944 */
945 status = ql_mb_about_fw(qdev);
946 if (status)
947 goto exit;
948 status = ql_mb_get_fw_state(qdev);
949 if (status)
950 goto exit;
951 /* Wake up a worker to get/set the TX/RX frame sizes. */
952 queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
953exit:
954 return status;
955}
956
957/* Take the MAC Core out of reset.
958 * Enable statistics counting.
959 * Take the transmitter/receiver out of reset.
960 * This functionality may be done in the MPI firmware at a
961 * later date.
962 */
963static int ql_8012_port_initialize(struct ql_adapter *qdev)
964{
965 int status = 0;
966 u32 data;
967
968 if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
969 /* Another function has the semaphore, so
970 * wait for the port init bit to come ready.
971 */
972 QPRINTK(qdev, LINK, INFO,
973 "Another function has the semaphore, so wait for the port init bit to come ready.\n");
974 status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
975 if (status) {
976 QPRINTK(qdev, LINK, CRIT,
977 "Port initialize timed out.\n");
978 }
979 return status;
980 }
981
982 QPRINTK(qdev, LINK, INFO, "Got xgmac semaphore!.\n");
983 /* Set the core reset. */
984 status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
985 if (status)
986 goto end;
987 data |= GLOBAL_CFG_RESET;
988 status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
989 if (status)
990 goto end;
991
992 /* Clear the core reset and turn on jumbo for receiver. */
993 data &= ~GLOBAL_CFG_RESET; /* Clear core reset. */
994 data |= GLOBAL_CFG_JUMBO; /* Turn on jumbo. */
995 data |= GLOBAL_CFG_TX_STAT_EN;
996 data |= GLOBAL_CFG_RX_STAT_EN;
997 status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
998 if (status)
999 goto end;
1000
1001 /* Enable transmitter, and clear its reset. */
1002 status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
1003 if (status)
1004 goto end;
1005 data &= ~TX_CFG_RESET; /* Clear the TX MAC reset. */
1006 data |= TX_CFG_EN; /* Enable the transmitter. */
1007 status = ql_write_xgmac_reg(qdev, TX_CFG, data);
1008 if (status)
1009 goto end;
1010
1011 /* Enable receiver and clear its reset. */
1012 status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
1013 if (status)
1014 goto end;
1015 data &= ~RX_CFG_RESET; /* Clear the RX MAC reset. */
1016 data |= RX_CFG_EN; /* Enable the receiver. */
1017 status = ql_write_xgmac_reg(qdev, RX_CFG, data);
1018 if (status)
1019 goto end;
1020
1021 /* Turn on jumbo. */
1022 status =
1023 ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
1024 if (status)
1025 goto end;
1026 status =
1027 ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
1028 if (status)
1029 goto end;
1030
1031 /* Signal to the world that the port is enabled. */
1032 ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
1033end:
1034 ql_sem_unlock(qdev, qdev->xg_sem_mask);
1035 return status;
1036}
1037
1038static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
1039{
1040 return PAGE_SIZE << qdev->lbq_buf_order;
1041}
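/* Example: with 4 KB pages and lbq_buf_order == 1 the block used for
 * large receive buffers is 8 KB; ql_get_next_chunk() and
 * ql_get_curr_lchunk() below carve that block into lbq_buf_size-sized
 * chunks and unmap the page only when the last chunk of the block is
 * consumed.
 */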
1042
1043/* Get the next large buffer. */
1044static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
1045{
1046 struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
1047 rx_ring->lbq_curr_idx++;
1048 if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
1049 rx_ring->lbq_curr_idx = 0;
1050 rx_ring->lbq_free_cnt++;
1051 return lbq_desc;
1052}
1053
1054static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
1055 struct rx_ring *rx_ring)
1056{
1057 struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);
1058
1059 pci_dma_sync_single_for_cpu(qdev->pdev,
1060 pci_unmap_addr(lbq_desc, mapaddr),
1061 rx_ring->lbq_buf_size,
1062 PCI_DMA_FROMDEVICE);
1063
1064 /* If it's the last chunk of our master page then
1065 * we unmap it.
1066 */
1067 if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size)
1068 == ql_lbq_block_size(qdev))
1069 pci_unmap_page(qdev->pdev,
1070 lbq_desc->p.pg_chunk.map,
1071 ql_lbq_block_size(qdev),
1072 PCI_DMA_FROMDEVICE);
1073 return lbq_desc;
1074}
1075
1076/* Get the next small buffer. */
1077static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
1078{
1079 struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
1080 rx_ring->sbq_curr_idx++;
1081 if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
1082 rx_ring->sbq_curr_idx = 0;
1083 rx_ring->sbq_free_cnt++;
1084 return sbq_desc;
1085}
1086
1087/* Update an rx ring index. */
1088static void ql_update_cq(struct rx_ring *rx_ring)
1089{
1090 rx_ring->cnsmr_idx++;
1091 rx_ring->curr_entry++;
1092 if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
1093 rx_ring->cnsmr_idx = 0;
1094 rx_ring->curr_entry = rx_ring->cq_base;
1095 }
1096}
1097
1098static void ql_write_cq_idx(struct rx_ring *rx_ring)
1099{
1100 ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
1101}
1102
1103static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
1104 struct bq_desc *lbq_desc)
1105{
1106 if (!rx_ring->pg_chunk.page) {
1107 u64 map;
1108 rx_ring->pg_chunk.page = alloc_pages(__GFP_COLD | __GFP_COMP |
1109 GFP_ATOMIC,
1110 qdev->lbq_buf_order);
1111 if (unlikely(!rx_ring->pg_chunk.page)) {
1112 QPRINTK(qdev, DRV, ERR,
1113 "page allocation failed.\n");
1114 return -ENOMEM;
1115 }
1116 rx_ring->pg_chunk.offset = 0;
1117 map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page,
1118 0, ql_lbq_block_size(qdev),
1119 PCI_DMA_FROMDEVICE);
1120 if (pci_dma_mapping_error(qdev->pdev, map)) {
1121 __free_pages(rx_ring->pg_chunk.page,
1122 qdev->lbq_buf_order);
1123 QPRINTK(qdev, DRV, ERR,
1124 "PCI mapping failed.\n");
1125 return -ENOMEM;
1126 }
1127 rx_ring->pg_chunk.map = map;
1128 rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page);
1129 }
1130
1131 /* Copy the current master pg_chunk info
1132 * to the current descriptor.
1133 */
1134 lbq_desc->p.pg_chunk = rx_ring->pg_chunk;
1135
1136 /* Adjust the master page chunk for next
1137 * buffer get.
1138 */
1139 rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size;
1140 if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) {
1141 rx_ring->pg_chunk.page = NULL;
1142 lbq_desc->p.pg_chunk.last_flag = 1;
1143 } else {
1144 rx_ring->pg_chunk.va += rx_ring->lbq_buf_size;
1145 get_page(rx_ring->pg_chunk.page);
1146 lbq_desc->p.pg_chunk.last_flag = 0;
1147 }
1148 return 0;
1149}
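/* Note on reference counting: alloc_pages() hands back one reference,
 * which the final chunk of the block inherits; every earlier chunk takes
 * its own reference via get_page() above, so each outstanding receive
 * buffer holds exactly one reference to the master page.
 */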
1150/* Process (refill) a large buffer queue. */
1151static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1152{
1153 u32 clean_idx = rx_ring->lbq_clean_idx;
1154 u32 start_idx = clean_idx;
1155 struct bq_desc *lbq_desc;
1156 u64 map;
1157 int i;
1158
1159 while (rx_ring->lbq_free_cnt > 32) {
1160 for (i = 0; i < 16; i++) {
1161 QPRINTK(qdev, RX_STATUS, DEBUG,
1162 "lbq: try cleaning clean_idx = %d.\n",
1163 clean_idx);
1164 lbq_desc = &rx_ring->lbq[clean_idx];
1165 if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
1166 QPRINTK(qdev, IFUP, ERR,
1167 "Could not get a page chunk.\n");
1168 return;
1169 }
1170
1171 map = lbq_desc->p.pg_chunk.map +
1172 lbq_desc->p.pg_chunk.offset;
1173 pci_unmap_addr_set(lbq_desc, mapaddr, map);
1174 pci_unmap_len_set(lbq_desc, maplen,
1175 rx_ring->lbq_buf_size);
1176 *lbq_desc->addr = cpu_to_le64(map);
1177
1178 pci_dma_sync_single_for_device(qdev->pdev, map,
1179 rx_ring->lbq_buf_size,
1180 PCI_DMA_FROMDEVICE);
1181 clean_idx++;
1182 if (clean_idx == rx_ring->lbq_len)
1183 clean_idx = 0;
1184 }
1185
1186 rx_ring->lbq_clean_idx = clean_idx;
1187 rx_ring->lbq_prod_idx += 16;
1188 if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
1189 rx_ring->lbq_prod_idx = 0;
1190 rx_ring->lbq_free_cnt -= 16;
1191 }
1192
1193 if (start_idx != clean_idx) {
1194 QPRINTK(qdev, RX_STATUS, DEBUG,
1195 "lbq: updating prod idx = %d.\n",
1196 rx_ring->lbq_prod_idx);
1197 ql_write_db_reg(rx_ring->lbq_prod_idx,
1198 rx_ring->lbq_prod_idx_db_reg);
1199 }
1200}
1201
1202/* Process (refill) a small buffer queue. */
1203static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1204{
1205 u32 clean_idx = rx_ring->sbq_clean_idx;
1206 u32 start_idx = clean_idx;
1207 struct bq_desc *sbq_desc;
1208 u64 map;
1209 int i;
1210
1211 while (rx_ring->sbq_free_cnt > 16) {
1212 for (i = 0; i < 16; i++) {
1213 sbq_desc = &rx_ring->sbq[clean_idx];
1214 QPRINTK(qdev, RX_STATUS, DEBUG,
1215 "sbq: try cleaning clean_idx = %d.\n",
1216 clean_idx);
1217 if (sbq_desc->p.skb == NULL) {
1218 QPRINTK(qdev, RX_STATUS, DEBUG,
1219 "sbq: getting new skb for index %d.\n",
1220 sbq_desc->index);
1221 sbq_desc->p.skb =
1222 netdev_alloc_skb(qdev->ndev,
1223 SMALL_BUFFER_SIZE);
1224 if (sbq_desc->p.skb == NULL) {
1225 QPRINTK(qdev, PROBE, ERR,
1226 "Couldn't get an skb.\n");
1227 rx_ring->sbq_clean_idx = clean_idx;
1228 return;
1229 }
1230 skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
1231 map = pci_map_single(qdev->pdev,
1232 sbq_desc->p.skb->data,
1233 rx_ring->sbq_buf_size,
1234 PCI_DMA_FROMDEVICE);
1235 if (pci_dma_mapping_error(qdev->pdev, map)) {
1236 QPRINTK(qdev, IFUP, ERR, "PCI mapping failed.\n");
1237 rx_ring->sbq_clean_idx = clean_idx;
1238 dev_kfree_skb_any(sbq_desc->p.skb);
1239 sbq_desc->p.skb = NULL;
1240 return;
1241 }
1242 pci_unmap_addr_set(sbq_desc, mapaddr, map);
1243 pci_unmap_len_set(sbq_desc, maplen,
1244 rx_ring->sbq_buf_size);
1245 *sbq_desc->addr = cpu_to_le64(map);
1246 }
1247
1248 clean_idx++;
1249 if (clean_idx == rx_ring->sbq_len)
1250 clean_idx = 0;
1251 }
1252 rx_ring->sbq_clean_idx = clean_idx;
1253 rx_ring->sbq_prod_idx += 16;
1254 if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
1255 rx_ring->sbq_prod_idx = 0;
1256 rx_ring->sbq_free_cnt -= 16;
1257 }
1258
1259 if (start_idx != clean_idx) {
1260 QPRINTK(qdev, RX_STATUS, DEBUG,
1261 "sbq: updating prod idx = %d.\n",
1262 rx_ring->sbq_prod_idx);
1263 ql_write_db_reg(rx_ring->sbq_prod_idx,
1264 rx_ring->sbq_prod_idx_db_reg);
c4e84bde
RM
1265 }
1266}
1267
1268static void ql_update_buffer_queues(struct ql_adapter *qdev,
1269 struct rx_ring *rx_ring)
1270{
1271 ql_update_sbq(qdev, rx_ring);
1272 ql_update_lbq(qdev, rx_ring);
1273}
1274
1275/* Unmaps tx buffers. Can be called from send() if a pci mapping
1276 * fails at some stage, or from the interrupt when a tx completes.
1277 */
1278static void ql_unmap_send(struct ql_adapter *qdev,
1279 struct tx_ring_desc *tx_ring_desc, int mapped)
1280{
1281 int i;
1282 for (i = 0; i < mapped; i++) {
1283 if (i == 0 || (i == 7 && mapped > 7)) {
1284 /*
1285 * Unmap the skb->data area, or the
1286 * external sglist (AKA the Outbound
1287 * Address List (OAL)).
1288 * If it's the zeroeth element, then it's
1289 * the skb->data area. If it's the 7th
1290 * element and there are more than 6 frags,
1291 * then it's an OAL.
1292 */
1293 if (i == 7) {
1294 QPRINTK(qdev, TX_DONE, DEBUG,
1295 "unmapping OAL area.\n");
1296 }
1297 pci_unmap_single(qdev->pdev,
1298 pci_unmap_addr(&tx_ring_desc->map[i],
1299 mapaddr),
1300 pci_unmap_len(&tx_ring_desc->map[i],
1301 maplen),
1302 PCI_DMA_TODEVICE);
1303 } else {
1304 QPRINTK(qdev, TX_DONE, DEBUG, "unmapping frag %d.\n",
1305 i);
1306 pci_unmap_page(qdev->pdev,
1307 pci_unmap_addr(&tx_ring_desc->map[i],
1308 mapaddr),
1309 pci_unmap_len(&tx_ring_desc->map[i],
1310 maplen), PCI_DMA_TODEVICE);
1311 }
1312 }
1313
1314}
1315
1316/* Map the buffers for this transmit. This will return
1317 * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
1318 */
1319static int ql_map_send(struct ql_adapter *qdev,
1320 struct ob_mac_iocb_req *mac_iocb_ptr,
1321 struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
1322{
1323 int len = skb_headlen(skb);
1324 dma_addr_t map;
1325 int frag_idx, err, map_idx = 0;
1326 struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
1327 int frag_cnt = skb_shinfo(skb)->nr_frags;
1328
1329 if (frag_cnt) {
1330 QPRINTK(qdev, TX_QUEUED, DEBUG, "frag_cnt = %d.\n", frag_cnt);
1331 }
1332 /*
1333 * Map the skb buffer first.
1334 */
1335 map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
1336
1337 err = pci_dma_mapping_error(qdev->pdev, map);
1338 if (err) {
1339 QPRINTK(qdev, TX_QUEUED, ERR,
1340 "PCI mapping failed with error: %d\n", err);
1341
1342 return NETDEV_TX_BUSY;
1343 }
1344
1345 tbd->len = cpu_to_le32(len);
1346 tbd->addr = cpu_to_le64(map);
1347 pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1348 pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
1349 map_idx++;
1350
1351 /*
1352 * This loop fills the remainder of the 8 address descriptors
1353 * in the IOCB. If there are more than 7 fragments, then the
1354 * eighth address desc will point to an external list (OAL).
1355 * When this happens, the remainder of the frags will be stored
1356 * in this list.
1357 */
1358 for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
1359 skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
1360 tbd++;
1361 if (frag_idx == 6 && frag_cnt > 7) {
1362 /* Let's tack on an sglist.
1363 * Our control block will now
1364 * look like this:
1365 * iocb->seg[0] = skb->data
1366 * iocb->seg[1] = frag[0]
1367 * iocb->seg[2] = frag[1]
1368 * iocb->seg[3] = frag[2]
1369 * iocb->seg[4] = frag[3]
1370 * iocb->seg[5] = frag[4]
1371 * iocb->seg[6] = frag[5]
1372 * iocb->seg[7] = ptr to OAL (external sglist)
1373 * oal->seg[0] = frag[6]
1374 * oal->seg[1] = frag[7]
1375 * oal->seg[2] = frag[8]
1376 * oal->seg[3] = frag[9]
1377 * oal->seg[4] = frag[10]
1378 * etc...
1379 */
1380 /* Tack on the OAL in the eighth segment of IOCB. */
1381 map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
1382 sizeof(struct oal),
1383 PCI_DMA_TODEVICE);
1384 err = pci_dma_mapping_error(qdev->pdev, map);
1385 if (err) {
1386 QPRINTK(qdev, TX_QUEUED, ERR,
1387 "PCI mapping outbound address list with error: %d\n",
1388 err);
1389 goto map_error;
1390 }
1391
1392 tbd->addr = cpu_to_le64(map);
1393 /*
1394 * The length is the number of fragments
1395 * that remain to be mapped times the length
1396 * of our sglist (OAL).
1397 */
1398 tbd->len =
1399 cpu_to_le32((sizeof(struct tx_buf_desc) *
1400 (frag_cnt - frag_idx)) | TX_DESC_C);
1401 pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
1402 map);
1403 pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1404 sizeof(struct oal));
1405 tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
1406 map_idx++;
1407 }
1408
1409 map =
1410 pci_map_page(qdev->pdev, frag->page,
1411 frag->page_offset, frag->size,
1412 PCI_DMA_TODEVICE);
1413
1414 err = pci_dma_mapping_error(qdev->pdev, map);
1415 if (err) {
1416 QPRINTK(qdev, TX_QUEUED, ERR,
1417 "PCI mapping frags failed with error: %d.\n",
1418 err);
1419 goto map_error;
1420 }
1421
1422 tbd->addr = cpu_to_le64(map);
1423 tbd->len = cpu_to_le32(frag->size);
1424 pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1425 pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1426 frag->size);
1427
1428 }
1429 /* Save the number of segments we've mapped. */
1430 tx_ring_desc->map_cnt = map_idx;
1431 /* Terminate the last segment. */
1432 tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
1433 return NETDEV_TX_OK;
1434
1435map_error:
1436 /*
1437 * If the first frag mapping failed, then i will be zero.
1438 * This causes the unmap of the skb->data area. Otherwise
1439 * we pass in the number of frags that mapped successfully
1440 * so they can be unmapped.
1441 */
1442 ql_unmap_send(qdev, tx_ring_desc, map_idx);
1443 return NETDEV_TX_BUSY;
1444}
1445
1446/* Process an inbound completion from an rx ring. */
1447static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
1448 struct rx_ring *rx_ring,
1449 struct ib_mac_iocb_rsp *ib_mac_rsp,
1450 u32 length,
1451 u16 vlan_id)
1452{
1453 struct sk_buff *skb;
1454 struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1455 struct skb_frag_struct *rx_frag;
1456 int nr_frags;
1457 struct napi_struct *napi = &rx_ring->napi;
1458
1459 napi->dev = qdev->ndev;
1460
1461 skb = napi_get_frags(napi);
1462 if (!skb) {
1463 QPRINTK(qdev, DRV, ERR, "Couldn't get an skb, exiting.\n");
1464 rx_ring->rx_dropped++;
1465 put_page(lbq_desc->p.pg_chunk.page);
1466 return;
1467 }
1468 prefetch(lbq_desc->p.pg_chunk.va);
1469 rx_frag = skb_shinfo(skb)->frags;
1470 nr_frags = skb_shinfo(skb)->nr_frags;
1471 rx_frag += nr_frags;
1472 rx_frag->page = lbq_desc->p.pg_chunk.page;
1473 rx_frag->page_offset = lbq_desc->p.pg_chunk.offset;
1474 rx_frag->size = length;
1475
1476 skb->len += length;
1477 skb->data_len += length;
1478 skb->truesize += length;
1479 skb_shinfo(skb)->nr_frags++;
1480
1481 rx_ring->rx_packets++;
1482 rx_ring->rx_bytes += length;
1483 skb->ip_summed = CHECKSUM_UNNECESSARY;
1484 skb_record_rx_queue(skb, rx_ring->cq_id);
1485 if (qdev->vlgrp && (vlan_id != 0xffff))
1486 vlan_gro_frags(&rx_ring->napi, qdev->vlgrp, vlan_id);
1487 else
1488 napi_gro_frags(napi);
1489}
1490
1491/* Process an inbound completion from an rx ring. */
1492static void ql_process_mac_rx_page(struct ql_adapter *qdev,
1493 struct rx_ring *rx_ring,
1494 struct ib_mac_iocb_rsp *ib_mac_rsp,
1495 u32 length,
1496 u16 vlan_id)
1497{
1498 struct net_device *ndev = qdev->ndev;
1499 struct sk_buff *skb = NULL;
1500 void *addr;
1501 struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1502 struct napi_struct *napi = &rx_ring->napi;
1503
1504 skb = netdev_alloc_skb(ndev, length);
1505 if (!skb) {
1506 QPRINTK(qdev, DRV, ERR, "Couldn't get an skb, "
1507 "need to unwind!.\n");
1508 rx_ring->rx_dropped++;
1509 put_page(lbq_desc->p.pg_chunk.page);
1510 return;
1511 }
1512
1513 addr = lbq_desc->p.pg_chunk.va;
1514 prefetch(addr);
1515
1516
1517 /* Frame error, so drop the packet. */
1518 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1519 QPRINTK(qdev, DRV, ERR, "Receive error, flags2 = 0x%x\n",
1520 ib_mac_rsp->flags2);
1521 rx_ring->rx_errors++;
1522 goto err_out;
1523 }
1524
1525 /* The max framesize filter on this chip is set higher than
1526 * MTU since FCoE uses 2k frames.
1527 */
1528 if (skb->len > ndev->mtu + ETH_HLEN) {
1529 QPRINTK(qdev, DRV, ERR, "Segment too small, dropping.\n");
1530 rx_ring->rx_dropped++;
1531 goto err_out;
1532 }
1533 memcpy(skb_put(skb, ETH_HLEN), addr, ETH_HLEN);
1534 QPRINTK(qdev, RX_STATUS, DEBUG,
1535 "%d bytes of headers and data in large. Chain "
1536 "page to new skb and pull tail.\n", length);
1537 skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1538 lbq_desc->p.pg_chunk.offset+ETH_HLEN,
1539 length-ETH_HLEN);
1540 skb->len += length-ETH_HLEN;
1541 skb->data_len += length-ETH_HLEN;
1542 skb->truesize += length-ETH_HLEN;
1543
1544 rx_ring->rx_packets++;
1545 rx_ring->rx_bytes += skb->len;
1546 skb->protocol = eth_type_trans(skb, ndev);
1547 skb->ip_summed = CHECKSUM_NONE;
1548
1549 if (qdev->rx_csum &&
1550 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1551 /* TCP frame. */
1552 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1553 QPRINTK(qdev, RX_STATUS, DEBUG,
1554 "TCP checksum done!\n");
1555 skb->ip_summed = CHECKSUM_UNNECESSARY;
1556 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1557 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1558 /* Unfragmented ipv4 UDP frame. */
1559 struct iphdr *iph = (struct iphdr *) skb->data;
1560 if (!(iph->frag_off &
1561 cpu_to_be16(IP_MF|IP_OFFSET))) {
1562 skb->ip_summed = CHECKSUM_UNNECESSARY;
1563 QPRINTK(qdev, RX_STATUS, DEBUG,
1564 "TCP checksum done!\n");
1565 }
1566 }
1567 }
1568
1569 skb_record_rx_queue(skb, rx_ring->cq_id);
1570 if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
1571 if (qdev->vlgrp && (vlan_id != 0xffff))
1572 vlan_gro_receive(napi, qdev->vlgrp, vlan_id, skb);
1573 else
1574 napi_gro_receive(napi, skb);
1575 } else {
1576 if (qdev->vlgrp && (vlan_id != 0xffff))
1577 vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
1578 else
1579 netif_receive_skb(skb);
1580 }
1581 return;
1582err_out:
1583 dev_kfree_skb_any(skb);
1584 put_page(lbq_desc->p.pg_chunk.page);
1585}
1586
1587/* Process an inbound completion from an rx ring. */
1588static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
1589 struct rx_ring *rx_ring,
1590 struct ib_mac_iocb_rsp *ib_mac_rsp,
1591 u32 length,
1592 u16 vlan_id)
1593{
1594 struct net_device *ndev = qdev->ndev;
1595 struct sk_buff *skb = NULL;
1596 struct sk_buff *new_skb = NULL;
1597 struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring);
1598
1599 skb = sbq_desc->p.skb;
1600 /* Allocate new_skb and copy */
1601 new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
1602 if (new_skb == NULL) {
1603 QPRINTK(qdev, PROBE, ERR,
1604 "No skb available, drop the packet.\n");
1605 rx_ring->rx_dropped++;
1606 return;
1607 }
1608 skb_reserve(new_skb, NET_IP_ALIGN);
1609 memcpy(skb_put(new_skb, length), skb->data, length);
1610 skb = new_skb;
1611
1612 /* Frame error, so drop the packet. */
1613 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1614 QPRINTK(qdev, DRV, ERR, "Receive error, flags2 = 0x%x\n",
1615 ib_mac_rsp->flags2);
1616 dev_kfree_skb_any(skb);
1617 rx_ring->rx_errors++;
1618 return;
1619 }
1620
1621 /* loopback self test for ethtool */
1622 if (test_bit(QL_SELFTEST, &qdev->flags)) {
1623 ql_check_lb_frame(qdev, skb);
1624 dev_kfree_skb_any(skb);
1625 return;
1626 }
1627
1628 /* The max framesize filter on this chip is set higher than
1629 * MTU since FCoE uses 2k frames.
1630 */
1631 if (skb->len > ndev->mtu + ETH_HLEN) {
1632 dev_kfree_skb_any(skb);
1633 rx_ring->rx_dropped++;
1634 return;
1635 }
1636
1637 prefetch(skb->data);
1638 skb->dev = ndev;
1639 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1640 QPRINTK(qdev, RX_STATUS, DEBUG, "%s%s%s Multicast.\n",
1641 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1642 IB_MAC_IOCB_RSP_M_HASH ? "Hash" : "",
1643 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1644 IB_MAC_IOCB_RSP_M_REG ? "Registered" : "",
1645 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1646 IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1647 }
1648 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
1649 QPRINTK(qdev, RX_STATUS, DEBUG, "Promiscuous Packet.\n");
1650
1651 rx_ring->rx_packets++;
1652 rx_ring->rx_bytes += skb->len;
1653 skb->protocol = eth_type_trans(skb, ndev);
1654 skb->ip_summed = CHECKSUM_NONE;
1655
1656 /* If rx checksum is on, and there are no
1657 * csum or frame errors.
1658 */
1659 if (qdev->rx_csum &&
1660 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1661 /* TCP frame. */
1662 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1663 QPRINTK(qdev, RX_STATUS, DEBUG,
1664 "TCP checksum done!\n");
1665 skb->ip_summed = CHECKSUM_UNNECESSARY;
1666 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1667 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1668 /* Unfragmented ipv4 UDP frame. */
1669 struct iphdr *iph = (struct iphdr *) skb->data;
1670 if (!(iph->frag_off &
1671 cpu_to_be16(IP_MF|IP_OFFSET))) {
1672 skb->ip_summed = CHECKSUM_UNNECESSARY;
1673 QPRINTK(qdev, RX_STATUS, DEBUG,
1674 "TCP checksum done!\n");
1675 }
1676 }
1677 }
1678
1679 skb_record_rx_queue(skb, rx_ring->cq_id);
1680 if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
1681 if (qdev->vlgrp && (vlan_id != 0xffff))
1682 vlan_gro_receive(&rx_ring->napi, qdev->vlgrp,
1683 vlan_id, skb);
1684 else
1685 napi_gro_receive(&rx_ring->napi, skb);
1686 } else {
1687 if (qdev->vlgrp && (vlan_id != 0xffff))
1688 vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
1689 else
1690 netif_receive_skb(skb);
1691 }
1692}
1693
1694static void ql_realign_skb(struct sk_buff *skb, int len)
1695{
1696 void *temp_addr = skb->data;
1697
1698 /* Undo the skb_reserve(skb,32) we did before
1699 * giving to hardware, and realign data on
1700 * a 2-byte boundary.
1701 */
1702 skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
1703 skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
1704 skb_copy_to_linear_data(skb, temp_addr,
1705 (unsigned int)len);
1706}
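/* Worked example, assuming QLGE_SB_PAD is 32 as the comment above
 * implies: the small buffer was posted to hardware with a 32-byte
 * headroom, so moving data and tail back by QLGE_SB_PAD - NET_IP_ALIGN
 * (30 bytes when NET_IP_ALIGN is 2) leaves a 2-byte pad in front of the
 * 14-byte Ethernet header, which puts the IP header that follows on a
 * 4-byte boundary.
 */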
1707
1708/*
1709 * This function builds an skb for the given inbound
1710 * completion. It will be rewritten for readability in the near
1711 * future, but for now it works well.
1712 */
1713static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1714 struct rx_ring *rx_ring,
1715 struct ib_mac_iocb_rsp *ib_mac_rsp)
1716{
1717 struct bq_desc *lbq_desc;
1718 struct bq_desc *sbq_desc;
1719 struct sk_buff *skb = NULL;
1720 u32 length = le32_to_cpu(ib_mac_rsp->data_len);
1721 u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
1722
1723 /*
1724 * Handle the header buffer if present.
1725 */
1726 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
1727 ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1728 QPRINTK(qdev, RX_STATUS, DEBUG, "Header of %d bytes in small buffer.\n", hdr_len);
1729 /*
1730 * Headers fit nicely into a small buffer.
1731 */
1732 sbq_desc = ql_get_curr_sbuf(rx_ring);
1733 pci_unmap_single(qdev->pdev,
1734 pci_unmap_addr(sbq_desc, mapaddr),
1735 pci_unmap_len(sbq_desc, maplen),
1736 PCI_DMA_FROMDEVICE);
1737 skb = sbq_desc->p.skb;
1738 ql_realign_skb(skb, hdr_len);
1739 skb_put(skb, hdr_len);
1740 sbq_desc->p.skb = NULL;
1741 }
1742
1743 /*
1744 * Handle the data buffer(s).
1745 */
1746 if (unlikely(!length)) { /* Is there data too? */
1747 QPRINTK(qdev, RX_STATUS, DEBUG,
1748 "No Data buffer in this packet.\n");
1749 return skb;
1750 }
1751
1752 if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
1753 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1754 QPRINTK(qdev, RX_STATUS, DEBUG,
1755 "Headers in small, data of %d bytes in small, combine them.\n", length);
1756 /*
1757 * Data is less than small buffer size so it's
1758 * stuffed in a small buffer.
1759 * For this case we append the data
1760 * from the "data" small buffer to the "header" small
1761 * buffer.
1762 */
1763 sbq_desc = ql_get_curr_sbuf(rx_ring);
1764 pci_dma_sync_single_for_cpu(qdev->pdev,
1765 pci_unmap_addr
1766 (sbq_desc, mapaddr),
1767 pci_unmap_len
1768 (sbq_desc, maplen),
1769 PCI_DMA_FROMDEVICE);
1770 memcpy(skb_put(skb, length),
1771 sbq_desc->p.skb->data, length);
1772 pci_dma_sync_single_for_device(qdev->pdev,
1773 pci_unmap_addr
1774 (sbq_desc,
1775 mapaddr),
1776 pci_unmap_len
1777 (sbq_desc,
1778 maplen),
1779 PCI_DMA_FROMDEVICE);
1780 } else {
1781 QPRINTK(qdev, RX_STATUS, DEBUG,
1782 "%d bytes in a single small buffer.\n", length);
1783 sbq_desc = ql_get_curr_sbuf(rx_ring);
1784 skb = sbq_desc->p.skb;
1785 ql_realign_skb(skb, length);
1786 skb_put(skb, length);
1787 pci_unmap_single(qdev->pdev,
1788 pci_unmap_addr(sbq_desc,
1789 mapaddr),
1790 pci_unmap_len(sbq_desc,
1791 maplen),
1792 PCI_DMA_FROMDEVICE);
1793 sbq_desc->p.skb = NULL;
1794 }
1795 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
1796 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1797 QPRINTK(qdev, RX_STATUS, DEBUG,
1798 "Header in small, %d bytes in large. Chain large to small!\n", length);
1799 /*
1800 * The data is in a single large buffer. We
1801 * chain it to the header buffer's skb and let
1802 * it rip.
1803 */
1804 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1805 QPRINTK(qdev, RX_STATUS, DEBUG,
1806 "Chaining page at offset = %d,"
1807 "for %d bytes to skb.\n",
1808 lbq_desc->p.pg_chunk.offset, length);
1809 skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1810 lbq_desc->p.pg_chunk.offset,
1811 length);
1812 skb->len += length;
1813 skb->data_len += length;
1814 skb->truesize += length;
1815 } else {
1816 /*
1817 * The headers and data are in a single large buffer. We
1818 * copy it to a new skb and let it go. This can happen with
1819 * jumbo mtu on a non-TCP/UDP frame.
1820 */
1821 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1822 skb = netdev_alloc_skb(qdev->ndev, length);
1823 if (skb == NULL) {
1824 QPRINTK(qdev, PROBE, DEBUG,
1825 "No skb available, drop the packet.\n");
1826 return NULL;
1827 }
1828 pci_unmap_page(qdev->pdev,
1829 pci_unmap_addr(lbq_desc,
1830 mapaddr),
1831 pci_unmap_len(lbq_desc, maplen),
1832 PCI_DMA_FROMDEVICE);
1833 skb_reserve(skb, NET_IP_ALIGN);
1834 QPRINTK(qdev, RX_STATUS, DEBUG,
1835 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n", length);
1836 skb_fill_page_desc(skb, 0,
1837 lbq_desc->p.pg_chunk.page,
1838 lbq_desc->p.pg_chunk.offset,
1839 length);
1840 skb->len += length;
1841 skb->data_len += length;
1842 skb->truesize += length;
1843 length -= length;
1844 __pskb_pull_tail(skb,
1845 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1846 VLAN_ETH_HLEN : ETH_HLEN);
1847 }
1848 } else {
1849 /*
1850 * The data is in a chain of large buffers
1851 * pointed to by a small buffer. We loop
1852 * through and chain them to our small header
1853 * buffer's skb.
1854 * frags: There are 18 max frags and our small
1855 * buffer will hold 32 of them. The thing is,
1856 * we'll use 3 max for our 9000 byte jumbo
1857 * frames. If the MTU goes up we could
1858 * eventually be in trouble.
1859 */
7c734359 1860 int size, i = 0;
c4e84bde
RM
1861 sbq_desc = ql_get_curr_sbuf(rx_ring);
1862 pci_unmap_single(qdev->pdev,
1863 pci_unmap_addr(sbq_desc, mapaddr),
1864 pci_unmap_len(sbq_desc, maplen),
1865 PCI_DMA_FROMDEVICE);
1866 if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
1867 /*
1868 * This is a non-TCP/UDP IP frame, so
1869 * the headers aren't split into a small
1870 * buffer. We have to use the small buffer
1871 * that contains our sg list as our skb to
1872 * send upstairs. Copy the sg list here to
1873 * a local buffer and use it to find the
1874 * pages to chain.
1875 */
1876 QPRINTK(qdev, RX_STATUS, DEBUG,
1877 "%d bytes of headers & data in chain of large.\n", length);
1878 skb = sbq_desc->p.skb;
c4e84bde
RM
1879 sbq_desc->p.skb = NULL;
1880 skb_reserve(skb, NET_IP_ALIGN);
c4e84bde
RM
1881 }
1882 while (length > 0) {
7c734359
RM
1883 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1884 size = (length < rx_ring->lbq_buf_size) ? length :
1885 rx_ring->lbq_buf_size;
c4e84bde
RM
1886
1887 QPRINTK(qdev, RX_STATUS, DEBUG,
1888 "Adding page %d to skb for %d bytes.\n",
1889 i, size);
7c734359
RM
1890 skb_fill_page_desc(skb, i,
1891 lbq_desc->p.pg_chunk.page,
1892 lbq_desc->p.pg_chunk.offset,
1893 size);
c4e84bde
RM
1894 skb->len += size;
1895 skb->data_len += size;
1896 skb->truesize += size;
1897 length -= size;
c4e84bde
RM
1898 i++;
1899 }
1900 __pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1901 VLAN_ETH_HLEN : ETH_HLEN);
1902 }
1903 return skb;
1904}
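/* Added summary (not in the original source): the branches in
 * ql_build_rx_skb() above cover these buffer layouts reported by the
 * IB_MAC completion flags:
 *   DS set,  HS set   - headers in one small buffer, data in a second
 *                       small buffer; the data is copied onto the header skb.
 *   DS set,  HS clear - the whole frame sits in a single small buffer.
 *   DL set,  HS set   - headers in a small buffer, data in one large
 *                       page chunk chained on as a frag.
 *   DL set,  HS clear - headers and data share one large page chunk; a
 *                       new skb is allocated and the chunk attached.
 *   neither set       - the small buffer holds an IAL and the frame
 *                       spans a chain of large page chunks.
 */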
1905
1906/* Process an inbound completion from an rx ring. */
4f848c0a 1907static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
c4e84bde 1908 struct rx_ring *rx_ring,
4f848c0a
RM
1909 struct ib_mac_iocb_rsp *ib_mac_rsp,
1910 u16 vlan_id)
c4e84bde
RM
1911{
1912 struct net_device *ndev = qdev->ndev;
1913 struct sk_buff *skb = NULL;
1914
1915 QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
1916
1917 skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
1918 if (unlikely(!skb)) {
1919 QPRINTK(qdev, RX_STATUS, DEBUG,
1920 "No skb available, drop packet.\n");
885ee398 1921 rx_ring->rx_dropped++;
c4e84bde
RM
1922 return;
1923 }
1924
a32959cd
RM
1925 /* Frame error, so drop the packet. */
1926 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1927 QPRINTK(qdev, DRV, ERR, "Receive error, flags2 = 0x%x\n",
1928 ib_mac_rsp->flags2);
1929 dev_kfree_skb_any(skb);
885ee398 1930 rx_ring->rx_errors++;
a32959cd
RM
1931 return;
1932 }
ec33a491
RM
1933
1934 /* The max framesize filter on this chip is set higher than
1935 * MTU since FCoE uses 2k frames.
1936 */
1937 if (skb->len > ndev->mtu + ETH_HLEN) {
1938 dev_kfree_skb_any(skb);
885ee398 1939 rx_ring->rx_dropped++;
ec33a491
RM
1940 return;
1941 }
1942
9dfbbaa6
RM
1943 /* loopback self test for ethtool */
1944 if (test_bit(QL_SELFTEST, &qdev->flags)) {
1945 ql_check_lb_frame(qdev, skb);
1946 dev_kfree_skb_any(skb);
1947 return;
1948 }
1949
c4e84bde
RM
1950 prefetch(skb->data);
1951 skb->dev = ndev;
1952 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1953 QPRINTK(qdev, RX_STATUS, DEBUG, "%s%s%s Multicast.\n",
1954 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1955 IB_MAC_IOCB_RSP_M_HASH ? "Hash" : "",
1956 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1957 IB_MAC_IOCB_RSP_M_REG ? "Registered" : "",
1958 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1959 IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
885ee398 1960 rx_ring->rx_multicast++;
c4e84bde
RM
1961 }
1962 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
1963 QPRINTK(qdev, RX_STATUS, DEBUG, "Promiscuous Packet.\n");
1964 }
d555f592 1965
d555f592
RM
1966 skb->protocol = eth_type_trans(skb, ndev);
1967 skb->ip_summed = CHECKSUM_NONE;
1968
1969 /* If rx checksum is on, and there are no
1970 * csum or frame errors.
1971 */
1972 if (qdev->rx_csum &&
d555f592
RM
1973 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1974 /* TCP frame. */
1975 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1976 QPRINTK(qdev, RX_STATUS, DEBUG,
1977 "TCP checksum done!\n");
1978 skb->ip_summed = CHECKSUM_UNNECESSARY;
1979 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1980 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1981 /* Unfragmented ipv4 UDP frame. */
1982 struct iphdr *iph = (struct iphdr *) skb->data;
1983 if (!(iph->frag_off &
1984 cpu_to_be16(IP_MF|IP_OFFSET))) {
1985 skb->ip_summed = CHECKSUM_UNNECESSARY;
1986 QPRINTK(qdev, RX_STATUS, DEBUG,
1987 "UDP checksum done!\n");
1988 }
1989 }
c4e84bde 1990 }
d555f592 1991
885ee398
RM
1992 rx_ring->rx_packets++;
1993 rx_ring->rx_bytes += skb->len;
b2014ff8 1994 skb_record_rx_queue(skb, rx_ring->cq_id);
22bdd4f5
RM
1995 if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
1996 if (qdev->vlgrp &&
1997 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
1998 (vlan_id != 0))
1999 vlan_gro_receive(&rx_ring->napi, qdev->vlgrp,
2000 vlan_id, skb);
2001 else
2002 napi_gro_receive(&rx_ring->napi, skb);
c4e84bde 2003 } else {
22bdd4f5
RM
2004 if (qdev->vlgrp &&
2005 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
2006 (vlan_id != 0))
2007 vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
2008 else
2009 netif_receive_skb(skb);
c4e84bde 2010 }
c4e84bde
RM
2011}
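/* Illustrative sketch only, not part of the driver: the rx checksum
 * decision made inline above could be factored into a helper such as
 * the one below.  The name ql_rx_csum_ok() is hypothetical; the flags
 * and fields are the ones already used above.
 */
static inline bool ql_rx_csum_ok(struct ql_adapter *qdev,
				 struct ib_mac_iocb_rsp *ib_mac_rsp,
				 struct sk_buff *skb)
{
	struct iphdr *iph;

	/* Checksum offload disabled or hardware flagged an error. */
	if (!qdev->rx_csum ||
	    (ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK))
		return false;
	/* Hardware validated a TCP checksum. */
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)
		return true;
	/* Only unfragmented IPv4 UDP frames are covered. */
	if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
	    (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
		iph = (struct iphdr *)skb->data;
		return !(iph->frag_off & cpu_to_be16(IP_MF | IP_OFFSET));
	}
	return false;
}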
2012
4f848c0a
RM
2013/* Process an inbound completion from an rx ring. */
2014static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
2015 struct rx_ring *rx_ring,
2016 struct ib_mac_iocb_rsp *ib_mac_rsp)
2017{
2018 u32 length = le32_to_cpu(ib_mac_rsp->data_len);
2019 u16 vlan_id = (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
2020 ((le16_to_cpu(ib_mac_rsp->vlan_id) &
2021 IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
2022
2023 QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
2024
2025 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
2026 /* The data and headers are split into
2027 * separate buffers.
2028 */
2029 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2030 vlan_id);
2031 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
2032 /* The data fit in a single small buffer.
2033 * Allocate a new skb, copy the data and
2034 * return the buffer to the free pool.
2035 */
2036 ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp,
2037 length, vlan_id);
63526713
RM
2038 } else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
2039 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
2040 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
2041 /* TCP packet in a page chunk that's been checksummed.
2042 * Tack it on to our GRO skb and let it go.
2043 */
2044 ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp,
2045 length, vlan_id);
4f848c0a
RM
2046 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
2047 /* Non-TCP packet in a page chunk. Allocate an
2048 * skb, tack it on frags, and send it up.
2049 */
2050 ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp,
2051 length, vlan_id);
2052 } else {
2053 struct bq_desc *lbq_desc;
2054
2055 /* Free small buffer that holds the IAL */
2056 lbq_desc = ql_get_curr_sbuf(rx_ring);
2057 QPRINTK(qdev, RX_ERR, ERR, "Dropping frame, len %d > mtu %d\n",
2058 length, qdev->ndev->mtu);
2059
2060 /* Unwind the large buffers for this frame. */
2061 while (length > 0) {
2062 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
2063 length -= (length < rx_ring->lbq_buf_size) ?
2064 length : rx_ring->lbq_buf_size;
2065 put_page(lbq_desc->p.pg_chunk.page);
2066 }
2067 }
2068
2069 return (unsigned long)length;
2070}
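/* Added summary (not in the original source): the dispatch in
 * ql_process_mac_rx_intr() above resolves as follows:
 *   HV set                   -> header/data split, ql_process_mac_split_rx_intr()
 *   DS set                   -> frame fits one small buffer, ql_process_mac_rx_skb()
 *   DL set, TCP, csum clean  -> page chunk handed to GRO, ql_process_mac_rx_gro_page()
 *   DL set, otherwise        -> non-TCP page chunk, ql_process_mac_rx_page()
 *   none of the above        -> oversized frame; the large chunks are
 *                               unwound and the frame is dropped.
 */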
2071
c4e84bde
RM
2072/* Process an outbound completion from an rx ring. */
2073static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
2074 struct ob_mac_iocb_rsp *mac_rsp)
2075{
2076 struct tx_ring *tx_ring;
2077 struct tx_ring_desc *tx_ring_desc;
2078
2079 QL_DUMP_OB_MAC_RSP(mac_rsp);
2080 tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
2081 tx_ring_desc = &tx_ring->q[mac_rsp->tid];
2082 ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
885ee398
RM
2083 tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
2084 tx_ring->tx_packets++;
c4e84bde
RM
2085 dev_kfree_skb(tx_ring_desc->skb);
2086 tx_ring_desc->skb = NULL;
2087
2088 if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
2089 OB_MAC_IOCB_RSP_S |
2090 OB_MAC_IOCB_RSP_L |
2091 OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
2092 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
2093 QPRINTK(qdev, TX_DONE, WARNING,
2094 "Total descriptor length did not match transfer length.\n");
2095 }
2096 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
2097 QPRINTK(qdev, TX_DONE, WARNING,
2098 "Frame too short to be legal, not sent.\n");
2099 }
2100 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
2101 QPRINTK(qdev, TX_DONE, WARNING,
2102 "Frame too long, but sent anyway.\n");
2103 }
2104 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
2105 QPRINTK(qdev, TX_DONE, WARNING,
2106 "PCI backplane error. Frame not sent.\n");
2107 }
2108 }
2109 atomic_inc(&tx_ring->tx_count);
2110}
2111
2112/* Fire up a handler to reset the MPI processor. */
2113void ql_queue_fw_error(struct ql_adapter *qdev)
2114{
6a473308 2115 ql_link_off(qdev);
c4e84bde
RM
2116 queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
2117}
2118
2119void ql_queue_asic_error(struct ql_adapter *qdev)
2120{
6a473308 2121 ql_link_off(qdev);
c4e84bde 2122 ql_disable_interrupts(qdev);
6497b607
RM
2123 /* Clear adapter up bit to signal the recovery
2124 * process that it shouldn't kill the reset worker
2125 * thread
2126 */
2127 clear_bit(QL_ADAPTER_UP, &qdev->flags);
c4e84bde
RM
2128 queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
2129}
2130
2131static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
2132 struct ib_ae_iocb_rsp *ib_ae_rsp)
2133{
2134 switch (ib_ae_rsp->event) {
2135 case MGMT_ERR_EVENT:
2136 QPRINTK(qdev, RX_ERR, ERR,
2137 "Management Processor Fatal Error.\n");
2138 ql_queue_fw_error(qdev);
2139 return;
2140
2141 case CAM_LOOKUP_ERR_EVENT:
2142 QPRINTK(qdev, LINK, ERR,
2143 "Multiple CAM hits occurred during lookup.\n");
2144 QPRINTK(qdev, DRV, ERR, "This event shouldn't occur.\n");
2145 ql_queue_asic_error(qdev);
2146 return;
2147
2148 case SOFT_ECC_ERROR_EVENT:
2149 QPRINTK(qdev, RX_ERR, ERR, "Soft ECC error detected.\n");
2150 ql_queue_asic_error(qdev);
2151 break;
2152
2153 case PCI_ERR_ANON_BUF_RD:
2154 QPRINTK(qdev, RX_ERR, ERR,
2155 "PCI error occurred when reading anonymous buffers from rx_ring %d.\n",
2156 ib_ae_rsp->q_id);
2157 ql_queue_asic_error(qdev);
2158 break;
2159
2160 default:
2161 QPRINTK(qdev, DRV, ERR, "Unexpected event %d.\n",
2162 ib_ae_rsp->event);
2163 ql_queue_asic_error(qdev);
2164 break;
2165 }
2166}
2167
2168static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
2169{
2170 struct ql_adapter *qdev = rx_ring->qdev;
ba7cd3ba 2171 u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
c4e84bde
RM
2172 struct ob_mac_iocb_rsp *net_rsp = NULL;
2173 int count = 0;
2174
1e213303 2175 struct tx_ring *tx_ring;
c4e84bde
RM
2176 /* While there are entries in the completion queue. */
2177 while (prod != rx_ring->cnsmr_idx) {
2178
2179 QPRINTK(qdev, RX_STATUS, DEBUG,
2180 "cq_id = %d, prod = %d, cnsmr = %d.\n", rx_ring->cq_id,
2181 prod, rx_ring->cnsmr_idx);
2182
2183 net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
2184 rmb();
2185 switch (net_rsp->opcode) {
2186
2187 case OPCODE_OB_MAC_TSO_IOCB:
2188 case OPCODE_OB_MAC_IOCB:
2189 ql_process_mac_tx_intr(qdev, net_rsp);
2190 break;
2191 default:
2192 QPRINTK(qdev, RX_STATUS, DEBUG,
2193 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2194 net_rsp->opcode);
2195 }
2196 count++;
2197 ql_update_cq(rx_ring);
ba7cd3ba 2198 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
c4e84bde
RM
2199 }
2200 ql_write_cq_idx(rx_ring);
1e213303
RM
2201 tx_ring = net_rsp ? &qdev->tx_ring[net_rsp->txq_idx] : NULL;
2202 if (net_rsp != NULL &&
2203 __netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
c4e84bde
RM
2204 if (atomic_read(&tx_ring->queue_stopped) &&
2205 (atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
2206 /*
2207 * The queue got stopped because the tx_ring was full.
2208 * Wake it up, because it's now at least 25% empty.
2209 */
1e213303 2210 netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
c4e84bde
RM
2211 }
2212
2213 return count;
2214}
2215
2216static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
2217{
2218 struct ql_adapter *qdev = rx_ring->qdev;
ba7cd3ba 2219 u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
c4e84bde
RM
2220 struct ql_net_rsp_iocb *net_rsp;
2221 int count = 0;
2222
2223 /* While there are entries in the completion queue. */
2224 while (prod != rx_ring->cnsmr_idx) {
2225
2226 QPRINTK(qdev, RX_STATUS, DEBUG,
2227 "cq_id = %d, prod = %d, cnsmr = %d.\n", rx_ring->cq_id,
2228 prod, rx_ring->cnsmr_idx);
2229
2230 net_rsp = rx_ring->curr_entry;
2231 rmb();
2232 switch (net_rsp->opcode) {
2233 case OPCODE_IB_MAC_IOCB:
2234 ql_process_mac_rx_intr(qdev, rx_ring,
2235 (struct ib_mac_iocb_rsp *)
2236 net_rsp);
2237 break;
2238
2239 case OPCODE_IB_AE_IOCB:
2240 ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
2241 net_rsp);
2242 break;
2243 default:
2244 {
2245 QPRINTK(qdev, RX_STATUS, DEBUG,
2246 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2247 net_rsp->opcode);
2248 }
2249 }
2250 count++;
2251 ql_update_cq(rx_ring);
ba7cd3ba 2252 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
c4e84bde
RM
2253 if (count == budget)
2254 break;
2255 }
2256 ql_update_buffer_queues(qdev, rx_ring);
2257 ql_write_cq_idx(rx_ring);
2258 return count;
2259}
2260
2261static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
2262{
2263 struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
2264 struct ql_adapter *qdev = rx_ring->qdev;
39aa8165
RM
2265 struct rx_ring *trx_ring;
2266 int i, work_done = 0;
2267 struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];
c4e84bde
RM
2268
2269 QPRINTK(qdev, RX_STATUS, DEBUG, "Enter, NAPI POLL cq_id = %d.\n",
2270 rx_ring->cq_id);
2271
39aa8165
RM
2272 /* Service the TX rings first. They start
2273 * right after the RSS rings. */
2274 for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
2275 trx_ring = &qdev->rx_ring[i];
2276 /* If this TX completion ring belongs to this vector and
2277 * it's not empty then service it.
2278 */
2279 if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
2280 (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
2281 trx_ring->cnsmr_idx)) {
2282 QPRINTK(qdev, INTR, DEBUG,
2283 "%s: Servicing TX completion ring %d.\n",
2284 __func__, trx_ring->cq_id);
2285 ql_clean_outbound_rx_ring(trx_ring);
2286 }
2287 }
2288
2289 /*
2290 * Now service the RSS ring if it's active.
2291 */
2292 if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
2293 rx_ring->cnsmr_idx) {
2294 QPRINTK(qdev, INTR, DEBUG,
2295 "%s: Servicing RX completion ring %d.\n",
2296 __func__, rx_ring->cq_id);
2297 work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
2298 }
2299
c4e84bde 2300 if (work_done < budget) {
22bdd4f5 2301 napi_complete(napi);
c4e84bde
RM
2302 ql_enable_completion_interrupt(qdev, rx_ring->irq);
2303 }
2304 return work_done;
2305}
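/* Added note (not in the original source): the NAPI budget above is
 * applied only to the inbound (RSS) completion ring.  The TX completion
 * rings sharing this vector are always drained first, and only the
 * inbound work is reported back as work_done.
 */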
2306
01e6b953 2307static void qlge_vlan_rx_register(struct net_device *ndev, struct vlan_group *grp)
c4e84bde
RM
2308{
2309 struct ql_adapter *qdev = netdev_priv(ndev);
2310
2311 qdev->vlgrp = grp;
2312 if (grp) {
2313 QPRINTK(qdev, IFUP, DEBUG, "Turning on VLAN in NIC_RCV_CFG.\n");
2314 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
2315 NIC_RCV_CFG_VLAN_MATCH_AND_NON);
2316 } else {
2317 QPRINTK(qdev, IFUP, DEBUG,
2318 "Turning off VLAN in NIC_RCV_CFG.\n");
2319 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
2320 }
2321}
2322
01e6b953 2323static void qlge_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
c4e84bde
RM
2324{
2325 struct ql_adapter *qdev = netdev_priv(ndev);
2326 u32 enable_bit = MAC_ADDR_E;
cc288f54 2327 int status;
c4e84bde 2328
cc288f54
RM
2329 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2330 if (status)
2331 return;
c4e84bde
RM
2332 if (ql_set_mac_addr_reg
2333 (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
2334 QPRINTK(qdev, IFUP, ERR, "Failed to init vlan address.\n");
2335 }
cc288f54 2336 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
c4e84bde
RM
2337}
2338
01e6b953 2339static void qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
c4e84bde
RM
2340{
2341 struct ql_adapter *qdev = netdev_priv(ndev);
2342 u32 enable_bit = 0;
cc288f54
RM
2343 int status;
2344
2345 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2346 if (status)
2347 return;
c4e84bde 2348
c4e84bde
RM
2349 if (ql_set_mac_addr_reg
2350 (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
2351 QPRINTK(qdev, IFUP, ERR, "Failed to clear vlan address.\n");
2352 }
cc288f54 2353 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
c4e84bde
RM
2354
2355}
2356
c4e84bde
RM
2357/* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
2358static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
2359{
2360 struct rx_ring *rx_ring = dev_id;
288379f0 2361 napi_schedule(&rx_ring->napi);
c4e84bde
RM
2362 return IRQ_HANDLED;
2363}
2364
c4e84bde
RM
2365/* This handles a fatal error, MPI activity, and the default
2366 * rx_ring in an MSI-X multiple vector environment.
2367 * In an MSI/Legacy environment it also processes the rest of
2368 * the rx_rings.
2369 */
2370static irqreturn_t qlge_isr(int irq, void *dev_id)
2371{
2372 struct rx_ring *rx_ring = dev_id;
2373 struct ql_adapter *qdev = rx_ring->qdev;
2374 struct intr_context *intr_context = &qdev->intr_context[0];
2375 u32 var;
c4e84bde
RM
2376 int work_done = 0;
2377
bb0d215c
RM
2378 spin_lock(&qdev->hw_lock);
2379 if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
2380 QPRINTK(qdev, INTR, DEBUG, "Shared Interrupt, Not ours!\n");
2381 spin_unlock(&qdev->hw_lock);
2382 return IRQ_NONE;
c4e84bde 2383 }
bb0d215c 2384 spin_unlock(&qdev->hw_lock);
c4e84bde 2385
bb0d215c 2386 var = ql_disable_completion_interrupt(qdev, intr_context->intr);
c4e84bde
RM
2387
2388 /*
2389 * Check for fatal error.
2390 */
2391 if (var & STS_FE) {
2392 ql_queue_asic_error(qdev);
2393 QPRINTK(qdev, INTR, ERR, "Got fatal error, STS = %x.\n", var);
2394 var = ql_read32(qdev, ERR_STS);
2395 QPRINTK(qdev, INTR, ERR,
2396 "Resetting chip. Error Status Register = 0x%x\n", var);
2397 return IRQ_HANDLED;
2398 }
2399
2400 /*
2401 * Check MPI processor activity.
2402 */
5ee22a5a
RM
2403 if ((var & STS_PI) &&
2404 (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
c4e84bde
RM
2405 /*
2406 * We've got an async event or mailbox completion.
2407 * Handle it and clear the source of the interrupt.
2408 */
2409 QPRINTK(qdev, INTR, ERR, "Got MPI processor interrupt.\n");
2410 ql_disable_completion_interrupt(qdev, intr_context->intr);
5ee22a5a
RM
2411 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
2412 queue_delayed_work_on(smp_processor_id(),
2413 qdev->workqueue, &qdev->mpi_work, 0);
c4e84bde
RM
2414 work_done++;
2415 }
2416
2417 /*
39aa8165
RM
2418 * Get the bit-mask that shows the active queues for this
2419 * pass. Compare it to the queues that this irq services
2420 * and call napi if there's a match.
c4e84bde 2421 */
39aa8165
RM
2422 var = ql_read32(qdev, ISR1);
2423 if (var & intr_context->irq_mask) {
32a5b2a0 2424 QPRINTK(qdev, INTR, INFO,
39aa8165
RM
2425 "Waking handler for rx_ring[0].\n");
2426 ql_disable_completion_interrupt(qdev, intr_context->intr);
32a5b2a0
RM
2427 napi_schedule(&rx_ring->napi);
2428 work_done++;
2429 }
bb0d215c 2430 ql_enable_completion_interrupt(qdev, intr_context->intr);
c4e84bde
RM
2431 return work_done ? IRQ_HANDLED : IRQ_NONE;
2432}
2433
2434static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2435{
2436
2437 if (skb_is_gso(skb)) {
2438 int err;
2439 if (skb_header_cloned(skb)) {
2440 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2441 if (err)
2442 return err;
2443 }
2444
2445 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2446 mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
2447 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2448 mac_iocb_ptr->total_hdrs_len =
2449 cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
2450 mac_iocb_ptr->net_trans_offset =
2451 cpu_to_le16(skb_network_offset(skb) |
2452 skb_transport_offset(skb)
2453 << OB_MAC_TRANSPORT_HDR_SHIFT);
2454 mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2455 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
2456 if (likely(skb->protocol == htons(ETH_P_IP))) {
2457 struct iphdr *iph = ip_hdr(skb);
2458 iph->check = 0;
2459 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2460 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2461 iph->daddr, 0,
2462 IPPROTO_TCP,
2463 0);
2464 } else if (skb->protocol == htons(ETH_P_IPV6)) {
2465 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
2466 tcp_hdr(skb)->check =
2467 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2468 &ipv6_hdr(skb)->daddr,
2469 0, IPPROTO_TCP, 0);
2470 }
2471 return 1;
2472 }
2473 return 0;
2474}
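/* Added note (not in the original source): ql_tso() above clears
 * iph->check and seeds tcp_hdr(skb)->check with only the pseudo-header
 * sum (length 0), so the hardware can fill in the final IP and TCP
 * checksums for each MSS-sized segment it cuts from the large send.
 */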
2475
2476static void ql_hw_csum_setup(struct sk_buff *skb,
2477 struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2478{
2479 int len;
2480 struct iphdr *iph = ip_hdr(skb);
fd2df4f7 2481 __sum16 *check;
c4e84bde
RM
2482 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2483 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2484 mac_iocb_ptr->net_trans_offset =
2485 cpu_to_le16(skb_network_offset(skb) |
2486 skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
2487
2488 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2489 len = (ntohs(iph->tot_len) - (iph->ihl << 2));
2490 if (likely(iph->protocol == IPPROTO_TCP)) {
2491 check = &(tcp_hdr(skb)->check);
2492 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
2493 mac_iocb_ptr->total_hdrs_len =
2494 cpu_to_le16(skb_transport_offset(skb) +
2495 (tcp_hdr(skb)->doff << 2));
2496 } else {
2497 check = &(udp_hdr(skb)->check);
2498 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
2499 mac_iocb_ptr->total_hdrs_len =
2500 cpu_to_le16(skb_transport_offset(skb) +
2501 sizeof(struct udphdr));
2502 }
2503 *check = ~csum_tcpudp_magic(iph->saddr,
2504 iph->daddr, len, iph->protocol, 0);
2505}
2506
61357325 2507static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
c4e84bde
RM
2508{
2509 struct tx_ring_desc *tx_ring_desc;
2510 struct ob_mac_iocb_req *mac_iocb_ptr;
2511 struct ql_adapter *qdev = netdev_priv(ndev);
2512 int tso;
2513 struct tx_ring *tx_ring;
1e213303 2514 u32 tx_ring_idx = (u32) skb->queue_mapping;
c4e84bde
RM
2515
2516 tx_ring = &qdev->tx_ring[tx_ring_idx];
2517
74c50b4b
RM
2518 if (skb_padto(skb, ETH_ZLEN))
2519 return NETDEV_TX_OK;
2520
c4e84bde
RM
2521 if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
2522 QPRINTK(qdev, TX_QUEUED, INFO,
2523 "%s: shutting down tx queue %d due to lack of resources.\n",
2524 __func__, tx_ring_idx);
1e213303 2525 netif_stop_subqueue(ndev, tx_ring->wq_id);
c4e84bde 2526 atomic_inc(&tx_ring->queue_stopped);
885ee398 2527 tx_ring->tx_errors++;
c4e84bde
RM
2528 return NETDEV_TX_BUSY;
2529 }
2530 tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
2531 mac_iocb_ptr = tx_ring_desc->queue_entry;
e332471c 2532 memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
c4e84bde
RM
2533
2534 mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
2535 mac_iocb_ptr->tid = tx_ring_desc->index;
2536 /* We use the upper 32-bits to store the tx queue for this IO.
2537 * When we get the completion we can use it to establish the context.
2538 */
2539 mac_iocb_ptr->txq_idx = tx_ring_idx;
2540 tx_ring_desc->skb = skb;
2541
2542 mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
2543
2544 if (qdev->vlgrp && vlan_tx_tag_present(skb)) {
2545 QPRINTK(qdev, TX_QUEUED, DEBUG, "Adding a vlan tag %d.\n",
2546 vlan_tx_tag_get(skb));
2547 mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
2548 mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb));
2549 }
2550 tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2551 if (tso < 0) {
2552 dev_kfree_skb_any(skb);
2553 return NETDEV_TX_OK;
2554 } else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
2555 ql_hw_csum_setup(skb,
2556 (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2557 }
0d979f74
RM
2558 if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
2559 NETDEV_TX_OK) {
2560 QPRINTK(qdev, TX_QUEUED, ERR,
2561 "Could not map the segments.\n");
885ee398 2562 tx_ring->tx_errors++;
0d979f74
RM
2563 return NETDEV_TX_BUSY;
2564 }
c4e84bde
RM
2565 QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
2566 tx_ring->prod_idx++;
2567 if (tx_ring->prod_idx == tx_ring->wq_len)
2568 tx_ring->prod_idx = 0;
2569 wmb();
2570
2571 ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
c4e84bde
RM
2572 QPRINTK(qdev, TX_QUEUED, DEBUG, "tx queued, slot %d, len %d\n",
2573 tx_ring->prod_idx, skb->len);
2574
2575 atomic_dec(&tx_ring->tx_count);
2576 return NETDEV_TX_OK;
2577}
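/* Added note (not in the original source): the wmb() in qlge_send()
 * orders the IOCB writes into host memory ahead of the producer-index
 * doorbell write, so the chip never fetches a partially written
 * request.  skb->queue_mapping, set by the stack, selects which tx
 * ring (and therefore which doorbell register) is used.
 */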
2578
9dfbbaa6 2579
c4e84bde
RM
2580static void ql_free_shadow_space(struct ql_adapter *qdev)
2581{
2582 if (qdev->rx_ring_shadow_reg_area) {
2583 pci_free_consistent(qdev->pdev,
2584 PAGE_SIZE,
2585 qdev->rx_ring_shadow_reg_area,
2586 qdev->rx_ring_shadow_reg_dma);
2587 qdev->rx_ring_shadow_reg_area = NULL;
2588 }
2589 if (qdev->tx_ring_shadow_reg_area) {
2590 pci_free_consistent(qdev->pdev,
2591 PAGE_SIZE,
2592 qdev->tx_ring_shadow_reg_area,
2593 qdev->tx_ring_shadow_reg_dma);
2594 qdev->tx_ring_shadow_reg_area = NULL;
2595 }
2596}
2597
2598static int ql_alloc_shadow_space(struct ql_adapter *qdev)
2599{
2600 qdev->rx_ring_shadow_reg_area =
2601 pci_alloc_consistent(qdev->pdev,
2602 PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma);
2603 if (qdev->rx_ring_shadow_reg_area == NULL) {
2604 QPRINTK(qdev, IFUP, ERR,
2605 "Allocation of RX shadow space failed.\n");
2606 return -ENOMEM;
2607 }
b25215d0 2608 memset(qdev->rx_ring_shadow_reg_area, 0, PAGE_SIZE);
c4e84bde
RM
2609 qdev->tx_ring_shadow_reg_area =
2610 pci_alloc_consistent(qdev->pdev, PAGE_SIZE,
2611 &qdev->tx_ring_shadow_reg_dma);
2612 if (qdev->tx_ring_shadow_reg_area == NULL) {
2613 QPRINTK(qdev, IFUP, ERR,
2614 "Allocation of TX shadow space failed.\n");
2615 goto err_wqp_sh_area;
2616 }
b25215d0 2617 memset(qdev->tx_ring_shadow_reg_area, 0, PAGE_SIZE);
c4e84bde
RM
2618 return 0;
2619
2620err_wqp_sh_area:
2621 pci_free_consistent(qdev->pdev,
2622 PAGE_SIZE,
2623 qdev->rx_ring_shadow_reg_area,
2624 qdev->rx_ring_shadow_reg_dma);
2625 return -ENOMEM;
2626}
2627
2628static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2629{
2630 struct tx_ring_desc *tx_ring_desc;
2631 int i;
2632 struct ob_mac_iocb_req *mac_iocb_ptr;
2633
2634 mac_iocb_ptr = tx_ring->wq_base;
2635 tx_ring_desc = tx_ring->q;
2636 for (i = 0; i < tx_ring->wq_len; i++) {
2637 tx_ring_desc->index = i;
2638 tx_ring_desc->skb = NULL;
2639 tx_ring_desc->queue_entry = mac_iocb_ptr;
2640 mac_iocb_ptr++;
2641 tx_ring_desc++;
2642 }
2643 atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
2644 atomic_set(&tx_ring->queue_stopped, 0);
2645}
2646
2647static void ql_free_tx_resources(struct ql_adapter *qdev,
2648 struct tx_ring *tx_ring)
2649{
2650 if (tx_ring->wq_base) {
2651 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2652 tx_ring->wq_base, tx_ring->wq_base_dma);
2653 tx_ring->wq_base = NULL;
2654 }
2655 kfree(tx_ring->q);
2656 tx_ring->q = NULL;
2657}
2658
2659static int ql_alloc_tx_resources(struct ql_adapter *qdev,
2660 struct tx_ring *tx_ring)
2661{
2662 tx_ring->wq_base =
2663 pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
2664 &tx_ring->wq_base_dma);
2665
8e95a202
JP
2666 if ((tx_ring->wq_base == NULL) ||
2667 tx_ring->wq_base_dma & WQ_ADDR_ALIGN) {
c4e84bde
RM
2668 QPRINTK(qdev, IFUP, ERR, "tx_ring alloc failed.\n");
2669 return -ENOMEM;
2670 }
2671 tx_ring->q =
2672 kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
2673 if (tx_ring->q == NULL)
2674 goto err;
2675
2676 return 0;
2677err:
2678 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2679 tx_ring->wq_base, tx_ring->wq_base_dma);
2680 return -ENOMEM;
2681}
2682
8668ae92 2683static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
c4e84bde 2684{
c4e84bde
RM
2685 struct bq_desc *lbq_desc;
2686
7c734359
RM
2687 uint32_t curr_idx, clean_idx;
2688
2689 curr_idx = rx_ring->lbq_curr_idx;
2690 clean_idx = rx_ring->lbq_clean_idx;
2691 while (curr_idx != clean_idx) {
2692 lbq_desc = &rx_ring->lbq[curr_idx];
2693
2694 if (lbq_desc->p.pg_chunk.last_flag) {
c4e84bde 2695 pci_unmap_page(qdev->pdev,
7c734359
RM
2696 lbq_desc->p.pg_chunk.map,
2697 ql_lbq_block_size(qdev),
c4e84bde 2698 PCI_DMA_FROMDEVICE);
7c734359 2699 lbq_desc->p.pg_chunk.last_flag = 0;
c4e84bde 2700 }
7c734359
RM
2701
2702 put_page(lbq_desc->p.pg_chunk.page);
2703 lbq_desc->p.pg_chunk.page = NULL;
2704
2705 if (++curr_idx == rx_ring->lbq_len)
2706 curr_idx = 0;
2707
c4e84bde
RM
2708 }
2709}
2710
8668ae92 2711static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
c4e84bde
RM
2712{
2713 int i;
2714 struct bq_desc *sbq_desc;
2715
2716 for (i = 0; i < rx_ring->sbq_len; i++) {
2717 sbq_desc = &rx_ring->sbq[i];
2718 if (sbq_desc == NULL) {
2719 QPRINTK(qdev, IFUP, ERR, "sbq_desc %d is NULL.\n", i);
2720 return;
2721 }
2722 if (sbq_desc->p.skb) {
2723 pci_unmap_single(qdev->pdev,
2724 pci_unmap_addr(sbq_desc, mapaddr),
2725 pci_unmap_len(sbq_desc, maplen),
2726 PCI_DMA_FROMDEVICE);
2727 dev_kfree_skb(sbq_desc->p.skb);
2728 sbq_desc->p.skb = NULL;
2729 }
c4e84bde
RM
2730 }
2731}
2732
4545a3f2
RM
2733/* Free all large and small rx buffers associated
2734 * with the completion queues for this device.
2735 */
2736static void ql_free_rx_buffers(struct ql_adapter *qdev)
2737{
2738 int i;
2739 struct rx_ring *rx_ring;
2740
2741 for (i = 0; i < qdev->rx_ring_count; i++) {
2742 rx_ring = &qdev->rx_ring[i];
2743 if (rx_ring->lbq)
2744 ql_free_lbq_buffers(qdev, rx_ring);
2745 if (rx_ring->sbq)
2746 ql_free_sbq_buffers(qdev, rx_ring);
2747 }
2748}
2749
2750static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
2751{
2752 struct rx_ring *rx_ring;
2753 int i;
2754
2755 for (i = 0; i < qdev->rx_ring_count; i++) {
2756 rx_ring = &qdev->rx_ring[i];
2757 if (rx_ring->type != TX_Q)
2758 ql_update_buffer_queues(qdev, rx_ring);
2759 }
2760}
2761
2762static void ql_init_lbq_ring(struct ql_adapter *qdev,
2763 struct rx_ring *rx_ring)
2764{
2765 int i;
2766 struct bq_desc *lbq_desc;
2767 __le64 *bq = rx_ring->lbq_base;
2768
2769 memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc));
2770 for (i = 0; i < rx_ring->lbq_len; i++) {
2771 lbq_desc = &rx_ring->lbq[i];
2772 memset(lbq_desc, 0, sizeof(*lbq_desc));
2773 lbq_desc->index = i;
2774 lbq_desc->addr = bq;
2775 bq++;
2776 }
2777}
2778
2779static void ql_init_sbq_ring(struct ql_adapter *qdev,
c4e84bde
RM
2780 struct rx_ring *rx_ring)
2781{
2782 int i;
2783 struct bq_desc *sbq_desc;
2c9a0d41 2784 __le64 *bq = rx_ring->sbq_base;
c4e84bde 2785
4545a3f2 2786 memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc));
c4e84bde
RM
2787 for (i = 0; i < rx_ring->sbq_len; i++) {
2788 sbq_desc = &rx_ring->sbq[i];
4545a3f2 2789 memset(sbq_desc, 0, sizeof(*sbq_desc));
c4e84bde 2790 sbq_desc->index = i;
2c9a0d41 2791 sbq_desc->addr = bq;
c4e84bde
RM
2792 bq++;
2793 }
c4e84bde
RM
2794}
2795
2796static void ql_free_rx_resources(struct ql_adapter *qdev,
2797 struct rx_ring *rx_ring)
2798{
c4e84bde
RM
2799 /* Free the small buffer queue. */
2800 if (rx_ring->sbq_base) {
2801 pci_free_consistent(qdev->pdev,
2802 rx_ring->sbq_size,
2803 rx_ring->sbq_base, rx_ring->sbq_base_dma);
2804 rx_ring->sbq_base = NULL;
2805 }
2806
2807 /* Free the small buffer queue control blocks. */
2808 kfree(rx_ring->sbq);
2809 rx_ring->sbq = NULL;
2810
2811 /* Free the large buffer queue. */
2812 if (rx_ring->lbq_base) {
2813 pci_free_consistent(qdev->pdev,
2814 rx_ring->lbq_size,
2815 rx_ring->lbq_base, rx_ring->lbq_base_dma);
2816 rx_ring->lbq_base = NULL;
2817 }
2818
2819 /* Free the large buffer queue control blocks. */
2820 kfree(rx_ring->lbq);
2821 rx_ring->lbq = NULL;
2822
2823 /* Free the rx queue. */
2824 if (rx_ring->cq_base) {
2825 pci_free_consistent(qdev->pdev,
2826 rx_ring->cq_size,
2827 rx_ring->cq_base, rx_ring->cq_base_dma);
2828 rx_ring->cq_base = NULL;
2829 }
2830}
2831
2832/* Allocate queues and buffers for this completions queue based
2833 * on the values in the parameter structure. */
2834static int ql_alloc_rx_resources(struct ql_adapter *qdev,
2835 struct rx_ring *rx_ring)
2836{
2837
2838 /*
2839 * Allocate the completion queue for this rx_ring.
2840 */
2841 rx_ring->cq_base =
2842 pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
2843 &rx_ring->cq_base_dma);
2844
2845 if (rx_ring->cq_base == NULL) {
2846 QPRINTK(qdev, IFUP, ERR, "rx_ring alloc failed.\n");
2847 return -ENOMEM;
2848 }
2849
2850 if (rx_ring->sbq_len) {
2851 /*
2852 * Allocate small buffer queue.
2853 */
2854 rx_ring->sbq_base =
2855 pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
2856 &rx_ring->sbq_base_dma);
2857
2858 if (rx_ring->sbq_base == NULL) {
2859 QPRINTK(qdev, IFUP, ERR,
2860 "Small buffer queue allocation failed.\n");
2861 goto err_mem;
2862 }
2863
2864 /*
2865 * Allocate small buffer queue control blocks.
2866 */
2867 rx_ring->sbq =
2868 kmalloc(rx_ring->sbq_len * sizeof(struct bq_desc),
2869 GFP_KERNEL);
2870 if (rx_ring->sbq == NULL) {
2871 QPRINTK(qdev, IFUP, ERR,
2872 "Small buffer queue control block allocation failed.\n");
2873 goto err_mem;
2874 }
2875
4545a3f2 2876 ql_init_sbq_ring(qdev, rx_ring);
c4e84bde
RM
2877 }
2878
2879 if (rx_ring->lbq_len) {
2880 /*
2881 * Allocate large buffer queue.
2882 */
2883 rx_ring->lbq_base =
2884 pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
2885 &rx_ring->lbq_base_dma);
2886
2887 if (rx_ring->lbq_base == NULL) {
2888 QPRINTK(qdev, IFUP, ERR,
2889 "Large buffer queue allocation failed.\n");
2890 goto err_mem;
2891 }
2892 /*
2893 * Allocate large buffer queue control blocks.
2894 */
2895 rx_ring->lbq =
2896 kmalloc(rx_ring->lbq_len * sizeof(struct bq_desc),
2897 GFP_KERNEL);
2898 if (rx_ring->lbq == NULL) {
2899 QPRINTK(qdev, IFUP, ERR,
2900 "Large buffer queue control block allocation failed.\n");
2901 goto err_mem;
2902 }
2903
4545a3f2 2904 ql_init_lbq_ring(qdev, rx_ring);
c4e84bde
RM
2905 }
2906
2907 return 0;
2908
2909err_mem:
2910 ql_free_rx_resources(qdev, rx_ring);
2911 return -ENOMEM;
2912}
2913
2914static void ql_tx_ring_clean(struct ql_adapter *qdev)
2915{
2916 struct tx_ring *tx_ring;
2917 struct tx_ring_desc *tx_ring_desc;
2918 int i, j;
2919
2920 /*
2921 * Loop through all queues and free
2922 * any resources.
2923 */
2924 for (j = 0; j < qdev->tx_ring_count; j++) {
2925 tx_ring = &qdev->tx_ring[j];
2926 for (i = 0; i < tx_ring->wq_len; i++) {
2927 tx_ring_desc = &tx_ring->q[i];
2928 if (tx_ring_desc && tx_ring_desc->skb) {
2929 QPRINTK(qdev, IFDOWN, ERR,
2930 "Freeing lost SKB %p, from queue %d, index %d.\n",
2931 tx_ring_desc->skb, j,
2932 tx_ring_desc->index);
2933 ql_unmap_send(qdev, tx_ring_desc,
2934 tx_ring_desc->map_cnt);
2935 dev_kfree_skb(tx_ring_desc->skb);
2936 tx_ring_desc->skb = NULL;
2937 }
2938 }
2939 }
2940}
2941
c4e84bde
RM
2942static void ql_free_mem_resources(struct ql_adapter *qdev)
2943{
2944 int i;
2945
2946 for (i = 0; i < qdev->tx_ring_count; i++)
2947 ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
2948 for (i = 0; i < qdev->rx_ring_count; i++)
2949 ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
2950 ql_free_shadow_space(qdev);
2951}
2952
2953static int ql_alloc_mem_resources(struct ql_adapter *qdev)
2954{
2955 int i;
2956
2957 /* Allocate space for our shadow registers and such. */
2958 if (ql_alloc_shadow_space(qdev))
2959 return -ENOMEM;
2960
2961 for (i = 0; i < qdev->rx_ring_count; i++) {
2962 if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
2963 QPRINTK(qdev, IFUP, ERR,
2964 "RX resource allocation failed.\n");
2965 goto err_mem;
2966 }
2967 }
2968 /* Allocate tx queue resources */
2969 for (i = 0; i < qdev->tx_ring_count; i++) {
2970 if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
2971 QPRINTK(qdev, IFUP, ERR,
2972 "TX resource allocation failed.\n");
2973 goto err_mem;
2974 }
2975 }
2976 return 0;
2977
2978err_mem:
2979 ql_free_mem_resources(qdev);
2980 return -ENOMEM;
2981}
2982
2983/* Set up the rx ring control block and pass it to the chip.
2984 * The control block is defined as
2985 * "Completion Queue Initialization Control Block", or cqicb.
2986 */
2987static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2988{
2989 struct cqicb *cqicb = &rx_ring->cqicb;
2990 void *shadow_reg = qdev->rx_ring_shadow_reg_area +
b8facca0 2991 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
c4e84bde 2992 u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
b8facca0 2993 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
c4e84bde
RM
2994 void __iomem *doorbell_area =
2995 qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
2996 int err = 0;
2997 u16 bq_len;
d4a4aba6 2998 u64 tmp;
b8facca0
RM
2999 __le64 *base_indirect_ptr;
3000 int page_entries;
c4e84bde
RM
3001
3002 /* Set up the shadow registers for this ring. */
3003 rx_ring->prod_idx_sh_reg = shadow_reg;
3004 rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
7c734359 3005 *rx_ring->prod_idx_sh_reg = 0;
c4e84bde
RM
3006 shadow_reg += sizeof(u64);
3007 shadow_reg_dma += sizeof(u64);
3008 rx_ring->lbq_base_indirect = shadow_reg;
3009 rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
b8facca0
RM
3010 shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3011 shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
c4e84bde
RM
3012 rx_ring->sbq_base_indirect = shadow_reg;
3013 rx_ring->sbq_base_indirect_dma = shadow_reg_dma;
3014
3015 /* PCI doorbell mem area + 0x00 for consumer index register */
8668ae92 3016 rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
c4e84bde
RM
3017 rx_ring->cnsmr_idx = 0;
3018 rx_ring->curr_entry = rx_ring->cq_base;
3019
3020 /* PCI doorbell mem area + 0x04 for valid register */
3021 rx_ring->valid_db_reg = doorbell_area + 0x04;
3022
3023 /* PCI doorbell mem area + 0x18 for large buffer consumer */
8668ae92 3024 rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);
c4e84bde
RM
3025
3026 /* PCI doorbell mem area + 0x1c */
8668ae92 3027 rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);
c4e84bde
RM
3028
3029 memset((void *)cqicb, 0, sizeof(struct cqicb));
3030 cqicb->msix_vect = rx_ring->irq;
3031
459caf5a
RM
3032 bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
3033 cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);
c4e84bde 3034
97345524 3035 cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);
c4e84bde 3036
97345524 3037 cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);
c4e84bde
RM
3038
3039 /*
3040 * Set up the control block load flags.
3041 */
3042 cqicb->flags = FLAGS_LC | /* Load queue base address */
3043 FLAGS_LV | /* Load MSI-X vector */
3044 FLAGS_LI; /* Load irq delay values */
3045 if (rx_ring->lbq_len) {
3046 cqicb->flags |= FLAGS_LL; /* Load lbq values */
a419aef8 3047 tmp = (u64)rx_ring->lbq_base_dma;
b8facca0
RM
3048 base_indirect_ptr = (__le64 *) rx_ring->lbq_base_indirect;
3049 page_entries = 0;
3050 do {
3051 *base_indirect_ptr = cpu_to_le64(tmp);
3052 tmp += DB_PAGE_SIZE;
3053 base_indirect_ptr++;
3054 page_entries++;
3055 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
97345524
RM
3056 cqicb->lbq_addr =
3057 cpu_to_le64(rx_ring->lbq_base_indirect_dma);
459caf5a
RM
3058 bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
3059 (u16) rx_ring->lbq_buf_size;
3060 cqicb->lbq_buf_size = cpu_to_le16(bq_len);
3061 bq_len = (rx_ring->lbq_len == 65536) ? 0 :
3062 (u16) rx_ring->lbq_len;
c4e84bde 3063 cqicb->lbq_len = cpu_to_le16(bq_len);
4545a3f2 3064 rx_ring->lbq_prod_idx = 0;
c4e84bde 3065 rx_ring->lbq_curr_idx = 0;
4545a3f2
RM
3066 rx_ring->lbq_clean_idx = 0;
3067 rx_ring->lbq_free_cnt = rx_ring->lbq_len;
c4e84bde
RM
3068 }
3069 if (rx_ring->sbq_len) {
3070 cqicb->flags |= FLAGS_LS; /* Load sbq values */
a419aef8 3071 tmp = (u64)rx_ring->sbq_base_dma;
b8facca0
RM
3072 base_indirect_ptr = (__le64 *) rx_ring->sbq_base_indirect;
3073 page_entries = 0;
3074 do {
3075 *base_indirect_ptr = cpu_to_le64(tmp);
3076 tmp += DB_PAGE_SIZE;
3077 base_indirect_ptr++;
3078 page_entries++;
3079 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len));
97345524
RM
3080 cqicb->sbq_addr =
3081 cpu_to_le64(rx_ring->sbq_base_indirect_dma);
c4e84bde 3082 cqicb->sbq_buf_size =
52e55f3c 3083 cpu_to_le16((u16)(rx_ring->sbq_buf_size));
459caf5a
RM
3084 bq_len = (rx_ring->sbq_len == 65536) ? 0 :
3085 (u16) rx_ring->sbq_len;
c4e84bde 3086 cqicb->sbq_len = cpu_to_le16(bq_len);
4545a3f2 3087 rx_ring->sbq_prod_idx = 0;
c4e84bde 3088 rx_ring->sbq_curr_idx = 0;
4545a3f2
RM
3089 rx_ring->sbq_clean_idx = 0;
3090 rx_ring->sbq_free_cnt = rx_ring->sbq_len;
c4e84bde
RM
3091 }
3092 switch (rx_ring->type) {
3093 case TX_Q:
c4e84bde
RM
3094 cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
3095 cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
3096 break;
c4e84bde
RM
3097 case RX_Q:
3098 /* Inbound completion handling rx_rings run in
3099 * separate NAPI contexts.
3100 */
3101 netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
3102 64);
3103 cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
3104 cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
3105 break;
3106 default:
3107 QPRINTK(qdev, IFUP, DEBUG, "Invalid rx_ring->type = %d.\n",
3108 rx_ring->type);
3109 }
4974097a 3110 QPRINTK(qdev, IFUP, DEBUG, "Initializing rx work queue.\n");
c4e84bde
RM
3111 err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
3112 CFG_LCQ, rx_ring->cq_id);
3113 if (err) {
3114 QPRINTK(qdev, IFUP, ERR, "Failed to load CQICB.\n");
3115 return err;
3116 }
c4e84bde
RM
3117 return err;
3118}
3119
3120static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
3121{
3122 struct wqicb *wqicb = (struct wqicb *)tx_ring;
3123 void __iomem *doorbell_area =
3124 qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
3125 void *shadow_reg = qdev->tx_ring_shadow_reg_area +
3126 (tx_ring->wq_id * sizeof(u64));
3127 u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
3128 (tx_ring->wq_id * sizeof(u64));
3129 int err = 0;
3130
3131 /*
3132 * Assign doorbell registers for this tx_ring.
3133 */
3134 /* TX PCI doorbell mem area for tx producer index */
8668ae92 3135 tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
c4e84bde
RM
3136 tx_ring->prod_idx = 0;
3137 /* TX PCI doorbell mem area + 0x04 */
3138 tx_ring->valid_db_reg = doorbell_area + 0x04;
3139
3140 /*
3141 * Assign shadow registers for this tx_ring.
3142 */
3143 tx_ring->cnsmr_idx_sh_reg = shadow_reg;
3144 tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
3145
3146 wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
3147 wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
3148 Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
3149 wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
3150 wqicb->rid = 0;
97345524 3151 wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);
c4e84bde 3152
97345524 3153 wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);
c4e84bde
RM
3154
3155 ql_init_tx_ring(qdev, tx_ring);
3156
e332471c 3157 err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
c4e84bde
RM
3158 (u16) tx_ring->wq_id);
3159 if (err) {
3160 QPRINTK(qdev, IFUP, ERR, "Failed to load tx_ring.\n");
3161 return err;
3162 }
4974097a 3163 QPRINTK(qdev, IFUP, DEBUG, "Successfully loaded WQICB.\n");
c4e84bde
RM
3164 return err;
3165}
3166
3167static void ql_disable_msix(struct ql_adapter *qdev)
3168{
3169 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3170 pci_disable_msix(qdev->pdev);
3171 clear_bit(QL_MSIX_ENABLED, &qdev->flags);
3172 kfree(qdev->msi_x_entry);
3173 qdev->msi_x_entry = NULL;
3174 } else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
3175 pci_disable_msi(qdev->pdev);
3176 clear_bit(QL_MSI_ENABLED, &qdev->flags);
3177 }
3178}
3179
a4ab6137
RM
3180/* We start by trying to get the number of vectors
3181 * stored in qdev->intr_count. If we don't get that
3182 * many then we reduce the count and try again.
3183 */
c4e84bde
RM
3184static void ql_enable_msix(struct ql_adapter *qdev)
3185{
a4ab6137 3186 int i, err;
c4e84bde 3187
c4e84bde 3188 /* Get the MSIX vectors. */
a5a62a1c 3189 if (qlge_irq_type == MSIX_IRQ) {
c4e84bde
RM
3190 /* Try to alloc space for the msix struct,
3191 * if it fails then go to MSI/legacy.
3192 */
a4ab6137 3193 qdev->msi_x_entry = kcalloc(qdev->intr_count,
c4e84bde
RM
3194 sizeof(struct msix_entry),
3195 GFP_KERNEL);
3196 if (!qdev->msi_x_entry) {
a5a62a1c 3197 qlge_irq_type = MSI_IRQ;
c4e84bde
RM
3198 goto msi;
3199 }
3200
a4ab6137 3201 for (i = 0; i < qdev->intr_count; i++)
c4e84bde
RM
3202 qdev->msi_x_entry[i].entry = i;
3203
a4ab6137
RM
3204 /* Loop to get our vectors. We start with
3205 * what we want and settle for what we get.
3206 */
3207 do {
3208 err = pci_enable_msix(qdev->pdev,
3209 qdev->msi_x_entry, qdev->intr_count);
3210 if (err > 0)
3211 qdev->intr_count = err;
3212 } while (err > 0);
3213
3214 if (err < 0) {
c4e84bde
RM
3215 kfree(qdev->msi_x_entry);
3216 qdev->msi_x_entry = NULL;
3217 QPRINTK(qdev, IFUP, WARNING,
3218 "MSI-X Enable failed, trying MSI.\n");
a4ab6137 3219 qdev->intr_count = 1;
a5a62a1c 3220 qlge_irq_type = MSI_IRQ;
a4ab6137
RM
3221 } else if (err == 0) {
3222 set_bit(QL_MSIX_ENABLED, &qdev->flags);
3223 QPRINTK(qdev, IFUP, INFO,
3224 "MSI-X Enabled, got %d vectors.\n",
3225 qdev->intr_count);
3226 return;
c4e84bde
RM
3227 }
3228 }
3229msi:
a4ab6137 3230 qdev->intr_count = 1;
a5a62a1c 3231 if (qlge_irq_type == MSI_IRQ) {
c4e84bde
RM
3232 if (!pci_enable_msi(qdev->pdev)) {
3233 set_bit(QL_MSI_ENABLED, &qdev->flags);
3234 QPRINTK(qdev, IFUP, INFO,
3235 "Running with MSI interrupts.\n");
3236 return;
3237 }
3238 }
a5a62a1c 3239 qlge_irq_type = LEG_IRQ;
c4e84bde
RM
3240 QPRINTK(qdev, IFUP, DEBUG, "Running with legacy interrupts.\n");
3241}
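/* Added note (not in the original source): the do/while loop above
 * relies on the old pci_enable_msix() contract - 0 means all requested
 * vectors were granted, a positive return is the number of vectors the
 * platform can actually provide (so the request is retried with that
 * count), and a negative return falls back to MSI.
 */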
3242
39aa8165
RM
3243 /* Each vector services 1 RSS ring and 1 or more
3244 * TX completion rings. This function loops through
3245 * the TX completion rings and assigns the vector that
3246 * will service it. An example would be if there are
3247 * 2 vectors (so 2 RSS rings) and 8 TX completion rings.
3248 * This would mean that vector 0 would service RSS ring 0
3249 * and TX completion rings 0,1,2 and 3. Vector 1 would
3250 * service RSS ring 1 and TX completion rings 4,5,6 and 7.
3251 */
3252static void ql_set_tx_vect(struct ql_adapter *qdev)
3253{
3254 int i, j, vect;
3255 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3256
3257 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3258 /* Assign irq vectors to TX rx_rings.*/
3259 for (vect = 0, j = 0, i = qdev->rss_ring_count;
3260 i < qdev->rx_ring_count; i++) {
3261 if (j == tx_rings_per_vector) {
3262 vect++;
3263 j = 0;
3264 }
3265 qdev->rx_ring[i].irq = vect;
3266 j++;
3267 }
3268 } else {
3269 /* For single vector all rings have an irq
3270 * of zero.
3271 */
3272 for (i = 0; i < qdev->rx_ring_count; i++)
3273 qdev->rx_ring[i].irq = 0;
3274 }
3275}
3276
3277/* Set the interrupt mask for this vector. Each vector
3278 * will service 1 RSS ring and 1 or more TX completion
3279 * rings. This function sets up a bit mask per vector
3280 * that indicates which rings it services.
3281 */
3282static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx)
3283{
3284 int j, vect = ctx->intr;
3285 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3286
3287 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3288 /* Add the RSS ring serviced by this vector
3289 * to the mask.
3290 */
3291 ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
3292 /* Add the TX ring(s) serviced by this vector
3293 * to the mask. */
3294 for (j = 0; j < tx_rings_per_vector; j++) {
3295 ctx->irq_mask |=
3296 (1 << qdev->rx_ring[qdev->rss_ring_count +
3297 (vect * tx_rings_per_vector) + j].cq_id);
3298 }
3299 } else {
3300 /* For single vector we just shift each queue's
3301 * ID into the mask.
3302 */
3303 for (j = 0; j < qdev->rx_ring_count; j++)
3304 ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id);
3305 }
3306}
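/* Added worked example (not in the original source): with 2 MSI-X
 * vectors (2 RSS rings) and 8 TX completion rings,
 * tx_rings_per_vector = 4.  For vector 1 the loop above builds
 *   irq_mask = (1 << rx_ring[1].cq_id) |
 *              (1 << rx_ring[2 + 4 + 0].cq_id) | ... |
 *              (1 << rx_ring[2 + 4 + 3].cq_id)
 * i.e. RSS ring 1 plus TX completion rings 4-7, matching the example
 * given above ql_set_tx_vect().
 */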
3307
c4e84bde
RM
3308/*
3309 * Here we build the intr_context structures based on
3310 * our rx_ring count and intr vector count.
3311 * The intr_context structure is used to hook each vector
3312 * to possibly different handlers.
3313 */
3314static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
3315{
3316 int i = 0;
3317 struct intr_context *intr_context = &qdev->intr_context[0];
3318
c4e84bde
RM
3319 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3320 /* Each rx_ring has its
3321 * own intr_context since we have separate
3322 * vectors for each queue.
c4e84bde
RM
3323 */
3324 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3325 qdev->rx_ring[i].irq = i;
3326 intr_context->intr = i;
3327 intr_context->qdev = qdev;
39aa8165
RM
3328 /* Set up this vector's bit-mask that indicates
3329 * which queues it services.
3330 */
3331 ql_set_irq_mask(qdev, intr_context);
c4e84bde
RM
3332 /*
3333 * We set up each vectors enable/disable/read bits so
3334 * there's no bit/mask calculations in the critical path.
3335 */
3336 intr_context->intr_en_mask =
3337 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3338 INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
3339 | i;
3340 intr_context->intr_dis_mask =
3341 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3342 INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
3343 INTR_EN_IHD | i;
3344 intr_context->intr_read_mask =
3345 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3346 INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
3347 i;
39aa8165
RM
3348 if (i == 0) {
3349 /* The first vector/queue handles
3350 * broadcast/multicast, fatal errors,
3351 * and firmware events. This in addition
3352 * to normal inbound NAPI processing.
c4e84bde 3353 */
39aa8165 3354 intr_context->handler = qlge_isr;
b2014ff8
RM
3355 sprintf(intr_context->name, "%s-rx-%d",
3356 qdev->ndev->name, i);
3357 } else {
c4e84bde 3358 /*
39aa8165 3359 * Inbound queues handle unicast frames only.
c4e84bde 3360 */
39aa8165
RM
3361 intr_context->handler = qlge_msix_rx_isr;
3362 sprintf(intr_context->name, "%s-rx-%d",
c4e84bde 3363 qdev->ndev->name, i);
c4e84bde
RM
3364 }
3365 }
3366 } else {
3367 /*
3368 * All rx_rings use the same intr_context since
3369 * there is only one vector.
3370 */
3371 intr_context->intr = 0;
3372 intr_context->qdev = qdev;
3373 /*
3374 * We set up each vectors enable/disable/read bits so
3375 * there's no bit/mask calculations in the critical path.
3376 */
3377 intr_context->intr_en_mask =
3378 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
3379 intr_context->intr_dis_mask =
3380 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3381 INTR_EN_TYPE_DISABLE;
3382 intr_context->intr_read_mask =
3383 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
3384 /*
3385 * Single interrupt means one handler for all rings.
3386 */
3387 intr_context->handler = qlge_isr;
3388 sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
39aa8165
RM
3389 /* Set up this vector's bit-mask that indicates
3390 * which queues it services. In this case there is
3391 * a single vector so it will service all RSS and
3392 * TX completion rings.
3393 */
3394 ql_set_irq_mask(qdev, intr_context);
c4e84bde 3395 }
39aa8165
RM
3396 /* Tell the TX completion rings which MSIx vector
3397 * they will be using.
3398 */
3399 ql_set_tx_vect(qdev);
c4e84bde
RM
3400}
3401
3402static void ql_free_irq(struct ql_adapter *qdev)
3403{
3404 int i;
3405 struct intr_context *intr_context = &qdev->intr_context[0];
3406
3407 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3408 if (intr_context->hooked) {
3409 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3410 free_irq(qdev->msi_x_entry[i].vector,
3411 &qdev->rx_ring[i]);
4974097a 3412 QPRINTK(qdev, IFDOWN, DEBUG,
c4e84bde
RM
3413 "freeing msix interrupt %d.\n", i);
3414 } else {
3415 free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
4974097a 3416 QPRINTK(qdev, IFDOWN, DEBUG,
c4e84bde
RM
3417 "freeing msi interrupt %d.\n", i);
3418 }
3419 }
3420 }
3421 ql_disable_msix(qdev);
3422}
3423
3424static int ql_request_irq(struct ql_adapter *qdev)
3425{
3426 int i;
3427 int status = 0;
3428 struct pci_dev *pdev = qdev->pdev;
3429 struct intr_context *intr_context = &qdev->intr_context[0];
3430
3431 ql_resolve_queues_to_irqs(qdev);
3432
3433 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3434 atomic_set(&intr_context->irq_cnt, 0);
3435 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3436 status = request_irq(qdev->msi_x_entry[i].vector,
3437 intr_context->handler,
3438 0,
3439 intr_context->name,
3440 &qdev->rx_ring[i]);
3441 if (status) {
3442 QPRINTK(qdev, IFUP, ERR,
3443 "Failed request for MSIX interrupt %d.\n",
3444 i);
3445 goto err_irq;
3446 } else {
4974097a 3447 QPRINTK(qdev, IFUP, DEBUG,
c4e84bde
RM
3448 "Hooked intr %d, queue type %s%s%s, with name %s.\n",
3449 i,
3450 qdev->rx_ring[i].type ==
3451 DEFAULT_Q ? "DEFAULT_Q" : "",
3452 qdev->rx_ring[i].type ==
3453 TX_Q ? "TX_Q" : "",
3454 qdev->rx_ring[i].type ==
3455 RX_Q ? "RX_Q" : "", intr_context->name);
3456 }
3457 } else {
3458 QPRINTK(qdev, IFUP, DEBUG,
3459 "trying msi or legacy interrupts.\n");
3460 QPRINTK(qdev, IFUP, DEBUG,
3461 "%s: irq = %d.\n", __func__, pdev->irq);
3462 QPRINTK(qdev, IFUP, DEBUG,
3463 "%s: context->name = %s.\n", __func__,
3464 intr_context->name);
3465 QPRINTK(qdev, IFUP, DEBUG,
3466 "%s: dev_id = 0x%p.\n", __func__,
3467 &qdev->rx_ring[0]);
3468 status =
3469 request_irq(pdev->irq, qlge_isr,
3470 test_bit(QL_MSI_ENABLED,
3471 &qdev->
3472 flags) ? 0 : IRQF_SHARED,
3473 intr_context->name, &qdev->rx_ring[0]);
3474 if (status)
3475 goto err_irq;
3476
3477 QPRINTK(qdev, IFUP, ERR,
3478 "Hooked intr %d, queue type %s%s%s, with name %s.\n",
3479 i,
3480 qdev->rx_ring[0].type ==
3481 DEFAULT_Q ? "DEFAULT_Q" : "",
3482 qdev->rx_ring[0].type == TX_Q ? "TX_Q" : "",
3483 qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
3484 intr_context->name);
3485 }
3486 intr_context->hooked = 1;
3487 }
3488 return status;
3489err_irq:
3490 QPRINTK(qdev, IFUP, ERR, "Failed to get the interrupts!!!\n");
3491 ql_free_irq(qdev);
3492 return status;
3493}
3494
3495static int ql_start_rss(struct ql_adapter *qdev)
3496{
541ae28c
RM
3497 u8 init_hash_seed[] = {0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
3498 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f,
3499 0xb0, 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b,
3500 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80,
3501 0x30, 0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b,
3502 0xbe, 0xac, 0x01, 0xfa};
c4e84bde
RM
3503 struct ricb *ricb = &qdev->ricb;
3504 int status = 0;
3505 int i;
3506 u8 *hash_id = (u8 *) ricb->hash_cq_id;
3507
e332471c 3508 memset((void *)ricb, 0, sizeof(*ricb));
c4e84bde 3509
b2014ff8 3510 ricb->base_cq = RSS_L4K;
c4e84bde 3511 ricb->flags =
541ae28c
RM
3512 (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6);
3513 ricb->mask = cpu_to_le16((u16)(0x3ff));
c4e84bde
RM
3514
3515 /*
3516 * Fill out the Indirection Table.
3517 */
541ae28c
RM
3518 for (i = 0; i < 1024; i++)
3519 hash_id[i] = (i & (qdev->rss_ring_count - 1));
c4e84bde 3520
541ae28c
RM
3521 memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
3522 memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);
c4e84bde 3523
4974097a 3524 QPRINTK(qdev, IFUP, DEBUG, "Initializing RSS.\n");
c4e84bde 3525
e332471c 3526 status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
c4e84bde
RM
3527 if (status) {
3528 QPRINTK(qdev, IFUP, ERR, "Failed to load RICB.\n");
3529 return status;
3530 }
4974097a 3531 QPRINTK(qdev, IFUP, DEBUG, "Successfully loaded RICB.\n");
c4e84bde
RM
3532 return status;
3533}
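/* Added worked example (not in the original source): the indirection
 * table fill above masks the index with (rss_ring_count - 1), which
 * assumes a power-of-two ring count.  With rss_ring_count = 4 the 1024
 * hash_cq_id entries cycle 0,1,2,3,0,1,... so RSS hash values spread
 * evenly across the inbound completion queues.
 */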
3534
a5f59dc9 3535static int ql_clear_routing_entries(struct ql_adapter *qdev)
c4e84bde 3536{
a5f59dc9 3537 int i, status = 0;
c4e84bde 3538
8587ea35
RM
3539 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3540 if (status)
3541 return status;
c4e84bde
RM
3542 /* Clear all the entries in the routing table. */
3543 for (i = 0; i < 16; i++) {
3544 status = ql_set_routing_reg(qdev, i, 0, 0);
3545 if (status) {
3546 QPRINTK(qdev, IFUP, ERR,
a5f59dc9
RM
3547 "Failed to init routing register for CAM "
3548 "packets.\n");
3549 break;
c4e84bde
RM
3550 }
3551 }
a5f59dc9
RM
3552 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3553 return status;
3554}
3555
3556/* Initialize the frame-to-queue routing. */
3557static int ql_route_initialize(struct ql_adapter *qdev)
3558{
3559 int status = 0;
3560
fd21cf52
RM
3561 /* Clear all the entries in the routing table. */
3562 status = ql_clear_routing_entries(qdev);
a5f59dc9
RM
3563 if (status)
3564 return status;
3565
fd21cf52 3566 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
a5f59dc9 3567 if (status)
fd21cf52 3568 return status;
c4e84bde
RM
3569
3570 status = ql_set_routing_reg(qdev, RT_IDX_ALL_ERR_SLOT, RT_IDX_ERR, 1);
3571 if (status) {
3572 QPRINTK(qdev, IFUP, ERR,
3573 "Failed to init routing register for error packets.\n");
8587ea35 3574 goto exit;
c4e84bde
RM
3575 }
3576 status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
3577 if (status) {
3578 QPRINTK(qdev, IFUP, ERR,
3579 "Failed to init routing register for broadcast packets.\n");
8587ea35 3580 goto exit;
c4e84bde
RM
3581 }
3582 /* If we have more than one inbound queue, then turn on RSS in the
3583 * routing block.
3584 */
3585 if (qdev->rss_ring_count > 1) {
3586 status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
3587 RT_IDX_RSS_MATCH, 1);
3588 if (status) {
3589 QPRINTK(qdev, IFUP, ERR,
3590 "Failed to init routing register for MATCH RSS packets.\n");
8587ea35 3591 goto exit;
c4e84bde
RM
3592 }
3593 }
3594
3595 status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
3596 RT_IDX_CAM_HIT, 1);
8587ea35 3597 if (status)
c4e84bde
RM
3598 QPRINTK(qdev, IFUP, ERR,
3599 "Failed to init routing register for CAM packets.\n");
8587ea35
RM
3600exit:
3601 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
c4e84bde
RM
3602 return status;
3603}
3604
2ee1e272 3605int ql_cam_route_initialize(struct ql_adapter *qdev)
bb58b5b6 3606{
7fab3bfe 3607 int status, set;
bb58b5b6 3608
7fab3bfe
RM
3609 	/* Check if the link is up and use that to
3610 	 * determine whether we are setting or clearing
3611 * the MAC address in the CAM.
3612 */
3613 set = ql_read32(qdev, STS);
3614 set &= qdev->port_link_up;
3615 status = ql_set_mac_addr(qdev, set);
bb58b5b6
RM
3616 if (status) {
3617 QPRINTK(qdev, IFUP, ERR, "Failed to init mac address.\n");
3618 return status;
3619 }
3620
3621 status = ql_route_initialize(qdev);
3622 if (status)
3623 QPRINTK(qdev, IFUP, ERR, "Failed to init routing table.\n");
3624
3625 return status;
3626}
3627
c4e84bde
RM
3628static int ql_adapter_initialize(struct ql_adapter *qdev)
3629{
3630 u32 value, mask;
3631 int i;
3632 int status = 0;
3633
3634 /*
3635 * Set up the System register to halt on errors.
3636 */
3637 value = SYS_EFE | SYS_FAE;
3638 mask = value << 16;
3639 ql_write32(qdev, SYS, mask | value);
3640
c9cf0a04
RM
3641 /* Set the default queue, and VLAN behavior. */
3642 value = NIC_RCV_CFG_DFQ | NIC_RCV_CFG_RV;
3643 mask = NIC_RCV_CFG_DFQ_MASK | (NIC_RCV_CFG_RV << 16);
c4e84bde
RM
3644 ql_write32(qdev, NIC_RCV_CFG, (mask | value));
3645
3646 /* Set the MPI interrupt to enabled. */
3647 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
3648
3649 /* Enable the function, set pagesize, enable error checking. */
3650 value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
572c526f
RM
3651 FSC_EC | FSC_VM_PAGE_4K;
3652 value |= SPLT_SETTING;
c4e84bde
RM
3653
3654 /* Set/clear header splitting. */
3655 mask = FSC_VM_PAGESIZE_MASK |
3656 FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
3657 ql_write32(qdev, FSC, mask | value);
3658
572c526f 3659 ql_write32(qdev, SPLT_HDR, SPLT_LEN);
c4e84bde 3660
a3b71939
RM
3661 	/* Set RX packet routing to use the port/pci function on which
3662 	 * the packet arrived, in addition to the usual frame routing.
3663 	 * This is helpful with bonding, where both interfaces can have
3664 * the same MAC address.
3665 */
3666 ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
bc083ce9
RM
3667 /* Reroute all packets to our Interface.
3668 * They may have been routed to MPI firmware
3669 * due to WOL.
3670 */
3671 value = ql_read32(qdev, MGMT_RCV_CFG);
3672 value &= ~MGMT_RCV_CFG_RM;
3673 mask = 0xffff0000;
3674
3675 /* Sticky reg needs clearing due to WOL. */
3676 ql_write32(qdev, MGMT_RCV_CFG, mask);
3677 ql_write32(qdev, MGMT_RCV_CFG, mask | value);
3678
3679 	/* Default WOL is enabled on Mezz cards */
3680 if (qdev->pdev->subsystem_device == 0x0068 ||
3681 qdev->pdev->subsystem_device == 0x0180)
3682 qdev->wol = WAKE_MAGIC;
a3b71939 3683
c4e84bde
RM
3684 /* Start up the rx queues. */
3685 for (i = 0; i < qdev->rx_ring_count; i++) {
3686 status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
3687 if (status) {
3688 QPRINTK(qdev, IFUP, ERR,
3689 "Failed to start rx ring[%d].\n", i);
3690 return status;
3691 }
3692 }
3693
3694 /* If there is more than one inbound completion queue
3695 * then download a RICB to configure RSS.
3696 */
3697 if (qdev->rss_ring_count > 1) {
3698 status = ql_start_rss(qdev);
3699 if (status) {
3700 QPRINTK(qdev, IFUP, ERR, "Failed to start RSS.\n");
3701 return status;
3702 }
3703 }
3704
3705 /* Start up the tx queues. */
3706 for (i = 0; i < qdev->tx_ring_count; i++) {
3707 status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
3708 if (status) {
3709 QPRINTK(qdev, IFUP, ERR,
3710 "Failed to start tx ring[%d].\n", i);
3711 return status;
3712 }
3713 }
3714
b0c2aadf
RM
3715 	/* Initialize the port and set the max frame size. */
3716 status = qdev->nic_ops->port_initialize(qdev);
80928860
RM
3717 if (status)
3718 QPRINTK(qdev, IFUP, ERR, "Failed to start port.\n");
c4e84bde 3719
bb58b5b6
RM
3720 /* Set up the MAC address and frame routing filter. */
3721 status = ql_cam_route_initialize(qdev);
c4e84bde 3722 if (status) {
bb58b5b6
RM
3723 QPRINTK(qdev, IFUP, ERR,
3724 "Failed to init CAM/Routing tables.\n");
c4e84bde
RM
3725 return status;
3726 }
3727
3728 /* Start NAPI for the RSS queues. */
b2014ff8 3729 for (i = 0; i < qdev->rss_ring_count; i++) {
4974097a 3730 QPRINTK(qdev, IFUP, DEBUG, "Enabling NAPI for rx_ring[%d].\n",
c4e84bde
RM
3731 i);
3732 napi_enable(&qdev->rx_ring[i].napi);
3733 }
3734
3735 return status;
3736}
3737
3738/* Issue soft reset to chip. */
3739static int ql_adapter_reset(struct ql_adapter *qdev)
3740{
3741 u32 value;
c4e84bde 3742 int status = 0;
a5f59dc9 3743 unsigned long end_jiffies;
c4e84bde 3744
a5f59dc9
RM
3745 /* Clear all the entries in the routing table. */
3746 status = ql_clear_routing_entries(qdev);
3747 if (status) {
3748 QPRINTK(qdev, IFUP, ERR, "Failed to clear routing bits.\n");
3749 return status;
3750 }
3751
3752 end_jiffies = jiffies +
3753 max((unsigned long)1, usecs_to_jiffies(30));
84087f4d
RM
3754
3755 /* Stop management traffic. */
3756 ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);
3757
3758 /* Wait for the NIC and MGMNT FIFOs to empty. */
3759 ql_wait_fifo_empty(qdev);
3760
c4e84bde 3761 ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
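	/* Editorial note: the driver expects the FR bit to drop once the
	 * function reset completes; the do/while loop below polls for that,
	 * bounded by the end_jiffies deadline (at least one jiffy) computed
	 * above.
	 */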
a75ee7f1 3762
c4e84bde
RM
3763 do {
3764 value = ql_read32(qdev, RST_FO);
3765 if ((value & RST_FO_FR) == 0)
3766 break;
a75ee7f1
RM
3767 cpu_relax();
3768 } while (time_before(jiffies, end_jiffies));
c4e84bde 3769
c4e84bde 3770 if (value & RST_FO_FR) {
c4e84bde 3771 QPRINTK(qdev, IFDOWN, ERR,
3ac49a1c 3772 			"ETIMEDOUT!!! Timed out waiting for the chip reset to complete!\n");
a75ee7f1 3773 status = -ETIMEDOUT;
c4e84bde
RM
3774 }
3775
84087f4d
RM
3776 /* Resume management traffic. */
3777 ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
c4e84bde
RM
3778 return status;
3779}
3780
3781static void ql_display_dev_info(struct net_device *ndev)
3782{
3783 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
3784
3785 QPRINTK(qdev, PROBE, INFO,
e4552f51 3786 "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
c4e84bde
RM
3787 "XG Roll = %d, XG Rev = %d.\n",
3788 qdev->func,
e4552f51 3789 qdev->port,
c4e84bde
RM
3790 qdev->chip_rev_id & 0x0000000f,
3791 qdev->chip_rev_id >> 4 & 0x0000000f,
3792 qdev->chip_rev_id >> 8 & 0x0000000f,
3793 qdev->chip_rev_id >> 12 & 0x0000000f);
7c510e4b 3794 QPRINTK(qdev, PROBE, INFO, "MAC address %pM\n", ndev->dev_addr);
c4e84bde
RM
3795}
3796
bc083ce9
RM
3797int ql_wol(struct ql_adapter *qdev)
3798{
3799 int status = 0;
3800 u32 wol = MB_WOL_DISABLE;
3801
3802 /* The CAM is still intact after a reset, but if we
3803 * are doing WOL, then we may need to program the
3804 * routing regs. We would also need to issue the mailbox
3805 * commands to instruct the MPI what to do per the ethtool
3806 * settings.
3807 */
3808
3809 if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
3810 WAKE_MCAST | WAKE_BCAST)) {
3811 QPRINTK(qdev, IFDOWN, ERR,
3812 			"Unsupported WOL parameter. qdev->wol = 0x%x.\n",
3813 qdev->wol);
3814 return -EINVAL;
3815 }
3816
3817 if (qdev->wol & WAKE_MAGIC) {
3818 status = ql_mb_wol_set_magic(qdev, 1);
3819 if (status) {
3820 QPRINTK(qdev, IFDOWN, ERR,
3821 "Failed to set magic packet on %s.\n",
3822 qdev->ndev->name);
3823 return status;
3824 } else
3825 QPRINTK(qdev, DRV, INFO,
3826 "Enabled magic packet successfully on %s.\n",
3827 qdev->ndev->name);
3828
3829 wol |= MB_WOL_MAGIC_PKT;
3830 }
3831
3832 if (qdev->wol) {
bc083ce9
RM
3833 wol |= MB_WOL_MODE_ON;
3834 status = ql_mb_wol_mode(qdev, wol);
3835 QPRINTK(qdev, DRV, ERR, "WOL %s (wol code 0x%x) on %s\n",
3836 			(status == 0) ? "Successfully set" : "Failed", wol,
3837 qdev->ndev->name);
3838 }
3839
3840 return status;
3841}
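/* Editorial note: only WAKE_MAGIC is honored by ql_wol() above; any
 * other ethtool WOL flag is rejected with -EINVAL before the mailbox
 * commands are issued.
 */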
3842
c4e84bde
RM
3843static int ql_adapter_down(struct ql_adapter *qdev)
3844{
c4e84bde 3845 int i, status = 0;
c4e84bde 3846
6a473308 3847 ql_link_off(qdev);
c4e84bde 3848
6497b607
RM
3849 /* Don't kill the reset worker thread if we
3850 * are in the process of recovery.
3851 */
3852 if (test_bit(QL_ADAPTER_UP, &qdev->flags))
3853 cancel_delayed_work_sync(&qdev->asic_reset_work);
c4e84bde
RM
3854 cancel_delayed_work_sync(&qdev->mpi_reset_work);
3855 cancel_delayed_work_sync(&qdev->mpi_work);
2ee1e272 3856 cancel_delayed_work_sync(&qdev->mpi_idc_work);
8aae2600 3857 cancel_delayed_work_sync(&qdev->mpi_core_to_log);
bcc2cb3b 3858 cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
c4e84bde 3859
39aa8165
RM
3860 for (i = 0; i < qdev->rss_ring_count; i++)
3861 napi_disable(&qdev->rx_ring[i].napi);
c4e84bde
RM
3862
3863 clear_bit(QL_ADAPTER_UP, &qdev->flags);
3864
3865 ql_disable_interrupts(qdev);
3866
3867 ql_tx_ring_clean(qdev);
3868
6b318cb3
RM
3869 	/* Call netif_napi_del() from a common point.
3870 */
b2014ff8 3871 for (i = 0; i < qdev->rss_ring_count; i++)
6b318cb3
RM
3872 netif_napi_del(&qdev->rx_ring[i].napi);
3873
4545a3f2 3874 ql_free_rx_buffers(qdev);
2d6a5e95 3875
c4e84bde
RM
3876 status = ql_adapter_reset(qdev);
3877 if (status)
3878 QPRINTK(qdev, IFDOWN, ERR, "reset(func #%d) FAILED!\n",
3879 qdev->func);
c4e84bde
RM
3880 return status;
3881}
3882
3883static int ql_adapter_up(struct ql_adapter *qdev)
3884{
3885 int err = 0;
3886
c4e84bde
RM
3887 err = ql_adapter_initialize(qdev);
3888 if (err) {
3889 QPRINTK(qdev, IFUP, INFO, "Unable to initialize adapter.\n");
c4e84bde
RM
3890 goto err_init;
3891 }
c4e84bde 3892 set_bit(QL_ADAPTER_UP, &qdev->flags);
4545a3f2 3893 ql_alloc_rx_buffers(qdev);
8b007de1
RM
3894 /* If the port is initialized and the
3895 	 * link is up, then turn on the carrier.
3896 */
3897 if ((ql_read32(qdev, STS) & qdev->port_init) &&
3898 (ql_read32(qdev, STS) & qdev->port_link_up))
6a473308 3899 ql_link_on(qdev);
c4e84bde
RM
3900 ql_enable_interrupts(qdev);
3901 ql_enable_all_completion_interrupts(qdev);
1e213303 3902 netif_tx_start_all_queues(qdev->ndev);
c4e84bde
RM
3903
3904 return 0;
3905err_init:
3906 ql_adapter_reset(qdev);
3907 return err;
3908}
3909
c4e84bde
RM
3910static void ql_release_adapter_resources(struct ql_adapter *qdev)
3911{
3912 ql_free_mem_resources(qdev);
3913 ql_free_irq(qdev);
3914}
3915
3916static int ql_get_adapter_resources(struct ql_adapter *qdev)
3917{
3918 int status = 0;
3919
3920 if (ql_alloc_mem_resources(qdev)) {
3921 QPRINTK(qdev, IFUP, ERR, "Unable to allocate memory.\n");
3922 return -ENOMEM;
3923 }
3924 status = ql_request_irq(qdev);
c4e84bde
RM
3925 return status;
3926}
3927
3928static int qlge_close(struct net_device *ndev)
3929{
3930 struct ql_adapter *qdev = netdev_priv(ndev);
3931
4bbd1a19
RM
3932 /* If we hit pci_channel_io_perm_failure
3933 	 * condition, then we have already
3934 * brought the adapter down.
3935 */
3936 if (test_bit(QL_EEH_FATAL, &qdev->flags)) {
3937 QPRINTK(qdev, DRV, ERR, "EEH fatal did unload.\n");
3938 clear_bit(QL_EEH_FATAL, &qdev->flags);
3939 return 0;
3940 }
3941
c4e84bde
RM
3942 /*
3943 * Wait for device to recover from a reset.
3944 * (Rarely happens, but possible.)
3945 */
3946 while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
3947 msleep(1);
3948 ql_adapter_down(qdev);
3949 ql_release_adapter_resources(qdev);
c4e84bde
RM
3950 return 0;
3951}
3952
3953static int ql_configure_rings(struct ql_adapter *qdev)
3954{
3955 int i;
3956 struct rx_ring *rx_ring;
3957 struct tx_ring *tx_ring;
a4ab6137 3958 int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus());
7c734359
RM
3959 unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ?
3960 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
3961
3962 qdev->lbq_buf_order = get_order(lbq_buf_len);
a4ab6137
RM
3963
3964 /* In a perfect world we have one RSS ring for each CPU
3965 	 * and each has its own vector. To do that we ask for
3966 * cpu_cnt vectors. ql_enable_msix() will adjust the
3967 * vector count to what we actually get. We then
3968 * allocate an RSS ring for each.
3969 * Essentially, we are doing min(cpu_count, msix_vector_count).
c4e84bde 3970 */
a4ab6137
RM
3971 qdev->intr_count = cpu_cnt;
3972 ql_enable_msix(qdev);
3973 /* Adjust the RSS ring count to the actual vector count. */
3974 qdev->rss_ring_count = qdev->intr_count;
c4e84bde 3975 qdev->tx_ring_count = cpu_cnt;
b2014ff8 3976 qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;
c4e84bde 3977
c4e84bde
RM
3978 for (i = 0; i < qdev->tx_ring_count; i++) {
3979 tx_ring = &qdev->tx_ring[i];
e332471c 3980 memset((void *)tx_ring, 0, sizeof(*tx_ring));
c4e84bde
RM
3981 tx_ring->qdev = qdev;
3982 tx_ring->wq_id = i;
3983 tx_ring->wq_len = qdev->tx_ring_size;
3984 tx_ring->wq_size =
3985 tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);
3986
3987 /*
3988 		 * The completion queue IDs for the tx rings start
39aa8165 3989 * immediately after the rss rings.
c4e84bde 3990 */
39aa8165 3991 tx_ring->cq_id = qdev->rss_ring_count + i;
c4e84bde
RM
3992 }
3993
3994 for (i = 0; i < qdev->rx_ring_count; i++) {
3995 rx_ring = &qdev->rx_ring[i];
e332471c 3996 memset((void *)rx_ring, 0, sizeof(*rx_ring));
c4e84bde
RM
3997 rx_ring->qdev = qdev;
3998 rx_ring->cq_id = i;
3999 rx_ring->cpu = i % cpu_cnt; /* CPU to run handler on. */
b2014ff8 4000 if (i < qdev->rss_ring_count) {
39aa8165
RM
4001 /*
4002 * Inbound (RSS) queues.
4003 */
c4e84bde
RM
4004 rx_ring->cq_len = qdev->rx_ring_size;
4005 rx_ring->cq_size =
4006 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4007 rx_ring->lbq_len = NUM_LARGE_BUFFERS;
4008 rx_ring->lbq_size =
2c9a0d41 4009 rx_ring->lbq_len * sizeof(__le64);
7c734359
RM
4010 rx_ring->lbq_buf_size = (u16)lbq_buf_len;
4011 QPRINTK(qdev, IFUP, DEBUG,
4012 "lbq_buf_size %d, order = %d\n",
4013 rx_ring->lbq_buf_size, qdev->lbq_buf_order);
c4e84bde
RM
4014 rx_ring->sbq_len = NUM_SMALL_BUFFERS;
4015 rx_ring->sbq_size =
2c9a0d41 4016 rx_ring->sbq_len * sizeof(__le64);
52e55f3c 4017 rx_ring->sbq_buf_size = SMALL_BUF_MAP_SIZE;
b2014ff8
RM
4018 rx_ring->type = RX_Q;
4019 } else {
c4e84bde
RM
4020 /*
4021 * Outbound queue handles outbound completions only.
4022 */
4023 /* outbound cq is same size as tx_ring it services. */
4024 rx_ring->cq_len = qdev->tx_ring_size;
4025 rx_ring->cq_size =
4026 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4027 rx_ring->lbq_len = 0;
4028 rx_ring->lbq_size = 0;
4029 rx_ring->lbq_buf_size = 0;
4030 rx_ring->sbq_len = 0;
4031 rx_ring->sbq_size = 0;
4032 rx_ring->sbq_buf_size = 0;
4033 rx_ring->type = TX_Q;
c4e84bde
RM
4034 }
4035 }
4036 return 0;
4037}
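/* Illustration (editorial, not part of the driver): with 4 online CPUs
 * and 4 MSI-X vectors granted, ql_configure_rings() above produces
 *   rss_ring_count = 4  ->  rx_ring[0..3], type RX_Q, cq_id 0..3
 *   tx_ring_count  = 4  ->  tx_ring[0..3], completing on cq_id 4..7
 *   rx_ring_count  = 8  ->  rx_ring[4..7], type TX_Q (outbound
 *                           completions only, no lbq/sbq buffers)
 */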
4038
4039static int qlge_open(struct net_device *ndev)
4040{
4041 int err = 0;
4042 struct ql_adapter *qdev = netdev_priv(ndev);
4043
74e12435
RM
4044 err = ql_adapter_reset(qdev);
4045 if (err)
4046 return err;
4047
c4e84bde
RM
4048 err = ql_configure_rings(qdev);
4049 if (err)
4050 return err;
4051
4052 err = ql_get_adapter_resources(qdev);
4053 if (err)
4054 goto error_up;
4055
4056 err = ql_adapter_up(qdev);
4057 if (err)
4058 goto error_up;
4059
4060 return err;
4061
4062error_up:
4063 ql_release_adapter_resources(qdev);
c4e84bde
RM
4064 return err;
4065}
4066
7c734359
RM
4067static int ql_change_rx_buffers(struct ql_adapter *qdev)
4068{
4069 struct rx_ring *rx_ring;
4070 int i, status;
4071 u32 lbq_buf_len;
4072
4073 	/* Wait for an outstanding reset to complete. */
4074 if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4075 int i = 3;
4076 while (i-- && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4077 QPRINTK(qdev, IFUP, ERR,
4078 "Waiting for adapter UP...\n");
4079 ssleep(1);
4080 }
4081
4082 if (!i) {
4083 QPRINTK(qdev, IFUP, ERR,
4084 "Timed out waiting for adapter UP\n");
4085 return -ETIMEDOUT;
4086 }
4087 }
4088
4089 status = ql_adapter_down(qdev);
4090 if (status)
4091 goto error;
4092
4093 /* Get the new rx buffer size. */
4094 lbq_buf_len = (qdev->ndev->mtu > 1500) ?
4095 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
4096 qdev->lbq_buf_order = get_order(lbq_buf_len);
4097
4098 for (i = 0; i < qdev->rss_ring_count; i++) {
4099 rx_ring = &qdev->rx_ring[i];
4100 /* Set the new size. */
4101 rx_ring->lbq_buf_size = lbq_buf_len;
4102 }
4103
4104 status = ql_adapter_up(qdev);
4105 if (status)
4106 goto error;
4107
4108 return status;
4109error:
4110 QPRINTK(qdev, IFUP, ALERT,
4111 "Driver up/down cycle failed, closing device.\n");
4112 set_bit(QL_ADAPTER_UP, &qdev->flags);
4113 dev_close(qdev->ndev);
4114 return status;
4115}
4116
c4e84bde
RM
4117static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
4118{
4119 struct ql_adapter *qdev = netdev_priv(ndev);
7c734359 4120 int status;
c4e84bde
RM
4121
4122 if (ndev->mtu == 1500 && new_mtu == 9000) {
4123 QPRINTK(qdev, IFUP, ERR, "Changing to jumbo MTU.\n");
4124 } else if (ndev->mtu == 9000 && new_mtu == 1500) {
4125 QPRINTK(qdev, IFUP, ERR, "Changing to normal MTU.\n");
c4e84bde
RM
4126 } else
4127 return -EINVAL;
7c734359
RM
4128
4129 queue_delayed_work(qdev->workqueue,
4130 &qdev->mpi_port_cfg_work, 3*HZ);
4131
4132 if (!netif_running(qdev->ndev)) {
4133 ndev->mtu = new_mtu;
4134 return 0;
4135 }
4136
c4e84bde 4137 ndev->mtu = new_mtu;
7c734359
RM
4138 status = ql_change_rx_buffers(qdev);
4139 if (status) {
4140 QPRINTK(qdev, IFUP, ERR,
4141 "Changing MTU failed.\n");
4142 }
4143
4144 return status;
c4e84bde
RM
4145}
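/* Editorial note: moving between the 1500 and 9000 byte MTUs switches
 * the large-buffer size (LARGE_BUFFER_MIN_SIZE vs LARGE_BUFFER_MAX_SIZE),
 * so when the interface is running the change is applied through a full
 * adapter down/up cycle in ql_change_rx_buffers() above.
 */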
4146
4147static struct net_device_stats *qlge_get_stats(struct net_device
4148 *ndev)
4149{
885ee398
RM
4150 struct ql_adapter *qdev = netdev_priv(ndev);
4151 struct rx_ring *rx_ring = &qdev->rx_ring[0];
4152 struct tx_ring *tx_ring = &qdev->tx_ring[0];
4153 unsigned long pkts, mcast, dropped, errors, bytes;
4154 int i;
4155
4156 /* Get RX stats. */
4157 pkts = mcast = dropped = errors = bytes = 0;
4158 for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
4159 pkts += rx_ring->rx_packets;
4160 bytes += rx_ring->rx_bytes;
4161 dropped += rx_ring->rx_dropped;
4162 errors += rx_ring->rx_errors;
4163 mcast += rx_ring->rx_multicast;
4164 }
4165 ndev->stats.rx_packets = pkts;
4166 ndev->stats.rx_bytes = bytes;
4167 ndev->stats.rx_dropped = dropped;
4168 ndev->stats.rx_errors = errors;
4169 ndev->stats.multicast = mcast;
4170
4171 /* Get TX stats. */
4172 pkts = errors = bytes = 0;
4173 for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) {
4174 pkts += tx_ring->tx_packets;
4175 bytes += tx_ring->tx_bytes;
4176 errors += tx_ring->tx_errors;
4177 }
4178 ndev->stats.tx_packets = pkts;
4179 ndev->stats.tx_bytes = bytes;
4180 ndev->stats.tx_errors = errors;
bcc90f55 4181 return &ndev->stats;
c4e84bde
RM
4182}
4183
4184static void qlge_set_multicast_list(struct net_device *ndev)
4185{
4186 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
4187 struct dev_mc_list *mc_ptr;
cc288f54 4188 int i, status;
c4e84bde 4189
cc288f54
RM
4190 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
4191 if (status)
4192 return;
c4e84bde
RM
4193 /*
4194 * Set or clear promiscuous mode if a
4195 * transition is taking place.
4196 */
4197 if (ndev->flags & IFF_PROMISC) {
4198 if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4199 if (ql_set_routing_reg
4200 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
4201 QPRINTK(qdev, HW, ERR,
4202 					"Failed to set promiscuous mode.\n");
4203 } else {
4204 set_bit(QL_PROMISCUOUS, &qdev->flags);
4205 }
4206 }
4207 } else {
4208 if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4209 if (ql_set_routing_reg
4210 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
4211 QPRINTK(qdev, HW, ERR,
4212 					"Failed to clear promiscuous mode.\n");
4213 } else {
4214 clear_bit(QL_PROMISCUOUS, &qdev->flags);
4215 }
4216 }
4217 }
4218
4219 /*
4220 * Set or clear all multicast mode if a
4221 * transition is taking place.
4222 */
4223 if ((ndev->flags & IFF_ALLMULTI) ||
4224 (ndev->mc_count > MAX_MULTICAST_ENTRIES)) {
4225 if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
4226 if (ql_set_routing_reg
4227 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
4228 QPRINTK(qdev, HW, ERR,
4229 "Failed to set all-multi mode.\n");
4230 } else {
4231 set_bit(QL_ALLMULTI, &qdev->flags);
4232 }
4233 }
4234 } else {
4235 if (test_bit(QL_ALLMULTI, &qdev->flags)) {
4236 if (ql_set_routing_reg
4237 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
4238 QPRINTK(qdev, HW, ERR,
4239 "Failed to clear all-multi mode.\n");
4240 } else {
4241 clear_bit(QL_ALLMULTI, &qdev->flags);
4242 }
4243 }
4244 }
4245
4246 if (ndev->mc_count) {
cc288f54
RM
4247 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4248 if (status)
4249 goto exit;
c4e84bde
RM
4250 for (i = 0, mc_ptr = ndev->mc_list; mc_ptr;
4251 i++, mc_ptr = mc_ptr->next)
4252 if (ql_set_mac_addr_reg(qdev, (u8 *) mc_ptr->dmi_addr,
4253 MAC_ADDR_TYPE_MULTI_MAC, i)) {
4254 QPRINTK(qdev, HW, ERR,
4255 					"Failed to load multicast address.\n");
cc288f54 4256 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
c4e84bde
RM
4257 goto exit;
4258 }
cc288f54 4259 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
c4e84bde
RM
4260 if (ql_set_routing_reg
4261 (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
4262 QPRINTK(qdev, HW, ERR,
4263 "Failed to set multicast match mode.\n");
4264 } else {
4265 set_bit(QL_ALLMULTI, &qdev->flags);
4266 }
4267 }
4268exit:
8587ea35 4269 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
c4e84bde
RM
4270}
4271
4272static int qlge_set_mac_address(struct net_device *ndev, void *p)
4273{
4274 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
4275 struct sockaddr *addr = p;
cc288f54 4276 int status;
c4e84bde 4277
c4e84bde
RM
4278 if (!is_valid_ether_addr(addr->sa_data))
4279 return -EADDRNOTAVAIL;
4280 memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
4281
cc288f54
RM
4282 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4283 if (status)
4284 return status;
cc288f54
RM
4285 status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
4286 MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
cc288f54
RM
4287 if (status)
4288 QPRINTK(qdev, HW, ERR, "Failed to load MAC address.\n");
4289 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4290 return status;
c4e84bde
RM
4291}
4292
4293static void qlge_tx_timeout(struct net_device *ndev)
4294{
4295 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
6497b607 4296 ql_queue_asic_error(qdev);
c4e84bde
RM
4297}
4298
4299static void ql_asic_reset_work(struct work_struct *work)
4300{
4301 struct ql_adapter *qdev =
4302 container_of(work, struct ql_adapter, asic_reset_work.work);
db98812f 4303 int status;
f2c0d8df 4304 rtnl_lock();
db98812f
RM
4305 status = ql_adapter_down(qdev);
4306 if (status)
4307 goto error;
4308
4309 status = ql_adapter_up(qdev);
4310 if (status)
4311 goto error;
2cd6dbaa
RM
4312
4313 /* Restore rx mode. */
4314 clear_bit(QL_ALLMULTI, &qdev->flags);
4315 clear_bit(QL_PROMISCUOUS, &qdev->flags);
4316 qlge_set_multicast_list(qdev->ndev);
4317
f2c0d8df 4318 rtnl_unlock();
db98812f
RM
4319 return;
4320error:
4321 QPRINTK(qdev, IFUP, ALERT,
4322 "Driver up/down cycle failed, closing device\n");
f2c0d8df 4323
db98812f
RM
4324 set_bit(QL_ADAPTER_UP, &qdev->flags);
4325 dev_close(qdev->ndev);
4326 rtnl_unlock();
c4e84bde
RM
4327}
4328
b0c2aadf
RM
4329static struct nic_operations qla8012_nic_ops = {
4330 .get_flash = ql_get_8012_flash_params,
4331 .port_initialize = ql_8012_port_initialize,
4332};
4333
cdca8d02
RM
4334static struct nic_operations qla8000_nic_ops = {
4335 .get_flash = ql_get_8000_flash_params,
4336 .port_initialize = ql_8000_port_initialize,
4337};
4338
e4552f51
RM
4339/* Find the pcie function number for the other NIC
4340 * on this chip. Since both NIC functions share a
4341 * common firmware we have the lowest enabled function
4342 * do any common work. Examples would be resetting
4343 * after a fatal firmware error, or doing a firmware
4344 * coredump.
4345 */
4346static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
4347{
4348 int status = 0;
4349 u32 temp;
4350 u32 nic_func1, nic_func2;
4351
4352 status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
4353 &temp);
4354 if (status)
4355 return status;
4356
4357 nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
4358 MPI_TEST_NIC_FUNC_MASK);
4359 nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
4360 MPI_TEST_NIC_FUNC_MASK);
4361
4362 if (qdev->func == nic_func1)
4363 qdev->alt_func = nic_func2;
4364 else if (qdev->func == nic_func2)
4365 qdev->alt_func = nic_func1;
4366 else
4367 status = -EIO;
4368
4369 return status;
4370}
b0c2aadf 4371
e4552f51 4372static int ql_get_board_info(struct ql_adapter *qdev)
c4e84bde 4373{
e4552f51 4374 int status;
c4e84bde
RM
4375 qdev->func =
4376 (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
e4552f51
RM
4377 if (qdev->func > 3)
4378 return -EIO;
4379
4380 status = ql_get_alt_pcie_func(qdev);
4381 if (status)
4382 return status;
4383
4384 qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
4385 if (qdev->port) {
c4e84bde
RM
4386 qdev->xg_sem_mask = SEM_XGMAC1_MASK;
4387 qdev->port_link_up = STS_PL1;
4388 qdev->port_init = STS_PI1;
4389 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
4390 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
4391 } else {
4392 qdev->xg_sem_mask = SEM_XGMAC0_MASK;
4393 qdev->port_link_up = STS_PL0;
4394 qdev->port_init = STS_PI0;
4395 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
4396 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
4397 }
4398 qdev->chip_rev_id = ql_read32(qdev, REV_ID);
b0c2aadf
RM
4399 qdev->device_id = qdev->pdev->device;
4400 if (qdev->device_id == QLGE_DEVICE_ID_8012)
4401 qdev->nic_ops = &qla8012_nic_ops;
cdca8d02
RM
4402 else if (qdev->device_id == QLGE_DEVICE_ID_8000)
4403 qdev->nic_ops = &qla8000_nic_ops;
e4552f51 4404 return status;
c4e84bde
RM
4405}
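/* Editorial note: ql_get_board_info() above makes the lower-numbered of
 * the two enabled NIC PCIe functions port 0; the port then selects the
 * XGMAC semaphore, the link/init status bits and the mailbox register
 * set used by this function.
 */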
4406
4407static void ql_release_all(struct pci_dev *pdev)
4408{
4409 struct net_device *ndev = pci_get_drvdata(pdev);
4410 struct ql_adapter *qdev = netdev_priv(ndev);
4411
4412 if (qdev->workqueue) {
4413 destroy_workqueue(qdev->workqueue);
4414 qdev->workqueue = NULL;
4415 }
39aa8165 4416
c4e84bde 4417 if (qdev->reg_base)
8668ae92 4418 iounmap(qdev->reg_base);
c4e84bde
RM
4419 if (qdev->doorbell_area)
4420 iounmap(qdev->doorbell_area);
8aae2600 4421 vfree(qdev->mpi_coredump);
c4e84bde
RM
4422 pci_release_regions(pdev);
4423 pci_set_drvdata(pdev, NULL);
4424}
4425
4426static int __devinit ql_init_device(struct pci_dev *pdev,
4427 struct net_device *ndev, int cards_found)
4428{
4429 struct ql_adapter *qdev = netdev_priv(ndev);
1d1023d0 4430 int err = 0;
c4e84bde 4431
e332471c 4432 memset((void *)qdev, 0, sizeof(*qdev));
c4e84bde
RM
4433 err = pci_enable_device(pdev);
4434 if (err) {
4435 dev_err(&pdev->dev, "PCI device enable failed.\n");
4436 return err;
4437 }
4438
ebd6e774
RM
4439 qdev->ndev = ndev;
4440 qdev->pdev = pdev;
4441 pci_set_drvdata(pdev, ndev);
c4e84bde 4442
bc9167f3
RM
4443 /* Set PCIe read request size */
4444 err = pcie_set_readrq(pdev, 4096);
4445 if (err) {
4446 dev_err(&pdev->dev, "Set readrq failed.\n");
4f9a91c8 4447 goto err_out1;
bc9167f3
RM
4448 }
4449
c4e84bde
RM
4450 err = pci_request_regions(pdev, DRV_NAME);
4451 if (err) {
4452 dev_err(&pdev->dev, "PCI region request failed.\n");
ebd6e774 4453 return err;
c4e84bde
RM
4454 }
4455
4456 pci_set_master(pdev);
6a35528a 4457 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
c4e84bde 4458 set_bit(QL_DMA64, &qdev->flags);
6a35528a 4459 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
c4e84bde 4460 } else {
284901a9 4461 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
c4e84bde 4462 if (!err)
284901a9 4463 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
c4e84bde
RM
4464 }
4465
4466 if (err) {
4467 dev_err(&pdev->dev, "No usable DMA configuration.\n");
4f9a91c8 4468 goto err_out2;
c4e84bde
RM
4469 }
4470
73475339
RM
4471 /* Set PCIe reset type for EEH to fundamental. */
4472 pdev->needs_freset = 1;
6d190c6e 4473 pci_save_state(pdev);
c4e84bde
RM
4474 qdev->reg_base =
4475 ioremap_nocache(pci_resource_start(pdev, 1),
4476 pci_resource_len(pdev, 1));
4477 if (!qdev->reg_base) {
4478 dev_err(&pdev->dev, "Register mapping failed.\n");
4479 err = -ENOMEM;
4f9a91c8 4480 goto err_out2;
c4e84bde
RM
4481 }
4482
4483 qdev->doorbell_area_size = pci_resource_len(pdev, 3);
4484 qdev->doorbell_area =
4485 ioremap_nocache(pci_resource_start(pdev, 3),
4486 pci_resource_len(pdev, 3));
4487 if (!qdev->doorbell_area) {
4488 dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
4489 err = -ENOMEM;
4f9a91c8 4490 goto err_out2;
c4e84bde
RM
4491 }
4492
e4552f51
RM
4493 err = ql_get_board_info(qdev);
4494 if (err) {
4495 dev_err(&pdev->dev, "Register access failed.\n");
4496 err = -EIO;
4f9a91c8 4497 goto err_out2;
e4552f51 4498 }
c4e84bde
RM
4499 qdev->msg_enable = netif_msg_init(debug, default_msg);
4500 spin_lock_init(&qdev->hw_lock);
4501 spin_lock_init(&qdev->stats_lock);
4502
8aae2600
RM
4503 if (qlge_mpi_coredump) {
4504 qdev->mpi_coredump =
4505 vmalloc(sizeof(struct ql_mpi_coredump));
4506 if (qdev->mpi_coredump == NULL) {
4507 dev_err(&pdev->dev, "Coredump alloc failed.\n");
4508 err = -ENOMEM;
ce96bc86 4509 goto err_out2;
8aae2600 4510 }
d5c1da56
RM
4511 if (qlge_force_coredump)
4512 set_bit(QL_FRC_COREDUMP, &qdev->flags);
8aae2600 4513 }
c4e84bde 4514 /* make sure the EEPROM is good */
b0c2aadf 4515 err = qdev->nic_ops->get_flash(qdev);
c4e84bde
RM
4516 if (err) {
4517 dev_err(&pdev->dev, "Invalid FLASH.\n");
4f9a91c8 4518 goto err_out2;
c4e84bde
RM
4519 }
4520
c4e84bde
RM
4521 memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
4522
4523 /* Set up the default ring sizes. */
4524 qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
4525 qdev->rx_ring_size = NUM_RX_RING_ENTRIES;
4526
4527 /* Set up the coalescing parameters. */
4528 qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
4529 qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
4530 qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4531 qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4532
4533 /*
4534 * Set up the operating parameters.
4535 */
4536 qdev->rx_csum = 1;
c4e84bde
RM
4537 qdev->workqueue = create_singlethread_workqueue(ndev->name);
4538 INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
4539 INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
4540 INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
bcc2cb3b 4541 INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
2ee1e272 4542 INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
8aae2600 4543 INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log);
bcc2cb3b 4544 init_completion(&qdev->ide_completion);
c4e84bde
RM
4545
4546 if (!cards_found) {
4547 dev_info(&pdev->dev, "%s\n", DRV_STRING);
4548 dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
4549 DRV_NAME, DRV_VERSION);
4550 }
4551 return 0;
4f9a91c8 4552err_out2:
c4e84bde 4553 ql_release_all(pdev);
4f9a91c8 4554err_out1:
c4e84bde
RM
4555 pci_disable_device(pdev);
4556 return err;
4557}
4558
25ed7849
SH
4559static const struct net_device_ops qlge_netdev_ops = {
4560 .ndo_open = qlge_open,
4561 .ndo_stop = qlge_close,
4562 .ndo_start_xmit = qlge_send,
4563 .ndo_change_mtu = qlge_change_mtu,
4564 .ndo_get_stats = qlge_get_stats,
4565 .ndo_set_multicast_list = qlge_set_multicast_list,
4566 .ndo_set_mac_address = qlge_set_mac_address,
4567 .ndo_validate_addr = eth_validate_addr,
4568 .ndo_tx_timeout = qlge_tx_timeout,
01e6b953
RM
4569 .ndo_vlan_rx_register = qlge_vlan_rx_register,
4570 .ndo_vlan_rx_add_vid = qlge_vlan_rx_add_vid,
4571 .ndo_vlan_rx_kill_vid = qlge_vlan_rx_kill_vid,
25ed7849
SH
4572};
4573
15c052fc
RM
4574static void ql_timer(unsigned long data)
4575{
4576 struct ql_adapter *qdev = (struct ql_adapter *)data;
4577 u32 var = 0;
4578
4579 var = ql_read32(qdev, STS);
4580 if (pci_channel_offline(qdev->pdev)) {
4581 QPRINTK(qdev, IFUP, ERR, "EEH STS = 0x%.08x.\n", var);
4582 return;
4583 }
4584
4585 qdev->timer.expires = jiffies + (5*HZ);
4586 add_timer(&qdev->timer);
4587}
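/* Editorial note: ql_timer() above fires every 5 seconds and simply
 * reads STS so that a dead PCI bus is noticed (triggering EEH); it
 * re-arms itself unless the PCI channel is already offline.
 */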
4588
c4e84bde
RM
4589static int __devinit qlge_probe(struct pci_dev *pdev,
4590 const struct pci_device_id *pci_entry)
4591{
4592 struct net_device *ndev = NULL;
4593 struct ql_adapter *qdev = NULL;
4594 static int cards_found = 0;
4595 int err = 0;
4596
1e213303
RM
4597 ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
4598 min(MAX_CPUS, (int)num_online_cpus()));
c4e84bde
RM
4599 if (!ndev)
4600 return -ENOMEM;
4601
4602 err = ql_init_device(pdev, ndev, cards_found);
4603 if (err < 0) {
4604 free_netdev(ndev);
4605 return err;
4606 }
4607
4608 qdev = netdev_priv(ndev);
4609 SET_NETDEV_DEV(ndev, &pdev->dev);
4610 ndev->features = (0
4611 | NETIF_F_IP_CSUM
4612 | NETIF_F_SG
4613 | NETIF_F_TSO
4614 | NETIF_F_TSO6
4615 | NETIF_F_TSO_ECN
4616 | NETIF_F_HW_VLAN_TX
4617 | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER);
22bdd4f5 4618 ndev->features |= NETIF_F_GRO;
c4e84bde
RM
4619
4620 if (test_bit(QL_DMA64, &qdev->flags))
4621 ndev->features |= NETIF_F_HIGHDMA;
4622
4623 /*
4624 * Set up net_device structure.
4625 */
4626 ndev->tx_queue_len = qdev->tx_ring_size;
4627 ndev->irq = pdev->irq;
25ed7849
SH
4628
4629 ndev->netdev_ops = &qlge_netdev_ops;
c4e84bde 4630 SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops);
c4e84bde 4631 ndev->watchdog_timeo = 10 * HZ;
25ed7849 4632
c4e84bde
RM
4633 err = register_netdev(ndev);
4634 if (err) {
4635 dev_err(&pdev->dev, "net device registration failed.\n");
4636 ql_release_all(pdev);
4637 pci_disable_device(pdev);
4638 return err;
4639 }
15c052fc
RM
4640 /* Start up the timer to trigger EEH if
4641 * the bus goes dead
4642 */
4643 init_timer_deferrable(&qdev->timer);
4644 qdev->timer.data = (unsigned long)qdev;
4645 qdev->timer.function = ql_timer;
4646 qdev->timer.expires = jiffies + (5*HZ);
4647 add_timer(&qdev->timer);
6a473308 4648 ql_link_off(qdev);
c4e84bde 4649 ql_display_dev_info(ndev);
9dfbbaa6 4650 atomic_set(&qdev->lb_count, 0);
c4e84bde
RM
4651 cards_found++;
4652 return 0;
4653}
4654
9dfbbaa6
RM
4655netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev)
4656{
4657 return qlge_send(skb, ndev);
4658}
4659
4660int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
4661{
4662 return ql_clean_inbound_rx_ring(rx_ring, budget);
4663}
4664
c4e84bde
RM
4665static void __devexit qlge_remove(struct pci_dev *pdev)
4666{
4667 struct net_device *ndev = pci_get_drvdata(pdev);
15c052fc
RM
4668 struct ql_adapter *qdev = netdev_priv(ndev);
4669 del_timer_sync(&qdev->timer);
c4e84bde
RM
4670 unregister_netdev(ndev);
4671 ql_release_all(pdev);
4672 pci_disable_device(pdev);
4673 free_netdev(ndev);
4674}
4675
6d190c6e
RM
4676/* Clean up resources without touching hardware. */
4677static void ql_eeh_close(struct net_device *ndev)
4678{
4679 int i;
4680 struct ql_adapter *qdev = netdev_priv(ndev);
4681
4682 if (netif_carrier_ok(ndev)) {
4683 netif_carrier_off(ndev);
4684 netif_stop_queue(ndev);
4685 }
4686
4687 if (test_bit(QL_ADAPTER_UP, &qdev->flags))
4688 cancel_delayed_work_sync(&qdev->asic_reset_work);
4689 cancel_delayed_work_sync(&qdev->mpi_reset_work);
4690 cancel_delayed_work_sync(&qdev->mpi_work);
4691 cancel_delayed_work_sync(&qdev->mpi_idc_work);
8aae2600 4692 cancel_delayed_work_sync(&qdev->mpi_core_to_log);
6d190c6e
RM
4693 cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
4694
4695 for (i = 0; i < qdev->rss_ring_count; i++)
4696 netif_napi_del(&qdev->rx_ring[i].napi);
4697
4698 clear_bit(QL_ADAPTER_UP, &qdev->flags);
4699 ql_tx_ring_clean(qdev);
4700 ql_free_rx_buffers(qdev);
4701 ql_release_adapter_resources(qdev);
4702}
4703
c4e84bde
RM
4704/*
4705 * This callback is called by the PCI subsystem whenever
4706 * a PCI bus error is detected.
4707 */
4708static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
4709 enum pci_channel_state state)
4710{
4711 struct net_device *ndev = pci_get_drvdata(pdev);
4bbd1a19 4712 struct ql_adapter *qdev = netdev_priv(ndev);
fbc663ce 4713
6d190c6e
RM
4714 switch (state) {
4715 case pci_channel_io_normal:
4716 return PCI_ERS_RESULT_CAN_RECOVER;
4717 case pci_channel_io_frozen:
4718 netif_device_detach(ndev);
4719 if (netif_running(ndev))
4720 ql_eeh_close(ndev);
4721 pci_disable_device(pdev);
4722 return PCI_ERS_RESULT_NEED_RESET;
4723 case pci_channel_io_perm_failure:
4724 dev_err(&pdev->dev,
4725 "%s: pci_channel_io_perm_failure.\n", __func__);
4bbd1a19
RM
4726 ql_eeh_close(ndev);
4727 set_bit(QL_EEH_FATAL, &qdev->flags);
fbc663ce 4728 return PCI_ERS_RESULT_DISCONNECT;
6d190c6e 4729 }
c4e84bde
RM
4730
4731 /* Request a slot reset. */
4732 return PCI_ERS_RESULT_NEED_RESET;
4733}
4734
4735/*
4736  * This callback is called after the PCI bus has been reset.
4737 * Basically, this tries to restart the card from scratch.
4738 * This is a shortened version of the device probe/discovery code,
4739  * it resembles the first half of the probe routine.
4740 */
4741static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
4742{
4743 struct net_device *ndev = pci_get_drvdata(pdev);
4744 struct ql_adapter *qdev = netdev_priv(ndev);
4745
6d190c6e
RM
4746 pdev->error_state = pci_channel_io_normal;
4747
4748 pci_restore_state(pdev);
c4e84bde
RM
4749 if (pci_enable_device(pdev)) {
4750 QPRINTK(qdev, IFUP, ERR,
4751 "Cannot re-enable PCI device after reset.\n");
4752 return PCI_ERS_RESULT_DISCONNECT;
4753 }
c4e84bde 4754 pci_set_master(pdev);
a112fd4c
RM
4755
4756 if (ql_adapter_reset(qdev)) {
4757 QPRINTK(qdev, DRV, ERR, "reset FAILED!\n");
4bbd1a19 4758 set_bit(QL_EEH_FATAL, &qdev->flags);
a112fd4c
RM
4759 return PCI_ERS_RESULT_DISCONNECT;
4760 }
4761
c4e84bde
RM
4762 return PCI_ERS_RESULT_RECOVERED;
4763}
4764
4765static void qlge_io_resume(struct pci_dev *pdev)
4766{
4767 struct net_device *ndev = pci_get_drvdata(pdev);
4768 struct ql_adapter *qdev = netdev_priv(ndev);
6d190c6e 4769 int err = 0;
c4e84bde 4770
c4e84bde 4771 if (netif_running(ndev)) {
6d190c6e
RM
4772 err = qlge_open(ndev);
4773 if (err) {
c4e84bde
RM
4774 QPRINTK(qdev, IFUP, ERR,
4775 "Device initialization failed after reset.\n");
4776 return;
4777 }
6d190c6e
RM
4778 } else {
4779 QPRINTK(qdev, IFUP, ERR,
4780 "Device was not running prior to EEH.\n");
c4e84bde 4781 }
15c052fc
RM
4782 qdev->timer.expires = jiffies + (5*HZ);
4783 add_timer(&qdev->timer);
c4e84bde
RM
4784 netif_device_attach(ndev);
4785}
4786
4787static struct pci_error_handlers qlge_err_handler = {
4788 .error_detected = qlge_io_error_detected,
4789 .slot_reset = qlge_io_slot_reset,
4790 .resume = qlge_io_resume,
4791};
4792
4793static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
4794{
4795 struct net_device *ndev = pci_get_drvdata(pdev);
4796 struct ql_adapter *qdev = netdev_priv(ndev);
6b318cb3 4797 int err;
c4e84bde
RM
4798
4799 netif_device_detach(ndev);
15c052fc 4800 del_timer_sync(&qdev->timer);
c4e84bde
RM
4801
4802 if (netif_running(ndev)) {
4803 err = ql_adapter_down(qdev);
4804 if (!err)
4805 return err;
4806 }
4807
bc083ce9 4808 ql_wol(qdev);
c4e84bde
RM
4809 err = pci_save_state(pdev);
4810 if (err)
4811 return err;
4812
4813 pci_disable_device(pdev);
4814
4815 pci_set_power_state(pdev, pci_choose_state(pdev, state));
4816
4817 return 0;
4818}
4819
04da2cf9 4820#ifdef CONFIG_PM
c4e84bde
RM
4821static int qlge_resume(struct pci_dev *pdev)
4822{
4823 struct net_device *ndev = pci_get_drvdata(pdev);
4824 struct ql_adapter *qdev = netdev_priv(ndev);
4825 int err;
4826
4827 pci_set_power_state(pdev, PCI_D0);
4828 pci_restore_state(pdev);
4829 err = pci_enable_device(pdev);
4830 if (err) {
4831 QPRINTK(qdev, IFUP, ERR, "Cannot enable PCI device from suspend\n");
4832 return err;
4833 }
4834 pci_set_master(pdev);
4835
4836 pci_enable_wake(pdev, PCI_D3hot, 0);
4837 pci_enable_wake(pdev, PCI_D3cold, 0);
4838
4839 if (netif_running(ndev)) {
4840 err = ql_adapter_up(qdev);
4841 if (err)
4842 return err;
4843 }
4844
15c052fc
RM
4845 qdev->timer.expires = jiffies + (5*HZ);
4846 add_timer(&qdev->timer);
c4e84bde
RM
4847 netif_device_attach(ndev);
4848
4849 return 0;
4850}
04da2cf9 4851#endif /* CONFIG_PM */
c4e84bde
RM
4852
4853static void qlge_shutdown(struct pci_dev *pdev)
4854{
4855 qlge_suspend(pdev, PMSG_SUSPEND);
4856}
4857
4858static struct pci_driver qlge_driver = {
4859 .name = DRV_NAME,
4860 .id_table = qlge_pci_tbl,
4861 .probe = qlge_probe,
4862 .remove = __devexit_p(qlge_remove),
4863#ifdef CONFIG_PM
4864 .suspend = qlge_suspend,
4865 .resume = qlge_resume,
4866#endif
4867 .shutdown = qlge_shutdown,
4868 .err_handler = &qlge_err_handler
4869};
4870
4871static int __init qlge_init_module(void)
4872{
4873 return pci_register_driver(&qlge_driver);
4874}
4875
4876static void __exit qlge_exit(void)
4877{
4878 pci_unregister_driver(&qlge_driver);
4879}
4880
4881module_init(qlge_init_module);
4882module_exit(qlge_exit);