1/*
2 * QLogic qlge NIC HBA Driver
3 * Copyright (c) 2003-2008 QLogic Corporation
4 * See LICENSE.qlge for copyright and licensing details.
5 * Author: Linux qlge network device driver by
6 * Ron Mercer <ron.mercer@qlogic.com>
7 */
8#include <linux/kernel.h>
9#include <linux/init.h>
10#include <linux/types.h>
11#include <linux/module.h>
12#include <linux/list.h>
13#include <linux/pci.h>
14#include <linux/dma-mapping.h>
15#include <linux/pagemap.h>
16#include <linux/sched.h>
17#include <linux/slab.h>
18#include <linux/dmapool.h>
19#include <linux/mempool.h>
20#include <linux/spinlock.h>
21#include <linux/kthread.h>
22#include <linux/interrupt.h>
23#include <linux/errno.h>
24#include <linux/ioport.h>
25#include <linux/in.h>
26#include <linux/ip.h>
27#include <linux/ipv6.h>
28#include <net/ipv6.h>
29#include <linux/tcp.h>
30#include <linux/udp.h>
31#include <linux/if_arp.h>
32#include <linux/if_ether.h>
33#include <linux/netdevice.h>
34#include <linux/etherdevice.h>
35#include <linux/ethtool.h>
36#include <linux/skbuff.h>
 37#include <linux/if_vlan.h>
38#include <linux/delay.h>
39#include <linux/mm.h>
40#include <linux/vmalloc.h>
 41#include <net/ip6_checksum.h>
42
43#include "qlge.h"
44
45char qlge_driver_name[] = DRV_NAME;
46const char qlge_driver_version[] = DRV_VERSION;
47
48MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
49MODULE_DESCRIPTION(DRV_STRING " ");
50MODULE_LICENSE("GPL");
51MODULE_VERSION(DRV_VERSION);
52
53static const u32 default_msg =
54 NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
55/* NETIF_MSG_TIMER | */
56 NETIF_MSG_IFDOWN |
57 NETIF_MSG_IFUP |
58 NETIF_MSG_RX_ERR |
59 NETIF_MSG_TX_ERR |
60/* NETIF_MSG_TX_QUEUED | */
61/* NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
62/* NETIF_MSG_PKTDATA | */
63 NETIF_MSG_HW | NETIF_MSG_WOL | 0;
64
65static int debug = 0x00007fff; /* defaults above */
66module_param(debug, int, 0);
67MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
68
69#define MSIX_IRQ 0
70#define MSI_IRQ 1
71#define LEG_IRQ 2
72static int irq_type = MSIX_IRQ;
73module_param(irq_type, int, MSIX_IRQ);
74MODULE_PARM_DESC(irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
75
76static struct pci_device_id qlge_pci_tbl[] __devinitdata = {
 77	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
 78	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
79 /* required last entry */
80 {0,}
81};
82
83MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
84
85/* This hardware semaphore causes exclusive access to
86 * resources shared between the NIC driver, MPI firmware,
87 * FCOE firmware and the FC driver.
88 */
89static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
90{
91 u32 sem_bits = 0;
92
93 switch (sem_mask) {
94 case SEM_XGMAC0_MASK:
95 sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
96 break;
97 case SEM_XGMAC1_MASK:
98 sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
99 break;
100 case SEM_ICB_MASK:
101 sem_bits = SEM_SET << SEM_ICB_SHIFT;
102 break;
103 case SEM_MAC_ADDR_MASK:
104 sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
105 break;
106 case SEM_FLASH_MASK:
107 sem_bits = SEM_SET << SEM_FLASH_SHIFT;
108 break;
109 case SEM_PROBE_MASK:
110 sem_bits = SEM_SET << SEM_PROBE_SHIFT;
111 break;
112 case SEM_RT_IDX_MASK:
113 sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
114 break;
115 case SEM_PROC_REG_MASK:
116 sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
117 break;
118 default:
119 QPRINTK(qdev, PROBE, ALERT, "Bad Semaphore mask!.\n");
120 return -EINVAL;
121 }
122
123 ql_write32(qdev, SEM, sem_bits | sem_mask);
124 return !(ql_read32(qdev, SEM) & sem_bits);
125}
126
127int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
128{
 129	unsigned int wait_count = 30;
130 do {
131 if (!ql_sem_trylock(qdev, sem_mask))
132 return 0;
133 udelay(100);
134 } while (--wait_count);
135 return -ETIMEDOUT;
136}
137
138void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
139{
140 ql_write32(qdev, SEM, sem_mask);
141 ql_read32(qdev, SEM); /* flush */
142}
143
144/* This function waits for a specific bit to come ready
145 * in a given register. It is used mostly by the initialize
146 * process, but is also used in kernel thread API such as
147 * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
148 */
149int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
150{
151 u32 temp;
152 int count = UDELAY_COUNT;
153
154 while (count) {
155 temp = ql_read32(qdev, reg);
156
157 /* check for errors */
158 if (temp & err_bit) {
159 QPRINTK(qdev, PROBE, ALERT,
160 "register 0x%.08x access error, value = 0x%.08x!.\n",
161 reg, temp);
162 return -EIO;
163 } else if (temp & bit)
164 return 0;
165 udelay(UDELAY_DELAY);
166 count--;
167 }
168 QPRINTK(qdev, PROBE, ALERT,
169 "Timed out waiting for reg %x to come ready.\n", reg);
170 return -ETIMEDOUT;
171}
172
173/* The CFG register is used to download TX and RX control blocks
174 * to the chip. This function waits for an operation to complete.
175 */
176static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
177{
178 int count = UDELAY_COUNT;
179 u32 temp;
180
181 while (count) {
182 temp = ql_read32(qdev, CFG);
183 if (temp & CFG_LE)
184 return -EIO;
185 if (!(temp & bit))
186 return 0;
187 udelay(UDELAY_DELAY);
188 count--;
189 }
190 return -ETIMEDOUT;
191}
192
193
194/* Used to issue init control blocks to hw. Maps control block,
195 * sets address, triggers download, waits for completion.
196 */
197int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
198 u16 q_id)
199{
200 u64 map;
201 int status = 0;
202 int direction;
203 u32 mask;
204 u32 value;
205
206 direction =
207 (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
208 PCI_DMA_FROMDEVICE;
209
210 map = pci_map_single(qdev->pdev, ptr, size, direction);
211 if (pci_dma_mapping_error(qdev->pdev, map)) {
212 QPRINTK(qdev, IFUP, ERR, "Couldn't map DMA area.\n");
213 return -ENOMEM;
214 }
215
216 status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
217 if (status)
218 return status;
219
220 status = ql_wait_cfg(qdev, bit);
221 if (status) {
222 QPRINTK(qdev, IFUP, ERR,
223 "Timed out waiting for CFG to come ready.\n");
224 goto exit;
225 }
226
227 ql_write32(qdev, ICB_L, (u32) map);
228 ql_write32(qdev, ICB_H, (u32) (map >> 32));
229
230 mask = CFG_Q_MASK | (bit << 16);
231 value = bit | (q_id << CFG_Q_SHIFT);
232 ql_write32(qdev, CFG, (mask | value));
233
234 /*
235 * Wait for the bit to clear after signaling hw.
236 */
237 status = ql_wait_cfg(qdev, bit);
238exit:
 239	ql_sem_unlock(qdev, SEM_ICB_MASK);	/* does flush too */
240 pci_unmap_single(qdev->pdev, map, size, direction);
241 return status;
242}
243
244/* Get a specific MAC address from the CAM. Used for debug and reg dump. */
245int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
246 u32 *value)
247{
248 u32 offset = 0;
249 int status;
250
251 switch (type) {
252 case MAC_ADDR_TYPE_MULTI_MAC:
253 case MAC_ADDR_TYPE_CAM_MAC:
254 {
255 status =
256 ql_wait_reg_rdy(qdev,
 257	MAC_ADDR_IDX, MAC_ADDR_MW, 0);
258 if (status)
259 goto exit;
260 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
261 (index << MAC_ADDR_IDX_SHIFT) | /* index */
262 MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
263 status =
264 ql_wait_reg_rdy(qdev,
 265	MAC_ADDR_IDX, MAC_ADDR_MR, 0);
266 if (status)
267 goto exit;
268 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
269 status =
270 ql_wait_reg_rdy(qdev,
 271	MAC_ADDR_IDX, MAC_ADDR_MW, 0);
272 if (status)
273 goto exit;
274 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
275 (index << MAC_ADDR_IDX_SHIFT) | /* index */
276 MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
277 status =
278 ql_wait_reg_rdy(qdev,
 279	MAC_ADDR_IDX, MAC_ADDR_MR, 0);
280 if (status)
281 goto exit;
282 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
283 if (type == MAC_ADDR_TYPE_CAM_MAC) {
284 status =
285 ql_wait_reg_rdy(qdev,
 286	MAC_ADDR_IDX, MAC_ADDR_MW, 0);
287 if (status)
288 goto exit;
289 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
290 (index << MAC_ADDR_IDX_SHIFT) | /* index */
291 MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
292 status =
293 ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
 294	MAC_ADDR_MR, 0);
295 if (status)
296 goto exit;
297 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
298 }
299 break;
300 }
301 case MAC_ADDR_TYPE_VLAN:
302 case MAC_ADDR_TYPE_MULTI_FLTR:
303 default:
304 QPRINTK(qdev, IFUP, CRIT,
305 "Address type %d not yet supported.\n", type);
306 status = -EPERM;
307 }
308exit:
309 return status;
310}
311
312/* Set up a MAC, multicast or VLAN address for the
313 * inbound frame matching.
314 */
315static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
316 u16 index)
317{
318 u32 offset = 0;
319 int status = 0;
320
321 switch (type) {
322 case MAC_ADDR_TYPE_MULTI_MAC:
323 {
324 u32 upper = (addr[0] << 8) | addr[1];
325 u32 lower = (addr[2] << 24) | (addr[3] << 16) |
326 (addr[4] << 8) | (addr[5]);
327
328 status =
329 ql_wait_reg_rdy(qdev,
330 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
331 if (status)
332 goto exit;
333 ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
334 (index << MAC_ADDR_IDX_SHIFT) |
335 type | MAC_ADDR_E);
336 ql_write32(qdev, MAC_ADDR_DATA, lower);
337 status =
338 ql_wait_reg_rdy(qdev,
339 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
340 if (status)
341 goto exit;
342 ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
343 (index << MAC_ADDR_IDX_SHIFT) |
344 type | MAC_ADDR_E);
345
346 ql_write32(qdev, MAC_ADDR_DATA, upper);
347 status =
348 ql_wait_reg_rdy(qdev,
349 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
350 if (status)
351 goto exit;
352 break;
353 }
354 case MAC_ADDR_TYPE_CAM_MAC:
355 {
356 u32 cam_output;
357 u32 upper = (addr[0] << 8) | addr[1];
358 u32 lower =
359 (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
360 (addr[5]);
361
 362	QPRINTK(qdev, IFUP, DEBUG,
 363	"Adding %s address %pM"
364 " at index %d in the CAM.\n",
365 ((type ==
366 MAC_ADDR_TYPE_MULTI_MAC) ? "MULTICAST" :
 367	"UNICAST"), addr, index);
368
369 status =
370 ql_wait_reg_rdy(qdev,
 371	MAC_ADDR_IDX, MAC_ADDR_MW, 0);
372 if (status)
373 goto exit;
374 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
375 (index << MAC_ADDR_IDX_SHIFT) | /* index */
376 type); /* type */
377 ql_write32(qdev, MAC_ADDR_DATA, lower);
378 status =
379 ql_wait_reg_rdy(qdev,
 380	MAC_ADDR_IDX, MAC_ADDR_MW, 0);
381 if (status)
382 goto exit;
383 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
384 (index << MAC_ADDR_IDX_SHIFT) | /* index */
385 type); /* type */
386 ql_write32(qdev, MAC_ADDR_DATA, upper);
387 status =
388 ql_wait_reg_rdy(qdev,
 389	MAC_ADDR_IDX, MAC_ADDR_MW, 0);
390 if (status)
391 goto exit;
392 ql_write32(qdev, MAC_ADDR_IDX, (offset) | /* offset */
393 (index << MAC_ADDR_IDX_SHIFT) | /* index */
394 type); /* type */
395 /* This field should also include the queue id
396 and possibly the function id. Right now we hardcode
397 the route field to NIC core.
398 */
399 cam_output = (CAM_OUT_ROUTE_NIC |
400 (qdev->
401 func << CAM_OUT_FUNC_SHIFT) |
402 (0 << CAM_OUT_CQ_ID_SHIFT));
403 if (qdev->vlgrp)
404 cam_output |= CAM_OUT_RV;
405 /* route to NIC core */
406 ql_write32(qdev, MAC_ADDR_DATA, cam_output);
407 break;
408 }
409 case MAC_ADDR_TYPE_VLAN:
410 {
411 u32 enable_bit = *((u32 *) &addr[0]);
412 /* For VLAN, the addr actually holds a bit that
413 * either enables or disables the vlan id we are
414 * addressing. It's either MAC_ADDR_E on or off.
415 * That's bit-27 we're talking about.
416 */
417 QPRINTK(qdev, IFUP, INFO, "%s VLAN ID %d %s the CAM.\n",
418 (enable_bit ? "Adding" : "Removing"),
419 index, (enable_bit ? "to" : "from"));
420
421 status =
422 ql_wait_reg_rdy(qdev,
 423	MAC_ADDR_IDX, MAC_ADDR_MW, 0);
424 if (status)
425 goto exit;
426 ql_write32(qdev, MAC_ADDR_IDX, offset | /* offset */
427 (index << MAC_ADDR_IDX_SHIFT) | /* index */
428 type | /* type */
429 enable_bit); /* enable/disable */
430 break;
431 }
432 case MAC_ADDR_TYPE_MULTI_FLTR:
433 default:
434 QPRINTK(qdev, IFUP, CRIT,
435 "Address type %d not yet supported.\n", type);
436 status = -EPERM;
437 }
438exit:
439 return status;
440}
441
442/* Set or clear MAC address in hardware. We sometimes
443 * have to clear it to prevent wrong frame routing
444 * especially in a bonding environment.
445 */
446static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
447{
448 int status;
449 char zero_mac_addr[ETH_ALEN];
450 char *addr;
451
452 if (set) {
453 addr = &qdev->ndev->dev_addr[0];
454 QPRINTK(qdev, IFUP, DEBUG,
455 "Set Mac addr %02x:%02x:%02x:%02x:%02x:%02x\n",
456 addr[0], addr[1], addr[2], addr[3],
457 addr[4], addr[5]);
458 } else {
459 memset(zero_mac_addr, 0, ETH_ALEN);
460 addr = &zero_mac_addr[0];
461 QPRINTK(qdev, IFUP, DEBUG,
462 "Clearing MAC address on %s\n",
463 qdev->ndev->name);
464 }
465 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
466 if (status)
467 return status;
468 status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
469 MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
470 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
471 if (status)
472 QPRINTK(qdev, IFUP, ERR, "Failed to init mac "
473 "address.\n");
474 return status;
475}
476
477void ql_link_on(struct ql_adapter *qdev)
478{
479 QPRINTK(qdev, LINK, ERR, "%s: Link is up.\n",
480 qdev->ndev->name);
481 netif_carrier_on(qdev->ndev);
482 ql_set_mac_addr(qdev, 1);
483}
484
485void ql_link_off(struct ql_adapter *qdev)
486{
487 QPRINTK(qdev, LINK, ERR, "%s: Link is down.\n",
488 qdev->ndev->name);
489 netif_carrier_off(qdev->ndev);
490 ql_set_mac_addr(qdev, 0);
491}
492
493/* Get a specific frame routing value from the CAM.
494 * Used for debug and reg dump.
495 */
496int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
497{
498 int status = 0;
499
 500	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
501 if (status)
502 goto exit;
503
504 ql_write32(qdev, RT_IDX,
505 RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
 506	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
507 if (status)
508 goto exit;
509 *value = ql_read32(qdev, RT_DATA);
510exit:
511 return status;
512}
513
514/* The NIC function for this chip has 16 routing indexes. Each one can be used
515 * to route different frame types to various inbound queues. We send broadcast/
516 * multicast/error frames to the default queue for slow handling,
517 * and CAM hit/RSS frames to the fast handling queues.
518 */
519static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
520 int enable)
521{
 522	int status = -EINVAL; /* Return error if no mask match. */
523 u32 value = 0;
524
525 QPRINTK(qdev, IFUP, DEBUG,
526 "%s %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s mask %s the routing reg.\n",
527 (enable ? "Adding" : "Removing"),
528 ((index == RT_IDX_ALL_ERR_SLOT) ? "MAC ERROR/ALL ERROR" : ""),
529 ((index == RT_IDX_IP_CSUM_ERR_SLOT) ? "IP CSUM ERROR" : ""),
530 ((index ==
531 RT_IDX_TCP_UDP_CSUM_ERR_SLOT) ? "TCP/UDP CSUM ERROR" : ""),
532 ((index == RT_IDX_BCAST_SLOT) ? "BROADCAST" : ""),
533 ((index == RT_IDX_MCAST_MATCH_SLOT) ? "MULTICAST MATCH" : ""),
534 ((index == RT_IDX_ALLMULTI_SLOT) ? "ALL MULTICAST MATCH" : ""),
535 ((index == RT_IDX_UNUSED6_SLOT) ? "UNUSED6" : ""),
536 ((index == RT_IDX_UNUSED7_SLOT) ? "UNUSED7" : ""),
537 ((index == RT_IDX_RSS_MATCH_SLOT) ? "RSS ALL/IPV4 MATCH" : ""),
538 ((index == RT_IDX_RSS_IPV6_SLOT) ? "RSS IPV6" : ""),
539 ((index == RT_IDX_RSS_TCP4_SLOT) ? "RSS TCP4" : ""),
540 ((index == RT_IDX_RSS_TCP6_SLOT) ? "RSS TCP6" : ""),
541 ((index == RT_IDX_CAM_HIT_SLOT) ? "CAM HIT" : ""),
542 ((index == RT_IDX_UNUSED013) ? "UNUSED13" : ""),
543 ((index == RT_IDX_UNUSED014) ? "UNUSED14" : ""),
544 ((index == RT_IDX_PROMISCUOUS_SLOT) ? "PROMISCUOUS" : ""),
545 (enable ? "to" : "from"));
546
547 switch (mask) {
548 case RT_IDX_CAM_HIT:
549 {
550 value = RT_IDX_DST_CAM_Q | /* dest */
551 RT_IDX_TYPE_NICQ | /* type */
552 (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
553 break;
554 }
555 case RT_IDX_VALID: /* Promiscuous Mode frames. */
556 {
557 value = RT_IDX_DST_DFLT_Q | /* dest */
558 RT_IDX_TYPE_NICQ | /* type */
559 (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
560 break;
561 }
562 case RT_IDX_ERR: /* Pass up MAC,IP,TCP/UDP error frames. */
563 {
564 value = RT_IDX_DST_DFLT_Q | /* dest */
565 RT_IDX_TYPE_NICQ | /* type */
566 (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
567 break;
568 }
569 case RT_IDX_BCAST: /* Pass up Broadcast frames to default Q. */
570 {
571 value = RT_IDX_DST_DFLT_Q | /* dest */
572 RT_IDX_TYPE_NICQ | /* type */
573 (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
574 break;
575 }
576 case RT_IDX_MCAST: /* Pass up All Multicast frames. */
577 {
 578	value = RT_IDX_DST_DFLT_Q |	/* dest */
579 RT_IDX_TYPE_NICQ | /* type */
580 (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
581 break;
582 }
583 case RT_IDX_MCAST_MATCH: /* Pass up matched Multicast frames. */
584 {
 585	value = RT_IDX_DST_DFLT_Q |	/* dest */
586 RT_IDX_TYPE_NICQ | /* type */
587 (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
588 break;
589 }
590 case RT_IDX_RSS_MATCH: /* Pass up matched RSS frames. */
591 {
592 value = RT_IDX_DST_RSS | /* dest */
593 RT_IDX_TYPE_NICQ | /* type */
594 (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
595 break;
596 }
597 case 0: /* Clear the E-bit on an entry. */
598 {
599 value = RT_IDX_DST_DFLT_Q | /* dest */
600 RT_IDX_TYPE_NICQ | /* type */
601 (index << RT_IDX_IDX_SHIFT);/* index */
602 break;
603 }
604 default:
605 QPRINTK(qdev, IFUP, ERR, "Mask type %d not yet supported.\n",
606 mask);
607 status = -EPERM;
608 goto exit;
609 }
610
611 if (value) {
612 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
613 if (status)
614 goto exit;
615 value |= (enable ? RT_IDX_E : 0);
616 ql_write32(qdev, RT_IDX, value);
617 ql_write32(qdev, RT_DATA, enable ? mask : 0);
618 }
619exit:
620 return status;
621}
622
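/* Globally enable/disable chip interrupts. The INTR_EN register takes
 * the enable bit in its low 16 bits and a write mask for that bit in the
 * upper 16 bits, so the disable path writes only the mask.
 */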
623static void ql_enable_interrupts(struct ql_adapter *qdev)
624{
625 ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
626}
627
628static void ql_disable_interrupts(struct ql_adapter *qdev)
629{
630 ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
631}
632
633/* If we're running with multiple MSI-X vectors then we enable on the fly.
634 * Otherwise, we may have multiple outstanding workers and don't want to
635 * enable until the last one finishes. In this case, the irq_cnt gets
 636 * incremented every time we queue a worker and decremented every time
637 * a worker finishes. Once it hits zero we enable the interrupt.
638 */
 639u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
 640{
641 u32 var = 0;
642 unsigned long hw_flags = 0;
643 struct intr_context *ctx = qdev->intr_context + intr;
644
645 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
646 /* Always enable if we're MSIX multi interrupts and
647 * it's not the default (zeroeth) interrupt.
648 */
 649	ql_write32(qdev, INTR_EN,
650 ctx->intr_en_mask);
651 var = ql_read32(qdev, STS);
652 return var;
 653	}
654
655 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
656 if (atomic_dec_and_test(&ctx->irq_cnt)) {
657 ql_write32(qdev, INTR_EN,
658 ctx->intr_en_mask);
659 var = ql_read32(qdev, STS);
660 }
661 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
662 return var;
663}
664
665static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
666{
667 u32 var = 0;
 668	struct intr_context *ctx;
 669
670 /* HW disables for us if we're MSIX multi interrupts and
671 * it's not the default (zeroeth) interrupt.
672 */
673 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
674 return 0;
675
676 ctx = qdev->intr_context + intr;
 677	spin_lock(&qdev->hw_lock);
 678	if (!atomic_read(&ctx->irq_cnt)) {
 679	ql_write32(qdev, INTR_EN,
 680	ctx->intr_dis_mask);
681 var = ql_read32(qdev, STS);
682 }
 683	atomic_inc(&ctx->irq_cnt);
 684	spin_unlock(&qdev->hw_lock);
685 return var;
686}
687
688static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
689{
690 int i;
691 for (i = 0; i < qdev->intr_count; i++) {
692 /* The enable call does a atomic_dec_and_test
693 * and enables only if the result is zero.
694 * So we precharge it here.
695 */
696 if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
697 i == 0))
698 atomic_set(&qdev->intr_context[i].irq_cnt, 1);
699 ql_enable_completion_interrupt(qdev, i);
700 }
701
702}
703
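/* Sanity check a flash image that was read into qdev->flash: verify the
 * four-character signature string and confirm that the 16-bit words of
 * the image checksum to zero. Returns non-zero on a bad signature or
 * checksum.
 */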
704static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
705{
706 int status, i;
707 u16 csum = 0;
708 __le16 *flash = (__le16 *)&qdev->flash;
709
710 status = strncmp((char *)&qdev->flash, str, 4);
711 if (status) {
712 QPRINTK(qdev, IFUP, ERR, "Invalid flash signature.\n");
713 return status;
714 }
715
716 for (i = 0; i < size; i++)
717 csum += le16_to_cpu(*flash++);
718
719 if (csum)
720 QPRINTK(qdev, IFUP, ERR,
721 "Invalid flash checksum, csum = 0x%.04x.\n", csum);
722
723 return csum;
724}
725
 726static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
727{
728 int status = 0;
729 /* wait for reg to come ready */
730 status = ql_wait_reg_rdy(qdev,
731 FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
732 if (status)
733 goto exit;
734 /* set up for reg read */
735 ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
736 /* wait for reg to come ready */
737 status = ql_wait_reg_rdy(qdev,
738 FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
739 if (status)
740 goto exit;
741 /* This data is stored on flash as an array of
742 * __le32. Since ql_read32() returns cpu endian
743 * we need to swap it back.
744 */
745 *data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
746exit:
747 return status;
748}
749
750static int ql_get_8000_flash_params(struct ql_adapter *qdev)
751{
752 u32 i, size;
753 int status;
754 __le32 *p = (__le32 *)&qdev->flash;
755 u32 offset;
 756	u8 mac_addr[6];
757
758 /* Get flash offset for function and adjust
759 * for dword access.
760 */
 761	if (!qdev->port)
762 offset = FUNC0_FLASH_OFFSET / sizeof(u32);
763 else
764 offset = FUNC1_FLASH_OFFSET / sizeof(u32);
765
766 if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
767 return -ETIMEDOUT;
768
769 size = sizeof(struct flash_params_8000) / sizeof(u32);
770 for (i = 0; i < size; i++, p++) {
771 status = ql_read_flash_word(qdev, i+offset, p);
772 if (status) {
773 QPRINTK(qdev, IFUP, ERR, "Error reading flash.\n");
774 goto exit;
775 }
776 }
777
778 status = ql_validate_flash(qdev,
779 sizeof(struct flash_params_8000) / sizeof(u16),
780 "8000");
781 if (status) {
782 QPRINTK(qdev, IFUP, ERR, "Invalid flash.\n");
783 status = -EINVAL;
784 goto exit;
785 }
786
787 /* Extract either manufacturer or BOFM modified
788 * MAC address.
789 */
790 if (qdev->flash.flash_params_8000.data_type1 == 2)
791 memcpy(mac_addr,
792 qdev->flash.flash_params_8000.mac_addr1,
793 qdev->ndev->addr_len);
794 else
795 memcpy(mac_addr,
796 qdev->flash.flash_params_8000.mac_addr,
797 qdev->ndev->addr_len);
798
799 if (!is_valid_ether_addr(mac_addr)) {
800 QPRINTK(qdev, IFUP, ERR, "Invalid MAC address.\n");
801 status = -EINVAL;
802 goto exit;
803 }
804
805 memcpy(qdev->ndev->dev_addr,
 806	mac_addr,
807 qdev->ndev->addr_len);
808
809exit:
810 ql_sem_unlock(qdev, SEM_FLASH_MASK);
811 return status;
812}
813
 814static int ql_get_8012_flash_params(struct ql_adapter *qdev)
815{
816 int i;
817 int status;
 818	__le32 *p = (__le32 *)&qdev->flash;
 819	u32 offset = 0;
 820	u32 size = sizeof(struct flash_params_8012) / sizeof(u32);
821
822 /* Second function's parameters follow the first
823 * function's.
824 */
 825	if (qdev->port)
 826	offset = size;
827
828 if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
829 return -ETIMEDOUT;
830
 831	for (i = 0; i < size; i++, p++) {
 832	status = ql_read_flash_word(qdev, i+offset, p);
833 if (status) {
834 QPRINTK(qdev, IFUP, ERR, "Error reading flash.\n");
835 goto exit;
836 }
837
838 }
839
840 status = ql_validate_flash(qdev,
841 sizeof(struct flash_params_8012) / sizeof(u16),
842 "8012");
843 if (status) {
844 QPRINTK(qdev, IFUP, ERR, "Invalid flash.\n");
845 status = -EINVAL;
846 goto exit;
847 }
848
849 if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
850 status = -EINVAL;
851 goto exit;
852 }
853
854 memcpy(qdev->ndev->dev_addr,
855 qdev->flash.flash_params_8012.mac_addr,
856 qdev->ndev->addr_len);
857
858exit:
859 ql_sem_unlock(qdev, SEM_FLASH_MASK);
860 return status;
861}
862
 863/* xgmac registers are located behind the xgmac_addr and xgmac_data
864 * register pair. Each read/write requires us to wait for the ready
865 * bit before reading/writing the data.
866 */
867static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
868{
869 int status;
870 /* wait for reg to come ready */
871 status = ql_wait_reg_rdy(qdev,
872 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
873 if (status)
874 return status;
875 /* write the data to the data reg */
876 ql_write32(qdev, XGMAC_DATA, data);
877 /* trigger the write */
878 ql_write32(qdev, XGMAC_ADDR, reg);
879 return status;
880}
881
 882/* xgmac registers are located behind the xgmac_addr and xgmac_data
883 * register pair. Each read/write requires us to wait for the ready
884 * bit before reading/writing the data.
885 */
886int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
887{
888 int status = 0;
889 /* wait for reg to come ready */
890 status = ql_wait_reg_rdy(qdev,
891 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
892 if (status)
893 goto exit;
894 /* set up for reg read */
895 ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
896 /* wait for reg to come ready */
897 status = ql_wait_reg_rdy(qdev,
898 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
899 if (status)
900 goto exit;
901 /* get the data */
902 *data = ql_read32(qdev, XGMAC_DATA);
903exit:
904 return status;
905}
906
907/* This is used for reading the 64-bit statistics regs. */
908int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
909{
910 int status = 0;
911 u32 hi = 0;
912 u32 lo = 0;
913
914 status = ql_read_xgmac_reg(qdev, reg, &lo);
915 if (status)
916 goto exit;
917
918 status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
919 if (status)
920 goto exit;
921
922 *data = (u64) lo | ((u64) hi << 32);
923
924exit:
925 return status;
926}
927
928static int ql_8000_port_initialize(struct ql_adapter *qdev)
929{
 930	int status;
931 /*
932 * Get MPI firmware version for driver banner
 933	 * and ethtool info.
934 */
935 status = ql_mb_about_fw(qdev);
936 if (status)
937 goto exit;
938 status = ql_mb_get_fw_state(qdev);
939 if (status)
940 goto exit;
941 /* Wake up a worker to get/set the TX/RX frame sizes. */
942 queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
943exit:
944 return status;
945}
946
947/* Take the MAC Core out of reset.
948 * Enable statistics counting.
949 * Take the transmitter/receiver out of reset.
950 * This functionality may be done in the MPI firmware at a
951 * later date.
952 */
 953static int ql_8012_port_initialize(struct ql_adapter *qdev)
954{
955 int status = 0;
956 u32 data;
957
958 if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
959 /* Another function has the semaphore, so
960 * wait for the port init bit to come ready.
961 */
962 QPRINTK(qdev, LINK, INFO,
963 "Another function has the semaphore, so wait for the port init bit to come ready.\n");
964 status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
965 if (status) {
966 QPRINTK(qdev, LINK, CRIT,
967 "Port initialize timed out.\n");
968 }
969 return status;
970 }
971
972 QPRINTK(qdev, LINK, INFO, "Got xgmac semaphore!.\n");
973 /* Set the core reset. */
974 status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
975 if (status)
976 goto end;
977 data |= GLOBAL_CFG_RESET;
978 status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
979 if (status)
980 goto end;
981
982 /* Clear the core reset and turn on jumbo for receiver. */
983 data &= ~GLOBAL_CFG_RESET; /* Clear core reset. */
984 data |= GLOBAL_CFG_JUMBO; /* Turn on jumbo. */
985 data |= GLOBAL_CFG_TX_STAT_EN;
986 data |= GLOBAL_CFG_RX_STAT_EN;
987 status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
988 if (status)
989 goto end;
990
 991	/* Enable transmitter, and clear its reset. */
992 status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
993 if (status)
994 goto end;
995 data &= ~TX_CFG_RESET; /* Clear the TX MAC reset. */
996 data |= TX_CFG_EN; /* Enable the transmitter. */
997 status = ql_write_xgmac_reg(qdev, TX_CFG, data);
998 if (status)
999 goto end;
1000
 1001	/* Enable receiver and clear its reset. */
1002 status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
1003 if (status)
1004 goto end;
1005 data &= ~RX_CFG_RESET; /* Clear the RX MAC reset. */
1006 data |= RX_CFG_EN; /* Enable the receiver. */
1007 status = ql_write_xgmac_reg(qdev, RX_CFG, data);
1008 if (status)
1009 goto end;
1010
1011 /* Turn on jumbo. */
1012 status =
1013 ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
1014 if (status)
1015 goto end;
1016 status =
1017 ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
1018 if (status)
1019 goto end;
1020
1021 /* Signal to the world that the port is enabled. */
1022 ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
1023end:
1024 ql_sem_unlock(qdev, qdev->xg_sem_mask);
1025 return status;
1026}
1027
1028static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
1029{
1030 return PAGE_SIZE << qdev->lbq_buf_order;
1031}
1032
 1033/* Get the next large buffer. */
 1034static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
1035{
1036 struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
1037 rx_ring->lbq_curr_idx++;
1038 if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
1039 rx_ring->lbq_curr_idx = 0;
1040 rx_ring->lbq_free_cnt++;
1041 return lbq_desc;
1042}
1043
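/* Return the next large buffer descriptor and sync its page chunk for
 * CPU access. The backing master page is unmapped once its last chunk
 * is being consumed.
 */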
1044static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
1045 struct rx_ring *rx_ring)
1046{
1047 struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);
1048
1049 pci_dma_sync_single_for_cpu(qdev->pdev,
1050 pci_unmap_addr(lbq_desc, mapaddr),
1051 rx_ring->lbq_buf_size,
1052 PCI_DMA_FROMDEVICE);
1053
1054 /* If it's the last chunk of our master page then
1055 * we unmap it.
1056 */
1057 if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size)
1058 == ql_lbq_block_size(qdev))
1059 pci_unmap_page(qdev->pdev,
1060 lbq_desc->p.pg_chunk.map,
1061 ql_lbq_block_size(qdev),
1062 PCI_DMA_FROMDEVICE);
1063 return lbq_desc;
1064}
1065
 1066/* Get the next small buffer. */
 1067static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
1068{
1069 struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
1070 rx_ring->sbq_curr_idx++;
1071 if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
1072 rx_ring->sbq_curr_idx = 0;
1073 rx_ring->sbq_free_cnt++;
1074 return sbq_desc;
1075}
1076
1077/* Update an rx ring index. */
1078static void ql_update_cq(struct rx_ring *rx_ring)
1079{
1080 rx_ring->cnsmr_idx++;
1081 rx_ring->curr_entry++;
1082 if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
1083 rx_ring->cnsmr_idx = 0;
1084 rx_ring->curr_entry = rx_ring->cq_base;
1085 }
1086}
1087
1088static void ql_write_cq_idx(struct rx_ring *rx_ring)
1089{
1090 ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
1091}
1092
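/* Carve the next buffer-sized chunk out of the rx ring's current master
 * page, allocating and DMA-mapping a fresh page of lbq_buf_order when
 * the previous one has been fully handed out.
 */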
1093static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
1094 struct bq_desc *lbq_desc)
1095{
1096 if (!rx_ring->pg_chunk.page) {
1097 u64 map;
1098 rx_ring->pg_chunk.page = alloc_pages(__GFP_COLD | __GFP_COMP |
1099 GFP_ATOMIC,
1100 qdev->lbq_buf_order);
1101 if (unlikely(!rx_ring->pg_chunk.page)) {
1102 QPRINTK(qdev, DRV, ERR,
1103 "page allocation failed.\n");
1104 return -ENOMEM;
1105 }
1106 rx_ring->pg_chunk.offset = 0;
1107 map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page,
1108 0, ql_lbq_block_size(qdev),
1109 PCI_DMA_FROMDEVICE);
1110 if (pci_dma_mapping_error(qdev->pdev, map)) {
1111 __free_pages(rx_ring->pg_chunk.page,
1112 qdev->lbq_buf_order);
1113 QPRINTK(qdev, DRV, ERR,
1114 "PCI mapping failed.\n");
1115 return -ENOMEM;
1116 }
1117 rx_ring->pg_chunk.map = map;
1118 rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page);
1119 }
1120
1121 /* Copy the current master pg_chunk info
1122 * to the current descriptor.
1123 */
1124 lbq_desc->p.pg_chunk = rx_ring->pg_chunk;
1125
1126 /* Adjust the master page chunk for next
1127 * buffer get.
1128 */
1129 rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size;
1130 if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) {
1131 rx_ring->pg_chunk.page = NULL;
1132 lbq_desc->p.pg_chunk.last_flag = 1;
1133 } else {
1134 rx_ring->pg_chunk.va += rx_ring->lbq_buf_size;
1135 get_page(rx_ring->pg_chunk.page);
1136 lbq_desc->p.pg_chunk.last_flag = 0;
1137 }
1138 return 0;
1139}
1140/* Process (refill) a large buffer queue. */
1141static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1142{
1143 u32 clean_idx = rx_ring->lbq_clean_idx;
1144 u32 start_idx = clean_idx;
 1145	struct bq_desc *lbq_desc;
1146 u64 map;
1147 int i;
1148
 1149	while (rx_ring->lbq_free_cnt > 32) {
1150 for (i = 0; i < 16; i++) {
1151 QPRINTK(qdev, RX_STATUS, DEBUG,
1152 "lbq: try cleaning clean_idx = %d.\n",
1153 clean_idx);
1154 lbq_desc = &rx_ring->lbq[clean_idx];
1155 if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
1156 QPRINTK(qdev, IFUP, ERR,
1157 "Could not get a page chunk.\n");
1158 return;
1159 }
1160
1161 map = lbq_desc->p.pg_chunk.map +
1162 lbq_desc->p.pg_chunk.offset;
 1163	pci_unmap_addr_set(lbq_desc, mapaddr, map);
1164 pci_unmap_len_set(lbq_desc, maplen,
1165 rx_ring->lbq_buf_size);
 1166	*lbq_desc->addr = cpu_to_le64(map);
1167
1168 pci_dma_sync_single_for_device(qdev->pdev, map,
1169 rx_ring->lbq_buf_size,
1170 PCI_DMA_FROMDEVICE);
1171 clean_idx++;
1172 if (clean_idx == rx_ring->lbq_len)
1173 clean_idx = 0;
1174 }
1175
1176 rx_ring->lbq_clean_idx = clean_idx;
1177 rx_ring->lbq_prod_idx += 16;
1178 if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
1179 rx_ring->lbq_prod_idx = 0;
1180 rx_ring->lbq_free_cnt -= 16;
1181 }
1182
1183 if (start_idx != clean_idx) {
1184 QPRINTK(qdev, RX_STATUS, DEBUG,
1185 "lbq: updating prod idx = %d.\n",
1186 rx_ring->lbq_prod_idx);
1187 ql_write_db_reg(rx_ring->lbq_prod_idx,
1188 rx_ring->lbq_prod_idx_db_reg);
1189 }
1190}
1191
1192/* Process (refill) a small buffer queue. */
1193static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1194{
1195 u32 clean_idx = rx_ring->sbq_clean_idx;
1196 u32 start_idx = clean_idx;
 1197	struct bq_desc *sbq_desc;
1198 u64 map;
1199 int i;
1200
1201 while (rx_ring->sbq_free_cnt > 16) {
1202 for (i = 0; i < 16; i++) {
1203 sbq_desc = &rx_ring->sbq[clean_idx];
1204 QPRINTK(qdev, RX_STATUS, DEBUG,
1205 "sbq: try cleaning clean_idx = %d.\n",
1206 clean_idx);
c4e84bde
RM
1207 if (sbq_desc->p.skb == NULL) {
1208 QPRINTK(qdev, RX_STATUS, DEBUG,
1209 "sbq: getting new skb for index %d.\n",
1210 sbq_desc->index);
1211 sbq_desc->p.skb =
1212 netdev_alloc_skb(qdev->ndev,
 1213	SMALL_BUFFER_SIZE);
1214 if (sbq_desc->p.skb == NULL) {
1215 QPRINTK(qdev, PROBE, ERR,
1216 "Couldn't get an skb.\n");
1217 rx_ring->sbq_clean_idx = clean_idx;
1218 return;
1219 }
1220 skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
1221 map = pci_map_single(qdev->pdev,
1222 sbq_desc->p.skb->data,
1223 rx_ring->sbq_buf_size,
1224 PCI_DMA_FROMDEVICE);
1225 if (pci_dma_mapping_error(qdev->pdev, map)) {
1226 QPRINTK(qdev, IFUP, ERR, "PCI mapping failed.\n");
1227 rx_ring->sbq_clean_idx = clean_idx;
1228 dev_kfree_skb_any(sbq_desc->p.skb);
1229 sbq_desc->p.skb = NULL;
1230 return;
1231 }
1232 pci_unmap_addr_set(sbq_desc, mapaddr, map);
1233 pci_unmap_len_set(sbq_desc, maplen,
 1234	rx_ring->sbq_buf_size);
 1235	*sbq_desc->addr = cpu_to_le64(map);
1236 }
1237
1238 clean_idx++;
1239 if (clean_idx == rx_ring->sbq_len)
1240 clean_idx = 0;
1241 }
1242 rx_ring->sbq_clean_idx = clean_idx;
1243 rx_ring->sbq_prod_idx += 16;
1244 if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
1245 rx_ring->sbq_prod_idx = 0;
1246 rx_ring->sbq_free_cnt -= 16;
1247 }
1248
1249 if (start_idx != clean_idx) {
1250 QPRINTK(qdev, RX_STATUS, DEBUG,
1251 "sbq: updating prod idx = %d.\n",
1252 rx_ring->sbq_prod_idx);
1253 ql_write_db_reg(rx_ring->sbq_prod_idx,
1254 rx_ring->sbq_prod_idx_db_reg);
1255 }
1256}
1257
1258static void ql_update_buffer_queues(struct ql_adapter *qdev,
1259 struct rx_ring *rx_ring)
1260{
1261 ql_update_sbq(qdev, rx_ring);
1262 ql_update_lbq(qdev, rx_ring);
1263}
1264
1265/* Unmaps tx buffers. Can be called from send() if a pci mapping
1266 * fails at some stage, or from the interrupt when a tx completes.
1267 */
1268static void ql_unmap_send(struct ql_adapter *qdev,
1269 struct tx_ring_desc *tx_ring_desc, int mapped)
1270{
1271 int i;
1272 for (i = 0; i < mapped; i++) {
1273 if (i == 0 || (i == 7 && mapped > 7)) {
1274 /*
1275 * Unmap the skb->data area, or the
1276 * external sglist (AKA the Outbound
1277 * Address List (OAL)).
 1278	 * If it's the zeroeth element, then it's
1279 * the skb->data area. If it's the 7th
1280 * element and there is more than 6 frags,
1281 * then its an OAL.
1282 */
1283 if (i == 7) {
1284 QPRINTK(qdev, TX_DONE, DEBUG,
1285 "unmapping OAL area.\n");
1286 }
1287 pci_unmap_single(qdev->pdev,
1288 pci_unmap_addr(&tx_ring_desc->map[i],
1289 mapaddr),
1290 pci_unmap_len(&tx_ring_desc->map[i],
1291 maplen),
1292 PCI_DMA_TODEVICE);
1293 } else {
1294 QPRINTK(qdev, TX_DONE, DEBUG, "unmapping frag %d.\n",
1295 i);
1296 pci_unmap_page(qdev->pdev,
1297 pci_unmap_addr(&tx_ring_desc->map[i],
1298 mapaddr),
1299 pci_unmap_len(&tx_ring_desc->map[i],
1300 maplen), PCI_DMA_TODEVICE);
1301 }
1302 }
1303
1304}
1305
1306/* Map the buffers for this transmit. This will return
1307 * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
1308 */
1309static int ql_map_send(struct ql_adapter *qdev,
1310 struct ob_mac_iocb_req *mac_iocb_ptr,
1311 struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
1312{
1313 int len = skb_headlen(skb);
1314 dma_addr_t map;
1315 int frag_idx, err, map_idx = 0;
1316 struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
1317 int frag_cnt = skb_shinfo(skb)->nr_frags;
1318
1319 if (frag_cnt) {
1320 QPRINTK(qdev, TX_QUEUED, DEBUG, "frag_cnt = %d.\n", frag_cnt);
1321 }
1322 /*
1323 * Map the skb buffer first.
1324 */
1325 map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
1326
1327 err = pci_dma_mapping_error(qdev->pdev, map);
1328 if (err) {
1329 QPRINTK(qdev, TX_QUEUED, ERR,
1330 "PCI mapping failed with error: %d\n", err);
1331
1332 return NETDEV_TX_BUSY;
1333 }
1334
1335 tbd->len = cpu_to_le32(len);
1336 tbd->addr = cpu_to_le64(map);
1337 pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1338 pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
1339 map_idx++;
1340
1341 /*
1342 * This loop fills the remainder of the 8 address descriptors
1343 * in the IOCB. If there are more than 7 fragments, then the
1344 * eighth address desc will point to an external list (OAL).
1345 * When this happens, the remainder of the frags will be stored
1346 * in this list.
1347 */
1348 for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
1349 skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
1350 tbd++;
1351 if (frag_idx == 6 && frag_cnt > 7) {
1352 /* Let's tack on an sglist.
1353 * Our control block will now
1354 * look like this:
1355 * iocb->seg[0] = skb->data
1356 * iocb->seg[1] = frag[0]
1357 * iocb->seg[2] = frag[1]
1358 * iocb->seg[3] = frag[2]
1359 * iocb->seg[4] = frag[3]
1360 * iocb->seg[5] = frag[4]
1361 * iocb->seg[6] = frag[5]
1362 * iocb->seg[7] = ptr to OAL (external sglist)
1363 * oal->seg[0] = frag[6]
1364 * oal->seg[1] = frag[7]
1365 * oal->seg[2] = frag[8]
1366 * oal->seg[3] = frag[9]
1367 * oal->seg[4] = frag[10]
1368 * etc...
1369 */
1370 /* Tack on the OAL in the eighth segment of IOCB. */
1371 map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
1372 sizeof(struct oal),
1373 PCI_DMA_TODEVICE);
1374 err = pci_dma_mapping_error(qdev->pdev, map);
1375 if (err) {
1376 QPRINTK(qdev, TX_QUEUED, ERR,
1377 "PCI mapping outbound address list with error: %d\n",
1378 err);
1379 goto map_error;
1380 }
1381
1382 tbd->addr = cpu_to_le64(map);
1383 /*
1384 * The length is the number of fragments
1385 * that remain to be mapped times the length
1386 * of our sglist (OAL).
1387 */
1388 tbd->len =
1389 cpu_to_le32((sizeof(struct tx_buf_desc) *
1390 (frag_cnt - frag_idx)) | TX_DESC_C);
1391 pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
1392 map);
1393 pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1394 sizeof(struct oal));
1395 tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
1396 map_idx++;
1397 }
1398
1399 map =
1400 pci_map_page(qdev->pdev, frag->page,
1401 frag->page_offset, frag->size,
1402 PCI_DMA_TODEVICE);
1403
1404 err = pci_dma_mapping_error(qdev->pdev, map);
1405 if (err) {
1406 QPRINTK(qdev, TX_QUEUED, ERR,
1407 "PCI mapping frags failed with error: %d.\n",
1408 err);
1409 goto map_error;
1410 }
1411
1412 tbd->addr = cpu_to_le64(map);
1413 tbd->len = cpu_to_le32(frag->size);
1414 pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1415 pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1416 frag->size);
1417
1418 }
1419 /* Save the number of segments we've mapped. */
1420 tx_ring_desc->map_cnt = map_idx;
1421 /* Terminate the last segment. */
1422 tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
1423 return NETDEV_TX_OK;
1424
1425map_error:
1426 /*
1427 * If the first frag mapping failed, then i will be zero.
1428 * This causes the unmap of the skb->data area. Otherwise
1429 * we pass in the number of frags that mapped successfully
 1430	 * so they can be unmapped.
1431 */
1432 ql_unmap_send(qdev, tx_ring_desc, map_idx);
1433 return NETDEV_TX_BUSY;
1434}
1435
 1436static void ql_realign_skb(struct sk_buff *skb, int len)
1437{
1438 void *temp_addr = skb->data;
1439
1440 /* Undo the skb_reserve(skb,32) we did before
1441 * giving to hardware, and realign data on
1442 * a 2-byte boundary.
1443 */
1444 skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
1445 skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
1446 skb_copy_to_linear_data(skb, temp_addr,
1447 (unsigned int)len);
1448}
1449
1450/*
1451 * This function builds an skb for the given inbound
1452 * completion. It will be rewritten for readability in the near
 1453 * future, but for now it works well.
1454 */
1455static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1456 struct rx_ring *rx_ring,
1457 struct ib_mac_iocb_rsp *ib_mac_rsp)
1458{
1459 struct bq_desc *lbq_desc;
1460 struct bq_desc *sbq_desc;
1461 struct sk_buff *skb = NULL;
1462 u32 length = le32_to_cpu(ib_mac_rsp->data_len);
1463 u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
1464
1465 /*
1466 * Handle the header buffer if present.
1467 */
1468 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
1469 ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1470 QPRINTK(qdev, RX_STATUS, DEBUG, "Header of %d bytes in small buffer.\n", hdr_len);
1471 /*
1472 * Headers fit nicely into a small buffer.
1473 */
1474 sbq_desc = ql_get_curr_sbuf(rx_ring);
1475 pci_unmap_single(qdev->pdev,
1476 pci_unmap_addr(sbq_desc, mapaddr),
1477 pci_unmap_len(sbq_desc, maplen),
1478 PCI_DMA_FROMDEVICE);
1479 skb = sbq_desc->p.skb;
1480 ql_realign_skb(skb, hdr_len);
1481 skb_put(skb, hdr_len);
1482 sbq_desc->p.skb = NULL;
1483 }
1484
1485 /*
1486 * Handle the data buffer(s).
1487 */
1488 if (unlikely(!length)) { /* Is there data too? */
1489 QPRINTK(qdev, RX_STATUS, DEBUG,
1490 "No Data buffer in this packet.\n");
1491 return skb;
1492 }
1493
1494 if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
1495 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1496 QPRINTK(qdev, RX_STATUS, DEBUG,
1497 "Headers in small, data of %d bytes in small, combine them.\n", length);
1498 /*
1499 * Data is less than small buffer size so it's
1500 * stuffed in a small buffer.
1501 * For this case we append the data
1502 * from the "data" small buffer to the "header" small
1503 * buffer.
1504 */
1505 sbq_desc = ql_get_curr_sbuf(rx_ring);
1506 pci_dma_sync_single_for_cpu(qdev->pdev,
1507 pci_unmap_addr
1508 (sbq_desc, mapaddr),
1509 pci_unmap_len
1510 (sbq_desc, maplen),
1511 PCI_DMA_FROMDEVICE);
1512 memcpy(skb_put(skb, length),
1513 sbq_desc->p.skb->data, length);
1514 pci_dma_sync_single_for_device(qdev->pdev,
1515 pci_unmap_addr
1516 (sbq_desc,
1517 mapaddr),
1518 pci_unmap_len
1519 (sbq_desc,
1520 maplen),
1521 PCI_DMA_FROMDEVICE);
1522 } else {
1523 QPRINTK(qdev, RX_STATUS, DEBUG,
1524 "%d bytes in a single small buffer.\n", length);
1525 sbq_desc = ql_get_curr_sbuf(rx_ring);
1526 skb = sbq_desc->p.skb;
1527 ql_realign_skb(skb, length);
1528 skb_put(skb, length);
1529 pci_unmap_single(qdev->pdev,
1530 pci_unmap_addr(sbq_desc,
1531 mapaddr),
1532 pci_unmap_len(sbq_desc,
1533 maplen),
1534 PCI_DMA_FROMDEVICE);
1535 sbq_desc->p.skb = NULL;
1536 }
1537 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
1538 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1539 QPRINTK(qdev, RX_STATUS, DEBUG,
1540 "Header in small, %d bytes in large. Chain large to small!\n", length);
1541 /*
1542 * The data is in a single large buffer. We
1543 * chain it to the header buffer's skb and let
1544 * it rip.
1545 */
 1546	lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
 1547	QPRINTK(qdev, RX_STATUS, DEBUG,
1548 "Chaining page at offset = %d,"
1549 "for %d bytes to skb.\n",
1550 lbq_desc->p.pg_chunk.offset, length);
1551 skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1552 lbq_desc->p.pg_chunk.offset,
1553 length);
1554 skb->len += length;
1555 skb->data_len += length;
1556 skb->truesize += length;
1557 } else {
1558 /*
1559 * The headers and data are in a single large buffer. We
1560 * copy it to a new skb and let it go. This can happen with
1561 * jumbo mtu on a non-TCP/UDP frame.
1562 */
 1563	lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1564 skb = netdev_alloc_skb(qdev->ndev, length);
1565 if (skb == NULL) {
1566 QPRINTK(qdev, PROBE, DEBUG,
1567 "No skb available, drop the packet.\n");
1568 return NULL;
1569 }
1570 pci_unmap_page(qdev->pdev,
1571 pci_unmap_addr(lbq_desc,
1572 mapaddr),
1573 pci_unmap_len(lbq_desc, maplen),
1574 PCI_DMA_FROMDEVICE);
1575 skb_reserve(skb, NET_IP_ALIGN);
1576 QPRINTK(qdev, RX_STATUS, DEBUG,
1577 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n", length);
1578 skb_fill_page_desc(skb, 0,
1579 lbq_desc->p.pg_chunk.page,
1580 lbq_desc->p.pg_chunk.offset,
1581 length);
1582 skb->len += length;
1583 skb->data_len += length;
1584 skb->truesize += length;
1585 length -= length;
1586 __pskb_pull_tail(skb,
1587 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1588 VLAN_ETH_HLEN : ETH_HLEN);
1589 }
1590 } else {
1591 /*
1592 * The data is in a chain of large buffers
1593 * pointed to by a small buffer. We loop
 1594	 * thru and chain them to our small header
1595 * buffer's skb.
1596 * frags: There are 18 max frags and our small
1597 * buffer will hold 32 of them. The thing is,
1598 * we'll use 3 max for our 9000 byte jumbo
1599 * frames. If the MTU goes up we could
1600 * eventually be in trouble.
1601 */
 1602	int size, i = 0;
1603 sbq_desc = ql_get_curr_sbuf(rx_ring);
1604 pci_unmap_single(qdev->pdev,
1605 pci_unmap_addr(sbq_desc, mapaddr),
1606 pci_unmap_len(sbq_desc, maplen),
1607 PCI_DMA_FROMDEVICE);
1608 if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
1609 /*
1610 * This is an non TCP/UDP IP frame, so
1611 * the headers aren't split into a small
1612 * buffer. We have to use the small buffer
1613 * that contains our sg list as our skb to
1614 * send upstairs. Copy the sg list here to
1615 * a local buffer and use it to find the
1616 * pages to chain.
1617 */
1618 QPRINTK(qdev, RX_STATUS, DEBUG,
1619 "%d bytes of headers & data in chain of large.\n", length);
1620 skb = sbq_desc->p.skb;
1621 sbq_desc->p.skb = NULL;
1622 skb_reserve(skb, NET_IP_ALIGN);
1623 }
1624 while (length > 0) {
1625 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1626 size = (length < rx_ring->lbq_buf_size) ? length :
1627 rx_ring->lbq_buf_size;
1628
1629 QPRINTK(qdev, RX_STATUS, DEBUG,
1630 "Adding page %d to skb for %d bytes.\n",
1631 i, size);
1632 skb_fill_page_desc(skb, i,
1633 lbq_desc->p.pg_chunk.page,
1634 lbq_desc->p.pg_chunk.offset,
1635 size);
1636 skb->len += size;
1637 skb->data_len += size;
1638 skb->truesize += size;
1639 length -= size;
1640 i++;
1641 }
1642 __pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1643 VLAN_ETH_HLEN : ETH_HLEN);
1644 }
1645 return skb;
1646}
1647
1648/* Process an inbound completion from an rx ring. */
1649static void ql_process_mac_rx_intr(struct ql_adapter *qdev,
1650 struct rx_ring *rx_ring,
1651 struct ib_mac_iocb_rsp *ib_mac_rsp)
1652{
1653 struct net_device *ndev = qdev->ndev;
1654 struct sk_buff *skb = NULL;
1655 u16 vlan_id = (le16_to_cpu(ib_mac_rsp->vlan_id) &
1656 IB_MAC_IOCB_RSP_VLAN_MASK)
1657
1658 QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
1659
1660 skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
1661 if (unlikely(!skb)) {
1662 QPRINTK(qdev, RX_STATUS, DEBUG,
1663 "No skb available, drop packet.\n");
1664 return;
1665 }
1666
1667 /* Frame error, so drop the packet. */
1668 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1669 QPRINTK(qdev, DRV, ERR, "Receive error, flags2 = 0x%x\n",
1670 ib_mac_rsp->flags2);
1671 dev_kfree_skb_any(skb);
1672 return;
1673 }
1674
1675 /* The max framesize filter on this chip is set higher than
1676 * MTU since FCoE uses 2k frames.
1677 */
1678 if (skb->len > ndev->mtu + ETH_HLEN) {
1679 dev_kfree_skb_any(skb);
1680 return;
1681 }
1682
1683 /* loopback self test for ethtool */
1684 if (test_bit(QL_SELFTEST, &qdev->flags)) {
1685 ql_check_lb_frame(qdev, skb);
1686 dev_kfree_skb_any(skb);
1687 return;
1688 }
1689
1690 prefetch(skb->data);
1691 skb->dev = ndev;
1692 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1693 QPRINTK(qdev, RX_STATUS, DEBUG, "%s%s%s Multicast.\n",
1694 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1695 IB_MAC_IOCB_RSP_M_HASH ? "Hash" : "",
1696 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1697 IB_MAC_IOCB_RSP_M_REG ? "Registered" : "",
1698 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1699 IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1700 }
1701 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
1702 QPRINTK(qdev, RX_STATUS, DEBUG, "Promiscuous Packet.\n");
1703 }
 1704
1705 skb->protocol = eth_type_trans(skb, ndev);
1706 skb->ip_summed = CHECKSUM_NONE;
1707
1708 /* If rx checksum is on, and there are no
1709 * csum or frame errors.
1710 */
1711 if (qdev->rx_csum &&
1712 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1713 /* TCP frame. */
1714 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1715 QPRINTK(qdev, RX_STATUS, DEBUG,
1716 "TCP checksum done!\n");
1717 skb->ip_summed = CHECKSUM_UNNECESSARY;
1718 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1719 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1720 /* Unfragmented ipv4 UDP frame. */
1721 struct iphdr *iph = (struct iphdr *) skb->data;
1722 if (!(iph->frag_off &
1723 cpu_to_be16(IP_MF|IP_OFFSET))) {
1724 skb->ip_summed = CHECKSUM_UNNECESSARY;
1725 QPRINTK(qdev, RX_STATUS, DEBUG,
1726 "TCP checksum done!\n");
1727 }
1728 }
 1729	}
 1730
1731 ndev->stats.rx_packets++;
1732 ndev->stats.rx_bytes += skb->len;
 1733	skb_record_rx_queue(skb, rx_ring->cq_id);
1734 if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
1735 if (qdev->vlgrp &&
1736 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
1737 (vlan_id != 0))
1738 vlan_gro_receive(&rx_ring->napi, qdev->vlgrp,
1739 vlan_id, skb);
1740 else
1741 napi_gro_receive(&rx_ring->napi, skb);
 1742	} else {
1743 if (qdev->vlgrp &&
1744 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
1745 (vlan_id != 0))
1746 vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
1747 else
1748 netif_receive_skb(skb);
 1749	}
1750}
1751
1752/* Process an outbound completion from an rx ring. */
1753static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
1754 struct ob_mac_iocb_rsp *mac_rsp)
1755{
 1756	struct net_device *ndev = qdev->ndev;
1757 struct tx_ring *tx_ring;
1758 struct tx_ring_desc *tx_ring_desc;
1759
1760 QL_DUMP_OB_MAC_RSP(mac_rsp);
1761 tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
1762 tx_ring_desc = &tx_ring->q[mac_rsp->tid];
1763 ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
1764 ndev->stats.tx_bytes += (tx_ring_desc->skb)->len;
1765 ndev->stats.tx_packets++;
1766 dev_kfree_skb(tx_ring_desc->skb);
1767 tx_ring_desc->skb = NULL;
1768
1769 if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
1770 OB_MAC_IOCB_RSP_S |
1771 OB_MAC_IOCB_RSP_L |
1772 OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
1773 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
1774 QPRINTK(qdev, TX_DONE, WARNING,
1775 "Total descriptor length did not match transfer length.\n");
1776 }
1777 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
1778 QPRINTK(qdev, TX_DONE, WARNING,
1779 "Frame too short to be legal, not sent.\n");
1780 }
1781 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
1782 QPRINTK(qdev, TX_DONE, WARNING,
1783 "Frame too long, but sent anyway.\n");
1784 }
1785 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
1786 QPRINTK(qdev, TX_DONE, WARNING,
1787 "PCI backplane error. Frame not sent.\n");
1788 }
1789 }
1790 atomic_inc(&tx_ring->tx_count);
1791}
1792
1793/* Fire up a handler to reset the MPI processor. */
1794void ql_queue_fw_error(struct ql_adapter *qdev)
1795{
 1796	ql_link_off(qdev);
1797 queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
1798}
1799
1800void ql_queue_asic_error(struct ql_adapter *qdev)
1801{
 1802	ql_link_off(qdev);
 1803	ql_disable_interrupts(qdev);
1804 /* Clear adapter up bit to signal the recovery
1805 * process that it shouldn't kill the reset worker
1806 * thread
1807 */
1808 clear_bit(QL_ADAPTER_UP, &qdev->flags);
1809 queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
1810}
1811
1812static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
1813 struct ib_ae_iocb_rsp *ib_ae_rsp)
1814{
1815 switch (ib_ae_rsp->event) {
1816 case MGMT_ERR_EVENT:
1817 QPRINTK(qdev, RX_ERR, ERR,
1818 "Management Processor Fatal Error.\n");
1819 ql_queue_fw_error(qdev);
1820 return;
1821
1822 case CAM_LOOKUP_ERR_EVENT:
1823 QPRINTK(qdev, LINK, ERR,
1824 "Multiple CAM hits lookup occurred.\n");
1825 QPRINTK(qdev, DRV, ERR, "This event shouldn't occur.\n");
1826 ql_queue_asic_error(qdev);
1827 return;
1828
1829 case SOFT_ECC_ERROR_EVENT:
1830 QPRINTK(qdev, RX_ERR, ERR, "Soft ECC error detected.\n");
1831 ql_queue_asic_error(qdev);
1832 break;
1833
1834 case PCI_ERR_ANON_BUF_RD:
1835 QPRINTK(qdev, RX_ERR, ERR,
1836 "PCI error occurred when reading anonymous buffers from rx_ring %d.\n",
1837 ib_ae_rsp->q_id);
1838 ql_queue_asic_error(qdev);
1839 break;
1840
1841 default:
1842 QPRINTK(qdev, DRV, ERR, "Unexpected event %d.\n",
1843 ib_ae_rsp->event);
1844 ql_queue_asic_error(qdev);
1845 break;
1846 }
1847}
1848
1849static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
1850{
1851 struct ql_adapter *qdev = rx_ring->qdev;
ba7cd3ba 1852 u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
c4e84bde
RM
1853 struct ob_mac_iocb_rsp *net_rsp = NULL;
1854 int count = 0;
1855
1e213303 1856 struct tx_ring *tx_ring;
c4e84bde
RM
1857 /* While there are entries in the completion queue. */
1858 while (prod != rx_ring->cnsmr_idx) {
1859
1860 QPRINTK(qdev, RX_STATUS, DEBUG,
 1861 "cq_id = %d, prod = %d, cnsmr = %d.\n", rx_ring->cq_id,
1862 prod, rx_ring->cnsmr_idx);
1863
1864 net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
1865 rmb();
1866 switch (net_rsp->opcode) {
1867
1868 case OPCODE_OB_MAC_TSO_IOCB:
1869 case OPCODE_OB_MAC_IOCB:
1870 ql_process_mac_tx_intr(qdev, net_rsp);
1871 break;
1872 default:
1873 QPRINTK(qdev, RX_STATUS, DEBUG,
1874 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
1875 net_rsp->opcode);
1876 }
1877 count++;
1878 ql_update_cq(rx_ring);
ba7cd3ba 1879 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
c4e84bde
RM
1880 }
1881 ql_write_cq_idx(rx_ring);
1e213303
RM
 1882 tx_ring = net_rsp ? &qdev->tx_ring[net_rsp->txq_idx] : NULL;
 1883 if (net_rsp != NULL &&
 1884 __netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
c4e84bde
RM
1885 if (atomic_read(&tx_ring->queue_stopped) &&
1886 (atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
1887 /*
1888 * The queue got stopped because the tx_ring was full.
1889 * Wake it up, because it's now at least 25% empty.
1890 */
1e213303 1891 netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
c4e84bde
RM
1892 }
1893
1894 return count;
1895}
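/* Illustrative sketch, not part of the driver: the stop/wake handshake
 * used above, pulled out on its own.  qlge_send() stops the subqueue and
 * bumps queue_stopped when fewer than two descriptors are left; the
 * completion path only wakes it once more than a quarter of the work
 * queue entries are free again.  The helper name is made up; the fields
 * are the driver's own tx_ring fields.
 */
static inline bool example_tx_ring_should_wake(struct tx_ring *tx_ring)
{
	return atomic_read(&tx_ring->queue_stopped) &&
	       (atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4));
}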
1896
1897static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
1898{
1899 struct ql_adapter *qdev = rx_ring->qdev;
ba7cd3ba 1900 u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
c4e84bde
RM
1901 struct ql_net_rsp_iocb *net_rsp;
1902 int count = 0;
1903
1904 /* While there are entries in the completion queue. */
1905 while (prod != rx_ring->cnsmr_idx) {
1906
1907 QPRINTK(qdev, RX_STATUS, DEBUG,
 1908 "cq_id = %d, prod = %d, cnsmr = %d.\n", rx_ring->cq_id,
1909 prod, rx_ring->cnsmr_idx);
1910
1911 net_rsp = rx_ring->curr_entry;
1912 rmb();
1913 switch (net_rsp->opcode) {
1914 case OPCODE_IB_MAC_IOCB:
1915 ql_process_mac_rx_intr(qdev, rx_ring,
1916 (struct ib_mac_iocb_rsp *)
1917 net_rsp);
1918 break;
1919
1920 case OPCODE_IB_AE_IOCB:
1921 ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
1922 net_rsp);
1923 break;
1924 default:
1925 {
1926 QPRINTK(qdev, RX_STATUS, DEBUG,
1927 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
1928 net_rsp->opcode);
1929 }
1930 }
1931 count++;
1932 ql_update_cq(rx_ring);
ba7cd3ba 1933 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
c4e84bde
RM
1934 if (count == budget)
1935 break;
1936 }
1937 ql_update_buffer_queues(qdev, rx_ring);
1938 ql_write_cq_idx(rx_ring);
1939 return count;
1940}
1941
1942static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
1943{
1944 struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
1945 struct ql_adapter *qdev = rx_ring->qdev;
39aa8165
RM
1946 struct rx_ring *trx_ring;
1947 int i, work_done = 0;
1948 struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];
c4e84bde
RM
1949
1950 QPRINTK(qdev, RX_STATUS, DEBUG, "Enter, NAPI POLL cq_id = %d.\n",
1951 rx_ring->cq_id);
1952
39aa8165
RM
1953 /* Service the TX rings first. They start
1954 * right after the RSS rings. */
1955 for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
1956 trx_ring = &qdev->rx_ring[i];
1957 /* If this TX completion ring belongs to this vector and
1958 * it's not empty then service it.
1959 */
1960 if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
1961 (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
1962 trx_ring->cnsmr_idx)) {
1963 QPRINTK(qdev, INTR, DEBUG,
1964 "%s: Servicing TX completion ring %d.\n",
1965 __func__, trx_ring->cq_id);
1966 ql_clean_outbound_rx_ring(trx_ring);
1967 }
1968 }
1969
1970 /*
1971 * Now service the RSS ring if it's active.
1972 */
1973 if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
1974 rx_ring->cnsmr_idx) {
1975 QPRINTK(qdev, INTR, DEBUG,
1976 "%s: Servicing RX completion ring %d.\n",
1977 __func__, rx_ring->cq_id);
1978 work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
1979 }
1980
c4e84bde 1981 if (work_done < budget) {
22bdd4f5 1982 napi_complete(napi);
c4e84bde
RM
1983 ql_enable_completion_interrupt(qdev, rx_ring->irq);
1984 }
1985 return work_done;
1986}
1987
01e6b953 1988static void qlge_vlan_rx_register(struct net_device *ndev, struct vlan_group *grp)
c4e84bde
RM
1989{
1990 struct ql_adapter *qdev = netdev_priv(ndev);
1991
1992 qdev->vlgrp = grp;
1993 if (grp) {
1994 QPRINTK(qdev, IFUP, DEBUG, "Turning on VLAN in NIC_RCV_CFG.\n");
1995 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
1996 NIC_RCV_CFG_VLAN_MATCH_AND_NON);
1997 } else {
1998 QPRINTK(qdev, IFUP, DEBUG,
1999 "Turning off VLAN in NIC_RCV_CFG.\n");
2000 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
2001 }
2002}
2003
01e6b953 2004static void qlge_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
c4e84bde
RM
2005{
2006 struct ql_adapter *qdev = netdev_priv(ndev);
2007 u32 enable_bit = MAC_ADDR_E;
cc288f54 2008 int status;
c4e84bde 2009
cc288f54
RM
2010 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2011 if (status)
2012 return;
c4e84bde
RM
2013 if (ql_set_mac_addr_reg
2014 (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
2015 QPRINTK(qdev, IFUP, ERR, "Failed to init vlan address.\n");
2016 }
cc288f54 2017 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
c4e84bde
RM
2018}
2019
01e6b953 2020static void qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
c4e84bde
RM
2021{
2022 struct ql_adapter *qdev = netdev_priv(ndev);
2023 u32 enable_bit = 0;
cc288f54
RM
2024 int status;
2025
2026 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2027 if (status)
2028 return;
c4e84bde 2029
c4e84bde
RM
2030 if (ql_set_mac_addr_reg
2031 (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
2032 QPRINTK(qdev, IFUP, ERR, "Failed to clear vlan address.\n");
2033 }
cc288f54 2034 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
c4e84bde
RM
2035
2036}
2037
c4e84bde
RM
2038/* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
2039static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
2040{
2041 struct rx_ring *rx_ring = dev_id;
288379f0 2042 napi_schedule(&rx_ring->napi);
c4e84bde
RM
2043 return IRQ_HANDLED;
2044}
2045
c4e84bde
RM
2046/* This handles a fatal error, MPI activity, and the default
2047 * rx_ring in an MSI-X multiple vector environment.
 2048 * In an MSI/Legacy environment it also processes the rest of
2049 * the rx_rings.
2050 */
2051static irqreturn_t qlge_isr(int irq, void *dev_id)
2052{
2053 struct rx_ring *rx_ring = dev_id;
2054 struct ql_adapter *qdev = rx_ring->qdev;
2055 struct intr_context *intr_context = &qdev->intr_context[0];
2056 u32 var;
c4e84bde
RM
2057 int work_done = 0;
2058
bb0d215c
RM
2059 spin_lock(&qdev->hw_lock);
2060 if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
2061 QPRINTK(qdev, INTR, DEBUG, "Shared Interrupt, Not ours!\n");
2062 spin_unlock(&qdev->hw_lock);
2063 return IRQ_NONE;
c4e84bde 2064 }
bb0d215c 2065 spin_unlock(&qdev->hw_lock);
c4e84bde 2066
bb0d215c 2067 var = ql_disable_completion_interrupt(qdev, intr_context->intr);
c4e84bde
RM
2068
2069 /*
2070 * Check for fatal error.
2071 */
2072 if (var & STS_FE) {
2073 ql_queue_asic_error(qdev);
2074 QPRINTK(qdev, INTR, ERR, "Got fatal error, STS = %x.\n", var);
2075 var = ql_read32(qdev, ERR_STS);
2076 QPRINTK(qdev, INTR, ERR,
2077 "Resetting chip. Error Status Register = 0x%x\n", var);
2078 return IRQ_HANDLED;
2079 }
2080
2081 /*
2082 * Check MPI processor activity.
2083 */
5ee22a5a
RM
2084 if ((var & STS_PI) &&
2085 (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
c4e84bde
RM
2086 /*
2087 * We've got an async event or mailbox completion.
2088 * Handle it and clear the source of the interrupt.
2089 */
2090 QPRINTK(qdev, INTR, ERR, "Got MPI processor interrupt.\n");
2091 ql_disable_completion_interrupt(qdev, intr_context->intr);
5ee22a5a
RM
2092 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
2093 queue_delayed_work_on(smp_processor_id(),
2094 qdev->workqueue, &qdev->mpi_work, 0);
c4e84bde
RM
2095 work_done++;
2096 }
2097
2098 /*
39aa8165
RM
2099 * Get the bit-mask that shows the active queues for this
2100 * pass. Compare it to the queues that this irq services
2101 * and call napi if there's a match.
c4e84bde 2102 */
39aa8165
RM
2103 var = ql_read32(qdev, ISR1);
2104 if (var & intr_context->irq_mask) {
c4e84bde 2105 QPRINTK(qdev, INTR, INFO,
39aa8165
RM
2106 "Waking handler for rx_ring[0].\n");
2107 ql_disable_completion_interrupt(qdev, intr_context->intr);
288379f0 2108 napi_schedule(&rx_ring->napi);
c4e84bde
RM
2109 work_done++;
2110 }
bb0d215c 2111 ql_enable_completion_interrupt(qdev, intr_context->intr);
c4e84bde
RM
2112 return work_done ? IRQ_HANDLED : IRQ_NONE;
2113}
2114
2115static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2116{
2117
2118 if (skb_is_gso(skb)) {
2119 int err;
2120 if (skb_header_cloned(skb)) {
2121 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2122 if (err)
2123 return err;
2124 }
2125
2126 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2127 mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
2128 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2129 mac_iocb_ptr->total_hdrs_len =
2130 cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
2131 mac_iocb_ptr->net_trans_offset =
2132 cpu_to_le16(skb_network_offset(skb) |
2133 skb_transport_offset(skb)
2134 << OB_MAC_TRANSPORT_HDR_SHIFT);
2135 mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2136 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
2137 if (likely(skb->protocol == htons(ETH_P_IP))) {
2138 struct iphdr *iph = ip_hdr(skb);
2139 iph->check = 0;
2140 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2141 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2142 iph->daddr, 0,
2143 IPPROTO_TCP,
2144 0);
2145 } else if (skb->protocol == htons(ETH_P_IPV6)) {
2146 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
2147 tcp_hdr(skb)->check =
2148 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2149 &ipv6_hdr(skb)->daddr,
2150 0, IPPROTO_TCP, 0);
2151 }
2152 return 1;
2153 }
2154 return 0;
2155}
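/* Illustrative sketch, not part of the driver: how the header offsets
 * above pack for a plain untagged IPv4/TCP frame (14-byte Ethernet
 * header, 20-byte IP header).  The helper name and the literal offsets
 * are for illustration only; the iocb fields and macros are the
 * driver's own.
 */
static inline void example_tso_offsets(struct sk_buff *skb,
				       struct ob_mac_tso_iocb_req *iocb)
{
	u16 net_off = 14;	/* skb_network_offset(skb) */
	u16 trans_off = 34;	/* skb_transport_offset(skb) = 14 + 20 */

	/* Both offsets share one 16-bit field; the transport offset is
	 * shifted up by OB_MAC_TRANSPORT_HDR_SHIFT.
	 */
	iocb->net_trans_offset = cpu_to_le16(net_off |
			(trans_off << OB_MAC_TRANSPORT_HDR_SHIFT));
	/* Total header length is everything replicated per TSO segment:
	 * L2 + L3 + L4 headers.
	 */
	iocb->total_hdrs_len = cpu_to_le16(trans_off + tcp_hdrlen(skb));
}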
2156
2157static void ql_hw_csum_setup(struct sk_buff *skb,
2158 struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2159{
2160 int len;
2161 struct iphdr *iph = ip_hdr(skb);
fd2df4f7 2162 __sum16 *check;
c4e84bde
RM
2163 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2164 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2165 mac_iocb_ptr->net_trans_offset =
2166 cpu_to_le16(skb_network_offset(skb) |
2167 skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
2168
2169 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2170 len = (ntohs(iph->tot_len) - (iph->ihl << 2));
2171 if (likely(iph->protocol == IPPROTO_TCP)) {
2172 check = &(tcp_hdr(skb)->check);
2173 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
2174 mac_iocb_ptr->total_hdrs_len =
2175 cpu_to_le16(skb_transport_offset(skb) +
2176 (tcp_hdr(skb)->doff << 2));
2177 } else {
2178 check = &(udp_hdr(skb)->check);
2179 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
2180 mac_iocb_ptr->total_hdrs_len =
2181 cpu_to_le16(skb_transport_offset(skb) +
2182 sizeof(struct udphdr));
2183 }
2184 *check = ~csum_tcpudp_magic(iph->saddr,
2185 iph->daddr, len, iph->protocol, 0);
2186}
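/* Illustrative sketch, not part of the driver: the checksum hand-off
 * performed above.  For CHECKSUM_PARTIAL the stack leaves the L4
 * checksum unfinished; the driver seeds it with the inverted
 * pseudo-header sum and the chip folds in the payload.  The helper
 * name is made up; only the TCP case is shown.
 */
static inline void example_seed_tcp_pseudo_csum(struct sk_buff *skb)
{
	struct iphdr *iph = ip_hdr(skb);
	int l4_len = ntohs(iph->tot_len) - (iph->ihl << 2);

	tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
						 l4_len, IPPROTO_TCP, 0);
}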
2187
61357325 2188static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
c4e84bde
RM
2189{
2190 struct tx_ring_desc *tx_ring_desc;
2191 struct ob_mac_iocb_req *mac_iocb_ptr;
2192 struct ql_adapter *qdev = netdev_priv(ndev);
2193 int tso;
2194 struct tx_ring *tx_ring;
1e213303 2195 u32 tx_ring_idx = (u32) skb->queue_mapping;
c4e84bde
RM
2196
2197 tx_ring = &qdev->tx_ring[tx_ring_idx];
2198
74c50b4b
RM
2199 if (skb_padto(skb, ETH_ZLEN))
2200 return NETDEV_TX_OK;
2201
c4e84bde
RM
2202 if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
2203 QPRINTK(qdev, TX_QUEUED, INFO,
 2204 "%s: shutting down tx queue %d due to lack of resources.\n",
2205 __func__, tx_ring_idx);
1e213303 2206 netif_stop_subqueue(ndev, tx_ring->wq_id);
c4e84bde
RM
2207 atomic_inc(&tx_ring->queue_stopped);
2208 return NETDEV_TX_BUSY;
2209 }
2210 tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
2211 mac_iocb_ptr = tx_ring_desc->queue_entry;
e332471c 2212 memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
c4e84bde
RM
2213
2214 mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
2215 mac_iocb_ptr->tid = tx_ring_desc->index;
2216 /* We use the upper 32-bits to store the tx queue for this IO.
2217 * When we get the completion we can use it to establish the context.
2218 */
2219 mac_iocb_ptr->txq_idx = tx_ring_idx;
2220 tx_ring_desc->skb = skb;
2221
2222 mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
2223
2224 if (qdev->vlgrp && vlan_tx_tag_present(skb)) {
2225 QPRINTK(qdev, TX_QUEUED, DEBUG, "Adding a vlan tag %d.\n",
2226 vlan_tx_tag_get(skb));
2227 mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
2228 mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb));
2229 }
2230 tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2231 if (tso < 0) {
2232 dev_kfree_skb_any(skb);
2233 return NETDEV_TX_OK;
2234 } else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
2235 ql_hw_csum_setup(skb,
2236 (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2237 }
0d979f74
RM
2238 if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
2239 NETDEV_TX_OK) {
2240 QPRINTK(qdev, TX_QUEUED, ERR,
2241 "Could not map the segments.\n");
2242 return NETDEV_TX_BUSY;
2243 }
c4e84bde
RM
2244 QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
2245 tx_ring->prod_idx++;
2246 if (tx_ring->prod_idx == tx_ring->wq_len)
2247 tx_ring->prod_idx = 0;
2248 wmb();
2249
2250 ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
c4e84bde
RM
2251 QPRINTK(qdev, TX_QUEUED, DEBUG, "tx queued, slot %d, len %d\n",
2252 tx_ring->prod_idx, skb->len);
2253
2254 atomic_dec(&tx_ring->tx_count);
2255 return NETDEV_TX_OK;
2256}
2257
9dfbbaa6 2258
c4e84bde
RM
2259static void ql_free_shadow_space(struct ql_adapter *qdev)
2260{
2261 if (qdev->rx_ring_shadow_reg_area) {
2262 pci_free_consistent(qdev->pdev,
2263 PAGE_SIZE,
2264 qdev->rx_ring_shadow_reg_area,
2265 qdev->rx_ring_shadow_reg_dma);
2266 qdev->rx_ring_shadow_reg_area = NULL;
2267 }
2268 if (qdev->tx_ring_shadow_reg_area) {
2269 pci_free_consistent(qdev->pdev,
2270 PAGE_SIZE,
2271 qdev->tx_ring_shadow_reg_area,
2272 qdev->tx_ring_shadow_reg_dma);
2273 qdev->tx_ring_shadow_reg_area = NULL;
2274 }
2275}
2276
2277static int ql_alloc_shadow_space(struct ql_adapter *qdev)
2278{
2279 qdev->rx_ring_shadow_reg_area =
2280 pci_alloc_consistent(qdev->pdev,
2281 PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma);
2282 if (qdev->rx_ring_shadow_reg_area == NULL) {
2283 QPRINTK(qdev, IFUP, ERR,
2284 "Allocation of RX shadow space failed.\n");
2285 return -ENOMEM;
2286 }
b25215d0 2287 memset(qdev->rx_ring_shadow_reg_area, 0, PAGE_SIZE);
c4e84bde
RM
2288 qdev->tx_ring_shadow_reg_area =
2289 pci_alloc_consistent(qdev->pdev, PAGE_SIZE,
2290 &qdev->tx_ring_shadow_reg_dma);
2291 if (qdev->tx_ring_shadow_reg_area == NULL) {
2292 QPRINTK(qdev, IFUP, ERR,
2293 "Allocation of TX shadow space failed.\n");
2294 goto err_wqp_sh_area;
2295 }
b25215d0 2296 memset(qdev->tx_ring_shadow_reg_area, 0, PAGE_SIZE);
c4e84bde
RM
2297 return 0;
2298
2299err_wqp_sh_area:
2300 pci_free_consistent(qdev->pdev,
2301 PAGE_SIZE,
2302 qdev->rx_ring_shadow_reg_area,
2303 qdev->rx_ring_shadow_reg_dma);
2304 return -ENOMEM;
2305}
2306
2307static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2308{
2309 struct tx_ring_desc *tx_ring_desc;
2310 int i;
2311 struct ob_mac_iocb_req *mac_iocb_ptr;
2312
2313 mac_iocb_ptr = tx_ring->wq_base;
2314 tx_ring_desc = tx_ring->q;
2315 for (i = 0; i < tx_ring->wq_len; i++) {
2316 tx_ring_desc->index = i;
2317 tx_ring_desc->skb = NULL;
2318 tx_ring_desc->queue_entry = mac_iocb_ptr;
2319 mac_iocb_ptr++;
2320 tx_ring_desc++;
2321 }
2322 atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
2323 atomic_set(&tx_ring->queue_stopped, 0);
2324}
2325
2326static void ql_free_tx_resources(struct ql_adapter *qdev,
2327 struct tx_ring *tx_ring)
2328{
2329 if (tx_ring->wq_base) {
2330 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2331 tx_ring->wq_base, tx_ring->wq_base_dma);
2332 tx_ring->wq_base = NULL;
2333 }
2334 kfree(tx_ring->q);
2335 tx_ring->q = NULL;
2336}
2337
2338static int ql_alloc_tx_resources(struct ql_adapter *qdev,
2339 struct tx_ring *tx_ring)
2340{
2341 tx_ring->wq_base =
2342 pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
2343 &tx_ring->wq_base_dma);
2344
2345 if ((tx_ring->wq_base == NULL)
88c55e3c 2346 || tx_ring->wq_base_dma & WQ_ADDR_ALIGN) {
c4e84bde
RM
2347 QPRINTK(qdev, IFUP, ERR, "tx_ring alloc failed.\n");
2348 return -ENOMEM;
2349 }
2350 tx_ring->q =
2351 kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
2352 if (tx_ring->q == NULL)
2353 goto err;
2354
2355 return 0;
2356err:
2357 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2358 tx_ring->wq_base, tx_ring->wq_base_dma);
2359 return -ENOMEM;
2360}
2361
8668ae92 2362static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
c4e84bde 2363{
c4e84bde
RM
2364 struct bq_desc *lbq_desc;
2365
7c734359
RM
2366 uint32_t curr_idx, clean_idx;
2367
2368 curr_idx = rx_ring->lbq_curr_idx;
2369 clean_idx = rx_ring->lbq_clean_idx;
2370 while (curr_idx != clean_idx) {
2371 lbq_desc = &rx_ring->lbq[curr_idx];
2372
2373 if (lbq_desc->p.pg_chunk.last_flag) {
c4e84bde 2374 pci_unmap_page(qdev->pdev,
7c734359
RM
2375 lbq_desc->p.pg_chunk.map,
2376 ql_lbq_block_size(qdev),
c4e84bde 2377 PCI_DMA_FROMDEVICE);
7c734359 2378 lbq_desc->p.pg_chunk.last_flag = 0;
c4e84bde 2379 }
7c734359
RM
2380
2381 put_page(lbq_desc->p.pg_chunk.page);
2382 lbq_desc->p.pg_chunk.page = NULL;
2383
2384 if (++curr_idx == rx_ring->lbq_len)
2385 curr_idx = 0;
2386
c4e84bde
RM
2387 }
2388}
2389
8668ae92 2390static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
c4e84bde
RM
2391{
2392 int i;
2393 struct bq_desc *sbq_desc;
2394
2395 for (i = 0; i < rx_ring->sbq_len; i++) {
2396 sbq_desc = &rx_ring->sbq[i];
2397 if (sbq_desc == NULL) {
2398 QPRINTK(qdev, IFUP, ERR, "sbq_desc %d is NULL.\n", i);
2399 return;
2400 }
2401 if (sbq_desc->p.skb) {
2402 pci_unmap_single(qdev->pdev,
2403 pci_unmap_addr(sbq_desc, mapaddr),
2404 pci_unmap_len(sbq_desc, maplen),
2405 PCI_DMA_FROMDEVICE);
2406 dev_kfree_skb(sbq_desc->p.skb);
2407 sbq_desc->p.skb = NULL;
2408 }
c4e84bde
RM
2409 }
2410}
2411
4545a3f2
RM
2412/* Free all large and small rx buffers associated
2413 * with the completion queues for this device.
2414 */
2415static void ql_free_rx_buffers(struct ql_adapter *qdev)
2416{
2417 int i;
2418 struct rx_ring *rx_ring;
2419
2420 for (i = 0; i < qdev->rx_ring_count; i++) {
2421 rx_ring = &qdev->rx_ring[i];
2422 if (rx_ring->lbq)
2423 ql_free_lbq_buffers(qdev, rx_ring);
2424 if (rx_ring->sbq)
2425 ql_free_sbq_buffers(qdev, rx_ring);
2426 }
2427}
2428
2429static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
2430{
2431 struct rx_ring *rx_ring;
2432 int i;
2433
2434 for (i = 0; i < qdev->rx_ring_count; i++) {
2435 rx_ring = &qdev->rx_ring[i];
2436 if (rx_ring->type != TX_Q)
2437 ql_update_buffer_queues(qdev, rx_ring);
2438 }
2439}
2440
2441static void ql_init_lbq_ring(struct ql_adapter *qdev,
2442 struct rx_ring *rx_ring)
2443{
2444 int i;
2445 struct bq_desc *lbq_desc;
2446 __le64 *bq = rx_ring->lbq_base;
2447
2448 memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc));
2449 for (i = 0; i < rx_ring->lbq_len; i++) {
2450 lbq_desc = &rx_ring->lbq[i];
2451 memset(lbq_desc, 0, sizeof(*lbq_desc));
2452 lbq_desc->index = i;
2453 lbq_desc->addr = bq;
2454 bq++;
2455 }
2456}
2457
2458static void ql_init_sbq_ring(struct ql_adapter *qdev,
c4e84bde
RM
2459 struct rx_ring *rx_ring)
2460{
2461 int i;
2462 struct bq_desc *sbq_desc;
2c9a0d41 2463 __le64 *bq = rx_ring->sbq_base;
c4e84bde 2464
4545a3f2 2465 memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc));
c4e84bde
RM
2466 for (i = 0; i < rx_ring->sbq_len; i++) {
2467 sbq_desc = &rx_ring->sbq[i];
4545a3f2 2468 memset(sbq_desc, 0, sizeof(*sbq_desc));
c4e84bde 2469 sbq_desc->index = i;
2c9a0d41 2470 sbq_desc->addr = bq;
c4e84bde
RM
2471 bq++;
2472 }
c4e84bde
RM
2473}
2474
2475static void ql_free_rx_resources(struct ql_adapter *qdev,
2476 struct rx_ring *rx_ring)
2477{
c4e84bde
RM
2478 /* Free the small buffer queue. */
2479 if (rx_ring->sbq_base) {
2480 pci_free_consistent(qdev->pdev,
2481 rx_ring->sbq_size,
2482 rx_ring->sbq_base, rx_ring->sbq_base_dma);
2483 rx_ring->sbq_base = NULL;
2484 }
2485
2486 /* Free the small buffer queue control blocks. */
2487 kfree(rx_ring->sbq);
2488 rx_ring->sbq = NULL;
2489
2490 /* Free the large buffer queue. */
2491 if (rx_ring->lbq_base) {
2492 pci_free_consistent(qdev->pdev,
2493 rx_ring->lbq_size,
2494 rx_ring->lbq_base, rx_ring->lbq_base_dma);
2495 rx_ring->lbq_base = NULL;
2496 }
2497
2498 /* Free the large buffer queue control blocks. */
2499 kfree(rx_ring->lbq);
2500 rx_ring->lbq = NULL;
2501
2502 /* Free the rx queue. */
2503 if (rx_ring->cq_base) {
2504 pci_free_consistent(qdev->pdev,
2505 rx_ring->cq_size,
2506 rx_ring->cq_base, rx_ring->cq_base_dma);
2507 rx_ring->cq_base = NULL;
2508 }
2509}
2510
2511/* Allocate queues and buffers for this completions queue based
2512 * on the values in the parameter structure. */
2513static int ql_alloc_rx_resources(struct ql_adapter *qdev,
2514 struct rx_ring *rx_ring)
2515{
2516
2517 /*
2518 * Allocate the completion queue for this rx_ring.
2519 */
2520 rx_ring->cq_base =
2521 pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
2522 &rx_ring->cq_base_dma);
2523
2524 if (rx_ring->cq_base == NULL) {
2525 QPRINTK(qdev, IFUP, ERR, "rx_ring alloc failed.\n");
2526 return -ENOMEM;
2527 }
2528
2529 if (rx_ring->sbq_len) {
2530 /*
2531 * Allocate small buffer queue.
2532 */
2533 rx_ring->sbq_base =
2534 pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
2535 &rx_ring->sbq_base_dma);
2536
2537 if (rx_ring->sbq_base == NULL) {
2538 QPRINTK(qdev, IFUP, ERR,
2539 "Small buffer queue allocation failed.\n");
2540 goto err_mem;
2541 }
2542
2543 /*
2544 * Allocate small buffer queue control blocks.
2545 */
2546 rx_ring->sbq =
2547 kmalloc(rx_ring->sbq_len * sizeof(struct bq_desc),
2548 GFP_KERNEL);
2549 if (rx_ring->sbq == NULL) {
2550 QPRINTK(qdev, IFUP, ERR,
2551 "Small buffer queue control block allocation failed.\n");
2552 goto err_mem;
2553 }
2554
4545a3f2 2555 ql_init_sbq_ring(qdev, rx_ring);
c4e84bde
RM
2556 }
2557
2558 if (rx_ring->lbq_len) {
2559 /*
2560 * Allocate large buffer queue.
2561 */
2562 rx_ring->lbq_base =
2563 pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
2564 &rx_ring->lbq_base_dma);
2565
2566 if (rx_ring->lbq_base == NULL) {
2567 QPRINTK(qdev, IFUP, ERR,
2568 "Large buffer queue allocation failed.\n");
2569 goto err_mem;
2570 }
2571 /*
2572 * Allocate large buffer queue control blocks.
2573 */
2574 rx_ring->lbq =
2575 kmalloc(rx_ring->lbq_len * sizeof(struct bq_desc),
2576 GFP_KERNEL);
2577 if (rx_ring->lbq == NULL) {
2578 QPRINTK(qdev, IFUP, ERR,
2579 "Large buffer queue control block allocation failed.\n");
2580 goto err_mem;
2581 }
2582
4545a3f2 2583 ql_init_lbq_ring(qdev, rx_ring);
c4e84bde
RM
2584 }
2585
2586 return 0;
2587
2588err_mem:
2589 ql_free_rx_resources(qdev, rx_ring);
2590 return -ENOMEM;
2591}
2592
2593static void ql_tx_ring_clean(struct ql_adapter *qdev)
2594{
2595 struct tx_ring *tx_ring;
2596 struct tx_ring_desc *tx_ring_desc;
2597 int i, j;
2598
2599 /*
2600 * Loop through all queues and free
2601 * any resources.
2602 */
2603 for (j = 0; j < qdev->tx_ring_count; j++) {
2604 tx_ring = &qdev->tx_ring[j];
2605 for (i = 0; i < tx_ring->wq_len; i++) {
2606 tx_ring_desc = &tx_ring->q[i];
2607 if (tx_ring_desc && tx_ring_desc->skb) {
2608 QPRINTK(qdev, IFDOWN, ERR,
2609 "Freeing lost SKB %p, from queue %d, index %d.\n",
2610 tx_ring_desc->skb, j,
2611 tx_ring_desc->index);
2612 ql_unmap_send(qdev, tx_ring_desc,
2613 tx_ring_desc->map_cnt);
2614 dev_kfree_skb(tx_ring_desc->skb);
2615 tx_ring_desc->skb = NULL;
2616 }
2617 }
2618 }
2619}
2620
c4e84bde
RM
2621static void ql_free_mem_resources(struct ql_adapter *qdev)
2622{
2623 int i;
2624
2625 for (i = 0; i < qdev->tx_ring_count; i++)
2626 ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
2627 for (i = 0; i < qdev->rx_ring_count; i++)
2628 ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
2629 ql_free_shadow_space(qdev);
2630}
2631
2632static int ql_alloc_mem_resources(struct ql_adapter *qdev)
2633{
2634 int i;
2635
2636 /* Allocate space for our shadow registers and such. */
2637 if (ql_alloc_shadow_space(qdev))
2638 return -ENOMEM;
2639
2640 for (i = 0; i < qdev->rx_ring_count; i++) {
2641 if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
2642 QPRINTK(qdev, IFUP, ERR,
2643 "RX resource allocation failed.\n");
2644 goto err_mem;
2645 }
2646 }
2647 /* Allocate tx queue resources */
2648 for (i = 0; i < qdev->tx_ring_count; i++) {
2649 if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
2650 QPRINTK(qdev, IFUP, ERR,
2651 "TX resource allocation failed.\n");
2652 goto err_mem;
2653 }
2654 }
2655 return 0;
2656
2657err_mem:
2658 ql_free_mem_resources(qdev);
2659 return -ENOMEM;
2660}
2661
2662/* Set up the rx ring control block and pass it to the chip.
2663 * The control block is defined as
2664 * "Completion Queue Initialization Control Block", or cqicb.
2665 */
2666static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2667{
2668 struct cqicb *cqicb = &rx_ring->cqicb;
2669 void *shadow_reg = qdev->rx_ring_shadow_reg_area +
b8facca0 2670 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
c4e84bde 2671 u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
b8facca0 2672 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
c4e84bde
RM
2673 void __iomem *doorbell_area =
2674 qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
2675 int err = 0;
2676 u16 bq_len;
d4a4aba6 2677 u64 tmp;
b8facca0
RM
2678 __le64 *base_indirect_ptr;
2679 int page_entries;
c4e84bde
RM
2680
2681 /* Set up the shadow registers for this ring. */
2682 rx_ring->prod_idx_sh_reg = shadow_reg;
2683 rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
7c734359 2684 *rx_ring->prod_idx_sh_reg = 0;
c4e84bde
RM
2685 shadow_reg += sizeof(u64);
2686 shadow_reg_dma += sizeof(u64);
2687 rx_ring->lbq_base_indirect = shadow_reg;
2688 rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
b8facca0
RM
2689 shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
2690 shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
c4e84bde
RM
2691 rx_ring->sbq_base_indirect = shadow_reg;
2692 rx_ring->sbq_base_indirect_dma = shadow_reg_dma;
2693
2694 /* PCI doorbell mem area + 0x00 for consumer index register */
8668ae92 2695 rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
c4e84bde
RM
2696 rx_ring->cnsmr_idx = 0;
2697 rx_ring->curr_entry = rx_ring->cq_base;
2698
2699 /* PCI doorbell mem area + 0x04 for valid register */
2700 rx_ring->valid_db_reg = doorbell_area + 0x04;
2701
2702 /* PCI doorbell mem area + 0x18 for large buffer consumer */
8668ae92 2703 rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);
c4e84bde
RM
2704
2705 /* PCI doorbell mem area + 0x1c */
8668ae92 2706 rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);
c4e84bde
RM
2707
2708 memset((void *)cqicb, 0, sizeof(struct cqicb));
2709 cqicb->msix_vect = rx_ring->irq;
2710
459caf5a
RM
2711 bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
2712 cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);
c4e84bde 2713
97345524 2714 cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);
c4e84bde 2715
97345524 2716 cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);
c4e84bde
RM
2717
2718 /*
2719 * Set up the control block load flags.
2720 */
2721 cqicb->flags = FLAGS_LC | /* Load queue base address */
2722 FLAGS_LV | /* Load MSI-X vector */
2723 FLAGS_LI; /* Load irq delay values */
2724 if (rx_ring->lbq_len) {
2725 cqicb->flags |= FLAGS_LL; /* Load lbq values */
a419aef8 2726 tmp = (u64)rx_ring->lbq_base_dma;
b8facca0
RM
2727 base_indirect_ptr = (__le64 *) rx_ring->lbq_base_indirect;
2728 page_entries = 0;
2729 do {
2730 *base_indirect_ptr = cpu_to_le64(tmp);
2731 tmp += DB_PAGE_SIZE;
2732 base_indirect_ptr++;
2733 page_entries++;
2734 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
97345524
RM
2735 cqicb->lbq_addr =
2736 cpu_to_le64(rx_ring->lbq_base_indirect_dma);
459caf5a
RM
2737 bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
2738 (u16) rx_ring->lbq_buf_size;
2739 cqicb->lbq_buf_size = cpu_to_le16(bq_len);
2740 bq_len = (rx_ring->lbq_len == 65536) ? 0 :
2741 (u16) rx_ring->lbq_len;
c4e84bde 2742 cqicb->lbq_len = cpu_to_le16(bq_len);
4545a3f2 2743 rx_ring->lbq_prod_idx = 0;
c4e84bde 2744 rx_ring->lbq_curr_idx = 0;
4545a3f2
RM
2745 rx_ring->lbq_clean_idx = 0;
2746 rx_ring->lbq_free_cnt = rx_ring->lbq_len;
c4e84bde
RM
2747 }
2748 if (rx_ring->sbq_len) {
2749 cqicb->flags |= FLAGS_LS; /* Load sbq values */
a419aef8 2750 tmp = (u64)rx_ring->sbq_base_dma;
b8facca0
RM
2751 base_indirect_ptr = (__le64 *) rx_ring->sbq_base_indirect;
2752 page_entries = 0;
2753 do {
2754 *base_indirect_ptr = cpu_to_le64(tmp);
2755 tmp += DB_PAGE_SIZE;
2756 base_indirect_ptr++;
2757 page_entries++;
2758 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len));
97345524
RM
2759 cqicb->sbq_addr =
2760 cpu_to_le64(rx_ring->sbq_base_indirect_dma);
c4e84bde 2761 cqicb->sbq_buf_size =
52e55f3c 2762 cpu_to_le16((u16)(rx_ring->sbq_buf_size));
459caf5a
RM
2763 bq_len = (rx_ring->sbq_len == 65536) ? 0 :
2764 (u16) rx_ring->sbq_len;
c4e84bde 2765 cqicb->sbq_len = cpu_to_le16(bq_len);
4545a3f2 2766 rx_ring->sbq_prod_idx = 0;
c4e84bde 2767 rx_ring->sbq_curr_idx = 0;
4545a3f2
RM
2768 rx_ring->sbq_clean_idx = 0;
2769 rx_ring->sbq_free_cnt = rx_ring->sbq_len;
c4e84bde
RM
2770 }
2771 switch (rx_ring->type) {
2772 case TX_Q:
c4e84bde
RM
2773 cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
2774 cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
2775 break;
c4e84bde
RM
2776 case RX_Q:
2777 /* Inbound completion handling rx_rings run in
2778 * separate NAPI contexts.
2779 */
2780 netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
2781 64);
2782 cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
2783 cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
2784 break;
2785 default:
2786 QPRINTK(qdev, IFUP, DEBUG, "Invalid rx_ring->type = %d.\n",
2787 rx_ring->type);
2788 }
4974097a 2789 QPRINTK(qdev, IFUP, DEBUG, "Initializing rx work queue.\n");
c4e84bde
RM
2790 err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
2791 CFG_LCQ, rx_ring->cq_id);
2792 if (err) {
2793 QPRINTK(qdev, IFUP, ERR, "Failed to load CQICB.\n");
2794 return err;
2795 }
c4e84bde
RM
2796 return err;
2797}
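/* Illustrative sketch, not part of the driver: how each ring's slice of
 * the shadow page is carved up at the top of ql_start_rx_ring(), written
 * as plain offset arithmetic.  The struct and helper names are made up;
 * RX_RING_SHADOW_SPACE and MAX_DB_PAGES_PER_BQ() are the driver's own
 * macros.
 */
struct example_shadow_layout {
	void *prod_idx;		/* +0: completion producer index (u64) */
	void *lbq_indirect;	/* +8: large buffer queue page list */
	void *sbq_indirect;	/* after the lbq list: small buffer list */
};

static inline void example_shadow_carve(struct ql_adapter *qdev,
					struct rx_ring *rx_ring,
					struct example_shadow_layout *l)
{
	void *base = qdev->rx_ring_shadow_reg_area +
		     (rx_ring->cq_id * RX_RING_SHADOW_SPACE);

	l->prod_idx = base;
	l->lbq_indirect = base + sizeof(u64);
	l->sbq_indirect = l->lbq_indirect +
		(sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
}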
2798
2799static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2800{
2801 struct wqicb *wqicb = (struct wqicb *)tx_ring;
2802 void __iomem *doorbell_area =
2803 qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
2804 void *shadow_reg = qdev->tx_ring_shadow_reg_area +
2805 (tx_ring->wq_id * sizeof(u64));
2806 u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
2807 (tx_ring->wq_id * sizeof(u64));
2808 int err = 0;
2809
2810 /*
2811 * Assign doorbell registers for this tx_ring.
2812 */
2813 /* TX PCI doorbell mem area for tx producer index */
8668ae92 2814 tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
c4e84bde
RM
2815 tx_ring->prod_idx = 0;
2816 /* TX PCI doorbell mem area + 0x04 */
2817 tx_ring->valid_db_reg = doorbell_area + 0x04;
2818
2819 /*
2820 * Assign shadow registers for this tx_ring.
2821 */
2822 tx_ring->cnsmr_idx_sh_reg = shadow_reg;
2823 tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
2824
2825 wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
2826 wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
2827 Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
2828 wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
2829 wqicb->rid = 0;
97345524 2830 wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);
c4e84bde 2831
97345524 2832 wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);
c4e84bde
RM
2833
2834 ql_init_tx_ring(qdev, tx_ring);
2835
e332471c 2836 err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
c4e84bde
RM
2837 (u16) tx_ring->wq_id);
2838 if (err) {
2839 QPRINTK(qdev, IFUP, ERR, "Failed to load tx_ring.\n");
2840 return err;
2841 }
4974097a 2842 QPRINTK(qdev, IFUP, DEBUG, "Successfully loaded WQICB.\n");
c4e84bde
RM
2843 return err;
2844}
2845
2846static void ql_disable_msix(struct ql_adapter *qdev)
2847{
2848 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
2849 pci_disable_msix(qdev->pdev);
2850 clear_bit(QL_MSIX_ENABLED, &qdev->flags);
2851 kfree(qdev->msi_x_entry);
2852 qdev->msi_x_entry = NULL;
2853 } else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
2854 pci_disable_msi(qdev->pdev);
2855 clear_bit(QL_MSI_ENABLED, &qdev->flags);
2856 }
2857}
2858
a4ab6137
RM
2859/* We start by trying to get the number of vectors
2860 * stored in qdev->intr_count. If we don't get that
2861 * many then we reduce the count and try again.
2862 */
c4e84bde
RM
2863static void ql_enable_msix(struct ql_adapter *qdev)
2864{
a4ab6137 2865 int i, err;
c4e84bde 2866
c4e84bde
RM
2867 /* Get the MSIX vectors. */
2868 if (irq_type == MSIX_IRQ) {
2869 /* Try to alloc space for the msix struct,
2870 * if it fails then go to MSI/legacy.
2871 */
a4ab6137 2872 qdev->msi_x_entry = kcalloc(qdev->intr_count,
c4e84bde
RM
2873 sizeof(struct msix_entry),
2874 GFP_KERNEL);
2875 if (!qdev->msi_x_entry) {
2876 irq_type = MSI_IRQ;
2877 goto msi;
2878 }
2879
a4ab6137 2880 for (i = 0; i < qdev->intr_count; i++)
c4e84bde
RM
2881 qdev->msi_x_entry[i].entry = i;
2882
a4ab6137
RM
2883 /* Loop to get our vectors. We start with
2884 * what we want and settle for what we get.
2885 */
2886 do {
2887 err = pci_enable_msix(qdev->pdev,
2888 qdev->msi_x_entry, qdev->intr_count);
2889 if (err > 0)
2890 qdev->intr_count = err;
2891 } while (err > 0);
2892
2893 if (err < 0) {
c4e84bde
RM
2894 kfree(qdev->msi_x_entry);
2895 qdev->msi_x_entry = NULL;
2896 QPRINTK(qdev, IFUP, WARNING,
2897 "MSI-X Enable failed, trying MSI.\n");
a4ab6137 2898 qdev->intr_count = 1;
c4e84bde 2899 irq_type = MSI_IRQ;
a4ab6137
RM
2900 } else if (err == 0) {
2901 set_bit(QL_MSIX_ENABLED, &qdev->flags);
2902 QPRINTK(qdev, IFUP, INFO,
2903 "MSI-X Enabled, got %d vectors.\n",
2904 qdev->intr_count);
2905 return;
c4e84bde
RM
2906 }
2907 }
2908msi:
a4ab6137 2909 qdev->intr_count = 1;
c4e84bde
RM
2910 if (irq_type == MSI_IRQ) {
2911 if (!pci_enable_msi(qdev->pdev)) {
2912 set_bit(QL_MSI_ENABLED, &qdev->flags);
2913 QPRINTK(qdev, IFUP, INFO,
2914 "Running with MSI interrupts.\n");
2915 return;
2916 }
2917 }
2918 irq_type = LEG_IRQ;
c4e84bde
RM
2919 QPRINTK(qdev, IFUP, DEBUG, "Running with legacy interrupts.\n");
2920}
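/* Illustrative sketch, not part of the driver: the negotiation pattern
 * used in ql_enable_msix().  With this kernel's API, pci_enable_msix()
 * returns 0 on success, a negative errno on failure, or a positive count
 * of vectors that are actually available, so the loop simply retries
 * with that smaller count.  The helper name is made up.
 */
static inline int example_msix_negotiate(struct pci_dev *pdev,
					 struct msix_entry *entries,
					 int want)
{
	int err;

	do {
		err = pci_enable_msix(pdev, entries, want);
		if (err > 0)
			want = err;	/* retry with what is available */
	} while (err > 0);

	return err ? err : want;	/* < 0 on error, else vectors granted */
}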
2921
39aa8165
RM
 2922 /* Each vector services 1 RSS ring and 1 or more
2923 * TX completion rings. This function loops through
2924 * the TX completion rings and assigns the vector that
2925 * will service it. An example would be if there are
2926 * 2 vectors (so 2 RSS rings) and 8 TX completion rings.
2927 * This would mean that vector 0 would service RSS ring 0
 2928 * and TX completion rings 0,1,2 and 3. Vector 1 would
2929 * service RSS ring 1 and TX completion rings 4,5,6 and 7.
2930 */
2931static void ql_set_tx_vect(struct ql_adapter *qdev)
2932{
2933 int i, j, vect;
2934 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
2935
2936 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
2937 /* Assign irq vectors to TX rx_rings.*/
2938 for (vect = 0, j = 0, i = qdev->rss_ring_count;
2939 i < qdev->rx_ring_count; i++) {
2940 if (j == tx_rings_per_vector) {
2941 vect++;
2942 j = 0;
2943 }
2944 qdev->rx_ring[i].irq = vect;
2945 j++;
2946 }
2947 } else {
2948 /* For single vector all rings have an irq
2949 * of zero.
2950 */
2951 for (i = 0; i < qdev->rx_ring_count; i++)
2952 qdev->rx_ring[i].irq = 0;
2953 }
2954}
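/* Illustrative sketch, not part of the driver: the loop in
 * ql_set_tx_vect() amounts to this closed form.  The helper name is made
 * up.  For the example in the comment above (2 vectors, 8 TX completion
 * rings) tx_rings_per_vector is 4, so TX completion rings 0-3 land on
 * vector 0 and rings 4-7 on vector 1.
 */
static inline u32 example_tx_cq_to_vector(struct ql_adapter *qdev, int cq_idx)
{
	u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;

	/* cq_idx is an rx_ring[] index at or beyond rss_ring_count. */
	return (cq_idx - qdev->rss_ring_count) / tx_rings_per_vector;
}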
2955
2956/* Set the interrupt mask for this vector. Each vector
2957 * will service 1 RSS ring and 1 or more TX completion
2958 * rings. This function sets up a bit mask per vector
2959 * that indicates which rings it services.
2960 */
2961static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx)
2962{
2963 int j, vect = ctx->intr;
2964 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
2965
2966 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
2967 /* Add the RSS ring serviced by this vector
2968 * to the mask.
2969 */
2970 ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
2971 /* Add the TX ring(s) serviced by this vector
2972 * to the mask. */
2973 for (j = 0; j < tx_rings_per_vector; j++) {
2974 ctx->irq_mask |=
2975 (1 << qdev->rx_ring[qdev->rss_ring_count +
2976 (vect * tx_rings_per_vector) + j].cq_id);
2977 }
2978 } else {
2979 /* For single vector we just shift each queue's
2980 * ID into the mask.
2981 */
2982 for (j = 0; j < qdev->rx_ring_count; j++)
2983 ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id);
2984 }
2985}
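/* Illustrative sketch, not part of the driver: the masks ql_set_irq_mask()
 * produces for the running example of 2 vectors and 8 TX completion rings
 * (rss_ring_count = 2, so completion queue IDs run 0-9).  qlge_isr() and
 * ql_napi_poll_msix() later AND a queue's cq_id bit (or ISR1) against
 * these masks to decide which vector owns the work.
 */
static const u32 example_irq_mask_vec0 =
	(1 << 0) | (1 << 2) | (1 << 3) | (1 << 4) | (1 << 5);	/* 0x03d */
static const u32 example_irq_mask_vec1 =
	(1 << 1) | (1 << 6) | (1 << 7) | (1 << 8) | (1 << 9);	/* 0x3c2 */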
2986
c4e84bde
RM
2987/*
2988 * Here we build the intr_context structures based on
2989 * our rx_ring count and intr vector count.
2990 * The intr_context structure is used to hook each vector
2991 * to possibly different handlers.
2992 */
2993static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
2994{
2995 int i = 0;
2996 struct intr_context *intr_context = &qdev->intr_context[0];
2997
c4e84bde
RM
2998 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
 2999 /* Each rx_ring has its
3000 * own intr_context since we have separate
3001 * vectors for each queue.
c4e84bde
RM
3002 */
3003 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3004 qdev->rx_ring[i].irq = i;
3005 intr_context->intr = i;
3006 intr_context->qdev = qdev;
39aa8165
RM
3007 /* Set up this vector's bit-mask that indicates
3008 * which queues it services.
3009 */
3010 ql_set_irq_mask(qdev, intr_context);
c4e84bde
RM
3011 /*
3012 * We set up each vectors enable/disable/read bits so
3013 * there's no bit/mask calculations in the critical path.
3014 */
3015 intr_context->intr_en_mask =
3016 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3017 INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
3018 | i;
3019 intr_context->intr_dis_mask =
3020 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3021 INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
3022 INTR_EN_IHD | i;
3023 intr_context->intr_read_mask =
3024 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3025 INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
3026 i;
39aa8165
RM
3027 if (i == 0) {
3028 /* The first vector/queue handles
3029 * broadcast/multicast, fatal errors,
 3030 * and firmware events. This is in addition
3031 * to normal inbound NAPI processing.
c4e84bde 3032 */
39aa8165 3033 intr_context->handler = qlge_isr;
b2014ff8
RM
3034 sprintf(intr_context->name, "%s-rx-%d",
3035 qdev->ndev->name, i);
3036 } else {
c4e84bde 3037 /*
39aa8165 3038 * Inbound queues handle unicast frames only.
c4e84bde 3039 */
39aa8165
RM
3040 intr_context->handler = qlge_msix_rx_isr;
3041 sprintf(intr_context->name, "%s-rx-%d",
c4e84bde 3042 qdev->ndev->name, i);
c4e84bde
RM
3043 }
3044 }
3045 } else {
3046 /*
3047 * All rx_rings use the same intr_context since
3048 * there is only one vector.
3049 */
3050 intr_context->intr = 0;
3051 intr_context->qdev = qdev;
3052 /*
 3053 * We set up each vector's enable/disable/read bits so
3054 * there's no bit/mask calculations in the critical path.
3055 */
3056 intr_context->intr_en_mask =
3057 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
3058 intr_context->intr_dis_mask =
3059 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3060 INTR_EN_TYPE_DISABLE;
3061 intr_context->intr_read_mask =
3062 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
3063 /*
3064 * Single interrupt means one handler for all rings.
3065 */
3066 intr_context->handler = qlge_isr;
3067 sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
39aa8165
RM
3068 /* Set up this vector's bit-mask that indicates
3069 * which queues it services. In this case there is
3070 * a single vector so it will service all RSS and
3071 * TX completion rings.
3072 */
3073 ql_set_irq_mask(qdev, intr_context);
c4e84bde 3074 }
39aa8165
RM
3075 /* Tell the TX completion rings which MSIx vector
3076 * they will be using.
3077 */
3078 ql_set_tx_vect(qdev);
c4e84bde
RM
3079}
3080
3081static void ql_free_irq(struct ql_adapter *qdev)
3082{
3083 int i;
3084 struct intr_context *intr_context = &qdev->intr_context[0];
3085
3086 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3087 if (intr_context->hooked) {
3088 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3089 free_irq(qdev->msi_x_entry[i].vector,
3090 &qdev->rx_ring[i]);
4974097a 3091 QPRINTK(qdev, IFDOWN, DEBUG,
c4e84bde
RM
3092 "freeing msix interrupt %d.\n", i);
3093 } else {
3094 free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
4974097a 3095 QPRINTK(qdev, IFDOWN, DEBUG,
c4e84bde
RM
3096 "freeing msi interrupt %d.\n", i);
3097 }
3098 }
3099 }
3100 ql_disable_msix(qdev);
3101}
3102
3103static int ql_request_irq(struct ql_adapter *qdev)
3104{
3105 int i;
3106 int status = 0;
3107 struct pci_dev *pdev = qdev->pdev;
3108 struct intr_context *intr_context = &qdev->intr_context[0];
3109
3110 ql_resolve_queues_to_irqs(qdev);
3111
3112 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3113 atomic_set(&intr_context->irq_cnt, 0);
3114 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3115 status = request_irq(qdev->msi_x_entry[i].vector,
3116 intr_context->handler,
3117 0,
3118 intr_context->name,
3119 &qdev->rx_ring[i]);
3120 if (status) {
3121 QPRINTK(qdev, IFUP, ERR,
3122 "Failed request for MSIX interrupt %d.\n",
3123 i);
3124 goto err_irq;
3125 } else {
4974097a 3126 QPRINTK(qdev, IFUP, DEBUG,
c4e84bde
RM
3127 "Hooked intr %d, queue type %s%s%s, with name %s.\n",
3128 i,
3129 qdev->rx_ring[i].type ==
3130 DEFAULT_Q ? "DEFAULT_Q" : "",
3131 qdev->rx_ring[i].type ==
3132 TX_Q ? "TX_Q" : "",
3133 qdev->rx_ring[i].type ==
3134 RX_Q ? "RX_Q" : "", intr_context->name);
3135 }
3136 } else {
3137 QPRINTK(qdev, IFUP, DEBUG,
3138 "trying msi or legacy interrupts.\n");
3139 QPRINTK(qdev, IFUP, DEBUG,
3140 "%s: irq = %d.\n", __func__, pdev->irq);
3141 QPRINTK(qdev, IFUP, DEBUG,
3142 "%s: context->name = %s.\n", __func__,
3143 intr_context->name);
3144 QPRINTK(qdev, IFUP, DEBUG,
3145 "%s: dev_id = 0x%p.\n", __func__,
3146 &qdev->rx_ring[0]);
3147 status =
3148 request_irq(pdev->irq, qlge_isr,
3149 test_bit(QL_MSI_ENABLED,
3150 &qdev->
3151 flags) ? 0 : IRQF_SHARED,
3152 intr_context->name, &qdev->rx_ring[0]);
3153 if (status)
3154 goto err_irq;
3155
 3156 QPRINTK(qdev, IFUP, DEBUG,
3157 "Hooked intr %d, queue type %s%s%s, with name %s.\n",
3158 i,
3159 qdev->rx_ring[0].type ==
3160 DEFAULT_Q ? "DEFAULT_Q" : "",
3161 qdev->rx_ring[0].type == TX_Q ? "TX_Q" : "",
3162 qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
3163 intr_context->name);
3164 }
3165 intr_context->hooked = 1;
3166 }
3167 return status;
3168err_irq:
 3169 QPRINTK(qdev, IFUP, ERR, "Failed to get the interrupts!!!\n");
3170 ql_free_irq(qdev);
3171 return status;
3172}
3173
3174static int ql_start_rss(struct ql_adapter *qdev)
3175{
541ae28c
RM
3176 u8 init_hash_seed[] = {0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
3177 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f,
3178 0xb0, 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b,
3179 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80,
3180 0x30, 0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b,
3181 0xbe, 0xac, 0x01, 0xfa};
c4e84bde
RM
3182 struct ricb *ricb = &qdev->ricb;
3183 int status = 0;
3184 int i;
3185 u8 *hash_id = (u8 *) ricb->hash_cq_id;
3186
e332471c 3187 memset((void *)ricb, 0, sizeof(*ricb));
c4e84bde 3188
b2014ff8 3189 ricb->base_cq = RSS_L4K;
c4e84bde 3190 ricb->flags =
541ae28c
RM
3191 (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6);
3192 ricb->mask = cpu_to_le16((u16)(0x3ff));
c4e84bde
RM
3193
3194 /*
3195 * Fill out the Indirection Table.
3196 */
541ae28c
RM
3197 for (i = 0; i < 1024; i++)
3198 hash_id[i] = (i & (qdev->rss_ring_count - 1));
c4e84bde 3199
541ae28c
RM
3200 memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
3201 memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);
c4e84bde 3202
4974097a 3203 QPRINTK(qdev, IFUP, DEBUG, "Initializing RSS.\n");
c4e84bde 3204
e332471c 3205 status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
c4e84bde
RM
3206 if (status) {
3207 QPRINTK(qdev, IFUP, ERR, "Failed to load RICB.\n");
3208 return status;
3209 }
4974097a 3210 QPRINTK(qdev, IFUP, DEBUG, "Successfully loaded RICB.\n");
c4e84bde
RM
3211 return status;
3212}
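/* Illustrative sketch, not part of the driver: the indirection table fill
 * in ql_start_rss(), shown for the concrete case of four RSS rings.  Each
 * of the 1024 hash buckets is masked down to a completion queue index, so
 * with a power-of-two ring count the table simply cycles 0,1,2,3,0,1,...
 * The RSS hash of an incoming flow indexes this table to pick its queue.
 */
static inline void example_fill_rss_table(u8 *hash_id, int rss_ring_count)
{
	int i;

	for (i = 0; i < 1024; i++)
		hash_id[i] = i & (rss_ring_count - 1);	/* e.g. 0,1,2,3,0,... */
}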
3213
a5f59dc9 3214static int ql_clear_routing_entries(struct ql_adapter *qdev)
c4e84bde 3215{
a5f59dc9 3216 int i, status = 0;
c4e84bde 3217
8587ea35
RM
3218 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3219 if (status)
3220 return status;
c4e84bde
RM
3221 /* Clear all the entries in the routing table. */
3222 for (i = 0; i < 16; i++) {
3223 status = ql_set_routing_reg(qdev, i, 0, 0);
3224 if (status) {
3225 QPRINTK(qdev, IFUP, ERR,
a5f59dc9
RM
3226 "Failed to init routing register for CAM "
3227 "packets.\n");
3228 break;
c4e84bde
RM
3229 }
3230 }
a5f59dc9
RM
3231 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3232 return status;
3233}
3234
3235/* Initialize the frame-to-queue routing. */
3236static int ql_route_initialize(struct ql_adapter *qdev)
3237{
3238 int status = 0;
3239
fd21cf52
RM
3240 /* Clear all the entries in the routing table. */
3241 status = ql_clear_routing_entries(qdev);
a5f59dc9
RM
3242 if (status)
3243 return status;
3244
fd21cf52 3245 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
a5f59dc9 3246 if (status)
fd21cf52 3247 return status;
c4e84bde
RM
3248
3249 status = ql_set_routing_reg(qdev, RT_IDX_ALL_ERR_SLOT, RT_IDX_ERR, 1);
3250 if (status) {
3251 QPRINTK(qdev, IFUP, ERR,
3252 "Failed to init routing register for error packets.\n");
8587ea35 3253 goto exit;
c4e84bde
RM
3254 }
3255 status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
3256 if (status) {
3257 QPRINTK(qdev, IFUP, ERR,
3258 "Failed to init routing register for broadcast packets.\n");
8587ea35 3259 goto exit;
c4e84bde
RM
3260 }
3261 /* If we have more than one inbound queue, then turn on RSS in the
3262 * routing block.
3263 */
3264 if (qdev->rss_ring_count > 1) {
3265 status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
3266 RT_IDX_RSS_MATCH, 1);
3267 if (status) {
3268 QPRINTK(qdev, IFUP, ERR,
3269 "Failed to init routing register for MATCH RSS packets.\n");
8587ea35 3270 goto exit;
c4e84bde
RM
3271 }
3272 }
3273
3274 status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
3275 RT_IDX_CAM_HIT, 1);
8587ea35 3276 if (status)
c4e84bde
RM
3277 QPRINTK(qdev, IFUP, ERR,
3278 "Failed to init routing register for CAM packets.\n");
8587ea35
RM
3279exit:
3280 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
c4e84bde
RM
3281 return status;
3282}
3283
2ee1e272 3284int ql_cam_route_initialize(struct ql_adapter *qdev)
bb58b5b6 3285{
7fab3bfe 3286 int status, set;
bb58b5b6 3287
7fab3bfe
RM
 3288 /* Check if the link is up and use that to
3289 * determine if we are setting or clearing
3290 * the MAC address in the CAM.
3291 */
3292 set = ql_read32(qdev, STS);
3293 set &= qdev->port_link_up;
3294 status = ql_set_mac_addr(qdev, set);
bb58b5b6
RM
3295 if (status) {
3296 QPRINTK(qdev, IFUP, ERR, "Failed to init mac address.\n");
3297 return status;
3298 }
3299
3300 status = ql_route_initialize(qdev);
3301 if (status)
3302 QPRINTK(qdev, IFUP, ERR, "Failed to init routing table.\n");
3303
3304 return status;
3305}
3306
c4e84bde
RM
3307static int ql_adapter_initialize(struct ql_adapter *qdev)
3308{
3309 u32 value, mask;
3310 int i;
3311 int status = 0;
3312
3313 /*
3314 * Set up the System register to halt on errors.
3315 */
3316 value = SYS_EFE | SYS_FAE;
3317 mask = value << 16;
3318 ql_write32(qdev, SYS, mask | value);
3319
c9cf0a04
RM
3320 /* Set the default queue, and VLAN behavior. */
3321 value = NIC_RCV_CFG_DFQ | NIC_RCV_CFG_RV;
3322 mask = NIC_RCV_CFG_DFQ_MASK | (NIC_RCV_CFG_RV << 16);
c4e84bde
RM
3323 ql_write32(qdev, NIC_RCV_CFG, (mask | value));
3324
3325 /* Set the MPI interrupt to enabled. */
3326 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
3327
3328 /* Enable the function, set pagesize, enable error checking. */
3329 value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
3330 FSC_EC | FSC_VM_PAGE_4K | FSC_SH;
3331
3332 /* Set/clear header splitting. */
3333 mask = FSC_VM_PAGESIZE_MASK |
3334 FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
3335 ql_write32(qdev, FSC, mask | value);
3336
3337 ql_write32(qdev, SPLT_HDR, SPLT_HDR_EP |
52e55f3c 3338 min(SMALL_BUF_MAP_SIZE, MAX_SPLIT_SIZE));
c4e84bde 3339
a3b71939
RM
3340 /* Set RX packet routing to use port/pci function on which the
3341 * packet arrived on in addition to usual frame routing.
3342 * This is helpful on bonding where both interfaces can have
3343 * the same MAC address.
3344 */
3345 ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
bc083ce9
RM
3346 /* Reroute all packets to our Interface.
3347 * They may have been routed to MPI firmware
3348 * due to WOL.
3349 */
3350 value = ql_read32(qdev, MGMT_RCV_CFG);
3351 value &= ~MGMT_RCV_CFG_RM;
3352 mask = 0xffff0000;
3353
3354 /* Sticky reg needs clearing due to WOL. */
3355 ql_write32(qdev, MGMT_RCV_CFG, mask);
3356 ql_write32(qdev, MGMT_RCV_CFG, mask | value);
3357
3358 /* Default WOL is enable on Mezz cards */
3359 if (qdev->pdev->subsystem_device == 0x0068 ||
3360 qdev->pdev->subsystem_device == 0x0180)
3361 qdev->wol = WAKE_MAGIC;
a3b71939 3362
c4e84bde
RM
3363 /* Start up the rx queues. */
3364 for (i = 0; i < qdev->rx_ring_count; i++) {
3365 status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
3366 if (status) {
3367 QPRINTK(qdev, IFUP, ERR,
3368 "Failed to start rx ring[%d].\n", i);
3369 return status;
3370 }
3371 }
3372
3373 /* If there is more than one inbound completion queue
3374 * then download a RICB to configure RSS.
3375 */
3376 if (qdev->rss_ring_count > 1) {
3377 status = ql_start_rss(qdev);
3378 if (status) {
3379 QPRINTK(qdev, IFUP, ERR, "Failed to start RSS.\n");
3380 return status;
3381 }
3382 }
3383
3384 /* Start up the tx queues. */
3385 for (i = 0; i < qdev->tx_ring_count; i++) {
3386 status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
3387 if (status) {
3388 QPRINTK(qdev, IFUP, ERR,
3389 "Failed to start tx ring[%d].\n", i);
3390 return status;
3391 }
3392 }
3393
b0c2aadf
RM
3394 /* Initialize the port and set the max framesize. */
3395 status = qdev->nic_ops->port_initialize(qdev);
80928860
RM
3396 if (status)
3397 QPRINTK(qdev, IFUP, ERR, "Failed to start port.\n");
c4e84bde 3398
bb58b5b6
RM
3399 /* Set up the MAC address and frame routing filter. */
3400 status = ql_cam_route_initialize(qdev);
c4e84bde 3401 if (status) {
bb58b5b6
RM
3402 QPRINTK(qdev, IFUP, ERR,
3403 "Failed to init CAM/Routing tables.\n");
c4e84bde
RM
3404 return status;
3405 }
3406
3407 /* Start NAPI for the RSS queues. */
b2014ff8 3408 for (i = 0; i < qdev->rss_ring_count; i++) {
4974097a 3409 QPRINTK(qdev, IFUP, DEBUG, "Enabling NAPI for rx_ring[%d].\n",
c4e84bde
RM
3410 i);
3411 napi_enable(&qdev->rx_ring[i].napi);
3412 }
3413
3414 return status;
3415}
3416
3417/* Issue soft reset to chip. */
3418static int ql_adapter_reset(struct ql_adapter *qdev)
3419{
3420 u32 value;
c4e84bde 3421 int status = 0;
a5f59dc9 3422 unsigned long end_jiffies;
c4e84bde 3423
a5f59dc9
RM
3424 /* Clear all the entries in the routing table. */
3425 status = ql_clear_routing_entries(qdev);
3426 if (status) {
3427 QPRINTK(qdev, IFUP, ERR, "Failed to clear routing bits.\n");
3428 return status;
3429 }
3430
3431 end_jiffies = jiffies +
3432 max((unsigned long)1, usecs_to_jiffies(30));
84087f4d
RM
3433
3434 /* Stop management traffic. */
3435 ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);
3436
3437 /* Wait for the NIC and MGMNT FIFOs to empty. */
3438 ql_wait_fifo_empty(qdev);
3439
c4e84bde 3440 ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
a75ee7f1 3441
c4e84bde
RM
3442 do {
3443 value = ql_read32(qdev, RST_FO);
3444 if ((value & RST_FO_FR) == 0)
3445 break;
a75ee7f1
RM
3446 cpu_relax();
3447 } while (time_before(jiffies, end_jiffies));
c4e84bde 3448
c4e84bde 3449 if (value & RST_FO_FR) {
c4e84bde 3450 QPRINTK(qdev, IFDOWN, ERR,
3ac49a1c 3451 "ETIMEDOUT!!! errored out of resetting the chip!\n");
a75ee7f1 3452 status = -ETIMEDOUT;
c4e84bde
RM
3453 }
3454
84087f4d
RM
3455 /* Resume management traffic. */
3456 ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
c4e84bde
RM
3457 return status;
3458}
3459
3460static void ql_display_dev_info(struct net_device *ndev)
3461{
3462 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
3463
3464 QPRINTK(qdev, PROBE, INFO,
e4552f51 3465 "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
c4e84bde
RM
3466 "XG Roll = %d, XG Rev = %d.\n",
3467 qdev->func,
e4552f51 3468 qdev->port,
c4e84bde
RM
3469 qdev->chip_rev_id & 0x0000000f,
3470 qdev->chip_rev_id >> 4 & 0x0000000f,
3471 qdev->chip_rev_id >> 8 & 0x0000000f,
3472 qdev->chip_rev_id >> 12 & 0x0000000f);
7c510e4b 3473 QPRINTK(qdev, PROBE, INFO, "MAC address %pM\n", ndev->dev_addr);
c4e84bde
RM
3474}
3475
bc083ce9
RM
3476int ql_wol(struct ql_adapter *qdev)
3477{
3478 int status = 0;
3479 u32 wol = MB_WOL_DISABLE;
3480
3481 /* The CAM is still intact after a reset, but if we
3482 * are doing WOL, then we may need to program the
3483 * routing regs. We would also need to issue the mailbox
3484 * commands to instruct the MPI what to do per the ethtool
3485 * settings.
3486 */
3487
3488 if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
3489 WAKE_MCAST | WAKE_BCAST)) {
3490 QPRINTK(qdev, IFDOWN, ERR,
 3491 "Unsupported WOL parameter. qdev->wol = 0x%x.\n",
3492 qdev->wol);
3493 return -EINVAL;
3494 }
3495
3496 if (qdev->wol & WAKE_MAGIC) {
3497 status = ql_mb_wol_set_magic(qdev, 1);
3498 if (status) {
3499 QPRINTK(qdev, IFDOWN, ERR,
3500 "Failed to set magic packet on %s.\n",
3501 qdev->ndev->name);
3502 return status;
3503 } else
3504 QPRINTK(qdev, DRV, INFO,
3505 "Enabled magic packet successfully on %s.\n",
3506 qdev->ndev->name);
3507
3508 wol |= MB_WOL_MAGIC_PKT;
3509 }
3510
3511 if (qdev->wol) {
3512 /* Reroute all packets to Management Interface */
3513 ql_write32(qdev, MGMT_RCV_CFG, (MGMT_RCV_CFG_RM |
3514 (MGMT_RCV_CFG_RM << 16)));
3515 wol |= MB_WOL_MODE_ON;
3516 status = ql_mb_wol_mode(qdev, wol);
3517 QPRINTK(qdev, DRV, ERR, "WOL %s (wol code 0x%x) on %s\n",
 3518 (status == 0) ? "Successfully set" : "Failed", wol,
3519 qdev->ndev->name);
3520 }
3521
3522 return status;
3523}
3524
c4e84bde
RM
3525static int ql_adapter_down(struct ql_adapter *qdev)
3526{
c4e84bde 3527 int i, status = 0;
c4e84bde 3528
6a473308 3529 ql_link_off(qdev);
c4e84bde 3530
6497b607
RM
3531 /* Don't kill the reset worker thread if we
3532 * are in the process of recovery.
3533 */
3534 if (test_bit(QL_ADAPTER_UP, &qdev->flags))
3535 cancel_delayed_work_sync(&qdev->asic_reset_work);
c4e84bde
RM
3536 cancel_delayed_work_sync(&qdev->mpi_reset_work);
3537 cancel_delayed_work_sync(&qdev->mpi_work);
2ee1e272 3538 cancel_delayed_work_sync(&qdev->mpi_idc_work);
bcc2cb3b 3539 cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
c4e84bde 3540
39aa8165
RM
3541 for (i = 0; i < qdev->rss_ring_count; i++)
3542 napi_disable(&qdev->rx_ring[i].napi);
c4e84bde
RM
3543
3544 clear_bit(QL_ADAPTER_UP, &qdev->flags);
3545
3546 ql_disable_interrupts(qdev);
3547
3548 ql_tx_ring_clean(qdev);
3549
6b318cb3
RM
3550 /* Call netif_napi_del() from common point.
3551 */
b2014ff8 3552 for (i = 0; i < qdev->rss_ring_count; i++)
6b318cb3
RM
3553 netif_napi_del(&qdev->rx_ring[i].napi);
3554
4545a3f2 3555 ql_free_rx_buffers(qdev);
2d6a5e95 3556
c4e84bde
RM
3557 status = ql_adapter_reset(qdev);
3558 if (status)
3559 QPRINTK(qdev, IFDOWN, ERR, "reset(func #%d) FAILED!\n",
3560 qdev->func);
c4e84bde
RM
3561 return status;
3562}
3563
3564static int ql_adapter_up(struct ql_adapter *qdev)
3565{
3566 int err = 0;
3567
c4e84bde
RM
3568 err = ql_adapter_initialize(qdev);
3569 if (err) {
3570 QPRINTK(qdev, IFUP, INFO, "Unable to initialize adapter.\n");
c4e84bde
RM
3571 goto err_init;
3572 }
c4e84bde 3573 set_bit(QL_ADAPTER_UP, &qdev->flags);
4545a3f2 3574 ql_alloc_rx_buffers(qdev);
8b007de1
RM
3575 /* If the port is initialized and the
 3576 * link is up then turn on the carrier.
3577 */
3578 if ((ql_read32(qdev, STS) & qdev->port_init) &&
3579 (ql_read32(qdev, STS) & qdev->port_link_up))
6a473308 3580 ql_link_on(qdev);
c4e84bde
RM
3581 ql_enable_interrupts(qdev);
3582 ql_enable_all_completion_interrupts(qdev);
1e213303 3583 netif_tx_start_all_queues(qdev->ndev);
c4e84bde
RM
3584
3585 return 0;
3586err_init:
3587 ql_adapter_reset(qdev);
3588 return err;
3589}
3590
c4e84bde
RM
3591static void ql_release_adapter_resources(struct ql_adapter *qdev)
3592{
3593 ql_free_mem_resources(qdev);
3594 ql_free_irq(qdev);
3595}
3596
3597static int ql_get_adapter_resources(struct ql_adapter *qdev)
3598{
3599 int status = 0;
3600
3601 if (ql_alloc_mem_resources(qdev)) {
3602 QPRINTK(qdev, IFUP, ERR, "Unable to allocate memory.\n");
3603 return -ENOMEM;
3604 }
3605 status = ql_request_irq(qdev);
c4e84bde
RM
3606 return status;
3607}
3608
3609static int qlge_close(struct net_device *ndev)
3610{
3611 struct ql_adapter *qdev = netdev_priv(ndev);
3612
3613 /*
3614 * Wait for device to recover from a reset.
3615 * (Rarely happens, but possible.)
3616 */
3617 while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
3618 msleep(1);
3619 ql_adapter_down(qdev);
3620 ql_release_adapter_resources(qdev);
c4e84bde
RM
3621 return 0;
3622}
3623
3624static int ql_configure_rings(struct ql_adapter *qdev)
3625{
3626 int i;
3627 struct rx_ring *rx_ring;
3628 struct tx_ring *tx_ring;
a4ab6137 3629 int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus());
7c734359
RM
3630 unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ?
3631 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
3632
3633 qdev->lbq_buf_order = get_order(lbq_buf_len);
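	/* get_order() rounds lbq_buf_len up to a whole number of pages and
	 * returns the page allocation order used later when large receive
	 * buffers are allocated, so jumbo MTUs get larger buffer pages.
	 */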
a4ab6137
RM
3634
3635 /* In a perfect world we have one RSS ring for each CPU
3636 * and each has its own vector. To do that we ask for
3637 * cpu_cnt vectors. ql_enable_msix() will adjust the
3638 * vector count to what we actually get. We then
3639 * allocate an RSS ring for each.
3640 * Essentially, we are doing min(cpu_count, msix_vector_count).
c4e84bde 3641 */
a4ab6137
RM
3642 qdev->intr_count = cpu_cnt;
3643 ql_enable_msix(qdev);
3644 /* Adjust the RSS ring count to the actual vector count. */
3645 qdev->rss_ring_count = qdev->intr_count;
c4e84bde 3646 qdev->tx_ring_count = cpu_cnt;
b2014ff8 3647 qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;
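	/* Example (assumed numbers): with 8 online CPUs but only 4 MSI-X
	 * vectors granted, rss_ring_count becomes 4, tx_ring_count stays 8,
	 * and rx_ring_count (RSS plus TX completion queues) is 12.
	 */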
c4e84bde 3648
c4e84bde
RM
3649 for (i = 0; i < qdev->tx_ring_count; i++) {
3650 tx_ring = &qdev->tx_ring[i];
e332471c 3651 memset((void *)tx_ring, 0, sizeof(*tx_ring));
c4e84bde
RM
3652 tx_ring->qdev = qdev;
3653 tx_ring->wq_id = i;
3654 tx_ring->wq_len = qdev->tx_ring_size;
3655 tx_ring->wq_size =
3656 tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);
3657
3658 /*
3659 * The completion queue IDs for the tx rings start
39aa8165 3660 * immediately after the rss rings.
c4e84bde 3661 */
39aa8165 3662 tx_ring->cq_id = qdev->rss_ring_count + i;
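		/* e.g. with 4 RSS rings, TX ring 0 completes on CQ 4,
		 * TX ring 1 on CQ 5, and so on.
		 */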
c4e84bde
RM
3663 }
3664
3665 for (i = 0; i < qdev->rx_ring_count; i++) {
3666 rx_ring = &qdev->rx_ring[i];
e332471c 3667 memset((void *)rx_ring, 0, sizeof(*rx_ring));
c4e84bde
RM
3668 rx_ring->qdev = qdev;
3669 rx_ring->cq_id = i;
3670 rx_ring->cpu = i % cpu_cnt; /* CPU to run handler on. */
b2014ff8 3671 if (i < qdev->rss_ring_count) {
39aa8165
RM
3672 /*
3673 * Inbound (RSS) queues.
3674 */
c4e84bde
RM
3675 rx_ring->cq_len = qdev->rx_ring_size;
3676 rx_ring->cq_size =
3677 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
3678 rx_ring->lbq_len = NUM_LARGE_BUFFERS;
3679 rx_ring->lbq_size =
2c9a0d41 3680 rx_ring->lbq_len * sizeof(__le64);
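			/* Each large buffer queue entry is a 64-bit
			 * little-endian DMA address, hence sizeof(__le64).
			 */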
7c734359
RM
3681 rx_ring->lbq_buf_size = (u16)lbq_buf_len;
3682 QPRINTK(qdev, IFUP, DEBUG,
3683 "lbq_buf_size %d, order = %d\n",
3684 rx_ring->lbq_buf_size, qdev->lbq_buf_order);
c4e84bde
RM
3685 rx_ring->sbq_len = NUM_SMALL_BUFFERS;
3686 rx_ring->sbq_size =
2c9a0d41 3687 rx_ring->sbq_len * sizeof(__le64);
52e55f3c 3688 rx_ring->sbq_buf_size = SMALL_BUF_MAP_SIZE;
b2014ff8
RM
3689 rx_ring->type = RX_Q;
3690 } else {
c4e84bde
RM
3691 /*
3692 * Outbound queue handles outbound completions only.
3693 */
3694 /* outbound cq is same size as tx_ring it services. */
3695 rx_ring->cq_len = qdev->tx_ring_size;
3696 rx_ring->cq_size =
3697 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
3698 rx_ring->lbq_len = 0;
3699 rx_ring->lbq_size = 0;
3700 rx_ring->lbq_buf_size = 0;
3701 rx_ring->sbq_len = 0;
3702 rx_ring->sbq_size = 0;
3703 rx_ring->sbq_buf_size = 0;
3704 rx_ring->type = TX_Q;
c4e84bde
RM
3705 }
3706 }
3707 return 0;
3708}
3709
3710static int qlge_open(struct net_device *ndev)
3711{
3712 int err = 0;
3713 struct ql_adapter *qdev = netdev_priv(ndev);
3714
3715 err = ql_configure_rings(qdev);
3716 if (err)
3717 return err;
3718
3719 err = ql_get_adapter_resources(qdev);
3720 if (err)
3721 goto error_up;
3722
3723 err = ql_adapter_up(qdev);
3724 if (err)
3725 goto error_up;
3726
3727 return err;
3728
3729error_up:
3730 ql_release_adapter_resources(qdev);
c4e84bde
RM
3731 return err;
3732}
3733
7c734359
RM
3734static int ql_change_rx_buffers(struct ql_adapter *qdev)
3735{
3736 struct rx_ring *rx_ring;
3737 int i, status;
3738 u32 lbq_buf_len;
3739
3740 /* Wait for an outstanding reset to complete. */
3741 if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
3742 int i = 3;
3743 while (i-- && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
3744 QPRINTK(qdev, IFUP, ERR,
3745 "Waiting for adapter UP...\n");
3746 ssleep(1);
3747 }
3748
3749 if (!i) {
3750 QPRINTK(qdev, IFUP, ERR,
3751 "Timed out waiting for adapter UP\n");
3752 return -ETIMEDOUT;
3753 }
3754 }
3755
3756 status = ql_adapter_down(qdev);
3757 if (status)
3758 goto error;
3759
3760 /* Get the new rx buffer size. */
3761 lbq_buf_len = (qdev->ndev->mtu > 1500) ?
3762 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
3763 qdev->lbq_buf_order = get_order(lbq_buf_len);
3764
3765 for (i = 0; i < qdev->rss_ring_count; i++) {
3766 rx_ring = &qdev->rx_ring[i];
3767 /* Set the new size. */
3768 rx_ring->lbq_buf_size = lbq_buf_len;
3769 }
3770
3771 status = ql_adapter_up(qdev);
3772 if (status)
3773 goto error;
3774
3775 return status;
3776error:
3777 QPRINTK(qdev, IFUP, ALERT,
3778 "Driver up/down cycle failed, closing device.\n");
3779 set_bit(QL_ADAPTER_UP, &qdev->flags);
3780 dev_close(qdev->ndev);
3781 return status;
3782}
3783
c4e84bde
RM
3784static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
3785{
3786 struct ql_adapter *qdev = netdev_priv(ndev);
7c734359 3787 int status;
c4e84bde
RM
3788
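	/* Only the standard 1500-byte and jumbo 9000-byte MTUs are
	 * supported; any other value is rejected below.
	 */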
3789 if (ndev->mtu == 1500 && new_mtu == 9000) {
3790 QPRINTK(qdev, IFUP, ERR, "Changing to jumbo MTU.\n");
3791 } else if (ndev->mtu == 9000 && new_mtu == 1500) {
3792 QPRINTK(qdev, IFUP, ERR, "Changing to normal MTU.\n");
3793 } else if ((ndev->mtu == 1500 && new_mtu == 1500) ||
3794 (ndev->mtu == 9000 && new_mtu == 9000)) {
3795 return 0;
3796 } else
3797 return -EINVAL;
7c734359
RM
3798
3799 queue_delayed_work(qdev->workqueue,
3800 &qdev->mpi_port_cfg_work, 3*HZ);
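	/* The MPI port-config worker presumably reports the new frame size
	 * to the firmware; it is queued here whether or not the interface
	 * is currently running.
	 */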
3801
3802 if (!netif_running(qdev->ndev)) {
3803 ndev->mtu = new_mtu;
3804 return 0;
3805 }
3806
c4e84bde 3807 ndev->mtu = new_mtu;
7c734359
RM
3808 status = ql_change_rx_buffers(qdev);
3809 if (status) {
3810 QPRINTK(qdev, IFUP, ERR,
3811 "Changing MTU failed.\n");
3812 }
3813
3814 return status;
c4e84bde
RM
3815}
3816
3817static struct net_device_stats *qlge_get_stats(struct net_device
3818 *ndev)
3819{
bcc90f55 3820 return &ndev->stats;
c4e84bde
RM
3821}
3822
3823static void qlge_set_multicast_list(struct net_device *ndev)
3824{
3825 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
3826 struct dev_mc_list *mc_ptr;
cc288f54 3827 int i, status;
c4e84bde 3828
cc288f54
RM
3829 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3830 if (status)
3831 return;
c4e84bde
RM
3832 /*
3833 * Set or clear promiscuous mode if a
3834 * transition is taking place.
3835 */
3836 if (ndev->flags & IFF_PROMISC) {
3837 if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
3838 if (ql_set_routing_reg
3839 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
3840 QPRINTK(qdev, HW, ERR,
3841 "Failed to set promiscous mode.\n");
3842 } else {
3843 set_bit(QL_PROMISCUOUS, &qdev->flags);
3844 }
3845 }
3846 } else {
3847 if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
3848 if (ql_set_routing_reg
3849 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
3850 QPRINTK(qdev, HW, ERR,
3851 "Failed to clear promiscous mode.\n");
3852 } else {
3853 clear_bit(QL_PROMISCUOUS, &qdev->flags);
3854 }
3855 }
3856 }
3857
3858 /*
3859 * Set or clear all multicast mode if a
3860 * transition is taking place.
3861 */
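	/* IFF_ALLMULTI, or a multicast list longer than the hardware can
	 * filter (> MAX_MULTICAST_ENTRIES), falls back to all-multi mode.
	 */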
3862 if ((ndev->flags & IFF_ALLMULTI) ||
3863 (ndev->mc_count > MAX_MULTICAST_ENTRIES)) {
3864 if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
3865 if (ql_set_routing_reg
3866 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
3867 QPRINTK(qdev, HW, ERR,
3868 "Failed to set all-multi mode.\n");
3869 } else {
3870 set_bit(QL_ALLMULTI, &qdev->flags);
3871 }
3872 }
3873 } else {
3874 if (test_bit(QL_ALLMULTI, &qdev->flags)) {
3875 if (ql_set_routing_reg
3876 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
3877 QPRINTK(qdev, HW, ERR,
3878 "Failed to clear all-multi mode.\n");
3879 } else {
3880 clear_bit(QL_ALLMULTI, &qdev->flags);
3881 }
3882 }
3883 }
3884
3885 if (ndev->mc_count) {
cc288f54
RM
3886 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
3887 if (status)
3888 goto exit;
c4e84bde
RM
3889 for (i = 0, mc_ptr = ndev->mc_list; mc_ptr;
3890 i++, mc_ptr = mc_ptr->next)
3891 if (ql_set_mac_addr_reg(qdev, (u8 *) mc_ptr->dmi_addr,
3892 MAC_ADDR_TYPE_MULTI_MAC, i)) {
3893 QPRINTK(qdev, HW, ERR,
3894 "Failed to loadmulticast address.\n");
cc288f54 3895 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
c4e84bde
RM
3896 goto exit;
3897 }
cc288f54 3898 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
c4e84bde
RM
3899 if (ql_set_routing_reg
3900 (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
3901 QPRINTK(qdev, HW, ERR,
3902 "Failed to set multicast match mode.\n");
3903 } else {
3904 set_bit(QL_ALLMULTI, &qdev->flags);
3905 }
3906 }
3907exit:
8587ea35 3908 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
c4e84bde
RM
3909}
3910
3911static int qlge_set_mac_address(struct net_device *ndev, void *p)
3912{
3913 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
3914 struct sockaddr *addr = p;
cc288f54 3915 int status;
c4e84bde
RM
3916
3917 if (netif_running(ndev))
3918 return -EBUSY;
3919
3920 if (!is_valid_ether_addr(addr->sa_data))
3921 return -EADDRNOTAVAIL;
3922 memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
3923
cc288f54
RM
3924 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
3925 if (status)
3926 return status;
cc288f54
RM
3927 status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
3928 MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
cc288f54
RM
3929 if (status)
3930 QPRINTK(qdev, HW, ERR, "Failed to load MAC address.\n");
3931 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
3932 return status;
c4e84bde
RM
3933}
3934
3935static void qlge_tx_timeout(struct net_device *ndev)
3936{
3937 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
6497b607 3938 ql_queue_asic_error(qdev);
c4e84bde
RM
3939}
3940
3941static void ql_asic_reset_work(struct work_struct *work)
3942{
3943 struct ql_adapter *qdev =
3944 container_of(work, struct ql_adapter, asic_reset_work.work);
db98812f 3945 int status;
f2c0d8df 3946 rtnl_lock();
db98812f
RM
3947 status = ql_adapter_down(qdev);
3948 if (status)
3949 goto error;
3950
3951 status = ql_adapter_up(qdev);
3952 if (status)
3953 goto error;
2cd6dbaa
RM
3954
3955 /* Restore rx mode. */
3956 clear_bit(QL_ALLMULTI, &qdev->flags);
3957 clear_bit(QL_PROMISCUOUS, &qdev->flags);
3958 qlge_set_multicast_list(qdev->ndev);
3959
f2c0d8df 3960 rtnl_unlock();
db98812f
RM
3961 return;
3962error:
3963 QPRINTK(qdev, IFUP, ALERT,
3964 "Driver up/down cycle failed, closing device\n");
f2c0d8df 3965
db98812f
RM
3966 set_bit(QL_ADAPTER_UP, &qdev->flags);
3967 dev_close(qdev->ndev);
3968 rtnl_unlock();
c4e84bde
RM
3969}
3970
b0c2aadf
RM
3971static struct nic_operations qla8012_nic_ops = {
3972 .get_flash = ql_get_8012_flash_params,
3973 .port_initialize = ql_8012_port_initialize,
3974};
3975
cdca8d02
RM
3976static struct nic_operations qla8000_nic_ops = {
3977 .get_flash = ql_get_8000_flash_params,
3978 .port_initialize = ql_8000_port_initialize,
3979};
3980
e4552f51
RM
3981/* Find the pcie function number for the other NIC
3982 * on this chip. Since both NIC functions share a
3983 * common firmware we have the lowest enabled function
3984 * do any common work. Examples would be resetting
3985 * after a fatal firmware error, or doing a firmware
3986 * coredump.
3987 */
3988static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
3989{
3990 int status = 0;
3991 u32 temp;
3992 u32 nic_func1, nic_func2;
3993
3994 status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
3995 &temp);
3996 if (status)
3997 return status;
3998
3999 nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
4000 MPI_TEST_NIC_FUNC_MASK);
4001 nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
4002 MPI_TEST_NIC_FUNC_MASK);
4003
4004 if (qdev->func == nic_func1)
4005 qdev->alt_func = nic_func2;
4006 else if (qdev->func == nic_func2)
4007 qdev->alt_func = nic_func1;
4008 else
4009 status = -EIO;
4010
4011 return status;
4012}
b0c2aadf 4013
e4552f51 4014static int ql_get_board_info(struct ql_adapter *qdev)
c4e84bde 4015{
e4552f51 4016 int status;
c4e84bde
RM
4017 qdev->func =
4018 (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
e4552f51
RM
4019 if (qdev->func > 3)
4020 return -EIO;
4021
4022 status = ql_get_alt_pcie_func(qdev);
4023 if (status)
4024 return status;
4025
4026 qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
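	/* The lower-numbered NIC function is treated as port 0 and uses the
	 * XGMAC0 resources below; the other function gets port 1.
	 */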
4027 if (qdev->port) {
c4e84bde
RM
4028 qdev->xg_sem_mask = SEM_XGMAC1_MASK;
4029 qdev->port_link_up = STS_PL1;
4030 qdev->port_init = STS_PI1;
4031 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
4032 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
4033 } else {
4034 qdev->xg_sem_mask = SEM_XGMAC0_MASK;
4035 qdev->port_link_up = STS_PL0;
4036 qdev->port_init = STS_PI0;
4037 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
4038 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
4039 }
4040 qdev->chip_rev_id = ql_read32(qdev, REV_ID);
b0c2aadf
RM
4041 qdev->device_id = qdev->pdev->device;
4042 if (qdev->device_id == QLGE_DEVICE_ID_8012)
4043 qdev->nic_ops = &qla8012_nic_ops;
cdca8d02
RM
4044 else if (qdev->device_id == QLGE_DEVICE_ID_8000)
4045 qdev->nic_ops = &qla8000_nic_ops;
e4552f51 4046 return status;
c4e84bde
RM
4047}
4048
4049static void ql_release_all(struct pci_dev *pdev)
4050{
4051 struct net_device *ndev = pci_get_drvdata(pdev);
4052 struct ql_adapter *qdev = netdev_priv(ndev);
4053
4054 if (qdev->workqueue) {
4055 destroy_workqueue(qdev->workqueue);
4056 qdev->workqueue = NULL;
4057 }
39aa8165 4058
c4e84bde 4059 if (qdev->reg_base)
8668ae92 4060 iounmap(qdev->reg_base);
c4e84bde
RM
4061 if (qdev->doorbell_area)
4062 iounmap(qdev->doorbell_area);
4063 pci_release_regions(pdev);
4064 pci_set_drvdata(pdev, NULL);
4065}
4066
4067static int __devinit ql_init_device(struct pci_dev *pdev,
4068 struct net_device *ndev, int cards_found)
4069{
4070 struct ql_adapter *qdev = netdev_priv(ndev);
1d1023d0 4071 int err = 0;
c4e84bde 4072
e332471c 4073 memset((void *)qdev, 0, sizeof(*qdev));
c4e84bde
RM
4074 err = pci_enable_device(pdev);
4075 if (err) {
4076 dev_err(&pdev->dev, "PCI device enable failed.\n");
4077 return err;
4078 }
4079
ebd6e774
RM
4080 qdev->ndev = ndev;
4081 qdev->pdev = pdev;
4082 pci_set_drvdata(pdev, ndev);
c4e84bde 4083
bc9167f3
RM
4084 /* Set PCIe read request size */
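	/* 4096 bytes is the largest read request size the PCIe spec allows. */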
4085 err = pcie_set_readrq(pdev, 4096);
4086 if (err) {
4087 dev_err(&pdev->dev, "Set readrq failed.\n");
4088 goto err_out;
4089 }
4090
c4e84bde
RM
4091 err = pci_request_regions(pdev, DRV_NAME);
4092 if (err) {
4093 dev_err(&pdev->dev, "PCI region request failed.\n");
ebd6e774 4094 return err;
c4e84bde
RM
4095 }
4096
4097 pci_set_master(pdev);
6a35528a 4098 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
c4e84bde 4099 set_bit(QL_DMA64, &qdev->flags);
6a35528a 4100 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
c4e84bde 4101 } else {
284901a9 4102 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
c4e84bde 4103 if (!err)
284901a9 4104 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
c4e84bde
RM
4105 }
4106
4107 if (err) {
4108 dev_err(&pdev->dev, "No usable DMA configuration.\n");
4109 goto err_out;
4110 }
4111
6d190c6e 4112 pci_save_state(pdev);
c4e84bde
RM
4113 qdev->reg_base =
4114 ioremap_nocache(pci_resource_start(pdev, 1),
4115 pci_resource_len(pdev, 1));
4116 if (!qdev->reg_base) {
4117 dev_err(&pdev->dev, "Register mapping failed.\n");
4118 err = -ENOMEM;
4119 goto err_out;
4120 }
4121
4122 qdev->doorbell_area_size = pci_resource_len(pdev, 3);
4123 qdev->doorbell_area =
4124 ioremap_nocache(pci_resource_start(pdev, 3),
4125 pci_resource_len(pdev, 3));
4126 if (!qdev->doorbell_area) {
4127 dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
4128 err = -ENOMEM;
4129 goto err_out;
4130 }
4131
e4552f51
RM
4132 err = ql_get_board_info(qdev);
4133 if (err) {
4134 dev_err(&pdev->dev, "Register access failed.\n");
4135 err = -EIO;
4136 goto err_out;
4137 }
c4e84bde
RM
4138 qdev->msg_enable = netif_msg_init(debug, default_msg);
4139 spin_lock_init(&qdev->hw_lock);
4140 spin_lock_init(&qdev->stats_lock);
4141
4142 /* make sure the EEPROM is good */
b0c2aadf 4143 err = qdev->nic_ops->get_flash(qdev);
c4e84bde
RM
4144 if (err) {
4145 dev_err(&pdev->dev, "Invalid FLASH.\n");
4146 goto err_out;
4147 }
4148
c4e84bde
RM
4149 memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
4150
4151 /* Set up the default ring sizes. */
4152 qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
4153 qdev->rx_ring_size = NUM_RX_RING_ENTRIES;
4154
4155 /* Set up the coalescing parameters. */
4156 qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
4157 qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
4158 qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4159 qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4160
4161 /*
4162 * Set up the operating parameters.
4163 */
4164 qdev->rx_csum = 1;
c4e84bde
RM
4165 qdev->workqueue = create_singlethread_workqueue(ndev->name);
4166 INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
4167 INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
4168 INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
bcc2cb3b 4169 INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
2ee1e272 4170 INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
bcc2cb3b 4171 init_completion(&qdev->ide_completion);
c4e84bde
RM
4172
4173 if (!cards_found) {
4174 dev_info(&pdev->dev, "%s\n", DRV_STRING);
4175 dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
4176 DRV_NAME, DRV_VERSION);
4177 }
4178 return 0;
4179err_out:
4180 ql_release_all(pdev);
4181 pci_disable_device(pdev);
4182 return err;
4183}
4184
25ed7849
SH
4185static const struct net_device_ops qlge_netdev_ops = {
4186 .ndo_open = qlge_open,
4187 .ndo_stop = qlge_close,
4188 .ndo_start_xmit = qlge_send,
4189 .ndo_change_mtu = qlge_change_mtu,
4190 .ndo_get_stats = qlge_get_stats,
4191 .ndo_set_multicast_list = qlge_set_multicast_list,
4192 .ndo_set_mac_address = qlge_set_mac_address,
4193 .ndo_validate_addr = eth_validate_addr,
4194 .ndo_tx_timeout = qlge_tx_timeout,
01e6b953
RM
4195 .ndo_vlan_rx_register = qlge_vlan_rx_register,
4196 .ndo_vlan_rx_add_vid = qlge_vlan_rx_add_vid,
4197 .ndo_vlan_rx_kill_vid = qlge_vlan_rx_kill_vid,
25ed7849
SH
4198};
4199
c4e84bde
RM
4200static int __devinit qlge_probe(struct pci_dev *pdev,
4201 const struct pci_device_id *pci_entry)
4202{
4203 struct net_device *ndev = NULL;
4204 struct ql_adapter *qdev = NULL;
4205 static int cards_found = 0;
4206 int err = 0;
4207
1e213303
RM
4208 ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
4209 min(MAX_CPUS, (int)num_online_cpus()));
c4e84bde
RM
4210 if (!ndev)
4211 return -ENOMEM;
4212
4213 err = ql_init_device(pdev, ndev, cards_found);
4214 if (err < 0) {
4215 free_netdev(ndev);
4216 return err;
4217 }
4218
4219 qdev = netdev_priv(ndev);
4220 SET_NETDEV_DEV(ndev, &pdev->dev);
4221 ndev->features = (0
4222 | NETIF_F_IP_CSUM
4223 | NETIF_F_SG
4224 | NETIF_F_TSO
4225 | NETIF_F_TSO6
4226 | NETIF_F_TSO_ECN
4227 | NETIF_F_HW_VLAN_TX
4228 | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER);
22bdd4f5 4229 ndev->features |= NETIF_F_GRO;
c4e84bde
RM
4230
4231 if (test_bit(QL_DMA64, &qdev->flags))
4232 ndev->features |= NETIF_F_HIGHDMA;
4233
4234 /*
4235 * Set up net_device structure.
4236 */
4237 ndev->tx_queue_len = qdev->tx_ring_size;
4238 ndev->irq = pdev->irq;
25ed7849
SH
4239
4240 ndev->netdev_ops = &qlge_netdev_ops;
c4e84bde 4241 SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops);
c4e84bde 4242 ndev->watchdog_timeo = 10 * HZ;
25ed7849 4243
c4e84bde
RM
4244 err = register_netdev(ndev);
4245 if (err) {
4246 dev_err(&pdev->dev, "net device registration failed.\n");
4247 ql_release_all(pdev);
4248 pci_disable_device(pdev);
4249 return err;
4250 }
6a473308 4251 ql_link_off(qdev);
c4e84bde 4252 ql_display_dev_info(ndev);
9dfbbaa6 4253 atomic_set(&qdev->lb_count, 0);
c4e84bde
RM
4254 cards_found++;
4255 return 0;
4256}
4257
9dfbbaa6
RM
4258netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev)
4259{
4260 return qlge_send(skb, ndev);
4261}
4262
4263int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
4264{
4265 return ql_clean_inbound_rx_ring(rx_ring, budget);
4266}
4267
c4e84bde
RM
4268static void __devexit qlge_remove(struct pci_dev *pdev)
4269{
4270 struct net_device *ndev = pci_get_drvdata(pdev);
4271 unregister_netdev(ndev);
4272 ql_release_all(pdev);
4273 pci_disable_device(pdev);
4274 free_netdev(ndev);
4275}
4276
6d190c6e
RM
4277/* Clean up resources without touching hardware. */
4278static void ql_eeh_close(struct net_device *ndev)
4279{
4280 int i;
4281 struct ql_adapter *qdev = netdev_priv(ndev);
4282
4283 if (netif_carrier_ok(ndev)) {
4284 netif_carrier_off(ndev);
4285 netif_stop_queue(ndev);
4286 }
4287
4288 if (test_bit(QL_ADAPTER_UP, &qdev->flags))
4289 cancel_delayed_work_sync(&qdev->asic_reset_work);
4290 cancel_delayed_work_sync(&qdev->mpi_reset_work);
4291 cancel_delayed_work_sync(&qdev->mpi_work);
4292 cancel_delayed_work_sync(&qdev->mpi_idc_work);
4293 cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
4294
4295 for (i = 0; i < qdev->rss_ring_count; i++)
4296 netif_napi_del(&qdev->rx_ring[i].napi);
4297
4298 clear_bit(QL_ADAPTER_UP, &qdev->flags);
4299 ql_tx_ring_clean(qdev);
4300 ql_free_rx_buffers(qdev);
4301 ql_release_adapter_resources(qdev);
4302}
4303
c4e84bde
RM
4304/*
4305 * This callback is called by the PCI subsystem whenever
4306 * a PCI bus error is detected.
4307 */
4308static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
4309 enum pci_channel_state state)
4310{
4311 struct net_device *ndev = pci_get_drvdata(pdev);
fbc663ce 4312
6d190c6e
RM
4313 switch (state) {
4314 case pci_channel_io_normal:
4315 return PCI_ERS_RESULT_CAN_RECOVER;
4316 case pci_channel_io_frozen:
4317 netif_device_detach(ndev);
4318 if (netif_running(ndev))
4319 ql_eeh_close(ndev);
4320 pci_disable_device(pdev);
4321 return PCI_ERS_RESULT_NEED_RESET;
4322 case pci_channel_io_perm_failure:
4323 dev_err(&pdev->dev,
4324 "%s: pci_channel_io_perm_failure.\n", __func__);
fbc663ce 4325 return PCI_ERS_RESULT_DISCONNECT;
6d190c6e 4326 }
c4e84bde
RM
4327
4328 /* Request a slot reset. */
4329 return PCI_ERS_RESULT_NEED_RESET;
4330}
4331
4332/*
4333 * This callback is called after the PCI bus has been reset.
4334 * Basically, this tries to restart the card from scratch.
4335 * This is a shortened version of the device probe/discovery code,
4336 * it resembles the first half of the probe() routine.
4337 */
4338static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
4339{
4340 struct net_device *ndev = pci_get_drvdata(pdev);
4341 struct ql_adapter *qdev = netdev_priv(ndev);
4342
6d190c6e
RM
4343 pdev->error_state = pci_channel_io_normal;
4344
4345 pci_restore_state(pdev);
c4e84bde
RM
4346 if (pci_enable_device(pdev)) {
4347 QPRINTK(qdev, IFUP, ERR,
4348 "Cannot re-enable PCI device after reset.\n");
4349 return PCI_ERS_RESULT_DISCONNECT;
4350 }
c4e84bde 4351 pci_set_master(pdev);
c4e84bde
RM
4352 return PCI_ERS_RESULT_RECOVERED;
4353}
4354
4355static void qlge_io_resume(struct pci_dev *pdev)
4356{
4357 struct net_device *ndev = pci_get_drvdata(pdev);
4358 struct ql_adapter *qdev = netdev_priv(ndev);
6d190c6e 4359 int err = 0;
c4e84bde 4360
6d190c6e
RM
4361 if (ql_adapter_reset(qdev))
4362 QPRINTK(qdev, DRV, ERR, "reset FAILED!\n");
c4e84bde 4363 if (netif_running(ndev)) {
6d190c6e
RM
4364 err = qlge_open(ndev);
4365 if (err) {
c4e84bde
RM
4366 QPRINTK(qdev, IFUP, ERR,
4367 "Device initialization failed after reset.\n");
4368 return;
4369 }
6d190c6e
RM
4370 } else {
4371 QPRINTK(qdev, IFUP, ERR,
4372 "Device was not running prior to EEH.\n");
c4e84bde 4373 }
c4e84bde
RM
4374 netif_device_attach(ndev);
4375}
4376
4377static struct pci_error_handlers qlge_err_handler = {
4378 .error_detected = qlge_io_error_detected,
4379 .slot_reset = qlge_io_slot_reset,
4380 .resume = qlge_io_resume,
4381};
4382
4383static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
4384{
4385 struct net_device *ndev = pci_get_drvdata(pdev);
4386 struct ql_adapter *qdev = netdev_priv(ndev);
6b318cb3 4387 int err;
c4e84bde
RM
4388
4389 netif_device_detach(ndev);
4390
4391 if (netif_running(ndev)) {
4392 err = ql_adapter_down(qdev);
4393 if (err)
4394 return err;
4395 }
4396
bc083ce9 4397 ql_wol(qdev);
c4e84bde
RM
4398 err = pci_save_state(pdev);
4399 if (err)
4400 return err;
4401
4402 pci_disable_device(pdev);
4403
4404 pci_set_power_state(pdev, pci_choose_state(pdev, state));
4405
4406 return 0;
4407}
4408
04da2cf9 4409#ifdef CONFIG_PM
c4e84bde
RM
4410static int qlge_resume(struct pci_dev *pdev)
4411{
4412 struct net_device *ndev = pci_get_drvdata(pdev);
4413 struct ql_adapter *qdev = netdev_priv(ndev);
4414 int err;
4415
4416 pci_set_power_state(pdev, PCI_D0);
4417 pci_restore_state(pdev);
4418 err = pci_enable_device(pdev);
4419 if (err) {
4420 QPRINTK(qdev, IFUP, ERR, "Cannot enable PCI device from suspend\n");
4421 return err;
4422 }
4423 pci_set_master(pdev);
4424
4425 pci_enable_wake(pdev, PCI_D3hot, 0);
4426 pci_enable_wake(pdev, PCI_D3cold, 0);
4427
4428 if (netif_running(ndev)) {
4429 err = ql_adapter_up(qdev);
4430 if (err)
4431 return err;
4432 }
4433
4434 netif_device_attach(ndev);
4435
4436 return 0;
4437}
04da2cf9 4438#endif /* CONFIG_PM */
c4e84bde
RM
4439
4440static void qlge_shutdown(struct pci_dev *pdev)
4441{
4442 qlge_suspend(pdev, PMSG_SUSPEND);
4443}
4444
4445static struct pci_driver qlge_driver = {
4446 .name = DRV_NAME,
4447 .id_table = qlge_pci_tbl,
4448 .probe = qlge_probe,
4449 .remove = __devexit_p(qlge_remove),
4450#ifdef CONFIG_PM
4451 .suspend = qlge_suspend,
4452 .resume = qlge_resume,
4453#endif
4454 .shutdown = qlge_shutdown,
4455 .err_handler = &qlge_err_handler
4456};
4457
4458static int __init qlge_init_module(void)
4459{
4460 return pci_register_driver(&qlge_driver);
4461}
4462
4463static void __exit qlge_exit(void)
4464{
4465 pci_unregister_driver(&qlge_driver);
4466}
4467
4468module_init(qlge_init_module);
4469module_exit(qlge_exit);