qlge: Fix RSS hashing values.
drivers/net/qlge/qlge_main.c
1/*
2 * QLogic qlge NIC HBA Driver
3 * Copyright (c) 2003-2008 QLogic Corporation
4 * See LICENSE.qlge for copyright and licensing details.
5 * Author: Linux qlge network device driver by
6 * Ron Mercer <ron.mercer@qlogic.com>
7 */
8#include <linux/kernel.h>
9#include <linux/init.h>
10#include <linux/types.h>
11#include <linux/module.h>
12#include <linux/list.h>
13#include <linux/pci.h>
14#include <linux/dma-mapping.h>
15#include <linux/pagemap.h>
16#include <linux/sched.h>
17#include <linux/slab.h>
18#include <linux/dmapool.h>
19#include <linux/mempool.h>
20#include <linux/spinlock.h>
21#include <linux/kthread.h>
22#include <linux/interrupt.h>
23#include <linux/errno.h>
24#include <linux/ioport.h>
25#include <linux/in.h>
26#include <linux/ip.h>
27#include <linux/ipv6.h>
28#include <net/ipv6.h>
29#include <linux/tcp.h>
30#include <linux/udp.h>
31#include <linux/if_arp.h>
32#include <linux/if_ether.h>
33#include <linux/netdevice.h>
34#include <linux/etherdevice.h>
35#include <linux/ethtool.h>
36#include <linux/skbuff.h>
37#include <linux/if_vlan.h>
38#include <linux/delay.h>
39#include <linux/mm.h>
40#include <linux/vmalloc.h>
41#include <net/ip6_checksum.h>
42
43#include "qlge.h"
44
45char qlge_driver_name[] = DRV_NAME;
46const char qlge_driver_version[] = DRV_VERSION;
47
48MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
49MODULE_DESCRIPTION(DRV_STRING " ");
50MODULE_LICENSE("GPL");
51MODULE_VERSION(DRV_VERSION);
52
53static const u32 default_msg =
54 NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
55/* NETIF_MSG_TIMER | */
56 NETIF_MSG_IFDOWN |
57 NETIF_MSG_IFUP |
58 NETIF_MSG_RX_ERR |
59 NETIF_MSG_TX_ERR |
60/* NETIF_MSG_TX_QUEUED | */
61/* NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
62/* NETIF_MSG_PKTDATA | */
63 NETIF_MSG_HW | NETIF_MSG_WOL | 0;
64
65static int debug = 0x00007fff; /* defaults above */
66module_param(debug, int, 0);
67MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
68
69#define MSIX_IRQ 0
70#define MSI_IRQ 1
71#define LEG_IRQ 2
72static int irq_type = MSIX_IRQ;
73module_param(irq_type, int, MSIX_IRQ);
74MODULE_PARM_DESC(irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
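/* Example usage (illustrative, not from the original source):
 * "modprobe qlge debug=0x3 irq_type=2" would limit logging to the
 * NETIF_MSG_DRV and NETIF_MSG_PROBE classes and force legacy (INTx)
 * interrupts instead of the default MSI-X.
 */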
75
76static struct pci_device_id qlge_pci_tbl[] __devinitdata = {
77 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
78 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
79 /* required last entry */
80 {0,}
81};
82
83MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
84
85/* This hardware semaphore causes exclusive access to
86 * resources shared between the NIC driver, MPI firmware,
87 * FCOE firmware and the FC driver.
88 */
89static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
90{
91 u32 sem_bits = 0;
92
93 switch (sem_mask) {
94 case SEM_XGMAC0_MASK:
95 sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
96 break;
97 case SEM_XGMAC1_MASK:
98 sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
99 break;
100 case SEM_ICB_MASK:
101 sem_bits = SEM_SET << SEM_ICB_SHIFT;
102 break;
103 case SEM_MAC_ADDR_MASK:
104 sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
105 break;
106 case SEM_FLASH_MASK:
107 sem_bits = SEM_SET << SEM_FLASH_SHIFT;
108 break;
109 case SEM_PROBE_MASK:
110 sem_bits = SEM_SET << SEM_PROBE_SHIFT;
111 break;
112 case SEM_RT_IDX_MASK:
113 sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
114 break;
115 case SEM_PROC_REG_MASK:
116 sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
117 break;
118 default:
119 QPRINTK(qdev, PROBE, ALERT, "Bad Semaphore mask!.\n");
120 return -EINVAL;
121 }
122
123 ql_write32(qdev, SEM, sem_bits | sem_mask);
124 return !(ql_read32(qdev, SEM) & sem_bits);
125}
126
127int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
128{
129 unsigned int wait_count = 30;
130 do {
131 if (!ql_sem_trylock(qdev, sem_mask))
132 return 0;
133 udelay(100);
134 } while (--wait_count);
135 return -ETIMEDOUT;
136}
137
138void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
139{
140 ql_write32(qdev, SEM, sem_mask);
141 ql_read32(qdev, SEM); /* flush */
142}
143
144/* This function waits for a specific bit to come ready
145 * in a given register. It is used mostly by the initialize
146 * process, but is also used in kernel thread API such as
147 * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
148 */
149int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
150{
151 u32 temp;
152 int count = UDELAY_COUNT;
153
154 while (count) {
155 temp = ql_read32(qdev, reg);
156
157 /* check for errors */
158 if (temp & err_bit) {
159 QPRINTK(qdev, PROBE, ALERT,
160 "register 0x%.08x access error, value = 0x%.08x!.\n",
161 reg, temp);
162 return -EIO;
163 } else if (temp & bit)
164 return 0;
165 udelay(UDELAY_DELAY);
166 count--;
167 }
168 QPRINTK(qdev, PROBE, ALERT,
169 "Timed out waiting for reg %x to come ready.\n", reg);
170 return -ETIMEDOUT;
171}
172
173/* The CFG register is used to download TX and RX control blocks
174 * to the chip. This function waits for an operation to complete.
175 */
176static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
177{
178 int count = UDELAY_COUNT;
179 u32 temp;
180
181 while (count) {
182 temp = ql_read32(qdev, CFG);
183 if (temp & CFG_LE)
184 return -EIO;
185 if (!(temp & bit))
186 return 0;
187 udelay(UDELAY_DELAY);
188 count--;
189 }
190 return -ETIMEDOUT;
191}
192
193
194/* Used to issue init control blocks to hw. Maps control block,
195 * sets address, triggers download, waits for completion.
196 */
197int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
198 u16 q_id)
199{
200 u64 map;
201 int status = 0;
202 int direction;
203 u32 mask;
204 u32 value;
205
206 direction =
207 (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
208 PCI_DMA_FROMDEVICE;
209
210 map = pci_map_single(qdev->pdev, ptr, size, direction);
211 if (pci_dma_mapping_error(qdev->pdev, map)) {
212 QPRINTK(qdev, IFUP, ERR, "Couldn't map DMA area.\n");
213 return -ENOMEM;
214 }
215
216 status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
217 if (status)
218 return status;
219
220 status = ql_wait_cfg(qdev, bit);
221 if (status) {
222 QPRINTK(qdev, IFUP, ERR,
223 "Timed out waiting for CFG to come ready.\n");
224 goto exit;
225 }
226
227 ql_write32(qdev, ICB_L, (u32) map);
228 ql_write32(qdev, ICB_H, (u32) (map >> 32));
229
230 mask = CFG_Q_MASK | (bit << 16);
231 value = bit | (q_id << CFG_Q_SHIFT);
232 ql_write32(qdev, CFG, (mask | value));
233
234 /*
235 * Wait for the bit to clear after signaling hw.
236 */
237 status = ql_wait_cfg(qdev, bit);
238exit:
239 ql_sem_unlock(qdev, SEM_ICB_MASK); /* does flush too */
240 pci_unmap_single(qdev->pdev, map, size, direction);
241 return status;
242}
243
244/* Get a specific MAC address from the CAM. Used for debug and reg dump. */
245int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
246 u32 *value)
247{
248 u32 offset = 0;
249 int status;
250
251 switch (type) {
252 case MAC_ADDR_TYPE_MULTI_MAC:
253 case MAC_ADDR_TYPE_CAM_MAC:
254 {
255 status =
256 ql_wait_reg_rdy(qdev,
257 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
258 if (status)
259 goto exit;
260 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
261 (index << MAC_ADDR_IDX_SHIFT) | /* index */
262 MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
263 status =
264 ql_wait_reg_rdy(qdev,
265 MAC_ADDR_IDX, MAC_ADDR_MR, 0);
266 if (status)
267 goto exit;
268 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
269 status =
270 ql_wait_reg_rdy(qdev,
271 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
272 if (status)
273 goto exit;
274 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
275 (index << MAC_ADDR_IDX_SHIFT) | /* index */
276 MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
277 status =
278 ql_wait_reg_rdy(qdev,
279 MAC_ADDR_IDX, MAC_ADDR_MR, 0);
280 if (status)
281 goto exit;
282 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
283 if (type == MAC_ADDR_TYPE_CAM_MAC) {
284 status =
285 ql_wait_reg_rdy(qdev,
286 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
287 if (status)
288 goto exit;
289 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
290 (index << MAC_ADDR_IDX_SHIFT) | /* index */
291 MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
292 status =
293 ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
294 MAC_ADDR_MR, 0);
295 if (status)
296 goto exit;
297 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
298 }
299 break;
300 }
301 case MAC_ADDR_TYPE_VLAN:
302 case MAC_ADDR_TYPE_MULTI_FLTR:
303 default:
304 QPRINTK(qdev, IFUP, CRIT,
305 "Address type %d not yet supported.\n", type);
306 status = -EPERM;
307 }
308exit:
309 return status;
310}
311
312/* Set up a MAC, multicast or VLAN address for the
313 * inbound frame matching.
314 */
315static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
316 u16 index)
317{
318 u32 offset = 0;
319 int status = 0;
320
321 switch (type) {
322 case MAC_ADDR_TYPE_MULTI_MAC:
323 case MAC_ADDR_TYPE_CAM_MAC:
324 {
325 u32 cam_output;
326 u32 upper = (addr[0] << 8) | addr[1];
327 u32 lower =
328 (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
329 (addr[5]);
330
331 QPRINTK(qdev, IFUP, DEBUG,
332 "Adding %s address %pM"
333 " at index %d in the CAM.\n",
334 ((type ==
335 MAC_ADDR_TYPE_MULTI_MAC) ? "MULTICAST" :
336 "UNICAST"), addr, index);
337
338 status =
339 ql_wait_reg_rdy(qdev,
340 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
341 if (status)
342 goto exit;
343 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
344 (index << MAC_ADDR_IDX_SHIFT) | /* index */
345 type); /* type */
346 ql_write32(qdev, MAC_ADDR_DATA, lower);
347 status =
348 ql_wait_reg_rdy(qdev,
349 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
350 if (status)
351 goto exit;
352 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
353 (index << MAC_ADDR_IDX_SHIFT) | /* index */
354 type); /* type */
355 ql_write32(qdev, MAC_ADDR_DATA, upper);
356 status =
357 ql_wait_reg_rdy(qdev,
358 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
359 if (status)
360 goto exit;
361 ql_write32(qdev, MAC_ADDR_IDX, (offset) | /* offset */
362 (index << MAC_ADDR_IDX_SHIFT) | /* index */
363 type); /* type */
364 /* This field should also include the queue id
365 and possibly the function id. Right now we hardcode
366 the route field to NIC core.
367 */
368 if (type == MAC_ADDR_TYPE_CAM_MAC) {
369 cam_output = (CAM_OUT_ROUTE_NIC |
370 (qdev->
371 func << CAM_OUT_FUNC_SHIFT) |
372 (0 << CAM_OUT_CQ_ID_SHIFT));
373 if (qdev->vlgrp)
374 cam_output |= CAM_OUT_RV;
375 /* route to NIC core */
376 ql_write32(qdev, MAC_ADDR_DATA, cam_output);
377 }
378 break;
379 }
380 case MAC_ADDR_TYPE_VLAN:
381 {
382 u32 enable_bit = *((u32 *) &addr[0]);
383 /* For VLAN, the addr actually holds a bit that
384 * either enables or disables the vlan id we are
385 * addressing. It's either MAC_ADDR_E on or off.
386 * That's bit-27 we're talking about.
387 */
388 QPRINTK(qdev, IFUP, INFO, "%s VLAN ID %d %s the CAM.\n",
389 (enable_bit ? "Adding" : "Removing"),
390 index, (enable_bit ? "to" : "from"));
391
392 status =
393 ql_wait_reg_rdy(qdev,
394 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
395 if (status)
396 goto exit;
397 ql_write32(qdev, MAC_ADDR_IDX, offset | /* offset */
398 (index << MAC_ADDR_IDX_SHIFT) | /* index */
399 type | /* type */
400 enable_bit); /* enable/disable */
401 break;
402 }
403 case MAC_ADDR_TYPE_MULTI_FLTR:
404 default:
405 QPRINTK(qdev, IFUP, CRIT,
406 "Address type %d not yet supported.\n", type);
407 status = -EPERM;
408 }
409exit:
410 return status;
411}
412
413/* Set or clear MAC address in hardware. We sometimes
414 * have to clear it to prevent wrong frame routing
415 * especially in a bonding environment.
416 */
417static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
418{
419 int status;
420 char zero_mac_addr[ETH_ALEN];
421 char *addr;
422
423 if (set) {
424 addr = &qdev->ndev->dev_addr[0];
425 QPRINTK(qdev, IFUP, DEBUG,
426 "Set Mac addr %02x:%02x:%02x:%02x:%02x:%02x\n",
427 addr[0], addr[1], addr[2], addr[3],
428 addr[4], addr[5]);
429 } else {
430 memset(zero_mac_addr, 0, ETH_ALEN);
431 addr = &zero_mac_addr[0];
432 QPRINTK(qdev, IFUP, DEBUG,
433 "Clearing MAC address on %s\n",
434 qdev->ndev->name);
435 }
436 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
437 if (status)
438 return status;
439 status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
440 MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
441 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
442 if (status)
443 QPRINTK(qdev, IFUP, ERR, "Failed to init mac "
444 "address.\n");
445 return status;
446}
447
448void ql_link_on(struct ql_adapter *qdev)
449{
450 QPRINTK(qdev, LINK, ERR, "%s: Link is up.\n",
451 qdev->ndev->name);
452 netif_carrier_on(qdev->ndev);
453 ql_set_mac_addr(qdev, 1);
454}
455
456void ql_link_off(struct ql_adapter *qdev)
457{
458 QPRINTK(qdev, LINK, ERR, "%s: Link is down.\n",
459 qdev->ndev->name);
460 netif_carrier_off(qdev->ndev);
461 ql_set_mac_addr(qdev, 0);
462}
463
464/* Get a specific frame routing value from the CAM.
465 * Used for debug and reg dump.
466 */
467int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
468{
469 int status = 0;
470
471 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
472 if (status)
473 goto exit;
474
475 ql_write32(qdev, RT_IDX,
476 RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
477 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
478 if (status)
479 goto exit;
480 *value = ql_read32(qdev, RT_DATA);
481exit:
482 return status;
483}
484
485/* The NIC function for this chip has 16 routing indexes. Each one can be used
486 * to route different frame types to various inbound queues. We send broadcast/
487 * multicast/error frames to the default queue for slow handling,
488 * and CAM hit/RSS frames to the fast handling queues.
489 */
490static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
491 int enable)
492{
493 int status = -EINVAL; /* Return error if no mask match. */
494 u32 value = 0;
495
496 QPRINTK(qdev, IFUP, DEBUG,
497 "%s %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s mask %s the routing reg.\n",
498 (enable ? "Adding" : "Removing"),
499 ((index == RT_IDX_ALL_ERR_SLOT) ? "MAC ERROR/ALL ERROR" : ""),
500 ((index == RT_IDX_IP_CSUM_ERR_SLOT) ? "IP CSUM ERROR" : ""),
501 ((index ==
502 RT_IDX_TCP_UDP_CSUM_ERR_SLOT) ? "TCP/UDP CSUM ERROR" : ""),
503 ((index == RT_IDX_BCAST_SLOT) ? "BROADCAST" : ""),
504 ((index == RT_IDX_MCAST_MATCH_SLOT) ? "MULTICAST MATCH" : ""),
505 ((index == RT_IDX_ALLMULTI_SLOT) ? "ALL MULTICAST MATCH" : ""),
506 ((index == RT_IDX_UNUSED6_SLOT) ? "UNUSED6" : ""),
507 ((index == RT_IDX_UNUSED7_SLOT) ? "UNUSED7" : ""),
508 ((index == RT_IDX_RSS_MATCH_SLOT) ? "RSS ALL/IPV4 MATCH" : ""),
509 ((index == RT_IDX_RSS_IPV6_SLOT) ? "RSS IPV6" : ""),
510 ((index == RT_IDX_RSS_TCP4_SLOT) ? "RSS TCP4" : ""),
511 ((index == RT_IDX_RSS_TCP6_SLOT) ? "RSS TCP6" : ""),
512 ((index == RT_IDX_CAM_HIT_SLOT) ? "CAM HIT" : ""),
513 ((index == RT_IDX_UNUSED013) ? "UNUSED13" : ""),
514 ((index == RT_IDX_UNUSED014) ? "UNUSED14" : ""),
515 ((index == RT_IDX_PROMISCUOUS_SLOT) ? "PROMISCUOUS" : ""),
516 (enable ? "to" : "from"));
517
518 switch (mask) {
519 case RT_IDX_CAM_HIT:
520 {
521 value = RT_IDX_DST_CAM_Q | /* dest */
522 RT_IDX_TYPE_NICQ | /* type */
523 (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
524 break;
525 }
526 case RT_IDX_VALID: /* Promiscuous Mode frames. */
527 {
528 value = RT_IDX_DST_DFLT_Q | /* dest */
529 RT_IDX_TYPE_NICQ | /* type */
530 (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
531 break;
532 }
533 case RT_IDX_ERR: /* Pass up MAC,IP,TCP/UDP error frames. */
534 {
535 value = RT_IDX_DST_DFLT_Q | /* dest */
536 RT_IDX_TYPE_NICQ | /* type */
537 (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
538 break;
539 }
540 case RT_IDX_BCAST: /* Pass up Broadcast frames to default Q. */
541 {
542 value = RT_IDX_DST_DFLT_Q | /* dest */
543 RT_IDX_TYPE_NICQ | /* type */
544 (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
545 break;
546 }
547 case RT_IDX_MCAST: /* Pass up All Multicast frames. */
548 {
549 value = RT_IDX_DST_CAM_Q | /* dest */
550 RT_IDX_TYPE_NICQ | /* type */
551 (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
552 break;
553 }
554 case RT_IDX_MCAST_MATCH: /* Pass up matched Multicast frames. */
555 {
556 value = RT_IDX_DST_CAM_Q | /* dest */
557 RT_IDX_TYPE_NICQ | /* type */
558 (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
559 break;
560 }
561 case RT_IDX_RSS_MATCH: /* Pass up matched RSS frames. */
562 {
563 value = RT_IDX_DST_RSS | /* dest */
564 RT_IDX_TYPE_NICQ | /* type */
565 (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
566 break;
567 }
568 case 0: /* Clear the E-bit on an entry. */
569 {
570 value = RT_IDX_DST_DFLT_Q | /* dest */
571 RT_IDX_TYPE_NICQ | /* type */
572 (index << RT_IDX_IDX_SHIFT);/* index */
573 break;
574 }
575 default:
576 QPRINTK(qdev, IFUP, ERR, "Mask type %d not yet supported.\n",
577 mask);
578 status = -EPERM;
579 goto exit;
580 }
581
582 if (value) {
583 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
584 if (status)
585 goto exit;
586 value |= (enable ? RT_IDX_E : 0);
587 ql_write32(qdev, RT_IDX, value);
588 ql_write32(qdev, RT_DATA, enable ? mask : 0);
589 }
590exit:
591 return status;
592}
593
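/* Globally enable/disable chip interrupts. The upper 16 bits written
 * to INTR_EN appear to act as a write mask for the lower bits (note
 * the (INTR_EN_EI << 16) pattern used here and elsewhere).
 */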
594static void ql_enable_interrupts(struct ql_adapter *qdev)
595{
596 ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
597}
598
599static void ql_disable_interrupts(struct ql_adapter *qdev)
600{
601 ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
602}
603
604/* If we're running with multiple MSI-X vectors then we enable on the fly.
605 * Otherwise, we may have multiple outstanding workers and don't want to
606 * enable until the last one finishes. In this case, the irq_cnt gets
607 * incremented every time we queue a worker and decremented every time
608 * a worker finishes. Once it hits zero we enable the interrupt.
609 */
610u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
611{
612 u32 var = 0;
613 unsigned long hw_flags = 0;
614 struct intr_context *ctx = qdev->intr_context + intr;
615
616 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
617 /* Always enable if we're MSIX multi interrupts and
618 * it's not the default (zeroeth) interrupt.
619 */
620 ql_write32(qdev, INTR_EN,
621 ctx->intr_en_mask);
622 var = ql_read32(qdev, STS);
623 return var;
624 }
625
626 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
627 if (atomic_dec_and_test(&ctx->irq_cnt)) {
628 ql_write32(qdev, INTR_EN,
629 ctx->intr_en_mask);
630 var = ql_read32(qdev, STS);
631 }
632 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
633 return var;
634}
635
636static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
637{
638 u32 var = 0;
639 struct intr_context *ctx;
640
641 /* HW disables for us if we're MSIX multi interrupts and
642 * it's not the default (zeroeth) interrupt.
643 */
644 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
645 return 0;
646
647 ctx = qdev->intr_context + intr;
648 spin_lock(&qdev->hw_lock);
649 if (!atomic_read(&ctx->irq_cnt)) {
650 ql_write32(qdev, INTR_EN,
651 ctx->intr_dis_mask);
652 var = ql_read32(qdev, STS);
653 }
654 atomic_inc(&ctx->irq_cnt);
655 spin_unlock(&qdev->hw_lock);
656 return var;
657}
658
659static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
660{
661 int i;
662 for (i = 0; i < qdev->intr_count; i++) {
663 /* The enable call does a atomic_dec_and_test
664 * and enables only if the result is zero.
665 * So we precharge it here.
666 */
667 if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
668 i == 0))
669 atomic_set(&qdev->intr_context[i].irq_cnt, 1);
670 ql_enable_completion_interrupt(qdev, i);
671 }
672
673}
674
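/* Validate the flash image copied into qdev->flash: check that it
 * begins with the expected signature string and that the 16-bit
 * word checksum over 'size' words sums to zero.
 */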
675static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
676{
677 int status, i;
678 u16 csum = 0;
679 __le16 *flash = (__le16 *)&qdev->flash;
680
681 status = strncmp((char *)&qdev->flash, str, 4);
682 if (status) {
683 QPRINTK(qdev, IFUP, ERR, "Invalid flash signature.\n");
684 return status;
685 }
686
687 for (i = 0; i < size; i++)
688 csum += le16_to_cpu(*flash++);
689
690 if (csum)
691 QPRINTK(qdev, IFUP, ERR,
692 "Invalid flash checksum, csum = 0x%.04x.\n", csum);
693
694 return csum;
695}
696
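/* Read one 32-bit word of serial flash through the FLASH_ADDR /
 * FLASH_DATA register pair. The result is returned in flash
 * (little-endian) byte order, hence the cpu_to_le32() below.
 */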
697 static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
698{
699 int status = 0;
700 /* wait for reg to come ready */
701 status = ql_wait_reg_rdy(qdev,
702 FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
703 if (status)
704 goto exit;
705 /* set up for reg read */
706 ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
707 /* wait for reg to come ready */
708 status = ql_wait_reg_rdy(qdev,
709 FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
710 if (status)
711 goto exit;
712 /* This data is stored on flash as an array of
713 * __le32. Since ql_read32() returns cpu endian
714 * we need to swap it back.
715 */
716 *data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
717exit:
718 return status;
719}
720
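/* Read and validate the flash parameter block for 8000-series
 * devices, then program the MAC address it provides (manufacturer
 * or BOFM-modified) into the net_device.
 */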
721static int ql_get_8000_flash_params(struct ql_adapter *qdev)
722{
723 u32 i, size;
724 int status;
725 __le32 *p = (__le32 *)&qdev->flash;
726 u32 offset;
727 u8 mac_addr[6];
728
729 /* Get flash offset for function and adjust
730 * for dword access.
731 */
732 if (!qdev->port)
733 offset = FUNC0_FLASH_OFFSET / sizeof(u32);
734 else
735 offset = FUNC1_FLASH_OFFSET / sizeof(u32);
736
737 if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
738 return -ETIMEDOUT;
739
740 size = sizeof(struct flash_params_8000) / sizeof(u32);
741 for (i = 0; i < size; i++, p++) {
742 status = ql_read_flash_word(qdev, i+offset, p);
743 if (status) {
744 QPRINTK(qdev, IFUP, ERR, "Error reading flash.\n");
745 goto exit;
746 }
747 }
748
749 status = ql_validate_flash(qdev,
750 sizeof(struct flash_params_8000) / sizeof(u16),
751 "8000");
752 if (status) {
753 QPRINTK(qdev, IFUP, ERR, "Invalid flash.\n");
754 status = -EINVAL;
755 goto exit;
756 }
757
758 /* Extract either manufacturer or BOFM modified
759 * MAC address.
760 */
761 if (qdev->flash.flash_params_8000.data_type1 == 2)
762 memcpy(mac_addr,
763 qdev->flash.flash_params_8000.mac_addr1,
764 qdev->ndev->addr_len);
765 else
766 memcpy(mac_addr,
767 qdev->flash.flash_params_8000.mac_addr,
768 qdev->ndev->addr_len);
769
770 if (!is_valid_ether_addr(mac_addr)) {
771 QPRINTK(qdev, IFUP, ERR, "Invalid MAC address.\n");
772 status = -EINVAL;
773 goto exit;
774 }
775
776 memcpy(qdev->ndev->dev_addr,
777 mac_addr,
778 qdev->ndev->addr_len);
779
780exit:
781 ql_sem_unlock(qdev, SEM_FLASH_MASK);
782 return status;
783}
784
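/* Read and validate the flash parameter block for 8012-series
 * devices and program its MAC address into the net_device.
 */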
785 static int ql_get_8012_flash_params(struct ql_adapter *qdev)
786{
787 int i;
788 int status;
789 __le32 *p = (__le32 *)&qdev->flash;
790 u32 offset = 0;
791 u32 size = sizeof(struct flash_params_8012) / sizeof(u32);
792
793 /* Second function's parameters follow the first
794 * function's.
795 */
796 if (qdev->port)
797 offset = size;
798
799 if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
800 return -ETIMEDOUT;
801
802 for (i = 0; i < size; i++, p++) {
803 status = ql_read_flash_word(qdev, i+offset, p);
804 if (status) {
805 QPRINTK(qdev, IFUP, ERR, "Error reading flash.\n");
806 goto exit;
807 }
808
809 }
810
811 status = ql_validate_flash(qdev,
812 sizeof(struct flash_params_8012) / sizeof(u16),
813 "8012");
814 if (status) {
815 QPRINTK(qdev, IFUP, ERR, "Invalid flash.\n");
816 status = -EINVAL;
817 goto exit;
818 }
819
820 if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
821 status = -EINVAL;
822 goto exit;
823 }
824
825 memcpy(qdev->ndev->dev_addr,
826 qdev->flash.flash_params_8012.mac_addr,
827 qdev->ndev->addr_len);
828
829exit:
830 ql_sem_unlock(qdev, SEM_FLASH_MASK);
831 return status;
832}
833
834/* xgmac registers are located behind the xgmac_addr and xgmac_data
835 * register pair. Each read/write requires us to wait for the ready
836 * bit before reading/writing the data.
837 */
838static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
839{
840 int status;
841 /* wait for reg to come ready */
842 status = ql_wait_reg_rdy(qdev,
843 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
844 if (status)
845 return status;
846 /* write the data to the data reg */
847 ql_write32(qdev, XGMAC_DATA, data);
848 /* trigger the write */
849 ql_write32(qdev, XGMAC_ADDR, reg);
850 return status;
851}
852
853/* xgmac registers are located behind the xgmac_addr and xgmac_data
854 * register pair. Each read/write requires us to wait for the ready
855 * bit before reading/writing the data.
856 */
857int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
858{
859 int status = 0;
860 /* wait for reg to come ready */
861 status = ql_wait_reg_rdy(qdev,
862 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
863 if (status)
864 goto exit;
865 /* set up for reg read */
866 ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
867 /* wait for reg to come ready */
868 status = ql_wait_reg_rdy(qdev,
869 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
870 if (status)
871 goto exit;
872 /* get the data */
873 *data = ql_read32(qdev, XGMAC_DATA);
874exit:
875 return status;
876}
877
878/* This is used for reading the 64-bit statistics regs. */
879int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
880{
881 int status = 0;
882 u32 hi = 0;
883 u32 lo = 0;
884
885 status = ql_read_xgmac_reg(qdev, reg, &lo);
886 if (status)
887 goto exit;
888
889 status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
890 if (status)
891 goto exit;
892
893 *data = (u64) lo | ((u64) hi << 32);
894
895exit:
896 return status;
897}
898
899static int ql_8000_port_initialize(struct ql_adapter *qdev)
900{
901 int status;
902 /*
903 * Get MPI firmware version for driver banner
904 * and ethtool info.
905 */
906 status = ql_mb_about_fw(qdev);
907 if (status)
908 goto exit;
909 status = ql_mb_get_fw_state(qdev);
910 if (status)
911 goto exit;
912 /* Wake up a worker to get/set the TX/RX frame sizes. */
913 queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
914exit:
915 return status;
916}
917
918/* Take the MAC Core out of reset.
919 * Enable statistics counting.
920 * Take the transmitter/receiver out of reset.
921 * This functionality may be done in the MPI firmware at a
922 * later date.
923 */
924 static int ql_8012_port_initialize(struct ql_adapter *qdev)
925{
926 int status = 0;
927 u32 data;
928
929 if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
930 /* Another function has the semaphore, so
931 * wait for the port init bit to come ready.
932 */
933 QPRINTK(qdev, LINK, INFO,
934 "Another function has the semaphore, so wait for the port init bit to come ready.\n");
935 status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
936 if (status) {
937 QPRINTK(qdev, LINK, CRIT,
938 "Port initialize timed out.\n");
939 }
940 return status;
941 }
942
943 QPRINTK(qdev, LINK, INFO, "Got xgmac semaphore!.\n");
944 /* Set the core reset. */
945 status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
946 if (status)
947 goto end;
948 data |= GLOBAL_CFG_RESET;
949 status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
950 if (status)
951 goto end;
952
953 /* Clear the core reset and turn on jumbo for receiver. */
954 data &= ~GLOBAL_CFG_RESET; /* Clear core reset. */
955 data |= GLOBAL_CFG_JUMBO; /* Turn on jumbo. */
956 data |= GLOBAL_CFG_TX_STAT_EN;
957 data |= GLOBAL_CFG_RX_STAT_EN;
958 status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
959 if (status)
960 goto end;
961
962 /* Enable the transmitter and clear its reset. */
963 status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
964 if (status)
965 goto end;
966 data &= ~TX_CFG_RESET; /* Clear the TX MAC reset. */
967 data |= TX_CFG_EN; /* Enable the transmitter. */
968 status = ql_write_xgmac_reg(qdev, TX_CFG, data);
969 if (status)
970 goto end;
971
972 /* Enable the receiver and clear its reset. */
973 status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
974 if (status)
975 goto end;
976 data &= ~RX_CFG_RESET; /* Clear the RX MAC reset. */
977 data |= RX_CFG_EN; /* Enable the receiver. */
978 status = ql_write_xgmac_reg(qdev, RX_CFG, data);
979 if (status)
980 goto end;
981
982 /* Turn on jumbo. */
983 status =
984 ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
985 if (status)
986 goto end;
987 status =
988 ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
989 if (status)
990 goto end;
991
992 /* Signal to the world that the port is enabled. */
993 ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
994end:
995 ql_sem_unlock(qdev, qdev->xg_sem_mask);
996 return status;
997}
998
999/* Get the next large buffer. */
1000static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
1001{
1002 struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
1003 rx_ring->lbq_curr_idx++;
1004 if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
1005 rx_ring->lbq_curr_idx = 0;
1006 rx_ring->lbq_free_cnt++;
1007 return lbq_desc;
1008}
1009
1010/* Get the next small buffer. */
1011static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
1012{
1013 struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
1014 rx_ring->sbq_curr_idx++;
1015 if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
1016 rx_ring->sbq_curr_idx = 0;
1017 rx_ring->sbq_free_cnt++;
1018 return sbq_desc;
1019}
1020
1021/* Update an rx ring index. */
1022static void ql_update_cq(struct rx_ring *rx_ring)
1023{
1024 rx_ring->cnsmr_idx++;
1025 rx_ring->curr_entry++;
1026 if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
1027 rx_ring->cnsmr_idx = 0;
1028 rx_ring->curr_entry = rx_ring->cq_base;
1029 }
1030}
1031
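/* Publish the new consumer index to the chip via the completion
 * queue's doorbell register.
 */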
1032static void ql_write_cq_idx(struct rx_ring *rx_ring)
1033{
1034 ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
1035}
1036
1037/* Process (refill) a large buffer queue. */
1038static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1039{
1040 u32 clean_idx = rx_ring->lbq_clean_idx;
1041 u32 start_idx = clean_idx;
1042 struct bq_desc *lbq_desc;
1043 u64 map;
1044 int i;
1045
1046 while (rx_ring->lbq_free_cnt > 16) {
1047 for (i = 0; i < 16; i++) {
1048 QPRINTK(qdev, RX_STATUS, DEBUG,
1049 "lbq: try cleaning clean_idx = %d.\n",
1050 clean_idx);
1051 lbq_desc = &rx_ring->lbq[clean_idx];
1052 if (lbq_desc->p.lbq_page == NULL) {
1053 QPRINTK(qdev, RX_STATUS, DEBUG,
1054 "lbq: getting new page for index %d.\n",
1055 lbq_desc->index);
1056 lbq_desc->p.lbq_page = alloc_page(GFP_ATOMIC);
1057 if (lbq_desc->p.lbq_page == NULL) {
1058 rx_ring->lbq_clean_idx = clean_idx;
1059 QPRINTK(qdev, RX_STATUS, ERR,
1060 "Couldn't get a page.\n");
1061 return;
1062 }
1063 map = pci_map_page(qdev->pdev,
1064 lbq_desc->p.lbq_page,
1065 0, PAGE_SIZE,
1066 PCI_DMA_FROMDEVICE);
1067 if (pci_dma_mapping_error(qdev->pdev, map)) {
1068 rx_ring->lbq_clean_idx = clean_idx;
1069 put_page(lbq_desc->p.lbq_page);
1070 lbq_desc->p.lbq_page = NULL;
1071 QPRINTK(qdev, RX_STATUS, ERR,
1072 "PCI mapping failed.\n");
1073 return;
1074 }
1075 pci_unmap_addr_set(lbq_desc, mapaddr, map);
1076 pci_unmap_len_set(lbq_desc, maplen, PAGE_SIZE);
1077 *lbq_desc->addr = cpu_to_le64(map);
1078 }
1079 clean_idx++;
1080 if (clean_idx == rx_ring->lbq_len)
1081 clean_idx = 0;
1082 }
1083
1084 rx_ring->lbq_clean_idx = clean_idx;
1085 rx_ring->lbq_prod_idx += 16;
1086 if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
1087 rx_ring->lbq_prod_idx = 0;
1088 rx_ring->lbq_free_cnt -= 16;
1089 }
1090
1091 if (start_idx != clean_idx) {
1092 QPRINTK(qdev, RX_STATUS, DEBUG,
1093 "lbq: updating prod idx = %d.\n",
1094 rx_ring->lbq_prod_idx);
1095 ql_write_db_reg(rx_ring->lbq_prod_idx,
1096 rx_ring->lbq_prod_idx_db_reg);
1097 }
1098}
1099
1100/* Process (refill) a small buffer queue. */
1101static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1102{
1103 u32 clean_idx = rx_ring->sbq_clean_idx;
1104 u32 start_idx = clean_idx;
1105 struct bq_desc *sbq_desc;
1106 u64 map;
1107 int i;
1108
1109 while (rx_ring->sbq_free_cnt > 16) {
1110 for (i = 0; i < 16; i++) {
1111 sbq_desc = &rx_ring->sbq[clean_idx];
1112 QPRINTK(qdev, RX_STATUS, DEBUG,
1113 "sbq: try cleaning clean_idx = %d.\n",
1114 clean_idx);
1115 if (sbq_desc->p.skb == NULL) {
1116 QPRINTK(qdev, RX_STATUS, DEBUG,
1117 "sbq: getting new skb for index %d.\n",
1118 sbq_desc->index);
1119 sbq_desc->p.skb =
1120 netdev_alloc_skb(qdev->ndev,
1121 rx_ring->sbq_buf_size);
1122 if (sbq_desc->p.skb == NULL) {
1123 QPRINTK(qdev, PROBE, ERR,
1124 "Couldn't get an skb.\n");
1125 rx_ring->sbq_clean_idx = clean_idx;
1126 return;
1127 }
1128 skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
1129 map = pci_map_single(qdev->pdev,
1130 sbq_desc->p.skb->data,
1131 rx_ring->sbq_buf_size /
1132 2, PCI_DMA_FROMDEVICE);
1133 if (pci_dma_mapping_error(qdev->pdev, map)) {
1134 QPRINTK(qdev, IFUP, ERR, "PCI mapping failed.\n");
1135 rx_ring->sbq_clean_idx = clean_idx;
1136 dev_kfree_skb_any(sbq_desc->p.skb);
1137 sbq_desc->p.skb = NULL;
1138 return;
1139 }
1140 pci_unmap_addr_set(sbq_desc, mapaddr, map);
1141 pci_unmap_len_set(sbq_desc, maplen,
1142 rx_ring->sbq_buf_size / 2);
1143 *sbq_desc->addr = cpu_to_le64(map);
1144 }
1145
1146 clean_idx++;
1147 if (clean_idx == rx_ring->sbq_len)
1148 clean_idx = 0;
1149 }
1150 rx_ring->sbq_clean_idx = clean_idx;
1151 rx_ring->sbq_prod_idx += 16;
1152 if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
1153 rx_ring->sbq_prod_idx = 0;
1154 rx_ring->sbq_free_cnt -= 16;
1155 }
1156
1157 if (start_idx != clean_idx) {
1158 QPRINTK(qdev, RX_STATUS, DEBUG,
1159 "sbq: updating prod idx = %d.\n",
1160 rx_ring->sbq_prod_idx);
1161 ql_write_db_reg(rx_ring->sbq_prod_idx,
1162 rx_ring->sbq_prod_idx_db_reg);
1163 }
1164}
1165
1166static void ql_update_buffer_queues(struct ql_adapter *qdev,
1167 struct rx_ring *rx_ring)
1168{
1169 ql_update_sbq(qdev, rx_ring);
1170 ql_update_lbq(qdev, rx_ring);
1171}
1172
1173/* Unmaps tx buffers. Can be called from send() if a pci mapping
1174 * fails at some stage, or from the interrupt when a tx completes.
1175 */
1176static void ql_unmap_send(struct ql_adapter *qdev,
1177 struct tx_ring_desc *tx_ring_desc, int mapped)
1178{
1179 int i;
1180 for (i = 0; i < mapped; i++) {
1181 if (i == 0 || (i == 7 && mapped > 7)) {
1182 /*
1183 * Unmap the skb->data area, or the
1184 * external sglist (AKA the Outbound
1185 * Address List (OAL)).
1186 * If it's the zeroth element, then it's
1187 * the skb->data area. If it's the 7th
1188 * element and there are more than 6 frags,
1189 * then it's an OAL.
1190 */
1191 if (i == 7) {
1192 QPRINTK(qdev, TX_DONE, DEBUG,
1193 "unmapping OAL area.\n");
1194 }
1195 pci_unmap_single(qdev->pdev,
1196 pci_unmap_addr(&tx_ring_desc->map[i],
1197 mapaddr),
1198 pci_unmap_len(&tx_ring_desc->map[i],
1199 maplen),
1200 PCI_DMA_TODEVICE);
1201 } else {
1202 QPRINTK(qdev, TX_DONE, DEBUG, "unmapping frag %d.\n",
1203 i);
1204 pci_unmap_page(qdev->pdev,
1205 pci_unmap_addr(&tx_ring_desc->map[i],
1206 mapaddr),
1207 pci_unmap_len(&tx_ring_desc->map[i],
1208 maplen), PCI_DMA_TODEVICE);
1209 }
1210 }
1211
1212}
1213
1214/* Map the buffers for this transmit. This will return
1215 * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
1216 */
1217static int ql_map_send(struct ql_adapter *qdev,
1218 struct ob_mac_iocb_req *mac_iocb_ptr,
1219 struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
1220{
1221 int len = skb_headlen(skb);
1222 dma_addr_t map;
1223 int frag_idx, err, map_idx = 0;
1224 struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
1225 int frag_cnt = skb_shinfo(skb)->nr_frags;
1226
1227 if (frag_cnt) {
1228 QPRINTK(qdev, TX_QUEUED, DEBUG, "frag_cnt = %d.\n", frag_cnt);
1229 }
1230 /*
1231 * Map the skb buffer first.
1232 */
1233 map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
1234
1235 err = pci_dma_mapping_error(qdev->pdev, map);
1236 if (err) {
1237 QPRINTK(qdev, TX_QUEUED, ERR,
1238 "PCI mapping failed with error: %d\n", err);
1239
1240 return NETDEV_TX_BUSY;
1241 }
1242
1243 tbd->len = cpu_to_le32(len);
1244 tbd->addr = cpu_to_le64(map);
1245 pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1246 pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
1247 map_idx++;
1248
1249 /*
1250 * This loop fills the remainder of the 8 address descriptors
1251 * in the IOCB. If there are more than 7 fragments, then the
1252 * eighth address desc will point to an external list (OAL).
1253 * When this happens, the remainder of the frags will be stored
1254 * in this list.
1255 */
1256 for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
1257 skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
1258 tbd++;
1259 if (frag_idx == 6 && frag_cnt > 7) {
1260 /* Let's tack on an sglist.
1261 * Our control block will now
1262 * look like this:
1263 * iocb->seg[0] = skb->data
1264 * iocb->seg[1] = frag[0]
1265 * iocb->seg[2] = frag[1]
1266 * iocb->seg[3] = frag[2]
1267 * iocb->seg[4] = frag[3]
1268 * iocb->seg[5] = frag[4]
1269 * iocb->seg[6] = frag[5]
1270 * iocb->seg[7] = ptr to OAL (external sglist)
1271 * oal->seg[0] = frag[6]
1272 * oal->seg[1] = frag[7]
1273 * oal->seg[2] = frag[8]
1274 * oal->seg[3] = frag[9]
1275 * oal->seg[4] = frag[10]
1276 * etc...
1277 */
1278 /* Tack on the OAL in the eighth segment of IOCB. */
1279 map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
1280 sizeof(struct oal),
1281 PCI_DMA_TODEVICE);
1282 err = pci_dma_mapping_error(qdev->pdev, map);
1283 if (err) {
1284 QPRINTK(qdev, TX_QUEUED, ERR,
1285 "PCI mapping outbound address list with error: %d\n",
1286 err);
1287 goto map_error;
1288 }
1289
1290 tbd->addr = cpu_to_le64(map);
1291 /*
1292 * The length is the number of fragments
1293 * that remain to be mapped times the length
1294 * of our sglist (OAL).
1295 */
1296 tbd->len =
1297 cpu_to_le32((sizeof(struct tx_buf_desc) *
1298 (frag_cnt - frag_idx)) | TX_DESC_C);
1299 pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
1300 map);
1301 pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1302 sizeof(struct oal));
1303 tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
1304 map_idx++;
1305 }
1306
1307 map =
1308 pci_map_page(qdev->pdev, frag->page,
1309 frag->page_offset, frag->size,
1310 PCI_DMA_TODEVICE);
1311
1312 err = pci_dma_mapping_error(qdev->pdev, map);
1313 if (err) {
1314 QPRINTK(qdev, TX_QUEUED, ERR,
1315 "PCI mapping frags failed with error: %d.\n",
1316 err);
1317 goto map_error;
1318 }
1319
1320 tbd->addr = cpu_to_le64(map);
1321 tbd->len = cpu_to_le32(frag->size);
1322 pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1323 pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1324 frag->size);
1325
1326 }
1327 /* Save the number of segments we've mapped. */
1328 tx_ring_desc->map_cnt = map_idx;
1329 /* Terminate the last segment. */
1330 tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
1331 return NETDEV_TX_OK;
1332
1333map_error:
1334 /*
1335 * If the first frag mapping failed, then i will be zero.
1336 * This causes the unmap of the skb->data area. Otherwise
1337 * we pass in the number of frags that mapped successfully
1338 * so they can be unmapped.
1339 */
1340 ql_unmap_send(qdev, tx_ring_desc, map_idx);
1341 return NETDEV_TX_BUSY;
1342}
1343
1344static void ql_realign_skb(struct sk_buff *skb, int len)
1345{
1346 void *temp_addr = skb->data;
1347
1348 /* Undo the skb_reserve(skb,32) we did before
1349 * giving to hardware, and realign data on
1350 * a 2-byte boundary.
1351 */
1352 skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
1353 skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
1354 skb_copy_to_linear_data(skb, temp_addr,
1355 (unsigned int)len);
1356}
1357
1358/*
1359 * This function builds an skb for the given inbound
1360 * completion. It will be rewritten for readability in the near
1361 * future, but for now it works well.
1362 */
1363static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1364 struct rx_ring *rx_ring,
1365 struct ib_mac_iocb_rsp *ib_mac_rsp)
1366{
1367 struct bq_desc *lbq_desc;
1368 struct bq_desc *sbq_desc;
1369 struct sk_buff *skb = NULL;
1370 u32 length = le32_to_cpu(ib_mac_rsp->data_len);
1371 u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
1372
1373 /*
1374 * Handle the header buffer if present.
1375 */
1376 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
1377 ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1378 QPRINTK(qdev, RX_STATUS, DEBUG, "Header of %d bytes in small buffer.\n", hdr_len);
1379 /*
1380 * Headers fit nicely into a small buffer.
1381 */
1382 sbq_desc = ql_get_curr_sbuf(rx_ring);
1383 pci_unmap_single(qdev->pdev,
1384 pci_unmap_addr(sbq_desc, mapaddr),
1385 pci_unmap_len(sbq_desc, maplen),
1386 PCI_DMA_FROMDEVICE);
1387 skb = sbq_desc->p.skb;
1388 ql_realign_skb(skb, hdr_len);
1389 skb_put(skb, hdr_len);
1390 sbq_desc->p.skb = NULL;
1391 }
1392
1393 /*
1394 * Handle the data buffer(s).
1395 */
1396 if (unlikely(!length)) { /* Is there data too? */
1397 QPRINTK(qdev, RX_STATUS, DEBUG,
1398 "No Data buffer in this packet.\n");
1399 return skb;
1400 }
1401
1402 if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
1403 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1404 QPRINTK(qdev, RX_STATUS, DEBUG,
1405 "Headers in small, data of %d bytes in small, combine them.\n", length);
1406 /*
1407 * Data is less than small buffer size so it's
1408 * stuffed in a small buffer.
1409 * For this case we append the data
1410 * from the "data" small buffer to the "header" small
1411 * buffer.
1412 */
1413 sbq_desc = ql_get_curr_sbuf(rx_ring);
1414 pci_dma_sync_single_for_cpu(qdev->pdev,
1415 pci_unmap_addr
1416 (sbq_desc, mapaddr),
1417 pci_unmap_len
1418 (sbq_desc, maplen),
1419 PCI_DMA_FROMDEVICE);
1420 memcpy(skb_put(skb, length),
1421 sbq_desc->p.skb->data, length);
1422 pci_dma_sync_single_for_device(qdev->pdev,
1423 pci_unmap_addr
1424 (sbq_desc,
1425 mapaddr),
1426 pci_unmap_len
1427 (sbq_desc,
1428 maplen),
1429 PCI_DMA_FROMDEVICE);
1430 } else {
1431 QPRINTK(qdev, RX_STATUS, DEBUG,
1432 "%d bytes in a single small buffer.\n", length);
1433 sbq_desc = ql_get_curr_sbuf(rx_ring);
1434 skb = sbq_desc->p.skb;
1435 ql_realign_skb(skb, length);
1436 skb_put(skb, length);
1437 pci_unmap_single(qdev->pdev,
1438 pci_unmap_addr(sbq_desc,
1439 mapaddr),
1440 pci_unmap_len(sbq_desc,
1441 maplen),
1442 PCI_DMA_FROMDEVICE);
1443 sbq_desc->p.skb = NULL;
1444 }
1445 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
1446 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1447 QPRINTK(qdev, RX_STATUS, DEBUG,
1448 "Header in small, %d bytes in large. Chain large to small!\n", length);
1449 /*
1450 * The data is in a single large buffer. We
1451 * chain it to the header buffer's skb and let
1452 * it rip.
1453 */
1454 lbq_desc = ql_get_curr_lbuf(rx_ring);
1455 pci_unmap_page(qdev->pdev,
1456 pci_unmap_addr(lbq_desc,
1457 mapaddr),
1458 pci_unmap_len(lbq_desc, maplen),
1459 PCI_DMA_FROMDEVICE);
1460 QPRINTK(qdev, RX_STATUS, DEBUG,
1461 "Chaining page to skb.\n");
1462 skb_fill_page_desc(skb, 0, lbq_desc->p.lbq_page,
1463 0, length);
1464 skb->len += length;
1465 skb->data_len += length;
1466 skb->truesize += length;
1467 lbq_desc->p.lbq_page = NULL;
1468 } else {
1469 /*
1470 * The headers and data are in a single large buffer. We
1471 * copy it to a new skb and let it go. This can happen with
1472 * jumbo mtu on a non-TCP/UDP frame.
1473 */
1474 lbq_desc = ql_get_curr_lbuf(rx_ring);
1475 skb = netdev_alloc_skb(qdev->ndev, length);
1476 if (skb == NULL) {
1477 QPRINTK(qdev, PROBE, DEBUG,
1478 "No skb available, drop the packet.\n");
1479 return NULL;
1480 }
1481 pci_unmap_page(qdev->pdev,
1482 pci_unmap_addr(lbq_desc,
1483 mapaddr),
1484 pci_unmap_len(lbq_desc, maplen),
1485 PCI_DMA_FROMDEVICE);
1486 skb_reserve(skb, NET_IP_ALIGN);
1487 QPRINTK(qdev, RX_STATUS, DEBUG,
1488 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n", length);
1489 skb_fill_page_desc(skb, 0, lbq_desc->p.lbq_page,
1490 0, length);
1491 skb->len += length;
1492 skb->data_len += length;
1493 skb->truesize += length;
1494 length -= length;
1495 lbq_desc->p.lbq_page = NULL;
1496 __pskb_pull_tail(skb,
1497 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1498 VLAN_ETH_HLEN : ETH_HLEN);
1499 }
1500 } else {
1501 /*
1502 * The data is in a chain of large buffers
1503 * pointed to by a small buffer. We loop
1504 * thru and chain them to the our small header
1505 * buffer's skb.
1506 * frags: There are 18 max frags and our small
1507 * buffer will hold 32 of them. The thing is,
1508 * we'll use 3 max for our 9000 byte jumbo
1509 * frames. If the MTU goes up we could
1510 * eventually be in trouble.
1511 */
1512 int size, offset, i = 0;
1513 __le64 *bq, bq_array[8];
1514 sbq_desc = ql_get_curr_sbuf(rx_ring);
1515 pci_unmap_single(qdev->pdev,
1516 pci_unmap_addr(sbq_desc, mapaddr),
1517 pci_unmap_len(sbq_desc, maplen),
1518 PCI_DMA_FROMDEVICE);
1519 if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
1520 /*
1521 * This is an non TCP/UDP IP frame, so
1522 * the headers aren't split into a small
1523 * buffer. We have to use the small buffer
1524 * that contains our sg list as our skb to
1525 * send upstairs. Copy the sg list here to
1526 * a local buffer and use it to find the
1527 * pages to chain.
1528 */
1529 QPRINTK(qdev, RX_STATUS, DEBUG,
1530 "%d bytes of headers & data in chain of large.\n", length);
1531 skb = sbq_desc->p.skb;
1532 bq = &bq_array[0];
1533 memcpy(bq, skb->data, sizeof(bq_array));
1534 sbq_desc->p.skb = NULL;
1535 skb_reserve(skb, NET_IP_ALIGN);
1536 } else {
1537 QPRINTK(qdev, RX_STATUS, DEBUG,
1538 "Headers in small, %d bytes of data in chain of large.\n", length);
1539 bq = (__le64 *)sbq_desc->p.skb->data;
1540 }
1541 while (length > 0) {
1542 lbq_desc = ql_get_curr_lbuf(rx_ring);
1543 pci_unmap_page(qdev->pdev,
1544 pci_unmap_addr(lbq_desc,
1545 mapaddr),
1546 pci_unmap_len(lbq_desc,
1547 maplen),
1548 PCI_DMA_FROMDEVICE);
1549 size = (length < PAGE_SIZE) ? length : PAGE_SIZE;
1550 offset = 0;
1551
1552 QPRINTK(qdev, RX_STATUS, DEBUG,
1553 "Adding page %d to skb for %d bytes.\n",
1554 i, size);
1555 skb_fill_page_desc(skb, i, lbq_desc->p.lbq_page,
1556 offset, size);
1557 skb->len += size;
1558 skb->data_len += size;
1559 skb->truesize += size;
1560 length -= size;
1561 lbq_desc->p.lbq_page = NULL;
1562 bq++;
1563 i++;
1564 }
1565 __pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1566 VLAN_ETH_HLEN : ETH_HLEN);
1567 }
1568 return skb;
1569}
1570
1571/* Process an inbound completion from an rx ring. */
1572static void ql_process_mac_rx_intr(struct ql_adapter *qdev,
1573 struct rx_ring *rx_ring,
1574 struct ib_mac_iocb_rsp *ib_mac_rsp)
1575{
1576 struct net_device *ndev = qdev->ndev;
1577 struct sk_buff *skb = NULL;
1578 u16 vlan_id = (le16_to_cpu(ib_mac_rsp->vlan_id) &
1579 IB_MAC_IOCB_RSP_VLAN_MASK);
1580
1581 QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
1582
1583 skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
1584 if (unlikely(!skb)) {
1585 QPRINTK(qdev, RX_STATUS, DEBUG,
1586 "No skb available, drop packet.\n");
1587 return;
1588 }
1589
1590 /* Frame error, so drop the packet. */
1591 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1592 QPRINTK(qdev, DRV, ERR, "Receive error, flags2 = 0x%x\n",
1593 ib_mac_rsp->flags2);
1594 dev_kfree_skb_any(skb);
1595 return;
1596 }
1597
1598 /* The max framesize filter on this chip is set higher than
1599 * MTU since FCoE uses 2k frames.
1600 */
1601 if (skb->len > ndev->mtu + ETH_HLEN) {
1602 dev_kfree_skb_any(skb);
1603 return;
1604 }
1605
1606 prefetch(skb->data);
1607 skb->dev = ndev;
1608 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1609 QPRINTK(qdev, RX_STATUS, DEBUG, "%s%s%s Multicast.\n",
1610 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1611 IB_MAC_IOCB_RSP_M_HASH ? "Hash" : "",
1612 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1613 IB_MAC_IOCB_RSP_M_REG ? "Registered" : "",
1614 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1615 IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1616 }
1617 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
1618 QPRINTK(qdev, RX_STATUS, DEBUG, "Promiscuous Packet.\n");
1619 }
1620
1621 skb->protocol = eth_type_trans(skb, ndev);
1622 skb->ip_summed = CHECKSUM_NONE;
1623
1624 /* If rx checksum is on, and there are no
1625 * csum or frame errors.
1626 */
1627 if (qdev->rx_csum &&
1628 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1629 /* TCP frame. */
1630 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1631 QPRINTK(qdev, RX_STATUS, DEBUG,
1632 "TCP checksum done!\n");
1633 skb->ip_summed = CHECKSUM_UNNECESSARY;
1634 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1635 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1636 /* Unfragmented ipv4 UDP frame. */
1637 struct iphdr *iph = (struct iphdr *) skb->data;
1638 if (!(iph->frag_off &
1639 cpu_to_be16(IP_MF|IP_OFFSET))) {
1640 skb->ip_summed = CHECKSUM_UNNECESSARY;
1641 QPRINTK(qdev, RX_STATUS, DEBUG,
1642 "TCP checksum done!\n");
1643 }
1644 }
1645 }
1646
1647 qdev->stats.rx_packets++;
1648 qdev->stats.rx_bytes += skb->len;
1649 skb_record_rx_queue(skb, rx_ring->cq_id);
1650 if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
1651 if (qdev->vlgrp &&
1652 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
1653 (vlan_id != 0))
1654 vlan_gro_receive(&rx_ring->napi, qdev->vlgrp,
1655 vlan_id, skb);
1656 else
1657 napi_gro_receive(&rx_ring->napi, skb);
1658 } else {
1659 if (qdev->vlgrp &&
1660 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
1661 (vlan_id != 0))
1662 vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
1663 else
1664 netif_receive_skb(skb);
1665 }
1666}
1667
1668/* Process an outbound completion from an rx ring. */
1669static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
1670 struct ob_mac_iocb_rsp *mac_rsp)
1671{
1672 struct tx_ring *tx_ring;
1673 struct tx_ring_desc *tx_ring_desc;
1674
1675 QL_DUMP_OB_MAC_RSP(mac_rsp);
1676 tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
1677 tx_ring_desc = &tx_ring->q[mac_rsp->tid];
1678 ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
1679 qdev->stats.tx_bytes += (tx_ring_desc->skb)->len;
1680 qdev->stats.tx_packets++;
1681 dev_kfree_skb(tx_ring_desc->skb);
1682 tx_ring_desc->skb = NULL;
1683
1684 if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
1685 OB_MAC_IOCB_RSP_S |
1686 OB_MAC_IOCB_RSP_L |
1687 OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
1688 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
1689 QPRINTK(qdev, TX_DONE, WARNING,
1690 "Total descriptor length did not match transfer length.\n");
1691 }
1692 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
1693 QPRINTK(qdev, TX_DONE, WARNING,
1694 "Frame too short to be legal, not sent.\n");
1695 }
1696 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
1697 QPRINTK(qdev, TX_DONE, WARNING,
1698 "Frame too long, but sent anyway.\n");
1699 }
1700 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
1701 QPRINTK(qdev, TX_DONE, WARNING,
1702 "PCI backplane error. Frame not sent.\n");
1703 }
1704 }
1705 atomic_inc(&tx_ring->tx_count);
1706}
1707
1708/* Fire up a handler to reset the MPI processor. */
1709void ql_queue_fw_error(struct ql_adapter *qdev)
1710{
1711 ql_link_off(qdev);
1712 queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
1713}
1714
1715void ql_queue_asic_error(struct ql_adapter *qdev)
1716{
1717 ql_link_off(qdev);
1718 ql_disable_interrupts(qdev);
1719 /* Clear adapter up bit to signal the recovery
1720 * process that it shouldn't kill the reset worker
1721 * thread
1722 */
1723 clear_bit(QL_ADAPTER_UP, &qdev->flags);
1724 queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
1725}
1726
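/* Handle an asynchronous event (AE) completion from the chip and
 * queue firmware or ASIC recovery work as appropriate.
 */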
1727static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
1728 struct ib_ae_iocb_rsp *ib_ae_rsp)
1729{
1730 switch (ib_ae_rsp->event) {
1731 case MGMT_ERR_EVENT:
1732 QPRINTK(qdev, RX_ERR, ERR,
1733 "Management Processor Fatal Error.\n");
1734 ql_queue_fw_error(qdev);
1735 return;
1736
1737 case CAM_LOOKUP_ERR_EVENT:
1738 QPRINTK(qdev, LINK, ERR,
1739 "Multiple CAM hits lookup occurred.\n");
1740 QPRINTK(qdev, DRV, ERR, "This event shouldn't occur.\n");
1741 ql_queue_asic_error(qdev);
1742 return;
1743
1744 case SOFT_ECC_ERROR_EVENT:
1745 QPRINTK(qdev, RX_ERR, ERR, "Soft ECC error detected.\n");
1746 ql_queue_asic_error(qdev);
1747 break;
1748
1749 case PCI_ERR_ANON_BUF_RD:
1750 QPRINTK(qdev, RX_ERR, ERR,
1751 "PCI error occurred when reading anonymous buffers from rx_ring %d.\n",
1752 ib_ae_rsp->q_id);
1753 ql_queue_asic_error(qdev);
1754 break;
1755
1756 default:
1757 QPRINTK(qdev, DRV, ERR, "Unexpected event %d.\n",
1758 ib_ae_rsp->event);
1759 ql_queue_asic_error(qdev);
1760 break;
1761 }
1762}
1763
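/* Service TX (outbound) completions posted to this completion ring,
 * freeing the sent skbs and waking the TX subqueue if it had been
 * stopped for lack of descriptors.
 */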
1764static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
1765{
1766 struct ql_adapter *qdev = rx_ring->qdev;
1767 u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
1768 struct ob_mac_iocb_rsp *net_rsp = NULL;
1769 int count = 0;
1770
1771 struct tx_ring *tx_ring;
1772 /* While there are entries in the completion queue. */
1773 while (prod != rx_ring->cnsmr_idx) {
1774
1775 QPRINTK(qdev, RX_STATUS, DEBUG,
1776 "cq_id = %d, prod = %d, cnsmr = %d.\n.", rx_ring->cq_id,
1777 prod, rx_ring->cnsmr_idx);
1778
1779 net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
1780 rmb();
1781 switch (net_rsp->opcode) {
1782
1783 case OPCODE_OB_MAC_TSO_IOCB:
1784 case OPCODE_OB_MAC_IOCB:
1785 ql_process_mac_tx_intr(qdev, net_rsp);
1786 break;
1787 default:
1788 QPRINTK(qdev, RX_STATUS, DEBUG,
1789 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
1790 net_rsp->opcode);
1791 }
1792 count++;
1793 ql_update_cq(rx_ring);
1794 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
1795 }
1796 ql_write_cq_idx(rx_ring);
1797 tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
1798 if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id) &&
1799 net_rsp != NULL) {
1800 if (atomic_read(&tx_ring->queue_stopped) &&
1801 (atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
1802 /*
1803 * The queue got stopped because the tx_ring was full.
1804 * Wake it up, because it's now at least 25% empty.
1805 */
1806 netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
1807 }
1808
1809 return count;
1810}
1811
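/* Service up to 'budget' inbound (RX/AE) completions from this ring,
 * then replenish the small/large buffer queues and update the
 * consumer index doorbell.
 */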
1812static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
1813{
1814 struct ql_adapter *qdev = rx_ring->qdev;
1815 u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
1816 struct ql_net_rsp_iocb *net_rsp;
1817 int count = 0;
1818
1819 /* While there are entries in the completion queue. */
1820 while (prod != rx_ring->cnsmr_idx) {
1821
1822 QPRINTK(qdev, RX_STATUS, DEBUG,
1823 "cq_id = %d, prod = %d, cnsmr = %d.\n.", rx_ring->cq_id,
1824 prod, rx_ring->cnsmr_idx);
1825
1826 net_rsp = rx_ring->curr_entry;
1827 rmb();
1828 switch (net_rsp->opcode) {
1829 case OPCODE_IB_MAC_IOCB:
1830 ql_process_mac_rx_intr(qdev, rx_ring,
1831 (struct ib_mac_iocb_rsp *)
1832 net_rsp);
1833 break;
1834
1835 case OPCODE_IB_AE_IOCB:
1836 ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
1837 net_rsp);
1838 break;
1839 default:
1840 {
1841 QPRINTK(qdev, RX_STATUS, DEBUG,
1842 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
1843 net_rsp->opcode);
1844 }
1845 }
1846 count++;
1847 ql_update_cq(rx_ring);
1848 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
1849 if (count == budget)
1850 break;
1851 }
1852 ql_update_buffer_queues(qdev, rx_ring);
1853 ql_write_cq_idx(rx_ring);
1854 return count;
1855}
1856
1857static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
1858{
1859 struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
1860 struct ql_adapter *qdev = rx_ring->qdev;
39aa8165
RM
1861 struct rx_ring *trx_ring;
1862 int i, work_done = 0;
1863 struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];
c4e84bde
RM
1864
1865 QPRINTK(qdev, RX_STATUS, DEBUG, "Enter, NAPI POLL cq_id = %d.\n",
1866 rx_ring->cq_id);
1867
39aa8165
RM
1868 /* Service the TX rings first. They start
1869 * right after the RSS rings. */
1870 for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
1871 trx_ring = &qdev->rx_ring[i];
1872 /* If this TX completion ring belongs to this vector and
1873 * it's not empty then service it.
1874 */
1875 if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
1876 (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
1877 trx_ring->cnsmr_idx)) {
1878 QPRINTK(qdev, INTR, DEBUG,
1879 "%s: Servicing TX completion ring %d.\n",
1880 __func__, trx_ring->cq_id);
1881 ql_clean_outbound_rx_ring(trx_ring);
1882 }
1883 }
1884
1885 /*
1886 * Now service the RSS ring if it's active.
1887 */
1888 if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
1889 rx_ring->cnsmr_idx) {
1890 QPRINTK(qdev, INTR, DEBUG,
1891 "%s: Servicing RX completion ring %d.\n",
1892 __func__, rx_ring->cq_id);
1893 work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
1894 }
1895
c4e84bde 1896 if (work_done < budget) {
22bdd4f5 1897 napi_complete(napi);
c4e84bde
RM
1898 ql_enable_completion_interrupt(qdev, rx_ring->irq);
1899 }
1900 return work_done;
1901}
1902
1903static void ql_vlan_rx_register(struct net_device *ndev, struct vlan_group *grp)
1904{
1905 struct ql_adapter *qdev = netdev_priv(ndev);
1906
1907 qdev->vlgrp = grp;
1908 if (grp) {
1909 QPRINTK(qdev, IFUP, DEBUG, "Turning on VLAN in NIC_RCV_CFG.\n");
1910 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
1911 NIC_RCV_CFG_VLAN_MATCH_AND_NON);
1912 } else {
1913 QPRINTK(qdev, IFUP, DEBUG,
1914 "Turning off VLAN in NIC_RCV_CFG.\n");
1915 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
1916 }
1917}
1918
1919static void ql_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
1920{
1921 struct ql_adapter *qdev = netdev_priv(ndev);
1922 u32 enable_bit = MAC_ADDR_E;
cc288f54 1923 int status;
c4e84bde 1924
cc288f54
RM
1925 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
1926 if (status)
1927 return;
c4e84bde
RM
1928 if (ql_set_mac_addr_reg
1929 (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
1930 QPRINTK(qdev, IFUP, ERR, "Failed to init vlan address.\n");
1931 }
cc288f54 1932 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
c4e84bde
RM
1933}
1934
1935static void ql_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
1936{
1937 struct ql_adapter *qdev = netdev_priv(ndev);
1938 u32 enable_bit = 0;
cc288f54
RM
1939 int status;
1940
1941 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
1942 if (status)
1943 return;
c4e84bde 1944
c4e84bde
RM
1945 if (ql_set_mac_addr_reg
1946 (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
1947 QPRINTK(qdev, IFUP, ERR, "Failed to clear vlan address.\n");
1948 }
cc288f54 1949 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
c4e84bde
RM
1950
1951}
1952
c4e84bde
RM
1953/* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
1954static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
1955{
1956 struct rx_ring *rx_ring = dev_id;
288379f0 1957 napi_schedule(&rx_ring->napi);
c4e84bde
RM
1958 return IRQ_HANDLED;
1959}
1960
c4e84bde
RM
1961/* This handles a fatal error, MPI activity, and the default
1962 * rx_ring in an MSI-X multiple vector environment.
1963 * In an MSI/Legacy environment it also processes the rest of
1964 * the rx_rings.
1965 */
1966static irqreturn_t qlge_isr(int irq, void *dev_id)
1967{
1968 struct rx_ring *rx_ring = dev_id;
1969 struct ql_adapter *qdev = rx_ring->qdev;
1970 struct intr_context *intr_context = &qdev->intr_context[0];
1971 u32 var;
c4e84bde
RM
1972 int work_done = 0;
1973
bb0d215c
RM
1974 spin_lock(&qdev->hw_lock);
1975 if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
1976 QPRINTK(qdev, INTR, DEBUG, "Shared Interrupt, Not ours!\n");
1977 spin_unlock(&qdev->hw_lock);
1978 return IRQ_NONE;
c4e84bde 1979 }
bb0d215c 1980 spin_unlock(&qdev->hw_lock);
c4e84bde 1981
bb0d215c 1982 var = ql_disable_completion_interrupt(qdev, intr_context->intr);
c4e84bde
RM
1983
1984 /*
1985 * Check for fatal error.
1986 */
1987 if (var & STS_FE) {
1988 ql_queue_asic_error(qdev);
1989 QPRINTK(qdev, INTR, ERR, "Got fatal error, STS = %x.\n", var);
1990 var = ql_read32(qdev, ERR_STS);
1991 QPRINTK(qdev, INTR, ERR,
1992 "Resetting chip. Error Status Register = 0x%x\n", var);
1993 return IRQ_HANDLED;
1994 }
1995
1996 /*
1997 * Check MPI processor activity.
1998 */
5ee22a5a
RM
1999 if ((var & STS_PI) &&
2000 (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
c4e84bde
RM
2001 /*
2002 * We've got an async event or mailbox completion.
2003 * Handle it and clear the source of the interrupt.
2004 */
2005 QPRINTK(qdev, INTR, ERR, "Got MPI processor interrupt.\n");
2006 ql_disable_completion_interrupt(qdev, intr_context->intr);
5ee22a5a
RM
2007 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
2008 queue_delayed_work_on(smp_processor_id(),
2009 qdev->workqueue, &qdev->mpi_work, 0);
c4e84bde
RM
2010 work_done++;
2011 }
2012
2013 /*
39aa8165
RM
2014 * Get the bit-mask that shows the active queues for this
2015 * pass. Compare it to the queues that this irq services
2016 * and call napi if there's a match.
c4e84bde 2017 */
39aa8165
RM
2018 var = ql_read32(qdev, ISR1);
2019 if (var & intr_context->irq_mask) {
c4e84bde 2020 QPRINTK(qdev, INTR, INFO,
39aa8165
RM
2021 "Waking handler for rx_ring[0].\n");
2022 ql_disable_completion_interrupt(qdev, intr_context->intr);
288379f0 2023 napi_schedule(&rx_ring->napi);
c4e84bde
RM
2024 work_done++;
2025 }
bb0d215c 2026 ql_enable_completion_interrupt(qdev, intr_context->intr);
c4e84bde
RM
2027 return work_done ? IRQ_HANDLED : IRQ_NONE;
2028}
2029
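/* Set up the IOCB for TSO when the skb is a GSO super-frame.
 * Returns 1 when the TSO fields were filled in, 0 when the frame
 * is not GSO (the caller then falls back to plain checksum
 * offload), or a negative errno if un-cloning the header failed.
 */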
2030static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2031{
2032
2033 if (skb_is_gso(skb)) {
2034 int err;
2035 if (skb_header_cloned(skb)) {
2036 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2037 if (err)
2038 return err;
2039 }
2040
2041 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2042 mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
2043 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2044 mac_iocb_ptr->total_hdrs_len =
2045 cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
2046 mac_iocb_ptr->net_trans_offset =
2047 cpu_to_le16(skb_network_offset(skb) |
2048 skb_transport_offset(skb)
2049 << OB_MAC_TRANSPORT_HDR_SHIFT);
2050 mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2051 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
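 /* The TCP checksum field is seeded below with the complemented
 * pseudo-header checksum (computed with a zero length) so the
 * hardware can fold in each segment's payload checksum as it
 * slices the frame into MSS-sized segments.
 */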
2052 if (likely(skb->protocol == htons(ETH_P_IP))) {
2053 struct iphdr *iph = ip_hdr(skb);
2054 iph->check = 0;
2055 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2056 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2057 iph->daddr, 0,
2058 IPPROTO_TCP,
2059 0);
2060 } else if (skb->protocol == htons(ETH_P_IPV6)) {
2061 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
2062 tcp_hdr(skb)->check =
2063 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2064 &ipv6_hdr(skb)->daddr,
2065 0, IPPROTO_TCP, 0);
2066 }
2067 return 1;
2068 }
2069 return 0;
2070}
2071
2072static void ql_hw_csum_setup(struct sk_buff *skb,
2073 struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2074{
2075 int len;
2076 struct iphdr *iph = ip_hdr(skb);
fd2df4f7 2077 __sum16 *check;
c4e84bde
RM
2078 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2079 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2080 mac_iocb_ptr->net_trans_offset =
2081 cpu_to_le16(skb_network_offset(skb) |
2082 skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
2083
2084 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2085 len = (ntohs(iph->tot_len) - (iph->ihl << 2));
2086 if (likely(iph->protocol == IPPROTO_TCP)) {
2087 check = &(tcp_hdr(skb)->check);
2088 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
2089 mac_iocb_ptr->total_hdrs_len =
2090 cpu_to_le16(skb_transport_offset(skb) +
2091 (tcp_hdr(skb)->doff << 2));
2092 } else {
2093 check = &(udp_hdr(skb)->check);
2094 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
2095 mac_iocb_ptr->total_hdrs_len =
2096 cpu_to_le16(skb_transport_offset(skb) +
2097 sizeof(struct udphdr));
2098 }
2099 *check = ~csum_tcpudp_magic(iph->saddr,
2100 iph->daddr, len, iph->protocol, 0);
2101}
2102
61357325 2103static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
c4e84bde
RM
2104{
2105 struct tx_ring_desc *tx_ring_desc;
2106 struct ob_mac_iocb_req *mac_iocb_ptr;
2107 struct ql_adapter *qdev = netdev_priv(ndev);
2108 int tso;
2109 struct tx_ring *tx_ring;
1e213303 2110 u32 tx_ring_idx = (u32) skb->queue_mapping;
c4e84bde
RM
2111
2112 tx_ring = &qdev->tx_ring[tx_ring_idx];
2113
74c50b4b
RM
2114 if (skb_padto(skb, ETH_ZLEN))
2115 return NETDEV_TX_OK;
2116
c4e84bde
RM
2117 if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
2118 QPRINTK(qdev, TX_QUEUED, INFO,
2119 "%s: shutting down tx queue %d du to lack of resources.\n",
2120 __func__, tx_ring_idx);
1e213303 2121 netif_stop_subqueue(ndev, tx_ring->wq_id);
c4e84bde
RM
2122 atomic_inc(&tx_ring->queue_stopped);
2123 return NETDEV_TX_BUSY;
2124 }
2125 tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
2126 mac_iocb_ptr = tx_ring_desc->queue_entry;
e332471c 2127 memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
c4e84bde
RM
2128
2129 mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
2130 mac_iocb_ptr->tid = tx_ring_desc->index;
2131 /* We use the upper 32-bits to store the tx queue for this IO.
2132 * When we get the completion we can use it to establish the context.
2133 */
2134 mac_iocb_ptr->txq_idx = tx_ring_idx;
2135 tx_ring_desc->skb = skb;
2136
2137 mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
2138
2139 if (qdev->vlgrp && vlan_tx_tag_present(skb)) {
2140 QPRINTK(qdev, TX_QUEUED, DEBUG, "Adding a vlan tag %d.\n",
2141 vlan_tx_tag_get(skb));
2142 mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
2143 mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb));
2144 }
2145 tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2146 if (tso < 0) {
2147 dev_kfree_skb_any(skb);
2148 return NETDEV_TX_OK;
2149 } else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
2150 ql_hw_csum_setup(skb,
2151 (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2152 }
0d979f74
RM
2153 if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
2154 NETDEV_TX_OK) {
2155 QPRINTK(qdev, TX_QUEUED, ERR,
2156 "Could not map the segments.\n");
2157 return NETDEV_TX_BUSY;
2158 }
c4e84bde
RM
2159 QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
2160 tx_ring->prod_idx++;
2161 if (tx_ring->prod_idx == tx_ring->wq_len)
2162 tx_ring->prod_idx = 0;
2163 wmb();
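 /* The wmb() above makes the IOCB contents visible in host memory
 * before the doorbell write below tells the chip to fetch the
 * new producer index.
 */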
2164
2165 ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
c4e84bde
RM
2166 QPRINTK(qdev, TX_QUEUED, DEBUG, "tx queued, slot %d, len %d\n",
2167 tx_ring->prod_idx, skb->len);
2168
2169 atomic_dec(&tx_ring->tx_count);
2170 return NETDEV_TX_OK;
2171}
2172
2173static void ql_free_shadow_space(struct ql_adapter *qdev)
2174{
2175 if (qdev->rx_ring_shadow_reg_area) {
2176 pci_free_consistent(qdev->pdev,
2177 PAGE_SIZE,
2178 qdev->rx_ring_shadow_reg_area,
2179 qdev->rx_ring_shadow_reg_dma);
2180 qdev->rx_ring_shadow_reg_area = NULL;
2181 }
2182 if (qdev->tx_ring_shadow_reg_area) {
2183 pci_free_consistent(qdev->pdev,
2184 PAGE_SIZE,
2185 qdev->tx_ring_shadow_reg_area,
2186 qdev->tx_ring_shadow_reg_dma);
2187 qdev->tx_ring_shadow_reg_area = NULL;
2188 }
2189}
2190
2191static int ql_alloc_shadow_space(struct ql_adapter *qdev)
2192{
2193 qdev->rx_ring_shadow_reg_area =
2194 pci_alloc_consistent(qdev->pdev,
2195 PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma);
2196 if (qdev->rx_ring_shadow_reg_area == NULL) {
2197 QPRINTK(qdev, IFUP, ERR,
2198 "Allocation of RX shadow space failed.\n");
2199 return -ENOMEM;
2200 }
b25215d0 2201 memset(qdev->rx_ring_shadow_reg_area, 0, PAGE_SIZE);
c4e84bde
RM
2202 qdev->tx_ring_shadow_reg_area =
2203 pci_alloc_consistent(qdev->pdev, PAGE_SIZE,
2204 &qdev->tx_ring_shadow_reg_dma);
2205 if (qdev->tx_ring_shadow_reg_area == NULL) {
2206 QPRINTK(qdev, IFUP, ERR,
2207 "Allocation of TX shadow space failed.\n");
2208 goto err_wqp_sh_area;
2209 }
b25215d0 2210 memset(qdev->tx_ring_shadow_reg_area, 0, PAGE_SIZE);
c4e84bde
RM
2211 return 0;
2212
2213err_wqp_sh_area:
2214 pci_free_consistent(qdev->pdev,
2215 PAGE_SIZE,
2216 qdev->rx_ring_shadow_reg_area,
2217 qdev->rx_ring_shadow_reg_dma);
2218 return -ENOMEM;
2219}
2220
2221static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2222{
2223 struct tx_ring_desc *tx_ring_desc;
2224 int i;
2225 struct ob_mac_iocb_req *mac_iocb_ptr;
2226
2227 mac_iocb_ptr = tx_ring->wq_base;
2228 tx_ring_desc = tx_ring->q;
2229 for (i = 0; i < tx_ring->wq_len; i++) {
2230 tx_ring_desc->index = i;
2231 tx_ring_desc->skb = NULL;
2232 tx_ring_desc->queue_entry = mac_iocb_ptr;
2233 mac_iocb_ptr++;
2234 tx_ring_desc++;
2235 }
2236 atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
2237 atomic_set(&tx_ring->queue_stopped, 0);
2238}
2239
2240static void ql_free_tx_resources(struct ql_adapter *qdev,
2241 struct tx_ring *tx_ring)
2242{
2243 if (tx_ring->wq_base) {
2244 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2245 tx_ring->wq_base, tx_ring->wq_base_dma);
2246 tx_ring->wq_base = NULL;
2247 }
2248 kfree(tx_ring->q);
2249 tx_ring->q = NULL;
2250}
2251
2252static int ql_alloc_tx_resources(struct ql_adapter *qdev,
2253 struct tx_ring *tx_ring)
2254{
2255 tx_ring->wq_base =
2256 pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
2257 &tx_ring->wq_base_dma);
2258
2259 if ((tx_ring->wq_base == NULL)
88c55e3c 2260 || tx_ring->wq_base_dma & WQ_ADDR_ALIGN) {
c4e84bde
RM
2261 QPRINTK(qdev, IFUP, ERR, "tx_ring alloc failed.\n");
2262 return -ENOMEM;
2263 }
2264 tx_ring->q =
2265 kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
2266 if (tx_ring->q == NULL)
2267 goto err;
2268
2269 return 0;
2270err:
2271 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2272 tx_ring->wq_base, tx_ring->wq_base_dma);
2273 return -ENOMEM;
2274}
2275
8668ae92 2276static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
c4e84bde
RM
2277{
2278 int i;
2279 struct bq_desc *lbq_desc;
2280
2281 for (i = 0; i < rx_ring->lbq_len; i++) {
2282 lbq_desc = &rx_ring->lbq[i];
2283 if (lbq_desc->p.lbq_page) {
2284 pci_unmap_page(qdev->pdev,
2285 pci_unmap_addr(lbq_desc, mapaddr),
2286 pci_unmap_len(lbq_desc, maplen),
2287 PCI_DMA_FROMDEVICE);
2288
2289 put_page(lbq_desc->p.lbq_page);
2290 lbq_desc->p.lbq_page = NULL;
2291 }
c4e84bde
RM
2292 }
2293}
2294
8668ae92 2295static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
c4e84bde
RM
2296{
2297 int i;
2298 struct bq_desc *sbq_desc;
2299
2300 for (i = 0; i < rx_ring->sbq_len; i++) {
2301 sbq_desc = &rx_ring->sbq[i];
2302 if (sbq_desc == NULL) {
2303 QPRINTK(qdev, IFUP, ERR, "sbq_desc %d is NULL.\n", i);
2304 return;
2305 }
2306 if (sbq_desc->p.skb) {
2307 pci_unmap_single(qdev->pdev,
2308 pci_unmap_addr(sbq_desc, mapaddr),
2309 pci_unmap_len(sbq_desc, maplen),
2310 PCI_DMA_FROMDEVICE);
2311 dev_kfree_skb(sbq_desc->p.skb);
2312 sbq_desc->p.skb = NULL;
2313 }
c4e84bde
RM
2314 }
2315}
2316
4545a3f2
RM
2317/* Free all large and small rx buffers associated
2318 * with the completion queues for this device.
2319 */
2320static void ql_free_rx_buffers(struct ql_adapter *qdev)
2321{
2322 int i;
2323 struct rx_ring *rx_ring;
2324
2325 for (i = 0; i < qdev->rx_ring_count; i++) {
2326 rx_ring = &qdev->rx_ring[i];
2327 if (rx_ring->lbq)
2328 ql_free_lbq_buffers(qdev, rx_ring);
2329 if (rx_ring->sbq)
2330 ql_free_sbq_buffers(qdev, rx_ring);
2331 }
2332}
2333
2334static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
2335{
2336 struct rx_ring *rx_ring;
2337 int i;
2338
2339 for (i = 0; i < qdev->rx_ring_count; i++) {
2340 rx_ring = &qdev->rx_ring[i];
2341 if (rx_ring->type != TX_Q)
2342 ql_update_buffer_queues(qdev, rx_ring);
2343 }
2344}
2345
2346static void ql_init_lbq_ring(struct ql_adapter *qdev,
2347 struct rx_ring *rx_ring)
2348{
2349 int i;
2350 struct bq_desc *lbq_desc;
2351 __le64 *bq = rx_ring->lbq_base;
2352
2353 memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc));
2354 for (i = 0; i < rx_ring->lbq_len; i++) {
2355 lbq_desc = &rx_ring->lbq[i];
2356 memset(lbq_desc, 0, sizeof(*lbq_desc));
2357 lbq_desc->index = i;
2358 lbq_desc->addr = bq;
2359 bq++;
2360 }
2361}
2362
2363static void ql_init_sbq_ring(struct ql_adapter *qdev,
c4e84bde
RM
2364 struct rx_ring *rx_ring)
2365{
2366 int i;
2367 struct bq_desc *sbq_desc;
2c9a0d41 2368 __le64 *bq = rx_ring->sbq_base;
c4e84bde 2369
4545a3f2 2370 memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc));
c4e84bde
RM
2371 for (i = 0; i < rx_ring->sbq_len; i++) {
2372 sbq_desc = &rx_ring->sbq[i];
4545a3f2 2373 memset(sbq_desc, 0, sizeof(*sbq_desc));
c4e84bde 2374 sbq_desc->index = i;
2c9a0d41 2375 sbq_desc->addr = bq;
c4e84bde
RM
2376 bq++;
2377 }
c4e84bde
RM
2378}
2379
2380static void ql_free_rx_resources(struct ql_adapter *qdev,
2381 struct rx_ring *rx_ring)
2382{
c4e84bde
RM
2383 /* Free the small buffer queue. */
2384 if (rx_ring->sbq_base) {
2385 pci_free_consistent(qdev->pdev,
2386 rx_ring->sbq_size,
2387 rx_ring->sbq_base, rx_ring->sbq_base_dma);
2388 rx_ring->sbq_base = NULL;
2389 }
2390
2391 /* Free the small buffer queue control blocks. */
2392 kfree(rx_ring->sbq);
2393 rx_ring->sbq = NULL;
2394
2395 /* Free the large buffer queue. */
2396 if (rx_ring->lbq_base) {
2397 pci_free_consistent(qdev->pdev,
2398 rx_ring->lbq_size,
2399 rx_ring->lbq_base, rx_ring->lbq_base_dma);
2400 rx_ring->lbq_base = NULL;
2401 }
2402
2403 /* Free the large buffer queue control blocks. */
2404 kfree(rx_ring->lbq);
2405 rx_ring->lbq = NULL;
2406
2407 /* Free the rx queue. */
2408 if (rx_ring->cq_base) {
2409 pci_free_consistent(qdev->pdev,
2410 rx_ring->cq_size,
2411 rx_ring->cq_base, rx_ring->cq_base_dma);
2412 rx_ring->cq_base = NULL;
2413 }
2414}
2415
2416/* Allocate queues and buffers for this completion queue based
2417 * on the values in the parameter structure. */
2418static int ql_alloc_rx_resources(struct ql_adapter *qdev,
2419 struct rx_ring *rx_ring)
2420{
2421
2422 /*
2423 * Allocate the completion queue for this rx_ring.
2424 */
2425 rx_ring->cq_base =
2426 pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
2427 &rx_ring->cq_base_dma);
2428
2429 if (rx_ring->cq_base == NULL) {
2430 QPRINTK(qdev, IFUP, ERR, "rx_ring alloc failed.\n");
2431 return -ENOMEM;
2432 }
2433
2434 if (rx_ring->sbq_len) {
2435 /*
2436 * Allocate small buffer queue.
2437 */
2438 rx_ring->sbq_base =
2439 pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
2440 &rx_ring->sbq_base_dma);
2441
2442 if (rx_ring->sbq_base == NULL) {
2443 QPRINTK(qdev, IFUP, ERR,
2444 "Small buffer queue allocation failed.\n");
2445 goto err_mem;
2446 }
2447
2448 /*
2449 * Allocate small buffer queue control blocks.
2450 */
2451 rx_ring->sbq =
2452 kmalloc(rx_ring->sbq_len * sizeof(struct bq_desc),
2453 GFP_KERNEL);
2454 if (rx_ring->sbq == NULL) {
2455 QPRINTK(qdev, IFUP, ERR,
2456 "Small buffer queue control block allocation failed.\n");
2457 goto err_mem;
2458 }
2459
4545a3f2 2460 ql_init_sbq_ring(qdev, rx_ring);
c4e84bde
RM
2461 }
2462
2463 if (rx_ring->lbq_len) {
2464 /*
2465 * Allocate large buffer queue.
2466 */
2467 rx_ring->lbq_base =
2468 pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
2469 &rx_ring->lbq_base_dma);
2470
2471 if (rx_ring->lbq_base == NULL) {
2472 QPRINTK(qdev, IFUP, ERR,
2473 "Large buffer queue allocation failed.\n");
2474 goto err_mem;
2475 }
2476 /*
2477 * Allocate large buffer queue control blocks.
2478 */
2479 rx_ring->lbq =
2480 kmalloc(rx_ring->lbq_len * sizeof(struct bq_desc),
2481 GFP_KERNEL);
2482 if (rx_ring->lbq == NULL) {
2483 QPRINTK(qdev, IFUP, ERR,
2484 "Large buffer queue control block allocation failed.\n");
2485 goto err_mem;
2486 }
2487
4545a3f2 2488 ql_init_lbq_ring(qdev, rx_ring);
c4e84bde
RM
2489 }
2490
2491 return 0;
2492
2493err_mem:
2494 ql_free_rx_resources(qdev, rx_ring);
2495 return -ENOMEM;
2496}
2497
2498static void ql_tx_ring_clean(struct ql_adapter *qdev)
2499{
2500 struct tx_ring *tx_ring;
2501 struct tx_ring_desc *tx_ring_desc;
2502 int i, j;
2503
2504 /*
2505 * Loop through all queues and free
2506 * any resources.
2507 */
2508 for (j = 0; j < qdev->tx_ring_count; j++) {
2509 tx_ring = &qdev->tx_ring[j];
2510 for (i = 0; i < tx_ring->wq_len; i++) {
2511 tx_ring_desc = &tx_ring->q[i];
2512 if (tx_ring_desc && tx_ring_desc->skb) {
2513 QPRINTK(qdev, IFDOWN, ERR,
2514 "Freeing lost SKB %p, from queue %d, index %d.\n",
2515 tx_ring_desc->skb, j,
2516 tx_ring_desc->index);
2517 ql_unmap_send(qdev, tx_ring_desc,
2518 tx_ring_desc->map_cnt);
2519 dev_kfree_skb(tx_ring_desc->skb);
2520 tx_ring_desc->skb = NULL;
2521 }
2522 }
2523 }
2524}
2525
c4e84bde
RM
2526static void ql_free_mem_resources(struct ql_adapter *qdev)
2527{
2528 int i;
2529
2530 for (i = 0; i < qdev->tx_ring_count; i++)
2531 ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
2532 for (i = 0; i < qdev->rx_ring_count; i++)
2533 ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
2534 ql_free_shadow_space(qdev);
2535}
2536
2537static int ql_alloc_mem_resources(struct ql_adapter *qdev)
2538{
2539 int i;
2540
2541 /* Allocate space for our shadow registers and such. */
2542 if (ql_alloc_shadow_space(qdev))
2543 return -ENOMEM;
2544
2545 for (i = 0; i < qdev->rx_ring_count; i++) {
2546 if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
2547 QPRINTK(qdev, IFUP, ERR,
2548 "RX resource allocation failed.\n");
2549 goto err_mem;
2550 }
2551 }
2552 /* Allocate tx queue resources */
2553 for (i = 0; i < qdev->tx_ring_count; i++) {
2554 if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
2555 QPRINTK(qdev, IFUP, ERR,
2556 "TX resource allocation failed.\n");
2557 goto err_mem;
2558 }
2559 }
2560 return 0;
2561
2562err_mem:
2563 ql_free_mem_resources(qdev);
2564 return -ENOMEM;
2565}
2566
2567/* Set up the rx ring control block and pass it to the chip.
2568 * The control block is defined as
2569 * "Completion Queue Initialization Control Block", or cqicb.
2570 */
2571static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2572{
2573 struct cqicb *cqicb = &rx_ring->cqicb;
2574 void *shadow_reg = qdev->rx_ring_shadow_reg_area +
b8facca0 2575 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
c4e84bde 2576 u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
b8facca0 2577 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
c4e84bde
RM
2578 void __iomem *doorbell_area =
2579 qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
2580 int err = 0;
2581 u16 bq_len;
d4a4aba6 2582 u64 tmp;
b8facca0
RM
2583 __le64 *base_indirect_ptr;
2584 int page_entries;
c4e84bde
RM
2585
2586 /* Set up the shadow registers for this ring. */
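 /* Per-ring shadow area layout, as carved out below: an 8-byte
 * completion-queue producer index that the chip updates by DMA,
 * followed by the large buffer queue indirect page list and then
 * the small buffer queue indirect page list.
 */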
2587 rx_ring->prod_idx_sh_reg = shadow_reg;
2588 rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
2589 shadow_reg += sizeof(u64);
2590 shadow_reg_dma += sizeof(u64);
2591 rx_ring->lbq_base_indirect = shadow_reg;
2592 rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
b8facca0
RM
2593 shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
2594 shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
c4e84bde
RM
2595 rx_ring->sbq_base_indirect = shadow_reg;
2596 rx_ring->sbq_base_indirect_dma = shadow_reg_dma;
2597
2598 /* PCI doorbell mem area + 0x00 for consumer index register */
8668ae92 2599 rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
c4e84bde
RM
2600 rx_ring->cnsmr_idx = 0;
2601 rx_ring->curr_entry = rx_ring->cq_base;
2602
2603 /* PCI doorbell mem area + 0x04 for valid register */
2604 rx_ring->valid_db_reg = doorbell_area + 0x04;
2605
2606 /* PCI doorbell mem area + 0x18 for large buffer consumer */
8668ae92 2607 rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);
c4e84bde
RM
2608
2609 /* PCI doorbell mem area + 0x1c */
8668ae92 2610 rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);
c4e84bde
RM
2611
2612 memset((void *)cqicb, 0, sizeof(struct cqicb));
2613 cqicb->msix_vect = rx_ring->irq;
2614
459caf5a
RM
2615 bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
2616 cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);
c4e84bde 2617
97345524 2618 cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);
c4e84bde 2619
97345524 2620 cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);
c4e84bde
RM
2621
2622 /*
2623 * Set up the control block load flags.
2624 */
2625 cqicb->flags = FLAGS_LC | /* Load queue base address */
2626 FLAGS_LV | /* Load MSI-X vector */
2627 FLAGS_LI; /* Load irq delay values */
2628 if (rx_ring->lbq_len) {
2629 cqicb->flags |= FLAGS_LL; /* Load lbq values */
a419aef8 2630 tmp = (u64)rx_ring->lbq_base_dma;
b8facca0
RM
2631 base_indirect_ptr = (__le64 *) rx_ring->lbq_base_indirect;
2632 page_entries = 0;
2633 do {
2634 *base_indirect_ptr = cpu_to_le64(tmp);
2635 tmp += DB_PAGE_SIZE;
2636 base_indirect_ptr++;
2637 page_entries++;
2638 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
97345524
RM
2639 cqicb->lbq_addr =
2640 cpu_to_le64(rx_ring->lbq_base_indirect_dma);
459caf5a
RM
2641 bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
2642 (u16) rx_ring->lbq_buf_size;
2643 cqicb->lbq_buf_size = cpu_to_le16(bq_len);
2644 bq_len = (rx_ring->lbq_len == 65536) ? 0 :
2645 (u16) rx_ring->lbq_len;
c4e84bde 2646 cqicb->lbq_len = cpu_to_le16(bq_len);
4545a3f2 2647 rx_ring->lbq_prod_idx = 0;
c4e84bde 2648 rx_ring->lbq_curr_idx = 0;
4545a3f2
RM
2649 rx_ring->lbq_clean_idx = 0;
2650 rx_ring->lbq_free_cnt = rx_ring->lbq_len;
c4e84bde
RM
2651 }
2652 if (rx_ring->sbq_len) {
2653 cqicb->flags |= FLAGS_LS; /* Load sbq values */
a419aef8 2654 tmp = (u64)rx_ring->sbq_base_dma;
b8facca0
RM
2655 base_indirect_ptr = (__le64 *) rx_ring->sbq_base_indirect;
2656 page_entries = 0;
2657 do {
2658 *base_indirect_ptr = cpu_to_le64(tmp);
2659 tmp += DB_PAGE_SIZE;
2660 base_indirect_ptr++;
2661 page_entries++;
2662 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len));
97345524
RM
2663 cqicb->sbq_addr =
2664 cpu_to_le64(rx_ring->sbq_base_indirect_dma);
c4e84bde 2665 cqicb->sbq_buf_size =
d4a4aba6 2666 cpu_to_le16((u16)(rx_ring->sbq_buf_size/2));
459caf5a
RM
2667 bq_len = (rx_ring->sbq_len == 65536) ? 0 :
2668 (u16) rx_ring->sbq_len;
c4e84bde 2669 cqicb->sbq_len = cpu_to_le16(bq_len);
4545a3f2 2670 rx_ring->sbq_prod_idx = 0;
c4e84bde 2671 rx_ring->sbq_curr_idx = 0;
4545a3f2
RM
2672 rx_ring->sbq_clean_idx = 0;
2673 rx_ring->sbq_free_cnt = rx_ring->sbq_len;
c4e84bde
RM
2674 }
2675 switch (rx_ring->type) {
2676 case TX_Q:
c4e84bde
RM
2677 cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
2678 cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
2679 break;
c4e84bde
RM
2680 case RX_Q:
2681 /* Inbound completion handling rx_rings run in
2682 * separate NAPI contexts.
2683 */
2684 netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
2685 64);
2686 cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
2687 cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
2688 break;
2689 default:
2690 QPRINTK(qdev, IFUP, DEBUG, "Invalid rx_ring->type = %d.\n",
2691 rx_ring->type);
2692 }
4974097a 2693 QPRINTK(qdev, IFUP, DEBUG, "Initializing rx work queue.\n");
c4e84bde
RM
2694 err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
2695 CFG_LCQ, rx_ring->cq_id);
2696 if (err) {
2697 QPRINTK(qdev, IFUP, ERR, "Failed to load CQICB.\n");
2698 return err;
2699 }
c4e84bde
RM
2700 return err;
2701}
2702
2703static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2704{
2705 struct wqicb *wqicb = (struct wqicb *)tx_ring;
2706 void __iomem *doorbell_area =
2707 qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
2708 void *shadow_reg = qdev->tx_ring_shadow_reg_area +
2709 (tx_ring->wq_id * sizeof(u64));
2710 u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
2711 (tx_ring->wq_id * sizeof(u64));
2712 int err = 0;
2713
2714 /*
2715 * Assign doorbell registers for this tx_ring.
2716 */
2717 /* TX PCI doorbell mem area for tx producer index */
8668ae92 2718 tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
c4e84bde
RM
2719 tx_ring->prod_idx = 0;
2720 /* TX PCI doorbell mem area + 0x04 */
2721 tx_ring->valid_db_reg = doorbell_area + 0x04;
2722
2723 /*
2724 * Assign shadow registers for this tx_ring.
2725 */
2726 tx_ring->cnsmr_idx_sh_reg = shadow_reg;
2727 tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
2728
2729 wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
2730 wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
2731 Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
2732 wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
2733 wqicb->rid = 0;
97345524 2734 wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);
c4e84bde 2735
97345524 2736 wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);
c4e84bde
RM
2737
2738 ql_init_tx_ring(qdev, tx_ring);
2739
e332471c 2740 err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
c4e84bde
RM
2741 (u16) tx_ring->wq_id);
2742 if (err) {
2743 QPRINTK(qdev, IFUP, ERR, "Failed to load tx_ring.\n");
2744 return err;
2745 }
4974097a 2746 QPRINTK(qdev, IFUP, DEBUG, "Successfully loaded WQICB.\n");
c4e84bde
RM
2747 return err;
2748}
2749
2750static void ql_disable_msix(struct ql_adapter *qdev)
2751{
2752 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
2753 pci_disable_msix(qdev->pdev);
2754 clear_bit(QL_MSIX_ENABLED, &qdev->flags);
2755 kfree(qdev->msi_x_entry);
2756 qdev->msi_x_entry = NULL;
2757 } else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
2758 pci_disable_msi(qdev->pdev);
2759 clear_bit(QL_MSI_ENABLED, &qdev->flags);
2760 }
2761}
2762
a4ab6137
RM
2763/* We start by trying to get the number of vectors
2764 * stored in qdev->intr_count. If we don't get that
2765 * many then we reduce the count and try again.
2766 */
c4e84bde
RM
2767static void ql_enable_msix(struct ql_adapter *qdev)
2768{
a4ab6137 2769 int i, err;
c4e84bde 2770
c4e84bde
RM
2771 /* Get the MSIX vectors. */
2772 if (irq_type == MSIX_IRQ) {
2773 /* Try to alloc space for the msix struct,
2774 * if it fails then go to MSI/legacy.
2775 */
a4ab6137 2776 qdev->msi_x_entry = kcalloc(qdev->intr_count,
c4e84bde
RM
2777 sizeof(struct msix_entry),
2778 GFP_KERNEL);
2779 if (!qdev->msi_x_entry) {
2780 irq_type = MSI_IRQ;
2781 goto msi;
2782 }
2783
a4ab6137 2784 for (i = 0; i < qdev->intr_count; i++)
c4e84bde
RM
2785 qdev->msi_x_entry[i].entry = i;
2786
a4ab6137
RM
2787 /* Loop to get our vectors. We start with
2788 * what we want and settle for what we get.
2789 */
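 /* pci_enable_msix() returns 0 on success, a negative errno on
 * failure, or a positive count of the vectors actually available;
 * in that last case the loop retries with the smaller count.
 */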
2790 do {
2791 err = pci_enable_msix(qdev->pdev,
2792 qdev->msi_x_entry, qdev->intr_count);
2793 if (err > 0)
2794 qdev->intr_count = err;
2795 } while (err > 0);
2796
2797 if (err < 0) {
c4e84bde
RM
2798 kfree(qdev->msi_x_entry);
2799 qdev->msi_x_entry = NULL;
2800 QPRINTK(qdev, IFUP, WARNING,
2801 "MSI-X Enable failed, trying MSI.\n");
a4ab6137 2802 qdev->intr_count = 1;
c4e84bde 2803 irq_type = MSI_IRQ;
a4ab6137
RM
2804 } else if (err == 0) {
2805 set_bit(QL_MSIX_ENABLED, &qdev->flags);
2806 QPRINTK(qdev, IFUP, INFO,
2807 "MSI-X Enabled, got %d vectors.\n",
2808 qdev->intr_count);
2809 return;
c4e84bde
RM
2810 }
2811 }
2812msi:
a4ab6137 2813 qdev->intr_count = 1;
c4e84bde
RM
2814 if (irq_type == MSI_IRQ) {
2815 if (!pci_enable_msi(qdev->pdev)) {
2816 set_bit(QL_MSI_ENABLED, &qdev->flags);
2817 QPRINTK(qdev, IFUP, INFO,
2818 "Running with MSI interrupts.\n");
2819 return;
2820 }
2821 }
2822 irq_type = LEG_IRQ;
c4e84bde
RM
2823 QPRINTK(qdev, IFUP, DEBUG, "Running with legacy interrupts.\n");
2824}
2825
39aa8165
RM
2826 /* Each vector services 1 RSS ring and 1 or more
2827 * TX completion rings. This function loops through
2828 * the TX completion rings and assigns the vector that
2829 * will service it. An example would be if there are
2830 * 2 vectors (so 2 RSS rings) and 8 TX completion rings.
2831 * This would mean that vector 0 would service RSS ring 0
2832 * and TX completion rings 0,1,2 and 3. Vector 1 would
2833 * service RSS ring 1 and TX completion rings 4,5,6 and 7.
2834 */
2835static void ql_set_tx_vect(struct ql_adapter *qdev)
2836{
2837 int i, j, vect;
2838 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
2839
2840 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
2841 /* Assign irq vectors to TX rx_rings.*/
2842 for (vect = 0, j = 0, i = qdev->rss_ring_count;
2843 i < qdev->rx_ring_count; i++) {
2844 if (j == tx_rings_per_vector) {
2845 vect++;
2846 j = 0;
2847 }
2848 qdev->rx_ring[i].irq = vect;
2849 j++;
2850 }
2851 } else {
2852 /* For single vector all rings have an irq
2853 * of zero.
2854 */
2855 for (i = 0; i < qdev->rx_ring_count; i++)
2856 qdev->rx_ring[i].irq = 0;
2857 }
2858}
2859
2860/* Set the interrupt mask for this vector. Each vector
2861 * will service 1 RSS ring and 1 or more TX completion
2862 * rings. This function sets up a bit mask per vector
2863 * that indicates which rings it services.
2864 */
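/* Worked example, mirroring the ql_set_tx_vect() comment above:
 * with 2 vectors (rss_ring_count = 2) and 8 TX completion rings
 * whose cq_ids are 2..9, tx_rings_per_vector = 8 / 2 = 4, so
 * vector 0 gets irq_mask = bit 0 | bits 2..5 = 0x03d and
 * vector 1 gets irq_mask = bit 1 | bits 6..9 = 0x3c2.
 * qlge_isr() and ql_napi_poll_msix() test these masks to decide
 * which completion rings a vector should service.
 */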
2865static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx)
2866{
2867 int j, vect = ctx->intr;
2868 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
2869
2870 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
2871 /* Add the RSS ring serviced by this vector
2872 * to the mask.
2873 */
2874 ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
2875 /* Add the TX ring(s) serviced by this vector
2876 * to the mask. */
2877 for (j = 0; j < tx_rings_per_vector; j++) {
2878 ctx->irq_mask |=
2879 (1 << qdev->rx_ring[qdev->rss_ring_count +
2880 (vect * tx_rings_per_vector) + j].cq_id);
2881 }
2882 } else {
2883 /* For single vector we just shift each queue's
2884 * ID into the mask.
2885 */
2886 for (j = 0; j < qdev->rx_ring_count; j++)
2887 ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id);
2888 }
2889}
2890
c4e84bde
RM
2891/*
2892 * Here we build the intr_context structures based on
2893 * our rx_ring count and intr vector count.
2894 * The intr_context structure is used to hook each vector
2895 * to possibly different handlers.
2896 */
2897static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
2898{
2899 int i = 0;
2900 struct intr_context *intr_context = &qdev->intr_context[0];
2901
c4e84bde
RM
2902 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
2903 /* Each rx_ring has its
2904 * own intr_context since we have separate
2905 * vectors for each queue.
c4e84bde
RM
2906 */
2907 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
2908 qdev->rx_ring[i].irq = i;
2909 intr_context->intr = i;
2910 intr_context->qdev = qdev;
39aa8165
RM
2911 /* Set up this vector's bit-mask that indicates
2912 * which queues it services.
2913 */
2914 ql_set_irq_mask(qdev, intr_context);
c4e84bde
RM
2915 /*
2916 * We set up each vector's enable/disable/read bits so
2917 * there's no bit/mask calculations in the critical path.
2918 */
2919 intr_context->intr_en_mask =
2920 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
2921 INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
2922 | i;
2923 intr_context->intr_dis_mask =
2924 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
2925 INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
2926 INTR_EN_IHD | i;
2927 intr_context->intr_read_mask =
2928 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
2929 INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
2930 i;
39aa8165
RM
2931 if (i == 0) {
2932 /* The first vector/queue handles
2933 * broadcast/multicast, fatal errors,
2934 * and firmware events. This is in addition
2935 * to normal inbound NAPI processing.
c4e84bde 2936 */
39aa8165 2937 intr_context->handler = qlge_isr;
b2014ff8
RM
2938 sprintf(intr_context->name, "%s-rx-%d",
2939 qdev->ndev->name, i);
2940 } else {
c4e84bde 2941 /*
39aa8165 2942 * Inbound queues handle unicast frames only.
c4e84bde 2943 */
39aa8165
RM
2944 intr_context->handler = qlge_msix_rx_isr;
2945 sprintf(intr_context->name, "%s-rx-%d",
c4e84bde 2946 qdev->ndev->name, i);
c4e84bde
RM
2947 }
2948 }
2949 } else {
2950 /*
2951 * All rx_rings use the same intr_context since
2952 * there is only one vector.
2953 */
2954 intr_context->intr = 0;
2955 intr_context->qdev = qdev;
2956 /*
2957 * We set up each vector's enable/disable/read bits so
2958 * there's no bit/mask calculations in the critical path.
2959 */
2960 intr_context->intr_en_mask =
2961 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
2962 intr_context->intr_dis_mask =
2963 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
2964 INTR_EN_TYPE_DISABLE;
2965 intr_context->intr_read_mask =
2966 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
2967 /*
2968 * Single interrupt means one handler for all rings.
2969 */
2970 intr_context->handler = qlge_isr;
2971 sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
39aa8165
RM
2972 /* Set up this vector's bit-mask that indicates
2973 * which queues it services. In this case there is
2974 * a single vector so it will service all RSS and
2975 * TX completion rings.
2976 */
2977 ql_set_irq_mask(qdev, intr_context);
c4e84bde 2978 }
39aa8165
RM
2979 /* Tell the TX completion rings which MSIx vector
2980 * they will be using.
2981 */
2982 ql_set_tx_vect(qdev);
c4e84bde
RM
2983}
2984
2985static void ql_free_irq(struct ql_adapter *qdev)
2986{
2987 int i;
2988 struct intr_context *intr_context = &qdev->intr_context[0];
2989
2990 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
2991 if (intr_context->hooked) {
2992 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
2993 free_irq(qdev->msi_x_entry[i].vector,
2994 &qdev->rx_ring[i]);
4974097a 2995 QPRINTK(qdev, IFDOWN, DEBUG,
c4e84bde
RM
2996 "freeing msix interrupt %d.\n", i);
2997 } else {
2998 free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
4974097a 2999 QPRINTK(qdev, IFDOWN, DEBUG,
c4e84bde
RM
3000 "freeing msi interrupt %d.\n", i);
3001 }
3002 }
3003 }
3004 ql_disable_msix(qdev);
3005}
3006
3007static int ql_request_irq(struct ql_adapter *qdev)
3008{
3009 int i;
3010 int status = 0;
3011 struct pci_dev *pdev = qdev->pdev;
3012 struct intr_context *intr_context = &qdev->intr_context[0];
3013
3014 ql_resolve_queues_to_irqs(qdev);
3015
3016 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3017 atomic_set(&intr_context->irq_cnt, 0);
3018 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3019 status = request_irq(qdev->msi_x_entry[i].vector,
3020 intr_context->handler,
3021 0,
3022 intr_context->name,
3023 &qdev->rx_ring[i]);
3024 if (status) {
3025 QPRINTK(qdev, IFUP, ERR,
3026 "Failed request for MSIX interrupt %d.\n",
3027 i);
3028 goto err_irq;
3029 } else {
4974097a 3030 QPRINTK(qdev, IFUP, DEBUG,
c4e84bde
RM
3031 "Hooked intr %d, queue type %s%s%s, with name %s.\n",
3032 i,
3033 qdev->rx_ring[i].type ==
3034 DEFAULT_Q ? "DEFAULT_Q" : "",
3035 qdev->rx_ring[i].type ==
3036 TX_Q ? "TX_Q" : "",
3037 qdev->rx_ring[i].type ==
3038 RX_Q ? "RX_Q" : "", intr_context->name);
3039 }
3040 } else {
3041 QPRINTK(qdev, IFUP, DEBUG,
3042 "trying msi or legacy interrupts.\n");
3043 QPRINTK(qdev, IFUP, DEBUG,
3044 "%s: irq = %d.\n", __func__, pdev->irq);
3045 QPRINTK(qdev, IFUP, DEBUG,
3046 "%s: context->name = %s.\n", __func__,
3047 intr_context->name);
3048 QPRINTK(qdev, IFUP, DEBUG,
3049 "%s: dev_id = 0x%p.\n", __func__,
3050 &qdev->rx_ring[0]);
3051 status =
3052 request_irq(pdev->irq, qlge_isr,
3053 test_bit(QL_MSI_ENABLED,
3054 &qdev->
3055 flags) ? 0 : IRQF_SHARED,
3056 intr_context->name, &qdev->rx_ring[0]);
3057 if (status)
3058 goto err_irq;
3059
3060 QPRINTK(qdev, IFUP, ERR,
3061 "Hooked intr %d, queue type %s%s%s, with name %s.\n",
3062 i,
3063 qdev->rx_ring[0].type ==
3064 DEFAULT_Q ? "DEFAULT_Q" : "",
3065 qdev->rx_ring[0].type == TX_Q ? "TX_Q" : "",
3066 qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
3067 intr_context->name);
3068 }
3069 intr_context->hooked = 1;
3070 }
3071 return status;
3072err_irq:
3073 QPRINTK(qdev, IFUP, ERR, "Failed to get the interrupts!!!/n");
3074 ql_free_irq(qdev);
3075 return status;
3076}
3077
3078static int ql_start_rss(struct ql_adapter *qdev)
3079{
541ae28c
RM
3080 u8 init_hash_seed[] = {0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
3081 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f,
3082 0xb0, 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b,
3083 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80,
3084 0x30, 0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b,
3085 0xbe, 0xac, 0x01, 0xfa};
c4e84bde
RM
3086 struct ricb *ricb = &qdev->ricb;
3087 int status = 0;
3088 int i;
3089 u8 *hash_id = (u8 *) ricb->hash_cq_id;
3090
e332471c 3091 memset((void *)ricb, 0, sizeof(*ricb));
c4e84bde 3092
b2014ff8 3093 ricb->base_cq = RSS_L4K;
c4e84bde 3094 ricb->flags =
541ae28c
RM
3095 (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6);
3096 ricb->mask = cpu_to_le16((u16)(0x3ff));
c4e84bde
RM
3097
3098 /*
3099 * Fill out the Indirection Table.
3100 */
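 /* With 1024 entries and a power-of-two rss_ring_count, the loop
 * below simply cycles the ring ids: for example, with 4 RSS rings
 * the table reads 0,1,2,3,0,1,2,3,... so inbound flows are spread
 * evenly across the RSS completion queues. The 40-byte and
 * 16-byte copies of init_hash_seed further down seed the IPv6 and
 * IPv4 hash keys respectively.
 */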
541ae28c
RM
3101 for (i = 0; i < 1024; i++)
3102 hash_id[i] = (i & (qdev->rss_ring_count - 1));
c4e84bde 3103
541ae28c
RM
3104 memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
3105 memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);
c4e84bde 3106
4974097a 3107 QPRINTK(qdev, IFUP, DEBUG, "Initializing RSS.\n");
c4e84bde 3108
e332471c 3109 status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
c4e84bde
RM
3110 if (status) {
3111 QPRINTK(qdev, IFUP, ERR, "Failed to load RICB.\n");
3112 return status;
3113 }
4974097a 3114 QPRINTK(qdev, IFUP, DEBUG, "Successfully loaded RICB.\n");
c4e84bde
RM
3115 return status;
3116}
3117
a5f59dc9 3118static int ql_clear_routing_entries(struct ql_adapter *qdev)
c4e84bde 3119{
a5f59dc9 3120 int i, status = 0;
c4e84bde 3121
8587ea35
RM
3122 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3123 if (status)
3124 return status;
c4e84bde
RM
3125 /* Clear all the entries in the routing table. */
3126 for (i = 0; i < 16; i++) {
3127 status = ql_set_routing_reg(qdev, i, 0, 0);
3128 if (status) {
3129 QPRINTK(qdev, IFUP, ERR,
a5f59dc9
RM
3130 "Failed to init routing register for CAM "
3131 "packets.\n");
3132 break;
c4e84bde
RM
3133 }
3134 }
a5f59dc9
RM
3135 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3136 return status;
3137}
3138
3139/* Initialize the frame-to-queue routing. */
3140static int ql_route_initialize(struct ql_adapter *qdev)
3141{
3142 int status = 0;
3143
fd21cf52
RM
3144 /* Clear all the entries in the routing table. */
3145 status = ql_clear_routing_entries(qdev);
a5f59dc9
RM
3146 if (status)
3147 return status;
3148
fd21cf52 3149 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
a5f59dc9 3150 if (status)
fd21cf52 3151 return status;
c4e84bde
RM
3152
3153 status = ql_set_routing_reg(qdev, RT_IDX_ALL_ERR_SLOT, RT_IDX_ERR, 1);
3154 if (status) {
3155 QPRINTK(qdev, IFUP, ERR,
3156 "Failed to init routing register for error packets.\n");
8587ea35 3157 goto exit;
c4e84bde
RM
3158 }
3159 status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
3160 if (status) {
3161 QPRINTK(qdev, IFUP, ERR,
3162 "Failed to init routing register for broadcast packets.\n");
8587ea35 3163 goto exit;
c4e84bde
RM
3164 }
3165 /* If we have more than one inbound queue, then turn on RSS in the
3166 * routing block.
3167 */
3168 if (qdev->rss_ring_count > 1) {
3169 status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
3170 RT_IDX_RSS_MATCH, 1);
3171 if (status) {
3172 QPRINTK(qdev, IFUP, ERR,
3173 "Failed to init routing register for MATCH RSS packets.\n");
8587ea35 3174 goto exit;
c4e84bde
RM
3175 }
3176 }
3177
3178 status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
3179 RT_IDX_CAM_HIT, 1);
8587ea35 3180 if (status)
c4e84bde
RM
3181 QPRINTK(qdev, IFUP, ERR,
3182 "Failed to init routing register for CAM packets.\n");
8587ea35
RM
3183exit:
3184 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
c4e84bde
RM
3185 return status;
3186}
3187
2ee1e272 3188int ql_cam_route_initialize(struct ql_adapter *qdev)
bb58b5b6 3189{
7fab3bfe 3190 int status, set;
bb58b5b6 3191
7fab3bfe
RM
3192 /* Check if the link is up and use that to
3193 * determine if we are setting or clearing
3194 * the MAC address in the CAM.
3195 */
3196 set = ql_read32(qdev, STS);
3197 set &= qdev->port_link_up;
3198 status = ql_set_mac_addr(qdev, set);
bb58b5b6
RM
3199 if (status) {
3200 QPRINTK(qdev, IFUP, ERR, "Failed to init mac address.\n");
3201 return status;
3202 }
3203
3204 status = ql_route_initialize(qdev);
3205 if (status)
3206 QPRINTK(qdev, IFUP, ERR, "Failed to init routing table.\n");
3207
3208 return status;
3209}
3210
c4e84bde
RM
3211static int ql_adapter_initialize(struct ql_adapter *qdev)
3212{
3213 u32 value, mask;
3214 int i;
3215 int status = 0;
3216
3217 /*
3218 * Set up the System register to halt on errors.
3219 */
3220 value = SYS_EFE | SYS_FAE;
3221 mask = value << 16;
3222 ql_write32(qdev, SYS, mask | value);
3223
c9cf0a04
RM
3224 /* Set the default queue, and VLAN behavior. */
3225 value = NIC_RCV_CFG_DFQ | NIC_RCV_CFG_RV;
3226 mask = NIC_RCV_CFG_DFQ_MASK | (NIC_RCV_CFG_RV << 16);
c4e84bde
RM
3227 ql_write32(qdev, NIC_RCV_CFG, (mask | value));
3228
3229 /* Set the MPI interrupt to enabled. */
3230 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
3231
3232 /* Enable the function, set pagesize, enable error checking. */
3233 value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
3234 FSC_EC | FSC_VM_PAGE_4K | FSC_SH;
3235
3236 /* Set/clear header splitting. */
3237 mask = FSC_VM_PAGESIZE_MASK |
3238 FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
3239 ql_write32(qdev, FSC, mask | value);
3240
3241 ql_write32(qdev, SPLT_HDR, SPLT_HDR_EP |
3242 min(SMALL_BUFFER_SIZE, MAX_SPLIT_SIZE));
3243
3244 /* Start up the rx queues. */
3245 for (i = 0; i < qdev->rx_ring_count; i++) {
3246 status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
3247 if (status) {
3248 QPRINTK(qdev, IFUP, ERR,
3249 "Failed to start rx ring[%d].\n", i);
3250 return status;
3251 }
3252 }
3253
3254 /* If there is more than one inbound completion queue
3255 * then download a RICB to configure RSS.
3256 */
3257 if (qdev->rss_ring_count > 1) {
3258 status = ql_start_rss(qdev);
3259 if (status) {
3260 QPRINTK(qdev, IFUP, ERR, "Failed to start RSS.\n");
3261 return status;
3262 }
3263 }
3264
3265 /* Start up the tx queues. */
3266 for (i = 0; i < qdev->tx_ring_count; i++) {
3267 status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
3268 if (status) {
3269 QPRINTK(qdev, IFUP, ERR,
3270 "Failed to start tx ring[%d].\n", i);
3271 return status;
3272 }
3273 }
3274
b0c2aadf
RM
3275 /* Initialize the port and set the max framesize. */
3276 status = qdev->nic_ops->port_initialize(qdev);
3277 if (status) {
3278 QPRINTK(qdev, IFUP, ERR, "Failed to start port.\n");
3279 return status;
3280 }
c4e84bde 3281
bb58b5b6
RM
3282 /* Set up the MAC address and frame routing filter. */
3283 status = ql_cam_route_initialize(qdev);
c4e84bde 3284 if (status) {
bb58b5b6
RM
3285 QPRINTK(qdev, IFUP, ERR,
3286 "Failed to init CAM/Routing tables.\n");
c4e84bde
RM
3287 return status;
3288 }
3289
3290 /* Start NAPI for the RSS queues. */
b2014ff8 3291 for (i = 0; i < qdev->rss_ring_count; i++) {
4974097a 3292 QPRINTK(qdev, IFUP, DEBUG, "Enabling NAPI for rx_ring[%d].\n",
c4e84bde
RM
3293 i);
3294 napi_enable(&qdev->rx_ring[i].napi);
3295 }
3296
3297 return status;
3298}
3299
3300/* Issue soft reset to chip. */
3301static int ql_adapter_reset(struct ql_adapter *qdev)
3302{
3303 u32 value;
c4e84bde 3304 int status = 0;
a5f59dc9 3305 unsigned long end_jiffies;
c4e84bde 3306
a5f59dc9
RM
3307 /* Clear all the entries in the routing table. */
3308 status = ql_clear_routing_entries(qdev);
3309 if (status) {
3310 QPRINTK(qdev, IFUP, ERR, "Failed to clear routing bits.\n");
3311 return status;
3312 }
3313
3314 end_jiffies = jiffies +
3315 max((unsigned long)1, usecs_to_jiffies(30));
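 /* Soft reset handshake: write RST_FO_FR (with its enable bit in
 * the upper 16-bit mask) to ask the chip to reset this function,
 * then poll until the chip clears the bit or the deadline set
 * above (~30 us, at least one jiffy) expires.
 */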
c4e84bde 3316 ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
a75ee7f1 3317
c4e84bde
RM
3318 do {
3319 value = ql_read32(qdev, RST_FO);
3320 if ((value & RST_FO_FR) == 0)
3321 break;
a75ee7f1
RM
3322 cpu_relax();
3323 } while (time_before(jiffies, end_jiffies));
c4e84bde 3324
c4e84bde 3325 if (value & RST_FO_FR) {
c4e84bde 3326 QPRINTK(qdev, IFDOWN, ERR,
3ac49a1c 3327 "ETIMEDOUT!!! errored out of resetting the chip!\n");
a75ee7f1 3328 status = -ETIMEDOUT;
c4e84bde
RM
3329 }
3330
3331 return status;
3332}
3333
3334static void ql_display_dev_info(struct net_device *ndev)
3335{
3336 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
3337
3338 QPRINTK(qdev, PROBE, INFO,
e4552f51 3339 "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
c4e84bde
RM
3340 "XG Roll = %d, XG Rev = %d.\n",
3341 qdev->func,
e4552f51 3342 qdev->port,
c4e84bde
RM
3343 qdev->chip_rev_id & 0x0000000f,
3344 qdev->chip_rev_id >> 4 & 0x0000000f,
3345 qdev->chip_rev_id >> 8 & 0x0000000f,
3346 qdev->chip_rev_id >> 12 & 0x0000000f);
7c510e4b 3347 QPRINTK(qdev, PROBE, INFO, "MAC address %pM\n", ndev->dev_addr);
c4e84bde
RM
3348}
3349
3350static int ql_adapter_down(struct ql_adapter *qdev)
3351{
c4e84bde 3352 int i, status = 0;
c4e84bde 3353
6a473308 3354 ql_link_off(qdev);
c4e84bde 3355
6497b607
RM
3356 /* Don't kill the reset worker thread if we
3357 * are in the process of recovery.
3358 */
3359 if (test_bit(QL_ADAPTER_UP, &qdev->flags))
3360 cancel_delayed_work_sync(&qdev->asic_reset_work);
c4e84bde
RM
3361 cancel_delayed_work_sync(&qdev->mpi_reset_work);
3362 cancel_delayed_work_sync(&qdev->mpi_work);
2ee1e272 3363 cancel_delayed_work_sync(&qdev->mpi_idc_work);
bcc2cb3b 3364 cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
c4e84bde 3365
39aa8165
RM
3366 for (i = 0; i < qdev->rss_ring_count; i++)
3367 napi_disable(&qdev->rx_ring[i].napi);
c4e84bde
RM
3368
3369 clear_bit(QL_ADAPTER_UP, &qdev->flags);
3370
3371 ql_disable_interrupts(qdev);
3372
3373 ql_tx_ring_clean(qdev);
3374
6b318cb3
RM
3375 /* Call netif_napi_del() from common point.
3376 */
b2014ff8 3377 for (i = 0; i < qdev->rss_ring_count; i++)
6b318cb3
RM
3378 netif_napi_del(&qdev->rx_ring[i].napi);
3379
4545a3f2 3380 ql_free_rx_buffers(qdev);
2d6a5e95 3381
c4e84bde
RM
3382 status = ql_adapter_reset(qdev);
3383 if (status)
3384 QPRINTK(qdev, IFDOWN, ERR, "reset(func #%d) FAILED!\n",
3385 qdev->func);
c4e84bde
RM
3386 return status;
3387}
3388
3389static int ql_adapter_up(struct ql_adapter *qdev)
3390{
3391 int err = 0;
3392
c4e84bde
RM
3393 err = ql_adapter_initialize(qdev);
3394 if (err) {
3395 QPRINTK(qdev, IFUP, INFO, "Unable to initialize adapter.\n");
c4e84bde
RM
3396 goto err_init;
3397 }
c4e84bde 3398 set_bit(QL_ADAPTER_UP, &qdev->flags);
4545a3f2 3399 ql_alloc_rx_buffers(qdev);
8b007de1
RM
3400 /* If the port is initialized and the
3401 * link is up then turn on the carrier.
3402 */
3403 if ((ql_read32(qdev, STS) & qdev->port_init) &&
3404 (ql_read32(qdev, STS) & qdev->port_link_up))
6a473308 3405 ql_link_on(qdev);
c4e84bde
RM
3406 ql_enable_interrupts(qdev);
3407 ql_enable_all_completion_interrupts(qdev);
1e213303 3408 netif_tx_start_all_queues(qdev->ndev);
c4e84bde
RM
3409
3410 return 0;
3411err_init:
3412 ql_adapter_reset(qdev);
3413 return err;
3414}
3415
c4e84bde
RM
3416static void ql_release_adapter_resources(struct ql_adapter *qdev)
3417{
3418 ql_free_mem_resources(qdev);
3419 ql_free_irq(qdev);
3420}
3421
3422static int ql_get_adapter_resources(struct ql_adapter *qdev)
3423{
3424 int status = 0;
3425
3426 if (ql_alloc_mem_resources(qdev)) {
3427 QPRINTK(qdev, IFUP, ERR, "Unable to allocate memory.\n");
3428 return -ENOMEM;
3429 }
3430 status = ql_request_irq(qdev);
c4e84bde
RM
3431 return status;
3432}
3433
3434static int qlge_close(struct net_device *ndev)
3435{
3436 struct ql_adapter *qdev = netdev_priv(ndev);
3437
3438 /*
3439 * Wait for device to recover from a reset.
3440 * (Rarely happens, but possible.)
3441 */
3442 while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
3443 msleep(1);
3444 ql_adapter_down(qdev);
3445 ql_release_adapter_resources(qdev);
c4e84bde
RM
3446 return 0;
3447}
3448
3449static int ql_configure_rings(struct ql_adapter *qdev)
3450{
3451 int i;
3452 struct rx_ring *rx_ring;
3453 struct tx_ring *tx_ring;
a4ab6137
RM
3454 int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus());
3455
3456 /* In a perfect world we have one RSS ring for each CPU
3457 * and each has its own vector. To do that we ask for
3458 * cpu_cnt vectors. ql_enable_msix() will adjust the
3459 * vector count to what we actually get. We then
3460 * allocate an RSS ring for each.
3461 * Essentially, we are doing min(cpu_count, msix_vector_count).
c4e84bde 3462 */
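 /* Resulting rx_ring[] layout, as filled in below: indices
 * 0 .. rss_ring_count-1 are inbound RSS rings with small/large
 * buffer queues (type RX_Q); indices rss_ring_count ..
 * rx_ring_count-1 are outbound completion rings (type TX_Q)
 * with no buffer queues.
 */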
a4ab6137
RM
3463 qdev->intr_count = cpu_cnt;
3464 ql_enable_msix(qdev);
3465 /* Adjust the RSS ring count to the actual vector count. */
3466 qdev->rss_ring_count = qdev->intr_count;
c4e84bde 3467 qdev->tx_ring_count = cpu_cnt;
b2014ff8 3468 qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;
c4e84bde 3469
c4e84bde
RM
3470 for (i = 0; i < qdev->tx_ring_count; i++) {
3471 tx_ring = &qdev->tx_ring[i];
e332471c 3472 memset((void *)tx_ring, 0, sizeof(*tx_ring));
c4e84bde
RM
3473 tx_ring->qdev = qdev;
3474 tx_ring->wq_id = i;
3475 tx_ring->wq_len = qdev->tx_ring_size;
3476 tx_ring->wq_size =
3477 tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);
3478
3479 /*
3480 * The completion queue ID for the tx rings start
39aa8165 3481 * immediately after the rss rings.
c4e84bde 3482 */
39aa8165 3483 tx_ring->cq_id = qdev->rss_ring_count + i;
c4e84bde
RM
3484 }
3485
3486 for (i = 0; i < qdev->rx_ring_count; i++) {
3487 rx_ring = &qdev->rx_ring[i];
e332471c 3488 memset((void *)rx_ring, 0, sizeof(*rx_ring));
c4e84bde
RM
3489 rx_ring->qdev = qdev;
3490 rx_ring->cq_id = i;
3491 rx_ring->cpu = i % cpu_cnt; /* CPU to run handler on. */
b2014ff8 3492 if (i < qdev->rss_ring_count) {
39aa8165
RM
3493 /*
3494 * Inbound (RSS) queues.
3495 */
c4e84bde
RM
3496 rx_ring->cq_len = qdev->rx_ring_size;
3497 rx_ring->cq_size =
3498 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
3499 rx_ring->lbq_len = NUM_LARGE_BUFFERS;
3500 rx_ring->lbq_size =
2c9a0d41 3501 rx_ring->lbq_len * sizeof(__le64);
c4e84bde
RM
3502 rx_ring->lbq_buf_size = LARGE_BUFFER_SIZE;
3503 rx_ring->sbq_len = NUM_SMALL_BUFFERS;
3504 rx_ring->sbq_size =
2c9a0d41 3505 rx_ring->sbq_len * sizeof(__le64);
c4e84bde 3506 rx_ring->sbq_buf_size = SMALL_BUFFER_SIZE * 2;
b2014ff8
RM
3507 rx_ring->type = RX_Q;
3508 } else {
c4e84bde
RM
3509 /*
3510 * Outbound queue handles outbound completions only.
3511 */
3512 /* outbound cq is same size as tx_ring it services. */
3513 rx_ring->cq_len = qdev->tx_ring_size;
3514 rx_ring->cq_size =
3515 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
3516 rx_ring->lbq_len = 0;
3517 rx_ring->lbq_size = 0;
3518 rx_ring->lbq_buf_size = 0;
3519 rx_ring->sbq_len = 0;
3520 rx_ring->sbq_size = 0;
3521 rx_ring->sbq_buf_size = 0;
3522 rx_ring->type = TX_Q;
c4e84bde
RM
3523 }
3524 }
3525 return 0;
3526}
3527
3528static int qlge_open(struct net_device *ndev)
3529{
3530 int err = 0;
3531 struct ql_adapter *qdev = netdev_priv(ndev);
3532
3533 err = ql_configure_rings(qdev);
3534 if (err)
3535 return err;
3536
3537 err = ql_get_adapter_resources(qdev);
3538 if (err)
3539 goto error_up;
3540
3541 err = ql_adapter_up(qdev);
3542 if (err)
3543 goto error_up;
3544
3545 return err;
3546
3547error_up:
3548 ql_release_adapter_resources(qdev);
c4e84bde
RM
3549 return err;
3550}
3551
3552static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
3553{
3554 struct ql_adapter *qdev = netdev_priv(ndev);
3555
3556 if (ndev->mtu == 1500 && new_mtu == 9000) {
3557 QPRINTK(qdev, IFUP, ERR, "Changing to jumbo MTU.\n");
bcc2cb3b
RM
3558 queue_delayed_work(qdev->workqueue,
3559 &qdev->mpi_port_cfg_work, 0);
c4e84bde
RM
3560 } else if (ndev->mtu == 9000 && new_mtu == 1500) {
3561 QPRINTK(qdev, IFUP, ERR, "Changing to normal MTU.\n");
3562 } else if ((ndev->mtu == 1500 && new_mtu == 1500) ||
3563 (ndev->mtu == 9000 && new_mtu == 9000)) {
3564 return 0;
3565 } else
3566 return -EINVAL;
3567 ndev->mtu = new_mtu;
3568 return 0;
3569}
3570
3571static struct net_device_stats *qlge_get_stats(struct net_device
3572 *ndev)
3573{
3574 struct ql_adapter *qdev = netdev_priv(ndev);
3575 return &qdev->stats;
3576}
3577
3578static void qlge_set_multicast_list(struct net_device *ndev)
3579{
3580 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
3581 struct dev_mc_list *mc_ptr;
cc288f54 3582 int i, status;
c4e84bde 3583
cc288f54
RM
3584 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3585 if (status)
3586 return;
c4e84bde
RM
3587 /*
3588 * Set or clear promiscuous mode if a
3589 * transition is taking place.
3590 */
3591 if (ndev->flags & IFF_PROMISC) {
3592 if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
3593 if (ql_set_routing_reg
3594 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
3595 QPRINTK(qdev, HW, ERR,
3596 "Failed to set promiscous mode.\n");
3597 } else {
3598 set_bit(QL_PROMISCUOUS, &qdev->flags);
3599 }
3600 }
3601 } else {
3602 if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
3603 if (ql_set_routing_reg
3604 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
3605 QPRINTK(qdev, HW, ERR,
3606 "Failed to clear promiscous mode.\n");
3607 } else {
3608 clear_bit(QL_PROMISCUOUS, &qdev->flags);
3609 }
3610 }
3611 }
3612
3613 /*
3614 * Set or clear all multicast mode if a
3615 * transition is taking place.
3616 */
3617 if ((ndev->flags & IFF_ALLMULTI) ||
3618 (ndev->mc_count > MAX_MULTICAST_ENTRIES)) {
3619 if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
3620 if (ql_set_routing_reg
3621 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
3622 QPRINTK(qdev, HW, ERR,
3623 "Failed to set all-multi mode.\n");
3624 } else {
3625 set_bit(QL_ALLMULTI, &qdev->flags);
3626 }
3627 }
3628 } else {
3629 if (test_bit(QL_ALLMULTI, &qdev->flags)) {
3630 if (ql_set_routing_reg
3631 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
3632 QPRINTK(qdev, HW, ERR,
3633 "Failed to clear all-multi mode.\n");
3634 } else {
3635 clear_bit(QL_ALLMULTI, &qdev->flags);
3636 }
3637 }
3638 }
3639
3640 if (ndev->mc_count) {
cc288f54
RM
3641 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
3642 if (status)
3643 goto exit;
c4e84bde
RM
3644 for (i = 0, mc_ptr = ndev->mc_list; mc_ptr;
3645 i++, mc_ptr = mc_ptr->next)
3646 if (ql_set_mac_addr_reg(qdev, (u8 *) mc_ptr->dmi_addr,
3647 MAC_ADDR_TYPE_MULTI_MAC, i)) {
3648 QPRINTK(qdev, HW, ERR,
3649 "Failed to loadmulticast address.\n");
cc288f54 3650 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
c4e84bde
RM
3651 goto exit;
3652 }
cc288f54 3653 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
c4e84bde
RM
3654 if (ql_set_routing_reg
3655 (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
3656 QPRINTK(qdev, HW, ERR,
3657 "Failed to set multicast match mode.\n");
3658 } else {
3659 set_bit(QL_ALLMULTI, &qdev->flags);
3660 }
3661 }
3662exit:
8587ea35 3663 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
c4e84bde
RM
3664}
3665
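/* ndo_set_mac_address: refuse the change while the interface is up,
 * then program the new station address into this function's CAM slot
 * (func * MAX_CQ) under the MAC-address hardware semaphore.
 */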
3666static int qlge_set_mac_address(struct net_device *ndev, void *p)
3667{
3668 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
3669 struct sockaddr *addr = p;
cc288f54 3670 int status;
c4e84bde
RM
3671
3672 if (netif_running(ndev))
3673 return -EBUSY;
3674
3675 if (!is_valid_ether_addr(addr->sa_data))
3676 return -EADDRNOTAVAIL;
3677 memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
3678
cc288f54
RM
3679 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
3680 if (status)
3681 return status;
cc288f54
RM
3682 status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
3683 MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
cc288f54
RM
3684 if (status)
3685 QPRINTK(qdev, HW, ERR, "Failed to load MAC address.\n");
3686 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
3687 return status;
c4e84bde
RM
3688}
3689
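/* The netdev watchdog fired: flag an ASIC error so the recovery
 * worker resets the adapter.
 */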
3690static void qlge_tx_timeout(struct net_device *ndev)
3691{
3692 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
6497b607 3693 ql_queue_asic_error(qdev);
c4e84bde
RM
3694}
3695
3696static void ql_asic_reset_work(struct work_struct *work)
3697{
3698 struct ql_adapter *qdev =
3699 container_of(work, struct ql_adapter, asic_reset_work.work);
db98812f 3700 int status;
f2c0d8df 3701 rtnl_lock();
db98812f
RM
3702 status = ql_adapter_down(qdev);
3703 if (status)
3704 goto error;
3705
3706 status = ql_adapter_up(qdev);
3707 if (status)
3708 goto error;
f2c0d8df 3709 rtnl_unlock();
db98812f
RM
3710 return;
3711error:
3712 QPRINTK(qdev, IFUP, ALERT,
3713 "Driver up/down cycle failed, closing device\n");
f2c0d8df 3714
db98812f
RM
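	/* Mark the adapter up again so the close path can still run a
	 * full teardown.
	 */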
3715 set_bit(QL_ADAPTER_UP, &qdev->flags);
3716 dev_close(qdev->ndev);
3717 rtnl_unlock();
c4e84bde
RM
3718}
3719
b0c2aadf
RM
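/* Chip-specific operations; ql_get_board_info() selects the set that
 * matches the PCI device ID (8012 vs. 8000).
 */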
3720static struct nic_operations qla8012_nic_ops = {
3721 .get_flash = ql_get_8012_flash_params,
3722 .port_initialize = ql_8012_port_initialize,
3723};
3724
cdca8d02
RM
3725static struct nic_operations qla8000_nic_ops = {
3726 .get_flash = ql_get_8000_flash_params,
3727 .port_initialize = ql_8000_port_initialize,
3728};
3729
e4552f51
RM
3730/* Find the pcie function number for the other NIC
3731 * on this chip. Since both NIC functions share a
3732 * common firmware we have the lowest enabled function
3733 * do any common work. Examples would be resetting
3734 * after a fatal firmware error, or doing a firmware
3735 * coredump.
3736 */
3737static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
3738{
3739 int status = 0;
3740 u32 temp;
3741 u32 nic_func1, nic_func2;
3742
3743 status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
3744 &temp);
3745 if (status)
3746 return status;
3747
3748 nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
3749 MPI_TEST_NIC_FUNC_MASK);
3750 nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
3751 MPI_TEST_NIC_FUNC_MASK);
3752
3753 if (qdev->func == nic_func1)
3754 qdev->alt_func = nic_func2;
3755 else if (qdev->func == nic_func2)
3756 qdev->alt_func = nic_func1;
3757 else
3758 status = -EIO;
3759
3760 return status;
3761}
b0c2aadf 3762
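/* Read the function ID from the status register, find the alternate
 * NIC function, then derive the port number, XGMAC semaphore, mailbox
 * addresses and chip-specific nic_ops for this device.
 */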
e4552f51 3763static int ql_get_board_info(struct ql_adapter *qdev)
c4e84bde 3764{
e4552f51 3765 int status;
c4e84bde
RM
3766 qdev->func =
3767 (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
e4552f51
RM
3768 if (qdev->func > 3)
3769 return -EIO;
3770
3771 status = ql_get_alt_pcie_func(qdev);
3772 if (status)
3773 return status;
3774
3775 qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
3776 if (qdev->port) {
c4e84bde
RM
3777 qdev->xg_sem_mask = SEM_XGMAC1_MASK;
3778 qdev->port_link_up = STS_PL1;
3779 qdev->port_init = STS_PI1;
3780 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
3781 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
3782 } else {
3783 qdev->xg_sem_mask = SEM_XGMAC0_MASK;
3784 qdev->port_link_up = STS_PL0;
3785 qdev->port_init = STS_PI0;
3786 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
3787 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
3788 }
3789 qdev->chip_rev_id = ql_read32(qdev, REV_ID);
b0c2aadf
RM
3790 qdev->device_id = qdev->pdev->device;
3791 if (qdev->device_id == QLGE_DEVICE_ID_8012)
3792 qdev->nic_ops = &qla8012_nic_ops;
cdca8d02
RM
3793 else if (qdev->device_id == QLGE_DEVICE_ID_8000)
3794 qdev->nic_ops = &qla8000_nic_ops;
e4552f51 3795 return status;
c4e84bde
RM
3796}
3797
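/* Undo ql_init_device(): destroy the workqueue, unmap the register
 * and doorbell BARs and release the PCI regions.
 */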
3798static void ql_release_all(struct pci_dev *pdev)
3799{
3800 struct net_device *ndev = pci_get_drvdata(pdev);
3801 struct ql_adapter *qdev = netdev_priv(ndev);
3802
3803 if (qdev->workqueue) {
3804 destroy_workqueue(qdev->workqueue);
3805 qdev->workqueue = NULL;
3806 }
39aa8165 3807
c4e84bde 3808 if (qdev->reg_base)
8668ae92 3809 iounmap(qdev->reg_base);
c4e84bde
RM
3810 if (qdev->doorbell_area)
3811 iounmap(qdev->doorbell_area);
3812 pci_release_regions(pdev);
3813 pci_set_drvdata(pdev, NULL);
3814}
3815
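/* One-time PCI/board setup: enable the device, enable PCIe error
 * reporting, choose a DMA mask, map the control and doorbell BARs,
 * identify the board, read the flash, and initialize the default ring
 * sizes, coalescing parameters and delayed-work handlers.
 */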
3816static int __devinit ql_init_device(struct pci_dev *pdev,
3817 struct net_device *ndev, int cards_found)
3818{
3819 struct ql_adapter *qdev = netdev_priv(ndev);
3820 int pos, err = 0;
3821 u16 val16;
3822
e332471c 3823 memset((void *)qdev, 0, sizeof(*qdev));
c4e84bde
RM
3824 err = pci_enable_device(pdev);
3825 if (err) {
3826 dev_err(&pdev->dev, "PCI device enable failed.\n");
3827 return err;
3828 }
3829
ebd6e774
RM
3830 qdev->ndev = ndev;
3831 qdev->pdev = pdev;
3832 pci_set_drvdata(pdev, ndev);
c4e84bde
RM
3833 pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
3834 if (pos <= 0) {
3835 dev_err(&pdev->dev, PFX "Cannot find PCI Express capability, "
3836 "aborting.\n");
ebd6e774 3837                return -ENXIO;
c4e84bde
RM
3838 } else {
3839 pci_read_config_word(pdev, pos + PCI_EXP_DEVCTL, &val16);
3840 val16 &= ~PCI_EXP_DEVCTL_NOSNOOP_EN;
3841 val16 |= (PCI_EXP_DEVCTL_CERE |
3842 PCI_EXP_DEVCTL_NFERE |
3843 PCI_EXP_DEVCTL_FERE | PCI_EXP_DEVCTL_URRE);
3844 pci_write_config_word(pdev, pos + PCI_EXP_DEVCTL, val16);
3845 }
3846
3847 err = pci_request_regions(pdev, DRV_NAME);
3848 if (err) {
3849 dev_err(&pdev->dev, "PCI region request failed.\n");
ebd6e774 3850 return err;
c4e84bde
RM
3851 }
3852
3853 pci_set_master(pdev);
6a35528a 3854 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
c4e84bde 3855 set_bit(QL_DMA64, &qdev->flags);
6a35528a 3856 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
c4e84bde 3857 } else {
284901a9 3858 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
c4e84bde 3859 if (!err)
284901a9 3860 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
c4e84bde
RM
3861 }
3862
3863 if (err) {
3864 dev_err(&pdev->dev, "No usable DMA configuration.\n");
3865 goto err_out;
3866 }
3867
c4e84bde
RM
3868 qdev->reg_base =
3869 ioremap_nocache(pci_resource_start(pdev, 1),
3870 pci_resource_len(pdev, 1));
3871 if (!qdev->reg_base) {
3872 dev_err(&pdev->dev, "Register mapping failed.\n");
3873 err = -ENOMEM;
3874 goto err_out;
3875 }
3876
3877 qdev->doorbell_area_size = pci_resource_len(pdev, 3);
3878 qdev->doorbell_area =
3879 ioremap_nocache(pci_resource_start(pdev, 3),
3880 pci_resource_len(pdev, 3));
3881 if (!qdev->doorbell_area) {
3882 dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
3883 err = -ENOMEM;
3884 goto err_out;
3885 }
3886
e4552f51
RM
3887 err = ql_get_board_info(qdev);
3888 if (err) {
3889 dev_err(&pdev->dev, "Register access failed.\n");
3890 err = -EIO;
3891 goto err_out;
3892 }
c4e84bde
RM
3893 qdev->msg_enable = netif_msg_init(debug, default_msg);
3894 spin_lock_init(&qdev->hw_lock);
3895 spin_lock_init(&qdev->stats_lock);
3896
3897 /* make sure the EEPROM is good */
b0c2aadf 3898 err = qdev->nic_ops->get_flash(qdev);
c4e84bde
RM
3899 if (err) {
3900 dev_err(&pdev->dev, "Invalid FLASH.\n");
3901 goto err_out;
3902 }
3903
c4e84bde
RM
3904 memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
3905
3906 /* Set up the default ring sizes. */
3907 qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
3908 qdev->rx_ring_size = NUM_RX_RING_ENTRIES;
3909
3910 /* Set up the coalescing parameters. */
3911 qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
3912 qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
3913 qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
3914 qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
3915
3916 /*
3917 * Set up the operating parameters.
3918 */
3919 qdev->rx_csum = 1;
c4e84bde
RM
3920 qdev->workqueue = create_singlethread_workqueue(ndev->name);
3921 INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
3922 INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
3923 INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
bcc2cb3b 3924 INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
2ee1e272 3925 INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
bcc2cb3b 3926 init_completion(&qdev->ide_completion);
c4e84bde
RM
3927
3928 if (!cards_found) {
3929 dev_info(&pdev->dev, "%s\n", DRV_STRING);
3930 dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
3931 DRV_NAME, DRV_VERSION);
3932 }
3933 return 0;
3934err_out:
3935 ql_release_all(pdev);
3936 pci_disable_device(pdev);
3937 return err;
3938}
3939
25ed7849
SH
3940
3941static const struct net_device_ops qlge_netdev_ops = {
3942 .ndo_open = qlge_open,
3943 .ndo_stop = qlge_close,
3944 .ndo_start_xmit = qlge_send,
3945 .ndo_change_mtu = qlge_change_mtu,
3946 .ndo_get_stats = qlge_get_stats,
3947 .ndo_set_multicast_list = qlge_set_multicast_list,
3948 .ndo_set_mac_address = qlge_set_mac_address,
3949 .ndo_validate_addr = eth_validate_addr,
3950 .ndo_tx_timeout = qlge_tx_timeout,
3951 .ndo_vlan_rx_register = ql_vlan_rx_register,
3952 .ndo_vlan_rx_add_vid = ql_vlan_rx_add_vid,
3953 .ndo_vlan_rx_kill_vid = ql_vlan_rx_kill_vid,
3954};
3955
c4e84bde
RM
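/* Allocate a multiqueue net_device (one TX queue per online CPU,
 * capped at MAX_CPUS), initialize the board and register the netdev.
 */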
3956static int __devinit qlge_probe(struct pci_dev *pdev,
3957 const struct pci_device_id *pci_entry)
3958{
3959 struct net_device *ndev = NULL;
3960 struct ql_adapter *qdev = NULL;
3961 static int cards_found = 0;
3962 int err = 0;
3963
1e213303
RM
3964 ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
3965 min(MAX_CPUS, (int)num_online_cpus()));
c4e84bde
RM
3966 if (!ndev)
3967 return -ENOMEM;
3968
3969 err = ql_init_device(pdev, ndev, cards_found);
3970 if (err < 0) {
3971 free_netdev(ndev);
3972 return err;
3973 }
3974
3975 qdev = netdev_priv(ndev);
3976 SET_NETDEV_DEV(ndev, &pdev->dev);
3977 ndev->features = (0
3978 | NETIF_F_IP_CSUM
3979 | NETIF_F_SG
3980 | NETIF_F_TSO
3981 | NETIF_F_TSO6
3982 | NETIF_F_TSO_ECN
3983 | NETIF_F_HW_VLAN_TX
3984 | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER);
22bdd4f5 3985 ndev->features |= NETIF_F_GRO;
c4e84bde
RM
3986
3987 if (test_bit(QL_DMA64, &qdev->flags))
3988 ndev->features |= NETIF_F_HIGHDMA;
3989
3990 /*
3991 * Set up net_device structure.
3992 */
3993 ndev->tx_queue_len = qdev->tx_ring_size;
3994 ndev->irq = pdev->irq;
25ed7849
SH
3995
3996 ndev->netdev_ops = &qlge_netdev_ops;
c4e84bde 3997 SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops);
c4e84bde 3998 ndev->watchdog_timeo = 10 * HZ;
25ed7849 3999
c4e84bde
RM
4000 err = register_netdev(ndev);
4001 if (err) {
4002 dev_err(&pdev->dev, "net device registration failed.\n");
4003 ql_release_all(pdev);
4004 pci_disable_device(pdev);
4005 return err;
4006 }
6a473308 4007 ql_link_off(qdev);
c4e84bde
RM
4008 ql_display_dev_info(ndev);
4009 cards_found++;
4010 return 0;
4011}
4012
4013static void __devexit qlge_remove(struct pci_dev *pdev)
4014{
4015 struct net_device *ndev = pci_get_drvdata(pdev);
4016 unregister_netdev(ndev);
4017 ql_release_all(pdev);
4018 pci_disable_device(pdev);
4019 free_netdev(ndev);
4020}
4021
4022/*
4023 * This callback is called by the PCI subsystem whenever
4024 * a PCI bus error is detected.
4025 */
4026static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
4027 enum pci_channel_state state)
4028{
4029 struct net_device *ndev = pci_get_drvdata(pdev);
4030 struct ql_adapter *qdev = netdev_priv(ndev);
4031
fbc663ce
DN
4032 netif_device_detach(ndev);
4033
4034 if (state == pci_channel_io_perm_failure)
4035 return PCI_ERS_RESULT_DISCONNECT;
4036
c4e84bde
RM
4037 if (netif_running(ndev))
4038 ql_adapter_down(qdev);
4039
4040 pci_disable_device(pdev);
4041
4042 /* Request a slot reset. */
4043 return PCI_ERS_RESULT_NEED_RESET;
4044}
4045
4046/*
 4047 * This callback is called after the PCI bus has been reset.
4048 * Basically, this tries to restart the card from scratch.
4049 * This is a shortened version of the device probe/discovery code,
 4050 * it resembles the first half of the probe routine.
4051 */
4052static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
4053{
4054 struct net_device *ndev = pci_get_drvdata(pdev);
4055 struct ql_adapter *qdev = netdev_priv(ndev);
4056
4057 if (pci_enable_device(pdev)) {
4058 QPRINTK(qdev, IFUP, ERR,
4059 "Cannot re-enable PCI device after reset.\n");
4060 return PCI_ERS_RESULT_DISCONNECT;
4061 }
4062
4063 pci_set_master(pdev);
4064
4065 netif_carrier_off(ndev);
c4e84bde
RM
4066 ql_adapter_reset(qdev);
4067
4068 /* Make sure the EEPROM is good */
4069 memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
4070
4071 if (!is_valid_ether_addr(ndev->perm_addr)) {
4072 QPRINTK(qdev, IFUP, ERR, "After reset, invalid MAC address.\n");
4073 return PCI_ERS_RESULT_DISCONNECT;
4074 }
4075
4076 return PCI_ERS_RESULT_RECOVERED;
4077}
4078
4079static void qlge_io_resume(struct pci_dev *pdev)
4080{
4081 struct net_device *ndev = pci_get_drvdata(pdev);
4082 struct ql_adapter *qdev = netdev_priv(ndev);
4083
4084 pci_set_master(pdev);
4085
4086 if (netif_running(ndev)) {
4087 if (ql_adapter_up(qdev)) {
4088 QPRINTK(qdev, IFUP, ERR,
4089 "Device initialization failed after reset.\n");
4090 return;
4091 }
4092 }
4093
4094 netif_device_attach(ndev);
4095}
4096
4097static struct pci_error_handlers qlge_err_handler = {
4098 .error_detected = qlge_io_error_detected,
4099 .slot_reset = qlge_io_slot_reset,
4100 .resume = qlge_io_resume,
4101};
4102
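/* Suspend handler, also used by qlge_shutdown(): bring the adapter
 * down if it is running, then save PCI state and power the device down.
 */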
4103static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
4104{
4105 struct net_device *ndev = pci_get_drvdata(pdev);
4106 struct ql_adapter *qdev = netdev_priv(ndev);
6b318cb3 4107 int err;
c4e84bde
RM
4108
4109 netif_device_detach(ndev);
4110
4111 if (netif_running(ndev)) {
4112 err = ql_adapter_down(qdev);
 4113                if (err)
4114 return err;
4115 }
4116
4117 err = pci_save_state(pdev);
4118 if (err)
4119 return err;
4120
4121 pci_disable_device(pdev);
4122
4123 pci_set_power_state(pdev, pci_choose_state(pdev, state));
4124
4125 return 0;
4126}
4127
04da2cf9 4128#ifdef CONFIG_PM
c4e84bde
RM
4129static int qlge_resume(struct pci_dev *pdev)
4130{
4131 struct net_device *ndev = pci_get_drvdata(pdev);
4132 struct ql_adapter *qdev = netdev_priv(ndev);
4133 int err;
4134
4135 pci_set_power_state(pdev, PCI_D0);
4136 pci_restore_state(pdev);
4137 err = pci_enable_device(pdev);
4138 if (err) {
4139 QPRINTK(qdev, IFUP, ERR, "Cannot enable PCI device from suspend\n");
4140 return err;
4141 }
4142 pci_set_master(pdev);
4143
4144 pci_enable_wake(pdev, PCI_D3hot, 0);
4145 pci_enable_wake(pdev, PCI_D3cold, 0);
4146
4147 if (netif_running(ndev)) {
4148 err = ql_adapter_up(qdev);
4149 if (err)
4150 return err;
4151 }
4152
4153 netif_device_attach(ndev);
4154
4155 return 0;
4156}
04da2cf9 4157#endif /* CONFIG_PM */
c4e84bde
RM
4158
4159static void qlge_shutdown(struct pci_dev *pdev)
4160{
4161 qlge_suspend(pdev, PMSG_SUSPEND);
4162}
4163
4164static struct pci_driver qlge_driver = {
4165 .name = DRV_NAME,
4166 .id_table = qlge_pci_tbl,
4167 .probe = qlge_probe,
4168 .remove = __devexit_p(qlge_remove),
4169#ifdef CONFIG_PM
4170 .suspend = qlge_suspend,
4171 .resume = qlge_resume,
4172#endif
4173 .shutdown = qlge_shutdown,
4174 .err_handler = &qlge_err_handler
4175};
4176
4177static int __init qlge_init_module(void)
4178{
4179 return pci_register_driver(&qlge_driver);
4180}
4181
4182static void __exit qlge_exit(void)
4183{
4184 pci_unregister_driver(&qlge_driver);
4185}
4186
4187module_init(qlge_init_module);
4188module_exit(qlge_exit);