qlge: Fix MAC address bonding issue.
[deliverable/linux.git] / drivers / net / qlge / qlge_main.c
1/*
2 * QLogic qlge NIC HBA Driver
3 * Copyright (c) 2003-2008 QLogic Corporation
4 * See LICENSE.qlge for copyright and licensing details.
5 * Author: Linux qlge network device driver by
6 * Ron Mercer <ron.mercer@qlogic.com>
7 */
8#include <linux/kernel.h>
9#include <linux/init.h>
10#include <linux/types.h>
11#include <linux/module.h>
12#include <linux/list.h>
13#include <linux/pci.h>
14#include <linux/dma-mapping.h>
15#include <linux/pagemap.h>
16#include <linux/sched.h>
17#include <linux/slab.h>
18#include <linux/dmapool.h>
19#include <linux/mempool.h>
20#include <linux/spinlock.h>
21#include <linux/kthread.h>
22#include <linux/interrupt.h>
23#include <linux/errno.h>
24#include <linux/ioport.h>
25#include <linux/in.h>
26#include <linux/ip.h>
27#include <linux/ipv6.h>
28#include <net/ipv6.h>
29#include <linux/tcp.h>
30#include <linux/udp.h>
31#include <linux/if_arp.h>
32#include <linux/if_ether.h>
33#include <linux/netdevice.h>
34#include <linux/etherdevice.h>
35#include <linux/ethtool.h>
36#include <linux/skbuff.h>
37#include <linux/rtnetlink.h>
38#include <linux/if_vlan.h>
39#include <linux/delay.h>
40#include <linux/mm.h>
41#include <linux/vmalloc.h>
42#include <net/ip6_checksum.h>
43
44#include "qlge.h"
45
46char qlge_driver_name[] = DRV_NAME;
47const char qlge_driver_version[] = DRV_VERSION;
48
49MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
50MODULE_DESCRIPTION(DRV_STRING " ");
51MODULE_LICENSE("GPL");
52MODULE_VERSION(DRV_VERSION);
53
54static const u32 default_msg =
55 NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
56/* NETIF_MSG_TIMER | */
57 NETIF_MSG_IFDOWN |
58 NETIF_MSG_IFUP |
59 NETIF_MSG_RX_ERR |
60 NETIF_MSG_TX_ERR |
61/* NETIF_MSG_TX_QUEUED | */
62/* NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
63/* NETIF_MSG_PKTDATA | */
64 NETIF_MSG_HW | NETIF_MSG_WOL | 0;
65
66static int debug = 0x00007fff; /* defaults above */
67module_param(debug, int, 0);
68MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
69
70#define MSIX_IRQ 0
71#define MSI_IRQ 1
72#define LEG_IRQ 2
73static int irq_type = MSIX_IRQ;
74module_param(irq_type, int, MSIX_IRQ);
75MODULE_PARM_DESC(irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
76
77static struct pci_device_id qlge_pci_tbl[] __devinitdata = {
78 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
79 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
80 /* required last entry */
81 {0,}
82};
83
84MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
85
86/* This hardware semaphore causes exclusive access to
87 * resources shared between the NIC driver, MPI firmware,
88 * FCOE firmware and the FC driver.
89 */
90static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
91{
92 u32 sem_bits = 0;
93
94 switch (sem_mask) {
95 case SEM_XGMAC0_MASK:
96 sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
97 break;
98 case SEM_XGMAC1_MASK:
99 sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
100 break;
101 case SEM_ICB_MASK:
102 sem_bits = SEM_SET << SEM_ICB_SHIFT;
103 break;
104 case SEM_MAC_ADDR_MASK:
105 sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
106 break;
107 case SEM_FLASH_MASK:
108 sem_bits = SEM_SET << SEM_FLASH_SHIFT;
109 break;
110 case SEM_PROBE_MASK:
111 sem_bits = SEM_SET << SEM_PROBE_SHIFT;
112 break;
113 case SEM_RT_IDX_MASK:
114 sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
115 break;
116 case SEM_PROC_REG_MASK:
117 sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
118 break;
119 default:
120 QPRINTK(qdev, PROBE, ALERT, "Bad Semaphore mask!.\n");
121 return -EINVAL;
122 }
123
124 ql_write32(qdev, SEM, sem_bits | sem_mask);
125 return !(ql_read32(qdev, SEM) & sem_bits);
126}
127
128int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
129{
130 unsigned int wait_count = 30;
131 do {
132 if (!ql_sem_trylock(qdev, sem_mask))
133 return 0;
134 udelay(100);
135 } while (--wait_count);
136 return -ETIMEDOUT;
137}
138
139void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
140{
141 ql_write32(qdev, SEM, sem_mask);
142 ql_read32(qdev, SEM); /* flush */
143}
144
145/* This function waits for a specific bit to come ready
146 * in a given register. It is used mostly by the initialize
147 * process, but is also used in kernel thread API such as
148 * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
149 */
150int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
151{
152 u32 temp;
153 int count = UDELAY_COUNT;
154
155 while (count) {
156 temp = ql_read32(qdev, reg);
157
158 /* check for errors */
159 if (temp & err_bit) {
160 QPRINTK(qdev, PROBE, ALERT,
161 "register 0x%.08x access error, value = 0x%.08x!.\n",
162 reg, temp);
163 return -EIO;
164 } else if (temp & bit)
165 return 0;
166 udelay(UDELAY_DELAY);
167 count--;
168 }
169 QPRINTK(qdev, PROBE, ALERT,
170 "Timed out waiting for reg %x to come ready.\n", reg);
171 return -ETIMEDOUT;
172}
173
174/* The CFG register is used to download TX and RX control blocks
175 * to the chip. This function waits for an operation to complete.
176 */
177static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
178{
179 int count = UDELAY_COUNT;
180 u32 temp;
181
182 while (count) {
183 temp = ql_read32(qdev, CFG);
184 if (temp & CFG_LE)
185 return -EIO;
186 if (!(temp & bit))
187 return 0;
188 udelay(UDELAY_DELAY);
189 count--;
190 }
191 return -ETIMEDOUT;
192}
193
194
195/* Used to issue init control blocks to hw. Maps control block,
196 * sets address, triggers download, waits for completion.
197 */
198int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
199 u16 q_id)
200{
201 u64 map;
202 int status = 0;
203 int direction;
204 u32 mask;
205 u32 value;
206
207 direction =
208 (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
209 PCI_DMA_FROMDEVICE;
210
211 map = pci_map_single(qdev->pdev, ptr, size, direction);
212 if (pci_dma_mapping_error(qdev->pdev, map)) {
213 QPRINTK(qdev, IFUP, ERR, "Couldn't map DMA area.\n");
214 return -ENOMEM;
215 }
216
217 status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
218 if (status)
219 return status;
220
221 status = ql_wait_cfg(qdev, bit);
222 if (status) {
223 QPRINTK(qdev, IFUP, ERR,
224 "Timed out waiting for CFG to come ready.\n");
225 goto exit;
226 }
227
228 ql_write32(qdev, ICB_L, (u32) map);
229 ql_write32(qdev, ICB_H, (u32) (map >> 32));
230
231 mask = CFG_Q_MASK | (bit << 16);
232 value = bit | (q_id << CFG_Q_SHIFT);
233 ql_write32(qdev, CFG, (mask | value));
234
235 /*
236 * Wait for the bit to clear after signaling hw.
237 */
238 status = ql_wait_cfg(qdev, bit);
239exit:
240 ql_sem_unlock(qdev, SEM_ICB_MASK); /* does flush too */
241 pci_unmap_single(qdev->pdev, map, size, direction);
242 return status;
243}
244
245/* Get a specific MAC address from the CAM. Used for debug and reg dump. */
246int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
247 u32 *value)
248{
249 u32 offset = 0;
250 int status;
251
252 switch (type) {
253 case MAC_ADDR_TYPE_MULTI_MAC:
254 case MAC_ADDR_TYPE_CAM_MAC:
255 {
256 status =
257 ql_wait_reg_rdy(qdev,
258 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
259 if (status)
260 goto exit;
261 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
262 (index << MAC_ADDR_IDX_SHIFT) | /* index */
263 MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
264 status =
265 ql_wait_reg_rdy(qdev,
266 MAC_ADDR_IDX, MAC_ADDR_MR, 0);
267 if (status)
268 goto exit;
269 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
270 status =
271 ql_wait_reg_rdy(qdev,
272 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
273 if (status)
274 goto exit;
275 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
276 (index << MAC_ADDR_IDX_SHIFT) | /* index */
277 MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
278 status =
279 ql_wait_reg_rdy(qdev,
280 MAC_ADDR_IDX, MAC_ADDR_MR, 0);
281 if (status)
282 goto exit;
283 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
284 if (type == MAC_ADDR_TYPE_CAM_MAC) {
285 status =
286 ql_wait_reg_rdy(qdev,
287 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
288 if (status)
289 goto exit;
290 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
291 (index << MAC_ADDR_IDX_SHIFT) | /* index */
292 MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
293 status =
294 ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
295 MAC_ADDR_MR, 0);
296 if (status)
297 goto exit;
298 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
299 }
300 break;
301 }
302 case MAC_ADDR_TYPE_VLAN:
303 case MAC_ADDR_TYPE_MULTI_FLTR:
304 default:
305 QPRINTK(qdev, IFUP, CRIT,
306 "Address type %d not yet supported.\n", type);
307 status = -EPERM;
308 }
309exit:
310 return status;
311}
312
313/* Set up a MAC, multicast or VLAN address for the
314 * inbound frame matching.
315 */
316static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
317 u16 index)
318{
319 u32 offset = 0;
320 int status = 0;
321
322 switch (type) {
323 case MAC_ADDR_TYPE_MULTI_MAC:
324 case MAC_ADDR_TYPE_CAM_MAC:
325 {
326 u32 cam_output;
327 u32 upper = (addr[0] << 8) | addr[1];
328 u32 lower =
329 (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
330 (addr[5]);
331
332 QPRINTK(qdev, IFUP, DEBUG,
333 "Adding %s address %pM"
334 " at index %d in the CAM.\n",
335 ((type ==
336 MAC_ADDR_TYPE_MULTI_MAC) ? "MULTICAST" :
337 "UNICAST"), addr, index);
338
339 status =
340 ql_wait_reg_rdy(qdev,
341 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
342 if (status)
343 goto exit;
344 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
345 (index << MAC_ADDR_IDX_SHIFT) | /* index */
346 type); /* type */
347 ql_write32(qdev, MAC_ADDR_DATA, lower);
348 status =
349 ql_wait_reg_rdy(qdev,
350 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
351 if (status)
352 goto exit;
353 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
354 (index << MAC_ADDR_IDX_SHIFT) | /* index */
355 type); /* type */
356 ql_write32(qdev, MAC_ADDR_DATA, upper);
357 status =
358 ql_wait_reg_rdy(qdev,
359 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
360 if (status)
361 goto exit;
362 ql_write32(qdev, MAC_ADDR_IDX, (offset) | /* offset */
363 (index << MAC_ADDR_IDX_SHIFT) | /* index */
364 type); /* type */
365 /* This field should also include the queue id
366 and possibly the function id. Right now we hardcode
367 the route field to NIC core.
368 */
369 if (type == MAC_ADDR_TYPE_CAM_MAC) {
370 cam_output = (CAM_OUT_ROUTE_NIC |
371 (qdev->
372 func << CAM_OUT_FUNC_SHIFT) |
373 (qdev->
374 rss_ring_first_cq_id <<
375 CAM_OUT_CQ_ID_SHIFT));
376 if (qdev->vlgrp)
377 cam_output |= CAM_OUT_RV;
378 /* route to NIC core */
379 ql_write32(qdev, MAC_ADDR_DATA, cam_output);
380 }
381 break;
382 }
383 case MAC_ADDR_TYPE_VLAN:
384 {
385 u32 enable_bit = *((u32 *) &addr[0]);
386 /* For VLAN, the addr actually holds a bit that
387 * either enables or disables the vlan id we are
388 * addressing. It's either MAC_ADDR_E on or off.
389 * That's bit-27 we're talking about.
390 */
391 QPRINTK(qdev, IFUP, INFO, "%s VLAN ID %d %s the CAM.\n",
392 (enable_bit ? "Adding" : "Removing"),
393 index, (enable_bit ? "to" : "from"));
394
395 status =
396 ql_wait_reg_rdy(qdev,
397 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
398 if (status)
399 goto exit;
400 ql_write32(qdev, MAC_ADDR_IDX, offset | /* offset */
401 (index << MAC_ADDR_IDX_SHIFT) | /* index */
402 type | /* type */
403 enable_bit); /* enable/disable */
404 break;
405 }
406 case MAC_ADDR_TYPE_MULTI_FLTR:
407 default:
408 QPRINTK(qdev, IFUP, CRIT,
409 "Address type %d not yet supported.\n", type);
410 status = -EPERM;
411 }
412exit:
413 return status;
414}
415
416/* Set or clear MAC address in hardware. We sometimes
417 * have to clear it to prevent wrong frame routing
418 * especially in a bonding environment.
419 */
420static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
421{
422 int status;
423 char zero_mac_addr[ETH_ALEN];
424 char *addr;
425
426 if (set) {
427 addr = &qdev->ndev->dev_addr[0];
428 QPRINTK(qdev, IFUP, DEBUG,
429 "Set Mac addr %02x:%02x:%02x:%02x:%02x:%02x\n",
430 addr[0], addr[1], addr[2], addr[3],
431 addr[4], addr[5]);
432 } else {
433 memset(zero_mac_addr, 0, ETH_ALEN);
434 addr = &zero_mac_addr[0];
435 QPRINTK(qdev, IFUP, DEBUG,
436 "Clearing MAC address on %s\n",
437 qdev->ndev->name);
438 }
439 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
440 if (status)
441 return status;
442 status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
443 MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
444 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
445 if (status)
446 QPRINTK(qdev, IFUP, ERR, "Failed to init mac "
447 "address.\n");
448 return status;
449}
450
451/* Get a specific frame routing value from the CAM.
452 * Used for debug and reg dump.
453 */
454int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
455{
456 int status = 0;
457
458 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
459 if (status)
460 goto exit;
461
462 ql_write32(qdev, RT_IDX,
463 RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
464 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
465 if (status)
466 goto exit;
467 *value = ql_read32(qdev, RT_DATA);
468exit:
469 return status;
470}
471
472/* The NIC function for this chip has 16 routing indexes. Each one can be used
473 * to route different frame types to various inbound queues. We send broadcast/
474 * multicast/error frames to the default queue for slow handling,
475 * and CAM hit/RSS frames to the fast handling queues.
476 */
477static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
478 int enable)
479{
480 int status = -EINVAL; /* Return error if no mask match. */
481 u32 value = 0;
482
483 QPRINTK(qdev, IFUP, DEBUG,
484 "%s %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s mask %s the routing reg.\n",
485 (enable ? "Adding" : "Removing"),
486 ((index == RT_IDX_ALL_ERR_SLOT) ? "MAC ERROR/ALL ERROR" : ""),
487 ((index == RT_IDX_IP_CSUM_ERR_SLOT) ? "IP CSUM ERROR" : ""),
488 ((index ==
489 RT_IDX_TCP_UDP_CSUM_ERR_SLOT) ? "TCP/UDP CSUM ERROR" : ""),
490 ((index == RT_IDX_BCAST_SLOT) ? "BROADCAST" : ""),
491 ((index == RT_IDX_MCAST_MATCH_SLOT) ? "MULTICAST MATCH" : ""),
492 ((index == RT_IDX_ALLMULTI_SLOT) ? "ALL MULTICAST MATCH" : ""),
493 ((index == RT_IDX_UNUSED6_SLOT) ? "UNUSED6" : ""),
494 ((index == RT_IDX_UNUSED7_SLOT) ? "UNUSED7" : ""),
495 ((index == RT_IDX_RSS_MATCH_SLOT) ? "RSS ALL/IPV4 MATCH" : ""),
496 ((index == RT_IDX_RSS_IPV6_SLOT) ? "RSS IPV6" : ""),
497 ((index == RT_IDX_RSS_TCP4_SLOT) ? "RSS TCP4" : ""),
498 ((index == RT_IDX_RSS_TCP6_SLOT) ? "RSS TCP6" : ""),
499 ((index == RT_IDX_CAM_HIT_SLOT) ? "CAM HIT" : ""),
500 ((index == RT_IDX_UNUSED013) ? "UNUSED13" : ""),
501 ((index == RT_IDX_UNUSED014) ? "UNUSED14" : ""),
502 ((index == RT_IDX_PROMISCUOUS_SLOT) ? "PROMISCUOUS" : ""),
503 (enable ? "to" : "from"));
504
505 switch (mask) {
506 case RT_IDX_CAM_HIT:
507 {
508 value = RT_IDX_DST_CAM_Q | /* dest */
509 RT_IDX_TYPE_NICQ | /* type */
510 (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
511 break;
512 }
513 case RT_IDX_VALID: /* Promiscuous Mode frames. */
514 {
515 value = RT_IDX_DST_DFLT_Q | /* dest */
516 RT_IDX_TYPE_NICQ | /* type */
517 (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
518 break;
519 }
520 case RT_IDX_ERR: /* Pass up MAC,IP,TCP/UDP error frames. */
521 {
522 value = RT_IDX_DST_DFLT_Q | /* dest */
523 RT_IDX_TYPE_NICQ | /* type */
524 (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
525 break;
526 }
527 case RT_IDX_BCAST: /* Pass up Broadcast frames to default Q. */
528 {
529 value = RT_IDX_DST_DFLT_Q | /* dest */
530 RT_IDX_TYPE_NICQ | /* type */
531 (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
532 break;
533 }
534 case RT_IDX_MCAST: /* Pass up All Multicast frames. */
535 {
536 value = RT_IDX_DST_CAM_Q | /* dest */
537 RT_IDX_TYPE_NICQ | /* type */
538 (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
539 break;
540 }
541 case RT_IDX_MCAST_MATCH: /* Pass up matched Multicast frames. */
542 {
543 value = RT_IDX_DST_CAM_Q | /* dest */
544 RT_IDX_TYPE_NICQ | /* type */
545 (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
546 break;
547 }
548 case RT_IDX_RSS_MATCH: /* Pass up matched RSS frames. */
549 {
550 value = RT_IDX_DST_RSS | /* dest */
551 RT_IDX_TYPE_NICQ | /* type */
552 (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
553 break;
554 }
555 case 0: /* Clear the E-bit on an entry. */
556 {
557 value = RT_IDX_DST_DFLT_Q | /* dest */
558 RT_IDX_TYPE_NICQ | /* type */
559 (index << RT_IDX_IDX_SHIFT);/* index */
560 break;
561 }
562 default:
563 QPRINTK(qdev, IFUP, ERR, "Mask type %d not yet supported.\n",
564 mask);
565 status = -EPERM;
566 goto exit;
567 }
568
569 if (value) {
570 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
571 if (status)
572 goto exit;
573 value |= (enable ? RT_IDX_E : 0);
574 ql_write32(qdev, RT_IDX, value);
575 ql_write32(qdev, RT_DATA, enable ? mask : 0);
576 }
577exit:
578 return status;
579}
580
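/* Global interrupt enable/disable. The upper 16 bits of INTR_EN appear to
 * act as a write mask for the low bits (the same convention used for other
 * registers in this driver), so only the EI bit is touched here.
 */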
581static void ql_enable_interrupts(struct ql_adapter *qdev)
582{
583 ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
584}
585
586static void ql_disable_interrupts(struct ql_adapter *qdev)
587{
588 ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
589}
590
591/* If we're running with multiple MSI-X vectors then we enable on the fly.
592 * Otherwise, we may have multiple outstanding workers and don't want to
593 * enable until the last one finishes. In this case, the irq_cnt gets
594 * incremented every time we queue a worker and decremented every time
595 * a worker finishes. Once it hits zero we enable the interrupt.
596 */
597u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
598{
599 u32 var = 0;
600 unsigned long hw_flags = 0;
601 struct intr_context *ctx = qdev->intr_context + intr;
602
603 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
604 /* Always enable if we're MSIX multi interrupts and
605 * it's not the default (zeroeth) interrupt.
606 */
607 ql_write32(qdev, INTR_EN,
608 ctx->intr_en_mask);
609 var = ql_read32(qdev, STS);
610 return var;
611 }
612
613 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
614 if (atomic_dec_and_test(&ctx->irq_cnt)) {
615 ql_write32(qdev, INTR_EN,
616 ctx->intr_en_mask);
617 var = ql_read32(qdev, STS);
618 }
619 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
620 return var;
621}
622
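/* Counterpart to ql_enable_completion_interrupt(). For non-MSI-X (or the
 * default vector) the hardware does not auto-disable, so the interrupt is
 * masked here and irq_cnt is bumped so that re-enabling is deferred until
 * every outstanding worker has finished.
 */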
623static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
624{
625 u32 var = 0;
626 struct intr_context *ctx;
627
628 /* HW disables for us if we're MSIX multi interrupts and
629 * it's not the default (zeroeth) interrupt.
630 */
631 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
632 return 0;
633
634 ctx = qdev->intr_context + intr;
635 spin_lock(&qdev->hw_lock);
636 if (!atomic_read(&ctx->irq_cnt)) {
637 ql_write32(qdev, INTR_EN,
638 ctx->intr_dis_mask);
639 var = ql_read32(qdev, STS);
640 }
641 atomic_inc(&ctx->irq_cnt);
642 spin_unlock(&qdev->hw_lock);
643 return var;
644}
645
646static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
647{
648 int i;
649 for (i = 0; i < qdev->intr_count; i++) {
650 /* The enable call does a atomic_dec_and_test
651 * and enables only if the result is zero.
652 * So we precharge it here.
653 */
654 if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
655 i == 0))
656 atomic_set(&qdev->intr_context[i].irq_cnt, 1);
657 ql_enable_completion_interrupt(qdev, i);
658 }
659
660}
661
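/* Sanity-check the flash image that was just read: the first four bytes
 * must match the expected signature string ("8012"/"8000") and the 16-bit
 * word sum over the whole block must come out zero.
 */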
662static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
663{
664 int status, i;
665 u16 csum = 0;
666 __le16 *flash = (__le16 *)&qdev->flash;
667
668 status = strncmp((char *)&qdev->flash, str, 4);
669 if (status) {
670 QPRINTK(qdev, IFUP, ERR, "Invalid flash signature.\n");
671 return status;
672 }
673
674 for (i = 0; i < size; i++)
675 csum += le16_to_cpu(*flash++);
676
677 if (csum)
678 QPRINTK(qdev, IFUP, ERR,
679 "Invalid flash checksum, csum = 0x%.04x.\n", csum);
680
681 return csum;
682}
683
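/* Read one 32-bit word from serial flash through the FLASH_ADDR/FLASH_DATA
 * register pair. The data lives on flash as little-endian, so the value is
 * converted back to __le32 after the CPU-endian register read.
 */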
684static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
685{
686 int status = 0;
687 /* wait for reg to come ready */
688 status = ql_wait_reg_rdy(qdev,
689 FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
690 if (status)
691 goto exit;
692 /* set up for reg read */
693 ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
694 /* wait for reg to come ready */
695 status = ql_wait_reg_rdy(qdev,
696 FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
697 if (status)
698 goto exit;
699 /* This data is stored on flash as an array of
700 * __le32. Since ql_read32() returns cpu endian
701 * we need to swap it back.
702 */
703 *data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
704exit:
705 return status;
706}
707
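/* Read and validate this function's flash parameter block on 8000-series
 * parts, then take the MAC address from it (either the manufacturer value
 * or the BOFM-modified one, depending on data_type1).
 */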
708static int ql_get_8000_flash_params(struct ql_adapter *qdev)
709{
710 u32 i, size;
711 int status;
712 __le32 *p = (__le32 *)&qdev->flash;
713 u32 offset;
714 u8 mac_addr[6];
715
716 /* Get flash offset for function and adjust
717 * for dword access.
718 */
719 if (!qdev->port)
720 offset = FUNC0_FLASH_OFFSET / sizeof(u32);
721 else
722 offset = FUNC1_FLASH_OFFSET / sizeof(u32);
723
724 if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
725 return -ETIMEDOUT;
726
727 size = sizeof(struct flash_params_8000) / sizeof(u32);
728 for (i = 0; i < size; i++, p++) {
729 status = ql_read_flash_word(qdev, i+offset, p);
730 if (status) {
731 QPRINTK(qdev, IFUP, ERR, "Error reading flash.\n");
732 goto exit;
733 }
734 }
735
736 status = ql_validate_flash(qdev,
737 sizeof(struct flash_params_8000) / sizeof(u16),
738 "8000");
739 if (status) {
740 QPRINTK(qdev, IFUP, ERR, "Invalid flash.\n");
741 status = -EINVAL;
742 goto exit;
743 }
744
745 /* Extract either manufacturer or BOFM modified
746 * MAC address.
747 */
748 if (qdev->flash.flash_params_8000.data_type1 == 2)
749 memcpy(mac_addr,
750 qdev->flash.flash_params_8000.mac_addr1,
751 qdev->ndev->addr_len);
752 else
753 memcpy(mac_addr,
754 qdev->flash.flash_params_8000.mac_addr,
755 qdev->ndev->addr_len);
756
757 if (!is_valid_ether_addr(mac_addr)) {
758 QPRINTK(qdev, IFUP, ERR, "Invalid MAC address.\n");
759 status = -EINVAL;
760 goto exit;
761 }
762
763 memcpy(qdev->ndev->dev_addr,
764 mac_addr,
765 qdev->ndev->addr_len);
766
767exit:
768 ql_sem_unlock(qdev, SEM_FLASH_MASK);
769 return status;
770}
771
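/* 8012-series equivalent of the routine above: the second PCI function's
 * parameter block follows the first one in flash, so the read offset is
 * simply the block size when qdev->port is set.
 */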
772static int ql_get_8012_flash_params(struct ql_adapter *qdev)
773{
774 int i;
775 int status;
776 __le32 *p = (__le32 *)&qdev->flash;
777 u32 offset = 0;
778 u32 size = sizeof(struct flash_params_8012) / sizeof(u32);
779
780 /* Second function's parameters follow the first
781 * function's.
782 */
783 if (qdev->port)
784 offset = size;
785
786 if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
787 return -ETIMEDOUT;
788
789 for (i = 0; i < size; i++, p++) {
790 status = ql_read_flash_word(qdev, i+offset, p);
791 if (status) {
792 QPRINTK(qdev, IFUP, ERR, "Error reading flash.\n");
793 goto exit;
794 }
795
796 }
797
798 status = ql_validate_flash(qdev,
799 sizeof(struct flash_params_8012) / sizeof(u16),
800 "8012");
801 if (status) {
802 QPRINTK(qdev, IFUP, ERR, "Invalid flash.\n");
803 status = -EINVAL;
804 goto exit;
805 }
806
807 if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
808 status = -EINVAL;
809 goto exit;
810 }
811
812 memcpy(qdev->ndev->dev_addr,
813 qdev->flash.flash_params_8012.mac_addr,
814 qdev->ndev->addr_len);
815
816exit:
817 ql_sem_unlock(qdev, SEM_FLASH_MASK);
818 return status;
819}
820
821/* xgmac registers are located behind the xgmac_addr and xgmac_data
822 * register pair. Each read/write requires us to wait for the ready
823 * bit before reading/writing the data.
824 */
825static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
826{
827 int status;
828 /* wait for reg to come ready */
829 status = ql_wait_reg_rdy(qdev,
830 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
831 if (status)
832 return status;
833 /* write the data to the data reg */
834 ql_write32(qdev, XGMAC_DATA, data);
835 /* trigger the write */
836 ql_write32(qdev, XGMAC_ADDR, reg);
837 return status;
838}
839
840/* xgmac registers are located behind the xgmac_addr and xgmac_data
841 * register pair. Each read/write requires us to wait for the ready
842 * bit before reading/writing the data.
843 */
844int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
845{
846 int status = 0;
847 /* wait for reg to come ready */
848 status = ql_wait_reg_rdy(qdev,
849 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
850 if (status)
851 goto exit;
852 /* set up for reg read */
853 ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
854 /* wait for reg to come ready */
855 status = ql_wait_reg_rdy(qdev,
856 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
857 if (status)
858 goto exit;
859 /* get the data */
860 *data = ql_read32(qdev, XGMAC_DATA);
861exit:
862 return status;
863}
864
865/* This is used for reading the 64-bit statistics regs. */
866int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
867{
868 int status = 0;
869 u32 hi = 0;
870 u32 lo = 0;
871
872 status = ql_read_xgmac_reg(qdev, reg, &lo);
873 if (status)
874 goto exit;
875
876 status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
877 if (status)
878 goto exit;
879
880 *data = (u64) lo | ((u64) hi << 32);
881
882exit:
883 return status;
884}
885
886static int ql_8000_port_initialize(struct ql_adapter *qdev)
887{
888 int status;
889 /*
890 * Get MPI firmware version for driver banner
891 * and ethtool info.
892 */
893 status = ql_mb_about_fw(qdev);
894 if (status)
895 goto exit;
896 status = ql_mb_get_fw_state(qdev);
897 if (status)
898 goto exit;
899 /* Wake up a worker to get/set the TX/RX frame sizes. */
900 queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
901exit:
902 return status;
903}
904
905/* Take the MAC Core out of reset.
906 * Enable statistics counting.
907 * Take the transmitter/receiver out of reset.
908 * This functionality may be done in the MPI firmware at a
909 * later date.
910 */
911static int ql_8012_port_initialize(struct ql_adapter *qdev)
912{
913 int status = 0;
914 u32 data;
915
916 if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
917 /* Another function has the semaphore, so
918 * wait for the port init bit to come ready.
919 */
920 QPRINTK(qdev, LINK, INFO,
921 "Another function has the semaphore, so wait for the port init bit to come ready.\n");
922 status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
923 if (status) {
924 QPRINTK(qdev, LINK, CRIT,
925 "Port initialize timed out.\n");
926 }
927 return status;
928 }
929
930 QPRINTK(qdev, LINK, INFO, "Got xgmac semaphore!.\n");
931 /* Set the core reset. */
932 status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
933 if (status)
934 goto end;
935 data |= GLOBAL_CFG_RESET;
936 status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
937 if (status)
938 goto end;
939
940 /* Clear the core reset and turn on jumbo for receiver. */
941 data &= ~GLOBAL_CFG_RESET; /* Clear core reset. */
942 data |= GLOBAL_CFG_JUMBO; /* Turn on jumbo. */
943 data |= GLOBAL_CFG_TX_STAT_EN;
944 data |= GLOBAL_CFG_RX_STAT_EN;
945 status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
946 if (status)
947 goto end;
948
949 /* Enable transmitter, and clear its reset. */
950 status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
951 if (status)
952 goto end;
953 data &= ~TX_CFG_RESET; /* Clear the TX MAC reset. */
954 data |= TX_CFG_EN; /* Enable the transmitter. */
955 status = ql_write_xgmac_reg(qdev, TX_CFG, data);
956 if (status)
957 goto end;
958
959 /* Enable receiver and clear its reset. */
960 status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
961 if (status)
962 goto end;
963 data &= ~RX_CFG_RESET; /* Clear the RX MAC reset. */
964 data |= RX_CFG_EN; /* Enable the receiver. */
965 status = ql_write_xgmac_reg(qdev, RX_CFG, data);
966 if (status)
967 goto end;
968
969 /* Turn on jumbo. */
970 status =
971 ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
972 if (status)
973 goto end;
974 status =
975 ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
976 if (status)
977 goto end;
978
979 /* Signal to the world that the port is enabled. */
980 ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
981end:
982 ql_sem_unlock(qdev, qdev->xg_sem_mask);
983 return status;
984}
985
986/* Get the next large buffer. */
987static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
988{
989 struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
990 rx_ring->lbq_curr_idx++;
991 if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
992 rx_ring->lbq_curr_idx = 0;
993 rx_ring->lbq_free_cnt++;
994 return lbq_desc;
995}
996
997/* Get the next small buffer. */
998static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
999{
1000 struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
1001 rx_ring->sbq_curr_idx++;
1002 if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
1003 rx_ring->sbq_curr_idx = 0;
1004 rx_ring->sbq_free_cnt++;
1005 return sbq_desc;
1006}
1007
1008/* Update an rx ring index. */
1009static void ql_update_cq(struct rx_ring *rx_ring)
1010{
1011 rx_ring->cnsmr_idx++;
1012 rx_ring->curr_entry++;
1013 if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
1014 rx_ring->cnsmr_idx = 0;
1015 rx_ring->curr_entry = rx_ring->cq_base;
1016 }
1017}
1018
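/* Publish the new consumer index to the chip via the ring's doorbell
 * register so the hardware can reuse the completed entries.
 */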
1019static void ql_write_cq_idx(struct rx_ring *rx_ring)
1020{
1021 ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
1022}
1023
1024/* Process (refill) a large buffer queue. */
1025static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1026{
1027 u32 clean_idx = rx_ring->lbq_clean_idx;
1028 u32 start_idx = clean_idx;
1029 struct bq_desc *lbq_desc;
1030 u64 map;
1031 int i;
1032
1033 while (rx_ring->lbq_free_cnt > 16) {
1034 for (i = 0; i < 16; i++) {
1035 QPRINTK(qdev, RX_STATUS, DEBUG,
1036 "lbq: try cleaning clean_idx = %d.\n",
1037 clean_idx);
1038 lbq_desc = &rx_ring->lbq[clean_idx];
1039 if (lbq_desc->p.lbq_page == NULL) {
1040 QPRINTK(qdev, RX_STATUS, DEBUG,
1041 "lbq: getting new page for index %d.\n",
1042 lbq_desc->index);
1043 lbq_desc->p.lbq_page = alloc_page(GFP_ATOMIC);
1044 if (lbq_desc->p.lbq_page == NULL) {
1045 rx_ring->lbq_clean_idx = clean_idx;
1046 QPRINTK(qdev, RX_STATUS, ERR,
1047 "Couldn't get a page.\n");
1048 return;
1049 }
1050 map = pci_map_page(qdev->pdev,
1051 lbq_desc->p.lbq_page,
1052 0, PAGE_SIZE,
1053 PCI_DMA_FROMDEVICE);
1054 if (pci_dma_mapping_error(qdev->pdev, map)) {
1055 rx_ring->lbq_clean_idx = clean_idx;
1056 put_page(lbq_desc->p.lbq_page);
1057 lbq_desc->p.lbq_page = NULL;
1058 QPRINTK(qdev, RX_STATUS, ERR,
1059 "PCI mapping failed.\n");
1060 return;
1061 }
1062 pci_unmap_addr_set(lbq_desc, mapaddr, map);
1063 pci_unmap_len_set(lbq_desc, maplen, PAGE_SIZE);
1064 *lbq_desc->addr = cpu_to_le64(map);
1065 }
1066 clean_idx++;
1067 if (clean_idx == rx_ring->lbq_len)
1068 clean_idx = 0;
1069 }
1070
1071 rx_ring->lbq_clean_idx = clean_idx;
1072 rx_ring->lbq_prod_idx += 16;
1073 if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
1074 rx_ring->lbq_prod_idx = 0;
1075 rx_ring->lbq_free_cnt -= 16;
1076 }
1077
1078 if (start_idx != clean_idx) {
1079 QPRINTK(qdev, RX_STATUS, DEBUG,
1080 "lbq: updating prod idx = %d.\n",
1081 rx_ring->lbq_prod_idx);
1082 ql_write_db_reg(rx_ring->lbq_prod_idx,
1083 rx_ring->lbq_prod_idx_db_reg);
1084 }
1085}
1086
1087/* Process (refill) a small buffer queue. */
1088static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1089{
1090 u32 clean_idx = rx_ring->sbq_clean_idx;
1091 u32 start_idx = clean_idx;
1092 struct bq_desc *sbq_desc;
1093 u64 map;
1094 int i;
1095
1096 while (rx_ring->sbq_free_cnt > 16) {
1097 for (i = 0; i < 16; i++) {
1098 sbq_desc = &rx_ring->sbq[clean_idx];
1099 QPRINTK(qdev, RX_STATUS, DEBUG,
1100 "sbq: try cleaning clean_idx = %d.\n",
1101 clean_idx);
1102 if (sbq_desc->p.skb == NULL) {
1103 QPRINTK(qdev, RX_STATUS, DEBUG,
1104 "sbq: getting new skb for index %d.\n",
1105 sbq_desc->index);
1106 sbq_desc->p.skb =
1107 netdev_alloc_skb(qdev->ndev,
1108 rx_ring->sbq_buf_size);
1109 if (sbq_desc->p.skb == NULL) {
1110 QPRINTK(qdev, PROBE, ERR,
1111 "Couldn't get an skb.\n");
1112 rx_ring->sbq_clean_idx = clean_idx;
1113 return;
1114 }
1115 skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
1116 map = pci_map_single(qdev->pdev,
1117 sbq_desc->p.skb->data,
1118 rx_ring->sbq_buf_size /
1119 2, PCI_DMA_FROMDEVICE);
1120 if (pci_dma_mapping_error(qdev->pdev, map)) {
1121 QPRINTK(qdev, IFUP, ERR, "PCI mapping failed.\n");
1122 rx_ring->sbq_clean_idx = clean_idx;
1123 dev_kfree_skb_any(sbq_desc->p.skb);
1124 sbq_desc->p.skb = NULL;
1125 return;
1126 }
1127 pci_unmap_addr_set(sbq_desc, mapaddr, map);
1128 pci_unmap_len_set(sbq_desc, maplen,
1129 rx_ring->sbq_buf_size / 2);
1130 *sbq_desc->addr = cpu_to_le64(map);
1131 }
1132
1133 clean_idx++;
1134 if (clean_idx == rx_ring->sbq_len)
1135 clean_idx = 0;
1136 }
1137 rx_ring->sbq_clean_idx = clean_idx;
1138 rx_ring->sbq_prod_idx += 16;
1139 if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
1140 rx_ring->sbq_prod_idx = 0;
1141 rx_ring->sbq_free_cnt -= 16;
1142 }
1143
1144 if (start_idx != clean_idx) {
1145 QPRINTK(qdev, RX_STATUS, DEBUG,
1146 "sbq: updating prod idx = %d.\n",
1147 rx_ring->sbq_prod_idx);
1148 ql_write_db_reg(rx_ring->sbq_prod_idx,
1149 rx_ring->sbq_prod_idx_db_reg);
1150 }
1151}
1152
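/* Convenience wrapper: refill both the small and large receive buffer
 * queues for this rx_ring in one call.
 */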
1153static void ql_update_buffer_queues(struct ql_adapter *qdev,
1154 struct rx_ring *rx_ring)
1155{
1156 ql_update_sbq(qdev, rx_ring);
1157 ql_update_lbq(qdev, rx_ring);
1158}
1159
1160/* Unmaps tx buffers. Can be called from send() if a pci mapping
1161 * fails at some stage, or from the interrupt when a tx completes.
1162 */
1163static void ql_unmap_send(struct ql_adapter *qdev,
1164 struct tx_ring_desc *tx_ring_desc, int mapped)
1165{
1166 int i;
1167 for (i = 0; i < mapped; i++) {
1168 if (i == 0 || (i == 7 && mapped > 7)) {
1169 /*
1170 * Unmap the skb->data area, or the
1171 * external sglist (AKA the Outbound
1172 * Address List (OAL)).
1173 * If it's the zeroth element, then it's
1174 * the skb->data area. If it's the 7th
1175 * element and there are more than 6 frags,
1176 * then it's an OAL.
1177 */
1178 if (i == 7) {
1179 QPRINTK(qdev, TX_DONE, DEBUG,
1180 "unmapping OAL area.\n");
1181 }
1182 pci_unmap_single(qdev->pdev,
1183 pci_unmap_addr(&tx_ring_desc->map[i],
1184 mapaddr),
1185 pci_unmap_len(&tx_ring_desc->map[i],
1186 maplen),
1187 PCI_DMA_TODEVICE);
1188 } else {
1189 QPRINTK(qdev, TX_DONE, DEBUG, "unmapping frag %d.\n",
1190 i);
1191 pci_unmap_page(qdev->pdev,
1192 pci_unmap_addr(&tx_ring_desc->map[i],
1193 mapaddr),
1194 pci_unmap_len(&tx_ring_desc->map[i],
1195 maplen), PCI_DMA_TODEVICE);
1196 }
1197 }
1198
1199}
1200
1201/* Map the buffers for this transmit. This will return
1202 * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
1203 */
1204static int ql_map_send(struct ql_adapter *qdev,
1205 struct ob_mac_iocb_req *mac_iocb_ptr,
1206 struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
1207{
1208 int len = skb_headlen(skb);
1209 dma_addr_t map;
1210 int frag_idx, err, map_idx = 0;
1211 struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
1212 int frag_cnt = skb_shinfo(skb)->nr_frags;
1213
1214 if (frag_cnt) {
1215 QPRINTK(qdev, TX_QUEUED, DEBUG, "frag_cnt = %d.\n", frag_cnt);
1216 }
1217 /*
1218 * Map the skb buffer first.
1219 */
1220 map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
1221
1222 err = pci_dma_mapping_error(qdev->pdev, map);
1223 if (err) {
1224 QPRINTK(qdev, TX_QUEUED, ERR,
1225 "PCI mapping failed with error: %d\n", err);
1226
1227 return NETDEV_TX_BUSY;
1228 }
1229
1230 tbd->len = cpu_to_le32(len);
1231 tbd->addr = cpu_to_le64(map);
1232 pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1233 pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
1234 map_idx++;
1235
1236 /*
1237 * This loop fills the remainder of the 8 address descriptors
1238 * in the IOCB. If there are more than 7 fragments, then the
1239 * eighth address desc will point to an external list (OAL).
1240 * When this happens, the remainder of the frags will be stored
1241 * in this list.
1242 */
1243 for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
1244 skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
1245 tbd++;
1246 if (frag_idx == 6 && frag_cnt > 7) {
1247 /* Let's tack on an sglist.
1248 * Our control block will now
1249 * look like this:
1250 * iocb->seg[0] = skb->data
1251 * iocb->seg[1] = frag[0]
1252 * iocb->seg[2] = frag[1]
1253 * iocb->seg[3] = frag[2]
1254 * iocb->seg[4] = frag[3]
1255 * iocb->seg[5] = frag[4]
1256 * iocb->seg[6] = frag[5]
1257 * iocb->seg[7] = ptr to OAL (external sglist)
1258 * oal->seg[0] = frag[6]
1259 * oal->seg[1] = frag[7]
1260 * oal->seg[2] = frag[8]
1261 * oal->seg[3] = frag[9]
1262 * oal->seg[4] = frag[10]
1263 * etc...
1264 */
1265 /* Tack on the OAL in the eighth segment of IOCB. */
1266 map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
1267 sizeof(struct oal),
1268 PCI_DMA_TODEVICE);
1269 err = pci_dma_mapping_error(qdev->pdev, map);
1270 if (err) {
1271 QPRINTK(qdev, TX_QUEUED, ERR,
1272 "PCI mapping outbound address list with error: %d\n",
1273 err);
1274 goto map_error;
1275 }
1276
1277 tbd->addr = cpu_to_le64(map);
1278 /*
1279 * The length is the number of fragments
1280 * that remain to be mapped times the length
1281 * of our sglist (OAL).
1282 */
1283 tbd->len =
1284 cpu_to_le32((sizeof(struct tx_buf_desc) *
1285 (frag_cnt - frag_idx)) | TX_DESC_C);
1286 pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
1287 map);
1288 pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1289 sizeof(struct oal));
1290 tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
1291 map_idx++;
1292 }
1293
1294 map =
1295 pci_map_page(qdev->pdev, frag->page,
1296 frag->page_offset, frag->size,
1297 PCI_DMA_TODEVICE);
1298
1299 err = pci_dma_mapping_error(qdev->pdev, map);
1300 if (err) {
1301 QPRINTK(qdev, TX_QUEUED, ERR,
1302 "PCI mapping frags failed with error: %d.\n",
1303 err);
1304 goto map_error;
1305 }
1306
1307 tbd->addr = cpu_to_le64(map);
1308 tbd->len = cpu_to_le32(frag->size);
1309 pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1310 pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1311 frag->size);
1312
1313 }
1314 /* Save the number of segments we've mapped. */
1315 tx_ring_desc->map_cnt = map_idx;
1316 /* Terminate the last segment. */
1317 tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
1318 return NETDEV_TX_OK;
1319
1320map_error:
1321 /*
1322 * If the first frag mapping failed, then i will be zero.
1323 * This causes the unmap of the skb->data area. Otherwise
1324 * we pass in the number of frags that mapped successfully
1325 * so they can be umapped.
1326 */
1327 ql_unmap_send(qdev, tx_ring_desc, map_idx);
1328 return NETDEV_TX_BUSY;
1329}
1330
1331static void ql_realign_skb(struct sk_buff *skb, int len)
1332{
1333 void *temp_addr = skb->data;
1334
1335 /* Undo the skb_reserve(skb,32) we did before
1336 * giving to hardware, and realign data on
1337 * a 2-byte boundary.
1338 */
1339 skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
1340 skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
1341 skb_copy_to_linear_data(skb, temp_addr,
1342 (unsigned int)len);
1343}
1344
1345/*
1346 * This function builds an skb for the given inbound
1347 * completion. It will be rewritten for readability in the near
1348 * future, but for now it works well.
1349 */
1350static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1351 struct rx_ring *rx_ring,
1352 struct ib_mac_iocb_rsp *ib_mac_rsp)
1353{
1354 struct bq_desc *lbq_desc;
1355 struct bq_desc *sbq_desc;
1356 struct sk_buff *skb = NULL;
1357 u32 length = le32_to_cpu(ib_mac_rsp->data_len);
1358 u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
1359
1360 /*
1361 * Handle the header buffer if present.
1362 */
1363 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
1364 ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1365 QPRINTK(qdev, RX_STATUS, DEBUG, "Header of %d bytes in small buffer.\n", hdr_len);
1366 /*
1367 * Headers fit nicely into a small buffer.
1368 */
1369 sbq_desc = ql_get_curr_sbuf(rx_ring);
1370 pci_unmap_single(qdev->pdev,
1371 pci_unmap_addr(sbq_desc, mapaddr),
1372 pci_unmap_len(sbq_desc, maplen),
1373 PCI_DMA_FROMDEVICE);
1374 skb = sbq_desc->p.skb;
1375 ql_realign_skb(skb, hdr_len);
1376 skb_put(skb, hdr_len);
1377 sbq_desc->p.skb = NULL;
1378 }
1379
1380 /*
1381 * Handle the data buffer(s).
1382 */
1383 if (unlikely(!length)) { /* Is there data too? */
1384 QPRINTK(qdev, RX_STATUS, DEBUG,
1385 "No Data buffer in this packet.\n");
1386 return skb;
1387 }
1388
1389 if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
1390 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1391 QPRINTK(qdev, RX_STATUS, DEBUG,
1392 "Headers in small, data of %d bytes in small, combine them.\n", length);
1393 /*
1394 * Data is less than small buffer size so it's
1395 * stuffed in a small buffer.
1396 * For this case we append the data
1397 * from the "data" small buffer to the "header" small
1398 * buffer.
1399 */
1400 sbq_desc = ql_get_curr_sbuf(rx_ring);
1401 pci_dma_sync_single_for_cpu(qdev->pdev,
1402 pci_unmap_addr
1403 (sbq_desc, mapaddr),
1404 pci_unmap_len
1405 (sbq_desc, maplen),
1406 PCI_DMA_FROMDEVICE);
1407 memcpy(skb_put(skb, length),
1408 sbq_desc->p.skb->data, length);
1409 pci_dma_sync_single_for_device(qdev->pdev,
1410 pci_unmap_addr
1411 (sbq_desc,
1412 mapaddr),
1413 pci_unmap_len
1414 (sbq_desc,
1415 maplen),
1416 PCI_DMA_FROMDEVICE);
1417 } else {
1418 QPRINTK(qdev, RX_STATUS, DEBUG,
1419 "%d bytes in a single small buffer.\n", length);
1420 sbq_desc = ql_get_curr_sbuf(rx_ring);
1421 skb = sbq_desc->p.skb;
1422 ql_realign_skb(skb, length);
1423 skb_put(skb, length);
1424 pci_unmap_single(qdev->pdev,
1425 pci_unmap_addr(sbq_desc,
1426 mapaddr),
1427 pci_unmap_len(sbq_desc,
1428 maplen),
1429 PCI_DMA_FROMDEVICE);
1430 sbq_desc->p.skb = NULL;
1431 }
1432 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
1433 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1434 QPRINTK(qdev, RX_STATUS, DEBUG,
1435 "Header in small, %d bytes in large. Chain large to small!\n", length);
1436 /*
1437 * The data is in a single large buffer. We
1438 * chain it to the header buffer's skb and let
1439 * it rip.
1440 */
1441 lbq_desc = ql_get_curr_lbuf(rx_ring);
1442 pci_unmap_page(qdev->pdev,
1443 pci_unmap_addr(lbq_desc,
1444 mapaddr),
1445 pci_unmap_len(lbq_desc, maplen),
1446 PCI_DMA_FROMDEVICE);
1447 QPRINTK(qdev, RX_STATUS, DEBUG,
1448 "Chaining page to skb.\n");
1449 skb_fill_page_desc(skb, 0, lbq_desc->p.lbq_page,
1450 0, length);
1451 skb->len += length;
1452 skb->data_len += length;
1453 skb->truesize += length;
1454 lbq_desc->p.lbq_page = NULL;
1455 } else {
1456 /*
1457 * The headers and data are in a single large buffer. We
1458 * copy it to a new skb and let it go. This can happen with
1459 * jumbo mtu on a non-TCP/UDP frame.
1460 */
1461 lbq_desc = ql_get_curr_lbuf(rx_ring);
1462 skb = netdev_alloc_skb(qdev->ndev, length);
1463 if (skb == NULL) {
1464 QPRINTK(qdev, PROBE, DEBUG,
1465 "No skb available, drop the packet.\n");
1466 return NULL;
1467 }
1468 pci_unmap_page(qdev->pdev,
1469 pci_unmap_addr(lbq_desc,
1470 mapaddr),
1471 pci_unmap_len(lbq_desc, maplen),
1472 PCI_DMA_FROMDEVICE);
1473 skb_reserve(skb, NET_IP_ALIGN);
1474 QPRINTK(qdev, RX_STATUS, DEBUG,
1475 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n", length);
1476 skb_fill_page_desc(skb, 0, lbq_desc->p.lbq_page,
1477 0, length);
1478 skb->len += length;
1479 skb->data_len += length;
1480 skb->truesize += length;
1481 length -= length;
1482 lbq_desc->p.lbq_page = NULL;
1483 __pskb_pull_tail(skb,
1484 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1485 VLAN_ETH_HLEN : ETH_HLEN);
1486 }
1487 } else {
1488 /*
1489 * The data is in a chain of large buffers
1490 * pointed to by a small buffer. We loop
1491 * through and chain them to our small header
1492 * buffer's skb.
1493 * frags: There are 18 max frags and our small
1494 * buffer will hold 32 of them. The thing is,
1495 * we'll use 3 max for our 9000 byte jumbo
1496 * frames. If the MTU goes up we could
1497 * eventually be in trouble.
1498 */
1499 int size, offset, i = 0;
1500 __le64 *bq, bq_array[8];
1501 sbq_desc = ql_get_curr_sbuf(rx_ring);
1502 pci_unmap_single(qdev->pdev,
1503 pci_unmap_addr(sbq_desc, mapaddr),
1504 pci_unmap_len(sbq_desc, maplen),
1505 PCI_DMA_FROMDEVICE);
1506 if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
1507 /*
1508 * This is an non TCP/UDP IP frame, so
1509 * the headers aren't split into a small
1510 * buffer. We have to use the small buffer
1511 * that contains our sg list as our skb to
1512 * send upstairs. Copy the sg list here to
1513 * a local buffer and use it to find the
1514 * pages to chain.
1515 */
1516 QPRINTK(qdev, RX_STATUS, DEBUG,
1517 "%d bytes of headers & data in chain of large.\n", length);
1518 skb = sbq_desc->p.skb;
1519 bq = &bq_array[0];
1520 memcpy(bq, skb->data, sizeof(bq_array));
1521 sbq_desc->p.skb = NULL;
1522 skb_reserve(skb, NET_IP_ALIGN);
1523 } else {
1524 QPRINTK(qdev, RX_STATUS, DEBUG,
1525 "Headers in small, %d bytes of data in chain of large.\n", length);
1526 bq = (__le64 *)sbq_desc->p.skb->data;
1527 }
1528 while (length > 0) {
1529 lbq_desc = ql_get_curr_lbuf(rx_ring);
1530 pci_unmap_page(qdev->pdev,
1531 pci_unmap_addr(lbq_desc,
1532 mapaddr),
1533 pci_unmap_len(lbq_desc,
1534 maplen),
1535 PCI_DMA_FROMDEVICE);
1536 size = (length < PAGE_SIZE) ? length : PAGE_SIZE;
1537 offset = 0;
1538
1539 QPRINTK(qdev, RX_STATUS, DEBUG,
1540 "Adding page %d to skb for %d bytes.\n",
1541 i, size);
1542 skb_fill_page_desc(skb, i, lbq_desc->p.lbq_page,
1543 offset, size);
1544 skb->len += size;
1545 skb->data_len += size;
1546 skb->truesize += size;
1547 length -= size;
1548 lbq_desc->p.lbq_page = NULL;
1549 bq++;
1550 i++;
1551 }
1552 __pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1553 VLAN_ETH_HLEN : ETH_HLEN);
1554 }
1555 return skb;
1556}
1557
1558/* Process an inbound completion from an rx ring. */
1559static void ql_process_mac_rx_intr(struct ql_adapter *qdev,
1560 struct rx_ring *rx_ring,
1561 struct ib_mac_iocb_rsp *ib_mac_rsp)
1562{
1563 struct net_device *ndev = qdev->ndev;
1564 struct sk_buff *skb = NULL;
1565 u16 vlan_id = (le16_to_cpu(ib_mac_rsp->vlan_id) &
1566 IB_MAC_IOCB_RSP_VLAN_MASK);
1567
1568 QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
1569
1570 skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
1571 if (unlikely(!skb)) {
1572 QPRINTK(qdev, RX_STATUS, DEBUG,
1573 "No skb available, drop packet.\n");
1574 return;
1575 }
1576
1577 /* Frame error, so drop the packet. */
1578 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1579 QPRINTK(qdev, DRV, ERR, "Receive error, flags2 = 0x%x\n",
1580 ib_mac_rsp->flags2);
1581 dev_kfree_skb_any(skb);
1582 return;
1583 }
1584
1585 /* The max framesize filter on this chip is set higher than
1586 * MTU since FCoE uses 2k frames.
1587 */
1588 if (skb->len > ndev->mtu + ETH_HLEN) {
1589 dev_kfree_skb_any(skb);
1590 return;
1591 }
1592
1593 prefetch(skb->data);
1594 skb->dev = ndev;
1595 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1596 QPRINTK(qdev, RX_STATUS, DEBUG, "%s%s%s Multicast.\n",
1597 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1598 IB_MAC_IOCB_RSP_M_HASH ? "Hash" : "",
1599 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1600 IB_MAC_IOCB_RSP_M_REG ? "Registered" : "",
1601 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1602 IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1603 }
1604 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
1605 QPRINTK(qdev, RX_STATUS, DEBUG, "Promiscuous Packet.\n");
1606 }
1607
1608 skb->protocol = eth_type_trans(skb, ndev);
1609 skb->ip_summed = CHECKSUM_NONE;
1610
1611 /* If rx checksum is on, and there are no
1612 * csum or frame errors.
1613 */
1614 if (qdev->rx_csum &&
1615 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1616 /* TCP frame. */
1617 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1618 QPRINTK(qdev, RX_STATUS, DEBUG,
1619 "TCP checksum done!\n");
1620 skb->ip_summed = CHECKSUM_UNNECESSARY;
1621 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1622 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1623 /* Unfragmented ipv4 UDP frame. */
1624 struct iphdr *iph = (struct iphdr *) skb->data;
1625 if (!(iph->frag_off &
1626 cpu_to_be16(IP_MF|IP_OFFSET))) {
1627 skb->ip_summed = CHECKSUM_UNNECESSARY;
1628 QPRINTK(qdev, RX_STATUS, DEBUG,
1629 "TCP checksum done!\n");
1630 }
1631 }
1632 }
1633
1634 qdev->stats.rx_packets++;
1635 qdev->stats.rx_bytes += skb->len;
1636 skb_record_rx_queue(skb,
1637 rx_ring->cq_id - qdev->rss_ring_first_cq_id);
1638 if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
1639 if (qdev->vlgrp &&
1640 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
1641 (vlan_id != 0))
1642 vlan_gro_receive(&rx_ring->napi, qdev->vlgrp,
1643 vlan_id, skb);
1644 else
1645 napi_gro_receive(&rx_ring->napi, skb);
1646 } else {
1647 if (qdev->vlgrp &&
1648 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
1649 (vlan_id != 0))
1650 vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
1651 else
1652 netif_receive_skb(skb);
1653 }
1654}
1655
1656/* Process an outbound completion from an rx ring. */
1657static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
1658 struct ob_mac_iocb_rsp *mac_rsp)
1659{
1660 struct tx_ring *tx_ring;
1661 struct tx_ring_desc *tx_ring_desc;
1662
1663 QL_DUMP_OB_MAC_RSP(mac_rsp);
1664 tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
1665 tx_ring_desc = &tx_ring->q[mac_rsp->tid];
1666 ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
1667 qdev->stats.tx_bytes += (tx_ring_desc->skb)->len;
1668 qdev->stats.tx_packets++;
1669 dev_kfree_skb(tx_ring_desc->skb);
1670 tx_ring_desc->skb = NULL;
1671
1672 if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
1673 OB_MAC_IOCB_RSP_S |
1674 OB_MAC_IOCB_RSP_L |
1675 OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
1676 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
1677 QPRINTK(qdev, TX_DONE, WARNING,
1678 "Total descriptor length did not match transfer length.\n");
1679 }
1680 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
1681 QPRINTK(qdev, TX_DONE, WARNING,
1682 "Frame too short to be legal, not sent.\n");
1683 }
1684 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
1685 QPRINTK(qdev, TX_DONE, WARNING,
1686 "Frame too long, but sent anyway.\n");
1687 }
1688 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
1689 QPRINTK(qdev, TX_DONE, WARNING,
1690 "PCI backplane error. Frame not sent.\n");
1691 }
1692 }
1693 atomic_inc(&tx_ring->tx_count);
1694}
1695
1696/* Fire up a handler to reset the MPI processor. */
1697void ql_queue_fw_error(struct ql_adapter *qdev)
1698{
1699 netif_carrier_off(qdev->ndev);
1700 queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
1701}
1702
1703void ql_queue_asic_error(struct ql_adapter *qdev)
1704{
1705 netif_carrier_off(qdev->ndev);
1706 ql_disable_interrupts(qdev);
1707 /* Clear adapter up bit to signal the recovery
1708 * process that it shouldn't kill the reset worker
1709 * thread
1710 */
1711 clear_bit(QL_ADAPTER_UP, &qdev->flags);
1712 queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
1713}
1714
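/* Handle asynchronous event IOCBs delivered on the completion ring. A fatal
 * MPI error schedules the firmware reset worker; the other events schedule
 * an ASIC reset.
 */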
1715static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
1716 struct ib_ae_iocb_rsp *ib_ae_rsp)
1717{
1718 switch (ib_ae_rsp->event) {
1719 case MGMT_ERR_EVENT:
1720 QPRINTK(qdev, RX_ERR, ERR,
1721 "Management Processor Fatal Error.\n");
1722 ql_queue_fw_error(qdev);
1723 return;
1724
1725 case CAM_LOOKUP_ERR_EVENT:
1726 QPRINTK(qdev, LINK, ERR,
1727 "Multiple CAM hits lookup occurred.\n");
1728 QPRINTK(qdev, DRV, ERR, "This event shouldn't occur.\n");
1729 ql_queue_asic_error(qdev);
1730 return;
1731
1732 case SOFT_ECC_ERROR_EVENT:
1733 QPRINTK(qdev, RX_ERR, ERR, "Soft ECC error detected.\n");
1734 ql_queue_asic_error(qdev);
1735 break;
1736
1737 case PCI_ERR_ANON_BUF_RD:
1738 QPRINTK(qdev, RX_ERR, ERR,
1739 "PCI error occurred when reading anonymous buffers from rx_ring %d.\n",
1740 ib_ae_rsp->q_id);
1741 ql_queue_asic_error(qdev);
1742 break;
1743
1744 default:
1745 QPRINTK(qdev, DRV, ERR, "Unexpected event %d.\n",
1746 ib_ae_rsp->event);
1747 ql_queue_asic_error(qdev);
1748 break;
1749 }
1750}
1751
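/* Drain TX-completion entries from an outbound completion ring, then wake
 * the matching TX subqueue if it had been stopped and is now at least a
 * quarter empty.
 */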
1752static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
1753{
1754 struct ql_adapter *qdev = rx_ring->qdev;
1755 u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
1756 struct ob_mac_iocb_rsp *net_rsp = NULL;
1757 int count = 0;
1758
1759 struct tx_ring *tx_ring;
1760 /* While there are entries in the completion queue. */
1761 while (prod != rx_ring->cnsmr_idx) {
1762
1763 QPRINTK(qdev, RX_STATUS, DEBUG,
1764 "cq_id = %d, prod = %d, cnsmr = %d.\n.", rx_ring->cq_id,
1765 prod, rx_ring->cnsmr_idx);
1766
1767 net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
1768 rmb();
1769 switch (net_rsp->opcode) {
1770
1771 case OPCODE_OB_MAC_TSO_IOCB:
1772 case OPCODE_OB_MAC_IOCB:
1773 ql_process_mac_tx_intr(qdev, net_rsp);
1774 break;
1775 default:
1776 QPRINTK(qdev, RX_STATUS, DEBUG,
1777 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
1778 net_rsp->opcode);
1779 }
1780 count++;
1781 ql_update_cq(rx_ring);
1782 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
1783 }
1784 ql_write_cq_idx(rx_ring);
1785 tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
1786 if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id) &&
1787 net_rsp != NULL) {
1788 if (atomic_read(&tx_ring->queue_stopped) &&
1789 (atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
1790 /*
1791 * The queue got stopped because the tx_ring was full.
1792 * Wake it up, because it's now at least 25% empty.
1793 */
1794 netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
1795 }
1796
1797 return count;
1798}
1799
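/* Service up to 'budget' entries from an inbound completion ring (received
 * frames and chip async events), then refill the buffer queues and update
 * the consumer index.
 */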
1800static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
1801{
1802 struct ql_adapter *qdev = rx_ring->qdev;
1803 u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
1804 struct ql_net_rsp_iocb *net_rsp;
1805 int count = 0;
1806
1807 /* While there are entries in the completion queue. */
1808 while (prod != rx_ring->cnsmr_idx) {
1809
1810 QPRINTK(qdev, RX_STATUS, DEBUG,
1811 "cq_id = %d, prod = %d, cnsmr = %d.\n.", rx_ring->cq_id,
1812 prod, rx_ring->cnsmr_idx);
1813
1814 net_rsp = rx_ring->curr_entry;
1815 rmb();
1816 switch (net_rsp->opcode) {
1817 case OPCODE_IB_MAC_IOCB:
1818 ql_process_mac_rx_intr(qdev, rx_ring,
1819 (struct ib_mac_iocb_rsp *)
1820 net_rsp);
1821 break;
1822
1823 case OPCODE_IB_AE_IOCB:
1824 ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
1825 net_rsp);
1826 break;
1827 default:
1828 {
1829 QPRINTK(qdev, RX_STATUS, DEBUG,
1830 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
1831 net_rsp->opcode);
1832 }
1833 }
1834 count++;
1835 ql_update_cq(rx_ring);
1836 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
1837 if (count == budget)
1838 break;
1839 }
1840 ql_update_buffer_queues(qdev, rx_ring);
1841 ql_write_cq_idx(rx_ring);
1842 return count;
1843}
1844
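/* NAPI poll routine used with MSI-X: clean the inbound ring up to the given
 * budget and, when all work is done, complete NAPI and re-enable this
 * vector's completion interrupt.
 */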
1845static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
1846{
1847 struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
1848 struct ql_adapter *qdev = rx_ring->qdev;
1849 int work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
1850
1851 QPRINTK(qdev, RX_STATUS, DEBUG, "Enter, NAPI POLL cq_id = %d.\n",
1852 rx_ring->cq_id);
1853
1854 if (work_done < budget) {
22bdd4f5 1855 napi_complete(napi);
c4e84bde
RM
1856 ql_enable_completion_interrupt(qdev, rx_ring->irq);
1857 }
1858 return work_done;
1859}
1860
1861static void ql_vlan_rx_register(struct net_device *ndev, struct vlan_group *grp)
1862{
1863 struct ql_adapter *qdev = netdev_priv(ndev);
1864
1865 qdev->vlgrp = grp;
1866 if (grp) {
1867 QPRINTK(qdev, IFUP, DEBUG, "Turning on VLAN in NIC_RCV_CFG.\n");
1868 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
1869 NIC_RCV_CFG_VLAN_MATCH_AND_NON);
1870 } else {
1871 QPRINTK(qdev, IFUP, DEBUG,
1872 "Turning off VLAN in NIC_RCV_CFG.\n");
1873 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
1874 }
1875}
1876
1877static void ql_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
1878{
1879 struct ql_adapter *qdev = netdev_priv(ndev);
1880 u32 enable_bit = MAC_ADDR_E;
cc288f54 1881 int status;
c4e84bde 1882
cc288f54
RM
1883 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
1884 if (status)
1885 return;
c4e84bde
RM
1886 spin_lock(&qdev->hw_lock);
1887 if (ql_set_mac_addr_reg
1888 (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
1889 QPRINTK(qdev, IFUP, ERR, "Failed to init vlan address.\n");
1890 }
1891 spin_unlock(&qdev->hw_lock);
cc288f54 1892 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
c4e84bde
RM
1893}
1894
1895static void ql_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
1896{
1897 struct ql_adapter *qdev = netdev_priv(ndev);
1898 u32 enable_bit = 0;
cc288f54
RM
1899 int status;
1900
1901 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
1902 if (status)
1903 return;
c4e84bde
RM
1904
1905 spin_lock(&qdev->hw_lock);
1906 if (ql_set_mac_addr_reg
1907 (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
1908 QPRINTK(qdev, IFUP, ERR, "Failed to clear vlan address.\n");
1909 }
1910 spin_unlock(&qdev->hw_lock);
cc288f54 1911 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
c4e84bde
RM
1912
1913}
1914
1915/* Worker thread to process a given rx_ring that is dedicated
1916 * to outbound completions.
1917 */
1918static void ql_tx_clean(struct work_struct *work)
1919{
1920 struct rx_ring *rx_ring =
1921 container_of(work, struct rx_ring, rx_work.work);
1922 ql_clean_outbound_rx_ring(rx_ring);
1923 ql_enable_completion_interrupt(rx_ring->qdev, rx_ring->irq);
1924
1925}
1926
1927/* Worker thread to process a given rx_ring that is dedicated
1928 * to inbound completions.
1929 */
1930static void ql_rx_clean(struct work_struct *work)
1931{
1932 struct rx_ring *rx_ring =
1933 container_of(work, struct rx_ring, rx_work.work);
1934 ql_clean_inbound_rx_ring(rx_ring, 64);
1935 ql_enable_completion_interrupt(rx_ring->qdev, rx_ring->irq);
1936}
1937
1938/* MSI-X Multiple Vector Interrupt Handler for outbound completions. */
1939static irqreturn_t qlge_msix_tx_isr(int irq, void *dev_id)
1940{
1941 struct rx_ring *rx_ring = dev_id;
1942 queue_delayed_work_on(rx_ring->cpu, rx_ring->qdev->q_workqueue,
1943 &rx_ring->rx_work, 0);
1944 return IRQ_HANDLED;
1945}
1946
1947/* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
1948static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
1949{
1950 struct rx_ring *rx_ring = dev_id;
288379f0 1951 napi_schedule(&rx_ring->napi);
c4e84bde
RM
1952 return IRQ_HANDLED;
1953}
1954
c4e84bde
RM
1955/* This handles a fatal error, MPI activity, and the default
1956 * rx_ring in an MSI-X multiple vector environment.
1957 * In MSI/Legacy environment it also processes the rest of
1958 * the rx_rings.
1959 */
1960static irqreturn_t qlge_isr(int irq, void *dev_id)
1961{
1962 struct rx_ring *rx_ring = dev_id;
1963 struct ql_adapter *qdev = rx_ring->qdev;
1964 struct intr_context *intr_context = &qdev->intr_context[0];
1965 u32 var;
1966 int i;
1967 int work_done = 0;
1968
bb0d215c
RM
1969 spin_lock(&qdev->hw_lock);
1970 if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
1971 QPRINTK(qdev, INTR, DEBUG, "Shared Interrupt, Not ours!\n");
1972 spin_unlock(&qdev->hw_lock);
1973 return IRQ_NONE;
c4e84bde 1974 }
bb0d215c 1975 spin_unlock(&qdev->hw_lock);
c4e84bde 1976
bb0d215c 1977 var = ql_disable_completion_interrupt(qdev, intr_context->intr);
c4e84bde
RM
1978
1979 /*
1980 * Check for fatal error.
1981 */
1982 if (var & STS_FE) {
1983 ql_queue_asic_error(qdev);
1984 QPRINTK(qdev, INTR, ERR, "Got fatal error, STS = %x.\n", var);
1985 var = ql_read32(qdev, ERR_STS);
1986 QPRINTK(qdev, INTR, ERR,
1987 "Resetting chip. Error Status Register = 0x%x\n", var);
1988 return IRQ_HANDLED;
1989 }
1990
1991 /*
1992 * Check MPI processor activity.
1993 */
1994 if (var & STS_PI) {
1995 /*
1996 * We've got an async event or mailbox completion.
1997 * Handle it and clear the source of the interrupt.
1998 */
1999 QPRINTK(qdev, INTR, ERR, "Got MPI processor interrupt.\n");
2000 ql_disable_completion_interrupt(qdev, intr_context->intr);
2001 queue_delayed_work_on(smp_processor_id(), qdev->workqueue,
2002 &qdev->mpi_work, 0);
2003 work_done++;
2004 }
2005
2006 /*
2007 * Check the default queue and wake handler if active.
2008 */
2009 rx_ring = &qdev->rx_ring[0];
ba7cd3ba 2010 if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) != rx_ring->cnsmr_idx) {
c4e84bde
RM
2011 QPRINTK(qdev, INTR, INFO, "Waking handler for rx_ring[0].\n");
2012 ql_disable_completion_interrupt(qdev, intr_context->intr);
2013 queue_delayed_work_on(smp_processor_id(), qdev->q_workqueue,
2014 &rx_ring->rx_work, 0);
2015 work_done++;
2016 }
2017
2018 if (!test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
2019 /*
2020 * Start the DPC for each active queue.
2021 */
2022 for (i = 1; i < qdev->rx_ring_count; i++) {
2023 rx_ring = &qdev->rx_ring[i];
ba7cd3ba 2024 if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
c4e84bde
RM
2025 rx_ring->cnsmr_idx) {
2026 QPRINTK(qdev, INTR, INFO,
2027 "Waking handler for rx_ring[%d].\n", i);
2028 ql_disable_completion_interrupt(qdev,
2029 intr_context->
2030 intr);
2031 if (i < qdev->rss_ring_first_cq_id)
2032 queue_delayed_work_on(rx_ring->cpu,
2033 qdev->q_workqueue,
2034 &rx_ring->rx_work,
2035 0);
2036 else
288379f0 2037 napi_schedule(&rx_ring->napi);
c4e84bde
RM
2038 work_done++;
2039 }
2040 }
2041 }
bb0d215c 2042 ql_enable_completion_interrupt(qdev, intr_context->intr);
c4e84bde
RM
2043 return work_done ? IRQ_HANDLED : IRQ_NONE;
2044}
2045
2046static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2047{
2048
2049 if (skb_is_gso(skb)) {
2050 int err;
2051 if (skb_header_cloned(skb)) {
2052 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2053 if (err)
2054 return err;
2055 }
2056
2057 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2058 mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
2059 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2060 mac_iocb_ptr->total_hdrs_len =
2061 cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
2062 mac_iocb_ptr->net_trans_offset =
2063 cpu_to_le16(skb_network_offset(skb) |
2064 skb_transport_offset(skb)
2065 << OB_MAC_TRANSPORT_HDR_SHIFT);
2066 mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2067 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
2068 if (likely(skb->protocol == htons(ETH_P_IP))) {
2069 struct iphdr *iph = ip_hdr(skb);
2070 iph->check = 0;
2071 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2072 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2073 iph->daddr, 0,
2074 IPPROTO_TCP,
2075 0);
2076 } else if (skb->protocol == htons(ETH_P_IPV6)) {
2077 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
2078 tcp_hdr(skb)->check =
2079 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2080 &ipv6_hdr(skb)->daddr,
2081 0, IPPROTO_TCP, 0);
2082 }
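/*
 * Editorial note (behavior assumed from common LSO practice, not stated in
 * this file): the TCP checksum field is seeded above with the pseudo-header
 * sum computed over a zero length, e.g.
 * ~csum_tcpudp_magic(saddr, daddr, 0, IPPROTO_TCP, 0), so the hardware can
 * add the per-segment length and payload checksum as it cuts the frame into
 * MSS-sized segments.
 */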
2083 return 1;
2084 }
2085 return 0;
2086}
2087
2088static void ql_hw_csum_setup(struct sk_buff *skb,
2089 struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2090{
2091 int len;
2092 struct iphdr *iph = ip_hdr(skb);
fd2df4f7 2093 __sum16 *check;
c4e84bde
RM
2094 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2095 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2096 mac_iocb_ptr->net_trans_offset =
2097 cpu_to_le16(skb_network_offset(skb) |
2098 skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
2099
2100 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2101 len = (ntohs(iph->tot_len) - (iph->ihl << 2));
2102 if (likely(iph->protocol == IPPROTO_TCP)) {
2103 check = &(tcp_hdr(skb)->check);
2104 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
2105 mac_iocb_ptr->total_hdrs_len =
2106 cpu_to_le16(skb_transport_offset(skb) +
2107 (tcp_hdr(skb)->doff << 2));
2108 } else {
2109 check = &(udp_hdr(skb)->check);
2110 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
2111 mac_iocb_ptr->total_hdrs_len =
2112 cpu_to_le16(skb_transport_offset(skb) +
2113 sizeof(struct udphdr));
2114 }
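/*
 * Editorial note: for non-TSO checksum offload the L4 checksum field is
 * seeded below with the pseudo-header sum over the real payload length
 * (len = tot_len - IP header length); the TC/UC flag set above tells the
 * chip which transport checksum to finish.
 */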
2115 *check = ~csum_tcpudp_magic(iph->saddr,
2116 iph->daddr, len, iph->protocol, 0);
2117}
2118
2119static int qlge_send(struct sk_buff *skb, struct net_device *ndev)
2120{
2121 struct tx_ring_desc *tx_ring_desc;
2122 struct ob_mac_iocb_req *mac_iocb_ptr;
2123 struct ql_adapter *qdev = netdev_priv(ndev);
2124 int tso;
2125 struct tx_ring *tx_ring;
1e213303 2126 u32 tx_ring_idx = (u32) skb->queue_mapping;
c4e84bde
RM
2127
2128 tx_ring = &qdev->tx_ring[tx_ring_idx];
2129
74c50b4b
RM
2130 if (skb_padto(skb, ETH_ZLEN))
2131 return NETDEV_TX_OK;
2132
c4e84bde
RM
2133 if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
2134 QPRINTK(qdev, TX_QUEUED, INFO,
2135 "%s: shutting down tx queue %d du to lack of resources.\n",
2136 __func__, tx_ring_idx);
1e213303 2137 netif_stop_subqueue(ndev, tx_ring->wq_id);
c4e84bde
RM
2138 atomic_inc(&tx_ring->queue_stopped);
2139 return NETDEV_TX_BUSY;
2140 }
2141 tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
2142 mac_iocb_ptr = tx_ring_desc->queue_entry;
2143 memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
c4e84bde
RM
2144
2145 mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
2146 mac_iocb_ptr->tid = tx_ring_desc->index;
2147 /* We use the upper 32-bits to store the tx queue for this IO.
2148 * When we get the completion we can use it to establish the context.
2149 */
2150 mac_iocb_ptr->txq_idx = tx_ring_idx;
2151 tx_ring_desc->skb = skb;
2152
2153 mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
2154
2155 if (qdev->vlgrp && vlan_tx_tag_present(skb)) {
2156 QPRINTK(qdev, TX_QUEUED, DEBUG, "Adding a vlan tag %d.\n",
2157 vlan_tx_tag_get(skb));
2158 mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
2159 mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb));
2160 }
2161 tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2162 if (tso < 0) {
2163 dev_kfree_skb_any(skb);
2164 return NETDEV_TX_OK;
2165 } else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
2166 ql_hw_csum_setup(skb,
2167 (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2168 }
0d979f74
RM
2169 if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
2170 NETDEV_TX_OK) {
2171 QPRINTK(qdev, TX_QUEUED, ERR,
2172 "Could not map the segments.\n");
2173 return NETDEV_TX_BUSY;
2174 }
c4e84bde
RM
2175 QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
2176 tx_ring->prod_idx++;
2177 if (tx_ring->prod_idx == tx_ring->wq_len)
2178 tx_ring->prod_idx = 0;
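/*
 * Descriptive note: the wmb() below makes sure the IOCB contents are
 * globally visible before the doorbell write publishes the new
 * producer index to the chip.
 */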
2179 wmb();
2180
2181 ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
c4e84bde
RM
2182 QPRINTK(qdev, TX_QUEUED, DEBUG, "tx queued, slot %d, len %d\n",
2183 tx_ring->prod_idx, skb->len);
2184
2185 atomic_dec(&tx_ring->tx_count);
2186 return NETDEV_TX_OK;
2187}
2188
2189static void ql_free_shadow_space(struct ql_adapter *qdev)
2190{
2191 if (qdev->rx_ring_shadow_reg_area) {
2192 pci_free_consistent(qdev->pdev,
2193 PAGE_SIZE,
2194 qdev->rx_ring_shadow_reg_area,
2195 qdev->rx_ring_shadow_reg_dma);
2196 qdev->rx_ring_shadow_reg_area = NULL;
2197 }
2198 if (qdev->tx_ring_shadow_reg_area) {
2199 pci_free_consistent(qdev->pdev,
2200 PAGE_SIZE,
2201 qdev->tx_ring_shadow_reg_area,
2202 qdev->tx_ring_shadow_reg_dma);
2203 qdev->tx_ring_shadow_reg_area = NULL;
2204 }
2205}
2206
2207static int ql_alloc_shadow_space(struct ql_adapter *qdev)
2208{
2209 qdev->rx_ring_shadow_reg_area =
2210 pci_alloc_consistent(qdev->pdev,
2211 PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma);
2212 if (qdev->rx_ring_shadow_reg_area == NULL) {
2213 QPRINTK(qdev, IFUP, ERR,
2214 "Allocation of RX shadow space failed.\n");
2215 return -ENOMEM;
2216 }
b25215d0 2217 memset(qdev->rx_ring_shadow_reg_area, 0, PAGE_SIZE);
c4e84bde
RM
2218 qdev->tx_ring_shadow_reg_area =
2219 pci_alloc_consistent(qdev->pdev, PAGE_SIZE,
2220 &qdev->tx_ring_shadow_reg_dma);
2221 if (qdev->tx_ring_shadow_reg_area == NULL) {
2222 QPRINTK(qdev, IFUP, ERR,
2223 "Allocation of TX shadow space failed.\n");
2224 goto err_wqp_sh_area;
2225 }
b25215d0 2226 memset(qdev->tx_ring_shadow_reg_area, 0, PAGE_SIZE);
c4e84bde
RM
2227 return 0;
2228
2229err_wqp_sh_area:
2230 pci_free_consistent(qdev->pdev,
2231 PAGE_SIZE,
2232 qdev->rx_ring_shadow_reg_area,
2233 qdev->rx_ring_shadow_reg_dma);
2234 return -ENOMEM;
2235}
2236
2237static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2238{
2239 struct tx_ring_desc *tx_ring_desc;
2240 int i;
2241 struct ob_mac_iocb_req *mac_iocb_ptr;
2242
2243 mac_iocb_ptr = tx_ring->wq_base;
2244 tx_ring_desc = tx_ring->q;
2245 for (i = 0; i < tx_ring->wq_len; i++) {
2246 tx_ring_desc->index = i;
2247 tx_ring_desc->skb = NULL;
2248 tx_ring_desc->queue_entry = mac_iocb_ptr;
2249 mac_iocb_ptr++;
2250 tx_ring_desc++;
2251 }
2252 atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
2253 atomic_set(&tx_ring->queue_stopped, 0);
2254}
2255
2256static void ql_free_tx_resources(struct ql_adapter *qdev,
2257 struct tx_ring *tx_ring)
2258{
2259 if (tx_ring->wq_base) {
2260 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2261 tx_ring->wq_base, tx_ring->wq_base_dma);
2262 tx_ring->wq_base = NULL;
2263 }
2264 kfree(tx_ring->q);
2265 tx_ring->q = NULL;
2266}
2267
2268static int ql_alloc_tx_resources(struct ql_adapter *qdev,
2269 struct tx_ring *tx_ring)
2270{
2271 tx_ring->wq_base =
2272 pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
2273 &tx_ring->wq_base_dma);
2274
2275 if ((tx_ring->wq_base == NULL)
88c55e3c 2276 || tx_ring->wq_base_dma & WQ_ADDR_ALIGN) {
c4e84bde
RM
2277 QPRINTK(qdev, IFUP, ERR, "tx_ring alloc failed.\n");
2278 return -ENOMEM;
2279 }
2280 tx_ring->q =
2281 kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
2282 if (tx_ring->q == NULL)
2283 goto err;
2284
2285 return 0;
2286err:
2287 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2288 tx_ring->wq_base, tx_ring->wq_base_dma);
2289 return -ENOMEM;
2290}
2291
8668ae92 2292static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
c4e84bde
RM
2293{
2294 int i;
2295 struct bq_desc *lbq_desc;
2296
2297 for (i = 0; i < rx_ring->lbq_len; i++) {
2298 lbq_desc = &rx_ring->lbq[i];
2299 if (lbq_desc->p.lbq_page) {
2300 pci_unmap_page(qdev->pdev,
2301 pci_unmap_addr(lbq_desc, mapaddr),
2302 pci_unmap_len(lbq_desc, maplen),
2303 PCI_DMA_FROMDEVICE);
2304
2305 put_page(lbq_desc->p.lbq_page);
2306 lbq_desc->p.lbq_page = NULL;
2307 }
c4e84bde
RM
2308 }
2309}
2310
8668ae92 2311static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
c4e84bde
RM
2312{
2313 int i;
2314 struct bq_desc *sbq_desc;
2315
2316 for (i = 0; i < rx_ring->sbq_len; i++) {
2317 sbq_desc = &rx_ring->sbq[i];
2318 if (sbq_desc == NULL) {
2319 QPRINTK(qdev, IFUP, ERR, "sbq_desc %d is NULL.\n", i);
2320 return;
2321 }
2322 if (sbq_desc->p.skb) {
2323 pci_unmap_single(qdev->pdev,
2324 pci_unmap_addr(sbq_desc, mapaddr),
2325 pci_unmap_len(sbq_desc, maplen),
2326 PCI_DMA_FROMDEVICE);
2327 dev_kfree_skb(sbq_desc->p.skb);
2328 sbq_desc->p.skb = NULL;
2329 }
c4e84bde
RM
2330 }
2331}
2332
4545a3f2
RM
2333/* Free all large and small rx buffers associated
2334 * with the completion queues for this device.
2335 */
2336static void ql_free_rx_buffers(struct ql_adapter *qdev)
2337{
2338 int i;
2339 struct rx_ring *rx_ring;
2340
2341 for (i = 0; i < qdev->rx_ring_count; i++) {
2342 rx_ring = &qdev->rx_ring[i];
2343 if (rx_ring->lbq)
2344 ql_free_lbq_buffers(qdev, rx_ring);
2345 if (rx_ring->sbq)
2346 ql_free_sbq_buffers(qdev, rx_ring);
2347 }
2348}
2349
2350static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
2351{
2352 struct rx_ring *rx_ring;
2353 int i;
2354
2355 for (i = 0; i < qdev->rx_ring_count; i++) {
2356 rx_ring = &qdev->rx_ring[i];
2357 if (rx_ring->type != TX_Q)
2358 ql_update_buffer_queues(qdev, rx_ring);
2359 }
2360}
2361
2362static void ql_init_lbq_ring(struct ql_adapter *qdev,
2363 struct rx_ring *rx_ring)
2364{
2365 int i;
2366 struct bq_desc *lbq_desc;
2367 __le64 *bq = rx_ring->lbq_base;
2368
2369 memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc));
2370 for (i = 0; i < rx_ring->lbq_len; i++) {
2371 lbq_desc = &rx_ring->lbq[i];
2372 memset(lbq_desc, 0, sizeof(*lbq_desc));
2373 lbq_desc->index = i;
2374 lbq_desc->addr = bq;
2375 bq++;
2376 }
2377}
2378
2379static void ql_init_sbq_ring(struct ql_adapter *qdev,
c4e84bde
RM
2380 struct rx_ring *rx_ring)
2381{
2382 int i;
2383 struct bq_desc *sbq_desc;
2c9a0d41 2384 __le64 *bq = rx_ring->sbq_base;
c4e84bde 2385
4545a3f2 2386 memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc));
c4e84bde
RM
2387 for (i = 0; i < rx_ring->sbq_len; i++) {
2388 sbq_desc = &rx_ring->sbq[i];
4545a3f2 2389 memset(sbq_desc, 0, sizeof(*sbq_desc));
c4e84bde 2390 sbq_desc->index = i;
2c9a0d41 2391 sbq_desc->addr = bq;
c4e84bde
RM
2392 bq++;
2393 }
c4e84bde
RM
2394}
2395
2396static void ql_free_rx_resources(struct ql_adapter *qdev,
2397 struct rx_ring *rx_ring)
2398{
c4e84bde
RM
2399 /* Free the small buffer queue. */
2400 if (rx_ring->sbq_base) {
2401 pci_free_consistent(qdev->pdev,
2402 rx_ring->sbq_size,
2403 rx_ring->sbq_base, rx_ring->sbq_base_dma);
2404 rx_ring->sbq_base = NULL;
2405 }
2406
2407 /* Free the small buffer queue control blocks. */
2408 kfree(rx_ring->sbq);
2409 rx_ring->sbq = NULL;
2410
2411 /* Free the large buffer queue. */
2412 if (rx_ring->lbq_base) {
2413 pci_free_consistent(qdev->pdev,
2414 rx_ring->lbq_size,
2415 rx_ring->lbq_base, rx_ring->lbq_base_dma);
2416 rx_ring->lbq_base = NULL;
2417 }
2418
2419 /* Free the large buffer queue control blocks. */
2420 kfree(rx_ring->lbq);
2421 rx_ring->lbq = NULL;
2422
2423 /* Free the rx queue. */
2424 if (rx_ring->cq_base) {
2425 pci_free_consistent(qdev->pdev,
2426 rx_ring->cq_size,
2427 rx_ring->cq_base, rx_ring->cq_base_dma);
2428 rx_ring->cq_base = NULL;
2429 }
2430}
2431
2432/* Allocate queues and buffers for this completions queue based
2433 * on the values in the parameter structure. */
2434static int ql_alloc_rx_resources(struct ql_adapter *qdev,
2435 struct rx_ring *rx_ring)
2436{
2437
2438 /*
2439 * Allocate the completion queue for this rx_ring.
2440 */
2441 rx_ring->cq_base =
2442 pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
2443 &rx_ring->cq_base_dma);
2444
2445 if (rx_ring->cq_base == NULL) {
2446 QPRINTK(qdev, IFUP, ERR, "rx_ring alloc failed.\n");
2447 return -ENOMEM;
2448 }
2449
2450 if (rx_ring->sbq_len) {
2451 /*
2452 * Allocate small buffer queue.
2453 */
2454 rx_ring->sbq_base =
2455 pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
2456 &rx_ring->sbq_base_dma);
2457
2458 if (rx_ring->sbq_base == NULL) {
2459 QPRINTK(qdev, IFUP, ERR,
2460 "Small buffer queue allocation failed.\n");
2461 goto err_mem;
2462 }
2463
2464 /*
2465 * Allocate small buffer queue control blocks.
2466 */
2467 rx_ring->sbq =
2468 kmalloc(rx_ring->sbq_len * sizeof(struct bq_desc),
2469 GFP_KERNEL);
2470 if (rx_ring->sbq == NULL) {
2471 QPRINTK(qdev, IFUP, ERR,
2472 "Small buffer queue control block allocation failed.\n");
2473 goto err_mem;
2474 }
2475
4545a3f2 2476 ql_init_sbq_ring(qdev, rx_ring);
c4e84bde
RM
2477 }
2478
2479 if (rx_ring->lbq_len) {
2480 /*
2481 * Allocate large buffer queue.
2482 */
2483 rx_ring->lbq_base =
2484 pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
2485 &rx_ring->lbq_base_dma);
2486
2487 if (rx_ring->lbq_base == NULL) {
2488 QPRINTK(qdev, IFUP, ERR,
2489 "Large buffer queue allocation failed.\n");
2490 goto err_mem;
2491 }
2492 /*
2493 * Allocate large buffer queue control blocks.
2494 */
2495 rx_ring->lbq =
2496 kmalloc(rx_ring->lbq_len * sizeof(struct bq_desc),
2497 GFP_KERNEL);
2498 if (rx_ring->lbq == NULL) {
2499 QPRINTK(qdev, IFUP, ERR,
2500 "Large buffer queue control block allocation failed.\n");
2501 goto err_mem;
2502 }
2503
4545a3f2 2504 ql_init_lbq_ring(qdev, rx_ring);
c4e84bde
RM
2505 }
2506
2507 return 0;
2508
2509err_mem:
2510 ql_free_rx_resources(qdev, rx_ring);
2511 return -ENOMEM;
2512}
2513
2514static void ql_tx_ring_clean(struct ql_adapter *qdev)
2515{
2516 struct tx_ring *tx_ring;
2517 struct tx_ring_desc *tx_ring_desc;
2518 int i, j;
2519
2520 /*
2521 * Loop through all queues and free
2522 * any resources.
2523 */
2524 for (j = 0; j < qdev->tx_ring_count; j++) {
2525 tx_ring = &qdev->tx_ring[j];
2526 for (i = 0; i < tx_ring->wq_len; i++) {
2527 tx_ring_desc = &tx_ring->q[i];
2528 if (tx_ring_desc && tx_ring_desc->skb) {
2529 QPRINTK(qdev, IFDOWN, ERR,
2530 "Freeing lost SKB %p, from queue %d, index %d.\n",
2531 tx_ring_desc->skb, j,
2532 tx_ring_desc->index);
2533 ql_unmap_send(qdev, tx_ring_desc,
2534 tx_ring_desc->map_cnt);
2535 dev_kfree_skb(tx_ring_desc->skb);
2536 tx_ring_desc->skb = NULL;
2537 }
2538 }
2539 }
2540}
2541
c4e84bde
RM
2542static void ql_free_mem_resources(struct ql_adapter *qdev)
2543{
2544 int i;
2545
2546 for (i = 0; i < qdev->tx_ring_count; i++)
2547 ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
2548 for (i = 0; i < qdev->rx_ring_count; i++)
2549 ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
2550 ql_free_shadow_space(qdev);
2551}
2552
2553static int ql_alloc_mem_resources(struct ql_adapter *qdev)
2554{
2555 int i;
2556
2557 /* Allocate space for our shadow registers and such. */
2558 if (ql_alloc_shadow_space(qdev))
2559 return -ENOMEM;
2560
2561 for (i = 0; i < qdev->rx_ring_count; i++) {
2562 if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
2563 QPRINTK(qdev, IFUP, ERR,
2564 "RX resource allocation failed.\n");
2565 goto err_mem;
2566 }
2567 }
2568 /* Allocate tx queue resources */
2569 for (i = 0; i < qdev->tx_ring_count; i++) {
2570 if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
2571 QPRINTK(qdev, IFUP, ERR,
2572 "TX resource allocation failed.\n");
2573 goto err_mem;
2574 }
2575 }
2576 return 0;
2577
2578err_mem:
2579 ql_free_mem_resources(qdev);
2580 return -ENOMEM;
2581}
2582
2583/* Set up the rx ring control block and pass it to the chip.
2584 * The control block is defined as
2585 * "Completion Queue Initialization Control Block", or cqicb.
2586 */
2587static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2588{
2589 struct cqicb *cqicb = &rx_ring->cqicb;
2590 void *shadow_reg = qdev->rx_ring_shadow_reg_area +
b8facca0 2591 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
c4e84bde 2592 u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
b8facca0 2593 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
c4e84bde
RM
2594 void __iomem *doorbell_area =
2595 qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
2596 int err = 0;
2597 u16 bq_len;
d4a4aba6 2598 u64 tmp;
b8facca0
RM
2599 __le64 *base_indirect_ptr;
2600 int page_entries;
c4e84bde
RM
2601
2602 /* Set up the shadow registers for this ring. */
2603 rx_ring->prod_idx_sh_reg = shadow_reg;
2604 rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
2605 shadow_reg += sizeof(u64);
2606 shadow_reg_dma += sizeof(u64);
2607 rx_ring->lbq_base_indirect = shadow_reg;
2608 rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
b8facca0
RM
2609 shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
2610 shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
c4e84bde
RM
2611 rx_ring->sbq_base_indirect = shadow_reg;
2612 rx_ring->sbq_base_indirect_dma = shadow_reg_dma;
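/*
 * Editorial sketch of this ring's slice of the shadow area, derived from
 * the pointer arithmetic above:
 *   +0                                     producer index shadow (8 bytes)
 *   +8                                     lbq base-address indirection list
 *   +8 + 8 * MAX_DB_PAGES_PER_BQ(lbq_len)  sbq base-address indirection list
 */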
2613
2614 /* PCI doorbell mem area + 0x00 for consumer index register */
8668ae92 2615 rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
c4e84bde
RM
2616 rx_ring->cnsmr_idx = 0;
2617 rx_ring->curr_entry = rx_ring->cq_base;
2618
2619 /* PCI doorbell mem area + 0x04 for valid register */
2620 rx_ring->valid_db_reg = doorbell_area + 0x04;
2621
2622 /* PCI doorbell mem area + 0x18 for large buffer consumer */
8668ae92 2623 rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);
c4e84bde
RM
2624
2625 /* PCI doorbell mem area + 0x1c */
8668ae92 2626 rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);
c4e84bde
RM
2627
2628 memset((void *)cqicb, 0, sizeof(struct cqicb));
2629 cqicb->msix_vect = rx_ring->irq;
2630
459caf5a
RM
2631 bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
2632 cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);
c4e84bde 2633
97345524 2634 cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);
c4e84bde 2635
97345524 2636 cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);
c4e84bde
RM
2637
2638 /*
2639 * Set up the control block load flags.
2640 */
2641 cqicb->flags = FLAGS_LC | /* Load queue base address */
2642 FLAGS_LV | /* Load MSI-X vector */
2643 FLAGS_LI; /* Load irq delay values */
2644 if (rx_ring->lbq_len) {
2645 cqicb->flags |= FLAGS_LL; /* Load lbq values */
d4a4aba6 2646 tmp = (u64)rx_ring->lbq_base_dma;
b8facca0
RM
2647 base_indirect_ptr = (__le64 *) rx_ring->lbq_base_indirect;
2648 page_entries = 0;
2649 do {
2650 *base_indirect_ptr = cpu_to_le64(tmp);
2651 tmp += DB_PAGE_SIZE;
2652 base_indirect_ptr++;
2653 page_entries++;
2654 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
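/*
 * Worked example (sizes assumed for illustration): with lbq_len = 512
 * entries of 8 bytes each and DB_PAGE_SIZE = 4096, the queue occupies
 * exactly one doorbell page, so the loop above fills in a single
 * indirection entry; larger queues get one entry per DB_PAGE_SIZE chunk.
 */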
97345524
RM
2655 cqicb->lbq_addr =
2656 cpu_to_le64(rx_ring->lbq_base_indirect_dma);
459caf5a
RM
2657 bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
2658 (u16) rx_ring->lbq_buf_size;
2659 cqicb->lbq_buf_size = cpu_to_le16(bq_len);
2660 bq_len = (rx_ring->lbq_len == 65536) ? 0 :
2661 (u16) rx_ring->lbq_len;
c4e84bde 2662 cqicb->lbq_len = cpu_to_le16(bq_len);
4545a3f2 2663 rx_ring->lbq_prod_idx = 0;
c4e84bde 2664 rx_ring->lbq_curr_idx = 0;
4545a3f2
RM
2665 rx_ring->lbq_clean_idx = 0;
2666 rx_ring->lbq_free_cnt = rx_ring->lbq_len;
c4e84bde
RM
2667 }
2668 if (rx_ring->sbq_len) {
2669 cqicb->flags |= FLAGS_LS; /* Load sbq values */
d4a4aba6 2670 tmp = (u64)rx_ring->sbq_base_dma;
b8facca0
RM
2671 base_indirect_ptr = (__le64 *) rx_ring->sbq_base_indirect;
2672 page_entries = 0;
2673 do {
2674 *base_indirect_ptr = cpu_to_le64(tmp);
2675 tmp += DB_PAGE_SIZE;
2676 base_indirect_ptr++;
2677 page_entries++;
2678 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len));
97345524
RM
2679 cqicb->sbq_addr =
2680 cpu_to_le64(rx_ring->sbq_base_indirect_dma);
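/*
 * Descriptive note: sbq_buf_size is carried as twice the actual small
 * buffer size (ql_configure_rings() sets it to SMALL_BUFFER_SIZE * 2),
 * so it is halved here before being handed to the chip.
 */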
c4e84bde 2681 cqicb->sbq_buf_size =
d4a4aba6 2682 cpu_to_le16((u16)(rx_ring->sbq_buf_size/2));
459caf5a
RM
2683 bq_len = (rx_ring->sbq_len == 65536) ? 0 :
2684 (u16) rx_ring->sbq_len;
c4e84bde 2685 cqicb->sbq_len = cpu_to_le16(bq_len);
4545a3f2 2686 rx_ring->sbq_prod_idx = 0;
c4e84bde 2687 rx_ring->sbq_curr_idx = 0;
4545a3f2
RM
2688 rx_ring->sbq_clean_idx = 0;
2689 rx_ring->sbq_free_cnt = rx_ring->sbq_len;
c4e84bde
RM
2690 }
2691 switch (rx_ring->type) {
2692 case TX_Q:
2693 /* If there's only one interrupt, then we use
2694 * worker threads to process the outbound
2695 * completion handling rx_rings. We do this so
2696 * they can be run on multiple CPUs. There is
2697 * room to play with this more where we would only
2698 * run in a worker if there are more than x number
2699 * of outbound completions on the queue and more
2700 * than one queue active. Some threshold that
2701 * would indicate a benefit in spite of the cost
2702 * of a context switch.
2703 * If there's more than one interrupt, then the
2704 * outbound completions are processed in the ISR.
2705 */
2706 if (!test_bit(QL_MSIX_ENABLED, &qdev->flags))
2707 INIT_DELAYED_WORK(&rx_ring->rx_work, ql_tx_clean);
2708 else {
2709 /* With all debug warnings on we see a WARN_ON message
2710 * when we free the skb in the interrupt context.
2711 */
2712 INIT_DELAYED_WORK(&rx_ring->rx_work, ql_tx_clean);
2713 }
2714 cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
2715 cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
2716 break;
2717 case DEFAULT_Q:
2718 INIT_DELAYED_WORK(&rx_ring->rx_work, ql_rx_clean);
2719 cqicb->irq_delay = 0;
2720 cqicb->pkt_delay = 0;
2721 break;
2722 case RX_Q:
2723 /* Inbound completion handling rx_rings run in
2724 * separate NAPI contexts.
2725 */
2726 netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
2727 64);
2728 cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
2729 cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
2730 break;
2731 default:
2732 QPRINTK(qdev, IFUP, DEBUG, "Invalid rx_ring->type = %d.\n",
2733 rx_ring->type);
2734 }
4974097a 2735 QPRINTK(qdev, IFUP, DEBUG, "Initializing rx work queue.\n");
c4e84bde
RM
2736 err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
2737 CFG_LCQ, rx_ring->cq_id);
2738 if (err) {
2739 QPRINTK(qdev, IFUP, ERR, "Failed to load CQICB.\n");
2740 return err;
2741 }
c4e84bde
RM
2742 return err;
2743}
2744
2745static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2746{
2747 struct wqicb *wqicb = (struct wqicb *)tx_ring;
2748 void __iomem *doorbell_area =
2749 qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
2750 void *shadow_reg = qdev->tx_ring_shadow_reg_area +
2751 (tx_ring->wq_id * sizeof(u64));
2752 u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
2753 (tx_ring->wq_id * sizeof(u64));
2754 int err = 0;
2755
2756 /*
2757 * Assign doorbell registers for this tx_ring.
2758 */
2759 /* TX PCI doorbell mem area for tx producer index */
8668ae92 2760 tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
c4e84bde
RM
2761 tx_ring->prod_idx = 0;
2762 /* TX PCI doorbell mem area + 0x04 */
2763 tx_ring->valid_db_reg = doorbell_area + 0x04;
2764
2765 /*
2766 * Assign shadow registers for this tx_ring.
2767 */
2768 tx_ring->cnsmr_idx_sh_reg = shadow_reg;
2769 tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
2770
2771 wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
2772 wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
2773 Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
2774 wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
2775 wqicb->rid = 0;
97345524 2776 wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);
c4e84bde 2777
97345524 2778 wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);
c4e84bde
RM
2779
2780 ql_init_tx_ring(qdev, tx_ring);
2781
2782 err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
2783 (u16) tx_ring->wq_id);
2784 if (err) {
2785 QPRINTK(qdev, IFUP, ERR, "Failed to load tx_ring.\n");
2786 return err;
2787 }
4974097a 2788 QPRINTK(qdev, IFUP, DEBUG, "Successfully loaded WQICB.\n");
c4e84bde
RM
2789 return err;
2790}
2791
2792static void ql_disable_msix(struct ql_adapter *qdev)
2793{
2794 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
2795 pci_disable_msix(qdev->pdev);
2796 clear_bit(QL_MSIX_ENABLED, &qdev->flags);
2797 kfree(qdev->msi_x_entry);
2798 qdev->msi_x_entry = NULL;
2799 } else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
2800 pci_disable_msi(qdev->pdev);
2801 clear_bit(QL_MSI_ENABLED, &qdev->flags);
2802 }
2803}
2804
2805static void ql_enable_msix(struct ql_adapter *qdev)
2806{
2807 int i;
2808
2809 qdev->intr_count = 1;
2810 /* Get the MSIX vectors. */
2811 if (irq_type == MSIX_IRQ) {
2812 /* Try to alloc space for the msix struct,
2813 * if it fails then go to MSI/legacy.
2814 */
2815 qdev->msi_x_entry = kcalloc(qdev->rx_ring_count,
2816 sizeof(struct msix_entry),
2817 GFP_KERNEL);
2818 if (!qdev->msi_x_entry) {
2819 irq_type = MSI_IRQ;
2820 goto msi;
2821 }
2822
2823 for (i = 0; i < qdev->rx_ring_count; i++)
2824 qdev->msi_x_entry[i].entry = i;
2825
2826 if (!pci_enable_msix
2827 (qdev->pdev, qdev->msi_x_entry, qdev->rx_ring_count)) {
2828 set_bit(QL_MSIX_ENABLED, &qdev->flags);
2829 qdev->intr_count = qdev->rx_ring_count;
4974097a 2830 QPRINTK(qdev, IFUP, DEBUG,
c4e84bde
RM
2831 "MSI-X Enabled, got %d vectors.\n",
2832 qdev->intr_count);
2833 return;
2834 } else {
2835 kfree(qdev->msi_x_entry);
2836 qdev->msi_x_entry = NULL;
2837 QPRINTK(qdev, IFUP, WARNING,
2838 "MSI-X Enable failed, trying MSI.\n");
2839 irq_type = MSI_IRQ;
2840 }
2841 }
2842msi:
2843 if (irq_type == MSI_IRQ) {
2844 if (!pci_enable_msi(qdev->pdev)) {
2845 set_bit(QL_MSI_ENABLED, &qdev->flags);
2846 QPRINTK(qdev, IFUP, INFO,
2847 "Running with MSI interrupts.\n");
2848 return;
2849 }
2850 }
2851 irq_type = LEG_IRQ;
c4e84bde
RM
2852 QPRINTK(qdev, IFUP, DEBUG, "Running with legacy interrupts.\n");
2853}
2854
2855/*
2856 * Here we build the intr_context structures based on
2857 * our rx_ring count and intr vector count.
2858 * The intr_context structure is used to hook each vector
2859 * to possibly different handlers.
2860 */
2861static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
2862{
2863 int i = 0;
2864 struct intr_context *intr_context = &qdev->intr_context[0];
2865
2866 ql_enable_msix(qdev);
2867
2868 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
2869 /* Each rx_ring has its
2870 * own intr_context since we have separate
2871 * vectors for each queue.
2872 * This is only true when MSI-X is enabled.
2873 */
2874 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
2875 qdev->rx_ring[i].irq = i;
2876 intr_context->intr = i;
2877 intr_context->qdev = qdev;
2878 /*
2879 * We set up each vector's enable/disable/read bits so
2880 * there are no bit/mask calculations in the critical path.
2881 */
2882 intr_context->intr_en_mask =
2883 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
2884 INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
2885 | i;
2886 intr_context->intr_dis_mask =
2887 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
2888 INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
2889 INTR_EN_IHD | i;
2890 intr_context->intr_read_mask =
2891 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
2892 INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
2893 i;
2894
2895 if (i == 0) {
2896 /*
2897 * Default queue handles bcast/mcast plus
2898 * async events. Needs buffers.
2899 */
2900 intr_context->handler = qlge_isr;
2901 sprintf(intr_context->name, "%s-default-queue",
2902 qdev->ndev->name);
2903 } else if (i < qdev->rss_ring_first_cq_id) {
2904 /*
2905 * Outbound queue is for outbound completions only.
2906 */
2907 intr_context->handler = qlge_msix_tx_isr;
c224969e 2908 sprintf(intr_context->name, "%s-tx-%d",
c4e84bde
RM
2909 qdev->ndev->name, i);
2910 } else {
2911 /*
2912 * Inbound queues handle unicast frames only.
2913 */
2914 intr_context->handler = qlge_msix_rx_isr;
c224969e 2915 sprintf(intr_context->name, "%s-rx-%d",
c4e84bde
RM
2916 qdev->ndev->name, i);
2917 }
2918 }
2919 } else {
2920 /*
2921 * All rx_rings use the same intr_context since
2922 * there is only one vector.
2923 */
2924 intr_context->intr = 0;
2925 intr_context->qdev = qdev;
2926 /*
2927 * We set up each vector's enable/disable/read bits so
2928 * there are no bit/mask calculations in the critical path.
2929 */
2930 intr_context->intr_en_mask =
2931 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
2932 intr_context->intr_dis_mask =
2933 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
2934 INTR_EN_TYPE_DISABLE;
2935 intr_context->intr_read_mask =
2936 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
2937 /*
2938 * Single interrupt means one handler for all rings.
2939 */
2940 intr_context->handler = qlge_isr;
2941 sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
2942 for (i = 0; i < qdev->rx_ring_count; i++)
2943 qdev->rx_ring[i].irq = 0;
2944 }
2945}
2946
2947static void ql_free_irq(struct ql_adapter *qdev)
2948{
2949 int i;
2950 struct intr_context *intr_context = &qdev->intr_context[0];
2951
2952 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
2953 if (intr_context->hooked) {
2954 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
2955 free_irq(qdev->msi_x_entry[i].vector,
2956 &qdev->rx_ring[i]);
4974097a 2957 QPRINTK(qdev, IFDOWN, DEBUG,
c4e84bde
RM
2958 "freeing msix interrupt %d.\n", i);
2959 } else {
2960 free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
4974097a 2961 QPRINTK(qdev, IFDOWN, DEBUG,
c4e84bde
RM
2962 "freeing msi interrupt %d.\n", i);
2963 }
2964 }
2965 }
2966 ql_disable_msix(qdev);
2967}
2968
2969static int ql_request_irq(struct ql_adapter *qdev)
2970{
2971 int i;
2972 int status = 0;
2973 struct pci_dev *pdev = qdev->pdev;
2974 struct intr_context *intr_context = &qdev->intr_context[0];
2975
2976 ql_resolve_queues_to_irqs(qdev);
2977
2978 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
2979 atomic_set(&intr_context->irq_cnt, 0);
2980 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
2981 status = request_irq(qdev->msi_x_entry[i].vector,
2982 intr_context->handler,
2983 0,
2984 intr_context->name,
2985 &qdev->rx_ring[i]);
2986 if (status) {
2987 QPRINTK(qdev, IFUP, ERR,
2988 "Failed request for MSIX interrupt %d.\n",
2989 i);
2990 goto err_irq;
2991 } else {
4974097a 2992 QPRINTK(qdev, IFUP, DEBUG,
c4e84bde
RM
2993 "Hooked intr %d, queue type %s%s%s, with name %s.\n",
2994 i,
2995 qdev->rx_ring[i].type ==
2996 DEFAULT_Q ? "DEFAULT_Q" : "",
2997 qdev->rx_ring[i].type ==
2998 TX_Q ? "TX_Q" : "",
2999 qdev->rx_ring[i].type ==
3000 RX_Q ? "RX_Q" : "", intr_context->name);
3001 }
3002 } else {
3003 QPRINTK(qdev, IFUP, DEBUG,
3004 "trying msi or legacy interrupts.\n");
3005 QPRINTK(qdev, IFUP, DEBUG,
3006 "%s: irq = %d.\n", __func__, pdev->irq);
3007 QPRINTK(qdev, IFUP, DEBUG,
3008 "%s: context->name = %s.\n", __func__,
3009 intr_context->name);
3010 QPRINTK(qdev, IFUP, DEBUG,
3011 "%s: dev_id = 0x%p.\n", __func__,
3012 &qdev->rx_ring[0]);
3013 status =
3014 request_irq(pdev->irq, qlge_isr,
3015 test_bit(QL_MSI_ENABLED,
3016 &qdev->
3017 flags) ? 0 : IRQF_SHARED,
3018 intr_context->name, &qdev->rx_ring[0]);
3019 if (status)
3020 goto err_irq;
3021
3022 QPRINTK(qdev, IFUP, ERR,
3023 "Hooked intr %d, queue type %s%s%s, with name %s.\n",
3024 i,
3025 qdev->rx_ring[0].type ==
3026 DEFAULT_Q ? "DEFAULT_Q" : "",
3027 qdev->rx_ring[0].type == TX_Q ? "TX_Q" : "",
3028 qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
3029 intr_context->name);
3030 }
3031 intr_context->hooked = 1;
3032 }
3033 return status;
3034err_irq:
3035 QPRINTK(qdev, IFUP, ERR, "Failed to get the interrupts!!!\n");
3036 ql_free_irq(qdev);
3037 return status;
3038}
3039
3040static int ql_start_rss(struct ql_adapter *qdev)
3041{
3042 struct ricb *ricb = &qdev->ricb;
3043 int status = 0;
3044 int i;
3045 u8 *hash_id = (u8 *) ricb->hash_cq_id;
3046
3047 memset((void *)ricb, 0, sizeof(*ricb));
3048
3049 ricb->base_cq = qdev->rss_ring_first_cq_id | RSS_L4K;
3050 ricb->flags =
3051 (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RI4 | RSS_RI6 | RSS_RT4 |
3052 RSS_RT6);
3053 ricb->mask = cpu_to_le16(qdev->rss_ring_count - 1);
3054
3055 /*
3056 * Fill out the Indirection Table.
3057 */
def48b6e
RM
3058 for (i = 0; i < 256; i++)
3059 hash_id[i] = i & (qdev->rss_ring_count - 1);
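/*
 * Worked example (illustrative): with rss_ring_count = 4 the 256-entry
 * table becomes 0,1,2,3,0,1,2,3,... spreading hash results evenly over
 * the inbound rings. The mask form assumes rss_ring_count is a power of
 * two; with a non-power-of-two count some rings would never be selected.
 */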
c4e84bde
RM
3060
3061 /*
3062 * Random values for the IPv6 and IPv4 Hash Keys.
3063 */
3064 get_random_bytes((void *)&ricb->ipv6_hash_key[0], 40);
3065 get_random_bytes((void *)&ricb->ipv4_hash_key[0], 16);
3066
4974097a 3067 QPRINTK(qdev, IFUP, DEBUG, "Initializing RSS.\n");
c4e84bde
RM
3068
3069 status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
3070 if (status) {
3071 QPRINTK(qdev, IFUP, ERR, "Failed to load RICB.\n");
3072 return status;
3073 }
4974097a 3074 QPRINTK(qdev, IFUP, DEBUG, "Successfully loaded RICB.\n");
c4e84bde
RM
3075 return status;
3076}
3077
a5f59dc9 3078static int ql_clear_routing_entries(struct ql_adapter *qdev)
c4e84bde 3079{
a5f59dc9 3080 int i, status = 0;
c4e84bde 3081
8587ea35
RM
3082 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3083 if (status)
3084 return status;
c4e84bde
RM
3085 /* Clear all the entries in the routing table. */
3086 for (i = 0; i < 16; i++) {
3087 status = ql_set_routing_reg(qdev, i, 0, 0);
3088 if (status) {
3089 QPRINTK(qdev, IFUP, ERR,
a5f59dc9
RM
3090 "Failed to init routing register for CAM "
3091 "packets.\n");
3092 break;
c4e84bde
RM
3093 }
3094 }
a5f59dc9
RM
3095 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3096 return status;
3097}
3098
3099/* Initialize the frame-to-queue routing. */
3100static int ql_route_initialize(struct ql_adapter *qdev)
3101{
3102 int status = 0;
3103
3104 /* Clear the routing table; ql_clear_routing_entries() takes SEM_RT_IDX_MASK itself. */
3105 status = ql_clear_routing_entries(qdev);
3106 if (status)
3107 return status;
3108
3109 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3110 if (status)
3111 return status;
c4e84bde
RM
3112
3113 status = ql_set_routing_reg(qdev, RT_IDX_ALL_ERR_SLOT, RT_IDX_ERR, 1);
3114 if (status) {
3115 QPRINTK(qdev, IFUP, ERR,
3116 "Failed to init routing register for error packets.\n");
8587ea35 3117 goto exit;
c4e84bde
RM
3118 }
3119 status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
3120 if (status) {
3121 QPRINTK(qdev, IFUP, ERR,
3122 "Failed to init routing register for broadcast packets.\n");
8587ea35 3123 goto exit;
c4e84bde
RM
3124 }
3125 /* If we have more than one inbound queue, then turn on RSS in the
3126 * routing block.
3127 */
3128 if (qdev->rss_ring_count > 1) {
3129 status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
3130 RT_IDX_RSS_MATCH, 1);
3131 if (status) {
3132 QPRINTK(qdev, IFUP, ERR,
3133 "Failed to init routing register for MATCH RSS packets.\n");
8587ea35 3134 goto exit;
c4e84bde
RM
3135 }
3136 }
3137
3138 status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
3139 RT_IDX_CAM_HIT, 1);
8587ea35 3140 if (status)
c4e84bde
RM
3141 QPRINTK(qdev, IFUP, ERR,
3142 "Failed to init routing register for CAM packets.\n");
8587ea35
RM
3143exit:
3144 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
c4e84bde
RM
3145 return status;
3146}
3147
2ee1e272 3148int ql_cam_route_initialize(struct ql_adapter *qdev)
bb58b5b6 3149{
7fab3bfe 3150 int status, set;
bb58b5b6 3151
7fab3bfe
RM
3152 /* Check if the link is up and use that to
3153 * determine whether we are setting or clearing
3154 * the MAC address in the CAM.
3155 */
3156 set = ql_read32(qdev, STS);
3157 set &= qdev->port_link_up;
3158 status = ql_set_mac_addr(qdev, set);
bb58b5b6
RM
3159 if (status) {
3160 QPRINTK(qdev, IFUP, ERR, "Failed to init mac address.\n");
3161 return status;
3162 }
3163
3164 status = ql_route_initialize(qdev);
3165 if (status)
3166 QPRINTK(qdev, IFUP, ERR, "Failed to init routing table.\n");
3167
3168 return status;
3169}
3170
c4e84bde
RM
3171static int ql_adapter_initialize(struct ql_adapter *qdev)
3172{
3173 u32 value, mask;
3174 int i;
3175 int status = 0;
3176
3177 /*
3178 * Set up the System register to halt on errors.
3179 */
3180 value = SYS_EFE | SYS_FAE;
3181 mask = value << 16;
3182 ql_write32(qdev, SYS, mask | value);
3183
c9cf0a04
RM
3184 /* Set the default queue, and VLAN behavior. */
3185 value = NIC_RCV_CFG_DFQ | NIC_RCV_CFG_RV;
3186 mask = NIC_RCV_CFG_DFQ_MASK | (NIC_RCV_CFG_RV << 16);
c4e84bde
RM
3187 ql_write32(qdev, NIC_RCV_CFG, (mask | value));
3188
3189 /* Set the MPI interrupt to enabled. */
3190 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
3191
3192 /* Enable the function, set pagesize, enable error checking. */
3193 value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
3194 FSC_EC | FSC_VM_PAGE_4K | FSC_SH;
3195
3196 /* Set/clear header splitting. */
3197 mask = FSC_VM_PAGESIZE_MASK |
3198 FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
3199 ql_write32(qdev, FSC, mask | value);
3200
3201 ql_write32(qdev, SPLT_HDR, SPLT_HDR_EP |
3202 min(SMALL_BUFFER_SIZE, MAX_SPLIT_SIZE));
3203
3204 /* Start up the rx queues. */
3205 for (i = 0; i < qdev->rx_ring_count; i++) {
3206 status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
3207 if (status) {
3208 QPRINTK(qdev, IFUP, ERR,
3209 "Failed to start rx ring[%d].\n", i);
3210 return status;
3211 }
3212 }
3213
3214 /* If there is more than one inbound completion queue
3215 * then download a RICB to configure RSS.
3216 */
3217 if (qdev->rss_ring_count > 1) {
3218 status = ql_start_rss(qdev);
3219 if (status) {
3220 QPRINTK(qdev, IFUP, ERR, "Failed to start RSS.\n");
3221 return status;
3222 }
3223 }
3224
3225 /* Start up the tx queues. */
3226 for (i = 0; i < qdev->tx_ring_count; i++) {
3227 status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
3228 if (status) {
3229 QPRINTK(qdev, IFUP, ERR,
3230 "Failed to start tx ring[%d].\n", i);
3231 return status;
3232 }
3233 }
3234
b0c2aadf
RM
3235 /* Initialize the port and set the max framesize. */
3236 status = qdev->nic_ops->port_initialize(qdev);
3237 if (status) {
3238 QPRINTK(qdev, IFUP, ERR, "Failed to start port.\n");
3239 return status;
3240 }
c4e84bde 3241
bb58b5b6
RM
3242 /* Set up the MAC address and frame routing filter. */
3243 status = ql_cam_route_initialize(qdev);
c4e84bde 3244 if (status) {
bb58b5b6
RM
3245 QPRINTK(qdev, IFUP, ERR,
3246 "Failed to init CAM/Routing tables.\n");
c4e84bde
RM
3247 return status;
3248 }
3249
3250 /* Start NAPI for the RSS queues. */
3251 for (i = qdev->rss_ring_first_cq_id; i < qdev->rx_ring_count; i++) {
4974097a 3252 QPRINTK(qdev, IFUP, DEBUG, "Enabling NAPI for rx_ring[%d].\n",
c4e84bde
RM
3253 i);
3254 napi_enable(&qdev->rx_ring[i].napi);
3255 }
3256
3257 return status;
3258}
3259
3260/* Issue soft reset to chip. */
3261static int ql_adapter_reset(struct ql_adapter *qdev)
3262{
3263 u32 value;
c4e84bde 3264 int status = 0;
a5f59dc9 3265 unsigned long end_jiffies;
c4e84bde 3266
a5f59dc9
RM
3267 /* Clear all the entries in the routing table. */
3268 status = ql_clear_routing_entries(qdev);
3269 if (status) {
3270 QPRINTK(qdev, IFUP, ERR, "Failed to clear routing bits.\n");
3271 return status;
3272 }
3273
3274 end_jiffies = jiffies +
3275 max((unsigned long)1, usecs_to_jiffies(30));
c4e84bde 3276 ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
a75ee7f1 3277
c4e84bde
RM
3278 do {
3279 value = ql_read32(qdev, RST_FO);
3280 if ((value & RST_FO_FR) == 0)
3281 break;
a75ee7f1
RM
3282 cpu_relax();
3283 } while (time_before(jiffies, end_jiffies));
c4e84bde 3284
c4e84bde 3285 if (value & RST_FO_FR) {
c4e84bde 3286 QPRINTK(qdev, IFDOWN, ERR,
3ac49a1c 3287 "ETIMEDOUT!!! errored out of resetting the chip!\n");
a75ee7f1 3288 status = -ETIMEDOUT;
c4e84bde
RM
3289 }
3290
3291 return status;
3292}
3293
3294static void ql_display_dev_info(struct net_device *ndev)
3295{
3296 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
3297
3298 QPRINTK(qdev, PROBE, INFO,
e4552f51 3299 "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
c4e84bde
RM
3300 "XG Roll = %d, XG Rev = %d.\n",
3301 qdev->func,
e4552f51 3302 qdev->port,
c4e84bde
RM
3303 qdev->chip_rev_id & 0x0000000f,
3304 qdev->chip_rev_id >> 4 & 0x0000000f,
3305 qdev->chip_rev_id >> 8 & 0x0000000f,
3306 qdev->chip_rev_id >> 12 & 0x0000000f);
7c510e4b 3307 QPRINTK(qdev, PROBE, INFO, "MAC address %pM\n", ndev->dev_addr);
c4e84bde
RM
3308}
3309
3310static int ql_adapter_down(struct ql_adapter *qdev)
3311{
c4e84bde
RM
3312 int i, status = 0;
3313 struct rx_ring *rx_ring;
3314
1e213303 3315 netif_carrier_off(qdev->ndev);
c4e84bde 3316
6497b607
RM
3317 /* Don't kill the reset worker thread if we
3318 * are in the process of recovery.
3319 */
3320 if (test_bit(QL_ADAPTER_UP, &qdev->flags))
3321 cancel_delayed_work_sync(&qdev->asic_reset_work);
c4e84bde
RM
3322 cancel_delayed_work_sync(&qdev->mpi_reset_work);
3323 cancel_delayed_work_sync(&qdev->mpi_work);
2ee1e272 3324 cancel_delayed_work_sync(&qdev->mpi_idc_work);
bcc2cb3b 3325 cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
c4e84bde
RM
3326
3327 /* The default queue at index 0 is always processed in
3328 * a workqueue.
3329 */
3330 cancel_delayed_work_sync(&qdev->rx_ring[0].rx_work);
3331
3332 /* The rest of the rx_rings are processed in
3333 * a workqueue only if it's a single interrupt
3334 * environment (MSI/Legacy).
3335 */
c062076c 3336 for (i = 1; i < qdev->rx_ring_count; i++) {
c4e84bde
RM
3337 rx_ring = &qdev->rx_ring[i];
3338 /* Only the RSS rings use NAPI on multi irq
3339 * environment. Outbound completion processing
3340 * is done in interrupt context.
3341 */
3342 if (i >= qdev->rss_ring_first_cq_id) {
3343 napi_disable(&rx_ring->napi);
3344 } else {
3345 cancel_delayed_work_sync(&rx_ring->rx_work);
3346 }
3347 }
3348
3349 clear_bit(QL_ADAPTER_UP, &qdev->flags);
3350
3351 ql_disable_interrupts(qdev);
3352
3353 ql_tx_ring_clean(qdev);
3354
6b318cb3
RM
3355 /* Call netif_napi_del() from common point.
3356 */
3357 for (i = qdev->rss_ring_first_cq_id; i < qdev->rx_ring_count; i++)
3358 netif_napi_del(&qdev->rx_ring[i].napi);
3359
4545a3f2 3360 ql_free_rx_buffers(qdev);
2d6a5e95 3361
c4e84bde
RM
3362 spin_lock(&qdev->hw_lock);
3363 status = ql_adapter_reset(qdev);
3364 if (status)
3365 QPRINTK(qdev, IFDOWN, ERR, "reset(func #%d) FAILED!\n",
3366 qdev->func);
3367 spin_unlock(&qdev->hw_lock);
3368 return status;
3369}
3370
3371static int ql_adapter_up(struct ql_adapter *qdev)
3372{
3373 int err = 0;
3374
c4e84bde
RM
3375 err = ql_adapter_initialize(qdev);
3376 if (err) {
3377 QPRINTK(qdev, IFUP, INFO, "Unable to initialize adapter.\n");
c4e84bde
RM
3378 goto err_init;
3379 }
c4e84bde 3380 set_bit(QL_ADAPTER_UP, &qdev->flags);
4545a3f2 3381 ql_alloc_rx_buffers(qdev);
8b007de1
RM
3382 /* If the port is initialized and the
3383 * link is up then turn on the carrier.
3384 */
3385 if ((ql_read32(qdev, STS) & qdev->port_init) &&
3386 (ql_read32(qdev, STS) & qdev->port_link_up))
1e213303 3387 netif_carrier_on(qdev->ndev);
c4e84bde
RM
3388 ql_enable_interrupts(qdev);
3389 ql_enable_all_completion_interrupts(qdev);
1e213303 3390 netif_tx_start_all_queues(qdev->ndev);
c4e84bde
RM
3391
3392 return 0;
3393err_init:
3394 ql_adapter_reset(qdev);
3395 return err;
3396}
3397
c4e84bde
RM
3398static void ql_release_adapter_resources(struct ql_adapter *qdev)
3399{
3400 ql_free_mem_resources(qdev);
3401 ql_free_irq(qdev);
3402}
3403
3404static int ql_get_adapter_resources(struct ql_adapter *qdev)
3405{
3406 int status = 0;
3407
3408 if (ql_alloc_mem_resources(qdev)) {
3409 QPRINTK(qdev, IFUP, ERR, "Unable to allocate memory.\n");
3410 return -ENOMEM;
3411 }
3412 status = ql_request_irq(qdev);
c4e84bde
RM
3413 return status;
3414}
3415
3416static int qlge_close(struct net_device *ndev)
3417{
3418 struct ql_adapter *qdev = netdev_priv(ndev);
3419
3420 /*
3421 * Wait for device to recover from a reset.
3422 * (Rarely happens, but possible.)
3423 */
3424 while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
3425 msleep(1);
3426 ql_adapter_down(qdev);
3427 ql_release_adapter_resources(qdev);
c4e84bde
RM
3428 return 0;
3429}
3430
3431static int ql_configure_rings(struct ql_adapter *qdev)
3432{
3433 int i;
3434 struct rx_ring *rx_ring;
3435 struct tx_ring *tx_ring;
3436 int cpu_cnt = num_online_cpus();
3437
3438 /*
3439 * For each processor present we allocate one
3440 * rx_ring for outbound completions, and one
3441 * rx_ring for inbound completions. Plus there is
3442 * always the one default queue. For the CPU
3443 * counts we end up with the following rx_rings:
3444 * rx_ring count =
3445 * one default queue +
3446 * (CPU count * outbound completion rx_ring) +
3447 * (CPU count * inbound (RSS) completion rx_ring)
3448 * To keep it simple we limit the total number of
3449 * queues to < 32, so we truncate CPU to 8.
3450 * This limitation can be removed when requested.
3451 */
3452
683d46a9
RM
3453 if (cpu_cnt > MAX_CPUS)
3454 cpu_cnt = MAX_CPUS;
c4e84bde
RM
3455
3456 /*
3457 * rx_ring[0] is always the default queue.
3458 */
3459 /* Allocate outbound completion ring for each CPU. */
3460 qdev->tx_ring_count = cpu_cnt;
3461 /* Allocate inbound completion (RSS) ring for each CPU. */
3462 qdev->rss_ring_count = cpu_cnt;
3463 /* cq_id for the first inbound ring handler. */
3464 qdev->rss_ring_first_cq_id = cpu_cnt + 1;
3465 /*
3466 * qdev->rx_ring_count:
3467 * Total number of rx_rings. This includes the one
3468 * default queue, a number of outbound completion
3469 * handler rx_rings, and the number of inbound
3470 * completion handler rx_rings.
3471 */
3472 qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count + 1;
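/*
 * Worked example (illustrative): on a 4-CPU system cpu_cnt = 4, so
 * tx_ring_count = 4, rss_ring_count = 4, rss_ring_first_cq_id = 5 and
 * rx_ring_count = 4 + 4 + 1 = 9: rx_ring[0] is the default queue,
 * rx_ring[1..4] service outbound (tx) completions and rx_ring[5..8]
 * are the inbound RSS queues.
 */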
3473
c4e84bde
RM
3474 for (i = 0; i < qdev->tx_ring_count; i++) {
3475 tx_ring = &qdev->tx_ring[i];
3476 memset((void *)tx_ring, 0, sizeof(*tx_ring));
3477 tx_ring->qdev = qdev;
3478 tx_ring->wq_id = i;
3479 tx_ring->wq_len = qdev->tx_ring_size;
3480 tx_ring->wq_size =
3481 tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);
3482
3483 /*
3484 * The completion queue ID for the tx rings start
3485 * immediately after the default Q ID, which is zero.
3486 */
3487 tx_ring->cq_id = i + 1;
3488 }
3489
3490 for (i = 0; i < qdev->rx_ring_count; i++) {
3491 rx_ring = &qdev->rx_ring[i];
3492 memset((void *)rx_ring, 0, sizeof(*rx_ring));
3493 rx_ring->qdev = qdev;
3494 rx_ring->cq_id = i;
3495 rx_ring->cpu = i % cpu_cnt; /* CPU to run handler on. */
3496 if (i == 0) { /* Default queue at index 0. */
3497 /*
3498 * Default queue handles bcast/mcast plus
3499 * async events. Needs buffers.
3500 */
3501 rx_ring->cq_len = qdev->rx_ring_size;
3502 rx_ring->cq_size =
3503 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
3504 rx_ring->lbq_len = NUM_LARGE_BUFFERS;
3505 rx_ring->lbq_size =
2c9a0d41 3506 rx_ring->lbq_len * sizeof(__le64);
c4e84bde
RM
3507 rx_ring->lbq_buf_size = LARGE_BUFFER_SIZE;
3508 rx_ring->sbq_len = NUM_SMALL_BUFFERS;
3509 rx_ring->sbq_size =
2c9a0d41 3510 rx_ring->sbq_len * sizeof(__le64);
c4e84bde
RM
3511 rx_ring->sbq_buf_size = SMALL_BUFFER_SIZE * 2;
3512 rx_ring->type = DEFAULT_Q;
3513 } else if (i < qdev->rss_ring_first_cq_id) {
3514 /*
3515 * Outbound queue handles outbound completions only.
3516 */
3517 /* outbound cq is same size as tx_ring it services. */
3518 rx_ring->cq_len = qdev->tx_ring_size;
3519 rx_ring->cq_size =
3520 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
3521 rx_ring->lbq_len = 0;
3522 rx_ring->lbq_size = 0;
3523 rx_ring->lbq_buf_size = 0;
3524 rx_ring->sbq_len = 0;
3525 rx_ring->sbq_size = 0;
3526 rx_ring->sbq_buf_size = 0;
3527 rx_ring->type = TX_Q;
3528 } else { /* Inbound completions (RSS) queues */
3529 /*
3530 * Inbound queues handle unicast frames only.
3531 */
3532 rx_ring->cq_len = qdev->rx_ring_size;
3533 rx_ring->cq_size =
3534 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
3535 rx_ring->lbq_len = NUM_LARGE_BUFFERS;
3536 rx_ring->lbq_size =
2c9a0d41 3537 rx_ring->lbq_len * sizeof(__le64);
c4e84bde
RM
3538 rx_ring->lbq_buf_size = LARGE_BUFFER_SIZE;
3539 rx_ring->sbq_len = NUM_SMALL_BUFFERS;
3540 rx_ring->sbq_size =
2c9a0d41 3541 rx_ring->sbq_len * sizeof(__le64);
c4e84bde
RM
3542 rx_ring->sbq_buf_size = SMALL_BUFFER_SIZE * 2;
3543 rx_ring->type = RX_Q;
3544 }
3545 }
3546 return 0;
3547}
3548
3549static int qlge_open(struct net_device *ndev)
3550{
3551 int err = 0;
3552 struct ql_adapter *qdev = netdev_priv(ndev);
3553
3554 err = ql_configure_rings(qdev);
3555 if (err)
3556 return err;
3557
3558 err = ql_get_adapter_resources(qdev);
3559 if (err)
3560 goto error_up;
3561
3562 err = ql_adapter_up(qdev);
3563 if (err)
3564 goto error_up;
3565
3566 return err;
3567
3568error_up:
3569 ql_release_adapter_resources(qdev);
c4e84bde
RM
3570 return err;
3571}
3572
3573static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
3574{
3575 struct ql_adapter *qdev = netdev_priv(ndev);
3576
3577 if (ndev->mtu == 1500 && new_mtu == 9000) {
3578 QPRINTK(qdev, IFUP, ERR, "Changing to jumbo MTU.\n");
bcc2cb3b
RM
3579 queue_delayed_work(qdev->workqueue,
3580 &qdev->mpi_port_cfg_work, 0);
c4e84bde
RM
3581 } else if (ndev->mtu == 9000 && new_mtu == 1500) {
3582 QPRINTK(qdev, IFUP, ERR, "Changing to normal MTU.\n");
3583 } else if ((ndev->mtu == 1500 && new_mtu == 1500) ||
3584 (ndev->mtu == 9000 && new_mtu == 9000)) {
3585 return 0;
3586 } else
3587 return -EINVAL;
3588 ndev->mtu = new_mtu;
3589 return 0;
3590}
3591
3592static struct net_device_stats *qlge_get_stats(struct net_device
3593 *ndev)
3594{
3595 struct ql_adapter *qdev = netdev_priv(ndev);
3596 return &qdev->stats;
3597}
3598
3599static void qlge_set_multicast_list(struct net_device *ndev)
3600{
3601 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
3602 struct dev_mc_list *mc_ptr;
cc288f54 3603 int i, status;
c4e84bde 3604
cc288f54
RM
3605 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3606 if (status)
3607 return;
c4e84bde
RM
3608 spin_lock(&qdev->hw_lock);
3609 /*
3610 * Set or clear promiscuous mode if a
3611 * transition is taking place.
3612 */
3613 if (ndev->flags & IFF_PROMISC) {
3614 if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
3615 if (ql_set_routing_reg
3616 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
3617 QPRINTK(qdev, HW, ERR,
3618 "Failed to set promiscous mode.\n");
3619 } else {
3620 set_bit(QL_PROMISCUOUS, &qdev->flags);
3621 }
3622 }
3623 } else {
3624 if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
3625 if (ql_set_routing_reg
3626 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
3627 QPRINTK(qdev, HW, ERR,
3628 "Failed to clear promiscous mode.\n");
3629 } else {
3630 clear_bit(QL_PROMISCUOUS, &qdev->flags);
3631 }
3632 }
3633 }
3634
3635 /*
3636 * Set or clear all multicast mode if a
3637 * transition is taking place.
3638 */
3639 if ((ndev->flags & IFF_ALLMULTI) ||
3640 (ndev->mc_count > MAX_MULTICAST_ENTRIES)) {
3641 if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
3642 if (ql_set_routing_reg
3643 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
3644 QPRINTK(qdev, HW, ERR,
3645 "Failed to set all-multi mode.\n");
3646 } else {
3647 set_bit(QL_ALLMULTI, &qdev->flags);
3648 }
3649 }
3650 } else {
3651 if (test_bit(QL_ALLMULTI, &qdev->flags)) {
3652 if (ql_set_routing_reg
3653 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
3654 QPRINTK(qdev, HW, ERR,
3655 "Failed to clear all-multi mode.\n");
3656 } else {
3657 clear_bit(QL_ALLMULTI, &qdev->flags);
3658 }
3659 }
3660 }
3661
3662 if (ndev->mc_count) {
3663 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
3664 if (status)
3665 goto exit;
3666 for (i = 0, mc_ptr = ndev->mc_list; mc_ptr;
3667 i++, mc_ptr = mc_ptr->next)
3668 if (ql_set_mac_addr_reg(qdev, (u8 *) mc_ptr->dmi_addr,
3669 MAC_ADDR_TYPE_MULTI_MAC, i)) {
3670 QPRINTK(qdev, HW, ERR,
3671 "Failed to loadmulticast address.\n");
cc288f54 3672 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
3673 goto exit;
3674 }
cc288f54 3675 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
3676 if (ql_set_routing_reg
3677 (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
3678 QPRINTK(qdev, HW, ERR,
3679 "Failed to set multicast match mode.\n");
3680 } else {
3681 set_bit(QL_ALLMULTI, &qdev->flags);
3682 }
3683 }
3684exit:
3685 spin_unlock(&qdev->hw_lock);
8587ea35 3686 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3687}
3688
3689static int qlge_set_mac_address(struct net_device *ndev, void *p)
3690{
3691 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
3692 struct sockaddr *addr = p;
cc288f54 3693 int status;
3694
3695 if (netif_running(ndev))
3696 return -EBUSY;
3697
3698 if (!is_valid_ether_addr(addr->sa_data))
3699 return -EADDRNOTAVAIL;
3700 memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
3701
3702 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
3703 if (status)
3704 return status;
c4e84bde 3705 spin_lock(&qdev->hw_lock);
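/* Load the new station address into the CAM slot used by this PCI
 * function (index func * MAX_CQ).
 */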
3706 status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
3707 MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
c4e84bde 3708 spin_unlock(&qdev->hw_lock);
3709 if (status)
3710 QPRINTK(qdev, HW, ERR, "Failed to load MAC address.\n");
3711 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
3712 return status;
3713}
3714
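/* Transmit watchdog timeout: escalate to the ASIC error handling path. */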
3715static void qlge_tx_timeout(struct net_device *ndev)
3716{
3717 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
6497b607 3718 ql_queue_asic_error(qdev);
3719}
3720
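/* Recover from an ASIC error by cycling the adapter down and back up;
 * if the cycle fails, close the device.
 */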
3721static void ql_asic_reset_work(struct work_struct *work)
3722{
3723 struct ql_adapter *qdev =
3724 container_of(work, struct ql_adapter, asic_reset_work.work);
3725 int status;
3726
3727 status = ql_adapter_down(qdev);
3728 if (status)
3729 goto error;
3730
3731 status = ql_adapter_up(qdev);
3732 if (status)
3733 goto error;
3734
3735 return;
3736error:
3737 QPRINTK(qdev, IFUP, ALERT,
3738 "Driver up/down cycle failed, closing device\n");
3739 rtnl_lock();
3740 set_bit(QL_ADAPTER_UP, &qdev->flags);
3741 dev_close(qdev->ndev);
3742 rtnl_unlock();
3743}
3744
3745static struct nic_operations qla8012_nic_ops = {
3746 .get_flash = ql_get_8012_flash_params,
3747 .port_initialize = ql_8012_port_initialize,
3748};
3749
3750static struct nic_operations qla8000_nic_ops = {
3751 .get_flash = ql_get_8000_flash_params,
3752 .port_initialize = ql_8000_port_initialize,
3753};
3754
3755/* Find the pcie function number for the other NIC
3756 * on this chip. Since both NIC functions share a
3757 * common firmware we have the lowest enabled function
3758 * do any common work. Examples would be resetting
3759 * after a fatal firmware error, or doing a firmware
3760 * coredump.
3761 */
3762static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
3763{
3764 int status = 0;
3765 u32 temp;
3766 u32 nic_func1, nic_func2;
3767
3768 status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
3769 &temp);
3770 if (status)
3771 return status;
3772
3773 nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
3774 MPI_TEST_NIC_FUNC_MASK);
3775 nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
3776 MPI_TEST_NIC_FUNC_MASK);
3777
3778 if (qdev->func == nic_func1)
3779 qdev->alt_func = nic_func2;
3780 else if (qdev->func == nic_func2)
3781 qdev->alt_func = nic_func1;
3782 else
3783 status = -EIO;
3784
3785 return status;
3786}
b0c2aadf 3787
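/* Work out this function's identity (func/port), select the per-port
 * register masks and bind the chip-specific nic_ops (8012 or 8000).
 */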
e4552f51 3788static int ql_get_board_info(struct ql_adapter *qdev)
c4e84bde 3789{
e4552f51 3790 int status;
3791 qdev->func =
3792 (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
3793 if (qdev->func > 3)
3794 return -EIO;
3795
3796 status = ql_get_alt_pcie_func(qdev);
3797 if (status)
3798 return status;
3799
3800 qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
3801 if (qdev->port) {
3802 qdev->xg_sem_mask = SEM_XGMAC1_MASK;
3803 qdev->port_link_up = STS_PL1;
3804 qdev->port_init = STS_PI1;
3805 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
3806 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
3807 } else {
3808 qdev->xg_sem_mask = SEM_XGMAC0_MASK;
3809 qdev->port_link_up = STS_PL0;
3810 qdev->port_init = STS_PI0;
3811 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
3812 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
3813 }
3814 qdev->chip_rev_id = ql_read32(qdev, REV_ID);
3815 qdev->device_id = qdev->pdev->device;
3816 if (qdev->device_id == QLGE_DEVICE_ID_8012)
3817 qdev->nic_ops = &qla8012_nic_ops;
3818 else if (qdev->device_id == QLGE_DEVICE_ID_8000)
3819 qdev->nic_ops = &qla8000_nic_ops;
e4552f51 3820 return status;
3821}
3822
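/* Undo ql_init_device: tear down the workqueues, unmap the register and
 * doorbell BARs and release the PCI regions.
 */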
3823static void ql_release_all(struct pci_dev *pdev)
3824{
3825 struct net_device *ndev = pci_get_drvdata(pdev);
3826 struct ql_adapter *qdev = netdev_priv(ndev);
3827
3828 if (qdev->workqueue) {
3829 destroy_workqueue(qdev->workqueue);
3830 qdev->workqueue = NULL;
3831 }
3832 if (qdev->q_workqueue) {
3833 destroy_workqueue(qdev->q_workqueue);
3834 qdev->q_workqueue = NULL;
3835 }
3836 if (qdev->reg_base)
8668ae92 3837 iounmap(qdev->reg_base);
3838 if (qdev->doorbell_area)
3839 iounmap(qdev->doorbell_area);
3840 pci_release_regions(pdev);
3841 pci_set_drvdata(pdev, NULL);
3842}
3843
3844static int __devinit ql_init_device(struct pci_dev *pdev,
3845 struct net_device *ndev, int cards_found)
3846{
3847 struct ql_adapter *qdev = netdev_priv(ndev);
3848 int pos, err = 0;
3849 u16 val16;
3850
3851 memset((void *)qdev, 0, sizeof(*qdev));
3852 err = pci_enable_device(pdev);
3853 if (err) {
3854 dev_err(&pdev->dev, "PCI device enable failed.\n");
3855 return err;
3856 }
3857
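/* Tune PCI Express device control: clear the no-snoop enable and turn on
 * correctable/non-fatal/fatal/unsupported-request error reporting.
 */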
3858 pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
3859 if (pos <= 0) {
3860 dev_err(&pdev->dev, PFX "Cannot find PCI Express capability, "
3861 "aborting.\n");
 err = -EIO;
3862 goto err_out;
3863 } else {
3864 pci_read_config_word(pdev, pos + PCI_EXP_DEVCTL, &val16);
3865 val16 &= ~PCI_EXP_DEVCTL_NOSNOOP_EN;
3866 val16 |= (PCI_EXP_DEVCTL_CERE |
3867 PCI_EXP_DEVCTL_NFERE |
3868 PCI_EXP_DEVCTL_FERE | PCI_EXP_DEVCTL_URRE);
3869 pci_write_config_word(pdev, pos + PCI_EXP_DEVCTL, val16);
3870 }
3871
3872 err = pci_request_regions(pdev, DRV_NAME);
3873 if (err) {
3874 dev_err(&pdev->dev, "PCI region request failed.\n");
3875 goto err_out;
3876 }
3877
3878 pci_set_master(pdev);
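/* Prefer a 64-bit DMA mask; fall back to 32-bit if the platform
 * cannot support it.
 */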
6a35528a 3879 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
c4e84bde 3880 set_bit(QL_DMA64, &qdev->flags);
6a35528a 3881 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
c4e84bde 3882 } else {
284901a9 3883 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
c4e84bde 3884 if (!err)
284901a9 3885 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
3886 }
3887
3888 if (err) {
3889 dev_err(&pdev->dev, "No usable DMA configuration.\n");
3890 goto err_out;
3891 }
3892
3893 pci_set_drvdata(pdev, ndev);
3894 qdev->reg_base =
3895 ioremap_nocache(pci_resource_start(pdev, 1),
3896 pci_resource_len(pdev, 1));
3897 if (!qdev->reg_base) {
3898 dev_err(&pdev->dev, "Register mapping failed.\n");
3899 err = -ENOMEM;
3900 goto err_out;
3901 }
3902
3903 qdev->doorbell_area_size = pci_resource_len(pdev, 3);
3904 qdev->doorbell_area =
3905 ioremap_nocache(pci_resource_start(pdev, 3),
3906 pci_resource_len(pdev, 3));
3907 if (!qdev->doorbell_area) {
3908 dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
3909 err = -ENOMEM;
3910 goto err_out;
3911 }
3912
3913 qdev->ndev = ndev;
3914 qdev->pdev = pdev;
3915 err = ql_get_board_info(qdev);
3916 if (err) {
3917 dev_err(&pdev->dev, "Register access failed.\n");
3918 err = -EIO;
3919 goto err_out;
3920 }
3921 qdev->msg_enable = netif_msg_init(debug, default_msg);
3922 spin_lock_init(&qdev->hw_lock);
3923 spin_lock_init(&qdev->stats_lock);
3924
3925 /* make sure the EEPROM is good */
b0c2aadf 3926 err = qdev->nic_ops->get_flash(qdev);
3927 if (err) {
3928 dev_err(&pdev->dev, "Invalid FLASH.\n");
3929 goto err_out;
3930 }
3931
3932 memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
3933
3934 /* Set up the default ring sizes. */
3935 qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
3936 qdev->rx_ring_size = NUM_RX_RING_ENTRIES;
3937
3938 /* Set up the coalescing parameters. */
3939 qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
3940 qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
3941 qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
3942 qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
3943
3944 /*
3945 * Set up the operating parameters.
3946 */
3947 qdev->rx_csum = 1;
3948
3949 qdev->q_workqueue = create_workqueue(ndev->name);
3950 qdev->workqueue = create_singlethread_workqueue(ndev->name);
3951 INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
3952 INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
3953 INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
bcc2cb3b 3954 INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
2ee1e272 3955 INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
125844ea 3956 mutex_init(&qdev->mpi_mutex);
bcc2cb3b 3957 init_completion(&qdev->ide_completion);
3958
3959 if (!cards_found) {
3960 dev_info(&pdev->dev, "%s\n", DRV_STRING);
3961 dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
3962 DRV_NAME, DRV_VERSION);
3963 }
3964 return 0;
3965err_out:
3966 ql_release_all(pdev);
3967 pci_disable_device(pdev);
3968 return err;
3969}
3970
3971
3972static const struct net_device_ops qlge_netdev_ops = {
3973 .ndo_open = qlge_open,
3974 .ndo_stop = qlge_close,
3975 .ndo_start_xmit = qlge_send,
3976 .ndo_change_mtu = qlge_change_mtu,
3977 .ndo_get_stats = qlge_get_stats,
3978 .ndo_set_multicast_list = qlge_set_multicast_list,
3979 .ndo_set_mac_address = qlge_set_mac_address,
3980 .ndo_validate_addr = eth_validate_addr,
3981 .ndo_tx_timeout = qlge_tx_timeout,
3982 .ndo_vlan_rx_register = ql_vlan_rx_register,
3983 .ndo_vlan_rx_add_vid = ql_vlan_rx_add_vid,
3984 .ndo_vlan_rx_kill_vid = ql_vlan_rx_kill_vid,
3985};
3986
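/* PCI probe: allocate a multiqueue net_device, initialize the adapter,
 * set up the offload features and register with the network stack.
 */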
3987static int __devinit qlge_probe(struct pci_dev *pdev,
3988 const struct pci_device_id *pci_entry)
3989{
3990 struct net_device *ndev = NULL;
3991 struct ql_adapter *qdev = NULL;
3992 static int cards_found = 0;
3993 int err = 0;
3994
3995 ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
3996 min(MAX_CPUS, (int)num_online_cpus()));
3997 if (!ndev)
3998 return -ENOMEM;
3999
4000 err = ql_init_device(pdev, ndev, cards_found);
4001 if (err < 0) {
4002 free_netdev(ndev);
4003 return err;
4004 }
4005
4006 qdev = netdev_priv(ndev);
4007 SET_NETDEV_DEV(ndev, &pdev->dev);
4008 ndev->features = (0
4009 | NETIF_F_IP_CSUM
4010 | NETIF_F_SG
4011 | NETIF_F_TSO
4012 | NETIF_F_TSO6
4013 | NETIF_F_TSO_ECN
4014 | NETIF_F_HW_VLAN_TX
4015 | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER);
22bdd4f5 4016 ndev->features |= NETIF_F_GRO;
4017
4018 if (test_bit(QL_DMA64, &qdev->flags))
4019 ndev->features |= NETIF_F_HIGHDMA;
4020
4021 /*
4022 * Set up net_device structure.
4023 */
4024 ndev->tx_queue_len = qdev->tx_ring_size;
4025 ndev->irq = pdev->irq;
4026
4027 ndev->netdev_ops = &qlge_netdev_ops;
c4e84bde 4028 SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops);
c4e84bde 4029 ndev->watchdog_timeo = 10 * HZ;
25ed7849 4030
4031 err = register_netdev(ndev);
4032 if (err) {
4033 dev_err(&pdev->dev, "net device registration failed.\n");
4034 ql_release_all(pdev);
4035 pci_disable_device(pdev);
4036 return err;
4037 }
4038 netif_carrier_off(ndev);
4039 ql_display_dev_info(ndev);
4040 cards_found++;
4041 return 0;
4042}
4043
4044static void __devexit qlge_remove(struct pci_dev *pdev)
4045{
4046 struct net_device *ndev = pci_get_drvdata(pdev);
4047 unregister_netdev(ndev);
4048 ql_release_all(pdev);
4049 pci_disable_device(pdev);
4050 free_netdev(ndev);
4051}
4052
4053/*
4054 * This callback is called by the PCI subsystem whenever
4055 * a PCI bus error is detected.
4056 */
4057static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
4058 enum pci_channel_state state)
4059{
4060 struct net_device *ndev = pci_get_drvdata(pdev);
4061 struct ql_adapter *qdev = netdev_priv(ndev);
4062
4063 if (netif_running(ndev))
4064 ql_adapter_down(qdev);
4065
4066 pci_disable_device(pdev);
4067
4068 /* Request a slot reset. */
4069 return PCI_ERS_RESULT_NEED_RESET;
4070}
4071
4072/*
4073 * This callback is called after the PCI bus has been reset.
4074 * Basically, this tries to restart the card from scratch.
4075 * This is a shortened version of the device probe/discovery code;
4076 * it resembles the first half of the qlge_probe() routine.
4077 */
4078static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
4079{
4080 struct net_device *ndev = pci_get_drvdata(pdev);
4081 struct ql_adapter *qdev = netdev_priv(ndev);
4082
4083 if (pci_enable_device(pdev)) {
4084 QPRINTK(qdev, IFUP, ERR,
4085 "Cannot re-enable PCI device after reset.\n");
4086 return PCI_ERS_RESULT_DISCONNECT;
4087 }
4088
4089 pci_set_master(pdev);
4090
4091 netif_carrier_off(ndev);
4092 ql_adapter_reset(qdev);
4093
4094 /* Make sure the EEPROM is good */
4095 memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
4096
4097 if (!is_valid_ether_addr(ndev->perm_addr)) {
4098 QPRINTK(qdev, IFUP, ERR, "After reset, invalid MAC address.\n");
4099 return PCI_ERS_RESULT_DISCONNECT;
4100 }
4101
4102 return PCI_ERS_RESULT_RECOVERED;
4103}
4104
4105static void qlge_io_resume(struct pci_dev *pdev)
4106{
4107 struct net_device *ndev = pci_get_drvdata(pdev);
4108 struct ql_adapter *qdev = netdev_priv(ndev);
4109
4110 pci_set_master(pdev);
4111
4112 if (netif_running(ndev)) {
4113 if (ql_adapter_up(qdev)) {
4114 QPRINTK(qdev, IFUP, ERR,
4115 "Device initialization failed after reset.\n");
4116 return;
4117 }
4118 }
4119
4120 netif_device_attach(ndev);
4121}
4122
4123static struct pci_error_handlers qlge_err_handler = {
4124 .error_detected = qlge_io_error_detected,
4125 .slot_reset = qlge_io_slot_reset,
4126 .resume = qlge_io_resume,
4127};
4128
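/* Power management: bring the adapter down, save PCI state and put the
 * device into the requested low-power state.
 */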
4129static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
4130{
4131 struct net_device *ndev = pci_get_drvdata(pdev);
4132 struct ql_adapter *qdev = netdev_priv(ndev);
6b318cb3 4133 int err;
4134
4135 netif_device_detach(ndev);
4136
4137 if (netif_running(ndev)) {
4138 err = ql_adapter_down(qdev);
4139 if (err)
4140 return err;
4141 }
4142
4143 err = pci_save_state(pdev);
4144 if (err)
4145 return err;
4146
4147 pci_disable_device(pdev);
4148
4149 pci_set_power_state(pdev, pci_choose_state(pdev, state));
4150
4151 return 0;
4152}
4153
04da2cf9 4154#ifdef CONFIG_PM
4155static int qlge_resume(struct pci_dev *pdev)
4156{
4157 struct net_device *ndev = pci_get_drvdata(pdev);
4158 struct ql_adapter *qdev = netdev_priv(ndev);
4159 int err;
4160
4161 pci_set_power_state(pdev, PCI_D0);
4162 pci_restore_state(pdev);
4163 err = pci_enable_device(pdev);
4164 if (err) {
4165 QPRINTK(qdev, IFUP, ERR, "Cannot enable PCI device from suspend\n");
4166 return err;
4167 }
4168 pci_set_master(pdev);
4169
4170 pci_enable_wake(pdev, PCI_D3hot, 0);
4171 pci_enable_wake(pdev, PCI_D3cold, 0);
4172
4173 if (netif_running(ndev)) {
4174 err = ql_adapter_up(qdev);
4175 if (err)
4176 return err;
4177 }
4178
4179 netif_device_attach(ndev);
4180
4181 return 0;
4182}
04da2cf9 4183#endif /* CONFIG_PM */
4184
4185static void qlge_shutdown(struct pci_dev *pdev)
4186{
4187 qlge_suspend(pdev, PMSG_SUSPEND);
4188}
4189
4190static struct pci_driver qlge_driver = {
4191 .name = DRV_NAME,
4192 .id_table = qlge_pci_tbl,
4193 .probe = qlge_probe,
4194 .remove = __devexit_p(qlge_remove),
4195#ifdef CONFIG_PM
4196 .suspend = qlge_suspend,
4197 .resume = qlge_resume,
4198#endif
4199 .shutdown = qlge_shutdown,
4200 .err_handler = &qlge_err_handler
4201};
4202
4203static int __init qlge_init_module(void)
4204{
4205 return pci_register_driver(&qlge_driver);
4206}
4207
4208static void __exit qlge_exit(void)
4209{
4210 pci_unregister_driver(&qlge_driver);
4211}
4212
4213module_init(qlge_init_module);
4214module_exit(qlge_exit);