drivers/net/ethernet/qlogic/qlge/qlge_main.c
1 /*
2 * QLogic qlge NIC HBA Driver
3 * Copyright (c) 2003-2008 QLogic Corporation
4 * See LICENSE.qlge for copyright and licensing details.
5 * Author: Linux qlge network device driver by
6 * Ron Mercer <ron.mercer@qlogic.com>
7 */
8 #include <linux/kernel.h>
9 #include <linux/init.h>
10 #include <linux/bitops.h>
11 #include <linux/types.h>
12 #include <linux/module.h>
13 #include <linux/list.h>
14 #include <linux/pci.h>
15 #include <linux/dma-mapping.h>
16 #include <linux/pagemap.h>
17 #include <linux/sched.h>
18 #include <linux/slab.h>
19 #include <linux/dmapool.h>
20 #include <linux/mempool.h>
21 #include <linux/spinlock.h>
22 #include <linux/kthread.h>
23 #include <linux/interrupt.h>
24 #include <linux/errno.h>
25 #include <linux/ioport.h>
26 #include <linux/in.h>
27 #include <linux/ip.h>
28 #include <linux/ipv6.h>
29 #include <net/ipv6.h>
30 #include <linux/tcp.h>
31 #include <linux/udp.h>
32 #include <linux/if_arp.h>
33 #include <linux/if_ether.h>
34 #include <linux/netdevice.h>
35 #include <linux/etherdevice.h>
36 #include <linux/ethtool.h>
37 #include <linux/if_vlan.h>
38 #include <linux/skbuff.h>
39 #include <linux/delay.h>
40 #include <linux/mm.h>
41 #include <linux/vmalloc.h>
42 #include <linux/prefetch.h>
43 #include <net/ip6_checksum.h>
44
45 #include "qlge.h"
46
47 char qlge_driver_name[] = DRV_NAME;
48 const char qlge_driver_version[] = DRV_VERSION;
49
50 MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
51 MODULE_DESCRIPTION(DRV_STRING " ");
52 MODULE_LICENSE("GPL");
53 MODULE_VERSION(DRV_VERSION);
54
55 static const u32 default_msg =
56 NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
57 /* NETIF_MSG_TIMER | */
58 NETIF_MSG_IFDOWN |
59 NETIF_MSG_IFUP |
60 NETIF_MSG_RX_ERR |
61 NETIF_MSG_TX_ERR |
62 /* NETIF_MSG_TX_QUEUED | */
63 /* NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
64 /* NETIF_MSG_PKTDATA | */
65 NETIF_MSG_HW | NETIF_MSG_WOL | 0;
66
67 static int debug = -1; /* defaults above */
68 module_param(debug, int, 0664);
69 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
70
71 #define MSIX_IRQ 0
72 #define MSI_IRQ 1
73 #define LEG_IRQ 2
74 static int qlge_irq_type = MSIX_IRQ;
75 module_param(qlge_irq_type, int, 0664);
76 MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
77
78 static int qlge_mpi_coredump;
79 module_param(qlge_mpi_coredump, int, 0);
80 MODULE_PARM_DESC(qlge_mpi_coredump,
81 "Option to enable MPI firmware dump. "
82 "Default is OFF - Do Not allocate memory. ");
83
84 static int qlge_force_coredump;
85 module_param(qlge_force_coredump, int, 0);
86 MODULE_PARM_DESC(qlge_force_coredump,
87 "Option to allow force of firmware core dump. "
88 "Default is OFF - Do not allow.");
89
90 static DEFINE_PCI_DEVICE_TABLE(qlge_pci_tbl) = {
91 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
92 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
93 /* required last entry */
94 {0,}
95 };
96
97 MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
98
99 static int ql_wol(struct ql_adapter *qdev);
100 static void qlge_set_multicast_list(struct net_device *ndev);
101
 102 /* This hardware semaphore provides exclusive access to
103 * resources shared between the NIC driver, MPI firmware,
104 * FCOE firmware and the FC driver.
105 */
106 static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
107 {
108 u32 sem_bits = 0;
109
110 switch (sem_mask) {
111 case SEM_XGMAC0_MASK:
112 sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
113 break;
114 case SEM_XGMAC1_MASK:
115 sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
116 break;
117 case SEM_ICB_MASK:
118 sem_bits = SEM_SET << SEM_ICB_SHIFT;
119 break;
120 case SEM_MAC_ADDR_MASK:
121 sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
122 break;
123 case SEM_FLASH_MASK:
124 sem_bits = SEM_SET << SEM_FLASH_SHIFT;
125 break;
126 case SEM_PROBE_MASK:
127 sem_bits = SEM_SET << SEM_PROBE_SHIFT;
128 break;
129 case SEM_RT_IDX_MASK:
130 sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
131 break;
132 case SEM_PROC_REG_MASK:
133 sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
134 break;
135 default:
136 netif_alert(qdev, probe, qdev->ndev, "bad Semaphore mask!.\n");
137 return -EINVAL;
138 }
139
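/* Request the semaphore by setting its "set" bits, then read the register
 * back to see whether the hardware granted it. Returns 0 when the semaphore
 * was acquired, non-zero when another function already holds it.
 */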
140 ql_write32(qdev, SEM, sem_bits | sem_mask);
141 return !(ql_read32(qdev, SEM) & sem_bits);
142 }
143
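/* Spin for a hardware semaphore, retrying every 100 usec for up to 30
 * attempts (roughly 3 ms) before giving up with -ETIMEDOUT.
 */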
144 int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
145 {
146 unsigned int wait_count = 30;
147 do {
148 if (!ql_sem_trylock(qdev, sem_mask))
149 return 0;
150 udelay(100);
151 } while (--wait_count);
152 return -ETIMEDOUT;
153 }
154
155 void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
156 {
157 ql_write32(qdev, SEM, sem_mask);
158 ql_read32(qdev, SEM); /* flush */
159 }
160
161 /* This function waits for a specific bit to come ready
162 * in a given register. It is used mostly by the initialize
163 * process, but is also used in kernel thread API such as
164 * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
165 */
166 int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
167 {
168 u32 temp;
169 int count = UDELAY_COUNT;
170
171 while (count) {
172 temp = ql_read32(qdev, reg);
173
174 /* check for errors */
175 if (temp & err_bit) {
176 netif_alert(qdev, probe, qdev->ndev,
177 "register 0x%.08x access error, value = 0x%.08x!.\n",
178 reg, temp);
179 return -EIO;
180 } else if (temp & bit)
181 return 0;
182 udelay(UDELAY_DELAY);
183 count--;
184 }
185 netif_alert(qdev, probe, qdev->ndev,
186 "Timed out waiting for reg %x to come ready.\n", reg);
187 return -ETIMEDOUT;
188 }
189
190 /* The CFG register is used to download TX and RX control blocks
191 * to the chip. This function waits for an operation to complete.
192 */
193 static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
194 {
195 int count = UDELAY_COUNT;
196 u32 temp;
197
198 while (count) {
199 temp = ql_read32(qdev, CFG);
200 if (temp & CFG_LE)
201 return -EIO;
202 if (!(temp & bit))
203 return 0;
204 udelay(UDELAY_DELAY);
205 count--;
206 }
207 return -ETIMEDOUT;
208 }
209
210
211 /* Used to issue init control blocks to hw. Maps control block,
212 * sets address, triggers download, waits for completion.
213 */
214 int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
215 u16 q_id)
216 {
217 u64 map;
218 int status = 0;
219 int direction;
220 u32 mask;
221 u32 value;
222
223 direction =
224 (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
225 PCI_DMA_FROMDEVICE;
226
227 map = pci_map_single(qdev->pdev, ptr, size, direction);
228 if (pci_dma_mapping_error(qdev->pdev, map)) {
229 netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
230 return -ENOMEM;
231 }
232
233 status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
234 if (status)
235 return status;
236
237 status = ql_wait_cfg(qdev, bit);
238 if (status) {
239 netif_err(qdev, ifup, qdev->ndev,
240 "Timed out waiting for CFG to come ready.\n");
241 goto exit;
242 }
243
244 ql_write32(qdev, ICB_L, (u32) map);
245 ql_write32(qdev, ICB_H, (u32) (map >> 32));
246
247 mask = CFG_Q_MASK | (bit << 16);
248 value = bit | (q_id << CFG_Q_SHIFT);
249 ql_write32(qdev, CFG, (mask | value));
250
251 /*
252 * Wait for the bit to clear after signaling hw.
253 */
254 status = ql_wait_cfg(qdev, bit);
255 exit:
256 ql_sem_unlock(qdev, SEM_ICB_MASK); /* does flush too */
257 pci_unmap_single(qdev->pdev, map, size, direction);
258 return status;
259 }
260
261 /* Get a specific MAC address from the CAM. Used for debug and reg dump. */
262 int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
263 u32 *value)
264 {
265 u32 offset = 0;
266 int status;
267
268 switch (type) {
269 case MAC_ADDR_TYPE_MULTI_MAC:
270 case MAC_ADDR_TYPE_CAM_MAC:
271 {
272 status =
273 ql_wait_reg_rdy(qdev,
274 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
275 if (status)
276 goto exit;
277 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
278 (index << MAC_ADDR_IDX_SHIFT) | /* index */
279 MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
280 status =
281 ql_wait_reg_rdy(qdev,
282 MAC_ADDR_IDX, MAC_ADDR_MR, 0);
283 if (status)
284 goto exit;
285 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
286 status =
287 ql_wait_reg_rdy(qdev,
288 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
289 if (status)
290 goto exit;
291 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
292 (index << MAC_ADDR_IDX_SHIFT) | /* index */
293 MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
294 status =
295 ql_wait_reg_rdy(qdev,
296 MAC_ADDR_IDX, MAC_ADDR_MR, 0);
297 if (status)
298 goto exit;
299 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
300 if (type == MAC_ADDR_TYPE_CAM_MAC) {
301 status =
302 ql_wait_reg_rdy(qdev,
303 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
304 if (status)
305 goto exit;
306 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
307 (index << MAC_ADDR_IDX_SHIFT) | /* index */
308 MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
309 status =
310 ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
311 MAC_ADDR_MR, 0);
312 if (status)
313 goto exit;
314 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
315 }
316 break;
317 }
318 case MAC_ADDR_TYPE_VLAN:
319 case MAC_ADDR_TYPE_MULTI_FLTR:
320 default:
321 netif_crit(qdev, ifup, qdev->ndev,
322 "Address type %d not yet supported.\n", type);
323 status = -EPERM;
324 }
325 exit:
326 return status;
327 }
328
329 /* Set up a MAC, multicast or VLAN address for the
330 * inbound frame matching.
331 */
332 static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
333 u16 index)
334 {
335 u32 offset = 0;
336 int status = 0;
337
338 switch (type) {
339 case MAC_ADDR_TYPE_MULTI_MAC:
340 {
341 u32 upper = (addr[0] << 8) | addr[1];
342 u32 lower = (addr[2] << 24) | (addr[3] << 16) |
343 (addr[4] << 8) | (addr[5]);
344
345 status =
346 ql_wait_reg_rdy(qdev,
347 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
348 if (status)
349 goto exit;
350 ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
351 (index << MAC_ADDR_IDX_SHIFT) |
352 type | MAC_ADDR_E);
353 ql_write32(qdev, MAC_ADDR_DATA, lower);
354 status =
355 ql_wait_reg_rdy(qdev,
356 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
357 if (status)
358 goto exit;
359 ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
360 (index << MAC_ADDR_IDX_SHIFT) |
361 type | MAC_ADDR_E);
362
363 ql_write32(qdev, MAC_ADDR_DATA, upper);
364 status =
365 ql_wait_reg_rdy(qdev,
366 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
367 if (status)
368 goto exit;
369 break;
370 }
371 case MAC_ADDR_TYPE_CAM_MAC:
372 {
373 u32 cam_output;
374 u32 upper = (addr[0] << 8) | addr[1];
375 u32 lower =
376 (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
377 (addr[5]);
378 status =
379 ql_wait_reg_rdy(qdev,
380 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
381 if (status)
382 goto exit;
383 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
384 (index << MAC_ADDR_IDX_SHIFT) | /* index */
385 type); /* type */
386 ql_write32(qdev, MAC_ADDR_DATA, lower);
387 status =
388 ql_wait_reg_rdy(qdev,
389 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
390 if (status)
391 goto exit;
392 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
393 (index << MAC_ADDR_IDX_SHIFT) | /* index */
394 type); /* type */
395 ql_write32(qdev, MAC_ADDR_DATA, upper);
396 status =
397 ql_wait_reg_rdy(qdev,
398 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
399 if (status)
400 goto exit;
401 ql_write32(qdev, MAC_ADDR_IDX, (offset) | /* offset */
402 (index << MAC_ADDR_IDX_SHIFT) | /* index */
403 type); /* type */
 404 /* This field should also include the queue id
 405  * and possibly the function id. Right now we hardcode
 406  * the route field to NIC core.
 407  */
408 cam_output = (CAM_OUT_ROUTE_NIC |
409 (qdev->
410 func << CAM_OUT_FUNC_SHIFT) |
411 (0 << CAM_OUT_CQ_ID_SHIFT));
412 if (qdev->ndev->features & NETIF_F_HW_VLAN_RX)
413 cam_output |= CAM_OUT_RV;
414 /* route to NIC core */
415 ql_write32(qdev, MAC_ADDR_DATA, cam_output);
416 break;
417 }
418 case MAC_ADDR_TYPE_VLAN:
419 {
420 u32 enable_bit = *((u32 *) &addr[0]);
421 /* For VLAN, the addr actually holds a bit that
422 * either enables or disables the vlan id we are
423 * addressing. It's either MAC_ADDR_E on or off.
424 * That's bit-27 we're talking about.
425 */
426 status =
427 ql_wait_reg_rdy(qdev,
428 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
429 if (status)
430 goto exit;
431 ql_write32(qdev, MAC_ADDR_IDX, offset | /* offset */
432 (index << MAC_ADDR_IDX_SHIFT) | /* index */
433 type | /* type */
434 enable_bit); /* enable/disable */
435 break;
436 }
437 case MAC_ADDR_TYPE_MULTI_FLTR:
438 default:
439 netif_crit(qdev, ifup, qdev->ndev,
440 "Address type %d not yet supported.\n", type);
441 status = -EPERM;
442 }
443 exit:
444 return status;
445 }
446
447 /* Set or clear MAC address in hardware. We sometimes
448 * have to clear it to prevent wrong frame routing
449 * especially in a bonding environment.
450 */
451 static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
452 {
453 int status;
454 char zero_mac_addr[ETH_ALEN];
455 char *addr;
456
457 if (set) {
458 addr = &qdev->current_mac_addr[0];
459 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
460 "Set Mac addr %pM\n", addr);
461 } else {
462 memset(zero_mac_addr, 0, ETH_ALEN);
463 addr = &zero_mac_addr[0];
464 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
465 "Clearing MAC address\n");
466 }
467 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
468 if (status)
469 return status;
470 status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
471 MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
472 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
473 if (status)
474 netif_err(qdev, ifup, qdev->ndev,
475 "Failed to init mac address.\n");
476 return status;
477 }
478
479 void ql_link_on(struct ql_adapter *qdev)
480 {
481 netif_err(qdev, link, qdev->ndev, "Link is up.\n");
482 netif_carrier_on(qdev->ndev);
483 ql_set_mac_addr(qdev, 1);
484 }
485
486 void ql_link_off(struct ql_adapter *qdev)
487 {
488 netif_err(qdev, link, qdev->ndev, "Link is down.\n");
489 netif_carrier_off(qdev->ndev);
490 ql_set_mac_addr(qdev, 0);
491 }
492
493 /* Get a specific frame routing value from the CAM.
494 * Used for debug and reg dump.
495 */
496 int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
497 {
498 int status = 0;
499
500 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
501 if (status)
502 goto exit;
503
504 ql_write32(qdev, RT_IDX,
505 RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
506 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
507 if (status)
508 goto exit;
509 *value = ql_read32(qdev, RT_DATA);
510 exit:
511 return status;
512 }
513
514 /* The NIC function for this chip has 16 routing indexes. Each one can be used
515 * to route different frame types to various inbound queues. We send broadcast/
516 * multicast/error frames to the default queue for slow handling,
517 * and CAM hit/RSS frames to the fast handling queues.
518 */
519 static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
520 int enable)
521 {
522 int status = -EINVAL; /* Return error if no mask match. */
523 u32 value = 0;
524
525 switch (mask) {
526 case RT_IDX_CAM_HIT:
527 {
528 value = RT_IDX_DST_CAM_Q | /* dest */
529 RT_IDX_TYPE_NICQ | /* type */
530 (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
531 break;
532 }
533 case RT_IDX_VALID: /* Promiscuous Mode frames. */
534 {
535 value = RT_IDX_DST_DFLT_Q | /* dest */
536 RT_IDX_TYPE_NICQ | /* type */
537 (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
538 break;
539 }
540 case RT_IDX_ERR: /* Pass up MAC,IP,TCP/UDP error frames. */
541 {
542 value = RT_IDX_DST_DFLT_Q | /* dest */
543 RT_IDX_TYPE_NICQ | /* type */
544 (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
545 break;
546 }
547 case RT_IDX_IP_CSUM_ERR: /* Pass up IP CSUM error frames. */
548 {
549 value = RT_IDX_DST_DFLT_Q | /* dest */
550 RT_IDX_TYPE_NICQ | /* type */
551 (RT_IDX_IP_CSUM_ERR_SLOT <<
552 RT_IDX_IDX_SHIFT); /* index */
553 break;
554 }
555 case RT_IDX_TU_CSUM_ERR: /* Pass up TCP/UDP CSUM error frames. */
556 {
557 value = RT_IDX_DST_DFLT_Q | /* dest */
558 RT_IDX_TYPE_NICQ | /* type */
559 (RT_IDX_TCP_UDP_CSUM_ERR_SLOT <<
560 RT_IDX_IDX_SHIFT); /* index */
561 break;
562 }
563 case RT_IDX_BCAST: /* Pass up Broadcast frames to default Q. */
564 {
565 value = RT_IDX_DST_DFLT_Q | /* dest */
566 RT_IDX_TYPE_NICQ | /* type */
567 (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
568 break;
569 }
570 case RT_IDX_MCAST: /* Pass up All Multicast frames. */
571 {
572 value = RT_IDX_DST_DFLT_Q | /* dest */
573 RT_IDX_TYPE_NICQ | /* type */
574 (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
575 break;
576 }
577 case RT_IDX_MCAST_MATCH: /* Pass up matched Multicast frames. */
578 {
579 value = RT_IDX_DST_DFLT_Q | /* dest */
580 RT_IDX_TYPE_NICQ | /* type */
581 (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
582 break;
583 }
584 case RT_IDX_RSS_MATCH: /* Pass up matched RSS frames. */
585 {
586 value = RT_IDX_DST_RSS | /* dest */
587 RT_IDX_TYPE_NICQ | /* type */
588 (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
589 break;
590 }
591 case 0: /* Clear the E-bit on an entry. */
592 {
593 value = RT_IDX_DST_DFLT_Q | /* dest */
594 RT_IDX_TYPE_NICQ | /* type */
595 (index << RT_IDX_IDX_SHIFT);/* index */
596 break;
597 }
598 default:
599 netif_err(qdev, ifup, qdev->ndev,
600 "Mask type %d not yet supported.\n", mask);
601 status = -EPERM;
602 goto exit;
603 }
604
605 if (value) {
606 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
607 if (status)
608 goto exit;
609 value |= (enable ? RT_IDX_E : 0);
610 ql_write32(qdev, RT_IDX, value);
611 ql_write32(qdev, RT_DATA, enable ? mask : 0);
612 }
613 exit:
614 return status;
615 }
616
617 static void ql_enable_interrupts(struct ql_adapter *qdev)
618 {
619 ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
620 }
621
622 static void ql_disable_interrupts(struct ql_adapter *qdev)
623 {
624 ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
625 }
626
627 /* If we're running with multiple MSI-X vectors then we enable on the fly.
628 * Otherwise, we may have multiple outstanding workers and don't want to
629 * enable until the last one finishes. In this case, the irq_cnt gets
630 * incremented every time we queue a worker and decremented every time
631 * a worker finishes. Once it hits zero we enable the interrupt.
632 */
633 u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
634 {
635 u32 var = 0;
636 unsigned long hw_flags = 0;
637 struct intr_context *ctx = qdev->intr_context + intr;
638
639 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
640 /* Always enable if we're MSIX multi interrupts and
641 * it's not the default (zeroeth) interrupt.
642 */
643 ql_write32(qdev, INTR_EN,
644 ctx->intr_en_mask);
645 var = ql_read32(qdev, STS);
646 return var;
647 }
648
649 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
650 if (atomic_dec_and_test(&ctx->irq_cnt)) {
651 ql_write32(qdev, INTR_EN,
652 ctx->intr_en_mask);
653 var = ql_read32(qdev, STS);
654 }
655 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
656 return var;
657 }
658
659 static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
660 {
661 u32 var = 0;
662 struct intr_context *ctx;
663
664 /* HW disables for us if we're MSIX multi interrupts and
665 * it's not the default (zeroeth) interrupt.
666 */
667 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
668 return 0;
669
670 ctx = qdev->intr_context + intr;
671 spin_lock(&qdev->hw_lock);
672 if (!atomic_read(&ctx->irq_cnt)) {
673 ql_write32(qdev, INTR_EN,
674 ctx->intr_dis_mask);
675 var = ql_read32(qdev, STS);
676 }
677 atomic_inc(&ctx->irq_cnt);
678 spin_unlock(&qdev->hw_lock);
679 return var;
680 }
681
682 static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
683 {
684 int i;
685 for (i = 0; i < qdev->intr_count; i++) {
 686 /* The enable call does an atomic_dec_and_test
687 * and enables only if the result is zero.
688 * So we precharge it here.
689 */
690 if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
691 i == 0))
692 atomic_set(&qdev->intr_context[i].irq_cnt, 1);
693 ql_enable_completion_interrupt(qdev, i);
694 }
695
696 }
697
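/* Validate a flash image: the first four bytes must match 'str' and the
 * 16-bit word sum over 'size' words must be zero. Returns non-zero if
 * either check fails.
 */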
698 static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
699 {
700 int status, i;
701 u16 csum = 0;
702 __le16 *flash = (__le16 *)&qdev->flash;
703
704 status = strncmp((char *)&qdev->flash, str, 4);
705 if (status) {
706 netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n");
707 return status;
708 }
709
710 for (i = 0; i < size; i++)
711 csum += le16_to_cpu(*flash++);
712
713 if (csum)
714 netif_err(qdev, ifup, qdev->ndev,
715 "Invalid flash checksum, csum = 0x%.04x.\n", csum);
716
717 return csum;
718 }
719
720 static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
721 {
722 int status = 0;
723 /* wait for reg to come ready */
724 status = ql_wait_reg_rdy(qdev,
725 FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
726 if (status)
727 goto exit;
728 /* set up for reg read */
729 ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
730 /* wait for reg to come ready */
731 status = ql_wait_reg_rdy(qdev,
732 FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
733 if (status)
734 goto exit;
735 /* This data is stored on flash as an array of
736 * __le32. Since ql_read32() returns cpu endian
737 * we need to swap it back.
738 */
739 *data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
740 exit:
741 return status;
742 }
743
744 static int ql_get_8000_flash_params(struct ql_adapter *qdev)
745 {
746 u32 i, size;
747 int status;
748 __le32 *p = (__le32 *)&qdev->flash;
749 u32 offset;
750 u8 mac_addr[6];
751
752 /* Get flash offset for function and adjust
753 * for dword access.
754 */
755 if (!qdev->port)
756 offset = FUNC0_FLASH_OFFSET / sizeof(u32);
757 else
758 offset = FUNC1_FLASH_OFFSET / sizeof(u32);
759
760 if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
761 return -ETIMEDOUT;
762
763 size = sizeof(struct flash_params_8000) / sizeof(u32);
764 for (i = 0; i < size; i++, p++) {
765 status = ql_read_flash_word(qdev, i+offset, p);
766 if (status) {
767 netif_err(qdev, ifup, qdev->ndev,
768 "Error reading flash.\n");
769 goto exit;
770 }
771 }
772
773 status = ql_validate_flash(qdev,
774 sizeof(struct flash_params_8000) / sizeof(u16),
775 "8000");
776 if (status) {
777 netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
778 status = -EINVAL;
779 goto exit;
780 }
781
782 /* Extract either manufacturer or BOFM modified
783 * MAC address.
784 */
785 if (qdev->flash.flash_params_8000.data_type1 == 2)
786 memcpy(mac_addr,
787 qdev->flash.flash_params_8000.mac_addr1,
788 qdev->ndev->addr_len);
789 else
790 memcpy(mac_addr,
791 qdev->flash.flash_params_8000.mac_addr,
792 qdev->ndev->addr_len);
793
794 if (!is_valid_ether_addr(mac_addr)) {
795 netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");
796 status = -EINVAL;
797 goto exit;
798 }
799
800 memcpy(qdev->ndev->dev_addr,
801 mac_addr,
802 qdev->ndev->addr_len);
803
804 exit:
805 ql_sem_unlock(qdev, SEM_FLASH_MASK);
806 return status;
807 }
808
809 static int ql_get_8012_flash_params(struct ql_adapter *qdev)
810 {
811 int i;
812 int status;
813 __le32 *p = (__le32 *)&qdev->flash;
814 u32 offset = 0;
815 u32 size = sizeof(struct flash_params_8012) / sizeof(u32);
816
817 /* Second function's parameters follow the first
818 * function's.
819 */
820 if (qdev->port)
821 offset = size;
822
823 if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
824 return -ETIMEDOUT;
825
826 for (i = 0; i < size; i++, p++) {
827 status = ql_read_flash_word(qdev, i+offset, p);
828 if (status) {
829 netif_err(qdev, ifup, qdev->ndev,
830 "Error reading flash.\n");
831 goto exit;
832 }
833
834 }
835
836 status = ql_validate_flash(qdev,
837 sizeof(struct flash_params_8012) / sizeof(u16),
838 "8012");
839 if (status) {
840 netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
841 status = -EINVAL;
842 goto exit;
843 }
844
845 if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
846 status = -EINVAL;
847 goto exit;
848 }
849
850 memcpy(qdev->ndev->dev_addr,
851 qdev->flash.flash_params_8012.mac_addr,
852 qdev->ndev->addr_len);
853
854 exit:
855 ql_sem_unlock(qdev, SEM_FLASH_MASK);
856 return status;
857 }
858
 859 /* xgmac registers are located behind the xgmac_addr and xgmac_data
860 * register pair. Each read/write requires us to wait for the ready
861 * bit before reading/writing the data.
862 */
863 static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
864 {
865 int status;
866 /* wait for reg to come ready */
867 status = ql_wait_reg_rdy(qdev,
868 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
869 if (status)
870 return status;
871 /* write the data to the data reg */
872 ql_write32(qdev, XGMAC_DATA, data);
873 /* trigger the write */
874 ql_write32(qdev, XGMAC_ADDR, reg);
875 return status;
876 }
877
 878 /* xgmac registers are located behind the xgmac_addr and xgmac_data
879 * register pair. Each read/write requires us to wait for the ready
880 * bit before reading/writing the data.
881 */
882 int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
883 {
884 int status = 0;
885 /* wait for reg to come ready */
886 status = ql_wait_reg_rdy(qdev,
887 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
888 if (status)
889 goto exit;
890 /* set up for reg read */
891 ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
892 /* wait for reg to come ready */
893 status = ql_wait_reg_rdy(qdev,
894 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
895 if (status)
896 goto exit;
897 /* get the data */
898 *data = ql_read32(qdev, XGMAC_DATA);
899 exit:
900 return status;
901 }
902
903 /* This is used for reading the 64-bit statistics regs. */
904 int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
905 {
906 int status = 0;
907 u32 hi = 0;
908 u32 lo = 0;
909
910 status = ql_read_xgmac_reg(qdev, reg, &lo);
911 if (status)
912 goto exit;
913
914 status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
915 if (status)
916 goto exit;
917
918 *data = (u64) lo | ((u64) hi << 32);
919
920 exit:
921 return status;
922 }
923
924 static int ql_8000_port_initialize(struct ql_adapter *qdev)
925 {
926 int status;
927 /*
928 * Get MPI firmware version for driver banner
 929 * and ethtool info.
930 */
931 status = ql_mb_about_fw(qdev);
932 if (status)
933 goto exit;
934 status = ql_mb_get_fw_state(qdev);
935 if (status)
936 goto exit;
937 /* Wake up a worker to get/set the TX/RX frame sizes. */
938 queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
939 exit:
940 return status;
941 }
942
943 /* Take the MAC Core out of reset.
944 * Enable statistics counting.
945 * Take the transmitter/receiver out of reset.
946 * This functionality may be done in the MPI firmware at a
947 * later date.
948 */
949 static int ql_8012_port_initialize(struct ql_adapter *qdev)
950 {
951 int status = 0;
952 u32 data;
953
954 if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
955 /* Another function has the semaphore, so
956 * wait for the port init bit to come ready.
957 */
958 netif_info(qdev, link, qdev->ndev,
959 "Another function has the semaphore, so wait for the port init bit to come ready.\n");
960 status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
961 if (status) {
962 netif_crit(qdev, link, qdev->ndev,
963 "Port initialize timed out.\n");
964 }
965 return status;
966 }
967
968 netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!.\n");
969 /* Set the core reset. */
970 status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
971 if (status)
972 goto end;
973 data |= GLOBAL_CFG_RESET;
974 status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
975 if (status)
976 goto end;
977
978 /* Clear the core reset and turn on jumbo for receiver. */
979 data &= ~GLOBAL_CFG_RESET; /* Clear core reset. */
980 data |= GLOBAL_CFG_JUMBO; /* Turn on jumbo. */
981 data |= GLOBAL_CFG_TX_STAT_EN;
982 data |= GLOBAL_CFG_RX_STAT_EN;
983 status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
984 if (status)
985 goto end;
986
 987 /* Enable transmitter, and clear its reset. */
988 status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
989 if (status)
990 goto end;
991 data &= ~TX_CFG_RESET; /* Clear the TX MAC reset. */
992 data |= TX_CFG_EN; /* Enable the transmitter. */
993 status = ql_write_xgmac_reg(qdev, TX_CFG, data);
994 if (status)
995 goto end;
996
 997 /* Enable receiver and clear its reset. */
998 status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
999 if (status)
1000 goto end;
1001 data &= ~RX_CFG_RESET; /* Clear the RX MAC reset. */
1002 data |= RX_CFG_EN; /* Enable the receiver. */
1003 status = ql_write_xgmac_reg(qdev, RX_CFG, data);
1004 if (status)
1005 goto end;
1006
1007 /* Turn on jumbo. */
1008 status =
1009 ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
1010 if (status)
1011 goto end;
1012 status =
1013 ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
1014 if (status)
1015 goto end;
1016
1017 /* Signal to the world that the port is enabled. */
1018 ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
1019 end:
1020 ql_sem_unlock(qdev, qdev->xg_sem_mask);
1021 return status;
1022 }
1023
1024 static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
1025 {
1026 return PAGE_SIZE << qdev->lbq_buf_order;
1027 }
1028
1029 /* Get the next large buffer. */
1030 static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
1031 {
1032 struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
1033 rx_ring->lbq_curr_idx++;
1034 if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
1035 rx_ring->lbq_curr_idx = 0;
1036 rx_ring->lbq_free_cnt++;
1037 return lbq_desc;
1038 }
1039
1040 static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
1041 struct rx_ring *rx_ring)
1042 {
1043 struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);
1044
1045 pci_dma_sync_single_for_cpu(qdev->pdev,
1046 dma_unmap_addr(lbq_desc, mapaddr),
1047 rx_ring->lbq_buf_size,
1048 PCI_DMA_FROMDEVICE);
1049
1050 /* If it's the last chunk of our master page then
1051 * we unmap it.
1052 */
1053 if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size)
1054 == ql_lbq_block_size(qdev))
1055 pci_unmap_page(qdev->pdev,
1056 lbq_desc->p.pg_chunk.map,
1057 ql_lbq_block_size(qdev),
1058 PCI_DMA_FROMDEVICE);
1059 return lbq_desc;
1060 }
1061
1062 /* Get the next small buffer. */
1063 static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
1064 {
1065 struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
1066 rx_ring->sbq_curr_idx++;
1067 if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
1068 rx_ring->sbq_curr_idx = 0;
1069 rx_ring->sbq_free_cnt++;
1070 return sbq_desc;
1071 }
1072
1073 /* Update an rx ring index. */
1074 static void ql_update_cq(struct rx_ring *rx_ring)
1075 {
1076 rx_ring->cnsmr_idx++;
1077 rx_ring->curr_entry++;
1078 if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
1079 rx_ring->cnsmr_idx = 0;
1080 rx_ring->curr_entry = rx_ring->cq_base;
1081 }
1082 }
1083
1084 static void ql_write_cq_idx(struct rx_ring *rx_ring)
1085 {
1086 ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
1087 }
1088
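/* Hand out the next lbq_buf_size chunk of the current "master" page,
 * allocating and DMA-mapping a fresh compound page when the previous one
 * has been consumed. Every chunk except the last takes an extra page
 * reference; the mapping is torn down only when the last chunk is handed
 * back to the stack (see ql_get_curr_lchunk()).
 */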
1089 static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
1090 struct bq_desc *lbq_desc)
1091 {
1092 if (!rx_ring->pg_chunk.page) {
1093 u64 map;
1094 rx_ring->pg_chunk.page = alloc_pages(__GFP_COLD | __GFP_COMP |
1095 GFP_ATOMIC,
1096 qdev->lbq_buf_order);
1097 if (unlikely(!rx_ring->pg_chunk.page)) {
1098 netif_err(qdev, drv, qdev->ndev,
1099 "page allocation failed.\n");
1100 return -ENOMEM;
1101 }
1102 rx_ring->pg_chunk.offset = 0;
1103 map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page,
1104 0, ql_lbq_block_size(qdev),
1105 PCI_DMA_FROMDEVICE);
1106 if (pci_dma_mapping_error(qdev->pdev, map)) {
1107 __free_pages(rx_ring->pg_chunk.page,
1108 qdev->lbq_buf_order);
1109 netif_err(qdev, drv, qdev->ndev,
1110 "PCI mapping failed.\n");
1111 return -ENOMEM;
1112 }
1113 rx_ring->pg_chunk.map = map;
1114 rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page);
1115 }
1116
1117 /* Copy the current master pg_chunk info
1118 * to the current descriptor.
1119 */
1120 lbq_desc->p.pg_chunk = rx_ring->pg_chunk;
1121
1122 /* Adjust the master page chunk for next
1123 * buffer get.
1124 */
1125 rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size;
1126 if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) {
1127 rx_ring->pg_chunk.page = NULL;
1128 lbq_desc->p.pg_chunk.last_flag = 1;
1129 } else {
1130 rx_ring->pg_chunk.va += rx_ring->lbq_buf_size;
1131 get_page(rx_ring->pg_chunk.page);
1132 lbq_desc->p.pg_chunk.last_flag = 0;
1133 }
1134 return 0;
1135 }
1136 /* Process (refill) a large buffer queue. */
1137 static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1138 {
1139 u32 clean_idx = rx_ring->lbq_clean_idx;
1140 u32 start_idx = clean_idx;
1141 struct bq_desc *lbq_desc;
1142 u64 map;
1143 int i;
1144
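/* Refill large buffers in batches of 16 and ring the producer index
 * doorbell once at the end, rather than once per buffer.
 */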
1145 while (rx_ring->lbq_free_cnt > 32) {
1146 for (i = (rx_ring->lbq_clean_idx % 16); i < 16; i++) {
1147 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1148 "lbq: try cleaning clean_idx = %d.\n",
1149 clean_idx);
1150 lbq_desc = &rx_ring->lbq[clean_idx];
1151 if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
1152 rx_ring->lbq_clean_idx = clean_idx;
1153 netif_err(qdev, ifup, qdev->ndev,
1154 "Could not get a page chunk, i=%d, clean_idx =%d .\n",
1155 i, clean_idx);
1156 return;
1157 }
1158
1159 map = lbq_desc->p.pg_chunk.map +
1160 lbq_desc->p.pg_chunk.offset;
1161 dma_unmap_addr_set(lbq_desc, mapaddr, map);
1162 dma_unmap_len_set(lbq_desc, maplen,
1163 rx_ring->lbq_buf_size);
1164 *lbq_desc->addr = cpu_to_le64(map);
1165
1166 pci_dma_sync_single_for_device(qdev->pdev, map,
1167 rx_ring->lbq_buf_size,
1168 PCI_DMA_FROMDEVICE);
1169 clean_idx++;
1170 if (clean_idx == rx_ring->lbq_len)
1171 clean_idx = 0;
1172 }
1173
1174 rx_ring->lbq_clean_idx = clean_idx;
1175 rx_ring->lbq_prod_idx += 16;
1176 if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
1177 rx_ring->lbq_prod_idx = 0;
1178 rx_ring->lbq_free_cnt -= 16;
1179 }
1180
1181 if (start_idx != clean_idx) {
1182 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1183 "lbq: updating prod idx = %d.\n",
1184 rx_ring->lbq_prod_idx);
1185 ql_write_db_reg(rx_ring->lbq_prod_idx,
1186 rx_ring->lbq_prod_idx_db_reg);
1187 }
1188 }
1189
1190 /* Process (refill) a small buffer queue. */
1191 static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1192 {
1193 u32 clean_idx = rx_ring->sbq_clean_idx;
1194 u32 start_idx = clean_idx;
1195 struct bq_desc *sbq_desc;
1196 u64 map;
1197 int i;
1198
1199 while (rx_ring->sbq_free_cnt > 16) {
1200 for (i = (rx_ring->sbq_clean_idx % 16); i < 16; i++) {
1201 sbq_desc = &rx_ring->sbq[clean_idx];
1202 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1203 "sbq: try cleaning clean_idx = %d.\n",
1204 clean_idx);
1205 if (sbq_desc->p.skb == NULL) {
1206 netif_printk(qdev, rx_status, KERN_DEBUG,
1207 qdev->ndev,
1208 "sbq: getting new skb for index %d.\n",
1209 sbq_desc->index);
1210 sbq_desc->p.skb =
1211 netdev_alloc_skb(qdev->ndev,
1212 SMALL_BUFFER_SIZE);
1213 if (sbq_desc->p.skb == NULL) {
1214 netif_err(qdev, probe, qdev->ndev,
1215 "Couldn't get an skb.\n");
1216 rx_ring->sbq_clean_idx = clean_idx;
1217 return;
1218 }
1219 skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
1220 map = pci_map_single(qdev->pdev,
1221 sbq_desc->p.skb->data,
1222 rx_ring->sbq_buf_size,
1223 PCI_DMA_FROMDEVICE);
1224 if (pci_dma_mapping_error(qdev->pdev, map)) {
1225 netif_err(qdev, ifup, qdev->ndev,
1226 "PCI mapping failed.\n");
1227 rx_ring->sbq_clean_idx = clean_idx;
1228 dev_kfree_skb_any(sbq_desc->p.skb);
1229 sbq_desc->p.skb = NULL;
1230 return;
1231 }
1232 dma_unmap_addr_set(sbq_desc, mapaddr, map);
1233 dma_unmap_len_set(sbq_desc, maplen,
1234 rx_ring->sbq_buf_size);
1235 *sbq_desc->addr = cpu_to_le64(map);
1236 }
1237
1238 clean_idx++;
1239 if (clean_idx == rx_ring->sbq_len)
1240 clean_idx = 0;
1241 }
1242 rx_ring->sbq_clean_idx = clean_idx;
1243 rx_ring->sbq_prod_idx += 16;
1244 if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
1245 rx_ring->sbq_prod_idx = 0;
1246 rx_ring->sbq_free_cnt -= 16;
1247 }
1248
1249 if (start_idx != clean_idx) {
1250 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1251 "sbq: updating prod idx = %d.\n",
1252 rx_ring->sbq_prod_idx);
1253 ql_write_db_reg(rx_ring->sbq_prod_idx,
1254 rx_ring->sbq_prod_idx_db_reg);
1255 }
1256 }
1257
1258 static void ql_update_buffer_queues(struct ql_adapter *qdev,
1259 struct rx_ring *rx_ring)
1260 {
1261 ql_update_sbq(qdev, rx_ring);
1262 ql_update_lbq(qdev, rx_ring);
1263 }
1264
1265 /* Unmaps tx buffers. Can be called from send() if a pci mapping
1266 * fails at some stage, or from the interrupt when a tx completes.
1267 */
1268 static void ql_unmap_send(struct ql_adapter *qdev,
1269 struct tx_ring_desc *tx_ring_desc, int mapped)
1270 {
1271 int i;
1272 for (i = 0; i < mapped; i++) {
1273 if (i == 0 || (i == 7 && mapped > 7)) {
1274 /*
1275 * Unmap the skb->data area, or the
1276 * external sglist (AKA the Outbound
1277 * Address List (OAL)).
1278 * If it's the zeroeth element, then it's
1279 * the skb->data area. If it's the 7th
1280 * element and there are more than 6 frags,
1281 * then it's an OAL.
1282 */
1283 if (i == 7) {
1284 netif_printk(qdev, tx_done, KERN_DEBUG,
1285 qdev->ndev,
1286 "unmapping OAL area.\n");
1287 }
1288 pci_unmap_single(qdev->pdev,
1289 dma_unmap_addr(&tx_ring_desc->map[i],
1290 mapaddr),
1291 dma_unmap_len(&tx_ring_desc->map[i],
1292 maplen),
1293 PCI_DMA_TODEVICE);
1294 } else {
1295 netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
1296 "unmapping frag %d.\n", i);
1297 pci_unmap_page(qdev->pdev,
1298 dma_unmap_addr(&tx_ring_desc->map[i],
1299 mapaddr),
1300 dma_unmap_len(&tx_ring_desc->map[i],
1301 maplen), PCI_DMA_TODEVICE);
1302 }
1303 }
1304
1305 }
1306
1307 /* Map the buffers for this transmit. This will return
1308 * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
1309 */
1310 static int ql_map_send(struct ql_adapter *qdev,
1311 struct ob_mac_iocb_req *mac_iocb_ptr,
1312 struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
1313 {
1314 int len = skb_headlen(skb);
1315 dma_addr_t map;
1316 int frag_idx, err, map_idx = 0;
1317 struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
1318 int frag_cnt = skb_shinfo(skb)->nr_frags;
1319
1320 if (frag_cnt) {
1321 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
1322 "frag_cnt = %d.\n", frag_cnt);
1323 }
1324 /*
1325 * Map the skb buffer first.
1326 */
1327 map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
1328
1329 err = pci_dma_mapping_error(qdev->pdev, map);
1330 if (err) {
1331 netif_err(qdev, tx_queued, qdev->ndev,
1332 "PCI mapping failed with error: %d\n", err);
1333
1334 return NETDEV_TX_BUSY;
1335 }
1336
1337 tbd->len = cpu_to_le32(len);
1338 tbd->addr = cpu_to_le64(map);
1339 dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1340 dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
1341 map_idx++;
1342
1343 /*
1344 * This loop fills the remainder of the 8 address descriptors
1345 * in the IOCB. If there are more than 7 fragments, then the
1346 * eighth address desc will point to an external list (OAL).
1347 * When this happens, the remainder of the frags will be stored
1348 * in this list.
1349 */
1350 for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
1351 skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
1352 tbd++;
1353 if (frag_idx == 6 && frag_cnt > 7) {
1354 /* Let's tack on an sglist.
1355 * Our control block will now
1356 * look like this:
1357 * iocb->seg[0] = skb->data
1358 * iocb->seg[1] = frag[0]
1359 * iocb->seg[2] = frag[1]
1360 * iocb->seg[3] = frag[2]
1361 * iocb->seg[4] = frag[3]
1362 * iocb->seg[5] = frag[4]
1363 * iocb->seg[6] = frag[5]
1364 * iocb->seg[7] = ptr to OAL (external sglist)
1365 * oal->seg[0] = frag[6]
1366 * oal->seg[1] = frag[7]
1367 * oal->seg[2] = frag[8]
1368 * oal->seg[3] = frag[9]
1369 * oal->seg[4] = frag[10]
1370 * etc...
1371 */
1372 /* Tack on the OAL in the eighth segment of IOCB. */
1373 map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
1374 sizeof(struct oal),
1375 PCI_DMA_TODEVICE);
1376 err = pci_dma_mapping_error(qdev->pdev, map);
1377 if (err) {
1378 netif_err(qdev, tx_queued, qdev->ndev,
1379 "PCI mapping outbound address list with error: %d\n",
1380 err);
1381 goto map_error;
1382 }
1383
1384 tbd->addr = cpu_to_le64(map);
1385 /*
1386 * The length is the number of fragments
1387 * that remain to be mapped times the length
1388 * of our sglist (OAL).
1389 */
1390 tbd->len =
1391 cpu_to_le32((sizeof(struct tx_buf_desc) *
1392 (frag_cnt - frag_idx)) | TX_DESC_C);
1393 dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
1394 map);
1395 dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1396 sizeof(struct oal));
1397 tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
1398 map_idx++;
1399 }
1400
1401 map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, skb_frag_size(frag),
1402 DMA_TO_DEVICE);
1403
1404 err = dma_mapping_error(&qdev->pdev->dev, map);
1405 if (err) {
1406 netif_err(qdev, tx_queued, qdev->ndev,
1407 "PCI mapping frags failed with error: %d.\n",
1408 err);
1409 goto map_error;
1410 }
1411
1412 tbd->addr = cpu_to_le64(map);
1413 tbd->len = cpu_to_le32(skb_frag_size(frag));
1414 dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1415 dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1416 skb_frag_size(frag));
1417
1418 }
1419 /* Save the number of segments we've mapped. */
1420 tx_ring_desc->map_cnt = map_idx;
1421 /* Terminate the last segment. */
1422 tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
1423 return NETDEV_TX_OK;
1424
1425 map_error:
1426 /*
1427 * If the first frag mapping failed, then i will be zero.
1428 * This causes the unmap of the skb->data area. Otherwise
1429 * we pass in the number of frags that mapped successfully
1430 * so they can be unmapped.
1431 */
1432 ql_unmap_send(qdev, tx_ring_desc, map_idx);
1433 return NETDEV_TX_BUSY;
1434 }
1435
1436 /* Categorizing receive firmware frame errors */
1437 static void ql_categorize_rx_err(struct ql_adapter *qdev, u8 rx_err)
1438 {
1439 struct nic_stats *stats = &qdev->nic_stats;
1440
1441 stats->rx_err_count++;
1442
1443 switch (rx_err & IB_MAC_IOCB_RSP_ERR_MASK) {
1444 case IB_MAC_IOCB_RSP_ERR_CODE_ERR:
1445 stats->rx_code_err++;
1446 break;
1447 case IB_MAC_IOCB_RSP_ERR_OVERSIZE:
1448 stats->rx_oversize_err++;
1449 break;
1450 case IB_MAC_IOCB_RSP_ERR_UNDERSIZE:
1451 stats->rx_undersize_err++;
1452 break;
1453 case IB_MAC_IOCB_RSP_ERR_PREAMBLE:
1454 stats->rx_preamble_err++;
1455 break;
1456 case IB_MAC_IOCB_RSP_ERR_FRAME_LEN:
1457 stats->rx_frame_len_err++;
1458 break;
1459 case IB_MAC_IOCB_RSP_ERR_CRC:
1460 stats->rx_crc_err++;
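/* fall through - shares the terminating break with the default case */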
1461 default:
1462 break;
1463 }
1464 }
1465
1466 /* Process an inbound completion whose data is in a page chunk; pass the page to GRO as an skb frag (no copy). */
1467 static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
1468 struct rx_ring *rx_ring,
1469 struct ib_mac_iocb_rsp *ib_mac_rsp,
1470 u32 length,
1471 u16 vlan_id)
1472 {
1473 struct sk_buff *skb;
1474 struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1475 struct napi_struct *napi = &rx_ring->napi;
1476
1477 napi->dev = qdev->ndev;
1478
1479 skb = napi_get_frags(napi);
1480 if (!skb) {
1481 netif_err(qdev, drv, qdev->ndev,
1482 "Couldn't get an skb, exiting.\n");
1483 rx_ring->rx_dropped++;
1484 put_page(lbq_desc->p.pg_chunk.page);
1485 return;
1486 }
1487 prefetch(lbq_desc->p.pg_chunk.va);
1488 __skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
1489 lbq_desc->p.pg_chunk.page,
1490 lbq_desc->p.pg_chunk.offset,
1491 length);
1492
1493 skb->len += length;
1494 skb->data_len += length;
1495 skb->truesize += length;
1496 skb_shinfo(skb)->nr_frags++;
1497
1498 rx_ring->rx_packets++;
1499 rx_ring->rx_bytes += length;
1500 skb->ip_summed = CHECKSUM_UNNECESSARY;
1501 skb_record_rx_queue(skb, rx_ring->cq_id);
1502 if (vlan_id != 0xffff)
1503 __vlan_hwaccel_put_tag(skb, vlan_id);
1504 napi_gro_frags(napi);
1505 }
1506
1507 /* Process an inbound completion from a page chunk: copy the Ethernet header into a new skb and chain the page as a frag. */
1508 static void ql_process_mac_rx_page(struct ql_adapter *qdev,
1509 struct rx_ring *rx_ring,
1510 struct ib_mac_iocb_rsp *ib_mac_rsp,
1511 u32 length,
1512 u16 vlan_id)
1513 {
1514 struct net_device *ndev = qdev->ndev;
1515 struct sk_buff *skb = NULL;
1516 void *addr;
1517 struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1518 struct napi_struct *napi = &rx_ring->napi;
1519
1520 skb = netdev_alloc_skb(ndev, length);
1521 if (!skb) {
1522 netif_err(qdev, drv, qdev->ndev,
1523 "Couldn't get an skb, need to unwind!.\n");
1524 rx_ring->rx_dropped++;
1525 put_page(lbq_desc->p.pg_chunk.page);
1526 return;
1527 }
1528
1529 addr = lbq_desc->p.pg_chunk.va;
1530 prefetch(addr);
1531
1532 /* The max framesize filter on this chip is set higher than
1533 * MTU since FCoE uses 2k frames.
1534 */
1535 if (skb->len > ndev->mtu + ETH_HLEN) {
1536 netif_err(qdev, drv, qdev->ndev,
1537 "Segment too small, dropping.\n");
1538 rx_ring->rx_dropped++;
1539 goto err_out;
1540 }
1541 memcpy(skb_put(skb, ETH_HLEN), addr, ETH_HLEN);
1542 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1543 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1544 length);
1545 skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1546 lbq_desc->p.pg_chunk.offset+ETH_HLEN,
1547 length-ETH_HLEN);
1548 skb->len += length-ETH_HLEN;
1549 skb->data_len += length-ETH_HLEN;
1550 skb->truesize += length-ETH_HLEN;
1551
1552 rx_ring->rx_packets++;
1553 rx_ring->rx_bytes += skb->len;
1554 skb->protocol = eth_type_trans(skb, ndev);
1555 skb_checksum_none_assert(skb);
1556
1557 if ((ndev->features & NETIF_F_RXCSUM) &&
1558 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1559 /* TCP frame. */
1560 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1561 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1562 "TCP checksum done!\n");
1563 skb->ip_summed = CHECKSUM_UNNECESSARY;
1564 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1565 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1566 /* Unfragmented ipv4 UDP frame. */
1567 struct iphdr *iph =
1568 (struct iphdr *) ((u8 *)addr + ETH_HLEN);
1569 if (!(iph->frag_off &
1570 htons(IP_MF|IP_OFFSET))) {
1571 skb->ip_summed = CHECKSUM_UNNECESSARY;
1572 netif_printk(qdev, rx_status, KERN_DEBUG,
1573 qdev->ndev,
1574 "UDP checksum done!\n");
1575 }
1576 }
1577 }
1578
1579 skb_record_rx_queue(skb, rx_ring->cq_id);
1580 if (vlan_id != 0xffff)
1581 __vlan_hwaccel_put_tag(skb, vlan_id);
1582 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1583 napi_gro_receive(napi, skb);
1584 else
1585 netif_receive_skb(skb);
1586 return;
1587 err_out:
1588 dev_kfree_skb_any(skb);
1589 put_page(lbq_desc->p.pg_chunk.page);
1590 }
1591
1592 /* Process an inbound completion that fits in a small buffer: copy it into a newly allocated skb. */
1593 static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
1594 struct rx_ring *rx_ring,
1595 struct ib_mac_iocb_rsp *ib_mac_rsp,
1596 u32 length,
1597 u16 vlan_id)
1598 {
1599 struct net_device *ndev = qdev->ndev;
1600 struct sk_buff *skb = NULL;
1601 struct sk_buff *new_skb = NULL;
1602 struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring);
1603
1604 skb = sbq_desc->p.skb;
1605 /* Allocate new_skb and copy */
1606 new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
1607 if (new_skb == NULL) {
1608 netif_err(qdev, probe, qdev->ndev,
1609 "No skb available, drop the packet.\n");
1610 rx_ring->rx_dropped++;
1611 return;
1612 }
1613 skb_reserve(new_skb, NET_IP_ALIGN);
1614 memcpy(skb_put(new_skb, length), skb->data, length);
1615 skb = new_skb;
1616
1617 /* loopback self test for ethtool */
1618 if (test_bit(QL_SELFTEST, &qdev->flags)) {
1619 ql_check_lb_frame(qdev, skb);
1620 dev_kfree_skb_any(skb);
1621 return;
1622 }
1623
1624 /* The max framesize filter on this chip is set higher than
1625 * MTU since FCoE uses 2k frames.
1626 */
1627 if (skb->len > ndev->mtu + ETH_HLEN) {
1628 dev_kfree_skb_any(skb);
1629 rx_ring->rx_dropped++;
1630 return;
1631 }
1632
1633 prefetch(skb->data);
1634 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1635 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1636 "%s Multicast.\n",
1637 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1638 IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1639 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1640 IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1641 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1642 IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1643 }
1644 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
1645 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1646 "Promiscuous Packet.\n");
1647
1648 rx_ring->rx_packets++;
1649 rx_ring->rx_bytes += skb->len;
1650 skb->protocol = eth_type_trans(skb, ndev);
1651 skb_checksum_none_assert(skb);
1652
1653 /* If rx checksum is on, and there are no
1654 * csum or frame errors.
1655 */
1656 if ((ndev->features & NETIF_F_RXCSUM) &&
1657 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1658 /* TCP frame. */
1659 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1660 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1661 "TCP checksum done!\n");
1662 skb->ip_summed = CHECKSUM_UNNECESSARY;
1663 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1664 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1665 /* Unfragmented ipv4 UDP frame. */
1666 struct iphdr *iph = (struct iphdr *) skb->data;
1667 if (!(iph->frag_off &
1668 htons(IP_MF|IP_OFFSET))) {
1669 skb->ip_summed = CHECKSUM_UNNECESSARY;
1670 netif_printk(qdev, rx_status, KERN_DEBUG,
1671 qdev->ndev,
1672 "UDP checksum done!\n");
1673 }
1674 }
1675 }
1676
1677 skb_record_rx_queue(skb, rx_ring->cq_id);
1678 if (vlan_id != 0xffff)
1679 __vlan_hwaccel_put_tag(skb, vlan_id);
1680 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1681 napi_gro_receive(&rx_ring->napi, skb);
1682 else
1683 netif_receive_skb(skb);
1684 }
1685
1686 static void ql_realign_skb(struct sk_buff *skb, int len)
1687 {
1688 void *temp_addr = skb->data;
1689
1690 /* Undo the skb_reserve(skb,32) we did before
1691 * giving to hardware, and realign data on
1692 * a 2-byte boundary.
1693 */
1694 skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
1695 skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
1696 skb_copy_to_linear_data(skb, temp_addr,
1697 (unsigned int)len);
1698 }
1699
1700 /*
1701 * This function builds an skb for the given inbound
1702 * completion. It will be rewritten for readability in the near
1703 * future, but for now it works well.
1704 */
1705 static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1706 struct rx_ring *rx_ring,
1707 struct ib_mac_iocb_rsp *ib_mac_rsp)
1708 {
1709 struct bq_desc *lbq_desc;
1710 struct bq_desc *sbq_desc;
1711 struct sk_buff *skb = NULL;
1712 u32 length = le32_to_cpu(ib_mac_rsp->data_len);
1713 u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
1714
1715 /*
1716 * Handle the header buffer if present.
1717 */
1718 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
1719 ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1720 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1721 "Header of %d bytes in small buffer.\n", hdr_len);
1722 /*
1723 * Headers fit nicely into a small buffer.
1724 */
1725 sbq_desc = ql_get_curr_sbuf(rx_ring);
1726 pci_unmap_single(qdev->pdev,
1727 dma_unmap_addr(sbq_desc, mapaddr),
1728 dma_unmap_len(sbq_desc, maplen),
1729 PCI_DMA_FROMDEVICE);
1730 skb = sbq_desc->p.skb;
1731 ql_realign_skb(skb, hdr_len);
1732 skb_put(skb, hdr_len);
1733 sbq_desc->p.skb = NULL;
1734 }
1735
1736 /*
1737 * Handle the data buffer(s).
1738 */
1739 if (unlikely(!length)) { /* Is there data too? */
1740 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1741 "No Data buffer in this packet.\n");
1742 return skb;
1743 }
1744
1745 if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
1746 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1747 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1748 "Headers in small, data of %d bytes in small, combine them.\n",
1749 length);
1750 /*
1751 * Data is less than small buffer size so it's
1752 * stuffed in a small buffer.
1753 * For this case we append the data
1754 * from the "data" small buffer to the "header" small
1755 * buffer.
1756 */
1757 sbq_desc = ql_get_curr_sbuf(rx_ring);
1758 pci_dma_sync_single_for_cpu(qdev->pdev,
1759 dma_unmap_addr
1760 (sbq_desc, mapaddr),
1761 dma_unmap_len
1762 (sbq_desc, maplen),
1763 PCI_DMA_FROMDEVICE);
1764 memcpy(skb_put(skb, length),
1765 sbq_desc->p.skb->data, length);
1766 pci_dma_sync_single_for_device(qdev->pdev,
1767 dma_unmap_addr
1768 (sbq_desc,
1769 mapaddr),
1770 dma_unmap_len
1771 (sbq_desc,
1772 maplen),
1773 PCI_DMA_FROMDEVICE);
1774 } else {
1775 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1776 "%d bytes in a single small buffer.\n",
1777 length);
1778 sbq_desc = ql_get_curr_sbuf(rx_ring);
1779 skb = sbq_desc->p.skb;
1780 ql_realign_skb(skb, length);
1781 skb_put(skb, length);
1782 pci_unmap_single(qdev->pdev,
1783 dma_unmap_addr(sbq_desc,
1784 mapaddr),
1785 dma_unmap_len(sbq_desc,
1786 maplen),
1787 PCI_DMA_FROMDEVICE);
1788 sbq_desc->p.skb = NULL;
1789 }
1790 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
1791 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1792 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1793 "Header in small, %d bytes in large. Chain large to small!\n",
1794 length);
1795 /*
1796 * The data is in a single large buffer. We
1797 * chain it to the header buffer's skb and let
1798 * it rip.
1799 */
1800 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1801 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1802 "Chaining page at offset = %d, for %d bytes to skb.\n",
1803 lbq_desc->p.pg_chunk.offset, length);
1804 skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1805 lbq_desc->p.pg_chunk.offset,
1806 length);
1807 skb->len += length;
1808 skb->data_len += length;
1809 skb->truesize += length;
1810 } else {
1811 /*
1812 * The headers and data are in a single large buffer. We
1813 * copy it to a new skb and let it go. This can happen with
1814 * jumbo mtu on a non-TCP/UDP frame.
1815 */
1816 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1817 skb = netdev_alloc_skb(qdev->ndev, length);
1818 if (skb == NULL) {
1819 netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev,
1820 "No skb available, drop the packet.\n");
1821 return NULL;
1822 }
1823 pci_unmap_page(qdev->pdev,
1824 dma_unmap_addr(lbq_desc,
1825 mapaddr),
1826 dma_unmap_len(lbq_desc, maplen),
1827 PCI_DMA_FROMDEVICE);
1828 skb_reserve(skb, NET_IP_ALIGN);
1829 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1830 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1831 length);
1832 skb_fill_page_desc(skb, 0,
1833 lbq_desc->p.pg_chunk.page,
1834 lbq_desc->p.pg_chunk.offset,
1835 length);
1836 skb->len += length;
1837 skb->data_len += length;
1838 skb->truesize += length;
1839 length -= length;
1840 __pskb_pull_tail(skb,
1841 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1842 VLAN_ETH_HLEN : ETH_HLEN);
1843 }
1844 } else {
1845 /*
1846 * The data is in a chain of large buffers
1847 * pointed to by a small buffer. We loop
1848 * through and chain them to our small
1849 * buffer's skb.
1850 * frags: There are 18 max frags and our small
1851 * buffer will hold 32 of them. The thing is,
1852 * we'll use 3 max for our 9000 byte jumbo
1853 * frames. If the MTU goes up we could
1854 * eventually be in trouble.
1855 */
1856 int size, i = 0;
1857 sbq_desc = ql_get_curr_sbuf(rx_ring);
1858 pci_unmap_single(qdev->pdev,
1859 dma_unmap_addr(sbq_desc, mapaddr),
1860 dma_unmap_len(sbq_desc, maplen),
1861 PCI_DMA_FROMDEVICE);
1862 if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
1863 /*
1864 * This is a non-TCP/UDP IP frame, so
1865 * the headers aren't split into a small
1866 * buffer. We have to use the small buffer
1867 * that contains our sg list as our skb to
1868 * send upstairs. Copy the sg list here to
1869 * a local buffer and use it to find the
1870 * pages to chain.
1871 */
1872 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1873 "%d bytes of headers & data in chain of large.\n",
1874 length);
1875 skb = sbq_desc->p.skb;
1876 sbq_desc->p.skb = NULL;
1877 skb_reserve(skb, NET_IP_ALIGN);
1878 }
1879 while (length > 0) {
1880 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1881 size = (length < rx_ring->lbq_buf_size) ? length :
1882 rx_ring->lbq_buf_size;
1883
1884 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1885 "Adding page %d to skb for %d bytes.\n",
1886 i, size);
1887 skb_fill_page_desc(skb, i,
1888 lbq_desc->p.pg_chunk.page,
1889 lbq_desc->p.pg_chunk.offset,
1890 size);
1891 skb->len += size;
1892 skb->data_len += size;
1893 skb->truesize += size;
1894 length -= size;
1895 i++;
1896 }
1897 __pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1898 VLAN_ETH_HLEN : ETH_HLEN);
1899 }
1900 return skb;
1901 }
1902
1903 /* Process an inbound completion from an rx ring. */
1904 static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
1905 struct rx_ring *rx_ring,
1906 struct ib_mac_iocb_rsp *ib_mac_rsp,
1907 u16 vlan_id)
1908 {
1909 struct net_device *ndev = qdev->ndev;
1910 struct sk_buff *skb = NULL;
1911
1912 QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
1913
1914 skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
1915 if (unlikely(!skb)) {
1916 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1917 "No skb available, drop packet.\n");
1918 rx_ring->rx_dropped++;
1919 return;
1920 }
1921
1922 /* The max framesize filter on this chip is set higher than
1923 * MTU since FCoE uses 2k frames.
1924 */
1925 if (skb->len > ndev->mtu + ETH_HLEN) {
1926 dev_kfree_skb_any(skb);
1927 rx_ring->rx_dropped++;
1928 return;
1929 }
1930
1931 /* loopback self test for ethtool */
1932 if (test_bit(QL_SELFTEST, &qdev->flags)) {
1933 ql_check_lb_frame(qdev, skb);
1934 dev_kfree_skb_any(skb);
1935 return;
1936 }
1937
1938 prefetch(skb->data);
1939 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1940 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n",
1941 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1942 IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1943 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1944 IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1945 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1946 IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1947 rx_ring->rx_multicast++;
1948 }
1949 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
1950 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1951 "Promiscuous Packet.\n");
1952 }
1953
1954 skb->protocol = eth_type_trans(skb, ndev);
1955 skb_checksum_none_assert(skb);
1956
1957 /* If rx checksum is on, and there are no
1958 * csum or frame errors.
1959 */
1960 if ((ndev->features & NETIF_F_RXCSUM) &&
1961 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1962 /* TCP frame. */
1963 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1964 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1965 "TCP checksum done!\n");
1966 skb->ip_summed = CHECKSUM_UNNECESSARY;
1967 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1968 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1969 /* Unfragmented ipv4 UDP frame. */
1970 struct iphdr *iph = (struct iphdr *) skb->data;
1971 if (!(iph->frag_off &
1972 htons(IP_MF|IP_OFFSET))) {
1973 skb->ip_summed = CHECKSUM_UNNECESSARY;
1974 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1975 "TCP checksum done!\n");
1976 }
1977 }
1978 }
1979
1980 rx_ring->rx_packets++;
1981 rx_ring->rx_bytes += skb->len;
1982 skb_record_rx_queue(skb, rx_ring->cq_id);
1983 if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) && (vlan_id != 0))
1984 __vlan_hwaccel_put_tag(skb, vlan_id);
1985 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1986 napi_gro_receive(&rx_ring->napi, skb);
1987 else
1988 netif_receive_skb(skb);
1989 }
1990
1991 /* Process an inbound completion from an rx ring. */
1992 static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
1993 struct rx_ring *rx_ring,
1994 struct ib_mac_iocb_rsp *ib_mac_rsp)
1995 {
1996 u32 length = le32_to_cpu(ib_mac_rsp->data_len);
1997 u16 vlan_id = (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1998 ((le16_to_cpu(ib_mac_rsp->vlan_id) &
1999 IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
2000
2001 QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
2002
2003 /* Frame error, so drop the packet. */
2004 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
2005 ql_categorize_rx_err(qdev, ib_mac_rsp->flags2);
2006 return (unsigned long)length;
2007 }
2008
2009 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
2010 /* The data and headers are split into
2011 * separate buffers.
2012 */
2013 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2014 vlan_id);
2015 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
2016 /* The data fit in a single small buffer.
2017 * Allocate a new skb, copy the data and
2018 * return the buffer to the free pool.
2019 */
2020 ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp,
2021 length, vlan_id);
2022 } else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
2023 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
2024 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
2025 /* TCP packet in a page chunk that's been checksummed.
2026 * Tack it on to our GRO skb and let it go.
2027 */
2028 ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp,
2029 length, vlan_id);
2030 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
2031 /* Non-TCP packet in a page chunk. Allocate an
2032 * skb, tack it on frags, and send it up.
2033 */
2034 ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp,
2035 length, vlan_id);
2036 } else {
2037 /* Non-TCP/UDP large frames that span multiple buffers
2038 * can be processed correctly by the split frame logic.
2039 */
2040 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2041 vlan_id);
2042 }
2043
2044 return (unsigned long)length;
2045 }
2046
2047 /* Process an outbound completion from an rx ring. */
2048 static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
2049 struct ob_mac_iocb_rsp *mac_rsp)
2050 {
2051 struct tx_ring *tx_ring;
2052 struct tx_ring_desc *tx_ring_desc;
2053
2054 QL_DUMP_OB_MAC_RSP(mac_rsp);
2055 tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
2056 tx_ring_desc = &tx_ring->q[mac_rsp->tid];
2057 ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
2058 tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
2059 tx_ring->tx_packets++;
2060 dev_kfree_skb(tx_ring_desc->skb);
2061 tx_ring_desc->skb = NULL;
2062
2063 if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
2064 OB_MAC_IOCB_RSP_S |
2065 OB_MAC_IOCB_RSP_L |
2066 OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
2067 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
2068 netif_warn(qdev, tx_done, qdev->ndev,
2069 "Total descriptor length did not match transfer length.\n");
2070 }
2071 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
2072 netif_warn(qdev, tx_done, qdev->ndev,
2073 "Frame too short to be valid, not sent.\n");
2074 }
2075 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
2076 netif_warn(qdev, tx_done, qdev->ndev,
2077 "Frame too long, but sent anyway.\n");
2078 }
2079 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
2080 netif_warn(qdev, tx_done, qdev->ndev,
2081 "PCI backplane error. Frame not sent.\n");
2082 }
2083 }
2084 atomic_inc(&tx_ring->tx_count);
2085 }
2086
2087 /* Fire up a handler to reset the MPI processor. */
2088 void ql_queue_fw_error(struct ql_adapter *qdev)
2089 {
2090 ql_link_off(qdev);
2091 queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
2092 }
2093
2094 void ql_queue_asic_error(struct ql_adapter *qdev)
2095 {
2096 ql_link_off(qdev);
2097 ql_disable_interrupts(qdev);
2098 /* Clear adapter up bit to signal the recovery
2099 * process that it shouldn't kill the reset worker
2100 * thread
2101 */
2102 clear_bit(QL_ADAPTER_UP, &qdev->flags);
2103 /* Set the asic recovery bit to indicate to the reset process that
2104 * we are in fatal error recovery rather than a normal close
2105 */
2106 set_bit(QL_ASIC_RECOVERY, &qdev->flags);
2107 queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
2108 }
2109
2110 static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
2111 struct ib_ae_iocb_rsp *ib_ae_rsp)
2112 {
2113 switch (ib_ae_rsp->event) {
2114 case MGMT_ERR_EVENT:
2115 netif_err(qdev, rx_err, qdev->ndev,
2116 "Management Processor Fatal Error.\n");
2117 ql_queue_fw_error(qdev);
2118 return;
2119
2120 case CAM_LOOKUP_ERR_EVENT:
2121 netdev_err(qdev->ndev, "Multiple CAM hits lookup occurred.\n");
2122 netdev_err(qdev->ndev, "This event shouldn't occur.\n");
2123 ql_queue_asic_error(qdev);
2124 return;
2125
2126 case SOFT_ECC_ERROR_EVENT:
2127 netdev_err(qdev->ndev, "Soft ECC error detected.\n");
2128 ql_queue_asic_error(qdev);
2129 break;
2130
2131 case PCI_ERR_ANON_BUF_RD:
2132 netdev_err(qdev->ndev, "PCI error occurred when reading "
2133 "anonymous buffers from rx_ring %d.\n",
2134 ib_ae_rsp->q_id);
2135 ql_queue_asic_error(qdev);
2136 break;
2137
2138 default:
2139 netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n",
2140 ib_ae_rsp->event);
2141 ql_queue_asic_error(qdev);
2142 break;
2143 }
2144 }
2145
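/* Service TX completions that arrive on this outbound completion
 * (rx) ring and return the number of entries processed.
 */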
2146 static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
2147 {
2148 struct ql_adapter *qdev = rx_ring->qdev;
2149 u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2150 struct ob_mac_iocb_rsp *net_rsp = NULL;
2151 int count = 0;
2152
2153 struct tx_ring *tx_ring;
2154 /* While there are entries in the completion queue. */
2155 while (prod != rx_ring->cnsmr_idx) {
2156
2157 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2158 "cq_id = %d, prod = %d, cnsmr = %d.\n.",
2159 rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
2160
2161 net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
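/* Order the read of the IOCB contents after the producer index
 * read that indicated this entry is valid.
 */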
2162 rmb();
2163 switch (net_rsp->opcode) {
2164
2165 case OPCODE_OB_MAC_TSO_IOCB:
2166 case OPCODE_OB_MAC_IOCB:
2167 ql_process_mac_tx_intr(qdev, net_rsp);
2168 break;
2169 default:
2170 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2171 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2172 net_rsp->opcode);
2173 }
2174 count++;
2175 ql_update_cq(rx_ring);
2176 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2177 }
2178 if (!net_rsp)
2179 return 0;
2180 ql_write_cq_idx(rx_ring);
2181 tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
2182 if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
2183 if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
2184 /*
2185 * The queue got stopped because the tx_ring was full.
2186 * Wake it up, because it's now at least 25% empty.
2187 */
2188 netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
2189 }
2190
2191 return count;
2192 }
2193
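/* Service inbound (RSS) completions for this ring, up to the NAPI
 * budget, and return the number of entries processed.
 */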
2194 static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
2195 {
2196 struct ql_adapter *qdev = rx_ring->qdev;
2197 u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2198 struct ql_net_rsp_iocb *net_rsp;
2199 int count = 0;
2200
2201 /* While there are entries in the completion queue. */
2202 while (prod != rx_ring->cnsmr_idx) {
2203
2204 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2205 "cq_id = %d, prod = %d, cnsmr = %d.\n.",
2206 rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
2207
2208 net_rsp = rx_ring->curr_entry;
2209 rmb();
2210 switch (net_rsp->opcode) {
2211 case OPCODE_IB_MAC_IOCB:
2212 ql_process_mac_rx_intr(qdev, rx_ring,
2213 (struct ib_mac_iocb_rsp *)
2214 net_rsp);
2215 break;
2216
2217 case OPCODE_IB_AE_IOCB:
2218 ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
2219 net_rsp);
2220 break;
2221 default:
2222 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2223 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2224 net_rsp->opcode);
2225 break;
2226 }
2227 count++;
2228 ql_update_cq(rx_ring);
2229 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2230 if (count == budget)
2231 break;
2232 }
2233 ql_update_buffer_queues(qdev, rx_ring);
2234 ql_write_cq_idx(rx_ring);
2235 return count;
2236 }
2237
2238 static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
2239 {
2240 struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
2241 struct ql_adapter *qdev = rx_ring->qdev;
2242 struct rx_ring *trx_ring;
2243 int i, work_done = 0;
2244 struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];
2245
2246 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2247 "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id);
2248
2249 /* Service the TX rings first. They start
2250 * right after the RSS rings. */
2251 for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
2252 trx_ring = &qdev->rx_ring[i];
2253 /* If this TX completion ring belongs to this vector and
2254 * it's not empty then service it.
2255 */
2256 if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
2257 (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
2258 trx_ring->cnsmr_idx)) {
2259 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2260 "%s: Servicing TX completion ring %d.\n",
2261 __func__, trx_ring->cq_id);
2262 ql_clean_outbound_rx_ring(trx_ring);
2263 }
2264 }
2265
2266 /*
2267 * Now service the RSS ring if it's active.
2268 */
2269 if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
2270 rx_ring->cnsmr_idx) {
2271 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2272 "%s: Servicing RX completion ring %d.\n",
2273 __func__, rx_ring->cq_id);
2274 work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
2275 }
2276
2277 if (work_done < budget) {
2278 napi_complete(napi);
2279 ql_enable_completion_interrupt(qdev, rx_ring->irq);
2280 }
2281 return work_done;
2282 }
2283
2284 static void qlge_vlan_mode(struct net_device *ndev, netdev_features_t features)
2285 {
2286 struct ql_adapter *qdev = netdev_priv(ndev);
2287
2288 if (features & NETIF_F_HW_VLAN_RX) {
2289 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
2290 NIC_RCV_CFG_VLAN_MATCH_AND_NON);
2291 } else {
2292 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
2293 }
2294 }
2295
2296 static netdev_features_t qlge_fix_features(struct net_device *ndev,
2297 netdev_features_t features)
2298 {
2299 /*
2300 * Since there is no support for separate rx/tx vlan accel
2301 * enable/disable, make sure the tx flag is always in the same state as rx.
2302 */
2303 if (features & NETIF_F_HW_VLAN_RX)
2304 features |= NETIF_F_HW_VLAN_TX;
2305 else
2306 features &= ~NETIF_F_HW_VLAN_TX;
2307
2308 return features;
2309 }
2310
2311 static int qlge_set_features(struct net_device *ndev,
2312 netdev_features_t features)
2313 {
2314 netdev_features_t changed = ndev->features ^ features;
2315
2316 if (changed & NETIF_F_HW_VLAN_RX)
2317 qlge_vlan_mode(ndev, features);
2318
2319 return 0;
2320 }
2321
2322 static int __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid)
2323 {
2324 u32 enable_bit = MAC_ADDR_E;
2325 int err;
2326
2327 err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
2328 MAC_ADDR_TYPE_VLAN, vid);
2329 if (err)
2330 netif_err(qdev, ifup, qdev->ndev,
2331 "Failed to init vlan address.\n");
2332 return err;
2333 }
2334
2335 static int qlge_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
2336 {
2337 struct ql_adapter *qdev = netdev_priv(ndev);
2338 int status;
2339 int err;
2340
2341 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2342 if (status)
2343 return status;
2344
2345 err = __qlge_vlan_rx_add_vid(qdev, vid);
2346 set_bit(vid, qdev->active_vlans);
2347
2348 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2349
2350 return err;
2351 }
2352
2353 static int __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid)
2354 {
2355 u32 enable_bit = 0;
2356 int err;
2357
2358 err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
2359 MAC_ADDR_TYPE_VLAN, vid);
2360 if (err)
2361 netif_err(qdev, ifup, qdev->ndev,
2362 "Failed to clear vlan address.\n");
2363 return err;
2364 }
2365
2366 static int qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
2367 {
2368 struct ql_adapter *qdev = netdev_priv(ndev);
2369 int status;
2370 int err;
2371
2372 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2373 if (status)
2374 return status;
2375
2376 err = __qlge_vlan_rx_kill_vid(qdev, vid);
2377 clear_bit(vid, qdev->active_vlans);
2378
2379 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2380
2381 return err;
2382 }
2383
2384 static void qlge_restore_vlan(struct ql_adapter *qdev)
2385 {
2386 int status;
2387 u16 vid;
2388
2389 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2390 if (status)
2391 return;
2392
2393 for_each_set_bit(vid, qdev->active_vlans, VLAN_N_VID)
2394 __qlge_vlan_rx_add_vid(qdev, vid);
2395
2396 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2397 }
2398
2399 /* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
2400 static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
2401 {
2402 struct rx_ring *rx_ring = dev_id;
2403 napi_schedule(&rx_ring->napi);
2404 return IRQ_HANDLED;
2405 }
2406
2407 /* This handles a fatal error, MPI activity, and the default
2408 * rx_ring in an MSI-X multiple vector environment.
2409 * In an MSI/Legacy environment it also processes the rest of
2410 * the rx_rings.
2411 */
2412 static irqreturn_t qlge_isr(int irq, void *dev_id)
2413 {
2414 struct rx_ring *rx_ring = dev_id;
2415 struct ql_adapter *qdev = rx_ring->qdev;
2416 struct intr_context *intr_context = &qdev->intr_context[0];
2417 u32 var;
2418 int work_done = 0;
2419
2420 spin_lock(&qdev->hw_lock);
2421 if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
2422 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2423 "Shared Interrupt, Not ours!\n");
2424 spin_unlock(&qdev->hw_lock);
2425 return IRQ_NONE;
2426 }
2427 spin_unlock(&qdev->hw_lock);
2428
2429 var = ql_disable_completion_interrupt(qdev, intr_context->intr);
2430
2431 /*
2432 * Check for fatal error.
2433 */
2434 if (var & STS_FE) {
2435 ql_queue_asic_error(qdev);
2436 netdev_err(qdev->ndev, "Got fatal error, STS = %x.\n", var);
2437 var = ql_read32(qdev, ERR_STS);
2438 netdev_err(qdev->ndev, "Resetting chip. "
2439 "Error Status Register = 0x%x\n", var);
2440 return IRQ_HANDLED;
2441 }
2442
2443 /*
2444 * Check MPI processor activity.
2445 */
2446 if ((var & STS_PI) &&
2447 (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
2448 /*
2449 * We've got an async event or mailbox completion.
2450 * Handle it and clear the source of the interrupt.
2451 */
2452 netif_err(qdev, intr, qdev->ndev,
2453 "Got MPI processor interrupt.\n");
2454 ql_disable_completion_interrupt(qdev, intr_context->intr);
2455 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
2456 queue_delayed_work_on(smp_processor_id(),
2457 qdev->workqueue, &qdev->mpi_work, 0);
2458 work_done++;
2459 }
2460
2461 /*
2462 * Get the bit-mask that shows the active queues for this
2463 * pass. Compare it to the queues that this irq services
2464 * and call napi if there's a match.
2465 */
2466 var = ql_read32(qdev, ISR1);
2467 if (var & intr_context->irq_mask) {
2468 netif_info(qdev, intr, qdev->ndev,
2469 "Waking handler for rx_ring[0].\n");
2470 ql_disable_completion_interrupt(qdev, intr_context->intr);
2471 napi_schedule(&rx_ring->napi);
2472 work_done++;
2473 }
2474 ql_enable_completion_interrupt(qdev, intr_context->intr);
2475 return work_done ? IRQ_HANDLED : IRQ_NONE;
2476 }
2477
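/* Set up a TSO (LSO) request in the IOCB for a GSO skb. Returns 1 if
 * TSO was set up, 0 for non-GSO frames, or a negative errno if the
 * cloned header could not be expanded.
 */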
2478 static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2479 {
2480
2481 if (skb_is_gso(skb)) {
2482 int err;
2483 if (skb_header_cloned(skb)) {
2484 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2485 if (err)
2486 return err;
2487 }
2488
2489 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2490 mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
2491 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2492 mac_iocb_ptr->total_hdrs_len =
2493 cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
2494 mac_iocb_ptr->net_trans_offset =
2495 cpu_to_le16(skb_network_offset(skb) |
2496 skb_transport_offset(skb)
2497 << OB_MAC_TRANSPORT_HDR_SHIFT);
2498 mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2499 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
2500 if (likely(skb->protocol == htons(ETH_P_IP))) {
2501 struct iphdr *iph = ip_hdr(skb);
2502 iph->check = 0;
2503 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2504 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2505 iph->daddr, 0,
2506 IPPROTO_TCP,
2507 0);
2508 } else if (skb->protocol == htons(ETH_P_IPV6)) {
2509 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
2510 tcp_hdr(skb)->check =
2511 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2512 &ipv6_hdr(skb)->daddr,
2513 0, IPPROTO_TCP, 0);
2514 }
2515 return 1;
2516 }
2517 return 0;
2518 }
2519
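/* Set up IPv4 TCP/UDP checksum offload: point the chip at the
 * transport checksum field and seed it with the pseudo-header checksum.
 */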
2520 static void ql_hw_csum_setup(struct sk_buff *skb,
2521 struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2522 {
2523 int len;
2524 struct iphdr *iph = ip_hdr(skb);
2525 __sum16 *check;
2526 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2527 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2528 mac_iocb_ptr->net_trans_offset =
2529 cpu_to_le16(skb_network_offset(skb) |
2530 skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
2531
2532 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2533 len = (ntohs(iph->tot_len) - (iph->ihl << 2));
2534 if (likely(iph->protocol == IPPROTO_TCP)) {
2535 check = &(tcp_hdr(skb)->check);
2536 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
2537 mac_iocb_ptr->total_hdrs_len =
2538 cpu_to_le16(skb_transport_offset(skb) +
2539 (tcp_hdr(skb)->doff << 2));
2540 } else {
2541 check = &(udp_hdr(skb)->check);
2542 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
2543 mac_iocb_ptr->total_hdrs_len =
2544 cpu_to_le16(skb_transport_offset(skb) +
2545 sizeof(struct udphdr));
2546 }
2547 *check = ~csum_tcpudp_magic(iph->saddr,
2548 iph->daddr, len, iph->protocol, 0);
2549 }
2550
2551 static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
2552 {
2553 struct tx_ring_desc *tx_ring_desc;
2554 struct ob_mac_iocb_req *mac_iocb_ptr;
2555 struct ql_adapter *qdev = netdev_priv(ndev);
2556 int tso;
2557 struct tx_ring *tx_ring;
2558 u32 tx_ring_idx = (u32) skb->queue_mapping;
2559
2560 tx_ring = &qdev->tx_ring[tx_ring_idx];
2561
2562 if (skb_padto(skb, ETH_ZLEN))
2563 return NETDEV_TX_OK;
2564
2565 if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
2566 netif_info(qdev, tx_queued, qdev->ndev,
2567 "%s: BUG! shutting down tx queue %d due to lack of resources.\n",
2568 __func__, tx_ring_idx);
2569 netif_stop_subqueue(ndev, tx_ring->wq_id);
2570 tx_ring->tx_errors++;
2571 return NETDEV_TX_BUSY;
2572 }
2573 tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
2574 mac_iocb_ptr = tx_ring_desc->queue_entry;
2575 memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
2576
2577 mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
2578 mac_iocb_ptr->tid = tx_ring_desc->index;
2579 /* Store the tx queue index in the IOCB so that when we get the
2580 * completion we can use it to establish the context.
2581 */
2582 mac_iocb_ptr->txq_idx = tx_ring_idx;
2583 tx_ring_desc->skb = skb;
2584
2585 mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
2586
2587 if (vlan_tx_tag_present(skb)) {
2588 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2589 "Adding a vlan tag %d.\n", vlan_tx_tag_get(skb));
2590 mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
2591 mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb));
2592 }
2593 tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2594 if (tso < 0) {
2595 dev_kfree_skb_any(skb);
2596 return NETDEV_TX_OK;
2597 } else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
2598 ql_hw_csum_setup(skb,
2599 (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2600 }
2601 if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
2602 NETDEV_TX_OK) {
2603 netif_err(qdev, tx_queued, qdev->ndev,
2604 "Could not map the segments.\n");
2605 tx_ring->tx_errors++;
2606 return NETDEV_TX_BUSY;
2607 }
2608 QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
2609 tx_ring->prod_idx++;
2610 if (tx_ring->prod_idx == tx_ring->wq_len)
2611 tx_ring->prod_idx = 0;
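/* Make sure the IOCB is written to host memory before the doorbell
 * write below makes it visible to the hardware.
 */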
2612 wmb();
2613
2614 ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
2615 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2616 "tx queued, slot %d, len %d\n",
2617 tx_ring->prod_idx, skb->len);
2618
2619 atomic_dec(&tx_ring->tx_count);
2620
2621 if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
2622 netif_stop_subqueue(ndev, tx_ring->wq_id);
2623 if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
2624 /*
2625 * The queue got stopped because the tx_ring was full.
2626 * Wake it up, because it's now at least 25% empty.
2627 */
2628 netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
2629 }
2630 return NETDEV_TX_OK;
2631 }
2632
2633
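/* The shadow register areas are pages of coherent DMA memory that the
 * chip updates with queue indices, so the driver can read them without
 * costly register accesses.
 */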
2634 static void ql_free_shadow_space(struct ql_adapter *qdev)
2635 {
2636 if (qdev->rx_ring_shadow_reg_area) {
2637 pci_free_consistent(qdev->pdev,
2638 PAGE_SIZE,
2639 qdev->rx_ring_shadow_reg_area,
2640 qdev->rx_ring_shadow_reg_dma);
2641 qdev->rx_ring_shadow_reg_area = NULL;
2642 }
2643 if (qdev->tx_ring_shadow_reg_area) {
2644 pci_free_consistent(qdev->pdev,
2645 PAGE_SIZE,
2646 qdev->tx_ring_shadow_reg_area,
2647 qdev->tx_ring_shadow_reg_dma);
2648 qdev->tx_ring_shadow_reg_area = NULL;
2649 }
2650 }
2651
2652 static int ql_alloc_shadow_space(struct ql_adapter *qdev)
2653 {
2654 qdev->rx_ring_shadow_reg_area =
2655 pci_alloc_consistent(qdev->pdev,
2656 PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma);
2657 if (qdev->rx_ring_shadow_reg_area == NULL) {
2658 netif_err(qdev, ifup, qdev->ndev,
2659 "Allocation of RX shadow space failed.\n");
2660 return -ENOMEM;
2661 }
2662 memset(qdev->rx_ring_shadow_reg_area, 0, PAGE_SIZE);
2663 qdev->tx_ring_shadow_reg_area =
2664 pci_alloc_consistent(qdev->pdev, PAGE_SIZE,
2665 &qdev->tx_ring_shadow_reg_dma);
2666 if (qdev->tx_ring_shadow_reg_area == NULL) {
2667 netif_err(qdev, ifup, qdev->ndev,
2668 "Allocation of TX shadow space failed.\n");
2669 goto err_wqp_sh_area;
2670 }
2671 memset(qdev->tx_ring_shadow_reg_area, 0, PAGE_SIZE);
2672 return 0;
2673
2674 err_wqp_sh_area:
2675 pci_free_consistent(qdev->pdev,
2676 PAGE_SIZE,
2677 qdev->rx_ring_shadow_reg_area,
2678 qdev->rx_ring_shadow_reg_dma);
2679 return -ENOMEM;
2680 }
2681
2682 static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2683 {
2684 struct tx_ring_desc *tx_ring_desc;
2685 int i;
2686 struct ob_mac_iocb_req *mac_iocb_ptr;
2687
2688 mac_iocb_ptr = tx_ring->wq_base;
2689 tx_ring_desc = tx_ring->q;
2690 for (i = 0; i < tx_ring->wq_len; i++) {
2691 tx_ring_desc->index = i;
2692 tx_ring_desc->skb = NULL;
2693 tx_ring_desc->queue_entry = mac_iocb_ptr;
2694 mac_iocb_ptr++;
2695 tx_ring_desc++;
2696 }
2697 atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
2698 }
2699
2700 static void ql_free_tx_resources(struct ql_adapter *qdev,
2701 struct tx_ring *tx_ring)
2702 {
2703 if (tx_ring->wq_base) {
2704 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2705 tx_ring->wq_base, tx_ring->wq_base_dma);
2706 tx_ring->wq_base = NULL;
2707 }
2708 kfree(tx_ring->q);
2709 tx_ring->q = NULL;
2710 }
2711
2712 static int ql_alloc_tx_resources(struct ql_adapter *qdev,
2713 struct tx_ring *tx_ring)
2714 {
2715 tx_ring->wq_base =
2716 pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
2717 &tx_ring->wq_base_dma);
2718
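/* The work queue base must not have any of the WQ_ADDR_ALIGN bits set;
 * fall back to the error path if the DMA address is misaligned.
 */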
2719 if ((tx_ring->wq_base == NULL) ||
2720 tx_ring->wq_base_dma & WQ_ADDR_ALIGN)
2721 goto pci_alloc_err;
2722
2723 tx_ring->q =
2724 kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
2725 if (tx_ring->q == NULL)
2726 goto err;
2727
2728 return 0;
2729 err:
2730 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2731 tx_ring->wq_base, tx_ring->wq_base_dma);
2732 tx_ring->wq_base = NULL;
2733 pci_alloc_err:
2734 netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
2735 return -ENOMEM;
2736 }
2737
2738 static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2739 {
2740 struct bq_desc *lbq_desc;
2741
2742 uint32_t curr_idx, clean_idx;
2743
2744 curr_idx = rx_ring->lbq_curr_idx;
2745 clean_idx = rx_ring->lbq_clean_idx;
2746 while (curr_idx != clean_idx) {
2747 lbq_desc = &rx_ring->lbq[curr_idx];
2748
2749 if (lbq_desc->p.pg_chunk.last_flag) {
2750 pci_unmap_page(qdev->pdev,
2751 lbq_desc->p.pg_chunk.map,
2752 ql_lbq_block_size(qdev),
2753 PCI_DMA_FROMDEVICE);
2754 lbq_desc->p.pg_chunk.last_flag = 0;
2755 }
2756
2757 put_page(lbq_desc->p.pg_chunk.page);
2758 lbq_desc->p.pg_chunk.page = NULL;
2759
2760 if (++curr_idx == rx_ring->lbq_len)
2761 curr_idx = 0;
2762
2763 }
2764 }
2765
2766 static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2767 {
2768 int i;
2769 struct bq_desc *sbq_desc;
2770
2771 for (i = 0; i < rx_ring->sbq_len; i++) {
2772 sbq_desc = &rx_ring->sbq[i];
2773 if (sbq_desc == NULL) {
2774 netif_err(qdev, ifup, qdev->ndev,
2775 "sbq_desc %d is NULL.\n", i);
2776 return;
2777 }
2778 if (sbq_desc->p.skb) {
2779 pci_unmap_single(qdev->pdev,
2780 dma_unmap_addr(sbq_desc, mapaddr),
2781 dma_unmap_len(sbq_desc, maplen),
2782 PCI_DMA_FROMDEVICE);
2783 dev_kfree_skb(sbq_desc->p.skb);
2784 sbq_desc->p.skb = NULL;
2785 }
2786 }
2787 }
2788
2789 /* Free all large and small rx buffers associated
2790 * with the completion queues for this device.
2791 */
2792 static void ql_free_rx_buffers(struct ql_adapter *qdev)
2793 {
2794 int i;
2795 struct rx_ring *rx_ring;
2796
2797 for (i = 0; i < qdev->rx_ring_count; i++) {
2798 rx_ring = &qdev->rx_ring[i];
2799 if (rx_ring->lbq)
2800 ql_free_lbq_buffers(qdev, rx_ring);
2801 if (rx_ring->sbq)
2802 ql_free_sbq_buffers(qdev, rx_ring);
2803 }
2804 }
2805
2806 static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
2807 {
2808 struct rx_ring *rx_ring;
2809 int i;
2810
2811 for (i = 0; i < qdev->rx_ring_count; i++) {
2812 rx_ring = &qdev->rx_ring[i];
2813 if (rx_ring->type != TX_Q)
2814 ql_update_buffer_queues(qdev, rx_ring);
2815 }
2816 }
2817
2818 static void ql_init_lbq_ring(struct ql_adapter *qdev,
2819 struct rx_ring *rx_ring)
2820 {
2821 int i;
2822 struct bq_desc *lbq_desc;
2823 __le64 *bq = rx_ring->lbq_base;
2824
2825 memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc));
2826 for (i = 0; i < rx_ring->lbq_len; i++) {
2827 lbq_desc = &rx_ring->lbq[i];
2828 memset(lbq_desc, 0, sizeof(*lbq_desc));
2829 lbq_desc->index = i;
2830 lbq_desc->addr = bq;
2831 bq++;
2832 }
2833 }
2834
2835 static void ql_init_sbq_ring(struct ql_adapter *qdev,
2836 struct rx_ring *rx_ring)
2837 {
2838 int i;
2839 struct bq_desc *sbq_desc;
2840 __le64 *bq = rx_ring->sbq_base;
2841
2842 memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc));
2843 for (i = 0; i < rx_ring->sbq_len; i++) {
2844 sbq_desc = &rx_ring->sbq[i];
2845 memset(sbq_desc, 0, sizeof(*sbq_desc));
2846 sbq_desc->index = i;
2847 sbq_desc->addr = bq;
2848 bq++;
2849 }
2850 }
2851
2852 static void ql_free_rx_resources(struct ql_adapter *qdev,
2853 struct rx_ring *rx_ring)
2854 {
2855 /* Free the small buffer queue. */
2856 if (rx_ring->sbq_base) {
2857 pci_free_consistent(qdev->pdev,
2858 rx_ring->sbq_size,
2859 rx_ring->sbq_base, rx_ring->sbq_base_dma);
2860 rx_ring->sbq_base = NULL;
2861 }
2862
2863 /* Free the small buffer queue control blocks. */
2864 kfree(rx_ring->sbq);
2865 rx_ring->sbq = NULL;
2866
2867 /* Free the large buffer queue. */
2868 if (rx_ring->lbq_base) {
2869 pci_free_consistent(qdev->pdev,
2870 rx_ring->lbq_size,
2871 rx_ring->lbq_base, rx_ring->lbq_base_dma);
2872 rx_ring->lbq_base = NULL;
2873 }
2874
2875 /* Free the large buffer queue control blocks. */
2876 kfree(rx_ring->lbq);
2877 rx_ring->lbq = NULL;
2878
2879 /* Free the rx queue. */
2880 if (rx_ring->cq_base) {
2881 pci_free_consistent(qdev->pdev,
2882 rx_ring->cq_size,
2883 rx_ring->cq_base, rx_ring->cq_base_dma);
2884 rx_ring->cq_base = NULL;
2885 }
2886 }
2887
2888 /* Allocate queues and buffers for this completion queue based
2889 * on the values in the parameter structure. */
2890 static int ql_alloc_rx_resources(struct ql_adapter *qdev,
2891 struct rx_ring *rx_ring)
2892 {
2893
2894 /*
2895 * Allocate the completion queue for this rx_ring.
2896 */
2897 rx_ring->cq_base =
2898 pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
2899 &rx_ring->cq_base_dma);
2900
2901 if (rx_ring->cq_base == NULL) {
2902 netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n");
2903 return -ENOMEM;
2904 }
2905
2906 if (rx_ring->sbq_len) {
2907 /*
2908 * Allocate small buffer queue.
2909 */
2910 rx_ring->sbq_base =
2911 pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
2912 &rx_ring->sbq_base_dma);
2913
2914 if (rx_ring->sbq_base == NULL) {
2915 netif_err(qdev, ifup, qdev->ndev,
2916 "Small buffer queue allocation failed.\n");
2917 goto err_mem;
2918 }
2919
2920 /*
2921 * Allocate small buffer queue control blocks.
2922 */
2923 rx_ring->sbq = kmalloc_array(rx_ring->sbq_len,
2924 sizeof(struct bq_desc),
2925 GFP_KERNEL);
2926 if (rx_ring->sbq == NULL)
2927 goto err_mem;
2928
2929 ql_init_sbq_ring(qdev, rx_ring);
2930 }
2931
2932 if (rx_ring->lbq_len) {
2933 /*
2934 * Allocate large buffer queue.
2935 */
2936 rx_ring->lbq_base =
2937 pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
2938 &rx_ring->lbq_base_dma);
2939
2940 if (rx_ring->lbq_base == NULL) {
2941 netif_err(qdev, ifup, qdev->ndev,
2942 "Large buffer queue allocation failed.\n");
2943 goto err_mem;
2944 }
2945 /*
2946 * Allocate large buffer queue control blocks.
2947 */
2948 rx_ring->lbq = kmalloc_array(rx_ring->lbq_len,
2949 sizeof(struct bq_desc),
2950 GFP_KERNEL);
2951 if (rx_ring->lbq == NULL)
2952 goto err_mem;
2953
2954 ql_init_lbq_ring(qdev, rx_ring);
2955 }
2956
2957 return 0;
2958
2959 err_mem:
2960 ql_free_rx_resources(qdev, rx_ring);
2961 return -ENOMEM;
2962 }
2963
2964 static void ql_tx_ring_clean(struct ql_adapter *qdev)
2965 {
2966 struct tx_ring *tx_ring;
2967 struct tx_ring_desc *tx_ring_desc;
2968 int i, j;
2969
2970 /*
2971 * Loop through all queues and free
2972 * any resources.
2973 */
2974 for (j = 0; j < qdev->tx_ring_count; j++) {
2975 tx_ring = &qdev->tx_ring[j];
2976 for (i = 0; i < tx_ring->wq_len; i++) {
2977 tx_ring_desc = &tx_ring->q[i];
2978 if (tx_ring_desc && tx_ring_desc->skb) {
2979 netif_err(qdev, ifdown, qdev->ndev,
2980 "Freeing lost SKB %p, from queue %d, index %d.\n",
2981 tx_ring_desc->skb, j,
2982 tx_ring_desc->index);
2983 ql_unmap_send(qdev, tx_ring_desc,
2984 tx_ring_desc->map_cnt);
2985 dev_kfree_skb(tx_ring_desc->skb);
2986 tx_ring_desc->skb = NULL;
2987 }
2988 }
2989 }
2990 }
2991
2992 static void ql_free_mem_resources(struct ql_adapter *qdev)
2993 {
2994 int i;
2995
2996 for (i = 0; i < qdev->tx_ring_count; i++)
2997 ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
2998 for (i = 0; i < qdev->rx_ring_count; i++)
2999 ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
3000 ql_free_shadow_space(qdev);
3001 }
3002
3003 static int ql_alloc_mem_resources(struct ql_adapter *qdev)
3004 {
3005 int i;
3006
3007 /* Allocate space for our shadow registers and such. */
3008 if (ql_alloc_shadow_space(qdev))
3009 return -ENOMEM;
3010
3011 for (i = 0; i < qdev->rx_ring_count; i++) {
3012 if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
3013 netif_err(qdev, ifup, qdev->ndev,
3014 "RX resource allocation failed.\n");
3015 goto err_mem;
3016 }
3017 }
3018 /* Allocate tx queue resources */
3019 for (i = 0; i < qdev->tx_ring_count; i++) {
3020 if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
3021 netif_err(qdev, ifup, qdev->ndev,
3022 "TX resource allocation failed.\n");
3023 goto err_mem;
3024 }
3025 }
3026 return 0;
3027
3028 err_mem:
3029 ql_free_mem_resources(qdev);
3030 return -ENOMEM;
3031 }
3032
3033 /* Set up the rx ring control block and pass it to the chip.
3034 * The control block is defined as
3035 * "Completion Queue Initialization Control Block", or cqicb.
3036 */
3037 static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
3038 {
3039 struct cqicb *cqicb = &rx_ring->cqicb;
3040 void *shadow_reg = qdev->rx_ring_shadow_reg_area +
3041 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
3042 u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
3043 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
3044 void __iomem *doorbell_area =
3045 qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
3046 int err = 0;
3047 u16 bq_len;
3048 u64 tmp;
3049 __le64 *base_indirect_ptr;
3050 int page_entries;
3051
3052 /* Set up the shadow registers for this ring. */
3053 rx_ring->prod_idx_sh_reg = shadow_reg;
3054 rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
3055 *rx_ring->prod_idx_sh_reg = 0;
3056 shadow_reg += sizeof(u64);
3057 shadow_reg_dma += sizeof(u64);
3058 rx_ring->lbq_base_indirect = shadow_reg;
3059 rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
3060 shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3061 shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3062 rx_ring->sbq_base_indirect = shadow_reg;
3063 rx_ring->sbq_base_indirect_dma = shadow_reg_dma;
3064
3065 /* PCI doorbell mem area + 0x00 for consumer index register */
3066 rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
3067 rx_ring->cnsmr_idx = 0;
3068 rx_ring->curr_entry = rx_ring->cq_base;
3069
3070 /* PCI doorbell mem area + 0x04 for valid register */
3071 rx_ring->valid_db_reg = doorbell_area + 0x04;
3072
3073 /* PCI doorbell mem area + 0x18 for large buffer consumer */
3074 rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);
3075
3076 /* PCI doorbell mem area + 0x1c */
3077 rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);
3078
3079 memset((void *)cqicb, 0, sizeof(struct cqicb));
3080 cqicb->msix_vect = rx_ring->irq;
3081
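/* Queue lengths are 16-bit fields, so a full 65536-entry queue is
 * encoded as 0.
 */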
3082 bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
3083 cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);
3084
3085 cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);
3086
3087 cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);
3088
3089 /*
3090 * Set up the control block load flags.
3091 */
3092 cqicb->flags = FLAGS_LC | /* Load queue base address */
3093 FLAGS_LV | /* Load MSI-X vector */
3094 FLAGS_LI; /* Load irq delay values */
3095 if (rx_ring->lbq_len) {
3096 cqicb->flags |= FLAGS_LL; /* Load lbq values */
3097 tmp = (u64)rx_ring->lbq_base_dma;
3098 base_indirect_ptr = rx_ring->lbq_base_indirect;
3099 page_entries = 0;
3100 do {
3101 *base_indirect_ptr = cpu_to_le64(tmp);
3102 tmp += DB_PAGE_SIZE;
3103 base_indirect_ptr++;
3104 page_entries++;
3105 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3106 cqicb->lbq_addr =
3107 cpu_to_le64(rx_ring->lbq_base_indirect_dma);
3108 bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
3109 (u16) rx_ring->lbq_buf_size;
3110 cqicb->lbq_buf_size = cpu_to_le16(bq_len);
3111 bq_len = (rx_ring->lbq_len == 65536) ? 0 :
3112 (u16) rx_ring->lbq_len;
3113 cqicb->lbq_len = cpu_to_le16(bq_len);
3114 rx_ring->lbq_prod_idx = 0;
3115 rx_ring->lbq_curr_idx = 0;
3116 rx_ring->lbq_clean_idx = 0;
3117 rx_ring->lbq_free_cnt = rx_ring->lbq_len;
3118 }
3119 if (rx_ring->sbq_len) {
3120 cqicb->flags |= FLAGS_LS; /* Load sbq values */
3121 tmp = (u64)rx_ring->sbq_base_dma;
3122 base_indirect_ptr = rx_ring->sbq_base_indirect;
3123 page_entries = 0;
3124 do {
3125 *base_indirect_ptr = cpu_to_le64(tmp);
3126 tmp += DB_PAGE_SIZE;
3127 base_indirect_ptr++;
3128 page_entries++;
3129 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len));
3130 cqicb->sbq_addr =
3131 cpu_to_le64(rx_ring->sbq_base_indirect_dma);
3132 cqicb->sbq_buf_size =
3133 cpu_to_le16((u16)(rx_ring->sbq_buf_size));
3134 bq_len = (rx_ring->sbq_len == 65536) ? 0 :
3135 (u16) rx_ring->sbq_len;
3136 cqicb->sbq_len = cpu_to_le16(bq_len);
3137 rx_ring->sbq_prod_idx = 0;
3138 rx_ring->sbq_curr_idx = 0;
3139 rx_ring->sbq_clean_idx = 0;
3140 rx_ring->sbq_free_cnt = rx_ring->sbq_len;
3141 }
3142 switch (rx_ring->type) {
3143 case TX_Q:
3144 cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
3145 cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
3146 break;
3147 case RX_Q:
3148 /* Inbound completion handling rx_rings run in
3149 * separate NAPI contexts.
3150 */
3151 netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
3152 64);
3153 cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
3154 cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
3155 break;
3156 default:
3157 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3158 "Invalid rx_ring->type = %d.\n", rx_ring->type);
3159 }
3160 err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
3161 CFG_LCQ, rx_ring->cq_id);
3162 if (err) {
3163 netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n");
3164 return err;
3165 }
3166 return err;
3167 }
3168
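/* Set up the tx ring (work queue) control block, or wqicb, and pass
 * it to the chip.
 */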
3169 static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
3170 {
3171 struct wqicb *wqicb = (struct wqicb *)tx_ring;
3172 void __iomem *doorbell_area =
3173 qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
3174 void *shadow_reg = qdev->tx_ring_shadow_reg_area +
3175 (tx_ring->wq_id * sizeof(u64));
3176 u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
3177 (tx_ring->wq_id * sizeof(u64));
3178 int err = 0;
3179
3180 /*
3181 * Assign doorbell registers for this tx_ring.
3182 */
3183 /* TX PCI doorbell mem area for tx producer index */
3184 tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
3185 tx_ring->prod_idx = 0;
3186 /* TX PCI doorbell mem area + 0x04 */
3187 tx_ring->valid_db_reg = doorbell_area + 0x04;
3188
3189 /*
3190 * Assign shadow registers for this tx_ring.
3191 */
3192 tx_ring->cnsmr_idx_sh_reg = shadow_reg;
3193 tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
3194
3195 wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
3196 wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
3197 Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
3198 wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
3199 wqicb->rid = 0;
3200 wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);
3201
3202 wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);
3203
3204 ql_init_tx_ring(qdev, tx_ring);
3205
3206 err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
3207 (u16) tx_ring->wq_id);
3208 if (err) {
3209 netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n");
3210 return err;
3211 }
3212 return err;
3213 }
3214
3215 static void ql_disable_msix(struct ql_adapter *qdev)
3216 {
3217 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3218 pci_disable_msix(qdev->pdev);
3219 clear_bit(QL_MSIX_ENABLED, &qdev->flags);
3220 kfree(qdev->msi_x_entry);
3221 qdev->msi_x_entry = NULL;
3222 } else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
3223 pci_disable_msi(qdev->pdev);
3224 clear_bit(QL_MSI_ENABLED, &qdev->flags);
3225 }
3226 }
3227
3228 /* We start by trying to get the number of vectors
3229 * stored in qdev->intr_count. If we don't get that
3230 * many then we reduce the count and try again.
3231 */
3232 static void ql_enable_msix(struct ql_adapter *qdev)
3233 {
3234 int i, err;
3235
3236 /* Get the MSIX vectors. */
3237 if (qlge_irq_type == MSIX_IRQ) {
3238 /* Try to alloc space for the msix struct,
3239 * if it fails then go to MSI/legacy.
3240 */
3241 qdev->msi_x_entry = kcalloc(qdev->intr_count,
3242 sizeof(struct msix_entry),
3243 GFP_KERNEL);
3244 if (!qdev->msi_x_entry) {
3245 qlge_irq_type = MSI_IRQ;
3246 goto msi;
3247 }
3248
3249 for (i = 0; i < qdev->intr_count; i++)
3250 qdev->msi_x_entry[i].entry = i;
3251
3252 /* Loop to get our vectors. We start with
3253 * what we want and settle for what we get.
3254 */
3255 do {
3256 err = pci_enable_msix(qdev->pdev,
3257 qdev->msi_x_entry, qdev->intr_count);
3258 if (err > 0)
3259 qdev->intr_count = err;
3260 } while (err > 0);
3261
3262 if (err < 0) {
3263 kfree(qdev->msi_x_entry);
3264 qdev->msi_x_entry = NULL;
3265 netif_warn(qdev, ifup, qdev->ndev,
3266 "MSI-X Enable failed, trying MSI.\n");
3267 qdev->intr_count = 1;
3268 qlge_irq_type = MSI_IRQ;
3269 } else if (err == 0) {
3270 set_bit(QL_MSIX_ENABLED, &qdev->flags);
3271 netif_info(qdev, ifup, qdev->ndev,
3272 "MSI-X Enabled, got %d vectors.\n",
3273 qdev->intr_count);
3274 return;
3275 }
3276 }
3277 msi:
3278 qdev->intr_count = 1;
3279 if (qlge_irq_type == MSI_IRQ) {
3280 if (!pci_enable_msi(qdev->pdev)) {
3281 set_bit(QL_MSI_ENABLED, &qdev->flags);
3282 netif_info(qdev, ifup, qdev->ndev,
3283 "Running with MSI interrupts.\n");
3284 return;
3285 }
3286 }
3287 qlge_irq_type = LEG_IRQ;
3288 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3289 "Running with legacy interrupts.\n");
3290 }
3291
3292 /* Each vector services 1 RSS ring and 1 or more
3293 * TX completion rings. This function loops through
3294 * the TX completion rings and assigns the vector that
3295 * will service it. An example would be if there are
3296 * 2 vectors (so 2 RSS rings) and 8 TX completion rings.
3297 * This would mean that vector 0 would service RSS ring 0
3298 * and TX completion rings 0,1,2 and 3. Vector 1 would
3299 * service RSS ring 1 and TX completion rings 4,5,6 and 7.
3300 */
3301 static void ql_set_tx_vect(struct ql_adapter *qdev)
3302 {
3303 int i, j, vect;
3304 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3305
3306 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3307 /* Assign irq vectors to TX rx_rings.*/
3308 for (vect = 0, j = 0, i = qdev->rss_ring_count;
3309 i < qdev->rx_ring_count; i++) {
3310 if (j == tx_rings_per_vector) {
3311 vect++;
3312 j = 0;
3313 }
3314 qdev->rx_ring[i].irq = vect;
3315 j++;
3316 }
3317 } else {
3318 /* For single vector all rings have an irq
3319 * of zero.
3320 */
3321 for (i = 0; i < qdev->rx_ring_count; i++)
3322 qdev->rx_ring[i].irq = 0;
3323 }
3324 }
3325
3326 /* Set the interrupt mask for this vector. Each vector
3327 * will service 1 RSS ring and 1 or more TX completion
3328 * rings. This function sets up a bit mask per vector
3329 * that indicates which rings it services.
3330 */
3331 static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx)
3332 {
3333 int j, vect = ctx->intr;
3334 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3335
3336 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3337 /* Add the RSS ring serviced by this vector
3338 * to the mask.
3339 */
3340 ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
3341 /* Add the TX ring(s) serviced by this vector
3342 * to the mask. */
3343 for (j = 0; j < tx_rings_per_vector; j++) {
3344 ctx->irq_mask |=
3345 (1 << qdev->rx_ring[qdev->rss_ring_count +
3346 (vect * tx_rings_per_vector) + j].cq_id);
3347 }
3348 } else {
3349 /* For single vector we just shift each queue's
3350 * ID into the mask.
3351 */
3352 for (j = 0; j < qdev->rx_ring_count; j++)
3353 ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id);
3354 }
3355 }
3356
3357 /*
3358 * Here we build the intr_context structures based on
3359 * our rx_ring count and intr vector count.
3360 * The intr_context structure is used to hook each vector
3361 * to possibly different handlers.
3362 */
3363 static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
3364 {
3365 int i = 0;
3366 struct intr_context *intr_context = &qdev->intr_context[0];
3367
3368 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3369 /* Each rx_ring has its
3370 * own intr_context since we have separate
3371 * vectors for each queue.
3372 */
3373 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3374 qdev->rx_ring[i].irq = i;
3375 intr_context->intr = i;
3376 intr_context->qdev = qdev;
3377 /* Set up this vector's bit-mask that indicates
3378 * which queues it services.
3379 */
3380 ql_set_irq_mask(qdev, intr_context);
3381 /*
3382 * We set up each vector's enable/disable/read bits so
3383 * there are no bit/mask calculations in the critical path.
3384 */
3385 intr_context->intr_en_mask =
3386 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3387 INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
3388 | i;
3389 intr_context->intr_dis_mask =
3390 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3391 INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
3392 INTR_EN_IHD | i;
3393 intr_context->intr_read_mask =
3394 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3395 INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
3396 i;
3397 if (i == 0) {
3398 /* The first vector/queue handles
3399 * broadcast/multicast, fatal errors,
3400 * and firmware events. This in addition
3401 * to normal inbound NAPI processing.
3402 */
3403 intr_context->handler = qlge_isr;
3404 sprintf(intr_context->name, "%s-rx-%d",
3405 qdev->ndev->name, i);
3406 } else {
3407 /*
3408 * Inbound queues handle unicast frames only.
3409 */
3410 intr_context->handler = qlge_msix_rx_isr;
3411 sprintf(intr_context->name, "%s-rx-%d",
3412 qdev->ndev->name, i);
3413 }
3414 }
3415 } else {
3416 /*
3417 * All rx_rings use the same intr_context since
3418 * there is only one vector.
3419 */
3420 intr_context->intr = 0;
3421 intr_context->qdev = qdev;
3422 /*
3423 * We set up each vector's enable/disable/read bits so
3424 * there are no bit/mask calculations in the critical path.
3425 */
3426 intr_context->intr_en_mask =
3427 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
3428 intr_context->intr_dis_mask =
3429 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3430 INTR_EN_TYPE_DISABLE;
3431 intr_context->intr_read_mask =
3432 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
3433 /*
3434 * Single interrupt means one handler for all rings.
3435 */
3436 intr_context->handler = qlge_isr;
3437 sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
3438 /* Set up this vector's bit-mask that indicates
3439 * which queues it services. In this case there is
3440 * a single vector so it will service all RSS and
3441 * TX completion rings.
3442 */
3443 ql_set_irq_mask(qdev, intr_context);
3444 }
3445 /* Tell the TX completion rings which MSIx vector
3446 * they will be using.
3447 */
3448 ql_set_tx_vect(qdev);
3449 }
3450
3451 static void ql_free_irq(struct ql_adapter *qdev)
3452 {
3453 int i;
3454 struct intr_context *intr_context = &qdev->intr_context[0];
3455
3456 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3457 if (intr_context->hooked) {
3458 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3459 free_irq(qdev->msi_x_entry[i].vector,
3460 &qdev->rx_ring[i]);
3461 } else {
3462 free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
3463 }
3464 }
3465 }
3466 ql_disable_msix(qdev);
3467 }
3468
3469 static int ql_request_irq(struct ql_adapter *qdev)
3470 {
3471 int i;
3472 int status = 0;
3473 struct pci_dev *pdev = qdev->pdev;
3474 struct intr_context *intr_context = &qdev->intr_context[0];
3475
3476 ql_resolve_queues_to_irqs(qdev);
3477
3478 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3479 atomic_set(&intr_context->irq_cnt, 0);
3480 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3481 status = request_irq(qdev->msi_x_entry[i].vector,
3482 intr_context->handler,
3483 0,
3484 intr_context->name,
3485 &qdev->rx_ring[i]);
3486 if (status) {
3487 netif_err(qdev, ifup, qdev->ndev,
3488 "Failed request for MSIX interrupt %d.\n",
3489 i);
3490 goto err_irq;
3491 }
3492 } else {
3493 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3494 "trying msi or legacy interrupts.\n");
3495 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3496 "%s: irq = %d.\n", __func__, pdev->irq);
3497 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3498 "%s: context->name = %s.\n", __func__,
3499 intr_context->name);
3500 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3501 "%s: dev_id = 0x%p.\n", __func__,
3502 &qdev->rx_ring[0]);
3503 status =
3504 request_irq(pdev->irq, qlge_isr,
3505 test_bit(QL_MSI_ENABLED,
3506 &qdev->
3507 flags) ? 0 : IRQF_SHARED,
3508 intr_context->name, &qdev->rx_ring[0]);
3509 if (status)
3510 goto err_irq;
3511
3512 netif_err(qdev, ifup, qdev->ndev,
3513 "Hooked intr %d, queue type %s, with name %s.\n",
3514 i,
3515 qdev->rx_ring[0].type == DEFAULT_Q ?
3516 "DEFAULT_Q" :
3517 qdev->rx_ring[0].type == TX_Q ? "TX_Q" :
3518 qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
3519 intr_context->name);
3520 }
3521 intr_context->hooked = 1;
3522 }
3523 return status;
3524 err_irq:
3525 netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!/n");
3526 ql_free_irq(qdev);
3527 return status;
3528 }
3529
3530 static int ql_start_rss(struct ql_adapter *qdev)
3531 {
3532 static const u8 init_hash_seed[] = {
3533 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
3534 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
3535 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
3536 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
3537 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
3538 };
3539 struct ricb *ricb = &qdev->ricb;
3540 int status = 0;
3541 int i;
3542 u8 *hash_id = (u8 *) ricb->hash_cq_id;
3543
3544 memset((void *)ricb, 0, sizeof(*ricb));
3545
3546 ricb->base_cq = RSS_L4K;
3547 ricb->flags =
3548 (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6);
3549 ricb->mask = cpu_to_le16((u16)(0x3ff));
3550
3551 /*
3552 * Fill out the Indirection Table.
3553 */
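/* Each of the 1024 entries maps a hash value to a CQ id; the mask
 * spreads them evenly across the RSS rings (rss_ring_count is assumed
 * to be a power of two).
 */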
3554 for (i = 0; i < 1024; i++)
3555 hash_id[i] = (i & (qdev->rss_ring_count - 1));
3556
3557 memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
3558 memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);
3559
3560 status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
3561 if (status) {
3562 netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n");
3563 return status;
3564 }
3565 return status;
3566 }
3567
3568 static int ql_clear_routing_entries(struct ql_adapter *qdev)
3569 {
3570 int i, status = 0;
3571
3572 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3573 if (status)
3574 return status;
3575 /* Clear all the entries in the routing table. */
3576 for (i = 0; i < 16; i++) {
3577 status = ql_set_routing_reg(qdev, i, 0, 0);
3578 if (status) {
3579 netif_err(qdev, ifup, qdev->ndev,
3580 "Failed to init routing register for CAM packets.\n");
3581 break;
3582 }
3583 }
3584 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3585 return status;
3586 }
3587
3588 /* Initialize the frame-to-queue routing. */
3589 static int ql_route_initialize(struct ql_adapter *qdev)
3590 {
3591 int status = 0;
3592
3593 /* Clear all the entries in the routing table. */
3594 status = ql_clear_routing_entries(qdev);
3595 if (status)
3596 return status;
3597
3598 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3599 if (status)
3600 return status;
3601
3602 status = ql_set_routing_reg(qdev, RT_IDX_IP_CSUM_ERR_SLOT,
3603 RT_IDX_IP_CSUM_ERR, 1);
3604 if (status) {
3605 netif_err(qdev, ifup, qdev->ndev,
3606 "Failed to init routing register "
3607 "for IP CSUM error packets.\n");
3608 goto exit;
3609 }
3610 status = ql_set_routing_reg(qdev, RT_IDX_TCP_UDP_CSUM_ERR_SLOT,
3611 RT_IDX_TU_CSUM_ERR, 1);
3612 if (status) {
3613 netif_err(qdev, ifup, qdev->ndev,
3614 "Failed to init routing register "
3615 "for TCP/UDP CSUM error packets.\n");
3616 goto exit;
3617 }
3618 status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
3619 if (status) {
3620 netif_err(qdev, ifup, qdev->ndev,
3621 "Failed to init routing register for broadcast packets.\n");
3622 goto exit;
3623 }
3624 /* If we have more than one inbound queue, then turn on RSS in the
3625 * routing block.
3626 */
3627 if (qdev->rss_ring_count > 1) {
3628 status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
3629 RT_IDX_RSS_MATCH, 1);
3630 if (status) {
3631 netif_err(qdev, ifup, qdev->ndev,
3632 "Failed to init routing register for MATCH RSS packets.\n");
3633 goto exit;
3634 }
3635 }
3636
3637 status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
3638 RT_IDX_CAM_HIT, 1);
3639 if (status)
3640 netif_err(qdev, ifup, qdev->ndev,
3641 "Failed to init routing register for CAM packets.\n");
3642 exit:
3643 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3644 return status;
3645 }
3646
3647 int ql_cam_route_initialize(struct ql_adapter *qdev)
3648 {
3649 int status, set;
3650
3651 /* Check if the link is up and use that to
3652 * determine if we are setting or clearing
3653 * the MAC address in the CAM.
3654 */
3655 set = ql_read32(qdev, STS);
3656 set &= qdev->port_link_up;
3657 status = ql_set_mac_addr(qdev, set);
3658 if (status) {
3659 netif_err(qdev, ifup, qdev->ndev, "Failed to init mac address.\n");
3660 return status;
3661 }
3662
3663 status = ql_route_initialize(qdev);
3664 if (status)
3665 netif_err(qdev, ifup, qdev->ndev, "Failed to init routing table.\n");
3666
3667 return status;
3668 }
3669
3670 static int ql_adapter_initialize(struct ql_adapter *qdev)
3671 {
3672 u32 value, mask;
3673 int i;
3674 int status = 0;
3675
3676 /*
3677 * Set up the System register to halt on errors.
3678 */
3679 value = SYS_EFE | SYS_FAE;
3680 mask = value << 16;
3681 ql_write32(qdev, SYS, mask | value);
3682
3683 /* Set the default queue, and VLAN behavior. */
3684 value = NIC_RCV_CFG_DFQ | NIC_RCV_CFG_RV;
3685 mask = NIC_RCV_CFG_DFQ_MASK | (NIC_RCV_CFG_RV << 16);
3686 ql_write32(qdev, NIC_RCV_CFG, (mask | value));
3687
3688 /* Set the MPI interrupt to enabled. */
3689 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
3690
3691 /* Enable the function, set pagesize, enable error checking. */
3692 value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
3693 FSC_EC | FSC_VM_PAGE_4K;
3694 value |= SPLT_SETTING;
3695
3696 /* Set/clear header splitting. */
3697 mask = FSC_VM_PAGESIZE_MASK |
3698 FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
3699 ql_write32(qdev, FSC, mask | value);
3700
3701 ql_write32(qdev, SPLT_HDR, SPLT_LEN);
3702
3703 /* Set RX packet routing to use port/pci function on which the
3704 * packet arrived, in addition to the usual frame routing.
3705 * This is helpful on bonding where both interfaces can have
3706 * the same MAC address.
3707 */
3708 ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
3709 /* Reroute all packets to our Interface.
3710 * They may have been routed to MPI firmware
3711 * due to WOL.
3712 */
3713 value = ql_read32(qdev, MGMT_RCV_CFG);
3714 value &= ~MGMT_RCV_CFG_RM;
3715 mask = 0xffff0000;
3716
3717 /* Sticky reg needs clearing due to WOL. */
3718 ql_write32(qdev, MGMT_RCV_CFG, mask);
3719 ql_write32(qdev, MGMT_RCV_CFG, mask | value);
3720
3721 /* Default WOL is enabled on Mezz cards */
3722 if (qdev->pdev->subsystem_device == 0x0068 ||
3723 qdev->pdev->subsystem_device == 0x0180)
3724 qdev->wol = WAKE_MAGIC;
3725
3726 /* Start up the rx queues. */
3727 for (i = 0; i < qdev->rx_ring_count; i++) {
3728 status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
3729 if (status) {
3730 netif_err(qdev, ifup, qdev->ndev,
3731 "Failed to start rx ring[%d].\n", i);
3732 return status;
3733 }
3734 }
3735
3736 /* If there is more than one inbound completion queue
3737 * then download a RICB to configure RSS.
3738 */
3739 if (qdev->rss_ring_count > 1) {
3740 status = ql_start_rss(qdev);
3741 if (status) {
3742 netif_err(qdev, ifup, qdev->ndev, "Failed to start RSS.\n");
3743 return status;
3744 }
3745 }
3746
3747 /* Start up the tx queues. */
3748 for (i = 0; i < qdev->tx_ring_count; i++) {
3749 status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
3750 if (status) {
3751 netif_err(qdev, ifup, qdev->ndev,
3752 "Failed to start tx ring[%d].\n", i);
3753 return status;
3754 }
3755 }
3756
3757 /* Initialize the port and set the max framesize. */
3758 status = qdev->nic_ops->port_initialize(qdev);
3759 if (status)
3760 netif_err(qdev, ifup, qdev->ndev, "Failed to start port.\n");
3761
3762 /* Set up the MAC address and frame routing filter. */
3763 status = ql_cam_route_initialize(qdev);
3764 if (status) {
3765 netif_err(qdev, ifup, qdev->ndev,
3766 "Failed to init CAM/Routing tables.\n");
3767 return status;
3768 }
3769
3770 /* Start NAPI for the RSS queues. */
3771 for (i = 0; i < qdev->rss_ring_count; i++)
3772 napi_enable(&qdev->rx_ring[i].napi);
3773
3774 return status;
3775 }
3776
3777 /* Issue soft reset to chip. */
3778 static int ql_adapter_reset(struct ql_adapter *qdev)
3779 {
3780 u32 value;
3781 int status = 0;
3782 unsigned long end_jiffies;
3783
3784 /* Clear all the entries in the routing table. */
3785 status = ql_clear_routing_entries(qdev);
3786 if (status) {
3787 netif_err(qdev, ifup, qdev->ndev, "Failed to clear routing bits.\n");
3788 return status;
3789 }
3790
3791 end_jiffies = jiffies +
3792 max((unsigned long)1, usecs_to_jiffies(30));
3793
3794 /* If the recovery bit is set, skip the mailbox command and
3795 * clear the bit; otherwise we are in the normal reset process.
3796 */
3797 if (!test_bit(QL_ASIC_RECOVERY, &qdev->flags)) {
3798 /* Stop management traffic. */
3799 ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);
3800
3801 /* Wait for the NIC and MGMNT FIFOs to empty. */
3802 ql_wait_fifo_empty(qdev);
3803 } else
3804 clear_bit(QL_ASIC_RECOVERY, &qdev->flags);
3805
3806 ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
3807
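/* Poll for the hardware to clear the function reset bit, allowing it
 * roughly 30 microseconds (at least one jiffy) as computed above.
 */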
3808 do {
3809 value = ql_read32(qdev, RST_FO);
3810 if ((value & RST_FO_FR) == 0)
3811 break;
3812 cpu_relax();
3813 } while (time_before(jiffies, end_jiffies));
3814
3815 if (value & RST_FO_FR) {
3816 netif_err(qdev, ifdown, qdev->ndev,
3817 "ETIMEDOUT!!! errored out of resetting the chip!\n");
3818 status = -ETIMEDOUT;
3819 }
3820
3821 /* Resume management traffic. */
3822 ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
3823 return status;
3824 }
3825
3826 static void ql_display_dev_info(struct net_device *ndev)
3827 {
3828 struct ql_adapter *qdev = netdev_priv(ndev);
3829
3830 netif_info(qdev, probe, qdev->ndev,
3831 "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
3832 "XG Roll = %d, XG Rev = %d.\n",
3833 qdev->func,
3834 qdev->port,
3835 qdev->chip_rev_id & 0x0000000f,
3836 qdev->chip_rev_id >> 4 & 0x0000000f,
3837 qdev->chip_rev_id >> 8 & 0x0000000f,
3838 qdev->chip_rev_id >> 12 & 0x0000000f);
3839 netif_info(qdev, probe, qdev->ndev,
3840 "MAC address %pM\n", ndev->dev_addr);
3841 }
3842
3843 static int ql_wol(struct ql_adapter *qdev)
3844 {
3845 int status = 0;
3846 u32 wol = MB_WOL_DISABLE;
3847
3848 /* The CAM is still intact after a reset, but if we
3849 * are doing WOL, then we may need to program the
3850 * routing regs. We would also need to issue the mailbox
3851 * commands to instruct the MPI what to do per the ethtool
3852 * settings.
3853 */
3854
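/* Only magic-packet wake-up is supported below; it is typically
 * requested from user space with something like
 * "ethtool -s <ifname> wol g".
 */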
3855 if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
3856 WAKE_MCAST | WAKE_BCAST)) {
3857 netif_err(qdev, ifdown, qdev->ndev,
3858 "Unsupported WOL parameter. qdev->wol = 0x%x.\n",
3859 qdev->wol);
3860 return -EINVAL;
3861 }
3862
3863 if (qdev->wol & WAKE_MAGIC) {
3864 status = ql_mb_wol_set_magic(qdev, 1);
3865 if (status) {
3866 netif_err(qdev, ifdown, qdev->ndev,
3867 "Failed to set magic packet on %s.\n",
3868 qdev->ndev->name);
3869 return status;
3870 } else
3871 netif_info(qdev, drv, qdev->ndev,
3872 "Enabled magic packet successfully on %s.\n",
3873 qdev->ndev->name);
3874
3875 wol |= MB_WOL_MAGIC_PKT;
3876 }
3877
3878 if (qdev->wol) {
3879 wol |= MB_WOL_MODE_ON;
3880 status = ql_mb_wol_mode(qdev, wol);
3881 netif_err(qdev, drv, qdev->ndev,
3882 "WOL %s (wol code 0x%x) on %s\n",
3883 (status == 0) ? "Successfully set" : "Failed",
3884 wol, qdev->ndev->name);
3885 }
3886
3887 return status;
3888 }
3889
3890 static void ql_cancel_all_work_sync(struct ql_adapter *qdev)
3891 {
3892
3893 /* Don't kill the reset worker thread if we
3894 * are in the process of recovery.
3895 */
3896 if (test_bit(QL_ADAPTER_UP, &qdev->flags))
3897 cancel_delayed_work_sync(&qdev->asic_reset_work);
3898 cancel_delayed_work_sync(&qdev->mpi_reset_work);
3899 cancel_delayed_work_sync(&qdev->mpi_work);
3900 cancel_delayed_work_sync(&qdev->mpi_idc_work);
3901 cancel_delayed_work_sync(&qdev->mpi_core_to_log);
3902 cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
3903 }
3904
3905 static int ql_adapter_down(struct ql_adapter *qdev)
3906 {
3907 int i, status = 0;
3908
3909 ql_link_off(qdev);
3910
3911 ql_cancel_all_work_sync(qdev);
3912
3913 for (i = 0; i < qdev->rss_ring_count; i++)
3914 napi_disable(&qdev->rx_ring[i].napi);
3915
3916 clear_bit(QL_ADAPTER_UP, &qdev->flags);
3917
3918 ql_disable_interrupts(qdev);
3919
3920 ql_tx_ring_clean(qdev);
3921
3922 /* Call netif_napi_del() from common point.
3923 */
3924 for (i = 0; i < qdev->rss_ring_count; i++)
3925 netif_napi_del(&qdev->rx_ring[i].napi);
3926
3927 status = ql_adapter_reset(qdev);
3928 if (status)
3929 netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n",
3930 qdev->func);
3931 ql_free_rx_buffers(qdev);
3932
3933 return status;
3934 }
3935
3936 static int ql_adapter_up(struct ql_adapter *qdev)
3937 {
3938 int err = 0;
3939
3940 err = ql_adapter_initialize(qdev);
3941 if (err) {
3942 netif_info(qdev, ifup, qdev->ndev, "Unable to initialize adapter.\n");
3943 goto err_init;
3944 }
3945 set_bit(QL_ADAPTER_UP, &qdev->flags);
3946 ql_alloc_rx_buffers(qdev);
3947 /* If the port is initialized and the
3948 * link is up, then turn on the carrier.
3949 */
3950 if ((ql_read32(qdev, STS) & qdev->port_init) &&
3951 (ql_read32(qdev, STS) & qdev->port_link_up))
3952 ql_link_on(qdev);
3953 /* Restore rx mode. */
3954 clear_bit(QL_ALLMULTI, &qdev->flags);
3955 clear_bit(QL_PROMISCUOUS, &qdev->flags);
3956 qlge_set_multicast_list(qdev->ndev);
3957
3958 /* Restore vlan setting. */
3959 qlge_restore_vlan(qdev);
3960
3961 ql_enable_interrupts(qdev);
3962 ql_enable_all_completion_interrupts(qdev);
3963 netif_tx_start_all_queues(qdev->ndev);
3964
3965 return 0;
3966 err_init:
3967 ql_adapter_reset(qdev);
3968 return err;
3969 }
3970
3971 static void ql_release_adapter_resources(struct ql_adapter *qdev)
3972 {
3973 ql_free_mem_resources(qdev);
3974 ql_free_irq(qdev);
3975 }
3976
3977 static int ql_get_adapter_resources(struct ql_adapter *qdev)
3978 {
3979 int status = 0;
3980
3981 if (ql_alloc_mem_resources(qdev)) {
3982 netif_err(qdev, ifup, qdev->ndev, "Unable to allocate memory.\n");
3983 return -ENOMEM;
3984 }
3985 status = ql_request_irq(qdev);
3986 return status;
3987 }
3988
3989 static int qlge_close(struct net_device *ndev)
3990 {
3991 struct ql_adapter *qdev = netdev_priv(ndev);
3992
3993 /* If we hit the pci_channel_io_perm_failure
3994 * condition, then we have already
3995 * brought the adapter down.
3996 */
3997 if (test_bit(QL_EEH_FATAL, &qdev->flags)) {
3998 netif_err(qdev, drv, qdev->ndev, "EEH fatal error already brought the adapter down.\n");
3999 clear_bit(QL_EEH_FATAL, &qdev->flags);
4000 return 0;
4001 }
4002
4003 /*
4004 * Wait for device to recover from a reset.
4005 * (Rarely happens, but possible.)
4006 */
4007 while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
4008 msleep(1);
4009 ql_adapter_down(qdev);
4010 ql_release_adapter_resources(qdev);
4011 return 0;
4012 }
4013
4014 static int ql_configure_rings(struct ql_adapter *qdev)
4015 {
4016 int i;
4017 struct rx_ring *rx_ring;
4018 struct tx_ring *tx_ring;
4019 int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus());
4020 unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ?
4021 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
4022
4023 qdev->lbq_buf_order = get_order(lbq_buf_len);
4024
4025 /* In a perfect world we have one RSS ring for each CPU
4026 * and each has its own vector. To do that we ask for
4027 * cpu_cnt vectors. ql_enable_msix() will adjust the
4028 * vector count to what we actually get. We then
4029 * allocate an RSS ring for each.
4030 * Essentially, we are doing min(cpu_count, msix_vector_count).
4031 */
4032 qdev->intr_count = cpu_cnt;
4033 ql_enable_msix(qdev);
4034 /* Adjust the RSS ring count to the actual vector count. */
4035 qdev->rss_ring_count = qdev->intr_count;
4036 qdev->tx_ring_count = cpu_cnt;
4037 qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;
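/* The rx_ring[] array therefore holds the inbound (RSS) completion
 * queues first, followed by one outbound completion queue per tx
 * ring; e.g. with 4 RSS rings and 4 tx rings, rx_ring[0..3] are
 * RSS queues and rx_ring[4..7] service tx completions (cq_id 4..7).
 */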
4038
4039 for (i = 0; i < qdev->tx_ring_count; i++) {
4040 tx_ring = &qdev->tx_ring[i];
4041 memset((void *)tx_ring, 0, sizeof(*tx_ring));
4042 tx_ring->qdev = qdev;
4043 tx_ring->wq_id = i;
4044 tx_ring->wq_len = qdev->tx_ring_size;
4045 tx_ring->wq_size =
4046 tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);
4047
4048 /*
4049 * The completion queue IDs for the tx rings start
4050 * immediately after the rss rings.
4051 */
4052 tx_ring->cq_id = qdev->rss_ring_count + i;
4053 }
4054
4055 for (i = 0; i < qdev->rx_ring_count; i++) {
4056 rx_ring = &qdev->rx_ring[i];
4057 memset((void *)rx_ring, 0, sizeof(*rx_ring));
4058 rx_ring->qdev = qdev;
4059 rx_ring->cq_id = i;
4060 rx_ring->cpu = i % cpu_cnt; /* CPU to run handler on. */
4061 if (i < qdev->rss_ring_count) {
4062 /*
4063 * Inbound (RSS) queues.
4064 */
4065 rx_ring->cq_len = qdev->rx_ring_size;
4066 rx_ring->cq_size =
4067 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4068 rx_ring->lbq_len = NUM_LARGE_BUFFERS;
4069 rx_ring->lbq_size =
4070 rx_ring->lbq_len * sizeof(__le64);
4071 rx_ring->lbq_buf_size = (u16)lbq_buf_len;
4072 rx_ring->sbq_len = NUM_SMALL_BUFFERS;
4073 rx_ring->sbq_size =
4074 rx_ring->sbq_len * sizeof(__le64);
4075 rx_ring->sbq_buf_size = SMALL_BUF_MAP_SIZE;
4076 rx_ring->type = RX_Q;
4077 } else {
4078 /*
4079 * Outbound queue handles outbound completions only.
4080 */
4081 /* The outbound cq is the same size as the tx_ring it services. */
4082 rx_ring->cq_len = qdev->tx_ring_size;
4083 rx_ring->cq_size =
4084 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4085 rx_ring->lbq_len = 0;
4086 rx_ring->lbq_size = 0;
4087 rx_ring->lbq_buf_size = 0;
4088 rx_ring->sbq_len = 0;
4089 rx_ring->sbq_size = 0;
4090 rx_ring->sbq_buf_size = 0;
4091 rx_ring->type = TX_Q;
4092 }
4093 }
4094 return 0;
4095 }
4096
4097 static int qlge_open(struct net_device *ndev)
4098 {
4099 int err = 0;
4100 struct ql_adapter *qdev = netdev_priv(ndev);
4101
4102 err = ql_adapter_reset(qdev);
4103 if (err)
4104 return err;
4105
4106 err = ql_configure_rings(qdev);
4107 if (err)
4108 return err;
4109
4110 err = ql_get_adapter_resources(qdev);
4111 if (err)
4112 goto error_up;
4113
4114 err = ql_adapter_up(qdev);
4115 if (err)
4116 goto error_up;
4117
4118 return err;
4119
4120 error_up:
4121 ql_release_adapter_resources(qdev);
4122 return err;
4123 }
4124
4125 static int ql_change_rx_buffers(struct ql_adapter *qdev)
4126 {
4127 struct rx_ring *rx_ring;
4128 int i, status;
4129 u32 lbq_buf_len;
4130
4131 /* Wait for an outstanding reset to complete. */
4132 if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4133 int i = 3;
4134 while (i-- && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4135 netif_err(qdev, ifup, qdev->ndev,
4136 "Waiting for adapter UP...\n");
4137 ssleep(1);
4138 }
4139
4140 if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4141 netif_err(qdev, ifup, qdev->ndev,
4142 "Timed out waiting for adapter UP\n");
4143 return -ETIMEDOUT;
4144 }
4145 }
4146
4147 status = ql_adapter_down(qdev);
4148 if (status)
4149 goto error;
4150
4151 /* Get the new rx buffer size. */
4152 lbq_buf_len = (qdev->ndev->mtu > 1500) ?
4153 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
4154 qdev->lbq_buf_order = get_order(lbq_buf_len);
4155
4156 for (i = 0; i < qdev->rss_ring_count; i++) {
4157 rx_ring = &qdev->rx_ring[i];
4158 /* Set the new size. */
4159 rx_ring->lbq_buf_size = lbq_buf_len;
4160 }
4161
4162 status = ql_adapter_up(qdev);
4163 if (status)
4164 goto error;
4165
4166 return status;
4167 error:
4168 netif_alert(qdev, ifup, qdev->ndev,
4169 "Driver up/down cycle failed, closing device.\n");
4170 set_bit(QL_ADAPTER_UP, &qdev->flags);
4171 dev_close(qdev->ndev);
4172 return status;
4173 }
4174
4175 static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
4176 {
4177 struct ql_adapter *qdev = netdev_priv(ndev);
4178 int status;
4179
4180 if (ndev->mtu == 1500 && new_mtu == 9000) {
4181 netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n");
4182 } else if (ndev->mtu == 9000 && new_mtu == 1500) {
4183 netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n");
4184 } else
4185 return -EINVAL;
4186
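/* Only the standard (1500) and jumbo (9000) MTU values are accepted,
 * so e.g. "ip link set dev <ifname> mtu 9000" either triggers the
 * buffer re-size below or is rejected with -EINVAL.
 */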
4187 queue_delayed_work(qdev->workqueue,
4188 &qdev->mpi_port_cfg_work, 3*HZ);
4189
4190 ndev->mtu = new_mtu;
4191
4192 if (!netif_running(qdev->ndev)) {
4193 return 0;
4194 }
4195
4196 status = ql_change_rx_buffers(qdev);
4197 if (status) {
4198 netif_err(qdev, ifup, qdev->ndev,
4199 "Changing MTU failed.\n");
4200 }
4201
4202 return status;
4203 }
4204
4205 static struct net_device_stats *qlge_get_stats(struct net_device
4206 *ndev)
4207 {
4208 struct ql_adapter *qdev = netdev_priv(ndev);
4209 struct rx_ring *rx_ring = &qdev->rx_ring[0];
4210 struct tx_ring *tx_ring = &qdev->tx_ring[0];
4211 unsigned long pkts, mcast, dropped, errors, bytes;
4212 int i;
4213
4214 /* Get RX stats. */
4215 pkts = mcast = dropped = errors = bytes = 0;
4216 for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
4217 pkts += rx_ring->rx_packets;
4218 bytes += rx_ring->rx_bytes;
4219 dropped += rx_ring->rx_dropped;
4220 errors += rx_ring->rx_errors;
4221 mcast += rx_ring->rx_multicast;
4222 }
4223 ndev->stats.rx_packets = pkts;
4224 ndev->stats.rx_bytes = bytes;
4225 ndev->stats.rx_dropped = dropped;
4226 ndev->stats.rx_errors = errors;
4227 ndev->stats.multicast = mcast;
4228
4229 /* Get TX stats. */
4230 pkts = errors = bytes = 0;
4231 for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) {
4232 pkts += tx_ring->tx_packets;
4233 bytes += tx_ring->tx_bytes;
4234 errors += tx_ring->tx_errors;
4235 }
4236 ndev->stats.tx_packets = pkts;
4237 ndev->stats.tx_bytes = bytes;
4238 ndev->stats.tx_errors = errors;
4239 return &ndev->stats;
4240 }
4241
4242 static void qlge_set_multicast_list(struct net_device *ndev)
4243 {
4244 struct ql_adapter *qdev = netdev_priv(ndev);
4245 struct netdev_hw_addr *ha;
4246 int i, status;
4247
4248 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
4249 if (status)
4250 return;
4251 /*
4252 * Set or clear promiscuous mode if a
4253 * transition is taking place.
4254 */
4255 if (ndev->flags & IFF_PROMISC) {
4256 if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4257 if (ql_set_routing_reg
4258 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
4259 netif_err(qdev, hw, qdev->ndev,
4260 "Failed to set promiscuous mode.\n");
4261 } else {
4262 set_bit(QL_PROMISCUOUS, &qdev->flags);
4263 }
4264 }
4265 } else {
4266 if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4267 if (ql_set_routing_reg
4268 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
4269 netif_err(qdev, hw, qdev->ndev,
4270 "Failed to clear promiscuous mode.\n");
4271 } else {
4272 clear_bit(QL_PROMISCUOUS, &qdev->flags);
4273 }
4274 }
4275 }
4276
4277 /*
4278 * Set or clear all multicast mode if a
4279 * transition is taking place.
4280 */
4281 if ((ndev->flags & IFF_ALLMULTI) ||
4282 (netdev_mc_count(ndev) > MAX_MULTICAST_ENTRIES)) {
4283 if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
4284 if (ql_set_routing_reg
4285 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
4286 netif_err(qdev, hw, qdev->ndev,
4287 "Failed to set all-multi mode.\n");
4288 } else {
4289 set_bit(QL_ALLMULTI, &qdev->flags);
4290 }
4291 }
4292 } else {
4293 if (test_bit(QL_ALLMULTI, &qdev->flags)) {
4294 if (ql_set_routing_reg
4295 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
4296 netif_err(qdev, hw, qdev->ndev,
4297 "Failed to clear all-multi mode.\n");
4298 } else {
4299 clear_bit(QL_ALLMULTI, &qdev->flags);
4300 }
4301 }
4302 }
4303
4304 if (!netdev_mc_empty(ndev)) {
4305 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4306 if (status)
4307 goto exit;
4308 i = 0;
4309 netdev_for_each_mc_addr(ha, ndev) {
4310 if (ql_set_mac_addr_reg(qdev, (u8 *) ha->addr,
4311 MAC_ADDR_TYPE_MULTI_MAC, i)) {
4312 netif_err(qdev, hw, qdev->ndev,
4313 "Failed to loadmulticast address.\n");
4314 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4315 goto exit;
4316 }
4317 i++;
4318 }
4319 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4320 if (ql_set_routing_reg
4321 (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
4322 netif_err(qdev, hw, qdev->ndev,
4323 "Failed to set multicast match mode.\n");
4324 } else {
4325 set_bit(QL_ALLMULTI, &qdev->flags);
4326 }
4327 }
4328 exit:
4329 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
4330 }
4331
4332 static int qlge_set_mac_address(struct net_device *ndev, void *p)
4333 {
4334 struct ql_adapter *qdev = netdev_priv(ndev);
4335 struct sockaddr *addr = p;
4336 int status;
4337
4338 if (!is_valid_ether_addr(addr->sa_data))
4339 return -EADDRNOTAVAIL;
4340 memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
4341 /* Update local copy of current mac address. */
4342 memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
4343
4344 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4345 if (status)
4346 return status;
4347 status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
4348 MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
4349 if (status)
4350 netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n");
4351 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4352 return status;
4353 }
4354
4355 static void qlge_tx_timeout(struct net_device *ndev)
4356 {
4357 struct ql_adapter *qdev = netdev_priv(ndev);
4358 ql_queue_asic_error(qdev);
4359 }
4360
4361 static void ql_asic_reset_work(struct work_struct *work)
4362 {
4363 struct ql_adapter *qdev =
4364 container_of(work, struct ql_adapter, asic_reset_work.work);
4365 int status;
4366 rtnl_lock();
4367 status = ql_adapter_down(qdev);
4368 if (status)
4369 goto error;
4370
4371 status = ql_adapter_up(qdev);
4372 if (status)
4373 goto error;
4374
4375 /* Restore rx mode. */
4376 clear_bit(QL_ALLMULTI, &qdev->flags);
4377 clear_bit(QL_PROMISCUOUS, &qdev->flags);
4378 qlge_set_multicast_list(qdev->ndev);
4379
4380 rtnl_unlock();
4381 return;
4382 error:
4383 netif_alert(qdev, ifup, qdev->ndev,
4384 "Driver up/down cycle failed, closing device\n");
4385
4386 set_bit(QL_ADAPTER_UP, &qdev->flags);
4387 dev_close(qdev->ndev);
4388 rtnl_unlock();
4389 }
4390
4391 static const struct nic_operations qla8012_nic_ops = {
4392 .get_flash = ql_get_8012_flash_params,
4393 .port_initialize = ql_8012_port_initialize,
4394 };
4395
4396 static const struct nic_operations qla8000_nic_ops = {
4397 .get_flash = ql_get_8000_flash_params,
4398 .port_initialize = ql_8000_port_initialize,
4399 };
4400
4401 /* Find the pcie function number for the other NIC
4402 * on this chip. Since both NIC functions share a
4403 * common firmware we have the lowest enabled function
4404 * do any common work. Examples would be resetting
4405 * after a fatal firmware error, or doing a firmware
4406 * coredump.
4407 */
4408 static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
4409 {
4410 int status = 0;
4411 u32 temp;
4412 u32 nic_func1, nic_func2;
4413
4414 status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
4415 &temp);
4416 if (status)
4417 return status;
4418
4419 nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
4420 MPI_TEST_NIC_FUNC_MASK);
4421 nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
4422 MPI_TEST_NIC_FUNC_MASK);
4423
4424 if (qdev->func == nic_func1)
4425 qdev->alt_func = nic_func2;
4426 else if (qdev->func == nic_func2)
4427 qdev->alt_func = nic_func1;
4428 else
4429 status = -EIO;
4430
4431 return status;
4432 }
4433
4434 static int ql_get_board_info(struct ql_adapter *qdev)
4435 {
4436 int status;
4437 qdev->func =
4438 (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
4439 if (qdev->func > 3)
4440 return -EIO;
4441
4442 status = ql_get_alt_pcie_func(qdev);
4443 if (status)
4444 return status;
4445
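/* The NIC function with the lower PCI function number owns port 0;
 * the other one owns port 1.
 */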
4446 qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
4447 if (qdev->port) {
4448 qdev->xg_sem_mask = SEM_XGMAC1_MASK;
4449 qdev->port_link_up = STS_PL1;
4450 qdev->port_init = STS_PI1;
4451 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
4452 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
4453 } else {
4454 qdev->xg_sem_mask = SEM_XGMAC0_MASK;
4455 qdev->port_link_up = STS_PL0;
4456 qdev->port_init = STS_PI0;
4457 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
4458 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
4459 }
4460 qdev->chip_rev_id = ql_read32(qdev, REV_ID);
4461 qdev->device_id = qdev->pdev->device;
4462 if (qdev->device_id == QLGE_DEVICE_ID_8012)
4463 qdev->nic_ops = &qla8012_nic_ops;
4464 else if (qdev->device_id == QLGE_DEVICE_ID_8000)
4465 qdev->nic_ops = &qla8000_nic_ops;
4466 return status;
4467 }
4468
4469 static void ql_release_all(struct pci_dev *pdev)
4470 {
4471 struct net_device *ndev = pci_get_drvdata(pdev);
4472 struct ql_adapter *qdev = netdev_priv(ndev);
4473
4474 if (qdev->workqueue) {
4475 destroy_workqueue(qdev->workqueue);
4476 qdev->workqueue = NULL;
4477 }
4478
4479 if (qdev->reg_base)
4480 iounmap(qdev->reg_base);
4481 if (qdev->doorbell_area)
4482 iounmap(qdev->doorbell_area);
4483 vfree(qdev->mpi_coredump);
4484 pci_release_regions(pdev);
4485 pci_set_drvdata(pdev, NULL);
4486 }
4487
4488 static int ql_init_device(struct pci_dev *pdev, struct net_device *ndev,
4489 int cards_found)
4490 {
4491 struct ql_adapter *qdev = netdev_priv(ndev);
4492 int err = 0;
4493
4494 memset((void *)qdev, 0, sizeof(*qdev));
4495 err = pci_enable_device(pdev);
4496 if (err) {
4497 dev_err(&pdev->dev, "PCI device enable failed.\n");
4498 return err;
4499 }
4500
4501 qdev->ndev = ndev;
4502 qdev->pdev = pdev;
4503 pci_set_drvdata(pdev, ndev);
4504
4505 /* Set PCIe read request size */
4506 err = pcie_set_readrq(pdev, 4096);
4507 if (err) {
4508 dev_err(&pdev->dev, "Set readrq failed.\n");
4509 goto err_out1;
4510 }
4511
4512 err = pci_request_regions(pdev, DRV_NAME);
4513 if (err) {
4514 dev_err(&pdev->dev, "PCI region request failed.\n");
4515 goto err_out1;
4516 }
4517
4518 pci_set_master(pdev);
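/* Prefer 64-bit DMA and fall back to a 32-bit mask if that fails. */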
4519 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
4520 set_bit(QL_DMA64, &qdev->flags);
4521 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
4522 } else {
4523 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
4524 if (!err)
4525 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
4526 }
4527
4528 if (err) {
4529 dev_err(&pdev->dev, "No usable DMA configuration.\n");
4530 goto err_out2;
4531 }
4532
4533 /* Set PCIe reset type for EEH to fundamental. */
4534 pdev->needs_freset = 1;
4535 pci_save_state(pdev);
4536 qdev->reg_base =
4537 ioremap_nocache(pci_resource_start(pdev, 1),
4538 pci_resource_len(pdev, 1));
4539 if (!qdev->reg_base) {
4540 dev_err(&pdev->dev, "Register mapping failed.\n");
4541 err = -ENOMEM;
4542 goto err_out2;
4543 }
4544
4545 qdev->doorbell_area_size = pci_resource_len(pdev, 3);
4546 qdev->doorbell_area =
4547 ioremap_nocache(pci_resource_start(pdev, 3),
4548 pci_resource_len(pdev, 3));
4549 if (!qdev->doorbell_area) {
4550 dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
4551 err = -ENOMEM;
4552 goto err_out2;
4553 }
4554
4555 err = ql_get_board_info(qdev);
4556 if (err) {
4557 dev_err(&pdev->dev, "Register access failed.\n");
4558 err = -EIO;
4559 goto err_out2;
4560 }
4561 qdev->msg_enable = netif_msg_init(debug, default_msg);
4562 spin_lock_init(&qdev->hw_lock);
4563 spin_lock_init(&qdev->stats_lock);
4564
4565 if (qlge_mpi_coredump) {
4566 qdev->mpi_coredump =
4567 vmalloc(sizeof(struct ql_mpi_coredump));
4568 if (qdev->mpi_coredump == NULL) {
4569 err = -ENOMEM;
4570 goto err_out2;
4571 }
4572 if (qlge_force_coredump)
4573 set_bit(QL_FRC_COREDUMP, &qdev->flags);
4574 }
4575 /* make sure the EEPROM is good */
4576 err = qdev->nic_ops->get_flash(qdev);
4577 if (err) {
4578 dev_err(&pdev->dev, "Invalid FLASH.\n");
4579 goto err_out2;
4580 }
4581
4582 /* Keep local copy of current mac address. */
4583 memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
4584
4585 /* Set up the default ring sizes. */
4586 qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
4587 qdev->rx_ring_size = NUM_RX_RING_ENTRIES;
4588
4589 /* Set up the coalescing parameters. */
4590 qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
4591 qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
4592 qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4593 qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4594
4595 /*
4596 * Set up the operating parameters.
4597 */
4598 qdev->workqueue = create_singlethread_workqueue(ndev->name);
4599 INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
4600 INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
4601 INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
4602 INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
4603 INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
4604 INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log);
4605 init_completion(&qdev->ide_completion);
4606 mutex_init(&qdev->mpi_mutex);
4607
4608 if (!cards_found) {
4609 dev_info(&pdev->dev, "%s\n", DRV_STRING);
4610 dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
4611 DRV_NAME, DRV_VERSION);
4612 }
4613 return 0;
4614 err_out2:
4615 ql_release_all(pdev);
4616 err_out1:
4617 pci_disable_device(pdev);
4618 return err;
4619 }
4620
4621 static const struct net_device_ops qlge_netdev_ops = {
4622 .ndo_open = qlge_open,
4623 .ndo_stop = qlge_close,
4624 .ndo_start_xmit = qlge_send,
4625 .ndo_change_mtu = qlge_change_mtu,
4626 .ndo_get_stats = qlge_get_stats,
4627 .ndo_set_rx_mode = qlge_set_multicast_list,
4628 .ndo_set_mac_address = qlge_set_mac_address,
4629 .ndo_validate_addr = eth_validate_addr,
4630 .ndo_tx_timeout = qlge_tx_timeout,
4631 .ndo_fix_features = qlge_fix_features,
4632 .ndo_set_features = qlge_set_features,
4633 .ndo_vlan_rx_add_vid = qlge_vlan_rx_add_vid,
4634 .ndo_vlan_rx_kill_vid = qlge_vlan_rx_kill_vid,
4635 };
4636
4637 static void ql_timer(unsigned long data)
4638 {
4639 struct ql_adapter *qdev = (struct ql_adapter *)data;
4640 u32 var = 0;
4641
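/* Reading a chip register forces a PCI access so that a dead bus is
 * noticed by the EEH machinery (pci_channel_offline()) below.
 */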
4642 var = ql_read32(qdev, STS);
4643 if (pci_channel_offline(qdev->pdev)) {
4644 netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var);
4645 return;
4646 }
4647
4648 mod_timer(&qdev->timer, jiffies + (5*HZ));
4649 }
4650
4651 static int qlge_probe(struct pci_dev *pdev,
4652 const struct pci_device_id *pci_entry)
4653 {
4654 struct net_device *ndev = NULL;
4655 struct ql_adapter *qdev = NULL;
4656 static int cards_found;
4657 int err = 0;
4658
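/* Size the netdev for up to min(MAX_CPUS, default RSS queue count)
 * tx/rx queue pairs; the actual ring counts are trimmed later in
 * ql_configure_rings().
 */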
4659 ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
4660 min(MAX_CPUS, netif_get_num_default_rss_queues()));
4661 if (!ndev)
4662 return -ENOMEM;
4663
4664 err = ql_init_device(pdev, ndev, cards_found);
4665 if (err < 0) {
4666 free_netdev(ndev);
4667 return err;
4668 }
4669
4670 qdev = netdev_priv(ndev);
4671 SET_NETDEV_DEV(ndev, &pdev->dev);
4672 ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
4673 NETIF_F_TSO | NETIF_F_TSO_ECN |
4674 NETIF_F_HW_VLAN_TX | NETIF_F_RXCSUM;
4675 ndev->features = ndev->hw_features |
4676 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
4677 ndev->vlan_features = ndev->hw_features;
4678
4679 if (test_bit(QL_DMA64, &qdev->flags))
4680 ndev->features |= NETIF_F_HIGHDMA;
4681
4682 /*
4683 * Set up net_device structure.
4684 */
4685 ndev->tx_queue_len = qdev->tx_ring_size;
4686 ndev->irq = pdev->irq;
4687
4688 ndev->netdev_ops = &qlge_netdev_ops;
4689 SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops);
4690 ndev->watchdog_timeo = 10 * HZ;
4691
4692 err = register_netdev(ndev);
4693 if (err) {
4694 dev_err(&pdev->dev, "net device registration failed.\n");
4695 ql_release_all(pdev);
4696 pci_disable_device(pdev);
4697 return err;
4698 }
4699 /* Start up the timer to trigger EEH if
4700 * the bus goes dead
4701 */
4702 init_timer_deferrable(&qdev->timer);
4703 qdev->timer.data = (unsigned long)qdev;
4704 qdev->timer.function = ql_timer;
4705 qdev->timer.expires = jiffies + (5*HZ);
4706 add_timer(&qdev->timer);
4707 ql_link_off(qdev);
4708 ql_display_dev_info(ndev);
4709 atomic_set(&qdev->lb_count, 0);
4710 cards_found++;
4711 return 0;
4712 }
4713
4714 netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev)
4715 {
4716 return qlge_send(skb, ndev);
4717 }
4718
4719 int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
4720 {
4721 return ql_clean_inbound_rx_ring(rx_ring, budget);
4722 }
4723
4724 static void qlge_remove(struct pci_dev *pdev)
4725 {
4726 struct net_device *ndev = pci_get_drvdata(pdev);
4727 struct ql_adapter *qdev = netdev_priv(ndev);
4728 del_timer_sync(&qdev->timer);
4729 ql_cancel_all_work_sync(qdev);
4730 unregister_netdev(ndev);
4731 ql_release_all(pdev);
4732 pci_disable_device(pdev);
4733 free_netdev(ndev);
4734 }
4735
4736 /* Clean up resources without touching hardware. */
4737 static void ql_eeh_close(struct net_device *ndev)
4738 {
4739 int i;
4740 struct ql_adapter *qdev = netdev_priv(ndev);
4741
4742 if (netif_carrier_ok(ndev)) {
4743 netif_carrier_off(ndev);
4744 netif_stop_queue(ndev);
4745 }
4746
4747 /* Disabling the timer */
4748 del_timer_sync(&qdev->timer);
4749 ql_cancel_all_work_sync(qdev);
4750
4751 for (i = 0; i < qdev->rss_ring_count; i++)
4752 netif_napi_del(&qdev->rx_ring[i].napi);
4753
4754 clear_bit(QL_ADAPTER_UP, &qdev->flags);
4755 ql_tx_ring_clean(qdev);
4756 ql_free_rx_buffers(qdev);
4757 ql_release_adapter_resources(qdev);
4758 }
4759
4760 /*
4761 * This callback is called by the PCI subsystem whenever
4762 * a PCI bus error is detected.
4763 */
4764 static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
4765 enum pci_channel_state state)
4766 {
4767 struct net_device *ndev = pci_get_drvdata(pdev);
4768 struct ql_adapter *qdev = netdev_priv(ndev);
4769
4770 switch (state) {
4771 case pci_channel_io_normal:
4772 return PCI_ERS_RESULT_CAN_RECOVER;
4773 case pci_channel_io_frozen:
4774 netif_device_detach(ndev);
4775 if (netif_running(ndev))
4776 ql_eeh_close(ndev);
4777 pci_disable_device(pdev);
4778 return PCI_ERS_RESULT_NEED_RESET;
4779 case pci_channel_io_perm_failure:
4780 dev_err(&pdev->dev,
4781 "%s: pci_channel_io_perm_failure.\n", __func__);
4782 ql_eeh_close(ndev);
4783 set_bit(QL_EEH_FATAL, &qdev->flags);
4784 return PCI_ERS_RESULT_DISCONNECT;
4785 }
4786
4787 /* Request a slot reset. */
4788 return PCI_ERS_RESULT_NEED_RESET;
4789 }
4790
4791 /*
4792 * This callback is called after the PCI bus has been reset.
4793 * Basically, this tries to restart the card from scratch.
4794 * This is a shortened version of the device probe/discovery code;
4795 * it resembles the first half of the probe() routine.
4796 */
4797 static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
4798 {
4799 struct net_device *ndev = pci_get_drvdata(pdev);
4800 struct ql_adapter *qdev = netdev_priv(ndev);
4801
4802 pdev->error_state = pci_channel_io_normal;
4803
4804 pci_restore_state(pdev);
4805 if (pci_enable_device(pdev)) {
4806 netif_err(qdev, ifup, qdev->ndev,
4807 "Cannot re-enable PCI device after reset.\n");
4808 return PCI_ERS_RESULT_DISCONNECT;
4809 }
4810 pci_set_master(pdev);
4811
4812 if (ql_adapter_reset(qdev)) {
4813 netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n");
4814 set_bit(QL_EEH_FATAL, &qdev->flags);
4815 return PCI_ERS_RESULT_DISCONNECT;
4816 }
4817
4818 return PCI_ERS_RESULT_RECOVERED;
4819 }
4820
4821 static void qlge_io_resume(struct pci_dev *pdev)
4822 {
4823 struct net_device *ndev = pci_get_drvdata(pdev);
4824 struct ql_adapter *qdev = netdev_priv(ndev);
4825 int err = 0;
4826
4827 if (netif_running(ndev)) {
4828 err = qlge_open(ndev);
4829 if (err) {
4830 netif_err(qdev, ifup, qdev->ndev,
4831 "Device initialization failed after reset.\n");
4832 return;
4833 }
4834 } else {
4835 netif_err(qdev, ifup, qdev->ndev,
4836 "Device was not running prior to EEH.\n");
4837 }
4838 mod_timer(&qdev->timer, jiffies + (5*HZ));
4839 netif_device_attach(ndev);
4840 }
4841
4842 static const struct pci_error_handlers qlge_err_handler = {
4843 .error_detected = qlge_io_error_detected,
4844 .slot_reset = qlge_io_slot_reset,
4845 .resume = qlge_io_resume,
4846 };
4847
4848 static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
4849 {
4850 struct net_device *ndev = pci_get_drvdata(pdev);
4851 struct ql_adapter *qdev = netdev_priv(ndev);
4852 int err;
4853
4854 netif_device_detach(ndev);
4855 del_timer_sync(&qdev->timer);
4856
4857 if (netif_running(ndev)) {
4858 err = ql_adapter_down(qdev);
4859 if (err)
4860 return err;
4861 }
4862
4863 ql_wol(qdev);
4864 err = pci_save_state(pdev);
4865 if (err)
4866 return err;
4867
4868 pci_disable_device(pdev);
4869
4870 pci_set_power_state(pdev, pci_choose_state(pdev, state));
4871
4872 return 0;
4873 }
4874
4875 #ifdef CONFIG_PM
4876 static int qlge_resume(struct pci_dev *pdev)
4877 {
4878 struct net_device *ndev = pci_get_drvdata(pdev);
4879 struct ql_adapter *qdev = netdev_priv(ndev);
4880 int err;
4881
4882 pci_set_power_state(pdev, PCI_D0);
4883 pci_restore_state(pdev);
4884 err = pci_enable_device(pdev);
4885 if (err) {
4886 netif_err(qdev, ifup, qdev->ndev, "Cannot enable PCI device from suspend\n");
4887 return err;
4888 }
4889 pci_set_master(pdev);
4890
4891 pci_enable_wake(pdev, PCI_D3hot, 0);
4892 pci_enable_wake(pdev, PCI_D3cold, 0);
4893
4894 if (netif_running(ndev)) {
4895 err = ql_adapter_up(qdev);
4896 if (err)
4897 return err;
4898 }
4899
4900 mod_timer(&qdev->timer, jiffies + (5*HZ));
4901 netif_device_attach(ndev);
4902
4903 return 0;
4904 }
4905 #endif /* CONFIG_PM */
4906
4907 static void qlge_shutdown(struct pci_dev *pdev)
4908 {
4909 qlge_suspend(pdev, PMSG_SUSPEND);
4910 }
4911
4912 static struct pci_driver qlge_driver = {
4913 .name = DRV_NAME,
4914 .id_table = qlge_pci_tbl,
4915 .probe = qlge_probe,
4916 .remove = qlge_remove,
4917 #ifdef CONFIG_PM
4918 .suspend = qlge_suspend,
4919 .resume = qlge_resume,
4920 #endif
4921 .shutdown = qlge_shutdown,
4922 .err_handler = &qlge_err_handler
4923 };
4924
4925 static int __init qlge_init_module(void)
4926 {
4927 return pci_register_driver(&qlge_driver);
4928 }
4929
4930 static void __exit qlge_exit(void)
4931 {
4932 pci_unregister_driver(&qlge_driver);
4933 }
4934
4935 module_init(qlge_init_module);
4936 module_exit(qlge_exit);