qlge: Add xgmac reg blocks to firmware dump.
drivers/net/qlge/qlge_main.c
1/*
2 * QLogic qlge NIC HBA Driver
3 * Copyright (c) 2003-2008 QLogic Corporation
4 * See LICENSE.qlge for copyright and licensing details.
5 * Author: Linux qlge network device driver by
6 * Ron Mercer <ron.mercer@qlogic.com>
7 */
8#include <linux/kernel.h>
9#include <linux/init.h>
10#include <linux/types.h>
11#include <linux/module.h>
12#include <linux/list.h>
13#include <linux/pci.h>
14#include <linux/dma-mapping.h>
15#include <linux/pagemap.h>
16#include <linux/sched.h>
17#include <linux/slab.h>
18#include <linux/dmapool.h>
19#include <linux/mempool.h>
20#include <linux/spinlock.h>
21#include <linux/kthread.h>
22#include <linux/interrupt.h>
23#include <linux/errno.h>
24#include <linux/ioport.h>
25#include <linux/in.h>
26#include <linux/ip.h>
27#include <linux/ipv6.h>
28#include <net/ipv6.h>
29#include <linux/tcp.h>
30#include <linux/udp.h>
31#include <linux/if_arp.h>
32#include <linux/if_ether.h>
33#include <linux/netdevice.h>
34#include <linux/etherdevice.h>
35#include <linux/ethtool.h>
36#include <linux/skbuff.h>
37#include <linux/if_vlan.h>
38#include <linux/delay.h>
39#include <linux/mm.h>
40#include <linux/vmalloc.h>
41#include <net/ip6_checksum.h>
42
43#include "qlge.h"
44
45char qlge_driver_name[] = DRV_NAME;
46const char qlge_driver_version[] = DRV_VERSION;
47
48MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
49MODULE_DESCRIPTION(DRV_STRING " ");
50MODULE_LICENSE("GPL");
51MODULE_VERSION(DRV_VERSION);
52
53static const u32 default_msg =
54 NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
55/* NETIF_MSG_TIMER | */
56 NETIF_MSG_IFDOWN |
57 NETIF_MSG_IFUP |
58 NETIF_MSG_RX_ERR |
59 NETIF_MSG_TX_ERR |
60/* NETIF_MSG_TX_QUEUED | */
61/* NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
62/* NETIF_MSG_PKTDATA | */
63 NETIF_MSG_HW | NETIF_MSG_WOL | 0;
64
65static int debug = 0x00007fff; /* defaults above */
66module_param(debug, int, 0);
67MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
68
69#define MSIX_IRQ 0
70#define MSI_IRQ 1
71#define LEG_IRQ 2
72static int qlge_irq_type = MSIX_IRQ;
73module_param(qlge_irq_type, int, MSIX_IRQ);
74MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
75
76static int qlge_mpi_coredump;
77module_param(qlge_mpi_coredump, int, 0);
78MODULE_PARM_DESC(qlge_mpi_coredump,
79 "Option to enable MPI firmware dump. "
80 "Default is OFF - Do Not allocate memory. "
81 "Do not perform firmware coredump.");
82
83static DEFINE_PCI_DEVICE_TABLE(qlge_pci_tbl) = {
84 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
85 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
86 /* required last entry */
87 {0,}
88};
89
90MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
91
92/* This hardware semaphore causes exclusive access to
93 * resources shared between the NIC driver, MPI firmware,
94 * FCOE firmware and the FC driver.
95 */
96static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
97{
98 u32 sem_bits = 0;
99
100 switch (sem_mask) {
101 case SEM_XGMAC0_MASK:
102 sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
103 break;
104 case SEM_XGMAC1_MASK:
105 sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
106 break;
107 case SEM_ICB_MASK:
108 sem_bits = SEM_SET << SEM_ICB_SHIFT;
109 break;
110 case SEM_MAC_ADDR_MASK:
111 sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
112 break;
113 case SEM_FLASH_MASK:
114 sem_bits = SEM_SET << SEM_FLASH_SHIFT;
115 break;
116 case SEM_PROBE_MASK:
117 sem_bits = SEM_SET << SEM_PROBE_SHIFT;
118 break;
119 case SEM_RT_IDX_MASK:
120 sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
121 break;
122 case SEM_PROC_REG_MASK:
123 sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
124 break;
125 default:
126 QPRINTK(qdev, PROBE, ALERT, "Bad Semaphore mask!.\n");
127 return -EINVAL;
128 }
129
130 ql_write32(qdev, SEM, sem_bits | sem_mask);
131 return !(ql_read32(qdev, SEM) & sem_bits);
132}
133
134int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
135{
136 unsigned int wait_count = 30;
137 do {
138 if (!ql_sem_trylock(qdev, sem_mask))
139 return 0;
140 udelay(100);
141 } while (--wait_count);
142 return -ETIMEDOUT;
143}
144
145void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
146{
147 ql_write32(qdev, SEM, sem_mask);
148 ql_read32(qdev, SEM); /* flush */
149}
150
151/* This function waits for a specific bit to come ready
152 * in a given register. It is used mostly by the initialize
153 * process, but is also used in kernel thread API such as
154 * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
155 */
156int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
157{
158 u32 temp;
159 int count = UDELAY_COUNT;
160
161 while (count) {
162 temp = ql_read32(qdev, reg);
163
164 /* check for errors */
165 if (temp & err_bit) {
166 QPRINTK(qdev, PROBE, ALERT,
167 "register 0x%.08x access error, value = 0x%.08x!.\n",
168 reg, temp);
169 return -EIO;
170 } else if (temp & bit)
171 return 0;
172 udelay(UDELAY_DELAY);
173 count--;
174 }
175 QPRINTK(qdev, PROBE, ALERT,
176 "Timed out waiting for reg %x to come ready.\n", reg);
177 return -ETIMEDOUT;
178}
179
180/* The CFG register is used to download TX and RX control blocks
181 * to the chip. This function waits for an operation to complete.
182 */
183static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
184{
185 int count = UDELAY_COUNT;
186 u32 temp;
187
188 while (count) {
189 temp = ql_read32(qdev, CFG);
190 if (temp & CFG_LE)
191 return -EIO;
192 if (!(temp & bit))
193 return 0;
194 udelay(UDELAY_DELAY);
195 count--;
196 }
197 return -ETIMEDOUT;
198}
199
200
201/* Used to issue init control blocks to hw. Maps control block,
202 * sets address, triggers download, waits for completion.
203 */
204int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
205 u16 q_id)
206{
207 u64 map;
208 int status = 0;
209 int direction;
210 u32 mask;
211 u32 value;
212
213 direction =
214 (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
215 PCI_DMA_FROMDEVICE;
216
217 map = pci_map_single(qdev->pdev, ptr, size, direction);
218 if (pci_dma_mapping_error(qdev->pdev, map)) {
219 QPRINTK(qdev, IFUP, ERR, "Couldn't map DMA area.\n");
220 return -ENOMEM;
221 }
222
223 status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
224 if (status)
225 return status;
226
227 status = ql_wait_cfg(qdev, bit);
228 if (status) {
229 QPRINTK(qdev, IFUP, ERR,
230 "Timed out waiting for CFG to come ready.\n");
231 goto exit;
232 }
233
234 ql_write32(qdev, ICB_L, (u32) map);
235 ql_write32(qdev, ICB_H, (u32) (map >> 32));
236
237 mask = CFG_Q_MASK | (bit << 16);
238 value = bit | (q_id << CFG_Q_SHIFT);
239 ql_write32(qdev, CFG, (mask | value));
240
241 /*
242 * Wait for the bit to clear after signaling hw.
243 */
244 status = ql_wait_cfg(qdev, bit);
245exit:
246 ql_sem_unlock(qdev, SEM_ICB_MASK); /* does flush too */
247 pci_unmap_single(qdev->pdev, map, size, direction);
248 return status;
249}
250
251/* Get a specific MAC address from the CAM. Used for debug and reg dump. */
252int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
253 u32 *value)
254{
255 u32 offset = 0;
256 int status;
257
258 switch (type) {
259 case MAC_ADDR_TYPE_MULTI_MAC:
260 case MAC_ADDR_TYPE_CAM_MAC:
261 {
262 status =
263 ql_wait_reg_rdy(qdev,
264 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
265 if (status)
266 goto exit;
267 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
268 (index << MAC_ADDR_IDX_SHIFT) | /* index */
269 MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
270 status =
271 ql_wait_reg_rdy(qdev,
272 MAC_ADDR_IDX, MAC_ADDR_MR, 0);
273 if (status)
274 goto exit;
275 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
276 status =
277 ql_wait_reg_rdy(qdev,
278 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
279 if (status)
280 goto exit;
281 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
282 (index << MAC_ADDR_IDX_SHIFT) | /* index */
283 MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
284 status =
285 ql_wait_reg_rdy(qdev,
286 MAC_ADDR_IDX, MAC_ADDR_MR, 0);
287 if (status)
288 goto exit;
289 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
290 if (type == MAC_ADDR_TYPE_CAM_MAC) {
291 status =
292 ql_wait_reg_rdy(qdev,
293 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
294 if (status)
295 goto exit;
296 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
297 (index << MAC_ADDR_IDX_SHIFT) | /* index */
298 MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
299 status =
300 ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
301 MAC_ADDR_MR, 0);
302 if (status)
303 goto exit;
304 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
305 }
306 break;
307 }
308 case MAC_ADDR_TYPE_VLAN:
309 case MAC_ADDR_TYPE_MULTI_FLTR:
310 default:
311 QPRINTK(qdev, IFUP, CRIT,
312 "Address type %d not yet supported.\n", type);
313 status = -EPERM;
314 }
315exit:
316 return status;
317}
318
319/* Set up a MAC, multicast or VLAN address for the
320 * inbound frame matching.
321 */
322static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
323 u16 index)
324{
325 u32 offset = 0;
326 int status = 0;
327
328 switch (type) {
329 case MAC_ADDR_TYPE_MULTI_MAC:
330 {
331 u32 upper = (addr[0] << 8) | addr[1];
332 u32 lower = (addr[2] << 24) | (addr[3] << 16) |
333 (addr[4] << 8) | (addr[5]);
334
335 status =
336 ql_wait_reg_rdy(qdev,
337 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
338 if (status)
339 goto exit;
340 ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
341 (index << MAC_ADDR_IDX_SHIFT) |
342 type | MAC_ADDR_E);
343 ql_write32(qdev, MAC_ADDR_DATA, lower);
344 status =
345 ql_wait_reg_rdy(qdev,
346 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
347 if (status)
348 goto exit;
349 ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
350 (index << MAC_ADDR_IDX_SHIFT) |
351 type | MAC_ADDR_E);
352
353 ql_write32(qdev, MAC_ADDR_DATA, upper);
354 status =
355 ql_wait_reg_rdy(qdev,
356 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
357 if (status)
358 goto exit;
359 break;
360 }
361 case MAC_ADDR_TYPE_CAM_MAC:
362 {
363 u32 cam_output;
364 u32 upper = (addr[0] << 8) | addr[1];
365 u32 lower =
366 (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
367 (addr[5]);
368
369 QPRINTK(qdev, IFUP, DEBUG,
370 "Adding %s address %pM"
371 " at index %d in the CAM.\n",
372 ((type ==
373 MAC_ADDR_TYPE_MULTI_MAC) ? "MULTICAST" :
374 "UNICAST"), addr, index);
375
376 status =
377 ql_wait_reg_rdy(qdev,
378 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
379 if (status)
380 goto exit;
381 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
382 (index << MAC_ADDR_IDX_SHIFT) | /* index */
383 type); /* type */
384 ql_write32(qdev, MAC_ADDR_DATA, lower);
385 status =
386 ql_wait_reg_rdy(qdev,
387 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
388 if (status)
389 goto exit;
390 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
391 (index << MAC_ADDR_IDX_SHIFT) | /* index */
392 type); /* type */
393 ql_write32(qdev, MAC_ADDR_DATA, upper);
394 status =
395 ql_wait_reg_rdy(qdev,
396 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
397 if (status)
398 goto exit;
399 ql_write32(qdev, MAC_ADDR_IDX, (offset) | /* offset */
400 (index << MAC_ADDR_IDX_SHIFT) | /* index */
401 type); /* type */
402 /* This field should also include the queue id
403 and possibly the function id. Right now we hardcode
404 the route field to NIC core.
405 */
406 cam_output = (CAM_OUT_ROUTE_NIC |
407 (qdev->
408 func << CAM_OUT_FUNC_SHIFT) |
409 (0 << CAM_OUT_CQ_ID_SHIFT));
410 if (qdev->vlgrp)
411 cam_output |= CAM_OUT_RV;
412 /* route to NIC core */
413 ql_write32(qdev, MAC_ADDR_DATA, cam_output);
414 break;
415 }
416 case MAC_ADDR_TYPE_VLAN:
417 {
418 u32 enable_bit = *((u32 *) &addr[0]);
419 /* For VLAN, the addr actually holds a bit that
420 * either enables or disables the vlan id we are
421 * addressing. It's either MAC_ADDR_E on or off.
422 * That's bit-27 we're talking about.
423 */
424 QPRINTK(qdev, IFUP, INFO, "%s VLAN ID %d %s the CAM.\n",
425 (enable_bit ? "Adding" : "Removing"),
426 index, (enable_bit ? "to" : "from"));
427
428 status =
429 ql_wait_reg_rdy(qdev,
430 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
431 if (status)
432 goto exit;
433 ql_write32(qdev, MAC_ADDR_IDX, offset | /* offset */
434 (index << MAC_ADDR_IDX_SHIFT) | /* index */
435 type | /* type */
436 enable_bit); /* enable/disable */
437 break;
438 }
439 case MAC_ADDR_TYPE_MULTI_FLTR:
440 default:
441 QPRINTK(qdev, IFUP, CRIT,
442 "Address type %d not yet supported.\n", type);
443 status = -EPERM;
444 }
445exit:
446 return status;
447}
448
449/* Set or clear MAC address in hardware. We sometimes
450 * have to clear it to prevent wrong frame routing
451 * especially in a bonding environment.
452 */
453static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
454{
455 int status;
456 char zero_mac_addr[ETH_ALEN];
457 char *addr;
458
459 if (set) {
460 addr = &qdev->ndev->dev_addr[0];
461 QPRINTK(qdev, IFUP, DEBUG,
462 "Set Mac addr %pM\n", addr);
463 } else {
464 memset(zero_mac_addr, 0, ETH_ALEN);
465 addr = &zero_mac_addr[0];
466 QPRINTK(qdev, IFUP, DEBUG,
467 "Clearing MAC address on %s\n",
468 qdev->ndev->name);
469 }
470 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
471 if (status)
472 return status;
473 status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
474 MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
475 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
476 if (status)
477 QPRINTK(qdev, IFUP, ERR, "Failed to init mac "
478 "address.\n");
479 return status;
480}
481
482void ql_link_on(struct ql_adapter *qdev)
483{
484 QPRINTK(qdev, LINK, ERR, "%s: Link is up.\n",
485 qdev->ndev->name);
486 netif_carrier_on(qdev->ndev);
487 ql_set_mac_addr(qdev, 1);
488}
489
490void ql_link_off(struct ql_adapter *qdev)
491{
492 QPRINTK(qdev, LINK, ERR, "%s: Link is down.\n",
493 qdev->ndev->name);
494 netif_carrier_off(qdev->ndev);
495 ql_set_mac_addr(qdev, 0);
496}
497
498/* Get a specific frame routing value from the CAM.
499 * Used for debug and reg dump.
500 */
501int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
502{
503 int status = 0;
504
505 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
506 if (status)
507 goto exit;
508
509 ql_write32(qdev, RT_IDX,
510 RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
511 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
512 if (status)
513 goto exit;
514 *value = ql_read32(qdev, RT_DATA);
515exit:
516 return status;
517}
518
519/* The NIC function for this chip has 16 routing indexes. Each one can be used
520 * to route different frame types to various inbound queues. We send broadcast/
521 * multicast/error frames to the default queue for slow handling,
522 * and CAM hit/RSS frames to the fast handling queues.
523 */
524static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
525 int enable)
526{
527 int status = -EINVAL; /* Return error if no mask match. */
528 u32 value = 0;
529
530 QPRINTK(qdev, IFUP, DEBUG,
531 "%s %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s mask %s the routing reg.\n",
532 (enable ? "Adding" : "Removing"),
533 ((index == RT_IDX_ALL_ERR_SLOT) ? "MAC ERROR/ALL ERROR" : ""),
534 ((index == RT_IDX_IP_CSUM_ERR_SLOT) ? "IP CSUM ERROR" : ""),
535 ((index ==
536 RT_IDX_TCP_UDP_CSUM_ERR_SLOT) ? "TCP/UDP CSUM ERROR" : ""),
537 ((index == RT_IDX_BCAST_SLOT) ? "BROADCAST" : ""),
538 ((index == RT_IDX_MCAST_MATCH_SLOT) ? "MULTICAST MATCH" : ""),
539 ((index == RT_IDX_ALLMULTI_SLOT) ? "ALL MULTICAST MATCH" : ""),
540 ((index == RT_IDX_UNUSED6_SLOT) ? "UNUSED6" : ""),
541 ((index == RT_IDX_UNUSED7_SLOT) ? "UNUSED7" : ""),
542 ((index == RT_IDX_RSS_MATCH_SLOT) ? "RSS ALL/IPV4 MATCH" : ""),
543 ((index == RT_IDX_RSS_IPV6_SLOT) ? "RSS IPV6" : ""),
544 ((index == RT_IDX_RSS_TCP4_SLOT) ? "RSS TCP4" : ""),
545 ((index == RT_IDX_RSS_TCP6_SLOT) ? "RSS TCP6" : ""),
546 ((index == RT_IDX_CAM_HIT_SLOT) ? "CAM HIT" : ""),
547 ((index == RT_IDX_UNUSED013) ? "UNUSED13" : ""),
548 ((index == RT_IDX_UNUSED014) ? "UNUSED14" : ""),
549 ((index == RT_IDX_PROMISCUOUS_SLOT) ? "PROMISCUOUS" : ""),
550 (enable ? "to" : "from"));
551
552 switch (mask) {
553 case RT_IDX_CAM_HIT:
554 {
555 value = RT_IDX_DST_CAM_Q | /* dest */
556 RT_IDX_TYPE_NICQ | /* type */
557 (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
558 break;
559 }
560 case RT_IDX_VALID: /* Promiscuous Mode frames. */
561 {
562 value = RT_IDX_DST_DFLT_Q | /* dest */
563 RT_IDX_TYPE_NICQ | /* type */
564 (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
565 break;
566 }
567 case RT_IDX_ERR: /* Pass up MAC,IP,TCP/UDP error frames. */
568 {
569 value = RT_IDX_DST_DFLT_Q | /* dest */
570 RT_IDX_TYPE_NICQ | /* type */
571 (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
572 break;
573 }
574 case RT_IDX_BCAST: /* Pass up Broadcast frames to default Q. */
575 {
576 value = RT_IDX_DST_DFLT_Q | /* dest */
577 RT_IDX_TYPE_NICQ | /* type */
578 (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
579 break;
580 }
581 case RT_IDX_MCAST: /* Pass up All Multicast frames. */
582 {
583 value = RT_IDX_DST_DFLT_Q | /* dest */
584 RT_IDX_TYPE_NICQ | /* type */
585 (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
586 break;
587 }
588 case RT_IDX_MCAST_MATCH: /* Pass up matched Multicast frames. */
589 {
590 value = RT_IDX_DST_DFLT_Q | /* dest */
591 RT_IDX_TYPE_NICQ | /* type */
592 (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
593 break;
594 }
595 case RT_IDX_RSS_MATCH: /* Pass up matched RSS frames. */
596 {
597 value = RT_IDX_DST_RSS | /* dest */
598 RT_IDX_TYPE_NICQ | /* type */
599 (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
600 break;
601 }
602 case 0: /* Clear the E-bit on an entry. */
603 {
604 value = RT_IDX_DST_DFLT_Q | /* dest */
605 RT_IDX_TYPE_NICQ | /* type */
606 (index << RT_IDX_IDX_SHIFT);/* index */
607 break;
608 }
609 default:
610 QPRINTK(qdev, IFUP, ERR, "Mask type %d not yet supported.\n",
611 mask);
612 status = -EPERM;
613 goto exit;
614 }
615
616 if (value) {
617 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
618 if (status)
619 goto exit;
620 value |= (enable ? RT_IDX_E : 0);
621 ql_write32(qdev, RT_IDX, value);
622 ql_write32(qdev, RT_DATA, enable ? mask : 0);
623 }
624exit:
625 return status;
626}
627
628static void ql_enable_interrupts(struct ql_adapter *qdev)
629{
630 ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
631}
632
633static void ql_disable_interrupts(struct ql_adapter *qdev)
634{
635 ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
636}
637
638/* If we're running with multiple MSI-X vectors then we enable on the fly.
639 * Otherwise, we may have multiple outstanding workers and don't want to
640 * enable until the last one finishes. In this case, the irq_cnt gets
641 * incremented every time we queue a worker and decremented every time
642 * a worker finishes. Once it hits zero we enable the interrupt.
643 */
644u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
645{
646 u32 var = 0;
647 unsigned long hw_flags = 0;
648 struct intr_context *ctx = qdev->intr_context + intr;
649
650 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
651 /* Always enable if we're MSIX multi interrupts and
652 * it's not the default (zeroeth) interrupt.
653 */
654 ql_write32(qdev, INTR_EN,
655 ctx->intr_en_mask);
656 var = ql_read32(qdev, STS);
657 return var;
658 }
659
660 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
661 if (atomic_dec_and_test(&ctx->irq_cnt)) {
662 ql_write32(qdev, INTR_EN,
663 ctx->intr_en_mask);
664 var = ql_read32(qdev, STS);
665 }
666 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
667 return var;
668}
669
670static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
671{
672 u32 var = 0;
673 struct intr_context *ctx;
674
675 /* HW disables for us if we're MSIX multi interrupts and
676 * it's not the default (zeroeth) interrupt.
677 */
678 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
679 return 0;
680
681 ctx = qdev->intr_context + intr;
682 spin_lock(&qdev->hw_lock);
683 if (!atomic_read(&ctx->irq_cnt)) {
684 ql_write32(qdev, INTR_EN,
685 ctx->intr_dis_mask);
686 var = ql_read32(qdev, STS);
687 }
688 atomic_inc(&ctx->irq_cnt);
689 spin_unlock(&qdev->hw_lock);
690 return var;
691}
692
693static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
694{
695 int i;
696 for (i = 0; i < qdev->intr_count; i++) {
697 /* The enable call does an atomic_dec_and_test
698 * and enables only if the result is zero.
699 * So we precharge it here.
700 */
701 if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
702 i == 0))
703 atomic_set(&qdev->intr_context[i].irq_cnt, 1);
704 ql_enable_completion_interrupt(qdev, i);
705 }
706
707}
708
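/* Validate the flash image read into qdev->flash: check the 4-byte
 * signature string and sum the image as 16-bit words; a non-zero sum
 * indicates a bad checksum.
 */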
709static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
710{
711 int status, i;
712 u16 csum = 0;
713 __le16 *flash = (__le16 *)&qdev->flash;
714
715 status = strncmp((char *)&qdev->flash, str, 4);
716 if (status) {
717 QPRINTK(qdev, IFUP, ERR, "Invalid flash signature.\n");
718 return status;
719 }
720
721 for (i = 0; i < size; i++)
722 csum += le16_to_cpu(*flash++);
723
724 if (csum)
725 QPRINTK(qdev, IFUP, ERR,
726 "Invalid flash checksum, csum = 0x%.04x.\n", csum);
727
728 return csum;
729}
730
731static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
732{
733 int status = 0;
734 /* wait for reg to come ready */
735 status = ql_wait_reg_rdy(qdev,
736 FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
737 if (status)
738 goto exit;
739 /* set up for reg read */
740 ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
741 /* wait for reg to come ready */
742 status = ql_wait_reg_rdy(qdev,
743 FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
744 if (status)
745 goto exit;
746 /* This data is stored on flash as an array of
747 * __le32. Since ql_read32() returns cpu endian
748 * we need to swap it back.
749 */
750 *data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
751exit:
752 return status;
753}
754
755static int ql_get_8000_flash_params(struct ql_adapter *qdev)
756{
757 u32 i, size;
758 int status;
759 __le32 *p = (__le32 *)&qdev->flash;
760 u32 offset;
761 u8 mac_addr[6];
762
763 /* Get flash offset for function and adjust
764 * for dword access.
765 */
766 if (!qdev->port)
767 offset = FUNC0_FLASH_OFFSET / sizeof(u32);
768 else
769 offset = FUNC1_FLASH_OFFSET / sizeof(u32);
770
771 if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
772 return -ETIMEDOUT;
773
774 size = sizeof(struct flash_params_8000) / sizeof(u32);
775 for (i = 0; i < size; i++, p++) {
776 status = ql_read_flash_word(qdev, i+offset, p);
777 if (status) {
778 QPRINTK(qdev, IFUP, ERR, "Error reading flash.\n");
779 goto exit;
780 }
781 }
782
783 status = ql_validate_flash(qdev,
784 sizeof(struct flash_params_8000) / sizeof(u16),
785 "8000");
786 if (status) {
787 QPRINTK(qdev, IFUP, ERR, "Invalid flash.\n");
788 status = -EINVAL;
789 goto exit;
790 }
791
792 /* Extract either manufacturer or BOFM modified
793 * MAC address.
794 */
795 if (qdev->flash.flash_params_8000.data_type1 == 2)
796 memcpy(mac_addr,
797 qdev->flash.flash_params_8000.mac_addr1,
798 qdev->ndev->addr_len);
799 else
800 memcpy(mac_addr,
801 qdev->flash.flash_params_8000.mac_addr,
802 qdev->ndev->addr_len);
803
804 if (!is_valid_ether_addr(mac_addr)) {
805 QPRINTK(qdev, IFUP, ERR, "Invalid MAC address.\n");
806 status = -EINVAL;
807 goto exit;
808 }
809
810 memcpy(qdev->ndev->dev_addr,
811 mac_addr,
812 qdev->ndev->addr_len);
813
814exit:
815 ql_sem_unlock(qdev, SEM_FLASH_MASK);
816 return status;
817}
818
819static int ql_get_8012_flash_params(struct ql_adapter *qdev)
820{
821 int i;
822 int status;
823 __le32 *p = (__le32 *)&qdev->flash;
824 u32 offset = 0;
825 u32 size = sizeof(struct flash_params_8012) / sizeof(u32);
826
827 /* Second function's parameters follow the first
828 * function's.
829 */
830 if (qdev->port)
831 offset = size;
832
833 if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
834 return -ETIMEDOUT;
835
836 for (i = 0; i < size; i++, p++) {
837 status = ql_read_flash_word(qdev, i+offset, p);
838 if (status) {
839 QPRINTK(qdev, IFUP, ERR, "Error reading flash.\n");
840 goto exit;
841 }
842
843 }
844
845 status = ql_validate_flash(qdev,
846 sizeof(struct flash_params_8012) / sizeof(u16),
847 "8012");
848 if (status) {
849 QPRINTK(qdev, IFUP, ERR, "Invalid flash.\n");
850 status = -EINVAL;
851 goto exit;
852 }
853
854 if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
855 status = -EINVAL;
856 goto exit;
857 }
858
859 memcpy(qdev->ndev->dev_addr,
860 qdev->flash.flash_params_8012.mac_addr,
861 qdev->ndev->addr_len);
862
863exit:
864 ql_sem_unlock(qdev, SEM_FLASH_MASK);
865 return status;
866}
867
868/* xgmac registers are located behind the xgmac_addr and xgmac_data
869 * register pair. Each read/write requires us to wait for the ready
870 * bit before reading/writing the data.
871 */
872static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
873{
874 int status;
875 /* wait for reg to come ready */
876 status = ql_wait_reg_rdy(qdev,
877 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
878 if (status)
879 return status;
880 /* write the data to the data reg */
881 ql_write32(qdev, XGMAC_DATA, data);
882 /* trigger the write */
883 ql_write32(qdev, XGMAC_ADDR, reg);
884 return status;
885}
886
887/* xgmac registers are located behind the xgmac_addr and xgmac_data
888 * register pair. Each read/write requires us to wait for the ready
889 * bit before reading/writing the data.
890 */
891int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
892{
893 int status = 0;
894 /* wait for reg to come ready */
895 status = ql_wait_reg_rdy(qdev,
896 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
897 if (status)
898 goto exit;
899 /* set up for reg read */
900 ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
901 /* wait for reg to come ready */
902 status = ql_wait_reg_rdy(qdev,
903 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
904 if (status)
905 goto exit;
906 /* get the data */
907 *data = ql_read32(qdev, XGMAC_DATA);
908exit:
909 return status;
910}
911
912/* This is used for reading the 64-bit statistics regs. */
913int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
914{
915 int status = 0;
916 u32 hi = 0;
917 u32 lo = 0;
918
919 status = ql_read_xgmac_reg(qdev, reg, &lo);
920 if (status)
921 goto exit;
922
923 status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
924 if (status)
925 goto exit;
926
927 *data = (u64) lo | ((u64) hi << 32);
928
929exit:
930 return status;
931}
932
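/* 8000-series port bringup: query MPI firmware version and state,
 * then schedule the port-config worker to set up TX/RX frame sizes.
 */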
933static int ql_8000_port_initialize(struct ql_adapter *qdev)
934{
935 int status;
936 /*
937 * Get MPI firmware version for driver banner
938 * and ethtool info.
939 */
940 status = ql_mb_about_fw(qdev);
941 if (status)
942 goto exit;
bcc2cb3b
RM
943 status = ql_mb_get_fw_state(qdev);
944 if (status)
945 goto exit;
946 /* Wake up a worker to get/set the TX/RX frame sizes. */
947 queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
948exit:
949 return status;
950}
951
952/* Take the MAC Core out of reset.
953 * Enable statistics counting.
954 * Take the transmitter/receiver out of reset.
955 * This functionality may be done in the MPI firmware at a
956 * later date.
957 */
958static int ql_8012_port_initialize(struct ql_adapter *qdev)
959{
960 int status = 0;
961 u32 data;
962
963 if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
964 /* Another function has the semaphore, so
965 * wait for the port init bit to come ready.
966 */
967 QPRINTK(qdev, LINK, INFO,
968 "Another function has the semaphore, so wait for the port init bit to come ready.\n");
969 status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
970 if (status) {
971 QPRINTK(qdev, LINK, CRIT,
972 "Port initialize timed out.\n");
973 }
974 return status;
975 }
976
977 QPRINTK(qdev, LINK, INFO, "Got xgmac semaphore!.\n");
978 /* Set the core reset. */
979 status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
980 if (status)
981 goto end;
982 data |= GLOBAL_CFG_RESET;
983 status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
984 if (status)
985 goto end;
986
987 /* Clear the core reset and turn on jumbo for receiver. */
988 data &= ~GLOBAL_CFG_RESET; /* Clear core reset. */
989 data |= GLOBAL_CFG_JUMBO; /* Turn on jumbo. */
990 data |= GLOBAL_CFG_TX_STAT_EN;
991 data |= GLOBAL_CFG_RX_STAT_EN;
992 status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
993 if (status)
994 goto end;
995
996 /* Enable transmitter, and clear its reset. */
997 status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
998 if (status)
999 goto end;
1000 data &= ~TX_CFG_RESET; /* Clear the TX MAC reset. */
1001 data |= TX_CFG_EN; /* Enable the transmitter. */
1002 status = ql_write_xgmac_reg(qdev, TX_CFG, data);
1003 if (status)
1004 goto end;
1005
1006 /* Enable receiver and clear its reset. */
1007 status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
1008 if (status)
1009 goto end;
1010 data &= ~RX_CFG_RESET; /* Clear the RX MAC reset. */
1011 data |= RX_CFG_EN; /* Enable the receiver. */
1012 status = ql_write_xgmac_reg(qdev, RX_CFG, data);
1013 if (status)
1014 goto end;
1015
1016 /* Turn on jumbo. */
1017 status =
1018 ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
1019 if (status)
1020 goto end;
1021 status =
1022 ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
1023 if (status)
1024 goto end;
1025
1026 /* Signal to the world that the port is enabled. */
1027 ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
1028end:
1029 ql_sem_unlock(qdev, qdev->xg_sem_mask);
1030 return status;
1031}
1032
1033static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
1034{
1035 return PAGE_SIZE << qdev->lbq_buf_order;
1036}
1037
1038/* Get the next large buffer. */
1039static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
1040{
1041 struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
1042 rx_ring->lbq_curr_idx++;
1043 if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
1044 rx_ring->lbq_curr_idx = 0;
1045 rx_ring->lbq_free_cnt++;
1046 return lbq_desc;
1047}
1048
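/* Get the next large-buffer descriptor and sync its page chunk for CPU
 * access.  The master page is unmapped once its last chunk is consumed.
 */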
1049static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
1050 struct rx_ring *rx_ring)
1051{
1052 struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);
1053
1054 pci_dma_sync_single_for_cpu(qdev->pdev,
1055 pci_unmap_addr(lbq_desc, mapaddr),
1056 rx_ring->lbq_buf_size,
1057 PCI_DMA_FROMDEVICE);
1058
1059 /* If it's the last chunk of our master page then
1060 * we unmap it.
1061 */
1062 if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size)
1063 == ql_lbq_block_size(qdev))
1064 pci_unmap_page(qdev->pdev,
1065 lbq_desc->p.pg_chunk.map,
1066 ql_lbq_block_size(qdev),
1067 PCI_DMA_FROMDEVICE);
1068 return lbq_desc;
1069}
1070
1071/* Get the next small buffer. */
1072static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
1073{
1074 struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
1075 rx_ring->sbq_curr_idx++;
1076 if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
1077 rx_ring->sbq_curr_idx = 0;
1078 rx_ring->sbq_free_cnt++;
1079 return sbq_desc;
1080}
1081
1082/* Update an rx ring index. */
1083static void ql_update_cq(struct rx_ring *rx_ring)
1084{
1085 rx_ring->cnsmr_idx++;
1086 rx_ring->curr_entry++;
1087 if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
1088 rx_ring->cnsmr_idx = 0;
1089 rx_ring->curr_entry = rx_ring->cq_base;
1090 }
1091}
1092
1093static void ql_write_cq_idx(struct rx_ring *rx_ring)
1094{
1095 ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
1096}
1097
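/* Hand out the next lbq-sized chunk of the master rx page, allocating
 * and mapping a fresh page when the previous one has been used up.
 */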
1098static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
1099 struct bq_desc *lbq_desc)
1100{
1101 if (!rx_ring->pg_chunk.page) {
1102 u64 map;
1103 rx_ring->pg_chunk.page = alloc_pages(__GFP_COLD | __GFP_COMP |
1104 GFP_ATOMIC,
1105 qdev->lbq_buf_order);
1106 if (unlikely(!rx_ring->pg_chunk.page)) {
1107 QPRINTK(qdev, DRV, ERR,
1108 "page allocation failed.\n");
1109 return -ENOMEM;
1110 }
1111 rx_ring->pg_chunk.offset = 0;
1112 map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page,
1113 0, ql_lbq_block_size(qdev),
1114 PCI_DMA_FROMDEVICE);
1115 if (pci_dma_mapping_error(qdev->pdev, map)) {
1116 __free_pages(rx_ring->pg_chunk.page,
1117 qdev->lbq_buf_order);
1118 QPRINTK(qdev, DRV, ERR,
1119 "PCI mapping failed.\n");
1120 return -ENOMEM;
1121 }
1122 rx_ring->pg_chunk.map = map;
1123 rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page);
1124 }
1125
1126 /* Copy the current master pg_chunk info
1127 * to the current descriptor.
1128 */
1129 lbq_desc->p.pg_chunk = rx_ring->pg_chunk;
1130
1131 /* Adjust the master page chunk for next
1132 * buffer get.
1133 */
1134 rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size;
1135 if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) {
1136 rx_ring->pg_chunk.page = NULL;
1137 lbq_desc->p.pg_chunk.last_flag = 1;
1138 } else {
1139 rx_ring->pg_chunk.va += rx_ring->lbq_buf_size;
1140 get_page(rx_ring->pg_chunk.page);
1141 lbq_desc->p.pg_chunk.last_flag = 0;
1142 }
1143 return 0;
1144}
1145/* Process (refill) a large buffer queue. */
1146static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1147{
1148 u32 clean_idx = rx_ring->lbq_clean_idx;
1149 u32 start_idx = clean_idx;
1150 struct bq_desc *lbq_desc;
1151 u64 map;
1152 int i;
1153
1154 while (rx_ring->lbq_free_cnt > 32) {
1155 for (i = 0; i < 16; i++) {
1156 QPRINTK(qdev, RX_STATUS, DEBUG,
1157 "lbq: try cleaning clean_idx = %d.\n",
1158 clean_idx);
1159 lbq_desc = &rx_ring->lbq[clean_idx];
1160 if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
1161 QPRINTK(qdev, IFUP, ERR,
1162 "Could not get a page chunk.\n");
1163 return;
1164 }
1165
1166 map = lbq_desc->p.pg_chunk.map +
1167 lbq_desc->p.pg_chunk.offset;
1168 pci_unmap_addr_set(lbq_desc, mapaddr, map);
1169 pci_unmap_len_set(lbq_desc, maplen,
1170 rx_ring->lbq_buf_size);
1171 *lbq_desc->addr = cpu_to_le64(map);
1172
1173 pci_dma_sync_single_for_device(qdev->pdev, map,
1174 rx_ring->lbq_buf_size,
1175 PCI_DMA_FROMDEVICE);
1176 clean_idx++;
1177 if (clean_idx == rx_ring->lbq_len)
1178 clean_idx = 0;
1179 }
1180
1181 rx_ring->lbq_clean_idx = clean_idx;
1182 rx_ring->lbq_prod_idx += 16;
1183 if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
1184 rx_ring->lbq_prod_idx = 0;
1185 rx_ring->lbq_free_cnt -= 16;
1186 }
1187
1188 if (start_idx != clean_idx) {
1189 QPRINTK(qdev, RX_STATUS, DEBUG,
1190 "lbq: updating prod idx = %d.\n",
1191 rx_ring->lbq_prod_idx);
1192 ql_write_db_reg(rx_ring->lbq_prod_idx,
1193 rx_ring->lbq_prod_idx_db_reg);
1194 }
1195}
1196
1197/* Process (refill) a small buffer queue. */
1198static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1199{
1200 u32 clean_idx = rx_ring->sbq_clean_idx;
1201 u32 start_idx = clean_idx;
1202 struct bq_desc *sbq_desc;
1203 u64 map;
1204 int i;
1205
1206 while (rx_ring->sbq_free_cnt > 16) {
1207 for (i = 0; i < 16; i++) {
1208 sbq_desc = &rx_ring->sbq[clean_idx];
1209 QPRINTK(qdev, RX_STATUS, DEBUG,
1210 "sbq: try cleaning clean_idx = %d.\n",
1211 clean_idx);
1212 if (sbq_desc->p.skb == NULL) {
1213 QPRINTK(qdev, RX_STATUS, DEBUG,
1214 "sbq: getting new skb for index %d.\n",
1215 sbq_desc->index);
1216 sbq_desc->p.skb =
1217 netdev_alloc_skb(qdev->ndev,
1218 SMALL_BUFFER_SIZE);
1219 if (sbq_desc->p.skb == NULL) {
1220 QPRINTK(qdev, PROBE, ERR,
1221 "Couldn't get an skb.\n");
1222 rx_ring->sbq_clean_idx = clean_idx;
1223 return;
1224 }
1225 skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
1226 map = pci_map_single(qdev->pdev,
1227 sbq_desc->p.skb->data,
1228 rx_ring->sbq_buf_size,
1229 PCI_DMA_FROMDEVICE);
1230 if (pci_dma_mapping_error(qdev->pdev, map)) {
1231 QPRINTK(qdev, IFUP, ERR, "PCI mapping failed.\n");
1232 rx_ring->sbq_clean_idx = clean_idx;
1233 dev_kfree_skb_any(sbq_desc->p.skb);
1234 sbq_desc->p.skb = NULL;
1235 return;
1236 }
1237 pci_unmap_addr_set(sbq_desc, mapaddr, map);
1238 pci_unmap_len_set(sbq_desc, maplen,
1239 rx_ring->sbq_buf_size);
1240 *sbq_desc->addr = cpu_to_le64(map);
1241 }
1242
1243 clean_idx++;
1244 if (clean_idx == rx_ring->sbq_len)
1245 clean_idx = 0;
1246 }
1247 rx_ring->sbq_clean_idx = clean_idx;
1248 rx_ring->sbq_prod_idx += 16;
1249 if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
1250 rx_ring->sbq_prod_idx = 0;
1251 rx_ring->sbq_free_cnt -= 16;
1252 }
1253
1254 if (start_idx != clean_idx) {
1255 QPRINTK(qdev, RX_STATUS, DEBUG,
1256 "sbq: updating prod idx = %d.\n",
1257 rx_ring->sbq_prod_idx);
1258 ql_write_db_reg(rx_ring->sbq_prod_idx,
1259 rx_ring->sbq_prod_idx_db_reg);
1260 }
1261}
1262
1263static void ql_update_buffer_queues(struct ql_adapter *qdev,
1264 struct rx_ring *rx_ring)
1265{
1266 ql_update_sbq(qdev, rx_ring);
1267 ql_update_lbq(qdev, rx_ring);
1268}
1269
1270/* Unmaps tx buffers. Can be called from send() if a pci mapping
1271 * fails at some stage, or from the interrupt when a tx completes.
1272 */
1273static void ql_unmap_send(struct ql_adapter *qdev,
1274 struct tx_ring_desc *tx_ring_desc, int mapped)
1275{
1276 int i;
1277 for (i = 0; i < mapped; i++) {
1278 if (i == 0 || (i == 7 && mapped > 7)) {
1279 /*
1280 * Unmap the skb->data area, or the
1281 * external sglist (AKA the Outbound
1282 * Address List (OAL)).
1283 * If it's the zeroeth element, then it's
1284 * the skb->data area. If it's the 7th
1285 * element and there are more than 6 frags,
1286 * then it's an OAL.
1287 */
1288 if (i == 7) {
1289 QPRINTK(qdev, TX_DONE, DEBUG,
1290 "unmapping OAL area.\n");
1291 }
1292 pci_unmap_single(qdev->pdev,
1293 pci_unmap_addr(&tx_ring_desc->map[i],
1294 mapaddr),
1295 pci_unmap_len(&tx_ring_desc->map[i],
1296 maplen),
1297 PCI_DMA_TODEVICE);
1298 } else {
1299 QPRINTK(qdev, TX_DONE, DEBUG, "unmapping frag %d.\n",
1300 i);
1301 pci_unmap_page(qdev->pdev,
1302 pci_unmap_addr(&tx_ring_desc->map[i],
1303 mapaddr),
1304 pci_unmap_len(&tx_ring_desc->map[i],
1305 maplen), PCI_DMA_TODEVICE);
1306 }
1307 }
1308
1309}
1310
1311/* Map the buffers for this transmit. This will return
1312 * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
1313 */
1314static int ql_map_send(struct ql_adapter *qdev,
1315 struct ob_mac_iocb_req *mac_iocb_ptr,
1316 struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
1317{
1318 int len = skb_headlen(skb);
1319 dma_addr_t map;
1320 int frag_idx, err, map_idx = 0;
1321 struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
1322 int frag_cnt = skb_shinfo(skb)->nr_frags;
1323
1324 if (frag_cnt) {
1325 QPRINTK(qdev, TX_QUEUED, DEBUG, "frag_cnt = %d.\n", frag_cnt);
1326 }
1327 /*
1328 * Map the skb buffer first.
1329 */
1330 map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
1331
1332 err = pci_dma_mapping_error(qdev->pdev, map);
1333 if (err) {
1334 QPRINTK(qdev, TX_QUEUED, ERR,
1335 "PCI mapping failed with error: %d\n", err);
1336
1337 return NETDEV_TX_BUSY;
1338 }
1339
1340 tbd->len = cpu_to_le32(len);
1341 tbd->addr = cpu_to_le64(map);
1342 pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1343 pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
1344 map_idx++;
1345
1346 /*
1347 * This loop fills the remainder of the 8 address descriptors
1348 * in the IOCB. If there are more than 7 fragments, then the
1349 * eighth address desc will point to an external list (OAL).
1350 * When this happens, the remainder of the frags will be stored
1351 * in this list.
1352 */
1353 for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
1354 skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
1355 tbd++;
1356 if (frag_idx == 6 && frag_cnt > 7) {
1357 /* Let's tack on an sglist.
1358 * Our control block will now
1359 * look like this:
1360 * iocb->seg[0] = skb->data
1361 * iocb->seg[1] = frag[0]
1362 * iocb->seg[2] = frag[1]
1363 * iocb->seg[3] = frag[2]
1364 * iocb->seg[4] = frag[3]
1365 * iocb->seg[5] = frag[4]
1366 * iocb->seg[6] = frag[5]
1367 * iocb->seg[7] = ptr to OAL (external sglist)
1368 * oal->seg[0] = frag[6]
1369 * oal->seg[1] = frag[7]
1370 * oal->seg[2] = frag[8]
1371 * oal->seg[3] = frag[9]
1372 * oal->seg[4] = frag[10]
1373 * etc...
1374 */
1375 /* Tack on the OAL in the eighth segment of IOCB. */
1376 map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
1377 sizeof(struct oal),
1378 PCI_DMA_TODEVICE);
1379 err = pci_dma_mapping_error(qdev->pdev, map);
1380 if (err) {
1381 QPRINTK(qdev, TX_QUEUED, ERR,
1382 "PCI mapping outbound address list with error: %d\n",
1383 err);
1384 goto map_error;
1385 }
1386
1387 tbd->addr = cpu_to_le64(map);
1388 /*
1389 * The length is the number of fragments
1390 * that remain to be mapped times the length
1391 * of our sglist (OAL).
1392 */
1393 tbd->len =
1394 cpu_to_le32((sizeof(struct tx_buf_desc) *
1395 (frag_cnt - frag_idx)) | TX_DESC_C);
1396 pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
1397 map);
1398 pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1399 sizeof(struct oal));
1400 tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
1401 map_idx++;
1402 }
1403
1404 map =
1405 pci_map_page(qdev->pdev, frag->page,
1406 frag->page_offset, frag->size,
1407 PCI_DMA_TODEVICE);
1408
1409 err = pci_dma_mapping_error(qdev->pdev, map);
1410 if (err) {
1411 QPRINTK(qdev, TX_QUEUED, ERR,
1412 "PCI mapping frags failed with error: %d.\n",
1413 err);
1414 goto map_error;
1415 }
1416
1417 tbd->addr = cpu_to_le64(map);
1418 tbd->len = cpu_to_le32(frag->size);
1419 pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1420 pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1421 frag->size);
1422
1423 }
1424 /* Save the number of segments we've mapped. */
1425 tx_ring_desc->map_cnt = map_idx;
1426 /* Terminate the last segment. */
1427 tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
1428 return NETDEV_TX_OK;
1429
1430map_error:
1431 /*
1432 * If the first frag mapping failed, then i will be zero.
1433 * This causes the unmap of the skb->data area. Otherwise
1434 * we pass in the number of frags that mapped successfully
1435 * so they can be unmapped.
1436 */
1437 ql_unmap_send(qdev, tx_ring_desc, map_idx);
1438 return NETDEV_TX_BUSY;
1439}
1440
1441/* Process an inbound completion from an rx ring. */
1442static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
1443 struct rx_ring *rx_ring,
1444 struct ib_mac_iocb_rsp *ib_mac_rsp,
1445 u32 length,
1446 u16 vlan_id)
1447{
1448 struct sk_buff *skb;
1449 struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1450 struct skb_frag_struct *rx_frag;
1451 int nr_frags;
1452 struct napi_struct *napi = &rx_ring->napi;
1453
1454 napi->dev = qdev->ndev;
1455
1456 skb = napi_get_frags(napi);
1457 if (!skb) {
1458 QPRINTK(qdev, DRV, ERR, "Couldn't get an skb, exiting.\n");
1459 rx_ring->rx_dropped++;
1460 put_page(lbq_desc->p.pg_chunk.page);
1461 return;
1462 }
1463 prefetch(lbq_desc->p.pg_chunk.va);
1464 rx_frag = skb_shinfo(skb)->frags;
1465 nr_frags = skb_shinfo(skb)->nr_frags;
1466 rx_frag += nr_frags;
1467 rx_frag->page = lbq_desc->p.pg_chunk.page;
1468 rx_frag->page_offset = lbq_desc->p.pg_chunk.offset;
1469 rx_frag->size = length;
1470
1471 skb->len += length;
1472 skb->data_len += length;
1473 skb->truesize += length;
1474 skb_shinfo(skb)->nr_frags++;
1475
1476 rx_ring->rx_packets++;
1477 rx_ring->rx_bytes += length;
1478 skb->ip_summed = CHECKSUM_UNNECESSARY;
1479 skb_record_rx_queue(skb, rx_ring->cq_id);
1480 if (qdev->vlgrp && (vlan_id != 0xffff))
1481 vlan_gro_frags(&rx_ring->napi, qdev->vlgrp, vlan_id);
1482 else
1483 napi_gro_frags(napi);
1484}
1485
1486/* Process an inbound completion from an rx ring. */
1487static void ql_process_mac_rx_page(struct ql_adapter *qdev,
1488 struct rx_ring *rx_ring,
1489 struct ib_mac_iocb_rsp *ib_mac_rsp,
1490 u32 length,
1491 u16 vlan_id)
1492{
1493 struct net_device *ndev = qdev->ndev;
1494 struct sk_buff *skb = NULL;
1495 void *addr;
1496 struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1497 struct napi_struct *napi = &rx_ring->napi;
1498
1499 skb = netdev_alloc_skb(ndev, length);
1500 if (!skb) {
1501 QPRINTK(qdev, DRV, ERR, "Couldn't get an skb, "
1502 "need to unwind!.\n");
1503 rx_ring->rx_dropped++;
1504 put_page(lbq_desc->p.pg_chunk.page);
1505 return;
1506 }
1507
1508 addr = lbq_desc->p.pg_chunk.va;
1509 prefetch(addr);
1510
1511
1512 /* Frame error, so drop the packet. */
1513 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1514 QPRINTK(qdev, DRV, ERR, "Receive error, flags2 = 0x%x\n",
1515 ib_mac_rsp->flags2);
1516 rx_ring->rx_errors++;
1517 goto err_out;
1518 }
1519
1520 /* The max framesize filter on this chip is set higher than
1521 * MTU since FCoE uses 2k frames.
1522 */
1523 if (skb->len > ndev->mtu + ETH_HLEN) {
1524 QPRINTK(qdev, DRV, ERR, "Segment too small, dropping.\n");
1525 rx_ring->rx_dropped++;
1526 goto err_out;
1527 }
1528 memcpy(skb_put(skb, ETH_HLEN), addr, ETH_HLEN);
1529 QPRINTK(qdev, RX_STATUS, DEBUG,
1530 "%d bytes of headers and data in large. Chain "
1531 "page to new skb and pull tail.\n", length);
1532 skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1533 lbq_desc->p.pg_chunk.offset+ETH_HLEN,
1534 length-ETH_HLEN);
1535 skb->len += length-ETH_HLEN;
1536 skb->data_len += length-ETH_HLEN;
1537 skb->truesize += length-ETH_HLEN;
1538
1539 rx_ring->rx_packets++;
1540 rx_ring->rx_bytes += skb->len;
1541 skb->protocol = eth_type_trans(skb, ndev);
1542 skb->ip_summed = CHECKSUM_NONE;
1543
1544 if (qdev->rx_csum &&
1545 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1546 /* TCP frame. */
1547 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1548 QPRINTK(qdev, RX_STATUS, DEBUG,
1549 "TCP checksum done!\n");
1550 skb->ip_summed = CHECKSUM_UNNECESSARY;
1551 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1552 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1553 /* Unfragmented ipv4 UDP frame. */
1554 struct iphdr *iph = (struct iphdr *) skb->data;
1555 if (!(iph->frag_off &
1556 cpu_to_be16(IP_MF|IP_OFFSET))) {
1557 skb->ip_summed = CHECKSUM_UNNECESSARY;
1558 QPRINTK(qdev, RX_STATUS, DEBUG,
1559 "TCP checksum done!\n");
1560 }
1561 }
1562 }
1563
1564 skb_record_rx_queue(skb, rx_ring->cq_id);
1565 if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
1566 if (qdev->vlgrp && (vlan_id != 0xffff))
1567 vlan_gro_receive(napi, qdev->vlgrp, vlan_id, skb);
1568 else
1569 napi_gro_receive(napi, skb);
1570 } else {
1571 if (qdev->vlgrp && (vlan_id != 0xffff))
1572 vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
1573 else
1574 netif_receive_skb(skb);
1575 }
1576 return;
1577err_out:
1578 dev_kfree_skb_any(skb);
1579 put_page(lbq_desc->p.pg_chunk.page);
1580}
1581
1582/* Process an inbound completion from an rx ring. */
1583static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
1584 struct rx_ring *rx_ring,
1585 struct ib_mac_iocb_rsp *ib_mac_rsp,
1586 u32 length,
1587 u16 vlan_id)
1588{
1589 struct net_device *ndev = qdev->ndev;
1590 struct sk_buff *skb = NULL;
1591 struct sk_buff *new_skb = NULL;
1592 struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring);
1593
1594 skb = sbq_desc->p.skb;
1595 /* Allocate new_skb and copy */
1596 new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
1597 if (new_skb == NULL) {
1598 QPRINTK(qdev, PROBE, ERR,
1599 "No skb available, drop the packet.\n");
1600 rx_ring->rx_dropped++;
1601 return;
1602 }
1603 skb_reserve(new_skb, NET_IP_ALIGN);
1604 memcpy(skb_put(new_skb, length), skb->data, length);
1605 skb = new_skb;
1606
1607 /* Frame error, so drop the packet. */
1608 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1609 QPRINTK(qdev, DRV, ERR, "Receive error, flags2 = 0x%x\n",
1610 ib_mac_rsp->flags2);
1611 dev_kfree_skb_any(skb);
1612 rx_ring->rx_errors++;
1613 return;
1614 }
1615
1616 /* loopback self test for ethtool */
1617 if (test_bit(QL_SELFTEST, &qdev->flags)) {
1618 ql_check_lb_frame(qdev, skb);
1619 dev_kfree_skb_any(skb);
1620 return;
1621 }
1622
1623 /* The max framesize filter on this chip is set higher than
1624 * MTU since FCoE uses 2k frames.
1625 */
1626 if (skb->len > ndev->mtu + ETH_HLEN) {
1627 dev_kfree_skb_any(skb);
1628 rx_ring->rx_dropped++;
1629 return;
1630 }
1631
1632 prefetch(skb->data);
1633 skb->dev = ndev;
1634 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1635 QPRINTK(qdev, RX_STATUS, DEBUG, "%s%s%s Multicast.\n",
1636 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1637 IB_MAC_IOCB_RSP_M_HASH ? "Hash" : "",
1638 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1639 IB_MAC_IOCB_RSP_M_REG ? "Registered" : "",
1640 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1641 IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1642 }
1643 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
1644 QPRINTK(qdev, RX_STATUS, DEBUG, "Promiscuous Packet.\n");
1645
1646 rx_ring->rx_packets++;
1647 rx_ring->rx_bytes += skb->len;
1648 skb->protocol = eth_type_trans(skb, ndev);
1649 skb->ip_summed = CHECKSUM_NONE;
1650
1651 /* If rx checksum is on, and there are no
1652 * csum or frame errors.
1653 */
1654 if (qdev->rx_csum &&
1655 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1656 /* TCP frame. */
1657 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1658 QPRINTK(qdev, RX_STATUS, DEBUG,
1659 "TCP checksum done!\n");
1660 skb->ip_summed = CHECKSUM_UNNECESSARY;
1661 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1662 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1663 /* Unfragmented ipv4 UDP frame. */
1664 struct iphdr *iph = (struct iphdr *) skb->data;
1665 if (!(iph->frag_off &
1666 cpu_to_be16(IP_MF|IP_OFFSET))) {
1667 skb->ip_summed = CHECKSUM_UNNECESSARY;
1668 QPRINTK(qdev, RX_STATUS, DEBUG,
1669 "TCP checksum done!\n");
1670 }
1671 }
1672 }
1673
1674 skb_record_rx_queue(skb, rx_ring->cq_id);
1675 if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
1676 if (qdev->vlgrp && (vlan_id != 0xffff))
1677 vlan_gro_receive(&rx_ring->napi, qdev->vlgrp,
1678 vlan_id, skb);
1679 else
1680 napi_gro_receive(&rx_ring->napi, skb);
1681 } else {
1682 if (qdev->vlgrp && (vlan_id != 0xffff))
1683 vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
1684 else
1685 netif_receive_skb(skb);
1686 }
1687}
1688
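/* Undo the QLGE_SB_PAD reserve and shift the received data back so it
 * starts on a 2-byte (NET_IP_ALIGN) boundary.
 */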
1689static void ql_realign_skb(struct sk_buff *skb, int len)
1690{
1691 void *temp_addr = skb->data;
1692
1693 /* Undo the skb_reserve(skb,32) we did before
1694 * giving to hardware, and realign data on
1695 * a 2-byte boundary.
1696 */
1697 skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
1698 skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
1699 skb_copy_to_linear_data(skb, temp_addr,
1700 (unsigned int)len);
1701}
1702
1703/*
1704 * This function builds an skb for the given inbound
1705 * completion. It will be rewritten for readability in the near
1706 * future, but for now it works well.
1707 */
1708static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1709 struct rx_ring *rx_ring,
1710 struct ib_mac_iocb_rsp *ib_mac_rsp)
1711{
1712 struct bq_desc *lbq_desc;
1713 struct bq_desc *sbq_desc;
1714 struct sk_buff *skb = NULL;
1715 u32 length = le32_to_cpu(ib_mac_rsp->data_len);
1716 u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
1717
1718 /*
1719 * Handle the header buffer if present.
1720 */
1721 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
1722 ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1723 QPRINTK(qdev, RX_STATUS, DEBUG, "Header of %d bytes in small buffer.\n", hdr_len);
1724 /*
1725 * Headers fit nicely into a small buffer.
1726 */
1727 sbq_desc = ql_get_curr_sbuf(rx_ring);
1728 pci_unmap_single(qdev->pdev,
1729 pci_unmap_addr(sbq_desc, mapaddr),
1730 pci_unmap_len(sbq_desc, maplen),
1731 PCI_DMA_FROMDEVICE);
1732 skb = sbq_desc->p.skb;
1733 ql_realign_skb(skb, hdr_len);
1734 skb_put(skb, hdr_len);
1735 sbq_desc->p.skb = NULL;
1736 }
1737
1738 /*
1739 * Handle the data buffer(s).
1740 */
1741 if (unlikely(!length)) { /* Is there data too? */
1742 QPRINTK(qdev, RX_STATUS, DEBUG,
1743 "No Data buffer in this packet.\n");
1744 return skb;
1745 }
1746
1747 if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
1748 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1749 QPRINTK(qdev, RX_STATUS, DEBUG,
1750 "Headers in small, data of %d bytes in small, combine them.\n", length);
1751 /*
1752 * Data is less than small buffer size so it's
1753 * stuffed in a small buffer.
1754 * For this case we append the data
1755 * from the "data" small buffer to the "header" small
1756 * buffer.
1757 */
1758 sbq_desc = ql_get_curr_sbuf(rx_ring);
1759 pci_dma_sync_single_for_cpu(qdev->pdev,
1760 pci_unmap_addr
1761 (sbq_desc, mapaddr),
1762 pci_unmap_len
1763 (sbq_desc, maplen),
1764 PCI_DMA_FROMDEVICE);
1765 memcpy(skb_put(skb, length),
1766 sbq_desc->p.skb->data, length);
1767 pci_dma_sync_single_for_device(qdev->pdev,
1768 pci_unmap_addr
1769 (sbq_desc,
1770 mapaddr),
1771 pci_unmap_len
1772 (sbq_desc,
1773 maplen),
1774 PCI_DMA_FROMDEVICE);
1775 } else {
1776 QPRINTK(qdev, RX_STATUS, DEBUG,
1777 "%d bytes in a single small buffer.\n", length);
1778 sbq_desc = ql_get_curr_sbuf(rx_ring);
1779 skb = sbq_desc->p.skb;
1780 ql_realign_skb(skb, length);
1781 skb_put(skb, length);
1782 pci_unmap_single(qdev->pdev,
1783 pci_unmap_addr(sbq_desc,
1784 mapaddr),
1785 pci_unmap_len(sbq_desc,
1786 maplen),
1787 PCI_DMA_FROMDEVICE);
1788 sbq_desc->p.skb = NULL;
1789 }
1790 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
1791 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1792 QPRINTK(qdev, RX_STATUS, DEBUG,
1793 "Header in small, %d bytes in large. Chain large to small!\n", length);
1794 /*
1795 * The data is in a single large buffer. We
1796 * chain it to the header buffer's skb and let
1797 * it rip.
1798 */
1799 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1800 QPRINTK(qdev, RX_STATUS, DEBUG,
1801 "Chaining page at offset = %d,"
1802 "for %d bytes to skb.\n",
1803 lbq_desc->p.pg_chunk.offset, length);
1804 skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1805 lbq_desc->p.pg_chunk.offset,
1806 length);
1807 skb->len += length;
1808 skb->data_len += length;
1809 skb->truesize += length;
1810 } else {
1811 /*
1812 * The headers and data are in a single large buffer. We
1813 * copy it to a new skb and let it go. This can happen with
1814 * jumbo mtu on a non-TCP/UDP frame.
1815 */
1816 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1817 skb = netdev_alloc_skb(qdev->ndev, length);
1818 if (skb == NULL) {
1819 QPRINTK(qdev, PROBE, DEBUG,
1820 "No skb available, drop the packet.\n");
1821 return NULL;
1822 }
1823 pci_unmap_page(qdev->pdev,
1824 pci_unmap_addr(lbq_desc,
1825 mapaddr),
1826 pci_unmap_len(lbq_desc, maplen),
1827 PCI_DMA_FROMDEVICE);
1828 skb_reserve(skb, NET_IP_ALIGN);
1829 QPRINTK(qdev, RX_STATUS, DEBUG,
1830 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n", length);
1831 skb_fill_page_desc(skb, 0,
1832 lbq_desc->p.pg_chunk.page,
1833 lbq_desc->p.pg_chunk.offset,
1834 length);
1835 skb->len += length;
1836 skb->data_len += length;
1837 skb->truesize += length;
1838 length -= length;
1839 __pskb_pull_tail(skb,
1840 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1841 VLAN_ETH_HLEN : ETH_HLEN);
1842 }
1843 } else {
1844 /*
1845 * The data is in a chain of large buffers
1846 * pointed to by a small buffer. We loop
1847 * through and chain them to our small header
1848 * buffer's skb.
1849 * frags: There are 18 max frags and our small
1850 * buffer will hold 32 of them. The thing is,
1851 * we'll use 3 max for our 9000 byte jumbo
1852 * frames. If the MTU goes up we could
1853 * eventually be in trouble.
1854 */
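 /* Illustrative arithmetic for the comment above (assuming a large-buffer
  * chunk size on the order of 4KB): a 9000-byte jumbo frame spans roughly
  * 9000 / 4096 ~= 3 chunks, comfortably below the 18-frag skb limit.
  */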
7c734359 1855 int size, i = 0;
c4e84bde
RM
1856 sbq_desc = ql_get_curr_sbuf(rx_ring);
1857 pci_unmap_single(qdev->pdev,
1858 pci_unmap_addr(sbq_desc, mapaddr),
1859 pci_unmap_len(sbq_desc, maplen),
1860 PCI_DMA_FROMDEVICE);
1861 if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
1862 /*
 1863 * This is a non-TCP/UDP IP frame, so
1864 * the headers aren't split into a small
1865 * buffer. We have to use the small buffer
1866 * that contains our sg list as our skb to
1867 * send upstairs. Copy the sg list here to
1868 * a local buffer and use it to find the
1869 * pages to chain.
1870 */
1871 QPRINTK(qdev, RX_STATUS, DEBUG,
1872 "%d bytes of headers & data in chain of large.\n", length);
1873 skb = sbq_desc->p.skb;
c4e84bde
RM
1874 sbq_desc->p.skb = NULL;
1875 skb_reserve(skb, NET_IP_ALIGN);
c4e84bde
RM
1876 }
1877 while (length > 0) {
7c734359
RM
1878 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1879 size = (length < rx_ring->lbq_buf_size) ? length :
1880 rx_ring->lbq_buf_size;
c4e84bde
RM
1881
1882 QPRINTK(qdev, RX_STATUS, DEBUG,
1883 "Adding page %d to skb for %d bytes.\n",
1884 i, size);
7c734359
RM
1885 skb_fill_page_desc(skb, i,
1886 lbq_desc->p.pg_chunk.page,
1887 lbq_desc->p.pg_chunk.offset,
1888 size);
c4e84bde
RM
1889 skb->len += size;
1890 skb->data_len += size;
1891 skb->truesize += size;
1892 length -= size;
c4e84bde
RM
1893 i++;
1894 }
1895 __pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1896 VLAN_ETH_HLEN : ETH_HLEN);
1897 }
1898 return skb;
1899}
1900
1901/* Process an inbound completion from an rx ring. */
4f848c0a 1902static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
c4e84bde 1903 struct rx_ring *rx_ring,
4f848c0a
RM
1904 struct ib_mac_iocb_rsp *ib_mac_rsp,
1905 u16 vlan_id)
c4e84bde
RM
1906{
1907 struct net_device *ndev = qdev->ndev;
1908 struct sk_buff *skb = NULL;
1909
1910 QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
1911
1912 skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
1913 if (unlikely(!skb)) {
1914 QPRINTK(qdev, RX_STATUS, DEBUG,
1915 "No skb available, drop packet.\n");
885ee398 1916 rx_ring->rx_dropped++;
c4e84bde
RM
1917 return;
1918 }
1919
a32959cd
RM
1920 /* Frame error, so drop the packet. */
1921 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1922 QPRINTK(qdev, DRV, ERR, "Receive error, flags2 = 0x%x\n",
1923 ib_mac_rsp->flags2);
1924 dev_kfree_skb_any(skb);
885ee398 1925 rx_ring->rx_errors++;
a32959cd
RM
1926 return;
1927 }
ec33a491
RM
1928
1929 /* The max framesize filter on this chip is set higher than
1930 * MTU since FCoE uses 2k frames.
1931 */
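 /* For example, with the standard 1500-byte MTU anything longer than
  * 1514 bytes (MTU plus the 14-byte Ethernet header) is dropped here,
  * even though the hardware frame-size filter let it through.
  */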
1932 if (skb->len > ndev->mtu + ETH_HLEN) {
1933 dev_kfree_skb_any(skb);
885ee398 1934 rx_ring->rx_dropped++;
ec33a491
RM
1935 return;
1936 }
1937
9dfbbaa6
RM
1938 /* loopback self test for ethtool */
1939 if (test_bit(QL_SELFTEST, &qdev->flags)) {
1940 ql_check_lb_frame(qdev, skb);
1941 dev_kfree_skb_any(skb);
1942 return;
1943 }
1944
c4e84bde
RM
1945 prefetch(skb->data);
1946 skb->dev = ndev;
1947 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1948 QPRINTK(qdev, RX_STATUS, DEBUG, "%s%s%s Multicast.\n",
1949 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1950 IB_MAC_IOCB_RSP_M_HASH ? "Hash" : "",
1951 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1952 IB_MAC_IOCB_RSP_M_REG ? "Registered" : "",
1953 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1954 IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
885ee398 1955 rx_ring->rx_multicast++;
c4e84bde
RM
1956 }
1957 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
1958 QPRINTK(qdev, RX_STATUS, DEBUG, "Promiscuous Packet.\n");
1959 }
d555f592 1960
d555f592
RM
1961 skb->protocol = eth_type_trans(skb, ndev);
1962 skb->ip_summed = CHECKSUM_NONE;
1963
1964 /* If rx checksum is on, and there are no
1965 * csum or frame errors.
1966 */
1967 if (qdev->rx_csum &&
d555f592
RM
1968 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1969 /* TCP frame. */
1970 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1971 QPRINTK(qdev, RX_STATUS, DEBUG,
1972 "TCP checksum done!\n");
1973 skb->ip_summed = CHECKSUM_UNNECESSARY;
1974 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1975 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1976 /* Unfragmented ipv4 UDP frame. */
1977 struct iphdr *iph = (struct iphdr *) skb->data;
1978 if (!(iph->frag_off &
1979 cpu_to_be16(IP_MF|IP_OFFSET))) {
1980 skb->ip_summed = CHECKSUM_UNNECESSARY;
1981 QPRINTK(qdev, RX_STATUS, DEBUG,
1982 "TCP checksum done!\n");
1983 }
1984 }
c4e84bde 1985 }
d555f592 1986
885ee398
RM
1987 rx_ring->rx_packets++;
1988 rx_ring->rx_bytes += skb->len;
b2014ff8 1989 skb_record_rx_queue(skb, rx_ring->cq_id);
22bdd4f5
RM
1990 if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
1991 if (qdev->vlgrp &&
1992 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
1993 (vlan_id != 0))
1994 vlan_gro_receive(&rx_ring->napi, qdev->vlgrp,
1995 vlan_id, skb);
1996 else
1997 napi_gro_receive(&rx_ring->napi, skb);
c4e84bde 1998 } else {
22bdd4f5
RM
1999 if (qdev->vlgrp &&
2000 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
2001 (vlan_id != 0))
2002 vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
2003 else
2004 netif_receive_skb(skb);
c4e84bde 2005 }
c4e84bde
RM
2006}
2007
4f848c0a
RM
2008/* Process an inbound completion from an rx ring. */
2009static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
2010 struct rx_ring *rx_ring,
2011 struct ib_mac_iocb_rsp *ib_mac_rsp)
2012{
2013 u32 length = le32_to_cpu(ib_mac_rsp->data_len);
2014 u16 vlan_id = (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
2015 ((le16_to_cpu(ib_mac_rsp->vlan_id) &
2016 IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
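 /* When the V flag is clear there is no VLAN tag on the frame; 0xffff
  * simply acts as a "no tag" placeholder for the handlers called below.
  */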
2017
2018 QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
2019
2020 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
2021 /* The data and headers are split into
2022 * separate buffers.
2023 */
2024 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2025 vlan_id);
2026 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
2027 /* The data fit in a single small buffer.
2028 * Allocate a new skb, copy the data and
2029 * return the buffer to the free pool.
2030 */
2031 ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp,
2032 length, vlan_id);
63526713
RM
2033 } else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
2034 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
2035 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
2036 /* TCP packet in a page chunk that's been checksummed.
2037 * Tack it on to our GRO skb and let it go.
2038 */
2039 ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp,
2040 length, vlan_id);
4f848c0a
RM
2041 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
2042 /* Non-TCP packet in a page chunk. Allocate an
2043 * skb, tack it on frags, and send it up.
2044 */
2045 ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp,
2046 length, vlan_id);
2047 } else {
2048 struct bq_desc *lbq_desc;
2049
2050 /* Free small buffer that holds the IAL */
2051 lbq_desc = ql_get_curr_sbuf(rx_ring);
2052 QPRINTK(qdev, RX_ERR, ERR, "Dropping frame, len %d > mtu %d\n",
2053 length, qdev->ndev->mtu);
2054
2055 /* Unwind the large buffers for this frame. */
2056 while (length > 0) {
2057 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
2058 length -= (length < rx_ring->lbq_buf_size) ?
2059 length : rx_ring->lbq_buf_size;
2060 put_page(lbq_desc->p.pg_chunk.page);
2061 }
2062 }
2063
2064 return (unsigned long)length;
2065}
2066
c4e84bde
RM
2067/* Process an outbound completion from an rx ring. */
2068static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
2069 struct ob_mac_iocb_rsp *mac_rsp)
2070{
2071 struct tx_ring *tx_ring;
2072 struct tx_ring_desc *tx_ring_desc;
2073
2074 QL_DUMP_OB_MAC_RSP(mac_rsp);
2075 tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
2076 tx_ring_desc = &tx_ring->q[mac_rsp->tid];
2077 ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
885ee398
RM
2078 tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
2079 tx_ring->tx_packets++;
c4e84bde
RM
2080 dev_kfree_skb(tx_ring_desc->skb);
2081 tx_ring_desc->skb = NULL;
2082
2083 if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
2084 OB_MAC_IOCB_RSP_S |
2085 OB_MAC_IOCB_RSP_L |
2086 OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
2087 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
2088 QPRINTK(qdev, TX_DONE, WARNING,
2089 "Total descriptor length did not match transfer length.\n");
2090 }
2091 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
2092 QPRINTK(qdev, TX_DONE, WARNING,
2093 "Frame too short to be legal, not sent.\n");
2094 }
2095 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
2096 QPRINTK(qdev, TX_DONE, WARNING,
2097 "Frame too long, but sent anyway.\n");
2098 }
2099 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
2100 QPRINTK(qdev, TX_DONE, WARNING,
2101 "PCI backplane error. Frame not sent.\n");
2102 }
2103 }
2104 atomic_inc(&tx_ring->tx_count);
2105}
2106
2107/* Fire up a handler to reset the MPI processor. */
2108void ql_queue_fw_error(struct ql_adapter *qdev)
2109{
6a473308 2110 ql_link_off(qdev);
c4e84bde
RM
2111 queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
2112}
2113
2114void ql_queue_asic_error(struct ql_adapter *qdev)
2115{
6a473308 2116 ql_link_off(qdev);
c4e84bde 2117 ql_disable_interrupts(qdev);
6497b607
RM
2118 /* Clear adapter up bit to signal the recovery
2119 * process that it shouldn't kill the reset worker
2120 * thread
2121 */
2122 clear_bit(QL_ADAPTER_UP, &qdev->flags);
c4e84bde
RM
2123 queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
2124}
2125
2126static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
2127 struct ib_ae_iocb_rsp *ib_ae_rsp)
2128{
2129 switch (ib_ae_rsp->event) {
2130 case MGMT_ERR_EVENT:
2131 QPRINTK(qdev, RX_ERR, ERR,
2132 "Management Processor Fatal Error.\n");
2133 ql_queue_fw_error(qdev);
2134 return;
2135
2136 case CAM_LOOKUP_ERR_EVENT:
2137 QPRINTK(qdev, LINK, ERR,
2138 "Multiple CAM hits lookup occurred.\n");
2139 QPRINTK(qdev, DRV, ERR, "This event shouldn't occur.\n");
2140 ql_queue_asic_error(qdev);
2141 return;
2142
2143 case SOFT_ECC_ERROR_EVENT:
2144 QPRINTK(qdev, RX_ERR, ERR, "Soft ECC error detected.\n");
2145 ql_queue_asic_error(qdev);
2146 break;
2147
2148 case PCI_ERR_ANON_BUF_RD:
2149 QPRINTK(qdev, RX_ERR, ERR,
2150 "PCI error occurred when reading anonymous buffers from rx_ring %d.\n",
2151 ib_ae_rsp->q_id);
2152 ql_queue_asic_error(qdev);
2153 break;
2154
2155 default:
2156 QPRINTK(qdev, DRV, ERR, "Unexpected event %d.\n",
2157 ib_ae_rsp->event);
2158 ql_queue_asic_error(qdev);
2159 break;
2160 }
2161}
2162
2163static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
2164{
2165 struct ql_adapter *qdev = rx_ring->qdev;
ba7cd3ba 2166 u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
c4e84bde
RM
2167 struct ob_mac_iocb_rsp *net_rsp = NULL;
2168 int count = 0;
2169
1e213303 2170 struct tx_ring *tx_ring;
c4e84bde
RM
2171 /* While there are entries in the completion queue. */
2172 while (prod != rx_ring->cnsmr_idx) {
2173
2174 QPRINTK(qdev, RX_STATUS, DEBUG,
2175 "cq_id = %d, prod = %d, cnsmr = %d.\n.", rx_ring->cq_id,
2176 prod, rx_ring->cnsmr_idx);
2177
2178 net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
2179 rmb();
2180 switch (net_rsp->opcode) {
2181
2182 case OPCODE_OB_MAC_TSO_IOCB:
2183 case OPCODE_OB_MAC_IOCB:
2184 ql_process_mac_tx_intr(qdev, net_rsp);
2185 break;
2186 default:
2187 QPRINTK(qdev, RX_STATUS, DEBUG,
2188 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2189 net_rsp->opcode);
2190 }
2191 count++;
2192 ql_update_cq(rx_ring);
ba7cd3ba 2193 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
c4e84bde
RM
2194 }
2195 ql_write_cq_idx(rx_ring);
1e213303
RM
2196 tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
2197 if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id) &&
2198 net_rsp != NULL) {
c4e84bde
RM
 2199 atomic_read(&tx_ring->queue_stopped) &&
 2200 (atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
2201 /*
2202 * The queue got stopped because the tx_ring was full.
2203 * Wake it up, because it's now at least 25% empty.
2204 */
1e213303 2205 netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
c4e84bde
RM
2206 }
2207
2208 return count;
2209}
2210
2211static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
2212{
2213 struct ql_adapter *qdev = rx_ring->qdev;
ba7cd3ba 2214 u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
c4e84bde
RM
2215 struct ql_net_rsp_iocb *net_rsp;
2216 int count = 0;
2217
2218 /* While there are entries in the completion queue. */
2219 while (prod != rx_ring->cnsmr_idx) {
2220
2221 QPRINTK(qdev, RX_STATUS, DEBUG,
2222 "cq_id = %d, prod = %d, cnsmr = %d.\n.", rx_ring->cq_id,
2223 prod, rx_ring->cnsmr_idx);
2224
2225 net_rsp = rx_ring->curr_entry;
2226 rmb();
2227 switch (net_rsp->opcode) {
2228 case OPCODE_IB_MAC_IOCB:
2229 ql_process_mac_rx_intr(qdev, rx_ring,
2230 (struct ib_mac_iocb_rsp *)
2231 net_rsp);
2232 break;
2233
2234 case OPCODE_IB_AE_IOCB:
2235 ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
2236 net_rsp);
2237 break;
2238 default:
2239 {
2240 QPRINTK(qdev, RX_STATUS, DEBUG,
2241 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2242 net_rsp->opcode);
2243 }
2244 }
2245 count++;
2246 ql_update_cq(rx_ring);
ba7cd3ba 2247 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
c4e84bde
RM
2248 if (count == budget)
2249 break;
2250 }
2251 ql_update_buffer_queues(qdev, rx_ring);
2252 ql_write_cq_idx(rx_ring);
2253 return count;
2254}
2255
2256static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
2257{
2258 struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
2259 struct ql_adapter *qdev = rx_ring->qdev;
39aa8165
RM
2260 struct rx_ring *trx_ring;
2261 int i, work_done = 0;
2262 struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];
c4e84bde
RM
2263
2264 QPRINTK(qdev, RX_STATUS, DEBUG, "Enter, NAPI POLL cq_id = %d.\n",
2265 rx_ring->cq_id);
2266
39aa8165
RM
2267 /* Service the TX rings first. They start
2268 * right after the RSS rings. */
2269 for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
2270 trx_ring = &qdev->rx_ring[i];
2271 /* If this TX completion ring belongs to this vector and
2272 * it's not empty then service it.
2273 */
2274 if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
2275 (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
2276 trx_ring->cnsmr_idx)) {
2277 QPRINTK(qdev, INTR, DEBUG,
2278 "%s: Servicing TX completion ring %d.\n",
2279 __func__, trx_ring->cq_id);
2280 ql_clean_outbound_rx_ring(trx_ring);
2281 }
2282 }
2283
2284 /*
2285 * Now service the RSS ring if it's active.
2286 */
2287 if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
2288 rx_ring->cnsmr_idx) {
2289 QPRINTK(qdev, INTR, DEBUG,
2290 "%s: Servicing RX completion ring %d.\n",
2291 __func__, rx_ring->cq_id);
2292 work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
2293 }
2294
c4e84bde 2295 if (work_done < budget) {
22bdd4f5 2296 napi_complete(napi);
c4e84bde
RM
2297 ql_enable_completion_interrupt(qdev, rx_ring->irq);
2298 }
2299 return work_done;
2300}
2301
01e6b953 2302static void qlge_vlan_rx_register(struct net_device *ndev, struct vlan_group *grp)
c4e84bde
RM
2303{
2304 struct ql_adapter *qdev = netdev_priv(ndev);
2305
2306 qdev->vlgrp = grp;
2307 if (grp) {
2308 QPRINTK(qdev, IFUP, DEBUG, "Turning on VLAN in NIC_RCV_CFG.\n");
2309 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
2310 NIC_RCV_CFG_VLAN_MATCH_AND_NON);
2311 } else {
2312 QPRINTK(qdev, IFUP, DEBUG,
2313 "Turning off VLAN in NIC_RCV_CFG.\n");
2314 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
2315 }
2316}
2317
01e6b953 2318static void qlge_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
c4e84bde
RM
2319{
2320 struct ql_adapter *qdev = netdev_priv(ndev);
2321 u32 enable_bit = MAC_ADDR_E;
cc288f54 2322 int status;
c4e84bde 2323
cc288f54
RM
2324 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2325 if (status)
2326 return;
c4e84bde
RM
2327 if (ql_set_mac_addr_reg
2328 (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
2329 QPRINTK(qdev, IFUP, ERR, "Failed to init vlan address.\n");
2330 }
cc288f54 2331 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
c4e84bde
RM
2332}
2333
01e6b953 2334static void qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
c4e84bde
RM
2335{
2336 struct ql_adapter *qdev = netdev_priv(ndev);
2337 u32 enable_bit = 0;
cc288f54
RM
2338 int status;
2339
2340 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2341 if (status)
2342 return;
c4e84bde 2343
c4e84bde
RM
2344 if (ql_set_mac_addr_reg
2345 (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
2346 QPRINTK(qdev, IFUP, ERR, "Failed to clear vlan address.\n");
2347 }
cc288f54 2348 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
c4e84bde
RM
2349
2350}
2351
c4e84bde
RM
2352/* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
2353static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
2354{
2355 struct rx_ring *rx_ring = dev_id;
288379f0 2356 napi_schedule(&rx_ring->napi);
c4e84bde
RM
2357 return IRQ_HANDLED;
2358}
2359
c4e84bde
RM
2360/* This handles a fatal error, MPI activity, and the default
2361 * rx_ring in an MSI-X multiple vector environment.
 2362 * In an MSI/Legacy environment it also processes the rest of
2363 * the rx_rings.
2364 */
2365static irqreturn_t qlge_isr(int irq, void *dev_id)
2366{
2367 struct rx_ring *rx_ring = dev_id;
2368 struct ql_adapter *qdev = rx_ring->qdev;
2369 struct intr_context *intr_context = &qdev->intr_context[0];
2370 u32 var;
c4e84bde
RM
2371 int work_done = 0;
2372
bb0d215c
RM
2373 spin_lock(&qdev->hw_lock);
2374 if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
2375 QPRINTK(qdev, INTR, DEBUG, "Shared Interrupt, Not ours!\n");
2376 spin_unlock(&qdev->hw_lock);
2377 return IRQ_NONE;
c4e84bde 2378 }
bb0d215c 2379 spin_unlock(&qdev->hw_lock);
c4e84bde 2380
bb0d215c 2381 var = ql_disable_completion_interrupt(qdev, intr_context->intr);
c4e84bde
RM
2382
2383 /*
2384 * Check for fatal error.
2385 */
2386 if (var & STS_FE) {
2387 ql_queue_asic_error(qdev);
2388 QPRINTK(qdev, INTR, ERR, "Got fatal error, STS = %x.\n", var);
2389 var = ql_read32(qdev, ERR_STS);
2390 QPRINTK(qdev, INTR, ERR,
2391 "Resetting chip. Error Status Register = 0x%x\n", var);
2392 return IRQ_HANDLED;
2393 }
2394
2395 /*
2396 * Check MPI processor activity.
2397 */
5ee22a5a
RM
2398 if ((var & STS_PI) &&
2399 (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
c4e84bde
RM
2400 /*
2401 * We've got an async event or mailbox completion.
2402 * Handle it and clear the source of the interrupt.
2403 */
2404 QPRINTK(qdev, INTR, ERR, "Got MPI processor interrupt.\n");
2405 ql_disable_completion_interrupt(qdev, intr_context->intr);
5ee22a5a
RM
2406 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
2407 queue_delayed_work_on(smp_processor_id(),
2408 qdev->workqueue, &qdev->mpi_work, 0);
c4e84bde
RM
2409 work_done++;
2410 }
2411
2412 /*
39aa8165
RM
2413 * Get the bit-mask that shows the active queues for this
2414 * pass. Compare it to the queues that this irq services
2415 * and call napi if there's a match.
c4e84bde 2416 */
39aa8165
RM
2417 var = ql_read32(qdev, ISR1);
2418 if (var & intr_context->irq_mask) {
32a5b2a0 2419 QPRINTK(qdev, INTR, INFO,
39aa8165
RM
2420 "Waking handler for rx_ring[0].\n");
2421 ql_disable_completion_interrupt(qdev, intr_context->intr);
32a5b2a0
RM
2422 napi_schedule(&rx_ring->napi);
2423 work_done++;
2424 }
bb0d215c 2425 ql_enable_completion_interrupt(qdev, intr_context->intr);
c4e84bde
RM
2426 return work_done ? IRQ_HANDLED : IRQ_NONE;
2427}
2428
2429static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2430{
2431
2432 if (skb_is_gso(skb)) {
2433 int err;
2434 if (skb_header_cloned(skb)) {
2435 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2436 if (err)
2437 return err;
2438 }
2439
2440 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2441 mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
2442 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2443 mac_iocb_ptr->total_hdrs_len =
2444 cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
2445 mac_iocb_ptr->net_trans_offset =
2446 cpu_to_le16(skb_network_offset(skb) |
2447 skb_transport_offset(skb)
2448 << OB_MAC_TRANSPORT_HDR_SHIFT);
2449 mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2450 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
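 /* Seed the TCP checksum field with the pseudo-header sum computed over a
  * zero length (the usual LSO convention); the hardware is expected to
  * fill in the real per-segment checksums as it slices the frame into
  * MSS-sized segments.
  */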
2451 if (likely(skb->protocol == htons(ETH_P_IP))) {
2452 struct iphdr *iph = ip_hdr(skb);
2453 iph->check = 0;
2454 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2455 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2456 iph->daddr, 0,
2457 IPPROTO_TCP,
2458 0);
2459 } else if (skb->protocol == htons(ETH_P_IPV6)) {
2460 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
2461 tcp_hdr(skb)->check =
2462 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2463 &ipv6_hdr(skb)->daddr,
2464 0, IPPROTO_TCP, 0);
2465 }
2466 return 1;
2467 }
2468 return 0;
2469}
2470
2471static void ql_hw_csum_setup(struct sk_buff *skb,
2472 struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2473{
2474 int len;
2475 struct iphdr *iph = ip_hdr(skb);
fd2df4f7 2476 __sum16 *check;
c4e84bde
RM
2477 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2478 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2479 mac_iocb_ptr->net_trans_offset =
2480 cpu_to_le16(skb_network_offset(skb) |
2481 skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
2482
2483 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2484 len = (ntohs(iph->tot_len) - (iph->ihl << 2));
2485 if (likely(iph->protocol == IPPROTO_TCP)) {
2486 check = &(tcp_hdr(skb)->check);
2487 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
2488 mac_iocb_ptr->total_hdrs_len =
2489 cpu_to_le16(skb_transport_offset(skb) +
2490 (tcp_hdr(skb)->doff << 2));
2491 } else {
2492 check = &(udp_hdr(skb)->check);
2493 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
2494 mac_iocb_ptr->total_hdrs_len =
2495 cpu_to_le16(skb_transport_offset(skb) +
2496 sizeof(struct udphdr));
2497 }
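 /* Store the complemented pseudo-header sum in the checksum field; the
  * hardware is then expected to fold in the one's-complement sum of the
  * TCP/UDP payload to produce the final checksum.
  */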
2498 *check = ~csum_tcpudp_magic(iph->saddr,
2499 iph->daddr, len, iph->protocol, 0);
2500}
2501
61357325 2502static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
c4e84bde
RM
2503{
2504 struct tx_ring_desc *tx_ring_desc;
2505 struct ob_mac_iocb_req *mac_iocb_ptr;
2506 struct ql_adapter *qdev = netdev_priv(ndev);
2507 int tso;
2508 struct tx_ring *tx_ring;
1e213303 2509 u32 tx_ring_idx = (u32) skb->queue_mapping;
c4e84bde
RM
2510
2511 tx_ring = &qdev->tx_ring[tx_ring_idx];
2512
74c50b4b
RM
2513 if (skb_padto(skb, ETH_ZLEN))
2514 return NETDEV_TX_OK;
2515
c4e84bde
RM
2516 if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
2517 QPRINTK(qdev, TX_QUEUED, INFO,
2518 "%s: shutting down tx queue %d du to lack of resources.\n",
2519 __func__, tx_ring_idx);
1e213303 2520 netif_stop_subqueue(ndev, tx_ring->wq_id);
c4e84bde 2521 atomic_inc(&tx_ring->queue_stopped);
885ee398 2522 tx_ring->tx_errors++;
c4e84bde
RM
2523 return NETDEV_TX_BUSY;
2524 }
2525 tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
2526 mac_iocb_ptr = tx_ring_desc->queue_entry;
e332471c 2527 memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
c4e84bde
RM
2528
2529 mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
2530 mac_iocb_ptr->tid = tx_ring_desc->index;
2531 /* We use the upper 32-bits to store the tx queue for this IO.
2532 * When we get the completion we can use it to establish the context.
2533 */
2534 mac_iocb_ptr->txq_idx = tx_ring_idx;
2535 tx_ring_desc->skb = skb;
2536
2537 mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
2538
2539 if (qdev->vlgrp && vlan_tx_tag_present(skb)) {
2540 QPRINTK(qdev, TX_QUEUED, DEBUG, "Adding a vlan tag %d.\n",
2541 vlan_tx_tag_get(skb));
2542 mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
2543 mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb));
2544 }
2545 tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2546 if (tso < 0) {
2547 dev_kfree_skb_any(skb);
2548 return NETDEV_TX_OK;
2549 } else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
2550 ql_hw_csum_setup(skb,
2551 (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2552 }
0d979f74
RM
2553 if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
2554 NETDEV_TX_OK) {
2555 QPRINTK(qdev, TX_QUEUED, ERR,
2556 "Could not map the segments.\n");
885ee398 2557 tx_ring->tx_errors++;
0d979f74
RM
2558 return NETDEV_TX_BUSY;
2559 }
c4e84bde
RM
2560 QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
2561 tx_ring->prod_idx++;
2562 if (tx_ring->prod_idx == tx_ring->wq_len)
2563 tx_ring->prod_idx = 0;
2564 wmb();
2565
2566 ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
c4e84bde
RM
2567 QPRINTK(qdev, TX_QUEUED, DEBUG, "tx queued, slot %d, len %d\n",
2568 tx_ring->prod_idx, skb->len);
2569
2570 atomic_dec(&tx_ring->tx_count);
2571 return NETDEV_TX_OK;
2572}
2573
9dfbbaa6 2574
c4e84bde
RM
2575static void ql_free_shadow_space(struct ql_adapter *qdev)
2576{
2577 if (qdev->rx_ring_shadow_reg_area) {
2578 pci_free_consistent(qdev->pdev,
2579 PAGE_SIZE,
2580 qdev->rx_ring_shadow_reg_area,
2581 qdev->rx_ring_shadow_reg_dma);
2582 qdev->rx_ring_shadow_reg_area = NULL;
2583 }
2584 if (qdev->tx_ring_shadow_reg_area) {
2585 pci_free_consistent(qdev->pdev,
2586 PAGE_SIZE,
2587 qdev->tx_ring_shadow_reg_area,
2588 qdev->tx_ring_shadow_reg_dma);
2589 qdev->tx_ring_shadow_reg_area = NULL;
2590 }
2591}
2592
2593static int ql_alloc_shadow_space(struct ql_adapter *qdev)
2594{
2595 qdev->rx_ring_shadow_reg_area =
2596 pci_alloc_consistent(qdev->pdev,
2597 PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma);
2598 if (qdev->rx_ring_shadow_reg_area == NULL) {
2599 QPRINTK(qdev, IFUP, ERR,
2600 "Allocation of RX shadow space failed.\n");
2601 return -ENOMEM;
2602 }
b25215d0 2603 memset(qdev->rx_ring_shadow_reg_area, 0, PAGE_SIZE);
c4e84bde
RM
2604 qdev->tx_ring_shadow_reg_area =
2605 pci_alloc_consistent(qdev->pdev, PAGE_SIZE,
2606 &qdev->tx_ring_shadow_reg_dma);
2607 if (qdev->tx_ring_shadow_reg_area == NULL) {
2608 QPRINTK(qdev, IFUP, ERR,
2609 "Allocation of TX shadow space failed.\n");
2610 goto err_wqp_sh_area;
2611 }
b25215d0 2612 memset(qdev->tx_ring_shadow_reg_area, 0, PAGE_SIZE);
c4e84bde
RM
2613 return 0;
2614
2615err_wqp_sh_area:
2616 pci_free_consistent(qdev->pdev,
2617 PAGE_SIZE,
2618 qdev->rx_ring_shadow_reg_area,
2619 qdev->rx_ring_shadow_reg_dma);
2620 return -ENOMEM;
2621}
2622
2623static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2624{
2625 struct tx_ring_desc *tx_ring_desc;
2626 int i;
2627 struct ob_mac_iocb_req *mac_iocb_ptr;
2628
2629 mac_iocb_ptr = tx_ring->wq_base;
2630 tx_ring_desc = tx_ring->q;
2631 for (i = 0; i < tx_ring->wq_len; i++) {
2632 tx_ring_desc->index = i;
2633 tx_ring_desc->skb = NULL;
2634 tx_ring_desc->queue_entry = mac_iocb_ptr;
2635 mac_iocb_ptr++;
2636 tx_ring_desc++;
2637 }
2638 atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
2639 atomic_set(&tx_ring->queue_stopped, 0);
2640}
2641
2642static void ql_free_tx_resources(struct ql_adapter *qdev,
2643 struct tx_ring *tx_ring)
2644{
2645 if (tx_ring->wq_base) {
2646 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2647 tx_ring->wq_base, tx_ring->wq_base_dma);
2648 tx_ring->wq_base = NULL;
2649 }
2650 kfree(tx_ring->q);
2651 tx_ring->q = NULL;
2652}
2653
2654static int ql_alloc_tx_resources(struct ql_adapter *qdev,
2655 struct tx_ring *tx_ring)
2656{
2657 tx_ring->wq_base =
2658 pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
2659 &tx_ring->wq_base_dma);
2660
8e95a202
JP
2661 if ((tx_ring->wq_base == NULL) ||
2662 tx_ring->wq_base_dma & WQ_ADDR_ALIGN) {
c4e84bde
RM
2663 QPRINTK(qdev, IFUP, ERR, "tx_ring alloc failed.\n");
2664 return -ENOMEM;
2665 }
2666 tx_ring->q =
2667 kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
2668 if (tx_ring->q == NULL)
2669 goto err;
2670
2671 return 0;
2672err:
2673 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2674 tx_ring->wq_base, tx_ring->wq_base_dma);
2675 return -ENOMEM;
2676}
2677
8668ae92 2678static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
c4e84bde 2679{
c4e84bde
RM
2680 struct bq_desc *lbq_desc;
2681
7c734359
RM
2682 uint32_t curr_idx, clean_idx;
2683
2684 curr_idx = rx_ring->lbq_curr_idx;
2685 clean_idx = rx_ring->lbq_clean_idx;
2686 while (curr_idx != clean_idx) {
2687 lbq_desc = &rx_ring->lbq[curr_idx];
2688
2689 if (lbq_desc->p.pg_chunk.last_flag) {
c4e84bde 2690 pci_unmap_page(qdev->pdev,
7c734359
RM
2691 lbq_desc->p.pg_chunk.map,
2692 ql_lbq_block_size(qdev),
c4e84bde 2693 PCI_DMA_FROMDEVICE);
7c734359 2694 lbq_desc->p.pg_chunk.last_flag = 0;
c4e84bde 2695 }
7c734359
RM
2696
2697 put_page(lbq_desc->p.pg_chunk.page);
2698 lbq_desc->p.pg_chunk.page = NULL;
2699
2700 if (++curr_idx == rx_ring->lbq_len)
2701 curr_idx = 0;
2702
c4e84bde
RM
2703 }
2704}
2705
8668ae92 2706static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
c4e84bde
RM
2707{
2708 int i;
2709 struct bq_desc *sbq_desc;
2710
2711 for (i = 0; i < rx_ring->sbq_len; i++) {
2712 sbq_desc = &rx_ring->sbq[i];
2713 if (sbq_desc == NULL) {
2714 QPRINTK(qdev, IFUP, ERR, "sbq_desc %d is NULL.\n", i);
2715 return;
2716 }
2717 if (sbq_desc->p.skb) {
2718 pci_unmap_single(qdev->pdev,
2719 pci_unmap_addr(sbq_desc, mapaddr),
2720 pci_unmap_len(sbq_desc, maplen),
2721 PCI_DMA_FROMDEVICE);
2722 dev_kfree_skb(sbq_desc->p.skb);
2723 sbq_desc->p.skb = NULL;
2724 }
c4e84bde
RM
2725 }
2726}
2727
4545a3f2
RM
2728/* Free all large and small rx buffers associated
2729 * with the completion queues for this device.
2730 */
2731static void ql_free_rx_buffers(struct ql_adapter *qdev)
2732{
2733 int i;
2734 struct rx_ring *rx_ring;
2735
2736 for (i = 0; i < qdev->rx_ring_count; i++) {
2737 rx_ring = &qdev->rx_ring[i];
2738 if (rx_ring->lbq)
2739 ql_free_lbq_buffers(qdev, rx_ring);
2740 if (rx_ring->sbq)
2741 ql_free_sbq_buffers(qdev, rx_ring);
2742 }
2743}
2744
2745static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
2746{
2747 struct rx_ring *rx_ring;
2748 int i;
2749
2750 for (i = 0; i < qdev->rx_ring_count; i++) {
2751 rx_ring = &qdev->rx_ring[i];
2752 if (rx_ring->type != TX_Q)
2753 ql_update_buffer_queues(qdev, rx_ring);
2754 }
2755}
2756
2757static void ql_init_lbq_ring(struct ql_adapter *qdev,
2758 struct rx_ring *rx_ring)
2759{
2760 int i;
2761 struct bq_desc *lbq_desc;
2762 __le64 *bq = rx_ring->lbq_base;
2763
2764 memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc));
2765 for (i = 0; i < rx_ring->lbq_len; i++) {
2766 lbq_desc = &rx_ring->lbq[i];
2767 memset(lbq_desc, 0, sizeof(*lbq_desc));
2768 lbq_desc->index = i;
2769 lbq_desc->addr = bq;
2770 bq++;
2771 }
2772}
2773
2774static void ql_init_sbq_ring(struct ql_adapter *qdev,
c4e84bde
RM
2775 struct rx_ring *rx_ring)
2776{
2777 int i;
2778 struct bq_desc *sbq_desc;
2c9a0d41 2779 __le64 *bq = rx_ring->sbq_base;
c4e84bde 2780
4545a3f2 2781 memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc));
c4e84bde
RM
2782 for (i = 0; i < rx_ring->sbq_len; i++) {
2783 sbq_desc = &rx_ring->sbq[i];
4545a3f2 2784 memset(sbq_desc, 0, sizeof(*sbq_desc));
c4e84bde 2785 sbq_desc->index = i;
2c9a0d41 2786 sbq_desc->addr = bq;
c4e84bde
RM
2787 bq++;
2788 }
c4e84bde
RM
2789}
2790
2791static void ql_free_rx_resources(struct ql_adapter *qdev,
2792 struct rx_ring *rx_ring)
2793{
c4e84bde
RM
2794 /* Free the small buffer queue. */
2795 if (rx_ring->sbq_base) {
2796 pci_free_consistent(qdev->pdev,
2797 rx_ring->sbq_size,
2798 rx_ring->sbq_base, rx_ring->sbq_base_dma);
2799 rx_ring->sbq_base = NULL;
2800 }
2801
2802 /* Free the small buffer queue control blocks. */
2803 kfree(rx_ring->sbq);
2804 rx_ring->sbq = NULL;
2805
2806 /* Free the large buffer queue. */
2807 if (rx_ring->lbq_base) {
2808 pci_free_consistent(qdev->pdev,
2809 rx_ring->lbq_size,
2810 rx_ring->lbq_base, rx_ring->lbq_base_dma);
2811 rx_ring->lbq_base = NULL;
2812 }
2813
2814 /* Free the large buffer queue control blocks. */
2815 kfree(rx_ring->lbq);
2816 rx_ring->lbq = NULL;
2817
2818 /* Free the rx queue. */
2819 if (rx_ring->cq_base) {
2820 pci_free_consistent(qdev->pdev,
2821 rx_ring->cq_size,
2822 rx_ring->cq_base, rx_ring->cq_base_dma);
2823 rx_ring->cq_base = NULL;
2824 }
2825}
2826
 2827/* Allocate queues and buffers for this completion queue based
2828 * on the values in the parameter structure. */
2829static int ql_alloc_rx_resources(struct ql_adapter *qdev,
2830 struct rx_ring *rx_ring)
2831{
2832
2833 /*
2834 * Allocate the completion queue for this rx_ring.
2835 */
2836 rx_ring->cq_base =
2837 pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
2838 &rx_ring->cq_base_dma);
2839
2840 if (rx_ring->cq_base == NULL) {
2841 QPRINTK(qdev, IFUP, ERR, "rx_ring alloc failed.\n");
2842 return -ENOMEM;
2843 }
2844
2845 if (rx_ring->sbq_len) {
2846 /*
2847 * Allocate small buffer queue.
2848 */
2849 rx_ring->sbq_base =
2850 pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
2851 &rx_ring->sbq_base_dma);
2852
2853 if (rx_ring->sbq_base == NULL) {
2854 QPRINTK(qdev, IFUP, ERR,
2855 "Small buffer queue allocation failed.\n");
2856 goto err_mem;
2857 }
2858
2859 /*
2860 * Allocate small buffer queue control blocks.
2861 */
2862 rx_ring->sbq =
2863 kmalloc(rx_ring->sbq_len * sizeof(struct bq_desc),
2864 GFP_KERNEL);
2865 if (rx_ring->sbq == NULL) {
2866 QPRINTK(qdev, IFUP, ERR,
2867 "Small buffer queue control block allocation failed.\n");
2868 goto err_mem;
2869 }
2870
4545a3f2 2871 ql_init_sbq_ring(qdev, rx_ring);
c4e84bde
RM
2872 }
2873
2874 if (rx_ring->lbq_len) {
2875 /*
2876 * Allocate large buffer queue.
2877 */
2878 rx_ring->lbq_base =
2879 pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
2880 &rx_ring->lbq_base_dma);
2881
2882 if (rx_ring->lbq_base == NULL) {
2883 QPRINTK(qdev, IFUP, ERR,
2884 "Large buffer queue allocation failed.\n");
2885 goto err_mem;
2886 }
2887 /*
2888 * Allocate large buffer queue control blocks.
2889 */
2890 rx_ring->lbq =
2891 kmalloc(rx_ring->lbq_len * sizeof(struct bq_desc),
2892 GFP_KERNEL);
2893 if (rx_ring->lbq == NULL) {
2894 QPRINTK(qdev, IFUP, ERR,
2895 "Large buffer queue control block allocation failed.\n");
2896 goto err_mem;
2897 }
2898
4545a3f2 2899 ql_init_lbq_ring(qdev, rx_ring);
c4e84bde
RM
2900 }
2901
2902 return 0;
2903
2904err_mem:
2905 ql_free_rx_resources(qdev, rx_ring);
2906 return -ENOMEM;
2907}
2908
2909static void ql_tx_ring_clean(struct ql_adapter *qdev)
2910{
2911 struct tx_ring *tx_ring;
2912 struct tx_ring_desc *tx_ring_desc;
2913 int i, j;
2914
2915 /*
2916 * Loop through all queues and free
2917 * any resources.
2918 */
2919 for (j = 0; j < qdev->tx_ring_count; j++) {
2920 tx_ring = &qdev->tx_ring[j];
2921 for (i = 0; i < tx_ring->wq_len; i++) {
2922 tx_ring_desc = &tx_ring->q[i];
2923 if (tx_ring_desc && tx_ring_desc->skb) {
2924 QPRINTK(qdev, IFDOWN, ERR,
2925 "Freeing lost SKB %p, from queue %d, index %d.\n",
2926 tx_ring_desc->skb, j,
2927 tx_ring_desc->index);
2928 ql_unmap_send(qdev, tx_ring_desc,
2929 tx_ring_desc->map_cnt);
2930 dev_kfree_skb(tx_ring_desc->skb);
2931 tx_ring_desc->skb = NULL;
2932 }
2933 }
2934 }
2935}
2936
c4e84bde
RM
2937static void ql_free_mem_resources(struct ql_adapter *qdev)
2938{
2939 int i;
2940
2941 for (i = 0; i < qdev->tx_ring_count; i++)
2942 ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
2943 for (i = 0; i < qdev->rx_ring_count; i++)
2944 ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
2945 ql_free_shadow_space(qdev);
2946}
2947
2948static int ql_alloc_mem_resources(struct ql_adapter *qdev)
2949{
2950 int i;
2951
2952 /* Allocate space for our shadow registers and such. */
2953 if (ql_alloc_shadow_space(qdev))
2954 return -ENOMEM;
2955
2956 for (i = 0; i < qdev->rx_ring_count; i++) {
2957 if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
2958 QPRINTK(qdev, IFUP, ERR,
2959 "RX resource allocation failed.\n");
2960 goto err_mem;
2961 }
2962 }
2963 /* Allocate tx queue resources */
2964 for (i = 0; i < qdev->tx_ring_count; i++) {
2965 if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
2966 QPRINTK(qdev, IFUP, ERR,
2967 "TX resource allocation failed.\n");
2968 goto err_mem;
2969 }
2970 }
2971 return 0;
2972
2973err_mem:
2974 ql_free_mem_resources(qdev);
2975 return -ENOMEM;
2976}
2977
2978/* Set up the rx ring control block and pass it to the chip.
2979 * The control block is defined as
2980 * "Completion Queue Initialization Control Block", or cqicb.
2981 */
2982static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2983{
2984 struct cqicb *cqicb = &rx_ring->cqicb;
2985 void *shadow_reg = qdev->rx_ring_shadow_reg_area +
b8facca0 2986 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
c4e84bde 2987 u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
b8facca0 2988 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
c4e84bde
RM
2989 void __iomem *doorbell_area =
2990 qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
2991 int err = 0;
2992 u16 bq_len;
d4a4aba6 2993 u64 tmp;
b8facca0
RM
2994 __le64 *base_indirect_ptr;
2995 int page_entries;
c4e84bde
RM
2996
2997 /* Set up the shadow registers for this ring. */
2998 rx_ring->prod_idx_sh_reg = shadow_reg;
2999 rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
7c734359 3000 *rx_ring->prod_idx_sh_reg = 0;
c4e84bde
RM
3001 shadow_reg += sizeof(u64);
3002 shadow_reg_dma += sizeof(u64);
3003 rx_ring->lbq_base_indirect = shadow_reg;
3004 rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
b8facca0
RM
3005 shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3006 shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
c4e84bde
RM
3007 rx_ring->sbq_base_indirect = shadow_reg;
3008 rx_ring->sbq_base_indirect_dma = shadow_reg_dma;
3009
3010 /* PCI doorbell mem area + 0x00 for consumer index register */
8668ae92 3011 rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
c4e84bde
RM
3012 rx_ring->cnsmr_idx = 0;
3013 rx_ring->curr_entry = rx_ring->cq_base;
3014
3015 /* PCI doorbell mem area + 0x04 for valid register */
3016 rx_ring->valid_db_reg = doorbell_area + 0x04;
3017
3018 /* PCI doorbell mem area + 0x18 for large buffer consumer */
8668ae92 3019 rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);
c4e84bde
RM
3020
3021 /* PCI doorbell mem area + 0x1c */
8668ae92 3022 rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);
c4e84bde
RM
3023
3024 memset((void *)cqicb, 0, sizeof(struct cqicb));
3025 cqicb->msix_vect = rx_ring->irq;
3026
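 /* The control block length fields are only 16 bits wide, so a full
  * 65536-entry queue cannot be expressed directly; a value of 0 appears
  * to encode the 64K case (the lbq/sbq lengths below use the same
  * convention).
  */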
459caf5a
RM
3027 bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
3028 cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);
c4e84bde 3029
97345524 3030 cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);
c4e84bde 3031
97345524 3032 cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);
c4e84bde
RM
3033
3034 /*
3035 * Set up the control block load flags.
3036 */
3037 cqicb->flags = FLAGS_LC | /* Load queue base address */
3038 FLAGS_LV | /* Load MSI-X vector */
3039 FLAGS_LI; /* Load irq delay values */
3040 if (rx_ring->lbq_len) {
3041 cqicb->flags |= FLAGS_LL; /* Load lbq values */
a419aef8 3042 tmp = (u64)rx_ring->lbq_base_dma;
b8facca0
RM
3043 base_indirect_ptr = (__le64 *) rx_ring->lbq_base_indirect;
3044 page_entries = 0;
3045 do {
3046 *base_indirect_ptr = cpu_to_le64(tmp);
3047 tmp += DB_PAGE_SIZE;
3048 base_indirect_ptr++;
3049 page_entries++;
3050 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
97345524
RM
3051 cqicb->lbq_addr =
3052 cpu_to_le64(rx_ring->lbq_base_indirect_dma);
459caf5a
RM
3053 bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
3054 (u16) rx_ring->lbq_buf_size;
3055 cqicb->lbq_buf_size = cpu_to_le16(bq_len);
3056 bq_len = (rx_ring->lbq_len == 65536) ? 0 :
3057 (u16) rx_ring->lbq_len;
c4e84bde 3058 cqicb->lbq_len = cpu_to_le16(bq_len);
4545a3f2 3059 rx_ring->lbq_prod_idx = 0;
c4e84bde 3060 rx_ring->lbq_curr_idx = 0;
4545a3f2
RM
3061 rx_ring->lbq_clean_idx = 0;
3062 rx_ring->lbq_free_cnt = rx_ring->lbq_len;
c4e84bde
RM
3063 }
3064 if (rx_ring->sbq_len) {
3065 cqicb->flags |= FLAGS_LS; /* Load sbq values */
a419aef8 3066 tmp = (u64)rx_ring->sbq_base_dma;
b8facca0
RM
3067 base_indirect_ptr = (__le64 *) rx_ring->sbq_base_indirect;
3068 page_entries = 0;
3069 do {
3070 *base_indirect_ptr = cpu_to_le64(tmp);
3071 tmp += DB_PAGE_SIZE;
3072 base_indirect_ptr++;
3073 page_entries++;
3074 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len));
97345524
RM
3075 cqicb->sbq_addr =
3076 cpu_to_le64(rx_ring->sbq_base_indirect_dma);
c4e84bde 3077 cqicb->sbq_buf_size =
52e55f3c 3078 cpu_to_le16((u16)(rx_ring->sbq_buf_size));
459caf5a
RM
3079 bq_len = (rx_ring->sbq_len == 65536) ? 0 :
3080 (u16) rx_ring->sbq_len;
c4e84bde 3081 cqicb->sbq_len = cpu_to_le16(bq_len);
4545a3f2 3082 rx_ring->sbq_prod_idx = 0;
c4e84bde 3083 rx_ring->sbq_curr_idx = 0;
4545a3f2
RM
3084 rx_ring->sbq_clean_idx = 0;
3085 rx_ring->sbq_free_cnt = rx_ring->sbq_len;
c4e84bde
RM
3086 }
3087 switch (rx_ring->type) {
3088 case TX_Q:
c4e84bde
RM
3089 cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
3090 cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
3091 break;
c4e84bde
RM
3092 case RX_Q:
3093 /* Inbound completion handling rx_rings run in
3094 * separate NAPI contexts.
3095 */
3096 netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
3097 64);
3098 cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
3099 cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
3100 break;
3101 default:
3102 QPRINTK(qdev, IFUP, DEBUG, "Invalid rx_ring->type = %d.\n",
3103 rx_ring->type);
3104 }
4974097a 3105 QPRINTK(qdev, IFUP, DEBUG, "Initializing rx work queue.\n");
c4e84bde
RM
3106 err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
3107 CFG_LCQ, rx_ring->cq_id);
3108 if (err) {
3109 QPRINTK(qdev, IFUP, ERR, "Failed to load CQICB.\n");
3110 return err;
3111 }
c4e84bde
RM
3112 return err;
3113}
3114
3115static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
3116{
3117 struct wqicb *wqicb = (struct wqicb *)tx_ring;
3118 void __iomem *doorbell_area =
3119 qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
3120 void *shadow_reg = qdev->tx_ring_shadow_reg_area +
3121 (tx_ring->wq_id * sizeof(u64));
3122 u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
3123 (tx_ring->wq_id * sizeof(u64));
3124 int err = 0;
3125
3126 /*
3127 * Assign doorbell registers for this tx_ring.
3128 */
3129 /* TX PCI doorbell mem area for tx producer index */
8668ae92 3130 tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
c4e84bde
RM
3131 tx_ring->prod_idx = 0;
3132 /* TX PCI doorbell mem area + 0x04 */
3133 tx_ring->valid_db_reg = doorbell_area + 0x04;
3134
3135 /*
3136 * Assign shadow registers for this tx_ring.
3137 */
3138 tx_ring->cnsmr_idx_sh_reg = shadow_reg;
3139 tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
3140
3141 wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
3142 wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
3143 Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
3144 wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
3145 wqicb->rid = 0;
97345524 3146 wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);
c4e84bde 3147
97345524 3148 wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);
c4e84bde
RM
3149
3150 ql_init_tx_ring(qdev, tx_ring);
3151
e332471c 3152 err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
c4e84bde
RM
3153 (u16) tx_ring->wq_id);
3154 if (err) {
3155 QPRINTK(qdev, IFUP, ERR, "Failed to load tx_ring.\n");
3156 return err;
3157 }
4974097a 3158 QPRINTK(qdev, IFUP, DEBUG, "Successfully loaded WQICB.\n");
c4e84bde
RM
3159 return err;
3160}
3161
3162static void ql_disable_msix(struct ql_adapter *qdev)
3163{
3164 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3165 pci_disable_msix(qdev->pdev);
3166 clear_bit(QL_MSIX_ENABLED, &qdev->flags);
3167 kfree(qdev->msi_x_entry);
3168 qdev->msi_x_entry = NULL;
3169 } else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
3170 pci_disable_msi(qdev->pdev);
3171 clear_bit(QL_MSI_ENABLED, &qdev->flags);
3172 }
3173}
3174
a4ab6137
RM
3175/* We start by trying to get the number of vectors
3176 * stored in qdev->intr_count. If we don't get that
3177 * many then we reduce the count and try again.
3178 */
c4e84bde
RM
3179static void ql_enable_msix(struct ql_adapter *qdev)
3180{
a4ab6137 3181 int i, err;
c4e84bde 3182
c4e84bde 3183 /* Get the MSIX vectors. */
a5a62a1c 3184 if (qlge_irq_type == MSIX_IRQ) {
c4e84bde
RM
3185 /* Try to alloc space for the msix struct,
3186 * if it fails then go to MSI/legacy.
3187 */
a4ab6137 3188 qdev->msi_x_entry = kcalloc(qdev->intr_count,
c4e84bde
RM
3189 sizeof(struct msix_entry),
3190 GFP_KERNEL);
3191 if (!qdev->msi_x_entry) {
a5a62a1c 3192 qlge_irq_type = MSI_IRQ;
c4e84bde
RM
3193 goto msi;
3194 }
3195
a4ab6137 3196 for (i = 0; i < qdev->intr_count; i++)
c4e84bde
RM
3197 qdev->msi_x_entry[i].entry = i;
3198
a4ab6137
RM
3199 /* Loop to get our vectors. We start with
3200 * what we want and settle for what we get.
3201 */
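 /* pci_enable_msix() returns 0 on success, a negative errno on failure,
  * or a positive count of the vectors actually available, in which case
  * we retry with that smaller count.
  */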
3202 do {
3203 err = pci_enable_msix(qdev->pdev,
3204 qdev->msi_x_entry, qdev->intr_count);
3205 if (err > 0)
3206 qdev->intr_count = err;
3207 } while (err > 0);
3208
3209 if (err < 0) {
c4e84bde
RM
3210 kfree(qdev->msi_x_entry);
3211 qdev->msi_x_entry = NULL;
3212 QPRINTK(qdev, IFUP, WARNING,
3213 "MSI-X Enable failed, trying MSI.\n");
a4ab6137 3214 qdev->intr_count = 1;
a5a62a1c 3215 qlge_irq_type = MSI_IRQ;
a4ab6137
RM
3216 } else if (err == 0) {
3217 set_bit(QL_MSIX_ENABLED, &qdev->flags);
3218 QPRINTK(qdev, IFUP, INFO,
3219 "MSI-X Enabled, got %d vectors.\n",
3220 qdev->intr_count);
3221 return;
c4e84bde
RM
3222 }
3223 }
3224msi:
a4ab6137 3225 qdev->intr_count = 1;
a5a62a1c 3226 if (qlge_irq_type == MSI_IRQ) {
c4e84bde
RM
3227 if (!pci_enable_msi(qdev->pdev)) {
3228 set_bit(QL_MSI_ENABLED, &qdev->flags);
3229 QPRINTK(qdev, IFUP, INFO,
3230 "Running with MSI interrupts.\n");
3231 return;
3232 }
3233 }
a5a62a1c 3234 qlge_irq_type = LEG_IRQ;
c4e84bde
RM
3235 QPRINTK(qdev, IFUP, DEBUG, "Running with legacy interrupts.\n");
3236}
3237
39aa8165
RM
 3238 /* Each vector services 1 RSS ring and 1 or more
3239 * TX completion rings. This function loops through
3240 * the TX completion rings and assigns the vector that
3241 * will service it. An example would be if there are
3242 * 2 vectors (so 2 RSS rings) and 8 TX completion rings.
3243 * This would mean that vector 0 would service RSS ring 0
 3244 * and TX completion rings 0,1,2 and 3. Vector 1 would
3245 * service RSS ring 1 and TX completion rings 4,5,6 and 7.
3246 */
3247static void ql_set_tx_vect(struct ql_adapter *qdev)
3248{
3249 int i, j, vect;
3250 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3251
3252 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
 3253 /* Assign irq vectors to TX rx_rings. */
3254 for (vect = 0, j = 0, i = qdev->rss_ring_count;
3255 i < qdev->rx_ring_count; i++) {
3256 if (j == tx_rings_per_vector) {
3257 vect++;
3258 j = 0;
3259 }
3260 qdev->rx_ring[i].irq = vect;
3261 j++;
3262 }
3263 } else {
3264 /* For single vector all rings have an irq
3265 * of zero.
3266 */
3267 for (i = 0; i < qdev->rx_ring_count; i++)
3268 qdev->rx_ring[i].irq = 0;
3269 }
3270}
3271
3272/* Set the interrupt mask for this vector. Each vector
3273 * will service 1 RSS ring and 1 or more TX completion
3274 * rings. This function sets up a bit mask per vector
3275 * that indicates which rings it services.
3276 */
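 /* For example (assuming cq_ids are assigned in ring order): with 2
  * vectors, 2 RSS rings and 8 TX completion rings, the TX completion
  * rings occupy cq_ids 2-9, so vector 0 ends up with
  * irq_mask = BIT(0) | BIT(2) | BIT(3) | BIT(4) | BIT(5) and vector 1
  * with irq_mask = BIT(1) | BIT(6) | BIT(7) | BIT(8) | BIT(9).
  */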
3277static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx)
3278{
3279 int j, vect = ctx->intr;
3280 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3281
3282 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3283 /* Add the RSS ring serviced by this vector
3284 * to the mask.
3285 */
3286 ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
3287 /* Add the TX ring(s) serviced by this vector
3288 * to the mask. */
3289 for (j = 0; j < tx_rings_per_vector; j++) {
3290 ctx->irq_mask |=
3291 (1 << qdev->rx_ring[qdev->rss_ring_count +
3292 (vect * tx_rings_per_vector) + j].cq_id);
3293 }
3294 } else {
3295 /* For single vector we just shift each queue's
3296 * ID into the mask.
3297 */
3298 for (j = 0; j < qdev->rx_ring_count; j++)
3299 ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id);
3300 }
3301}
3302
c4e84bde
RM
3303/*
3304 * Here we build the intr_context structures based on
3305 * our rx_ring count and intr vector count.
3306 * The intr_context structure is used to hook each vector
3307 * to possibly different handlers.
3308 */
3309static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
3310{
3311 int i = 0;
3312 struct intr_context *intr_context = &qdev->intr_context[0];
3313
c4e84bde
RM
3314 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
 3315 /* Each rx_ring has its
3316 * own intr_context since we have separate
3317 * vectors for each queue.
c4e84bde
RM
3318 */
3319 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3320 qdev->rx_ring[i].irq = i;
3321 intr_context->intr = i;
3322 intr_context->qdev = qdev;
39aa8165
RM
3323 /* Set up this vector's bit-mask that indicates
3324 * which queues it services.
3325 */
3326 ql_set_irq_mask(qdev, intr_context);
c4e84bde
RM
3327 /*
 3328 * We set up each vector's enable/disable/read bits so
 3329 * there are no bit/mask calculations in the critical path.
3330 */
3331 intr_context->intr_en_mask =
3332 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3333 INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
3334 | i;
3335 intr_context->intr_dis_mask =
3336 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3337 INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
3338 INTR_EN_IHD | i;
3339 intr_context->intr_read_mask =
3340 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3341 INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
3342 i;
39aa8165
RM
3343 if (i == 0) {
3344 /* The first vector/queue handles
3345 * broadcast/multicast, fatal errors,
3346 * and firmware events. This in addition
3347 * to normal inbound NAPI processing.
c4e84bde 3348 */
39aa8165 3349 intr_context->handler = qlge_isr;
b2014ff8
RM
3350 sprintf(intr_context->name, "%s-rx-%d",
3351 qdev->ndev->name, i);
3352 } else {
c4e84bde 3353 /*
39aa8165 3354 * Inbound queues handle unicast frames only.
c4e84bde 3355 */
39aa8165
RM
3356 intr_context->handler = qlge_msix_rx_isr;
3357 sprintf(intr_context->name, "%s-rx-%d",
c4e84bde 3358 qdev->ndev->name, i);
c4e84bde
RM
3359 }
3360 }
3361 } else {
3362 /*
3363 * All rx_rings use the same intr_context since
3364 * there is only one vector.
3365 */
3366 intr_context->intr = 0;
3367 intr_context->qdev = qdev;
3368 /*
 3369 * We set up each vector's enable/disable/read bits so
 3370 * there are no bit/mask calculations in the critical path.
3371 */
3372 intr_context->intr_en_mask =
3373 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
3374 intr_context->intr_dis_mask =
3375 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3376 INTR_EN_TYPE_DISABLE;
3377 intr_context->intr_read_mask =
3378 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
3379 /*
3380 * Single interrupt means one handler for all rings.
3381 */
3382 intr_context->handler = qlge_isr;
3383 sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
39aa8165
RM
3384 /* Set up this vector's bit-mask that indicates
3385 * which queues it services. In this case there is
3386 * a single vector so it will service all RSS and
3387 * TX completion rings.
3388 */
3389 ql_set_irq_mask(qdev, intr_context);
c4e84bde 3390 }
39aa8165
RM
3391 /* Tell the TX completion rings which MSIx vector
3392 * they will be using.
3393 */
3394 ql_set_tx_vect(qdev);
c4e84bde
RM
3395}
3396
3397static void ql_free_irq(struct ql_adapter *qdev)
3398{
3399 int i;
3400 struct intr_context *intr_context = &qdev->intr_context[0];
3401
3402 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3403 if (intr_context->hooked) {
3404 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3405 free_irq(qdev->msi_x_entry[i].vector,
3406 &qdev->rx_ring[i]);
4974097a 3407 QPRINTK(qdev, IFDOWN, DEBUG,
c4e84bde
RM
3408 "freeing msix interrupt %d.\n", i);
3409 } else {
3410 free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
4974097a 3411 QPRINTK(qdev, IFDOWN, DEBUG,
c4e84bde
RM
3412 "freeing msi interrupt %d.\n", i);
3413 }
3414 }
3415 }
3416 ql_disable_msix(qdev);
3417}
3418
3419static int ql_request_irq(struct ql_adapter *qdev)
3420{
3421 int i;
3422 int status = 0;
3423 struct pci_dev *pdev = qdev->pdev;
3424 struct intr_context *intr_context = &qdev->intr_context[0];
3425
3426 ql_resolve_queues_to_irqs(qdev);
3427
3428 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3429 atomic_set(&intr_context->irq_cnt, 0);
3430 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3431 status = request_irq(qdev->msi_x_entry[i].vector,
3432 intr_context->handler,
3433 0,
3434 intr_context->name,
3435 &qdev->rx_ring[i]);
3436 if (status) {
3437 QPRINTK(qdev, IFUP, ERR,
3438 "Failed request for MSIX interrupt %d.\n",
3439 i);
3440 goto err_irq;
3441 } else {
4974097a 3442 QPRINTK(qdev, IFUP, DEBUG,
c4e84bde
RM
3443 "Hooked intr %d, queue type %s%s%s, with name %s.\n",
3444 i,
3445 qdev->rx_ring[i].type ==
3446 DEFAULT_Q ? "DEFAULT_Q" : "",
3447 qdev->rx_ring[i].type ==
3448 TX_Q ? "TX_Q" : "",
3449 qdev->rx_ring[i].type ==
3450 RX_Q ? "RX_Q" : "", intr_context->name);
3451 }
3452 } else {
3453 QPRINTK(qdev, IFUP, DEBUG,
3454 "trying msi or legacy interrupts.\n");
3455 QPRINTK(qdev, IFUP, DEBUG,
3456 "%s: irq = %d.\n", __func__, pdev->irq);
3457 QPRINTK(qdev, IFUP, DEBUG,
3458 "%s: context->name = %s.\n", __func__,
3459 intr_context->name);
3460 QPRINTK(qdev, IFUP, DEBUG,
3461 "%s: dev_id = 0x%p.\n", __func__,
3462 &qdev->rx_ring[0]);
3463 status =
3464 request_irq(pdev->irq, qlge_isr,
3465 test_bit(QL_MSI_ENABLED,
3466 &qdev->
3467 flags) ? 0 : IRQF_SHARED,
3468 intr_context->name, &qdev->rx_ring[0]);
3469 if (status)
3470 goto err_irq;
3471
3472 QPRINTK(qdev, IFUP, ERR,
3473 "Hooked intr %d, queue type %s%s%s, with name %s.\n",
3474 i,
3475 qdev->rx_ring[0].type ==
3476 DEFAULT_Q ? "DEFAULT_Q" : "",
3477 qdev->rx_ring[0].type == TX_Q ? "TX_Q" : "",
3478 qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
3479 intr_context->name);
3480 }
3481 intr_context->hooked = 1;
3482 }
3483 return status;
3484err_irq:
3485 QPRINTK(qdev, IFUP, ERR, "Failed to get the interrupts!!!/n");
3486 ql_free_irq(qdev);
3487 return status;
3488}
3489
3490static int ql_start_rss(struct ql_adapter *qdev)
3491{
541ae28c
RM
3492 u8 init_hash_seed[] = {0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
3493 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f,
3494 0xb0, 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b,
3495 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80,
3496 0x30, 0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b,
3497 0xbe, 0xac, 0x01, 0xfa};
c4e84bde
RM
3498 struct ricb *ricb = &qdev->ricb;
3499 int status = 0;
3500 int i;
3501 u8 *hash_id = (u8 *) ricb->hash_cq_id;
3502
e332471c 3503 memset((void *)ricb, 0, sizeof(*ricb));
c4e84bde 3504
b2014ff8 3505 ricb->base_cq = RSS_L4K;
c4e84bde 3506 ricb->flags =
541ae28c
RM
3507 (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6);
3508 ricb->mask = cpu_to_le16((u16)(0x3ff));
c4e84bde
RM
3509
3510 /*
3511 * Fill out the Indirection Table.
3512 */
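 /* The 1024-entry table spreads hash values across the RSS rings in
  * round-robin order; the mask below relies on rss_ring_count being a
  * power of two (e.g. with 4 rings the entries repeat 0,1,2,3,0,1,...).
  */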
541ae28c
RM
3513 for (i = 0; i < 1024; i++)
3514 hash_id[i] = (i & (qdev->rss_ring_count - 1));
c4e84bde 3515
541ae28c
RM
3516 memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
3517 memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);
c4e84bde 3518
4974097a 3519 QPRINTK(qdev, IFUP, DEBUG, "Initializing RSS.\n");
c4e84bde 3520
e332471c 3521 status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
c4e84bde
RM
3522 if (status) {
3523 QPRINTK(qdev, IFUP, ERR, "Failed to load RICB.\n");
3524 return status;
3525 }
4974097a 3526 QPRINTK(qdev, IFUP, DEBUG, "Successfully loaded RICB.\n");
c4e84bde
RM
3527 return status;
3528}
3529
a5f59dc9 3530static int ql_clear_routing_entries(struct ql_adapter *qdev)
c4e84bde 3531{
a5f59dc9 3532 int i, status = 0;
c4e84bde 3533
8587ea35
RM
3534 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3535 if (status)
3536 return status;
c4e84bde
RM
3537 /* Clear all the entries in the routing table. */
3538 for (i = 0; i < 16; i++) {
3539 status = ql_set_routing_reg(qdev, i, 0, 0);
3540 if (status) {
3541 QPRINTK(qdev, IFUP, ERR,
a5f59dc9
RM
3542 "Failed to init routing register for CAM "
3543 "packets.\n");
3544 break;
c4e84bde
RM
3545 }
3546 }
a5f59dc9
RM
3547 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3548 return status;
3549}
3550
3551/* Initialize the frame-to-queue routing. */
3552static int ql_route_initialize(struct ql_adapter *qdev)
3553{
3554 int status = 0;
3555
fd21cf52
RM
3556 /* Clear all the entries in the routing table. */
3557 status = ql_clear_routing_entries(qdev);
a5f59dc9
RM
3558 if (status)
3559 return status;
3560
fd21cf52 3561 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
a5f59dc9 3562 if (status)
fd21cf52 3563 return status;
c4e84bde
RM
3564
3565 status = ql_set_routing_reg(qdev, RT_IDX_ALL_ERR_SLOT, RT_IDX_ERR, 1);
3566 if (status) {
3567 QPRINTK(qdev, IFUP, ERR,
3568 "Failed to init routing register for error packets.\n");
8587ea35 3569 goto exit;
c4e84bde
RM
3570 }
3571 status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
3572 if (status) {
3573 QPRINTK(qdev, IFUP, ERR,
3574 "Failed to init routing register for broadcast packets.\n");
8587ea35 3575 goto exit;
c4e84bde
RM
3576 }
3577 /* If we have more than one inbound queue, then turn on RSS in the
3578 * routing block.
3579 */
3580 if (qdev->rss_ring_count > 1) {
3581 status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
3582 RT_IDX_RSS_MATCH, 1);
3583 if (status) {
3584 QPRINTK(qdev, IFUP, ERR,
3585 "Failed to init routing register for MATCH RSS packets.\n");
8587ea35 3586 goto exit;
c4e84bde
RM
3587 }
3588 }
3589
3590 status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
3591 RT_IDX_CAM_HIT, 1);
8587ea35 3592 if (status)
c4e84bde
RM
3593 QPRINTK(qdev, IFUP, ERR,
3594 "Failed to init routing register for CAM packets.\n");
8587ea35
RM
3595exit:
3596 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
c4e84bde
RM
3597 return status;
3598}
3599
2ee1e272 3600int ql_cam_route_initialize(struct ql_adapter *qdev)
bb58b5b6 3601{
7fab3bfe 3602 int status, set;
bb58b5b6 3603
7fab3bfe
RM
3604 /* Check if the link is up and use that to
3605 * determine whether we are setting or clearing
3606 * the MAC address in the CAM.
3607 */
3608 set = ql_read32(qdev, STS);
3609 set &= qdev->port_link_up;
3610 status = ql_set_mac_addr(qdev, set);
bb58b5b6
RM
3611 if (status) {
3612 QPRINTK(qdev, IFUP, ERR, "Failed to init mac address.\n");
3613 return status;
3614 }
3615
3616 status = ql_route_initialize(qdev);
3617 if (status)
3618 QPRINTK(qdev, IFUP, ERR, "Failed to init routing table.\n");
3619
3620 return status;
3621}
3622
c4e84bde
RM
3623static int ql_adapter_initialize(struct ql_adapter *qdev)
3624{
3625 u32 value, mask;
3626 int i;
3627 int status = 0;
3628
3629 /*
3630 * Set up the System register to halt on errors.
3631 */
3632 value = SYS_EFE | SYS_FAE;
3633 mask = value << 16;
3634 ql_write32(qdev, SYS, mask | value);
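	/* As in the writes that follow, the upper 16 bits of these control
	 * registers appear to act as a write-enable mask for the lower 16
	 * bits, so shifting the value into the top half limits the update
	 * to the bits set here.
	 */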
3635
c9cf0a04
RM
3636 /* Set the default queue, and VLAN behavior. */
3637 value = NIC_RCV_CFG_DFQ | NIC_RCV_CFG_RV;
3638 mask = NIC_RCV_CFG_DFQ_MASK | (NIC_RCV_CFG_RV << 16);
c4e84bde
RM
3639 ql_write32(qdev, NIC_RCV_CFG, (mask | value));
3640
3641 /* Set the MPI interrupt to enabled. */
3642 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
3643
3644 /* Enable the function, set pagesize, enable error checking. */
3645 value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
572c526f
RM
3646 FSC_EC | FSC_VM_PAGE_4K;
3647 value |= SPLT_SETTING;
c4e84bde
RM
3648
3649 /* Set/clear header splitting. */
3650 mask = FSC_VM_PAGESIZE_MASK |
3651 FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
3652 ql_write32(qdev, FSC, mask | value);
3653
572c526f 3654 ql_write32(qdev, SPLT_HDR, SPLT_LEN);
c4e84bde 3655
a3b71939
RM
3656 /* Set RX packet routing to use port/pci function on which the
3657 * packet arrived, in addition to the usual frame routing.
3658 * This is helpful in bonding setups where both interfaces can have
3659 * the same MAC address.
3660 */
3661 ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
bc083ce9
RM
3662 /* Reroute all packets to our Interface.
3663 * They may have been routed to MPI firmware
3664 * due to WOL.
3665 */
3666 value = ql_read32(qdev, MGMT_RCV_CFG);
3667 value &= ~MGMT_RCV_CFG_RM;
3668 mask = 0xffff0000;
3669
3670 /* Sticky reg needs clearing due to WOL. */
3671 ql_write32(qdev, MGMT_RCV_CFG, mask);
3672 ql_write32(qdev, MGMT_RCV_CFG, mask | value);
3673
3674 /* Default WOL is enabled on Mezz cards */
3675 if (qdev->pdev->subsystem_device == 0x0068 ||
3676 qdev->pdev->subsystem_device == 0x0180)
3677 qdev->wol = WAKE_MAGIC;
a3b71939 3678
c4e84bde
RM
3679 /* Start up the rx queues. */
3680 for (i = 0; i < qdev->rx_ring_count; i++) {
3681 status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
3682 if (status) {
3683 QPRINTK(qdev, IFUP, ERR,
3684 "Failed to start rx ring[%d].\n", i);
3685 return status;
3686 }
3687 }
3688
3689 /* If there is more than one inbound completion queue
3690 * then download a RICB to configure RSS.
3691 */
3692 if (qdev->rss_ring_count > 1) {
3693 status = ql_start_rss(qdev);
3694 if (status) {
3695 QPRINTK(qdev, IFUP, ERR, "Failed to start RSS.\n");
3696 return status;
3697 }
3698 }
3699
3700 /* Start up the tx queues. */
3701 for (i = 0; i < qdev->tx_ring_count; i++) {
3702 status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
3703 if (status) {
3704 QPRINTK(qdev, IFUP, ERR,
3705 "Failed to start tx ring[%d].\n", i);
3706 return status;
3707 }
3708 }
3709
b0c2aadf
RM
3710 /* Initialize the port and set the max framesize. */
3711 status = qdev->nic_ops->port_initialize(qdev);
80928860
RM
3712 if (status)
3713 QPRINTK(qdev, IFUP, ERR, "Failed to start port.\n");
c4e84bde 3714
bb58b5b6
RM
3715 /* Set up the MAC address and frame routing filter. */
3716 status = ql_cam_route_initialize(qdev);
c4e84bde 3717 if (status) {
bb58b5b6
RM
3718 QPRINTK(qdev, IFUP, ERR,
3719 "Failed to init CAM/Routing tables.\n");
c4e84bde
RM
3720 return status;
3721 }
3722
3723 /* Start NAPI for the RSS queues. */
b2014ff8 3724 for (i = 0; i < qdev->rss_ring_count; i++) {
4974097a 3725 QPRINTK(qdev, IFUP, DEBUG, "Enabling NAPI for rx_ring[%d].\n",
c4e84bde
RM
3726 i);
3727 napi_enable(&qdev->rx_ring[i].napi);
3728 }
3729
3730 return status;
3731}
3732
3733/* Issue soft reset to chip. */
3734static int ql_adapter_reset(struct ql_adapter *qdev)
3735{
3736 u32 value;
c4e84bde 3737 int status = 0;
a5f59dc9 3738 unsigned long end_jiffies;
c4e84bde 3739
a5f59dc9
RM
3740 /* Clear all the entries in the routing table. */
3741 status = ql_clear_routing_entries(qdev);
3742 if (status) {
3743 QPRINTK(qdev, IFUP, ERR, "Failed to clear routing bits.\n");
3744 return status;
3745 }
3746
3747 end_jiffies = jiffies +
3748 max((unsigned long)1, usecs_to_jiffies(30));
84087f4d
RM
3749
3750 /* Stop management traffic. */
3751 ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);
3752
3753 /* Wait for the NIC and MGMNT FIFOs to empty. */
3754 ql_wait_fifo_empty(qdev);
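	/* Issue the function reset and poll RST_FO until the hardware
	 * clears RST_FO_FR, or until the timeout window computed above
	 * (at least one jiffy) expires, in which case -ETIMEDOUT is
	 * reported below.
	 */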
3755
c4e84bde 3756 ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
a75ee7f1 3757
c4e84bde
RM
3758 do {
3759 value = ql_read32(qdev, RST_FO);
3760 if ((value & RST_FO_FR) == 0)
3761 break;
a75ee7f1
RM
3762 cpu_relax();
3763 } while (time_before(jiffies, end_jiffies));
c4e84bde 3764
c4e84bde 3765 if (value & RST_FO_FR) {
c4e84bde 3766 QPRINTK(qdev, IFDOWN, ERR,
3ac49a1c 3767 "ETIMEDOUT!!! errored out of resetting the chip!\n");
a75ee7f1 3768 status = -ETIMEDOUT;
c4e84bde
RM
3769 }
3770
84087f4d
RM
3771 /* Resume management traffic. */
3772 ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
c4e84bde
RM
3773 return status;
3774}
3775
3776static void ql_display_dev_info(struct net_device *ndev)
3777{
3778 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
3779
3780 QPRINTK(qdev, PROBE, INFO,
e4552f51 3781 "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
c4e84bde
RM
3782 "XG Roll = %d, XG Rev = %d.\n",
3783 qdev->func,
e4552f51 3784 qdev->port,
c4e84bde
RM
3785 qdev->chip_rev_id & 0x0000000f,
3786 qdev->chip_rev_id >> 4 & 0x0000000f,
3787 qdev->chip_rev_id >> 8 & 0x0000000f,
3788 qdev->chip_rev_id >> 12 & 0x0000000f);
7c510e4b 3789 QPRINTK(qdev, PROBE, INFO, "MAC address %pM\n", ndev->dev_addr);
c4e84bde
RM
3790}
3791
bc083ce9
RM
3792int ql_wol(struct ql_adapter *qdev)
3793{
3794 int status = 0;
3795 u32 wol = MB_WOL_DISABLE;
3796
3797 /* The CAM is still intact after a reset, but if we
3798 * are doing WOL, then we may need to program the
3799 * routing regs. We would also need to issue the mailbox
3800 * commands to instruct the MPI what to do per the ethtool
3801 * settings.
3802 */
3803
3804 if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
3805 WAKE_MCAST | WAKE_BCAST)) {
3806 QPRINTK(qdev, IFDOWN, ERR,
3807 "Unsupported WOL paramter. qdev->wol = 0x%x.\n",
3808 qdev->wol);
3809 return -EINVAL;
3810 }
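	/* Only magic-packet wake is handled below; the selected mode is
	 * passed to the MPI firmware through the mailbox helpers.
	 */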
3811
3812 if (qdev->wol & WAKE_MAGIC) {
3813 status = ql_mb_wol_set_magic(qdev, 1);
3814 if (status) {
3815 QPRINTK(qdev, IFDOWN, ERR,
3816 "Failed to set magic packet on %s.\n",
3817 qdev->ndev->name);
3818 return status;
3819 } else
3820 QPRINTK(qdev, DRV, INFO,
3821 "Enabled magic packet successfully on %s.\n",
3822 qdev->ndev->name);
3823
3824 wol |= MB_WOL_MAGIC_PKT;
3825 }
3826
3827 if (qdev->wol) {
bc083ce9
RM
3828 wol |= MB_WOL_MODE_ON;
3829 status = ql_mb_wol_mode(qdev, wol);
3830 QPRINTK(qdev, DRV, ERR, "WOL %s (wol code 0x%x) on %s\n",
3831 (status == 0) ? "Sucessfully set" : "Failed", wol,
3832 qdev->ndev->name);
3833 }
3834
3835 return status;
3836}
3837
c4e84bde
RM
3838static int ql_adapter_down(struct ql_adapter *qdev)
3839{
c4e84bde 3840 int i, status = 0;
c4e84bde 3841
6a473308 3842 ql_link_off(qdev);
c4e84bde 3843
6497b607
RM
3844 /* Don't kill the reset worker thread if we
3845 * are in the process of recovery.
3846 */
3847 if (test_bit(QL_ADAPTER_UP, &qdev->flags))
3848 cancel_delayed_work_sync(&qdev->asic_reset_work);
c4e84bde
RM
3849 cancel_delayed_work_sync(&qdev->mpi_reset_work);
3850 cancel_delayed_work_sync(&qdev->mpi_work);
2ee1e272 3851 cancel_delayed_work_sync(&qdev->mpi_idc_work);
8aae2600 3852 cancel_delayed_work_sync(&qdev->mpi_core_to_log);
bcc2cb3b 3853 cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
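	/* From here the teardown order is: disable NAPI, mark the adapter
	 * down, mask interrupts, reclaim pending TX buffers, delete the
	 * NAPI contexts, free RX buffers and finally soft-reset the chip.
	 */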
c4e84bde 3854
39aa8165
RM
3855 for (i = 0; i < qdev->rss_ring_count; i++)
3856 napi_disable(&qdev->rx_ring[i].napi);
c4e84bde
RM
3857
3858 clear_bit(QL_ADAPTER_UP, &qdev->flags);
3859
3860 ql_disable_interrupts(qdev);
3861
3862 ql_tx_ring_clean(qdev);
3863
6b318cb3
RM
3864 /* Call netif_napi_del() from a common point.
3865 */
b2014ff8 3866 for (i = 0; i < qdev->rss_ring_count; i++)
6b318cb3
RM
3867 netif_napi_del(&qdev->rx_ring[i].napi);
3868
4545a3f2 3869 ql_free_rx_buffers(qdev);
2d6a5e95 3870
c4e84bde
RM
3871 status = ql_adapter_reset(qdev);
3872 if (status)
3873 QPRINTK(qdev, IFDOWN, ERR, "reset(func #%d) FAILED!\n",
3874 qdev->func);
c4e84bde
RM
3875 return status;
3876}
3877
3878static int ql_adapter_up(struct ql_adapter *qdev)
3879{
3880 int err = 0;
3881
c4e84bde
RM
3882 err = ql_adapter_initialize(qdev);
3883 if (err) {
3884 QPRINTK(qdev, IFUP, INFO, "Unable to initialize adapter.\n");
c4e84bde
RM
3885 goto err_init;
3886 }
c4e84bde 3887 set_bit(QL_ADAPTER_UP, &qdev->flags);
4545a3f2 3888 ql_alloc_rx_buffers(qdev);
8b007de1
RM
3889 /* If the port is initialized and the
3890 * link is up then turn on the carrier.
3891 */
3892 if ((ql_read32(qdev, STS) & qdev->port_init) &&
3893 (ql_read32(qdev, STS) & qdev->port_link_up))
6a473308 3894 ql_link_on(qdev);
c4e84bde
RM
3895 ql_enable_interrupts(qdev);
3896 ql_enable_all_completion_interrupts(qdev);
1e213303 3897 netif_tx_start_all_queues(qdev->ndev);
c4e84bde
RM
3898
3899 return 0;
3900err_init:
3901 ql_adapter_reset(qdev);
3902 return err;
3903}
3904
c4e84bde
RM
3905static void ql_release_adapter_resources(struct ql_adapter *qdev)
3906{
3907 ql_free_mem_resources(qdev);
3908 ql_free_irq(qdev);
3909}
3910
3911static int ql_get_adapter_resources(struct ql_adapter *qdev)
3912{
3913 int status = 0;
3914
3915 if (ql_alloc_mem_resources(qdev)) {
3916 QPRINTK(qdev, IFUP, ERR, "Unable to allocate memory.\n");
3917 return -ENOMEM;
3918 }
3919 status = ql_request_irq(qdev);
c4e84bde
RM
3920 return status;
3921}
3922
3923static int qlge_close(struct net_device *ndev)
3924{
3925 struct ql_adapter *qdev = netdev_priv(ndev);
3926
3927 /*
3928 * Wait for device to recover from a reset.
3929 * (Rarely happens, but possible.)
3930 */
3931 while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
3932 msleep(1);
3933 ql_adapter_down(qdev);
3934 ql_release_adapter_resources(qdev);
c4e84bde
RM
3935 return 0;
3936}
3937
3938static int ql_configure_rings(struct ql_adapter *qdev)
3939{
3940 int i;
3941 struct rx_ring *rx_ring;
3942 struct tx_ring *tx_ring;
a4ab6137 3943 int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus());
7c734359
RM
3944 unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ?
3945 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
3946
3947 qdev->lbq_buf_order = get_order(lbq_buf_len);
a4ab6137
RM
3948
3949 /* In a perfect world we have one RSS ring for each CPU
3950 * and each has its own vector. To do that we ask for
3951 * cpu_cnt vectors. ql_enable_msix() will adjust the
3952 * vector count to what we actually get. We then
3953 * allocate an RSS ring for each.
3954 * Essentially, we are doing min(cpu_count, msix_vector_count).
c4e84bde 3955 */
a4ab6137
RM
3956 qdev->intr_count = cpu_cnt;
3957 ql_enable_msix(qdev);
3958 /* Adjust the RSS ring count to the actual vector count. */
3959 qdev->rss_ring_count = qdev->intr_count;
c4e84bde 3960 qdev->tx_ring_count = cpu_cnt;
b2014ff8 3961 qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;
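	/* The rx_ring[] array holds the RSS (inbound) completion queues
	 * first, followed by one outbound-completion queue per TX ring,
	 * hence the sum above.
	 */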
c4e84bde 3962
c4e84bde
RM
3963 for (i = 0; i < qdev->tx_ring_count; i++) {
3964 tx_ring = &qdev->tx_ring[i];
e332471c 3965 memset((void *)tx_ring, 0, sizeof(*tx_ring));
c4e84bde
RM
3966 tx_ring->qdev = qdev;
3967 tx_ring->wq_id = i;
3968 tx_ring->wq_len = qdev->tx_ring_size;
3969 tx_ring->wq_size =
3970 tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);
3971
3972 /*
3973 * The completion queue IDs for the tx rings start
39aa8165 3974 * immediately after the rss rings.
c4e84bde 3975 */
39aa8165 3976 tx_ring->cq_id = qdev->rss_ring_count + i;
c4e84bde
RM
3977 }
3978
3979 for (i = 0; i < qdev->rx_ring_count; i++) {
3980 rx_ring = &qdev->rx_ring[i];
e332471c 3981 memset((void *)rx_ring, 0, sizeof(*rx_ring));
c4e84bde
RM
3982 rx_ring->qdev = qdev;
3983 rx_ring->cq_id = i;
3984 rx_ring->cpu = i % cpu_cnt; /* CPU to run handler on. */
b2014ff8 3985 if (i < qdev->rss_ring_count) {
39aa8165
RM
3986 /*
3987 * Inbound (RSS) queues.
3988 */
c4e84bde
RM
3989 rx_ring->cq_len = qdev->rx_ring_size;
3990 rx_ring->cq_size =
3991 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
3992 rx_ring->lbq_len = NUM_LARGE_BUFFERS;
3993 rx_ring->lbq_size =
2c9a0d41 3994 rx_ring->lbq_len * sizeof(__le64);
7c734359
RM
3995 rx_ring->lbq_buf_size = (u16)lbq_buf_len;
3996 QPRINTK(qdev, IFUP, DEBUG,
3997 "lbq_buf_size %d, order = %d\n",
3998 rx_ring->lbq_buf_size, qdev->lbq_buf_order);
c4e84bde
RM
3999 rx_ring->sbq_len = NUM_SMALL_BUFFERS;
4000 rx_ring->sbq_size =
2c9a0d41 4001 rx_ring->sbq_len * sizeof(__le64);
52e55f3c 4002 rx_ring->sbq_buf_size = SMALL_BUF_MAP_SIZE;
b2014ff8
RM
4003 rx_ring->type = RX_Q;
4004 } else {
c4e84bde
RM
4005 /*
4006 * Outbound queue handles outbound completions only.
4007 */
4008 /* outbound cq is same size as tx_ring it services. */
4009 rx_ring->cq_len = qdev->tx_ring_size;
4010 rx_ring->cq_size =
4011 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4012 rx_ring->lbq_len = 0;
4013 rx_ring->lbq_size = 0;
4014 rx_ring->lbq_buf_size = 0;
4015 rx_ring->sbq_len = 0;
4016 rx_ring->sbq_size = 0;
4017 rx_ring->sbq_buf_size = 0;
4018 rx_ring->type = TX_Q;
c4e84bde
RM
4019 }
4020 }
4021 return 0;
4022}
4023
4024static int qlge_open(struct net_device *ndev)
4025{
4026 int err = 0;
4027 struct ql_adapter *qdev = netdev_priv(ndev);
4028
74e12435
RM
4029 err = ql_adapter_reset(qdev);
4030 if (err)
4031 return err;
4032
c4e84bde
RM
4033 err = ql_configure_rings(qdev);
4034 if (err)
4035 return err;
4036
4037 err = ql_get_adapter_resources(qdev);
4038 if (err)
4039 goto error_up;
4040
4041 err = ql_adapter_up(qdev);
4042 if (err)
4043 goto error_up;
4044
4045 return err;
4046
4047error_up:
4048 ql_release_adapter_resources(qdev);
c4e84bde
RM
4049 return err;
4050}
4051
7c734359
RM
4052static int ql_change_rx_buffers(struct ql_adapter *qdev)
4053{
4054 struct rx_ring *rx_ring;
4055 int i, status;
4056 u32 lbq_buf_len;
4057
4058 /* Wait for an outstanding reset to complete. */
4059 if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4060 int i = 3;
4061 while (i-- && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4062 QPRINTK(qdev, IFUP, ERR,
4063 "Waiting for adapter UP...\n");
4064 ssleep(1);
4065 }
4066
4067 if (!i) {
4068 QPRINTK(qdev, IFUP, ERR,
4069 "Timed out waiting for adapter UP\n");
4070 return -ETIMEDOUT;
4071 }
4072 }
4073
4074 status = ql_adapter_down(qdev);
4075 if (status)
4076 goto error;
4077
4078 /* Get the new rx buffer size. */
4079 lbq_buf_len = (qdev->ndev->mtu > 1500) ?
4080 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
4081 qdev->lbq_buf_order = get_order(lbq_buf_len);
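	/* The large-buffer size can only change while the adapter is down,
	 * so the rings can be rebuilt with the new size; hence the
	 * down/up cycle around this update.
	 */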
4082
4083 for (i = 0; i < qdev->rss_ring_count; i++) {
4084 rx_ring = &qdev->rx_ring[i];
4085 /* Set the new size. */
4086 rx_ring->lbq_buf_size = lbq_buf_len;
4087 }
4088
4089 status = ql_adapter_up(qdev);
4090 if (status)
4091 goto error;
4092
4093 return status;
4094error:
4095 QPRINTK(qdev, IFUP, ALERT,
4096 "Driver up/down cycle failed, closing device.\n");
4097 set_bit(QL_ADAPTER_UP, &qdev->flags);
4098 dev_close(qdev->ndev);
4099 return status;
4100}
4101
c4e84bde
RM
4102static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
4103{
4104 struct ql_adapter *qdev = netdev_priv(ndev);
7c734359 4105 int status;
c4e84bde
RM
4106
4107 if (ndev->mtu == 1500 && new_mtu == 9000) {
4108 QPRINTK(qdev, IFUP, ERR, "Changing to jumbo MTU.\n");
4109 } else if (ndev->mtu == 9000 && new_mtu == 1500) {
4110 QPRINTK(qdev, IFUP, ERR, "Changing to normal MTU.\n");
4111 } else if ((ndev->mtu == 1500 && new_mtu == 1500) ||
4112 (ndev->mtu == 9000 && new_mtu == 9000)) {
4113 return 0;
4114 } else
4115 return -EINVAL;
7c734359
RM
4116
4117 queue_delayed_work(qdev->workqueue,
4118 &qdev->mpi_port_cfg_work, 3*HZ);
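	/* The delayed port-config work presumably lets the MPI firmware
	 * pick up the new frame size; if the interface is running, the RX
	 * buffer geometry is rebuilt synchronously below.
	 */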
4119
4120 if (!netif_running(qdev->ndev)) {
4121 ndev->mtu = new_mtu;
4122 return 0;
4123 }
4124
c4e84bde 4125 ndev->mtu = new_mtu;
7c734359
RM
4126 status = ql_change_rx_buffers(qdev);
4127 if (status) {
4128 QPRINTK(qdev, IFUP, ERR,
4129 "Changing MTU failed.\n");
4130 }
4131
4132 return status;
c4e84bde
RM
4133}
4134
4135static struct net_device_stats *qlge_get_stats(struct net_device
4136 *ndev)
4137{
885ee398
RM
4138 struct ql_adapter *qdev = netdev_priv(ndev);
4139 struct rx_ring *rx_ring = &qdev->rx_ring[0];
4140 struct tx_ring *tx_ring = &qdev->tx_ring[0];
4141 unsigned long pkts, mcast, dropped, errors, bytes;
4142 int i;
4143
4144 /* Get RX stats. */
4145 pkts = mcast = dropped = errors = bytes = 0;
4146 for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
4147 pkts += rx_ring->rx_packets;
4148 bytes += rx_ring->rx_bytes;
4149 dropped += rx_ring->rx_dropped;
4150 errors += rx_ring->rx_errors;
4151 mcast += rx_ring->rx_multicast;
4152 }
4153 ndev->stats.rx_packets = pkts;
4154 ndev->stats.rx_bytes = bytes;
4155 ndev->stats.rx_dropped = dropped;
4156 ndev->stats.rx_errors = errors;
4157 ndev->stats.multicast = mcast;
4158
4159 /* Get TX stats. */
4160 pkts = errors = bytes = 0;
4161 for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) {
4162 pkts += tx_ring->tx_packets;
4163 bytes += tx_ring->tx_bytes;
4164 errors += tx_ring->tx_errors;
4165 }
4166 ndev->stats.tx_packets = pkts;
4167 ndev->stats.tx_bytes = bytes;
4168 ndev->stats.tx_errors = errors;
bcc90f55 4169 return &ndev->stats;
c4e84bde
RM
4170}
4171
4172static void qlge_set_multicast_list(struct net_device *ndev)
4173{
4174 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
4175 struct dev_mc_list *mc_ptr;
cc288f54 4176 int i, status;
c4e84bde 4177
cc288f54
RM
4178 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
4179 if (status)
4180 return;
c4e84bde
RM
4181 /*
4182 * Set or clear promiscuous mode if a
4183 * transition is taking place.
4184 */
4185 if (ndev->flags & IFF_PROMISC) {
4186 if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4187 if (ql_set_routing_reg
4188 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
4189 QPRINTK(qdev, HW, ERR,
4190 "Failed to set promiscous mode.\n");
4191 } else {
4192 set_bit(QL_PROMISCUOUS, &qdev->flags);
4193 }
4194 }
4195 } else {
4196 if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4197 if (ql_set_routing_reg
4198 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
4199 QPRINTK(qdev, HW, ERR,
4200 "Failed to clear promiscous mode.\n");
4201 } else {
4202 clear_bit(QL_PROMISCUOUS, &qdev->flags);
4203 }
4204 }
4205 }
4206
4207 /*
4208 * Set or clear all multicast mode if a
4209 * transition is taking place.
4210 */
4211 if ((ndev->flags & IFF_ALLMULTI) ||
4212 (ndev->mc_count > MAX_MULTICAST_ENTRIES)) {
4213 if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
4214 if (ql_set_routing_reg
4215 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
4216 QPRINTK(qdev, HW, ERR,
4217 "Failed to set all-multi mode.\n");
4218 } else {
4219 set_bit(QL_ALLMULTI, &qdev->flags);
4220 }
4221 }
4222 } else {
4223 if (test_bit(QL_ALLMULTI, &qdev->flags)) {
4224 if (ql_set_routing_reg
4225 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
4226 QPRINTK(qdev, HW, ERR,
4227 "Failed to clear all-multi mode.\n");
4228 } else {
4229 clear_bit(QL_ALLMULTI, &qdev->flags);
4230 }
4231 }
4232 }
4233
4234 if (ndev->mc_count) {
cc288f54
RM
4235 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4236 if (status)
4237 goto exit;
c4e84bde
RM
4238 for (i = 0, mc_ptr = ndev->mc_list; mc_ptr;
4239 i++, mc_ptr = mc_ptr->next)
4240 if (ql_set_mac_addr_reg(qdev, (u8 *) mc_ptr->dmi_addr,
4241 MAC_ADDR_TYPE_MULTI_MAC, i)) {
4242 QPRINTK(qdev, HW, ERR,
4243 "Failed to loadmulticast address.\n");
cc288f54 4244 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
c4e84bde
RM
4245 goto exit;
4246 }
cc288f54 4247 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
c4e84bde
RM
4248 if (ql_set_routing_reg
4249 (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
4250 QPRINTK(qdev, HW, ERR,
4251 "Failed to set multicast match mode.\n");
4252 } else {
4253 set_bit(QL_ALLMULTI, &qdev->flags);
4254 }
4255 }
4256exit:
8587ea35 4257 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
c4e84bde
RM
4258}
4259
4260static int qlge_set_mac_address(struct net_device *ndev, void *p)
4261{
4262 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
4263 struct sockaddr *addr = p;
cc288f54 4264 int status;
c4e84bde 4265
c4e84bde
RM
4266 if (!is_valid_ether_addr(addr->sa_data))
4267 return -EADDRNOTAVAIL;
4268 memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
4269
cc288f54
RM
4270 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4271 if (status)
4272 return status;
cc288f54
RM
4273 status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
4274 MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
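	/* The unicast address sits in the CAM at a per-function offset
	 * (func * MAX_CQ), protected by the MAC address semaphore taken
	 * above.
	 */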
cc288f54
RM
4275 if (status)
4276 QPRINTK(qdev, HW, ERR, "Failed to load MAC address.\n");
4277 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4278 return status;
c4e84bde
RM
4279}
4280
4281static void qlge_tx_timeout(struct net_device *ndev)
4282{
4283 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
6497b607 4284 ql_queue_asic_error(qdev);
c4e84bde
RM
4285}
4286
4287static void ql_asic_reset_work(struct work_struct *work)
4288{
4289 struct ql_adapter *qdev =
4290 container_of(work, struct ql_adapter, asic_reset_work.work);
db98812f 4291 int status;
f2c0d8df 4292 rtnl_lock();
db98812f
RM
4293 status = ql_adapter_down(qdev);
4294 if (status)
4295 goto error;
4296
4297 status = ql_adapter_up(qdev);
4298 if (status)
4299 goto error;
2cd6dbaa
RM
4300
4301 /* Restore rx mode. */
4302 clear_bit(QL_ALLMULTI, &qdev->flags);
4303 clear_bit(QL_PROMISCUOUS, &qdev->flags);
4304 qlge_set_multicast_list(qdev->ndev);
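	/* Clearing the mode bits above forces qlge_set_multicast_list()
	 * to reprogram the routing registers that were lost across the
	 * adapter reset.
	 */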
4305
f2c0d8df 4306 rtnl_unlock();
db98812f
RM
4307 return;
4308error:
4309 QPRINTK(qdev, IFUP, ALERT,
4310 "Driver up/down cycle failed, closing device\n");
f2c0d8df 4311
db98812f
RM
4312 set_bit(QL_ADAPTER_UP, &qdev->flags);
4313 dev_close(qdev->ndev);
4314 rtnl_unlock();
c4e84bde
RM
4315}
4316
b0c2aadf
RM
4317static struct nic_operations qla8012_nic_ops = {
4318 .get_flash = ql_get_8012_flash_params,
4319 .port_initialize = ql_8012_port_initialize,
4320};
4321
cdca8d02
RM
4322static struct nic_operations qla8000_nic_ops = {
4323 .get_flash = ql_get_8000_flash_params,
4324 .port_initialize = ql_8000_port_initialize,
4325};
4326
e4552f51
RM
4327/* Find the pcie function number for the other NIC
4328 * on this chip. Since both NIC functions share a
4329 * common firmware we have the lowest enabled function
4330 * do any common work. Examples would be resetting
4331 * after a fatal firmware error, or doing a firmware
4332 * coredump.
4333 */
4334static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
4335{
4336 int status = 0;
4337 u32 temp;
4338 u32 nic_func1, nic_func2;
4339
4340 status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
4341 &temp);
4342 if (status)
4343 return status;
4344
4345 nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
4346 MPI_TEST_NIC_FUNC_MASK);
4347 nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
4348 MPI_TEST_NIC_FUNC_MASK);
4349
4350 if (qdev->func == nic_func1)
4351 qdev->alt_func = nic_func2;
4352 else if (qdev->func == nic_func2)
4353 qdev->alt_func = nic_func1;
4354 else
4355 status = -EIO;
4356
4357 return status;
4358}
b0c2aadf 4359
e4552f51 4360static int ql_get_board_info(struct ql_adapter *qdev)
c4e84bde 4361{
e4552f51 4362 int status;
c4e84bde
RM
4363 qdev->func =
4364 (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
e4552f51
RM
4365 if (qdev->func > 3)
4366 return -EIO;
4367
4368 status = ql_get_alt_pcie_func(qdev);
4369 if (status)
4370 return status;
4371
4372 qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
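	/* The lower-numbered NIC function is treated as port 0; the port
	 * then selects the per-port XGMAC semaphore, link/init status bits
	 * and MPI mailbox addresses below.
	 */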
4373 if (qdev->port) {
c4e84bde
RM
4374 qdev->xg_sem_mask = SEM_XGMAC1_MASK;
4375 qdev->port_link_up = STS_PL1;
4376 qdev->port_init = STS_PI1;
4377 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
4378 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
4379 } else {
4380 qdev->xg_sem_mask = SEM_XGMAC0_MASK;
4381 qdev->port_link_up = STS_PL0;
4382 qdev->port_init = STS_PI0;
4383 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
4384 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
4385 }
4386 qdev->chip_rev_id = ql_read32(qdev, REV_ID);
b0c2aadf
RM
4387 qdev->device_id = qdev->pdev->device;
4388 if (qdev->device_id == QLGE_DEVICE_ID_8012)
4389 qdev->nic_ops = &qla8012_nic_ops;
cdca8d02
RM
4390 else if (qdev->device_id == QLGE_DEVICE_ID_8000)
4391 qdev->nic_ops = &qla8000_nic_ops;
e4552f51 4392 return status;
c4e84bde
RM
4393}
4394
4395static void ql_release_all(struct pci_dev *pdev)
4396{
4397 struct net_device *ndev = pci_get_drvdata(pdev);
4398 struct ql_adapter *qdev = netdev_priv(ndev);
4399
4400 if (qdev->workqueue) {
4401 destroy_workqueue(qdev->workqueue);
4402 qdev->workqueue = NULL;
4403 }
39aa8165 4404
c4e84bde 4405 if (qdev->reg_base)
8668ae92 4406 iounmap(qdev->reg_base);
c4e84bde
RM
4407 if (qdev->doorbell_area)
4408 iounmap(qdev->doorbell_area);
8aae2600 4409 vfree(qdev->mpi_coredump);
c4e84bde
RM
4410 pci_release_regions(pdev);
4411 pci_set_drvdata(pdev, NULL);
4412}
4413
4414static int __devinit ql_init_device(struct pci_dev *pdev,
4415 struct net_device *ndev, int cards_found)
4416{
4417 struct ql_adapter *qdev = netdev_priv(ndev);
1d1023d0 4418 int err = 0;
c4e84bde 4419
e332471c 4420 memset((void *)qdev, 0, sizeof(*qdev));
c4e84bde
RM
4421 err = pci_enable_device(pdev);
4422 if (err) {
4423 dev_err(&pdev->dev, "PCI device enable failed.\n");
4424 return err;
4425 }
4426
ebd6e774
RM
4427 qdev->ndev = ndev;
4428 qdev->pdev = pdev;
4429 pci_set_drvdata(pdev, ndev);
c4e84bde 4430
bc9167f3
RM
4431 /* Set PCIe read request size */
4432 err = pcie_set_readrq(pdev, 4096);
4433 if (err) {
4434 dev_err(&pdev->dev, "Set readrq failed.\n");
4435 goto err_out;
4436 }
4437
c4e84bde
RM
4438 err = pci_request_regions(pdev, DRV_NAME);
4439 if (err) {
4440 dev_err(&pdev->dev, "PCI region request failed.\n");
ebd6e774 4441 return err;
c4e84bde
RM
4442 }
4443
4444 pci_set_master(pdev);
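	/* Prefer 64-bit DMA and fall back to a 32-bit mask; QL_DMA64 is
	 * recorded so the netdev can advertise NETIF_F_HIGHDMA later in
	 * qlge_probe().
	 */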
6a35528a 4445 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
c4e84bde 4446 set_bit(QL_DMA64, &qdev->flags);
6a35528a 4447 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
c4e84bde 4448 } else {
284901a9 4449 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
c4e84bde 4450 if (!err)
284901a9 4451 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
c4e84bde
RM
4452 }
4453
4454 if (err) {
4455 dev_err(&pdev->dev, "No usable DMA configuration.\n");
4456 goto err_out;
4457 }
4458
73475339
RM
4459 /* Set PCIe reset type for EEH to fundamental. */
4460 pdev->needs_freset = 1;
6d190c6e 4461 pci_save_state(pdev);
c4e84bde
RM
4462 qdev->reg_base =
4463 ioremap_nocache(pci_resource_start(pdev, 1),
4464 pci_resource_len(pdev, 1));
4465 if (!qdev->reg_base) {
4466 dev_err(&pdev->dev, "Register mapping failed.\n");
4467 err = -ENOMEM;
4468 goto err_out;
4469 }
4470
4471 qdev->doorbell_area_size = pci_resource_len(pdev, 3);
4472 qdev->doorbell_area =
4473 ioremap_nocache(pci_resource_start(pdev, 3),
4474 pci_resource_len(pdev, 3));
4475 if (!qdev->doorbell_area) {
4476 dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
4477 err = -ENOMEM;
4478 goto err_out;
4479 }
4480
e4552f51
RM
4481 err = ql_get_board_info(qdev);
4482 if (err) {
4483 dev_err(&pdev->dev, "Register access failed.\n");
4484 err = -EIO;
4485 goto err_out;
4486 }
c4e84bde
RM
4487 qdev->msg_enable = netif_msg_init(debug, default_msg);
4488 spin_lock_init(&qdev->hw_lock);
4489 spin_lock_init(&qdev->stats_lock);
4490
8aae2600
RM
4491 if (qlge_mpi_coredump) {
4492 qdev->mpi_coredump =
4493 vmalloc(sizeof(struct ql_mpi_coredump));
4494 if (qdev->mpi_coredump == NULL) {
4495 dev_err(&pdev->dev, "Coredump alloc failed.\n");
4496 err = -ENOMEM;
4497 goto err_out;
4498 }
4499 }
c4e84bde 4500 /* make sure the EEPROM is good */
b0c2aadf 4501 err = qdev->nic_ops->get_flash(qdev);
c4e84bde
RM
4502 if (err) {
4503 dev_err(&pdev->dev, "Invalid FLASH.\n");
4504 goto err_out;
4505 }
4506
c4e84bde
RM
4507 memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
4508
4509 /* Set up the default ring sizes. */
4510 qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
4511 qdev->rx_ring_size = NUM_RX_RING_ENTRIES;
4512
4513 /* Set up the coalescing parameters. */
4514 qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
4515 qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
4516 qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4517 qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4518
4519 /*
4520 * Set up the operating parameters.
4521 */
4522 qdev->rx_csum = 1;
c4e84bde
RM
4523 qdev->workqueue = create_singlethread_workqueue(ndev->name);
4524 INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
4525 INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
4526 INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
bcc2cb3b 4527 INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
2ee1e272 4528 INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
8aae2600 4529 INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log);
bcc2cb3b 4530 init_completion(&qdev->ide_completion);
c4e84bde
RM
4531
4532 if (!cards_found) {
4533 dev_info(&pdev->dev, "%s\n", DRV_STRING);
4534 dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
4535 DRV_NAME, DRV_VERSION);
4536 }
4537 return 0;
4538err_out:
4539 ql_release_all(pdev);
4540 pci_disable_device(pdev);
4541 return err;
4542}
4543
25ed7849
SH
4544static const struct net_device_ops qlge_netdev_ops = {
4545 .ndo_open = qlge_open,
4546 .ndo_stop = qlge_close,
4547 .ndo_start_xmit = qlge_send,
4548 .ndo_change_mtu = qlge_change_mtu,
4549 .ndo_get_stats = qlge_get_stats,
4550 .ndo_set_multicast_list = qlge_set_multicast_list,
4551 .ndo_set_mac_address = qlge_set_mac_address,
4552 .ndo_validate_addr = eth_validate_addr,
4553 .ndo_tx_timeout = qlge_tx_timeout,
01e6b953
RM
4554 .ndo_vlan_rx_register = qlge_vlan_rx_register,
4555 .ndo_vlan_rx_add_vid = qlge_vlan_rx_add_vid,
4556 .ndo_vlan_rx_kill_vid = qlge_vlan_rx_kill_vid,
25ed7849
SH
4557};
4558
c4e84bde
RM
4559static int __devinit qlge_probe(struct pci_dev *pdev,
4560 const struct pci_device_id *pci_entry)
4561{
4562 struct net_device *ndev = NULL;
4563 struct ql_adapter *qdev = NULL;
4564 static int cards_found = 0;
4565 int err = 0;
4566
1e213303
RM
4567 ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
4568 min(MAX_CPUS, (int)num_online_cpus()));
c4e84bde
RM
4569 if (!ndev)
4570 return -ENOMEM;
4571
4572 err = ql_init_device(pdev, ndev, cards_found);
4573 if (err < 0) {
4574 free_netdev(ndev);
4575 return err;
4576 }
4577
4578 qdev = netdev_priv(ndev);
4579 SET_NETDEV_DEV(ndev, &pdev->dev);
4580 ndev->features = (0
4581 | NETIF_F_IP_CSUM
4582 | NETIF_F_SG
4583 | NETIF_F_TSO
4584 | NETIF_F_TSO6
4585 | NETIF_F_TSO_ECN
4586 | NETIF_F_HW_VLAN_TX
4587 | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER);
22bdd4f5 4588 ndev->features |= NETIF_F_GRO;
c4e84bde
RM
4589
4590 if (test_bit(QL_DMA64, &qdev->flags))
4591 ndev->features |= NETIF_F_HIGHDMA;
4592
4593 /*
4594 * Set up net_device structure.
4595 */
4596 ndev->tx_queue_len = qdev->tx_ring_size;
4597 ndev->irq = pdev->irq;
25ed7849
SH
4598
4599 ndev->netdev_ops = &qlge_netdev_ops;
c4e84bde 4600 SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops);
c4e84bde 4601 ndev->watchdog_timeo = 10 * HZ;
25ed7849 4602
c4e84bde
RM
4603 err = register_netdev(ndev);
4604 if (err) {
4605 dev_err(&pdev->dev, "net device registration failed.\n");
4606 ql_release_all(pdev);
4607 pci_disable_device(pdev);
4608 return err;
4609 }
6a473308 4610 ql_link_off(qdev);
c4e84bde 4611 ql_display_dev_info(ndev);
9dfbbaa6 4612 atomic_set(&qdev->lb_count, 0);
c4e84bde
RM
4613 cards_found++;
4614 return 0;
4615}
4616
9dfbbaa6
RM
4617netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev)
4618{
4619 return qlge_send(skb, ndev);
4620}
4621
4622int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
4623{
4624 return ql_clean_inbound_rx_ring(rx_ring, budget);
4625}
4626
c4e84bde
RM
4627static void __devexit qlge_remove(struct pci_dev *pdev)
4628{
4629 struct net_device *ndev = pci_get_drvdata(pdev);
4630 unregister_netdev(ndev);
4631 ql_release_all(pdev);
4632 pci_disable_device(pdev);
4633 free_netdev(ndev);
4634}
4635
6d190c6e
RM
4636/* Clean up resources without touching hardware. */
4637static void ql_eeh_close(struct net_device *ndev)
4638{
4639 int i;
4640 struct ql_adapter *qdev = netdev_priv(ndev);
4641
4642 if (netif_carrier_ok(ndev)) {
4643 netif_carrier_off(ndev);
4644 netif_stop_queue(ndev);
4645 }
4646
4647 if (test_bit(QL_ADAPTER_UP, &qdev->flags))
4648 cancel_delayed_work_sync(&qdev->asic_reset_work);
4649 cancel_delayed_work_sync(&qdev->mpi_reset_work);
4650 cancel_delayed_work_sync(&qdev->mpi_work);
4651 cancel_delayed_work_sync(&qdev->mpi_idc_work);
8aae2600 4652 cancel_delayed_work_sync(&qdev->mpi_core_to_log);
6d190c6e
RM
4653 cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
4654
4655 for (i = 0; i < qdev->rss_ring_count; i++)
4656 netif_napi_del(&qdev->rx_ring[i].napi);
4657
4658 clear_bit(QL_ADAPTER_UP, &qdev->flags);
4659 ql_tx_ring_clean(qdev);
4660 ql_free_rx_buffers(qdev);
4661 ql_release_adapter_resources(qdev);
4662}
4663
c4e84bde
RM
4664/*
4665 * This callback is called by the PCI subsystem whenever
4666 * a PCI bus error is detected.
4667 */
4668static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
4669 enum pci_channel_state state)
4670{
4671 struct net_device *ndev = pci_get_drvdata(pdev);
fbc663ce 4672
6d190c6e
RM
4673 switch (state) {
4674 case pci_channel_io_normal:
4675 return PCI_ERS_RESULT_CAN_RECOVER;
4676 case pci_channel_io_frozen:
4677 netif_device_detach(ndev);
4678 if (netif_running(ndev))
4679 ql_eeh_close(ndev);
4680 pci_disable_device(pdev);
4681 return PCI_ERS_RESULT_NEED_RESET;
4682 case pci_channel_io_perm_failure:
4683 dev_err(&pdev->dev,
4684 "%s: pci_channel_io_perm_failure.\n", __func__);
fbc663ce 4685 return PCI_ERS_RESULT_DISCONNECT;
6d190c6e 4686 }
c4e84bde
RM
4687
4688 /* Request a slot reset. */
4689 return PCI_ERS_RESULT_NEED_RESET;
4690}
4691
4692/*
4693 * This callback is called after the PCI bus has been reset.
4694 * Basically, this tries to restart the card from scratch.
4695 * This is a shortened version of the device probe/discovery code,
4696 * it resembles the first half of the qlge_probe() routine.
4697 */
4698static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
4699{
4700 struct net_device *ndev = pci_get_drvdata(pdev);
4701 struct ql_adapter *qdev = netdev_priv(ndev);
4702
6d190c6e
RM
4703 pdev->error_state = pci_channel_io_normal;
4704
4705 pci_restore_state(pdev);
c4e84bde
RM
4706 if (pci_enable_device(pdev)) {
4707 QPRINTK(qdev, IFUP, ERR,
4708 "Cannot re-enable PCI device after reset.\n");
4709 return PCI_ERS_RESULT_DISCONNECT;
4710 }
c4e84bde 4711 pci_set_master(pdev);
c4e84bde
RM
4712 return PCI_ERS_RESULT_RECOVERED;
4713}
4714
4715static void qlge_io_resume(struct pci_dev *pdev)
4716{
4717 struct net_device *ndev = pci_get_drvdata(pdev);
4718 struct ql_adapter *qdev = netdev_priv(ndev);
6d190c6e 4719 int err = 0;
c4e84bde 4720
6d190c6e
RM
4721 if (ql_adapter_reset(qdev))
4722 QPRINTK(qdev, DRV, ERR, "reset FAILED!\n");
c4e84bde 4723 if (netif_running(ndev)) {
6d190c6e
RM
4724 err = qlge_open(ndev);
4725 if (err) {
c4e84bde
RM
4726 QPRINTK(qdev, IFUP, ERR,
4727 "Device initialization failed after reset.\n");
4728 return;
4729 }
6d190c6e
RM
4730 } else {
4731 QPRINTK(qdev, IFUP, ERR,
4732 "Device was not running prior to EEH.\n");
c4e84bde 4733 }
c4e84bde
RM
4734 netif_device_attach(ndev);
4735}
4736
4737static struct pci_error_handlers qlge_err_handler = {
4738 .error_detected = qlge_io_error_detected,
4739 .slot_reset = qlge_io_slot_reset,
4740 .resume = qlge_io_resume,
4741};
4742
4743static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
4744{
4745 struct net_device *ndev = pci_get_drvdata(pdev);
4746 struct ql_adapter *qdev = netdev_priv(ndev);
6b318cb3 4747 int err;
c4e84bde
RM
4748
4749 netif_device_detach(ndev);
4750
4751 if (netif_running(ndev)) {
4752 err = ql_adapter_down(qdev);
4753 if (err)
4754 return err;
4755 }
4756
bc083ce9 4757 ql_wol(qdev);
c4e84bde
RM
4758 err = pci_save_state(pdev);
4759 if (err)
4760 return err;
4761
4762 pci_disable_device(pdev);
4763
4764 pci_set_power_state(pdev, pci_choose_state(pdev, state));
4765
4766 return 0;
4767}
4768
04da2cf9 4769#ifdef CONFIG_PM
c4e84bde
RM
4770static int qlge_resume(struct pci_dev *pdev)
4771{
4772 struct net_device *ndev = pci_get_drvdata(pdev);
4773 struct ql_adapter *qdev = netdev_priv(ndev);
4774 int err;
4775
4776 pci_set_power_state(pdev, PCI_D0);
4777 pci_restore_state(pdev);
4778 err = pci_enable_device(pdev);
4779 if (err) {
4780 QPRINTK(qdev, IFUP, ERR, "Cannot enable PCI device from suspend\n");
4781 return err;
4782 }
4783 pci_set_master(pdev);
4784
4785 pci_enable_wake(pdev, PCI_D3hot, 0);
4786 pci_enable_wake(pdev, PCI_D3cold, 0);
4787
4788 if (netif_running(ndev)) {
4789 err = ql_adapter_up(qdev);
4790 if (err)
4791 return err;
4792 }
4793
4794 netif_device_attach(ndev);
4795
4796 return 0;
4797}
04da2cf9 4798#endif /* CONFIG_PM */
c4e84bde
RM
4799
4800static void qlge_shutdown(struct pci_dev *pdev)
4801{
4802 qlge_suspend(pdev, PMSG_SUSPEND);
4803}
4804
4805static struct pci_driver qlge_driver = {
4806 .name = DRV_NAME,
4807 .id_table = qlge_pci_tbl,
4808 .probe = qlge_probe,
4809 .remove = __devexit_p(qlge_remove),
4810#ifdef CONFIG_PM
4811 .suspend = qlge_suspend,
4812 .resume = qlge_resume,
4813#endif
4814 .shutdown = qlge_shutdown,
4815 .err_handler = &qlge_err_handler
4816};
4817
4818static int __init qlge_init_module(void)
4819{
4820 return pci_register_driver(&qlge_driver);
4821}
4822
4823static void __exit qlge_exit(void)
4824{
4825 pci_unregister_driver(&qlge_driver);
4826}
4827
4828module_init(qlge_init_module);
4829module_exit(qlge_exit);