/*
 * QLogic qlge NIC HBA Driver
 * Copyright (c) 2003-2008 QLogic Corporation
 * See LICENSE.qlge for copyright and licensing details.
 * Author:     Linux qlge network device driver by
 *                      Ron Mercer <ron.mercer@qlogic.com>
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
#include <linux/mempool.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <net/ip6_checksum.h>

#include "qlge.h"
char qlge_driver_name[] = DRV_NAME;
const char qlge_driver_version[] = DRV_VERSION;

MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
MODULE_DESCRIPTION(DRV_STRING " ");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
static const u32 default_msg =
    NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
/* NETIF_MSG_TIMER |	*/
    NETIF_MSG_IFDOWN |
    NETIF_MSG_IFUP |
    NETIF_MSG_RX_ERR |
    NETIF_MSG_TX_ERR |
    NETIF_MSG_TX_QUEUED |
    NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS |
/* NETIF_MSG_PKTDATA | */
    NETIF_MSG_HW | NETIF_MSG_WOL | 0;
static int debug = 0x00007fff;	/* defaults above */
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
#define MSIX_IRQ 0
#define MSI_IRQ 1
#define LEG_IRQ 2
static int irq_type = MSIX_IRQ;
module_param(irq_type, int, MSIX_IRQ);
MODULE_PARM_DESC(irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
static struct pci_device_id qlge_pci_tbl[] __devinitdata = {
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID)},
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID1)},
	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
/* This hardware semaphore causes exclusive access to
 * resources shared between the NIC driver, MPI firmware,
 * FCOE firmware and the FC driver.
 */
static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
{
	u32 sem_bits = 0;

	switch (sem_mask) {
	case SEM_XGMAC0_MASK:
		sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
		break;
	case SEM_XGMAC1_MASK:
		sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
		break;
	case SEM_ICB_MASK:
		sem_bits = SEM_SET << SEM_ICB_SHIFT;
		break;
	case SEM_MAC_ADDR_MASK:
		sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
		break;
	case SEM_FLASH_MASK:
		sem_bits = SEM_SET << SEM_FLASH_SHIFT;
		break;
	case SEM_PROBE_MASK:
		sem_bits = SEM_SET << SEM_PROBE_SHIFT;
		break;
	case SEM_RT_IDX_MASK:
		sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
		break;
	case SEM_PROC_REG_MASK:
		sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
		break;
	default:
		QPRINTK(qdev, PROBE, ALERT, "Bad Semaphore mask!.\n");
		return -EINVAL;
	}

	ql_write32(qdev, SEM, sem_bits | sem_mask);
	return !(ql_read32(qdev, SEM) & sem_bits);
}
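
/* Note on the protocol above: SEM_SET is written into the sub-field
 * for the requested resource together with the mask, then SEM is read
 * back.  If the set bit did not stick, another owner (NIC driver, MPI
 * firmware or FC driver) already holds the resource and the trylock
 * fails.
 */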

int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
{
	unsigned int seconds = 3;

	do {
		if (!ql_sem_trylock(qdev, sem_mask))
			return 0;
		ssleep(1);
	} while (--seconds);
	return -ETIMEDOUT;
}

void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
{
	ql_write32(qdev, SEM, sem_mask);
	ql_read32(qdev, SEM);	/* flush */
}

/* This function waits for a specific bit to come ready
 * in a given register.  It is used mostly by the initialize
 * process, but is also used in kernel thread API such as
 * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
 */
int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
{
	u32 temp;
	int count = UDELAY_COUNT;

	while (count) {
		temp = ql_read32(qdev, reg);

		/* check for errors */
		if (temp & err_bit) {
			QPRINTK(qdev, PROBE, ALERT,
				"register 0x%.08x access error, value = 0x%.08x!.\n",
				reg, temp);
			return -EIO;
		} else if (temp & bit)
			return 0;
		udelay(UDELAY_DELAY);
		count--;
	}
	QPRINTK(qdev, PROBE, ALERT,
		"Timed out waiting for reg %x to come ready.\n", reg);
	return -ETIMEDOUT;
}
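
/* Typical usage, as in the flash and XGMAC accessors further down:
 *
 *	status = ql_wait_reg_rdy(qdev, FLASH_ADDR,
 *				 FLASH_ADDR_RDY, FLASH_ADDR_ERR);
 *	if (status)
 *		return status;
 *
 * The total poll time is bounded by UDELAY_COUNT iterations of
 * UDELAY_DELAY microseconds (both defined in qlge.h).
 */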

/* The CFG register is used to download TX and RX control blocks
 * to the chip. This function waits for an operation to complete.
 */
static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
{
	int count = UDELAY_COUNT;
	u32 temp;

	while (count) {
		temp = ql_read32(qdev, CFG);
		if (temp & CFG_LE)
			return -EIO;
		if (!(temp & bit))
			return 0;
		udelay(UDELAY_DELAY);
		count--;
	}
	return -ETIMEDOUT;
}

/* Used to issue init control blocks to hw. Maps control block,
 * sets address, triggers download, waits for completion.
 */
int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
		 u16 q_id)
{
	u64 map;
	int status = 0;
	int direction;
	u32 mask;
	u32 value;

	direction =
	    (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
	    PCI_DMA_FROMDEVICE;

	map = pci_map_single(qdev->pdev, ptr, size, direction);
	if (pci_dma_mapping_error(qdev->pdev, map)) {
		QPRINTK(qdev, IFUP, ERR, "Couldn't map DMA area.\n");
		return -ENOMEM;
	}

	status = ql_wait_cfg(qdev, bit);
	if (status) {
		QPRINTK(qdev, IFUP, ERR,
			"Timed out waiting for CFG to come ready.\n");
		goto exit;
	}

	status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
	if (status)
		goto exit;
	ql_write32(qdev, ICB_L, (u32) map);
	ql_write32(qdev, ICB_H, (u32) (map >> 32));
	ql_sem_unlock(qdev, SEM_ICB_MASK);	/* does flush too */

	mask = CFG_Q_MASK | (bit << 16);
	value = bit | (q_id << CFG_Q_SHIFT);
	ql_write32(qdev, CFG, (mask | value));

	/*
	 * Wait for the bit to clear after signaling hw.
	 */
	status = ql_wait_cfg(qdev, bit);
exit:
	pci_unmap_single(qdev->pdev, map, size, direction);
	return status;
}
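
/* The download sequence above is: map the control block for DMA, wait
 * for any prior CFG operation to finish, point ICB_L/ICB_H at the
 * mapped block while holding the ICB semaphore, kick off the transfer
 * by writing CFG, then wait for the hardware to clear the busy bit
 * before unmapping.
 */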

/* Get a specific MAC address from the CAM.  Used for debug and reg dump. */
int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
			u32 *value)
{
	u32 offset = 0;
	int status;

	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return status;
	switch (type) {
	case MAC_ADDR_TYPE_MULTI_MAC:
	case MAC_ADDR_TYPE_CAM_MAC:
		{
			status =
			    ql_wait_reg_rdy(qdev,
					    MAC_ADDR_IDX, MAC_ADDR_MW, MAC_ADDR_E);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) |	/* offset */
				   (index << MAC_ADDR_IDX_SHIFT) |	/* index */
				   MAC_ADDR_ADR | MAC_ADDR_RS | type);	/* type */
			status =
			    ql_wait_reg_rdy(qdev,
					    MAC_ADDR_IDX, MAC_ADDR_MR, MAC_ADDR_E);
			if (status)
				goto exit;
			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			status =
			    ql_wait_reg_rdy(qdev,
					    MAC_ADDR_IDX, MAC_ADDR_MW, MAC_ADDR_E);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) |	/* offset */
				   (index << MAC_ADDR_IDX_SHIFT) |	/* index */
				   MAC_ADDR_ADR | MAC_ADDR_RS | type);	/* type */
			status =
			    ql_wait_reg_rdy(qdev,
					    MAC_ADDR_IDX, MAC_ADDR_MR, MAC_ADDR_E);
			if (status)
				goto exit;
			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			if (type == MAC_ADDR_TYPE_CAM_MAC) {
				status =
				    ql_wait_reg_rdy(qdev,
						    MAC_ADDR_IDX, MAC_ADDR_MW,
						    MAC_ADDR_E);
				if (status)
					goto exit;
				ql_write32(qdev, MAC_ADDR_IDX, (offset++) |	/* offset */
					   (index << MAC_ADDR_IDX_SHIFT) |	/* index */
					   MAC_ADDR_ADR | MAC_ADDR_RS | type);	/* type */
				status =
				    ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
						    MAC_ADDR_MR, MAC_ADDR_E);
				if (status)
					goto exit;
				*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			}
			break;
		}
	case MAC_ADDR_TYPE_VLAN:
	case MAC_ADDR_TYPE_MULTI_FLTR:
	default:
		QPRINTK(qdev, IFUP, CRIT,
			"Address type %d not yet supported.\n", type);
		status = -EPERM;
	}
exit:
	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
	return status;
}

/* Set up a MAC, multicast or VLAN address for the
 * inbound frame matching.
 */
static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
			       u16 index)
{
	u32 offset = 0;
	int status = 0;

	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return status;
	switch (type) {
	case MAC_ADDR_TYPE_MULTI_MAC:
	case MAC_ADDR_TYPE_CAM_MAC:
		{
			u32 cam_output;
			u32 upper = (addr[0] << 8) | addr[1];
			u32 lower =
			    (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
			    (addr[5]);

			QPRINTK(qdev, IFUP, INFO,
				"Adding %s address %pM"
				" at index %d in the CAM.\n",
				((type ==
				  MAC_ADDR_TYPE_MULTI_MAC) ? "MULTICAST" :
				 "UNICAST"), addr, index);

			status =
			    ql_wait_reg_rdy(qdev,
					    MAC_ADDR_IDX, MAC_ADDR_MW, MAC_ADDR_E);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) |	/* offset */
				   (index << MAC_ADDR_IDX_SHIFT) |	/* index */
				   type);	/* type */
			ql_write32(qdev, MAC_ADDR_DATA, lower);
			status =
			    ql_wait_reg_rdy(qdev,
					    MAC_ADDR_IDX, MAC_ADDR_MW, MAC_ADDR_E);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) |	/* offset */
				   (index << MAC_ADDR_IDX_SHIFT) |	/* index */
				   type);	/* type */
			ql_write32(qdev, MAC_ADDR_DATA, upper);
			status =
			    ql_wait_reg_rdy(qdev,
					    MAC_ADDR_IDX, MAC_ADDR_MW, MAC_ADDR_E);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset) |	/* offset */
				   (index << MAC_ADDR_IDX_SHIFT) |	/* index */
				   type);	/* type */
			/* This field should also include the queue id
			   and possibly the function id.  Right now we hardcode
			   the route field to NIC core.
			 */
			if (type == MAC_ADDR_TYPE_CAM_MAC) {
				cam_output = (CAM_OUT_ROUTE_NIC |
					      (qdev->func << CAM_OUT_FUNC_SHIFT) |
					      (qdev->rss_ring_first_cq_id <<
					       CAM_OUT_CQ_ID_SHIFT));
				if (qdev->vlgrp)
					cam_output |= CAM_OUT_RV;
				/* route to NIC core */
				ql_write32(qdev, MAC_ADDR_DATA, cam_output);
			}
			break;
		}
	case MAC_ADDR_TYPE_VLAN:
		{
			u32 enable_bit = *((u32 *) &addr[0]);
			/* For VLAN, the addr actually holds a bit that
			 * either enables or disables the vlan id we are
			 * addressing. It's either MAC_ADDR_E on or off.
			 * That's bit-27 we're talking about.
			 */
			QPRINTK(qdev, IFUP, INFO, "%s VLAN ID %d %s the CAM.\n",
				(enable_bit ? "Adding" : "Removing"),
				index, (enable_bit ? "to" : "from"));

			status =
			    ql_wait_reg_rdy(qdev,
					    MAC_ADDR_IDX, MAC_ADDR_MW, MAC_ADDR_E);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, offset |	/* offset */
				   (index << MAC_ADDR_IDX_SHIFT) |	/* index */
				   type |	/* type */
				   enable_bit);	/* enable/disable */
			break;
		}
	case MAC_ADDR_TYPE_MULTI_FLTR:
	default:
		QPRINTK(qdev, IFUP, CRIT,
			"Address type %d not yet supported.\n", type);
		status = -EPERM;
	}
exit:
	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
	return status;
}

/* Get a specific frame routing value from the CAM.
 * Used for debug and reg dump.
 */
int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
{
	int status = 0;

	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
	if (status)
		goto exit;

	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, RT_IDX_E);
	if (status)
		goto exit;

	ql_write32(qdev, RT_IDX,
		   RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, RT_IDX_E);
	if (status)
		goto exit;
	*value = ql_read32(qdev, RT_DATA);
exit:
	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
	return status;
}

/* The NIC function for this chip has 16 routing indexes.  Each one can be used
 * to route different frame types to various inbound queues.  We send broadcast/
 * multicast/error frames to the default queue for slow handling,
 * and CAM hit/RSS frames to the fast handling queues.
 */
static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
			      int enable)
{
	int status;
	u32 value = 0;

	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
	if (status)
		return status;

	QPRINTK(qdev, IFUP, DEBUG,
		"%s %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s mask %s the routing reg.\n",
		(enable ? "Adding" : "Removing"),
		((index == RT_IDX_ALL_ERR_SLOT) ? "MAC ERROR/ALL ERROR" : ""),
		((index == RT_IDX_IP_CSUM_ERR_SLOT) ? "IP CSUM ERROR" : ""),
		((index ==
		  RT_IDX_TCP_UDP_CSUM_ERR_SLOT) ? "TCP/UDP CSUM ERROR" : ""),
		((index == RT_IDX_BCAST_SLOT) ? "BROADCAST" : ""),
		((index == RT_IDX_MCAST_MATCH_SLOT) ? "MULTICAST MATCH" : ""),
		((index == RT_IDX_ALLMULTI_SLOT) ? "ALL MULTICAST MATCH" : ""),
		((index == RT_IDX_UNUSED6_SLOT) ? "UNUSED6" : ""),
		((index == RT_IDX_UNUSED7_SLOT) ? "UNUSED7" : ""),
		((index == RT_IDX_RSS_MATCH_SLOT) ? "RSS ALL/IPV4 MATCH" : ""),
		((index == RT_IDX_RSS_IPV6_SLOT) ? "RSS IPV6" : ""),
		((index == RT_IDX_RSS_TCP4_SLOT) ? "RSS TCP4" : ""),
		((index == RT_IDX_RSS_TCP6_SLOT) ? "RSS TCP6" : ""),
		((index == RT_IDX_CAM_HIT_SLOT) ? "CAM HIT" : ""),
		((index == RT_IDX_UNUSED013) ? "UNUSED13" : ""),
		((index == RT_IDX_UNUSED014) ? "UNUSED14" : ""),
		((index == RT_IDX_PROMISCUOUS_SLOT) ? "PROMISCUOUS" : ""),
		(enable ? "to" : "from"));

	switch (mask) {
	case RT_IDX_CAM_HIT:
		{
			value = RT_IDX_DST_CAM_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_VALID:	/* Promiscuous Mode frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_ERR:	/* Pass up MAC,IP,TCP/UDP error frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_BCAST:	/* Pass up Broadcast frames to default Q. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_MCAST:	/* Pass up All Multicast frames. */
		{
			value = RT_IDX_DST_CAM_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_MCAST_MATCH:	/* Pass up matched Multicast frames. */
		{
			value = RT_IDX_DST_CAM_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_RSS_MATCH:	/* Pass up matched RSS frames. */
		{
			value = RT_IDX_DST_RSS |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case 0:		/* Clear the E-bit on an entry. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (index << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	default:
		QPRINTK(qdev, IFUP, ERR, "Mask type %d not yet supported.\n",
			mask);
		status = -EPERM;
		goto exit;
	}

	if (value) {
		status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
		if (status)
			goto exit;
		value |= (enable ? RT_IDX_E : 0);
		ql_write32(qdev, RT_IDX, value);
		ql_write32(qdev, RT_DATA, enable ? mask : 0);
	}
exit:
	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
	return status;
}

static void ql_enable_interrupts(struct ql_adapter *qdev)
{
	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
}

static void ql_disable_interrupts(struct ql_adapter *qdev)
{
	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
}

/* If we're running with multiple MSI-X vectors then we enable on the fly.
 * Otherwise, we may have multiple outstanding workers and don't want to
 * enable until the last one finishes. In this case, the irq_cnt gets
 * incremented every time we queue a worker and decremented every time
 * a worker finishes.  Once it hits zero we enable the interrupt.
 */
u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
	u32 var = 0;
	unsigned long hw_flags = 0;
	struct intr_context *ctx = qdev->intr_context + intr;

	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
		/* Always enable if we're MSIX multi interrupts and
		 * it's not the default (zeroeth) interrupt.
		 */
		ql_write32(qdev, INTR_EN,
			   ctx->intr_en_mask);
		var = ql_read32(qdev, STS);
		return var;
	}

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	if (atomic_dec_and_test(&ctx->irq_cnt)) {
		ql_write32(qdev, INTR_EN,
			   ctx->intr_en_mask);
		var = ql_read32(qdev, STS);
	}
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return var;
}
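
/* This pairs with ql_disable_completion_interrupt() below: each
 * disable bumps irq_cnt, and the interrupt is physically re-enabled
 * only when a matching enable call brings the count back to zero.
 * MSI-X vectors other than vector zero skip the counting entirely.
 */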

static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
	u32 var = 0;
	unsigned long hw_flags;
	struct intr_context *ctx;

	/* HW disables for us if we're MSIX multi interrupts and
	 * it's not the default (zeroeth) interrupt.
	 */
	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
		return 0;

	ctx = qdev->intr_context + intr;
	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	if (!atomic_read(&ctx->irq_cnt)) {
		ql_write32(qdev, INTR_EN,
			   ctx->intr_dis_mask);
		var = ql_read32(qdev, STS);
	}
	atomic_inc(&ctx->irq_cnt);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return var;
}

static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
{
	int i;

	for (i = 0; i < qdev->intr_count; i++) {
		/* The enable call does an atomic_dec_and_test
		 * and enables only if the result is zero.
		 * So we precharge it here.
		 */
		if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
			     i == 0))
			atomic_set(&qdev->intr_context[i].irq_cnt, 1);
		ql_enable_completion_interrupt(qdev, i);
	}
}

static int ql_read_flash_word(struct ql_adapter *qdev, int offset, u32 *data)
{
	int status = 0;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
				 FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
	if (status)
		goto exit;
	/* set up for reg read */
	ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
				 FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
	if (status)
		goto exit;
	/* get the data */
	*data = ql_read32(qdev, FLASH_DATA);
exit:
	return status;
}

static int ql_get_flash_params(struct ql_adapter *qdev)
{
	int i;
	int status = 0;
	u32 *p = (u32 *)&qdev->flash;

	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
		return -ETIMEDOUT;

	for (i = 0; i < sizeof(qdev->flash) / sizeof(u32); i++, p++) {
		status = ql_read_flash_word(qdev, i, p);
		if (status) {
			QPRINTK(qdev, IFUP, ERR, "Error reading flash.\n");
			goto exit;
		}
	}
exit:
	ql_sem_unlock(qdev, SEM_FLASH_MASK);
	return status;
}

/* xgmac registers are located behind the xgmac_addr and xgmac_data
 * register pair.  Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 */
static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
{
	int status;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
				 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		return status;
	/* write the data to the data reg */
	ql_write32(qdev, XGMAC_DATA, data);
	/* trigger the write */
	ql_write32(qdev, XGMAC_ADDR, reg);
	return status;
}

/* xgmac registers are located behind the xgmac_addr and xgmac_data
 * register pair.  Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 */
int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
{
	int status = 0;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
				 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		goto exit;
	/* set up for reg read */
	ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
				 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		goto exit;
	/* get the data */
	*data = ql_read32(qdev, XGMAC_DATA);
exit:
	return status;
}

/* This is used for reading the 64-bit statistics regs. */
int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
{
	int status = 0;
	u32 hi = 0;
	u32 lo = 0;

	status = ql_read_xgmac_reg(qdev, reg, &lo);
	if (status)
		goto exit;

	status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
	if (status)
		goto exit;

	*data = (u64) lo | ((u64) hi << 32);

exit:
	return status;
}

/* Take the MAC Core out of reset.
 * Enable statistics counting.
 * Take the transmitter/receiver out of reset.
 * This functionality may be done in the MPI firmware at a
 * later date.
 */
static int ql_port_initialize(struct ql_adapter *qdev)
{
	int status = 0;
	u32 data;

	if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
		/* Another function has the semaphore, so
		 * wait for the port init bit to come ready.
		 */
		QPRINTK(qdev, LINK, INFO,
			"Another function has the semaphore, so wait for the port init bit to come ready.\n");
		status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
		if (status) {
			QPRINTK(qdev, LINK, CRIT,
				"Port initialize timed out.\n");
		}
		return status;
	}

	QPRINTK(qdev, LINK, INFO, "Got xgmac semaphore!.\n");
	/* Set the core reset. */
	status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
	if (status)
		goto end;
	data |= GLOBAL_CFG_RESET;
	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
	if (status)
		goto end;

	/* Clear the core reset and turn on jumbo for receiver. */
	data &= ~GLOBAL_CFG_RESET;	/* Clear core reset. */
	data |= GLOBAL_CFG_JUMBO;	/* Turn on jumbo. */
	data |= GLOBAL_CFG_TX_STAT_EN;
	data |= GLOBAL_CFG_RX_STAT_EN;
	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
	if (status)
		goto end;

	/* Enable transmitter, and clear its reset. */
	status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
	if (status)
		goto end;
	data &= ~TX_CFG_RESET;	/* Clear the TX MAC reset. */
	data |= TX_CFG_EN;	/* Enable the transmitter. */
	status = ql_write_xgmac_reg(qdev, TX_CFG, data);
	if (status)
		goto end;

	/* Enable receiver and clear its reset. */
	status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
	if (status)
		goto end;
	data &= ~RX_CFG_RESET;	/* Clear the RX MAC reset. */
	data |= RX_CFG_EN;	/* Enable the receiver. */
	status = ql_write_xgmac_reg(qdev, RX_CFG, data);
	if (status)
		goto end;

	/* Turn on jumbo. */
	status =
	    ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
	if (status)
		goto end;
	status =
	    ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
	if (status)
		goto end;

	/* Signal to the world that the port is enabled. */
	ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
end:
	ql_sem_unlock(qdev, qdev->xg_sem_mask);
	return status;
}

/* Get the next large buffer. */
static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
{
	struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
	rx_ring->lbq_curr_idx++;
	if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
		rx_ring->lbq_curr_idx = 0;
	rx_ring->lbq_free_cnt++;
	return lbq_desc;
}

/* Get the next small buffer. */
static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
{
	struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
	rx_ring->sbq_curr_idx++;
	if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
		rx_ring->sbq_curr_idx = 0;
	rx_ring->sbq_free_cnt++;
	return sbq_desc;
}

/* Update an rx ring index. */
static void ql_update_cq(struct rx_ring *rx_ring)
{
	rx_ring->cnsmr_idx++;
	rx_ring->curr_entry++;
	if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
		rx_ring->cnsmr_idx = 0;
		rx_ring->curr_entry = rx_ring->cq_base;
	}
}

static void ql_write_cq_idx(struct rx_ring *rx_ring)
{
	ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
}

/* Process (refill) a large buffer queue. */
static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	int clean_idx = rx_ring->lbq_clean_idx;
	struct bq_desc *lbq_desc;
	struct bq_element *bq;
	u64 map;
	int i;

	while (rx_ring->lbq_free_cnt > 16) {
		for (i = 0; i < 16; i++) {
			QPRINTK(qdev, RX_STATUS, DEBUG,
				"lbq: try cleaning clean_idx = %d.\n",
				clean_idx);
			lbq_desc = &rx_ring->lbq[clean_idx];
			bq = lbq_desc->bq;
			if (lbq_desc->p.lbq_page == NULL) {
				QPRINTK(qdev, RX_STATUS, DEBUG,
					"lbq: getting new page for index %d.\n",
					lbq_desc->index);
				lbq_desc->p.lbq_page = alloc_page(GFP_ATOMIC);
				if (lbq_desc->p.lbq_page == NULL) {
					QPRINTK(qdev, RX_STATUS, ERR,
						"Couldn't get a page.\n");
					return;
				}
				map = pci_map_page(qdev->pdev,
						   lbq_desc->p.lbq_page,
						   0, PAGE_SIZE,
						   PCI_DMA_FROMDEVICE);
				if (pci_dma_mapping_error(qdev->pdev, map)) {
					QPRINTK(qdev, RX_STATUS, ERR,
						"PCI mapping failed.\n");
					return;
				}
				pci_unmap_addr_set(lbq_desc, mapaddr, map);
				pci_unmap_len_set(lbq_desc, maplen, PAGE_SIZE);
				bq->addr_lo =	/*lbq_desc->addr_lo = */
				    cpu_to_le32(map);
				bq->addr_hi =	/*lbq_desc->addr_hi = */
				    cpu_to_le32(map >> 32);
			}
			clean_idx++;
			if (clean_idx == rx_ring->lbq_len)
				clean_idx = 0;
		}

		rx_ring->lbq_clean_idx = clean_idx;
		rx_ring->lbq_prod_idx += 16;
		if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
			rx_ring->lbq_prod_idx = 0;
		QPRINTK(qdev, RX_STATUS, DEBUG,
			"lbq: updating prod idx = %d.\n",
			rx_ring->lbq_prod_idx);
		ql_write_db_reg(rx_ring->lbq_prod_idx,
				rx_ring->lbq_prod_idx_db_reg);
		rx_ring->lbq_free_cnt -= 16;
	}
}

/* Process (refill) a small buffer queue. */
static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	int clean_idx = rx_ring->sbq_clean_idx;
	struct bq_desc *sbq_desc;
	struct bq_element *bq;
	u64 map;
	int i;

	while (rx_ring->sbq_free_cnt > 16) {
		for (i = 0; i < 16; i++) {
			sbq_desc = &rx_ring->sbq[clean_idx];
			QPRINTK(qdev, RX_STATUS, DEBUG,
				"sbq: try cleaning clean_idx = %d.\n",
				clean_idx);
			bq = sbq_desc->bq;
			if (sbq_desc->p.skb == NULL) {
				QPRINTK(qdev, RX_STATUS, DEBUG,
					"sbq: getting new skb for index %d.\n",
					sbq_desc->index);
				sbq_desc->p.skb =
				    netdev_alloc_skb(qdev->ndev,
						     rx_ring->sbq_buf_size);
				if (sbq_desc->p.skb == NULL) {
					QPRINTK(qdev, PROBE, ERR,
						"Couldn't get an skb.\n");
					rx_ring->sbq_clean_idx = clean_idx;
					return;
				}
				skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
				map = pci_map_single(qdev->pdev,
						     sbq_desc->p.skb->data,
						     rx_ring->sbq_buf_size /
						     2, PCI_DMA_FROMDEVICE);
				if (pci_dma_mapping_error(qdev->pdev, map)) {
					QPRINTK(qdev, IFUP, ERR, "PCI mapping failed.\n");
					rx_ring->sbq_clean_idx = clean_idx;
					return;
				}
				pci_unmap_addr_set(sbq_desc, mapaddr, map);
				pci_unmap_len_set(sbq_desc, maplen,
						  rx_ring->sbq_buf_size / 2);
				bq->addr_lo = cpu_to_le32(map);
				bq->addr_hi = cpu_to_le32(map >> 32);
			}
			clean_idx++;
			if (clean_idx == rx_ring->sbq_len)
				clean_idx = 0;
		}
		rx_ring->sbq_clean_idx = clean_idx;
		rx_ring->sbq_prod_idx += 16;
		if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
			rx_ring->sbq_prod_idx = 0;
		QPRINTK(qdev, RX_STATUS, DEBUG,
			"sbq: updating prod idx = %d.\n",
			rx_ring->sbq_prod_idx);
		ql_write_db_reg(rx_ring->sbq_prod_idx,
				rx_ring->sbq_prod_idx_db_reg);

		rx_ring->sbq_free_cnt -= 16;
	}
}

static void ql_update_buffer_queues(struct ql_adapter *qdev,
				    struct rx_ring *rx_ring)
{
	ql_update_sbq(qdev, rx_ring);
	ql_update_lbq(qdev, rx_ring);
}
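
/* Both refill routines above replenish in batches of 16 buffers and
 * ring the producer-index doorbell once per batch, which keeps the
 * number of MMIO writes on the receive fast path low.
 */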

/* Unmaps tx buffers.  Can be called from send() if a pci mapping
 * fails at some stage, or from the interrupt when a tx completes.
 */
static void ql_unmap_send(struct ql_adapter *qdev,
			  struct tx_ring_desc *tx_ring_desc, int mapped)
{
	int i;
	for (i = 0; i < mapped; i++) {
		if (i == 0 || (i == 7 && mapped > 7)) {
			/*
			 * Unmap the skb->data area, or the
			 * external sglist (AKA the Outbound
			 * Address List (OAL)).
			 * If it's the zeroeth element, then it's
			 * the skb->data area.  If it's the 7th
			 * element and there is more than 6 frags,
			 * then it's an OAL.
			 */
			if (i == 7) {
				QPRINTK(qdev, TX_DONE, DEBUG,
					"unmapping OAL area.\n");
			}
			pci_unmap_single(qdev->pdev,
					 pci_unmap_addr(&tx_ring_desc->map[i],
							mapaddr),
					 pci_unmap_len(&tx_ring_desc->map[i],
						       maplen),
					 PCI_DMA_TODEVICE);
		} else {
			QPRINTK(qdev, TX_DONE, DEBUG, "unmapping frag %d.\n",
				i);
			pci_unmap_page(qdev->pdev,
				       pci_unmap_addr(&tx_ring_desc->map[i],
						      mapaddr),
				       pci_unmap_len(&tx_ring_desc->map[i],
						     maplen), PCI_DMA_TODEVICE);
		}
	}
}

/* Map the buffers for this transmit.  This will return
 * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
 */
static int ql_map_send(struct ql_adapter *qdev,
		       struct ob_mac_iocb_req *mac_iocb_ptr,
		       struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
{
	int len = skb_headlen(skb);
	dma_addr_t map;
	int frag_idx, err, map_idx = 0;
	struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
	int frag_cnt = skb_shinfo(skb)->nr_frags;

	if (frag_cnt)
		QPRINTK(qdev, TX_QUEUED, DEBUG, "frag_cnt = %d.\n", frag_cnt);

	/*
	 * Map the skb buffer first.
	 */
	map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);

	err = pci_dma_mapping_error(qdev->pdev, map);
	if (err) {
		QPRINTK(qdev, TX_QUEUED, ERR,
			"PCI mapping failed with error: %d\n", err);

		return NETDEV_TX_BUSY;
	}

	tbd->len = cpu_to_le32(len);
	tbd->addr = cpu_to_le64(map);
	pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
	pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
	map_idx++;

	/*
	 * This loop fills the remainder of the 8 address descriptors
	 * in the IOCB.  If there are more than 7 fragments, then the
	 * eighth address desc will point to an external list (OAL).
	 * When this happens, the remainder of the frags will be stored
	 * in this list.
	 */
	for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
		tbd++;
		if (frag_idx == 6 && frag_cnt > 7) {
			/* Let's tack on an sglist.
			 * Our control block will now
			 * look like this:
			 * iocb->seg[0] = skb->data
			 * iocb->seg[1] = frag[0]
			 * iocb->seg[2] = frag[1]
			 * iocb->seg[3] = frag[2]
			 * iocb->seg[4] = frag[3]
			 * iocb->seg[5] = frag[4]
			 * iocb->seg[6] = frag[5]
			 * iocb->seg[7] = ptr to OAL (external sglist)
			 * oal->seg[0] = frag[6]
			 * oal->seg[1] = frag[7]
			 * oal->seg[2] = frag[8]
			 * oal->seg[3] = frag[9]
			 * oal->seg[4] = frag[10]
			 *      etc...
			 */
			/* Tack on the OAL in the eighth segment of IOCB. */
			map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
					     sizeof(struct oal),
					     PCI_DMA_TODEVICE);
			err = pci_dma_mapping_error(qdev->pdev, map);
			if (err) {
				QPRINTK(qdev, TX_QUEUED, ERR,
					"PCI mapping outbound address list with error: %d\n",
					err);
				goto map_error;
			}

			tbd->addr = cpu_to_le64(map);
			/*
			 * The length is the number of fragments
			 * that remain to be mapped times the length
			 * of our sglist (OAL).
			 */
			tbd->len =
			    cpu_to_le32((sizeof(struct tx_buf_desc) *
					 (frag_cnt - frag_idx)) | TX_DESC_C);
			pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
					   map);
			pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
					  sizeof(struct oal));
			tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
			map_idx++;
		}

		map =
		    pci_map_page(qdev->pdev, frag->page,
				 frag->page_offset, frag->size,
				 PCI_DMA_TODEVICE);

		err = pci_dma_mapping_error(qdev->pdev, map);
		if (err) {
			QPRINTK(qdev, TX_QUEUED, ERR,
				"PCI mapping frags failed with error: %d.\n",
				err);
			goto map_error;
		}

		tbd->addr = cpu_to_le64(map);
		tbd->len = cpu_to_le32(frag->size);
		pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
		pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
				  frag->size);
	}
	/* Save the number of segments we've mapped. */
	tx_ring_desc->map_cnt = map_idx;
	/* Terminate the last segment. */
	tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
	return NETDEV_TX_OK;

map_error:
	/*
	 * If the first frag mapping failed, then i will be zero.
	 * This causes the unmap of the skb->data area.  Otherwise
	 * we pass in the number of frags that mapped successfully
	 * so they can be unmapped.
	 */
	ql_unmap_send(qdev, tx_ring_desc, map_idx);
	return NETDEV_TX_BUSY;
}
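
/* ql_map_send() and ql_unmap_send() must stay in lockstep: map_cnt
 * records how many descriptors were successfully mapped, and slot 0
 * (and slot 7 when an OAL is in use) are single mappings while the
 * rest are page mappings, which is why the unmap path special-cases
 * i == 0 and i == 7.
 */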

static void ql_realign_skb(struct sk_buff *skb, int len)
{
	void *temp_addr = skb->data;

	/* Undo the skb_reserve(skb,32) we did before
	 * giving to hardware, and realign data on
	 * a 2-byte boundary.
	 */
	skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
	skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
	skb_copy_to_linear_data(skb, temp_addr,
				(unsigned int)len);
}
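
/* The hardware wants QLGE_SB_PAD bytes of headroom in each small
 * buffer, while the stack wants received frames offset by
 * NET_IP_ALIGN (normally 2) bytes so the IP header lands on a 4-byte
 * boundary.  The function above trades the unused pad for that
 * alignment by sliding data/tail back and copying the received bytes
 * into place.
 */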

/*
 * This function builds an skb for the given inbound
 * completion.  It will be rewritten for readability in the near
 * future, but for now it works well.
 */
static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
				       struct rx_ring *rx_ring,
				       struct ib_mac_iocb_rsp *ib_mac_rsp)
{
	struct bq_desc *lbq_desc;
	struct bq_desc *sbq_desc;
	struct sk_buff *skb = NULL;
	u32 length = le32_to_cpu(ib_mac_rsp->data_len);
	u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);

	/*
	 * Handle the header buffer if present.
	 */
	if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
	    ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
		QPRINTK(qdev, RX_STATUS, DEBUG, "Header of %d bytes in small buffer.\n", hdr_len);
		/*
		 * Headers fit nicely into a small buffer.
		 */
		sbq_desc = ql_get_curr_sbuf(rx_ring);
		pci_unmap_single(qdev->pdev,
				 pci_unmap_addr(sbq_desc, mapaddr),
				 pci_unmap_len(sbq_desc, maplen),
				 PCI_DMA_FROMDEVICE);
		skb = sbq_desc->p.skb;
		ql_realign_skb(skb, hdr_len);
		skb_put(skb, hdr_len);
		sbq_desc->p.skb = NULL;
	}

	/*
	 * Handle the data buffer(s).
	 */
	if (unlikely(!length)) {	/* Is there data too? */
		QPRINTK(qdev, RX_STATUS, DEBUG,
			"No Data buffer in this packet.\n");
		return skb;
	}

	if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
		if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
			QPRINTK(qdev, RX_STATUS, DEBUG,
				"Headers in small, data of %d bytes in small, combine them.\n", length);
			/*
			 * Data is less than small buffer size so it's
			 * stuffed in a small buffer.
			 * For this case we append the data
			 * from the "data" small buffer to the "header" small
			 * buffer.
			 */
			sbq_desc = ql_get_curr_sbuf(rx_ring);
			pci_dma_sync_single_for_cpu(qdev->pdev,
						    pci_unmap_addr
						    (sbq_desc, mapaddr),
						    pci_unmap_len
						    (sbq_desc, maplen),
						    PCI_DMA_FROMDEVICE);
			memcpy(skb_put(skb, length),
			       sbq_desc->p.skb->data, length);
			pci_dma_sync_single_for_device(qdev->pdev,
						       pci_unmap_addr
						       (sbq_desc, mapaddr),
						       pci_unmap_len
						       (sbq_desc, maplen),
						       PCI_DMA_FROMDEVICE);
		} else {
			QPRINTK(qdev, RX_STATUS, DEBUG,
				"%d bytes in a single small buffer.\n", length);
			sbq_desc = ql_get_curr_sbuf(rx_ring);
			skb = sbq_desc->p.skb;
			ql_realign_skb(skb, length);
			skb_put(skb, length);
			pci_unmap_single(qdev->pdev,
					 pci_unmap_addr(sbq_desc,
							mapaddr),
					 pci_unmap_len(sbq_desc,
						       maplen),
					 PCI_DMA_FROMDEVICE);
			sbq_desc->p.skb = NULL;
		}
	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
		if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
			QPRINTK(qdev, RX_STATUS, DEBUG,
				"Header in small, %d bytes in large. Chain large to small!\n", length);
			/*
			 * The data is in a single large buffer.  We
			 * chain it to the header buffer's skb and let
			 * it rip.
			 */
			lbq_desc = ql_get_curr_lbuf(rx_ring);
			pci_unmap_page(qdev->pdev,
				       pci_unmap_addr(lbq_desc,
						      mapaddr),
				       pci_unmap_len(lbq_desc, maplen),
				       PCI_DMA_FROMDEVICE);
			QPRINTK(qdev, RX_STATUS, DEBUG,
				"Chaining page to skb.\n");
			skb_fill_page_desc(skb, 0, lbq_desc->p.lbq_page,
					   0, length);
			skb->len += length;
			skb->data_len += length;
			skb->truesize += length;
			lbq_desc->p.lbq_page = NULL;
		} else {
			/*
			 * The headers and data are in a single large buffer. We
			 * copy it to a new skb and let it go. This can happen with
			 * jumbo mtu on a non-TCP/UDP frame.
			 */
			lbq_desc = ql_get_curr_lbuf(rx_ring);
			skb = netdev_alloc_skb(qdev->ndev, length);
			if (skb == NULL) {
				QPRINTK(qdev, PROBE, DEBUG,
					"No skb available, drop the packet.\n");
				return NULL;
			}
			pci_unmap_page(qdev->pdev,
				       pci_unmap_addr(lbq_desc,
						      mapaddr),
				       pci_unmap_len(lbq_desc, maplen),
				       PCI_DMA_FROMDEVICE);
			skb_reserve(skb, NET_IP_ALIGN);
			QPRINTK(qdev, RX_STATUS, DEBUG,
				"%d bytes of headers and data in large. Chain page to new skb and pull tail.\n", length);
			skb_fill_page_desc(skb, 0, lbq_desc->p.lbq_page,
					   0, length);
			skb->len += length;
			skb->data_len += length;
			skb->truesize += length;
			lbq_desc->p.lbq_page = NULL;
			__pskb_pull_tail(skb,
					 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
					 VLAN_ETH_HLEN : ETH_HLEN);
		}
	} else {
		/*
		 * The data is in a chain of large buffers
		 * pointed to by a small buffer.  We loop
		 * thru and chain them to our small header
		 * buffer's skb.
		 * frags:  There are 18 max frags and our small
		 *         buffer will hold 32 of them. The thing is,
		 *         we'll use 3 max for our 9000 byte jumbo
		 *         frames.  If the MTU goes up we could
		 *         eventually be in trouble.
		 */
		int size, offset, i = 0;
		struct bq_element *bq, bq_array[8];
		sbq_desc = ql_get_curr_sbuf(rx_ring);
		pci_unmap_single(qdev->pdev,
				 pci_unmap_addr(sbq_desc, mapaddr),
				 pci_unmap_len(sbq_desc, maplen),
				 PCI_DMA_FROMDEVICE);
		if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
			/*
			 * This is a non-TCP/UDP IP frame, so
			 * the headers aren't split into a small
			 * buffer.  We have to use the small buffer
			 * that contains our sg list as our skb to
			 * send upstairs. Copy the sg list here to
			 * a local buffer and use it to find the
			 * pages to chain.
			 */
			QPRINTK(qdev, RX_STATUS, DEBUG,
				"%d bytes of headers & data in chain of large.\n", length);
			skb = sbq_desc->p.skb;
			bq = &bq_array[0];
			memcpy(bq, skb->data, sizeof(bq_array));
			sbq_desc->p.skb = NULL;
			skb_reserve(skb, NET_IP_ALIGN);
		} else {
			QPRINTK(qdev, RX_STATUS, DEBUG,
				"Headers in small, %d bytes of data in chain of large.\n", length);
			bq = (struct bq_element *)sbq_desc->p.skb->data;
		}
		while (length > 0) {
			lbq_desc = ql_get_curr_lbuf(rx_ring);
			if ((bq->addr_lo & ~BQ_MASK) != lbq_desc->bq->addr_lo) {
				QPRINTK(qdev, RX_STATUS, ERR,
					"Panic!!! bad large buffer address, expected 0x%.08x, got 0x%.08x.\n",
					lbq_desc->bq->addr_lo, bq->addr_lo);
				return NULL;
			}
			pci_unmap_page(qdev->pdev,
				       pci_unmap_addr(lbq_desc,
						      mapaddr),
				       pci_unmap_len(lbq_desc,
						     maplen),
				       PCI_DMA_FROMDEVICE);
			size = (length < PAGE_SIZE) ? length : PAGE_SIZE;
			offset = 0;

			QPRINTK(qdev, RX_STATUS, DEBUG,
				"Adding page %d to skb for %d bytes.\n",
				i, size);
			skb_fill_page_desc(skb, i, lbq_desc->p.lbq_page,
					   offset, size);
			skb->len += size;
			skb->data_len += size;
			skb->truesize += size;
			length -= size;
			lbq_desc->p.lbq_page = NULL;
			bq++;
			i++;
		}
		__pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
				 VLAN_ETH_HLEN : ETH_HLEN);
	}
	return skb;
}
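
/* Summary of the receive layouts handled above:
 *   1) header only in a small buffer (no data),
 *   2) header and data each in small buffers (data memcpy'd in),
 *   3) header in a small buffer, data in one large buffer (page
 *      chained on as a frag),
 *   4) data spanning a chain of large buffers described by an sg
 *      list that arrives in a small buffer.
 */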

/* Process an inbound completion from an rx ring. */
static void ql_process_mac_rx_intr(struct ql_adapter *qdev,
				   struct rx_ring *rx_ring,
				   struct ib_mac_iocb_rsp *ib_mac_rsp)
{
	struct net_device *ndev = qdev->ndev;
	struct sk_buff *skb = NULL;

	QL_DUMP_IB_MAC_RSP(ib_mac_rsp);

	skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
	if (unlikely(!skb)) {
		QPRINTK(qdev, RX_STATUS, DEBUG,
			"No skb available, drop packet.\n");
		return;
	}

	prefetch(skb->data);
	if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
		QPRINTK(qdev, RX_STATUS, DEBUG, "%s%s%s Multicast.\n",
			(ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			IB_MAC_IOCB_RSP_M_HASH ? "Hash" : "",
			(ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			IB_MAC_IOCB_RSP_M_REG ? "Registered" : "",
			(ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
	}
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
		QPRINTK(qdev, RX_STATUS, DEBUG, "Promiscuous Packet.\n");
	}
	if (ib_mac_rsp->flags1 & (IB_MAC_IOCB_RSP_IE | IB_MAC_IOCB_RSP_TE)) {
		QPRINTK(qdev, RX_STATUS, ERR,
			"Bad checksum for this %s packet.\n",
			((ib_mac_rsp->
			  flags2 & IB_MAC_IOCB_RSP_T) ? "TCP" : "UDP"));
		skb->ip_summed = CHECKSUM_NONE;
	} else if (qdev->rx_csum &&
		   ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) ||
		    ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
		     !(ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_NU)))) {
		QPRINTK(qdev, RX_STATUS, DEBUG, "RX checksum done!\n");
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}
	qdev->stats.rx_packets++;
	qdev->stats.rx_bytes += skb->len;
	skb->protocol = eth_type_trans(skb, ndev);
	if (qdev->vlgrp && (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V)) {
		QPRINTK(qdev, RX_STATUS, DEBUG,
			"Passing a VLAN packet upstream.\n");
		vlan_hwaccel_rx(skb, qdev->vlgrp,
				le16_to_cpu(ib_mac_rsp->vlan_id));
	} else {
		QPRINTK(qdev, RX_STATUS, DEBUG,
			"Passing a normal packet upstream.\n");
		netif_rx(skb);
	}
}

/* Process an outbound completion from an rx ring. */
static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
				   struct ob_mac_iocb_rsp *mac_rsp)
{
	struct tx_ring *tx_ring;
	struct tx_ring_desc *tx_ring_desc;

	QL_DUMP_OB_MAC_RSP(mac_rsp);
	tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
	tx_ring_desc = &tx_ring->q[mac_rsp->tid];
	ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
	qdev->stats.tx_bytes += tx_ring_desc->skb->len;
	qdev->stats.tx_packets++;
	dev_kfree_skb(tx_ring_desc->skb);
	tx_ring_desc->skb = NULL;

	if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
					OB_MAC_IOCB_RSP_S |
					OB_MAC_IOCB_RSP_L |
					OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
			QPRINTK(qdev, TX_DONE, WARNING,
				"Total descriptor length did not match transfer length.\n");
		}
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
			QPRINTK(qdev, TX_DONE, WARNING,
				"Frame too short to be legal, not sent.\n");
		}
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
			QPRINTK(qdev, TX_DONE, WARNING,
				"Frame too long, but sent anyway.\n");
		}
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
			QPRINTK(qdev, TX_DONE, WARNING,
				"PCI backplane error. Frame not sent.\n");
		}
	}
	atomic_inc(&tx_ring->tx_count);
}

/* Fire up a handler to reset the MPI processor. */
void ql_queue_fw_error(struct ql_adapter *qdev)
{
	netif_stop_queue(qdev->ndev);
	netif_carrier_off(qdev->ndev);
	queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
}

void ql_queue_asic_error(struct ql_adapter *qdev)
{
	netif_stop_queue(qdev->ndev);
	netif_carrier_off(qdev->ndev);
	ql_disable_interrupts(qdev);
	queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
}

static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
				    struct ib_ae_iocb_rsp *ib_ae_rsp)
{
	switch (ib_ae_rsp->event) {
	case MGMT_ERR_EVENT:
		QPRINTK(qdev, RX_ERR, ERR,
			"Management Processor Fatal Error.\n");
		ql_queue_fw_error(qdev);
		return;

	case CAM_LOOKUP_ERR_EVENT:
		QPRINTK(qdev, LINK, ERR,
			"Multiple CAM hits lookup occurred.\n");
		QPRINTK(qdev, DRV, ERR, "This event shouldn't occur.\n");
		ql_queue_asic_error(qdev);
		return;

	case SOFT_ECC_ERROR_EVENT:
		QPRINTK(qdev, RX_ERR, ERR, "Soft ECC error detected.\n");
		ql_queue_asic_error(qdev);
		break;

	case PCI_ERR_ANON_BUF_RD:
		QPRINTK(qdev, RX_ERR, ERR,
			"PCI error occurred when reading anonymous buffers from rx_ring %d.\n",
			ib_ae_rsp->q_id);
		ql_queue_asic_error(qdev);
		break;

	default:
		QPRINTK(qdev, DRV, ERR, "Unexpected event %d.\n",
			ib_ae_rsp->event);
		ql_queue_asic_error(qdev);
		break;
	}
}

static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
{
	struct ql_adapter *qdev = rx_ring->qdev;
	u32 prod = le32_to_cpu(*rx_ring->prod_idx_sh_reg);
	struct ob_mac_iocb_rsp *net_rsp = NULL;
	int count = 0;

	/* While there are entries in the completion queue. */
	while (prod != rx_ring->cnsmr_idx) {

		QPRINTK(qdev, RX_STATUS, DEBUG,
			"cq_id = %d, prod = %d, cnsmr = %d.\n", rx_ring->cq_id,
			prod, rx_ring->cnsmr_idx);

		net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
		rmb();
		switch (net_rsp->opcode) {

		case OPCODE_OB_MAC_TSO_IOCB:
		case OPCODE_OB_MAC_IOCB:
			ql_process_mac_tx_intr(qdev, net_rsp);
			break;
		default:
			QPRINTK(qdev, RX_STATUS, DEBUG,
				"Hit default case, not handled! dropping the packet, opcode = %x.\n",
				net_rsp->opcode);
		}
		count++;
		ql_update_cq(rx_ring);
		prod = le32_to_cpu(*rx_ring->prod_idx_sh_reg);
	}
	ql_write_cq_idx(rx_ring);
	if (netif_queue_stopped(qdev->ndev) && net_rsp != NULL) {
		struct tx_ring *tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
		if (atomic_read(&tx_ring->queue_stopped) &&
		    (atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
			/*
			 * The queue got stopped because the tx_ring was full.
			 * Wake it up, because it's now at least 25% empty.
			 */
			netif_wake_queue(qdev->ndev);
	}

	return count;
}

static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
{
	struct ql_adapter *qdev = rx_ring->qdev;
	u32 prod = le32_to_cpu(*rx_ring->prod_idx_sh_reg);
	struct ql_net_rsp_iocb *net_rsp;
	int count = 0;

	/* While there are entries in the completion queue. */
	while (prod != rx_ring->cnsmr_idx) {

		QPRINTK(qdev, RX_STATUS, DEBUG,
			"cq_id = %d, prod = %d, cnsmr = %d.\n", rx_ring->cq_id,
			prod, rx_ring->cnsmr_idx);

		net_rsp = rx_ring->curr_entry;
		rmb();
		switch (net_rsp->opcode) {
		case OPCODE_IB_MAC_IOCB:
			ql_process_mac_rx_intr(qdev, rx_ring,
					       (struct ib_mac_iocb_rsp *)
					       net_rsp);
			break;

		case OPCODE_IB_AE_IOCB:
			ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
						net_rsp);
			break;
		default:
			QPRINTK(qdev, RX_STATUS, DEBUG,
				"Hit default case, not handled! dropping the packet, opcode = %x.\n",
				net_rsp->opcode);
		}
		count++;
		ql_update_cq(rx_ring);
		prod = le32_to_cpu(*rx_ring->prod_idx_sh_reg);
		if (count == budget)
			break;
	}
	ql_update_buffer_queues(qdev, rx_ring);
	ql_write_cq_idx(rx_ring);
	return count;
}

static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
{
	struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
	struct ql_adapter *qdev = rx_ring->qdev;
	int work_done = ql_clean_inbound_rx_ring(rx_ring, budget);

	QPRINTK(qdev, RX_STATUS, DEBUG, "Enter, NAPI POLL cq_id = %d.\n",
		rx_ring->cq_id);

	if (work_done < budget) {
		__netif_rx_complete(napi);
		ql_enable_completion_interrupt(qdev, rx_ring->irq);
	}
	return work_done;
}
*ndev
, struct vlan_group
*grp
)
1668 struct ql_adapter
*qdev
= netdev_priv(ndev
);
1672 QPRINTK(qdev
, IFUP
, DEBUG
, "Turning on VLAN in NIC_RCV_CFG.\n");
1673 ql_write32(qdev
, NIC_RCV_CFG
, NIC_RCV_CFG_VLAN_MASK
|
1674 NIC_RCV_CFG_VLAN_MATCH_AND_NON
);
1676 QPRINTK(qdev
, IFUP
, DEBUG
,
1677 "Turning off VLAN in NIC_RCV_CFG.\n");
1678 ql_write32(qdev
, NIC_RCV_CFG
, NIC_RCV_CFG_VLAN_MASK
);

static void ql_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	u32 enable_bit = MAC_ADDR_E;

	spin_lock(&qdev->hw_lock);
	if (ql_set_mac_addr_reg
	    (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
		QPRINTK(qdev, IFUP, ERR, "Failed to init vlan address.\n");
	}
	spin_unlock(&qdev->hw_lock);
}

static void ql_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	u32 enable_bit = 0;

	spin_lock(&qdev->hw_lock);
	if (ql_set_mac_addr_reg
	    (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
		QPRINTK(qdev, IFUP, ERR, "Failed to clear vlan address.\n");
	}
	spin_unlock(&qdev->hw_lock);
}

/* Worker thread to process a given rx_ring that is dedicated
 * to outbound completions.
 */
static void ql_tx_clean(struct work_struct *work)
{
	struct rx_ring *rx_ring =
	    container_of(work, struct rx_ring, rx_work.work);
	ql_clean_outbound_rx_ring(rx_ring);
	ql_enable_completion_interrupt(rx_ring->qdev, rx_ring->irq);
}

/* Worker thread to process a given rx_ring that is dedicated
 * to inbound completions.
 */
static void ql_rx_clean(struct work_struct *work)
{
	struct rx_ring *rx_ring =
	    container_of(work, struct rx_ring, rx_work.work);
	ql_clean_inbound_rx_ring(rx_ring, 64);
	ql_enable_completion_interrupt(rx_ring->qdev, rx_ring->irq);
}

/* MSI-X Multiple Vector Interrupt Handler for outbound completions. */
static irqreturn_t qlge_msix_tx_isr(int irq, void *dev_id)
{
	struct rx_ring *rx_ring = dev_id;
	queue_delayed_work_on(rx_ring->cpu, rx_ring->qdev->q_workqueue,
			      &rx_ring->rx_work, 0);
	return IRQ_HANDLED;
}

/* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
{
	struct rx_ring *rx_ring = dev_id;
	netif_rx_schedule(&rx_ring->napi);
	return IRQ_HANDLED;
}

/* This handles a fatal error, MPI activity, and the default
 * rx_ring in an MSI-X multiple vector environment.
 * In an MSI/Legacy environment it also processes the rest of
 * the rx_rings.
 */
static irqreturn_t qlge_isr(int irq, void *dev_id)
{
	struct rx_ring *rx_ring = dev_id;
	struct ql_adapter *qdev = rx_ring->qdev;
	struct intr_context *intr_context = &qdev->intr_context[0];
	u32 var;
	int i;
	int work_done = 0;

	spin_lock(&qdev->hw_lock);
	if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
		QPRINTK(qdev, INTR, DEBUG, "Shared Interrupt, Not ours!\n");
		spin_unlock(&qdev->hw_lock);
		return IRQ_NONE;
	}
	spin_unlock(&qdev->hw_lock);

	var = ql_disable_completion_interrupt(qdev, intr_context->intr);

	/*
	 * Check for fatal error.
	 */
	if (var & STS_FE) {
		ql_queue_asic_error(qdev);
		QPRINTK(qdev, INTR, ERR, "Got fatal error, STS = %x.\n", var);
		var = ql_read32(qdev, ERR_STS);
		QPRINTK(qdev, INTR, ERR,
			"Resetting chip. Error Status Register = 0x%x\n", var);
		return IRQ_HANDLED;
	}

	/*
	 * Check MPI processor activity.
	 */
	if (var & STS_PI) {
		/*
		 * We've got an async event or mailbox completion.
		 * Handle it and clear the source of the interrupt.
		 */
		QPRINTK(qdev, INTR, ERR, "Got MPI processor interrupt.\n");
		ql_disable_completion_interrupt(qdev, intr_context->intr);
		queue_delayed_work_on(smp_processor_id(), qdev->workqueue,
				      &qdev->mpi_work, 0);
		work_done++;
	}

	/*
	 * Check the default queue and wake handler if active.
	 */
	rx_ring = &qdev->rx_ring[0];
	if (le32_to_cpu(*rx_ring->prod_idx_sh_reg) != rx_ring->cnsmr_idx) {
		QPRINTK(qdev, INTR, INFO, "Waking handler for rx_ring[0].\n");
		ql_disable_completion_interrupt(qdev, intr_context->intr);
		queue_delayed_work_on(smp_processor_id(), qdev->q_workqueue,
				      &rx_ring->rx_work, 0);
		work_done++;
	}

	if (!test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
		/*
		 * Start the DPC for each active queue.
		 */
		for (i = 1; i < qdev->rx_ring_count; i++) {
			rx_ring = &qdev->rx_ring[i];
			if (le32_to_cpu(*rx_ring->prod_idx_sh_reg) !=
			    rx_ring->cnsmr_idx) {
				QPRINTK(qdev, INTR, INFO,
					"Waking handler for rx_ring[%d].\n", i);
				ql_disable_completion_interrupt(qdev,
								intr_context->intr);
				if (i < qdev->rss_ring_first_cq_id)
					queue_delayed_work_on(rx_ring->cpu,
							      qdev->q_workqueue,
							      &rx_ring->rx_work,
							      0);
				else
					netif_rx_schedule(&rx_ring->napi);
				work_done++;
			}
		}
	}
	ql_enable_completion_interrupt(qdev, intr_context->intr);
	return work_done ? IRQ_HANDLED : IRQ_NONE;
}

static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
{

	if (skb_is_gso(skb)) {
		int err;
		if (skb_header_cloned(skb)) {
			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
			if (err)
				return err;
		}

		mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
		mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
		mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
		mac_iocb_ptr->total_hdrs_len =
		    cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
		mac_iocb_ptr->net_trans_offset =
		    cpu_to_le16(skb_network_offset(skb) |
				skb_transport_offset(skb)
				<< OB_MAC_TRANSPORT_HDR_SHIFT);
		mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
		if (likely(skb->protocol == htons(ETH_P_IP))) {
			struct iphdr *iph = ip_hdr(skb);
			iph->check = 0;
			mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);
		} else if (skb->protocol == htons(ETH_P_IPV6)) {
			mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
			tcp_hdr(skb)->check =
			    ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
					     &ipv6_hdr(skb)->daddr,
					     0, IPPROTO_TCP, 0);
		}
		return 1;
	}
	return 0;
}
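
/* For TSO the TCP checksum field is seeded with the pseudo-header
 * checksum (length and checksum arguments zero) via
 * csum_tcpudp_magic()/csum_ipv6_magic(); the chip then fills in the
 * real checksum for each segment it emits.
 */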

static void ql_hw_csum_setup(struct sk_buff *skb,
			     struct ob_mac_tso_iocb_req *mac_iocb_ptr)
{
	int len;
	struct iphdr *iph = ip_hdr(skb);
	__sum16 *check;

	mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
	mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
	mac_iocb_ptr->net_trans_offset =
	    cpu_to_le16(skb_network_offset(skb) |
			skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);

	mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
	len = (ntohs(iph->tot_len) - (iph->ihl << 2));
	if (likely(iph->protocol == IPPROTO_TCP)) {
		check = &(tcp_hdr(skb)->check);
		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
		mac_iocb_ptr->total_hdrs_len =
		    cpu_to_le16(skb_transport_offset(skb) +
				(tcp_hdr(skb)->doff << 2));
	} else {
		check = &(udp_hdr(skb)->check);
		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
		mac_iocb_ptr->total_hdrs_len =
		    cpu_to_le16(skb_transport_offset(skb) +
				sizeof(struct udphdr));
	}
	*check = ~csum_tcpudp_magic(iph->saddr,
				    iph->daddr, len, iph->protocol, 0);
}

static int qlge_send(struct sk_buff *skb, struct net_device *ndev)
{
	struct tx_ring_desc *tx_ring_desc;
	struct ob_mac_iocb_req *mac_iocb_ptr;
	struct ql_adapter *qdev = netdev_priv(ndev);
	int tso;
	struct tx_ring *tx_ring;
	u32 tx_ring_idx = (u32) QL_TXQ_IDX(qdev, skb);

	tx_ring = &qdev->tx_ring[tx_ring_idx];

	if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
		QPRINTK(qdev, TX_QUEUED, INFO,
			"%s: shutting down tx queue %d due to lack of resources.\n",
			__func__, tx_ring_idx);
		netif_stop_queue(ndev);
		atomic_inc(&tx_ring->queue_stopped);
		return NETDEV_TX_BUSY;
	}
	tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
	mac_iocb_ptr = tx_ring_desc->queue_entry;
	memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
	if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) != NETDEV_TX_OK) {
		QPRINTK(qdev, TX_QUEUED, ERR, "Could not map the segments.\n");
		return NETDEV_TX_BUSY;
	}

	mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
	mac_iocb_ptr->tid = tx_ring_desc->index;
	/* We use the upper 32-bits to store the tx queue for this IO.
	 * When we get the completion we can use it to establish the context.
	 */
	mac_iocb_ptr->txq_idx = tx_ring_idx;
	tx_ring_desc->skb = skb;

	mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);

	if (qdev->vlgrp && vlan_tx_tag_present(skb)) {
		QPRINTK(qdev, TX_QUEUED, DEBUG, "Adding a vlan tag %d.\n",
			vlan_tx_tag_get(skb));
		mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
		mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb));
	}
	tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
	if (tso < 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	} else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
		ql_hw_csum_setup(skb,
				 (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
	}
	QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
	tx_ring->prod_idx++;
	if (tx_ring->prod_idx == tx_ring->wq_len)
		tx_ring->prod_idx = 0;
	wmb();

	ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
	ndev->trans_start = jiffies;
	QPRINTK(qdev, TX_QUEUED, DEBUG, "tx queued, slot %d, len %d\n",
		tx_ring->prod_idx, skb->len);

	atomic_dec(&tx_ring->tx_count);
	return NETDEV_TX_OK;
}
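
/* Flow control: tx_count is decremented here for each IOCB consumed
 * and incremented again in ql_process_mac_tx_intr() when the
 * completion arrives.  ql_clean_outbound_rx_ring() restarts a stopped
 * queue once the ring is at least 25% empty.
 */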

static void ql_free_shadow_space(struct ql_adapter *qdev)
{
	if (qdev->rx_ring_shadow_reg_area) {
		pci_free_consistent(qdev->pdev,
				    PAGE_SIZE,
				    qdev->rx_ring_shadow_reg_area,
				    qdev->rx_ring_shadow_reg_dma);
		qdev->rx_ring_shadow_reg_area = NULL;
	}
	if (qdev->tx_ring_shadow_reg_area) {
		pci_free_consistent(qdev->pdev,
				    PAGE_SIZE,
				    qdev->tx_ring_shadow_reg_area,
				    qdev->tx_ring_shadow_reg_dma);
		qdev->tx_ring_shadow_reg_area = NULL;
	}
}

static int ql_alloc_shadow_space(struct ql_adapter *qdev)
{
	qdev->rx_ring_shadow_reg_area =
	    pci_alloc_consistent(qdev->pdev,
				 PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma);
	if (qdev->rx_ring_shadow_reg_area == NULL) {
		QPRINTK(qdev, IFUP, ERR,
			"Allocation of RX shadow space failed.\n");
		return -ENOMEM;
	}
	qdev->tx_ring_shadow_reg_area =
	    pci_alloc_consistent(qdev->pdev, PAGE_SIZE,
				 &qdev->tx_ring_shadow_reg_dma);
	if (qdev->tx_ring_shadow_reg_area == NULL) {
		QPRINTK(qdev, IFUP, ERR,
			"Allocation of TX shadow space failed.\n");
		goto err_wqp_sh_area;
	}
	return 0;

err_wqp_sh_area:
	pci_free_consistent(qdev->pdev,
			    PAGE_SIZE,
			    qdev->rx_ring_shadow_reg_area,
			    qdev->rx_ring_shadow_reg_dma);
	return -ENOMEM;
}

static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
{
	struct tx_ring_desc *tx_ring_desc;
	int i;
	struct ob_mac_iocb_req *mac_iocb_ptr;

	mac_iocb_ptr = tx_ring->wq_base;
	tx_ring_desc = tx_ring->q;
	for (i = 0; i < tx_ring->wq_len; i++) {
		tx_ring_desc->index = i;
		tx_ring_desc->skb = NULL;
		tx_ring_desc->queue_entry = mac_iocb_ptr;
		mac_iocb_ptr++;
		tx_ring_desc++;
	}
	atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
	atomic_set(&tx_ring->queue_stopped, 0);
}

static void ql_free_tx_resources(struct ql_adapter *qdev,
				 struct tx_ring *tx_ring)
{
	if (tx_ring->wq_base) {
		pci_free_consistent(qdev->pdev, tx_ring->wq_size,
				    tx_ring->wq_base, tx_ring->wq_base_dma);
		tx_ring->wq_base = NULL;
	}
	kfree(tx_ring->q);
	tx_ring->q = NULL;
}

static int ql_alloc_tx_resources(struct ql_adapter *qdev,
				 struct tx_ring *tx_ring)
{
	tx_ring->wq_base =
	    pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
				 &tx_ring->wq_base_dma);

	if ((tx_ring->wq_base == NULL)
	    || tx_ring->wq_base_dma & (tx_ring->wq_size - 1)) {
		QPRINTK(qdev, IFUP, ERR, "tx_ring alloc failed.\n");
		return -ENOMEM;
	}
	tx_ring->q =
	    kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
	if (tx_ring->q == NULL)
		goto err;

	return 0;
err:
	pci_free_consistent(qdev->pdev, tx_ring->wq_size,
			    tx_ring->wq_base, tx_ring->wq_base_dma);
	return -ENOMEM;
}

static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	int i;
	struct bq_desc *lbq_desc;

	for (i = 0; i < rx_ring->lbq_len; i++) {
		lbq_desc = &rx_ring->lbq[i];
		if (lbq_desc->p.lbq_page) {
			pci_unmap_page(qdev->pdev,
				       pci_unmap_addr(lbq_desc, mapaddr),
				       pci_unmap_len(lbq_desc, maplen),
				       PCI_DMA_FROMDEVICE);

			put_page(lbq_desc->p.lbq_page);
			lbq_desc->p.lbq_page = NULL;
		}
		lbq_desc->bq->addr_lo = 0;
		lbq_desc->bq->addr_hi = 0;
	}
}
/*
 * Allocate and map a page for each element of the lbq.
 */
static int ql_alloc_lbq_buffers(struct ql_adapter *qdev,
				struct rx_ring *rx_ring)
{
	int i;
	struct bq_desc *lbq_desc;
	u64 map;
	struct bq_element *bq = rx_ring->lbq_base;

	for (i = 0; i < rx_ring->lbq_len; i++) {
		lbq_desc = &rx_ring->lbq[i];
		memset(lbq_desc, 0, sizeof(*lbq_desc));
		lbq_desc->bq = bq;
		lbq_desc->index = i;
		lbq_desc->p.lbq_page = alloc_page(GFP_ATOMIC);
		if (unlikely(!lbq_desc->p.lbq_page)) {
			QPRINTK(qdev, IFUP, ERR, "failed alloc_page().\n");
			goto mem_error;
		}
		map = pci_map_page(qdev->pdev,
				   lbq_desc->p.lbq_page,
				   0, PAGE_SIZE, PCI_DMA_FROMDEVICE);
		if (pci_dma_mapping_error(qdev->pdev, map)) {
			QPRINTK(qdev, IFUP, ERR,
				"PCI mapping failed.\n");
			goto mem_error;
		}
		pci_unmap_addr_set(lbq_desc, mapaddr, map);
		pci_unmap_len_set(lbq_desc, maplen, PAGE_SIZE);
		bq->addr_lo = cpu_to_le32(map);
		bq->addr_hi = cpu_to_le32(map >> 32);
		bq++;
	}
	return 0;

mem_error:
	ql_free_lbq_buffers(qdev, rx_ring);
	return -ENOMEM;
}
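/*
 * Note: the driver keeps two receive buffer tiers per ring.  The small
 * buffer queue (sbq) supplies skbs that catch headers and short frames,
 * while the large buffer queue (lbq) supplies whole pages for bigger
 * payloads.  A bq_element is the hardware-visible side of one buffer:
 * a 64-bit DMA address stored as little-endian lo/hi halves.
 */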
static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	int i;
	struct bq_desc *sbq_desc;

	for (i = 0; i < rx_ring->sbq_len; i++) {
		sbq_desc = &rx_ring->sbq[i];
		if (sbq_desc == NULL) {
			QPRINTK(qdev, IFUP, ERR, "sbq_desc %d is NULL.\n", i);
			return;
		}
		if (sbq_desc->p.skb) {
			pci_unmap_single(qdev->pdev,
					 pci_unmap_addr(sbq_desc, mapaddr),
					 pci_unmap_len(sbq_desc, maplen),
					 PCI_DMA_FROMDEVICE);
			dev_kfree_skb(sbq_desc->p.skb);
			sbq_desc->p.skb = NULL;
		}
		if (sbq_desc->bq == NULL) {
			QPRINTK(qdev, IFUP, ERR, "sbq_desc->bq %d is NULL.\n",
				i);
			return;
		}
		sbq_desc->bq->addr_lo = 0;
		sbq_desc->bq->addr_hi = 0;
	}
}
/* Allocate and map an skb for each element of the sbq. */
static int ql_alloc_sbq_buffers(struct ql_adapter *qdev,
				struct rx_ring *rx_ring)
{
	int i;
	struct bq_desc *sbq_desc;
	struct sk_buff *skb;
	u64 map;
	struct bq_element *bq = rx_ring->sbq_base;

	for (i = 0; i < rx_ring->sbq_len; i++) {
		sbq_desc = &rx_ring->sbq[i];
		memset(sbq_desc, 0, sizeof(*sbq_desc));
		sbq_desc->index = i;
		sbq_desc->bq = bq;
		skb = netdev_alloc_skb(qdev->ndev, rx_ring->sbq_buf_size);
		if (unlikely(!skb)) {
			/* Better luck next round */
			QPRINTK(qdev, IFUP, ERR,
				"small buff alloc failed for %d bytes at index %d.\n",
				rx_ring->sbq_buf_size, i);
			goto mem_err;
		}
		skb_reserve(skb, QLGE_SB_PAD);
		sbq_desc->p.skb = skb;
		/*
		 * Map only half the buffer. Because the
		 * other half may get some data copied to it
		 * when the completion arrives.
		 */
		map = pci_map_single(qdev->pdev,
				     skb->data,
				     rx_ring->sbq_buf_size / 2,
				     PCI_DMA_FROMDEVICE);
		if (pci_dma_mapping_error(qdev->pdev, map)) {
			QPRINTK(qdev, IFUP, ERR, "PCI mapping failed.\n");
			goto mem_err;
		}
		pci_unmap_addr_set(sbq_desc, mapaddr, map);
		pci_unmap_len_set(sbq_desc, maplen, rx_ring->sbq_buf_size / 2);
		bq->addr_lo =	/*sbq_desc->addr_lo = */
		    cpu_to_le32(map);
		bq->addr_hi =	/*sbq_desc->addr_hi = */
		    cpu_to_le32(map >> 32);
		bq++;
	}
	return 0;

mem_err:
	ql_free_sbq_buffers(qdev, rx_ring);
	return -ENOMEM;
}
static void ql_free_rx_resources(struct ql_adapter *qdev,
				 struct rx_ring *rx_ring)
{
	if (rx_ring->sbq_len)
		ql_free_sbq_buffers(qdev, rx_ring);
	if (rx_ring->lbq_len)
		ql_free_lbq_buffers(qdev, rx_ring);

	/* Free the small buffer queue. */
	if (rx_ring->sbq_base) {
		pci_free_consistent(qdev->pdev,
				    rx_ring->sbq_size,
				    rx_ring->sbq_base, rx_ring->sbq_base_dma);
		rx_ring->sbq_base = NULL;
	}

	/* Free the small buffer queue control blocks. */
	kfree(rx_ring->sbq);
	rx_ring->sbq = NULL;

	/* Free the large buffer queue. */
	if (rx_ring->lbq_base) {
		pci_free_consistent(qdev->pdev,
				    rx_ring->lbq_size,
				    rx_ring->lbq_base, rx_ring->lbq_base_dma);
		rx_ring->lbq_base = NULL;
	}

	/* Free the large buffer queue control blocks. */
	kfree(rx_ring->lbq);
	rx_ring->lbq = NULL;

	/* Free the rx queue. */
	if (rx_ring->cq_base) {
		pci_free_consistent(qdev->pdev,
				    rx_ring->cq_size,
				    rx_ring->cq_base, rx_ring->cq_base_dma);
		rx_ring->cq_base = NULL;
	}
}
/* Allocate queues and buffers for this completions queue based
 * on the values in the parameter structure. */
static int ql_alloc_rx_resources(struct ql_adapter *qdev,
				 struct rx_ring *rx_ring)
{
	/*
	 * Allocate the completion queue for this rx_ring.
	 */
	rx_ring->cq_base =
	    pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
				 &rx_ring->cq_base_dma);

	if (rx_ring->cq_base == NULL) {
		QPRINTK(qdev, IFUP, ERR, "rx_ring alloc failed.\n");
		return -ENOMEM;
	}

	if (rx_ring->sbq_len) {
		/*
		 * Allocate small buffer queue.
		 */
		rx_ring->sbq_base =
		    pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
					 &rx_ring->sbq_base_dma);

		if (rx_ring->sbq_base == NULL) {
			QPRINTK(qdev, IFUP, ERR,
				"Small buffer queue allocation failed.\n");
			goto err_mem;
		}

		/*
		 * Allocate small buffer queue control blocks.
		 */
		rx_ring->sbq =
		    kmalloc(rx_ring->sbq_len * sizeof(struct bq_desc),
			    GFP_KERNEL);
		if (rx_ring->sbq == NULL) {
			QPRINTK(qdev, IFUP, ERR,
				"Small buffer queue control block allocation failed.\n");
			goto err_mem;
		}

		if (ql_alloc_sbq_buffers(qdev, rx_ring)) {
			QPRINTK(qdev, IFUP, ERR,
				"Small buffer allocation failed.\n");
			goto err_mem;
		}
	}

	if (rx_ring->lbq_len) {
		/*
		 * Allocate large buffer queue.
		 */
		rx_ring->lbq_base =
		    pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
					 &rx_ring->lbq_base_dma);

		if (rx_ring->lbq_base == NULL) {
			QPRINTK(qdev, IFUP, ERR,
				"Large buffer queue allocation failed.\n");
			goto err_mem;
		}
		/*
		 * Allocate large buffer queue control blocks.
		 */
		rx_ring->lbq =
		    kmalloc(rx_ring->lbq_len * sizeof(struct bq_desc),
			    GFP_KERNEL);
		if (rx_ring->lbq == NULL) {
			QPRINTK(qdev, IFUP, ERR,
				"Large buffer queue control block allocation failed.\n");
			goto err_mem;
		}

		/*
		 * Allocate the buffers.
		 */
		if (ql_alloc_lbq_buffers(qdev, rx_ring)) {
			QPRINTK(qdev, IFUP, ERR,
				"Large buffer allocation failed.\n");
			goto err_mem;
		}
	}

	return 0;

err_mem:
	ql_free_rx_resources(qdev, rx_ring);
	return -ENOMEM;
}
static void ql_tx_ring_clean(struct ql_adapter *qdev)
{
	struct tx_ring *tx_ring;
	struct tx_ring_desc *tx_ring_desc;
	int i, j;

	/*
	 * Loop through all queues and free
	 * any resources.
	 */
	for (j = 0; j < qdev->tx_ring_count; j++) {
		tx_ring = &qdev->tx_ring[j];
		for (i = 0; i < tx_ring->wq_len; i++) {
			tx_ring_desc = &tx_ring->q[i];
			if (tx_ring_desc && tx_ring_desc->skb) {
				QPRINTK(qdev, IFDOWN, ERR,
					"Freeing lost SKB %p, from queue %d, index %d.\n",
					tx_ring_desc->skb, j,
					tx_ring_desc->index);
				ql_unmap_send(qdev, tx_ring_desc,
					      tx_ring_desc->map_cnt);
				dev_kfree_skb(tx_ring_desc->skb);
				tx_ring_desc->skb = NULL;
			}
		}
	}
}
static void ql_free_ring_cb(struct ql_adapter *qdev)
{
	kfree(qdev->ring_mem);
}

static int ql_alloc_ring_cb(struct ql_adapter *qdev)
{
	/* Allocate space for tx/rx ring control blocks. */
	qdev->ring_mem_size =
	    (qdev->tx_ring_count * sizeof(struct tx_ring)) +
	    (qdev->rx_ring_count * sizeof(struct rx_ring));
	qdev->ring_mem = kmalloc(qdev->ring_mem_size, GFP_KERNEL);
	if (qdev->ring_mem == NULL)
		return -ENOMEM;

	qdev->rx_ring = qdev->ring_mem;
	qdev->tx_ring = qdev->ring_mem +
	    (qdev->rx_ring_count * sizeof(struct rx_ring));
	return 0;
}
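/*
 * Note: ring_mem is one kmalloc holding all ring control blocks: the
 * rx_ring structs come first, the tx_ring structs immediately after.
 * With 8 online CPUs, for example, that is 17 rx_ring entries (1 default
 * + 8 outbound + 8 inbound) followed by 8 tx_ring entries; see
 * ql_configure_rings() for the counts.
 */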
static void ql_free_mem_resources(struct ql_adapter *qdev)
{
	int i;

	for (i = 0; i < qdev->tx_ring_count; i++)
		ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
	for (i = 0; i < qdev->rx_ring_count; i++)
		ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
	ql_free_shadow_space(qdev);
}

static int ql_alloc_mem_resources(struct ql_adapter *qdev)
{
	int i;

	/* Allocate space for our shadow registers and such. */
	if (ql_alloc_shadow_space(qdev))
		return -ENOMEM;

	for (i = 0; i < qdev->rx_ring_count; i++) {
		if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
			QPRINTK(qdev, IFUP, ERR,
				"RX resource allocation failed.\n");
			goto err_mem;
		}
	}
	/* Allocate tx queue resources */
	for (i = 0; i < qdev->tx_ring_count; i++) {
		if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
			QPRINTK(qdev, IFUP, ERR,
				"TX resource allocation failed.\n");
			goto err_mem;
		}
	}
	return 0;

err_mem:
	ql_free_mem_resources(qdev);
	return -ENOMEM;
}
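/*
 * Note: the rx shadow page allocated in ql_alloc_shadow_space() is carved
 * into per-queue slices of sizeof(u64) * 4 below: one u64 for the
 * producer-index shadow, one for the lbq indirect base pointer, one for
 * the sbq indirect base pointer, and (apparently) one spare.
 */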
/* Set up the rx ring control block and pass it to the chip.
 * The control block is defined as
 * "Completion Queue Initialization Control Block", or cqicb.
 */
static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	struct cqicb *cqicb = &rx_ring->cqicb;
	void *shadow_reg = qdev->rx_ring_shadow_reg_area +
	    (rx_ring->cq_id * sizeof(u64) * 4);
	u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
	    (rx_ring->cq_id * sizeof(u64) * 4);
	void __iomem *doorbell_area =
	    qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
	int err = 0;
	u16 bq_len;

	/* Set up the shadow registers for this ring. */
	rx_ring->prod_idx_sh_reg = shadow_reg;
	rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
	shadow_reg += sizeof(u64);
	shadow_reg_dma += sizeof(u64);
	rx_ring->lbq_base_indirect = shadow_reg;
	rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
	shadow_reg += sizeof(u64);
	shadow_reg_dma += sizeof(u64);
	rx_ring->sbq_base_indirect = shadow_reg;
	rx_ring->sbq_base_indirect_dma = shadow_reg_dma;

	/* PCI doorbell mem area + 0x00 for consumer index register */
	rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
	rx_ring->cnsmr_idx = 0;
	rx_ring->curr_entry = rx_ring->cq_base;

	/* PCI doorbell mem area + 0x04 for valid register */
	rx_ring->valid_db_reg = doorbell_area + 0x04;

	/* PCI doorbell mem area + 0x18 for large buffer consumer */
	rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);

	/* PCI doorbell mem area + 0x1c */
	rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);

	memset((void *)cqicb, 0, sizeof(struct cqicb));
	cqicb->msix_vect = rx_ring->irq;

	bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
	cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);

	cqicb->addr_lo = cpu_to_le32(rx_ring->cq_base_dma);
	cqicb->addr_hi = cpu_to_le32((u64) rx_ring->cq_base_dma >> 32);

	cqicb->prod_idx_addr_lo = cpu_to_le32(rx_ring->prod_idx_sh_reg_dma);
	cqicb->prod_idx_addr_hi =
	    cpu_to_le32((u64) rx_ring->prod_idx_sh_reg_dma >> 32);

	/*
	 * Set up the control block load flags.
	 */
	cqicb->flags = FLAGS_LC |	/* Load queue base address */
	    FLAGS_LV |			/* Load MSI-X vector */
	    FLAGS_LI;			/* Load irq delay values */
	if (rx_ring->lbq_len) {
		cqicb->flags |= FLAGS_LL;	/* Load lbq values */
		*((u64 *) rx_ring->lbq_base_indirect) = rx_ring->lbq_base_dma;
		cqicb->lbq_addr_lo =
		    cpu_to_le32(rx_ring->lbq_base_indirect_dma);
		cqicb->lbq_addr_hi =
		    cpu_to_le32((u64) rx_ring->lbq_base_indirect_dma >> 32);
		bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
		    (u16) rx_ring->lbq_buf_size;
		cqicb->lbq_buf_size = cpu_to_le16(bq_len);
		bq_len = (rx_ring->lbq_len == 65536) ? 0 :
		    (u16) rx_ring->lbq_len;
		cqicb->lbq_len = cpu_to_le16(bq_len);
		rx_ring->lbq_prod_idx = rx_ring->lbq_len - 16;
		rx_ring->lbq_curr_idx = 0;
		rx_ring->lbq_clean_idx = rx_ring->lbq_prod_idx;
		rx_ring->lbq_free_cnt = 16;
	}
	if (rx_ring->sbq_len) {
		cqicb->flags |= FLAGS_LS;	/* Load sbq values */
		*((u64 *) rx_ring->sbq_base_indirect) = rx_ring->sbq_base_dma;
		cqicb->sbq_addr_lo =
		    cpu_to_le32(rx_ring->sbq_base_indirect_dma);
		cqicb->sbq_addr_hi =
		    cpu_to_le32((u64) rx_ring->sbq_base_indirect_dma >> 32);
		cqicb->sbq_buf_size =
		    cpu_to_le16(((rx_ring->sbq_buf_size / 2) + 8) & 0xfffffff8);
		bq_len = (rx_ring->sbq_len == 65536) ? 0 :
		    (u16) rx_ring->sbq_len;
		cqicb->sbq_len = cpu_to_le16(bq_len);
		rx_ring->sbq_prod_idx = rx_ring->sbq_len - 16;
		rx_ring->sbq_curr_idx = 0;
		rx_ring->sbq_clean_idx = rx_ring->sbq_prod_idx;
		rx_ring->sbq_free_cnt = 16;
	}
	switch (rx_ring->type) {
	case TX_Q:
		/* If there's only one interrupt, then we use
		 * worker threads to process the outbound
		 * completion handling rx_rings. We do this so
		 * they can be run on multiple CPUs. There is
		 * room to play with this more where we would only
		 * run in a worker if there are more than x number
		 * of outbound completions on the queue and more
		 * than one queue active.  Some threshold that
		 * would indicate a benefit in spite of the cost
		 * of a context switch.
		 * If there's more than one interrupt, then the
		 * outbound completions are processed in the ISR.
		 */
		if (!test_bit(QL_MSIX_ENABLED, &qdev->flags))
			INIT_DELAYED_WORK(&rx_ring->rx_work, ql_tx_clean);
		else {
			/* With all debug warnings on we see a WARN_ON message
			 * when we free the skb in the interrupt context.
			 */
			INIT_DELAYED_WORK(&rx_ring->rx_work, ql_tx_clean);
		}
		cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
		cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
		break;
	case DEFAULT_Q:
		INIT_DELAYED_WORK(&rx_ring->rx_work, ql_rx_clean);
		cqicb->irq_delay = 0;
		cqicb->pkt_delay = 0;
		break;
	case RX_Q:
		/* Inbound completion handling rx_rings run in
		 * separate NAPI contexts.
		 */
		netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
			       64);
		cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
		cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
		break;
	default:
		QPRINTK(qdev, IFUP, DEBUG, "Invalid rx_ring->type = %d.\n",
			rx_ring->type);
	}
	QPRINTK(qdev, IFUP, INFO, "Initializing rx work queue.\n");
	err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
			   CFG_LCQ, rx_ring->cq_id);
	if (err) {
		QPRINTK(qdev, IFUP, ERR, "Failed to load CQICB.\n");
		return err;
	}
	QPRINTK(qdev, IFUP, INFO, "Successfully loaded CQICB.\n");
	/*
	 * Advance the producer index for the buffer queues.
	 */
	wmb();
	if (rx_ring->lbq_len)
		ql_write_db_reg(rx_ring->lbq_prod_idx,
				rx_ring->lbq_prod_idx_db_reg);
	if (rx_ring->sbq_len)
		ql_write_db_reg(rx_ring->sbq_prod_idx,
				rx_ring->sbq_prod_idx_db_reg);
	return err;
}
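/*
 * Note: ql_start_tx_ring() below casts the tx_ring straight to a
 * struct wqicb.  That presumably relies on the WQICB fields being laid
 * out at the very start of struct tx_ring, so the control block can be
 * filled in place and then downloaded with ql_write_cfg().
 */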
static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
{
	struct wqicb *wqicb = (struct wqicb *)tx_ring;
	void __iomem *doorbell_area =
	    qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
	void *shadow_reg = qdev->tx_ring_shadow_reg_area +
	    (tx_ring->wq_id * sizeof(u64));
	u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
	    (tx_ring->wq_id * sizeof(u64));
	int err = 0;

	/*
	 * Assign doorbell registers for this tx_ring.
	 */
	/* TX PCI doorbell mem area for tx producer index */
	tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
	tx_ring->prod_idx = 0;
	/* TX PCI doorbell mem area + 0x04 */
	tx_ring->valid_db_reg = doorbell_area + 0x04;

	/*
	 * Assign shadow registers for this tx_ring.
	 */
	tx_ring->cnsmr_idx_sh_reg = shadow_reg;
	tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;

	wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
	wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
				   Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
	wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);

	wqicb->addr_lo = cpu_to_le32(tx_ring->wq_base_dma);
	wqicb->addr_hi = cpu_to_le32((u64) tx_ring->wq_base_dma >> 32);

	wqicb->cnsmr_idx_addr_lo = cpu_to_le32(tx_ring->cnsmr_idx_sh_reg_dma);
	wqicb->cnsmr_idx_addr_hi =
	    cpu_to_le32((u64) tx_ring->cnsmr_idx_sh_reg_dma >> 32);

	ql_init_tx_ring(qdev, tx_ring);

	err = ql_write_cfg(qdev, wqicb, sizeof(struct wqicb), CFG_LRQ,
			   (u16) tx_ring->wq_id);
	if (err) {
		QPRINTK(qdev, IFUP, ERR, "Failed to load tx_ring.\n");
		return err;
	}
	QPRINTK(qdev, IFUP, INFO, "Successfully loaded WQICB.\n");
	return err;
}
static void ql_disable_msix(struct ql_adapter *qdev)
{
	if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
		pci_disable_msix(qdev->pdev);
		clear_bit(QL_MSIX_ENABLED, &qdev->flags);
		kfree(qdev->msi_x_entry);
		qdev->msi_x_entry = NULL;
	} else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
		pci_disable_msi(qdev->pdev);
		clear_bit(QL_MSI_ENABLED, &qdev->flags);
	}
}
static void ql_enable_msix(struct ql_adapter *qdev)
{
	int i;

	qdev->intr_count = 1;
	/* Get the MSIX vectors. */
	if (irq_type == MSIX_IRQ) {
		/* Try to alloc space for the msix struct,
		 * if it fails then go to MSI/legacy.
		 */
		qdev->msi_x_entry = kcalloc(qdev->rx_ring_count,
					    sizeof(struct msix_entry),
					    GFP_KERNEL);
		if (!qdev->msi_x_entry) {
			irq_type = MSI_IRQ;
			goto msi;
		}

		for (i = 0; i < qdev->rx_ring_count; i++)
			qdev->msi_x_entry[i].entry = i;

		if (!pci_enable_msix
		    (qdev->pdev, qdev->msi_x_entry, qdev->rx_ring_count)) {
			set_bit(QL_MSIX_ENABLED, &qdev->flags);
			qdev->intr_count = qdev->rx_ring_count;
			QPRINTK(qdev, IFUP, INFO,
				"MSI-X Enabled, got %d vectors.\n",
				qdev->intr_count);
			return;
		}
		kfree(qdev->msi_x_entry);
		qdev->msi_x_entry = NULL;
		QPRINTK(qdev, IFUP, WARNING,
			"MSI-X Enable failed, trying MSI.\n");
		irq_type = MSI_IRQ;
	}
msi:
	if (irq_type == MSI_IRQ) {
		if (!pci_enable_msi(qdev->pdev)) {
			set_bit(QL_MSI_ENABLED, &qdev->flags);
			QPRINTK(qdev, IFUP, INFO,
				"Running with MSI interrupts.\n");
			return;
		}
	}
	irq_type = LEG_IRQ;
	QPRINTK(qdev, IFUP, DEBUG, "Running with legacy interrupts.\n");
}
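/*
 * Note: ql_enable_msix() degrades gracefully: MSI-X first, then MSI,
 * then legacy INTx.  The irq_type module parameter is overwritten along
 * the way so the rest of the driver can see which mode actually stuck.
 */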
/*
 * Here we build the intr_context structures based on
 * our rx_ring count and intr vector count.
 * The intr_context structure is used to hook each vector
 * to possibly different handlers.
 */
static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
{
	int i = 0;
	struct intr_context *intr_context = &qdev->intr_context[0];

	ql_enable_msix(qdev);

	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
		/* Each rx_ring has its
		 * own intr_context since we have separate
		 * vectors for each queue.
		 * This is only true when MSI-X is enabled.
		 */
		for (i = 0; i < qdev->intr_count; i++, intr_context++) {
			qdev->rx_ring[i].irq = i;
			intr_context->intr = i;
			intr_context->qdev = qdev;
			/*
			 * We set up each vector's enable/disable/read bits so
			 * there's no bit/mask calculations in the critical path.
			 */
			intr_context->intr_en_mask =
			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
			    INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
			    | i;
			intr_context->intr_dis_mask =
			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
			    INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
			    INTR_EN_IHD | i;
			intr_context->intr_read_mask =
			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
			    INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
			    i;

			if (i == 0) {
				/*
				 * Default queue handles bcast/mcast plus
				 * async events.  Needs buffers.
				 */
				intr_context->handler = qlge_isr;
				sprintf(intr_context->name, "%s-default-queue",
					qdev->ndev->name);
			} else if (i < qdev->rss_ring_first_cq_id) {
				/*
				 * Outbound queue is for outbound completions only.
				 */
				intr_context->handler = qlge_msix_tx_isr;
				sprintf(intr_context->name, "%s-txq-%d",
					qdev->ndev->name, i);
			} else {
				/*
				 * Inbound queues handle unicast frames only.
				 */
				intr_context->handler = qlge_msix_rx_isr;
				sprintf(intr_context->name, "%s-rxq-%d",
					qdev->ndev->name, i);
			}
		}
	} else {
		/*
		 * All rx_rings use the same intr_context since
		 * there is only one vector.
		 */
		intr_context->intr = 0;
		intr_context->qdev = qdev;
		/*
		 * We set up each vector's enable/disable/read bits so
		 * there's no bit/mask calculations in the critical path.
		 */
		intr_context->intr_en_mask =
		    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
		intr_context->intr_dis_mask =
		    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
		    INTR_EN_TYPE_DISABLE;
		intr_context->intr_read_mask =
		    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
		/*
		 * Single interrupt means one handler for all rings.
		 */
		intr_context->handler = qlge_isr;
		sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
		for (i = 0; i < qdev->rx_ring_count; i++)
			qdev->rx_ring[i].irq = 0;
	}
}
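/*
 * Example of the resulting MSI-X layout on a 4-CPU box: intr_count is 9;
 * vector 0 services the default queue via qlge_isr(), vectors 1-4 the
 * outbound completion queues via qlge_msix_tx_isr(), and vectors 5-8 the
 * inbound RSS queues via qlge_msix_rx_isr().
 */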
static void ql_free_irq(struct ql_adapter *qdev)
{
	int i;
	struct intr_context *intr_context = &qdev->intr_context[0];

	for (i = 0; i < qdev->intr_count; i++, intr_context++) {
		if (intr_context->hooked) {
			if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
				free_irq(qdev->msi_x_entry[i].vector,
					 &qdev->rx_ring[i]);
				QPRINTK(qdev, IFDOWN, ERR,
					"freeing msix interrupt %d.\n", i);
			} else {
				free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
				QPRINTK(qdev, IFDOWN, ERR,
					"freeing msi interrupt %d.\n", i);
			}
		}
	}
	ql_disable_msix(qdev);
}
static int ql_request_irq(struct ql_adapter *qdev)
{
	int i;
	int status = 0;
	struct pci_dev *pdev = qdev->pdev;
	struct intr_context *intr_context = &qdev->intr_context[0];

	ql_resolve_queues_to_irqs(qdev);

	for (i = 0; i < qdev->intr_count; i++, intr_context++) {
		atomic_set(&intr_context->irq_cnt, 0);
		if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
			status = request_irq(qdev->msi_x_entry[i].vector,
					     intr_context->handler,
					     0,
					     intr_context->name,
					     &qdev->rx_ring[i]);
			if (status) {
				QPRINTK(qdev, IFUP, ERR,
					"Failed request for MSIX interrupt %d.\n",
					i);
				goto err_irq;
			}
			QPRINTK(qdev, IFUP, INFO,
				"Hooked intr %d, queue type %s%s%s, with name %s.\n",
				i,
				qdev->rx_ring[i].type ==
				DEFAULT_Q ? "DEFAULT_Q" : "",
				qdev->rx_ring[i].type ==
				TX_Q ? "TX_Q" : "",
				qdev->rx_ring[i].type ==
				RX_Q ? "RX_Q" : "", intr_context->name);
		} else {
			QPRINTK(qdev, IFUP, DEBUG,
				"trying msi or legacy interrupts.\n");
			QPRINTK(qdev, IFUP, DEBUG,
				"%s: irq = %d.\n", __func__, pdev->irq);
			QPRINTK(qdev, IFUP, DEBUG,
				"%s: context->name = %s.\n", __func__,
				intr_context->name);
			QPRINTK(qdev, IFUP, DEBUG,
				"%s: dev_id = 0x%p.\n", __func__,
				&qdev->rx_ring[0]);
			status =
			    request_irq(pdev->irq, qlge_isr,
					test_bit(QL_MSI_ENABLED,
						 &qdev->flags) ? 0 : IRQF_SHARED,
					intr_context->name, &qdev->rx_ring[0]);
			if (status)
				goto err_irq;

			QPRINTK(qdev, IFUP, ERR,
				"Hooked intr %d, queue type %s%s%s, with name %s.\n",
				i,
				qdev->rx_ring[0].type ==
				DEFAULT_Q ? "DEFAULT_Q" : "",
				qdev->rx_ring[0].type == TX_Q ? "TX_Q" : "",
				qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
				intr_context->name);
		}
		intr_context->hooked = 1;
	}
	return status;

err_irq:
	QPRINTK(qdev, IFUP, ERR, "Failed to get the interrupts!!!\n");
	ql_free_irq(qdev);
	return status;
}
static int ql_start_rss(struct ql_adapter *qdev)
{
	struct ricb *ricb = &qdev->ricb;
	int status = 0;
	int i;
	u8 *hash_id = (u8 *) ricb->hash_cq_id;

	memset((void *)ricb, 0, sizeof(struct ricb));

	ricb->base_cq = qdev->rss_ring_first_cq_id | RSS_L4K;
	ricb->flags =
	    (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RI4 | RSS_RI6 | RSS_RT4 |
	     RSS_RT6);
	ricb->mask = cpu_to_le16(qdev->rss_ring_count - 1);

	/*
	 * Fill out the Indirection Table.
	 */
	for (i = 0; i < 32; i++)
		hash_id[i] = i & (qdev->rss_ring_count - 1);

	/*
	 * Random values for the IPv6 and IPv4 Hash Keys.
	 */
	get_random_bytes((void *)&ricb->ipv6_hash_key[0], 40);
	get_random_bytes((void *)&ricb->ipv4_hash_key[0], 16);

	QPRINTK(qdev, IFUP, INFO, "Initializing RSS.\n");

	status = ql_write_cfg(qdev, ricb, sizeof(struct ricb), CFG_LR, 0);
	if (status) {
		QPRINTK(qdev, IFUP, ERR, "Failed to load RICB.\n");
		return status;
	}
	QPRINTK(qdev, IFUP, INFO, "Successfully loaded RICB.\n");
	return status;
}
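/*
 * Note: the RICB's 32-entry indirection table spreads flows round-robin
 * across the inbound (RSS) completion queues, with the mask field keeping
 * the lookup within rss_ring_count entries.  The hash keys are simply
 * randomized at every load, so flow-to-queue placement is stable within a
 * boot but not across boots.
 */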
/* Initialize the frame-to-queue routing. */
static int ql_route_initialize(struct ql_adapter *qdev)
{
	int status = 0;
	int i;

	/* Clear all the entries in the routing table. */
	for (i = 0; i < 16; i++) {
		status = ql_set_routing_reg(qdev, i, 0, 0);
		if (status) {
			QPRINTK(qdev, IFUP, ERR,
				"Failed to init routing register for CAM packets.\n");
			return status;
		}
	}

	status = ql_set_routing_reg(qdev, RT_IDX_ALL_ERR_SLOT, RT_IDX_ERR, 1);
	if (status) {
		QPRINTK(qdev, IFUP, ERR,
			"Failed to init routing register for error packets.\n");
		return status;
	}
	status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
	if (status) {
		QPRINTK(qdev, IFUP, ERR,
			"Failed to init routing register for broadcast packets.\n");
		return status;
	}
	/* If we have more than one inbound queue, then turn on RSS in the
	 * routing block.
	 */
	if (qdev->rss_ring_count > 1) {
		status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
					    RT_IDX_RSS_MATCH, 1);
		if (status) {
			QPRINTK(qdev, IFUP, ERR,
				"Failed to init routing register for MATCH RSS packets.\n");
			return status;
		}
	}

	status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
				    RT_IDX_CAM_HIT, 1);
	if (status) {
		QPRINTK(qdev, IFUP, ERR,
			"Failed to init routing register for CAM packets.\n");
		return status;
	}
	return status;
}
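/*
 * Note: the routing slots programmed above appear to be evaluated by the
 * chip in priority order -- errors, broadcast, the optional RSS match,
 * and finally the CAM hit that steers unicast frames -- with anything
 * unmatched falling through to the default queue.
 */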
static int ql_adapter_initialize(struct ql_adapter *qdev)
{
	u32 value, mask;
	int i;
	int status = 0;

	/*
	 * Set up the System register to halt on errors.
	 */
	value = SYS_EFE | SYS_FAE;
	mask = value << 16;
	ql_write32(qdev, SYS, mask | value);

	/* Set the default queue. */
	value = NIC_RCV_CFG_DFQ;
	mask = NIC_RCV_CFG_DFQ_MASK;
	ql_write32(qdev, NIC_RCV_CFG, (mask | value));

	/* Set the MPI interrupt to enabled. */
	ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);

	/* Enable the function, set pagesize, enable error checking. */
	value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
	    FSC_EC | FSC_VM_PAGE_4K | FSC_SH;

	/* Set/clear header splitting. */
	mask = FSC_VM_PAGESIZE_MASK |
	    FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
	ql_write32(qdev, FSC, mask | value);

	ql_write32(qdev, SPLT_HDR, SPLT_HDR_EP |
		min(SMALL_BUFFER_SIZE, MAX_SPLIT_SIZE));

	/* Start up the rx queues. */
	for (i = 0; i < qdev->rx_ring_count; i++) {
		status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
		if (status) {
			QPRINTK(qdev, IFUP, ERR,
				"Failed to start rx ring[%d].\n", i);
			return status;
		}
	}

	/* If there is more than one inbound completion queue
	 * then download a RICB to configure RSS.
	 */
	if (qdev->rss_ring_count > 1) {
		status = ql_start_rss(qdev);
		if (status) {
			QPRINTK(qdev, IFUP, ERR, "Failed to start RSS.\n");
			return status;
		}
	}

	/* Start up the tx queues. */
	for (i = 0; i < qdev->tx_ring_count; i++) {
		status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
		if (status) {
			QPRINTK(qdev, IFUP, ERR,
				"Failed to start tx ring[%d].\n", i);
			return status;
		}
	}

	status = ql_port_initialize(qdev);
	if (status) {
		QPRINTK(qdev, IFUP, ERR, "Failed to start port.\n");
		return status;
	}

	status = ql_set_mac_addr_reg(qdev, (u8 *) qdev->ndev->perm_addr,
				     MAC_ADDR_TYPE_CAM_MAC, qdev->func);
	if (status) {
		QPRINTK(qdev, IFUP, ERR, "Failed to init mac address.\n");
		return status;
	}

	status = ql_route_initialize(qdev);
	if (status) {
		QPRINTK(qdev, IFUP, ERR, "Failed to init routing table.\n");
		return status;
	}

	/* Start NAPI for the RSS queues. */
	for (i = qdev->rss_ring_first_cq_id; i < qdev->rx_ring_count; i++) {
		QPRINTK(qdev, IFUP, INFO, "Enabling NAPI for rx_ring[%d].\n",
			i);
		napi_enable(&qdev->rx_ring[i].napi);
	}

	return status;
}
/* Issue soft reset to chip. */
static int ql_adapter_reset(struct ql_adapter *qdev)
{
	u32 value;
	int max_wait_time;
	int status = 0;
	int resetCnt = 0;

#define MAX_RESET_CNT 1
issue_reset:
	resetCnt++;
	QPRINTK(qdev, IFDOWN, DEBUG, "Issue soft reset to chip.\n");
	ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
	/* Wait for reset to complete. */
	max_wait_time = 3;
	QPRINTK(qdev, IFDOWN, DEBUG, "Wait %d seconds for reset to complete.\n",
		max_wait_time);
	do {
		value = ql_read32(qdev, RST_FO);
		if ((value & RST_FO_FR) == 0)
			break;
		ssleep(1);
	} while ((--max_wait_time));

	if (value & RST_FO_FR) {
		QPRINTK(qdev, IFDOWN, ERR,
			"Stuck in SoftReset:  FSC_SR:0x%08x\n", value);
		if (resetCnt < MAX_RESET_CNT)
			goto issue_reset;
	}
	if (max_wait_time == 0) {
		status = -ETIMEDOUT;
		QPRINTK(qdev, IFDOWN, ERR,
			"ETIMEDOUT!!! errored out of resetting the chip!\n");
	}
	return status;
}
static void ql_display_dev_info(struct net_device *ndev)
{
	struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);

	QPRINTK(qdev, PROBE, INFO,
		"Function #%d, NIC Roll %d, NIC Rev = %d, "
		"XG Roll = %d, XG Rev = %d.\n",
		qdev->func,
		qdev->chip_rev_id & 0x0000000f,
		qdev->chip_rev_id >> 4 & 0x0000000f,
		qdev->chip_rev_id >> 8 & 0x0000000f,
		qdev->chip_rev_id >> 12 & 0x0000000f);
	QPRINTK(qdev, PROBE, INFO, "MAC address %pM\n", ndev->dev_addr);
}
static int ql_adapter_down(struct ql_adapter *qdev)
{
	struct net_device *ndev = qdev->ndev;
	int i, status = 0;
	struct rx_ring *rx_ring;

	netif_stop_queue(ndev);
	netif_carrier_off(ndev);

	cancel_delayed_work_sync(&qdev->asic_reset_work);
	cancel_delayed_work_sync(&qdev->mpi_reset_work);
	cancel_delayed_work_sync(&qdev->mpi_work);

	/* The default queue at index 0 is always processed in
	 * a workqueue.
	 */
	cancel_delayed_work_sync(&qdev->rx_ring[0].rx_work);

	/* The rest of the rx_rings are processed in
	 * a workqueue only if it's a single interrupt
	 * environment (MSI/Legacy).
	 */
	for (i = 1; i < qdev->rx_ring_count; i++) {
		rx_ring = &qdev->rx_ring[i];
		/* Only the RSS rings use NAPI on multi irq
		 * environment.  Outbound completion processing
		 * is done in interrupt context.
		 */
		if (i >= qdev->rss_ring_first_cq_id)
			napi_disable(&rx_ring->napi);
		else
			cancel_delayed_work_sync(&rx_ring->rx_work);
	}

	clear_bit(QL_ADAPTER_UP, &qdev->flags);

	ql_disable_interrupts(qdev);

	ql_tx_ring_clean(qdev);

	spin_lock(&qdev->hw_lock);
	status = ql_adapter_reset(qdev);
	if (status)
		QPRINTK(qdev, IFDOWN, ERR, "reset(func #%d) FAILED!\n",
			qdev->func);
	spin_unlock(&qdev->hw_lock);
	return status;
}
static int ql_adapter_up(struct ql_adapter *qdev)
{
	int err = 0;

	spin_lock(&qdev->hw_lock);
	err = ql_adapter_initialize(qdev);
	if (err) {
		QPRINTK(qdev, IFUP, INFO, "Unable to initialize adapter.\n");
		spin_unlock(&qdev->hw_lock);
		goto err_init;
	}
	spin_unlock(&qdev->hw_lock);
	set_bit(QL_ADAPTER_UP, &qdev->flags);
	ql_enable_interrupts(qdev);
	ql_enable_all_completion_interrupts(qdev);
	if ((ql_read32(qdev, STS) & qdev->port_init)) {
		netif_carrier_on(qdev->ndev);
		netif_start_queue(qdev->ndev);
	}

	return 0;
err_init:
	ql_adapter_reset(qdev);
	return err;
}
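/*
 * Note: ql_adapter_up() above only turns the carrier on right away when
 * the STS register already reports the port initialized; otherwise
 * link-up is presumably reported later through the MPI firmware event
 * path (the mpi_work handlers).
 */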
static int ql_cycle_adapter(struct ql_adapter *qdev)
{
	int status;

	status = ql_adapter_down(qdev);
	if (status)
		goto error;

	status = ql_adapter_up(qdev);
	if (status)
		goto error;

	return status;
error:
	QPRINTK(qdev, IFUP, ALERT,
		"Driver up/down cycle failed, closing device\n");
	rtnl_lock();
	dev_close(qdev->ndev);
	rtnl_unlock();
	return status;
}
static void ql_release_adapter_resources(struct ql_adapter *qdev)
{
	ql_free_mem_resources(qdev);
	ql_free_irq(qdev);
}

static int ql_get_adapter_resources(struct ql_adapter *qdev)
{
	int status = 0;

	if (ql_alloc_mem_resources(qdev)) {
		QPRINTK(qdev, IFUP, ERR, "Unable to allocate memory.\n");
		return -ENOMEM;
	}
	status = ql_request_irq(qdev);
	if (status)
		ql_free_mem_resources(qdev);
	return status;
}
static int qlge_close(struct net_device *ndev)
{
	struct ql_adapter *qdev = netdev_priv(ndev);

	/*
	 * Wait for device to recover from a reset.
	 * (Rarely happens, but possible.)
	 */
	while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
		msleep(1);
	ql_adapter_down(qdev);
	ql_release_adapter_resources(qdev);
	ql_free_ring_cb(qdev);
	return 0;
}
static int ql_configure_rings(struct ql_adapter *qdev)
{
	int i;
	struct rx_ring *rx_ring;
	struct tx_ring *tx_ring;
	int cpu_cnt = num_online_cpus();

	/*
	 * For each processor present we allocate one
	 * rx_ring for outbound completions, and one
	 * rx_ring for inbound completions.  Plus there is
	 * always the one default queue.  For the CPU
	 * counts we end up with the following rx_rings:
	 * rx_ring count =
	 *  one default queue +
	 *  (CPU count * outbound completion rx_ring) +
	 *  (CPU count * inbound (RSS) completion rx_ring)
	 * To keep it simple we limit the total number of
	 * queues to < 32, so we truncate CPU to 8.
	 * This limitation can be removed when requested.
	 */
	if (cpu_cnt > 8)
		cpu_cnt = 8;

	/*
	 * rx_ring[0] is always the default queue.
	 */
	/* Allocate outbound completion ring for each CPU. */
	qdev->tx_ring_count = cpu_cnt;
	/* Allocate inbound completion (RSS) ring for each CPU. */
	qdev->rss_ring_count = cpu_cnt;
	/* cq_id for the first inbound ring handler. */
	qdev->rss_ring_first_cq_id = cpu_cnt + 1;
	/*
	 * qdev->rx_ring_count:
	 * Total number of rx_rings.  This includes the one
	 * default queue, a number of outbound completion
	 * handler rx_rings, and the number of inbound
	 * completion handler rx_rings.
	 */
	qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count + 1;

	if (ql_alloc_ring_cb(qdev))
		return -ENOMEM;

	for (i = 0; i < qdev->tx_ring_count; i++) {
		tx_ring = &qdev->tx_ring[i];
		memset((void *)tx_ring, 0, sizeof(*tx_ring));
		tx_ring->qdev = qdev;
		tx_ring->wq_id = i;
		tx_ring->wq_len = qdev->tx_ring_size;
		tx_ring->wq_size =
		    tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);

		/*
		 * The completion queue ID for the tx rings start
		 * immediately after the default Q ID, which is zero.
		 */
		tx_ring->cq_id = i + 1;
	}

	for (i = 0; i < qdev->rx_ring_count; i++) {
		rx_ring = &qdev->rx_ring[i];
		memset((void *)rx_ring, 0, sizeof(*rx_ring));
		rx_ring->qdev = qdev;
		rx_ring->cq_id = i;
		rx_ring->cpu = i % cpu_cnt;	/* CPU to run handler on. */
		if (i == 0) {	/* Default queue at index 0. */
			/*
			 * Default queue handles bcast/mcast plus
			 * async events.  Needs buffers.
			 */
			rx_ring->cq_len = qdev->rx_ring_size;
			rx_ring->cq_size =
			    rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
			rx_ring->lbq_len = NUM_LARGE_BUFFERS;
			rx_ring->lbq_size =
			    rx_ring->lbq_len * sizeof(struct bq_element);
			rx_ring->lbq_buf_size = LARGE_BUFFER_SIZE;
			rx_ring->sbq_len = NUM_SMALL_BUFFERS;
			rx_ring->sbq_size =
			    rx_ring->sbq_len * sizeof(struct bq_element);
			rx_ring->sbq_buf_size = SMALL_BUFFER_SIZE * 2;
			rx_ring->type = DEFAULT_Q;
		} else if (i < qdev->rss_ring_first_cq_id) {
			/*
			 * Outbound queue handles outbound completions only.
			 */
			/* outbound cq is same size as tx_ring it services. */
			rx_ring->cq_len = qdev->tx_ring_size;
			rx_ring->cq_size =
			    rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
			rx_ring->lbq_len = 0;
			rx_ring->lbq_size = 0;
			rx_ring->lbq_buf_size = 0;
			rx_ring->sbq_len = 0;
			rx_ring->sbq_size = 0;
			rx_ring->sbq_buf_size = 0;
			rx_ring->type = TX_Q;
		} else {	/* Inbound completions (RSS) queues */
			/*
			 * Inbound queues handle unicast frames only.
			 */
			rx_ring->cq_len = qdev->rx_ring_size;
			rx_ring->cq_size =
			    rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
			rx_ring->lbq_len = NUM_LARGE_BUFFERS;
			rx_ring->lbq_size =
			    rx_ring->lbq_len * sizeof(struct bq_element);
			rx_ring->lbq_buf_size = LARGE_BUFFER_SIZE;
			rx_ring->sbq_len = NUM_SMALL_BUFFERS;
			rx_ring->sbq_size =
			    rx_ring->sbq_len * sizeof(struct bq_element);
			rx_ring->sbq_buf_size = SMALL_BUFFER_SIZE * 2;
			rx_ring->type = RX_Q;
		}
	}
	return 0;
}
static int qlge_open(struct net_device *ndev)
{
	int err = 0;
	struct ql_adapter *qdev = netdev_priv(ndev);

	err = ql_configure_rings(qdev);
	if (err)
		return err;

	err = ql_get_adapter_resources(qdev);
	if (err)
		goto error_up;

	err = ql_adapter_up(qdev);
	if (err)
		goto error_up;

	return err;

error_up:
	ql_release_adapter_resources(qdev);
	ql_free_ring_cb(qdev);
	return err;
}
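/*
 * Note: the hardware buffer tiers are sized for exactly two frame sizes,
 * so qlge_change_mtu() below accepts only 1500 and 9000 and rejects
 * everything else with -EINVAL.
 */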
static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct ql_adapter *qdev = netdev_priv(ndev);

	if (ndev->mtu == 1500 && new_mtu == 9000) {
		QPRINTK(qdev, IFUP, ERR, "Changing to jumbo MTU.\n");
	} else if (ndev->mtu == 9000 && new_mtu == 1500) {
		QPRINTK(qdev, IFUP, ERR, "Changing to normal MTU.\n");
	} else if ((ndev->mtu == 1500 && new_mtu == 1500) ||
		   (ndev->mtu == 9000 && new_mtu == 9000)) {
		return 0;
	} else
		return -EINVAL;
	ndev->mtu = new_mtu;
	return 0;
}
static struct net_device_stats *qlge_get_stats(struct net_device *ndev)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	return &qdev->stats;
}
static void qlge_set_multicast_list(struct net_device *ndev)
{
	struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
	struct dev_mc_list *mc_ptr;
	int i;

	spin_lock(&qdev->hw_lock);
	/*
	 * Set or clear promiscuous mode if a
	 * transition is taking place.
	 */
	if (ndev->flags & IFF_PROMISC) {
		if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
			if (ql_set_routing_reg
			    (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
				QPRINTK(qdev, HW, ERR,
					"Failed to set promiscuous mode.\n");
			} else {
				set_bit(QL_PROMISCUOUS, &qdev->flags);
			}
		}
	} else {
		if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
			if (ql_set_routing_reg
			    (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
				QPRINTK(qdev, HW, ERR,
					"Failed to clear promiscuous mode.\n");
			} else {
				clear_bit(QL_PROMISCUOUS, &qdev->flags);
			}
		}
	}

	/*
	 * Set or clear all multicast mode if a
	 * transition is taking place.
	 */
	if ((ndev->flags & IFF_ALLMULTI) ||
	    (ndev->mc_count > MAX_MULTICAST_ENTRIES)) {
		if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
			if (ql_set_routing_reg
			    (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
				QPRINTK(qdev, HW, ERR,
					"Failed to set all-multi mode.\n");
			} else {
				set_bit(QL_ALLMULTI, &qdev->flags);
			}
		}
	} else {
		if (test_bit(QL_ALLMULTI, &qdev->flags)) {
			if (ql_set_routing_reg
			    (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
				QPRINTK(qdev, HW, ERR,
					"Failed to clear all-multi mode.\n");
			} else {
				clear_bit(QL_ALLMULTI, &qdev->flags);
			}
		}
	}

	if (ndev->mc_count) {
		for (i = 0, mc_ptr = ndev->mc_list; mc_ptr;
		     i++, mc_ptr = mc_ptr->next)
			if (ql_set_mac_addr_reg(qdev, (u8 *) mc_ptr->dmi_addr,
						MAC_ADDR_TYPE_MULTI_MAC, i)) {
				QPRINTK(qdev, HW, ERR,
					"Failed to load multicast address.\n");
				goto exit;
			}
		if (ql_set_routing_reg
		    (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
			QPRINTK(qdev, HW, ERR,
				"Failed to set multicast match mode.\n");
		} else {
			set_bit(QL_ALLMULTI, &qdev->flags);
		}
	}
exit:
	spin_unlock(&qdev->hw_lock);
}
static int qlge_set_mac_address(struct net_device *ndev, void *p)
{
	struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
	struct sockaddr *addr = p;
	int ret = 0;

	if (netif_running(ndev))
		return -EBUSY;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;
	memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);

	spin_lock(&qdev->hw_lock);
	if (ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
			MAC_ADDR_TYPE_CAM_MAC, qdev->func)) {	/* Unicast */
		QPRINTK(qdev, HW, ERR, "Failed to load MAC address.\n");
		ret = -1;
	}
	spin_unlock(&qdev->hw_lock);

	return ret;
}
static void qlge_tx_timeout(struct net_device *ndev)
{
	struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
	queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
}

static void ql_asic_reset_work(struct work_struct *work)
{
	struct ql_adapter *qdev =
	    container_of(work, struct ql_adapter, asic_reset_work.work);
	ql_cycle_adapter(qdev);
}
static void ql_get_board_info(struct ql_adapter *qdev)
{
	qdev->func =
	    (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
	if (qdev->func) {
		qdev->xg_sem_mask = SEM_XGMAC1_MASK;
		qdev->port_link_up = STS_PL1;
		qdev->port_init = STS_PI1;
		qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
		qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
	} else {
		qdev->xg_sem_mask = SEM_XGMAC0_MASK;
		qdev->port_link_up = STS_PL0;
		qdev->port_init = STS_PI0;
		qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
		qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
	}
	qdev->chip_rev_id = ql_read32(qdev, REV_ID);
}
static void ql_release_all(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	if (qdev->workqueue) {
		destroy_workqueue(qdev->workqueue);
		qdev->workqueue = NULL;
	}
	if (qdev->q_workqueue) {
		destroy_workqueue(qdev->q_workqueue);
		qdev->q_workqueue = NULL;
	}
	if (qdev->reg_base)
		iounmap(qdev->reg_base);
	if (qdev->doorbell_area)
		iounmap(qdev->doorbell_area);
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
}
static int __devinit ql_init_device(struct pci_dev *pdev,
				    struct net_device *ndev, int cards_found)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	int pos, err = 0;
	u16 val16;

	memset((void *)qdev, 0, sizeof(*qdev));
	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "PCI device enable failed.\n");
		return err;
	}

	pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (pos <= 0) {
		dev_err(&pdev->dev, PFX "Cannot find PCI Express capability, "
			"aborting.\n");
		err = -EIO;
		goto err_out;
	}
	pci_read_config_word(pdev, pos + PCI_EXP_DEVCTL, &val16);
	val16 &= ~PCI_EXP_DEVCTL_NOSNOOP_EN;
	val16 |= (PCI_EXP_DEVCTL_CERE |
		  PCI_EXP_DEVCTL_NFERE |
		  PCI_EXP_DEVCTL_FERE | PCI_EXP_DEVCTL_URRE);
	pci_write_config_word(pdev, pos + PCI_EXP_DEVCTL, val16);

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "PCI region request failed.\n");
		goto err_out;
	}

	pci_set_master(pdev);
	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		set_bit(QL_DMA64, &qdev->flags);
		err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
	} else {
		err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (!err)
			err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
	}

	if (err) {
		dev_err(&pdev->dev, "No usable DMA configuration.\n");
		goto err_out;
	}

	pci_set_drvdata(pdev, ndev);
	qdev->reg_base =
	    ioremap_nocache(pci_resource_start(pdev, 1),
			    pci_resource_len(pdev, 1));
	if (!qdev->reg_base) {
		dev_err(&pdev->dev, "Register mapping failed.\n");
		err = -ENOMEM;
		goto err_out;
	}

	qdev->doorbell_area_size = pci_resource_len(pdev, 3);
	qdev->doorbell_area =
	    ioremap_nocache(pci_resource_start(pdev, 3),
			    pci_resource_len(pdev, 3));
	if (!qdev->doorbell_area) {
		dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
		err = -ENOMEM;
		goto err_out;
	}

	ql_get_board_info(qdev);
	qdev->ndev = ndev;
	qdev->pdev = pdev;
	qdev->msg_enable = netif_msg_init(debug, default_msg);
	spin_lock_init(&qdev->hw_lock);
	spin_lock_init(&qdev->stats_lock);

	/* make sure the EEPROM is good */
	err = ql_get_flash_params(qdev);
	if (err) {
		dev_err(&pdev->dev, "Invalid FLASH.\n");
		goto err_out;
	}

	if (!is_valid_ether_addr(qdev->flash.mac_addr))
		goto err_out;

	memcpy(ndev->dev_addr, qdev->flash.mac_addr, ndev->addr_len);
	memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);

	/* Set up the default ring sizes. */
	qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
	qdev->rx_ring_size = NUM_RX_RING_ENTRIES;

	/* Set up the coalescing parameters. */
	qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
	qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
	qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
	qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;

	/*
	 * Set up the operating parameters.
	 */
	qdev->rx_csum = 1;
	qdev->q_workqueue = create_workqueue(ndev->name);
	qdev->workqueue = create_singlethread_workqueue(ndev->name);
	INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
	INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
	INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);

	if (!cards_found) {
		dev_info(&pdev->dev, "%s\n", DRV_STRING);
		dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
			 DRV_NAME, DRV_VERSION);
	}
	return 0;

err_out:
	ql_release_all(pdev);
	pci_disable_device(pdev);
	return err;
}
static const struct net_device_ops qlge_netdev_ops = {
	.ndo_open		= qlge_open,
	.ndo_stop		= qlge_close,
	.ndo_start_xmit		= qlge_send,
	.ndo_change_mtu		= qlge_change_mtu,
	.ndo_get_stats		= qlge_get_stats,
	.ndo_set_multicast_list	= qlge_set_multicast_list,
	.ndo_set_mac_address	= qlge_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_tx_timeout		= qlge_tx_timeout,
	.ndo_vlan_rx_register	= ql_vlan_rx_register,
	.ndo_vlan_rx_add_vid	= ql_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= ql_vlan_rx_kill_vid,
};
static int __devinit qlge_probe(struct pci_dev *pdev,
				const struct pci_device_id *pci_entry)
{
	struct net_device *ndev = NULL;
	struct ql_adapter *qdev = NULL;
	static int cards_found = 0;
	int err = 0;

	ndev = alloc_etherdev(sizeof(struct ql_adapter));
	if (!ndev)
		return -ENOMEM;

	err = ql_init_device(pdev, ndev, cards_found);
	if (err < 0) {
		free_netdev(ndev);
		return err;
	}

	qdev = netdev_priv(ndev);
	SET_NETDEV_DEV(ndev, &pdev->dev);
	ndev->features = (0
			  | NETIF_F_IP_CSUM
			  | NETIF_F_SG
			  | NETIF_F_TSO
			  | NETIF_F_TSO6
			  | NETIF_F_TSO_ECN
			  | NETIF_F_HW_VLAN_TX
			  | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER);

	if (test_bit(QL_DMA64, &qdev->flags))
		ndev->features |= NETIF_F_HIGHDMA;

	/*
	 * Set up net_device structure.
	 */
	ndev->tx_queue_len = qdev->tx_ring_size;
	ndev->irq = pdev->irq;

	ndev->netdev_ops = &qlge_netdev_ops;
	SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops);
	ndev->watchdog_timeo = 10 * HZ;

	err = register_netdev(ndev);
	if (err) {
		dev_err(&pdev->dev, "net device registration failed.\n");
		ql_release_all(pdev);
		pci_disable_device(pdev);
		free_netdev(ndev);
		return err;
	}
	netif_carrier_off(ndev);
	netif_stop_queue(ndev);
	ql_display_dev_info(ndev);
	cards_found++;
	return 0;
}
static void __devexit qlge_remove(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	unregister_netdev(ndev);
	ql_release_all(pdev);
	pci_disable_device(pdev);
	free_netdev(ndev);
}
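/*
 * PCI error recovery: the PCI core calls .error_detected when a bus error
 * is seen, then (if we ask for it) .slot_reset after the slot has been
 * reset, and finally .resume once traffic may restart.  See
 * Documentation/PCI/pci-error-recovery.txt.
 */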
/*
 * This callback is called by the PCI subsystem whenever
 * a PCI bus error is detected.
 */
static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
					       enum pci_channel_state state)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	if (netif_running(ndev))
		ql_adapter_down(qdev);

	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
/*
 * This callback is called after the PCI bus has been reset.
 * Basically, this tries to restart the card from scratch.
 * This is a shortened version of the device probe/discovery code,
 * it resembles the first half of the qlge_probe() routine.
 */
static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	if (pci_enable_device(pdev)) {
		QPRINTK(qdev, IFUP, ERR,
			"Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);

	netif_carrier_off(ndev);
	netif_stop_queue(ndev);
	ql_adapter_reset(qdev);

	/* Make sure the EEPROM is good */
	memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);

	if (!is_valid_ether_addr(ndev->perm_addr)) {
		QPRINTK(qdev, IFUP, ERR, "After reset, invalid MAC address.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	return PCI_ERS_RESULT_RECOVERED;
}
static void qlge_io_resume(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	pci_set_master(pdev);

	if (netif_running(ndev)) {
		if (ql_adapter_up(qdev)) {
			QPRINTK(qdev, IFUP, ERR,
				"Device initialization failed after reset.\n");
			return;
		}
	}

	netif_device_attach(ndev);
}
static struct pci_error_handlers qlge_err_handler = {
	.error_detected = qlge_io_error_detected,
	.slot_reset = qlge_io_slot_reset,
	.resume = qlge_io_resume,
};
static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err;

	netif_device_detach(ndev);

	if (netif_running(ndev)) {
		err = ql_adapter_down(qdev);
		if (err)
			return err;
	}

	err = pci_save_state(pdev);
	if (err)
		return err;

	pci_disable_device(pdev);

	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}
#ifdef CONFIG_PM
static int qlge_resume(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	err = pci_enable_device(pdev);
	if (err) {
		QPRINTK(qdev, IFUP, ERR, "Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	if (netif_running(ndev)) {
		err = ql_adapter_up(qdev);
		if (err)
			return err;
	}

	netif_device_attach(ndev);

	return 0;
}
#endif /* CONFIG_PM */
static void qlge_shutdown(struct pci_dev *pdev)
{
	qlge_suspend(pdev, PMSG_SUSPEND);
}
static struct pci_driver qlge_driver = {
	.name = DRV_NAME,
	.id_table = qlge_pci_tbl,
	.probe = qlge_probe,
	.remove = __devexit_p(qlge_remove),
#ifdef CONFIG_PM
	.suspend = qlge_suspend,
	.resume = qlge_resume,
#endif
	.shutdown = qlge_shutdown,
	.err_handler = &qlge_err_handler
};
static int __init qlge_init_module(void)
{
	return pci_register_driver(&qlge_driver);
}

static void __exit qlge_exit(void)
{
	pci_unregister_driver(&qlge_driver);
}

module_init(qlge_init_module);
module_exit(qlge_exit);