2 * QLogic qlcnic NIC Driver
3 * Copyright (c) 2009-2013 QLogic Corporation
5 * See LICENSE.qlcnic for copyright and licensing details.
8 #include <linux/netdevice.h>
9 #include <linux/if_vlan.h>
11 #include <linux/ipv6.h>
15 #define TX_ETHER_PKT 0x01
16 #define TX_TCP_PKT 0x02
17 #define TX_UDP_PKT 0x03
18 #define TX_IP_PKT 0x04
19 #define TX_TCP_LSO 0x05
20 #define TX_TCP_LSO6 0x06
21 #define TX_TCPV6_PKT 0x0b
22 #define TX_UDPV6_PKT 0x0c
23 #define FLAGS_VLAN_TAGGED 0x10
24 #define FLAGS_VLAN_OOB 0x40
/* Store the VLAN TCI into a Tx command descriptor (little-endian on the
 * wire).  The original macro expansion ended with a stray ';', which
 * produced a double semicolon at every call site and would break
 * brace-less if/else usage; the expansion is now a plain parenthesized
 * expression like the sibling qlcnic_set_* helpers below.
 */
#define qlcnic_set_tx_vlan_tci(cmd_desc, v)	\
	((cmd_desc)->vlan_TCI = cpu_to_le16(v))
/* port_ctxid packs the port number in bits 0-3 and the context id in
 * bits 4-7 of a single byte.
 */
#define qlcnic_set_cmd_desc_port(cmd_desc, var)	\
	((cmd_desc)->port_ctxid |= ((var) & 0x0F))
#define qlcnic_set_cmd_desc_ctxid(cmd_desc, var)	\
	((cmd_desc)->port_ctxid |= ((var) << 4 & 0xF0))

/* Write the same port id into both nibbles of port_ctxid */
#define qlcnic_set_tx_port(_desc, _port) \
	((_desc)->port_ctxid = ((_port) & 0xf) | (((_port) << 4) & 0xf0))

/* flags_opcode: flags in bits 0-6, opcode in bits 7-12 (little-endian) */
#define qlcnic_set_tx_flags_opcode(_desc, _flags, _opcode) \
	((_desc)->flags_opcode |= \
	cpu_to_le16(((_flags) & 0x7f) | (((_opcode) & 0x3f) << 7)))

/* nfrags__length: fragment count in bits 0-7, packet length in bits 8-31 */
#define qlcnic_set_tx_frags_len(_desc, _frags, _len) \
	((_desc)->nfrags__length = \
	cpu_to_le32(((_frags) & 0xff) | (((_len) & 0xffffff) << 8)))
/* owner bits of status_desc; a descriptor is handed back to the
 * firmware (phantom) once the host has consumed it.
 */
#define STATUS_OWNER_HOST	(0x1ULL << 56)
#define STATUS_OWNER_PHANTOM	(0x2ULL << 56)
/* Rx status descriptor (sts_data0) bit layout:
 * 0-3 port, 4-7 status, 8-11 type, 12-27 total_length
 * 28-43 reference_handle, 44-47 protocol, 48-52 pkt_offset
 * 53-55 desc_cnt, 56-57 owner, 58-63 opcode
 */
/* Field extractors for the 64-bit Rx status word.  The body of
 * qlcnic_get_sts_port() was lost in this copy, leaving its trailing
 * '\' continuation to swallow the next #define; restored from the bit
 * layout documented above (bits 0-3 carry the port id).
 */
#define qlcnic_get_sts_port(sts_data)	\
	((sts_data) & 0x0F)
#define qlcnic_get_sts_status(sts_data)	\
	(((sts_data) >> 4) & 0x0F)
/* Remaining sts_data0 field extractors (see bit layout comment above) */
#define qlcnic_get_sts_type(sts_data)	\
	(((sts_data) >> 8) & 0x0F)
#define qlcnic_get_sts_totallength(sts_data)	\
	(((sts_data) >> 12) & 0xFFFF)
#define qlcnic_get_sts_refhandle(sts_data)	\
	(((sts_data) >> 28) & 0xFFFF)
#define qlcnic_get_sts_prot(sts_data)	\
	(((sts_data) >> 44) & 0x0F)
#define qlcnic_get_sts_pkt_offset(sts_data)	\
	(((sts_data) >> 48) & 0x1F)
#define qlcnic_get_sts_desc_cnt(sts_data)	\
	(((sts_data) >> 53) & 0x7)
#define qlcnic_get_sts_opcode(sts_data)	\
	(((sts_data) >> 58) & 0x03F)
/* Extractors for LRO status descriptors (first word: sts_data0) */
#define qlcnic_get_lro_sts_refhandle(sts_data)	\
	((sts_data) & 0x07FFF)
#define qlcnic_get_lro_sts_length(sts_data)	\
	(((sts_data) >> 16) & 0x0FFFF)
#define qlcnic_get_lro_sts_l2_hdr_offset(sts_data)	\
	(((sts_data) >> 32) & 0x0FF)
#define qlcnic_get_lro_sts_l4_hdr_offset(sts_data)	\
	(((sts_data) >> 40) & 0x0FF)
#define qlcnic_get_lro_sts_timestamp(sts_data)	\
	(((sts_data) >> 48) & 0x1)
#define qlcnic_get_lro_sts_type(sts_data)	\
	(((sts_data) >> 49) & 0x7)
#define qlcnic_get_lro_sts_push_flag(sts_data)	\
	(((sts_data) >> 52) & 0x1)
/* Sequence number and MSS are taken from the second word (sts_data1) —
 * see qlcnic_process_lro().
 */
#define qlcnic_get_lro_sts_seq_number(sts_data)	\
	((sts_data) & 0x0FFFFFFFF)
#define qlcnic_get_lro_sts_mss(sts_data1)	\
	((sts_data1 >> 32) & 0x0FFFF)

#define qlcnic_83xx_get_lro_sts_mss(sts)	((sts) & 0xffff)
/* opcode field in status_desc */
#define QLCNIC_SYN_OFFLOAD	0x03
#define QLCNIC_RXPKT_DESC	0x04
#define QLCNIC_OLD_RXPKT_DESC	0x3f
#define QLCNIC_RESPONSE_DESC	0x05
#define QLCNIC_LRO_DESC		0x12

#define QLCNIC_TX_POLL_BUDGET		128
/* Base TCP header is 20 bytes; timestamp option adds 12 more */
#define QLCNIC_TCP_HDR_SIZE		20
#define QLCNIC_TCP_TS_OPTION_SIZE	12
/* 83xx keeps the RDS ring id in the top bit of the reference handle */
#define QLCNIC_FETCH_RING_ID(handle)	((handle) >> 63)
#define QLCNIC_DESC_OWNER_FW		cpu_to_le64(STATUS_OWNER_PHANTOM)

#define QLCNIC_TCP_TS_HDR_SIZE (QLCNIC_TCP_HDR_SIZE + QLCNIC_TCP_TS_OPTION_SIZE)
/* for status field in status_desc */
#define STATUS_CKSUM_LOOP	0
#define STATUS_CKSUM_OK		2

/* 83xx status-descriptor field extractors */
#define qlcnic_83xx_pktln(sts)		((sts >> 32) & 0x3FFF)
#define qlcnic_83xx_hndl(sts)		((sts >> 48) & 0x7FFF)
#define qlcnic_83xx_csum_status(sts)	((sts >> 39) & 7)
#define qlcnic_83xx_opcode(sts)		((sts >> 42) & 0xF)
#define qlcnic_83xx_vlan_tag(sts)	(((sts) >> 48) & 0xFFFF)
#define qlcnic_83xx_lro_pktln(sts)	(((sts) >> 32) & 0x3FFF)
#define qlcnic_83xx_l2_hdr_off(sts)	(((sts) >> 16) & 0xFF)
#define qlcnic_83xx_l4_hdr_off(sts)	(((sts) >> 24) & 0xFF)
#define qlcnic_83xx_pkt_cnt(sts)	(((sts) >> 16) & 0x7)
#define qlcnic_83xx_is_tstamp(sts)	(((sts) >> 40) & 1)
#define qlcnic_83xx_is_psh_bit(sts)	(((sts) >> 41) & 1)
#define qlcnic_83xx_is_ip_align(sts)	(((sts) >> 46) & 1)
#define qlcnic_83xx_has_vlan_tag(sts)	(((sts) >> 47) & 1)
/* Forward declaration; the definition appears later in this file. */
struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *,
				     struct qlcnic_host_rds_ring *, u16, u16);
129 inline void qlcnic_83xx_enable_tx_intr(struct qlcnic_adapter
*adapter
,
130 struct qlcnic_host_tx_ring
*tx_ring
)
132 writel(0, tx_ring
->crb_intr_mask
);
135 inline void qlcnic_83xx_disable_tx_intr(struct qlcnic_adapter
*adapter
,
136 struct qlcnic_host_tx_ring
*tx_ring
)
138 writel(1, tx_ring
->crb_intr_mask
);
141 static inline u8
qlcnic_mac_hash(u64 mac
)
143 return (u8
)((mac
& 0xff) ^ ((mac
>> 40) & 0xff));
/* Build the reference handle the firmware echoes back in Rx status
 * descriptors; on 83xx (QLE834X) adapters the RDS ring id is folded
 * into bit 15 of the handle.
 * NOTE(review): the opening brace and the non-834X return path are
 * missing from this copy of the file (orig. lines ~148, 151-154) —
 * presumably a plain "return handle;"; confirm against the full source.
 */
static inline u32 qlcnic_get_ref_handle(struct qlcnic_adapter *adapter,
					u16 handle, u8 ring_id)
	if (adapter->pdev->device == PCI_DEVICE_ID_QLOGIC_QLE834X)
		return handle | (ring_id << 15);
/* Post a MAC add / MAC+VLAN add request to the firmware by building a
 * qlcnic_nic_req in the next free Tx descriptor slot.
 * NOTE(review): this copy is truncated — the trailing parameter list
 * (vlan_id), opening brace, the "u32 producer; u64 word;" declarations
 * and the closing lines are missing; gaps are marked inline.
 */
void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter, u64 *uaddr,
/* [orig. lines 156-157 missing: remaining parameter(s) and '{'] */
	struct cmd_desc_type0 *hwdesc;
	struct qlcnic_nic_req *req;
	struct qlcnic_mac_req *mac_req;
	struct qlcnic_vlan_req *vlan_req;
	struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
	/* [orig. lines 163-165 missing: producer/word declarations] */
	producer = tx_ring->producer;
	hwdesc = &tx_ring->desc_head[tx_ring->producer];
	/* Reuse the Tx descriptor slot as a firmware request */
	req = (struct qlcnic_nic_req *)hwdesc;
	memset(req, 0, sizeof(struct qlcnic_nic_req));
	req->qhdr = cpu_to_le64(QLCNIC_REQUEST << 23);
	word = QLCNIC_MAC_EVENT | ((u64)(adapter->portnum) << 16);
	req->req_hdr = cpu_to_le64(word);
	mac_req = (struct qlcnic_mac_req *)&(req->words[0]);
	mac_req->op = vlan_id ? QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_ADD;
	/* NOTE(review): "&uaddr" copies the first bytes of the pointer
	 * variable itself, not the MAC stored at *uaddr — looks like it
	 * should be "uaddr"; confirm against callers (which pass &src_addr
	 * where src_addr holds the MAC bytes).
	 */
	memcpy(mac_req->mac_addr, &uaddr, ETH_ALEN);
	vlan_req = (struct qlcnic_vlan_req *)&req->words[1];
	vlan_req->vlan_id = vlan_id;
	/* Consume the descriptor slot */
	tx_ring->producer = get_next_index(producer, tx_ring->num_desc);
/* [orig. lines 184-185 missing: end of function] */
/* Learn the source MAC (and optionally VLAN) of an outgoing packet:
 * refresh an existing hash-table entry, or allocate a new qlcnic_filter
 * and program it into the firmware via qlcnic_change_filter().
 * NOTE(review): truncated copy — trailing parameter (skb), several
 * local declarations (src_addr, vlan_id, hindex), early returns and
 * closing braces are missing; gaps are marked inline.
 */
static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
			       struct cmd_desc_type0 *first_desc,
/* [orig. lines 189-190 missing: skb parameter and '{'] */
	struct qlcnic_filter *fil, *tmp_fil;
	struct hlist_node *tmp_hnode, *n;
	struct hlist_head *head;
	struct net_device *netdev = adapter->netdev;
	struct ethhdr *phdr = (struct ethhdr *)(skb->data);
	/* [orig. lines 196-199 missing: src_addr/vlan_id/hindex decls] */
	/* Never learn our own MAC back */
	if (ether_addr_equal(phdr->h_source, adapter->mac_addr))
	/* [orig. line 201 missing: early return] */
	if (adapter->fhash.fnum >= adapter->fhash.fmax) {
		adapter->stats.mac_filter_limit_overrun++;
		netdev_info(netdev, "Can not add more than %d mac addresses\n",
			    adapter->fhash.fmax);
	/* [orig. lines 207-209 missing: return and '}'] */
	/* Only NPAR capable devices support vlan based learning */
	if (adapter->flags & QLCNIC_ESWITCH_ENABLED)
		/* NOTE(review): vlan_TCI is a little-endian field assigned
		 * without le16_to_cpu() — verify endianness handling.
		 */
		vlan_id = first_desc->vlan_TCI;
	memcpy(&src_addr, phdr->h_source, ETH_ALEN);
	hindex = qlcnic_mac_hash(src_addr) & (adapter->fhash.fbucket_size - 1);
	head = &(adapter->fhash.fhead[hindex]);
	/* Refresh an existing entry if the MAC+VLAN pair is already known */
	hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) {
		if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) &&
		    tmp_fil->vlan_id == vlan_id) {
			/* Re-program the firmware only if the entry aged out */
			if (jiffies > (QLCNIC_READD_AGE * HZ + tmp_fil->ftime))
				qlcnic_change_filter(adapter, &src_addr,
			/* [orig. line 222 missing: vlan_id argument] */
			tmp_fil->ftime = jiffies;
	/* [orig. lines 224-227 missing: return and closing braces] */
	fil = kzalloc(sizeof(struct qlcnic_filter), GFP_ATOMIC);
	/* [orig. lines 229-231 missing: NULL check / return] */
	qlcnic_change_filter(adapter, &src_addr, vlan_id);
	fil->ftime = jiffies;
	fil->vlan_id = vlan_id;
	memcpy(fil->faddr, &src_addr, ETH_ALEN);
	spin_lock(&adapter->mac_learn_lock);
	hlist_add_head(&(fil->fnode), head);
	adapter->fhash.fnum++;
	spin_unlock(&adapter->mac_learn_lock);
/* [orig. line 240 missing: '}'] */
/* Finalize the first Tx descriptor for an skb: resolve VLAN tagging
 * (inline tag, out-of-band tag, or forced PVID), pick the hardware
 * opcode (plain / checksum-offload / LSO), and for LSO copy the
 * MAC/IP/TCP header template into following descriptor slots for the
 * firmware.
 * NOTE(review): truncated copy — braces, the pvid error path, the
 * "copied/offset" initialization, parts of the header-copy calls and
 * the final return are missing; gaps are marked inline.
 */
static int qlcnic_tx_pkt(struct qlcnic_adapter *adapter,
			 struct cmd_desc_type0 *first_desc, struct sk_buff *skb)
/* [orig. line 244 missing: '{'] */
	u8 l4proto, opcode = 0, hdr_len = 0;
	u16 flags = 0, vlan_tci = 0;
	int copied, offset, copy_len, size;
	struct cmd_desc_type0 *hwdesc;
	struct vlan_ethhdr *vh;
	struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
	u16 protocol = ntohs(skb->protocol);
	u32 producer = tx_ring->producer;

	if (protocol == ETH_P_8021Q) {
		/* VLAN tag carried inline in the frame */
		vh = (struct vlan_ethhdr *)skb->data;
		flags = FLAGS_VLAN_TAGGED;
		vlan_tci = ntohs(vh->h_vlan_TCI);
		protocol = ntohs(vh->h_vlan_encapsulated_proto);
	} else if (vlan_tx_tag_present(skb)) {
		/* Tag supplied out-of-band by the stack */
		flags = FLAGS_VLAN_OOB;
		vlan_tci = vlan_tx_tag_get(skb);
	/* [orig. line 262 missing: '}'] */
	if (unlikely(adapter->pvid)) {
		if (vlan_tci && !(adapter->flags & QLCNIC_TAGGING_ENABLED))
		/* [orig. line 265 missing: error/skip path] */
		if (vlan_tci && (adapter->flags & QLCNIC_TAGGING_ENABLED))
		/* [orig. lines 267-268 missing: goto/else] */
			/* Force the port VLAN id */
			flags = FLAGS_VLAN_OOB;
			vlan_tci = adapter->pvid;
	/* [orig. lines 271-272 missing: closing braces / label] */
	qlcnic_set_tx_vlan_tci(first_desc, vlan_tci);
	qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);

	/* Multicast/broadcast destination: copy the address for firmware */
	if (*(skb->data) & BIT_0) {
	/* [orig. line 277 missing] */
		memcpy(&first_desc->eth_addr, skb->data, ETH_ALEN);
	/* [orig. line 279 missing: '}'] */
	opcode = TX_ETHER_PKT;
	if ((adapter->netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) &&
	    skb_shinfo(skb)->gso_size > 0) {
		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
		first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
		first_desc->total_hdr_length = hdr_len;
		opcode = (protocol == ETH_P_IPV6) ? TX_TCP_LSO6 : TX_TCP_LSO;

		/* For LSO, we need to copy the MAC/IP/TCP headers into
		 * the descriptor ring */
		/* [orig. lines 290-292 missing: copied/offset init] */
		if (flags & FLAGS_VLAN_OOB) {
			first_desc->total_hdr_length += VLAN_HLEN;
			first_desc->tcp_hdr_offset = VLAN_HLEN;
			first_desc->ip_hdr_offset = VLAN_HLEN;

			/* Only in case of TSO on vlan device */
			flags |= FLAGS_VLAN_TAGGED;

			/* Create a TSO vlan header template for firmware */
			hwdesc = &tx_ring->desc_head[producer];
			tx_ring->cmd_buf_arr[producer].skb = NULL;

			copy_len = min((int)sizeof(struct cmd_desc_type0) -
				       offset, hdr_len + VLAN_HLEN);

			vh = (struct vlan_ethhdr *)((char *) hwdesc + 2);
			skb_copy_from_linear_data(skb, vh, 12);
			vh->h_vlan_proto = htons(ETH_P_8021Q);
			vh->h_vlan_TCI = htons(vlan_tci);

			skb_copy_from_linear_data_offset(skb, 12,
			/* [orig. lines 314-315 missing: dest/len args] */
			copied = copy_len - VLAN_HLEN;
			/* [orig. line 317 missing] */
			producer = get_next_index(producer, tx_ring->num_desc);
		/* [orig. lines 319-320 missing: '}'] */
		while (copied < hdr_len) {
			size = (int)sizeof(struct cmd_desc_type0) - offset;
			copy_len = min(size, (hdr_len - copied));
			hwdesc = &tx_ring->desc_head[producer];
			tx_ring->cmd_buf_arr[producer].skb = NULL;
			skb_copy_from_linear_data_offset(skb, copied,
			/* [orig. lines 327-330 missing: dest/len args,
			 * copied/offset updates] */
			producer = get_next_index(producer, tx_ring->num_desc);
		/* [orig. lines 332-333 missing: '}'] */
		tx_ring->producer = producer;

		adapter->stats.lso_frames++;

	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* Checksum offload: encode the L4 protocol in the opcode */
		if (protocol == ETH_P_IP) {
			l4proto = ip_hdr(skb)->protocol;

			if (l4proto == IPPROTO_TCP)
			/* [orig. line 343 missing: opcode = TX_TCP_PKT] */
			else if (l4proto == IPPROTO_UDP)
			/* [orig. line 345 missing: opcode = TX_UDP_PKT] */
		} else if (protocol == ETH_P_IPV6) {
			l4proto = ipv6_hdr(skb)->nexthdr;

			if (l4proto == IPPROTO_TCP)
				opcode = TX_TCPV6_PKT;
			else if (l4proto == IPPROTO_UDP)
				opcode = TX_UDPV6_PKT;
	/* [orig. lines 353-354 missing: closing braces] */
	first_desc->tcp_hdr_offset += skb_transport_offset(skb);
	first_desc->ip_hdr_offset += skb_network_offset(skb);
	qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
/* [orig. lines 358-360 missing: return and '}'] */
/* DMA-map an skb for transmit: the linear head via pci_map_single() and
 * each page fragment via skb_frag_dma_map(), recording dma/length pairs
 * in pbuf->frag_array.  On a fragment mapping failure, the unwind path
 * unmaps everything mapped so far.
 * NOTE(review): truncated copy — loop indices/map declarations, the
 * success return, the unwind loop header and the error return are
 * missing; gaps are marked inline.
 */
static int qlcnic_map_tx_skb(struct pci_dev *pdev, struct sk_buff *skb,
			     struct qlcnic_cmd_buffer *pbuf)
/* [orig. line 364 missing: '{'] */
	struct qlcnic_skb_frag *nf;
	struct skb_frag_struct *frag;
	/* [orig. lines 367-369 missing: i/nr_frags/map declarations] */
	nr_frags = skb_shinfo(skb)->nr_frags;
	nf = &pbuf->frag_array[0];
	/* Map the linear part of the skb first */
	map = pci_map_single(pdev, skb->data, skb_headlen(skb),
	/* [orig. line 374 missing: direction argument] */
	if (pci_dma_mapping_error(pdev, map))
	/* [orig. lines 376-378 missing: error goto, nf->dma store] */
	nf->length = skb_headlen(skb);

	for (i = 0; i < nr_frags; i++) {
		frag = &skb_shinfo(skb)->frags[i];
		nf = &pbuf->frag_array[i+1];
		map = skb_frag_dma_map(&pdev->dev, frag, 0, skb_frag_size(frag),
		/* [orig. line 385 missing: direction argument] */
		if (dma_mapping_error(&pdev->dev, map))
		/* [orig. lines 387-389 missing: unwind goto, nf->dma store] */
		nf->length = skb_frag_size(frag);
	/* [orig. lines 391-396 missing: '}', success return, unwind label
	 * and loop header] */
	nf = &pbuf->frag_array[i+1];
	pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
	/* [orig. lines 399-400 missing: '}'] */
	nf = &pbuf->frag_array[0];
	pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
/* [orig. lines 403-406 missing: error return and '}'] */
/* Undo qlcnic_map_tx_skb(): unmap every page fragment and then the
 * linear head of the skb.
 * NOTE(review): truncated copy — braces and the trailing cleanup line
 * (orig. 421-423) are missing; gaps are marked inline.
 */
static void qlcnic_unmap_buffers(struct pci_dev *pdev, struct sk_buff *skb,
				 struct qlcnic_cmd_buffer *pbuf)
/* [orig. line 410 missing: '{'] */
	struct qlcnic_skb_frag *nf = &pbuf->frag_array[0];
	int i, nr_frags = skb_shinfo(skb)->nr_frags;

	for (i = 0; i < nr_frags; i++) {
		nf = &pbuf->frag_array[i+1];
		pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
	/* [orig. lines 417-418 missing: '}'] */
	nf = &pbuf->frag_array[0];
	pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
/* [orig. lines 421-423 missing: end of function] */
/* Zero the relevant 64-bit words of a Tx command descriptor before it
 * is filled in.
 * NOTE(review): the body (orig. lines 425-429) is missing from this
 * copy of the file.
 */
static inline void qlcnic_clear_cmddesc(u64 *desc)
/* ndo_start_xmit handler: validate device state and (optionally) the
 * source MAC, linearize excess fragments, reserve descriptors, DMA-map
 * the skb, scatter up to four buffer addresses per descriptor, then
 * hand the packet to qlcnic_tx_pkt() and ring the doorbell.
 * NOTE(review): truncated copy — braces, goto labels/returns, the
 * "k = i % 4" fragment index and the switch(k) arms' case labels are
 * missing; gaps are marked inline.
 */
netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
/* [orig. line 432 missing: '{'] */
	struct qlcnic_adapter *adapter = netdev_priv(netdev);
	struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
	struct qlcnic_cmd_buffer *pbuf;
	struct qlcnic_skb_frag *buffrag;
	struct cmd_desc_type0 *hwdesc, *first_desc;
	struct pci_dev *pdev;
	/* [orig. line 439 missing: struct ethhdr *phdr;] */
	int i, k, frag_count, delta = 0;
	u32 producer, num_txd;

	num_txd = tx_ring->num_desc;

	if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
		netif_stop_queue(netdev);
		return NETDEV_TX_BUSY;
	/* [orig. lines 448-449 missing: '}'] */
	/* Drop frames whose source MAC does not match ours when
	 * anti-spoofing is on */
	if (adapter->flags & QLCNIC_MACSPOOF) {
		phdr = (struct ethhdr *)skb->data;
		if (!ether_addr_equal(phdr->h_source, adapter->mac_addr))
	/* [orig. lines 453-455 missing: drop goto and '}'] */
	frag_count = skb_shinfo(skb)->nr_frags + 1;
	/* 14 frags supported for normal packet and
	 * 32 frags supported for TSO packet
	 */
	if (!skb_is_gso(skb) && frag_count > QLCNIC_MAX_FRAGS_PER_TX) {
		/* Pull excess fragments into the linear area */
		for (i = 0; i < (frag_count - QLCNIC_MAX_FRAGS_PER_TX); i++)
			delta += skb_frag_size(&skb_shinfo(skb)->frags[i]);

		if (!__pskb_pull_tail(skb, delta))
		/* [orig. lines 465-466 missing: drop goto] */
		frag_count = 1 + skb_shinfo(skb)->nr_frags;
	/* [orig. lines 468-469 missing: '}'] */
	if (unlikely(qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH)) {
		netif_stop_queue(netdev);
		/* Re-check after stopping; restart if space freed up */
		if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
			netif_start_queue(netdev);
		/* [orig. line 474 missing: '} else {'] */
			adapter->stats.xmit_off++;
			return NETDEV_TX_BUSY;
	/* [orig. lines 477-479 missing: closing braces] */
	producer = tx_ring->producer;
	pbuf = &tx_ring->cmd_buf_arr[producer];
	pdev = adapter->pdev;
	first_desc = &tx_ring->desc_head[producer];
	hwdesc = &tx_ring->desc_head[producer];
	qlcnic_clear_cmddesc((u64 *)hwdesc);

	if (qlcnic_map_tx_skb(pdev, skb, pbuf)) {
		adapter->stats.tx_dma_map_error++;
	/* [orig. lines 489-492 missing: drop goto, '}', pbuf->skb store] */
	pbuf->frag_count = frag_count;

	qlcnic_set_tx_frags_len(first_desc, frag_count, skb->len);
	qlcnic_set_tx_port(first_desc, adapter->portnum);

	/* Each descriptor carries up to 4 buffer address/length pairs */
	for (i = 0; i < frag_count; i++) {
		/* [orig. lines 499-500 missing: k = i % 4 etc.] */
		if ((k == 0) && (i > 0)) {
			/* move to next desc.*/
			producer = get_next_index(producer, num_txd);
			hwdesc = &tx_ring->desc_head[producer];
			qlcnic_clear_cmddesc((u64 *)hwdesc);
			tx_ring->cmd_buf_arr[producer].skb = NULL;
		/* [orig. lines 507-508 missing: '}'] */
		buffrag = &pbuf->frag_array[i];
		hwdesc->buffer_length[k] = cpu_to_le16(buffrag->length);
		/* [orig. lines 511-512 missing: switch (k) / case 0:] */
		hwdesc->addr_buffer1 = cpu_to_le64(buffrag->dma);
		/* [orig. lines 514-515 missing: break / case 1:] */
		hwdesc->addr_buffer2 = cpu_to_le64(buffrag->dma);
		/* [orig. lines 517-518 missing: break / case 2:] */
		hwdesc->addr_buffer3 = cpu_to_le64(buffrag->dma);
		/* [orig. lines 520-521 missing: break / case 3:] */
		hwdesc->addr_buffer4 = cpu_to_le64(buffrag->dma);
	/* [orig. lines 523-526 missing: end of switch and loop] */
	tx_ring->producer = get_next_index(producer, num_txd);
	/* [orig. lines 528-529 missing] */
	if (unlikely(qlcnic_tx_pkt(adapter, first_desc, skb)))
	/* [orig. lines 531-532 missing: unwind goto] */
	if (adapter->drv_mac_learn)
		qlcnic_send_filter(adapter, first_desc, skb);

	adapter->stats.txbytes += skb->len;
	adapter->stats.xmitcalled++;

	qlcnic_update_cmd_producer(tx_ring);
	/* [orig. lines 540-543 missing: NETDEV_TX_OK return, unwind label] */
	qlcnic_unmap_buffers(pdev, skb, pbuf);
	/* [orig. line 545 missing: drop label] */
	adapter->stats.txdropped++;
	dev_kfree_skb_any(skb);
/* [orig. lines 548-549 missing: return and '}'] */
551 void qlcnic_advert_link_change(struct qlcnic_adapter
*adapter
, int linkup
)
553 struct net_device
*netdev
= adapter
->netdev
;
555 if (adapter
->ahw
->linkup
&& !linkup
) {
556 netdev_info(netdev
, "NIC Link is down\n");
557 adapter
->ahw
->linkup
= 0;
558 if (netif_running(netdev
)) {
559 netif_carrier_off(netdev
);
560 netif_stop_queue(netdev
);
562 } else if (!adapter
->ahw
->linkup
&& linkup
) {
563 netdev_info(netdev
, "NIC Link is up\n");
564 adapter
->ahw
->linkup
= 1;
565 if (netif_running(netdev
)) {
566 netif_carrier_on(netdev
);
567 netif_wake_queue(netdev
);
/* Allocate and DMA-map a fresh receive skb for an RDS ring buffer slot,
 * bumping the relevant error counters on failure.
 * NOTE(review): truncated copy — skb/dma declarations, the NULL check,
 * error returns and the final buffer->skb/dma stores are missing; gaps
 * are marked inline.
 */
static int qlcnic_alloc_rx_skb(struct qlcnic_adapter *adapter,
			       struct qlcnic_host_rds_ring *rds_ring,
			       struct qlcnic_rx_buffer *buffer)
/* [orig. lines 575-577 missing: '{' and skb/dma declarations] */
	struct pci_dev *pdev = adapter->pdev;

	skb = netdev_alloc_skb(adapter->netdev, rds_ring->skb_size);
	/* [orig. line 581 missing: allocation-failure check] */
		adapter->stats.skb_alloc_failure++;
	/* [orig. lines 583-585 missing: error return and '}'] */
	skb_reserve(skb, NET_IP_ALIGN);
	dma = pci_map_single(pdev, skb->data,
			     rds_ring->dma_size, PCI_DMA_FROMDEVICE);

	if (pci_dma_mapping_error(pdev, dma)) {
		adapter->stats.rx_dma_map_error++;
		dev_kfree_skb_any(skb);
/* [orig. lines 593-600 missing: error return, buffer->skb/dma stores,
 * success return] */
/* Refill an RDS ring from its free list without a doorbell register
 * write per buffer; uses spin_trylock() because this runs from the Rx
 * softirq path and may race with the regular refill.
 * NOTE(review): truncated copy — the ring_id parameter, count variable
 * and several braces/returns are missing; gaps are marked inline.
 */
static void qlcnic_post_rx_buffers_nodb(struct qlcnic_adapter *adapter,
					struct qlcnic_host_rds_ring *rds_ring,
/* [orig. lines 604-605 missing: ring_id parameter and '{'] */
	struct rcv_desc *pdesc;
	struct qlcnic_rx_buffer *buffer;
	/* [orig. line 608 missing: count declaration] */
	uint32_t producer, handle;
	struct list_head *head;

	if (!spin_trylock(&rds_ring->lock))
	/* [orig. lines 613-614 missing: early return] */
	producer = rds_ring->producer;
	head = &rds_ring->free_list;
	while (!list_empty(head)) {
		buffer = list_entry(head->next, struct qlcnic_rx_buffer, list);

		if (qlcnic_alloc_rx_skb(adapter, rds_ring, buffer))
		/* [orig. lines 622-624 missing: break and count update] */
		list_del(&buffer->list);

		/* make a rcv descriptor */
		pdesc = &rds_ring->desc_head[producer];
		handle = qlcnic_get_ref_handle(adapter,
					       buffer->ref_handle, ring_id);
		pdesc->reference_handle = cpu_to_le16(handle);
		pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
		pdesc->addr_buffer = cpu_to_le64(buffer->dma);
		producer = get_next_index(producer, rds_ring->num_desc);
	/* [orig. lines 635-636 missing: '}' and posted-count check] */
	rds_ring->producer = producer;
	/* Ring the Rx producer doorbell once for the whole batch */
	writel((producer - 1) & (rds_ring->num_desc - 1),
	       rds_ring->crb_rcv_producer);
	/* [orig. line 640 missing: '}'] */
	spin_unlock(&rds_ring->lock);
/* [orig. line 642 missing: '}'] */
/* Reclaim completed Tx descriptors up to the firmware's hw_consumer:
 * unmap and free each transmitted skb, wake the queue if it was
 * stopped and space is available, and report whether the ring is fully
 * drained (for NAPI completion).
 * NOTE(review): truncated copy — budget parameter, several braces,
 * frag advance, skb NULL-ing and the final return are missing; gaps
 * are marked inline.
 */
static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter,
				   struct qlcnic_host_tx_ring *tx_ring,
/* [orig. lines 646-647 missing: budget parameter and '{'] */
	u32 sw_consumer, hw_consumer;
	int i, done, count = 0;
	struct qlcnic_cmd_buffer *buffer;
	struct pci_dev *pdev = adapter->pdev;
	struct net_device *netdev = adapter->netdev;
	struct qlcnic_skb_frag *frag;

	if (!spin_trylock(&adapter->tx_clean_lock))
	/* [orig. lines 656-657 missing: early return] */
	sw_consumer = tx_ring->sw_consumer;
	hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));

	while (sw_consumer != hw_consumer) {
		buffer = &tx_ring->cmd_buf_arr[sw_consumer];
		/* [orig. line 663 missing: buffer->skb check] */
		frag = &buffer->frag_array[0];
		pci_unmap_single(pdev, frag->dma, frag->length,
		/* [orig. lines 666-667 missing: direction arg, dma reset] */
		for (i = 1; i < buffer->frag_count; i++) {
		/* [orig. line 669 missing: frag advance] */
			pci_unmap_page(pdev, frag->dma, frag->length,
		/* [orig. lines 671-673 missing: direction arg, '}'] */
		adapter->stats.xmitfinished++;
		dev_kfree_skb_any(buffer->skb);
		/* [orig. lines 676-678 missing: buffer->skb = NULL, '}'] */
		sw_consumer = get_next_index(sw_consumer, tx_ring->num_desc);
		if (++count >= budget)
	/* [orig. lines 681-683 missing: break and '}'] */
	if (count && netif_running(netdev)) {
		tx_ring->sw_consumer = sw_consumer;
		/* [orig. line 686 missing] */
		if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) {
			if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
				netif_wake_queue(netdev);
				adapter->stats.xmit_on++;
		/* [orig. lines 691-692 missing: closing braces] */
		adapter->tx_timeo_cnt = 0;
	/* [orig. lines 694-695 missing: '}'] */
	/*
	 * If everything is freed up to consumer then check if the ring is full
	 * If the ring is full then check if more needs to be freed and
	 * schedule the call back again.
	 *
	 * This happens when there are 2 CPUs. One could be freeing and the
	 * other filling it. If the ring is full when we get out of here and
	 * the card has already interrupted the host then the host can miss the
	 * interrupt.
	 *
	 * There is still a possible race condition and the host could miss an
	 * interrupt. The card has to take care of this.
	 */
	hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
	done = (sw_consumer == hw_consumer);
	spin_unlock(&adapter->tx_clean_lock);
/* [orig. lines 711-713 missing: return done and '}'] */
/* Combined Tx+Rx NAPI poll handler: drain the Tx completion ring, then
 * process up to @budget Rx descriptors; complete NAPI and re-enable
 * interrupts only when both are done and the device is still up.
 * NOTE(review): truncated copy — braces and the work_done return are
 * missing; gaps are marked inline.
 */
static int qlcnic_poll(struct napi_struct *napi, int budget)
/* [orig. line 716 missing: '{'] */
	int tx_complete, work_done;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_adapter *adapter;

	sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi);
	adapter = sds_ring->adapter;
	tx_complete = qlcnic_process_cmd_ring(adapter, adapter->tx_ring,
	/* [orig. line 724 missing: budget argument] */
	work_done = qlcnic_process_rcv_ring(sds_ring, budget);
	if ((work_done < budget) && tx_complete) {
		napi_complete(&sds_ring->napi);
		if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
			qlcnic_enable_int(sds_ring);
/* [orig. lines 730-733 missing: '}', return work_done, '}'] */
/* Rx-only NAPI poll handler (used when Tx completions are handled
 * elsewhere): process up to @budget Rx descriptors, then complete NAPI
 * and re-enable interrupts if the budget was not exhausted.
 * NOTE(review): truncated copy — work_done declaration, braces and the
 * return are missing; gaps are marked inline.
 */
static int qlcnic_rx_poll(struct napi_struct *napi, int budget)
/* [orig. lines 736-740 missing: '{' and work_done declaration] */
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_adapter *adapter;

	sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi);
	adapter = sds_ring->adapter;

	work_done = qlcnic_process_rcv_ring(sds_ring, budget);

	if (work_done < budget) {
		napi_complete(&sds_ring->napi);
		if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
			qlcnic_enable_int(sds_ring);
/* [orig. lines 750-753 missing: '}', return work_done, '}'] */
/* Decode a firmware link-event message: extract cable, speed, duplex,
 * autoneg and loopback status from the packed 64-bit message words,
 * log unsupported cables, record link parameters in ahw, and advertise
 * the link change to the stack.
 * NOTE(review): truncated copy — cable_OUI declaration, some braces
 * and the else/if structure around link_status are missing; gaps are
 * marked inline.
 */
static void qlcnic_handle_linkevent(struct qlcnic_adapter *adapter,
				    struct qlcnic_fw_msg *msg)
/* [orig. lines 757-758 missing: '{' and cable_OUI declaration] */
	u16 cable_len, link_speed;
	u8 link_status, module, duplex, autoneg, lb_status = 0;
	struct net_device *netdev = adapter->netdev;

	adapter->ahw->has_link_events = 1;

	/* msg->body[1]: cable OUI (0-31), length (32-47), speed (48-63) */
	cable_OUI = msg->body[1] & 0xffffffff;
	cable_len = (msg->body[1] >> 32) & 0xffff;
	link_speed = (msg->body[1] >> 48) & 0xffff;

	/* msg->body[2]: status, module, duplex, autoneg, loopback bits */
	link_status = msg->body[2] & 0xff;
	duplex = (msg->body[2] >> 16) & 0xff;
	autoneg = (msg->body[2] >> 24) & 0xff;
	lb_status = (msg->body[2] >> 32) & 0x3;

	module = (msg->body[2] >> 8) & 0xff;
	if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE)
		dev_info(&netdev->dev,
			 "unsupported cable: OUI 0x%x, length %d\n",
			 cable_OUI, cable_len);
	else if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN)
		dev_info(&netdev->dev, "unsupported cable length %d\n",
	/* [orig. lines 781-782 missing: cable_len argument] */
	if (!link_status && (lb_status == QLCNIC_ILB_MODE ||
			     lb_status == QLCNIC_ELB_MODE))
		adapter->ahw->loopback_state |= QLCNIC_LINKEVENT;

	qlcnic_advert_link_change(adapter, link_status);

	if (duplex == LINKEVENT_FULL_DUPLEX)
		adapter->ahw->link_duplex = DUPLEX_FULL;
	/* [orig. line 791 missing: else] */
		adapter->ahw->link_duplex = DUPLEX_HALF;

	adapter->ahw->module_type = module;
	adapter->ahw->link_autoneg = autoneg;
	/* [orig. lines 796-797 missing: link_status check] */
		adapter->ahw->link_speed = link_speed;
	/* [orig. line 799 missing: else branch] */
		adapter->ahw->link_speed = SPEED_UNKNOWN;
		adapter->ahw->link_duplex = DUPLEX_UNKNOWN;
/* [orig. lines 802-804 missing: closing braces] */
/* Gather a multi-descriptor firmware message from the status ring into
 * a qlcnic_fw_msg (up to 8 words) and dispatch on its opcode: link
 * events and loopback-configuration responses are handled; loopback
 * errors are recorded in ahw->diag_cnt as negative codes.
 * NOTE(review): truncated copy — the dev declaration, switch/case
 * labels, break statements and closing braces are missing; gaps are
 * marked inline.
 */
static void qlcnic_handle_fw_message(int desc_cnt, int index,
				     struct qlcnic_host_sds_ring *sds_ring)
/* [orig. line 807 missing: '{'] */
	struct qlcnic_fw_msg msg;
	struct status_desc *desc;
	struct qlcnic_adapter *adapter;
	/* [orig. line 811 missing: dev declaration] */
	int i = 0, opcode, ret;

	while (desc_cnt > 0 && i < 8) {
		desc = &sds_ring->desc_head[index];
		msg.words[i++] = le64_to_cpu(desc->status_desc_data[0]);
		msg.words[i++] = le64_to_cpu(desc->status_desc_data[1]);

		index = get_next_index(index, sds_ring->num_desc);
	/* [orig. lines 820-822 missing: desc_cnt decrement and '}'] */
	adapter = sds_ring->adapter;
	dev = &adapter->pdev->dev;
	opcode = qlcnic_get_nic_msg_opcode(msg.body[0]);
	/* [orig. lines 826-827 missing: switch (opcode) {] */
	case QLCNIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE:
		qlcnic_handle_linkevent(adapter, &msg);
		/* [orig. line 830 missing: break] */
	case QLCNIC_C2H_OPCODE_CONFIG_LOOPBACK:
		ret = (u32)(msg.body[1]);
		/* [orig. lines 833-834 missing: inner switch/case] */
		adapter->ahw->loopback_state |= QLCNIC_LB_RESPONSE;
		/* [orig. lines 836-837 missing: break / next case] */
		dev_info(dev, "loopback already in progress\n");
		adapter->ahw->diag_cnt = -QLCNIC_TEST_IN_PROGRESS;
		/* [orig. lines 840-841 missing: break / next case] */
		dev_info(dev, "loopback cable is not connected\n");
		adapter->ahw->diag_cnt = -QLCNIC_LB_CABLE_NOT_CONN;
		/* [orig. lines 844-846 missing: break / default case] */
			 "loopback configure request failed, err %x\n",
		/* [orig. line 848 missing: ret argument] */
		adapter->ahw->diag_cnt = -QLCNIC_UNDEFINED_ERROR;
/* [orig. lines 850-856 missing: breaks, default case, closing braces] */
/* Detach the skb from an Rx buffer slot: unmap its DMA mapping and set
 * ip_summed according to the hardware checksum status (honoring the
 * NETIF_F_RXCSUM feature flag).
 * NOTE(review): truncated copy — skb declaration, the NULL-buffer
 * handling, the skb extraction and the return are missing; gaps are
 * marked inline.
 */
struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *adapter,
				     struct qlcnic_host_rds_ring *ring,
				     u16 index, u16 cksum)
/* [orig. line 861 missing: '{'] */
	struct qlcnic_rx_buffer *buffer;
	/* [orig. lines 863-864 missing: skb declaration] */
	buffer = &ring->rx_buf_arr[index];
	if (unlikely(buffer->skb == NULL)) {
	/* [orig. lines 867-870 missing: warn/return path] */
	pci_unmap_single(adapter->pdev, buffer->dma, ring->dma_size,
	/* [orig. lines 872-874 missing: direction arg, skb extraction] */
	if (likely((adapter->netdev->features & NETIF_F_RXCSUM) &&
		   (cksum == STATUS_CKSUM_OK || cksum == STATUS_CKSUM_LOOP))) {
		adapter->stats.csummed++;
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	/* [orig. line 879 missing: else branch] */
		skb_checksum_none_assert(skb);
/* [orig. lines 881-887 missing: buffer->skb reset, return skb, '}'] */
/* Strip an inline VLAN header from a received frame (shifting the MAC
 * addresses forward) and decide, based on the adapter's PVID and
 * tagging flags, whether the packet should be accepted.
 * NOTE(review): truncated copy — the no-pvid early return and the
 * tag-match / accept-or-reject returns are missing; gaps are marked
 * inline.
 */
static inline int qlcnic_check_rx_tagging(struct qlcnic_adapter *adapter,
					  struct sk_buff *skb, u16 *vlan_tag)
/* [orig. line 891 missing: '{'] */
	struct ethhdr *eth_hdr;

	if (!__vlan_get_tag(skb, vlan_tag)) {
		/* Inline tag present: pull it out of the frame */
		eth_hdr = (struct ethhdr *)skb->data;
		memmove(skb->data + VLAN_HLEN, eth_hdr, ETH_ALEN * 2);
		skb_pull(skb, VLAN_HLEN);
	/* [orig. lines 898-901 missing: '}' and no-pvid early return] */
	if (*vlan_tag == adapter->pvid) {
		/* Outer vlan tag. Packet should follow non-vlan path */
	/* [orig. lines 904-906 missing: tag reset and return] */
	if (adapter->flags & QLCNIC_TAGGING_ENABLED)
/* [orig. lines 908-911 missing: accept/reject returns, '}'] */
/* Handle a normal Rx completion: locate the buffer via the status
 * word's reference handle, size/trim the skb, strip VLAN tagging, and
 * hand the packet to GRO.  Returns the consumed rx_buffer so the
 * caller can recycle it.
 * NOTE(review): truncated copy — sts_data0 parameter, skb/vid
 * declarations, early returns and some braces are missing; gaps are
 * marked inline.
 */
static struct qlcnic_rx_buffer *
qlcnic_process_rcv(struct qlcnic_adapter *adapter,
		   struct qlcnic_host_sds_ring *sds_ring, int ring,
/* [orig. lines 916-917 missing: sts_data0 parameter and '{'] */
	struct net_device *netdev = adapter->netdev;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
	struct qlcnic_rx_buffer *buffer;
	/* [orig. line 921 missing: skb declaration] */
	struct qlcnic_host_rds_ring *rds_ring;
	int index, length, cksum, pkt_offset;
	/* [orig. lines 924-925 missing: vid declaration] */
	if (unlikely(ring >= adapter->max_rds_rings))
	/* [orig. lines 927-928 missing: early return] */
	rds_ring = &recv_ctx->rds_rings[ring];

	index = qlcnic_get_sts_refhandle(sts_data0);
	if (unlikely(index >= rds_ring->num_desc))
	/* [orig. lines 933-934 missing: early return] */
	buffer = &rds_ring->rx_buf_arr[index];
	length = qlcnic_get_sts_totallength(sts_data0);
	cksum = qlcnic_get_sts_status(sts_data0);
	pkt_offset = qlcnic_get_sts_pkt_offset(sts_data0);

	skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum);
	/* [orig. lines 941-943 missing: NULL-skb early return] */
	/* Clamp to the allocated skb size */
	if (length > rds_ring->skb_size)
		skb_put(skb, rds_ring->skb_size);
	/* [orig. line 946 missing: else] */
		skb_put(skb, length);
	/* [orig. lines 948-949 missing: pkt_offset check] */
		skb_pull(skb, pkt_offset);

	if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) {
		adapter->stats.rxdropped++;
	/* [orig. lines 954-957 missing: free skb, return buffer, '}'] */
	skb->protocol = eth_type_trans(skb, netdev);
	/* [orig. lines 959-960 missing: vid validity check] */
		__vlan_hwaccel_put_tag(skb, vid);

	napi_gro_receive(&sds_ring->napi, skb);

	adapter->stats.rx_pkts++;
	adapter->stats.rxbytes += length;
/* [orig. lines 967-969 missing: return buffer and '}'] */
/* TCP header sizing used by the LRO fixup path.
 * NOTE(review): these duplicate the QLCNIC_TCP_* constants defined
 * earlier in this file — consider consolidating.
 */
#define QLC_TCP_HDR_SIZE	20
#define QLC_TCP_TS_OPTION_SIZE	12
#define QLC_TCP_TS_HDR_SIZE	(QLC_TCP_HDR_SIZE + QLC_TCP_TS_OPTION_SIZE)
/* Handle an LRO (aggregated) Rx completion: rebuild the IP/TCP header
 * fields (total length, checksum, sequence number) of the coalesced
 * super-frame, set GSO metadata when the firmware provides an MSS, and
 * deliver it with netif_receive_skb().
 * NOTE(review): truncated copy — skb/iph/th/seq_number declarations,
 * early returns, the timestamp if/else and several braces are missing;
 * gaps are marked inline.
 */
static struct qlcnic_rx_buffer *
qlcnic_process_lro(struct qlcnic_adapter *adapter,
		   int ring, u64 sts_data0, u64 sts_data1)
/* [orig. line 978 missing: '{'] */
	struct net_device *netdev = adapter->netdev;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
	struct qlcnic_rx_buffer *buffer;
	/* [orig. line 982 missing: skb declaration] */
	struct qlcnic_host_rds_ring *rds_ring;
	/* [orig. line 984 missing: iph declaration] */
	struct ipv6hdr *ipv6h;
	/* [orig. line 986 missing: th declaration] */
	bool push, timestamp;
	int index, l2_hdr_offset, l4_hdr_offset;
	u16 lro_length, length, data_offset, vid = 0xffff;
	/* [orig. lines 990-991 missing: seq_number declaration] */
	if (unlikely(ring > adapter->max_rds_rings))
	/* [orig. lines 993-994 missing: early return] */
	rds_ring = &recv_ctx->rds_rings[ring];

	index = qlcnic_get_lro_sts_refhandle(sts_data0);
	if (unlikely(index > rds_ring->num_desc))
	/* [orig. lines 999-1000 missing: early return] */
	buffer = &rds_ring->rx_buf_arr[index];

	timestamp = qlcnic_get_lro_sts_timestamp(sts_data0);
	lro_length = qlcnic_get_lro_sts_length(sts_data0);
	l2_hdr_offset = qlcnic_get_lro_sts_l2_hdr_offset(sts_data0);
	l4_hdr_offset = qlcnic_get_lro_sts_l4_hdr_offset(sts_data0);
	push = qlcnic_get_lro_sts_push_flag(sts_data0);
	seq_number = qlcnic_get_lro_sts_seq_number(sts_data1);

	skb = qlcnic_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK);
	/* [orig. lines 1011-1014 missing: NULL check; timestamp branch] */
	data_offset = l4_hdr_offset + QLC_TCP_TS_HDR_SIZE;
	/* [orig. line 1016 missing: else] */
	data_offset = l4_hdr_offset + QLC_TCP_HDR_SIZE;

	skb_put(skb, lro_length + data_offset);
	skb_pull(skb, l2_hdr_offset);

	if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) {
		adapter->stats.rxdropped++;
	/* [orig. lines 1024-1027 missing: free skb, return buffer, '}'] */
	skb->protocol = eth_type_trans(skb, netdev);

	/* Rewrite the L3 length field of the coalesced frame */
	if (ntohs(skb->protocol) == ETH_P_IPV6) {
		ipv6h = (struct ipv6hdr *)skb->data;
		th = (struct tcphdr *)(skb->data + sizeof(struct ipv6hdr));
		length = (th->doff << 2) + lro_length;
		ipv6h->payload_len = htons(length);
	/* [orig. line 1035 missing: '} else {'] */
		iph = (struct iphdr *)skb->data;
		th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
		length = (iph->ihl << 2) + (th->doff << 2) + lro_length;
		iph->tot_len = htons(length);
		/* [orig. line 1040 missing: checksum reset] */
		iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
	/* [orig. lines 1042-1044 missing: '}'] */
	th->seq = htonl(seq_number);
	/* [orig. lines 1046-1047 missing] */
	if (adapter->flags & QLCNIC_FW_LRO_MSS_CAP) {
		skb_shinfo(skb)->gso_size = qlcnic_get_lro_sts_mss(sts_data1);
		if (skb->protocol == htons(ETH_P_IPV6))
			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
		/* [orig. line 1052 missing: else] */
			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
	/* [orig. lines 1054-1056 missing: '}' and vid validity check] */
		__vlan_hwaccel_put_tag(skb, vid);
	netif_receive_skb(skb);

	adapter->stats.lro_pkts++;
	adapter->stats.lrobytes += length;
/* [orig. lines 1062-1064 missing: return buffer and '}'] */
/* NAPI Rx worker: walk the status (SDS) ring up to @max host-owned
 * descriptors, dispatch each on its opcode (plain Rx, LRO, firmware
 * response), hand descriptors back to the firmware, re-arm consumed Rx
 * buffers, and update the consumer register.
 * NOTE(review): truncated copy — the ring variable declaration, switch
 * header, break/goto statements and some braces are missing; gaps are
 * marked inline.
 */
int qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max)
/* [orig. line 1067 missing: '{'] */
	struct qlcnic_host_rds_ring *rds_ring;
	struct qlcnic_adapter *adapter = sds_ring->adapter;
	struct list_head *cur;
	struct status_desc *desc;
	struct qlcnic_rx_buffer *rxbuf;
	int opcode, desc_cnt, count = 0;
	u64 sts_data0, sts_data1;
	/* [orig. line 1075 missing: ring declaration] */
	u32 consumer = sds_ring->consumer;

	while (count < max) {
		desc = &sds_ring->desc_head[consumer];
		sts_data0 = le64_to_cpu(desc->status_desc_data[0]);

		/* Stop at the first descriptor still owned by firmware */
		if (!(sts_data0 & STATUS_OWNER_HOST))
		/* [orig. lines 1083-1084 missing: break] */
		desc_cnt = qlcnic_get_sts_desc_cnt(sts_data0);
		opcode = qlcnic_get_sts_opcode(sts_data0);
		/* [orig. line 1087 missing: switch (opcode) {] */
		case QLCNIC_RXPKT_DESC:
		case QLCNIC_OLD_RXPKT_DESC:
		case QLCNIC_SYN_OFFLOAD:
			ring = qlcnic_get_sts_type(sts_data0);
			rxbuf = qlcnic_process_rcv(adapter, sds_ring, ring,
		/* [orig. lines 1093-1094 missing: sts_data0 arg, break] */
		case QLCNIC_LRO_DESC:
			ring = qlcnic_get_lro_sts_type(sts_data0);
			sts_data1 = le64_to_cpu(desc->status_desc_data[1]);
			rxbuf = qlcnic_process_lro(adapter, ring, sts_data0,
		/* [orig. lines 1099-1100 missing: sts_data1 arg, break] */
		case QLCNIC_RESPONSE_DESC:
			qlcnic_handle_fw_message(desc_cnt, consumer, sds_ring);
		/* [orig. lines 1103-1105 missing: default/goto, '}'] */
		WARN_ON(desc_cnt > 1);
		/* [orig. lines 1107-1108 missing: rxbuf NULL check] */
			list_add_tail(&rxbuf->list, &sds_ring->free_list[ring]);
		/* [orig. line 1110 missing: else] */
			adapter->stats.null_rxbuf++;
		/* [orig. line 1112 missing: skip label] */
		/* Return the consumed descriptor(s) to the firmware */
		for (; desc_cnt > 0; desc_cnt--) {
			desc = &sds_ring->desc_head[consumer];
			desc->status_desc_data[0] = QLCNIC_DESC_OWNER_FW;
			consumer = get_next_index(consumer, sds_ring->num_desc);
	/* [orig. lines 1117-1120 missing: '}', count++, '}'] */
	/* Re-arm all consumed buffers back onto their RDS rings */
	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
		rds_ring = &adapter->recv_ctx->rds_rings[ring];
		if (!list_empty(&sds_ring->free_list[ring])) {
			list_for_each(cur, &sds_ring->free_list[ring]) {
				rxbuf = list_entry(cur, struct qlcnic_rx_buffer,
			/* [orig. line 1126 missing: list member arg] */
				qlcnic_alloc_rx_skb(adapter, rds_ring, rxbuf);
			/* [orig. line 1128 missing: '}'] */
			spin_lock(&rds_ring->lock);
			list_splice_tail_init(&sds_ring->free_list[ring],
					      &rds_ring->free_list);
			spin_unlock(&rds_ring->lock);
		/* [orig. lines 1133-1134 missing: '}'] */
		qlcnic_post_rx_buffers_nodb(adapter, rds_ring, ring);
	/* [orig. lines 1136-1138 missing: '}' and count check] */
		sds_ring->consumer = consumer;
		writel(consumer, sds_ring->crb_sts_consumer);
/* [orig. lines 1141-1145 missing: '}', return count, '}'] */
/* Refill an RDS ring from its free list and ring the Rx producer
 * doorbell once for the whole batch (lock-free variant used outside
 * the softirq refill path).
 * NOTE(review): truncated copy — the count variable, break on
 * allocation failure and some braces are missing; gaps are marked
 * inline.
 */
void qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter,
			    struct qlcnic_host_rds_ring *rds_ring, u8 ring_id)
/* [orig. line 1148 missing: '{'] */
	struct rcv_desc *pdesc;
	struct qlcnic_rx_buffer *buffer;
	/* [orig. line 1151 missing: count declaration] */
	u32 producer, handle;
	struct list_head *head;

	producer = rds_ring->producer;
	head = &rds_ring->free_list;

	while (!list_empty(head)) {

		buffer = list_entry(head->next, struct qlcnic_rx_buffer, list);

		if (qlcnic_alloc_rx_skb(adapter, rds_ring, buffer))
		/* [orig. lines 1164-1167 missing: break, count update] */
		list_del(&buffer->list);

		/* make a rcv descriptor */
		pdesc = &rds_ring->desc_head[producer];
		pdesc->addr_buffer = cpu_to_le64(buffer->dma);
		handle = qlcnic_get_ref_handle(adapter, buffer->ref_handle,
		/* [orig. line 1174 missing: ring_id argument] */
		pdesc->reference_handle = cpu_to_le16(handle);
		pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
		producer = get_next_index(producer, rds_ring->num_desc);
	/* [orig. lines 1178-1180 missing: '}' and posted-count check] */
	rds_ring->producer = producer;
	writel((producer-1) & (rds_ring->num_desc-1),
	       rds_ring->crb_rcv_producer);
/* [orig. lines 1184-1186 missing: closing braces] */
1187 static void dump_skb(struct sk_buff
*skb
, struct qlcnic_adapter
*adapter
)
1190 unsigned char *data
= skb
->data
;
1192 pr_info(KERN_INFO
"\n");
1193 for (i
= 0; i
< skb
->len
; i
++) {
1194 QLCDB(adapter
, DRV
, "%02x ", data
[i
]);
1195 if ((i
& 0x0f) == 8)
1196 pr_info(KERN_INFO
"\n");
1200 static void qlcnic_process_rcv_diag(struct qlcnic_adapter
*adapter
, int ring
,
1203 struct qlcnic_recv_context
*recv_ctx
= adapter
->recv_ctx
;
1204 struct sk_buff
*skb
;
1205 struct qlcnic_host_rds_ring
*rds_ring
;
1206 int index
, length
, cksum
, pkt_offset
;
1208 if (unlikely(ring
>= adapter
->max_rds_rings
))
1211 rds_ring
= &recv_ctx
->rds_rings
[ring
];
1213 index
= qlcnic_get_sts_refhandle(sts_data0
);
1214 length
= qlcnic_get_sts_totallength(sts_data0
);
1215 if (unlikely(index
>= rds_ring
->num_desc
))
1218 cksum
= qlcnic_get_sts_status(sts_data0
);
1219 pkt_offset
= qlcnic_get_sts_pkt_offset(sts_data0
);
1221 skb
= qlcnic_process_rxbuf(adapter
, rds_ring
, index
, cksum
);
1225 if (length
> rds_ring
->skb_size
)
1226 skb_put(skb
, rds_ring
->skb_size
);
1228 skb_put(skb
, length
);
1231 skb_pull(skb
, pkt_offset
);
1233 if (!qlcnic_check_loopback_buff(skb
->data
, adapter
->mac_addr
))
1234 adapter
->ahw
->diag_cnt
++;
1236 dump_skb(skb
, adapter
);
1238 dev_kfree_skb_any(skb
);
1239 adapter
->stats
.rx_pkts
++;
1240 adapter
->stats
.rxbytes
+= length
;
1245 void qlcnic_82xx_process_rcv_ring_diag(struct qlcnic_host_sds_ring
*sds_ring
)
1247 struct qlcnic_adapter
*adapter
= sds_ring
->adapter
;
1248 struct status_desc
*desc
;
1250 int ring
, opcode
, desc_cnt
;
1252 u32 consumer
= sds_ring
->consumer
;
1254 desc
= &sds_ring
->desc_head
[consumer
];
1255 sts_data0
= le64_to_cpu(desc
->status_desc_data
[0]);
1257 if (!(sts_data0
& STATUS_OWNER_HOST
))
1260 desc_cnt
= qlcnic_get_sts_desc_cnt(sts_data0
);
1261 opcode
= qlcnic_get_sts_opcode(sts_data0
);
1263 case QLCNIC_RESPONSE_DESC
:
1264 qlcnic_handle_fw_message(desc_cnt
, consumer
, sds_ring
);
1267 ring
= qlcnic_get_sts_type(sts_data0
);
1268 qlcnic_process_rcv_diag(adapter
, ring
, sts_data0
);
1272 for (; desc_cnt
> 0; desc_cnt
--) {
1273 desc
= &sds_ring
->desc_head
[consumer
];
1274 desc
->status_desc_data
[0] = cpu_to_le64(STATUS_OWNER_PHANTOM
);
1275 consumer
= get_next_index(consumer
, sds_ring
->num_desc
);
1278 sds_ring
->consumer
= consumer
;
1279 writel(consumer
, sds_ring
->crb_sts_consumer
);
1282 int qlcnic_82xx_napi_add(struct qlcnic_adapter
*adapter
,
1283 struct net_device
*netdev
)
1285 int ring
, max_sds_rings
;
1286 struct qlcnic_host_sds_ring
*sds_ring
;
1287 struct qlcnic_recv_context
*recv_ctx
= adapter
->recv_ctx
;
1289 if (qlcnic_alloc_sds_rings(recv_ctx
, adapter
->max_sds_rings
))
1292 max_sds_rings
= adapter
->max_sds_rings
;
1294 for (ring
= 0; ring
< adapter
->max_sds_rings
; ring
++) {
1295 sds_ring
= &recv_ctx
->sds_rings
[ring
];
1296 if (ring
== adapter
->max_sds_rings
- 1)
1297 netif_napi_add(netdev
, &sds_ring
->napi
, qlcnic_poll
,
1298 QLCNIC_NETDEV_WEIGHT
/ max_sds_rings
);
1300 netif_napi_add(netdev
, &sds_ring
->napi
, qlcnic_rx_poll
,
1301 QLCNIC_NETDEV_WEIGHT
*2);
1304 if (qlcnic_alloc_tx_rings(adapter
, netdev
)) {
1305 qlcnic_free_sds_rings(recv_ctx
);
1312 void qlcnic_82xx_napi_del(struct qlcnic_adapter
*adapter
)
1315 struct qlcnic_host_sds_ring
*sds_ring
;
1316 struct qlcnic_recv_context
*recv_ctx
= adapter
->recv_ctx
;
1318 for (ring
= 0; ring
< adapter
->max_sds_rings
; ring
++) {
1319 sds_ring
= &recv_ctx
->sds_rings
[ring
];
1320 netif_napi_del(&sds_ring
->napi
);
1323 qlcnic_free_sds_rings(adapter
->recv_ctx
);
1324 qlcnic_free_tx_rings(adapter
);
1327 void qlcnic_82xx_napi_enable(struct qlcnic_adapter
*adapter
)
1330 struct qlcnic_host_sds_ring
*sds_ring
;
1331 struct qlcnic_recv_context
*recv_ctx
= adapter
->recv_ctx
;
1333 if (adapter
->is_up
!= QLCNIC_ADAPTER_UP_MAGIC
)
1336 for (ring
= 0; ring
< adapter
->max_sds_rings
; ring
++) {
1337 sds_ring
= &recv_ctx
->sds_rings
[ring
];
1338 napi_enable(&sds_ring
->napi
);
1339 qlcnic_enable_int(sds_ring
);
1343 void qlcnic_82xx_napi_disable(struct qlcnic_adapter
*adapter
)
1346 struct qlcnic_host_sds_ring
*sds_ring
;
1347 struct qlcnic_recv_context
*recv_ctx
= adapter
->recv_ctx
;
1349 if (adapter
->is_up
!= QLCNIC_ADAPTER_UP_MAGIC
)
1352 for (ring
= 0; ring
< adapter
->max_sds_rings
; ring
++) {
1353 sds_ring
= &recv_ctx
->sds_rings
[ring
];
1354 qlcnic_disable_int(sds_ring
);
1355 napi_synchronize(&sds_ring
->napi
);
1356 napi_disable(&sds_ring
->napi
);
1360 static struct qlcnic_rx_buffer
*
1361 qlcnic_83xx_process_rcv(struct qlcnic_adapter
*adapter
,
1362 struct qlcnic_host_sds_ring
*sds_ring
,
1363 u8 ring
, u64 sts_data
[])
1365 struct net_device
*netdev
= adapter
->netdev
;
1366 struct qlcnic_recv_context
*recv_ctx
= adapter
->recv_ctx
;
1367 struct qlcnic_rx_buffer
*buffer
;
1368 struct sk_buff
*skb
;
1369 struct qlcnic_host_rds_ring
*rds_ring
;
1370 int index
, length
, cksum
;
1373 if (unlikely(ring
>= adapter
->max_rds_rings
))
1376 rds_ring
= &recv_ctx
->rds_rings
[ring
];
1378 index
= qlcnic_83xx_hndl(sts_data
[0]);
1379 if (unlikely(index
>= rds_ring
->num_desc
))
1382 buffer
= &rds_ring
->rx_buf_arr
[index
];
1383 length
= qlcnic_83xx_pktln(sts_data
[0]);
1384 cksum
= qlcnic_83xx_csum_status(sts_data
[1]);
1385 skb
= qlcnic_process_rxbuf(adapter
, rds_ring
, index
, cksum
);
1389 if (length
> rds_ring
->skb_size
)
1390 skb_put(skb
, rds_ring
->skb_size
);
1392 skb_put(skb
, length
);
1394 if (unlikely(qlcnic_check_rx_tagging(adapter
, skb
, &vid
))) {
1395 adapter
->stats
.rxdropped
++;
1400 skb
->protocol
= eth_type_trans(skb
, netdev
);
1403 __vlan_hwaccel_put_tag(skb
, vid
);
1405 napi_gro_receive(&sds_ring
->napi
, skb
);
1407 adapter
->stats
.rx_pkts
++;
1408 adapter
->stats
.rxbytes
+= length
;
1413 static struct qlcnic_rx_buffer
*
1414 qlcnic_83xx_process_lro(struct qlcnic_adapter
*adapter
,
1415 u8 ring
, u64 sts_data
[])
1417 struct net_device
*netdev
= adapter
->netdev
;
1418 struct qlcnic_recv_context
*recv_ctx
= adapter
->recv_ctx
;
1419 struct qlcnic_rx_buffer
*buffer
;
1420 struct sk_buff
*skb
;
1421 struct qlcnic_host_rds_ring
*rds_ring
;
1423 struct ipv6hdr
*ipv6h
;
1426 int l2_hdr_offset
, l4_hdr_offset
;
1428 u16 lro_length
, length
, data_offset
, gso_size
;
1431 if (unlikely(ring
> adapter
->max_rds_rings
))
1434 rds_ring
= &recv_ctx
->rds_rings
[ring
];
1436 index
= qlcnic_83xx_hndl(sts_data
[0]);
1437 if (unlikely(index
> rds_ring
->num_desc
))
1440 buffer
= &rds_ring
->rx_buf_arr
[index
];
1442 lro_length
= qlcnic_83xx_lro_pktln(sts_data
[0]);
1443 l2_hdr_offset
= qlcnic_83xx_l2_hdr_off(sts_data
[1]);
1444 l4_hdr_offset
= qlcnic_83xx_l4_hdr_off(sts_data
[1]);
1445 push
= qlcnic_83xx_is_psh_bit(sts_data
[1]);
1447 skb
= qlcnic_process_rxbuf(adapter
, rds_ring
, index
, STATUS_CKSUM_OK
);
1450 if (qlcnic_83xx_is_tstamp(sts_data
[1]))
1451 data_offset
= l4_hdr_offset
+ QLCNIC_TCP_TS_HDR_SIZE
;
1453 data_offset
= l4_hdr_offset
+ QLCNIC_TCP_HDR_SIZE
;
1455 skb_put(skb
, lro_length
+ data_offset
);
1456 skb_pull(skb
, l2_hdr_offset
);
1458 if (unlikely(qlcnic_check_rx_tagging(adapter
, skb
, &vid
))) {
1459 adapter
->stats
.rxdropped
++;
1464 skb
->protocol
= eth_type_trans(skb
, netdev
);
1465 if (ntohs(skb
->protocol
) == ETH_P_IPV6
) {
1466 ipv6h
= (struct ipv6hdr
*)skb
->data
;
1467 th
= (struct tcphdr
*)(skb
->data
+ sizeof(struct ipv6hdr
));
1469 length
= (th
->doff
<< 2) + lro_length
;
1470 ipv6h
->payload_len
= htons(length
);
1472 iph
= (struct iphdr
*)skb
->data
;
1473 th
= (struct tcphdr
*)(skb
->data
+ (iph
->ihl
<< 2));
1474 length
= (iph
->ihl
<< 2) + (th
->doff
<< 2) + lro_length
;
1475 iph
->tot_len
= htons(length
);
1477 iph
->check
= ip_fast_csum((unsigned char *)iph
, iph
->ihl
);
1483 if (adapter
->flags
& QLCNIC_FW_LRO_MSS_CAP
) {
1484 gso_size
= qlcnic_83xx_get_lro_sts_mss(sts_data
[0]);
1485 skb_shinfo(skb
)->gso_size
= gso_size
;
1486 if (skb
->protocol
== htons(ETH_P_IPV6
))
1487 skb_shinfo(skb
)->gso_type
= SKB_GSO_TCPV6
;
1489 skb_shinfo(skb
)->gso_type
= SKB_GSO_TCPV4
;
1493 __vlan_hwaccel_put_tag(skb
, vid
);
1495 netif_receive_skb(skb
);
1497 adapter
->stats
.lro_pkts
++;
1498 adapter
->stats
.lrobytes
+= length
;
1502 static int qlcnic_83xx_process_rcv_ring(struct qlcnic_host_sds_ring
*sds_ring
,
1505 struct qlcnic_host_rds_ring
*rds_ring
;
1506 struct qlcnic_adapter
*adapter
= sds_ring
->adapter
;
1507 struct list_head
*cur
;
1508 struct status_desc
*desc
;
1509 struct qlcnic_rx_buffer
*rxbuf
= NULL
;
1512 int count
= 0, opcode
;
1513 u32 consumer
= sds_ring
->consumer
;
1515 while (count
< max
) {
1516 desc
= &sds_ring
->desc_head
[consumer
];
1517 sts_data
[1] = le64_to_cpu(desc
->status_desc_data
[1]);
1518 opcode
= qlcnic_83xx_opcode(sts_data
[1]);
1521 sts_data
[0] = le64_to_cpu(desc
->status_desc_data
[0]);
1522 ring
= QLCNIC_FETCH_RING_ID(sts_data
[0]);
1525 case QLC_83XX_REG_DESC
:
1526 rxbuf
= qlcnic_83xx_process_rcv(adapter
, sds_ring
,
1529 case QLC_83XX_LRO_DESC
:
1530 rxbuf
= qlcnic_83xx_process_lro(adapter
, ring
,
1534 dev_info(&adapter
->pdev
->dev
,
1535 "Unkonwn opcode: 0x%x\n", opcode
);
1540 list_add_tail(&rxbuf
->list
, &sds_ring
->free_list
[ring
]);
1542 adapter
->stats
.null_rxbuf
++;
1544 desc
= &sds_ring
->desc_head
[consumer
];
1545 /* Reset the descriptor */
1546 desc
->status_desc_data
[1] = 0;
1547 consumer
= get_next_index(consumer
, sds_ring
->num_desc
);
1550 for (ring
= 0; ring
< adapter
->max_rds_rings
; ring
++) {
1551 rds_ring
= &adapter
->recv_ctx
->rds_rings
[ring
];
1552 if (!list_empty(&sds_ring
->free_list
[ring
])) {
1553 list_for_each(cur
, &sds_ring
->free_list
[ring
]) {
1554 rxbuf
= list_entry(cur
, struct qlcnic_rx_buffer
,
1556 qlcnic_alloc_rx_skb(adapter
, rds_ring
, rxbuf
);
1558 spin_lock(&rds_ring
->lock
);
1559 list_splice_tail_init(&sds_ring
->free_list
[ring
],
1560 &rds_ring
->free_list
);
1561 spin_unlock(&rds_ring
->lock
);
1563 qlcnic_post_rx_buffers_nodb(adapter
, rds_ring
, ring
);
1566 sds_ring
->consumer
= consumer
;
1567 writel(consumer
, sds_ring
->crb_sts_consumer
);
1572 static int qlcnic_83xx_poll(struct napi_struct
*napi
, int budget
)
1576 struct qlcnic_host_sds_ring
*sds_ring
;
1577 struct qlcnic_adapter
*adapter
;
1578 struct qlcnic_host_tx_ring
*tx_ring
;
1580 sds_ring
= container_of(napi
, struct qlcnic_host_sds_ring
, napi
);
1581 adapter
= sds_ring
->adapter
;
1582 /* tx ring count = 1 */
1583 tx_ring
= adapter
->tx_ring
;
1585 tx_complete
= qlcnic_process_cmd_ring(adapter
, tx_ring
, budget
);
1586 work_done
= qlcnic_83xx_process_rcv_ring(sds_ring
, budget
);
1587 if ((work_done
< budget
) && tx_complete
) {
1588 napi_complete(&sds_ring
->napi
);
1589 qlcnic_83xx_enable_intr(adapter
, sds_ring
);
1595 static int qlcnic_83xx_msix_tx_poll(struct napi_struct
*napi
, int budget
)
1598 struct qlcnic_host_tx_ring
*tx_ring
;
1599 struct qlcnic_adapter
*adapter
;
1601 budget
= QLCNIC_TX_POLL_BUDGET
;
1602 tx_ring
= container_of(napi
, struct qlcnic_host_tx_ring
, napi
);
1603 adapter
= tx_ring
->adapter
;
1604 work_done
= qlcnic_process_cmd_ring(adapter
, tx_ring
, budget
);
1606 napi_complete(&tx_ring
->napi
);
1607 if (test_bit(__QLCNIC_DEV_UP
, &adapter
->state
))
1608 qlcnic_83xx_enable_tx_intr(adapter
, tx_ring
);
1614 static int qlcnic_83xx_rx_poll(struct napi_struct
*napi
, int budget
)
1617 struct qlcnic_host_sds_ring
*sds_ring
;
1618 struct qlcnic_adapter
*adapter
;
1620 sds_ring
= container_of(napi
, struct qlcnic_host_sds_ring
, napi
);
1621 adapter
= sds_ring
->adapter
;
1622 work_done
= qlcnic_83xx_process_rcv_ring(sds_ring
, budget
);
1623 if (work_done
< budget
) {
1624 napi_complete(&sds_ring
->napi
);
1625 if (test_bit(__QLCNIC_DEV_UP
, &adapter
->state
))
1626 qlcnic_83xx_enable_intr(adapter
, sds_ring
);
1632 void qlcnic_83xx_napi_enable(struct qlcnic_adapter
*adapter
)
1635 struct qlcnic_host_sds_ring
*sds_ring
;
1636 struct qlcnic_host_tx_ring
*tx_ring
;
1637 struct qlcnic_recv_context
*recv_ctx
= adapter
->recv_ctx
;
1639 if (adapter
->is_up
!= QLCNIC_ADAPTER_UP_MAGIC
)
1642 for (ring
= 0; ring
< adapter
->max_sds_rings
; ring
++) {
1643 sds_ring
= &recv_ctx
->sds_rings
[ring
];
1644 napi_enable(&sds_ring
->napi
);
1645 if (adapter
->flags
& QLCNIC_MSIX_ENABLED
)
1646 qlcnic_83xx_enable_intr(adapter
, sds_ring
);
1649 if (adapter
->flags
& QLCNIC_MSIX_ENABLED
) {
1650 for (ring
= 0; ring
< adapter
->max_drv_tx_rings
; ring
++) {
1651 tx_ring
= &adapter
->tx_ring
[ring
];
1652 napi_enable(&tx_ring
->napi
);
1653 qlcnic_83xx_enable_tx_intr(adapter
, tx_ring
);
1658 void qlcnic_83xx_napi_disable(struct qlcnic_adapter
*adapter
)
1661 struct qlcnic_host_sds_ring
*sds_ring
;
1662 struct qlcnic_recv_context
*recv_ctx
= adapter
->recv_ctx
;
1663 struct qlcnic_host_tx_ring
*tx_ring
;
1665 if (adapter
->is_up
!= QLCNIC_ADAPTER_UP_MAGIC
)
1668 for (ring
= 0; ring
< adapter
->max_sds_rings
; ring
++) {
1669 sds_ring
= &recv_ctx
->sds_rings
[ring
];
1670 if (adapter
->flags
& QLCNIC_MSIX_ENABLED
)
1671 qlcnic_83xx_disable_intr(adapter
, sds_ring
);
1672 napi_synchronize(&sds_ring
->napi
);
1673 napi_disable(&sds_ring
->napi
);
1676 if (adapter
->flags
& QLCNIC_MSIX_ENABLED
) {
1677 for (ring
= 0; ring
< adapter
->max_drv_tx_rings
; ring
++) {
1678 tx_ring
= &adapter
->tx_ring
[ring
];
1679 qlcnic_83xx_disable_tx_intr(adapter
, tx_ring
);
1680 napi_synchronize(&tx_ring
->napi
);
1681 napi_disable(&tx_ring
->napi
);
1686 int qlcnic_83xx_napi_add(struct qlcnic_adapter
*adapter
,
1687 struct net_device
*netdev
)
1689 int ring
, max_sds_rings
;
1690 struct qlcnic_host_sds_ring
*sds_ring
;
1691 struct qlcnic_host_tx_ring
*tx_ring
;
1692 struct qlcnic_recv_context
*recv_ctx
= adapter
->recv_ctx
;
1694 if (qlcnic_alloc_sds_rings(recv_ctx
, adapter
->max_sds_rings
))
1697 max_sds_rings
= adapter
->max_sds_rings
;
1698 for (ring
= 0; ring
< adapter
->max_sds_rings
; ring
++) {
1699 sds_ring
= &recv_ctx
->sds_rings
[ring
];
1700 if (adapter
->flags
& QLCNIC_MSIX_ENABLED
)
1701 netif_napi_add(netdev
, &sds_ring
->napi
,
1702 qlcnic_83xx_rx_poll
,
1703 QLCNIC_NETDEV_WEIGHT
* 2);
1705 netif_napi_add(netdev
, &sds_ring
->napi
,
1707 QLCNIC_NETDEV_WEIGHT
/ max_sds_rings
);
1710 if (qlcnic_alloc_tx_rings(adapter
, netdev
)) {
1711 qlcnic_free_sds_rings(recv_ctx
);
1715 if (adapter
->flags
& QLCNIC_MSIX_ENABLED
) {
1716 for (ring
= 0; ring
< adapter
->max_drv_tx_rings
; ring
++) {
1717 tx_ring
= &adapter
->tx_ring
[ring
];
1718 netif_napi_add(netdev
, &tx_ring
->napi
,
1719 qlcnic_83xx_msix_tx_poll
,
1720 QLCNIC_NETDEV_WEIGHT
);
1727 void qlcnic_83xx_napi_del(struct qlcnic_adapter
*adapter
)
1730 struct qlcnic_host_sds_ring
*sds_ring
;
1731 struct qlcnic_recv_context
*recv_ctx
= adapter
->recv_ctx
;
1732 struct qlcnic_host_tx_ring
*tx_ring
;
1734 for (ring
= 0; ring
< adapter
->max_sds_rings
; ring
++) {
1735 sds_ring
= &recv_ctx
->sds_rings
[ring
];
1736 netif_napi_del(&sds_ring
->napi
);
1739 qlcnic_free_sds_rings(adapter
->recv_ctx
);
1741 if ((adapter
->flags
& QLCNIC_MSIX_ENABLED
)) {
1742 for (ring
= 0; ring
< adapter
->max_drv_tx_rings
; ring
++) {
1743 tx_ring
= &adapter
->tx_ring
[ring
];
1744 netif_napi_del(&tx_ring
->napi
);
1748 qlcnic_free_tx_rings(adapter
);
1751 void qlcnic_83xx_process_rcv_diag(struct qlcnic_adapter
*adapter
,
1752 int ring
, u64 sts_data
[])
1754 struct qlcnic_recv_context
*recv_ctx
= adapter
->recv_ctx
;
1755 struct sk_buff
*skb
;
1756 struct qlcnic_host_rds_ring
*rds_ring
;
1759 if (unlikely(ring
>= adapter
->max_rds_rings
))
1762 rds_ring
= &recv_ctx
->rds_rings
[ring
];
1763 index
= qlcnic_83xx_hndl(sts_data
[0]);
1764 if (unlikely(index
>= rds_ring
->num_desc
))
1767 length
= qlcnic_83xx_pktln(sts_data
[0]);
1769 skb
= qlcnic_process_rxbuf(adapter
, rds_ring
, index
, STATUS_CKSUM_OK
);
1773 if (length
> rds_ring
->skb_size
)
1774 skb_put(skb
, rds_ring
->skb_size
);
1776 skb_put(skb
, length
);
1778 if (!qlcnic_check_loopback_buff(skb
->data
, adapter
->mac_addr
))
1779 adapter
->ahw
->diag_cnt
++;
1781 dump_skb(skb
, adapter
);
1783 dev_kfree_skb_any(skb
);
1787 void qlcnic_83xx_process_rcv_ring_diag(struct qlcnic_host_sds_ring
*sds_ring
)
1789 struct qlcnic_adapter
*adapter
= sds_ring
->adapter
;
1790 struct status_desc
*desc
;
1793 u32 consumer
= sds_ring
->consumer
;
1795 desc
= &sds_ring
->desc_head
[consumer
];
1796 sts_data
[0] = le64_to_cpu(desc
->status_desc_data
[0]);
1797 sts_data
[1] = le64_to_cpu(desc
->status_desc_data
[1]);
1798 opcode
= qlcnic_83xx_opcode(sts_data
[1]);
1802 ring
= QLCNIC_FETCH_RING_ID(qlcnic_83xx_hndl(sts_data
[0]));
1803 qlcnic_83xx_process_rcv_diag(adapter
, ring
, sts_data
);
1804 desc
= &sds_ring
->desc_head
[consumer
];
1805 desc
->status_desc_data
[0] = cpu_to_le64(STATUS_OWNER_PHANTOM
);
1806 consumer
= get_next_index(consumer
, sds_ring
->num_desc
);
1807 sds_ring
->consumer
= consumer
;
1808 writel(consumer
, sds_ring
->crb_sts_consumer
);