qlcnic: fix estimation of receive MSS in case of LRO for 83xx adapter
drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
1 /*
2 * QLogic qlcnic NIC Driver
3 * Copyright (c) 2009-2013 QLogic Corporation
4 *
5 * See LICENSE.qlcnic for copyright and licensing details.
6 */
7
8 #include <linux/netdevice.h>
9 #include <linux/if_vlan.h>
10 #include <net/ip.h>
11 #include <linux/ipv6.h>
12
13 #include "qlcnic.h"
14
15 #define TX_ETHER_PKT 0x01
16 #define TX_TCP_PKT 0x02
17 #define TX_UDP_PKT 0x03
18 #define TX_IP_PKT 0x04
19 #define TX_TCP_LSO 0x05
20 #define TX_TCP_LSO6 0x06
21 #define TX_TCPV6_PKT 0x0b
22 #define TX_UDPV6_PKT 0x0c
23 #define FLAGS_VLAN_TAGGED 0x10
24 #define FLAGS_VLAN_OOB 0x40
25
26 #define qlcnic_set_tx_vlan_tci(cmd_desc, v) \
27 (cmd_desc)->vlan_TCI = cpu_to_le16(v);
28 #define qlcnic_set_cmd_desc_port(cmd_desc, var) \
29 ((cmd_desc)->port_ctxid |= ((var) & 0x0F))
30 #define qlcnic_set_cmd_desc_ctxid(cmd_desc, var) \
31 ((cmd_desc)->port_ctxid |= ((var) << 4 & 0xF0))
32
33 #define qlcnic_set_tx_port(_desc, _port) \
34 ((_desc)->port_ctxid = ((_port) & 0xf) | (((_port) << 4) & 0xf0))
35
36 #define qlcnic_set_tx_flags_opcode(_desc, _flags, _opcode) \
37 ((_desc)->flags_opcode |= \
38 cpu_to_le16(((_flags) & 0x7f) | (((_opcode) & 0x3f) << 7)))
39
40 #define qlcnic_set_tx_frags_len(_desc, _frags, _len) \
41 ((_desc)->nfrags__length = \
42 cpu_to_le32(((_frags) & 0xff) | (((_len) & 0xffffff) << 8)))
43
44 /* owner bits of status_desc */
45 #define STATUS_OWNER_HOST (0x1ULL << 56)
46 #define STATUS_OWNER_PHANTOM (0x2ULL << 56)
47
48 /* Status descriptor:
49 0-3 port, 4-7 status, 8-11 type, 12-27 total_length
50 28-43 reference_handle, 44-47 protocol, 48-52 pkt_offset
51 53-55 desc_cnt, 56-57 owner, 58-63 opcode
52 */
53 #define qlcnic_get_sts_port(sts_data) \
54 ((sts_data) & 0x0F)
55 #define qlcnic_get_sts_status(sts_data) \
56 (((sts_data) >> 4) & 0x0F)
57 #define qlcnic_get_sts_type(sts_data) \
58 (((sts_data) >> 8) & 0x0F)
59 #define qlcnic_get_sts_totallength(sts_data) \
60 (((sts_data) >> 12) & 0xFFFF)
61 #define qlcnic_get_sts_refhandle(sts_data) \
62 (((sts_data) >> 28) & 0xFFFF)
63 #define qlcnic_get_sts_prot(sts_data) \
64 (((sts_data) >> 44) & 0x0F)
65 #define qlcnic_get_sts_pkt_offset(sts_data) \
66 (((sts_data) >> 48) & 0x1F)
67 #define qlcnic_get_sts_desc_cnt(sts_data) \
68 (((sts_data) >> 53) & 0x7)
69 #define qlcnic_get_sts_opcode(sts_data) \
70 (((sts_data) >> 58) & 0x03F)
71
72 #define qlcnic_get_lro_sts_refhandle(sts_data) \
73 ((sts_data) & 0x07FFF)
74 #define qlcnic_get_lro_sts_length(sts_data) \
75 (((sts_data) >> 16) & 0x0FFFF)
76 #define qlcnic_get_lro_sts_l2_hdr_offset(sts_data) \
77 (((sts_data) >> 32) & 0x0FF)
78 #define qlcnic_get_lro_sts_l4_hdr_offset(sts_data) \
79 (((sts_data) >> 40) & 0x0FF)
80 #define qlcnic_get_lro_sts_timestamp(sts_data) \
81 (((sts_data) >> 48) & 0x1)
82 #define qlcnic_get_lro_sts_type(sts_data) \
83 (((sts_data) >> 49) & 0x7)
84 #define qlcnic_get_lro_sts_push_flag(sts_data) \
85 (((sts_data) >> 52) & 0x1)
86 #define qlcnic_get_lro_sts_seq_number(sts_data) \
87 ((sts_data) & 0x0FFFFFFFF)
88 #define qlcnic_get_lro_sts_mss(sts_data1) \
89 ((sts_data1 >> 32) & 0x0FFFF)
90
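/* MSS of an LRO-aggregated frame as reported by firmware: 82xx adapters
 * carry it in bits 47:32 of sts_data1, 83xx adapters in the low 16 bits of
 * sts_data[0]; both values seed skb_shinfo()->gso_size for LRO skbs.
 */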
91 #define qlcnic_83xx_get_lro_sts_mss(sts) ((sts) & 0xffff)
92
93 /* opcode field in status_desc */
94 #define QLCNIC_SYN_OFFLOAD 0x03
95 #define QLCNIC_RXPKT_DESC 0x04
96 #define QLCNIC_OLD_RXPKT_DESC 0x3f
97 #define QLCNIC_RESPONSE_DESC 0x05
98 #define QLCNIC_LRO_DESC 0x12
99
100 #define QLCNIC_TX_POLL_BUDGET 128
101 #define QLCNIC_TCP_HDR_SIZE 20
102 #define QLCNIC_TCP_TS_OPTION_SIZE 12
103 #define QLCNIC_FETCH_RING_ID(handle) ((handle) >> 63)
104 #define QLCNIC_DESC_OWNER_FW cpu_to_le64(STATUS_OWNER_PHANTOM)
105
106 #define QLCNIC_TCP_TS_HDR_SIZE (QLCNIC_TCP_HDR_SIZE + QLCNIC_TCP_TS_OPTION_SIZE)
107
108 /* for status field in status_desc */
109 #define STATUS_CKSUM_LOOP 0
110 #define STATUS_CKSUM_OK 2
111
112 #define qlcnic_83xx_pktln(sts) ((sts >> 32) & 0x3FFF)
113 #define qlcnic_83xx_hndl(sts) ((sts >> 48) & 0x7FFF)
114 #define qlcnic_83xx_csum_status(sts) ((sts >> 39) & 7)
115 #define qlcnic_83xx_opcode(sts) ((sts >> 42) & 0xF)
116 #define qlcnic_83xx_vlan_tag(sts) (((sts) >> 48) & 0xFFFF)
117 #define qlcnic_83xx_lro_pktln(sts) (((sts) >> 32) & 0x3FFF)
118 #define qlcnic_83xx_l2_hdr_off(sts) (((sts) >> 16) & 0xFF)
119 #define qlcnic_83xx_l4_hdr_off(sts) (((sts) >> 24) & 0xFF)
120 #define qlcnic_83xx_pkt_cnt(sts) (((sts) >> 16) & 0x7)
121 #define qlcnic_83xx_is_tstamp(sts) (((sts) >> 40) & 1)
122 #define qlcnic_83xx_is_psh_bit(sts) (((sts) >> 41) & 1)
123 #define qlcnic_83xx_is_ip_align(sts) (((sts) >> 46) & 1)
124 #define qlcnic_83xx_has_vlan_tag(sts) (((sts) >> 47) & 1)
125
126 struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *,
127 struct qlcnic_host_rds_ring *, u16, u16);
128
129 inline void qlcnic_83xx_enable_tx_intr(struct qlcnic_adapter *adapter,
130 struct qlcnic_host_tx_ring *tx_ring)
131 {
132 writel(0, tx_ring->crb_intr_mask);
133 }
134
135 inline void qlcnic_83xx_disable_tx_intr(struct qlcnic_adapter *adapter,
136 struct qlcnic_host_tx_ring *tx_ring)
137 {
138 writel(1, tx_ring->crb_intr_mask);
139 }
140
141 static inline u8 qlcnic_mac_hash(u64 mac)
142 {
143 return (u8)((mac & 0xff) ^ ((mac >> 40) & 0xff));
144 }
145
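/* On QLE834X adapters the receive ring id is merged into the upper bits
 * (ring_id << 15) of the buffer reference handle handed to firmware; other
 * adapters pass the handle through unchanged.
 */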
146 static inline u32 qlcnic_get_ref_handle(struct qlcnic_adapter *adapter,
147 u16 handle, u8 ring_id)
148 {
149 if (adapter->pdev->device == PCI_DEVICE_ID_QLOGIC_QLE834X)
150 return handle | (ring_id << 15);
151 else
152 return handle;
153 }
154
155 void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter, u64 *uaddr,
156 __le16 vlan_id)
157 {
158 struct cmd_desc_type0 *hwdesc;
159 struct qlcnic_nic_req *req;
160 struct qlcnic_mac_req *mac_req;
161 struct qlcnic_vlan_req *vlan_req;
162 struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
163 u32 producer;
164 u64 word;
165
166 producer = tx_ring->producer;
167 hwdesc = &tx_ring->desc_head[tx_ring->producer];
168
169 req = (struct qlcnic_nic_req *)hwdesc;
170 memset(req, 0, sizeof(struct qlcnic_nic_req));
171 req->qhdr = cpu_to_le64(QLCNIC_REQUEST << 23);
172
173 word = QLCNIC_MAC_EVENT | ((u64)(adapter->portnum) << 16);
174 req->req_hdr = cpu_to_le64(word);
175
176 mac_req = (struct qlcnic_mac_req *)&(req->words[0]);
177 mac_req->op = vlan_id ? QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_ADD;
178 memcpy(mac_req->mac_addr, uaddr, ETH_ALEN);
179
180 vlan_req = (struct qlcnic_vlan_req *)&req->words[1];
181 vlan_req->vlan_id = vlan_id;
182
183 tx_ring->producer = get_next_index(producer, tx_ring->num_desc);
184 smp_mb();
185 }
186
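/* Learn the source MAC (and, when the eswitch is enabled, the VLAN id) of
 * a transmitted frame and program it into the firmware filter table,
 * re-issuing the request for entries older than QLCNIC_READD_AGE seconds.
 */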
187 static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
188 struct cmd_desc_type0 *first_desc,
189 struct sk_buff *skb)
190 {
191 struct qlcnic_filter *fil, *tmp_fil;
192 struct hlist_node *tmp_hnode, *n;
193 struct hlist_head *head;
194 struct net_device *netdev = adapter->netdev;
195 struct ethhdr *phdr = (struct ethhdr *)(skb->data);
196 u64 src_addr = 0;
197 __le16 vlan_id = 0;
198 u8 hindex;
199
200 if (ether_addr_equal(phdr->h_source, adapter->mac_addr))
201 return;
202
203 if (adapter->fhash.fnum >= adapter->fhash.fmax) {
204 adapter->stats.mac_filter_limit_overrun++;
205 netdev_info(netdev, "Can not add more than %d mac addresses\n",
206 adapter->fhash.fmax);
207 return;
208 }
209
210 /* Only NPAR capable devices support vlan based learning */
211 if (adapter->flags & QLCNIC_ESWITCH_ENABLED)
212 vlan_id = first_desc->vlan_TCI;
213 memcpy(&src_addr, phdr->h_source, ETH_ALEN);
214 hindex = qlcnic_mac_hash(src_addr) & (adapter->fhash.fbucket_size - 1);
215 head = &(adapter->fhash.fhead[hindex]);
216
217 hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) {
218 if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) &&
219 tmp_fil->vlan_id == vlan_id) {
220 if (jiffies > (QLCNIC_READD_AGE * HZ + tmp_fil->ftime))
221 qlcnic_change_filter(adapter, &src_addr,
222 vlan_id);
223 tmp_fil->ftime = jiffies;
224 return;
225 }
226 }
227
228 fil = kzalloc(sizeof(struct qlcnic_filter), GFP_ATOMIC);
229 if (!fil)
230 return;
231
232 qlcnic_change_filter(adapter, &src_addr, vlan_id);
233 fil->ftime = jiffies;
234 fil->vlan_id = vlan_id;
235 memcpy(fil->faddr, &src_addr, ETH_ALEN);
236 spin_lock(&adapter->mac_learn_lock);
237 hlist_add_head(&(fil->fnode), head);
238 adapter->fhash.fnum++;
239 spin_unlock(&adapter->mac_learn_lock);
240 }
241
242 static int qlcnic_tx_pkt(struct qlcnic_adapter *adapter,
243 struct cmd_desc_type0 *first_desc, struct sk_buff *skb)
244 {
245 u8 l4proto, opcode = 0, hdr_len = 0;
246 u16 flags = 0, vlan_tci = 0;
247 int copied, offset, copy_len, size;
248 struct cmd_desc_type0 *hwdesc;
249 struct vlan_ethhdr *vh;
250 struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
251 u16 protocol = ntohs(skb->protocol);
252 u32 producer = tx_ring->producer;
253
254 if (protocol == ETH_P_8021Q) {
255 vh = (struct vlan_ethhdr *)skb->data;
256 flags = FLAGS_VLAN_TAGGED;
257 vlan_tci = ntohs(vh->h_vlan_TCI);
258 protocol = ntohs(vh->h_vlan_encapsulated_proto);
259 } else if (vlan_tx_tag_present(skb)) {
260 flags = FLAGS_VLAN_OOB;
261 vlan_tci = vlan_tx_tag_get(skb);
262 }
263 if (unlikely(adapter->pvid)) {
264 if (vlan_tci && !(adapter->flags & QLCNIC_TAGGING_ENABLED))
265 return -EIO;
266 if (vlan_tci && (adapter->flags & QLCNIC_TAGGING_ENABLED))
267 goto set_flags;
268
269 flags = FLAGS_VLAN_OOB;
270 vlan_tci = adapter->pvid;
271 }
272 set_flags:
273 qlcnic_set_tx_vlan_tci(first_desc, vlan_tci);
274 qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
275
276 if (*(skb->data) & BIT_0) {
277 flags |= BIT_0;
278 memcpy(&first_desc->eth_addr, skb->data, ETH_ALEN);
279 }
280 opcode = TX_ETHER_PKT;
281 if ((adapter->netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) &&
282 skb_shinfo(skb)->gso_size > 0) {
283 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
284 first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
285 first_desc->total_hdr_length = hdr_len;
286 opcode = (protocol == ETH_P_IPV6) ? TX_TCP_LSO6 : TX_TCP_LSO;
287
288 /* For LSO, we need to copy the MAC/IP/TCP headers into
289 * the descriptor ring */
290 copied = 0;
291 offset = 2;
292
293 if (flags & FLAGS_VLAN_OOB) {
294 first_desc->total_hdr_length += VLAN_HLEN;
295 first_desc->tcp_hdr_offset = VLAN_HLEN;
296 first_desc->ip_hdr_offset = VLAN_HLEN;
297
298 /* Only in case of TSO on vlan device */
299 flags |= FLAGS_VLAN_TAGGED;
300
301 /* Create a TSO vlan header template for firmware */
302 hwdesc = &tx_ring->desc_head[producer];
303 tx_ring->cmd_buf_arr[producer].skb = NULL;
304
305 copy_len = min((int)sizeof(struct cmd_desc_type0) -
306 offset, hdr_len + VLAN_HLEN);
307
308 vh = (struct vlan_ethhdr *)((char *) hwdesc + 2);
309 skb_copy_from_linear_data(skb, vh, 12);
310 vh->h_vlan_proto = htons(ETH_P_8021Q);
311 vh->h_vlan_TCI = htons(vlan_tci);
312
313 skb_copy_from_linear_data_offset(skb, 12,
314 (char *)vh + 16,
315 copy_len - 16);
316 copied = copy_len - VLAN_HLEN;
317 offset = 0;
318 producer = get_next_index(producer, tx_ring->num_desc);
319 }
320
321 while (copied < hdr_len) {
322 size = (int)sizeof(struct cmd_desc_type0) - offset;
323 copy_len = min(size, (hdr_len - copied));
324 hwdesc = &tx_ring->desc_head[producer];
325 tx_ring->cmd_buf_arr[producer].skb = NULL;
326 skb_copy_from_linear_data_offset(skb, copied,
327 (char *)hwdesc +
328 offset, copy_len);
329 copied += copy_len;
330 offset = 0;
331 producer = get_next_index(producer, tx_ring->num_desc);
332 }
333
334 tx_ring->producer = producer;
335 smp_mb();
336 adapter->stats.lso_frames++;
337
338 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
339 if (protocol == ETH_P_IP) {
340 l4proto = ip_hdr(skb)->protocol;
341
342 if (l4proto == IPPROTO_TCP)
343 opcode = TX_TCP_PKT;
344 else if (l4proto == IPPROTO_UDP)
345 opcode = TX_UDP_PKT;
346 } else if (protocol == ETH_P_IPV6) {
347 l4proto = ipv6_hdr(skb)->nexthdr;
348
349 if (l4proto == IPPROTO_TCP)
350 opcode = TX_TCPV6_PKT;
351 else if (l4proto == IPPROTO_UDP)
352 opcode = TX_UDPV6_PKT;
353 }
354 }
355 first_desc->tcp_hdr_offset += skb_transport_offset(skb);
356 first_desc->ip_hdr_offset += skb_network_offset(skb);
357 qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
358
359 return 0;
360 }
361
362 static int qlcnic_map_tx_skb(struct pci_dev *pdev, struct sk_buff *skb,
363 struct qlcnic_cmd_buffer *pbuf)
364 {
365 struct qlcnic_skb_frag *nf;
366 struct skb_frag_struct *frag;
367 int i, nr_frags;
368 dma_addr_t map;
369
370 nr_frags = skb_shinfo(skb)->nr_frags;
371 nf = &pbuf->frag_array[0];
372
373 map = pci_map_single(pdev, skb->data, skb_headlen(skb),
374 PCI_DMA_TODEVICE);
375 if (pci_dma_mapping_error(pdev, map))
376 goto out_err;
377
378 nf->dma = map;
379 nf->length = skb_headlen(skb);
380
381 for (i = 0; i < nr_frags; i++) {
382 frag = &skb_shinfo(skb)->frags[i];
383 nf = &pbuf->frag_array[i+1];
384 map = skb_frag_dma_map(&pdev->dev, frag, 0, skb_frag_size(frag),
385 DMA_TO_DEVICE);
386 if (dma_mapping_error(&pdev->dev, map))
387 goto unwind;
388
389 nf->dma = map;
390 nf->length = skb_frag_size(frag);
391 }
392
393 return 0;
394
395 unwind:
396 while (--i >= 0) {
397 nf = &pbuf->frag_array[i+1];
398 pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
399 }
400
401 nf = &pbuf->frag_array[0];
402 pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
403
404 out_err:
405 return -ENOMEM;
406 }
407
408 static void qlcnic_unmap_buffers(struct pci_dev *pdev, struct sk_buff *skb,
409 struct qlcnic_cmd_buffer *pbuf)
410 {
411 struct qlcnic_skb_frag *nf = &pbuf->frag_array[0];
412 int i, nr_frags = skb_shinfo(skb)->nr_frags;
413
414 for (i = 0; i < nr_frags; i++) {
415 nf = &pbuf->frag_array[i+1];
416 pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
417 }
418
419 nf = &pbuf->frag_array[0];
420 pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
421 pbuf->skb = NULL;
422 }
423
424 static inline void qlcnic_clear_cmddesc(u64 *desc)
425 {
426 desc[0] = 0ULL;
427 desc[2] = 0ULL;
428 desc[7] = 0ULL;
429 }
430
431 netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
432 {
433 struct qlcnic_adapter *adapter = netdev_priv(netdev);
434 struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
435 struct qlcnic_cmd_buffer *pbuf;
436 struct qlcnic_skb_frag *buffrag;
437 struct cmd_desc_type0 *hwdesc, *first_desc;
438 struct pci_dev *pdev;
439 struct ethhdr *phdr;
440 int i, k, frag_count, delta = 0;
441 u32 producer, num_txd;
442
443 num_txd = tx_ring->num_desc;
444
445 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
446 netif_stop_queue(netdev);
447 return NETDEV_TX_BUSY;
448 }
449
450 if (adapter->flags & QLCNIC_MACSPOOF) {
451 phdr = (struct ethhdr *)skb->data;
452 if (!ether_addr_equal(phdr->h_source, adapter->mac_addr))
453 goto drop_packet;
454 }
455
456 frag_count = skb_shinfo(skb)->nr_frags + 1;
457 /* 14 frags supported for normal packet and
458 * 32 frags supported for TSO packet
459 */
460 if (!skb_is_gso(skb) && frag_count > QLCNIC_MAX_FRAGS_PER_TX) {
461 for (i = 0; i < (frag_count - QLCNIC_MAX_FRAGS_PER_TX); i++)
462 delta += skb_frag_size(&skb_shinfo(skb)->frags[i]);
463
464 if (!__pskb_pull_tail(skb, delta))
465 goto drop_packet;
466
467 frag_count = 1 + skb_shinfo(skb)->nr_frags;
468 }
469
470 if (unlikely(qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH)) {
471 netif_stop_queue(netdev);
472 if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
473 netif_start_queue(netdev);
474 } else {
475 adapter->stats.xmit_off++;
476 return NETDEV_TX_BUSY;
477 }
478 }
479
480 producer = tx_ring->producer;
481 pbuf = &tx_ring->cmd_buf_arr[producer];
482 pdev = adapter->pdev;
483 first_desc = &tx_ring->desc_head[producer];
484 hwdesc = &tx_ring->desc_head[producer];
485 qlcnic_clear_cmddesc((u64 *)hwdesc);
486
487 if (qlcnic_map_tx_skb(pdev, skb, pbuf)) {
488 adapter->stats.tx_dma_map_error++;
489 goto drop_packet;
490 }
491
492 pbuf->skb = skb;
493 pbuf->frag_count = frag_count;
494
495 qlcnic_set_tx_frags_len(first_desc, frag_count, skb->len);
496 qlcnic_set_tx_port(first_desc, adapter->portnum);
497
498 for (i = 0; i < frag_count; i++) {
499 k = i % 4;
500
501 if ((k == 0) && (i > 0)) {
502 /* move to next desc.*/
503 producer = get_next_index(producer, num_txd);
504 hwdesc = &tx_ring->desc_head[producer];
505 qlcnic_clear_cmddesc((u64 *)hwdesc);
506 tx_ring->cmd_buf_arr[producer].skb = NULL;
507 }
508
509 buffrag = &pbuf->frag_array[i];
510 hwdesc->buffer_length[k] = cpu_to_le16(buffrag->length);
511 switch (k) {
512 case 0:
513 hwdesc->addr_buffer1 = cpu_to_le64(buffrag->dma);
514 break;
515 case 1:
516 hwdesc->addr_buffer2 = cpu_to_le64(buffrag->dma);
517 break;
518 case 2:
519 hwdesc->addr_buffer3 = cpu_to_le64(buffrag->dma);
520 break;
521 case 3:
522 hwdesc->addr_buffer4 = cpu_to_le64(buffrag->dma);
523 break;
524 }
525 }
526
527 tx_ring->producer = get_next_index(producer, num_txd);
528 smp_mb();
529
530 if (unlikely(qlcnic_tx_pkt(adapter, first_desc, skb)))
531 goto unwind_buff;
532
533 if (adapter->drv_mac_learn)
534 qlcnic_send_filter(adapter, first_desc, skb);
535
536 adapter->stats.txbytes += skb->len;
537 adapter->stats.xmitcalled++;
538
539 qlcnic_update_cmd_producer(tx_ring);
540
541 return NETDEV_TX_OK;
542
543 unwind_buff:
544 qlcnic_unmap_buffers(pdev, skb, pbuf);
545 drop_packet:
546 adapter->stats.txdropped++;
547 dev_kfree_skb_any(skb);
548 return NETDEV_TX_OK;
549 }
550
551 void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup)
552 {
553 struct net_device *netdev = adapter->netdev;
554
555 if (adapter->ahw->linkup && !linkup) {
556 netdev_info(netdev, "NIC Link is down\n");
557 adapter->ahw->linkup = 0;
558 if (netif_running(netdev)) {
559 netif_carrier_off(netdev);
560 netif_stop_queue(netdev);
561 }
562 } else if (!adapter->ahw->linkup && linkup) {
563 netdev_info(netdev, "NIC Link is up\n");
564 adapter->ahw->linkup = 1;
565 if (netif_running(netdev)) {
566 netif_carrier_on(netdev);
567 netif_wake_queue(netdev);
568 }
569 }
570 }
571
572 static int qlcnic_alloc_rx_skb(struct qlcnic_adapter *adapter,
573 struct qlcnic_host_rds_ring *rds_ring,
574 struct qlcnic_rx_buffer *buffer)
575 {
576 struct sk_buff *skb;
577 dma_addr_t dma;
578 struct pci_dev *pdev = adapter->pdev;
579
580 skb = netdev_alloc_skb(adapter->netdev, rds_ring->skb_size);
581 if (!skb) {
582 adapter->stats.skb_alloc_failure++;
583 return -ENOMEM;
584 }
585
586 skb_reserve(skb, NET_IP_ALIGN);
587 dma = pci_map_single(pdev, skb->data,
588 rds_ring->dma_size, PCI_DMA_FROMDEVICE);
589
590 if (pci_dma_mapping_error(pdev, dma)) {
591 adapter->stats.rx_dma_map_error++;
592 dev_kfree_skb_any(skb);
593 return -ENOMEM;
594 }
595
596 buffer->skb = skb;
597 buffer->dma = dma;
598
599 return 0;
600 }
601
602 static void qlcnic_post_rx_buffers_nodb(struct qlcnic_adapter *adapter,
603 struct qlcnic_host_rds_ring *rds_ring,
604 u8 ring_id)
605 {
606 struct rcv_desc *pdesc;
607 struct qlcnic_rx_buffer *buffer;
608 int count = 0;
609 uint32_t producer, handle;
610 struct list_head *head;
611
612 if (!spin_trylock(&rds_ring->lock))
613 return;
614
615 producer = rds_ring->producer;
616 head = &rds_ring->free_list;
617 while (!list_empty(head)) {
618 buffer = list_entry(head->next, struct qlcnic_rx_buffer, list);
619
620 if (!buffer->skb) {
621 if (qlcnic_alloc_rx_skb(adapter, rds_ring, buffer))
622 break;
623 }
624 count++;
625 list_del(&buffer->list);
626
627 /* make a rcv descriptor */
628 pdesc = &rds_ring->desc_head[producer];
629 handle = qlcnic_get_ref_handle(adapter,
630 buffer->ref_handle, ring_id);
631 pdesc->reference_handle = cpu_to_le16(handle);
632 pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
633 pdesc->addr_buffer = cpu_to_le64(buffer->dma);
634 producer = get_next_index(producer, rds_ring->num_desc);
635 }
636 if (count) {
637 rds_ring->producer = producer;
638 writel((producer - 1) & (rds_ring->num_desc - 1),
639 rds_ring->crb_rcv_producer);
640 }
641 spin_unlock(&rds_ring->lock);
642 }
643
644 static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter,
645 struct qlcnic_host_tx_ring *tx_ring,
646 int budget)
647 {
648 u32 sw_consumer, hw_consumer;
649 int i, done, count = 0;
650 struct qlcnic_cmd_buffer *buffer;
651 struct pci_dev *pdev = adapter->pdev;
652 struct net_device *netdev = adapter->netdev;
653 struct qlcnic_skb_frag *frag;
654
655 if (!spin_trylock(&adapter->tx_clean_lock))
656 return 1;
657
658 sw_consumer = tx_ring->sw_consumer;
659 hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
660
661 while (sw_consumer != hw_consumer) {
662 buffer = &tx_ring->cmd_buf_arr[sw_consumer];
663 if (buffer->skb) {
664 frag = &buffer->frag_array[0];
665 pci_unmap_single(pdev, frag->dma, frag->length,
666 PCI_DMA_TODEVICE);
667 frag->dma = 0ULL;
668 for (i = 1; i < buffer->frag_count; i++) {
669 frag++;
670 pci_unmap_page(pdev, frag->dma, frag->length,
671 PCI_DMA_TODEVICE);
672 frag->dma = 0ULL;
673 }
674 adapter->stats.xmitfinished++;
675 dev_kfree_skb_any(buffer->skb);
676 buffer->skb = NULL;
677 }
678
679 sw_consumer = get_next_index(sw_consumer, tx_ring->num_desc);
680 if (++count >= budget)
681 break;
682 }
683
684 if (count && netif_running(netdev)) {
685 tx_ring->sw_consumer = sw_consumer;
686 smp_mb();
687 if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) {
688 if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
689 netif_wake_queue(netdev);
690 adapter->stats.xmit_on++;
691 }
692 }
693 adapter->tx_timeo_cnt = 0;
694 }
695 /*
696 * If everything is freed up to consumer then check if the ring is full.
697 * If the ring is full then check if more needs to be freed and
698 * schedule the callback again.
699 *
700 * This happens when there are 2 CPUs. One could be freeing and the
701 * other filling it. If the ring is full when we get out of here and
702 * the card has already interrupted the host then the host can miss the
703 * interrupt.
704 *
705 * There is still a possible race condition and the host could miss an
706 * interrupt. The card has to take care of this.
707 */
708 hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
709 done = (sw_consumer == hw_consumer);
710 spin_unlock(&adapter->tx_clean_lock);
711
712 return done;
713 }
714
715 static int qlcnic_poll(struct napi_struct *napi, int budget)
716 {
717 int tx_complete, work_done;
718 struct qlcnic_host_sds_ring *sds_ring;
719 struct qlcnic_adapter *adapter;
720
721 sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi);
722 adapter = sds_ring->adapter;
723 tx_complete = qlcnic_process_cmd_ring(adapter, adapter->tx_ring,
724 budget);
725 work_done = qlcnic_process_rcv_ring(sds_ring, budget);
726 if ((work_done < budget) && tx_complete) {
727 napi_complete(&sds_ring->napi);
728 if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
729 qlcnic_enable_int(sds_ring);
730 }
731
732 return work_done;
733 }
734
735 static int qlcnic_rx_poll(struct napi_struct *napi, int budget)
736 {
737 struct qlcnic_host_sds_ring *sds_ring;
738 struct qlcnic_adapter *adapter;
739 int work_done;
740
741 sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi);
742 adapter = sds_ring->adapter;
743
744 work_done = qlcnic_process_rcv_ring(sds_ring, budget);
745
746 if (work_done < budget) {
747 napi_complete(&sds_ring->napi);
748 if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
749 qlcnic_enable_int(sds_ring);
750 }
751
752 return work_done;
753 }
754
755 static void qlcnic_handle_linkevent(struct qlcnic_adapter *adapter,
756 struct qlcnic_fw_msg *msg)
757 {
758 u32 cable_OUI;
759 u16 cable_len, link_speed;
760 u8 link_status, module, duplex, autoneg, lb_status = 0;
761 struct net_device *netdev = adapter->netdev;
762
763 adapter->ahw->has_link_events = 1;
764
765 cable_OUI = msg->body[1] & 0xffffffff;
766 cable_len = (msg->body[1] >> 32) & 0xffff;
767 link_speed = (msg->body[1] >> 48) & 0xffff;
768
769 link_status = msg->body[2] & 0xff;
770 duplex = (msg->body[2] >> 16) & 0xff;
771 autoneg = (msg->body[2] >> 24) & 0xff;
772 lb_status = (msg->body[2] >> 32) & 0x3;
773
774 module = (msg->body[2] >> 8) & 0xff;
775 if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE)
776 dev_info(&netdev->dev,
777 "unsupported cable: OUI 0x%x, length %d\n",
778 cable_OUI, cable_len);
779 else if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN)
780 dev_info(&netdev->dev, "unsupported cable length %d\n",
781 cable_len);
782
783 if (!link_status && (lb_status == QLCNIC_ILB_MODE ||
784 lb_status == QLCNIC_ELB_MODE))
785 adapter->ahw->loopback_state |= QLCNIC_LINKEVENT;
786
787 qlcnic_advert_link_change(adapter, link_status);
788
789 if (duplex == LINKEVENT_FULL_DUPLEX)
790 adapter->ahw->link_duplex = DUPLEX_FULL;
791 else
792 adapter->ahw->link_duplex = DUPLEX_HALF;
793
794 adapter->ahw->module_type = module;
795 adapter->ahw->link_autoneg = autoneg;
796
797 if (link_status) {
798 adapter->ahw->link_speed = link_speed;
799 } else {
800 adapter->ahw->link_speed = SPEED_UNKNOWN;
801 adapter->ahw->link_duplex = DUPLEX_UNKNOWN;
802 }
803 }
804
805 static void qlcnic_handle_fw_message(int desc_cnt, int index,
806 struct qlcnic_host_sds_ring *sds_ring)
807 {
808 struct qlcnic_fw_msg msg;
809 struct status_desc *desc;
810 struct qlcnic_adapter *adapter;
811 struct device *dev;
812 int i = 0, opcode, ret;
813
814 while (desc_cnt > 0 && i < 8) {
815 desc = &sds_ring->desc_head[index];
816 msg.words[i++] = le64_to_cpu(desc->status_desc_data[0]);
817 msg.words[i++] = le64_to_cpu(desc->status_desc_data[1]);
818
819 index = get_next_index(index, sds_ring->num_desc);
820 desc_cnt--;
821 }
822
823 adapter = sds_ring->adapter;
824 dev = &adapter->pdev->dev;
825 opcode = qlcnic_get_nic_msg_opcode(msg.body[0]);
826
827 switch (opcode) {
828 case QLCNIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE:
829 qlcnic_handle_linkevent(adapter, &msg);
830 break;
831 case QLCNIC_C2H_OPCODE_CONFIG_LOOPBACK:
832 ret = (u32)(msg.body[1]);
833 switch (ret) {
834 case 0:
835 adapter->ahw->loopback_state |= QLCNIC_LB_RESPONSE;
836 break;
837 case 1:
838 dev_info(dev, "loopback already in progress\n");
839 adapter->ahw->diag_cnt = -QLCNIC_TEST_IN_PROGRESS;
840 break;
841 case 2:
842 dev_info(dev, "loopback cable is not connected\n");
843 adapter->ahw->diag_cnt = -QLCNIC_LB_CABLE_NOT_CONN;
844 break;
845 default:
846 dev_info(dev,
847 "loopback configure request failed, err %x\n",
848 ret);
849 adapter->ahw->diag_cnt = -QLCNIC_UNDEFINED_ERROR;
850 break;
851 }
852 break;
853 default:
854 break;
855 }
856 }
857
858 struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *adapter,
859 struct qlcnic_host_rds_ring *ring,
860 u16 index, u16 cksum)
861 {
862 struct qlcnic_rx_buffer *buffer;
863 struct sk_buff *skb;
864
865 buffer = &ring->rx_buf_arr[index];
866 if (unlikely(buffer->skb == NULL)) {
867 WARN_ON(1);
868 return NULL;
869 }
870
871 pci_unmap_single(adapter->pdev, buffer->dma, ring->dma_size,
872 PCI_DMA_FROMDEVICE);
873
874 skb = buffer->skb;
875 if (likely((adapter->netdev->features & NETIF_F_RXCSUM) &&
876 (cksum == STATUS_CKSUM_OK || cksum == STATUS_CKSUM_LOOP))) {
877 adapter->stats.csummed++;
878 skb->ip_summed = CHECKSUM_UNNECESSARY;
879 } else {
880 skb_checksum_none_assert(skb);
881 }
882
883
884 buffer->skb = NULL;
885
886 return skb;
887 }
888
889 static inline int qlcnic_check_rx_tagging(struct qlcnic_adapter *adapter,
890 struct sk_buff *skb, u16 *vlan_tag)
891 {
892 struct ethhdr *eth_hdr;
893
894 if (!__vlan_get_tag(skb, vlan_tag)) {
895 eth_hdr = (struct ethhdr *)skb->data;
896 memmove(skb->data + VLAN_HLEN, eth_hdr, ETH_ALEN * 2);
897 skb_pull(skb, VLAN_HLEN);
898 }
899 if (!adapter->pvid)
900 return 0;
901
902 if (*vlan_tag == adapter->pvid) {
903 /* Outer vlan tag. Packet should follow non-vlan path */
904 *vlan_tag = 0xffff;
905 return 0;
906 }
907 if (adapter->flags & QLCNIC_TAGGING_ENABLED)
908 return 0;
909
910 return -EINVAL;
911 }
912
913 static struct qlcnic_rx_buffer *
914 qlcnic_process_rcv(struct qlcnic_adapter *adapter,
915 struct qlcnic_host_sds_ring *sds_ring, int ring,
916 u64 sts_data0)
917 {
918 struct net_device *netdev = adapter->netdev;
919 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
920 struct qlcnic_rx_buffer *buffer;
921 struct sk_buff *skb;
922 struct qlcnic_host_rds_ring *rds_ring;
923 int index, length, cksum, pkt_offset;
924 u16 vid = 0xffff;
925
926 if (unlikely(ring >= adapter->max_rds_rings))
927 return NULL;
928
929 rds_ring = &recv_ctx->rds_rings[ring];
930
931 index = qlcnic_get_sts_refhandle(sts_data0);
932 if (unlikely(index >= rds_ring->num_desc))
933 return NULL;
934
935 buffer = &rds_ring->rx_buf_arr[index];
936 length = qlcnic_get_sts_totallength(sts_data0);
937 cksum = qlcnic_get_sts_status(sts_data0);
938 pkt_offset = qlcnic_get_sts_pkt_offset(sts_data0);
939
940 skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum);
941 if (!skb)
942 return buffer;
943
944 if (length > rds_ring->skb_size)
945 skb_put(skb, rds_ring->skb_size);
946 else
947 skb_put(skb, length);
948
949 if (pkt_offset)
950 skb_pull(skb, pkt_offset);
951
952 if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) {
953 adapter->stats.rxdropped++;
954 dev_kfree_skb(skb);
955 return buffer;
956 }
957
958 skb->protocol = eth_type_trans(skb, netdev);
959
960 if (vid != 0xffff)
961 __vlan_hwaccel_put_tag(skb, vid);
962
963 napi_gro_receive(&sds_ring->napi, skb);
964
965 adapter->stats.rx_pkts++;
966 adapter->stats.rxbytes += length;
967
968 return buffer;
969 }
970
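/* A TCP header without options is 20 bytes; the timestamp option (with
 * padding) adds 12 more. Used to compute the offset of the LRO payload
 * within the aggregated frame.
 */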
971 #define QLC_TCP_HDR_SIZE 20
972 #define QLC_TCP_TS_OPTION_SIZE 12
973 #define QLC_TCP_TS_HDR_SIZE (QLC_TCP_HDR_SIZE + QLC_TCP_TS_OPTION_SIZE)
974
975 static struct qlcnic_rx_buffer *
976 qlcnic_process_lro(struct qlcnic_adapter *adapter,
977 int ring, u64 sts_data0, u64 sts_data1)
978 {
979 struct net_device *netdev = adapter->netdev;
980 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
981 struct qlcnic_rx_buffer *buffer;
982 struct sk_buff *skb;
983 struct qlcnic_host_rds_ring *rds_ring;
984 struct iphdr *iph;
985 struct ipv6hdr *ipv6h;
986 struct tcphdr *th;
987 bool push, timestamp;
988 int index, l2_hdr_offset, l4_hdr_offset;
989 u16 lro_length, length, data_offset, vid = 0xffff;
990 u32 seq_number;
991
992 if (unlikely(ring >= adapter->max_rds_rings))
993 return NULL;
994
995 rds_ring = &recv_ctx->rds_rings[ring];
996
997 index = qlcnic_get_lro_sts_refhandle(sts_data0);
998 if (unlikely(index >= rds_ring->num_desc))
999 return NULL;
1000
1001 buffer = &rds_ring->rx_buf_arr[index];
1002
1003 timestamp = qlcnic_get_lro_sts_timestamp(sts_data0);
1004 lro_length = qlcnic_get_lro_sts_length(sts_data0);
1005 l2_hdr_offset = qlcnic_get_lro_sts_l2_hdr_offset(sts_data0);
1006 l4_hdr_offset = qlcnic_get_lro_sts_l4_hdr_offset(sts_data0);
1007 push = qlcnic_get_lro_sts_push_flag(sts_data0);
1008 seq_number = qlcnic_get_lro_sts_seq_number(sts_data1);
1009
1010 skb = qlcnic_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK);
1011 if (!skb)
1012 return buffer;
1013
1014 if (timestamp)
1015 data_offset = l4_hdr_offset + QLC_TCP_TS_HDR_SIZE;
1016 else
1017 data_offset = l4_hdr_offset + QLC_TCP_HDR_SIZE;
1018
1019 skb_put(skb, lro_length + data_offset);
1020 skb_pull(skb, l2_hdr_offset);
1021
1022 if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) {
1023 adapter->stats.rxdropped++;
1024 dev_kfree_skb(skb);
1025 return buffer;
1026 }
1027
1028 skb->protocol = eth_type_trans(skb, netdev);
1029
1030 if (ntohs(skb->protocol) == ETH_P_IPV6) {
1031 ipv6h = (struct ipv6hdr *)skb->data;
1032 th = (struct tcphdr *)(skb->data + sizeof(struct ipv6hdr));
1033 length = (th->doff << 2) + lro_length;
1034 ipv6h->payload_len = htons(length);
1035 } else {
1036 iph = (struct iphdr *)skb->data;
1037 th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
1038 length = (iph->ihl << 2) + (th->doff << 2) + lro_length;
1039 iph->tot_len = htons(length);
1040 iph->check = 0;
1041 iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
1042 }
1043
1044 th->psh = push;
1045 th->seq = htonl(seq_number);
1046 length = skb->len;
1047
1048 if (adapter->flags & QLCNIC_FW_LRO_MSS_CAP) {
1049 skb_shinfo(skb)->gso_size = qlcnic_get_lro_sts_mss(sts_data1);
1050 if (skb->protocol == htons(ETH_P_IPV6))
1051 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
1052 else
1053 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
1054 }
1055
1056 if (vid != 0xffff)
1057 __vlan_hwaccel_put_tag(skb, vid);
1058 netif_receive_skb(skb);
1059
1060 adapter->stats.lro_pkts++;
1061 adapter->stats.lrobytes += length;
1062
1063 return buffer;
1064 }
1065
1066 int qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max)
1067 {
1068 struct qlcnic_host_rds_ring *rds_ring;
1069 struct qlcnic_adapter *adapter = sds_ring->adapter;
1070 struct list_head *cur;
1071 struct status_desc *desc;
1072 struct qlcnic_rx_buffer *rxbuf;
1073 int opcode, desc_cnt, count = 0;
1074 u64 sts_data0, sts_data1;
1075 u8 ring;
1076 u32 consumer = sds_ring->consumer;
1077
1078 while (count < max) {
1079 desc = &sds_ring->desc_head[consumer];
1080 sts_data0 = le64_to_cpu(desc->status_desc_data[0]);
1081
1082 if (!(sts_data0 & STATUS_OWNER_HOST))
1083 break;
1084
1085 desc_cnt = qlcnic_get_sts_desc_cnt(sts_data0);
1086 opcode = qlcnic_get_sts_opcode(sts_data0);
1087 switch (opcode) {
1088 case QLCNIC_RXPKT_DESC:
1089 case QLCNIC_OLD_RXPKT_DESC:
1090 case QLCNIC_SYN_OFFLOAD:
1091 ring = qlcnic_get_sts_type(sts_data0);
1092 rxbuf = qlcnic_process_rcv(adapter, sds_ring, ring,
1093 sts_data0);
1094 break;
1095 case QLCNIC_LRO_DESC:
1096 ring = qlcnic_get_lro_sts_type(sts_data0);
1097 sts_data1 = le64_to_cpu(desc->status_desc_data[1]);
1098 rxbuf = qlcnic_process_lro(adapter, ring, sts_data0,
1099 sts_data1);
1100 break;
1101 case QLCNIC_RESPONSE_DESC:
1102 qlcnic_handle_fw_message(desc_cnt, consumer, sds_ring);
1103 default:
1104 goto skip;
1105 }
1106 WARN_ON(desc_cnt > 1);
1107
1108 if (likely(rxbuf))
1109 list_add_tail(&rxbuf->list, &sds_ring->free_list[ring]);
1110 else
1111 adapter->stats.null_rxbuf++;
1112 skip:
1113 for (; desc_cnt > 0; desc_cnt--) {
1114 desc = &sds_ring->desc_head[consumer];
1115 desc->status_desc_data[0] = QLCNIC_DESC_OWNER_FW;
1116 consumer = get_next_index(consumer, sds_ring->num_desc);
1117 }
1118 count++;
1119 }
1120
1121 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
1122 rds_ring = &adapter->recv_ctx->rds_rings[ring];
1123 if (!list_empty(&sds_ring->free_list[ring])) {
1124 list_for_each(cur, &sds_ring->free_list[ring]) {
1125 rxbuf = list_entry(cur, struct qlcnic_rx_buffer,
1126 list);
1127 qlcnic_alloc_rx_skb(adapter, rds_ring, rxbuf);
1128 }
1129 spin_lock(&rds_ring->lock);
1130 list_splice_tail_init(&sds_ring->free_list[ring],
1131 &rds_ring->free_list);
1132 spin_unlock(&rds_ring->lock);
1133 }
1134
1135 qlcnic_post_rx_buffers_nodb(adapter, rds_ring, ring);
1136 }
1137
1138 if (count) {
1139 sds_ring->consumer = consumer;
1140 writel(consumer, sds_ring->crb_sts_consumer);
1141 }
1142
1143 return count;
1144 }
1145
1146 void qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter,
1147 struct qlcnic_host_rds_ring *rds_ring, u8 ring_id)
1148 {
1149 struct rcv_desc *pdesc;
1150 struct qlcnic_rx_buffer *buffer;
1151 int count = 0;
1152 u32 producer, handle;
1153 struct list_head *head;
1154
1155 producer = rds_ring->producer;
1156 head = &rds_ring->free_list;
1157
1158 while (!list_empty(head)) {
1159
1160 buffer = list_entry(head->next, struct qlcnic_rx_buffer, list);
1161
1162 if (!buffer->skb) {
1163 if (qlcnic_alloc_rx_skb(adapter, rds_ring, buffer))
1164 break;
1165 }
1166
1167 count++;
1168 list_del(&buffer->list);
1169
1170 /* make a rcv descriptor */
1171 pdesc = &rds_ring->desc_head[producer];
1172 pdesc->addr_buffer = cpu_to_le64(buffer->dma);
1173 handle = qlcnic_get_ref_handle(adapter, buffer->ref_handle,
1174 ring_id);
1175 pdesc->reference_handle = cpu_to_le16(handle);
1176 pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
1177 producer = get_next_index(producer, rds_ring->num_desc);
1178 }
1179
1180 if (count) {
1181 rds_ring->producer = producer;
1182 writel((producer-1) & (rds_ring->num_desc-1),
1183 rds_ring->crb_rcv_producer);
1184 }
1185 }
1186
1187 static void dump_skb(struct sk_buff *skb, struct qlcnic_adapter *adapter)
1188 {
1189 int i;
1190 unsigned char *data = skb->data;
1191
1192 pr_info("\n");
1193 for (i = 0; i < skb->len; i++) {
1194 QLCDB(adapter, DRV, "%02x ", data[i]);
1195 if ((i & 0x0f) == 8)
1196 pr_info("\n");
1197 }
1198 }
1199
1200 static void qlcnic_process_rcv_diag(struct qlcnic_adapter *adapter, int ring,
1201 u64 sts_data0)
1202 {
1203 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
1204 struct sk_buff *skb;
1205 struct qlcnic_host_rds_ring *rds_ring;
1206 int index, length, cksum, pkt_offset;
1207
1208 if (unlikely(ring >= adapter->max_rds_rings))
1209 return;
1210
1211 rds_ring = &recv_ctx->rds_rings[ring];
1212
1213 index = qlcnic_get_sts_refhandle(sts_data0);
1214 length = qlcnic_get_sts_totallength(sts_data0);
1215 if (unlikely(index >= rds_ring->num_desc))
1216 return;
1217
1218 cksum = qlcnic_get_sts_status(sts_data0);
1219 pkt_offset = qlcnic_get_sts_pkt_offset(sts_data0);
1220
1221 skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum);
1222 if (!skb)
1223 return;
1224
1225 if (length > rds_ring->skb_size)
1226 skb_put(skb, rds_ring->skb_size);
1227 else
1228 skb_put(skb, length);
1229
1230 if (pkt_offset)
1231 skb_pull(skb, pkt_offset);
1232
1233 if (!qlcnic_check_loopback_buff(skb->data, adapter->mac_addr))
1234 adapter->ahw->diag_cnt++;
1235 else
1236 dump_skb(skb, adapter);
1237
1238 dev_kfree_skb_any(skb);
1239 adapter->stats.rx_pkts++;
1240 adapter->stats.rxbytes += length;
1241
1242 return;
1243 }
1244
1245 void qlcnic_82xx_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring)
1246 {
1247 struct qlcnic_adapter *adapter = sds_ring->adapter;
1248 struct status_desc *desc;
1249 u64 sts_data0;
1250 int ring, opcode, desc_cnt;
1251
1252 u32 consumer = sds_ring->consumer;
1253
1254 desc = &sds_ring->desc_head[consumer];
1255 sts_data0 = le64_to_cpu(desc->status_desc_data[0]);
1256
1257 if (!(sts_data0 & STATUS_OWNER_HOST))
1258 return;
1259
1260 desc_cnt = qlcnic_get_sts_desc_cnt(sts_data0);
1261 opcode = qlcnic_get_sts_opcode(sts_data0);
1262 switch (opcode) {
1263 case QLCNIC_RESPONSE_DESC:
1264 qlcnic_handle_fw_message(desc_cnt, consumer, sds_ring);
1265 break;
1266 default:
1267 ring = qlcnic_get_sts_type(sts_data0);
1268 qlcnic_process_rcv_diag(adapter, ring, sts_data0);
1269 break;
1270 }
1271
1272 for (; desc_cnt > 0; desc_cnt--) {
1273 desc = &sds_ring->desc_head[consumer];
1274 desc->status_desc_data[0] = cpu_to_le64(STATUS_OWNER_PHANTOM);
1275 consumer = get_next_index(consumer, sds_ring->num_desc);
1276 }
1277
1278 sds_ring->consumer = consumer;
1279 writel(consumer, sds_ring->crb_sts_consumer);
1280 }
1281
1282 int qlcnic_82xx_napi_add(struct qlcnic_adapter *adapter,
1283 struct net_device *netdev)
1284 {
1285 int ring, max_sds_rings;
1286 struct qlcnic_host_sds_ring *sds_ring;
1287 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
1288
1289 if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings))
1290 return -ENOMEM;
1291
1292 max_sds_rings = adapter->max_sds_rings;
1293
1294 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
1295 sds_ring = &recv_ctx->sds_rings[ring];
1296 if (ring == adapter->max_sds_rings - 1)
1297 netif_napi_add(netdev, &sds_ring->napi, qlcnic_poll,
1298 QLCNIC_NETDEV_WEIGHT / max_sds_rings);
1299 else
1300 netif_napi_add(netdev, &sds_ring->napi, qlcnic_rx_poll,
1301 QLCNIC_NETDEV_WEIGHT * 2);
1302 }
1303
1304 if (qlcnic_alloc_tx_rings(adapter, netdev)) {
1305 qlcnic_free_sds_rings(recv_ctx);
1306 return -ENOMEM;
1307 }
1308
1309 return 0;
1310 }
1311
1312 void qlcnic_82xx_napi_del(struct qlcnic_adapter *adapter)
1313 {
1314 int ring;
1315 struct qlcnic_host_sds_ring *sds_ring;
1316 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
1317
1318 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
1319 sds_ring = &recv_ctx->sds_rings[ring];
1320 netif_napi_del(&sds_ring->napi);
1321 }
1322
1323 qlcnic_free_sds_rings(adapter->recv_ctx);
1324 qlcnic_free_tx_rings(adapter);
1325 }
1326
1327 void qlcnic_82xx_napi_enable(struct qlcnic_adapter *adapter)
1328 {
1329 int ring;
1330 struct qlcnic_host_sds_ring *sds_ring;
1331 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
1332
1333 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
1334 return;
1335
1336 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
1337 sds_ring = &recv_ctx->sds_rings[ring];
1338 napi_enable(&sds_ring->napi);
1339 qlcnic_enable_int(sds_ring);
1340 }
1341 }
1342
1343 void qlcnic_82xx_napi_disable(struct qlcnic_adapter *adapter)
1344 {
1345 int ring;
1346 struct qlcnic_host_sds_ring *sds_ring;
1347 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
1348
1349 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
1350 return;
1351
1352 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
1353 sds_ring = &recv_ctx->sds_rings[ring];
1354 qlcnic_disable_int(sds_ring);
1355 napi_synchronize(&sds_ring->napi);
1356 napi_disable(&sds_ring->napi);
1357 }
1358 }
1359
1360 static struct qlcnic_rx_buffer *
1361 qlcnic_83xx_process_rcv(struct qlcnic_adapter *adapter,
1362 struct qlcnic_host_sds_ring *sds_ring,
1363 u8 ring, u64 sts_data[])
1364 {
1365 struct net_device *netdev = adapter->netdev;
1366 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
1367 struct qlcnic_rx_buffer *buffer;
1368 struct sk_buff *skb;
1369 struct qlcnic_host_rds_ring *rds_ring;
1370 int index, length, cksum;
1371 u16 vid = 0xffff;
1372
1373 if (unlikely(ring >= adapter->max_rds_rings))
1374 return NULL;
1375
1376 rds_ring = &recv_ctx->rds_rings[ring];
1377
1378 index = qlcnic_83xx_hndl(sts_data[0]);
1379 if (unlikely(index >= rds_ring->num_desc))
1380 return NULL;
1381
1382 buffer = &rds_ring->rx_buf_arr[index];
1383 length = qlcnic_83xx_pktln(sts_data[0]);
1384 cksum = qlcnic_83xx_csum_status(sts_data[1]);
1385 skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum);
1386 if (!skb)
1387 return buffer;
1388
1389 if (length > rds_ring->skb_size)
1390 skb_put(skb, rds_ring->skb_size);
1391 else
1392 skb_put(skb, length);
1393
1394 if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) {
1395 adapter->stats.rxdropped++;
1396 dev_kfree_skb(skb);
1397 return buffer;
1398 }
1399
1400 skb->protocol = eth_type_trans(skb, netdev);
1401
1402 if (vid != 0xffff)
1403 __vlan_hwaccel_put_tag(skb, vid);
1404
1405 napi_gro_receive(&sds_ring->napi, skb);
1406
1407 adapter->stats.rx_pkts++;
1408 adapter->stats.rxbytes += length;
1409
1410 return buffer;
1411 }
1412
1413 static struct qlcnic_rx_buffer *
1414 qlcnic_83xx_process_lro(struct qlcnic_adapter *adapter,
1415 u8 ring, u64 sts_data[])
1416 {
1417 struct net_device *netdev = adapter->netdev;
1418 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
1419 struct qlcnic_rx_buffer *buffer;
1420 struct sk_buff *skb;
1421 struct qlcnic_host_rds_ring *rds_ring;
1422 struct iphdr *iph;
1423 struct ipv6hdr *ipv6h;
1424 struct tcphdr *th;
1425 bool push;
1426 int l2_hdr_offset, l4_hdr_offset;
1427 int index;
1428 u16 lro_length, length, data_offset, gso_size;
1429 u16 vid = 0xffff;
1430
1431 if (unlikely(ring >= adapter->max_rds_rings))
1432 return NULL;
1433
1434 rds_ring = &recv_ctx->rds_rings[ring];
1435
1436 index = qlcnic_83xx_hndl(sts_data[0]);
1437 if (unlikely(index >= rds_ring->num_desc))
1438 return NULL;
1439
1440 buffer = &rds_ring->rx_buf_arr[index];
1441
1442 lro_length = qlcnic_83xx_lro_pktln(sts_data[0]);
1443 l2_hdr_offset = qlcnic_83xx_l2_hdr_off(sts_data[1]);
1444 l4_hdr_offset = qlcnic_83xx_l4_hdr_off(sts_data[1]);
1445 push = qlcnic_83xx_is_psh_bit(sts_data[1]);
1446
1447 skb = qlcnic_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK);
1448 if (!skb)
1449 return buffer;
1450 if (qlcnic_83xx_is_tstamp(sts_data[1]))
1451 data_offset = l4_hdr_offset + QLCNIC_TCP_TS_HDR_SIZE;
1452 else
1453 data_offset = l4_hdr_offset + QLCNIC_TCP_HDR_SIZE;
1454
1455 skb_put(skb, lro_length + data_offset);
1456 skb_pull(skb, l2_hdr_offset);
1457
1458 if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) {
1459 adapter->stats.rxdropped++;
1460 dev_kfree_skb(skb);
1461 return buffer;
1462 }
1463
1464 skb->protocol = eth_type_trans(skb, netdev);
1465 if (ntohs(skb->protocol) == ETH_P_IPV6) {
1466 ipv6h = (struct ipv6hdr *)skb->data;
1467 th = (struct tcphdr *)(skb->data + sizeof(struct ipv6hdr));
1468
1469 length = (th->doff << 2) + lro_length;
1470 ipv6h->payload_len = htons(length);
1471 } else {
1472 iph = (struct iphdr *)skb->data;
1473 th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
1474 length = (iph->ihl << 2) + (th->doff << 2) + lro_length;
1475 iph->tot_len = htons(length);
1476 iph->check = 0;
1477 iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
1478 }
1479
1480 th->psh = push;
1481 length = skb->len;
1482
1483 if (adapter->flags & QLCNIC_FW_LRO_MSS_CAP) {
1484 gso_size = qlcnic_83xx_get_lro_sts_mss(sts_data[0]);
1485 skb_shinfo(skb)->gso_size = gso_size;
1486 if (skb->protocol == htons(ETH_P_IPV6))
1487 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
1488 else
1489 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
1490 }
1491
1492 if (vid != 0xffff)
1493 __vlan_hwaccel_put_tag(skb, vid);
1494
1495 netif_receive_skb(skb);
1496
1497 adapter->stats.lro_pkts++;
1498 adapter->stats.lrobytes += length;
1499 return buffer;
1500 }
1501
1502 static int qlcnic_83xx_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring,
1503 int max)
1504 {
1505 struct qlcnic_host_rds_ring *rds_ring;
1506 struct qlcnic_adapter *adapter = sds_ring->adapter;
1507 struct list_head *cur;
1508 struct status_desc *desc;
1509 struct qlcnic_rx_buffer *rxbuf = NULL;
1510 u8 ring;
1511 u64 sts_data[2];
1512 int count = 0, opcode;
1513 u32 consumer = sds_ring->consumer;
1514
1515 while (count < max) {
1516 desc = &sds_ring->desc_head[consumer];
1517 sts_data[1] = le64_to_cpu(desc->status_desc_data[1]);
1518 opcode = qlcnic_83xx_opcode(sts_data[1]);
1519 if (!opcode)
1520 break;
1521 sts_data[0] = le64_to_cpu(desc->status_desc_data[0]);
1522 ring = QLCNIC_FETCH_RING_ID(sts_data[0]);
1523
1524 switch (opcode) {
1525 case QLC_83XX_REG_DESC:
1526 rxbuf = qlcnic_83xx_process_rcv(adapter, sds_ring,
1527 ring, sts_data);
1528 break;
1529 case QLC_83XX_LRO_DESC:
1530 rxbuf = qlcnic_83xx_process_lro(adapter, ring,
1531 sts_data);
1532 break;
1533 default:
1534 dev_info(&adapter->pdev->dev,
1535 "Unkonwn opcode: 0x%x\n", opcode);
1536 goto skip;
1537 }
1538
1539 if (likely(rxbuf))
1540 list_add_tail(&rxbuf->list, &sds_ring->free_list[ring]);
1541 else
1542 adapter->stats.null_rxbuf++;
1543 skip:
1544 desc = &sds_ring->desc_head[consumer];
1545 /* Reset the descriptor */
1546 desc->status_desc_data[1] = 0;
1547 consumer = get_next_index(consumer, sds_ring->num_desc);
1548 count++;
1549 }
1550 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
1551 rds_ring = &adapter->recv_ctx->rds_rings[ring];
1552 if (!list_empty(&sds_ring->free_list[ring])) {
1553 list_for_each(cur, &sds_ring->free_list[ring]) {
1554 rxbuf = list_entry(cur, struct qlcnic_rx_buffer,
1555 list);
1556 qlcnic_alloc_rx_skb(adapter, rds_ring, rxbuf);
1557 }
1558 spin_lock(&rds_ring->lock);
1559 list_splice_tail_init(&sds_ring->free_list[ring],
1560 &rds_ring->free_list);
1561 spin_unlock(&rds_ring->lock);
1562 }
1563 qlcnic_post_rx_buffers_nodb(adapter, rds_ring, ring);
1564 }
1565 if (count) {
1566 sds_ring->consumer = consumer;
1567 writel(consumer, sds_ring->crb_sts_consumer);
1568 }
1569 return count;
1570 }
1571
1572 static int qlcnic_83xx_poll(struct napi_struct *napi, int budget)
1573 {
1574 int tx_complete;
1575 int work_done;
1576 struct qlcnic_host_sds_ring *sds_ring;
1577 struct qlcnic_adapter *adapter;
1578 struct qlcnic_host_tx_ring *tx_ring;
1579
1580 sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi);
1581 adapter = sds_ring->adapter;
1582 /* tx ring count = 1 */
1583 tx_ring = adapter->tx_ring;
1584
1585 tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring, budget);
1586 work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget);
1587 if ((work_done < budget) && tx_complete) {
1588 napi_complete(&sds_ring->napi);
1589 qlcnic_83xx_enable_intr(adapter, sds_ring);
1590 }
1591
1592 return work_done;
1593 }
1594
1595 static int qlcnic_83xx_msix_tx_poll(struct napi_struct *napi, int budget)
1596 {
1597 int work_done;
1598 struct qlcnic_host_tx_ring *tx_ring;
1599 struct qlcnic_adapter *adapter;
1600
1601 budget = QLCNIC_TX_POLL_BUDGET;
1602 tx_ring = container_of(napi, struct qlcnic_host_tx_ring, napi);
1603 adapter = tx_ring->adapter;
1604 work_done = qlcnic_process_cmd_ring(adapter, tx_ring, budget);
1605 if (work_done) {
1606 napi_complete(&tx_ring->napi);
1607 if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
1608 qlcnic_83xx_enable_tx_intr(adapter, tx_ring);
1609 }
1610
1611 return work_done;
1612 }
1613
1614 static int qlcnic_83xx_rx_poll(struct napi_struct *napi, int budget)
1615 {
1616 int work_done;
1617 struct qlcnic_host_sds_ring *sds_ring;
1618 struct qlcnic_adapter *adapter;
1619
1620 sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi);
1621 adapter = sds_ring->adapter;
1622 work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget);
1623 if (work_done < budget) {
1624 napi_complete(&sds_ring->napi);
1625 if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
1626 qlcnic_83xx_enable_intr(adapter, sds_ring);
1627 }
1628
1629 return work_done;
1630 }
1631
1632 void qlcnic_83xx_napi_enable(struct qlcnic_adapter *adapter)
1633 {
1634 int ring;
1635 struct qlcnic_host_sds_ring *sds_ring;
1636 struct qlcnic_host_tx_ring *tx_ring;
1637 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
1638
1639 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
1640 return;
1641
1642 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
1643 sds_ring = &recv_ctx->sds_rings[ring];
1644 napi_enable(&sds_ring->napi);
1645 if (adapter->flags & QLCNIC_MSIX_ENABLED)
1646 qlcnic_83xx_enable_intr(adapter, sds_ring);
1647 }
1648
1649 if (adapter->flags & QLCNIC_MSIX_ENABLED) {
1650 for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
1651 tx_ring = &adapter->tx_ring[ring];
1652 napi_enable(&tx_ring->napi);
1653 qlcnic_83xx_enable_tx_intr(adapter, tx_ring);
1654 }
1655 }
1656 }
1657
1658 void qlcnic_83xx_napi_disable(struct qlcnic_adapter *adapter)
1659 {
1660 int ring;
1661 struct qlcnic_host_sds_ring *sds_ring;
1662 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
1663 struct qlcnic_host_tx_ring *tx_ring;
1664
1665 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
1666 return;
1667
1668 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
1669 sds_ring = &recv_ctx->sds_rings[ring];
1670 if (adapter->flags & QLCNIC_MSIX_ENABLED)
1671 qlcnic_83xx_disable_intr(adapter, sds_ring);
1672 napi_synchronize(&sds_ring->napi);
1673 napi_disable(&sds_ring->napi);
1674 }
1675
1676 if (adapter->flags & QLCNIC_MSIX_ENABLED) {
1677 for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
1678 tx_ring = &adapter->tx_ring[ring];
1679 qlcnic_83xx_disable_tx_intr(adapter, tx_ring);
1680 napi_synchronize(&tx_ring->napi);
1681 napi_disable(&tx_ring->napi);
1682 }
1683 }
1684 }
1685
1686 int qlcnic_83xx_napi_add(struct qlcnic_adapter *adapter,
1687 struct net_device *netdev)
1688 {
1689 int ring, max_sds_rings;
1690 struct qlcnic_host_sds_ring *sds_ring;
1691 struct qlcnic_host_tx_ring *tx_ring;
1692 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
1693
1694 if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings))
1695 return -ENOMEM;
1696
1697 max_sds_rings = adapter->max_sds_rings;
1698 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
1699 sds_ring = &recv_ctx->sds_rings[ring];
1700 if (adapter->flags & QLCNIC_MSIX_ENABLED)
1701 netif_napi_add(netdev, &sds_ring->napi,
1702 qlcnic_83xx_rx_poll,
1703 QLCNIC_NETDEV_WEIGHT * 2);
1704 else
1705 netif_napi_add(netdev, &sds_ring->napi,
1706 qlcnic_83xx_poll,
1707 QLCNIC_NETDEV_WEIGHT / max_sds_rings);
1708 }
1709
1710 if (qlcnic_alloc_tx_rings(adapter, netdev)) {
1711 qlcnic_free_sds_rings(recv_ctx);
1712 return -ENOMEM;
1713 }
1714
1715 if (adapter->flags & QLCNIC_MSIX_ENABLED) {
1716 for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
1717 tx_ring = &adapter->tx_ring[ring];
1718 netif_napi_add(netdev, &tx_ring->napi,
1719 qlcnic_83xx_msix_tx_poll,
1720 QLCNIC_NETDEV_WEIGHT);
1721 }
1722 }
1723
1724 return 0;
1725 }
1726
1727 void qlcnic_83xx_napi_del(struct qlcnic_adapter *adapter)
1728 {
1729 int ring;
1730 struct qlcnic_host_sds_ring *sds_ring;
1731 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
1732 struct qlcnic_host_tx_ring *tx_ring;
1733
1734 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
1735 sds_ring = &recv_ctx->sds_rings[ring];
1736 netif_napi_del(&sds_ring->napi);
1737 }
1738
1739 qlcnic_free_sds_rings(adapter->recv_ctx);
1740
1741 if ((adapter->flags & QLCNIC_MSIX_ENABLED)) {
1742 for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
1743 tx_ring = &adapter->tx_ring[ring];
1744 netif_napi_del(&tx_ring->napi);
1745 }
1746 }
1747
1748 qlcnic_free_tx_rings(adapter);
1749 }
1750
1751 void qlcnic_83xx_process_rcv_diag(struct qlcnic_adapter *adapter,
1752 int ring, u64 sts_data[])
1753 {
1754 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
1755 struct sk_buff *skb;
1756 struct qlcnic_host_rds_ring *rds_ring;
1757 int index, length;
1758
1759 if (unlikely(ring >= adapter->max_rds_rings))
1760 return;
1761
1762 rds_ring = &recv_ctx->rds_rings[ring];
1763 index = qlcnic_83xx_hndl(sts_data[0]);
1764 if (unlikely(index >= rds_ring->num_desc))
1765 return;
1766
1767 length = qlcnic_83xx_pktln(sts_data[0]);
1768
1769 skb = qlcnic_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK);
1770 if (!skb)
1771 return;
1772
1773 if (length > rds_ring->skb_size)
1774 skb_put(skb, rds_ring->skb_size);
1775 else
1776 skb_put(skb, length);
1777
1778 if (!qlcnic_check_loopback_buff(skb->data, adapter->mac_addr))
1779 adapter->ahw->diag_cnt++;
1780 else
1781 dump_skb(skb, adapter);
1782
1783 dev_kfree_skb_any(skb);
1784 return;
1785 }
1786
1787 void qlcnic_83xx_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring)
1788 {
1789 struct qlcnic_adapter *adapter = sds_ring->adapter;
1790 struct status_desc *desc;
1791 u64 sts_data[2];
1792 int ring, opcode;
1793 u32 consumer = sds_ring->consumer;
1794
1795 desc = &sds_ring->desc_head[consumer];
1796 sts_data[0] = le64_to_cpu(desc->status_desc_data[0]);
1797 sts_data[1] = le64_to_cpu(desc->status_desc_data[1]);
1798 opcode = qlcnic_83xx_opcode(sts_data[1]);
1799 if (!opcode)
1800 return;
1801
1802 ring = QLCNIC_FETCH_RING_ID(qlcnic_83xx_hndl(sts_data[0]));
1803 qlcnic_83xx_process_rcv_diag(adapter, ring, sts_data);
1804 desc = &sds_ring->desc_head[consumer];
1805 desc->status_desc_data[0] = cpu_to_le64(STATUS_OWNER_PHANTOM);
1806 consumer = get_next_index(consumer, sds_ring->num_desc);
1807 sds_ring->consumer = consumer;
1808 writel(consumer, sds_ring->crb_sts_consumer);
1809 }