/*
 * QLogic qlcnic NIC Driver
 * Copyright (c) 2009-2013 QLogic Corporation
 *
 * See LICENSE.qlcnic for copyright and licensing details.
 */

#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <linux/ipv6.h>

#include "qlcnic.h"

#define TX_ETHER_PKT	0x01
#define TX_TCP_PKT	0x02
#define TX_UDP_PKT	0x03
#define TX_IP_PKT	0x04
#define TX_TCP_LSO	0x05
#define TX_TCP_LSO6	0x06
#define TX_TCPV6_PKT	0x0b
#define TX_UDPV6_PKT	0x0c
#define FLAGS_VLAN_TAGGED	0x10
#define FLAGS_VLAN_OOB		0x40

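/*
 * The helpers below pack fields of the Tx command descriptor in place.
 * flags_opcode carries the per-packet flags in bits 0-6 and the TX_*
 * opcode in bits 7-12; nfrags__length carries the fragment count in the
 * low byte and the frame length in the upper 24 bits.  Both are stored
 * little-endian, as firmware expects.
 */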
#define qlcnic_set_tx_vlan_tci(cmd_desc, v)	\
	((cmd_desc)->vlan_TCI = cpu_to_le16(v))
#define qlcnic_set_cmd_desc_port(cmd_desc, var)	\
	((cmd_desc)->port_ctxid |= ((var) & 0x0F))
#define qlcnic_set_cmd_desc_ctxid(cmd_desc, var)	\
	((cmd_desc)->port_ctxid |= ((var) << 4 & 0xF0))

#define qlcnic_set_tx_port(_desc, _port) \
	((_desc)->port_ctxid = ((_port) & 0xf) | (((_port) << 4) & 0xf0))

#define qlcnic_set_tx_flags_opcode(_desc, _flags, _opcode) \
	((_desc)->flags_opcode |= \
	cpu_to_le16(((_flags) & 0x7f) | (((_opcode) & 0x3f) << 7)))

#define qlcnic_set_tx_frags_len(_desc, _frags, _len) \
	((_desc)->nfrags__length = \
	cpu_to_le32(((_frags) & 0xff) | (((_len) & 0xffffff) << 8)))

/* owner bits of status_desc */
#define STATUS_OWNER_HOST	(0x1ULL << 56)
#define STATUS_OWNER_PHANTOM	(0x2ULL << 56)

/* Status descriptor:
   0-3 port, 4-7 status, 8-11 type, 12-27 total_length
   28-43 reference_handle, 44-47 protocol, 48-52 pkt_offset
   53-55 desc_cnt, 56-57 owner, 58-63 opcode
 */
#define qlcnic_get_sts_port(sts_data)	\
	((sts_data) & 0x0F)
#define qlcnic_get_sts_status(sts_data)	\
	(((sts_data) >> 4) & 0x0F)
#define qlcnic_get_sts_type(sts_data)	\
	(((sts_data) >> 8) & 0x0F)
#define qlcnic_get_sts_totallength(sts_data)	\
	(((sts_data) >> 12) & 0xFFFF)
#define qlcnic_get_sts_refhandle(sts_data)	\
	(((sts_data) >> 28) & 0xFFFF)
#define qlcnic_get_sts_prot(sts_data)	\
	(((sts_data) >> 44) & 0x0F)
#define qlcnic_get_sts_pkt_offset(sts_data)	\
	(((sts_data) >> 48) & 0x1F)
#define qlcnic_get_sts_desc_cnt(sts_data)	\
	(((sts_data) >> 53) & 0x7)
#define qlcnic_get_sts_opcode(sts_data)	\
	(((sts_data) >> 58) & 0x03F)

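/*
 * The qlcnic_get_sts_* helpers above and the qlcnic_get_lro_sts_*
 * helpers below slice fields out of the 64-bit status words; callers
 * first convert the descriptor with le64_to_cpu() (see
 * qlcnic_process_rcv_ring()) and then decode from the host-endian value.
 */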
#define qlcnic_get_lro_sts_refhandle(sts_data)	\
	((sts_data) & 0x07FFF)
#define qlcnic_get_lro_sts_length(sts_data)	\
	(((sts_data) >> 16) & 0x0FFFF)
#define qlcnic_get_lro_sts_l2_hdr_offset(sts_data)	\
	(((sts_data) >> 32) & 0x0FF)
#define qlcnic_get_lro_sts_l4_hdr_offset(sts_data)	\
	(((sts_data) >> 40) & 0x0FF)
#define qlcnic_get_lro_sts_timestamp(sts_data)	\
	(((sts_data) >> 48) & 0x1)
#define qlcnic_get_lro_sts_type(sts_data)	\
	(((sts_data) >> 49) & 0x7)
#define qlcnic_get_lro_sts_push_flag(sts_data)	\
	(((sts_data) >> 52) & 0x1)
#define qlcnic_get_lro_sts_seq_number(sts_data)	\
	((sts_data) & 0x0FFFFFFFF)
#define qlcnic_get_lro_sts_mss(sts_data1)	\
	((sts_data1 >> 32) & 0x0FFFF)

#define qlcnic_83xx_get_lro_sts_mss(sts)	((sts) & 0xffff)

/* opcode field in status_desc */
#define QLCNIC_SYN_OFFLOAD	0x03
#define QLCNIC_RXPKT_DESC	0x04
#define QLCNIC_OLD_RXPKT_DESC	0x3f
#define QLCNIC_RESPONSE_DESC	0x05
#define QLCNIC_LRO_DESC		0x12

#define QLCNIC_TX_POLL_BUDGET		128
#define QLCNIC_TCP_HDR_SIZE		20
#define QLCNIC_TCP_TS_OPTION_SIZE	12
#define QLCNIC_FETCH_RING_ID(handle)	((handle) >> 63)
#define QLCNIC_DESC_OWNER_FW		cpu_to_le64(STATUS_OWNER_PHANTOM)

#define QLCNIC_TCP_TS_HDR_SIZE (QLCNIC_TCP_HDR_SIZE + QLCNIC_TCP_TS_OPTION_SIZE)

/* for status field in status_desc */
#define STATUS_CKSUM_LOOP	0
#define STATUS_CKSUM_OK		2

#define qlcnic_83xx_pktln(sts)		((sts >> 32) & 0x3FFF)
#define qlcnic_83xx_hndl(sts)		((sts >> 48) & 0x7FFF)
#define qlcnic_83xx_csum_status(sts)	((sts >> 39) & 7)
#define qlcnic_83xx_opcode(sts)		((sts >> 42) & 0xF)
#define qlcnic_83xx_vlan_tag(sts)	(((sts) >> 48) & 0xFFFF)
#define qlcnic_83xx_lro_pktln(sts)	(((sts) >> 32) & 0x3FFF)
#define qlcnic_83xx_l2_hdr_off(sts)	(((sts) >> 16) & 0xFF)
#define qlcnic_83xx_l4_hdr_off(sts)	(((sts) >> 24) & 0xFF)
#define qlcnic_83xx_pkt_cnt(sts)	(((sts) >> 16) & 0x7)
#define qlcnic_83xx_is_tstamp(sts)	(((sts) >> 40) & 1)
#define qlcnic_83xx_is_psh_bit(sts)	(((sts) >> 41) & 1)
#define qlcnic_83xx_is_ip_align(sts)	(((sts) >> 46) & 1)
#define qlcnic_83xx_has_vlan_tag(sts)	(((sts) >> 47) & 1)

struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *,
				     struct qlcnic_host_rds_ring *, u16, u16);

inline void qlcnic_83xx_enable_tx_intr(struct qlcnic_adapter *adapter,
				       struct qlcnic_host_tx_ring *tx_ring)
{
	writel(0, tx_ring->crb_intr_mask);
}

inline void qlcnic_83xx_disable_tx_intr(struct qlcnic_adapter *adapter,
					struct qlcnic_host_tx_ring *tx_ring)
{
	writel(1, tx_ring->crb_intr_mask);
}

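/*
 * Hash a MAC address (held in the low 48 bits of a u64) by XOR-ing its
 * first and last octets; callers mask the result with fbucket_size - 1
 * to pick a filter hash bucket.
 */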
static inline u8 qlcnic_mac_hash(u64 mac)
{
	return (u8)((mac & 0xff) ^ ((mac >> 40) & 0xff));
}

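/*
 * On QLE834X adapters the Rx reference handle returned to firmware
 * carries the RDS ring id in bit 15; 82xx firmware takes the handle
 * unmodified.
 */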
static inline u32 qlcnic_get_ref_handle(struct qlcnic_adapter *adapter,
					u16 handle, u8 ring_id)
{
	if (adapter->pdev->device == PCI_DEVICE_ID_QLOGIC_QLE834X)
		return handle | (ring_id << 15);
	else
		return handle;
}

static inline int qlcnic_82xx_is_lb_pkt(u64 sts_data)
{
	return (qlcnic_get_sts_status(sts_data) == STATUS_CKSUM_LOOP) ? 1 : 0;
}

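/*
 * Driver-side loopback filter learning.  For packets firmware flagged
 * as loopback, remember the source MAC + VLAN in the rx_fhash table
 * (or just refresh the entry's timestamp).  For ordinary receives that
 * match a previously learned entry, re-program and then delete the MAC
 * filter in firmware and drop the stale entry.
 */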
void qlcnic_add_lb_filter(struct qlcnic_adapter *adapter, struct sk_buff *skb,
			  int loopback_pkt, __le16 vlan_id)
{
	struct ethhdr *phdr = (struct ethhdr *)(skb->data);
	struct qlcnic_filter *fil, *tmp_fil;
	struct hlist_node *n;
	struct hlist_head *head;
	unsigned long time;
	u64 src_addr = 0;
	u8 hindex, found = 0, op;
	int ret;

	memcpy(&src_addr, phdr->h_source, ETH_ALEN);

	if (loopback_pkt) {
		if (adapter->rx_fhash.fnum >= adapter->rx_fhash.fmax)
			return;

		hindex = qlcnic_mac_hash(src_addr) &
			 (adapter->fhash.fbucket_size - 1);
		head = &(adapter->rx_fhash.fhead[hindex]);

		hlist_for_each_entry_safe(tmp_fil, n, head, fnode) {
			if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) &&
			    tmp_fil->vlan_id == vlan_id) {
				time = tmp_fil->ftime;
				if (jiffies > (QLCNIC_READD_AGE * HZ + time))
					tmp_fil->ftime = jiffies;
				return;
			}
		}

		fil = kzalloc(sizeof(struct qlcnic_filter), GFP_ATOMIC);
		if (!fil)
			return;

		fil->ftime = jiffies;
		memcpy(fil->faddr, &src_addr, ETH_ALEN);
		fil->vlan_id = vlan_id;
		spin_lock(&adapter->rx_mac_learn_lock);
		hlist_add_head(&(fil->fnode), head);
		adapter->rx_fhash.fnum++;
		spin_unlock(&adapter->rx_mac_learn_lock);
	} else {
		hindex = qlcnic_mac_hash(src_addr) &
			 (adapter->fhash.fbucket_size - 1);
		head = &(adapter->rx_fhash.fhead[hindex]);
		spin_lock(&adapter->rx_mac_learn_lock);
		hlist_for_each_entry_safe(tmp_fil, n, head, fnode) {
			if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) &&
			    tmp_fil->vlan_id == vlan_id) {
				found = 1;
				break;
			}
		}

		if (!found) {
			spin_unlock(&adapter->rx_mac_learn_lock);
			return;
		}

		op = vlan_id ? QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_ADD;
		ret = qlcnic_sre_macaddr_change(adapter, (u8 *)&src_addr,
						vlan_id, op);
		if (!ret) {
			op = vlan_id ? QLCNIC_MAC_VLAN_DEL : QLCNIC_MAC_DEL;
			ret = qlcnic_sre_macaddr_change(adapter,
							(u8 *)&src_addr,
							vlan_id, op);
			if (!ret) {
				hlist_del(&(tmp_fil->fnode));
				adapter->rx_fhash.fnum--;
			}
		}
		spin_unlock(&adapter->rx_mac_learn_lock);
	}
}

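/*
 * Queue a MAC-learn request on the Tx ring: a qlcnic_nic_req descriptor
 * telling firmware to add a filter for the given source MAC (per VLAN
 * when a tag is present).  Firmware consumes it like any other Tx
 * command descriptor.
 */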
void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter, u64 *uaddr,
			       __le16 vlan_id)
{
	struct cmd_desc_type0 *hwdesc;
	struct qlcnic_nic_req *req;
	struct qlcnic_mac_req *mac_req;
	struct qlcnic_vlan_req *vlan_req;
	struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
	u32 producer;
	u64 word;

	producer = tx_ring->producer;
	hwdesc = &tx_ring->desc_head[tx_ring->producer];

	req = (struct qlcnic_nic_req *)hwdesc;
	memset(req, 0, sizeof(struct qlcnic_nic_req));
	req->qhdr = cpu_to_le64(QLCNIC_REQUEST << 23);

	word = QLCNIC_MAC_EVENT | ((u64)(adapter->portnum) << 16);
	req->req_hdr = cpu_to_le64(word);

	mac_req = (struct qlcnic_mac_req *)&(req->words[0]);
	mac_req->op = vlan_id ? QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_ADD;
	/* uaddr points at the MAC bytes; copy the address itself,
	 * not the pointer */
	memcpy(mac_req->mac_addr, uaddr, ETH_ALEN);

	vlan_req = (struct qlcnic_vlan_req *)&req->words[1];
	vlan_req->vlan_id = vlan_id;

	tx_ring->producer = get_next_index(producer, tx_ring->num_desc);
	smp_mb();
}

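/*
 * Learn source MACs seen on transmit: skip frames sourced from our own
 * address, cap the table at fhash.fmax, and either refresh an existing
 * entry (re-posting the firmware filter once it has aged past
 * QLCNIC_READD_AGE) or allocate and program a new one.
 */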
static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
			       struct cmd_desc_type0 *first_desc,
			       struct sk_buff *skb)
{
	struct qlcnic_filter *fil, *tmp_fil;
	struct hlist_node *n;
	struct hlist_head *head;
	struct net_device *netdev = adapter->netdev;
	struct ethhdr *phdr = (struct ethhdr *)(skb->data);
	u64 src_addr = 0;
	__le16 vlan_id = 0;
	u8 hindex;

	if (ether_addr_equal(phdr->h_source, adapter->mac_addr))
		return;

	if (adapter->fhash.fnum >= adapter->fhash.fmax) {
		adapter->stats.mac_filter_limit_overrun++;
		netdev_info(netdev, "Can not add more than %d mac addresses\n",
			    adapter->fhash.fmax);
		return;
	}

	memcpy(&src_addr, phdr->h_source, ETH_ALEN);
	hindex = qlcnic_mac_hash(src_addr) & (adapter->fhash.fbucket_size - 1);
	head = &(adapter->fhash.fhead[hindex]);

	hlist_for_each_entry_safe(tmp_fil, n, head, fnode) {
		if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) &&
		    tmp_fil->vlan_id == vlan_id) {
			if (jiffies > (QLCNIC_READD_AGE * HZ + tmp_fil->ftime))
				qlcnic_change_filter(adapter, &src_addr,
						     vlan_id);
			tmp_fil->ftime = jiffies;
			return;
		}
	}

	fil = kzalloc(sizeof(struct qlcnic_filter), GFP_ATOMIC);
	if (!fil)
		return;

	qlcnic_change_filter(adapter, &src_addr, vlan_id);
	fil->ftime = jiffies;
	fil->vlan_id = vlan_id;
	memcpy(fil->faddr, &src_addr, ETH_ALEN);
	spin_lock(&adapter->mac_learn_lock);
	hlist_add_head(&(fil->fnode), head);
	adapter->fhash.fnum++;
	spin_unlock(&adapter->mac_learn_lock);
}

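/*
 * Fill in the protocol-dependent parts of the first Tx descriptor:
 * VLAN tag handling (inline tag, out-of-band tag, or forced PVID),
 * the TX_* opcode for checksum offload, and, for GSO frames, a copy of
 * the MAC/IP/TCP headers into the descriptor ring so firmware can
 * replicate them per segment.
 */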
static int qlcnic_tx_pkt(struct qlcnic_adapter *adapter,
			 struct cmd_desc_type0 *first_desc, struct sk_buff *skb)
{
	u8 l4proto, opcode = 0, hdr_len = 0;
	u16 flags = 0, vlan_tci = 0;
	int copied, offset, copy_len, size;
	struct cmd_desc_type0 *hwdesc;
	struct vlan_ethhdr *vh;
	struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
	u16 protocol = ntohs(skb->protocol);
	u32 producer = tx_ring->producer;

	if (protocol == ETH_P_8021Q) {
		vh = (struct vlan_ethhdr *)skb->data;
		flags = FLAGS_VLAN_TAGGED;
		vlan_tci = ntohs(vh->h_vlan_TCI);
		protocol = ntohs(vh->h_vlan_encapsulated_proto);
	} else if (vlan_tx_tag_present(skb)) {
		flags = FLAGS_VLAN_OOB;
		vlan_tci = vlan_tx_tag_get(skb);
	}
	if (unlikely(adapter->pvid)) {
		if (vlan_tci && !(adapter->flags & QLCNIC_TAGGING_ENABLED))
			return -EIO;
		if (vlan_tci && (adapter->flags & QLCNIC_TAGGING_ENABLED))
			goto set_flags;

		flags = FLAGS_VLAN_OOB;
		vlan_tci = adapter->pvid;
	}
set_flags:
	qlcnic_set_tx_vlan_tci(first_desc, vlan_tci);
	qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);

	if (*(skb->data) & BIT_0) {
		flags |= BIT_0;
		memcpy(&first_desc->eth_addr, skb->data, ETH_ALEN);
	}
	opcode = TX_ETHER_PKT;
	if (skb_is_gso(skb)) {
		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
		first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
		first_desc->total_hdr_length = hdr_len;
		opcode = (protocol == ETH_P_IPV6) ? TX_TCP_LSO6 : TX_TCP_LSO;

		/* For LSO, we need to copy the MAC/IP/TCP headers into
		 * the descriptor ring */
		copied = 0;
		offset = 2;

		if (flags & FLAGS_VLAN_OOB) {
			first_desc->total_hdr_length += VLAN_HLEN;
			first_desc->tcp_hdr_offset = VLAN_HLEN;
			first_desc->ip_hdr_offset = VLAN_HLEN;

			/* Only in case of TSO on vlan device */
			flags |= FLAGS_VLAN_TAGGED;

			/* Create a TSO vlan header template for firmware */
			hwdesc = &tx_ring->desc_head[producer];
			tx_ring->cmd_buf_arr[producer].skb = NULL;

			copy_len = min((int)sizeof(struct cmd_desc_type0) -
				       offset, hdr_len + VLAN_HLEN);

			vh = (struct vlan_ethhdr *)((char *)hwdesc + 2);
			skb_copy_from_linear_data(skb, vh, 12);
			vh->h_vlan_proto = htons(ETH_P_8021Q);
			vh->h_vlan_TCI = htons(vlan_tci);

			skb_copy_from_linear_data_offset(skb, 12,
							 (char *)vh + 16,
							 copy_len - 16);
			copied = copy_len - VLAN_HLEN;
			offset = 0;
			producer = get_next_index(producer, tx_ring->num_desc);
		}

		while (copied < hdr_len) {
			size = (int)sizeof(struct cmd_desc_type0) - offset;
			copy_len = min(size, (hdr_len - copied));
			hwdesc = &tx_ring->desc_head[producer];
			tx_ring->cmd_buf_arr[producer].skb = NULL;
			skb_copy_from_linear_data_offset(skb, copied,
							 (char *)hwdesc +
							 offset, copy_len);
			copied += copy_len;
			offset = 0;
			producer = get_next_index(producer, tx_ring->num_desc);
		}

		tx_ring->producer = producer;
		smp_mb();
		adapter->stats.lso_frames++;

	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (protocol == ETH_P_IP) {
			l4proto = ip_hdr(skb)->protocol;

			if (l4proto == IPPROTO_TCP)
				opcode = TX_TCP_PKT;
			else if (l4proto == IPPROTO_UDP)
				opcode = TX_UDP_PKT;
		} else if (protocol == ETH_P_IPV6) {
			l4proto = ipv6_hdr(skb)->nexthdr;

			if (l4proto == IPPROTO_TCP)
				opcode = TX_TCPV6_PKT;
			else if (l4proto == IPPROTO_UDP)
				opcode = TX_UDPV6_PKT;
		}
	}
	first_desc->tcp_hdr_offset += skb_transport_offset(skb);
	first_desc->ip_hdr_offset += skb_network_offset(skb);
	qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);

	return 0;
}

static int qlcnic_map_tx_skb(struct pci_dev *pdev, struct sk_buff *skb,
			     struct qlcnic_cmd_buffer *pbuf)
{
	struct qlcnic_skb_frag *nf;
	struct skb_frag_struct *frag;
	int i, nr_frags;
	dma_addr_t map;

	nr_frags = skb_shinfo(skb)->nr_frags;
	nf = &pbuf->frag_array[0];

	map = pci_map_single(pdev, skb->data, skb_headlen(skb),
			     PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(pdev, map))
		goto out_err;

	nf->dma = map;
	nf->length = skb_headlen(skb);

	for (i = 0; i < nr_frags; i++) {
		frag = &skb_shinfo(skb)->frags[i];
		nf = &pbuf->frag_array[i+1];
		map = skb_frag_dma_map(&pdev->dev, frag, 0, skb_frag_size(frag),
				       DMA_TO_DEVICE);
		if (dma_mapping_error(&pdev->dev, map))
			goto unwind;

		nf->dma = map;
		nf->length = skb_frag_size(frag);
	}

	return 0;

unwind:
	while (--i >= 0) {
		nf = &pbuf->frag_array[i+1];
		pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
	}

	nf = &pbuf->frag_array[0];
	pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);

out_err:
	return -ENOMEM;
}

static void qlcnic_unmap_buffers(struct pci_dev *pdev, struct sk_buff *skb,
				 struct qlcnic_cmd_buffer *pbuf)
{
	struct qlcnic_skb_frag *nf = &pbuf->frag_array[0];
	int i, nr_frags = skb_shinfo(skb)->nr_frags;

	for (i = 0; i < nr_frags; i++) {
		nf = &pbuf->frag_array[i+1];
		pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
	}

	nf = &pbuf->frag_array[0];
	pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
	pbuf->skb = NULL;
}

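/*
 * Clear only the descriptor words the driver relies on starting out
 * zero; the remaining words (buffer addresses) are always written
 * before the descriptor is posted to firmware.
 */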
static inline void qlcnic_clear_cmddesc(u64 *desc)
{
	desc[0] = 0ULL;
	desc[2] = 0ULL;
	desc[7] = 0ULL;
}

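/*
 * Main transmit entry point: enforce the per-frame fragment limit
 * (linearizing the excess), make sure enough descriptors are free,
 * DMA-map the skb, emit the buffer addresses four per descriptor, and
 * finally run the protocol/LSO fixups before kicking the producer.
 */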
netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct qlcnic_adapter *adapter = netdev_priv(netdev);
	struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
	struct qlcnic_cmd_buffer *pbuf;
	struct qlcnic_skb_frag *buffrag;
	struct cmd_desc_type0 *hwdesc, *first_desc;
	struct pci_dev *pdev;
	struct ethhdr *phdr;
	int i, k, frag_count, delta = 0;
	u32 producer, num_txd;

	num_txd = tx_ring->num_desc;

	if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
		netif_stop_queue(netdev);
		return NETDEV_TX_BUSY;
	}

	if (adapter->flags & QLCNIC_MACSPOOF) {
		phdr = (struct ethhdr *)skb->data;
		if (!ether_addr_equal(phdr->h_source, adapter->mac_addr))
			goto drop_packet;
	}

	frag_count = skb_shinfo(skb)->nr_frags + 1;
	/* 14 frags supported for normal packet and
	 * 32 frags supported for TSO packet
	 */
	if (!skb_is_gso(skb) && frag_count > QLCNIC_MAX_FRAGS_PER_TX) {
		for (i = 0; i < (frag_count - QLCNIC_MAX_FRAGS_PER_TX); i++)
			delta += skb_frag_size(&skb_shinfo(skb)->frags[i]);

		if (!__pskb_pull_tail(skb, delta))
			goto drop_packet;

		frag_count = 1 + skb_shinfo(skb)->nr_frags;
	}

	if (unlikely(qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH)) {
		netif_stop_queue(netdev);
		if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
			netif_start_queue(netdev);
		} else {
			adapter->stats.xmit_off++;
			return NETDEV_TX_BUSY;
		}
	}

	producer = tx_ring->producer;
	pbuf = &tx_ring->cmd_buf_arr[producer];
	pdev = adapter->pdev;
	first_desc = &tx_ring->desc_head[producer];
	hwdesc = &tx_ring->desc_head[producer];
	qlcnic_clear_cmddesc((u64 *)hwdesc);

	if (qlcnic_map_tx_skb(pdev, skb, pbuf)) {
		adapter->stats.tx_dma_map_error++;
		goto drop_packet;
	}

	pbuf->skb = skb;
	pbuf->frag_count = frag_count;

	qlcnic_set_tx_frags_len(first_desc, frag_count, skb->len);
	qlcnic_set_tx_port(first_desc, adapter->portnum);

	for (i = 0; i < frag_count; i++) {
		k = i % 4;

		if ((k == 0) && (i > 0)) {
			/* move to next desc.*/
			producer = get_next_index(producer, num_txd);
			hwdesc = &tx_ring->desc_head[producer];
			qlcnic_clear_cmddesc((u64 *)hwdesc);
			tx_ring->cmd_buf_arr[producer].skb = NULL;
		}

		buffrag = &pbuf->frag_array[i];
		hwdesc->buffer_length[k] = cpu_to_le16(buffrag->length);
		switch (k) {
		case 0:
			hwdesc->addr_buffer1 = cpu_to_le64(buffrag->dma);
			break;
		case 1:
			hwdesc->addr_buffer2 = cpu_to_le64(buffrag->dma);
			break;
		case 2:
			hwdesc->addr_buffer3 = cpu_to_le64(buffrag->dma);
			break;
		case 3:
			hwdesc->addr_buffer4 = cpu_to_le64(buffrag->dma);
			break;
		}
	}

	tx_ring->producer = get_next_index(producer, num_txd);
	smp_mb();

	if (unlikely(qlcnic_tx_pkt(adapter, first_desc, skb)))
		goto unwind_buff;

	if (adapter->drv_mac_learn)
		qlcnic_send_filter(adapter, first_desc, skb);

	adapter->stats.txbytes += skb->len;
	adapter->stats.xmitcalled++;

	qlcnic_update_cmd_producer(tx_ring);

	return NETDEV_TX_OK;

unwind_buff:
	qlcnic_unmap_buffers(pdev, skb, pbuf);
drop_packet:
	adapter->stats.txdropped++;
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup)
{
	struct net_device *netdev = adapter->netdev;

	if (adapter->ahw->linkup && !linkup) {
		netdev_info(netdev, "NIC Link is down\n");
		adapter->ahw->linkup = 0;
		if (netif_running(netdev)) {
			netif_carrier_off(netdev);
			netif_stop_queue(netdev);
		}
	} else if (!adapter->ahw->linkup && linkup) {
		netdev_info(netdev, "NIC Link is up\n");
		adapter->ahw->linkup = 1;
		if (netif_running(netdev)) {
			netif_carrier_on(netdev);
			netif_wake_queue(netdev);
		}
	}
}

static int qlcnic_alloc_rx_skb(struct qlcnic_adapter *adapter,
			       struct qlcnic_host_rds_ring *rds_ring,
			       struct qlcnic_rx_buffer *buffer)
{
	struct sk_buff *skb;
	dma_addr_t dma;
	struct pci_dev *pdev = adapter->pdev;

	skb = netdev_alloc_skb(adapter->netdev, rds_ring->skb_size);
	if (!skb) {
		adapter->stats.skb_alloc_failure++;
		return -ENOMEM;
	}

	skb_reserve(skb, NET_IP_ALIGN);
	dma = pci_map_single(pdev, skb->data,
			     rds_ring->dma_size, PCI_DMA_FROMDEVICE);

	if (pci_dma_mapping_error(pdev, dma)) {
		adapter->stats.rx_dma_map_error++;
		dev_kfree_skb_any(skb);
		return -ENOMEM;
	}

	buffer->skb = skb;
	buffer->dma = dma;

	return 0;
}

static void qlcnic_post_rx_buffers_nodb(struct qlcnic_adapter *adapter,
					struct qlcnic_host_rds_ring *rds_ring,
					u8 ring_id)
{
	struct rcv_desc *pdesc;
	struct qlcnic_rx_buffer *buffer;
	int count = 0;
	uint32_t producer, handle;
	struct list_head *head;

	if (!spin_trylock(&rds_ring->lock))
		return;

	producer = rds_ring->producer;
	head = &rds_ring->free_list;
	while (!list_empty(head)) {
		buffer = list_entry(head->next, struct qlcnic_rx_buffer, list);

		if (!buffer->skb) {
			if (qlcnic_alloc_rx_skb(adapter, rds_ring, buffer))
				break;
		}
		count++;
		list_del(&buffer->list);

		/* make a rcv descriptor */
		pdesc = &rds_ring->desc_head[producer];
		handle = qlcnic_get_ref_handle(adapter,
					       buffer->ref_handle, ring_id);
		pdesc->reference_handle = cpu_to_le16(handle);
		pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
		pdesc->addr_buffer = cpu_to_le64(buffer->dma);
		producer = get_next_index(producer, rds_ring->num_desc);
	}
	if (count) {
		rds_ring->producer = producer;
		writel((producer - 1) & (rds_ring->num_desc - 1),
		       rds_ring->crb_rcv_producer);
	}
	spin_unlock(&rds_ring->lock);
}

static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter,
				   struct qlcnic_host_tx_ring *tx_ring,
				   int budget)
{
	u32 sw_consumer, hw_consumer;
	int i, done, count = 0;
	struct qlcnic_cmd_buffer *buffer;
	struct pci_dev *pdev = adapter->pdev;
	struct net_device *netdev = adapter->netdev;
	struct qlcnic_skb_frag *frag;

	if (!spin_trylock(&adapter->tx_clean_lock))
		return 1;

	sw_consumer = tx_ring->sw_consumer;
	hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));

	while (sw_consumer != hw_consumer) {
		buffer = &tx_ring->cmd_buf_arr[sw_consumer];
		if (buffer->skb) {
			frag = &buffer->frag_array[0];
			pci_unmap_single(pdev, frag->dma, frag->length,
					 PCI_DMA_TODEVICE);
			frag->dma = 0ULL;
			for (i = 1; i < buffer->frag_count; i++) {
				frag++;
				pci_unmap_page(pdev, frag->dma, frag->length,
					       PCI_DMA_TODEVICE);
				frag->dma = 0ULL;
			}
			adapter->stats.xmitfinished++;
			dev_kfree_skb_any(buffer->skb);
			buffer->skb = NULL;
		}

		sw_consumer = get_next_index(sw_consumer, tx_ring->num_desc);
		if (++count >= budget)
			break;
	}

	if (count && netif_running(netdev)) {
		tx_ring->sw_consumer = sw_consumer;
		smp_mb();
		if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) {
			if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
				netif_wake_queue(netdev);
				adapter->stats.xmit_on++;
			}
		}
		adapter->tx_timeo_cnt = 0;
	}
	/*
	 * If everything is freed up to consumer then check if the ring is full
	 * If the ring is full then check if more needs to be freed and
	 * schedule the call back again.
	 *
	 * This happens when there are 2 CPUs. One could be freeing and the
	 * other filling it. If the ring is full when we get out of here and
	 * the card has already interrupted the host then the host can miss the
	 * interrupt.
	 *
	 * There is still a possible race condition and the host could miss an
	 * interrupt. The card has to take care of this.
	 */
	hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
	done = (sw_consumer == hw_consumer);
	spin_unlock(&adapter->tx_clean_lock);

	return done;
}

static int qlcnic_poll(struct napi_struct *napi, int budget)
{
	int tx_complete, work_done;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_adapter *adapter;

	sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi);
	adapter = sds_ring->adapter;
	tx_complete = qlcnic_process_cmd_ring(adapter, adapter->tx_ring,
					      budget);
	work_done = qlcnic_process_rcv_ring(sds_ring, budget);
	if ((work_done < budget) && tx_complete) {
		napi_complete(&sds_ring->napi);
		if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
			qlcnic_enable_int(sds_ring);
	}

	return work_done;
}

static int qlcnic_rx_poll(struct napi_struct *napi, int budget)
{
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_adapter *adapter;
	int work_done;

	sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi);
	adapter = sds_ring->adapter;

	work_done = qlcnic_process_rcv_ring(sds_ring, budget);

	if (work_done < budget) {
		napi_complete(&sds_ring->napi);
		if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
			qlcnic_enable_int(sds_ring);
	}

	return work_done;
}

static void qlcnic_handle_linkevent(struct qlcnic_adapter *adapter,
				    struct qlcnic_fw_msg *msg)
{
	u32 cable_OUI;
	u16 cable_len, link_speed;
	u8 link_status, module, duplex, autoneg, lb_status = 0;
	struct net_device *netdev = adapter->netdev;

	adapter->ahw->has_link_events = 1;

	cable_OUI = msg->body[1] & 0xffffffff;
	cable_len = (msg->body[1] >> 32) & 0xffff;
	link_speed = (msg->body[1] >> 48) & 0xffff;

	link_status = msg->body[2] & 0xff;
	duplex = (msg->body[2] >> 16) & 0xff;
	autoneg = (msg->body[2] >> 24) & 0xff;
	lb_status = (msg->body[2] >> 32) & 0x3;

	module = (msg->body[2] >> 8) & 0xff;
	if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE)
		dev_info(&netdev->dev,
			 "unsupported cable: OUI 0x%x, length %d\n",
			 cable_OUI, cable_len);
	else if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN)
		dev_info(&netdev->dev, "unsupported cable length %d\n",
			 cable_len);

	if (!link_status && (lb_status == QLCNIC_ILB_MODE ||
			     lb_status == QLCNIC_ELB_MODE))
		adapter->ahw->loopback_state |= QLCNIC_LINKEVENT;

	qlcnic_advert_link_change(adapter, link_status);

	if (duplex == LINKEVENT_FULL_DUPLEX)
		adapter->ahw->link_duplex = DUPLEX_FULL;
	else
		adapter->ahw->link_duplex = DUPLEX_HALF;

	adapter->ahw->module_type = module;
	adapter->ahw->link_autoneg = autoneg;

	if (link_status) {
		adapter->ahw->link_speed = link_speed;
	} else {
		adapter->ahw->link_speed = SPEED_UNKNOWN;
		adapter->ahw->link_duplex = DUPLEX_UNKNOWN;
	}
}

static void qlcnic_handle_fw_message(int desc_cnt, int index,
				     struct qlcnic_host_sds_ring *sds_ring)
{
	struct qlcnic_fw_msg msg;
	struct status_desc *desc;
	struct qlcnic_adapter *adapter;
	struct device *dev;
	int i = 0, opcode, ret;

	while (desc_cnt > 0 && i < 8) {
		desc = &sds_ring->desc_head[index];
		msg.words[i++] = le64_to_cpu(desc->status_desc_data[0]);
		msg.words[i++] = le64_to_cpu(desc->status_desc_data[1]);

		index = get_next_index(index, sds_ring->num_desc);
		desc_cnt--;
	}

	adapter = sds_ring->adapter;
	dev = &adapter->pdev->dev;
	opcode = qlcnic_get_nic_msg_opcode(msg.body[0]);

	switch (opcode) {
	case QLCNIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE:
		qlcnic_handle_linkevent(adapter, &msg);
		break;
	case QLCNIC_C2H_OPCODE_CONFIG_LOOPBACK:
		ret = (u32)(msg.body[1]);
		switch (ret) {
		case 0:
			adapter->ahw->loopback_state |= QLCNIC_LB_RESPONSE;
			break;
		case 1:
			dev_info(dev, "loopback already in progress\n");
			adapter->ahw->diag_cnt = -QLCNIC_TEST_IN_PROGRESS;
			break;
		case 2:
			dev_info(dev, "loopback cable is not connected\n");
			adapter->ahw->diag_cnt = -QLCNIC_LB_CABLE_NOT_CONN;
			break;
		default:
			dev_info(dev,
				 "loopback configure request failed, err %x\n",
				 ret);
			adapter->ahw->diag_cnt = -QLCNIC_UNDEFINED_ERROR;
			break;
		}
		break;
	default:
		break;
	}
}

struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *adapter,
				     struct qlcnic_host_rds_ring *ring,
				     u16 index, u16 cksum)
{
	struct qlcnic_rx_buffer *buffer;
	struct sk_buff *skb;

	buffer = &ring->rx_buf_arr[index];
	if (unlikely(buffer->skb == NULL)) {
		WARN_ON(1);
		return NULL;
	}

	pci_unmap_single(adapter->pdev, buffer->dma, ring->dma_size,
			 PCI_DMA_FROMDEVICE);

	skb = buffer->skb;
	if (likely((adapter->netdev->features & NETIF_F_RXCSUM) &&
		   (cksum == STATUS_CKSUM_OK || cksum == STATUS_CKSUM_LOOP))) {
		adapter->stats.csummed++;
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else {
		skb_checksum_none_assert(skb);
	}

	buffer->skb = NULL;

	return skb;
}

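/*
 * Strip an in-band 802.1Q header from the received frame (firmware
 * leaves it in the packet data) and report the tag.  With a PVID
 * configured, frames tagged with the PVID take the untagged path
 * (tag forced to 0xffff) and any other tag is accepted only when
 * tagging is enabled on the adapter.
 */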
static inline int qlcnic_check_rx_tagging(struct qlcnic_adapter *adapter,
					  struct sk_buff *skb, u16 *vlan_tag)
{
	struct ethhdr *eth_hdr;

	if (!__vlan_get_tag(skb, vlan_tag)) {
		eth_hdr = (struct ethhdr *)skb->data;
		memmove(skb->data + VLAN_HLEN, eth_hdr, ETH_ALEN * 2);
		skb_pull(skb, VLAN_HLEN);
	}
	if (!adapter->pvid)
		return 0;

	if (*vlan_tag == adapter->pvid) {
		/* Outer vlan tag. Packet should follow non-vlan path */
		*vlan_tag = 0xffff;
		return 0;
	}
	if (adapter->flags & QLCNIC_TAGGING_ENABLED)
		return 0;

	return -EINVAL;
}

static struct qlcnic_rx_buffer *
qlcnic_process_rcv(struct qlcnic_adapter *adapter,
		   struct qlcnic_host_sds_ring *sds_ring, int ring,
		   u64 sts_data0)
{
	struct net_device *netdev = adapter->netdev;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
	struct qlcnic_rx_buffer *buffer;
	struct sk_buff *skb;
	struct qlcnic_host_rds_ring *rds_ring;
	int index, length, cksum, pkt_offset, is_lb_pkt;
	u16 vid = 0xffff, t_vid;

	if (unlikely(ring >= adapter->max_rds_rings))
		return NULL;

	rds_ring = &recv_ctx->rds_rings[ring];

	index = qlcnic_get_sts_refhandle(sts_data0);
	if (unlikely(index >= rds_ring->num_desc))
		return NULL;

	buffer = &rds_ring->rx_buf_arr[index];
	length = qlcnic_get_sts_totallength(sts_data0);
	cksum = qlcnic_get_sts_status(sts_data0);
	pkt_offset = qlcnic_get_sts_pkt_offset(sts_data0);

	skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum);
	if (!skb)
		return buffer;

	if (adapter->drv_mac_learn &&
	    (adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
		t_vid = 0;
		is_lb_pkt = qlcnic_82xx_is_lb_pkt(sts_data0);
		qlcnic_add_lb_filter(adapter, skb, is_lb_pkt,
				     cpu_to_le16(t_vid));
	}

	if (length > rds_ring->skb_size)
		skb_put(skb, rds_ring->skb_size);
	else
		skb_put(skb, length);

	if (pkt_offset)
		skb_pull(skb, pkt_offset);

	if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) {
		adapter->stats.rxdropped++;
		dev_kfree_skb(skb);
		return buffer;
	}

	skb->protocol = eth_type_trans(skb, netdev);

	if (vid != 0xffff)
		__vlan_hwaccel_put_tag(skb, vid);

	napi_gro_receive(&sds_ring->napi, skb);

	adapter->stats.rx_pkts++;
	adapter->stats.rxbytes += length;

	return buffer;
}

#define QLC_TCP_HDR_SIZE	20
#define QLC_TCP_TS_OPTION_SIZE	12
#define QLC_TCP_TS_HDR_SIZE	(QLC_TCP_HDR_SIZE + QLC_TCP_TS_OPTION_SIZE)

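/*
 * LRO completion path: firmware hands back an aggregated frame with the
 * original headers in place, so rewrite the IPv4 total length (or IPv6
 * payload length), IP checksum, TCP sequence number and PSH bit, and
 * advertise the MSS so the stack can resegment via GSO if needed.
 */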
static struct qlcnic_rx_buffer *
qlcnic_process_lro(struct qlcnic_adapter *adapter,
		   int ring, u64 sts_data0, u64 sts_data1)
{
	struct net_device *netdev = adapter->netdev;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
	struct qlcnic_rx_buffer *buffer;
	struct sk_buff *skb;
	struct qlcnic_host_rds_ring *rds_ring;
	struct iphdr *iph;
	struct ipv6hdr *ipv6h;
	struct tcphdr *th;
	bool push, timestamp;
	int index, l2_hdr_offset, l4_hdr_offset, is_lb_pkt;
	u16 lro_length, length, data_offset, t_vid, vid = 0xffff;
	u32 seq_number;

	if (unlikely(ring > adapter->max_rds_rings))
		return NULL;

	rds_ring = &recv_ctx->rds_rings[ring];

	index = qlcnic_get_lro_sts_refhandle(sts_data0);
	if (unlikely(index > rds_ring->num_desc))
		return NULL;

	buffer = &rds_ring->rx_buf_arr[index];

	timestamp = qlcnic_get_lro_sts_timestamp(sts_data0);
	lro_length = qlcnic_get_lro_sts_length(sts_data0);
	l2_hdr_offset = qlcnic_get_lro_sts_l2_hdr_offset(sts_data0);
	l4_hdr_offset = qlcnic_get_lro_sts_l4_hdr_offset(sts_data0);
	push = qlcnic_get_lro_sts_push_flag(sts_data0);
	seq_number = qlcnic_get_lro_sts_seq_number(sts_data1);

	skb = qlcnic_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK);
	if (!skb)
		return buffer;

	if (adapter->drv_mac_learn &&
	    (adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
		t_vid = 0;
		is_lb_pkt = qlcnic_82xx_is_lb_pkt(sts_data0);
		qlcnic_add_lb_filter(adapter, skb, is_lb_pkt,
				     cpu_to_le16(t_vid));
	}

	if (timestamp)
		data_offset = l4_hdr_offset + QLC_TCP_TS_HDR_SIZE;
	else
		data_offset = l4_hdr_offset + QLC_TCP_HDR_SIZE;

	skb_put(skb, lro_length + data_offset);
	skb_pull(skb, l2_hdr_offset);

	if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) {
		adapter->stats.rxdropped++;
		dev_kfree_skb(skb);
		return buffer;
	}

	skb->protocol = eth_type_trans(skb, netdev);

	if (ntohs(skb->protocol) == ETH_P_IPV6) {
		ipv6h = (struct ipv6hdr *)skb->data;
		th = (struct tcphdr *)(skb->data + sizeof(struct ipv6hdr));
		length = (th->doff << 2) + lro_length;
		ipv6h->payload_len = htons(length);
	} else {
		iph = (struct iphdr *)skb->data;
		th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
		length = (iph->ihl << 2) + (th->doff << 2) + lro_length;
		iph->tot_len = htons(length);
		iph->check = 0;
		iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
	}

	th->psh = push;
	th->seq = htonl(seq_number);
	length = skb->len;

	if (adapter->flags & QLCNIC_FW_LRO_MSS_CAP) {
		skb_shinfo(skb)->gso_size = qlcnic_get_lro_sts_mss(sts_data1);
		if (skb->protocol == htons(ETH_P_IPV6))
			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
		else
			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
	}

	if (vid != 0xffff)
		__vlan_hwaccel_put_tag(skb, vid);
	netif_receive_skb(skb);

	adapter->stats.lro_pkts++;
	adapter->stats.lrobytes += length;

	return buffer;
}

int qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max)
{
	struct qlcnic_host_rds_ring *rds_ring;
	struct qlcnic_adapter *adapter = sds_ring->adapter;
	struct list_head *cur;
	struct status_desc *desc;
	struct qlcnic_rx_buffer *rxbuf;
	int opcode, desc_cnt, count = 0;
	u64 sts_data0, sts_data1;
	u8 ring;
	u32 consumer = sds_ring->consumer;

	while (count < max) {
		desc = &sds_ring->desc_head[consumer];
		sts_data0 = le64_to_cpu(desc->status_desc_data[0]);

		if (!(sts_data0 & STATUS_OWNER_HOST))
			break;

		desc_cnt = qlcnic_get_sts_desc_cnt(sts_data0);
		opcode = qlcnic_get_sts_opcode(sts_data0);
		switch (opcode) {
		case QLCNIC_RXPKT_DESC:
		case QLCNIC_OLD_RXPKT_DESC:
		case QLCNIC_SYN_OFFLOAD:
			ring = qlcnic_get_sts_type(sts_data0);
			rxbuf = qlcnic_process_rcv(adapter, sds_ring, ring,
						   sts_data0);
			break;
		case QLCNIC_LRO_DESC:
			ring = qlcnic_get_lro_sts_type(sts_data0);
			sts_data1 = le64_to_cpu(desc->status_desc_data[1]);
			rxbuf = qlcnic_process_lro(adapter, ring, sts_data0,
						   sts_data1);
			break;
		case QLCNIC_RESPONSE_DESC:
			qlcnic_handle_fw_message(desc_cnt, consumer, sds_ring);
			/* fall through: no rx buffer to reclaim */
		default:
			goto skip;
		}
		WARN_ON(desc_cnt > 1);

		if (likely(rxbuf))
			list_add_tail(&rxbuf->list, &sds_ring->free_list[ring]);
		else
			adapter->stats.null_rxbuf++;
skip:
		for (; desc_cnt > 0; desc_cnt--) {
			desc = &sds_ring->desc_head[consumer];
			desc->status_desc_data[0] = QLCNIC_DESC_OWNER_FW;
			consumer = get_next_index(consumer, sds_ring->num_desc);
		}
		count++;
	}

	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
		rds_ring = &adapter->recv_ctx->rds_rings[ring];
		if (!list_empty(&sds_ring->free_list[ring])) {
			list_for_each(cur, &sds_ring->free_list[ring]) {
				rxbuf = list_entry(cur, struct qlcnic_rx_buffer,
						   list);
				qlcnic_alloc_rx_skb(adapter, rds_ring, rxbuf);
			}
			spin_lock(&rds_ring->lock);
			list_splice_tail_init(&sds_ring->free_list[ring],
					      &rds_ring->free_list);
			spin_unlock(&rds_ring->lock);
		}

		qlcnic_post_rx_buffers_nodb(adapter, rds_ring, ring);
	}

	if (count) {
		sds_ring->consumer = consumer;
		writel(consumer, sds_ring->crb_sts_consumer);
	}

	return count;
}

void qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter,
			    struct qlcnic_host_rds_ring *rds_ring, u8 ring_id)
{
	struct rcv_desc *pdesc;
	struct qlcnic_rx_buffer *buffer;
	int count = 0;
	u32 producer, handle;
	struct list_head *head;

	producer = rds_ring->producer;
	head = &rds_ring->free_list;

	while (!list_empty(head)) {
		buffer = list_entry(head->next, struct qlcnic_rx_buffer, list);

		if (!buffer->skb) {
			if (qlcnic_alloc_rx_skb(adapter, rds_ring, buffer))
				break;
		}

		count++;
		list_del(&buffer->list);

		/* make a rcv descriptor */
		pdesc = &rds_ring->desc_head[producer];
		pdesc->addr_buffer = cpu_to_le64(buffer->dma);
		handle = qlcnic_get_ref_handle(adapter, buffer->ref_handle,
					       ring_id);
		pdesc->reference_handle = cpu_to_le16(handle);
		pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
		producer = get_next_index(producer, rds_ring->num_desc);
	}

	if (count) {
		rds_ring->producer = producer;
		writel((producer - 1) & (rds_ring->num_desc - 1),
		       rds_ring->crb_rcv_producer);
	}
}

static void dump_skb(struct sk_buff *skb, struct qlcnic_adapter *adapter)
{
	int i;
	unsigned char *data = skb->data;

	/* pr_info() already carries KERN_INFO; don't pass it again */
	pr_info("\n");
	for (i = 0; i < skb->len; i++) {
		QLCDB(adapter, DRV, "%02x ", data[i]);
		if ((i & 0x0f) == 8)
			pr_info("\n");
	}
}

static void qlcnic_process_rcv_diag(struct qlcnic_adapter *adapter, int ring,
				    u64 sts_data0)
{
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
	struct sk_buff *skb;
	struct qlcnic_host_rds_ring *rds_ring;
	int index, length, cksum, pkt_offset;

	if (unlikely(ring >= adapter->max_rds_rings))
		return;

	rds_ring = &recv_ctx->rds_rings[ring];

	index = qlcnic_get_sts_refhandle(sts_data0);
	length = qlcnic_get_sts_totallength(sts_data0);
	if (unlikely(index >= rds_ring->num_desc))
		return;

	cksum = qlcnic_get_sts_status(sts_data0);
	pkt_offset = qlcnic_get_sts_pkt_offset(sts_data0);

	skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum);
	if (!skb)
		return;

	if (length > rds_ring->skb_size)
		skb_put(skb, rds_ring->skb_size);
	else
		skb_put(skb, length);

	if (pkt_offset)
		skb_pull(skb, pkt_offset);

	if (!qlcnic_check_loopback_buff(skb->data, adapter->mac_addr))
		adapter->ahw->diag_cnt++;
	else
		dump_skb(skb, adapter);

	dev_kfree_skb_any(skb);
	adapter->stats.rx_pkts++;
	adapter->stats.rxbytes += length;
}

void qlcnic_82xx_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring)
{
	struct qlcnic_adapter *adapter = sds_ring->adapter;
	struct status_desc *desc;
	u64 sts_data0;
	int ring, opcode, desc_cnt;

	u32 consumer = sds_ring->consumer;

	desc = &sds_ring->desc_head[consumer];
	sts_data0 = le64_to_cpu(desc->status_desc_data[0]);

	if (!(sts_data0 & STATUS_OWNER_HOST))
		return;

	desc_cnt = qlcnic_get_sts_desc_cnt(sts_data0);
	opcode = qlcnic_get_sts_opcode(sts_data0);
	switch (opcode) {
	case QLCNIC_RESPONSE_DESC:
		qlcnic_handle_fw_message(desc_cnt, consumer, sds_ring);
		break;
	default:
		ring = qlcnic_get_sts_type(sts_data0);
		qlcnic_process_rcv_diag(adapter, ring, sts_data0);
		break;
	}

	for (; desc_cnt > 0; desc_cnt--) {
		desc = &sds_ring->desc_head[consumer];
		desc->status_desc_data[0] = cpu_to_le64(STATUS_OWNER_PHANTOM);
		consumer = get_next_index(consumer, sds_ring->num_desc);
	}

	sds_ring->consumer = consumer;
	writel(consumer, sds_ring->crb_sts_consumer);
}

int qlcnic_82xx_napi_add(struct qlcnic_adapter *adapter,
			 struct net_device *netdev)
{
	int ring, max_sds_rings;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;

	if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings))
		return -ENOMEM;

	max_sds_rings = adapter->max_sds_rings;

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];
		if (ring == adapter->max_sds_rings - 1)
			netif_napi_add(netdev, &sds_ring->napi, qlcnic_poll,
				       QLCNIC_NETDEV_WEIGHT / max_sds_rings);
		else
			netif_napi_add(netdev, &sds_ring->napi, qlcnic_rx_poll,
				       QLCNIC_NETDEV_WEIGHT * 2);
	}

	if (qlcnic_alloc_tx_rings(adapter, netdev)) {
		qlcnic_free_sds_rings(recv_ctx);
		return -ENOMEM;
	}

	return 0;
}

void qlcnic_82xx_napi_del(struct qlcnic_adapter *adapter)
{
	int ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];
		netif_napi_del(&sds_ring->napi);
	}

	qlcnic_free_sds_rings(adapter->recv_ctx);
	qlcnic_free_tx_rings(adapter);
}

void qlcnic_82xx_napi_enable(struct qlcnic_adapter *adapter)
{
	int ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;

	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
		return;

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];
		napi_enable(&sds_ring->napi);
		qlcnic_enable_int(sds_ring);
	}
}

void qlcnic_82xx_napi_disable(struct qlcnic_adapter *adapter)
{
	int ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;

	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
		return;

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];
		qlcnic_disable_int(sds_ring);
		napi_synchronize(&sds_ring->napi);
		napi_disable(&sds_ring->napi);
	}
}

#define QLC_83XX_NORMAL_LB_PKT	(1ULL << 36)
#define QLC_83XX_LRO_LB_PKT	(1ULL << 46)

static inline int qlcnic_83xx_is_lb_pkt(u64 sts_data, int lro_pkt)
{
	if (lro_pkt)
		return (sts_data & QLC_83XX_LRO_LB_PKT) ? 1 : 0;
	else
		return (sts_data & QLC_83XX_NORMAL_LB_PKT) ? 1 : 0;
}

static struct qlcnic_rx_buffer *
qlcnic_83xx_process_rcv(struct qlcnic_adapter *adapter,
			struct qlcnic_host_sds_ring *sds_ring,
			u8 ring, u64 sts_data[])
{
	struct net_device *netdev = adapter->netdev;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
	struct qlcnic_rx_buffer *buffer;
	struct sk_buff *skb;
	struct qlcnic_host_rds_ring *rds_ring;
	int index, length, cksum, is_lb_pkt;
	u16 vid = 0xffff, t_vid;

	if (unlikely(ring >= adapter->max_rds_rings))
		return NULL;

	rds_ring = &recv_ctx->rds_rings[ring];

	index = qlcnic_83xx_hndl(sts_data[0]);
	if (unlikely(index >= rds_ring->num_desc))
		return NULL;

	buffer = &rds_ring->rx_buf_arr[index];
	length = qlcnic_83xx_pktln(sts_data[0]);
	cksum = qlcnic_83xx_csum_status(sts_data[1]);
	skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum);
	if (!skb)
		return buffer;

	if (adapter->drv_mac_learn &&
	    (adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
		t_vid = 0;
		is_lb_pkt = qlcnic_83xx_is_lb_pkt(sts_data[1], 0);
		qlcnic_add_lb_filter(adapter, skb, is_lb_pkt,
				     cpu_to_le16(t_vid));
	}

	if (length > rds_ring->skb_size)
		skb_put(skb, rds_ring->skb_size);
	else
		skb_put(skb, length);

	if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) {
		adapter->stats.rxdropped++;
		dev_kfree_skb(skb);
		return buffer;
	}

	skb->protocol = eth_type_trans(skb, netdev);

	if (vid != 0xffff)
		__vlan_hwaccel_put_tag(skb, vid);

	napi_gro_receive(&sds_ring->napi, skb);

	adapter->stats.rx_pkts++;
	adapter->stats.rxbytes += length;

	return buffer;
}

static struct qlcnic_rx_buffer *
qlcnic_83xx_process_lro(struct qlcnic_adapter *adapter,
			u8 ring, u64 sts_data[])
{
	struct net_device *netdev = adapter->netdev;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
	struct qlcnic_rx_buffer *buffer;
	struct sk_buff *skb;
	struct qlcnic_host_rds_ring *rds_ring;
	struct iphdr *iph;
	struct ipv6hdr *ipv6h;
	struct tcphdr *th;
	bool push;
	int l2_hdr_offset, l4_hdr_offset;
	int index, is_lb_pkt;
	u16 lro_length, length, data_offset, gso_size;
	u16 vid = 0xffff, t_vid;

	if (unlikely(ring > adapter->max_rds_rings))
		return NULL;

	rds_ring = &recv_ctx->rds_rings[ring];

	index = qlcnic_83xx_hndl(sts_data[0]);
	if (unlikely(index > rds_ring->num_desc))
		return NULL;

	buffer = &rds_ring->rx_buf_arr[index];

	lro_length = qlcnic_83xx_lro_pktln(sts_data[0]);
	l2_hdr_offset = qlcnic_83xx_l2_hdr_off(sts_data[1]);
	l4_hdr_offset = qlcnic_83xx_l4_hdr_off(sts_data[1]);
	push = qlcnic_83xx_is_psh_bit(sts_data[1]);

	skb = qlcnic_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK);
	if (!skb)
		return buffer;

	if (adapter->drv_mac_learn &&
	    (adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
		t_vid = 0;
		is_lb_pkt = qlcnic_83xx_is_lb_pkt(sts_data[1], 1);
		qlcnic_add_lb_filter(adapter, skb, is_lb_pkt,
				     cpu_to_le16(t_vid));
	}
	if (qlcnic_83xx_is_tstamp(sts_data[1]))
		data_offset = l4_hdr_offset + QLCNIC_TCP_TS_HDR_SIZE;
	else
		data_offset = l4_hdr_offset + QLCNIC_TCP_HDR_SIZE;

	skb_put(skb, lro_length + data_offset);
	skb_pull(skb, l2_hdr_offset);

	if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) {
		adapter->stats.rxdropped++;
		dev_kfree_skb(skb);
		return buffer;
	}

	skb->protocol = eth_type_trans(skb, netdev);
	if (ntohs(skb->protocol) == ETH_P_IPV6) {
		ipv6h = (struct ipv6hdr *)skb->data;
		th = (struct tcphdr *)(skb->data + sizeof(struct ipv6hdr));

		length = (th->doff << 2) + lro_length;
		ipv6h->payload_len = htons(length);
	} else {
		iph = (struct iphdr *)skb->data;
		th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
		length = (iph->ihl << 2) + (th->doff << 2) + lro_length;
		iph->tot_len = htons(length);
		iph->check = 0;
		iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
	}

	th->psh = push;
	length = skb->len;

	if (adapter->flags & QLCNIC_FW_LRO_MSS_CAP) {
		gso_size = qlcnic_83xx_get_lro_sts_mss(sts_data[0]);
		skb_shinfo(skb)->gso_size = gso_size;
		if (skb->protocol == htons(ETH_P_IPV6))
			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
		else
			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
	}

	if (vid != 0xffff)
		__vlan_hwaccel_put_tag(skb, vid);

	netif_receive_skb(skb);

	adapter->stats.lro_pkts++;
	adapter->stats.lrobytes += length;
	return buffer;
}

static int qlcnic_83xx_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring,
					int max)
{
	struct qlcnic_host_rds_ring *rds_ring;
	struct qlcnic_adapter *adapter = sds_ring->adapter;
	struct list_head *cur;
	struct status_desc *desc;
	struct qlcnic_rx_buffer *rxbuf = NULL;
	u8 ring;
	u64 sts_data[2];
	int count = 0, opcode;
	u32 consumer = sds_ring->consumer;

	while (count < max) {
		desc = &sds_ring->desc_head[consumer];
		sts_data[1] = le64_to_cpu(desc->status_desc_data[1]);
		opcode = qlcnic_83xx_opcode(sts_data[1]);
		if (!opcode)
			break;
		sts_data[0] = le64_to_cpu(desc->status_desc_data[0]);
		ring = QLCNIC_FETCH_RING_ID(sts_data[0]);

		switch (opcode) {
		case QLC_83XX_REG_DESC:
			rxbuf = qlcnic_83xx_process_rcv(adapter, sds_ring,
							ring, sts_data);
			break;
		case QLC_83XX_LRO_DESC:
			rxbuf = qlcnic_83xx_process_lro(adapter, ring,
							sts_data);
			break;
		default:
			dev_info(&adapter->pdev->dev,
				 "Unknown opcode: 0x%x\n", opcode);
			goto skip;
		}

		if (likely(rxbuf))
			list_add_tail(&rxbuf->list, &sds_ring->free_list[ring]);
		else
			adapter->stats.null_rxbuf++;
skip:
		desc = &sds_ring->desc_head[consumer];
		/* Reset the descriptor */
		desc->status_desc_data[1] = 0;
		consumer = get_next_index(consumer, sds_ring->num_desc);
		count++;
	}
	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
		rds_ring = &adapter->recv_ctx->rds_rings[ring];
		if (!list_empty(&sds_ring->free_list[ring])) {
			list_for_each(cur, &sds_ring->free_list[ring]) {
				rxbuf = list_entry(cur, struct qlcnic_rx_buffer,
						   list);
				qlcnic_alloc_rx_skb(adapter, rds_ring, rxbuf);
			}
			spin_lock(&rds_ring->lock);
			list_splice_tail_init(&sds_ring->free_list[ring],
					      &rds_ring->free_list);
			spin_unlock(&rds_ring->lock);
		}
		qlcnic_post_rx_buffers_nodb(adapter, rds_ring, ring);
	}
	if (count) {
		sds_ring->consumer = consumer;
		writel(consumer, sds_ring->crb_sts_consumer);
	}
	return count;
}

static int qlcnic_83xx_poll(struct napi_struct *napi, int budget)
{
	int tx_complete;
	int work_done;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_adapter *adapter;
	struct qlcnic_host_tx_ring *tx_ring;

	sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi);
	adapter = sds_ring->adapter;
	/* tx ring count = 1 */
	tx_ring = adapter->tx_ring;

	tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring, budget);
	work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget);
	if ((work_done < budget) && tx_complete) {
		napi_complete(&sds_ring->napi);
		qlcnic_83xx_enable_intr(adapter, sds_ring);
	}

	return work_done;
}

static int qlcnic_83xx_msix_tx_poll(struct napi_struct *napi, int budget)
{
	int work_done;
	struct qlcnic_host_tx_ring *tx_ring;
	struct qlcnic_adapter *adapter;

	budget = QLCNIC_TX_POLL_BUDGET;
	tx_ring = container_of(napi, struct qlcnic_host_tx_ring, napi);
	adapter = tx_ring->adapter;
	work_done = qlcnic_process_cmd_ring(adapter, tx_ring, budget);
	if (work_done) {
		napi_complete(&tx_ring->napi);
		if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
			qlcnic_83xx_enable_tx_intr(adapter, tx_ring);
	}

	return work_done;
}

static int qlcnic_83xx_rx_poll(struct napi_struct *napi, int budget)
{
	int work_done;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_adapter *adapter;

	sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi);
	adapter = sds_ring->adapter;
	work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget);
	if (work_done < budget) {
		napi_complete(&sds_ring->napi);
		if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
			qlcnic_83xx_enable_intr(adapter, sds_ring);
	}

	return work_done;
}

void qlcnic_83xx_napi_enable(struct qlcnic_adapter *adapter)
{
	int ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_host_tx_ring *tx_ring;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;

	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
		return;

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];
		napi_enable(&sds_ring->napi);
		if (adapter->flags & QLCNIC_MSIX_ENABLED)
			qlcnic_83xx_enable_intr(adapter, sds_ring);
	}

	if (adapter->flags & QLCNIC_MSIX_ENABLED) {
		for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
			tx_ring = &adapter->tx_ring[ring];
			napi_enable(&tx_ring->napi);
			qlcnic_83xx_enable_tx_intr(adapter, tx_ring);
		}
	}
}

void qlcnic_83xx_napi_disable(struct qlcnic_adapter *adapter)
{
	int ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
	struct qlcnic_host_tx_ring *tx_ring;

	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
		return;

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];
		if (adapter->flags & QLCNIC_MSIX_ENABLED)
			qlcnic_83xx_disable_intr(adapter, sds_ring);
		napi_synchronize(&sds_ring->napi);
		napi_disable(&sds_ring->napi);
	}

	if (adapter->flags & QLCNIC_MSIX_ENABLED) {
		for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
			tx_ring = &adapter->tx_ring[ring];
			qlcnic_83xx_disable_tx_intr(adapter, tx_ring);
			napi_synchronize(&tx_ring->napi);
			napi_disable(&tx_ring->napi);
		}
	}
}

int qlcnic_83xx_napi_add(struct qlcnic_adapter *adapter,
			 struct net_device *netdev)
{
	int ring, max_sds_rings;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_host_tx_ring *tx_ring;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;

	if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings))
		return -ENOMEM;

	max_sds_rings = adapter->max_sds_rings;
	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];
		if (adapter->flags & QLCNIC_MSIX_ENABLED)
			netif_napi_add(netdev, &sds_ring->napi,
				       qlcnic_83xx_rx_poll,
				       QLCNIC_NETDEV_WEIGHT * 2);
		else
			netif_napi_add(netdev, &sds_ring->napi,
				       qlcnic_83xx_poll,
				       QLCNIC_NETDEV_WEIGHT / max_sds_rings);
	}

	if (qlcnic_alloc_tx_rings(adapter, netdev)) {
		qlcnic_free_sds_rings(recv_ctx);
		return -ENOMEM;
	}

	if (adapter->flags & QLCNIC_MSIX_ENABLED) {
		for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
			tx_ring = &adapter->tx_ring[ring];
			netif_napi_add(netdev, &tx_ring->napi,
				       qlcnic_83xx_msix_tx_poll,
				       QLCNIC_NETDEV_WEIGHT);
		}
	}

	return 0;
}

void qlcnic_83xx_napi_del(struct qlcnic_adapter *adapter)
{
	int ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
	struct qlcnic_host_tx_ring *tx_ring;

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];
		netif_napi_del(&sds_ring->napi);
	}

	qlcnic_free_sds_rings(adapter->recv_ctx);

	if ((adapter->flags & QLCNIC_MSIX_ENABLED)) {
		for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
			tx_ring = &adapter->tx_ring[ring];
			netif_napi_del(&tx_ring->napi);
		}
	}

	qlcnic_free_tx_rings(adapter);
}

void qlcnic_83xx_process_rcv_diag(struct qlcnic_adapter *adapter,
				  int ring, u64 sts_data[])
{
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
	struct sk_buff *skb;
	struct qlcnic_host_rds_ring *rds_ring;
	int index, length;

	if (unlikely(ring >= adapter->max_rds_rings))
		return;

	rds_ring = &recv_ctx->rds_rings[ring];
	index = qlcnic_83xx_hndl(sts_data[0]);
	if (unlikely(index >= rds_ring->num_desc))
		return;

	length = qlcnic_83xx_pktln(sts_data[0]);

	skb = qlcnic_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK);
	if (!skb)
		return;

	if (length > rds_ring->skb_size)
		skb_put(skb, rds_ring->skb_size);
	else
		skb_put(skb, length);

	if (!qlcnic_check_loopback_buff(skb->data, adapter->mac_addr))
		adapter->ahw->diag_cnt++;
	else
		dump_skb(skb, adapter);

	dev_kfree_skb_any(skb);
}

void qlcnic_83xx_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring)
{
	struct qlcnic_adapter *adapter = sds_ring->adapter;
	struct status_desc *desc;
	u64 sts_data[2];
	int ring, opcode;
	u32 consumer = sds_ring->consumer;

	desc = &sds_ring->desc_head[consumer];
	sts_data[0] = le64_to_cpu(desc->status_desc_data[0]);
	sts_data[1] = le64_to_cpu(desc->status_desc_data[1]);
	opcode = qlcnic_83xx_opcode(sts_data[1]);
	if (!opcode)
		return;

	ring = QLCNIC_FETCH_RING_ID(qlcnic_83xx_hndl(sts_data[0]));
	qlcnic_83xx_process_rcv_diag(adapter, ring, sts_data);
	desc = &sds_ring->desc_head[consumer];
	desc->status_desc_data[0] = cpu_to_le64(STATUS_OWNER_PHANTOM);
	consumer = get_next_index(consumer, sds_ring->num_desc);
	sds_ring->consumer = consumer;
	writel(consumer, sds_ring->crb_sts_consumer);
}