/*
 * Copyright (C) 2015 Cavium, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 */

#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/ip.h>
#include <linux/etherdevice.h>
#include <net/ip.h>
#include <net/tso.h>

#include "nic_reg.h"
#include "nic.h"
#include "q_struct.h"
#include "nicvf_queues.h"

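/* Flush the accumulated page reference count to the current receive page.
 * nicvf_alloc_rcv_buffer() batches refcount updates in nic->rb_pageref to
 * avoid one atomic op per carved-out buffer; this applies them in a single
 * atomic_add() and resets the counter.
 */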
static void nicvf_get_page(struct nicvf *nic)
{
	if (!nic->rb_pageref || !nic->rb_page)
		return;

	atomic_add(nic->rb_pageref, &nic->rb_page->_count);
	nic->rb_pageref = 0;
}

/* Poll a register for a specific value */
static int nicvf_poll_reg(struct nicvf *nic, int qidx,
			  u64 reg, int bit_pos, int bits, int val)
{
	u64 bit_mask;
	u64 reg_val;
	int timeout = 10;

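	/* Build a mask 'bits' wide starting at 'bit_pos'; for example,
	 * bit_pos = 62 and bits = 2 (as used for the RBDR FIFO state below)
	 * give bit_mask = 0xC000000000000000, i.e. the top two bits.
	 */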
	bit_mask = (1ULL << bits) - 1;
	bit_mask = (bit_mask << bit_pos);

	while (timeout) {
		reg_val = nicvf_queue_reg_read(nic, reg, qidx);
		if (((reg_val & bit_mask) >> bit_pos) == val)
			return 0;
		usleep_range(1000, 2000);
		timeout--;
	}
	netdev_err(nic->netdev, "Poll on reg 0x%llx failed\n", reg);
	return 1;
}

/* Allocate memory for a queue's descriptors */
static int nicvf_alloc_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem,
				  int q_len, int desc_size, int align_bytes)
{
	dmem->q_len = q_len;
	dmem->size = (desc_size * q_len) + align_bytes;
	/* Save address, need it while freeing */
	dmem->unalign_base = dma_zalloc_coherent(&nic->pdev->dev, dmem->size,
						 &dmem->dma, GFP_KERNEL);
	if (!dmem->unalign_base)
		return -ENOMEM;

	/* Align memory address for 'align_bytes' */
	dmem->phys_base = NICVF_ALIGNED_ADDR((u64)dmem->dma, align_bytes);
	dmem->base = dmem->unalign_base + (dmem->phys_base - dmem->dma);
	return 0;
}

/* Free queue's descriptor memory */
static void nicvf_free_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem)
{
	if (!dmem)
		return;

	dma_free_coherent(&nic->pdev->dev, dmem->size,
			  dmem->unalign_base, dmem->dma);
	dmem->unalign_base = NULL;
	dmem->base = NULL;
}

/* Allocate a buffer for packet reception.
 * HW returns the memory address to which the packet is DMA'ed, not a
 * pointer into the RBDR ring, so save the buffer address at the start of
 * the fragment and align the start address to a cache-aligned address.
 */
static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, gfp_t gfp,
					 u32 buf_len, u64 **rbuf)
{
	int order = (PAGE_SIZE <= 4096) ? PAGE_ALLOC_COSTLY_ORDER : 0;

	/* Check if request can be accommodated in previously allocated page */
	if (nic->rb_page &&
	    ((nic->rb_page_offset + buf_len) < (PAGE_SIZE << order))) {
		nic->rb_pageref++;
		goto ret;
	}

	nicvf_get_page(nic);
	nic->rb_page = NULL;

	/* Allocate a new page */
	if (!nic->rb_page) {
		nic->rb_page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN,
					   order);
		if (!nic->rb_page) {
			nic->drv_stats.rcv_buffer_alloc_failures++;
			return -ENOMEM;
		}
		nic->rb_page_offset = 0;
	}

ret:
	*rbuf = (u64 *)((u64)page_address(nic->rb_page) + nic->rb_page_offset);
	nic->rb_page_offset += buf_len;

	return 0;
}

/* Build skb around receive buffer */
static struct sk_buff *nicvf_rb_ptr_to_skb(struct nicvf *nic,
					   u64 rb_ptr, int len)
{
	void *data;
	struct sk_buff *skb;

	data = phys_to_virt(rb_ptr);

	/* Now build an skb to give to stack */
	skb = build_skb(data, RCV_FRAG_LEN);
	if (!skb) {
		put_page(virt_to_page(data));
		return NULL;
	}

	prefetch(skb->data);
	return skb;
}

/* Allocate RBDR ring and populate receive buffers */
static int nicvf_init_rbdr(struct nicvf *nic, struct rbdr *rbdr,
			   int ring_len, int buf_size)
{
	int idx;
	u64 *rbuf;
	struct rbdr_entry_t *desc;
	int err;

	err = nicvf_alloc_q_desc_mem(nic, &rbdr->dmem, ring_len,
				     sizeof(struct rbdr_entry_t),
				     NICVF_RCV_BUF_ALIGN_BYTES);
	if (err)
		return err;

	rbdr->desc = rbdr->dmem.base;
	/* Buffer size has to be in multiples of 128 bytes */
	rbdr->dma_size = buf_size;
	rbdr->enable = true;
	rbdr->thresh = RBDR_THRESH;

	nic->rb_page = NULL;
	for (idx = 0; idx < ring_len; idx++) {
		err = nicvf_alloc_rcv_buffer(nic, GFP_KERNEL, RCV_FRAG_LEN,
					     &rbuf);
		if (err)
			return err;

		desc = GET_RBDR_DESC(rbdr, idx);
		desc->buf_addr = virt_to_phys(rbuf) >> NICVF_RCV_BUF_ALIGN;
	}

	nicvf_get_page(nic);

	return 0;
}

/* Free RBDR ring and its receive buffers */
static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr)
{
	int head, tail;
	u64 buf_addr;
	struct rbdr_entry_t *desc;

	if (!rbdr)
		return;

	rbdr->enable = false;
	if (!rbdr->dmem.base)
		return;

	head = rbdr->head;
	tail = rbdr->tail;

	/* Release the receive buffers' page references */
	while (head != tail) {
		desc = GET_RBDR_DESC(rbdr, head);
		buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN;
		put_page(virt_to_page(phys_to_virt(buf_addr)));
		head++;
		head &= (rbdr->dmem.q_len - 1);
	}
	/* Release the tail descriptor's buffer as well */
	desc = GET_RBDR_DESC(rbdr, tail);
	buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN;
	put_page(virt_to_page(phys_to_virt(buf_addr)));

	/* Free RBDR ring */
	nicvf_free_q_desc_mem(nic, &rbdr->dmem);
}

/* Refill receive buffer descriptors with new buffers */
static void nicvf_refill_rbdr(struct nicvf *nic, gfp_t gfp)
{
	struct queue_set *qs = nic->qs;
	int rbdr_idx = qs->rbdr_cnt;
	int tail, qcount;
	int refill_rb_cnt;
	struct rbdr *rbdr;
	struct rbdr_entry_t *desc;
	u64 *rbuf;
	int new_rb = 0;

refill:
	if (!rbdr_idx)
		return;
	rbdr_idx--;
	rbdr = &qs->rbdr[rbdr_idx];
	/* Check if it's enabled */
	if (!rbdr->enable)
		goto next_rbdr;

	/* Get number of descriptors to be refilled */
	qcount = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, rbdr_idx);
	qcount &= 0x7FFFF;
	/* Doorbell can be rung with a max of ring size minus 1 */
	if (qcount >= (qs->rbdr_len - 1))
		goto next_rbdr;
	else
		refill_rb_cnt = qs->rbdr_len - qcount - 1;

	/* Start filling descs from tail */
	tail = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, rbdr_idx) >> 3;
	while (refill_rb_cnt) {
		tail++;
		tail &= (rbdr->dmem.q_len - 1);

		if (nicvf_alloc_rcv_buffer(nic, gfp, RCV_FRAG_LEN, &rbuf))
			break;

		desc = GET_RBDR_DESC(rbdr, tail);
		desc->buf_addr = virt_to_phys(rbuf) >> NICVF_RCV_BUF_ALIGN;
		refill_rb_cnt--;
		new_rb++;
	}

	nicvf_get_page(nic);

	/* make sure all memory stores are done before ringing doorbell */
	smp_wmb();

	/* Check if buffer allocation failed */
	if (refill_rb_cnt)
		nic->rb_alloc_fail = true;
	else
		nic->rb_alloc_fail = false;

	/* Notify HW */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR,
			      rbdr_idx, new_rb);
next_rbdr:
	/* Re-enable RBDR interrupts only if buffer allocation is success */
	if (!nic->rb_alloc_fail && rbdr->enable)
		nicvf_enable_intr(nic, NICVF_INTR_RBDR, rbdr_idx);

	if (rbdr_idx)
		goto refill;
}

/* Alloc rcv buffers in non-atomic mode for better success */
void nicvf_rbdr_work(struct work_struct *work)
{
	struct nicvf *nic = container_of(work, struct nicvf, rbdr_work.work);

	nicvf_refill_rbdr(nic, GFP_KERNEL);
	if (nic->rb_alloc_fail)
		schedule_delayed_work(&nic->rbdr_work, msecs_to_jiffies(10));
	else
		nic->rb_work_scheduled = false;
}

/* In Softirq context, alloc rcv buffers in atomic mode */
void nicvf_rbdr_task(unsigned long data)
{
	struct nicvf *nic = (struct nicvf *)data;

	nicvf_refill_rbdr(nic, GFP_ATOMIC);
	if (nic->rb_alloc_fail) {
		nic->rb_work_scheduled = true;
		schedule_delayed_work(&nic->rbdr_work, msecs_to_jiffies(10));
	}
}

/* Initialize completion queue */
static int nicvf_init_cmp_queue(struct nicvf *nic,
				struct cmp_queue *cq, int q_len)
{
	int err;

	err = nicvf_alloc_q_desc_mem(nic, &cq->dmem, q_len, CMP_QUEUE_DESC_SIZE,
				     NICVF_CQ_BASE_ALIGN_BYTES);
	if (err)
		return err;

	cq->desc = cq->dmem.base;
	cq->thresh = pass1_silicon(nic->pdev) ? 0 : CMP_QUEUE_CQE_THRESH;
	nic->cq_coalesce_usecs = (CMP_QUEUE_TIMER_THRESH * 0.05) - 1;

	return 0;
}

static void nicvf_free_cmp_queue(struct nicvf *nic, struct cmp_queue *cq)
{
	if (!cq)
		return;
	if (!cq->dmem.base)
		return;

	nicvf_free_q_desc_mem(nic, &cq->dmem);
}

/* Initialize transmit queue */
static int nicvf_init_snd_queue(struct nicvf *nic,
				struct snd_queue *sq, int q_len)
{
	int err;

	err = nicvf_alloc_q_desc_mem(nic, &sq->dmem, q_len, SND_QUEUE_DESC_SIZE,
				     NICVF_SQ_BASE_ALIGN_BYTES);
	if (err)
		return err;

	sq->desc = sq->dmem.base;
	sq->skbuff = kcalloc(q_len, sizeof(u64), GFP_KERNEL);
	if (!sq->skbuff)
		return -ENOMEM;
	sq->head = 0;
	sq->tail = 0;
	atomic_set(&sq->free_cnt, q_len - 1);
	sq->thresh = SND_QUEUE_THRESH;

	/* Preallocate memory for TSO segment's header */
	sq->tso_hdrs = dma_alloc_coherent(&nic->pdev->dev,
					  q_len * TSO_HEADER_SIZE,
					  &sq->tso_hdrs_phys, GFP_KERNEL);
	if (!sq->tso_hdrs)
		return -ENOMEM;

	return 0;
}

static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq)
{
	if (!sq)
		return;
	if (!sq->dmem.base)
		return;

	if (sq->tso_hdrs)
		dma_free_coherent(&nic->pdev->dev,
				  sq->dmem.q_len * TSO_HEADER_SIZE,
				  sq->tso_hdrs, sq->tso_hdrs_phys);

	kfree(sq->skbuff);
	nicvf_free_q_desc_mem(nic, &sq->dmem);
}

static void nicvf_reclaim_snd_queue(struct nicvf *nic,
				    struct queue_set *qs, int qidx)
{
	/* Disable send queue */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, 0);
	/* Check if SQ is stopped */
	if (nicvf_poll_reg(nic, qidx, NIC_QSET_SQ_0_7_STATUS, 21, 1, 0x01))
		return;
	/* Reset send queue */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);
}

static void nicvf_reclaim_rcv_queue(struct nicvf *nic,
				    struct queue_set *qs, int qidx)
{
	union nic_mbx mbx = {};

	/* Make sure all packets in the pipeline are written back into mem */
	mbx.msg.msg = NIC_MBOX_MSG_RQ_SW_SYNC;
	nicvf_send_msg_to_pf(nic, &mbx);
}

static void nicvf_reclaim_cmp_queue(struct nicvf *nic,
				    struct queue_set *qs, int qidx)
{
	/* Disable timer threshold (doesn't get reset upon CQ reset) */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx, 0);
	/* Disable completion queue */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, 0);
	/* Reset completion queue */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);
}

static void nicvf_reclaim_rbdr(struct nicvf *nic,
			       struct rbdr *rbdr, int qidx)
{
	u64 tmp, fifo_state;
	int timeout = 10;

	/* Save head and tail pointers for freeing up buffers */
	rbdr->head = nicvf_queue_reg_read(nic,
					  NIC_QSET_RBDR_0_1_HEAD,
					  qidx) >> 3;
	rbdr->tail = nicvf_queue_reg_read(nic,
					  NIC_QSET_RBDR_0_1_TAIL,
					  qidx) >> 3;

	/* If RBDR FIFO is in 'FAIL' state then do a reset first
	 * before reclaiming.
	 */
	fifo_state = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, qidx);
	if (((fifo_state >> 62) & 0x03) == 0x3)
		nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
				      qidx, NICVF_RBDR_RESET);

	/* Disable RBDR */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0);
	if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
		return;
	while (1) {
		tmp = nicvf_queue_reg_read(nic,
					   NIC_QSET_RBDR_0_1_PREFETCH_STATUS,
					   qidx);
		if ((tmp & 0xFFFFFFFF) == ((tmp >> 32) & 0xFFFFFFFF))
			break;
		usleep_range(1000, 2000);
		timeout--;
		if (!timeout) {
			netdev_err(nic->netdev,
				   "Failed polling on prefetch status\n");
			return;
		}
	}
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
			      qidx, NICVF_RBDR_RESET);

	if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x02))
		return;
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0x00);
	if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
		return;
}

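/* Enable or disable hardware stripping of the first VLAN tag on receive
 * and mirror the setting to any secondary Qsets. Per the usage here,
 * bit 25 of RQ_GEN_CFG appears to be the VLAN-strip enable bit.
 */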
void nicvf_config_vlan_stripping(struct nicvf *nic, netdev_features_t features)
{
	u64 rq_cfg;
	int sqs;

	rq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_RQ_GEN_CFG, 0);

	/* Enable first VLAN stripping */
	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		rq_cfg |= (1ULL << 25);
	else
		rq_cfg &= ~(1ULL << 25);
	nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0, rq_cfg);

	/* Configure Secondary Qsets, if any */
	for (sqs = 0; sqs < nic->sqs_count; sqs++)
		if (nic->snicvf[sqs])
			nicvf_queue_reg_write(nic->snicvf[sqs],
					      NIC_QSET_RQ_GEN_CFG, 0, rq_cfg);
}

/* Configures receive queue */
static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
				   int qidx, bool enable)
{
	union nic_mbx mbx = {};
	struct rcv_queue *rq;
	struct rq_cfg rq_cfg;

	rq = &qs->rq[qidx];
	rq->enable = enable;

	/* Disable receive queue */
	nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, 0);

	if (!rq->enable) {
		nicvf_reclaim_rcv_queue(nic, qs, qidx);
		return;
	}

	rq->cq_qs = qs->vnic_id;
	rq->cq_idx = qidx;
	rq->start_rbdr_qs = qs->vnic_id;
	rq->start_qs_rbdr_idx = qs->rbdr_cnt - 1;
	rq->cont_rbdr_qs = qs->vnic_id;
	rq->cont_qs_rbdr_idx = qs->rbdr_cnt - 1;
	/* all writes of RBDR data to be loaded into L2 Cache as well */
	rq->caching = 1;

	/* Send a mailbox msg to PF to config RQ */
	mbx.rq.msg = NIC_MBOX_MSG_RQ_CFG;
	mbx.rq.qs_num = qs->vnic_id;
	mbx.rq.rq_num = qidx;
	mbx.rq.cfg = (rq->caching << 26) | (rq->cq_qs << 19) |
		     (rq->cq_idx << 16) | (rq->cont_rbdr_qs << 9) |
		     (rq->cont_qs_rbdr_idx << 8) |
		     (rq->start_rbdr_qs << 1) | (rq->start_qs_rbdr_idx);
	nicvf_send_msg_to_pf(nic, &mbx);

	mbx.rq.msg = NIC_MBOX_MSG_RQ_BP_CFG;
	mbx.rq.cfg = (1ULL << 63) | (1ULL << 62) | (qs->vnic_id << 0);
	nicvf_send_msg_to_pf(nic, &mbx);

	/* RQ drop config
	 * Enable CQ drop to reserve sufficient CQEs for all tx packets
	 */
	mbx.rq.msg = NIC_MBOX_MSG_RQ_DROP_CFG;
	mbx.rq.cfg = (1ULL << 62) | (RQ_CQ_DROP << 8);
	nicvf_send_msg_to_pf(nic, &mbx);

	nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0, 0x00);
	if (!nic->sqs_mode)
		nicvf_config_vlan_stripping(nic, nic->netdev->features);

	/* Enable Receive queue */
	rq_cfg.ena = 1;
	rq_cfg.tcp_ena = 0;
	nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, *(u64 *)&rq_cfg);
}

/* Configures completion queue */
void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs,
			    int qidx, bool enable)
{
	struct cmp_queue *cq;
	struct cq_cfg cq_cfg;

	cq = &qs->cq[qidx];
	cq->enable = enable;

	if (!cq->enable) {
		nicvf_reclaim_cmp_queue(nic, qs, qidx);
		return;
	}

	/* Reset completion queue */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);

	if (!cq->enable)
		return;

	spin_lock_init(&cq->lock);
	/* Set completion queue base address */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_BASE,
			      qidx, (u64)(cq->dmem.phys_base));

	/* Enable Completion queue */
	cq_cfg.ena = 1;
	cq_cfg.reset = 0;
	cq_cfg.caching = 0;
	cq_cfg.qsize = CMP_QSIZE;
	cq_cfg.avg_con = 0;
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, *(u64 *)&cq_cfg);

	/* Set threshold value for interrupt generation */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_THRESH, qidx, cq->thresh);
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2,
			      qidx, CMP_QUEUE_TIMER_THRESH);
}

/* Configures transmit queue */
static void nicvf_snd_queue_config(struct nicvf *nic, struct queue_set *qs,
				   int qidx, bool enable)
{
	union nic_mbx mbx = {};
	struct snd_queue *sq;
	struct sq_cfg sq_cfg;

	sq = &qs->sq[qidx];
	sq->enable = enable;

	if (!sq->enable) {
		nicvf_reclaim_snd_queue(nic, qs, qidx);
		return;
	}

	/* Reset send queue */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);

	sq->cq_qs = qs->vnic_id;
	sq->cq_idx = qidx;

	/* Send a mailbox msg to PF to config SQ */
	mbx.sq.msg = NIC_MBOX_MSG_SQ_CFG;
	mbx.sq.qs_num = qs->vnic_id;
	mbx.sq.sq_num = qidx;
	mbx.sq.sqs_mode = nic->sqs_mode;
	mbx.sq.cfg = (sq->cq_qs << 3) | sq->cq_idx;
	nicvf_send_msg_to_pf(nic, &mbx);

	/* Set queue base address */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_BASE,
			      qidx, (u64)(sq->dmem.phys_base));

	/* Enable send queue & set queue size */
	sq_cfg.ena = 1;
	sq_cfg.reset = 0;
	sq_cfg.ldwb = 0;
	sq_cfg.qsize = SND_QSIZE;
	sq_cfg.tstmp_bgx_intf = 0;
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, *(u64 *)&sq_cfg);

	/* Set threshold value for interrupt generation */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_THRESH, qidx, sq->thresh);

	/* Set queue:cpu affinity for better load distribution */
	if (cpu_online(qidx)) {
		cpumask_set_cpu(qidx, &sq->affinity_mask);
		netif_set_xps_queue(nic->netdev,
				    &sq->affinity_mask, qidx);
	}
}

/* Configures receive buffer descriptor ring */
static void nicvf_rbdr_config(struct nicvf *nic, struct queue_set *qs,
			      int qidx, bool enable)
{
	struct rbdr *rbdr;
	struct rbdr_cfg rbdr_cfg;

	rbdr = &qs->rbdr[qidx];
	nicvf_reclaim_rbdr(nic, rbdr, qidx);
	if (!enable)
		return;

	/* Set descriptor base address */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_BASE,
			      qidx, (u64)(rbdr->dmem.phys_base));

	/* Enable RBDR & set queue size */
	/* Buffer size should be in multiples of 128 bytes */
	rbdr_cfg.ena = 1;
	rbdr_cfg.reset = 0;
	rbdr_cfg.ldwb = 0;
	rbdr_cfg.qsize = RBDR_SIZE;
	rbdr_cfg.avg_con = 0;
	rbdr_cfg.lines = rbdr->dma_size / 128;
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
			      qidx, *(u64 *)&rbdr_cfg);

	/* Notify HW */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR,
			      qidx, qs->rbdr_len - 1);

	/* Set threshold value for interrupt generation */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_THRESH,
			      qidx, rbdr->thresh - 1);
}

/* Requests PF to assign and enable Qset */
void nicvf_qset_config(struct nicvf *nic, bool enable)
{
	union nic_mbx mbx = {};
	struct queue_set *qs = nic->qs;
	struct qs_cfg *qs_cfg;

	if (!qs) {
		netdev_warn(nic->netdev,
			    "Qset is still not allocated, don't init queues\n");
		return;
	}

	qs->enable = enable;
	qs->vnic_id = nic->vf_id;

	/* Send a mailbox msg to PF to config Qset */
	mbx.qs.msg = NIC_MBOX_MSG_QS_CFG;
	mbx.qs.num = qs->vnic_id;
	mbx.qs.sqs_count = nic->sqs_count;

	mbx.qs.cfg = 0;
	qs_cfg = (struct qs_cfg *)&mbx.qs.cfg;
	if (qs->enable) {
		qs_cfg->ena = 1;
#ifdef __BIG_ENDIAN
		qs_cfg->be = 1;
#endif
		qs_cfg->vnic = qs->vnic_id;
	}
	nicvf_send_msg_to_pf(nic, &mbx);
}

static void nicvf_free_resources(struct nicvf *nic)
{
	int qidx;
	struct queue_set *qs = nic->qs;

	/* Free receive buffer descriptor ring */
	for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
		nicvf_free_rbdr(nic, &qs->rbdr[qidx]);

	/* Free completion queue */
	for (qidx = 0; qidx < qs->cq_cnt; qidx++)
		nicvf_free_cmp_queue(nic, &qs->cq[qidx]);

	/* Free send queue */
	for (qidx = 0; qidx < qs->sq_cnt; qidx++)
		nicvf_free_snd_queue(nic, &qs->sq[qidx]);
}

static int nicvf_alloc_resources(struct nicvf *nic)
{
	int qidx;
	struct queue_set *qs = nic->qs;

	/* Alloc receive buffer descriptor ring */
	for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
		if (nicvf_init_rbdr(nic, &qs->rbdr[qidx], qs->rbdr_len,
				    DMA_BUFFER_LEN))
			goto alloc_fail;
	}

	/* Alloc send queue */
	for (qidx = 0; qidx < qs->sq_cnt; qidx++) {
		if (nicvf_init_snd_queue(nic, &qs->sq[qidx], qs->sq_len))
			goto alloc_fail;
	}

	/* Alloc completion queue */
	for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
		if (nicvf_init_cmp_queue(nic, &qs->cq[qidx], qs->cq_len))
			goto alloc_fail;
	}

	return 0;
alloc_fail:
	nicvf_free_resources(nic);
	return -ENOMEM;
}

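/* Allocate the queue_set bookkeeping structure and set the queue counts
 * and lengths from their compile-time defaults.
 */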
int nicvf_set_qset_resources(struct nicvf *nic)
{
	struct queue_set *qs;

	qs = devm_kzalloc(&nic->pdev->dev, sizeof(*qs), GFP_KERNEL);
	if (!qs)
		return -ENOMEM;
	nic->qs = qs;

	/* Set count of each queue */
	qs->rbdr_cnt = RBDR_CNT;
	qs->rq_cnt = RCV_QUEUE_CNT;
	qs->sq_cnt = SND_QUEUE_CNT;
	qs->cq_cnt = CMP_QUEUE_CNT;

	/* Set queue lengths */
	qs->rbdr_len = RCV_BUF_COUNT;
	qs->sq_len = SND_QUEUE_LEN;
	qs->cq_len = CMP_QUEUE_LEN;

	nic->rx_queues = qs->rq_cnt;
	nic->tx_queues = qs->sq_cnt;

	return 0;
}

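/* Bring the Qset's queues up or down. On enable, resources are allocated
 * and SQs, CQs, RBDRs and finally RQs are configured; on disable, RQs are
 * quiesced first and resources are freed last.
 */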
int nicvf_config_data_transfer(struct nicvf *nic, bool enable)
{
	bool disable = false;
	struct queue_set *qs = nic->qs;
	int qidx;

	if (!qs)
		return 0;

	if (enable) {
		if (nicvf_alloc_resources(nic))
			return -ENOMEM;

		for (qidx = 0; qidx < qs->sq_cnt; qidx++)
			nicvf_snd_queue_config(nic, qs, qidx, enable);
		for (qidx = 0; qidx < qs->cq_cnt; qidx++)
			nicvf_cmp_queue_config(nic, qs, qidx, enable);
		for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
			nicvf_rbdr_config(nic, qs, qidx, enable);
		for (qidx = 0; qidx < qs->rq_cnt; qidx++)
			nicvf_rcv_queue_config(nic, qs, qidx, enable);
	} else {
		for (qidx = 0; qidx < qs->rq_cnt; qidx++)
			nicvf_rcv_queue_config(nic, qs, qidx, disable);
		for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
			nicvf_rbdr_config(nic, qs, qidx, disable);
		for (qidx = 0; qidx < qs->sq_cnt; qidx++)
			nicvf_snd_queue_config(nic, qs, qidx, disable);
		for (qidx = 0; qidx < qs->cq_cnt; qidx++)
			nicvf_cmp_queue_config(nic, qs, qidx, disable);

		nicvf_free_resources(nic);
	}

	return 0;
}

/* Get a free desc from SQ
 * returns descriptor pointer & descriptor number
 */
static inline int nicvf_get_sq_desc(struct snd_queue *sq, int desc_cnt)
{
	int qentry;

	qentry = sq->tail;
	atomic_sub(desc_cnt, &sq->free_cnt);
	sq->tail += desc_cnt;
	sq->tail &= (sq->dmem.q_len - 1);

	return qentry;
}

/* Free descriptor back to SQ for future use */
void nicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt)
{
	atomic_add(desc_cnt, &sq->free_cnt);
	sq->head += desc_cnt;
	sq->head &= (sq->dmem.q_len - 1);
}

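/* Advance to the next SQ entry, wrapping at the (power-of-two) ring size */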
static inline int nicvf_get_nxt_sqentry(struct snd_queue *sq, int qentry)
{
	qentry++;
	qentry &= (sq->dmem.q_len - 1);
	return qentry;
}

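/* Set the SQ enable bit and ring the doorbell with zero new descriptors
 * so the hardware resumes processing any pending SQEs.
 */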
void nicvf_sq_enable(struct nicvf *nic, struct snd_queue *sq, int qidx)
{
	u64 sq_cfg;

	sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
	sq_cfg |= NICVF_SQ_EN;
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg);
	/* Ring doorbell so that H/W restarts processing SQEs */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, qidx, 0);
}

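/* Clear the SQ enable bit; descriptors are reclaimed separately,
 * see nicvf_reclaim_snd_queue().
 */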
void nicvf_sq_disable(struct nicvf *nic, int qidx)
{
	u64 sq_cfg;

	sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
	sq_cfg &= ~NICVF_SQ_EN;
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg);
}

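/* Walk the SQ from the software head to the hardware head, freeing skbs
 * whose descriptors the hardware has already consumed; presumably used
 * when a queue is torn down or reset.
 */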
void nicvf_sq_free_used_descs(struct net_device *netdev, struct snd_queue *sq,
			      int qidx)
{
	u64 head, tail;
	struct sk_buff *skb;
	struct nicvf *nic = netdev_priv(netdev);
	struct sq_hdr_subdesc *hdr;

	head = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_HEAD, qidx) >> 4;
	tail = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_TAIL, qidx) >> 4;
	while (sq->head != head) {
		hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, sq->head);
		if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER) {
			nicvf_put_sq_desc(sq, 1);
			continue;
		}
		skb = (struct sk_buff *)sq->skbuff[sq->head];
		if (skb)
			dev_kfree_skb_any(skb);
		atomic64_add(1, (atomic64_t *)&netdev->stats.tx_packets);
		atomic64_add(hdr->tot_len,
			     (atomic64_t *)&netdev->stats.tx_bytes);
		nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
	}
}

/* Calculate no of SQ subdescriptors needed to transmit all
 * segments of this TSO packet.
 * Taken from 'Tilera network driver' with a minor modification.
 */
static int nicvf_tso_count_subdescs(struct sk_buff *skb)
{
	struct skb_shared_info *sh = skb_shinfo(skb);
	unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	unsigned int data_len = skb->len - sh_len;
	unsigned int p_len = sh->gso_size;
	long f_id = -1;	/* id of the current fragment */
	long f_size = skb_headlen(skb) - sh_len;  /* current fragment size */
	long f_used = 0;  /* bytes used from the current fragment */
	long n;  /* size of the current piece of payload */
	int num_edescs = 0;
	int segment;

	for (segment = 0; segment < sh->gso_segs; segment++) {
		unsigned int p_used = 0;

		/* One edesc for header and for each piece of the payload. */
		for (num_edescs++; p_used < p_len; num_edescs++) {
			/* Advance as needed. */
			while (f_used >= f_size) {
				f_id++;
				f_size = skb_frag_size(&sh->frags[f_id]);
				f_used = 0;
			}

			/* Use bytes from the current fragment. */
			n = p_len - p_used;
			if (n > f_size - f_used)
				n = f_size - f_used;
			f_used += n;
			p_used += n;
		}

		/* The last segment may be less than gso_size. */
		data_len -= p_len;
		if (data_len < p_len)
			p_len = data_len;
	}

	/* '+ gso_segs' for SQ_HDR_SUBDESCs for each segment */
	return num_edescs + sh->gso_segs;
}

/* Get the number of SQ descriptors needed to xmit this skb */
static int nicvf_sq_subdesc_required(struct nicvf *nic, struct sk_buff *skb)
{
	int subdesc_cnt = MIN_SQ_DESC_PER_PKT_XMIT;

	if (skb_shinfo(skb)->gso_size && !nic->hw_tso) {
		subdesc_cnt = nicvf_tso_count_subdescs(skb);
		return subdesc_cnt;
	}

	if (skb_shinfo(skb)->nr_frags)
		subdesc_cnt += skb_shinfo(skb)->nr_frags;

	return subdesc_cnt;
}

/* Add SQ HEADER subdescriptor.
 * First subdescriptor for every send descriptor.
 */
static inline void
nicvf_sq_add_hdr_subdesc(struct nicvf *nic, struct snd_queue *sq, int qentry,
			 int subdesc_cnt, struct sk_buff *skb, int len)
{
	int proto;
	struct sq_hdr_subdesc *hdr;

	hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry);
	sq->skbuff[qentry] = (u64)skb;

	memset(hdr, 0, SND_QUEUE_DESC_SIZE);
	hdr->subdesc_type = SQ_DESC_TYPE_HEADER;
	/* Enable notification via CQE after processing SQE */
	hdr->post_cqe = 1;
	/* No of subdescriptors following this */
	hdr->subdesc_cnt = subdesc_cnt;
	hdr->tot_len = len;

	/* Offload checksum calculation to HW */
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		hdr->csum_l3 = 1; /* Enable IP csum calculation */
		hdr->l3_offset = skb_network_offset(skb);
		hdr->l4_offset = skb_transport_offset(skb);

		proto = ip_hdr(skb)->protocol;
		switch (proto) {
		case IPPROTO_TCP:
			hdr->csum_l4 = SEND_L4_CSUM_TCP;
			break;
		case IPPROTO_UDP:
			hdr->csum_l4 = SEND_L4_CSUM_UDP;
			break;
		case IPPROTO_SCTP:
			hdr->csum_l4 = SEND_L4_CSUM_SCTP;
			break;
		}
	}

	if (nic->hw_tso && skb_shinfo(skb)->gso_size) {
		hdr->tso = 1;
		hdr->tso_start = skb_transport_offset(skb) + tcp_hdrlen(skb);
		hdr->tso_max_paysize = skb_shinfo(skb)->gso_size;
		/* For non-tunneled pkts, point this to L2 ethertype */
		hdr->inner_l3_offset = skb_network_offset(skb) - 2;
		nic->drv_stats.tx_tso++;
	}
}

/* SQ GATHER subdescriptor
 * Must follow HDR descriptor
 */
static inline void nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry,
					       int size, u64 data)
{
	struct sq_gather_subdesc *gather;

	qentry &= (sq->dmem.q_len - 1);
	gather = (struct sq_gather_subdesc *)GET_SQ_DESC(sq, qentry);

	memset(gather, 0, SND_QUEUE_DESC_SIZE);
	gather->subdesc_type = SQ_DESC_TYPE_GATHER;
	gather->ld_type = NIC_SEND_LD_TYPE_E_LDD;
	gather->size = size;
	gather->addr = data;
}

/* Segment a TSO packet into 'gso_size' segments and append
 * them to SQ for transfer
 */
static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq,
			       int sq_num, int qentry, struct sk_buff *skb)
{
	struct tso_t tso;
	int seg_subdescs = 0, desc_cnt = 0;
	int seg_len, total_len, data_left;
	int hdr_qentry = qentry;
	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);

	tso_start(skb, &tso);
	total_len = skb->len - hdr_len;
	while (total_len > 0) {
		char *hdr;

		/* Save Qentry for adding HDR_SUBDESC at the end */
		hdr_qentry = qentry;

		data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
		total_len -= data_left;

		/* Add segment's header */
		qentry = nicvf_get_nxt_sqentry(sq, qentry);
		hdr = sq->tso_hdrs + qentry * TSO_HEADER_SIZE;
		tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
		nicvf_sq_add_gather_subdesc(sq, qentry, hdr_len,
					    sq->tso_hdrs_phys +
					    qentry * TSO_HEADER_SIZE);
		/* HDR_SUBDESC + GATHER */
		seg_subdescs = 2;
		seg_len = hdr_len;

		/* Add segment's payload fragments */
		while (data_left > 0) {
			int size;

			size = min_t(int, tso.size, data_left);

			qentry = nicvf_get_nxt_sqentry(sq, qentry);
			nicvf_sq_add_gather_subdesc(sq, qentry, size,
						    virt_to_phys(tso.data));
			seg_subdescs++;
			seg_len += size;

			data_left -= size;
			tso_build_data(skb, &tso, size);
		}
		nicvf_sq_add_hdr_subdesc(nic, sq, hdr_qentry,
					 seg_subdescs - 1, skb, seg_len);
		sq->skbuff[hdr_qentry] = (u64)NULL;
		qentry = nicvf_get_nxt_sqentry(sq, qentry);

		desc_cnt += seg_subdescs;
	}
	/* Save SKB in the last segment for freeing */
	sq->skbuff[hdr_qentry] = (u64)skb;

	/* make sure all memory stores are done before ringing doorbell */
	smp_wmb();

	/* Inform HW to xmit all TSO segments */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR,
			      sq_num, desc_cnt);
	nic->drv_stats.tx_tso++;
	return 1;
}

/* Append an skb to a SQ for packet transfer. */
int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb)
{
	int i, size;
	int subdesc_cnt;
	int sq_num, qentry;
	struct queue_set *qs;
	struct snd_queue *sq;

	sq_num = skb_get_queue_mapping(skb);
	if (sq_num >= MAX_SND_QUEUES_PER_QS) {
		/* Get secondary Qset's SQ structure */
		i = sq_num / MAX_SND_QUEUES_PER_QS;
		if (!nic->snicvf[i - 1]) {
			netdev_warn(nic->netdev,
				    "Secondary Qset#%d's ptr not initialized\n",
				    i - 1);
			return 1;
		}
		nic = (struct nicvf *)nic->snicvf[i - 1];
		sq_num = sq_num % MAX_SND_QUEUES_PER_QS;
	}

	qs = nic->qs;
	sq = &qs->sq[sq_num];

	subdesc_cnt = nicvf_sq_subdesc_required(nic, skb);
	if (subdesc_cnt > atomic_read(&sq->free_cnt))
		goto append_fail;

	qentry = nicvf_get_sq_desc(sq, subdesc_cnt);

	/* Check if it's a TSO packet */
	if (skb_shinfo(skb)->gso_size && !nic->hw_tso)
		return nicvf_sq_append_tso(nic, sq, sq_num, qentry, skb);

	/* Add SQ header subdesc */
	nicvf_sq_add_hdr_subdesc(nic, sq, qentry, subdesc_cnt - 1,
				 skb, skb->len);

	/* Add SQ gather subdescs */
	qentry = nicvf_get_nxt_sqentry(sq, qentry);
	size = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len;
	nicvf_sq_add_gather_subdesc(sq, qentry, size, virt_to_phys(skb->data));

	/* Check for scattered buffer */
	if (!skb_is_nonlinear(skb))
		goto doorbell;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag;

		frag = &skb_shinfo(skb)->frags[i];

		qentry = nicvf_get_nxt_sqentry(sq, qentry);
		size = skb_frag_size(frag);
		nicvf_sq_add_gather_subdesc(sq, qentry, size,
					    virt_to_phys(
					    skb_frag_address(frag)));
	}

doorbell:
	/* make sure all memory stores are done before ringing doorbell */
	smp_wmb();

	/* Inform HW to xmit new packet */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR,
			      sq_num, subdesc_cnt);
	return 1;

append_fail:
	/* Use original PCI dev for debug log */
	nic = nic->pnicvf;
	netdev_dbg(nic->netdev, "Not enough SQ descriptors to xmit pkt\n");
	return 0;
}

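/* rb_lens[] is read as u16s out of 64-bit words; on big-endian systems
 * the four u16s within each u64 sit in reverse order, so remap the index
 * within each group of four.
 */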
static inline unsigned frag_num(unsigned i)
{
#ifdef __BIG_ENDIAN
	return (i & ~3) + 3 - (i & 3);
#else
	return i;
#endif
}

/* Returns SKB for a received packet */
struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
{
	int frag;
	int payload_len = 0;
	struct sk_buff *skb = NULL;
	struct sk_buff *skb_frag = NULL;
	struct sk_buff *prev_frag = NULL;
	u16 *rb_lens = NULL;
	u64 *rb_ptrs = NULL;

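	/* Per the offsets used below, the per-buffer lengths in struct
	 * cqe_rx_t start at 64-bit word 3 and the buffer pointers at
	 * word 6.
	 */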
	rb_lens = (void *)cqe_rx + (3 * sizeof(u64));
	rb_ptrs = (void *)cqe_rx + (6 * sizeof(u64));

	netdev_dbg(nic->netdev, "%s rb_cnt %d rb0_ptr %llx rb0_sz %d\n",
		   __func__, cqe_rx->rb_cnt, cqe_rx->rb0_ptr, cqe_rx->rb0_sz);

	for (frag = 0; frag < cqe_rx->rb_cnt; frag++) {
		payload_len = rb_lens[frag_num(frag)];
		if (!frag) {
			/* First fragment */
			skb = nicvf_rb_ptr_to_skb(nic,
						  *rb_ptrs - cqe_rx->align_pad,
						  payload_len);
			if (!skb)
				return NULL;
			skb_reserve(skb, cqe_rx->align_pad);
			skb_put(skb, payload_len);
		} else {
			/* Add fragments */
			skb_frag = nicvf_rb_ptr_to_skb(nic, *rb_ptrs,
						       payload_len);
			if (!skb_frag) {
				dev_kfree_skb(skb);
				return NULL;
			}

			if (!skb_shinfo(skb)->frag_list)
				skb_shinfo(skb)->frag_list = skb_frag;
			else
				prev_frag->next = skb_frag;

			prev_frag = skb_frag;
			skb->len += payload_len;
			skb->data_len += payload_len;
			skb_frag->len = payload_len;
		}
		/* Next buffer pointer */
		rb_ptrs++;
	}
	return skb;
}

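/* Translate an interrupt type and queue index into its bit in the VF
 * interrupt registers (ENA_W1S/ENA_W1C/NIC_VF_INT). Returns 0 for an
 * unknown type, which callers treat as an error.
 */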
static u64 nicvf_int_type_to_mask(int int_type, int q_idx)
{
	u64 reg_val;

	switch (int_type) {
	case NICVF_INTR_CQ:
		reg_val = ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT);
		break;
	case NICVF_INTR_SQ:
		reg_val = ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT);
		break;
	case NICVF_INTR_RBDR:
		reg_val = ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT);
		break;
	case NICVF_INTR_PKT_DROP:
		reg_val = (1ULL << NICVF_INTR_PKT_DROP_SHIFT);
		break;
	case NICVF_INTR_TCP_TIMER:
		reg_val = (1ULL << NICVF_INTR_TCP_TIMER_SHIFT);
		break;
	case NICVF_INTR_MBOX:
		reg_val = (1ULL << NICVF_INTR_MBOX_SHIFT);
		break;
	case NICVF_INTR_QS_ERR:
		reg_val = (1ULL << NICVF_INTR_QS_ERR_SHIFT);
		break;
	default:
		reg_val = 0;
	}

	return reg_val;
}

/* Enable interrupt */
void nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx)
{
	u64 mask = nicvf_int_type_to_mask(int_type, q_idx);

	if (!mask) {
		netdev_dbg(nic->netdev,
			   "Failed to enable interrupt: unknown type\n");
		return;
	}
	nicvf_reg_write(nic, NIC_VF_ENA_W1S,
			nicvf_reg_read(nic, NIC_VF_ENA_W1S) | mask);
}

/* Disable interrupt */
void nicvf_disable_intr(struct nicvf *nic, int int_type, int q_idx)
{
	u64 mask = nicvf_int_type_to_mask(int_type, q_idx);

	if (!mask) {
		netdev_dbg(nic->netdev,
			   "Failed to disable interrupt: unknown type\n");
		return;
	}

	nicvf_reg_write(nic, NIC_VF_ENA_W1C, mask);
}

/* Clear interrupt */
void nicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx)
{
	u64 mask = nicvf_int_type_to_mask(int_type, q_idx);

	if (!mask) {
		netdev_dbg(nic->netdev,
			   "Failed to clear interrupt: unknown type\n");
		return;
	}

	nicvf_reg_write(nic, NIC_VF_INT, mask);
}

/* Check if interrupt is enabled */
int nicvf_is_intr_enabled(struct nicvf *nic, int int_type, int q_idx)
{
	u64 mask = nicvf_int_type_to_mask(int_type, q_idx);
	/* If interrupt type is unknown, we treat it as disabled. */
	if (!mask) {
		netdev_dbg(nic->netdev,
			   "Failed to check interrupt enable: unknown type\n");
		return 0;
	}

	return mask & nicvf_reg_read(nic, NIC_VF_ENA_W1S);
}

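/* Read the RQ's hardware octet and packet counters into the driver's
 * per-queue stats; nicvf_update_sq_stats() below does the same for SQs.
 */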
void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx)
{
	struct rcv_queue *rq;

#define GET_RQ_STATS(reg) \
	nicvf_reg_read(nic, NIC_QSET_RQ_0_7_STAT_0_1 |\
			    (rq_idx << NIC_Q_NUM_SHIFT) | (reg << 3))

	rq = &nic->qs->rq[rq_idx];
	rq->stats.bytes = GET_RQ_STATS(RQ_SQ_STATS_OCTS);
	rq->stats.pkts = GET_RQ_STATS(RQ_SQ_STATS_PKTS);
}

void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx)
{
	struct snd_queue *sq;

#define GET_SQ_STATS(reg) \
	nicvf_reg_read(nic, NIC_QSET_SQ_0_7_STAT_0_1 |\
			    (sq_idx << NIC_Q_NUM_SHIFT) | (reg << 3))

	sq = &nic->qs->sq[sq_idx];
	sq->stats.bytes = GET_SQ_STATS(RQ_SQ_STATS_OCTS);
	sq->stats.pkts = GET_SQ_STATS(RQ_SQ_STATS_PKTS);
}

/* Check for errors in the receive cmp.queue entry */
int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
{
	struct nicvf_hw_stats *stats = &nic->hw_stats;

	if (!cqe_rx->err_level && !cqe_rx->err_opcode)
		return 0;

	if (netif_msg_rx_err(nic))
		netdev_err(nic->netdev,
			   "%s: RX error CQE err_level 0x%x err_opcode 0x%x\n",
			   nic->netdev->name,
			   cqe_rx->err_level, cqe_rx->err_opcode);

	switch (cqe_rx->err_opcode) {
	case CQ_RX_ERROP_RE_PARTIAL:
		stats->rx_bgx_truncated_pkts++;
		break;
	case CQ_RX_ERROP_RE_JABBER:
		stats->rx_jabber_errs++;
		break;
	case CQ_RX_ERROP_RE_FCS:
		stats->rx_fcs_errs++;
		break;
	case CQ_RX_ERROP_RE_RX_CTL:
		stats->rx_bgx_errs++;
		break;
	case CQ_RX_ERROP_PREL2_ERR:
		stats->rx_prel2_errs++;
		break;
	case CQ_RX_ERROP_L2_MAL:
		stats->rx_l2_hdr_malformed++;
		break;
	case CQ_RX_ERROP_L2_OVERSIZE:
		stats->rx_oversize++;
		break;
	case CQ_RX_ERROP_L2_UNDERSIZE:
		stats->rx_undersize++;
		break;
	case CQ_RX_ERROP_L2_LENMISM:
		stats->rx_l2_len_mismatch++;
		break;
	case CQ_RX_ERROP_L2_PCLP:
		stats->rx_l2_pclp++;
		break;
	case CQ_RX_ERROP_IP_NOT:
		stats->rx_ip_ver_errs++;
		break;
	case CQ_RX_ERROP_IP_CSUM_ERR:
		stats->rx_ip_csum_errs++;
		break;
	case CQ_RX_ERROP_IP_MAL:
		stats->rx_ip_hdr_malformed++;
		break;
	case CQ_RX_ERROP_IP_MALD:
		stats->rx_ip_payload_malformed++;
		break;
	case CQ_RX_ERROP_IP_HOP:
		stats->rx_ip_ttl_errs++;
		break;
	case CQ_RX_ERROP_L3_PCLP:
		stats->rx_l3_pclp++;
		break;
	case CQ_RX_ERROP_L4_MAL:
		stats->rx_l4_malformed++;
		break;
	case CQ_RX_ERROP_L4_CHK:
		stats->rx_l4_csum_errs++;
		break;
	case CQ_RX_ERROP_UDP_LEN:
		stats->rx_udp_len_errs++;
		break;
	case CQ_RX_ERROP_L4_PORT:
		stats->rx_l4_port_errs++;
		break;
	case CQ_RX_ERROP_TCP_FLAG:
		stats->rx_tcp_flag_errs++;
		break;
	case CQ_RX_ERROP_TCP_OFFSET:
		stats->rx_tcp_offset_errs++;
		break;
	case CQ_RX_ERROP_L4_PCLP:
		stats->rx_l4_pclp++;
		break;
	case CQ_RX_ERROP_RBDR_TRUNC:
		stats->rx_truncated_pkts++;
		break;
	}

	return 1;
}

/* Check for errors in the send cmp.queue entry */
int nicvf_check_cqe_tx_errs(struct nicvf *nic,
			    struct cmp_queue *cq, struct cqe_send_t *cqe_tx)
{
	struct cmp_queue_stats *stats = &cq->stats;

	switch (cqe_tx->send_status) {
	case CQ_TX_ERROP_GOOD:
		stats->tx.good++;
		return 0;
	case CQ_TX_ERROP_DESC_FAULT:
		stats->tx.desc_fault++;
		break;
	case CQ_TX_ERROP_HDR_CONS_ERR:
		stats->tx.hdr_cons_err++;
		break;
	case CQ_TX_ERROP_SUBDC_ERR:
		stats->tx.subdesc_err++;
		break;
	case CQ_TX_ERROP_IMM_SIZE_OFLOW:
		stats->tx.imm_size_oflow++;
		break;
	case CQ_TX_ERROP_DATA_SEQUENCE_ERR:
		stats->tx.data_seq_err++;
		break;
	case CQ_TX_ERROP_MEM_SEQUENCE_ERR:
		stats->tx.mem_seq_err++;
		break;
	case CQ_TX_ERROP_LOCK_VIOL:
		stats->tx.lock_viol++;
		break;
	case CQ_TX_ERROP_DATA_FAULT:
		stats->tx.data_fault++;
		break;
	case CQ_TX_ERROP_TSTMP_CONFLICT:
		stats->tx.tstmp_conflict++;
		break;
	case CQ_TX_ERROP_TSTMP_TIMEOUT:
		stats->tx.tstmp_timeout++;
		break;
	case CQ_TX_ERROP_MEM_FAULT:
		stats->tx.mem_fault++;
		break;
	case CQ_TX_ERROP_CK_OVERLAP:
		stats->tx.csum_overlap++;
		break;
	case CQ_TX_ERROP_CK_OFLOW:
		stats->tx.csum_overflow++;
		break;
	}

	return 1;
}