/*
 * Copyright (C) 2015 Cavium, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 */

#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/ip.h>
#include <linux/etherdevice.h>
#include <net/ip.h>
#include <net/tso.h>

#include "nic_reg.h"
#include "nic.h"
#include "q_struct.h"
#include "nicvf_queues.h"

struct rbuf_info {
	struct page *page;
	void	*data;
	u64	offset;
};

#define GET_RBUF_INFO(x) ((struct rbuf_info *)(x - NICVF_RCV_BUF_ALIGN_BYTES))
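
/* Layout of one receive buffer (a sketch inferred from
 * nicvf_alloc_rcv_buffer() below):
 *
 *   data                        rinfo                       HW buffer
 *   |<--- pad to cache line --->|<-- ALIGN_BYTES (128) -->|
 *
 * The rbuf_info metadata is written at the first cache-line-aligned
 * address at or after 'data', and the address handed to HW starts
 * NICVF_RCV_BUF_ALIGN_BYTES after it, so GET_RBUF_INFO() can step back
 * from the HW-reported address to recover the metadata.
 */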
28 | ||
29 | /* Poll a register for a specific value */ | |
30 | static int nicvf_poll_reg(struct nicvf *nic, int qidx, | |
31 | u64 reg, int bit_pos, int bits, int val) | |
32 | { | |
33 | u64 bit_mask; | |
34 | u64 reg_val; | |
35 | int timeout = 10; | |
36 | ||
37 | bit_mask = (1ULL << bits) - 1; | |
38 | bit_mask = (bit_mask << bit_pos); | |
39 | ||
40 | while (timeout) { | |
41 | reg_val = nicvf_queue_reg_read(nic, reg, qidx); | |
42 | if (((reg_val & bit_mask) >> bit_pos) == val) | |
43 | return 0; | |
44 | usleep_range(1000, 2000); | |
45 | timeout--; | |
46 | } | |
47 | netdev_err(nic->netdev, "Poll on reg 0x%llx failed\n", reg); | |
48 | return 1; | |
49 | } | |
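
/* Worked example of the mask arithmetic above (illustrative only):
 * nicvf_reclaim_rbdr() below polls with bit_pos = 62 and bits = 2, so
 *   bit_mask = ((1ULL << 2) - 1) << 62 = 0xC000000000000000,
 * and the poll succeeds once bits 63:62 of the status register equal the
 * requested 'val' (e.g. 0x00 for an idle RBDR FIFO).
 */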
50 | ||
51 | /* Allocate memory for a queue's descriptors */ | |
52 | static int nicvf_alloc_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem, | |
53 | int q_len, int desc_size, int align_bytes) | |
54 | { | |
55 | dmem->q_len = q_len; | |
56 | dmem->size = (desc_size * q_len) + align_bytes; | |
57 | /* Save address, need it while freeing */ | |
58 | dmem->unalign_base = dma_zalloc_coherent(&nic->pdev->dev, dmem->size, | |
59 | &dmem->dma, GFP_KERNEL); | |
60 | if (!dmem->unalign_base) | |
61 | return -ENOMEM; | |
62 | ||
63 | /* Align memory address for 'align_bytes' */ | |
64 | dmem->phys_base = NICVF_ALIGNED_ADDR((u64)dmem->dma, align_bytes); | |
65 | dmem->base = (void *)((u8 *)dmem->unalign_base + | |
66 | (dmem->phys_base - dmem->dma)); | |
67 | return 0; | |
68 | } | |
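
/* Example of the over-allocate-then-align pattern above (a sketch with
 * illustrative numbers; it assumes NICVF_ALIGNED_ADDR() rounds an address
 * up to the next 'align_bytes' boundary): with desc_size = 16,
 * q_len = 8192 and align_bytes = 128, 'size' is 128 KB plus 128 bytes of
 * slack, so wherever the DMA allocation lands there is always room to
 * slide phys_base forward to a 128-byte-aligned address; 'base' is the
 * CPU view of that same offset into the allocation.
 */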
69 | ||
70 | /* Free queue's descriptor memory */ | |
71 | static void nicvf_free_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem) | |
72 | { | |
73 | if (!dmem) | |
74 | return; | |
75 | ||
76 | dma_free_coherent(&nic->pdev->dev, dmem->size, | |
77 | dmem->unalign_base, dmem->dma); | |
78 | dmem->unalign_base = NULL; | |
79 | dmem->base = NULL; | |
80 | } | |
81 | ||
82 | /* Allocate buffer for packet reception | |
83 | * HW returns memory address where packet is DMA'ed but not a pointer | |
84 | * into RBDR ring, so save buffer address at the start of fragment and | |
85 | * align the start address to a cache aligned address | |
86 | */ | |
87 | static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, gfp_t gfp, | |
88 | u32 buf_len, u64 **rbuf) | |
89 | { | |
90 | u64 data; | |
91 | struct rbuf_info *rinfo; | |
92 | int order = get_order(buf_len); | |
93 | ||
94 | /* Check if request can be accomodated in previous allocated page */ | |
95 | if (nic->rb_page) { | |
96 | if ((nic->rb_page_offset + buf_len + buf_len) > | |
97 | (PAGE_SIZE << order)) { | |
98 | nic->rb_page = NULL; | |
99 | } else { | |
100 | nic->rb_page_offset += buf_len; | |
101 | get_page(nic->rb_page); | |
102 | } | |
103 | } | |
104 | ||
105 | /* Allocate a new page */ | |
106 | if (!nic->rb_page) { | |
107 | nic->rb_page = alloc_pages(gfp | __GFP_COMP, order); | |
108 | if (!nic->rb_page) { | |
109 | netdev_err(nic->netdev, "Failed to allocate new rcv buffer\n"); | |
110 | return -ENOMEM; | |
111 | } | |
112 | nic->rb_page_offset = 0; | |
113 | } | |
114 | ||
115 | data = (u64)page_address(nic->rb_page) + nic->rb_page_offset; | |
116 | ||
117 | /* Align buffer addr to cache line i.e 128 bytes */ | |
118 | rinfo = (struct rbuf_info *)(data + NICVF_RCV_BUF_ALIGN_LEN(data)); | |
119 | /* Save page address for reference updation */ | |
120 | rinfo->page = nic->rb_page; | |
121 | /* Store start address for later retrieval */ | |
122 | rinfo->data = (void *)data; | |
123 | /* Store alignment offset */ | |
124 | rinfo->offset = NICVF_RCV_BUF_ALIGN_LEN(data); | |
125 | ||
126 | data += rinfo->offset; | |
127 | ||
128 | /* Give next aligned address to hw for DMA */ | |
129 | *rbuf = (u64 *)(data + NICVF_RCV_BUF_ALIGN_BYTES); | |
130 | return 0; | |
131 | } | |
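
/* Notes on the allocator above, inferred from the code itself:
 * - alloc_pages() returns the page with refcount 1; every further buffer
 *   carved from the same page takes an extra reference via get_page(),
 *   matched by put_page() in the skb-build and teardown paths below.
 * - The fit check adds 'buf_len' twice because rb_page_offset is advanced
 *   by buf_len before the new fragment is used, so it is really checking
 *   that the fragment still fits after the advance.
 */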
132 | ||
133 | /* Retrieve actual buffer start address and build skb for received packet */ | |
134 | static struct sk_buff *nicvf_rb_ptr_to_skb(struct nicvf *nic, | |
135 | u64 rb_ptr, int len) | |
136 | { | |
137 | struct sk_buff *skb; | |
138 | struct rbuf_info *rinfo; | |
139 | ||
140 | rb_ptr = (u64)phys_to_virt(rb_ptr); | |
141 | /* Get buffer start address and alignment offset */ | |
142 | rinfo = GET_RBUF_INFO(rb_ptr); | |
143 | ||
144 | /* Now build an skb to give to stack */ | |
145 | skb = build_skb(rinfo->data, RCV_FRAG_LEN); | |
146 | if (!skb) { | |
147 | put_page(rinfo->page); | |
148 | return NULL; | |
149 | } | |
150 | ||
151 | /* Set correct skb->data */ | |
152 | skb_reserve(skb, rinfo->offset + NICVF_RCV_BUF_ALIGN_BYTES); | |
153 | ||
154 | prefetch((void *)rb_ptr); | |
155 | return skb; | |
156 | } | |
157 | ||
158 | /* Allocate RBDR ring and populate receive buffers */ | |
159 | static int nicvf_init_rbdr(struct nicvf *nic, struct rbdr *rbdr, | |
160 | int ring_len, int buf_size) | |
161 | { | |
162 | int idx; | |
163 | u64 *rbuf; | |
164 | struct rbdr_entry_t *desc; | |
165 | int err; | |
166 | ||
167 | err = nicvf_alloc_q_desc_mem(nic, &rbdr->dmem, ring_len, | |
168 | sizeof(struct rbdr_entry_t), | |
169 | NICVF_RCV_BUF_ALIGN_BYTES); | |
170 | if (err) | |
171 | return err; | |
172 | ||
173 | rbdr->desc = rbdr->dmem.base; | |
174 | /* Buffer size has to be in multiples of 128 bytes */ | |
175 | rbdr->dma_size = buf_size; | |
176 | rbdr->enable = true; | |
177 | rbdr->thresh = RBDR_THRESH; | |
178 | ||
179 | nic->rb_page = NULL; | |
180 | for (idx = 0; idx < ring_len; idx++) { | |
181 | err = nicvf_alloc_rcv_buffer(nic, GFP_KERNEL, RCV_FRAG_LEN, | |
182 | &rbuf); | |
183 | if (err) | |
184 | return err; | |
185 | ||
186 | desc = GET_RBDR_DESC(rbdr, idx); | |
187 | desc->buf_addr = virt_to_phys(rbuf) >> NICVF_RCV_BUF_ALIGN; | |
188 | } | |
189 | return 0; | |
190 | } | |
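
/* On the '>> NICVF_RCV_BUF_ALIGN' above: the descriptor stores the buffer
 * address with its low alignment bits dropped, and nicvf_free_rbdr()
 * below recovers it with the matching '<< NICVF_RCV_BUF_ALIGN'. This
 * assumes NICVF_RCV_BUF_ALIGN is log2(NICVF_RCV_BUF_ALIGN_BYTES) (7 for
 * 128-byte alignment); since every buffer handed to HW is aligned to that
 * boundary, no information is lost by the shift.
 */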
191 | ||
192 | /* Free RBDR ring and its receive buffers */ | |
193 | static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr) | |
194 | { | |
195 | int head, tail; | |
196 | u64 buf_addr; | |
197 | struct rbdr_entry_t *desc; | |
198 | struct rbuf_info *rinfo; | |
199 | ||
200 | if (!rbdr) | |
201 | return; | |
202 | ||
203 | rbdr->enable = false; | |
204 | if (!rbdr->dmem.base) | |
205 | return; | |
206 | ||
207 | head = rbdr->head; | |
208 | tail = rbdr->tail; | |
209 | ||
210 | /* Free SKBs */ | |
211 | while (head != tail) { | |
212 | desc = GET_RBDR_DESC(rbdr, head); | |
213 | buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN; | |
214 | rinfo = GET_RBUF_INFO((u64)phys_to_virt(buf_addr)); | |
215 | put_page(rinfo->page); | |
216 | head++; | |
217 | head &= (rbdr->dmem.q_len - 1); | |
218 | } | |
219 | /* Free SKB of tail desc */ | |
220 | desc = GET_RBDR_DESC(rbdr, tail); | |
221 | buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN; | |
222 | rinfo = GET_RBUF_INFO((u64)phys_to_virt(buf_addr)); | |
223 | put_page(rinfo->page); | |
224 | ||
225 | /* Free RBDR ring */ | |
226 | nicvf_free_q_desc_mem(nic, &rbdr->dmem); | |
227 | } | |
228 | ||
229 | /* Refill receive buffer descriptors with new buffers. | |
230 | */ | |
231 | void nicvf_refill_rbdr(struct nicvf *nic, gfp_t gfp) | |
232 | { | |
233 | struct queue_set *qs = nic->qs; | |
234 | int rbdr_idx = qs->rbdr_cnt; | |
235 | int tail, qcount; | |
236 | int refill_rb_cnt; | |
237 | struct rbdr *rbdr; | |
238 | struct rbdr_entry_t *desc; | |
239 | u64 *rbuf; | |
240 | int new_rb = 0; | |
241 | ||
242 | refill: | |
243 | if (!rbdr_idx) | |
244 | return; | |
245 | rbdr_idx--; | |
246 | rbdr = &qs->rbdr[rbdr_idx]; | |
247 | /* Check if it's enabled */ | |
248 | if (!rbdr->enable) | |
249 | goto next_rbdr; | |
250 | ||
251 | /* Get no of desc's to be refilled */ | |
252 | qcount = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, rbdr_idx); | |
253 | qcount &= 0x7FFFF; | |
254 | /* Doorbell can be ringed with a max of ring size minus 1 */ | |
255 | if (qcount >= (qs->rbdr_len - 1)) | |
256 | goto next_rbdr; | |
257 | else | |
258 | refill_rb_cnt = qs->rbdr_len - qcount - 1; | |
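
	/* Example (illustrative numbers): with rbdr_len = 8192 and a
	 * current qcount of 8000, 191 buffers are allocated and queued
	 * below; keeping one slot permanently unused is the usual ring
	 * trick that lets head == tail unambiguously mean "empty" rather
	 * than "full".
	 */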
259 | ||
260 | /* Start filling descs from tail */ | |
261 | tail = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, rbdr_idx) >> 3; | |
262 | while (refill_rb_cnt) { | |
263 | tail++; | |
264 | tail &= (rbdr->dmem.q_len - 1); | |
265 | ||
266 | if (nicvf_alloc_rcv_buffer(nic, gfp, RCV_FRAG_LEN, &rbuf)) | |
267 | break; | |
268 | ||
269 | desc = GET_RBDR_DESC(rbdr, tail); | |
270 | desc->buf_addr = virt_to_phys(rbuf) >> NICVF_RCV_BUF_ALIGN; | |
271 | refill_rb_cnt--; | |
272 | new_rb++; | |
273 | } | |
274 | ||
275 | /* make sure all memory stores are done before ringing doorbell */ | |
276 | smp_wmb(); | |
277 | ||
278 | /* Check if buffer allocation failed */ | |
279 | if (refill_rb_cnt) | |
280 | nic->rb_alloc_fail = true; | |
281 | else | |
282 | nic->rb_alloc_fail = false; | |
283 | ||
284 | /* Notify HW */ | |
285 | nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR, | |
286 | rbdr_idx, new_rb); | |
next_rbdr:
	/* Re-enable RBDR interrupts only if buffer allocation succeeded */
	if (!nic->rb_alloc_fail && rbdr->enable)
		nicvf_enable_intr(nic, NICVF_INTR_RBDR, rbdr_idx);

	if (rbdr_idx)
		goto refill;
}
295 | ||
296 | /* Alloc rcv buffers in non-atomic mode for better success */ | |
297 | void nicvf_rbdr_work(struct work_struct *work) | |
298 | { | |
299 | struct nicvf *nic = container_of(work, struct nicvf, rbdr_work.work); | |
300 | ||
301 | nicvf_refill_rbdr(nic, GFP_KERNEL); | |
302 | if (nic->rb_alloc_fail) | |
303 | schedule_delayed_work(&nic->rbdr_work, msecs_to_jiffies(10)); | |
304 | else | |
305 | nic->rb_work_scheduled = false; | |
306 | } | |
307 | ||
308 | /* In Softirq context, alloc rcv buffers in atomic mode */ | |
309 | void nicvf_rbdr_task(unsigned long data) | |
310 | { | |
311 | struct nicvf *nic = (struct nicvf *)data; | |
312 | ||
313 | nicvf_refill_rbdr(nic, GFP_ATOMIC); | |
314 | if (nic->rb_alloc_fail) { | |
315 | nic->rb_work_scheduled = true; | |
316 | schedule_delayed_work(&nic->rbdr_work, msecs_to_jiffies(10)); | |
317 | } | |
318 | } | |
319 | ||
320 | /* Initialize completion queue */ | |
321 | static int nicvf_init_cmp_queue(struct nicvf *nic, | |
322 | struct cmp_queue *cq, int q_len) | |
323 | { | |
324 | int err; | |
325 | ||
326 | err = nicvf_alloc_q_desc_mem(nic, &cq->dmem, q_len, CMP_QUEUE_DESC_SIZE, | |
327 | NICVF_CQ_BASE_ALIGN_BYTES); | |
328 | if (err) | |
329 | return err; | |
330 | ||
331 | cq->desc = cq->dmem.base; | |
332 | cq->thresh = CMP_QUEUE_CQE_THRESH; | |
333 | nic->cq_coalesce_usecs = (CMP_QUEUE_TIMER_THRESH * 0.05) - 1; | |
334 | ||
335 | return 0; | |
336 | } | |
337 | ||
338 | static void nicvf_free_cmp_queue(struct nicvf *nic, struct cmp_queue *cq) | |
339 | { | |
340 | if (!cq) | |
341 | return; | |
342 | if (!cq->dmem.base) | |
343 | return; | |
344 | ||
345 | nicvf_free_q_desc_mem(nic, &cq->dmem); | |
346 | } | |
347 | ||
348 | /* Initialize transmit queue */ | |
349 | static int nicvf_init_snd_queue(struct nicvf *nic, | |
350 | struct snd_queue *sq, int q_len) | |
351 | { | |
352 | int err; | |
353 | ||
354 | err = nicvf_alloc_q_desc_mem(nic, &sq->dmem, q_len, SND_QUEUE_DESC_SIZE, | |
355 | NICVF_SQ_BASE_ALIGN_BYTES); | |
356 | if (err) | |
357 | return err; | |
358 | ||
359 | sq->desc = sq->dmem.base; | |
360 | sq->skbuff = kcalloc(q_len, sizeof(u64), GFP_ATOMIC); | |
361 | sq->head = 0; | |
362 | sq->tail = 0; | |
363 | atomic_set(&sq->free_cnt, q_len - 1); | |
364 | sq->thresh = SND_QUEUE_THRESH; | |
365 | ||
366 | /* Preallocate memory for TSO segment's header */ | |
367 | sq->tso_hdrs = dma_alloc_coherent(&nic->pdev->dev, | |
368 | q_len * TSO_HEADER_SIZE, | |
369 | &sq->tso_hdrs_phys, GFP_KERNEL); | |
370 | if (!sq->tso_hdrs) | |
371 | return -ENOMEM; | |
372 | ||
373 | return 0; | |
374 | } | |
375 | ||
376 | static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq) | |
377 | { | |
378 | if (!sq) | |
379 | return; | |
380 | if (!sq->dmem.base) | |
381 | return; | |
382 | ||
383 | if (sq->tso_hdrs) | |
384 | dma_free_coherent(&nic->pdev->dev, sq->dmem.q_len, | |
385 | sq->tso_hdrs, sq->tso_hdrs_phys); | |
386 | ||
387 | kfree(sq->skbuff); | |
388 | nicvf_free_q_desc_mem(nic, &sq->dmem); | |
389 | } | |
390 | ||
391 | static void nicvf_reclaim_snd_queue(struct nicvf *nic, | |
392 | struct queue_set *qs, int qidx) | |
393 | { | |
394 | /* Disable send queue */ | |
395 | nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, 0); | |
396 | /* Check if SQ is stopped */ | |
397 | if (nicvf_poll_reg(nic, qidx, NIC_QSET_SQ_0_7_STATUS, 21, 1, 0x01)) | |
398 | return; | |
399 | /* Reset send queue */ | |
400 | nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET); | |
401 | } | |
402 | ||
403 | static void nicvf_reclaim_rcv_queue(struct nicvf *nic, | |
404 | struct queue_set *qs, int qidx) | |
405 | { | |
406 | union nic_mbx mbx = {}; | |
407 | ||
408 | /* Make sure all packets in the pipeline are written back into mem */ | |
409 | mbx.msg.msg = NIC_MBOX_MSG_RQ_SW_SYNC; | |
410 | nicvf_send_msg_to_pf(nic, &mbx); | |
411 | } | |
412 | ||
413 | static void nicvf_reclaim_cmp_queue(struct nicvf *nic, | |
414 | struct queue_set *qs, int qidx) | |
415 | { | |
416 | /* Disable timer threshold (doesn't get reset upon CQ reset */ | |
417 | nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx, 0); | |
418 | /* Disable completion queue */ | |
419 | nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, 0); | |
420 | /* Reset completion queue */ | |
421 | nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET); | |
422 | } | |
423 | ||
424 | static void nicvf_reclaim_rbdr(struct nicvf *nic, | |
425 | struct rbdr *rbdr, int qidx) | |
426 | { | |
427 | u64 tmp, fifo_state; | |
428 | int timeout = 10; | |
429 | ||
430 | /* Save head and tail pointers for feeing up buffers */ | |
431 | rbdr->head = nicvf_queue_reg_read(nic, | |
432 | NIC_QSET_RBDR_0_1_HEAD, | |
433 | qidx) >> 3; | |
434 | rbdr->tail = nicvf_queue_reg_read(nic, | |
435 | NIC_QSET_RBDR_0_1_TAIL, | |
436 | qidx) >> 3; | |
437 | ||
438 | /* If RBDR FIFO is in 'FAIL' state then do a reset first | |
439 | * before relaiming. | |
440 | */ | |
441 | fifo_state = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, qidx); | |
442 | if (((fifo_state >> 62) & 0x03) == 0x3) | |
443 | nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, | |
444 | qidx, NICVF_RBDR_RESET); | |
445 | ||
446 | /* Disable RBDR */ | |
447 | nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0); | |
448 | if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00)) | |
449 | return; | |
450 | while (1) { | |
451 | tmp = nicvf_queue_reg_read(nic, | |
452 | NIC_QSET_RBDR_0_1_PREFETCH_STATUS, | |
453 | qidx); | |
454 | if ((tmp & 0xFFFFFFFF) == ((tmp >> 32) & 0xFFFFFFFF)) | |
455 | break; | |
456 | usleep_range(1000, 2000); | |
457 | timeout--; | |
458 | if (!timeout) { | |
459 | netdev_err(nic->netdev, | |
460 | "Failed polling on prefetch status\n"); | |
461 | return; | |
462 | } | |
463 | } | |
464 | nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, | |
465 | qidx, NICVF_RBDR_RESET); | |
466 | ||
467 | if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x02)) | |
468 | return; | |
469 | nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0x00); | |
470 | if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00)) | |
471 | return; | |
472 | } | |
473 | ||
474 | /* Configures receive queue */ | |
475 | static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs, | |
476 | int qidx, bool enable) | |
477 | { | |
478 | union nic_mbx mbx = {}; | |
479 | struct rcv_queue *rq; | |
480 | struct rq_cfg rq_cfg; | |
481 | ||
482 | rq = &qs->rq[qidx]; | |
483 | rq->enable = enable; | |
484 | ||
485 | /* Disable receive queue */ | |
486 | nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, 0); | |
487 | ||
488 | if (!rq->enable) { | |
489 | nicvf_reclaim_rcv_queue(nic, qs, qidx); | |
490 | return; | |
491 | } | |
492 | ||
493 | rq->cq_qs = qs->vnic_id; | |
494 | rq->cq_idx = qidx; | |
495 | rq->start_rbdr_qs = qs->vnic_id; | |
496 | rq->start_qs_rbdr_idx = qs->rbdr_cnt - 1; | |
497 | rq->cont_rbdr_qs = qs->vnic_id; | |
498 | rq->cont_qs_rbdr_idx = qs->rbdr_cnt - 1; | |
499 | /* all writes of RBDR data to be loaded into L2 Cache as well*/ | |
500 | rq->caching = 1; | |
501 | ||
502 | /* Send a mailbox msg to PF to config RQ */ | |
503 | mbx.rq.msg = NIC_MBOX_MSG_RQ_CFG; | |
504 | mbx.rq.qs_num = qs->vnic_id; | |
505 | mbx.rq.rq_num = qidx; | |
506 | mbx.rq.cfg = (rq->caching << 26) | (rq->cq_qs << 19) | | |
507 | (rq->cq_idx << 16) | (rq->cont_rbdr_qs << 9) | | |
508 | (rq->cont_qs_rbdr_idx << 8) | | |
509 | (rq->start_rbdr_qs << 1) | (rq->start_qs_rbdr_idx); | |
510 | nicvf_send_msg_to_pf(nic, &mbx); | |
511 | ||
512 | mbx.rq.msg = NIC_MBOX_MSG_RQ_BP_CFG; | |
513 | mbx.rq.cfg = (1ULL << 63) | (1ULL << 62) | (qs->vnic_id << 0); | |
514 | nicvf_send_msg_to_pf(nic, &mbx); | |
515 | ||
516 | /* RQ drop config | |
517 | * Enable CQ drop to reserve sufficient CQEs for all tx packets | |
518 | */ | |
519 | mbx.rq.msg = NIC_MBOX_MSG_RQ_DROP_CFG; | |
520 | mbx.rq.cfg = (1ULL << 62) | (RQ_CQ_DROP << 8); | |
521 | nicvf_send_msg_to_pf(nic, &mbx); | |
522 | ||
523 | nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, qidx, 0x00); | |
524 | ||
525 | /* Enable Receive queue */ | |
526 | rq_cfg.ena = 1; | |
527 | rq_cfg.tcp_ena = 0; | |
528 | nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, *(u64 *)&rq_cfg); | |
529 | } | |
530 | ||
531 | /* Configures completion queue */ | |
532 | void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs, | |
533 | int qidx, bool enable) | |
534 | { | |
535 | struct cmp_queue *cq; | |
536 | struct cq_cfg cq_cfg; | |
537 | ||
538 | cq = &qs->cq[qidx]; | |
539 | cq->enable = enable; | |
540 | ||
541 | if (!cq->enable) { | |
542 | nicvf_reclaim_cmp_queue(nic, qs, qidx); | |
543 | return; | |
544 | } | |
545 | ||
546 | /* Reset completion queue */ | |
547 | nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET); | |
548 | ||
549 | if (!cq->enable) | |
550 | return; | |
551 | ||
552 | spin_lock_init(&cq->lock); | |
553 | /* Set completion queue base address */ | |
554 | nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_BASE, | |
555 | qidx, (u64)(cq->dmem.phys_base)); | |
556 | ||
557 | /* Enable Completion queue */ | |
558 | cq_cfg.ena = 1; | |
559 | cq_cfg.reset = 0; | |
560 | cq_cfg.caching = 0; | |
561 | cq_cfg.qsize = CMP_QSIZE; | |
562 | cq_cfg.avg_con = 0; | |
563 | nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, *(u64 *)&cq_cfg); | |
564 | ||
565 | /* Set threshold value for interrupt generation */ | |
566 | nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_THRESH, qidx, cq->thresh); | |
567 | nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, | |
568 | qidx, nic->cq_coalesce_usecs); | |
569 | } | |
570 | ||
571 | /* Configures transmit queue */ | |
572 | static void nicvf_snd_queue_config(struct nicvf *nic, struct queue_set *qs, | |
573 | int qidx, bool enable) | |
574 | { | |
575 | union nic_mbx mbx = {}; | |
576 | struct snd_queue *sq; | |
577 | struct sq_cfg sq_cfg; | |
578 | ||
579 | sq = &qs->sq[qidx]; | |
580 | sq->enable = enable; | |
581 | ||
582 | if (!sq->enable) { | |
583 | nicvf_reclaim_snd_queue(nic, qs, qidx); | |
584 | return; | |
585 | } | |
586 | ||
587 | /* Reset send queue */ | |
588 | nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET); | |
589 | ||
590 | sq->cq_qs = qs->vnic_id; | |
591 | sq->cq_idx = qidx; | |
592 | ||
593 | /* Send a mailbox msg to PF to config SQ */ | |
594 | mbx.sq.msg = NIC_MBOX_MSG_SQ_CFG; | |
595 | mbx.sq.qs_num = qs->vnic_id; | |
596 | mbx.sq.sq_num = qidx; | |
597 | mbx.sq.cfg = (sq->cq_qs << 3) | sq->cq_idx; | |
598 | nicvf_send_msg_to_pf(nic, &mbx); | |
599 | ||
600 | /* Set queue base address */ | |
601 | nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_BASE, | |
602 | qidx, (u64)(sq->dmem.phys_base)); | |
603 | ||
604 | /* Enable send queue & set queue size */ | |
605 | sq_cfg.ena = 1; | |
606 | sq_cfg.reset = 0; | |
607 | sq_cfg.ldwb = 0; | |
608 | sq_cfg.qsize = SND_QSIZE; | |
609 | sq_cfg.tstmp_bgx_intf = 0; | |
610 | nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, *(u64 *)&sq_cfg); | |
611 | ||
612 | /* Set threshold value for interrupt generation */ | |
613 | nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_THRESH, qidx, sq->thresh); | |
614 | ||
615 | /* Set queue:cpu affinity for better load distribution */ | |
616 | if (cpu_online(qidx)) { | |
617 | cpumask_set_cpu(qidx, &sq->affinity_mask); | |
618 | netif_set_xps_queue(nic->netdev, | |
619 | &sq->affinity_mask, qidx); | |
620 | } | |
621 | } | |
622 | ||
623 | /* Configures receive buffer descriptor ring */ | |
624 | static void nicvf_rbdr_config(struct nicvf *nic, struct queue_set *qs, | |
625 | int qidx, bool enable) | |
626 | { | |
627 | struct rbdr *rbdr; | |
628 | struct rbdr_cfg rbdr_cfg; | |
629 | ||
630 | rbdr = &qs->rbdr[qidx]; | |
631 | nicvf_reclaim_rbdr(nic, rbdr, qidx); | |
632 | if (!enable) | |
633 | return; | |
634 | ||
635 | /* Set descriptor base address */ | |
636 | nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_BASE, | |
637 | qidx, (u64)(rbdr->dmem.phys_base)); | |
638 | ||
639 | /* Enable RBDR & set queue size */ | |
640 | /* Buffer size should be in multiples of 128 bytes */ | |
641 | rbdr_cfg.ena = 1; | |
642 | rbdr_cfg.reset = 0; | |
643 | rbdr_cfg.ldwb = 0; | |
644 | rbdr_cfg.qsize = RBDR_SIZE; | |
645 | rbdr_cfg.avg_con = 0; | |
646 | rbdr_cfg.lines = rbdr->dma_size / 128; | |
647 | nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, | |
648 | qidx, *(u64 *)&rbdr_cfg); | |
649 | ||
650 | /* Notify HW */ | |
651 | nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR, | |
652 | qidx, qs->rbdr_len - 1); | |
653 | ||
654 | /* Set threshold value for interrupt generation */ | |
655 | nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_THRESH, | |
656 | qidx, rbdr->thresh - 1); | |
657 | } | |
658 | ||
659 | /* Requests PF to assign and enable Qset */ | |
660 | void nicvf_qset_config(struct nicvf *nic, bool enable) | |
661 | { | |
662 | union nic_mbx mbx = {}; | |
663 | struct queue_set *qs = nic->qs; | |
664 | struct qs_cfg *qs_cfg; | |
665 | ||
666 | if (!qs) { | |
667 | netdev_warn(nic->netdev, | |
668 | "Qset is still not allocated, don't init queues\n"); | |
669 | return; | |
670 | } | |
671 | ||
672 | qs->enable = enable; | |
673 | qs->vnic_id = nic->vf_id; | |
674 | ||
675 | /* Send a mailbox msg to PF to config Qset */ | |
676 | mbx.qs.msg = NIC_MBOX_MSG_QS_CFG; | |
677 | mbx.qs.num = qs->vnic_id; | |
678 | ||
679 | mbx.qs.cfg = 0; | |
680 | qs_cfg = (struct qs_cfg *)&mbx.qs.cfg; | |
681 | if (qs->enable) { | |
682 | qs_cfg->ena = 1; | |
683 | #ifdef __BIG_ENDIAN | |
684 | qs_cfg->be = 1; | |
685 | #endif | |
686 | qs_cfg->vnic = qs->vnic_id; | |
687 | } | |
688 | nicvf_send_msg_to_pf(nic, &mbx); | |
689 | } | |
690 | ||
691 | static void nicvf_free_resources(struct nicvf *nic) | |
692 | { | |
693 | int qidx; | |
694 | struct queue_set *qs = nic->qs; | |
695 | ||
696 | /* Free receive buffer descriptor ring */ | |
697 | for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) | |
698 | nicvf_free_rbdr(nic, &qs->rbdr[qidx]); | |
699 | ||
700 | /* Free completion queue */ | |
701 | for (qidx = 0; qidx < qs->cq_cnt; qidx++) | |
702 | nicvf_free_cmp_queue(nic, &qs->cq[qidx]); | |
703 | ||
704 | /* Free send queue */ | |
705 | for (qidx = 0; qidx < qs->sq_cnt; qidx++) | |
706 | nicvf_free_snd_queue(nic, &qs->sq[qidx]); | |
707 | } | |
708 | ||
709 | static int nicvf_alloc_resources(struct nicvf *nic) | |
710 | { | |
711 | int qidx; | |
712 | struct queue_set *qs = nic->qs; | |
713 | ||
714 | /* Alloc receive buffer descriptor ring */ | |
715 | for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) { | |
716 | if (nicvf_init_rbdr(nic, &qs->rbdr[qidx], qs->rbdr_len, | |
717 | DMA_BUFFER_LEN)) | |
718 | goto alloc_fail; | |
719 | } | |
720 | ||
721 | /* Alloc send queue */ | |
722 | for (qidx = 0; qidx < qs->sq_cnt; qidx++) { | |
723 | if (nicvf_init_snd_queue(nic, &qs->sq[qidx], qs->sq_len)) | |
724 | goto alloc_fail; | |
725 | } | |
726 | ||
727 | /* Alloc completion queue */ | |
728 | for (qidx = 0; qidx < qs->cq_cnt; qidx++) { | |
729 | if (nicvf_init_cmp_queue(nic, &qs->cq[qidx], qs->cq_len)) | |
730 | goto alloc_fail; | |
731 | } | |
732 | ||
733 | return 0; | |
734 | alloc_fail: | |
735 | nicvf_free_resources(nic); | |
736 | return -ENOMEM; | |
737 | } | |
738 | ||
739 | int nicvf_set_qset_resources(struct nicvf *nic) | |
740 | { | |
741 | struct queue_set *qs; | |
742 | ||
743 | qs = devm_kzalloc(&nic->pdev->dev, sizeof(*qs), GFP_KERNEL); | |
744 | if (!qs) | |
745 | return -ENOMEM; | |
746 | nic->qs = qs; | |
747 | ||
748 | /* Set count of each queue */ | |
749 | qs->rbdr_cnt = RBDR_CNT; | |
750 | qs->rq_cnt = RCV_QUEUE_CNT; | |
751 | qs->sq_cnt = SND_QUEUE_CNT; | |
752 | qs->cq_cnt = CMP_QUEUE_CNT; | |
753 | ||
754 | /* Set queue lengths */ | |
755 | qs->rbdr_len = RCV_BUF_COUNT; | |
756 | qs->sq_len = SND_QUEUE_LEN; | |
757 | qs->cq_len = CMP_QUEUE_LEN; | |
758 | return 0; | |
759 | } | |
760 | ||
761 | int nicvf_config_data_transfer(struct nicvf *nic, bool enable) | |
762 | { | |
763 | bool disable = false; | |
764 | struct queue_set *qs = nic->qs; | |
765 | int qidx; | |
766 | ||
767 | if (!qs) | |
768 | return 0; | |
769 | ||
770 | if (enable) { | |
771 | if (nicvf_alloc_resources(nic)) | |
772 | return -ENOMEM; | |
773 | ||
774 | for (qidx = 0; qidx < qs->sq_cnt; qidx++) | |
775 | nicvf_snd_queue_config(nic, qs, qidx, enable); | |
776 | for (qidx = 0; qidx < qs->cq_cnt; qidx++) | |
777 | nicvf_cmp_queue_config(nic, qs, qidx, enable); | |
778 | for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) | |
779 | nicvf_rbdr_config(nic, qs, qidx, enable); | |
780 | for (qidx = 0; qidx < qs->rq_cnt; qidx++) | |
781 | nicvf_rcv_queue_config(nic, qs, qidx, enable); | |
782 | } else { | |
783 | for (qidx = 0; qidx < qs->rq_cnt; qidx++) | |
784 | nicvf_rcv_queue_config(nic, qs, qidx, disable); | |
785 | for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) | |
786 | nicvf_rbdr_config(nic, qs, qidx, disable); | |
787 | for (qidx = 0; qidx < qs->sq_cnt; qidx++) | |
788 | nicvf_snd_queue_config(nic, qs, qidx, disable); | |
789 | for (qidx = 0; qidx < qs->cq_cnt; qidx++) | |
790 | nicvf_cmp_queue_config(nic, qs, qidx, disable); | |
791 | ||
792 | nicvf_free_resources(nic); | |
793 | } | |
794 | ||
795 | return 0; | |
796 | } | |
797 | ||
798 | /* Get a free desc from SQ | |
799 | * returns descriptor ponter & descriptor number | |
800 | */ | |
801 | static inline int nicvf_get_sq_desc(struct snd_queue *sq, int desc_cnt) | |
802 | { | |
803 | int qentry; | |
804 | ||
805 | qentry = sq->tail; | |
806 | atomic_sub(desc_cnt, &sq->free_cnt); | |
807 | sq->tail += desc_cnt; | |
808 | sq->tail &= (sq->dmem.q_len - 1); | |
809 | ||
810 | return qentry; | |
811 | } | |
812 | ||
813 | /* Free descriptor back to SQ for future use */ | |
814 | void nicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt) | |
815 | { | |
816 | atomic_add(desc_cnt, &sq->free_cnt); | |
817 | sq->head += desc_cnt; | |
818 | sq->head &= (sq->dmem.q_len - 1); | |
819 | } | |
820 | ||
821 | static inline int nicvf_get_nxt_sqentry(struct snd_queue *sq, int qentry) | |
822 | { | |
823 | qentry++; | |
824 | qentry &= (sq->dmem.q_len - 1); | |
825 | return qentry; | |
826 | } | |
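
/* The '& (q_len - 1)' wrap in the helpers above is only correct when the
 * queue length is a power of two, which the driver's queue-length macros
 * (SND_QUEUE_LEN etc., defined in nicvf_queues.h) are assumed to
 * guarantee. Example: with q_len = 1024, index 1023 + 1 masks back to 0.
 */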
827 | ||
828 | void nicvf_sq_enable(struct nicvf *nic, struct snd_queue *sq, int qidx) | |
829 | { | |
830 | u64 sq_cfg; | |
831 | ||
832 | sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx); | |
833 | sq_cfg |= NICVF_SQ_EN; | |
834 | nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg); | |
835 | /* Ring doorbell so that H/W restarts processing SQEs */ | |
836 | nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, qidx, 0); | |
837 | } | |
838 | ||
839 | void nicvf_sq_disable(struct nicvf *nic, int qidx) | |
840 | { | |
841 | u64 sq_cfg; | |
842 | ||
843 | sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx); | |
844 | sq_cfg &= ~NICVF_SQ_EN; | |
845 | nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg); | |
846 | } | |
847 | ||
848 | void nicvf_sq_free_used_descs(struct net_device *netdev, struct snd_queue *sq, | |
849 | int qidx) | |
850 | { | |
851 | u64 head, tail; | |
852 | struct sk_buff *skb; | |
853 | struct nicvf *nic = netdev_priv(netdev); | |
854 | struct sq_hdr_subdesc *hdr; | |
855 | ||
856 | head = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_HEAD, qidx) >> 4; | |
857 | tail = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_TAIL, qidx) >> 4; | |
858 | while (sq->head != head) { | |
859 | hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, sq->head); | |
860 | if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER) { | |
861 | nicvf_put_sq_desc(sq, 1); | |
862 | continue; | |
863 | } | |
864 | skb = (struct sk_buff *)sq->skbuff[sq->head]; | |
865 | atomic64_add(1, (atomic64_t *)&netdev->stats.tx_packets); | |
866 | atomic64_add(hdr->tot_len, | |
867 | (atomic64_t *)&netdev->stats.tx_bytes); | |
868 | dev_kfree_skb_any(skb); | |
869 | nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1); | |
870 | } | |
871 | } | |
872 | ||
873 | /* Calculate no of SQ subdescriptors needed to transmit all | |
874 | * segments of this TSO packet. | |
875 | * Taken from 'Tilera network driver' with a minor modification. | |
876 | */ | |
877 | static int nicvf_tso_count_subdescs(struct sk_buff *skb) | |
878 | { | |
879 | struct skb_shared_info *sh = skb_shinfo(skb); | |
880 | unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb); | |
881 | unsigned int data_len = skb->len - sh_len; | |
882 | unsigned int p_len = sh->gso_size; | |
883 | long f_id = -1; /* id of the current fragment */ | |
884 | long f_size = skb_headlen(skb) - sh_len; /* current fragment size */ | |
885 | long f_used = 0; /* bytes used from the current fragment */ | |
886 | long n; /* size of the current piece of payload */ | |
887 | int num_edescs = 0; | |
888 | int segment; | |
889 | ||
890 | for (segment = 0; segment < sh->gso_segs; segment++) { | |
891 | unsigned int p_used = 0; | |
892 | ||
893 | /* One edesc for header and for each piece of the payload. */ | |
894 | for (num_edescs++; p_used < p_len; num_edescs++) { | |
895 | /* Advance as needed. */ | |
896 | while (f_used >= f_size) { | |
897 | f_id++; | |
898 | f_size = skb_frag_size(&sh->frags[f_id]); | |
899 | f_used = 0; | |
900 | } | |
901 | ||
902 | /* Use bytes from the current fragment. */ | |
903 | n = p_len - p_used; | |
904 | if (n > f_size - f_used) | |
905 | n = f_size - f_used; | |
906 | f_used += n; | |
907 | p_used += n; | |
908 | } | |
909 | ||
910 | /* The last segment may be less than gso_size. */ | |
911 | data_len -= p_len; | |
912 | if (data_len < p_len) | |
913 | p_len = data_len; | |
914 | } | |
915 | ||
916 | /* '+ gso_segs' for SQ_HDR_SUDESCs for each segment */ | |
917 | return num_edescs + sh->gso_segs; | |
918 | } | |
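
/* Worked example (illustrative numbers): a linear TSO skb with a 54-byte
 * header, 3000 bytes of payload and gso_size = 1448 splits into
 * gso_segs = 3 segments (1448 + 1448 + 104 bytes). Each segment costs one
 * edesc for the header plus one for its single contiguous payload piece,
 * so num_edescs = 6 and the return value is 6 + 3 = 9 subdescriptors.
 */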
919 | ||
920 | /* Get the number of SQ descriptors needed to xmit this skb */ | |
921 | static int nicvf_sq_subdesc_required(struct nicvf *nic, struct sk_buff *skb) | |
922 | { | |
923 | int subdesc_cnt = MIN_SQ_DESC_PER_PKT_XMIT; | |
924 | ||
925 | if (skb_shinfo(skb)->gso_size) { | |
926 | subdesc_cnt = nicvf_tso_count_subdescs(skb); | |
927 | return subdesc_cnt; | |
928 | } | |
929 | ||
930 | if (skb_shinfo(skb)->nr_frags) | |
931 | subdesc_cnt += skb_shinfo(skb)->nr_frags; | |
932 | ||
933 | return subdesc_cnt; | |
934 | } | |
935 | ||
936 | /* Add SQ HEADER subdescriptor. | |
937 | * First subdescriptor for every send descriptor. | |
938 | */ | |
939 | static inline void | |
940 | nicvf_sq_add_hdr_subdesc(struct snd_queue *sq, int qentry, | |
941 | int subdesc_cnt, struct sk_buff *skb, int len) | |
942 | { | |
943 | int proto; | |
944 | struct sq_hdr_subdesc *hdr; | |
945 | ||
946 | hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry); | |
947 | sq->skbuff[qentry] = (u64)skb; | |
948 | ||
949 | memset(hdr, 0, SND_QUEUE_DESC_SIZE); | |
950 | hdr->subdesc_type = SQ_DESC_TYPE_HEADER; | |
951 | /* Enable notification via CQE after processing SQE */ | |
952 | hdr->post_cqe = 1; | |
953 | /* No of subdescriptors following this */ | |
954 | hdr->subdesc_cnt = subdesc_cnt; | |
955 | hdr->tot_len = len; | |
956 | ||
957 | /* Offload checksum calculation to HW */ | |
958 | if (skb->ip_summed == CHECKSUM_PARTIAL) { | |
959 | if (skb->protocol != htons(ETH_P_IP)) | |
960 | return; | |
961 | ||
962 | hdr->csum_l3 = 1; /* Enable IP csum calculation */ | |
963 | hdr->l3_offset = skb_network_offset(skb); | |
964 | hdr->l4_offset = skb_transport_offset(skb); | |
965 | ||
966 | proto = ip_hdr(skb)->protocol; | |
967 | switch (proto) { | |
968 | case IPPROTO_TCP: | |
969 | hdr->csum_l4 = SEND_L4_CSUM_TCP; | |
970 | break; | |
971 | case IPPROTO_UDP: | |
972 | hdr->csum_l4 = SEND_L4_CSUM_UDP; | |
973 | break; | |
974 | case IPPROTO_SCTP: | |
975 | hdr->csum_l4 = SEND_L4_CSUM_SCTP; | |
976 | break; | |
977 | } | |
978 | } | |
979 | } | |
980 | ||
981 | /* SQ GATHER subdescriptor | |
982 | * Must follow HDR descriptor | |
983 | */ | |
984 | static inline void nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry, | |
985 | int size, u64 data) | |
986 | { | |
987 | struct sq_gather_subdesc *gather; | |
988 | ||
989 | qentry &= (sq->dmem.q_len - 1); | |
990 | gather = (struct sq_gather_subdesc *)GET_SQ_DESC(sq, qentry); | |
991 | ||
992 | memset(gather, 0, SND_QUEUE_DESC_SIZE); | |
993 | gather->subdesc_type = SQ_DESC_TYPE_GATHER; | |
994 | gather->ld_type = NIC_SEND_LD_TYPE_E_LDWB; | |
995 | gather->size = size; | |
996 | gather->addr = data; | |
997 | } | |
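
/* For a non-TSO skb with two page fragments, nicvf_sq_append_skb() below
 * lays the subdescriptors out like this (a sketch of the existing logic):
 *
 *   qentry + 0: HEADER  (subdesc_cnt = 3, tot_len = skb->len)
 *   qentry + 1: GATHER  (linear part, skb->data)
 *   qentry + 2: GATHER  (frag 0)
 *   qentry + 3: GATHER  (frag 1)
 *
 * i.e. subdesc_cnt counts only the subdescriptors that follow the header.
 */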
998 | ||
999 | /* Segment a TSO packet into 'gso_size' segments and append | |
1000 | * them to SQ for transfer | |
1001 | */ | |
1002 | static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq, | |
1003 | int qentry, struct sk_buff *skb) | |
1004 | { | |
1005 | struct tso_t tso; | |
1006 | int seg_subdescs = 0, desc_cnt = 0; | |
1007 | int seg_len, total_len, data_left; | |
1008 | int hdr_qentry = qentry; | |
1009 | int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); | |
1010 | ||
1011 | tso_start(skb, &tso); | |
1012 | total_len = skb->len - hdr_len; | |
1013 | while (total_len > 0) { | |
1014 | char *hdr; | |
1015 | ||
1016 | /* Save Qentry for adding HDR_SUBDESC at the end */ | |
1017 | hdr_qentry = qentry; | |
1018 | ||
1019 | data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len); | |
1020 | total_len -= data_left; | |
1021 | ||
1022 | /* Add segment's header */ | |
1023 | qentry = nicvf_get_nxt_sqentry(sq, qentry); | |
1024 | hdr = sq->tso_hdrs + qentry * TSO_HEADER_SIZE; | |
1025 | tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0); | |
1026 | nicvf_sq_add_gather_subdesc(sq, qentry, hdr_len, | |
1027 | sq->tso_hdrs_phys + | |
1028 | qentry * TSO_HEADER_SIZE); | |
1029 | /* HDR_SUDESC + GATHER */ | |
1030 | seg_subdescs = 2; | |
1031 | seg_len = hdr_len; | |
1032 | ||
1033 | /* Add segment's payload fragments */ | |
1034 | while (data_left > 0) { | |
1035 | int size; | |
1036 | ||
1037 | size = min_t(int, tso.size, data_left); | |
1038 | ||
1039 | qentry = nicvf_get_nxt_sqentry(sq, qentry); | |
1040 | nicvf_sq_add_gather_subdesc(sq, qentry, size, | |
1041 | virt_to_phys(tso.data)); | |
1042 | seg_subdescs++; | |
1043 | seg_len += size; | |
1044 | ||
1045 | data_left -= size; | |
1046 | tso_build_data(skb, &tso, size); | |
1047 | } | |
1048 | nicvf_sq_add_hdr_subdesc(sq, hdr_qentry, | |
1049 | seg_subdescs - 1, skb, seg_len); | |
1050 | sq->skbuff[hdr_qentry] = 0; | |
1051 | qentry = nicvf_get_nxt_sqentry(sq, qentry); | |
1052 | ||
1053 | desc_cnt += seg_subdescs; | |
1054 | } | |
1055 | /* Save SKB in the last segment for freeing */ | |
1056 | sq->skbuff[hdr_qentry] = (u64)skb; | |
1057 | ||
1058 | /* make sure all memory stores are done before ringing doorbell */ | |
1059 | smp_wmb(); | |
1060 | ||
1061 | /* Inform HW to xmit all TSO segments */ | |
1062 | nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, | |
1063 | skb_get_queue_mapping(skb), desc_cnt); | |
1064 | return 1; | |
1065 | } | |
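
/* Bookkeeping note (from the code above): every segment except the last
 * stores 0 in sq->skbuff[] for its HEADER slot, and only the final
 * segment's slot holds the skb pointer, so the skb is freed exactly once,
 * after the CQE for the last segment is processed.
 */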
1066 | ||
1067 | /* Append an skb to a SQ for packet transfer. */ | |
1068 | int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb) | |
1069 | { | |
1070 | int i, size; | |
1071 | int subdesc_cnt; | |
1072 | int sq_num, qentry; | |
1073 | struct queue_set *qs = nic->qs; | |
1074 | struct snd_queue *sq; | |
1075 | ||
1076 | sq_num = skb_get_queue_mapping(skb); | |
1077 | sq = &qs->sq[sq_num]; | |
1078 | ||
1079 | subdesc_cnt = nicvf_sq_subdesc_required(nic, skb); | |
1080 | if (subdesc_cnt > atomic_read(&sq->free_cnt)) | |
1081 | goto append_fail; | |
1082 | ||
1083 | qentry = nicvf_get_sq_desc(sq, subdesc_cnt); | |
1084 | ||
1085 | /* Check if its a TSO packet */ | |
1086 | if (skb_shinfo(skb)->gso_size) | |
1087 | return nicvf_sq_append_tso(nic, sq, qentry, skb); | |
1088 | ||
1089 | /* Add SQ header subdesc */ | |
1090 | nicvf_sq_add_hdr_subdesc(sq, qentry, subdesc_cnt - 1, skb, skb->len); | |
1091 | ||
1092 | /* Add SQ gather subdescs */ | |
1093 | qentry = nicvf_get_nxt_sqentry(sq, qentry); | |
1094 | size = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len; | |
1095 | nicvf_sq_add_gather_subdesc(sq, qentry, size, virt_to_phys(skb->data)); | |
1096 | ||
1097 | /* Check for scattered buffer */ | |
1098 | if (!skb_is_nonlinear(skb)) | |
1099 | goto doorbell; | |
1100 | ||
1101 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { | |
1102 | const struct skb_frag_struct *frag; | |
1103 | ||
1104 | frag = &skb_shinfo(skb)->frags[i]; | |
1105 | ||
1106 | qentry = nicvf_get_nxt_sqentry(sq, qentry); | |
1107 | size = skb_frag_size(frag); | |
1108 | nicvf_sq_add_gather_subdesc(sq, qentry, size, | |
1109 | virt_to_phys( | |
1110 | skb_frag_address(frag))); | |
1111 | } | |
1112 | ||
1113 | doorbell: | |
1114 | /* make sure all memory stores are done before ringing doorbell */ | |
1115 | smp_wmb(); | |
1116 | ||
1117 | /* Inform HW to xmit new packet */ | |
1118 | nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, | |
1119 | sq_num, subdesc_cnt); | |
1120 | return 1; | |
1121 | ||
1122 | append_fail: | |
1123 | netdev_dbg(nic->netdev, "Not enough SQ descriptors to xmit pkt\n"); | |
1124 | return 0; | |
1125 | } | |
1126 | ||
1127 | static inline unsigned frag_num(unsigned i) | |
1128 | { | |
1129 | #ifdef __BIG_ENDIAN | |
1130 | return (i & ~3) + 3 - (i & 3); | |
1131 | #else | |
1132 | return i; | |
1133 | #endif | |
1134 | } | |
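
/* frag_num() compensates for endianness when indexing the u16 buffer
 * lengths packed into the CQE's u64 words: on big-endian the four u16s
 * within each u64 sit in reverse order, so within each group of four,
 * logical index 0 maps to memory index 3, 1 to 2, and so on; e.g.
 * frag_num(5) = (5 & ~3) + 3 - (5 & 3) = 4 + 3 - 1 = 6.
 */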
1135 | ||
1136 | /* Returns SKB for a received packet */ | |
1137 | struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx) | |
1138 | { | |
1139 | int frag; | |
1140 | int payload_len = 0; | |
1141 | struct sk_buff *skb = NULL; | |
1142 | struct sk_buff *skb_frag = NULL; | |
1143 | struct sk_buff *prev_frag = NULL; | |
1144 | u16 *rb_lens = NULL; | |
1145 | u64 *rb_ptrs = NULL; | |
1146 | ||
1147 | rb_lens = (void *)cqe_rx + (3 * sizeof(u64)); | |
1148 | rb_ptrs = (void *)cqe_rx + (6 * sizeof(u64)); | |
1149 | ||
1150 | netdev_dbg(nic->netdev, "%s rb_cnt %d rb0_ptr %llx rb0_sz %d\n", | |
1151 | __func__, cqe_rx->rb_cnt, cqe_rx->rb0_ptr, cqe_rx->rb0_sz); | |
1152 | ||
1153 | for (frag = 0; frag < cqe_rx->rb_cnt; frag++) { | |
1154 | payload_len = rb_lens[frag_num(frag)]; | |
1155 | if (!frag) { | |
1156 | /* First fragment */ | |
1157 | skb = nicvf_rb_ptr_to_skb(nic, | |
1158 | *rb_ptrs - cqe_rx->align_pad, | |
1159 | payload_len); | |
1160 | if (!skb) | |
1161 | return NULL; | |
1162 | skb_reserve(skb, cqe_rx->align_pad); | |
1163 | skb_put(skb, payload_len); | |
1164 | } else { | |
1165 | /* Add fragments */ | |
1166 | skb_frag = nicvf_rb_ptr_to_skb(nic, *rb_ptrs, | |
1167 | payload_len); | |
1168 | if (!skb_frag) { | |
1169 | dev_kfree_skb(skb); | |
1170 | return NULL; | |
1171 | } | |
1172 | ||
1173 | if (!skb_shinfo(skb)->frag_list) | |
1174 | skb_shinfo(skb)->frag_list = skb_frag; | |
1175 | else | |
1176 | prev_frag->next = skb_frag; | |
1177 | ||
1178 | prev_frag = skb_frag; | |
1179 | skb->len += payload_len; | |
1180 | skb->data_len += payload_len; | |
1181 | skb_frag->len = payload_len; | |
1182 | } | |
1183 | /* Next buffer pointer */ | |
1184 | rb_ptrs++; | |
1185 | } | |
1186 | return skb; | |
1187 | } | |
1188 | ||
1189 | /* Enable interrupt */ | |
1190 | void nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx) | |
1191 | { | |
1192 | u64 reg_val; | |
1193 | ||
1194 | reg_val = nicvf_reg_read(nic, NIC_VF_ENA_W1S); | |
1195 | ||
1196 | switch (int_type) { | |
1197 | case NICVF_INTR_CQ: | |
1198 | reg_val |= ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT); | |
1199 | break; | |
1200 | case NICVF_INTR_SQ: | |
1201 | reg_val |= ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT); | |
1202 | break; | |
1203 | case NICVF_INTR_RBDR: | |
1204 | reg_val |= ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT); | |
1205 | break; | |
1206 | case NICVF_INTR_PKT_DROP: | |
1207 | reg_val |= (1ULL << NICVF_INTR_PKT_DROP_SHIFT); | |
1208 | break; | |
1209 | case NICVF_INTR_TCP_TIMER: | |
1210 | reg_val |= (1ULL << NICVF_INTR_TCP_TIMER_SHIFT); | |
1211 | break; | |
1212 | case NICVF_INTR_MBOX: | |
1213 | reg_val |= (1ULL << NICVF_INTR_MBOX_SHIFT); | |
1214 | break; | |
1215 | case NICVF_INTR_QS_ERR: | |
1216 | reg_val |= (1ULL << NICVF_INTR_QS_ERR_SHIFT); | |
1217 | break; | |
1218 | default: | |
1219 | netdev_err(nic->netdev, | |
1220 | "Failed to enable interrupt: unknown type\n"); | |
1221 | break; | |
1222 | } | |
1223 | ||
1224 | nicvf_reg_write(nic, NIC_VF_ENA_W1S, reg_val); | |
1225 | } | |
1226 | ||
1227 | /* Disable interrupt */ | |
1228 | void nicvf_disable_intr(struct nicvf *nic, int int_type, int q_idx) | |
1229 | { | |
1230 | u64 reg_val = 0; | |
1231 | ||
1232 | switch (int_type) { | |
1233 | case NICVF_INTR_CQ: | |
1234 | reg_val |= ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT); | |
1235 | break; | |
1236 | case NICVF_INTR_SQ: | |
1237 | reg_val |= ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT); | |
1238 | break; | |
1239 | case NICVF_INTR_RBDR: | |
1240 | reg_val |= ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT); | |
1241 | break; | |
1242 | case NICVF_INTR_PKT_DROP: | |
1243 | reg_val |= (1ULL << NICVF_INTR_PKT_DROP_SHIFT); | |
1244 | break; | |
1245 | case NICVF_INTR_TCP_TIMER: | |
1246 | reg_val |= (1ULL << NICVF_INTR_TCP_TIMER_SHIFT); | |
1247 | break; | |
1248 | case NICVF_INTR_MBOX: | |
1249 | reg_val |= (1ULL << NICVF_INTR_MBOX_SHIFT); | |
1250 | break; | |
1251 | case NICVF_INTR_QS_ERR: | |
1252 | reg_val |= (1ULL << NICVF_INTR_QS_ERR_SHIFT); | |
1253 | break; | |
1254 | default: | |
1255 | netdev_err(nic->netdev, | |
1256 | "Failed to disable interrupt: unknown type\n"); | |
1257 | break; | |
1258 | } | |
1259 | ||
1260 | nicvf_reg_write(nic, NIC_VF_ENA_W1C, reg_val); | |
1261 | } | |
1262 | ||
1263 | /* Clear interrupt */ | |
1264 | void nicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx) | |
1265 | { | |
1266 | u64 reg_val = 0; | |
1267 | ||
1268 | switch (int_type) { | |
1269 | case NICVF_INTR_CQ: | |
1270 | reg_val = ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT); | |
1271 | break; | |
1272 | case NICVF_INTR_SQ: | |
1273 | reg_val = ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT); | |
1274 | break; | |
1275 | case NICVF_INTR_RBDR: | |
1276 | reg_val = ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT); | |
1277 | break; | |
1278 | case NICVF_INTR_PKT_DROP: | |
1279 | reg_val = (1ULL << NICVF_INTR_PKT_DROP_SHIFT); | |
1280 | break; | |
1281 | case NICVF_INTR_TCP_TIMER: | |
1282 | reg_val = (1ULL << NICVF_INTR_TCP_TIMER_SHIFT); | |
1283 | break; | |
1284 | case NICVF_INTR_MBOX: | |
1285 | reg_val = (1ULL << NICVF_INTR_MBOX_SHIFT); | |
1286 | break; | |
1287 | case NICVF_INTR_QS_ERR: | |
1288 | reg_val |= (1ULL << NICVF_INTR_QS_ERR_SHIFT); | |
1289 | break; | |
1290 | default: | |
1291 | netdev_err(nic->netdev, | |
1292 | "Failed to clear interrupt: unknown type\n"); | |
1293 | break; | |
1294 | } | |
1295 | ||
1296 | nicvf_reg_write(nic, NIC_VF_INT, reg_val); | |
1297 | } | |
1298 | ||
1299 | /* Check if interrupt is enabled */ | |
1300 | int nicvf_is_intr_enabled(struct nicvf *nic, int int_type, int q_idx) | |
1301 | { | |
1302 | u64 reg_val; | |
1303 | u64 mask = 0xff; | |
1304 | ||
1305 | reg_val = nicvf_reg_read(nic, NIC_VF_ENA_W1S); | |
1306 | ||
1307 | switch (int_type) { | |
1308 | case NICVF_INTR_CQ: | |
1309 | mask = ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT); | |
1310 | break; | |
1311 | case NICVF_INTR_SQ: | |
1312 | mask = ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT); | |
1313 | break; | |
1314 | case NICVF_INTR_RBDR: | |
1315 | mask = ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT); | |
1316 | break; | |
1317 | case NICVF_INTR_PKT_DROP: | |
1318 | mask = NICVF_INTR_PKT_DROP_MASK; | |
1319 | break; | |
1320 | case NICVF_INTR_TCP_TIMER: | |
1321 | mask = NICVF_INTR_TCP_TIMER_MASK; | |
1322 | break; | |
1323 | case NICVF_INTR_MBOX: | |
1324 | mask = NICVF_INTR_MBOX_MASK; | |
1325 | break; | |
1326 | case NICVF_INTR_QS_ERR: | |
1327 | mask = NICVF_INTR_QS_ERR_MASK; | |
1328 | break; | |
1329 | default: | |
1330 | netdev_err(nic->netdev, | |
1331 | "Failed to check interrupt enable: unknown type\n"); | |
1332 | break; | |
1333 | } | |
1334 | ||
1335 | return (reg_val & mask); | |
1336 | } | |
1337 | ||
1338 | void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx) | |
1339 | { | |
1340 | struct rcv_queue *rq; | |
1341 | ||
1342 | #define GET_RQ_STATS(reg) \ | |
1343 | nicvf_reg_read(nic, NIC_QSET_RQ_0_7_STAT_0_1 |\ | |
1344 | (rq_idx << NIC_Q_NUM_SHIFT) | (reg << 3)) | |
1345 | ||
1346 | rq = &nic->qs->rq[rq_idx]; | |
1347 | rq->stats.bytes = GET_RQ_STATS(RQ_SQ_STATS_OCTS); | |
1348 | rq->stats.pkts = GET_RQ_STATS(RQ_SQ_STATS_PKTS); | |
1349 | } | |
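
/* The GET_RQ_STATS()/GET_SQ_STATS() macros compose a register offset by
 * OR-ing the base stat register with the queue number and the stat index.
 * For example, with rq_idx = 1 and reg = RQ_SQ_STATS_PKTS (assumed to be
 * 1, giving a 1 << 3 = 8-byte step to the second 64-bit counter), the
 * read targets NIC_QSET_RQ_0_7_STAT_0_1 | (1 << NIC_Q_NUM_SHIFT) | 8.
 */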
1350 | ||
1351 | void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx) | |
1352 | { | |
1353 | struct snd_queue *sq; | |
1354 | ||
1355 | #define GET_SQ_STATS(reg) \ | |
1356 | nicvf_reg_read(nic, NIC_QSET_SQ_0_7_STAT_0_1 |\ | |
1357 | (sq_idx << NIC_Q_NUM_SHIFT) | (reg << 3)) | |
1358 | ||
1359 | sq = &nic->qs->sq[sq_idx]; | |
1360 | sq->stats.bytes = GET_SQ_STATS(RQ_SQ_STATS_OCTS); | |
1361 | sq->stats.pkts = GET_SQ_STATS(RQ_SQ_STATS_PKTS); | |
1362 | } | |
1363 | ||
/* Check for errors in the receive completion queue entry */
int nicvf_check_cqe_rx_errs(struct nicvf *nic,
			    struct cmp_queue *cq, struct cqe_rx_t *cqe_rx)
{
	struct cmp_queue_stats *stats = &cq->stats;

	if (!cqe_rx->err_level && !cqe_rx->err_opcode) {
		stats->rx.errop.good++;
		return 0;
	}

	if (netif_msg_rx_err(nic))
		netdev_err(nic->netdev,
			   "%s: RX error CQE err_level 0x%x err_opcode 0x%x\n",
			   nic->netdev->name,
			   cqe_rx->err_level, cqe_rx->err_opcode);

	switch (cqe_rx->err_level) {
	case CQ_ERRLVL_MAC:
		stats->rx.errlvl.mac_errs++;
		break;
	case CQ_ERRLVL_L2:
		stats->rx.errlvl.l2_errs++;
		break;
	case CQ_ERRLVL_L3:
		stats->rx.errlvl.l3_errs++;
		break;
	case CQ_ERRLVL_L4:
		stats->rx.errlvl.l4_errs++;
		break;
	}

	switch (cqe_rx->err_opcode) {
	case CQ_RX_ERROP_RE_PARTIAL:
		stats->rx.errop.partial_pkts++;
		break;
	case CQ_RX_ERROP_RE_JABBER:
		stats->rx.errop.jabber_errs++;
		break;
	case CQ_RX_ERROP_RE_FCS:
		stats->rx.errop.fcs_errs++;
		break;
	case CQ_RX_ERROP_RE_TERMINATE:
		stats->rx.errop.terminate_errs++;
		break;
	case CQ_RX_ERROP_RE_RX_CTL:
		stats->rx.errop.bgx_rx_errs++;
		break;
	case CQ_RX_ERROP_PREL2_ERR:
		stats->rx.errop.prel2_errs++;
		break;
	case CQ_RX_ERROP_L2_FRAGMENT:
		stats->rx.errop.l2_frags++;
		break;
	case CQ_RX_ERROP_L2_OVERRUN:
		stats->rx.errop.l2_overruns++;
		break;
	case CQ_RX_ERROP_L2_PFCS:
		stats->rx.errop.l2_pfcs++;
		break;
	case CQ_RX_ERROP_L2_PUNY:
		stats->rx.errop.l2_puny++;
		break;
	case CQ_RX_ERROP_L2_MAL:
		stats->rx.errop.l2_hdr_malformed++;
		break;
	case CQ_RX_ERROP_L2_OVERSIZE:
		stats->rx.errop.l2_oversize++;
		break;
	case CQ_RX_ERROP_L2_UNDERSIZE:
		stats->rx.errop.l2_undersize++;
		break;
	case CQ_RX_ERROP_L2_LENMISM:
		stats->rx.errop.l2_len_mismatch++;
		break;
	case CQ_RX_ERROP_L2_PCLP:
		stats->rx.errop.l2_pclp++;
		break;
	case CQ_RX_ERROP_IP_NOT:
		stats->rx.errop.non_ip++;
		break;
	case CQ_RX_ERROP_IP_CSUM_ERR:
		stats->rx.errop.ip_csum_err++;
		break;
	case CQ_RX_ERROP_IP_MAL:
		stats->rx.errop.ip_hdr_malformed++;
		break;
	case CQ_RX_ERROP_IP_MALD:
		stats->rx.errop.ip_payload_malformed++;
		break;
	case CQ_RX_ERROP_IP_HOP:
		stats->rx.errop.ip_hop_errs++;
		break;
	case CQ_RX_ERROP_L3_ICRC:
		stats->rx.errop.l3_icrc_errs++;
		break;
	case CQ_RX_ERROP_L3_PCLP:
		stats->rx.errop.l3_pclp++;
		break;
	case CQ_RX_ERROP_L4_MAL:
		stats->rx.errop.l4_malformed++;
		break;
	case CQ_RX_ERROP_L4_CHK:
		stats->rx.errop.l4_csum_errs++;
		break;
	case CQ_RX_ERROP_UDP_LEN:
		stats->rx.errop.udp_len_err++;
		break;
	case CQ_RX_ERROP_L4_PORT:
		stats->rx.errop.bad_l4_port++;
		break;
	case CQ_RX_ERROP_TCP_FLAG:
		stats->rx.errop.bad_tcp_flag++;
		break;
	case CQ_RX_ERROP_TCP_OFFSET:
		stats->rx.errop.tcp_offset_errs++;
		break;
	case CQ_RX_ERROP_L4_PCLP:
		stats->rx.errop.l4_pclp++;
		break;
	case CQ_RX_ERROP_RBDR_TRUNC:
		stats->rx.errop.pkt_truncated++;
		break;
	}

	return 1;
}
1491 | ||
1492 | /* Check for errors in the send cmp.queue entry */ | |
int nicvf_check_cqe_tx_errs(struct nicvf *nic,
			    struct cmp_queue *cq, struct cqe_send_t *cqe_tx)
{
	struct cmp_queue_stats *stats = &cq->stats;

	switch (cqe_tx->send_status) {
	case CQ_TX_ERROP_GOOD:
		stats->tx.good++;
		return 0;
	case CQ_TX_ERROP_DESC_FAULT:
		stats->tx.desc_fault++;
		break;
	case CQ_TX_ERROP_HDR_CONS_ERR:
		stats->tx.hdr_cons_err++;
		break;
	case CQ_TX_ERROP_SUBDC_ERR:
		stats->tx.subdesc_err++;
		break;
	case CQ_TX_ERROP_IMM_SIZE_OFLOW:
		stats->tx.imm_size_oflow++;
		break;
	case CQ_TX_ERROP_DATA_SEQUENCE_ERR:
		stats->tx.data_seq_err++;
		break;
	case CQ_TX_ERROP_MEM_SEQUENCE_ERR:
		stats->tx.mem_seq_err++;
		break;
	case CQ_TX_ERROP_LOCK_VIOL:
		stats->tx.lock_viol++;
		break;
	case CQ_TX_ERROP_DATA_FAULT:
		stats->tx.data_fault++;
		break;
	case CQ_TX_ERROP_TSTMP_CONFLICT:
		stats->tx.tstmp_conflict++;
		break;
	case CQ_TX_ERROP_TSTMP_TIMEOUT:
		stats->tx.tstmp_timeout++;
		break;
	case CQ_TX_ERROP_MEM_FAULT:
		stats->tx.mem_fault++;
		break;
	case CQ_TX_ERROP_CK_OVERLAP:
		stats->tx.csum_overlap++;
		break;
	case CQ_TX_ERROP_CK_OFLOW:
		stats->tx.csum_overflow++;
		break;
	}

	return 1;
}