drivers/net/ethernet/cavium/thunder/nicvf_queues.c (deliverable/linux.git, commit b294d67d48e4aef7ea287f6a4e44639eff501ee8)
1 /*
2 * Copyright (C) 2015 Cavium, Inc.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of version 2 of the GNU General Public License
6 * as published by the Free Software Foundation.
7 */
8
9 #include <linux/pci.h>
10 #include <linux/netdevice.h>
11 #include <linux/ip.h>
12 #include <linux/etherdevice.h>
13 #include <net/ip.h>
14 #include <net/tso.h>
15
16 #include "nic_reg.h"
17 #include "nic.h"
18 #include "q_struct.h"
19 #include "nicvf_queues.h"
20
21 struct rbuf_info {
22 struct page *page;
23 void *data;
24 u64 offset;
25 };
26
27 #define GET_RBUF_INFO(x) ((struct rbuf_info *)(x - NICVF_RCV_BUF_ALIGN_BYTES))
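
/* Receive buffer layout (as set up in nicvf_alloc_rcv_buffer() below): the
 * rbuf_info metadata is written at a cache-line aligned spot inside the page
 * fragment, and HW is handed the address NICVF_RCV_BUF_ALIGN_BYTES past it.
 * GET_RBUF_INFO() therefore steps back by that amount from the buffer address
 * HW reports in order to recover the metadata.
 */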
28
29 /* Poll a register for a specific value */
30 static int nicvf_poll_reg(struct nicvf *nic, int qidx,
31 u64 reg, int bit_pos, int bits, int val)
32 {
33 u64 bit_mask;
34 u64 reg_val;
35 int timeout = 10;
36
37 bit_mask = (1ULL << bits) - 1;
38 bit_mask = (bit_mask << bit_pos);
39
40 while (timeout) {
41 reg_val = nicvf_queue_reg_read(nic, reg, qidx);
42 if (((reg_val & bit_mask) >> bit_pos) == val)
43 return 0;
44 usleep_range(1000, 2000);
45 timeout--;
46 }
47 netdev_err(nic->netdev, "Poll on reg 0x%llx failed\n", reg);
48 return 1;
49 }
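
/* Note: with a 10-iteration loop and a 1-2 ms sleep per iteration, the poll
 * above gives up after roughly 10-20 ms and returns 1 to flag the timeout.
 */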
50
51 /* Allocate memory for a queue's descriptors */
52 static int nicvf_alloc_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem,
53 int q_len, int desc_size, int align_bytes)
54 {
55 dmem->q_len = q_len;
56 dmem->size = (desc_size * q_len) + align_bytes;
57 /* Save address, need it while freeing */
58 dmem->unalign_base = dma_zalloc_coherent(&nic->pdev->dev, dmem->size,
59 &dmem->dma, GFP_KERNEL);
60 if (!dmem->unalign_base)
61 return -ENOMEM;
62
63 /* Align memory address to 'align_bytes' */
64 dmem->phys_base = NICVF_ALIGNED_ADDR((u64)dmem->dma, align_bytes);
65 dmem->base = dmem->unalign_base + (dmem->phys_base - dmem->dma);
66 return 0;
67 }
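
/* The allocation above is padded by 'align_bytes' so that 'phys_base' can be
 * rounded up to the required alignment, while 'unalign_base' and 'dma' keep
 * the original addresses needed by dma_free_coherent() later.
 */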
68
69 /* Free queue's descriptor memory */
70 static void nicvf_free_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem)
71 {
72 if (!dmem)
73 return;
74
75 dma_free_coherent(&nic->pdev->dev, dmem->size,
76 dmem->unalign_base, dmem->dma);
77 dmem->unalign_base = NULL;
78 dmem->base = NULL;
79 }
80
81 /* Allocate a buffer for packet reception.
82 * HW returns the memory address where the packet was DMA'ed, not a pointer
83 * into the RBDR ring, so save the buffer's start address at the head of the
84 * fragment and align the address handed to HW to a cache-line boundary.
85 */
86 static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, gfp_t gfp,
87 u32 buf_len, u64 **rbuf)
88 {
89 u64 data;
90 struct rbuf_info *rinfo;
91 int order = get_order(buf_len);
92
93 /* Check if the request can be accommodated in the previously allocated page */
94 if (nic->rb_page) {
95 if ((nic->rb_page_offset + buf_len + buf_len) >
96 (PAGE_SIZE << order)) {
97 nic->rb_page = NULL;
98 } else {
99 nic->rb_page_offset += buf_len;
100 get_page(nic->rb_page);
101 }
102 }
103
104 /* Allocate a new page */
105 if (!nic->rb_page) {
106 nic->rb_page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN,
107 order);
108 if (!nic->rb_page) {
109 netdev_err(nic->netdev,
110 "Failed to allocate new rcv buffer\n");
111 return -ENOMEM;
112 }
113 nic->rb_page_offset = 0;
114 }
115
116 data = (u64)page_address(nic->rb_page) + nic->rb_page_offset;
117
118 /* Align buffer address to a cache line, i.e. 128 bytes */
119 rinfo = (struct rbuf_info *)(data + NICVF_RCV_BUF_ALIGN_LEN(data));
120 /* Save page address for refcount updates */
121 rinfo->page = nic->rb_page;
122 /* Store start address for later retrieval */
123 rinfo->data = (void *)data;
124 /* Store alignment offset */
125 rinfo->offset = NICVF_RCV_BUF_ALIGN_LEN(data);
126
127 data += rinfo->offset;
128
129 /* Give next aligned address to hw for DMA */
130 *rbuf = (u64 *)(data + NICVF_RCV_BUF_ALIGN_BYTES);
131 return 0;
132 }
133
134 /* Retrieve actual buffer start address and build skb for received packet */
135 static struct sk_buff *nicvf_rb_ptr_to_skb(struct nicvf *nic,
136 u64 rb_ptr, int len)
137 {
138 struct sk_buff *skb;
139 struct rbuf_info *rinfo;
140
141 rb_ptr = (u64)phys_to_virt(rb_ptr);
142 /* Get buffer start address and alignment offset */
143 rinfo = GET_RBUF_INFO(rb_ptr);
144
145 /* Now build an skb to give to stack */
146 skb = build_skb(rinfo->data, RCV_FRAG_LEN);
147 if (!skb) {
148 put_page(rinfo->page);
149 return NULL;
150 }
151
152 /* Set correct skb->data */
153 skb_reserve(skb, rinfo->offset + NICVF_RCV_BUF_ALIGN_BYTES);
154
155 prefetch((void *)rb_ptr);
156 return skb;
157 }
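
/* build_skb() above wraps the whole RCV_FRAG_LEN fragment starting at
 * rinfo->data; skb_reserve() then advances skb->data past the metadata and
 * alignment padding to the address where HW actually DMA'ed the packet.
 */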
158
159 /* Allocate RBDR ring and populate receive buffers */
160 static int nicvf_init_rbdr(struct nicvf *nic, struct rbdr *rbdr,
161 int ring_len, int buf_size)
162 {
163 int idx;
164 u64 *rbuf;
165 struct rbdr_entry_t *desc;
166 int err;
167
168 err = nicvf_alloc_q_desc_mem(nic, &rbdr->dmem, ring_len,
169 sizeof(struct rbdr_entry_t),
170 NICVF_RCV_BUF_ALIGN_BYTES);
171 if (err)
172 return err;
173
174 rbdr->desc = rbdr->dmem.base;
175 /* Buffer size has to be in multiples of 128 bytes */
176 rbdr->dma_size = buf_size;
177 rbdr->enable = true;
178 rbdr->thresh = RBDR_THRESH;
179
180 nic->rb_page = NULL;
181 for (idx = 0; idx < ring_len; idx++) {
182 err = nicvf_alloc_rcv_buffer(nic, GFP_KERNEL, RCV_FRAG_LEN,
183 &rbuf);
184 if (err)
185 return err;
186
187 desc = GET_RBDR_DESC(rbdr, idx);
188 desc->buf_addr = virt_to_phys(rbuf) >> NICVF_RCV_BUF_ALIGN;
189 }
190 return 0;
191 }
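
/* RBDR descriptors store the buffer's physical address right-shifted by
 * NICVF_RCV_BUF_ALIGN; since every buffer handed to HW is aligned to
 * (1 << NICVF_RCV_BUF_ALIGN) bytes, no information is lost by the shift.
 */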
192
193 /* Free RBDR ring and its receive buffers */
194 static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr)
195 {
196 int head, tail;
197 u64 buf_addr;
198 struct rbdr_entry_t *desc;
199 struct rbuf_info *rinfo;
200
201 if (!rbdr)
202 return;
203
204 rbdr->enable = false;
205 if (!rbdr->dmem.base)
206 return;
207
208 head = rbdr->head;
209 tail = rbdr->tail;
210
211 /* Release receive buffer pages */
212 while (head != tail) {
213 desc = GET_RBDR_DESC(rbdr, head);
214 buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN;
215 rinfo = GET_RBUF_INFO((u64)phys_to_virt(buf_addr));
216 put_page(rinfo->page);
217 head++;
218 head &= (rbdr->dmem.q_len - 1);
219 }
220 /* Release the tail descriptor's buffer page */
221 desc = GET_RBDR_DESC(rbdr, tail);
222 buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN;
223 rinfo = GET_RBUF_INFO((u64)phys_to_virt(buf_addr));
224 put_page(rinfo->page);
225
226 /* Free RBDR ring */
227 nicvf_free_q_desc_mem(nic, &rbdr->dmem);
228 }
229
230 /* Refill receive buffer descriptors with new buffers.
231 */
232 static void nicvf_refill_rbdr(struct nicvf *nic, gfp_t gfp)
233 {
234 struct queue_set *qs = nic->qs;
235 int rbdr_idx = qs->rbdr_cnt;
236 int tail, qcount;
237 int refill_rb_cnt;
238 struct rbdr *rbdr;
239 struct rbdr_entry_t *desc;
240 u64 *rbuf;
241 int new_rb = 0;
242
243 refill:
244 if (!rbdr_idx)
245 return;
246 rbdr_idx--;
247 rbdr = &qs->rbdr[rbdr_idx];
248 /* Check if it's enabled */
249 if (!rbdr->enable)
250 goto next_rbdr;
251
252 /* Get the number of descriptors to be refilled */
253 qcount = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, rbdr_idx);
254 qcount &= 0x7FFFF;
255 /* Doorbell can be rung with at most ring size minus 1 */
256 if (qcount >= (qs->rbdr_len - 1))
257 goto next_rbdr;
258 else
259 refill_rb_cnt = qs->rbdr_len - qcount - 1;
260
261 /* Start filling descs from tail */
262 tail = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, rbdr_idx) >> 3;
263 while (refill_rb_cnt) {
264 tail++;
265 tail &= (rbdr->dmem.q_len - 1);
266
267 if (nicvf_alloc_rcv_buffer(nic, gfp, RCV_FRAG_LEN, &rbuf))
268 break;
269
270 desc = GET_RBDR_DESC(rbdr, tail);
271 desc->buf_addr = virt_to_phys(rbuf) >> NICVF_RCV_BUF_ALIGN;
272 refill_rb_cnt--;
273 new_rb++;
274 }
275
276 /* make sure all memory stores are done before ringing doorbell */
277 smp_wmb();
278
279 /* Check if buffer allocation failed */
280 if (refill_rb_cnt)
281 nic->rb_alloc_fail = true;
282 else
283 nic->rb_alloc_fail = false;
284
285 /* Notify HW */
286 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR,
287 rbdr_idx, new_rb);
288 next_rbdr:
289 /* Re-enable RBDR interrupts only if buffer allocation succeeded */
290 if (!nic->rb_alloc_fail && rbdr->enable)
291 nicvf_enable_intr(nic, NICVF_INTR_RBDR, rbdr_idx);
292
293 if (rbdr_idx)
294 goto refill;
295 }
296
297 /* Allocate rcv buffers in non-atomic (process) context for a better chance of success */
298 void nicvf_rbdr_work(struct work_struct *work)
299 {
300 struct nicvf *nic = container_of(work, struct nicvf, rbdr_work.work);
301
302 nicvf_refill_rbdr(nic, GFP_KERNEL);
303 if (nic->rb_alloc_fail)
304 schedule_delayed_work(&nic->rbdr_work, msecs_to_jiffies(10));
305 else
306 nic->rb_work_scheduled = false;
307 }
308
309 /* In Softirq context, alloc rcv buffers in atomic mode */
310 void nicvf_rbdr_task(unsigned long data)
311 {
312 struct nicvf *nic = (struct nicvf *)data;
313
314 nicvf_refill_rbdr(nic, GFP_ATOMIC);
315 if (nic->rb_alloc_fail) {
316 nic->rb_work_scheduled = true;
317 schedule_delayed_work(&nic->rbdr_work, msecs_to_jiffies(10));
318 }
319 }
320
321 /* Initialize completion queue */
322 static int nicvf_init_cmp_queue(struct nicvf *nic,
323 struct cmp_queue *cq, int q_len)
324 {
325 int err;
326
327 err = nicvf_alloc_q_desc_mem(nic, &cq->dmem, q_len, CMP_QUEUE_DESC_SIZE,
328 NICVF_CQ_BASE_ALIGN_BYTES);
329 if (err)
330 return err;
331
332 cq->desc = cq->dmem.base;
333 cq->thresh = CMP_QUEUE_CQE_THRESH;
334 nic->cq_coalesce_usecs = (CMP_QUEUE_TIMER_THRESH * 0.05) - 1;
335
336 return 0;
337 }
338
339 static void nicvf_free_cmp_queue(struct nicvf *nic, struct cmp_queue *cq)
340 {
341 if (!cq)
342 return;
343 if (!cq->dmem.base)
344 return;
345
346 nicvf_free_q_desc_mem(nic, &cq->dmem);
347 }
348
349 /* Initialize transmit queue */
350 static int nicvf_init_snd_queue(struct nicvf *nic,
351 struct snd_queue *sq, int q_len)
352 {
353 int err;
354
355 err = nicvf_alloc_q_desc_mem(nic, &sq->dmem, q_len, SND_QUEUE_DESC_SIZE,
356 NICVF_SQ_BASE_ALIGN_BYTES);
357 if (err)
358 return err;
359
360 sq->desc = sq->dmem.base;
361 sq->skbuff = kcalloc(q_len, sizeof(u64), GFP_KERNEL);
362 if (!sq->skbuff)
363 return -ENOMEM;
364 sq->head = 0;
365 sq->tail = 0;
366 atomic_set(&sq->free_cnt, q_len - 1);
367 sq->thresh = SND_QUEUE_THRESH;
368
369 /* Preallocate memory for TSO segment's header */
370 sq->tso_hdrs = dma_alloc_coherent(&nic->pdev->dev,
371 q_len * TSO_HEADER_SIZE,
372 &sq->tso_hdrs_phys, GFP_KERNEL);
373 if (!sq->tso_hdrs)
374 return -ENOMEM;
375
376 return 0;
377 }
378
379 static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq)
380 {
381 if (!sq)
382 return;
383 if (!sq->dmem.base)
384 return;
385
386 if (sq->tso_hdrs)
387 dma_free_coherent(&nic->pdev->dev,
388 sq->dmem.q_len * TSO_HEADER_SIZE,
389 sq->tso_hdrs, sq->tso_hdrs_phys);
390
391 kfree(sq->skbuff);
392 nicvf_free_q_desc_mem(nic, &sq->dmem);
393 }
394
395 static void nicvf_reclaim_snd_queue(struct nicvf *nic,
396 struct queue_set *qs, int qidx)
397 {
398 /* Disable send queue */
399 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, 0);
400 /* Check if SQ is stopped */
401 if (nicvf_poll_reg(nic, qidx, NIC_QSET_SQ_0_7_STATUS, 21, 1, 0x01))
402 return;
403 /* Reset send queue */
404 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);
405 }
406
407 static void nicvf_reclaim_rcv_queue(struct nicvf *nic,
408 struct queue_set *qs, int qidx)
409 {
410 union nic_mbx mbx = {};
411
412 /* Make sure all packets in the pipeline are written back into mem */
413 mbx.msg.msg = NIC_MBOX_MSG_RQ_SW_SYNC;
414 nicvf_send_msg_to_pf(nic, &mbx);
415 }
416
417 static void nicvf_reclaim_cmp_queue(struct nicvf *nic,
418 struct queue_set *qs, int qidx)
419 {
420 /* Disable timer threshold (doesn't get reset upon CQ reset) */
421 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx, 0);
422 /* Disable completion queue */
423 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, 0);
424 /* Reset completion queue */
425 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);
426 }
427
428 static void nicvf_reclaim_rbdr(struct nicvf *nic,
429 struct rbdr *rbdr, int qidx)
430 {
431 u64 tmp, fifo_state;
432 int timeout = 10;
433
434 /* Save head and tail pointers for freeing up buffers */
435 rbdr->head = nicvf_queue_reg_read(nic,
436 NIC_QSET_RBDR_0_1_HEAD,
437 qidx) >> 3;
438 rbdr->tail = nicvf_queue_reg_read(nic,
439 NIC_QSET_RBDR_0_1_TAIL,
440 qidx) >> 3;
441
442 /* If RBDR FIFO is in 'FAIL' state then do a reset first
443 * before reclaiming.
444 */
445 fifo_state = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, qidx);
446 if (((fifo_state >> 62) & 0x03) == 0x3)
447 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
448 qidx, NICVF_RBDR_RESET);
449
450 /* Disable RBDR */
451 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0);
452 if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
453 return;
454 while (1) {
455 tmp = nicvf_queue_reg_read(nic,
456 NIC_QSET_RBDR_0_1_PREFETCH_STATUS,
457 qidx);
458 if ((tmp & 0xFFFFFFFF) == ((tmp >> 32) & 0xFFFFFFFF))
459 break;
460 usleep_range(1000, 2000);
461 timeout--;
462 if (!timeout) {
463 netdev_err(nic->netdev,
464 "Failed polling on prefetch status\n");
465 return;
466 }
467 }
468 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
469 qidx, NICVF_RBDR_RESET);
470
471 if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x02))
472 return;
473 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0x00);
474 if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
475 return;
476 }
477
478 void nicvf_config_vlan_stripping(struct nicvf *nic, netdev_features_t features)
479 {
480 u64 rq_cfg;
481 int sqs;
482
483 rq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_RQ_GEN_CFG, 0);
484
485 /* Enable stripping of the first VLAN tag */
486 if (features & NETIF_F_HW_VLAN_CTAG_RX)
487 rq_cfg |= (1ULL << 25);
488 else
489 rq_cfg &= ~(1ULL << 25);
490 nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0, rq_cfg);
491
492 /* Configure Secondary Qsets, if any */
493 for (sqs = 0; sqs < nic->sqs_count; sqs++)
494 if (nic->snicvf[sqs])
495 nicvf_queue_reg_write(nic->snicvf[sqs],
496 NIC_QSET_RQ_GEN_CFG, 0, rq_cfg);
497 }
498
499 /* Configures receive queue */
500 static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
501 int qidx, bool enable)
502 {
503 union nic_mbx mbx = {};
504 struct rcv_queue *rq;
505 struct rq_cfg rq_cfg;
506
507 rq = &qs->rq[qidx];
508 rq->enable = enable;
509
510 /* Disable receive queue */
511 nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, 0);
512
513 if (!rq->enable) {
514 nicvf_reclaim_rcv_queue(nic, qs, qidx);
515 return;
516 }
517
518 rq->cq_qs = qs->vnic_id;
519 rq->cq_idx = qidx;
520 rq->start_rbdr_qs = qs->vnic_id;
521 rq->start_qs_rbdr_idx = qs->rbdr_cnt - 1;
522 rq->cont_rbdr_qs = qs->vnic_id;
523 rq->cont_qs_rbdr_idx = qs->rbdr_cnt - 1;
524 /* All writes of RBDR data are to be loaded into the L2 cache as well */
525 rq->caching = 1;
526
527 /* Send a mailbox msg to PF to config RQ */
528 mbx.rq.msg = NIC_MBOX_MSG_RQ_CFG;
529 mbx.rq.qs_num = qs->vnic_id;
530 mbx.rq.rq_num = qidx;
531 mbx.rq.cfg = (rq->caching << 26) | (rq->cq_qs << 19) |
532 (rq->cq_idx << 16) | (rq->cont_rbdr_qs << 9) |
533 (rq->cont_qs_rbdr_idx << 8) |
534 (rq->start_rbdr_qs << 1) | (rq->start_qs_rbdr_idx);
535 nicvf_send_msg_to_pf(nic, &mbx);
536
537 mbx.rq.msg = NIC_MBOX_MSG_RQ_BP_CFG;
538 mbx.rq.cfg = (1ULL << 63) | (1ULL << 62) | (qs->vnic_id << 0);
539 nicvf_send_msg_to_pf(nic, &mbx);
540
541 /* RQ drop config
542 * Enable CQ drop to reserve sufficient CQEs for all tx packets
543 */
544 mbx.rq.msg = NIC_MBOX_MSG_RQ_DROP_CFG;
545 mbx.rq.cfg = (1ULL << 62) | (RQ_CQ_DROP << 8);
546 nicvf_send_msg_to_pf(nic, &mbx);
547
548 nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0, 0x00);
549 if (!nic->sqs_mode)
550 nicvf_config_vlan_stripping(nic, nic->netdev->features);
551
552 /* Enable Receive queue */
553 rq_cfg.ena = 1;
554 rq_cfg.tcp_ena = 0;
555 nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, *(u64 *)&rq_cfg);
556 }
557
558 /* Configures completion queue */
559 void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs,
560 int qidx, bool enable)
561 {
562 struct cmp_queue *cq;
563 struct cq_cfg cq_cfg;
564
565 cq = &qs->cq[qidx];
566 cq->enable = enable;
567
568 if (!cq->enable) {
569 nicvf_reclaim_cmp_queue(nic, qs, qidx);
570 return;
571 }
572
573 /* Reset completion queue */
574 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);
575
576 if (!cq->enable)
577 return;
578
579 spin_lock_init(&cq->lock);
580 /* Set completion queue base address */
581 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_BASE,
582 qidx, (u64)(cq->dmem.phys_base));
583
584 /* Enable Completion queue */
585 cq_cfg.ena = 1;
586 cq_cfg.reset = 0;
587 cq_cfg.caching = 0;
588 cq_cfg.qsize = CMP_QSIZE;
589 cq_cfg.avg_con = 0;
590 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, *(u64 *)&cq_cfg);
591
592 /* Set threshold value for interrupt generation */
593 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_THRESH, qidx, cq->thresh);
594 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2,
595 qidx, nic->cq_coalesce_usecs);
596 }
597
598 /* Configures transmit queue */
599 static void nicvf_snd_queue_config(struct nicvf *nic, struct queue_set *qs,
600 int qidx, bool enable)
601 {
602 union nic_mbx mbx = {};
603 struct snd_queue *sq;
604 struct sq_cfg sq_cfg;
605
606 sq = &qs->sq[qidx];
607 sq->enable = enable;
608
609 if (!sq->enable) {
610 nicvf_reclaim_snd_queue(nic, qs, qidx);
611 return;
612 }
613
614 /* Reset send queue */
615 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);
616
617 sq->cq_qs = qs->vnic_id;
618 sq->cq_idx = qidx;
619
620 /* Send a mailbox msg to PF to config SQ */
621 mbx.sq.msg = NIC_MBOX_MSG_SQ_CFG;
622 mbx.sq.qs_num = qs->vnic_id;
623 mbx.sq.sq_num = qidx;
624 mbx.sq.cfg = (sq->cq_qs << 3) | sq->cq_idx;
625 nicvf_send_msg_to_pf(nic, &mbx);
626
627 /* Set queue base address */
628 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_BASE,
629 qidx, (u64)(sq->dmem.phys_base));
630
631 /* Enable send queue & set queue size */
632 sq_cfg.ena = 1;
633 sq_cfg.reset = 0;
634 sq_cfg.ldwb = 0;
635 sq_cfg.qsize = SND_QSIZE;
636 sq_cfg.tstmp_bgx_intf = 0;
637 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, *(u64 *)&sq_cfg);
638
639 /* Set threshold value for interrupt generation */
640 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_THRESH, qidx, sq->thresh);
641
642 /* Set queue:cpu affinity for better load distribution */
643 if (cpu_online(qidx)) {
644 cpumask_set_cpu(qidx, &sq->affinity_mask);
645 netif_set_xps_queue(nic->netdev,
646 &sq->affinity_mask, qidx);
647 }
648 }
649
650 /* Configures receive buffer descriptor ring */
651 static void nicvf_rbdr_config(struct nicvf *nic, struct queue_set *qs,
652 int qidx, bool enable)
653 {
654 struct rbdr *rbdr;
655 struct rbdr_cfg rbdr_cfg;
656
657 rbdr = &qs->rbdr[qidx];
658 nicvf_reclaim_rbdr(nic, rbdr, qidx);
659 if (!enable)
660 return;
661
662 /* Set descriptor base address */
663 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_BASE,
664 qidx, (u64)(rbdr->dmem.phys_base));
665
666 /* Enable RBDR & set queue size */
667 /* Buffer size should be in multiples of 128 bytes */
668 rbdr_cfg.ena = 1;
669 rbdr_cfg.reset = 0;
670 rbdr_cfg.ldwb = 0;
671 rbdr_cfg.qsize = RBDR_SIZE;
672 rbdr_cfg.avg_con = 0;
673 rbdr_cfg.lines = rbdr->dma_size / 128;
674 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
675 qidx, *(u64 *)&rbdr_cfg);
676
677 /* Notify HW */
678 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR,
679 qidx, qs->rbdr_len - 1);
680
681 /* Set threshold value for interrupt generation */
682 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_THRESH,
683 qidx, rbdr->thresh - 1);
684 }
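
/* 'lines' above expresses the receive buffer size in 128-byte cache lines,
 * which is why dma_size must be a multiple of 128 bytes.
 */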
685
686 /* Requests PF to assign and enable Qset */
687 void nicvf_qset_config(struct nicvf *nic, bool enable)
688 {
689 union nic_mbx mbx = {};
690 struct queue_set *qs = nic->qs;
691 struct qs_cfg *qs_cfg;
692
693 if (!qs) {
694 netdev_warn(nic->netdev,
695 "Qset is still not allocated, don't init queues\n");
696 return;
697 }
698
699 qs->enable = enable;
700 qs->vnic_id = nic->vf_id;
701
702 /* Send a mailbox msg to PF to config Qset */
703 mbx.qs.msg = NIC_MBOX_MSG_QS_CFG;
704 mbx.qs.num = qs->vnic_id;
705
706 mbx.qs.cfg = 0;
707 qs_cfg = (struct qs_cfg *)&mbx.qs.cfg;
708 if (qs->enable) {
709 qs_cfg->ena = 1;
710 #ifdef __BIG_ENDIAN
711 qs_cfg->be = 1;
712 #endif
713 qs_cfg->vnic = qs->vnic_id;
714 }
715 nicvf_send_msg_to_pf(nic, &mbx);
716 }
717
718 static void nicvf_free_resources(struct nicvf *nic)
719 {
720 int qidx;
721 struct queue_set *qs = nic->qs;
722
723 /* Free receive buffer descriptor ring */
724 for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
725 nicvf_free_rbdr(nic, &qs->rbdr[qidx]);
726
727 /* Free completion queue */
728 for (qidx = 0; qidx < qs->cq_cnt; qidx++)
729 nicvf_free_cmp_queue(nic, &qs->cq[qidx]);
730
731 /* Free send queue */
732 for (qidx = 0; qidx < qs->sq_cnt; qidx++)
733 nicvf_free_snd_queue(nic, &qs->sq[qidx]);
734 }
735
736 static int nicvf_alloc_resources(struct nicvf *nic)
737 {
738 int qidx;
739 struct queue_set *qs = nic->qs;
740
741 /* Alloc receive buffer descriptor ring */
742 for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
743 if (nicvf_init_rbdr(nic, &qs->rbdr[qidx], qs->rbdr_len,
744 DMA_BUFFER_LEN))
745 goto alloc_fail;
746 }
747
748 /* Alloc send queue */
749 for (qidx = 0; qidx < qs->sq_cnt; qidx++) {
750 if (nicvf_init_snd_queue(nic, &qs->sq[qidx], qs->sq_len))
751 goto alloc_fail;
752 }
753
754 /* Alloc completion queue */
755 for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
756 if (nicvf_init_cmp_queue(nic, &qs->cq[qidx], qs->cq_len))
757 goto alloc_fail;
758 }
759
760 return 0;
761 alloc_fail:
762 nicvf_free_resources(nic);
763 return -ENOMEM;
764 }
765
766 int nicvf_set_qset_resources(struct nicvf *nic)
767 {
768 struct queue_set *qs;
769
770 qs = devm_kzalloc(&nic->pdev->dev, sizeof(*qs), GFP_KERNEL);
771 if (!qs)
772 return -ENOMEM;
773 nic->qs = qs;
774
775 /* Set count of each queue */
776 qs->rbdr_cnt = RBDR_CNT;
777 qs->rq_cnt = RCV_QUEUE_CNT;
778 qs->sq_cnt = SND_QUEUE_CNT;
779 qs->cq_cnt = CMP_QUEUE_CNT;
780
781 /* Set queue lengths */
782 qs->rbdr_len = RCV_BUF_COUNT;
783 qs->sq_len = SND_QUEUE_LEN;
784 qs->cq_len = CMP_QUEUE_LEN;
785 return 0;
786 }
787
788 int nicvf_config_data_transfer(struct nicvf *nic, bool enable)
789 {
790 bool disable = false;
791 struct queue_set *qs = nic->qs;
792 int qidx;
793
794 if (!qs)
795 return 0;
796
797 if (enable) {
798 if (nicvf_alloc_resources(nic))
799 return -ENOMEM;
800
801 for (qidx = 0; qidx < qs->sq_cnt; qidx++)
802 nicvf_snd_queue_config(nic, qs, qidx, enable);
803 for (qidx = 0; qidx < qs->cq_cnt; qidx++)
804 nicvf_cmp_queue_config(nic, qs, qidx, enable);
805 for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
806 nicvf_rbdr_config(nic, qs, qidx, enable);
807 for (qidx = 0; qidx < qs->rq_cnt; qidx++)
808 nicvf_rcv_queue_config(nic, qs, qidx, enable);
809 } else {
810 for (qidx = 0; qidx < qs->rq_cnt; qidx++)
811 nicvf_rcv_queue_config(nic, qs, qidx, disable);
812 for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
813 nicvf_rbdr_config(nic, qs, qidx, disable);
814 for (qidx = 0; qidx < qs->sq_cnt; qidx++)
815 nicvf_snd_queue_config(nic, qs, qidx, disable);
816 for (qidx = 0; qidx < qs->cq_cnt; qidx++)
817 nicvf_cmp_queue_config(nic, qs, qidx, disable);
818
819 nicvf_free_resources(nic);
820 }
821
822 return 0;
823 }
824
825 /* Reserve 'desc_cnt' free descriptors from the SQ
826 * and return the index of the first one
827 */
828 static inline int nicvf_get_sq_desc(struct snd_queue *sq, int desc_cnt)
829 {
830 int qentry;
831
832 qentry = sq->tail;
833 atomic_sub(desc_cnt, &sq->free_cnt);
834 sq->tail += desc_cnt;
835 sq->tail &= (sq->dmem.q_len - 1);
836
837 return qentry;
838 }
839
840 /* Free descriptor back to SQ for future use */
841 void nicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt)
842 {
843 atomic_add(desc_cnt, &sq->free_cnt);
844 sq->head += desc_cnt;
845 sq->head &= (sq->dmem.q_len - 1);
846 }
847
848 static inline int nicvf_get_nxt_sqentry(struct snd_queue *sq, int qentry)
849 {
850 qentry++;
851 qentry &= (sq->dmem.q_len - 1);
852 return qentry;
853 }
854
855 void nicvf_sq_enable(struct nicvf *nic, struct snd_queue *sq, int qidx)
856 {
857 u64 sq_cfg;
858
859 sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
860 sq_cfg |= NICVF_SQ_EN;
861 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg);
862 /* Ring doorbell so that H/W restarts processing SQEs */
863 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, qidx, 0);
864 }
865
866 void nicvf_sq_disable(struct nicvf *nic, int qidx)
867 {
868 u64 sq_cfg;
869
870 sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
871 sq_cfg &= ~NICVF_SQ_EN;
872 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg);
873 }
874
875 void nicvf_sq_free_used_descs(struct net_device *netdev, struct snd_queue *sq,
876 int qidx)
877 {
878 u64 head, tail;
879 struct sk_buff *skb;
880 struct nicvf *nic = netdev_priv(netdev);
881 struct sq_hdr_subdesc *hdr;
882
883 head = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_HEAD, qidx) >> 4;
884 tail = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_TAIL, qidx) >> 4;
885 while (sq->head != head) {
886 hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, sq->head);
887 if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER) {
888 nicvf_put_sq_desc(sq, 1);
889 continue;
890 }
891 skb = (struct sk_buff *)sq->skbuff[sq->head];
892 if (skb)
893 dev_kfree_skb_any(skb);
894 atomic64_add(1, (atomic64_t *)&netdev->stats.tx_packets);
895 atomic64_add(hdr->tot_len,
896 (atomic64_t *)&netdev->stats.tx_bytes);
897 nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
898 }
899 }
900
901 /* Calculate the number of SQ subdescriptors needed to transmit all
902 * segments of this TSO packet.
903 * Taken from 'Tilera network driver' with a minor modification.
904 */
905 static int nicvf_tso_count_subdescs(struct sk_buff *skb)
906 {
907 struct skb_shared_info *sh = skb_shinfo(skb);
908 unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
909 unsigned int data_len = skb->len - sh_len;
910 unsigned int p_len = sh->gso_size;
911 long f_id = -1; /* id of the current fragment */
912 long f_size = skb_headlen(skb) - sh_len; /* current fragment size */
913 long f_used = 0; /* bytes used from the current fragment */
914 long n; /* size of the current piece of payload */
915 int num_edescs = 0;
916 int segment;
917
918 for (segment = 0; segment < sh->gso_segs; segment++) {
919 unsigned int p_used = 0;
920
921 /* One edesc for header and for each piece of the payload. */
922 for (num_edescs++; p_used < p_len; num_edescs++) {
923 /* Advance as needed. */
924 while (f_used >= f_size) {
925 f_id++;
926 f_size = skb_frag_size(&sh->frags[f_id]);
927 f_used = 0;
928 }
929
930 /* Use bytes from the current fragment. */
931 n = p_len - p_used;
932 if (n > f_size - f_used)
933 n = f_size - f_used;
934 f_used += n;
935 p_used += n;
936 }
937
938 /* The last segment may be less than gso_size. */
939 data_len -= p_len;
940 if (data_len < p_len)
941 p_len = data_len;
942 }
943
944 /* '+ gso_segs' accounts for one SQ_HDR_SUBDESC per segment */
945 return num_edescs + sh->gso_segs;
946 }
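
/* Worked example (for illustration): a linear TSO skb that is split into 4
 * segments needs, per segment, one gather subdesc for the rebuilt header and
 * one for its payload piece (8 "edescs" total), plus one SQ_HDR subdesc per
 * segment from the '+ gso_segs' term, i.e. 12 subdescriptors in all.
 */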
947
948 /* Get the number of SQ descriptors needed to xmit this skb */
949 static int nicvf_sq_subdesc_required(struct nicvf *nic, struct sk_buff *skb)
950 {
951 int subdesc_cnt = MIN_SQ_DESC_PER_PKT_XMIT;
952
953 if (skb_shinfo(skb)->gso_size) {
954 subdesc_cnt = nicvf_tso_count_subdescs(skb);
955 return subdesc_cnt;
956 }
957
958 if (skb_shinfo(skb)->nr_frags)
959 subdesc_cnt += skb_shinfo(skb)->nr_frags;
960
961 return subdesc_cnt;
962 }
963
964 /* Add SQ HEADER subdescriptor.
965 * First subdescriptor for every send descriptor.
966 */
967 static inline void
968 nicvf_sq_add_hdr_subdesc(struct snd_queue *sq, int qentry,
969 int subdesc_cnt, struct sk_buff *skb, int len)
970 {
971 int proto;
972 struct sq_hdr_subdesc *hdr;
973
974 hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry);
975 sq->skbuff[qentry] = (u64)skb;
976
977 memset(hdr, 0, SND_QUEUE_DESC_SIZE);
978 hdr->subdesc_type = SQ_DESC_TYPE_HEADER;
979 /* Enable notification via CQE after processing SQE */
980 hdr->post_cqe = 1;
981 /* No of subdescriptors following this */
982 hdr->subdesc_cnt = subdesc_cnt;
983 hdr->tot_len = len;
984
985 /* Offload checksum calculation to HW */
986 if (skb->ip_summed == CHECKSUM_PARTIAL) {
987 hdr->csum_l3 = 1; /* Enable IP csum calculation */
988 hdr->l3_offset = skb_network_offset(skb);
989 hdr->l4_offset = skb_transport_offset(skb);
990
991 proto = ip_hdr(skb)->protocol;
992 switch (proto) {
993 case IPPROTO_TCP:
994 hdr->csum_l4 = SEND_L4_CSUM_TCP;
995 break;
996 case IPPROTO_UDP:
997 hdr->csum_l4 = SEND_L4_CSUM_UDP;
998 break;
999 case IPPROTO_SCTP:
1000 hdr->csum_l4 = SEND_L4_CSUM_SCTP;
1001 break;
1002 }
1003 }
1004 }
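
/* Note: the L4 protocol above is read from ip_hdr(skb), so this checksum
 * offload path as written assumes an IPv4 header; hdr->l3_offset and
 * hdr->l4_offset simply tell HW where the network and transport headers begin.
 */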
1005
1006 /* SQ GATHER subdescriptor
1007 * Must follow HDR descriptor
1008 */
1009 static inline void nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry,
1010 int size, u64 data)
1011 {
1012 struct sq_gather_subdesc *gather;
1013
1014 qentry &= (sq->dmem.q_len - 1);
1015 gather = (struct sq_gather_subdesc *)GET_SQ_DESC(sq, qentry);
1016
1017 memset(gather, 0, SND_QUEUE_DESC_SIZE);
1018 gather->subdesc_type = SQ_DESC_TYPE_GATHER;
1019 gather->ld_type = NIC_SEND_LD_TYPE_E_LDD;
1020 gather->size = size;
1021 gather->addr = data;
1022 }
1023
1024 /* Segment a TSO packet into 'gso_size' segments and append
1025 * them to SQ for transfer
1026 */
1027 static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq,
1028 int qentry, struct sk_buff *skb)
1029 {
1030 struct tso_t tso;
1031 int seg_subdescs = 0, desc_cnt = 0;
1032 int seg_len, total_len, data_left;
1033 int hdr_qentry = qentry;
1034 int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
1035
1036 tso_start(skb, &tso);
1037 total_len = skb->len - hdr_len;
1038 while (total_len > 0) {
1039 char *hdr;
1040
1041 /* Save Qentry for adding HDR_SUBDESC at the end */
1042 hdr_qentry = qentry;
1043
1044 data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
1045 total_len -= data_left;
1046
1047 /* Add segment's header */
1048 qentry = nicvf_get_nxt_sqentry(sq, qentry);
1049 hdr = sq->tso_hdrs + qentry * TSO_HEADER_SIZE;
1050 tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
1051 nicvf_sq_add_gather_subdesc(sq, qentry, hdr_len,
1052 sq->tso_hdrs_phys +
1053 qentry * TSO_HEADER_SIZE);
1054 /* HDR_SUBDESC + GATHER */
1055 seg_subdescs = 2;
1056 seg_len = hdr_len;
1057
1058 /* Add segment's payload fragments */
1059 while (data_left > 0) {
1060 int size;
1061
1062 size = min_t(int, tso.size, data_left);
1063
1064 qentry = nicvf_get_nxt_sqentry(sq, qentry);
1065 nicvf_sq_add_gather_subdesc(sq, qentry, size,
1066 virt_to_phys(tso.data));
1067 seg_subdescs++;
1068 seg_len += size;
1069
1070 data_left -= size;
1071 tso_build_data(skb, &tso, size);
1072 }
1073 nicvf_sq_add_hdr_subdesc(sq, hdr_qentry,
1074 seg_subdescs - 1, skb, seg_len);
1075 sq->skbuff[hdr_qentry] = (u64)NULL;
1076 qentry = nicvf_get_nxt_sqentry(sq, qentry);
1077
1078 desc_cnt += seg_subdescs;
1079 }
1080 /* Save SKB in the last segment for freeing */
1081 sq->skbuff[hdr_qentry] = (u64)skb;
1082
1083 /* make sure all memory stores are done before ringing doorbell */
1084 smp_wmb();
1085
1086 /* Inform HW to xmit all TSO segments */
1087 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR,
1088 skb_get_queue_mapping(skb), desc_cnt);
1089 nic->drv_stats.tx_tso++;
1090 return 1;
1091 }
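
/* Only the last segment's HDR subdesc keeps the skb pointer (the others are
 * set to NULL above), so the skb can be freed exactly once, when that final
 * segment's completion is handled.
 */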
1092
1093 /* Append an skb to a SQ for packet transfer. */
1094 int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb)
1095 {
1096 int i, size;
1097 int subdesc_cnt;
1098 int sq_num, qentry;
1099 struct queue_set *qs = nic->qs;
1100 struct snd_queue *sq;
1101
1102 sq_num = skb_get_queue_mapping(skb);
1103 sq = &qs->sq[sq_num];
1104
1105 subdesc_cnt = nicvf_sq_subdesc_required(nic, skb);
1106 if (subdesc_cnt > atomic_read(&sq->free_cnt))
1107 goto append_fail;
1108
1109 qentry = nicvf_get_sq_desc(sq, subdesc_cnt);
1110
1111 /* Check if it's a TSO packet */
1112 if (skb_shinfo(skb)->gso_size)
1113 return nicvf_sq_append_tso(nic, sq, qentry, skb);
1114
1115 /* Add SQ header subdesc */
1116 nicvf_sq_add_hdr_subdesc(sq, qentry, subdesc_cnt - 1, skb, skb->len);
1117
1118 /* Add SQ gather subdescs */
1119 qentry = nicvf_get_nxt_sqentry(sq, qentry);
1120 size = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len;
1121 nicvf_sq_add_gather_subdesc(sq, qentry, size, virt_to_phys(skb->data));
1122
1123 /* Check for scattered buffer */
1124 if (!skb_is_nonlinear(skb))
1125 goto doorbell;
1126
1127 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1128 const struct skb_frag_struct *frag;
1129
1130 frag = &skb_shinfo(skb)->frags[i];
1131
1132 qentry = nicvf_get_nxt_sqentry(sq, qentry);
1133 size = skb_frag_size(frag);
1134 nicvf_sq_add_gather_subdesc(sq, qentry, size,
1135 virt_to_phys(
1136 skb_frag_address(frag)));
1137 }
1138
1139 doorbell:
1140 /* make sure all memory stores are done before ringing doorbell */
1141 smp_wmb();
1142
1143 /* Inform HW to xmit new packet */
1144 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR,
1145 sq_num, subdesc_cnt);
1146 return 1;
1147
1148 append_fail:
1149 netdev_dbg(nic->netdev, "Not enough SQ descriptors to xmit pkt\n");
1150 return 0;
1151 }
1152
1153 static inline unsigned frag_num(unsigned i)
1154 {
1155 #ifdef __BIG_ENDIAN
1156 return (i & ~3) + 3 - (i & 3);
1157 #else
1158 return i;
1159 #endif
1160 }
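
/* The CQE packs the per-fragment receive buffer lengths as 16-bit fields,
 * four to a 64-bit word; on big-endian hosts the u16 lanes within each word
 * end up in reverse order, so frag_num() remaps the index within each group
 * of four to compensate.
 */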
1161
1162 /* Returns SKB for a received packet */
1163 struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
1164 {
1165 int frag;
1166 int payload_len = 0;
1167 struct sk_buff *skb = NULL;
1168 struct sk_buff *skb_frag = NULL;
1169 struct sk_buff *prev_frag = NULL;
1170 u16 *rb_lens = NULL;
1171 u64 *rb_ptrs = NULL;
1172
1173 rb_lens = (void *)cqe_rx + (3 * sizeof(u64));
1174 rb_ptrs = (void *)cqe_rx + (6 * sizeof(u64));
1175
1176 netdev_dbg(nic->netdev, "%s rb_cnt %d rb0_ptr %llx rb0_sz %d\n",
1177 __func__, cqe_rx->rb_cnt, cqe_rx->rb0_ptr, cqe_rx->rb0_sz);
1178
1179 for (frag = 0; frag < cqe_rx->rb_cnt; frag++) {
1180 payload_len = rb_lens[frag_num(frag)];
1181 if (!frag) {
1182 /* First fragment */
1183 skb = nicvf_rb_ptr_to_skb(nic,
1184 *rb_ptrs - cqe_rx->align_pad,
1185 payload_len);
1186 if (!skb)
1187 return NULL;
1188 skb_reserve(skb, cqe_rx->align_pad);
1189 skb_put(skb, payload_len);
1190 } else {
1191 /* Add fragments */
1192 skb_frag = nicvf_rb_ptr_to_skb(nic, *rb_ptrs,
1193 payload_len);
1194 if (!skb_frag) {
1195 dev_kfree_skb(skb);
1196 return NULL;
1197 }
1198
1199 if (!skb_shinfo(skb)->frag_list)
1200 skb_shinfo(skb)->frag_list = skb_frag;
1201 else
1202 prev_frag->next = skb_frag;
1203
1204 prev_frag = skb_frag;
1205 skb->len += payload_len;
1206 skb->data_len += payload_len;
1207 skb_frag->len = payload_len;
1208 }
1209 /* Next buffer pointer */
1210 rb_ptrs++;
1211 }
1212 return skb;
1213 }
1214
1215 /* Enable interrupt */
1216 void nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx)
1217 {
1218 u64 reg_val;
1219
1220 reg_val = nicvf_reg_read(nic, NIC_VF_ENA_W1S);
1221
1222 switch (int_type) {
1223 case NICVF_INTR_CQ:
1224 reg_val |= ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT);
1225 break;
1226 case NICVF_INTR_SQ:
1227 reg_val |= ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT);
1228 break;
1229 case NICVF_INTR_RBDR:
1230 reg_val |= ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT);
1231 break;
1232 case NICVF_INTR_PKT_DROP:
1233 reg_val |= (1ULL << NICVF_INTR_PKT_DROP_SHIFT);
1234 break;
1235 case NICVF_INTR_TCP_TIMER:
1236 reg_val |= (1ULL << NICVF_INTR_TCP_TIMER_SHIFT);
1237 break;
1238 case NICVF_INTR_MBOX:
1239 reg_val |= (1ULL << NICVF_INTR_MBOX_SHIFT);
1240 break;
1241 case NICVF_INTR_QS_ERR:
1242 reg_val |= (1ULL << NICVF_INTR_QS_ERR_SHIFT);
1243 break;
1244 default:
1245 netdev_err(nic->netdev,
1246 "Failed to enable interrupt: unknown type\n");
1247 break;
1248 }
1249
1250 nicvf_reg_write(nic, NIC_VF_ENA_W1S, reg_val);
1251 }
1252
1253 /* Disable interrupt */
1254 void nicvf_disable_intr(struct nicvf *nic, int int_type, int q_idx)
1255 {
1256 u64 reg_val = 0;
1257
1258 switch (int_type) {
1259 case NICVF_INTR_CQ:
1260 reg_val |= ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT);
1261 break;
1262 case NICVF_INTR_SQ:
1263 reg_val |= ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT);
1264 break;
1265 case NICVF_INTR_RBDR:
1266 reg_val |= ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT);
1267 break;
1268 case NICVF_INTR_PKT_DROP:
1269 reg_val |= (1ULL << NICVF_INTR_PKT_DROP_SHIFT);
1270 break;
1271 case NICVF_INTR_TCP_TIMER:
1272 reg_val |= (1ULL << NICVF_INTR_TCP_TIMER_SHIFT);
1273 break;
1274 case NICVF_INTR_MBOX:
1275 reg_val |= (1ULL << NICVF_INTR_MBOX_SHIFT);
1276 break;
1277 case NICVF_INTR_QS_ERR:
1278 reg_val |= (1ULL << NICVF_INTR_QS_ERR_SHIFT);
1279 break;
1280 default:
1281 netdev_err(nic->netdev,
1282 "Failed to disable interrupt: unknown type\n");
1283 break;
1284 }
1285
1286 nicvf_reg_write(nic, NIC_VF_ENA_W1C, reg_val);
1287 }
1288
1289 /* Clear interrupt */
1290 void nicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx)
1291 {
1292 u64 reg_val = 0;
1293
1294 switch (int_type) {
1295 case NICVF_INTR_CQ:
1296 reg_val = ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT);
1297 break;
1298 case NICVF_INTR_SQ:
1299 reg_val = ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT);
1300 break;
1301 case NICVF_INTR_RBDR:
1302 reg_val = ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT);
1303 break;
1304 case NICVF_INTR_PKT_DROP:
1305 reg_val = (1ULL << NICVF_INTR_PKT_DROP_SHIFT);
1306 break;
1307 case NICVF_INTR_TCP_TIMER:
1308 reg_val = (1ULL << NICVF_INTR_TCP_TIMER_SHIFT);
1309 break;
1310 case NICVF_INTR_MBOX:
1311 reg_val = (1ULL << NICVF_INTR_MBOX_SHIFT);
1312 break;
1313 case NICVF_INTR_QS_ERR:
1314 reg_val |= (1ULL << NICVF_INTR_QS_ERR_SHIFT);
1315 break;
1316 default:
1317 netdev_err(nic->netdev,
1318 "Failed to clear interrupt: unknown type\n");
1319 break;
1320 }
1321
1322 nicvf_reg_write(nic, NIC_VF_INT, reg_val);
1323 }
1324
1325 /* Check if interrupt is enabled */
1326 int nicvf_is_intr_enabled(struct nicvf *nic, int int_type, int q_idx)
1327 {
1328 u64 reg_val;
1329 u64 mask = 0xff;
1330
1331 reg_val = nicvf_reg_read(nic, NIC_VF_ENA_W1S);
1332
1333 switch (int_type) {
1334 case NICVF_INTR_CQ:
1335 mask = ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT);
1336 break;
1337 case NICVF_INTR_SQ:
1338 mask = ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT);
1339 break;
1340 case NICVF_INTR_RBDR:
1341 mask = ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT);
1342 break;
1343 case NICVF_INTR_PKT_DROP:
1344 mask = NICVF_INTR_PKT_DROP_MASK;
1345 break;
1346 case NICVF_INTR_TCP_TIMER:
1347 mask = NICVF_INTR_TCP_TIMER_MASK;
1348 break;
1349 case NICVF_INTR_MBOX:
1350 mask = NICVF_INTR_MBOX_MASK;
1351 break;
1352 case NICVF_INTR_QS_ERR:
1353 mask = NICVF_INTR_QS_ERR_MASK;
1354 break;
1355 default:
1356 netdev_err(nic->netdev,
1357 "Failed to check interrupt enable: unknown type\n");
1358 break;
1359 }
1360
1361 return (reg_val & mask);
1362 }
1363
1364 void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx)
1365 {
1366 struct rcv_queue *rq;
1367
1368 #define GET_RQ_STATS(reg) \
1369 nicvf_reg_read(nic, NIC_QSET_RQ_0_7_STAT_0_1 |\
1370 (rq_idx << NIC_Q_NUM_SHIFT) | (reg << 3))
1371
1372 rq = &nic->qs->rq[rq_idx];
1373 rq->stats.bytes = GET_RQ_STATS(RQ_SQ_STATS_OCTS);
1374 rq->stats.pkts = GET_RQ_STATS(RQ_SQ_STATS_PKTS);
1375 }
1376
1377 void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx)
1378 {
1379 struct snd_queue *sq;
1380
1381 #define GET_SQ_STATS(reg) \
1382 nicvf_reg_read(nic, NIC_QSET_SQ_0_7_STAT_0_1 |\
1383 (sq_idx << NIC_Q_NUM_SHIFT) | (reg << 3))
1384
1385 sq = &nic->qs->sq[sq_idx];
1386 sq->stats.bytes = GET_SQ_STATS(RQ_SQ_STATS_OCTS);
1387 sq->stats.pkts = GET_SQ_STATS(RQ_SQ_STATS_PKTS);
1388 }
1389
1390 /* Check for errors in the receive completion queue entry */
1391 int nicvf_check_cqe_rx_errs(struct nicvf *nic,
1392 struct cmp_queue *cq, struct cqe_rx_t *cqe_rx)
1393 {
1394 struct nicvf_hw_stats *stats = &nic->hw_stats;
1395 struct nicvf_drv_stats *drv_stats = &nic->drv_stats;
1396
1397 if (!cqe_rx->err_level && !cqe_rx->err_opcode) {
1398 drv_stats->rx_frames_ok++;
1399 return 0;
1400 }
1401
1402 if (netif_msg_rx_err(nic))
1403 netdev_err(nic->netdev,
1404 "%s: RX error CQE err_level 0x%x err_opcode 0x%x\n",
1405 nic->netdev->name,
1406 cqe_rx->err_level, cqe_rx->err_opcode);
1407
1408 switch (cqe_rx->err_opcode) {
1409 case CQ_RX_ERROP_RE_PARTIAL:
1410 stats->rx_bgx_truncated_pkts++;
1411 break;
1412 case CQ_RX_ERROP_RE_JABBER:
1413 stats->rx_jabber_errs++;
1414 break;
1415 case CQ_RX_ERROP_RE_FCS:
1416 stats->rx_fcs_errs++;
1417 break;
1418 case CQ_RX_ERROP_RE_RX_CTL:
1419 stats->rx_bgx_errs++;
1420 break;
1421 case CQ_RX_ERROP_PREL2_ERR:
1422 stats->rx_prel2_errs++;
1423 break;
1424 case CQ_RX_ERROP_L2_MAL:
1425 stats->rx_l2_hdr_malformed++;
1426 break;
1427 case CQ_RX_ERROP_L2_OVERSIZE:
1428 stats->rx_oversize++;
1429 break;
1430 case CQ_RX_ERROP_L2_UNDERSIZE:
1431 stats->rx_undersize++;
1432 break;
1433 case CQ_RX_ERROP_L2_LENMISM:
1434 stats->rx_l2_len_mismatch++;
1435 break;
1436 case CQ_RX_ERROP_L2_PCLP:
1437 stats->rx_l2_pclp++;
1438 break;
1439 case CQ_RX_ERROP_IP_NOT:
1440 stats->rx_ip_ver_errs++;
1441 break;
1442 case CQ_RX_ERROP_IP_CSUM_ERR:
1443 stats->rx_ip_csum_errs++;
1444 break;
1445 case CQ_RX_ERROP_IP_MAL:
1446 stats->rx_ip_hdr_malformed++;
1447 break;
1448 case CQ_RX_ERROP_IP_MALD:
1449 stats->rx_ip_payload_malformed++;
1450 break;
1451 case CQ_RX_ERROP_IP_HOP:
1452 stats->rx_ip_ttl_errs++;
1453 break;
1454 case CQ_RX_ERROP_L3_PCLP:
1455 stats->rx_l3_pclp++;
1456 break;
1457 case CQ_RX_ERROP_L4_MAL:
1458 stats->rx_l4_malformed++;
1459 break;
1460 case CQ_RX_ERROP_L4_CHK:
1461 stats->rx_l4_csum_errs++;
1462 break;
1463 case CQ_RX_ERROP_UDP_LEN:
1464 stats->rx_udp_len_errs++;
1465 break;
1466 case CQ_RX_ERROP_L4_PORT:
1467 stats->rx_l4_port_errs++;
1468 break;
1469 case CQ_RX_ERROP_TCP_FLAG:
1470 stats->rx_tcp_flag_errs++;
1471 break;
1472 case CQ_RX_ERROP_TCP_OFFSET:
1473 stats->rx_tcp_offset_errs++;
1474 break;
1475 case CQ_RX_ERROP_L4_PCLP:
1476 stats->rx_l4_pclp++;
1477 break;
1478 case CQ_RX_ERROP_RBDR_TRUNC:
1479 stats->rx_truncated_pkts++;
1480 break;
1481 }
1482
1483 return 1;
1484 }
1485
1486 /* Check for errors in the send completion queue entry */
1487 int nicvf_check_cqe_tx_errs(struct nicvf *nic,
1488 struct cmp_queue *cq, struct cqe_send_t *cqe_tx)
1489 {
1490 struct cmp_queue_stats *stats = &cq->stats;
1491
1492 switch (cqe_tx->send_status) {
1493 case CQ_TX_ERROP_GOOD:
1494 stats->tx.good++;
1495 return 0;
1496 case CQ_TX_ERROP_DESC_FAULT:
1497 stats->tx.desc_fault++;
1498 break;
1499 case CQ_TX_ERROP_HDR_CONS_ERR:
1500 stats->tx.hdr_cons_err++;
1501 break;
1502 case CQ_TX_ERROP_SUBDC_ERR:
1503 stats->tx.subdesc_err++;
1504 break;
1505 case CQ_TX_ERROP_IMM_SIZE_OFLOW:
1506 stats->tx.imm_size_oflow++;
1507 break;
1508 case CQ_TX_ERROP_DATA_SEQUENCE_ERR:
1509 stats->tx.data_seq_err++;
1510 break;
1511 case CQ_TX_ERROP_MEM_SEQUENCE_ERR:
1512 stats->tx.mem_seq_err++;
1513 break;
1514 case CQ_TX_ERROP_LOCK_VIOL:
1515 stats->tx.lock_viol++;
1516 break;
1517 case CQ_TX_ERROP_DATA_FAULT:
1518 stats->tx.data_fault++;
1519 break;
1520 case CQ_TX_ERROP_TSTMP_CONFLICT:
1521 stats->tx.tstmp_conflict++;
1522 break;
1523 case CQ_TX_ERROP_TSTMP_TIMEOUT:
1524 stats->tx.tstmp_timeout++;
1525 break;
1526 case CQ_TX_ERROP_MEM_FAULT:
1527 stats->tx.mem_fault++;
1528 break;
1529 case CQ_TX_ERROP_CK_OVERLAP:
1530 stats->tx.csum_overlap++;
1531 break;
1532 case CQ_TX_ERROP_CK_OFLOW:
1533 stats->tx.csum_overflow++;
1534 break;
1535 }
1536
1537 return 1;
1538 }