/**********************************************************************
 * Author: Cavium, Inc.
 *
 * Contact: support@cavium.com
 *          Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2015 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT. See the GNU General Public License for more
 * details.
 *
 * This file may also be available under a different license from Cavium.
 * Contact Cavium, Inc. for more information
 **********************************************************************/
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_main.h"
#include "octeon_network.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
#include "cn23xx_pf_device.h"
#define CVM_MIN(d1, d2) (((d1) < (d2)) ? (d1) : (d2))
#define CVM_MAX(d1, d2) (((d1) > (d2)) ? (d1) : (d2))
struct niclist {
        struct list_head list;
        void *ptr;
};

struct __dispatch {
        struct list_head list;
        struct octeon_recv_info *rinfo;
        octeon_dispatch_fn_t disp_fn;
};
/** Get the argument that the user set when registering dispatch
 * function for a given opcode/subcode.
 * @param octeon_dev - the octeon device pointer.
 * @param opcode     - the opcode for which the dispatch argument
 *                     is to be checked.
 * @param subcode    - the subcode for which the dispatch argument
 *                     is to be checked.
 * @return Success: void * (argument to the dispatch function)
 * @return Failure: NULL
 */
static inline void *octeon_get_dispatch_arg(struct octeon_device *octeon_dev,
                                            u16 opcode, u16 subcode)
{
        u32 idx;
        struct list_head *dispatch;
        void *fn_arg = NULL;

        u16 combined_opcode = OPCODE_SUBCODE(opcode, subcode);

        idx = combined_opcode & OCTEON_OPCODE_MASK;

        spin_lock_bh(&octeon_dev->dispatch.lock);

        if (octeon_dev->dispatch.count == 0) {
                spin_unlock_bh(&octeon_dev->dispatch.lock);
                return NULL;
        }

        if (octeon_dev->dispatch.dlist[idx].opcode == combined_opcode) {
                fn_arg = octeon_dev->dispatch.dlist[idx].arg;
        } else {
                list_for_each(dispatch,
                              &octeon_dev->dispatch.dlist[idx].list) {
                        if (((struct octeon_dispatch *)dispatch)->opcode ==
                            combined_opcode) {
                                fn_arg = ((struct octeon_dispatch *)
                                          dispatch)->arg;
                                break;
                        }
                }
        }

        spin_unlock_bh(&octeon_dev->dispatch.lock);
        return fn_arg;
}
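/* Usage sketch (illustrative, not part of the driver): the value returned
 * above is whatever fn_arg was supplied when the dispatch function was
 * registered, e.g. with octeon_register_dispatch_fn(); my_disp_fn and
 * my_arg below are placeholders:
 *
 *      octeon_register_dispatch_fn(oct, opcode, subcode, my_disp_fn, my_arg);
 *      ...
 *      arg = octeon_get_dispatch_arg(oct, opcode, subcode);
 *      // arg == my_arg on a hit, NULL if nothing was registered
 */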
/** Check for packets on Droq. This function should be called with lock held.
 * @param droq - Droq on which count is checked.
 * @return Returns packet count.
 */
u32 octeon_droq_check_hw_for_pkts(struct octeon_droq *droq)
{
        u32 pkt_count = 0;
        u32 last_count;

        pkt_count = readl(droq->pkts_sent_reg);

        last_count = pkt_count - droq->pkt_count;
        droq->pkt_count = pkt_count;

        /* we shall write to cnts at napi irq enable or end of droq tasklet */
        if (last_count)
                atomic_add(last_count, &droq->pkts_pending);

        return last_count;
}
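/* Note: pkts_sent_reg exposes a running packet count, so the delta between
 * the new reading and the cached droq->pkt_count is the number of packets
 * that arrived since the last check; it is accumulated into pkts_pending
 * rather than assigned.
 */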
static void octeon_droq_compute_max_packet_bufs(struct octeon_droq *droq)
{
        u32 count = 0;

        /* max_empty_descs is the max. no. of descs that can have no buffers.
         * If the empty desc count goes beyond this value, we cannot safely
         * read in a 64K packet sent by Octeon
         * (64K is max pkt size from Octeon)
         */
        droq->max_empty_descs = 0;

        do {
                droq->max_empty_descs++;
                count += droq->buffer_size;
        } while (count < (64 * 1024));

        droq->max_empty_descs = droq->max_count - droq->max_empty_descs;
}
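/* Worked example (hypothetical 2KB buffers, 1024-entry ring): 64K/2K = 32
 * descriptors are needed for a maximum-sized packet, so max_empty_descs
 * becomes 1024 - 32 = 992; a refill must happen before more than 992
 * descriptors sit without buffers.
 */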
static void octeon_droq_reset_indices(struct octeon_droq *droq)
{
        droq->read_idx = 0;
        droq->write_idx = 0;
        droq->refill_idx = 0;
        droq->refill_count = 0;
        atomic_set(&droq->pkts_pending, 0);
}
static void
octeon_droq_destroy_ring_buffers(struct octeon_device *oct,
                                 struct octeon_droq *droq)
{
        u32 i;
        struct octeon_skb_page_info *pg_info;

        for (i = 0; i < droq->max_count; i++) {
                pg_info = &droq->recv_buf_list[i].pg_info;

                if (pg_info->dma)
                        lio_unmap_ring(oct->pci_dev,
                                       (u64)pg_info->dma);
                pg_info->dma = 0;

                if (pg_info->page)
                        recv_buffer_destroy(droq->recv_buf_list[i].buffer,
                                            pg_info);

                if (droq->desc_ring && droq->desc_ring[i].info_ptr)
                        lio_unmap_ring_info(oct->pci_dev,
                                            (u64)droq->desc_ring[i].info_ptr,
                                            OCT_DROQ_INFO_SIZE);
                droq->recv_buf_list[i].buffer = NULL;
        }

        octeon_droq_reset_indices(droq);
}
static int
octeon_droq_setup_ring_buffers(struct octeon_device *oct,
                               struct octeon_droq *droq)
{
        u32 i;
        void *buf;
        struct octeon_droq_desc *desc_ring = droq->desc_ring;

        for (i = 0; i < droq->max_count; i++) {
                buf = recv_buffer_alloc(oct, &droq->recv_buf_list[i].pg_info);

                if (!buf) {
                        dev_err(&oct->pci_dev->dev, "%s buffer alloc failed\n",
                                __func__);
                        droq->stats.rx_alloc_failure++;
                        return -ENOMEM;
                }

                droq->recv_buf_list[i].buffer = buf;
                droq->recv_buf_list[i].data = get_rbd(buf);
                droq->info_list[i].length = 0;

                /* map ring buffers into memory */
                desc_ring[i].info_ptr = lio_map_ring_info(droq, i);
                desc_ring[i].buffer_ptr =
                        lio_map_ring(droq->recv_buf_list[i].buffer);
        }

        octeon_droq_reset_indices(droq);

        octeon_droq_compute_max_packet_bufs(droq);

        return 0;
}
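/* Each descriptor thus pairs a buffer_ptr (DMA address of the receive
 * buffer) with an info_ptr (DMA address of the octeon_droq_info slot), so
 * the hardware can deposit both the packet data and its length/response
 * header for each ring entry.
 */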
int octeon_delete_droq(struct octeon_device *oct, u32 q_no)
{
        struct octeon_droq *droq = oct->droq[q_no];

        dev_dbg(&oct->pci_dev->dev, "%s[%d]\n", __func__, q_no);

        octeon_droq_destroy_ring_buffers(oct, droq);
        vfree(droq->recv_buf_list);

        if (droq->info_base_addr)
                cnnic_free_aligned_dma(oct->pci_dev, droq->info_list,
                                       droq->info_alloc_size,
                                       droq->info_base_addr,
                                       droq->info_list_dma);

        if (droq->desc_ring)
                lio_dma_free(oct, (droq->max_count * OCT_DROQ_DESC_SIZE),
                             droq->desc_ring, droq->desc_ring_dma);

        memset(droq, 0, OCT_DROQ_SIZE);

        return 0;
}
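/* Teardown mirrors setup in reverse: ring buffers are unmapped and freed
 * first, then the vmalloc'ed recv_buf_list, the aligned info list, and
 * finally the DMA descriptor ring, before the droq structure is zeroed.
 */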
int octeon_init_droq(struct octeon_device *oct,
                     u32 q_no,
                     u32 num_descs,
                     u32 desc_size,
                     void *app_ctx)
{
        struct octeon_droq *droq;
        u32 desc_ring_size = 0, c_num_descs = 0, c_buf_size = 0;
        u32 c_pkts_per_intr = 0, c_refill_threshold = 0;
        int orig_node = dev_to_node(&oct->pci_dev->dev);
        int numa_node = cpu_to_node(q_no % num_online_cpus());

        dev_dbg(&oct->pci_dev->dev, "%s[%d]\n", __func__, q_no);

        droq = oct->droq[q_no];
        memset(droq, 0, OCT_DROQ_SIZE);

        droq->oct_dev = oct;
        droq->q_no = q_no;
        if (app_ctx)
                droq->app_ctx = app_ctx;
        else
                droq->app_ctx = (void *)(size_t)q_no;

        c_num_descs = num_descs;
        c_buf_size = desc_size;
        if (OCTEON_CN6XXX(oct)) {
                struct octeon_config *conf6x = CHIP_FIELD(oct, cn6xxx, conf);

                c_pkts_per_intr = (u32)CFG_GET_OQ_PKTS_PER_INTR(conf6x);
                c_refill_threshold =
                        (u32)CFG_GET_OQ_REFILL_THRESHOLD(conf6x);
        } else if (OCTEON_CN23XX_PF(oct)) {
                struct octeon_config *conf23 = CHIP_FIELD(oct, cn23xx_pf, conf);

                c_pkts_per_intr = (u32)CFG_GET_OQ_PKTS_PER_INTR(conf23);
                c_refill_threshold = (u32)CFG_GET_OQ_REFILL_THRESHOLD(conf23);
        } else {
                return 1;
        }

        droq->max_count = c_num_descs;
        droq->buffer_size = c_buf_size;

        desc_ring_size = droq->max_count * OCT_DROQ_DESC_SIZE;
        set_dev_node(&oct->pci_dev->dev, numa_node);
        droq->desc_ring = lio_dma_alloc(oct, desc_ring_size,
                                        (dma_addr_t *)&droq->desc_ring_dma);
        set_dev_node(&oct->pci_dev->dev, orig_node);
        if (!droq->desc_ring)
                droq->desc_ring = lio_dma_alloc(oct, desc_ring_size,
                                        (dma_addr_t *)&droq->desc_ring_dma);

        if (!droq->desc_ring) {
                dev_err(&oct->pci_dev->dev,
                        "Output queue %d ring alloc failed\n", q_no);
                return 1;
        }

        dev_dbg(&oct->pci_dev->dev, "droq[%d]: desc_ring: virt: 0x%p, dma: %lx\n",
                q_no, droq->desc_ring, droq->desc_ring_dma);
        dev_dbg(&oct->pci_dev->dev, "droq[%d]: num_desc: %d\n", q_no,
                droq->max_count);

        droq->info_list =
                cnnic_numa_alloc_aligned_dma((droq->max_count *
                                              OCT_DROQ_INFO_SIZE),
                                             &droq->info_alloc_size,
                                             &droq->info_base_addr,
                                             numa_node);
        if (!droq->info_list) {
                dev_err(&oct->pci_dev->dev, "Cannot allocate memory for info list.\n");
                lio_dma_free(oct, (droq->max_count * OCT_DROQ_DESC_SIZE),
                             droq->desc_ring, droq->desc_ring_dma);
                return 1;
        }

        droq->recv_buf_list = (struct octeon_recv_buffer *)
                              vmalloc_node(droq->max_count *
                                           OCT_DROQ_RECVBUF_SIZE,
                                           numa_node);
        if (!droq->recv_buf_list)
                droq->recv_buf_list = (struct octeon_recv_buffer *)
                                      vmalloc(droq->max_count *
                                              OCT_DROQ_RECVBUF_SIZE);
        if (!droq->recv_buf_list) {
                dev_err(&oct->pci_dev->dev, "Output queue recv buf list alloc failed\n");
                goto init_droq_fail;
        }

        if (octeon_droq_setup_ring_buffers(oct, droq))
                goto init_droq_fail;

        droq->pkts_per_intr = c_pkts_per_intr;
        droq->refill_threshold = c_refill_threshold;

        dev_dbg(&oct->pci_dev->dev, "DROQ INIT: max_empty_descs: %d\n",
                droq->max_empty_descs);

        spin_lock_init(&droq->lock);

        INIT_LIST_HEAD(&droq->dispatch_list);

        /* For 56xx Pass1, this function won't be called, so no checks. */
        oct->fn_list.setup_oq_regs(oct, q_no);

        oct->io_qmask.oq |= (1ULL << q_no);

        return 0;

init_droq_fail:
        octeon_delete_droq(oct, q_no);
        return 1;
}
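/* Allocation pattern used above: set_dev_node() temporarily steers the DMA
 * allocation toward the NUMA node backing this queue's CPU, and each
 * allocation (desc ring, recv_buf_list) retries without a node preference
 * if the node-local attempt fails.
 */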
/* octeon_create_recv_info
 * Parameters:
 *  octeon_dev - pointer to the octeon device structure
 *  droq       - droq in which the packet arrived.
 *  buf_cnt    - no. of buffers used by the packet.
 *  idx        - index in the descriptor for the first buffer in the packet.
 * Description:
 *  Allocates a recv_info_t and copies the buffer addresses for packet data
 *  into the recv_pkt space which starts at an 8B offset from recv_info_t.
 *  Flags the descriptors for refill later. If available descriptors go
 *  below the threshold to receive a 64K pkt, new buffers are first allocated
 *  before the recv_pkt_t is created.
 *  This routine will be called in interrupt context.
 * Returns:
 *  Success: Pointer to recv_info_t
 *  Failure: NULL.
 * Locks:
 *  The droq->lock is held when this routine is called.
 */
static inline struct octeon_recv_info *octeon_create_recv_info(
                struct octeon_device *octeon_dev,
                struct octeon_droq *droq,
                u32 buf_cnt,
                u32 idx)
{
        struct octeon_droq_info *info;
        struct octeon_recv_pkt *recv_pkt;
        struct octeon_recv_info *recv_info;
        u32 i, bytes_left;
        struct octeon_skb_page_info *pg_info;

        info = &droq->info_list[idx];

        recv_info = octeon_alloc_recv_info(sizeof(struct __dispatch));
        if (!recv_info)
                return NULL;

        recv_pkt = recv_info->recv_pkt;
        recv_pkt->rh = info->rh;
        recv_pkt->length = (u32)info->length;
        recv_pkt->buffer_count = (u16)buf_cnt;
        recv_pkt->octeon_id = (u16)octeon_dev->octeon_id;

        i = 0;
        bytes_left = (u32)info->length;

        while (buf_cnt) {
                pg_info = &droq->recv_buf_list[idx].pg_info;

                lio_unmap_ring(octeon_dev->pci_dev,
                               (u64)pg_info->dma);
                pg_info->page = NULL;
                pg_info->dma = 0;

                recv_pkt->buffer_size[i] =
                        (bytes_left >= droq->buffer_size) ?
                        droq->buffer_size : bytes_left;

                recv_pkt->buffer_ptr[i] = droq->recv_buf_list[idx].buffer;
                droq->recv_buf_list[idx].buffer = NULL;

                INCR_INDEX_BY1(idx, droq->max_count);
                bytes_left -= droq->buffer_size;
                i++;
                buf_cnt--;
        }

        return recv_info;
}
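/* The struct __dispatch bookkeeping lives in the rsvd area requested from
 * octeon_alloc_recv_info() above; the recv_pkt itself starts at an 8B
 * offset from the recv_info header, as noted in the comment preceding
 * this function.
 */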
/* If we were not able to refill all buffers, try to move around
 * the buffers that were not dispatched.
 */
static inline u32
octeon_droq_refill_pullup_descs(struct octeon_droq *droq,
                                struct octeon_droq_desc *desc_ring)
{
        u32 desc_refilled = 0;
        u32 refill_index = droq->refill_idx;

        while (refill_index != droq->read_idx) {
                if (droq->recv_buf_list[refill_index].buffer) {
                        droq->recv_buf_list[droq->refill_idx].buffer =
                                droq->recv_buf_list[refill_index].buffer;
                        droq->recv_buf_list[droq->refill_idx].data =
                                droq->recv_buf_list[refill_index].data;
                        desc_ring[droq->refill_idx].buffer_ptr =
                                desc_ring[refill_index].buffer_ptr;
                        droq->recv_buf_list[refill_index].buffer = NULL;
                        desc_ring[refill_index].buffer_ptr = 0;
                        do {
                                INCR_INDEX_BY1(droq->refill_idx,
                                               droq->max_count);
                                desc_refilled++;
                                droq->refill_count--;
                        } while (droq->recv_buf_list[droq->refill_idx].buffer);
                }
                INCR_INDEX_BY1(refill_index, droq->max_count);
        }                       /* while */
        return desc_refilled;
}
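/* Pullup illustration (hypothetical 8-entry ring): if slots 2-3 were
 * dispatched (buffers handed up) but slots 4-5 still hold buffers, the
 * loop copies the buffers from 4-5 into 2-3, clears 4-5, and advances
 * refill_idx past the filled slots, leaving the empty descriptors
 * contiguous for the refill pass.
 */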
/* octeon_droq_refill
 * Parameters:
 *  droq - droq in which descriptors require new buffers.
 * Description:
 *  Called during normal DROQ processing in interrupt mode or by the poll
 *  thread to refill the descriptors from which buffers were dispatched
 *  to upper layers. Attempts to allocate new buffers. If that fails, moves
 *  up buffers (that were not dispatched) to form a contiguous ring.
 * Returns:
 *  No of descriptors refilled.
 * Locks:
 *  This routine is called with droq->lock held.
 */
static u32
octeon_droq_refill(struct octeon_device *octeon_dev, struct octeon_droq *droq)
{
        struct octeon_droq_desc *desc_ring;
        void *buf = NULL;
        u8 *data;
        u32 desc_refilled = 0;
        struct octeon_skb_page_info *pg_info;

        desc_ring = droq->desc_ring;

        while (droq->refill_count && (desc_refilled < droq->max_count)) {
                /* If a valid buffer exists (happens if there is no dispatch),
                 * reuse the buffer, else allocate.
                 */
                if (!droq->recv_buf_list[droq->refill_idx].buffer) {
                        pg_info =
                                &droq->recv_buf_list[droq->refill_idx].pg_info;
                        /* Either recycle the existing pages or go for
                         * new page alloc
                         */
                        if (pg_info->page)
                                buf = recv_buffer_reuse(octeon_dev, pg_info);
                        else
                                buf = recv_buffer_alloc(octeon_dev, pg_info);
                        /* If a buffer could not be allocated, no point in
                         * continuing
                         */
                        if (!buf) {
                                droq->stats.rx_alloc_failure++;
                                break;
                        }
                        droq->recv_buf_list[droq->refill_idx].buffer = buf;
                        data = get_rbd(buf);
                } else {
                        data = get_rbd(droq->recv_buf_list
                                       [droq->refill_idx].buffer);
                }

                droq->recv_buf_list[droq->refill_idx].data = data;

                desc_ring[droq->refill_idx].buffer_ptr =
                        lio_map_ring(droq->recv_buf_list
                                     [droq->refill_idx].buffer);
                /* Reset any previous values in the length field. */
                droq->info_list[droq->refill_idx].length = 0;

                INCR_INDEX_BY1(droq->refill_idx, droq->max_count);
                desc_refilled++;
                droq->refill_count--;
        }

        if (droq->refill_count)
                desc_refilled +=
                        octeon_droq_refill_pullup_descs(droq, desc_ring);

        /* If droq->refill_count is still non-zero here, the pullup pass did
         * not change it: we only moved buffers to close the gap in the ring,
         * so the same number of descriptors still needs a refill.
         */
        return desc_refilled;
}
static inline u32
octeon_droq_get_bufcount(u32 buf_size, u32 total_len)
{
        u32 buf_cnt = 0;

        while (total_len > (buf_size * buf_cnt))
                buf_cnt++;
        return buf_cnt;
}
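/* This loop is simply ceil(total_len / buf_size), i.e. equivalent to
 * buf_cnt = (total_len + buf_size - 1) / buf_size; for example, a
 * 3000-byte packet with 2048-byte buffers occupies 2 of them.
 */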
static int
octeon_droq_dispatch_pkt(struct octeon_device *oct,
                         struct octeon_droq *droq,
                         union octeon_rh *rh,
                         struct octeon_droq_info *info)
{
        u32 cnt;
        octeon_dispatch_fn_t disp_fn;
        struct octeon_recv_info *rinfo;

        cnt = octeon_droq_get_bufcount(droq->buffer_size, (u32)info->length);

        disp_fn = octeon_get_dispatch(oct, (u16)rh->r.opcode,
                                      (u16)rh->r.subcode);
        if (disp_fn) {
                rinfo = octeon_create_recv_info(oct, droq, cnt, droq->read_idx);
                if (rinfo) {
                        struct __dispatch *rdisp = rinfo->rsvd;

                        rdisp->rinfo = rinfo;
                        rdisp->disp_fn = disp_fn;
                        rinfo->recv_pkt->rh = *rh;
                        list_add_tail(&rdisp->list,
                                      &droq->dispatch_list);
                } else {
                        droq->stats.dropped_nomem++;
                }
        } else {
                dev_err(&oct->pci_dev->dev, "DROQ: No dispatch function (opcode %u/%u)\n",
                        (unsigned int)rh->r.opcode,
                        (unsigned int)rh->r.subcode);
                droq->stats.dropped_nodispatch++;
        }

        return cnt;
}
static inline void octeon_droq_drop_packets(struct octeon_device *oct,
                                            struct octeon_droq *droq,
                                            u32 cnt)
{
        u32 i = 0, buf_cnt;
        struct octeon_droq_info *info;

        for (i = 0; i < cnt; i++) {
                info = &droq->info_list[droq->read_idx];
                octeon_swap_8B_data((u64 *)info, 2);

                if (info->length) {
                        info->length -= OCT_RH_SIZE;
                        droq->stats.bytes_received += info->length;
                        buf_cnt = octeon_droq_get_bufcount(droq->buffer_size,
                                                           (u32)info->length);
                } else {
                        dev_err(&oct->pci_dev->dev, "DROQ: In drop: pkt with len 0\n");
                        buf_cnt = 1;
                }

                INCR_INDEX(droq->read_idx, buf_cnt, droq->max_count);
                droq->refill_count += buf_cnt;
        }
}
static u32
octeon_droq_fast_process_packets(struct octeon_device *oct,
                                 struct octeon_droq *droq,
                                 u32 pkts_to_process)
{
        struct octeon_droq_info *info;
        union octeon_rh *rh;
        u32 pkt, total_len = 0, pkt_count;

        pkt_count = pkts_to_process;

        for (pkt = 0; pkt < pkt_count; pkt++) {
                u32 pkt_len = 0;
                struct sk_buff *nicbuf = NULL;
                struct octeon_skb_page_info *pg_info;
                void *buf;

                info = &droq->info_list[droq->read_idx];
                octeon_swap_8B_data((u64 *)info, 2);

                if (!info->length) {
                        dev_err(&oct->pci_dev->dev,
                                "DROQ[%d] idx: %d len:0, pkt_cnt: %d\n",
                                droq->q_no, droq->read_idx, pkt_count);
                        print_hex_dump_bytes("", DUMP_PREFIX_ADDRESS,
                                             (u8 *)info,
                                             OCT_DROQ_INFO_SIZE);
                        break;
                }

                /* Len of resp hdr in included in the received data len. */
                info->length -= OCT_RH_SIZE;
                rh = &info->rh;

                total_len += (u32)info->length;
                if (OPCODE_SLOW_PATH(rh)) {
                        u32 buf_cnt;

                        buf_cnt = octeon_droq_dispatch_pkt(oct, droq, rh, info);
                        INCR_INDEX(droq->read_idx, buf_cnt, droq->max_count);
                        droq->refill_count += buf_cnt;
                } else {
                        if (info->length <= droq->buffer_size) {
                                pkt_len = (u32)info->length;
                                nicbuf = droq->recv_buf_list[
                                        droq->read_idx].buffer;
                                pg_info = &droq->recv_buf_list[
                                        droq->read_idx].pg_info;
                                if (recv_buffer_recycle(oct, pg_info))
                                        pg_info->page = NULL;
                                droq->recv_buf_list[droq->read_idx].buffer =
                                        NULL;

                                INCR_INDEX_BY1(droq->read_idx, droq->max_count);
                                droq->refill_count++;
                        } else {
                                nicbuf = octeon_fast_packet_alloc((u32)
                                                                  info->length);
                                pkt_len = 0;
                                /* nicbuf allocation can fail. We'll handle it
                                 * inside the loop.
                                 */
                                while (pkt_len < info->length) {
                                        int cpy_len, idx = droq->read_idx;

                                        cpy_len = ((pkt_len + droq->buffer_size)
                                                   > info->length) ?
                                                ((u32)info->length - pkt_len) :
                                                droq->buffer_size;

                                        if (nicbuf) {
                                                octeon_fast_packet_next(droq,
                                                                        nicbuf,
                                                                        cpy_len,
                                                                        idx);
                                                buf = droq->recv_buf_list[idx].
                                                        buffer;
                                                recv_buffer_fast_free(buf);
                                                droq->recv_buf_list[idx].buffer
                                                        = NULL;
                                        } else {
                                                droq->stats.rx_alloc_failure++;
                                        }

                                        pkt_len += cpy_len;
                                        INCR_INDEX_BY1(droq->read_idx,
                                                       droq->max_count);
                                        droq->refill_count++;
                                }
                        }

                        if (nicbuf) {
                                if (droq->ops.fptr) {
                                        droq->ops.fptr(oct->octeon_id,
                                                       nicbuf, pkt_len,
                                                       rh, &droq->napi,
                                                       droq->ops.farg);
                                } else {
                                        recv_buffer_free(nicbuf);
                                }
                        }
                }

                if (droq->refill_count >= droq->refill_threshold) {
                        int desc_refilled = octeon_droq_refill(oct, droq);

                        /* Flush the droq descriptor data to memory to be sure
                         * that when we update the credits the data in memory
                         * is accurate.
                         */
                        wmb();
                        writel((desc_refilled), droq->pkts_credit_reg);
                        /* make sure mmio write completes */
                        mmiowb();
                }

        }                       /* for (each packet)... */

        /* Update stats with the packets and bytes handed up the stack. */
        droq->stats.pkts_received += pkt;
        droq->stats.bytes_received += total_len;

        if ((droq->ops.drop_on_max) && (pkts_to_process - pkt)) {
                octeon_droq_drop_packets(oct, droq, (pkts_to_process - pkt));

                droq->stats.dropped_toomany += (pkts_to_process - pkt);
                return pkts_to_process;
        }

        return pkt;
}
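/* Fast path vs. slow path above: packets matching OPCODE_SLOW_PATH() are
 * queued on droq->dispatch_list for the registered dispatch functions,
 * while all other packets are handed directly to droq->ops.fptr (the NIC
 * rx callback) as an sk_buff.
 */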
u32
octeon_droq_process_packets(struct octeon_device *oct,
                            struct octeon_droq *droq,
                            u32 budget)
{
        u32 pkt_count = 0, pkts_processed = 0;
        struct list_head *tmp, *tmp2;

        /* Grab the droq lock */
        spin_lock(&droq->lock);

        octeon_droq_check_hw_for_pkts(droq);
        pkt_count = atomic_read(&droq->pkts_pending);

        if (!pkt_count) {
                spin_unlock(&droq->lock);
                return 0;
        }

        if (pkt_count > budget)
                pkt_count = budget;

        pkts_processed = octeon_droq_fast_process_packets(oct, droq, pkt_count);

        atomic_sub(pkts_processed, &droq->pkts_pending);

        /* Release the spin lock */
        spin_unlock(&droq->lock);

        list_for_each_safe(tmp, tmp2, &droq->dispatch_list) {
                struct __dispatch *rdisp = (struct __dispatch *)tmp;

                list_del(tmp);
                rdisp->disp_fn(rdisp->rinfo,
                               octeon_get_dispatch_arg
                               (oct,
                                (u16)rdisp->rinfo->recv_pkt->rh.r.opcode,
                                (u16)rdisp->rinfo->recv_pkt->rh.r.subcode));
        }

        /* If there are packets pending, schedule the tasklet again */
        if (atomic_read(&droq->pkts_pending))
                return 1;

        return 0;
}
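/* Return semantics: non-zero tells the caller packets are still pending
 * and processing should be rescheduled; 0 means the queue was drained
 * within the given budget.
 */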
/**
 * Utility function to poll for packets. check_hw_for_packets must be
 * called before calling this routine.
 */
static u32
octeon_droq_process_poll_pkts(struct octeon_device *oct,
                              struct octeon_droq *droq, u32 budget)
{
        struct list_head *tmp, *tmp2;
        u32 pkts_available = 0, pkts_processed = 0;
        u32 total_pkts_processed = 0;

        if (budget > droq->max_count)
                budget = droq->max_count;

        spin_lock(&droq->lock);

        while (total_pkts_processed < budget) {
                octeon_droq_check_hw_for_pkts(droq);

                pkts_available =
                        CVM_MIN((budget - total_pkts_processed),
                                (u32)(atomic_read(&droq->pkts_pending)));

                if (pkts_available == 0)
                        break;

                pkts_processed =
                        octeon_droq_fast_process_packets(oct, droq,
                                                         pkts_available);

                atomic_sub(pkts_processed, &droq->pkts_pending);

                total_pkts_processed += pkts_processed;
        }

        spin_unlock(&droq->lock);

        list_for_each_safe(tmp, tmp2, &droq->dispatch_list) {
                struct __dispatch *rdisp = (struct __dispatch *)tmp;

                list_del(tmp);
                rdisp->disp_fn(rdisp->rinfo,
                               octeon_get_dispatch_arg
                               (oct,
                                (u16)rdisp->rinfo->recv_pkt->rh.r.opcode,
                                (u16)rdisp->rinfo->recv_pkt->rh.r.subcode));
        }

        return total_pkts_processed;
}
int
octeon_process_droq_poll_cmd(struct octeon_device *oct, u32 q_no, int cmd,
                             u32 arg)
{
        struct octeon_droq *droq;

        droq = oct->droq[q_no];

        if (cmd == POLL_EVENT_PROCESS_PKTS)
                return octeon_droq_process_poll_pkts(oct, droq, arg);

        if (cmd == POLL_EVENT_PENDING_PKTS) {
                u32 pkt_cnt = atomic_read(&droq->pkts_pending);

                return octeon_droq_process_packets(oct, droq, pkt_cnt);
        }

        if (cmd == POLL_EVENT_ENABLE_INTR) {
                u32 value;
                unsigned long flags;

                /* Enable Pkt Interrupt */
                switch (oct->chip_id) {
                case OCTEON_CN66XX:
                case OCTEON_CN68XX: {
                        struct octeon_cn6xxx *cn6xxx =
                                (struct octeon_cn6xxx *)oct->chip;
                        spin_lock_irqsave
                                (&cn6xxx->lock_for_droq_int_enb_reg, flags);
                        value = octeon_read_csr(oct,
                                                CN6XXX_SLI_PKT_TIME_INT_ENB);
                        value |= (1 << q_no);
                        octeon_write_csr(oct,
                                         CN6XXX_SLI_PKT_TIME_INT_ENB,
                                         value);
                        value = octeon_read_csr(oct,
                                                CN6XXX_SLI_PKT_CNT_INT_ENB);
                        value |= (1 << q_no);
                        octeon_write_csr(oct,
                                         CN6XXX_SLI_PKT_CNT_INT_ENB,
                                         value);

                        /* don't bother flushing the enables */

                        spin_unlock_irqrestore
                                (&cn6xxx->lock_for_droq_int_enb_reg, flags);
                        return 0;
                }
                break;
                case OCTEON_CN23XX_PF_VID: {
                        lio_enable_irq(oct->droq[q_no], oct->instr_queue[q_no]);
                }
                break;
                }
                return 0;
        }

        dev_err(&oct->pci_dev->dev, "%s Unknown command: %d\n", __func__, cmd);
        return -EINVAL;
}
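/* The three commands roughly track the NAPI flow: POLL_EVENT_PROCESS_PKTS
 * does budgeted work from the poll handler, POLL_EVENT_PENDING_PKTS drains
 * whatever is already counted as pending, and POLL_EVENT_ENABLE_INTR
 * re-arms the per-queue interrupt once polling stops.
 */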
int octeon_register_droq_ops(struct octeon_device *oct, u32 q_no,
                             struct octeon_droq_ops *ops)
{
        struct octeon_droq *droq;
        unsigned long flags;
        struct octeon_config *oct_cfg = NULL;

        oct_cfg = octeon_get_conf(oct);

        if (!oct_cfg)
                return -EINVAL;

        if (!(ops)) {
                dev_err(&oct->pci_dev->dev, "%s: droq_ops pointer is NULL\n",
                        __func__);
                return -EINVAL;
        }

        if (q_no >= CFG_GET_OQ_MAX_Q(oct_cfg)) {
                dev_err(&oct->pci_dev->dev, "%s: droq id (%d) exceeds MAX (%d)\n",
                        __func__, q_no, (oct->num_oqs - 1));
                return -EINVAL;
        }

        droq = oct->droq[q_no];

        spin_lock_irqsave(&droq->lock, flags);

        memcpy(&droq->ops, ops, sizeof(struct octeon_droq_ops));

        spin_unlock_irqrestore(&droq->lock, flags);

        return 0;
}
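/* Usage sketch (illustrative): a NIC driver fills in the ops before
 * enabling the queue; my_rx_callback and my_priv are placeholders:
 *
 *      struct octeon_droq_ops droq_ops;
 *
 *      memset(&droq_ops, 0, sizeof(droq_ops));
 *      droq_ops.fptr = my_rx_callback;         // invoked per received packet
 *      droq_ops.farg = my_priv;
 *      droq_ops.drop_on_max = 1;               // drop what exceeds the budget
 *      octeon_register_droq_ops(oct, q_no, &droq_ops);
 */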
int octeon_unregister_droq_ops(struct octeon_device *oct, u32 q_no)
{
        unsigned long flags;
        struct octeon_droq *droq;
        struct octeon_config *oct_cfg = NULL;

        oct_cfg = octeon_get_conf(oct);

        if (!oct_cfg)
                return -EINVAL;

        if (q_no >= CFG_GET_OQ_MAX_Q(oct_cfg)) {
                dev_err(&oct->pci_dev->dev, "%s: droq id (%d) exceeds MAX (%d)\n",
                        __func__, q_no, oct->num_oqs - 1);
                return -EINVAL;
        }

        droq = oct->droq[q_no];

        if (!droq) {
                dev_info(&oct->pci_dev->dev,
                         "Droq id (%d) not available.\n", q_no);
                return 0;
        }

        spin_lock_irqsave(&droq->lock, flags);

        droq->ops.fptr = NULL;
        droq->ops.farg = NULL;
        droq->ops.drop_on_max = 0;

        spin_unlock_irqrestore(&droq->lock, flags);

        return 0;
}
int octeon_create_droq(struct octeon_device *oct,
                       u32 q_no, u32 num_descs,
                       u32 desc_size, void *app_ctx)
{
        struct octeon_droq *droq;
        int numa_node = cpu_to_node(q_no % num_online_cpus());

        if (oct->droq[q_no]) {
                dev_dbg(&oct->pci_dev->dev, "Droq already in use. Cannot create droq %d again\n",
                        q_no);
                return 1;
        }

        /* Allocate the DS for the new droq. */
        droq = vmalloc_node(sizeof(*droq), numa_node);
        if (!droq)
                droq = vmalloc(sizeof(*droq));
        if (!droq)
                goto create_droq_fail;
        memset(droq, 0, sizeof(struct octeon_droq));

        /* Disable the pkt o/p for this Q */
        octeon_set_droq_pkt_op(oct, q_no, 0);
        oct->droq[q_no] = droq;

        /* Initialize the Droq */
        octeon_init_droq(oct, q_no, num_descs, desc_size, app_ctx);

        oct->num_oqs++;

        dev_dbg(&oct->pci_dev->dev, "%s: Total number of OQ: %d\n", __func__,
                oct->num_oqs);

        /* Global Droq register settings */

        /* As of now not required, as setting are done for all 32 Droqs at
         * init time.
         */
        return 0;

create_droq_fail:
        octeon_delete_droq(oct, q_no);
        return -ENOMEM;
}
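/* Typical queue lifecycle across this file: octeon_create_droq() ->
 * octeon_register_droq_ops() -> packets delivered through
 * octeon_droq_process_packets()/octeon_droq_process_poll_pkts() ->
 * octeon_unregister_droq_ops() -> octeon_delete_droq().
 */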