/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/dma-mapping.h>
#include <linux/jiffies.h>
#include <linux/prefetch.h>
#include <linux/export.h>
#include <net/ipv6.h>
#include <net/tcp.h>
#ifdef CONFIG_NET_RX_BUSY_POLL
#include <net/busy_poll.h>
#endif /* CONFIG_NET_RX_BUSY_POLL */
#include "cxgb4.h"
#include "t4_regs.h"
#include "t4_values.h"
#include "t4_msg.h"
#include "t4fw_api.h"
/*
 * Rx buffer size.  We use largish buffers if possible but settle for single
 * pages under memory shortage.
 */
#if PAGE_SHIFT >= 16
# define FL_PG_ORDER 0
#else
# define FL_PG_ORDER (16 - PAGE_SHIFT)
#endif
/* RX_PULL_LEN should be <= RX_COPY_THRES */
#define RX_COPY_THRES    256
#define RX_PULL_LEN      128

/*
 * Main body length for sk_buffs used for Rx Ethernet packets with fragments.
 * Should be >= RX_PULL_LEN but possibly bigger to give pskb_may_pull some room.
 */
#define RX_PKT_SKB_LEN   512
/*
 * Max number of Tx descriptors we clean up at a time.  Should be modest as
 * freeing skbs isn't cheap and it happens while holding locks.  We just need
 * to free packets faster than they arrive; we'll eventually catch up and keep
 * the amortized cost reasonable.  Must be >= 2 * TXQ_STOP_THRES.
 */
#define MAX_TX_RECLAIM 16
/*
 * Max number of Rx buffers we replenish at a time.  Again keep this modest,
 * since allocating buffers isn't cheap either.
 */
#define MAX_RX_REFILL 16U

/*
 * Period of the Rx queue check timer.  This timer is infrequent as it has
 * something to do only when the system experiences severe memory shortage.
 */
#define RX_QCHECK_PERIOD (HZ / 2)

/*
 * Period of the Tx queue check timer.
 */
#define TX_QCHECK_PERIOD (HZ / 2)
/* SGE Hung Ingress DMA Threshold Warning time (in Hz) and Warning Repeat Rate
 * (in RX_QCHECK_PERIOD multiples).  If we find one of the SGE Ingress DMA
 * State Machines in the same state for this amount of time (in HZ) then we'll
 * issue a warning about a potential hang.  While the SGE Ingress DMA Channel
 * still appears to be hung, we'll repeat the warning every N RX_QCHECK_PERIODs
 * till the situation clears.  If the situation clears, we'll note that as well.
 */
#define SGE_IDMA_WARN_THRESH (1 * HZ)
#define SGE_IDMA_WARN_REPEAT (20 * RX_QCHECK_PERIOD)
/*
 * Max number of Tx descriptors to be reclaimed by the Tx timer.
 */
#define MAX_TIMER_TX_RECLAIM 100

/*
 * Timer index used when backing off due to memory shortage.
 */
#define NOMEM_TMR_IDX (SGE_NTIMERS - 1)

/*
 * An FL with <= FL_STARVE_THRES buffers is starving and a periodic timer will
 * attempt to refill it.
 */
#define FL_STARVE_THRES 4

/*
 * Suspend an Ethernet Tx queue with fewer available descriptors than this.
 * This is the same as calc_tx_descs() for a TSO packet with
 * nr_frags == MAX_SKB_FRAGS.
 */
#define ETHTXQ_STOP_THRES \
	(1 + DIV_ROUND_UP((3 * MAX_SKB_FRAGS) / 2 + (MAX_SKB_FRAGS & 1), 8))
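
/*
 * Worked example (a sketch, not a spec): on a build where MAX_SKB_FRAGS is
 * 17 (the usual value with 4KB pages), the expression above evaluates to
 * 1 + DIV_ROUND_UP((3 * 17) / 2 + 1, 8) = 1 + DIV_ROUND_UP(26, 8) = 5
 * descriptors, i.e. the queue is suspended once fewer than 5 descriptors
 * remain -- enough for one maximally fragmented TSO packet.
 */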
/*
 * Suspension threshold for non-Ethernet Tx queues.  We require enough room
 * for a full sized WR.
 */
#define TXQ_STOP_THRES (SGE_MAX_WR_LEN / sizeof(struct tx_desc))

/*
 * Max Tx descriptor space we allow for an Ethernet packet to be inlined
 * into a WR.
 */
#define MAX_IMM_TX_PKT_LEN 128

/*
 * Max size of a WR sent through a control Tx queue.
 */
#define MAX_CTRL_WR_LEN SGE_MAX_WR_LEN
struct tx_sw_desc {		/* SW state per Tx descriptor */
	struct sk_buff *skb;
	struct ulptx_sgl *sgl;
};

struct rx_sw_desc {		/* SW state per Rx descriptor */
	struct page *page;
	dma_addr_t dma_addr;
};
/*
 * Rx buffer sizes for "useskbs" Free List buffers (one ingress packet per skb
 * buffer).  We currently only support two sizes for 1500- and 9000-byte MTUs.
 * We could easily support more but there doesn't seem to be much need for
 * that ...
 */
#define FL_MTU_SMALL 1500
#define FL_MTU_LARGE 9000

static inline unsigned int fl_mtu_bufsize(struct adapter *adapter,
					  unsigned int mtu)
{
	struct sge *s = &adapter->sge;

	return ALIGN(s->pktshift + ETH_HLEN + VLAN_HLEN + mtu, s->fl_align);
}

#define FL_MTU_SMALL_BUFSIZE(adapter) fl_mtu_bufsize(adapter, FL_MTU_SMALL)
#define FL_MTU_LARGE_BUFSIZE(adapter) fl_mtu_bufsize(adapter, FL_MTU_LARGE)
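
/*
 * Worked example (illustrative values, not read from any particular
 * adapter): with a packet-shift of 2 bytes and an FL alignment of 64,
 * FL_MTU_SMALL_BUFSIZE() is ALIGN(2 + 14 + 4 + 1500, 64) = 1536 bytes and
 * FL_MTU_LARGE_BUFSIZE() is ALIGN(2 + 14 + 4 + 9000, 64) = 9024 bytes.
 * The real values depend on s->pktshift and s->fl_align, which are taken
 * from the SGE configuration at initialization time.
 */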
/*
 * Bits 0..3 of rx_sw_desc.dma_addr have special meaning.  The hardware uses
 * these to specify the buffer size as an index into the SGE Free List Buffer
 * Size register array.  We also use bit 4, when the buffer has been unmapped
 * for DMA, but this is of course never sent to the hardware and is only used
 * to prevent double unmappings.  All of the above requires that the Free List
 * Buffers which we allocate have the bottom 5 bits free (0) -- i.e. are
 * 32-byte aligned or a power of 2 greater in alignment.  Since the SGE's
 * minimal Free List Buffer alignment is 32 bytes, this works out for us ...
 */
enum {
	RX_BUF_FLAGS     = 0x1f,   /* bottom five bits are special */
	RX_BUF_SIZE      = 0x0f,   /* bottom four bits are for buf sizes */
	RX_UNMAPPED_BUF  = 0x10,   /* buffer is not mapped */

	/*
	 * XXX We shouldn't depend on being able to use these indices.
	 * XXX Especially when some other Master PF has initialized the
	 * XXX adapter or we use the Firmware Configuration File.  We
	 * XXX should really search through the Host Buffer Size register
	 * XXX array for the appropriately sized buffer indices.
	 */
	RX_SMALL_PG_BUF  = 0x0,   /* small (PAGE_SIZE) page buffer */
	RX_LARGE_PG_BUF  = 0x1,   /* large (FL_PG_ORDER) page buffer */

	RX_SMALL_MTU_BUF = 0x2,   /* small MTU buffer */
	RX_LARGE_MTU_BUF = 0x3,   /* large MTU buffer */
};
static int timer_pkt_quota[] = {1, 1, 2, 3, 4, 5};
#define MIN_NAPI_WORK  1
static inline dma_addr_t get_buf_addr(const struct rx_sw_desc *d)
{
	return d->dma_addr & ~(dma_addr_t)RX_BUF_FLAGS;
}

static inline bool is_buf_mapped(const struct rx_sw_desc *d)
{
	return !(d->dma_addr & RX_UNMAPPED_BUF);
}
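
/*
 * Example of the encoding (hypothetical bus address): a large-page FL buffer
 * DMA-mapped at 0x7f2a4000 is stored in rx_sw_desc.dma_addr as
 * 0x7f2a4000 | RX_LARGE_PG_BUF = 0x7f2a4001.  get_buf_addr() masks off the
 * low five flag bits to recover the bus address, get_buf_size() keys off the
 * size index in bits 0..3, and is_buf_mapped() tests the RX_UNMAPPED_BUF bit.
 */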
/**
 *	txq_avail - return the number of available slots in a Tx queue
 *	@q: the Tx queue
 *
 *	Returns the number of descriptors in a Tx queue available to write new
 *	packet descriptors.
 */
static inline unsigned int txq_avail(const struct sge_txq *q)
{
	return q->size - 1 - q->in_use;
}
/**
 *	fl_cap - return the capacity of a free-buffer list
 *	@fl: the FL
 *
 *	Returns the capacity of a free-buffer list.  The capacity is less than
 *	the size because one descriptor needs to be left unpopulated, otherwise
 *	HW will think the FL is empty.
 */
static inline unsigned int fl_cap(const struct sge_fl *fl)
{
	return fl->size - 8;   /* 1 descriptor = 8 buffers */
}

static inline bool fl_starving(const struct sge_fl *fl)
{
	return fl->avail - fl->pend_cred <= FL_STARVE_THRES;
}
static int map_skb(struct device *dev, const struct sk_buff *skb,
		   dma_addr_t *addr)
{
	const skb_frag_t *fp, *end;
	const struct skb_shared_info *si;

	*addr = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *addr))
		goto out_err;

	si = skb_shinfo(skb);
	end = &si->frags[si->nr_frags];

	for (fp = si->frags; fp < end; fp++) {
		*++addr = skb_frag_dma_map(dev, fp, 0, skb_frag_size(fp),
					   DMA_TO_DEVICE);
		if (dma_mapping_error(dev, *addr))
			goto unwind;
	}
	return 0;

unwind:
	while (fp-- > si->frags)
		dma_unmap_page(dev, *--addr, skb_frag_size(fp), DMA_TO_DEVICE);

	dma_unmap_single(dev, addr[-1], skb_headlen(skb), DMA_TO_DEVICE);
out_err:
	return -ENOMEM;
}
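
/*
 * Note on the addr[] parameter: callers pass an array with room for the
 * linear header mapping in slot 0 followed by one slot per page fragment,
 * i.e. skb_shinfo(skb)->nr_frags + 1 entries in total (see the
 * dma_addr_t addr[MAX_SKB_FRAGS + 1] local in t4_eth_xmit() below).
 */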
#ifdef CONFIG_NEED_DMA_MAP_STATE
static void unmap_skb(struct device *dev, const struct sk_buff *skb,
		      const dma_addr_t *addr)
{
	const skb_frag_t *fp, *end;
	const struct skb_shared_info *si;

	dma_unmap_single(dev, *addr++, skb_headlen(skb), DMA_TO_DEVICE);

	si = skb_shinfo(skb);
	end = &si->frags[si->nr_frags];
	for (fp = si->frags; fp < end; fp++)
		dma_unmap_page(dev, *addr++, skb_frag_size(fp), DMA_TO_DEVICE);
}
/**
 *	deferred_unmap_destructor - unmap a packet when it is freed
 *	@skb: the packet
 *
 *	This is the packet destructor used for Tx packets that need to remain
 *	mapped until they are freed rather than until their Tx descriptors are
 *	freed.
 */
static void deferred_unmap_destructor(struct sk_buff *skb)
{
	unmap_skb(skb->dev->dev.parent, skb, (dma_addr_t *)skb->head);
}
#endif /* CONFIG_NEED_DMA_MAP_STATE */
static void unmap_sgl(struct device *dev, const struct sk_buff *skb,
		      const struct ulptx_sgl *sgl, const struct sge_txq *q)
{
	const struct ulptx_sge_pair *p;
	unsigned int nfrags = skb_shinfo(skb)->nr_frags;

	if (likely(skb_headlen(skb)))
		dma_unmap_single(dev, be64_to_cpu(sgl->addr0), ntohl(sgl->len0),
				 DMA_TO_DEVICE);
	else {
		dma_unmap_page(dev, be64_to_cpu(sgl->addr0), ntohl(sgl->len0),
			       DMA_TO_DEVICE);
		nfrags--;
	}

	/*
	 * the complexity below is because of the possibility of a wrap-around
	 * in the middle of an SGL
	 */
	for (p = sgl->sge; nfrags >= 2; nfrags -= 2) {
		if (likely((u8 *)(p + 1) <= (u8 *)q->stat)) {
unmap:			dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
				       ntohl(p->len[0]), DMA_TO_DEVICE);
			dma_unmap_page(dev, be64_to_cpu(p->addr[1]),
				       ntohl(p->len[1]), DMA_TO_DEVICE);
			p++;
		} else if ((u8 *)p == (u8 *)q->stat) {
			p = (const struct ulptx_sge_pair *)q->desc;
			goto unmap;
		} else if ((u8 *)p + 8 == (u8 *)q->stat) {
			const __be64 *addr = (const __be64 *)q->desc;

			dma_unmap_page(dev, be64_to_cpu(addr[0]),
				       ntohl(p->len[0]), DMA_TO_DEVICE);
			dma_unmap_page(dev, be64_to_cpu(addr[1]),
				       ntohl(p->len[1]), DMA_TO_DEVICE);
			p = (const struct ulptx_sge_pair *)&addr[2];
		} else {
			const __be64 *addr = (const __be64 *)q->desc;

			dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
				       ntohl(p->len[0]), DMA_TO_DEVICE);
			dma_unmap_page(dev, be64_to_cpu(addr[0]),
				       ntohl(p->len[1]), DMA_TO_DEVICE);
			p = (const struct ulptx_sge_pair *)&addr[1];
		}
	}
	if (nfrags) {
		__be64 addr;

		if ((u8 *)p == (u8 *)q->stat)
			p = (const struct ulptx_sge_pair *)q->desc;
		addr = (u8 *)p + 16 <= (u8 *)q->stat ? p->addr[0] :
						       *(const __be64 *)q->desc;
		dma_unmap_page(dev, be64_to_cpu(addr), ntohl(p->len[0]),
			       DMA_TO_DEVICE);
	}
}
/**
 *	free_tx_desc - reclaims Tx descriptors and their buffers
 *	@adapter: the adapter
 *	@q: the Tx queue to reclaim descriptors from
 *	@n: the number of descriptors to reclaim
 *	@unmap: whether the buffers should be unmapped for DMA
 *
 *	Reclaims Tx descriptors from an SGE Tx queue and frees the associated
 *	Tx buffers.  Called with the Tx queue lock held.
 */
static void free_tx_desc(struct adapter *adap, struct sge_txq *q,
			 unsigned int n, bool unmap)
{
	struct tx_sw_desc *d;
	unsigned int cidx = q->cidx;
	struct device *dev = adap->pdev_dev;

	d = &q->sdesc[cidx];
	while (n--) {
		if (d->skb) {			/* an SGL is present */
			if (unmap)
				unmap_sgl(dev, d->skb, d->sgl, q);
			dev_consume_skb_any(d->skb);
			d->skb = NULL;
		}
		++d;
		if (++cidx == q->size) {
			cidx = 0;
			d = q->sdesc;
		}
	}
	q->cidx = cidx;
}
/*
 * Return the number of reclaimable descriptors in a Tx queue.
 */
static inline int reclaimable(const struct sge_txq *q)
{
	int hw_cidx = ntohs(q->stat->cidx);

	hw_cidx -= q->cidx;
	return hw_cidx < 0 ? hw_cidx + q->size : hw_cidx;
}
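
/*
 * Example of the wrap-around arithmetic: with q->size = 1024, a hardware
 * cidx of 3 (read from the queue's status page) and a software cidx of
 * 1020, hw_cidx - q->cidx = -1017, so we return -1017 + 1024 = 7
 * reclaimable descriptors.
 */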
/**
 *	reclaim_completed_tx - reclaims completed Tx descriptors
 *	@adap: the adapter
 *	@q: the Tx queue to reclaim completed descriptors from
 *	@unmap: whether the buffers should be unmapped for DMA
 *
 *	Reclaims Tx descriptors that the SGE has indicated it has processed,
 *	and frees the associated buffers if possible.  Called with the Tx
 *	queue locked.
 */
static inline void reclaim_completed_tx(struct adapter *adap, struct sge_txq *q,
					bool unmap)
{
	int avail = reclaimable(q);

	if (avail) {
		/*
		 * Limit the amount of clean up work we do at a time to keep
		 * the Tx lock hold time O(1).
		 */
		if (avail > MAX_TX_RECLAIM)
			avail = MAX_TX_RECLAIM;

		free_tx_desc(adap, q, avail, unmap);
		q->in_use -= avail;
	}
}
static inline int get_buf_size(struct adapter *adapter,
			       const struct rx_sw_desc *d)
{
	struct sge *s = &adapter->sge;
	unsigned int rx_buf_size_idx = d->dma_addr & RX_BUF_SIZE;
	int buf_size;

	switch (rx_buf_size_idx) {
	case RX_SMALL_PG_BUF:
		buf_size = PAGE_SIZE;
		break;

	case RX_LARGE_PG_BUF:
		buf_size = PAGE_SIZE << s->fl_pg_order;
		break;

	case RX_SMALL_MTU_BUF:
		buf_size = FL_MTU_SMALL_BUFSIZE(adapter);
		break;

	case RX_LARGE_MTU_BUF:
		buf_size = FL_MTU_LARGE_BUFSIZE(adapter);
		break;

	default:
		BUG();
	}

	return buf_size;
}
/**
 *	free_rx_bufs - free the Rx buffers on an SGE free list
 *	@adap: the adapter
 *	@q: the SGE free list to free buffers from
 *	@n: how many buffers to free
 *
 *	Release the next @n buffers on an SGE free-buffer Rx queue.  The
 *	buffers must be made inaccessible to HW before calling this function.
 */
static void free_rx_bufs(struct adapter *adap, struct sge_fl *q, int n)
{
	while (n--) {
		struct rx_sw_desc *d = &q->sdesc[q->cidx];

		if (is_buf_mapped(d))
			dma_unmap_page(adap->pdev_dev, get_buf_addr(d),
				       get_buf_size(adap, d),
				       PCI_DMA_FROMDEVICE);
		put_page(d->page);
		d->page = NULL;
		if (++q->cidx == q->size)
			q->cidx = 0;
		q->avail--;
	}
}
/**
 *	unmap_rx_buf - unmap the current Rx buffer on an SGE free list
 *	@adap: the adapter
 *	@q: the SGE free list
 *
 *	Unmap the current buffer on an SGE free-buffer Rx queue.  The
 *	buffer must be made inaccessible to HW before calling this function.
 *
 *	This is similar to @free_rx_bufs above but does not free the buffer.
 *	Do note that the FL still loses any further access to the buffer.
 */
static void unmap_rx_buf(struct adapter *adap, struct sge_fl *q)
{
	struct rx_sw_desc *d = &q->sdesc[q->cidx];

	if (is_buf_mapped(d))
		dma_unmap_page(adap->pdev_dev, get_buf_addr(d),
			       get_buf_size(adap, d), PCI_DMA_FROMDEVICE);
	d->dma_addr |= RX_UNMAPPED_BUF;
	if (++q->cidx == q->size)
		q->cidx = 0;
	q->avail--;
}
static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
{
	u32 val;

	if (q->pend_cred >= 8) {
		if (is_t4(adap->params.chip))
			val = PIDX_V(q->pend_cred / 8);
		else
			val = PIDX_T5_V(q->pend_cred / 8) |
			      DBTYPE_F;
		val |= DBPRIO_F;
		wmb();

		/* If we don't have access to the new User Doorbell (T5+), use
		 * the old doorbell mechanism; otherwise use the new BAR2
		 * mechanism.
		 */
		if (unlikely(q->bar2_addr == NULL)) {
			t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
				     val | QID_V(q->cntxt_id));
		} else {
			writel(val | QID_V(q->bar2_qid),
			       q->bar2_addr + SGE_UDB_KDOORBELL);

			/* This Write memory Barrier will force the write to
			 * the User Doorbell area to be flushed.
			 */
			wmb();
		}
		q->pend_cred &= 7;
	}
}
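
/*
 * The Free List doorbell works in units of 8 buffers (one FL descriptor's
 * worth; cf. fl_cap() above).  For example, with q->pend_cred = 37 we
 * advertise 37 / 8 = 4 units (32 buffers) in the PIDX field and carry the
 * remaining 5 credits over in q->pend_cred for a later ring.
 */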
static inline void set_rx_sw_desc(struct rx_sw_desc *sd, struct page *pg,
				  dma_addr_t mapping)
{
	sd->page = pg;
	sd->dma_addr = mapping;      /* includes size low bits */
}
/**
 *	refill_fl - refill an SGE Rx buffer ring
 *	@adap: the adapter
 *	@q: the ring to refill
 *	@n: the number of new buffers to allocate
 *	@gfp: the gfp flags for the allocations
 *
 *	(Re)populate an SGE free-buffer queue with up to @n new packet buffers,
 *	allocated with the supplied gfp flags.  The caller must assure that
 *	@n does not exceed the queue's capacity.  If afterwards the queue is
 *	found critically low mark it as starving in the bitmap of starving FLs.
 *
 *	Returns the number of buffers allocated.
 */
static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
			      gfp_t gfp)
{
	struct sge *s = &adap->sge;
	struct page *pg;
	dma_addr_t mapping;
	unsigned int cred = q->avail;
	__be64 *d = &q->desc[q->pidx];
	struct rx_sw_desc *sd = &q->sdesc[q->pidx];

	if (s->fl_pg_order == 0)
		goto alloc_small_pages;

	/*
	 * Prefer large buffers
	 */
	while (n) {
		pg = __dev_alloc_pages(gfp, s->fl_pg_order);
		if (unlikely(!pg)) {
			q->large_alloc_failed++;
			break;       /* fall back to single pages */
		}

		mapping = dma_map_page(adap->pdev_dev, pg, 0,
				       PAGE_SIZE << s->fl_pg_order,
				       PCI_DMA_FROMDEVICE);
		if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
			__free_pages(pg, s->fl_pg_order);
			goto out;   /* do not try small pages for this error */
		}
		mapping |= RX_LARGE_PG_BUF;
		*d++ = cpu_to_be64(mapping);

		set_rx_sw_desc(sd, pg, mapping);
		sd++;

		q->avail++;
		if (++q->pidx == q->size) {
			q->pidx = 0;
			sd = q->sdesc;
			d = q->desc;
		}
		n--;
	}

alloc_small_pages:
	while (n--) {
		pg = __dev_alloc_page(gfp);
		if (unlikely(!pg)) {
			q->alloc_failed++;
			break;
		}

		mapping = dma_map_page(adap->pdev_dev, pg, 0, PAGE_SIZE,
				       PCI_DMA_FROMDEVICE);
		if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
			put_page(pg);
			goto out;
		}
		*d++ = cpu_to_be64(mapping);
		set_rx_sw_desc(sd, pg, mapping);
		sd++;

		q->avail++;
		if (++q->pidx == q->size) {
			q->pidx = 0;
			sd = q->sdesc;
			d = q->desc;
		}
	}

out:	cred = q->avail - cred;
	q->pend_cred += cred;
	ring_fl_db(adap, q);

	if (unlikely(fl_starving(q))) {
		smp_wmb();
		set_bit(q->cntxt_id - adap->sge.egr_start,
			adap->sge.starving_fl);
	}

	return cred;
}

static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
{
	refill_fl(adap, fl, min(MAX_RX_REFILL, fl_cap(fl) - fl->avail),
		  GFP_ATOMIC);
}
/**
 *	alloc_ring - allocate resources for an SGE descriptor ring
 *	@dev: the PCI device's core device
 *	@nelem: the number of descriptors
 *	@elem_size: the size of each descriptor
 *	@sw_size: the size of the SW state associated with each ring element
 *	@phys: the physical address of the allocated ring
 *	@metadata: address of the array holding the SW state for the ring
 *	@stat_size: extra space in HW ring for status information
 *	@node: preferred node for memory allocations
 *
 *	Allocates resources for an SGE descriptor ring, such as Tx queues,
 *	free buffer lists, or response queues.  Each SGE ring requires
 *	space for its HW descriptors plus, optionally, space for the SW state
 *	associated with each HW entry (the metadata).  The function returns
 *	three values: the virtual address for the HW ring (the return value
 *	of the function), the bus address of the HW ring, and the address
 *	of the SW ring.
 */
static void *alloc_ring(struct device *dev, size_t nelem, size_t elem_size,
			size_t sw_size, dma_addr_t *phys, void *metadata,
			size_t stat_size, int node)
{
	size_t len = nelem * elem_size + stat_size;
	void *s = NULL;
	void *p = dma_alloc_coherent(dev, len, phys, GFP_KERNEL);

	if (!p)
		return NULL;
	if (sw_size) {
		s = kzalloc_node(nelem * sw_size, GFP_KERNEL, node);

		if (!s) {
			dma_free_coherent(dev, len, p, *phys);
			return NULL;
		}
	}
	if (metadata)
		*(void **)metadata = s;
	memset(p, 0, len);
	return p;
}
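
/*
 * Hypothetical usage sketch (field names are illustrative, not a fixed
 * API): allocating a 1024-entry free list whose HW descriptors are 8-byte
 * addresses, with one struct rx_sw_desc of software state per entry and a
 * status page appended at the end:
 *
 *	fl->desc = alloc_ring(adap->pdev_dev, 1024, sizeof(__be64),
 *			      sizeof(struct rx_sw_desc), &fl->addr,
 *			      &fl->sdesc, s->stat_len,
 *			      dev_to_node(adap->pdev_dev));
 *	if (!fl->desc)
 *		return -ENOMEM;
 */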
/**
 *	sgl_len - calculates the size of an SGL of the given capacity
 *	@n: the number of SGL entries
 *
 *	Calculates the number of flits needed for a scatter/gather list that
 *	can hold the given number of entries.
 */
static inline unsigned int sgl_len(unsigned int n)
{
	n--; /* the first Length/Address pair is part of the SGL header */
	return (3 * n) / 2 + (n & 1) + 2;
}
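
/*
 * How the formula above works (a sketch of the ULPTX DSGL layout): the SGL
 * header occupies 2 flits (command/nsge plus len0 in one flit, addr0 in the
 * next).  Each remaining pair of entries packs into a 3-flit ulptx_sge_pair
 * (two 32-bit lengths in one flit, two 64-bit addresses in two more), and an
 * odd leftover entry takes 2 flits.  So sgl_len(4) = 2 + 3 + 2 = 7 flits.
 */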
/**
 *	flits_to_desc - returns the num of Tx descriptors for the given flits
 *	@n: the number of flits
 *
 *	Returns the number of Tx descriptors needed for the supplied number
 *	of flits.
 */
static inline unsigned int flits_to_desc(unsigned int n)
{
	BUG_ON(n > SGE_MAX_WR_LEN / 8);
	return DIV_ROUND_UP(n, 8);
}
/**
 *	is_eth_imm - can an Ethernet packet be sent as immediate data?
 *	@skb: the packet
 *
 *	Returns whether an Ethernet packet is small enough to fit as
 *	immediate data.  Return value corresponds to headroom required.
 */
static inline int is_eth_imm(const struct sk_buff *skb)
{
	int hdrlen = skb_shinfo(skb)->gso_size ?
			sizeof(struct cpl_tx_pkt_lso_core) : 0;

	hdrlen += sizeof(struct cpl_tx_pkt);
	if (skb->len <= MAX_IMM_TX_PKT_LEN - hdrlen)
		return hdrlen;
	return 0;
}
/**
 *	calc_tx_flits - calculate the number of flits for a packet Tx WR
 *	@skb: the packet
 *
 *	Returns the number of flits needed for a Tx WR for the given Ethernet
 *	packet, including the needed WR and CPL headers.
 */
static inline unsigned int calc_tx_flits(const struct sk_buff *skb)
{
	unsigned int flits;
	int hdrlen = is_eth_imm(skb);

	if (hdrlen)
		return DIV_ROUND_UP(skb->len + hdrlen, sizeof(__be64));

	flits = sgl_len(skb_shinfo(skb)->nr_frags + 1) + 4;
	if (skb_shinfo(skb)->gso_size)
		flits += 2;
	return flits;
}
/**
 *	calc_tx_descs - calculate the number of Tx descriptors for a packet
 *	@skb: the packet
 *
 *	Returns the number of Tx descriptors needed for the given Ethernet
 *	packet, including the needed WR and CPL headers.
 */
static inline unsigned int calc_tx_descs(const struct sk_buff *skb)
{
	return flits_to_desc(calc_tx_flits(skb));
}
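
/*
 * End-to-end example (a sketch): a non-TSO packet with linear data plus
 * three page fragments needs calc_tx_flits() = sgl_len(4) + 4 = 11 flits
 * for the WR, CPL and gather list; flits_to_desc(11) = DIV_ROUND_UP(11, 8)
 * = 2 Tx descriptors.  The same packet sent as TSO adds 2 flits for the
 * LSO CPL and still fits in 2 descriptors.
 */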
/**
 *	write_sgl - populate a scatter/gather list for a packet
 *	@skb: the packet
 *	@q: the Tx queue we are writing into
 *	@sgl: starting location for writing the SGL
 *	@end: points right after the end of the SGL
 *	@start: start offset into skb main-body data to include in the SGL
 *	@addr: the list of bus addresses for the SGL elements
 *
 *	Generates a gather list for the buffers that make up a packet.
 *	The caller must provide adequate space for the SGL that will be written.
 *	The SGL includes all of the packet's page fragments and the data in its
 *	main body except for the first @start bytes.  @sgl must be 16-byte
 *	aligned and within a Tx descriptor with available space.  @end points
 *	right after the end of the SGL but does not account for any potential
 *	wrap around, i.e., @end > @sgl.
 */
807 static void write_sgl(const struct sk_buff
*skb
, struct sge_txq
*q
,
808 struct ulptx_sgl
*sgl
, u64
*end
, unsigned int start
,
809 const dma_addr_t
*addr
)
812 struct ulptx_sge_pair
*to
;
813 const struct skb_shared_info
*si
= skb_shinfo(skb
);
814 unsigned int nfrags
= si
->nr_frags
;
815 struct ulptx_sge_pair buf
[MAX_SKB_FRAGS
/ 2 + 1];
817 len
= skb_headlen(skb
) - start
;
819 sgl
->len0
= htonl(len
);
820 sgl
->addr0
= cpu_to_be64(addr
[0] + start
);
823 sgl
->len0
= htonl(skb_frag_size(&si
->frags
[0]));
824 sgl
->addr0
= cpu_to_be64(addr
[1]);
827 sgl
->cmd_nsge
= htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL
) |
828 ULPTX_NSGE_V(nfrags
));
829 if (likely(--nfrags
== 0))
832 * Most of the complexity below deals with the possibility we hit the
833 * end of the queue in the middle of writing the SGL. For this case
834 * only we create the SGL in a temporary buffer and then copy it.
836 to
= (u8
*)end
> (u8
*)q
->stat
? buf
: sgl
->sge
;
838 for (i
= (nfrags
!= si
->nr_frags
); nfrags
>= 2; nfrags
-= 2, to
++) {
839 to
->len
[0] = cpu_to_be32(skb_frag_size(&si
->frags
[i
]));
840 to
->len
[1] = cpu_to_be32(skb_frag_size(&si
->frags
[++i
]));
841 to
->addr
[0] = cpu_to_be64(addr
[i
]);
842 to
->addr
[1] = cpu_to_be64(addr
[++i
]);
845 to
->len
[0] = cpu_to_be32(skb_frag_size(&si
->frags
[i
]));
846 to
->len
[1] = cpu_to_be32(0);
847 to
->addr
[0] = cpu_to_be64(addr
[i
+ 1]);
849 if (unlikely((u8
*)end
> (u8
*)q
->stat
)) {
850 unsigned int part0
= (u8
*)q
->stat
- (u8
*)sgl
->sge
, part1
;
853 memcpy(sgl
->sge
, buf
, part0
);
854 part1
= (u8
*)end
- (u8
*)q
->stat
;
855 memcpy(q
->desc
, (u8
*)buf
+ part0
, part1
);
856 end
= (void *)q
->desc
+ part1
;
858 if ((uintptr_t)end
& 8) /* 0-pad to multiple of 16 */
/* This function copies a 64-byte coalesced work request to memory
 * mapped BAR2 space.  For a coalesced WR, the SGE fetches data from
 * the FIFO instead of from the host.
 */
866 static void cxgb_pio_copy(u64 __iomem
*dst
, u64
*src
)
/**
 *	ring_tx_db - check and potentially ring a Tx queue's doorbell
 *	@adap: the adapter
 *	@q: the Tx queue
 *	@n: number of new descriptors to give to HW
 *
 *	Ring the doorbell for a Tx queue.
 */
886 static inline void ring_tx_db(struct adapter
*adap
, struct sge_txq
*q
, int n
)
888 wmb(); /* write descriptors before telling HW */
890 /* If we don't have access to the new User Doorbell (T5+), use the old
891 * doorbell mechanism; otherwise use the new BAR2 mechanism.
893 if (unlikely(q
->bar2_addr
== NULL
)) {
897 /* For T4 we need to participate in the Doorbell Recovery
900 spin_lock_irqsave(&q
->db_lock
, flags
);
902 t4_write_reg(adap
, MYPF_REG(SGE_PF_KDOORBELL_A
),
903 QID_V(q
->cntxt_id
) | val
);
906 q
->db_pidx
= q
->pidx
;
907 spin_unlock_irqrestore(&q
->db_lock
, flags
);
909 u32 val
= PIDX_T5_V(n
);
911 /* T4 and later chips share the same PIDX field offset within
912 * the doorbell, but T5 and later shrank the field in order to
913 * gain a bit for Doorbell Priority. The field was absurdly
914 * large in the first place (14 bits) so we just use the T5
915 * and later limits and warn if a Queue ID is too large.
917 WARN_ON(val
& DBPRIO_F
);
919 /* If we're only writing a single TX Descriptor and we can use
920 * Inferred QID registers, we can use the Write Combining
921 * Gather Buffer; otherwise we use the simple doorbell.
923 if (n
== 1 && q
->bar2_qid
== 0) {
927 u64
*wr
= (u64
*)&q
->desc
[index
];
929 cxgb_pio_copy((u64 __iomem
*)
930 (q
->bar2_addr
+ SGE_UDB_WCDOORBELL
),
933 writel(val
| QID_V(q
->bar2_qid
),
934 q
->bar2_addr
+ SGE_UDB_KDOORBELL
);
937 /* This Write Memory Barrier will force the write to the User
938 * Doorbell area to be flushed. This is needed to prevent
939 * writes on different CPUs for the same queue from hitting
940 * the adapter out of order. This is required when some Work
941 * Requests take the Write Combine Gather Buffer path (user
942 * doorbell area offset [SGE_UDB_WCDOORBELL..+63]) and some
943 * take the traditional path where we simply increment the
944 * PIDX (User Doorbell area SGE_UDB_KDOORBELL) and have the
945 * hardware DMA read the actual Work Request.
952 * inline_tx_skb - inline a packet's data into Tx descriptors
954 * @q: the Tx queue where the packet will be inlined
955 * @pos: starting position in the Tx queue where to inline the packet
957 * Inline a packet's contents directly into Tx descriptors, starting at
958 * the given position within the Tx DMA ring.
959 * Most of the complexity of this operation is dealing with wrap arounds
960 * in the middle of the packet we want to inline.
962 static void inline_tx_skb(const struct sk_buff
*skb
, const struct sge_txq
*q
,
966 int left
= (void *)q
->stat
- pos
;
968 if (likely(skb
->len
<= left
)) {
969 if (likely(!skb
->data_len
))
970 skb_copy_from_linear_data(skb
, pos
, skb
->len
);
972 skb_copy_bits(skb
, 0, pos
, skb
->len
);
975 skb_copy_bits(skb
, 0, pos
, left
);
976 skb_copy_bits(skb
, left
, q
->desc
, skb
->len
- left
);
977 pos
= (void *)q
->desc
+ (skb
->len
- left
);
980 /* 0-pad to multiple of 16 */
981 p
= PTR_ALIGN(pos
, 8);
982 if ((uintptr_t)p
& 8)
987 * Figure out what HW csum a packet wants and return the appropriate control
990 static u64
hwcsum(const struct sk_buff
*skb
)
993 const struct iphdr
*iph
= ip_hdr(skb
);
995 if (iph
->version
== 4) {
996 if (iph
->protocol
== IPPROTO_TCP
)
997 csum_type
= TX_CSUM_TCPIP
;
998 else if (iph
->protocol
== IPPROTO_UDP
)
999 csum_type
= TX_CSUM_UDPIP
;
1002 * unknown protocol, disable HW csum
1003 * and hope a bad packet is detected
1005 return TXPKT_L4CSUM_DIS
;
1009 * this doesn't work with extension headers
1011 const struct ipv6hdr
*ip6h
= (const struct ipv6hdr
*)iph
;
1013 if (ip6h
->nexthdr
== IPPROTO_TCP
)
1014 csum_type
= TX_CSUM_TCPIP6
;
1015 else if (ip6h
->nexthdr
== IPPROTO_UDP
)
1016 csum_type
= TX_CSUM_UDPIP6
;
1021 if (likely(csum_type
>= TX_CSUM_TCPIP
))
1022 return TXPKT_CSUM_TYPE(csum_type
) |
1023 TXPKT_IPHDR_LEN(skb_network_header_len(skb
)) |
1024 TXPKT_ETHHDR_LEN(skb_network_offset(skb
) - ETH_HLEN
);
1026 int start
= skb_transport_offset(skb
);
1028 return TXPKT_CSUM_TYPE(csum_type
) | TXPKT_CSUM_START(start
) |
1029 TXPKT_CSUM_LOC(start
+ skb
->csum_offset
);
1033 static void eth_txq_stop(struct sge_eth_txq
*q
)
1035 netif_tx_stop_queue(q
->txq
);
1039 static inline void txq_advance(struct sge_txq
*q
, unsigned int n
)
1043 if (q
->pidx
>= q
->size
)
1048 * t4_eth_xmit - add a packet to an Ethernet Tx queue
1050 * @dev: the egress net device
1052 * Add a packet to an SGE Ethernet Tx queue. Runs with softirqs disabled.
1054 netdev_tx_t
t4_eth_xmit(struct sk_buff
*skb
, struct net_device
*dev
)
1060 unsigned int flits
, ndesc
;
1061 struct adapter
*adap
;
1062 struct sge_eth_txq
*q
;
1063 const struct port_info
*pi
;
1064 struct fw_eth_tx_pkt_wr
*wr
;
1065 struct cpl_tx_pkt_core
*cpl
;
1066 const struct skb_shared_info
*ssi
;
1067 dma_addr_t addr
[MAX_SKB_FRAGS
+ 1];
1068 bool immediate
= false;
1071 * The chip min packet length is 10 octets but play safe and reject
1072 * anything shorter than an Ethernet header.
1074 if (unlikely(skb
->len
< ETH_HLEN
)) {
1075 out_free
: dev_kfree_skb_any(skb
);
1076 return NETDEV_TX_OK
;
1079 pi
= netdev_priv(dev
);
1081 qidx
= skb_get_queue_mapping(skb
);
1082 q
= &adap
->sge
.ethtxq
[qidx
+ pi
->first_qset
];
1084 reclaim_completed_tx(adap
, &q
->q
, true);
1086 flits
= calc_tx_flits(skb
);
1087 ndesc
= flits_to_desc(flits
);
1088 credits
= txq_avail(&q
->q
) - ndesc
;
1090 if (unlikely(credits
< 0)) {
1092 dev_err(adap
->pdev_dev
,
1093 "%s: Tx ring %u full while queue awake!\n",
1095 return NETDEV_TX_BUSY
;
1098 if (is_eth_imm(skb
))
1102 unlikely(map_skb(adap
->pdev_dev
, skb
, addr
) < 0)) {
1107 wr_mid
= FW_WR_LEN16_V(DIV_ROUND_UP(flits
, 2));
1108 if (unlikely(credits
< ETHTXQ_STOP_THRES
)) {
1110 wr_mid
|= FW_WR_EQUEQ_F
| FW_WR_EQUIQ_F
;
1113 wr
= (void *)&q
->q
.desc
[q
->q
.pidx
];
1114 wr
->equiq_to_len16
= htonl(wr_mid
);
1115 wr
->r3
= cpu_to_be64(0);
1116 end
= (u64
*)wr
+ flits
;
1118 len
= immediate
? skb
->len
: 0;
1119 ssi
= skb_shinfo(skb
);
1120 if (ssi
->gso_size
) {
1121 struct cpl_tx_pkt_lso
*lso
= (void *)wr
;
1122 bool v6
= (ssi
->gso_type
& SKB_GSO_TCPV6
) != 0;
1123 int l3hdr_len
= skb_network_header_len(skb
);
1124 int eth_xtra_len
= skb_network_offset(skb
) - ETH_HLEN
;
1126 len
+= sizeof(*lso
);
1127 wr
->op_immdlen
= htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR
) |
1128 FW_WR_IMMDLEN_V(len
));
1129 lso
->c
.lso_ctrl
= htonl(LSO_OPCODE(CPL_TX_PKT_LSO
) |
1130 LSO_FIRST_SLICE
| LSO_LAST_SLICE
|
1132 LSO_ETHHDR_LEN(eth_xtra_len
/ 4) |
1133 LSO_IPHDR_LEN(l3hdr_len
/ 4) |
1134 LSO_TCPHDR_LEN(tcp_hdr(skb
)->doff
));
1135 lso
->c
.ipid_ofst
= htons(0);
1136 lso
->c
.mss
= htons(ssi
->gso_size
);
1137 lso
->c
.seqno_offset
= htonl(0);
1138 if (is_t4(adap
->params
.chip
))
1139 lso
->c
.len
= htonl(skb
->len
);
1141 lso
->c
.len
= htonl(LSO_T5_XFER_SIZE(skb
->len
));
1142 cpl
= (void *)(lso
+ 1);
1143 cntrl
= TXPKT_CSUM_TYPE(v6
? TX_CSUM_TCPIP6
: TX_CSUM_TCPIP
) |
1144 TXPKT_IPHDR_LEN(l3hdr_len
) |
1145 TXPKT_ETHHDR_LEN(eth_xtra_len
);
1147 q
->tx_cso
+= ssi
->gso_segs
;
1149 len
+= sizeof(*cpl
);
1150 wr
->op_immdlen
= htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR
) |
1151 FW_WR_IMMDLEN_V(len
));
1152 cpl
= (void *)(wr
+ 1);
1153 if (skb
->ip_summed
== CHECKSUM_PARTIAL
) {
1154 cntrl
= hwcsum(skb
) | TXPKT_IPCSUM_DIS
;
1157 cntrl
= TXPKT_L4CSUM_DIS
| TXPKT_IPCSUM_DIS
;
1160 if (skb_vlan_tag_present(skb
)) {
1162 cntrl
|= TXPKT_VLAN_VLD
| TXPKT_VLAN(skb_vlan_tag_get(skb
));
1165 cpl
->ctrl0
= htonl(TXPKT_OPCODE(CPL_TX_PKT_XT
) |
1166 TXPKT_INTF(pi
->tx_chan
) | TXPKT_PF(adap
->fn
));
1167 cpl
->pack
= htons(0);
1168 cpl
->len
= htons(skb
->len
);
1169 cpl
->ctrl1
= cpu_to_be64(cntrl
);
1172 inline_tx_skb(skb
, &q
->q
, cpl
+ 1);
1173 dev_consume_skb_any(skb
);
1177 write_sgl(skb
, &q
->q
, (struct ulptx_sgl
*)(cpl
+ 1), end
, 0,
1181 last_desc
= q
->q
.pidx
+ ndesc
- 1;
1182 if (last_desc
>= q
->q
.size
)
1183 last_desc
-= q
->q
.size
;
1184 q
->q
.sdesc
[last_desc
].skb
= skb
;
1185 q
->q
.sdesc
[last_desc
].sgl
= (struct ulptx_sgl
*)(cpl
+ 1);
1188 txq_advance(&q
->q
, ndesc
);
1190 ring_tx_db(adap
, &q
->q
, ndesc
);
1191 return NETDEV_TX_OK
;
1195 * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
1196 * @q: the SGE control Tx queue
1198 * This is a variant of reclaim_completed_tx() that is used for Tx queues
1199 * that send only immediate data (presently just the control queues) and
1200 * thus do not have any sk_buffs to release.
1202 static inline void reclaim_completed_tx_imm(struct sge_txq
*q
)
1204 int hw_cidx
= ntohs(q
->stat
->cidx
);
1205 int reclaim
= hw_cidx
- q
->cidx
;
1210 q
->in_use
-= reclaim
;
1215 * is_imm - check whether a packet can be sent as immediate data
1218 * Returns true if a packet can be sent as a WR with immediate data.
1220 static inline int is_imm(const struct sk_buff
*skb
)
1222 return skb
->len
<= MAX_CTRL_WR_LEN
;
1226 * ctrlq_check_stop - check if a control queue is full and should stop
1228 * @wr: most recent WR written to the queue
1230 * Check if a control queue has become full and should be stopped.
1231 * We clean up control queue descriptors very lazily, only when we are out.
1232 * If the queue is still full after reclaiming any completed descriptors
1233 * we suspend it and have the last WR wake it up.
1235 static void ctrlq_check_stop(struct sge_ctrl_txq
*q
, struct fw_wr_hdr
*wr
)
1237 reclaim_completed_tx_imm(&q
->q
);
1238 if (unlikely(txq_avail(&q
->q
) < TXQ_STOP_THRES
)) {
1239 wr
->lo
|= htonl(FW_WR_EQUEQ_F
| FW_WR_EQUIQ_F
);
1246 * ctrl_xmit - send a packet through an SGE control Tx queue
1247 * @q: the control queue
1250 * Send a packet through an SGE control Tx queue. Packets sent through
1251 * a control queue must fit entirely as immediate data.
1253 static int ctrl_xmit(struct sge_ctrl_txq
*q
, struct sk_buff
*skb
)
1256 struct fw_wr_hdr
*wr
;
1258 if (unlikely(!is_imm(skb
))) {
1261 return NET_XMIT_DROP
;
1264 ndesc
= DIV_ROUND_UP(skb
->len
, sizeof(struct tx_desc
));
1265 spin_lock(&q
->sendq
.lock
);
1267 if (unlikely(q
->full
)) {
1268 skb
->priority
= ndesc
; /* save for restart */
1269 __skb_queue_tail(&q
->sendq
, skb
);
1270 spin_unlock(&q
->sendq
.lock
);
1274 wr
= (struct fw_wr_hdr
*)&q
->q
.desc
[q
->q
.pidx
];
1275 inline_tx_skb(skb
, &q
->q
, wr
);
1277 txq_advance(&q
->q
, ndesc
);
1278 if (unlikely(txq_avail(&q
->q
) < TXQ_STOP_THRES
))
1279 ctrlq_check_stop(q
, wr
);
1281 ring_tx_db(q
->adap
, &q
->q
, ndesc
);
1282 spin_unlock(&q
->sendq
.lock
);
1285 return NET_XMIT_SUCCESS
;
1289 * restart_ctrlq - restart a suspended control queue
1290 * @data: the control queue to restart
1292 * Resumes transmission on a suspended Tx control queue.
1294 static void restart_ctrlq(unsigned long data
)
1296 struct sk_buff
*skb
;
1297 unsigned int written
= 0;
1298 struct sge_ctrl_txq
*q
= (struct sge_ctrl_txq
*)data
;
1300 spin_lock(&q
->sendq
.lock
);
1301 reclaim_completed_tx_imm(&q
->q
);
1302 BUG_ON(txq_avail(&q
->q
) < TXQ_STOP_THRES
); /* q should be empty */
1304 while ((skb
= __skb_dequeue(&q
->sendq
)) != NULL
) {
1305 struct fw_wr_hdr
*wr
;
1306 unsigned int ndesc
= skb
->priority
; /* previously saved */
1309 * Write descriptors and free skbs outside the lock to limit
1310 * wait times. q->full is still set so new skbs will be queued.
1312 spin_unlock(&q
->sendq
.lock
);
1314 wr
= (struct fw_wr_hdr
*)&q
->q
.desc
[q
->q
.pidx
];
1315 inline_tx_skb(skb
, &q
->q
, wr
);
1319 txq_advance(&q
->q
, ndesc
);
1320 if (unlikely(txq_avail(&q
->q
) < TXQ_STOP_THRES
)) {
1321 unsigned long old
= q
->q
.stops
;
1323 ctrlq_check_stop(q
, wr
);
1324 if (q
->q
.stops
!= old
) { /* suspended anew */
1325 spin_lock(&q
->sendq
.lock
);
1330 ring_tx_db(q
->adap
, &q
->q
, written
);
1333 spin_lock(&q
->sendq
.lock
);
1336 ringdb
: if (written
)
1337 ring_tx_db(q
->adap
, &q
->q
, written
);
1338 spin_unlock(&q
->sendq
.lock
);
1342 * t4_mgmt_tx - send a management message
1343 * @adap: the adapter
1344 * @skb: the packet containing the management message
1346 * Send a management message through control queue 0.
1348 int t4_mgmt_tx(struct adapter
*adap
, struct sk_buff
*skb
)
1353 ret
= ctrl_xmit(&adap
->sge
.ctrlq
[0], skb
);
1359 * is_ofld_imm - check whether a packet can be sent as immediate data
1362 * Returns true if a packet can be sent as an offload WR with immediate
1363 * data. We currently use the same limit as for Ethernet packets.
1365 static inline int is_ofld_imm(const struct sk_buff
*skb
)
1367 return skb
->len
<= MAX_IMM_TX_PKT_LEN
;
/**
 *	calc_tx_flits_ofld - calculate # of flits for an offload packet
 *	@skb: the packet
 *
 *	Returns the number of flits needed for the given offload packet.
 *	These packets are already fully constructed and no additional headers
 *	will be added.
 */
1378 static inline unsigned int calc_tx_flits_ofld(const struct sk_buff
*skb
)
1380 unsigned int flits
, cnt
;
1382 if (is_ofld_imm(skb
))
1383 return DIV_ROUND_UP(skb
->len
, 8);
1385 flits
= skb_transport_offset(skb
) / 8U; /* headers */
1386 cnt
= skb_shinfo(skb
)->nr_frags
;
1387 if (skb_tail_pointer(skb
) != skb_transport_header(skb
))
1389 return flits
+ sgl_len(cnt
);
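
/*
 * Worked example (a sketch): an offload WR carrying 48 bytes of pre-built
 * headers (6 flits) followed by two page fragments and some trailing linear
 * data (cnt = 3) needs 6 + sgl_len(3) = 6 + 5 = 11 flits.
 */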
/**
 *	txq_stop_maperr - stop a Tx queue due to I/O MMU exhaustion
 *	@adap: the adapter
 *	@q: the queue to stop
 *
 *	Mark a Tx queue stopped due to I/O MMU exhaustion and resulting
 *	inability to map packets.  A periodic timer attempts to restart
 *	queues so stopped.
 */
1401 static void txq_stop_maperr(struct sge_ofld_txq
*q
)
1405 set_bit(q
->q
.cntxt_id
- q
->adap
->sge
.egr_start
,
1406 q
->adap
->sge
.txq_maperr
);
1410 * ofldtxq_stop - stop an offload Tx queue that has become full
1411 * @q: the queue to stop
1412 * @skb: the packet causing the queue to become full
1414 * Stops an offload Tx queue that has become full and modifies the packet
1415 * being written to request a wakeup.
1417 static void ofldtxq_stop(struct sge_ofld_txq
*q
, struct sk_buff
*skb
)
1419 struct fw_wr_hdr
*wr
= (struct fw_wr_hdr
*)skb
->data
;
1421 wr
->lo
|= htonl(FW_WR_EQUEQ_F
| FW_WR_EQUIQ_F
);
1427 * service_ofldq - restart a suspended offload queue
1428 * @q: the offload queue
1430 * Services an offload Tx queue by moving packets from its packet queue
1431 * to the HW Tx ring. The function starts and ends with the queue locked.
1433 static void service_ofldq(struct sge_ofld_txq
*q
)
1437 struct sk_buff
*skb
;
1438 unsigned int written
= 0;
1439 unsigned int flits
, ndesc
;
1441 while ((skb
= skb_peek(&q
->sendq
)) != NULL
&& !q
->full
) {
1443 * We drop the lock but leave skb on sendq, thus retaining
1444 * exclusive access to the state of the queue.
1446 spin_unlock(&q
->sendq
.lock
);
1448 reclaim_completed_tx(q
->adap
, &q
->q
, false);
1450 flits
= skb
->priority
; /* previously saved */
1451 ndesc
= flits_to_desc(flits
);
1452 credits
= txq_avail(&q
->q
) - ndesc
;
1453 BUG_ON(credits
< 0);
1454 if (unlikely(credits
< TXQ_STOP_THRES
))
1455 ofldtxq_stop(q
, skb
);
1457 pos
= (u64
*)&q
->q
.desc
[q
->q
.pidx
];
1458 if (is_ofld_imm(skb
))
1459 inline_tx_skb(skb
, &q
->q
, pos
);
1460 else if (map_skb(q
->adap
->pdev_dev
, skb
,
1461 (dma_addr_t
*)skb
->head
)) {
1463 spin_lock(&q
->sendq
.lock
);
1466 int last_desc
, hdr_len
= skb_transport_offset(skb
);
1468 memcpy(pos
, skb
->data
, hdr_len
);
1469 write_sgl(skb
, &q
->q
, (void *)pos
+ hdr_len
,
1470 pos
+ flits
, hdr_len
,
1471 (dma_addr_t
*)skb
->head
);
1472 #ifdef CONFIG_NEED_DMA_MAP_STATE
1473 skb
->dev
= q
->adap
->port
[0];
1474 skb
->destructor
= deferred_unmap_destructor
;
1476 last_desc
= q
->q
.pidx
+ ndesc
- 1;
1477 if (last_desc
>= q
->q
.size
)
1478 last_desc
-= q
->q
.size
;
1479 q
->q
.sdesc
[last_desc
].skb
= skb
;
1482 txq_advance(&q
->q
, ndesc
);
1484 if (unlikely(written
> 32)) {
1485 ring_tx_db(q
->adap
, &q
->q
, written
);
1489 spin_lock(&q
->sendq
.lock
);
1490 __skb_unlink(skb
, &q
->sendq
);
1491 if (is_ofld_imm(skb
))
1494 if (likely(written
))
1495 ring_tx_db(q
->adap
, &q
->q
, written
);
1499 * ofld_xmit - send a packet through an offload queue
1500 * @q: the Tx offload queue
1503 * Send an offload packet through an SGE offload queue.
1505 static int ofld_xmit(struct sge_ofld_txq
*q
, struct sk_buff
*skb
)
1507 skb
->priority
= calc_tx_flits_ofld(skb
); /* save for restart */
1508 spin_lock(&q
->sendq
.lock
);
1509 __skb_queue_tail(&q
->sendq
, skb
);
1510 if (q
->sendq
.qlen
== 1)
1512 spin_unlock(&q
->sendq
.lock
);
1513 return NET_XMIT_SUCCESS
;
1517 * restart_ofldq - restart a suspended offload queue
1518 * @data: the offload queue to restart
1520 * Resumes transmission on a suspended Tx offload queue.
1522 static void restart_ofldq(unsigned long data
)
1524 struct sge_ofld_txq
*q
= (struct sge_ofld_txq
*)data
;
1526 spin_lock(&q
->sendq
.lock
);
1527 q
->full
= 0; /* the queue actually is completely empty now */
1529 spin_unlock(&q
->sendq
.lock
);
/**
 *	skb_txq - return the Tx queue an offload packet should use
 *	@skb: the packet
 *
 *	Returns the Tx queue an offload packet should use as indicated by bits
 *	1-15 in the packet's queue_mapping.
 */
static inline unsigned int skb_txq(const struct sk_buff *skb)
{
	return skb->queue_mapping >> 1;
}

/**
 *	is_ctrl_pkt - return whether an offload packet is a control packet
 *	@skb: the packet
 *
 *	Returns whether an offload packet should use an OFLD or a CTRL
 *	Tx queue as indicated by bit 0 in the packet's queue_mapping.
 */
static inline unsigned int is_ctrl_pkt(const struct sk_buff *skb)
{
	return skb->queue_mapping & 1;
}
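
/*
 * Example of the queue_mapping encoding (illustrative only): a ULD sending
 * a regular offload packet on offload queue 3 would set
 * skb->queue_mapping = (3 << 1) | 0, while a control packet destined for
 * control queue 0 would use (0 << 1) | 1.
 */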
1556 static inline int ofld_send(struct adapter
*adap
, struct sk_buff
*skb
)
1558 unsigned int idx
= skb_txq(skb
);
1560 if (unlikely(is_ctrl_pkt(skb
))) {
1561 /* Single ctrl queue is a requirement for LE workaround path */
1562 if (adap
->tids
.nsftids
)
1564 return ctrl_xmit(&adap
->sge
.ctrlq
[idx
], skb
);
1566 return ofld_xmit(&adap
->sge
.ofldtxq
[idx
], skb
);
1570 * t4_ofld_send - send an offload packet
1571 * @adap: the adapter
1574 * Sends an offload packet. We use the packet queue_mapping to select the
1575 * appropriate Tx queue as follows: bit 0 indicates whether the packet
1576 * should be sent as regular or control, bits 1-15 select the queue.
1578 int t4_ofld_send(struct adapter
*adap
, struct sk_buff
*skb
)
1583 ret
= ofld_send(adap
, skb
);
1589 * cxgb4_ofld_send - send an offload packet
1590 * @dev: the net device
1593 * Sends an offload packet. This is an exported version of @t4_ofld_send,
1594 * intended for ULDs.
1596 int cxgb4_ofld_send(struct net_device
*dev
, struct sk_buff
*skb
)
1598 return t4_ofld_send(netdev2adap(dev
), skb
);
1600 EXPORT_SYMBOL(cxgb4_ofld_send
);
1602 static inline void copy_frags(struct sk_buff
*skb
,
1603 const struct pkt_gl
*gl
, unsigned int offset
)
1607 /* usually there's just one frag */
1608 __skb_fill_page_desc(skb
, 0, gl
->frags
[0].page
,
1609 gl
->frags
[0].offset
+ offset
,
1610 gl
->frags
[0].size
- offset
);
1611 skb_shinfo(skb
)->nr_frags
= gl
->nfrags
;
1612 for (i
= 1; i
< gl
->nfrags
; i
++)
1613 __skb_fill_page_desc(skb
, i
, gl
->frags
[i
].page
,
1614 gl
->frags
[i
].offset
,
1617 /* get a reference to the last page, we don't own it */
1618 get_page(gl
->frags
[gl
->nfrags
- 1].page
);
1622 * cxgb4_pktgl_to_skb - build an sk_buff from a packet gather list
1623 * @gl: the gather list
1624 * @skb_len: size of sk_buff main body if it carries fragments
1625 * @pull_len: amount of data to move to the sk_buff's main body
1627 * Builds an sk_buff from the given packet gather list. Returns the
1628 * sk_buff or %NULL if sk_buff allocation failed.
1630 struct sk_buff
*cxgb4_pktgl_to_skb(const struct pkt_gl
*gl
,
1631 unsigned int skb_len
, unsigned int pull_len
)
1633 struct sk_buff
*skb
;
1636 * Below we rely on RX_COPY_THRES being less than the smallest Rx buffer
1637 * size, which is expected since buffers are at least PAGE_SIZEd.
1638 * In this case packets up to RX_COPY_THRES have only one fragment.
1640 if (gl
->tot_len
<= RX_COPY_THRES
) {
1641 skb
= dev_alloc_skb(gl
->tot_len
);
1644 __skb_put(skb
, gl
->tot_len
);
1645 skb_copy_to_linear_data(skb
, gl
->va
, gl
->tot_len
);
1647 skb
= dev_alloc_skb(skb_len
);
1650 __skb_put(skb
, pull_len
);
1651 skb_copy_to_linear_data(skb
, gl
->va
, pull_len
);
1653 copy_frags(skb
, gl
, pull_len
);
1654 skb
->len
= gl
->tot_len
;
1655 skb
->data_len
= skb
->len
- pull_len
;
1656 skb
->truesize
+= skb
->data_len
;
1660 EXPORT_SYMBOL(cxgb4_pktgl_to_skb
);
1663 * t4_pktgl_free - free a packet gather list
1664 * @gl: the gather list
1666 * Releases the pages of a packet gather list. We do not own the last
1667 * page on the list and do not free it.
1669 static void t4_pktgl_free(const struct pkt_gl
*gl
)
1672 const struct page_frag
*p
;
1674 for (p
= gl
->frags
, n
= gl
->nfrags
- 1; n
--; p
++)
1679 * Process an MPS trace packet. Give it an unused protocol number so it won't
1680 * be delivered to anyone and send it to the stack for capture.
1682 static noinline
int handle_trace_pkt(struct adapter
*adap
,
1683 const struct pkt_gl
*gl
)
1685 struct sk_buff
*skb
;
1687 skb
= cxgb4_pktgl_to_skb(gl
, RX_PULL_LEN
, RX_PULL_LEN
);
1688 if (unlikely(!skb
)) {
1693 if (is_t4(adap
->params
.chip
))
1694 __skb_pull(skb
, sizeof(struct cpl_trace_pkt
));
1696 __skb_pull(skb
, sizeof(struct cpl_t5_trace_pkt
));
1698 skb_reset_mac_header(skb
);
1699 skb
->protocol
= htons(0xffff);
1700 skb
->dev
= adap
->port
[0];
1701 netif_receive_skb(skb
);
1705 static void do_gro(struct sge_eth_rxq
*rxq
, const struct pkt_gl
*gl
,
1706 const struct cpl_rx_pkt
*pkt
)
1708 struct adapter
*adapter
= rxq
->rspq
.adap
;
1709 struct sge
*s
= &adapter
->sge
;
1711 struct sk_buff
*skb
;
1713 skb
= napi_get_frags(&rxq
->rspq
.napi
);
1714 if (unlikely(!skb
)) {
1716 rxq
->stats
.rx_drops
++;
1720 copy_frags(skb
, gl
, s
->pktshift
);
1721 skb
->len
= gl
->tot_len
- s
->pktshift
;
1722 skb
->data_len
= skb
->len
;
1723 skb
->truesize
+= skb
->data_len
;
1724 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
1725 skb_record_rx_queue(skb
, rxq
->rspq
.idx
);
1726 skb_mark_napi_id(skb
, &rxq
->rspq
.napi
);
1727 if (rxq
->rspq
.netdev
->features
& NETIF_F_RXHASH
)
1728 skb_set_hash(skb
, (__force u32
)pkt
->rsshdr
.hash_val
,
1731 if (unlikely(pkt
->vlan_ex
)) {
1732 __vlan_hwaccel_put_tag(skb
, htons(ETH_P_8021Q
), ntohs(pkt
->vlan
));
1733 rxq
->stats
.vlan_ex
++;
1735 ret
= napi_gro_frags(&rxq
->rspq
.napi
);
1736 if (ret
== GRO_HELD
)
1737 rxq
->stats
.lro_pkts
++;
1738 else if (ret
== GRO_MERGED
|| ret
== GRO_MERGED_FREE
)
1739 rxq
->stats
.lro_merged
++;
1741 rxq
->stats
.rx_cso
++;
1745 * t4_ethrx_handler - process an ingress ethernet packet
1746 * @q: the response queue that received the packet
1747 * @rsp: the response queue descriptor holding the RX_PKT message
1748 * @si: the gather list of packet fragments
1750 * Process an ingress ethernet packet and deliver it to the stack.
1752 int t4_ethrx_handler(struct sge_rspq
*q
, const __be64
*rsp
,
1753 const struct pkt_gl
*si
)
1756 struct sk_buff
*skb
;
1757 const struct cpl_rx_pkt
*pkt
;
1758 struct sge_eth_rxq
*rxq
= container_of(q
, struct sge_eth_rxq
, rspq
);
1759 struct sge
*s
= &q
->adap
->sge
;
1760 int cpl_trace_pkt
= is_t4(q
->adap
->params
.chip
) ?
1761 CPL_TRACE_PKT
: CPL_TRACE_PKT_T5
;
1763 if (unlikely(*(u8
*)rsp
== cpl_trace_pkt
))
1764 return handle_trace_pkt(q
->adap
, si
);
1766 pkt
= (const struct cpl_rx_pkt
*)rsp
;
1767 csum_ok
= pkt
->csum_calc
&& !pkt
->err_vec
&&
1768 (q
->netdev
->features
& NETIF_F_RXCSUM
);
1769 if ((pkt
->l2info
& htonl(RXF_TCP_F
)) &&
1770 !(cxgb_poll_busy_polling(q
)) &&
1771 (q
->netdev
->features
& NETIF_F_GRO
) && csum_ok
&& !pkt
->ip_frag
) {
1772 do_gro(rxq
, si
, pkt
);
1776 skb
= cxgb4_pktgl_to_skb(si
, RX_PKT_SKB_LEN
, RX_PULL_LEN
);
1777 if (unlikely(!skb
)) {
1779 rxq
->stats
.rx_drops
++;
1783 __skb_pull(skb
, s
->pktshift
); /* remove ethernet header padding */
1784 skb
->protocol
= eth_type_trans(skb
, q
->netdev
);
1785 skb_record_rx_queue(skb
, q
->idx
);
1786 if (skb
->dev
->features
& NETIF_F_RXHASH
)
1787 skb_set_hash(skb
, (__force u32
)pkt
->rsshdr
.hash_val
,
1792 if (csum_ok
&& (pkt
->l2info
& htonl(RXF_UDP_F
| RXF_TCP_F
))) {
1793 if (!pkt
->ip_frag
) {
1794 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
1795 rxq
->stats
.rx_cso
++;
1796 } else if (pkt
->l2info
& htonl(RXF_IP_F
)) {
1797 __sum16 c
= (__force __sum16
)pkt
->csum
;
1798 skb
->csum
= csum_unfold(c
);
1799 skb
->ip_summed
= CHECKSUM_COMPLETE
;
1800 rxq
->stats
.rx_cso
++;
1803 skb_checksum_none_assert(skb
);
1805 if (unlikely(pkt
->vlan_ex
)) {
1806 __vlan_hwaccel_put_tag(skb
, htons(ETH_P_8021Q
), ntohs(pkt
->vlan
));
1807 rxq
->stats
.vlan_ex
++;
1809 skb_mark_napi_id(skb
, &q
->napi
);
1810 netif_receive_skb(skb
);
/**
 *	restore_rx_bufs - put back a packet's Rx buffers
 *	@si: the packet gather list
 *	@q: the SGE free list
 *	@frags: number of FL buffers to restore
 *
 *	Puts back on an FL the Rx buffers associated with @si.  The buffers
 *	have already been unmapped and are left unmapped; we mark them so to
 *	prevent further unmapping attempts.
 *
 *	This function undoes a series of @unmap_rx_buf calls when we find out
 *	that the current packet can't be processed right away after all and we
 *	need to come back to it later.  This is a very rare event and there's
 *	no effort to make this particularly efficient.
 */
1829 static void restore_rx_bufs(const struct pkt_gl
*si
, struct sge_fl
*q
,
1832 struct rx_sw_desc
*d
;
1836 q
->cidx
= q
->size
- 1;
1839 d
= &q
->sdesc
[q
->cidx
];
1840 d
->page
= si
->frags
[frags
].page
;
1841 d
->dma_addr
|= RX_UNMAPPED_BUF
;
/**
 *	is_new_response - check if a response is newly written
 *	@r: the response descriptor
 *	@q: the response queue
 *
 *	Returns true if a response descriptor contains a yet unprocessed
 *	response.
 */
1854 static inline bool is_new_response(const struct rsp_ctrl
*r
,
1855 const struct sge_rspq
*q
)
1857 return RSPD_GEN(r
->type_gen
) == q
->gen
;
1861 * rspq_next - advance to the next entry in a response queue
1864 * Updates the state of a response queue to advance it to the next entry.
1866 static inline void rspq_next(struct sge_rspq
*q
)
1868 q
->cur_desc
= (void *)q
->cur_desc
+ q
->iqe_len
;
1869 if (unlikely(++q
->cidx
== q
->size
)) {
1872 q
->cur_desc
= q
->desc
;
1877 * process_responses - process responses from an SGE response queue
1878 * @q: the ingress queue to process
1879 * @budget: how many responses can be processed in this round
1881 * Process responses from an SGE response queue up to the supplied budget.
1882 * Responses include received packets as well as control messages from FW
1885 * Additionally choose the interrupt holdoff time for the next interrupt
1886 * on this queue. If the system is under memory shortage use a fairly
1887 * long delay to help recovery.
1889 static int process_responses(struct sge_rspq
*q
, int budget
)
1892 int budget_left
= budget
;
1893 const struct rsp_ctrl
*rc
;
1894 struct sge_eth_rxq
*rxq
= container_of(q
, struct sge_eth_rxq
, rspq
);
1895 struct adapter
*adapter
= q
->adap
;
1896 struct sge
*s
= &adapter
->sge
;
1898 while (likely(budget_left
)) {
1899 rc
= (void *)q
->cur_desc
+ (q
->iqe_len
- sizeof(*rc
));
1900 if (!is_new_response(rc
, q
))
1904 rsp_type
= RSPD_TYPE(rc
->type_gen
);
1905 if (likely(rsp_type
== RSP_TYPE_FLBUF
)) {
1906 struct page_frag
*fp
;
1908 const struct rx_sw_desc
*rsd
;
1909 u32 len
= ntohl(rc
->pldbuflen_qid
), bufsz
, frags
;
1911 if (len
& RSPD_NEWBUF
) {
1912 if (likely(q
->offset
> 0)) {
1913 free_rx_bufs(q
->adap
, &rxq
->fl
, 1);
1916 len
= RSPD_LEN(len
);
1920 /* gather packet fragments */
1921 for (frags
= 0, fp
= si
.frags
; ; frags
++, fp
++) {
1922 rsd
= &rxq
->fl
.sdesc
[rxq
->fl
.cidx
];
1923 bufsz
= get_buf_size(adapter
, rsd
);
1924 fp
->page
= rsd
->page
;
1925 fp
->offset
= q
->offset
;
1926 fp
->size
= min(bufsz
, len
);
1930 unmap_rx_buf(q
->adap
, &rxq
->fl
);
1934 * Last buffer remains mapped so explicitly make it
1935 * coherent for CPU access.
1937 dma_sync_single_for_cpu(q
->adap
->pdev_dev
,
1939 fp
->size
, DMA_FROM_DEVICE
);
1941 si
.va
= page_address(si
.frags
[0].page
) +
1945 si
.nfrags
= frags
+ 1;
1946 ret
= q
->handler(q
, q
->cur_desc
, &si
);
1947 if (likely(ret
== 0))
1948 q
->offset
+= ALIGN(fp
->size
, s
->fl_align
);
1950 restore_rx_bufs(&si
, &rxq
->fl
, frags
);
1951 } else if (likely(rsp_type
== RSP_TYPE_CPL
)) {
1952 ret
= q
->handler(q
, q
->cur_desc
, NULL
);
1954 ret
= q
->handler(q
, (const __be64
*)rc
, CXGB4_MSG_AN
);
1957 if (unlikely(ret
)) {
1958 /* couldn't process descriptor, back off for recovery */
1959 q
->next_intr_params
= QINTR_TIMER_IDX(NOMEM_TMR_IDX
);
1967 if (q
->offset
>= 0 && rxq
->fl
.size
- rxq
->fl
.avail
>= 16)
1968 __refill_fl(q
->adap
, &rxq
->fl
);
1969 return budget
- budget_left
;
1972 #ifdef CONFIG_NET_RX_BUSY_POLL
1973 int cxgb_busy_poll(struct napi_struct
*napi
)
1975 struct sge_rspq
*q
= container_of(napi
, struct sge_rspq
, napi
);
1976 unsigned int params
, work_done
;
1979 if (!cxgb_poll_lock_poll(q
))
1980 return LL_FLUSH_BUSY
;
1982 work_done
= process_responses(q
, 4);
1983 params
= QINTR_TIMER_IDX(TIMERREG_COUNTER0_X
) | QINTR_CNT_EN
;
1984 q
->next_intr_params
= params
;
1985 val
= CIDXINC_V(work_done
) | SEINTARM_V(params
);
1987 /* If we don't have access to the new User GTS (T5+), use the old
1988 * doorbell mechanism; otherwise use the new BAR2 mechanism.
1990 if (unlikely(!q
->bar2_addr
))
1991 t4_write_reg(q
->adap
, MYPF_REG(SGE_PF_GTS_A
),
1992 val
| INGRESSQID_V((u32
)q
->cntxt_id
));
1994 writel(val
| INGRESSQID_V(q
->bar2_qid
),
1995 q
->bar2_addr
+ SGE_UDB_GTS
);
1999 cxgb_poll_unlock_poll(q
);
2002 #endif /* CONFIG_NET_RX_BUSY_POLL */
/**
 *	napi_rx_handler - the NAPI handler for Rx processing
 *	@napi: the napi instance
 *	@budget: how many packets we can process in this round
 *
 *	Handler for new data events when using NAPI.  This does not need any
 *	locking or protection from interrupts as data interrupts are off at
 *	this point and other adapter interrupts do not interfere (the latter
 *	is not a concern at all with MSI-X as non-data interrupts then have
 *	a separate handler).
 */
2015 static int napi_rx_handler(struct napi_struct
*napi
, int budget
)
2017 unsigned int params
;
2018 struct sge_rspq
*q
= container_of(napi
, struct sge_rspq
, napi
);
2022 if (!cxgb_poll_lock_napi(q
))
2025 work_done
= process_responses(q
, budget
);
2026 if (likely(work_done
< budget
)) {
2029 napi_complete(napi
);
2030 timer_index
= QINTR_TIMER_IDX_GET(q
->next_intr_params
);
2032 if (q
->adaptive_rx
) {
2033 if (work_done
> max(timer_pkt_quota
[timer_index
],
2035 timer_index
= (timer_index
+ 1);
2037 timer_index
= timer_index
- 1;
2039 timer_index
= clamp(timer_index
, 0, SGE_TIMERREGS
- 1);
2040 q
->next_intr_params
= QINTR_TIMER_IDX(timer_index
) |
2042 params
= q
->next_intr_params
;
2044 params
= q
->next_intr_params
;
2045 q
->next_intr_params
= q
->intr_params
;
2048 params
= QINTR_TIMER_IDX(7);
2050 val
= CIDXINC_V(work_done
) | SEINTARM_V(params
);
2052 /* If we don't have access to the new User GTS (T5+), use the old
2053 * doorbell mechanism; otherwise use the new BAR2 mechanism.
2055 if (unlikely(q
->bar2_addr
== NULL
)) {
2056 t4_write_reg(q
->adap
, MYPF_REG(SGE_PF_GTS_A
),
2057 val
| INGRESSQID_V((u32
)q
->cntxt_id
));
2059 writel(val
| INGRESSQID_V(q
->bar2_qid
),
2060 q
->bar2_addr
+ SGE_UDB_GTS
);
2063 cxgb_poll_unlock_napi(q
);
2068 * The MSI-X interrupt handler for an SGE response queue.
2070 irqreturn_t
t4_sge_intr_msix(int irq
, void *cookie
)
2072 struct sge_rspq
*q
= cookie
;
2074 napi_schedule(&q
->napi
);
2079 * Process the indirect interrupt entries in the interrupt queue and kick off
2080 * NAPI for each queue that has generated an entry.
2082 static unsigned int process_intrq(struct adapter
*adap
)
2084 unsigned int credits
;
2085 const struct rsp_ctrl
*rc
;
2086 struct sge_rspq
*q
= &adap
->sge
.intrq
;
2089 spin_lock(&adap
->sge
.intrq_lock
);
2090 for (credits
= 0; ; credits
++) {
2091 rc
= (void *)q
->cur_desc
+ (q
->iqe_len
- sizeof(*rc
));
2092 if (!is_new_response(rc
, q
))
2096 if (RSPD_TYPE(rc
->type_gen
) == RSP_TYPE_INTR
) {
2097 unsigned int qid
= ntohl(rc
->pldbuflen_qid
);
2099 qid
-= adap
->sge
.ingr_start
;
2100 napi_schedule(&adap
->sge
.ingr_map
[qid
]->napi
);
2106 val
= CIDXINC_V(credits
) | SEINTARM_V(q
->intr_params
);
2108 /* If we don't have access to the new User GTS (T5+), use the old
2109 * doorbell mechanism; otherwise use the new BAR2 mechanism.
2111 if (unlikely(q
->bar2_addr
== NULL
)) {
2112 t4_write_reg(adap
, MYPF_REG(SGE_PF_GTS_A
),
2113 val
| INGRESSQID_V(q
->cntxt_id
));
2115 writel(val
| INGRESSQID_V(q
->bar2_qid
),
2116 q
->bar2_addr
+ SGE_UDB_GTS
);
2119 spin_unlock(&adap
->sge
.intrq_lock
);
2124 * The MSI interrupt handler, which handles data events from SGE response queues
2125 * as well as error and other async events as they all use the same MSI vector.
2127 static irqreturn_t
t4_intr_msi(int irq
, void *cookie
)
2129 struct adapter
*adap
= cookie
;
2131 t4_slow_intr_handler(adap
);
2132 process_intrq(adap
);
2137 * Interrupt handler for legacy INTx interrupts.
2138 * Handles data events from SGE response queues as well as error and other
2139 * async events as they all use the same interrupt line.
2141 static irqreturn_t
t4_intr_intx(int irq
, void *cookie
)
2143 struct adapter
*adap
= cookie
;
2145 t4_write_reg(adap
, MYPF_REG(PCIE_PF_CLI_A
), 0);
2146 if (t4_slow_intr_handler(adap
) | process_intrq(adap
))
2148 return IRQ_NONE
; /* probably shared interrupt */

/**
 * t4_intr_handler - select the top-level interrupt handler
 * @adap: the adapter
 *
 * Selects the top-level interrupt handler based on the type of interrupts
 * (MSI-X, MSI, or INTx).
 */
irq_handler_t t4_intr_handler(struct adapter *adap)
{
        if (adap->flags & USING_MSIX)
                return t4_sge_intr_msix;
        if (adap->flags & USING_MSI)
                return t4_intr_msi;
        return t4_intr_intx;
}
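
/* Usage note: the handler returned here is what the rest of the driver
 * typically passes to request_irq() for the MSI/INTx case; with MSI-X the
 * per-queue t4_sge_intr_msix handler is registered for each vector instead
 * (this describes typical usage elsewhere in the driver, not code in this
 * file).
 */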

static void sge_rx_timer_cb(unsigned long data)
{
        unsigned long m;
        unsigned int i, idma_same_state_cnt[2];
        struct adapter *adap = (struct adapter *)data;
        struct sge *s = &adap->sge;

        for (i = 0; i < ARRAY_SIZE(s->starving_fl); i++)
                for (m = s->starving_fl[i]; m; m &= m - 1) {
                        struct sge_eth_rxq *rxq;
                        unsigned int id = __ffs(m) + i * BITS_PER_LONG;
                        struct sge_fl *fl = s->egr_map[id];

                        clear_bit(id, s->starving_fl);
                        smp_mb__after_atomic();

                        if (fl_starving(fl)) {
                                rxq = container_of(fl, struct sge_eth_rxq, fl);
                                if (napi_reschedule(&rxq->rspq.napi))
                                        fl->starving++;
                                else
                                        set_bit(id, s->starving_fl);
                        }
                }
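
        /* Snapshot the SGE Ingress DMA "Same State" counters for both DMA
         * channels via the SGE debug register interface (index 13); these
         * feed the stall detection below.
         */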
        t4_write_reg(adap, SGE_DEBUG_INDEX_A, 13);
        idma_same_state_cnt[0] = t4_read_reg(adap, SGE_DEBUG_DATA_HIGH_A);
        idma_same_state_cnt[1] = t4_read_reg(adap, SGE_DEBUG_DATA_LOW_A);

        for (i = 0; i < 2; i++) {
                u32 debug0, debug11;

                /* If the Ingress DMA Same State Counter ("timer") is less
                 * than 1s, then we can reset our synthesized Stall Timer and
                 * continue.  If we have previously emitted warnings about a
                 * potential stalled Ingress Queue, issue a note indicating
                 * that the Ingress Queue has resumed forward progress.
                 */
                if (idma_same_state_cnt[i] < s->idma_1s_thresh) {
                        if (s->idma_stalled[i] >= SGE_IDMA_WARN_THRESH)
                                CH_WARN(adap, "SGE idma%d, queue%u, resumed after %d sec\n",
                                        i, s->idma_qid[i],
                                        s->idma_stalled[i]/HZ);
                        s->idma_stalled[i] = 0;
                        continue;
                }

                /* Synthesize an SGE Ingress DMA Same State Timer in the Hz
                 * domain.  The first time we get here it'll be because we
                 * passed the 1s Threshold; each additional time it'll be
                 * because the RX Timer Callback is being fired on its regular
                 * schedule.
                 *
                 * If the stall is below our Potential Hung Ingress Queue
                 * Warning Threshold, continue.
                 */
                if (s->idma_stalled[i] == 0)
                        s->idma_stalled[i] = HZ;
                else
                        s->idma_stalled[i] += RX_QCHECK_PERIOD;

                if (s->idma_stalled[i] < SGE_IDMA_WARN_THRESH)
                        continue;

                /* We'll issue a warning every SGE_IDMA_WARN_REPEAT Hz */
                if (((s->idma_stalled[i] - HZ) % SGE_IDMA_WARN_REPEAT) != 0)
                        continue;

                /* Read and save the SGE IDMA State and Queue ID information.
                 * We do this every time in case it changes across time ...
                 */
                t4_write_reg(adap, SGE_DEBUG_INDEX_A, 0);
                debug0 = t4_read_reg(adap, SGE_DEBUG_DATA_LOW_A);
                s->idma_state[i] = (debug0 >> (i * 9)) & 0x3f;

                t4_write_reg(adap, SGE_DEBUG_INDEX_A, 11);
                debug11 = t4_read_reg(adap, SGE_DEBUG_DATA_LOW_A);
                s->idma_qid[i] = (debug11 >> (i * 16)) & 0xffff;

                CH_WARN(adap, "SGE idma%u, queue%u, maybe stuck state%u %dsecs (debug0=%#x, debug11=%#x)\n",
                        i, s->idma_qid[i], s->idma_state[i],
                        s->idma_stalled[i]/HZ, debug0, debug11);
                t4_sge_decode_idma_state(adap, s->idma_state[i]);
        }

        mod_timer(&s->rx_timer, jiffies + RX_QCHECK_PERIOD);
}

static void sge_tx_timer_cb(unsigned long data)
{
        unsigned long m;
        unsigned int i, budget;
        struct adapter *adap = (struct adapter *)data;
        struct sge *s = &adap->sge;

        for (i = 0; i < ARRAY_SIZE(s->txq_maperr); i++)
                for (m = s->txq_maperr[i]; m; m &= m - 1) {
                        unsigned long id = __ffs(m) + i * BITS_PER_LONG;
                        struct sge_ofld_txq *txq = s->egr_map[id];

                        clear_bit(id, s->txq_maperr);
                        tasklet_schedule(&txq->qresume_tsk);
                }

        budget = MAX_TIMER_TX_RECLAIM;
        i = s->ethtxq_rover;
        do {
                struct sge_eth_txq *q = &s->ethtxq[i];

                if (q->q.in_use &&
                    time_after_eq(jiffies, q->txq->trans_start + HZ / 100) &&
                    __netif_tx_trylock(q->txq)) {
                        int avail = reclaimable(&q->q);

                        if (avail) {
                                if (avail > budget)
                                        avail = budget;

                                free_tx_desc(adap, &q->q, avail, true);
                                q->q.in_use -= avail;
                                budget -= avail;
                        }
                        __netif_tx_unlock(q->txq);
                }

                if (++i >= s->ethqsets)
                        i = 0;
        } while (budget && i != s->ethtxq_rover);
        s->ethtxq_rover = i;
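        /* If we used up the whole reclaim budget this pass there may be more
         * completed descriptors waiting, so re-run almost immediately (2
         * jiffies); otherwise wait a full TX_QCHECK_PERIOD.
         */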
        mod_timer(&s->tx_timer, jiffies + (budget ? TX_QCHECK_PERIOD : 2));
}

/**
 * bar2_address - return the BAR2 address for an SGE Queue's Registers
 * @adapter: the adapter
 * @qid: the SGE Queue ID
 * @qtype: the SGE Queue Type (Egress or Ingress)
 * @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
 *
 * Returns the BAR2 address for the SGE Queue Registers associated with
 * @qid.  If BAR2 SGE Registers aren't available, returns NULL.  Also
 * returns the BAR2 Queue ID to be used with writes to the BAR2 SGE
 * Queue Registers.  If the BAR2 Queue ID is 0, then "Inferred Queue ID"
 * Registers are supported (e.g. the Write Combining Doorbell Buffer).
 */
static void __iomem *bar2_address(struct adapter *adapter,
                                  unsigned int qid,
                                  enum t4_bar2_qtype qtype,
                                  unsigned int *pbar2_qid)
{
        u64 bar2_qoffset;
        int ret;

        ret = cxgb4_t4_bar2_sge_qregs(adapter, qid, qtype,
                                      &bar2_qoffset, pbar2_qid);
        if (ret)
                return NULL;

        return adapter->bar2 + bar2_qoffset;
}

int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
                     struct net_device *dev, int intr_idx,
                     struct sge_fl *fl, rspq_handler_t hnd)
{
        int ret, flsz = 0;
        struct fw_iq_cmd c;
        struct sge *s = &adap->sge;
        struct port_info *pi = netdev_priv(dev);

        /* Size needs to be multiple of 16, including status entry. */
        iq->size = roundup(iq->size, 16);

        iq->desc = alloc_ring(adap->pdev_dev, iq->size, iq->iqe_len, 0,
                              &iq->phys_addr, NULL, 0, NUMA_NO_NODE);
        if (!iq->desc)
                return -ENOMEM;

        memset(&c, 0, sizeof(c));
        c.op_to_vfn = htonl(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F |
                            FW_CMD_WRITE_F | FW_CMD_EXEC_F |
                            FW_IQ_CMD_PFN_V(adap->fn) | FW_IQ_CMD_VFN_V(0));
        c.alloc_to_len16 = htonl(FW_IQ_CMD_ALLOC_F | FW_IQ_CMD_IQSTART_F |
                                 FW_LEN16(c));
        c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE_V(FW_IQ_TYPE_FL_INT_CAP) |
                FW_IQ_CMD_IQASYNCH_V(fwevtq) | FW_IQ_CMD_VIID_V(pi->viid) |
                FW_IQ_CMD_IQANDST_V(intr_idx < 0) | FW_IQ_CMD_IQANUD_V(1) |
                FW_IQ_CMD_IQANDSTINDEX_V(intr_idx >= 0 ? intr_idx :
                                         -intr_idx - 1));
        c.iqdroprss_to_iqesize = htons(FW_IQ_CMD_IQPCIECH_V(pi->tx_chan) |
                FW_IQ_CMD_IQGTSMODE_F |
                FW_IQ_CMD_IQINTCNTTHRESH_V(iq->pktcnt_idx) |
                FW_IQ_CMD_IQESIZE_V(ilog2(iq->iqe_len) - 4));
        c.iqsize = htons(iq->size);
        c.iqaddr = cpu_to_be64(iq->phys_addr);
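
        /* If the caller supplied a Free List to pair with this ingress queue,
         * allocate its descriptor ring now and fill in the FL half of the
         * FW_IQ command before it is sent to the firmware.
         */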
        if (fl) {
                fl->size = roundup(fl->size, 8);
                fl->desc = alloc_ring(adap->pdev_dev, fl->size, sizeof(__be64),
                                      sizeof(struct rx_sw_desc), &fl->addr,
                                      &fl->sdesc, s->stat_len, NUMA_NO_NODE);
                if (!fl->desc)
                        goto fl_nomem;

                flsz = fl->size / 8 + s->stat_len / sizeof(struct tx_desc);
                c.iqns_to_fl0congen = htonl(FW_IQ_CMD_FL0PACKEN_F |
                                            FW_IQ_CMD_FL0FETCHRO_F |
                                            FW_IQ_CMD_FL0DATARO_F |
                                            FW_IQ_CMD_FL0PADEN_F);
                c.fl0dcaen_to_fl0cidxfthresh = htons(FW_IQ_CMD_FL0FBMIN_V(2) |
                                                     FW_IQ_CMD_FL0FBMAX_V(3));
                c.fl0size = htons(flsz);
                c.fl0addr = cpu_to_be64(fl->addr);
        }

        ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
        if (ret)
                goto err;

        netif_napi_add(dev, &iq->napi, napi_rx_handler, 64);
        napi_hash_add(&iq->napi);
        iq->cur_desc = iq->desc;
        iq->cidx = 0;
        iq->gen = 1;
        iq->next_intr_params = iq->intr_params;
        iq->cntxt_id = ntohs(c.iqid);
        iq->abs_id = ntohs(c.physiqid);
        iq->bar2_addr = bar2_address(adap,
                                     iq->cntxt_id,
                                     T4_BAR2_QTYPE_INGRESS,
                                     &iq->bar2_qid);
        iq->size--;                     /* subtract status entry */
        iq->netdev = dev;
        iq->handler = hnd;

        /* set offset to -1 to distinguish ingress queues without FL */
        iq->offset = fl ? 0 : -1;

        adap->sge.ingr_map[iq->cntxt_id - adap->sge.ingr_start] = iq;

        if (fl) {
                fl->cntxt_id = ntohs(c.fl0id);
                fl->avail = fl->pend_cred = 0;
                fl->pidx = fl->cidx = 0;
                fl->alloc_failed = fl->large_alloc_failed = fl->starving = 0;
                adap->sge.egr_map[fl->cntxt_id - adap->sge.egr_start] = fl;

                /* Note, we must initialize the BAR2 Free List User Doorbell
                 * information before refilling the Free List!
                 */
                fl->bar2_addr = bar2_address(adap,
                                             fl->cntxt_id,
                                             T4_BAR2_QTYPE_EGRESS,
                                             &fl->bar2_qid);
                refill_fl(adap, fl, fl_cap(fl), GFP_KERNEL);
        }
        return 0;

fl_nomem:
        ret = -ENOMEM;
err:
        if (iq->desc) {
                dma_free_coherent(adap->pdev_dev, iq->size * iq->iqe_len,
                                  iq->desc, iq->phys_addr);
                iq->desc = NULL;
        }
        if (fl && fl->desc) {
                kfree(fl->sdesc);
                fl->sdesc = NULL;
                dma_free_coherent(adap->pdev_dev, flsz * sizeof(struct tx_desc),
                                  fl->desc, fl->addr);
                fl->desc = NULL;
        }
        return ret;
}

static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id)
{
        q->cntxt_id = id;
        q->bar2_addr = bar2_address(adap,
                                    q->cntxt_id,
                                    T4_BAR2_QTYPE_EGRESS,
                                    &q->bar2_qid);
        q->in_use = 0;
        q->cidx = q->pidx = 0;
        q->stops = q->restarts = 0;
        q->stat = (void *)&q->desc[q->size];
        spin_lock_init(&q->db_lock);
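        /* Record the queue in the reverse egress-queue map so other parts of
         * the driver can look it up by its hardware egress queue ID.
         */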
        adap->sge.egr_map[id - adap->sge.egr_start] = q;
}

int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
                         struct net_device *dev, struct netdev_queue *netdevq,
                         unsigned int iqid)
{
        int ret, nentries;
        struct fw_eq_eth_cmd c;
        struct sge *s = &adap->sge;
        struct port_info *pi = netdev_priv(dev);

        /* Add status entries */
        nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);

        txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size,
                        sizeof(struct tx_desc), sizeof(struct tx_sw_desc),
                        &txq->q.phys_addr, &txq->q.sdesc, s->stat_len,
                        netdev_queue_numa_node_read(netdevq));
        if (!txq->q.desc)
                return -ENOMEM;

        memset(&c, 0, sizeof(c));
        c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_ETH_CMD) | FW_CMD_REQUEST_F |
                            FW_CMD_WRITE_F | FW_CMD_EXEC_F |
                            FW_EQ_ETH_CMD_PFN_V(adap->fn) |
                            FW_EQ_ETH_CMD_VFN_V(0));
        c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_ALLOC_F |
                                 FW_EQ_ETH_CMD_EQSTART_F | FW_LEN16(c));
        c.viid_pkd = htonl(FW_EQ_ETH_CMD_AUTOEQUEQE_F |
                           FW_EQ_ETH_CMD_VIID_V(pi->viid));
        c.fetchszm_to_iqid = htonl(FW_EQ_ETH_CMD_HOSTFCMODE_V(2) |
                                   FW_EQ_ETH_CMD_PCIECHN_V(pi->tx_chan) |
                                   FW_EQ_ETH_CMD_FETCHRO_V(1) |
                                   FW_EQ_ETH_CMD_IQID_V(iqid));
        c.dcaen_to_eqsize = htonl(FW_EQ_ETH_CMD_FBMIN_V(2) |
                                  FW_EQ_ETH_CMD_FBMAX_V(3) |
                                  FW_EQ_ETH_CMD_CIDXFTHRESH_V(5) |
                                  FW_EQ_ETH_CMD_EQSIZE_V(nentries));
        c.eqaddr = cpu_to_be64(txq->q.phys_addr);

        ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
        if (ret) {
                kfree(txq->q.sdesc);
                txq->q.sdesc = NULL;
                dma_free_coherent(adap->pdev_dev,
                                  nentries * sizeof(struct tx_desc),
                                  txq->q.desc, txq->q.phys_addr);
                txq->q.desc = NULL;
                return ret;
        }

        init_txq(adap, &txq->q, FW_EQ_ETH_CMD_EQID_G(ntohl(c.eqid_pkd)));
        txq->txq = netdevq;
        txq->tso = txq->tx_cso = txq->vlan_ins = 0;
        txq->mapping_err = 0;
        return 0;
}

int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
                          struct net_device *dev, unsigned int iqid,
                          unsigned int cmplqid)
{
        int ret, nentries;
        struct fw_eq_ctrl_cmd c;
        struct sge *s = &adap->sge;
        struct port_info *pi = netdev_priv(dev);

        /* Add status entries */
        nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);

        txq->q.desc = alloc_ring(adap->pdev_dev, nentries,
                                 sizeof(struct tx_desc), 0, &txq->q.phys_addr,
                                 NULL, 0, NUMA_NO_NODE);
        if (!txq->q.desc)
                return -ENOMEM;

        c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST_F |
                            FW_CMD_WRITE_F | FW_CMD_EXEC_F |
                            FW_EQ_CTRL_CMD_PFN_V(adap->fn) |
                            FW_EQ_CTRL_CMD_VFN_V(0));
        c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_ALLOC_F |
                                 FW_EQ_CTRL_CMD_EQSTART_F | FW_LEN16(c));
        c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_CMPLIQID_V(cmplqid));
        c.physeqid_pkd = htonl(0);
        c.fetchszm_to_iqid = htonl(FW_EQ_CTRL_CMD_HOSTFCMODE_V(2) |
                                   FW_EQ_CTRL_CMD_PCIECHN_V(pi->tx_chan) |
                                   FW_EQ_CTRL_CMD_FETCHRO_F |
                                   FW_EQ_CTRL_CMD_IQID_V(iqid));
        c.dcaen_to_eqsize = htonl(FW_EQ_CTRL_CMD_FBMIN_V(2) |
                                  FW_EQ_CTRL_CMD_FBMAX_V(3) |
                                  FW_EQ_CTRL_CMD_CIDXFTHRESH_V(5) |
                                  FW_EQ_CTRL_CMD_EQSIZE_V(nentries));
        c.eqaddr = cpu_to_be64(txq->q.phys_addr);

        ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
        if (ret) {
                dma_free_coherent(adap->pdev_dev,
                                  nentries * sizeof(struct tx_desc),
                                  txq->q.desc, txq->q.phys_addr);
                txq->q.desc = NULL;
                return ret;
        }

        init_txq(adap, &txq->q, FW_EQ_CTRL_CMD_EQID_G(ntohl(c.cmpliqid_eqid)));
        txq->adap = adap;
        skb_queue_head_init(&txq->sendq);
        tasklet_init(&txq->qresume_tsk, restart_ctrlq, (unsigned long)txq);
        txq->full = 0;
        return 0;
}

int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq,
                          struct net_device *dev, unsigned int iqid)
{
        int ret, nentries;
        struct fw_eq_ofld_cmd c;
        struct sge *s = &adap->sge;
        struct port_info *pi = netdev_priv(dev);

        /* Add status entries */
        nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);

        txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size,
                        sizeof(struct tx_desc), sizeof(struct tx_sw_desc),
                        &txq->q.phys_addr, &txq->q.sdesc, s->stat_len,
                        NUMA_NO_NODE);
        if (!txq->q.desc)
                return -ENOMEM;

        memset(&c, 0, sizeof(c));
        c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST_F |
                            FW_CMD_WRITE_F | FW_CMD_EXEC_F |
                            FW_EQ_OFLD_CMD_PFN_V(adap->fn) |
                            FW_EQ_OFLD_CMD_VFN_V(0));
        c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_ALLOC_F |
                                 FW_EQ_OFLD_CMD_EQSTART_F | FW_LEN16(c));
        c.fetchszm_to_iqid = htonl(FW_EQ_OFLD_CMD_HOSTFCMODE_V(2) |
                                   FW_EQ_OFLD_CMD_PCIECHN_V(pi->tx_chan) |
                                   FW_EQ_OFLD_CMD_FETCHRO_F |
                                   FW_EQ_OFLD_CMD_IQID_V(iqid));
        c.dcaen_to_eqsize = htonl(FW_EQ_OFLD_CMD_FBMIN_V(2) |
                                  FW_EQ_OFLD_CMD_FBMAX_V(3) |
                                  FW_EQ_OFLD_CMD_CIDXFTHRESH_V(5) |
                                  FW_EQ_OFLD_CMD_EQSIZE_V(nentries));
        c.eqaddr = cpu_to_be64(txq->q.phys_addr);

        ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
        if (ret) {
                kfree(txq->q.sdesc);
                txq->q.sdesc = NULL;
                dma_free_coherent(adap->pdev_dev,
                                  nentries * sizeof(struct tx_desc),
                                  txq->q.desc, txq->q.phys_addr);
                txq->q.desc = NULL;
                return ret;
        }

        init_txq(adap, &txq->q, FW_EQ_OFLD_CMD_EQID_G(ntohl(c.eqid_pkd)));
        txq->adap = adap;
        skb_queue_head_init(&txq->sendq);
        tasklet_init(&txq->qresume_tsk, restart_ofldq, (unsigned long)txq);
        txq->full = 0;
        txq->mapping_err = 0;
        return 0;
}

static void free_txq(struct adapter *adap, struct sge_txq *q)
{
        struct sge *s = &adap->sge;

        dma_free_coherent(adap->pdev_dev,
                          q->size * sizeof(struct tx_desc) + s->stat_len,
                          q->desc, q->phys_addr);
        q->cntxt_id = 0;
        q->sdesc = NULL;
        q->desc = NULL;
}

static void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq,
                         struct sge_fl *fl)
{
        struct sge *s = &adap->sge;
        unsigned int fl_id = fl ? fl->cntxt_id : 0xffff;

        adap->sge.ingr_map[rq->cntxt_id - adap->sge.ingr_start] = NULL;
        t4_iq_free(adap, adap->fn, adap->fn, 0, FW_IQ_TYPE_FL_INT_CAP,
                   rq->cntxt_id, fl_id, 0xffff);
        dma_free_coherent(adap->pdev_dev, (rq->size + 1) * rq->iqe_len,
                          rq->desc, rq->phys_addr);
        napi_hash_del(&rq->napi);
        netif_napi_del(&rq->napi);
        rq->netdev = NULL;
        rq->cntxt_id = rq->abs_id = 0;
        rq->desc = NULL;

        if (fl) {
                free_rx_bufs(adap, fl, fl->avail);
                dma_free_coherent(adap->pdev_dev, fl->size * 8 + s->stat_len,
                                  fl->desc, fl->addr);
                kfree(fl->sdesc);
                fl->sdesc = NULL;
                fl->cntxt_id = 0;
                fl->desc = NULL;
        }
}

/**
 * t4_free_ofld_rxqs - free a block of consecutive Rx queues
 * @adap: the adapter
 * @n: number of queues
 * @q: pointer to first queue
 *
 * Release the resources of a consecutive block of offload Rx queues.
 */
void t4_free_ofld_rxqs(struct adapter *adap, int n, struct sge_ofld_rxq *q)
{
        for ( ; n; n--, q++)
                if (q->rspq.desc)
                        free_rspq_fl(adap, &q->rspq,
                                     q->fl.size ? &q->fl : NULL);
}

/**
 * t4_free_sge_resources - free SGE resources
 * @adap: the adapter
 *
 * Frees resources used by the SGE queue sets.
 */
void t4_free_sge_resources(struct adapter *adap)
{
        int i;
        struct sge_eth_rxq *eq = adap->sge.ethrxq;
        struct sge_eth_txq *etq = adap->sge.ethtxq;

        /* clean up Ethernet Tx/Rx queues */
        for (i = 0; i < adap->sge.ethqsets; i++, eq++, etq++) {
                if (eq->rspq.desc)
                        free_rspq_fl(adap, &eq->rspq,
                                     eq->fl.size ? &eq->fl : NULL);
                if (etq->q.desc) {
                        t4_eth_eq_free(adap, adap->fn, adap->fn, 0,
                                       etq->q.cntxt_id);
                        free_tx_desc(adap, &etq->q, etq->q.in_use, true);
                        kfree(etq->q.sdesc);
                        free_txq(adap, &etq->q);
                }
        }

        /* clean up RDMA and iSCSI Rx queues */
        t4_free_ofld_rxqs(adap, adap->sge.ofldqsets, adap->sge.ofldrxq);
        t4_free_ofld_rxqs(adap, adap->sge.rdmaqs, adap->sge.rdmarxq);
        t4_free_ofld_rxqs(adap, adap->sge.rdmaciqs, adap->sge.rdmaciq);

        /* clean up offload Tx queues */
        for (i = 0; i < ARRAY_SIZE(adap->sge.ofldtxq); i++) {
                struct sge_ofld_txq *q = &adap->sge.ofldtxq[i];

                if (q->q.desc) {
                        tasklet_kill(&q->qresume_tsk);
                        t4_ofld_eq_free(adap, adap->fn, adap->fn, 0,
                                        q->q.cntxt_id);
                        free_tx_desc(adap, &q->q, q->q.in_use, false);
                        kfree(q->q.sdesc);
                        __skb_queue_purge(&q->sendq);
                        free_txq(adap, &q->q);
                }
        }

        /* clean up control Tx queues */
        for (i = 0; i < ARRAY_SIZE(adap->sge.ctrlq); i++) {
                struct sge_ctrl_txq *cq = &adap->sge.ctrlq[i];

                if (cq->q.desc) {
                        tasklet_kill(&cq->qresume_tsk);
                        t4_ctrl_eq_free(adap, adap->fn, adap->fn, 0,
                                        cq->q.cntxt_id);
                        __skb_queue_purge(&cq->sendq);
                        free_txq(adap, &cq->q);
                }
        }

        if (adap->sge.fw_evtq.desc)
                free_rspq_fl(adap, &adap->sge.fw_evtq, NULL);

        if (adap->sge.intrq.desc)
                free_rspq_fl(adap, &adap->sge.intrq, NULL);

        /* clear the reverse egress queue map */
        memset(adap->sge.egr_map, 0, sizeof(adap->sge.egr_map));
}

void t4_sge_start(struct adapter *adap)
{
        adap->sge.ethtxq_rover = 0;
        mod_timer(&adap->sge.rx_timer, jiffies + RX_QCHECK_PERIOD);
        mod_timer(&adap->sge.tx_timer, jiffies + TX_QCHECK_PERIOD);
}

/**
 * t4_sge_stop - disable SGE operation
 * @adap: the adapter
 *
 * Stop tasklets and timers associated with the DMA engine.  Note that
 * this is effective only if measures have been taken to disable any HW
 * events that may restart them.
 */
void t4_sge_stop(struct adapter *adap)
{
        int i;
        struct sge *s = &adap->sge;

        if (in_interrupt())  /* actions below require waiting */
                return;

        if (s->rx_timer.function)
                del_timer_sync(&s->rx_timer);
        if (s->tx_timer.function)
                del_timer_sync(&s->tx_timer);
        for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++) {
                struct sge_ofld_txq *q = &s->ofldtxq[i];

                if (q->q.desc)
                        tasklet_kill(&q->qresume_tsk);
        }
        for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++) {
                struct sge_ctrl_txq *cq = &s->ctrlq[i];

                if (cq->q.desc)
                        tasklet_kill(&cq->qresume_tsk);
        }
}

/**
 * t4_sge_init_soft - grab core SGE values needed by SGE code
 * @adap: the adapter
 *
 * We need to grab the SGE operating parameters that we need to have
 * in order to do our job and make sure we can live with them.
 */
static int t4_sge_init_soft(struct adapter *adap)
{
        struct sge *s = &adap->sge;
        u32 fl_small_pg, fl_large_pg, fl_small_mtu, fl_large_mtu;
        u32 timer_value_0_and_1, timer_value_2_and_3, timer_value_4_and_5;
        u32 ingress_rx_threshold;

        /*
         * Verify that CPL messages are going to the Ingress Queue for
         * process_responses() and that only packet data is going to the
         * Free Lists.
         */
        if ((t4_read_reg(adap, SGE_CONTROL_A) & RXPKTCPLMODE_F) !=
            RXPKTCPLMODE_V(RXPKTCPLMODE_SPLIT_X)) {
                dev_err(adap->pdev_dev, "bad SGE CPL MODE\n");
                return -EINVAL;
        }

        /*
         * Validate the Host Buffer Register Array indices that we want to
         * use ...
         *
         * XXX Note that we should really read through the Host Buffer Size
         * XXX register array and find the indices of the Buffer Sizes which
         * XXX meet our needs!
         */
        #define READ_FL_BUF(x) \
                t4_read_reg(adap, SGE_FL_BUFFER_SIZE0_A+(x)*sizeof(u32))

        fl_small_pg = READ_FL_BUF(RX_SMALL_PG_BUF);
        fl_large_pg = READ_FL_BUF(RX_LARGE_PG_BUF);
        fl_small_mtu = READ_FL_BUF(RX_SMALL_MTU_BUF);
        fl_large_mtu = READ_FL_BUF(RX_LARGE_MTU_BUF);

        /* We only bother using the Large Page logic if the Large Page Buffer
         * is larger than our Page Size Buffer.
         */
        if (fl_large_pg <= fl_small_pg)
                fl_large_pg = 0;

        #undef READ_FL_BUF

        /* The Page Size Buffer must be exactly equal to our Page Size and the
         * Large Page Size Buffer should be 0 (per above) or a power of 2.
         */
        if (fl_small_pg != PAGE_SIZE ||
            (fl_large_pg & (fl_large_pg-1)) != 0) {
                dev_err(adap->pdev_dev, "bad SGE FL page buffer sizes [%d, %d]\n",
                        fl_small_pg, fl_large_pg);
                return -EINVAL;
        }
        if (fl_large_pg)
                s->fl_pg_order = ilog2(fl_large_pg) - PAGE_SHIFT;
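
        /* For example, with 4KB pages (PAGE_SHIFT = 12) and a 64KB Large Page
         * Buffer this works out to fl_pg_order = ilog2(65536) - 12 = 4, i.e.
         * Free List buffers of 16 pages.
         */
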
        if (fl_small_mtu < FL_MTU_SMALL_BUFSIZE(adap) ||
            fl_large_mtu < FL_MTU_LARGE_BUFSIZE(adap)) {
                dev_err(adap->pdev_dev, "bad SGE FL MTU sizes [%d, %d]\n",
                        fl_small_mtu, fl_large_mtu);
                return -EINVAL;
        }

        /*
         * Retrieve our RX interrupt holdoff timer values and counter
         * threshold values from the SGE parameters.
         */
        timer_value_0_and_1 = t4_read_reg(adap, SGE_TIMER_VALUE_0_AND_1_A);
        timer_value_2_and_3 = t4_read_reg(adap, SGE_TIMER_VALUE_2_AND_3_A);
        timer_value_4_and_5 = t4_read_reg(adap, SGE_TIMER_VALUE_4_AND_5_A);
        s->timer_val[0] = core_ticks_to_us(adap,
                                           TIMERVALUE0_G(timer_value_0_and_1));
        s->timer_val[1] = core_ticks_to_us(adap,
                                           TIMERVALUE1_G(timer_value_0_and_1));
        s->timer_val[2] = core_ticks_to_us(adap,
                                           TIMERVALUE2_G(timer_value_2_and_3));
        s->timer_val[3] = core_ticks_to_us(adap,
                                           TIMERVALUE3_G(timer_value_2_and_3));
        s->timer_val[4] = core_ticks_to_us(adap,
                                           TIMERVALUE4_G(timer_value_4_and_5));
        s->timer_val[5] = core_ticks_to_us(adap,
                                           TIMERVALUE5_G(timer_value_4_and_5));

        ingress_rx_threshold = t4_read_reg(adap, SGE_INGRESS_RX_THRESHOLD_A);
        s->counter_val[0] = THRESHOLD_0_G(ingress_rx_threshold);
        s->counter_val[1] = THRESHOLD_1_G(ingress_rx_threshold);
        s->counter_val[2] = THRESHOLD_2_G(ingress_rx_threshold);
        s->counter_val[3] = THRESHOLD_3_G(ingress_rx_threshold);

        return 0;
}

/**
 * t4_sge_init - initialize SGE
 * @adap: the adapter
 *
 * Perform low-level SGE code initialization needed every time after a
 * chip reset.
 */
int t4_sge_init(struct adapter *adap)
{
        struct sge *s = &adap->sge;
        u32 sge_control, sge_control2, sge_conm_ctrl;
        unsigned int ingpadboundary, ingpackboundary;
        int ret, egress_threshold;

        /*
         * Ingress Padding Boundary and Egress Status Page Size are set up by
         * t4_fixup_host_params().
         */
        sge_control = t4_read_reg(adap, SGE_CONTROL_A);
        s->pktshift = PKTSHIFT_G(sge_control);
        s->stat_len = (sge_control & EGRSTATUSPAGESIZE_F) ? 128 : 64;

        /* T4 uses a single control field to specify both the PCIe Padding and
         * Packing Boundary.  T5 introduced the ability to specify these
         * separately.  The actual Ingress Packet Data alignment boundary
         * within Packed Buffer Mode is the maximum of these two
         * specifications.
         */
        ingpadboundary = 1 << (INGPADBOUNDARY_G(sge_control) +
                               INGPADBOUNDARY_SHIFT_X);
        if (is_t4(adap->params.chip)) {
                s->fl_align = ingpadboundary;
        } else {
                /* T5 has a different interpretation of one of the PCIe Packing
                 * Boundary values.
                 */
                sge_control2 = t4_read_reg(adap, SGE_CONTROL2_A);
                ingpackboundary = INGPACKBOUNDARY_G(sge_control2);
                if (ingpackboundary == INGPACKBOUNDARY_16B_X)
                        ingpackboundary = 16;
                else
                        ingpackboundary = 1 << (ingpackboundary +
                                                INGPACKBOUNDARY_SHIFT_X);

                s->fl_align = max(ingpadboundary, ingpackboundary);
        }

        ret = t4_sge_init_soft(adap);
        if (ret < 0)
                return ret;

        /*
         * A FL with <= fl_starve_thres buffers is starving and a periodic
         * timer will attempt to refill it.  This needs to be larger than the
         * SGE's Egress Congestion Threshold.  If it isn't, then we can get
         * stuck waiting for new packets while the SGE is waiting for us to
         * give it more Free List entries.  (Note that the SGE's Egress
         * Congestion Threshold is in units of 2 Free List pointers.)  For T4,
         * there was only a single field to control this.  For T5 there's the
         * original field which now only applies to Unpacked Mode Free List
         * buffers and a new field which only applies to Packed Mode Free List
         * buffers.
         */
        sge_conm_ctrl = t4_read_reg(adap, SGE_CONM_CTRL_A);
        if (is_t4(adap->params.chip))
                egress_threshold = EGRTHRESHOLD_G(sge_conm_ctrl);
        else
                egress_threshold = EGRTHRESHOLDPACKING_G(sge_conm_ctrl);
        s->fl_starve_thres = 2*egress_threshold + 1;

        setup_timer(&s->rx_timer, sge_rx_timer_cb, (unsigned long)adap);
        setup_timer(&s->tx_timer, sge_tx_timer_cb, (unsigned long)adap);
        s->idma_1s_thresh = core_ticks_per_usec(adap) * 1000000;  /* 1 s */
        s->idma_stalled[0] = 0;
        s->idma_stalled[1] = 0;
        spin_lock_init(&s->intrq_lock);