/*
 * This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet
 * driver for Linux.
 *
 * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ipv6.h>
#include <net/tcp.h>
#include <linux/dma-mapping.h>
#include <linux/prefetch.h>

#include "t4vf_common.h"
#include "t4vf_defs.h"

#include "../cxgb4/t4_regs.h"
#include "../cxgb4/t4_values.h"
#include "../cxgb4/t4fw_api.h"
#include "../cxgb4/t4_msg.h"

/*
 * Constants ...
 */
enum {
	/*
	 * Egress Queue sizes, producer and consumer indices are all in units
	 * of Egress Context Units bytes.  Note that as far as the hardware is
	 * concerned, the free list is an Egress Queue (the host produces free
	 * buffers which the hardware consumes) and free list entries are
	 * 64-bit PCI DMA addresses.
	 */
	EQ_UNIT = SGE_EQ_IDXSIZE,
	FL_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64),
	TXD_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64),
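
	/*
	 * Worked example (assuming SGE_EQ_IDXSIZE is 64 bytes, as on T4/T5
	 * hardware): EQ_UNIT = 64, so FL_PER_EQ_UNIT and TXD_PER_EQ_UNIT are
	 * both 64 / sizeof(__be64) = 8.  One PIDX credit therefore hands the
	 * hardware eight 64-bit Free List addresses, and one TX descriptor
	 * holds eight 8-byte flits.
	 */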

	/*
	 * Max number of TX descriptors we clean up at a time.  Should be
	 * modest as freeing skbs isn't cheap and it happens while holding
	 * locks.  We just need to free packets faster than they arrive, we
	 * eventually catch up and keep the amortized cost reasonable.
	 */
	MAX_TX_RECLAIM = 16,

	/*
	 * Max number of Rx buffers we replenish at a time.  Again keep this
	 * modest, allocating buffers isn't cheap either.
	 */
	MAX_RX_REFILL = 16,

	/*
	 * Period of the Rx queue check timer.  This timer is infrequent as it
	 * has something to do only when the system experiences severe memory
	 * shortage.
	 */
	RX_QCHECK_PERIOD = (HZ / 2),

	/*
	 * Period of the TX queue check timer and the maximum number of TX
	 * descriptors to be reclaimed by the TX timer.
	 */
	TX_QCHECK_PERIOD = (HZ / 2),
	MAX_TIMER_TX_RECLAIM = 100,

	/*
	 * Suspend an Ethernet TX queue with fewer available descriptors than
	 * this.  We always want to have room for a maximum sized packet:
	 * inline immediate data + MAX_SKB_FRAGS.  This is the same as
	 * calc_tx_flits() for a TSO packet with nr_frags == MAX_SKB_FRAGS
	 * (see that function and its helpers for a description of the
	 * calculation).
	 */
	ETHTXQ_MAX_FRAGS = MAX_SKB_FRAGS + 1,
	ETHTXQ_MAX_SGL_LEN = ((3 * (ETHTXQ_MAX_FRAGS-1))/2 +
			      ((ETHTXQ_MAX_FRAGS-1) & 1) +
			      2),
	ETHTXQ_MAX_HDR = (sizeof(struct fw_eth_tx_pkt_vm_wr) +
			  sizeof(struct cpl_tx_pkt_lso_core) +
			  sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64),
	ETHTXQ_MAX_FLITS = ETHTXQ_MAX_SGL_LEN + ETHTXQ_MAX_HDR,

	ETHTXQ_STOP_THRES = 1 + DIV_ROUND_UP(ETHTXQ_MAX_FLITS, TXD_PER_EQ_UNIT),

	/*
	 * Max TX descriptor space we allow for an Ethernet packet to be
	 * inlined into a WR.  This is limited by the maximum value which
	 * we can specify for immediate data in the firmware Ethernet TX
	 * Work Request.
	 */
	MAX_IMM_TX_PKT_LEN = FW_WR_IMMDLEN_M,

	/*
	 * Max size of a WR sent through a control TX queue.
	 */
	MAX_CTRL_WR_LEN = 256,

	/*
	 * Maximum amount of data which we'll ever need to inline into a
	 * TX ring: max(MAX_IMM_TX_PKT_LEN, MAX_CTRL_WR_LEN).
	 */
	MAX_IMM_TX_LEN = (MAX_IMM_TX_PKT_LEN > MAX_CTRL_WR_LEN
			  ? MAX_IMM_TX_PKT_LEN
			  : MAX_CTRL_WR_LEN),

	/*
	 * For incoming packets less than RX_COPY_THRES, we copy the data into
	 * an skb rather than referencing the data.  We allocate enough
	 * in-line room in skb's to accommodate pulling in RX_PULL_LEN bytes
	 * of the data (header).
	 */
	RX_COPY_THRES = 256,
	RX_PULL_LEN = 128,

	/*
	 * Main body length for sk_buffs used for RX Ethernet packets with
	 * fragments.  Should be >= RX_PULL_LEN but possibly bigger to give
	 * pskb_may_pull() some room.
	 */
	RX_SKB_LEN = 512,
};

/*
 * Software state per TX descriptor.
 */
struct tx_sw_desc {
	struct sk_buff *skb;		/* socket buffer of TX data source */
	struct ulptx_sgl *sgl;		/* scatter/gather list in TX Queue */
};

/*
 * Software state per RX Free List descriptor.  We keep track of the allocated
 * FL page, its size, and its PCI DMA address (if the page is mapped).  The FL
 * page size and its PCI DMA mapped state are stored in the low bits of the
 * PCI DMA address as per below.
 */
struct rx_sw_desc {
	struct page *page;		/* Free List page buffer */
	dma_addr_t dma_addr;		/* PCI DMA address (if mapped) */
					/* and flags (see below) */
};

/*
 * The low bits of rx_sw_desc.dma_addr have special meaning.  Note that the
 * SGE also uses the low 4 bits to determine the size of the buffer.  It uses
 * those bits to index into the SGE_FL_BUFFER_SIZE[index] register array.
 * Since we only use SGE_FL_BUFFER_SIZE0 and SGE_FL_BUFFER_SIZE1, these low 4
 * bits can only contain a 0 or a 1 to indicate which size buffer we're giving
 * to the SGE.  Thus, our software state of "is the buffer mapped for DMA" is
 * maintained in an inverse sense so the hardware never sees that bit high.
 */
enum {
	RX_LARGE_BUF    = 1 << 0,	/* buffer is SGE_FL_BUFFER_SIZE[1] */
	RX_UNMAPPED_BUF = 1 << 1,	/* buffer is not mapped */
};

/**
 *	get_buf_addr - return DMA buffer address of software descriptor
 *	@sdesc: pointer to the software buffer descriptor
 *
 *	Return the DMA buffer address of a software descriptor (stripping out
 *	our low-order flag bits).
 */
static inline dma_addr_t get_buf_addr(const struct rx_sw_desc *sdesc)
{
	return sdesc->dma_addr & ~(dma_addr_t)(RX_LARGE_BUF | RX_UNMAPPED_BUF);
}
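
/*
 * Illustrative example (hypothetical addresses): a mapped "large" buffer at
 * bus address 0x12340000 is stored in dma_addr as 0x12340001 (RX_LARGE_BUF
 * set, RX_UNMAPPED_BUF clear).  get_buf_addr() masks the two flag bits off
 * again, and is_buf_mapped() below reports true because RX_UNMAPPED_BUF is
 * clear.
 */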

/**
 *	is_buf_mapped - is buffer mapped for DMA?
 *	@sdesc: pointer to the software buffer descriptor
 *
 *	Determine whether the buffer associated with a software descriptor is
 *	mapped for DMA or not.
 */
static inline bool is_buf_mapped(const struct rx_sw_desc *sdesc)
{
	return !(sdesc->dma_addr & RX_UNMAPPED_BUF);
}

/**
 *	need_skb_unmap - does the platform need unmapping of sk_buffs?
 *
 *	Returns true if the platform needs sk_buff unmapping.  The compiler
 *	optimizes away unnecessary code if this returns a constant false.
 */
static inline int need_skb_unmap(void)
{
#ifdef CONFIG_NEED_DMA_MAP_STATE
	return 1;
#else
	return 0;
#endif
}

/**
 *	txq_avail - return the number of available slots in a TX queue
 *	@tq: the TX queue
 *
 *	Returns the number of available descriptors in a TX queue.
 */
static inline unsigned int txq_avail(const struct sge_txq *tq)
{
	return tq->size - 1 - tq->in_use;
}

/**
 *	fl_cap - return the capacity of a Free List
 *	@fl: the Free List
 *
 *	Returns the capacity of a Free List.  The capacity is less than the
 *	size because an Egress Queue Index Unit worth of descriptors needs to
 *	be left unpopulated, otherwise the Producer and Consumer indices PIDX
 *	and CIDX will match and the hardware will think the FL is empty.
 */
static inline unsigned int fl_cap(const struct sge_fl *fl)
{
	return fl->size - FL_PER_EQ_UNIT;
}
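
/*
 * For example (assuming EQ_UNIT is 64 bytes, so FL_PER_EQ_UNIT == 8): a Free
 * List with fl->size == 1024 has a usable capacity of 1016 buffers.  The
 * final eight entries are never populated so that a completely full Free
 * List still presents PIDX != CIDX to the hardware.
 */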

/**
 *	fl_starving - return whether a Free List is starving.
 *	@adapter: pointer to the adapter
 *	@fl: the Free List
 *
 *	Tests specified Free List to see whether the number of buffers
 *	available to the hardware has fallen below our "starvation"
 *	threshold.
 */
static inline bool fl_starving(const struct adapter *adapter,
			       const struct sge_fl *fl)
{
	const struct sge *s = &adapter->sge;

	return fl->avail - fl->pend_cred <= s->fl_starve_thres;
}

/**
 *	map_skb - map an skb for DMA to the device
 *	@dev: the egress net device
 *	@skb: the packet to map
 *	@addr: a pointer to the base of the DMA mapping array
 *
 *	Map an skb for DMA to the device and return an array of DMA addresses.
 */
static int map_skb(struct device *dev, const struct sk_buff *skb,
		   dma_addr_t *addr)
{
	const skb_frag_t *fp, *end;
	const struct skb_shared_info *si;

	*addr = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *addr))
		goto out_err;

	si = skb_shinfo(skb);
	end = &si->frags[si->nr_frags];
	for (fp = si->frags; fp < end; fp++) {
		*++addr = skb_frag_dma_map(dev, fp, 0, skb_frag_size(fp),
					   DMA_TO_DEVICE);
		if (dma_mapping_error(dev, *addr))
			goto unwind;
	}
	return 0;

unwind:
	while (fp-- > si->frags)
		dma_unmap_page(dev, *--addr, skb_frag_size(fp), DMA_TO_DEVICE);
	dma_unmap_single(dev, addr[-1], skb_headlen(skb), DMA_TO_DEVICE);

out_err:
	return -ENOMEM;
}

static void unmap_sgl(struct device *dev, const struct sk_buff *skb,
		      const struct ulptx_sgl *sgl, const struct sge_txq *tq)
{
	const struct ulptx_sge_pair *p;
	unsigned int nfrags = skb_shinfo(skb)->nr_frags;

	if (likely(skb_headlen(skb)))
		dma_unmap_single(dev, be64_to_cpu(sgl->addr0),
				 be32_to_cpu(sgl->len0), DMA_TO_DEVICE);
	else {
		dma_unmap_page(dev, be64_to_cpu(sgl->addr0),
			       be32_to_cpu(sgl->len0), DMA_TO_DEVICE);
		nfrags--;
	}

	/*
	 * the complexity below is because of the possibility of a wrap-around
	 * in the middle of an SGL
	 */
	for (p = sgl->sge; nfrags >= 2; nfrags -= 2) {
		if (likely((u8 *)(p + 1) <= (u8 *)tq->stat)) {
unmap:
			dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
				       be32_to_cpu(p->len[0]), DMA_TO_DEVICE);
			dma_unmap_page(dev, be64_to_cpu(p->addr[1]),
				       be32_to_cpu(p->len[1]), DMA_TO_DEVICE);
			p++;
		} else if ((u8 *)p == (u8 *)tq->stat) {
			p = (const struct ulptx_sge_pair *)tq->desc;
			goto unmap;
		} else if ((u8 *)p + 8 == (u8 *)tq->stat) {
			const __be64 *addr = (const __be64 *)tq->desc;

			dma_unmap_page(dev, be64_to_cpu(addr[0]),
				       be32_to_cpu(p->len[0]), DMA_TO_DEVICE);
			dma_unmap_page(dev, be64_to_cpu(addr[1]),
				       be32_to_cpu(p->len[1]), DMA_TO_DEVICE);
			p = (const struct ulptx_sge_pair *)&addr[2];
		} else {
			const __be64 *addr = (const __be64 *)tq->desc;

			dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
				       be32_to_cpu(p->len[0]), DMA_TO_DEVICE);
			dma_unmap_page(dev, be64_to_cpu(addr[0]),
				       be32_to_cpu(p->len[1]), DMA_TO_DEVICE);
			p = (const struct ulptx_sge_pair *)&addr[1];
		}
	}
	if (nfrags) {
		__be64 addr;

		if ((u8 *)p == (u8 *)tq->stat)
			p = (const struct ulptx_sge_pair *)tq->desc;
		addr = ((u8 *)p + 16 <= (u8 *)tq->stat
			? p->addr[0]
			: *(const __be64 *)tq->desc);
		dma_unmap_page(dev, be64_to_cpu(addr), be32_to_cpu(p->len[0]),
			       DMA_TO_DEVICE);
	}
}

/**
 *	free_tx_desc - reclaims TX descriptors and their buffers
 *	@adapter: the adapter
 *	@tq: the TX queue to reclaim descriptors from
 *	@n: the number of descriptors to reclaim
 *	@unmap: whether the buffers should be unmapped for DMA
 *
 *	Reclaims TX descriptors from an SGE TX queue and frees the associated
 *	TX buffers.  Called with the TX queue lock held.
 */
static void free_tx_desc(struct adapter *adapter, struct sge_txq *tq,
			 unsigned int n, bool unmap)
{
	struct tx_sw_desc *sdesc;
	unsigned int cidx = tq->cidx;
	struct device *dev = adapter->pdev_dev;

	const int need_unmap = need_skb_unmap() && unmap;

	sdesc = &tq->sdesc[cidx];
	while (n--) {
		/*
		 * If we kept a reference to the original TX skb, we need to
		 * unmap it from PCI DMA space (if required) and free it.
		 */
		if (sdesc->skb) {
			if (need_unmap)
				unmap_sgl(dev, sdesc->skb, sdesc->sgl, tq);
			dev_consume_skb_any(sdesc->skb);
			sdesc->skb = NULL;
		}

		sdesc++;
		if (++cidx == tq->size) {
			cidx = 0;
			sdesc = tq->sdesc;
		}
	}
	tq->cidx = cidx;
}

/*
 * Return the number of reclaimable descriptors in a TX queue.
 */
static inline int reclaimable(const struct sge_txq *tq)
{
	int hw_cidx = be16_to_cpu(tq->stat->cidx);
	int reclaimable = hw_cidx - tq->cidx;

	if (reclaimable < 0)
		reclaimable += tq->size;
	return reclaimable;
}
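
/*
 * Example of the wrap-around arithmetic above: with tq->size == 1024, a
 * hardware CIDX of 3 and a software CIDX of 1020 give hw_cidx - tq->cidx ==
 * -1017; adding tq->size yields the 7 descriptors the hardware has actually
 * finished with.
 */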

/**
 *	reclaim_completed_tx - reclaims completed TX descriptors
 *	@adapter: the adapter
 *	@tq: the TX queue to reclaim completed descriptors from
 *	@unmap: whether the buffers should be unmapped for DMA
 *
 *	Reclaims TX descriptors that the SGE has indicated it has processed,
 *	and frees the associated buffers if possible.  Called with the TX
 *	queue locked.
 */
static inline void reclaim_completed_tx(struct adapter *adapter,
					struct sge_txq *tq,
					bool unmap)
{
	int avail = reclaimable(tq);

	if (avail) {
		/*
		 * Limit the amount of clean up work we do at a time to keep
		 * the TX lock hold time O(1).
		 */
		if (avail > MAX_TX_RECLAIM)
			avail = MAX_TX_RECLAIM;

		free_tx_desc(adapter, tq, avail, unmap);
		tq->in_use -= avail;
	}
}

/**
 *	get_buf_size - return the size of an RX Free List buffer.
 *	@adapter: pointer to the associated adapter
 *	@sdesc: pointer to the software buffer descriptor
 */
static inline int get_buf_size(const struct adapter *adapter,
			       const struct rx_sw_desc *sdesc)
{
	const struct sge *s = &adapter->sge;

	return (s->fl_pg_order > 0 && (sdesc->dma_addr & RX_LARGE_BUF)
		? (PAGE_SIZE << s->fl_pg_order) : PAGE_SIZE);
}

/**
 *	free_rx_bufs - free RX buffers on an SGE Free List
 *	@adapter: the adapter
 *	@fl: the SGE Free List to free buffers from
 *	@n: how many buffers to free
 *
 *	Release the next @n buffers on an SGE Free List RX queue.  The
 *	buffers must be made inaccessible to hardware before calling this
 *	function.
 */
static void free_rx_bufs(struct adapter *adapter, struct sge_fl *fl, int n)
{
	while (n--) {
		struct rx_sw_desc *sdesc = &fl->sdesc[fl->cidx];

		if (is_buf_mapped(sdesc))
			dma_unmap_page(adapter->pdev_dev, get_buf_addr(sdesc),
				       get_buf_size(adapter, sdesc),
				       DMA_FROM_DEVICE);
		put_page(sdesc->page);
		sdesc->page = NULL;
		if (++fl->cidx == fl->size)
			fl->cidx = 0;
		fl->avail--;
	}
}

/**
 *	unmap_rx_buf - unmap the current RX buffer on an SGE Free List
 *	@adapter: the adapter
 *	@fl: the SGE Free List
 *
 *	Unmap the current buffer on an SGE Free List RX queue.  The
 *	buffer must be made inaccessible to HW before calling this function.
 *
 *	This is similar to @free_rx_bufs above but does not free the buffer.
 *	Do note that the FL still loses any further access to the buffer.
 *	This is used predominantly to "transfer ownership" of an FL buffer
 *	to another entity (typically an skb's fragment list).
 */
static void unmap_rx_buf(struct adapter *adapter, struct sge_fl *fl)
{
	struct rx_sw_desc *sdesc = &fl->sdesc[fl->cidx];

	if (is_buf_mapped(sdesc))
		dma_unmap_page(adapter->pdev_dev, get_buf_addr(sdesc),
			       get_buf_size(adapter, sdesc),
			       DMA_FROM_DEVICE);
	sdesc->page = NULL;
	if (++fl->cidx == fl->size)
		fl->cidx = 0;
	fl->avail--;
}

/**
 *	ring_fl_db - ring doorbell on free list
 *	@adapter: the adapter
 *	@fl: the Free List whose doorbell should be rung ...
 *
 *	Tell the Scatter Gather Engine that there are new free list entries
 *	available.
 */
static inline void ring_fl_db(struct adapter *adapter, struct sge_fl *fl)
{
	u32 val;

	/* The SGE keeps track of its Producer and Consumer Indices in terms
	 * of Egress Queue Units so we can only tell it about integral numbers
	 * of multiples of Free List Entries per Egress Queue Units ...
	 */
	if (fl->pend_cred >= FL_PER_EQ_UNIT) {
		if (is_t4(adapter->params.chip))
			val = PIDX_V(fl->pend_cred / FL_PER_EQ_UNIT);
		else
			val = PIDX_T5_V(fl->pend_cred / FL_PER_EQ_UNIT) |
			      DBTYPE_F;
		val |= DBPRIO_F;

		/* Make sure all memory writes to the Free List queue are
		 * committed before we tell the hardware about them.
		 */
		wmb();

		/* If we don't have access to the new User Doorbell (T5+), use
		 * the old doorbell mechanism; otherwise use the new BAR2
		 * mechanism.
		 */
		if (unlikely(fl->bar2_addr == NULL)) {
			t4_write_reg(adapter,
				     T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL,
				     QID_V(fl->cntxt_id) | val);
		} else {
			writel(val | QID_V(fl->bar2_qid),
			       fl->bar2_addr + SGE_UDB_KDOORBELL);

			/* This Write memory Barrier will force the write to
			 * the User Doorbell area to be flushed.
			 */
			wmb();
		}
		fl->pend_cred %= FL_PER_EQ_UNIT;
	}
}
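
/*
 * Note on the credit arithmetic above: the doorbell write reports
 * pend_cred / FL_PER_EQ_UNIT whole Egress Queue Units to the hardware, so
 * only the remainder (pend_cred % FL_PER_EQ_UNIT) is carried forward to be
 * reported the next time the doorbell is rung.
 */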

/**
 *	set_rx_sw_desc - initialize software RX buffer descriptor
 *	@sdesc: pointer to the software RX buffer descriptor
 *	@page: pointer to the page data structure backing the RX buffer
 *	@dma_addr: PCI DMA address (possibly with low-bit flags)
 */
static inline void set_rx_sw_desc(struct rx_sw_desc *sdesc, struct page *page,
				  dma_addr_t dma_addr)
{
	sdesc->page = page;
	sdesc->dma_addr = dma_addr;
}
583 #define POISON_BUF_VAL -1
585 static inline void poison_buf(struct page
*page
, size_t sz
)
587 #if POISON_BUF_VAL >= 0
588 memset(page_address(page
), POISON_BUF_VAL
, sz
);

/**
 *	refill_fl - refill an SGE RX buffer ring
 *	@adapter: the adapter
 *	@fl: the Free List ring to refill
 *	@n: the number of new buffers to allocate
 *	@gfp: the gfp flags for the allocations
 *
 *	(Re)populate an SGE free-buffer queue with up to @n new packet buffers,
 *	allocated with the supplied gfp flags.  The caller must assure that
 *	@n does not exceed the queue's capacity -- i.e. (cidx == pidx) _IN
 *	EGRESS QUEUE UNITS_ indicates an empty Free List!  Returns the number
 *	of buffers allocated.  If afterwards the queue is found critically low,
 *	mark it as starving in the bitmap of starving FLs.
 */
static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl,
			      int n, gfp_t gfp)
{
	struct sge *s = &adapter->sge;
	struct page *page;
	dma_addr_t dma_addr;
	unsigned int cred = fl->avail;
	__be64 *d = &fl->desc[fl->pidx];
	struct rx_sw_desc *sdesc = &fl->sdesc[fl->pidx];

	gfp |= __GFP_NOWARN;

	/*
	 * Sanity: ensure that the result of adding n Free List buffers
	 * won't result in wrapping the SGE's Producer Index around to
	 * its Consumer Index thereby indicating an empty Free List ...
	 */
	BUG_ON(fl->avail + n > fl->size - FL_PER_EQ_UNIT);

	/*
	 * If we support large pages, prefer large buffers and fail over to
	 * small pages if we can't allocate large pages to satisfy the refill.
	 * If we don't support large pages, drop directly into the small page
	 * allocation code.
	 */
	if (s->fl_pg_order == 0)
		goto alloc_small_pages;

	while (n) {
		page = __dev_alloc_pages(gfp, s->fl_pg_order);
		if (unlikely(!page)) {
			/*
			 * We've failed in our attempt to allocate a "large
			 * page".  Fail over to the "small page" allocation
			 * below.
			 */
			fl->large_alloc_failed++;
			break;
		}
		poison_buf(page, PAGE_SIZE << s->fl_pg_order);

		dma_addr = dma_map_page(adapter->pdev_dev, page, 0,
					PAGE_SIZE << s->fl_pg_order,
					DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(adapter->pdev_dev, dma_addr))) {
			/*
			 * We've run out of DMA mapping space.  Free up the
			 * buffer and return with what we've managed to put
			 * into the free list.  We don't want to fail over to
			 * the small page allocation below in this case
			 * because DMA mapping resources are typically
			 * critical resources once they become scarce.
			 */
			__free_pages(page, s->fl_pg_order);
			goto out;
		}
		dma_addr |= RX_LARGE_BUF;
		*d++ = cpu_to_be64(dma_addr);

		set_rx_sw_desc(sdesc, page, dma_addr);
		sdesc++;

		fl->avail++;
		if (++fl->pidx == fl->size) {
			fl->pidx = 0;
			sdesc = fl->sdesc;
			d = fl->desc;
		}
		n--;
	}

alloc_small_pages:
	while (n--) {
		page = __dev_alloc_page(gfp);
		if (unlikely(!page)) {
			fl->alloc_failed++;
			break;
		}
		poison_buf(page, PAGE_SIZE);

		dma_addr = dma_map_page(adapter->pdev_dev, page, 0, PAGE_SIZE,
					DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(adapter->pdev_dev, dma_addr))) {
			put_page(page);
			break;
		}
		*d++ = cpu_to_be64(dma_addr);

		set_rx_sw_desc(sdesc, page, dma_addr);
		sdesc++;

		fl->avail++;
		if (++fl->pidx == fl->size) {
			fl->pidx = 0;
			sdesc = fl->sdesc;
			d = fl->desc;
		}
	}

out:
	/*
	 * Update our accounting state to incorporate the new Free List
	 * buffers, tell the hardware about them and return the number of
	 * buffers which we were able to allocate.
	 */
	cred = fl->avail - cred;
	fl->pend_cred += cred;
	ring_fl_db(adapter, fl);

	if (unlikely(fl_starving(adapter, fl))) {
		smp_wmb();
		set_bit(fl->cntxt_id, adapter->sge.starving_fl);
	}

	return cred;
}

/*
 * Refill a Free List to its capacity or the Maximum Refill Increment,
 * whichever is smaller ...
 */
static inline void __refill_fl(struct adapter *adapter, struct sge_fl *fl)
{
	refill_fl(adapter, fl,
		  min((unsigned int)MAX_RX_REFILL, fl_cap(fl) - fl->avail),
		  GFP_ATOMIC);
}
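
/*
 * For example, with MAX_RX_REFILL == 16 (as defined above): a Free List with
 * a capacity of 1016 buffers and 1010 currently available is topped up by
 * just 6 buffers, while one that has drained far below capacity is refilled
 * in chunks of at most 16 buffers per call.
 */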

/**
 *	alloc_ring - allocate resources for an SGE descriptor ring
 *	@dev: the PCI device's core device
 *	@nelem: the number of descriptors
 *	@hwsize: the size of each hardware descriptor
 *	@swsize: the size of each software descriptor
 *	@busaddrp: the physical PCI bus address of the allocated ring
 *	@swringp: return address pointer for software ring
 *	@stat_size: extra space in hardware ring for status information
 *
 *	Allocates resources for an SGE descriptor ring, such as TX queues,
 *	free buffer lists, response queues, etc.  Each SGE ring requires
 *	space for its hardware descriptors plus, optionally, space for software
 *	state associated with each hardware entry (the metadata).  The function
 *	returns three values: the virtual address for the hardware ring (the
 *	return value of the function), the PCI bus address of the hardware
 *	ring (in *busaddrp), and the address of the software ring (in swringp).
 *	Both the hardware and software rings are returned zeroed out.
 */
static void *alloc_ring(struct device *dev, size_t nelem, size_t hwsize,
			size_t swsize, dma_addr_t *busaddrp, void *swringp,
			size_t stat_size)
{
	/*
	 * Allocate the hardware ring and PCI DMA bus address space for said.
	 */
	size_t hwlen = nelem * hwsize + stat_size;
	void *hwring = dma_alloc_coherent(dev, hwlen, busaddrp, GFP_KERNEL);

	if (!hwring)
		return NULL;

	/*
	 * If the caller wants a software ring, allocate it and return a
	 * pointer to it in *swringp.
	 */
	BUG_ON((swsize != 0) != (swringp != NULL));
	if (swsize) {
		void *swring = kcalloc(nelem, swsize, GFP_KERNEL);

		if (!swring) {
			dma_free_coherent(dev, hwlen, hwring, *busaddrp);
			return NULL;
		}
		*(void **)swringp = swring;
	}

	/*
	 * Zero out the hardware ring and return its address as our function
	 * value.
	 */
	memset(hwring, 0, hwlen);
	return hwring;
}

/**
 *	sgl_len - calculates the size of an SGL of the given capacity
 *	@n: the number of SGL entries
 *
 *	Calculates the number of flits (8-byte units) needed for a Direct
 *	Scatter/Gather List that can hold the given number of entries.
 */
static inline unsigned int sgl_len(unsigned int n)
{
	/*
	 * A Direct Scatter Gather List uses 32-bit lengths and 64-bit PCI DMA
	 * addresses.  The DSGL Work Request starts off with a 32-bit DSGL
	 * ULPTX header, then Length0, then Address0, then, for 1 <= i <= N,
	 * repeated sequences of { Length[i], Length[i+1], Address[i],
	 * Address[i+1] } (this ensures that all addresses are on 64-bit
	 * boundaries).  If N is even, then Length[N+1] should be set to 0 and
	 * Address[N+1] is omitted.
	 *
	 * The following calculation incorporates all of the above.  It's
	 * somewhat hard to follow but, briefly: the "+2" accounts for the
	 * first two flits which include the DSGL header, Length0 and
	 * Address0; the "(3*(n-1))/2" covers the main body of list entries (3
	 * flits for every pair of the remaining N) +1 if (n-1) is odd; and
	 * finally the "+((n-1)&1)" adds the one remaining flit needed if
	 * (n-1) is odd ...
	 */
	n--;
	return (3 * n) / 2 + (n & 1) + 2;
}
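
/*
 * Worked example for the flit arithmetic above: for n == 3 SGL entries we
 * get, after the n-- above, (3 * 2) / 2 + (2 & 1) + 2 == 5 flits: two flits
 * for the DSGL header, Length0 and Address0, one flit for the packed
 * { Length1, Length2 } pair and two more flits for Address1 and Address2.
 */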

/**
 *	flits_to_desc - returns the num of TX descriptors for the given flits
 *	@flits: the number of flits
 *
 *	Returns the number of TX descriptors needed for the supplied number
 *	of flits.
 */
static inline unsigned int flits_to_desc(unsigned int flits)
{
	BUG_ON(flits > SGE_MAX_WR_LEN / sizeof(__be64));
	return DIV_ROUND_UP(flits, TXD_PER_EQ_UNIT);
}
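
/*
 * For example (assuming EQ_UNIT is 64 bytes, i.e. TXD_PER_EQ_UNIT == 8): a
 * Work Request of 11 flits needs DIV_ROUND_UP(11, 8) == 2 TX descriptors.
 */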

/**
 *	is_eth_imm - can an Ethernet packet be sent as immediate data?
 *	@skb: the packet
 *
 *	Returns whether an Ethernet packet is small enough to fit completely as
 *	immediate data.
 */
static inline int is_eth_imm(const struct sk_buff *skb)
{
	/*
	 * The VF Driver uses the FW_ETH_TX_PKT_VM_WR firmware Work Request
	 * which does not accommodate immediate data.  We could dike out all
	 * of the support code for immediate data but that would tie our hands
	 * too much if we ever want to enhance the firmware.  It would also
	 * create more differences between the PF and VF Drivers.
	 */
	return false;
}

/**
 *	calc_tx_flits - calculate the number of flits for a packet TX WR
 *	@skb: the packet
 *
 *	Returns the number of flits needed for a TX Work Request for the
 *	given Ethernet packet, including the needed WR and CPL headers.
 */
static inline unsigned int calc_tx_flits(const struct sk_buff *skb)
{
	unsigned int flits;

	/*
	 * If the skb is small enough, we can pump it out as a work request
	 * with only immediate data.  In that case we just have to have the
	 * TX Packet header plus the skb data in the Work Request.
	 */
	if (is_eth_imm(skb))
		return DIV_ROUND_UP(skb->len + sizeof(struct cpl_tx_pkt),
				    sizeof(__be64));

	/*
	 * Otherwise, we're going to have to construct a Scatter gather list
	 * of the skb body and fragments.  We also include the flits necessary
	 * for the TX Packet Work Request and CPL.  We always have a firmware
	 * Write Header (incorporated as part of the cpl_tx_pkt_lso and
	 * cpl_tx_pkt structures), followed by either a TX Packet Write CPL
	 * message or, if we're doing a Large Send Offload, an LSO CPL message
	 * with an embedded TX Packet Write CPL message.
	 */
	flits = sgl_len(skb_shinfo(skb)->nr_frags + 1);
	if (skb_shinfo(skb)->gso_size)
		flits += (sizeof(struct fw_eth_tx_pkt_vm_wr) +
			  sizeof(struct cpl_tx_pkt_lso_core) +
			  sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
	else
		flits += (sizeof(struct fw_eth_tx_pkt_vm_wr) +
			  sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
	return flits;
}

/**
 *	write_sgl - populate a Scatter/Gather List for a packet
 *	@skb: the packet
 *	@tq: the TX queue we are writing into
 *	@sgl: starting location for writing the SGL
 *	@end: points right after the end of the SGL
 *	@start: start offset into skb main-body data to include in the SGL
 *	@addr: the list of DMA bus addresses for the SGL elements
 *
 *	Generates a Scatter/Gather List for the buffers that make up a packet.
 *	The caller must provide adequate space for the SGL that will be written.
 *	The SGL includes all of the packet's page fragments and the data in its
 *	main body except for the first @start bytes.  @sgl must be 16-byte
 *	aligned and within a TX descriptor with available space.  @end points
 *	right after the end of the SGL but does not account for any potential
 *	wrap around, i.e., @end > @tq->stat.
 */
static void write_sgl(const struct sk_buff *skb, struct sge_txq *tq,
		      struct ulptx_sgl *sgl, u64 *end, unsigned int start,
		      const dma_addr_t *addr)
{
	unsigned int i, len;
	struct ulptx_sge_pair *to;
	const struct skb_shared_info *si = skb_shinfo(skb);
	unsigned int nfrags = si->nr_frags;
	struct ulptx_sge_pair buf[MAX_SKB_FRAGS / 2 + 1];

	len = skb_headlen(skb) - start;
	if (likely(len)) {
		sgl->len0 = htonl(len);
		sgl->addr0 = cpu_to_be64(addr[0] + start);
		nfrags++;
	} else {
		sgl->len0 = htonl(skb_frag_size(&si->frags[0]));
		sgl->addr0 = cpu_to_be64(addr[1]);
	}

	sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
			      ULPTX_NSGE_V(nfrags));
	if (likely(--nfrags == 0))
		return;
	/*
	 * Most of the complexity below deals with the possibility we hit the
	 * end of the queue in the middle of writing the SGL.  For this case
	 * only we create the SGL in a temporary buffer and then copy it.
	 */
	to = (u8 *)end > (u8 *)tq->stat ? buf : sgl->sge;

	for (i = (nfrags != si->nr_frags); nfrags >= 2; nfrags -= 2, to++) {
		to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
		to->len[1] = cpu_to_be32(skb_frag_size(&si->frags[++i]));
		to->addr[0] = cpu_to_be64(addr[i]);
		to->addr[1] = cpu_to_be64(addr[++i]);
	}
	if (nfrags) {
		to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
		to->len[1] = cpu_to_be32(0);
		to->addr[0] = cpu_to_be64(addr[i + 1]);
	}
	if (unlikely((u8 *)end > (u8 *)tq->stat)) {
		unsigned int part0 = (u8 *)tq->stat - (u8 *)sgl->sge, part1;

		if (likely(part0))
			memcpy(sgl->sge, buf, part0);
		part1 = (u8 *)end - (u8 *)tq->stat;
		memcpy(tq->desc, (u8 *)buf + part0, part1);
		end = (void *)tq->desc + part1;
	}
	if ((uintptr_t)end & 8)		/* 0-pad to multiple of 16 */
		*end = 0;
}

/**
 *	ring_tx_db - check and potentially ring a TX queue's doorbell
 *	@adapter: the adapter
 *	@tq: the TX queue
 *	@n: number of new descriptors to give to HW
 *
 *	Ring the doorbell for a TX queue.
 */
static inline void ring_tx_db(struct adapter *adapter, struct sge_txq *tq,
			      int n)
{
	/* Make sure that all writes to the TX Descriptors are committed
	 * before we tell the hardware about them.
	 */
	wmb();

	/* If we don't have access to the new User Doorbell (T5+), use the old
	 * doorbell mechanism; otherwise use the new BAR2 mechanism.
	 */
	if (unlikely(tq->bar2_addr == NULL)) {
		u32 val = PIDX_V(n);

		t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL,
			     QID_V(tq->cntxt_id) | val);
	} else {
		u32 val = PIDX_T5_V(n);

		/* T4 and later chips share the same PIDX field offset within
		 * the doorbell, but T5 and later shrank the field in order to
		 * gain a bit for Doorbell Priority.  The field was absurdly
		 * large in the first place (14 bits) so we just use the T5
		 * and later limits and warn if a Queue ID is too large.
		 */
		WARN_ON(val & DBPRIO_F);

		/* If we're only writing a single Egress Unit and the BAR2
		 * Queue ID is 0, we can use the Write Combining Doorbell
		 * Gather Buffer; otherwise we use the simple doorbell.
		 */
		if (n == 1 && tq->bar2_qid == 0) {
			unsigned int index = (tq->pidx
					      ? (tq->pidx - 1)
					      : (tq->size - 1));
			__be64 *src = (__be64 *)&tq->desc[index];
			__be64 __iomem *dst = (__be64 __iomem *)(tq->bar2_addr +
							 SGE_UDB_WCDOORBELL);
			unsigned int count = EQ_UNIT / sizeof(__be64);

			/* Copy the TX Descriptor in a tight loop in order to
			 * try to get it to the adapter in a single Write
			 * Combined transfer on the PCI-E Bus.  If the Write
			 * Combine fails (say because of an interrupt, etc.)
			 * the hardware will simply take the last write as a
			 * simple doorbell write with a PIDX Increment of 1
			 * and will fetch the TX Descriptor from memory via
			 * DMA.
			 */
			while (count) {
				/* the (__force u64) is because the compiler
				 * doesn't understand the endian swizzling
				 * going on
				 */
				writeq((__force u64)*src, dst);
				src++;
				dst++;
				count--;
			}
		} else
			writel(val | QID_V(tq->bar2_qid),
			       tq->bar2_addr + SGE_UDB_KDOORBELL);

		/* This Write Memory Barrier will force the write to the User
		 * Doorbell area to be flushed.  This is needed to prevent
		 * writes on different CPUs for the same queue from hitting
		 * the adapter out of order.  This is required when some Work
		 * Requests take the Write Combine Gather Buffer path (user
		 * doorbell area offset [SGE_UDB_WCDOORBELL..+63]) and some
		 * take the traditional path where we simply increment the
		 * PIDX (User Doorbell area SGE_UDB_KDOORBELL) and have the
		 * hardware DMA read the actual Work Request.
		 */
		wmb();
	}
}

/**
 *	inline_tx_skb - inline a packet's data into TX descriptors
 *	@skb: the packet
 *	@tq: the TX queue where the packet will be inlined
 *	@pos: starting position in the TX queue to inline the packet
 *
 *	Inline a packet's contents directly into TX descriptors, starting at
 *	the given position within the TX DMA ring.
 *	Most of the complexity of this operation is dealing with wrap arounds
 *	in the middle of the packet we want to inline.
 */
static void inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *tq,
			  void *pos)
{
	u64 *p;
	int left = (void *)tq->stat - pos;

	if (likely(skb->len <= left)) {
		if (likely(!skb->data_len))
			skb_copy_from_linear_data(skb, pos, skb->len);
		else
			skb_copy_bits(skb, 0, pos, skb->len);
		pos += skb->len;
	} else {
		skb_copy_bits(skb, 0, pos, left);
		skb_copy_bits(skb, left, tq->desc, skb->len - left);
		pos = (void *)tq->desc + (skb->len - left);
	}

	/* 0-pad to multiple of 16 */
	p = PTR_ALIGN(pos, 8);
	if ((uintptr_t)p & 8)
		*p = 0;
}
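
/*
 * Note on the 0-pad above: pos is first rounded up to an 8-byte boundary;
 * if that boundary is not also 16-byte aligned (bit 3 set), one zero flit is
 * written so the inlined data always ends on a 16-byte boundary.
 */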

/*
 * Figure out what HW csum a packet wants and return the appropriate control
 * bits.
 */
static u64 hwcsum(const struct sk_buff *skb)
{
	int csum_type;
	const struct iphdr *iph = ip_hdr(skb);

	if (iph->version == 4) {
		if (iph->protocol == IPPROTO_TCP)
			csum_type = TX_CSUM_TCPIP;
		else if (iph->protocol == IPPROTO_UDP)
			csum_type = TX_CSUM_UDPIP;
		else {
nocsum:
			/*
			 * unknown protocol, disable HW csum
			 * and hope a bad packet is detected
			 */
			return TXPKT_L4CSUM_DIS;
		}
	} else {
		/*
		 * this doesn't work with extension headers
		 */
		const struct ipv6hdr *ip6h = (const struct ipv6hdr *)iph;

		if (ip6h->nexthdr == IPPROTO_TCP)
			csum_type = TX_CSUM_TCPIP6;
		else if (ip6h->nexthdr == IPPROTO_UDP)
			csum_type = TX_CSUM_UDPIP6;
		else
			goto nocsum;
	}

	if (likely(csum_type >= TX_CSUM_TCPIP))
		return TXPKT_CSUM_TYPE(csum_type) |
			TXPKT_IPHDR_LEN(skb_network_header_len(skb)) |
			TXPKT_ETHHDR_LEN(skb_network_offset(skb) - ETH_HLEN);
	else {
		int start = skb_transport_offset(skb);

		return TXPKT_CSUM_TYPE(csum_type) |
			TXPKT_CSUM_START(start) |
			TXPKT_CSUM_LOC(start + skb->csum_offset);
	}
}

/*
 * Stop an Ethernet TX queue and record that state change.
 */
static void txq_stop(struct sge_eth_txq *txq)
{
	netif_tx_stop_queue(txq->txq);
	txq->q.stops++;
}

/*
 * Advance our software state for a TX queue by adding n in use descriptors.
 */
static inline void txq_advance(struct sge_txq *tq, unsigned int n)
{
	tq->in_use += n;
	tq->pidx += n;
	if (tq->pidx >= tq->size)
		tq->pidx -= tq->size;
}

/**
 *	t4vf_eth_xmit - add a packet to an Ethernet TX queue
 *	@skb: the packet
 *	@dev: the egress net device
 *
 *	Add a packet to an SGE Ethernet TX queue.  Runs with softirqs disabled.
 */
int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
	u32 wr_mid;
	u64 cntrl, *end;
	int qidx, credits;
	unsigned int flits, ndesc;
	struct adapter *adapter;
	struct sge_eth_txq *txq;
	const struct port_info *pi;
	struct fw_eth_tx_pkt_vm_wr *wr;
	struct cpl_tx_pkt_core *cpl;
	const struct skb_shared_info *ssi;
	dma_addr_t addr[MAX_SKB_FRAGS + 1];
	const size_t fw_hdr_copy_len = (sizeof(wr->ethmacdst) +
					sizeof(wr->ethmacsrc) +
					sizeof(wr->ethtype) +
					sizeof(wr->vlantci));

	/*
	 * The chip minimum packet length is 10 octets but the firmware
	 * command that we are using requires that we copy the Ethernet header
	 * (including the VLAN tag) into the header so we reject anything
	 * smaller than that ...
	 */
	if (unlikely(skb->len < fw_hdr_copy_len))
		goto out_free;

	/*
	 * Figure out which TX Queue we're going to use.
	 */
	pi = netdev_priv(dev);
	adapter = pi->adapter;
	qidx = skb_get_queue_mapping(skb);
	BUG_ON(qidx >= pi->nqsets);
	txq = &adapter->sge.ethtxq[pi->first_qset + qidx];

	/*
	 * Take this opportunity to reclaim any TX Descriptors whose DMA
	 * transfers have completed.
	 */
	reclaim_completed_tx(adapter, &txq->q, true);

	/*
	 * Calculate the number of flits and TX Descriptors we're going to
	 * need along with how many TX Descriptors will be left over after
	 * we inject our Work Request.
	 */
	flits = calc_tx_flits(skb);
	ndesc = flits_to_desc(flits);
	credits = txq_avail(&txq->q) - ndesc;

	if (unlikely(credits < 0)) {
		/*
		 * Not enough room for this packet's Work Request.  Stop the
		 * TX Queue and return a "busy" condition.  The queue will get
		 * started later on when the firmware informs us that space
		 * has opened up.
		 */
		txq_stop(txq);
		dev_err(adapter->pdev_dev,
			"%s: TX ring %u full while queue awake!\n",
			dev->name, qidx);
		return NETDEV_TX_BUSY;
	}

	if (!is_eth_imm(skb) &&
	    unlikely(map_skb(adapter->pdev_dev, skb, addr) < 0)) {
		/*
		 * We need to map the skb into PCI DMA space (because it can't
		 * be in-lined directly into the Work Request) and the mapping
		 * operation failed.  Record the error and drop the packet.
		 */
		txq->mapping_err++;
		goto out_free;
	}

	wr_mid = FW_WR_LEN16_V(DIV_ROUND_UP(flits, 2));
	if (unlikely(credits < ETHTXQ_STOP_THRES)) {
		/*
		 * After we're done injecting the Work Request for this
		 * packet, we'll be below our "stop threshold" so stop the TX
		 * Queue now and schedule a request for an SGE Egress Queue
		 * Update message.  The queue will get started later on when
		 * the firmware processes this Work Request and sends us an
		 * Egress Queue Status Update message indicating that space
		 * has opened up.
		 */
		txq_stop(txq);
		wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
	}

	/*
	 * Start filling in our Work Request.  Note that we do _not_ handle
	 * the WR Header wrapping around the TX Descriptor Ring.  If our
	 * maximum header size ever exceeds one TX Descriptor, we'll need to
	 * do something else here.
	 */
	BUG_ON(DIV_ROUND_UP(ETHTXQ_MAX_HDR, TXD_PER_EQ_UNIT) > 1);
	wr = (void *)&txq->q.desc[txq->q.pidx];
	wr->equiq_to_len16 = cpu_to_be32(wr_mid);
	wr->r3[0] = cpu_to_be32(0);
	wr->r3[1] = cpu_to_be32(0);
	skb_copy_from_linear_data(skb, (void *)wr->ethmacdst, fw_hdr_copy_len);
	end = (u64 *)wr + flits;

	/*
	 * If this is a Large Send Offload packet we'll put in an LSO CPL
	 * message with an encapsulated TX Packet CPL message.  Otherwise we
	 * just use a TX Packet CPL message.
	 */
	ssi = skb_shinfo(skb);
	if (ssi->gso_size) {
		struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1);
		bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0;
		int l3hdr_len = skb_network_header_len(skb);
		int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;

		wr->op_immdlen =
			cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_PKT_VM_WR) |
				    FW_WR_IMMDLEN_V(sizeof(*lso) +
						    sizeof(*cpl)));
		/*
		 * Fill in the LSO CPL message.
		 */
		lso->lso_ctrl =
			cpu_to_be32(LSO_OPCODE(CPL_TX_PKT_LSO) |
				    LSO_FIRST_SLICE |
				    LSO_LAST_SLICE |
				    LSO_IPV6(v6) |
				    LSO_ETHHDR_LEN(eth_xtra_len / 4) |
				    LSO_IPHDR_LEN(l3hdr_len / 4) |
				    LSO_TCPHDR_LEN(tcp_hdr(skb)->doff));
		lso->ipid_ofst = cpu_to_be16(0);
		lso->mss = cpu_to_be16(ssi->gso_size);
		lso->seqno_offset = cpu_to_be32(0);
		if (is_t4(adapter->params.chip))
			lso->len = cpu_to_be32(skb->len);
		else
			lso->len = cpu_to_be32(LSO_T5_XFER_SIZE(skb->len));

		/*
		 * Set up TX Packet CPL pointer, control word and perform
		 * accounting.
		 */
		cpl = (void *)(lso + 1);
		cntrl = (TXPKT_CSUM_TYPE(v6 ? TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
			 TXPKT_IPHDR_LEN(l3hdr_len) |
			 TXPKT_ETHHDR_LEN(eth_xtra_len));
		txq->tso++;
		txq->tx_cso += ssi->gso_segs;
	} else {
		int len;

		len = is_eth_imm(skb) ? skb->len + sizeof(*cpl) : sizeof(*cpl);
		wr->op_immdlen =
			cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_PKT_VM_WR) |
				    FW_WR_IMMDLEN_V(len));

		/*
		 * Set up TX Packet CPL pointer, control word and perform
		 * accounting.
		 */
		cpl = (void *)(wr + 1);
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			cntrl = hwcsum(skb) | TXPKT_IPCSUM_DIS;
			txq->tx_cso++;
		} else
			cntrl = TXPKT_L4CSUM_DIS | TXPKT_IPCSUM_DIS;
	}

	/*
	 * If there's a VLAN tag present, add that to the list of things to
	 * do in this Work Request.
	 */
	if (skb_vlan_tag_present(skb)) {
		txq->vlan_ins++;
		cntrl |= TXPKT_VLAN_VLD | TXPKT_VLAN(skb_vlan_tag_get(skb));
	}

	/*
	 * Fill in the TX Packet CPL message header.
	 */
	cpl->ctrl0 = cpu_to_be32(TXPKT_OPCODE(CPL_TX_PKT_XT) |
				 TXPKT_INTF(pi->port_id) |
				 TXPKT_PF(0));
	cpl->pack = cpu_to_be16(0);
	cpl->len = cpu_to_be16(skb->len);
	cpl->ctrl1 = cpu_to_be64(cntrl);

#ifdef T4_TRACE
	T4_TRACE5(adapter->tb[txq->q.cntxt_id & 7],
		  "eth_xmit: ndesc %u, credits %u, pidx %u, len %u, frags %u",
		  ndesc, credits, txq->q.pidx, skb->len, ssi->nr_frags);
#endif

	/*
	 * Fill in the body of the TX Packet CPL message with either in-lined
	 * data or a Scatter/Gather List.
	 */
	if (is_eth_imm(skb)) {
		/*
		 * In-line the packet's data and free the skb since we don't
		 * need it any longer.
		 */
		inline_tx_skb(skb, &txq->q, cpl + 1);
		dev_consume_skb_any(skb);
	} else {
		/*
		 * Write the skb's Scatter/Gather list into the TX Packet CPL
		 * message and retain a pointer to the skb so we can free it
		 * later when its DMA completes.  (We store the skb pointer
		 * in the Software Descriptor corresponding to the last TX
		 * Descriptor used by the Work Request.)
		 *
		 * The retained skb will be freed when the corresponding TX
		 * Descriptors are reclaimed after their DMAs complete.
		 * However, this could take quite a while since, in general,
		 * the hardware is set up to be lazy about sending DMA
		 * completion notifications to us and we mostly perform TX
		 * reclaims in the transmit routine.
		 *
		 * This is good for performance but means that we rely on new
		 * TX packets arriving to run the destructors of completed
		 * packets, which open up space in their sockets' send queues.
		 * Sometimes we do not get such new packets causing TX to
		 * stall.  A single UDP transmitter is a good example of this
		 * situation.  We have a clean up timer that periodically
		 * reclaims completed packets but it doesn't run often enough
		 * (nor do we want it to) to prevent lengthy stalls.  A
		 * solution to this problem is to run the destructor early,
		 * after the packet is queued but before it's DMAd.  A con is
		 * that we lie to socket memory accounting, but the amount of
		 * extra memory is reasonable (limited by the number of TX
		 * descriptors), the packets do actually get freed quickly by
		 * new packets almost always, and for protocols like TCP that
		 * wait for acks to really free up the data the extra memory
		 * is even less.  On the positive side we run the destructors
		 * on the sending CPU rather than on a potentially different
		 * completing CPU, usually a good thing.
		 *
		 * Run the destructor before telling the DMA engine about the
		 * packet to make sure it doesn't complete and get freed
		 * prematurely.
		 */
		struct ulptx_sgl *sgl = (struct ulptx_sgl *)(cpl + 1);
		struct sge_txq *tq = &txq->q;
		int last_desc;

		/*
		 * If the Work Request header was an exact multiple of our TX
		 * Descriptor length, then it's possible that the starting SGL
		 * pointer lines up exactly with the end of our TX Descriptor
		 * ring.  If that's the case, wrap around to the beginning
		 * here ...
		 */
		if (unlikely((void *)sgl == (void *)tq->stat)) {
			sgl = (void *)tq->desc;
			end = ((void *)tq->desc + ((void *)end -
						   (void *)tq->stat));
		}

		write_sgl(skb, tq, sgl, end, 0, addr);
		skb_orphan(skb);

		last_desc = tq->pidx + ndesc - 1;
		if (last_desc >= tq->size)
			last_desc -= tq->size;
		tq->sdesc[last_desc].skb = skb;
		tq->sdesc[last_desc].sgl = sgl;
	}

	/*
	 * Advance our internal TX Queue state, tell the hardware about
	 * the new TX descriptors and return success.
	 */
	txq_advance(&txq->q, ndesc);
	dev->trans_start = jiffies;
	ring_tx_db(adapter, &txq->q, ndesc);
	return NETDEV_TX_OK;

out_free:
	/*
	 * An error of some sort happened.  Free the TX skb and tell the
	 * OS that we've "dealt" with the packet ...
	 */
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

/**
 *	copy_frags - copy fragments from gather list into skb_shared_info
 *	@skb: destination skb
 *	@gl: source internal packet gather list
 *	@offset: packet start offset in first page
 *
 *	Copy an internal packet gather list into a Linux skb_shared_info
 *	structure.
 */
static inline void copy_frags(struct sk_buff *skb,
			      const struct pkt_gl *gl,
			      unsigned int offset)
{
	unsigned int i;

	/* usually there's just one frag */
	__skb_fill_page_desc(skb, 0, gl->frags[0].page,
			     gl->frags[0].offset + offset,
			     gl->frags[0].size - offset);
	skb_shinfo(skb)->nr_frags = gl->nfrags;
	for (i = 1; i < gl->nfrags; i++)
		__skb_fill_page_desc(skb, i, gl->frags[i].page,
				     gl->frags[i].offset,
				     gl->frags[i].size);

	/* get a reference to the last page, we don't own it */
	get_page(gl->frags[gl->nfrags - 1].page);
}

/**
 *	t4vf_pktgl_to_skb - build an sk_buff from a packet gather list
 *	@gl: the gather list
 *	@skb_len: size of sk_buff main body if it carries fragments
 *	@pull_len: amount of data to move to the sk_buff's main body
 *
 *	Builds an sk_buff from the given packet gather list.  Returns the
 *	sk_buff or %NULL if sk_buff allocation failed.
 */
static struct sk_buff *t4vf_pktgl_to_skb(const struct pkt_gl *gl,
					 unsigned int skb_len,
					 unsigned int pull_len)
{
	struct sk_buff *skb;

	/*
	 * If the ingress packet is small enough, allocate an skb large enough
	 * for all of the data and copy it inline.  Otherwise, allocate an skb
	 * with enough room to pull in the header and reference the rest of
	 * the data via the skb fragment list.
	 *
	 * Below we rely on RX_COPY_THRES being less than the smallest Rx
	 * buffer size, which is expected since buffers are at least
	 * PAGE_SIZEd.  In this case packets up to RX_COPY_THRES have only one
	 * fragment.
	 */
	if (gl->tot_len <= RX_COPY_THRES) {
		/* small packets have only one fragment */
		skb = alloc_skb(gl->tot_len, GFP_ATOMIC);
		if (unlikely(!skb))
			goto out;
		__skb_put(skb, gl->tot_len);
		skb_copy_to_linear_data(skb, gl->va, gl->tot_len);
	} else {
		skb = alloc_skb(skb_len, GFP_ATOMIC);
		if (unlikely(!skb))
			goto out;
		__skb_put(skb, pull_len);
		skb_copy_to_linear_data(skb, gl->va, pull_len);

		copy_frags(skb, gl, pull_len);
		skb->len = gl->tot_len;
		skb->data_len = skb->len - pull_len;
		skb->truesize += skb->data_len;
	}

out:
	return skb;
}

/**
 *	t4vf_pktgl_free - free a packet gather list
 *	@gl: the gather list
 *
 *	Releases the pages of a packet gather list.  We do not own the last
 *	page on the list and do not free it.
 */
static void t4vf_pktgl_free(const struct pkt_gl *gl)
{
	int frag;

	frag = gl->nfrags - 1;
	while (frag--)
		put_page(gl->frags[frag].page);
}

/**
 *	do_gro - perform Generic Receive Offload ingress packet processing
 *	@rxq: ingress RX Ethernet Queue
 *	@gl: gather list for ingress packet
 *	@pkt: CPL header for last packet fragment
 *
 *	Perform Generic Receive Offload (GRO) ingress packet processing.
 *	We use the standard Linux GRO interfaces for this.
 */
static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
		   const struct cpl_rx_pkt *pkt)
{
	struct adapter *adapter = rxq->rspq.adapter;
	struct sge *s = &adapter->sge;
	int ret;
	struct sk_buff *skb;

	skb = napi_get_frags(&rxq->rspq.napi);
	if (unlikely(!skb)) {
		t4vf_pktgl_free(gl);
		rxq->stats.rx_drops++;
		return;
	}

	copy_frags(skb, gl, s->pktshift);
	skb->len = gl->tot_len - s->pktshift;
	skb->data_len = skb->len;
	skb->truesize += skb->data_len;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxq->rspq.idx);

	if (pkt->vlan_ex) {
		__vlan_hwaccel_put_tag(skb, cpu_to_be16(ETH_P_8021Q),
				       be16_to_cpu(pkt->vlan));
		rxq->stats.vlan_ex++;
	}
	ret = napi_gro_frags(&rxq->rspq.napi);

	if (ret == GRO_HELD)
		rxq->stats.lro_pkts++;
	else if (ret == GRO_MERGED || ret == GRO_MERGED_FREE)
		rxq->stats.lro_merged++;
	rxq->stats.rx_cso++;
}

/**
 *	t4vf_ethrx_handler - process an ingress ethernet packet
 *	@rspq: the response queue that received the packet
 *	@rsp: the response queue descriptor holding the RX_PKT message
 *	@gl: the gather list of packet fragments
 *
 *	Process an ingress ethernet packet and deliver it to the stack.
 */
int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
		       const struct pkt_gl *gl)
{
	struct sk_buff *skb;
	const struct cpl_rx_pkt *pkt = (void *)rsp;
	bool csum_ok = pkt->csum_calc && !pkt->err_vec &&
		       (rspq->netdev->features & NETIF_F_RXCSUM);
	struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
	struct adapter *adapter = rspq->adapter;
	struct sge *s = &adapter->sge;

	/*
	 * If this is a good TCP packet and we have Generic Receive Offload
	 * enabled, handle the packet in the GRO path.
	 */
	if ((pkt->l2info & cpu_to_be32(RXF_TCP_F)) &&
	    (rspq->netdev->features & NETIF_F_GRO) && csum_ok &&
	    !pkt->ip_frag) {
		do_gro(rxq, gl, pkt);
		return 0;
	}

	/*
	 * Convert the Packet Gather List into an skb.
	 */
	skb = t4vf_pktgl_to_skb(gl, RX_SKB_LEN, RX_PULL_LEN);
	if (unlikely(!skb)) {
		t4vf_pktgl_free(gl);
		rxq->stats.rx_drops++;
		return 0;
	}
	__skb_pull(skb, s->pktshift);
	skb->protocol = eth_type_trans(skb, rspq->netdev);
	skb_record_rx_queue(skb, rspq->idx);
	rxq->stats.pkts++;

	if (csum_ok && !pkt->err_vec &&
	    (be32_to_cpu(pkt->l2info) & (RXF_UDP_F | RXF_TCP_F))) {
		if (!pkt->ip_frag)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else {
			__sum16 c = (__force __sum16)pkt->csum;
			skb->csum = csum_unfold(c);
			skb->ip_summed = CHECKSUM_COMPLETE;
		}
		rxq->stats.rx_cso++;
	} else
		skb_checksum_none_assert(skb);

	if (pkt->vlan_ex) {
		rxq->stats.vlan_ex++;
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
				       be16_to_cpu(pkt->vlan));
	}

	netif_receive_skb(skb);

	return 0;
}

/**
 *	is_new_response - check if a response is newly written
 *	@rc: the response control descriptor
 *	@rspq: the response queue
 *
 *	Returns true if a response descriptor contains a yet unprocessed
 *	response.
 */
static inline bool is_new_response(const struct rsp_ctrl *rc,
				   const struct sge_rspq *rspq)
{
	return RSPD_GEN(rc->type_gen) == rspq->gen;
}
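
/*
 * The generation bit scheme: the hardware stamps each response with the
 * queue's current generation value, and rspq->gen is flipped each time the
 * software consumer index wraps (see rspq_next() below), so stale
 * descriptors left over from the previous pass fail the comparison above.
 */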

/**
 *	restore_rx_bufs - put back a packet's RX buffers
 *	@gl: the packet gather list
 *	@fl: the SGE Free List
 *	@frags: how many fragments in @gl
 *
 *	Called when we find out that the current packet, @gl, can't be
 *	processed right away for some reason.  This is a very rare event and
 *	there's no effort to make this suspension/resumption process
 *	particularly efficient.
 *
 *	We implement the suspension by putting all of the RX buffers associated
 *	with the current packet back on the original Free List.  The buffers
 *	have already been unmapped and are left unmapped, we mark them as
 *	unmapped in order to prevent further unmapping attempts.  (Effectively
 *	this function undoes the series of @unmap_rx_buf calls which were done
 *	to create the current packet's gather list.)  This leaves us ready to
 *	restart processing of the packet the next time we start processing the
 *	RX Queue.
 */
static void restore_rx_bufs(const struct pkt_gl *gl, struct sge_fl *fl,
			    int frags)
{
	struct rx_sw_desc *sdesc;

	while (frags--) {
		if (fl->cidx == 0)
			fl->cidx = fl->size - 1;
		else
			fl->cidx--;
		sdesc = &fl->sdesc[fl->cidx];
		sdesc->page = gl->frags[frags].page;
		sdesc->dma_addr |= RX_UNMAPPED_BUF;
		fl->avail++;
	}
}

/**
 *	rspq_next - advance to the next entry in a response queue
 *	@rspq: the response queue
 *
 *	Updates the state of a response queue to advance it to the next entry.
 */
static inline void rspq_next(struct sge_rspq *rspq)
{
	rspq->cur_desc = (void *)rspq->cur_desc + rspq->iqe_len;
	if (unlikely(++rspq->cidx == rspq->size)) {
		rspq->cidx = 0;
		rspq->gen ^= 1;
		rspq->cur_desc = rspq->desc;
	}
}

/**
 *	process_responses - process responses from an SGE response queue
 *	@rspq: the ingress response queue to process
 *	@budget: how many responses can be processed in this round
 *
 *	Process responses from a Scatter Gather Engine response queue up to
 *	the supplied budget.  Responses include received packets as well as
 *	control messages from firmware or hardware.
 *
 *	Additionally choose the interrupt holdoff time for the next interrupt
 *	on this queue.  If the system is under memory shortage use a fairly
 *	long delay to help recovery.
 */
static int process_responses(struct sge_rspq *rspq, int budget)
{
	struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
	struct adapter *adapter = rspq->adapter;
	struct sge *s = &adapter->sge;
	int budget_left = budget;

	while (likely(budget_left)) {
		int ret, rsp_type;
		const struct rsp_ctrl *rc;

		rc = (void *)rspq->cur_desc + (rspq->iqe_len - sizeof(*rc));
		if (!is_new_response(rc, rspq))
			break;

		/*
		 * Figure out what kind of response we've received from the
		 * SGE.
		 */
		dma_rmb();
		rsp_type = RSPD_TYPE(rc->type_gen);
		if (likely(rsp_type == RSP_TYPE_FLBUF)) {
			struct page_frag *fp;
			struct pkt_gl gl;
			const struct rx_sw_desc *sdesc;
			u32 bufsz, frag;
			u32 len = be32_to_cpu(rc->pldbuflen_qid);

			/*
			 * If we get a "new buffer" message from the SGE we
			 * need to move on to the next Free List buffer.
			 */
			if (len & RSPD_NEWBUF) {
				/*
				 * We get one "new buffer" message when we
				 * first start up a queue so we need to ignore
				 * it when our offset into the buffer is 0.
				 */
				if (likely(rspq->offset > 0)) {
					free_rx_bufs(rspq->adapter, &rxq->fl,
						     1);
					rspq->offset = 0;
				}
				len = RSPD_LEN(len);
			}
			gl.tot_len = len;

			/*
			 * Gather packet fragments.
			 */
			for (frag = 0, fp = gl.frags; /**/; frag++, fp++) {
				BUG_ON(frag >= MAX_SKB_FRAGS);
				BUG_ON(rxq->fl.avail == 0);
				sdesc = &rxq->fl.sdesc[rxq->fl.cidx];
				bufsz = get_buf_size(adapter, sdesc);
				fp->page = sdesc->page;
				fp->offset = rspq->offset;
				fp->size = min(bufsz, len);
				len -= fp->size;
				if (!len)
					break;
				unmap_rx_buf(rspq->adapter, &rxq->fl);
			}
			gl.nfrags = frag+1;

			/*
			 * Last buffer remains mapped so explicitly make it
			 * coherent for CPU access and start preloading first
			 * cache line ...
			 */
			dma_sync_single_for_cpu(rspq->adapter->pdev_dev,
						get_buf_addr(sdesc),
						fp->size, DMA_FROM_DEVICE);
			gl.va = (page_address(gl.frags[0].page) +
				 gl.frags[0].offset);
			prefetch(gl.va);

			/*
			 * Hand the new ingress packet to the handler for
			 * this Response Queue.
			 */
			ret = rspq->handler(rspq, rspq->cur_desc, &gl);
			if (likely(ret == 0))
				rspq->offset += ALIGN(fp->size, s->fl_align);
			else
				restore_rx_bufs(&gl, &rxq->fl, frag);
		} else if (likely(rsp_type == RSP_TYPE_CPL)) {
			ret = rspq->handler(rspq, rspq->cur_desc, NULL);
		} else {
			WARN_ON(rsp_type > RSP_TYPE_CPL);
			ret = 0;
		}

		if (unlikely(ret)) {
			/*
			 * Couldn't process descriptor, back off for recovery.
			 * We use the SGE's last timer which has the longest
			 * interrupt coalescing value ...
			 */
			const int NOMEM_TIMER_IDX = SGE_NTIMERS - 1;

			rspq->next_intr_params =
				QINTR_TIMER_IDX(NOMEM_TIMER_IDX);
			break;
		}

		rspq_next(rspq);
		budget_left--;
	}

	/*
	 * If this is a Response Queue with an associated Free List and
	 * at least two Egress Queue units available in the Free List
	 * for new buffer pointers, refill the Free List.
	 */
	if (rspq->offset >= 0 &&
	    rxq->fl.size - rxq->fl.avail >= 2*FL_PER_EQ_UNIT)
		__refill_fl(rspq->adapter, &rxq->fl);
	return budget - budget_left;
}

/**
 *	napi_rx_handler - the NAPI handler for RX processing
 *	@napi: the napi instance
 *	@budget: how many packets we can process in this round
 *
 *	Handler for new data events when using NAPI.  This does not need any
 *	locking or protection from interrupts as data interrupts are off at
 *	this point and other adapter interrupts do not interfere (the latter
 *	is not a concern at all with MSI-X as non-data interrupts then have
 *	a separate handler).
 */
static int napi_rx_handler(struct napi_struct *napi, int budget)
{
	unsigned int intr_params;
	struct sge_rspq *rspq = container_of(napi, struct sge_rspq, napi);
	int work_done = process_responses(rspq, budget);
	u32 val;

	if (likely(work_done < budget)) {
		napi_complete(napi);
		intr_params = rspq->next_intr_params;
		rspq->next_intr_params = rspq->intr_params;
	} else
		intr_params = QINTR_TIMER_IDX(SGE_TIMER_UPD_CIDX);

	if (unlikely(work_done == 0))
		rspq->unhandled_irqs++;

	val = CIDXINC_V(work_done) | SEINTARM_V(intr_params);
	if (is_t4(rspq->adapter->params.chip)) {
		t4_write_reg(rspq->adapter,
			     T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
			     val | INGRESSQID_V((u32)rspq->cntxt_id));
	} else {
		writel(val | INGRESSQID_V(rspq->bar2_qid),
		       rspq->bar2_addr + SGE_UDB_GTS);
		wmb();
	}
	return work_done;
}

/*
 * The MSI-X interrupt handler for an SGE response queue for the NAPI case
 * (i.e., response queue serviced by NAPI polling).
 */
irqreturn_t t4vf_sge_intr_msix(int irq, void *cookie)
{
	struct sge_rspq *rspq = cookie;

	napi_schedule(&rspq->napi);
	return IRQ_HANDLED;
}
/*
 * Process the indirect interrupt entries in the interrupt queue and kick off
 * NAPI for each queue that has generated an entry.
 */
static unsigned int process_intrq(struct adapter *adapter)
{
	struct sge *s = &adapter->sge;
	struct sge_rspq *intrq = &s->intrq;
	unsigned int work_done;
	u32 val;

	spin_lock(&adapter->sge.intrq_lock);
	for (work_done = 0; ; work_done++) {
		const struct rsp_ctrl *rc;
		unsigned int qid, iq_idx;
		struct sge_rspq *rspq;

		/*
		 * Grab the next response from the interrupt queue and bail
		 * out if it's not a new response.
		 */
		rc = (void *)intrq->cur_desc + (intrq->iqe_len - sizeof(*rc));
		if (!is_new_response(rc, intrq))
			break;

		/*
		 * If the response isn't a forwarded interrupt message issue
		 * an error and go on to the next response message.  This
		 * should never happen ...
		 */
		if (unlikely(RSPD_TYPE(rc->type_gen) != RSP_TYPE_INTR)) {
			dev_err(adapter->pdev_dev,
				"Unexpected INTRQ response type %d\n",
				RSPD_TYPE(rc->type_gen));
			continue;
		}

		/*
		 * Extract the Queue ID from the interrupt message and perform
		 * sanity checking to make sure it really refers to one of our
		 * Ingress Queues which is active and matches the queue's ID.
		 * None of these error conditions should ever happen so we may
		 * want to either make them fatal and/or conditionalized under
		 * DEBUG.
		 */
		qid = RSPD_QID(be32_to_cpu(rc->pldbuflen_qid));
		iq_idx = IQ_IDX(s, qid);
		if (unlikely(iq_idx >= MAX_INGQ)) {
			dev_err(adapter->pdev_dev,
				"Ingress QID %d out of range\n", qid);
			continue;
		}
		rspq = s->ingr_map[iq_idx];
		if (unlikely(rspq == NULL)) {
			dev_err(adapter->pdev_dev,
				"Ingress QID %d RSPQ=NULL\n", qid);
			continue;
		}
		if (unlikely(rspq->abs_id != qid)) {
			dev_err(adapter->pdev_dev,
				"Ingress QID %d refers to RSPQ %d\n",
				qid, rspq->abs_id);
			continue;
		}

		/*
		 * Schedule NAPI processing on the indicated Response Queue
		 * and move on to the next entry in the Forwarded Interrupt
		 * Queue.
		 */
		napi_schedule(&rspq->napi);
		rspq_next(intrq);
	}
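	/* Tell the hardware how many Forwarded Interrupt Queue entries we
	 * have consumed (CIDXINC) and re-arm the queue's interrupt holdoff
	 * parameters (SEINTARM) in a single Going To Sleep doorbell write.
	 */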
	val = CIDXINC_V(work_done) | SEINTARM_V(intrq->intr_params);
	if (is_t4(adapter->params.chip))
		t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
			     val | INGRESSQID_V(intrq->cntxt_id));
	else {
		writel(val | INGRESSQID_V(intrq->bar2_qid),
		       intrq->bar2_addr + SGE_UDB_GTS);
		wmb();
	}

	spin_unlock(&adapter->sge.intrq_lock);

	return work_done;
}
/*
 * The MSI interrupt handler handles data events from SGE response queues as
 * well as error and other async events as they all use the same MSI vector.
 */
static irqreturn_t t4vf_intr_msi(int irq, void *cookie)
{
	struct adapter *adapter = cookie;

	process_intrq(adapter);
	return IRQ_HANDLED;
}
/**
 *	t4vf_intr_handler - select the top-level interrupt handler
 *	@adapter: the adapter
 *
 *	Selects the top-level interrupt handler based on the type of interrupts
 *	(MSI-X or MSI) the adapter is using.
 */
irq_handler_t t4vf_intr_handler(struct adapter *adapter)
{
	BUG_ON((adapter->flags & (USING_MSIX|USING_MSI)) == 0);
	if (adapter->flags & USING_MSIX)
		return t4vf_sge_intr_msix;
	else
		return t4vf_intr_msi;
}
/**
 *	sge_rx_timer_cb - perform periodic maintenance of SGE RX queues
 *	@data: the adapter
 *
 *	Runs periodically from a timer to perform maintenance of SGE RX queues.
 *
 *	a) Replenishes RX queues that have run out due to memory shortage.
 *	Normally new RX buffers are added when existing ones are consumed but
 *	when out of memory a queue can become empty.  We schedule NAPI to do
 *	the actual refill.
 */
static void sge_rx_timer_cb(unsigned long data)
{
	struct adapter *adapter = (struct adapter *)data;
	struct sge *s = &adapter->sge;
	unsigned int i;

	/*
	 * Scan the "Starving Free Lists" flag array looking for any Free
	 * Lists in need of more free buffers.  If we find one and it's not
	 * being actively polled, then bump its "starving" counter and attempt
	 * to refill it.  If we're successful in adding enough buffers to push
	 * the Free List over the starving threshold, then we can clear its
	 * "starving" status.
	 */
	for (i = 0; i < ARRAY_SIZE(s->starving_fl); i++) {
		unsigned long m;

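		/* Walk the set bits of this word of the "starving" bitmap;
		 * m &= m - 1 clears the lowest set bit on each iteration, so
		 * we visit each starving Free List exactly once.
		 */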
		for (m = s->starving_fl[i]; m; m &= m - 1) {
			unsigned int id = __ffs(m) + i * BITS_PER_LONG;
			struct sge_fl *fl = s->egr_map[id];

			clear_bit(id, s->starving_fl);
			smp_mb__after_atomic();

			/*
			 * Since we are accessing fl without a lock there's a
			 * small probability of a false positive where we
			 * schedule napi but the FL is no longer starving.
			 * No biggie.
			 */
			if (fl_starving(adapter, fl)) {
				struct sge_eth_rxq *rxq;

				rxq = container_of(fl, struct sge_eth_rxq, fl);
				if (napi_reschedule(&rxq->rspq.napi))
					fl->starving++;
				else
					set_bit(id, s->starving_fl);
			}
		}
	}

	/*
	 * Reschedule the next scan for starving Free Lists ...
	 */
	mod_timer(&s->rx_timer, jiffies + RX_QCHECK_PERIOD);
}
/**
 *	sge_tx_timer_cb - perform periodic maintenance of SGE Tx queues
 *	@data: the adapter
 *
 *	Runs periodically from a timer to perform maintenance of SGE TX queues.
 *
 *	b) Reclaims completed Tx packets for the Ethernet queues.  Normally
 *	packets are cleaned up by new Tx packets, this timer cleans up packets
 *	when no new packets are being submitted.  This is essential for pktgen,
 *	etc.
 */
static void sge_tx_timer_cb(unsigned long data)
{
	struct adapter *adapter = (struct adapter *)data;
	struct sge *s = &adapter->sge;
	unsigned int i, budget;

	budget = MAX_TIMER_TX_RECLAIM;
	i = s->ethtxq_rover;
	do {
		struct sge_eth_txq *txq = &s->ethtxq[i];

		if (reclaimable(&txq->q) && __netif_tx_trylock(txq->txq)) {
			int avail = reclaimable(&txq->q);

			if (avail > budget)
				avail = budget;

			free_tx_desc(adapter, &txq->q, avail, true);
			txq->q.in_use -= avail;
			__netif_tx_unlock(txq->txq);

			budget -= avail;
			if (!budget)
				break;
		}

		i++;
		if (i >= s->ethqsets)
			i = 0;
	} while (i != s->ethtxq_rover);
	s->ethtxq_rover = i;

	/*
	 * If we found too many reclaimable packets schedule a timer in the
	 * near future to continue where we left off.  Otherwise the next timer
	 * will be at its normal interval.
	 */
	mod_timer(&s->tx_timer, jiffies + (budget ? TX_QCHECK_PERIOD : 2));
}
/**
 *	bar2_address - return the BAR2 address for an SGE Queue's Registers
 *	@adapter: the adapter
 *	@qid: the SGE Queue ID
 *	@qtype: the SGE Queue Type (Egress or Ingress)
 *	@pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
 *
 *	Returns the BAR2 address for the SGE Queue Registers associated with
 *	@qid.  If BAR2 SGE Registers aren't available, returns NULL.  Also
 *	returns the BAR2 Queue ID to be used with writes to the BAR2 SGE
 *	Queue Registers.  If the BAR2 Queue ID is 0, then "Inferred Queue ID"
 *	Registers are supported (e.g. the Write Combining Doorbell Buffer).
 */
static void __iomem *bar2_address(struct adapter *adapter,
				  unsigned int qid,
				  enum t4_bar2_qtype qtype,
				  unsigned int *pbar2_qid)
{
	u64 bar2_qoffset;
	int ret;

	ret = t4_bar2_sge_qregs(adapter, qid, qtype,
				&bar2_qoffset, pbar2_qid);
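	/* A non-zero return means BAR2 SGE Queue registers aren't available
	 * for this queue (e.g. on a T4); callers fall back to the legacy
	 * doorbell mechanism when we hand back NULL.
	 */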
	if (ret)
		return NULL;

	return adapter->bar2 + bar2_qoffset;
}
/**
 *	t4vf_sge_alloc_rxq - allocate an SGE RX Queue
 *	@adapter: the adapter
 *	@rspq: pointer to the new rxq's Response Queue to be filled in
 *	@iqasynch: if 0, a normal rspq; if 1, an asynchronous event queue
 *	@dev: the network device associated with the new rspq
 *	@intr_dest: MSI-X vector index (overridden in MSI mode)
 *	@fl: pointer to the new rxq's Free List to be filled in
 *	@hnd: the interrupt handler to invoke for the rspq
 */
int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
		       bool iqasynch, struct net_device *dev,
		       int intr_dest,
		       struct sge_fl *fl, rspq_handler_t hnd)
{
	struct sge *s = &adapter->sge;
	struct port_info *pi = netdev_priv(dev);
	struct fw_iq_cmd cmd, rpl;
	int ret, iqandst, flsz = 0;

	/*
	 * If we're using MSI interrupts and we're not initializing the
	 * Forwarded Interrupt Queue itself, then set up this queue for
	 * indirect interrupts to the Forwarded Interrupt Queue.  Obviously
	 * the Forwarded Interrupt Queue must be set up before any other
	 * ingress queue ...
	 */
	if ((adapter->flags & USING_MSI) && rspq != &adapter->sge.intrq) {
		iqandst = SGE_INTRDST_IQ;
		intr_dest = adapter->sge.intrq.abs_id;
	} else
		iqandst = SGE_INTRDST_PCI;

	/*
	 * Allocate the hardware ring for the Response Queue.  The size needs
	 * to be a multiple of 16 which includes the mandatory status entry
	 * (regardless of whether the Status Page capabilities are enabled or
	 * not).
	 */
	rspq->size = roundup(rspq->size, 16);
	rspq->desc = alloc_ring(adapter->pdev_dev, rspq->size, rspq->iqe_len,
				0, &rspq->phys_addr, NULL, 0);
	if (!rspq->desc)
		return -ENOMEM;

	/*
	 * Fill in the Ingress Queue Command.  Note: Ideally this code would
	 * be in t4vf_hw.c but there are so many parameters and dependencies
	 * on our Linux SGE state that we would end up having to pass tons of
	 * parameters.  We'll have to think about how this might be migrated
	 * into OS-independent common code ...
	 */
	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_IQ_CMD) |
				    FW_CMD_REQUEST_F |
				    FW_CMD_WRITE_F |
				    FW_CMD_EXEC_F);
	cmd.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_ALLOC_F |
					 FW_IQ_CMD_IQSTART_F |
					 FW_LEN16(cmd));
	cmd.type_to_iqandstindex =
		cpu_to_be32(FW_IQ_CMD_TYPE_V(FW_IQ_TYPE_FL_INT_CAP) |
			    FW_IQ_CMD_IQASYNCH_V(iqasynch) |
			    FW_IQ_CMD_VIID_V(pi->viid) |
			    FW_IQ_CMD_IQANDST_V(iqandst) |
			    FW_IQ_CMD_IQANUS_V(1) |
			    FW_IQ_CMD_IQANUD_V(SGE_UPDATEDEL_INTR) |
			    FW_IQ_CMD_IQANDSTINDEX_V(intr_dest));
	cmd.iqdroprss_to_iqesize =
		cpu_to_be16(FW_IQ_CMD_IQPCIECH_V(pi->port_id) |
			    FW_IQ_CMD_IQGTSMODE_F |
			    FW_IQ_CMD_IQINTCNTTHRESH_V(rspq->pktcnt_idx) |
			    FW_IQ_CMD_IQESIZE_V(ilog2(rspq->iqe_len) - 4));
	cmd.iqsize = cpu_to_be16(rspq->size);
	cmd.iqaddr = cpu_to_be64(rspq->phys_addr);
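	/* The Free List portion of the command is only filled in when the
	 * caller actually attaches a Free List to this Response Queue.
	 */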
	if (fl) {
		/*
		 * Allocate the ring for the hardware free list (with space
		 * for its status page) along with the associated software
		 * descriptor ring.  The free list size needs to be a multiple
		 * of the Egress Queue Unit.
		 */
		fl->size = roundup(fl->size, FL_PER_EQ_UNIT);
		fl->desc = alloc_ring(adapter->pdev_dev, fl->size,
				      sizeof(__be64), sizeof(struct rx_sw_desc),
				      &fl->addr, &fl->sdesc, s->stat_len);
		if (!fl->desc) {
			ret = -ENOMEM;
			goto err;
		}

		/*
		 * Calculate the size of the hardware free list ring plus
		 * Status Page (which the SGE will place after the end of the
		 * free list ring) in Egress Queue Units.
		 */
		flsz = (fl->size / FL_PER_EQ_UNIT +
			s->stat_len / EQ_UNIT);

		/*
		 * Fill in all the relevant firmware Ingress Queue Command
		 * fields for the free list.
		 */
		cmd.iqns_to_fl0congen =
			cpu_to_be32(
				FW_IQ_CMD_FL0HOSTFCMODE_V(SGE_HOSTFCMODE_NONE) |
				FW_IQ_CMD_FL0PACKEN_F |
				FW_IQ_CMD_FL0PADEN_F);
		cmd.fl0dcaen_to_fl0cidxfthresh =
			cpu_to_be16(
				FW_IQ_CMD_FL0FBMIN_V(SGE_FETCHBURSTMIN_64B) |
				FW_IQ_CMD_FL0FBMAX_V(SGE_FETCHBURSTMAX_512B));
		cmd.fl0size = cpu_to_be16(flsz);
		cmd.fl0addr = cpu_to_be64(fl->addr);
	}
	/*
	 * Issue the firmware Ingress Queue Command and extract the results if
	 * it completes successfully.
	 */
	ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
	if (ret)
		goto err;

	netif_napi_add(dev, &rspq->napi, napi_rx_handler, 64);
	rspq->cur_desc = rspq->desc;
	rspq->cidx = 0;
	rspq->gen = 1;
	rspq->next_intr_params = rspq->intr_params;
	rspq->cntxt_id = be16_to_cpu(rpl.iqid);
	rspq->bar2_addr = bar2_address(adapter,
				       rspq->cntxt_id,
				       T4_BAR2_QTYPE_INGRESS,
				       &rspq->bar2_qid);
	rspq->abs_id = be16_to_cpu(rpl.physiqid);
	rspq->size--;			/* subtract status entry */
	rspq->adapter = adapter;
	rspq->netdev = dev;
	rspq->handler = hnd;

	/* set offset to -1 to distinguish ingress queues without FL */
	rspq->offset = fl ? 0 : -1;

	if (fl) {
		fl->cntxt_id = be16_to_cpu(rpl.fl0id);
		fl->avail = 0;
		fl->pend_cred = 0;
		fl->pidx = 0;
		fl->cidx = 0;
		fl->alloc_failed = 0;
		fl->large_alloc_failed = 0;
		fl->starving = 0;

		/* Note, we must initialize the BAR2 Free List User Doorbell
		 * information before refilling the Free List!
		 */
		fl->bar2_addr = bar2_address(adapter,
					     fl->cntxt_id,
					     T4_BAR2_QTYPE_EGRESS,
					     &fl->bar2_qid);

		refill_fl(adapter, fl, fl_cap(fl), GFP_KERNEL);
	}
	return 0;

err:
	/*
	 * An error occurred.  Clean up our partial allocation state and
	 * return the error.
	 */
	if (rspq->desc) {
		dma_free_coherent(adapter->pdev_dev, rspq->size * rspq->iqe_len,
				  rspq->desc, rspq->phys_addr);
		rspq->desc = NULL;
	}
	if (fl && fl->desc) {
		kfree(fl->sdesc);
		fl->sdesc = NULL;
		dma_free_coherent(adapter->pdev_dev, flsz * EQ_UNIT,
				  fl->desc, fl->addr);
		fl->desc = NULL;
	}
	return ret;
}
/**
 *	t4vf_sge_alloc_eth_txq - allocate an SGE Ethernet TX Queue
 *	@adapter: the adapter
 *	@txq: pointer to the new txq to be filled in
 *	@devq: the network TX queue associated with the new txq
 *	@iqid: the relative ingress queue ID to which events relating to
 *		the new txq should be directed
 */
int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq,
			   struct net_device *dev, struct netdev_queue *devq,
			   unsigned int iqid)
{
	struct sge *s = &adapter->sge;
	int ret, nentries;
	struct fw_eq_eth_cmd cmd, rpl;
	struct port_info *pi = netdev_priv(dev);
	/*
	 * Calculate the size of the hardware TX Queue (including the Status
	 * Page on the end of the TX Queue) in units of TX Descriptors.
	 */
	nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);

	/*
	 * Allocate the hardware ring for the TX ring (with space for its
	 * status page) along with the associated software descriptor ring.
	 */
	txq->q.desc = alloc_ring(adapter->pdev_dev, txq->q.size,
				 sizeof(struct tx_desc),
				 sizeof(struct tx_sw_desc),
				 &txq->q.phys_addr, &txq->q.sdesc, s->stat_len);
	if (!txq->q.desc)
		return -ENOMEM;

	/*
	 * Fill in the Egress Queue Command.  Note: As with the direct use of
	 * the firmware Ingress Queue Command above in our RXQ allocation
	 * routine, ideally, this code would be in t4vf_hw.c.  Again, we'll
	 * have to see if there's some reasonable way to parameterize it
	 * into the common code ...
	 */
	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_ETH_CMD) |
				    FW_CMD_REQUEST_F |
				    FW_CMD_WRITE_F |
				    FW_CMD_EXEC_F);
	cmd.alloc_to_len16 = cpu_to_be32(FW_EQ_ETH_CMD_ALLOC_F |
					 FW_EQ_ETH_CMD_EQSTART_F |
					 FW_LEN16(cmd));
	cmd.viid_pkd = cpu_to_be32(FW_EQ_ETH_CMD_AUTOEQUEQE_F |
				   FW_EQ_ETH_CMD_VIID_V(pi->viid));
	cmd.fetchszm_to_iqid =
		cpu_to_be32(FW_EQ_ETH_CMD_HOSTFCMODE_V(SGE_HOSTFCMODE_STPG) |
			    FW_EQ_ETH_CMD_PCIECHN_V(pi->port_id) |
			    FW_EQ_ETH_CMD_IQID_V(iqid));
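	/* FBMIN/FBMAX bound the SGE's descriptor fetch burst sizes for this
	 * Egress Queue; CIDXFTHRESH controls how aggressively the SGE
	 * coalesces consumer index updates back to the host.
	 */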
	cmd.dcaen_to_eqsize =
		cpu_to_be32(FW_EQ_ETH_CMD_FBMIN_V(SGE_FETCHBURSTMIN_64B) |
			    FW_EQ_ETH_CMD_FBMAX_V(SGE_FETCHBURSTMAX_512B) |
			    FW_EQ_ETH_CMD_CIDXFTHRESH_V(
				    SGE_CIDXFLUSHTHRESH_32) |
			    FW_EQ_ETH_CMD_EQSIZE_V(nentries));
	cmd.eqaddr = cpu_to_be64(txq->q.phys_addr);
	/*
	 * Issue the firmware Egress Queue Command and extract the results if
	 * it completes successfully.
	 */
	ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
	if (ret) {
		/*
		 * The firmware Egress Queue Command failed for some reason.
		 * Free up our partial allocation state and return the error.
		 */
		kfree(txq->q.sdesc);
		txq->q.sdesc = NULL;
		dma_free_coherent(adapter->pdev_dev,
				  nentries * sizeof(struct tx_desc),
				  txq->q.desc, txq->q.phys_addr);
		txq->q.desc = NULL;
		return ret;
	}

	txq->q.in_use = 0;
	txq->q.cidx = 0;
	txq->q.pidx = 0;
	txq->q.stat = (void *)&txq->q.desc[txq->q.size];
	txq->q.cntxt_id = FW_EQ_ETH_CMD_EQID_G(be32_to_cpu(rpl.eqid_pkd));
	txq->q.bar2_addr = bar2_address(adapter,
					txq->q.cntxt_id,
					T4_BAR2_QTYPE_EGRESS,
					&txq->q.bar2_qid);
	txq->q.abs_id =
		FW_EQ_ETH_CMD_PHYSEQID_G(be32_to_cpu(rpl.physeqid_pkd));
	txq->txq = devq;
	txq->tso = 0;
	txq->tx_cso = 0;
	txq->vlan_ins = 0;
	txq->q.stops = 0;
	txq->q.restarts = 0;
	txq->mapping_err = 0;
	return 0;
}
/*
 * Free the DMA map resources associated with a TX queue.
 */
static void free_txq(struct adapter *adapter, struct sge_txq *tq)
{
	struct sge *s = &adapter->sge;

	dma_free_coherent(adapter->pdev_dev,
			  tq->size * sizeof(*tq->desc) + s->stat_len,
			  tq->desc, tq->phys_addr);
	tq->cntxt_id = 0;
	tq->sdesc = NULL;
	tq->desc = NULL;
}
/*
 * Free the resources associated with a response queue (possibly including a
 * free list).
 */
static void free_rspq_fl(struct adapter *adapter, struct sge_rspq *rspq,
			 struct sge_fl *fl)
{
	struct sge *s = &adapter->sge;
	unsigned int flid = fl ? fl->cntxt_id : 0xffff;

	t4vf_iq_free(adapter, FW_IQ_TYPE_FL_INT_CAP,
		     rspq->cntxt_id, flid, 0xffff);
	dma_free_coherent(adapter->pdev_dev, (rspq->size + 1) * rspq->iqe_len,
			  rspq->desc, rspq->phys_addr);
	netif_napi_del(&rspq->napi);
	rspq->netdev = NULL;
	rspq->cntxt_id = 0;
	rspq->abs_id = 0;
	rspq->desc = NULL;

	if (fl) {
		free_rx_bufs(adapter, fl, fl->avail);
		dma_free_coherent(adapter->pdev_dev,
				  fl->size * sizeof(*fl->desc) + s->stat_len,
				  fl->desc, fl->addr);
		kfree(fl->sdesc);
		fl->sdesc = NULL;
		fl->cntxt_id = 0;
		fl->desc = NULL;
	}
}
/**
 *	t4vf_free_sge_resources - free SGE resources
 *	@adapter: the adapter
 *
 *	Frees resources used by the SGE queue sets.
 */
void t4vf_free_sge_resources(struct adapter *adapter)
{
	struct sge *s = &adapter->sge;
	struct sge_eth_rxq *rxq = s->ethrxq;
	struct sge_eth_txq *txq = s->ethtxq;
	struct sge_rspq *evtq = &s->fw_evtq;
	struct sge_rspq *intrq = &s->intrq;
	int qs;

	for (qs = 0; qs < adapter->sge.ethqsets; qs++, rxq++, txq++) {
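		/* Only tear down what was actually brought up; a queue set
		 * may have been only partially initialized if allocation
		 * failed part way through setup.
		 */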
		if (rxq->rspq.desc)
			free_rspq_fl(adapter, &rxq->rspq, &rxq->fl);
		if (txq->q.desc) {
			t4vf_eth_eq_free(adapter, txq->q.cntxt_id);
			free_tx_desc(adapter, &txq->q, txq->q.in_use, true);
			kfree(txq->q.sdesc);
			free_txq(adapter, &txq->q);
		}
	}
	if (evtq->desc)
		free_rspq_fl(adapter, evtq, NULL);
	if (intrq->desc)
		free_rspq_fl(adapter, intrq, NULL);
}
/**
 *	t4vf_sge_start - enable SGE operation
 *	@adapter: the adapter
 *
 *	Start tasklets and timers associated with the DMA engine.
 */
void t4vf_sge_start(struct adapter *adapter)
{
	adapter->sge.ethtxq_rover = 0;
	mod_timer(&adapter->sge.rx_timer, jiffies + RX_QCHECK_PERIOD);
	mod_timer(&adapter->sge.tx_timer, jiffies + TX_QCHECK_PERIOD);
}
/**
 *	t4vf_sge_stop - disable SGE operation
 *	@adapter: the adapter
 *
 *	Stop tasklets and timers associated with the DMA engine.  Note that
 *	this is effective only if measures have been taken to disable any HW
 *	events that may restart them.
 */
void t4vf_sge_stop(struct adapter *adapter)
{
	struct sge *s = &adapter->sge;

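	/* The timer callbacks are only installed once t4vf_sge_init() has
	 * run, so check before deleting timers that were never set up.
	 */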
	if (s->rx_timer.function)
		del_timer_sync(&s->rx_timer);
	if (s->tx_timer.function)
		del_timer_sync(&s->tx_timer);
}
/**
 *	t4vf_sge_init - initialize SGE
 *	@adapter: the adapter
 *
 *	Performs SGE initialization needed every time after a chip reset.
 *	We do not initialize any of the queue sets here, instead the driver
 *	top-level must request those individually.  We also do not enable DMA
 *	here, that should be done after the queues have been set up.
 */
int t4vf_sge_init(struct adapter *adapter)
{
	struct sge_params *sge_params = &adapter->params.sge;
	u32 fl0 = sge_params->sge_fl_buffer_size[0];
	u32 fl1 = sge_params->sge_fl_buffer_size[1];
	struct sge *s = &adapter->sge;
	unsigned int ingpadboundary, ingpackboundary;

	/*
	 * Start by vetting the basic SGE parameters which have been set up by
	 * the Physical Function Driver.  Ideally we should be able to deal
	 * with _any_ configuration.  Practice is different ...
	 */
	if (fl0 != PAGE_SIZE || (fl1 != 0 && fl1 <= fl0)) {
		dev_err(adapter->pdev_dev, "bad SGE FL buffer sizes [%d, %d]\n",
			fl0, fl1);
		return -EINVAL;
	}
	if ((sge_params->sge_control & RXPKTCPLMODE_F) == 0) {
		dev_err(adapter->pdev_dev, "bad SGE CPL MODE\n");
		return -EINVAL;
	}
	/*
	 * Now translate the adapter parameters into our internal forms.
	 */
	if (fl1)
		s->fl_pg_order = ilog2(fl1) - PAGE_SHIFT;
	s->stat_len = ((sge_params->sge_control & EGRSTATUSPAGESIZE_F)
			? 128 : 64);
	s->pktshift = PKTSHIFT_G(sge_params->sge_control);
	/* T4 uses a single control field to specify both the PCIe Padding and
	 * Packing Boundary.  T5 introduced the ability to specify these
	 * separately.  The actual Ingress Packet Data alignment boundary
	 * within Packed Buffer Mode is the maximum of these two
	 * specifications.  (Note that it makes no real practical sense to
	 * have the Padding Boundary be larger than the Packing Boundary but
	 * you could set the chip up that way and, in fact, legacy T4 code
	 * would end up doing this because it would initialize the Padding
	 * Boundary and leave the Packing Boundary initialized to 0 (16
	 * bytes).)
	 */
	ingpadboundary = 1 << (INGPADBOUNDARY_G(sge_params->sge_control) +
			       INGPADBOUNDARY_SHIFT_X);
	if (is_t4(adapter->params.chip)) {
		s->fl_align = ingpadboundary;
	} else {
		/* T5 has a different interpretation of one of the PCIe Packing
		 * Boundary values.
		 */
		ingpackboundary = INGPACKBOUNDARY_G(sge_params->sge_control2);
		if (ingpackboundary == INGPACKBOUNDARY_16B_X)
			ingpackboundary = 16;
		else
			ingpackboundary = 1 << (ingpackboundary +
						INGPACKBOUNDARY_SHIFT_X);

		s->fl_align = max(ingpadboundary, ingpackboundary);
	}
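	/* Either way, fl_align now holds the boundary to which Free List
	 * buffers must be aligned so that both the Padding and Packing
	 * Boundaries are honored when the SGE packs Ingress Packet data.
	 */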
	/* A FL with <= fl_starve_thres buffers is starving and a periodic
	 * timer will attempt to refill it.  This needs to be larger than the
	 * SGE's Egress Congestion Threshold.  If it isn't, then we can get
	 * stuck waiting for new packets while the SGE is waiting for us to
	 * give it more Free List entries.  (Note that the SGE's Egress
	 * Congestion Threshold is in units of 2 Free List pointers.)
	 */
	s->fl_starve_thres
		= EGRTHRESHOLD_G(sge_params->sge_congestion_control)*2 + 1;

	/*
	 * Set up tasklet timers.
	 */
	setup_timer(&s->rx_timer, sge_rx_timer_cb, (unsigned long)adapter);
	setup_timer(&s->tx_timer, sge_tx_timer_cb, (unsigned long)adapter);

	/*
	 * Initialize Forwarded Interrupt Queue lock.
	 */
	spin_lock_init(&s->intrq_lock);

	return 0;
}