/*
 * Copyright (c) 2005-2008 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/prefetch.h>
#include <net/arp.h>
#include "common.h"
#include "regs.h"
#include "sge_defs.h"
#include "t3_cpl.h"
#include "firmware_exports.h"
#include "cxgb3_offload.h"

#define USE_GTS 0

#define SGE_RX_SM_BUF_SIZE 1536

#define SGE_RX_COPY_THRES  256
#define SGE_RX_PULL_LEN    128

#define SGE_PG_RSVD SMP_CACHE_BYTES
/*
 * Page chunk size for FL0 buffers if FL0 is to be populated with page chunks.
 * It must be a divisor of PAGE_SIZE.  If set to 0 FL0 will use sk_buffs
 * directly.
 */
#define FL0_PG_CHUNK_SIZE  2048
#define FL0_PG_ORDER 0
#define FL0_PG_ALLOC_SIZE (PAGE_SIZE << FL0_PG_ORDER)
#define FL1_PG_CHUNK_SIZE (PAGE_SIZE > 8192 ? 16384 : 8192)
#define FL1_PG_ORDER (PAGE_SIZE > 8192 ? 0 : 1)
#define FL1_PG_ALLOC_SIZE (PAGE_SIZE << FL1_PG_ORDER)

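/*
 * Sizing example (derived from the macros above): with 4KB pages, FL0
 * carves each order-0 page into two 2KB chunks, while FL1 uses order-1
 * (8KB) allocations holding a single 8KB chunk; on systems whose pages
 * exceed 8KB, both free lists stay at order 0.
 */
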
#define SGE_RX_DROP_THRES 16
#define RX_RECLAIM_PERIOD (HZ/4)

/*
 * Max number of Rx buffers we replenish at a time.
 */
#define MAX_RX_REFILL 16U
/*
 * Period of the Tx buffer reclaim timer.  This timer does not need to run
 * frequently as Tx buffers are usually reclaimed by new Tx packets.
 */
#define TX_RECLAIM_PERIOD (HZ / 4)
#define TX_RECLAIM_TIMER_CHUNK 64U
#define TX_RECLAIM_CHUNK 16U

/* WR size in bytes */
#define WR_LEN (WR_FLITS * 8)

/*
 * Types of Tx queues in each queue set.  Order here matters, do not change.
 */
enum { TXQ_ETH, TXQ_OFLD, TXQ_CTRL };

/* Values for sge_txq.flags */
enum {
	TXQ_RUNNING = 1 << 0,	/* fetch engine is running */
	TXQ_LAST_PKT_DB = 1 << 1,	/* last packet rang the doorbell */
};

struct tx_desc {
	__be64 flit[TX_DESC_FLITS];
};

struct rx_desc {
	__be32 addr_lo;
	__be32 len_gen;
	__be32 gen2;
	__be32 addr_hi;
};

struct tx_sw_desc {		/* SW state per Tx descriptor */
	struct sk_buff *skb;
	u8 eop;		/* set if last descriptor for packet */
	u8 addr_idx;	/* buffer index of first SGL entry in descriptor */
	u8 fragidx;	/* first page fragment associated with descriptor */
	s8 sflit;	/* start flit of first SGL entry in descriptor */
};

struct rx_sw_desc {		/* SW state per Rx descriptor */
	union {
		struct sk_buff *skb;
		struct fl_pg_chunk pg_chunk;
	};
	DEFINE_DMA_UNMAP_ADDR(dma_addr);
};

struct rsp_desc {		/* response queue descriptor */
	struct rss_header rss_hdr;
	__be32 flags;
	__be32 len_cq;
	u8 imm_data[47];
	u8 intr_gen;
};

/*
 * Holds unmapping information for Tx packets that need deferred unmapping.
 * This structure lives at skb->head and must be allocated by callers.
 */
struct deferred_unmap_info {
	struct pci_dev *pdev;
	dma_addr_t addr[MAX_SKB_FRAGS + 1];
};

/*
 * Maps a number of flits to the number of Tx descriptors that can hold them.
 * The formula is
 *
 * desc = 1 + (flits - 2) / (WR_FLITS - 1).
 *
 * HW allows up to 4 descriptors to be combined into a WR.
 */
static u8 flit_desc_map[] = {
	0,
#if SGE_NUM_GENBITS == 1
	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
	2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
	3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
	4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4
#elif SGE_NUM_GENBITS == 2
	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
	2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
	3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
	4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
#else
# error "SGE_NUM_GENBITS must be 1 or 2"
#endif
};

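/*
 * Worked example for the mapping above (assuming SGE_NUM_GENBITS == 2, so
 * the first descriptor holds WR_FLITS == 15 flits and each continuation
 * descriptor holds WR_FLITS - 1): 15 flits -> 1 + (15 - 2) / 14 = 1
 * descriptor, while 16 flits -> 1 + (16 - 2) / 14 = 2, matching the
 * flit_desc_map[] table.
 */
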
static inline struct sge_qset *fl_to_qset(const struct sge_fl *q, int qidx)
{
	return container_of(q, struct sge_qset, fl[qidx]);
}

static inline struct sge_qset *rspq_to_qset(const struct sge_rspq *q)
{
	return container_of(q, struct sge_qset, rspq);
}

static inline struct sge_qset *txq_to_qset(const struct sge_txq *q, int qidx)
{
	return container_of(q, struct sge_qset, txq[qidx]);
}

/**
 *	refill_rspq - replenish an SGE response queue
 *	@adapter: the adapter
 *	@q: the response queue to replenish
 *	@credits: how many new responses to make available
 *
 *	Replenishes a response queue by making the supplied number of responses
 *	available to HW.
 */
static inline void refill_rspq(struct adapter *adapter,
			       const struct sge_rspq *q, unsigned int credits)
{
	rmb();
	t3_write_reg(adapter, A_SG_RSPQ_CREDIT_RETURN,
		     V_RSPQ(q->cntxt_id) | V_CREDITS(credits));
}

/**
 *	need_skb_unmap - does the platform need unmapping of sk_buffs?
 *
 *	Returns true if the platform needs sk_buff unmapping.  The result is
 *	a compile-time constant, so the compiler optimizes away the unmapping
 *	code on platforms where this returns false.
 */
static inline int need_skb_unmap(void)
{
#ifdef CONFIG_NEED_DMA_MAP_STATE
	return 1;
#else
	return 0;
#endif
}

/**
 *	unmap_skb - unmap a packet main body and its page fragments
 *	@skb: the packet
 *	@q: the Tx queue containing Tx descriptors for the packet
 *	@cidx: index of Tx descriptor
 *	@pdev: the PCI device
 *
 *	Unmap the main body of an sk_buff and its page fragments, if any.
 *	Because of the fairly complicated structure of our SGLs and the desire
 *	to conserve space for metadata, the information necessary to unmap an
 *	sk_buff is spread across the sk_buff itself (buffer lengths), the HW Tx
 *	descriptors (the physical addresses of the various data buffers), and
 *	the SW descriptor state (assorted indices).  The send functions
 *	initialize the indices for the first packet descriptor so we can unmap
 *	the buffers held in the first Tx descriptor here, and we have enough
 *	information at this point to set the state for the next Tx descriptor.
 *
 *	Note that it is possible to clean up the first descriptor of a packet
 *	before the send routines have written the next descriptors, but this
 *	race does not cause any problem.  We just end up writing the unmapping
 *	info for the descriptor first.
 */
static inline void unmap_skb(struct sk_buff *skb, struct sge_txq *q,
			     unsigned int cidx, struct pci_dev *pdev)
{
	const struct sg_ent *sgp;
	struct tx_sw_desc *d = &q->sdesc[cidx];
	int nfrags, frag_idx, curflit, j = d->addr_idx;

	sgp = (struct sg_ent *)&q->desc[cidx].flit[d->sflit];
	frag_idx = d->fragidx;

	if (frag_idx == 0 && skb_headlen(skb)) {
		pci_unmap_single(pdev, be64_to_cpu(sgp->addr[0]),
				 skb_headlen(skb), PCI_DMA_TODEVICE);
		j = 1;
	}

	curflit = d->sflit + 1 + j;
	nfrags = skb_shinfo(skb)->nr_frags;

	while (frag_idx < nfrags && curflit < WR_FLITS) {
		pci_unmap_page(pdev, be64_to_cpu(sgp->addr[j]),
			       skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]),
			       PCI_DMA_TODEVICE);
		j ^= 1;
		if (j == 0) {
			sgp++;
			curflit++;
		}
		curflit++;
		frag_idx++;
	}

	if (frag_idx < nfrags) {	/* SGL continues into next Tx descriptor */
		d = cidx + 1 == q->size ? q->sdesc : d + 1;
		d->fragidx = frag_idx;
		d->addr_idx = j;
		d->sflit = curflit - WR_FLITS - j; /* sflit can be -1 */
	}
}

/**
 *	free_tx_desc - reclaims Tx descriptors and their buffers
 *	@adapter: the adapter
 *	@q: the Tx queue to reclaim descriptors from
 *	@n: the number of descriptors to reclaim
 *
 *	Reclaims Tx descriptors from an SGE Tx queue and frees the associated
 *	Tx buffers.  Called with the Tx queue lock held.
 */
static void free_tx_desc(struct adapter *adapter, struct sge_txq *q,
			 unsigned int n)
{
	struct tx_sw_desc *d;
	struct pci_dev *pdev = adapter->pdev;
	unsigned int cidx = q->cidx;

	const int need_unmap = need_skb_unmap() &&
			       q->cntxt_id >= FW_TUNNEL_SGEEC_START;

	d = &q->sdesc[cidx];
	while (n--) {
		if (d->skb) {	/* an SGL is present */
			if (need_unmap)
				unmap_skb(d->skb, q, cidx, pdev);
			if (d->eop) {
				dev_consume_skb_any(d->skb);
				d->skb = NULL;
			}
		}
		++d;
		if (++cidx == q->size) {
			cidx = 0;
			d = q->sdesc;
		}
	}
	q->cidx = cidx;
}

/**
 *	reclaim_completed_tx - reclaims completed Tx descriptors
 *	@adapter: the adapter
 *	@q: the Tx queue to reclaim completed descriptors from
 *	@chunk: maximum number of descriptors to reclaim
 *
 *	Reclaims Tx descriptors that the SGE has indicated it has processed,
 *	and frees the associated buffers if possible.  Called with the Tx
 *	queue's lock held.
 */
static inline unsigned int reclaim_completed_tx(struct adapter *adapter,
						struct sge_txq *q,
						unsigned int chunk)
{
	unsigned int reclaim = q->processed - q->cleaned;

	reclaim = min(chunk, reclaim);
	if (reclaim) {
		free_tx_desc(adapter, q, reclaim);
		q->cleaned += reclaim;
		q->in_use -= reclaim;
	}
	return q->processed - q->cleaned;
}

/**
 *	should_restart_tx - are there enough resources to restart a Tx queue?
 *	@q: the Tx queue
 *
 *	Checks if there are enough descriptors to restart a suspended Tx queue.
 */
static inline int should_restart_tx(const struct sge_txq *q)
{
	unsigned int r = q->processed - q->cleaned;

	return q->in_use - r < (q->size >> 1);
}

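/*
 * Illustrative example: on a 1024-descriptor queue suspended with 900
 * descriptors in use, should_restart_tx() succeeds once at least 389 of
 * them have been processed, i.e. once fewer than half the ring is still
 * outstanding.
 */
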
static void clear_rx_desc(struct pci_dev *pdev, const struct sge_fl *q,
			  struct rx_sw_desc *d)
{
	if (q->use_pages && d->pg_chunk.page) {
		(*d->pg_chunk.p_cnt)--;
		if (!*d->pg_chunk.p_cnt)
			pci_unmap_page(pdev,
				       d->pg_chunk.mapping,
				       q->alloc_size, PCI_DMA_FROMDEVICE);

		put_page(d->pg_chunk.page);
		d->pg_chunk.page = NULL;
	} else {
		pci_unmap_single(pdev, dma_unmap_addr(d, dma_addr),
				 q->buf_size, PCI_DMA_FROMDEVICE);
		kfree_skb(d->skb);
		d->skb = NULL;
	}
}

/**
 *	free_rx_bufs - free the Rx buffers on an SGE free list
 *	@pdev: the PCI device associated with the adapter
 *	@q: the SGE free list to clean up
 *
 *	Release the buffers on an SGE free-buffer Rx queue.  HW fetching from
 *	this queue should be stopped before calling this function.
 */
static void free_rx_bufs(struct pci_dev *pdev, struct sge_fl *q)
{
	unsigned int cidx = q->cidx;

	while (q->credits--) {
		struct rx_sw_desc *d = &q->sdesc[cidx];

		clear_rx_desc(pdev, q, d);
		if (++cidx == q->size)
			cidx = 0;
	}

	if (q->pg_chunk.page) {
		__free_pages(q->pg_chunk.page, q->order);
		q->pg_chunk.page = NULL;
	}
}

/**
 *	add_one_rx_buf - add a packet buffer to a free-buffer list
 *	@va: buffer start VA
 *	@len: the buffer length
 *	@d: the HW Rx descriptor to write
 *	@sd: the SW Rx descriptor to write
 *	@gen: the generation bit value
 *	@pdev: the PCI device associated with the adapter
 *
 *	Add a buffer of the given length to the supplied HW and SW Rx
 *	descriptors.
 */
static inline int add_one_rx_buf(void *va, unsigned int len,
				 struct rx_desc *d, struct rx_sw_desc *sd,
				 unsigned int gen, struct pci_dev *pdev)
{
	dma_addr_t mapping;

	mapping = pci_map_single(pdev, va, len, PCI_DMA_FROMDEVICE);
	if (unlikely(pci_dma_mapping_error(pdev, mapping)))
		return -ENOMEM;

	dma_unmap_addr_set(sd, dma_addr, mapping);

	d->addr_lo = cpu_to_be32(mapping);
	d->addr_hi = cpu_to_be32((u64) mapping >> 32);
	dma_wmb();
	d->len_gen = cpu_to_be32(V_FLD_GEN1(gen));
	d->gen2 = cpu_to_be32(V_FLD_GEN2(gen));
	return 0;
}

static inline int add_one_rx_chunk(dma_addr_t mapping, struct rx_desc *d,
				   unsigned int gen)
{
	d->addr_lo = cpu_to_be32(mapping);
	d->addr_hi = cpu_to_be32((u64) mapping >> 32);
	dma_wmb();
	d->len_gen = cpu_to_be32(V_FLD_GEN1(gen));
	d->gen2 = cpu_to_be32(V_FLD_GEN2(gen));
	return 0;
}

static int alloc_pg_chunk(struct adapter *adapter, struct sge_fl *q,
			  struct rx_sw_desc *sd, gfp_t gfp,
			  unsigned int order)
{
	if (!q->pg_chunk.page) {
		dma_addr_t mapping;

		q->pg_chunk.page = alloc_pages(gfp, order);
		if (unlikely(!q->pg_chunk.page))
			return -ENOMEM;
		q->pg_chunk.va = page_address(q->pg_chunk.page);
		q->pg_chunk.p_cnt = q->pg_chunk.va + (PAGE_SIZE << order) -
				    SGE_PG_RSVD;
		q->pg_chunk.offset = 0;
		mapping = pci_map_page(adapter->pdev, q->pg_chunk.page,
				       0, q->alloc_size, PCI_DMA_FROMDEVICE);
		q->pg_chunk.mapping = mapping;
	}
	sd->pg_chunk = q->pg_chunk;

	prefetch(sd->pg_chunk.p_cnt);

	q->pg_chunk.offset += q->buf_size;
	if (q->pg_chunk.offset == (PAGE_SIZE << order))
		q->pg_chunk.page = NULL;
	else {
		q->pg_chunk.va += q->buf_size;
		get_page(q->pg_chunk.page);
	}

	if (sd->pg_chunk.offset == 0)
		*sd->pg_chunk.p_cnt = 1;
	else
		*sd->pg_chunk.p_cnt += 1;

	return 0;
}

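/*
 * Note on the chunk reference count above: *p_cnt lives in the SGE_PG_RSVD
 * bytes reserved at the end of each page and tracks how many chunks of the
 * page are posted to the free list, so clear_rx_desc() and get_packet_pg()
 * unmap the page only when its last outstanding chunk is consumed.
 */
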
static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
{
	if (q->pend_cred >= q->credits / 4) {
		q->pend_cred = 0;
		wmb();
		t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
	}
}

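/*
 * Batching note: ring_fl_db() flushes pending credits to the hardware
 * doorbell only once they reach a quarter of the free list's current
 * credit count, trading a little refill latency for far fewer MMIO writes.
 */
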
/**
 *	refill_fl - refill an SGE free-buffer list
 *	@adap: the adapter
 *	@q: the free-list to refill
 *	@n: the number of new buffers to allocate
 *	@gfp: the gfp flags for allocating new buffers
 *
 *	(Re)populate an SGE free-buffer list with up to @n new packet buffers,
 *	allocated with the supplied gfp flags.  The caller must ensure that
 *	@n does not exceed the queue's capacity.
 */
static int refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp)
{
	struct rx_sw_desc *sd = &q->sdesc[q->pidx];
	struct rx_desc *d = &q->desc[q->pidx];
	unsigned int count = 0;

	while (n--) {
		dma_addr_t mapping;
		int err;

		if (q->use_pages) {
			if (unlikely(alloc_pg_chunk(adap, q, sd, gfp,
						    q->order))) {
nomem:				q->alloc_failed++;
				break;
			}
			mapping = sd->pg_chunk.mapping + sd->pg_chunk.offset;
			dma_unmap_addr_set(sd, dma_addr, mapping);

			add_one_rx_chunk(mapping, d, q->gen);
			pci_dma_sync_single_for_device(adap->pdev, mapping,
						q->buf_size - SGE_PG_RSVD,
						PCI_DMA_FROMDEVICE);
		} else {
			void *buf_start;

			struct sk_buff *skb = alloc_skb(q->buf_size, gfp);
			if (!skb)
				goto nomem;

			sd->skb = skb;
			buf_start = skb->data;
			err = add_one_rx_buf(buf_start, q->buf_size, d, sd,
					     q->gen, adap->pdev);
			if (unlikely(err)) {
				clear_rx_desc(adap->pdev, q, sd);
				break;
			}
		}

		d++;
		sd++;
		if (++q->pidx == q->size) {
			q->pidx = 0;
			q->gen ^= 1;
			sd = q->sdesc;
			d = q->desc;
		}
		count++;
	}

	q->credits += count;
	q->pend_cred += count;
	ring_fl_db(adap, q);

	return count;
}

static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
{
	refill_fl(adap, fl, min(MAX_RX_REFILL, fl->size - fl->credits),
		  GFP_ATOMIC | __GFP_COMP);
}

/**
 *	recycle_rx_buf - recycle a receive buffer
 *	@adap: the adapter
 *	@q: the SGE free list
 *	@idx: index of buffer to recycle
 *
 *	Recycles the specified buffer on the given free list by adding it at
 *	the next available slot on the list.
 */
static void recycle_rx_buf(struct adapter *adap, struct sge_fl *q,
			   unsigned int idx)
{
	struct rx_desc *from = &q->desc[idx];
	struct rx_desc *to = &q->desc[q->pidx];

	q->sdesc[q->pidx] = q->sdesc[idx];
	to->addr_lo = from->addr_lo;	/* already big endian */
	to->addr_hi = from->addr_hi;	/* likewise */
	dma_wmb();
	to->len_gen = cpu_to_be32(V_FLD_GEN1(q->gen));
	to->gen2 = cpu_to_be32(V_FLD_GEN2(q->gen));

	if (++q->pidx == q->size) {
		q->pidx = 0;
		q->gen ^= 1;
	}

	q->credits++;
	q->pend_cred++;
	ring_fl_db(adap, q);
}

/**
 *	alloc_ring - allocate resources for an SGE descriptor ring
 *	@pdev: the PCI device
 *	@nelem: the number of descriptors
 *	@elem_size: the size of each descriptor
 *	@sw_size: the size of the SW state associated with each ring element
 *	@phys: the physical address of the allocated ring
 *	@metadata: address of the array holding the SW state for the ring
 *
 *	Allocates resources for an SGE descriptor ring, such as Tx queues,
 *	free buffer lists, or response queues.  Each SGE ring requires
 *	space for its HW descriptors plus, optionally, space for the SW state
 *	associated with each HW entry (the metadata).  The function returns
 *	three values: the virtual address for the HW ring (the return value
 *	of the function), the physical address of the HW ring, and the address
 *	of the SW ring.
 */
static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size,
			size_t sw_size, dma_addr_t *phys, void *metadata)
{
	size_t len = nelem * elem_size;
	void *s = NULL;
	void *p = dma_alloc_coherent(&pdev->dev, len, phys, GFP_KERNEL);

	if (!p)
		return NULL;
	if (sw_size && metadata) {
		s = kcalloc(nelem, sw_size, GFP_KERNEL);

		if (!s) {
			dma_free_coherent(&pdev->dev, len, p, *phys);
			return NULL;
		}
		*(void **)metadata = s;
	}
	memset(p, 0, len);
	return p;
}

/**
 *	t3_reset_qset - reset an SGE qset
 *	@q: the queue set
 *
 *	Reset the qset structure.  The NAPI structure is preserved in the
 *	event of the qset's reincarnation, for example during EEH recovery.
 */
static void t3_reset_qset(struct sge_qset *q)
{
	if (q->adap &&
	    !(q->adap->flags & NAPI_INIT)) {
		memset(q, 0, sizeof(*q));
		return;
	}

	q->adap = NULL;
	memset(&q->rspq, 0, sizeof(q->rspq));
	memset(q->fl, 0, sizeof(struct sge_fl) * SGE_RXQ_PER_SET);
	memset(q->txq, 0, sizeof(struct sge_txq) * SGE_TXQ_PER_SET);
	q->txq_stopped = 0;
	q->tx_reclaim_timer.function = NULL; /* for t3_stop_sge_timers() */
	q->rx_reclaim_timer.function = NULL;
	q->nomem = 0;
	napi_free_frags(&q->napi);
}

/**
 *	t3_free_qset - free the resources of an SGE queue set
 *	@adapter: the adapter owning the queue set
 *	@q: the queue set
 *
 *	Release the HW and SW resources associated with an SGE queue set, such
 *	as HW contexts, packet buffers, and descriptor rings.  Traffic to the
 *	queue set must be quiesced prior to calling this.
 */
static void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
{
	int i;
	struct pci_dev *pdev = adapter->pdev;

	for (i = 0; i < SGE_RXQ_PER_SET; ++i)
		if (q->fl[i].desc) {
			spin_lock_irq(&adapter->sge.reg_lock);
			t3_sge_disable_fl(adapter, q->fl[i].cntxt_id);
			spin_unlock_irq(&adapter->sge.reg_lock);
			free_rx_bufs(pdev, &q->fl[i]);
			kfree(q->fl[i].sdesc);
			dma_free_coherent(&pdev->dev,
					  q->fl[i].size *
					  sizeof(struct rx_desc), q->fl[i].desc,
					  q->fl[i].phys_addr);
		}

	for (i = 0; i < SGE_TXQ_PER_SET; ++i)
		if (q->txq[i].desc) {
			spin_lock_irq(&adapter->sge.reg_lock);
			t3_sge_enable_ecntxt(adapter, q->txq[i].cntxt_id, 0);
			spin_unlock_irq(&adapter->sge.reg_lock);
			if (q->txq[i].sdesc) {
				free_tx_desc(adapter, &q->txq[i],
					     q->txq[i].in_use);
				kfree(q->txq[i].sdesc);
			}
			dma_free_coherent(&pdev->dev,
					  q->txq[i].size *
					  sizeof(struct tx_desc),
					  q->txq[i].desc, q->txq[i].phys_addr);
			__skb_queue_purge(&q->txq[i].sendq);
		}

	if (q->rspq.desc) {
		spin_lock_irq(&adapter->sge.reg_lock);
		t3_sge_disable_rspcntxt(adapter, q->rspq.cntxt_id);
		spin_unlock_irq(&adapter->sge.reg_lock);
		dma_free_coherent(&pdev->dev,
				  q->rspq.size * sizeof(struct rsp_desc),
				  q->rspq.desc, q->rspq.phys_addr);
	}

	t3_reset_qset(q);
}

/**
 *	init_qset_cntxt - initialize an SGE queue set context info
 *	@qs: the queue set
 *	@id: the queue set id
 *
 *	Initializes the TIDs and context ids for the queues of a queue set.
 */
static void init_qset_cntxt(struct sge_qset *qs, unsigned int id)
{
	qs->rspq.cntxt_id = id;
	qs->fl[0].cntxt_id = 2 * id;
	qs->fl[1].cntxt_id = 2 * id + 1;
	qs->txq[TXQ_ETH].cntxt_id = FW_TUNNEL_SGEEC_START + id;
	qs->txq[TXQ_ETH].token = FW_TUNNEL_TID_START + id;
	qs->txq[TXQ_OFLD].cntxt_id = FW_OFLD_SGEEC_START + id;
	qs->txq[TXQ_CTRL].cntxt_id = FW_CTRL_SGEEC_START + id;
	qs->txq[TXQ_CTRL].token = FW_CTRL_TID_START + id;
}

/**
 *	sgl_len - calculates the size of an SGL of the given capacity
 *	@n: the number of SGL entries
 *
 *	Calculates the number of flits needed for a scatter/gather list that
 *	can hold the given number of entries.
 */
static inline unsigned int sgl_len(unsigned int n)
{
	/* alternatively: 3 * (n / 2) + 2 * (n & 1) */
	return (3 * n) / 2 + (n & 1);
}

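/*
 * Worked example: an SGL entry occupies 12 bytes (a 4-byte length plus an
 * 8-byte address) and entries are packed in pairs, so sgl_len(n) is
 * ceil(1.5 * n): sgl_len(3) = 9/2 + 1 = 5 flits, sgl_len(4) = 6 flits.
 */
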
/**
 *	flits_to_desc - returns the num of Tx descriptors for the given flits
 *	@n: the number of flits
 *
 *	Calculates the number of Tx descriptors needed for the supplied number
 *	of flits.
 */
static inline unsigned int flits_to_desc(unsigned int n)
{
	BUG_ON(n >= ARRAY_SIZE(flit_desc_map));
	return flit_desc_map[n];
}

/**
 *	get_packet - return the next ingress packet buffer from a free list
 *	@adap: the adapter that received the packet
 *	@fl: the SGE free list holding the packet
 *	@len: the packet length including any SGE padding
 *	@drop_thres: # of remaining buffers before we start dropping packets
 *
 *	Get the next packet from a free list and complete setup of the
 *	sk_buff.  If the packet is small we make a copy and recycle the
 *	original buffer, otherwise we use the original buffer itself.  If a
 *	positive drop threshold is supplied packets are dropped and their
 *	buffers recycled if (a) the number of remaining buffers is under the
 *	threshold and the packet is too big to copy, or (b) the packet should
 *	be copied but there is no memory for the copy.
 */
static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl,
				  unsigned int len, unsigned int drop_thres)
{
	struct sk_buff *skb = NULL;
	struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];

	prefetch(sd->skb->data);
	fl->credits--;

	if (len <= SGE_RX_COPY_THRES) {
		skb = alloc_skb(len, GFP_ATOMIC);
		if (likely(skb != NULL)) {
			__skb_put(skb, len);
			pci_dma_sync_single_for_cpu(adap->pdev,
					    dma_unmap_addr(sd, dma_addr), len,
					    PCI_DMA_FROMDEVICE);
			memcpy(skb->data, sd->skb->data, len);
			pci_dma_sync_single_for_device(adap->pdev,
					    dma_unmap_addr(sd, dma_addr), len,
					    PCI_DMA_FROMDEVICE);
		} else if (!drop_thres)
			goto use_orig_buf;
recycle:
		recycle_rx_buf(adap, fl, fl->cidx);
		return skb;
	}

	if (unlikely(fl->credits < drop_thres) &&
	    refill_fl(adap, fl, min(MAX_RX_REFILL, fl->size - fl->credits - 1),
		      GFP_ATOMIC | __GFP_COMP) == 0)
		goto recycle;

use_orig_buf:
	pci_unmap_single(adap->pdev, dma_unmap_addr(sd, dma_addr),
			 fl->buf_size, PCI_DMA_FROMDEVICE);
	skb = sd->skb;
	skb_put(skb, len);
	__refill_fl(adap, fl);
	return skb;
}

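/*
 * Example of the copy threshold at work: a 128-byte frame is copied into a
 * freshly allocated skb (128 <= SGE_RX_COPY_THRES) and its free-list buffer
 * is recycled in place, while a 1500-byte frame takes the use_orig_buf path
 * and hands the original buffer to the stack.
 */
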
/**
 *	get_packet_pg - return the next ingress packet buffer from a free list
 *	@adap: the adapter that received the packet
 *	@fl: the SGE free list holding the packet
 *	@q: the response queue the packet arrived on
 *	@len: the packet length including any SGE padding
 *	@drop_thres: # of remaining buffers before we start dropping packets
 *
 *	Get the next packet from a free list populated with page chunks.
 *	If the packet is small we make a copy and recycle the original buffer,
 *	otherwise we attach the original buffer as a page fragment to a fresh
 *	sk_buff.  If a positive drop threshold is supplied packets are dropped
 *	and their buffers recycled if (a) the number of remaining buffers is
 *	under the threshold and the packet is too big to copy, or (b) there's
 *	no system memory.
 *
 *	Note: this function is similar to @get_packet but deals with Rx buffers
 *	that are page chunks rather than sk_buffs.
 */
static struct sk_buff *get_packet_pg(struct adapter *adap, struct sge_fl *fl,
				     struct sge_rspq *q, unsigned int len,
				     unsigned int drop_thres)
{
	struct sk_buff *newskb, *skb;
	struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];

	dma_addr_t dma_addr = dma_unmap_addr(sd, dma_addr);

	newskb = skb = q->pg_skb;
	if (!skb && (len <= SGE_RX_COPY_THRES)) {
		newskb = alloc_skb(len, GFP_ATOMIC);
		if (likely(newskb != NULL)) {
			__skb_put(newskb, len);
			pci_dma_sync_single_for_cpu(adap->pdev, dma_addr, len,
					    PCI_DMA_FROMDEVICE);
			memcpy(newskb->data, sd->pg_chunk.va, len);
			pci_dma_sync_single_for_device(adap->pdev, dma_addr,
						       len,
						       PCI_DMA_FROMDEVICE);
		} else if (!drop_thres)
			return NULL;
recycle:
		fl->credits--;
		recycle_rx_buf(adap, fl, fl->cidx);
		q->rx_recycle_buf++;
		return newskb;
	}

	if (unlikely(q->rx_recycle_buf || (!skb && fl->credits <= drop_thres)))
		goto recycle;

	prefetch(sd->pg_chunk.p_cnt);

	if (!skb)
		newskb = alloc_skb(SGE_RX_PULL_LEN, GFP_ATOMIC);

	if (unlikely(!newskb)) {
		if (!drop_thres)
			return NULL;
		goto recycle;
	}

	pci_dma_sync_single_for_cpu(adap->pdev, dma_addr, len,
				    PCI_DMA_FROMDEVICE);
	(*sd->pg_chunk.p_cnt)--;
	if (!*sd->pg_chunk.p_cnt && sd->pg_chunk.page != fl->pg_chunk.page)
		pci_unmap_page(adap->pdev,
			       sd->pg_chunk.mapping,
			       fl->alloc_size,
			       PCI_DMA_FROMDEVICE);
	if (!skb) {
		__skb_put(newskb, SGE_RX_PULL_LEN);
		memcpy(newskb->data, sd->pg_chunk.va, SGE_RX_PULL_LEN);
		skb_fill_page_desc(newskb, 0, sd->pg_chunk.page,
				   sd->pg_chunk.offset + SGE_RX_PULL_LEN,
				   len - SGE_RX_PULL_LEN);
		newskb->len = len;
		newskb->data_len = len - SGE_RX_PULL_LEN;
		newskb->truesize += newskb->data_len;
	} else {
		skb_fill_page_desc(newskb, skb_shinfo(newskb)->nr_frags,
				   sd->pg_chunk.page,
				   sd->pg_chunk.offset, len);
		newskb->len += len;
		newskb->data_len += len;
		newskb->truesize += len;
	}

	fl->credits--;
	/*
	 * We do not refill FLs here, we let the caller do it to overlap a
	 * prefetch.
	 */
	return newskb;
}

/**
 *	get_imm_packet - return the next ingress packet buffer from a response
 *	@resp: the response descriptor containing the packet data
 *
 *	Return a packet containing the immediate data of the given response.
 */
static inline struct sk_buff *get_imm_packet(const struct rsp_desc *resp)
{
	struct sk_buff *skb = alloc_skb(IMMED_PKT_SIZE, GFP_ATOMIC);

	if (skb) {
		__skb_put(skb, IMMED_PKT_SIZE);
		skb_copy_to_linear_data(skb, resp->imm_data, IMMED_PKT_SIZE);
	}
	return skb;
}

/**
 *	calc_tx_descs - calculate the number of Tx descriptors for a packet
 *	@skb: the packet
 *
 *	Returns the number of Tx descriptors needed for the given Ethernet
 *	packet.  Ethernet packets require addition of WR and CPL headers.
 */
static inline unsigned int calc_tx_descs(const struct sk_buff *skb)
{
	unsigned int flits;

	if (skb->len <= WR_LEN - sizeof(struct cpl_tx_pkt))
		return 1;

	flits = sgl_len(skb_shinfo(skb)->nr_frags + 1) + 2;
	if (skb_shinfo(skb)->gso_size)
		flits++;
	return flits_to_desc(flits);
}

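/*
 * Worked example: a non-GSO packet with linear data plus two page fragments
 * needs sgl_len(3) + 2 = 7 flits, and flit_desc_map[7] == 1, so the whole
 * work request fits in a single Tx descriptor.
 */
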
/**
 *	make_sgl - populate a scatter/gather list for a packet
 *	@skb: the packet
 *	@sgp: the SGL to populate
 *	@start: start address of skb main body data to include in the SGL
 *	@len: length of skb main body data to include in the SGL
 *	@pdev: the PCI device
 *
 *	Generates a scatter/gather list for the buffers that make up a packet
 *	and returns the SGL size in 8-byte words.  The caller must size the SGL
 *	appropriately.
 */
static inline unsigned int make_sgl(const struct sk_buff *skb,
				    struct sg_ent *sgp, unsigned char *start,
				    unsigned int len, struct pci_dev *pdev)
{
	dma_addr_t mapping;
	unsigned int i, j = 0, nfrags;

	if (len) {
		mapping = pci_map_single(pdev, start, len, PCI_DMA_TODEVICE);
		sgp->len[0] = cpu_to_be32(len);
		sgp->addr[0] = cpu_to_be64(mapping);
		j = 1;
	}

	nfrags = skb_shinfo(skb)->nr_frags;
	for (i = 0; i < nfrags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		mapping = skb_frag_dma_map(&pdev->dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		sgp->len[j] = cpu_to_be32(skb_frag_size(frag));
		sgp->addr[j] = cpu_to_be64(mapping);
		j ^= 1;
		if (j == 0)
			++sgp;
	}
	if (j)
		sgp->len[j] = 0;
	return ((nfrags + (len != 0)) * 3) / 2 + j;
}

/**
 *	check_ring_tx_db - check and potentially ring a Tx queue's doorbell
 *	@adap: the adapter
 *	@q: the Tx queue
 *
 *	Ring the doorbell if a Tx queue is asleep.  There is a natural race,
 *	where the HW is going to sleep just after we checked; however, the
 *	interrupt handler will then detect the outstanding TX packet and ring
 *	the doorbell for us.
 *
 *	When GTS is disabled we unconditionally ring the doorbell.
 */
static inline void check_ring_tx_db(struct adapter *adap, struct sge_txq *q)
{
#if USE_GTS
	clear_bit(TXQ_LAST_PKT_DB, &q->flags);
	if (test_and_set_bit(TXQ_RUNNING, &q->flags) == 0) {
		set_bit(TXQ_LAST_PKT_DB, &q->flags);
		t3_write_reg(adap, A_SG_KDOORBELL,
			     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
	}
#else
	wmb();			/* write descriptors before telling HW */
	t3_write_reg(adap, A_SG_KDOORBELL,
		     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
#endif
}

static inline void wr_gen2(struct tx_desc *d, unsigned int gen)
{
#if SGE_NUM_GENBITS == 2
	d->flit[TX_DESC_FLITS - 1] = cpu_to_be64(gen);
#endif
}

/**
 *	write_wr_hdr_sgl - write a WR header and, optionally, SGL
 *	@ndesc: number of Tx descriptors spanned by the SGL
 *	@skb: the packet corresponding to the WR
 *	@d: first Tx descriptor to be written
 *	@pidx: index of above descriptors
 *	@q: the SGE Tx queue
 *	@sgl: the SGL
 *	@flits: number of flits to the start of the SGL in the first descriptor
 *	@sgl_flits: the SGL size in flits
 *	@gen: the Tx descriptor generation
 *	@wr_hi: top 32 bits of WR header based on WR type (big endian)
 *	@wr_lo: low 32 bits of WR header based on WR type (big endian)
 *
 *	Write a work request header and an associated SGL.  If the SGL is
 *	small enough to fit into one Tx descriptor it has already been written
 *	and we just need to write the WR header.  Otherwise we distribute the
 *	SGL across the number of descriptors it spans.
 */
static void write_wr_hdr_sgl(unsigned int ndesc, struct sk_buff *skb,
			     struct tx_desc *d, unsigned int pidx,
			     const struct sge_txq *q,
			     const struct sg_ent *sgl,
			     unsigned int flits, unsigned int sgl_flits,
			     unsigned int gen, __be32 wr_hi,
			     __be32 wr_lo)
{
	struct work_request_hdr *wrp = (struct work_request_hdr *)d;
	struct tx_sw_desc *sd = &q->sdesc[pidx];

	sd->skb = skb;
	if (need_skb_unmap()) {
		sd->fragidx = 0;
		sd->addr_idx = 0;
		sd->sflit = flits;
	}

	if (likely(ndesc == 1)) {
		sd->eop = 1;
		wrp->wr_hi = htonl(F_WR_SOP | F_WR_EOP | V_WR_DATATYPE(1) |
				   V_WR_SGLSFLT(flits)) | wr_hi;
		dma_wmb();
		wrp->wr_lo = htonl(V_WR_LEN(flits + sgl_flits) |
				   V_WR_GEN(gen)) | wr_lo;
		wr_gen2(d, gen);
	} else {
		unsigned int ogen = gen;
		const u64 *fp = (const u64 *)sgl;
		struct work_request_hdr *wp = wrp;

		wrp->wr_hi = htonl(F_WR_SOP | V_WR_DATATYPE(1) |
				   V_WR_SGLSFLT(flits)) | wr_hi;

		while (sgl_flits) {
			unsigned int avail = WR_FLITS - flits;

			if (avail > sgl_flits)
				avail = sgl_flits;
			memcpy(&d->flit[flits], fp, avail * sizeof(*fp));
			sgl_flits -= avail;
			ndesc--;
			if (!sgl_flits)
				break;

			fp += avail;
			d++;
			sd->eop = 0;
			sd++;
			if (++pidx == q->size) {
				pidx = 0;
				gen ^= 1;
				d = q->desc;
				sd = q->sdesc;
			}

			sd->skb = skb;
			wrp = (struct work_request_hdr *)d;
			wrp->wr_hi = htonl(V_WR_DATATYPE(1) |
					   V_WR_SGLSFLT(1)) | wr_hi;
			wrp->wr_lo = htonl(V_WR_LEN(min(WR_FLITS,
							sgl_flits + 1)) |
					   V_WR_GEN(gen)) | wr_lo;
			wr_gen2(d, gen);
			flits = 1;
		}
		sd->eop = 1;
		wrp->wr_hi |= htonl(F_WR_EOP);
		dma_wmb();
		wp->wr_lo = htonl(V_WR_LEN(WR_FLITS) | V_WR_GEN(ogen)) | wr_lo;
		wr_gen2((struct tx_desc *)wp, ogen);
		WARN_ON(ndesc != 0);
	}
}

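/*
 * Ordering note on the multi-descriptor path above: every continuation
 * descriptor is completed first, and only then, after a dma_wmb(), is the
 * first descriptor's wr_lo written with the original generation bit (ogen),
 * so the SGE cannot start fetching a partially written work request.
 */
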
/**
 *	write_tx_pkt_wr - write a TX_PKT work request
 *	@adap: the adapter
 *	@skb: the packet to send
 *	@pi: the egress interface
 *	@pidx: index of the first Tx descriptor to write
 *	@gen: the generation value to use
 *	@q: the Tx queue
 *	@ndesc: number of descriptors the packet will occupy
 *	@compl: the value of the COMPL bit to use
 *
 *	Generate a TX_PKT work request to send the supplied packet.
 */
static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
			    const struct port_info *pi,
			    unsigned int pidx, unsigned int gen,
			    struct sge_txq *q, unsigned int ndesc,
			    unsigned int compl)
{
	unsigned int flits, sgl_flits, cntrl, tso_info;
	struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
	struct tx_desc *d = &q->desc[pidx];
	struct cpl_tx_pkt *cpl = (struct cpl_tx_pkt *)d;

	cpl->len = htonl(skb->len);
	cntrl = V_TXPKT_INTF(pi->port_id);

	if (skb_vlan_tag_present(skb))
		cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(skb_vlan_tag_get(skb));

	tso_info = V_LSO_MSS(skb_shinfo(skb)->gso_size);
	if (tso_info) {
		int eth_type;
		struct cpl_tx_pkt_lso *hdr = (struct cpl_tx_pkt_lso *)cpl;

		d->flit[2] = 0;
		cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT_LSO);
		hdr->cntrl = htonl(cntrl);
		eth_type = skb_network_offset(skb) == ETH_HLEN ?
		    CPL_ETH_II : CPL_ETH_II_VLAN;
		tso_info |= V_LSO_ETH_TYPE(eth_type) |
		    V_LSO_IPHDR_WORDS(ip_hdr(skb)->ihl) |
		    V_LSO_TCPHDR_WORDS(tcp_hdr(skb)->doff);
		hdr->lso_info = htonl(tso_info);
		flits = 3;
	} else {
		cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT);
		cntrl |= F_TXPKT_IPCSUM_DIS;	/* SW calculates IP csum */
		cntrl |= V_TXPKT_L4CSUM_DIS(skb->ip_summed != CHECKSUM_PARTIAL);
		cpl->cntrl = htonl(cntrl);

		if (skb->len <= WR_LEN - sizeof(*cpl)) {
			q->sdesc[pidx].skb = NULL;
			if (!skb->data_len)
				skb_copy_from_linear_data(skb, &d->flit[2],
							  skb->len);
			else
				skb_copy_bits(skb, 0, &d->flit[2], skb->len);

			flits = (skb->len + 7) / 8 + 2;
			cpl->wr.wr_hi = htonl(V_WR_BCNTLFLT(skb->len & 7) |
					      V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT)
					      | F_WR_SOP | F_WR_EOP | compl);
			dma_wmb();
			cpl->wr.wr_lo = htonl(V_WR_LEN(flits) | V_WR_GEN(gen) |
					      V_WR_TID(q->token));
			wr_gen2(d, gen);
			dev_consume_skb_any(skb);
			return;
		}

		flits = 2;
	}

	sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
	sgl_flits = make_sgl(skb, sgp, skb->data, skb_headlen(skb), adap->pdev);

	write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, gen,
			 htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | compl),
			 htonl(V_WR_TID(q->token)));
}

static inline void t3_stop_tx_queue(struct netdev_queue *txq,
				    struct sge_qset *qs, struct sge_txq *q)
{
	netif_tx_stop_queue(txq);
	set_bit(TXQ_ETH, &qs->txq_stopped);
	q->stops++;
}

/**
 *	t3_eth_xmit - add a packet to the Ethernet Tx queue
 *	@skb: the packet
 *	@dev: the egress net device
 *
 *	Add a packet to an SGE Tx queue.  Runs with softirqs disabled.
 */
netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
	int qidx;
	unsigned int ndesc, pidx, credits, gen, compl;
	const struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	struct netdev_queue *txq;
	struct sge_qset *qs;
	struct sge_txq *q;

	/*
	 * The chip min packet length is 9 octets but play safe and reject
	 * anything shorter than an Ethernet header.
	 */
	if (unlikely(skb->len < ETH_HLEN)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	qidx = skb_get_queue_mapping(skb);
	qs = &pi->qs[qidx];
	q = &qs->txq[TXQ_ETH];
	txq = netdev_get_tx_queue(dev, qidx);

	reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);

	credits = q->size - q->in_use;
	ndesc = calc_tx_descs(skb);

	if (unlikely(credits < ndesc)) {
		t3_stop_tx_queue(txq, qs, q);
		dev_err(&adap->pdev->dev,
			"%s: Tx ring %u full while queue awake!\n",
			dev->name, q->cntxt_id & 7);
		return NETDEV_TX_BUSY;
	}

	q->in_use += ndesc;
	if (unlikely(credits - ndesc < q->stop_thres)) {
		t3_stop_tx_queue(txq, qs, q);

		if (should_restart_tx(q) &&
		    test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
			q->restarts++;
			netif_tx_start_queue(txq);
		}
	}

	gen = q->gen;
	q->unacked += ndesc;
	compl = (q->unacked & 8) << (S_WR_COMPL - 3);
	q->unacked &= 7;
	pidx = q->pidx;
	q->pidx += ndesc;
	if (q->pidx >= q->size) {
		q->pidx -= q->size;
		q->gen ^= 1;
	}

	/* update port statistics */
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		qs->port_stats[SGE_PSTAT_TX_CSUM]++;
	if (skb_shinfo(skb)->gso_size)
		qs->port_stats[SGE_PSTAT_TSO]++;
	if (skb_vlan_tag_present(skb))
		qs->port_stats[SGE_PSTAT_VLANINS]++;

	/*
	 * We do not use Tx completion interrupts to free DMAd Tx packets.
	 * This is good for performance but means that we rely on new Tx
	 * packets arriving to run the destructors of completed packets,
	 * which open up space in their sockets' send queues.  Sometimes
	 * we do not get such new packets, causing Tx to stall.  A single
	 * UDP transmitter is a good example of this situation.  We have
	 * a clean up timer that periodically reclaims completed packets
	 * but it doesn't run often enough (nor do we want it to) to prevent
	 * lengthy stalls.  A solution to this problem is to run the
	 * destructor early, after the packet is queued but before it's DMAd.
	 * The downside is that we lie to socket memory accounting, but the
	 * amount of extra memory is reasonable (limited by the number of Tx
	 * descriptors), the packets almost always do get freed quickly by
	 * new packets, and for protocols like TCP that wait for acks to
	 * really free up the data the extra memory is even less.
	 * On the positive side we run the destructors on the sending CPU
	 * rather than on a potentially different completing CPU, usually a
	 * good thing.  We also run them without holding our Tx queue lock,
	 * unlike what reclaim_completed_tx() would otherwise do.
	 *
	 * Run the destructor before telling the DMA engine about the packet
	 * to make sure it doesn't complete and get freed prematurely.
	 */
	if (likely(!skb_shared(skb)))
		skb_orphan(skb);

	write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl);
	check_ring_tx_db(adap, q);
	return NETDEV_TX_OK;
}

/**
 *	write_imm - write a packet into a Tx descriptor as immediate data
 *	@d: the Tx descriptor to write
 *	@skb: the packet
 *	@len: the length of packet data to write as immediate data
 *	@gen: the generation bit value to write
 *
 *	Writes a packet as immediate data into a Tx descriptor.  The packet
 *	contains a work request at its beginning.  We must write the packet
 *	carefully so the SGE doesn't read it accidentally before it's written
 *	in its entirety.
 */
static inline void write_imm(struct tx_desc *d, struct sk_buff *skb,
			     unsigned int len, unsigned int gen)
{
	struct work_request_hdr *from = (struct work_request_hdr *)skb->data;
	struct work_request_hdr *to = (struct work_request_hdr *)d;

	if (likely(!skb->data_len))
		memcpy(&to[1], &from[1], len - sizeof(*from));
	else
		skb_copy_bits(skb, sizeof(*from), &to[1], len - sizeof(*from));

	to->wr_hi = from->wr_hi | htonl(F_WR_SOP | F_WR_EOP |
					V_WR_BCNTLFLT(len & 7));
	dma_wmb();
	to->wr_lo = from->wr_lo | htonl(V_WR_GEN(gen) |
					V_WR_LEN((len + 7) / 8));
	wr_gen2(d, gen);
	kfree_skb(skb);
}

/**
 *	check_desc_avail - check descriptor availability on a send queue
 *	@adap: the adapter
 *	@q: the send queue
 *	@skb: the packet needing the descriptors
 *	@ndesc: the number of Tx descriptors needed
 *	@qid: the Tx queue number in its queue set (TXQ_OFLD or TXQ_CTRL)
 *
 *	Checks if the requested number of Tx descriptors is available on an
 *	SGE send queue.  If the queue is already suspended or not enough
 *	descriptors are available the packet is queued for later transmission.
 *	Must be called with the Tx queue locked.
 *
 *	Returns 0 if enough descriptors are available, 1 if there aren't
 *	enough descriptors and the packet has been queued, and 2 if the caller
 *	needs to retry because there weren't enough descriptors at the
 *	beginning of the call but some freed up in the mean time.
 */
static inline int check_desc_avail(struct adapter *adap, struct sge_txq *q,
				   struct sk_buff *skb, unsigned int ndesc,
				   unsigned int qid)
{
	if (unlikely(!skb_queue_empty(&q->sendq))) {
addq_exit:	__skb_queue_tail(&q->sendq, skb);
		return 1;
	}
	if (unlikely(q->size - q->in_use < ndesc)) {
		struct sge_qset *qs = txq_to_qset(q, qid);

		set_bit(qid, &qs->txq_stopped);
		smp_mb__after_atomic();

		if (should_restart_tx(q) &&
		    test_and_clear_bit(qid, &qs->txq_stopped))
			return 2;

		q->stops++;
		goto addq_exit;
	}
	return 0;
}

/**
 *	reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
 *	@q: the SGE control Tx queue
 *
 *	This is a variant of reclaim_completed_tx() that is used for Tx queues
 *	that send only immediate data (presently just the control queues) and
 *	thus do not have any sk_buffs to release.
 */
static inline void reclaim_completed_tx_imm(struct sge_txq *q)
{
	unsigned int reclaim = q->processed - q->cleaned;

	q->in_use -= reclaim;
	q->cleaned += reclaim;
}

1410 | static inline int immediate(const struct sk_buff *skb) | |
1411 | { | |
27186dc3 | 1412 | return skb->len <= WR_LEN; |
4d22de3e DLR |
1413 | } |
1414 | ||
1415 | /** | |
1416 | * ctrl_xmit - send a packet through an SGE control Tx queue | |
1417 | * @adap: the adapter | |
1418 | * @q: the control queue | |
1419 | * @skb: the packet | |
1420 | * | |
1421 | * Send a packet through an SGE control Tx queue. Packets sent through | |
1422 | * a control queue must fit entirely as immediate data in a single Tx | |
1423 | * descriptor and have no page fragments. | |
1424 | */ | |
1425 | static int ctrl_xmit(struct adapter *adap, struct sge_txq *q, | |
1426 | struct sk_buff *skb) | |
1427 | { | |
1428 | int ret; | |
1429 | struct work_request_hdr *wrp = (struct work_request_hdr *)skb->data; | |
1430 | ||
1431 | if (unlikely(!immediate(skb))) { | |
1432 | WARN_ON(1); | |
1433 | dev_kfree_skb(skb); | |
1434 | return NET_XMIT_SUCCESS; | |
1435 | } | |
1436 | ||
1437 | wrp->wr_hi |= htonl(F_WR_SOP | F_WR_EOP); | |
1438 | wrp->wr_lo = htonl(V_WR_TID(q->token)); | |
1439 | ||
1440 | spin_lock(&q->lock); | |
1441 | again:reclaim_completed_tx_imm(q); | |
1442 | ||
1443 | ret = check_desc_avail(adap, q, skb, 1, TXQ_CTRL); | |
1444 | if (unlikely(ret)) { | |
1445 | if (ret == 1) { | |
1446 | spin_unlock(&q->lock); | |
1447 | return NET_XMIT_CN; | |
1448 | } | |
1449 | goto again; | |
1450 | } | |
1451 | ||
1452 | write_imm(&q->desc[q->pidx], skb, skb->len, q->gen); | |
1453 | ||
1454 | q->in_use++; | |
1455 | if (++q->pidx >= q->size) { | |
1456 | q->pidx = 0; | |
1457 | q->gen ^= 1; | |
1458 | } | |
1459 | spin_unlock(&q->lock); | |
1460 | wmb(); | |
1461 | t3_write_reg(adap, A_SG_KDOORBELL, | |
1462 | F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id)); | |
1463 | return NET_XMIT_SUCCESS; | |
1464 | } | |
1465 | ||
1466 | /** | |
1467 | * restart_ctrlq - restart a suspended control queue | |
1468 | * @qs: the queue set containing the control queue | |
1469 | * | |
1470 | * Resumes transmission on a suspended Tx control queue. | |
1471 | */ | |
1472 | static void restart_ctrlq(unsigned long data) | |
1473 | { | |
1474 | struct sk_buff *skb; | |
1475 | struct sge_qset *qs = (struct sge_qset *)data; | |
1476 | struct sge_txq *q = &qs->txq[TXQ_CTRL]; | |
4d22de3e DLR |
1477 | |
1478 | spin_lock(&q->lock); | |
1479 | again:reclaim_completed_tx_imm(q); | |
1480 | ||
bea3348e SH |
1481 | while (q->in_use < q->size && |
1482 | (skb = __skb_dequeue(&q->sendq)) != NULL) { | |
4d22de3e DLR |
1483 | |
1484 | write_imm(&q->desc[q->pidx], skb, skb->len, q->gen); | |
1485 | ||
1486 | if (++q->pidx >= q->size) { | |
1487 | q->pidx = 0; | |
1488 | q->gen ^= 1; | |
1489 | } | |
1490 | q->in_use++; | |
1491 | } | |
1492 | ||
1493 | if (!skb_queue_empty(&q->sendq)) { | |
1494 | set_bit(TXQ_CTRL, &qs->txq_stopped); | |
4e857c58 | 1495 | smp_mb__after_atomic(); |
4d22de3e DLR |
1496 | |
1497 | if (should_restart_tx(q) && | |
1498 | test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped)) | |
1499 | goto again; | |
1500 | q->stops++; | |
1501 | } | |
1502 | ||
1503 | spin_unlock(&q->lock); | |
afefce66 | 1504 | wmb(); |
bea3348e | 1505 | t3_write_reg(qs->adap, A_SG_KDOORBELL, |
4d22de3e DLR |
1506 | F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id)); |
1507 | } | |
1508 | ||
14ab9892 DLR |
1509 | /* |
1510 | * Send a management message through control queue 0 | |
1511 | */ | |
1512 | int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb) | |
1513 | { | |
204e2f98 | 1514 | int ret; |
bc4b6b52 DLR |
1515 | local_bh_disable(); |
1516 | ret = ctrl_xmit(adap, &adap->sge.qs[0].txq[TXQ_CTRL], skb); | |
1517 | local_bh_enable(); | |
1518 | ||
1519 | return ret; | |
14ab9892 DLR |
1520 | } |
1521 | ||
99d7cf30 DLR |
1522 | /** |
1523 | * deferred_unmap_destructor - unmap a packet when it is freed | |
1524 | * @skb: the packet | |
1525 | * | |
1526 | * This is the packet destructor used for Tx packets that need to remain | |
1527 | * mapped until they are freed rather than until their Tx descriptors are | |
1528 | * freed. | |
1529 | */ | |
1530 | static void deferred_unmap_destructor(struct sk_buff *skb) | |
1531 | { | |
1532 | int i; | |
1533 | const dma_addr_t *p; | |
1534 | const struct skb_shared_info *si; | |
1535 | const struct deferred_unmap_info *dui; | |
99d7cf30 DLR |
1536 | |
1537 | dui = (struct deferred_unmap_info *)skb->head; | |
1538 | p = dui->addr; | |
1539 | ||
15dd16c2 | 1540 | if (skb_tail_pointer(skb) - skb_transport_header(skb)) |
be8b678c SH |
1541 | pci_unmap_single(dui->pdev, *p++, skb_tail_pointer(skb) - |
1542 | skb_transport_header(skb), PCI_DMA_TODEVICE); | |
99d7cf30 DLR |
1543 | |
1544 | si = skb_shinfo(skb); | |
1545 | for (i = 0; i < si->nr_frags; i++) | |
9e903e08 | 1546 | pci_unmap_page(dui->pdev, *p++, skb_frag_size(&si->frags[i]), |
99d7cf30 DLR |
1547 | PCI_DMA_TODEVICE); |
1548 | } | |
1549 | ||
1550 | static void setup_deferred_unmapping(struct sk_buff *skb, struct pci_dev *pdev, | |
1551 | const struct sg_ent *sgl, int sgl_flits) | |
1552 | { | |
1553 | dma_addr_t *p; | |
1554 | struct deferred_unmap_info *dui; | |
1555 | ||
1556 | dui = (struct deferred_unmap_info *)skb->head; | |
1557 | dui->pdev = pdev; | |
1558 | for (p = dui->addr; sgl_flits >= 3; sgl++, sgl_flits -= 3) { | |
1559 | *p++ = be64_to_cpu(sgl->addr[0]); | |
1560 | *p++ = be64_to_cpu(sgl->addr[1]); | |
1561 | } | |
1562 | if (sgl_flits) | |
1563 | *p = be64_to_cpu(sgl->addr[0]); | |
1564 | } | |
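/*
 * Layout note: a full struct sg_ent occupies 3 flits (8-byte words),
 * one flit of two 32-bit lengths followed by two 64-bit addresses, so
 * the loop above walks the SGL in 3-flit strides and records both
 * addresses of every full entry; a final odd entry contributes only
 * addr[0], which the trailing 'if (sgl_flits)' picks up.
 */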
1565 | ||
4d22de3e DLR |
1566 | /** |
1567 | * write_ofld_wr - write an offload work request | |
1568 | * @adap: the adapter | |
1569 | * @skb: the packet to send | |
1570 | * @q: the Tx queue | |
1571 | * @pidx: index of the first Tx descriptor to write | |
1572 | * @gen: the generation value to use | |
1573 | * @ndesc: number of descriptors the packet will occupy | |
1574 | * | |
1575 | * Write an offload work request to send the supplied packet. The packet | |
1576 | * data already carry the work request with most fields populated. | |
1577 | */ | |
1578 | static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb, | |
1579 | struct sge_txq *q, unsigned int pidx, | |
728e2cca | 1580 | unsigned int gen, unsigned int ndesc) |
4d22de3e DLR |
1581 | { |
1582 | unsigned int sgl_flits, flits; | |
1583 | struct work_request_hdr *from; | |
1584 | struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1]; | |
1585 | struct tx_desc *d = &q->desc[pidx]; | |
1586 | ||
1587 | if (immediate(skb)) { | |
1588 | q->sdesc[pidx].skb = NULL; | |
1589 | write_imm(d, skb, skb->len, gen); | |
1590 | return; | |
1591 | } | |
1592 | ||
1593 | /* Only TX_DATA builds SGLs */ | |
1594 | ||
1595 | from = (struct work_request_hdr *)skb->data; | |
ea2ae17d ACM |
1596 | memcpy(&d->flit[1], &from[1], |
1597 | skb_transport_offset(skb) - sizeof(*from)); | |
4d22de3e | 1598 | |
ea2ae17d | 1599 | flits = skb_transport_offset(skb) / 8; |
4d22de3e | 1600 | sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl; |
728e2cca | 1601 | sgl_flits = make_sgl(skb, sgp, skb_transport_header(skb), |
262e827f BH |
1602 | skb_tail_pointer(skb) - |
1603 | skb_transport_header(skb), | |
728e2cca | 1604 | adap->pdev); |
99d7cf30 DLR |
1605 | if (need_skb_unmap()) { |
1606 | setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits); | |
1607 | skb->destructor = deferred_unmap_destructor; | |
99d7cf30 | 1608 | } |
4d22de3e DLR |
1609 | |
1610 | write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, | |
1611 | gen, from->wr_hi, from->wr_lo); | |
1612 | } | |
1613 | ||
1614 | /** | |
1615 | * calc_tx_descs_ofld - calculate # of Tx descriptors for an offload packet | |
1616 | * @skb: the packet | |
1617 | * | |
1618 | * Returns the number of Tx descriptors needed for the given offload | |
1619 | * packet. These packets are already fully constructed. | |
1620 | */ | |
1621 | static inline unsigned int calc_tx_descs_ofld(const struct sk_buff *skb) | |
1622 | { | |
27186dc3 | 1623 | unsigned int flits, cnt; |
4d22de3e | 1624 | |
27186dc3 | 1625 | if (skb->len <= WR_LEN) |
4d22de3e DLR |
1626 | return 1; /* packet fits as immediate data */ |
1627 | ||
ea2ae17d | 1628 | flits = skb_transport_offset(skb) / 8; /* headers */ |
27186dc3 | 1629 | cnt = skb_shinfo(skb)->nr_frags; |
be8b678c | 1630 | if (skb_tail_pointer(skb) != skb_transport_header(skb)) |
4d22de3e DLR |
1631 | cnt++; |
1632 | return flits_to_desc(flits + sgl_len(cnt)); | |
1633 | } | |
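/*
 * Worked example (assuming sgl_len(n) = 3 * n / 2 + (n & 1), i.e. two
 * SGL entries per 3 flits): a packet with a 40-byte header area
 * (skb_transport_offset(skb) == 40), two page fragments, and no linear
 * data past the headers needs 40 / 8 = 5 header flits plus
 * sgl_len(2) = 3 SGL flits; flits_to_desc(8) then converts the total
 * into a descriptor count.
 */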
1634 | ||
1635 | /** | |
1636 | * ofld_xmit - send a packet through an offload queue | |
1637 | * @adap: the adapter | |
1638 | * @q: the Tx offload queue | |
1639 | * @skb: the packet | |
1640 | * | |
1641 | * Send an offload packet through an SGE offload queue. | |
1642 | */ | |
1643 | static int ofld_xmit(struct adapter *adap, struct sge_txq *q, | |
1644 | struct sk_buff *skb) | |
1645 | { | |
1646 | int ret; | |
1647 | unsigned int ndesc = calc_tx_descs_ofld(skb), pidx, gen; | |
1648 | ||
1649 | spin_lock(&q->lock); | |
42c8ea17 | 1650 | again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK); |
4d22de3e DLR |
1651 | |
1652 | ret = check_desc_avail(adap, q, skb, ndesc, TXQ_OFLD); | |
1653 | if (unlikely(ret)) { | |
1654 | if (ret == 1) { | |
1655 | skb->priority = ndesc; /* save for restart */ | |
1656 | spin_unlock(&q->lock); | |
1657 | return NET_XMIT_CN; | |
1658 | } | |
1659 | goto again; | |
1660 | } | |
1661 | ||
1662 | gen = q->gen; | |
1663 | q->in_use += ndesc; | |
1664 | pidx = q->pidx; | |
1665 | q->pidx += ndesc; | |
1666 | if (q->pidx >= q->size) { | |
1667 | q->pidx -= q->size; | |
1668 | q->gen ^= 1; | |
1669 | } | |
1670 | spin_unlock(&q->lock); | |
1671 | ||
728e2cca | 1672 | write_ofld_wr(adap, skb, q, pidx, gen, ndesc); |
4d22de3e DLR |
1673 | check_ring_tx_db(adap, q); |
1674 | return NET_XMIT_SUCCESS; | |
1675 | } | |
1676 | ||
1677 | /** | |
1678 | * restart_offloadq - restart a suspended offload queue | |
1679 | * @qs: the queue set containing the offload queue | |
1680 | * | |
1681 | * Resumes transmission on a suspended Tx offload queue. | |
1682 | */ | |
1683 | static void restart_offloadq(unsigned long data) | |
1684 | { | |
1685 | struct sk_buff *skb; | |
1686 | struct sge_qset *qs = (struct sge_qset *)data; | |
1687 | struct sge_txq *q = &qs->txq[TXQ_OFLD]; | |
5fbf816f DLR |
1688 | const struct port_info *pi = netdev_priv(qs->netdev); |
1689 | struct adapter *adap = pi->adapter; | |
4d22de3e DLR |
1690 | |
1691 | spin_lock(&q->lock); | |
42c8ea17 | 1692 | again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK); |
4d22de3e DLR |
1693 | |
1694 | while ((skb = skb_peek(&q->sendq)) != NULL) { | |
1695 | unsigned int gen, pidx; | |
1696 | unsigned int ndesc = skb->priority; | |
1697 | ||
1698 | if (unlikely(q->size - q->in_use < ndesc)) { | |
1699 | set_bit(TXQ_OFLD, &qs->txq_stopped); | |
4e857c58 | 1700 | smp_mb__after_atomic(); |
4d22de3e DLR |
1701 | |
1702 | if (should_restart_tx(q) && | |
1703 | test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped)) | |
1704 | goto again; | |
1705 | q->stops++; | |
1706 | break; | |
1707 | } | |
1708 | ||
1709 | gen = q->gen; | |
1710 | q->in_use += ndesc; | |
1711 | pidx = q->pidx; | |
1712 | q->pidx += ndesc; | |
1713 | if (q->pidx >= q->size) { | |
1714 | q->pidx -= q->size; | |
1715 | q->gen ^= 1; | |
1716 | } | |
1717 | __skb_unlink(skb, &q->sendq); | |
1718 | spin_unlock(&q->lock); | |
1719 | ||
728e2cca | 1720 | write_ofld_wr(adap, skb, q, pidx, gen, ndesc); |
4d22de3e DLR |
1721 | spin_lock(&q->lock); |
1722 | } | |
1723 | spin_unlock(&q->lock); | |
1724 | ||
1725 | #if USE_GTS | |
1726 | set_bit(TXQ_RUNNING, &q->flags); | |
1727 | set_bit(TXQ_LAST_PKT_DB, &q->flags); | |
1728 | #endif | |
afefce66 | 1729 | wmb(); |
728e2cca AK |
1730 | t3_write_reg(adap, A_SG_KDOORBELL, |
1731 | F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id)); | |
4d22de3e DLR |
1732 | } |
1733 | ||
1734 | /** | |
1735 | * queue_set - return the queue set a packet should use | |
1736 | * @skb: the packet | |
1737 | * | |
1738 | * Maps a packet to the SGE queue set it should use. The desired queue | |
1739 | * set is carried in bits 1-3 in the packet's priority. | |
1740 | */ | |
1741 | static inline int queue_set(const struct sk_buff *skb) | |
1742 | { | |
1743 | return skb->priority >> 1; | |
1744 | } | |
1745 | ||
1746 | /** | |
1747 | * is_ctrl_pkt - return whether an offload packet is a control packet | |
1748 | * @skb: the packet | |
1749 | * | |
1750 | * Determines whether an offload packet should use an OFLD or a CTRL | |
1751 | * Tx queue. This is indicated by bit 0 in the packet's priority. | |
1752 | */ | |
1753 | static inline int is_ctrl_pkt(const struct sk_buff *skb) | |
1754 | { | |
1755 | return skb->priority & 1; | |
1756 | } | |
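/*
 * Taken together with queue_set() above, this defines the priority
 * encoding consumed by t3_offload_tx() below.  Illustrative setting by
 * an offload client targeting queue set qsidx:
 *
 *	skb->priority = (qsidx << 1) | is_ctrl;    bit 0: CTRL vs OFLD
 */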
1757 | ||
1758 | /** | |
1759 | * t3_offload_tx - send an offload packet | |
1760 | * @tdev: the offload device to send to | |
1761 | * @skb: the packet | |
1762 | * | |
1763 | * Sends an offload packet. We use the packet priority to select the | |
1764 | * appropriate Tx queue as follows: bit 0 indicates whether the packet | |
1765 | * should be sent as regular or control, bits 1-3 select the queue set. | |
1766 | */ | |
1767 | int t3_offload_tx(struct t3cdev *tdev, struct sk_buff *skb) | |
1768 | { | |
1769 | struct adapter *adap = tdev2adap(tdev); | |
1770 | struct sge_qset *qs = &adap->sge.qs[queue_set(skb)]; | |
1771 | ||
1772 | if (unlikely(is_ctrl_pkt(skb))) | |
1773 | return ctrl_xmit(adap, &qs->txq[TXQ_CTRL], skb); | |
1774 | ||
1775 | return ofld_xmit(adap, &qs->txq[TXQ_OFLD], skb); | |
1776 | } | |
1777 | ||
1778 | /** | |
1779 | * offload_enqueue - add an offload packet to an SGE offload receive queue | |
1780 | * @q: the SGE response queue | |
1781 | * @skb: the packet | |
1782 | * | |
1783 | * Add a new offload packet to an SGE response queue's offload packet | |
1784 | * queue. If the packet is the first on the queue it schedules the RX | |
1785 | * softirq to process the queue. | |
1786 | */ | |
1787 | static inline void offload_enqueue(struct sge_rspq *q, struct sk_buff *skb) | |
1788 | { | |
147e70e6 DM |
1789 | int was_empty = skb_queue_empty(&q->rx_queue); |
1790 | ||
1791 | __skb_queue_tail(&q->rx_queue, skb); | |
1792 | ||
1793 | if (was_empty) { | |
4d22de3e DLR |
1794 | struct sge_qset *qs = rspq_to_qset(q); |
1795 | ||
bea3348e | 1796 | napi_schedule(&qs->napi); |
4d22de3e | 1797 | } |
4d22de3e DLR |
1798 | } |
1799 | ||
1800 | /** | |
1801 | * deliver_partial_bundle - deliver a (partial) bundle of Rx offload pkts | |
1802 | * @tdev: the offload device that will be receiving the packets | |
1803 | * @q: the SGE response queue that assembled the bundle | |
1804 | * @skbs: the partial bundle | |
1805 | * @n: the number of packets in the bundle | |
1806 | * | |
1807 | * Delivers a (partial) bundle of Rx offload packets to an offload device. | |
1808 | */ | |
1809 | static inline void deliver_partial_bundle(struct t3cdev *tdev, | |
1810 | struct sge_rspq *q, | |
1811 | struct sk_buff *skbs[], int n) | |
1812 | { | |
1813 | if (n) { | |
1814 | q->offload_bundles++; | |
1815 | tdev->recv(tdev, skbs, n); | |
1816 | } | |
1817 | } | |
1818 | ||
1819 | /** | |
1820 | * ofld_poll - NAPI handler for offload packets in interrupt mode | |
1821 | * @dev: the network device doing the polling | |
1822 | * @budget: polling budget | |
1823 | * | |
1824 | * The NAPI handler for offload packets when a response queue is serviced | |
1825 | * by the hard interrupt handler, i.e., when it's operating in non-polling | |
1826 | * mode. Creates small packet batches and sends them through the offload | |
1827 | * receive handler. Batches need to be of modest size as we do prefetches | |
1828 | * on the packets in each. | |
1829 | */ | |
bea3348e | 1830 | static int ofld_poll(struct napi_struct *napi, int budget) |
4d22de3e | 1831 | { |
bea3348e | 1832 | struct sge_qset *qs = container_of(napi, struct sge_qset, napi); |
4d22de3e | 1833 | struct sge_rspq *q = &qs->rspq; |
bea3348e SH |
1834 | struct adapter *adapter = qs->adap; |
1835 | int work_done = 0; | |
4d22de3e | 1836 | |
bea3348e | 1837 | while (work_done < budget) { |
147e70e6 DM |
1838 | struct sk_buff *skb, *tmp, *skbs[RX_BUNDLE_SIZE]; |
1839 | struct sk_buff_head queue; | |
4d22de3e DLR |
1840 | int ngathered; |
1841 | ||
1842 | spin_lock_irq(&q->lock); | |
147e70e6 DM |
1843 | __skb_queue_head_init(&queue); |
1844 | skb_queue_splice_init(&q->rx_queue, &queue); | |
1845 | if (skb_queue_empty(&queue)) { | |
bea3348e | 1846 | napi_complete(napi); |
4d22de3e | 1847 | spin_unlock_irq(&q->lock); |
bea3348e | 1848 | return work_done; |
4d22de3e | 1849 | } |
4d22de3e DLR |
1850 | spin_unlock_irq(&q->lock); |
1851 | ||
147e70e6 DM |
1852 | ngathered = 0; |
1853 | skb_queue_walk_safe(&queue, skb, tmp) { | |
1854 | if (work_done >= budget) | |
1855 | break; | |
1856 | work_done++; | |
1857 | ||
1858 | __skb_unlink(skb, &queue); | |
1859 | prefetch(skb->data); | |
1860 | skbs[ngathered] = skb; | |
4d22de3e DLR |
1861 | if (++ngathered == RX_BUNDLE_SIZE) { |
1862 | q->offload_bundles++; | |
1863 | adapter->tdev.recv(&adapter->tdev, skbs, | |
1864 | ngathered); | |
1865 | ngathered = 0; | |
1866 | } | |
1867 | } | |
147e70e6 DM |
1868 | if (!skb_queue_empty(&queue)) { |
1869 | /* splice remaining packets back onto Rx queue */ | |
4d22de3e | 1870 | spin_lock_irq(&q->lock); |
147e70e6 | 1871 | skb_queue_splice(&queue, &q->rx_queue); |
4d22de3e DLR |
1872 | spin_unlock_irq(&q->lock); |
1873 | } | |
1874 | deliver_partial_bundle(&adapter->tdev, q, skbs, ngathered); | |
1875 | } | |
bea3348e SH |
1876 | |
1877 | return work_done; | |
4d22de3e DLR |
1878 | } |
1879 | ||
1880 | /** | |
1881 | * rx_offload - process a received offload packet | |
1882 | * @tdev: the offload device receiving the packet | |
1883 | * @rq: the response queue that received the packet | |
1884 | * @skb: the packet | |
1885 | * @rx_gather: a gather list of packets if we are building a bundle | |
1886 | * @gather_idx: index of the next available slot in the bundle | |
1887 | * | |
1888 | * Process an ingress offload packet and add it to the offload ingress | |
1889 | * queue. Returns the index of the next available slot in the bundle. | |
1890 | */ | |
1891 | static inline int rx_offload(struct t3cdev *tdev, struct sge_rspq *rq, | |
1892 | struct sk_buff *skb, struct sk_buff *rx_gather[], | |
1893 | unsigned int gather_idx) | |
1894 | { | |
459a98ed | 1895 | skb_reset_mac_header(skb); |
c1d2bbe1 | 1896 | skb_reset_network_header(skb); |
badff6d0 | 1897 | skb_reset_transport_header(skb); |
4d22de3e DLR |
1898 | |
1899 | if (rq->polling) { | |
1900 | rx_gather[gather_idx++] = skb; | |
1901 | if (gather_idx == RX_BUNDLE_SIZE) { | |
1902 | tdev->recv(tdev, rx_gather, RX_BUNDLE_SIZE); | |
1903 | gather_idx = 0; | |
1904 | rq->offload_bundles++; | |
1905 | } | |
1906 | } else | |
1907 | offload_enqueue(rq, skb); | |
1908 | ||
1909 | return gather_idx; | |
1910 | } | |
1911 | ||
4d22de3e DLR |
1912 | /** |
1913 | * restart_tx - check whether to restart suspended Tx queues | |
1914 | * @qs: the queue set to resume | |
1915 | * | |
1916 | * Restarts suspended Tx queues of an SGE queue set if they have enough | |
1917 | * free resources to resume operation. | |
1918 | */ | |
1919 | static void restart_tx(struct sge_qset *qs) | |
1920 | { | |
1921 | if (test_bit(TXQ_ETH, &qs->txq_stopped) && | |
1922 | should_restart_tx(&qs->txq[TXQ_ETH]) && | |
1923 | test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) { | |
1924 | qs->txq[TXQ_ETH].restarts++; | |
1925 | if (netif_running(qs->netdev)) | |
82ad3329 | 1926 | netif_tx_wake_queue(qs->tx_q); |
4d22de3e DLR |
1927 | } |
1928 | ||
1929 | if (test_bit(TXQ_OFLD, &qs->txq_stopped) && | |
1930 | should_restart_tx(&qs->txq[TXQ_OFLD]) && | |
1931 | test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped)) { | |
1932 | qs->txq[TXQ_OFLD].restarts++; | |
1933 | tasklet_schedule(&qs->txq[TXQ_OFLD].qresume_tsk); | |
1934 | } | |
1935 | if (test_bit(TXQ_CTRL, &qs->txq_stopped) && | |
1936 | should_restart_tx(&qs->txq[TXQ_CTRL]) && | |
1937 | test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped)) { | |
1938 | qs->txq[TXQ_CTRL].restarts++; | |
1939 | tasklet_schedule(&qs->txq[TXQ_CTRL].qresume_tsk); | |
1940 | } | |
1941 | } | |
1942 | ||
a109a5b9 KX |
1943 | /** |
1944 | * cxgb3_arp_process - process an ARP request probing a private IP address | |
1945 | * @pi: the port on which the ARP request arrived | |
1946 | * @skb: the skbuff containing the ARP request | |
1947 | * | |
1948 | * Check if the ARP request is probing the private IP address | |
1949 | * dedicated to iSCSI and generate an ARP reply if so. | |
1950 | */ | |
f14d42f3 | 1951 | static void cxgb3_arp_process(struct port_info *pi, struct sk_buff *skb) |
a109a5b9 KX |
1952 | { |
1953 | struct net_device *dev = skb->dev; | |
a109a5b9 KX |
1954 | struct arphdr *arp; |
1955 | unsigned char *arp_ptr; | |
1956 | unsigned char *sha; | |
1957 | __be32 sip, tip; | |
1958 | ||
1959 | if (!dev) | |
1960 | return; | |
1961 | ||
1962 | skb_reset_network_header(skb); | |
1963 | arp = arp_hdr(skb); | |
1964 | ||
1965 | if (arp->ar_op != htons(ARPOP_REQUEST)) | |
1966 | return; | |
1967 | ||
1968 | arp_ptr = (unsigned char *)(arp + 1); | |
1969 | sha = arp_ptr; | |
1970 | arp_ptr += dev->addr_len; | |
1971 | memcpy(&sip, arp_ptr, sizeof(sip)); | |
1972 | arp_ptr += sizeof(sip); | |
1973 | arp_ptr += dev->addr_len; | |
1974 | memcpy(&tip, arp_ptr, sizeof(tip)); | |
1975 | ||
a109a5b9 KX |
1976 | if (tip != pi->iscsi_ipv4addr) |
1977 | return; | |
1978 | ||
1979 | arp_send(ARPOP_REPLY, ETH_P_ARP, sip, dev, tip, sha, | |
f14d42f3 | 1980 | pi->iscsic.mac_addr, sha); |
a109a5b9 KX |
1981 | |
1982 | } | |
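/*
 * Reference for the pointer walk above: an IPv4-over-Ethernet ARP
 * payload (RFC 826) lays out, after the arphdr,
 *
 *	sha (addr_len) | sip (4) | tha (addr_len) | tip (4)
 *
 * hence arp_ptr advances by dev->addr_len, sizeof(sip), and
 * dev->addr_len again before tip is read.
 */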
1983 | ||
1984 | static inline int is_arp(struct sk_buff *skb) | |
1985 | { | |
1986 | return skb->protocol == htons(ETH_P_ARP); | |
1987 | } | |
1988 | ||
f14d42f3 KX |
1989 | static void cxgb3_process_iscsi_prov_pack(struct port_info *pi, |
1990 | struct sk_buff *skb) | |
1991 | { | |
1992 | if (is_arp(skb)) { | |
1993 | cxgb3_arp_process(pi, skb); | |
1994 | return; | |
1995 | } | |
1996 | ||
1997 | if (pi->iscsic.recv) | |
1998 | pi->iscsic.recv(pi, skb); | |
1999 | ||
2000 | } | |
2001 | ||
4d22de3e DLR |
2002 | /** |
2003 | * rx_eth - process an ingress ethernet packet | |
2004 | * @adap: the adapter | |
2005 | * @rq: the response queue that received the packet | |
2006 | * @skb: the packet | |
2007 | * @pad: amount of padding at the start of the buffer | |
2008 | * | |
2009 | * Process an ingress ethernet packet and deliver it to the stack. | |
2010 | * The padding is 2 if the packet was delivered in an Rx buffer and 0 | |
2011 | * if it was immediate data in a response. | |
2012 | */ | |
2013 | static void rx_eth(struct adapter *adap, struct sge_rspq *rq, | |
b47385bd | 2014 | struct sk_buff *skb, int pad, int lro) |
4d22de3e DLR |
2015 | { |
2016 | struct cpl_rx_pkt *p = (struct cpl_rx_pkt *)(skb->data + pad); | |
b47385bd | 2017 | struct sge_qset *qs = rspq_to_qset(rq); |
4d22de3e DLR |
2018 | struct port_info *pi; |
2019 | ||
4d22de3e | 2020 | skb_pull(skb, sizeof(*p) + pad); |
4c13eb66 | 2021 | skb->protocol = eth_type_trans(skb, adap->port[p->iff]); |
4d22de3e | 2022 | pi = netdev_priv(skb->dev); |
d2fe2755 | 2023 | if ((skb->dev->features & NETIF_F_RXCSUM) && p->csum_valid && |
5e68b772 | 2024 | p->csum == htons(0xffff) && !p->fragment) { |
a109a5b9 | 2025 | qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++; |
4d22de3e DLR |
2026 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
2027 | } else | |
bc8acf2c | 2028 | skb_checksum_none_assert(skb); |
d6fe5f4e | 2029 | skb_record_rx_queue(skb, qs - &adap->sge.qs[pi->first_qset]); |
4d22de3e | 2030 | |
892ef5d8 | 2031 | if (p->vlan_valid) { |
b47385bd | 2032 | qs->port_stats[SGE_PSTAT_VLANEX]++; |
86a9bad3 | 2033 | __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(p->vlan)); |
892ef5d8 JP |
2034 | } |
2035 | if (rq->polling) { | |
b47385bd | 2036 | if (lro) |
7be2df45 | 2037 | napi_gro_receive(&qs->napi, skb); |
a109a5b9 | 2038 | else { |
f14d42f3 KX |
2039 | if (unlikely(pi->iscsic.flags)) |
2040 | cxgb3_process_iscsi_prov_pack(pi, skb); | |
b47385bd | 2041 | netif_receive_skb(skb); |
a109a5b9 | 2042 | } |
b47385bd | 2043 | } else |
4d22de3e DLR |
2044 | netif_rx(skb); |
2045 | } | |
2046 | ||
b47385bd DLR |
2047 | static inline int is_eth_tcp(u32 rss) |
2048 | { | |
2049 | return G_HASHTYPE(ntohl(rss)) == RSS_HASH_4_TUPLE; | |
2050 | } | |
2051 | ||
b47385bd DLR |
2052 | /** |
2053 | * lro_add_page - add a page chunk to an LRO session | |
2054 | * @adap: the adapter | |
2055 | * @qs: the associated queue set | |
2056 | * @fl: the free list containing the page chunk to add | |
2057 | * @len: packet length | |
2058 | * @complete: Indicates the last fragment of a frame | |
2059 | * | |
2060 | * Add a received packet contained in a page chunk to an existing LRO | |
2061 | * session. | |
2062 | */ | |
2063 | static void lro_add_page(struct adapter *adap, struct sge_qset *qs, | |
2064 | struct sge_fl *fl, int len, int complete) | |
2065 | { | |
2066 | struct rx_sw_desc *sd = &fl->sdesc[fl->cidx]; | |
2d171886 | 2067 | struct port_info *pi = netdev_priv(qs->netdev); |
76620aaf | 2068 | struct sk_buff *skb = NULL; |
b47385bd | 2069 | struct cpl_rx_pkt *cpl; |
76620aaf HX |
2070 | struct skb_frag_struct *rx_frag; |
2071 | int nr_frags; | |
b47385bd DLR |
2072 | int offset = 0; |
2073 | ||
76620aaf HX |
2074 | if (!qs->nomem) { |
2075 | skb = napi_get_frags(&qs->napi); | |
2076 | qs->nomem = !skb; | |
b47385bd DLR |
2077 | } |
2078 | ||
2079 | fl->credits--; | |
2080 | ||
5e68b772 | 2081 | pci_dma_sync_single_for_cpu(adap->pdev, |
56e3b9df | 2082 | dma_unmap_addr(sd, dma_addr), |
5e68b772 DLR |
2083 | fl->buf_size - SGE_PG_RSVD, |
2084 | PCI_DMA_FROMDEVICE); | |
2085 | ||
2086 | (*sd->pg_chunk.p_cnt)--; | |
70e3bb50 | 2087 | if (!*sd->pg_chunk.p_cnt && sd->pg_chunk.page != fl->pg_chunk.page) |
5e68b772 | 2088 | pci_unmap_page(adap->pdev, |
10b6d956 | 2089 | sd->pg_chunk.mapping, |
5e68b772 DLR |
2090 | fl->alloc_size, |
2091 | PCI_DMA_FROMDEVICE); | |
b47385bd | 2092 | |
76620aaf HX |
2093 | if (!skb) { |
2094 | put_page(sd->pg_chunk.page); | |
2095 | if (complete) | |
2096 | qs->nomem = 0; | |
2097 | return; | |
2098 | } | |
2099 | ||
2100 | rx_frag = skb_shinfo(skb)->frags; | |
2101 | nr_frags = skb_shinfo(skb)->nr_frags; | |
2102 | ||
2103 | if (!nr_frags) { | |
2104 | offset = 2 + sizeof(struct cpl_rx_pkt); | |
2d171886 DLR |
2105 | cpl = qs->lro_va = sd->pg_chunk.va + 2; |
2106 | ||
d2fe2755 | 2107 | if ((qs->netdev->features & NETIF_F_RXCSUM) && |
2d171886 DLR |
2108 | cpl->csum_valid && cpl->csum == htons(0xffff)) { |
2109 | skb->ip_summed = CHECKSUM_UNNECESSARY; | |
2110 | qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++; | |
2111 | } else | |
2112 | skb->ip_summed = CHECKSUM_NONE; | |
2113 | } else | |
2114 | cpl = qs->lro_va; | |
76620aaf | 2115 | |
2d171886 | 2116 | len -= offset; |
b2b964f0 | 2117 | |
b47385bd | 2118 | rx_frag += nr_frags; |
6a930b9f | 2119 | __skb_frag_set_page(rx_frag, sd->pg_chunk.page); |
b47385bd | 2120 | rx_frag->page_offset = sd->pg_chunk.offset + offset; |
9e903e08 | 2121 | skb_frag_size_set(rx_frag, len); |
b47385bd | 2122 | |
76620aaf HX |
2123 | skb->len += len; |
2124 | skb->data_len += len; | |
2125 | skb->truesize += len; | |
2126 | skb_shinfo(skb)->nr_frags++; | |
5e68b772 | 2127 | |
b47385bd DLR |
2128 | if (!complete) |
2129 | return; | |
2130 | ||
d6fe5f4e | 2131 | skb_record_rx_queue(skb, qs - &adap->sge.qs[pi->first_qset]); |
b47385bd | 2132 | |
72073ad2 VP |
2133 | if (cpl->vlan_valid) { |
2134 | qs->port_stats[SGE_PSTAT_VLANEX]++; | |
86a9bad3 | 2135 | __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(cpl->vlan)); |
72073ad2 | 2136 | } |
76620aaf | 2137 | napi_gro_frags(&qs->napi); |
b47385bd DLR |
2138 | } |
2139 | ||
4d22de3e DLR |
2140 | /** |
2141 | * handle_rsp_cntrl_info - handles control information in a response | |
2142 | * @qs: the queue set corresponding to the response | |
2143 | * @flags: the response control flags | |
4d22de3e DLR |
2144 | * |
2145 | * Handles the control information of an SGE response, such as GTS | |
2146 | * indications and completion credits for the queue set's Tx queues. | |
6195c71d | 2147 | * HW coalesces credits; we don't do any extra SW coalescing. |
4d22de3e | 2148 | */ |
6195c71d | 2149 | static inline void handle_rsp_cntrl_info(struct sge_qset *qs, u32 flags) |
4d22de3e DLR |
2150 | { |
2151 | unsigned int credits; | |
2152 | ||
2153 | #if USE_GTS | |
2154 | if (flags & F_RSPD_TXQ0_GTS) | |
2155 | clear_bit(TXQ_RUNNING, &qs->txq[TXQ_ETH].flags); | |
2156 | #endif | |
2157 | ||
4d22de3e DLR |
2158 | credits = G_RSPD_TXQ0_CR(flags); |
2159 | if (credits) | |
2160 | qs->txq[TXQ_ETH].processed += credits; | |
2161 | ||
6195c71d DLR |
2162 | credits = G_RSPD_TXQ2_CR(flags); |
2163 | if (credits) | |
2164 | qs->txq[TXQ_CTRL].processed += credits; | |
2165 | ||
4d22de3e DLR |
2166 | # if USE_GTS |
2167 | if (flags & F_RSPD_TXQ1_GTS) | |
2168 | clear_bit(TXQ_RUNNING, &qs->txq[TXQ_OFLD].flags); | |
2169 | # endif | |
6195c71d DLR |
2170 | credits = G_RSPD_TXQ1_CR(flags); |
2171 | if (credits) | |
2172 | qs->txq[TXQ_OFLD].processed += credits; | |
4d22de3e DLR |
2173 | } |
2174 | ||
2175 | /** | |
2176 | * check_ring_db - check if we need to ring any doorbells | |
2177 | * @adapter: the adapter | |
2178 | * @qs: the queue set whose Tx queues are to be examined | |
2179 | * @sleeping: indicates which Tx queue sent GTS | |
2180 | * | |
2181 | * Checks if some of a queue set's Tx queues need to ring their doorbells | |
2182 | * to resume transmission after idling while they still have unprocessed | |
2183 | * descriptors. | |
2184 | */ | |
2185 | static void check_ring_db(struct adapter *adap, struct sge_qset *qs, | |
2186 | unsigned int sleeping) | |
2187 | { | |
2188 | if (sleeping & F_RSPD_TXQ0_GTS) { | |
2189 | struct sge_txq *txq = &qs->txq[TXQ_ETH]; | |
2190 | ||
2191 | if (txq->cleaned + txq->in_use != txq->processed && | |
2192 | !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) { | |
2193 | set_bit(TXQ_RUNNING, &txq->flags); | |
2194 | t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX | | |
2195 | V_EGRCNTX(txq->cntxt_id)); | |
2196 | } | |
2197 | } | |
2198 | ||
2199 | if (sleeping & F_RSPD_TXQ1_GTS) { | |
2200 | struct sge_txq *txq = &qs->txq[TXQ_OFLD]; | |
2201 | ||
2202 | if (txq->cleaned + txq->in_use != txq->processed && | |
2203 | !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) { | |
2204 | set_bit(TXQ_RUNNING, &txq->flags); | |
2205 | t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX | | |
2206 | V_EGRCNTX(txq->cntxt_id)); | |
2207 | } | |
2208 | } | |
2209 | } | |
2210 | ||
2211 | /** | |
2212 | * is_new_response - check if a response is newly written | |
2213 | * @r: the response descriptor | |
2214 | * @q: the response queue | |
2215 | * | |
2216 | * Returns true if a response descriptor contains an as-yet unprocessed | |
2217 | * response. | |
2218 | */ | |
2219 | static inline int is_new_response(const struct rsp_desc *r, | |
2220 | const struct sge_rspq *q) | |
2221 | { | |
2222 | return (r->intr_gen & F_RSPD_GEN2) == q->gen; | |
2223 | } | |
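/*
 * Generation-bit protocol: the queue's gen is seeded to 1 at init and
 * flipped on every ring wrap (see the cidx handling in
 * process_responses() below), and the hardware stamps the current
 * generation into each response it writes.  A stale descriptor from the
 * previous pass still carries the old generation, so this single
 * comparison distinguishes new responses from already-processed ones.
 */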
2224 | ||
7385ecf3 DLR |
2225 | static inline void clear_rspq_bufstate(struct sge_rspq * const q) |
2226 | { | |
2227 | q->pg_skb = NULL; | |
2228 | q->rx_recycle_buf = 0; | |
2229 | } | |
2230 | ||
4d22de3e DLR |
2231 | #define RSPD_GTS_MASK (F_RSPD_TXQ0_GTS | F_RSPD_TXQ1_GTS) |
2232 | #define RSPD_CTRL_MASK (RSPD_GTS_MASK | \ | |
2233 | V_RSPD_TXQ0_CR(M_RSPD_TXQ0_CR) | \ | |
2234 | V_RSPD_TXQ1_CR(M_RSPD_TXQ1_CR) | \ | |
2235 | V_RSPD_TXQ2_CR(M_RSPD_TXQ2_CR)) | |
2236 | ||
2237 | /* How long to delay the next interrupt in case of memory shortage, in 0.1us. */ | |
2238 | #define NOMEM_INTR_DELAY 2500 | |
2239 | ||
2240 | /** | |
2241 | * process_responses - process responses from an SGE response queue | |
2242 | * @adap: the adapter | |
2243 | * @qs: the queue set to which the response queue belongs | |
2244 | * @budget: how many responses can be processed in this round | |
2245 | * | |
2246 | * Process responses from an SGE response queue up to the supplied budget. | |
2247 | * Responses include received packets as well as credits and other events | |
2248 | * for the queues that belong to the response queue's queue set. | |
2249 | * A negative budget is effectively unlimited. | |
2250 | * | |
2251 | * Additionally choose the interrupt holdoff time for the next interrupt | |
2252 | * on this queue. If the system is under memory shortage use a fairly | |
2253 | * long delay to help recovery. | |
2254 | */ | |
2255 | static int process_responses(struct adapter *adap, struct sge_qset *qs, | |
2256 | int budget) | |
2257 | { | |
2258 | struct sge_rspq *q = &qs->rspq; | |
2259 | struct rsp_desc *r = &q->desc[q->cidx]; | |
2260 | int budget_left = budget; | |
6195c71d | 2261 | unsigned int sleeping = 0; |
4d22de3e DLR |
2262 | struct sk_buff *offload_skbs[RX_BUNDLE_SIZE]; |
2263 | int ngathered = 0; | |
2264 | ||
2265 | q->next_holdoff = q->holdoff_tmr; | |
2266 | ||
2267 | while (likely(budget_left && is_new_response(r, q))) { | |
d2fe2755 MM |
2268 | int packet_complete, eth, ethpad = 2; |
2269 | int lro = !!(qs->netdev->features & NETIF_F_GRO); | |
4d22de3e | 2270 | struct sk_buff *skb = NULL; |
2e02644a DLR |
2271 | u32 len, flags; |
2272 | __be32 rss_hi, rss_lo; | |
4d22de3e | 2273 | |
019be1cf | 2274 | dma_rmb(); |
4d22de3e | 2275 | eth = r->rss_hdr.opcode == CPL_RX_PKT; |
2e02644a DLR |
2276 | rss_hi = *(const __be32 *)r; |
2277 | rss_lo = r->rss_hdr.rss_hash_val; | |
2278 | flags = ntohl(r->flags); | |
4d22de3e DLR |
2279 | |
2280 | if (unlikely(flags & F_RSPD_ASYNC_NOTIF)) { | |
2281 | skb = alloc_skb(AN_PKT_SIZE, GFP_ATOMIC); | |
2282 | if (!skb) | |
2283 | goto no_mem; | |
2284 | ||
2285 | memcpy(__skb_put(skb, AN_PKT_SIZE), r, AN_PKT_SIZE); | |
2286 | skb->data[0] = CPL_ASYNC_NOTIF; | |
2287 | rss_hi = htonl(CPL_ASYNC_NOTIF << 24); | |
2288 | q->async_notif++; | |
2289 | } else if (flags & F_RSPD_IMM_DATA_VALID) { | |
2290 | skb = get_imm_packet(r); | |
2291 | if (unlikely(!skb)) { | |
cf992af5 | 2292 | no_mem: |
4d22de3e DLR |
2293 | q->next_holdoff = NOMEM_INTR_DELAY; |
2294 | q->nomem++; | |
2295 | /* consume one credit since we tried */ | |
2296 | budget_left--; | |
2297 | break; | |
2298 | } | |
2299 | q->imm_data++; | |
e0994eb1 | 2300 | ethpad = 0; |
4d22de3e | 2301 | } else if ((len = ntohl(r->len_cq)) != 0) { |
cf992af5 | 2302 | struct sge_fl *fl; |
e0994eb1 | 2303 | |
65ab8385 | 2304 | lro &= eth && is_eth_tcp(rss_hi); |
b47385bd | 2305 | |
cf992af5 DLR |
2306 | fl = (len & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0]; |
2307 | if (fl->use_pages) { | |
2308 | void *addr = fl->sdesc[fl->cidx].pg_chunk.va; | |
e0994eb1 | 2309 | |
cf992af5 DLR |
2310 | prefetch(addr); |
2311 | #if L1_CACHE_BYTES < 128 | |
2312 | prefetch(addr + L1_CACHE_BYTES); | |
2313 | #endif | |
e0994eb1 | 2314 | __refill_fl(adap, fl); |
b47385bd DLR |
2315 | if (lro > 0) { |
2316 | lro_add_page(adap, qs, fl, | |
2317 | G_RSPD_LEN(len), | |
2318 | flags & F_RSPD_EOP); | |
2319 | goto next_fl; | |
2320 | } | |
e0994eb1 | 2321 | |
7385ecf3 DLR |
2322 | skb = get_packet_pg(adap, fl, q, |
2323 | G_RSPD_LEN(len), | |
2324 | eth ? | |
2325 | SGE_RX_DROP_THRES : 0); | |
2326 | q->pg_skb = skb; | |
cf992af5 | 2327 | } else |
e0994eb1 DLR |
2328 | skb = get_packet(adap, fl, G_RSPD_LEN(len), |
2329 | eth ? SGE_RX_DROP_THRES : 0); | |
cf992af5 DLR |
2330 | if (unlikely(!skb)) { |
2331 | if (!eth) | |
2332 | goto no_mem; | |
2333 | q->rx_drops++; | |
2334 | } else if (unlikely(r->rss_hdr.opcode == CPL_TRACE_PKT)) | |
2335 | __skb_pull(skb, 2); | |
b47385bd | 2336 | next_fl: |
4d22de3e DLR |
2337 | if (++fl->cidx == fl->size) |
2338 | fl->cidx = 0; | |
2339 | } else | |
2340 | q->pure_rsps++; | |
2341 | ||
2342 | if (flags & RSPD_CTRL_MASK) { | |
2343 | sleeping |= flags & RSPD_GTS_MASK; | |
6195c71d | 2344 | handle_rsp_cntrl_info(qs, flags); |
4d22de3e DLR |
2345 | } |
2346 | ||
2347 | r++; | |
2348 | if (unlikely(++q->cidx == q->size)) { | |
2349 | q->cidx = 0; | |
2350 | q->gen ^= 1; | |
2351 | r = q->desc; | |
2352 | } | |
2353 | prefetch(r); | |
2354 | ||
2355 | if (++q->credits >= (q->size / 4)) { | |
2356 | refill_rspq(adap, q, q->credits); | |
2357 | q->credits = 0; | |
2358 | } | |
2359 | ||
7385ecf3 DLR |
2360 | packet_complete = flags & |
2361 | (F_RSPD_EOP | F_RSPD_IMM_DATA_VALID | | |
2362 | F_RSPD_ASYNC_NOTIF); | |
2363 | ||
2364 | if (skb != NULL && packet_complete) { | |
4d22de3e | 2365 | if (eth) |
b47385bd | 2366 | rx_eth(adap, q, skb, ethpad, lro); |
4d22de3e | 2367 | else { |
afefce66 | 2368 | q->offload_pkts++; |
cf992af5 DLR |
2369 | /* Preserve the RSS info in csum & priority */ |
2370 | skb->csum = rss_hi; | |
2371 | skb->priority = rss_lo; | |
2372 | ngathered = rx_offload(&adap->tdev, q, skb, | |
2373 | offload_skbs, | |
e0994eb1 | 2374 | ngathered); |
4d22de3e | 2375 | } |
7385ecf3 DLR |
2376 | |
2377 | if (flags & F_RSPD_EOP) | |
b47385bd | 2378 | clear_rspq_bufstate(q); |
4d22de3e | 2379 | } |
4d22de3e DLR |
2380 | --budget_left; |
2381 | } | |
2382 | ||
4d22de3e | 2383 | deliver_partial_bundle(&adap->tdev, q, offload_skbs, ngathered); |
b47385bd | 2384 | |
4d22de3e DLR |
2385 | if (sleeping) |
2386 | check_ring_db(adap, qs, sleeping); | |
2387 | ||
2388 | smp_mb(); /* commit Tx queue .processed updates */ | |
2389 | if (unlikely(qs->txq_stopped != 0)) | |
2390 | restart_tx(qs); | |
2391 | ||
2392 | budget -= budget_left; | |
2393 | return budget; | |
2394 | } | |
2395 | ||
2396 | static inline int is_pure_response(const struct rsp_desc *r) | |
2397 | { | |
c5419e6f | 2398 | __be32 n = r->flags & htonl(F_RSPD_ASYNC_NOTIF | F_RSPD_IMM_DATA_VALID); |
4d22de3e DLR |
2399 | |
2400 | return (n | r->len_cq) == 0; | |
2401 | } | |
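/*
 * That is, a pure response carries no Rx data (len_cq == 0) and no
 * immediate or async payload; it only returns completion credits and
 * GTS state, which is why process_pure_responses() below can afford to
 * run directly in hard-IRQ context.
 */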
2402 | ||
2403 | /** | |
2404 | * napi_rx_handler - the NAPI handler for Rx processing | |
bea3348e | 2405 | * @napi: the napi instance |
4d22de3e DLR |
2406 | * @budget: how many packets we can process in this round |
2407 | * | |
2408 | * Handler for new data events when using NAPI. | |
2409 | */ | |
bea3348e | 2410 | static int napi_rx_handler(struct napi_struct *napi, int budget) |
4d22de3e | 2411 | { |
bea3348e SH |
2412 | struct sge_qset *qs = container_of(napi, struct sge_qset, napi); |
2413 | struct adapter *adap = qs->adap; | |
2414 | int work_done = process_responses(adap, qs, budget); | |
4d22de3e | 2415 | |
bea3348e SH |
2416 | if (likely(work_done < budget)) { |
2417 | napi_complete(napi); | |
4d22de3e | 2418 | |
bea3348e SH |
2419 | /* |
2420 | * Because we don't atomically flush the following | |
2421 | * write it is possible that in very rare cases it can | |
2422 | * reach the device in a way that races with a new | |
2423 | * response being written plus an error interrupt | |
2424 | * causing the NAPI interrupt handler below to return | |
2425 | * unhandled status to the OS. To protect against | |
2426 | * this would require flushing the write and doing | |
2427 | * both the write and the flush with interrupts off. | |
2428 | * Way too expensive and unjustifiable given the | |
2429 | * rarity of the race. | |
2430 | * | |
2431 | * The race cannot happen at all with MSI-X. | |
2432 | */ | |
2433 | t3_write_reg(adap, A_SG_GTS, V_RSPQ(qs->rspq.cntxt_id) | | |
2434 | V_NEWTIMER(qs->rspq.next_holdoff) | | |
2435 | V_NEWINDEX(qs->rspq.cidx)); | |
2436 | } | |
2437 | return work_done; | |
4d22de3e DLR |
2438 | } |
2439 | ||
2440 | /* | |
2441 | * Returns true if the device is already scheduled for polling. | |
2442 | */ | |
bea3348e | 2443 | static inline int napi_is_scheduled(struct napi_struct *napi) |
4d22de3e | 2444 | { |
bea3348e | 2445 | return test_bit(NAPI_STATE_SCHED, &napi->state); |
4d22de3e DLR |
2446 | } |
2447 | ||
2448 | /** | |
2449 | * process_pure_responses - process pure responses from a response queue | |
2450 | * @adap: the adapter | |
2451 | * @qs: the queue set owning the response queue | |
2452 | * @r: the first pure response to process | |
2453 | * | |
2454 | * A simpler version of process_responses() that handles only pure (i.e., | |
2455 | * non data-carrying) responses. Such responses are too lightweight to | |
2456 | * justify calling a softirq under NAPI, so we handle them specially in | |
2457 | * the interrupt handler. The function is called with a pointer to a | |
2458 | * response, which the caller must ensure is a valid pure response. | |
2459 | * | |
2460 | * Returns 1 if it encounters a valid data-carrying response, 0 otherwise. | |
2461 | */ | |
2462 | static int process_pure_responses(struct adapter *adap, struct sge_qset *qs, | |
2463 | struct rsp_desc *r) | |
2464 | { | |
2465 | struct sge_rspq *q = &qs->rspq; | |
6195c71d | 2466 | unsigned int sleeping = 0; |
4d22de3e DLR |
2467 | |
2468 | do { | |
2469 | u32 flags = ntohl(r->flags); | |
2470 | ||
2471 | r++; | |
2472 | if (unlikely(++q->cidx == q->size)) { | |
2473 | q->cidx = 0; | |
2474 | q->gen ^= 1; | |
2475 | r = q->desc; | |
2476 | } | |
2477 | prefetch(r); | |
2478 | ||
2479 | if (flags & RSPD_CTRL_MASK) { | |
2480 | sleeping |= flags & RSPD_GTS_MASK; | |
6195c71d | 2481 | handle_rsp_cntrl_info(qs, flags); |
4d22de3e DLR |
2482 | } |
2483 | ||
2484 | q->pure_rsps++; | |
2485 | if (++q->credits >= (q->size / 4)) { | |
2486 | refill_rspq(adap, q, q->credits); | |
2487 | q->credits = 0; | |
2488 | } | |
2e02644a DLR |
2489 | if (!is_new_response(r, q)) |
2490 | break; | |
019be1cf | 2491 | dma_rmb(); |
2e02644a | 2492 | } while (is_pure_response(r)); |
4d22de3e | 2493 | |
4d22de3e DLR |
2494 | if (sleeping) |
2495 | check_ring_db(adap, qs, sleeping); | |
2496 | ||
2497 | smp_mb(); /* commit Tx queue .processed updates */ | |
2498 | if (unlikely(qs->txq_stopped != 0)) | |
2499 | restart_tx(qs); | |
2500 | ||
2501 | return is_new_response(r, q); | |
2502 | } | |
2503 | ||
2504 | /** | |
2505 | * handle_responses - decide what to do with new responses in NAPI mode | |
2506 | * @adap: the adapter | |
2507 | * @q: the response queue | |
2508 | * | |
2509 | * This is used by the NAPI interrupt handlers to decide what to do with | |
2510 | * new SGE responses. If there are no new responses it returns -1. If | |
2511 | * there are new responses and they are pure (i.e., non-data carrying) | |
2512 | * it handles them straight in hard interrupt context as they are very | |
2513 | * cheap and don't deliver any packets. Finally, if there are any data | |
2514 | * signaling responses it schedules the NAPI handler. Returns 1 if it | |
2515 | * schedules NAPI, 0 if all new responses were pure. | |
2516 | * | |
2517 | * The caller must ascertain NAPI is not already running. | |
2518 | */ | |
2519 | static inline int handle_responses(struct adapter *adap, struct sge_rspq *q) | |
2520 | { | |
2521 | struct sge_qset *qs = rspq_to_qset(q); | |
2522 | struct rsp_desc *r = &q->desc[q->cidx]; | |
2523 | ||
2524 | if (!is_new_response(r, q)) | |
2525 | return -1; | |
019be1cf | 2526 | dma_rmb(); |
4d22de3e DLR |
2527 | if (is_pure_response(r) && process_pure_responses(adap, qs, r) == 0) { |
2528 | t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) | | |
2529 | V_NEWTIMER(q->holdoff_tmr) | V_NEWINDEX(q->cidx)); | |
2530 | return 0; | |
2531 | } | |
bea3348e | 2532 | napi_schedule(&qs->napi); |
4d22de3e DLR |
2533 | return 1; |
2534 | } | |
2535 | ||
2536 | /* | |
2537 | * The MSI-X interrupt handler for an SGE response queue for the non-NAPI case | |
2538 | * (i.e., response queue serviced in hard interrupt). | |
2539 | */ | |
a5190b4e | 2540 | static irqreturn_t t3_sge_intr_msix(int irq, void *cookie) |
4d22de3e DLR |
2541 | { |
2542 | struct sge_qset *qs = cookie; | |
bea3348e | 2543 | struct adapter *adap = qs->adap; |
4d22de3e DLR |
2544 | struct sge_rspq *q = &qs->rspq; |
2545 | ||
2546 | spin_lock(&q->lock); | |
2547 | if (process_responses(adap, qs, -1) == 0) | |
2548 | q->unhandled_irqs++; | |
2549 | t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) | | |
2550 | V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx)); | |
2551 | spin_unlock(&q->lock); | |
2552 | return IRQ_HANDLED; | |
2553 | } | |
2554 | ||
2555 | /* | |
2556 | * The MSI-X interrupt handler for an SGE response queue for the NAPI case | |
2557 | * (i.e., response queue serviced by NAPI polling). | |
2558 | */ | |
9265fabf | 2559 | static irqreturn_t t3_sge_intr_msix_napi(int irq, void *cookie) |
4d22de3e DLR |
2560 | { |
2561 | struct sge_qset *qs = cookie; | |
4d22de3e DLR |
2562 | struct sge_rspq *q = &qs->rspq; |
2563 | ||
2564 | spin_lock(&q->lock); | |
4d22de3e | 2565 | |
bea3348e | 2566 | if (handle_responses(qs->adap, q) < 0) |
4d22de3e DLR |
2567 | q->unhandled_irqs++; |
2568 | spin_unlock(&q->lock); | |
2569 | return IRQ_HANDLED; | |
2570 | } | |
2571 | ||
2572 | /* | |
2573 | * The non-NAPI MSI interrupt handler. This needs to handle data events from | |
2574 | * SGE response queues as well as error and other async events as they all use | |
2575 | * the same MSI vector. We use one SGE response queue per port in this mode | |
2576 | * and protect all response queues with queue 0's lock. | |
2577 | */ | |
2578 | static irqreturn_t t3_intr_msi(int irq, void *cookie) | |
2579 | { | |
2580 | int new_packets = 0; | |
2581 | struct adapter *adap = cookie; | |
2582 | struct sge_rspq *q = &adap->sge.qs[0].rspq; | |
2583 | ||
2584 | spin_lock(&q->lock); | |
2585 | ||
2586 | if (process_responses(adap, &adap->sge.qs[0], -1)) { | |
2587 | t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) | | |
2588 | V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx)); | |
2589 | new_packets = 1; | |
2590 | } | |
2591 | ||
2592 | if (adap->params.nports == 2 && | |
2593 | process_responses(adap, &adap->sge.qs[1], -1)) { | |
2594 | struct sge_rspq *q1 = &adap->sge.qs[1].rspq; | |
2595 | ||
2596 | t3_write_reg(adap, A_SG_GTS, V_RSPQ(q1->cntxt_id) | | |
2597 | V_NEWTIMER(q1->next_holdoff) | | |
2598 | V_NEWINDEX(q1->cidx)); | |
2599 | new_packets = 1; | |
2600 | } | |
2601 | ||
2602 | if (!new_packets && t3_slow_intr_handler(adap) == 0) | |
2603 | q->unhandled_irqs++; | |
2604 | ||
2605 | spin_unlock(&q->lock); | |
2606 | return IRQ_HANDLED; | |
2607 | } | |
2608 | ||
bea3348e | 2609 | static int rspq_check_napi(struct sge_qset *qs) |
4d22de3e | 2610 | { |
bea3348e SH |
2611 | struct sge_rspq *q = &qs->rspq; |
2612 | ||
2613 | if (!napi_is_scheduled(&qs->napi) && | |
2614 | is_new_response(&q->desc[q->cidx], q)) { | |
2615 | napi_schedule(&qs->napi); | |
4d22de3e DLR |
2616 | return 1; |
2617 | } | |
2618 | return 0; | |
2619 | } | |
2620 | ||
2621 | /* | |
2622 | * The MSI interrupt handler for the NAPI case (i.e., response queues serviced | |
2623 | * by NAPI polling). Handles data events from SGE response queues as well as | |
2624 | * error and other async events as they all use the same MSI vector. We use | |
2625 | * one SGE response queue per port in this mode and protect all response | |
2626 | * queues with queue 0's lock. | |
2627 | */ | |
9265fabf | 2628 | static irqreturn_t t3_intr_msi_napi(int irq, void *cookie) |
4d22de3e DLR |
2629 | { |
2630 | int new_packets; | |
2631 | struct adapter *adap = cookie; | |
2632 | struct sge_rspq *q = &adap->sge.qs[0].rspq; | |
2633 | ||
2634 | spin_lock(&q->lock); | |
2635 | ||
bea3348e | 2636 | new_packets = rspq_check_napi(&adap->sge.qs[0]); |
4d22de3e | 2637 | if (adap->params.nports == 2) |
bea3348e | 2638 | new_packets += rspq_check_napi(&adap->sge.qs[1]); |
4d22de3e DLR |
2639 | if (!new_packets && t3_slow_intr_handler(adap) == 0) |
2640 | q->unhandled_irqs++; | |
2641 | ||
2642 | spin_unlock(&q->lock); | |
2643 | return IRQ_HANDLED; | |
2644 | } | |
2645 | ||
2646 | /* | |
2647 | * A helper function that processes responses and issues GTS. | |
2648 | */ | |
2649 | static inline int process_responses_gts(struct adapter *adap, | |
2650 | struct sge_rspq *rq) | |
2651 | { | |
2652 | int work; | |
2653 | ||
2654 | work = process_responses(adap, rspq_to_qset(rq), -1); | |
2655 | t3_write_reg(adap, A_SG_GTS, V_RSPQ(rq->cntxt_id) | | |
2656 | V_NEWTIMER(rq->next_holdoff) | V_NEWINDEX(rq->cidx)); | |
2657 | return work; | |
2658 | } | |
2659 | ||
2660 | /* | |
2661 | * The legacy INTx interrupt handler. This needs to handle data events from | |
2662 | * SGE response queues as well as error and other async events as they all use | |
2663 | * the same interrupt pin. We use one SGE response queue per port in this mode | |
2664 | * and protect all response queues with queue 0's lock. | |
2665 | */ | |
2666 | static irqreturn_t t3_intr(int irq, void *cookie) | |
2667 | { | |
2668 | int work_done, w0, w1; | |
2669 | struct adapter *adap = cookie; | |
2670 | struct sge_rspq *q0 = &adap->sge.qs[0].rspq; | |
2671 | struct sge_rspq *q1 = &adap->sge.qs[1].rspq; | |
2672 | ||
2673 | spin_lock(&q0->lock); | |
2674 | ||
2675 | w0 = is_new_response(&q0->desc[q0->cidx], q0); | |
2676 | w1 = adap->params.nports == 2 && | |
2677 | is_new_response(&q1->desc[q1->cidx], q1); | |
2678 | ||
2679 | if (likely(w0 | w1)) { | |
2680 | t3_write_reg(adap, A_PL_CLI, 0); | |
2681 | t3_read_reg(adap, A_PL_CLI); /* flush */ | |
2682 | ||
2683 | if (likely(w0)) | |
2684 | process_responses_gts(adap, q0); | |
2685 | ||
2686 | if (w1) | |
2687 | process_responses_gts(adap, q1); | |
2688 | ||
2689 | work_done = w0 | w1; | |
2690 | } else | |
2691 | work_done = t3_slow_intr_handler(adap); | |
2692 | ||
2693 | spin_unlock(&q0->lock); | |
2694 | return IRQ_RETVAL(work_done != 0); | |
2695 | } | |
2696 | ||
2697 | /* | |
2698 | * Interrupt handler for legacy INTx interrupts for T3B-based cards. | |
2699 | * Handles data events from SGE response queues as well as error and other | |
2700 | * async events as they all use the same interrupt pin. We use one SGE | |
2701 | * response queue per port in this mode and protect all response queues with | |
2702 | * queue 0's lock. | |
2703 | */ | |
2704 | static irqreturn_t t3b_intr(int irq, void *cookie) | |
2705 | { | |
2706 | u32 map; | |
2707 | struct adapter *adap = cookie; | |
2708 | struct sge_rspq *q0 = &adap->sge.qs[0].rspq; | |
2709 | ||
2710 | t3_write_reg(adap, A_PL_CLI, 0); | |
2711 | map = t3_read_reg(adap, A_SG_DATA_INTR); | |
2712 | ||
2713 | if (unlikely(!map)) /* shared interrupt, most likely */ | |
2714 | return IRQ_NONE; | |
2715 | ||
2716 | spin_lock(&q0->lock); | |
2717 | ||
2718 | if (unlikely(map & F_ERRINTR)) | |
2719 | t3_slow_intr_handler(adap); | |
2720 | ||
2721 | if (likely(map & 1)) | |
2722 | process_responses_gts(adap, q0); | |
2723 | ||
2724 | if (map & 2) | |
2725 | process_responses_gts(adap, &adap->sge.qs[1].rspq); | |
2726 | ||
2727 | spin_unlock(&q0->lock); | |
2728 | return IRQ_HANDLED; | |
2729 | } | |
2730 | ||
2731 | /* | |
2732 | * NAPI interrupt handler for legacy INTx interrupts for T3B-based cards. | |
2733 | * Handles data events from SGE response queues as well as error and other | |
2734 | * async events as they all use the same interrupt pin. We use one SGE | |
2735 | * response queue per port in this mode and protect all response queues with | |
2736 | * queue 0's lock. | |
2737 | */ | |
2738 | static irqreturn_t t3b_intr_napi(int irq, void *cookie) | |
2739 | { | |
2740 | u32 map; | |
4d22de3e | 2741 | struct adapter *adap = cookie; |
bea3348e SH |
2742 | struct sge_qset *qs0 = &adap->sge.qs[0]; |
2743 | struct sge_rspq *q0 = &qs0->rspq; | |
4d22de3e DLR |
2744 | |
2745 | t3_write_reg(adap, A_PL_CLI, 0); | |
2746 | map = t3_read_reg(adap, A_SG_DATA_INTR); | |
2747 | ||
2748 | if (unlikely(!map)) /* shared interrupt, most likely */ | |
2749 | return IRQ_NONE; | |
2750 | ||
2751 | spin_lock(&q0->lock); | |
2752 | ||
2753 | if (unlikely(map & F_ERRINTR)) | |
2754 | t3_slow_intr_handler(adap); | |
2755 | ||
bea3348e SH |
2756 | if (likely(map & 1)) |
2757 | napi_schedule(&qs0->napi); | |
4d22de3e | 2758 | |
bea3348e SH |
2759 | if (map & 2) |
2760 | napi_schedule(&adap->sge.qs[1].napi); | |
4d22de3e DLR |
2761 | |
2762 | spin_unlock(&q0->lock); | |
2763 | return IRQ_HANDLED; | |
2764 | } | |
2765 | ||
2766 | /** | |
2767 | * t3_intr_handler - select the top-level interrupt handler | |
2768 | * @adap: the adapter | |
2769 | * @polling: whether using NAPI to service response queues | |
2770 | * | |
2771 | * Selects the top-level interrupt handler based on the type of interrupts | |
2772 | * (MSI-X, MSI, or legacy) and whether NAPI will be used to service the | |
2773 | * response queues. | |
2774 | */ | |
7c239975 | 2775 | irq_handler_t t3_intr_handler(struct adapter *adap, int polling) |
4d22de3e DLR |
2776 | { |
2777 | if (adap->flags & USING_MSIX) | |
2778 | return polling ? t3_sge_intr_msix_napi : t3_sge_intr_msix; | |
2779 | if (adap->flags & USING_MSI) | |
2780 | return polling ? t3_intr_msi_napi : t3_intr_msi; | |
2781 | if (adap->params.rev > 0) | |
2782 | return polling ? t3b_intr_napi : t3b_intr; | |
2783 | return t3_intr; | |
2784 | } | |
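/*
 * Selection summary (read off the checks above):
 *
 *	MSI-X           -> t3_sge_intr_msix{,_napi}, one vector per qset
 *	MSI             -> t3_intr_msi{,_napi}
 *	INTx, rev > 0   -> t3b_intr{,_napi}, reads A_SG_DATA_INTR
 *	INTx, rev 0     -> t3_intr, probes both response queues
 */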
2785 | ||
b881955b DLR |
2786 | #define SGE_PARERR (F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \ |
2787 | F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \ | |
2788 | V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \ | |
2789 | F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \ | |
2790 | F_HIRCQPARITYERROR) | |
2791 | #define SGE_FRAMINGERR (F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR) | |
2792 | #define SGE_FATALERR (SGE_PARERR | SGE_FRAMINGERR | F_RSPQCREDITOVERFOW | \ | |
2793 | F_RSPQDISABLED) | |
2794 | ||
4d22de3e DLR |
2795 | /** |
2796 | * t3_sge_err_intr_handler - SGE async event interrupt handler | |
2797 | * @adapter: the adapter | |
2798 | * | |
2799 | * Interrupt handler for SGE asynchronous (non-data) events. | |
2800 | */ | |
2801 | void t3_sge_err_intr_handler(struct adapter *adapter) | |
2802 | { | |
fc882196 DLR |
2803 | unsigned int v, status = t3_read_reg(adapter, A_SG_INT_CAUSE) & |
2804 | ~F_FLEMPTY; | |
4d22de3e | 2805 | |
b881955b DLR |
2806 | if (status & SGE_PARERR) |
2807 | CH_ALERT(adapter, "SGE parity error (0x%x)\n", | |
2808 | status & SGE_PARERR); | |
2809 | if (status & SGE_FRAMINGERR) | |
2810 | CH_ALERT(adapter, "SGE framing error (0x%x)\n", | |
2811 | status & SGE_FRAMINGERR); | |
2812 | ||
4d22de3e DLR |
2813 | if (status & F_RSPQCREDITOVERFOW) |
2814 | CH_ALERT(adapter, "SGE response queue credit overflow\n"); | |
2815 | ||
2816 | if (status & F_RSPQDISABLED) { | |
2817 | v = t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS); | |
2818 | ||
2819 | CH_ALERT(adapter, | |
2820 | "packet delivered to disabled response queue " | |
2821 | "(0x%x)\n", (v >> S_RSPQ0DISABLED) & 0xff); | |
2822 | } | |
2823 | ||
6e3f03b7 | 2824 | if (status & (F_HIPIODRBDROPERR | F_LOPIODRBDROPERR)) |
e998f245 SW |
2825 | queue_work(cxgb3_wq, &adapter->db_drop_task); |
2826 | ||
2827 | if (status & (F_HIPRIORITYDBFULL | F_LOPRIORITYDBFULL)) | |
2828 | queue_work(cxgb3_wq, &adapter->db_full_task); | |
2829 | ||
2830 | if (status & (F_HIPRIORITYDBEMPTY | F_LOPRIORITYDBEMPTY)) | |
2831 | queue_work(cxgb3_wq, &adapter->db_empty_task); | |
6e3f03b7 | 2832 | |
4d22de3e | 2833 | t3_write_reg(adapter, A_SG_INT_CAUSE, status); |
b881955b | 2834 | if (status & SGE_FATALERR) |
4d22de3e DLR |
2835 | t3_fatal_err(adapter); |
2836 | } | |
2837 | ||
2838 | /** | |
42c8ea17 | 2839 | * sge_timer_tx - perform periodic maintenance of an SGE qset |
4d22de3e DLR |
2840 | * @data: the SGE queue set to maintain |
2841 | * | |
2842 | * Runs periodically from a timer to perform maintenance of an SGE queue | |
2843 | * set. It performs two tasks: | |
2844 | * | |
42c8ea17 | 2845 | * Cleans up any completed Tx descriptors that may still be pending. |
4d22de3e DLR |
2846 | * Normal descriptor cleanup happens when new packets are added to a Tx |
2847 | * queue so this timer is relatively infrequent and does any cleanup only | |
2848 | * if the Tx queue has not seen any new packets in a while. We make a | |
2849 | * best effort attempt to reclaim descriptors, in that we don't wait | |
2850 | * around if we cannot get a queue's lock (which most likely is because | |
2851 | * someone else is queueing new packets and so will also handle the clean | |
2852 | * up). Since control queues use immediate data exclusively we don't | |
2853 | * bother cleaning them up here. | |
2854 | * | |
4d22de3e | 2855 | */ |
42c8ea17 | 2856 | static void sge_timer_tx(unsigned long data) |
4d22de3e | 2857 | { |
4d22de3e | 2858 | struct sge_qset *qs = (struct sge_qset *)data; |
42c8ea17 DLR |
2859 | struct port_info *pi = netdev_priv(qs->netdev); |
2860 | struct adapter *adap = pi->adapter; | |
2861 | unsigned int tbd[SGE_TXQ_PER_SET] = {0, 0}; | |
2862 | unsigned long next_period; | |
4d22de3e | 2863 | |
c3a8c5b6 DLR |
2864 | if (__netif_tx_trylock(qs->tx_q)) { |
2865 | tbd[TXQ_ETH] = reclaim_completed_tx(adap, &qs->txq[TXQ_ETH], | |
2866 | TX_RECLAIM_TIMER_CHUNK); | |
2867 | __netif_tx_unlock(qs->tx_q); | |
4d22de3e | 2868 | } |
c3a8c5b6 | 2869 | |
4d22de3e | 2870 | if (spin_trylock(&qs->txq[TXQ_OFLD].lock)) { |
42c8ea17 DLR |
2871 | tbd[TXQ_OFLD] = reclaim_completed_tx(adap, &qs->txq[TXQ_OFLD], |
2872 | TX_RECLAIM_TIMER_CHUNK); | |
4d22de3e DLR |
2873 | spin_unlock(&qs->txq[TXQ_OFLD].lock); |
2874 | } | |
42c8ea17 DLR |
2875 | |
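	/*
	 * Back off adaptively: every TX_RECLAIM_TIMER_CHUNK descriptors
	 * reclaimed above halves the next period, so busy queues are
	 * revisited sooner and idle ones less often.
	 */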
2876 | next_period = TX_RECLAIM_PERIOD >> | |
c3a8c5b6 DLR |
2877 | (max(tbd[TXQ_ETH], tbd[TXQ_OFLD]) / |
2878 | TX_RECLAIM_TIMER_CHUNK); | |
42c8ea17 DLR |
2879 | mod_timer(&qs->tx_reclaim_timer, jiffies + next_period); |
2880 | } | |
2881 | ||
49ce9c2c | 2882 | /** |
42c8ea17 DLR |
2883 | * sge_timer_rx - perform periodic maintenance of an SGE qset |
2884 | * @data: the SGE queue set to maintain | |
2885 | * | |
2886 | * a) Replenishes Rx queues that have run out due to memory shortage. | |
2887 | * Normally new Rx buffers are added when existing ones are consumed, but | |
2888 | * when out of memory a queue can become empty. We try to add only a few | |
2889 | * buffers here; the queue will be replenished fully as these new buffers | |
2890 | * are used up if memory shortage has subsided. | |
2891 | * | |
2892 | * b) Returns coalesced response queue credits in case a response queue is | |
2893 | * starved. | |
2894 | * | |
2895 | */ | |
2896 | static void sge_timer_rx(unsigned long data) | |
2897 | { | |
2898 | spinlock_t *lock; | |
2899 | struct sge_qset *qs = (struct sge_qset *)data; | |
2900 | struct port_info *pi = netdev_priv(qs->netdev); | |
2901 | struct adapter *adap = pi->adapter; | |
2902 | u32 status; | |
2903 | ||
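	/*
	 * Rev 0 parts funnel response processing through queue set 0,
	 * so on those chips qs[0]'s response queue lock is the one that
	 * protects us here; later revisions lock per queue set.
	 */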
2904 | lock = adap->params.rev > 0 ? | |
2905 | &qs->rspq.lock : &adap->sge.qs[0].rspq.lock; | |
2906 | ||
2907 | if (!spin_trylock_irq(lock)) | |
2908 | goto out; | |
2909 | ||
2910 | if (napi_is_scheduled(&qs->napi)) | |
2911 | goto unlock; | |
2912 | ||
2913 | if (adap->params.rev < 4) { | |
2914 | status = t3_read_reg(adap, A_SG_RSPQ_FL_STATUS); | |
2915 | ||
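		/*
		 * The hardware sets a queue's bit here when the queue
		 * ran out of response credits.  If we are holding back
		 * coalesced credits, return one and clear the bit to
		 * restart the queue.
		 */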
2916 | if (status & (1 << qs->rspq.cntxt_id)) { | |
2917 | qs->rspq.starved++; | |
2918 | if (qs->rspq.credits) { | |
2919 | qs->rspq.credits--; | |
2920 | refill_rspq(adap, &qs->rspq, 1); | |
2921 | qs->rspq.restarted++; | |
2922 | t3_write_reg(adap, A_SG_RSPQ_FL_STATUS, | |
2923 | 1 << qs->rspq.cntxt_id); | |
bae73f44 | 2924 | } |
4d22de3e | 2925 | } |
4d22de3e | 2926 | } |
42c8ea17 DLR |
2927 | |
2928 | if (qs->fl[0].credits < qs->fl[0].size) | |
2929 | __refill_fl(adap, &qs->fl[0]); | |
2930 | if (qs->fl[1].credits < qs->fl[1].size) | |
2931 | __refill_fl(adap, &qs->fl[1]); | |
2932 | ||
2933 | unlock: | |
2934 | spin_unlock_irq(lock); | |
2935 | out: | |
2936 | mod_timer(&qs->rx_reclaim_timer, jiffies + RX_RECLAIM_PERIOD); | |
4d22de3e DLR |
2937 | } |
2938 | ||
2939 | /** | |
2940 | * t3_update_qset_coalesce - update coalescing settings for a queue set | |
2941 | * @qs: the SGE queue set | |
2942 | * @p: new queue set parameters | |
2943 | * | |
2944 | * Update the coalescing settings for an SGE queue set. Nothing is done | |
2945 | * if the queue set is not initialized yet. | |
2946 | */ | |
2947 | void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p) | |
2948 | { | |
4d22de3e DLR |
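	/*
	 * The holdoff timer counts in roughly 100ns ticks (see the
	 * A_SG_TIMER_TICK setup in t3_sge_init), hence the x10 scaling
	 * of coalesce_usecs below.
	 */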
2949 | qs->rspq.holdoff_tmr = max(p->coalesce_usecs * 10, 1U); /* can't be 0 */
2950 | qs->rspq.polling = p->polling; | |
bea3348e | 2951 | qs->napi.poll = p->polling ? napi_rx_handler : ofld_poll; |
4d22de3e DLR |
2952 | } |
2953 | ||
2954 | /** | |
2955 | * t3_sge_alloc_qset - initialize an SGE queue set | |
2956 | * @adapter: the adapter | |
2957 | * @id: the queue set id | |
2958 | * @nports: how many Ethernet ports will be using this queue set | |
2959 | * @irq_vec_idx: the IRQ vector index for response queue interrupts | |
2960 | * @p: configuration parameters for this queue set | |
2961 | * @ntxq: number of Tx queues for the queue set | |
2962 | * @netdev: net device associated with this queue set | |
82ad3329 | 2963 | * @netdevq: net device TX queue associated with this queue set |
4d22de3e DLR |
2964 | * |
2965 | * Allocate resources and initialize an SGE queue set. A queue set | |
2966 | * comprises a response queue, two Rx free-buffer queues, and up to three | |
2967 | * Tx queues. The Tx queues are assigned roles in the order Ethernet | |
2968 | * queue, offload queue, and control queue. | |
2969 | */ | |
2970 | int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports, | |
2971 | int irq_vec_idx, const struct qset_params *p, | |
82ad3329 DLR |
2972 | int ntxq, struct net_device *dev, |
2973 | struct netdev_queue *netdevq) | |
4d22de3e | 2974 | { |
b1fb1f28 | 2975 | int i, avail, ret = -ENOMEM; |
4d22de3e DLR |
2976 | struct sge_qset *q = &adapter->sge.qs[id]; |
2977 | ||
2978 | init_qset_cntxt(q, id); | |
42c8ea17 DLR |
2979 | setup_timer(&q->tx_reclaim_timer, sge_timer_tx, (unsigned long)q); |
2980 | setup_timer(&q->rx_reclaim_timer, sge_timer_rx, (unsigned long)q); | |
4d22de3e DLR |
2981 | |
2982 | q->fl[0].desc = alloc_ring(adapter->pdev, p->fl_size, | |
2983 | sizeof(struct rx_desc), | |
2984 | sizeof(struct rx_sw_desc), | |
2985 | &q->fl[0].phys_addr, &q->fl[0].sdesc); | |
2986 | if (!q->fl[0].desc) | |
2987 | goto err; | |
2988 | ||
2989 | q->fl[1].desc = alloc_ring(adapter->pdev, p->jumbo_size, | |
2990 | sizeof(struct rx_desc), | |
2991 | sizeof(struct rx_sw_desc), | |
2992 | &q->fl[1].phys_addr, &q->fl[1].sdesc); | |
2993 | if (!q->fl[1].desc) | |
2994 | goto err; | |
2995 | ||
2996 | q->rspq.desc = alloc_ring(adapter->pdev, p->rspq_size, | |
2997 | sizeof(struct rsp_desc), 0, | |
2998 | &q->rspq.phys_addr, NULL); | |
2999 | if (!q->rspq.desc) | |
3000 | goto err; | |
3001 | ||
3002 | for (i = 0; i < ntxq; ++i) { | |
3003 | /* | |
3004 | * The control queue always uses immediate data so does not | |
3005 | * need to keep track of any sk_buffs. | |
3006 | */ | |
3007 | size_t sz = i == TXQ_CTRL ? 0 : sizeof(struct tx_sw_desc); | |
3008 | ||
3009 | q->txq[i].desc = alloc_ring(adapter->pdev, p->txq_size[i], | |
3010 | sizeof(struct tx_desc), sz, | |
3011 | &q->txq[i].phys_addr, | |
3012 | &q->txq[i].sdesc); | |
3013 | if (!q->txq[i].desc) | |
3014 | goto err; | |
3015 | ||
3016 | q->txq[i].gen = 1; | |
3017 | q->txq[i].size = p->txq_size[i]; | |
3018 | spin_lock_init(&q->txq[i].lock); | |
3019 | skb_queue_head_init(&q->txq[i].sendq); | |
3020 | } | |
3021 | ||
3022 | tasklet_init(&q->txq[TXQ_OFLD].qresume_tsk, restart_offloadq, | |
3023 | (unsigned long)q); | |
3024 | tasklet_init(&q->txq[TXQ_CTRL].qresume_tsk, restart_ctrlq, | |
3025 | (unsigned long)q); | |
3026 | ||
3027 | q->fl[0].gen = q->fl[1].gen = 1; | |
3028 | q->fl[0].size = p->fl_size; | |
3029 | q->fl[1].size = p->jumbo_size; | |
3030 | ||
3031 | q->rspq.gen = 1; | |
3032 | q->rspq.size = p->rspq_size; | |
3033 | spin_lock_init(&q->rspq.lock); | |
147e70e6 | 3034 | skb_queue_head_init(&q->rspq.rx_queue); |
4d22de3e DLR |
3035 | |
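	/*
	 * Stop the Ethernet Tx queue once fewer descriptors remain than
	 * the worst case for one packet per port: the scatter-gather
	 * list of a maximally fragmented skb plus 3 flits of header.
	 */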
3036 | q->txq[TXQ_ETH].stop_thres = nports * | |
3037 | flits_to_desc(sgl_len(MAX_SKB_FRAGS + 1) + 3); | |
3038 | ||
cf992af5 DLR |
3039 | #if FL0_PG_CHUNK_SIZE > 0 |
3040 | q->fl[0].buf_size = FL0_PG_CHUNK_SIZE; | |
e0994eb1 | 3041 | #else |
cf992af5 | 3042 | q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE + sizeof(struct cpl_rx_data); |
e0994eb1 | 3043 | #endif |
7385ecf3 DLR |
3044 | #if FL1_PG_CHUNK_SIZE > 0 |
3045 | q->fl[1].buf_size = FL1_PG_CHUNK_SIZE; | |
3046 | #else | |
cf992af5 DLR |
3047 | q->fl[1].buf_size = is_offload(adapter) ? |
3048 | (16 * 1024) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) : | |
3049 | MAX_FRAME_SIZE + 2 + sizeof(struct cpl_rx_pkt); | |
7385ecf3 DLR |
3050 | #endif |
3051 | ||
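	/* A non-zero chunk size puts a free list in page-chunk mode;
	 * order and alloc_size describe its backing page allocations.
	 */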
3052 | q->fl[0].use_pages = FL0_PG_CHUNK_SIZE > 0; | |
3053 | q->fl[1].use_pages = FL1_PG_CHUNK_SIZE > 0; | |
3054 | q->fl[0].order = FL0_PG_ORDER; | |
3055 | q->fl[1].order = FL1_PG_ORDER; | |
5e68b772 DLR |
3056 | q->fl[0].alloc_size = FL0_PG_ALLOC_SIZE; |
3057 | q->fl[1].alloc_size = FL1_PG_ALLOC_SIZE; | |
4d22de3e | 3058 | |
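	/* SGE context registers are shared by all queue sets, so
	 * serialize the context programming below with reg_lock.
	 */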
b1186dee | 3059 | spin_lock_irq(&adapter->sge.reg_lock); |
4d22de3e DLR |
3060 | |
3061 | /* FL threshold comparison uses < */ | |
3062 | ret = t3_sge_init_rspcntxt(adapter, q->rspq.cntxt_id, irq_vec_idx, | |
3063 | q->rspq.phys_addr, q->rspq.size, | |
5e68b772 | 3064 | q->fl[0].buf_size - SGE_PG_RSVD, 1, 0); |
4d22de3e DLR |
3065 | if (ret) |
3066 | goto err_unlock; | |
3067 | ||
3068 | for (i = 0; i < SGE_RXQ_PER_SET; ++i) { | |
3069 | ret = t3_sge_init_flcntxt(adapter, q->fl[i].cntxt_id, 0, | |
3070 | q->fl[i].phys_addr, q->fl[i].size, | |
5e68b772 DLR |
3071 | q->fl[i].buf_size - SGE_PG_RSVD, |
3072 | p->cong_thres, 1, 0); | |
4d22de3e DLR |
3073 | if (ret) |
3074 | goto err_unlock; | |
3075 | } | |
3076 | ||
3077 | ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_ETH].cntxt_id, USE_GTS, | |
3078 | SGE_CNTXT_ETH, id, q->txq[TXQ_ETH].phys_addr, | |
3079 | q->txq[TXQ_ETH].size, q->txq[TXQ_ETH].token, | |
3080 | 1, 0); | |
3081 | if (ret) | |
3082 | goto err_unlock; | |
3083 | ||
3084 | if (ntxq > 1) { | |
3085 | ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_OFLD].cntxt_id, | |
3086 | USE_GTS, SGE_CNTXT_OFLD, id, | |
3087 | q->txq[TXQ_OFLD].phys_addr, | |
3088 | q->txq[TXQ_OFLD].size, 0, 1, 0); | |
3089 | if (ret) | |
3090 | goto err_unlock; | |
3091 | } | |
3092 | ||
3093 | if (ntxq > 2) { | |
3094 | ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_CTRL].cntxt_id, 0, | |
3095 | SGE_CNTXT_CTRL, id, | |
3096 | q->txq[TXQ_CTRL].phys_addr, | |
3097 | q->txq[TXQ_CTRL].size, | |
3098 | q->txq[TXQ_CTRL].token, 1, 0); | |
3099 | if (ret) | |
3100 | goto err_unlock; | |
3101 | } | |
3102 | ||
b1186dee | 3103 | spin_unlock_irq(&adapter->sge.reg_lock); |
4d22de3e | 3104 | |
bea3348e SH |
3105 | q->adap = adapter; |
3106 | q->netdev = dev; | |
82ad3329 | 3107 | q->tx_q = netdevq; |
bea3348e | 3108 | t3_update_qset_coalesce(q, p); |
b47385bd | 3109 | |
7385ecf3 DLR |
3110 | avail = refill_fl(adapter, &q->fl[0], q->fl[0].size, |
3111 | GFP_KERNEL | __GFP_COMP); | |
b1fb1f28 DLR |
3112 | if (!avail) { |
3113 | CH_ALERT(adapter, "free list queue 0 initialization failed\n"); | |
3114 | goto err; | |
3115 | } | |
3116 | if (avail < q->fl[0].size) | |
3117 | CH_WARN(adapter, "free list queue 0 enabled with %d credits\n", | |
3118 | avail); | |
3119 | ||
7385ecf3 DLR |
3120 | avail = refill_fl(adapter, &q->fl[1], q->fl[1].size, |
3121 | GFP_KERNEL | __GFP_COMP); | |
b1fb1f28 DLR |
3122 | if (avail < q->fl[1].size) |
3123 | CH_WARN(adapter, "free list queue 1 enabled with %d credits\n", | |
3124 | avail); | |
4d22de3e DLR |
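	/* Hand the hardware all but one entry's worth of response queue
	 * credits so the queue cannot wrap onto an unprocessed entry.
	 */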
3125 | refill_rspq(adapter, &q->rspq, q->rspq.size - 1); |
3126 | ||
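	/* Point the GTS register at this response queue and arm its
	 * interrupt holdoff timer.
	 */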
3127 | t3_write_reg(adapter, A_SG_GTS, V_RSPQ(q->rspq.cntxt_id) | | |
3128 | V_NEWTIMER(q->rspq.holdoff_tmr)); | |
3129 | ||
4d22de3e DLR |
3130 | return 0; |
3131 | ||
b1fb1f28 | 3132 | err_unlock: |
b1186dee | 3133 | spin_unlock_irq(&adapter->sge.reg_lock); |
b1fb1f28 | 3134 | err: |
4d22de3e DLR |
3135 | t3_free_qset(adapter, q); |
3136 | return ret; | |
3137 | } | |
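
/*
 * Bring-up sketch (the real sequence lives in the driver top level,
 * e.g. setup_sge_qsets() in cxgb3_main.c): allocate the queue sets,
 * start the maintenance timers, and enable DMA last:
 *
 *	for (i = 0; i < nqsets; i++)
 *		t3_sge_alloc_qset(adap, i, 1, i,
 *				  &adap->params.sge.qset[i], ntxq, dev,
 *				  netdev_get_tx_queue(dev, i));
 *	t3_start_sge_timers(adap);
 *	t3_sge_start(adap);
 */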
3138 | ||
31563789 DLR |
3139 | /** |
3140 | * t3_start_sge_timers - start SGE timer callbacks | |
3141 | * @adap: the adapter | |
3142 | * | |
3143 | * Starts each SGE queue set's Tx and Rx reclaim timer callbacks | |
3144 | */ | |
3145 | void t3_start_sge_timers(struct adapter *adap) | |
3146 | { | |
3147 | int i; | |
3148 | ||
3149 | for (i = 0; i < SGE_QSETS; ++i) { | |
3150 | struct sge_qset *q = &adap->sge.qs[i]; | |
3151 | ||
3152 | if (q->tx_reclaim_timer.function) | |
3153 | mod_timer(&q->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD); | |
3154 | ||
3155 | if (q->rx_reclaim_timer.function) | |
3156 | mod_timer(&q->rx_reclaim_timer, jiffies + RX_RECLAIM_PERIOD); | |
3157 | } | |
3158 | } | |
3159 | ||
0ca41c04 DLR |
3160 | /** |
3161 | * t3_stop_sge_timers - stop SGE timer callbacks | |
3162 | * @adap: the adapter | |
3163 | * | |
3164 | * Stops each SGE queue set's Tx and Rx reclaim timer callbacks | |
3165 | */ | |
3166 | void t3_stop_sge_timers(struct adapter *adap) | |
3167 | { | |
3168 | int i; | |
3169 | ||
3170 | for (i = 0; i < SGE_QSETS; ++i) { | |
3171 | struct sge_qset *q = &adap->sge.qs[i]; | |
3172 | ||
3173 | if (q->tx_reclaim_timer.function) | |
3174 | del_timer_sync(&q->tx_reclaim_timer); | |
42c8ea17 DLR |
3175 | if (q->rx_reclaim_timer.function) |
3176 | del_timer_sync(&q->rx_reclaim_timer); | |
0ca41c04 DLR |
3177 | } |
3178 | } | |
3179 | ||
4d22de3e DLR |
3180 | /** |
3181 | * t3_free_sge_resources - free SGE resources | |
3182 | * @adap: the adapter | |
3183 | * | |
3184 | * Frees resources used by the SGE queue sets. | |
3185 | */ | |
3186 | void t3_free_sge_resources(struct adapter *adap) | |
3187 | { | |
3188 | int i; | |
3189 | ||
3190 | for (i = 0; i < SGE_QSETS; ++i) | |
3191 | t3_free_qset(adap, &adap->sge.qs[i]); | |
3192 | } | |
3193 | ||
3194 | /** | |
3195 | * t3_sge_start - enable SGE | |
3196 | * @adap: the adapter | |
3197 | * | |
3198 | * Enables the SGE for DMAs. This is the last step in starting packet | |
3199 | * transfers. | |
3200 | */ | |
3201 | void t3_sge_start(struct adapter *adap) | |
3202 | { | |
3203 | t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, F_GLOBALENABLE); | |
3204 | } | |
3205 | ||
3206 | /** | |
3207 | * t3_sge_stop - disable SGE operation | |
3208 | * @adap: the adapter | |
3209 | * | |
3210 | * Disables the DMA engine. This can be called in emergencies (e.g., | |
3211 | * from error interrupts) or from normal process context. In the latter | |
3212 | * case it also disables any pending queue restart tasklets. Note that | |
3213 | * if it is called in interrupt context it cannot disable the restart | |
3214 | * tasklets as it cannot wait; however, the tasklets will have no effect | |
3215 | * since the doorbells are disabled and the driver will call this again | |
3216 | * later from process context, at which time the tasklets will be stopped | |
3217 | * if they are still running. | |
3218 | */ | |
3219 | void t3_sge_stop(struct adapter *adap) | |
3220 | { | |
3221 | t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, 0); | |
3222 | if (!in_interrupt()) { | |
3223 | int i; | |
3224 | ||
3225 | for (i = 0; i < SGE_QSETS; ++i) { | |
3226 | struct sge_qset *qs = &adap->sge.qs[i]; | |
3227 | ||
3228 | tasklet_kill(&qs->txq[TXQ_OFLD].qresume_tsk); | |
3229 | tasklet_kill(&qs->txq[TXQ_CTRL].qresume_tsk); | |
3230 | } | |
3231 | } | |
3232 | } | |
3233 | ||
3234 | /** | |
3235 | * t3_sge_init - initialize SGE | |
3236 | * @adap: the adapter | |
3237 | * @p: the SGE parameters | |
3238 | * | |
3239 | * Performs SGE initialization needed every time after a chip reset. | |
3240 | * We do not initialize any of the queue sets here; instead, the driver | |
3241 | * top level must request those individually. We also do not enable DMA | |
3242 | * here; that should be done after the queues have been set up. | |
3243 | */ | |
3244 | void t3_sge_init(struct adapter *adap, struct sge_params *p) | |
3245 | { | |
3246 | unsigned int ctrl, ups = ffs(pci_resource_len(adap->pdev, 2) >> 12); | |
3247 | ||
3248 | ctrl = F_DROPPKT | V_PKTSHIFT(2) | F_FLMODE | F_AVOIDCQOVFL | | |
b881955b | 3249 | F_CQCRDTCTRL | F_CONGMODE | F_TNLFLMODE | F_FATLPERREN | |
4d22de3e DLR |
3250 | V_HOSTPAGESIZE(PAGE_SHIFT - 11) | F_BIGENDIANINGRESS | |
3251 | V_USERSPACESIZE(ups ? ups - 1 : 0) | F_ISCSICOALESCING; | |
3252 | #if SGE_NUM_GENBITS == 1 | |
3253 | ctrl |= F_EGRGENCTRL; | |
3254 | #endif | |
3255 | if (adap->params.rev > 0) { | |
3256 | if (!(adap->flags & (USING_MSIX | USING_MSI))) | |
3257 | ctrl |= F_ONEINTMULTQ | F_OPTONEINTMULTQ; | |
4d22de3e DLR |
3258 | } |
3259 | t3_write_reg(adap, A_SG_CONTROL, ctrl); | |
3260 | t3_write_reg(adap, A_SG_EGR_RCQ_DRB_THRSH, V_HIRCQDRBTHRSH(512) | | |
3261 | V_LORCQDRBTHRSH(512)); | |
3262 | t3_write_reg(adap, A_SG_TIMER_TICK, core_ticks_per_usec(adap) / 10); | |
3263 | t3_write_reg(adap, A_SG_CMDQ_CREDIT_TH, V_THRESHOLD(32) | | |
6195c71d | 3264 | V_TIMEOUT(200 * core_ticks_per_usec(adap))); |
b881955b DLR |
3265 | t3_write_reg(adap, A_SG_HI_DRB_HI_THRSH, |
3266 | adap->params.rev < T3_REV_C ? 1000 : 500); | |
4d22de3e DLR |
3267 | t3_write_reg(adap, A_SG_HI_DRB_LO_THRSH, 256); |
3268 | t3_write_reg(adap, A_SG_LO_DRB_HI_THRSH, 1000); | |
3269 | t3_write_reg(adap, A_SG_LO_DRB_LO_THRSH, 256); | |
3270 | t3_write_reg(adap, A_SG_OCO_BASE, V_BASE1(0xfff)); | |
3271 | t3_write_reg(adap, A_SG_DRB_PRI_THRESH, 63 * 1024); | |
3272 | } | |
3273 | ||
3274 | /** | |
3275 | * t3_sge_prep - one-time SGE initialization | |
3276 | * @adap: the associated adapter | |
3277 | * @p: SGE parameters | |
3278 | * | |
3279 | * Performs one-time initialization of SGE SW state. Includes determining | |
3280 | * defaults for the assorted SGE parameters, which admins can change until | |
3281 | * they are used to initialize the SGE. | |
3282 | */ | |
7b9b0943 | 3283 | void t3_sge_prep(struct adapter *adap, struct sge_params *p) |
4d22de3e DLR |
3284 | { |
3285 | int i; | |
3286 | ||
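	/* Largest Rx payload that still fits in a 16KB free-list buffer
	 * after the CPL header and the skb_shared_info tail.
	 */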
3287 | p->max_pkt_size = (16 * 1024) - sizeof(struct cpl_rx_data) - | |
3288 | SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); | |
3289 | ||
3290 | for (i = 0; i < SGE_QSETS; ++i) { | |
3291 | struct qset_params *q = p->qset + i; | |
3292 | ||
3293 | q->polling = adap->params.rev > 0; | |
3294 | q->coalesce_usecs = 5; | |
3295 | q->rspq_size = 1024; | |
e0994eb1 | 3296 | q->fl_size = 1024; |
7385ecf3 | 3297 | q->jumbo_size = 512; |
4d22de3e DLR |
3298 | q->txq_size[TXQ_ETH] = 1024; |
3299 | q->txq_size[TXQ_OFLD] = 1024; | |
3300 | q->txq_size[TXQ_CTRL] = 256; | |
3301 | q->cong_thres = 0; | |
3302 | } | |
3303 | ||
3304 | spin_lock_init(&adap->sge.reg_lock); | |
3305 | } |