[SK_BUFF]: Introduce skb_copy_from_linear_data{_offset}
[deliverable/linux.git] / drivers / net / cxgb3 / sge.c
/*
 * Copyright (c) 2005-2007 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/dma-mapping.h>
#include "common.h"
#include "regs.h"
#include "sge_defs.h"
#include "t3_cpl.h"
#include "firmware_exports.h"

#define USE_GTS 0

#define SGE_RX_SM_BUF_SIZE 1536

/*
 * If USE_RX_PAGE is defined, the small freelist is populated with (partial)
 * pages instead of skbs. Pages are carved up into RX_PAGE_SIZE chunks (the
 * host page size must be a multiple of RX_PAGE_SIZE).
 */
#define USE_RX_PAGE
#define RX_PAGE_SIZE 2048

/*
 * skb freelist packets are copied into a new skb (and the freelist one is
 * reused) if their len is <=
 */
#define SGE_RX_COPY_THRES  256

/*
 * Minimum number of freelist entries before we start dropping TUNNEL frames.
 */
#define SGE_RX_DROP_THRES 16

/*
 * Period of the Tx buffer reclaim timer.  This timer does not need to run
 * frequently as Tx buffers are usually reclaimed by new Tx packets.
 */
#define TX_RECLAIM_PERIOD (HZ / 4)

/* WR size in bytes */
#define WR_LEN (WR_FLITS * 8)

/*
 * Types of Tx queues in each queue set.  Order here matters, do not change.
 */
enum { TXQ_ETH, TXQ_OFLD, TXQ_CTRL };

/* Values for sge_txq.flags */
enum {
	TXQ_RUNNING = 1 << 0,	/* fetch engine is running */
	TXQ_LAST_PKT_DB = 1 << 1,	/* last packet rang the doorbell */
};

struct tx_desc {
	u64 flit[TX_DESC_FLITS];
};

struct rx_desc {
	__be32 addr_lo;
	__be32 len_gen;
	__be32 gen2;
	__be32 addr_hi;
};

struct tx_sw_desc {		/* SW state per Tx descriptor */
	struct sk_buff *skb;
};

struct rx_sw_desc {		/* SW state per Rx descriptor */
	union {
		struct sk_buff *skb;
		struct sge_fl_page page;
	} t;
	DECLARE_PCI_UNMAP_ADDR(dma_addr);
};

struct rsp_desc {		/* response queue descriptor */
	struct rss_header rss_hdr;
	__be32 flags;
	__be32 len_cq;
	u8 imm_data[47];
	u8 intr_gen;
};

struct unmap_info {		/* packet unmapping info, overlays skb->cb */
	int sflit;		/* start flit of first SGL entry in Tx descriptor */
	u16 fragidx;		/* first page fragment in current Tx descriptor */
	u16 addr_idx;		/* buffer index of first SGL entry in descriptor */
	u32 len;		/* mapped length of skb main body */
};

/*
 * Holds unmapping information for Tx packets that need deferred unmapping.
 * This structure lives at skb->head and must be allocated by callers.
 */
struct deferred_unmap_info {
	struct pci_dev *pdev;
	dma_addr_t addr[MAX_SKB_FRAGS + 1];
};

/*
 * Maps a number of flits to the number of Tx descriptors that can hold them.
 * The formula is
 *
 * desc = 1 + (flits - 2) / (WR_FLITS - 1).
 *
 * HW allows up to 4 descriptors to be combined into a WR.
 */
static u8 flit_desc_map[] = {
	0,
#if SGE_NUM_GENBITS == 1
	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
	2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
	3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
	4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4
#elif SGE_NUM_GENBITS == 2
	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
	2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
	3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
	4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
#else
# error "SGE_NUM_GENBITS must be 1 or 2"
#endif
};
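
/*
 * Worked example for the formula above (illustrative, assuming
 * WR_FLITS == 16 with the SGE_NUM_GENBITS == 1 table):
 *
 *	flits = 16  ->  desc = 1 + 14 / 15 = 1
 *	flits = 17  ->  desc = 1 + 15 / 15 = 2
 *	flits = 47  ->  desc = 1 + 45 / 15 = 4
 *
 * i.e., the first descriptor holds up to WR_FLITS flits (including the
 * 2-flit WR header) and each additional descriptor holds WR_FLITS - 1 more.
 */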

static inline struct sge_qset *fl_to_qset(const struct sge_fl *q, int qidx)
{
	return container_of(q, struct sge_qset, fl[qidx]);
}

static inline struct sge_qset *rspq_to_qset(const struct sge_rspq *q)
{
	return container_of(q, struct sge_qset, rspq);
}

static inline struct sge_qset *txq_to_qset(const struct sge_txq *q, int qidx)
{
	return container_of(q, struct sge_qset, txq[qidx]);
}

/**
 * refill_rspq - replenish an SGE response queue
 * @adapter: the adapter
 * @q: the response queue to replenish
 * @credits: how many new responses to make available
 *
 * Replenishes a response queue by making the supplied number of responses
 * available to HW.
 */
static inline void refill_rspq(struct adapter *adapter,
			       const struct sge_rspq *q, unsigned int credits)
{
	t3_write_reg(adapter, A_SG_RSPQ_CREDIT_RETURN,
		     V_RSPQ(q->cntxt_id) | V_CREDITS(credits));
}

/**
 * need_skb_unmap - does the platform need unmapping of sk_buffs?
 *
 * Returns true if the platform needs sk_buff unmapping.  The compiler
 * optimizes away unnecessary code if this returns false.
 */
static inline int need_skb_unmap(void)
{
	/*
	 * This structure is used to tell if the platform needs buffer
	 * unmapping by checking if DECLARE_PCI_UNMAP_ADDR defines anything.
	 */
	struct dummy {
		DECLARE_PCI_UNMAP_ADDR(addr);
	};

	return sizeof(struct dummy) != 0;
}
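
/*
 * Illustrative note: on platforms that track unmapping state,
 * DECLARE_PCI_UNMAP_ADDR(addr) expands to a "dma_addr_t addr;" member, so
 * sizeof(struct dummy) != 0 above.  Where the macro expands to nothing,
 * struct dummy is empty and (as a GNU C extension) has size 0, making
 * need_skb_unmap() a compile-time constant 0 that lets gcc drop every
 * block guarded by it.
 */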

/**
 * unmap_skb - unmap a packet main body and its page fragments
 * @skb: the packet
 * @q: the Tx queue containing Tx descriptors for the packet
 * @cidx: index of Tx descriptor
 * @pdev: the PCI device
 *
 * Unmap the main body of an sk_buff and its page fragments, if any.
 * Because of the fairly complicated structure of our SGLs and the desire
 * to conserve space for metadata, we keep the information necessary to
 * unmap an sk_buff partly in the sk_buff itself (in its cb), and partly
 * in the Tx descriptors (the physical addresses of the various data
 * buffers).  The send functions initialize the state in skb->cb so we
 * can unmap the buffers held in the first Tx descriptor here, and we
 * have enough information at this point to update the state for the next
 * Tx descriptor.
 */
static inline void unmap_skb(struct sk_buff *skb, struct sge_txq *q,
			     unsigned int cidx, struct pci_dev *pdev)
{
	const struct sg_ent *sgp;
	struct unmap_info *ui = (struct unmap_info *)skb->cb;
	int nfrags, frag_idx, curflit, j = ui->addr_idx;

	sgp = (struct sg_ent *)&q->desc[cidx].flit[ui->sflit];

	if (ui->len) {
		pci_unmap_single(pdev, be64_to_cpu(sgp->addr[0]), ui->len,
				 PCI_DMA_TODEVICE);
		ui->len = 0;	/* so we know for next descriptor for this skb */
		j = 1;
	}

	frag_idx = ui->fragidx;
	curflit = ui->sflit + 1 + j;
	nfrags = skb_shinfo(skb)->nr_frags;

	while (frag_idx < nfrags && curflit < WR_FLITS) {
		pci_unmap_page(pdev, be64_to_cpu(sgp->addr[j]),
			       skb_shinfo(skb)->frags[frag_idx].size,
			       PCI_DMA_TODEVICE);
		j ^= 1;
		if (j == 0) {
			sgp++;
			curflit++;
		}
		curflit++;
		frag_idx++;
	}

	if (frag_idx < nfrags) {	/* SGL continues into next Tx descriptor */
		ui->fragidx = frag_idx;
		ui->addr_idx = j;
		ui->sflit = curflit - WR_FLITS - j;	/* sflit can be -1 */
	}
}

/**
 * free_tx_desc - reclaims Tx descriptors and their buffers
 * @adapter: the adapter
 * @q: the Tx queue to reclaim descriptors from
 * @n: the number of descriptors to reclaim
 *
 * Reclaims Tx descriptors from an SGE Tx queue and frees the associated
 * Tx buffers.  Called with the Tx queue lock held.
 */
static void free_tx_desc(struct adapter *adapter, struct sge_txq *q,
			 unsigned int n)
{
	struct tx_sw_desc *d;
	struct pci_dev *pdev = adapter->pdev;
	unsigned int cidx = q->cidx;

	const int need_unmap = need_skb_unmap() &&
			       q->cntxt_id >= FW_TUNNEL_SGEEC_START;

	d = &q->sdesc[cidx];
	while (n--) {
		if (d->skb) {	/* an SGL is present */
			if (need_unmap)
				unmap_skb(d->skb, q, cidx, pdev);
			if (d->skb->priority == cidx)
				kfree_skb(d->skb);
		}
		++d;
		if (++cidx == q->size) {
			cidx = 0;
			d = q->sdesc;
		}
	}
	q->cidx = cidx;
}

/**
 * reclaim_completed_tx - reclaims completed Tx descriptors
 * @adapter: the adapter
 * @q: the Tx queue to reclaim completed descriptors from
 *
 * Reclaims Tx descriptors that the SGE has indicated it has processed,
 * and frees the associated buffers if possible.  Called with the Tx
 * queue's lock held.
 */
static inline void reclaim_completed_tx(struct adapter *adapter,
					struct sge_txq *q)
{
	unsigned int reclaim = q->processed - q->cleaned;

	if (reclaim) {
		free_tx_desc(adapter, q, reclaim);
		q->cleaned += reclaim;
		q->in_use -= reclaim;
	}
}

/**
 * should_restart_tx - are there enough resources to restart a Tx queue?
 * @q: the Tx queue
 *
 * Checks if there are enough descriptors to restart a suspended Tx queue.
 */
static inline int should_restart_tx(const struct sge_txq *q)
{
	unsigned int r = q->processed - q->cleaned;

	return q->in_use - r < (q->size >> 1);
}

/**
 * free_rx_bufs - free the Rx buffers on an SGE free list
 * @pdev: the PCI device associated with the adapter
 * @q: the SGE free list to clean up
 *
 * Release the buffers on an SGE free-buffer Rx queue.  HW fetching from
 * this queue should be stopped before calling this function.
 */
static void free_rx_bufs(struct pci_dev *pdev, struct sge_fl *q)
{
	unsigned int cidx = q->cidx;

	while (q->credits--) {
		struct rx_sw_desc *d = &q->sdesc[cidx];

		pci_unmap_single(pdev, pci_unmap_addr(d, dma_addr),
				 q->buf_size, PCI_DMA_FROMDEVICE);

		if (q->buf_size != RX_PAGE_SIZE) {
			kfree_skb(d->t.skb);
			d->t.skb = NULL;
		} else {
			if (d->t.page.frag.page)
				put_page(d->t.page.frag.page);
			d->t.page.frag.page = NULL;
		}
		if (++cidx == q->size)
			cidx = 0;
	}

	if (q->page.frag.page)
		put_page(q->page.frag.page);
	q->page.frag.page = NULL;
}

/**
 * add_one_rx_buf - add a packet buffer to a free-buffer list
 * @va: va of the buffer to add
 * @len: the buffer length
 * @d: the HW Rx descriptor to write
 * @sd: the SW Rx descriptor to write
 * @gen: the generation bit value
 * @pdev: the PCI device associated with the adapter
 *
 * Add a buffer of the given length to the supplied HW and SW Rx
 * descriptors.
 */
static inline void add_one_rx_buf(unsigned char *va, unsigned int len,
				  struct rx_desc *d, struct rx_sw_desc *sd,
				  unsigned int gen, struct pci_dev *pdev)
{
	dma_addr_t mapping;

	mapping = pci_map_single(pdev, va, len, PCI_DMA_FROMDEVICE);
	pci_unmap_addr_set(sd, dma_addr, mapping);

	d->addr_lo = cpu_to_be32(mapping);
	d->addr_hi = cpu_to_be32((u64) mapping >> 32);
	wmb();
	d->len_gen = cpu_to_be32(V_FLD_GEN1(gen));
	d->gen2 = cpu_to_be32(V_FLD_GEN2(gen));
}

/**
 * refill_fl - refill an SGE free-buffer list
 * @adap: the adapter
 * @q: the free-list to refill
 * @n: the number of new buffers to allocate
 * @gfp: the gfp flags for allocating new buffers
 *
 * (Re)populate an SGE free-buffer list with up to @n new packet buffers,
 * allocated with the supplied gfp flags.  The caller must ensure that
 * @n does not exceed the queue's capacity.
 */
static void refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp)
{
	struct rx_sw_desc *sd = &q->sdesc[q->pidx];
	struct rx_desc *d = &q->desc[q->pidx];
	struct sge_fl_page *p = &q->page;

	while (n--) {
		unsigned char *va;

		if (unlikely(q->buf_size != RX_PAGE_SIZE)) {
			struct sk_buff *skb = alloc_skb(q->buf_size, gfp);

			if (!skb) {
				q->alloc_failed++;
				break;
			}
			va = skb->data;
			sd->t.skb = skb;
		} else {
			if (!p->frag.page) {
				p->frag.page = alloc_pages(gfp, 0);
				if (unlikely(!p->frag.page)) {
					q->alloc_failed++;
					break;
				} else {
					p->frag.size = RX_PAGE_SIZE;
					p->frag.page_offset = 0;
					p->va = page_address(p->frag.page);
				}
			}

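			/*
			 * Hand the current RX_PAGE_SIZE chunk of the page to
			 * this descriptor and advance the cursor; get_page()
			 * below keeps the page alive while later chunks (and
			 * the free list itself) still reference it.
			 */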
			memcpy(&sd->t, p, sizeof(*p));
			va = p->va;

			p->frag.page_offset += RX_PAGE_SIZE;
			BUG_ON(p->frag.page_offset > PAGE_SIZE);
			p->va += RX_PAGE_SIZE;
			if (p->frag.page_offset == PAGE_SIZE)
				p->frag.page = NULL;
			else
				get_page(p->frag.page);
		}

		add_one_rx_buf(va, q->buf_size, d, sd, q->gen, adap->pdev);

		d++;
		sd++;
		if (++q->pidx == q->size) {
			q->pidx = 0;
			q->gen ^= 1;
			sd = q->sdesc;
			d = q->desc;
		}
		q->credits++;
	}

	t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
}

static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
{
	refill_fl(adap, fl, min(16U, fl->size - fl->credits), GFP_ATOMIC);
}

/**
 * recycle_rx_buf - recycle a receive buffer
 * @adapter: the adapter
 * @q: the SGE free list
 * @idx: index of buffer to recycle
 *
 * Recycles the specified buffer on the given free list by adding it at
 * the next available slot on the list.
 */
static void recycle_rx_buf(struct adapter *adap, struct sge_fl *q,
			   unsigned int idx)
{
	struct rx_desc *from = &q->desc[idx];
	struct rx_desc *to = &q->desc[q->pidx];

	memcpy(&q->sdesc[q->pidx], &q->sdesc[idx], sizeof(struct rx_sw_desc));
	to->addr_lo = from->addr_lo;	/* already big endian */
	to->addr_hi = from->addr_hi;	/* likewise */
	wmb();
	to->len_gen = cpu_to_be32(V_FLD_GEN1(q->gen));
	to->gen2 = cpu_to_be32(V_FLD_GEN2(q->gen));
	q->credits++;

	if (++q->pidx == q->size) {
		q->pidx = 0;
		q->gen ^= 1;
	}
	t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
}

/**
 * alloc_ring - allocate resources for an SGE descriptor ring
 * @pdev: the PCI device
 * @nelem: the number of descriptors
 * @elem_size: the size of each descriptor
 * @sw_size: the size of the SW state associated with each ring element
 * @phys: the physical address of the allocated ring
 * @metadata: address of the array holding the SW state for the ring
 *
 * Allocates resources for an SGE descriptor ring, such as Tx queues,
 * free buffer lists, or response queues.  Each SGE ring requires
 * space for its HW descriptors plus, optionally, space for the SW state
 * associated with each HW entry (the metadata).  The function returns
 * three values: the virtual address for the HW ring (the return value
 * of the function), the physical address of the HW ring, and the address
 * of the SW ring.
 */
static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size,
			size_t sw_size, dma_addr_t *phys, void *metadata)
{
	size_t len = nelem * elem_size;
	void *s = NULL;
	void *p = dma_alloc_coherent(&pdev->dev, len, phys, GFP_KERNEL);

	if (!p)
		return NULL;
	if (sw_size) {
		s = kcalloc(nelem, sw_size, GFP_KERNEL);

		if (!s) {
			dma_free_coherent(&pdev->dev, len, p, *phys);
			return NULL;
		}
	}
	if (metadata)
		*(void **)metadata = s;
	memset(p, 0, len);
	return p;
}
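
/*
 * Example call (illustrative sketch, not part of this hunk): a free list,
 * which keeps per-buffer SW state, would be allocated roughly as
 *
 *	fl->desc = alloc_ring(pdev, fl->size, sizeof(struct rx_desc),
 *			      sizeof(struct rx_sw_desc),
 *			      &fl->phys_addr, &fl->sdesc);
 *
 * while a response queue, which keeps no SW state, would pass sw_size == 0
 * and a NULL metadata pointer.
 */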

/**
 * t3_free_qset - free the resources of an SGE queue set
 * @adapter: the adapter owning the queue set
 * @q: the queue set
 *
 * Release the HW and SW resources associated with an SGE queue set, such
 * as HW contexts, packet buffers, and descriptor rings.  Traffic to the
 * queue set must be quiesced prior to calling this.
 */
void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
{
	int i;
	struct pci_dev *pdev = adapter->pdev;

	if (q->tx_reclaim_timer.function)
		del_timer_sync(&q->tx_reclaim_timer);

	for (i = 0; i < SGE_RXQ_PER_SET; ++i)
		if (q->fl[i].desc) {
			spin_lock(&adapter->sge.reg_lock);
			t3_sge_disable_fl(adapter, q->fl[i].cntxt_id);
			spin_unlock(&adapter->sge.reg_lock);
			free_rx_bufs(pdev, &q->fl[i]);
			kfree(q->fl[i].sdesc);
			dma_free_coherent(&pdev->dev,
					  q->fl[i].size *
					  sizeof(struct rx_desc), q->fl[i].desc,
					  q->fl[i].phys_addr);
		}

	for (i = 0; i < SGE_TXQ_PER_SET; ++i)
		if (q->txq[i].desc) {
			spin_lock(&adapter->sge.reg_lock);
			t3_sge_enable_ecntxt(adapter, q->txq[i].cntxt_id, 0);
			spin_unlock(&adapter->sge.reg_lock);
			if (q->txq[i].sdesc) {
				free_tx_desc(adapter, &q->txq[i],
					     q->txq[i].in_use);
				kfree(q->txq[i].sdesc);
			}
			dma_free_coherent(&pdev->dev,
					  q->txq[i].size *
					  sizeof(struct tx_desc),
					  q->txq[i].desc, q->txq[i].phys_addr);
			__skb_queue_purge(&q->txq[i].sendq);
		}

	if (q->rspq.desc) {
		spin_lock(&adapter->sge.reg_lock);
		t3_sge_disable_rspcntxt(adapter, q->rspq.cntxt_id);
		spin_unlock(&adapter->sge.reg_lock);
		dma_free_coherent(&pdev->dev,
				  q->rspq.size * sizeof(struct rsp_desc),
				  q->rspq.desc, q->rspq.phys_addr);
	}

	if (q->netdev)
		q->netdev->atalk_ptr = NULL;

	memset(q, 0, sizeof(*q));
}

/**
 * init_qset_cntxt - initialize an SGE queue set context info
 * @qs: the queue set
 * @id: the queue set id
 *
 * Initializes the TIDs and context ids for the queues of a queue set.
 */
static void init_qset_cntxt(struct sge_qset *qs, unsigned int id)
{
	qs->rspq.cntxt_id = id;
	qs->fl[0].cntxt_id = 2 * id;
	qs->fl[1].cntxt_id = 2 * id + 1;
	qs->txq[TXQ_ETH].cntxt_id = FW_TUNNEL_SGEEC_START + id;
	qs->txq[TXQ_ETH].token = FW_TUNNEL_TID_START + id;
	qs->txq[TXQ_OFLD].cntxt_id = FW_OFLD_SGEEC_START + id;
	qs->txq[TXQ_CTRL].cntxt_id = FW_CTRL_SGEEC_START + id;
	qs->txq[TXQ_CTRL].token = FW_CTRL_TID_START + id;
}

/**
 * sgl_len - calculates the size of an SGL of the given capacity
 * @n: the number of SGL entries
 *
 * Calculates the number of flits needed for a scatter/gather list that
 * can hold the given number of entries.
 */
static inline unsigned int sgl_len(unsigned int n)
{
	/* alternatively: 3 * (n / 2) + 2 * (n & 1) */
	return (3 * n) / 2 + (n & 1);
}
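
/*
 * Worked example: each struct sg_ent occupies 3 flits and packs two
 * (length, address) pairs, and an odd final entry still costs 2 flits.
 * Hence sgl_len(2) == 3, sgl_len(3) == 5, and sgl_len(5) == 8.
 */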

/**
 * flits_to_desc - returns the num of Tx descriptors for the given flits
 * @n: the number of flits
 *
 * Calculates the number of Tx descriptors needed for the supplied number
 * of flits.
 */
static inline unsigned int flits_to_desc(unsigned int n)
{
	BUG_ON(n >= ARRAY_SIZE(flit_desc_map));
	return flit_desc_map[n];
}

/**
 * get_imm_packet - return the next ingress packet buffer from a response
 * @resp: the response descriptor containing the packet data
 *
 * Return a packet containing the immediate data of the given response.
 */
static inline struct sk_buff *get_imm_packet(const struct rsp_desc *resp)
{
	struct sk_buff *skb = alloc_skb(IMMED_PKT_SIZE, GFP_ATOMIC);

	if (skb) {
		__skb_put(skb, IMMED_PKT_SIZE);
		memcpy(skb->data, resp->imm_data, IMMED_PKT_SIZE);
	}
	return skb;
}

/**
 * calc_tx_descs - calculate the number of Tx descriptors for a packet
 * @skb: the packet
 *
 * Returns the number of Tx descriptors needed for the given Ethernet
 * packet.  Ethernet packets require addition of WR and CPL headers.
 */
static inline unsigned int calc_tx_descs(const struct sk_buff *skb)
{
	unsigned int flits;

	if (skb->len <= WR_LEN - sizeof(struct cpl_tx_pkt))
		return 1;

	flits = sgl_len(skb_shinfo(skb)->nr_frags + 1) + 2;
	if (skb_shinfo(skb)->gso_size)
		flits++;
	return flits_to_desc(flits);
}

/**
 * make_sgl - populate a scatter/gather list for a packet
 * @skb: the packet
 * @sgp: the SGL to populate
 * @start: start address of skb main body data to include in the SGL
 * @len: length of skb main body data to include in the SGL
 * @pdev: the PCI device
 *
 * Generates a scatter/gather list for the buffers that make up a packet
 * and returns the SGL size in 8-byte words.  The caller must size the SGL
 * appropriately.
 */
static inline unsigned int make_sgl(const struct sk_buff *skb,
				    struct sg_ent *sgp, unsigned char *start,
				    unsigned int len, struct pci_dev *pdev)
{
	dma_addr_t mapping;
	unsigned int i, j = 0, nfrags;

	if (len) {
		mapping = pci_map_single(pdev, start, len, PCI_DMA_TODEVICE);
		sgp->len[0] = cpu_to_be32(len);
		sgp->addr[0] = cpu_to_be64(mapping);
		j = 1;
	}

	nfrags = skb_shinfo(skb)->nr_frags;
	for (i = 0; i < nfrags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		mapping = pci_map_page(pdev, frag->page, frag->page_offset,
				       frag->size, PCI_DMA_TODEVICE);
		sgp->len[j] = cpu_to_be32(frag->size);
		sgp->addr[j] = cpu_to_be64(mapping);
		j ^= 1;
		if (j == 0)
			++sgp;
	}
	if (j)
		sgp->len[j] = 0;
	return ((nfrags + (len != 0)) * 3) / 2 + j;
}
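
/*
 * Worked example: a packet with a non-empty linear body and 3 page
 * fragments maps 4 buffers in total.  j toggles between the two slots of
 * each sg_ent, so the return value is (4 * 3) / 2 + 0 = 6 flits, matching
 * sgl_len(4).
 */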

/**
 * check_ring_tx_db - check and potentially ring a Tx queue's doorbell
 * @adap: the adapter
 * @q: the Tx queue
 *
 * Ring the doorbell if a Tx queue is asleep.  There is a natural race,
 * where the HW is going to sleep just after we checked; in that case
 * the interrupt handler will detect the outstanding Tx packet and ring
 * the doorbell for us.
 *
 * When GTS is disabled we unconditionally ring the doorbell.
 */
static inline void check_ring_tx_db(struct adapter *adap, struct sge_txq *q)
{
#if USE_GTS
	clear_bit(TXQ_LAST_PKT_DB, &q->flags);
	if (test_and_set_bit(TXQ_RUNNING, &q->flags) == 0) {
		set_bit(TXQ_LAST_PKT_DB, &q->flags);
		t3_write_reg(adap, A_SG_KDOORBELL,
			     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
	}
#else
	wmb();			/* write descriptors before telling HW */
	t3_write_reg(adap, A_SG_KDOORBELL,
		     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
#endif
}

static inline void wr_gen2(struct tx_desc *d, unsigned int gen)
{
#if SGE_NUM_GENBITS == 2
	d->flit[TX_DESC_FLITS - 1] = cpu_to_be64(gen);
#endif
}

/**
 * write_wr_hdr_sgl - write a WR header and, optionally, SGL
 * @ndesc: number of Tx descriptors spanned by the SGL
 * @skb: the packet corresponding to the WR
 * @d: first Tx descriptor to be written
 * @pidx: index of above descriptors
 * @q: the SGE Tx queue
 * @sgl: the SGL
 * @flits: number of flits to the start of the SGL in the first descriptor
 * @sgl_flits: the SGL size in flits
 * @gen: the Tx descriptor generation
 * @wr_hi: top 32 bits of WR header based on WR type (big endian)
 * @wr_lo: low 32 bits of WR header based on WR type (big endian)
 *
 * Write a work request header and an associated SGL.  If the SGL is
 * small enough to fit into one Tx descriptor it has already been written
 * and we just need to write the WR header.  Otherwise we distribute the
 * SGL across the number of descriptors it spans.
 */
static void write_wr_hdr_sgl(unsigned int ndesc, struct sk_buff *skb,
			     struct tx_desc *d, unsigned int pidx,
			     const struct sge_txq *q,
			     const struct sg_ent *sgl,
			     unsigned int flits, unsigned int sgl_flits,
			     unsigned int gen, unsigned int wr_hi,
			     unsigned int wr_lo)
{
	struct work_request_hdr *wrp = (struct work_request_hdr *)d;
	struct tx_sw_desc *sd = &q->sdesc[pidx];

	sd->skb = skb;
	if (need_skb_unmap()) {
		struct unmap_info *ui = (struct unmap_info *)skb->cb;

		ui->fragidx = 0;
		ui->addr_idx = 0;
		ui->sflit = flits;
	}

	if (likely(ndesc == 1)) {
		skb->priority = pidx;
		wrp->wr_hi = htonl(F_WR_SOP | F_WR_EOP | V_WR_DATATYPE(1) |
				   V_WR_SGLSFLT(flits)) | wr_hi;
		wmb();
		wrp->wr_lo = htonl(V_WR_LEN(flits + sgl_flits) |
				   V_WR_GEN(gen)) | wr_lo;
		wr_gen2(d, gen);
	} else {
		unsigned int ogen = gen;
		const u64 *fp = (const u64 *)sgl;
		struct work_request_hdr *wp = wrp;

		wrp->wr_hi = htonl(F_WR_SOP | V_WR_DATATYPE(1) |
				   V_WR_SGLSFLT(flits)) | wr_hi;

		while (sgl_flits) {
			unsigned int avail = WR_FLITS - flits;

			if (avail > sgl_flits)
				avail = sgl_flits;
			memcpy(&d->flit[flits], fp, avail * sizeof(*fp));
			sgl_flits -= avail;
			ndesc--;
			if (!sgl_flits)
				break;

			fp += avail;
			d++;
			sd++;
			if (++pidx == q->size) {
				pidx = 0;
				gen ^= 1;
				d = q->desc;
				sd = q->sdesc;
			}

			sd->skb = skb;
			wrp = (struct work_request_hdr *)d;
			wrp->wr_hi = htonl(V_WR_DATATYPE(1) |
					   V_WR_SGLSFLT(1)) | wr_hi;
			wrp->wr_lo = htonl(V_WR_LEN(min(WR_FLITS,
							sgl_flits + 1)) |
					   V_WR_GEN(gen)) | wr_lo;
			wr_gen2(d, gen);
			flits = 1;
		}
		skb->priority = pidx;
		wrp->wr_hi |= htonl(F_WR_EOP);
		wmb();
		wp->wr_lo = htonl(V_WR_LEN(WR_FLITS) | V_WR_GEN(ogen)) | wr_lo;
		wr_gen2((struct tx_desc *)wp, ogen);
		WARN_ON(ndesc != 0);
	}
}

/**
 * write_tx_pkt_wr - write a TX_PKT work request
 * @adap: the adapter
 * @skb: the packet to send
 * @pi: the egress interface
 * @pidx: index of the first Tx descriptor to write
 * @gen: the generation value to use
 * @q: the Tx queue
 * @ndesc: number of descriptors the packet will occupy
 * @compl: the value of the COMPL bit to use
 *
 * Generate a TX_PKT work request to send the supplied packet.
 */
static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
			    const struct port_info *pi,
			    unsigned int pidx, unsigned int gen,
			    struct sge_txq *q, unsigned int ndesc,
			    unsigned int compl)
{
	unsigned int flits, sgl_flits, cntrl, tso_info;
	struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
	struct tx_desc *d = &q->desc[pidx];
	struct cpl_tx_pkt *cpl = (struct cpl_tx_pkt *)d;

	cpl->len = htonl(skb->len | 0x80000000);
	cntrl = V_TXPKT_INTF(pi->port_id);

	if (vlan_tx_tag_present(skb) && pi->vlan_grp)
		cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(vlan_tx_tag_get(skb));

	tso_info = V_LSO_MSS(skb_shinfo(skb)->gso_size);
	if (tso_info) {
		int eth_type;
		struct cpl_tx_pkt_lso *hdr = (struct cpl_tx_pkt_lso *)cpl;

		d->flit[2] = 0;
		cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT_LSO);
		hdr->cntrl = htonl(cntrl);
		eth_type = skb_network_offset(skb) == ETH_HLEN ?
		    CPL_ETH_II : CPL_ETH_II_VLAN;
		tso_info |= V_LSO_ETH_TYPE(eth_type) |
		    V_LSO_IPHDR_WORDS(ip_hdr(skb)->ihl) |
		    V_LSO_TCPHDR_WORDS(tcp_hdr(skb)->doff);
		hdr->lso_info = htonl(tso_info);
		flits = 3;
	} else {
		cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT);
		cntrl |= F_TXPKT_IPCSUM_DIS;	/* SW calculates IP csum */
		cntrl |= V_TXPKT_L4CSUM_DIS(skb->ip_summed != CHECKSUM_PARTIAL);
		cpl->cntrl = htonl(cntrl);

		if (skb->len <= WR_LEN - sizeof(*cpl)) {
			q->sdesc[pidx].skb = NULL;
			if (!skb->data_len)
				skb_copy_from_linear_data(skb, &d->flit[2],
							  skb->len);
			else
				skb_copy_bits(skb, 0, &d->flit[2], skb->len);

			flits = (skb->len + 7) / 8 + 2;
			cpl->wr.wr_hi = htonl(V_WR_BCNTLFLT(skb->len & 7) |
					      V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT)
					      | F_WR_SOP | F_WR_EOP | compl);
			wmb();
			cpl->wr.wr_lo = htonl(V_WR_LEN(flits) | V_WR_GEN(gen) |
					      V_WR_TID(q->token));
			wr_gen2(d, gen);
			kfree_skb(skb);
			return;
		}

		flits = 2;
	}

	sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
	sgl_flits = make_sgl(skb, sgp, skb->data, skb_headlen(skb), adap->pdev);
	if (need_skb_unmap())
		((struct unmap_info *)skb->cb)->len = skb_headlen(skb);

	write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, gen,
			 htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | compl),
			 htonl(V_WR_TID(q->token)));
}

/**
 * t3_eth_xmit - add a packet to the Ethernet Tx queue
 * @skb: the packet
 * @dev: the egress net device
 *
 * Add a packet to an SGE Tx queue.  Runs with softirqs disabled.
 */
int t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
	unsigned int ndesc, pidx, credits, gen, compl;
	const struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = dev->priv;
	struct sge_qset *qs = dev2qset(dev);
	struct sge_txq *q = &qs->txq[TXQ_ETH];

	/*
	 * The chip min packet length is 9 octets but play safe and reject
	 * anything shorter than an Ethernet header.
	 */
	if (unlikely(skb->len < ETH_HLEN)) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	spin_lock(&q->lock);
	reclaim_completed_tx(adap, q);

	credits = q->size - q->in_use;
	ndesc = calc_tx_descs(skb);

	if (unlikely(credits < ndesc)) {
		if (!netif_queue_stopped(dev)) {
			netif_stop_queue(dev);
			set_bit(TXQ_ETH, &qs->txq_stopped);
			q->stops++;
			dev_err(&adap->pdev->dev,
				"%s: Tx ring %u full while queue awake!\n",
				dev->name, q->cntxt_id & 7);
		}
		spin_unlock(&q->lock);
		return NETDEV_TX_BUSY;
	}

	q->in_use += ndesc;
	if (unlikely(credits - ndesc < q->stop_thres)) {
		q->stops++;
		netif_stop_queue(dev);
		set_bit(TXQ_ETH, &qs->txq_stopped);
#if !USE_GTS
		if (should_restart_tx(q) &&
		    test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
			q->restarts++;
			netif_wake_queue(dev);
		}
#endif
	}

	gen = q->gen;
	q->unacked += ndesc;
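	/*
	 * Ask for a Tx completion roughly once per 8 descriptors: bit 3 of
	 * the unacked count is shifted up to the WR_COMPL flag position and
	 * the counter then wraps modulo 8.
	 */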
	compl = (q->unacked & 8) << (S_WR_COMPL - 3);
	q->unacked &= 7;
	pidx = q->pidx;
	q->pidx += ndesc;
	if (q->pidx >= q->size) {
		q->pidx -= q->size;
		q->gen ^= 1;
	}

	/* update port statistics */
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		qs->port_stats[SGE_PSTAT_TX_CSUM]++;
	if (skb_shinfo(skb)->gso_size)
		qs->port_stats[SGE_PSTAT_TSO]++;
	if (vlan_tx_tag_present(skb) && pi->vlan_grp)
		qs->port_stats[SGE_PSTAT_VLANINS]++;

	dev->trans_start = jiffies;
	spin_unlock(&q->lock);

	/*
	 * We do not use Tx completion interrupts to free DMAd Tx packets.
	 * This is good for performance but means that we rely on new Tx
	 * packets arriving to run the destructors of completed packets,
	 * which open up space in their sockets' send queues.  Sometimes
	 * we do not get such new packets, causing Tx to stall.  A single
	 * UDP transmitter is a good example of this situation.  We have
	 * a clean up timer that periodically reclaims completed packets
	 * but it doesn't run often enough (nor do we want it to) to prevent
	 * lengthy stalls.  A solution to this problem is to run the
	 * destructor early, after the packet is queued but before it's DMAd.
	 * A downside is that we lie to socket memory accounting, but the
	 * amount of extra memory is reasonable (limited by the number of Tx
	 * descriptors), the packets do actually get freed quickly by new
	 * packets almost always, and for protocols like TCP that wait for
	 * acks to really free up the data the extra memory is even less.
	 * On the positive side we run the destructors on the sending CPU
	 * rather than on a potentially different completing CPU, usually a
	 * good thing.  We also run them without holding our Tx queue lock,
	 * unlike what reclaim_completed_tx() would otherwise do.
	 *
	 * Run the destructor before telling the DMA engine about the packet
	 * to make sure it doesn't complete and get freed prematurely.
	 */
	if (likely(!skb_shared(skb)))
		skb_orphan(skb);

	write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl);
	check_ring_tx_db(adap, q);
	return NETDEV_TX_OK;
}

/**
 * write_imm - write a packet into a Tx descriptor as immediate data
 * @d: the Tx descriptor to write
 * @skb: the packet
 * @len: the length of packet data to write as immediate data
 * @gen: the generation bit value to write
 *
 * Writes a packet as immediate data into a Tx descriptor.  The packet
 * contains a work request at its beginning.  We must write the packet
 * carefully so the SGE doesn't read it accidentally before it's written
 * in its entirety.
 */
static inline void write_imm(struct tx_desc *d, struct sk_buff *skb,
			     unsigned int len, unsigned int gen)
{
	struct work_request_hdr *from = (struct work_request_hdr *)skb->data;
	struct work_request_hdr *to = (struct work_request_hdr *)d;

	memcpy(&to[1], &from[1], len - sizeof(*from));
	to->wr_hi = from->wr_hi | htonl(F_WR_SOP | F_WR_EOP |
					V_WR_BCNTLFLT(len & 7));
	wmb();
	to->wr_lo = from->wr_lo | htonl(V_WR_GEN(gen) |
					V_WR_LEN((len + 7) / 8));
	wr_gen2(d, gen);
	kfree_skb(skb);
}

/**
 * check_desc_avail - check descriptor availability on a send queue
 * @adap: the adapter
 * @q: the send queue
 * @skb: the packet needing the descriptors
 * @ndesc: the number of Tx descriptors needed
 * @qid: the Tx queue number in its queue set (TXQ_OFLD or TXQ_CTRL)
 *
 * Checks if the requested number of Tx descriptors is available on an
 * SGE send queue.  If the queue is already suspended or not enough
 * descriptors are available, the packet is queued for later transmission.
 * Must be called with the Tx queue locked.
 *
 * Returns 0 if enough descriptors are available, 1 if there aren't
 * enough descriptors and the packet has been queued, and 2 if the caller
 * needs to retry because there weren't enough descriptors at the
 * beginning of the call but some freed up in the meantime.
 */
static inline int check_desc_avail(struct adapter *adap, struct sge_txq *q,
				   struct sk_buff *skb, unsigned int ndesc,
				   unsigned int qid)
{
	if (unlikely(!skb_queue_empty(&q->sendq))) {
	      addq_exit:__skb_queue_tail(&q->sendq, skb);
		return 1;
	}
	if (unlikely(q->size - q->in_use < ndesc)) {
		struct sge_qset *qs = txq_to_qset(q, qid);

		set_bit(qid, &qs->txq_stopped);
		smp_mb__after_clear_bit();

		if (should_restart_tx(q) &&
		    test_and_clear_bit(qid, &qs->txq_stopped))
			return 2;

		q->stops++;
		goto addq_exit;
	}
	return 0;
}
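
/*
 * Usage note (illustrative): callers such as ctrl_xmit() and ofld_xmit()
 * below invoke this under the queue lock right after reclaiming completed
 * descriptors, and loop back to the reclaim step when 2 is returned.
 */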

/**
 * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
 * @q: the SGE control Tx queue
 *
 * This is a variant of reclaim_completed_tx() that is used for Tx queues
 * that send only immediate data (presently just the control queues) and
 * thus do not have any sk_buffs to release.
 */
static inline void reclaim_completed_tx_imm(struct sge_txq *q)
{
	unsigned int reclaim = q->processed - q->cleaned;

	q->in_use -= reclaim;
	q->cleaned += reclaim;
}

static inline int immediate(const struct sk_buff *skb)
{
	return skb->len <= WR_LEN && !skb->data_len;
}

/**
 * ctrl_xmit - send a packet through an SGE control Tx queue
 * @adap: the adapter
 * @q: the control queue
 * @skb: the packet
 *
 * Send a packet through an SGE control Tx queue.  Packets sent through
 * a control queue must fit entirely as immediate data in a single Tx
 * descriptor and have no page fragments.
 */
static int ctrl_xmit(struct adapter *adap, struct sge_txq *q,
		     struct sk_buff *skb)
{
	int ret;
	struct work_request_hdr *wrp = (struct work_request_hdr *)skb->data;

	if (unlikely(!immediate(skb))) {
		WARN_ON(1);
		dev_kfree_skb(skb);
		return NET_XMIT_SUCCESS;
	}

	wrp->wr_hi |= htonl(F_WR_SOP | F_WR_EOP);
	wrp->wr_lo = htonl(V_WR_TID(q->token));

	spin_lock(&q->lock);
      again:reclaim_completed_tx_imm(q);

	ret = check_desc_avail(adap, q, skb, 1, TXQ_CTRL);
	if (unlikely(ret)) {
		if (ret == 1) {
			spin_unlock(&q->lock);
			return NET_XMIT_CN;
		}
		goto again;
	}

	write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);

	q->in_use++;
	if (++q->pidx >= q->size) {
		q->pidx = 0;
		q->gen ^= 1;
	}
	spin_unlock(&q->lock);
	wmb();
	t3_write_reg(adap, A_SG_KDOORBELL,
		     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
	return NET_XMIT_SUCCESS;
}

/**
 * restart_ctrlq - restart a suspended control queue
 * @qs: the queue set containing the control queue
 *
 * Resumes transmission on a suspended Tx control queue.
 */
static void restart_ctrlq(unsigned long data)
{
	struct sk_buff *skb;
	struct sge_qset *qs = (struct sge_qset *)data;
	struct sge_txq *q = &qs->txq[TXQ_CTRL];
	struct adapter *adap = qs->netdev->priv;

	spin_lock(&q->lock);
      again:reclaim_completed_tx_imm(q);

	while (q->in_use < q->size && (skb = __skb_dequeue(&q->sendq)) != NULL) {

		write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);

		if (++q->pidx >= q->size) {
			q->pidx = 0;
			q->gen ^= 1;
		}
		q->in_use++;
	}

	if (!skb_queue_empty(&q->sendq)) {
		set_bit(TXQ_CTRL, &qs->txq_stopped);
		smp_mb__after_clear_bit();

		if (should_restart_tx(q) &&
		    test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped))
			goto again;
		q->stops++;
	}

	spin_unlock(&q->lock);
	t3_write_reg(adap, A_SG_KDOORBELL,
		     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
}

/*
 * Send a management message through control queue 0
 */
int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
{
	return ctrl_xmit(adap, &adap->sge.qs[0].txq[TXQ_CTRL], skb);
}

/**
 * deferred_unmap_destructor - unmap a packet when it is freed
 * @skb: the packet
 *
 * This is the packet destructor used for Tx packets that need to remain
 * mapped until they are freed rather than until their Tx descriptors are
 * freed.
 */
static void deferred_unmap_destructor(struct sk_buff *skb)
{
	int i;
	const dma_addr_t *p;
	const struct skb_shared_info *si;
	const struct deferred_unmap_info *dui;
	const struct unmap_info *ui = (struct unmap_info *)skb->cb;

	dui = (struct deferred_unmap_info *)skb->head;
	p = dui->addr;

	if (ui->len)
		pci_unmap_single(dui->pdev, *p++, ui->len, PCI_DMA_TODEVICE);

	si = skb_shinfo(skb);
	for (i = 0; i < si->nr_frags; i++)
		pci_unmap_page(dui->pdev, *p++, si->frags[i].size,
			       PCI_DMA_TODEVICE);
}

static void setup_deferred_unmapping(struct sk_buff *skb, struct pci_dev *pdev,
				     const struct sg_ent *sgl, int sgl_flits)
{
	dma_addr_t *p;
	struct deferred_unmap_info *dui;

	dui = (struct deferred_unmap_info *)skb->head;
	dui->pdev = pdev;
	for (p = dui->addr; sgl_flits >= 3; sgl++, sgl_flits -= 3) {
		*p++ = be64_to_cpu(sgl->addr[0]);
		*p++ = be64_to_cpu(sgl->addr[1]);
	}
	if (sgl_flits)
		*p = be64_to_cpu(sgl->addr[0]);
}
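
/*
 * Worked example: an 8-flit SGL (five buffers, see sgl_len()) yields two
 * full 3-flit entries contributing two DMA addresses each, plus a 2-flit
 * tail entry contributing one more, for five harvested addresses.
 */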

/**
 * write_ofld_wr - write an offload work request
 * @adap: the adapter
 * @skb: the packet to send
 * @q: the Tx queue
 * @pidx: index of the first Tx descriptor to write
 * @gen: the generation value to use
 * @ndesc: number of descriptors the packet will occupy
 *
 * Write an offload work request to send the supplied packet.  The packet
 * data already carry the work request with most fields populated.
 */
static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb,
			  struct sge_txq *q, unsigned int pidx,
			  unsigned int gen, unsigned int ndesc)
{
	unsigned int sgl_flits, flits;
	struct work_request_hdr *from;
	struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
	struct tx_desc *d = &q->desc[pidx];

	if (immediate(skb)) {
		q->sdesc[pidx].skb = NULL;
		write_imm(d, skb, skb->len, gen);
		return;
	}

	/* Only TX_DATA builds SGLs */

	from = (struct work_request_hdr *)skb->data;
	memcpy(&d->flit[1], &from[1],
	       skb_transport_offset(skb) - sizeof(*from));

	flits = skb_transport_offset(skb) / 8;
	sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
	sgl_flits = make_sgl(skb, sgp, skb_transport_header(skb),
			     skb->tail - skb->transport_header,
			     adap->pdev);
	if (need_skb_unmap()) {
		setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits);
		skb->destructor = deferred_unmap_destructor;
		((struct unmap_info *)skb->cb)->len = (skb->tail -
						       skb->transport_header);
	}

	write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits,
			 gen, from->wr_hi, from->wr_lo);
}

/**
 * calc_tx_descs_ofld - calculate # of Tx descriptors for an offload packet
 * @skb: the packet
 *
 * Returns the number of Tx descriptors needed for the given offload
 * packet.  These packets are already fully constructed.
 */
static inline unsigned int calc_tx_descs_ofld(const struct sk_buff *skb)
{
	unsigned int flits, cnt = skb_shinfo(skb)->nr_frags;

	if (skb->len <= WR_LEN && cnt == 0)
		return 1;	/* packet fits as immediate data */

	flits = skb_transport_offset(skb) / 8;	/* headers */
	if (skb->tail != skb->transport_header)
		cnt++;
	return flits_to_desc(flits + sgl_len(cnt));
}

/**
 * ofld_xmit - send a packet through an offload queue
 * @adap: the adapter
 * @q: the Tx offload queue
 * @skb: the packet
 *
 * Send an offload packet through an SGE offload queue.
 */
static int ofld_xmit(struct adapter *adap, struct sge_txq *q,
		     struct sk_buff *skb)
{
	int ret;
	unsigned int ndesc = calc_tx_descs_ofld(skb), pidx, gen;

	spin_lock(&q->lock);
      again:reclaim_completed_tx(adap, q);

	ret = check_desc_avail(adap, q, skb, ndesc, TXQ_OFLD);
	if (unlikely(ret)) {
		if (ret == 1) {
			skb->priority = ndesc;	/* save for restart */
			spin_unlock(&q->lock);
			return NET_XMIT_CN;
		}
		goto again;
	}

	gen = q->gen;
	q->in_use += ndesc;
	pidx = q->pidx;
	q->pidx += ndesc;
	if (q->pidx >= q->size) {
		q->pidx -= q->size;
		q->gen ^= 1;
	}
	spin_unlock(&q->lock);

	write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
	check_ring_tx_db(adap, q);
	return NET_XMIT_SUCCESS;
}

/**
 * restart_offloadq - restart a suspended offload queue
 * @qs: the queue set containing the offload queue
 *
 * Resumes transmission on a suspended Tx offload queue.
 */
static void restart_offloadq(unsigned long data)
{
	struct sk_buff *skb;
	struct sge_qset *qs = (struct sge_qset *)data;
	struct sge_txq *q = &qs->txq[TXQ_OFLD];
	struct adapter *adap = qs->netdev->priv;

	spin_lock(&q->lock);
      again:reclaim_completed_tx(adap, q);

	while ((skb = skb_peek(&q->sendq)) != NULL) {
		unsigned int gen, pidx;
		unsigned int ndesc = skb->priority;

		if (unlikely(q->size - q->in_use < ndesc)) {
			set_bit(TXQ_OFLD, &qs->txq_stopped);
			smp_mb__after_clear_bit();

			if (should_restart_tx(q) &&
			    test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped))
				goto again;
			q->stops++;
			break;
		}

		gen = q->gen;
		q->in_use += ndesc;
		pidx = q->pidx;
		q->pidx += ndesc;
		if (q->pidx >= q->size) {
			q->pidx -= q->size;
			q->gen ^= 1;
		}
		__skb_unlink(skb, &q->sendq);
		spin_unlock(&q->lock);

		write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
		spin_lock(&q->lock);
	}
	spin_unlock(&q->lock);

#if USE_GTS
	set_bit(TXQ_RUNNING, &q->flags);
	set_bit(TXQ_LAST_PKT_DB, &q->flags);
#endif
	t3_write_reg(adap, A_SG_KDOORBELL,
		     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
}

/**
 * queue_set - return the queue set a packet should use
 * @skb: the packet
 *
 * Maps a packet to the SGE queue set it should use.  The desired queue
 * set is carried in bits 1-3 in the packet's priority.
 */
static inline int queue_set(const struct sk_buff *skb)
{
	return skb->priority >> 1;
}

/**
 * is_ctrl_pkt - return whether an offload packet is a control packet
 * @skb: the packet
 *
 * Determines whether an offload packet should use an OFLD or a CTRL
 * Tx queue.  This is indicated by bit 0 in the packet's priority.
 */
static inline int is_ctrl_pkt(const struct sk_buff *skb)
{
	return skb->priority & 1;
}
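
/*
 * Encoding example: a control message destined for queue set 2 would be
 * sent with skb->priority = (2 << 1) | 1 = 5, so queue_set() recovers 2
 * and is_ctrl_pkt() returns true.
 */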

/**
 * t3_offload_tx - send an offload packet
 * @tdev: the offload device to send to
 * @skb: the packet
 *
 * Sends an offload packet.  We use the packet priority to select the
 * appropriate Tx queue as follows: bit 0 indicates whether the packet
 * should be sent as regular or control, bits 1-3 select the queue set.
 */
int t3_offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
{
	struct adapter *adap = tdev2adap(tdev);
	struct sge_qset *qs = &adap->sge.qs[queue_set(skb)];

	if (unlikely(is_ctrl_pkt(skb)))
		return ctrl_xmit(adap, &qs->txq[TXQ_CTRL], skb);

	return ofld_xmit(adap, &qs->txq[TXQ_OFLD], skb);
}

/**
 * offload_enqueue - add an offload packet to an SGE offload receive queue
 * @q: the SGE response queue
 * @skb: the packet
 *
 * Add a new offload packet to an SGE response queue's offload packet
 * queue.  If the packet is the first on the queue it schedules the RX
 * softirq to process the queue.
 */
static inline void offload_enqueue(struct sge_rspq *q, struct sk_buff *skb)
{
	skb->next = skb->prev = NULL;
	if (q->rx_tail)
		q->rx_tail->next = skb;
	else {
		struct sge_qset *qs = rspq_to_qset(q);

		if (__netif_rx_schedule_prep(qs->netdev))
			__netif_rx_schedule(qs->netdev);
		q->rx_head = skb;
	}
	q->rx_tail = skb;
}

/**
 * deliver_partial_bundle - deliver a (partial) bundle of Rx offload pkts
 * @tdev: the offload device that will be receiving the packets
 * @q: the SGE response queue that assembled the bundle
 * @skbs: the partial bundle
 * @n: the number of packets in the bundle
 *
 * Delivers a (partial) bundle of Rx offload packets to an offload device.
 */
static inline void deliver_partial_bundle(struct t3cdev *tdev,
					  struct sge_rspq *q,
					  struct sk_buff *skbs[], int n)
{
	if (n) {
		q->offload_bundles++;
		tdev->recv(tdev, skbs, n);
	}
}

/**
 * ofld_poll - NAPI handler for offload packets in interrupt mode
 * @dev: the network device doing the polling
 * @budget: polling budget
 *
 * The NAPI handler for offload packets when a response queue is serviced
 * by the hard interrupt handler, i.e., when it's operating in non-polling
 * mode.  Creates small packet batches and sends them through the offload
 * receive handler.  Batches need to be of modest size as we do prefetches
 * on the packets in each.
 */
static int ofld_poll(struct net_device *dev, int *budget)
{
	struct adapter *adapter = dev->priv;
	struct sge_qset *qs = dev2qset(dev);
	struct sge_rspq *q = &qs->rspq;
	int work_done, limit = min(*budget, dev->quota), avail = limit;

	while (avail) {
		struct sk_buff *head, *tail, *skbs[RX_BUNDLE_SIZE];
		int ngathered;

		spin_lock_irq(&q->lock);
		head = q->rx_head;
		if (!head) {
			work_done = limit - avail;
			*budget -= work_done;
			dev->quota -= work_done;
			__netif_rx_complete(dev);
			spin_unlock_irq(&q->lock);
			return 0;
		}

		tail = q->rx_tail;
		q->rx_head = q->rx_tail = NULL;
		spin_unlock_irq(&q->lock);

		for (ngathered = 0; avail && head; avail--) {
			prefetch(head->data);
			skbs[ngathered] = head;
			head = head->next;
			skbs[ngathered]->next = NULL;
			if (++ngathered == RX_BUNDLE_SIZE) {
				q->offload_bundles++;
				adapter->tdev.recv(&adapter->tdev, skbs,
						   ngathered);
				ngathered = 0;
			}
		}
		if (head) {	/* splice remaining packets back onto Rx queue */
			spin_lock_irq(&q->lock);
			tail->next = q->rx_head;
			if (!q->rx_head)
				q->rx_tail = tail;
			q->rx_head = head;
			spin_unlock_irq(&q->lock);
		}
		deliver_partial_bundle(&adapter->tdev, q, skbs, ngathered);
	}
	work_done = limit - avail;
	*budget -= work_done;
	dev->quota -= work_done;
	return 1;
}

/**
 * rx_offload - process a received offload packet
 * @tdev: the offload device receiving the packet
 * @rq: the response queue that received the packet
 * @skb: the packet
 * @rx_gather: a gather list of packets if we are building a bundle
 * @gather_idx: index of the next available slot in the bundle
 *
 * Process an ingress offload packet and add it to the offload ingress
 * queue.  Returns the index of the next available slot in the bundle.
 */
static inline int rx_offload(struct t3cdev *tdev, struct sge_rspq *rq,
			     struct sk_buff *skb, struct sk_buff *rx_gather[],
			     unsigned int gather_idx)
{
	rq->offload_pkts++;
	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);

	if (rq->polling) {
		rx_gather[gather_idx++] = skb;
		if (gather_idx == RX_BUNDLE_SIZE) {
			tdev->recv(tdev, rx_gather, RX_BUNDLE_SIZE);
			gather_idx = 0;
			rq->offload_bundles++;
		}
	} else
		offload_enqueue(rq, skb);

	return gather_idx;
}

/**
 * restart_tx - check whether to restart suspended Tx queues
 * @qs: the queue set to resume
 *
 * Restarts suspended Tx queues of an SGE queue set if they have enough
 * free resources to resume operation.
 */
static void restart_tx(struct sge_qset *qs)
{
	if (test_bit(TXQ_ETH, &qs->txq_stopped) &&
	    should_restart_tx(&qs->txq[TXQ_ETH]) &&
	    test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
		qs->txq[TXQ_ETH].restarts++;
		if (netif_running(qs->netdev))
			netif_wake_queue(qs->netdev);
	}

	if (test_bit(TXQ_OFLD, &qs->txq_stopped) &&
	    should_restart_tx(&qs->txq[TXQ_OFLD]) &&
	    test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped)) {
		qs->txq[TXQ_OFLD].restarts++;
		tasklet_schedule(&qs->txq[TXQ_OFLD].qresume_tsk);
	}
	if (test_bit(TXQ_CTRL, &qs->txq_stopped) &&
	    should_restart_tx(&qs->txq[TXQ_CTRL]) &&
	    test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped)) {
		qs->txq[TXQ_CTRL].restarts++;
		tasklet_schedule(&qs->txq[TXQ_CTRL].qresume_tsk);
	}
}

/**
 * rx_eth - process an ingress Ethernet packet
 * @adap: the adapter
 * @rq: the response queue that received the packet
 * @skb: the packet
 * @pad: amount of padding at the start of the buffer
 *
 * Process an ingress Ethernet packet and deliver it to the stack.
 * The padding is 2 if the packet was delivered in an Rx buffer and 0
 * if it was immediate data in a response.
 */
static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
		   struct sk_buff *skb, int pad)
{
	struct cpl_rx_pkt *p = (struct cpl_rx_pkt *)(skb->data + pad);
	struct port_info *pi;

	skb_pull(skb, sizeof(*p) + pad);
	skb->dev->last_rx = jiffies;
	skb->protocol = eth_type_trans(skb, adap->port[p->iff]);
	pi = netdev_priv(skb->dev);
	if (pi->rx_csum_offload && p->csum_valid && p->csum == 0xffff &&
	    !p->fragment) {
		rspq_to_qset(rq)->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else
		skb->ip_summed = CHECKSUM_NONE;

	if (unlikely(p->vlan_valid)) {
		struct vlan_group *grp = pi->vlan_grp;

		rspq_to_qset(rq)->port_stats[SGE_PSTAT_VLANEX]++;
		if (likely(grp))
			__vlan_hwaccel_rx(skb, grp, ntohs(p->vlan),
					  rq->polling);
		else
			dev_kfree_skb_any(skb);
	} else if (rq->polling)
		netif_receive_skb(skb);
	else
		netif_rx(skb);
}

#define SKB_DATA_SIZE 128

static void skb_data_init(struct sk_buff *skb, struct sge_fl_page *p,
			  unsigned int len)
{
	skb->len = len;
	if (len <= SKB_DATA_SIZE) {
		memcpy(skb->data, p->va, len);
		skb->tail += len;
		put_page(p->frag.page);
	} else {
		memcpy(skb->data, p->va, SKB_DATA_SIZE);
		skb_shinfo(skb)->frags[0].page = p->frag.page;
		skb_shinfo(skb)->frags[0].page_offset =
		    p->frag.page_offset + SKB_DATA_SIZE;
		skb_shinfo(skb)->frags[0].size = len - SKB_DATA_SIZE;
		skb_shinfo(skb)->nr_frags = 1;
		skb->data_len = len - SKB_DATA_SIZE;
		skb->tail += SKB_DATA_SIZE;
		skb->truesize += skb->data_len;
	}
}
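
/*
 * Worked example: for a 1000-byte payload with SKB_DATA_SIZE == 128, the
 * first 128 bytes are copied into the skb's linear area and the remaining
 * 872 bytes stay in the page, attached as frag 0 at page_offset + 128.
 */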
1740
1741 /**
1742 * get_packet - return the next ingress packet buffer from a free list
1743 * @adap: the adapter that received the packet
1744 * @fl: the SGE free list holding the packet
1745 * @len: the packet length including any SGE padding
1746 * @drop_thres: # of remaining buffers before we start dropping packets
1747 *
1748 * Get the next packet from a free list and complete setup of the
1749 * sk_buff. If the packet is small we make a copy and recycle the
1750 * original buffer, otherwise we use the original buffer itself. If a
1751 * positive drop threshold is supplied packets are dropped and their
1752 * buffers recycled if (a) the number of remaining buffers is under the
1753 * threshold and the packet is too big to copy, or (b) the packet should
1754 * be copied but there is no memory for the copy.
1755 */
1756 static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl,
1757 unsigned int len, unsigned int drop_thres)
1758 {
1759 struct sk_buff *skb = NULL;
1760 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
1761
1762 prefetch(sd->t.skb->data);
1763
1764 if (len <= SGE_RX_COPY_THRES) {
1765 skb = alloc_skb(len, GFP_ATOMIC);
1766 if (likely(skb != NULL)) {
1767 struct rx_desc *d = &fl->desc[fl->cidx];
1768 dma_addr_t mapping =
1769 (dma_addr_t)((u64) be32_to_cpu(d->addr_hi) << 32 |
1770 be32_to_cpu(d->addr_lo));
1771
1772 __skb_put(skb, len);
1773 pci_dma_sync_single_for_cpu(adap->pdev, mapping, len,
1774 PCI_DMA_FROMDEVICE);
1775 skb_copy_from_linear_data(sd->t.skb, skb->data, len);
1776 pci_dma_sync_single_for_device(adap->pdev, mapping, len,
1777 PCI_DMA_FROMDEVICE);
1778 } else if (!drop_thres)
1779 goto use_orig_buf;
1780 recycle:
1781 recycle_rx_buf(adap, fl, fl->cidx);
1782 return skb;
1783 }
1784
1785 if (unlikely(fl->credits < drop_thres))
1786 goto recycle;
1787
1788 use_orig_buf:
1789 pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr),
1790 fl->buf_size, PCI_DMA_FROMDEVICE);
1791 skb = sd->t.skb;
1792 skb_put(skb, len);
1793 __refill_fl(adap, fl);
1794 return skb;
1795 }
1796
1797 /**
1798 * handle_rsp_cntrl_info - handles control information in a response
1799 * @qs: the queue set corresponding to the response
1800 * @flags: the response control flags
1801 *
1802 * Handles the control information of an SGE response, such as GTS
1803 * indications and completion credits for the queue set's Tx queues.
1804 * HW coalesces credits, we don't do any extra SW coalescing.
1805 */
1806 static inline void handle_rsp_cntrl_info(struct sge_qset *qs, u32 flags)
1807 {
1808 unsigned int credits;
1809
1810 #if USE_GTS
1811 if (flags & F_RSPD_TXQ0_GTS)
1812 clear_bit(TXQ_RUNNING, &qs->txq[TXQ_ETH].flags);
1813 #endif
1814
1815 credits = G_RSPD_TXQ0_CR(flags);
1816 if (credits)
1817 qs->txq[TXQ_ETH].processed += credits;
1818
1819 credits = G_RSPD_TXQ2_CR(flags);
1820 if (credits)
1821 qs->txq[TXQ_CTRL].processed += credits;
1822
1823 #if USE_GTS
1824 if (flags & F_RSPD_TXQ1_GTS)
1825 clear_bit(TXQ_RUNNING, &qs->txq[TXQ_OFLD].flags);
1826 #endif
1827 credits = G_RSPD_TXQ1_CR(flags);
1828 if (credits)
1829 qs->txq[TXQ_OFLD].processed += credits;
1830 }
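/*
 * The G_RSPD_TXQn_CR() accessors above extract the per-Tx-queue
 * completion-credit counts that HW packs into the response flags word.
 * The matching V_/M_ constants make up RSPD_CTRL_MASK below, which
 * callers test to skip this function entirely when a response carries
 * no control information.
 */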
1831
1832 /**
1833 * check_ring_db - check if we need to ring any doorbells
1834 * @adapter: the adapter
1835 * @qs: the queue set whose Tx queues are to be examined
1836 * @sleeping: indicates which Tx queue sent GTS
1837 *
1838 * Checks if some of a queue set's Tx queues need to ring their doorbells
1839 * to resume transmission after idling while they still have unprocessed
1840 * descriptors.
1841 */
1842 static void check_ring_db(struct adapter *adap, struct sge_qset *qs,
1843 unsigned int sleeping)
1844 {
1845 if (sleeping & F_RSPD_TXQ0_GTS) {
1846 struct sge_txq *txq = &qs->txq[TXQ_ETH];
1847
1848 if (txq->cleaned + txq->in_use != txq->processed &&
1849 !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) {
1850 set_bit(TXQ_RUNNING, &txq->flags);
1851 t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX |
1852 V_EGRCNTX(txq->cntxt_id));
1853 }
1854 }
1855
1856 if (sleeping & F_RSPD_TXQ1_GTS) {
1857 struct sge_txq *txq = &qs->txq[TXQ_OFLD];
1858
1859 if (txq->cleaned + txq->in_use != txq->processed &&
1860 !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) {
1861 set_bit(TXQ_RUNNING, &txq->flags);
1862 t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX |
1863 V_EGRCNTX(txq->cntxt_id));
1864 }
1865 }
1866 }
1867
1868 /**
1869 * is_new_response - check if a response is newly written
1870 * @r: the response descriptor
1871 * @q: the response queue
1872 *
1873 * Returns true if a response descriptor contains an as-yet unprocessed
1874 * response.
1875 */
1876 static inline int is_new_response(const struct rsp_desc *r,
1877 const struct sge_rspq *q)
1878 {
1879 return (r->intr_gen & F_RSPD_GEN2) == q->gen;
1880 }
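/*
 * A sketch of the generation scheme (4-entry queue for brevity): HW
 * flips the generation bit it writes each time it wraps the ring, and
 * SW flips q->gen in step when its consumer index wraps (see
 * process_responses()), so descriptors left over from the previous lap
 * always compare as old:
 *
 *     response #:    1 2 3 4 | 5 6 ...
 *     desc gen bit:  1 1 1 1 | 0 0 ...
 *     q->gen:        1 1 1 1 | 0 0 ...
 */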
1881
1882 #define RSPD_GTS_MASK (F_RSPD_TXQ0_GTS | F_RSPD_TXQ1_GTS)
1883 #define RSPD_CTRL_MASK (RSPD_GTS_MASK | \
1884 V_RSPD_TXQ0_CR(M_RSPD_TXQ0_CR) | \
1885 V_RSPD_TXQ1_CR(M_RSPD_TXQ1_CR) | \
1886 V_RSPD_TXQ2_CR(M_RSPD_TXQ2_CR))
1887
1888 /* How long to delay the next interrupt in case of memory shortage, in 0.1 us units (2500 = 250 us). */
1889 #define NOMEM_INTR_DELAY 2500
1890
1891 /**
1892 * process_responses - process responses from an SGE response queue
1893 * @adap: the adapter
1894 * @qs: the queue set to which the response queue belongs
1895 * @budget: how many responses can be processed in this round
1896 *
1897 * Process responses from an SGE response queue up to the supplied budget.
1898 * Responses include received packets as well as credits and other events
1899 * for the queues that belong to the response queue's queue set.
1900 * A negative budget is effectively unlimited.
1901 *
1902 * Additionally choose the interrupt holdoff time for the next interrupt
1903 * on this queue. If the system is under memory shortage, use a fairly
1904 * long delay to help recovery.
1905 */
1906 static int process_responses(struct adapter *adap, struct sge_qset *qs,
1907 int budget)
1908 {
1909 struct sge_rspq *q = &qs->rspq;
1910 struct rsp_desc *r = &q->desc[q->cidx];
1911 int budget_left = budget;
1912 unsigned int sleeping = 0;
1913 struct sk_buff *offload_skbs[RX_BUNDLE_SIZE];
1914 int ngathered = 0;
1915
1916 q->next_holdoff = q->holdoff_tmr;
1917
1918 while (likely(budget_left && is_new_response(r, q))) {
1919 int eth, ethpad = 2;
1920 struct sk_buff *skb = NULL;
1921 u32 len, flags = ntohl(r->flags);
1922 u32 rss_hi = *(const u32 *)r, rss_lo = r->rss_hdr.rss_hash_val;
1923
1924 eth = r->rss_hdr.opcode == CPL_RX_PKT;
1925
1926 if (unlikely(flags & F_RSPD_ASYNC_NOTIF)) {
1927 skb = alloc_skb(AN_PKT_SIZE, GFP_ATOMIC);
1928 if (!skb)
1929 goto no_mem;
1930
1931 memcpy(__skb_put(skb, AN_PKT_SIZE), r, AN_PKT_SIZE);
1932 skb->data[0] = CPL_ASYNC_NOTIF;
1933 rss_hi = htonl(CPL_ASYNC_NOTIF << 24);
1934 q->async_notif++;
1935 } else if (flags & F_RSPD_IMM_DATA_VALID) {
1936 skb = get_imm_packet(r);
1937 if (unlikely(!skb)) {
1938 no_mem:
1939 q->next_holdoff = NOMEM_INTR_DELAY;
1940 q->nomem++;
1941 /* consume one credit since we tried */
1942 budget_left--;
1943 break;
1944 }
1945 q->imm_data++;
1946 ethpad = 0;
1947 } else if ((len = ntohl(r->len_cq)) != 0) {
1948 struct sge_fl *fl =
1949 (len & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0];
1950
1951 if (fl->buf_size == RX_PAGE_SIZE) {
1952 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
1953 struct sge_fl_page *p = &sd->t.page;
1954
1955 prefetch(p->va);
1956 prefetch(p->va + L1_CACHE_BYTES);
1957
1958 __refill_fl(adap, fl);
1959
1960 pci_unmap_single(adap->pdev,
1961 pci_unmap_addr(sd, dma_addr),
1962 fl->buf_size,
1963 PCI_DMA_FROMDEVICE);
1964
1965 if (eth) {
1966 if (unlikely(fl->credits <
1967 SGE_RX_DROP_THRES))
1968 goto eth_recycle;
1969
1970 skb = alloc_skb(SKB_DATA_SIZE,
1971 GFP_ATOMIC);
1972 if (unlikely(!skb)) {
1973 eth_recycle:
1974 q->rx_drops++;
1975 recycle_rx_buf(adap, fl,
1976 fl->cidx);
1977 goto eth_done;
1978 }
1979 } else {
1980 skb = alloc_skb(SKB_DATA_SIZE,
1981 GFP_ATOMIC);
1982 if (unlikely(!skb))
1983 goto no_mem;
1984 }
1985
1986 skb_data_init(skb, p, G_RSPD_LEN(len));
1987 eth_done:
1988 fl->credits--;
1989 q->eth_pkts++;
1990 } else {
1991 fl->credits--;
1992 skb = get_packet(adap, fl, G_RSPD_LEN(len),
1993 eth ? SGE_RX_DROP_THRES : 0);
1994 }
1995
1996 if (++fl->cidx == fl->size)
1997 fl->cidx = 0;
1998 } else
1999 q->pure_rsps++;
2000
2001 if (flags & RSPD_CTRL_MASK) {
2002 sleeping |= flags & RSPD_GTS_MASK;
2003 handle_rsp_cntrl_info(qs, flags);
2004 }
2005
2006 r++;
2007 if (unlikely(++q->cidx == q->size)) {
2008 q->cidx = 0;
2009 q->gen ^= 1;
2010 r = q->desc;
2011 }
2012 prefetch(r);
2013
2014 if (++q->credits >= (q->size / 4)) {
2015 refill_rspq(adap, q, q->credits);
2016 q->credits = 0;
2017 }
2018
2019 if (skb) {
2020 /* Preserve the RSS info in csum & priority */
2021 skb->csum = rss_hi;
2022 skb->priority = rss_lo;
2023
2024 if (eth)
2025 rx_eth(adap, q, skb, ethpad);
2026 else {
2027 if (unlikely(r->rss_hdr.opcode ==
2028 CPL_TRACE_PKT))
2029 __skb_pull(skb, ethpad);
2030
2031 ngathered = rx_offload(&adap->tdev, q,
2032 skb, offload_skbs,
2033 ngathered);
2034 }
2035 }
2036 --budget_left;
2037 }
2038
2039 deliver_partial_bundle(&adap->tdev, q, offload_skbs, ngathered);
2040 if (sleeping)
2041 check_ring_db(adap, qs, sleeping);
2042
2043 smp_mb(); /* commit Tx queue .processed updates */
2044 if (unlikely(qs->txq_stopped != 0))
2045 restart_tx(qs);
2046
2047 budget -= budget_left;
2048 return budget;
2049 }
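/*
 * Note on the unlimited case: when called with budget == -1 (the
 * hard-IRQ paths below) budget_left simply counts down past zero, so
 * the loop stops only when responses run out, and the return value
 * budget - budget_left still equals the number of responses processed:
 * -1 - (-1 - n) == n.
 */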
2050
2051 static inline int is_pure_response(const struct rsp_desc *r)
2052 {
2053 u32 n = ntohl(r->flags) & (F_RSPD_ASYNC_NOTIF | F_RSPD_IMM_DATA_VALID);
2054
2055 return (n | r->len_cq) == 0;
2056 }
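/*
 * "Pure" means no payload in any form: no async notification, no
 * immediate data, and a zero len_cq (no free-list buffer consumed).
 * len_cq is big-endian, but byte order is irrelevant for a test
 * against zero.
 */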
2057
2058 /**
2059 * napi_rx_handler - the NAPI handler for Rx processing
2060 * @dev: the net device
2061 * @budget: how many packets we can process in this round
2062 *
2063 * Handler for new data events when using NAPI.
2064 */
2065 static int napi_rx_handler(struct net_device *dev, int *budget)
2066 {
2067 struct adapter *adap = dev->priv;
2068 struct sge_qset *qs = dev2qset(dev);
2069 int effective_budget = min(*budget, dev->quota);
2070
2071 int work_done = process_responses(adap, qs, effective_budget);
2072 *budget -= work_done;
2073 dev->quota -= work_done;
2074
2075 if (work_done >= effective_budget)
2076 return 1;
2077
2078 netif_rx_complete(dev);
2079
2080 /*
2081 * Because we don't atomically flush the following write it is
2082 * possible that in very rare cases it can reach the device in a way
2083 * that races with a new response being written plus an error interrupt
2084 * causing the NAPI interrupt handler below to return unhandled status
2085 * to the OS. To protect against this would require flushing the write
2086 * and doing both the write and the flush with interrupts off. Way too
2087 * expensive and unjustifiable given the rarity of the race.
2088 *
2089 * The race cannot happen at all with MSI-X.
2090 */
2091 t3_write_reg(adap, A_SG_GTS, V_RSPQ(qs->rspq.cntxt_id) |
2092 V_NEWTIMER(qs->rspq.next_holdoff) |
2093 V_NEWINDEX(qs->rspq.cidx));
2094 return 0;
2095 }
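/*
 * Under the old-style NAPI contract this code assumes, returning 1
 * tells the core to keep polling and returning 0 after
 * netif_rx_complete() ends the poll. For example, a 16-packet burst
 * polled with *budget == 64 finishes in one pass: work_done == 16 is
 * under the effective budget, so the poll completes and the GTS write
 * above re-enables the interrupt with the chosen holdoff.
 */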
2096
2097 /*
2098 * Returns true if the device is already scheduled for polling.
2099 */
2100 static inline int napi_is_scheduled(struct net_device *dev)
2101 {
2102 return test_bit(__LINK_STATE_RX_SCHED, &dev->state);
2103 }
2104
2105 /**
2106 * process_pure_responses - process pure responses from a response queue
2107 * @adap: the adapter
2108 * @qs: the queue set owning the response queue
2109 * @r: the first pure response to process
2110 *
2111 * A simpler version of process_responses() that handles only pure (i.e.,
2112 * non-data-carrying) responses. Such responses are too lightweight to
2113 * justify calling a softirq under NAPI, so we handle them specially in
2114 * the interrupt handler. The function is called with a pointer to a
2115 * response, which the caller must ensure is a valid pure response.
2116 *
2117 * Returns 1 if it encounters a valid data-carrying response, 0 otherwise.
2118 */
2119 static int process_pure_responses(struct adapter *adap, struct sge_qset *qs,
2120 struct rsp_desc *r)
2121 {
2122 struct sge_rspq *q = &qs->rspq;
2123 unsigned int sleeping = 0;
2124
2125 do {
2126 u32 flags = ntohl(r->flags);
2127
2128 r++;
2129 if (unlikely(++q->cidx == q->size)) {
2130 q->cidx = 0;
2131 q->gen ^= 1;
2132 r = q->desc;
2133 }
2134 prefetch(r);
2135
2136 if (flags & RSPD_CTRL_MASK) {
2137 sleeping |= flags & RSPD_GTS_MASK;
2138 handle_rsp_cntrl_info(qs, flags);
2139 }
2140
2141 q->pure_rsps++;
2142 if (++q->credits >= (q->size / 4)) {
2143 refill_rspq(adap, q, q->credits);
2144 q->credits = 0;
2145 }
2146 } while (is_new_response(r, q) && is_pure_response(r));
2147
2148 if (sleeping)
2149 check_ring_db(adap, qs, sleeping);
2150
2151 smp_mb(); /* commit Tx queue .processed updates */
2152 if (unlikely(qs->txq_stopped != 0))
2153 restart_tx(qs);
2154
2155 return is_new_response(r, q);
2156 }
2157
2158 /**
2159 * handle_responses - decide what to do with new responses in NAPI mode
2160 * @adap: the adapter
2161 * @q: the response queue
2162 *
2163 * This is used by the NAPI interrupt handlers to decide what to do with
2164 * new SGE responses. If there are no new responses it returns -1. If
2165 * there are new responses and they are pure (i.e., non-data-carrying),
2166 * it handles them straight in hard interrupt context as they are very
2167 * cheap and don't deliver any packets. Finally, if there are any data
2168 * signaling responses it schedules the NAPI handler. Returns 1 if it
2169 * schedules NAPI, 0 if all new responses were pure.
2170 *
2171 * The caller must ascertain NAPI is not already running.
2172 */
2173 static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
2174 {
2175 struct sge_qset *qs = rspq_to_qset(q);
2176 struct rsp_desc *r = &q->desc[q->cidx];
2177
2178 if (!is_new_response(r, q))
2179 return -1;
2180 if (is_pure_response(r) && process_pure_responses(adap, qs, r) == 0) {
2181 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2182 V_NEWTIMER(q->holdoff_tmr) | V_NEWINDEX(q->cidx));
2183 return 0;
2184 }
2185 if (likely(__netif_rx_schedule_prep(qs->netdev)))
2186 __netif_rx_schedule(qs->netdev);
2187 return 1;
2188 }
2189
2190 /*
2191 * The MSI-X interrupt handler for an SGE response queue for the non-NAPI case
2192 * (i.e., response queue serviced in hard interrupt).
2193 */
2194 irqreturn_t t3_sge_intr_msix(int irq, void *cookie)
2195 {
2196 struct sge_qset *qs = cookie;
2197 struct adapter *adap = qs->netdev->priv;
2198 struct sge_rspq *q = &qs->rspq;
2199
2200 spin_lock(&q->lock);
2201 if (process_responses(adap, qs, -1) == 0)
2202 q->unhandled_irqs++;
2203 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2204 V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx));
2205 spin_unlock(&q->lock);
2206 return IRQ_HANDLED;
2207 }
2208
2209 /*
2210 * The MSI-X interrupt handler for an SGE response queue for the NAPI case
2211 * (i.e., response queue serviced by NAPI polling).
2212 */
2213 irqreturn_t t3_sge_intr_msix_napi(int irq, void *cookie)
2214 {
2215 struct sge_qset *qs = cookie;
2216 struct adapter *adap = qs->netdev->priv;
2217 struct sge_rspq *q = &qs->rspq;
2218
2219 spin_lock(&q->lock);
2220 BUG_ON(napi_is_scheduled(qs->netdev));
2221
2222 if (handle_responses(adap, q) < 0)
2223 q->unhandled_irqs++;
2224 spin_unlock(&q->lock);
2225 return IRQ_HANDLED;
2226 }
2227
2228 /*
2229 * The non-NAPI MSI interrupt handler. This needs to handle data events from
2230 * SGE response queues as well as error and other async events as they all use
2231 * the same MSI vector. We use one SGE response queue per port in this mode
2232 * and protect all response queues with queue 0's lock.
2233 */
2234 static irqreturn_t t3_intr_msi(int irq, void *cookie)
2235 {
2236 int new_packets = 0;
2237 struct adapter *adap = cookie;
2238 struct sge_rspq *q = &adap->sge.qs[0].rspq;
2239
2240 spin_lock(&q->lock);
2241
2242 if (process_responses(adap, &adap->sge.qs[0], -1)) {
2243 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2244 V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx));
2245 new_packets = 1;
2246 }
2247
2248 if (adap->params.nports == 2 &&
2249 process_responses(adap, &adap->sge.qs[1], -1)) {
2250 struct sge_rspq *q1 = &adap->sge.qs[1].rspq;
2251
2252 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q1->cntxt_id) |
2253 V_NEWTIMER(q1->next_holdoff) |
2254 V_NEWINDEX(q1->cidx));
2255 new_packets = 1;
2256 }
2257
2258 if (!new_packets && t3_slow_intr_handler(adap) == 0)
2259 q->unhandled_irqs++;
2260
2261 spin_unlock(&q->lock);
2262 return IRQ_HANDLED;
2263 }
2264
2265 static int rspq_check_napi(struct net_device *dev, struct sge_rspq *q)
2266 {
2267 if (!napi_is_scheduled(dev) && is_new_response(&q->desc[q->cidx], q)) {
2268 if (likely(__netif_rx_schedule_prep(dev)))
2269 __netif_rx_schedule(dev);
2270 return 1;
2271 }
2272 return 0;
2273 }
2274
2275 /*
2276 * The MSI interrupt handler for the NAPI case (i.e., response queues serviced
2277 * by NAPI polling). Handles data events from SGE response queues as well as
2278 * error and other async events as they all use the same MSI vector. We use
2279 * one SGE response queue per port in this mode and protect all response
2280 * queues with queue 0's lock.
2281 */
2282 irqreturn_t t3_intr_msi_napi(int irq, void *cookie)
2283 {
2284 int new_packets;
2285 struct adapter *adap = cookie;
2286 struct sge_rspq *q = &adap->sge.qs[0].rspq;
2287
2288 spin_lock(&q->lock);
2289
2290 new_packets = rspq_check_napi(adap->sge.qs[0].netdev, q);
2291 if (adap->params.nports == 2)
2292 new_packets += rspq_check_napi(adap->sge.qs[1].netdev,
2293 &adap->sge.qs[1].rspq);
2294 if (!new_packets && t3_slow_intr_handler(adap) == 0)
2295 q->unhandled_irqs++;
2296
2297 spin_unlock(&q->lock);
2298 return IRQ_HANDLED;
2299 }
2300
2301 /*
2302 * A helper function that processes responses and issues GTS.
2303 */
2304 static inline int process_responses_gts(struct adapter *adap,
2305 struct sge_rspq *rq)
2306 {
2307 int work;
2308
2309 work = process_responses(adap, rspq_to_qset(rq), -1);
2310 t3_write_reg(adap, A_SG_GTS, V_RSPQ(rq->cntxt_id) |
2311 V_NEWTIMER(rq->next_holdoff) | V_NEWINDEX(rq->cidx));
2312 return work;
2313 }
2314
2315 /*
2316 * The legacy INTx interrupt handler. This needs to handle data events from
2317 * SGE response queues as well as error and other async events as they all use
2318 * the same interrupt pin. We use one SGE response queue per port in this mode
2319 * and protect all response queues with queue 0's lock.
2320 */
2321 static irqreturn_t t3_intr(int irq, void *cookie)
2322 {
2323 int work_done, w0, w1;
2324 struct adapter *adap = cookie;
2325 struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
2326 struct sge_rspq *q1 = &adap->sge.qs[1].rspq;
2327
2328 spin_lock(&q0->lock);
2329
2330 w0 = is_new_response(&q0->desc[q0->cidx], q0);
2331 w1 = adap->params.nports == 2 &&
2332 is_new_response(&q1->desc[q1->cidx], q1);
2333
2334 if (likely(w0 | w1)) {
2335 t3_write_reg(adap, A_PL_CLI, 0);
2336 t3_read_reg(adap, A_PL_CLI); /* flush */
2337
2338 if (likely(w0))
2339 process_responses_gts(adap, q0);
2340
2341 if (w1)
2342 process_responses_gts(adap, q1);
2343
2344 work_done = w0 | w1;
2345 } else
2346 work_done = t3_slow_intr_handler(adap);
2347
2348 spin_unlock(&q0->lock);
2349 return IRQ_RETVAL(work_done != 0);
2350 }
2351
2352 /*
2353 * Interrupt handler for legacy INTx interrupts for T3B-based cards.
2354 * Handles data events from SGE response queues as well as error and other
2355 * async events as they all use the same interrupt pin. We use one SGE
2356 * response queue per port in this mode and protect all response queues with
2357 * queue 0's lock.
2358 */
2359 static irqreturn_t t3b_intr(int irq, void *cookie)
2360 {
2361 u32 map;
2362 struct adapter *adap = cookie;
2363 struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
2364
2365 t3_write_reg(adap, A_PL_CLI, 0);
2366 map = t3_read_reg(adap, A_SG_DATA_INTR);
2367
2368 if (unlikely(!map)) /* shared interrupt, most likely */
2369 return IRQ_NONE;
2370
2371 spin_lock(&q0->lock);
2372
2373 if (unlikely(map & F_ERRINTR))
2374 t3_slow_intr_handler(adap);
2375
2376 if (likely(map & 1))
2377 process_responses_gts(adap, q0);
2378
2379 if (map & 2)
2380 process_responses_gts(adap, &adap->sge.qs[1].rspq);
2381
2382 spin_unlock(&q0->lock);
2383 return IRQ_HANDLED;
2384 }
2385
2386 /*
2387 * NAPI interrupt handler for legacy INTx interrupts for T3B-based cards.
2388 * Handles data events from SGE response queues as well as error and other
2389 * async events as they all use the same interrupt pin. We use one SGE
2390 * response queue per port in this mode and protect all response queues with
2391 * queue 0's lock.
2392 */
2393 static irqreturn_t t3b_intr_napi(int irq, void *cookie)
2394 {
2395 u32 map;
2396 struct net_device *dev;
2397 struct adapter *adap = cookie;
2398 struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
2399
2400 t3_write_reg(adap, A_PL_CLI, 0);
2401 map = t3_read_reg(adap, A_SG_DATA_INTR);
2402
2403 if (unlikely(!map)) /* shared interrupt, most likely */
2404 return IRQ_NONE;
2405
2406 spin_lock(&q0->lock);
2407
2408 if (unlikely(map & F_ERRINTR))
2409 t3_slow_intr_handler(adap);
2410
2411 if (likely(map & 1)) {
2412 dev = adap->sge.qs[0].netdev;
2413
2414 if (likely(__netif_rx_schedule_prep(dev)))
2415 __netif_rx_schedule(dev);
2416 }
2417 if (map & 2) {
2418 dev = adap->sge.qs[1].netdev;
2419
2420 if (likely(__netif_rx_schedule_prep(dev)))
2421 __netif_rx_schedule(dev);
2422 }
2423
2424 spin_unlock(&q0->lock);
2425 return IRQ_HANDLED;
2426 }
2427
2428 /**
2429 * t3_intr_handler - select the top-level interrupt handler
2430 * @adap: the adapter
2431 * @polling: whether using NAPI to service response queues
2432 *
2433 * Selects the top-level interrupt handler based on the type of interrupts
2434 * (MSI-X, MSI, or legacy) and whether NAPI will be used to service the
2435 * response queues.
2436 */
2437 intr_handler_t t3_intr_handler(struct adapter *adap, int polling)
2438 {
2439 if (adap->flags & USING_MSIX)
2440 return polling ? t3_sge_intr_msix_napi : t3_sge_intr_msix;
2441 if (adap->flags & USING_MSI)
2442 return polling ? t3_intr_msi_napi : t3_intr_msi;
2443 if (adap->params.rev > 0)
2444 return polling ? t3b_intr_napi : t3b_intr;
2445 return t3_intr;
2446 }
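/*
 * A usage sketch (caller details assumed, they live outside this file):
 * the top-level driver would pick the handler once when setting up
 * interrupts, e.g.
 *
 *     err = request_irq(adap->pdev->irq,
 *                       t3_intr_handler(adap, rspq_polling),
 *                       IRQF_SHARED, adap->name, adap);
 */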
2447
2448 /**
2449 * t3_sge_err_intr_handler - SGE async event interrupt handler
2450 * @adapter: the adapter
2451 *
2452 * Interrupt handler for SGE asynchronous (non-data) events.
2453 */
2454 void t3_sge_err_intr_handler(struct adapter *adapter)
2455 {
2456 unsigned int v, status = t3_read_reg(adapter, A_SG_INT_CAUSE);
2457
2458 if (status & F_RSPQCREDITOVERFOW)
2459 CH_ALERT(adapter, "SGE response queue credit overflow\n");
2460
2461 if (status & F_RSPQDISABLED) {
2462 v = t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS);
2463
2464 CH_ALERT(adapter,
2465 "packet delivered to disabled response queue "
2466 "(0x%x)\n", (v >> S_RSPQ0DISABLED) & 0xff);
2467 }
2468
2469 t3_write_reg(adapter, A_SG_INT_CAUSE, status);
2470 if (status & (F_RSPQCREDITOVERFOW | F_RSPQDISABLED))
2471 t3_fatal_err(adapter);
2472 }
2473
2474 /**
2475 * sge_timer_cb - perform periodic maintenance of an SGE qset
2476 * @data: the SGE queue set to maintain
2477 *
2478 * Runs periodically from a timer to perform maintenance of an SGE queue
2479 * set. It performs two tasks:
2480 *
2481 * a) Cleans up any completed Tx descriptors that may still be pending.
2482 * Normal descriptor cleanup happens when new packets are added to a Tx
2483 * queue, so this timer is relatively infrequent and does any cleanup only
2484 * if the Tx queue has not seen any new packets in a while. We make a
2485 * best effort attempt to reclaim descriptors, in that we don't wait
2486 * around if we cannot get a queue's lock (which most likely is because
2487 * someone else is queueing new packets and so will also handle the clean
2488 * up). Since control queues use immediate data exclusively we don't
2489 * bother cleaning them up here.
2490 *
2491 * b) Replenishes Rx queues that have run out due to memory shortage.
2492 * Normally new Rx buffers are added when existing ones are consumed, but
2493 * when out of memory a queue can become empty. We try to add only a few
2494 * buffers here; the queue will be replenished fully as these new buffers
2495 * are used up if memory shortage has subsided.
2496 */
2497 static void sge_timer_cb(unsigned long data)
2498 {
2499 spinlock_t *lock;
2500 struct sge_qset *qs = (struct sge_qset *)data;
2501 struct adapter *adap = qs->netdev->priv;
2502
2503 if (spin_trylock(&qs->txq[TXQ_ETH].lock)) {
2504 reclaim_completed_tx(adap, &qs->txq[TXQ_ETH]);
2505 spin_unlock(&qs->txq[TXQ_ETH].lock);
2506 }
2507 if (spin_trylock(&qs->txq[TXQ_OFLD].lock)) {
2508 reclaim_completed_tx(adap, &qs->txq[TXQ_OFLD]);
2509 spin_unlock(&qs->txq[TXQ_OFLD].lock);
2510 }
2511 lock = (adap->flags & USING_MSIX) ? &qs->rspq.lock :
2512 &adap->sge.qs[0].rspq.lock;
2513 if (spin_trylock_irq(lock)) {
2514 if (!napi_is_scheduled(qs->netdev)) {
2515 u32 status = t3_read_reg(adap, A_SG_RSPQ_FL_STATUS);
2516
2517 if (qs->fl[0].credits < qs->fl[0].size)
2518 __refill_fl(adap, &qs->fl[0]);
2519 if (qs->fl[1].credits < qs->fl[1].size)
2520 __refill_fl(adap, &qs->fl[1]);
2521
2522 if (status & (1 << qs->rspq.cntxt_id)) {
2523 qs->rspq.starved++;
2524 if (qs->rspq.credits) {
2525 refill_rspq(adap, &qs->rspq, 1);
2526 qs->rspq.credits--;
2527 qs->rspq.restarted++;
2528 t3_write_reg(adap, A_SG_RSPQ_FL_STATUS,
2529 1 << qs->rspq.cntxt_id);
2530 }
2531 }
2532 }
2533 spin_unlock_irq(lock);
2534 }
2535 mod_timer(&qs->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
2536 }
2537
2538 /**
2539 * t3_update_qset_coalesce - update coalescing settings for a queue set
2540 * @qs: the SGE queue set
2541 * @p: new queue set parameters
2542 *
2543 * Update the coalescing settings for an SGE queue set. Nothing is done
2544 * if the queue set is not initialized yet.
2545 */
2546 void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
2547 {
2548 if (!qs->netdev)
2549 return;
2550
2551 qs->rspq.holdoff_tmr = max(p->coalesce_usecs * 10, 1U); /* can't be 0 */
2552 qs->rspq.polling = p->polling;
2553 qs->netdev->poll = p->polling ? napi_rx_handler : ofld_poll;
2554 }
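/*
 * The multiply by 10 above converts microseconds into the 0.1 us ticks
 * the holdoff timer counts in (A_SG_TIMER_TICK is programmed to
 * core_ticks_per_usec(adap) / 10 in t3_sge_init() below). With the
 * default coalesce_usecs of 5 from t3_sge_prep():
 *
 *     holdoff_tmr = max(5 * 10, 1U) = 50 ticks = 5 us
 */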
2555
2556 /**
2557 * t3_sge_alloc_qset - initialize an SGE queue set
2558 * @adapter: the adapter
2559 * @id: the queue set id
2560 * @nports: how many Ethernet ports will be using this queue set
2561 * @irq_vec_idx: the IRQ vector index for response queue interrupts
2562 * @p: configuration parameters for this queue set
2563 * @ntxq: number of Tx queues for the queue set
2564 * @netdev: net device associated with this queue set
2565 *
2566 * Allocate resources and initialize an SGE queue set. A queue set
2567 * comprises a response queue, two Rx free-buffer queues, and up to 3
2568 * Tx queues. The Tx queues are assigned roles in the order Ethernet
2569 * queue, offload queue, and control queue.
2570 */
2571 int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
2572 int irq_vec_idx, const struct qset_params *p,
2573 int ntxq, struct net_device *netdev)
2574 {
2575 int i, ret = -ENOMEM;
2576 struct sge_qset *q = &adapter->sge.qs[id];
2577
2578 init_qset_cntxt(q, id);
2579 init_timer(&q->tx_reclaim_timer);
2580 q->tx_reclaim_timer.data = (unsigned long)q;
2581 q->tx_reclaim_timer.function = sge_timer_cb;
2582
2583 q->fl[0].desc = alloc_ring(adapter->pdev, p->fl_size,
2584 sizeof(struct rx_desc),
2585 sizeof(struct rx_sw_desc),
2586 &q->fl[0].phys_addr, &q->fl[0].sdesc);
2587 if (!q->fl[0].desc)
2588 goto err;
2589
2590 q->fl[1].desc = alloc_ring(adapter->pdev, p->jumbo_size,
2591 sizeof(struct rx_desc),
2592 sizeof(struct rx_sw_desc),
2593 &q->fl[1].phys_addr, &q->fl[1].sdesc);
2594 if (!q->fl[1].desc)
2595 goto err;
2596
2597 q->rspq.desc = alloc_ring(adapter->pdev, p->rspq_size,
2598 sizeof(struct rsp_desc), 0,
2599 &q->rspq.phys_addr, NULL);
2600 if (!q->rspq.desc)
2601 goto err;
2602
2603 for (i = 0; i < ntxq; ++i) {
2604 /*
2605 * The control queue always uses immediate data so does not
2606 * need to keep track of any sk_buffs.
2607 */
2608 size_t sz = i == TXQ_CTRL ? 0 : sizeof(struct tx_sw_desc);
2609
2610 q->txq[i].desc = alloc_ring(adapter->pdev, p->txq_size[i],
2611 sizeof(struct tx_desc), sz,
2612 &q->txq[i].phys_addr,
2613 &q->txq[i].sdesc);
2614 if (!q->txq[i].desc)
2615 goto err;
2616
2617 q->txq[i].gen = 1;
2618 q->txq[i].size = p->txq_size[i];
2619 spin_lock_init(&q->txq[i].lock);
2620 skb_queue_head_init(&q->txq[i].sendq);
2621 }
2622
2623 tasklet_init(&q->txq[TXQ_OFLD].qresume_tsk, restart_offloadq,
2624 (unsigned long)q);
2625 tasklet_init(&q->txq[TXQ_CTRL].qresume_tsk, restart_ctrlq,
2626 (unsigned long)q);
2627
2628 q->fl[0].gen = q->fl[1].gen = 1;
2629 q->fl[0].size = p->fl_size;
2630 q->fl[1].size = p->jumbo_size;
2631
2632 q->rspq.gen = 1;
2633 q->rspq.size = p->rspq_size;
2634 spin_lock_init(&q->rspq.lock);
2635
2636 q->txq[TXQ_ETH].stop_thres = nports *
2637 flits_to_desc(sgl_len(MAX_SKB_FRAGS + 1) + 3);
2638
2639 if (!is_offload(adapter)) {
2640 #ifdef USE_RX_PAGE
2641 q->fl[0].buf_size = RX_PAGE_SIZE;
2642 #else
2643 q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE + 2 +
2644 sizeof(struct cpl_rx_pkt);
2645 #endif
2646 q->fl[1].buf_size = MAX_FRAME_SIZE + 2 +
2647 sizeof(struct cpl_rx_pkt);
2648 } else {
2649 #ifdef USE_RX_PAGE
2650 q->fl[0].buf_size = RX_PAGE_SIZE;
2651 #else
2652 q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE +
2653 sizeof(struct cpl_rx_data);
2654 #endif
2655 q->fl[1].buf_size = (16 * 1024) -
2656 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2657 }
2658
2659 spin_lock(&adapter->sge.reg_lock);
2660
2661 /* FL threshold comparison uses < */
2662 ret = t3_sge_init_rspcntxt(adapter, q->rspq.cntxt_id, irq_vec_idx,
2663 q->rspq.phys_addr, q->rspq.size,
2664 q->fl[0].buf_size, 1, 0);
2665 if (ret)
2666 goto err_unlock;
2667
2668 for (i = 0; i < SGE_RXQ_PER_SET; ++i) {
2669 ret = t3_sge_init_flcntxt(adapter, q->fl[i].cntxt_id, 0,
2670 q->fl[i].phys_addr, q->fl[i].size,
2671 q->fl[i].buf_size, p->cong_thres, 1,
2672 0);
2673 if (ret)
2674 goto err_unlock;
2675 }
2676
2677 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_ETH].cntxt_id, USE_GTS,
2678 SGE_CNTXT_ETH, id, q->txq[TXQ_ETH].phys_addr,
2679 q->txq[TXQ_ETH].size, q->txq[TXQ_ETH].token,
2680 1, 0);
2681 if (ret)
2682 goto err_unlock;
2683
2684 if (ntxq > 1) {
2685 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_OFLD].cntxt_id,
2686 USE_GTS, SGE_CNTXT_OFLD, id,
2687 q->txq[TXQ_OFLD].phys_addr,
2688 q->txq[TXQ_OFLD].size, 0, 1, 0);
2689 if (ret)
2690 goto err_unlock;
2691 }
2692
2693 if (ntxq > 2) {
2694 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_CTRL].cntxt_id, 0,
2695 SGE_CNTXT_CTRL, id,
2696 q->txq[TXQ_CTRL].phys_addr,
2697 q->txq[TXQ_CTRL].size,
2698 q->txq[TXQ_CTRL].token, 1, 0);
2699 if (ret)
2700 goto err_unlock;
2701 }
2702
2703 spin_unlock(&adapter->sge.reg_lock);
2704 q->netdev = netdev;
2705 t3_update_qset_coalesce(q, p);
2706
2707 /*
2708 * We use atalk_ptr as a backpointer to a qset. In case a device is
2709 * associated with multiple queue sets only the first one sets
2710 * atalk_ptr.
2711 */
2712 if (netdev->atalk_ptr == NULL)
2713 netdev->atalk_ptr = q;
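/* (Presumably dev2qset(), used by the Rx handlers above, recovers the
 * queue set from this same backpointer.) */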
2714
2715 refill_fl(adapter, &q->fl[0], q->fl[0].size, GFP_KERNEL);
2716 refill_fl(adapter, &q->fl[1], q->fl[1].size, GFP_KERNEL);
2717 refill_rspq(adapter, &q->rspq, q->rspq.size - 1);
2718
2719 t3_write_reg(adapter, A_SG_GTS, V_RSPQ(q->rspq.cntxt_id) |
2720 V_NEWTIMER(q->rspq.holdoff_tmr));
2721
2722 mod_timer(&q->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
2723 return 0;
2724
2725 err_unlock:
2726 spin_unlock(&adapter->sge.reg_lock);
2727 err:
2728 t3_free_qset(adapter, q);
2729 return ret;
2730 }
2731
2732 /**
2733 * t3_free_sge_resources - free SGE resources
2734 * @adap: the adapter
2735 *
2736 * Frees resources used by the SGE queue sets.
2737 */
2738 void t3_free_sge_resources(struct adapter *adap)
2739 {
2740 int i;
2741
2742 for (i = 0; i < SGE_QSETS; ++i)
2743 t3_free_qset(adap, &adap->sge.qs[i]);
2744 }
2745
2746 /**
2747 * t3_sge_start - enable SGE
2748 * @adap: the adapter
2749 *
2750 * Enables the SGE for DMAs. This is the last step in starting packet
2751 * transfers.
2752 */
2753 void t3_sge_start(struct adapter *adap)
2754 {
2755 t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, F_GLOBALENABLE);
2756 }
2757
2758 /**
2759 * t3_sge_stop - disable SGE operation
2760 * @adap: the adapter
2761 *
2762 * Disables the DMA engine. This can be called in emergencies (e.g.,
2763 * from error interrupts) or from normal process context. In the latter
2764 * case it also disables any pending queue restart tasklets. Note that
2765 * if it is called in interrupt context it cannot disable the restart
2766 * tasklets, as it cannot wait; however, the tasklets will have no effect
2767 * since the doorbells are disabled and the driver will call this again
2768 * later from process context, at which time the tasklets will be stopped
2769 * if they are still running.
2770 */
2771 void t3_sge_stop(struct adapter *adap)
2772 {
2773 t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, 0);
2774 if (!in_interrupt()) {
2775 int i;
2776
2777 for (i = 0; i < SGE_QSETS; ++i) {
2778 struct sge_qset *qs = &adap->sge.qs[i];
2779
2780 tasklet_kill(&qs->txq[TXQ_OFLD].qresume_tsk);
2781 tasklet_kill(&qs->txq[TXQ_CTRL].qresume_tsk);
2782 }
2783 }
2784 }
2785
2786 /**
2787 * t3_sge_init - initialize SGE
2788 * @adap: the adapter
2789 * @p: the SGE parameters
2790 *
2791 * Performs SGE initialization needed every time after a chip reset.
2792 * We do not initialize any of the queue sets here, instead the driver
2793 * top-level must request those individually. We also do not enable DMA
2794 * here, that should be done after the queues have been set up.
2795 */
2796 void t3_sge_init(struct adapter *adap, struct sge_params *p)
2797 {
2798 unsigned int ctrl, ups = ffs(pci_resource_len(adap->pdev, 2) >> 12);
2799
2800 ctrl = F_DROPPKT | V_PKTSHIFT(2) | F_FLMODE | F_AVOIDCQOVFL |
2801 F_CQCRDTCTRL |
2802 V_HOSTPAGESIZE(PAGE_SHIFT - 11) | F_BIGENDIANINGRESS |
2803 V_USERSPACESIZE(ups ? ups - 1 : 0) | F_ISCSICOALESCING;
2804 #if SGE_NUM_GENBITS == 1
2805 ctrl |= F_EGRGENCTRL;
2806 #endif
2807 if (adap->params.rev > 0) {
2808 if (!(adap->flags & (USING_MSIX | USING_MSI)))
2809 ctrl |= F_ONEINTMULTQ | F_OPTONEINTMULTQ;
2810 ctrl |= F_CQCRDTCTRL | F_AVOIDCQOVFL;
2811 }
2812 t3_write_reg(adap, A_SG_CONTROL, ctrl);
2813 t3_write_reg(adap, A_SG_EGR_RCQ_DRB_THRSH, V_HIRCQDRBTHRSH(512) |
2814 V_LORCQDRBTHRSH(512));
2815 t3_write_reg(adap, A_SG_TIMER_TICK, core_ticks_per_usec(adap) / 10);
2816 t3_write_reg(adap, A_SG_CMDQ_CREDIT_TH, V_THRESHOLD(32) |
2817 V_TIMEOUT(200 * core_ticks_per_usec(adap)));
2818 t3_write_reg(adap, A_SG_HI_DRB_HI_THRSH, 1000);
2819 t3_write_reg(adap, A_SG_HI_DRB_LO_THRSH, 256);
2820 t3_write_reg(adap, A_SG_LO_DRB_HI_THRSH, 1000);
2821 t3_write_reg(adap, A_SG_LO_DRB_LO_THRSH, 256);
2822 t3_write_reg(adap, A_SG_OCO_BASE, V_BASE1(0xfff));
2823 t3_write_reg(adap, A_SG_DRB_PRI_THRESH, 63 * 1024);
2824 }
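/*
 * A worked example of the encodings above, assuming 4 KB host pages and
 * a 1 MB BAR2: V_HOSTPAGESIZE(PAGE_SHIFT - 11) is programmed with
 * 12 - 11 == 1, i.e. the page size expressed as a power of two above
 * 2 KB, and ups == ffs(0x100000 >> 12) == ffs(0x100) == 9 derives the
 * user-space region size from the BAR length in 4 KB units.
 */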
2825
2826 /**
2827 * t3_sge_prep - one-time SGE initialization
2828 * @adap: the associated adapter
2829 * @p: SGE parameters
2830 *
2831 * Performs one-time initialization of SGE SW state. Includes determining
2832 * defaults for the assorted SGE parameters, which admins can change until
2833 * they are used to initialize the SGE.
2834 */
2835 void __devinit t3_sge_prep(struct adapter *adap, struct sge_params *p)
2836 {
2837 int i;
2838
2839 p->max_pkt_size = (16 * 1024) - sizeof(struct cpl_rx_data) -
2840 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2841
2842 for (i = 0; i < SGE_QSETS; ++i) {
2843 struct qset_params *q = p->qset + i;
2844
2845 q->polling = adap->params.rev > 0;
2846 q->coalesce_usecs = 5;
2847 q->rspq_size = 1024;
2848 q->fl_size = 1024;
2849 q->jumbo_size = 512;
2850 q->txq_size[TXQ_ETH] = 1024;
2851 q->txq_size[TXQ_OFLD] = 1024;
2852 q->txq_size[TXQ_CTRL] = 256;
2853 q->cong_thres = 0;
2854 }
2855
2856 spin_lock_init(&adap->sge.reg_lock);
2857 }
2858
2859 /**
2860 * t3_get_desc - dump an SGE descriptor for debugging purposes
2861 * @qs: the queue set
2862 * @qnum: identifies the specific queue (0..2: Tx, 3: response, 4..5: Rx)
2863 * @idx: the descriptor index in the queue
2864 * @data: where to dump the descriptor contents
2865 *
2866 * Dumps the contents of a HW descriptor of an SGE queue. Returns the
2867 * size of the descriptor.
2868 */
2869 int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
2870 unsigned char *data)
2871 {
2872 if (qnum >= 6)
2873 return -EINVAL;
2874
2875 if (qnum < 3) {
2876 if (!qs->txq[qnum].desc || idx >= qs->txq[qnum].size)
2877 return -EINVAL;
2878 memcpy(data, &qs->txq[qnum].desc[idx], sizeof(struct tx_desc));
2879 return sizeof(struct tx_desc);
2880 }
2881
2882 if (qnum == 3) {
2883 if (!qs->rspq.desc || idx >= qs->rspq.size)
2884 return -EINVAL;
2885 memcpy(data, &qs->rspq.desc[idx], sizeof(struct rsp_desc));
2886 return sizeof(struct rsp_desc);
2887 }
2888
2889 qnum -= 4;
2890 if (!qs->fl[qnum].desc || idx >= qs->fl[qnum].size)
2891 return -EINVAL;
2892 memcpy(data, &qs->fl[qnum].desc[idx], sizeof(struct rx_desc));
2893 return sizeof(struct rx_desc);
2894 }
2895 }
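/*
 * A small usage sketch (hypothetical values): per the qnum mapping in
 * the kernel-doc above, descriptor 0 of Rx free list 1 would be dumped
 * with
 *
 *     unsigned char buf[sizeof(struct rx_desc)];
 *     int n = t3_get_desc(qs, 5, 0, buf);   (qnum 5 selects fl[1])
 */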