mlx4_en: Counting all the dropped packets on the TX side
drivers/net/mlx4/en_tx.c
/*
 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <asm/page.h>
#include <linux/mlx4/cq.h>
#include <linux/mlx4/qp.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>

#include "mlx4_en.h"

enum {
	MAX_INLINE = 104, /* 128 - 16 - 4 - 4 */
};

static int inline_thold __read_mostly = MAX_INLINE;

module_param_named(inline_thold, inline_thold, int, 0444);
MODULE_PARM_DESC(inline_thold, "threshold for using inline data");

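/* Allocate all software and hardware resources for one TX ring: the
 * per-descriptor tx_info array, a bounce buffer for descriptors that
 * wrap past the end of the ring, the HW work queue memory, and a
 * reserved QP.  Every allocation is undone on any failure path. */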
int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
			   struct mlx4_en_tx_ring *ring, u32 size,
			   u16 stride)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	int tmp;
	int err;

	ring->size = size;
	ring->size_mask = size - 1;
	ring->stride = stride;

	inline_thold = min(inline_thold, MAX_INLINE);

	spin_lock_init(&ring->comp_lock);

	tmp = size * sizeof(struct mlx4_en_tx_info);
	ring->tx_info = vmalloc(tmp);
	if (!ring->tx_info) {
		en_err(priv, "Failed allocating tx_info ring\n");
		return -ENOMEM;
	}
	en_dbg(DRV, priv, "Allocated tx_info ring at addr:%p size:%d\n",
	       ring->tx_info, tmp);

	ring->bounce_buf = kmalloc(MAX_DESC_SIZE, GFP_KERNEL);
	if (!ring->bounce_buf) {
		en_err(priv, "Failed allocating bounce buffer\n");
		err = -ENOMEM;
		goto err_tx;
	}
	ring->buf_size = ALIGN(size * ring->stride, MLX4_EN_PAGE_SIZE);

	err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size,
				 2 * PAGE_SIZE);
	if (err) {
		en_err(priv, "Failed allocating hwq resources\n");
		goto err_bounce;
	}

	err = mlx4_en_map_buffer(&ring->wqres.buf);
	if (err) {
		en_err(priv, "Failed to map TX buffer\n");
		goto err_hwq_res;
	}

	ring->buf = ring->wqres.buf.direct.buf;

	en_dbg(DRV, priv, "Allocated TX ring (addr:%p) - buf:%p size:%d "
	       "buf_size:%d dma:%llx\n", ring, ring->buf, ring->size,
	       ring->buf_size, (unsigned long long) ring->wqres.buf.direct.map);

	err = mlx4_qp_reserve_range(mdev->dev, 1, 1, &ring->qpn);
	if (err) {
		en_err(priv, "Failed reserving qp for tx ring.\n");
		goto err_map;
	}

	err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp);
	if (err) {
		en_err(priv, "Failed allocating qp %d\n", ring->qpn);
		goto err_reserve;
	}
	ring->qp.event = mlx4_en_sqp_event;

	return 0;

err_reserve:
	mlx4_qp_release_range(mdev->dev, ring->qpn, 1);
err_map:
	mlx4_en_unmap_buffer(&ring->wqres.buf);
err_hwq_res:
	mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
err_bounce:
	kfree(ring->bounce_buf);
	ring->bounce_buf = NULL;
err_tx:
	vfree(ring->tx_info);
	ring->tx_info = NULL;
	return err;
}

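/* Release everything allocated by mlx4_en_create_tx_ring(). */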
void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
			     struct mlx4_en_tx_ring *ring)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	en_dbg(DRV, priv, "Destroying tx ring, qpn: %d\n", ring->qpn);

	mlx4_qp_remove(mdev->dev, &ring->qp);
	mlx4_qp_free(mdev->dev, &ring->qp);
	mlx4_qp_release_range(mdev->dev, ring->qpn, 1);
	mlx4_en_unmap_buffer(&ring->wqres.buf);
	mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
	kfree(ring->bounce_buf);
	ring->bounce_buf = NULL;
	vfree(ring->tx_info);
	ring->tx_info = NULL;
}

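/* Reset the ring state (producer/consumer indexes, tx_info array and
 * ring buffer) and bring the ring's QP from RESET to the ready state. */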
int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
			     struct mlx4_en_tx_ring *ring,
			     int cq, int srqn)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;

	ring->cqn = cq;
	ring->prod = 0;
	ring->cons = 0xffffffff;
	ring->last_nr_txbb = 1;
	ring->poll_cnt = 0;
	ring->blocked = 0;
	memset(ring->tx_info, 0, ring->size * sizeof(struct mlx4_en_tx_info));
	memset(ring->buf, 0, ring->buf_size);

	ring->qp_state = MLX4_QP_STATE_RST;
	ring->doorbell_qpn = swab32(ring->qp.qpn << 8);

	mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn,
				ring->cqn, srqn, &ring->context);

	err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, &ring->context,
			       &ring->qp, &ring->qp_state);

	return err;
}

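/* Move the ring's QP back to the RESET state. */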
void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,
				struct mlx4_en_tx_ring *ring)
{
	struct mlx4_en_dev *mdev = priv->mdev;

	mlx4_qp_modify(mdev->dev, NULL, ring->qp_state,
		       MLX4_QP_STATE_RST, NULL, 0, 0, &ring->qp);
}

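/* Unmap the DMA buffers of a completed descriptor, stamp its TXBBs
 * with the ownership bit so stale completions can be detected, free
 * the skb and return the number of TXBBs the descriptor occupied.
 * The slow path handles descriptors that wrap around the ring end. */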
static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
				struct mlx4_en_tx_ring *ring,
				int index, u8 owner)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_tx_info *tx_info = &ring->tx_info[index];
	struct mlx4_en_tx_desc *tx_desc = ring->buf + index * TXBB_SIZE;
	struct mlx4_wqe_data_seg *data = (void *) tx_desc + tx_info->data_offset;
	struct sk_buff *skb = tx_info->skb;
	struct skb_frag_struct *frag;
	void *end = ring->buf + ring->buf_size;
	int frags = skb_shinfo(skb)->nr_frags;
	int i;
	__be32 *ptr = (__be32 *)tx_desc;
	__be32 stamp = cpu_to_be32(STAMP_VAL | (!!owner << STAMP_SHIFT));

	/* Optimize the common case when there are no wraparounds */
	if (likely((void *) tx_desc + tx_info->nr_txbb * TXBB_SIZE <= end)) {
		if (!tx_info->inl) {
			if (tx_info->linear) {
				pci_unmap_single(mdev->pdev,
					(dma_addr_t) be64_to_cpu(data->addr),
					be32_to_cpu(data->byte_count),
					PCI_DMA_TODEVICE);
				++data;
			}

			for (i = 0; i < frags; i++) {
				frag = &skb_shinfo(skb)->frags[i];
				pci_unmap_page(mdev->pdev,
					(dma_addr_t) be64_to_cpu(data[i].addr),
					frag->size, PCI_DMA_TODEVICE);
			}
		}
		/* Stamp the freed descriptor */
		for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE; i += STAMP_STRIDE) {
			*ptr = stamp;
			ptr += STAMP_DWORDS;
		}

	} else {
		if (!tx_info->inl) {
			if ((void *) data >= end) {
				data = (struct mlx4_wqe_data_seg *)
					(ring->buf + ((void *) data - end));
			}

			if (tx_info->linear) {
				pci_unmap_single(mdev->pdev,
					(dma_addr_t) be64_to_cpu(data->addr),
					be32_to_cpu(data->byte_count),
					PCI_DMA_TODEVICE);
				++data;
			}

			for (i = 0; i < frags; i++) {
				/* Check for wraparound before unmapping */
				if ((void *) data >= end)
					data = (struct mlx4_wqe_data_seg *) ring->buf;
				frag = &skb_shinfo(skb)->frags[i];
				pci_unmap_page(mdev->pdev,
					(dma_addr_t) be64_to_cpu(data->addr),
					frag->size, PCI_DMA_TODEVICE);
			}
		}
		/* Stamp the freed descriptor */
		for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE; i += STAMP_STRIDE) {
			*ptr = stamp;
			ptr += STAMP_DWORDS;
			if ((void *) ptr >= end) {
				ptr = ring->buf;
				stamp ^= cpu_to_be32(0x80000000);
			}
		}

	}
	dev_kfree_skb_any(skb);
	return tx_info->nr_txbb;
}

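/* Free all descriptors still pending on the ring, typically when the
 * interface is brought down; returns the number of descriptors freed. */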
int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int cnt = 0;

	/* Skip last polled descriptor */
	ring->cons += ring->last_nr_txbb;
	en_dbg(DRV, priv, "Freeing Tx buf - cons:0x%x prod:0x%x\n",
	       ring->cons, ring->prod);

	if ((u32) (ring->prod - ring->cons) > ring->size) {
		if (netif_msg_tx_err(priv))
			en_warn(priv, "Tx consumer passed producer!\n");
		return 0;
	}

	while (ring->cons != ring->prod) {
		ring->last_nr_txbb = mlx4_en_free_tx_desc(priv, ring,
					ring->cons & ring->size_mask,
					!!(ring->cons & ring->size));
		ring->cons += ring->last_nr_txbb;
		cnt++;
	}

	if (cnt)
		en_dbg(DRV, priv, "Freed %d uncompleted tx descriptors\n", cnt);

	return cnt;
}

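/* Reap TX completions: walk the CQ, free every descriptor covered by
 * the reported completions, update the CQ consumer index and wake the
 * netdev queue if this ring had previously stopped it. */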
static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_cq *mcq = &cq->mcq;
	struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];
	struct mlx4_cqe *cqe = cq->buf;
	u16 index;
	u16 new_index;
	u32 txbbs_skipped = 0;
	u32 cq_last_sav;

	/* index always points to the first TXBB of the last polled descriptor */
	index = ring->cons & ring->size_mask;
	new_index = be16_to_cpu(cqe->wqe_index) & ring->size_mask;
	if (index == new_index)
		return;

	if (!priv->port_up)
		return;

	/*
	 * We use a two-stage loop:
	 * - the first samples the HW-updated CQE
	 * - the second frees TXBBs until the last sample
	 * This lets us amortize CQE cache misses, while still polling the CQ
	 * until it is quiescent.
	 */
	cq_last_sav = mcq->cons_index;
	do {
		do {
			/* Skip over last polled CQE */
			index = (index + ring->last_nr_txbb) & ring->size_mask;
			txbbs_skipped += ring->last_nr_txbb;

			/* Poll next CQE */
			ring->last_nr_txbb = mlx4_en_free_tx_desc(
						priv, ring, index,
						!!((ring->cons + txbbs_skipped) &
						   ring->size));
			++mcq->cons_index;

		} while (index != new_index);

		new_index = be16_to_cpu(cqe->wqe_index) & ring->size_mask;
	} while (index != new_index);
	AVG_PERF_COUNTER(priv->pstats.tx_coal_avg,
			 (u32) (mcq->cons_index - cq_last_sav));

	/*
	 * To prevent CQ overflow we first update CQ consumer and only then
	 * the ring consumer.
	 */
	mlx4_cq_set_ci(mcq);
	wmb();
	ring->cons += txbbs_skipped;

	/* Wakeup Tx queue if this ring stopped it */
	if (unlikely(ring->blocked)) {
		if ((u32) (ring->prod - ring->cons) <=
		    ring->size - HEADROOM - MAX_DESC_TXBBS) {
			ring->blocked = 0;
			netif_tx_wake_queue(netdev_get_tx_queue(dev, cq->ring));
			priv->port_stats.wake_queue++;
		}
	}
}

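/* TX completion interrupt handler: process the CQ if the completion
 * lock is free, otherwise leave the work to the lock owner or to the
 * poll timer, which is rearmed for one jiffy after a successful run. */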
void mlx4_en_tx_irq(struct mlx4_cq *mcq)
{
	struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);
	struct mlx4_en_priv *priv = netdev_priv(cq->dev);
	struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];

	if (!spin_trylock(&ring->comp_lock))
		return;
	mlx4_en_process_tx_cq(cq->dev, cq);
	mod_timer(&cq->timer, jiffies + 1);
	spin_unlock(&ring->comp_lock);
}

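/* Timer callback that polls the TX CQ, rearming itself while packets
 * are still in flight so their completions are always processed. */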
void mlx4_en_poll_tx_cq(unsigned long data)
{
	struct mlx4_en_cq *cq = (struct mlx4_en_cq *) data;
	struct mlx4_en_priv *priv = netdev_priv(cq->dev);
	struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];
	u32 inflight;

	INC_PERF_COUNTER(priv->pstats.tx_poll);

	if (!spin_trylock_irq(&ring->comp_lock)) {
		mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);
		return;
	}
	mlx4_en_process_tx_cq(cq->dev, cq);
	inflight = (u32) (ring->prod - ring->cons - ring->last_nr_txbb);

	/* If there are still packets in flight and the timer has not already
	 * been scheduled by the Tx routine then schedule it here to guarantee
	 * completion processing of these packets */
	if (inflight && priv->port_up)
		mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);

	spin_unlock_irq(&ring->comp_lock);
}

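/* Copy a descriptor that was built in the bounce buffer back into the
 * real ring in two chunks: the part that wrapped to the start of the
 * ring first, then the tail at the end of the ring.  Both copies run
 * backwards with a barrier at each TXBB boundary, and the descriptor's
 * first dword (which carries the ownership bit) is left untouched for
 * the caller to write last. */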
static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv,
						      struct mlx4_en_tx_ring *ring,
						      u32 index,
						      unsigned int desc_size)
{
	u32 copy = (ring->size - index) * TXBB_SIZE;
	int i;

	for (i = desc_size - copy - 4; i >= 0; i -= 4) {
		if ((i & (TXBB_SIZE - 1)) == 0)
			wmb();

		*((u32 *) (ring->buf + i)) =
			*((u32 *) (ring->bounce_buf + copy + i));
	}

	for (i = copy - 4; i >= 4 ; i -= 4) {
		if ((i & (TXBB_SIZE - 1)) == 0)
			wmb();

		*((u32 *) (ring->buf + index * TXBB_SIZE + i)) =
			*((u32 *) (ring->bounce_buf + i));
	}

	/* Return real descriptor location */
	return ring->buf + index * TXBB_SIZE;
}

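/* Opportunistically reap completions from the transmit path: make sure
 * a poll timer is pending to catch the latest post, and process the CQ
 * directly once every MLX4_EN_TX_POLL_MODER packets if the completion
 * lock is free. */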
static inline void mlx4_en_xmit_poll(struct mlx4_en_priv *priv, int tx_ind)
{
	struct mlx4_en_cq *cq = &priv->tx_cq[tx_ind];
	struct mlx4_en_tx_ring *ring = &priv->tx_ring[tx_ind];

	/* If we don't have a pending timer, set one up to catch our recent
	 * post in case the interface becomes idle */
	if (!timer_pending(&cq->timer))
		mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);

	/* Poll the CQ every MLX4_EN_TX_POLL_MODER packets */
	if ((++ring->poll_cnt & (MLX4_EN_TX_POLL_MODER - 1)) == 0)
		if (spin_trylock_irq(&ring->comp_lock)) {
			mlx4_en_process_tx_cq(priv->dev, cq);
			spin_unlock_irq(&ring->comp_lock);
		}
}

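/* Return a kernel virtual pointer to the skb's first page fragment,
 * or NULL if the page has no kernel mapping (page_address() returns
 * NULL for unmapped highmem pages). */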
static void *get_frag_ptr(struct sk_buff *skb)
{
	struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
	struct page *page = frag->page;
	void *ptr;

	ptr = page_address(page);
	if (unlikely(!ptr))
		return NULL;

	return ptr + frag->page_offset;
}

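/* Decide whether the skb is small enough to be copied inline into the
 * descriptor: it must not be GSO, must fit under inline_thold, and may
 * have at most one page fragment whose data is addressable.  On
 * success, *pfrag (if provided) is set to the fragment's data. */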
static int is_inline(struct sk_buff *skb, void **pfrag)
{
	void *ptr;

	if (inline_thold && !skb_is_gso(skb) && skb->len <= inline_thold) {
		if (skb_shinfo(skb)->nr_frags == 1) {
			ptr = get_frag_ptr(skb);
			if (unlikely(!ptr))
				return 0;

			if (pfrag)
				*pfrag = ptr;

			return 1;
		} else if (unlikely(skb_shinfo(skb)->nr_frags))
			return 0;
		else
			return 1;
	}

	return 0;
}

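/* Size of an inline descriptor for the skb, rounded up to 16 bytes;
 * data that does not fit below MLX4_INLINE_ALIGN is split into two
 * inline segments, hence the second mlx4_wqe_inline_seg header in
 * that case (see build_inline_wqe() below). */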
static int inline_size(struct sk_buff *skb)
{
	if (skb->len + CTRL_SIZE + sizeof(struct mlx4_wqe_inline_seg)
	    <= MLX4_INLINE_ALIGN)
		return ALIGN(skb->len + CTRL_SIZE +
			     sizeof(struct mlx4_wqe_inline_seg), 16);
	else
		return ALIGN(skb->len + CTRL_SIZE + 2 *
			     sizeof(struct mlx4_wqe_inline_seg), 16);
}

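/* Compute the descriptor size in bytes for the skb: the control
 * segment plus data, LSO or inline segments.  Returns 0 for packets
 * that cannot be transmitted, e.g. LSO packets with non-linear or
 * oversized headers; the caller drops those. */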
static int get_real_size(struct sk_buff *skb, struct net_device *dev,
			 int *lso_header_size)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int real_size;

	if (skb_is_gso(skb)) {
		*lso_header_size = skb_transport_offset(skb) + tcp_hdrlen(skb);
		real_size = CTRL_SIZE + skb_shinfo(skb)->nr_frags * DS_SIZE +
			ALIGN(*lso_header_size + 4, DS_SIZE);
		if (unlikely(*lso_header_size != skb_headlen(skb))) {
			/* We add a segment for the skb linear buffer only if
			 * it contains data */
			if (*lso_header_size < skb_headlen(skb))
				real_size += DS_SIZE;
			else {
				if (netif_msg_tx_err(priv))
					en_warn(priv, "Non-linear headers\n");
				return 0;
			}
		}
		if (unlikely(*lso_header_size > MAX_LSO_HDR_SIZE)) {
			if (netif_msg_tx_err(priv))
				en_warn(priv, "LSO header size too big\n");
			return 0;
		}
	} else {
		*lso_header_size = 0;
		if (!is_inline(skb, NULL))
			real_size = CTRL_SIZE + (skb_shinfo(skb)->nr_frags + 1) * DS_SIZE;
		else
			real_size = inline_size(skb);
	}

	return real_size;
}

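/* Copy the whole packet (linear part plus at most one fragment) into
 * the descriptor's inline segment(s).  Data crossing the
 * MLX4_INLINE_ALIGN boundary is split into two inline segments; the
 * second segment's byte_count is written only after a barrier, so HW
 * cannot see it before the data it describes is in memory. */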
static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *skb,
			     int real_size, u16 *vlan_tag, int tx_ind, void *fragptr)
{
	struct mlx4_wqe_inline_seg *inl = &tx_desc->inl;
	int spc = MLX4_INLINE_ALIGN - CTRL_SIZE - sizeof *inl;

	if (skb->len <= spc) {
		inl->byte_count = cpu_to_be32(1 << 31 | skb->len);
		skb_copy_from_linear_data(skb, inl + 1, skb_headlen(skb));
		if (skb_shinfo(skb)->nr_frags)
			memcpy(((void *)(inl + 1)) + skb_headlen(skb), fragptr,
			       skb_shinfo(skb)->frags[0].size);

	} else {
		inl->byte_count = cpu_to_be32(1 << 31 | spc);
		if (skb_headlen(skb) <= spc) {
			skb_copy_from_linear_data(skb, inl + 1, skb_headlen(skb));
			if (skb_headlen(skb) < spc) {
				memcpy(((void *)(inl + 1)) + skb_headlen(skb),
				       fragptr, spc - skb_headlen(skb));
				fragptr += spc - skb_headlen(skb);
			}
			inl = (void *) (inl + 1) + spc;
			memcpy(((void *)(inl + 1)), fragptr, skb->len - spc);
		} else {
			skb_copy_from_linear_data(skb, inl + 1, spc);
			inl = (void *) (inl + 1) + spc;
			skb_copy_from_linear_data_offset(skb, spc, inl + 1,
							 skb_headlen(skb) - spc);
			if (skb_shinfo(skb)->nr_frags)
				memcpy(((void *)(inl + 1)) + skb_headlen(skb) - spc,
				       fragptr, skb_shinfo(skb)->frags[0].size);
		}

		wmb();
		inl->byte_count = cpu_to_be32(1 << 31 | (skb->len - spc));
	}
	tx_desc->ctrl.vlan_tag = cpu_to_be16(*vlan_tag);
	tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN * !!(*vlan_tag);
	tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f;
}

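/* Select the TX ring for an skb: with per-priority flow control
 * enabled, VLAN-tagged packets go to the ring assigned to their
 * priority bits; everything else uses the kernel's TX hash. */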
u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	u16 vlan_tag = 0;

	/* If we support per priority flow control and the packet contains
	 * a vlan tag, send the packet to the TX ring assigned to that priority
	 */
	if (priv->prof->rx_ppp && priv->vlgrp && vlan_tx_tag_present(skb)) {
		vlan_tag = vlan_tx_tag_get(skb);
		return MLX4_EN_NUM_TX_RINGS + (vlan_tag >> 13);
	}

	return skb_tx_hash(dev, skb);
}

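/* Main transmit routine: build a TX descriptor for the skb (LSO,
 * inline or gather), post it to the selected ring and ring the
 * doorbell.  All drop paths fall through to tx_drop, which frees the
 * skb and bumps the tx_dropped counter. */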
int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_tx_ring *ring;
	struct mlx4_en_cq *cq;
	struct mlx4_en_tx_desc *tx_desc;
	struct mlx4_wqe_data_seg *data;
	struct skb_frag_struct *frag;
	struct mlx4_en_tx_info *tx_info;
	int tx_ind = 0;
	int nr_txbb;
	int desc_size;
	int real_size;
	dma_addr_t dma;
	u32 index;
	__be32 op_own;
	u16 vlan_tag = 0;
	int i;
	int lso_header_size;
	void *fragptr;

	if (unlikely(!skb->len)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}
	real_size = get_real_size(skb, dev, &lso_header_size);
	if (unlikely(!real_size))
		goto tx_drop;

	/* Align descriptor to TXBB size */
	desc_size = ALIGN(real_size, TXBB_SIZE);
	nr_txbb = desc_size / TXBB_SIZE;
	if (unlikely(nr_txbb > MAX_DESC_TXBBS)) {
		if (netif_msg_tx_err(priv))
			en_warn(priv, "Oversized header or SG list\n");
		goto tx_drop;
	}

	tx_ind = skb->queue_mapping;
	ring = &priv->tx_ring[tx_ind];
	if (priv->vlgrp && vlan_tx_tag_present(skb))
		vlan_tag = vlan_tx_tag_get(skb);

	/* Check available TXBBs and 2K spare for prefetch */
	if (unlikely(((int)(ring->prod - ring->cons)) >
		     ring->size - HEADROOM - MAX_DESC_TXBBS)) {
		/* every full Tx ring stops queue */
		netif_tx_stop_queue(netdev_get_tx_queue(dev, tx_ind));
		ring->blocked = 1;
		priv->port_stats.queue_stopped++;

		/* Use interrupts to find out when queue opened */
		cq = &priv->tx_cq[tx_ind];
		mlx4_en_arm_cq(priv, cq);
		return NETDEV_TX_BUSY;
	}

	/* Now that we know which Tx ring to use, make sure the port is up */
	if (unlikely(!priv->port_up)) {
		if (netif_msg_tx_err(priv))
			en_warn(priv, "xmit: port down!\n");
		goto tx_drop;
	}

	/* Track current inflight packets for performance analysis */
	AVG_PERF_COUNTER(priv->pstats.inflight_avg,
			 (u32) (ring->prod - ring->cons - 1));

	/* Packet is good - grab an index and transmit it */
	index = ring->prod & ring->size_mask;

	/* See if we have enough space for whole descriptor TXBB for setting
	 * SW ownership on next descriptor; if not, use a bounce buffer. */
	if (likely(index + nr_txbb <= ring->size))
		tx_desc = ring->buf + index * TXBB_SIZE;
	else
		tx_desc = (struct mlx4_en_tx_desc *) ring->bounce_buf;

	/* Save skb in tx_info ring */
	tx_info = &ring->tx_info[index];
	tx_info->skb = skb;
	tx_info->nr_txbb = nr_txbb;

	/* Prepare ctrl segment, apart from opcode+ownership, which depends
	 * on whether LSO is used */
	tx_desc->ctrl.vlan_tag = cpu_to_be16(vlan_tag);
	tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN * !!vlan_tag;
	tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f;
	tx_desc->ctrl.srcrb_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE |
						MLX4_WQE_CTRL_SOLICITED);
	if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
		tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM |
							 MLX4_WQE_CTRL_TCP_UDP_CSUM);
		priv->port_stats.tx_chksum_offload++;
	}

	/* Handle LSO (TSO) packets */
	if (lso_header_size) {
		/* Mark opcode as LSO */
		op_own = cpu_to_be32(MLX4_OPCODE_LSO | (1 << 6)) |
			((ring->prod & ring->size) ?
				cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0);

		/* Fill in the LSO prefix */
		tx_desc->lso.mss_hdr_size = cpu_to_be32(
			skb_shinfo(skb)->gso_size << 16 | lso_header_size);

		/* Copy headers;
		 * note that we already verified that it is linear */
		memcpy(tx_desc->lso.header, skb->data, lso_header_size);
		data = ((void *) &tx_desc->lso +
			ALIGN(lso_header_size + 4, DS_SIZE));

		priv->port_stats.tso_packets++;
		i = ((skb->len - lso_header_size) / skb_shinfo(skb)->gso_size) +
			!!((skb->len - lso_header_size) % skb_shinfo(skb)->gso_size);
		ring->bytes += skb->len + (i - 1) * lso_header_size;
		ring->packets += i;
	} else {
		/* Normal (Non LSO) packet */
		op_own = cpu_to_be32(MLX4_OPCODE_SEND) |
			((ring->prod & ring->size) ?
				cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0);
		data = &tx_desc->data;
		ring->bytes += max(skb->len, (unsigned int) ETH_ZLEN);
		ring->packets++;
	}
	AVG_PERF_COUNTER(priv->pstats.tx_pktsz_avg, skb->len);

	/* valid only for non-inline segments */
	tx_info->data_offset = (void *) data - (void *) tx_desc;

	tx_info->linear = (lso_header_size < skb_headlen(skb) && !is_inline(skb, NULL)) ? 1 : 0;
	data += skb_shinfo(skb)->nr_frags + tx_info->linear - 1;

	if (!is_inline(skb, &fragptr)) {
		/* Map fragments */
		for (i = skb_shinfo(skb)->nr_frags - 1; i >= 0; i--) {
			frag = &skb_shinfo(skb)->frags[i];
			dma = pci_map_page(mdev->dev->pdev, frag->page, frag->page_offset,
					   frag->size, PCI_DMA_TODEVICE);
			data->addr = cpu_to_be64(dma);
			data->lkey = cpu_to_be32(mdev->mr.key);
			wmb();
			data->byte_count = cpu_to_be32(frag->size);
			--data;
		}

		/* Map linear part */
		if (tx_info->linear) {
			dma = pci_map_single(mdev->dev->pdev, skb->data + lso_header_size,
					     skb_headlen(skb) - lso_header_size, PCI_DMA_TODEVICE);
			data->addr = cpu_to_be64(dma);
			data->lkey = cpu_to_be32(mdev->mr.key);
			wmb();
			data->byte_count = cpu_to_be32(skb_headlen(skb) - lso_header_size);
		}
		tx_info->inl = 0;
	} else {
		build_inline_wqe(tx_desc, skb, real_size, &vlan_tag, tx_ind, fragptr);
		tx_info->inl = 1;
	}

	ring->prod += nr_txbb;

	/* If we used a bounce buffer then copy descriptor back into place */
	if (tx_desc == (struct mlx4_en_tx_desc *) ring->bounce_buf)
		tx_desc = mlx4_en_bounce_to_desc(priv, ring, index, desc_size);

	/* Run destructor before passing skb to HW */
	if (likely(!skb_shared(skb)))
		skb_orphan(skb);

	/* Ensure new descriptor hits memory
	 * before setting ownership of this descriptor to HW */
	wmb();
	tx_desc->ctrl.owner_opcode = op_own;

	/* Ring doorbell! */
	wmb();
	writel(ring->doorbell_qpn, mdev->uar_map + MLX4_SEND_DOORBELL);

	/* Poll CQ here */
	mlx4_en_xmit_poll(priv, tx_ind);

	return NETDEV_TX_OK;

tx_drop:
	dev_kfree_skb_any(skb);
	priv->stats.tx_dropped++;
	return NETDEV_TX_OK;
}