/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2013 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */
#include <linux/pci.h>
#include <linux/tcp.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <linux/if_ether.h>
#include <linux/highmem.h>
#include <linux/cache.h>
#include "net_driver.h"
#include "workarounds.h"
#include "ef10_regs.h"
#ifdef EFX_USE_PIO

#define EFX_PIOBUF_SIZE_MAX ER_DZ_TX_PIOBUF_SIZE
#define EFX_PIOBUF_SIZE_DEF ALIGN(256, L1_CACHE_BYTES)
unsigned int efx_piobuf_size __read_mostly = EFX_PIOBUF_SIZE_DEF;

#endif /* EFX_USE_PIO */
static inline unsigned int
efx_tx_queue_get_insert_index(const struct efx_tx_queue *tx_queue)
{
	return tx_queue->insert_count & tx_queue->ptr_mask;
}
static inline struct efx_tx_buffer *
__efx_tx_queue_get_insert_buffer(const struct efx_tx_queue *tx_queue)
{
	return &tx_queue->buffer[efx_tx_queue_get_insert_index(tx_queue)];
}
static inline struct efx_tx_buffer *
efx_tx_queue_get_insert_buffer(const struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer =
		__efx_tx_queue_get_insert_buffer(tx_queue);

	EFX_BUG_ON_PARANOID(buffer->len);
	EFX_BUG_ON_PARANOID(buffer->flags);
	EFX_BUG_ON_PARANOID(buffer->unmap_len);

	return buffer;
}
static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
			       struct efx_tx_buffer *buffer,
			       unsigned int *pkts_compl,
			       unsigned int *bytes_compl)
{
	if (buffer->unmap_len) {
		struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
		dma_addr_t unmap_addr = buffer->dma_addr - buffer->dma_offset;

		if (buffer->flags & EFX_TX_BUF_MAP_SINGLE)
			dma_unmap_single(dma_dev, unmap_addr, buffer->unmap_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dma_dev, unmap_addr, buffer->unmap_len,
				       DMA_TO_DEVICE);
		buffer->unmap_len = 0;
	}

	if (buffer->flags & EFX_TX_BUF_SKB) {
		(*pkts_compl)++;
		(*bytes_compl) += buffer->skb->len;
		dev_consume_skb_any((struct sk_buff *)buffer->skb);
		netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
			   "TX queue %d transmission id %x complete\n",
			   tx_queue->queue, tx_queue->read_count);
	} else if (buffer->flags & EFX_TX_BUF_HEAP) {
		kfree(buffer->heap_buf);
	}

	buffer->len = 0;
	buffer->flags = 0;
}
static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
			       struct sk_buff *skb);
static inline unsigned
efx_max_tx_len(struct efx_nic *efx, dma_addr_t dma_addr)
{
	/* Depending on the NIC revision, we can use descriptor
	 * lengths up to 8K or 8K-1.  However, since PCI Express
	 * devices must split read requests at 4K boundaries, there is
	 * little benefit from using descriptors that cross those
	 * boundaries and we keep things simple by not doing so.
	 */
	unsigned len = (~dma_addr & (EFX_PAGE_SIZE - 1)) + 1;

	/* Work around hardware bug for unaligned buffers. */
	if (EFX_WORKAROUND_5391(efx) && (dma_addr & 0xf))
		len = min_t(unsigned, len, 512 - (dma_addr & 0xf));

	return len;
}
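/* Illustrative example (not from the original source), assuming
 * EFX_PAGE_SIZE == 4096: for a dma_addr whose low bits are 0x0f00,
 * len = (~0x0f00 & 0x0fff) + 1 = 0x100, so the descriptor stops exactly
 * at the next 4K boundary and the remainder is carried by the next
 * descriptor.
 */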
unsigned int efx_tx_max_skb_descs(struct efx_nic *efx)
{
	/* Header and payload descriptor for each output segment, plus
	 * one for every input fragment boundary within a segment
	 */
	unsigned int max_descs = EFX_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS;

	/* Possibly one more per segment for the alignment workaround,
	 * or for option descriptors
	 */
	if (EFX_WORKAROUND_5391(efx) || efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
		max_descs += EFX_TSO_MAX_SEGS;

	/* Possibly more for PCIe page boundaries within input fragments */
	if (PAGE_SIZE > EFX_PAGE_SIZE)
		max_descs += max_t(unsigned int, MAX_SKB_FRAGS,
				   DIV_ROUND_UP(GSO_MAX_SIZE, EFX_PAGE_SIZE));

	return max_descs;
}
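/* Worked example (illustrative only; the constants are build-time values
 * taken from net_driver.h and the core stack): with EFX_TSO_MAX_SEGS == 100
 * and MAX_SKB_FRAGS == 17, the baseline is 100 * 2 + 17 = 217 descriptors,
 * plus another 100 on EF10 (option descriptors) or when workaround 5391
 * applies.
 */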
static void efx_tx_maybe_stop_queue(struct efx_tx_queue *txq1)
{
	/* We need to consider both queues that the net core sees as one */
	struct efx_tx_queue *txq2 = efx_tx_queue_partner(txq1);
	struct efx_nic *efx = txq1->efx;
	unsigned int fill_level;

	fill_level = max(txq1->insert_count - txq1->old_read_count,
			 txq2->insert_count - txq2->old_read_count);
	if (likely(fill_level < efx->txq_stop_thresh))
		return;

	/* We used the stale old_read_count above, which gives us a
	 * pessimistic estimate of the fill level (which may even
	 * validly be >= efx->txq_entries).  Now try again using
	 * read_count (more likely to be a cache miss).
	 *
	 * If we read read_count and then conditionally stop the
	 * queue, it is possible for the completion path to race with
	 * us and complete all outstanding descriptors in the middle,
	 * after which there will be no more completions to wake it.
	 * Therefore we stop the queue first, then read read_count
	 * (with a memory barrier to ensure the ordering), then
	 * restart the queue if the fill level turns out to be low
	 * enough.
	 */
	netif_tx_stop_queue(txq1->core_txq);
	smp_mb();
	txq1->old_read_count = ACCESS_ONCE(txq1->read_count);
	txq2->old_read_count = ACCESS_ONCE(txq2->read_count);

	fill_level = max(txq1->insert_count - txq1->old_read_count,
			 txq2->insert_count - txq2->old_read_count);
	EFX_BUG_ON_PARANOID(fill_level >= efx->txq_entries);
	if (likely(fill_level < efx->txq_stop_thresh)) {
		smp_mb();
		if (likely(!efx->loopback_selftest))
			netif_tx_start_queue(txq1->core_txq);
	}
}
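/* Note: the fill-level arithmetic above relies on the insert/read counters
 * being free-running unsigned values, so insert_count - old_read_count
 * stays correct even when either counter wraps around.
 */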
#ifdef EFX_USE_PIO

struct efx_short_copy_buffer {
	int used;
	u8 buf[L1_CACHE_BYTES];
};
/* Copy to PIO, respecting that writes to PIO buffers must be dword aligned.
 * Advances piobuf pointer. Leaves additional data in the copy buffer.
 */
static void efx_memcpy_toio_aligned(struct efx_nic *efx, u8 __iomem **piobuf,
				    u8 *data, int len,
				    struct efx_short_copy_buffer *copy_buf)
{
	int block_len = len & ~(sizeof(copy_buf->buf) - 1);

	__iowrite64_copy(*piobuf, data, block_len >> 3);
	*piobuf += block_len;
	len -= block_len;

	if (len) {
		data += block_len;
		BUG_ON(copy_buf->used);
		BUG_ON(len > sizeof(copy_buf->buf));
		memcpy(copy_buf->buf, data, len);
		copy_buf->used = len;
	}
}
/* Copy to PIO, respecting dword alignment, popping data from copy buffer first.
 * Advances piobuf pointer. Leaves additional data in the copy buffer.
 */
static void efx_memcpy_toio_aligned_cb(struct efx_nic *efx, u8 __iomem **piobuf,
				       u8 *data, int len,
				       struct efx_short_copy_buffer *copy_buf)
{
	if (copy_buf->used) {
		/* if the copy buffer is partially full, fill it up and write */
		int copy_to_buf =
			min_t(int, sizeof(copy_buf->buf) - copy_buf->used, len);

		memcpy(copy_buf->buf + copy_buf->used, data, copy_to_buf);
		copy_buf->used += copy_to_buf;

		/* if we didn't fill it up then we're done for now */
		if (copy_buf->used < sizeof(copy_buf->buf))
			return;

		__iowrite64_copy(*piobuf, copy_buf->buf,
				 sizeof(copy_buf->buf) >> 3);
		*piobuf += sizeof(copy_buf->buf);
		data += copy_to_buf;
		len -= copy_to_buf;
		copy_buf->used = 0;
	}

	efx_memcpy_toio_aligned(efx, piobuf, data, len, copy_buf);
}
static void efx_flush_copy_buffer(struct efx_nic *efx, u8 __iomem *piobuf,
				  struct efx_short_copy_buffer *copy_buf)
{
	/* if there's anything in it, write the whole buffer, including junk */
	if (copy_buf->used)
		__iowrite64_copy(piobuf, copy_buf->buf,
				 sizeof(copy_buf->buf) >> 3);
}
/* Traverse skb structure and copy fragments in to PIO buffer.
 * Advances piobuf pointer.
 */
static void efx_skb_copy_bits_to_pio(struct efx_nic *efx, struct sk_buff *skb,
				     u8 __iomem **piobuf,
				     struct efx_short_copy_buffer *copy_buf)
{
	int i;

	efx_memcpy_toio_aligned(efx, piobuf, skb->data, skb_headlen(skb),
				copy_buf);

	for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
		skb_frag_t *f = &skb_shinfo(skb)->frags[i];
		u8 *vaddr;

		vaddr = kmap_atomic(skb_frag_page(f));

		efx_memcpy_toio_aligned_cb(efx, piobuf, vaddr + f->page_offset,
					   skb_frag_size(f), copy_buf);
		kunmap_atomic(vaddr);
	}

	EFX_BUG_ON_PARANOID(skb_shinfo(skb)->frag_list);
}
static struct efx_tx_buffer *
efx_enqueue_skb_pio(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
{
	struct efx_tx_buffer *buffer =
		efx_tx_queue_get_insert_buffer(tx_queue);
	u8 __iomem *piobuf = tx_queue->piobuf;

	/* Copy to PIO buffer. Ensure the writes are padded to the end
	 * of a cache line, as this is required for write-combining to be
	 * effective on at least x86.
	 */

	if (skb_shinfo(skb)->nr_frags) {
		/* The size of the copy buffer will ensure all writes
		 * are the size of a cache line.
		 */
		struct efx_short_copy_buffer copy_buf;

		copy_buf.used = 0;

		efx_skb_copy_bits_to_pio(tx_queue->efx, skb,
					 &piobuf, &copy_buf);
		efx_flush_copy_buffer(tx_queue->efx, piobuf, &copy_buf);
	} else {
		/* Pad the write to the size of a cache line.
		 * We can do this because we know the skb_shared_info struct is
		 * after the source, and the destination buffer is big enough.
		 */
		BUILD_BUG_ON(L1_CACHE_BYTES >
			     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
		__iowrite64_copy(tx_queue->piobuf, skb->data,
				 ALIGN(skb->len, L1_CACHE_BYTES) >> 3);
	}

	EFX_POPULATE_QWORD_5(buffer->option,
			     ESF_DZ_TX_DESC_IS_OPT, 1,
			     ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_PIO,
			     ESF_DZ_TX_PIO_CONT, 0,
			     ESF_DZ_TX_PIO_BYTE_CNT, skb->len,
			     ESF_DZ_TX_PIO_BUF_ADDR,
			     tx_queue->piobuf_offset);
	++tx_queue->pio_packets;
	++tx_queue->insert_count;

	return buffer;
}
#endif /* EFX_USE_PIO */
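/* Example of the padded PIO copy above (illustrative, assuming
 * L1_CACHE_BYTES == 64): a 60-byte linear skb is written as
 * ALIGN(60, 64) >> 3 == 8 qwords, so the cache line is fully populated and
 * write-combining can flush it as a single burst.
 */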
/*
 * Add a socket buffer to a TX queue
 *
 * This maps all fragments of a socket buffer for DMA and adds them to
 * the TX queue.  The queue's insert pointer will be incremented by
 * the number of fragments in the socket buffer.
 *
 * If any DMA mapping fails, any mapped fragments will be unmapped and
 * the queue's insert pointer will be restored to its original value.
 *
 * This function is split out from efx_hard_start_xmit to allow the
 * loopback test to direct packets via specific TX queues.
 *
 * Returns NETDEV_TX_OK.
 * You must hold netif_tx_lock() to call this function.
 */
netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
{
	struct efx_nic *efx = tx_queue->efx;
	struct device *dma_dev = &efx->pci_dev->dev;
	struct efx_tx_buffer *buffer;
	unsigned int old_insert_count = tx_queue->insert_count;
	skb_frag_t *fragment;
	unsigned int len, unmap_len = 0;
	dma_addr_t dma_addr, unmap_addr = 0;
	unsigned int dma_len;
	unsigned short dma_flags;
	int i = 0;

	if (skb_shinfo(skb)->gso_size)
		return efx_enqueue_skb_tso(tx_queue, skb);
	/* Get size of the initial fragment */
	len = skb_headlen(skb);

	/* Pad if necessary */
	if (EFX_WORKAROUND_15592(efx) && skb->len <= 32) {
		EFX_BUG_ON_PARANOID(skb->data_len);
		len = 32 + 1;
		if (skb_pad(skb, len - skb->len))
			return NETDEV_TX_OK;
	}

	/* Consider using PIO for short packets */
#ifdef EFX_USE_PIO
	if (skb->len <= efx_piobuf_size && !skb->xmit_more &&
	    efx_nic_may_tx_pio(tx_queue)) {
		buffer = efx_enqueue_skb_pio(tx_queue, skb);
		dma_flags = EFX_TX_BUF_OPTION;
		goto finish_packet;
	}
#endif

	/* Map for DMA.  Use dma_map_single rather than dma_map_page
	 * since this is more efficient on machines with sparse
	 * memory.
	 */
	dma_flags = EFX_TX_BUF_MAP_SINGLE;
	dma_addr = dma_map_single(dma_dev, skb->data, len, PCI_DMA_TODEVICE);
	/* Process all fragments */
	while (1) {
		if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
			goto dma_err;

		/* Store fields for marking in the per-fragment final
		 * descriptor */
		unmap_len = len;
		unmap_addr = dma_addr;

		/* Add to TX queue, splitting across DMA boundaries */
		do {
			buffer = efx_tx_queue_get_insert_buffer(tx_queue);

			dma_len = efx_max_tx_len(efx, dma_addr);
			if (likely(dma_len >= len))
				dma_len = len;

			/* Fill out per descriptor fields */
			buffer->len = dma_len;
			buffer->dma_addr = dma_addr;
			buffer->flags = EFX_TX_BUF_CONT;
			len -= dma_len;
			dma_addr += dma_len;
			++tx_queue->insert_count;
		} while (len);

		/* Transfer ownership of the unmapping to the final buffer */
		buffer->flags = EFX_TX_BUF_CONT | dma_flags;
		buffer->unmap_len = unmap_len;
		buffer->dma_offset = buffer->dma_addr - unmap_addr;
		unmap_len = 0;

		/* Get address and size of next fragment */
		if (i >= skb_shinfo(skb)->nr_frags)
			break;
		fragment = &skb_shinfo(skb)->frags[i];
		len = skb_frag_size(fragment);
		i++;
		/* Map for DMA */
		dma_flags = 0;
		dma_addr = skb_frag_dma_map(dma_dev, fragment, 0, len,
					    DMA_TO_DEVICE);
	}
	/* Transfer ownership of the skb to the final buffer */
#ifdef EFX_USE_PIO
finish_packet:
#endif
	buffer->skb = skb;
	buffer->flags = EFX_TX_BUF_SKB | dma_flags;

	netdev_tx_sent_queue(tx_queue->core_txq, skb->len);

	efx_tx_maybe_stop_queue(tx_queue);

	/* Pass off to hardware */
	if (!skb->xmit_more || netif_xmit_stopped(tx_queue->core_txq))
		efx_nic_push_buffers(tx_queue);

	tx_queue->tx_packets++;

	return NETDEV_TX_OK;

 dma_err:
	netif_err(efx, tx_err, efx->net_dev,
		  " TX queue %d could not map skb with %d bytes %d "
		  "fragments for DMA\n", tx_queue->queue, skb->len,
		  skb_shinfo(skb)->nr_frags + 1);

	/* Mark the packet as transmitted, and free the SKB ourselves */
	dev_kfree_skb_any(skb);

	/* Work backwards until we hit the original insert pointer value */
	while (tx_queue->insert_count != old_insert_count) {
		unsigned int pkts_compl = 0, bytes_compl = 0;
		--tx_queue->insert_count;
		buffer = __efx_tx_queue_get_insert_buffer(tx_queue);
		efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
	}

	/* Free the fragment we were mid-way through pushing */
	if (unmap_len) {
		if (dma_flags & EFX_TX_BUF_MAP_SINGLE)
			dma_unmap_single(dma_dev, unmap_addr, unmap_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dma_dev, unmap_addr, unmap_len,
				       DMA_TO_DEVICE);
	}

	return NETDEV_TX_OK;
}
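/* Descriptor-splitting example for efx_enqueue_skb() (illustrative,
 * assuming EFX_PAGE_SIZE == 4096): a 9000-byte linear mapping starting on
 * a 4K boundary becomes three descriptors of 4096, 4096 and 808 bytes,
 * with the unmap information carried only by the final descriptor.
 */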
/* Remove packets from the TX queue
 *
 * This removes packets from the TX queue, up to and including the
 * specified index.
 */
static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
				unsigned int index,
				unsigned int *pkts_compl,
				unsigned int *bytes_compl)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned int stop_index, read_ptr;

	stop_index = (index + 1) & tx_queue->ptr_mask;
	read_ptr = tx_queue->read_count & tx_queue->ptr_mask;

	while (read_ptr != stop_index) {
		struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];

		if (!(buffer->flags & EFX_TX_BUF_OPTION) &&
		    unlikely(buffer->len == 0)) {
			netif_err(efx, tx_err, efx->net_dev,
				  "TX queue %d spurious TX completion id %x\n",
				  tx_queue->queue, read_ptr);
			efx_schedule_reset(efx, RESET_TYPE_TX_SKIP);
			return;
		}

		efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl);

		++tx_queue->read_count;
		read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
	}
}
/* Initiate a packet transmission.  We use one channel per CPU
 * (sharing when we have more CPUs than channels).  On Falcon, the TX
 * completion events will be directed back to the CPU that transmitted
 * the packet, which should be cache-efficient.
 *
 * Context: non-blocking.
 * Note that returning anything other than NETDEV_TX_OK will cause the
 * OS to free the skb.
 */
netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
				struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_tx_queue *tx_queue;
	unsigned index, type;

	EFX_WARN_ON_PARANOID(!netif_device_present(net_dev));

	/* PTP "event" packet */
	if (unlikely(efx_xmit_with_hwtstamp(skb)) &&
	    unlikely(efx_ptp_is_ptp_tx(efx, skb))) {
		return efx_ptp_tx(efx, skb);
	}

	index = skb_get_queue_mapping(skb);
	type = skb->ip_summed == CHECKSUM_PARTIAL ? EFX_TXQ_TYPE_OFFLOAD : 0;
	if (index >= efx->n_tx_channels) {
		index -= efx->n_tx_channels;
		type |= EFX_TXQ_TYPE_HIGHPRI;
	}

	tx_queue = efx_get_tx_queue(efx, index, type);

	return efx_enqueue_skb(tx_queue, skb);
}
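/* Queue-mapping example (illustrative): with efx->n_tx_channels == 4, a
 * core queue index of 5 for a CHECKSUM_PARTIAL skb selects TX channel 1
 * with type EFX_TXQ_TYPE_OFFLOAD | EFX_TXQ_TYPE_HIGHPRI, matching the
 * inverse mapping in efx_init_tx_queue_core_txq() below.
 */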
void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;

	/* Must be inverse of queue lookup in efx_hard_start_xmit() */
	tx_queue->core_txq =
		netdev_get_tx_queue(efx->net_dev,
				    tx_queue->queue / EFX_TXQ_TYPES +
				    ((tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
				     efx->n_tx_channels : 0));
}
int efx_setup_tc(struct net_device *net_dev, u8 num_tc)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;
	unsigned tc;
	int rc;

	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0 || num_tc > EFX_MAX_TX_TC)
		return -EINVAL;

	if (num_tc == net_dev->num_tc)
		return 0;

	for (tc = 0; tc < num_tc; tc++) {
		net_dev->tc_to_txq[tc].offset = tc * efx->n_tx_channels;
		net_dev->tc_to_txq[tc].count = efx->n_tx_channels;
	}

	if (num_tc > net_dev->num_tc) {
		/* Initialise high-priority queues as necessary */
		efx_for_each_channel(channel, efx) {
			efx_for_each_possible_channel_tx_queue(tx_queue,
							       channel) {
				if (!(tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI))
					continue;
				if (!tx_queue->buffer) {
					rc = efx_probe_tx_queue(tx_queue);
					if (rc)
						return rc;
				}
				if (!tx_queue->initialised)
					efx_init_tx_queue(tx_queue);
				efx_init_tx_queue_core_txq(tx_queue);
			}
		}
	} else {
		/* Reduce number of classes before number of queues */
		net_dev->num_tc = num_tc;
	}

	rc = netif_set_real_num_tx_queues(net_dev,
					  max_t(int, num_tc, 1) *
					  efx->n_tx_channels);
	if (rc)
		return rc;

	/* Do not destroy high-priority queues when they become
	 * unused.  We would have to flush them first, and it is
	 * fairly difficult to flush a subset of TX queues.  Leave
	 * it to efx_fini_channels().
	 */

	net_dev->num_tc = num_tc;
	return 0;
}
void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
{
	unsigned fill_level;
	struct efx_nic *efx = tx_queue->efx;
	struct efx_tx_queue *txq2;
	unsigned int pkts_compl = 0, bytes_compl = 0;

	EFX_BUG_ON_PARANOID(index > tx_queue->ptr_mask);

	efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl);
	netdev_tx_completed_queue(tx_queue->core_txq, pkts_compl, bytes_compl);

	if (pkts_compl > 1)
		++tx_queue->merge_events;

	/* See if we need to restart the netif queue.  This memory
	 * barrier ensures that we write read_count (inside
	 * efx_dequeue_buffers()) before reading the queue status.
	 */
	smp_mb();
	if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
	    likely(efx->port_enabled) &&
	    likely(netif_device_present(efx->net_dev))) {
		txq2 = efx_tx_queue_partner(tx_queue);
		fill_level = max(tx_queue->insert_count - tx_queue->read_count,
				 txq2->insert_count - txq2->read_count);
		if (fill_level <= efx->txq_wake_thresh)
			netif_tx_wake_queue(tx_queue->core_txq);
	}

	/* Check whether the hardware queue is now empty */
	if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
		tx_queue->old_write_count = ACCESS_ONCE(tx_queue->write_count);
		if (tx_queue->read_count == tx_queue->old_write_count) {
			smp_mb();
			tx_queue->empty_read_count =
				tx_queue->read_count | EFX_EMPTY_COUNT_VALID;
		}
	}
}
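/* The barrier above pairs with the one in efx_tx_maybe_stop_queue(): the
 * transmit path stops the queue before re-reading read_count, and the
 * completion path updates read_count (in efx_dequeue_buffers()) before
 * checking whether the queue is stopped, so at least one side always
 * observes the other's update.
 */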
/* Size of page-based TSO header buffers.  Larger blocks must be
 * allocated from the heap.
 */
#define TSOH_STD_SIZE	128
#define TSOH_PER_PAGE	(PAGE_SIZE / TSOH_STD_SIZE)

/* At most half the descriptors in the queue at any time will refer to
 * a TSO header buffer, since they must always be followed by a
 * payload descriptor referring to an skb.
 */
static unsigned int efx_tsoh_page_count(struct efx_tx_queue *tx_queue)
{
	return DIV_ROUND_UP(tx_queue->ptr_mask + 1, 2 * TSOH_PER_PAGE);
}
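/* Worked example (illustrative, assuming PAGE_SIZE == 4096):
 * TSOH_PER_PAGE == 4096 / 128 == 32, so a 1024-entry TX ring needs
 * DIV_ROUND_UP(1024, 2 * 32) == 16 header pages.
 */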
int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned int entries;
	int rc;

	/* Create the smallest power-of-two aligned ring */
	entries = max(roundup_pow_of_two(efx->txq_entries), EFX_MIN_DMAQ_SIZE);
	EFX_BUG_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
	tx_queue->ptr_mask = entries - 1;

	netif_dbg(efx, probe, efx->net_dev,
		  "creating TX queue %d size %#x mask %#x\n",
		  tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask);

	/* Allocate software ring */
	tx_queue->buffer = kcalloc(entries, sizeof(*tx_queue->buffer),
				   GFP_KERNEL);
	if (!tx_queue->buffer)
		return -ENOMEM;

	if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD) {
		tx_queue->tsoh_page =
			kcalloc(efx_tsoh_page_count(tx_queue),
				sizeof(tx_queue->tsoh_page[0]), GFP_KERNEL);
		if (!tx_queue->tsoh_page) {
			rc = -ENOMEM;
			goto fail1;
		}
	}

	/* Allocate hardware ring */
	rc = efx_nic_probe_tx(tx_queue);
	if (rc)
		goto fail2;

	return 0;

fail2:
	kfree(tx_queue->tsoh_page);
	tx_queue->tsoh_page = NULL;
fail1:
	kfree(tx_queue->buffer);
	tx_queue->buffer = NULL;
	return rc;
}
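/* Ring-sizing example (illustrative, assuming EFX_MIN_DMAQ_SIZE <= 512):
 * with efx->txq_entries == 512 the ring is already a power of two, so
 * entries == 512 and tx_queue->ptr_mask == 0x1ff; a request for 600
 * entries would be rounded up to 1024 with a mask of 0x3ff.
 */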
void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
{
	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
		  "initialising TX queue %d\n", tx_queue->queue);

	tx_queue->insert_count = 0;
	tx_queue->write_count = 0;
	tx_queue->old_write_count = 0;
	tx_queue->read_count = 0;
	tx_queue->old_read_count = 0;
	tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID;

	/* Set up TX descriptor ring */
	efx_nic_init_tx(tx_queue);

	tx_queue->initialised = true;
}
void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer;

	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
		  "shutting down TX queue %d\n", tx_queue->queue);

	if (!tx_queue->buffer)
		return;

	/* Free any buffers left in the ring */
	while (tx_queue->read_count != tx_queue->write_count) {
		unsigned int pkts_compl = 0, bytes_compl = 0;
		buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
		efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);

		++tx_queue->read_count;
	}
	netdev_tx_reset_queue(tx_queue->core_txq);
}
void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
{
	int i;

	if (!tx_queue->buffer)
		return;

	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
		  "destroying TX queue %d\n", tx_queue->queue);
	efx_nic_remove_tx(tx_queue);

	if (tx_queue->tsoh_page) {
		for (i = 0; i < efx_tsoh_page_count(tx_queue); i++)
			efx_nic_free_buffer(tx_queue->efx,
					    &tx_queue->tsoh_page[i]);
		kfree(tx_queue->tsoh_page);
		tx_queue->tsoh_page = NULL;
	}

	kfree(tx_queue->buffer);
	tx_queue->buffer = NULL;
}
/* Efx TCP segmentation acceleration.
 *
 * Why?  Because by doing it here in the driver we can go significantly
 * faster than the GSO.
 *
 * Requires TX checksum offload support.
 */

#define PTR_DIFF(p1, p2)  ((u8 *)(p1) - (u8 *)(p2))
/**
 * struct tso_state - TSO state for an SKB
 * @out_len: Remaining length in current segment
 * @seqnum: Current sequence number
 * @ipv4_id: Current IPv4 ID, host endian
 * @packet_space: Remaining space in current packet
 * @dma_addr: DMA address of current position
 * @in_len: Remaining length in current SKB fragment
 * @unmap_len: Length of SKB fragment
 * @unmap_addr: DMA address of SKB fragment
 * @dma_flags: TX buffer flags for DMA mapping - %EFX_TX_BUF_MAP_SINGLE or 0
 * @protocol: Network protocol (after any VLAN header)
 * @ip_off: Offset of IP header
 * @tcp_off: Offset of TCP header
 * @header_len: Number of bytes of header
 * @ip_base_len: IPv4 tot_len or IPv6 payload_len, before TCP payload
 * @header_dma_addr: Header DMA address, when using option descriptors
 * @header_unmap_len: Header DMA mapped length, or 0 if not using option
 *	descriptors
 *
 * The state used during segmentation.  It is put into this data structure
 * just to make it easy to pass into inline functions.
 */
struct tso_state {
	/* Output position */
	unsigned out_len;
	unsigned seqnum;
	u16 ipv4_id;
	unsigned packet_space;

	/* Input position */
	dma_addr_t dma_addr;
	unsigned in_len;
	unsigned unmap_len;
	dma_addr_t unmap_addr;
	unsigned short dma_flags;

	__be16 protocol;
	unsigned int ip_off;
	unsigned int tcp_off;
	unsigned header_len;
	unsigned int ip_base_len;
	dma_addr_t header_dma_addr;
	unsigned int header_unmap_len;
};
/*
 * Verify that our various assumptions about sk_buffs and the conditions
 * under which TSO will be attempted hold true.  Return the protocol number.
 */
static __be16 efx_tso_check_protocol(struct sk_buff *skb)
{
	__be16 protocol = skb->protocol;

	EFX_BUG_ON_PARANOID(((struct ethhdr *)skb->data)->h_proto !=
			    protocol);
	if (protocol == htons(ETH_P_8021Q)) {
		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
		protocol = veh->h_vlan_encapsulated_proto;
	}

	if (protocol == htons(ETH_P_IP)) {
		EFX_BUG_ON_PARANOID(ip_hdr(skb)->protocol != IPPROTO_TCP);
	} else {
		EFX_BUG_ON_PARANOID(protocol != htons(ETH_P_IPV6));
		EFX_BUG_ON_PARANOID(ipv6_hdr(skb)->nexthdr != NEXTHDR_TCP);
	}

	EFX_BUG_ON_PARANOID((PTR_DIFF(tcp_hdr(skb), skb->data)
			     + (tcp_hdr(skb)->doff << 2u)) >
			    skb_headlen(skb));

	return protocol;
}
static u8 *efx_tsoh_get_buffer(struct efx_tx_queue *tx_queue,
			       struct efx_tx_buffer *buffer, unsigned int len)
{
	u8 *result;

	EFX_BUG_ON_PARANOID(buffer->len);
	EFX_BUG_ON_PARANOID(buffer->flags);
	EFX_BUG_ON_PARANOID(buffer->unmap_len);

	if (likely(len <= TSOH_STD_SIZE - NET_IP_ALIGN)) {
		unsigned index =
			(tx_queue->insert_count & tx_queue->ptr_mask) / 2;
		struct efx_buffer *page_buf =
			&tx_queue->tsoh_page[index / TSOH_PER_PAGE];
		unsigned offset =
			TSOH_STD_SIZE * (index % TSOH_PER_PAGE) + NET_IP_ALIGN;

		if (unlikely(!page_buf->addr) &&
		    efx_nic_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE,
					 GFP_ATOMIC))
			return NULL;

		result = (u8 *)page_buf->addr + offset;
		buffer->dma_addr = page_buf->dma_addr + offset;
		buffer->flags = EFX_TX_BUF_CONT;
	} else {
		tx_queue->tso_long_headers++;

		buffer->heap_buf = kmalloc(NET_IP_ALIGN + len, GFP_ATOMIC);
		if (unlikely(!buffer->heap_buf))
			return NULL;
		result = (u8 *)buffer->heap_buf + NET_IP_ALIGN;
		buffer->flags = EFX_TX_BUF_CONT | EFX_TX_BUF_HEAP;
	}

	buffer->len = len;

	return result;
}
/**
 * efx_tx_queue_insert - push descriptors onto the TX queue
 * @tx_queue: Efx TX queue
 * @dma_addr: DMA address of fragment
 * @len: Length of fragment
 * @final_buffer: The final buffer inserted into the queue
 *
 * Push descriptors onto the TX queue.
 */
static void efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
				dma_addr_t dma_addr, unsigned len,
				struct efx_tx_buffer **final_buffer)
{
	struct efx_tx_buffer *buffer;
	struct efx_nic *efx = tx_queue->efx;
	unsigned dma_len;

	EFX_BUG_ON_PARANOID(len <= 0);

	while (1) {
		buffer = efx_tx_queue_get_insert_buffer(tx_queue);
		++tx_queue->insert_count;

		EFX_BUG_ON_PARANOID(tx_queue->insert_count -
				    tx_queue->read_count >=
				    efx->txq_entries);

		buffer->dma_addr = dma_addr;

		dma_len = efx_max_tx_len(efx, dma_addr);

		/* If there is enough space to send then do so */
		if (dma_len >= len)
			break;

		buffer->len = dma_len;
		buffer->flags = EFX_TX_BUF_CONT;
		dma_addr += dma_len;
		len -= dma_len;
	}

	EFX_BUG_ON_PARANOID(!len);
	buffer->len = len;
	*final_buffer = buffer;
}
/*
 * Put a TSO header into the TX queue.
 *
 * This is special-cased because we know that it is small enough to fit in
 * a single fragment, and we know it doesn't cross a page boundary.  It
 * also allows us to not worry about end-of-packet etc.
 */
static int efx_tso_put_header(struct efx_tx_queue *tx_queue,
			      struct efx_tx_buffer *buffer, u8 *header)
{
	if (unlikely(buffer->flags & EFX_TX_BUF_HEAP)) {
		buffer->dma_addr = dma_map_single(&tx_queue->efx->pci_dev->dev,
						  header, buffer->len,
						  DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(&tx_queue->efx->pci_dev->dev,
					       buffer->dma_addr))) {
			kfree(buffer->heap_buf);
			buffer->len = 0;
			buffer->flags = 0;
			return -ENOMEM;
		}
		buffer->unmap_len = buffer->len;
		buffer->dma_offset = 0;
		buffer->flags |= EFX_TX_BUF_MAP_SINGLE;
	}

	++tx_queue->insert_count;
	return 0;
}
/* Remove buffers put into a tx_queue.  None of the buffers must have
 * an skb attached.
 */
static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue,
			       unsigned int insert_count)
{
	struct efx_tx_buffer *buffer;

	/* Work backwards until we hit the original insert pointer value */
	while (tx_queue->insert_count != insert_count) {
		--tx_queue->insert_count;
		buffer = __efx_tx_queue_get_insert_buffer(tx_queue);
		efx_dequeue_buffer(tx_queue, buffer, NULL, NULL);
	}
}
/* Parse the SKB header and initialise state. */
static int tso_start(struct tso_state *st, struct efx_nic *efx,
		     const struct sk_buff *skb)
{
	bool use_opt_desc = efx_nic_rev(efx) >= EFX_REV_HUNT_A0;
	struct device *dma_dev = &efx->pci_dev->dev;
	unsigned int header_len, in_len;
	dma_addr_t dma_addr;

	st->ip_off = skb_network_header(skb) - skb->data;
	st->tcp_off = skb_transport_header(skb) - skb->data;
	header_len = st->tcp_off + (tcp_hdr(skb)->doff << 2u);
	in_len = skb_headlen(skb) - header_len;
	st->header_len = header_len;
	st->in_len = in_len;
	if (st->protocol == htons(ETH_P_IP)) {
		st->ip_base_len = st->header_len - st->ip_off;
		st->ipv4_id = ntohs(ip_hdr(skb)->id);
	} else {
		st->ip_base_len = st->header_len - st->tcp_off;
		st->ipv4_id = 0;
	}
	st->seqnum = ntohl(tcp_hdr(skb)->seq);

	EFX_BUG_ON_PARANOID(tcp_hdr(skb)->urg);
	EFX_BUG_ON_PARANOID(tcp_hdr(skb)->syn);
	EFX_BUG_ON_PARANOID(tcp_hdr(skb)->rst);

	st->out_len = skb->len - header_len;

	if (!use_opt_desc) {
		st->header_unmap_len = 0;

		if (likely(in_len == 0)) {
			st->dma_flags = 0;
			st->unmap_len = 0;
			return 0;
		}

		dma_addr = dma_map_single(dma_dev, skb->data + header_len,
					  in_len, DMA_TO_DEVICE);
		st->dma_flags = EFX_TX_BUF_MAP_SINGLE;
		st->dma_addr = dma_addr;
		st->unmap_addr = dma_addr;
		st->unmap_len = in_len;
	} else {
		dma_addr = dma_map_single(dma_dev, skb->data,
					  skb_headlen(skb), DMA_TO_DEVICE);
		st->header_dma_addr = dma_addr;
		st->header_unmap_len = skb_headlen(skb);
		st->dma_flags = 0;
		st->dma_addr = dma_addr + header_len;
		st->unmap_len = 0;
	}

	return unlikely(dma_mapping_error(dma_dev, dma_addr)) ? -ENOMEM : 0;
}
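/* Header-length example (illustrative): for an untagged IPv4/TCP skb with
 * no TCP options, ip_off == 14, tcp_off == 34 and doff == 5, giving
 * header_len == 34 + 20 == 54 bytes; anything beyond that in the linear
 * area is treated as payload (in_len).
 */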
static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
			    skb_frag_t *frag)
{
	st->unmap_addr = skb_frag_dma_map(&efx->pci_dev->dev, frag, 0,
					  skb_frag_size(frag), DMA_TO_DEVICE);
	if (likely(!dma_mapping_error(&efx->pci_dev->dev, st->unmap_addr))) {
		st->dma_flags = 0;
		st->unmap_len = skb_frag_size(frag);
		st->in_len = skb_frag_size(frag);
		st->dma_addr = st->unmap_addr;
		return 0;
	}
	return -ENOMEM;
}
/**
 * tso_fill_packet_with_fragment - form descriptors for the current fragment
 * @tx_queue: Efx TX queue
 * @skb: Socket buffer
 * @st: TSO state
 *
 * Form descriptors for the current fragment, until we reach the end
 * of fragment or end-of-packet.
 */
static void tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
					  const struct sk_buff *skb,
					  struct tso_state *st)
{
	struct efx_tx_buffer *buffer;
	int n;

	if (st->in_len == 0)
		return;
	if (st->packet_space == 0)
		return;

	EFX_BUG_ON_PARANOID(st->in_len <= 0);
	EFX_BUG_ON_PARANOID(st->packet_space <= 0);

	n = min(st->in_len, st->packet_space);

	st->packet_space -= n;
	st->out_len -= n;
	st->in_len -= n;

	efx_tx_queue_insert(tx_queue, st->dma_addr, n, &buffer);

	if (st->out_len == 0) {
		/* Transfer ownership of the skb */
		buffer->skb = skb;
		buffer->flags = EFX_TX_BUF_SKB;
	} else if (st->packet_space != 0) {
		buffer->flags = EFX_TX_BUF_CONT;
	}

	if (st->in_len == 0) {
		/* Transfer ownership of the DMA mapping */
		buffer->unmap_len = st->unmap_len;
		buffer->dma_offset = buffer->unmap_len - buffer->len;
		buffer->flags |= st->dma_flags;
		st->unmap_len = 0;
	}

	st->dma_addr += n;
}
/**
 * tso_start_new_packet - generate a new header and prepare for the new packet
 * @tx_queue: Efx TX queue
 * @skb: Socket buffer
 * @st: TSO state
 *
 * Generate a new header and prepare for the new packet.  Return 0 on
 * success, or -%ENOMEM if failed to alloc header.
 */
static int tso_start_new_packet(struct efx_tx_queue *tx_queue,
				const struct sk_buff *skb,
				struct tso_state *st)
{
	struct efx_tx_buffer *buffer =
		efx_tx_queue_get_insert_buffer(tx_queue);
	bool is_last = st->out_len <= skb_shinfo(skb)->gso_size;
	u8 tcp_flags_clear;

	if (!is_last) {
		st->packet_space = skb_shinfo(skb)->gso_size;
		tcp_flags_clear = 0x09; /* mask out FIN and PSH */
	} else {
		st->packet_space = st->out_len;
		tcp_flags_clear = 0x00;
	}

	if (!st->header_unmap_len) {
		/* Allocate and insert a DMA-mapped header buffer. */
		struct tcphdr *tsoh_th;
		unsigned ip_length;
		u8 *header;
		int rc;

		header = efx_tsoh_get_buffer(tx_queue, buffer, st->header_len);
		if (!header)
			return -ENOMEM;

		tsoh_th = (struct tcphdr *)(header + st->tcp_off);

		/* Copy and update the headers. */
		memcpy(header, skb->data, st->header_len);

		tsoh_th->seq = htonl(st->seqnum);
		((u8 *)tsoh_th)[13] &= ~tcp_flags_clear;

		ip_length = st->ip_base_len + st->packet_space;

		if (st->protocol == htons(ETH_P_IP)) {
			struct iphdr *tsoh_iph =
				(struct iphdr *)(header + st->ip_off);

			tsoh_iph->tot_len = htons(ip_length);
			tsoh_iph->id = htons(st->ipv4_id);
		} else {
			struct ipv6hdr *tsoh_iph =
				(struct ipv6hdr *)(header + st->ip_off);

			tsoh_iph->payload_len = htons(ip_length);
		}

		rc = efx_tso_put_header(tx_queue, buffer, header);
		if (unlikely(rc))
			return rc;
	} else {
		/* Send the original headers with a TSO option descriptor
		 * in front
		 */
		u8 tcp_flags = ((u8 *)tcp_hdr(skb))[13] & ~tcp_flags_clear;

		buffer->flags = EFX_TX_BUF_OPTION;
		buffer->len = 0;
		buffer->unmap_len = 0;
		EFX_POPULATE_QWORD_5(buffer->option,
				     ESF_DZ_TX_DESC_IS_OPT, 1,
				     ESF_DZ_TX_OPTION_TYPE,
				     ESE_DZ_TX_OPTION_DESC_TSO,
				     ESF_DZ_TX_TSO_TCP_FLAGS, tcp_flags,
				     ESF_DZ_TX_TSO_IP_ID, st->ipv4_id,
				     ESF_DZ_TX_TSO_TCP_SEQNO, st->seqnum);
		++tx_queue->insert_count;

		/* We mapped the headers in tso_start().  Unmap them
		 * when the last segment is completed.
		 */
		buffer = efx_tx_queue_get_insert_buffer(tx_queue);
		buffer->dma_addr = st->header_dma_addr;
		buffer->len = st->header_len;
		if (is_last) {
			buffer->flags = EFX_TX_BUF_CONT | EFX_TX_BUF_MAP_SINGLE;
			buffer->unmap_len = st->header_unmap_len;
			buffer->dma_offset = 0;
			/* Ensure we only unmap them once in case of a
			 * later DMA mapping error and rollback
			 */
			st->header_unmap_len = 0;
		} else {
			buffer->flags = EFX_TX_BUF_CONT;
			buffer->unmap_len = 0;
		}
		++tx_queue->insert_count;
	}

	st->seqnum += skb_shinfo(skb)->gso_size;

	/* Linux leaves suitable gaps in the IP ID space for us to fill. */
	++st->ipv4_id;
	++tx_queue->tso_packets;

	++tx_queue->tx_packets;

	return 0;
}
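/* TCP flag handling above (descriptive note): byte 13 of the TCP header
 * holds the flag bits, and 0x09 == FIN | PSH, so every segment except the
 * last has FIN and PSH cleared while the final segment keeps the original
 * flags.
 */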
/**
 * efx_enqueue_skb_tso - segment and transmit a TSO socket buffer
 * @tx_queue: Efx TX queue
 * @skb: Socket buffer
 *
 * Context: You must hold netif_tx_lock() to call this function.
 *
 * Add socket buffer @skb to @tx_queue, doing TSO or return != 0 if
 * @skb was not enqueued.  In all cases @skb is consumed.  Return
 * %NETDEV_TX_OK.
 */
static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
			       struct sk_buff *skb)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned int old_insert_count = tx_queue->insert_count;
	int frag_i, rc;
	struct tso_state state;

	/* Find the packet protocol and sanity-check it */
	state.protocol = efx_tso_check_protocol(skb);

	rc = tso_start(&state, efx, skb);
	if (rc)
		goto mem_err;

	if (likely(state.in_len == 0)) {
		/* Grab the first payload fragment. */
		EFX_BUG_ON_PARANOID(skb_shinfo(skb)->nr_frags < 1);
		frag_i = 0;
		rc = tso_get_fragment(&state, efx,
				      skb_shinfo(skb)->frags + frag_i);
		if (rc)
			goto mem_err;
	} else {
		/* Payload starts in the header area. */
		frag_i = -1;
	}

	if (tso_start_new_packet(tx_queue, skb, &state) < 0)
		goto mem_err;

	while (1) {
		tso_fill_packet_with_fragment(tx_queue, skb, &state);

		/* Move onto the next fragment? */
		if (state.in_len == 0) {
			if (++frag_i >= skb_shinfo(skb)->nr_frags)
				/* End of payload reached. */
				break;
			rc = tso_get_fragment(&state, efx,
					      skb_shinfo(skb)->frags + frag_i);
			if (rc)
				goto mem_err;
		}

		/* Start at new packet? */
		if (state.packet_space == 0 &&
		    tso_start_new_packet(tx_queue, skb, &state) < 0)
			goto mem_err;
	}

	netdev_tx_sent_queue(tx_queue->core_txq, skb->len);

	efx_tx_maybe_stop_queue(tx_queue);

	/* Pass off to hardware */
	if (!skb->xmit_more || netif_xmit_stopped(tx_queue->core_txq))
		efx_nic_push_buffers(tx_queue);

	tx_queue->tso_bursts++;
	return NETDEV_TX_OK;

 mem_err:
	netif_err(efx, tx_err, efx->net_dev,
		  "Out of memory for TSO headers, or DMA mapping error\n");
	dev_kfree_skb_any(skb);

	/* Free the DMA mapping we were in the process of writing out */
	if (state.unmap_len) {
		if (state.dma_flags & EFX_TX_BUF_MAP_SINGLE)
			dma_unmap_single(&efx->pci_dev->dev, state.unmap_addr,
					 state.unmap_len, DMA_TO_DEVICE);
		else
			dma_unmap_page(&efx->pci_dev->dev, state.unmap_addr,
				       state.unmap_len, DMA_TO_DEVICE);
	}

	/* Free the header DMA mapping, if using option descriptors */
	if (state.header_unmap_len)
		dma_unmap_single(&efx->pci_dev->dev, state.header_dma_addr,
				 state.header_unmap_len, DMA_TO_DEVICE);

	efx_enqueue_unwind(tx_queue, old_insert_count);
	return NETDEV_TX_OK;
}