/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2008 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/pci.h>
#include <linux/tcp.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/if_ether.h>
#include <linux/highmem.h>
#include "net_driver.h"
#include "tx.h"
#include "efx.h"
#include "falcon.h"
#include "workarounds.h"

/*
 * TX descriptor ring full threshold
 *
 * The tx_queue descriptor ring fill-level must fall below this value
 * before we restart the netif queue
 */
#define EFX_NETDEV_TX_THRESHOLD(_tx_queue)	\
	(_tx_queue->efx->type->txd_ring_mask / 2u)

/* We want to be able to nest calls to netif_stop_queue(), since each
 * channel can have an individual stop on the queue.
 */
void efx_stop_queue(struct efx_nic *efx)
{
	spin_lock_bh(&efx->netif_stop_lock);
	EFX_TRACE(efx, "stop TX queue\n");

	atomic_inc(&efx->netif_stop_count);
	netif_stop_queue(efx->net_dev);

	spin_unlock_bh(&efx->netif_stop_lock);
}

/* Wake netif's TX queue
 * We want to be able to nest calls to netif_stop_queue(), since each
 * channel can have an individual stop on the queue.
 */
void efx_wake_queue(struct efx_nic *efx)
{
	local_bh_disable();
	if (atomic_dec_and_lock(&efx->netif_stop_count,
				&efx->netif_stop_lock)) {
		EFX_TRACE(efx, "waking TX queue\n");
		netif_wake_queue(efx->net_dev);
		spin_unlock(&efx->netif_stop_lock);
	}
	local_bh_enable();
}
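
/* Unmap a buffer's DMA mapping (if this buffer owns the mapping) and
 * free its socket buffer, if any.  Called for every buffer removed
 * from the ring, including continuation fragments with no skb. */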
static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
			       struct efx_tx_buffer *buffer)
{
	if (buffer->unmap_len) {
		struct pci_dev *pci_dev = tx_queue->efx->pci_dev;
		dma_addr_t unmap_addr = (buffer->dma_addr + buffer->len -
					 buffer->unmap_len);
		if (buffer->unmap_single)
			pci_unmap_single(pci_dev, unmap_addr,
					 buffer->unmap_len,
					 PCI_DMA_TODEVICE);
		else
			pci_unmap_page(pci_dev, unmap_addr,
				       buffer->unmap_len,
				       PCI_DMA_TODEVICE);
		buffer->unmap_len = 0;
		buffer->unmap_single = false;
	}

	if (buffer->skb) {
		dev_kfree_skb_any((struct sk_buff *) buffer->skb);
		buffer->skb = NULL;
		EFX_TRACE(tx_queue->efx, "TX queue %d transmission id %x "
			  "complete\n", tx_queue->queue, tx_queue->read_count);
	}
}

/**
 * struct efx_tso_header - a DMA mapped buffer for packet headers
 * @next: Linked list of free ones.
 *	The list is protected by the TX queue lock.
 * @unmap_len: Length to unmap for an oversize buffer, or 0.
 * @dma_addr: The DMA address of the header below.
 *
 * This controls the memory used for a TSO header.  Use TSOH_BUFFER()
 * to find the packet header data.  Use TSOH_SIZE() to calculate the
 * total size required for a given packet header length.  TSO headers
 * in the free list are exactly %TSOH_STD_SIZE bytes in size.
 */
struct efx_tso_header {
	union {
		struct efx_tso_header *next;
		size_t unmap_len;
	};
	dma_addr_t dma_addr;
};

static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
			       struct sk_buff *skb);
static void efx_fini_tso(struct efx_tx_queue *tx_queue);
static void efx_tsoh_heap_free(struct efx_tx_queue *tx_queue,
			       struct efx_tso_header *tsoh);
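
/* Release the TSO header attached to @buffer, if any: standard-sized
 * headers go back on the queue's free list, oversize heap-allocated
 * headers are unmapped and freed. */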
static void efx_tsoh_free(struct efx_tx_queue *tx_queue,
			  struct efx_tx_buffer *buffer)
{
	if (buffer->tsoh) {
		if (likely(!buffer->tsoh->unmap_len)) {
			buffer->tsoh->next = tx_queue->tso_headers_free;
			tx_queue->tso_headers_free = buffer->tsoh;
		} else {
			efx_tsoh_heap_free(tx_queue, buffer->tsoh);
		}
		buffer->tsoh = NULL;
	}
}

/*
 * Add a socket buffer to a TX queue
 *
 * This maps all fragments of a socket buffer for DMA and adds them to
 * the TX queue.  The queue's insert pointer will be incremented by
 * the number of fragments in the socket buffer.
 *
 * If any DMA mapping fails, any mapped fragments will be unmapped, and
 * the queue's insert pointer will be restored to its original value.
 *
 * Returns NETDEV_TX_OK or NETDEV_TX_BUSY
 * You must hold netif_tx_lock() to call this function.
 */
static int efx_enqueue_skb(struct efx_tx_queue *tx_queue,
			   struct sk_buff *skb)
{
	struct efx_nic *efx = tx_queue->efx;
	struct pci_dev *pci_dev = efx->pci_dev;
	struct efx_tx_buffer *buffer;
	skb_frag_t *fragment;
	struct page *page;
	unsigned int page_offset;
	unsigned int len, unmap_len = 0, fill_level, insert_ptr, misalign;
	dma_addr_t dma_addr, unmap_addr = 0;
	unsigned int dma_len;
	bool unmap_single;
	int q_space, i = 0;
	int rc = NETDEV_TX_OK;

	EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);

	if (skb_shinfo((struct sk_buff *)skb)->gso_size)
		return efx_enqueue_skb_tso(tx_queue, skb);

	/* Get size of the initial fragment */
	len = skb_headlen(skb);

	/* Pad if necessary */
	if (EFX_WORKAROUND_15592(efx) && skb->len <= 32) {
		EFX_BUG_ON_PARANOID(skb->data_len);
		len = 32 + 1;
		if (skb_pad(skb, len - skb->len))
			return NETDEV_TX_OK;
	}

	fill_level = tx_queue->insert_count - tx_queue->old_read_count;
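	/* -1 as there is no way to represent all descriptors used;
	 * the same adjustment is made in efx_tx_queue_insert() */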
	q_space = efx->type->txd_ring_mask - 1 - fill_level;

	/* Map for DMA.  Use pci_map_single rather than pci_map_page
	 * since this is more efficient on machines with sparse
	 * memory.
	 */
	unmap_single = true;
	dma_addr = pci_map_single(pci_dev, skb->data, len, PCI_DMA_TODEVICE);

	/* Process all fragments */
	while (1) {
		if (unlikely(pci_dma_mapping_error(pci_dev, dma_addr)))
			goto pci_err;

		/* Store fields for marking in the per-fragment final
		 * descriptor */
		unmap_len = len;
		unmap_addr = dma_addr;

		/* Add to TX queue, splitting across DMA boundaries */
		do {
			if (unlikely(q_space-- <= 0)) {
				/* It might be that completions have
				 * happened since the xmit path last
				 * checked.  Update the xmit path's
				 * copy of read_count.
				 */
				++tx_queue->stopped;
				/* This memory barrier protects the
				 * change of stopped from the access
				 * of read_count. */
				smp_mb();
				tx_queue->old_read_count =
					*(volatile unsigned *)
					&tx_queue->read_count;
				fill_level = (tx_queue->insert_count
					      - tx_queue->old_read_count);
				q_space = (efx->type->txd_ring_mask - 1 -
					   fill_level);
				if (unlikely(q_space-- <= 0))
					goto stop;
				smp_mb();
				--tx_queue->stopped;
			}

			insert_ptr = (tx_queue->insert_count &
				      efx->type->txd_ring_mask);
			buffer = &tx_queue->buffer[insert_ptr];
			efx_tsoh_free(tx_queue, buffer);
			EFX_BUG_ON_PARANOID(buffer->tsoh);
			EFX_BUG_ON_PARANOID(buffer->skb);
			EFX_BUG_ON_PARANOID(buffer->len);
			EFX_BUG_ON_PARANOID(!buffer->continuation);
			EFX_BUG_ON_PARANOID(buffer->unmap_len);
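
			/* A single descriptor must not cross a DMA
			 * boundary the hardware cannot handle
			 * (tx_dma_mask), so split the mapping here; the
			 * bug5391 workaround further limits misaligned
			 * buffers to a 512-byte window. */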
			dma_len = (((~dma_addr) & efx->type->tx_dma_mask) + 1);
			if (likely(dma_len > len))
				dma_len = len;

			misalign = (unsigned)dma_addr & efx->type->bug5391_mask;
			if (misalign && dma_len + misalign > 512)
				dma_len = 512 - misalign;

			/* Fill out per descriptor fields */
			buffer->len = dma_len;
			buffer->dma_addr = dma_addr;
			len -= dma_len;
			dma_addr += dma_len;
			++tx_queue->insert_count;
		} while (len);

		/* Transfer ownership of the unmapping to the final buffer */
		buffer->unmap_single = unmap_single;
		buffer->unmap_len = unmap_len;
		unmap_len = 0;

		/* Get address and size of next fragment */
		if (i >= skb_shinfo(skb)->nr_frags)
			break;
		fragment = &skb_shinfo(skb)->frags[i];
		len = fragment->size;
		page = fragment->page;
		page_offset = fragment->page_offset;
		i++;
		/* Map for DMA */
		unmap_single = false;
		dma_addr = pci_map_page(pci_dev, page, page_offset, len,
					PCI_DMA_TODEVICE);
	}

	/* Transfer ownership of the skb to the final buffer */
	buffer->skb = skb;
	buffer->continuation = false;

	/* Pass off to hardware */
	falcon_push_buffers(tx_queue);

	return NETDEV_TX_OK;

 pci_err:
	EFX_ERR_RL(efx, " TX queue %d could not map skb with %d bytes %d "
		   "fragments for DMA\n", tx_queue->queue, skb->len,
		   skb_shinfo(skb)->nr_frags + 1);

	/* Mark the packet as transmitted, and free the SKB ourselves */
	dev_kfree_skb_any((struct sk_buff *)skb);
	goto unwind;

 stop:
	rc = NETDEV_TX_BUSY;

	if (tx_queue->stopped == 1)
		efx_stop_queue(efx);

 unwind:
	/* Work backwards until we hit the original insert pointer value */
	while (tx_queue->insert_count != tx_queue->write_count) {
		--tx_queue->insert_count;
		insert_ptr = tx_queue->insert_count & efx->type->txd_ring_mask;
		buffer = &tx_queue->buffer[insert_ptr];
		efx_dequeue_buffer(tx_queue, buffer);
		buffer->len = 0;
	}

	/* Free the fragment we were mid-way through pushing */
	if (unmap_len) {
		if (unmap_single)
			pci_unmap_single(pci_dev, unmap_addr, unmap_len,
					 PCI_DMA_TODEVICE);
		else
			pci_unmap_page(pci_dev, unmap_addr, unmap_len,
				       PCI_DMA_TODEVICE);
	}

	return rc;
}

/* Remove packets from the TX queue
 *
 * This removes packets from the TX queue, up to and including the
 * specified index.
 */
static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
				unsigned int index)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned int stop_index, read_ptr;
	unsigned int mask = tx_queue->efx->type->txd_ring_mask;

	stop_index = (index + 1) & mask;
	read_ptr = tx_queue->read_count & mask;

	while (read_ptr != stop_index) {
		struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];
		if (unlikely(buffer->len == 0)) {
			EFX_ERR(tx_queue->efx, "TX queue %d spurious TX "
				"completion id %x\n", tx_queue->queue,
				read_ptr);
			efx_schedule_reset(efx, RESET_TYPE_TX_SKIP);
			return;
		}

		efx_dequeue_buffer(tx_queue, buffer);
		buffer->continuation = true;
		buffer->len = 0;

		++tx_queue->read_count;
		read_ptr = tx_queue->read_count & mask;
	}
}

/* Initiate a packet transmission on the specified TX queue.
 * Note that returning anything other than NETDEV_TX_OK will cause the
 * OS to free the skb.
 *
 * This function is split out from efx_hard_start_xmit to allow the
 * loopback test to direct packets via specific TX queues.  It is
 * therefore a non-static inline, so as not to penalise performance
 * for non-loopback transmissions.
 *
 * Context: netif_tx_lock held
 */
inline int efx_xmit(struct efx_nic *efx,
		    struct efx_tx_queue *tx_queue, struct sk_buff *skb)
{
	int rc;

	/* Map fragments for DMA and add to TX queue */
	rc = efx_enqueue_skb(tx_queue, skb);
	if (unlikely(rc != NETDEV_TX_OK))
		return rc;

	/* Update last TX timer */
	efx->net_dev->trans_start = jiffies;

	return NETDEV_TX_OK;
}

/* Initiate a packet transmission.  We use one channel per CPU
 * (sharing when we have more CPUs than channels).  On Falcon, the TX
 * completion events will be directed back to the CPU that transmitted
 * the packet, which should be cache-efficient.
 *
 * Context: non-blocking.
 * Note that returning anything other than NETDEV_TX_OK will cause the
 * OS to free the skb.
 */
int efx_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_tx_queue *tx_queue;

	if (unlikely(efx->port_inhibited))
		return NETDEV_TX_BUSY;

	if (likely(skb->ip_summed == CHECKSUM_PARTIAL))
		tx_queue = &efx->tx_queue[EFX_TX_QUEUE_OFFLOAD_CSUM];
	else
		tx_queue = &efx->tx_queue[EFX_TX_QUEUE_NO_CSUM];
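
	/* The OFFLOAD_CSUM queue has hardware checksum generation enabled;
	 * the NO_CSUM queue leaves packets untouched, so checksums already
	 * completed by the stack are preserved. */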
	return efx_xmit(efx, tx_queue, skb);
}

void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
{
	unsigned fill_level;
	struct efx_nic *efx = tx_queue->efx;

	EFX_BUG_ON_PARANOID(index > efx->type->txd_ring_mask);

	efx_dequeue_buffers(tx_queue, index);

	/* See if we need to restart the netif queue.  This barrier
	 * separates the update of read_count from the test of
	 * stopped. */
	smp_mb();
	if (unlikely(tx_queue->stopped) && likely(efx->port_enabled)) {
		fill_level = tx_queue->insert_count - tx_queue->read_count;
		if (fill_level < EFX_NETDEV_TX_THRESHOLD(tx_queue)) {
			EFX_BUG_ON_PARANOID(!efx_dev_registered(efx));

			/* Do this under netif_tx_lock(), to avoid racing
			 * with efx_xmit(). */
			netif_tx_lock(efx->net_dev);
			if (tx_queue->stopped) {
				tx_queue->stopped = 0;
				efx_wake_queue(efx);
			}
			netif_tx_unlock(efx->net_dev);
		}
	}
}

int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned int txq_size;
	int i, rc;

	EFX_LOG(efx, "creating TX queue %d\n", tx_queue->queue);

	/* Allocate software ring */
	txq_size = (efx->type->txd_ring_mask + 1) * sizeof(*tx_queue->buffer);
	tx_queue->buffer = kzalloc(txq_size, GFP_KERNEL);
	if (!tx_queue->buffer)
		return -ENOMEM;
	for (i = 0; i <= efx->type->txd_ring_mask; ++i)
		tx_queue->buffer[i].continuation = true;

	/* Allocate hardware ring */
	rc = falcon_probe_tx(tx_queue);
	if (rc)
		goto fail;

	return 0;

 fail:
	kfree(tx_queue->buffer);
	tx_queue->buffer = NULL;
	return rc;
}

void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
{
	EFX_LOG(tx_queue->efx, "initialising TX queue %d\n", tx_queue->queue);
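
	/* insert_count is the next ring slot the driver will fill,
	 * write_count counts descriptors already pushed to hardware,
	 * read_count counts completions processed, and old_read_count
	 * is the TX path's cached snapshot of read_count. */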
	tx_queue->insert_count = 0;
	tx_queue->write_count = 0;
	tx_queue->read_count = 0;
	tx_queue->old_read_count = 0;
	BUG_ON(tx_queue->stopped);

	/* Set up TX descriptor ring */
	falcon_init_tx(tx_queue);
}

void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer;

	if (!tx_queue->buffer)
		return;

	/* Free any buffers left in the ring */
	while (tx_queue->read_count != tx_queue->write_count) {
		buffer = &tx_queue->buffer[tx_queue->read_count &
					   tx_queue->efx->type->txd_ring_mask];
		efx_dequeue_buffer(tx_queue, buffer);
		buffer->continuation = true;
		buffer->len = 0;

		++tx_queue->read_count;
	}
}

void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
{
	EFX_LOG(tx_queue->efx, "shutting down TX queue %d\n", tx_queue->queue);

	/* Flush TX queue, remove descriptor ring */
	falcon_fini_tx(tx_queue);

	efx_release_tx_buffers(tx_queue);

	/* Free up TSO header cache */
	efx_fini_tso(tx_queue);

	/* Release queue's stop on port, if any */
	if (tx_queue->stopped) {
		tx_queue->stopped = 0;
		efx_wake_queue(tx_queue->efx);
	}
}

void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
{
	EFX_LOG(tx_queue->efx, "destroying TX queue %d\n", tx_queue->queue);

	falcon_remove_tx(tx_queue);
	kfree(tx_queue->buffer);
	tx_queue->buffer = NULL;
}

/* Efx TCP segmentation acceleration.
 *
 * Why?  Because by doing it here in the driver we can go significantly
 * faster than the GSO.
 *
 * Requires TX checksum offload support.
 */

/* Number of bytes inserted at the start of a TSO header buffer,
 * similar to NET_IP_ALIGN.
 */
#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
#define TSOH_OFFSET	0
#else
#define TSOH_OFFSET	NET_IP_ALIGN
#endif

#define TSOH_BUFFER(tsoh)	((u8 *)(tsoh + 1) + TSOH_OFFSET)

/* Total size of struct efx_tso_header, buffer and padding */
#define TSOH_SIZE(hdr_len)					\
	(sizeof(struct efx_tso_header) + TSOH_OFFSET + hdr_len)

/* Size of blocks on free list.  Larger blocks must be allocated from
 * the heap.
 */
#define TSOH_STD_SIZE		128

#define PTR_DIFF(p1, p2)  ((u8 *)(p1) - (u8 *)(p2))
#define ETH_HDR_LEN(skb)  (skb_network_header(skb) - (skb)->data)
#define SKB_TCP_OFF(skb)  PTR_DIFF(tcp_hdr(skb), (skb)->data)
#define SKB_IPV4_OFF(skb) PTR_DIFF(ip_hdr(skb), (skb)->data)
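
/* For a plain TCP/IPv4 frame with no VLAN tag and no IP options,
 * ETH_HDR_LEN(skb) is 14, SKB_IPV4_OFF(skb) is 14 and SKB_TCP_OFF(skb)
 * is 34 (14-byte Ethernet header plus 20-byte IP header). */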

/**
 * struct tso_state - TSO state for an SKB
 * @out_len: Remaining length in current segment
 * @seqnum: Current sequence number
 * @ipv4_id: Current IPv4 ID, host endian
 * @packet_space: Remaining space in current packet
 * @dma_addr: DMA address of current position
 * @in_len: Remaining length in current SKB fragment
 * @unmap_len: Length of SKB fragment
 * @unmap_addr: DMA address of SKB fragment
 * @unmap_single: DMA single vs page mapping flag
 * @header_len: Number of bytes of header
 * @full_packet_size: Number of bytes to put in each outgoing segment
 *
 * The state used during segmentation.  It is put into this data structure
 * just to make it easy to pass into inline functions.
 */
struct tso_state {
	/* Output position */
	unsigned out_len;
	unsigned seqnum;
	unsigned ipv4_id;
	unsigned packet_space;

	/* Input position */
	dma_addr_t dma_addr;
	unsigned in_len;
	unsigned unmap_len;
	dma_addr_t unmap_addr;
	bool unmap_single;

	unsigned header_len;
	int full_packet_size;
};

/*
 * Verify that our various assumptions about sk_buffs and the conditions
 * under which TSO will be attempted hold true.
 */
static void efx_tso_check_safe(struct sk_buff *skb)
{
	__be16 protocol = skb->protocol;

	EFX_BUG_ON_PARANOID(((struct ethhdr *)skb->data)->h_proto !=
			    protocol);
	if (protocol == htons(ETH_P_8021Q)) {
		/* Find the encapsulated protocol; reset network header
		 * and transport header based on that. */
		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
		protocol = veh->h_vlan_encapsulated_proto;
		skb_set_network_header(skb, sizeof(*veh));
		if (protocol == htons(ETH_P_IP))
			skb_set_transport_header(skb, sizeof(*veh) +
						 4 * ip_hdr(skb)->ihl);
	}

	EFX_BUG_ON_PARANOID(protocol != htons(ETH_P_IP));
	EFX_BUG_ON_PARANOID(ip_hdr(skb)->protocol != IPPROTO_TCP);
	EFX_BUG_ON_PARANOID((PTR_DIFF(tcp_hdr(skb), skb->data)
			     + (tcp_hdr(skb)->doff << 2u)) >
			    skb_headlen(skb));
}

/*
 * Allocate a page worth of efx_tso_header structures, and string them
 * into the tx_queue->tso_headers_free linked list.  Return 0 or -ENOMEM.
 */
static int efx_tsoh_block_alloc(struct efx_tx_queue *tx_queue)
{
	struct pci_dev *pci_dev = tx_queue->efx->pci_dev;
	struct efx_tso_header *tsoh;
	dma_addr_t dma_addr;
	u8 *base_kva, *kva;

	base_kva = pci_alloc_consistent(pci_dev, PAGE_SIZE, &dma_addr);
	if (base_kva == NULL) {
		EFX_ERR(tx_queue->efx, "Unable to allocate page for TSO"
			" headers\n");
		return -ENOMEM;
	}

	/* pci_alloc_consistent() allocates pages. */
	EFX_BUG_ON_PARANOID(dma_addr & (PAGE_SIZE - 1u));

	for (kva = base_kva; kva < base_kva + PAGE_SIZE; kva += TSOH_STD_SIZE) {
		tsoh = (struct efx_tso_header *)kva;
		tsoh->dma_addr = dma_addr + (TSOH_BUFFER(tsoh) - base_kva);
		tsoh->next = tx_queue->tso_headers_free;
		tx_queue->tso_headers_free = tsoh;
	}

	return 0;
}

/* Free up a TSO header, and all others in the same page. */
static void efx_tsoh_block_free(struct efx_tx_queue *tx_queue,
				struct efx_tso_header *tsoh,
				struct pci_dev *pci_dev)
{
	struct efx_tso_header **p;
	unsigned long base_kva;
	dma_addr_t base_dma;

	base_kva = (unsigned long)tsoh & PAGE_MASK;
	base_dma = tsoh->dma_addr & PAGE_MASK;

	p = &tx_queue->tso_headers_free;
	while (*p != NULL) {
		if (((unsigned long)*p & PAGE_MASK) == base_kva)
			*p = (*p)->next;
		else
			p = &(*p)->next;
	}

	pci_free_consistent(pci_dev, PAGE_SIZE, (void *)base_kva, base_dma);
}

static struct efx_tso_header *
efx_tsoh_heap_alloc(struct efx_tx_queue *tx_queue, size_t header_len)
{
	struct efx_tso_header *tsoh;

	tsoh = kmalloc(TSOH_SIZE(header_len), GFP_ATOMIC | GFP_DMA);
	if (unlikely(!tsoh))
		return NULL;

	tsoh->dma_addr = pci_map_single(tx_queue->efx->pci_dev,
					TSOH_BUFFER(tsoh), header_len,
					PCI_DMA_TODEVICE);
	if (unlikely(pci_dma_mapping_error(tx_queue->efx->pci_dev,
					   tsoh->dma_addr))) {
		kfree(tsoh);
		return NULL;
	}

	tsoh->unmap_len = header_len;
	return tsoh;
}

static void
efx_tsoh_heap_free(struct efx_tx_queue *tx_queue, struct efx_tso_header *tsoh)
{
	pci_unmap_single(tx_queue->efx->pci_dev,
			 tsoh->dma_addr, tsoh->unmap_len,
			 PCI_DMA_TODEVICE);
	kfree(tsoh);
}

/**
 * efx_tx_queue_insert - push descriptors onto the TX queue
 * @tx_queue:		Efx TX queue
 * @dma_addr:		DMA address of fragment
 * @len:		Length of fragment
 * @final_buffer:	The final buffer inserted into the queue
 *
 * Push descriptors onto the TX queue.  Return 0 on success or 1 if
 * @tx_queue full.
 */
static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
			       dma_addr_t dma_addr, unsigned len,
			       struct efx_tx_buffer **final_buffer)
{
	struct efx_tx_buffer *buffer;
	struct efx_nic *efx = tx_queue->efx;
	unsigned dma_len, fill_level, insert_ptr, misalign;
	int q_space;

	EFX_BUG_ON_PARANOID(len <= 0);

	fill_level = tx_queue->insert_count - tx_queue->old_read_count;
	/* -1 as there is no way to represent all descriptors used */
	q_space = efx->type->txd_ring_mask - 1 - fill_level;

	while (1) {
		if (unlikely(q_space-- <= 0)) {
			/* It might be that completions have happened
			 * since the xmit path last checked.  Update
			 * the xmit path's copy of read_count.
			 */
			++tx_queue->stopped;
			/* This memory barrier protects the change of
			 * stopped from the access of read_count. */
			smp_mb();
			tx_queue->old_read_count =
				*(volatile unsigned *)&tx_queue->read_count;
			fill_level = (tx_queue->insert_count
				      - tx_queue->old_read_count);
			q_space = efx->type->txd_ring_mask - 1 - fill_level;
			if (unlikely(q_space-- <= 0)) {
				*final_buffer = NULL;
				return 1;
			}
			smp_mb();
			--tx_queue->stopped;
		}

		insert_ptr = tx_queue->insert_count & efx->type->txd_ring_mask;
		buffer = &tx_queue->buffer[insert_ptr];
		++tx_queue->insert_count;

		EFX_BUG_ON_PARANOID(tx_queue->insert_count -
				    tx_queue->read_count >
				    efx->type->txd_ring_mask);

		efx_tsoh_free(tx_queue, buffer);
		EFX_BUG_ON_PARANOID(buffer->len);
		EFX_BUG_ON_PARANOID(buffer->unmap_len);
		EFX_BUG_ON_PARANOID(buffer->skb);
		EFX_BUG_ON_PARANOID(!buffer->continuation);
		EFX_BUG_ON_PARANOID(buffer->tsoh);

		buffer->dma_addr = dma_addr;

		/* Ensure we do not cross a boundary unsupported by H/W */
		dma_len = (~dma_addr & efx->type->tx_dma_mask) + 1;

		misalign = (unsigned)dma_addr & efx->type->bug5391_mask;
		if (misalign && dma_len + misalign > 512)
			dma_len = 512 - misalign;

		/* If there is enough space to send then do so */
		if (dma_len >= len)
			break;

		buffer->len = dma_len; /* Don't set the other members */
		buffer->continuation = true;
		dma_addr += dma_len;
		len -= dma_len;
	}

	EFX_BUG_ON_PARANOID(!len);
	buffer->len = len;
	*final_buffer = buffer;
	return 0;
}

/*
 * Put a TSO header into the TX queue.
 *
 * This is special-cased because we know that it is small enough to fit in
 * a single fragment, and we know it doesn't cross a page boundary.  It
 * also allows us to not worry about end-of-packet etc.
 */
static void efx_tso_put_header(struct efx_tx_queue *tx_queue,
			       struct efx_tso_header *tsoh, unsigned len)
{
	struct efx_tx_buffer *buffer;

	buffer = &tx_queue->buffer[tx_queue->insert_count &
				   tx_queue->efx->type->txd_ring_mask];
	efx_tsoh_free(tx_queue, buffer);
	EFX_BUG_ON_PARANOID(buffer->len);
	EFX_BUG_ON_PARANOID(buffer->unmap_len);
	EFX_BUG_ON_PARANOID(buffer->skb);
	EFX_BUG_ON_PARANOID(!buffer->continuation);
	EFX_BUG_ON_PARANOID(buffer->tsoh);
	buffer->len = len;
	buffer->dma_addr = tsoh->dma_addr;
	buffer->tsoh = tsoh;

	++tx_queue->insert_count;
}

/* Remove descriptors put into a tx_queue. */
static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer;
	dma_addr_t unmap_addr;

	/* Work backwards until we hit the original insert pointer value */
	while (tx_queue->insert_count != tx_queue->write_count) {
		--tx_queue->insert_count;
		buffer = &tx_queue->buffer[tx_queue->insert_count &
					   tx_queue->efx->type->txd_ring_mask];
		efx_tsoh_free(tx_queue, buffer);
		EFX_BUG_ON_PARANOID(buffer->skb);
		buffer->continuation = true;
		if (buffer->unmap_len) {
			unmap_addr = (buffer->dma_addr + buffer->len -
				      buffer->unmap_len);
			if (buffer->unmap_single)
				pci_unmap_single(tx_queue->efx->pci_dev,
						 unmap_addr, buffer->unmap_len,
						 PCI_DMA_TODEVICE);
			else
				pci_unmap_page(tx_queue->efx->pci_dev,
					       unmap_addr, buffer->unmap_len,
					       PCI_DMA_TODEVICE);
			buffer->unmap_len = 0;
		}
		buffer->len = 0;
	}
}

/* Parse the SKB header and initialise state. */
static void tso_start(struct tso_state *st, const struct sk_buff *skb)
{
	/* All ethernet/IP/TCP headers combined size is TCP header size
	 * plus offset of TCP header relative to start of packet.
	 */
	st->header_len = ((tcp_hdr(skb)->doff << 2u)
			  + PTR_DIFF(tcp_hdr(skb), skb->data));
	st->full_packet_size = st->header_len + skb_shinfo(skb)->gso_size;

	st->ipv4_id = ntohs(ip_hdr(skb)->id);
	st->seqnum = ntohl(tcp_hdr(skb)->seq);

	EFX_BUG_ON_PARANOID(tcp_hdr(skb)->urg);
	EFX_BUG_ON_PARANOID(tcp_hdr(skb)->syn);
	EFX_BUG_ON_PARANOID(tcp_hdr(skb)->rst);

	st->packet_space = st->full_packet_size;
	st->out_len = skb->len - st->header_len;
	st->unmap_len = 0;
	st->unmap_single = false;
}
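
/* Map a single payload fragment of the skb for DMA, recording the
 * mapping so that it can be unmapped later or handed to the final
 * descriptor of the fragment. */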
static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
			    skb_frag_t *frag)
{
	st->unmap_addr = pci_map_page(efx->pci_dev, frag->page,
				      frag->page_offset, frag->size,
				      PCI_DMA_TODEVICE);
	if (likely(!pci_dma_mapping_error(efx->pci_dev, st->unmap_addr))) {
		st->unmap_single = false;
		st->unmap_len = frag->size;
		st->in_len = frag->size;
		st->dma_addr = st->unmap_addr;
		return 0;
	}
	return -ENOMEM;
}

static int tso_get_head_fragment(struct tso_state *st, struct efx_nic *efx,
				 const struct sk_buff *skb)
{
	int hl = st->header_len;
	int len = skb_headlen(skb) - hl;

	st->unmap_addr = pci_map_single(efx->pci_dev, skb->data + hl,
					len, PCI_DMA_TODEVICE);
	if (likely(!pci_dma_mapping_error(efx->pci_dev, st->unmap_addr))) {
		st->unmap_single = true;
		st->unmap_len = len;
		st->in_len = len;
		st->dma_addr = st->unmap_addr;
		return 0;
	}
	return -ENOMEM;
}

/**
 * tso_fill_packet_with_fragment - form descriptors for the current fragment
 * @tx_queue:		Efx TX queue
 * @skb:		Socket buffer
 * @st:			TSO state
 *
 * Form descriptors for the current fragment, until we reach the end
 * of fragment or end-of-packet.  Return 0 on success, 1 if not enough
 * space in @tx_queue.
 */
static int tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
					 const struct sk_buff *skb,
					 struct tso_state *st)
{
	struct efx_tx_buffer *buffer;
	int n, end_of_packet, rc;

	if (st->in_len == 0)
		return 0;
	if (st->packet_space == 0)
		return 0;

	EFX_BUG_ON_PARANOID(st->in_len <= 0);
	EFX_BUG_ON_PARANOID(st->packet_space <= 0);

	n = min(st->in_len, st->packet_space);

	st->packet_space -= n;
	st->out_len -= n;
	st->in_len -= n;

	rc = efx_tx_queue_insert(tx_queue, st->dma_addr, n, &buffer);
	if (likely(rc == 0)) {
		if (st->out_len == 0)
			/* Transfer ownership of the skb */
			buffer->skb = skb;

		end_of_packet = st->out_len == 0 || st->packet_space == 0;
		buffer->continuation = !end_of_packet;

		if (st->in_len == 0) {
			/* Transfer ownership of the pci mapping */
			buffer->unmap_len = st->unmap_len;
			buffer->unmap_single = st->unmap_single;
			st->unmap_len = 0;
		}
	}

	st->dma_addr += n;
	return rc;
}

/**
 * tso_start_new_packet - generate a new header and prepare for the new packet
 * @tx_queue:		Efx TX queue
 * @skb:		Socket buffer
 * @st:			TSO state
 *
 * Generate a new header and prepare for the new packet.  Return 0 on
 * success, or -1 if failed to alloc header.
 */
static int tso_start_new_packet(struct efx_tx_queue *tx_queue,
				const struct sk_buff *skb,
				struct tso_state *st)
{
	struct efx_tso_header *tsoh;
	struct iphdr *tsoh_iph;
	struct tcphdr *tsoh_th;
	int ip_length;
	u8 *header;

	/* Allocate a DMA-mapped header buffer. */
	if (likely(TSOH_SIZE(st->header_len) <= TSOH_STD_SIZE)) {
		if (tx_queue->tso_headers_free == NULL) {
			if (efx_tsoh_block_alloc(tx_queue))
				return -1;
		}
		EFX_BUG_ON_PARANOID(!tx_queue->tso_headers_free);
		tsoh = tx_queue->tso_headers_free;
		tx_queue->tso_headers_free = tsoh->next;
		tsoh->unmap_len = 0;
	} else {
		tx_queue->tso_long_headers++;
		tsoh = efx_tsoh_heap_alloc(tx_queue, st->header_len);
		if (unlikely(!tsoh))
			return -1;
	}

	header = TSOH_BUFFER(tsoh);
	tsoh_th = (struct tcphdr *)(header + SKB_TCP_OFF(skb));
	tsoh_iph = (struct iphdr *)(header + SKB_IPV4_OFF(skb));

	/* Copy and update the headers. */
	memcpy(header, skb->data, st->header_len);

	tsoh_th->seq = htonl(st->seqnum);
	st->seqnum += skb_shinfo(skb)->gso_size;
	if (st->out_len > skb_shinfo(skb)->gso_size) {
		/* This packet will not finish the TSO burst. */
		ip_length = st->full_packet_size - ETH_HDR_LEN(skb);
		tsoh_th->fin = 0;
		tsoh_th->psh = 0;
	} else {
		/* This packet will be the last in the TSO burst. */
		ip_length = st->header_len - ETH_HDR_LEN(skb) + st->out_len;
		tsoh_th->fin = tcp_hdr(skb)->fin;
		tsoh_th->psh = tcp_hdr(skb)->psh;
	}
	tsoh_iph->tot_len = htons(ip_length);

	/* Linux leaves suitable gaps in the IP ID space for us to fill. */
	tsoh_iph->id = htons(st->ipv4_id);
	st->ipv4_id++;

	st->packet_space = skb_shinfo(skb)->gso_size;
	++tx_queue->tso_packets;

	/* Form a descriptor for this header. */
	efx_tso_put_header(tx_queue, tsoh, st->header_len);

	return 0;
}

/**
 * efx_enqueue_skb_tso - segment and transmit a TSO socket buffer
 * @tx_queue:		Efx TX queue
 * @skb:		Socket buffer
 *
 * Context: You must hold netif_tx_lock() to call this function.
 *
 * Add socket buffer @skb to @tx_queue, doing TSO or return != 0 if
 * @skb was not enqueued.  In all cases @skb is consumed.  Return
 * %NETDEV_TX_OK or %NETDEV_TX_BUSY.
 */
static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
			       struct sk_buff *skb)
{
	struct efx_nic *efx = tx_queue->efx;
	int frag_i, rc, rc2 = NETDEV_TX_OK;
	struct tso_state state;

	/* Verify TSO is safe - these checks should never fail. */
	efx_tso_check_safe(skb);

	EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);

	tso_start(&state, skb);

	/* Assume that skb header area contains exactly the headers, and
	 * all payload is in the frag list.
	 */
	if (skb_headlen(skb) == state.header_len) {
		/* Grab the first payload fragment. */
		EFX_BUG_ON_PARANOID(skb_shinfo(skb)->nr_frags < 1);
		frag_i = 0;
		rc = tso_get_fragment(&state, efx,
				      skb_shinfo(skb)->frags + frag_i);
		if (rc)
			goto mem_err;
	} else {
		rc = tso_get_head_fragment(&state, efx, skb);
		if (rc)
			goto mem_err;
		frag_i = -1;
	}

	if (tso_start_new_packet(tx_queue, skb, &state) < 0)
		goto mem_err;

	while (1) {
		rc = tso_fill_packet_with_fragment(tx_queue, skb, &state);
		if (unlikely(rc))
			goto stop;

		/* Move onto the next fragment? */
		if (state.in_len == 0) {
			if (++frag_i >= skb_shinfo(skb)->nr_frags)
				/* End of payload reached. */
				break;
			rc = tso_get_fragment(&state, efx,
					      skb_shinfo(skb)->frags + frag_i);
			if (rc)
				goto mem_err;
		}

		/* Start at new packet? */
		if (state.packet_space == 0 &&
		    tso_start_new_packet(tx_queue, skb, &state) < 0)
			goto mem_err;
	}

	/* Pass off to hardware */
	falcon_push_buffers(tx_queue);

	tx_queue->tso_bursts++;
	return NETDEV_TX_OK;

 mem_err:
	EFX_ERR(efx, "Out of memory for TSO headers, or PCI mapping error\n");
	dev_kfree_skb_any((struct sk_buff *)skb);
	goto unwind;

 stop:
	rc2 = NETDEV_TX_BUSY;

	/* Stop the queue if it wasn't stopped before. */
	if (tx_queue->stopped == 1)
		efx_stop_queue(efx);

 unwind:
	/* Free the DMA mapping we were in the process of writing out */
	if (state.unmap_len) {
		if (state.unmap_single)
			pci_unmap_single(efx->pci_dev, state.unmap_addr,
					 state.unmap_len, PCI_DMA_TODEVICE);
		else
			pci_unmap_page(efx->pci_dev, state.unmap_addr,
				       state.unmap_len, PCI_DMA_TODEVICE);
	}

	efx_enqueue_unwind(tx_queue);
	return rc2;
}

/*
 * Free up all TSO datastructures associated with tx_queue. This
 * routine should be called only once the tx_queue is both empty and
 * will no longer be used.
 */
static void efx_fini_tso(struct efx_tx_queue *tx_queue)
{
	unsigned i;

	if (tx_queue->buffer) {
		for (i = 0; i <= tx_queue->efx->type->txd_ring_mask; ++i)
			efx_tsoh_free(tx_queue, &tx_queue->buffer[i]);
	}

	while (tx_queue->tso_headers_free != NULL)
		efx_tsoh_block_free(tx_queue, tx_queue->tso_headers_free,
				    tx_queue->efx->pci_dev);
}