/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Driver
 * Copyright(c) 2013 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/

#include "i40e.h"
static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
                                u32 td_tag)
{
        return cpu_to_le64(I40E_TX_DESC_DTYPE_DATA |
                           ((u64)td_cmd << I40E_TXD_QW1_CMD_SHIFT) |
                           ((u64)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) |
                           ((u64)size << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) |
                           ((u64)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT));
}
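/* A worked sketch of what build_ctob() produces (example values, not taken
 * from this driver's own documentation): with td_cmd = I40E_TX_DESC_CMD_EOP |
 * I40E_TX_DESC_CMD_RS, td_offset = 0, size = 512 and td_tag = 0, each value
 * is shifted into its own field of the single 64-bit descriptor quad word,
 * alongside the I40E_TX_DESC_DTYPE_DATA type bits, so the hardware reads the
 * command, header offsets, buffer length and L2 tag from one word. The exact
 * bit positions are whatever the I40E_TXD_QW1_*_SHIFT macros define.
 */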
/**
 * i40e_program_fdir_filter - Program a Flow Director filter
 * @fdir_data: Packet data that will be filter parameters
 * @pf: The pf pointer
 * @add: True for add/update, False for remove
 **/
int i40e_program_fdir_filter(struct i40e_fdir_data *fdir_data,
                             struct i40e_pf *pf, bool add)
{
        struct i40e_filter_program_desc *fdir_desc;
        struct i40e_tx_buffer *tx_buf;
        struct i40e_tx_desc *tx_desc;
        struct i40e_ring *tx_ring;
        struct i40e_vsi *vsi;
        struct device *dev;
        dma_addr_t dma;
        u32 td_cmd = 0;
        u16 i;

        /* find existing FDIR VSI */
        vsi = NULL;
        for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
                if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR)
                        vsi = pf->vsi[i];
        if (!vsi)
                return -ENOENT;

        tx_ring = &vsi->tx_rings[0];
        dev = tx_ring->dev;

        dma = dma_map_single(dev, fdir_data->raw_packet,
                             I40E_FDIR_MAX_RAW_PACKET_LOOKUP, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, dma))
                goto dma_fail;

        /* grab the next descriptor */
        fdir_desc = I40E_TX_FDIRDESC(tx_ring, tx_ring->next_to_use);
        tx_buf = &tx_ring->tx_bi[tx_ring->next_to_use];
        tx_ring->next_to_use++;
        if (tx_ring->next_to_use == tx_ring->count)
                tx_ring->next_to_use = 0;

        fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32((fdir_data->q_index
                                            << I40E_TXD_FLTR_QW0_QINDEX_SHIFT)
                                            & I40E_TXD_FLTR_QW0_QINDEX_MASK);

        fdir_desc->qindex_flex_ptype_vsi |= cpu_to_le32((fdir_data->flex_off
                                           << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT)
                                           & I40E_TXD_FLTR_QW0_FLEXOFF_MASK);

        fdir_desc->qindex_flex_ptype_vsi |= cpu_to_le32((fdir_data->pctype
                                            << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT)
                                            & I40E_TXD_FLTR_QW0_PCTYPE_MASK);

        /* Use LAN VSI Id if not programmed by user */
        if (fdir_data->dest_vsi == 0)
                fdir_desc->qindex_flex_ptype_vsi |=
                                  cpu_to_le32((pf->vsi[pf->lan_vsi]->id)
                                   << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT);
        else
                fdir_desc->qindex_flex_ptype_vsi |=
                                  cpu_to_le32((fdir_data->dest_vsi
                                    << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT)
                                    & I40E_TXD_FLTR_QW0_DEST_VSI_MASK);

        fdir_desc->dtype_cmd_cntindex =
                            cpu_to_le32(I40E_TX_DESC_DTYPE_FILTER_PROG);

        if (add)
                fdir_desc->dtype_cmd_cntindex |= cpu_to_le32(
                               I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE
                                << I40E_TXD_FLTR_QW1_PCMD_SHIFT);
        else
                fdir_desc->dtype_cmd_cntindex |= cpu_to_le32(
                                   I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE
                                   << I40E_TXD_FLTR_QW1_PCMD_SHIFT);

        fdir_desc->dtype_cmd_cntindex |= cpu_to_le32((fdir_data->dest_ctl
                                          << I40E_TXD_FLTR_QW1_DEST_SHIFT)
                                          & I40E_TXD_FLTR_QW1_DEST_MASK);

        fdir_desc->dtype_cmd_cntindex |= cpu_to_le32(
                    (fdir_data->fd_status << I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT)
                     & I40E_TXD_FLTR_QW1_FD_STATUS_MASK);

        if (fdir_data->cnt_index != 0) {
                fdir_desc->dtype_cmd_cntindex |=
                                cpu_to_le32(I40E_TXD_FLTR_QW1_CNT_ENA_MASK);
                fdir_desc->dtype_cmd_cntindex |=
                                cpu_to_le32((fdir_data->cnt_index
                                << I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT)
                                & I40E_TXD_FLTR_QW1_CNTINDEX_MASK);
        }

        fdir_desc->fd_id = cpu_to_le32(fdir_data->fd_id);

        /* Now program a dummy descriptor */
        tx_desc = I40E_TX_DESC(tx_ring, tx_ring->next_to_use);
        tx_buf = &tx_ring->tx_bi[tx_ring->next_to_use];
        tx_ring->next_to_use++;
        if (tx_ring->next_to_use == tx_ring->count)
                tx_ring->next_to_use = 0;

        tx_desc->buffer_addr = cpu_to_le64(dma);
        td_cmd = I40E_TX_DESC_CMD_EOP |
                 I40E_TX_DESC_CMD_RS  |
                 I40E_TX_DESC_CMD_DUMMY;

        tx_desc->cmd_type_offset_bsz =
                build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_LOOKUP, 0);

        /* Mark the data descriptor to be watched */
        tx_buf->next_to_watch = tx_desc;

        /* Force memory writes to complete before letting h/w
         * know there are new descriptors to fetch.  (Only
         * applicable for weak-ordered memory model archs,
         * such as IA-64).
         */
        wmb();

        writel(tx_ring->next_to_use, tx_ring->tail);
        return 0;

dma_fail:
        return -1;
}
/**
 * i40e_fd_handle_status - check the Programming Status for FD
 * @rx_ring: the Rx ring for this descriptor
 * @qw: the descriptor data
 * @prog_id: the id originally used for programming
 *
 * This is used to verify if the FD programming or invalidation
 * requested by SW to the HW is successful or not and take actions accordingly.
 **/
static void i40e_fd_handle_status(struct i40e_ring *rx_ring, u32 qw, u8 prog_id)
{
        struct pci_dev *pdev = rx_ring->vsi->back->pdev;
        u32 error;

        error = (qw & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
                I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;

        /* for now just print the Status */
        dev_info(&pdev->dev, "FD programming id %02x, Status %08x\n",
                 prog_id, error);
}
/**
 * i40e_unmap_tx_resource - Release a Tx buffer
 * @ring:      the ring that owns the buffer
 * @tx_buffer: the buffer to free
 **/
static inline void i40e_unmap_tx_resource(struct i40e_ring *ring,
                                          struct i40e_tx_buffer *tx_buffer)
{
        if (tx_buffer->dma) {
                if (tx_buffer->tx_flags & I40E_TX_FLAGS_MAPPED_AS_PAGE)
                        dma_unmap_page(ring->dev, tx_buffer->dma,
                                       tx_buffer->length, DMA_TO_DEVICE);
                else
                        dma_unmap_single(ring->dev, tx_buffer->dma,
                                         tx_buffer->length, DMA_TO_DEVICE);
        }
        tx_buffer->dma = 0;
        tx_buffer->time_stamp = 0;
}
/**
 * i40e_clean_tx_ring - Free any empty Tx buffers
 * @tx_ring: ring to be cleaned
 **/
void i40e_clean_tx_ring(struct i40e_ring *tx_ring)
{
        struct i40e_tx_buffer *tx_buffer;
        unsigned long bi_size;
        u16 i;

        /* ring already cleared, nothing to do */
        if (!tx_ring->tx_bi)
                return;

        /* Free all the Tx ring sk_buffs */
        for (i = 0; i < tx_ring->count; i++) {
                tx_buffer = &tx_ring->tx_bi[i];
                i40e_unmap_tx_resource(tx_ring, tx_buffer);
                if (tx_buffer->skb)
                        dev_kfree_skb_any(tx_buffer->skb);
                tx_buffer->skb = NULL;
        }

        bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
        memset(tx_ring->tx_bi, 0, bi_size);

        /* Zero out the descriptor ring */
        memset(tx_ring->desc, 0, tx_ring->size);

        tx_ring->next_to_use = 0;
        tx_ring->next_to_clean = 0;
}
/**
 * i40e_free_tx_resources - Free Tx resources per queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
void i40e_free_tx_resources(struct i40e_ring *tx_ring)
{
        i40e_clean_tx_ring(tx_ring);
        kfree(tx_ring->tx_bi);
        tx_ring->tx_bi = NULL;

        if (tx_ring->desc) {
                dma_free_coherent(tx_ring->dev, tx_ring->size,
                                  tx_ring->desc, tx_ring->dma);
                tx_ring->desc = NULL;
        }
}
/**
 * i40e_get_tx_pending - how many tx descriptors not processed
 * @ring: the ring of descriptors
 *
 * Since there is no access to the ring head register
 * in XL710, we need to use our local copies
 **/
static u32 i40e_get_tx_pending(struct i40e_ring *ring)
{
        u32 ntu = ((ring->next_to_clean <= ring->next_to_use)
                        ? ring->next_to_use
                        : ring->next_to_use + ring->count);

        return ntu - ring->next_to_clean;
}
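/* Worked example of the pending count above (illustrative numbers only): on
 * a ring with count = 512, next_to_use = 10 and next_to_clean = 500 the
 * producer index has wrapped, so ntu = 10 + 512 = 522 and the number of
 * descriptors still owned by hardware is 522 - 500 = 22. Without the wrap
 * adjustment the subtraction would underflow.
 */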
/**
 * i40e_check_tx_hang - Is there a hang in the Tx queue
 * @tx_ring: the ring of descriptors
 **/
static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
{
        u32 tx_pending = i40e_get_tx_pending(tx_ring);
        bool ret = false;

        clear_check_for_tx_hang(tx_ring);

        /* Check for a hung queue, but be thorough. This verifies
         * that a transmit has been completed since the previous
         * check AND there is at least one packet pending. The
         * ARMED bit is set to indicate a potential hang. The
         * bit is cleared if a pause frame is received to remove
         * false hang detection due to PFC or 802.3x frames. By
         * requiring this to fail twice we avoid races with
         * PFC clearing the ARMED bit and conditions where we
         * run the check_tx_hang logic with a transmit completion
         * pending but without time to complete it yet.
         */
        if ((tx_ring->tx_stats.tx_done_old == tx_ring->tx_stats.packets) &&
            tx_pending) {
                /* make sure it is true for two checks in a row */
                ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED,
                                       &tx_ring->state);
        } else {
                /* update completed stats and disarm the hang check */
                tx_ring->tx_stats.tx_done_old = tx_ring->tx_stats.packets;
                clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state);
        }

        return ret;
}
/**
 * i40e_clean_tx_irq - Reclaim resources after transmit completes
 * @tx_ring:  tx ring to clean
 * @budget:   how many cleans we're allowed
 *
 * Returns true if there's any budget left (e.g. the clean is finished)
 **/
static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
{
        u16 i = tx_ring->next_to_clean;
        struct i40e_tx_buffer *tx_buf;
        struct i40e_tx_desc *tx_desc;
        unsigned int total_packets = 0;
        unsigned int total_bytes = 0;

        tx_buf = &tx_ring->tx_bi[i];
        tx_desc = I40E_TX_DESC(tx_ring, i);

        for (; budget; budget--) {
                struct i40e_tx_desc *eop_desc;

                eop_desc = tx_buf->next_to_watch;

                /* if next_to_watch is not set then there is no work pending */
                if (!eop_desc)
                        break;

                /* if the descriptor isn't done, no work yet to do */
                if (!(eop_desc->cmd_type_offset_bsz &
                      cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE)))
                        break;

                /* count the packet as being completed */
                tx_ring->tx_stats.completed++;
                tx_buf->next_to_watch = NULL;
                tx_buf->time_stamp = 0;

                /* set memory barrier before eop_desc is verified */
                rmb();

                do {
                        i40e_unmap_tx_resource(tx_ring, tx_buf);

                        /* clear dtype status */
                        tx_desc->cmd_type_offset_bsz &=
                                ~cpu_to_le64(I40E_TXD_QW1_DTYPE_MASK);

                        if (likely(tx_desc == eop_desc)) {
                                eop_desc = NULL;

                                dev_kfree_skb_any(tx_buf->skb);
                                tx_buf->skb = NULL;

                                total_bytes += tx_buf->bytecount;
                                total_packets += tx_buf->gso_segs;
                        }

                        tx_buf++;
                        tx_desc++;
                        i++;
                        if (unlikely(i == tx_ring->count)) {
                                i = 0;
                                tx_buf = tx_ring->tx_bi;
                                tx_desc = I40E_TX_DESC(tx_ring, 0);
                        }
                } while (eop_desc);
        }

        tx_ring->next_to_clean = i;
        tx_ring->tx_stats.bytes += total_bytes;
        tx_ring->tx_stats.packets += total_packets;
        tx_ring->q_vector->tx.total_bytes += total_bytes;
        tx_ring->q_vector->tx.total_packets += total_packets;
        if (check_for_tx_hang(tx_ring) && i40e_check_tx_hang(tx_ring)) {
                /* schedule immediate reset if we believe we hung */
                dev_info(tx_ring->dev, "Detected Tx Unit Hang\n"
                         "  Tx Queue             <%d>\n"
                         "  next_to_use          <%x>\n"
                         "  next_to_clean        <%x>\n",
                         tx_ring->queue_index,
                         tx_ring->next_to_use, i);
                dev_info(tx_ring->dev, "tx_bi[next_to_clean]\n"
                         "  time_stamp           <%lx>\n"
                         "  jiffies              <%lx>\n",
                         tx_ring->tx_bi[i].time_stamp, jiffies);

                netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);

                dev_info(tx_ring->dev,
                         "tx hang detected on queue %d, resetting adapter\n",
                         tx_ring->queue_index);

                tx_ring->netdev->netdev_ops->ndo_tx_timeout(tx_ring->netdev);

                /* the adapter is about to reset, no point in enabling stuff */
                return true;
        }

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
        if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
                     (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
                /* Make sure that anybody stopping the queue after this
                 * sees the new next_to_clean.
                 */
                smp_mb();
                if (__netif_subqueue_stopped(tx_ring->netdev,
                                             tx_ring->queue_index) &&
                    !test_bit(__I40E_DOWN, &tx_ring->vsi->state)) {
                        netif_wake_subqueue(tx_ring->netdev,
                                            tx_ring->queue_index);
                        ++tx_ring->tx_stats.restart_queue;
                }
        }

        return budget > 0;
}
/**
 * i40e_set_new_dynamic_itr - Find new ITR level
 * @rc: structure containing ring performance data
 *
 * Stores a new ITR value based on packets and byte counts during
 * the last interrupt.  The advantage of per interrupt computation
 * is faster updates and more accurate ITR for the current traffic
 * pattern.  Constants in this function were computed based on
 * theoretical maximum wire speed and thresholds were set based on
 * testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 **/
static void i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
{
        enum i40e_latency_range new_latency_range = rc->latency_range;
        u32 new_itr = rc->itr;
        int bytes_per_int;

        if (rc->total_packets == 0 || !rc->itr)
                return;

        /* simple throttlerate management
         *   0-10MB/s   lowest (100000 ints/s)
         *  10-20MB/s   low    (20000 ints/s)
         *  20-1249MB/s bulk   (8000 ints/s)
         */
        bytes_per_int = rc->total_bytes / rc->itr;
        switch (new_latency_range) {
        case I40E_LOWEST_LATENCY:
                if (bytes_per_int > 10)
                        new_latency_range = I40E_LOW_LATENCY;
                break;
        case I40E_LOW_LATENCY:
                if (bytes_per_int > 20)
                        new_latency_range = I40E_BULK_LATENCY;
                else if (bytes_per_int <= 10)
                        new_latency_range = I40E_LOWEST_LATENCY;
                break;
        case I40E_BULK_LATENCY:
                if (bytes_per_int <= 20)
                        rc->latency_range = I40E_LOW_LATENCY;
                break;
        }

        switch (new_latency_range) {
        case I40E_LOWEST_LATENCY:
                new_itr = I40E_ITR_100K;
                break;
        case I40E_LOW_LATENCY:
                new_itr = I40E_ITR_20K;
                break;
        case I40E_BULK_LATENCY:
                new_itr = I40E_ITR_8K;
                break;
        default:
                break;
        }

        if (new_itr != rc->itr) {
                /* do an exponential smoothing */
                new_itr = (10 * new_itr * rc->itr) /
                          ((9 * new_itr) + rc->itr);
                rc->itr = new_itr & I40E_MAX_ITR;
        }

        rc->total_bytes = 0;
        rc->total_packets = 0;
}
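/* Worked example of the exponential smoothing above (illustrative numbers
 * only): if the current rc->itr is 100 and the selected target new_itr is 8
 * (both in ITR register units), the update computes
 *   (10 * 8 * 100) / ((9 * 8) + 100) = 8000 / 172 = 46,
 * so the throttle value moves strongly toward the new target in one pass
 * while still being damped by the previous setting, which keeps a single
 * bursty interrupt from swinging the interrupt rate all at once.
 */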
/**
 * i40e_update_dynamic_itr - Adjust ITR based on bytes per int
 * @q_vector: the vector to adjust
 **/
static void i40e_update_dynamic_itr(struct i40e_q_vector *q_vector)
{
        u16 vector = q_vector->vsi->base_vector + q_vector->v_idx;
        struct i40e_hw *hw = &q_vector->vsi->back->hw;
        u32 reg_addr;
        u16 old_itr;

        reg_addr = I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1);
        old_itr = q_vector->rx.itr;
        i40e_set_new_dynamic_itr(&q_vector->rx);
        if (old_itr != q_vector->rx.itr)
                wr32(hw, reg_addr, q_vector->rx.itr);

        reg_addr = I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1);
        old_itr = q_vector->tx.itr;
        i40e_set_new_dynamic_itr(&q_vector->tx);
        if (old_itr != q_vector->tx.itr)
                wr32(hw, reg_addr, q_vector->tx.itr);
}
/**
 * i40e_clean_programming_status - clean the programming status descriptor
 * @rx_ring: the rx ring that has this descriptor
 * @rx_desc: the rx descriptor written back by HW
 *
 * Flow director should handle FD_FILTER_STATUS to check its filter programming
 * status being successful or not and take actions accordingly. FCoE should
 * handle its context/filter programming/invalidation status and take actions.
 **/
static void i40e_clean_programming_status(struct i40e_ring *rx_ring,
                                          union i40e_rx_desc *rx_desc)
{
        u64 qw;
        u8 id;

        qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
        id = (qw & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >>
                  I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT;

        if (id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS)
                i40e_fd_handle_status(rx_ring, qw, id);
}
/**
 * i40e_setup_tx_descriptors - Allocate the Tx descriptors
 * @tx_ring: the tx ring to set up
 *
 * Return 0 on success, negative on error
 **/
int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)
{
        struct device *dev = tx_ring->dev;
        int bi_size;

        if (!dev)
                return -ENOMEM;

        bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
        tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL);
        if (!tx_ring->tx_bi)
                goto err;

        /* round up to nearest 4K */
        tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);
        tx_ring->size = ALIGN(tx_ring->size, 4096);
        tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
                                           &tx_ring->dma, GFP_KERNEL);
        if (!tx_ring->desc) {
                dev_info(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
                         tx_ring->size);
                goto err;
        }

        tx_ring->next_to_use = 0;
        tx_ring->next_to_clean = 0;
        return 0;

err:
        kfree(tx_ring->tx_bi);
        tx_ring->tx_bi = NULL;
        return -ENOMEM;
}
/**
 * i40e_clean_rx_ring - Free Rx buffers
 * @rx_ring: ring to be cleaned
 **/
void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
{
        struct device *dev = rx_ring->dev;
        struct i40e_rx_buffer *rx_bi;
        unsigned long bi_size;
        u16 i;

        /* ring already cleared, nothing to do */
        if (!rx_ring->rx_bi)
                return;

        /* Free all the Rx ring sk_buffs */
        for (i = 0; i < rx_ring->count; i++) {
                rx_bi = &rx_ring->rx_bi[i];
                if (rx_bi->dma) {
                        dma_unmap_single(dev, rx_bi->dma,
                                         rx_ring->rx_buf_len,
                                         DMA_FROM_DEVICE);
                        rx_bi->dma = 0;
                }
                if (rx_bi->skb) {
                        dev_kfree_skb(rx_bi->skb);
                        rx_bi->skb = NULL;
                }
                if (rx_bi->page) {
                        if (rx_bi->page_dma) {
                                dma_unmap_page(dev, rx_bi->page_dma,
                                               PAGE_SIZE / 2,
                                               DMA_FROM_DEVICE);
                                rx_bi->page_dma = 0;
                        }
                        __free_page(rx_bi->page);
                        rx_bi->page = NULL;
                        rx_bi->page_offset = 0;
                }
        }

        bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
        memset(rx_ring->rx_bi, 0, bi_size);

        /* Zero out the descriptor ring */
        memset(rx_ring->desc, 0, rx_ring->size);

        rx_ring->next_to_clean = 0;
        rx_ring->next_to_use = 0;
}
/**
 * i40e_free_rx_resources - Free Rx resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
void i40e_free_rx_resources(struct i40e_ring *rx_ring)
{
        i40e_clean_rx_ring(rx_ring);
        kfree(rx_ring->rx_bi);
        rx_ring->rx_bi = NULL;

        if (rx_ring->desc) {
                dma_free_coherent(rx_ring->dev, rx_ring->size,
                                  rx_ring->desc, rx_ring->dma);
                rx_ring->desc = NULL;
        }
}
/**
 * i40e_setup_rx_descriptors - Allocate Rx descriptors
 * @rx_ring: Rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
{
        struct device *dev = rx_ring->dev;
        int bi_size;

        bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
        rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL);
        if (!rx_ring->rx_bi)
                goto err;

        /* Round up to nearest 4K */
        rx_ring->size = ring_is_16byte_desc_enabled(rx_ring)
                ? rx_ring->count * sizeof(union i40e_16byte_rx_desc)
                : rx_ring->count * sizeof(union i40e_32byte_rx_desc);
        rx_ring->size = ALIGN(rx_ring->size, 4096);
        rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
                                           &rx_ring->dma, GFP_KERNEL);

        if (!rx_ring->desc) {
                dev_info(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
                         rx_ring->size);
                goto err;
        }

        rx_ring->next_to_clean = 0;
        rx_ring->next_to_use = 0;
        return 0;

err:
        kfree(rx_ring->rx_bi);
        rx_ring->rx_bi = NULL;
        return -ENOMEM;
}
/**
 * i40e_release_rx_desc - Store the new tail and head values
 * @rx_ring: ring to bump
 * @val: new head index
 **/
static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
{
        rx_ring->next_to_use = val;
        /* Force memory writes to complete before letting h/w
         * know there are new descriptors to fetch.  (Only
         * applicable for weak-ordered memory model archs,
         * such as IA-64).
         */
        wmb();
        writel(val, rx_ring->tail);
}
/**
 * i40e_alloc_rx_buffers - Replace used receive buffers; packet split
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 **/
void i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
{
        u16 i = rx_ring->next_to_use;
        union i40e_rx_desc *rx_desc;
        struct i40e_rx_buffer *bi;
        struct sk_buff *skb;

        /* do nothing if no valid netdev defined */
        if (!rx_ring->netdev || !cleaned_count)
                return;

        while (cleaned_count--) {
                rx_desc = I40E_RX_DESC(rx_ring, i);
                bi = &rx_ring->rx_bi[i];
                skb = bi->skb;

                if (!skb) {
                        skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
                                                        rx_ring->rx_buf_len);
                        if (!skb) {
                                rx_ring->rx_stats.alloc_rx_buff_failed++;
                                goto no_buffers;
                        }
                        /* initialize queue mapping */
                        skb_record_rx_queue(skb, rx_ring->queue_index);
                        bi->skb = skb;
                }

                if (!bi->dma) {
                        bi->dma = dma_map_single(rx_ring->dev,
                                                 skb->data,
                                                 rx_ring->rx_buf_len,
                                                 DMA_FROM_DEVICE);
                        if (dma_mapping_error(rx_ring->dev, bi->dma)) {
                                rx_ring->rx_stats.alloc_rx_buff_failed++;
                                bi->dma = 0;
                                goto no_buffers;
                        }
                }

                if (ring_is_ps_enabled(rx_ring)) {
                        if (!bi->page) {
                                bi->page = alloc_page(GFP_ATOMIC);
                                if (!bi->page) {
                                        rx_ring->rx_stats.alloc_rx_page_failed++;
                                        goto no_buffers;
                                }
                        }

                        if (!bi->page_dma) {
                                /* use a half page if we're re-using */
                                bi->page_offset ^= PAGE_SIZE / 2;
                                bi->page_dma = dma_map_page(rx_ring->dev,
                                                            bi->page,
                                                            bi->page_offset,
                                                            PAGE_SIZE / 2,
                                                            DMA_FROM_DEVICE);
                                if (dma_mapping_error(rx_ring->dev,
                                                      bi->page_dma)) {
                                        rx_ring->rx_stats.alloc_rx_page_failed++;
                                        bi->page_dma = 0;
                                        goto no_buffers;
                                }
                        }

                        /* Refresh the desc even if buffer_addrs didn't change
                         * because each write-back erases this info.
                         */
                        rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
                        rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
                } else {
                        rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
                        rx_desc->read.hdr_addr = 0;
                }

                i++;
                if (i == rx_ring->count)
                        i = 0;
        }

no_buffers:
        if (rx_ring->next_to_use != i)
                i40e_release_rx_desc(rx_ring, i);
}
/**
 * i40e_receive_skb - Send a completed packet up the stack
 * @rx_ring:  rx ring in play
 * @skb: packet to send up
 * @vlan_tag: vlan tag for packet
 **/
static void i40e_receive_skb(struct i40e_ring *rx_ring,
                             struct sk_buff *skb, u16 vlan_tag)
{
        struct i40e_q_vector *q_vector = rx_ring->q_vector;
        struct i40e_vsi *vsi = rx_ring->vsi;
        u64 flags = vsi->back->flags;

        if (vlan_tag & VLAN_VID_MASK)
                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);

        if (flags & I40E_FLAG_IN_NETPOLL)
                netif_rx(skb);
        else
                napi_gro_receive(&q_vector->napi, skb);
}
/**
 * i40e_rx_checksum - Indicate in skb if hw indicated a good cksum
 * @vsi: the VSI we care about
 * @skb: skb currently being received and modified
 * @rx_status: status value of last descriptor in packet
 * @rx_error: error value of last descriptor in packet
 **/
static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
                                    struct sk_buff *skb,
                                    u32 rx_status,
                                    u32 rx_error)
{
        skb->ip_summed = CHECKSUM_NONE;

        /* Rx csum enabled and ip headers found? */
        if (!(vsi->netdev->features & NETIF_F_RXCSUM &&
              rx_status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
                return;

        /* IP or L4 checksum error */
        if (rx_error & ((1 << I40E_RX_DESC_ERROR_IPE_SHIFT) |
                        (1 << I40E_RX_DESC_ERROR_L4E_SHIFT))) {
                vsi->back->hw_csum_rx_error++;
                return;
        }

        skb->ip_summed = CHECKSUM_UNNECESSARY;
}
/**
 * i40e_rx_hash - returns the hash value from the Rx descriptor
 * @ring: descriptor ring
 * @rx_desc: specific descriptor
 **/
static inline u32 i40e_rx_hash(struct i40e_ring *ring,
                               union i40e_rx_desc *rx_desc)
{
        if (ring->netdev->features & NETIF_F_RXHASH) {
                if ((le64_to_cpu(rx_desc->wb.qword1.status_error_len) >>
                     I40E_RX_DESC_STATUS_FLTSTAT_SHIFT) &
                    I40E_RX_DESC_FLTSTAT_RSS_HASH)
                        return le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
        }
        return 0;
}
/**
 * i40e_clean_rx_irq - Reclaim resources after receive completes
 * @rx_ring:  rx ring to clean
 * @budget:   how many cleans we're allowed
 *
 * Returns true if there's any budget left (e.g. the clean is finished)
 **/
static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
{
        unsigned int total_rx_bytes = 0, total_rx_packets = 0;
        u16 rx_packet_len, rx_header_len, rx_sph, rx_hbo;
        u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
        const int current_node = numa_node_id();
        struct i40e_vsi *vsi = rx_ring->vsi;
        u16 i = rx_ring->next_to_clean;
        union i40e_rx_desc *rx_desc;
        u32 rx_error, rx_status;
        u64 qword;

        rx_desc = I40E_RX_DESC(rx_ring, i);
        qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
        rx_status = (qword & I40E_RXD_QW1_STATUS_MASK)
                                >> I40E_RXD_QW1_STATUS_SHIFT;

        while (rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)) {
                union i40e_rx_desc *next_rxd;
                struct i40e_rx_buffer *rx_bi;
                struct sk_buff *skb;
                u16 vlan_tag;
                if (i40e_rx_is_programming_status(qword)) {
                        i40e_clean_programming_status(rx_ring, rx_desc);
                        I40E_RX_NEXT_DESC_PREFETCH(rx_ring, i, next_rxd);
                        goto next_desc;
                }
                rx_bi = &rx_ring->rx_bi[i];
                skb = rx_bi->skb;
                prefetch(skb->data);

                rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK)
                                >> I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
                rx_header_len = (qword & I40E_RXD_QW1_LENGTH_HBUF_MASK)
                                >> I40E_RXD_QW1_LENGTH_HBUF_SHIFT;
                rx_sph = (qword & I40E_RXD_QW1_LENGTH_SPH_MASK)
                         >> I40E_RXD_QW1_LENGTH_SPH_SHIFT;

                rx_error = (qword & I40E_RXD_QW1_ERROR_MASK)
                           >> I40E_RXD_QW1_ERROR_SHIFT;
                rx_hbo = rx_error & (1 << I40E_RX_DESC_ERROR_HBO_SHIFT);
                rx_error &= ~(1 << I40E_RX_DESC_ERROR_HBO_SHIFT);

                rx_bi->skb = NULL;

                /* This memory barrier is needed to keep us from reading
                 * any other fields out of the rx_desc until we know the
                 * STATUS_DD bit is set
                 */
                rmb();

                /* Get the header and possibly the whole packet
                 * If this is an skb from previous receive dma will be 0
                 */
                if (rx_bi->dma) {
                        u16 len;

                        if (rx_hbo)
                                len = I40E_RX_HDR_SIZE;
                        else if (rx_sph)
                                len = rx_header_len;
                        else if (rx_packet_len)
                                len = rx_packet_len; /* 1buf/no split found */
                        else
                                len = rx_header_len; /* split always mode */

                        skb_put(skb, len);
                        dma_unmap_single(rx_ring->dev,
                                         rx_bi->dma,
                                         rx_ring->rx_buf_len,
                                         DMA_FROM_DEVICE);
                        rx_bi->dma = 0;
                }

                /* Get the rest of the data if this was a header split */
                if (ring_is_ps_enabled(rx_ring) && rx_packet_len) {
                        skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
                                           rx_bi->page,
                                           rx_bi->page_offset,
                                           rx_packet_len);

                        skb->len += rx_packet_len;
                        skb->data_len += rx_packet_len;
                        skb->truesize += rx_packet_len;

                        if ((page_count(rx_bi->page) == 1) &&
                            (page_to_nid(rx_bi->page) == current_node))
                                get_page(rx_bi->page);
                        else
                                rx_bi->page = NULL;

                        dma_unmap_page(rx_ring->dev,
                                       rx_bi->page_dma,
                                       PAGE_SIZE / 2,
                                       DMA_FROM_DEVICE);
                        rx_bi->page_dma = 0;
                }
                I40E_RX_NEXT_DESC_PREFETCH(rx_ring, i, next_rxd);

                if (unlikely(
                    !(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
                        struct i40e_rx_buffer *next_buffer;

                        next_buffer = &rx_ring->rx_bi[i];

                        if (ring_is_ps_enabled(rx_ring)) {
                                rx_bi->skb = next_buffer->skb;
                                rx_bi->dma = next_buffer->dma;
                                next_buffer->skb = skb;
                                next_buffer->dma = 0;
                        }
                        rx_ring->rx_stats.non_eop_descs++;
                        goto next_desc;
                }

                /* ERR_MASK will only have valid bits if EOP set */
                if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
                        dev_kfree_skb_any(skb);
                        goto next_desc;
                }

                skb->rxhash = i40e_rx_hash(rx_ring, rx_desc);
                i40e_rx_checksum(vsi, skb, rx_status, rx_error);

                /* probably a little skewed due to removing CRC */
                total_rx_bytes += skb->len;
                total_rx_packets++;

                skb->protocol = eth_type_trans(skb, rx_ring->netdev);
                vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
                         ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
                         : 0;
                i40e_receive_skb(rx_ring, skb, vlan_tag);

                rx_ring->netdev->last_rx = jiffies;
                budget--;
next_desc:
                rx_desc->wb.qword1.status_error_len = 0;
                if (!budget)
                        break;

                cleaned_count++;
                /* return some buffers to hardware, one at a time is too slow */
                if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
                        i40e_alloc_rx_buffers(rx_ring, cleaned_count);
                        cleaned_count = 0;
                }

                /* use prefetched values */
                rx_desc = next_rxd;
                qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
                rx_status = (qword & I40E_RXD_QW1_STATUS_MASK)
                                        >> I40E_RXD_QW1_STATUS_SHIFT;
        }

        rx_ring->next_to_clean = i;
        rx_ring->rx_stats.packets += total_rx_packets;
        rx_ring->rx_stats.bytes += total_rx_bytes;
        rx_ring->q_vector->rx.total_packets += total_rx_packets;
        rx_ring->q_vector->rx.total_bytes += total_rx_bytes;

        if (cleaned_count)
                i40e_alloc_rx_buffers(rx_ring, cleaned_count);

        return budget > 0;
}
/**
 * i40e_napi_poll - NAPI polling Rx/Tx cleanup routine
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function will clean all queues associated with a q_vector.
 *
 * Returns the amount of work done
 **/
int i40e_napi_poll(struct napi_struct *napi, int budget)
{
        struct i40e_q_vector *q_vector =
                               container_of(napi, struct i40e_q_vector, napi);
        struct i40e_vsi *vsi = q_vector->vsi;
        bool clean_complete = true;
        int budget_per_ring;
        int i;

        if (test_bit(__I40E_DOWN, &vsi->state)) {
                napi_complete(napi);
                return 0;
        }

        /* We attempt to distribute budget to each Rx queue fairly, but don't
         * allow the budget to go below 1 because that would exit polling early.
         * Since the actual Tx work is minimal, we can give the Tx a larger
         * budget and be more aggressive about cleaning up the Tx descriptors.
         */
        budget_per_ring = max(budget / q_vector->num_ringpairs, 1);
        for (i = 0; i < q_vector->num_ringpairs; i++) {
                clean_complete &= i40e_clean_tx_irq(q_vector->tx.ring[i],
                                                    vsi->work_limit);
                clean_complete &= i40e_clean_rx_irq(q_vector->rx.ring[i],
                                                    budget_per_ring);
        }

        /* If work not completed, return budget and polling will return */
        if (!clean_complete)
                return budget;

        /* Work is done so exit the polling mode and re-enable the interrupt */
        napi_complete(napi);
        if (ITR_IS_DYNAMIC(vsi->rx_itr_setting) ||
            ITR_IS_DYNAMIC(vsi->tx_itr_setting))
                i40e_update_dynamic_itr(q_vector);

        if (!test_bit(__I40E_DOWN, &vsi->state)) {
                if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
                        i40e_irq_dynamic_enable(vsi,
                                        q_vector->v_idx + vsi->base_vector);
                } else {
                        struct i40e_hw *hw = &vsi->back->hw;
                        /* We re-enable the queue 0 cause, but
                         * don't worry about dynamic_enable
                         * because we left it on for the other
                         * possible interrupts during napi
                         */
                        u32 qval = rd32(hw, I40E_QINT_RQCTL(0));
                        qval |= I40E_QINT_RQCTL_CAUSE_ENA_MASK;
                        wr32(hw, I40E_QINT_RQCTL(0), qval);

                        qval = rd32(hw, I40E_QINT_TQCTL(0));
                        qval |= I40E_QINT_TQCTL_CAUSE_ENA_MASK;
                        wr32(hw, I40E_QINT_TQCTL(0), qval);
                }
        }

        return 0;
}
/**
 * i40e_atr - Add a Flow Director ATR filter
 * @tx_ring:  ring to add programming descriptor to
 * @skb:      send buffer
 * @flags:    send flags
 * @protocol: wire protocol
 **/
static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
                     u32 flags, __be16 protocol)
{
        struct i40e_filter_program_desc *fdir_desc;
        struct i40e_pf *pf = tx_ring->vsi->back;
        union {
                unsigned char *network;
                struct iphdr *ipv4;
                struct ipv6hdr *ipv6;
        } hdr;
        struct tcphdr *th;
        unsigned int hlen;
        u32 flex_ptype, dtype_cmd;

        /* make sure ATR is enabled */
        if (!(pf->flags & I40E_FLAG_FDIR_ATR_ENABLED))
                return;

        /* if sampling is disabled do nothing */
        if (!tx_ring->atr_sample_rate)
                return;

        tx_ring->atr_count++;

        /* snag network header to get L4 type and address */
        hdr.network = skb_network_header(skb);

        /* Currently only IPv4/IPv6 with TCP is supported */
        if (protocol == htons(ETH_P_IP)) {
                if (hdr.ipv4->protocol != IPPROTO_TCP)
                        return;

                /* access ihl as a u8 to avoid unaligned access on ia64 */
                hlen = (hdr.network[0] & 0x0F) << 2;
        } else if (protocol == htons(ETH_P_IPV6)) {
                if (hdr.ipv6->nexthdr != IPPROTO_TCP)
                        return;

                hlen = sizeof(struct ipv6hdr);
        } else {
                return;
        }

        th = (struct tcphdr *)(hdr.network + hlen);

        /* sample on all syn/fin packets or once every atr sample rate */
        if (!th->fin && !th->syn &&
            (tx_ring->atr_count < tx_ring->atr_sample_rate))
                return;

        tx_ring->atr_count = 0;

        /* grab the next descriptor */
        fdir_desc = I40E_TX_FDIRDESC(tx_ring, tx_ring->next_to_use);
        tx_ring->next_to_use++;
        if (tx_ring->next_to_use == tx_ring->count)
                tx_ring->next_to_use = 0;

        flex_ptype = (tx_ring->queue_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
                      I40E_TXD_FLTR_QW0_QINDEX_MASK;
        flex_ptype |= (protocol == htons(ETH_P_IP)) ?
                      (I40E_FILTER_PCTYPE_NONF_IPV4_TCP <<
                       I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) :
                      (I40E_FILTER_PCTYPE_NONF_IPV6_TCP <<
                       I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);

        flex_ptype |= tx_ring->vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;

        dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG;

        dtype_cmd |= th->fin ?
                     (I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
                      I40E_TXD_FLTR_QW1_PCMD_SHIFT) :
                     (I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
                      I40E_TXD_FLTR_QW1_PCMD_SHIFT);

        dtype_cmd |= I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX <<
                     I40E_TXD_FLTR_QW1_DEST_SHIFT;

        dtype_cmd |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID <<
                     I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;

        fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
        fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
}
#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
/**
 * i40e_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
 * @skb:     send buffer
 * @tx_ring: ring to send buffer on
 * @flags:   the tx flags to be set
 *
 * Checks the skb and set up correspondingly several generic transmit flags
 * related to VLAN tagging for the HW, such as VLAN, DCB, etc.
 *
 * Returns an error code to indicate the frame should be dropped upon error,
 * otherwise returns 0 to indicate the flags have been set properly.
 **/
static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
                                      struct i40e_ring *tx_ring,
                                      u32 *flags)
{
        __be16 protocol = skb->protocol;
        u32 tx_flags = 0;

        /* if we have a HW VLAN tag being added, default to the HW one */
        if (vlan_tx_tag_present(skb)) {
                tx_flags |= vlan_tx_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
                tx_flags |= I40E_TX_FLAGS_HW_VLAN;
        /* else if it is a SW VLAN, check the next protocol and store the tag */
        } else if (protocol == __constant_htons(ETH_P_8021Q)) {
                struct vlan_hdr *vhdr, _vhdr;
                vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
                if (!vhdr)
                        return -EINVAL;

                protocol = vhdr->h_vlan_encapsulated_proto;
                tx_flags |= ntohs(vhdr->h_vlan_TCI) << I40E_TX_FLAGS_VLAN_SHIFT;
                tx_flags |= I40E_TX_FLAGS_SW_VLAN;
        }

        /* Insert 802.1p priority into VLAN header */
        if ((tx_ring->vsi->back->flags & I40E_FLAG_DCB_ENABLED) &&
            ((tx_flags & (I40E_TX_FLAGS_HW_VLAN | I40E_TX_FLAGS_SW_VLAN)) ||
             (skb->priority != TC_PRIO_CONTROL))) {
                tx_flags &= ~I40E_TX_FLAGS_VLAN_PRIO_MASK;
                tx_flags |= (skb->priority & 0x7) <<
                                I40E_TX_FLAGS_VLAN_PRIO_SHIFT;
                if (tx_flags & I40E_TX_FLAGS_SW_VLAN) {
                        struct vlan_ethhdr *vhdr;
                        if (skb_header_cloned(skb) &&
                            pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
                                return -ENOMEM;
                        vhdr = (struct vlan_ethhdr *)skb->data;
                        vhdr->h_vlan_TCI = htons(tx_flags >>
                                                 I40E_TX_FLAGS_VLAN_SHIFT);
                } else {
                        tx_flags |= I40E_TX_FLAGS_HW_VLAN;
                }
        }
        *flags = tx_flags;
        return 0;
}
/**
 * i40e_tx_csum - is checksum offload requested
 * @tx_ring:  ptr to the ring to send
 * @skb:      ptr to the skb we're sending
 * @tx_flags: the collected send information
 * @protocol: the send protocol
 *
 * Returns true if checksum offload is requested
 **/
static bool i40e_tx_csum(struct i40e_ring *tx_ring, struct sk_buff *skb,
                         u32 tx_flags, __be16 protocol)
{
        if ((skb->ip_summed != CHECKSUM_PARTIAL) &&
            !(tx_flags & I40E_TX_FLAGS_TXSW)) {
                if (!(tx_flags & I40E_TX_FLAGS_HW_VLAN))
                        return false;
        }

        return skb->ip_summed == CHECKSUM_PARTIAL;
}
/**
 * i40e_tso - set up the tso context descriptor
 * @tx_ring:  ptr to the ring to send
 * @skb:      ptr to the skb we're sending
 * @tx_flags: the collected send information
 * @protocol: the send protocol
 * @hdr_len:  ptr to the size of the packet header
 * @cd_tunneling: ptr to context descriptor bits
 *
 * Returns 0 if no TSO can happen, 1 if tso is going, or error
 **/
static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
                    u32 tx_flags, __be16 protocol, u8 *hdr_len,
                    u64 *cd_type_cmd_tso_mss, u32 *cd_tunneling)
{
        u32 cd_cmd, cd_tso_len, cd_mss;
        struct tcphdr *tcph;
        struct iphdr *iph;
        u32 l4len;
        int err;
        struct ipv6hdr *ipv6h;

        if (!skb_is_gso(skb))
                return 0;

        if (skb_header_cloned(skb)) {
                err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
                if (err)
                        return err;
        }

        if (protocol == __constant_htons(ETH_P_IP)) {
                iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
                tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
                iph->tot_len = 0;
                iph->check = 0;
                tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
                                                 0, IPPROTO_TCP, 0);
        } else if (skb_is_gso_v6(skb)) {
                ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb)
                                           : ipv6_hdr(skb);
                tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
                ipv6h->payload_len = 0;
                tcph->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
                                               0, IPPROTO_TCP, 0);
        }

        l4len = skb->encapsulation ? inner_tcp_hdrlen(skb) : tcp_hdrlen(skb);
        *hdr_len = (skb->encapsulation
                    ? (skb_inner_transport_header(skb) - skb->data)
                    : skb_transport_offset(skb)) + l4len;

        /* find the field values */
        cd_cmd = I40E_TX_CTX_DESC_TSO;
        cd_tso_len = skb->len - *hdr_len;
        cd_mss = skb_shinfo(skb)->gso_size;
        *cd_type_cmd_tso_mss |= ((u64)cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT)
                             | ((u64)cd_tso_len
                                << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT)
                             | ((u64)cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
        return 1;
}
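/* Example of the header length math in i40e_tso() (illustrative values
 * only): for a plain, non-encapsulated TCP/IPv4 frame with a 14-byte
 * Ethernet header, a 20-byte IP header and a 20-byte TCP header,
 * skb_transport_offset() is 14 + 20 = 34 and l4len is 20, so *hdr_len = 54.
 * With skb->len = 65590 and gso_size = 1460, cd_tso_len = 65590 - 54 = 65536
 * bytes of payload for the hardware to segment into 1460-byte chunks.
 */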
/**
 * i40e_tx_enable_csum - Enable Tx checksum offloads
 * @skb: send buffer
 * @tx_flags: Tx flags currently set
 * @td_cmd: Tx descriptor command bits to set
 * @td_offset: Tx descriptor header offsets to set
 * @cd_tunneling: ptr to context desc bits
 **/
static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
                                u32 *td_cmd, u32 *td_offset,
                                struct i40e_ring *tx_ring,
                                u32 *cd_tunneling)
{
        struct ipv6hdr *this_ipv6_hdr;
        unsigned int this_tcp_hdrlen;
        struct iphdr *this_ip_hdr;
        u32 network_hdr_len;
        u8 l4_hdr = 0;

        if (skb->encapsulation) {
                network_hdr_len = skb_inner_network_header_len(skb);
                this_ip_hdr = inner_ip_hdr(skb);
                this_ipv6_hdr = inner_ipv6_hdr(skb);
                this_tcp_hdrlen = inner_tcp_hdrlen(skb);

                if (tx_flags & I40E_TX_FLAGS_IPV4) {
                        if (tx_flags & I40E_TX_FLAGS_TSO) {
                                *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4;
                                ip_hdr(skb)->check = 0;
                        } else {
                                *cd_tunneling |=
                                         I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
                        }
                } else if (tx_flags & I40E_TX_FLAGS_IPV6) {
                        if (tx_flags & I40E_TX_FLAGS_TSO) {
                                *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
                                ip_hdr(skb)->check = 0;
                        } else {
                                *cd_tunneling |=
                                         I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
                        }
                }

                /* Now set the ctx descriptor fields */
                *cd_tunneling |= (skb_network_header_len(skb) >> 2) <<
                                   I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT |
                                   I40E_TXD_CTX_UDP_TUNNELING            |
                                   ((skb_inner_network_offset(skb) -
                                        skb_transport_offset(skb)) >> 1) <<
                                   I40E_TXD_CTX_QW0_NATLEN_SHIFT;

        } else {
                network_hdr_len = skb_network_header_len(skb);
                this_ip_hdr = ip_hdr(skb);
                this_ipv6_hdr = ipv6_hdr(skb);
                this_tcp_hdrlen = tcp_hdrlen(skb);
        }

        /* Enable IP checksum offloads */
        if (tx_flags & I40E_TX_FLAGS_IPV4) {
                l4_hdr = this_ip_hdr->protocol;
                /* the stack computes the IP header already, the only time we
                 * need the hardware to recompute it is in the case of TSO.
                 */
                if (tx_flags & I40E_TX_FLAGS_TSO) {
                        *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM;
                        this_ip_hdr->check = 0;
                } else {
                        *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4;
                }
                /* Now set the td_offset for IP header length */
                *td_offset = (network_hdr_len >> 2) <<
                              I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
        } else if (tx_flags & I40E_TX_FLAGS_IPV6) {
                l4_hdr = this_ipv6_hdr->nexthdr;
                *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
                /* Now set the td_offset for IP header length */
                *td_offset = (network_hdr_len >> 2) <<
                              I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
        }
        /* words in MACLEN + dwords in IPLEN + dwords in L4Len */
        *td_offset |= (skb_network_offset(skb) >> 1) <<
                       I40E_TX_DESC_LENGTH_MACLEN_SHIFT;

        /* Enable L4 checksum offloads */
        switch (l4_hdr) {
        case IPPROTO_TCP:
                /* enable checksum offloads */
                *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
                *td_offset |= (this_tcp_hdrlen >> 2) <<
                               I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
                break;
        case IPPROTO_SCTP:
                /* enable SCTP checksum offload */
                *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
                *td_offset |= (sizeof(struct sctphdr) >> 2) <<
                               I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
                break;
        case IPPROTO_UDP:
                /* enable UDP checksum offload */
                *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
                *td_offset |= (sizeof(struct udphdr) >> 2) <<
                               I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
                break;
        default:
                break;
        }
}
/**
 * i40e_create_tx_ctx - Build the Tx context descriptor
 * @tx_ring:  ring to create the descriptor on
 * @cd_type_cmd_tso_mss: Quad Word 1
 * @cd_tunneling: Quad Word 0 - bits 0-31
 * @cd_l2tag2: Quad Word 0 - bits 32-63
 **/
static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
                               const u64 cd_type_cmd_tso_mss,
                               const u32 cd_tunneling, const u32 cd_l2tag2)
{
        struct i40e_tx_context_desc *context_desc;

        if (!cd_type_cmd_tso_mss && !cd_tunneling && !cd_l2tag2)
                return;

        /* grab the next descriptor */
        context_desc = I40E_TX_CTXTDESC(tx_ring, tx_ring->next_to_use);
        tx_ring->next_to_use++;
        if (tx_ring->next_to_use == tx_ring->count)
                tx_ring->next_to_use = 0;

        /* cpu_to_le32 and assign to struct fields */
        context_desc->tunneling_params = cpu_to_le32(cd_tunneling);
        context_desc->l2tag2 = cpu_to_le16(cd_l2tag2);
        context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
}
/**
 * i40e_tx_map - Build the Tx descriptor
 * @tx_ring:  ring to send buffer on
 * @skb:      send buffer
 * @first:    first buffer info buffer to use
 * @tx_flags: collected send information
 * @hdr_len:  size of the packet header
 * @td_cmd:   the command field in the descriptor
 * @td_offset: offset for checksum or crc
 **/
static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
                        struct i40e_tx_buffer *first, u32 tx_flags,
                        const u8 hdr_len, u32 td_cmd, u32 td_offset)
{
        struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
        unsigned int data_len = skb->data_len;
        unsigned int size = skb_headlen(skb);
        struct device *dev = tx_ring->dev;
        u32 paylen = skb->len - hdr_len;
        u16 i = tx_ring->next_to_use;
        struct i40e_tx_buffer *tx_bi;
        struct i40e_tx_desc *tx_desc;
        u32 buf_offset = 0;
        u32 td_tag = 0;
        dma_addr_t dma;
        u16 gso_segs;

        dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, dma))
                goto dma_error;

        if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
                td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
                td_tag = (tx_flags & I40E_TX_FLAGS_VLAN_MASK) >>
                         I40E_TX_FLAGS_VLAN_SHIFT;
        }

        tx_desc = I40E_TX_DESC(tx_ring, i);
        for (;;) {
                while (size > I40E_MAX_DATA_PER_TXD) {
                        tx_desc->buffer_addr = cpu_to_le64(dma + buf_offset);
                        tx_desc->cmd_type_offset_bsz =
                                build_ctob(td_cmd, td_offset,
                                           I40E_MAX_DATA_PER_TXD, td_tag);

                        buf_offset += I40E_MAX_DATA_PER_TXD;
                        size -= I40E_MAX_DATA_PER_TXD;

                        tx_desc++;
                        i++;
                        if (i == tx_ring->count) {
                                tx_desc = I40E_TX_DESC(tx_ring, 0);
                                i = 0;
                        }
                }

                tx_bi = &tx_ring->tx_bi[i];
                tx_bi->length = buf_offset + size;
                tx_bi->tx_flags = tx_flags;
                tx_bi->dma = dma;

                tx_desc->buffer_addr = cpu_to_le64(dma + buf_offset);
                tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
                                                          size, td_tag);

                if (likely(!data_len))
                        break;

                size = skb_frag_size(frag);
                data_len -= size;
                buf_offset = 0;
                tx_flags |= I40E_TX_FLAGS_MAPPED_AS_PAGE;

                dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
                if (dma_mapping_error(dev, dma))
                        goto dma_error;

                tx_desc++;
                i++;
                if (i == tx_ring->count) {
                        tx_desc = I40E_TX_DESC(tx_ring, 0);
                        i = 0;
                }

                frag++;
        }

        tx_desc->cmd_type_offset_bsz |=
                cpu_to_le64((u64)I40E_TXD_CMD << I40E_TXD_QW1_CMD_SHIFT);

        i++;
        if (i == tx_ring->count)
                i = 0;

        tx_ring->next_to_use = i;

        if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO))
                gso_segs = skb_shinfo(skb)->gso_segs;
        else
                gso_segs = 1;

        /* multiply data chunks by size of headers */
        tx_bi->bytecount = paylen + (gso_segs * hdr_len);
        tx_bi->gso_segs = gso_segs;
        tx_bi->skb = skb;

        /* set the timestamp and next to watch values */
        first->time_stamp = jiffies;
        first->next_to_watch = tx_desc;

        /* Force memory writes to complete before letting h/w
         * know there are new descriptors to fetch.  (Only
         * applicable for weak-ordered memory model archs,
         * such as IA-64).
         */
        wmb();

        writel(i, tx_ring->tail);
        return;

dma_error:
        dev_info(dev, "TX DMA map failed\n");

        /* clear dma mappings for failed tx_bi map */
        for (;;) {
                tx_bi = &tx_ring->tx_bi[i];
                i40e_unmap_tx_resource(tx_ring, tx_bi);
                if (tx_bi == first)
                        break;
                if (i == 0)
                        i = tx_ring->count;
                i--;
        }

        dev_kfree_skb_any(skb);

        tx_ring->next_to_use = i;
}
/**
 * __i40e_maybe_stop_tx - 2nd level check for tx stop conditions
 * @tx_ring: the ring to be checked
 * @size:    the size buffer we want to assure is available
 *
 * Returns -EBUSY if a stop is needed, else 0
 **/
static inline int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
{
        netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
        smp_mb();

        /* Check again in a case another CPU has just made room available. */
        if (likely(I40E_DESC_UNUSED(tx_ring) < size))
                return -EBUSY;

        /* A reprieve! - use start_queue because it doesn't call schedule */
        netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
        ++tx_ring->tx_stats.restart_queue;
        return 0;
}
/**
 * i40e_maybe_stop_tx - 1st level check for tx stop conditions
 * @tx_ring: the ring to be checked
 * @size:    the size buffer we want to assure is available
 *
 * Returns 0 if stop is not needed
 **/
static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
{
        if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
                return 0;
        return __i40e_maybe_stop_tx(tx_ring, size);
}
/**
 * i40e_xmit_descriptor_count - calculate number of tx descriptors needed
 * @skb:     send buffer
 * @tx_ring: ring to send buffer on
 *
 * Returns number of data descriptors needed for this skb. Returns 0 to
 * indicate there are not enough descriptors available in this ring since
 * we need at least one descriptor.
 **/
static int i40e_xmit_descriptor_count(struct sk_buff *skb,
                                      struct i40e_ring *tx_ring)
{
#if PAGE_SIZE > I40E_MAX_DATA_PER_TXD
        unsigned int f;
#endif
        int count = 0;

        /* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
         *       + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD,
         *       + 2 desc gap to keep tail from touching head,
         *       + 1 desc for context descriptor,
         * otherwise try next time
         */
#if PAGE_SIZE > I40E_MAX_DATA_PER_TXD
        for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
                count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
#else
        count += skb_shinfo(skb)->nr_frags;
#endif
        count += TXD_USE_COUNT(skb_headlen(skb));
        if (i40e_maybe_stop_tx(tx_ring, count + 3)) {
                tx_ring->tx_stats.tx_busy++;
                return 0;
        }
        return count;
}
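/* Worked example of the descriptor budget above (illustrative values only):
 * with 4096-byte pages (smaller than I40E_MAX_DATA_PER_TXD), a 1500-byte
 * linear area and two page fragments, count = 2 (frags) + 1 (head) = 3 data
 * descriptors. i40e_maybe_stop_tx() is then asked for count + 3 = 6 free
 * slots so there is also room for one context descriptor and the gap that
 * keeps the tail from touching the head.
 */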
/**
 * i40e_xmit_frame_ring - Sends buffer on Tx ring
 * @skb:     send buffer
 * @tx_ring: ring to send buffer on
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 **/
static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
                                        struct i40e_ring *tx_ring)
{
        u64 cd_type_cmd_tso_mss = I40E_TX_DESC_DTYPE_CONTEXT;
        u32 cd_tunneling = 0, cd_l2tag2 = 0;
        struct i40e_tx_buffer *first;
        u32 td_offset = 0;
        u32 tx_flags = 0;
        __be16 protocol;
        u32 td_cmd = 0;
        u8 hdr_len = 0;
        int tso;

        if (0 == i40e_xmit_descriptor_count(skb, tx_ring))
                return NETDEV_TX_BUSY;

        /* prepare the xmit flags */
        if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
                goto out_drop;

        /* obtain protocol of skb */
        protocol = skb->protocol;

        /* record the location of the first descriptor for this packet */
        first = &tx_ring->tx_bi[tx_ring->next_to_use];

        /* setup IPv4/IPv6 offloads */
        if (protocol == __constant_htons(ETH_P_IP))
                tx_flags |= I40E_TX_FLAGS_IPV4;
        else if (protocol == __constant_htons(ETH_P_IPV6))
                tx_flags |= I40E_TX_FLAGS_IPV6;

        tso = i40e_tso(tx_ring, skb, tx_flags, protocol, &hdr_len,
                       &cd_type_cmd_tso_mss, &cd_tunneling);

        if (tso < 0)
                goto out_drop;
        else if (tso)
                tx_flags |= I40E_TX_FLAGS_TSO;

        skb_tx_timestamp(skb);

        /* Always offload the checksum, since it's in the data descriptor */
        if (i40e_tx_csum(tx_ring, skb, tx_flags, protocol))
                tx_flags |= I40E_TX_FLAGS_CSUM;

        /* always enable offload insertion */
        td_cmd |= I40E_TX_DESC_CMD_ICRC;

        if (tx_flags & I40E_TX_FLAGS_CSUM)
                i40e_tx_enable_csum(skb, tx_flags, &td_cmd, &td_offset,
                                    tx_ring, &cd_tunneling);

        i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
                           cd_tunneling, cd_l2tag2);

        /* Add Flow Director ATR if it's enabled.
         *
         * NOTE: this must always be directly before the data descriptor.
         */
        i40e_atr(tx_ring, skb, tx_flags, protocol);

        i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
                    td_cmd, td_offset);

        i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);

        return NETDEV_TX_OK;

out_drop:
        dev_kfree_skb_any(skb);
        return NETDEV_TX_OK;
}
/**
 * i40e_lan_xmit_frame - Selects the correct VSI and Tx queue to send buffer
 * @skb:    send buffer
 * @netdev: network interface device structure
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 **/
netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_vsi *vsi = np->vsi;
        struct i40e_ring *tx_ring = &vsi->tx_rings[skb->queue_mapping];

        /* hardware can't handle really short frames, hardware padding works
         * beyond this point
         */
        if (unlikely(skb->len < I40E_MIN_TX_LEN)) {
                if (skb_pad(skb, I40E_MIN_TX_LEN - skb->len))
                        return NETDEV_TX_OK;
                skb->len = I40E_MIN_TX_LEN;
                skb_set_tail_pointer(skb, I40E_MIN_TX_LEN);
        }

        return i40e_xmit_frame_ring(skb, tx_ring);
}