/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Driver
 * Copyright(c) 2013 - 2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/

#include <linux/prefetch.h>
#include "i40e.h"
#include "i40e_prototype.h"
static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
				u32 td_tag)
{
	return cpu_to_le64(I40E_TX_DESC_DTYPE_DATA |
			   ((u64)td_cmd  << I40E_TXD_QW1_CMD_SHIFT) |
			   ((u64)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) |
			   ((u64)size << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) |
			   ((u64)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT));
}

#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
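/* I40E_TXD_CMD marks the final data descriptor of a frame: EOP closes the
 * packet and RS asks the hardware to write back a completion status for it.
 */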
/**
 * i40e_program_fdir_filter - Program a Flow Director filter
 * @fdir_data: Packet data that will be filter parameters
 * @raw_packet: the pre-allocated packet buffer for FDir
 * @pf: The PF pointer
 * @add: True for add/update, False for remove
 *
 * Returns 0 on success, negative on failure
 **/
int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet,
			     struct i40e_pf *pf, bool add)
{
	struct i40e_filter_program_desc *fdir_desc;
	struct i40e_tx_buffer *tx_buf;
	struct i40e_tx_desc *tx_desc;
	struct i40e_ring *tx_ring;
	unsigned int fpt, dcc;
	struct i40e_vsi *vsi = NULL;
	struct device *dev;
	dma_addr_t dma;
	u32 td_cmd = 0;
	u16 i;

	/* find existing FDIR VSI */
	for (i = 0; i < pf->num_alloc_vsi; i++)
		if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR)
			vsi = pf->vsi[i];
	if (!vsi)
		return -ENOENT;

	tx_ring = vsi->tx_rings[0];
	dev = tx_ring->dev;

	dma = dma_map_single(dev, raw_packet,
			     I40E_FDIR_MAX_RAW_PACKET_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma))
		goto dma_fail;

	/* grab the next descriptor */
	i = tx_ring->next_to_use;
	fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);

	tx_ring->next_to_use = (i + 1 < tx_ring->count) ? i + 1 : 0;

	fpt = (fdir_data->q_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
	      I40E_TXD_FLTR_QW0_QINDEX_MASK;

	fpt |= (fdir_data->flex_off << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT) &
	       I40E_TXD_FLTR_QW0_FLEXOFF_MASK;

	fpt |= (fdir_data->pctype << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) &
	       I40E_TXD_FLTR_QW0_PCTYPE_MASK;

	/* Use LAN VSI Id if not programmed by user */
	if (fdir_data->dest_vsi == 0)
		fpt |= (pf->vsi[pf->lan_vsi]->id) <<
		       I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
	else
		fpt |= ((u32)fdir_data->dest_vsi <<
			I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) &
		       I40E_TXD_FLTR_QW0_DEST_VSI_MASK;

	dcc = I40E_TX_DESC_DTYPE_FILTER_PROG;

	if (add)
		dcc |= I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
		       I40E_TXD_FLTR_QW1_PCMD_SHIFT;
	else
		dcc |= I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
		       I40E_TXD_FLTR_QW1_PCMD_SHIFT;

	dcc |= (fdir_data->dest_ctl << I40E_TXD_FLTR_QW1_DEST_SHIFT) &
	       I40E_TXD_FLTR_QW1_DEST_MASK;

	dcc |= (fdir_data->fd_status << I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT) &
	       I40E_TXD_FLTR_QW1_FD_STATUS_MASK;

	if (fdir_data->cnt_index != 0) {
		dcc |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
		dcc |= ((u32)fdir_data->cnt_index <<
			I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
		       I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
	}

	fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(fpt);
	fdir_desc->rsvd = cpu_to_le32(0);
	fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dcc);
	fdir_desc->fd_id = cpu_to_le32(fdir_data->fd_id);

	/* Now program a dummy descriptor */
	i = tx_ring->next_to_use;
	tx_desc = I40E_TX_DESC(tx_ring, i);
	tx_buf = &tx_ring->tx_bi[i];

	tx_ring->next_to_use = (i + 1 < tx_ring->count) ? i + 1 : 0;

	/* record length, and DMA address */
	dma_unmap_len_set(tx_buf, len, I40E_FDIR_MAX_RAW_PACKET_SIZE);
	dma_unmap_addr_set(tx_buf, dma, dma);

	tx_desc->buffer_addr = cpu_to_le64(dma);
	td_cmd = I40E_TXD_CMD | I40E_TX_DESC_CMD_DUMMY;

	tx_desc->cmd_type_offset_bsz =
		build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_SIZE, 0);

	/* set the timestamp */
	tx_buf->time_stamp = jiffies;

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();

	/* Mark the data descriptor to be watched */
	tx_buf->next_to_watch = tx_desc;

	writel(tx_ring->next_to_use, tx_ring->tail);
	return 0;

dma_fail:
	return -1;
}
#define IP_HEADER_OFFSET 14
#define I40E_UDPIP_DUMMY_PACKET_LEN 42
/**
 * i40e_add_del_fdir_udpv4 - Add/Remove UDPv4 filters
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @raw_packet: the pre-allocated packet buffer for FDir
 * @add: true adds a filter, false removes it
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
				   struct i40e_fdir_filter *fd_data,
				   u8 *raw_packet, bool add)
{
	struct i40e_pf *pf = vsi->back;
	struct udphdr *udp;
	struct iphdr *ip;
	bool err = false;
	int ret;
	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
		0x45, 0, 0, 0x1c, 0, 0, 0x40, 0, 0x40, 0x11, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};

	memcpy(raw_packet, packet, I40E_UDPIP_DUMMY_PACKET_LEN);

	ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
	udp = (struct udphdr *)(raw_packet + IP_HEADER_OFFSET
	      + sizeof(struct iphdr));

	ip->daddr = fd_data->dst_ip[0];
	udp->dest = fd_data->dst_port;
	ip->saddr = fd_data->src_ip[0];
	udp->source = fd_data->src_port;

	fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
	ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "Filter command send failed for PCTYPE %d (ret = %d)\n",
			 fd_data->pctype, ret);
		err = true;
	} else {
		dev_info(&pf->pdev->dev,
			 "Filter OK for PCTYPE %d (ret = %d)\n",
			 fd_data->pctype, ret);
	}

	return err ? -EOPNOTSUPP : 0;
}
#define I40E_TCPIP_DUMMY_PACKET_LEN 54
/**
 * i40e_add_del_fdir_tcpv4 - Add/Remove TCPv4 filters
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @raw_packet: the pre-allocated packet buffer for FDir
 * @add: true adds a filter, false removes it
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
				   struct i40e_fdir_filter *fd_data,
				   u8 *raw_packet, bool add)
{
	struct i40e_pf *pf = vsi->back;
	struct tcphdr *tcp;
	struct iphdr *ip;
	bool err = false;
	int ret;
	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
		0x45, 0, 0, 0x28, 0, 0, 0x40, 0, 0x40, 0x6, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x80, 0x11,
		0x0, 0x72, 0, 0, 0, 0};

	memcpy(raw_packet, packet, I40E_TCPIP_DUMMY_PACKET_LEN);

	ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
	tcp = (struct tcphdr *)(raw_packet + IP_HEADER_OFFSET
	      + sizeof(struct iphdr));

	ip->daddr = fd_data->dst_ip[0];
	tcp->dest = fd_data->dst_port;
	ip->saddr = fd_data->src_ip[0];
	tcp->source = fd_data->src_port;

	if (add) {
		if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) {
			dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
			pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
		}
	}

	fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
	ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "Filter command send failed for PCTYPE %d (ret = %d)\n",
			 fd_data->pctype, ret);
		err = true;
	} else {
		dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d (ret = %d)\n",
			 fd_data->pctype, ret);
	}

	return err ? -EOPNOTSUPP : 0;
}
/**
 * i40e_add_del_fdir_sctpv4 - Add/Remove SCTPv4 Flow Director filters for
 * a specific flow spec
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @raw_packet: the pre-allocated packet buffer for FDir
 * @add: true adds a filter, false removes it
 *
 * Always returns -EOPNOTSUPP
 **/
static int i40e_add_del_fdir_sctpv4(struct i40e_vsi *vsi,
				    struct i40e_fdir_filter *fd_data,
				    u8 *raw_packet, bool add)
{
	return -EOPNOTSUPP;
}
#define I40E_IP_DUMMY_PACKET_LEN 34
/**
 * i40e_add_del_fdir_ipv4 - Add/Remove IPv4 Flow Director filters for
 * a specific flow spec
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @raw_packet: the pre-allocated packet buffer for FDir
 * @add: true adds a filter, false removes it
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
				  struct i40e_fdir_filter *fd_data,
				  u8 *raw_packet, bool add)
{
	struct i40e_pf *pf = vsi->back;
	struct iphdr *ip;
	bool err = false;
	int ret;
	int i;
	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
		0x45, 0, 0, 0x14, 0, 0, 0x40, 0, 0x40, 0x10, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0};

	memcpy(raw_packet, packet, I40E_IP_DUMMY_PACKET_LEN);
	ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);

	ip->saddr = fd_data->src_ip[0];
	ip->daddr = fd_data->dst_ip[0];
	ip->protocol = 0;

	for (i = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
	     i <= I40E_FILTER_PCTYPE_FRAG_IPV4; i++) {
		fd_data->pctype = i;
		ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);

		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Filter command send failed for PCTYPE %d (ret = %d)\n",
				 fd_data->pctype, ret);
			err = true;
		} else {
			dev_info(&pf->pdev->dev,
				 "Filter OK for PCTYPE %d (ret = %d)\n",
				 fd_data->pctype, ret);
		}
	}

	return err ? -EOPNOTSUPP : 0;
}
/**
 * i40e_add_del_fdir - Build raw packets to add/del fdir filter
 * @vsi: pointer to the targeted VSI
 * @input: the filter to add or delete
 * @add: true adds a filter, false removes it
 *
 **/
int i40e_add_del_fdir(struct i40e_vsi *vsi,
		      struct i40e_fdir_filter *input, bool add)
{
	struct i40e_pf *pf = vsi->back;
	u8 *raw_packet;
	int ret;

	/* Populate the Flow Director that we have at the moment
	 * and allocate the raw packet buffer for the calling functions
	 */
	raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
	if (!raw_packet)
		return -ENOMEM;

	switch (input->flow_type & ~FLOW_EXT) {
	case TCP_V4_FLOW:
		ret = i40e_add_del_fdir_tcpv4(vsi, input, raw_packet, add);
		break;
	case UDP_V4_FLOW:
		ret = i40e_add_del_fdir_udpv4(vsi, input, raw_packet, add);
		break;
	case SCTP_V4_FLOW:
		ret = i40e_add_del_fdir_sctpv4(vsi, input, raw_packet, add);
		break;
	case IPV4_FLOW:
		ret = i40e_add_del_fdir_ipv4(vsi, input, raw_packet, add);
		break;
	case IP_USER_FLOW:
		switch (input->ip4_proto) {
		case IPPROTO_TCP:
			ret = i40e_add_del_fdir_tcpv4(vsi, input, raw_packet, add);
			break;
		case IPPROTO_UDP:
			ret = i40e_add_del_fdir_udpv4(vsi, input, raw_packet, add);
			break;
		case IPPROTO_SCTP:
			ret = i40e_add_del_fdir_sctpv4(vsi, input, raw_packet, add);
			break;
		default:
			ret = i40e_add_del_fdir_ipv4(vsi, input, raw_packet, add);
			break;
		}
		break;
	default:
		dev_info(&pf->pdev->dev, "Could not specify spec type %d\n",
			 input->flow_type);
		ret = -EINVAL;
	}

	kfree(raw_packet);
	return ret;
}
/**
 * i40e_fd_handle_status - check the Programming Status for FD
 * @rx_ring: the Rx ring for this descriptor
 * @rx_desc: the Rx descriptor for programming Status, not a packet descriptor.
 * @prog_id: the id originally used for programming
 *
 * This is used to verify if the FD programming or invalidation
 * requested by SW to the HW is successful or not and take actions accordingly.
 **/
static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
				  union i40e_rx_desc *rx_desc, u8 prog_id)
{
	struct i40e_pf *pf = rx_ring->vsi->back;
	struct pci_dev *pdev = pf->pdev;
	u32 fcnt_prog, fcnt_avail;
	u32 error;
	u64 qw;

	qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
	error = (qw & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
		I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;

	if (error == (0x1 << I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
		dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n",
			 rx_desc->wb.qword0.hi_dword.fd_id);

		/* filter programming failed most likely due to table full */
		fcnt_prog = i40e_get_cur_guaranteed_fd_count(pf);
		fcnt_avail = pf->fdir_pf_filter_count;
		/* If ATR is running fcnt_prog can quickly change,
		 * if we are very close to full, it makes sense to disable
		 * FD ATR/SB and then re-enable it when there is room.
		 */
		if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) {
			/* Turn off ATR first */
			if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
			    !(pf->auto_disable_flags &
			      I40E_FLAG_FD_ATR_ENABLED)) {
				dev_warn(&pdev->dev, "FD filter space full, ATR for further flows will be turned off\n");
				pf->auto_disable_flags |=
						       I40E_FLAG_FD_ATR_ENABLED;
				pf->flags |= I40E_FLAG_FDIR_REQUIRES_REINIT;
			} else if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
				   !(pf->auto_disable_flags &
				     I40E_FLAG_FD_SB_ENABLED)) {
				dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
				pf->auto_disable_flags |=
							I40E_FLAG_FD_SB_ENABLED;
				pf->flags |= I40E_FLAG_FDIR_REQUIRES_REINIT;
			}
		} else {
			dev_info(&pdev->dev, "FD filter programming error\n");
		}
	} else if (error ==
		   (0x1 << I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
		if (I40E_DEBUG_FD & pf->hw.debug_mask)
			dev_info(&pdev->dev, "ntuple filter loc = %d, could not be removed\n",
				 rx_desc->wb.qword0.hi_dword.fd_id);
	}
}
/**
 * i40e_unmap_and_free_tx_resource - Release a Tx buffer
 * @ring:      the ring that owns the buffer
 * @tx_buffer: the buffer to free
 **/
static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
					    struct i40e_tx_buffer *tx_buffer)
{
	if (tx_buffer->skb) {
		dev_kfree_skb_any(tx_buffer->skb);
		if (dma_unmap_len(tx_buffer, len))
			dma_unmap_single(ring->dev,
					 dma_unmap_addr(tx_buffer, dma),
					 dma_unmap_len(tx_buffer, len),
					 DMA_TO_DEVICE);
	} else if (dma_unmap_len(tx_buffer, len)) {
		dma_unmap_page(ring->dev,
			       dma_unmap_addr(tx_buffer, dma),
			       dma_unmap_len(tx_buffer, len),
			       DMA_TO_DEVICE);
	}
	tx_buffer->next_to_watch = NULL;
	tx_buffer->skb = NULL;
	dma_unmap_len_set(tx_buffer, len, 0);
	/* tx_buffer must be completely set up in the transmit path */
}
/**
 * i40e_clean_tx_ring - Free any empty Tx buffers
 * @tx_ring: ring to be cleaned
 **/
void i40e_clean_tx_ring(struct i40e_ring *tx_ring)
{
	unsigned long bi_size;
	u16 i;

	/* ring already cleared, nothing to do */
	if (!tx_ring->tx_bi)
		return;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++)
		i40e_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]);

	bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
	memset(tx_ring->tx_bi, 0, bi_size);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	if (!tx_ring->netdev)
		return;

	/* cleanup Tx queue statistics */
	netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
						  tx_ring->queue_index));
}
/**
 * i40e_free_tx_resources - Free Tx resources per queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
void i40e_free_tx_resources(struct i40e_ring *tx_ring)
{
	i40e_clean_tx_ring(tx_ring);
	kfree(tx_ring->tx_bi);
	tx_ring->tx_bi = NULL;

	if (tx_ring->desc) {
		dma_free_coherent(tx_ring->dev, tx_ring->size,
				  tx_ring->desc, tx_ring->dma);
		tx_ring->desc = NULL;
	}
}
/**
 * i40e_get_tx_pending - how many tx descriptors not processed
 * @tx_ring: the ring of descriptors
 *
 * Since there is no access to the ring head register
 * in XL710, we need to use our local copies
 **/
static u32 i40e_get_tx_pending(struct i40e_ring *ring)
{
	u32 ntu = ((ring->next_to_clean <= ring->next_to_use)
			? ring->next_to_use
			: ring->next_to_use + ring->count);

	return ntu - ring->next_to_clean;
}
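/* Note: when next_to_use has wrapped around below next_to_clean,
 * i40e_get_tx_pending() adds ring->count first so the pending-descriptor
 * count stays correct across the wrap.
 */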
/**
 * i40e_check_tx_hang - Is there a hang in the Tx queue
 * @tx_ring: the ring of descriptors
 **/
static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
{
	u32 tx_pending = i40e_get_tx_pending(tx_ring);
	bool ret = false;

	clear_check_for_tx_hang(tx_ring);

	/* Check for a hung queue, but be thorough. This verifies
	 * that a transmit has been completed since the previous
	 * check AND there is at least one packet pending. The
	 * ARMED bit is set to indicate a potential hang. The
	 * bit is cleared if a pause frame is received to remove
	 * false hang detection due to PFC or 802.3x frames. By
	 * requiring this to fail twice we avoid races with
	 * PFC clearing the ARMED bit and conditions where we
	 * run the check_tx_hang logic with a transmit completion
	 * pending but without time to complete it yet.
	 */
	if ((tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) &&
	    tx_pending) {
		/* make sure it is true for two checks in a row */
		ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED,
				       &tx_ring->state);
	} else {
		/* update completed stats and disarm the hang check */
		tx_ring->tx_stats.tx_done_old = tx_ring->stats.packets;
		clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state);
	}

	return ret;
}
/**
 * i40e_get_head - Retrieve head from head writeback
 * @tx_ring:  tx ring to fetch head of
 *
 * Returns value of Tx ring head based on value stored
 * in head write-back location
 **/
static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
{
	void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;

	return le32_to_cpu(*(volatile __le32 *)head);
}
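/* The write-back location read by i40e_get_head() is the extra u32 that
 * i40e_setup_tx_descriptors() allocates just past the last descriptor; the
 * hardware DMAs its current head index there since XL710 does not expose a
 * readable head register.
 */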
/**
 * i40e_clean_tx_irq - Reclaim resources after transmit completes
 * @tx_ring:  tx ring to clean
 * @budget:   how many cleans we're allowed
 *
 * Returns true if there's any budget left (e.g. the clean is finished)
 **/
static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
{
	u16 i = tx_ring->next_to_clean;
	struct i40e_tx_buffer *tx_buf;
	struct i40e_tx_desc *tx_head;
	struct i40e_tx_desc *tx_desc;
	unsigned int total_packets = 0;
	unsigned int total_bytes = 0;

	tx_buf = &tx_ring->tx_bi[i];
	tx_desc = I40E_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

	tx_head = I40E_TX_DESC(tx_ring, i40e_get_head(tx_ring));

	do {
		struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		read_barrier_depends();

		/* we have caught up to head, no work left to do */
		if (tx_head == tx_desc)
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buf->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buf->bytecount;
		total_packets += tx_buf->gso_segs;

		/* free the skb */
		dev_kfree_skb_any(tx_buf->skb);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buf, dma),
				 dma_unmap_len(tx_buf, len),
				 DMA_TO_DEVICE);

		/* clear tx_buffer data */
		tx_buf->skb = NULL;
		dma_unmap_len_set(tx_buf, len, 0);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {
			tx_buf++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buf = tx_ring->tx_bi;
				tx_desc = I40E_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buf, len)) {
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buf, dma),
					       dma_unmap_len(tx_buf, len),
					       DMA_TO_DEVICE);
				dma_unmap_len_set(tx_buf, len, 0);
			}
		}

		/* move us one more past the eop_desc for start of next pkt */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_bi;
			tx_desc = I40E_TX_DESC(tx_ring, 0);
		}

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;
	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->stats.bytes += total_bytes;
	tx_ring->stats.packets += total_packets;
	u64_stats_update_end(&tx_ring->syncp);
	tx_ring->q_vector->tx.total_bytes += total_bytes;
	tx_ring->q_vector->tx.total_packets += total_packets;

	if (check_for_tx_hang(tx_ring) && i40e_check_tx_hang(tx_ring)) {
		/* schedule immediate reset if we believe we hung */
		dev_info(tx_ring->dev, "Detected Tx Unit Hang\n"
			 "  VSI                  <%d>\n"
			 "  Tx Queue             <%d>\n"
			 "  next_to_use          <%x>\n"
			 "  next_to_clean        <%x>\n",
			 tx_ring->vsi->seid,
			 tx_ring->queue_index,
			 tx_ring->next_to_use, i);
		dev_info(tx_ring->dev, "tx_bi[next_to_clean]\n"
			 "  time_stamp           <%lx>\n"
			 "  jiffies              <%lx>\n",
			 tx_ring->tx_bi[i].time_stamp, jiffies);

		netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);

		dev_info(tx_ring->dev,
			 "tx hang detected on queue %d, resetting adapter\n",
			 tx_ring->queue_index);

		tx_ring->netdev->netdev_ops->ndo_tx_timeout(tx_ring->netdev);

		/* the adapter is about to reset, no point in enabling stuff */
		return true;
	}

	netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev,
						      tx_ring->queue_index),
				  total_packets, total_bytes);

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
	if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
		     (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->queue_index) &&
		    !test_bit(__I40E_DOWN, &tx_ring->vsi->state)) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);
			++tx_ring->tx_stats.restart_queue;
		}
	}

	return !!budget;
}
/**
 * i40e_set_new_dynamic_itr - Find new ITR level
 * @rc: structure containing ring performance data
 *
 * Stores a new ITR value based on packets and byte counts during
 * the last interrupt.  The advantage of per interrupt computation
 * is faster updates and more accurate ITR for the current traffic
 * pattern.  Constants in this function were computed based on
 * theoretical maximum wire speed and thresholds were set based on
 * testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 **/
static void i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
{
	enum i40e_latency_range new_latency_range = rc->latency_range;
	u32 new_itr = rc->itr;
	int bytes_per_int;

	if (rc->total_packets == 0 || !rc->itr)
		return;

	/* simple throttlerate management
	 *   0-10MB/s   lowest (100000 ints/s)
	 *  10-20MB/s   low    (20000 ints/s)
	 *  20-1249MB/s bulk   (8000 ints/s)
	 */
	bytes_per_int = rc->total_bytes / rc->itr;
	switch (rc->itr) {
	case I40E_LOWEST_LATENCY:
		if (bytes_per_int > 10)
			new_latency_range = I40E_LOW_LATENCY;
		break;
	case I40E_LOW_LATENCY:
		if (bytes_per_int > 20)
			new_latency_range = I40E_BULK_LATENCY;
		else if (bytes_per_int <= 10)
			new_latency_range = I40E_LOWEST_LATENCY;
		break;
	case I40E_BULK_LATENCY:
		if (bytes_per_int <= 20)
			rc->latency_range = I40E_LOW_LATENCY;
		break;
	}

	switch (new_latency_range) {
	case I40E_LOWEST_LATENCY:
		new_itr = I40E_ITR_100K;
		break;
	case I40E_LOW_LATENCY:
		new_itr = I40E_ITR_20K;
		break;
	case I40E_BULK_LATENCY:
		new_itr = I40E_ITR_8K;
		break;
	default:
		break;
	}

	if (new_itr != rc->itr) {
		/* do an exponential smoothing */
		new_itr = (10 * new_itr * rc->itr) /
			  ((9 * new_itr) + rc->itr);
		rc->itr = new_itr & I40E_MAX_ITR;
	}

	rc->total_bytes = 0;
	rc->total_packets = 0;
}
/**
 * i40e_update_dynamic_itr - Adjust ITR based on bytes per int
 * @q_vector: the vector to adjust
 **/
static void i40e_update_dynamic_itr(struct i40e_q_vector *q_vector)
{
	u16 vector = q_vector->vsi->base_vector + q_vector->v_idx;
	struct i40e_hw *hw = &q_vector->vsi->back->hw;
	u32 reg_addr;
	u16 old_itr;

	reg_addr = I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1);
	old_itr = q_vector->rx.itr;
	i40e_set_new_dynamic_itr(&q_vector->rx);
	if (old_itr != q_vector->rx.itr)
		wr32(hw, reg_addr, q_vector->rx.itr);

	reg_addr = I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1);
	old_itr = q_vector->tx.itr;
	i40e_set_new_dynamic_itr(&q_vector->tx);
	if (old_itr != q_vector->tx.itr)
		wr32(hw, reg_addr, q_vector->tx.itr);
}
/**
 * i40e_clean_programming_status - clean the programming status descriptor
 * @rx_ring: the rx ring that has this descriptor
 * @rx_desc: the rx descriptor written back by HW
 *
 * Flow director should handle FD_FILTER_STATUS to check its filter programming
 * status being successful or not and take actions accordingly. FCoE should
 * handle its context/filter programming/invalidation status and take actions.
 *
 **/
static void i40e_clean_programming_status(struct i40e_ring *rx_ring,
					  union i40e_rx_desc *rx_desc)
{
	u64 qw;
	u8 id;

	qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
	id = (qw & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >>
		  I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT;

	if (id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS)
		i40e_fd_handle_status(rx_ring, rx_desc, id);
}
/**
 * i40e_setup_tx_descriptors - Allocate the Tx descriptors
 * @tx_ring: the tx ring to set up
 *
 * Return 0 on success, negative on error
 **/
int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)
{
	struct device *dev = tx_ring->dev;
	int bi_size;

	if (!dev)
		return -ENOMEM;

	bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
	tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL);
	if (!tx_ring->tx_bi)
		goto err;

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);
	/* add u32 for head writeback, align after this takes care of
	 * guaranteeing this is at least one cache line in size
	 */
	tx_ring->size += sizeof(u32);
	tx_ring->size = ALIGN(tx_ring->size, 4096);
	tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
					   &tx_ring->dma, GFP_KERNEL);
	if (!tx_ring->desc) {
		dev_info(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
			 tx_ring->size);
		goto err;
	}

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	return 0;

err:
	kfree(tx_ring->tx_bi);
	tx_ring->tx_bi = NULL;
	return -ENOMEM;
}
/**
 * i40e_clean_rx_ring - Free Rx buffers
 * @rx_ring: ring to be cleaned
 **/
void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	struct i40e_rx_buffer *rx_bi;
	unsigned long bi_size;
	u16 i;

	/* ring already cleared, nothing to do */
	if (!rx_ring->rx_bi)
		return;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		rx_bi = &rx_ring->rx_bi[i];
		if (rx_bi->dma) {
			dma_unmap_single(dev,
					 rx_bi->dma,
					 rx_ring->rx_buf_len,
					 DMA_FROM_DEVICE);
			rx_bi->dma = 0;
		}
		if (rx_bi->skb) {
			dev_kfree_skb(rx_bi->skb);
			rx_bi->skb = NULL;
		}
		if (rx_bi->page) {
			if (rx_bi->page_dma) {
				dma_unmap_page(dev,
					       rx_bi->page_dma,
					       PAGE_SIZE / 2,
					       DMA_FROM_DEVICE);
				rx_bi->page_dma = 0;
			}
			__free_page(rx_bi->page);
			rx_bi->page = NULL;
			rx_bi->page_offset = 0;
		}
	}

	bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
	memset(rx_ring->rx_bi, 0, bi_size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
}
/**
 * i40e_free_rx_resources - Free Rx resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
void i40e_free_rx_resources(struct i40e_ring *rx_ring)
{
	i40e_clean_rx_ring(rx_ring);
	kfree(rx_ring->rx_bi);
	rx_ring->rx_bi = NULL;

	if (rx_ring->desc) {
		dma_free_coherent(rx_ring->dev, rx_ring->size,
				  rx_ring->desc, rx_ring->dma);
		rx_ring->desc = NULL;
	}
}
/**
 * i40e_setup_rx_descriptors - Allocate Rx descriptors
 * @rx_ring: Rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	int bi_size;

	bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
	rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL);
	if (!rx_ring->rx_bi)
		goto err;

	/* Round up to nearest 4K */
	rx_ring->size = ring_is_16byte_desc_enabled(rx_ring)
		? rx_ring->count * sizeof(union i40e_16byte_rx_desc)
		: rx_ring->count * sizeof(union i40e_32byte_rx_desc);
	rx_ring->size = ALIGN(rx_ring->size, 4096);
	rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
					   &rx_ring->dma, GFP_KERNEL);

	if (!rx_ring->desc) {
		dev_info(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
			 rx_ring->size);
		goto err;
	}

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	return 0;
err:
	kfree(rx_ring->rx_bi);
	rx_ring->rx_bi = NULL;
	return -ENOMEM;
}
/**
 * i40e_release_rx_desc - Store the new tail and head values
 * @rx_ring: ring to bump
 * @val: new head index
 **/
static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
{
	rx_ring->next_to_use = val;
	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();
	writel(val, rx_ring->tail);
}
/**
 * i40e_alloc_rx_buffers - Replace used receive buffers; packet split
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 **/
void i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
{
	u16 i = rx_ring->next_to_use;
	union i40e_rx_desc *rx_desc;
	struct i40e_rx_buffer *bi;
	struct sk_buff *skb;

	/* do nothing if no valid netdev defined */
	if (!rx_ring->netdev || !cleaned_count)
		return;

	while (cleaned_count--) {
		rx_desc = I40E_RX_DESC(rx_ring, i);
		bi = &rx_ring->rx_bi[i];
		skb = bi->skb;

		if (!skb) {
			skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
							rx_ring->rx_buf_len);
			if (!skb) {
				rx_ring->rx_stats.alloc_buff_failed++;
				goto no_buffers;
			}
			/* initialize queue mapping */
			skb_record_rx_queue(skb, rx_ring->queue_index);
			bi->skb = skb;
		}

		if (!bi->dma) {
			bi->dma = dma_map_single(rx_ring->dev,
						 skb->data,
						 rx_ring->rx_buf_len,
						 DMA_FROM_DEVICE);
			if (dma_mapping_error(rx_ring->dev, bi->dma)) {
				rx_ring->rx_stats.alloc_buff_failed++;
				bi->dma = 0;
				goto no_buffers;
			}
		}

		if (ring_is_ps_enabled(rx_ring)) {
			if (!bi->page) {
				bi->page = alloc_page(GFP_ATOMIC);
				if (!bi->page) {
					rx_ring->rx_stats.alloc_page_failed++;
					goto no_buffers;
				}
			}

			if (!bi->page_dma) {
				/* use a half page if we're re-using */
				bi->page_offset ^= PAGE_SIZE / 2;
				bi->page_dma = dma_map_page(rx_ring->dev,
							    bi->page,
							    bi->page_offset,
							    PAGE_SIZE / 2,
							    DMA_FROM_DEVICE);
				if (dma_mapping_error(rx_ring->dev,
						      bi->page_dma)) {
					rx_ring->rx_stats.alloc_page_failed++;
					bi->page_dma = 0;
					goto no_buffers;
				}
			}

			/* Refresh the desc even if buffer_addrs didn't change
			 * because each write-back erases this info.
			 */
			rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
			rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
		} else {
			rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
			rx_desc->read.hdr_addr = 0;
		}
		i++;
		if (i == rx_ring->count)
			i = 0;
	}

no_buffers:
	if (rx_ring->next_to_use != i)
		i40e_release_rx_desc(rx_ring, i);
}
/**
 * i40e_receive_skb - Send a completed packet up the stack
 * @rx_ring:  rx ring in play
 * @skb: packet to send up
 * @vlan_tag: vlan tag for packet
 **/
static void i40e_receive_skb(struct i40e_ring *rx_ring,
			     struct sk_buff *skb, u16 vlan_tag)
{
	struct i40e_q_vector *q_vector = rx_ring->q_vector;
	struct i40e_vsi *vsi = rx_ring->vsi;
	u64 flags = vsi->back->flags;

	if (vlan_tag & VLAN_VID_MASK)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);

	if (flags & I40E_FLAG_IN_NETPOLL)
		netif_rx(skb);
	else
		napi_gro_receive(&q_vector->napi, skb);
}
/**
 * i40e_rx_checksum - Indicate in skb if hw indicated a good cksum
 * @vsi: the VSI we care about
 * @skb: skb currently being received and modified
 * @rx_status: status value of last descriptor in packet
 * @rx_error: error value of last descriptor in packet
 * @rx_ptype: ptype value of last descriptor in packet
 **/
static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
				    struct sk_buff *skb,
				    u32 rx_status,
				    u32 rx_error,
				    u16 rx_ptype)
{
	struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(rx_ptype);
	bool ipv4 = false, ipv6 = false;
	bool ipv4_tunnel, ipv6_tunnel;
	__wsum rx_udp_csum;
	struct iphdr *iph;
	__sum16 csum;

	ipv4_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT4_MAC_PAY3) &&
		      (rx_ptype < I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4);
	ipv6_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT6_MAC_PAY3) &&
		      (rx_ptype < I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4);

	skb->encapsulation = ipv4_tunnel || ipv6_tunnel;
	skb->ip_summed = CHECKSUM_NONE;

	/* Rx csum enabled and ip headers found? */
	if (!(vsi->netdev->features & NETIF_F_RXCSUM))
		return;

	/* did the hardware decode the packet and checksum? */
	if (!(rx_status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
		return;

	/* both known and outer_ip must be set for the below code to work */
	if (!(decoded.known && decoded.outer_ip))
		return;

	if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
	    decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4)
		ipv4 = true;
	else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
		 decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6)
		ipv6 = true;

	if (ipv4 &&
	    (rx_error & ((1 << I40E_RX_DESC_ERROR_IPE_SHIFT) |
			 (1 << I40E_RX_DESC_ERROR_EIPE_SHIFT))))
		goto checksum_fail;

	/* likely incorrect csum if alternate IP extension headers found */
	if (ipv6 &&
	    decoded.inner_prot == I40E_RX_PTYPE_INNER_PROT_TCP &&
	    rx_error & (1 << I40E_RX_DESC_ERROR_L4E_SHIFT) &&
	    rx_status & (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
		/* don't increment checksum err here, non-fatal err */
		return;

	/* there was some L4 error, count error and punt packet to the stack */
	if (rx_error & (1 << I40E_RX_DESC_ERROR_L4E_SHIFT))
		goto checksum_fail;

	/* handle packets that were not able to be checksummed due
	 * to arrival speed, in this case the stack can compute
	 * the csum.
	 */
	if (rx_error & (1 << I40E_RX_DESC_ERROR_PPRS_SHIFT))
		return;

	/* If VXLAN traffic has an outer UDPv4 checksum we need to check
	 * it in the driver, hardware does not do it for us.
	 * Since L3L4P bit was set we assume a valid IHL value (>=5)
	 * so the total length of IPv4 header is IHL*4 bytes
	 * The UDP_0 bit *may* bet set if the *inner* header is UDP
	 */
	if (ipv4_tunnel &&
	    (decoded.inner_prot != I40E_RX_PTYPE_INNER_PROT_UDP) &&
	    !(rx_status & (1 << I40E_RX_DESC_STATUS_UDP_0_SHIFT))) {
		skb->transport_header = skb->mac_header +
					sizeof(struct ethhdr) +
					(ip_hdr(skb)->ihl * 4);

		/* Add 4 bytes for VLAN tagged packets */
		skb->transport_header += (skb->protocol == htons(ETH_P_8021Q) ||
					  skb->protocol == htons(ETH_P_8021AD))
					  ? VLAN_HLEN : 0;

		rx_udp_csum = udp_csum(skb);
		iph = ip_hdr(skb);
		csum = csum_tcpudp_magic(
				iph->saddr, iph->daddr,
				(skb->len - skb_transport_offset(skb)),
				IPPROTO_UDP, rx_udp_csum);

		if (udp_hdr(skb)->check != csum)
			goto checksum_fail;
	}

	skb->ip_summed = CHECKSUM_UNNECESSARY;

	return;

checksum_fail:
	vsi->back->hw_csum_rx_error++;
}
/**
 * i40e_rx_hash - returns the hash value from the Rx descriptor
 * @ring: descriptor ring
 * @rx_desc: specific descriptor
 **/
static inline u32 i40e_rx_hash(struct i40e_ring *ring,
			       union i40e_rx_desc *rx_desc)
{
	const __le64 rss_mask =
		cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
			    I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);

	if ((ring->netdev->features & NETIF_F_RXHASH) &&
	    (rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask)
		return le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
	else
		return 0;
}
/**
 * i40e_ptype_to_hash - get a hash type
 * @ptype: the ptype value from the descriptor
 *
 * Returns a hash type to be used by skb_set_hash
 **/
static inline enum pkt_hash_types i40e_ptype_to_hash(u8 ptype)
{
	struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);

	if (!decoded.known)
		return PKT_HASH_TYPE_NONE;

	if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
	    decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4)
		return PKT_HASH_TYPE_L4;
	else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
		 decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3)
		return PKT_HASH_TYPE_L3;
	else
		return PKT_HASH_TYPE_L2;
}
/**
 * i40e_clean_rx_irq - Reclaim resources after receive completes
 * @rx_ring:  rx ring to clean
 * @budget:   how many cleans we're allowed
 *
 * Returns true if there's any budget left (e.g. the clean is finished)
 **/
static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
	u16 rx_packet_len, rx_header_len, rx_sph, rx_hbo;
	u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
	const int current_node = numa_node_id();
	struct i40e_vsi *vsi = rx_ring->vsi;
	u16 i = rx_ring->next_to_clean;
	union i40e_rx_desc *rx_desc;
	u32 rx_error, rx_status;
	u8 rx_ptype;
	u64 qword;
	u16 vlan_tag;

	rx_desc = I40E_RX_DESC(rx_ring, i);
	qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
	rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
		    I40E_RXD_QW1_STATUS_SHIFT;

	while (rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)) {
		union i40e_rx_desc *next_rxd;
		struct i40e_rx_buffer *rx_bi;
		struct sk_buff *skb;
		u16 len;

		if (i40e_rx_is_programming_status(qword)) {
			i40e_clean_programming_status(rx_ring, rx_desc);
			I40E_RX_NEXT_DESC_PREFETCH(rx_ring, i, next_rxd);
			goto next_desc;
		}
		rx_bi = &rx_ring->rx_bi[i];
		skb = rx_bi->skb;
		prefetch(skb->data);

		rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
				I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
		rx_header_len = (qword & I40E_RXD_QW1_LENGTH_HBUF_MASK) >>
				I40E_RXD_QW1_LENGTH_HBUF_SHIFT;
		rx_sph = (qword & I40E_RXD_QW1_LENGTH_SPH_MASK) >>
			 I40E_RXD_QW1_LENGTH_SPH_SHIFT;

		rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
			   I40E_RXD_QW1_ERROR_SHIFT;
		rx_hbo = rx_error & (1 << I40E_RX_DESC_ERROR_HBO_SHIFT);
		rx_error &= ~(1 << I40E_RX_DESC_ERROR_HBO_SHIFT);

		rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
			   I40E_RXD_QW1_PTYPE_SHIFT;
		rx_bi->skb = NULL;

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * STATUS_DD bit is set
		 */
		rmb();

		/* Get the header and possibly the whole packet
		 * If this is an skb from previous receive dma will be 0
		 */
		if (rx_bi->dma) {
			if (rx_hbo)
				len = I40E_RX_HDR_SIZE;
			else if (rx_sph)
				len = rx_header_len;
			else if (rx_packet_len)
				len = rx_packet_len;   /* 1buf/no split found */
			else
				len = rx_header_len;   /* split always mode */

			skb_put(skb, len);
			dma_unmap_single(rx_ring->dev,
					 rx_bi->dma,
					 rx_ring->rx_buf_len,
					 DMA_FROM_DEVICE);
			rx_bi->dma = 0;
		}

		/* Get the rest of the data if this was a header split */
		if (ring_is_ps_enabled(rx_ring) && rx_packet_len) {

			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
					   rx_bi->page,
					   rx_bi->page_offset,
					   rx_packet_len);

			skb->len += rx_packet_len;
			skb->data_len += rx_packet_len;
			skb->truesize += rx_packet_len;

			if ((page_count(rx_bi->page) == 1) &&
			    (page_to_nid(rx_bi->page) == current_node))
				get_page(rx_bi->page);
			else
				rx_bi->page = NULL;

			dma_unmap_page(rx_ring->dev,
				       rx_bi->page_dma,
				       PAGE_SIZE / 2,
				       DMA_FROM_DEVICE);
			rx_bi->page_dma = 0;
		}
		I40E_RX_NEXT_DESC_PREFETCH(rx_ring, i, next_rxd);

		if (unlikely(
		    !(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
			struct i40e_rx_buffer *next_buffer;

			next_buffer = &rx_ring->rx_bi[i];

			if (ring_is_ps_enabled(rx_ring)) {
				rx_bi->skb = next_buffer->skb;
				rx_bi->dma = next_buffer->dma;
				next_buffer->skb = skb;
				next_buffer->dma = 0;
			}
			rx_ring->rx_stats.non_eop_descs++;
			goto next_desc;
		}

		/* ERR_MASK will only have valid bits if EOP set */
		if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
			dev_kfree_skb_any(skb);
			/* TODO: shouldn't we increment a counter indicating the
			 * drop?
			 */
			goto next_desc;
		}

		skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc),
			     i40e_ptype_to_hash(rx_ptype));
		if (unlikely(rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK)) {
			i40e_ptp_rx_hwtstamp(vsi->back, skb, (rx_status &
					   I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
					   I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT);
			rx_ring->last_rx_timestamp = jiffies;
		}

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;
		total_rx_packets++;

		skb->protocol = eth_type_trans(skb, rx_ring->netdev);

		i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);

		vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
			 ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
			 : 0;
		i40e_receive_skb(rx_ring, skb, vlan_tag);

		rx_ring->netdev->last_rx = jiffies;
		budget--;
next_desc:
		rx_desc->wb.qword1.status_error_len = 0;
		if (!budget)
			break;

		cleaned_count++;
		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
			i40e_alloc_rx_buffers(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
		rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
			    I40E_RXD_QW1_STATUS_SHIFT;
	}

	rx_ring->next_to_clean = i;
	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->stats.packets += total_rx_packets;
	rx_ring->stats.bytes += total_rx_bytes;
	u64_stats_update_end(&rx_ring->syncp);
	rx_ring->q_vector->rx.total_packets += total_rx_packets;
	rx_ring->q_vector->rx.total_bytes += total_rx_bytes;

	if (cleaned_count)
		i40e_alloc_rx_buffers(rx_ring, cleaned_count);

	return budget > 0;
}
/**
 * i40e_napi_poll - NAPI polling Rx/Tx cleanup routine
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function will clean all queues associated with a q_vector.
 *
 * Returns the amount of work done
 **/
int i40e_napi_poll(struct napi_struct *napi, int budget)
{
	struct i40e_q_vector *q_vector =
			       container_of(napi, struct i40e_q_vector, napi);
	struct i40e_vsi *vsi = q_vector->vsi;
	struct i40e_ring *ring;
	bool clean_complete = true;
	int budget_per_ring;

	if (test_bit(__I40E_DOWN, &vsi->state)) {
		napi_complete(napi);
		return 0;
	}

	/* Since the actual Tx work is minimal, we can give the Tx a larger
	 * budget and be more aggressive about cleaning up the Tx descriptors.
	 */
	i40e_for_each_ring(ring, q_vector->tx)
		clean_complete &= i40e_clean_tx_irq(ring, vsi->work_limit);

	/* We attempt to distribute budget to each Rx queue fairly, but don't
	 * allow the budget to go below 1 because that would exit polling early.
	 */
	budget_per_ring = max(budget/q_vector->num_ringpairs, 1);

	i40e_for_each_ring(ring, q_vector->rx)
		clean_complete &= i40e_clean_rx_irq(ring, budget_per_ring);

	/* If work not completed, return budget and polling will return */
	if (!clean_complete)
		return budget;

	/* Work is done so exit the polling mode and re-enable the interrupt */
	napi_complete(napi);
	if (ITR_IS_DYNAMIC(vsi->rx_itr_setting) ||
	    ITR_IS_DYNAMIC(vsi->tx_itr_setting))
		i40e_update_dynamic_itr(q_vector);

	if (!test_bit(__I40E_DOWN, &vsi->state)) {
		if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
			i40e_irq_dynamic_enable(vsi,
					q_vector->v_idx + vsi->base_vector);
		} else {
			struct i40e_hw *hw = &vsi->back->hw;
			/* We re-enable the queue 0 cause, but
			 * don't worry about dynamic_enable
			 * because we left it on for the other
			 * possible interrupts during napi
			 */
			u32 qval = rd32(hw, I40E_QINT_RQCTL(0));
			qval |= I40E_QINT_RQCTL_CAUSE_ENA_MASK;
			wr32(hw, I40E_QINT_RQCTL(0), qval);

			qval = rd32(hw, I40E_QINT_TQCTL(0));
			qval |= I40E_QINT_TQCTL_CAUSE_ENA_MASK;
			wr32(hw, I40E_QINT_TQCTL(0), qval);

			i40e_irq_dynamic_enable_icr0(vsi->back);
		}
	}

	return 0;
}
/**
 * i40e_atr - Add a Flow Director ATR filter
 * @tx_ring:  ring to add programming descriptor to
 * @skb:      send buffer
 * @flags:    send flags
 * @protocol: wire protocol
 **/
static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
		     u32 flags, __be16 protocol)
{
	struct i40e_filter_program_desc *fdir_desc;
	struct i40e_pf *pf = tx_ring->vsi->back;
	union {
		unsigned char *network;
		struct iphdr *ipv4;
		struct ipv6hdr *ipv6;
	} hdr;
	struct tcphdr *th;
	unsigned int hlen;
	u32 flex_ptype, dtype_cmd;
	u16 i;

	/* make sure ATR is enabled */
	if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED))
		return;

	/* if sampling is disabled do nothing */
	if (!tx_ring->atr_sample_rate)
		return;

	/* snag network header to get L4 type and address */
	hdr.network = skb_network_header(skb);

	/* Currently only IPv4/IPv6 with TCP is supported */
	if (protocol == htons(ETH_P_IP)) {
		if (hdr.ipv4->protocol != IPPROTO_TCP)
			return;

		/* access ihl as a u8 to avoid unaligned access on ia64 */
		hlen = (hdr.network[0] & 0x0F) << 2;
	} else if (protocol == htons(ETH_P_IPV6)) {
		if (hdr.ipv6->nexthdr != IPPROTO_TCP)
			return;

		hlen = sizeof(struct ipv6hdr);
	} else {
		return;
	}

	th = (struct tcphdr *)(hdr.network + hlen);

	/* Due to lack of space, no more new filters can be programmed */
	if (th->syn && (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
		return;

	tx_ring->atr_count++;

	/* sample on all syn/fin/rst packets or once every atr sample rate */
	if (!th->fin &&
	    !th->syn &&
	    !th->rst &&
	    (tx_ring->atr_count < tx_ring->atr_sample_rate))
		return;

	tx_ring->atr_count = 0;

	/* grab the next descriptor */
	i = tx_ring->next_to_use;
	fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	flex_ptype = (tx_ring->queue_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
		      I40E_TXD_FLTR_QW0_QINDEX_MASK;
	flex_ptype |= (protocol == htons(ETH_P_IP)) ?
		      (I40E_FILTER_PCTYPE_NONF_IPV4_TCP <<
		       I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) :
		      (I40E_FILTER_PCTYPE_NONF_IPV6_TCP <<
		       I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);

	flex_ptype |= tx_ring->vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;

	dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG;

	dtype_cmd |= (th->fin || th->rst) ?
		     (I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
		      I40E_TXD_FLTR_QW1_PCMD_SHIFT) :
		     (I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
		      I40E_TXD_FLTR_QW1_PCMD_SHIFT);

	dtype_cmd |= I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX <<
		     I40E_TXD_FLTR_QW1_DEST_SHIFT;

	dtype_cmd |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID <<
		     I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;

	dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
	dtype_cmd |=
		((u32)pf->fd_atr_cnt_idx << I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
		I40E_TXD_FLTR_QW1_CNTINDEX_MASK;

	fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
	fdir_desc->rsvd = cpu_to_le32(0);
	fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
	fdir_desc->fd_id = cpu_to_le32(0);
}
/**
 * i40e_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
 * @skb:     send buffer
 * @tx_ring: ring to send buffer on
 * @flags:   the tx flags to be set
 *
 * Checks the skb and set up correspondingly several generic transmit flags
 * related to VLAN tagging for the HW, such as VLAN, DCB, etc.
 *
 * Returns error code indicate the frame should be dropped upon error and the
 * otherwise  returns 0 to indicate the flags has been set properly.
 **/
static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
				      struct i40e_ring *tx_ring,
				      u32 *flags)
{
	__be16 protocol = skb->protocol;
	u32  tx_flags = 0;

	/* if we have a HW VLAN tag being added, default to the HW one */
	if (vlan_tx_tag_present(skb)) {
		tx_flags |= vlan_tx_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
		tx_flags |= I40E_TX_FLAGS_HW_VLAN;
	/* else if it is a SW VLAN, check the next protocol and store the tag */
	} else if (protocol == htons(ETH_P_8021Q)) {
		struct vlan_hdr *vhdr, _vhdr;
		vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
		if (!vhdr)
			return -EINVAL;

		protocol = vhdr->h_vlan_encapsulated_proto;
		tx_flags |= ntohs(vhdr->h_vlan_TCI) << I40E_TX_FLAGS_VLAN_SHIFT;
		tx_flags |= I40E_TX_FLAGS_SW_VLAN;
	}

	/* Insert 802.1p priority into VLAN header */
	if ((tx_ring->vsi->back->flags & I40E_FLAG_DCB_ENABLED) &&
	    ((tx_flags & (I40E_TX_FLAGS_HW_VLAN | I40E_TX_FLAGS_SW_VLAN)) ||
	     (skb->priority != TC_PRIO_CONTROL))) {
		tx_flags &= ~I40E_TX_FLAGS_VLAN_PRIO_MASK;
		tx_flags |= (skb->priority & 0x7) <<
				I40E_TX_FLAGS_VLAN_PRIO_SHIFT;
		if (tx_flags & I40E_TX_FLAGS_SW_VLAN) {
			struct vlan_ethhdr *vhdr;
			int rc;

			rc = skb_cow_head(skb, 0);
			if (rc < 0)
				return rc;
			vhdr = (struct vlan_ethhdr *)skb->data;
			vhdr->h_vlan_TCI = htons(tx_flags >>
						 I40E_TX_FLAGS_VLAN_SHIFT);
		} else {
			tx_flags |= I40E_TX_FLAGS_HW_VLAN;
		}
	}
	*flags = tx_flags;
	return 0;
}
/**
 * i40e_tso - set up the tso context descriptor
 * @tx_ring:  ptr to the ring to send
 * @skb:      ptr to the skb we're sending
 * @tx_flags: the collected send information
 * @protocol: the send protocol
 * @hdr_len:  ptr to the size of the packet header
 * @cd_tunneling: ptr to context descriptor bits
 *
 * Returns 0 if no TSO can happen, 1 if tso is going, or error
 **/
static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
		    u32 tx_flags, __be16 protocol, u8 *hdr_len,
		    u64 *cd_type_cmd_tso_mss, u32 *cd_tunneling)
{
	u32 cd_cmd, cd_tso_len, cd_mss;
	struct ipv6hdr *ipv6h;
	struct tcphdr *tcph;
	struct iphdr *iph;
	u32 l4len;
	int err;

	if (!skb_is_gso(skb))
		return 0;

	err = skb_cow_head(skb, 0);
	if (err < 0)
		return err;

	if (protocol == htons(ETH_P_IP)) {
		iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
		tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
		iph->tot_len = 0;
		iph->check = 0;
		tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
						 0, IPPROTO_TCP, 0);
	} else if (skb_is_gso_v6(skb)) {

		ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb)
					   : ipv6_hdr(skb);
		tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
		ipv6h->payload_len = 0;
		tcph->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
					       0, IPPROTO_TCP, 0);
	}

	l4len = skb->encapsulation ? inner_tcp_hdrlen(skb) : tcp_hdrlen(skb);
	*hdr_len = (skb->encapsulation
		    ? (skb_inner_transport_header(skb) - skb->data)
		    : skb_transport_offset(skb)) + l4len;

	/* find the field values */
	cd_cmd = I40E_TX_CTX_DESC_TSO;
	cd_tso_len = skb->len - *hdr_len;
	cd_mss = skb_shinfo(skb)->gso_size;
	*cd_type_cmd_tso_mss |= ((u64)cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
				((u64)cd_tso_len <<
				 I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
				((u64)cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
	return 1;
}
/**
 * i40e_tsyn - set up the tsyn context descriptor
 * @tx_ring:  ptr to the ring to send
 * @skb:      ptr to the skb we're sending
 * @tx_flags: the collected send information
 *
 * Returns 0 if no Tx timestamp can happen and 1 if the timestamp will happen
 **/
static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb,
		     u32 tx_flags, u64 *cd_type_cmd_tso_mss)
{
	struct i40e_pf *pf;

	if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
		return 0;

	/* Tx timestamps cannot be sampled when doing TSO */
	if (tx_flags & I40E_TX_FLAGS_TSO)
		return 0;

	/* only timestamp the outbound packet if the user has requested it and
	 * we are not already transmitting a packet to be timestamped
	 */
	pf = i40e_netdev_to_pf(tx_ring->netdev);
	if (pf->ptp_tx && !pf->ptp_tx_skb) {
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		pf->ptp_tx_skb = skb_get(skb);
	} else {
		return 0;
	}

	*cd_type_cmd_tso_mss |= (u64)I40E_TX_CTX_DESC_TSYN <<
				I40E_TXD_CTX_QW1_CMD_SHIFT;

	return 1;
}
/**
 * i40e_tx_enable_csum - Enable Tx checksum offloads
 * @skb: send buffer
 * @tx_flags: Tx flags currently set
 * @td_cmd: Tx descriptor command bits to set
 * @td_offset: Tx descriptor header offsets to set
 * @cd_tunneling: ptr to context desc bits
 **/
static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
				u32 *td_cmd, u32 *td_offset,
				struct i40e_ring *tx_ring,
				u32 *cd_tunneling)
{
	struct ipv6hdr *this_ipv6_hdr;
	unsigned int this_tcp_hdrlen;
	struct iphdr *this_ip_hdr;
	u32 network_hdr_len;
	u8 l4_hdr = 0;

	if (skb->encapsulation) {
		network_hdr_len = skb_inner_network_header_len(skb);
		this_ip_hdr = inner_ip_hdr(skb);
		this_ipv6_hdr = inner_ipv6_hdr(skb);
		this_tcp_hdrlen = inner_tcp_hdrlen(skb);

		if (tx_flags & I40E_TX_FLAGS_IPV4) {
			if (tx_flags & I40E_TX_FLAGS_TSO) {
				*cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4;
				ip_hdr(skb)->check = 0;
			} else {
				*cd_tunneling |=
					 I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
			}
		} else if (tx_flags & I40E_TX_FLAGS_IPV6) {
			if (tx_flags & I40E_TX_FLAGS_TSO) {
				*cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
				ip_hdr(skb)->check = 0;
			} else {
				*cd_tunneling |=
					 I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
			}
		}

		/* Now set the ctx descriptor fields */
		*cd_tunneling |= (skb_network_header_len(skb) >> 2) <<
					I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT |
				   I40E_TXD_CTX_UDP_TUNNELING            |
				   ((skb_inner_network_offset(skb) -
					skb_transport_offset(skb)) >> 1) <<
				   I40E_TXD_CTX_QW0_NATLEN_SHIFT;

	} else {
		network_hdr_len = skb_network_header_len(skb);
		this_ip_hdr = ip_hdr(skb);
		this_ipv6_hdr = ipv6_hdr(skb);
		this_tcp_hdrlen = tcp_hdrlen(skb);
	}

	/* Enable IP checksum offloads */
	if (tx_flags & I40E_TX_FLAGS_IPV4) {
		l4_hdr = this_ip_hdr->protocol;
		/* the stack computes the IP header already, the only time we
		 * need the hardware to recompute it is in the case of TSO.
		 */
		if (tx_flags & I40E_TX_FLAGS_TSO) {
			*td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM;
			this_ip_hdr->check = 0;
		} else {
			*td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4;
		}
		/* Now set the td_offset for IP header length */
		*td_offset = (network_hdr_len >> 2) <<
			      I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
	} else if (tx_flags & I40E_TX_FLAGS_IPV6) {
		l4_hdr = this_ipv6_hdr->nexthdr;
		*td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
		/* Now set the td_offset for IP header length */
		*td_offset = (network_hdr_len >> 2) <<
			      I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
	}
	/* words in MACLEN + dwords in IPLEN + dwords in L4Len */
	*td_offset |= (skb_network_offset(skb) >> 1) <<
		       I40E_TX_DESC_LENGTH_MACLEN_SHIFT;

	/* Enable L4 checksum offloads */
	switch (l4_hdr) {
	case IPPROTO_TCP:
		/* enable checksum offloads */
		*td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
		*td_offset |= (this_tcp_hdrlen >> 2) <<
			       I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
		break;
	case IPPROTO_SCTP:
		/* enable SCTP checksum offload */
		*td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
		*td_offset |= (sizeof(struct sctphdr) >> 2) <<
			       I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
		break;
	case IPPROTO_UDP:
		/* enable UDP checksum offload */
		*td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
		*td_offset |= (sizeof(struct udphdr) >> 2) <<
			       I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
		break;
	default:
		break;
	}
}
/**
 * i40e_create_tx_ctx Build the Tx context descriptor
 * @tx_ring:  ring to create the descriptor on
 * @cd_type_cmd_tso_mss: Quad Word 1
 * @cd_tunneling: Quad Word 0 - bits 0-31
 * @cd_l2tag2: Quad Word 0 - bits 32-63
 **/
static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
			       const u64 cd_type_cmd_tso_mss,
			       const u32 cd_tunneling, const u32 cd_l2tag2)
{
	struct i40e_tx_context_desc *context_desc;
	int i = tx_ring->next_to_use;

	if ((cd_type_cmd_tso_mss == I40E_TX_DESC_DTYPE_CONTEXT) &&
	    !cd_tunneling && !cd_l2tag2)
		return;

	/* grab the next descriptor */
	context_desc = I40E_TX_CTXTDESC(tx_ring, i);

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	/* cpu_to_le32 and assign to struct fields */
	context_desc->tunneling_params = cpu_to_le32(cd_tunneling);
	context_desc->l2tag2 = cpu_to_le16(cd_l2tag2);
	context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
}
/**
 * i40e_tx_map - Build the Tx descriptor
 * @tx_ring:  ring to send buffer on
 * @skb:      send buffer
 * @first:    first buffer info buffer to use
 * @tx_flags: collected send information
 * @hdr_len:  size of the packet header
 * @td_cmd:   the command field in the descriptor
 * @td_offset: offset for checksum or crc
 **/
static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
			struct i40e_tx_buffer *first, u32 tx_flags,
			const u8 hdr_len, u32 td_cmd, u32 td_offset)
{
	unsigned int data_len = skb->data_len;
	unsigned int size = skb_headlen(skb);
	struct skb_frag_struct *frag;
	struct i40e_tx_buffer *tx_bi;
	struct i40e_tx_desc *tx_desc;
	u16 i = tx_ring->next_to_use;
	u32 td_tag = 0;
	dma_addr_t dma;
	u16 gso_segs;

	if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
		td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
		td_tag = (tx_flags & I40E_TX_FLAGS_VLAN_MASK) >>
			 I40E_TX_FLAGS_VLAN_SHIFT;
	}

	if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO))
		gso_segs = skb_shinfo(skb)->gso_segs;
	else
		gso_segs = 1;

	/* multiply data chunks by size of headers */
	first->bytecount = skb->len - hdr_len + (gso_segs * hdr_len);
	first->gso_segs = gso_segs;
	first->skb = skb;
	first->tx_flags = tx_flags;

	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);

	tx_desc = I40E_TX_DESC(tx_ring, i);
	tx_bi = first;

	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
		if (dma_mapping_error(tx_ring->dev, dma))
			goto dma_error;

		/* record length, and DMA address */
		dma_unmap_len_set(tx_bi, len, size);
		dma_unmap_addr_set(tx_bi, dma, dma);

		tx_desc->buffer_addr = cpu_to_le64(dma);

		while (unlikely(size > I40E_MAX_DATA_PER_TXD)) {
			tx_desc->cmd_type_offset_bsz =
				build_ctob(td_cmd, td_offset,
					   I40E_MAX_DATA_PER_TXD, td_tag);

			tx_desc++;
			i++;
			if (i == tx_ring->count) {
				tx_desc = I40E_TX_DESC(tx_ring, 0);
				i = 0;
			}

			dma += I40E_MAX_DATA_PER_TXD;
			size -= I40E_MAX_DATA_PER_TXD;

			tx_desc->buffer_addr = cpu_to_le64(dma);
		}

		if (likely(!data_len))
			break;

		tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
							  size, td_tag);

		tx_desc++;
		i++;
		if (i == tx_ring->count) {
			tx_desc = I40E_TX_DESC(tx_ring, 0);
			i = 0;
		}

		size = skb_frag_size(frag);
		data_len -= size;

		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
				       DMA_TO_DEVICE);

		tx_bi = &tx_ring->tx_bi[i];
	}

	/* Place RS bit on last descriptor of any packet that spans across the
	 * 4th descriptor (WB_STRIDE aka 0x3) in a 64B cacheline.
	 */
#define WB_STRIDE 0x3
	if (((i & WB_STRIDE) != WB_STRIDE) &&
	    (first <= &tx_ring->tx_bi[i]) &&
	    (first >= &tx_ring->tx_bi[i & ~WB_STRIDE])) {
		tx_desc->cmd_type_offset_bsz =
			build_ctob(td_cmd, td_offset, size, td_tag) |
			cpu_to_le64((u64)I40E_TX_DESC_CMD_EOP <<
					 I40E_TXD_QW1_CMD_SHIFT);
	} else {
		tx_desc->cmd_type_offset_bsz =
			build_ctob(td_cmd, td_offset, size, td_tag) |
			cpu_to_le64((u64)I40E_TXD_CMD <<
					 I40E_TXD_QW1_CMD_SHIFT);
	}

	netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev,
						 tx_ring->queue_index),
			     first->bytecount);

	/* set the timestamp */
	first->time_stamp = jiffies;

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();

	/* set next_to_watch value indicating a packet is present */
	first->next_to_watch = tx_desc;

	i++;
	if (i == tx_ring->count)
		i = 0;

	tx_ring->next_to_use = i;

	/* notify HW of packet */
	writel(i, tx_ring->tail);

	return;

dma_error:
	dev_info(tx_ring->dev, "TX DMA map failed\n");

	/* clear dma mappings for failed tx_bi map */
	for (;;) {
		tx_bi = &tx_ring->tx_bi[i];
		i40e_unmap_and_free_tx_resource(tx_ring, tx_bi);
		if (tx_bi == first)
			break;
		if (i == 0)
			i = tx_ring->count;
		i--;
	}

	tx_ring->next_to_use = i;
}
/**
 * __i40e_maybe_stop_tx - 2nd level check for tx stop conditions
 * @tx_ring: the ring to be checked
 * @size:    the size buffer we want to assure is available
 *
 * Returns -EBUSY if a stop is needed, else 0
 **/
static inline int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
{
	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
	/* Memory barrier before checking head and tail */
	smp_mb();

	/* Check again in a case another CPU has just made room available. */
	if (likely(I40E_DESC_UNUSED(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! - use start_queue because it doesn't call schedule */
	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
	++tx_ring->tx_stats.restart_queue;
	return 0;
}

/**
 * i40e_maybe_stop_tx - 1st level check for tx stop conditions
 * @tx_ring: the ring to be checked
 * @size:    the size buffer we want to assure is available
 *
 * Returns 0 if stop is not needed
 **/
static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
{
	if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
		return 0;
	return __i40e_maybe_stop_tx(tx_ring, size);
}
/**
 * i40e_xmit_descriptor_count - calculate number of tx descriptors needed
 * @skb:     send buffer
 * @tx_ring: ring to send buffer on
 *
 * Returns number of data descriptors needed for this skb. Returns 0 to indicate
 * there is not enough descriptors available in this ring since we need at least
 * one descriptor.
 **/
static int i40e_xmit_descriptor_count(struct sk_buff *skb,
				      struct i40e_ring *tx_ring)
{
	unsigned int f;
	int count = 0;

	/* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
	 *       + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD,
	 *       + 4 desc gap to avoid the cache line where head is,
	 *       + 1 desc for context descriptor,
	 * otherwise try next time
	 */
	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);

	count += TXD_USE_COUNT(skb_headlen(skb));
	if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
		tx_ring->tx_stats.tx_busy++;
		return 0;
	}
	return count;
}
/**
 * i40e_xmit_frame_ring - Sends buffer on Tx ring
 * @skb:     send buffer
 * @tx_ring: ring to send buffer on
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 **/
static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
					struct i40e_ring *tx_ring)
{
	u64 cd_type_cmd_tso_mss = I40E_TX_DESC_DTYPE_CONTEXT;
	u32 cd_tunneling = 0, cd_l2tag2 = 0;
	struct i40e_tx_buffer *first;
	u32 td_offset = 0;
	u32 tx_flags = 0;
	__be16 protocol;
	u32 td_cmd = 0;
	u8 hdr_len = 0;
	int tsyn;
	int tso;

	if (0 == i40e_xmit_descriptor_count(skb, tx_ring))
		return NETDEV_TX_BUSY;

	/* prepare the xmit flags */
	if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
		goto out_drop;

	/* obtain protocol of skb */
	protocol = skb->protocol;

	/* record the location of the first descriptor for this packet */
	first = &tx_ring->tx_bi[tx_ring->next_to_use];

	/* setup IPv4/IPv6 offloads */
	if (protocol == htons(ETH_P_IP))
		tx_flags |= I40E_TX_FLAGS_IPV4;
	else if (protocol == htons(ETH_P_IPV6))
		tx_flags |= I40E_TX_FLAGS_IPV6;

	tso = i40e_tso(tx_ring, skb, tx_flags, protocol, &hdr_len,
		       &cd_type_cmd_tso_mss, &cd_tunneling);

	if (tso < 0)
		goto out_drop;
	else if (tso)
		tx_flags |= I40E_TX_FLAGS_TSO;

	skb_tx_timestamp(skb);

	tsyn = i40e_tsyn(tx_ring, skb, tx_flags, &cd_type_cmd_tso_mss);

	if (tsyn)
		tx_flags |= I40E_TX_FLAGS_TSYN;

	/* always enable CRC insertion offload */
	td_cmd |= I40E_TX_DESC_CMD_ICRC;

	/* Always offload the checksum, since it's in the data descriptor */
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		tx_flags |= I40E_TX_FLAGS_CSUM;

		i40e_tx_enable_csum(skb, tx_flags, &td_cmd, &td_offset,
				    tx_ring, &cd_tunneling);
	}

	i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
			   cd_tunneling, cd_l2tag2);

	/* Add Flow Director ATR if it's enabled.
	 *
	 * NOTE: this must always be directly before the data descriptor.
	 */
	i40e_atr(tx_ring, skb, tx_flags, protocol);

	i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
		    td_cmd, td_offset);

	i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);

	return NETDEV_TX_OK;

out_drop:
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}
/**
 * i40e_lan_xmit_frame - Selects the correct VSI and Tx queue to send buffer
 * @skb:    send buffer
 * @netdev: network interface device structure
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 **/
netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_ring *tx_ring = vsi->tx_rings[skb->queue_mapping];

	/* hardware can't handle really short frames, hardware padding works
	 * beyond this point
	 */
	if (unlikely(skb->len < I40E_MIN_TX_LEN)) {
		if (skb_pad(skb, I40E_MIN_TX_LEN - skb->len))
			return NETDEV_TX_OK;
		skb->len = I40E_MIN_TX_LEN;
		skb_set_tail_pointer(skb, I40E_MIN_TX_LEN);
	}

	return i40e_xmit_frame_ring(skb, tx_ring);
}