/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include "en.h"

#define MLX5E_SQ_NOPS_ROOM  MLX5_SEND_WQE_MAX_WQEBBS
#define MLX5E_SQ_STOP_ROOM (MLX5_SEND_WQE_MAX_WQEBBS +\
                            MLX5E_SQ_NOPS_ROOM)
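/* The SQ is stopped once fewer than MLX5E_SQ_STOP_ROOM WQEBBs remain:
 * enough room for one maximum-size WQE plus the NOPs that may be posted
 * to pad the queue up to its edge (see the fill loop in mlx5e_sq_xmit()).
 */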
void mlx5e_send_nop(struct mlx5e_sq *sq, bool notify_hw)
{
        struct mlx5_wq_cyc *wq = &sq->wq;

        u16 pi = sq->pc & wq->sz_m1;
        struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);

        struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;

        memset(cseg, 0, sizeof(*cseg));

        cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_NOP);
        cseg->qpn_ds           = cpu_to_be32((sq->sqn << 8) | 0x01);

        sq->skb[pi] = NULL;
        sq->pc++;

        if (notify_hw) {
                cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
                mlx5e_tx_notify_hw(sq, wqe);
        }
}
static void mlx5e_dma_pop_last_pushed(struct mlx5e_sq *sq, dma_addr_t *addr,
                                      u32 *size)
{
        sq->dma_fifo_pc--;
        *addr = sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].addr;
        *size = sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].size;
}
static void mlx5e_dma_unmap_wqe_err(struct mlx5e_sq *sq, struct sk_buff *skb)
{
        dma_addr_t addr;
        u32 size;
        int i;

        for (i = 0; i < MLX5E_TX_SKB_CB(skb)->num_dma; i++) {
                mlx5e_dma_pop_last_pushed(sq, &addr, &size);
                dma_unmap_single(sq->pdev, addr, size, DMA_TO_DEVICE);
        }
}
static inline void mlx5e_dma_push(struct mlx5e_sq *sq, dma_addr_t addr,
                                  u32 size)
{
        sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].addr = addr;
        sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].size = size;
        sq->dma_fifo_pc++;
}
static inline void mlx5e_dma_get(struct mlx5e_sq *sq, u32 i, dma_addr_t *addr,
                                 u32 *size)
{
        *addr = sq->dma_fifo[i & sq->dma_fifo_mask].addr;
        *size = sq->dma_fifo[i & sq->dma_fifo_mask].size;
}
u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
                       void *accel_priv, select_queue_fallback_t fallback)
{
        struct mlx5e_priv *priv = netdev_priv(dev);
        int channel_ix = fallback(dev, skb);
        int up = skb_vlan_tag_present(skb) ?
                 skb->vlan_tci >> VLAN_PRIO_SHIFT :
                 priv->default_vlan_prio;
        int tc = netdev_get_prio_tc_map(dev, up);

        return priv->channel[channel_ix]->tc_to_txq_map[tc];
}
static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq,
                                            struct sk_buff *skb)
{
#define MLX5E_MIN_INLINE 16 /* eth header with vlan (w/o next ethertype) */
        return MLX5E_MIN_INLINE;
}
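/* mlx5e_sq_xmit() builds one send WQE per skb: a control segment, an eth
 * segment carrying the inlined headers (plus the MSS for LSO), and one data
 * segment per DMA-mapped piece of the skb. ds_cnt counts data segments and
 * is rounded up to whole WQE basic blocks (num_wqebbs) to advance sq->pc.
 */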
static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
{
        struct mlx5_wq_cyc       *wq   = &sq->wq;

        u16 pi = sq->pc & wq->sz_m1;
        struct mlx5e_tx_wqe      *wqe  = mlx5_wq_cyc_get_wqe(wq, pi);

        struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
        struct mlx5_wqe_eth_seg  *eseg = &wqe->eth;
        struct mlx5_wqe_data_seg *dseg;

        u8  opcode = MLX5_OPCODE_SEND;
        dma_addr_t dma_addr = 0;
        u16 headlen;
        u16 ds_cnt;
        u16 ihs;
        int i;

        memset(wqe, 0, sizeof(*wqe));

        if (likely(skb->ip_summed == CHECKSUM_PARTIAL))
                eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM;
        else
                sq->stats.csum_offload_none++;

        if (skb_is_gso(skb)) {
                u32 payload_len;

                eseg->mss    = cpu_to_be16(skb_shinfo(skb)->gso_size);
                opcode       = MLX5_OPCODE_LSO;
                ihs          = skb_transport_offset(skb) + tcp_hdrlen(skb);
                payload_len  = skb->len - ihs;
                MLX5E_TX_SKB_CB(skb)->num_bytes = skb->len +
                                (skb_shinfo(skb)->gso_segs - 1) * ihs;
                sq->stats.tso_packets++;
                sq->stats.tso_bytes += payload_len;
        } else {
                ihs = mlx5e_get_inline_hdr_size(sq, skb);
                MLX5E_TX_SKB_CB(skb)->num_bytes = max_t(unsigned int, skb->len,
                                                        ETH_ZLEN);
        }

        skb_copy_from_linear_data(skb, eseg->inline_hdr_start, ihs);
        skb_pull_inline(skb, ihs);

        eseg->inline_hdr_sz = cpu_to_be16(ihs);

        ds_cnt  = sizeof(*wqe) / MLX5_SEND_WQE_DS;
        ds_cnt += DIV_ROUND_UP(ihs - sizeof(eseg->inline_hdr_start),
                               MLX5_SEND_WQE_DS);
        dseg    = (struct mlx5_wqe_data_seg *)cseg + ds_cnt;

        MLX5E_TX_SKB_CB(skb)->num_dma = 0;

        headlen = skb_headlen(skb);

        dma_addr = dma_map_single(sq->pdev, skb->data, headlen,
                                  DMA_TO_DEVICE);
        if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
                goto dma_unmap_wqe_err;

        dseg->addr       = cpu_to_be64(dma_addr);
        dseg->lkey       = sq->mkey_be;
        dseg->byte_count = cpu_to_be32(headlen);

        mlx5e_dma_push(sq, dma_addr, headlen);
        MLX5E_TX_SKB_CB(skb)->num_dma++;

        dseg++;

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
                int fsz = skb_frag_size(frag);

                dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz,
                                            DMA_TO_DEVICE);
                if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
                        goto dma_unmap_wqe_err;

                dseg->addr       = cpu_to_be64(dma_addr);
                dseg->lkey       = sq->mkey_be;
                dseg->byte_count = cpu_to_be32(fsz);

                mlx5e_dma_push(sq, dma_addr, fsz);
                MLX5E_TX_SKB_CB(skb)->num_dma++;

                dseg++;
        }

        ds_cnt += MLX5E_TX_SKB_CB(skb)->num_dma;

        cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | opcode);
        cseg->qpn_ds           = cpu_to_be32((sq->sqn << 8) | ds_cnt);

        sq->skb[pi] = skb;

        MLX5E_TX_SKB_CB(skb)->num_wqebbs = DIV_ROUND_UP(ds_cnt,
                                                        MLX5_SEND_WQEBB_NUM_DS);
        sq->pc += MLX5E_TX_SKB_CB(skb)->num_wqebbs;

        netdev_tx_sent_queue(sq->txq, MLX5E_TX_SKB_CB(skb)->num_bytes);

        if (unlikely(!mlx5e_sq_has_room_for(sq, MLX5E_SQ_STOP_ROOM))) {
                netif_tx_stop_queue(sq->txq);
                sq->stats.stopped++;
        }

        if (!skb->xmit_more || netif_xmit_stopped(sq->txq)) {
                cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
                mlx5e_tx_notify_hw(sq, wqe);
        }

        /* fill sq edge with nops to avoid wqe wrap around */
        while ((sq->pc & wq->sz_m1) > sq->edge)
                mlx5e_send_nop(sq, false);

        sq->stats.packets++;
        return NETDEV_TX_OK;

dma_unmap_wqe_err:
        sq->stats.dropped++;
        mlx5e_dma_unmap_wqe_err(sq, skb);

        dev_kfree_skb_any(skb);

        return NETDEV_TX_OK;
}
netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct mlx5e_priv *priv = netdev_priv(dev);
        struct mlx5e_sq *sq = priv->txq_to_sq_map[skb_get_queue_mapping(skb)];

        return mlx5e_sq_xmit(sq, skb);
}
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq)
{
        struct mlx5e_sq *sq;
        u32 dma_fifo_cc;
        u32 nbytes;
        u16 npkts;
        u16 sqcc;
        int i;

        /* avoid accessing cq (dma coherent memory) if not needed */
        if (!test_and_clear_bit(MLX5E_CQ_HAS_CQES, &cq->flags))
                return false;

        sq = container_of(cq, struct mlx5e_sq, cq);

        npkts = 0;
        nbytes = 0;

        /* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
         * otherwise a cq overrun may occur
         */
        sqcc = sq->cc;

        /* avoid dirtying sq cache line every cqe */
        dma_fifo_cc = sq->dma_fifo_cc;

        for (i = 0; i < MLX5E_TX_CQ_POLL_BUDGET; i++) {
                struct mlx5_cqe64 *cqe;
                u16 wqe_counter;
                bool last_wqe;

                cqe = mlx5e_get_cqe(cq);
                if (!cqe)
                        break;

                mlx5_cqwq_pop(&cq->wq);

                wqe_counter = be16_to_cpu(cqe->wqe_counter);

                do {
                        struct sk_buff *skb;
                        u16 ci;
                        int j;

                        last_wqe = (sqcc == wqe_counter);

                        ci = sqcc & sq->wq.sz_m1;
                        skb = sq->skb[ci];

                        if (unlikely(!skb)) { /* nop */
                                sq->stats.nop++;
                                sqcc++;
                                continue;
                        }

                        for (j = 0; j < MLX5E_TX_SKB_CB(skb)->num_dma; j++) {
                                dma_addr_t addr;
                                u32 size;

                                mlx5e_dma_get(sq, dma_fifo_cc, &addr, &size);
                                dma_fifo_cc++;
                                dma_unmap_single(sq->pdev, addr, size,
                                                 DMA_TO_DEVICE);
                        }

                        npkts++;
                        nbytes += MLX5E_TX_SKB_CB(skb)->num_bytes;
                        sqcc += MLX5E_TX_SKB_CB(skb)->num_wqebbs;
                        dev_kfree_skb(skb);
                } while (!last_wqe);
        }

        mlx5_cqwq_update_db_record(&cq->wq);

        /* ensure cq space is freed before enabling more cqes */
        wmb();

        sq->dma_fifo_cc = dma_fifo_cc;
        sq->cc = sqcc;

        netdev_tx_completed_queue(sq->txq, npkts, nbytes);

        if (netif_tx_queue_stopped(sq->txq) &&
            mlx5e_sq_has_room_for(sq, MLX5E_SQ_STOP_ROOM) &&
            likely(test_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state))) {
                netif_tx_wake_queue(sq->txq);
                sq->stats.wake++;
        }

        if (i == MLX5E_TX_CQ_POLL_BUDGET) {
                set_bit(MLX5E_CQ_HAS_CQES, &cq->flags);
                return true;
        }

        return false;
}