/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include "en.h"
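/* Per-SQ DMA unmap bookkeeping.
 *
 * Every buffer mapped for a TX WQE is recorded in sq->dma_fifo so it can be
 * unmapped later, either on completion (mlx5e_poll_tx_cq) or on a mapping
 * error (mlx5e_dma_unmap_wqe_err). The helpers below index the fifo with a
 * producer counter (dma_fifo_pc) and a consumer counter (dma_fifo_cc),
 * wrapped by dma_fifo_mask; this assumes the fifo size is a power of two.
 */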
static void mlx5e_dma_pop_last_pushed(struct mlx5e_sq *sq, dma_addr_t *addr,
				      u32 *size)
{
	sq->dma_fifo_pc--;
	*addr = sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].addr;
	*size = sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].size;
}
static void mlx5e_dma_unmap_wqe_err(struct mlx5e_sq *sq, struct sk_buff *skb)
{
	dma_addr_t addr;
	u32 size;
	int i;

	for (i = 0; i < MLX5E_TX_SKB_CB(skb)->num_dma; i++) {
		mlx5e_dma_pop_last_pushed(sq, &addr, &size);
		dma_unmap_single(sq->pdev, addr, size, DMA_TO_DEVICE);
	}
}
static inline void mlx5e_dma_push(struct mlx5e_sq *sq, dma_addr_t addr,
				  u32 size)
{
	sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].addr = addr;
	sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].size = size;
	sq->dma_fifo_pc++;
}
static inline void mlx5e_dma_get(struct mlx5e_sq *sq, u32 i, dma_addr_t *addr,
				 u32 *size)
{
	*addr = sq->dma_fifo[i & sq->dma_fifo_mask].addr;
	*size = sq->dma_fifo[i & sq->dma_fifo_mask].size;
}
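/* Map an skb to a TX queue index: the traffic class derived from the skb
 * priority occupies the high bits, the channel chosen by the stack's fallback
 * occupies the low bits. Illustrative example (assumed values, not from the
 * driver): with 8 channels, order_base_2_num_channels is 3, so tc 2 on
 * channel 5 maps to txq (2 << 3) | 5 == 21.
 */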
u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
		       void *accel_priv, select_queue_fallback_t fallback)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	int channel_ix = fallback(dev, skb);
	int up = skb_vlan_tag_present(skb) ?
		 skb->vlan_tci >> VLAN_PRIO_SHIFT :
		 priv->default_vlan_prio;
	int tc = netdev_get_prio_tc_map(dev, up);

	return (tc << priv->order_base_2_num_channels) | channel_ix;
}
static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq,
					    struct sk_buff *skb)
{
#define MLX5E_MIN_INLINE 16 /* eth header with vlan (w/o next ethertype) */
	return MLX5E_MIN_INLINE;
}
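/* Build a VLAN-tagged copy of the packet headers directly in the WQE inline
 * area: copy the two MAC addresses, write the 802.1Q tag from skb metadata,
 * then copy the remaining ihs - 2 * ETH_ALEN - VLAN_HLEN header bytes. The
 * copied linear data is pulled from the skb so it is not sent again through
 * the data segments.
 */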
static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb, u16 ihs)
{
	struct vlan_ethhdr *vhdr = (struct vlan_ethhdr *)start;
	int cpy1_sz = 2 * ETH_ALEN;
	int cpy2_sz = ihs - cpy1_sz - VLAN_HLEN;

	skb_copy_from_linear_data(skb, vhdr, cpy1_sz);
	skb_pull_inline(skb, cpy1_sz);
	vhdr->h_vlan_proto = skb->vlan_proto;
	vhdr->h_vlan_TCI = cpu_to_be16(skb_vlan_tag_get(skb));
	skb_copy_from_linear_data(skb, &vhdr->h_vlan_encapsulated_proto,
				  cpy2_sz);
	skb_pull_inline(skb, cpy2_sz);
}
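/* Post one send WQE for an skb. A WQE is built from a control segment, an
 * Ethernet segment carrying the inlined packet headers, and one data segment
 * per DMA-mapped buffer (linear part and page fragments). The skb itself is
 * kept in sq->skb[] until the matching completion is polled.
 */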
static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
{
	struct mlx5_wq_cyc       *wq   = &sq->wq;

	u16 pi = sq->pc & wq->sz_m1;
	struct mlx5e_tx_wqe      *wqe  = mlx5_wq_cyc_get_wqe(wq, pi);

	struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
	struct mlx5_wqe_eth_seg  *eseg = &wqe->eth;
	struct mlx5_wqe_data_seg *dseg;

	u8  opcode = MLX5_OPCODE_SEND;
	dma_addr_t dma_addr = 0;
	u16 headlen;
	u16 ds_cnt;
	u16 ihs;
	int i;

	memset(wqe, 0, sizeof(*wqe));
	if (likely(skb->ip_summed == CHECKSUM_PARTIAL))
		eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM;
	else
		sq->stats.csum_offload_none++;
	if (skb_is_gso(skb)) {
		u32 payload_len;
		int num_pkts;

		eseg->mss    = cpu_to_be16(skb_shinfo(skb)->gso_size);
		opcode       = MLX5_OPCODE_LSO;
		ihs          = skb_transport_offset(skb) + tcp_hdrlen(skb);
		payload_len  = skb->len - ihs;
		num_pkts     = (payload_len / skb_shinfo(skb)->gso_size) +
			       !!(payload_len % skb_shinfo(skb)->gso_size);
		MLX5E_TX_SKB_CB(skb)->num_bytes = skb->len +
						  (num_pkts - 1) * ihs;
		sq->stats.tso_packets++;
		sq->stats.tso_bytes += payload_len;
	} else {
		ihs = mlx5e_get_inline_hdr_size(sq, skb);
		MLX5E_TX_SKB_CB(skb)->num_bytes = max_t(unsigned int, skb->len,
							ETH_ZLEN);
	}
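	/* num_bytes above is the byte count that will actually hit the wire:
	 * for GSO the headers are replicated once per segment, for non-GSO
	 * frames it is rounded up to a minimum frame length. Illustrative
	 * numbers (not from the driver): a 9054-byte skb with 54 bytes of
	 * headers and gso_size 1448 is cut into 7 segments, so
	 * num_bytes = 9054 + 6 * 54 = 9378.
	 */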
	if (skb_vlan_tag_present(skb)) {
		mlx5e_insert_vlan(eseg->inline_hdr_start, skb, ihs);
	} else {
		skb_copy_from_linear_data(skb, eseg->inline_hdr_start, ihs);
		skb_pull_inline(skb, ihs);
	}

	eseg->inline_hdr_sz = cpu_to_be16(ihs);

	ds_cnt  = sizeof(*wqe) / MLX5_SEND_WQE_DS;
	ds_cnt += DIV_ROUND_UP(ihs - sizeof(eseg->inline_hdr_start),
			       MLX5_SEND_WQE_DS);
	dseg    = (struct mlx5_wqe_data_seg *)cseg + ds_cnt;

	MLX5E_TX_SKB_CB(skb)->num_dma = 0;
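	/* ds_cnt counts the WQE in 16-byte data-segment units: the fixed
	 * ctrl/eth part plus whatever the inline headers spill over, so dseg
	 * now points at the first free data segment. Each DMA-mapped buffer
	 * below consumes one more data segment and is recorded in the dma
	 * fifo for later unmapping.
	 */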
	headlen = skb_headlen(skb);
	if (headlen) {
		dma_addr = dma_map_single(sq->pdev, skb->data, headlen,
					  DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
			goto dma_unmap_wqe_err;

		dseg->addr       = cpu_to_be64(dma_addr);
		dseg->lkey       = sq->mkey_be;
		dseg->byte_count = cpu_to_be32(headlen);

		mlx5e_dma_push(sq, dma_addr, headlen);
		MLX5E_TX_SKB_CB(skb)->num_dma++;

		dseg++;
	}
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		int fsz = skb_frag_size(frag);

		dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz,
					    DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
			goto dma_unmap_wqe_err;

		dseg->addr       = cpu_to_be64(dma_addr);
		dseg->lkey       = sq->mkey_be;
		dseg->byte_count = cpu_to_be32(fsz);

		mlx5e_dma_push(sq, dma_addr, fsz);
		MLX5E_TX_SKB_CB(skb)->num_dma++;

		dseg++;
	}
	ds_cnt += MLX5E_TX_SKB_CB(skb)->num_dma;

	cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | opcode);
	cseg->qpn_ds           = cpu_to_be32((sq->sqn << 8) | ds_cnt);
	cseg->fm_ce_se         = MLX5_WQE_CTRL_CQ_UPDATE;

	sq->skb[pi] = skb;

	MLX5E_TX_SKB_CB(skb)->num_wqebbs = DIV_ROUND_UP(ds_cnt,
							MLX5_SEND_WQEBB_NUM_DS);
	sq->pc += MLX5E_TX_SKB_CB(skb)->num_wqebbs;

	netdev_tx_sent_queue(sq->txq, MLX5E_TX_SKB_CB(skb)->num_bytes);
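	/* Stop the queue while there may not be room for a maximum-size WQE,
	 * so the next mlx5e_sq_xmit() can never overrun the ring; the queue
	 * is woken again from mlx5e_poll_tx_cq() once enough WQEBBs have
	 * completed.
	 */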
	if (unlikely(!mlx5e_sq_has_room_for(sq, MLX5_SEND_WQE_MAX_WQEBBS))) {
		netif_tx_stop_queue(sq->txq);
		sq->stats.stopped++;
	}

	if (!skb->xmit_more || netif_xmit_stopped(sq->txq))
		mlx5e_tx_notify_hw(sq, wqe);

	sq->stats.packets++;
	return NETDEV_TX_OK;

dma_unmap_wqe_err:
	sq->stats.dropped++;
	mlx5e_dma_unmap_wqe_err(sq, skb);

	dev_kfree_skb_any(skb);

	return NETDEV_TX_OK;
}
netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	int ix = skb->queue_mapping;
	int tc = 0;
	struct mlx5e_channel *c = priv->channel[ix];
	struct mlx5e_sq *sq = &c->sq[tc];

	return mlx5e_sq_xmit(sq, skb);
}
netdev_tx_t mlx5e_xmit_multi_tc(struct sk_buff *skb, struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	int ix = skb->queue_mapping & priv->queue_mapping_channel_mask;
	int tc = skb->queue_mapping >> priv->order_base_2_num_channels;
	struct mlx5e_channel *c = priv->channel[ix];
	struct mlx5e_sq *sq = &c->sq[tc];

	return mlx5e_sq_xmit(sq, skb);
}
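/* TX completion handler, called from the driver's NAPI poll. Walks up to
 * MLX5E_TX_CQ_POLL_BUDGET CQEs, unmaps the buffers recorded in the dma fifo,
 * frees the completed skbs and reopens the txq if it was stopped. Returns
 * true when the budget was exhausted and more CQEs may be pending.
 */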
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq)
{
	struct mlx5e_sq *sq;
	u32 dma_fifo_cc;
	u32 nbytes;
	u16 npkts;
	u16 sqcc;
	int i;

	/* avoid accessing cq (dma coherent memory) if not needed */
	if (!test_and_clear_bit(MLX5E_CQ_HAS_CQES, &cq->flags))
		return false;

	sq = container_of(cq, struct mlx5e_sq, cq);

	npkts = 0;
	nbytes = 0;

	/* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
	 * otherwise a cq overrun may occur
	 */
	sqcc = sq->cc;

	/* avoid dirtying sq cache line every cqe */
	dma_fifo_cc = sq->dma_fifo_cc;
	for (i = 0; i < MLX5E_TX_CQ_POLL_BUDGET; i++) {
		struct mlx5_cqe64 *cqe;
		struct sk_buff *skb;
		u16 ci;
		int j;

		cqe = mlx5e_get_cqe(cq);
		if (!cqe)
			break;

		ci = sqcc & sq->wq.sz_m1;
		skb = sq->skb[ci];

		if (unlikely(!skb)) { /* nop */
			sq->stats.nop++;
			sqcc++;
			continue;
		}

		for (j = 0; j < MLX5E_TX_SKB_CB(skb)->num_dma; j++) {
			dma_addr_t addr;
			u32 size;

			mlx5e_dma_get(sq, dma_fifo_cc, &addr, &size);
			dma_fifo_cc++;
			dma_unmap_single(sq->pdev, addr, size, DMA_TO_DEVICE);
		}

		npkts++;
		nbytes += MLX5E_TX_SKB_CB(skb)->num_bytes;
		sqcc += MLX5E_TX_SKB_CB(skb)->num_wqebbs;
		dev_kfree_skb(skb);
	}
	mlx5_cqwq_update_db_record(&cq->wq);

	/* ensure cq space is freed before enabling more cqes */
	wmb();

	sq->dma_fifo_cc = dma_fifo_cc;
	sq->cc = sqcc;

	netdev_tx_completed_queue(sq->txq, npkts, nbytes);

	if (netif_tx_queue_stopped(sq->txq) &&
	    mlx5e_sq_has_room_for(sq, MLX5_SEND_WQE_MAX_WQEBBS) &&
	    likely(test_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state))) {
		netif_tx_wake_queue(sq->txq);
		sq->stats.wake++;
	}

	if (i == MLX5E_TX_CQ_POLL_BUDGET) {
		set_bit(MLX5E_CQ_HAS_CQES, &cq->flags);
		return true;
	}

	return false;
}