/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/mlx5/fs.h>
struct mlx5e_rq_param {
	u32                        rqc[MLX5_ST_SZ_DW(rqc)];
	struct mlx5_wq_param       wq;
};

struct mlx5e_sq_param {
	u32                        sqc[MLX5_ST_SZ_DW(sqc)];
	struct mlx5_wq_param       wq;
	u16                        max_inline;
};

struct mlx5e_cq_param {
	u32                        cqc[MLX5_ST_SZ_DW(cqc)];
	struct mlx5_wq_param       wq;
	u16                        eq_ix;
};

struct mlx5e_channel_param {
	struct mlx5e_rq_param      rq;
	struct mlx5e_sq_param      sq;
	struct mlx5e_cq_param      rx_cq;
	struct mlx5e_cq_param      tx_cq;
};
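/*
 * Note: MLX5_ST_SZ_DW()/MLX5_ST_SZ_BYTES() size a firmware context
 * structure in 32-bit words and in bytes respectively, so each *_param
 * above carries a raw copy of the corresponding device context
 * (rqc/sqc/cqc) alongside the software work-queue parameters used to
 * create it.
 */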
static void mlx5e_update_carrier(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u8 port_state;

	port_state = mlx5_query_vport_state(mdev,
			MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT, 0);

	if (port_state == VPORT_STATE_UP)
		netif_carrier_on(priv->netdev);
	else
		netif_carrier_off(priv->netdev);
}
static void mlx5e_update_carrier_work(struct work_struct *work)
{
	struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
					       update_carrier_work);

	mutex_lock(&priv->state_lock);
	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
		mlx5e_update_carrier(priv);
	mutex_unlock(&priv->state_lock);
}
static void mlx5e_update_pport_counters(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_pport_stats *s = &priv->stats.pport;
	u32 *in;
	u32 *out;
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);

	in = mlx5_vzalloc(sz);
	out = mlx5_vzalloc(sz);
	if (!in || !out)
		goto free_out;

	MLX5_SET(ppcnt_reg, in, local_port, 1);

	MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out,
			     sz, MLX5_REG_PPCNT, 0, 0);
	memcpy(s->IEEE_802_3_counters,
	       MLX5_ADDR_OF(ppcnt_reg, out, counter_set),
	       sizeof(s->IEEE_802_3_counters));

	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out,
			     sz, MLX5_REG_PPCNT, 0, 0);
	memcpy(s->RFC_2863_counters,
	       MLX5_ADDR_OF(ppcnt_reg, out, counter_set),
	       sizeof(s->RFC_2863_counters));

	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out,
			     sz, MLX5_REG_PPCNT, 0, 0);
	memcpy(s->RFC_2819_counters,
	       MLX5_ADDR_OF(ppcnt_reg, out, counter_set),
	       sizeof(s->RFC_2819_counters));

free_out:
	kvfree(in);
	kvfree(out);
}
void mlx5e_update_stats(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_vport_stats *s = &priv->stats.vport;
	struct mlx5e_rq_stats *rq_stats;
	struct mlx5e_sq_stats *sq_stats;
	u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)];
	u32 *out;
	int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
	u64 tx_offload_none;
	int i, j;

	out = mlx5_vzalloc(outlen);
	if (!out)
		return;

	/* Collect first the SW counters and then HW for consistency */
	s->tso_packets		= 0;
	s->tso_bytes		= 0;
	s->tx_queue_stopped	= 0;
	s->tx_queue_wake	= 0;
	s->tx_queue_dropped	= 0;
	tx_offload_none		= 0;
	s->lro_packets		= 0;
	s->lro_bytes		= 0;
	s->rx_csum_none		= 0;
	s->rx_csum_sw		= 0;
	s->rx_wqe_err		= 0;
	for (i = 0; i < priv->params.num_channels; i++) {
		rq_stats = &priv->channel[i]->rq.stats;

		s->lro_packets	+= rq_stats->lro_packets;
		s->lro_bytes	+= rq_stats->lro_bytes;
		s->rx_csum_none	+= rq_stats->csum_none;
		s->rx_csum_sw	+= rq_stats->csum_sw;
		s->rx_wqe_err	+= rq_stats->wqe_err;

		for (j = 0; j < priv->params.num_tc; j++) {
			sq_stats = &priv->channel[i]->sq[j].stats;

			s->tso_packets		+= sq_stats->tso_packets;
			s->tso_bytes		+= sq_stats->tso_bytes;
			s->tx_queue_stopped	+= sq_stats->stopped;
			s->tx_queue_wake	+= sq_stats->wake;
			s->tx_queue_dropped	+= sq_stats->dropped;
			tx_offload_none		+= sq_stats->csum_offload_none;
		}
	}

	/* HW counters */
	memset(in, 0, sizeof(in));

	MLX5_SET(query_vport_counter_in, in, opcode,
		 MLX5_CMD_OP_QUERY_VPORT_COUNTER);
	MLX5_SET(query_vport_counter_in, in, op_mod, 0);
	MLX5_SET(query_vport_counter_in, in, other_vport, 0);

	memset(out, 0, outlen);

	if (mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen))
		goto free_out;

#define MLX5_GET_CTR(p, x) \
	MLX5_GET64(query_vport_counter_out, p, x)

	s->rx_error_packets =
		MLX5_GET_CTR(out, received_errors.packets);
	s->rx_error_bytes =
		MLX5_GET_CTR(out, received_errors.octets);
	s->tx_error_packets =
		MLX5_GET_CTR(out, transmit_errors.packets);
	s->tx_error_bytes =
		MLX5_GET_CTR(out, transmit_errors.octets);

	s->rx_unicast_packets =
		MLX5_GET_CTR(out, received_eth_unicast.packets);
	s->rx_unicast_bytes =
		MLX5_GET_CTR(out, received_eth_unicast.octets);
	s->tx_unicast_packets =
		MLX5_GET_CTR(out, transmitted_eth_unicast.packets);
	s->tx_unicast_bytes =
		MLX5_GET_CTR(out, transmitted_eth_unicast.octets);

	s->rx_multicast_packets =
		MLX5_GET_CTR(out, received_eth_multicast.packets);
	s->rx_multicast_bytes =
		MLX5_GET_CTR(out, received_eth_multicast.octets);
	s->tx_multicast_packets =
		MLX5_GET_CTR(out, transmitted_eth_multicast.packets);
	s->tx_multicast_bytes =
		MLX5_GET_CTR(out, transmitted_eth_multicast.octets);

	s->rx_broadcast_packets =
		MLX5_GET_CTR(out, received_eth_broadcast.packets);
	s->rx_broadcast_bytes =
		MLX5_GET_CTR(out, received_eth_broadcast.octets);
	s->tx_broadcast_packets =
		MLX5_GET_CTR(out, transmitted_eth_broadcast.packets);
	s->tx_broadcast_bytes =
		MLX5_GET_CTR(out, transmitted_eth_broadcast.octets);

	s->rx_packets =
		s->rx_unicast_packets +
		s->rx_multicast_packets +
		s->rx_broadcast_packets;
	s->rx_bytes =
		s->rx_unicast_bytes +
		s->rx_multicast_bytes +
		s->rx_broadcast_bytes;
	s->tx_packets =
		s->tx_unicast_packets +
		s->tx_multicast_packets +
		s->tx_broadcast_packets;
	s->tx_bytes =
		s->tx_unicast_bytes +
		s->tx_multicast_bytes +
		s->tx_broadcast_bytes;

	/* Update calculated offload counters */
	s->tx_csum_offload = s->tx_packets - tx_offload_none;
	s->rx_csum_good    = s->rx_packets - s->rx_csum_none -
			     s->rx_csum_sw;

	mlx5e_update_pport_counters(priv);

free_out:
	kvfree(out);
}
static void mlx5e_update_stats_work(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct mlx5e_priv *priv = container_of(dwork, struct mlx5e_priv,
					       update_stats_work);
	mutex_lock(&priv->state_lock);
	if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
		mlx5e_update_stats(priv);
		schedule_delayed_work(dwork,
				      msecs_to_jiffies(
					      MLX5E_UPDATE_STATS_INTERVAL));
	}
	mutex_unlock(&priv->state_lock);
}
static void __mlx5e_async_event(struct mlx5e_priv *priv,
				enum mlx5_dev_event event)
{
	switch (event) {
	case MLX5_DEV_EVENT_PORT_UP:
	case MLX5_DEV_EVENT_PORT_DOWN:
		schedule_work(&priv->update_carrier_work);
		break;

	default:
		break;
	}
}
static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
			      enum mlx5_dev_event event, unsigned long param)
{
	struct mlx5e_priv *priv = vpriv;

	spin_lock(&priv->async_events_spinlock);
	if (test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state))
		__mlx5e_async_event(priv, event);
	spin_unlock(&priv->async_events_spinlock);
}
static void mlx5e_enable_async_events(struct mlx5e_priv *priv)
{
	set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state);
}

static void mlx5e_disable_async_events(struct mlx5e_priv *priv)
{
	spin_lock_irq(&priv->async_events_spinlock);
	clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state);
	spin_unlock_irq(&priv->async_events_spinlock);
}
#define MLX5E_HW2SW_MTU(hwmtu) (hwmtu - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN))
#define MLX5E_SW2HW_MTU(swmtu) (swmtu + (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN))
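/*
 * The HW (port) MTU includes the L2 framing overhead that the SW (netdev)
 * MTU excludes: ETH_HLEN (14) + VLAN_HLEN (4) + ETH_FCS_LEN (4) = 22 bytes.
 * For example, a netdev MTU of 1500 maps to a port MTU of 1522, and
 * MLX5E_HW2SW_MTU(1522) yields 1500 again.
 */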
static int mlx5e_create_rq(struct mlx5e_channel *c,
			   struct mlx5e_rq_param *param,
			   struct mlx5e_rq *rq)
{
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;
	void *rqc = param->rqc;
	void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
	int wq_sz;
	int err;
	int i;

	param->wq.db_numa_node = cpu_to_node(c->cpu);

	err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wq,
				&rq->wq_ctrl);
	if (err)
		return err;

	rq->wq.db = &rq->wq.db[MLX5_RCV_DBR];

	wq_sz = mlx5_wq_ll_get_size(&rq->wq);
	rq->skb = kzalloc_node(wq_sz * sizeof(*rq->skb), GFP_KERNEL,
			       cpu_to_node(c->cpu));
	if (!rq->skb) {
		err = -ENOMEM;
		goto err_rq_wq_destroy;
	}

	rq->wqe_sz = (priv->params.lro_en) ? priv->params.lro_wqe_sz :
					     MLX5E_SW2HW_MTU(priv->netdev->mtu);
	rq->wqe_sz = SKB_DATA_ALIGN(rq->wqe_sz + MLX5E_NET_IP_ALIGN);

	for (i = 0; i < wq_sz; i++) {
		struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, i);
		u32 byte_count = rq->wqe_sz - MLX5E_NET_IP_ALIGN;

		wqe->data.lkey       = c->mkey_be;
		wqe->data.byte_count =
			cpu_to_be32(byte_count | MLX5_HW_START_PADDING);
	}

	rq->netdev  = c->netdev;
	rq->tstamp  = &priv->tstamp;
	rq->channel = c;
	rq->priv    = c->priv;

	return 0;

err_rq_wq_destroy:
	mlx5_wq_destroy(&rq->wq_ctrl);

	return err;
}
static void mlx5e_destroy_rq(struct mlx5e_rq *rq)
{
	kfree(rq->skb);
	mlx5_wq_destroy(&rq->wq_ctrl);
}
static int mlx5e_enable_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
{
	struct mlx5e_priv *priv = rq->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	void *in;
	void *rqc;
	void *wq;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(create_rq_in) +
		sizeof(u64) * rq->wq_ctrl.buf.npages;
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
	wq  = MLX5_ADDR_OF(rqc, rqc, wq);

	memcpy(rqc, param->rqc, sizeof(param->rqc));

	MLX5_SET(rqc,  rqc, cqn,		rq->cq.mcq.cqn);
	MLX5_SET(rqc,  rqc, state,		MLX5_RQC_STATE_RST);
	MLX5_SET(rqc,  rqc, flush_in_error_en,	1);
	MLX5_SET(wq,   wq,  log_wq_pg_sz,	rq->wq_ctrl.buf.page_shift -
						MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET64(wq, wq,  dbr_addr,		rq->wq_ctrl.db.dma);

	mlx5_fill_page_array(&rq->wq_ctrl.buf,
			     (__be64 *)MLX5_ADDR_OF(wq, wq, pas));

	err = mlx5_core_create_rq(mdev, in, inlen, &rq->rqn);

	kvfree(in);

	return err;
}
static int mlx5e_modify_rq(struct mlx5e_rq *rq, int curr_state, int next_state)
{
	struct mlx5e_channel *c = rq->channel;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	void *in;
	void *rqc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);

	MLX5_SET(modify_rq_in, in, rq_state, curr_state);
	MLX5_SET(rqc, rqc, state, next_state);

	err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);

	kvfree(in);

	return err;
}
static void mlx5e_disable_rq(struct mlx5e_rq *rq)
{
	mlx5_core_destroy_rq(rq->priv->mdev, rq->rqn);
}
static int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq)
{
	unsigned long exp_time = jiffies + msecs_to_jiffies(20000);
	struct mlx5e_channel *c = rq->channel;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_wq_ll *wq = &rq->wq;

	while (time_before(jiffies, exp_time)) {
		if (wq->cur_sz >= priv->params.min_rx_wqes)
			return 0;

		msleep(20);
	}

	return -ETIMEDOUT;
}
static int mlx5e_open_rq(struct mlx5e_channel *c,
			 struct mlx5e_rq_param *param,
			 struct mlx5e_rq *rq)
{
	int err;

	err = mlx5e_create_rq(c, param, rq);
	if (err)
		return err;

	err = mlx5e_enable_rq(rq, param);
	if (err)
		goto err_destroy_rq;

	err = mlx5e_modify_rq(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
	if (err)
		goto err_disable_rq;

	set_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state);
	mlx5e_send_nop(&c->sq[0], true); /* trigger mlx5e_post_rx_wqes() */

	return 0;

err_disable_rq:
	mlx5e_disable_rq(rq);
err_destroy_rq:
	mlx5e_destroy_rq(rq);

	return err;
}
static void mlx5e_close_rq(struct mlx5e_rq *rq)
{
	clear_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state);
	napi_synchronize(&rq->channel->napi); /* prevent mlx5e_post_rx_wqes */

	mlx5e_modify_rq(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR);
	while (!mlx5_wq_ll_is_empty(&rq->wq))
		msleep(20);

	/* avoid destroying rq before mlx5e_poll_rx_cq() is done with it */
	napi_synchronize(&rq->channel->napi);

	mlx5e_disable_rq(rq);
	mlx5e_destroy_rq(rq);
}
static void mlx5e_free_sq_db(struct mlx5e_sq *sq)
{
	kfree(sq->wqe_info);
	kfree(sq->dma_fifo);
	kfree(sq->skb);
}
static int mlx5e_alloc_sq_db(struct mlx5e_sq *sq, int numa)
{
	int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
	int df_sz = wq_sz * MLX5_SEND_WQEBB_NUM_DS;

	sq->skb = kzalloc_node(wq_sz * sizeof(*sq->skb), GFP_KERNEL, numa);
	sq->dma_fifo = kzalloc_node(df_sz * sizeof(*sq->dma_fifo), GFP_KERNEL,
				    numa);
	sq->wqe_info = kzalloc_node(wq_sz * sizeof(*sq->wqe_info), GFP_KERNEL,
				    numa);
	if (!sq->skb || !sq->dma_fifo || !sq->wqe_info) {
		mlx5e_free_sq_db(sq);
		return -ENOMEM;
	}

	sq->dma_fifo_mask = df_sz - 1;

	return 0;
}
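/*
 * Note: wq_sz is a power of two (the WQ is sized by log_wq_sz) and
 * MLX5_SEND_WQEBB_NUM_DS is too, so df_sz is a power of two as well;
 * dma_fifo_mask = df_sz - 1 then lets producers index the DMA fifo with
 * a cheap "i & mask" instead of a modulo.
 */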
static int mlx5e_create_sq(struct mlx5e_channel *c,
			   int tc,
			   struct mlx5e_sq_param *param,
			   struct mlx5e_sq *sq)
{
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	void *sqc = param->sqc;
	void *sqc_wq = MLX5_ADDR_OF(sqc, sqc, wq);
	int txq_ix;
	int err;

	err = mlx5_alloc_map_uar(mdev, &sq->uar);
	if (err)
		return err;

	param->wq.db_numa_node = cpu_to_node(c->cpu);

	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq,
				 &sq->wq_ctrl);
	if (err)
		goto err_unmap_free_uar;

	sq->wq.db       = &sq->wq.db[MLX5_SND_DBR];
	sq->uar_map     = sq->uar.map;
	sq->uar_bf_map  = sq->uar.bf_map;
	sq->bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;
	sq->max_inline  = param->max_inline;

	err = mlx5e_alloc_sq_db(sq, cpu_to_node(c->cpu));
	if (err)
		goto err_sq_wq_destroy;

	txq_ix = c->ix + tc * priv->params.num_channels;
	sq->txq = netdev_get_tx_queue(priv->netdev, txq_ix);

	sq->tstamp    = &priv->tstamp;
	sq->mkey_be   = c->mkey_be;
	sq->channel   = c;
	sq->tc        = tc;
	sq->edge      = (sq->wq.sz_m1 + 1) - MLX5_SEND_WQE_MAX_WQEBBS;
	sq->bf_budget = MLX5E_SQ_BF_BUDGET;
	priv->txq_to_sq_map[txq_ix] = sq;

	return 0;

err_sq_wq_destroy:
	mlx5_wq_destroy(&sq->wq_ctrl);

err_unmap_free_uar:
	mlx5_unmap_free_uar(mdev, &sq->uar);

	return err;
}
static void mlx5e_destroy_sq(struct mlx5e_sq *sq)
{
	struct mlx5e_channel *c = sq->channel;
	struct mlx5e_priv *priv = c->priv;

	mlx5e_free_sq_db(sq);
	mlx5_wq_destroy(&sq->wq_ctrl);
	mlx5_unmap_free_uar(priv->mdev, &sq->uar);
}
static int mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param)
{
	struct mlx5e_channel *c = sq->channel;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	void *in;
	void *sqc;
	void *wq;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
		sizeof(u64) * sq->wq_ctrl.buf.npages;
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
	wq = MLX5_ADDR_OF(sqc, sqc, wq);

	memcpy(sqc, param->sqc, sizeof(param->sqc));

	MLX5_SET(sqc,  sqc, tis_num_0,		priv->tisn[sq->tc]);
	MLX5_SET(sqc,  sqc, cqn,		c->sq[sq->tc].cq.mcq.cqn);
	MLX5_SET(sqc,  sqc, state,		MLX5_SQC_STATE_RST);
	MLX5_SET(sqc,  sqc, tis_lst_sz,		1);
	MLX5_SET(sqc,  sqc, flush_in_error_en,	1);

	MLX5_SET(wq,   wq, wq_type,       MLX5_WQ_TYPE_CYCLIC);
	MLX5_SET(wq,   wq, uar_page,      sq->uar.index);
	MLX5_SET(wq,   wq, log_wq_pg_sz,  sq->wq_ctrl.buf.page_shift -
					  MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET64(wq, wq, dbr_addr,      sq->wq_ctrl.db.dma);

	mlx5_fill_page_array(&sq->wq_ctrl.buf,
			     (__be64 *)MLX5_ADDR_OF(wq, wq, pas));

	err = mlx5_core_create_sq(mdev, in, inlen, &sq->sqn);

	kvfree(in);

	return err;
}
static int mlx5e_modify_sq(struct mlx5e_sq *sq, int curr_state, int next_state)
{
	struct mlx5e_channel *c = sq->channel;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	void *in;
	void *sqc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);

	MLX5_SET(modify_sq_in, in, sq_state, curr_state);
	MLX5_SET(sqc, sqc, state, next_state);

	err = mlx5_core_modify_sq(mdev, sq->sqn, in, inlen);

	kvfree(in);

	return err;
}
static void mlx5e_disable_sq(struct mlx5e_sq *sq)
{
	struct mlx5e_channel *c = sq->channel;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	mlx5_core_destroy_sq(mdev, sq->sqn);
}
static int mlx5e_open_sq(struct mlx5e_channel *c,
			 int tc,
			 struct mlx5e_sq_param *param,
			 struct mlx5e_sq *sq)
{
	int err;

	err = mlx5e_create_sq(c, tc, param, sq);
	if (err)
		return err;

	err = mlx5e_enable_sq(sq, param);
	if (err)
		goto err_destroy_sq;

	err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RST, MLX5_SQC_STATE_RDY);
	if (err)
		goto err_disable_sq;

	set_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state);
	netdev_tx_reset_queue(sq->txq);
	netif_tx_start_queue(sq->txq);

	return 0;

err_disable_sq:
	mlx5e_disable_sq(sq);
err_destroy_sq:
	mlx5e_destroy_sq(sq);

	return err;
}
static inline void netif_tx_disable_queue(struct netdev_queue *txq)
{
	__netif_tx_lock_bh(txq);
	netif_tx_stop_queue(txq);
	__netif_tx_unlock_bh(txq);
}
static void mlx5e_close_sq(struct mlx5e_sq *sq)
{
	clear_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state);
	napi_synchronize(&sq->channel->napi); /* prevent netif_tx_wake_queue */
	netif_tx_disable_queue(sq->txq);

	/* ensure hw is notified of all pending wqes */
	if (mlx5e_sq_has_room_for(sq, 1))
		mlx5e_send_nop(sq, true);

	mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY, MLX5_SQC_STATE_ERR);
	while (sq->cc != sq->pc) /* wait till sq is empty */
		msleep(20);

	/* avoid destroying sq before mlx5e_poll_tx_cq() is done with it */
	napi_synchronize(&sq->channel->napi);

	mlx5e_disable_sq(sq);
	mlx5e_destroy_sq(sq);
}
static int mlx5e_create_cq(struct mlx5e_channel *c,
			   struct mlx5e_cq_param *param,
			   struct mlx5e_cq *cq)
{
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5_core_cq *mcq = &cq->mcq;
	int eqn_not_used;
	unsigned int irqn;
	int err;
	u32 i;

	param->wq.buf_numa_node = cpu_to_node(c->cpu);
	param->wq.db_numa_node  = cpu_to_node(c->cpu);
	param->eq_ix = c->ix;

	err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
			       &cq->wq_ctrl);
	if (err)
		return err;

	mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);

	mcq->cqe_sz     = 64;
	mcq->set_ci_db  = cq->wq_ctrl.db.db;
	mcq->arm_db     = cq->wq_ctrl.db.db + 1;
	*mcq->set_ci_db = 0;
	*mcq->arm_db    = 0;
	mcq->vector     = param->eq_ix;
	mcq->comp       = mlx5e_completion_event;
	mcq->event      = mlx5e_cq_error_event;
	mcq->irqn       = irqn;
	mcq->uar        = &priv->cq_uar;

	for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
		struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);

		cqe->op_own = 0xf1;
	}

	cq->channel = c;
	cq->priv = priv;

	return 0;
}
static void mlx5e_destroy_cq(struct mlx5e_cq *cq)
{
	mlx5_wq_destroy(&cq->wq_ctrl);
}
static int mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
{
	struct mlx5e_priv *priv = cq->priv;
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5_core_cq *mcq = &cq->mcq;

	void *in;
	void *cqc;
	int inlen;
	unsigned int irqn_not_used;
	int eqn;
	int err;

	inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
		sizeof(u64) * cq->wq_ctrl.buf.npages;
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);

	memcpy(cqc, param->cqc, sizeof(param->cqc));

	mlx5_fill_page_array(&cq->wq_ctrl.buf,
			     (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));

	mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used);

	MLX5_SET(cqc,   cqc, c_eqn,         eqn);
	MLX5_SET(cqc,   cqc, uar_page,      mcq->uar->index);
	MLX5_SET(cqc,   cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
					    MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET64(cqc, cqc, dbr_addr,      cq->wq_ctrl.db.dma);

	err = mlx5_core_create_cq(mdev, mcq, in, inlen);

	kvfree(in);

	if (err)
		return err;

	mlx5e_cq_arm(cq);

	return 0;
}
static void mlx5e_disable_cq(struct mlx5e_cq *cq)
{
	struct mlx5e_priv *priv = cq->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	mlx5_core_destroy_cq(mdev, &cq->mcq);
}
static int mlx5e_open_cq(struct mlx5e_channel *c,
			 struct mlx5e_cq_param *param,
			 struct mlx5e_cq *cq,
			 u16 moderation_usecs,
			 u16 moderation_frames)
{
	int err;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	err = mlx5e_create_cq(c, param, cq);
	if (err)
		return err;

	err = mlx5e_enable_cq(cq, param);
	if (err)
		goto err_destroy_cq;

	err = mlx5_core_modify_cq_moderation(mdev, &cq->mcq,
					     moderation_usecs,
					     moderation_frames);
	if (err)
		goto err_destroy_cq;

	return 0;

err_destroy_cq:
	mlx5e_destroy_cq(cq);

	return err;
}
static void mlx5e_close_cq(struct mlx5e_cq *cq)
{
	mlx5e_disable_cq(cq);
	mlx5e_destroy_cq(cq);
}
static int mlx5e_get_cpu(struct mlx5e_priv *priv, int ix)
{
	return cpumask_first(priv->mdev->priv.irq_info[ix].mask);
}
static int mlx5e_open_tx_cqs(struct mlx5e_channel *c,
			     struct mlx5e_channel_param *cparam)
{
	struct mlx5e_priv *priv = c->priv;
	int err;
	int tc;

	for (tc = 0; tc < c->num_tc; tc++) {
		err = mlx5e_open_cq(c, &cparam->tx_cq, &c->sq[tc].cq,
				    priv->params.tx_cq_moderation_usec,
				    priv->params.tx_cq_moderation_pkts);
		if (err)
			goto err_close_tx_cqs;
	}

	return 0;

err_close_tx_cqs:
	for (tc--; tc >= 0; tc--)
		mlx5e_close_cq(&c->sq[tc].cq);

	return err;
}
static void mlx5e_close_tx_cqs(struct mlx5e_channel *c)
{
	int tc;

	for (tc = 0; tc < c->num_tc; tc++)
		mlx5e_close_cq(&c->sq[tc].cq);
}
static int mlx5e_open_sqs(struct mlx5e_channel *c,
			  struct mlx5e_channel_param *cparam)
{
	int err;
	int tc;

	for (tc = 0; tc < c->num_tc; tc++) {
		err = mlx5e_open_sq(c, tc, &cparam->sq, &c->sq[tc]);
		if (err)
			goto err_close_sqs;
	}

	return 0;

err_close_sqs:
	for (tc--; tc >= 0; tc--)
		mlx5e_close_sq(&c->sq[tc]);

	return err;
}
static void mlx5e_close_sqs(struct mlx5e_channel *c)
{
	int tc;

	for (tc = 0; tc < c->num_tc; tc++)
		mlx5e_close_sq(&c->sq[tc]);
}
static void mlx5e_build_channeltc_to_txq_map(struct mlx5e_priv *priv, int ix)
{
	int i;

	for (i = 0; i < MLX5E_MAX_NUM_TC; i++)
		priv->channeltc_to_txq_map[ix][i] =
			ix + i * priv->params.num_channels;
}
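/*
 * TXQs are laid out TC-major: txq = ix + tc * num_channels. For example,
 * with 4 channels and 2 TCs, channel 1 owns txqs 1 (tc 0) and 5 (tc 1).
 * mlx5e_create_sq() computes the same txq_ix when binding each SQ to its
 * netdev TX queue.
 */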
static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
			      struct mlx5e_channel_param *cparam,
			      struct mlx5e_channel **cp)
{
	struct net_device *netdev = priv->netdev;
	int cpu = mlx5e_get_cpu(priv, ix);
	struct mlx5e_channel *c;
	int err;

	c = kzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
	if (!c)
		return -ENOMEM;

	c->priv     = priv;
	c->ix       = ix;
	c->cpu      = cpu;
	c->pdev     = &priv->mdev->pdev->dev;
	c->netdev   = priv->netdev;
	c->mkey_be  = cpu_to_be32(priv->mr.key);
	c->num_tc   = priv->params.num_tc;

	mlx5e_build_channeltc_to_txq_map(priv, ix);

	netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64);

	err = mlx5e_open_tx_cqs(c, cparam);
	if (err)
		goto err_napi_del;

	err = mlx5e_open_cq(c, &cparam->rx_cq, &c->rq.cq,
			    priv->params.rx_cq_moderation_usec,
			    priv->params.rx_cq_moderation_pkts);
	if (err)
		goto err_close_tx_cqs;

	napi_enable(&c->napi);

	err = mlx5e_open_sqs(c, cparam);
	if (err)
		goto err_disable_napi;

	err = mlx5e_open_rq(c, &cparam->rq, &c->rq);
	if (err)
		goto err_close_sqs;

	netif_set_xps_queue(netdev, get_cpu_mask(c->cpu), ix);
	*cp = c;

	return 0;

err_close_sqs:
	mlx5e_close_sqs(c);

err_disable_napi:
	napi_disable(&c->napi);
	mlx5e_close_cq(&c->rq.cq);

err_close_tx_cqs:
	mlx5e_close_tx_cqs(c);

err_napi_del:
	netif_napi_del(&c->napi);
	napi_hash_del(&c->napi);
	kfree(c);

	return err;
}
static void mlx5e_close_channel(struct mlx5e_channel *c)
{
	mlx5e_close_rq(&c->rq);
	mlx5e_close_sqs(c);
	napi_disable(&c->napi);
	mlx5e_close_cq(&c->rq.cq);
	mlx5e_close_tx_cqs(c);
	netif_napi_del(&c->napi);
	napi_hash_del(&c->napi);
	kfree(c);
}
static void mlx5e_build_rq_param(struct mlx5e_priv *priv,
				 struct mlx5e_rq_param *param)
{
	void *rqc = param->rqc;
	void *wq = MLX5_ADDR_OF(rqc, rqc, wq);

	MLX5_SET(wq, wq, wq_type,          MLX5_WQ_TYPE_LINKED_LIST);
	MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
	MLX5_SET(wq, wq, log_wq_stride,    ilog2(sizeof(struct mlx5e_rx_wqe)));
	MLX5_SET(wq, wq, log_wq_sz,        priv->params.log_rq_size);
	MLX5_SET(wq, wq, pd,               priv->pdn);

	param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
	param->wq.linear = 1;
}
static void mlx5e_build_sq_param(struct mlx5e_priv *priv,
				 struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

	MLX5_SET(wq, wq, log_wq_sz,     priv->params.log_sq_size);
	MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
	MLX5_SET(wq, wq, pd,            priv->pdn);

	param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
	param->max_inline = priv->params.tx_max_inline;
}
static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
					struct mlx5e_cq_param *param)
{
	void *cqc = param->cqc;

	MLX5_SET(cqc, cqc, uar_page, priv->cq_uar.index);
}
static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
				    struct mlx5e_cq_param *param)
{
	void *cqc = param->cqc;

	MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_rq_size);

	mlx5e_build_common_cq_param(priv, param);
}

static void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv,
				    struct mlx5e_cq_param *param)
{
	void *cqc = param->cqc;

	MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_sq_size);

	mlx5e_build_common_cq_param(priv, param);
}
static void mlx5e_build_channel_param(struct mlx5e_priv *priv,
				      struct mlx5e_channel_param *cparam)
{
	memset(cparam, 0, sizeof(*cparam));

	mlx5e_build_rq_param(priv, &cparam->rq);
	mlx5e_build_sq_param(priv, &cparam->sq);
	mlx5e_build_rx_cq_param(priv, &cparam->rx_cq);
	mlx5e_build_tx_cq_param(priv, &cparam->tx_cq);
}
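/*
 * Each channel built from these parameters bundles one RQ with its RX CQ
 * and num_tc SQs, each with its own TX CQ, all serviced by a single NAPI
 * context on the channel's CPU (see mlx5e_open_channel() above).
 */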
static int mlx5e_open_channels(struct mlx5e_priv *priv)
{
	struct mlx5e_channel_param cparam;
	int nch = priv->params.num_channels;
	int err = -ENOMEM;
	int i;
	int j;

	priv->channel = kcalloc(nch, sizeof(struct mlx5e_channel *),
				GFP_KERNEL);

	priv->txq_to_sq_map = kcalloc(nch * priv->params.num_tc,
				      sizeof(struct mlx5e_sq *), GFP_KERNEL);

	if (!priv->channel || !priv->txq_to_sq_map)
		goto err_free_txq_to_sq_map;

	mlx5e_build_channel_param(priv, &cparam);
	for (i = 0; i < nch; i++) {
		err = mlx5e_open_channel(priv, i, &cparam, &priv->channel[i]);
		if (err)
			goto err_close_channels;
	}

	for (j = 0; j < nch; j++) {
		err = mlx5e_wait_for_min_rx_wqes(&priv->channel[j]->rq);
		if (err)
			goto err_close_channels;
	}

	return 0;

err_close_channels:
	for (i--; i >= 0; i--)
		mlx5e_close_channel(priv->channel[i]);

err_free_txq_to_sq_map:
	kfree(priv->txq_to_sq_map);
	kfree(priv->channel);

	return err;
}
static void mlx5e_close_channels(struct mlx5e_priv *priv)
{
	int i;

	for (i = 0; i < priv->params.num_channels; i++)
		mlx5e_close_channel(priv->channel[i]);

	kfree(priv->txq_to_sq_map);
	kfree(priv->channel);
}
static int mlx5e_rx_hash_fn(int hfunc)
{
	return (hfunc == ETH_RSS_HASH_TOP) ?
	       MLX5_RX_HASH_FN_TOEPLITZ :
	       MLX5_RX_HASH_FN_INVERTED_XOR8;
}
static int mlx5e_bits_invert(unsigned long a, int size)
{
	int inv = 0;
	int i;

	for (i = 0; i < size; i++)
		inv |= (test_bit(size - i - 1, &a) ? 1 : 0) << i;

	return inv;
}
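/*
 * Reverses the low 'size' bits of 'a'. For example, with size = 4,
 * mlx5e_bits_invert(0b0001, 4) == 0b1000, while a palindrome such as
 * 0b0110 maps to itself. The XOR8 RSS path below uses this to permute
 * indirection-table entries.
 */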
static void mlx5e_fill_indir_rqt_rqns(struct mlx5e_priv *priv, void *rqtc)
{
	int i;

	for (i = 0; i < MLX5E_INDIR_RQT_SIZE; i++) {
		int ix = i;

		if (priv->params.rss_hfunc == ETH_RSS_HASH_XOR)
			ix = mlx5e_bits_invert(i, MLX5E_LOG_INDIR_RQT_SIZE);

		ix = priv->params.indirection_rqt[ix];
		ix = ix % priv->params.num_channels;
		MLX5_SET(rqtc, rqtc, rq_num[i],
			 test_bit(MLX5E_STATE_OPENED, &priv->state) ?
			 priv->channel[ix]->rq.rqn :
			 priv->drop_rq.rqn);
	}
}
static void mlx5e_fill_rqt_rqns(struct mlx5e_priv *priv, void *rqtc,
				enum mlx5e_rqt_ix rqt_ix)
{
	switch (rqt_ix) {
	case MLX5E_INDIRECTION_RQT:
		mlx5e_fill_indir_rqt_rqns(priv, rqtc);
		break;

	default: /* MLX5E_SINGLE_RQ_RQT */
		MLX5_SET(rqtc, rqtc, rq_num[0],
			 test_bit(MLX5E_STATE_OPENED, &priv->state) ?
			 priv->channel[0]->rq.rqn :
			 priv->drop_rq.rqn);

		break;
	}
}
static int mlx5e_create_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 *in;
	void *rqtc;
	int inlen;
	int sz;
	int err;

	sz = (rqt_ix == MLX5E_SINGLE_RQ_RQT) ? 1 : MLX5E_INDIR_RQT_SIZE;

	inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);

	MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
	MLX5_SET(rqtc, rqtc, rqt_max_size, sz);

	mlx5e_fill_rqt_rqns(priv, rqtc, rqt_ix);

	err = mlx5_core_create_rqt(mdev, in, inlen, &priv->rqtn[rqt_ix]);

	kvfree(in);

	return err;
}
int mlx5e_redirect_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 *in;
	void *rqtc;
	int inlen;
	int sz;
	int err;

	sz = (rqt_ix == MLX5E_SINGLE_RQ_RQT) ? 1 : MLX5E_INDIR_RQT_SIZE;

	inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + sizeof(u32) * sz;
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	rqtc = MLX5_ADDR_OF(modify_rqt_in, in, ctx);

	MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);

	mlx5e_fill_rqt_rqns(priv, rqtc, rqt_ix);

	MLX5_SET(modify_rqt_in, in, bitmask.rqn_list, 1);

	err = mlx5_core_modify_rqt(mdev, priv->rqtn[rqt_ix], in, inlen);

	kvfree(in);

	return err;
}
static void mlx5e_destroy_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix)
{
	mlx5_core_destroy_rqt(priv->mdev, priv->rqtn[rqt_ix]);
}

static void mlx5e_redirect_rqts(struct mlx5e_priv *priv)
{
	mlx5e_redirect_rqt(priv, MLX5E_INDIRECTION_RQT);
	mlx5e_redirect_rqt(priv, MLX5E_SINGLE_RQ_RQT);
}
static void mlx5e_build_tir_ctx_lro(void *tirc, struct mlx5e_priv *priv)
{
	if (!priv->params.lro_en)
		return;

#define ROUGH_MAX_L2_L3_HDR_SZ 256

	MLX5_SET(tirc, tirc, lro_enable_mask,
		 MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
		 MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO);
	MLX5_SET(tirc, tirc, lro_max_ip_payload_size,
		 (priv->params.lro_wqe_sz -
		  ROUGH_MAX_L2_L3_HDR_SZ) >> 8);
	MLX5_SET(tirc, tirc, lro_timeout_period_usecs,
		 MLX5_CAP_ETH(priv->mdev,
			      lro_timer_supported_periods[2]));
}
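/*
 * lro_max_ip_payload_size is programmed in 256-byte units (hence the
 * >> 8), with ROUGH_MAX_L2_L3_HDR_SZ subtracted to leave headroom for
 * L2/L3 headers inside the LRO WQE. For example, a 64KB lro_wqe_sz
 * yields (65536 - 256) >> 8 = 255 units.
 */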
void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv)
{
	MLX5_SET(tirc, tirc, rx_hash_fn,
		 mlx5e_rx_hash_fn(priv->params.rss_hfunc));
	if (priv->params.rss_hfunc == ETH_RSS_HASH_TOP) {
		void *rss_key = MLX5_ADDR_OF(tirc, tirc,
					     rx_hash_toeplitz_key);
		size_t len = MLX5_FLD_SZ_BYTES(tirc,
					       rx_hash_toeplitz_key);

		MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
		memcpy(rss_key, priv->params.toeplitz_hash_key, len);
	}
}
static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;

	void *in;
	void *tirc;
	int inlen;
	int err;
	int tt;

	inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_tir_in, in, bitmask.lro, 1);
	tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx);

	mlx5e_build_tir_ctx_lro(tirc, priv);

	for (tt = 0; tt < MLX5E_NUM_TT; tt++) {
		err = mlx5_core_modify_tir(mdev, priv->tirn[tt], in, inlen);
		if (err)
			break;
	}

	kvfree(in);

	return err;
}
static int mlx5e_refresh_tir_self_loopback_enable(struct mlx5_core_dev *mdev,
						  u32 tirn)
{
	void *in;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_tir_in, in, bitmask.self_lb_en, 1);

	err = mlx5_core_modify_tir(mdev, tirn, in, inlen);

	kvfree(in);

	return err;
}
static int mlx5e_refresh_tirs_self_loopback_enable(struct mlx5e_priv *priv)
{
	int err;
	int i;

	for (i = 0; i < MLX5E_NUM_TT; i++) {
		err = mlx5e_refresh_tir_self_loopback_enable(priv->mdev,
							     priv->tirn[i]);
		if (err)
			return err;
	}

	return 0;
}
static int mlx5e_set_dev_port_mtu(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5_core_dev *mdev = priv->mdev;
	int hw_mtu;
	int err;

	err = mlx5_set_port_mtu(mdev, MLX5E_SW2HW_MTU(netdev->mtu), 1);
	if (err)
		return err;

	mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1);

	if (MLX5E_HW2SW_MTU(hw_mtu) != netdev->mtu)
		netdev_warn(netdev, "%s: Port MTU %d is different than netdev mtu %d\n",
			    __func__, MLX5E_HW2SW_MTU(hw_mtu), netdev->mtu);

	netdev->mtu = MLX5E_HW2SW_MTU(hw_mtu);

	return 0;
}
int mlx5e_open_locked(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int num_txqs;
	int err;

	set_bit(MLX5E_STATE_OPENED, &priv->state);

	num_txqs = priv->params.num_channels * priv->params.num_tc;
	netif_set_real_num_tx_queues(netdev, num_txqs);
	netif_set_real_num_rx_queues(netdev, priv->params.num_channels);

	err = mlx5e_set_dev_port_mtu(netdev);
	if (err)
		goto err_clear_state_opened_flag;

	err = mlx5e_open_channels(priv);
	if (err) {
		netdev_err(netdev, "%s: mlx5e_open_channels failed, %d\n",
			   __func__, err);
		goto err_clear_state_opened_flag;
	}

	err = mlx5e_refresh_tirs_self_loopback_enable(priv);
	if (err) {
		netdev_err(netdev, "%s: mlx5e_refresh_tirs_self_loopback_enable failed, %d\n",
			   __func__, err);
		goto err_close_channels;
	}

	mlx5e_update_carrier(priv);
	mlx5e_redirect_rqts(priv);
	mlx5e_timestamp_init(priv);

	schedule_delayed_work(&priv->update_stats_work, 0);

	return 0;

err_close_channels:
	mlx5e_close_channels(priv);
err_clear_state_opened_flag:
	clear_bit(MLX5E_STATE_OPENED, &priv->state);
	return err;
}
static int mlx5e_open(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err;

	mutex_lock(&priv->state_lock);
	err = mlx5e_open_locked(netdev);
	mutex_unlock(&priv->state_lock);

	return err;
}
int mlx5e_close_locked(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	/* May already be CLOSED in case a previous configuration operation
	 * (e.g RX/TX queue size change) that involves close&open failed.
	 */
	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
		return 0;

	clear_bit(MLX5E_STATE_OPENED, &priv->state);

	mlx5e_timestamp_cleanup(priv);
	mlx5e_redirect_rqts(priv);
	netif_carrier_off(priv->netdev);
	mlx5e_close_channels(priv);

	return 0;
}
static int mlx5e_close(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err;

	mutex_lock(&priv->state_lock);
	err = mlx5e_close_locked(netdev);
	mutex_unlock(&priv->state_lock);

	return err;
}
static int mlx5e_create_drop_rq(struct mlx5e_priv *priv,
				struct mlx5e_rq *rq,
				struct mlx5e_rq_param *param)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	void *rqc = param->rqc;
	void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
	int err;

	param->wq.db_numa_node = param->wq.buf_numa_node;

	err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wq,
				&rq->wq_ctrl);
	if (err)
		return err;

	rq->priv = priv;

	return 0;
}
static int mlx5e_create_drop_cq(struct mlx5e_priv *priv,
				struct mlx5e_cq *cq,
				struct mlx5e_cq_param *param)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5_core_cq *mcq = &cq->mcq;
	int eqn_not_used;
	unsigned int irqn;
	int err;

	err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
			       &cq->wq_ctrl);
	if (err)
		return err;

	mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);

	mcq->set_ci_db  = cq->wq_ctrl.db.db;
	mcq->arm_db     = cq->wq_ctrl.db.db + 1;
	*mcq->set_ci_db = 0;
	*mcq->arm_db    = 0;
	mcq->vector     = param->eq_ix;
	mcq->comp       = mlx5e_completion_event;
	mcq->event      = mlx5e_cq_error_event;
	mcq->irqn       = irqn;
	mcq->uar        = &priv->cq_uar;

	cq->priv = priv;

	return 0;
}
static int mlx5e_open_drop_rq(struct mlx5e_priv *priv)
{
	struct mlx5e_cq_param cq_param;
	struct mlx5e_rq_param rq_param;
	struct mlx5e_rq *rq = &priv->drop_rq;
	struct mlx5e_cq *cq = &priv->drop_rq.cq;
	int err;

	memset(&cq_param, 0, sizeof(cq_param));
	memset(&rq_param, 0, sizeof(rq_param));
	mlx5e_build_rx_cq_param(priv, &cq_param);
	mlx5e_build_rq_param(priv, &rq_param);

	err = mlx5e_create_drop_cq(priv, cq, &cq_param);
	if (err)
		return err;

	err = mlx5e_enable_cq(cq, &cq_param);
	if (err)
		goto err_destroy_cq;

	err = mlx5e_create_drop_rq(priv, rq, &rq_param);
	if (err)
		goto err_disable_cq;

	err = mlx5e_enable_rq(rq, &rq_param);
	if (err)
		goto err_destroy_rq;

	return 0;

err_destroy_rq:
	mlx5e_destroy_rq(&priv->drop_rq);

err_disable_cq:
	mlx5e_disable_cq(&priv->drop_rq.cq);

err_destroy_cq:
	mlx5e_destroy_cq(&priv->drop_rq.cq);

	return err;
}
static void mlx5e_close_drop_rq(struct mlx5e_priv *priv)
{
	mlx5e_disable_rq(&priv->drop_rq);
	mlx5e_destroy_rq(&priv->drop_rq);
	mlx5e_disable_cq(&priv->drop_rq.cq);
	mlx5e_destroy_cq(&priv->drop_rq.cq);
}
static int mlx5e_create_tis(struct mlx5e_priv *priv, int tc)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(create_tis_in)];
	void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);

	memset(in, 0, sizeof(in));

	MLX5_SET(tisc, tisc, prio, tc);
	MLX5_SET(tisc, tisc, transport_domain, priv->tdn);

	return mlx5_core_create_tis(mdev, in, sizeof(in), &priv->tisn[tc]);
}
static void mlx5e_destroy_tis(struct mlx5e_priv *priv, int tc)
{
	mlx5_core_destroy_tis(priv->mdev, priv->tisn[tc]);
}
static int mlx5e_create_tises(struct mlx5e_priv *priv)
{
	int err;
	int tc;

	for (tc = 0; tc < priv->params.num_tc; tc++) {
		err = mlx5e_create_tis(priv, tc);
		if (err)
			goto err_close_tises;
	}

	return 0;

err_close_tises:
	for (tc--; tc >= 0; tc--)
		mlx5e_destroy_tis(priv, tc);

	return err;
}
static void mlx5e_destroy_tises(struct mlx5e_priv *priv)
{
	int tc;

	for (tc = 0; tc < priv->params.num_tc; tc++)
		mlx5e_destroy_tis(priv, tc);
}
static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt)
{
	void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);

	MLX5_SET(tirc, tirc, transport_domain, priv->tdn);

#define MLX5_HASH_IP            (MLX5_HASH_FIELD_SEL_SRC_IP   |\
				 MLX5_HASH_FIELD_SEL_DST_IP)

#define MLX5_HASH_IP_L4PORTS    (MLX5_HASH_FIELD_SEL_SRC_IP   |\
				 MLX5_HASH_FIELD_SEL_DST_IP   |\
				 MLX5_HASH_FIELD_SEL_L4_SPORT |\
				 MLX5_HASH_FIELD_SEL_L4_DPORT)

#define MLX5_HASH_IP_IPSEC_SPI  (MLX5_HASH_FIELD_SEL_SRC_IP   |\
				 MLX5_HASH_FIELD_SEL_DST_IP   |\
				 MLX5_HASH_FIELD_SEL_IPSEC_SPI)

	mlx5e_build_tir_ctx_lro(tirc, priv);

	MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);

	switch (tt) {
	case MLX5E_TT_ANY:
		MLX5_SET(tirc, tirc, indirect_table,
			 priv->rqtn[MLX5E_SINGLE_RQ_RQT]);
		MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8);
		break;
	default:
		MLX5_SET(tirc, tirc, indirect_table,
			 priv->rqtn[MLX5E_INDIRECTION_RQT]);
		mlx5e_build_tir_ctx_hash(tirc, priv);
		break;
	}

	switch (tt) {
	case MLX5E_TT_IPV4_TCP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV4);
		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
			 MLX5_L4_PROT_TYPE_TCP);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_L4PORTS);
		break;

	case MLX5E_TT_IPV6_TCP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV6);
		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
			 MLX5_L4_PROT_TYPE_TCP);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_L4PORTS);
		break;

	case MLX5E_TT_IPV4_UDP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV4);
		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
			 MLX5_L4_PROT_TYPE_UDP);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_L4PORTS);
		break;

	case MLX5E_TT_IPV6_UDP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV6);
		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
			 MLX5_L4_PROT_TYPE_UDP);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_L4PORTS);
		break;

	case MLX5E_TT_IPV4_IPSEC_AH:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV4);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_IPSEC_SPI);
		break;

	case MLX5E_TT_IPV6_IPSEC_AH:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV6);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_IPSEC_SPI);
		break;

	case MLX5E_TT_IPV4_IPSEC_ESP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV4);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_IPSEC_SPI);
		break;

	case MLX5E_TT_IPV6_IPSEC_ESP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV6);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_IPSEC_SPI);
		break;

	case MLX5E_TT_IPV4:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV4);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP);
		break;

	case MLX5E_TT_IPV6:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV6);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP);
		break;
	}
}
static int mlx5e_create_tir(struct mlx5e_priv *priv, int tt)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 *in;
	void *tirc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(create_tir_in);
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);

	mlx5e_build_tir_ctx(priv, tirc, tt);

	err = mlx5_core_create_tir(mdev, in, inlen, &priv->tirn[tt]);

	kvfree(in);

	return err;
}
static void mlx5e_destroy_tir(struct mlx5e_priv *priv, int tt)
{
	mlx5_core_destroy_tir(priv->mdev, priv->tirn[tt]);
}
static int mlx5e_create_tirs(struct mlx5e_priv *priv)
{
	int err;
	int i;

	for (i = 0; i < MLX5E_NUM_TT; i++) {
		err = mlx5e_create_tir(priv, i);
		if (err)
			goto err_destroy_tirs;
	}

	return 0;

err_destroy_tirs:
	for (i--; i >= 0; i--)
		mlx5e_destroy_tir(priv, i);

	return err;
}
static void mlx5e_destroy_tirs(struct mlx5e_priv *priv)
{
	int i;

	for (i = 0; i < MLX5E_NUM_TT; i++)
		mlx5e_destroy_tir(priv, i);
}
static struct rtnl_link_stats64 *
mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_vport_stats *vstats = &priv->stats.vport;

	stats->rx_packets = vstats->rx_packets;
	stats->rx_bytes   = vstats->rx_bytes;
	stats->tx_packets = vstats->tx_packets;
	stats->tx_bytes   = vstats->tx_bytes;
	stats->multicast  = vstats->rx_multicast_packets +
			    vstats->tx_multicast_packets;
	stats->tx_errors  = vstats->tx_error_packets;
	stats->rx_errors  = vstats->rx_error_packets;
	stats->tx_dropped = vstats->tx_queue_dropped;
	stats->rx_crc_errors = 0;
	stats->rx_length_errors = 0;

	return stats;
}
static void mlx5e_set_rx_mode(struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	schedule_work(&priv->set_rx_mode_work);
}
static int mlx5e_set_mac(struct net_device *netdev, void *addr)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct sockaddr *saddr = addr;

	if (!is_valid_ether_addr(saddr->sa_data))
		return -EADDRNOTAVAIL;

	netif_addr_lock_bh(netdev);
	ether_addr_copy(netdev->dev_addr, saddr->sa_data);
	netif_addr_unlock_bh(netdev);

	schedule_work(&priv->set_rx_mode_work);

	return 0;
}
static int mlx5e_set_features(struct net_device *netdev,
			      netdev_features_t features)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err = 0;
	netdev_features_t changes = features ^ netdev->features;

	mutex_lock(&priv->state_lock);

	if (changes & NETIF_F_LRO) {
		bool was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);

		if (was_opened)
			mlx5e_close_locked(priv->netdev);

		priv->params.lro_en = !!(features & NETIF_F_LRO);
		err = mlx5e_modify_tirs_lro(priv);
		if (err)
			mlx5_core_warn(priv->mdev, "lro modify failed, %d\n",
				       err);

		if (was_opened)
			err = mlx5e_open_locked(priv->netdev);
	}

	mutex_unlock(&priv->state_lock);

	if (changes & NETIF_F_HW_VLAN_CTAG_FILTER) {
		if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
			mlx5e_enable_vlan_filter(priv);
		else
			mlx5e_disable_vlan_filter(priv);
	}

	return err;
}
static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5_core_dev *mdev = priv->mdev;
	bool was_opened;
	int max_mtu;
	int err = 0;

	mlx5_query_port_max_mtu(mdev, &max_mtu, 1);

	max_mtu = MLX5E_HW2SW_MTU(max_mtu);

	if (new_mtu > max_mtu) {
		netdev_err(netdev,
			   "%s: Bad MTU (%d) > (%d) Max\n",
			   __func__, new_mtu, max_mtu);
		return -EINVAL;
	}

	mutex_lock(&priv->state_lock);

	was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
	if (was_opened)
		mlx5e_close_locked(netdev);

	netdev->mtu = new_mtu;

	if (was_opened)
		err = mlx5e_open_locked(netdev);

	mutex_unlock(&priv->state_lock);

	return err;
}
static int mlx5e_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	case SIOCSHWTSTAMP:
		return mlx5e_hwstamp_set(dev, ifr);
	case SIOCGHWTSTAMP:
		return mlx5e_hwstamp_get(dev, ifr);
	default:
		return -EOPNOTSUPP;
	}
}
static int mlx5e_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	return mlx5_eswitch_set_vport_mac(mdev->priv.eswitch, vf + 1, mac);
}
static int mlx5e_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	return mlx5_eswitch_set_vport_vlan(mdev->priv.eswitch, vf + 1,
					   vlan, qos);
}
static int mlx5_vport_link2ifla(u8 esw_link)
{
	switch (esw_link) {
	case MLX5_ESW_VPORT_ADMIN_STATE_DOWN:
		return IFLA_VF_LINK_STATE_DISABLE;
	case MLX5_ESW_VPORT_ADMIN_STATE_UP:
		return IFLA_VF_LINK_STATE_ENABLE;
	}
	return IFLA_VF_LINK_STATE_AUTO;
}
static int mlx5_ifla_link2vport(u8 ifla_link)
{
	switch (ifla_link) {
	case IFLA_VF_LINK_STATE_DISABLE:
		return MLX5_ESW_VPORT_ADMIN_STATE_DOWN;
	case IFLA_VF_LINK_STATE_ENABLE:
		return MLX5_ESW_VPORT_ADMIN_STATE_UP;
	}
	return MLX5_ESW_VPORT_ADMIN_STATE_AUTO;
}
static int mlx5e_set_vf_link_state(struct net_device *dev, int vf,
				   int link_state)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	return mlx5_eswitch_set_vport_state(mdev->priv.eswitch, vf + 1,
					    mlx5_ifla_link2vport(link_state));
}
static int mlx5e_get_vf_config(struct net_device *dev,
			       int vf, struct ifla_vf_info *ivi)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;
	int err;

	err = mlx5_eswitch_get_vport_config(mdev->priv.eswitch, vf + 1, ivi);
	if (err)
		return err;
	ivi->linkstate = mlx5_vport_link2ifla(ivi->linkstate);
	return 0;
}
static int mlx5e_get_vf_stats(struct net_device *dev,
			      int vf, struct ifla_vf_stats *vf_stats)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	return mlx5_eswitch_get_vport_stats(mdev->priv.eswitch, vf + 1,
					    vf_stats);
}
static const struct net_device_ops mlx5e_netdev_ops_basic = {
	.ndo_open                = mlx5e_open,
	.ndo_stop                = mlx5e_close,
	.ndo_start_xmit          = mlx5e_xmit,
	.ndo_get_stats64         = mlx5e_get_stats,
	.ndo_set_rx_mode         = mlx5e_set_rx_mode,
	.ndo_set_mac_address     = mlx5e_set_mac,
	.ndo_vlan_rx_add_vid     = mlx5e_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid    = mlx5e_vlan_rx_kill_vid,
	.ndo_set_features        = mlx5e_set_features,
	.ndo_change_mtu          = mlx5e_change_mtu,
	.ndo_do_ioctl            = mlx5e_ioctl,
};

static const struct net_device_ops mlx5e_netdev_ops_sriov = {
	.ndo_open                = mlx5e_open,
	.ndo_stop                = mlx5e_close,
	.ndo_start_xmit          = mlx5e_xmit,
	.ndo_get_stats64         = mlx5e_get_stats,
	.ndo_set_rx_mode         = mlx5e_set_rx_mode,
	.ndo_set_mac_address     = mlx5e_set_mac,
	.ndo_vlan_rx_add_vid     = mlx5e_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid    = mlx5e_vlan_rx_kill_vid,
	.ndo_set_features        = mlx5e_set_features,
	.ndo_change_mtu          = mlx5e_change_mtu,
	.ndo_do_ioctl            = mlx5e_ioctl,
	.ndo_set_vf_mac          = mlx5e_set_vf_mac,
	.ndo_set_vf_vlan         = mlx5e_set_vf_vlan,
	.ndo_get_vf_config       = mlx5e_get_vf_config,
	.ndo_set_vf_link_state   = mlx5e_set_vf_link_state,
	.ndo_get_vf_stats        = mlx5e_get_vf_stats,
};
static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
{
	if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return -ENOTSUPP;

	if (!MLX5_CAP_GEN(mdev, eth_net_offloads) ||
	    !MLX5_CAP_GEN(mdev, nic_flow_table) ||
	    !MLX5_CAP_ETH(mdev, csum_cap) ||
	    !MLX5_CAP_ETH(mdev, max_lso_cap) ||
	    !MLX5_CAP_ETH(mdev, vlan_cap) ||
	    !MLX5_CAP_ETH(mdev, rss_ind_tbl_cap) ||
	    MLX5_CAP_FLOWTABLE(mdev,
			       flow_table_properties_nic_receive.max_ft_level)
			       < 3) {
		mlx5_core_warn(mdev,
			       "Not creating net device, some required device capabilities are missing\n");
		return -ENOTSUPP;
	}
	if (!MLX5_CAP_ETH(mdev, self_lb_en_modifiable))
		mlx5_core_warn(mdev, "Self loop back prevention is not supported\n");

	return 0;
}
u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev)
{
	int bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;

	return bf_buf_size -
	       sizeof(struct mlx5e_tx_wqe) +
	       2 /*sizeof(mlx5e_tx_wqe.inline_hdr_start)*/;
}
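/*
 * Example, assuming log_bf_reg_size = 10: the blue flame buffer is
 * (1 << 10) / 2 = 512 bytes, and the inline budget is 512 minus the
 * fixed TX WQE overhead, plus the 2 bytes of inline_hdr_start that
 * overlap the WQE itself.
 */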
static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev,
				    struct net_device *netdev,
				    int num_channels)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int i;

	priv->params.log_sq_size           =
		MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
	priv->params.log_rq_size           =
		MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
	priv->params.rx_cq_moderation_usec =
		MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC;
	priv->params.rx_cq_moderation_pkts =
		MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS;
	priv->params.tx_cq_moderation_usec =
		MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
	priv->params.tx_cq_moderation_pkts =
		MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
	priv->params.tx_max_inline         = mlx5e_get_max_inline_cap(mdev);
	priv->params.min_rx_wqes           =
		MLX5E_PARAMS_DEFAULT_MIN_RX_WQES;
	priv->params.num_tc                = 1;
	priv->params.default_vlan_prio     = 0;
	priv->params.rss_hfunc             = ETH_RSS_HASH_XOR;

	netdev_rss_key_fill(priv->params.toeplitz_hash_key,
			    sizeof(priv->params.toeplitz_hash_key));

	for (i = 0; i < MLX5E_INDIR_RQT_SIZE; i++)
		priv->params.indirection_rqt[i] = i % num_channels;

	priv->params.lro_wqe_sz            =
		MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;

	priv->mdev                         = mdev;
	priv->netdev                       = netdev;
	priv->params.num_channels          = num_channels;
	priv->default_vlan_prio            = priv->params.default_vlan_prio;

	spin_lock_init(&priv->async_events_spinlock);
	mutex_init(&priv->state_lock);

	INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work);
	INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
	INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work);
}
static void mlx5e_set_netdev_dev_addr(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	mlx5_query_nic_vport_mac_address(priv->mdev, 0, netdev->dev_addr);
	if (is_zero_ether_addr(netdev->dev_addr) &&
	    !MLX5_CAP_GEN(priv->mdev, vport_group_manager)) {
		eth_hw_addr_random(netdev);
		mlx5_core_info(priv->mdev, "Assigned random MAC address %pM\n", netdev->dev_addr);
	}
}
static void mlx5e_build_netdev(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5_core_dev *mdev = priv->mdev;

	SET_NETDEV_DEV(netdev, &mdev->pdev->dev);

	if (MLX5_CAP_GEN(mdev, vport_group_manager))
		netdev->netdev_ops = &mlx5e_netdev_ops_sriov;
	else
		netdev->netdev_ops = &mlx5e_netdev_ops_basic;

	netdev->watchdog_timeo = 15 * HZ;

	netdev->ethtool_ops = &mlx5e_ethtool_ops;

	netdev->vlan_features |= NETIF_F_SG;
	netdev->vlan_features |= NETIF_F_IP_CSUM;
	netdev->vlan_features |= NETIF_F_IPV6_CSUM;
	netdev->vlan_features |= NETIF_F_GRO;
	netdev->vlan_features |= NETIF_F_TSO;
	netdev->vlan_features |= NETIF_F_TSO6;
	netdev->vlan_features |= NETIF_F_RXCSUM;
	netdev->vlan_features |= NETIF_F_RXHASH;

	if (!!MLX5_CAP_ETH(mdev, lro_cap))
		netdev->vlan_features |= NETIF_F_LRO;

	netdev->hw_features = netdev->vlan_features;
	netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
	netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
	netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->features = netdev->hw_features;
	if (!priv->params.lro_en)
		netdev->features &= ~NETIF_F_LRO;

	netdev->features |= NETIF_F_HIGHDMA;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	mlx5e_set_netdev_dev_addr(netdev);
}
static int mlx5e_create_mkey(struct mlx5e_priv *priv, u32 pdn,
			     struct mlx5_core_mr *mr)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5_create_mkey_mbox_in *in;
	int err;

	in = mlx5_vzalloc(sizeof(*in));
	if (!in)
		return -ENOMEM;

	in->seg.flags = MLX5_PERM_LOCAL_WRITE |
			MLX5_PERM_LOCAL_READ  |
			MLX5_ACCESS_MODE_PA;
	in->seg.flags_pd = cpu_to_be32(pdn | MLX5_MKEY_LEN64);
	in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);

	err = mlx5_core_create_mkey(mdev, mr, in, sizeof(*in), NULL, NULL,
				    NULL);

	kvfree(in);

	return err;
}
static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev)
{
	struct net_device *netdev;
	struct mlx5e_priv *priv;
	int nch = mlx5e_get_max_num_channels(mdev);
	int err;

	if (mlx5e_check_required_hca_cap(mdev))
		return NULL;

	netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv), nch, nch);
	if (!netdev) {
		mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n");
		return NULL;
	}

	mlx5e_build_netdev_priv(mdev, netdev, nch);
	mlx5e_build_netdev(netdev);

	netif_carrier_off(netdev);

	priv = netdev_priv(netdev);

	err = mlx5_alloc_map_uar(mdev, &priv->cq_uar);
	if (err) {
		mlx5_core_err(mdev, "alloc_map uar failed, %d\n", err);
		goto err_free_netdev;
	}

	err = mlx5_core_alloc_pd(mdev, &priv->pdn);
	if (err) {
		mlx5_core_err(mdev, "alloc pd failed, %d\n", err);
		goto err_unmap_free_uar;
	}

	err = mlx5_core_alloc_transport_domain(mdev, &priv->tdn);
	if (err) {
		mlx5_core_err(mdev, "alloc td failed, %d\n", err);
		goto err_dealloc_pd;
	}

	err = mlx5e_create_mkey(priv, priv->pdn, &priv->mr);
	if (err) {
		mlx5_core_err(mdev, "create mkey failed, %d\n", err);
		goto err_dealloc_transport_domain;
	}

	err = mlx5e_create_tises(priv);
	if (err) {
		mlx5_core_warn(mdev, "create tises failed, %d\n", err);
		goto err_destroy_mkey;
	}

	err = mlx5e_open_drop_rq(priv);
	if (err) {
		mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
		goto err_destroy_tises;
	}

	err = mlx5e_create_rqt(priv, MLX5E_INDIRECTION_RQT);
	if (err) {
		mlx5_core_warn(mdev, "create rqt(INDIR) failed, %d\n", err);
		goto err_close_drop_rq;
	}

	err = mlx5e_create_rqt(priv, MLX5E_SINGLE_RQ_RQT);
	if (err) {
		mlx5_core_warn(mdev, "create rqt(SINGLE) failed, %d\n", err);
		goto err_destroy_rqt_indir;
	}

	err = mlx5e_create_tirs(priv);
	if (err) {
		mlx5_core_warn(mdev, "create tirs failed, %d\n", err);
		goto err_destroy_rqt_single;
	}

	err = mlx5e_create_flow_tables(priv);
	if (err) {
		mlx5_core_warn(mdev, "create flow tables failed, %d\n", err);
		goto err_destroy_tirs;
	}

	mlx5e_init_eth_addr(priv);

	err = register_netdev(netdev);
	if (err) {
		mlx5_core_err(mdev, "register_netdev failed, %d\n", err);
		goto err_destroy_flow_tables;
	}

	mlx5e_enable_async_events(priv);
	schedule_work(&priv->set_rx_mode_work);

	return priv;

err_destroy_flow_tables:
	mlx5e_destroy_flow_tables(priv);

err_destroy_tirs:
	mlx5e_destroy_tirs(priv);

err_destroy_rqt_single:
	mlx5e_destroy_rqt(priv, MLX5E_SINGLE_RQ_RQT);

err_destroy_rqt_indir:
	mlx5e_destroy_rqt(priv, MLX5E_INDIRECTION_RQT);

err_close_drop_rq:
	mlx5e_close_drop_rq(priv);

err_destroy_tises:
	mlx5e_destroy_tises(priv);

err_destroy_mkey:
	mlx5_core_destroy_mkey(mdev, &priv->mr);

err_dealloc_transport_domain:
	mlx5_core_dealloc_transport_domain(mdev, priv->tdn);

err_dealloc_pd:
	mlx5_core_dealloc_pd(mdev, priv->pdn);

err_unmap_free_uar:
	mlx5_unmap_free_uar(mdev, &priv->cq_uar);

err_free_netdev:
	free_netdev(netdev);

	return NULL;
}
static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv)
{
	struct mlx5e_priv *priv = vpriv;
	struct net_device *netdev = priv->netdev;

	set_bit(MLX5E_STATE_DESTROYING, &priv->state);

	schedule_work(&priv->set_rx_mode_work);
	mlx5e_disable_async_events(priv);
	flush_scheduled_work();
	unregister_netdev(netdev);
	mlx5e_destroy_flow_tables(priv);
	mlx5e_destroy_tirs(priv);
	mlx5e_destroy_rqt(priv, MLX5E_SINGLE_RQ_RQT);
	mlx5e_destroy_rqt(priv, MLX5E_INDIRECTION_RQT);
	mlx5e_close_drop_rq(priv);
	mlx5e_destroy_tises(priv);
	mlx5_core_destroy_mkey(priv->mdev, &priv->mr);
	mlx5_core_dealloc_transport_domain(priv->mdev, priv->tdn);
	mlx5_core_dealloc_pd(priv->mdev, priv->pdn);
	mlx5_unmap_free_uar(priv->mdev, &priv->cq_uar);
	free_netdev(netdev);
}
static void *mlx5e_get_netdev(void *vpriv)
{
	struct mlx5e_priv *priv = vpriv;

	return priv->netdev;
}
static struct mlx5_interface mlx5e_interface = {
	.add       = mlx5e_create_netdev,
	.remove    = mlx5e_destroy_netdev,
	.event     = mlx5e_async_event,
	.protocol  = MLX5_INTERFACE_PROTOCOL_ETH,
	.get_dev   = mlx5e_get_netdev,
};
void mlx5e_init(void)
{
	mlx5_register_interface(&mlx5e_interface);
}

void mlx5e_cleanup(void)
{
	mlx5_unregister_interface(&mlx5e_interface);
}