/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/mlx5/flow_table.h>
#include "en.h"
struct mlx5e_rq_param {
	u32                        rqc[MLX5_ST_SZ_DW(rqc)];
	struct mlx5_wq_param       wq;
};
struct mlx5e_sq_param {
	u32                        sqc[MLX5_ST_SZ_DW(sqc)];
	struct mlx5_wq_param       wq;
	u16                        max_inline;
};
struct mlx5e_cq_param {
	u32                        cqc[MLX5_ST_SZ_DW(cqc)];
	struct mlx5_wq_param       wq;
	u16                        eq_ix;
};
struct mlx5e_channel_param {
	struct mlx5e_rq_param      rq;
	struct mlx5e_sq_param      sq;
	struct mlx5e_cq_param      rx_cq;
	struct mlx5e_cq_param      tx_cq;
};
static void mlx5e_update_carrier(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u8 port_state;

	port_state = mlx5_query_vport_state(mdev,
			MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT);

	if (port_state == VPORT_STATE_UP)
		netif_carrier_on(priv->netdev);
	else
		netif_carrier_off(priv->netdev);
}
static void mlx5e_update_carrier_work(struct work_struct *work)
{
	struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
					       update_carrier_work);

	mutex_lock(&priv->state_lock);
	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
		mlx5e_update_carrier(priv);
	mutex_unlock(&priv->state_lock);
}
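/* Fold the per-ring SW counters into the vport stats first, then issue
 * QUERY_VPORT_COUNTER for the HW side, so both views cover roughly the
 * same window.
 */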
void mlx5e_update_stats(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_vport_stats *s = &priv->stats.vport;
	struct mlx5e_rq_stats *rq_stats;
	struct mlx5e_sq_stats *sq_stats;
	u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)];
	u32 *out;
	int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
	u64 tx_offload_none;
	int i, j;

	out = mlx5_vzalloc(outlen);
	if (!out)
		return;

	/* Collect first the SW counters and then HW for consistency */
	s->tso_packets		= 0;
	s->tso_bytes		= 0;
	s->tx_queue_stopped	= 0;
	s->tx_queue_wake	= 0;
	s->tx_queue_dropped	= 0;
	tx_offload_none		= 0;
	s->lro_packets		= 0;
	s->lro_bytes		= 0;
	s->rx_csum_none		= 0;
	s->rx_wqe_err		= 0;
	for (i = 0; i < priv->params.num_channels; i++) {
		rq_stats = &priv->channel[i]->rq.stats;

		s->lro_packets	+= rq_stats->lro_packets;
		s->lro_bytes	+= rq_stats->lro_bytes;
		s->rx_csum_none	+= rq_stats->csum_none;
		s->rx_wqe_err   += rq_stats->wqe_err;

		for (j = 0; j < priv->params.num_tc; j++) {
			sq_stats = &priv->channel[i]->sq[j].stats;

			s->tso_packets		+= sq_stats->tso_packets;
			s->tso_bytes		+= sq_stats->tso_bytes;
			s->tx_queue_stopped	+= sq_stats->stopped;
			s->tx_queue_wake	+= sq_stats->wake;
			s->tx_queue_dropped	+= sq_stats->dropped;
			tx_offload_none		+= sq_stats->csum_offload_none;
		}
	}

	/* HW counters */
	memset(in, 0, sizeof(in));

	MLX5_SET(query_vport_counter_in, in, opcode,
		 MLX5_CMD_OP_QUERY_VPORT_COUNTER);
	MLX5_SET(query_vport_counter_in, in, op_mod, 0);
	MLX5_SET(query_vport_counter_in, in, other_vport, 0);

	memset(out, 0, outlen);

	if (mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen))
		goto free_out;

#define MLX5_GET_CTR(p, x) \
	MLX5_GET64(query_vport_counter_out, p, x)

	s->rx_error_packets =
		MLX5_GET_CTR(out, received_errors.packets);
	s->rx_error_bytes =
		MLX5_GET_CTR(out, received_errors.octets);
	s->tx_error_packets =
		MLX5_GET_CTR(out, transmit_errors.packets);
	s->tx_error_bytes =
		MLX5_GET_CTR(out, transmit_errors.octets);

	s->rx_unicast_packets =
		MLX5_GET_CTR(out, received_eth_unicast.packets);
	s->rx_unicast_bytes =
		MLX5_GET_CTR(out, received_eth_unicast.octets);
	s->tx_unicast_packets =
		MLX5_GET_CTR(out, transmitted_eth_unicast.packets);
	s->tx_unicast_bytes =
		MLX5_GET_CTR(out, transmitted_eth_unicast.octets);

	s->rx_multicast_packets =
		MLX5_GET_CTR(out, received_eth_multicast.packets);
	s->rx_multicast_bytes =
		MLX5_GET_CTR(out, received_eth_multicast.octets);
	s->tx_multicast_packets =
		MLX5_GET_CTR(out, transmitted_eth_multicast.packets);
	s->tx_multicast_bytes =
		MLX5_GET_CTR(out, transmitted_eth_multicast.octets);

	s->rx_broadcast_packets =
		MLX5_GET_CTR(out, received_eth_broadcast.packets);
	s->rx_broadcast_bytes =
		MLX5_GET_CTR(out, received_eth_broadcast.octets);
	s->tx_broadcast_packets =
		MLX5_GET_CTR(out, transmitted_eth_broadcast.packets);
	s->tx_broadcast_bytes =
		MLX5_GET_CTR(out, transmitted_eth_broadcast.octets);

	s->rx_packets =
		s->rx_unicast_packets +
		s->rx_multicast_packets +
		s->rx_broadcast_packets;
	s->rx_bytes =
		s->rx_unicast_bytes +
		s->rx_multicast_bytes +
		s->rx_broadcast_bytes;
	s->tx_packets =
		s->tx_unicast_packets +
		s->tx_multicast_packets +
		s->tx_broadcast_packets;
	s->tx_bytes =
		s->tx_unicast_bytes +
		s->tx_multicast_bytes +
		s->tx_broadcast_bytes;

	/* Update calculated offload counters */
	s->tx_csum_offload = s->tx_packets - tx_offload_none;
	s->rx_csum_good    = s->rx_packets - s->rx_csum_none;

free_out:
	kvfree(out);
}
static void mlx5e_update_stats_work(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct mlx5e_priv *priv = container_of(dwork, struct mlx5e_priv,
					       update_stats_work);
	mutex_lock(&priv->state_lock);
	if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
		mlx5e_update_stats(priv);
		schedule_delayed_work(dwork,
				      msecs_to_jiffies(
					      MLX5E_UPDATE_STATS_INTERVAL));
	}
	mutex_unlock(&priv->state_lock);
}
static void __mlx5e_async_event(struct mlx5e_priv *priv,
				enum mlx5_dev_event event)
{
	switch (event) {
	case MLX5_DEV_EVENT_PORT_UP:
	case MLX5_DEV_EVENT_PORT_DOWN:
		schedule_work(&priv->update_carrier_work);
		break;

	default:
		break;
	}
}
static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
			      enum mlx5_dev_event event, unsigned long param)
{
	struct mlx5e_priv *priv = vpriv;

	spin_lock(&priv->async_events_spinlock);
	if (test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state))
		__mlx5e_async_event(priv, event);
	spin_unlock(&priv->async_events_spinlock);
}
static void mlx5e_enable_async_events(struct mlx5e_priv *priv)
{
	set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state);
}

static void mlx5e_disable_async_events(struct mlx5e_priv *priv)
{
	spin_lock_irq(&priv->async_events_spinlock);
	clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state);
	spin_unlock_irq(&priv->async_events_spinlock);
}
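/* The device operates on the wire MTU (Ethernet header, one VLAN tag and
 * FCS included) while the netdev MTU counts payload only; these two macros
 * convert between the two views.
 */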
#define MLX5E_HW2SW_MTU(hwmtu) (hwmtu - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN))
#define MLX5E_SW2HW_MTU(swmtu) (swmtu + (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN))
static int mlx5e_create_rq(struct mlx5e_channel *c,
			   struct mlx5e_rq_param *param,
			   struct mlx5e_rq *rq)
{
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;
	void *rqc = param->rqc;
	void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
	int wq_sz;
	int err;
	int i;

	param->wq.db_numa_node = cpu_to_node(c->cpu);

	err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wq,
				&rq->wq_ctrl);
	if (err)
		return err;

	rq->wq.db = &rq->wq.db[MLX5_RCV_DBR];

	wq_sz = mlx5_wq_ll_get_size(&rq->wq);
	rq->skb = kzalloc_node(wq_sz * sizeof(*rq->skb), GFP_KERNEL,
			       cpu_to_node(c->cpu));
	if (!rq->skb) {
		err = -ENOMEM;
		goto err_rq_wq_destroy;
	}

	rq->wqe_sz = (priv->params.lro_en) ? priv->params.lro_wqe_sz :
					     MLX5E_SW2HW_MTU(priv->netdev->mtu);
	rq->wqe_sz = SKB_DATA_ALIGN(rq->wqe_sz + MLX5E_NET_IP_ALIGN);

	for (i = 0; i < wq_sz; i++) {
		struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, i);
		u32 byte_count = rq->wqe_sz - MLX5E_NET_IP_ALIGN;

		wqe->data.lkey       = c->mkey_be;
		wqe->data.byte_count =
			cpu_to_be32(byte_count | MLX5_HW_START_PADDING);
	}

	rq->pdev    = c->pdev;
	rq->netdev  = c->netdev;
	rq->channel = c;
	rq->ix      = c->ix;

	return 0;

err_rq_wq_destroy:
	mlx5_wq_destroy(&rq->wq_ctrl);

	return err;
}
static void mlx5e_destroy_rq(struct mlx5e_rq *rq)
{
	kfree(rq->skb);
	mlx5_wq_destroy(&rq->wq_ctrl);
}
static int mlx5e_enable_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
{
	struct mlx5e_channel *c = rq->channel;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	void *in;
	void *rqc;
	void *wq;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(create_rq_in) +
		sizeof(u64) * rq->wq_ctrl.buf.npages;
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
	wq  = MLX5_ADDR_OF(rqc, rqc, wq);

	memcpy(rqc, param->rqc, sizeof(param->rqc));

	MLX5_SET(rqc,  rqc, cqn,		rq->cq.mcq.cqn);
	MLX5_SET(rqc,  rqc, state,		MLX5_RQC_STATE_RST);
	MLX5_SET(rqc,  rqc, flush_in_error_en,	1);
	MLX5_SET(wq,   wq,  log_wq_pg_sz,	rq->wq_ctrl.buf.page_shift -
						MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET64(wq, wq,  dbr_addr,		rq->wq_ctrl.db.dma);

	mlx5_fill_page_array(&rq->wq_ctrl.buf,
			     (__be64 *)MLX5_ADDR_OF(wq, wq, pas));

	err = mlx5_core_create_rq(mdev, in, inlen, &rq->rqn);

	kvfree(in);

	return err;
}
static int mlx5e_modify_rq(struct mlx5e_rq *rq, int curr_state, int next_state)
{
	struct mlx5e_channel *c = rq->channel;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	void *in;
	void *rqc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);

	MLX5_SET(modify_rq_in, in, rq_state, curr_state);
	MLX5_SET(rqc, rqc, state, next_state);

	err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);

	kvfree(in);

	return err;
}
static void mlx5e_disable_rq(struct mlx5e_rq *rq)
{
	struct mlx5e_channel *c = rq->channel;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	mlx5_core_destroy_rq(mdev, rq->rqn);
}
static int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq)
{
	struct mlx5e_channel *c = rq->channel;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_wq_ll *wq = &rq->wq;
	int i;

	for (i = 0; i < 1000; i++) {
		if (wq->cur_sz >= priv->params.min_rx_wqes)
			return 0;

		msleep(20);
	}

	return -ETIMEDOUT;
}
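/* Opening an RQ is a three-step dance: create the SW/HW queue objects,
 * issue CREATE_RQ (the queue starts in RST), then drive it RST -> RDY.
 * The nop posted on sq[0] kicks the channel's NAPI poller so that
 * mlx5e_post_rx_wqes() fills the freshly created ring.
 */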
static int mlx5e_open_rq(struct mlx5e_channel *c,
			 struct mlx5e_rq_param *param,
			 struct mlx5e_rq *rq)
{
	int err;

	err = mlx5e_create_rq(c, param, rq);
	if (err)
		return err;

	err = mlx5e_enable_rq(rq, param);
	if (err)
		goto err_destroy_rq;

	err = mlx5e_modify_rq(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
	if (err)
		goto err_disable_rq;

	set_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state);
	mlx5e_send_nop(&c->sq[0], true); /* trigger mlx5e_post_rx_wqes() */

	return 0;

err_disable_rq:
	mlx5e_disable_rq(rq);
err_destroy_rq:
	mlx5e_destroy_rq(rq);

	return err;
}
static void mlx5e_close_rq(struct mlx5e_rq *rq)
{
	clear_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state);
	napi_synchronize(&rq->channel->napi); /* prevent mlx5e_post_rx_wqes */

	mlx5e_modify_rq(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR);
	while (!mlx5_wq_ll_is_empty(&rq->wq))
		msleep(20);

	/* avoid destroying rq before mlx5e_poll_rx_cq() is done with it */
	napi_synchronize(&rq->channel->napi);

	mlx5e_disable_rq(rq);
	mlx5e_destroy_rq(rq);
}
static void mlx5e_free_sq_db(struct mlx5e_sq *sq)
{
	kfree(sq->skb);
	kfree(sq->dma_fifo);
}
static int mlx5e_alloc_sq_db(struct mlx5e_sq *sq, int numa)
{
	int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
	int df_sz = wq_sz * MLX5_SEND_WQEBB_NUM_DS;

	sq->skb = kzalloc_node(wq_sz * sizeof(*sq->skb), GFP_KERNEL, numa);
	sq->dma_fifo = kzalloc_node(df_sz * sizeof(*sq->dma_fifo), GFP_KERNEL,
				    numa);

	if (!sq->skb || !sq->dma_fifo) {
		mlx5e_free_sq_db(sq);
		return -ENOMEM;
	}

	sq->dma_fifo_mask = df_sz - 1;

	return 0;
}
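/* SQ creation maps a dedicated UAR so the SQ can ring blue-flame doorbells
 * (the BF register is split into two alternating buffers, hence the / 2 in
 * bf_buf_size below) and wires the SQ to its netdev TX queue via txq_ix.
 */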
static int mlx5e_create_sq(struct mlx5e_channel *c,
			   int tc,
			   struct mlx5e_sq_param *param,
			   struct mlx5e_sq *sq)
{
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	void *sqc = param->sqc;
	void *sqc_wq = MLX5_ADDR_OF(sqc, sqc, wq);
	int txq_ix;
	int err;

	err = mlx5_alloc_map_uar(mdev, &sq->uar);
	if (err)
		return err;

	param->wq.db_numa_node = cpu_to_node(c->cpu);

	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq,
				 &sq->wq_ctrl);
	if (err)
		goto err_unmap_free_uar;

	sq->wq.db       = &sq->wq.db[MLX5_SND_DBR];
	sq->uar_map     = sq->uar.map;
	sq->uar_bf_map  = sq->uar.bf_map;
	sq->bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;
	sq->max_inline  = param->max_inline;

	err = mlx5e_alloc_sq_db(sq, cpu_to_node(c->cpu));
	if (err)
		goto err_sq_wq_destroy;

	txq_ix = c->ix + tc * priv->params.num_channels;
	sq->txq = netdev_get_tx_queue(priv->netdev, txq_ix);

	sq->pdev      = c->pdev;
	sq->mkey_be   = c->mkey_be;
	sq->channel   = c;
	sq->tc        = tc;
	sq->edge      = (sq->wq.sz_m1 + 1) - MLX5_SEND_WQE_MAX_WQEBBS;
	sq->bf_budget = MLX5E_SQ_BF_BUDGET;
	priv->txq_to_sq_map[txq_ix] = sq;

	return 0;

err_sq_wq_destroy:
	mlx5_wq_destroy(&sq->wq_ctrl);

err_unmap_free_uar:
	mlx5_unmap_free_uar(mdev, &sq->uar);

	return err;
}
static void mlx5e_destroy_sq(struct mlx5e_sq *sq)
{
	struct mlx5e_channel *c = sq->channel;
	struct mlx5e_priv *priv = c->priv;

	mlx5e_free_sq_db(sq);
	mlx5_wq_destroy(&sq->wq_ctrl);
	mlx5_unmap_free_uar(priv->mdev, &sq->uar);
}
static int mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param)
{
	struct mlx5e_channel *c = sq->channel;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	void *in;
	void *sqc;
	void *wq;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
		sizeof(u64) * sq->wq_ctrl.buf.npages;
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
	wq = MLX5_ADDR_OF(sqc, sqc, wq);

	memcpy(sqc, param->sqc, sizeof(param->sqc));

	MLX5_SET(sqc,  sqc, tis_num_0,		priv->tisn[sq->tc]);
	MLX5_SET(sqc,  sqc, cqn,		c->sq[sq->tc].cq.mcq.cqn);
	MLX5_SET(sqc,  sqc, state,		MLX5_SQC_STATE_RST);
	MLX5_SET(sqc,  sqc, tis_lst_sz,		1);
	MLX5_SET(sqc,  sqc, flush_in_error_en,	1);

	MLX5_SET(wq,   wq, wq_type,	  MLX5_WQ_TYPE_CYCLIC);
	MLX5_SET(wq,   wq, uar_page,	  sq->uar.index);
	MLX5_SET(wq,   wq, log_wq_pg_sz,  sq->wq_ctrl.buf.page_shift -
					  MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET64(wq, wq, dbr_addr,	  sq->wq_ctrl.db.dma);

	mlx5_fill_page_array(&sq->wq_ctrl.buf,
			     (__be64 *)MLX5_ADDR_OF(wq, wq, pas));

	err = mlx5_core_create_sq(mdev, in, inlen, &sq->sqn);

	kvfree(in);

	return err;
}
static int mlx5e_modify_sq(struct mlx5e_sq *sq, int curr_state, int next_state)
{
	struct mlx5e_channel *c = sq->channel;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	void *in;
	void *sqc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);

	MLX5_SET(modify_sq_in, in, sq_state, curr_state);
	MLX5_SET(sqc, sqc, state, next_state);

	err = mlx5_core_modify_sq(mdev, sq->sqn, in, inlen);

	kvfree(in);

	return err;
}
static void mlx5e_disable_sq(struct mlx5e_sq *sq)
{
	struct mlx5e_channel *c = sq->channel;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	mlx5_core_destroy_sq(mdev, sq->sqn);
}
static int mlx5e_open_sq(struct mlx5e_channel *c,
			 int tc,
			 struct mlx5e_sq_param *param,
			 struct mlx5e_sq *sq)
{
	int err;

	err = mlx5e_create_sq(c, tc, param, sq);
	if (err)
		return err;

	err = mlx5e_enable_sq(sq, param);
	if (err)
		goto err_destroy_sq;

	err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RST, MLX5_SQC_STATE_RDY);
	if (err)
		goto err_disable_sq;

	set_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state);
	netdev_tx_reset_queue(sq->txq);
	netif_tx_start_queue(sq->txq);

	return 0;

err_disable_sq:
	mlx5e_disable_sq(sq);
err_destroy_sq:
	mlx5e_destroy_sq(sq);

	return err;
}
static inline void netif_tx_disable_queue(struct netdev_queue *txq)
{
	__netif_tx_lock_bh(txq);
	netif_tx_stop_queue(txq);
	__netif_tx_unlock_bh(txq);
}
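/* SQ teardown ordering: first stop new wakeups and the TX queue, flush any
 * pending WQEs with a final nop, then drive the SQ RDY -> ERR and wait for
 * the consumer counter to catch up with the producer (cc == pc) before
 * destroying the queue.
 */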
static void mlx5e_close_sq(struct mlx5e_sq *sq)
{
	clear_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state);
	napi_synchronize(&sq->channel->napi); /* prevent netif_tx_wake_queue */
	netif_tx_disable_queue(sq->txq);

	/* ensure hw is notified of all pending wqes */
	if (mlx5e_sq_has_room_for(sq, 1))
		mlx5e_send_nop(sq, true);

	mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY, MLX5_SQC_STATE_ERR);
	while (sq->cc != sq->pc) /* wait till sq is empty */
		msleep(20);

	/* avoid destroying sq before mlx5e_poll_tx_cq() is done with it */
	napi_synchronize(&sq->channel->napi);

	mlx5e_disable_sq(sq);
	mlx5e_destroy_sq(sq);
}
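/* CQ setup: the two doorbell records (set_ci and arm) sit back to back in
 * the doorbell page, and each CQE's op_own byte is preset to a value the
 * HW has not yet written, so the poller can tell SW-owned entries apart
 * on the first pass around the ring.
 */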
static int mlx5e_create_cq(struct mlx5e_channel *c,
			   struct mlx5e_cq_param *param,
			   struct mlx5e_cq *cq)
{
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5_core_cq *mcq = &cq->mcq;
	int eqn_not_used;
	int irqn;
	int err;
	u32 i;

	param->wq.buf_numa_node = cpu_to_node(c->cpu);
	param->wq.db_numa_node  = cpu_to_node(c->cpu);
	param->eq_ix = c->ix;

	err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
			       &cq->wq_ctrl);
	if (err)
		return err;

	mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);

	cq->napi = &c->napi;

	mcq->cqe_sz     = 64;
	mcq->set_ci_db  = cq->wq_ctrl.db.db;
	mcq->arm_db     = cq->wq_ctrl.db.db + 1;
	*mcq->set_ci_db = 0;
	*mcq->arm_db    = 0;
	mcq->vector     = param->eq_ix;
	mcq->comp       = mlx5e_completion_event;
	mcq->event      = mlx5e_cq_error_event;
	mcq->irqn       = irqn;
	mcq->uar        = &priv->cq_uar;

	for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
		struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);

		cqe->op_own = 0xf1;
	}

	cq->channel = c;

	return 0;
}
static void mlx5e_destroy_cq(struct mlx5e_cq *cq)
{
	mlx5_wq_destroy(&cq->wq_ctrl);
}
static int mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
{
	struct mlx5e_channel *c = cq->channel;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5_core_cq *mcq = &cq->mcq;

	void *in;
	void *cqc;
	int inlen;
	int irqn_not_used;
	int eqn;
	int err;

	inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
		sizeof(u64) * cq->wq_ctrl.buf.npages;
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);

	memcpy(cqc, param->cqc, sizeof(param->cqc));

	mlx5_fill_page_array(&cq->wq_ctrl.buf,
			     (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));

	mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used);

	MLX5_SET(cqc,   cqc, c_eqn,         eqn);
	MLX5_SET(cqc,   cqc, uar_page,      mcq->uar->index);
	MLX5_SET(cqc,   cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
					    MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET64(cqc, cqc, dbr_addr,      cq->wq_ctrl.db.dma);

	err = mlx5_core_create_cq(mdev, mcq, in, inlen);

	kvfree(in);

	if (err)
		return err;

	mlx5e_cq_arm(cq);

	return 0;
}
static void mlx5e_disable_cq(struct mlx5e_cq *cq)
{
	struct mlx5e_channel *c = cq->channel;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	mlx5_core_destroy_cq(mdev, &cq->mcq);
}
static int mlx5e_open_cq(struct mlx5e_channel *c,
			 struct mlx5e_cq_param *param,
			 struct mlx5e_cq *cq,
			 u16 moderation_usecs,
			 u16 moderation_frames)
{
	int err;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	err = mlx5e_create_cq(c, param, cq);
	if (err)
		return err;

	err = mlx5e_enable_cq(cq, param);
	if (err)
		goto err_destroy_cq;

	err = mlx5_core_modify_cq_moderation(mdev, &cq->mcq,
					     moderation_usecs,
					     moderation_frames);
	if (err)
		goto err_disable_cq;

	return 0;

err_disable_cq:
	mlx5e_disable_cq(cq);

err_destroy_cq:
	mlx5e_destroy_cq(cq);

	return err;
}

static void mlx5e_close_cq(struct mlx5e_cq *cq)
{
	mlx5e_disable_cq(cq);
	mlx5e_destroy_cq(cq);
}
static int mlx5e_get_cpu(struct mlx5e_priv *priv, int ix)
{
	return cpumask_first(priv->mdev->priv.irq_info[ix].mask);
}
static int mlx5e_open_tx_cqs(struct mlx5e_channel *c,
			     struct mlx5e_channel_param *cparam)
{
	struct mlx5e_priv *priv = c->priv;
	int err;
	int tc;

	for (tc = 0; tc < c->num_tc; tc++) {
		err = mlx5e_open_cq(c, &cparam->tx_cq, &c->sq[tc].cq,
				    priv->params.tx_cq_moderation_usec,
				    priv->params.tx_cq_moderation_pkts);
		if (err)
			goto err_close_tx_cqs;
	}

	return 0;

err_close_tx_cqs:
	for (tc--; tc >= 0; tc--)
		mlx5e_close_cq(&c->sq[tc].cq);

	return err;
}

static void mlx5e_close_tx_cqs(struct mlx5e_channel *c)
{
	int tc;

	for (tc = 0; tc < c->num_tc; tc++)
		mlx5e_close_cq(&c->sq[tc].cq);
}
static int mlx5e_open_sqs(struct mlx5e_channel *c,
			  struct mlx5e_channel_param *cparam)
{
	int err;
	int tc;

	for (tc = 0; tc < c->num_tc; tc++) {
		err = mlx5e_open_sq(c, tc, &cparam->sq, &c->sq[tc]);
		if (err)
			goto err_close_sqs;
	}

	return 0;

err_close_sqs:
	for (tc--; tc >= 0; tc--)
		mlx5e_close_sq(&c->sq[tc]);

	return err;
}

static void mlx5e_close_sqs(struct mlx5e_channel *c)
{
	int tc;

	for (tc = 0; tc < c->num_tc; tc++)
		mlx5e_close_sq(&c->sq[tc]);
}
static void mlx5e_build_tc_to_txq_map(struct mlx5e_channel *c,
				      int num_channels)
{
	int i;

	for (i = 0; i < MLX5E_MAX_NUM_TC; i++)
		c->tc_to_txq_map[i] = c->ix + i * num_channels;
}
static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
			      struct mlx5e_channel_param *cparam,
			      struct mlx5e_channel **cp)
{
	struct net_device *netdev = priv->netdev;
	int cpu = mlx5e_get_cpu(priv, ix);
	struct mlx5e_channel *c;
	int err;

	c = kzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
	if (!c)
		return -ENOMEM;

	c->priv     = priv;
	c->ix       = ix;
	c->cpu      = cpu;
	c->pdev     = &priv->mdev->pdev->dev;
	c->netdev   = priv->netdev;
	c->mkey_be  = cpu_to_be32(priv->mr.key);
	c->num_tc   = priv->params.num_tc;

	mlx5e_build_tc_to_txq_map(c, priv->params.num_channels);

	netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64);

	err = mlx5e_open_tx_cqs(c, cparam);
	if (err)
		goto err_napi_del;

	err = mlx5e_open_cq(c, &cparam->rx_cq, &c->rq.cq,
			    priv->params.rx_cq_moderation_usec,
			    priv->params.rx_cq_moderation_pkts);
	if (err)
		goto err_close_tx_cqs;

	napi_enable(&c->napi);

	err = mlx5e_open_sqs(c, cparam);
	if (err)
		goto err_disable_napi;

	err = mlx5e_open_rq(c, &cparam->rq, &c->rq);
	if (err)
		goto err_close_sqs;

	netif_set_xps_queue(netdev, get_cpu_mask(c->cpu), ix);
	*cp = c;

	return 0;

err_close_sqs:
	mlx5e_close_sqs(c);

err_disable_napi:
	napi_disable(&c->napi);
	mlx5e_close_cq(&c->rq.cq);

err_close_tx_cqs:
	mlx5e_close_tx_cqs(c);

err_napi_del:
	netif_napi_del(&c->napi);
	kfree(c);

	return err;
}
static void mlx5e_close_channel(struct mlx5e_channel *c)
{
	mlx5e_close_rq(&c->rq);
	mlx5e_close_sqs(c);
	napi_disable(&c->napi);
	mlx5e_close_cq(&c->rq.cq);
	mlx5e_close_tx_cqs(c);
	netif_napi_del(&c->napi);
	kfree(c);
}
static void mlx5e_build_rq_param(struct mlx5e_priv *priv,
				 struct mlx5e_rq_param *param)
{
	void *rqc = param->rqc;
	void *wq = MLX5_ADDR_OF(rqc, rqc, wq);

	MLX5_SET(wq, wq, wq_type,          MLX5_WQ_TYPE_LINKED_LIST);
	MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
	MLX5_SET(wq, wq, log_wq_stride,    ilog2(sizeof(struct mlx5e_rx_wqe)));
	MLX5_SET(wq, wq, log_wq_sz,        priv->params.log_rq_size);
	MLX5_SET(wq, wq, pd,               priv->pdn);

	param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
	param->wq.linear = 1;
}
static void mlx5e_build_sq_param(struct mlx5e_priv *priv,
				 struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

	MLX5_SET(wq, wq, log_wq_sz,     priv->params.log_sq_size);
	MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
	MLX5_SET(wq, wq, pd,            priv->pdn);

	param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
	param->max_inline = priv->params.tx_max_inline;
}
static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
					struct mlx5e_cq_param *param)
{
	void *cqc = param->cqc;

	MLX5_SET(cqc, cqc, uar_page, priv->cq_uar.index);
}
static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
				    struct mlx5e_cq_param *param)
{
	void *cqc = param->cqc;

	MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_rq_size);

	mlx5e_build_common_cq_param(priv, param);
}

static void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv,
				    struct mlx5e_cq_param *param)
{
	void *cqc = param->cqc;

	MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_sq_size);

	mlx5e_build_common_cq_param(priv, param);
}
static void mlx5e_build_channel_param(struct mlx5e_priv *priv,
				      struct mlx5e_channel_param *cparam)
{
	memset(cparam, 0, sizeof(*cparam));

	mlx5e_build_rq_param(priv, &cparam->rq);
	mlx5e_build_sq_param(priv, &cparam->sq);
	mlx5e_build_rx_cq_param(priv, &cparam->rx_cq);
	mlx5e_build_tx_cq_param(priv, &cparam->tx_cq);
}
static int mlx5e_open_channels(struct mlx5e_priv *priv)
{
	struct mlx5e_channel_param cparam;
	int nch = priv->params.num_channels;
	int err = -ENOMEM;
	int i;
	int j;

	priv->channel = kcalloc(nch, sizeof(struct mlx5e_channel *),
				GFP_KERNEL);

	priv->txq_to_sq_map = kcalloc(nch * priv->params.num_tc,
				      sizeof(struct mlx5e_sq *), GFP_KERNEL);

	if (!priv->channel || !priv->txq_to_sq_map)
		goto err_free_txq_to_sq_map;

	mlx5e_build_channel_param(priv, &cparam);
	for (i = 0; i < nch; i++) {
		err = mlx5e_open_channel(priv, i, &cparam, &priv->channel[i]);
		if (err)
			goto err_close_channels;
	}

	for (j = 0; j < nch; j++) {
		err = mlx5e_wait_for_min_rx_wqes(&priv->channel[j]->rq);
		if (err)
			goto err_close_channels;
	}

	return 0;

err_close_channels:
	for (i--; i >= 0; i--)
		mlx5e_close_channel(priv->channel[i]);

err_free_txq_to_sq_map:
	kfree(priv->txq_to_sq_map);
	kfree(priv->channel);

	return err;
}
static void mlx5e_close_channels(struct mlx5e_priv *priv)
{
	int i;

	for (i = 0; i < priv->params.num_channels; i++)
		mlx5e_close_channel(priv->channel[i]);

	kfree(priv->txq_to_sq_map);
	kfree(priv->channel);
}
static int mlx5e_open_tis(struct mlx5e_priv *priv, int tc)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(create_tis_in)];
	void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);

	memset(in, 0, sizeof(in));

	MLX5_SET(tisc, tisc, prio, tc);
	MLX5_SET(tisc, tisc, transport_domain, priv->tdn);

	return mlx5_core_create_tis(mdev, in, sizeof(in), &priv->tisn[tc]);
}

static void mlx5e_close_tis(struct mlx5e_priv *priv, int tc)
{
	mlx5_core_destroy_tis(priv->mdev, priv->tisn[tc]);
}
static int mlx5e_open_tises(struct mlx5e_priv *priv)
{
	int err;
	int tc;

	for (tc = 0; tc < priv->params.num_tc; tc++) {
		err = mlx5e_open_tis(priv, tc);
		if (err)
			goto err_close_tises;
	}

	return 0;

err_close_tises:
	for (tc--; tc >= 0; tc--)
		mlx5e_close_tis(priv, tc);

	return err;
}

static void mlx5e_close_tises(struct mlx5e_priv *priv)
{
	int tc;

	for (tc = 0; tc < priv->params.num_tc; tc++)
		mlx5e_close_tis(priv, tc);
}
static int mlx5e_rx_hash_fn(int hfunc)
{
	return (hfunc == ETH_RSS_HASH_TOP) ?
	       MLX5_RX_HASH_FN_TOEPLITZ :
	       MLX5_RX_HASH_FN_INVERTED_XOR8;
}

static int mlx5e_bits_invert(unsigned long a, int size)
{
	int inv = 0;
	int i;

	for (i = 0; i < size; i++)
		inv |= (test_bit(size - i - 1, &a) ? 1 : 0) << i;

	return inv;
}
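/* The RQ table (RQT) is the RSS indirection table: 2^rx_hash_log_tbl_sz
 * entries, each pointing at one channel's RQ. For the XOR8 hash function
 * the entry index is bit-reversed first, which spreads consecutive hash
 * values across channels more evenly.
 */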
static int mlx5e_open_rqt(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 *in;
	u32 out[MLX5_ST_SZ_DW(create_rqt_out)];
	void *rqtc;
	int inlen;
	int err;
	int log_tbl_sz = priv->params.rx_hash_log_tbl_sz;
	int sz = 1 << log_tbl_sz;
	int i;

	inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);

	MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
	MLX5_SET(rqtc, rqtc, rqt_max_size, sz);

	for (i = 0; i < sz; i++) {
		int ix = i;

		if (priv->params.rss_hfunc == ETH_RSS_HASH_XOR)
			ix = mlx5e_bits_invert(i, log_tbl_sz);

		ix = ix % priv->params.num_channels;
		MLX5_SET(rqtc, rqtc, rq_num[i], priv->channel[ix]->rq.rqn);
	}

	MLX5_SET(create_rqt_in, in, opcode, MLX5_CMD_OP_CREATE_RQT);

	memset(out, 0, sizeof(out));
	err = mlx5_cmd_exec_check_status(mdev, in, inlen, out, sizeof(out));
	if (!err)
		priv->rqtn = MLX5_GET(create_rqt_out, out, rqtn);

	kvfree(in);

	return err;
}
static void mlx5e_close_rqt(struct mlx5e_priv *priv)
{
	u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)];
	u32 out[MLX5_ST_SZ_DW(destroy_rqt_out)];

	memset(in, 0, sizeof(in));

	MLX5_SET(destroy_rqt_in, in, opcode, MLX5_CMD_OP_DESTROY_RQT);
	MLX5_SET(destroy_rqt_in, in, rqtn, priv->rqtn);

	mlx5_cmd_exec_check_status(priv->mdev, in, sizeof(in), out,
				   sizeof(out));
}
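/* A TIR describes how received packets of one traffic type are dispatched:
 * MLX5E_TT_ANY goes directly to a single RQ, everything else is spread over
 * the RQT using the configured hash (Toeplitz with a random symmetric key,
 * or inverted XOR8), with the hashed fields selected per traffic type below.
 */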
static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt)
{
	void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);

	MLX5_SET(tirc, tirc, transport_domain, priv->tdn);

#define ROUGH_MAX_L2_L3_HDR_SZ 256

#define MLX5_HASH_IP            (MLX5_HASH_FIELD_SEL_SRC_IP   |\
				 MLX5_HASH_FIELD_SEL_DST_IP)

#define MLX5_HASH_IP_L4PORTS    (MLX5_HASH_FIELD_SEL_SRC_IP   |\
				 MLX5_HASH_FIELD_SEL_DST_IP   |\
				 MLX5_HASH_FIELD_SEL_L4_SPORT |\
				 MLX5_HASH_FIELD_SEL_L4_DPORT)

#define MLX5_HASH_IP_IPSEC_SPI  (MLX5_HASH_FIELD_SEL_SRC_IP   |\
				 MLX5_HASH_FIELD_SEL_DST_IP   |\
				 MLX5_HASH_FIELD_SEL_IPSEC_SPI)

	if (priv->params.lro_en) {
		MLX5_SET(tirc, tirc, lro_enable_mask,
			 MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
			 MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO);
		MLX5_SET(tirc, tirc, lro_max_ip_payload_size,
			 (priv->params.lro_wqe_sz -
			  ROUGH_MAX_L2_L3_HDR_SZ) >> 8);
		MLX5_SET(tirc, tirc, lro_timeout_period_usecs,
			 MLX5_CAP_ETH(priv->mdev,
				      lro_timer_supported_periods[3]));
	}

	switch (tt) {
	case MLX5E_TT_ANY:
		MLX5_SET(tirc, tirc, disp_type,
			 MLX5_TIRC_DISP_TYPE_DIRECT);
		MLX5_SET(tirc, tirc, inline_rqn,
			 priv->channel[0]->rq.rqn);
		break;
	default:
		MLX5_SET(tirc, tirc, disp_type,
			 MLX5_TIRC_DISP_TYPE_INDIRECT);
		MLX5_SET(tirc, tirc, indirect_table,
			 priv->rqtn);
		MLX5_SET(tirc, tirc, rx_hash_fn,
			 mlx5e_rx_hash_fn(priv->params.rss_hfunc));
		if (priv->params.rss_hfunc == ETH_RSS_HASH_TOP) {
			void *rss_key = MLX5_ADDR_OF(tirc, tirc,
						     rx_hash_toeplitz_key);
			size_t len = MLX5_FLD_SZ_BYTES(tirc,
						       rx_hash_toeplitz_key);

			MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
			netdev_rss_key_fill(rss_key, len);
		}
		break;
	}

	switch (tt) {
	case MLX5E_TT_IPV4_TCP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV4);
		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
			 MLX5_L4_PROT_TYPE_TCP);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_L4PORTS);
		break;

	case MLX5E_TT_IPV6_TCP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV6);
		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
			 MLX5_L4_PROT_TYPE_TCP);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_L4PORTS);
		break;

	case MLX5E_TT_IPV4_UDP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV4);
		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
			 MLX5_L4_PROT_TYPE_UDP);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_L4PORTS);
		break;

	case MLX5E_TT_IPV6_UDP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV6);
		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
			 MLX5_L4_PROT_TYPE_UDP);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_L4PORTS);
		break;

	case MLX5E_TT_IPV4_IPSEC_AH:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV4);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_IPSEC_SPI);
		break;

	case MLX5E_TT_IPV6_IPSEC_AH:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV6);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_IPSEC_SPI);
		break;

	case MLX5E_TT_IPV4_IPSEC_ESP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV4);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_IPSEC_SPI);
		break;

	case MLX5E_TT_IPV6_IPSEC_ESP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV6);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_IPSEC_SPI);
		break;

	case MLX5E_TT_IPV4:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV4);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP);
		break;

	case MLX5E_TT_IPV6:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV6);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP);
		break;
	}
}
static int mlx5e_open_tir(struct mlx5e_priv *priv, int tt)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 *in;
	void *tirc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(create_tir_in);
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);

	mlx5e_build_tir_ctx(priv, tirc, tt);

	err = mlx5_core_create_tir(mdev, in, inlen, &priv->tirn[tt]);

	kvfree(in);

	return err;
}
static void mlx5e_close_tir(struct mlx5e_priv *priv, int tt)
{
	mlx5_core_destroy_tir(priv->mdev, priv->tirn[tt]);
}
static int mlx5e_open_tirs(struct mlx5e_priv *priv)
{
	int err;
	int i;

	for (i = 0; i < MLX5E_NUM_TT; i++) {
		err = mlx5e_open_tir(priv, i);
		if (err)
			goto err_close_tirs;
	}

	return 0;

err_close_tirs:
	for (i--; i >= 0; i--)
		mlx5e_close_tir(priv, i);

	return err;
}

static void mlx5e_close_tirs(struct mlx5e_priv *priv)
{
	int i;

	for (i = 0; i < MLX5E_NUM_TT; i++)
		mlx5e_close_tir(priv, i);
}
static int mlx5e_set_dev_port_mtu(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5_core_dev *mdev = priv->mdev;
	int hw_mtu;
	int err;

	err = mlx5_set_port_mtu(mdev, MLX5E_SW2HW_MTU(netdev->mtu), 1);
	if (err)
		return err;

	mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1);

	if (MLX5E_HW2SW_MTU(hw_mtu) != netdev->mtu)
		netdev_warn(netdev, "%s: Port MTU %d is different than netdev mtu %d\n",
			    __func__, MLX5E_HW2SW_MTU(hw_mtu), netdev->mtu);

	netdev->mtu = MLX5E_HW2SW_MTU(hw_mtu);

	return 0;
}
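/* Bring-up order on open: set the port MTU, then TISes, channels, the RQT,
 * TIRs, the flow table and finally the VLAN rules; each step's failure
 * unwinds everything opened before it, in reverse.
 */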
int mlx5e_open_locked(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int num_txqs;
	int err;

	num_txqs = priv->params.num_channels * priv->params.num_tc;
	netif_set_real_num_tx_queues(netdev, num_txqs);
	netif_set_real_num_rx_queues(netdev, priv->params.num_channels);

	err = mlx5e_set_dev_port_mtu(netdev);
	if (err)
		return err;

	err = mlx5e_open_tises(priv);
	if (err) {
		netdev_err(netdev, "%s: mlx5e_open_tises failed, %d\n",
			   __func__, err);
		return err;
	}

	err = mlx5e_open_channels(priv);
	if (err) {
		netdev_err(netdev, "%s: mlx5e_open_channels failed, %d\n",
			   __func__, err);
		goto err_close_tises;
	}

	err = mlx5e_open_rqt(priv);
	if (err) {
		netdev_err(netdev, "%s: mlx5e_open_rqt failed, %d\n",
			   __func__, err);
		goto err_close_channels;
	}

	err = mlx5e_open_tirs(priv);
	if (err) {
		netdev_err(netdev, "%s: mlx5e_open_tir failed, %d\n",
			   __func__, err);
		goto err_close_rqls;
	}

	err = mlx5e_open_flow_table(priv);
	if (err) {
		netdev_err(netdev, "%s: mlx5e_open_flow_table failed, %d\n",
			   __func__, err);
		goto err_close_tirs;
	}

	err = mlx5e_add_all_vlan_rules(priv);
	if (err) {
		netdev_err(netdev, "%s: mlx5e_add_all_vlan_rules failed, %d\n",
			   __func__, err);
		goto err_close_flow_table;
	}

	mlx5e_init_eth_addr(priv);

	set_bit(MLX5E_STATE_OPENED, &priv->state);

	mlx5e_update_carrier(priv);
	mlx5e_set_rx_mode_core(priv);

	schedule_delayed_work(&priv->update_stats_work, 0);

	return 0;

err_close_flow_table:
	mlx5e_close_flow_table(priv);

err_close_tirs:
	mlx5e_close_tirs(priv);

err_close_rqls:
	mlx5e_close_rqt(priv);

err_close_channels:
	mlx5e_close_channels(priv);

err_close_tises:
	mlx5e_close_tises(priv);

	return err;
}
static int mlx5e_open(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err;

	mutex_lock(&priv->state_lock);
	err = mlx5e_open_locked(netdev);
	mutex_unlock(&priv->state_lock);

	return err;
}
int mlx5e_close_locked(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	clear_bit(MLX5E_STATE_OPENED, &priv->state);

	mlx5e_set_rx_mode_core(priv);
	mlx5e_del_all_vlan_rules(priv);
	netif_carrier_off(priv->netdev);
	mlx5e_close_flow_table(priv);
	mlx5e_close_tirs(priv);
	mlx5e_close_rqt(priv);
	mlx5e_close_channels(priv);
	mlx5e_close_tises(priv);

	return 0;
}
static int mlx5e_close(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err;

	mutex_lock(&priv->state_lock);
	err = mlx5e_close_locked(netdev);
	mutex_unlock(&priv->state_lock);

	return err;
}
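/* Runtime parameter changes reuse the open/close path: if the interface is
 * up, close it, swap in the new params, and reopen. Callers must already
 * hold state_lock, hence the WARN_ON.
 */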
int mlx5e_update_priv_params(struct mlx5e_priv *priv,
			     struct mlx5e_params *new_params)
{
	int err = 0;
	int was_opened;

	WARN_ON(!mutex_is_locked(&priv->state_lock));

	was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
	if (was_opened)
		mlx5e_close_locked(priv->netdev);

	priv->params = *new_params;

	if (was_opened)
		err = mlx5e_open_locked(priv->netdev);

	return err;
}
static struct rtnl_link_stats64 *
mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_vport_stats *vstats = &priv->stats.vport;

	stats->rx_packets = vstats->rx_packets;
	stats->rx_bytes   = vstats->rx_bytes;
	stats->tx_packets = vstats->tx_packets;
	stats->tx_bytes   = vstats->tx_bytes;
	stats->multicast  = vstats->rx_multicast_packets +
			    vstats->tx_multicast_packets;
	stats->tx_errors  = vstats->tx_error_packets;
	stats->rx_errors  = vstats->rx_error_packets;
	stats->tx_dropped = vstats->tx_queue_dropped;
	stats->rx_crc_errors = 0;
	stats->rx_length_errors = 0;

	return stats;
}
static void mlx5e_set_rx_mode(struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	schedule_work(&priv->set_rx_mode_work);
}
static int mlx5e_set_mac(struct net_device *netdev, void *addr)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct sockaddr *saddr = addr;

	if (!is_valid_ether_addr(saddr->sa_data))
		return -EADDRNOTAVAIL;

	netif_addr_lock_bh(netdev);
	ether_addr_copy(netdev->dev_addr, saddr->sa_data);
	netif_addr_unlock_bh(netdev);

	schedule_work(&priv->set_rx_mode_work);

	return 0;
}
static int mlx5e_set_features(struct net_device *netdev,
			      netdev_features_t features)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	netdev_features_t changes = features ^ netdev->features;
	struct mlx5e_params new_params;
	bool update_params = false;

	mutex_lock(&priv->state_lock);
	new_params = priv->params;

	if (changes & NETIF_F_LRO) {
		new_params.lro_en = !!(features & NETIF_F_LRO);
		update_params = true;
	}

	if (update_params)
		mlx5e_update_priv_params(priv, &new_params);

	if (changes & NETIF_F_HW_VLAN_CTAG_FILTER) {
		if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
			mlx5e_enable_vlan_filter(priv);
		else
			mlx5e_disable_vlan_filter(priv);
	}

	mutex_unlock(&priv->state_lock);

	return 0;
}
static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5_core_dev *mdev = priv->mdev;
	int max_mtu;
	int err;

	mlx5_query_port_max_mtu(mdev, &max_mtu, 1);

	if (new_mtu > max_mtu) {
		netdev_err(netdev,
			   "%s: Bad MTU (%d) > (%d) Max\n",
			   __func__, new_mtu, max_mtu);
		return -EINVAL;
	}

	mutex_lock(&priv->state_lock);
	netdev->mtu = new_mtu;
	err = mlx5e_update_priv_params(priv, &priv->params);
	mutex_unlock(&priv->state_lock);

	return err;
}
static struct net_device_ops mlx5e_netdev_ops = {
	.ndo_open                = mlx5e_open,
	.ndo_stop                = mlx5e_close,
	.ndo_start_xmit          = mlx5e_xmit,
	.ndo_get_stats64         = mlx5e_get_stats,
	.ndo_set_rx_mode         = mlx5e_set_rx_mode,
	.ndo_set_mac_address     = mlx5e_set_mac,
	.ndo_vlan_rx_add_vid     = mlx5e_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid    = mlx5e_vlan_rx_kill_vid,
	.ndo_set_features        = mlx5e_set_features,
	.ndo_change_mtu          = mlx5e_change_mtu,
};
static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
{
	if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return -ENOTSUPP;

	if (!MLX5_CAP_GEN(mdev, eth_net_offloads) ||
	    !MLX5_CAP_GEN(mdev, nic_flow_table) ||
	    !MLX5_CAP_ETH(mdev, csum_cap) ||
	    !MLX5_CAP_ETH(mdev, max_lso_cap) ||
	    !MLX5_CAP_ETH(mdev, vlan_cap) ||
	    !MLX5_CAP_ETH(mdev, rss_ind_tbl_cap) ||
	    MLX5_CAP_FLOWTABLE(mdev,
			       flow_table_properties_nic_receive.max_ft_level)
			       < 3) {
		mlx5_core_warn(mdev,
			       "Not creating net device, some required device capabilities are missing\n");
		return -ENOTSUPP;
	}

	return 0;
}
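/* Max TX inline is bounded by the blue-flame buffer: an inlined WQE must
 * fit in half the BF register, so the usable inline headroom is what
 * remains after the fixed part of the TX WQE (the two bytes of
 * inline_hdr_start belong to the inline area and are added back).
 */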
u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev)
{
	int bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;

	return bf_buf_size -
	       sizeof(struct mlx5e_tx_wqe) +
	       2 /*sizeof(mlx5e_tx_wqe.inline_hdr_start)*/;
}
static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev,
				    struct net_device *netdev,
				    int num_comp_vectors)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	priv->params.log_sq_size           =
		MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
	priv->params.log_rq_size           =
		MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
	priv->params.rx_cq_moderation_usec =
		MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC;
	priv->params.rx_cq_moderation_pkts =
		MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS;
	priv->params.tx_cq_moderation_usec =
		MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
	priv->params.tx_cq_moderation_pkts =
		MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
	priv->params.tx_max_inline         = mlx5e_get_max_inline_cap(mdev);
	priv->params.min_rx_wqes           =
		MLX5E_PARAMS_DEFAULT_MIN_RX_WQES;
	priv->params.rx_hash_log_tbl_sz    =
		(order_base_2(num_comp_vectors) >
		 MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ) ?
		order_base_2(num_comp_vectors) :
		MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ;
	priv->params.num_tc                = 1;
	priv->params.default_vlan_prio     = 0;
	priv->params.rss_hfunc             = ETH_RSS_HASH_XOR;

	priv->params.lro_en = false && !!MLX5_CAP_ETH(priv->mdev, lro_cap);
	priv->params.lro_wqe_sz            =
		MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;

	priv->mdev                         = mdev;
	priv->netdev                       = netdev;
	priv->params.num_channels          = num_comp_vectors;
	priv->default_vlan_prio            = priv->params.default_vlan_prio;

	spin_lock_init(&priv->async_events_spinlock);
	mutex_init(&priv->state_lock);

	INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work);
	INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
	INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work);
}
static void mlx5e_set_netdev_dev_addr(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	mlx5_query_nic_vport_mac_address(priv->mdev, netdev->dev_addr);
}
static void mlx5e_build_netdev(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5_core_dev *mdev = priv->mdev;

	SET_NETDEV_DEV(netdev, &mdev->pdev->dev);

	if (priv->params.num_tc > 1)
		mlx5e_netdev_ops.ndo_select_queue = mlx5e_select_queue;

	netdev->netdev_ops        = &mlx5e_netdev_ops;
	netdev->watchdog_timeo    = 15 * HZ;

	netdev->ethtool_ops       = &mlx5e_ethtool_ops;

	netdev->vlan_features    |= NETIF_F_SG;
	netdev->vlan_features    |= NETIF_F_IP_CSUM;
	netdev->vlan_features    |= NETIF_F_IPV6_CSUM;
	netdev->vlan_features    |= NETIF_F_GRO;
	netdev->vlan_features    |= NETIF_F_TSO;
	netdev->vlan_features    |= NETIF_F_TSO6;
	netdev->vlan_features    |= NETIF_F_RXCSUM;
	netdev->vlan_features    |= NETIF_F_RXHASH;

	if (!!MLX5_CAP_ETH(mdev, lro_cap))
		netdev->vlan_features    |= NETIF_F_LRO;

	netdev->hw_features       = netdev->vlan_features;
	netdev->hw_features      |= NETIF_F_HW_VLAN_CTAG_RX;
	netdev->hw_features      |= NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->features          = netdev->hw_features;
	if (!priv->params.lro_en)
		netdev->features  &= ~NETIF_F_LRO;

	netdev->features         |= NETIF_F_HIGHDMA;

	netdev->priv_flags       |= IFF_UNICAST_FLT;

	mlx5e_set_netdev_dev_addr(netdev);
}
static int mlx5e_create_mkey(struct mlx5e_priv *priv, u32 pdn,
			     struct mlx5_core_mr *mr)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5_create_mkey_mbox_in *in;
	int err;

	in = mlx5_vzalloc(sizeof(*in));
	if (!in)
		return -ENOMEM;

	in->seg.flags = MLX5_PERM_LOCAL_WRITE |
			MLX5_PERM_LOCAL_READ  |
			MLX5_ACCESS_MODE_PA;
	in->seg.flags_pd = cpu_to_be32(pdn | MLX5_MKEY_LEN64);
	in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);

	err = mlx5_core_create_mkey(mdev, mr, in, sizeof(*in), NULL, NULL,
				    NULL);

	kvfree(in);

	return err;
}
static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev)
{
	struct net_device *netdev;
	struct mlx5e_priv *priv;
	int ncv = mdev->priv.eq_table.num_comp_vectors;
	int err;

	if (mlx5e_check_required_hca_cap(mdev))
		return NULL;

	netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv), ncv, ncv);
	if (!netdev) {
		mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n");
		return NULL;
	}

	mlx5e_build_netdev_priv(mdev, netdev, ncv);
	mlx5e_build_netdev(netdev);

	netif_carrier_off(netdev);

	priv = netdev_priv(netdev);

	err = mlx5_alloc_map_uar(mdev, &priv->cq_uar);
	if (err) {
		netdev_err(netdev, "%s: mlx5_alloc_map_uar failed, %d\n",
			   __func__, err);
		goto err_free_netdev;
	}

	err = mlx5_core_alloc_pd(mdev, &priv->pdn);
	if (err) {
		netdev_err(netdev, "%s: mlx5_core_alloc_pd failed, %d\n",
			   __func__, err);
		goto err_unmap_free_uar;
	}

	err = mlx5_alloc_transport_domain(mdev, &priv->tdn);
	if (err) {
		netdev_err(netdev, "%s: mlx5_alloc_transport_domain failed, %d\n",
			   __func__, err);
		goto err_dealloc_pd;
	}

	err = mlx5e_create_mkey(priv, priv->pdn, &priv->mr);
	if (err) {
		netdev_err(netdev, "%s: mlx5e_create_mkey failed, %d\n",
			   __func__, err);
		goto err_dealloc_transport_domain;
	}

	err = register_netdev(netdev);
	if (err) {
		netdev_err(netdev, "%s: register_netdev failed, %d\n",
			   __func__, err);
		goto err_destroy_mkey;
	}

	mlx5e_enable_async_events(priv);

	return priv;

err_destroy_mkey:
	mlx5_core_destroy_mkey(mdev, &priv->mr);

err_dealloc_transport_domain:
	mlx5_dealloc_transport_domain(mdev, priv->tdn);

err_dealloc_pd:
	mlx5_core_dealloc_pd(mdev, priv->pdn);

err_unmap_free_uar:
	mlx5_unmap_free_uar(mdev, &priv->cq_uar);

err_free_netdev:
	free_netdev(netdev);

	return NULL;
}
static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv)
{
	struct mlx5e_priv *priv = vpriv;
	struct net_device *netdev = priv->netdev;

	unregister_netdev(netdev);
	mlx5_core_destroy_mkey(priv->mdev, &priv->mr);
	mlx5_dealloc_transport_domain(priv->mdev, priv->tdn);
	mlx5_core_dealloc_pd(priv->mdev, priv->pdn);
	mlx5_unmap_free_uar(priv->mdev, &priv->cq_uar);
	mlx5e_disable_async_events(priv);
	flush_scheduled_work();
	free_netdev(netdev);
}
static void *mlx5e_get_netdev(void *vpriv)
{
	struct mlx5e_priv *priv = vpriv;

	return priv->netdev;
}
static struct mlx5_interface mlx5e_interface = {
	.add       = mlx5e_create_netdev,
	.remove    = mlx5e_destroy_netdev,
	.event     = mlx5e_async_event,
	.protocol  = MLX5_INTERFACE_PROTOCOL_ETH,
	.get_dev   = mlx5e_get_netdev,
};
void mlx5e_init(void)
{
	mlx5_register_interface(&mlx5e_interface);
}

void mlx5e_cleanup(void)
{
	mlx5_unregister_interface(&mlx5e_interface);
}