/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mlx5/flow_table.h>
#include "en.h"

struct mlx5e_rq_param {
	u32 rqc[MLX5_ST_SZ_DW(rqc)];
	struct mlx5_wq_param wq;
};

struct mlx5e_sq_param {
	u32 sqc[MLX5_ST_SZ_DW(sqc)];
	struct mlx5_wq_param wq;
};

struct mlx5e_cq_param {
	u32 cqc[MLX5_ST_SZ_DW(cqc)];
	struct mlx5_wq_param wq;
	u16 eq_ix;
};

struct mlx5e_channel_param {
	struct mlx5e_rq_param rq;
	struct mlx5e_sq_param sq;
	struct mlx5e_cq_param rx_cq;
	struct mlx5e_cq_param tx_cq;
};

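/* Mirror the queried vport state onto the netdev carrier so the stack
 * sees link up/down transitions.
 */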
static void mlx5e_update_carrier(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u8 port_state;

	port_state = mlx5_query_vport_state(mdev,
			MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT);

	if (port_state == VPORT_STATE_UP)
		netif_carrier_on(priv->netdev);
	else
		netif_carrier_off(priv->netdev);
}

static void mlx5e_update_carrier_work(struct work_struct *work)
{
	struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
					       update_carrier_work);

	mutex_lock(&priv->state_lock);
	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
		mlx5e_update_carrier(priv);
	mutex_unlock(&priv->state_lock);
}

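/* Fold the per-ring SW counters and the QUERY_VPORT_COUNTER HW counters
 * into the vport stats snapshot that mlx5e_get_stats() later reports.
 */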
void mlx5e_update_stats(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_vport_stats *s = &priv->stats.vport;
	struct mlx5e_rq_stats *rq_stats;
	struct mlx5e_sq_stats *sq_stats;
	u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)];
	u32 *out;
	int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
	u64 tx_offload_none;
	int i, j;

	out = mlx5_vzalloc(outlen);
	if (!out)
		return;

	/* Collect first the SW counters and then the HW counters for consistency */
	s->tso_packets = 0;
	s->tso_bytes = 0;
	s->tx_queue_stopped = 0;
	s->tx_queue_wake = 0;
	s->tx_queue_dropped = 0;
	tx_offload_none = 0;
	s->lro_packets = 0;
	s->lro_bytes = 0;
	s->rx_csum_none = 0;
	s->rx_wqe_err = 0;
	for (i = 0; i < priv->params.num_channels; i++) {
		rq_stats = &priv->channel[i]->rq.stats;

		s->lro_packets += rq_stats->lro_packets;
		s->lro_bytes += rq_stats->lro_bytes;
		s->rx_csum_none += rq_stats->csum_none;
		s->rx_wqe_err += rq_stats->wqe_err;

		for (j = 0; j < priv->num_tc; j++) {
			sq_stats = &priv->channel[i]->sq[j].stats;

			s->tso_packets += sq_stats->tso_packets;
			s->tso_bytes += sq_stats->tso_bytes;
			s->tx_queue_stopped += sq_stats->stopped;
			s->tx_queue_wake += sq_stats->wake;
			s->tx_queue_dropped += sq_stats->dropped;
			tx_offload_none += sq_stats->csum_offload_none;
		}
	}

	/* HW counters */
	memset(in, 0, sizeof(in));

	MLX5_SET(query_vport_counter_in, in, opcode,
		 MLX5_CMD_OP_QUERY_VPORT_COUNTER);
	MLX5_SET(query_vport_counter_in, in, op_mod, 0);
	MLX5_SET(query_vport_counter_in, in, other_vport, 0);

	memset(out, 0, outlen);

	if (mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen))
		goto free_out;

#define MLX5_GET_CTR(p, x) \
	MLX5_GET64(query_vport_counter_out, p, x)

	s->rx_error_packets =
		MLX5_GET_CTR(out, received_errors.packets);
	s->rx_error_bytes =
		MLX5_GET_CTR(out, received_errors.octets);
	s->tx_error_packets =
		MLX5_GET_CTR(out, transmit_errors.packets);
	s->tx_error_bytes =
		MLX5_GET_CTR(out, transmit_errors.octets);

	s->rx_unicast_packets =
		MLX5_GET_CTR(out, received_eth_unicast.packets);
	s->rx_unicast_bytes =
		MLX5_GET_CTR(out, received_eth_unicast.octets);
	s->tx_unicast_packets =
		MLX5_GET_CTR(out, transmitted_eth_unicast.packets);
	s->tx_unicast_bytes =
		MLX5_GET_CTR(out, transmitted_eth_unicast.octets);

	s->rx_multicast_packets =
		MLX5_GET_CTR(out, received_eth_multicast.packets);
	s->rx_multicast_bytes =
		MLX5_GET_CTR(out, received_eth_multicast.octets);
	s->tx_multicast_packets =
		MLX5_GET_CTR(out, transmitted_eth_multicast.packets);
	s->tx_multicast_bytes =
		MLX5_GET_CTR(out, transmitted_eth_multicast.octets);

	s->rx_broadcast_packets =
		MLX5_GET_CTR(out, received_eth_broadcast.packets);
	s->rx_broadcast_bytes =
		MLX5_GET_CTR(out, received_eth_broadcast.octets);
	s->tx_broadcast_packets =
		MLX5_GET_CTR(out, transmitted_eth_broadcast.packets);
	s->tx_broadcast_bytes =
		MLX5_GET_CTR(out, transmitted_eth_broadcast.octets);

	s->rx_packets =
		s->rx_unicast_packets +
		s->rx_multicast_packets +
		s->rx_broadcast_packets;
	s->rx_bytes =
		s->rx_unicast_bytes +
		s->rx_multicast_bytes +
		s->rx_broadcast_bytes;
	s->tx_packets =
		s->tx_unicast_packets +
		s->tx_multicast_packets +
		s->tx_broadcast_packets;
	s->tx_bytes =
		s->tx_unicast_bytes +
		s->tx_multicast_bytes +
		s->tx_broadcast_bytes;

	/* Update calculated offload counters */
	s->tx_csum_offload = s->tx_packets - tx_offload_none;
	s->rx_csum_good = s->rx_packets - s->rx_csum_none;

free_out:
	kvfree(out);
}

static void mlx5e_update_stats_work(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct mlx5e_priv *priv = container_of(dwork, struct mlx5e_priv,
					       update_stats_work);
	mutex_lock(&priv->state_lock);
	if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
		mlx5e_update_stats(priv);
		schedule_delayed_work(dwork,
				      msecs_to_jiffies(
					      MLX5E_UPDATE_STATS_INTERVAL));
	}
	mutex_unlock(&priv->state_lock);
}

static void __mlx5e_async_event(struct mlx5e_priv *priv,
				enum mlx5_dev_event event)
{
	switch (event) {
	case MLX5_DEV_EVENT_PORT_UP:
	case MLX5_DEV_EVENT_PORT_DOWN:
		schedule_work(&priv->update_carrier_work);
		break;

	default:
		break;
	}
}

static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
			      enum mlx5_dev_event event, unsigned long param)
{
	struct mlx5e_priv *priv = vpriv;

	spin_lock(&priv->async_events_spinlock);
	if (test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state))
		__mlx5e_async_event(priv, event);
	spin_unlock(&priv->async_events_spinlock);
}

static void mlx5e_enable_async_events(struct mlx5e_priv *priv)
{
	set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state);
}

static void mlx5e_disable_async_events(struct mlx5e_priv *priv)
{
	spin_lock_irq(&priv->async_events_spinlock);
	clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state);
	spin_unlock_irq(&priv->async_events_spinlock);
}

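/* Post a NOP WQE with CQ_UPDATE set; its completion fires the channel's
 * NAPI, which is also used to kick off RX WQE posting (see
 * mlx5e_open_rq()).
 */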
static void mlx5e_send_nop(struct mlx5e_sq *sq)
{
	struct mlx5_wq_cyc *wq = &sq->wq;

	u16 pi = sq->pc & wq->sz_m1;
	struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);

	struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;

	memset(cseg, 0, sizeof(*cseg));

	cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_NOP);
	cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | 0x01);
	cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;

	sq->skb[pi] = NULL;
	sq->pc++;
	mlx5e_tx_notify_hw(sq, wqe);
}

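/* Allocate the RQ's linked-list work queue and the parallel skb array,
 * and prefill every RX WQE with the channel's mkey and the buffer size
 * (LRO WQE size when LRO is on, otherwise MTU plus L2 headers).
 */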
static int mlx5e_create_rq(struct mlx5e_channel *c,
			   struct mlx5e_rq_param *param,
			   struct mlx5e_rq *rq)
{
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;
	void *rqc = param->rqc;
	void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
	int wq_sz;
	int err;
	int i;

	err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wq,
				&rq->wq_ctrl);
	if (err)
		return err;

	rq->wq.db = &rq->wq.db[MLX5_RCV_DBR];

	wq_sz = mlx5_wq_ll_get_size(&rq->wq);
	rq->skb = kzalloc_node(wq_sz * sizeof(*rq->skb), GFP_KERNEL,
			       cpu_to_node(c->cpu));
	if (!rq->skb) {
		err = -ENOMEM;
		goto err_rq_wq_destroy;
	}

	rq->wqe_sz = (priv->params.lro_en) ? priv->params.lro_wqe_sz :
		     priv->netdev->mtu + ETH_HLEN + VLAN_HLEN;

	for (i = 0; i < wq_sz; i++) {
		struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, i);

		wqe->data.lkey = c->mkey_be;
		wqe->data.byte_count = cpu_to_be32(rq->wqe_sz);
	}

	rq->pdev = c->pdev;
	rq->netdev = c->netdev;
	rq->channel = c;
	rq->ix = c->ix;

	return 0;

err_rq_wq_destroy:
	mlx5_wq_destroy(&rq->wq_ctrl);

	return err;
}

static void mlx5e_destroy_rq(struct mlx5e_rq *rq)
{
	kfree(rq->skb);
	mlx5_wq_destroy(&rq->wq_ctrl);
}

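/* Issue the CREATE_RQ firmware command: the inbox carries the RQ context
 * built from the channel parameters plus the page list (PAS) of the work
 * queue buffer; the new RQ is created in RST state.
 */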
static int mlx5e_enable_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
{
	struct mlx5e_channel *c = rq->channel;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	void *in;
	void *rqc;
	void *wq;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(create_rq_in) +
		sizeof(u64) * rq->wq_ctrl.buf.npages;
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
	wq = MLX5_ADDR_OF(rqc, rqc, wq);

	memcpy(rqc, param->rqc, sizeof(param->rqc));

	MLX5_SET(rqc, rqc, cqn, c->rq.cq.mcq.cqn);
	MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST);
	MLX5_SET(rqc, rqc, flush_in_error_en, 1);
	MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
	MLX5_SET(wq, wq, log_wq_pg_sz, rq->wq_ctrl.buf.page_shift -
		 PAGE_SHIFT);
	MLX5_SET64(wq, wq, dbr_addr, rq->wq_ctrl.db.dma);

	mlx5_fill_page_array(&rq->wq_ctrl.buf,
			     (__be64 *)MLX5_ADDR_OF(wq, wq, pas));

	err = mlx5_core_create_rq(mdev, in, inlen, &rq->rqn);

	kvfree(in);

	return err;
}

static int mlx5e_modify_rq(struct mlx5e_rq *rq, int curr_state, int next_state)
{
	struct mlx5e_channel *c = rq->channel;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	void *in;
	void *rqc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);

	MLX5_SET(modify_rq_in, in, rq_state, curr_state);
	MLX5_SET(rqc, rqc, state, next_state);

	err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);

	kvfree(in);

	return err;
}

static void mlx5e_disable_rq(struct mlx5e_rq *rq)
{
	struct mlx5e_channel *c = rq->channel;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	mlx5_core_destroy_rq(mdev, rq->rqn);
}

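/* Poll (up to 1000 x 20ms) until NAPI has posted at least min_rx_wqes
 * receive WQEs, so the ring is primed before the interface reports open.
 */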
static int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq)
{
	struct mlx5e_channel *c = rq->channel;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_wq_ll *wq = &rq->wq;
	int i;

	for (i = 0; i < 1000; i++) {
		if (wq->cur_sz >= priv->params.min_rx_wqes)
			return 0;

		msleep(20);
	}

	return -ETIMEDOUT;
}

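/* RQ bring-up: create the SW and HW objects, move the RQ RST->RDY, then
 * post a NOP on SQ 0 so the resulting completion triggers RX WQE posting
 * from NAPI.
 */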
static int mlx5e_open_rq(struct mlx5e_channel *c,
			 struct mlx5e_rq_param *param,
			 struct mlx5e_rq *rq)
{
	int err;

	err = mlx5e_create_rq(c, param, rq);
	if (err)
		return err;

	err = mlx5e_enable_rq(rq, param);
	if (err)
		goto err_destroy_rq;

	err = mlx5e_modify_rq(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
	if (err)
		goto err_disable_rq;

	set_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state);
	mlx5e_send_nop(&c->sq[0]); /* trigger mlx5e_post_rx_wqes() */

	return 0;

err_disable_rq:
	mlx5e_disable_rq(rq);
err_destroy_rq:
	mlx5e_destroy_rq(rq);

	return err;
}

static void mlx5e_close_rq(struct mlx5e_rq *rq)
{
	clear_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state);
	napi_synchronize(&rq->channel->napi); /* prevent mlx5e_post_rx_wqes */

	mlx5e_modify_rq(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR);
	while (!mlx5_wq_ll_is_empty(&rq->wq))
		msleep(20);

	/* avoid destroying rq before mlx5e_poll_rx_cq() is done with it */
	napi_synchronize(&rq->channel->napi);

	mlx5e_disable_rq(rq);
	mlx5e_destroy_rq(rq);
}

static void mlx5e_free_sq_db(struct mlx5e_sq *sq)
{
	kfree(sq->dma_fifo);
	kfree(sq->skb);
}

static int mlx5e_alloc_sq_db(struct mlx5e_sq *sq, int numa)
{
	int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
	int df_sz = wq_sz * MLX5_SEND_WQEBB_NUM_DS;

	sq->skb = kzalloc_node(wq_sz * sizeof(*sq->skb), GFP_KERNEL, numa);
	sq->dma_fifo = kzalloc_node(df_sz * sizeof(*sq->dma_fifo), GFP_KERNEL,
				    numa);

	if (!sq->skb || !sq->dma_fifo) {
		mlx5e_free_sq_db(sq);
		return -ENOMEM;
	}

	sq->dma_fifo_mask = df_sz - 1;

	return 0;
}

static int mlx5e_create_sq(struct mlx5e_channel *c,
			   int tc,
			   struct mlx5e_sq_param *param,
			   struct mlx5e_sq *sq)
{
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	void *sqc = param->sqc;
	void *sqc_wq = MLX5_ADDR_OF(sqc, sqc, wq);
	int err;

	err = mlx5_alloc_map_uar(mdev, &sq->uar);
	if (err)
		return err;

	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq,
				 &sq->wq_ctrl);
	if (err)
		goto err_unmap_free_uar;

	sq->wq.db = &sq->wq.db[MLX5_SND_DBR];
	sq->uar_map = sq->uar.map;
	sq->bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;

	err = mlx5e_alloc_sq_db(sq, cpu_to_node(c->cpu));
	if (err)
		goto err_sq_wq_destroy;

	sq->txq = netdev_get_tx_queue(priv->netdev,
				      c->ix + tc * priv->params.num_channels);

	sq->pdev = c->pdev;
	sq->mkey_be = c->mkey_be;
	sq->channel = c;
	sq->tc = tc;

	return 0;

err_sq_wq_destroy:
	mlx5_wq_destroy(&sq->wq_ctrl);

err_unmap_free_uar:
	mlx5_unmap_free_uar(mdev, &sq->uar);

	return err;
}

static void mlx5e_destroy_sq(struct mlx5e_sq *sq)
{
	struct mlx5e_channel *c = sq->channel;
	struct mlx5e_priv *priv = c->priv;

	mlx5e_free_sq_db(sq);
	mlx5_wq_destroy(&sq->wq_ctrl);
	mlx5_unmap_free_uar(priv->mdev, &sq->uar);
}

static int mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param)
{
	struct mlx5e_channel *c = sq->channel;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	void *in;
	void *sqc;
	void *wq;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
		sizeof(u64) * sq->wq_ctrl.buf.npages;
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
	wq = MLX5_ADDR_OF(sqc, sqc, wq);

	memcpy(sqc, param->sqc, sizeof(param->sqc));

	MLX5_SET(sqc, sqc, user_index, sq->tc);
	MLX5_SET(sqc, sqc, tis_num_0, priv->tisn[sq->tc]);
	MLX5_SET(sqc, sqc, cqn, c->sq[sq->tc].cq.mcq.cqn);
	MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
	MLX5_SET(sqc, sqc, tis_lst_sz, 1);
	MLX5_SET(sqc, sqc, flush_in_error_en, 1);

	MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
	MLX5_SET(wq, wq, uar_page, sq->uar.index);
	MLX5_SET(wq, wq, log_wq_pg_sz, sq->wq_ctrl.buf.page_shift -
		 PAGE_SHIFT);
	MLX5_SET64(wq, wq, dbr_addr, sq->wq_ctrl.db.dma);

	mlx5_fill_page_array(&sq->wq_ctrl.buf,
			     (__be64 *)MLX5_ADDR_OF(wq, wq, pas));

	err = mlx5_core_create_sq(mdev, in, inlen, &sq->sqn);

	kvfree(in);

	return err;
}

static int mlx5e_modify_sq(struct mlx5e_sq *sq, int curr_state, int next_state)
{
	struct mlx5e_channel *c = sq->channel;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	void *in;
	void *sqc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);

	MLX5_SET(modify_sq_in, in, sq_state, curr_state);
	MLX5_SET(sqc, sqc, state, next_state);

	err = mlx5_core_modify_sq(mdev, sq->sqn, in, inlen);

	kvfree(in);

	return err;
}

static void mlx5e_disable_sq(struct mlx5e_sq *sq)
{
	struct mlx5e_channel *c = sq->channel;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	mlx5_core_destroy_sq(mdev, sq->sqn);
}

static int mlx5e_open_sq(struct mlx5e_channel *c,
			 int tc,
			 struct mlx5e_sq_param *param,
			 struct mlx5e_sq *sq)
{
	int err;

	err = mlx5e_create_sq(c, tc, param, sq);
	if (err)
		return err;

	err = mlx5e_enable_sq(sq, param);
	if (err)
		goto err_destroy_sq;

	err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RST, MLX5_SQC_STATE_RDY);
	if (err)
		goto err_disable_sq;

	set_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state);
	netdev_tx_reset_queue(sq->txq);
	netif_tx_start_queue(sq->txq);

	return 0;

err_disable_sq:
	mlx5e_disable_sq(sq);
err_destroy_sq:
	mlx5e_destroy_sq(sq);

	return err;
}

static inline void netif_tx_disable_queue(struct netdev_queue *txq)
{
	__netif_tx_lock_bh(txq);
	netif_tx_stop_queue(txq);
	__netif_tx_unlock_bh(txq);
}

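/* SQ teardown: stop the txq, post a final NOP so HW is notified of any
 * pending WQEs, move the SQ to ERR, and wait for the completion counter
 * (cc) to catch up with the producer counter (pc) before freeing.
 */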
static void mlx5e_close_sq(struct mlx5e_sq *sq)
{
	clear_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state);
	napi_synchronize(&sq->channel->napi); /* prevent netif_tx_wake_queue */
	netif_tx_disable_queue(sq->txq);

	/* ensure hw is notified of all pending wqes */
	if (mlx5e_sq_has_room_for(sq, 1))
		mlx5e_send_nop(sq);

	mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY, MLX5_SQC_STATE_ERR);
	while (sq->cc != sq->pc) /* wait till sq is empty */
		msleep(20);

	/* avoid destroying sq before mlx5e_poll_tx_cq() is done with it */
	napi_synchronize(&sq->channel->napi);

	mlx5e_disable_sq(sq);
	mlx5e_destroy_sq(sq);
}

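/* Allocate the CQ buffer and doorbell, hook up the completion and error
 * handlers, and set every CQE's op_own to 0xf1 (invalid opcode, HW
 * ownership) so stale entries are never treated as valid completions.
 */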
static int mlx5e_create_cq(struct mlx5e_channel *c,
			   struct mlx5e_cq_param *param,
			   struct mlx5e_cq *cq)
{
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5_core_cq *mcq = &cq->mcq;
	int eqn_not_used;
	int irqn;
	int err;
	u32 i;

	param->wq.numa = cpu_to_node(c->cpu);
	param->eq_ix = c->ix;

	err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
			       &cq->wq_ctrl);
	if (err)
		return err;

	mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);

	cq->napi = &c->napi;

	mcq->cqe_sz = 64;
	mcq->set_ci_db = cq->wq_ctrl.db.db;
	mcq->arm_db = cq->wq_ctrl.db.db + 1;
	*mcq->set_ci_db = 0;
	*mcq->arm_db = 0;
	mcq->vector = param->eq_ix;
	mcq->comp = mlx5e_completion_event;
	mcq->event = mlx5e_cq_error_event;
	mcq->irqn = irqn;
	mcq->uar = &priv->cq_uar;

	for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
		struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);

		cqe->op_own = 0xf1;
	}

	cq->channel = c;

	return 0;
}

static void mlx5e_destroy_cq(struct mlx5e_cq *cq)
{
	mlx5_wq_destroy(&cq->wq_ctrl);
}

static int mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
{
	struct mlx5e_channel *c = cq->channel;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5_core_cq *mcq = &cq->mcq;

	void *in;
	void *cqc;
	int inlen;
	int irqn_not_used;
	int eqn;
	int err;

	inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
		sizeof(u64) * cq->wq_ctrl.buf.npages;
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);

	memcpy(cqc, param->cqc, sizeof(param->cqc));

	mlx5_fill_page_array(&cq->wq_ctrl.buf,
			     (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));

	mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used);

	MLX5_SET(cqc, cqc, c_eqn, eqn);
	MLX5_SET(cqc, cqc, uar_page, mcq->uar->index);
	MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
		 PAGE_SHIFT);
	MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma);

	err = mlx5_core_create_cq(mdev, mcq, in, inlen);

	kvfree(in);

	if (err)
		return err;

	mlx5e_cq_arm(cq);

	return 0;
}

static void mlx5e_disable_cq(struct mlx5e_cq *cq)
{
	struct mlx5e_channel *c = cq->channel;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	mlx5_core_destroy_cq(mdev, &cq->mcq);
}

static int mlx5e_open_cq(struct mlx5e_channel *c,
			 struct mlx5e_cq_param *param,
			 struct mlx5e_cq *cq,
			 u16 moderation_usecs,
			 u16 moderation_frames)
{
	int err;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	err = mlx5e_create_cq(c, param, cq);
	if (err)
		return err;

	err = mlx5e_enable_cq(cq, param);
	if (err)
		goto err_destroy_cq;

	err = mlx5_core_modify_cq_moderation(mdev, &cq->mcq,
					     moderation_usecs,
					     moderation_frames);
	if (err)
		goto err_disable_cq;

	return 0;

err_disable_cq:
	mlx5e_disable_cq(cq);
err_destroy_cq:
	mlx5e_destroy_cq(cq);

	return err;
}

static void mlx5e_close_cq(struct mlx5e_cq *cq)
{
	mlx5e_disable_cq(cq);
	mlx5e_destroy_cq(cq);
}

static int mlx5e_get_cpu(struct mlx5e_priv *priv, int ix)
{
	return cpumask_first(priv->mdev->priv.irq_info[ix].mask);
}

static int mlx5e_open_tx_cqs(struct mlx5e_channel *c,
			     struct mlx5e_channel_param *cparam)
{
	struct mlx5e_priv *priv = c->priv;
	int err;
	int tc;

	for (tc = 0; tc < c->num_tc; tc++) {
		err = mlx5e_open_cq(c, &cparam->tx_cq, &c->sq[tc].cq,
				    priv->params.tx_cq_moderation_usec,
				    priv->params.tx_cq_moderation_pkts);
		if (err)
			goto err_close_tx_cqs;

		c->sq[tc].cq.sqrq = &c->sq[tc];
	}

	return 0;

err_close_tx_cqs:
	for (tc--; tc >= 0; tc--)
		mlx5e_close_cq(&c->sq[tc].cq);

	return err;
}

static void mlx5e_close_tx_cqs(struct mlx5e_channel *c)
{
	int tc;

	for (tc = 0; tc < c->num_tc; tc++)
		mlx5e_close_cq(&c->sq[tc].cq);
}

static int mlx5e_open_sqs(struct mlx5e_channel *c,
			  struct mlx5e_channel_param *cparam)
{
	int err;
	int tc;

	for (tc = 0; tc < c->num_tc; tc++) {
		err = mlx5e_open_sq(c, tc, &cparam->sq, &c->sq[tc]);
		if (err)
			goto err_close_sqs;
	}

	return 0;

err_close_sqs:
	for (tc--; tc >= 0; tc--)
		mlx5e_close_sq(&c->sq[tc]);

	return err;
}

static void mlx5e_close_sqs(struct mlx5e_channel *c)
{
	int tc;

	for (tc = 0; tc < c->num_tc; tc++)
		mlx5e_close_sq(&c->sq[tc]);
}

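/* A channel bundles one RQ, one SQ per TC, their CQs and a NAPI context,
 * all allocated on the NUMA node of the channel's IRQ CPU.
 */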
static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
			      struct mlx5e_channel_param *cparam,
			      struct mlx5e_channel **cp)
{
	struct net_device *netdev = priv->netdev;
	int cpu = mlx5e_get_cpu(priv, ix);
	struct mlx5e_channel *c;
	int err;

	c = kzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
	if (!c)
		return -ENOMEM;

	c->priv = priv;
	c->ix = ix;
	c->cpu = cpu;
	c->pdev = &priv->mdev->pdev->dev;
	c->netdev = priv->netdev;
	c->mkey_be = cpu_to_be32(priv->mr.key);
	c->num_tc = priv->num_tc;

	netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64);

	err = mlx5e_open_tx_cqs(c, cparam);
	if (err)
		goto err_napi_del;

	err = mlx5e_open_cq(c, &cparam->rx_cq, &c->rq.cq,
			    priv->params.rx_cq_moderation_usec,
			    priv->params.rx_cq_moderation_pkts);
	if (err)
		goto err_close_tx_cqs;
	c->rq.cq.sqrq = &c->rq;

	napi_enable(&c->napi);

	err = mlx5e_open_sqs(c, cparam);
	if (err)
		goto err_disable_napi;

	err = mlx5e_open_rq(c, &cparam->rq, &c->rq);
	if (err)
		goto err_close_sqs;

	netif_set_xps_queue(netdev, get_cpu_mask(c->cpu), ix);
	*cp = c;

	return 0;

err_close_sqs:
	mlx5e_close_sqs(c);

err_disable_napi:
	napi_disable(&c->napi);
	mlx5e_close_cq(&c->rq.cq);

err_close_tx_cqs:
	mlx5e_close_tx_cqs(c);

err_napi_del:
	netif_napi_del(&c->napi);
	kfree(c);

	return err;
}

static void mlx5e_close_channel(struct mlx5e_channel *c)
{
	mlx5e_close_rq(&c->rq);
	mlx5e_close_sqs(c);
	napi_disable(&c->napi);
	mlx5e_close_cq(&c->rq.cq);
	mlx5e_close_tx_cqs(c);
	netif_napi_del(&c->napi);
	kfree(c);
}

static void mlx5e_build_rq_param(struct mlx5e_priv *priv,
				 struct mlx5e_rq_param *param)
{
	void *rqc = param->rqc;
	void *wq = MLX5_ADDR_OF(rqc, rqc, wq);

	MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
	MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
	MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe)));
	MLX5_SET(wq, wq, log_wq_sz, priv->params.log_rq_size);
	MLX5_SET(wq, wq, pd, priv->pdn);

	param->wq.numa = dev_to_node(&priv->mdev->pdev->dev);
	param->wq.linear = 1;
}

static void mlx5e_build_sq_param(struct mlx5e_priv *priv,
				 struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

	MLX5_SET(wq, wq, log_wq_sz, priv->params.log_sq_size);
	MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
	MLX5_SET(wq, wq, pd, priv->pdn);

	param->wq.numa = dev_to_node(&priv->mdev->pdev->dev);
}

static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
					struct mlx5e_cq_param *param)
{
	void *cqc = param->cqc;

	MLX5_SET(cqc, cqc, uar_page, priv->cq_uar.index);
}

static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
				    struct mlx5e_cq_param *param)
{
	void *cqc = param->cqc;

	MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_rq_size);

	mlx5e_build_common_cq_param(priv, param);
}

static void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv,
				    struct mlx5e_cq_param *param)
{
	void *cqc = param->cqc;

	MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_sq_size);

	mlx5e_build_common_cq_param(priv, param);
}

static void mlx5e_build_channel_param(struct mlx5e_priv *priv,
				      struct mlx5e_channel_param *cparam)
{
	memset(cparam, 0, sizeof(*cparam));

	mlx5e_build_rq_param(priv, &cparam->rq);
	mlx5e_build_sq_param(priv, &cparam->sq);
	mlx5e_build_rx_cq_param(priv, &cparam->rx_cq);
	mlx5e_build_tx_cq_param(priv, &cparam->tx_cq);
}

static int mlx5e_open_channels(struct mlx5e_priv *priv)
{
	struct mlx5e_channel_param cparam;
	int err;
	int i;
	int j;

	priv->channel = kcalloc(priv->params.num_channels,
				sizeof(struct mlx5e_channel *), GFP_KERNEL);
	if (!priv->channel)
		return -ENOMEM;

	mlx5e_build_channel_param(priv, &cparam);
	for (i = 0; i < priv->params.num_channels; i++) {
		err = mlx5e_open_channel(priv, i, &cparam, &priv->channel[i]);
		if (err)
			goto err_close_channels;
	}

	for (j = 0; j < priv->params.num_channels; j++) {
		err = mlx5e_wait_for_min_rx_wqes(&priv->channel[j]->rq);
		if (err)
			goto err_close_channels;
	}

	return 0;

err_close_channels:
	for (i--; i >= 0; i--)
		mlx5e_close_channel(priv->channel[i]);

	kfree(priv->channel);

	return err;
}

static void mlx5e_close_channels(struct mlx5e_priv *priv)
{
	int i;

	for (i = 0; i < priv->params.num_channels; i++)
		mlx5e_close_channel(priv->channel[i]);

	kfree(priv->channel);
}

static int mlx5e_open_tis(struct mlx5e_priv *priv, int tc)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(create_tis_in)];
	void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);

	memset(in, 0, sizeof(in));

	MLX5_SET(tisc, tisc, prio, tc);

	return mlx5_core_create_tis(mdev, in, sizeof(in), &priv->tisn[tc]);
}

static void mlx5e_close_tis(struct mlx5e_priv *priv, int tc)
{
	mlx5_core_destroy_tis(priv->mdev, priv->tisn[tc]);
}

static int mlx5e_open_tises(struct mlx5e_priv *priv)
{
	int num_tc = priv->num_tc;
	int err;
	int tc;

	for (tc = 0; tc < num_tc; tc++) {
		err = mlx5e_open_tis(priv, tc);
		if (err)
			goto err_close_tises;
	}

	return 0;

err_close_tises:
	for (tc--; tc >= 0; tc--)
		mlx5e_close_tis(priv, tc);

	return err;
}

static void mlx5e_close_tises(struct mlx5e_priv *priv)
{
	int num_tc = priv->num_tc;
	int tc;

	for (tc = 0; tc < num_tc; tc++)
		mlx5e_close_tis(priv, tc);
}

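/* Build the RQ table (RQT): an indirection table of
 * 2^rx_hash_log_tbl_sz entries filled round-robin with the channels'
 * RQ numbers; the RSS TIRs below dispatch through it.
 */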
static int mlx5e_open_rqt(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 *in;
	u32 out[MLX5_ST_SZ_DW(create_rqt_out)];
	void *rqtc;
	int inlen;
	int err;
	int sz;
	int i;

	sz = 1 << priv->params.rx_hash_log_tbl_sz;

	inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);

	MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
	MLX5_SET(rqtc, rqtc, rqt_max_size, sz);

	for (i = 0; i < sz; i++) {
		int ix = i % priv->params.num_channels;

		MLX5_SET(rqtc, rqtc, rq_num[i], priv->channel[ix]->rq.rqn);
	}

	MLX5_SET(create_rqt_in, in, opcode, MLX5_CMD_OP_CREATE_RQT);

	memset(out, 0, sizeof(out));
	err = mlx5_cmd_exec_check_status(mdev, in, inlen, out, sizeof(out));
	if (!err)
		priv->rqtn = MLX5_GET(create_rqt_out, out, rqtn);

	kvfree(in);

	return err;
}

static void mlx5e_close_rqt(struct mlx5e_priv *priv)
{
	u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)];
	u32 out[MLX5_ST_SZ_DW(destroy_rqt_out)];

	memset(in, 0, sizeof(in));

	MLX5_SET(destroy_rqt_in, in, opcode, MLX5_CMD_OP_DESTROY_RQT);
	MLX5_SET(destroy_rqt_in, in, rqtn, priv->rqtn);

	mlx5_cmd_exec_check_status(priv->mdev, in, sizeof(in), out,
				   sizeof(out));
}

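/* Configure one TIR per traffic type: MLX5E_TT_ANY dispatches directly
 * to channel 0's RQ, while every other type spreads flows over the RQT
 * with a symmetric Toeplitz hash on per-type selected fields; LRO
 * parameters are applied when enabled.
 */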
static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt)
{
	void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);

#define ROUGH_MAX_L2_L3_HDR_SZ 256

#define MLX5_HASH_IP	(MLX5_HASH_FIELD_SEL_SRC_IP |\
			 MLX5_HASH_FIELD_SEL_DST_IP)

#define MLX5_HASH_ALL	(MLX5_HASH_FIELD_SEL_SRC_IP |\
			 MLX5_HASH_FIELD_SEL_DST_IP |\
			 MLX5_HASH_FIELD_SEL_L4_SPORT |\
			 MLX5_HASH_FIELD_SEL_L4_DPORT)

	if (priv->params.lro_en) {
		MLX5_SET(tirc, tirc, lro_enable_mask,
			 MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
			 MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO);
		MLX5_SET(tirc, tirc, lro_max_ip_payload_size,
			 (priv->params.lro_wqe_sz -
			  ROUGH_MAX_L2_L3_HDR_SZ) >> 8);
		MLX5_SET(tirc, tirc, lro_timeout_period_usecs,
			 MLX5_CAP_ETH(priv->mdev,
				      lro_timer_supported_periods[3]));
	}

	switch (tt) {
	case MLX5E_TT_ANY:
		MLX5_SET(tirc, tirc, disp_type,
			 MLX5_TIRC_DISP_TYPE_DIRECT);
		MLX5_SET(tirc, tirc, inline_rqn,
			 priv->channel[0]->rq.rqn);
		break;
	default:
		MLX5_SET(tirc, tirc, disp_type,
			 MLX5_TIRC_DISP_TYPE_INDIRECT);
		MLX5_SET(tirc, tirc, indirect_table,
			 priv->rqtn);
		MLX5_SET(tirc, tirc, rx_hash_fn,
			 MLX5_TIRC_RX_HASH_FN_HASH_TOEPLITZ);
		MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
		netdev_rss_key_fill(MLX5_ADDR_OF(tirc, tirc,
						 rx_hash_toeplitz_key),
				    MLX5_FLD_SZ_BYTES(tirc,
						      rx_hash_toeplitz_key));
		break;
	}

	switch (tt) {
	case MLX5E_TT_IPV4_TCP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV4);
		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
			 MLX5_L4_PROT_TYPE_TCP);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_ALL);
		break;

	case MLX5E_TT_IPV6_TCP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV6);
		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
			 MLX5_L4_PROT_TYPE_TCP);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_ALL);
		break;

	case MLX5E_TT_IPV4_UDP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV4);
		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
			 MLX5_L4_PROT_TYPE_UDP);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_ALL);
		break;

	case MLX5E_TT_IPV6_UDP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV6);
		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
			 MLX5_L4_PROT_TYPE_UDP);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_ALL);
		break;

	case MLX5E_TT_IPV4:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV4);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP);
		break;

	case MLX5E_TT_IPV6:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV6);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP);
		break;
	}
}

static int mlx5e_open_tir(struct mlx5e_priv *priv, int tt)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 *in;
	void *tirc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(create_tir_in);
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);

	mlx5e_build_tir_ctx(priv, tirc, tt);

	err = mlx5_core_create_tir(mdev, in, inlen, &priv->tirn[tt]);

	kvfree(in);

	return err;
}

static void mlx5e_close_tir(struct mlx5e_priv *priv, int tt)
{
	mlx5_core_destroy_tir(priv->mdev, priv->tirn[tt]);
}

static int mlx5e_open_tirs(struct mlx5e_priv *priv)
{
	int err;
	int i;

	for (i = 0; i < MLX5E_NUM_TT; i++) {
		err = mlx5e_open_tir(priv, i);
		if (err)
			goto err_close_tirs;
	}

	return 0;

err_close_tirs:
	for (i--; i >= 0; i--)
		mlx5e_close_tir(priv, i);

	return err;
}

static void mlx5e_close_tirs(struct mlx5e_priv *priv)
{
	int i;

	for (i = 0; i < MLX5E_NUM_TT; i++)
		mlx5e_close_tir(priv, i);
}

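/* ndo_open path, called with state_lock held: apply the MTU to the
 * port, then bring up TISes, channels, the RQT, TIRs, the flow table
 * and VLAN rules, in that order; the error path unwinds in reverse.
 */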
int mlx5e_open_locked(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5_core_dev *mdev = priv->mdev;
	int actual_mtu;
	int num_txqs;
	int err;

	num_txqs = roundup_pow_of_two(priv->params.num_channels) *
		   priv->params.num_tc;
	netif_set_real_num_tx_queues(netdev, num_txqs);
	netif_set_real_num_rx_queues(netdev, priv->params.num_channels);

	err = mlx5_set_port_mtu(mdev, netdev->mtu);
	if (err) {
		netdev_err(netdev, "%s: mlx5_set_port_mtu failed %d\n",
			   __func__, err);
		return err;
	}

	err = mlx5_query_port_oper_mtu(mdev, &actual_mtu);
	if (err) {
		netdev_err(netdev, "%s: mlx5_query_port_oper_mtu failed %d\n",
			   __func__, err);
		return err;
	}

	if (actual_mtu != netdev->mtu)
		netdev_warn(netdev, "%s: Failed to set MTU to %d\n",
			    __func__, netdev->mtu);

	netdev->mtu = actual_mtu;

	err = mlx5e_open_tises(priv);
	if (err) {
		netdev_err(netdev, "%s: mlx5e_open_tises failed, %d\n",
			   __func__, err);
		return err;
	}

	err = mlx5e_open_channels(priv);
	if (err) {
		netdev_err(netdev, "%s: mlx5e_open_channels failed, %d\n",
			   __func__, err);
		goto err_close_tises;
	}

	err = mlx5e_open_rqt(priv);
	if (err) {
		netdev_err(netdev, "%s: mlx5e_open_rqt failed, %d\n",
			   __func__, err);
		goto err_close_channels;
	}

	err = mlx5e_open_tirs(priv);
	if (err) {
		netdev_err(netdev, "%s: mlx5e_open_tirs failed, %d\n",
			   __func__, err);
		goto err_close_rqls;
	}

	err = mlx5e_open_flow_table(priv);
	if (err) {
		netdev_err(netdev, "%s: mlx5e_open_flow_table failed, %d\n",
			   __func__, err);
		goto err_close_tirs;
	}

	err = mlx5e_add_all_vlan_rules(priv);
	if (err) {
		netdev_err(netdev, "%s: mlx5e_add_all_vlan_rules failed, %d\n",
			   __func__, err);
		goto err_close_flow_table;
	}

	mlx5e_init_eth_addr(priv);

	set_bit(MLX5E_STATE_OPENED, &priv->state);

	mlx5e_update_carrier(priv);
	mlx5e_set_rx_mode_core(priv);

	schedule_delayed_work(&priv->update_stats_work, 0);
	return 0;

err_close_flow_table:
	mlx5e_close_flow_table(priv);

err_close_tirs:
	mlx5e_close_tirs(priv);

err_close_rqls:
	mlx5e_close_rqt(priv);

err_close_channels:
	mlx5e_close_channels(priv);

err_close_tises:
	mlx5e_close_tises(priv);

	return err;
}

static int mlx5e_open(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err;

	mutex_lock(&priv->state_lock);
	err = mlx5e_open_locked(netdev);
	mutex_unlock(&priv->state_lock);

	return err;
}

int mlx5e_close_locked(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	clear_bit(MLX5E_STATE_OPENED, &priv->state);

	mlx5e_set_rx_mode_core(priv);
	mlx5e_del_all_vlan_rules(priv);
	netif_carrier_off(priv->netdev);
	mlx5e_close_flow_table(priv);
	mlx5e_close_tirs(priv);
	mlx5e_close_rqt(priv);
	mlx5e_close_channels(priv);
	mlx5e_close_tises(priv);

	return 0;
}

static int mlx5e_close(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err;

	mutex_lock(&priv->state_lock);
	err = mlx5e_close_locked(netdev);
	mutex_unlock(&priv->state_lock);

	return err;
}

int mlx5e_update_priv_params(struct mlx5e_priv *priv,
			     struct mlx5e_params *new_params)
{
	int err = 0;
	int was_opened;

	WARN_ON(!mutex_is_locked(&priv->state_lock));

	was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
	if (was_opened)
		mlx5e_close_locked(priv->netdev);

	priv->params = *new_params;

	if (was_opened)
		err = mlx5e_open_locked(priv->netdev);

	return err;
}

static struct rtnl_link_stats64 *
mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_vport_stats *vstats = &priv->stats.vport;

	stats->rx_packets = vstats->rx_packets;
	stats->rx_bytes = vstats->rx_bytes;
	stats->tx_packets = vstats->tx_packets;
	stats->tx_bytes = vstats->tx_bytes;
	stats->multicast = vstats->rx_multicast_packets +
			   vstats->tx_multicast_packets;
	stats->tx_errors = vstats->tx_error_packets;
	stats->rx_errors = vstats->rx_error_packets;
	stats->tx_dropped = vstats->tx_queue_dropped;
	stats->rx_crc_errors = 0;
	stats->rx_length_errors = 0;

	return stats;
}

static void mlx5e_set_rx_mode(struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	schedule_work(&priv->set_rx_mode_work);
}

static int mlx5e_set_mac(struct net_device *netdev, void *addr)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct sockaddr *saddr = addr;

	if (!is_valid_ether_addr(saddr->sa_data))
		return -EADDRNOTAVAIL;

	netif_addr_lock_bh(netdev);
	ether_addr_copy(netdev->dev_addr, saddr->sa_data);
	netif_addr_unlock_bh(netdev);

	schedule_work(&priv->set_rx_mode_work);

	return 0;
}

static int mlx5e_set_features(struct net_device *netdev,
			      netdev_features_t features)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	netdev_features_t changes = features ^ netdev->features;
	struct mlx5e_params new_params;
	bool update_params = false;

	mutex_lock(&priv->state_lock);
	new_params = priv->params;

	if (changes & NETIF_F_LRO) {
		new_params.lro_en = !!(features & NETIF_F_LRO);
		update_params = true;
	}

	if (update_params)
		mlx5e_update_priv_params(priv, &new_params);

	if (changes & NETIF_F_HW_VLAN_CTAG_FILTER) {
		if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
			mlx5e_enable_vlan_filter(priv);
		else
			mlx5e_disable_vlan_filter(priv);
	}

	mutex_unlock(&priv->state_lock);

	return 0;
}

static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5_core_dev *mdev = priv->mdev;
	int max_mtu;
	int err = 0;

	err = mlx5_query_port_max_mtu(mdev, &max_mtu);
	if (err)
		return err;

	if (new_mtu > max_mtu || new_mtu < MLX5E_PARAMS_MIN_MTU) {
		netdev_err(netdev, "%s: Bad MTU size, mtu must be [%d-%d]\n",
			   __func__, MLX5E_PARAMS_MIN_MTU, max_mtu);
		return -EINVAL;
	}

	mutex_lock(&priv->state_lock);
	netdev->mtu = new_mtu;
	err = mlx5e_update_priv_params(priv, &priv->params);
	mutex_unlock(&priv->state_lock);

	return err;
}

static struct net_device_ops mlx5e_netdev_ops = {
	.ndo_open		= mlx5e_open,
	.ndo_stop		= mlx5e_close,
	.ndo_start_xmit		= mlx5e_xmit,
	.ndo_get_stats64	= mlx5e_get_stats,
	.ndo_set_rx_mode	= mlx5e_set_rx_mode,
	.ndo_set_mac_address	= mlx5e_set_mac,
	.ndo_vlan_rx_add_vid	= mlx5e_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= mlx5e_vlan_rx_kill_vid,
	.ndo_set_features	= mlx5e_set_features,
	.ndo_change_mtu		= mlx5e_change_mtu,
};

static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
{
	if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return -ENOTSUPP;
	if (!MLX5_CAP_GEN(mdev, eth_net_offloads) ||
	    !MLX5_CAP_GEN(mdev, nic_flow_table) ||
	    !MLX5_CAP_ETH(mdev, csum_cap) ||
	    !MLX5_CAP_ETH(mdev, max_lso_cap) ||
	    !MLX5_CAP_ETH(mdev, vlan_cap) ||
	    !MLX5_CAP_ETH(mdev, rss_ind_tbl_cap)) {
		mlx5_core_warn(mdev,
			       "Not creating net device, some required device capabilities are missing\n");
		return -ENOTSUPP;
	}
	return 0;
}

static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev,
				    struct net_device *netdev,
				    int num_comp_vectors)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	priv->params.log_sq_size =
		MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
	priv->params.log_rq_size =
		MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
	priv->params.rx_cq_moderation_usec =
		MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC;
	priv->params.rx_cq_moderation_pkts =
		MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS;
	priv->params.tx_cq_moderation_usec =
		MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
	priv->params.tx_cq_moderation_pkts =
		MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
	priv->params.min_rx_wqes =
		MLX5E_PARAMS_DEFAULT_MIN_RX_WQES;
	priv->params.rx_hash_log_tbl_sz =
		(order_base_2(num_comp_vectors) >
		 MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ) ?
		order_base_2(num_comp_vectors) :
		MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ;
	priv->params.num_tc = 1;
	priv->params.default_vlan_prio = 0;

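	/* LRO is forced off for now: the leading "false &&" overrides
	 * lro_en even when the device reports lro_cap.
	 */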
	priv->params.lro_en = false && !!MLX5_CAP_ETH(priv->mdev, lro_cap);
	priv->params.lro_wqe_sz =
		MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;

	priv->mdev = mdev;
	priv->netdev = netdev;
	priv->params.num_channels = num_comp_vectors;
	priv->order_base_2_num_channels = order_base_2(num_comp_vectors);
	priv->queue_mapping_channel_mask =
		roundup_pow_of_two(num_comp_vectors) - 1;
	priv->num_tc = priv->params.num_tc;
	priv->default_vlan_prio = priv->params.default_vlan_prio;

	spin_lock_init(&priv->async_events_spinlock);
	mutex_init(&priv->state_lock);

	INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work);
	INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
	INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work);
}

static void mlx5e_set_netdev_dev_addr(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	mlx5_query_vport_mac_address(priv->mdev, netdev->dev_addr);
}

static void mlx5e_build_netdev(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5_core_dev *mdev = priv->mdev;

	SET_NETDEV_DEV(netdev, &mdev->pdev->dev);

	if (priv->num_tc > 1) {
		mlx5e_netdev_ops.ndo_select_queue = mlx5e_select_queue;
		mlx5e_netdev_ops.ndo_start_xmit = mlx5e_xmit_multi_tc;
	}

	netdev->netdev_ops = &mlx5e_netdev_ops;
	netdev->watchdog_timeo = 15 * HZ;

	netdev->ethtool_ops = &mlx5e_ethtool_ops;

	netdev->vlan_features |= NETIF_F_IP_CSUM;
	netdev->vlan_features |= NETIF_F_IPV6_CSUM;
	netdev->vlan_features |= NETIF_F_GRO;
	netdev->vlan_features |= NETIF_F_TSO;
	netdev->vlan_features |= NETIF_F_TSO6;
	netdev->vlan_features |= NETIF_F_RXCSUM;
	netdev->vlan_features |= NETIF_F_RXHASH;

	if (!!MLX5_CAP_ETH(mdev, lro_cap))
		netdev->vlan_features |= NETIF_F_LRO;

	netdev->hw_features = netdev->vlan_features;
	netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
	netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
	netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->features = netdev->hw_features;
	if (!priv->params.lro_en)
		netdev->features &= ~NETIF_F_LRO;

	netdev->features |= NETIF_F_HIGHDMA;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	mlx5e_set_netdev_dev_addr(netdev);
}

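/* Create a physical-address (PA) mkey spanning the whole address space
 * (MLX5_MKEY_LEN64) under the given PD; the RQs and SQs reference all
 * of their buffers through this single key.
 */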
static int mlx5e_create_mkey(struct mlx5e_priv *priv, u32 pdn,
			     struct mlx5_core_mr *mr)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5_create_mkey_mbox_in *in;
	int err;

	in = mlx5_vzalloc(sizeof(*in));
	if (!in)
		return -ENOMEM;

	in->seg.flags = MLX5_PERM_LOCAL_WRITE |
			MLX5_PERM_LOCAL_READ |
			MLX5_ACCESS_MODE_PA;
	in->seg.flags_pd = cpu_to_be32(pdn | MLX5_MKEY_LEN64);
	in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);

	err = mlx5_core_create_mkey(mdev, mr, in, sizeof(*in), NULL, NULL,
				    NULL);

	kvfree(in);

	return err;
}

static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev)
{
	struct net_device *netdev;
	struct mlx5e_priv *priv;
	int ncv = mdev->priv.eq_table.num_comp_vectors;
	int err;

	if (mlx5e_check_required_hca_cap(mdev))
		return NULL;

	netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv),
				    roundup_pow_of_two(ncv) * MLX5E_MAX_NUM_TC,
				    ncv);
	if (!netdev) {
		mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n");
		return NULL;
	}

	mlx5e_build_netdev_priv(mdev, netdev, ncv);
	mlx5e_build_netdev(netdev);

	netif_carrier_off(netdev);

	priv = netdev_priv(netdev);

	err = mlx5_alloc_map_uar(mdev, &priv->cq_uar);
	if (err) {
		netdev_err(netdev, "%s: mlx5_alloc_map_uar failed, %d\n",
			   __func__, err);
		goto err_free_netdev;
	}

	err = mlx5_core_alloc_pd(mdev, &priv->pdn);
	if (err) {
		netdev_err(netdev, "%s: mlx5_core_alloc_pd failed, %d\n",
			   __func__, err);
		goto err_unmap_free_uar;
	}

	err = mlx5e_create_mkey(priv, priv->pdn, &priv->mr);
	if (err) {
		netdev_err(netdev, "%s: mlx5e_create_mkey failed, %d\n",
			   __func__, err);
		goto err_dealloc_pd;
	}

	err = register_netdev(netdev);
	if (err) {
		netdev_err(netdev, "%s: register_netdev failed, %d\n",
			   __func__, err);
		goto err_destroy_mkey;
	}

	mlx5e_enable_async_events(priv);

	return priv;

err_destroy_mkey:
	mlx5_core_destroy_mkey(mdev, &priv->mr);

err_dealloc_pd:
	mlx5_core_dealloc_pd(mdev, priv->pdn);

err_unmap_free_uar:
	mlx5_unmap_free_uar(mdev, &priv->cq_uar);

err_free_netdev:
	free_netdev(netdev);

	return NULL;
}

static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv)
{
	struct mlx5e_priv *priv = vpriv;
	struct net_device *netdev = priv->netdev;

	unregister_netdev(netdev);
	mlx5_core_destroy_mkey(priv->mdev, &priv->mr);
	mlx5_core_dealloc_pd(priv->mdev, priv->pdn);
	mlx5_unmap_free_uar(priv->mdev, &priv->cq_uar);
	mlx5e_disable_async_events(priv);
	flush_scheduled_work();
	free_netdev(netdev);
}

static void *mlx5e_get_netdev(void *vpriv)
{
	struct mlx5e_priv *priv = vpriv;

	return priv->netdev;
}

static struct mlx5_interface mlx5e_interface = {
	.add		= mlx5e_create_netdev,
	.remove		= mlx5e_destroy_netdev,
	.event		= mlx5e_async_event,
	.protocol	= MLX5_INTERFACE_PROTOCOL_ETH,
	.get_dev	= mlx5e_get_netdev,
};

void mlx5e_init(void)
{
	mlx5_register_interface(&mlx5e_interface);
}

void mlx5e_cleanup(void)
{
	mlx5_unregister_interface(&mlx5e_interface);
}