/*
 * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/export.h>
#include <linux/etherdevice.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/vport.h>
#include "mlx5_core.h"

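/*
 * Issue QUERY_VPORT_STATE for @vport and return the raw command output
 * in @out for the wrappers below to pick fields from. A non-zero vport
 * number refers to another vport (e.g. a VF) and requires setting the
 * other_vport bit; vport 0 is the caller's own vport.
 */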
static int _mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod,
				   u16 vport, u32 *out, int outlen)
{
	int err;
	u32 in[MLX5_ST_SZ_DW(query_vport_state_in)];

	memset(in, 0, sizeof(in));

	MLX5_SET(query_vport_state_in, in, opcode,
		 MLX5_CMD_OP_QUERY_VPORT_STATE);
	MLX5_SET(query_vport_state_in, in, op_mod, opmod);
	MLX5_SET(query_vport_state_in, in, vport_number, vport);
	if (vport)
		MLX5_SET(query_vport_state_in, in, other_vport, 1);

	err = mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out, outlen);
	if (err)
		mlx5_core_warn(mdev, "MLX5_CMD_OP_QUERY_VPORT_STATE failed\n");

	return err;
}

u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport)
{
	u32 out[MLX5_ST_SZ_DW(query_vport_state_out)] = {0};

	_mlx5_query_vport_state(mdev, opmod, vport, out, sizeof(out));

	return MLX5_GET(query_vport_state_out, out, state);
}
EXPORT_SYMBOL_GPL(mlx5_query_vport_state);

u8 mlx5_query_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport)
{
	u32 out[MLX5_ST_SZ_DW(query_vport_state_out)] = {0};

	_mlx5_query_vport_state(mdev, opmod, vport, out, sizeof(out));

	return MLX5_GET(query_vport_state_out, out, admin_state);
}
EXPORT_SYMBOL_GPL(mlx5_query_vport_admin_state);

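/*
 * Set the administrative (requested) state of @vport to @state; the
 * operational state is what mlx5_query_vport_state() reports back.
 */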
int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
				  u16 vport, u8 state)
{
	u32 in[MLX5_ST_SZ_DW(modify_vport_state_in)];
	u32 out[MLX5_ST_SZ_DW(modify_vport_state_out)];
	int err;

	memset(in, 0, sizeof(in));

	MLX5_SET(modify_vport_state_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_VPORT_STATE);
	MLX5_SET(modify_vport_state_in, in, op_mod, opmod);
	MLX5_SET(modify_vport_state_in, in, vport_number, vport);

	if (vport)
		MLX5_SET(modify_vport_state_in, in, other_vport, 1);

	MLX5_SET(modify_vport_state_in, in, admin_state, state);

	err = mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out,
					 sizeof(out));
	if (err)
		mlx5_core_warn(mdev, "MLX5_CMD_OP_MODIFY_VPORT_STATE failed\n");

	return err;
}
EXPORT_SYMBOL_GPL(mlx5_modify_vport_admin_state);

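/*
 * Thin wrappers around QUERY/MODIFY_NIC_VPORT_CONTEXT. Callers parse
 * (or fill) the nic_vport_context sub-struct themselves; these helpers
 * only set the opcode/vport routing fields and execute the command.
 */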
static int mlx5_query_nic_vport_context(struct mlx5_core_dev *mdev, u16 vport,
					u32 *out, int outlen)
{
	u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)];

	memset(in, 0, sizeof(in));

	MLX5_SET(query_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);

	MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
	if (vport)
		MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);

	return mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out, outlen);
}

static int mlx5_modify_nic_vport_context(struct mlx5_core_dev *mdev, void *in,
					 int inlen)
{
	u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)];

	MLX5_SET(modify_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);

	memset(out, 0, sizeof(out));
	return mlx5_cmd_exec_check_status(mdev, in, inlen, out, sizeof(out));
}

void mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev,
				     u8 *min_inline_mode)
{
	u32 out[MLX5_ST_SZ_DW(query_nic_vport_context_out)] = {0};

	mlx5_query_nic_vport_context(mdev, 0, out, sizeof(out));

	*min_inline_mode = MLX5_GET(query_nic_vport_context_out, out,
				    nic_vport_context.min_wqe_inline_mode);
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_min_inline);

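/*
 * Read the permanent MAC of @vport. The MAC occupies the last six bytes
 * of the 8-byte permanent_address field, hence the +2 offset on copy.
 */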
int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
				     u16 vport, u8 *addr)
{
	u32 *out;
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
	u8 *out_addr;
	int err;

	out = mlx5_vzalloc(outlen);
	if (!out)
		return -ENOMEM;

	out_addr = MLX5_ADDR_OF(query_nic_vport_context_out, out,
				nic_vport_context.permanent_address);

	err = mlx5_query_nic_vport_context(mdev, vport, out, outlen);
	if (!err)
		ether_addr_copy(addr, &out_addr[2]);

	kvfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mac_address);

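/*
 * Program a new permanent MAC for @vport. As in the query path, the MAC
 * is written right-aligned into the 8-byte permanent_address field.
 */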
int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *mdev,
				      u16 vport, u8 *addr)
{
	void *in;
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	int err;
	void *nic_vport_ctx;
	u8 *perm_mac;

	in = mlx5_vzalloc(inlen);
	if (!in) {
		mlx5_core_warn(mdev, "failed to allocate inbox\n");
		return -ENOMEM;
	}

	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.permanent_address, 1);
	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);

	if (vport)
		MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);

	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
				     in, nic_vport_context);
	perm_mac = MLX5_ADDR_OF(nic_vport_context, nic_vport_ctx,
				permanent_address);

	ether_addr_copy(&perm_mac[2], addr);

	err = mlx5_modify_nic_vport_context(mdev, in, inlen);

	kvfree(in);

	return err;
}
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mac_address);

int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu)
{
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
	u32 *out;
	int err;

	out = mlx5_vzalloc(outlen);
	if (!out)
		return -ENOMEM;

	err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
	if (!err)
		*mtu = MLX5_GET(query_nic_vport_context_out, out,
				nic_vport_context.mtu);

	kvfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mtu);

int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu)
{
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	void *in;
	int err;

	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_nic_vport_context_in, in, field_select.mtu, 1);
	MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.mtu, mtu);

	err = mlx5_modify_nic_vport_context(mdev, in, inlen);

	kvfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mtu);

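/*
 * Read the allowed UC or MC MAC list of @vport. On input, *list_size is
 * the capacity of @addr_list; on output, it is the number of entries the
 * firmware actually returned. Requests larger than the device limit
 * (log_max_current_{uc,mc}_list) are clamped.
 */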
int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev,
				  u32 vport,
				  enum mlx5_list_type list_type,
				  u8 addr_list[][ETH_ALEN],
				  int *list_size)
{
	u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)];
	void *nic_vport_ctx;
	int max_list_size;
	int req_list_size;
	int out_sz;
	void *out;
	int err;
	int i;

	req_list_size = *list_size;

	max_list_size = list_type == MLX5_NVPRT_LIST_TYPE_UC ?
		1 << MLX5_CAP_GEN(dev, log_max_current_uc_list) :
		1 << MLX5_CAP_GEN(dev, log_max_current_mc_list);

	if (req_list_size > max_list_size) {
		mlx5_core_warn(dev, "Requested list size (%d) > (%d) max_list_size\n",
			       req_list_size, max_list_size);
		req_list_size = max_list_size;
	}

	/* Size the buffer for the query output plus the variable-length
	 * address list; this buffer holds query output, not modify input.
	 */
	out_sz = MLX5_ST_SZ_BYTES(query_nic_vport_context_out) +
			req_list_size * MLX5_ST_SZ_BYTES(mac_address_layout);

	memset(in, 0, sizeof(in));
	out = kzalloc(out_sz, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	MLX5_SET(query_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
	MLX5_SET(query_nic_vport_context_in, in, allowed_list_type, list_type);
	MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);

	if (vport)
		MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);

	err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, out_sz);
	if (err)
		goto out;

	nic_vport_ctx = MLX5_ADDR_OF(query_nic_vport_context_out, out,
				     nic_vport_context);
	req_list_size = MLX5_GET(nic_vport_context, nic_vport_ctx,
				 allowed_list_size);

	*list_size = req_list_size;
	for (i = 0; i < req_list_size; i++) {
		u8 *mac_addr = MLX5_ADDR_OF(nic_vport_context,
					    nic_vport_ctx,
					    current_uc_mac_address[i]) + 2;
		ether_addr_copy(addr_list[i], mac_addr);
	}
out:
	kfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mac_list);

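/*
 * Replace the caller's allowed UC or MC MAC list. Fails with -ENOSPC
 * when @list_size exceeds the device limit for that list type.
 */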
int mlx5_modify_nic_vport_mac_list(struct mlx5_core_dev *dev,
				   enum mlx5_list_type list_type,
				   u8 addr_list[][ETH_ALEN],
				   int list_size)
{
	u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)];
	void *nic_vport_ctx;
	int max_list_size;
	int in_sz;
	void *in;
	int err;
	int i;

	max_list_size = list_type == MLX5_NVPRT_LIST_TYPE_UC ?
		1 << MLX5_CAP_GEN(dev, log_max_current_uc_list) :
		1 << MLX5_CAP_GEN(dev, log_max_current_mc_list);

	if (list_size > max_list_size)
		return -ENOSPC;

	in_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
		list_size * MLX5_ST_SZ_BYTES(mac_address_layout);

	memset(out, 0, sizeof(out));
	in = kzalloc(in_sz, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.addresses_list, 1);

	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in,
				     nic_vport_context);

	MLX5_SET(nic_vport_context, nic_vport_ctx,
		 allowed_list_type, list_type);
	MLX5_SET(nic_vport_context, nic_vport_ctx,
		 allowed_list_size, list_size);

	for (i = 0; i < list_size; i++) {
		u8 *curr_mac = MLX5_ADDR_OF(nic_vport_context,
					    nic_vport_ctx,
					    current_uc_mac_address[i]) + 2;
		ether_addr_copy(curr_mac, addr_list[i]);
	}

	err = mlx5_cmd_exec_check_status(dev, in, in_sz, out, sizeof(out));
	kfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mac_list);

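/*
 * Read the allowed VLAN list of @vport. VLAN entries reuse the
 * current_uc_mac_address slots of the allowed list and are interpreted
 * through the vlan_layout struct. *size follows the same in/out
 * convention as mlx5_query_nic_vport_mac_list().
 */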
int mlx5_query_nic_vport_vlans(struct mlx5_core_dev *dev,
			       u32 vport,
			       u16 vlans[],
			       int *size)
{
	u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)];
	void *nic_vport_ctx;
	int req_list_size;
	int max_list_size;
	int out_sz;
	void *out;
	int err;
	int i;

	req_list_size = *size;
	max_list_size = 1 << MLX5_CAP_GEN(dev, log_max_vlan_list);
	if (req_list_size > max_list_size) {
		mlx5_core_warn(dev, "Requested list size (%d) > (%d) max list size\n",
			       req_list_size, max_list_size);
		req_list_size = max_list_size;
	}

	/* Size for the query output plus the variable-length VLAN list. */
	out_sz = MLX5_ST_SZ_BYTES(query_nic_vport_context_out) +
			req_list_size * MLX5_ST_SZ_BYTES(vlan_layout);

	memset(in, 0, sizeof(in));
	out = kzalloc(out_sz, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	MLX5_SET(query_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
	MLX5_SET(query_nic_vport_context_in, in, allowed_list_type,
		 MLX5_NVPRT_LIST_TYPE_VLAN);
	MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);

	if (vport)
		MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);

	err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, out_sz);
	if (err)
		goto out;

	nic_vport_ctx = MLX5_ADDR_OF(query_nic_vport_context_out, out,
				     nic_vport_context);
	req_list_size = MLX5_GET(nic_vport_context, nic_vport_ctx,
				 allowed_list_size);

	*size = req_list_size;
	for (i = 0; i < req_list_size; i++) {
		void *vlan_addr = MLX5_ADDR_OF(nic_vport_context,
					       nic_vport_ctx,
					       current_uc_mac_address[i]);
		vlans[i] = MLX5_GET(vlan_layout, vlan_addr, vlan);
	}
out:
	kfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_vlans);

int mlx5_modify_nic_vport_vlans(struct mlx5_core_dev *dev,
				u16 vlans[],
				int list_size)
{
	u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)];
	void *nic_vport_ctx;
	int max_list_size;
	int in_sz;
	void *in;
	int err;
	int i;

	max_list_size = 1 << MLX5_CAP_GEN(dev, log_max_vlan_list);

	if (list_size > max_list_size)
		return -ENOSPC;

	in_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
		list_size * MLX5_ST_SZ_BYTES(vlan_layout);

	memset(out, 0, sizeof(out));
	in = kzalloc(in_sz, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.addresses_list, 1);

	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in,
				     nic_vport_context);

	MLX5_SET(nic_vport_context, nic_vport_ctx,
		 allowed_list_type, MLX5_NVPRT_LIST_TYPE_VLAN);
	MLX5_SET(nic_vport_context, nic_vport_ctx,
		 allowed_list_size, list_size);

	for (i = 0; i < list_size; i++) {
		void *vlan_addr = MLX5_ADDR_OF(nic_vport_context,
					       nic_vport_ctx,
					       current_uc_mac_address[i]);
		MLX5_SET(vlan_layout, vlan_addr, vlan, vlans[i]);
	}

	err = mlx5_cmd_exec_check_status(dev, in, in_sz, out, sizeof(out));
	kfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_vlans);

int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
					   u64 *system_image_guid)
{
	u32 *out;
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);

	out = mlx5_vzalloc(outlen);
	if (!out)
		return -ENOMEM;

	mlx5_query_nic_vport_context(mdev, 0, out, outlen);

	*system_image_guid = MLX5_GET64(query_nic_vport_context_out, out,
					nic_vport_context.system_image_guid);

	/* mlx5_vzalloc() may fall back to vmalloc(), so free with kvfree() */
	kvfree(out);

	return 0;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_system_image_guid);

int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid)
{
	u32 *out;
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);

	out = mlx5_vzalloc(outlen);
	if (!out)
		return -ENOMEM;

	mlx5_query_nic_vport_context(mdev, 0, out, outlen);

	*node_guid = MLX5_GET64(query_nic_vport_context_out, out,
				nic_vport_context.node_guid);

	kvfree(out);

	return 0;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_node_guid);

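/*
 * Set the node GUID of another vport (VFs only, hence the !vport check).
 * Requires the vport group manager capability and firmware support for
 * nic_vport_node_guid_modify.
 */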
int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
				    u32 vport, u64 node_guid)
{
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	void *nic_vport_context;
	void *in;
	int err;

	if (!vport)
		return -EINVAL;
	if (!MLX5_CAP_GEN(mdev, vport_group_manager))
		return -EACCES;
	if (!MLX5_CAP_ESW(mdev, nic_vport_node_guid_modify))
		return -ENOTSUPP;

	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.node_guid, 1);
	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
	MLX5_SET(modify_nic_vport_context_in, in, other_vport, !!vport);

	nic_vport_context = MLX5_ADDR_OF(modify_nic_vport_context_in,
					 in, nic_vport_context);
	MLX5_SET64(nic_vport_context, nic_vport_context, node_guid, node_guid);

	err = mlx5_modify_nic_vport_context(mdev, in, inlen);

	kvfree(in);

	return err;
}

int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev,
					u16 *qkey_viol_cntr)
{
	u32 *out;
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);

	out = mlx5_vzalloc(outlen);
	if (!out)
		return -ENOMEM;

	mlx5_query_nic_vport_context(mdev, 0, out, outlen);

	*qkey_viol_cntr = MLX5_GET(query_nic_vport_context_out, out,
				   nic_vport_context.qkey_violation_counter);

	kvfree(out);

	return 0;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_qkey_viol_cntr);

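/*
 * Query one GID (or, with gid_index == 0xffff, the whole table) of an
 * HCA vport. The GID entries follow the fixed query_hca_vport_gid_out
 * header in the response; only the first entry is copied to @gid.
 */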
int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 other_vport,
			     u8 port_num, u16 vf_num, u16 gid_index,
			     union ib_gid *gid)
{
	int in_sz = MLX5_ST_SZ_BYTES(query_hca_vport_gid_in);
	int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_gid_out);
	int is_group_manager;
	void *out = NULL;
	void *in = NULL;
	union ib_gid *tmp;
	int tbsz;
	int nout;
	int err;

	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
	tbsz = mlx5_get_gid_table_len(MLX5_CAP_GEN(dev, gid_table_size));
	mlx5_core_dbg(dev, "vf_num %d, index %d, gid_table_size %d\n",
		      vf_num, gid_index, tbsz);

	if (gid_index > tbsz && gid_index != 0xffff)
		return -EINVAL;

	if (gid_index == 0xffff)
		nout = tbsz;
	else
		nout = 1;

	out_sz += nout * sizeof(*gid);

	in = kzalloc(in_sz, GFP_KERNEL);
	out = kzalloc(out_sz, GFP_KERNEL);
	if (!in || !out) {
		err = -ENOMEM;
		goto out;
	}

	MLX5_SET(query_hca_vport_gid_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_VPORT_GID);
	if (other_vport) {
		if (is_group_manager) {
			MLX5_SET(query_hca_vport_gid_in, in, vport_number, vf_num);
			MLX5_SET(query_hca_vport_gid_in, in, other_vport, 1);
		} else {
			err = -EPERM;
			goto out;
		}
	}
	MLX5_SET(query_hca_vport_gid_in, in, gid_index, gid_index);

	if (MLX5_CAP_GEN(dev, num_ports) == 2)
		MLX5_SET(query_hca_vport_gid_in, in, port_num, port_num);

	err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
	if (err)
		goto out;

	err = mlx5_cmd_status_to_err_v2(out);
	if (err)
		goto out;

	tmp = out + MLX5_ST_SZ_BYTES(query_hca_vport_gid_out);
	gid->global.subnet_prefix = tmp->global.subnet_prefix;
	gid->global.interface_id = tmp->global.interface_id;

out:
	kfree(in);
	kfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_gid);

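/*
 * Query one P_Key (or, with pkey_index == 0xffff, the whole table) of an
 * HCA vport; all returned entries are copied out through @pkey.
 */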
int mlx5_query_hca_vport_pkey(struct mlx5_core_dev *dev, u8 other_vport,
			      u8 port_num, u16 vf_num, u16 pkey_index,
			      u16 *pkey)
{
	int in_sz = MLX5_ST_SZ_BYTES(query_hca_vport_pkey_in);
	int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_pkey_out);
	int is_group_manager;
	void *out = NULL;
	void *in = NULL;
	void *pkarr;
	int nout;
	int tbsz;
	int err;
	int i;

	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);

	tbsz = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(dev, pkey_table_size));
	if (pkey_index > tbsz && pkey_index != 0xffff)
		return -EINVAL;

	if (pkey_index == 0xffff)
		nout = tbsz;
	else
		nout = 1;

	out_sz += nout * MLX5_ST_SZ_BYTES(pkey);

	in = kzalloc(in_sz, GFP_KERNEL);
	out = kzalloc(out_sz, GFP_KERNEL);
	if (!in || !out) {
		err = -ENOMEM;
		goto out;
	}

	MLX5_SET(query_hca_vport_pkey_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY);
	if (other_vport) {
		if (is_group_manager) {
			MLX5_SET(query_hca_vport_pkey_in, in, vport_number, vf_num);
			MLX5_SET(query_hca_vport_pkey_in, in, other_vport, 1);
		} else {
			err = -EPERM;
			goto out;
		}
	}
	MLX5_SET(query_hca_vport_pkey_in, in, pkey_index, pkey_index);

	if (MLX5_CAP_GEN(dev, num_ports) == 2)
		MLX5_SET(query_hca_vport_pkey_in, in, port_num, port_num);

	err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
	if (err)
		goto out;

	err = mlx5_cmd_status_to_err_v2(out);
	if (err)
		goto out;

	pkarr = MLX5_ADDR_OF(query_hca_vport_pkey_out, out, pkey);
	for (i = 0; i < nout; i++, pkey++, pkarr += MLX5_ST_SZ_BYTES(pkey))
		*pkey = MLX5_GET_PR(pkey, pkarr, pkey);

out:
	kfree(in);
	kfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_pkey);

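/*
 * Query the full HCA vport context and unpack it into the software
 * representation in @rep. Querying another vport requires the vport
 * group manager capability.
 */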
int mlx5_query_hca_vport_context(struct mlx5_core_dev *dev,
				 u8 other_vport, u8 port_num,
				 u16 vf_num,
				 struct mlx5_hca_vport_context *rep)
{
	int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
	u32 in[MLX5_ST_SZ_DW(query_hca_vport_context_in)];
	int is_group_manager;
	void *out;
	void *ctx;
	int err;

	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);

	memset(in, 0, sizeof(in));
	out = kzalloc(out_sz, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	MLX5_SET(query_hca_vport_context_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT);

	if (other_vport) {
		if (is_group_manager) {
			MLX5_SET(query_hca_vport_context_in, in, other_vport, 1);
			MLX5_SET(query_hca_vport_context_in, in, vport_number, vf_num);
		} else {
			err = -EPERM;
			goto ex;
		}
	}

	if (MLX5_CAP_GEN(dev, num_ports) == 2)
		MLX5_SET(query_hca_vport_context_in, in, port_num, port_num);

	err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
	if (err)
		goto ex;
	err = mlx5_cmd_status_to_err_v2(out);
	if (err)
		goto ex;

	ctx = MLX5_ADDR_OF(query_hca_vport_context_out, out, hca_vport_context);
	rep->field_select = MLX5_GET_PR(hca_vport_context, ctx, field_select);
	rep->sm_virt_aware = MLX5_GET_PR(hca_vport_context, ctx, sm_virt_aware);
	rep->has_smi = MLX5_GET_PR(hca_vport_context, ctx, has_smi);
	rep->has_raw = MLX5_GET_PR(hca_vport_context, ctx, has_raw);
	rep->policy = MLX5_GET_PR(hca_vport_context, ctx, vport_state_policy);
	rep->phys_state = MLX5_GET_PR(hca_vport_context, ctx,
				      port_physical_state);
	rep->vport_state = MLX5_GET_PR(hca_vport_context, ctx, vport_state);
	rep->port_physical_state = MLX5_GET_PR(hca_vport_context, ctx,
					       port_physical_state);
	rep->port_guid = MLX5_GET64_PR(hca_vport_context, ctx, port_guid);
	rep->node_guid = MLX5_GET64_PR(hca_vport_context, ctx, node_guid);
	rep->cap_mask1 = MLX5_GET_PR(hca_vport_context, ctx, cap_mask1);
	rep->cap_mask1_perm = MLX5_GET_PR(hca_vport_context, ctx,
					  cap_mask1_field_select);
	rep->cap_mask2 = MLX5_GET_PR(hca_vport_context, ctx, cap_mask2);
	rep->cap_mask2_perm = MLX5_GET_PR(hca_vport_context, ctx,
					  cap_mask2_field_select);
	rep->lid = MLX5_GET_PR(hca_vport_context, ctx, lid);
	rep->init_type_reply = MLX5_GET_PR(hca_vport_context, ctx,
					   init_type_reply);
	rep->lmc = MLX5_GET_PR(hca_vport_context, ctx, lmc);
	rep->subnet_timeout = MLX5_GET_PR(hca_vport_context, ctx,
					  subnet_timeout);
	rep->sm_lid = MLX5_GET_PR(hca_vport_context, ctx, sm_lid);
	rep->sm_sl = MLX5_GET_PR(hca_vport_context, ctx, sm_sl);
	rep->qkey_violation_counter = MLX5_GET_PR(hca_vport_context, ctx,
						  qkey_violation_counter);
	rep->pkey_violation_counter = MLX5_GET_PR(hca_vport_context, ctx,
						  pkey_violation_counter);
	rep->grh_required = MLX5_GET_PR(hca_vport_context, ctx, grh_required);
	rep->sys_image_guid = MLX5_GET64_PR(hca_vport_context, ctx,
					    system_image_guid);

ex:
	kfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_context);

int mlx5_query_hca_vport_system_image_guid(struct mlx5_core_dev *dev,
					   u64 *sys_image_guid)
{
	struct mlx5_hca_vport_context *rep;
	int err;

	rep = kzalloc(sizeof(*rep), GFP_KERNEL);
	if (!rep)
		return -ENOMEM;

	err = mlx5_query_hca_vport_context(dev, 0, 1, 0, rep);
	if (!err)
		*sys_image_guid = rep->sys_image_guid;

	kfree(rep);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_system_image_guid);

int mlx5_query_hca_vport_node_guid(struct mlx5_core_dev *dev,
				   u64 *node_guid)
{
	struct mlx5_hca_vport_context *rep;
	int err;

	rep = kzalloc(sizeof(*rep), GFP_KERNEL);
	if (!rep)
		return -ENOMEM;

	err = mlx5_query_hca_vport_context(dev, 0, 1, 0, rep);
	if (!err)
		*node_guid = rep->node_guid;

	kfree(rep);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_node_guid);

int mlx5_query_nic_vport_promisc(struct mlx5_core_dev *mdev,
				 u32 vport,
				 int *promisc_uc,
				 int *promisc_mc,
				 int *promisc_all)
{
	u32 *out;
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
	int err;

	out = kzalloc(outlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	err = mlx5_query_nic_vport_context(mdev, vport, out, outlen);
	if (err)
		goto out;

	*promisc_uc = MLX5_GET(query_nic_vport_context_out, out,
			       nic_vport_context.promisc_uc);
	*promisc_mc = MLX5_GET(query_nic_vport_context_out, out,
			       nic_vport_context.promisc_mc);
	*promisc_all = MLX5_GET(query_nic_vport_context_out, out,
				nic_vport_context.promisc_all);

out:
	kfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_promisc);

int mlx5_modify_nic_vport_promisc(struct mlx5_core_dev *mdev,
				  int promisc_uc,
				  int promisc_mc,
				  int promisc_all)
{
	void *in;
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	int err;

	in = mlx5_vzalloc(inlen);
	if (!in) {
		mlx5_core_err(mdev, "failed to allocate inbox\n");
		return -ENOMEM;
	}

	MLX5_SET(modify_nic_vport_context_in, in, field_select.promisc, 1);
	MLX5_SET(modify_nic_vport_context_in, in,
		 nic_vport_context.promisc_uc, promisc_uc);
	MLX5_SET(modify_nic_vport_context_in, in,
		 nic_vport_context.promisc_mc, promisc_mc);
	MLX5_SET(modify_nic_vport_context_in, in,
		 nic_vport_context.promisc_all, promisc_all);

	err = mlx5_modify_nic_vport_context(mdev, in, inlen);

	kvfree(in);

	return err;
}
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_promisc);

enum mlx5_vport_roce_state {
	MLX5_VPORT_ROCE_DISABLED = 0,
	MLX5_VPORT_ROCE_ENABLED = 1,
};

static int mlx5_nic_vport_update_roce_state(struct mlx5_core_dev *mdev,
					    enum mlx5_vport_roce_state state)
{
	void *in;
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	int err;

	in = mlx5_vzalloc(inlen);
	if (!in) {
		mlx5_core_warn(mdev, "failed to allocate inbox\n");
		return -ENOMEM;
	}

	MLX5_SET(modify_nic_vport_context_in, in, field_select.roce_en, 1);
	MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.roce_en,
		 state);

	err = mlx5_modify_nic_vport_context(mdev, in, inlen);

	kvfree(in);

	return err;
}

int mlx5_nic_vport_enable_roce(struct mlx5_core_dev *mdev)
{
	return mlx5_nic_vport_update_roce_state(mdev, MLX5_VPORT_ROCE_ENABLED);
}
EXPORT_SYMBOL_GPL(mlx5_nic_vport_enable_roce);

int mlx5_nic_vport_disable_roce(struct mlx5_core_dev *mdev)
{
	return mlx5_nic_vport_update_roce_state(mdev, MLX5_VPORT_ROCE_DISABLED);
}
EXPORT_SYMBOL_GPL(mlx5_nic_vport_disable_roce);

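/*
 * Query the traffic counters of a vport. For another vport (a VF), the
 * vport number is vf + 1, since vport 0 is the PF itself; this requires
 * the vport group manager capability.
 */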
int mlx5_core_query_vport_counter(struct mlx5_core_dev *dev, u8 other_vport,
				  int vf, u8 port_num, void *out,
				  size_t out_sz)
{
	int in_sz = MLX5_ST_SZ_BYTES(query_vport_counter_in);
	int is_group_manager;
	void *in;
	int err;

	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
	in = mlx5_vzalloc(in_sz);
	if (!in)
		return -ENOMEM;

	MLX5_SET(query_vport_counter_in, in, opcode,
		 MLX5_CMD_OP_QUERY_VPORT_COUNTER);
	if (other_vport) {
		if (is_group_manager) {
			MLX5_SET(query_vport_counter_in, in, other_vport, 1);
			MLX5_SET(query_vport_counter_in, in, vport_number, vf + 1);
		} else {
			err = -EPERM;
			goto free;
		}
	}
	if (MLX5_CAP_GEN(dev, num_ports) == 2)
		MLX5_SET(query_vport_counter_in, in, port_num, port_num);

	err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
	if (err)
		goto free;
	err = mlx5_cmd_status_to_err_v2(out);

free:
	kvfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_query_vport_counter);

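/*
 * Write back an HCA vport context from its software representation.
 * @req->field_select controls which fields the firmware actually
 * applies; the remaining values are ignored.
 */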
int mlx5_core_modify_hca_vport_context(struct mlx5_core_dev *dev,
				       u8 other_vport, u8 port_num,
				       int vf,
				       struct mlx5_hca_vport_context *req)
{
	int in_sz = MLX5_ST_SZ_BYTES(modify_hca_vport_context_in);
	u8 out[MLX5_ST_SZ_BYTES(modify_hca_vport_context_out)];
	int is_group_manager;
	void *in;
	int err;
	void *ctx;

	mlx5_core_dbg(dev, "vf %d\n", vf);
	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
	in = kzalloc(in_sz, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	memset(out, 0, sizeof(out));
	MLX5_SET(modify_hca_vport_context_in, in, opcode, MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT);
	if (other_vport) {
		if (is_group_manager) {
			MLX5_SET(modify_hca_vport_context_in, in, other_vport, 1);
			MLX5_SET(modify_hca_vport_context_in, in, vport_number, vf);
		} else {
			err = -EPERM;
			goto ex;
		}
	}

	if (MLX5_CAP_GEN(dev, num_ports) > 1)
		MLX5_SET(modify_hca_vport_context_in, in, port_num, port_num);

	ctx = MLX5_ADDR_OF(modify_hca_vport_context_in, in, hca_vport_context);
	MLX5_SET(hca_vport_context, ctx, field_select, req->field_select);
	MLX5_SET(hca_vport_context, ctx, sm_virt_aware, req->sm_virt_aware);
	MLX5_SET(hca_vport_context, ctx, has_smi, req->has_smi);
	MLX5_SET(hca_vport_context, ctx, has_raw, req->has_raw);
	MLX5_SET(hca_vport_context, ctx, vport_state_policy, req->policy);
	MLX5_SET(hca_vport_context, ctx, port_physical_state, req->phys_state);
	MLX5_SET(hca_vport_context, ctx, vport_state, req->vport_state);
	MLX5_SET64(hca_vport_context, ctx, port_guid, req->port_guid);
	MLX5_SET64(hca_vport_context, ctx, node_guid, req->node_guid);
	MLX5_SET(hca_vport_context, ctx, cap_mask1, req->cap_mask1);
	MLX5_SET(hca_vport_context, ctx, cap_mask1_field_select, req->cap_mask1_perm);
	MLX5_SET(hca_vport_context, ctx, cap_mask2, req->cap_mask2);
	MLX5_SET(hca_vport_context, ctx, cap_mask2_field_select, req->cap_mask2_perm);
	MLX5_SET(hca_vport_context, ctx, lid, req->lid);
	MLX5_SET(hca_vport_context, ctx, init_type_reply, req->init_type_reply);
	MLX5_SET(hca_vport_context, ctx, lmc, req->lmc);
	MLX5_SET(hca_vport_context, ctx, subnet_timeout, req->subnet_timeout);
	MLX5_SET(hca_vport_context, ctx, sm_lid, req->sm_lid);
	MLX5_SET(hca_vport_context, ctx, sm_sl, req->sm_sl);
	MLX5_SET(hca_vport_context, ctx, qkey_violation_counter, req->qkey_violation_counter);
	MLX5_SET(hca_vport_context, ctx, pkey_violation_counter, req->pkey_violation_counter);
	err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
	if (err)
		goto ex;

	err = mlx5_cmd_status_to_err_v2(out);

ex:
	kfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_modify_hca_vport_context);