/*
 * Copyright (c) 2013-2015, Mellanox Technologies, Ltd.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
33 #include <linux/export.h>
34 #include <linux/etherdevice.h>
35 #include <linux/mlx5/driver.h>
36 #include <linux/mlx5/vport.h>
37 #include "mlx5_core.h"
39 static int _mlx5_query_vport_state(struct mlx5_core_dev
*mdev
, u8 opmod
,
40 u16 vport
, u32
*out
, int outlen
)
43 u32 in
[MLX5_ST_SZ_DW(query_vport_state_in
)];
45 memset(in
, 0, sizeof(in
));
47 MLX5_SET(query_vport_state_in
, in
, opcode
,
48 MLX5_CMD_OP_QUERY_VPORT_STATE
);
49 MLX5_SET(query_vport_state_in
, in
, op_mod
, opmod
);
50 MLX5_SET(query_vport_state_in
, in
, vport_number
, vport
);
52 MLX5_SET(query_vport_state_in
, in
, other_vport
, 1);
54 err
= mlx5_cmd_exec_check_status(mdev
, in
, sizeof(in
), out
, outlen
);
56 mlx5_core_warn(mdev
, "MLX5_CMD_OP_QUERY_VPORT_STATE failed\n");
61 u8
mlx5_query_vport_state(struct mlx5_core_dev
*mdev
, u8 opmod
, u16 vport
)
63 u32 out
[MLX5_ST_SZ_DW(query_vport_state_out
)] = {0};
65 _mlx5_query_vport_state(mdev
, opmod
, vport
, out
, sizeof(out
));
67 return MLX5_GET(query_vport_state_out
, out
, state
);
69 EXPORT_SYMBOL_GPL(mlx5_query_vport_state
);
71 u8
mlx5_query_vport_admin_state(struct mlx5_core_dev
*mdev
, u8 opmod
, u16 vport
)
73 u32 out
[MLX5_ST_SZ_DW(query_vport_state_out
)] = {0};
75 _mlx5_query_vport_state(mdev
, opmod
, vport
, out
, sizeof(out
));
77 return MLX5_GET(query_vport_state_out
, out
, admin_state
);
79 EXPORT_SYMBOL_GPL(mlx5_query_vport_admin_state
);
81 int mlx5_modify_vport_admin_state(struct mlx5_core_dev
*mdev
, u8 opmod
,
84 u32 in
[MLX5_ST_SZ_DW(modify_vport_state_in
)];
85 u32 out
[MLX5_ST_SZ_DW(modify_vport_state_out
)];
88 memset(in
, 0, sizeof(in
));
90 MLX5_SET(modify_vport_state_in
, in
, opcode
,
91 MLX5_CMD_OP_MODIFY_VPORT_STATE
);
92 MLX5_SET(modify_vport_state_in
, in
, op_mod
, opmod
);
93 MLX5_SET(modify_vport_state_in
, in
, vport_number
, vport
);
96 MLX5_SET(modify_vport_state_in
, in
, other_vport
, 1);
98 MLX5_SET(modify_vport_state_in
, in
, admin_state
, state
);
100 err
= mlx5_cmd_exec_check_status(mdev
, in
, sizeof(in
), out
,
103 mlx5_core_warn(mdev
, "MLX5_CMD_OP_MODIFY_VPORT_STATE failed\n");
107 EXPORT_SYMBOL_GPL(mlx5_modify_vport_admin_state
);
109 static int mlx5_query_nic_vport_context(struct mlx5_core_dev
*mdev
, u16 vport
,
110 u32
*out
, int outlen
)
112 u32 in
[MLX5_ST_SZ_DW(query_nic_vport_context_in
)];
114 memset(in
, 0, sizeof(in
));
116 MLX5_SET(query_nic_vport_context_in
, in
, opcode
,
117 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT
);
119 MLX5_SET(query_nic_vport_context_in
, in
, vport_number
, vport
);
121 MLX5_SET(query_nic_vport_context_in
, in
, other_vport
, 1);
123 return mlx5_cmd_exec_check_status(mdev
, in
, sizeof(in
), out
, outlen
);
126 static int mlx5_modify_nic_vport_context(struct mlx5_core_dev
*mdev
, void *in
,
129 u32 out
[MLX5_ST_SZ_DW(modify_nic_vport_context_out
)];
131 MLX5_SET(modify_nic_vport_context_in
, in
, opcode
,
132 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT
);
134 memset(out
, 0, sizeof(out
));
135 return mlx5_cmd_exec_check_status(mdev
, in
, inlen
, out
, sizeof(out
));
138 void mlx5_query_nic_vport_min_inline(struct mlx5_core_dev
*mdev
,
141 u32 out
[MLX5_ST_SZ_DW(query_nic_vport_context_out
)] = {0};
143 mlx5_query_nic_vport_context(mdev
, 0, out
, sizeof(out
));
145 *min_inline_mode
= MLX5_GET(query_nic_vport_context_out
, out
,
146 nic_vport_context
.min_wqe_inline_mode
);
148 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_min_inline
);
150 int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev
*mdev
,
154 int outlen
= MLX5_ST_SZ_BYTES(query_nic_vport_context_out
);
158 out
= mlx5_vzalloc(outlen
);
162 out_addr
= MLX5_ADDR_OF(query_nic_vport_context_out
, out
,
163 nic_vport_context
.permanent_address
);
165 err
= mlx5_query_nic_vport_context(mdev
, vport
, out
, outlen
);
167 ether_addr_copy(addr
, &out_addr
[2]);
172 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mac_address
);
174 int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev
*mdev
,
178 int inlen
= MLX5_ST_SZ_BYTES(modify_nic_vport_context_in
);
183 in
= mlx5_vzalloc(inlen
);
185 mlx5_core_warn(mdev
, "failed to allocate inbox\n");
189 MLX5_SET(modify_nic_vport_context_in
, in
,
190 field_select
.permanent_address
, 1);
191 MLX5_SET(modify_nic_vport_context_in
, in
, vport_number
, vport
);
194 MLX5_SET(modify_nic_vport_context_in
, in
, other_vport
, 1);
196 nic_vport_ctx
= MLX5_ADDR_OF(modify_nic_vport_context_in
,
197 in
, nic_vport_context
);
198 perm_mac
= MLX5_ADDR_OF(nic_vport_context
, nic_vport_ctx
,
201 ether_addr_copy(&perm_mac
[2], addr
);
203 err
= mlx5_modify_nic_vport_context(mdev
, in
, inlen
);
209 EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mac_address
);
211 int mlx5_query_nic_vport_mtu(struct mlx5_core_dev
*mdev
, u16
*mtu
)
213 int outlen
= MLX5_ST_SZ_BYTES(query_nic_vport_context_out
);
217 out
= mlx5_vzalloc(outlen
);
221 err
= mlx5_query_nic_vport_context(mdev
, 0, out
, outlen
);
223 *mtu
= MLX5_GET(query_nic_vport_context_out
, out
,
224 nic_vport_context
.mtu
);
229 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mtu
);
231 int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev
*mdev
, u16 mtu
)
233 int inlen
= MLX5_ST_SZ_BYTES(modify_nic_vport_context_in
);
237 in
= mlx5_vzalloc(inlen
);
241 MLX5_SET(modify_nic_vport_context_in
, in
, field_select
.mtu
, 1);
242 MLX5_SET(modify_nic_vport_context_in
, in
, nic_vport_context
.mtu
, mtu
);
244 err
= mlx5_modify_nic_vport_context(mdev
, in
, inlen
);
249 EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mtu
);
251 int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev
*dev
,
253 enum mlx5_list_type list_type
,
254 u8 addr_list
[][ETH_ALEN
],
257 u32 in
[MLX5_ST_SZ_DW(query_nic_vport_context_in
)];
266 req_list_size
= *list_size
;
268 max_list_size
= list_type
== MLX5_NVPRT_LIST_TYPE_UC
?
269 1 << MLX5_CAP_GEN(dev
, log_max_current_uc_list
) :
270 1 << MLX5_CAP_GEN(dev
, log_max_current_mc_list
);
272 if (req_list_size
> max_list_size
) {
273 mlx5_core_warn(dev
, "Requested list size (%d) > (%d) max_list_size\n",
274 req_list_size
, max_list_size
);
275 req_list_size
= max_list_size
;
278 out_sz
= MLX5_ST_SZ_BYTES(modify_nic_vport_context_in
) +
279 req_list_size
* MLX5_ST_SZ_BYTES(mac_address_layout
);
281 memset(in
, 0, sizeof(in
));
282 out
= kzalloc(out_sz
, GFP_KERNEL
);
286 MLX5_SET(query_nic_vport_context_in
, in
, opcode
,
287 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT
);
288 MLX5_SET(query_nic_vport_context_in
, in
, allowed_list_type
, list_type
);
289 MLX5_SET(query_nic_vport_context_in
, in
, vport_number
, vport
);
292 MLX5_SET(query_nic_vport_context_in
, in
, other_vport
, 1);
294 err
= mlx5_cmd_exec_check_status(dev
, in
, sizeof(in
), out
, out_sz
);
298 nic_vport_ctx
= MLX5_ADDR_OF(query_nic_vport_context_out
, out
,
300 req_list_size
= MLX5_GET(nic_vport_context
, nic_vport_ctx
,
303 *list_size
= req_list_size
;
304 for (i
= 0; i
< req_list_size
; i
++) {
305 u8
*mac_addr
= MLX5_ADDR_OF(nic_vport_context
,
307 current_uc_mac_address
[i
]) + 2;
308 ether_addr_copy(addr_list
[i
], mac_addr
);
314 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mac_list
);
316 int mlx5_modify_nic_vport_mac_list(struct mlx5_core_dev
*dev
,
317 enum mlx5_list_type list_type
,
318 u8 addr_list
[][ETH_ALEN
],
321 u32 out
[MLX5_ST_SZ_DW(modify_nic_vport_context_out
)];
329 max_list_size
= list_type
== MLX5_NVPRT_LIST_TYPE_UC
?
330 1 << MLX5_CAP_GEN(dev
, log_max_current_uc_list
) :
331 1 << MLX5_CAP_GEN(dev
, log_max_current_mc_list
);
333 if (list_size
> max_list_size
)
336 in_sz
= MLX5_ST_SZ_BYTES(modify_nic_vport_context_in
) +
337 list_size
* MLX5_ST_SZ_BYTES(mac_address_layout
);
339 memset(out
, 0, sizeof(out
));
340 in
= kzalloc(in_sz
, GFP_KERNEL
);
344 MLX5_SET(modify_nic_vport_context_in
, in
, opcode
,
345 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT
);
346 MLX5_SET(modify_nic_vport_context_in
, in
,
347 field_select
.addresses_list
, 1);
349 nic_vport_ctx
= MLX5_ADDR_OF(modify_nic_vport_context_in
, in
,
352 MLX5_SET(nic_vport_context
, nic_vport_ctx
,
353 allowed_list_type
, list_type
);
354 MLX5_SET(nic_vport_context
, nic_vport_ctx
,
355 allowed_list_size
, list_size
);
357 for (i
= 0; i
< list_size
; i
++) {
358 u8
*curr_mac
= MLX5_ADDR_OF(nic_vport_context
,
360 current_uc_mac_address
[i
]) + 2;
361 ether_addr_copy(curr_mac
, addr_list
[i
]);
364 err
= mlx5_cmd_exec_check_status(dev
, in
, in_sz
, out
, sizeof(out
));
368 EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mac_list
);
370 int mlx5_query_nic_vport_vlans(struct mlx5_core_dev
*dev
,
375 u32 in
[MLX5_ST_SZ_DW(query_nic_vport_context_in
)];
384 req_list_size
= *size
;
385 max_list_size
= 1 << MLX5_CAP_GEN(dev
, log_max_vlan_list
);
386 if (req_list_size
> max_list_size
) {
387 mlx5_core_warn(dev
, "Requested list size (%d) > (%d) max list size\n",
388 req_list_size
, max_list_size
);
389 req_list_size
= max_list_size
;
392 out_sz
= MLX5_ST_SZ_BYTES(modify_nic_vport_context_in
) +
393 req_list_size
* MLX5_ST_SZ_BYTES(vlan_layout
);
395 memset(in
, 0, sizeof(in
));
396 out
= kzalloc(out_sz
, GFP_KERNEL
);
400 MLX5_SET(query_nic_vport_context_in
, in
, opcode
,
401 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT
);
402 MLX5_SET(query_nic_vport_context_in
, in
, allowed_list_type
,
403 MLX5_NVPRT_LIST_TYPE_VLAN
);
404 MLX5_SET(query_nic_vport_context_in
, in
, vport_number
, vport
);
407 MLX5_SET(query_nic_vport_context_in
, in
, other_vport
, 1);
409 err
= mlx5_cmd_exec_check_status(dev
, in
, sizeof(in
), out
, out_sz
);
413 nic_vport_ctx
= MLX5_ADDR_OF(query_nic_vport_context_out
, out
,
415 req_list_size
= MLX5_GET(nic_vport_context
, nic_vport_ctx
,
418 *size
= req_list_size
;
419 for (i
= 0; i
< req_list_size
; i
++) {
420 void *vlan_addr
= MLX5_ADDR_OF(nic_vport_context
,
422 current_uc_mac_address
[i
]);
423 vlans
[i
] = MLX5_GET(vlan_layout
, vlan_addr
, vlan
);
429 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_vlans
);
431 int mlx5_modify_nic_vport_vlans(struct mlx5_core_dev
*dev
,
435 u32 out
[MLX5_ST_SZ_DW(modify_nic_vport_context_out
)];
443 max_list_size
= 1 << MLX5_CAP_GEN(dev
, log_max_vlan_list
);
445 if (list_size
> max_list_size
)
448 in_sz
= MLX5_ST_SZ_BYTES(modify_nic_vport_context_in
) +
449 list_size
* MLX5_ST_SZ_BYTES(vlan_layout
);
451 memset(out
, 0, sizeof(out
));
452 in
= kzalloc(in_sz
, GFP_KERNEL
);
456 MLX5_SET(modify_nic_vport_context_in
, in
, opcode
,
457 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT
);
458 MLX5_SET(modify_nic_vport_context_in
, in
,
459 field_select
.addresses_list
, 1);
461 nic_vport_ctx
= MLX5_ADDR_OF(modify_nic_vport_context_in
, in
,
464 MLX5_SET(nic_vport_context
, nic_vport_ctx
,
465 allowed_list_type
, MLX5_NVPRT_LIST_TYPE_VLAN
);
466 MLX5_SET(nic_vport_context
, nic_vport_ctx
,
467 allowed_list_size
, list_size
);
469 for (i
= 0; i
< list_size
; i
++) {
470 void *vlan_addr
= MLX5_ADDR_OF(nic_vport_context
,
472 current_uc_mac_address
[i
]);
473 MLX5_SET(vlan_layout
, vlan_addr
, vlan
, vlans
[i
]);
476 err
= mlx5_cmd_exec_check_status(dev
, in
, in_sz
, out
, sizeof(out
));
480 EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_vlans
);
482 int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev
*mdev
,
483 u64
*system_image_guid
)
486 int outlen
= MLX5_ST_SZ_BYTES(query_nic_vport_context_out
);
488 out
= mlx5_vzalloc(outlen
);
492 mlx5_query_nic_vport_context(mdev
, 0, out
, outlen
);
494 *system_image_guid
= MLX5_GET64(query_nic_vport_context_out
, out
,
495 nic_vport_context
.system_image_guid
);
501 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_system_image_guid
);
503 int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev
*mdev
, u64
*node_guid
)
506 int outlen
= MLX5_ST_SZ_BYTES(query_nic_vport_context_out
);
508 out
= mlx5_vzalloc(outlen
);
512 mlx5_query_nic_vport_context(mdev
, 0, out
, outlen
);
514 *node_guid
= MLX5_GET64(query_nic_vport_context_out
, out
,
515 nic_vport_context
.node_guid
);
521 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_node_guid
);
523 int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev
*mdev
,
524 u32 vport
, u64 node_guid
)
526 int inlen
= MLX5_ST_SZ_BYTES(modify_nic_vport_context_in
);
527 void *nic_vport_context
;
533 if (!MLX5_CAP_GEN(mdev
, vport_group_manager
))
535 if (!MLX5_CAP_ESW(mdev
, nic_vport_node_guid_modify
))
538 in
= mlx5_vzalloc(inlen
);
542 MLX5_SET(modify_nic_vport_context_in
, in
,
543 field_select
.node_guid
, 1);
544 MLX5_SET(modify_nic_vport_context_in
, in
, vport_number
, vport
);
545 MLX5_SET(modify_nic_vport_context_in
, in
, other_vport
, !!vport
);
547 nic_vport_context
= MLX5_ADDR_OF(modify_nic_vport_context_in
,
548 in
, nic_vport_context
);
549 MLX5_SET64(nic_vport_context
, nic_vport_context
, node_guid
, node_guid
);
551 err
= mlx5_modify_nic_vport_context(mdev
, in
, inlen
);
558 int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev
*mdev
,
562 int outlen
= MLX5_ST_SZ_BYTES(query_nic_vport_context_out
);
564 out
= mlx5_vzalloc(outlen
);
568 mlx5_query_nic_vport_context(mdev
, 0, out
, outlen
);
570 *qkey_viol_cntr
= MLX5_GET(query_nic_vport_context_out
, out
,
571 nic_vport_context
.qkey_violation_counter
);
577 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_qkey_viol_cntr
);
579 int mlx5_query_hca_vport_gid(struct mlx5_core_dev
*dev
, u8 other_vport
,
580 u8 port_num
, u16 vf_num
, u16 gid_index
,
583 int in_sz
= MLX5_ST_SZ_BYTES(query_hca_vport_gid_in
);
584 int out_sz
= MLX5_ST_SZ_BYTES(query_hca_vport_gid_out
);
585 int is_group_manager
;
593 is_group_manager
= MLX5_CAP_GEN(dev
, vport_group_manager
);
594 tbsz
= mlx5_get_gid_table_len(MLX5_CAP_GEN(dev
, gid_table_size
));
595 mlx5_core_dbg(dev
, "vf_num %d, index %d, gid_table_size %d\n",
596 vf_num
, gid_index
, tbsz
);
598 if (gid_index
> tbsz
&& gid_index
!= 0xffff)
601 if (gid_index
== 0xffff)
606 out_sz
+= nout
* sizeof(*gid
);
608 in
= kzalloc(in_sz
, GFP_KERNEL
);
609 out
= kzalloc(out_sz
, GFP_KERNEL
);
615 MLX5_SET(query_hca_vport_gid_in
, in
, opcode
, MLX5_CMD_OP_QUERY_HCA_VPORT_GID
);
617 if (is_group_manager
) {
618 MLX5_SET(query_hca_vport_gid_in
, in
, vport_number
, vf_num
);
619 MLX5_SET(query_hca_vport_gid_in
, in
, other_vport
, 1);
625 MLX5_SET(query_hca_vport_gid_in
, in
, gid_index
, gid_index
);
627 if (MLX5_CAP_GEN(dev
, num_ports
) == 2)
628 MLX5_SET(query_hca_vport_gid_in
, in
, port_num
, port_num
);
630 err
= mlx5_cmd_exec(dev
, in
, in_sz
, out
, out_sz
);
634 err
= mlx5_cmd_status_to_err_v2(out
);
638 tmp
= out
+ MLX5_ST_SZ_BYTES(query_hca_vport_gid_out
);
639 gid
->global
.subnet_prefix
= tmp
->global
.subnet_prefix
;
640 gid
->global
.interface_id
= tmp
->global
.interface_id
;
647 EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_gid
);
649 int mlx5_query_hca_vport_pkey(struct mlx5_core_dev
*dev
, u8 other_vport
,
650 u8 port_num
, u16 vf_num
, u16 pkey_index
,
653 int in_sz
= MLX5_ST_SZ_BYTES(query_hca_vport_pkey_in
);
654 int out_sz
= MLX5_ST_SZ_BYTES(query_hca_vport_pkey_out
);
655 int is_group_manager
;
664 is_group_manager
= MLX5_CAP_GEN(dev
, vport_group_manager
);
666 tbsz
= mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(dev
, pkey_table_size
));
667 if (pkey_index
> tbsz
&& pkey_index
!= 0xffff)
670 if (pkey_index
== 0xffff)
675 out_sz
+= nout
* MLX5_ST_SZ_BYTES(pkey
);
677 in
= kzalloc(in_sz
, GFP_KERNEL
);
678 out
= kzalloc(out_sz
, GFP_KERNEL
);
684 MLX5_SET(query_hca_vport_pkey_in
, in
, opcode
, MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY
);
686 if (is_group_manager
) {
687 MLX5_SET(query_hca_vport_pkey_in
, in
, vport_number
, vf_num
);
688 MLX5_SET(query_hca_vport_pkey_in
, in
, other_vport
, 1);
694 MLX5_SET(query_hca_vport_pkey_in
, in
, pkey_index
, pkey_index
);
696 if (MLX5_CAP_GEN(dev
, num_ports
) == 2)
697 MLX5_SET(query_hca_vport_pkey_in
, in
, port_num
, port_num
);
699 err
= mlx5_cmd_exec(dev
, in
, in_sz
, out
, out_sz
);
703 err
= mlx5_cmd_status_to_err_v2(out
);
707 pkarr
= MLX5_ADDR_OF(query_hca_vport_pkey_out
, out
, pkey
);
708 for (i
= 0; i
< nout
; i
++, pkey
++, pkarr
+= MLX5_ST_SZ_BYTES(pkey
))
709 *pkey
= MLX5_GET_PR(pkey
, pkarr
, pkey
);
716 EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_pkey
);
718 int mlx5_query_hca_vport_context(struct mlx5_core_dev
*dev
,
719 u8 other_vport
, u8 port_num
,
721 struct mlx5_hca_vport_context
*rep
)
723 int out_sz
= MLX5_ST_SZ_BYTES(query_hca_vport_context_out
);
724 int in
[MLX5_ST_SZ_DW(query_hca_vport_context_in
)];
725 int is_group_manager
;
730 is_group_manager
= MLX5_CAP_GEN(dev
, vport_group_manager
);
732 memset(in
, 0, sizeof(in
));
733 out
= kzalloc(out_sz
, GFP_KERNEL
);
737 MLX5_SET(query_hca_vport_context_in
, in
, opcode
, MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT
);
740 if (is_group_manager
) {
741 MLX5_SET(query_hca_vport_context_in
, in
, other_vport
, 1);
742 MLX5_SET(query_hca_vport_context_in
, in
, vport_number
, vf_num
);
749 if (MLX5_CAP_GEN(dev
, num_ports
) == 2)
750 MLX5_SET(query_hca_vport_context_in
, in
, port_num
, port_num
);
752 err
= mlx5_cmd_exec(dev
, in
, sizeof(in
), out
, out_sz
);
755 err
= mlx5_cmd_status_to_err_v2(out
);
759 ctx
= MLX5_ADDR_OF(query_hca_vport_context_out
, out
, hca_vport_context
);
760 rep
->field_select
= MLX5_GET_PR(hca_vport_context
, ctx
, field_select
);
761 rep
->sm_virt_aware
= MLX5_GET_PR(hca_vport_context
, ctx
, sm_virt_aware
);
762 rep
->has_smi
= MLX5_GET_PR(hca_vport_context
, ctx
, has_smi
);
763 rep
->has_raw
= MLX5_GET_PR(hca_vport_context
, ctx
, has_raw
);
764 rep
->policy
= MLX5_GET_PR(hca_vport_context
, ctx
, vport_state_policy
);
765 rep
->phys_state
= MLX5_GET_PR(hca_vport_context
, ctx
,
766 port_physical_state
);
767 rep
->vport_state
= MLX5_GET_PR(hca_vport_context
, ctx
, vport_state
);
768 rep
->port_physical_state
= MLX5_GET_PR(hca_vport_context
, ctx
,
769 port_physical_state
);
770 rep
->port_guid
= MLX5_GET64_PR(hca_vport_context
, ctx
, port_guid
);
771 rep
->node_guid
= MLX5_GET64_PR(hca_vport_context
, ctx
, node_guid
);
772 rep
->cap_mask1
= MLX5_GET_PR(hca_vport_context
, ctx
, cap_mask1
);
773 rep
->cap_mask1_perm
= MLX5_GET_PR(hca_vport_context
, ctx
,
774 cap_mask1_field_select
);
775 rep
->cap_mask2
= MLX5_GET_PR(hca_vport_context
, ctx
, cap_mask2
);
776 rep
->cap_mask2_perm
= MLX5_GET_PR(hca_vport_context
, ctx
,
777 cap_mask2_field_select
);
778 rep
->lid
= MLX5_GET_PR(hca_vport_context
, ctx
, lid
);
779 rep
->init_type_reply
= MLX5_GET_PR(hca_vport_context
, ctx
,
781 rep
->lmc
= MLX5_GET_PR(hca_vport_context
, ctx
, lmc
);
782 rep
->subnet_timeout
= MLX5_GET_PR(hca_vport_context
, ctx
,
784 rep
->sm_lid
= MLX5_GET_PR(hca_vport_context
, ctx
, sm_lid
);
785 rep
->sm_sl
= MLX5_GET_PR(hca_vport_context
, ctx
, sm_sl
);
786 rep
->qkey_violation_counter
= MLX5_GET_PR(hca_vport_context
, ctx
,
787 qkey_violation_counter
);
788 rep
->pkey_violation_counter
= MLX5_GET_PR(hca_vport_context
, ctx
,
789 pkey_violation_counter
);
790 rep
->grh_required
= MLX5_GET_PR(hca_vport_context
, ctx
, grh_required
);
791 rep
->sys_image_guid
= MLX5_GET64_PR(hca_vport_context
, ctx
,
798 EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_context
);
800 int mlx5_query_hca_vport_system_image_guid(struct mlx5_core_dev
*dev
,
803 struct mlx5_hca_vport_context
*rep
;
806 rep
= kzalloc(sizeof(*rep
), GFP_KERNEL
);
810 err
= mlx5_query_hca_vport_context(dev
, 0, 1, 0, rep
);
812 *sys_image_guid
= rep
->sys_image_guid
;
817 EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_system_image_guid
);
819 int mlx5_query_hca_vport_node_guid(struct mlx5_core_dev
*dev
,
822 struct mlx5_hca_vport_context
*rep
;
825 rep
= kzalloc(sizeof(*rep
), GFP_KERNEL
);
829 err
= mlx5_query_hca_vport_context(dev
, 0, 1, 0, rep
);
831 *node_guid
= rep
->node_guid
;
836 EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_node_guid
);
838 int mlx5_query_nic_vport_promisc(struct mlx5_core_dev
*mdev
,
845 int outlen
= MLX5_ST_SZ_BYTES(query_nic_vport_context_out
);
848 out
= kzalloc(outlen
, GFP_KERNEL
);
852 err
= mlx5_query_nic_vport_context(mdev
, vport
, out
, outlen
);
856 *promisc_uc
= MLX5_GET(query_nic_vport_context_out
, out
,
857 nic_vport_context
.promisc_uc
);
858 *promisc_mc
= MLX5_GET(query_nic_vport_context_out
, out
,
859 nic_vport_context
.promisc_mc
);
860 *promisc_all
= MLX5_GET(query_nic_vport_context_out
, out
,
861 nic_vport_context
.promisc_all
);
867 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_promisc
);
869 int mlx5_modify_nic_vport_promisc(struct mlx5_core_dev
*mdev
,
875 int inlen
= MLX5_ST_SZ_BYTES(modify_nic_vport_context_in
);
878 in
= mlx5_vzalloc(inlen
);
880 mlx5_core_err(mdev
, "failed to allocate inbox\n");
884 MLX5_SET(modify_nic_vport_context_in
, in
, field_select
.promisc
, 1);
885 MLX5_SET(modify_nic_vport_context_in
, in
,
886 nic_vport_context
.promisc_uc
, promisc_uc
);
887 MLX5_SET(modify_nic_vport_context_in
, in
,
888 nic_vport_context
.promisc_mc
, promisc_mc
);
889 MLX5_SET(modify_nic_vport_context_in
, in
,
890 nic_vport_context
.promisc_all
, promisc_all
);
892 err
= mlx5_modify_nic_vport_context(mdev
, in
, inlen
);
898 EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_promisc
);
/* Values written to nic_vport_context.roce_en. */
enum mlx5_vport_roce_state {
	MLX5_VPORT_ROCE_DISABLED = 0,
	MLX5_VPORT_ROCE_ENABLED  = 1,
};
905 static int mlx5_nic_vport_update_roce_state(struct mlx5_core_dev
*mdev
,
906 enum mlx5_vport_roce_state state
)
909 int inlen
= MLX5_ST_SZ_BYTES(modify_nic_vport_context_in
);
912 in
= mlx5_vzalloc(inlen
);
914 mlx5_core_warn(mdev
, "failed to allocate inbox\n");
918 MLX5_SET(modify_nic_vport_context_in
, in
, field_select
.roce_en
, 1);
919 MLX5_SET(modify_nic_vport_context_in
, in
, nic_vport_context
.roce_en
,
922 err
= mlx5_modify_nic_vport_context(mdev
, in
, inlen
);
929 int mlx5_nic_vport_enable_roce(struct mlx5_core_dev
*mdev
)
931 return mlx5_nic_vport_update_roce_state(mdev
, MLX5_VPORT_ROCE_ENABLED
);
933 EXPORT_SYMBOL_GPL(mlx5_nic_vport_enable_roce
);
935 int mlx5_nic_vport_disable_roce(struct mlx5_core_dev
*mdev
)
937 return mlx5_nic_vport_update_roce_state(mdev
, MLX5_VPORT_ROCE_DISABLED
);
939 EXPORT_SYMBOL_GPL(mlx5_nic_vport_disable_roce
);
941 int mlx5_core_query_vport_counter(struct mlx5_core_dev
*dev
, u8 other_vport
,
942 int vf
, u8 port_num
, void *out
,
945 int in_sz
= MLX5_ST_SZ_BYTES(query_vport_counter_in
);
946 int is_group_manager
;
950 is_group_manager
= MLX5_CAP_GEN(dev
, vport_group_manager
);
951 in
= mlx5_vzalloc(in_sz
);
957 MLX5_SET(query_vport_counter_in
, in
, opcode
,
958 MLX5_CMD_OP_QUERY_VPORT_COUNTER
);
960 if (is_group_manager
) {
961 MLX5_SET(query_vport_counter_in
, in
, other_vport
, 1);
962 MLX5_SET(query_vport_counter_in
, in
, vport_number
, vf
+ 1);
968 if (MLX5_CAP_GEN(dev
, num_ports
) == 2)
969 MLX5_SET(query_vport_counter_in
, in
, port_num
, port_num
);
971 err
= mlx5_cmd_exec(dev
, in
, in_sz
, out
, out_sz
);
974 err
= mlx5_cmd_status_to_err_v2(out
);
980 EXPORT_SYMBOL_GPL(mlx5_core_query_vport_counter
);
982 int mlx5_core_modify_hca_vport_context(struct mlx5_core_dev
*dev
,
983 u8 other_vport
, u8 port_num
,
985 struct mlx5_hca_vport_context
*req
)
987 int in_sz
= MLX5_ST_SZ_BYTES(modify_hca_vport_context_in
);
988 u8 out
[MLX5_ST_SZ_BYTES(modify_hca_vport_context_out
)];
989 int is_group_manager
;
994 mlx5_core_dbg(dev
, "vf %d\n", vf
);
995 is_group_manager
= MLX5_CAP_GEN(dev
, vport_group_manager
);
996 in
= kzalloc(in_sz
, GFP_KERNEL
);
1000 memset(out
, 0, sizeof(out
));
1001 MLX5_SET(modify_hca_vport_context_in
, in
, opcode
, MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT
);
1003 if (is_group_manager
) {
1004 MLX5_SET(modify_hca_vport_context_in
, in
, other_vport
, 1);
1005 MLX5_SET(modify_hca_vport_context_in
, in
, vport_number
, vf
);
1012 if (MLX5_CAP_GEN(dev
, num_ports
) > 1)
1013 MLX5_SET(modify_hca_vport_context_in
, in
, port_num
, port_num
);
1015 ctx
= MLX5_ADDR_OF(modify_hca_vport_context_in
, in
, hca_vport_context
);
1016 MLX5_SET(hca_vport_context
, ctx
, field_select
, req
->field_select
);
1017 MLX5_SET(hca_vport_context
, ctx
, sm_virt_aware
, req
->sm_virt_aware
);
1018 MLX5_SET(hca_vport_context
, ctx
, has_smi
, req
->has_smi
);
1019 MLX5_SET(hca_vport_context
, ctx
, has_raw
, req
->has_raw
);
1020 MLX5_SET(hca_vport_context
, ctx
, vport_state_policy
, req
->policy
);
1021 MLX5_SET(hca_vport_context
, ctx
, port_physical_state
, req
->phys_state
);
1022 MLX5_SET(hca_vport_context
, ctx
, vport_state
, req
->vport_state
);
1023 MLX5_SET64(hca_vport_context
, ctx
, port_guid
, req
->port_guid
);
1024 MLX5_SET64(hca_vport_context
, ctx
, node_guid
, req
->node_guid
);
1025 MLX5_SET(hca_vport_context
, ctx
, cap_mask1
, req
->cap_mask1
);
1026 MLX5_SET(hca_vport_context
, ctx
, cap_mask1_field_select
, req
->cap_mask1_perm
);
1027 MLX5_SET(hca_vport_context
, ctx
, cap_mask2
, req
->cap_mask2
);
1028 MLX5_SET(hca_vport_context
, ctx
, cap_mask2_field_select
, req
->cap_mask2_perm
);
1029 MLX5_SET(hca_vport_context
, ctx
, lid
, req
->lid
);
1030 MLX5_SET(hca_vport_context
, ctx
, init_type_reply
, req
->init_type_reply
);
1031 MLX5_SET(hca_vport_context
, ctx
, lmc
, req
->lmc
);
1032 MLX5_SET(hca_vport_context
, ctx
, subnet_timeout
, req
->subnet_timeout
);
1033 MLX5_SET(hca_vport_context
, ctx
, sm_lid
, req
->sm_lid
);
1034 MLX5_SET(hca_vport_context
, ctx
, sm_sl
, req
->sm_sl
);
1035 MLX5_SET(hca_vport_context
, ctx
, qkey_violation_counter
, req
->qkey_violation_counter
);
1036 MLX5_SET(hca_vport_context
, ctx
, pkey_violation_counter
, req
->pkey_violation_counter
);
1037 err
= mlx5_cmd_exec(dev
, in
, in_sz
, out
, sizeof(out
));
1041 err
= mlx5_cmd_status_to_err_v2(out
);
1047 EXPORT_SYMBOL_GPL(mlx5_core_modify_hca_vport_context
);