/*
 * Copyright (c) 2013-2015, Mellanox Technologies, Ltd.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
33 #include <linux/export.h>
34 #include <linux/etherdevice.h>
35 #include <linux/mlx5/driver.h>
36 #include <linux/mlx5/vport.h>
37 #include "mlx5_core.h"
39 static int _mlx5_query_vport_state(struct mlx5_core_dev
*mdev
, u8 opmod
,
40 u16 vport
, u32
*out
, int outlen
)
43 u32 in
[MLX5_ST_SZ_DW(query_vport_state_in
)];
45 memset(in
, 0, sizeof(in
));
47 MLX5_SET(query_vport_state_in
, in
, opcode
,
48 MLX5_CMD_OP_QUERY_VPORT_STATE
);
49 MLX5_SET(query_vport_state_in
, in
, op_mod
, opmod
);
50 MLX5_SET(query_vport_state_in
, in
, vport_number
, vport
);
52 MLX5_SET(query_vport_state_in
, in
, other_vport
, 1);
54 err
= mlx5_cmd_exec_check_status(mdev
, in
, sizeof(in
), out
, outlen
);
56 mlx5_core_warn(mdev
, "MLX5_CMD_OP_QUERY_VPORT_STATE failed\n");
61 u8
mlx5_query_vport_state(struct mlx5_core_dev
*mdev
, u8 opmod
, u16 vport
)
63 u32 out
[MLX5_ST_SZ_DW(query_vport_state_out
)] = {0};
65 _mlx5_query_vport_state(mdev
, opmod
, vport
, out
, sizeof(out
));
67 return MLX5_GET(query_vport_state_out
, out
, state
);
69 EXPORT_SYMBOL_GPL(mlx5_query_vport_state
);
71 u8
mlx5_query_vport_admin_state(struct mlx5_core_dev
*mdev
, u8 opmod
, u16 vport
)
73 u32 out
[MLX5_ST_SZ_DW(query_vport_state_out
)] = {0};
75 _mlx5_query_vport_state(mdev
, opmod
, vport
, out
, sizeof(out
));
77 return MLX5_GET(query_vport_state_out
, out
, admin_state
);
79 EXPORT_SYMBOL(mlx5_query_vport_admin_state
);
81 int mlx5_modify_vport_admin_state(struct mlx5_core_dev
*mdev
, u8 opmod
,
84 u32 in
[MLX5_ST_SZ_DW(modify_vport_state_in
)];
85 u32 out
[MLX5_ST_SZ_DW(modify_vport_state_out
)];
88 memset(in
, 0, sizeof(in
));
90 MLX5_SET(modify_vport_state_in
, in
, opcode
,
91 MLX5_CMD_OP_MODIFY_VPORT_STATE
);
92 MLX5_SET(modify_vport_state_in
, in
, op_mod
, opmod
);
93 MLX5_SET(modify_vport_state_in
, in
, vport_number
, vport
);
96 MLX5_SET(modify_vport_state_in
, in
, other_vport
, 1);
98 MLX5_SET(modify_vport_state_in
, in
, admin_state
, state
);
100 err
= mlx5_cmd_exec_check_status(mdev
, in
, sizeof(in
), out
,
103 mlx5_core_warn(mdev
, "MLX5_CMD_OP_MODIFY_VPORT_STATE failed\n");
107 EXPORT_SYMBOL(mlx5_modify_vport_admin_state
);
109 static int mlx5_query_nic_vport_context(struct mlx5_core_dev
*mdev
, u16 vport
,
110 u32
*out
, int outlen
)
112 u32 in
[MLX5_ST_SZ_DW(query_nic_vport_context_in
)];
114 memset(in
, 0, sizeof(in
));
116 MLX5_SET(query_nic_vport_context_in
, in
, opcode
,
117 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT
);
119 MLX5_SET(query_nic_vport_context_in
, in
, vport_number
, vport
);
121 MLX5_SET(query_nic_vport_context_in
, in
, other_vport
, 1);
123 return mlx5_cmd_exec_check_status(mdev
, in
, sizeof(in
), out
, outlen
);
126 static int mlx5_modify_nic_vport_context(struct mlx5_core_dev
*mdev
, void *in
,
129 u32 out
[MLX5_ST_SZ_DW(modify_nic_vport_context_out
)];
131 MLX5_SET(modify_nic_vport_context_in
, in
, opcode
,
132 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT
);
134 memset(out
, 0, sizeof(out
));
135 return mlx5_cmd_exec_check_status(mdev
, in
, inlen
, out
, sizeof(out
));
138 int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev
*mdev
,
142 int outlen
= MLX5_ST_SZ_BYTES(query_nic_vport_context_out
);
146 out
= mlx5_vzalloc(outlen
);
150 out_addr
= MLX5_ADDR_OF(query_nic_vport_context_out
, out
,
151 nic_vport_context
.permanent_address
);
153 err
= mlx5_query_nic_vport_context(mdev
, vport
, out
, outlen
);
157 ether_addr_copy(addr
, &out_addr
[2]);
163 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mac_address
);
165 int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev
*mdev
,
169 int inlen
= MLX5_ST_SZ_BYTES(modify_nic_vport_context_in
);
174 in
= mlx5_vzalloc(inlen
);
176 mlx5_core_warn(mdev
, "failed to allocate inbox\n");
180 MLX5_SET(modify_nic_vport_context_in
, in
,
181 field_select
.permanent_address
, 1);
182 MLX5_SET(modify_nic_vport_context_in
, in
, vport_number
, vport
);
185 MLX5_SET(modify_nic_vport_context_in
, in
, other_vport
, 1);
187 nic_vport_ctx
= MLX5_ADDR_OF(modify_nic_vport_context_in
,
188 in
, nic_vport_context
);
189 perm_mac
= MLX5_ADDR_OF(nic_vport_context
, nic_vport_ctx
,
192 ether_addr_copy(&perm_mac
[2], addr
);
194 err
= mlx5_modify_nic_vport_context(mdev
, in
, inlen
);
200 EXPORT_SYMBOL(mlx5_modify_nic_vport_mac_address
);
202 int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev
*dev
,
204 enum mlx5_list_type list_type
,
205 u8 addr_list
[][ETH_ALEN
],
208 u32 in
[MLX5_ST_SZ_DW(query_nic_vport_context_in
)];
217 req_list_size
= *list_size
;
219 max_list_size
= list_type
== MLX5_NVPRT_LIST_TYPE_UC
?
220 1 << MLX5_CAP_GEN(dev
, log_max_current_uc_list
) :
221 1 << MLX5_CAP_GEN(dev
, log_max_current_mc_list
);
223 if (req_list_size
> max_list_size
) {
224 mlx5_core_warn(dev
, "Requested list size (%d) > (%d) max_list_size\n",
225 req_list_size
, max_list_size
);
226 req_list_size
= max_list_size
;
229 out_sz
= MLX5_ST_SZ_BYTES(modify_nic_vport_context_in
) +
230 req_list_size
* MLX5_ST_SZ_BYTES(mac_address_layout
);
232 memset(in
, 0, sizeof(in
));
233 out
= kzalloc(out_sz
, GFP_KERNEL
);
237 MLX5_SET(query_nic_vport_context_in
, in
, opcode
,
238 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT
);
239 MLX5_SET(query_nic_vport_context_in
, in
, allowed_list_type
, list_type
);
240 MLX5_SET(query_nic_vport_context_in
, in
, vport_number
, vport
);
243 MLX5_SET(query_nic_vport_context_in
, in
, other_vport
, 1);
245 err
= mlx5_cmd_exec_check_status(dev
, in
, sizeof(in
), out
, out_sz
);
249 nic_vport_ctx
= MLX5_ADDR_OF(query_nic_vport_context_out
, out
,
251 req_list_size
= MLX5_GET(nic_vport_context
, nic_vport_ctx
,
254 *list_size
= req_list_size
;
255 for (i
= 0; i
< req_list_size
; i
++) {
256 u8
*mac_addr
= MLX5_ADDR_OF(nic_vport_context
,
258 current_uc_mac_address
[i
]) + 2;
259 ether_addr_copy(addr_list
[i
], mac_addr
);
265 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mac_list
);
267 int mlx5_modify_nic_vport_mac_list(struct mlx5_core_dev
*dev
,
268 enum mlx5_list_type list_type
,
269 u8 addr_list
[][ETH_ALEN
],
272 u32 out
[MLX5_ST_SZ_DW(modify_nic_vport_context_out
)];
280 max_list_size
= list_type
== MLX5_NVPRT_LIST_TYPE_UC
?
281 1 << MLX5_CAP_GEN(dev
, log_max_current_uc_list
) :
282 1 << MLX5_CAP_GEN(dev
, log_max_current_mc_list
);
284 if (list_size
> max_list_size
)
287 in_sz
= MLX5_ST_SZ_BYTES(modify_nic_vport_context_in
) +
288 list_size
* MLX5_ST_SZ_BYTES(mac_address_layout
);
290 memset(out
, 0, sizeof(out
));
291 in
= kzalloc(in_sz
, GFP_KERNEL
);
295 MLX5_SET(modify_nic_vport_context_in
, in
, opcode
,
296 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT
);
297 MLX5_SET(modify_nic_vport_context_in
, in
,
298 field_select
.addresses_list
, 1);
300 nic_vport_ctx
= MLX5_ADDR_OF(modify_nic_vport_context_in
, in
,
303 MLX5_SET(nic_vport_context
, nic_vport_ctx
,
304 allowed_list_type
, list_type
);
305 MLX5_SET(nic_vport_context
, nic_vport_ctx
,
306 allowed_list_size
, list_size
);
308 for (i
= 0; i
< list_size
; i
++) {
309 u8
*curr_mac
= MLX5_ADDR_OF(nic_vport_context
,
311 current_uc_mac_address
[i
]) + 2;
312 ether_addr_copy(curr_mac
, addr_list
[i
]);
315 err
= mlx5_cmd_exec_check_status(dev
, in
, in_sz
, out
, sizeof(out
));
319 EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mac_list
);
321 int mlx5_query_nic_vport_vlans(struct mlx5_core_dev
*dev
,
326 u32 in
[MLX5_ST_SZ_DW(query_nic_vport_context_in
)];
335 req_list_size
= *size
;
336 max_list_size
= 1 << MLX5_CAP_GEN(dev
, log_max_vlan_list
);
337 if (req_list_size
> max_list_size
) {
338 mlx5_core_warn(dev
, "Requested list size (%d) > (%d) max list size\n",
339 req_list_size
, max_list_size
);
340 req_list_size
= max_list_size
;
343 out_sz
= MLX5_ST_SZ_BYTES(modify_nic_vport_context_in
) +
344 req_list_size
* MLX5_ST_SZ_BYTES(vlan_layout
);
346 memset(in
, 0, sizeof(in
));
347 out
= kzalloc(out_sz
, GFP_KERNEL
);
351 MLX5_SET(query_nic_vport_context_in
, in
, opcode
,
352 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT
);
353 MLX5_SET(query_nic_vport_context_in
, in
, allowed_list_type
,
354 MLX5_NVPRT_LIST_TYPE_VLAN
);
355 MLX5_SET(query_nic_vport_context_in
, in
, vport_number
, vport
);
358 MLX5_SET(query_nic_vport_context_in
, in
, other_vport
, 1);
360 err
= mlx5_cmd_exec_check_status(dev
, in
, sizeof(in
), out
, out_sz
);
364 nic_vport_ctx
= MLX5_ADDR_OF(query_nic_vport_context_out
, out
,
366 req_list_size
= MLX5_GET(nic_vport_context
, nic_vport_ctx
,
369 *size
= req_list_size
;
370 for (i
= 0; i
< req_list_size
; i
++) {
371 void *vlan_addr
= MLX5_ADDR_OF(nic_vport_context
,
373 current_uc_mac_address
[i
]);
374 vlans
[i
] = MLX5_GET(vlan_layout
, vlan_addr
, vlan
);
380 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_vlans
);
382 int mlx5_modify_nic_vport_vlans(struct mlx5_core_dev
*dev
,
386 u32 out
[MLX5_ST_SZ_DW(modify_nic_vport_context_out
)];
394 max_list_size
= 1 << MLX5_CAP_GEN(dev
, log_max_vlan_list
);
396 if (list_size
> max_list_size
)
399 in_sz
= MLX5_ST_SZ_BYTES(modify_nic_vport_context_in
) +
400 list_size
* MLX5_ST_SZ_BYTES(vlan_layout
);
402 memset(out
, 0, sizeof(out
));
403 in
= kzalloc(in_sz
, GFP_KERNEL
);
407 MLX5_SET(modify_nic_vport_context_in
, in
, opcode
,
408 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT
);
409 MLX5_SET(modify_nic_vport_context_in
, in
,
410 field_select
.addresses_list
, 1);
412 nic_vport_ctx
= MLX5_ADDR_OF(modify_nic_vport_context_in
, in
,
415 MLX5_SET(nic_vport_context
, nic_vport_ctx
,
416 allowed_list_type
, MLX5_NVPRT_LIST_TYPE_VLAN
);
417 MLX5_SET(nic_vport_context
, nic_vport_ctx
,
418 allowed_list_size
, list_size
);
420 for (i
= 0; i
< list_size
; i
++) {
421 void *vlan_addr
= MLX5_ADDR_OF(nic_vport_context
,
423 current_uc_mac_address
[i
]);
424 MLX5_SET(vlan_layout
, vlan_addr
, vlan
, vlans
[i
]);
427 err
= mlx5_cmd_exec_check_status(dev
, in
, in_sz
, out
, sizeof(out
));
431 EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_vlans
);
433 int mlx5_query_hca_vport_gid(struct mlx5_core_dev
*dev
, u8 other_vport
,
434 u8 port_num
, u16 vf_num
, u16 gid_index
,
437 int in_sz
= MLX5_ST_SZ_BYTES(query_hca_vport_gid_in
);
438 int out_sz
= MLX5_ST_SZ_BYTES(query_hca_vport_gid_out
);
439 int is_group_manager
;
447 is_group_manager
= MLX5_CAP_GEN(dev
, vport_group_manager
);
448 tbsz
= mlx5_get_gid_table_len(MLX5_CAP_GEN(dev
, gid_table_size
));
449 mlx5_core_dbg(dev
, "vf_num %d, index %d, gid_table_size %d\n",
450 vf_num
, gid_index
, tbsz
);
452 if (gid_index
> tbsz
&& gid_index
!= 0xffff)
455 if (gid_index
== 0xffff)
460 out_sz
+= nout
* sizeof(*gid
);
462 in
= kzalloc(in_sz
, GFP_KERNEL
);
463 out
= kzalloc(out_sz
, GFP_KERNEL
);
469 MLX5_SET(query_hca_vport_gid_in
, in
, opcode
, MLX5_CMD_OP_QUERY_HCA_VPORT_GID
);
471 if (is_group_manager
) {
472 MLX5_SET(query_hca_vport_gid_in
, in
, vport_number
, vf_num
);
473 MLX5_SET(query_hca_vport_gid_in
, in
, other_vport
, 1);
479 MLX5_SET(query_hca_vport_gid_in
, in
, gid_index
, gid_index
);
481 if (MLX5_CAP_GEN(dev
, num_ports
) == 2)
482 MLX5_SET(query_hca_vport_gid_in
, in
, port_num
, port_num
);
484 err
= mlx5_cmd_exec(dev
, in
, in_sz
, out
, out_sz
);
488 err
= mlx5_cmd_status_to_err_v2(out
);
492 tmp
= out
+ MLX5_ST_SZ_BYTES(query_hca_vport_gid_out
);
493 gid
->global
.subnet_prefix
= tmp
->global
.subnet_prefix
;
494 gid
->global
.interface_id
= tmp
->global
.interface_id
;
501 EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_gid
);
503 int mlx5_query_hca_vport_pkey(struct mlx5_core_dev
*dev
, u8 other_vport
,
504 u8 port_num
, u16 vf_num
, u16 pkey_index
,
507 int in_sz
= MLX5_ST_SZ_BYTES(query_hca_vport_pkey_in
);
508 int out_sz
= MLX5_ST_SZ_BYTES(query_hca_vport_pkey_out
);
509 int is_group_manager
;
518 is_group_manager
= MLX5_CAP_GEN(dev
, vport_group_manager
);
520 tbsz
= mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(dev
, pkey_table_size
));
521 if (pkey_index
> tbsz
&& pkey_index
!= 0xffff)
524 if (pkey_index
== 0xffff)
529 out_sz
+= nout
* MLX5_ST_SZ_BYTES(pkey
);
531 in
= kzalloc(in_sz
, GFP_KERNEL
);
532 out
= kzalloc(out_sz
, GFP_KERNEL
);
538 MLX5_SET(query_hca_vport_pkey_in
, in
, opcode
, MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY
);
540 if (is_group_manager
) {
541 MLX5_SET(query_hca_vport_pkey_in
, in
, vport_number
, vf_num
);
542 MLX5_SET(query_hca_vport_pkey_in
, in
, other_vport
, 1);
548 MLX5_SET(query_hca_vport_pkey_in
, in
, pkey_index
, pkey_index
);
550 if (MLX5_CAP_GEN(dev
, num_ports
) == 2)
551 MLX5_SET(query_hca_vport_pkey_in
, in
, port_num
, port_num
);
553 err
= mlx5_cmd_exec(dev
, in
, in_sz
, out
, out_sz
);
557 err
= mlx5_cmd_status_to_err_v2(out
);
561 pkarr
= MLX5_ADDR_OF(query_hca_vport_pkey_out
, out
, pkey
);
562 for (i
= 0; i
< nout
; i
++, pkey
++, pkarr
+= MLX5_ST_SZ_BYTES(pkey
))
563 *pkey
= MLX5_GET_PR(pkey
, pkarr
, pkey
);
570 EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_pkey
);
572 int mlx5_query_hca_vport_context(struct mlx5_core_dev
*dev
,
573 u8 other_vport
, u8 port_num
,
575 struct mlx5_hca_vport_context
*rep
)
577 int out_sz
= MLX5_ST_SZ_BYTES(query_hca_vport_context_out
);
578 int in
[MLX5_ST_SZ_DW(query_hca_vport_context_in
)];
579 int is_group_manager
;
584 is_group_manager
= MLX5_CAP_GEN(dev
, vport_group_manager
);
586 memset(in
, 0, sizeof(in
));
587 out
= kzalloc(out_sz
, GFP_KERNEL
);
591 MLX5_SET(query_hca_vport_context_in
, in
, opcode
, MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT
);
594 if (is_group_manager
) {
595 MLX5_SET(query_hca_vport_context_in
, in
, other_vport
, 1);
596 MLX5_SET(query_hca_vport_context_in
, in
, vport_number
, vf_num
);
603 if (MLX5_CAP_GEN(dev
, num_ports
) == 2)
604 MLX5_SET(query_hca_vport_context_in
, in
, port_num
, port_num
);
606 err
= mlx5_cmd_exec(dev
, in
, sizeof(in
), out
, out_sz
);
609 err
= mlx5_cmd_status_to_err_v2(out
);
613 ctx
= MLX5_ADDR_OF(query_hca_vport_context_out
, out
, hca_vport_context
);
614 rep
->field_select
= MLX5_GET_PR(hca_vport_context
, ctx
, field_select
);
615 rep
->sm_virt_aware
= MLX5_GET_PR(hca_vport_context
, ctx
, sm_virt_aware
);
616 rep
->has_smi
= MLX5_GET_PR(hca_vport_context
, ctx
, has_smi
);
617 rep
->has_raw
= MLX5_GET_PR(hca_vport_context
, ctx
, has_raw
);
618 rep
->policy
= MLX5_GET_PR(hca_vport_context
, ctx
, vport_state_policy
);
619 rep
->phys_state
= MLX5_GET_PR(hca_vport_context
, ctx
,
620 port_physical_state
);
621 rep
->vport_state
= MLX5_GET_PR(hca_vport_context
, ctx
, vport_state
);
622 rep
->port_physical_state
= MLX5_GET_PR(hca_vport_context
, ctx
,
623 port_physical_state
);
624 rep
->port_guid
= MLX5_GET64_PR(hca_vport_context
, ctx
, port_guid
);
625 rep
->node_guid
= MLX5_GET64_PR(hca_vport_context
, ctx
, node_guid
);
626 rep
->cap_mask1
= MLX5_GET_PR(hca_vport_context
, ctx
, cap_mask1
);
627 rep
->cap_mask1_perm
= MLX5_GET_PR(hca_vport_context
, ctx
,
628 cap_mask1_field_select
);
629 rep
->cap_mask2
= MLX5_GET_PR(hca_vport_context
, ctx
, cap_mask2
);
630 rep
->cap_mask2_perm
= MLX5_GET_PR(hca_vport_context
, ctx
,
631 cap_mask2_field_select
);
632 rep
->lid
= MLX5_GET_PR(hca_vport_context
, ctx
, lid
);
633 rep
->init_type_reply
= MLX5_GET_PR(hca_vport_context
, ctx
,
635 rep
->lmc
= MLX5_GET_PR(hca_vport_context
, ctx
, lmc
);
636 rep
->subnet_timeout
= MLX5_GET_PR(hca_vport_context
, ctx
,
638 rep
->sm_lid
= MLX5_GET_PR(hca_vport_context
, ctx
, sm_lid
);
639 rep
->sm_sl
= MLX5_GET_PR(hca_vport_context
, ctx
, sm_sl
);
640 rep
->qkey_violation_counter
= MLX5_GET_PR(hca_vport_context
, ctx
,
641 qkey_violation_counter
);
642 rep
->pkey_violation_counter
= MLX5_GET_PR(hca_vport_context
, ctx
,
643 pkey_violation_counter
);
644 rep
->grh_required
= MLX5_GET_PR(hca_vport_context
, ctx
, grh_required
);
645 rep
->sys_image_guid
= MLX5_GET64_PR(hca_vport_context
, ctx
,
652 EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_context
);
654 int mlx5_query_hca_vport_system_image_guid(struct mlx5_core_dev
*dev
,
657 struct mlx5_hca_vport_context
*rep
;
660 rep
= kzalloc(sizeof(*rep
), GFP_KERNEL
);
664 err
= mlx5_query_hca_vport_context(dev
, 0, 1, 0, rep
);
666 *sys_image_guid
= rep
->sys_image_guid
;
671 EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_system_image_guid
);
673 int mlx5_query_hca_vport_node_guid(struct mlx5_core_dev
*dev
,
676 struct mlx5_hca_vport_context
*rep
;
679 rep
= kzalloc(sizeof(*rep
), GFP_KERNEL
);
683 err
= mlx5_query_hca_vport_context(dev
, 0, 1, 0, rep
);
685 *node_guid
= rep
->node_guid
;
690 EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_node_guid
);
692 int mlx5_query_nic_vport_promisc(struct mlx5_core_dev
*mdev
,
699 int outlen
= MLX5_ST_SZ_BYTES(query_nic_vport_context_out
);
702 out
= kzalloc(outlen
, GFP_KERNEL
);
706 err
= mlx5_query_nic_vport_context(mdev
, vport
, out
, outlen
);
710 *promisc_uc
= MLX5_GET(query_nic_vport_context_out
, out
,
711 nic_vport_context
.promisc_uc
);
712 *promisc_mc
= MLX5_GET(query_nic_vport_context_out
, out
,
713 nic_vport_context
.promisc_mc
);
714 *promisc_all
= MLX5_GET(query_nic_vport_context_out
, out
,
715 nic_vport_context
.promisc_all
);
721 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_promisc
);
723 int mlx5_modify_nic_vport_promisc(struct mlx5_core_dev
*mdev
,
729 int inlen
= MLX5_ST_SZ_BYTES(modify_nic_vport_context_in
);
732 in
= mlx5_vzalloc(inlen
);
734 mlx5_core_err(mdev
, "failed to allocate inbox\n");
738 MLX5_SET(modify_nic_vport_context_in
, in
, field_select
.promisc
, 1);
739 MLX5_SET(modify_nic_vport_context_in
, in
,
740 nic_vport_context
.promisc_uc
, promisc_uc
);
741 MLX5_SET(modify_nic_vport_context_in
, in
,
742 nic_vport_context
.promisc_mc
, promisc_mc
);
743 MLX5_SET(modify_nic_vport_context_in
, in
,
744 nic_vport_context
.promisc_all
, promisc_all
);
746 err
= mlx5_modify_nic_vport_context(mdev
, in
, inlen
);
752 EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_promisc
);