IB/mlx5: Support IB device's callback for getting its netdev
drivers/net/ethernet/mellanox/mlx5/core/vport.c
/*
 * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/export.h>
#include <linux/etherdevice.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/vport.h>
#include "mlx5_core.h"

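/*
 * Query the state of a vport. @opmod is passed through unchanged as the
 * op_mod field of the QUERY_VPORT_STATE command, and the state field of the
 * command output is returned to the caller. On command failure a warning is
 * logged and the state field of the output buffer is returned as-is.
 */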
u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod)
{
        u32 in[MLX5_ST_SZ_DW(query_vport_state_in)];
        u32 out[MLX5_ST_SZ_DW(query_vport_state_out)];
        int err;

        memset(in, 0, sizeof(in));

        MLX5_SET(query_vport_state_in, in, opcode,
                 MLX5_CMD_OP_QUERY_VPORT_STATE);
        MLX5_SET(query_vport_state_in, in, op_mod, opmod);

        err = mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out,
                                         sizeof(out));
        if (err)
                mlx5_core_warn(mdev, "MLX5_CMD_OP_QUERY_VPORT_STATE failed\n");

        return MLX5_GET(query_vport_state_out, out, state);
}
EXPORT_SYMBOL(mlx5_query_vport_state);

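/*
 * Read the permanent MAC address from the NIC vport context into @addr.
 * The permanent_address field is 8 bytes wide with the 6-byte MAC in its
 * last six bytes, hence the copy from &out_addr[2]. As an illustrative
 * example (not part of this file), an Ethernet driver could seed its
 * netdev address with:
 *
 *      mlx5_query_nic_vport_mac_address(mdev, netdev->dev_addr);
 */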
void mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev, u8 *addr)
{
        u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)];
        u32 *out;
        int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
        u8 *out_addr;

        out = mlx5_vzalloc(outlen);
        if (!out)
                return;

        out_addr = MLX5_ADDR_OF(query_nic_vport_context_out, out,
                                nic_vport_context.permanent_address);

        memset(in, 0, sizeof(in));

        MLX5_SET(query_nic_vport_context_in, in, opcode,
                 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);

        memset(out, 0, outlen);
        mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out, outlen);

        ether_addr_copy(addr, &out_addr[2]);

        kvfree(out);
}
EXPORT_SYMBOL(mlx5_query_nic_vport_mac_address);

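/*
 * Query GID table entries of an HCA vport. A @gid_index of 0xffff requests
 * the whole table (the output buffer is sized for all tbsz entries);
 * otherwise a single entry is read. Querying another vport (@other_vport
 * set, @vf_num giving the vport number) is permitted only when the device
 * reports the vport_group_manager capability, else -EPERM is returned.
 * Only the first returned GID is copied into @gid.
 */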
int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 other_vport,
                             u8 port_num, u16 vf_num, u16 gid_index,
                             union ib_gid *gid)
{
        int in_sz = MLX5_ST_SZ_BYTES(query_hca_vport_gid_in);
        int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_gid_out);
        int is_group_manager;
        void *out = NULL;
        void *in = NULL;
        union ib_gid *tmp;
        int tbsz;
        int nout;
        int err;

        is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
        tbsz = mlx5_get_gid_table_len(MLX5_CAP_GEN(dev, gid_table_size));
        mlx5_core_dbg(dev, "vf_num %d, index %d, gid_table_size %d\n",
                      vf_num, gid_index, tbsz);

        if (gid_index > tbsz && gid_index != 0xffff)
                return -EINVAL;

        if (gid_index == 0xffff)
                nout = tbsz;
        else
                nout = 1;

        out_sz += nout * sizeof(*gid);

        in = kzalloc(in_sz, GFP_KERNEL);
        out = kzalloc(out_sz, GFP_KERNEL);
        if (!in || !out) {
                err = -ENOMEM;
                goto out;
        }

        MLX5_SET(query_hca_vport_gid_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_VPORT_GID);
        if (other_vport) {
                if (is_group_manager) {
                        MLX5_SET(query_hca_vport_gid_in, in, vport_number, vf_num);
                        MLX5_SET(query_hca_vport_gid_in, in, other_vport, 1);
                } else {
                        err = -EPERM;
                        goto out;
                }
        }
        MLX5_SET(query_hca_vport_gid_in, in, gid_index, gid_index);

        if (MLX5_CAP_GEN(dev, num_ports) == 2)
                MLX5_SET(query_hca_vport_gid_in, in, port_num, port_num);

        err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
        if (err)
                goto out;

        err = mlx5_cmd_status_to_err_v2(out);
        if (err)
                goto out;

        tmp = out + MLX5_ST_SZ_BYTES(query_hca_vport_gid_out);
        gid->global.subnet_prefix = tmp->global.subnet_prefix;
        gid->global.interface_id = tmp->global.interface_id;

out:
        kfree(in);
        kfree(out);
        return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_gid);

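/*
 * Query P_Key table entries of an HCA vport. As with the GID query above,
 * a @pkey_index of 0xffff selects the whole table; the results are written
 * to the @pkey array, one 16-bit P_Key per entry, so in that case the
 * caller must provide room for the full table.
 */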
int mlx5_query_hca_vport_pkey(struct mlx5_core_dev *dev, u8 other_vport,
                              u8 port_num, u16 vf_num, u16 pkey_index,
                              u16 *pkey)
{
        int in_sz = MLX5_ST_SZ_BYTES(query_hca_vport_pkey_in);
        int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_pkey_out);
        int is_group_manager;
        void *out = NULL;
        void *in = NULL;
        void *pkarr;
        int nout;
        int tbsz;
        int err;
        int i;

        is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);

        tbsz = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(dev, pkey_table_size));
        if (pkey_index > tbsz && pkey_index != 0xffff)
                return -EINVAL;

        if (pkey_index == 0xffff)
                nout = tbsz;
        else
                nout = 1;

        out_sz += nout * MLX5_ST_SZ_BYTES(pkey);

        in = kzalloc(in_sz, GFP_KERNEL);
        out = kzalloc(out_sz, GFP_KERNEL);
        if (!in || !out) {
                err = -ENOMEM;
                goto out;
        }

        MLX5_SET(query_hca_vport_pkey_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY);
        if (other_vport) {
                if (is_group_manager) {
                        MLX5_SET(query_hca_vport_pkey_in, in, vport_number, vf_num);
                        MLX5_SET(query_hca_vport_pkey_in, in, other_vport, 1);
                } else {
                        err = -EPERM;
                        goto out;
                }
        }
        MLX5_SET(query_hca_vport_pkey_in, in, pkey_index, pkey_index);

        if (MLX5_CAP_GEN(dev, num_ports) == 2)
                MLX5_SET(query_hca_vport_pkey_in, in, port_num, port_num);

        err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
        if (err)
                goto out;

        err = mlx5_cmd_status_to_err_v2(out);
        if (err)
                goto out;

        pkarr = MLX5_ADDR_OF(query_hca_vport_pkey_out, out, pkey);
        for (i = 0; i < nout; i++, pkey++, pkarr += MLX5_ST_SZ_BYTES(pkey))
                *pkey = MLX5_GET_PR(pkey, pkarr, pkey);

out:
        kfree(in);
        kfree(out);
        return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_pkey);

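/*
 * Query the HCA vport context and unpack it into the software
 * representation in @rep: GUIDs, LIDs, capability masks, vport and port
 * physical state, subnet timeout, init_type_reply, grh_required and the
 * P_Key/Q_Key violation counters. The same group-manager rule as above
 * applies when @other_vport is set.
 */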
int mlx5_query_hca_vport_context(struct mlx5_core_dev *dev,
                                 u8 other_vport, u8 port_num,
                                 u16 vf_num,
                                 struct mlx5_hca_vport_context *rep)
{
        int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
        int in[MLX5_ST_SZ_DW(query_hca_vport_context_in)];
        int is_group_manager;
        void *out;
        void *ctx;
        int err;

        is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);

        memset(in, 0, sizeof(in));
        out = kzalloc(out_sz, GFP_KERNEL);
        if (!out)
                return -ENOMEM;

        MLX5_SET(query_hca_vport_context_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT);

        if (other_vport) {
                if (is_group_manager) {
                        MLX5_SET(query_hca_vport_context_in, in, other_vport, 1);
                        MLX5_SET(query_hca_vport_context_in, in, vport_number, vf_num);
                } else {
                        err = -EPERM;
                        goto ex;
                }
        }

        if (MLX5_CAP_GEN(dev, num_ports) == 2)
                MLX5_SET(query_hca_vport_context_in, in, port_num, port_num);

        err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
        if (err)
                goto ex;
        err = mlx5_cmd_status_to_err_v2(out);
        if (err)
                goto ex;

        ctx = MLX5_ADDR_OF(query_hca_vport_context_out, out, hca_vport_context);
        rep->field_select = MLX5_GET_PR(hca_vport_context, ctx, field_select);
        rep->sm_virt_aware = MLX5_GET_PR(hca_vport_context, ctx, sm_virt_aware);
        rep->has_smi = MLX5_GET_PR(hca_vport_context, ctx, has_smi);
        rep->has_raw = MLX5_GET_PR(hca_vport_context, ctx, has_raw);
        rep->policy = MLX5_GET_PR(hca_vport_context, ctx, vport_state_policy);
        rep->phys_state = MLX5_GET_PR(hca_vport_context, ctx,
                                      port_physical_state);
        rep->vport_state = MLX5_GET_PR(hca_vport_context, ctx, vport_state);
        rep->port_physical_state = MLX5_GET_PR(hca_vport_context, ctx,
                                               port_physical_state);
        rep->port_guid = MLX5_GET64_PR(hca_vport_context, ctx, port_guid);
        rep->node_guid = MLX5_GET64_PR(hca_vport_context, ctx, node_guid);
        rep->cap_mask1 = MLX5_GET_PR(hca_vport_context, ctx, cap_mask1);
        rep->cap_mask1_perm = MLX5_GET_PR(hca_vport_context, ctx,
                                          cap_mask1_field_select);
        rep->cap_mask2 = MLX5_GET_PR(hca_vport_context, ctx, cap_mask2);
        rep->cap_mask2_perm = MLX5_GET_PR(hca_vport_context, ctx,
                                          cap_mask2_field_select);
        rep->lid = MLX5_GET_PR(hca_vport_context, ctx, lid);
        rep->init_type_reply = MLX5_GET_PR(hca_vport_context, ctx,
                                           init_type_reply);
        rep->lmc = MLX5_GET_PR(hca_vport_context, ctx, lmc);
        rep->subnet_timeout = MLX5_GET_PR(hca_vport_context, ctx,
                                          subnet_timeout);
        rep->sm_lid = MLX5_GET_PR(hca_vport_context, ctx, sm_lid);
        rep->sm_sl = MLX5_GET_PR(hca_vport_context, ctx, sm_sl);
        rep->qkey_violation_counter = MLX5_GET_PR(hca_vport_context, ctx,
                                                  qkey_violation_counter);
        rep->pkey_violation_counter = MLX5_GET_PR(hca_vport_context, ctx,
                                                  pkey_violation_counter);
        rep->grh_required = MLX5_GET_PR(hca_vport_context, ctx, grh_required);
        rep->sys_image_guid = MLX5_GET64_PR(hca_vport_context, ctx,
                                            system_image_guid);

ex:
        kfree(out);
        return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_context);

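/*
 * Convenience wrapper: query the HCA vport context of the local vport on
 * port 1 and return only the system image GUID. A minimal usage sketch
 * (illustrative only, not part of this file):
 *
 *      u64 guid;
 *
 *      err = mlx5_query_hca_vport_system_image_guid(mdev, &guid);
 *      if (!err)
 *              pr_debug("sys image guid 0x%llx\n", guid);
 */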
int mlx5_query_hca_vport_system_image_guid(struct mlx5_core_dev *dev,
                                           u64 *sys_image_guid)
{
        struct mlx5_hca_vport_context *rep;
        int err;

        rep = kzalloc(sizeof(*rep), GFP_KERNEL);
        if (!rep)
                return -ENOMEM;

        err = mlx5_query_hca_vport_context(dev, 0, 1, 0, rep);
        if (!err)
                *sys_image_guid = rep->sys_image_guid;

        kfree(rep);
        return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_system_image_guid);

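/*
 * Same as above, but returns the node GUID of the local vport on port 1.
 */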
int mlx5_query_hca_vport_node_guid(struct mlx5_core_dev *dev,
                                   u64 *node_guid)
{
        struct mlx5_hca_vport_context *rep;
        int err;

        rep = kzalloc(sizeof(*rep), GFP_KERNEL);
        if (!rep)
                return -ENOMEM;

        err = mlx5_query_hca_vport_context(dev, 0, 1, 0, rep);
        if (!err)
                *node_guid = rep->node_guid;

        kfree(rep);
        return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_node_guid);