net/mlx4: New file for QoS related firmware commands
[deliverable/linux.git] / drivers / net / ethernet / mellanox / mlx4 / port.c
1 /*
2 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33 #include <linux/errno.h>
34 #include <linux/if_ether.h>
35 #include <linux/if_vlan.h>
36 #include <linux/export.h>
37
38 #include <linux/mlx4/cmd.h>
39
40 #include "mlx4.h"
41 #include "mlx4_stats.h"
42
43 #define MLX4_MAC_VALID (1ull << 63)
44
45 #define MLX4_VLAN_VALID (1u << 31)
46 #define MLX4_VLAN_MASK 0xfff
47
48 void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table)
49 {
50 int i;
51
52 mutex_init(&table->mutex);
53 for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
54 table->entries[i] = 0;
55 table->refs[i] = 0;
56 }
57 table->max = 1 << dev->caps.log_num_macs;
58 table->total = 0;
59 }
60
61 void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table)
62 {
63 int i;
64
65 mutex_init(&table->mutex);
66 for (i = 0; i < MLX4_MAX_VLAN_NUM; i++) {
67 table->entries[i] = 0;
68 table->refs[i] = 0;
69 }
70 table->max = (1 << dev->caps.log_num_vlans) - MLX4_VLAN_REGULAR;
71 table->total = 0;
72 }
73
74 void mlx4_init_roce_gid_table(struct mlx4_dev *dev,
75 struct mlx4_roce_gid_table *table)
76 {
77 int i;
78
79 mutex_init(&table->mutex);
80 for (i = 0; i < MLX4_ROCE_MAX_GIDS; i++)
81 memset(table->roce_gids[i].raw, 0, MLX4_ROCE_GID_ENTRY_SIZE);
82 }
83
84 static int validate_index(struct mlx4_dev *dev,
85 struct mlx4_mac_table *table, int index)
86 {
87 int err = 0;
88
89 if (index < 0 || index >= table->max || !table->entries[index]) {
90 mlx4_warn(dev, "No valid Mac entry for the given index\n");
91 err = -EINVAL;
92 }
93 return err;
94 }
95
96 static int find_index(struct mlx4_dev *dev,
97 struct mlx4_mac_table *table, u64 mac)
98 {
99 int i;
100
101 for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
102 if (table->refs[i] &&
103 (MLX4_MAC_MASK & mac) ==
104 (MLX4_MAC_MASK & be64_to_cpu(table->entries[i])))
105 return i;
106 }
107 /* Mac not found */
108 return -EINVAL;
109 }
110
111 static int mlx4_set_port_mac_table(struct mlx4_dev *dev, u8 port,
112 __be64 *entries)
113 {
114 struct mlx4_cmd_mailbox *mailbox;
115 u32 in_mod;
116 int err;
117
118 mailbox = mlx4_alloc_cmd_mailbox(dev);
119 if (IS_ERR(mailbox))
120 return PTR_ERR(mailbox);
121
122 memcpy(mailbox->buf, entries, MLX4_MAC_TABLE_SIZE);
123
124 in_mod = MLX4_SET_PORT_MAC_TABLE << 8 | port;
125
126 err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
127 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
128
129 mlx4_free_cmd_mailbox(dev, mailbox);
130 return err;
131 }
132
133 int mlx4_find_cached_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *idx)
134 {
135 struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
136 struct mlx4_mac_table *table = &info->mac_table;
137 int i;
138
139 for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
140 if (!table->refs[i])
141 continue;
142
143 if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) {
144 *idx = i;
145 return 0;
146 }
147 }
148
149 return -ENOENT;
150 }
151 EXPORT_SYMBOL_GPL(mlx4_find_cached_mac);
152
153 int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac)
154 {
155 struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
156 struct mlx4_mac_table *table = &info->mac_table;
157 int i, err = 0;
158 int free = -1;
159
160 mlx4_dbg(dev, "Registering MAC: 0x%llx for port %d\n",
161 (unsigned long long) mac, port);
162
163 mutex_lock(&table->mutex);
164 for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
165 if (!table->refs[i]) {
166 if (free < 0)
167 free = i;
168 continue;
169 }
170
171 if ((MLX4_MAC_MASK & mac) ==
172 (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) {
173 /* MAC already registered, increment ref count */
174 err = i;
175 ++table->refs[i];
176 goto out;
177 }
178 }
179
180 mlx4_dbg(dev, "Free MAC index is %d\n", free);
181
182 if (table->total == table->max) {
183 /* No free mac entries */
184 err = -ENOSPC;
185 goto out;
186 }
187
188 /* Register new MAC */
189 table->entries[free] = cpu_to_be64(mac | MLX4_MAC_VALID);
190
191 err = mlx4_set_port_mac_table(dev, port, table->entries);
192 if (unlikely(err)) {
193 mlx4_err(dev, "Failed adding MAC: 0x%llx\n",
194 (unsigned long long) mac);
195 table->entries[free] = 0;
196 goto out;
197 }
198 table->refs[free] = 1;
199 err = free;
200 ++table->total;
201 out:
202 mutex_unlock(&table->mutex);
203 return err;
204 }
205 EXPORT_SYMBOL_GPL(__mlx4_register_mac);
206
/* Register a MAC address on @port, routing through the resource-tracker
 * command interface when running multi-function (SR-IOV).
 *
 * Returns the allocated MAC-table index on success, negative errno on
 * failure.
 */
int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac)
{
	u64 out_param = 0;
	/* stays -EINVAL when the new-format path below is skipped, which
	 * deliberately routes us into the old-format retry branch
	 */
	int err = -EINVAL;

	if (mlx4_is_mfunc(dev)) {
		if (!(dev->flags & MLX4_FLAG_OLD_REG_MAC)) {
			/* new REG_MAC format: port is encoded in bits 15:8
			 * of the in_modifier
			 */
			err = mlx4_cmd_imm(dev, mac, &out_param,
					   ((u32) port) << 8 | (u32) RES_MAC,
					   RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
					   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
		}
		if (err && err == -EINVAL && mlx4_is_slave(dev)) {
			/* retry using old REG_MAC format */
			set_param_l(&out_param, port);
			err = mlx4_cmd_imm(dev, mac, &out_param, RES_MAC,
					   RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
					   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
			if (!err)
				/* remember, so future calls go straight to
				 * the old format
				 */
				dev->flags |= MLX4_FLAG_OLD_REG_MAC;
		}
		if (err)
			return err;

		/* the allocated index comes back in the low 32 bits */
		return get_param_l(&out_param);
	}
	return __mlx4_register_mac(dev, port, mac);
}
EXPORT_SYMBOL_GPL(mlx4_register_mac);
236
237 int mlx4_get_base_qpn(struct mlx4_dev *dev, u8 port)
238 {
239 return dev->caps.reserved_qps_base[MLX4_QP_REGION_ETH_ADDR] +
240 (port - 1) * (1 << dev->caps.log_num_macs);
241 }
242 EXPORT_SYMBOL_GPL(mlx4_get_base_qpn);
243
244 void __mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac)
245 {
246 struct mlx4_port_info *info;
247 struct mlx4_mac_table *table;
248 int index;
249
250 if (port < 1 || port > dev->caps.num_ports) {
251 mlx4_warn(dev, "invalid port number (%d), aborting...\n", port);
252 return;
253 }
254 info = &mlx4_priv(dev)->port[port];
255 table = &info->mac_table;
256 mutex_lock(&table->mutex);
257 index = find_index(dev, table, mac);
258
259 if (validate_index(dev, table, index))
260 goto out;
261 if (--table->refs[index]) {
262 mlx4_dbg(dev, "Have more references for index %d, no need to modify mac table\n",
263 index);
264 goto out;
265 }
266
267 table->entries[index] = 0;
268 mlx4_set_port_mac_table(dev, port, table->entries);
269 --table->total;
270 out:
271 mutex_unlock(&table->mutex);
272 }
273 EXPORT_SYMBOL_GPL(__mlx4_unregister_mac);
274
275 void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac)
276 {
277 u64 out_param = 0;
278
279 if (mlx4_is_mfunc(dev)) {
280 if (!(dev->flags & MLX4_FLAG_OLD_REG_MAC)) {
281 (void) mlx4_cmd_imm(dev, mac, &out_param,
282 ((u32) port) << 8 | (u32) RES_MAC,
283 RES_OP_RESERVE_AND_MAP, MLX4_CMD_FREE_RES,
284 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
285 } else {
286 /* use old unregister mac format */
287 set_param_l(&out_param, port);
288 (void) mlx4_cmd_imm(dev, mac, &out_param, RES_MAC,
289 RES_OP_RESERVE_AND_MAP, MLX4_CMD_FREE_RES,
290 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
291 }
292 return;
293 }
294 __mlx4_unregister_mac(dev, port, mac);
295 return;
296 }
297 EXPORT_SYMBOL_GPL(mlx4_unregister_mac);
298
299 int __mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac)
300 {
301 struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
302 struct mlx4_mac_table *table = &info->mac_table;
303 int index = qpn - info->base_qpn;
304 int err = 0;
305
306 /* CX1 doesn't support multi-functions */
307 mutex_lock(&table->mutex);
308
309 err = validate_index(dev, table, index);
310 if (err)
311 goto out;
312
313 table->entries[index] = cpu_to_be64(new_mac | MLX4_MAC_VALID);
314
315 err = mlx4_set_port_mac_table(dev, port, table->entries);
316 if (unlikely(err)) {
317 mlx4_err(dev, "Failed adding MAC: 0x%llx\n",
318 (unsigned long long) new_mac);
319 table->entries[index] = 0;
320 }
321 out:
322 mutex_unlock(&table->mutex);
323 return err;
324 }
325 EXPORT_SYMBOL_GPL(__mlx4_replace_mac);
326
327 static int mlx4_set_port_vlan_table(struct mlx4_dev *dev, u8 port,
328 __be32 *entries)
329 {
330 struct mlx4_cmd_mailbox *mailbox;
331 u32 in_mod;
332 int err;
333
334 mailbox = mlx4_alloc_cmd_mailbox(dev);
335 if (IS_ERR(mailbox))
336 return PTR_ERR(mailbox);
337
338 memcpy(mailbox->buf, entries, MLX4_VLAN_TABLE_SIZE);
339 in_mod = MLX4_SET_PORT_VLAN_TABLE << 8 | port;
340 err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
341 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
342
343 mlx4_free_cmd_mailbox(dev, mailbox);
344
345 return err;
346 }
347
348 int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx)
349 {
350 struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
351 int i;
352
353 for (i = 0; i < MLX4_MAX_VLAN_NUM; ++i) {
354 if (table->refs[i] &&
355 (vid == (MLX4_VLAN_MASK &
356 be32_to_cpu(table->entries[i])))) {
357 /* VLAN already registered, increase reference count */
358 *idx = i;
359 return 0;
360 }
361 }
362
363 return -ENOENT;
364 }
365 EXPORT_SYMBOL_GPL(mlx4_find_cached_vlan);
366
/* Add @vlan to the port's VLAN table and program it into the firmware.
 * On success, *index receives the table slot used.  Slots below
 * MLX4_VLAN_REGULAR are reserved and never handed out here.
 *
 * Returns 0 on success, -ENOSPC when the table is at capacity, or
 * -ENOMEM when no free slot exists.
 */
int __mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan,
			 int *index)
{
	struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
	int i, err = 0;
	int free = -1;

	mutex_lock(&table->mutex);

	if (table->total == table->max) {
		/* No free vlan entries */
		err = -ENOSPC;
		goto out;
	}

	/* Scan for an existing entry while remembering the first free slot */
	for (i = MLX4_VLAN_REGULAR; i < MLX4_MAX_VLAN_NUM; i++) {
		if (free < 0 && (table->refs[i] == 0)) {
			free = i;
			continue;
		}

		if (table->refs[i] &&
		    (vlan == (MLX4_VLAN_MASK &
			      be32_to_cpu(table->entries[i])))) {
			/* Vlan already registered, increase references count */
			*index = i;
			++table->refs[i];
			goto out;
		}
	}

	if (free < 0) {
		err = -ENOMEM;
		goto out;
	}

	/* Register new VLAN */
	table->refs[free] = 1;
	table->entries[free] = cpu_to_be32(vlan | MLX4_VLAN_VALID);

	err = mlx4_set_port_vlan_table(dev, port, table->entries);
	if (unlikely(err)) {
		mlx4_warn(dev, "Failed adding vlan: %u\n", vlan);
		/* roll back the in-memory entry on firmware failure */
		table->refs[free] = 0;
		table->entries[free] = 0;
		goto out;
	}

	*index = free;
	++table->total;
out:
	mutex_unlock(&table->mutex);
	return err;
}
421
422 int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index)
423 {
424 u64 out_param = 0;
425 int err;
426
427 if (vlan > 4095)
428 return -EINVAL;
429
430 if (mlx4_is_mfunc(dev)) {
431 err = mlx4_cmd_imm(dev, vlan, &out_param,
432 ((u32) port) << 8 | (u32) RES_VLAN,
433 RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
434 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
435 if (!err)
436 *index = get_param_l(&out_param);
437
438 return err;
439 }
440 return __mlx4_register_vlan(dev, port, vlan, index);
441 }
442 EXPORT_SYMBOL_GPL(mlx4_register_vlan);
443
/* Drop one reference on @vlan in the port's VLAN table; when the last
 * reference goes away, clear the entry and reprogram the firmware.
 * Reserved slots (below MLX4_VLAN_REGULAR) are never freed.
 */
void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan)
{
	struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
	int index;

	mutex_lock(&table->mutex);
	if (mlx4_find_cached_vlan(dev, port, vlan, &index)) {
		mlx4_warn(dev, "vlan 0x%x is not in the vlan table\n", vlan);
		goto out;
	}

	if (index < MLX4_VLAN_REGULAR) {
		mlx4_warn(dev, "Trying to free special vlan index %d\n", index);
		goto out;
	}

	if (--table->refs[index]) {
		mlx4_dbg(dev, "Have %d more references for index %d, no need to modify vlan table\n",
			 table->refs[index], index);
		goto out;
	}
	table->entries[index] = 0;
	mlx4_set_port_vlan_table(dev, port, table->entries);
	--table->total;
out:
	mutex_unlock(&table->mutex);
}
471
472 void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan)
473 {
474 u64 out_param = 0;
475
476 if (mlx4_is_mfunc(dev)) {
477 (void) mlx4_cmd_imm(dev, vlan, &out_param,
478 ((u32) port) << 8 | (u32) RES_VLAN,
479 RES_OP_RESERVE_AND_MAP,
480 MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
481 MLX4_CMD_WRAPPED);
482 return;
483 }
484 __mlx4_unregister_vlan(dev, port, vlan);
485 }
486 EXPORT_SYMBOL_GPL(mlx4_unregister_vlan);
487
/* Query the IB capability mask of @port with a MAD_IFC command and
 * return it (big-endian) through *caps.
 */
int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps)
{
	struct mlx4_cmd_mailbox *inmailbox, *outmailbox;
	u8 *inbuf, *outbuf;
	int err;

	inmailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(inmailbox))
		return PTR_ERR(inmailbox);

	outmailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(outmailbox)) {
		mlx4_free_cmd_mailbox(dev, inmailbox);
		return PTR_ERR(outmailbox);
	}

	inbuf = inmailbox->buf;
	outbuf = outmailbox->buf;
	/* Hand-rolled MAD header; presumably base version / mgmt class /
	 * class version / method — TODO confirm against the IB spec
	 */
	inbuf[0] = 1;
	inbuf[1] = 1;
	inbuf[2] = 1;
	inbuf[3] = 1;
	/* attribute ID 0x0015 (PortInfo), attribute modifier = port */
	*(__be16 *) (&inbuf[16]) = cpu_to_be16(0x0015);
	*(__be32 *) (&inbuf[20]) = cpu_to_be32(port);

	err = mlx4_cmd_box(dev, inmailbox->dma, outmailbox->dma, port, 3,
			   MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
			   MLX4_CMD_NATIVE);
	if (!err)
		/* capability mask lives at byte offset 84 of the response */
		*caps = *(__be32 *) (outbuf + 84);
	mlx4_free_cmd_mailbox(dev, inmailbox);
	mlx4_free_cmd_mailbox(dev, outmailbox);
	return err;
}
/* All-zero GID, used as the "empty entry" sentinel throughout this file. */
static struct mlx4_roce_gid_entry zgid_entry;
523
/* Number of RoCE GIDs owned by @slave on @port.
 *
 * The PF (slave 0) always owns MLX4_ROCE_PF_GIDS.  The remaining GIDs
 * are split as evenly as possible among the VFs active on the port; the
 * first (remainder) VFs get one extra GID each.
 */
int mlx4_get_slave_num_gids(struct mlx4_dev *dev, int slave, int port)
{
	int vfs;
	int slave_gid = slave;
	unsigned i;
	struct mlx4_slaves_pport slaves_pport;
	struct mlx4_active_ports actv_ports;
	unsigned max_port_p_one;

	if (slave == 0)
		return MLX4_ROCE_PF_GIDS;

	/* Slave is a VF */
	slaves_pport = mlx4_phys_to_slaves_pport(dev, port);
	actv_ports = mlx4_get_active_ports(dev, slave);
	max_port_p_one = find_first_bit(actv_ports.ports, dev->caps.num_ports) +
		bitmap_weight(actv_ports.ports, dev->caps.num_ports) + 1;

	/* Convert the global slave id into a per-port ordinal by
	 * discounting slaves that are active exclusively on other ports.
	 */
	for (i = 1; i < max_port_p_one; i++) {
		struct mlx4_active_ports exclusive_ports;
		struct mlx4_slaves_pport slaves_pport_actv;
		bitmap_zero(exclusive_ports.ports, dev->caps.num_ports);
		set_bit(i - 1, exclusive_ports.ports);
		if (i == port)
			continue;
		slaves_pport_actv = mlx4_phys_to_slaves_pport_actv(
				    dev, &exclusive_ports);
		slave_gid -= bitmap_weight(slaves_pport_actv.slaves,
					   dev->persist->num_vfs + 1);
	}
	/* VFs on this port = slaves on the port minus the PF */
	vfs = bitmap_weight(slaves_pport.slaves, dev->persist->num_vfs + 1) - 1;
	if (slave_gid <= ((MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS) % vfs))
		return ((MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS) / vfs) + 1;
	return (MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS) / vfs;
}
559
/* First GID-table index owned by @slave on @port.
 *
 * Mirrors the distribution computed by mlx4_get_slave_num_gids(): the
 * PF owns indices [0, MLX4_ROCE_PF_GIDS); VF ranges follow, with the
 * first (remainder) VFs holding one extra GID each.
 */
int mlx4_get_base_gid_ix(struct mlx4_dev *dev, int slave, int port)
{
	int gids;
	unsigned i;
	int slave_gid = slave;
	int vfs;

	struct mlx4_slaves_pport slaves_pport;
	struct mlx4_active_ports actv_ports;
	unsigned max_port_p_one;

	if (slave == 0)
		return 0;

	slaves_pport = mlx4_phys_to_slaves_pport(dev, port);
	actv_ports = mlx4_get_active_ports(dev, slave);
	max_port_p_one = find_first_bit(actv_ports.ports, dev->caps.num_ports) +
		bitmap_weight(actv_ports.ports, dev->caps.num_ports) + 1;

	/* Convert the global slave id into a per-port ordinal by
	 * discounting slaves that are active exclusively on other ports.
	 */
	for (i = 1; i < max_port_p_one; i++) {
		struct mlx4_active_ports exclusive_ports;
		struct mlx4_slaves_pport slaves_pport_actv;
		bitmap_zero(exclusive_ports.ports, dev->caps.num_ports);
		set_bit(i - 1, exclusive_ports.ports);
		if (i == port)
			continue;
		slaves_pport_actv = mlx4_phys_to_slaves_pport_actv(
				    dev, &exclusive_ports);
		slave_gid -= bitmap_weight(slaves_pport_actv.slaves,
					   dev->persist->num_vfs + 1);
	}
	gids = MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS;
	/* VFs on this port = slaves on the port minus the PF */
	vfs = bitmap_weight(slaves_pport.slaves, dev->persist->num_vfs + 1) - 1;
	if (slave_gid <= gids % vfs)
		return MLX4_ROCE_PF_GIDS + ((gids / vfs) + 1) * (slave_gid - 1);

	return MLX4_ROCE_PF_GIDS + (gids % vfs) +
		((gids / vfs) * (slave_gid - 1));
}
EXPORT_SYMBOL_GPL(mlx4_get_base_gid_ix);
600
/* Zero out @slave's GID range on @port in the cached GID table and push
 * the whole table to the firmware via SET_PORT.
 * @mailbox: caller-provided command mailbox (reused across ports).
 */
static int mlx4_reset_roce_port_gids(struct mlx4_dev *dev, int slave,
				     int port, struct mlx4_cmd_mailbox *mailbox)
{
	struct mlx4_roce_gid_entry *gid_entry_mbox;
	struct mlx4_priv *priv = mlx4_priv(dev);
	int num_gids, base, offset;
	int i, err;

	num_gids = mlx4_get_slave_num_gids(dev, slave, port);
	base = mlx4_get_base_gid_ix(dev, slave, port);

	memset(mailbox->buf, 0, MLX4_MAILBOX_SIZE);

	mutex_lock(&(priv->port[port].gid_table.mutex));
	/* Zero-out gids belonging to that slave in the port GID table */
	for (i = 0, offset = base; i < num_gids; offset++, i++)
		memcpy(priv->port[port].gid_table.roce_gids[offset].raw,
		       zgid_entry.raw, MLX4_ROCE_GID_ENTRY_SIZE);

	/* Now, copy roce port gids table to mailbox for passing to FW */
	gid_entry_mbox = (struct mlx4_roce_gid_entry *)mailbox->buf;
	for (i = 0; i < MLX4_ROCE_MAX_GIDS; gid_entry_mbox++, i++)
		memcpy(gid_entry_mbox->raw,
		       priv->port[port].gid_table.roce_gids[i].raw,
		       MLX4_ROCE_GID_ENTRY_SIZE);

	err = mlx4_cmd(dev, mailbox->dma,
		       ((u32)port) | (MLX4_SET_PORT_GID_TABLE << 8), 1,
		       MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
		       MLX4_CMD_NATIVE);
	mutex_unlock(&(priv->port[port].gid_table.mutex));
	return err;
}
634
635
636 void mlx4_reset_roce_gids(struct mlx4_dev *dev, int slave)
637 {
638 struct mlx4_active_ports actv_ports;
639 struct mlx4_cmd_mailbox *mailbox;
640 int num_eth_ports, err;
641 int i;
642
643 if (slave < 0 || slave > dev->persist->num_vfs)
644 return;
645
646 actv_ports = mlx4_get_active_ports(dev, slave);
647
648 for (i = 0, num_eth_ports = 0; i < dev->caps.num_ports; i++) {
649 if (test_bit(i, actv_ports.ports)) {
650 if (dev->caps.port_type[i + 1] != MLX4_PORT_TYPE_ETH)
651 continue;
652 num_eth_ports++;
653 }
654 }
655
656 if (!num_eth_ports)
657 return;
658
659 /* have ETH ports. Alloc mailbox for SET_PORT command */
660 mailbox = mlx4_alloc_cmd_mailbox(dev);
661 if (IS_ERR(mailbox))
662 return;
663
664 for (i = 0; i < dev->caps.num_ports; i++) {
665 if (test_bit(i, actv_ports.ports)) {
666 if (dev->caps.port_type[i + 1] != MLX4_PORT_TYPE_ETH)
667 continue;
668 err = mlx4_reset_roce_port_gids(dev, slave, i + 1, mailbox);
669 if (err)
670 mlx4_warn(dev, "Could not reset ETH port GID table for slave %d, port %d (%d)\n",
671 slave, i + 1, err);
672 }
673 }
674
675 mlx4_free_cmd_mailbox(dev, mailbox);
676 return;
677 }
678
/* Master-side core of the SET_PORT wrapper.
 *
 * For ETH ports (op_mod != 0), guests may only issue the GENERAL (MTU)
 * and GID_TABLE sub-commands; RQP_CALC is rewritten with the master's
 * base QPN, GENERAL aggregates the max MTU across all functions, and
 * GID_TABLE validates and merges the slave's GID range into the port
 * GID table before forwarding to firmware.
 *
 * For IB ports, the per-slave capability mask is aggregated across all
 * functions and the QKey-violation-counter reset bit is forwarded.
 */
static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
				u8 op_mod, struct mlx4_cmd_mailbox *inbox)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_port_info *port_info;
	struct mlx4_mfunc_master_ctx *master = &priv->mfunc.master;
	struct mlx4_slave_state *slave_st = &master->slave_state[slave];
	struct mlx4_set_port_rqp_calc_context *qpn_context;
	struct mlx4_set_port_general_context *gen_context;
	struct mlx4_roce_gid_entry *gid_entry_tbl, *gid_entry_mbox, *gid_entry_mb1;
	int reset_qkey_viols;
	int port;
	int is_eth;
	int num_gids;
	int base;
	u32 in_modifier;
	u32 promisc;
	u16 mtu, prev_mtu;
	int err;
	int i, j;
	int offset;
	__be32 agg_cap_mask;
	__be32 slave_cap_mask;
	__be32 new_cap_mask;

	/* in_mod layout: low byte = port, upper bytes = sub-command */
	port = in_mod & 0xff;
	in_modifier = in_mod >> 8;
	is_eth = op_mod;
	port_info = &priv->port[port];

	/* Slaves cannot perform SET_PORT operations except changing MTU */
	if (is_eth) {
		if (slave != dev->caps.function &&
		    in_modifier != MLX4_SET_PORT_GENERAL &&
		    in_modifier != MLX4_SET_PORT_GID_TABLE) {
			mlx4_warn(dev, "denying SET_PORT for slave:%d\n",
				  slave);
			return -EINVAL;
		}
		switch (in_modifier) {
		case MLX4_SET_PORT_RQP_CALC:
			/* Force the master's base QPN into the context,
			 * preserving the promisc bits the caller set.
			 */
			qpn_context = inbox->buf;
			qpn_context->base_qpn =
				cpu_to_be32(port_info->base_qpn);
			qpn_context->n_mac = 0x7;
			promisc = be32_to_cpu(qpn_context->promisc) >>
				SET_PORT_PROMISC_SHIFT;
			qpn_context->promisc = cpu_to_be32(
				promisc << SET_PORT_PROMISC_SHIFT |
				port_info->base_qpn);
			promisc = be32_to_cpu(qpn_context->mcast) >>
				SET_PORT_MC_PROMISC_SHIFT;
			qpn_context->mcast = cpu_to_be32(
				promisc << SET_PORT_MC_PROMISC_SHIFT |
				port_info->base_qpn);
			break;
		case MLX4_SET_PORT_GENERAL:
			gen_context = inbox->buf;
			/* Mtu is configured as the max MTU among all
			 * the functions on the port. */
			mtu = be16_to_cpu(gen_context->mtu);
			mtu = min_t(int, mtu, dev->caps.eth_mtu_cap[port] +
				    ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN);
			prev_mtu = slave_st->mtu[port];
			slave_st->mtu[port] = mtu;
			if (mtu > master->max_mtu[port])
				master->max_mtu[port] = mtu;
			if (mtu < prev_mtu && prev_mtu ==
			    master->max_mtu[port]) {
				/* This slave held the max; recompute the
				 * max over all slaves.
				 */
				slave_st->mtu[port] = mtu;
				master->max_mtu[port] = mtu;
				for (i = 0; i < dev->num_slaves; i++) {
					master->max_mtu[port] =
						max(master->max_mtu[port],
						    master->slave_state[i].mtu[port]);
				}
			}

			gen_context->mtu = cpu_to_be16(master->max_mtu[port]);
			break;
		case MLX4_SET_PORT_GID_TABLE:
			/* change to MULTIPLE entries: number of guest's gids
			 * need a FOR-loop here over number of gids the guest has.
			 * 1. Check no duplicates in gids passed by slave
			 */
			num_gids = mlx4_get_slave_num_gids(dev, slave, port);
			base = mlx4_get_base_gid_ix(dev, slave, port);
			gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf);
			for (i = 0; i < num_gids; gid_entry_mbox++, i++) {
				if (!memcmp(gid_entry_mbox->raw, zgid_entry.raw,
					    sizeof(zgid_entry)))
					continue;
				gid_entry_mb1 = gid_entry_mbox + 1;
				for (j = i + 1; j < num_gids; gid_entry_mb1++, j++) {
					if (!memcmp(gid_entry_mb1->raw,
						    zgid_entry.raw, sizeof(zgid_entry)))
						continue;
					if (!memcmp(gid_entry_mb1->raw, gid_entry_mbox->raw,
						    sizeof(gid_entry_mbox->raw))) {
						/* found duplicate */
						return -EINVAL;
					}
				}
			}

			/* 2. Check that do not have duplicates in OTHER
			 * entries in the port GID table
			 */

			mutex_lock(&(priv->port[port].gid_table.mutex));
			for (i = 0; i < MLX4_ROCE_MAX_GIDS; i++) {
				if (i >= base && i < base + num_gids)
					continue; /* don't compare to slave's current gids */
				gid_entry_tbl = &priv->port[port].gid_table.roce_gids[i];
				if (!memcmp(gid_entry_tbl->raw, zgid_entry.raw, sizeof(zgid_entry)))
					continue;
				gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf);
				for (j = 0; j < num_gids; gid_entry_mbox++, j++) {
					if (!memcmp(gid_entry_mbox->raw, zgid_entry.raw,
						    sizeof(zgid_entry)))
						continue;
					if (!memcmp(gid_entry_mbox->raw, gid_entry_tbl->raw,
						    sizeof(gid_entry_tbl->raw))) {
						/* found duplicate */
						mlx4_warn(dev, "requested gid entry for slave:%d is a duplicate of gid at index %d\n",
							  slave, i);
						mutex_unlock(&(priv->port[port].gid_table.mutex));
						return -EINVAL;
					}
				}
			}

			/* insert slave GIDs with memcpy, starting at slave's base index */
			gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf);
			for (i = 0, offset = base; i < num_gids; gid_entry_mbox++, offset++, i++)
				memcpy(priv->port[port].gid_table.roce_gids[offset].raw,
				       gid_entry_mbox->raw, MLX4_ROCE_GID_ENTRY_SIZE);

			/* Now, copy roce port gids table to current mailbox for passing to FW */
			gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf);
			for (i = 0; i < MLX4_ROCE_MAX_GIDS; gid_entry_mbox++, i++)
				memcpy(gid_entry_mbox->raw,
				       priv->port[port].gid_table.roce_gids[i].raw,
				       MLX4_ROCE_GID_ENTRY_SIZE);

			err = mlx4_cmd(dev, inbox->dma, in_mod & 0xffff, op_mod,
				       MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
				       MLX4_CMD_NATIVE);
			mutex_unlock(&(priv->port[port].gid_table.mutex));
			return err;
		}

		return mlx4_cmd(dev, inbox->dma, in_mod & 0xffff, op_mod,
				MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
				MLX4_CMD_NATIVE);
	}

	/* For IB, we only consider:
	 * - The capability mask, which is set to the aggregate of all
	 *   slave function capabilities
	 * - The QKey violatin counter - reset according to each request.
	 */

	if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
		reset_qkey_viols = (*(u8 *) inbox->buf) & 0x40;
		new_cap_mask = ((__be32 *) inbox->buf)[2];
	} else {
		reset_qkey_viols = ((u8 *) inbox->buf)[3] & 0x1;
		new_cap_mask = ((__be32 *) inbox->buf)[1];
	}

	/* slave may not set the IS_SM capability for the port */
	if (slave != mlx4_master_func_num(dev) &&
	    (be32_to_cpu(new_cap_mask) & MLX4_PORT_CAP_IS_SM))
		return -EINVAL;

	/* No DEV_MGMT in multifunc mode */
	if (mlx4_is_mfunc(dev) &&
	    (be32_to_cpu(new_cap_mask) & MLX4_PORT_CAP_DEV_MGMT_SUP))
		return -EINVAL;

	/* Record the new mask for this slave, then OR together the masks
	 * of all slaves to form the aggregate programmed into firmware.
	 */
	agg_cap_mask = 0;
	slave_cap_mask =
		priv->mfunc.master.slave_state[slave].ib_cap_mask[port];
	priv->mfunc.master.slave_state[slave].ib_cap_mask[port] = new_cap_mask;
	for (i = 0; i < dev->num_slaves; i++)
		agg_cap_mask |=
			priv->mfunc.master.slave_state[i].ib_cap_mask[port];

	/* only clear mailbox for guests. Master may be setting
	 * MTU or PKEY table size
	 */
	if (slave != dev->caps.function)
		memset(inbox->buf, 0, 256);
	if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
		*(u8 *) inbox->buf	   |= !!reset_qkey_viols << 6;
		((__be32 *) inbox->buf)[2] = agg_cap_mask;
	} else {
		((u8 *) inbox->buf)[3]     |= !!reset_qkey_viols;
		((__be32 *) inbox->buf)[1] = agg_cap_mask;
	}

	err = mlx4_cmd(dev, inbox->dma, port, is_eth, MLX4_CMD_SET_PORT,
		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
	if (err)
		/* restore the previous mask on failure */
		priv->mfunc.master.slave_state[slave].ib_cap_mask[port] =
			slave_cap_mask;
	return err;
}
888
889 int mlx4_SET_PORT_wrapper(struct mlx4_dev *dev, int slave,
890 struct mlx4_vhcr *vhcr,
891 struct mlx4_cmd_mailbox *inbox,
892 struct mlx4_cmd_mailbox *outbox,
893 struct mlx4_cmd_info *cmd)
894 {
895 int port = mlx4_slave_convert_port(
896 dev, slave, vhcr->in_modifier & 0xFF);
897
898 if (port < 0)
899 return -EINVAL;
900
901 vhcr->in_modifier = (vhcr->in_modifier & ~0xFF) |
902 (port & 0xFF);
903
904 return mlx4_common_set_port(dev, slave, vhcr->in_modifier,
905 vhcr->op_modifier, inbox);
906 }
907
/* bit locations for set port command with zero op modifier */
enum {
	MLX4_SET_PORT_VL_CAP	 = 4, /* bits 7:4 */
	MLX4_SET_PORT_MTU_CAP	 = 12, /* bits 15:12 */
	/* single-bit "change" flags enabling the fields above */
	MLX4_CHANGE_PORT_PKEY_TBL_SZ = 20,
	MLX4_CHANGE_PORT_VL_CAP	 = 21,
	MLX4_CHANGE_PORT_MTU_CAP = 22,
};
916
/* Initialize an IB port: program MTU cap, VL cap and (on the master,
 * when @pkey_tbl_sz >= 0) the PKey table size.
 *
 * The firmware may reject a VL cap with -ENOMEM; retry with
 * progressively smaller VL caps (8, 4, 2, 1) until accepted or a
 * different error occurs.  ETH ports are a no-op.
 */
int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port, int pkey_tbl_sz)
{
	struct mlx4_cmd_mailbox *mailbox;
	int err, vl_cap, pkey_tbl_flag = 0;

	if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH)
		return 0;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	((__be32 *) mailbox->buf)[1] = dev->caps.ib_port_def_cap[port];

	if (pkey_tbl_sz >= 0 && mlx4_is_master(dev)) {
		pkey_tbl_flag = 1;
		((__be16 *) mailbox->buf)[20] = cpu_to_be16(pkey_tbl_sz);
	}

	/* IB VL CAP enum isn't used by the firmware, just numerical values */
	for (vl_cap = 8; vl_cap >= 1; vl_cap >>= 1) {
		((__be32 *) mailbox->buf)[0] = cpu_to_be32(
			(1 << MLX4_CHANGE_PORT_MTU_CAP) |
			(1 << MLX4_CHANGE_PORT_VL_CAP)	|
			(pkey_tbl_flag << MLX4_CHANGE_PORT_PKEY_TBL_SZ) |
			(dev->caps.port_ib_mtu[port] << MLX4_SET_PORT_MTU_CAP) |
			(vl_cap << MLX4_SET_PORT_VL_CAP));
		err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_SET_PORT,
			       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
		/* keep shrinking vl_cap only while FW reports -ENOMEM */
		if (err != -ENOMEM)
			break;
	}

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
953
954 int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu,
955 u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx)
956 {
957 struct mlx4_cmd_mailbox *mailbox;
958 struct mlx4_set_port_general_context *context;
959 int err;
960 u32 in_mod;
961
962 mailbox = mlx4_alloc_cmd_mailbox(dev);
963 if (IS_ERR(mailbox))
964 return PTR_ERR(mailbox);
965 context = mailbox->buf;
966 context->flags = SET_PORT_GEN_ALL_VALID;
967 context->mtu = cpu_to_be16(mtu);
968 context->pptx = (pptx * (!pfctx)) << 7;
969 context->pfctx = pfctx;
970 context->pprx = (pprx * (!pfcrx)) << 7;
971 context->pfcrx = pfcrx;
972
973 in_mod = MLX4_SET_PORT_GENERAL << 8 | port;
974 err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
975 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
976
977 mlx4_free_cmd_mailbox(dev, mailbox);
978 return err;
979 }
980 EXPORT_SYMBOL(mlx4_SET_PORT_general);
981
982 int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
983 u8 promisc)
984 {
985 struct mlx4_cmd_mailbox *mailbox;
986 struct mlx4_set_port_rqp_calc_context *context;
987 int err;
988 u32 in_mod;
989 u32 m_promisc = (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) ?
990 MCAST_DIRECT : MCAST_DEFAULT;
991
992 if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
993 return 0;
994
995 mailbox = mlx4_alloc_cmd_mailbox(dev);
996 if (IS_ERR(mailbox))
997 return PTR_ERR(mailbox);
998 context = mailbox->buf;
999 context->base_qpn = cpu_to_be32(base_qpn);
1000 context->n_mac = dev->caps.log_num_macs;
1001 context->promisc = cpu_to_be32(promisc << SET_PORT_PROMISC_SHIFT |
1002 base_qpn);
1003 context->mcast = cpu_to_be32(m_promisc << SET_PORT_MC_PROMISC_SHIFT |
1004 base_qpn);
1005 context->intra_no_vlan = 0;
1006 context->no_vlan = MLX4_NO_VLAN_IDX;
1007 context->intra_vlan_miss = 0;
1008 context->vlan_miss = MLX4_VLAN_MISS_IDX;
1009
1010 in_mod = MLX4_SET_PORT_RQP_CALC << 8 | port;
1011 err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
1012 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
1013
1014 mlx4_free_cmd_mailbox(dev, mailbox);
1015 return err;
1016 }
1017 EXPORT_SYMBOL(mlx4_SET_PORT_qpn_calc);
1018
enum {
	/* modify_flags bits: select which VXLAN fields are being set */
	VXLAN_ENABLE_MODIFY	= 1 << 7,
	VXLAN_STEERING_MODIFY	= 1 << 6,

	/* enable_flags bit: turn VXLAN offload on */
	VXLAN_ENABLE		= 1 << 7,
};
1025
/* Mailbox layout for SET_PORT with the VXLAN opcode modifier. */
struct mlx4_set_port_vxlan_context {
	u32	reserved1;
	u8	modify_flags;	/* VXLAN_*_MODIFY bits */
	u8	reserved2;
	u8	enable_flags;	/* VXLAN_ENABLE */
	u8	steering;
};
1033
1034 int mlx4_SET_PORT_VXLAN(struct mlx4_dev *dev, u8 port, u8 steering, int enable)
1035 {
1036 int err;
1037 u32 in_mod;
1038 struct mlx4_cmd_mailbox *mailbox;
1039 struct mlx4_set_port_vxlan_context *context;
1040
1041 mailbox = mlx4_alloc_cmd_mailbox(dev);
1042 if (IS_ERR(mailbox))
1043 return PTR_ERR(mailbox);
1044 context = mailbox->buf;
1045 memset(context, 0, sizeof(*context));
1046
1047 context->modify_flags = VXLAN_ENABLE_MODIFY | VXLAN_STEERING_MODIFY;
1048 if (enable)
1049 context->enable_flags = VXLAN_ENABLE;
1050 context->steering = steering;
1051
1052 in_mod = MLX4_SET_PORT_VXLAN << 8 | port;
1053 err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
1054 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
1055
1056 mlx4_free_cmd_mailbox(dev, mailbox);
1057 return err;
1058 }
1059 EXPORT_SYMBOL(mlx4_SET_PORT_VXLAN);
1060
1061 int mlx4_SET_MCAST_FLTR_wrapper(struct mlx4_dev *dev, int slave,
1062 struct mlx4_vhcr *vhcr,
1063 struct mlx4_cmd_mailbox *inbox,
1064 struct mlx4_cmd_mailbox *outbox,
1065 struct mlx4_cmd_info *cmd)
1066 {
1067 int err = 0;
1068
1069 return err;
1070 }
1071
1072 int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port,
1073 u64 mac, u64 clear, u8 mode)
1074 {
1075 return mlx4_cmd(dev, (mac | (clear << 63)), port, mode,
1076 MLX4_CMD_SET_MCAST_FLTR, MLX4_CMD_TIME_CLASS_B,
1077 MLX4_CMD_WRAPPED);
1078 }
1079 EXPORT_SYMBOL(mlx4_SET_MCAST_FLTR);
1080
1081 int mlx4_SET_VLAN_FLTR_wrapper(struct mlx4_dev *dev, int slave,
1082 struct mlx4_vhcr *vhcr,
1083 struct mlx4_cmd_mailbox *inbox,
1084 struct mlx4_cmd_mailbox *outbox,
1085 struct mlx4_cmd_info *cmd)
1086 {
1087 int err = 0;
1088
1089 return err;
1090 }
1091
1092 int mlx4_common_dump_eth_stats(struct mlx4_dev *dev, int slave,
1093 u32 in_mod, struct mlx4_cmd_mailbox *outbox)
1094 {
1095 return mlx4_cmd_box(dev, 0, outbox->dma, in_mod, 0,
1096 MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B,
1097 MLX4_CMD_NATIVE);
1098 }
1099
1100 int mlx4_DUMP_ETH_STATS_wrapper(struct mlx4_dev *dev, int slave,
1101 struct mlx4_vhcr *vhcr,
1102 struct mlx4_cmd_mailbox *inbox,
1103 struct mlx4_cmd_mailbox *outbox,
1104 struct mlx4_cmd_info *cmd)
1105 {
1106 if (slave != dev->caps.function)
1107 return 0;
1108 return mlx4_common_dump_eth_stats(dev, slave,
1109 vhcr->in_modifier, outbox);
1110 }
1111
1112 int mlx4_get_slave_from_roce_gid(struct mlx4_dev *dev, int port, u8 *gid,
1113 int *slave_id)
1114 {
1115 struct mlx4_priv *priv = mlx4_priv(dev);
1116 int i, found_ix = -1;
1117 int vf_gids = MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS;
1118 struct mlx4_slaves_pport slaves_pport;
1119 unsigned num_vfs;
1120 int slave_gid;
1121
1122 if (!mlx4_is_mfunc(dev))
1123 return -EINVAL;
1124
1125 slaves_pport = mlx4_phys_to_slaves_pport(dev, port);
1126 num_vfs = bitmap_weight(slaves_pport.slaves,
1127 dev->persist->num_vfs + 1) - 1;
1128
1129 for (i = 0; i < MLX4_ROCE_MAX_GIDS; i++) {
1130 if (!memcmp(priv->port[port].gid_table.roce_gids[i].raw, gid,
1131 MLX4_ROCE_GID_ENTRY_SIZE)) {
1132 found_ix = i;
1133 break;
1134 }
1135 }
1136
1137 if (found_ix >= 0) {
1138 /* Calculate a slave_gid which is the slave number in the gid
1139 * table and not a globally unique slave number.
1140 */
1141 if (found_ix < MLX4_ROCE_PF_GIDS)
1142 slave_gid = 0;
1143 else if (found_ix < MLX4_ROCE_PF_GIDS + (vf_gids % num_vfs) *
1144 (vf_gids / num_vfs + 1))
1145 slave_gid = ((found_ix - MLX4_ROCE_PF_GIDS) /
1146 (vf_gids / num_vfs + 1)) + 1;
1147 else
1148 slave_gid =
1149 ((found_ix - MLX4_ROCE_PF_GIDS -
1150 ((vf_gids % num_vfs) * ((vf_gids / num_vfs + 1)))) /
1151 (vf_gids / num_vfs)) + vf_gids % num_vfs + 1;
1152
1153 /* Calculate the globally unique slave id */
1154 if (slave_gid) {
1155 struct mlx4_active_ports exclusive_ports;
1156 struct mlx4_active_ports actv_ports;
1157 struct mlx4_slaves_pport slaves_pport_actv;
1158 unsigned max_port_p_one;
1159 int num_vfs_before = 0;
1160 int candidate_slave_gid;
1161
1162 /* Calculate how many VFs are on the previous port, if exists */
1163 for (i = 1; i < port; i++) {
1164 bitmap_zero(exclusive_ports.ports, dev->caps.num_ports);
1165 set_bit(i - 1, exclusive_ports.ports);
1166 slaves_pport_actv =
1167 mlx4_phys_to_slaves_pport_actv(
1168 dev, &exclusive_ports);
1169 num_vfs_before += bitmap_weight(
1170 slaves_pport_actv.slaves,
1171 dev->persist->num_vfs + 1);
1172 }
1173
1174 /* candidate_slave_gid isn't necessarily the correct slave, but
1175 * it has the same number of ports and is assigned to the same
1176 * ports as the real slave we're looking for. On dual port VF,
1177 * slave_gid = [single port VFs on port <port>] +
1178 * [offset of the current slave from the first dual port VF] +
1179 * 1 (for the PF).
1180 */
1181 candidate_slave_gid = slave_gid + num_vfs_before;
1182
1183 actv_ports = mlx4_get_active_ports(dev, candidate_slave_gid);
1184 max_port_p_one = find_first_bit(
1185 actv_ports.ports, dev->caps.num_ports) +
1186 bitmap_weight(actv_ports.ports,
1187 dev->caps.num_ports) + 1;
1188
1189 /* Calculate the real slave number */
1190 for (i = 1; i < max_port_p_one; i++) {
1191 if (i == port)
1192 continue;
1193 bitmap_zero(exclusive_ports.ports,
1194 dev->caps.num_ports);
1195 set_bit(i - 1, exclusive_ports.ports);
1196 slaves_pport_actv =
1197 mlx4_phys_to_slaves_pport_actv(
1198 dev, &exclusive_ports);
1199 slave_gid += bitmap_weight(
1200 slaves_pport_actv.slaves,
1201 dev->persist->num_vfs + 1);
1202 }
1203 }
1204 *slave_id = slave_gid;
1205 }
1206
1207 return (found_ix >= 0) ? 0 : -EINVAL;
1208 }
1209 EXPORT_SYMBOL(mlx4_get_slave_from_roce_gid);
1210
1211 int mlx4_get_roce_gid_from_slave(struct mlx4_dev *dev, int port, int slave_id,
1212 u8 *gid)
1213 {
1214 struct mlx4_priv *priv = mlx4_priv(dev);
1215
1216 if (!mlx4_is_master(dev))
1217 return -EINVAL;
1218
1219 memcpy(gid, priv->port[port].gid_table.roce_gids[slave_id].raw,
1220 MLX4_ROCE_GID_ENTRY_SIZE);
1221 return 0;
1222 }
1223 EXPORT_SYMBOL(mlx4_get_roce_gid_from_slave);
1224
1225 /* Cable Module Info */
1226 #define MODULE_INFO_MAX_READ 48
1227
1228 #define I2C_ADDR_LOW 0x50
1229 #define I2C_ADDR_HIGH 0x51
1230 #define I2C_PAGE_SIZE 256
1231
1232 /* Module Info Data */
1233 struct mlx4_cable_info {
1234 u8 i2c_addr;
1235 u8 page_num;
1236 __be16 dev_mem_address;
1237 __be16 reserved1;
1238 __be16 size;
1239 __be32 reserved2[2];
1240 u8 data[MODULE_INFO_MAX_READ];
1241 };
1242
1243 enum cable_info_err {
1244 CABLE_INF_INV_PORT = 0x1,
1245 CABLE_INF_OP_NOSUP = 0x2,
1246 CABLE_INF_NOT_CONN = 0x3,
1247 CABLE_INF_NO_EEPRM = 0x4,
1248 CABLE_INF_PAGE_ERR = 0x5,
1249 CABLE_INF_INV_ADDR = 0x6,
1250 CABLE_INF_I2C_ADDR = 0x7,
1251 CABLE_INF_QSFP_VIO = 0x8,
1252 CABLE_INF_I2C_BUSY = 0x9,
1253 };
1254
1255 #define MAD_STATUS_2_CABLE_ERR(mad_status) ((mad_status >> 8) & 0xFF)
1256
1257 static inline const char *cable_info_mad_err_str(u16 mad_status)
1258 {
1259 u8 err = MAD_STATUS_2_CABLE_ERR(mad_status);
1260
1261 switch (err) {
1262 case CABLE_INF_INV_PORT:
1263 return "invalid port selected";
1264 case CABLE_INF_OP_NOSUP:
1265 return "operation not supported for this port (the port is of type CX4 or internal)";
1266 case CABLE_INF_NOT_CONN:
1267 return "cable is not connected";
1268 case CABLE_INF_NO_EEPRM:
1269 return "the connected cable has no EPROM (passive copper cable)";
1270 case CABLE_INF_PAGE_ERR:
1271 return "page number is greater than 15";
1272 case CABLE_INF_INV_ADDR:
1273 return "invalid device_address or size (that is, size equals 0 or address+size is greater than 256)";
1274 case CABLE_INF_I2C_ADDR:
1275 return "invalid I2C slave address";
1276 case CABLE_INF_QSFP_VIO:
1277 return "at least one cable violates the QSFP specification and ignores the modsel signal";
1278 case CABLE_INF_I2C_BUSY:
1279 return "I2C bus is constantly busy";
1280 }
1281 return "Unknown Error";
1282 }
1283
1284 /**
1285 * mlx4_get_module_info - Read cable module eeprom data
1286 * @dev: mlx4_dev.
1287 * @port: port number.
1288 * @offset: byte offset in eeprom to start reading data from.
1289 * @size: num of bytes to read.
1290 * @data: output buffer to put the requested data into.
1291 *
1292 * Reads cable module eeprom data, puts the outcome data into
1293 * data pointer paramer.
1294 * Returns num of read bytes on success or a negative error
1295 * code.
1296 */
1297 int mlx4_get_module_info(struct mlx4_dev *dev, u8 port,
1298 u16 offset, u16 size, u8 *data)
1299 {
1300 struct mlx4_cmd_mailbox *inbox, *outbox;
1301 struct mlx4_mad_ifc *inmad, *outmad;
1302 struct mlx4_cable_info *cable_info;
1303 u16 i2c_addr;
1304 int ret;
1305
1306 if (size > MODULE_INFO_MAX_READ)
1307 size = MODULE_INFO_MAX_READ;
1308
1309 inbox = mlx4_alloc_cmd_mailbox(dev);
1310 if (IS_ERR(inbox))
1311 return PTR_ERR(inbox);
1312
1313 outbox = mlx4_alloc_cmd_mailbox(dev);
1314 if (IS_ERR(outbox)) {
1315 mlx4_free_cmd_mailbox(dev, inbox);
1316 return PTR_ERR(outbox);
1317 }
1318
1319 inmad = (struct mlx4_mad_ifc *)(inbox->buf);
1320 outmad = (struct mlx4_mad_ifc *)(outbox->buf);
1321
1322 inmad->method = 0x1; /* Get */
1323 inmad->class_version = 0x1;
1324 inmad->mgmt_class = 0x1;
1325 inmad->base_version = 0x1;
1326 inmad->attr_id = cpu_to_be16(0xFF60); /* Module Info */
1327
1328 if (offset < I2C_PAGE_SIZE && offset + size > I2C_PAGE_SIZE)
1329 /* Cross pages reads are not allowed
1330 * read until offset 256 in low page
1331 */
1332 size -= offset + size - I2C_PAGE_SIZE;
1333
1334 i2c_addr = I2C_ADDR_LOW;
1335 if (offset >= I2C_PAGE_SIZE) {
1336 /* Reset offset to high page */
1337 i2c_addr = I2C_ADDR_HIGH;
1338 offset -= I2C_PAGE_SIZE;
1339 }
1340
1341 cable_info = (struct mlx4_cable_info *)inmad->data;
1342 cable_info->dev_mem_address = cpu_to_be16(offset);
1343 cable_info->page_num = 0;
1344 cable_info->i2c_addr = i2c_addr;
1345 cable_info->size = cpu_to_be16(size);
1346
1347 ret = mlx4_cmd_box(dev, inbox->dma, outbox->dma, port, 3,
1348 MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
1349 MLX4_CMD_NATIVE);
1350 if (ret)
1351 goto out;
1352
1353 if (be16_to_cpu(outmad->status)) {
1354 /* Mad returned with bad status */
1355 ret = be16_to_cpu(outmad->status);
1356 mlx4_warn(dev,
1357 "MLX4_CMD_MAD_IFC Get Module info attr(%x) port(%d) i2c_addr(%x) offset(%d) size(%d): Response Mad Status(%x) - %s\n",
1358 0xFF60, port, i2c_addr, offset, size,
1359 ret, cable_info_mad_err_str(ret));
1360
1361 if (i2c_addr == I2C_ADDR_HIGH &&
1362 MAD_STATUS_2_CABLE_ERR(ret) == CABLE_INF_I2C_ADDR)
1363 /* Some SFP cables do not support i2c slave
1364 * address 0x51 (high page), abort silently.
1365 */
1366 ret = 0;
1367 else
1368 ret = -ret;
1369 goto out;
1370 }
1371 cable_info = (struct mlx4_cable_info *)outmad->data;
1372 memcpy(data, cable_info->data, size);
1373 ret = size;
1374 out:
1375 mlx4_free_cmd_mailbox(dev, inbox);
1376 mlx4_free_cmd_mailbox(dev, outbox);
1377 return ret;
1378 }
1379 EXPORT_SYMBOL(mlx4_get_module_info);
This page took 0.059665 seconds and 5 git commands to generate.