2 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 #include <linux/errno.h>
34 #include <linux/if_ether.h>
35 #include <linux/if_vlan.h>
36 #include <linux/export.h>
38 #include <linux/mlx4/cmd.h>
41 #include "mlx4_stats.h"
43 #define MLX4_MAC_VALID (1ull << 63)
45 #define MLX4_VLAN_VALID (1u << 31)
46 #define MLX4_VLAN_MASK 0xfff
48 #define MLX4_STATS_TRAFFIC_COUNTERS_MASK 0xfULL
49 #define MLX4_STATS_TRAFFIC_DROPS_MASK 0xc0ULL
50 #define MLX4_STATS_ERROR_COUNTERS_MASK 0x1ffc30ULL
51 #define MLX4_STATS_PORT_COUNTERS_MASK 0x1fe00000ULL
53 #define MLX4_FLAG_V_IGNORE_FCS_MASK 0x2
54 #define MLX4_IGNORE_FCS_MASK 0x1
56 void mlx4_init_mac_table(struct mlx4_dev
*dev
, struct mlx4_mac_table
*table
)
60 mutex_init(&table
->mutex
);
61 for (i
= 0; i
< MLX4_MAX_MAC_NUM
; i
++) {
62 table
->entries
[i
] = 0;
65 table
->max
= 1 << dev
->caps
.log_num_macs
;
69 void mlx4_init_vlan_table(struct mlx4_dev
*dev
, struct mlx4_vlan_table
*table
)
73 mutex_init(&table
->mutex
);
74 for (i
= 0; i
< MLX4_MAX_VLAN_NUM
; i
++) {
75 table
->entries
[i
] = 0;
78 table
->max
= (1 << dev
->caps
.log_num_vlans
) - MLX4_VLAN_REGULAR
;
82 void mlx4_init_roce_gid_table(struct mlx4_dev
*dev
,
83 struct mlx4_roce_gid_table
*table
)
87 mutex_init(&table
->mutex
);
88 for (i
= 0; i
< MLX4_ROCE_MAX_GIDS
; i
++)
89 memset(table
->roce_gids
[i
].raw
, 0, MLX4_ROCE_GID_ENTRY_SIZE
);
92 static int validate_index(struct mlx4_dev
*dev
,
93 struct mlx4_mac_table
*table
, int index
)
97 if (index
< 0 || index
>= table
->max
|| !table
->entries
[index
]) {
98 mlx4_warn(dev
, "No valid Mac entry for the given index\n");
104 static int find_index(struct mlx4_dev
*dev
,
105 struct mlx4_mac_table
*table
, u64 mac
)
109 for (i
= 0; i
< MLX4_MAX_MAC_NUM
; i
++) {
110 if (table
->refs
[i
] &&
111 (MLX4_MAC_MASK
& mac
) ==
112 (MLX4_MAC_MASK
& be64_to_cpu(table
->entries
[i
])))
119 static int mlx4_set_port_mac_table(struct mlx4_dev
*dev
, u8 port
,
122 struct mlx4_cmd_mailbox
*mailbox
;
126 mailbox
= mlx4_alloc_cmd_mailbox(dev
);
128 return PTR_ERR(mailbox
);
130 memcpy(mailbox
->buf
, entries
, MLX4_MAC_TABLE_SIZE
);
132 in_mod
= MLX4_SET_PORT_MAC_TABLE
<< 8 | port
;
134 err
= mlx4_cmd(dev
, mailbox
->dma
, in_mod
, MLX4_SET_PORT_ETH_OPCODE
,
135 MLX4_CMD_SET_PORT
, MLX4_CMD_TIME_CLASS_B
,
138 mlx4_free_cmd_mailbox(dev
, mailbox
);
142 int mlx4_find_cached_mac(struct mlx4_dev
*dev
, u8 port
, u64 mac
, int *idx
)
144 struct mlx4_port_info
*info
= &mlx4_priv(dev
)->port
[port
];
145 struct mlx4_mac_table
*table
= &info
->mac_table
;
148 for (i
= 0; i
< MLX4_MAX_MAC_NUM
; i
++) {
152 if (mac
== (MLX4_MAC_MASK
& be64_to_cpu(table
->entries
[i
]))) {
160 EXPORT_SYMBOL_GPL(mlx4_find_cached_mac
);
162 int __mlx4_register_mac(struct mlx4_dev
*dev
, u8 port
, u64 mac
)
164 struct mlx4_port_info
*info
= &mlx4_priv(dev
)->port
[port
];
165 struct mlx4_mac_table
*table
= &info
->mac_table
;
169 mlx4_dbg(dev
, "Registering MAC: 0x%llx for port %d\n",
170 (unsigned long long) mac
, port
);
172 mutex_lock(&table
->mutex
);
173 for (i
= 0; i
< MLX4_MAX_MAC_NUM
; i
++) {
174 if (!table
->refs
[i
]) {
180 if ((MLX4_MAC_MASK
& mac
) ==
181 (MLX4_MAC_MASK
& be64_to_cpu(table
->entries
[i
]))) {
182 /* MAC already registered, increment ref count */
189 mlx4_dbg(dev
, "Free MAC index is %d\n", free
);
191 if (table
->total
== table
->max
) {
192 /* No free mac entries */
197 /* Register new MAC */
198 table
->entries
[free
] = cpu_to_be64(mac
| MLX4_MAC_VALID
);
200 err
= mlx4_set_port_mac_table(dev
, port
, table
->entries
);
202 mlx4_err(dev
, "Failed adding MAC: 0x%llx\n",
203 (unsigned long long) mac
);
204 table
->entries
[free
] = 0;
207 table
->refs
[free
] = 1;
211 mutex_unlock(&table
->mutex
);
214 EXPORT_SYMBOL_GPL(__mlx4_register_mac
);
216 int mlx4_register_mac(struct mlx4_dev
*dev
, u8 port
, u64 mac
)
221 if (mlx4_is_mfunc(dev
)) {
222 if (!(dev
->flags
& MLX4_FLAG_OLD_REG_MAC
)) {
223 err
= mlx4_cmd_imm(dev
, mac
, &out_param
,
224 ((u32
) port
) << 8 | (u32
) RES_MAC
,
225 RES_OP_RESERVE_AND_MAP
, MLX4_CMD_ALLOC_RES
,
226 MLX4_CMD_TIME_CLASS_A
, MLX4_CMD_WRAPPED
);
228 if (err
&& err
== -EINVAL
&& mlx4_is_slave(dev
)) {
229 /* retry using old REG_MAC format */
230 set_param_l(&out_param
, port
);
231 err
= mlx4_cmd_imm(dev
, mac
, &out_param
, RES_MAC
,
232 RES_OP_RESERVE_AND_MAP
, MLX4_CMD_ALLOC_RES
,
233 MLX4_CMD_TIME_CLASS_A
, MLX4_CMD_WRAPPED
);
235 dev
->flags
|= MLX4_FLAG_OLD_REG_MAC
;
240 return get_param_l(&out_param
);
242 return __mlx4_register_mac(dev
, port
, mac
);
244 EXPORT_SYMBOL_GPL(mlx4_register_mac
);
246 int mlx4_get_base_qpn(struct mlx4_dev
*dev
, u8 port
)
248 return dev
->caps
.reserved_qps_base
[MLX4_QP_REGION_ETH_ADDR
] +
249 (port
- 1) * (1 << dev
->caps
.log_num_macs
);
251 EXPORT_SYMBOL_GPL(mlx4_get_base_qpn
);
253 void __mlx4_unregister_mac(struct mlx4_dev
*dev
, u8 port
, u64 mac
)
255 struct mlx4_port_info
*info
;
256 struct mlx4_mac_table
*table
;
259 if (port
< 1 || port
> dev
->caps
.num_ports
) {
260 mlx4_warn(dev
, "invalid port number (%d), aborting...\n", port
);
263 info
= &mlx4_priv(dev
)->port
[port
];
264 table
= &info
->mac_table
;
265 mutex_lock(&table
->mutex
);
266 index
= find_index(dev
, table
, mac
);
268 if (validate_index(dev
, table
, index
))
270 if (--table
->refs
[index
]) {
271 mlx4_dbg(dev
, "Have more references for index %d, no need to modify mac table\n",
276 table
->entries
[index
] = 0;
277 mlx4_set_port_mac_table(dev
, port
, table
->entries
);
280 mutex_unlock(&table
->mutex
);
282 EXPORT_SYMBOL_GPL(__mlx4_unregister_mac
);
284 void mlx4_unregister_mac(struct mlx4_dev
*dev
, u8 port
, u64 mac
)
288 if (mlx4_is_mfunc(dev
)) {
289 if (!(dev
->flags
& MLX4_FLAG_OLD_REG_MAC
)) {
290 (void) mlx4_cmd_imm(dev
, mac
, &out_param
,
291 ((u32
) port
) << 8 | (u32
) RES_MAC
,
292 RES_OP_RESERVE_AND_MAP
, MLX4_CMD_FREE_RES
,
293 MLX4_CMD_TIME_CLASS_A
, MLX4_CMD_WRAPPED
);
295 /* use old unregister mac format */
296 set_param_l(&out_param
, port
);
297 (void) mlx4_cmd_imm(dev
, mac
, &out_param
, RES_MAC
,
298 RES_OP_RESERVE_AND_MAP
, MLX4_CMD_FREE_RES
,
299 MLX4_CMD_TIME_CLASS_A
, MLX4_CMD_WRAPPED
);
303 __mlx4_unregister_mac(dev
, port
, mac
);
306 EXPORT_SYMBOL_GPL(mlx4_unregister_mac
);
308 int __mlx4_replace_mac(struct mlx4_dev
*dev
, u8 port
, int qpn
, u64 new_mac
)
310 struct mlx4_port_info
*info
= &mlx4_priv(dev
)->port
[port
];
311 struct mlx4_mac_table
*table
= &info
->mac_table
;
312 int index
= qpn
- info
->base_qpn
;
315 /* CX1 doesn't support multi-functions */
316 mutex_lock(&table
->mutex
);
318 err
= validate_index(dev
, table
, index
);
322 table
->entries
[index
] = cpu_to_be64(new_mac
| MLX4_MAC_VALID
);
324 err
= mlx4_set_port_mac_table(dev
, port
, table
->entries
);
326 mlx4_err(dev
, "Failed adding MAC: 0x%llx\n",
327 (unsigned long long) new_mac
);
328 table
->entries
[index
] = 0;
331 mutex_unlock(&table
->mutex
);
334 EXPORT_SYMBOL_GPL(__mlx4_replace_mac
);
336 static int mlx4_set_port_vlan_table(struct mlx4_dev
*dev
, u8 port
,
339 struct mlx4_cmd_mailbox
*mailbox
;
343 mailbox
= mlx4_alloc_cmd_mailbox(dev
);
345 return PTR_ERR(mailbox
);
347 memcpy(mailbox
->buf
, entries
, MLX4_VLAN_TABLE_SIZE
);
348 in_mod
= MLX4_SET_PORT_VLAN_TABLE
<< 8 | port
;
349 err
= mlx4_cmd(dev
, mailbox
->dma
, in_mod
, MLX4_SET_PORT_ETH_OPCODE
,
350 MLX4_CMD_SET_PORT
, MLX4_CMD_TIME_CLASS_B
,
353 mlx4_free_cmd_mailbox(dev
, mailbox
);
358 int mlx4_find_cached_vlan(struct mlx4_dev
*dev
, u8 port
, u16 vid
, int *idx
)
360 struct mlx4_vlan_table
*table
= &mlx4_priv(dev
)->port
[port
].vlan_table
;
363 for (i
= 0; i
< MLX4_MAX_VLAN_NUM
; ++i
) {
364 if (table
->refs
[i
] &&
365 (vid
== (MLX4_VLAN_MASK
&
366 be32_to_cpu(table
->entries
[i
])))) {
367 /* VLAN already registered, increase reference count */
375 EXPORT_SYMBOL_GPL(mlx4_find_cached_vlan
);
377 int __mlx4_register_vlan(struct mlx4_dev
*dev
, u8 port
, u16 vlan
,
380 struct mlx4_vlan_table
*table
= &mlx4_priv(dev
)->port
[port
].vlan_table
;
384 mutex_lock(&table
->mutex
);
386 if (table
->total
== table
->max
) {
387 /* No free vlan entries */
392 for (i
= MLX4_VLAN_REGULAR
; i
< MLX4_MAX_VLAN_NUM
; i
++) {
393 if (free
< 0 && (table
->refs
[i
] == 0)) {
398 if (table
->refs
[i
] &&
399 (vlan
== (MLX4_VLAN_MASK
&
400 be32_to_cpu(table
->entries
[i
])))) {
401 /* Vlan already registered, increase references count */
413 /* Register new VLAN */
414 table
->refs
[free
] = 1;
415 table
->entries
[free
] = cpu_to_be32(vlan
| MLX4_VLAN_VALID
);
417 err
= mlx4_set_port_vlan_table(dev
, port
, table
->entries
);
419 mlx4_warn(dev
, "Failed adding vlan: %u\n", vlan
);
420 table
->refs
[free
] = 0;
421 table
->entries
[free
] = 0;
428 mutex_unlock(&table
->mutex
);
432 int mlx4_register_vlan(struct mlx4_dev
*dev
, u8 port
, u16 vlan
, int *index
)
440 if (mlx4_is_mfunc(dev
)) {
441 err
= mlx4_cmd_imm(dev
, vlan
, &out_param
,
442 ((u32
) port
) << 8 | (u32
) RES_VLAN
,
443 RES_OP_RESERVE_AND_MAP
, MLX4_CMD_ALLOC_RES
,
444 MLX4_CMD_TIME_CLASS_A
, MLX4_CMD_WRAPPED
);
446 *index
= get_param_l(&out_param
);
450 return __mlx4_register_vlan(dev
, port
, vlan
, index
);
452 EXPORT_SYMBOL_GPL(mlx4_register_vlan
);
454 void __mlx4_unregister_vlan(struct mlx4_dev
*dev
, u8 port
, u16 vlan
)
456 struct mlx4_vlan_table
*table
= &mlx4_priv(dev
)->port
[port
].vlan_table
;
459 mutex_lock(&table
->mutex
);
460 if (mlx4_find_cached_vlan(dev
, port
, vlan
, &index
)) {
461 mlx4_warn(dev
, "vlan 0x%x is not in the vlan table\n", vlan
);
465 if (index
< MLX4_VLAN_REGULAR
) {
466 mlx4_warn(dev
, "Trying to free special vlan index %d\n", index
);
470 if (--table
->refs
[index
]) {
471 mlx4_dbg(dev
, "Have %d more references for index %d, no need to modify vlan table\n",
472 table
->refs
[index
], index
);
475 table
->entries
[index
] = 0;
476 mlx4_set_port_vlan_table(dev
, port
, table
->entries
);
479 mutex_unlock(&table
->mutex
);
482 void mlx4_unregister_vlan(struct mlx4_dev
*dev
, u8 port
, u16 vlan
)
486 if (mlx4_is_mfunc(dev
)) {
487 (void) mlx4_cmd_imm(dev
, vlan
, &out_param
,
488 ((u32
) port
) << 8 | (u32
) RES_VLAN
,
489 RES_OP_RESERVE_AND_MAP
,
490 MLX4_CMD_FREE_RES
, MLX4_CMD_TIME_CLASS_A
,
494 __mlx4_unregister_vlan(dev
, port
, vlan
);
496 EXPORT_SYMBOL_GPL(mlx4_unregister_vlan
);
498 int mlx4_get_port_ib_caps(struct mlx4_dev
*dev
, u8 port
, __be32
*caps
)
500 struct mlx4_cmd_mailbox
*inmailbox
, *outmailbox
;
504 inmailbox
= mlx4_alloc_cmd_mailbox(dev
);
505 if (IS_ERR(inmailbox
))
506 return PTR_ERR(inmailbox
);
508 outmailbox
= mlx4_alloc_cmd_mailbox(dev
);
509 if (IS_ERR(outmailbox
)) {
510 mlx4_free_cmd_mailbox(dev
, inmailbox
);
511 return PTR_ERR(outmailbox
);
514 inbuf
= inmailbox
->buf
;
515 outbuf
= outmailbox
->buf
;
520 *(__be16
*) (&inbuf
[16]) = cpu_to_be16(0x0015);
521 *(__be32
*) (&inbuf
[20]) = cpu_to_be32(port
);
523 err
= mlx4_cmd_box(dev
, inmailbox
->dma
, outmailbox
->dma
, port
, 3,
524 MLX4_CMD_MAD_IFC
, MLX4_CMD_TIME_CLASS_C
,
527 *caps
= *(__be32
*) (outbuf
+ 84);
528 mlx4_free_cmd_mailbox(dev
, inmailbox
);
529 mlx4_free_cmd_mailbox(dev
, outmailbox
);
/* All-zero GID entry used as the "empty slot" sentinel for comparisons
 * and for clearing table entries (static storage guarantees zero-init). */
static struct mlx4_roce_gid_entry zgid_entry;
534 int mlx4_get_slave_num_gids(struct mlx4_dev
*dev
, int slave
, int port
)
537 int slave_gid
= slave
;
539 struct mlx4_slaves_pport slaves_pport
;
540 struct mlx4_active_ports actv_ports
;
541 unsigned max_port_p_one
;
544 return MLX4_ROCE_PF_GIDS
;
547 slaves_pport
= mlx4_phys_to_slaves_pport(dev
, port
);
548 actv_ports
= mlx4_get_active_ports(dev
, slave
);
549 max_port_p_one
= find_first_bit(actv_ports
.ports
, dev
->caps
.num_ports
) +
550 bitmap_weight(actv_ports
.ports
, dev
->caps
.num_ports
) + 1;
552 for (i
= 1; i
< max_port_p_one
; i
++) {
553 struct mlx4_active_ports exclusive_ports
;
554 struct mlx4_slaves_pport slaves_pport_actv
;
555 bitmap_zero(exclusive_ports
.ports
, dev
->caps
.num_ports
);
556 set_bit(i
- 1, exclusive_ports
.ports
);
559 slaves_pport_actv
= mlx4_phys_to_slaves_pport_actv(
560 dev
, &exclusive_ports
);
561 slave_gid
-= bitmap_weight(slaves_pport_actv
.slaves
,
562 dev
->persist
->num_vfs
+ 1);
564 vfs
= bitmap_weight(slaves_pport
.slaves
, dev
->persist
->num_vfs
+ 1) - 1;
565 if (slave_gid
<= ((MLX4_ROCE_MAX_GIDS
- MLX4_ROCE_PF_GIDS
) % vfs
))
566 return ((MLX4_ROCE_MAX_GIDS
- MLX4_ROCE_PF_GIDS
) / vfs
) + 1;
567 return (MLX4_ROCE_MAX_GIDS
- MLX4_ROCE_PF_GIDS
) / vfs
;
570 int mlx4_get_base_gid_ix(struct mlx4_dev
*dev
, int slave
, int port
)
574 int slave_gid
= slave
;
577 struct mlx4_slaves_pport slaves_pport
;
578 struct mlx4_active_ports actv_ports
;
579 unsigned max_port_p_one
;
584 slaves_pport
= mlx4_phys_to_slaves_pport(dev
, port
);
585 actv_ports
= mlx4_get_active_ports(dev
, slave
);
586 max_port_p_one
= find_first_bit(actv_ports
.ports
, dev
->caps
.num_ports
) +
587 bitmap_weight(actv_ports
.ports
, dev
->caps
.num_ports
) + 1;
589 for (i
= 1; i
< max_port_p_one
; i
++) {
590 struct mlx4_active_ports exclusive_ports
;
591 struct mlx4_slaves_pport slaves_pport_actv
;
592 bitmap_zero(exclusive_ports
.ports
, dev
->caps
.num_ports
);
593 set_bit(i
- 1, exclusive_ports
.ports
);
596 slaves_pport_actv
= mlx4_phys_to_slaves_pport_actv(
597 dev
, &exclusive_ports
);
598 slave_gid
-= bitmap_weight(slaves_pport_actv
.slaves
,
599 dev
->persist
->num_vfs
+ 1);
601 gids
= MLX4_ROCE_MAX_GIDS
- MLX4_ROCE_PF_GIDS
;
602 vfs
= bitmap_weight(slaves_pport
.slaves
, dev
->persist
->num_vfs
+ 1) - 1;
603 if (slave_gid
<= gids
% vfs
)
604 return MLX4_ROCE_PF_GIDS
+ ((gids
/ vfs
) + 1) * (slave_gid
- 1);
606 return MLX4_ROCE_PF_GIDS
+ (gids
% vfs
) +
607 ((gids
/ vfs
) * (slave_gid
- 1));
609 EXPORT_SYMBOL_GPL(mlx4_get_base_gid_ix
);
611 static int mlx4_reset_roce_port_gids(struct mlx4_dev
*dev
, int slave
,
612 int port
, struct mlx4_cmd_mailbox
*mailbox
)
614 struct mlx4_roce_gid_entry
*gid_entry_mbox
;
615 struct mlx4_priv
*priv
= mlx4_priv(dev
);
616 int num_gids
, base
, offset
;
619 num_gids
= mlx4_get_slave_num_gids(dev
, slave
, port
);
620 base
= mlx4_get_base_gid_ix(dev
, slave
, port
);
622 memset(mailbox
->buf
, 0, MLX4_MAILBOX_SIZE
);
624 mutex_lock(&(priv
->port
[port
].gid_table
.mutex
));
625 /* Zero-out gids belonging to that slave in the port GID table */
626 for (i
= 0, offset
= base
; i
< num_gids
; offset
++, i
++)
627 memcpy(priv
->port
[port
].gid_table
.roce_gids
[offset
].raw
,
628 zgid_entry
.raw
, MLX4_ROCE_GID_ENTRY_SIZE
);
630 /* Now, copy roce port gids table to mailbox for passing to FW */
631 gid_entry_mbox
= (struct mlx4_roce_gid_entry
*)mailbox
->buf
;
632 for (i
= 0; i
< MLX4_ROCE_MAX_GIDS
; gid_entry_mbox
++, i
++)
633 memcpy(gid_entry_mbox
->raw
,
634 priv
->port
[port
].gid_table
.roce_gids
[i
].raw
,
635 MLX4_ROCE_GID_ENTRY_SIZE
);
637 err
= mlx4_cmd(dev
, mailbox
->dma
,
638 ((u32
)port
) | (MLX4_SET_PORT_GID_TABLE
<< 8),
639 MLX4_SET_PORT_ETH_OPCODE
, MLX4_CMD_SET_PORT
,
640 MLX4_CMD_TIME_CLASS_B
, MLX4_CMD_NATIVE
);
641 mutex_unlock(&(priv
->port
[port
].gid_table
.mutex
));
646 void mlx4_reset_roce_gids(struct mlx4_dev
*dev
, int slave
)
648 struct mlx4_active_ports actv_ports
;
649 struct mlx4_cmd_mailbox
*mailbox
;
650 int num_eth_ports
, err
;
653 if (slave
< 0 || slave
> dev
->persist
->num_vfs
)
656 actv_ports
= mlx4_get_active_ports(dev
, slave
);
658 for (i
= 0, num_eth_ports
= 0; i
< dev
->caps
.num_ports
; i
++) {
659 if (test_bit(i
, actv_ports
.ports
)) {
660 if (dev
->caps
.port_type
[i
+ 1] != MLX4_PORT_TYPE_ETH
)
669 /* have ETH ports. Alloc mailbox for SET_PORT command */
670 mailbox
= mlx4_alloc_cmd_mailbox(dev
);
674 for (i
= 0; i
< dev
->caps
.num_ports
; i
++) {
675 if (test_bit(i
, actv_ports
.ports
)) {
676 if (dev
->caps
.port_type
[i
+ 1] != MLX4_PORT_TYPE_ETH
)
678 err
= mlx4_reset_roce_port_gids(dev
, slave
, i
+ 1, mailbox
);
680 mlx4_warn(dev
, "Could not reset ETH port GID table for slave %d, port %d (%d)\n",
685 mlx4_free_cmd_mailbox(dev
, mailbox
);
689 static int mlx4_common_set_port(struct mlx4_dev
*dev
, int slave
, u32 in_mod
,
690 u8 op_mod
, struct mlx4_cmd_mailbox
*inbox
)
692 struct mlx4_priv
*priv
= mlx4_priv(dev
);
693 struct mlx4_port_info
*port_info
;
694 struct mlx4_mfunc_master_ctx
*master
= &priv
->mfunc
.master
;
695 struct mlx4_slave_state
*slave_st
= &master
->slave_state
[slave
];
696 struct mlx4_set_port_rqp_calc_context
*qpn_context
;
697 struct mlx4_set_port_general_context
*gen_context
;
698 struct mlx4_roce_gid_entry
*gid_entry_tbl
, *gid_entry_mbox
, *gid_entry_mb1
;
699 int reset_qkey_viols
;
711 __be32 slave_cap_mask
;
714 port
= in_mod
& 0xff;
715 in_modifier
= in_mod
>> 8;
717 port_info
= &priv
->port
[port
];
719 /* Slaves cannot perform SET_PORT operations except changing MTU */
721 if (slave
!= dev
->caps
.function
&&
722 in_modifier
!= MLX4_SET_PORT_GENERAL
&&
723 in_modifier
!= MLX4_SET_PORT_GID_TABLE
) {
724 mlx4_warn(dev
, "denying SET_PORT for slave:%d\n",
728 switch (in_modifier
) {
729 case MLX4_SET_PORT_RQP_CALC
:
730 qpn_context
= inbox
->buf
;
731 qpn_context
->base_qpn
=
732 cpu_to_be32(port_info
->base_qpn
);
733 qpn_context
->n_mac
= 0x7;
734 promisc
= be32_to_cpu(qpn_context
->promisc
) >>
735 SET_PORT_PROMISC_SHIFT
;
736 qpn_context
->promisc
= cpu_to_be32(
737 promisc
<< SET_PORT_PROMISC_SHIFT
|
738 port_info
->base_qpn
);
739 promisc
= be32_to_cpu(qpn_context
->mcast
) >>
740 SET_PORT_MC_PROMISC_SHIFT
;
741 qpn_context
->mcast
= cpu_to_be32(
742 promisc
<< SET_PORT_MC_PROMISC_SHIFT
|
743 port_info
->base_qpn
);
745 case MLX4_SET_PORT_GENERAL
:
746 gen_context
= inbox
->buf
;
747 /* Mtu is configured as the max MTU among all the
748 * the functions on the port. */
749 mtu
= be16_to_cpu(gen_context
->mtu
);
750 mtu
= min_t(int, mtu
, dev
->caps
.eth_mtu_cap
[port
] +
751 ETH_HLEN
+ VLAN_HLEN
+ ETH_FCS_LEN
);
752 prev_mtu
= slave_st
->mtu
[port
];
753 slave_st
->mtu
[port
] = mtu
;
754 if (mtu
> master
->max_mtu
[port
])
755 master
->max_mtu
[port
] = mtu
;
756 if (mtu
< prev_mtu
&& prev_mtu
==
757 master
->max_mtu
[port
]) {
758 slave_st
->mtu
[port
] = mtu
;
759 master
->max_mtu
[port
] = mtu
;
760 for (i
= 0; i
< dev
->num_slaves
; i
++) {
761 master
->max_mtu
[port
] =
762 max(master
->max_mtu
[port
],
763 master
->slave_state
[i
].mtu
[port
]);
767 gen_context
->mtu
= cpu_to_be16(master
->max_mtu
[port
]);
769 case MLX4_SET_PORT_GID_TABLE
:
770 /* change to MULTIPLE entries: number of guest's gids
771 * need a FOR-loop here over number of gids the guest has.
772 * 1. Check no duplicates in gids passed by slave
774 num_gids
= mlx4_get_slave_num_gids(dev
, slave
, port
);
775 base
= mlx4_get_base_gid_ix(dev
, slave
, port
);
776 gid_entry_mbox
= (struct mlx4_roce_gid_entry
*)(inbox
->buf
);
777 for (i
= 0; i
< num_gids
; gid_entry_mbox
++, i
++) {
778 if (!memcmp(gid_entry_mbox
->raw
, zgid_entry
.raw
,
781 gid_entry_mb1
= gid_entry_mbox
+ 1;
782 for (j
= i
+ 1; j
< num_gids
; gid_entry_mb1
++, j
++) {
783 if (!memcmp(gid_entry_mb1
->raw
,
784 zgid_entry
.raw
, sizeof(zgid_entry
)))
786 if (!memcmp(gid_entry_mb1
->raw
, gid_entry_mbox
->raw
,
787 sizeof(gid_entry_mbox
->raw
))) {
788 /* found duplicate */
794 /* 2. Check that do not have duplicates in OTHER
795 * entries in the port GID table
798 mutex_lock(&(priv
->port
[port
].gid_table
.mutex
));
799 for (i
= 0; i
< MLX4_ROCE_MAX_GIDS
; i
++) {
800 if (i
>= base
&& i
< base
+ num_gids
)
801 continue; /* don't compare to slave's current gids */
802 gid_entry_tbl
= &priv
->port
[port
].gid_table
.roce_gids
[i
];
803 if (!memcmp(gid_entry_tbl
->raw
, zgid_entry
.raw
, sizeof(zgid_entry
)))
805 gid_entry_mbox
= (struct mlx4_roce_gid_entry
*)(inbox
->buf
);
806 for (j
= 0; j
< num_gids
; gid_entry_mbox
++, j
++) {
807 if (!memcmp(gid_entry_mbox
->raw
, zgid_entry
.raw
,
810 if (!memcmp(gid_entry_mbox
->raw
, gid_entry_tbl
->raw
,
811 sizeof(gid_entry_tbl
->raw
))) {
812 /* found duplicate */
813 mlx4_warn(dev
, "requested gid entry for slave:%d is a duplicate of gid at index %d\n",
815 mutex_unlock(&(priv
->port
[port
].gid_table
.mutex
));
821 /* insert slave GIDs with memcpy, starting at slave's base index */
822 gid_entry_mbox
= (struct mlx4_roce_gid_entry
*)(inbox
->buf
);
823 for (i
= 0, offset
= base
; i
< num_gids
; gid_entry_mbox
++, offset
++, i
++)
824 memcpy(priv
->port
[port
].gid_table
.roce_gids
[offset
].raw
,
825 gid_entry_mbox
->raw
, MLX4_ROCE_GID_ENTRY_SIZE
);
827 /* Now, copy roce port gids table to current mailbox for passing to FW */
828 gid_entry_mbox
= (struct mlx4_roce_gid_entry
*)(inbox
->buf
);
829 for (i
= 0; i
< MLX4_ROCE_MAX_GIDS
; gid_entry_mbox
++, i
++)
830 memcpy(gid_entry_mbox
->raw
,
831 priv
->port
[port
].gid_table
.roce_gids
[i
].raw
,
832 MLX4_ROCE_GID_ENTRY_SIZE
);
834 err
= mlx4_cmd(dev
, inbox
->dma
, in_mod
& 0xffff, op_mod
,
835 MLX4_CMD_SET_PORT
, MLX4_CMD_TIME_CLASS_B
,
837 mutex_unlock(&(priv
->port
[port
].gid_table
.mutex
));
841 return mlx4_cmd(dev
, inbox
->dma
, in_mod
& 0xffff, op_mod
,
842 MLX4_CMD_SET_PORT
, MLX4_CMD_TIME_CLASS_B
,
846 /* Slaves are not allowed to SET_PORT beacon (LED) blink */
847 if (op_mod
== MLX4_SET_PORT_BEACON_OPCODE
) {
848 mlx4_warn(dev
, "denying SET_PORT Beacon slave:%d\n", slave
);
852 /* For IB, we only consider:
853 * - The capability mask, which is set to the aggregate of all
854 * slave function capabilities
855 * - The QKey violatin counter - reset according to each request.
858 if (dev
->flags
& MLX4_FLAG_OLD_PORT_CMDS
) {
859 reset_qkey_viols
= (*(u8
*) inbox
->buf
) & 0x40;
860 new_cap_mask
= ((__be32
*) inbox
->buf
)[2];
862 reset_qkey_viols
= ((u8
*) inbox
->buf
)[3] & 0x1;
863 new_cap_mask
= ((__be32
*) inbox
->buf
)[1];
866 /* slave may not set the IS_SM capability for the port */
867 if (slave
!= mlx4_master_func_num(dev
) &&
868 (be32_to_cpu(new_cap_mask
) & MLX4_PORT_CAP_IS_SM
))
871 /* No DEV_MGMT in multifunc mode */
872 if (mlx4_is_mfunc(dev
) &&
873 (be32_to_cpu(new_cap_mask
) & MLX4_PORT_CAP_DEV_MGMT_SUP
))
878 priv
->mfunc
.master
.slave_state
[slave
].ib_cap_mask
[port
];
879 priv
->mfunc
.master
.slave_state
[slave
].ib_cap_mask
[port
] = new_cap_mask
;
880 for (i
= 0; i
< dev
->num_slaves
; i
++)
882 priv
->mfunc
.master
.slave_state
[i
].ib_cap_mask
[port
];
884 /* only clear mailbox for guests. Master may be setting
885 * MTU or PKEY table size
887 if (slave
!= dev
->caps
.function
)
888 memset(inbox
->buf
, 0, 256);
889 if (dev
->flags
& MLX4_FLAG_OLD_PORT_CMDS
) {
890 *(u8
*) inbox
->buf
|= !!reset_qkey_viols
<< 6;
891 ((__be32
*) inbox
->buf
)[2] = agg_cap_mask
;
893 ((u8
*) inbox
->buf
)[3] |= !!reset_qkey_viols
;
894 ((__be32
*) inbox
->buf
)[1] = agg_cap_mask
;
897 err
= mlx4_cmd(dev
, inbox
->dma
, port
, is_eth
, MLX4_CMD_SET_PORT
,
898 MLX4_CMD_TIME_CLASS_B
, MLX4_CMD_NATIVE
);
900 priv
->mfunc
.master
.slave_state
[slave
].ib_cap_mask
[port
] =
905 int mlx4_SET_PORT_wrapper(struct mlx4_dev
*dev
, int slave
,
906 struct mlx4_vhcr
*vhcr
,
907 struct mlx4_cmd_mailbox
*inbox
,
908 struct mlx4_cmd_mailbox
*outbox
,
909 struct mlx4_cmd_info
*cmd
)
911 int port
= mlx4_slave_convert_port(
912 dev
, slave
, vhcr
->in_modifier
& 0xFF);
917 vhcr
->in_modifier
= (vhcr
->in_modifier
& ~0xFF) |
920 return mlx4_common_set_port(dev
, slave
, vhcr
->in_modifier
,
921 vhcr
->op_modifier
, inbox
);
/* bit locations for set port command with zero op modifier */
enum {
	MLX4_SET_PORT_VL_CAP	 = 4, /* bits 7:4 */
	MLX4_SET_PORT_MTU_CAP	 = 12, /* bits 15:12 */
	MLX4_CHANGE_PORT_PKEY_TBL_SZ = 20,
	MLX4_CHANGE_PORT_VL_CAP	 = 21,
	MLX4_CHANGE_PORT_MTU_CAP = 22,
};
933 int mlx4_SET_PORT(struct mlx4_dev
*dev
, u8 port
, int pkey_tbl_sz
)
935 struct mlx4_cmd_mailbox
*mailbox
;
936 int err
, vl_cap
, pkey_tbl_flag
= 0;
938 if (dev
->caps
.port_type
[port
] == MLX4_PORT_TYPE_ETH
)
941 mailbox
= mlx4_alloc_cmd_mailbox(dev
);
943 return PTR_ERR(mailbox
);
945 ((__be32
*) mailbox
->buf
)[1] = dev
->caps
.ib_port_def_cap
[port
];
947 if (pkey_tbl_sz
>= 0 && mlx4_is_master(dev
)) {
949 ((__be16
*) mailbox
->buf
)[20] = cpu_to_be16(pkey_tbl_sz
);
952 /* IB VL CAP enum isn't used by the firmware, just numerical values */
953 for (vl_cap
= 8; vl_cap
>= 1; vl_cap
>>= 1) {
954 ((__be32
*) mailbox
->buf
)[0] = cpu_to_be32(
955 (1 << MLX4_CHANGE_PORT_MTU_CAP
) |
956 (1 << MLX4_CHANGE_PORT_VL_CAP
) |
957 (pkey_tbl_flag
<< MLX4_CHANGE_PORT_PKEY_TBL_SZ
) |
958 (dev
->caps
.port_ib_mtu
[port
] << MLX4_SET_PORT_MTU_CAP
) |
959 (vl_cap
<< MLX4_SET_PORT_VL_CAP
));
960 err
= mlx4_cmd(dev
, mailbox
->dma
, port
,
961 MLX4_SET_PORT_IB_OPCODE
, MLX4_CMD_SET_PORT
,
962 MLX4_CMD_TIME_CLASS_B
, MLX4_CMD_WRAPPED
);
967 mlx4_free_cmd_mailbox(dev
, mailbox
);
971 int mlx4_SET_PORT_general(struct mlx4_dev
*dev
, u8 port
, int mtu
,
972 u8 pptx
, u8 pfctx
, u8 pprx
, u8 pfcrx
)
974 struct mlx4_cmd_mailbox
*mailbox
;
975 struct mlx4_set_port_general_context
*context
;
979 mailbox
= mlx4_alloc_cmd_mailbox(dev
);
981 return PTR_ERR(mailbox
);
982 context
= mailbox
->buf
;
983 context
->flags
= SET_PORT_GEN_ALL_VALID
;
984 context
->mtu
= cpu_to_be16(mtu
);
985 context
->pptx
= (pptx
* (!pfctx
)) << 7;
986 context
->pfctx
= pfctx
;
987 context
->pprx
= (pprx
* (!pfcrx
)) << 7;
988 context
->pfcrx
= pfcrx
;
990 in_mod
= MLX4_SET_PORT_GENERAL
<< 8 | port
;
991 err
= mlx4_cmd(dev
, mailbox
->dma
, in_mod
, MLX4_SET_PORT_ETH_OPCODE
,
992 MLX4_CMD_SET_PORT
, MLX4_CMD_TIME_CLASS_B
,
995 mlx4_free_cmd_mailbox(dev
, mailbox
);
998 EXPORT_SYMBOL(mlx4_SET_PORT_general
);
1000 int mlx4_SET_PORT_qpn_calc(struct mlx4_dev
*dev
, u8 port
, u32 base_qpn
,
1003 struct mlx4_cmd_mailbox
*mailbox
;
1004 struct mlx4_set_port_rqp_calc_context
*context
;
1007 u32 m_promisc
= (dev
->caps
.flags
& MLX4_DEV_CAP_FLAG_VEP_MC_STEER
) ?
1008 MCAST_DIRECT
: MCAST_DEFAULT
;
1010 if (dev
->caps
.steering_mode
!= MLX4_STEERING_MODE_A0
)
1013 mailbox
= mlx4_alloc_cmd_mailbox(dev
);
1014 if (IS_ERR(mailbox
))
1015 return PTR_ERR(mailbox
);
1016 context
= mailbox
->buf
;
1017 context
->base_qpn
= cpu_to_be32(base_qpn
);
1018 context
->n_mac
= dev
->caps
.log_num_macs
;
1019 context
->promisc
= cpu_to_be32(promisc
<< SET_PORT_PROMISC_SHIFT
|
1021 context
->mcast
= cpu_to_be32(m_promisc
<< SET_PORT_MC_PROMISC_SHIFT
|
1023 context
->intra_no_vlan
= 0;
1024 context
->no_vlan
= MLX4_NO_VLAN_IDX
;
1025 context
->intra_vlan_miss
= 0;
1026 context
->vlan_miss
= MLX4_VLAN_MISS_IDX
;
1028 in_mod
= MLX4_SET_PORT_RQP_CALC
<< 8 | port
;
1029 err
= mlx4_cmd(dev
, mailbox
->dma
, in_mod
, MLX4_SET_PORT_ETH_OPCODE
,
1030 MLX4_CMD_SET_PORT
, MLX4_CMD_TIME_CLASS_B
,
1033 mlx4_free_cmd_mailbox(dev
, mailbox
);
1036 EXPORT_SYMBOL(mlx4_SET_PORT_qpn_calc
);
1038 int mlx4_SET_PORT_fcs_check(struct mlx4_dev
*dev
, u8 port
, u8 ignore_fcs_value
)
1040 struct mlx4_cmd_mailbox
*mailbox
;
1041 struct mlx4_set_port_general_context
*context
;
1045 mailbox
= mlx4_alloc_cmd_mailbox(dev
);
1046 if (IS_ERR(mailbox
))
1047 return PTR_ERR(mailbox
);
1048 context
= mailbox
->buf
;
1049 context
->v_ignore_fcs
|= MLX4_FLAG_V_IGNORE_FCS_MASK
;
1050 if (ignore_fcs_value
)
1051 context
->ignore_fcs
|= MLX4_IGNORE_FCS_MASK
;
1053 context
->ignore_fcs
&= ~MLX4_IGNORE_FCS_MASK
;
1055 in_mod
= MLX4_SET_PORT_GENERAL
<< 8 | port
;
1056 err
= mlx4_cmd(dev
, mailbox
->dma
, in_mod
, 1, MLX4_CMD_SET_PORT
,
1057 MLX4_CMD_TIME_CLASS_B
, MLX4_CMD_NATIVE
);
1059 mlx4_free_cmd_mailbox(dev
, mailbox
);
1062 EXPORT_SYMBOL(mlx4_SET_PORT_fcs_check
);
1065 VXLAN_ENABLE_MODIFY
= 1 << 7,
1066 VXLAN_STEERING_MODIFY
= 1 << 6,
1068 VXLAN_ENABLE
= 1 << 7,
1071 struct mlx4_set_port_vxlan_context
{
1079 int mlx4_SET_PORT_VXLAN(struct mlx4_dev
*dev
, u8 port
, u8 steering
, int enable
)
1083 struct mlx4_cmd_mailbox
*mailbox
;
1084 struct mlx4_set_port_vxlan_context
*context
;
1086 mailbox
= mlx4_alloc_cmd_mailbox(dev
);
1087 if (IS_ERR(mailbox
))
1088 return PTR_ERR(mailbox
);
1089 context
= mailbox
->buf
;
1090 memset(context
, 0, sizeof(*context
));
1092 context
->modify_flags
= VXLAN_ENABLE_MODIFY
| VXLAN_STEERING_MODIFY
;
1094 context
->enable_flags
= VXLAN_ENABLE
;
1095 context
->steering
= steering
;
1097 in_mod
= MLX4_SET_PORT_VXLAN
<< 8 | port
;
1098 err
= mlx4_cmd(dev
, mailbox
->dma
, in_mod
, MLX4_SET_PORT_ETH_OPCODE
,
1099 MLX4_CMD_SET_PORT
, MLX4_CMD_TIME_CLASS_B
,
1102 mlx4_free_cmd_mailbox(dev
, mailbox
);
1105 EXPORT_SYMBOL(mlx4_SET_PORT_VXLAN
);
1107 int mlx4_SET_PORT_BEACON(struct mlx4_dev
*dev
, u8 port
, u16 time
)
1110 struct mlx4_cmd_mailbox
*mailbox
;
1112 mailbox
= mlx4_alloc_cmd_mailbox(dev
);
1113 if (IS_ERR(mailbox
))
1114 return PTR_ERR(mailbox
);
1116 *((__be32
*)mailbox
->buf
) = cpu_to_be32(time
);
1118 err
= mlx4_cmd(dev
, mailbox
->dma
, port
, MLX4_SET_PORT_BEACON_OPCODE
,
1119 MLX4_CMD_SET_PORT
, MLX4_CMD_TIME_CLASS_B
,
1122 mlx4_free_cmd_mailbox(dev
, mailbox
);
1125 EXPORT_SYMBOL(mlx4_SET_PORT_BEACON
);
/* Slave wrapper for SET_MCAST_FLTR: slaves' multicast filters are handled
 * elsewhere (resource tracker), so this intentionally succeeds without
 * touching hardware.
 */
int mlx4_SET_MCAST_FLTR_wrapper(struct mlx4_dev *dev, int slave,
				struct mlx4_vhcr *vhcr,
				struct mlx4_cmd_mailbox *inbox,
				struct mlx4_cmd_mailbox *outbox,
				struct mlx4_cmd_info *cmd)
{
	int err = 0;

	return err;
}
1138 int mlx4_SET_MCAST_FLTR(struct mlx4_dev
*dev
, u8 port
,
1139 u64 mac
, u64 clear
, u8 mode
)
1141 return mlx4_cmd(dev
, (mac
| (clear
<< 63)), port
, mode
,
1142 MLX4_CMD_SET_MCAST_FLTR
, MLX4_CMD_TIME_CLASS_B
,
1145 EXPORT_SYMBOL(mlx4_SET_MCAST_FLTR
);
/* Slave wrapper for SET_VLAN_FLTR: slaves' VLAN filters are handled
 * elsewhere (resource tracker), so this intentionally succeeds without
 * touching hardware.
 */
int mlx4_SET_VLAN_FLTR_wrapper(struct mlx4_dev *dev, int slave,
			       struct mlx4_vhcr *vhcr,
			       struct mlx4_cmd_mailbox *inbox,
			       struct mlx4_cmd_mailbox *outbox,
			       struct mlx4_cmd_info *cmd)
{
	int err = 0;

	return err;
}
1158 int mlx4_common_dump_eth_stats(struct mlx4_dev
*dev
, int slave
,
1159 u32 in_mod
, struct mlx4_cmd_mailbox
*outbox
)
1161 return mlx4_cmd_box(dev
, 0, outbox
->dma
, in_mod
, 0,
1162 MLX4_CMD_DUMP_ETH_STATS
, MLX4_CMD_TIME_CLASS_B
,
1166 int mlx4_DUMP_ETH_STATS_wrapper(struct mlx4_dev
*dev
, int slave
,
1167 struct mlx4_vhcr
*vhcr
,
1168 struct mlx4_cmd_mailbox
*inbox
,
1169 struct mlx4_cmd_mailbox
*outbox
,
1170 struct mlx4_cmd_info
*cmd
)
1172 if (slave
!= dev
->caps
.function
)
1174 return mlx4_common_dump_eth_stats(dev
, slave
,
1175 vhcr
->in_modifier
, outbox
);
1178 int mlx4_get_slave_from_roce_gid(struct mlx4_dev
*dev
, int port
, u8
*gid
,
1181 struct mlx4_priv
*priv
= mlx4_priv(dev
);
1182 int i
, found_ix
= -1;
1183 int vf_gids
= MLX4_ROCE_MAX_GIDS
- MLX4_ROCE_PF_GIDS
;
1184 struct mlx4_slaves_pport slaves_pport
;
1188 if (!mlx4_is_mfunc(dev
))
1191 slaves_pport
= mlx4_phys_to_slaves_pport(dev
, port
);
1192 num_vfs
= bitmap_weight(slaves_pport
.slaves
,
1193 dev
->persist
->num_vfs
+ 1) - 1;
1195 for (i
= 0; i
< MLX4_ROCE_MAX_GIDS
; i
++) {
1196 if (!memcmp(priv
->port
[port
].gid_table
.roce_gids
[i
].raw
, gid
,
1197 MLX4_ROCE_GID_ENTRY_SIZE
)) {
1203 if (found_ix
>= 0) {
1204 /* Calculate a slave_gid which is the slave number in the gid
1205 * table and not a globally unique slave number.
1207 if (found_ix
< MLX4_ROCE_PF_GIDS
)
1209 else if (found_ix
< MLX4_ROCE_PF_GIDS
+ (vf_gids
% num_vfs
) *
1210 (vf_gids
/ num_vfs
+ 1))
1211 slave_gid
= ((found_ix
- MLX4_ROCE_PF_GIDS
) /
1212 (vf_gids
/ num_vfs
+ 1)) + 1;
1215 ((found_ix
- MLX4_ROCE_PF_GIDS
-
1216 ((vf_gids
% num_vfs
) * ((vf_gids
/ num_vfs
+ 1)))) /
1217 (vf_gids
/ num_vfs
)) + vf_gids
% num_vfs
+ 1;
1219 /* Calculate the globally unique slave id */
1221 struct mlx4_active_ports exclusive_ports
;
1222 struct mlx4_active_ports actv_ports
;
1223 struct mlx4_slaves_pport slaves_pport_actv
;
1224 unsigned max_port_p_one
;
1225 int num_vfs_before
= 0;
1226 int candidate_slave_gid
;
1228 /* Calculate how many VFs are on the previous port, if exists */
1229 for (i
= 1; i
< port
; i
++) {
1230 bitmap_zero(exclusive_ports
.ports
, dev
->caps
.num_ports
);
1231 set_bit(i
- 1, exclusive_ports
.ports
);
1233 mlx4_phys_to_slaves_pport_actv(
1234 dev
, &exclusive_ports
);
1235 num_vfs_before
+= bitmap_weight(
1236 slaves_pport_actv
.slaves
,
1237 dev
->persist
->num_vfs
+ 1);
1240 /* candidate_slave_gid isn't necessarily the correct slave, but
1241 * it has the same number of ports and is assigned to the same
1242 * ports as the real slave we're looking for. On dual port VF,
1243 * slave_gid = [single port VFs on port <port>] +
1244 * [offset of the current slave from the first dual port VF] +
1247 candidate_slave_gid
= slave_gid
+ num_vfs_before
;
1249 actv_ports
= mlx4_get_active_ports(dev
, candidate_slave_gid
);
1250 max_port_p_one
= find_first_bit(
1251 actv_ports
.ports
, dev
->caps
.num_ports
) +
1252 bitmap_weight(actv_ports
.ports
,
1253 dev
->caps
.num_ports
) + 1;
1255 /* Calculate the real slave number */
1256 for (i
= 1; i
< max_port_p_one
; i
++) {
1259 bitmap_zero(exclusive_ports
.ports
,
1260 dev
->caps
.num_ports
);
1261 set_bit(i
- 1, exclusive_ports
.ports
);
1263 mlx4_phys_to_slaves_pport_actv(
1264 dev
, &exclusive_ports
);
1265 slave_gid
+= bitmap_weight(
1266 slaves_pport_actv
.slaves
,
1267 dev
->persist
->num_vfs
+ 1);
1270 *slave_id
= slave_gid
;
1273 return (found_ix
>= 0) ? 0 : -EINVAL
;
1275 EXPORT_SYMBOL(mlx4_get_slave_from_roce_gid
);
1277 int mlx4_get_roce_gid_from_slave(struct mlx4_dev
*dev
, int port
, int slave_id
,
1280 struct mlx4_priv
*priv
= mlx4_priv(dev
);
1282 if (!mlx4_is_master(dev
))
1285 memcpy(gid
, priv
->port
[port
].gid_table
.roce_gids
[slave_id
].raw
,
1286 MLX4_ROCE_GID_ENTRY_SIZE
);
1289 EXPORT_SYMBOL(mlx4_get_roce_gid_from_slave
);
1291 /* Cable Module Info */
1292 #define MODULE_INFO_MAX_READ 48
1294 #define I2C_ADDR_LOW 0x50
1295 #define I2C_ADDR_HIGH 0x51
1296 #define I2C_PAGE_SIZE 256
1298 /* Module Info Data */
1299 struct mlx4_cable_info
{
1302 __be16 dev_mem_address
;
1305 __be32 reserved2
[2];
1306 u8 data
[MODULE_INFO_MAX_READ
];
/* Cable-specific error codes returned in the MAD status word when a
 * Module Info read fails; decoded by cable_info_mad_err_str().
 */
enum cable_info_err {
	CABLE_INF_INV_PORT  = 0x1,
	CABLE_INF_OP_NOSUP  = 0x2,
	CABLE_INF_NOT_CONN  = 0x3,
	CABLE_INF_NO_EEPRM  = 0x4,
	CABLE_INF_PAGE_ERR  = 0x5,
	CABLE_INF_INV_ADDR  = 0x6,
	CABLE_INF_I2C_ADDR  = 0x7,
	CABLE_INF_QSFP_VIO  = 0x8,
	CABLE_INF_I2C_BUSY  = 0x9,
};

/* The cable error code lives in byte 1 of the 16-bit MAD status. */
#define MAD_STATUS_2_CABLE_ERR(mad_status) ((mad_status >> 8) & 0xFF)
1323 static inline const char *cable_info_mad_err_str(u16 mad_status
)
1325 u8 err
= MAD_STATUS_2_CABLE_ERR(mad_status
);
1328 case CABLE_INF_INV_PORT
:
1329 return "invalid port selected";
1330 case CABLE_INF_OP_NOSUP
:
1331 return "operation not supported for this port (the port is of type CX4 or internal)";
1332 case CABLE_INF_NOT_CONN
:
1333 return "cable is not connected";
1334 case CABLE_INF_NO_EEPRM
:
1335 return "the connected cable has no EPROM (passive copper cable)";
1336 case CABLE_INF_PAGE_ERR
:
1337 return "page number is greater than 15";
1338 case CABLE_INF_INV_ADDR
:
1339 return "invalid device_address or size (that is, size equals 0 or address+size is greater than 256)";
1340 case CABLE_INF_I2C_ADDR
:
1341 return "invalid I2C slave address";
1342 case CABLE_INF_QSFP_VIO
:
1343 return "at least one cable violates the QSFP specification and ignores the modsel signal";
1344 case CABLE_INF_I2C_BUSY
:
1345 return "I2C bus is constantly busy";
1347 return "Unknown Error";
1351 * mlx4_get_module_info - Read cable module eeprom data
1353 * @port: port number.
1354 * @offset: byte offset in eeprom to start reading data from.
1355 * @size: num of bytes to read.
1356 * @data: output buffer to put the requested data into.
1358 * Reads cable module eeprom data, puts the outcome data into
1359 * data pointer paramer.
1360 * Returns num of read bytes on success or a negative error
1363 int mlx4_get_module_info(struct mlx4_dev
*dev
, u8 port
,
1364 u16 offset
, u16 size
, u8
*data
)
1366 struct mlx4_cmd_mailbox
*inbox
, *outbox
;
1367 struct mlx4_mad_ifc
*inmad
, *outmad
;
1368 struct mlx4_cable_info
*cable_info
;
1372 if (size
> MODULE_INFO_MAX_READ
)
1373 size
= MODULE_INFO_MAX_READ
;
1375 inbox
= mlx4_alloc_cmd_mailbox(dev
);
1377 return PTR_ERR(inbox
);
1379 outbox
= mlx4_alloc_cmd_mailbox(dev
);
1380 if (IS_ERR(outbox
)) {
1381 mlx4_free_cmd_mailbox(dev
, inbox
);
1382 return PTR_ERR(outbox
);
1385 inmad
= (struct mlx4_mad_ifc
*)(inbox
->buf
);
1386 outmad
= (struct mlx4_mad_ifc
*)(outbox
->buf
);
1388 inmad
->method
= 0x1; /* Get */
1389 inmad
->class_version
= 0x1;
1390 inmad
->mgmt_class
= 0x1;
1391 inmad
->base_version
= 0x1;
1392 inmad
->attr_id
= cpu_to_be16(0xFF60); /* Module Info */
1394 if (offset
< I2C_PAGE_SIZE
&& offset
+ size
> I2C_PAGE_SIZE
)
1395 /* Cross pages reads are not allowed
1396 * read until offset 256 in low page
1398 size
-= offset
+ size
- I2C_PAGE_SIZE
;
1400 i2c_addr
= I2C_ADDR_LOW
;
1401 if (offset
>= I2C_PAGE_SIZE
) {
1402 /* Reset offset to high page */
1403 i2c_addr
= I2C_ADDR_HIGH
;
1404 offset
-= I2C_PAGE_SIZE
;
1407 cable_info
= (struct mlx4_cable_info
*)inmad
->data
;
1408 cable_info
->dev_mem_address
= cpu_to_be16(offset
);
1409 cable_info
->page_num
= 0;
1410 cable_info
->i2c_addr
= i2c_addr
;
1411 cable_info
->size
= cpu_to_be16(size
);
1413 ret
= mlx4_cmd_box(dev
, inbox
->dma
, outbox
->dma
, port
, 3,
1414 MLX4_CMD_MAD_IFC
, MLX4_CMD_TIME_CLASS_C
,
1419 if (be16_to_cpu(outmad
->status
)) {
1420 /* Mad returned with bad status */
1421 ret
= be16_to_cpu(outmad
->status
);
1423 "MLX4_CMD_MAD_IFC Get Module info attr(%x) port(%d) i2c_addr(%x) offset(%d) size(%d): Response Mad Status(%x) - %s\n",
1424 0xFF60, port
, i2c_addr
, offset
, size
,
1425 ret
, cable_info_mad_err_str(ret
));
1427 if (i2c_addr
== I2C_ADDR_HIGH
&&
1428 MAD_STATUS_2_CABLE_ERR(ret
) == CABLE_INF_I2C_ADDR
)
1429 /* Some SFP cables do not support i2c slave
1430 * address 0x51 (high page), abort silently.
1437 cable_info
= (struct mlx4_cable_info
*)outmad
->data
;
1438 memcpy(data
, cable_info
->data
, size
);
1441 mlx4_free_cmd_mailbox(dev
, inbox
);
1442 mlx4_free_cmd_mailbox(dev
, outbox
);
1445 EXPORT_SYMBOL(mlx4_get_module_info
);