/*
 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>

#include <linux/mlx4/cmd.h>
/* Bit set in a MAC/VLAN table entry to mark it as in use by firmware. */
#define MLX4_MAC_VALID		(1ull << 63)

#define MLX4_VLAN_VALID		(1u << 31)
#define MLX4_VLAN_MASK		0xfff

/* Groups of counters selected via the stats bitmap passed to firmware. */
#define MLX4_STATS_TRAFFIC_COUNTERS_MASK	0xfULL
#define MLX4_STATS_TRAFFIC_DROPS_MASK		0xc0ULL
#define MLX4_STATS_ERROR_COUNTERS_MASK		0x1ffc30ULL
#define MLX4_STATS_PORT_COUNTERS_MASK		0x1fe00000ULL
52 void mlx4_init_mac_table(struct mlx4_dev
*dev
, struct mlx4_mac_table
*table
)
56 mutex_init(&table
->mutex
);
57 for (i
= 0; i
< MLX4_MAX_MAC_NUM
; i
++) {
58 table
->entries
[i
] = 0;
61 table
->max
= 1 << dev
->caps
.log_num_macs
;
65 void mlx4_init_vlan_table(struct mlx4_dev
*dev
, struct mlx4_vlan_table
*table
)
69 mutex_init(&table
->mutex
);
70 for (i
= 0; i
< MLX4_MAX_VLAN_NUM
; i
++) {
71 table
->entries
[i
] = 0;
74 table
->max
= (1 << dev
->caps
.log_num_vlans
) - MLX4_VLAN_REGULAR
;
78 static int validate_index(struct mlx4_dev
*dev
,
79 struct mlx4_mac_table
*table
, int index
)
83 if (index
< 0 || index
>= table
->max
|| !table
->entries
[index
]) {
84 mlx4_warn(dev
, "No valid Mac entry for the given index\n");
90 static int find_index(struct mlx4_dev
*dev
,
91 struct mlx4_mac_table
*table
, u64 mac
)
95 for (i
= 0; i
< MLX4_MAX_MAC_NUM
; i
++) {
96 if ((mac
& MLX4_MAC_MASK
) ==
97 (MLX4_MAC_MASK
& be64_to_cpu(table
->entries
[i
])))
104 static int mlx4_set_port_mac_table(struct mlx4_dev
*dev
, u8 port
,
107 struct mlx4_cmd_mailbox
*mailbox
;
111 mailbox
= mlx4_alloc_cmd_mailbox(dev
);
113 return PTR_ERR(mailbox
);
115 memcpy(mailbox
->buf
, entries
, MLX4_MAC_TABLE_SIZE
);
117 in_mod
= MLX4_SET_PORT_MAC_TABLE
<< 8 | port
;
119 err
= mlx4_cmd(dev
, mailbox
->dma
, in_mod
, 1, MLX4_CMD_SET_PORT
,
120 MLX4_CMD_TIME_CLASS_B
, MLX4_CMD_NATIVE
);
122 mlx4_free_cmd_mailbox(dev
, mailbox
);
126 int mlx4_find_cached_mac(struct mlx4_dev
*dev
, u8 port
, u64 mac
, int *idx
)
128 struct mlx4_port_info
*info
= &mlx4_priv(dev
)->port
[port
];
129 struct mlx4_mac_table
*table
= &info
->mac_table
;
132 for (i
= 0; i
< MLX4_MAX_MAC_NUM
; i
++) {
136 if (mac
== (MLX4_MAC_MASK
& be64_to_cpu(table
->entries
[i
]))) {
144 EXPORT_SYMBOL_GPL(mlx4_find_cached_mac
);
146 int __mlx4_register_mac(struct mlx4_dev
*dev
, u8 port
, u64 mac
)
148 struct mlx4_port_info
*info
= &mlx4_priv(dev
)->port
[port
];
149 struct mlx4_mac_table
*table
= &info
->mac_table
;
153 mlx4_dbg(dev
, "Registering MAC: 0x%llx for port %d\n",
154 (unsigned long long) mac
, port
);
156 mutex_lock(&table
->mutex
);
157 for (i
= 0; i
< MLX4_MAX_MAC_NUM
; i
++) {
158 if (free
< 0 && !table
->entries
[i
]) {
163 if (mac
== (MLX4_MAC_MASK
& be64_to_cpu(table
->entries
[i
]))) {
164 /* MAC already registered, increment ref count */
171 mlx4_dbg(dev
, "Free MAC index is %d\n", free
);
173 if (table
->total
== table
->max
) {
174 /* No free mac entries */
179 /* Register new MAC */
180 table
->entries
[free
] = cpu_to_be64(mac
| MLX4_MAC_VALID
);
182 err
= mlx4_set_port_mac_table(dev
, port
, table
->entries
);
184 mlx4_err(dev
, "Failed adding MAC: 0x%llx\n",
185 (unsigned long long) mac
);
186 table
->entries
[free
] = 0;
189 table
->refs
[free
] = 1;
193 mutex_unlock(&table
->mutex
);
196 EXPORT_SYMBOL_GPL(__mlx4_register_mac
);
198 int mlx4_register_mac(struct mlx4_dev
*dev
, u8 port
, u64 mac
)
203 if (mlx4_is_mfunc(dev
)) {
204 if (!(dev
->flags
& MLX4_FLAG_OLD_REG_MAC
)) {
205 err
= mlx4_cmd_imm(dev
, mac
, &out_param
,
206 ((u32
) port
) << 8 | (u32
) RES_MAC
,
207 RES_OP_RESERVE_AND_MAP
, MLX4_CMD_ALLOC_RES
,
208 MLX4_CMD_TIME_CLASS_A
, MLX4_CMD_WRAPPED
);
210 if (err
&& err
== -EINVAL
&& mlx4_is_slave(dev
)) {
211 /* retry using old REG_MAC format */
212 set_param_l(&out_param
, port
);
213 err
= mlx4_cmd_imm(dev
, mac
, &out_param
, RES_MAC
,
214 RES_OP_RESERVE_AND_MAP
, MLX4_CMD_ALLOC_RES
,
215 MLX4_CMD_TIME_CLASS_A
, MLX4_CMD_WRAPPED
);
217 dev
->flags
|= MLX4_FLAG_OLD_REG_MAC
;
222 return get_param_l(&out_param
);
224 return __mlx4_register_mac(dev
, port
, mac
);
226 EXPORT_SYMBOL_GPL(mlx4_register_mac
);
228 int mlx4_get_base_qpn(struct mlx4_dev
*dev
, u8 port
)
230 return dev
->caps
.reserved_qps_base
[MLX4_QP_REGION_ETH_ADDR
] +
231 (port
- 1) * (1 << dev
->caps
.log_num_macs
);
233 EXPORT_SYMBOL_GPL(mlx4_get_base_qpn
);
235 void __mlx4_unregister_mac(struct mlx4_dev
*dev
, u8 port
, u64 mac
)
237 struct mlx4_port_info
*info
= &mlx4_priv(dev
)->port
[port
];
238 struct mlx4_mac_table
*table
= &info
->mac_table
;
241 mutex_lock(&table
->mutex
);
242 index
= find_index(dev
, table
, mac
);
244 if (validate_index(dev
, table
, index
))
246 if (--table
->refs
[index
]) {
247 mlx4_dbg(dev
, "Have more references for index %d,"
248 "no need to modify mac table\n", index
);
252 table
->entries
[index
] = 0;
253 mlx4_set_port_mac_table(dev
, port
, table
->entries
);
256 mutex_unlock(&table
->mutex
);
258 EXPORT_SYMBOL_GPL(__mlx4_unregister_mac
);
260 void mlx4_unregister_mac(struct mlx4_dev
*dev
, u8 port
, u64 mac
)
264 if (mlx4_is_mfunc(dev
)) {
265 if (!(dev
->flags
& MLX4_FLAG_OLD_REG_MAC
)) {
266 (void) mlx4_cmd_imm(dev
, mac
, &out_param
,
267 ((u32
) port
) << 8 | (u32
) RES_MAC
,
268 RES_OP_RESERVE_AND_MAP
, MLX4_CMD_FREE_RES
,
269 MLX4_CMD_TIME_CLASS_A
, MLX4_CMD_WRAPPED
);
271 /* use old unregister mac format */
272 set_param_l(&out_param
, port
);
273 (void) mlx4_cmd_imm(dev
, mac
, &out_param
, RES_MAC
,
274 RES_OP_RESERVE_AND_MAP
, MLX4_CMD_FREE_RES
,
275 MLX4_CMD_TIME_CLASS_A
, MLX4_CMD_WRAPPED
);
279 __mlx4_unregister_mac(dev
, port
, mac
);
282 EXPORT_SYMBOL_GPL(mlx4_unregister_mac
);
284 int __mlx4_replace_mac(struct mlx4_dev
*dev
, u8 port
, int qpn
, u64 new_mac
)
286 struct mlx4_port_info
*info
= &mlx4_priv(dev
)->port
[port
];
287 struct mlx4_mac_table
*table
= &info
->mac_table
;
288 int index
= qpn
- info
->base_qpn
;
291 /* CX1 doesn't support multi-functions */
292 mutex_lock(&table
->mutex
);
294 err
= validate_index(dev
, table
, index
);
298 table
->entries
[index
] = cpu_to_be64(new_mac
| MLX4_MAC_VALID
);
300 err
= mlx4_set_port_mac_table(dev
, port
, table
->entries
);
302 mlx4_err(dev
, "Failed adding MAC: 0x%llx\n",
303 (unsigned long long) new_mac
);
304 table
->entries
[index
] = 0;
307 mutex_unlock(&table
->mutex
);
310 EXPORT_SYMBOL_GPL(__mlx4_replace_mac
);
312 static int mlx4_set_port_vlan_table(struct mlx4_dev
*dev
, u8 port
,
315 struct mlx4_cmd_mailbox
*mailbox
;
319 mailbox
= mlx4_alloc_cmd_mailbox(dev
);
321 return PTR_ERR(mailbox
);
323 memcpy(mailbox
->buf
, entries
, MLX4_VLAN_TABLE_SIZE
);
324 in_mod
= MLX4_SET_PORT_VLAN_TABLE
<< 8 | port
;
325 err
= mlx4_cmd(dev
, mailbox
->dma
, in_mod
, 1, MLX4_CMD_SET_PORT
,
326 MLX4_CMD_TIME_CLASS_B
, MLX4_CMD_NATIVE
);
328 mlx4_free_cmd_mailbox(dev
, mailbox
);
333 int mlx4_find_cached_vlan(struct mlx4_dev
*dev
, u8 port
, u16 vid
, int *idx
)
335 struct mlx4_vlan_table
*table
= &mlx4_priv(dev
)->port
[port
].vlan_table
;
338 for (i
= 0; i
< MLX4_MAX_VLAN_NUM
; ++i
) {
339 if (table
->refs
[i
] &&
340 (vid
== (MLX4_VLAN_MASK
&
341 be32_to_cpu(table
->entries
[i
])))) {
342 /* VLAN already registered, increase reference count */
350 EXPORT_SYMBOL_GPL(mlx4_find_cached_vlan
);
352 int __mlx4_register_vlan(struct mlx4_dev
*dev
, u8 port
, u16 vlan
,
355 struct mlx4_vlan_table
*table
= &mlx4_priv(dev
)->port
[port
].vlan_table
;
359 mutex_lock(&table
->mutex
);
361 if (table
->total
== table
->max
) {
362 /* No free vlan entries */
367 for (i
= MLX4_VLAN_REGULAR
; i
< MLX4_MAX_VLAN_NUM
; i
++) {
368 if (free
< 0 && (table
->refs
[i
] == 0)) {
373 if (table
->refs
[i
] &&
374 (vlan
== (MLX4_VLAN_MASK
&
375 be32_to_cpu(table
->entries
[i
])))) {
376 /* Vlan already registered, increase references count */
388 /* Register new VLAN */
389 table
->refs
[free
] = 1;
390 table
->entries
[free
] = cpu_to_be32(vlan
| MLX4_VLAN_VALID
);
392 err
= mlx4_set_port_vlan_table(dev
, port
, table
->entries
);
394 mlx4_warn(dev
, "Failed adding vlan: %u\n", vlan
);
395 table
->refs
[free
] = 0;
396 table
->entries
[free
] = 0;
403 mutex_unlock(&table
->mutex
);
407 int mlx4_register_vlan(struct mlx4_dev
*dev
, u8 port
, u16 vlan
, int *index
)
415 if (mlx4_is_mfunc(dev
)) {
416 err
= mlx4_cmd_imm(dev
, vlan
, &out_param
,
417 ((u32
) port
) << 8 | (u32
) RES_VLAN
,
418 RES_OP_RESERVE_AND_MAP
, MLX4_CMD_ALLOC_RES
,
419 MLX4_CMD_TIME_CLASS_A
, MLX4_CMD_WRAPPED
);
421 *index
= get_param_l(&out_param
);
425 return __mlx4_register_vlan(dev
, port
, vlan
, index
);
427 EXPORT_SYMBOL_GPL(mlx4_register_vlan
);
429 void __mlx4_unregister_vlan(struct mlx4_dev
*dev
, u8 port
, u16 vlan
)
431 struct mlx4_vlan_table
*table
= &mlx4_priv(dev
)->port
[port
].vlan_table
;
434 mutex_lock(&table
->mutex
);
435 if (mlx4_find_cached_vlan(dev
, port
, vlan
, &index
)) {
436 mlx4_warn(dev
, "vlan 0x%x is not in the vlan table\n", vlan
);
440 if (index
< MLX4_VLAN_REGULAR
) {
441 mlx4_warn(dev
, "Trying to free special vlan index %d\n", index
);
445 if (--table
->refs
[index
]) {
446 mlx4_dbg(dev
, "Have %d more references for index %d,"
447 "no need to modify vlan table\n", table
->refs
[index
],
451 table
->entries
[index
] = 0;
452 mlx4_set_port_vlan_table(dev
, port
, table
->entries
);
455 mutex_unlock(&table
->mutex
);
458 void mlx4_unregister_vlan(struct mlx4_dev
*dev
, u8 port
, u16 vlan
)
462 if (mlx4_is_mfunc(dev
)) {
463 (void) mlx4_cmd_imm(dev
, vlan
, &out_param
,
464 ((u32
) port
) << 8 | (u32
) RES_VLAN
,
465 RES_OP_RESERVE_AND_MAP
,
466 MLX4_CMD_FREE_RES
, MLX4_CMD_TIME_CLASS_A
,
470 __mlx4_unregister_vlan(dev
, port
, vlan
);
472 EXPORT_SYMBOL_GPL(mlx4_unregister_vlan
);
474 int mlx4_get_port_ib_caps(struct mlx4_dev
*dev
, u8 port
, __be32
*caps
)
476 struct mlx4_cmd_mailbox
*inmailbox
, *outmailbox
;
480 inmailbox
= mlx4_alloc_cmd_mailbox(dev
);
481 if (IS_ERR(inmailbox
))
482 return PTR_ERR(inmailbox
);
484 outmailbox
= mlx4_alloc_cmd_mailbox(dev
);
485 if (IS_ERR(outmailbox
)) {
486 mlx4_free_cmd_mailbox(dev
, inmailbox
);
487 return PTR_ERR(outmailbox
);
490 inbuf
= inmailbox
->buf
;
491 outbuf
= outmailbox
->buf
;
496 *(__be16
*) (&inbuf
[16]) = cpu_to_be16(0x0015);
497 *(__be32
*) (&inbuf
[20]) = cpu_to_be32(port
);
499 err
= mlx4_cmd_box(dev
, inmailbox
->dma
, outmailbox
->dma
, port
, 3,
500 MLX4_CMD_MAD_IFC
, MLX4_CMD_TIME_CLASS_C
,
503 *caps
= *(__be32
*) (outbuf
+ 84);
504 mlx4_free_cmd_mailbox(dev
, inmailbox
);
505 mlx4_free_cmd_mailbox(dev
, outmailbox
);
508 static struct mlx4_roce_gid_entry zgid_entry
;
510 int mlx4_get_slave_num_gids(struct mlx4_dev
*dev
, int slave
, int port
)
513 int slave_gid
= slave
;
515 struct mlx4_slaves_pport slaves_pport
;
516 struct mlx4_active_ports actv_ports
;
517 unsigned max_port_p_one
;
520 return MLX4_ROCE_PF_GIDS
;
523 slaves_pport
= mlx4_phys_to_slaves_pport(dev
, port
);
524 actv_ports
= mlx4_get_active_ports(dev
, slave
);
525 max_port_p_one
= find_first_bit(actv_ports
.ports
, dev
->caps
.num_ports
) +
526 bitmap_weight(actv_ports
.ports
, dev
->caps
.num_ports
) + 1;
528 for (i
= 1; i
< max_port_p_one
; i
++) {
529 struct mlx4_active_ports exclusive_ports
;
530 struct mlx4_slaves_pport slaves_pport_actv
;
531 bitmap_zero(exclusive_ports
.ports
, dev
->caps
.num_ports
);
532 set_bit(i
- 1, exclusive_ports
.ports
);
535 slaves_pport_actv
= mlx4_phys_to_slaves_pport_actv(
536 dev
, &exclusive_ports
);
537 slave_gid
-= bitmap_weight(slaves_pport_actv
.slaves
,
540 vfs
= bitmap_weight(slaves_pport
.slaves
, dev
->num_vfs
+ 1) - 1;
541 if (slave_gid
<= ((MLX4_ROCE_MAX_GIDS
- MLX4_ROCE_PF_GIDS
) % vfs
))
542 return ((MLX4_ROCE_MAX_GIDS
- MLX4_ROCE_PF_GIDS
) / vfs
) + 1;
543 return (MLX4_ROCE_MAX_GIDS
- MLX4_ROCE_PF_GIDS
) / vfs
;
546 int mlx4_get_base_gid_ix(struct mlx4_dev
*dev
, int slave
, int port
)
550 int slave_gid
= slave
;
553 struct mlx4_slaves_pport slaves_pport
;
554 struct mlx4_active_ports actv_ports
;
555 unsigned max_port_p_one
;
560 slaves_pport
= mlx4_phys_to_slaves_pport(dev
, port
);
561 actv_ports
= mlx4_get_active_ports(dev
, slave
);
562 max_port_p_one
= find_first_bit(actv_ports
.ports
, dev
->caps
.num_ports
) +
563 bitmap_weight(actv_ports
.ports
, dev
->caps
.num_ports
) + 1;
565 for (i
= 1; i
< max_port_p_one
; i
++) {
566 struct mlx4_active_ports exclusive_ports
;
567 struct mlx4_slaves_pport slaves_pport_actv
;
568 bitmap_zero(exclusive_ports
.ports
, dev
->caps
.num_ports
);
569 set_bit(i
- 1, exclusive_ports
.ports
);
572 slaves_pport_actv
= mlx4_phys_to_slaves_pport_actv(
573 dev
, &exclusive_ports
);
574 slave_gid
-= bitmap_weight(slaves_pport_actv
.slaves
,
577 gids
= MLX4_ROCE_MAX_GIDS
- MLX4_ROCE_PF_GIDS
;
578 vfs
= bitmap_weight(slaves_pport
.slaves
, dev
->num_vfs
+ 1) - 1;
579 if (slave_gid
<= gids
% vfs
)
580 return MLX4_ROCE_PF_GIDS
+ ((gids
/ vfs
) + 1) * (slave_gid
- 1);
582 return MLX4_ROCE_PF_GIDS
+ (gids
% vfs
) +
583 ((gids
/ vfs
) * (slave_gid
- 1));
585 EXPORT_SYMBOL_GPL(mlx4_get_base_gid_ix
);
587 static int mlx4_common_set_port(struct mlx4_dev
*dev
, int slave
, u32 in_mod
,
588 u8 op_mod
, struct mlx4_cmd_mailbox
*inbox
)
590 struct mlx4_priv
*priv
= mlx4_priv(dev
);
591 struct mlx4_port_info
*port_info
;
592 struct mlx4_mfunc_master_ctx
*master
= &priv
->mfunc
.master
;
593 struct mlx4_slave_state
*slave_st
= &master
->slave_state
[slave
];
594 struct mlx4_set_port_rqp_calc_context
*qpn_context
;
595 struct mlx4_set_port_general_context
*gen_context
;
596 struct mlx4_roce_gid_entry
*gid_entry_tbl
, *gid_entry_mbox
, *gid_entry_mb1
;
597 int reset_qkey_viols
;
609 __be32 slave_cap_mask
;
612 port
= in_mod
& 0xff;
613 in_modifier
= in_mod
>> 8;
615 port_info
= &priv
->port
[port
];
617 /* Slaves cannot perform SET_PORT operations except changing MTU */
619 if (slave
!= dev
->caps
.function
&&
620 in_modifier
!= MLX4_SET_PORT_GENERAL
&&
621 in_modifier
!= MLX4_SET_PORT_GID_TABLE
) {
622 mlx4_warn(dev
, "denying SET_PORT for slave:%d\n",
626 switch (in_modifier
) {
627 case MLX4_SET_PORT_RQP_CALC
:
628 qpn_context
= inbox
->buf
;
629 qpn_context
->base_qpn
=
630 cpu_to_be32(port_info
->base_qpn
);
631 qpn_context
->n_mac
= 0x7;
632 promisc
= be32_to_cpu(qpn_context
->promisc
) >>
633 SET_PORT_PROMISC_SHIFT
;
634 qpn_context
->promisc
= cpu_to_be32(
635 promisc
<< SET_PORT_PROMISC_SHIFT
|
636 port_info
->base_qpn
);
637 promisc
= be32_to_cpu(qpn_context
->mcast
) >>
638 SET_PORT_MC_PROMISC_SHIFT
;
639 qpn_context
->mcast
= cpu_to_be32(
640 promisc
<< SET_PORT_MC_PROMISC_SHIFT
|
641 port_info
->base_qpn
);
643 case MLX4_SET_PORT_GENERAL
:
644 gen_context
= inbox
->buf
;
645 /* Mtu is configured as the max MTU among all the
646 * the functions on the port. */
647 mtu
= be16_to_cpu(gen_context
->mtu
);
648 mtu
= min_t(int, mtu
, dev
->caps
.eth_mtu_cap
[port
] +
649 ETH_HLEN
+ VLAN_HLEN
+ ETH_FCS_LEN
);
650 prev_mtu
= slave_st
->mtu
[port
];
651 slave_st
->mtu
[port
] = mtu
;
652 if (mtu
> master
->max_mtu
[port
])
653 master
->max_mtu
[port
] = mtu
;
654 if (mtu
< prev_mtu
&& prev_mtu
==
655 master
->max_mtu
[port
]) {
656 slave_st
->mtu
[port
] = mtu
;
657 master
->max_mtu
[port
] = mtu
;
658 for (i
= 0; i
< dev
->num_slaves
; i
++) {
659 master
->max_mtu
[port
] =
660 max(master
->max_mtu
[port
],
661 master
->slave_state
[i
].mtu
[port
]);
665 gen_context
->mtu
= cpu_to_be16(master
->max_mtu
[port
]);
667 case MLX4_SET_PORT_GID_TABLE
:
668 /* change to MULTIPLE entries: number of guest's gids
669 * need a FOR-loop here over number of gids the guest has.
670 * 1. Check no duplicates in gids passed by slave
672 num_gids
= mlx4_get_slave_num_gids(dev
, slave
, port
);
673 base
= mlx4_get_base_gid_ix(dev
, slave
, port
);
674 gid_entry_mbox
= (struct mlx4_roce_gid_entry
*)(inbox
->buf
);
675 for (i
= 0; i
< num_gids
; gid_entry_mbox
++, i
++) {
676 if (!memcmp(gid_entry_mbox
->raw
, zgid_entry
.raw
,
679 gid_entry_mb1
= gid_entry_mbox
+ 1;
680 for (j
= i
+ 1; j
< num_gids
; gid_entry_mb1
++, j
++) {
681 if (!memcmp(gid_entry_mb1
->raw
,
682 zgid_entry
.raw
, sizeof(zgid_entry
)))
684 if (!memcmp(gid_entry_mb1
->raw
, gid_entry_mbox
->raw
,
685 sizeof(gid_entry_mbox
->raw
))) {
686 /* found duplicate */
692 /* 2. Check that do not have duplicates in OTHER
693 * entries in the port GID table
695 for (i
= 0; i
< MLX4_ROCE_MAX_GIDS
; i
++) {
696 if (i
>= base
&& i
< base
+ num_gids
)
697 continue; /* don't compare to slave's current gids */
698 gid_entry_tbl
= &priv
->roce_gids
[port
- 1][i
];
699 if (!memcmp(gid_entry_tbl
->raw
, zgid_entry
.raw
, sizeof(zgid_entry
)))
701 gid_entry_mbox
= (struct mlx4_roce_gid_entry
*)(inbox
->buf
);
702 for (j
= 0; j
< num_gids
; gid_entry_mbox
++, j
++) {
703 if (!memcmp(gid_entry_mbox
->raw
, zgid_entry
.raw
,
706 if (!memcmp(gid_entry_mbox
->raw
, gid_entry_tbl
->raw
,
707 sizeof(gid_entry_tbl
->raw
))) {
708 /* found duplicate */
709 mlx4_warn(dev
, "requested gid entry for slave:%d "
710 "is a duplicate of gid at index %d\n",
717 /* insert slave GIDs with memcpy, starting at slave's base index */
718 gid_entry_mbox
= (struct mlx4_roce_gid_entry
*)(inbox
->buf
);
719 for (i
= 0, offset
= base
; i
< num_gids
; gid_entry_mbox
++, offset
++, i
++)
720 memcpy(priv
->roce_gids
[port
- 1][offset
].raw
, gid_entry_mbox
->raw
, 16);
722 /* Now, copy roce port gids table to current mailbox for passing to FW */
723 gid_entry_mbox
= (struct mlx4_roce_gid_entry
*)(inbox
->buf
);
724 for (i
= 0; i
< MLX4_ROCE_MAX_GIDS
; gid_entry_mbox
++, i
++)
725 memcpy(gid_entry_mbox
->raw
, priv
->roce_gids
[port
- 1][i
].raw
, 16);
729 return mlx4_cmd(dev
, inbox
->dma
, in_mod
, op_mod
,
730 MLX4_CMD_SET_PORT
, MLX4_CMD_TIME_CLASS_B
,
734 /* For IB, we only consider:
735 * - The capability mask, which is set to the aggregate of all
736 * slave function capabilities
737 * - The QKey violatin counter - reset according to each request.
740 if (dev
->flags
& MLX4_FLAG_OLD_PORT_CMDS
) {
741 reset_qkey_viols
= (*(u8
*) inbox
->buf
) & 0x40;
742 new_cap_mask
= ((__be32
*) inbox
->buf
)[2];
744 reset_qkey_viols
= ((u8
*) inbox
->buf
)[3] & 0x1;
745 new_cap_mask
= ((__be32
*) inbox
->buf
)[1];
748 /* slave may not set the IS_SM capability for the port */
749 if (slave
!= mlx4_master_func_num(dev
) &&
750 (be32_to_cpu(new_cap_mask
) & MLX4_PORT_CAP_IS_SM
))
753 /* No DEV_MGMT in multifunc mode */
754 if (mlx4_is_mfunc(dev
) &&
755 (be32_to_cpu(new_cap_mask
) & MLX4_PORT_CAP_DEV_MGMT_SUP
))
760 priv
->mfunc
.master
.slave_state
[slave
].ib_cap_mask
[port
];
761 priv
->mfunc
.master
.slave_state
[slave
].ib_cap_mask
[port
] = new_cap_mask
;
762 for (i
= 0; i
< dev
->num_slaves
; i
++)
764 priv
->mfunc
.master
.slave_state
[i
].ib_cap_mask
[port
];
766 /* only clear mailbox for guests. Master may be setting
767 * MTU or PKEY table size
769 if (slave
!= dev
->caps
.function
)
770 memset(inbox
->buf
, 0, 256);
771 if (dev
->flags
& MLX4_FLAG_OLD_PORT_CMDS
) {
772 *(u8
*) inbox
->buf
|= !!reset_qkey_viols
<< 6;
773 ((__be32
*) inbox
->buf
)[2] = agg_cap_mask
;
775 ((u8
*) inbox
->buf
)[3] |= !!reset_qkey_viols
;
776 ((__be32
*) inbox
->buf
)[1] = agg_cap_mask
;
779 err
= mlx4_cmd(dev
, inbox
->dma
, port
, is_eth
, MLX4_CMD_SET_PORT
,
780 MLX4_CMD_TIME_CLASS_B
, MLX4_CMD_NATIVE
);
782 priv
->mfunc
.master
.slave_state
[slave
].ib_cap_mask
[port
] =
787 int mlx4_SET_PORT_wrapper(struct mlx4_dev
*dev
, int slave
,
788 struct mlx4_vhcr
*vhcr
,
789 struct mlx4_cmd_mailbox
*inbox
,
790 struct mlx4_cmd_mailbox
*outbox
,
791 struct mlx4_cmd_info
*cmd
)
793 int port
= mlx4_slave_convert_port(
794 dev
, slave
, vhcr
->in_modifier
& 0xFF);
799 vhcr
->in_modifier
= (vhcr
->in_modifier
& ~0xFF) |
802 return mlx4_common_set_port(dev
, slave
, vhcr
->in_modifier
,
803 vhcr
->op_modifier
, inbox
);
/* bit locations for set port command with zero op modifier */
enum {
	MLX4_SET_PORT_VL_CAP	 = 4, /* bits 7:4 */
	MLX4_SET_PORT_MTU_CAP	 = 12, /* bits 15:12 */
	MLX4_CHANGE_PORT_PKEY_TBL_SZ = 20,
	MLX4_CHANGE_PORT_VL_CAP	 = 21,
	MLX4_CHANGE_PORT_MTU_CAP = 22,
};
815 int mlx4_SET_PORT(struct mlx4_dev
*dev
, u8 port
, int pkey_tbl_sz
)
817 struct mlx4_cmd_mailbox
*mailbox
;
818 int err
, vl_cap
, pkey_tbl_flag
= 0;
820 if (dev
->caps
.port_type
[port
] == MLX4_PORT_TYPE_ETH
)
823 mailbox
= mlx4_alloc_cmd_mailbox(dev
);
825 return PTR_ERR(mailbox
);
827 ((__be32
*) mailbox
->buf
)[1] = dev
->caps
.ib_port_def_cap
[port
];
829 if (pkey_tbl_sz
>= 0 && mlx4_is_master(dev
)) {
831 ((__be16
*) mailbox
->buf
)[20] = cpu_to_be16(pkey_tbl_sz
);
834 /* IB VL CAP enum isn't used by the firmware, just numerical values */
835 for (vl_cap
= 8; vl_cap
>= 1; vl_cap
>>= 1) {
836 ((__be32
*) mailbox
->buf
)[0] = cpu_to_be32(
837 (1 << MLX4_CHANGE_PORT_MTU_CAP
) |
838 (1 << MLX4_CHANGE_PORT_VL_CAP
) |
839 (pkey_tbl_flag
<< MLX4_CHANGE_PORT_PKEY_TBL_SZ
) |
840 (dev
->caps
.port_ib_mtu
[port
] << MLX4_SET_PORT_MTU_CAP
) |
841 (vl_cap
<< MLX4_SET_PORT_VL_CAP
));
842 err
= mlx4_cmd(dev
, mailbox
->dma
, port
, 0, MLX4_CMD_SET_PORT
,
843 MLX4_CMD_TIME_CLASS_B
, MLX4_CMD_WRAPPED
);
848 mlx4_free_cmd_mailbox(dev
, mailbox
);
852 int mlx4_SET_PORT_general(struct mlx4_dev
*dev
, u8 port
, int mtu
,
853 u8 pptx
, u8 pfctx
, u8 pprx
, u8 pfcrx
)
855 struct mlx4_cmd_mailbox
*mailbox
;
856 struct mlx4_set_port_general_context
*context
;
860 mailbox
= mlx4_alloc_cmd_mailbox(dev
);
862 return PTR_ERR(mailbox
);
863 context
= mailbox
->buf
;
864 context
->flags
= SET_PORT_GEN_ALL_VALID
;
865 context
->mtu
= cpu_to_be16(mtu
);
866 context
->pptx
= (pptx
* (!pfctx
)) << 7;
867 context
->pfctx
= pfctx
;
868 context
->pprx
= (pprx
* (!pfcrx
)) << 7;
869 context
->pfcrx
= pfcrx
;
871 in_mod
= MLX4_SET_PORT_GENERAL
<< 8 | port
;
872 err
= mlx4_cmd(dev
, mailbox
->dma
, in_mod
, 1, MLX4_CMD_SET_PORT
,
873 MLX4_CMD_TIME_CLASS_B
, MLX4_CMD_WRAPPED
);
875 mlx4_free_cmd_mailbox(dev
, mailbox
);
878 EXPORT_SYMBOL(mlx4_SET_PORT_general
);
880 int mlx4_SET_PORT_qpn_calc(struct mlx4_dev
*dev
, u8 port
, u32 base_qpn
,
883 struct mlx4_cmd_mailbox
*mailbox
;
884 struct mlx4_set_port_rqp_calc_context
*context
;
887 u32 m_promisc
= (dev
->caps
.flags
& MLX4_DEV_CAP_FLAG_VEP_MC_STEER
) ?
888 MCAST_DIRECT
: MCAST_DEFAULT
;
890 if (dev
->caps
.steering_mode
!= MLX4_STEERING_MODE_A0
)
893 mailbox
= mlx4_alloc_cmd_mailbox(dev
);
895 return PTR_ERR(mailbox
);
896 context
= mailbox
->buf
;
897 context
->base_qpn
= cpu_to_be32(base_qpn
);
898 context
->n_mac
= dev
->caps
.log_num_macs
;
899 context
->promisc
= cpu_to_be32(promisc
<< SET_PORT_PROMISC_SHIFT
|
901 context
->mcast
= cpu_to_be32(m_promisc
<< SET_PORT_MC_PROMISC_SHIFT
|
903 context
->intra_no_vlan
= 0;
904 context
->no_vlan
= MLX4_NO_VLAN_IDX
;
905 context
->intra_vlan_miss
= 0;
906 context
->vlan_miss
= MLX4_VLAN_MISS_IDX
;
908 in_mod
= MLX4_SET_PORT_RQP_CALC
<< 8 | port
;
909 err
= mlx4_cmd(dev
, mailbox
->dma
, in_mod
, 1, MLX4_CMD_SET_PORT
,
910 MLX4_CMD_TIME_CLASS_B
, MLX4_CMD_WRAPPED
);
912 mlx4_free_cmd_mailbox(dev
, mailbox
);
915 EXPORT_SYMBOL(mlx4_SET_PORT_qpn_calc
);
917 int mlx4_SET_PORT_PRIO2TC(struct mlx4_dev
*dev
, u8 port
, u8
*prio2tc
)
919 struct mlx4_cmd_mailbox
*mailbox
;
920 struct mlx4_set_port_prio2tc_context
*context
;
925 mailbox
= mlx4_alloc_cmd_mailbox(dev
);
927 return PTR_ERR(mailbox
);
928 context
= mailbox
->buf
;
929 for (i
= 0; i
< MLX4_NUM_UP
; i
+= 2)
930 context
->prio2tc
[i
>> 1] = prio2tc
[i
] << 4 | prio2tc
[i
+ 1];
932 in_mod
= MLX4_SET_PORT_PRIO2TC
<< 8 | port
;
933 err
= mlx4_cmd(dev
, mailbox
->dma
, in_mod
, 1, MLX4_CMD_SET_PORT
,
934 MLX4_CMD_TIME_CLASS_B
, MLX4_CMD_NATIVE
);
936 mlx4_free_cmd_mailbox(dev
, mailbox
);
939 EXPORT_SYMBOL(mlx4_SET_PORT_PRIO2TC
);
941 int mlx4_SET_PORT_SCHEDULER(struct mlx4_dev
*dev
, u8 port
, u8
*tc_tx_bw
,
942 u8
*pg
, u16
*ratelimit
)
944 struct mlx4_cmd_mailbox
*mailbox
;
945 struct mlx4_set_port_scheduler_context
*context
;
950 mailbox
= mlx4_alloc_cmd_mailbox(dev
);
952 return PTR_ERR(mailbox
);
953 context
= mailbox
->buf
;
955 for (i
= 0; i
< MLX4_NUM_TC
; i
++) {
956 struct mlx4_port_scheduler_tc_cfg_be
*tc
= &context
->tc
[i
];
957 u16 r
= ratelimit
&& ratelimit
[i
] ? ratelimit
[i
] :
958 MLX4_RATELIMIT_DEFAULT
;
960 tc
->pg
= htons(pg
[i
]);
961 tc
->bw_precentage
= htons(tc_tx_bw
[i
]);
963 tc
->max_bw_units
= htons(MLX4_RATELIMIT_UNITS
);
964 tc
->max_bw_value
= htons(r
);
967 in_mod
= MLX4_SET_PORT_SCHEDULER
<< 8 | port
;
968 err
= mlx4_cmd(dev
, mailbox
->dma
, in_mod
, 1, MLX4_CMD_SET_PORT
,
969 MLX4_CMD_TIME_CLASS_B
, MLX4_CMD_NATIVE
);
971 mlx4_free_cmd_mailbox(dev
, mailbox
);
974 EXPORT_SYMBOL(mlx4_SET_PORT_SCHEDULER
);
977 VXLAN_ENABLE_MODIFY
= 1 << 7,
978 VXLAN_STEERING_MODIFY
= 1 << 6,
980 VXLAN_ENABLE
= 1 << 7,
983 struct mlx4_set_port_vxlan_context
{
991 int mlx4_SET_PORT_VXLAN(struct mlx4_dev
*dev
, u8 port
, u8 steering
, int enable
)
995 struct mlx4_cmd_mailbox
*mailbox
;
996 struct mlx4_set_port_vxlan_context
*context
;
998 mailbox
= mlx4_alloc_cmd_mailbox(dev
);
1000 return PTR_ERR(mailbox
);
1001 context
= mailbox
->buf
;
1002 memset(context
, 0, sizeof(*context
));
1004 context
->modify_flags
= VXLAN_ENABLE_MODIFY
| VXLAN_STEERING_MODIFY
;
1006 context
->enable_flags
= VXLAN_ENABLE
;
1007 context
->steering
= steering
;
1009 in_mod
= MLX4_SET_PORT_VXLAN
<< 8 | port
;
1010 err
= mlx4_cmd(dev
, mailbox
->dma
, in_mod
, 1, MLX4_CMD_SET_PORT
,
1011 MLX4_CMD_TIME_CLASS_B
, MLX4_CMD_NATIVE
);
1013 mlx4_free_cmd_mailbox(dev
, mailbox
);
1016 EXPORT_SYMBOL(mlx4_SET_PORT_VXLAN
);
/*
 * Command wrapper for SET_MCAST_FLTR from slaves.
 * NOTE(review): body was missing in the truncated source; upstream has
 * this as a stub returning 0 — verify.
 */
int mlx4_SET_MCAST_FLTR_wrapper(struct mlx4_dev *dev, int slave,
				struct mlx4_vhcr *vhcr,
				struct mlx4_cmd_mailbox *inbox,
				struct mlx4_cmd_mailbox *outbox,
				struct mlx4_cmd_info *cmd)
{
	int err = 0;

	return err;
}
1029 int mlx4_SET_MCAST_FLTR(struct mlx4_dev
*dev
, u8 port
,
1030 u64 mac
, u64 clear
, u8 mode
)
1032 return mlx4_cmd(dev
, (mac
| (clear
<< 63)), port
, mode
,
1033 MLX4_CMD_SET_MCAST_FLTR
, MLX4_CMD_TIME_CLASS_B
,
1036 EXPORT_SYMBOL(mlx4_SET_MCAST_FLTR
);
/*
 * Command wrapper for SET_VLAN_FLTR from slaves.
 * NOTE(review): body was missing in the truncated source; upstream has
 * this as a stub returning 0 — verify.
 */
int mlx4_SET_VLAN_FLTR_wrapper(struct mlx4_dev *dev, int slave,
			       struct mlx4_vhcr *vhcr,
			       struct mlx4_cmd_mailbox *inbox,
			       struct mlx4_cmd_mailbox *outbox,
			       struct mlx4_cmd_info *cmd)
{
	int err = 0;

	return err;
}
1049 int mlx4_common_dump_eth_stats(struct mlx4_dev
*dev
, int slave
,
1050 u32 in_mod
, struct mlx4_cmd_mailbox
*outbox
)
1052 return mlx4_cmd_box(dev
, 0, outbox
->dma
, in_mod
, 0,
1053 MLX4_CMD_DUMP_ETH_STATS
, MLX4_CMD_TIME_CLASS_B
,
1057 int mlx4_DUMP_ETH_STATS_wrapper(struct mlx4_dev
*dev
, int slave
,
1058 struct mlx4_vhcr
*vhcr
,
1059 struct mlx4_cmd_mailbox
*inbox
,
1060 struct mlx4_cmd_mailbox
*outbox
,
1061 struct mlx4_cmd_info
*cmd
)
1063 if (slave
!= dev
->caps
.function
)
1065 return mlx4_common_dump_eth_stats(dev
, slave
,
1066 vhcr
->in_modifier
, outbox
);
1069 void mlx4_set_stats_bitmap(struct mlx4_dev
*dev
, u64
*stats_bitmap
)
1071 if (!mlx4_is_mfunc(dev
)) {
1076 *stats_bitmap
= (MLX4_STATS_TRAFFIC_COUNTERS_MASK
|
1077 MLX4_STATS_TRAFFIC_DROPS_MASK
|
1078 MLX4_STATS_PORT_COUNTERS_MASK
);
1080 if (mlx4_is_master(dev
))
1081 *stats_bitmap
|= MLX4_STATS_ERROR_COUNTERS_MASK
;
1083 EXPORT_SYMBOL(mlx4_set_stats_bitmap
);
1085 int mlx4_get_slave_from_roce_gid(struct mlx4_dev
*dev
, int port
, u8
*gid
,
1088 struct mlx4_priv
*priv
= mlx4_priv(dev
);
1089 int i
, found_ix
= -1;
1090 int vf_gids
= MLX4_ROCE_MAX_GIDS
- MLX4_ROCE_PF_GIDS
;
1091 struct mlx4_slaves_pport slaves_pport
;
1095 if (!mlx4_is_mfunc(dev
))
1098 slaves_pport
= mlx4_phys_to_slaves_pport(dev
, port
);
1099 num_vfs
= bitmap_weight(slaves_pport
.slaves
, dev
->num_vfs
+ 1) - 1;
1101 for (i
= 0; i
< MLX4_ROCE_MAX_GIDS
; i
++) {
1102 if (!memcmp(priv
->roce_gids
[port
- 1][i
].raw
, gid
, 16)) {
1108 if (found_ix
>= 0) {
1109 if (found_ix
< MLX4_ROCE_PF_GIDS
)
1111 else if (found_ix
< MLX4_ROCE_PF_GIDS
+ (vf_gids
% num_vfs
) *
1112 (vf_gids
/ num_vfs
+ 1))
1113 slave_gid
= ((found_ix
- MLX4_ROCE_PF_GIDS
) /
1114 (vf_gids
/ num_vfs
+ 1)) + 1;
1117 ((found_ix
- MLX4_ROCE_PF_GIDS
-
1118 ((vf_gids
% num_vfs
) * ((vf_gids
/ num_vfs
+ 1)))) /
1119 (vf_gids
/ num_vfs
)) + vf_gids
% num_vfs
+ 1;
1122 struct mlx4_active_ports exclusive_ports
;
1123 struct mlx4_active_ports actv_ports
;
1124 struct mlx4_slaves_pport slaves_pport_actv
;
1125 unsigned max_port_p_one
;
1126 int num_slaves_before
= 1;
1128 for (i
= 1; i
< port
; i
++) {
1129 bitmap_zero(exclusive_ports
.ports
, dev
->caps
.num_ports
);
1130 set_bit(i
, exclusive_ports
.ports
);
1132 mlx4_phys_to_slaves_pport_actv(
1133 dev
, &exclusive_ports
);
1134 num_slaves_before
+= bitmap_weight(
1135 slaves_pport_actv
.slaves
,
1139 if (slave_gid
< num_slaves_before
) {
1140 bitmap_zero(exclusive_ports
.ports
, dev
->caps
.num_ports
);
1141 set_bit(port
- 1, exclusive_ports
.ports
);
1143 mlx4_phys_to_slaves_pport_actv(
1144 dev
, &exclusive_ports
);
1145 slave_gid
+= bitmap_weight(
1146 slaves_pport_actv
.slaves
,
1150 actv_ports
= mlx4_get_active_ports(dev
, slave_gid
);
1151 max_port_p_one
= find_first_bit(
1152 actv_ports
.ports
, dev
->caps
.num_ports
) +
1153 bitmap_weight(actv_ports
.ports
,
1154 dev
->caps
.num_ports
) + 1;
1156 for (i
= 1; i
< max_port_p_one
; i
++) {
1159 bitmap_zero(exclusive_ports
.ports
,
1160 dev
->caps
.num_ports
);
1161 set_bit(i
- 1, exclusive_ports
.ports
);
1163 mlx4_phys_to_slaves_pport_actv(
1164 dev
, &exclusive_ports
);
1165 slave_gid
+= bitmap_weight(
1166 slaves_pport_actv
.slaves
,
1170 *slave_id
= slave_gid
;
1173 return (found_ix
>= 0) ? 0 : -EINVAL
;
1175 EXPORT_SYMBOL(mlx4_get_slave_from_roce_gid
);
1177 int mlx4_get_roce_gid_from_slave(struct mlx4_dev
*dev
, int port
, int slave_id
,
1180 struct mlx4_priv
*priv
= mlx4_priv(dev
);
1182 if (!mlx4_is_master(dev
))
1185 memcpy(gid
, priv
->roce_gids
[port
- 1][slave_id
].raw
, 16);
1188 EXPORT_SYMBOL(mlx4_get_roce_gid_from_slave
);