/*
 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/string.h>
#include <linux/etherdevice.h>

#include <linux/mlx4/cmd.h>
#include <linux/export.h>

#include "mlx4.h"	/* mlx4_priv(), struct mlx4_mgm, MGM_QPN_MASK */

static const u8 zero_gid[16];	/* automatically initialized to 0 */
int mlx4_get_mgm_entry_size(struct mlx4_dev *dev)
{
	return 1 << dev->oper_log_mgm_entry_size;
}

int mlx4_get_qp_per_mgm(struct mlx4_dev *dev)
{
	return 4 * (mlx4_get_mgm_entry_size(dev) / 16 - 2);
}
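/*
 * Worked example for the two helpers above: with
 * oper_log_mgm_entry_size = 10, an MGM entry is 1 << 10 = 1024 bytes and
 * holds 4 * (1024 / 16 - 2) = 248 QPs.  The formula reads naturally as a
 * 32-byte header (two 16-byte units) followed by 4-byte QP slots; that
 * breakdown is inferred from the arithmetic, not stated elsewhere here.
 */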
static int mlx4_QP_FLOW_STEERING_ATTACH(struct mlx4_dev *dev,
					struct mlx4_cmd_mailbox *mailbox,
					u32 size,
					u64 *reg_id)
{
	u64 imm;
	int err = 0;

	err = mlx4_cmd_imm(dev, mailbox->dma, &imm, size, 0,
			   MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
			   MLX4_CMD_NATIVE);
	if (err)
		return err;
	*reg_id = imm;
	return err;
}
static int mlx4_QP_FLOW_STEERING_DETACH(struct mlx4_dev *dev, u64 regid)
{
	int err = 0;

	err = mlx4_cmd(dev, regid, 0, 0,
		       MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
		       MLX4_CMD_NATIVE);

	return err;
}
static int mlx4_READ_ENTRY(struct mlx4_dev *dev, int index,
			   struct mlx4_cmd_mailbox *mailbox)
{
	return mlx4_cmd_box(dev, 0, mailbox->dma, index, 0, MLX4_CMD_READ_MCG,
			    MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
}
static int mlx4_WRITE_ENTRY(struct mlx4_dev *dev, int index,
			    struct mlx4_cmd_mailbox *mailbox)
{
	return mlx4_cmd(dev, mailbox->dma, index, 0, MLX4_CMD_WRITE_MCG,
			MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
}
static int mlx4_WRITE_PROMISC(struct mlx4_dev *dev, u8 port, u8 steer,
			      struct mlx4_cmd_mailbox *mailbox)
{
	u32 in_mod;

	in_mod = (u32) port << 16 | steer << 1;
	return mlx4_cmd(dev, mailbox->dma, in_mod, 0x1,
			MLX4_CMD_WRITE_MCG, MLX4_CMD_TIME_CLASS_A,
			MLX4_CMD_NATIVE);
}
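/*
 * The WRITE_PROMISC input modifier packs the port into bits 23:16 and
 * the steer type into bit 1, as built above; op_modifier 0x1
 * distinguishes this use of WRITE_MCG from the regular per-entry write
 * in mlx4_WRITE_ENTRY() (an inference from the two call sites).
 */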
static int mlx4_GID_HASH(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			 u16 *hash, u8 op_mod)
{
	u64 imm;
	int err;

	err = mlx4_cmd_imm(dev, mailbox->dma, &imm, 0, op_mod,
			   MLX4_CMD_MGID_HASH, MLX4_CMD_TIME_CLASS_A,
			   MLX4_CMD_NATIVE);

	if (!err)
		*hash = imm;

	return err;
}
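/*
 * MGID_HASH returns the 16-bit hash through the command's immediate
 * output parameter; the hash is the MGM table index at which the hash
 * chain for the GID starts (see find_entry() below).
 */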
static struct mlx4_promisc_qp *get_promisc_qp(struct mlx4_dev *dev, u8 port,
					      enum mlx4_steer_type steer,
					      u32 qpn)
{
	struct mlx4_steer *s_steer = &mlx4_priv(dev)->steer[port - 1];
	struct mlx4_promisc_qp *pqp;

	list_for_each_entry(pqp, &s_steer->promisc_qps[steer], list) {
		if (pqp->qpn == qpn)
			return pqp;
	}
	/* not found */
	return NULL;
}
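/*
 * The promisc_qps and steer_entries lists hang off the per-port
 * mlx4_steer array.  Walks and updates of these lists are serialized by
 * priv->mcg_table.mutex, which the callers in this file take before
 * invoking the helpers above and below.
 */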
/*
 * Add new entry to steering data structure.
 * All promisc QPs should be added as well.
 */
static int new_steering_entry(struct mlx4_dev *dev, u8 port,
			      enum mlx4_steer_type steer,
			      unsigned int index, u32 qpn)
{
	struct mlx4_steer *s_steer;
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mgm *mgm;
	u32 members_count;
	struct mlx4_steer_index *new_entry;
	struct mlx4_promisc_qp *pqp;
	struct mlx4_promisc_qp *dqp = NULL;
	u32 prot;
	int err;

	s_steer = &mlx4_priv(dev)->steer[port - 1];
	new_entry = kzalloc(sizeof *new_entry, GFP_KERNEL);
	if (!new_entry)
		return -ENOMEM;

	INIT_LIST_HEAD(&new_entry->duplicates);
	new_entry->index = index;
	list_add_tail(&new_entry->list, &s_steer->steer_entries[steer]);

	/* If the given qpn is also a promisc qp,
	 * it should be inserted to duplicates list
	 */
	pqp = get_promisc_qp(dev, port, steer, qpn);
	if (pqp) {
		dqp = kmalloc(sizeof *dqp, GFP_KERNEL);
		if (!dqp) {
			err = -ENOMEM;
			goto out_alloc;
		}
		dqp->qpn = qpn;
		list_add_tail(&dqp->list, &new_entry->duplicates);
	}

	/* if no promisc qps for this vep, we are done */
	if (list_empty(&s_steer->promisc_qps[steer]))
		return 0;

	/* now need to add all the promisc qps to the new
	 * steering entry, as they should also receive the packets
	 * destined to this address */
	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = -ENOMEM;
		goto out_alloc;
	}
	mgm = mailbox->buf;

	err = mlx4_READ_ENTRY(dev, index, mailbox);
	if (err)
		goto out_mailbox;

	members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
	prot = be32_to_cpu(mgm->members_count) >> 30;
	list_for_each_entry(pqp, &s_steer->promisc_qps[steer], list) {
		/* don't add already existing qpn */
		if (pqp->qpn == qpn)
			continue;
		if (members_count == dev->caps.num_qp_per_mgm) {
			/* out of space */
			err = -ENOMEM;
			goto out_mailbox;
		}

		/* add the qpn */
		mgm->qp[members_count++] = cpu_to_be32(pqp->qpn & MGM_QPN_MASK);
	}
	/* update the qps count and update the entry with all the promisc qps*/
	mgm->members_count = cpu_to_be32(members_count | (prot << 30));
	err = mlx4_WRITE_ENTRY(dev, index, mailbox);

out_mailbox:
	mlx4_free_cmd_mailbox(dev, mailbox);
	if (!err)
		return 0;
out_alloc:
	if (dqp) {
		list_del(&dqp->list);
		kfree(dqp);
	}
	list_del(&new_entry->list);
	kfree(new_entry);
	return err;
}
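/*
 * Editorial sketch of the bookkeeping new_steering_entry() maintains:
 * each steer_entries[steer] node mirrors one MGM entry by index, and its
 * 'duplicates' list records QPs that appear in that MGM entry both as
 * ordinary members and as promiscuous QPs, so a later detach knows not
 * to drop them from the hardware entry.
 */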
/* update the data structures with existing steering entry */
static int existing_steering_entry(struct mlx4_dev *dev, u8 port,
				   enum mlx4_steer_type steer,
				   unsigned int index, u32 qpn)
{
	struct mlx4_steer *s_steer;
	struct mlx4_steer_index *tmp_entry, *entry = NULL;
	struct mlx4_promisc_qp *pqp;
	struct mlx4_promisc_qp *dqp;

	s_steer = &mlx4_priv(dev)->steer[port - 1];

	pqp = get_promisc_qp(dev, port, steer, qpn);
	if (!pqp)
		return 0; /* nothing to do */

	list_for_each_entry(tmp_entry, &s_steer->steer_entries[steer], list) {
		if (tmp_entry->index == index) {
			entry = tmp_entry;
			break;
		}
	}
	if (unlikely(!entry)) {
		mlx4_warn(dev, "Steering entry at index %x is not registered\n", index);
		return -EINVAL;
	}

	/* the given qpn is listed as a promisc qpn
	 * we need to add it as a duplicate to this entry
	 * for future references */
	list_for_each_entry(dqp, &entry->duplicates, list) {
		if (qpn == dqp->qpn)
			return 0; /* qp is already duplicated */
	}

	/* add the qp as a duplicate on this index */
	dqp = kmalloc(sizeof *dqp, GFP_KERNEL);
	if (!dqp)
		return -ENOMEM;
	dqp->qpn = qpn;
	list_add_tail(&dqp->list, &entry->duplicates);

	return 0;
}
/* Check whether a qpn is a duplicate on steering entry
 * If so, it should not be removed from mgm */
static bool check_duplicate_entry(struct mlx4_dev *dev, u8 port,
				  enum mlx4_steer_type steer,
				  unsigned int index, u32 qpn)
{
	struct mlx4_steer *s_steer;
	struct mlx4_steer_index *tmp_entry, *entry = NULL;
	struct mlx4_promisc_qp *dqp, *tmp_dqp;

	s_steer = &mlx4_priv(dev)->steer[port - 1];

	/* if qp is not promisc, it cannot be duplicated */
	if (!get_promisc_qp(dev, port, steer, qpn))
		return false;

	/* The qp is promisc qp so it is a duplicate on this index
	 * Find the index entry, and remove the duplicate */
	list_for_each_entry(tmp_entry, &s_steer->steer_entries[steer], list) {
		if (tmp_entry->index == index) {
			entry = tmp_entry;
			break;
		}
	}
	if (unlikely(!entry)) {
		mlx4_warn(dev, "Steering entry for index %x is not registered\n", index);
		return false;
	}
	list_for_each_entry_safe(dqp, tmp_dqp, &entry->duplicates, list) {
		if (dqp->qpn == qpn) {
			list_del(&dqp->list);
			kfree(dqp);
		}
	}
	return true;
}
/* If a steering entry contains only promisc QPs, it can be removed. */
static bool can_remove_steering_entry(struct mlx4_dev *dev, u8 port,
				      enum mlx4_steer_type steer,
				      unsigned int index, u32 tqpn)
{
	struct mlx4_steer *s_steer;
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mgm *mgm;
	struct mlx4_steer_index *entry = NULL, *tmp_entry;
	u32 qpn;
	u32 members_count;
	bool ret = false;
	int i;

	s_steer = &mlx4_priv(dev)->steer[port - 1];

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return false;
	mgm = mailbox->buf;

	if (mlx4_READ_ENTRY(dev, index, mailbox))
		goto out;
	members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
	for (i = 0; i < members_count; i++) {
		qpn = be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK;
		if (!get_promisc_qp(dev, port, steer, qpn) && qpn != tqpn) {
			/* the qp is not promisc, the entry can't be removed */
			goto out;
		}
	}
	/* All the qps currently registered for this entry are promiscuous;
	 * check for duplicates */
	ret = true;
	list_for_each_entry_safe(entry, tmp_entry, &s_steer->steer_entries[steer], list) {
		if (entry->index == index) {
			if (list_empty(&entry->duplicates)) {
				list_del(&entry->list);
				kfree(entry);
			} else {
				/* This entry contains duplicates so it shouldn't be removed */
				ret = false;
				goto out;
			}
		}
	}

out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return ret;
}
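/*
 * Note on the tqpn check above: tqpn is the QP currently being
 * detached, so it does not count against removal — once it is gone, an
 * entry whose remaining members are all promiscuous QPs carries no
 * state beyond what the default entry already provides.
 */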
static int add_promisc_qp(struct mlx4_dev *dev, u8 port,
			  enum mlx4_steer_type steer, u32 qpn)
{
	struct mlx4_steer *s_steer;
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mgm *mgm;
	struct mlx4_steer_index *entry;
	struct mlx4_promisc_qp *pqp;
	struct mlx4_promisc_qp *dqp;
	u32 members_count;
	u32 prot;
	int i;
	bool found;
	int err;
	struct mlx4_priv *priv = mlx4_priv(dev);

	s_steer = &mlx4_priv(dev)->steer[port - 1];

	mutex_lock(&priv->mcg_table.mutex);

	if (get_promisc_qp(dev, port, steer, qpn)) {
		err = 0;  /* Nothing to do, already exists */
		goto out_mutex;
	}

	pqp = kmalloc(sizeof *pqp, GFP_KERNEL);
	if (!pqp) {
		err = -ENOMEM;
		goto out_mutex;
	}
	pqp->qpn = qpn;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = -ENOMEM;
		goto out_alloc;
	}
	mgm = mailbox->buf;

	/* the promisc qp needs to be added for each one of the steering
	 * entries; if it already exists, it needs to be added as a duplicate
	 * for this entry */
	list_for_each_entry(entry, &s_steer->steer_entries[steer], list) {
		err = mlx4_READ_ENTRY(dev, entry->index, mailbox);
		if (err)
			goto out_mailbox;

		members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
		prot = be32_to_cpu(mgm->members_count) >> 30;
		found = false;
		for (i = 0; i < members_count; i++) {
			if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qpn) {
				/* Entry already exists, add to duplicates */
				dqp = kmalloc(sizeof *dqp, GFP_KERNEL);
				if (!dqp) {
					err = -ENOMEM;
					goto out_mailbox;
				}
				dqp->qpn = qpn;
				list_add_tail(&dqp->list, &entry->duplicates);
				found = true;
			}
		}
		if (!found) {
			/* Need to add the qpn to mgm */
			if (members_count == dev->caps.num_qp_per_mgm) {
				/* entry is full */
				err = -ENOMEM;
				goto out_mailbox;
			}
			mgm->qp[members_count++] = cpu_to_be32(qpn & MGM_QPN_MASK);
			mgm->members_count = cpu_to_be32(members_count | (prot << 30));
			err = mlx4_WRITE_ENTRY(dev, entry->index, mailbox);
			if (err)
				goto out_mailbox;
		}
	}

	/* add the new qpn to list of promisc qps */
	list_add_tail(&pqp->list, &s_steer->promisc_qps[steer]);
	/* now need to add all the promisc qps to default entry */
	memset(mgm, 0, sizeof *mgm);
	members_count = 0;
	list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list)
		mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK);
	mgm->members_count = cpu_to_be32(members_count | MLX4_PROT_ETH << 30);

	err = mlx4_WRITE_PROMISC(dev, port, steer, mailbox);
	if (err)
		goto out_list;

	mlx4_free_cmd_mailbox(dev, mailbox);
	mutex_unlock(&priv->mcg_table.mutex);
	return 0;

out_list:
	list_del(&pqp->list);
out_mailbox:
	mlx4_free_cmd_mailbox(dev, mailbox);
out_alloc:
	kfree(pqp);
out_mutex:
	mutex_unlock(&priv->mcg_table.mutex);
	return err;
}
static int remove_promisc_qp(struct mlx4_dev *dev, u8 port,
			     enum mlx4_steer_type steer, u32 qpn)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_steer *s_steer;
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mgm *mgm;
	struct mlx4_steer_index *entry;
	struct mlx4_promisc_qp *pqp;
	struct mlx4_promisc_qp *dqp;
	u32 members_count;
	bool found;
	bool back_to_list = false;
	int loc, i;
	int err;

	s_steer = &mlx4_priv(dev)->steer[port - 1];
	mutex_lock(&priv->mcg_table.mutex);

	pqp = get_promisc_qp(dev, port, steer, qpn);
	if (unlikely(!pqp)) {
		mlx4_warn(dev, "QP %x is not promiscuous QP\n", qpn);
		/* nothing to do */
		err = 0;
		goto out_mutex;
	}

	/* remove from list of promisc qps */
	list_del(&pqp->list);

	/* set the default entry not to include the removed one */
	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = -ENOMEM;
		back_to_list = true;
		goto out_list;
	}
	mgm = mailbox->buf;
	memset(mgm, 0, sizeof *mgm);
	members_count = 0;
	list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list)
		mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK);
	mgm->members_count = cpu_to_be32(members_count | MLX4_PROT_ETH << 30);

	err = mlx4_WRITE_PROMISC(dev, port, steer, mailbox);
	if (err)
		goto out_mailbox;

	/* remove the qp from all the steering entries */
	list_for_each_entry(entry, &s_steer->steer_entries[steer], list) {
		found = false;
		list_for_each_entry(dqp, &entry->duplicates, list) {
			if (dqp->qpn == qpn) {
				found = true;
				break;
			}
		}
		if (found) {
			/* a duplicate, no need to change the mgm,
			 * only update the duplicates list */
			list_del(&dqp->list);
			kfree(dqp);
		} else {
			err = mlx4_READ_ENTRY(dev, entry->index, mailbox);
			if (err)
				goto out_mailbox;
			members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
			for (loc = -1, i = 0; i < members_count; ++i)
				if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qpn)
					loc = i;

			mgm->members_count = cpu_to_be32(--members_count |
							 (MLX4_PROT_ETH << 30));
			mgm->qp[loc] = mgm->qp[i - 1];
			mgm->qp[i - 1] = 0;

			err = mlx4_WRITE_ENTRY(dev, entry->index, mailbox);
			if (err)
				goto out_mailbox;
		}
	}

out_mailbox:
	mlx4_free_cmd_mailbox(dev, mailbox);
out_list:
	if (back_to_list)
		list_add_tail(&pqp->list, &s_steer->promisc_qps[steer]);
	else
		kfree(pqp);
out_mutex:
	mutex_unlock(&priv->mcg_table.mutex);
	return err;
}
/*
 * Caller must hold MCG table semaphore.  gid and mgm parameters must
 * be properly aligned for command interface.
 *
 * Returns 0 unless a firmware command error occurs.
 *
 * If GID is found in MGM or MGM is empty, *index = *hash, *prev = -1
 * and *mgm holds MGM entry.
 *
 * If GID is found in AMGM, *index = index in AMGM, *prev = index of
 * previous entry in hash chain and *mgm holds AMGM entry.
 *
 * If no AMGM exists for given gid, *index = -1, *prev = index of last
 * entry in hash chain and *mgm holds end of hash chain.
 */
static int find_entry(struct mlx4_dev *dev, u8 port,
		      u8 *gid, enum mlx4_protocol prot,
		      struct mlx4_cmd_mailbox *mgm_mailbox,
		      int *prev, int *index)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mgm *mgm = mgm_mailbox->buf;
	u8 *mgid;
	int err;
	u16 hash;
	u8 op_mod = (prot == MLX4_PROT_ETH) ?
		!!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) : 0;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return -ENOMEM;
	mgid = mailbox->buf;

	memcpy(mgid, gid, 16);

	err = mlx4_GID_HASH(dev, mailbox, &hash, op_mod);
	mlx4_free_cmd_mailbox(dev, mailbox);
	if (err)
		return err;

	mlx4_dbg(dev, "Hash for %pI6 is %04x\n", gid, hash);

	*index = hash;
	*prev  = -1;

	do {
		err = mlx4_READ_ENTRY(dev, *index, mgm_mailbox);
		if (err)
			return err;

		if (!(be32_to_cpu(mgm->members_count) & 0xffffff)) {
			if (*index != hash) {
				mlx4_err(dev, "Found zero MGID in AMGM.\n");
				err = -EINVAL;
			}
			return err;
		}

		if (!memcmp(mgm->gid, gid, 16) &&
		    be32_to_cpu(mgm->members_count) >> 30 == prot)
			return err;

		*prev = *index;
		*index = be32_to_cpu(mgm->next_gid_index) >> 6;
	} while (*index);

	*index = -1;
	return err;
}
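/*
 * Example of the contract documented above (hypothetical values): if
 * GID g hashes to 5 and MGM[5] holds g, then *index = 5 and *prev = -1.
 * If MGM[5] holds a different GID whose next_gid_index points at AMGM
 * entry 300 holding g, then *index = 300 and *prev = 5.  If the chain
 * ends without finding g, *index = -1 and *prev is the last entry
 * visited.
 */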
static const u8 __promisc_mode[] = {
	[MLX4_FS_REGULAR]	= 0x0,
	[MLX4_FS_ALL_DEFAULT]	= 0x1,
	[MLX4_FS_MC_DEFAULT]	= 0x3,
	[MLX4_FS_UC_SNIFFER]	= 0x4,
	[MLX4_FS_MC_SNIFFER]	= 0x5,
};
int mlx4_map_sw_to_hw_steering_mode(struct mlx4_dev *dev,
				    enum mlx4_net_trans_promisc_mode flow_type)
{
	if (flow_type >= MLX4_FS_MODE_NUM) {
		mlx4_err(dev, "Invalid flow type. type = %d\n", flow_type);
		return -EINVAL;
	}
	return __promisc_mode[flow_type];
}
EXPORT_SYMBOL_GPL(mlx4_map_sw_to_hw_steering_mode);
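/*
 * A minimal usage sketch (hypothetical caller, not from this file):
 *
 *	int hw_mode = mlx4_map_sw_to_hw_steering_mode(dev, MLX4_FS_REGULAR);
 *	if (hw_mode < 0)
 *		return hw_mode;		(invalid flow type)
 *
 * The returned value is what trans_rule_ctrl_to_hw() below stores in
 * the hardware control segment's type field.
 */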
static void trans_rule_ctrl_to_hw(struct mlx4_net_trans_rule *ctrl,
				  struct mlx4_net_trans_rule_hw_ctrl *hw)
{
	u8 flags = 0;

	flags = ctrl->queue_mode == MLX4_NET_TRANS_Q_LIFO ? 1 : 0;
	flags |= ctrl->exclusive ? (1 << 2) : 0;
	flags |= ctrl->allow_loopback ? (1 << 3) : 0;

	hw->flags = flags;
	hw->type = __promisc_mode[ctrl->promisc_mode];
	hw->prio = cpu_to_be16(ctrl->priority);
	hw->port = ctrl->port;
	hw->qpn = cpu_to_be32(ctrl->qpn);
}
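/*
 * Flag byte layout, as encoded above: bit 0 selects LIFO queue mode,
 * bit 2 marks the rule exclusive and bit 3 allows loopback; the
 * remaining bits are left clear by this driver.
 */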
const u16 __sw_id_hw[] = {
	[MLX4_NET_TRANS_RULE_ID_ETH]	= 0xE001,
	[MLX4_NET_TRANS_RULE_ID_IB]	= 0xE005,
	[MLX4_NET_TRANS_RULE_ID_IPV6]	= 0xE003,
	[MLX4_NET_TRANS_RULE_ID_IPV4]	= 0xE002,
	[MLX4_NET_TRANS_RULE_ID_TCP]	= 0xE004,
	[MLX4_NET_TRANS_RULE_ID_UDP]	= 0xE006
};

int mlx4_map_sw_to_hw_steering_id(struct mlx4_dev *dev,
				  enum mlx4_net_trans_rule_id id)
{
	if (id >= MLX4_NET_TRANS_RULE_NUM) {
		mlx4_err(dev, "Invalid network rule id. id = %d\n", id);
		return -EINVAL;
	}
	return __sw_id_hw[id];
}
EXPORT_SYMBOL_GPL(mlx4_map_sw_to_hw_steering_id);
static const int __rule_hw_sz[] = {
	[MLX4_NET_TRANS_RULE_ID_ETH] =
		sizeof(struct mlx4_net_trans_rule_hw_eth),
	[MLX4_NET_TRANS_RULE_ID_IB] =
		sizeof(struct mlx4_net_trans_rule_hw_ib),
	[MLX4_NET_TRANS_RULE_ID_IPV6] = 0,
	[MLX4_NET_TRANS_RULE_ID_IPV4] =
		sizeof(struct mlx4_net_trans_rule_hw_ipv4),
	[MLX4_NET_TRANS_RULE_ID_TCP] =
		sizeof(struct mlx4_net_trans_rule_hw_tcp_udp),
	[MLX4_NET_TRANS_RULE_ID_UDP] =
		sizeof(struct mlx4_net_trans_rule_hw_tcp_udp)
};

int mlx4_hw_rule_sz(struct mlx4_dev *dev,
		    enum mlx4_net_trans_rule_id id)
{
	if (id >= MLX4_NET_TRANS_RULE_NUM) {
		mlx4_err(dev, "Invalid network rule id. id = %d\n", id);
		return -EINVAL;
	}

	return __rule_hw_sz[id];
}
EXPORT_SYMBOL_GPL(mlx4_hw_rule_sz);
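/*
 * The sizes in __rule_hw_sz[] are byte counts; parse_trans_rule() below
 * stores the per-spec size as size >> 2, i.e. in 4-byte (dword) units,
 * and mlx4_flow_attach() likewise passes the total rule size to
 * firmware in dwords.  The IPv6 entry is 0 because the id is declared
 * but the spec type is rejected in parse_trans_rule().
 */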
static int parse_trans_rule(struct mlx4_dev *dev, struct mlx4_spec_list *spec,
			    struct _rule_hw *rule_hw)
{
	if (mlx4_hw_rule_sz(dev, spec->id) < 0)
		return -EINVAL;
	memset(rule_hw, 0, mlx4_hw_rule_sz(dev, spec->id));
	rule_hw->id = cpu_to_be16(__sw_id_hw[spec->id]);
	rule_hw->size = mlx4_hw_rule_sz(dev, spec->id) >> 2;

	switch (spec->id) {
	case MLX4_NET_TRANS_RULE_ID_ETH:
		memcpy(rule_hw->eth.dst_mac, spec->eth.dst_mac, ETH_ALEN);
		memcpy(rule_hw->eth.dst_mac_msk, spec->eth.dst_mac_msk,
		       ETH_ALEN);
		memcpy(rule_hw->eth.src_mac, spec->eth.src_mac, ETH_ALEN);
		memcpy(rule_hw->eth.src_mac_msk, spec->eth.src_mac_msk,
		       ETH_ALEN);
		if (spec->eth.ether_type_enable) {
			rule_hw->eth.ether_type_enable = 1;
			rule_hw->eth.ether_type = spec->eth.ether_type;
		}
		rule_hw->eth.vlan_tag = spec->eth.vlan_id;
		rule_hw->eth.vlan_tag_msk = spec->eth.vlan_id_msk;
		break;

	case MLX4_NET_TRANS_RULE_ID_IB:
		rule_hw->ib.l3_qpn = spec->ib.l3_qpn;
		rule_hw->ib.qpn_mask = spec->ib.qpn_msk;
		memcpy(&rule_hw->ib.dst_gid, &spec->ib.dst_gid, 16);
		memcpy(&rule_hw->ib.dst_gid_msk, &spec->ib.dst_gid_msk, 16);
		break;

	case MLX4_NET_TRANS_RULE_ID_IPV6:
		return -EOPNOTSUPP;

	case MLX4_NET_TRANS_RULE_ID_IPV4:
		rule_hw->ipv4.src_ip = spec->ipv4.src_ip;
		rule_hw->ipv4.src_ip_msk = spec->ipv4.src_ip_msk;
		rule_hw->ipv4.dst_ip = spec->ipv4.dst_ip;
		rule_hw->ipv4.dst_ip_msk = spec->ipv4.dst_ip_msk;
		break;

	case MLX4_NET_TRANS_RULE_ID_TCP:
	case MLX4_NET_TRANS_RULE_ID_UDP:
		rule_hw->tcp_udp.dst_port = spec->tcp_udp.dst_port;
		rule_hw->tcp_udp.dst_port_msk = spec->tcp_udp.dst_port_msk;
		rule_hw->tcp_udp.src_port = spec->tcp_udp.src_port;
		rule_hw->tcp_udp.src_port_msk = spec->tcp_udp.src_port_msk;
		break;

	default:
		return -EINVAL;
	}

	return __rule_hw_sz[spec->id];
}
#define BUF_SIZE 256
static void mlx4_err_rule(struct mlx4_dev *dev, char *str,
			  struct mlx4_net_trans_rule *rule)
{
	char buf[BUF_SIZE];
	int len = 0;
	struct mlx4_spec_list *cur;

	mlx4_err(dev, "%s", str);
	len += snprintf(buf + len, BUF_SIZE - len,
			"port = %d prio = 0x%x qp = 0x%x ",
			rule->port, rule->priority, rule->qpn);

	list_for_each_entry(cur, &rule->list, list) {
		switch (cur->id) {
		case MLX4_NET_TRANS_RULE_ID_ETH:
			len += snprintf(buf + len, BUF_SIZE - len,
					"dmac = %pM ", &cur->eth.dst_mac);
			if (cur->eth.ether_type)
				len += snprintf(buf + len, BUF_SIZE - len,
						"ethertype = 0x%x ",
						be16_to_cpu(cur->eth.ether_type));
			if (cur->eth.vlan_id)
				len += snprintf(buf + len, BUF_SIZE - len,
						"vlan-id = %d ",
						be16_to_cpu(cur->eth.vlan_id));
			break;

		case MLX4_NET_TRANS_RULE_ID_IPV4:
			if (cur->ipv4.src_ip)
				len += snprintf(buf + len, BUF_SIZE - len,
						"src-ip = %pI4 ",
						&cur->ipv4.src_ip);
			if (cur->ipv4.dst_ip)
				len += snprintf(buf + len, BUF_SIZE - len,
						"dst-ip = %pI4 ",
						&cur->ipv4.dst_ip);
			break;

		case MLX4_NET_TRANS_RULE_ID_TCP:
		case MLX4_NET_TRANS_RULE_ID_UDP:
			if (cur->tcp_udp.src_port)
				len += snprintf(buf + len, BUF_SIZE - len,
						"src-port = %d ",
						be16_to_cpu(cur->tcp_udp.src_port));
			if (cur->tcp_udp.dst_port)
				len += snprintf(buf + len, BUF_SIZE - len,
						"dst-port = %d ",
						be16_to_cpu(cur->tcp_udp.dst_port));
			break;

		case MLX4_NET_TRANS_RULE_ID_IB:
			len += snprintf(buf + len, BUF_SIZE - len,
					"dst-gid = %pI6\n", cur->ib.dst_gid);
			len += snprintf(buf + len, BUF_SIZE - len,
					"dst-gid-mask = %pI6\n",
					cur->ib.dst_gid_msk);
			break;

		case MLX4_NET_TRANS_RULE_ID_IPV6:
			break;

		default:
			break;
		}
	}
	len += snprintf(buf + len, BUF_SIZE - len, "\n");
	mlx4_err(dev, "%s", buf);

	if (len >= BUF_SIZE)
		mlx4_err(dev, "Network rule error message was truncated, print buffer is too small.\n");
}
int mlx4_flow_attach(struct mlx4_dev *dev,
		     struct mlx4_net_trans_rule *rule, u64 *reg_id)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_spec_list *cur;
	u32 size = 0;
	int ret;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	memset(mailbox->buf, 0, sizeof(struct mlx4_net_trans_rule_hw_ctrl));
	trans_rule_ctrl_to_hw(rule, mailbox->buf);

	size += sizeof(struct mlx4_net_trans_rule_hw_ctrl);

	list_for_each_entry(cur, &rule->list, list) {
		ret = parse_trans_rule(dev, cur, mailbox->buf + size);
		if (ret < 0) {
			mlx4_free_cmd_mailbox(dev, mailbox);
			return -EINVAL;
		}
		size += ret;
	}

	ret = mlx4_QP_FLOW_STEERING_ATTACH(dev, mailbox, size >> 2, reg_id);
	if (ret == -ENOMEM)
		mlx4_err_rule(dev,
			      "MCG table is full. Failed to register network rule.\n",
			      rule);
	else if (ret)
		mlx4_err_rule(dev, "Failed to register network rule.\n", rule);

	mlx4_free_cmd_mailbox(dev, mailbox);

	return ret;
}
EXPORT_SYMBOL_GPL(mlx4_flow_attach);
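/*
 * For a complete in-tree example of building the rule and spec list
 * that mlx4_flow_attach() consumes, see mlx4_trans_to_dmfs_attach()
 * below, which converts an attach request into a single-spec rule.
 */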
int mlx4_flow_detach(struct mlx4_dev *dev, u64 reg_id)
{
	int err;

	err = mlx4_QP_FLOW_STEERING_DETACH(dev, reg_id);
	if (err)
		mlx4_err(dev, "Failed to detach network rule. registration id = 0x%llx\n",
			 reg_id);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_flow_detach);
int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
			  int block_mcast_loopback, enum mlx4_protocol prot,
			  enum mlx4_steer_type steer)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mgm *mgm;
	u32 members_count;
	int index, prev;
	int link = 0;
	int i;
	int err;
	u8 port = gid[5];
	u8 new_entry = 0;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	mgm = mailbox->buf;

	mutex_lock(&priv->mcg_table.mutex);
	err = find_entry(dev, port, gid, prot,
			 mailbox, &prev, &index);
	if (err)
		goto out;

	if (index != -1) {
		if (!(be32_to_cpu(mgm->members_count) & 0xffffff)) {
			new_entry = 1;
			memcpy(mgm->gid, gid, 16);
		}
	} else {
		link = 1;

		index = mlx4_bitmap_alloc(&priv->mcg_table.bitmap);
		if (index == -1) {
			mlx4_err(dev, "No AMGM entries left\n");
			err = -ENOMEM;
			goto out;
		}
		index += dev->caps.num_mgms;

		new_entry = 1;
		memset(mgm, 0, sizeof *mgm);
		memcpy(mgm->gid, gid, 16);
	}

	members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
	if (members_count == dev->caps.num_qp_per_mgm) {
		mlx4_err(dev, "MGM at index %x is full.\n", index);
		err = -ENOMEM;
		goto out;
	}

	for (i = 0; i < members_count; ++i)
		if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qp->qpn) {
			mlx4_dbg(dev, "QP %06x already a member of MGM\n", qp->qpn);
			err = 0;
			goto out;
		}

	if (block_mcast_loopback)
		mgm->qp[members_count++] = cpu_to_be32((qp->qpn & MGM_QPN_MASK) |
						       (1U << MGM_BLCK_LB_BIT));
	else
		mgm->qp[members_count++] = cpu_to_be32(qp->qpn & MGM_QPN_MASK);

	mgm->members_count = cpu_to_be32(members_count | (u32) prot << 30);

	err = mlx4_WRITE_ENTRY(dev, index, mailbox);
	if (err)
		goto out;

	if (!link)
		goto out;

	err = mlx4_READ_ENTRY(dev, prev, mailbox);
	if (err)
		goto out;

	mgm->next_gid_index = cpu_to_be32(index << 6);

	err = mlx4_WRITE_ENTRY(dev, prev, mailbox);
	if (err)
		goto out;

out:
	if (prot == MLX4_PROT_ETH) {
		/* manage the steering entry for promisc mode */
		if (new_entry)
			new_steering_entry(dev, port, steer, index, qp->qpn);
		else
			existing_steering_entry(dev, port, steer,
						index, qp->qpn);
	}
	if (err && link && index != -1) {
		if (index < dev->caps.num_mgms)
			mlx4_warn(dev, "Got AMGM index %d < %d",
				  index, dev->caps.num_mgms);
		else
			mlx4_bitmap_free(&priv->mcg_table.bitmap,
					 index - dev->caps.num_mgms);
	}
	mutex_unlock(&priv->mcg_table.mutex);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
			  enum mlx4_protocol prot, enum mlx4_steer_type steer)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mgm *mgm;
	u32 members_count;
	int prev, index;
	int i, loc;
	int err;
	u8 port = gid[5];
	bool removed_entry = false;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	mgm = mailbox->buf;

	mutex_lock(&priv->mcg_table.mutex);

	err = find_entry(dev, port, gid, prot,
			 mailbox, &prev, &index);
	if (err)
		goto out;

	if (index == -1) {
		mlx4_err(dev, "MGID %pI6 not found\n", gid);
		err = -EINVAL;
		goto out;
	}

	/* if this qp is also a promisc qp, it shouldn't be removed */
	if (prot == MLX4_PROT_ETH &&
	    check_duplicate_entry(dev, port, steer, index, qp->qpn))
		goto out;

	members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
	for (loc = -1, i = 0; i < members_count; ++i)
		if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qp->qpn)
			loc = i;

	if (loc == -1) {
		mlx4_err(dev, "QP %06x not found in MGM\n", qp->qpn);
		err = -EINVAL;
		goto out;
	}

	mgm->members_count = cpu_to_be32(--members_count | (u32) prot << 30);
	mgm->qp[loc] = mgm->qp[i - 1];
	mgm->qp[i - 1] = 0;

	if (prot == MLX4_PROT_ETH)
		removed_entry = can_remove_steering_entry(dev, port, steer,
							  index, qp->qpn);
	if (i != 1 && (prot != MLX4_PROT_ETH || !removed_entry)) {
		err = mlx4_WRITE_ENTRY(dev, index, mailbox);
		goto out;
	}

	/* We are going to delete the entry, members count should be 0 */
	mgm->members_count = cpu_to_be32((u32) prot << 30);

	if (prev == -1) {
		/* Remove entry from MGM */
		int amgm_index = be32_to_cpu(mgm->next_gid_index) >> 6;
		if (amgm_index) {
			err = mlx4_READ_ENTRY(dev, amgm_index, mailbox);
			if (err)
				goto out;
		} else
			memset(mgm->gid, 0, 16);

		err = mlx4_WRITE_ENTRY(dev, index, mailbox);
		if (err)
			goto out;

		if (amgm_index) {
			if (amgm_index < dev->caps.num_mgms)
				mlx4_warn(dev, "MGM entry %d had AMGM index %d < %d",
					  index, amgm_index, dev->caps.num_mgms);
			else
				mlx4_bitmap_free(&priv->mcg_table.bitmap,
						 amgm_index - dev->caps.num_mgms);
		}
	} else {
		/* Remove entry from AMGM */
		int cur_next_index = be32_to_cpu(mgm->next_gid_index) >> 6;
		err = mlx4_READ_ENTRY(dev, prev, mailbox);
		if (err)
			goto out;

		mgm->next_gid_index = cpu_to_be32(cur_next_index << 6);

		err = mlx4_WRITE_ENTRY(dev, prev, mailbox);
		if (err)
			goto out;

		if (index < dev->caps.num_mgms)
			mlx4_warn(dev, "entry %d had next AMGM index %d < %d",
				  prev, index, dev->caps.num_mgms);
		else
			mlx4_bitmap_free(&priv->mcg_table.bitmap,
					 index - dev->caps.num_mgms);
	}

out:
	mutex_unlock(&priv->mcg_table.mutex);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
static int mlx4_QP_ATTACH(struct mlx4_dev *dev, struct mlx4_qp *qp,
			  u8 gid[16], u8 attach, u8 block_loopback,
			  enum mlx4_protocol prot)
{
	struct mlx4_cmd_mailbox *mailbox;
	int err = 0;
	int qpn;

	if (!mlx4_is_mfunc(dev))
		return -EBADF;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	memcpy(mailbox->buf, gid, 16);
	qpn = qp->qpn;
	qpn |= (prot << 28);
	if (attach && block_loopback)
		qpn |= (1 << 31);

	err = mlx4_cmd(dev, mailbox->dma, qpn, attach,
		       MLX4_CMD_QP_ATTACH, MLX4_CMD_TIME_CLASS_A,
		       MLX4_CMD_WRAPPED);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
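/*
 * Wire format used by mlx4_QP_ATTACH above: the GID travels in the
 * mailbox, while the input modifier carries the QP number in its low
 * bits, the protocol shifted in at bit 28, and bit 31 set when loopback
 * blocking is requested on attach.  The attach/detach selector itself
 * rides in the command's op_modifier.
 */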
int mlx4_trans_to_dmfs_attach(struct mlx4_dev *dev, struct mlx4_qp *qp,
			      u8 gid[16], u8 port,
			      int block_mcast_loopback,
			      enum mlx4_protocol prot, u64 *reg_id)
{
	struct mlx4_spec_list spec = { {NULL} };
	__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);

	struct mlx4_net_trans_rule rule = {
		.queue_mode = MLX4_NET_TRANS_Q_FIFO,
		.exclusive = 0,
		.promisc_mode = MLX4_FS_REGULAR,
		.priority = MLX4_DOMAIN_NIC,
	};

	rule.allow_loopback = !block_mcast_loopback;
	rule.port = port;
	rule.qpn = qp->qpn;
	INIT_LIST_HEAD(&rule.list);

	switch (prot) {
	case MLX4_PROT_ETH:
		spec.id = MLX4_NET_TRANS_RULE_ID_ETH;
		memcpy(spec.eth.dst_mac, &gid[10], ETH_ALEN);
		memcpy(spec.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
		break;

	case MLX4_PROT_IB_IPV6:
		spec.id = MLX4_NET_TRANS_RULE_ID_IB;
		memcpy(spec.ib.dst_gid, gid, 16);
		memset(&spec.ib.dst_gid_msk, 0xff, 16);
		break;

	default:
		return -EINVAL;
	}
	list_add_tail(&spec.list, &rule.list);

	return mlx4_flow_attach(dev, &rule, reg_id);
}
int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
			  u8 port, int block_mcast_loopback,
			  enum mlx4_protocol prot, u64 *reg_id)
{
	switch (dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_A0:
		if (prot == MLX4_PROT_ETH)
			return 0;
		/* fall through */

	case MLX4_STEERING_MODE_B0:
		if (prot == MLX4_PROT_ETH)
			gid[7] |= (MLX4_MC_STEER << 1);

		if (mlx4_is_mfunc(dev))
			return mlx4_QP_ATTACH(dev, qp, gid, 1,
					      block_mcast_loopback, prot);
		return mlx4_qp_attach_common(dev, qp, gid,
					     block_mcast_loopback, prot,
					     MLX4_MC_STEER);

	case MLX4_STEERING_MODE_DEVICE_MANAGED:
		return mlx4_trans_to_dmfs_attach(dev, qp, gid, port,
						 block_mcast_loopback,
						 prot, reg_id);
	default:
		return -EINVAL;
	}
}
EXPORT_SYMBOL_GPL(mlx4_multicast_attach);
int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
			  enum mlx4_protocol prot, u64 reg_id)
{
	switch (dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_A0:
		if (prot == MLX4_PROT_ETH)
			return 0;
		/* fall through */

	case MLX4_STEERING_MODE_B0:
		if (prot == MLX4_PROT_ETH)
			gid[7] |= (MLX4_MC_STEER << 1);

		if (mlx4_is_mfunc(dev))
			return mlx4_QP_ATTACH(dev, qp, gid, 0, 0, prot);

		return mlx4_qp_detach_common(dev, qp, gid, prot,
					     MLX4_MC_STEER);

	case MLX4_STEERING_MODE_DEVICE_MANAGED:
		return mlx4_flow_detach(dev, reg_id);

	default:
		return -EINVAL;
	}
}
EXPORT_SYMBOL_GPL(mlx4_multicast_detach);
int mlx4_flow_steer_promisc_add(struct mlx4_dev *dev, u8 port,
				u32 qpn, enum mlx4_net_trans_promisc_mode mode)
{
	struct mlx4_net_trans_rule rule;
	u64 *regid_p;

	switch (mode) {
	case MLX4_FS_ALL_DEFAULT:
		regid_p = &dev->regid_promisc_array[port];
		break;
	case MLX4_FS_MC_DEFAULT:
		regid_p = &dev->regid_allmulti_array[port];
		break;
	default:
		return -1;
	}

	if (*regid_p != 0)
		return -1;

	rule.promisc_mode = mode;
	rule.port = port;
	rule.qpn = qpn;
	INIT_LIST_HEAD(&rule.list);
	mlx4_err(dev, "going promisc on %x\n", port);

	return mlx4_flow_attach(dev, &rule, regid_p);
}
EXPORT_SYMBOL_GPL(mlx4_flow_steer_promisc_add);
int mlx4_flow_steer_promisc_remove(struct mlx4_dev *dev, u8 port,
				   enum mlx4_net_trans_promisc_mode mode)
{
	int ret;
	u64 *regid_p;

	switch (mode) {
	case MLX4_FS_ALL_DEFAULT:
		regid_p = &dev->regid_promisc_array[port];
		break;
	case MLX4_FS_MC_DEFAULT:
		regid_p = &dev->regid_allmulti_array[port];
		break;
	default:
		return -1;
	}

	if (*regid_p == 0)
		return -1;

	ret = mlx4_flow_detach(dev, *regid_p);
	if (ret == 0)
		*regid_p = 0;

	return ret;
}
EXPORT_SYMBOL_GPL(mlx4_flow_steer_promisc_remove);
int mlx4_unicast_attach(struct mlx4_dev *dev,
			struct mlx4_qp *qp, u8 gid[16],
			int block_mcast_loopback, enum mlx4_protocol prot)
{
	if (prot == MLX4_PROT_ETH)
		gid[7] |= (MLX4_UC_STEER << 1);

	if (mlx4_is_mfunc(dev))
		return mlx4_QP_ATTACH(dev, qp, gid, 1,
				      block_mcast_loopback, prot);

	return mlx4_qp_attach_common(dev, qp, gid, block_mcast_loopback,
				     prot, MLX4_UC_STEER);
}
EXPORT_SYMBOL_GPL(mlx4_unicast_attach);
int mlx4_unicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp,
			u8 gid[16], enum mlx4_protocol prot)
{
	if (prot == MLX4_PROT_ETH)
		gid[7] |= (MLX4_UC_STEER << 1);

	if (mlx4_is_mfunc(dev))
		return mlx4_QP_ATTACH(dev, qp, gid, 0, 0, prot);

	return mlx4_qp_detach_common(dev, qp, gid, prot, MLX4_UC_STEER);
}
EXPORT_SYMBOL_GPL(mlx4_unicast_detach);
int mlx4_PROMISC_wrapper(struct mlx4_dev *dev, int slave,
			 struct mlx4_vhcr *vhcr,
			 struct mlx4_cmd_mailbox *inbox,
			 struct mlx4_cmd_mailbox *outbox,
			 struct mlx4_cmd_info *cmd)
{
	u32 qpn = (u32) vhcr->in_param & 0xffffffff;
	u8 port = vhcr->in_param >> 62;
	enum mlx4_steer_type steer = vhcr->in_modifier;

	/* Promiscuous unicast is not allowed in mfunc */
	if (mlx4_is_mfunc(dev) && steer == MLX4_UC_STEER)
		return 0;

	if (vhcr->op_modifier)
		return add_promisc_qp(dev, port, steer, qpn);
	else
		return remove_promisc_qp(dev, port, steer, qpn);
}
static int mlx4_PROMISC(struct mlx4_dev *dev, u32 qpn,
			enum mlx4_steer_type steer, u8 add, u8 port)
{
	return mlx4_cmd(dev, (u64) qpn | (u64) port << 62, (u32) steer, add,
			MLX4_CMD_PROMISC, MLX4_CMD_TIME_CLASS_A,
			MLX4_CMD_NATIVE);
}
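/*
 * The packing here is the mirror of mlx4_PROMISC_wrapper() above: the
 * QP number rides in the low 32 bits of the 64-bit input parameter, the
 * port in the top two bits (<< 62), the steer type in the input
 * modifier and the add/remove flag in the op_modifier.
 */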
int mlx4_multicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port)
{
	if (mlx4_is_mfunc(dev))
		return mlx4_PROMISC(dev, qpn, MLX4_MC_STEER, 1, port);

	return add_promisc_qp(dev, port, MLX4_MC_STEER, qpn);
}
EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_add);
int mlx4_multicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port)
{
	if (mlx4_is_mfunc(dev))
		return mlx4_PROMISC(dev, qpn, MLX4_MC_STEER, 0, port);

	return remove_promisc_qp(dev, port, MLX4_MC_STEER, qpn);
}
EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_remove);
int mlx4_unicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port)
{
	if (mlx4_is_mfunc(dev))
		return mlx4_PROMISC(dev, qpn, MLX4_UC_STEER, 1, port);

	return add_promisc_qp(dev, port, MLX4_UC_STEER, qpn);
}
EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_add);
int mlx4_unicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port)
{
	if (mlx4_is_mfunc(dev))
		return mlx4_PROMISC(dev, qpn, MLX4_UC_STEER, 0, port);

	return remove_promisc_qp(dev, port, MLX4_UC_STEER, qpn);
}
EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_remove);
int mlx4_init_mcg_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;

	/* No need for mcg_table when fw manages the mcg table */
	if (dev->caps.steering_mode ==
	    MLX4_STEERING_MODE_DEVICE_MANAGED)
		return 0;
	err = mlx4_bitmap_init(&priv->mcg_table.bitmap, dev->caps.num_amgms,
			       dev->caps.num_amgms - 1, 0, 0);
	if (err)
		return err;

	mutex_init(&priv->mcg_table.mutex);

	return 0;
}
void mlx4_cleanup_mcg_table(struct mlx4_dev *dev)
{
	if (dev->caps.steering_mode !=
	    MLX4_STEERING_MODE_DEVICE_MANAGED)
		mlx4_bitmap_cleanup(&mlx4_priv(dev)->mcg_table.bitmap);
}