/*
 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/errno.h>
#include <linux/if_ether.h>
#include <linux/export.h>

#include <linux/mlx4/cmd.h>

#include "mlx4.h"
41 #define MLX4_MAC_VALID (1ull << 63)
43 #define MLX4_VLAN_VALID (1u << 31)
44 #define MLX4_VLAN_MASK 0xfff
46 #define MLX4_STATS_TRAFFIC_COUNTERS_MASK 0xfULL
47 #define MLX4_STATS_TRAFFIC_DROPS_MASK 0xc0ULL
48 #define MLX4_STATS_ERROR_COUNTERS_MASK 0x1ffc30ULL
49 #define MLX4_STATS_PORT_COUNTERS_MASK 0x1fe00000ULL
51 void mlx4_init_mac_table(struct mlx4_dev
*dev
, struct mlx4_mac_table
*table
)
55 mutex_init(&table
->mutex
);
56 for (i
= 0; i
< MLX4_MAX_MAC_NUM
; i
++) {
57 table
->entries
[i
] = 0;
60 table
->max
= 1 << dev
->caps
.log_num_macs
;
64 void mlx4_init_vlan_table(struct mlx4_dev
*dev
, struct mlx4_vlan_table
*table
)
68 mutex_init(&table
->mutex
);
69 for (i
= 0; i
< MLX4_MAX_VLAN_NUM
; i
++) {
70 table
->entries
[i
] = 0;
73 table
->max
= (1 << dev
->caps
.log_num_vlans
) - MLX4_VLAN_REGULAR
;
77 static int validate_index(struct mlx4_dev
*dev
,
78 struct mlx4_mac_table
*table
, int index
)
82 if (index
< 0 || index
>= table
->max
|| !table
->entries
[index
]) {
83 mlx4_warn(dev
, "No valid Mac entry for the given index\n");
89 static int find_index(struct mlx4_dev
*dev
,
90 struct mlx4_mac_table
*table
, u64 mac
)
94 for (i
= 0; i
< MLX4_MAX_MAC_NUM
; i
++) {
95 if ((mac
& MLX4_MAC_MASK
) ==
96 (MLX4_MAC_MASK
& be64_to_cpu(table
->entries
[i
])))
103 static int mlx4_set_port_mac_table(struct mlx4_dev
*dev
, u8 port
,
106 struct mlx4_cmd_mailbox
*mailbox
;
110 mailbox
= mlx4_alloc_cmd_mailbox(dev
);
112 return PTR_ERR(mailbox
);
114 memcpy(mailbox
->buf
, entries
, MLX4_MAC_TABLE_SIZE
);
116 in_mod
= MLX4_SET_PORT_MAC_TABLE
<< 8 | port
;
118 err
= mlx4_cmd(dev
, mailbox
->dma
, in_mod
, 1, MLX4_CMD_SET_PORT
,
119 MLX4_CMD_TIME_CLASS_B
, MLX4_CMD_NATIVE
);
121 mlx4_free_cmd_mailbox(dev
, mailbox
);
125 int __mlx4_register_mac(struct mlx4_dev
*dev
, u8 port
, u64 mac
)
127 struct mlx4_port_info
*info
= &mlx4_priv(dev
)->port
[port
];
128 struct mlx4_mac_table
*table
= &info
->mac_table
;
132 mlx4_dbg(dev
, "Registering MAC: 0x%llx for port %d\n",
133 (unsigned long long) mac
, port
);
135 mutex_lock(&table
->mutex
);
136 for (i
= 0; i
< MLX4_MAX_MAC_NUM
; i
++) {
137 if (free
< 0 && !table
->entries
[i
]) {
142 if (mac
== (MLX4_MAC_MASK
& be64_to_cpu(table
->entries
[i
]))) {
143 /* MAC already registered, Must not have duplicates */
149 mlx4_dbg(dev
, "Free MAC index is %d\n", free
);
151 if (table
->total
== table
->max
) {
152 /* No free mac entries */
157 /* Register new MAC */
158 table
->entries
[free
] = cpu_to_be64(mac
| MLX4_MAC_VALID
);
160 err
= mlx4_set_port_mac_table(dev
, port
, table
->entries
);
162 mlx4_err(dev
, "Failed adding MAC: 0x%llx\n",
163 (unsigned long long) mac
);
164 table
->entries
[free
] = 0;
171 mutex_unlock(&table
->mutex
);
174 EXPORT_SYMBOL_GPL(__mlx4_register_mac
);
176 int mlx4_register_mac(struct mlx4_dev
*dev
, u8 port
, u64 mac
)
181 if (mlx4_is_mfunc(dev
)) {
182 set_param_l(&out_param
, port
);
183 err
= mlx4_cmd_imm(dev
, mac
, &out_param
, RES_MAC
,
184 RES_OP_RESERVE_AND_MAP
, MLX4_CMD_ALLOC_RES
,
185 MLX4_CMD_TIME_CLASS_A
, MLX4_CMD_WRAPPED
);
189 return get_param_l(&out_param
);
191 return __mlx4_register_mac(dev
, port
, mac
);
193 EXPORT_SYMBOL_GPL(mlx4_register_mac
);
195 int mlx4_get_base_qpn(struct mlx4_dev
*dev
, u8 port
)
197 return dev
->caps
.reserved_qps_base
[MLX4_QP_REGION_ETH_ADDR
] +
198 (port
- 1) * (1 << dev
->caps
.log_num_macs
);
200 EXPORT_SYMBOL_GPL(mlx4_get_base_qpn
);
202 void __mlx4_unregister_mac(struct mlx4_dev
*dev
, u8 port
, u64 mac
)
204 struct mlx4_port_info
*info
= &mlx4_priv(dev
)->port
[port
];
205 struct mlx4_mac_table
*table
= &info
->mac_table
;
208 index
= find_index(dev
, table
, mac
);
210 mutex_lock(&table
->mutex
);
212 if (validate_index(dev
, table
, index
))
215 table
->entries
[index
] = 0;
216 mlx4_set_port_mac_table(dev
, port
, table
->entries
);
219 mutex_unlock(&table
->mutex
);
221 EXPORT_SYMBOL_GPL(__mlx4_unregister_mac
);
223 void mlx4_unregister_mac(struct mlx4_dev
*dev
, u8 port
, u64 mac
)
227 if (mlx4_is_mfunc(dev
)) {
228 set_param_l(&out_param
, port
);
229 (void) mlx4_cmd_imm(dev
, mac
, &out_param
, RES_MAC
,
230 RES_OP_RESERVE_AND_MAP
, MLX4_CMD_FREE_RES
,
231 MLX4_CMD_TIME_CLASS_A
, MLX4_CMD_WRAPPED
);
234 __mlx4_unregister_mac(dev
, port
, mac
);
237 EXPORT_SYMBOL_GPL(mlx4_unregister_mac
);
239 int __mlx4_replace_mac(struct mlx4_dev
*dev
, u8 port
, int qpn
, u64 new_mac
)
241 struct mlx4_port_info
*info
= &mlx4_priv(dev
)->port
[port
];
242 struct mlx4_mac_table
*table
= &info
->mac_table
;
243 int index
= qpn
- info
->base_qpn
;
246 /* CX1 doesn't support multi-functions */
247 mutex_lock(&table
->mutex
);
249 err
= validate_index(dev
, table
, index
);
253 table
->entries
[index
] = cpu_to_be64(new_mac
| MLX4_MAC_VALID
);
255 err
= mlx4_set_port_mac_table(dev
, port
, table
->entries
);
257 mlx4_err(dev
, "Failed adding MAC: 0x%llx\n",
258 (unsigned long long) new_mac
);
259 table
->entries
[index
] = 0;
262 mutex_unlock(&table
->mutex
);
265 EXPORT_SYMBOL_GPL(__mlx4_replace_mac
);
267 static int mlx4_set_port_vlan_table(struct mlx4_dev
*dev
, u8 port
,
270 struct mlx4_cmd_mailbox
*mailbox
;
274 mailbox
= mlx4_alloc_cmd_mailbox(dev
);
276 return PTR_ERR(mailbox
);
278 memcpy(mailbox
->buf
, entries
, MLX4_VLAN_TABLE_SIZE
);
279 in_mod
= MLX4_SET_PORT_VLAN_TABLE
<< 8 | port
;
280 err
= mlx4_cmd(dev
, mailbox
->dma
, in_mod
, 1, MLX4_CMD_SET_PORT
,
281 MLX4_CMD_TIME_CLASS_B
, MLX4_CMD_WRAPPED
);
283 mlx4_free_cmd_mailbox(dev
, mailbox
);
288 int mlx4_find_cached_vlan(struct mlx4_dev
*dev
, u8 port
, u16 vid
, int *idx
)
290 struct mlx4_vlan_table
*table
= &mlx4_priv(dev
)->port
[port
].vlan_table
;
293 for (i
= 0; i
< MLX4_MAX_VLAN_NUM
; ++i
) {
294 if (table
->refs
[i
] &&
295 (vid
== (MLX4_VLAN_MASK
&
296 be32_to_cpu(table
->entries
[i
])))) {
297 /* VLAN already registered, increase reference count */
305 EXPORT_SYMBOL_GPL(mlx4_find_cached_vlan
);
307 static int __mlx4_register_vlan(struct mlx4_dev
*dev
, u8 port
, u16 vlan
,
310 struct mlx4_vlan_table
*table
= &mlx4_priv(dev
)->port
[port
].vlan_table
;
314 mutex_lock(&table
->mutex
);
316 if (table
->total
== table
->max
) {
317 /* No free vlan entries */
322 for (i
= MLX4_VLAN_REGULAR
; i
< MLX4_MAX_VLAN_NUM
; i
++) {
323 if (free
< 0 && (table
->refs
[i
] == 0)) {
328 if (table
->refs
[i
] &&
329 (vlan
== (MLX4_VLAN_MASK
&
330 be32_to_cpu(table
->entries
[i
])))) {
331 /* Vlan already registered, increase references count */
343 /* Register new VLAN */
344 table
->refs
[free
] = 1;
345 table
->entries
[free
] = cpu_to_be32(vlan
| MLX4_VLAN_VALID
);
347 err
= mlx4_set_port_vlan_table(dev
, port
, table
->entries
);
349 mlx4_warn(dev
, "Failed adding vlan: %u\n", vlan
);
350 table
->refs
[free
] = 0;
351 table
->entries
[free
] = 0;
358 mutex_unlock(&table
->mutex
);
362 int mlx4_register_vlan(struct mlx4_dev
*dev
, u8 port
, u16 vlan
, int *index
)
367 if (mlx4_is_mfunc(dev
)) {
368 set_param_l(&out_param
, port
);
369 err
= mlx4_cmd_imm(dev
, vlan
, &out_param
, RES_VLAN
,
370 RES_OP_RESERVE_AND_MAP
, MLX4_CMD_ALLOC_RES
,
371 MLX4_CMD_TIME_CLASS_A
, MLX4_CMD_WRAPPED
);
373 *index
= get_param_l(&out_param
);
377 return __mlx4_register_vlan(dev
, port
, vlan
, index
);
379 EXPORT_SYMBOL_GPL(mlx4_register_vlan
);
381 static void __mlx4_unregister_vlan(struct mlx4_dev
*dev
, u8 port
, int index
)
383 struct mlx4_vlan_table
*table
= &mlx4_priv(dev
)->port
[port
].vlan_table
;
385 if (index
< MLX4_VLAN_REGULAR
) {
386 mlx4_warn(dev
, "Trying to free special vlan index %d\n", index
);
390 mutex_lock(&table
->mutex
);
391 if (!table
->refs
[index
]) {
392 mlx4_warn(dev
, "No vlan entry for index %d\n", index
);
395 if (--table
->refs
[index
]) {
396 mlx4_dbg(dev
, "Have more references for index %d,"
397 "no need to modify vlan table\n", index
);
400 table
->entries
[index
] = 0;
401 mlx4_set_port_vlan_table(dev
, port
, table
->entries
);
404 mutex_unlock(&table
->mutex
);
407 void mlx4_unregister_vlan(struct mlx4_dev
*dev
, u8 port
, int index
)
412 if (mlx4_is_mfunc(dev
)) {
413 set_param_l(&in_param
, port
);
414 err
= mlx4_cmd(dev
, in_param
, RES_VLAN
, RES_OP_RESERVE_AND_MAP
,
415 MLX4_CMD_FREE_RES
, MLX4_CMD_TIME_CLASS_A
,
418 mlx4_warn(dev
, "Failed freeing vlan at index:%d\n",
423 __mlx4_unregister_vlan(dev
, port
, index
);
425 EXPORT_SYMBOL_GPL(mlx4_unregister_vlan
);
427 int mlx4_get_port_ib_caps(struct mlx4_dev
*dev
, u8 port
, __be32
*caps
)
429 struct mlx4_cmd_mailbox
*inmailbox
, *outmailbox
;
433 inmailbox
= mlx4_alloc_cmd_mailbox(dev
);
434 if (IS_ERR(inmailbox
))
435 return PTR_ERR(inmailbox
);
437 outmailbox
= mlx4_alloc_cmd_mailbox(dev
);
438 if (IS_ERR(outmailbox
)) {
439 mlx4_free_cmd_mailbox(dev
, inmailbox
);
440 return PTR_ERR(outmailbox
);
443 inbuf
= inmailbox
->buf
;
444 outbuf
= outmailbox
->buf
;
445 memset(inbuf
, 0, 256);
446 memset(outbuf
, 0, 256);
451 *(__be16
*) (&inbuf
[16]) = cpu_to_be16(0x0015);
452 *(__be32
*) (&inbuf
[20]) = cpu_to_be32(port
);
454 err
= mlx4_cmd_box(dev
, inmailbox
->dma
, outmailbox
->dma
, port
, 3,
455 MLX4_CMD_MAD_IFC
, MLX4_CMD_TIME_CLASS_C
,
458 *caps
= *(__be32
*) (outbuf
+ 84);
459 mlx4_free_cmd_mailbox(dev
, inmailbox
);
460 mlx4_free_cmd_mailbox(dev
, outmailbox
);
464 static int mlx4_common_set_port(struct mlx4_dev
*dev
, int slave
, u32 in_mod
,
465 u8 op_mod
, struct mlx4_cmd_mailbox
*inbox
)
467 struct mlx4_priv
*priv
= mlx4_priv(dev
);
468 struct mlx4_port_info
*port_info
;
469 struct mlx4_mfunc_master_ctx
*master
= &priv
->mfunc
.master
;
470 struct mlx4_slave_state
*slave_st
= &master
->slave_state
[slave
];
471 struct mlx4_set_port_rqp_calc_context
*qpn_context
;
472 struct mlx4_set_port_general_context
*gen_context
;
473 int reset_qkey_viols
;
482 __be32 slave_cap_mask
;
485 port
= in_mod
& 0xff;
486 in_modifier
= in_mod
>> 8;
488 port_info
= &priv
->port
[port
];
490 /* Slaves cannot perform SET_PORT operations except changing MTU */
492 if (slave
!= dev
->caps
.function
&&
493 in_modifier
!= MLX4_SET_PORT_GENERAL
) {
494 mlx4_warn(dev
, "denying SET_PORT for slave:%d\n",
498 switch (in_modifier
) {
499 case MLX4_SET_PORT_RQP_CALC
:
500 qpn_context
= inbox
->buf
;
501 qpn_context
->base_qpn
=
502 cpu_to_be32(port_info
->base_qpn
);
503 qpn_context
->n_mac
= 0x7;
504 promisc
= be32_to_cpu(qpn_context
->promisc
) >>
505 SET_PORT_PROMISC_SHIFT
;
506 qpn_context
->promisc
= cpu_to_be32(
507 promisc
<< SET_PORT_PROMISC_SHIFT
|
508 port_info
->base_qpn
);
509 promisc
= be32_to_cpu(qpn_context
->mcast
) >>
510 SET_PORT_MC_PROMISC_SHIFT
;
511 qpn_context
->mcast
= cpu_to_be32(
512 promisc
<< SET_PORT_MC_PROMISC_SHIFT
|
513 port_info
->base_qpn
);
515 case MLX4_SET_PORT_GENERAL
:
516 gen_context
= inbox
->buf
;
517 /* Mtu is configured as the max MTU among all the
518 * the functions on the port. */
519 mtu
= be16_to_cpu(gen_context
->mtu
);
520 mtu
= min_t(int, mtu
, dev
->caps
.eth_mtu_cap
[port
]);
521 prev_mtu
= slave_st
->mtu
[port
];
522 slave_st
->mtu
[port
] = mtu
;
523 if (mtu
> master
->max_mtu
[port
])
524 master
->max_mtu
[port
] = mtu
;
525 if (mtu
< prev_mtu
&& prev_mtu
==
526 master
->max_mtu
[port
]) {
527 slave_st
->mtu
[port
] = mtu
;
528 master
->max_mtu
[port
] = mtu
;
529 for (i
= 0; i
< dev
->num_slaves
; i
++) {
530 master
->max_mtu
[port
] =
531 max(master
->max_mtu
[port
],
532 master
->slave_state
[i
].mtu
[port
]);
536 gen_context
->mtu
= cpu_to_be16(master
->max_mtu
[port
]);
539 return mlx4_cmd(dev
, inbox
->dma
, in_mod
, op_mod
,
540 MLX4_CMD_SET_PORT
, MLX4_CMD_TIME_CLASS_B
,
544 /* For IB, we only consider:
545 * - The capability mask, which is set to the aggregate of all
546 * slave function capabilities
547 * - The QKey violatin counter - reset according to each request.
550 if (dev
->flags
& MLX4_FLAG_OLD_PORT_CMDS
) {
551 reset_qkey_viols
= (*(u8
*) inbox
->buf
) & 0x40;
552 new_cap_mask
= ((__be32
*) inbox
->buf
)[2];
554 reset_qkey_viols
= ((u8
*) inbox
->buf
)[3] & 0x1;
555 new_cap_mask
= ((__be32
*) inbox
->buf
)[1];
558 /* slave may not set the IS_SM capability for the port */
559 if (slave
!= mlx4_master_func_num(dev
) &&
560 (be32_to_cpu(new_cap_mask
) & MLX4_PORT_CAP_IS_SM
))
563 /* No DEV_MGMT in multifunc mode */
564 if (mlx4_is_mfunc(dev
) &&
565 (be32_to_cpu(new_cap_mask
) & MLX4_PORT_CAP_DEV_MGMT_SUP
))
570 priv
->mfunc
.master
.slave_state
[slave
].ib_cap_mask
[port
];
571 priv
->mfunc
.master
.slave_state
[slave
].ib_cap_mask
[port
] = new_cap_mask
;
572 for (i
= 0; i
< dev
->num_slaves
; i
++)
574 priv
->mfunc
.master
.slave_state
[i
].ib_cap_mask
[port
];
576 /* only clear mailbox for guests. Master may be setting
577 * MTU or PKEY table size
579 if (slave
!= dev
->caps
.function
)
580 memset(inbox
->buf
, 0, 256);
581 if (dev
->flags
& MLX4_FLAG_OLD_PORT_CMDS
) {
582 *(u8
*) inbox
->buf
|= !!reset_qkey_viols
<< 6;
583 ((__be32
*) inbox
->buf
)[2] = agg_cap_mask
;
585 ((u8
*) inbox
->buf
)[3] |= !!reset_qkey_viols
;
586 ((__be32
*) inbox
->buf
)[1] = agg_cap_mask
;
589 err
= mlx4_cmd(dev
, inbox
->dma
, port
, is_eth
, MLX4_CMD_SET_PORT
,
590 MLX4_CMD_TIME_CLASS_B
, MLX4_CMD_NATIVE
);
592 priv
->mfunc
.master
.slave_state
[slave
].ib_cap_mask
[port
] =
597 int mlx4_SET_PORT_wrapper(struct mlx4_dev
*dev
, int slave
,
598 struct mlx4_vhcr
*vhcr
,
599 struct mlx4_cmd_mailbox
*inbox
,
600 struct mlx4_cmd_mailbox
*outbox
,
601 struct mlx4_cmd_info
*cmd
)
603 return mlx4_common_set_port(dev
, slave
, vhcr
->in_modifier
,
604 vhcr
->op_modifier
, inbox
);
/* bit locations for set port command with zero op modifier */
enum {
	MLX4_SET_PORT_VL_CAP	 = 4, /* bits 7:4 */
	MLX4_SET_PORT_MTU_CAP	 = 12, /* bits 15:12 */
	MLX4_CHANGE_PORT_PKEY_TBL_SZ = 20,
	MLX4_CHANGE_PORT_VL_CAP	 = 21,
	MLX4_CHANGE_PORT_MTU_CAP = 22,
};
616 int mlx4_SET_PORT(struct mlx4_dev
*dev
, u8 port
, int pkey_tbl_sz
)
618 struct mlx4_cmd_mailbox
*mailbox
;
619 int err
, vl_cap
, pkey_tbl_flag
= 0;
621 if (dev
->caps
.port_type
[port
] == MLX4_PORT_TYPE_ETH
)
624 mailbox
= mlx4_alloc_cmd_mailbox(dev
);
626 return PTR_ERR(mailbox
);
628 memset(mailbox
->buf
, 0, 256);
630 ((__be32
*) mailbox
->buf
)[1] = dev
->caps
.ib_port_def_cap
[port
];
632 if (pkey_tbl_sz
>= 0 && mlx4_is_master(dev
)) {
634 ((__be16
*) mailbox
->buf
)[20] = cpu_to_be16(pkey_tbl_sz
);
637 /* IB VL CAP enum isn't used by the firmware, just numerical values */
638 for (vl_cap
= 8; vl_cap
>= 1; vl_cap
>>= 1) {
639 ((__be32
*) mailbox
->buf
)[0] = cpu_to_be32(
640 (1 << MLX4_CHANGE_PORT_MTU_CAP
) |
641 (1 << MLX4_CHANGE_PORT_VL_CAP
) |
642 (pkey_tbl_flag
<< MLX4_CHANGE_PORT_PKEY_TBL_SZ
) |
643 (dev
->caps
.port_ib_mtu
[port
] << MLX4_SET_PORT_MTU_CAP
) |
644 (vl_cap
<< MLX4_SET_PORT_VL_CAP
));
645 err
= mlx4_cmd(dev
, mailbox
->dma
, port
, 0, MLX4_CMD_SET_PORT
,
646 MLX4_CMD_TIME_CLASS_B
, MLX4_CMD_WRAPPED
);
651 mlx4_free_cmd_mailbox(dev
, mailbox
);
655 int mlx4_SET_PORT_general(struct mlx4_dev
*dev
, u8 port
, int mtu
,
656 u8 pptx
, u8 pfctx
, u8 pprx
, u8 pfcrx
)
658 struct mlx4_cmd_mailbox
*mailbox
;
659 struct mlx4_set_port_general_context
*context
;
663 mailbox
= mlx4_alloc_cmd_mailbox(dev
);
665 return PTR_ERR(mailbox
);
666 context
= mailbox
->buf
;
667 memset(context
, 0, sizeof *context
);
669 context
->flags
= SET_PORT_GEN_ALL_VALID
;
670 context
->mtu
= cpu_to_be16(mtu
);
671 context
->pptx
= (pptx
* (!pfctx
)) << 7;
672 context
->pfctx
= pfctx
;
673 context
->pprx
= (pprx
* (!pfcrx
)) << 7;
674 context
->pfcrx
= pfcrx
;
676 in_mod
= MLX4_SET_PORT_GENERAL
<< 8 | port
;
677 err
= mlx4_cmd(dev
, mailbox
->dma
, in_mod
, 1, MLX4_CMD_SET_PORT
,
678 MLX4_CMD_TIME_CLASS_B
, MLX4_CMD_WRAPPED
);
680 mlx4_free_cmd_mailbox(dev
, mailbox
);
683 EXPORT_SYMBOL(mlx4_SET_PORT_general
);
685 int mlx4_SET_PORT_qpn_calc(struct mlx4_dev
*dev
, u8 port
, u32 base_qpn
,
688 struct mlx4_cmd_mailbox
*mailbox
;
689 struct mlx4_set_port_rqp_calc_context
*context
;
692 u32 m_promisc
= (dev
->caps
.flags
& MLX4_DEV_CAP_FLAG_VEP_MC_STEER
) ?
693 MCAST_DIRECT
: MCAST_DEFAULT
;
695 if (dev
->caps
.steering_mode
!= MLX4_STEERING_MODE_A0
)
698 mailbox
= mlx4_alloc_cmd_mailbox(dev
);
700 return PTR_ERR(mailbox
);
701 context
= mailbox
->buf
;
702 memset(context
, 0, sizeof *context
);
704 context
->base_qpn
= cpu_to_be32(base_qpn
);
705 context
->n_mac
= dev
->caps
.log_num_macs
;
706 context
->promisc
= cpu_to_be32(promisc
<< SET_PORT_PROMISC_SHIFT
|
708 context
->mcast
= cpu_to_be32(m_promisc
<< SET_PORT_MC_PROMISC_SHIFT
|
710 context
->intra_no_vlan
= 0;
711 context
->no_vlan
= MLX4_NO_VLAN_IDX
;
712 context
->intra_vlan_miss
= 0;
713 context
->vlan_miss
= MLX4_VLAN_MISS_IDX
;
715 in_mod
= MLX4_SET_PORT_RQP_CALC
<< 8 | port
;
716 err
= mlx4_cmd(dev
, mailbox
->dma
, in_mod
, 1, MLX4_CMD_SET_PORT
,
717 MLX4_CMD_TIME_CLASS_B
, MLX4_CMD_WRAPPED
);
719 mlx4_free_cmd_mailbox(dev
, mailbox
);
722 EXPORT_SYMBOL(mlx4_SET_PORT_qpn_calc
);
724 int mlx4_SET_PORT_PRIO2TC(struct mlx4_dev
*dev
, u8 port
, u8
*prio2tc
)
726 struct mlx4_cmd_mailbox
*mailbox
;
727 struct mlx4_set_port_prio2tc_context
*context
;
732 mailbox
= mlx4_alloc_cmd_mailbox(dev
);
734 return PTR_ERR(mailbox
);
735 context
= mailbox
->buf
;
736 memset(context
, 0, sizeof *context
);
738 for (i
= 0; i
< MLX4_NUM_UP
; i
+= 2)
739 context
->prio2tc
[i
>> 1] = prio2tc
[i
] << 4 | prio2tc
[i
+ 1];
741 in_mod
= MLX4_SET_PORT_PRIO2TC
<< 8 | port
;
742 err
= mlx4_cmd(dev
, mailbox
->dma
, in_mod
, 1, MLX4_CMD_SET_PORT
,
743 MLX4_CMD_TIME_CLASS_B
, MLX4_CMD_NATIVE
);
745 mlx4_free_cmd_mailbox(dev
, mailbox
);
748 EXPORT_SYMBOL(mlx4_SET_PORT_PRIO2TC
);
750 int mlx4_SET_PORT_SCHEDULER(struct mlx4_dev
*dev
, u8 port
, u8
*tc_tx_bw
,
751 u8
*pg
, u16
*ratelimit
)
753 struct mlx4_cmd_mailbox
*mailbox
;
754 struct mlx4_set_port_scheduler_context
*context
;
759 mailbox
= mlx4_alloc_cmd_mailbox(dev
);
761 return PTR_ERR(mailbox
);
762 context
= mailbox
->buf
;
763 memset(context
, 0, sizeof *context
);
765 for (i
= 0; i
< MLX4_NUM_TC
; i
++) {
766 struct mlx4_port_scheduler_tc_cfg_be
*tc
= &context
->tc
[i
];
767 u16 r
= ratelimit
&& ratelimit
[i
] ? ratelimit
[i
] :
768 MLX4_RATELIMIT_DEFAULT
;
770 tc
->pg
= htons(pg
[i
]);
771 tc
->bw_precentage
= htons(tc_tx_bw
[i
]);
773 tc
->max_bw_units
= htons(MLX4_RATELIMIT_UNITS
);
774 tc
->max_bw_value
= htons(r
);
777 in_mod
= MLX4_SET_PORT_SCHEDULER
<< 8 | port
;
778 err
= mlx4_cmd(dev
, mailbox
->dma
, in_mod
, 1, MLX4_CMD_SET_PORT
,
779 MLX4_CMD_TIME_CLASS_B
, MLX4_CMD_NATIVE
);
781 mlx4_free_cmd_mailbox(dev
, mailbox
);
784 EXPORT_SYMBOL(mlx4_SET_PORT_SCHEDULER
);
/*
 * Command wrapper for slave SET_MCAST_FLTR requests. Multicast filter
 * changes from slaves are intentionally ignored (reported as success)
 * since steering is owned by the master.
 */
int mlx4_SET_MCAST_FLTR_wrapper(struct mlx4_dev *dev, int slave,
				struct mlx4_vhcr *vhcr,
				struct mlx4_cmd_mailbox *inbox,
				struct mlx4_cmd_mailbox *outbox,
				struct mlx4_cmd_info *cmd)
{
	int err = 0;

	return err;
}
797 int mlx4_SET_MCAST_FLTR(struct mlx4_dev
*dev
, u8 port
,
798 u64 mac
, u64 clear
, u8 mode
)
800 return mlx4_cmd(dev
, (mac
| (clear
<< 63)), port
, mode
,
801 MLX4_CMD_SET_MCAST_FLTR
, MLX4_CMD_TIME_CLASS_B
,
804 EXPORT_SYMBOL(mlx4_SET_MCAST_FLTR
);
/*
 * Command wrapper for slave SET_VLAN_FLTR requests. VLAN filter
 * changes from slaves are intentionally ignored (reported as success).
 */
int mlx4_SET_VLAN_FLTR_wrapper(struct mlx4_dev *dev, int slave,
			       struct mlx4_vhcr *vhcr,
			       struct mlx4_cmd_mailbox *inbox,
			       struct mlx4_cmd_mailbox *outbox,
			       struct mlx4_cmd_info *cmd)
{
	int err = 0;

	return err;
}
817 int mlx4_common_dump_eth_stats(struct mlx4_dev
*dev
, int slave
,
818 u32 in_mod
, struct mlx4_cmd_mailbox
*outbox
)
820 return mlx4_cmd_box(dev
, 0, outbox
->dma
, in_mod
, 0,
821 MLX4_CMD_DUMP_ETH_STATS
, MLX4_CMD_TIME_CLASS_B
,
825 int mlx4_DUMP_ETH_STATS_wrapper(struct mlx4_dev
*dev
, int slave
,
826 struct mlx4_vhcr
*vhcr
,
827 struct mlx4_cmd_mailbox
*inbox
,
828 struct mlx4_cmd_mailbox
*outbox
,
829 struct mlx4_cmd_info
*cmd
)
831 if (slave
!= dev
->caps
.function
)
833 return mlx4_common_dump_eth_stats(dev
, slave
,
834 vhcr
->in_modifier
, outbox
);
837 void mlx4_set_stats_bitmap(struct mlx4_dev
*dev
, u64
*stats_bitmap
)
839 if (!mlx4_is_mfunc(dev
)) {
844 *stats_bitmap
= (MLX4_STATS_TRAFFIC_COUNTERS_MASK
|
845 MLX4_STATS_TRAFFIC_DROPS_MASK
|
846 MLX4_STATS_PORT_COUNTERS_MASK
);
848 if (mlx4_is_master(dev
))
849 *stats_bitmap
|= MLX4_STATS_ERROR_COUNTERS_MASK
;
851 EXPORT_SYMBOL(mlx4_set_stats_bitmap
);