/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/etherdevice.h>
#include <linux/mlx4/cmd.h>
#include <linux/module.h>
#include <linux/cache.h>

#include "fw.h"
#include "icm.h"

enum {
	MLX4_COMMAND_INTERFACE_MIN_REV		= 2,
	MLX4_COMMAND_INTERFACE_MAX_REV		= 3,
	MLX4_COMMAND_INTERFACE_NEW_PORT_CMDS	= 3,
};

extern void __buggy_use_of_MLX4_GET(void);
extern void __buggy_use_of_MLX4_PUT(void);

static bool enable_qos;
module_param(enable_qos, bool, 0444);
MODULE_PARM_DESC(enable_qos, "Enable Quality of Service support in the HCA (default: off)");

#define MLX4_GET(dest, source, offset)				      \
	do {							      \
		void *__p = (char *) (source) + (offset);	      \
		switch (sizeof (dest)) {			      \
		case 1: (dest) = *(u8 *) __p;	    break;	      \
		case 2: (dest) = be16_to_cpup(__p); break;	      \
		case 4: (dest) = be32_to_cpup(__p); break;	      \
		case 8: (dest) = be64_to_cpup(__p); break;	      \
		default: __buggy_use_of_MLX4_GET();		      \
		}						      \
	} while (0)

#define MLX4_PUT(dest, source, offset)				      \
	do {							      \
		void *__d = ((char *) (dest) + (offset));	      \
		switch (sizeof(source)) {			      \
		case 1: *(u8 *) __d = (source);		       break; \
		case 2: *(__be16 *) __d = cpu_to_be16(source); break; \
		case 4: *(__be32 *) __d = cpu_to_be32(source); break; \
		case 8: *(__be64 *) __d = cpu_to_be64(source); break; \
		default: __buggy_use_of_MLX4_PUT();		      \
		}						      \
	} while (0)
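
/*
 * Usage note (editorial, not part of the original source): these accessor
 * macros read and write big-endian fields at a byte offset inside a command
 * mailbox buffer, choosing the conversion from the size of the C object.
 * A minimal, hypothetical caller looks like:
 *
 *	u16 rate;
 *	void *outbox = mailbox->buf;
 *	MLX4_GET(rate, outbox, 0x3c);	- takes the be16_to_cpup() path
 *	MLX4_PUT(outbox, rate, 0x3c);	- takes the cpu_to_be16() path
 *
 * Any size other than 1/2/4/8 bytes fails to link through the
 * __buggy_use_of_MLX4_GET()/__buggy_use_of_MLX4_PUT() externs above.
 */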

static void dump_dev_cap_flags(struct mlx4_dev *dev, u64 flags)
{
	static const char *fname[] = {
		[ 0] = "RC transport",
		[ 1] = "UC transport",
		[ 2] = "UD transport",
		[ 3] = "XRC transport",
		[ 4] = "reliable multicast",
		[ 5] = "FCoIB support",
		[ 7] = "IPoIB checksum offload",
		[ 8] = "P_Key violation counter",
		[ 9] = "Q_Key violation counter",
		[12] = "Dual Port Different Protocol (DPDP) support",
		[15] = "Big LSO headers",
		[18] = "Atomic ops support",
		[19] = "Raw multicast support",
		[20] = "Address vector port checking support",
		[21] = "UD multicast support",
		[24] = "Demand paging support",
		[25] = "Router support",
		[30] = "IBoE support",
		[32] = "Unicast loopback support",
		[34] = "FCS header control",
		[38] = "Wake On LAN support",
		[40] = "UDP RSS support",
		[41] = "Unicast VEP steering support",
		[42] = "Multicast VEP steering support",
		[48] = "Counters support",
		[53] = "Port ETS Scheduler support",
		[55] = "Port link type sensing support",
		[59] = "Port management change event support",
		[61] = "64 byte EQE support",
		[62] = "64 byte CQE support",
	};
	int i;

	mlx4_dbg(dev, "DEV_CAP flags:\n");
	for (i = 0; i < ARRAY_SIZE(fname); ++i)
		if (fname[i] && (flags & (1LL << i)))
			mlx4_dbg(dev, "    %s\n", fname[i]);
}

static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags)
{
	static const char * const fname[] = {
		[1] = "RSS Toeplitz Hash Function support",
		[2] = "RSS XOR Hash Function support",
		[3] = "Device managed flow steering support",
		[4] = "Automatic MAC reassignment support",
		[5] = "Time stamping support",
		[6] = "VST (control vlan insertion/stripping) support",
		[7] = "FSM (MAC anti-spoofing) support",
		[8] = "Dynamic QP updates support",
		[9] = "Device managed flow steering IPoIB support",
		[10] = "TCP/IP offloads/flow-steering for VXLAN support",
		[11] = "MAD DEMUX (Secure-Host) support",
		[12] = "Large cache line (>64B) CQE stride support",
		[13] = "Large cache line (>64B) EQE stride support",
		[14] = "Ethernet protocol control support",
		[15] = "Ethernet Backplane autoneg support"
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(fname); ++i)
		if (fname[i] && (flags & (1LL << i)))
			mlx4_dbg(dev, "    %s\n", fname[i]);
}

int mlx4_MOD_STAT_CFG(struct mlx4_dev *dev, struct mlx4_mod_stat_cfg *cfg)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32 *inbox;
	int err = 0;

#define MOD_STAT_CFG_IN_SIZE		0x100

#define MOD_STAT_CFG_PG_SZ_M_OFFSET	0x002
#define MOD_STAT_CFG_PG_SZ_OFFSET	0x003

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	inbox = mailbox->buf;

	MLX4_PUT(inbox, cfg->log_pg_sz, MOD_STAT_CFG_PG_SZ_OFFSET);
	MLX4_PUT(inbox, cfg->log_pg_sz_m, MOD_STAT_CFG_PG_SZ_M_OFFSET);

	err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_MOD_STAT_CFG,
		       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
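
/*
 * Editorial note: mlx4_MOD_STAT_CFG() above shows the command pattern the
 * rest of this file repeats: allocate a DMA-able mailbox, fill (or read
 * back) fields at documented byte offsets with MLX4_PUT()/MLX4_GET(), fire
 * the command with mlx4_cmd()/mlx4_cmd_box(), then free the mailbox.
 * Offsets such as MOD_STAT_CFG_PG_SZ_OFFSET refer to the firmware-defined
 * mailbox layout of that particular command.
 */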

int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
				struct mlx4_vhcr *vhcr,
				struct mlx4_cmd_mailbox *inbox,
				struct mlx4_cmd_mailbox *outbox,
				struct mlx4_cmd_info *cmd)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u8 field, port;
	u32 size, proxy_qp, qkey;
	int err = 0;

#define QUERY_FUNC_CAP_FLAGS_OFFSET		0x0
#define QUERY_FUNC_CAP_NUM_PORTS_OFFSET		0x1
#define QUERY_FUNC_CAP_PF_BHVR_OFFSET		0x4
#define QUERY_FUNC_CAP_FMR_OFFSET		0x8
#define QUERY_FUNC_CAP_QP_QUOTA_OFFSET_DEP	0x10
#define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET_DEP	0x14
#define QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET_DEP	0x18
#define QUERY_FUNC_CAP_MPT_QUOTA_OFFSET_DEP	0x20
#define QUERY_FUNC_CAP_MTT_QUOTA_OFFSET_DEP	0x24
#define QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP	0x28
#define QUERY_FUNC_CAP_MAX_EQ_OFFSET		0x2c
#define QUERY_FUNC_CAP_RESERVED_EQ_OFFSET	0x30

#define QUERY_FUNC_CAP_QP_QUOTA_OFFSET		0x50
#define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET		0x54
#define QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET		0x58
#define QUERY_FUNC_CAP_MPT_QUOTA_OFFSET		0x60
#define QUERY_FUNC_CAP_MTT_QUOTA_OFFSET		0x64
#define QUERY_FUNC_CAP_MCG_QUOTA_OFFSET		0x68

#define QUERY_FUNC_CAP_FMR_FLAG			0x80
#define QUERY_FUNC_CAP_FLAG_RDMA		0x40
#define QUERY_FUNC_CAP_FLAG_ETH			0x80
#define QUERY_FUNC_CAP_FLAG_QUOTAS		0x10

/* when opcode modifier = 1 */
#define QUERY_FUNC_CAP_PHYS_PORT_OFFSET		0x3
#define QUERY_FUNC_CAP_PRIV_VF_QKEY_OFFSET	0x4
#define QUERY_FUNC_CAP_FLAGS0_OFFSET		0x8
#define QUERY_FUNC_CAP_FLAGS1_OFFSET		0xc

#define QUERY_FUNC_CAP_QP0_TUNNEL		0x10
#define QUERY_FUNC_CAP_QP0_PROXY		0x14
#define QUERY_FUNC_CAP_QP1_TUNNEL		0x18
#define QUERY_FUNC_CAP_QP1_PROXY		0x1c
#define QUERY_FUNC_CAP_PHYS_PORT_ID		0x28

#define QUERY_FUNC_CAP_FLAGS1_FORCE_MAC		0x40
#define QUERY_FUNC_CAP_FLAGS1_FORCE_VLAN	0x80
#define QUERY_FUNC_CAP_FLAGS1_NIC_INFO		0x10
#define QUERY_FUNC_CAP_VF_ENABLE_QP0		0x08

#define QUERY_FUNC_CAP_FLAGS0_FORCE_PHY_WQE_GID	0x80

	if (vhcr->op_modifier == 1) {
		struct mlx4_active_ports actv_ports =
			mlx4_get_active_ports(dev, slave);
		int converted_port = mlx4_slave_convert_port(
				dev, slave, vhcr->in_modifier);

		if (converted_port < 0)
			return -EINVAL;

		vhcr->in_modifier = converted_port;
		/* phys-port = logical-port */
		field = vhcr->in_modifier -
			find_first_bit(actv_ports.ports, dev->caps.num_ports);
		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_PHYS_PORT_OFFSET);

		port = vhcr->in_modifier;
		proxy_qp = dev->phys_caps.base_proxy_sqpn + 8 * slave + port - 1;
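
		/*
		 * Editorial note: proxy and tunnel special QPs are laid out in
		 * blocks of eight per function, so (illustrative numbers only)
		 * slave 2 on port 1 resolves to
		 * base_proxy_sqpn + 8 * 2 + 1 - 1 = base_proxy_sqpn + 16.
		 * The tunnel QP number reported below uses the same formula
		 * with base_tunnel_sqpn.
		 */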

		/* Set nic_info bit to mark new fields support */
		field  = QUERY_FUNC_CAP_FLAGS1_NIC_INFO;

		if (mlx4_vf_smi_enabled(dev, slave, port) &&
		    !mlx4_get_parav_qkey(dev, proxy_qp, &qkey)) {
			field |= QUERY_FUNC_CAP_VF_ENABLE_QP0;
			MLX4_PUT(outbox->buf, qkey,
				 QUERY_FUNC_CAP_PRIV_VF_QKEY_OFFSET);
		}
		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS1_OFFSET);

		/* size is now the QP number */
		size = dev->phys_caps.base_tunnel_sqpn + 8 * slave + port - 1;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP0_TUNNEL);

		size += 2;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP1_TUNNEL);

		MLX4_PUT(outbox->buf, proxy_qp, QUERY_FUNC_CAP_QP0_PROXY);
		proxy_qp += 2;
		MLX4_PUT(outbox->buf, proxy_qp, QUERY_FUNC_CAP_QP1_PROXY);

		MLX4_PUT(outbox->buf, dev->caps.phys_port_id[vhcr->in_modifier],
			 QUERY_FUNC_CAP_PHYS_PORT_ID);

	} else if (vhcr->op_modifier == 0) {
		struct mlx4_active_ports actv_ports =
			mlx4_get_active_ports(dev, slave);
		/* enable rdma and ethernet interfaces, and new quota locations */
		field = (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA |
			 QUERY_FUNC_CAP_FLAG_QUOTAS);
		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS_OFFSET);

		field = min(
			bitmap_weight(actv_ports.ports, dev->caps.num_ports),
			dev->caps.num_ports);
		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_NUM_PORTS_OFFSET);

		size = dev->caps.function_caps; /* set PF behaviours */
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_PF_BHVR_OFFSET);

		field = 0; /* protected FMR support not available as yet */
		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FMR_OFFSET);

		size = priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[slave];
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_QUOTA_OFFSET);
		size = dev->caps.num_qps;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_QUOTA_OFFSET_DEP);

		size = priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[slave];
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET);
		size = dev->caps.num_srqs;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET_DEP);

		size = priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[slave];
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET);
		size = dev->caps.num_cqs;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET_DEP);

		size = dev->caps.num_eqs;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MAX_EQ_OFFSET);

		size = dev->caps.reserved_eqs;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET);

		size = priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[slave];
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET);
		size = dev->caps.num_mpts;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET_DEP);

		size = priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[slave];
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET);
		size = dev->caps.num_mtts;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET_DEP);

		size = dev->caps.num_mgms + dev->caps.num_amgms;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET);
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP);
	} else
		err = -EINVAL;

	return err;
}

int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u32 gen_or_port,
			struct mlx4_func_cap *func_cap)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32 *outbox;
	u8 field, op_modifier;
	u32 size, qkey;
	int err = 0, quotas = 0;

	op_modifier = !!gen_or_port; /* 0 = general, 1 = logical port */

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	err = mlx4_cmd_box(dev, 0, mailbox->dma, gen_or_port, op_modifier,
			   MLX4_CMD_QUERY_FUNC_CAP,
			   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
	if (err)
		goto out;

	outbox = mailbox->buf;

	if (!op_modifier) {
		MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS_OFFSET);
		if (!(field & (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA))) {
			mlx4_err(dev, "The host supports neither eth nor rdma interfaces\n");
			err = -EPROTONOSUPPORT;
			goto out;
		}
		func_cap->flags = field;
		quotas = !!(func_cap->flags & QUERY_FUNC_CAP_FLAG_QUOTAS);

		MLX4_GET(field, outbox, QUERY_FUNC_CAP_NUM_PORTS_OFFSET);
		func_cap->num_ports = field;

		MLX4_GET(size, outbox, QUERY_FUNC_CAP_PF_BHVR_OFFSET);
		func_cap->pf_context_behaviour = size;

		if (quotas) {
			MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_QUOTA_OFFSET);
			func_cap->qp_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET);
			func_cap->srq_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET);
			func_cap->cq_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET);
			func_cap->mpt_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET);
			func_cap->mtt_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET);
			func_cap->mcg_quota = size & 0xFFFFFF;
		} else {
			MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_QUOTA_OFFSET_DEP);
			func_cap->qp_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET_DEP);
			func_cap->srq_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET_DEP);
			func_cap->cq_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET_DEP);
			func_cap->mpt_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET_DEP);
			func_cap->mtt_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP);
			func_cap->mcg_quota = size & 0xFFFFFF;
		}
		MLX4_GET(size, outbox, QUERY_FUNC_CAP_MAX_EQ_OFFSET);
		func_cap->max_eq = size & 0xFFFFFF;

		MLX4_GET(size, outbox, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET);
		func_cap->reserved_eq = size & 0xFFFFFF;

		goto out;
	}

	/* logical port query */
	if (gen_or_port > dev->caps.num_ports) {
		err = -EINVAL;
		goto out;
	}

	MLX4_GET(func_cap->flags1, outbox, QUERY_FUNC_CAP_FLAGS1_OFFSET);
	if (dev->caps.port_type[gen_or_port] == MLX4_PORT_TYPE_ETH) {
		if (func_cap->flags1 & QUERY_FUNC_CAP_FLAGS1_FORCE_VLAN) {
			mlx4_err(dev, "VLAN is enforced on this port\n");
			err = -EPROTONOSUPPORT;
			goto out;
		}

		if (func_cap->flags1 & QUERY_FUNC_CAP_FLAGS1_FORCE_MAC) {
			mlx4_err(dev, "Force mac is enabled on this port\n");
			err = -EPROTONOSUPPORT;
			goto out;
		}
	} else if (dev->caps.port_type[gen_or_port] == MLX4_PORT_TYPE_IB) {
		MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS0_OFFSET);
		if (field & QUERY_FUNC_CAP_FLAGS0_FORCE_PHY_WQE_GID) {
			mlx4_err(dev, "phy_wqe_gid is enforced on this ib port\n");
			err = -EPROTONOSUPPORT;
			goto out;
		}
	}

	MLX4_GET(field, outbox, QUERY_FUNC_CAP_PHYS_PORT_OFFSET);
	func_cap->physical_port = field;
	if (func_cap->physical_port != gen_or_port) {
		err = -ENOSYS;
		goto out;
	}

	if (func_cap->flags1 & QUERY_FUNC_CAP_VF_ENABLE_QP0) {
		MLX4_GET(qkey, outbox, QUERY_FUNC_CAP_PRIV_VF_QKEY_OFFSET);
		func_cap->qp0_qkey = qkey;
	} else {
		func_cap->qp0_qkey = 0;
	}

	MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP0_TUNNEL);
	func_cap->qp0_tunnel_qpn = size & 0xFFFFFF;

	MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP0_PROXY);
	func_cap->qp0_proxy_qpn = size & 0xFFFFFF;

	MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP1_TUNNEL);
	func_cap->qp1_tunnel_qpn = size & 0xFFFFFF;

	MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP1_PROXY);
	func_cap->qp1_proxy_qpn = size & 0xFFFFFF;

	if (func_cap->flags1 & QUERY_FUNC_CAP_FLAGS1_NIC_INFO)
		MLX4_GET(func_cap->phys_port_id, outbox,
			 QUERY_FUNC_CAP_PHYS_PORT_ID);

	/* All other resources are allocated by the master, but we still report
	 * 'num' and 'reserved' capabilities as follows:
	 * - num remains the maximum resource index
	 * - 'num - reserved' is the total available objects of a resource, but
	 *   resource indices may be less than 'reserved'
	 * TODO: set per-resource quotas */

out:
	mlx4_free_cmd_mailbox(dev, mailbox);

	return err;
}

int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32 *outbox;
	u8 field;
	u32 field32, flags, ext_flags;
	u16 size;
	u16 stat_rate;
	int err;
	int i;

#define QUERY_DEV_CAP_OUT_SIZE			0x100
#define QUERY_DEV_CAP_MAX_SRQ_SZ_OFFSET		0x10
#define QUERY_DEV_CAP_MAX_QP_SZ_OFFSET		0x11
#define QUERY_DEV_CAP_RSVD_QP_OFFSET		0x12
#define QUERY_DEV_CAP_MAX_QP_OFFSET		0x13
#define QUERY_DEV_CAP_RSVD_SRQ_OFFSET		0x14
#define QUERY_DEV_CAP_MAX_SRQ_OFFSET		0x15
#define QUERY_DEV_CAP_RSVD_EEC_OFFSET		0x16
#define QUERY_DEV_CAP_MAX_EEC_OFFSET		0x17
#define QUERY_DEV_CAP_MAX_CQ_SZ_OFFSET		0x19
#define QUERY_DEV_CAP_RSVD_CQ_OFFSET		0x1a
#define QUERY_DEV_CAP_MAX_CQ_OFFSET		0x1b
#define QUERY_DEV_CAP_MAX_MPT_OFFSET		0x1d
#define QUERY_DEV_CAP_RSVD_EQ_OFFSET		0x1e
#define QUERY_DEV_CAP_MAX_EQ_OFFSET		0x1f
#define QUERY_DEV_CAP_RSVD_MTT_OFFSET		0x20
#define QUERY_DEV_CAP_MAX_MRW_SZ_OFFSET		0x21
#define QUERY_DEV_CAP_RSVD_MRW_OFFSET		0x22
#define QUERY_DEV_CAP_MAX_MTT_SEG_OFFSET	0x23
#define QUERY_DEV_CAP_MAX_AV_OFFSET		0x27
#define QUERY_DEV_CAP_MAX_REQ_QP_OFFSET		0x29
#define QUERY_DEV_CAP_MAX_RES_QP_OFFSET		0x2b
#define QUERY_DEV_CAP_MAX_GSO_OFFSET		0x2d
#define QUERY_DEV_CAP_RSS_OFFSET		0x2e
#define QUERY_DEV_CAP_MAX_RDMA_OFFSET		0x2f
#define QUERY_DEV_CAP_RSZ_SRQ_OFFSET		0x33
#define QUERY_DEV_CAP_ACK_DELAY_OFFSET		0x35
#define QUERY_DEV_CAP_MTU_WIDTH_OFFSET		0x36
#define QUERY_DEV_CAP_VL_PORT_OFFSET		0x37
#define QUERY_DEV_CAP_MAX_MSG_SZ_OFFSET		0x38
#define QUERY_DEV_CAP_MAX_GID_OFFSET		0x3b
#define QUERY_DEV_CAP_RATE_SUPPORT_OFFSET	0x3c
#define QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET	0x3e
#define QUERY_DEV_CAP_MAX_PKEY_OFFSET		0x3f
#define QUERY_DEV_CAP_EXT_FLAGS_OFFSET		0x40
#define QUERY_DEV_CAP_FLAGS_OFFSET		0x44
#define QUERY_DEV_CAP_RSVD_UAR_OFFSET		0x48
#define QUERY_DEV_CAP_UAR_SZ_OFFSET		0x49
#define QUERY_DEV_CAP_PAGE_SZ_OFFSET		0x4b
#define QUERY_DEV_CAP_BF_OFFSET			0x4c
#define QUERY_DEV_CAP_LOG_BF_REG_SZ_OFFSET	0x4d
#define QUERY_DEV_CAP_LOG_MAX_BF_REGS_PER_PAGE_OFFSET	0x4e
#define QUERY_DEV_CAP_LOG_MAX_BF_PAGES_OFFSET	0x4f
#define QUERY_DEV_CAP_MAX_SG_SQ_OFFSET		0x51
#define QUERY_DEV_CAP_MAX_DESC_SZ_SQ_OFFSET	0x52
#define QUERY_DEV_CAP_MAX_SG_RQ_OFFSET		0x55
#define QUERY_DEV_CAP_MAX_DESC_SZ_RQ_OFFSET	0x56
#define QUERY_DEV_CAP_MAX_QP_MCG_OFFSET		0x61
#define QUERY_DEV_CAP_RSVD_MCG_OFFSET		0x62
#define QUERY_DEV_CAP_MAX_MCG_OFFSET		0x63
#define QUERY_DEV_CAP_RSVD_PD_OFFSET		0x64
#define QUERY_DEV_CAP_MAX_PD_OFFSET		0x65
#define QUERY_DEV_CAP_RSVD_XRC_OFFSET		0x66
#define QUERY_DEV_CAP_MAX_XRC_OFFSET		0x67
#define QUERY_DEV_CAP_MAX_COUNTERS_OFFSET	0x68
#define QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET	0x70
#define QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET	0x74
#define QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET	0x76
#define QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET	0x77
#define QUERY_DEV_CAP_CQ_EQ_CACHE_LINE_STRIDE	0x7a
#define QUERY_DEV_CAP_ETH_PROT_CTRL_OFFSET	0x7a
#define QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET	0x80
#define QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET	0x82
#define QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET	0x84
#define QUERY_DEV_CAP_ALTC_ENTRY_SZ_OFFSET	0x86
#define QUERY_DEV_CAP_EQC_ENTRY_SZ_OFFSET	0x88
#define QUERY_DEV_CAP_CQC_ENTRY_SZ_OFFSET	0x8a
#define QUERY_DEV_CAP_SRQ_ENTRY_SZ_OFFSET	0x8c
#define QUERY_DEV_CAP_C_MPT_ENTRY_SZ_OFFSET	0x8e
#define QUERY_DEV_CAP_MTT_ENTRY_SZ_OFFSET	0x90
#define QUERY_DEV_CAP_D_MPT_ENTRY_SZ_OFFSET	0x92
#define QUERY_DEV_CAP_BMME_FLAGS_OFFSET		0x94
#define QUERY_DEV_CAP_RSVD_LKEY_OFFSET		0x98
#define QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET		0xa0
#define QUERY_DEV_CAP_ETH_BACKPL_OFFSET		0x9c
#define QUERY_DEV_CAP_FW_REASSIGN_MAC		0x9d
#define QUERY_DEV_CAP_VXLAN			0x9e
#define QUERY_DEV_CAP_MAD_DEMUX_OFFSET		0xb0

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	outbox = mailbox->buf;

	err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP,
			   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
	if (err)
		goto out;

	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_QP_OFFSET);
	dev_cap->reserved_qps = 1 << (field & 0xf);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_OFFSET);
	dev_cap->max_qps = 1 << (field & 0x1f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_SRQ_OFFSET);
	dev_cap->reserved_srqs = 1 << (field >> 4);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SRQ_OFFSET);
	dev_cap->max_srqs = 1 << (field & 0x1f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_CQ_SZ_OFFSET);
	dev_cap->max_cq_sz = 1 << field;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_CQ_OFFSET);
	dev_cap->reserved_cqs = 1 << (field & 0xf);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_CQ_OFFSET);
	dev_cap->max_cqs = 1 << (field & 0x1f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MPT_OFFSET);
	dev_cap->max_mpts = 1 << (field & 0x3f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_EQ_OFFSET);
	dev_cap->reserved_eqs = field & 0xf;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_EQ_OFFSET);
	dev_cap->max_eqs = 1 << (field & 0xf);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MTT_OFFSET);
	dev_cap->reserved_mtts = 1 << (field >> 4);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MRW_SZ_OFFSET);
	dev_cap->max_mrw_sz = 1 << field;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MRW_OFFSET);
	dev_cap->reserved_mrws = 1 << (field & 0xf);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MTT_SEG_OFFSET);
	dev_cap->max_mtt_seg = 1 << (field & 0x3f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_REQ_QP_OFFSET);
	dev_cap->max_requester_per_qp = 1 << (field & 0x3f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_RES_QP_OFFSET);
	dev_cap->max_responder_per_qp = 1 << (field & 0x3f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_GSO_OFFSET);
	field &= 0x1f;
	if (!field)
		dev_cap->max_gso_sz = 0;
	else
		dev_cap->max_gso_sz = 1 << field;

	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSS_OFFSET);
	if (field & 0x20)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RSS_XOR;
	if (field & 0x10)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RSS_TOP;
	field &= 0xf;
	if (field) {
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RSS;
		dev_cap->max_rss_tbl_sz = 1 << field;
	} else
		dev_cap->max_rss_tbl_sz = 0;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_RDMA_OFFSET);
	dev_cap->max_rdma_global = 1 << (field & 0x3f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_ACK_DELAY_OFFSET);
	dev_cap->local_ca_ack_delay = field & 0x1f;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET);
	dev_cap->num_ports = field & 0xf;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MSG_SZ_OFFSET);
	dev_cap->max_msg_sz = 1 << (field & 0x1f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET);
	if (field & 0x80)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_FS_EN;
	dev_cap->fs_log_max_ucast_qp_range_size = field & 0x1f;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET);
	if (field & 0x80)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_DMFS_IPOIB;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET);
	dev_cap->fs_max_num_qp_per_entry = field;
	MLX4_GET(stat_rate, outbox, QUERY_DEV_CAP_RATE_SUPPORT_OFFSET);
	dev_cap->stat_rate_support = stat_rate;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET);
	if (field & 0x80)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_TS;
	MLX4_GET(ext_flags, outbox, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
	MLX4_GET(flags, outbox, QUERY_DEV_CAP_FLAGS_OFFSET);
	dev_cap->flags = flags | (u64)ext_flags << 32;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_UAR_OFFSET);
	dev_cap->reserved_uars = field >> 4;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_UAR_SZ_OFFSET);
	dev_cap->uar_size = 1 << ((field & 0x3f) + 20);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_PAGE_SZ_OFFSET);
	dev_cap->min_page_sz = 1 << field;

	MLX4_GET(field, outbox, QUERY_DEV_CAP_BF_OFFSET);
	if (field & 0x80) {
		MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_BF_REG_SZ_OFFSET);
		dev_cap->bf_reg_size = 1 << (field & 0x1f);
		MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_MAX_BF_REGS_PER_PAGE_OFFSET);
		if ((1 << (field & 0x3f)) > (PAGE_SIZE / dev_cap->bf_reg_size))
			field = 3;
		dev_cap->bf_regs_per_page = 1 << (field & 0x3f);
		mlx4_dbg(dev, "BlueFlame available (reg size %d, regs/page %d)\n",
			 dev_cap->bf_reg_size, dev_cap->bf_regs_per_page);
	} else {
		dev_cap->bf_reg_size = 0;
		mlx4_dbg(dev, "BlueFlame not available\n");
	}
, outbox
, QUERY_DEV_CAP_MAX_SG_SQ_OFFSET
);
693 dev_cap
->max_sq_sg
= field
;
694 MLX4_GET(size
, outbox
, QUERY_DEV_CAP_MAX_DESC_SZ_SQ_OFFSET
);
695 dev_cap
->max_sq_desc_sz
= size
;
697 MLX4_GET(field
, outbox
, QUERY_DEV_CAP_MAX_QP_MCG_OFFSET
);
698 dev_cap
->max_qp_per_mcg
= 1 << field
;
699 MLX4_GET(field
, outbox
, QUERY_DEV_CAP_RSVD_MCG_OFFSET
);
700 dev_cap
->reserved_mgms
= field
& 0xf;
701 MLX4_GET(field
, outbox
, QUERY_DEV_CAP_MAX_MCG_OFFSET
);
702 dev_cap
->max_mcgs
= 1 << field
;
703 MLX4_GET(field
, outbox
, QUERY_DEV_CAP_RSVD_PD_OFFSET
);
704 dev_cap
->reserved_pds
= field
>> 4;
705 MLX4_GET(field
, outbox
, QUERY_DEV_CAP_MAX_PD_OFFSET
);
706 dev_cap
->max_pds
= 1 << (field
& 0x3f);
707 MLX4_GET(field
, outbox
, QUERY_DEV_CAP_RSVD_XRC_OFFSET
);
708 dev_cap
->reserved_xrcds
= field
>> 4;
709 MLX4_GET(field
, outbox
, QUERY_DEV_CAP_MAX_XRC_OFFSET
);
710 dev_cap
->max_xrcds
= 1 << (field
& 0x1f);
712 MLX4_GET(size
, outbox
, QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET
);
713 dev_cap
->rdmarc_entry_sz
= size
;
714 MLX4_GET(size
, outbox
, QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET
);
715 dev_cap
->qpc_entry_sz
= size
;
716 MLX4_GET(size
, outbox
, QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET
);
717 dev_cap
->aux_entry_sz
= size
;
718 MLX4_GET(size
, outbox
, QUERY_DEV_CAP_ALTC_ENTRY_SZ_OFFSET
);
719 dev_cap
->altc_entry_sz
= size
;
720 MLX4_GET(size
, outbox
, QUERY_DEV_CAP_EQC_ENTRY_SZ_OFFSET
);
721 dev_cap
->eqc_entry_sz
= size
;
722 MLX4_GET(size
, outbox
, QUERY_DEV_CAP_CQC_ENTRY_SZ_OFFSET
);
723 dev_cap
->cqc_entry_sz
= size
;
724 MLX4_GET(size
, outbox
, QUERY_DEV_CAP_SRQ_ENTRY_SZ_OFFSET
);
725 dev_cap
->srq_entry_sz
= size
;
726 MLX4_GET(size
, outbox
, QUERY_DEV_CAP_C_MPT_ENTRY_SZ_OFFSET
);
727 dev_cap
->cmpt_entry_sz
= size
;
728 MLX4_GET(size
, outbox
, QUERY_DEV_CAP_MTT_ENTRY_SZ_OFFSET
);
729 dev_cap
->mtt_entry_sz
= size
;
730 MLX4_GET(size
, outbox
, QUERY_DEV_CAP_D_MPT_ENTRY_SZ_OFFSET
);
731 dev_cap
->dmpt_entry_sz
= size
;
733 MLX4_GET(field
, outbox
, QUERY_DEV_CAP_MAX_SRQ_SZ_OFFSET
);
734 dev_cap
->max_srq_sz
= 1 << field
;
735 MLX4_GET(field
, outbox
, QUERY_DEV_CAP_MAX_QP_SZ_OFFSET
);
736 dev_cap
->max_qp_sz
= 1 << field
;
737 MLX4_GET(field
, outbox
, QUERY_DEV_CAP_RSZ_SRQ_OFFSET
);
738 dev_cap
->resize_srq
= field
& 1;
739 MLX4_GET(field
, outbox
, QUERY_DEV_CAP_MAX_SG_RQ_OFFSET
);
740 dev_cap
->max_rq_sg
= field
;
741 MLX4_GET(size
, outbox
, QUERY_DEV_CAP_MAX_DESC_SZ_RQ_OFFSET
);
742 dev_cap
->max_rq_desc_sz
= size
;
743 MLX4_GET(field
, outbox
, QUERY_DEV_CAP_CQ_EQ_CACHE_LINE_STRIDE
);
744 if (field
& (1 << 5))
745 dev_cap
->flags2
|= MLX4_DEV_CAP_FLAG2_ETH_PROT_CTRL
;
746 if (field
& (1 << 6))
747 dev_cap
->flags2
|= MLX4_DEV_CAP_FLAG2_CQE_STRIDE
;
748 if (field
& (1 << 7))
749 dev_cap
->flags2
|= MLX4_DEV_CAP_FLAG2_EQE_STRIDE
;
750 MLX4_GET(dev_cap
->bmme_flags
, outbox
,
751 QUERY_DEV_CAP_BMME_FLAGS_OFFSET
);
752 MLX4_GET(dev_cap
->reserved_lkey
, outbox
,
753 QUERY_DEV_CAP_RSVD_LKEY_OFFSET
);
754 MLX4_GET(field32
, outbox
, QUERY_DEV_CAP_ETH_BACKPL_OFFSET
);
755 if (field32
& (1 << 0))
756 dev_cap
->flags2
|= MLX4_DEV_CAP_FLAG2_ETH_BACKPL_AN_REP
;
757 MLX4_GET(field
, outbox
, QUERY_DEV_CAP_FW_REASSIGN_MAC
);
759 dev_cap
->flags2
|= MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN
;
760 MLX4_GET(field
, outbox
, QUERY_DEV_CAP_VXLAN
);
762 dev_cap
->flags2
|= MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS
;
763 MLX4_GET(dev_cap
->max_icm_sz
, outbox
,
764 QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET
);
765 if (dev_cap
->flags
& MLX4_DEV_CAP_FLAG_COUNTERS
)
766 MLX4_GET(dev_cap
->max_counters
, outbox
,
767 QUERY_DEV_CAP_MAX_COUNTERS_OFFSET
);
769 MLX4_GET(field32
, outbox
,
770 QUERY_DEV_CAP_MAD_DEMUX_OFFSET
);
771 if (field32
& (1 << 0))
772 dev_cap
->flags2
|= MLX4_DEV_CAP_FLAG2_MAD_DEMUX
;
774 MLX4_GET(field32
, outbox
, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET
);
775 if (field32
& (1 << 16))
776 dev_cap
->flags2
|= MLX4_DEV_CAP_FLAG2_UPDATE_QP
;
777 if (field32
& (1 << 26))
778 dev_cap
->flags2
|= MLX4_DEV_CAP_FLAG2_VLAN_CONTROL
;
779 if (field32
& (1 << 20))
780 dev_cap
->flags2
|= MLX4_DEV_CAP_FLAG2_FSM
;
782 if (dev
->flags
& MLX4_FLAG_OLD_PORT_CMDS
) {
783 for (i
= 1; i
<= dev_cap
->num_ports
; ++i
) {
784 MLX4_GET(field
, outbox
, QUERY_DEV_CAP_VL_PORT_OFFSET
);
785 dev_cap
->max_vl
[i
] = field
>> 4;
786 MLX4_GET(field
, outbox
, QUERY_DEV_CAP_MTU_WIDTH_OFFSET
);
787 dev_cap
->ib_mtu
[i
] = field
>> 4;
788 dev_cap
->max_port_width
[i
] = field
& 0xf;
789 MLX4_GET(field
, outbox
, QUERY_DEV_CAP_MAX_GID_OFFSET
);
790 dev_cap
->max_gids
[i
] = 1 << (field
& 0xf);
791 MLX4_GET(field
, outbox
, QUERY_DEV_CAP_MAX_PKEY_OFFSET
);
792 dev_cap
->max_pkeys
[i
] = 1 << (field
& 0xf);
#define QUERY_PORT_SUPPORTED_TYPE_OFFSET	0x00
#define QUERY_PORT_MTU_OFFSET			0x01
#define QUERY_PORT_ETH_MTU_OFFSET		0x02
#define QUERY_PORT_WIDTH_OFFSET		0x06
#define QUERY_PORT_MAX_GID_PKEY_OFFSET		0x07
#define QUERY_PORT_MAX_MACVLAN_OFFSET		0x0a
#define QUERY_PORT_MAX_VL_OFFSET		0x0b
#define QUERY_PORT_MAC_OFFSET			0x10
#define QUERY_PORT_TRANS_VENDOR_OFFSET		0x18
#define QUERY_PORT_WAVELENGTH_OFFSET		0x1c
#define QUERY_PORT_TRANS_CODE_OFFSET		0x20

		for (i = 1; i <= dev_cap->num_ports; ++i) {
			err = mlx4_cmd_box(dev, 0, mailbox->dma, i, 0, MLX4_CMD_QUERY_PORT,
					   MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
			if (err)
				goto out;

			MLX4_GET(field, outbox, QUERY_PORT_SUPPORTED_TYPE_OFFSET);
			dev_cap->supported_port_types[i] = field & 3;
			dev_cap->suggested_type[i] = (field >> 3) & 1;
			dev_cap->default_sense[i] = (field >> 4) & 1;
			MLX4_GET(field, outbox, QUERY_PORT_MTU_OFFSET);
			dev_cap->ib_mtu[i]	   = field & 0xf;
			MLX4_GET(field, outbox, QUERY_PORT_WIDTH_OFFSET);
			dev_cap->max_port_width[i] = field & 0xf;
			MLX4_GET(field, outbox, QUERY_PORT_MAX_GID_PKEY_OFFSET);
			dev_cap->max_gids[i]	   = 1 << (field >> 4);
			dev_cap->max_pkeys[i]	   = 1 << (field & 0xf);
			MLX4_GET(field, outbox, QUERY_PORT_MAX_VL_OFFSET);
			dev_cap->max_vl[i]	   = field & 0xf;
			MLX4_GET(field, outbox, QUERY_PORT_MAX_MACVLAN_OFFSET);
			dev_cap->log_max_macs[i]  = field & 0xf;
			dev_cap->log_max_vlans[i] = field >> 4;
			MLX4_GET(dev_cap->eth_mtu[i], outbox, QUERY_PORT_ETH_MTU_OFFSET);
			MLX4_GET(dev_cap->def_mac[i], outbox, QUERY_PORT_MAC_OFFSET);
			MLX4_GET(field32, outbox, QUERY_PORT_TRANS_VENDOR_OFFSET);
			dev_cap->trans_type[i] = field32 >> 24;
			dev_cap->vendor_oui[i] = field32 & 0xffffff;
			MLX4_GET(dev_cap->wavelength[i], outbox, QUERY_PORT_WAVELENGTH_OFFSET);
			MLX4_GET(dev_cap->trans_code[i], outbox, QUERY_PORT_TRANS_CODE_OFFSET);
		}
	}

	mlx4_dbg(dev, "Base MM extensions: flags %08x, rsvd L_Key %08x\n",
		 dev_cap->bmme_flags, dev_cap->reserved_lkey);

	/*
	 * Each UAR has 4 EQ doorbells; so if a UAR is reserved, then
	 * we can't use any EQs whose doorbell falls on that page,
	 * even if the EQ itself isn't reserved.
	 */
	dev_cap->reserved_eqs = max(dev_cap->reserved_uars * 4,
				    dev_cap->reserved_eqs);
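
	/*
	 * Editorial example (not in the original source): with 8 reserved
	 * UARs this raises reserved_eqs to at least 32, since the 4 EQ
	 * doorbells sharing each reserved UAR page cannot be used.
	 */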

	mlx4_dbg(dev, "Max ICM size %lld MB\n",
		 (unsigned long long) dev_cap->max_icm_sz >> 20);
	mlx4_dbg(dev, "Max QPs: %d, reserved QPs: %d, entry size: %d\n",
		 dev_cap->max_qps, dev_cap->reserved_qps, dev_cap->qpc_entry_sz);
	mlx4_dbg(dev, "Max SRQs: %d, reserved SRQs: %d, entry size: %d\n",
		 dev_cap->max_srqs, dev_cap->reserved_srqs, dev_cap->srq_entry_sz);
	mlx4_dbg(dev, "Max CQs: %d, reserved CQs: %d, entry size: %d\n",
		 dev_cap->max_cqs, dev_cap->reserved_cqs, dev_cap->cqc_entry_sz);
	mlx4_dbg(dev, "Max EQs: %d, reserved EQs: %d, entry size: %d\n",
		 dev_cap->max_eqs, dev_cap->reserved_eqs, dev_cap->eqc_entry_sz);
	mlx4_dbg(dev, "reserved MPTs: %d, reserved MTTs: %d\n",
		 dev_cap->reserved_mrws, dev_cap->reserved_mtts);
	mlx4_dbg(dev, "Max PDs: %d, reserved PDs: %d, reserved UARs: %d\n",
		 dev_cap->max_pds, dev_cap->reserved_pds, dev_cap->reserved_uars);
	mlx4_dbg(dev, "Max QP/MCG: %d, reserved MGMs: %d\n",
		 dev_cap->max_pds, dev_cap->reserved_mgms);
	mlx4_dbg(dev, "Max CQEs: %d, max WQEs: %d, max SRQ WQEs: %d\n",
		 dev_cap->max_cq_sz, dev_cap->max_qp_sz, dev_cap->max_srq_sz);
	mlx4_dbg(dev, "Local CA ACK delay: %d, max MTU: %d, port width cap: %d\n",
		 dev_cap->local_ca_ack_delay, 128 << dev_cap->ib_mtu[1],
		 dev_cap->max_port_width[1]);
	mlx4_dbg(dev, "Max SQ desc size: %d, max SQ S/G: %d\n",
		 dev_cap->max_sq_desc_sz, dev_cap->max_sq_sg);
	mlx4_dbg(dev, "Max RQ desc size: %d, max RQ S/G: %d\n",
		 dev_cap->max_rq_desc_sz, dev_cap->max_rq_sg);
	mlx4_dbg(dev, "Max GSO size: %d\n", dev_cap->max_gso_sz);
	mlx4_dbg(dev, "Max counters: %d\n", dev_cap->max_counters);
	mlx4_dbg(dev, "Max RSS Table size: %d\n", dev_cap->max_rss_tbl_sz);

	dump_dev_cap_flags(dev, dev_cap->flags);
	dump_dev_cap_flags2(dev, dev_cap->flags2);

out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}

int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
			       struct mlx4_vhcr *vhcr,
			       struct mlx4_cmd_mailbox *inbox,
			       struct mlx4_cmd_mailbox *outbox,
			       struct mlx4_cmd_info *cmd)
{
	u64 flags;
	int err = 0;
	u8 field;
	u32 bmme_flags;
	int real_port;
	int slave_port;
	int first_port;
	struct mlx4_active_ports actv_ports;

	err = mlx4_cmd_box(dev, 0, outbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP,
			   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
	if (err)
		return err;

	/* add port mng change event capability and disable mw type 1
	 * unconditionally to slaves
	 */
	MLX4_GET(flags, outbox->buf, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
	flags |= MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV;
	flags &= ~MLX4_DEV_CAP_FLAG_MEM_WINDOW;
	actv_ports = mlx4_get_active_ports(dev, slave);
	first_port = find_first_bit(actv_ports.ports, dev->caps.num_ports);
	for (slave_port = 0, real_port = first_port;
	     real_port < first_port +
	     bitmap_weight(actv_ports.ports, dev->caps.num_ports);
	     ++real_port, ++slave_port) {
		if (flags & (MLX4_DEV_CAP_FLAG_WOL_PORT1 << real_port))
			flags |= MLX4_DEV_CAP_FLAG_WOL_PORT1 << slave_port;
		else
			flags &= ~(MLX4_DEV_CAP_FLAG_WOL_PORT1 << slave_port);
	}
	for (; slave_port < dev->caps.num_ports; ++slave_port)
		flags &= ~(MLX4_DEV_CAP_FLAG_WOL_PORT1 << slave_port);
	MLX4_PUT(outbox->buf, flags, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);

	MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_VL_PORT_OFFSET);
	field &= ~0x0F;
	field |= bitmap_weight(actv_ports.ports, dev->caps.num_ports) & 0x0F;
	MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_VL_PORT_OFFSET);

	/* For guests, disable timestamp */
	MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET);
	field &= 0x7f;
	MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET);

	/* For guests, disable vxlan tunneling */
	MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_VXLAN);
	field &= 0xf7;
	MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_VXLAN);

	/* For guests, report Blueflame disabled */
	MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_BF_OFFSET);
	field &= 0x7f;
	MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_BF_OFFSET);

	/* For guests, disable mw type 2 */
	MLX4_GET(bmme_flags, outbox->buf, QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
	bmme_flags &= ~MLX4_BMME_FLAG_TYPE_2_WIN;
	MLX4_PUT(outbox->buf, bmme_flags, QUERY_DEV_CAP_BMME_FLAGS_OFFSET);

	/* turn off device-managed steering capability if not enabled */
	if (dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED) {
		MLX4_GET(field, outbox->buf,
			 QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET);
		field &= 0x7f;
		MLX4_PUT(outbox->buf, field,
			 QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET);
	}

	/* turn off ipoib managed steering for guests */
	MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET);
	field &= ~0x80;
	MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET);

	return 0;
}

int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u64 def_mac;
	u8 port_type;
	u16 short_field;
	int err;
	int admin_link_state;
	int port = mlx4_slave_convert_port(dev, slave,
					   vhcr->in_modifier & 0xFF);

#define MLX4_VF_PORT_NO_LINK_SENSE_MASK	0xE0
#define MLX4_PORT_LINK_UP_MASK		0x80
#define QUERY_PORT_CUR_MAX_PKEY_OFFSET	0x0c
#define QUERY_PORT_CUR_MAX_GID_OFFSET	0x0e

	if (port < 0)
		return -EINVAL;

	/* Protect against untrusted guests: enforce that this is the
	 * QUERY_PORT general query.
	 */
	if (vhcr->op_modifier || vhcr->in_modifier & ~0xFF)
		return -EINVAL;

	vhcr->in_modifier = port;

	err = mlx4_cmd_box(dev, 0, outbox->dma, vhcr->in_modifier, 0,
			   MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
			   MLX4_CMD_NATIVE);

	if (!err && dev->caps.function != slave) {
		def_mac = priv->mfunc.master.vf_oper[slave].vport[vhcr->in_modifier].state.mac;
		MLX4_PUT(outbox->buf, def_mac, QUERY_PORT_MAC_OFFSET);

		/* get port type - currently only eth is enabled */
		MLX4_GET(port_type, outbox->buf,
			 QUERY_PORT_SUPPORTED_TYPE_OFFSET);

		/* No link sensing allowed */
		port_type &= MLX4_VF_PORT_NO_LINK_SENSE_MASK;
		/* set port type to currently operating port type */
		port_type |= (dev->caps.port_type[vhcr->in_modifier] & 0x3);

		admin_link_state = priv->mfunc.master.vf_oper[slave].vport[vhcr->in_modifier].state.link_state;
		if (IFLA_VF_LINK_STATE_ENABLE == admin_link_state)
			port_type |= MLX4_PORT_LINK_UP_MASK;
		else if (IFLA_VF_LINK_STATE_DISABLE == admin_link_state)
			port_type &= ~MLX4_PORT_LINK_UP_MASK;

		MLX4_PUT(outbox->buf, port_type,
			 QUERY_PORT_SUPPORTED_TYPE_OFFSET);

		if (dev->caps.port_type[vhcr->in_modifier] == MLX4_PORT_TYPE_ETH)
			short_field = mlx4_get_slave_num_gids(dev, slave, port);
		else
			short_field = 1; /* slave max gids */
		MLX4_PUT(outbox->buf, short_field,
			 QUERY_PORT_CUR_MAX_GID_OFFSET);

		short_field = dev->caps.pkey_table_len[vhcr->in_modifier];
		MLX4_PUT(outbox->buf, short_field,
			 QUERY_PORT_CUR_MAX_PKEY_OFFSET);
	}

	return err;
}

int mlx4_get_slave_pkey_gid_tbl_len(struct mlx4_dev *dev, u8 port,
				    int *gid_tbl_len, int *pkey_tbl_len)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32 *outbox;
	u8 field;
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	err = mlx4_cmd_box(dev, 0, mailbox->dma, port, 0,
			   MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
			   MLX4_CMD_WRAPPED);
	if (err)
		goto out;

	outbox = mailbox->buf;

	MLX4_GET(field, outbox, QUERY_PORT_CUR_MAX_GID_OFFSET);
	*gid_tbl_len = field;

	MLX4_GET(field, outbox, QUERY_PORT_CUR_MAX_PKEY_OFFSET);
	*pkey_tbl_len = field;

out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
EXPORT_SYMBOL(mlx4_get_slave_pkey_gid_tbl_len);
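
/*
 * Illustrative caller sketch (hypothetical, not part of this file): an IB
 * driver sizing its per-port tables for a VF could do
 *
 *	int gids, pkeys;
 *	int err = mlx4_get_slave_pkey_gid_tbl_len(dev, port, &gids, &pkeys);
 *	if (!err)
 *		pr_info("port %d: %d GIDs, %d P_Keys\n", port, gids, pkeys);
 *
 * The values come from the paravirtualized QUERY_PORT wrapper above, so a
 * slave sees its own quota rather than the physical table lengths.
 */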

int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_icm_iter iter;
	__be64 *pages;
	int lg;
	int nent = 0;
	int i;
	int err = 0;
	int ts = 0, tc = 0;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	pages = mailbox->buf;

	for (mlx4_icm_first(icm, &iter);
	     !mlx4_icm_last(&iter);
	     mlx4_icm_next(&iter)) {
		/*
		 * We have to pass pages that are aligned to their
		 * size, so find the least significant 1 in the
		 * address or size and use that as our log2 size.
		 */
		lg = ffs(mlx4_icm_addr(&iter) | mlx4_icm_size(&iter)) - 1;
		if (lg < MLX4_ICM_PAGE_SHIFT) {
			mlx4_warn(dev, "Got FW area not aligned to %d (%llx/%lx)\n",
				  MLX4_ICM_PAGE_SIZE,
				  (unsigned long long) mlx4_icm_addr(&iter),
				  mlx4_icm_size(&iter));
			err = -EINVAL;
			goto out;
		}

		for (i = 0; i < mlx4_icm_size(&iter) >> lg; ++i) {
			if (virt != -1) {
				pages[nent * 2] = cpu_to_be64(virt);
				virt += 1 << lg;
			}

			pages[nent * 2 + 1] =
				cpu_to_be64((mlx4_icm_addr(&iter) + (i << lg)) |
					    (lg - MLX4_ICM_PAGE_SHIFT));
			ts += 1 << (lg - 10);
			++tc;

			if (++nent == MLX4_MAILBOX_SIZE / 16) {
				err = mlx4_cmd(dev, mailbox->dma, nent, 0, op,
					       MLX4_CMD_TIME_CLASS_B,
					       MLX4_CMD_NATIVE);
				if (err)
					goto out;
				nent = 0;
			}
		}
	}

	if (nent)
		err = mlx4_cmd(dev, mailbox->dma, nent, 0, op,
			       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
	if (err)
		goto out;

	switch (op) {
	case MLX4_CMD_MAP_FA:
		mlx4_dbg(dev, "Mapped %d chunks/%d KB for FW\n", tc, ts);
		break;
	case MLX4_CMD_MAP_ICM_AUX:
		mlx4_dbg(dev, "Mapped %d chunks/%d KB for ICM aux\n", tc, ts);
		break;
	case MLX4_CMD_MAP_ICM:
		mlx4_dbg(dev, "Mapped %d chunks/%d KB at %llx for ICM\n",
			 tc, ts, (unsigned long long) virt - (ts << 10));
		break;
	}

out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
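
/*
 * Editorial note on the mapping entries built above: each 16-byte entry
 * carries an optional virtual address and a physical address whose low
 * bits encode the chunk size as (lg - MLX4_ICM_PAGE_SHIFT). For example
 * (illustrative numbers only), a 256 KB aligned chunk gives lg = 18, so
 * the size field is 18 - 12 = 6 and ts grows by 1 << (18 - 10) = 256 KB.
 */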

int mlx4_MAP_FA(struct mlx4_dev *dev, struct mlx4_icm *icm)
{
	return mlx4_map_cmd(dev, MLX4_CMD_MAP_FA, icm, -1);
}

int mlx4_UNMAP_FA(struct mlx4_dev *dev)
{
	return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_FA,
			MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
}

int mlx4_RUN_FW(struct mlx4_dev *dev)
{
	return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_RUN_FW,
			MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
}

int mlx4_QUERY_FW(struct mlx4_dev *dev)
{
	struct mlx4_fw  *fw  = &mlx4_priv(dev)->fw;
	struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
	struct mlx4_cmd_mailbox *mailbox;
	u32 *outbox;
	int err = 0;
	u64 fw_ver;
	u16 cmd_if_rev;
	u8 lg;

#define QUERY_FW_OUT_SIZE		0x100
#define QUERY_FW_VER_OFFSET		0x00
#define QUERY_FW_PPF_ID			0x09
#define QUERY_FW_CMD_IF_REV_OFFSET	0x0a
#define QUERY_FW_MAX_CMD_OFFSET		0x0f
#define QUERY_FW_ERR_START_OFFSET	0x30
#define QUERY_FW_ERR_SIZE_OFFSET	0x38
#define QUERY_FW_ERR_BAR_OFFSET		0x3c

#define QUERY_FW_SIZE_OFFSET		0x00
#define QUERY_FW_CLR_INT_BASE_OFFSET	0x20
#define QUERY_FW_CLR_INT_BAR_OFFSET	0x28

#define QUERY_FW_COMM_BASE_OFFSET	0x40
#define QUERY_FW_COMM_BAR_OFFSET	0x48

#define QUERY_FW_CLOCK_OFFSET		0x50
#define QUERY_FW_CLOCK_BAR		0x58

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	outbox = mailbox->buf;

	err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_FW,
			   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
	if (err)
		goto out;

	MLX4_GET(fw_ver, outbox, QUERY_FW_VER_OFFSET);
	/*
	 * FW subminor version is at more significant bits than minor
	 * version, so swap here.
	 */
	dev->caps.fw_ver = (fw_ver & 0xffff00000000ull) |
		((fw_ver & 0xffff0000ull) >> 16) |
		((fw_ver & 0x0000ffffull) << 16);
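
	/*
	 * Editorial example (illustrative value only): a raw QUERY_FW word of
	 * 0x00020BC80009 (major 2, subminor 0x0BC8, minor 9) becomes
	 * 0x000200090BC8, which the messages below print as "2.9.3016".
	 */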

	MLX4_GET(lg, outbox, QUERY_FW_PPF_ID);
	dev->caps.function = lg;

	if (mlx4_is_slave(dev))
		goto out;

	MLX4_GET(cmd_if_rev, outbox, QUERY_FW_CMD_IF_REV_OFFSET);
	if (cmd_if_rev < MLX4_COMMAND_INTERFACE_MIN_REV ||
	    cmd_if_rev > MLX4_COMMAND_INTERFACE_MAX_REV) {
		mlx4_err(dev, "Installed FW has unsupported command interface revision %d\n",
			 cmd_if_rev);
		mlx4_err(dev, "(Installed FW version is %d.%d.%03d)\n",
			 (int) (dev->caps.fw_ver >> 32),
			 (int) (dev->caps.fw_ver >> 16) & 0xffff,
			 (int) dev->caps.fw_ver & 0xffff);
		mlx4_err(dev, "This driver version supports only revisions %d to %d\n",
			 MLX4_COMMAND_INTERFACE_MIN_REV, MLX4_COMMAND_INTERFACE_MAX_REV);
		err = -ENODEV;
		goto out;
	}

	if (cmd_if_rev < MLX4_COMMAND_INTERFACE_NEW_PORT_CMDS)
		dev->flags |= MLX4_FLAG_OLD_PORT_CMDS;

	MLX4_GET(lg, outbox, QUERY_FW_MAX_CMD_OFFSET);
	cmd->max_cmds = 1 << lg;

	mlx4_dbg(dev, "FW version %d.%d.%03d (cmd intf rev %d), max commands %d\n",
		 (int) (dev->caps.fw_ver >> 32),
		 (int) (dev->caps.fw_ver >> 16) & 0xffff,
		 (int) dev->caps.fw_ver & 0xffff,
		 cmd_if_rev, cmd->max_cmds);

	MLX4_GET(fw->catas_offset, outbox, QUERY_FW_ERR_START_OFFSET);
	MLX4_GET(fw->catas_size,   outbox, QUERY_FW_ERR_SIZE_OFFSET);
	MLX4_GET(fw->catas_bar,    outbox, QUERY_FW_ERR_BAR_OFFSET);
	fw->catas_bar = (fw->catas_bar >> 6) * 2;

	mlx4_dbg(dev, "Catastrophic error buffer at 0x%llx, size 0x%x, BAR %d\n",
		 (unsigned long long) fw->catas_offset, fw->catas_size, fw->catas_bar);

	MLX4_GET(fw->fw_pages,     outbox, QUERY_FW_SIZE_OFFSET);
	MLX4_GET(fw->clr_int_base, outbox, QUERY_FW_CLR_INT_BASE_OFFSET);
	MLX4_GET(fw->clr_int_bar,  outbox, QUERY_FW_CLR_INT_BAR_OFFSET);
	fw->clr_int_bar = (fw->clr_int_bar >> 6) * 2;

	MLX4_GET(fw->comm_base, outbox, QUERY_FW_COMM_BASE_OFFSET);
	MLX4_GET(fw->comm_bar,  outbox, QUERY_FW_COMM_BAR_OFFSET);
	fw->comm_bar = (fw->comm_bar >> 6) * 2;
	mlx4_dbg(dev, "Communication vector bar:%d offset:0x%llx\n",
		 fw->comm_bar, fw->comm_base);
	mlx4_dbg(dev, "FW size %d KB\n", fw->fw_pages >> 2);

	MLX4_GET(fw->clock_offset, outbox, QUERY_FW_CLOCK_OFFSET);
	MLX4_GET(fw->clock_bar,    outbox, QUERY_FW_CLOCK_BAR);
	fw->clock_bar = (fw->clock_bar >> 6) * 2;
	mlx4_dbg(dev, "Internal clock bar:%d offset:0x%llx\n",
		 fw->clock_bar, fw->clock_offset);

	/*
	 * Round up number of system pages needed in case
	 * MLX4_ICM_PAGE_SIZE < PAGE_SIZE.
	 */
	fw->fw_pages =
		ALIGN(fw->fw_pages, PAGE_SIZE / MLX4_ICM_PAGE_SIZE) >>
		(PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT);

	mlx4_dbg(dev, "Clear int @ %llx, BAR %d\n",
		 (unsigned long long) fw->clr_int_base, fw->clr_int_bar);

out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}

int mlx4_QUERY_FW_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	u8 *outbuf;
	int err;

	outbuf = outbox->buf;
	err = mlx4_cmd_box(dev, 0, outbox->dma, 0, 0, MLX4_CMD_QUERY_FW,
			   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
	if (err)
		return err;

	/* for slaves, set pci PPF ID to invalid and zero out everything
	 * else except FW version */
	outbuf[0] = outbuf[1] = 0;
	memset(&outbuf[8], 0, QUERY_FW_OUT_SIZE - 8);
	outbuf[QUERY_FW_PPF_ID] = MLX4_INVALID_SLAVE_ID;

	return 0;
}

static void get_board_id(void *vsd, char *board_id)
{
	int i;

#define VSD_OFFSET_SIG1		0x00
#define VSD_OFFSET_SIG2		0xde
#define VSD_OFFSET_MLX_BOARD_ID	0xd0
#define VSD_OFFSET_TS_BOARD_ID	0x20

#define VSD_SIGNATURE_TOPSPIN	0x5ad

	memset(board_id, 0, MLX4_BOARD_ID_LEN);

	if (be16_to_cpup(vsd + VSD_OFFSET_SIG1) == VSD_SIGNATURE_TOPSPIN &&
	    be16_to_cpup(vsd + VSD_OFFSET_SIG2) == VSD_SIGNATURE_TOPSPIN) {
		strlcpy(board_id, vsd + VSD_OFFSET_TS_BOARD_ID, MLX4_BOARD_ID_LEN);
	} else {
		/*
		 * The board ID is a string but the firmware byte
		 * swaps each 4-byte word before passing it back to
		 * us.  Therefore we need to swab it before printing.
		 */
		for (i = 0; i < 4; ++i)
			((u32 *) board_id)[i] =
				swab32(*(u32 *) (vsd + VSD_OFFSET_MLX_BOARD_ID + i * 4));
	}
}

int mlx4_QUERY_ADAPTER(struct mlx4_dev *dev, struct mlx4_adapter *adapter)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32 *outbox;
	int err;

#define QUERY_ADAPTER_OUT_SIZE		0x100
#define QUERY_ADAPTER_INTA_PIN_OFFSET	0x10
#define QUERY_ADAPTER_VSD_OFFSET	0x20

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	outbox = mailbox->buf;

	err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_ADAPTER,
			   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
	if (err)
		goto out;

	MLX4_GET(adapter->inta_pin, outbox, QUERY_ADAPTER_INTA_PIN_OFFSET);

	get_board_id(outbox + QUERY_ADAPTER_VSD_OFFSET / 4,
		     adapter->board_id);

out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}

int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
{
	struct mlx4_cmd_mailbox *mailbox;
	__be32 *inbox;
	int err;

#define INIT_HCA_IN_SIZE		 0x200
#define INIT_HCA_VERSION_OFFSET		 0x000
#define	 INIT_HCA_VERSION		 2
#define INIT_HCA_VXLAN_OFFSET		 0x0c
#define INIT_HCA_CACHELINE_SZ_OFFSET	 0x0e
#define INIT_HCA_FLAGS_OFFSET		 0x014
#define INIT_HCA_QPC_OFFSET		 0x020
#define	 INIT_HCA_QPC_BASE_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x10)
#define	 INIT_HCA_LOG_QP_OFFSET		 (INIT_HCA_QPC_OFFSET + 0x17)
#define	 INIT_HCA_SRQC_BASE_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x28)
#define	 INIT_HCA_LOG_SRQ_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x2f)
#define	 INIT_HCA_CQC_BASE_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x30)
#define	 INIT_HCA_LOG_CQ_OFFSET		 (INIT_HCA_QPC_OFFSET + 0x37)
#define	 INIT_HCA_EQE_CQE_OFFSETS	 (INIT_HCA_QPC_OFFSET + 0x38)
#define	 INIT_HCA_EQE_CQE_STRIDE_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x3b)
#define	 INIT_HCA_ALTC_BASE_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x40)
#define	 INIT_HCA_AUXC_BASE_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x50)
#define	 INIT_HCA_EQC_BASE_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x60)
#define	 INIT_HCA_LOG_EQ_OFFSET		 (INIT_HCA_QPC_OFFSET + 0x67)
#define	 INIT_HCA_RDMARC_BASE_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x70)
#define	 INIT_HCA_LOG_RD_OFFSET		 (INIT_HCA_QPC_OFFSET + 0x77)
#define INIT_HCA_MCAST_OFFSET		 0x0c0
#define	 INIT_HCA_MC_BASE_OFFSET	 (INIT_HCA_MCAST_OFFSET + 0x00)
#define	 INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x12)
#define	 INIT_HCA_LOG_MC_HASH_SZ_OFFSET	 (INIT_HCA_MCAST_OFFSET + 0x16)
#define	 INIT_HCA_UC_STEERING_OFFSET	 (INIT_HCA_MCAST_OFFSET + 0x18)
#define	 INIT_HCA_LOG_MC_TABLE_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x1b)
#define	 INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN	0x6
#define	 INIT_HCA_FS_PARAM_OFFSET	 0x1d0
#define	 INIT_HCA_FS_BASE_OFFSET	 (INIT_HCA_FS_PARAM_OFFSET + 0x00)
#define	 INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x12)
#define	 INIT_HCA_FS_LOG_TABLE_SZ_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x1b)
#define	 INIT_HCA_FS_ETH_BITS_OFFSET	 (INIT_HCA_FS_PARAM_OFFSET + 0x21)
#define	 INIT_HCA_FS_ETH_NUM_ADDRS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x22)
#define	 INIT_HCA_FS_IB_BITS_OFFSET	 (INIT_HCA_FS_PARAM_OFFSET + 0x25)
#define	 INIT_HCA_FS_IB_NUM_ADDRS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x26)
#define INIT_HCA_TPT_OFFSET		 0x0f0
#define	 INIT_HCA_DMPT_BASE_OFFSET	 (INIT_HCA_TPT_OFFSET + 0x00)
#define	 INIT_HCA_TPT_MW_OFFSET		 (INIT_HCA_TPT_OFFSET + 0x08)
#define	 INIT_HCA_LOG_MPT_SZ_OFFSET	 (INIT_HCA_TPT_OFFSET + 0x0b)
#define	 INIT_HCA_MTT_BASE_OFFSET	 (INIT_HCA_TPT_OFFSET + 0x10)
#define	 INIT_HCA_CMPT_BASE_OFFSET	 (INIT_HCA_TPT_OFFSET + 0x18)
#define INIT_HCA_UAR_OFFSET		 0x120
#define	 INIT_HCA_LOG_UAR_SZ_OFFSET	 (INIT_HCA_UAR_OFFSET + 0x0a)
#define	 INIT_HCA_UAR_PAGE_SZ_OFFSET	 (INIT_HCA_UAR_OFFSET + 0x0b)

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	inbox = mailbox->buf;

	*((u8 *) mailbox->buf + INIT_HCA_VERSION_OFFSET) = INIT_HCA_VERSION;

	*((u8 *) mailbox->buf + INIT_HCA_CACHELINE_SZ_OFFSET) =
		(ilog2(cache_line_size()) - 4) << 5;
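
	/*
	 * Editorial example: with a 64-byte cache line, ilog2() is 6, so the
	 * byte written above is (6 - 4) << 5 = 0x40; a 128-byte line would
	 * give (7 - 4) << 5 = 0x60.
	 */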

#if defined(__LITTLE_ENDIAN)
	*(inbox + INIT_HCA_FLAGS_OFFSET / 4) &= ~cpu_to_be32(1 << 1);
#elif defined(__BIG_ENDIAN)
	*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 1);
#else
#error Host endianness not defined
#endif
	/* Check port for UD address vector: */
	*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1);

	/* Enable IPoIB checksumming if we can: */
	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM)
		*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 3);

	/* Enable QoS support if module parameter set */
	if (enable_qos)
		*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 2);

	/* enable counters */
	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS)
		*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 4);

	/* CX3 is capable of extending CQEs/EQEs from 32 to 64 bytes */
	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_64B_EQE) {
		*(inbox + INIT_HCA_EQE_CQE_OFFSETS / 4) |= cpu_to_be32(1 << 29);
		dev->caps.eqe_size   = 64;
		dev->caps.eqe_factor = 1;
	} else {
		dev->caps.eqe_size   = 32;
		dev->caps.eqe_factor = 0;
	}

	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_64B_CQE) {
		*(inbox + INIT_HCA_EQE_CQE_OFFSETS / 4) |= cpu_to_be32(1 << 30);
		dev->caps.cqe_size   = 64;
		dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE;
	} else {
		dev->caps.cqe_size   = 32;
	}

	/* CX3 is capable of extending CQEs\EQEs to strides larger than 64B */
	if ((dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_EQE_STRIDE) &&
	    (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_CQE_STRIDE)) {
		dev->caps.eqe_size = cache_line_size();
		dev->caps.cqe_size = cache_line_size();
		dev->caps.eqe_factor = 0;
		MLX4_PUT(inbox, (u8)((ilog2(dev->caps.eqe_size) - 5) << 4 |
				     (ilog2(dev->caps.eqe_size) - 5)),
			 INIT_HCA_EQE_CQE_STRIDE_OFFSET);

		/* User still need to know to support CQE > 32B */
		dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE;
	}

	/* QPC/EEC/CQC/EQC/RDMARC attributes */

	MLX4_PUT(inbox, param->qpc_base,      INIT_HCA_QPC_BASE_OFFSET);
	MLX4_PUT(inbox, param->log_num_qps,   INIT_HCA_LOG_QP_OFFSET);
	MLX4_PUT(inbox, param->srqc_base,     INIT_HCA_SRQC_BASE_OFFSET);
	MLX4_PUT(inbox, param->log_num_srqs,  INIT_HCA_LOG_SRQ_OFFSET);
	MLX4_PUT(inbox, param->cqc_base,      INIT_HCA_CQC_BASE_OFFSET);
	MLX4_PUT(inbox, param->log_num_cqs,   INIT_HCA_LOG_CQ_OFFSET);
	MLX4_PUT(inbox, param->altc_base,     INIT_HCA_ALTC_BASE_OFFSET);
	MLX4_PUT(inbox, param->auxc_base,     INIT_HCA_AUXC_BASE_OFFSET);
	MLX4_PUT(inbox, param->eqc_base,      INIT_HCA_EQC_BASE_OFFSET);
	MLX4_PUT(inbox, param->log_num_eqs,   INIT_HCA_LOG_EQ_OFFSET);
	MLX4_PUT(inbox, param->rdmarc_base,   INIT_HCA_RDMARC_BASE_OFFSET);
	MLX4_PUT(inbox, param->log_rd_per_qp, INIT_HCA_LOG_RD_OFFSET);

	/* steering attributes */
	if (dev->caps.steering_mode ==
	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
		*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |=
			cpu_to_be32(1 <<
				    INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN);

		MLX4_PUT(inbox, param->mc_base, INIT_HCA_FS_BASE_OFFSET);
		MLX4_PUT(inbox, param->log_mc_entry_sz,
			 INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET);
		MLX4_PUT(inbox, param->log_mc_table_sz,
			 INIT_HCA_FS_LOG_TABLE_SZ_OFFSET);
		/* Enable Ethernet flow steering
		 * with udp unicast and tcp unicast
		 */
		MLX4_PUT(inbox, (u8) (MLX4_FS_UDP_UC_EN | MLX4_FS_TCP_UC_EN),
			 INIT_HCA_FS_ETH_BITS_OFFSET);
		MLX4_PUT(inbox, (u16) MLX4_FS_NUM_OF_L2_ADDR,
			 INIT_HCA_FS_ETH_NUM_ADDRS_OFFSET);
		/* Enable IPoIB flow steering
		 * with udp unicast and tcp unicast
		 */
		MLX4_PUT(inbox, (u8) (MLX4_FS_UDP_UC_EN | MLX4_FS_TCP_UC_EN),
			 INIT_HCA_FS_IB_BITS_OFFSET);
		MLX4_PUT(inbox, (u16) MLX4_FS_NUM_OF_L2_ADDR,
			 INIT_HCA_FS_IB_NUM_ADDRS_OFFSET);
	} else {
		MLX4_PUT(inbox, param->mc_base,	INIT_HCA_MC_BASE_OFFSET);
		MLX4_PUT(inbox, param->log_mc_entry_sz,
			 INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
		MLX4_PUT(inbox, param->log_mc_hash_sz,
			 INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
		MLX4_PUT(inbox, param->log_mc_table_sz,
			 INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
		if (dev->caps.steering_mode == MLX4_STEERING_MODE_B0)
			MLX4_PUT(inbox, (u8) (1 << 3),
				 INIT_HCA_UC_STEERING_OFFSET);
	}

	/* TPT attributes */

	MLX4_PUT(inbox, param->dmpt_base,  INIT_HCA_DMPT_BASE_OFFSET);
	MLX4_PUT(inbox, param->mw_enabled, INIT_HCA_TPT_MW_OFFSET);
	MLX4_PUT(inbox, param->log_mpt_sz, INIT_HCA_LOG_MPT_SZ_OFFSET);
	MLX4_PUT(inbox, param->mtt_base,   INIT_HCA_MTT_BASE_OFFSET);
	MLX4_PUT(inbox, param->cmpt_base,  INIT_HCA_CMPT_BASE_OFFSET);

	/* UAR attributes */

	MLX4_PUT(inbox, param->uar_page_sz,	INIT_HCA_UAR_PAGE_SZ_OFFSET);
	MLX4_PUT(inbox, param->log_uar_sz,	INIT_HCA_LOG_UAR_SZ_OFFSET);

	/* set parser VXLAN attributes */
	if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS) {
		u8 parser_params = 0;
		MLX4_PUT(inbox, parser_params,	INIT_HCA_VXLAN_OFFSET);
	}

	err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_INIT_HCA, 10000,
		       MLX4_CMD_NATIVE);

	if (err)
		mlx4_err(dev, "INIT_HCA returns %d\n", err);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}

int mlx4_QUERY_HCA(struct mlx4_dev *dev,
		   struct mlx4_init_hca_param *param)
{
	struct mlx4_cmd_mailbox *mailbox;
	__be32 *outbox;
	u32 dword_field;
	u8 byte_field;
	int err;

#define QUERY_HCA_GLOBAL_CAPS_OFFSET	0x04
#define QUERY_HCA_CORE_CLOCK_OFFSET	0x0c

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	outbox = mailbox->buf;

	err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0,
			   MLX4_CMD_QUERY_HCA,
			   MLX4_CMD_TIME_CLASS_B,
			   !mlx4_is_slave(dev));
	if (err)
		goto out;

	MLX4_GET(param->global_caps, outbox, QUERY_HCA_GLOBAL_CAPS_OFFSET);
	MLX4_GET(param->hca_core_clock, outbox, QUERY_HCA_CORE_CLOCK_OFFSET);

	/* QPC/EEC/CQC/EQC/RDMARC attributes */

	MLX4_GET(param->qpc_base,      outbox, INIT_HCA_QPC_BASE_OFFSET);
	MLX4_GET(param->log_num_qps,   outbox, INIT_HCA_LOG_QP_OFFSET);
	MLX4_GET(param->srqc_base,     outbox, INIT_HCA_SRQC_BASE_OFFSET);
	MLX4_GET(param->log_num_srqs,  outbox, INIT_HCA_LOG_SRQ_OFFSET);
	MLX4_GET(param->cqc_base,      outbox, INIT_HCA_CQC_BASE_OFFSET);
	MLX4_GET(param->log_num_cqs,   outbox, INIT_HCA_LOG_CQ_OFFSET);
	MLX4_GET(param->altc_base,     outbox, INIT_HCA_ALTC_BASE_OFFSET);
	MLX4_GET(param->auxc_base,     outbox, INIT_HCA_AUXC_BASE_OFFSET);
	MLX4_GET(param->eqc_base,      outbox, INIT_HCA_EQC_BASE_OFFSET);
	MLX4_GET(param->log_num_eqs,   outbox, INIT_HCA_LOG_EQ_OFFSET);
	MLX4_GET(param->rdmarc_base,   outbox, INIT_HCA_RDMARC_BASE_OFFSET);
	MLX4_GET(param->log_rd_per_qp, outbox, INIT_HCA_LOG_RD_OFFSET);

	MLX4_GET(dword_field, outbox, INIT_HCA_FLAGS_OFFSET);
	if (dword_field & (1 << INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN)) {
		param->steering_mode = MLX4_STEERING_MODE_DEVICE_MANAGED;
	} else {
		MLX4_GET(byte_field, outbox, INIT_HCA_UC_STEERING_OFFSET);
		if (byte_field & 0x8)
			param->steering_mode = MLX4_STEERING_MODE_B0;
		else
			param->steering_mode = MLX4_STEERING_MODE_A0;
	}

	/* steering attributes */
	if (param->steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
		MLX4_GET(param->mc_base, outbox, INIT_HCA_FS_BASE_OFFSET);
		MLX4_GET(param->log_mc_entry_sz, outbox,
			 INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET);
		MLX4_GET(param->log_mc_table_sz, outbox,
			 INIT_HCA_FS_LOG_TABLE_SZ_OFFSET);
	} else {
		MLX4_GET(param->mc_base, outbox, INIT_HCA_MC_BASE_OFFSET);
		MLX4_GET(param->log_mc_entry_sz, outbox,
			 INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
		MLX4_GET(param->log_mc_hash_sz, outbox,
			 INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
		MLX4_GET(param->log_mc_table_sz, outbox,
			 INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
	}

	/* CX3 is capable of extending CQEs/EQEs from 32 to 64 bytes */
	MLX4_GET(byte_field, outbox, INIT_HCA_EQE_CQE_OFFSETS);
	if (byte_field & 0x20) /* 64-bytes eqe enabled */
		param->dev_cap_enabled |= MLX4_DEV_CAP_64B_EQE_ENABLED;
	if (byte_field & 0x40) /* 64-bytes cqe enabled */
		param->dev_cap_enabled |= MLX4_DEV_CAP_64B_CQE_ENABLED;

	/* CX3 is capable of extending CQEs\EQEs to strides larger than 64B */
	MLX4_GET(byte_field, outbox, INIT_HCA_EQE_CQE_STRIDE_OFFSET);
	if (byte_field) {
		param->dev_cap_enabled |= MLX4_DEV_CAP_64B_EQE_ENABLED;
		param->dev_cap_enabled |= MLX4_DEV_CAP_64B_CQE_ENABLED;
		param->cqe_size = 1 << ((byte_field &
					 MLX4_CQE_SIZE_MASK_STRIDE) + 5);
		param->eqe_size = 1 << (((byte_field &
					  MLX4_EQE_SIZE_MASK_STRIDE) >> 4) + 5);
	}

	/* TPT attributes */

	MLX4_GET(param->dmpt_base,  outbox, INIT_HCA_DMPT_BASE_OFFSET);
	MLX4_GET(param->mw_enabled, outbox, INIT_HCA_TPT_MW_OFFSET);
	MLX4_GET(param->log_mpt_sz, outbox, INIT_HCA_LOG_MPT_SZ_OFFSET);
	MLX4_GET(param->mtt_base,   outbox, INIT_HCA_MTT_BASE_OFFSET);
	MLX4_GET(param->cmpt_base,  outbox, INIT_HCA_CMPT_BASE_OFFSET);

	/* UAR attributes */

	MLX4_GET(param->uar_page_sz, outbox, INIT_HCA_UAR_PAGE_SZ_OFFSET);
	MLX4_GET(param->log_uar_sz, outbox, INIT_HCA_LOG_UAR_SZ_OFFSET);

out:
	mlx4_free_cmd_mailbox(dev, mailbox);

	return err;
}
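
/* Illustrative example (not part of the original source): a minimal sketch,
 * assuming a virtual-function (slave) init path, of reading back the HCA
 * parameters that the master programmed via INIT_HCA.  The helper name
 * example_query_hca() is hypothetical.
 */
static void __maybe_unused example_query_hca(struct mlx4_dev *dev)
{
	struct mlx4_init_hca_param hca_param;

	memset(&hca_param, 0, sizeof(hca_param));
	if (!mlx4_QUERY_HCA(dev, &hca_param))
		mlx4_dbg(dev, "HCA steering mode %d, log_num_qps %d\n",
			 hca_param.steering_mode, hca_param.log_num_qps);
}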

/* for IB-type ports only in SRIOV mode. Checks that both proxy QP0
 * and real QP0 are active, so that the paravirtualized QP0 is ready
 * to operate.
 */
static int check_qp0_state(struct mlx4_dev *dev, int function, int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	/* irrelevant if not infiniband */
	if (priv->mfunc.master.qp0_state[port].proxy_qp0_active &&
	    priv->mfunc.master.qp0_state[port].qp0_active)
		return 1;
	return 0;
}

int mlx4_INIT_PORT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int port = mlx4_slave_convert_port(dev, slave, vhcr->in_modifier);
	int err;

	if (port < 0)
		return -EINVAL;

	if (priv->mfunc.master.slave_state[slave].init_port_mask & (1 << port))
		return 0;

	if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB) {
		/* Enable port only if it was previously disabled */
		if (!priv->mfunc.master.init_port_ref[port]) {
			err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT,
				       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
			if (err)
				return err;
		}
		priv->mfunc.master.slave_state[slave].init_port_mask |= (1 << port);
	} else {
		if (slave == mlx4_master_func_num(dev)) {
			if (check_qp0_state(dev, slave, port) &&
			    !priv->mfunc.master.qp0_state[port].port_active) {
				err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT,
					       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
				if (err)
					return err;
				priv->mfunc.master.qp0_state[port].port_active = 1;
				priv->mfunc.master.slave_state[slave].init_port_mask |= (1 << port);
			}
		} else
			priv->mfunc.master.slave_state[slave].init_port_mask |= (1 << port);
	}
	++priv->mfunc.master.init_port_ref[port];
	return 0;
}

int mlx4_INIT_PORT(struct mlx4_dev *dev, int port)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32 *inbox;
	int err;
	u32 flags;
	u16 field;

	if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
#define INIT_PORT_IN_SIZE          256
#define INIT_PORT_FLAGS_OFFSET     0x00
#define INIT_PORT_FLAG_SIG         (1 << 18)
#define INIT_PORT_FLAG_NG          (1 << 17)
#define INIT_PORT_FLAG_G0          (1 << 16)
#define INIT_PORT_VL_SHIFT         4
#define INIT_PORT_PORT_WIDTH_SHIFT 8
#define INIT_PORT_MTU_OFFSET       0x04
#define INIT_PORT_MAX_GID_OFFSET   0x06
#define INIT_PORT_MAX_PKEY_OFFSET  0x0a
#define INIT_PORT_GUID0_OFFSET     0x10
#define INIT_PORT_NODE_GUID_OFFSET 0x18
#define INIT_PORT_SI_GUID_OFFSET   0x20

		mailbox = mlx4_alloc_cmd_mailbox(dev);
		if (IS_ERR(mailbox))
			return PTR_ERR(mailbox);
		inbox = mailbox->buf;

		flags = 0;
		flags |= (dev->caps.vl_cap[port] & 0xf) << INIT_PORT_VL_SHIFT;
		flags |= (dev->caps.port_width_cap[port] & 0xf) << INIT_PORT_PORT_WIDTH_SHIFT;
		MLX4_PUT(inbox, flags, INIT_PORT_FLAGS_OFFSET);

		field = 128 << dev->caps.ib_mtu_cap[port];
		MLX4_PUT(inbox, field, INIT_PORT_MTU_OFFSET);
		field = dev->caps.gid_table_len[port];
		MLX4_PUT(inbox, field, INIT_PORT_MAX_GID_OFFSET);
		field = dev->caps.pkey_table_len[port];
		MLX4_PUT(inbox, field, INIT_PORT_MAX_PKEY_OFFSET);

		err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_INIT_PORT,
			       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);

		mlx4_free_cmd_mailbox(dev, mailbox);
	} else
		err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT,
			       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);

	return err;
}
EXPORT_SYMBOL_GPL(mlx4_INIT_PORT);
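
/* Illustrative example (not part of the original source): a minimal sketch of
 * how a consumer might bring up every physical port and tear them down again
 * on failure.  The helper name example_init_all_ports() is hypothetical; the
 * exported mlx4_INIT_PORT()/mlx4_CLOSE_PORT() calls and dev->caps.num_ports
 * are taken from this file.
 */
static int __maybe_unused example_init_all_ports(struct mlx4_dev *dev)
{
	int port, err;

	for (port = 1; port <= dev->caps.num_ports; port++) {
		err = mlx4_INIT_PORT(dev, port);
		if (err) {
			/* roll back the ports that were already opened */
			while (--port >= 1)
				mlx4_CLOSE_PORT(dev, port);
			return err;
		}
	}
	return 0;
}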

int mlx4_CLOSE_PORT_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int port = mlx4_slave_convert_port(dev, slave, vhcr->in_modifier);
	int err;

	if (port < 0)
		return -EINVAL;

	if (!(priv->mfunc.master.slave_state[slave].init_port_mask &
	    (1 << port)))
		return 0;

	if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB) {
		if (priv->mfunc.master.init_port_ref[port] == 1) {
			err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT,
				       1000, MLX4_CMD_NATIVE);
			if (err)
				return err;
		}
		priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port);
	} else {
		/* infiniband port */
		if (slave == mlx4_master_func_num(dev)) {
			if (!priv->mfunc.master.qp0_state[port].qp0_active &&
			    priv->mfunc.master.qp0_state[port].port_active) {
				err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT,
					       1000, MLX4_CMD_NATIVE);
				if (err)
					return err;
				priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port);
				priv->mfunc.master.qp0_state[port].port_active = 0;
			}
		} else
			priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port);
	}
	--priv->mfunc.master.init_port_ref[port];
	return 0;
}

int mlx4_CLOSE_PORT(struct mlx4_dev *dev, int port)
{
	return mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT, 1000,
			MLX4_CMD_WRAPPED);
}
EXPORT_SYMBOL_GPL(mlx4_CLOSE_PORT);

int mlx4_CLOSE_HCA(struct mlx4_dev *dev, int panic)
{
	return mlx4_cmd(dev, 0, 0, panic, MLX4_CMD_CLOSE_HCA, 1000,
			MLX4_CMD_NATIVE);
}

struct mlx4_config_dev {
	__be32	update_flags;
	__be32	rsvd1[3];	/* reserved */
	__be16	vxlan_udp_dport;
	__be16	rsvd2;		/* reserved */
};

#define MLX4_VXLAN_UDP_DPORT (1 << 0)

static int mlx4_CONFIG_DEV(struct mlx4_dev *dev, struct mlx4_config_dev *config_dev)
{
	int err;
	struct mlx4_cmd_mailbox *mailbox;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	memcpy(mailbox->buf, config_dev, sizeof(*config_dev));

	err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_CONFIG_DEV,
		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}

int mlx4_config_vxlan_port(struct mlx4_dev *dev, __be16 udp_port)
{
	struct mlx4_config_dev config_dev;

	memset(&config_dev, 0, sizeof(config_dev));
	config_dev.update_flags    = cpu_to_be32(MLX4_VXLAN_UDP_DPORT);
	config_dev.vxlan_udp_dport = udp_port;

	return mlx4_CONFIG_DEV(dev, &config_dev);
}
EXPORT_SYMBOL_GPL(mlx4_config_vxlan_port);
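
/* Illustrative example (not part of the original source): a minimal sketch of
 * how an Ethernet-side consumer could program the IANA-assigned VXLAN UDP
 * port (4789) when VXLAN offloads are supported.  The helper name
 * example_enable_vxlan_offload() is hypothetical.
 */
static int __maybe_unused example_enable_vxlan_offload(struct mlx4_dev *dev)
{
	if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS))
		return -EOPNOTSUPP;

	/* the API takes the port in network byte order */
	return mlx4_config_vxlan_port(dev, htons(4789));
}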

int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages)
{
	int ret = mlx4_cmd_imm(dev, icm_size, aux_pages, 0, 0,
			       MLX4_CMD_SET_ICM_SIZE,
			       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
	if (ret)
		return ret;

	/*
	 * Round up number of system pages needed in case
	 * MLX4_ICM_PAGE_SIZE < PAGE_SIZE.
	 */
	*aux_pages = ALIGN(*aux_pages, PAGE_SIZE / MLX4_ICM_PAGE_SIZE) >>
		(PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT);

	return 0;
}
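
/* Illustrative example (not part of the original source): a worked instance
 * of the aux_pages conversion above, using example constants rather than the
 * driver's MLX4_ICM_PAGE_* macros.  With 4 KB ICM pages and 16 KB system
 * pages there are four ICM pages per system page, so the firmware's ICM-page
 * count is rounded up to a multiple of four and then divided by four.
 */
static u64 __maybe_unused example_icm_pages_to_sys_pages(u64 icm_pages)
{
	const unsigned int icm_page_shift = 12;	/* 4 KB, example value */
	const unsigned int sys_page_shift = 14;	/* 16 KB, example value */
	u64 per_sys_page = 1ULL << (sys_page_shift - icm_page_shift);

	/* e.g. 7 ICM pages -> ALIGN(7, 4) >> 2 = 8 >> 2 = 2 system pages */
	return ALIGN(icm_pages, per_sys_page) >> (sys_page_shift - icm_page_shift);
}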

int mlx4_NOP(struct mlx4_dev *dev)
{
	/* Input modifier of 0x1f means "finish as soon as possible." */
	return mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP, 100, MLX4_CMD_NATIVE);
}
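
/* Illustrative example (not part of the original source): a minimal sketch of
 * using the NOP command as a firmware liveness check, similar in spirit to
 * the driver's interrupt self-test.  The helper name example_ping_fw() is
 * hypothetical.
 */
static bool __maybe_unused example_ping_fw(struct mlx4_dev *dev)
{
	/* NOP completes almost immediately when the command path is healthy */
	return mlx4_NOP(dev) == 0;
}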

int mlx4_get_phys_port_id(struct mlx4_dev *dev)
{
	u8 port;
	u32 *outbox;
	struct mlx4_cmd_mailbox *mailbox;
	u32 in_mod;
	u32 guid_hi, guid_lo;
	int err, ret = 0;
#define MOD_STAT_CFG_PORT_OFFSET 8
#define MOD_STAT_CFG_GUID_H	 0X14
#define MOD_STAT_CFG_GUID_L	 0X1c

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	outbox = mailbox->buf;

	for (port = 1; port <= dev->caps.num_ports; port++) {
		in_mod = port << MOD_STAT_CFG_PORT_OFFSET;
		err = mlx4_cmd_box(dev, 0, mailbox->dma, in_mod, 0x2,
				   MLX4_CMD_MOD_STAT_CFG, MLX4_CMD_TIME_CLASS_A,
				   MLX4_CMD_NATIVE);
		if (err) {
			mlx4_err(dev, "Fail to get port %d uplink guid\n",
				 port);
			ret = err;
		} else {
			MLX4_GET(guid_hi, outbox, MOD_STAT_CFG_GUID_H);
			MLX4_GET(guid_lo, outbox, MOD_STAT_CFG_GUID_L);
			dev->caps.phys_port_id[port] = (u64)guid_lo |
						       (u64)guid_hi << 32;
		}
	}
	mlx4_free_cmd_mailbox(dev, mailbox);
	return ret;
}
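
/* Illustrative example (not part of the original source): a minimal sketch of
 * consuming the physical port IDs cached by mlx4_get_phys_port_id() above.
 * The helper name example_dump_phys_port_ids() is hypothetical.
 */
static void __maybe_unused example_dump_phys_port_ids(struct mlx4_dev *dev)
{
	int port;

	for (port = 1; port <= dev->caps.num_ports; port++)
		mlx4_dbg(dev, "port %d physical port id 0x%llx\n", port,
			 (unsigned long long)dev->caps.phys_port_id[port]);
}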

#define MLX4_WOL_SETUP_MODE (5 << 28)
int mlx4_wol_read(struct mlx4_dev *dev, u64 *config, int port)
{
	u32 in_mod = MLX4_WOL_SETUP_MODE | port << 8;

	return mlx4_cmd_imm(dev, 0, config, in_mod, 0x3,
			    MLX4_CMD_MOD_STAT_CFG, MLX4_CMD_TIME_CLASS_A,
			    MLX4_CMD_NATIVE);
}
EXPORT_SYMBOL_GPL(mlx4_wol_read);

int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port)
{
	u32 in_mod = MLX4_WOL_SETUP_MODE | port << 8;

	return mlx4_cmd(dev, config, in_mod, 0x1, MLX4_CMD_MOD_STAT_CFG,
			MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
}
EXPORT_SYMBOL_GPL(mlx4_wol_write);
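
/* Illustrative example (not part of the original source): a minimal
 * read-modify-write sketch over the Wake-on-LAN configuration word.  The bit
 * position EXAMPLE_WOL_FLAG is purely hypothetical; the real flag layout is
 * defined by the consumers of mlx4_wol_read()/mlx4_wol_write(), not here.
 */
#define EXAMPLE_WOL_FLAG (1ULL << 0)	/* hypothetical flag, illustration only */

static int __maybe_unused example_set_wol_flag(struct mlx4_dev *dev, int port,
					       bool enable)
{
	u64 config;
	int err;

	err = mlx4_wol_read(dev, &config, port);
	if (err)
		return err;

	if (enable)
		config |= EXAMPLE_WOL_FLAG;
	else
		config &= ~EXAMPLE_WOL_FLAG;

	return mlx4_wol_write(dev, config, port);
}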

void mlx4_opreq_action(struct work_struct *work)
{
	struct mlx4_priv *priv = container_of(work, struct mlx4_priv,
					      opreq_task);
	struct mlx4_dev *dev = &priv->dev;
	int num_tasks = atomic_read(&priv->opreq_count);
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mgm *mgm;
	u32 *outbox;
	u32 modifier;
	u16 token;
	u16 type;
	int err;
	u32 num_qps;
	struct mlx4_qp qp;
	int i;
	int rem_mcg;
	int prot;

#define GET_OP_REQ_MODIFIER_OFFSET	0x08
#define GET_OP_REQ_TOKEN_OFFSET		0x14
#define GET_OP_REQ_TYPE_OFFSET		0x1a
#define GET_OP_REQ_DATA_OFFSET		0x20

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		mlx4_err(dev, "Failed to allocate mailbox for GET_OP_REQ\n");
		return;
	}
	outbox = mailbox->buf;

	while (num_tasks) {
		err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0,
				   MLX4_CMD_GET_OP_REQ, MLX4_CMD_TIME_CLASS_A,
				   MLX4_CMD_NATIVE);
		if (err) {
			mlx4_err(dev, "Failed to retrieve required operation: %d\n",
				 err);
			goto out;
		}
		MLX4_GET(modifier, outbox, GET_OP_REQ_MODIFIER_OFFSET);
		MLX4_GET(token, outbox, GET_OP_REQ_TOKEN_OFFSET);
		MLX4_GET(type, outbox, GET_OP_REQ_TYPE_OFFSET);

		switch (type) {
		case ADD_TO_MCG:
			if (dev->caps.steering_mode ==
			    MLX4_STEERING_MODE_DEVICE_MANAGED) {
				mlx4_warn(dev, "ADD MCG operation is not supported in DEVICE_MANAGED steering mode\n");
				err = EPERM;
				break;
			}
			mgm = (struct mlx4_mgm *)((u8 *)(outbox) +
						  GET_OP_REQ_DATA_OFFSET);
			num_qps = be32_to_cpu(mgm->members_count) &
				  MGM_QPN_MASK;
			rem_mcg = ((u8 *)(&mgm->members_count))[0] & 1;
			prot = ((u8 *)(&mgm->members_count))[0] >> 6;

			for (i = 0; i < num_qps; i++) {
				qp.qpn = be32_to_cpu(mgm->qp[i]);
				if (rem_mcg)
					err = mlx4_multicast_detach(dev, &qp,
								    mgm->gid,
								    prot, 0);
				else
					err = mlx4_multicast_attach(dev, &qp,
								    mgm->gid,
								    mgm->gid[5],
								    0, prot);
				if (err)
					break;
			}
			break;
		default:
			mlx4_warn(dev, "Bad type for required operation\n");
			err = EINVAL;
			break;
		}
		err = mlx4_cmd(dev, 0, ((u32) err |
					(__force u32)cpu_to_be32(token) << 16),
			       1, MLX4_CMD_GET_OP_REQ, MLX4_CMD_TIME_CLASS_A,
			       MLX4_CMD_NATIVE);
		if (err) {
			mlx4_err(dev, "Failed to acknowledge required request: %d\n",
				 err);
			goto out;
		}
		memset(outbox, 0, 0xffc);
		num_tasks = atomic_dec_return(&priv->opreq_count);
	}

out:
	mlx4_free_cmd_mailbox(dev, mailbox);
}
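
/* Illustrative example (not part of the original source): a minimal sketch,
 * assuming the driver's event-queue handler and the mlx4_wq workqueue, of how
 * a "required operation" event would schedule mlx4_opreq_action() above.
 * The helper name example_handle_opreq_event() is hypothetical.
 */
static void __maybe_unused example_handle_opreq_event(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	/* commands cannot run in interrupt context, so defer to a work item */
	atomic_inc(&priv->opreq_count);
	queue_work(mlx4_wq, &priv->opreq_task);
}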

static int mlx4_check_smp_firewall_active(struct mlx4_dev *dev,
					  struct mlx4_cmd_mailbox *mailbox)
{
#define MLX4_CMD_MAD_DEMUX_SET_ATTR_OFFSET		0x10
#define MLX4_CMD_MAD_DEMUX_GETRESP_ATTR_OFFSET		0x20
#define MLX4_CMD_MAD_DEMUX_TRAP_ATTR_OFFSET		0x40
#define MLX4_CMD_MAD_DEMUX_TRAP_REPRESS_ATTR_OFFSET	0x70

	u32 set_attr_mask, getresp_attr_mask;
	u32 trap_attr_mask, traprepress_attr_mask;

	MLX4_GET(set_attr_mask, mailbox->buf,
		 MLX4_CMD_MAD_DEMUX_SET_ATTR_OFFSET);
	mlx4_dbg(dev, "SMP firewall set_attribute_mask = 0x%x\n",
		 set_attr_mask);

	MLX4_GET(getresp_attr_mask, mailbox->buf,
		 MLX4_CMD_MAD_DEMUX_GETRESP_ATTR_OFFSET);
	mlx4_dbg(dev, "SMP firewall getresp_attribute_mask = 0x%x\n",
		 getresp_attr_mask);

	MLX4_GET(trap_attr_mask, mailbox->buf,
		 MLX4_CMD_MAD_DEMUX_TRAP_ATTR_OFFSET);
	mlx4_dbg(dev, "SMP firewall trap_attribute_mask = 0x%x\n",
		 trap_attr_mask);

	MLX4_GET(traprepress_attr_mask, mailbox->buf,
		 MLX4_CMD_MAD_DEMUX_TRAP_REPRESS_ATTR_OFFSET);
	mlx4_dbg(dev, "SMP firewall traprepress_attribute_mask = 0x%x\n",
		 traprepress_attr_mask);

	if (set_attr_mask && getresp_attr_mask && trap_attr_mask &&
	    traprepress_attr_mask)
		return 1;

	return 0;
}
*dev
)
2114 struct mlx4_cmd_mailbox
*mailbox
;
2115 int secure_host_active
;
2118 /* Check if mad_demux is supported */
2119 if (!(dev
->caps
.flags2
& MLX4_DEV_CAP_FLAG2_MAD_DEMUX
))
2122 mailbox
= mlx4_alloc_cmd_mailbox(dev
);
2123 if (IS_ERR(mailbox
)) {
2124 mlx4_warn(dev
, "Failed to allocate mailbox for cmd MAD_DEMUX");
2128 /* Query mad_demux to find out which MADs are handled by internal sma */
2129 err
= mlx4_cmd_box(dev
, 0, mailbox
->dma
, 0x01 /* subn mgmt class */,
2130 MLX4_CMD_MAD_DEMUX_QUERY_RESTR
, MLX4_CMD_MAD_DEMUX
,
2131 MLX4_CMD_TIME_CLASS_B
, MLX4_CMD_NATIVE
);
2133 mlx4_warn(dev
, "MLX4_CMD_MAD_DEMUX: query restrictions failed (%d)\n",
2138 secure_host_active
= mlx4_check_smp_firewall_active(dev
, mailbox
);
2140 /* Config mad_demux to handle all MADs returned by the query above */
2141 err
= mlx4_cmd(dev
, mailbox
->dma
, 0x01 /* subn mgmt class */,
2142 MLX4_CMD_MAD_DEMUX_CONFIG
, MLX4_CMD_MAD_DEMUX
,
2143 MLX4_CMD_TIME_CLASS_B
, MLX4_CMD_NATIVE
);
2145 mlx4_warn(dev
, "MLX4_CMD_MAD_DEMUX: configure failed (%d)\n", err
);
2149 if (secure_host_active
)
2150 mlx4_warn(dev
, "HCA operating in secure-host mode. SMP firewall activated.\n");
2152 mlx4_free_cmd_mailbox(dev
, mailbox
);

/* Access Reg commands */
enum mlx4_access_reg_masks {
	MLX4_ACCESS_REG_STATUS_MASK = 0x7f,
	MLX4_ACCESS_REG_METHOD_MASK = 0x7f,
	MLX4_ACCESS_REG_LEN_MASK = 0x7ff
};

struct mlx4_access_reg {
	__be16 constant1;
	u8 status;
	u8 resrvd1;
	__be16 reg_id;
	u8 method;
	u8 constant2;
	__be32 resrvd2[2];
	__be16 len_const;
	__be16 resrvd3;
#define MLX4_ACCESS_REG_HEADER_SIZE (20)
	u8 reg_data[MLX4_MAILBOX_SIZE-MLX4_ACCESS_REG_HEADER_SIZE];
} __attribute__((__packed__));

/**
 * mlx4_ACCESS_REG - Generic access reg command.
 * @dev: mlx4_dev.
 * @reg_id: register ID to access.
 * @method: Access method Read/Write.
 * @reg_len: register length to Read/Write in bytes.
 * @reg_data: reg_data pointer to Read/Write From/To.
 *
 * Access ConnectX registers FW command.
 * Returns 0 on success and copies outbox mlx4_access_reg data
 * field into reg_data or a negative error code.
 */
static int mlx4_ACCESS_REG(struct mlx4_dev *dev, u16 reg_id,
			   enum mlx4_access_reg_method method,
			   u16 reg_len, void *reg_data)
{
	struct mlx4_cmd_mailbox *inbox, *outbox;
	struct mlx4_access_reg *inbuf, *outbuf;
	int err;

	inbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(inbox))
		return PTR_ERR(inbox);

	outbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(outbox)) {
		mlx4_free_cmd_mailbox(dev, inbox);
		return PTR_ERR(outbox);
	}

	inbuf = inbox->buf;
	outbuf = outbox->buf;

	inbuf->constant1 = cpu_to_be16(0x1<<11 | 0x4);
	inbuf->constant2 = 0x1;
	inbuf->reg_id = cpu_to_be16(reg_id);
	inbuf->method = method & MLX4_ACCESS_REG_METHOD_MASK;

	reg_len = min(reg_len, (u16)(sizeof(inbuf->reg_data)));
	inbuf->len_const =
		cpu_to_be16(((reg_len/4 + 1) & MLX4_ACCESS_REG_LEN_MASK) |
			    ((0x3) << 12));

	memcpy(inbuf->reg_data, reg_data, reg_len);
	err = mlx4_cmd_box(dev, inbox->dma, outbox->dma, 0, 0,
			   MLX4_CMD_ACCESS_REG, MLX4_CMD_TIME_CLASS_C,
			   MLX4_CMD_WRAPPED);
	if (err)
		goto out;

	if (outbuf->status & MLX4_ACCESS_REG_STATUS_MASK) {
		err = outbuf->status & MLX4_ACCESS_REG_STATUS_MASK;
		mlx4_err(dev,
			 "MLX4_CMD_ACCESS_REG(%x) returned REG status (%x)\n",
			 reg_id, err);
		goto out;
	}

	memcpy(reg_data, outbuf->reg_data, reg_len);
out:
	mlx4_free_cmd_mailbox(dev, inbox);
	mlx4_free_cmd_mailbox(dev, outbox);
	return err;
}

/* ConnectX registers IDs */
enum {
	MLX4_REG_ID_PTYS = 0x5004,
};

/**
 * mlx4_ACCESS_PTYS_REG - Access PTYs (Port Type and Speed)
 * register
 * @dev: mlx4_dev.
 * @method: Access method Read/Write.
 * @ptys_reg: PTYS register data pointer.
 *
 * Access ConnectX PTYS register, to Read/Write Port Type/Speed
 * configuration
 * Returns 0 on success or a negative error code.
 */
int mlx4_ACCESS_PTYS_REG(struct mlx4_dev *dev,
			 enum mlx4_access_reg_method method,
			 struct mlx4_ptys_reg *ptys_reg)
{
	return mlx4_ACCESS_REG(dev, MLX4_REG_ID_PTYS,
			       method, sizeof(*ptys_reg), ptys_reg);
}
EXPORT_SYMBOL_GPL(mlx4_ACCESS_PTYS_REG);
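
/* Illustrative example (not part of the original source): a minimal sketch,
 * assuming the mlx4_ptys_reg layout and the MLX4_ACCESS_REG_QUERY /
 * MLX4_PTYS_EN definitions from the mlx4 device headers, of querying the
 * Ethernet protocol capabilities of one port.  The helper name
 * example_query_ptys() is hypothetical.
 */
static int __maybe_unused example_query_ptys(struct mlx4_dev *dev, u8 port)
{
	struct mlx4_ptys_reg ptys_reg;
	int err;

	memset(&ptys_reg, 0, sizeof(ptys_reg));
	ptys_reg.local_port = port;
	ptys_reg.proto_mask = MLX4_PTYS_EN;	/* Ethernet protocol layer */

	err = mlx4_ACCESS_PTYS_REG(dev, MLX4_ACCESS_REG_QUERY, &ptys_reg);
	if (err)
		return err;

	mlx4_dbg(dev, "port %d eth_proto_cap 0x%x\n", port,
		 be32_to_cpu(ptys_reg.eth_proto_cap));
	return 0;
}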

int mlx4_ACCESS_REG_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd)
{
	struct mlx4_access_reg *inbuf = inbox->buf;
	u8 method = inbuf->method & MLX4_ACCESS_REG_METHOD_MASK;
	u16 reg_id = be16_to_cpu(inbuf->reg_id);

	if (slave != mlx4_master_func_num(dev) &&
	    method == MLX4_ACCESS_REG_WRITE)
		return -EPERM;

	if (reg_id == MLX4_REG_ID_PTYS) {
		struct mlx4_ptys_reg *ptys_reg =
			(struct mlx4_ptys_reg *)inbuf->reg_data;

		ptys_reg->local_port =
			mlx4_slave_convert_port(dev, slave,
						ptys_reg->local_port);
	}

	return mlx4_cmd_box(dev, inbox->dma, outbox->dma, vhcr->in_modifier,
			    0, MLX4_CMD_ACCESS_REG, MLX4_CMD_TIME_CLASS_C,