/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/io-mapping.h>
#include <linux/delay.h>
#include <linux/kmod.h>

#include <linux/mlx4/device.h>
#include <linux/mlx4/doorbell.h>

#include "mlx4.h"
#include "fw.h"
#include "icm.h"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("Mellanox ConnectX HCA low-level driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION " (" DRV_RELDATE ")");

struct workqueue_struct *mlx4_wq;

#ifdef CONFIG_MLX4_DEBUG

int mlx4_debug_level = 0;
module_param_named(debug_level, mlx4_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");

#endif /* CONFIG_MLX4_DEBUG */

#ifdef CONFIG_PCI_MSI

static int msi_x = 1;
module_param(msi_x, int, 0444);
MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero");

#else /* CONFIG_PCI_MSI */

#define msi_x (0)

#endif /* CONFIG_PCI_MSI */

static uint8_t num_vfs[3] = {0, 0, 0};
static int num_vfs_argc;
module_param_array(num_vfs, byte, &num_vfs_argc, 0444);
MODULE_PARM_DESC(num_vfs, "enable #num_vfs functions if num_vfs > 0\n"
			  "num_vfs=port1,port2,port1+2");

static uint8_t probe_vf[3] = {0, 0, 0};
static int probe_vfs_argc;
module_param_array(probe_vf, byte, &probe_vfs_argc, 0444);
MODULE_PARM_DESC(probe_vf, "number of vfs to probe by pf driver (num_vfs > 0)\n"
			   "probe_vf=port1,port2,port1+2");

int mlx4_log_num_mgm_entry_size = MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE;
module_param_named(log_num_mgm_entry_size,
		   mlx4_log_num_mgm_entry_size, int, 0444);
MODULE_PARM_DESC(log_num_mgm_entry_size, "log mgm size, that defines the num"
					 " of qp per mcg, for example:"
					 " 10 gives 248. range: 7 <="
					 " log_num_mgm_entry_size <= 12."
					 " To activate device managed"
					 " flow steering when available, set to -1");
static bool enable_64b_cqe_eqe = true;
module_param(enable_64b_cqe_eqe, bool, 0444);
MODULE_PARM_DESC(enable_64b_cqe_eqe,
		 "Enable 64 byte CQEs/EQEs when the FW supports this (default: True)");

#define PF_CONTEXT_BEHAVIOUR_MASK	(MLX4_FUNC_CAP_64B_EQE_CQE | \
					 MLX4_FUNC_CAP_EQE_CQE_STRIDE | \
					 MLX4_FUNC_CAP_DMFS_A0_STATIC)
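/* The set of PF context behaviour bits this driver understands; a
 * pf_context_behaviour reported by QUERY_FUNC_CAP with any bit outside this
 * mask makes mlx4_slave_cap() fail the probe rather than guess.
 */
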
static char mlx4_version[] =
	DRV_NAME ": Mellanox ConnectX core driver v"
	DRV_VERSION " (" DRV_RELDATE ")\n";

static struct mlx4_profile default_profile = {
	.rdmarc_per_qp	= 1 << 4,
	.num_mtt	= 1 << 20, /* It is really num mtt segments */
};

static struct mlx4_profile low_mem_profile = {
	.rdmarc_per_qp	= 1 << 4,
};

static int log_num_mac = 7;
module_param_named(log_num_mac, log_num_mac, int, 0444);
MODULE_PARM_DESC(log_num_mac, "Log2 max number of MACs per ETH port (1-7)");

static int log_num_vlan;
module_param_named(log_num_vlan, log_num_vlan, int, 0444);
MODULE_PARM_DESC(log_num_vlan, "Log2 max number of VLANs per ETH port (0-7)");
/* Log2 max number of VLANs per ETH port (0-7) */
#define MLX4_LOG_NUM_VLANS 7
#define MLX4_MIN_LOG_NUM_VLANS 0
#define MLX4_MIN_LOG_NUM_MAC 1

static bool use_prio;
module_param_named(use_prio, use_prio, bool, 0444);
MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports (deprecated)");

int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG);
module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444);
MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-7)");

static int port_type_array[2] = {MLX4_PORT_TYPE_NONE, MLX4_PORT_TYPE_NONE};
static int arr_argc = 2;
module_param_array(port_type_array, int, &arr_argc, 0444);
MODULE_PARM_DESC(port_type_array, "Array of port types: HW_DEFAULT (0) is default "
				  "1 for IB, 2 for Ethernet");

struct mlx4_port_config {
	struct list_head list;
	enum mlx4_port_type port_type[MLX4_MAX_PORTS + 1];
	struct pci_dev *pdev;
};

static atomic_t pf_loading = ATOMIC_INIT(0);
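/* Non-zero while a PF is still coming up; mlx4_init_slave() checks this and
 * returns -EPROBE_DEFER so a VF probe is retried once the PF is ready.
 */
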
int mlx4_check_port_params(struct mlx4_dev *dev,
			   enum mlx4_port_type *port_type)
{
	int i;

	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) {
		for (i = 0; i < dev->caps.num_ports - 1; i++) {
			if (port_type[i] != port_type[i + 1]) {
				mlx4_err(dev, "Only same port types supported on this HCA, aborting\n");
				return -EINVAL;
			}
		}
	}

	for (i = 0; i < dev->caps.num_ports; i++) {
		if (!(port_type[i] & dev->caps.supported_type[i+1])) {
			mlx4_err(dev, "Requested port type for port %d is not supported on this HCA\n",
				 i + 1);
			return -EINVAL;
		}
	}
	return 0;
}

static void mlx4_set_port_mask(struct mlx4_dev *dev)
{
	int i;

	for (i = 1; i <= dev->caps.num_ports; ++i)
		dev->caps.port_mask[i] = dev->caps.port_type[i];
}

enum {
	MLX4_QUERY_FUNC_NUM_SYS_EQS = 1 << 0,
};
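/* mlx4_query_func() below ORs this flag into its (non-negative) return value
 * so mlx4_init_hca() can tell that max_eqs/reserved_eqs/reserved_uars in
 * dev_cap were refreshed from QUERY_FUNC.
 */
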
static int mlx4_query_func(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
{
	int err = 0;
	struct mlx4_func func;

	if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) {
		err = mlx4_QUERY_FUNC(dev, &func, 0);
		if (err) {
			mlx4_err(dev, "QUERY_FUNC command failed, aborting.\n");
			goto out;
		}
		dev_cap->max_eqs = func.max_eq;
		dev_cap->reserved_eqs = func.rsvd_eqs;
		dev_cap->reserved_uars = func.rsvd_uars;
		err |= MLX4_QUERY_FUNC_NUM_SYS_EQS;
	}
out:
	return err;
}

static void mlx4_enable_cqe_eqe_stride(struct mlx4_dev *dev)
{
	struct mlx4_caps *dev_cap = &dev->caps;

	/* FW not supporting or cancelled by user */
	if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_EQE_STRIDE) ||
	    !(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_CQE_STRIDE))
		return;

	/* Must have 64B CQE_EQE enabled by FW to use bigger stride
	 * When FW has NCSI it may decide not to report 64B CQE/EQEs
	 */
	if (!(dev_cap->flags & MLX4_DEV_CAP_FLAG_64B_EQE) ||
	    !(dev_cap->flags & MLX4_DEV_CAP_FLAG_64B_CQE)) {
		dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE;
		dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE;
		return;
	}

	if (cache_line_size() == 128 || cache_line_size() == 256) {
		mlx4_dbg(dev, "Enabling CQE stride cacheLine supported\n");
		/* Changing the real data inside CQE size to 32B */
		dev_cap->flags &= ~MLX4_DEV_CAP_FLAG_64B_CQE;
		dev_cap->flags &= ~MLX4_DEV_CAP_FLAG_64B_EQE;

		if (mlx4_is_master(dev))
			dev_cap->function_caps |= MLX4_FUNC_CAP_EQE_CQE_STRIDE;
	} else {
		mlx4_dbg(dev, "Disabling CQE stride cacheLine unsupported\n");
		dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE;
		dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE;
	}
}
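/* In stride mode the CQE/EQE entries stay cache-line sized while only 32
 * bytes of each entry carry real data, which is why the 64B data-size flags
 * are cleared when stride is enabled above.
 */
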
static int _mlx4_dev_port(struct mlx4_dev *dev, int port,
			  struct mlx4_port_cap *port_cap)
{
	dev->caps.vl_cap[port] = port_cap->max_vl;
	dev->caps.ib_mtu_cap[port] = port_cap->ib_mtu;
	dev->phys_caps.gid_phys_table_len[port] = port_cap->max_gids;
	dev->phys_caps.pkey_phys_table_len[port] = port_cap->max_pkeys;
	/* set gid and pkey table operating lengths by default
	 * to non-sriov values
	 */
	dev->caps.gid_table_len[port] = port_cap->max_gids;
	dev->caps.pkey_table_len[port] = port_cap->max_pkeys;
	dev->caps.port_width_cap[port] = port_cap->max_port_width;
	dev->caps.eth_mtu_cap[port] = port_cap->eth_mtu;
	dev->caps.def_mac[port] = port_cap->def_mac;
	dev->caps.supported_type[port] = port_cap->supported_port_types;
	dev->caps.suggested_type[port] = port_cap->suggested_type;
	dev->caps.default_sense[port] = port_cap->default_sense;
	dev->caps.trans_type[port] = port_cap->trans_type;
	dev->caps.vendor_oui[port] = port_cap->vendor_oui;
	dev->caps.wavelength[port] = port_cap->wavelength;
	dev->caps.trans_code[port] = port_cap->trans_code;

	return 0;
}

static int mlx4_dev_port(struct mlx4_dev *dev, int port,
			 struct mlx4_port_cap *port_cap)
{
	int err = 0;

	err = mlx4_QUERY_PORT(dev, port, port_cap);

	if (err)
		mlx4_err(dev, "QUERY_PORT command failed.\n");

	return err;
}

#define MLX4_A0_STEERING_TABLE_SIZE	256
static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
{
	int err;
	int i;

	err = mlx4_QUERY_DEV_CAP(dev, dev_cap);
	if (err) {
		mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
		return err;
	}
	mlx4_dev_cap_dump(dev, dev_cap);

	if (dev_cap->min_page_sz > PAGE_SIZE) {
		mlx4_err(dev, "HCA minimum page size of %d bigger than kernel PAGE_SIZE of %ld, aborting\n",
			 dev_cap->min_page_sz, PAGE_SIZE);
		return -ENODEV;
	}
	if (dev_cap->num_ports > MLX4_MAX_PORTS) {
		mlx4_err(dev, "HCA has %d ports, but we only support %d, aborting\n",
			 dev_cap->num_ports, MLX4_MAX_PORTS);
		return -ENODEV;
	}

	if (dev_cap->uar_size > pci_resource_len(dev->pdev, 2)) {
		mlx4_err(dev, "HCA reported UAR size of 0x%x bigger than PCI resource 2 size of 0x%llx, aborting\n",
			 dev_cap->uar_size,
			 (unsigned long long) pci_resource_len(dev->pdev, 2));
		return -ENODEV;
	}

	dev->caps.num_ports = dev_cap->num_ports;
	dev->caps.num_sys_eqs = dev_cap->num_sys_eqs;
	dev->phys_caps.num_phys_eqs = dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS ?
				      dev->caps.num_sys_eqs :
				      MLX4_MAX_EQ_NUM;
	for (i = 1; i <= dev->caps.num_ports; ++i) {
		err = _mlx4_dev_port(dev, i, dev_cap->port_cap + i);
		if (err) {
			mlx4_err(dev, "QUERY_PORT command failed, aborting\n");
			return err;
		}
	}

	dev->caps.uar_page_size = PAGE_SIZE;
	dev->caps.num_uars = dev_cap->uar_size / PAGE_SIZE;
	dev->caps.local_ca_ack_delay = dev_cap->local_ca_ack_delay;
	dev->caps.bf_reg_size = dev_cap->bf_reg_size;
	dev->caps.bf_regs_per_page = dev_cap->bf_regs_per_page;
	dev->caps.max_sq_sg = dev_cap->max_sq_sg;
	dev->caps.max_rq_sg = dev_cap->max_rq_sg;
	dev->caps.max_wqes = dev_cap->max_qp_sz;
	dev->caps.max_qp_init_rdma = dev_cap->max_requester_per_qp;
	dev->caps.max_srq_wqes = dev_cap->max_srq_sz;
	dev->caps.max_srq_sge = dev_cap->max_rq_sg - 1;
	dev->caps.reserved_srqs = dev_cap->reserved_srqs;
	dev->caps.max_sq_desc_sz = dev_cap->max_sq_desc_sz;
	dev->caps.max_rq_desc_sz = dev_cap->max_rq_desc_sz;
	/*
	 * Subtract 1 from the limit because we need to allocate a
	 * spare CQE so the HCA HW can tell the difference between an
	 * empty CQ and a full CQ.
	 */
	dev->caps.max_cqes = dev_cap->max_cq_sz - 1;
	dev->caps.reserved_cqs = dev_cap->reserved_cqs;
	dev->caps.reserved_eqs = dev_cap->reserved_eqs;
	dev->caps.reserved_mtts = dev_cap->reserved_mtts;
	dev->caps.reserved_mrws = dev_cap->reserved_mrws;

	/* The first 128 UARs are used for EQ doorbells */
	dev->caps.reserved_uars = max_t(int, 128, dev_cap->reserved_uars);
	dev->caps.reserved_pds = dev_cap->reserved_pds;
	dev->caps.reserved_xrcds = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ?
					dev_cap->reserved_xrcds : 0;
	dev->caps.max_xrcds = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ?
					dev_cap->max_xrcds : 0;
	dev->caps.mtt_entry_sz = dev_cap->mtt_entry_sz;

	dev->caps.max_msg_sz = dev_cap->max_msg_sz;
	dev->caps.page_size_cap = ~(u32) (dev_cap->min_page_sz - 1);
	dev->caps.flags = dev_cap->flags;
	dev->caps.flags2 = dev_cap->flags2;
	dev->caps.bmme_flags = dev_cap->bmme_flags;
	dev->caps.reserved_lkey = dev_cap->reserved_lkey;
	dev->caps.stat_rate_support = dev_cap->stat_rate_support;
	dev->caps.max_gso_sz = dev_cap->max_gso_sz;
	dev->caps.max_rss_tbl_sz = dev_cap->max_rss_tbl_sz;

	/* Sense port always allowed on supported devices for ConnectX-1 and -2 */
	if (mlx4_priv(dev)->pci_dev_data & MLX4_PCI_DEV_FORCE_SENSE_PORT)
		dev->caps.flags |= MLX4_DEV_CAP_FLAG_SENSE_SUPPORT;
	/* Don't do sense port on multifunction devices (for now at least) */
	if (mlx4_is_mfunc(dev))
		dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_SENSE_SUPPORT;

	if (mlx4_low_memory_profile()) {
		dev->caps.log_num_macs = MLX4_MIN_LOG_NUM_MAC;
		dev->caps.log_num_vlans = MLX4_MIN_LOG_NUM_VLANS;
	} else {
		dev->caps.log_num_macs = log_num_mac;
		dev->caps.log_num_vlans = MLX4_LOG_NUM_VLANS;
	}

	for (i = 1; i <= dev->caps.num_ports; ++i) {
		dev->caps.port_type[i] = MLX4_PORT_TYPE_NONE;
		if (dev->caps.supported_type[i]) {
			/* if only ETH is supported - assign ETH */
			if (dev->caps.supported_type[i] == MLX4_PORT_TYPE_ETH)
				dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH;
			/* if only IB is supported, assign IB */
			else if (dev->caps.supported_type[i] ==
				 MLX4_PORT_TYPE_IB)
				dev->caps.port_type[i] = MLX4_PORT_TYPE_IB;
			else {
				/* if IB and ETH are supported, we set the port
				 * type according to user selection of port type;
				 * if user selected none, take the FW hint */
				if (port_type_array[i - 1] == MLX4_PORT_TYPE_NONE)
					dev->caps.port_type[i] = dev->caps.suggested_type[i] ?
						MLX4_PORT_TYPE_ETH : MLX4_PORT_TYPE_IB;
				else
					dev->caps.port_type[i] = port_type_array[i - 1];
			}
		}
		/*
		 * Link sensing is allowed on the port if 3 conditions are true:
		 * 1. Both protocols are supported on the port.
		 * 2. Different types are supported on the port
		 * 3. FW declared that it supports link sensing
		 */
		mlx4_priv(dev)->sense.sense_allowed[i] =
			((dev->caps.supported_type[i] == MLX4_PORT_TYPE_AUTO) &&
			 (dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) &&
			 (dev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT));

		/*
		 * If "default_sense" bit is set, we move the port to "AUTO" mode
		 * and perform sense_port FW command to try and set the correct
		 * port type from beginning
		 */
		if (mlx4_priv(dev)->sense.sense_allowed[i] && dev->caps.default_sense[i]) {
			enum mlx4_port_type sensed_port = MLX4_PORT_TYPE_NONE;
			dev->caps.possible_type[i] = MLX4_PORT_TYPE_AUTO;
			mlx4_SENSE_PORT(dev, i, &sensed_port);
			if (sensed_port != MLX4_PORT_TYPE_NONE)
				dev->caps.port_type[i] = sensed_port;
		} else {
			dev->caps.possible_type[i] = dev->caps.port_type[i];
		}

		if (dev->caps.log_num_macs > dev_cap->port_cap[i].log_max_macs) {
			dev->caps.log_num_macs = dev_cap->port_cap[i].log_max_macs;
			mlx4_warn(dev, "Requested number of MACs is too much for port %d, reducing to %d\n",
				  i, 1 << dev->caps.log_num_macs);
		}
		if (dev->caps.log_num_vlans > dev_cap->port_cap[i].log_max_vlans) {
			dev->caps.log_num_vlans = dev_cap->port_cap[i].log_max_vlans;
			mlx4_warn(dev, "Requested number of VLANs is too much for port %d, reducing to %d\n",
				  i, 1 << dev->caps.log_num_vlans);
		}
	}

	dev->caps.max_counters = 1 << ilog2(dev_cap->max_counters);

	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] = dev_cap->reserved_qps;
	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] =
		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] =
		(1 << dev->caps.log_num_macs) *
		(1 << dev->caps.log_num_vlans) *
		dev->caps.num_ports;
	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH] = MLX4_NUM_FEXCH;

	if (dev_cap->dmfs_high_rate_qpn_base > 0 &&
	    dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FS_EN)
		dev->caps.dmfs_high_rate_qpn_base = dev_cap->dmfs_high_rate_qpn_base;
	else
		dev->caps.dmfs_high_rate_qpn_base =
			dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];

	if (dev_cap->dmfs_high_rate_qpn_range > 0 &&
	    dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FS_EN) {
		dev->caps.dmfs_high_rate_qpn_range = dev_cap->dmfs_high_rate_qpn_range;
		dev->caps.dmfs_high_steer_mode = MLX4_STEERING_DMFS_A0_DEFAULT;
		dev->caps.flags2 |= MLX4_DEV_CAP_FLAG2_FS_A0;
	} else {
		dev->caps.dmfs_high_steer_mode = MLX4_STEERING_DMFS_A0_NOT_SUPPORTED;
		dev->caps.dmfs_high_rate_qpn_base =
			dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
		dev->caps.dmfs_high_rate_qpn_range = MLX4_A0_STEERING_TABLE_SIZE;
	}

	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_RSS_RAW_ETH] =
		dev->caps.dmfs_high_rate_qpn_range;

	dev->caps.reserved_qps = dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] +
				 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] +
				 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] +
				 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH];

	dev->caps.sqp_demux = (mlx4_is_master(dev)) ? MLX4_MAX_NUM_SLAVES : 0;

	if (!enable_64b_cqe_eqe && !mlx4_is_slave(dev)) {
		if (dev_cap->flags &
		    (MLX4_DEV_CAP_FLAG_64B_CQE | MLX4_DEV_CAP_FLAG_64B_EQE)) {
			mlx4_warn(dev, "64B EQEs/CQEs supported by the device but not enabled\n");
			dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_64B_CQE;
			dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_64B_EQE;
		}

		if (dev_cap->flags2 &
		    (MLX4_DEV_CAP_FLAG2_CQE_STRIDE |
		     MLX4_DEV_CAP_FLAG2_EQE_STRIDE)) {
			mlx4_warn(dev, "Disabling EQE/CQE stride per user request\n");
			dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE;
			dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE;
		}
	}

	if ((dev->caps.flags &
	    (MLX4_DEV_CAP_FLAG_64B_CQE | MLX4_DEV_CAP_FLAG_64B_EQE)) &&
	    mlx4_is_master(dev))
		dev->caps.function_caps |= MLX4_FUNC_CAP_64B_EQE_CQE;

	if (!mlx4_is_slave(dev)) {
		mlx4_enable_cqe_eqe_stride(dev);
		dev->caps.alloc_res_qp_mask =
			(dev->caps.bf_reg_size ? MLX4_RESERVE_ETH_BF_QP : 0) |
			MLX4_RESERVE_A0_QP;
	} else {
		dev->caps.alloc_res_qp_mask = 0;
	}

	return 0;
}

static int mlx4_get_pcie_dev_link_caps(struct mlx4_dev *dev,
				       enum pci_bus_speed *speed,
				       enum pcie_link_width *width)
{
	u32 lnkcap1, lnkcap2;
	int err1, err2;

#define  PCIE_MLW_CAP_SHIFT 4	/* start of MLW mask in link capabilities */

	*speed = PCI_SPEED_UNKNOWN;
	*width = PCIE_LNK_WIDTH_UNKNOWN;

	err1 = pcie_capability_read_dword(dev->pdev, PCI_EXP_LNKCAP, &lnkcap1);
	err2 = pcie_capability_read_dword(dev->pdev, PCI_EXP_LNKCAP2, &lnkcap2);
	if (!err2 && lnkcap2) { /* PCIe r3.0-compliant */
		if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB)
			*speed = PCIE_SPEED_8_0GT;
		else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB)
			*speed = PCIE_SPEED_5_0GT;
		else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB)
			*speed = PCIE_SPEED_2_5GT;
	}
	if (!err1) {
		*width = (lnkcap1 & PCI_EXP_LNKCAP_MLW) >> PCIE_MLW_CAP_SHIFT;
		if (!lnkcap2) { /* pre-r3.0 */
			if (lnkcap1 & PCI_EXP_LNKCAP_SLS_5_0GB)
				*speed = PCIE_SPEED_5_0GT;
			else if (lnkcap1 & PCI_EXP_LNKCAP_SLS_2_5GB)
				*speed = PCIE_SPEED_2_5GT;
		}
	}

	if (*speed == PCI_SPEED_UNKNOWN || *width == PCIE_LNK_WIDTH_UNKNOWN) {
		return err1 ? err1 :
			err2 ? err2 : -EINVAL;
	}
	return 0;
}
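/* PCIE_MLW_CAP_SHIFT: the Maximum Link Width field occupies bits 9:4 of the
 * Link Capabilities register, so masking with PCI_EXP_LNKCAP_MLW and
 * shifting right by 4 yields the width in lanes (e.g. 8 for a x8 device).
 */
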
static void mlx4_check_pcie_caps(struct mlx4_dev *dev)
{
	enum pcie_link_width width, width_cap;
	enum pci_bus_speed speed, speed_cap;
	int err;

#define PCIE_SPEED_STR(speed) \
	(speed == PCIE_SPEED_8_0GT ? "8.0GT/s" : \
	 speed == PCIE_SPEED_5_0GT ? "5.0GT/s" : \
	 speed == PCIE_SPEED_2_5GT ? "2.5GT/s" : \
	 "Unknown")

	err = mlx4_get_pcie_dev_link_caps(dev, &speed_cap, &width_cap);
	if (err) {
		mlx4_warn(dev,
			  "Unable to determine PCIe device BW capabilities\n");
		return;
	}

	err = pcie_get_minimum_link(dev->pdev, &speed, &width);
	if (err || speed == PCI_SPEED_UNKNOWN ||
	    width == PCIE_LNK_WIDTH_UNKNOWN) {
		mlx4_warn(dev,
			  "Unable to determine PCI device chain minimum BW\n");
		return;
	}

	if (width != width_cap || speed != speed_cap)
		mlx4_warn(dev,
			  "PCIe BW is different than device's capability\n");

	mlx4_info(dev, "PCIe link speed is %s, device supports %s\n",
		  PCIE_SPEED_STR(speed), PCIE_SPEED_STR(speed_cap));
	mlx4_info(dev, "PCIe link width is x%d, device supports x%d\n",
		  width, width_cap);
}

/*The function checks if there are live vf, return the num of them*/
static int mlx4_how_many_lives_vf(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *s_state;
	int i;
	int ret = 0;

	for (i = 1/*the ppf is 0*/; i < dev->num_slaves; ++i) {
		s_state = &priv->mfunc.master.slave_state[i];
		if (s_state->active && s_state->last_cmd !=
		    MLX4_COMM_CMD_RESET) {
			mlx4_warn(dev, "%s: slave: %d is still active\n",
				  __func__, i);
			ret++;
		}
	}
	return ret;
}

int mlx4_get_parav_qkey(struct mlx4_dev *dev, u32 qpn, u32 *qkey)
{
	u32 qk = MLX4_RESERVED_QKEY_BASE;

	if (qpn >= dev->phys_caps.base_tunnel_sqpn + 8 * MLX4_MFUNC_MAX ||
	    qpn < dev->phys_caps.base_proxy_sqpn)
		return -EINVAL;

	if (qpn >= dev->phys_caps.base_tunnel_sqpn)
		qk += qpn - dev->phys_caps.base_tunnel_sqpn;
	else
		qk += qpn - dev->phys_caps.base_proxy_sqpn;
	*qkey = qk;
	return 0;
}
EXPORT_SYMBOL(mlx4_get_parav_qkey);
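/* Accepts only QPNs inside the proxy/tunnel special-QP window
 * [base_proxy_sqpn, base_tunnel_sqpn + 8 * MLX4_MFUNC_MAX) and maps the
 * QPN's offset within its range onto a qkey above MLX4_RESERVED_QKEY_BASE.
 */
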
void mlx4_sync_pkey_table(struct mlx4_dev *dev, int slave, int port, int i, int val)
{
	struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev);

	if (!mlx4_is_master(dev))
		return;

	priv->virt2phys_pkey[slave][port - 1][i] = val;
}
EXPORT_SYMBOL(mlx4_sync_pkey_table);

void mlx4_put_slave_node_guid(struct mlx4_dev *dev, int slave, __be64 guid)
{
	struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev);

	if (!mlx4_is_master(dev))
		return;

	priv->slave_node_guids[slave] = guid;
}
EXPORT_SYMBOL(mlx4_put_slave_node_guid);

__be64 mlx4_get_slave_node_guid(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev);

	if (!mlx4_is_master(dev))
		return 0;

	return priv->slave_node_guids[slave];
}
EXPORT_SYMBOL(mlx4_get_slave_node_guid);

int mlx4_is_slave_active(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *s_slave;

	if (!mlx4_is_master(dev))
		return 0;

	s_slave = &priv->mfunc.master.slave_state[slave];
	return !!s_slave->active;
}
EXPORT_SYMBOL(mlx4_is_slave_active);

static void slave_adjust_steering_mode(struct mlx4_dev *dev,
				       struct mlx4_dev_cap *dev_cap,
				       struct mlx4_init_hca_param *hca_param)
{
	dev->caps.steering_mode = hca_param->steering_mode;
	if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
		dev->caps.num_qp_per_mgm = dev_cap->fs_max_num_qp_per_entry;
		dev->caps.fs_log_max_ucast_qp_range_size =
			dev_cap->fs_log_max_ucast_qp_range_size;
	} else
		dev->caps.num_qp_per_mgm =
			4 * ((1 << hca_param->log_mc_entry_sz)/16 - 2);

	mlx4_dbg(dev, "Steering mode is: %s\n",
		 mlx4_steering_mode_str(dev->caps.steering_mode));
}

static int mlx4_slave_cap(struct mlx4_dev *dev)
{
	int err;
	u32 page_size;
	struct mlx4_dev_cap dev_cap;
	struct mlx4_func_cap func_cap;
	struct mlx4_init_hca_param hca_param;
	u8 i;

	memset(&hca_param, 0, sizeof(hca_param));
	err = mlx4_QUERY_HCA(dev, &hca_param);
	if (err) {
		mlx4_err(dev, "QUERY_HCA command failed, aborting\n");
		return err;
	}

	/* fail if the hca has an unknown global capability
	 * at this time global_caps should be always zeroed
	 */
	if (hca_param.global_caps) {
		mlx4_err(dev, "Unknown hca global capabilities\n");
		return -ENOSYS;
	}

	mlx4_log_num_mgm_entry_size = hca_param.log_mc_entry_sz;

	dev->caps.hca_core_clock = hca_param.hca_core_clock;

	memset(&dev_cap, 0, sizeof(dev_cap));
	dev->caps.max_qp_dest_rdma = 1 << hca_param.log_rd_per_qp;
	err = mlx4_dev_cap(dev, &dev_cap);
	if (err) {
		mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
		return err;
	}

	err = mlx4_QUERY_FW(dev);
	if (err)
		mlx4_err(dev, "QUERY_FW command failed: could not get FW version\n");

	page_size = ~dev->caps.page_size_cap + 1;
	mlx4_warn(dev, "HCA minimum page size:%d\n", page_size);
	if (page_size > PAGE_SIZE) {
		mlx4_err(dev, "HCA minimum page size of %d bigger than kernel PAGE_SIZE of %ld, aborting\n",
			 page_size, PAGE_SIZE);
		return -ENODEV;
	}

	/* slave gets uar page size from QUERY_HCA fw command */
	dev->caps.uar_page_size = 1 << (hca_param.uar_page_sz + 12);

	/* TODO: relax this assumption */
	if (dev->caps.uar_page_size != PAGE_SIZE) {
		mlx4_err(dev, "UAR size:%d != kernel PAGE_SIZE of %ld\n",
			 dev->caps.uar_page_size, PAGE_SIZE);
		return -ENODEV;
	}

	memset(&func_cap, 0, sizeof(func_cap));
	err = mlx4_QUERY_FUNC_CAP(dev, 0, &func_cap);
	if (err) {
		mlx4_err(dev, "QUERY_FUNC_CAP general command failed, aborting (%d)\n",
			 err);
		return err;
	}

	if ((func_cap.pf_context_behaviour | PF_CONTEXT_BEHAVIOUR_MASK) !=
	    PF_CONTEXT_BEHAVIOUR_MASK) {
		mlx4_err(dev, "Unknown pf context behaviour %x known flags %x\n",
			 func_cap.pf_context_behaviour, PF_CONTEXT_BEHAVIOUR_MASK);
		return -ENOSYS;
	}

	dev->caps.num_ports = func_cap.num_ports;
	dev->quotas.qp = func_cap.qp_quota;
	dev->quotas.srq = func_cap.srq_quota;
	dev->quotas.cq = func_cap.cq_quota;
	dev->quotas.mpt = func_cap.mpt_quota;
	dev->quotas.mtt = func_cap.mtt_quota;
	dev->caps.num_qps = 1 << hca_param.log_num_qps;
	dev->caps.num_srqs = 1 << hca_param.log_num_srqs;
	dev->caps.num_cqs = 1 << hca_param.log_num_cqs;
	dev->caps.num_mpts = 1 << hca_param.log_mpt_sz;
	dev->caps.num_eqs = func_cap.max_eq;
	dev->caps.reserved_eqs = func_cap.reserved_eq;
	dev->caps.num_pds = MLX4_NUM_PDS;
	dev->caps.num_mgms = 0;
	dev->caps.num_amgms = 0;

	if (dev->caps.num_ports > MLX4_MAX_PORTS) {
		mlx4_err(dev, "HCA has %d ports, but we only support %d, aborting\n",
			 dev->caps.num_ports, MLX4_MAX_PORTS);
		return -ENODEV;
	}

	dev->caps.qp0_qkey = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL);
	dev->caps.qp0_tunnel = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL);
	dev->caps.qp0_proxy = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL);
	dev->caps.qp1_tunnel = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL);
	dev->caps.qp1_proxy = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL);

	if (!dev->caps.qp0_tunnel || !dev->caps.qp0_proxy ||
	    !dev->caps.qp1_tunnel || !dev->caps.qp1_proxy ||
	    !dev->caps.qp0_qkey) {
		err = -ENOMEM;
		goto err_mem;
	}

	for (i = 1; i <= dev->caps.num_ports; ++i) {
		err = mlx4_QUERY_FUNC_CAP(dev, i, &func_cap);
		if (err) {
			mlx4_err(dev, "QUERY_FUNC_CAP port command failed for port %d, aborting (%d)\n",
				 i, err);
			goto err_mem;
		}
		dev->caps.qp0_qkey[i - 1] = func_cap.qp0_qkey;
		dev->caps.qp0_tunnel[i - 1] = func_cap.qp0_tunnel_qpn;
		dev->caps.qp0_proxy[i - 1] = func_cap.qp0_proxy_qpn;
		dev->caps.qp1_tunnel[i - 1] = func_cap.qp1_tunnel_qpn;
		dev->caps.qp1_proxy[i - 1] = func_cap.qp1_proxy_qpn;
		dev->caps.port_mask[i] = dev->caps.port_type[i];
		dev->caps.phys_port_id[i] = func_cap.phys_port_id;
		if (mlx4_get_slave_pkey_gid_tbl_len(dev, i,
						    &dev->caps.gid_table_len[i],
						    &dev->caps.pkey_table_len[i])) {
			err = -ENODEV;
			goto err_mem;
		}
	}

	if (dev->caps.uar_page_size * (dev->caps.num_uars -
				       dev->caps.reserved_uars) >
				       pci_resource_len(dev->pdev, 2)) {
		mlx4_err(dev, "HCA reported UAR region size of 0x%x bigger than PCI resource 2 size of 0x%llx, aborting\n",
			 dev->caps.uar_page_size * dev->caps.num_uars,
			 (unsigned long long) pci_resource_len(dev->pdev, 2));
		err = -ENOMEM;
		goto err_mem;
	}

	if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_64B_EQE_ENABLED) {
		dev->caps.eqe_size = 64;
		dev->caps.eqe_factor = 1;
	} else {
		dev->caps.eqe_size = 32;
		dev->caps.eqe_factor = 0;
	}

	if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_64B_CQE_ENABLED) {
		dev->caps.cqe_size = 64;
		dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE;
	} else {
		dev->caps.cqe_size = 32;
	}

	if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_EQE_STRIDE_ENABLED) {
		dev->caps.eqe_size = hca_param.eqe_size;
		dev->caps.eqe_factor = 0;
	}

	if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_CQE_STRIDE_ENABLED) {
		dev->caps.cqe_size = hca_param.cqe_size;
		/* User still need to know when CQE > 32B */
		dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE;
	}

	dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
	mlx4_warn(dev, "Timestamping is not supported in slave mode\n");

	slave_adjust_steering_mode(dev, &dev_cap, &hca_param);

	if (func_cap.extra_flags & MLX4_QUERY_FUNC_FLAGS_BF_RES_QP &&
	    dev->caps.bf_reg_size)
		dev->caps.alloc_res_qp_mask |= MLX4_RESERVE_ETH_BF_QP;

	if (func_cap.extra_flags & MLX4_QUERY_FUNC_FLAGS_A0_RES_QP)
		dev->caps.alloc_res_qp_mask |= MLX4_RESERVE_A0_QP;

	return 0;

err_mem:
	kfree(dev->caps.qp0_qkey);
	kfree(dev->caps.qp0_tunnel);
	kfree(dev->caps.qp0_proxy);
	kfree(dev->caps.qp1_tunnel);
	kfree(dev->caps.qp1_proxy);
	dev->caps.qp0_qkey = NULL;
	dev->caps.qp0_tunnel = NULL;
	dev->caps.qp0_proxy = NULL;
	dev->caps.qp1_tunnel = NULL;
	dev->caps.qp1_proxy = NULL;

	return err;
}

static void mlx4_request_modules(struct mlx4_dev *dev)
{
	int port;
	int has_ib_port = false;
	int has_eth_port = false;
#define EN_DRV_NAME	"mlx4_en"
#define IB_DRV_NAME	"mlx4_ib"

	for (port = 1; port <= dev->caps.num_ports; port++) {
		if (dev->caps.port_type[port] == MLX4_PORT_TYPE_IB)
			has_ib_port = true;
		else if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH)
			has_eth_port = true;
	}

	if (has_eth_port)
		request_module_nowait(EN_DRV_NAME);
	if (has_ib_port || (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE))
		request_module_nowait(IB_DRV_NAME);
}
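/* request_module_nowait() is the asynchronous variant of request_module(),
 * so loading mlx4_en/mlx4_ib happens in the background and does not block
 * the probe path on modprobe.
 */
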
/*
 * Change the port configuration of the device.
 * Every user of this function must hold the port mutex.
 */
int mlx4_change_port_types(struct mlx4_dev *dev,
			   enum mlx4_port_type *port_types)
{
	int err = 0;
	int change = 0;
	int port;

	for (port = 0; port < dev->caps.num_ports; port++) {
		/* Change the port type only if the new type is different
		 * from the current, and not set to Auto */
		if (port_types[port] != dev->caps.port_type[port + 1])
			change = 1;
	}
	if (change) {
		mlx4_unregister_device(dev);
		for (port = 1; port <= dev->caps.num_ports; port++) {
			mlx4_CLOSE_PORT(dev, port);
			dev->caps.port_type[port] = port_types[port - 1];
			err = mlx4_SET_PORT(dev, port, -1);
			if (err) {
				mlx4_err(dev, "Failed to set port %d, aborting\n",
					 port);
				goto out;
			}
		}
		mlx4_set_port_mask(dev);
		err = mlx4_register_device(dev);
		if (err) {
			mlx4_err(dev, "Failed to register device\n");
			goto out;
		}
		mlx4_request_modules(dev);
	}

out:
	return err;
}

static ssize_t show_port_type(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
						   port_attr);
	struct mlx4_dev *mdev = info->dev;
	char type[8];

	sprintf(type, "%s",
		(mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_IB) ?
		"ib" : "eth");
	if (mdev->caps.possible_type[info->port] == MLX4_PORT_TYPE_AUTO)
		sprintf(buf, "auto (%s)\n", type);
	else
		sprintf(buf, "%s\n", type);

	return strlen(buf);
}

static ssize_t set_port_type(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
						   port_attr);
	struct mlx4_dev *mdev = info->dev;
	struct mlx4_priv *priv = mlx4_priv(mdev);
	enum mlx4_port_type types[MLX4_MAX_PORTS];
	enum mlx4_port_type new_types[MLX4_MAX_PORTS];
	static DEFINE_MUTEX(set_port_type_mutex);
	int i;
	int err = 0;

	mutex_lock(&set_port_type_mutex);

	if (!strcmp(buf, "ib\n"))
		info->tmp_type = MLX4_PORT_TYPE_IB;
	else if (!strcmp(buf, "eth\n"))
		info->tmp_type = MLX4_PORT_TYPE_ETH;
	else if (!strcmp(buf, "auto\n"))
		info->tmp_type = MLX4_PORT_TYPE_AUTO;
	else {
		mlx4_err(mdev, "%s is not supported port type\n", buf);
		err = -EINVAL;
		goto err_out;
	}

	mlx4_stop_sense(mdev);
	mutex_lock(&priv->port_mutex);
	/* Possible type is always the one that was delivered */
	mdev->caps.possible_type[info->port] = info->tmp_type;

	for (i = 0; i < mdev->caps.num_ports; i++) {
		types[i] = priv->port[i+1].tmp_type ? priv->port[i+1].tmp_type :
					mdev->caps.possible_type[i+1];
		if (types[i] == MLX4_PORT_TYPE_AUTO)
			types[i] = mdev->caps.port_type[i+1];
	}

	if (!(mdev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) &&
	    !(mdev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT)) {
		for (i = 1; i <= mdev->caps.num_ports; i++) {
			if (mdev->caps.possible_type[i] == MLX4_PORT_TYPE_AUTO) {
				mdev->caps.possible_type[i] = mdev->caps.port_type[i];
				err = -EINVAL;
			}
		}
	}
	if (err) {
		mlx4_err(mdev, "Auto sensing is not supported on this HCA. Set only 'eth' or 'ib' for both ports (should be the same)\n");
		goto out;
	}

	mlx4_do_sense_ports(mdev, new_types, types);

	err = mlx4_check_port_params(mdev, new_types);
	if (err)
		goto out;

	/* We are about to apply the changes after the configuration
	 * was verified, no need to remember the temporary types
	 * any more */
	for (i = 0; i < mdev->caps.num_ports; i++)
		priv->port[i + 1].tmp_type = 0;

	err = mlx4_change_port_types(mdev, new_types);

out:
	mlx4_start_sense(mdev);
	mutex_unlock(&priv->port_mutex);
err_out:
	mutex_unlock(&set_port_type_mutex);

	return err ? err : count;
}

static inline int int_to_ibta_mtu(int mtu)
{
	switch (mtu) {
	case 256:  return IB_MTU_256;
	case 512:  return IB_MTU_512;
	case 1024: return IB_MTU_1024;
	case 2048: return IB_MTU_2048;
	case 4096: return IB_MTU_4096;
	default: return -1;
	}
}

static inline int ibta_mtu_to_int(enum ibta_mtu mtu)
{
	switch (mtu) {
	case IB_MTU_256:  return  256;
	case IB_MTU_512:  return  512;
	case IB_MTU_1024: return 1024;
	case IB_MTU_2048: return 2048;
	case IB_MTU_4096: return 4096;
	default: return -1;
	}
}

static ssize_t show_port_ib_mtu(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
						   port_mtu_attr);
	struct mlx4_dev *mdev = info->dev;

	if (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_ETH)
		mlx4_warn(mdev, "port level mtu is only used for IB ports\n");

	sprintf(buf, "%d\n",
		ibta_mtu_to_int(mdev->caps.port_ib_mtu[info->port]));
	return strlen(buf);
}

static ssize_t set_port_ib_mtu(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t count)
{
	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
						   port_mtu_attr);
	struct mlx4_dev *mdev = info->dev;
	struct mlx4_priv *priv = mlx4_priv(mdev);
	int err, port, mtu, ibta_mtu = -1;

	if (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_ETH) {
		mlx4_warn(mdev, "port level mtu is only used for IB ports\n");
		return -EINVAL;
	}

	err = kstrtoint(buf, 0, &mtu);
	if (!err)
		ibta_mtu = int_to_ibta_mtu(mtu);

	if (err || ibta_mtu < 0) {
		mlx4_err(mdev, "%s is invalid IBTA mtu\n", buf);
		return -EINVAL;
	}

	mdev->caps.port_ib_mtu[info->port] = ibta_mtu;

	mlx4_stop_sense(mdev);
	mutex_lock(&priv->port_mutex);
	mlx4_unregister_device(mdev);
	for (port = 1; port <= mdev->caps.num_ports; port++) {
		mlx4_CLOSE_PORT(mdev, port);
		err = mlx4_SET_PORT(mdev, port, -1);
		if (err) {
			mlx4_err(mdev, "Failed to set port %d, aborting\n",
				 port);
			goto err_set_port;
		}
	}
	err = mlx4_register_device(mdev);
err_set_port:
	mutex_unlock(&priv->port_mutex);
	mlx4_start_sense(mdev);
	return err ? err : count;
}

static int mlx4_load_fw(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;

	priv->fw.fw_icm = mlx4_alloc_icm(dev, priv->fw.fw_pages,
					 GFP_HIGHUSER | __GFP_NOWARN, 0);
	if (!priv->fw.fw_icm) {
		mlx4_err(dev, "Couldn't allocate FW area, aborting\n");
		return -ENOMEM;
	}

	err = mlx4_MAP_FA(dev, priv->fw.fw_icm);
	if (err) {
		mlx4_err(dev, "MAP_FA command failed, aborting\n");
		goto err_free;
	}

	err = mlx4_RUN_FW(dev);
	if (err) {
		mlx4_err(dev, "RUN_FW command failed, aborting\n");
		goto err_unmap_fa;
	}

	return 0;

err_unmap_fa:
	mlx4_UNMAP_FA(dev);

err_free:
	mlx4_free_icm(dev, priv->fw.fw_icm, 0);
	return err;
}

static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base,
				int cmpt_entry_sz)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;
	int num_eqs;

	err = mlx4_init_icm_table(dev, &priv->qp_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_QP *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err)
		goto err;

	err = mlx4_init_icm_table(dev, &priv->srq_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_SRQ *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, dev->caps.num_srqs,
				  dev->caps.reserved_srqs, 0, 0);
	if (err)
		goto err_qp;

	err = mlx4_init_icm_table(dev, &priv->cq_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_CQ *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, dev->caps.num_cqs,
				  dev->caps.reserved_cqs, 0, 0);
	if (err)
		goto err_srq;

	num_eqs = dev->phys_caps.num_phys_eqs;
	err = mlx4_init_icm_table(dev, &priv->eq_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_EQ *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, num_eqs, num_eqs, 0, 0);
	if (err)
		goto err_cq;

	return 0;

err_cq:
	mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);

err_srq:
	mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);

err_qp:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);

err:
	return err;
}
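/* All four cMPT sub-tables live in one FW-provided region: each type's table
 * starts at cmpt_base + ((type * cmpt_entry_sz) << MLX4_CMPT_SHIFT), with
 * type being one of MLX4_CMPT_TYPE_{QP,SRQ,CQ,EQ} as used above.
 */
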
static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
			 struct mlx4_init_hca_param *init_hca, u64 icm_size)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u64 aux_pages;
	int num_eqs;
	int err;

	err = mlx4_SET_ICM_SIZE(dev, icm_size, &aux_pages);
	if (err) {
		mlx4_err(dev, "SET_ICM_SIZE command failed, aborting\n");
		return err;
	}

	mlx4_dbg(dev, "%lld KB of HCA context requires %lld KB aux memory\n",
		 (unsigned long long) icm_size >> 10,
		 (unsigned long long) aux_pages << 2);

	priv->fw.aux_icm = mlx4_alloc_icm(dev, aux_pages,
					  GFP_HIGHUSER | __GFP_NOWARN, 0);
	if (!priv->fw.aux_icm) {
		mlx4_err(dev, "Couldn't allocate aux memory, aborting\n");
		return -ENOMEM;
	}

	err = mlx4_MAP_ICM_AUX(dev, priv->fw.aux_icm);
	if (err) {
		mlx4_err(dev, "MAP_ICM_AUX command failed, aborting\n");
		goto err_free_aux;
	}

	err = mlx4_init_cmpt_table(dev, init_hca->cmpt_base, dev_cap->cmpt_entry_sz);
	if (err) {
		mlx4_err(dev, "Failed to map cMPT context memory, aborting\n");
		goto err_unmap_aux;
	}

	num_eqs = dev->phys_caps.num_phys_eqs;
	err = mlx4_init_icm_table(dev, &priv->eq_table.table,
				  init_hca->eqc_base, dev_cap->eqc_entry_sz,
				  num_eqs, num_eqs, 0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map EQ context memory, aborting\n");
		goto err_unmap_cmpt;
	}

	/*
	 * Reserved MTT entries must be aligned up to a cacheline
	 * boundary, since the FW will write to them, while the driver
	 * writes to all other MTT entries. (The variable
	 * dev->caps.mtt_entry_sz below is really the MTT segment
	 * size, not the raw entry size)
	 */
	dev->caps.reserved_mtts =
		ALIGN(dev->caps.reserved_mtts * dev->caps.mtt_entry_sz,
		      dma_get_cache_alignment()) / dev->caps.mtt_entry_sz;

	err = mlx4_init_icm_table(dev, &priv->mr_table.mtt_table,
				  init_hca->mtt_base,
				  dev->caps.mtt_entry_sz,
				  dev->caps.num_mtts,
				  dev->caps.reserved_mtts, 1, 0);
	if (err) {
		mlx4_err(dev, "Failed to map MTT context memory, aborting\n");
		goto err_unmap_eq;
	}

	err = mlx4_init_icm_table(dev, &priv->mr_table.dmpt_table,
				  init_hca->dmpt_base,
				  dev_cap->dmpt_entry_sz,
				  dev->caps.num_mpts,
				  dev->caps.reserved_mrws, 1, 1);
	if (err) {
		mlx4_err(dev, "Failed to map dMPT context memory, aborting\n");
		goto err_unmap_mtt;
	}

	err = mlx4_init_icm_table(dev, &priv->qp_table.qp_table,
				  init_hca->qpc_base,
				  dev_cap->qpc_entry_sz,
				  dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map QP context memory, aborting\n");
		goto err_unmap_dmpt;
	}

	err = mlx4_init_icm_table(dev, &priv->qp_table.auxc_table,
				  init_hca->auxc_base,
				  dev_cap->aux_entry_sz,
				  dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map AUXC context memory, aborting\n");
		goto err_unmap_qp;
	}

	err = mlx4_init_icm_table(dev, &priv->qp_table.altc_table,
				  init_hca->altc_base,
				  dev_cap->altc_entry_sz,
				  dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map ALTC context memory, aborting\n");
		goto err_unmap_auxc;
	}

	err = mlx4_init_icm_table(dev, &priv->qp_table.rdmarc_table,
				  init_hca->rdmarc_base,
				  dev_cap->rdmarc_entry_sz << priv->qp_table.rdmarc_shift,
				  dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map RDMARC context memory, aborting\n");
		goto err_unmap_altc;
	}

	err = mlx4_init_icm_table(dev, &priv->cq_table.table,
				  init_hca->cqc_base,
				  dev_cap->cqc_entry_sz,
				  dev->caps.num_cqs,
				  dev->caps.reserved_cqs, 0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map CQ context memory, aborting\n");
		goto err_unmap_rdmarc;
	}

	err = mlx4_init_icm_table(dev, &priv->srq_table.table,
				  init_hca->srqc_base,
				  dev_cap->srq_entry_sz,
				  dev->caps.num_srqs,
				  dev->caps.reserved_srqs, 0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map SRQ context memory, aborting\n");
		goto err_unmap_cq;
	}

	/*
	 * For flow steering device managed mode it is required to use
	 * mlx4_init_icm_table. For B0 steering mode it's not strictly
	 * required, but for simplicity just map the whole multicast
	 * group table now.  The table isn't very big and it's a lot
	 * easier than trying to track ref counts.
	 */
	err = mlx4_init_icm_table(dev, &priv->mcg_table.table,
				  init_hca->mc_base,
				  mlx4_get_mgm_entry_size(dev),
				  dev->caps.num_mgms + dev->caps.num_amgms,
				  dev->caps.num_mgms + dev->caps.num_amgms,
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map MCG context memory, aborting\n");
		goto err_unmap_srq;
	}

	return 0;

err_unmap_srq:
	mlx4_cleanup_icm_table(dev, &priv->srq_table.table);

err_unmap_cq:
	mlx4_cleanup_icm_table(dev, &priv->cq_table.table);

err_unmap_rdmarc:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table);

err_unmap_altc:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table);

err_unmap_auxc:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table);

err_unmap_qp:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);

err_unmap_dmpt:
	mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);

err_unmap_mtt:
	mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);

err_unmap_eq:
	mlx4_cleanup_icm_table(dev, &priv->eq_table.table);

err_unmap_cmpt:
	mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);

err_unmap_aux:
	mlx4_UNMAP_ICM_AUX(dev);

err_free_aux:
	mlx4_free_icm(dev, priv->fw.aux_icm, 0);

	return err;
}

static void mlx4_free_icms(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	mlx4_cleanup_icm_table(dev, &priv->mcg_table.table);
	mlx4_cleanup_icm_table(dev, &priv->srq_table.table);
	mlx4_cleanup_icm_table(dev, &priv->cq_table.table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);
	mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);
	mlx4_cleanup_icm_table(dev, &priv->eq_table.table);
	mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);

	mlx4_UNMAP_ICM_AUX(dev);
	mlx4_free_icm(dev, priv->fw.aux_icm, 0);
}

static void mlx4_slave_exit(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	mutex_lock(&priv->cmd.slave_cmd_mutex);
	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_TIME))
		mlx4_warn(dev, "Failed to close slave function\n");
	mutex_unlock(&priv->cmd.slave_cmd_mutex);
}

static int map_bf_area(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	resource_size_t bf_start;
	resource_size_t bf_len;
	int err = 0;

	if (!dev->caps.bf_reg_size)
		return -ENXIO;

	bf_start = pci_resource_start(dev->pdev, 2) +
			(dev->caps.num_uars << PAGE_SHIFT);
	bf_len = pci_resource_len(dev->pdev, 2) -
			(dev->caps.num_uars << PAGE_SHIFT);
	priv->bf_mapping = io_mapping_create_wc(bf_start, bf_len);
	if (!priv->bf_mapping)
		err = -ENOMEM;

	return err;
}

static void unmap_bf_area(struct mlx4_dev *dev)
{
	if (mlx4_priv(dev)->bf_mapping)
		io_mapping_free(mlx4_priv(dev)->bf_mapping);
}

cycle_t mlx4_read_clock(struct mlx4_dev *dev)
{
	u32 clockhi, clocklo, clockhi1;
	cycle_t cycles;
	int i;
	struct mlx4_priv *priv = mlx4_priv(dev);

	for (i = 0; i < 10; i++) {
		clockhi = swab32(readl(priv->clock_mapping));
		clocklo = swab32(readl(priv->clock_mapping + 4));
		clockhi1 = swab32(readl(priv->clock_mapping));
		if (clockhi == clockhi1)
			break;
	}

	cycles = (u64) clockhi << 32 | (u64) clocklo;

	return cycles;
}
EXPORT_SYMBOL_GPL(mlx4_read_clock);
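/* The 64-bit counter is read as two 32-bit halves, so the high word is
 * sampled before and after the low word; retrying until both samples match
 * guards against the low word wrapping (and the high word ticking) mid-read.
 */
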
static int map_internal_clock(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	priv->clock_mapping =
		ioremap(pci_resource_start(dev->pdev, priv->fw.clock_bar) +
			priv->fw.clock_offset, MLX4_CLOCK_SIZE);

	if (!priv->clock_mapping)
		return -ENOMEM;

	return 0;
}

static void unmap_internal_clock(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	if (priv->clock_mapping)
		iounmap(priv->clock_mapping);
}

static void mlx4_close_hca(struct mlx4_dev *dev)
{
	unmap_internal_clock(dev);
	unmap_bf_area(dev);
	if (mlx4_is_slave(dev))
		mlx4_slave_exit(dev);
	else {
		mlx4_CLOSE_HCA(dev, 0);
		mlx4_free_icms(dev);
	}
}

static void mlx4_close_fw(struct mlx4_dev *dev)
{
	if (!mlx4_is_slave(dev)) {
		mlx4_UNMAP_FA(dev);
		mlx4_free_icm(dev, mlx4_priv(dev)->fw.fw_icm, 0);
	}
}

static int mlx4_init_slave(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u64 dma = (u64) priv->mfunc.vhcr_dma;
	int ret_from_reset = 0;
	u32 slave_read;
	u32 cmd_channel_ver;

	if (atomic_read(&pf_loading)) {
		mlx4_warn(dev, "PF is not ready - Deferring probe\n");
		return -EPROBE_DEFER;
	}

	mutex_lock(&priv->cmd.slave_cmd_mutex);
	priv->cmd.max_cmds = 1;
	mlx4_warn(dev, "Sending reset\n");
	ret_from_reset = mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0,
				       MLX4_COMM_TIME);
	/* if we are in the middle of flr the slave will try
	 * NUM_OF_RESET_RETRIES times before leaving.*/
	if (ret_from_reset) {
		if (MLX4_DELAY_RESET_SLAVE == ret_from_reset) {
			mlx4_warn(dev, "slave is currently in the middle of FLR - Deferring probe\n");
			mutex_unlock(&priv->cmd.slave_cmd_mutex);
			return -EPROBE_DEFER;
		} else
			goto err;
	}

	/* check the driver version - the slave I/F revision
	 * must match the master's */
	slave_read = swab32(readl(&priv->mfunc.comm->slave_read));
	cmd_channel_ver = mlx4_comm_get_version();

	if (MLX4_COMM_GET_IF_REV(cmd_channel_ver) !=
		MLX4_COMM_GET_IF_REV(slave_read)) {
		mlx4_err(dev, "slave driver version is not supported by the master\n");
		goto err;
	}

	mlx4_warn(dev, "Sending vhcr0\n");
	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR0, dma >> 48,
			  MLX4_COMM_TIME))
		goto err;
	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR1, dma >> 32,
			  MLX4_COMM_TIME))
		goto err;
	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR2, dma >> 16,
			  MLX4_COMM_TIME))
		goto err;
	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_EN, dma, MLX4_COMM_TIME))
		goto err;

	mutex_unlock(&priv->cmd.slave_cmd_mutex);
	return 0;

err:
	mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, 0);
	mutex_unlock(&priv->cmd.slave_cmd_mutex);
	return -EIO;
}
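/* The 64-bit VHCR DMA address is handed to the master 16 bits at a time:
 * VHCR0 carries bits 63:48, VHCR1 bits 47:32, VHCR2 bits 31:16, and the
 * final VHCR_EN command passes the low bits and arms the command channel.
 */
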
static void mlx4_parav_master_pf_caps(struct mlx4_dev *dev)
{
	int i;

	for (i = 1; i <= dev->caps.num_ports; i++) {
		if (dev->caps.port_type[i] == MLX4_PORT_TYPE_ETH)
			dev->caps.gid_table_len[i] =
				mlx4_get_slave_num_gids(dev, 0, i);
		else
			dev->caps.gid_table_len[i] = 1;
		dev->caps.pkey_table_len[i] =
			dev->phys_caps.pkey_phys_table_len[i] - 1;
	}
}

static int choose_log_fs_mgm_entry_size(int qp_per_entry)
{
	int i = MLX4_MIN_MGM_LOG_ENTRY_SIZE;

	for (i = MLX4_MIN_MGM_LOG_ENTRY_SIZE; i <= MLX4_MAX_MGM_LOG_ENTRY_SIZE;
	      i++) {
		if (qp_per_entry <= 4 * ((1 << i) / 16 - 2))
			break;
	}

	return (i <= MLX4_MAX_MGM_LOG_ENTRY_SIZE) ? i : -1;
}
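/* Worked example: an entry of 2^i bytes holds 4 * (2^i / 16 - 2) QPs, so for
 * qp_per_entry = 60 the loop settles on i = 9 (4 * 30 = 120), since i = 8
 * only gives 4 * 14 = 56.
 */
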
static const char *dmfs_high_rate_steering_mode_str(int dmfs_high_steer_mode)
{
	switch (dmfs_high_steer_mode) {
	case MLX4_STEERING_DMFS_A0_DEFAULT:
		return "default performance";

	case MLX4_STEERING_DMFS_A0_DYNAMIC:
		return "dynamic hybrid mode";

	case MLX4_STEERING_DMFS_A0_STATIC:
		return "performance optimized for limited rule configuration (static)";

	case MLX4_STEERING_DMFS_A0_DISABLE:
		return "disabled performance optimized steering";

	case MLX4_STEERING_DMFS_A0_NOT_SUPPORTED:
		return "performance optimized steering not supported";

	default:
		return "Unrecognized mode";
	}
}

#define MLX4_DMFS_A0_STEERING			(1UL << 2)

static void choose_steering_mode(struct mlx4_dev *dev,
				 struct mlx4_dev_cap *dev_cap)
{
	if (mlx4_log_num_mgm_entry_size <= 0) {
		if ((-mlx4_log_num_mgm_entry_size) & MLX4_DMFS_A0_STEERING) {
			if (dev->caps.dmfs_high_steer_mode ==
			    MLX4_STEERING_DMFS_A0_NOT_SUPPORTED)
				mlx4_err(dev, "DMFS high rate mode not supported\n");
			else
				dev->caps.dmfs_high_steer_mode =
					MLX4_STEERING_DMFS_A0_STATIC;
		}
	}

	if (mlx4_log_num_mgm_entry_size <= 0 &&
	    dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_FS_EN &&
	    (!mlx4_is_mfunc(dev) ||
	     (dev_cap->fs_max_num_qp_per_entry >= (dev->num_vfs + 1))) &&
	    choose_log_fs_mgm_entry_size(dev_cap->fs_max_num_qp_per_entry) >=
		MLX4_MIN_MGM_LOG_ENTRY_SIZE) {
		dev->oper_log_mgm_entry_size =
			choose_log_fs_mgm_entry_size(dev_cap->fs_max_num_qp_per_entry);
		dev->caps.steering_mode = MLX4_STEERING_MODE_DEVICE_MANAGED;
		dev->caps.num_qp_per_mgm = dev_cap->fs_max_num_qp_per_entry;
		dev->caps.fs_log_max_ucast_qp_range_size =
			dev_cap->fs_log_max_ucast_qp_range_size;
	} else {
		if (dev->caps.dmfs_high_steer_mode !=
		    MLX4_STEERING_DMFS_A0_NOT_SUPPORTED)
			dev->caps.dmfs_high_steer_mode = MLX4_STEERING_DMFS_A0_DISABLE;
		if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER &&
		    dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)
			dev->caps.steering_mode = MLX4_STEERING_MODE_B0;
		else {
			dev->caps.steering_mode = MLX4_STEERING_MODE_A0;

			if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER ||
			    dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)
				mlx4_warn(dev, "Must have both UC_STEER and MC_STEER flags set to use B0 steering - falling back to A0 steering mode\n");
		}
		dev->oper_log_mgm_entry_size =
			mlx4_log_num_mgm_entry_size > 0 ?
			mlx4_log_num_mgm_entry_size :
			MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE;
		dev->caps.num_qp_per_mgm = mlx4_get_qp_per_mgm(dev);
	}
	mlx4_dbg(dev, "Steering mode is: %s, oper_log_mgm_entry_size = %d, modparam log_num_mgm_entry_size = %d\n",
		 mlx4_steering_mode_str(dev->caps.steering_mode),
		 dev->oper_log_mgm_entry_size,
		 mlx4_log_num_mgm_entry_size);
}

static void choose_tunnel_offload_mode(struct mlx4_dev *dev,
				       struct mlx4_dev_cap *dev_cap)
{
	if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED &&
	    dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS &&
	    dev->caps.dmfs_high_steer_mode != MLX4_STEERING_DMFS_A0_STATIC)
		dev->caps.tunnel_offload_mode = MLX4_TUNNEL_OFFLOAD_MODE_VXLAN;
	else
		dev->caps.tunnel_offload_mode = MLX4_TUNNEL_OFFLOAD_MODE_NONE;

	mlx4_dbg(dev, "Tunneling offload mode is: %s\n", (dev->caps.tunnel_offload_mode
		 == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) ? "vxlan" : "none");
}

static int mlx4_validate_optimized_steering(struct mlx4_dev *dev)
{
	int i;
	struct mlx4_port_cap port_cap;

	if (dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_NOT_SUPPORTED)
		return -EINVAL;

	for (i = 1; i <= dev->caps.num_ports; i++) {
		if (mlx4_dev_port(dev, i, &port_cap)) {
			mlx4_err(dev,
				 "QUERY_DEV_CAP command failed, can't verify DMFS high rate steering.\n");
		} else if ((dev->caps.dmfs_high_steer_mode !=
			    MLX4_STEERING_DMFS_A0_DEFAULT) &&
			   (port_cap.dmfs_optimized_state ==
			    !!(dev->caps.dmfs_high_steer_mode ==
			    MLX4_STEERING_DMFS_A0_DISABLE))) {
			mlx4_err(dev,
				 "DMFS high rate steer mode differ, driver requested %s but %s in FW.\n",
				 dmfs_high_rate_steering_mode_str(
					dev->caps.dmfs_high_steer_mode),
				 (port_cap.dmfs_optimized_state ?
					"enabled" : "disabled"));
		}
	}

	return 0;
}

static int mlx4_init_fw(struct mlx4_dev *dev)
{
	struct mlx4_mod_stat_cfg mlx4_cfg;
	int err = 0;

	if (!mlx4_is_slave(dev)) {
		err = mlx4_QUERY_FW(dev);
		if (err) {
			if (err == -EACCES)
				mlx4_info(dev, "non-primary physical function, skipping\n");
			else
				mlx4_err(dev, "QUERY_FW command failed, aborting\n");
			return err;
		}

		err = mlx4_load_fw(dev);
		if (err) {
			mlx4_err(dev, "Failed to start FW, aborting\n");
			return err;
		}

		mlx4_cfg.log_pg_sz_m = 1;
		mlx4_cfg.log_pg_sz = 0;
		err = mlx4_MOD_STAT_CFG(dev, &mlx4_cfg);
		if (err)
			mlx4_warn(dev, "Failed to override log_pg_sz parameter\n");
	}

	return err;
}

static int mlx4_init_hca(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_adapter adapter;
	struct mlx4_dev_cap dev_cap;
	struct mlx4_profile profile;
	struct mlx4_init_hca_param init_hca;
	u64 icm_size;
	struct mlx4_config_dev_params params;
	int err;

	if (!mlx4_is_slave(dev)) {
		err = mlx4_dev_cap(dev, &dev_cap);
		if (err) {
			mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
			return err;
		}

		choose_steering_mode(dev, &dev_cap);
		choose_tunnel_offload_mode(dev, &dev_cap);

		if (dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC &&
		    mlx4_is_master(dev))
			dev->caps.function_caps |= MLX4_FUNC_CAP_DMFS_A0_STATIC;

		err = mlx4_get_phys_port_id(dev);
		if (err)
			mlx4_err(dev, "Fail to get physical port id\n");

		if (mlx4_is_master(dev))
			mlx4_parav_master_pf_caps(dev);

		if (mlx4_low_memory_profile()) {
			mlx4_info(dev, "Running from within kdump kernel. Using low memory profile\n");
			profile = low_mem_profile;
		} else {
			profile = default_profile;
		}
		if (dev->caps.steering_mode ==
		    MLX4_STEERING_MODE_DEVICE_MANAGED)
			profile.num_mcg = MLX4_FS_NUM_MCG;

		icm_size = mlx4_make_profile(dev, &profile, &dev_cap,
					     &init_hca);
		if ((long long) icm_size < 0) {
			err = icm_size;
			return err;
		}

		dev->caps.max_fmr_maps = (1 << (32 - ilog2(dev->caps.num_mpts))) - 1;

		init_hca.log_uar_sz = ilog2(dev->caps.num_uars);
		init_hca.uar_page_sz = PAGE_SHIFT - 12;
		init_hca.mw_enabled = 0;
		if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW ||
		    dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN)
			init_hca.mw_enabled = INIT_HCA_TPT_MW_ENABLE;

		err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size);
		if (err)
			return err;

		err = mlx4_INIT_HCA(dev, &init_hca);
		if (err) {
			mlx4_err(dev, "INIT_HCA command failed, aborting\n");
			goto err_free_icm;
		}

		if (dev_cap.flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) {
			err = mlx4_query_func(dev, &dev_cap);
			if (err < 0) {
				mlx4_err(dev, "QUERY_FUNC command failed, aborting.\n");
				goto err_close;
			} else if (err & MLX4_QUERY_FUNC_NUM_SYS_EQS) {
				dev->caps.num_eqs = dev_cap.max_eqs;
				dev->caps.reserved_eqs = dev_cap.reserved_eqs;
				dev->caps.reserved_uars = dev_cap.reserved_uars;
			}
		}

		/*
		 * If TS is supported by FW
		 * read HCA frequency by QUERY_HCA command
		 */
		if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) {
			memset(&init_hca, 0, sizeof(init_hca));
			err = mlx4_QUERY_HCA(dev, &init_hca);
			if (err) {
				mlx4_err(dev, "QUERY_HCA command failed, disable timestamp\n");
				dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
			} else {
				dev->caps.hca_core_clock =
					init_hca.hca_core_clock;
			}

			/* In case we got HCA frequency 0 - disable timestamping
			 * to avoid dividing by zero
			 */
			if (!dev->caps.hca_core_clock) {
				dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
				mlx4_err(dev,
					 "HCA frequency is 0 - timestamping is not supported\n");
			} else if (map_internal_clock(dev)) {
				/*
				 * Map internal clock,
				 * in case of failure disable timestamping
				 */
				dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
				mlx4_err(dev, "Failed to map internal clock. Timestamping is not supported\n");
			}
		}

		if (dev->caps.dmfs_high_steer_mode !=
		    MLX4_STEERING_DMFS_A0_NOT_SUPPORTED) {
			if (mlx4_validate_optimized_steering(dev))
				mlx4_warn(dev, "Optimized steering validation failed\n");

			if (dev->caps.dmfs_high_steer_mode ==
			    MLX4_STEERING_DMFS_A0_DISABLE) {
				dev->caps.dmfs_high_rate_qpn_base =
					dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
				dev->caps.dmfs_high_rate_qpn_range =
					MLX4_A0_STEERING_TABLE_SIZE;
			}

			mlx4_dbg(dev, "DMFS high rate steer mode is: %s\n",
				 dmfs_high_rate_steering_mode_str(
					dev->caps.dmfs_high_steer_mode));
		}
	} else {
		err = mlx4_init_slave(dev);
		if (err) {
			if (err != -EPROBE_DEFER)
				mlx4_err(dev, "Failed to initialize slave\n");
			return err;
		}

		err = mlx4_slave_cap(dev);
		if (err) {
			mlx4_err(dev, "Failed to obtain slave caps\n");
			goto err_close;
		}
	}

	if (map_bf_area(dev))
		mlx4_dbg(dev, "Failed to map blue flame area\n");

	/*Only the master set the ports, all the rest got it from it.*/
	if (!mlx4_is_slave(dev))
		mlx4_set_port_mask(dev);

	err = mlx4_QUERY_ADAPTER(dev, &adapter);
	if (err) {
		mlx4_err(dev, "QUERY_ADAPTER command failed, aborting\n");
		goto unmap_bf;
	}

	/* Query CONFIG_DEV parameters */
	err = mlx4_config_dev_retrieval(dev, &params);
	if (err && err != -ENOTSUPP) {
		mlx4_err(dev, "Failed to query CONFIG_DEV parameters\n");
	} else if (!err) {
		dev->caps.rx_checksum_flags_port[1] = params.rx_csum_flags_port_1;
		dev->caps.rx_checksum_flags_port[2] = params.rx_csum_flags_port_2;
	}
	priv->eq_table.inta_pin = adapter.inta_pin;
	memcpy(dev->board_id, adapter.board_id, sizeof dev->board_id);

	return 0;

unmap_bf:
	unmap_internal_clock(dev);
	unmap_bf_area(dev);

	if (mlx4_is_slave(dev)) {
		kfree(dev->caps.qp0_qkey);
		kfree(dev->caps.qp0_tunnel);
		kfree(dev->caps.qp0_proxy);
		kfree(dev->caps.qp1_tunnel);
		kfree(dev->caps.qp1_proxy);
	}

err_close:
	if (mlx4_is_slave(dev))
		mlx4_slave_exit(dev);
	else
		mlx4_CLOSE_HCA(dev, 0);

err_free_icm:
	if (!mlx4_is_slave(dev))
		mlx4_free_icms(dev);

	return err;
}

static int mlx4_init_counters_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int nent;

	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
		return -ENOENT;

	nent = dev->caps.max_counters;
	return mlx4_bitmap_init(&priv->counters_bitmap, nent, nent - 1, 0, 0);
}

static void mlx4_cleanup_counters_table(struct mlx4_dev *dev)
{
	mlx4_bitmap_cleanup(&mlx4_priv(dev)->counters_bitmap);
}

int __mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
		return -ENOENT;

	*idx = mlx4_bitmap_alloc(&priv->counters_bitmap);
	if (*idx == -1)
		return -ENOMEM;

	return 0;
}

int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx)
{
	u64 out_param;
	int err;

	if (mlx4_is_mfunc(dev)) {
		err = mlx4_cmd_imm(dev, 0, &out_param, RES_COUNTER,
				   RES_OP_RESERVE, MLX4_CMD_ALLOC_RES,
				   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
		if (!err)
			*idx = get_param_l(&out_param);

		return err;
	}
	return __mlx4_counter_alloc(dev, idx);
}
EXPORT_SYMBOL_GPL(mlx4_counter_alloc);

void __mlx4_counter_free(struct mlx4_dev *dev, u32 idx)
{
	mlx4_bitmap_free(&mlx4_priv(dev)->counters_bitmap, idx, MLX4_USE_RR);
}

void mlx4_counter_free(struct mlx4_dev *dev, u32 idx)
{
	u64 in_param = 0;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, idx);
		mlx4_cmd(dev, in_param, RES_COUNTER, RES_OP_RESERVE,
			 MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
			 MLX4_CMD_WRAPPED);
		return;
	}
	__mlx4_counter_free(dev, idx);
}
EXPORT_SYMBOL_GPL(mlx4_counter_free);
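
/*
 * Usage sketch for the two exported calls above (illustrative only; the
 * caller shown here is an assumption, not part of this file):
 *
 *	u32 idx;
 *
 *	if (!mlx4_counter_alloc(dev, &idx)) {
 *		... attach idx to a QP context ...
 *		mlx4_counter_free(dev, idx);
 *	}
 *
 * In multi-function (SR-IOV) mode both calls are forwarded to the PF via
 * the ALLOC_RES/FREE_RES command wrappers rather than touching the
 * counters bitmap directly.
 */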
static int mlx4_setup_hca(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;
	int port;
	__be32 ib_port_default_caps;

	err = mlx4_init_uar_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize user access region table, aborting\n");
		return err;
	}

	err = mlx4_uar_alloc(dev, &priv->driver_uar);
	if (err) {
		mlx4_err(dev, "Failed to allocate driver access region, aborting\n");
		goto err_uar_table_free;
	}

	priv->kar = ioremap((phys_addr_t) priv->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
	if (!priv->kar) {
		mlx4_err(dev, "Couldn't map kernel access region, aborting\n");
		err = -ENOMEM;
		goto err_uar_free;
	}

	err = mlx4_init_pd_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize protection domain table, aborting\n");
		goto err_kar_unmap;
	}

	err = mlx4_init_xrcd_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize reliable connection domain table, aborting\n");
		goto err_pd_table_free;
	}

	err = mlx4_init_mr_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize memory region table, aborting\n");
		goto err_xrcd_table_free;
	}

	if (!mlx4_is_slave(dev)) {
		err = mlx4_init_mcg_table(dev);
		if (err) {
			mlx4_err(dev, "Failed to initialize multicast group table, aborting\n");
			goto err_mr_table_free;
		}
		err = mlx4_config_mad_demux(dev);
		if (err) {
			mlx4_err(dev, "Failed in config_mad_demux, aborting\n");
			goto err_mcg_table_free;
		}
	}

	err = mlx4_init_eq_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize event queue table, aborting\n");
		goto err_mcg_table_free;
	}

	err = mlx4_cmd_use_events(dev);
	if (err) {
		mlx4_err(dev, "Failed to switch to event-driven firmware commands, aborting\n");
		goto err_eq_table_free;
	}

	err = mlx4_NOP(dev);
	if (err) {
		if (dev->flags & MLX4_FLAG_MSI_X) {
			mlx4_warn(dev, "NOP command failed to generate MSI-X interrupt IRQ %d)\n",
				  priv->eq_table.eq[dev->caps.num_comp_vectors].irq);
			mlx4_warn(dev, "Trying again without MSI-X\n");
		} else {
			mlx4_err(dev, "NOP command failed to generate interrupt (IRQ %d), aborting\n",
				 priv->eq_table.eq[dev->caps.num_comp_vectors].irq);
			mlx4_err(dev, "BIOS or ACPI interrupt routing problem?\n");
		}

		goto err_cmd_poll;
	}

	mlx4_dbg(dev, "NOP command IRQ test passed\n");

	err = mlx4_init_cq_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize completion queue table, aborting\n");
		goto err_cmd_poll;
	}

	err = mlx4_init_srq_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize shared receive queue table, aborting\n");
		goto err_cq_table_free;
	}

	err = mlx4_init_qp_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize queue pair table, aborting\n");
		goto err_srq_table_free;
	}

	err = mlx4_init_counters_table(dev);
	if (err && err != -ENOENT) {
		mlx4_err(dev, "Failed to initialize counters table, aborting\n");
		goto err_qp_table_free;
	}

	if (!mlx4_is_slave(dev)) {
		for (port = 1; port <= dev->caps.num_ports; port++) {
			ib_port_default_caps = 0;
			err = mlx4_get_port_ib_caps(dev, port,
						    &ib_port_default_caps);
			if (err)
				mlx4_warn(dev, "failed to get port %d default ib capabilities (%d). Continuing with caps = 0\n",
					  port, err);
			dev->caps.ib_port_def_cap[port] = ib_port_default_caps;

			/* initialize per-slave default ib port capabilities */
			if (mlx4_is_master(dev)) {
				int i;

				for (i = 0; i < dev->num_slaves; i++) {
					if (i == mlx4_master_func_num(dev))
						continue;
					priv->mfunc.master.slave_state[i].ib_cap_mask[port] =
						ib_port_default_caps;
				}
			}

			if (mlx4_is_mfunc(dev))
				dev->caps.port_ib_mtu[port] = IB_MTU_2048;
			else
				dev->caps.port_ib_mtu[port] = IB_MTU_4096;

			err = mlx4_SET_PORT(dev, port, mlx4_is_master(dev) ?
					    dev->caps.pkey_table_len[port] : -1);
			if (err) {
				mlx4_err(dev, "Failed to set port %d, aborting\n",
					 port);
				goto err_counters_table_free;
			}
		}
	}

	return 0;

err_counters_table_free:
	mlx4_cleanup_counters_table(dev);

err_qp_table_free:
	mlx4_cleanup_qp_table(dev);

err_srq_table_free:
	mlx4_cleanup_srq_table(dev);

err_cq_table_free:
	mlx4_cleanup_cq_table(dev);

err_cmd_poll:
	mlx4_cmd_use_polling(dev);

err_eq_table_free:
	mlx4_cleanup_eq_table(dev);

err_mcg_table_free:
	if (!mlx4_is_slave(dev))
		mlx4_cleanup_mcg_table(dev);

err_mr_table_free:
	mlx4_cleanup_mr_table(dev);

err_xrcd_table_free:
	mlx4_cleanup_xrcd_table(dev);

err_pd_table_free:
	mlx4_cleanup_pd_table(dev);

err_kar_unmap:
	iounmap(priv->kar);

err_uar_free:
	mlx4_uar_free(dev, &priv->driver_uar);

err_uar_table_free:
	mlx4_cleanup_uar_table(dev);
	return err;
}
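
/*
 * The function below sizes its MSI-X request as one completion vector per
 * online CPU per port plus the MSIX_LEGACY_SZ asynchronous vectors, capped
 * by the EQs the firmware actually exposes. Worked example (the numbers
 * are assumed, not from this file): with 2 ports and 8 online CPUs the
 * driver asks for 2 * 8 + MSIX_LEGACY_SZ vectors, and
 * pci_enable_msix_range() may grant anything down to 2; if fewer than
 * MSIX_LEGACY_SZ + num_ports * MIN_MSIX_P_PORT vectors are granted, the
 * driver falls back to legacy mode with all EQs shared.
 */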
static void mlx4_enable_msi_x(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct msix_entry *entries;
	int i;

	if (msi_x) {
		int nreq = dev->caps.num_ports * num_online_cpus() + MSIX_LEGACY_SZ;

		nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs,
			     nreq);

		entries = kcalloc(nreq, sizeof(*entries), GFP_KERNEL);
		if (!entries)
			goto no_msi;

		for (i = 0; i < nreq; ++i)
			entries[i].entry = i;

		nreq = pci_enable_msix_range(dev->pdev, entries, 2, nreq);

		if (nreq < 0) {
			kfree(entries);
			goto no_msi;
		} else if (nreq < MSIX_LEGACY_SZ +
			   dev->caps.num_ports * MIN_MSIX_P_PORT) {
			/* Working in legacy mode, all EQs shared */
			dev->caps.comp_pool        = 0;
			dev->caps.num_comp_vectors = nreq - 1;
		} else {
			dev->caps.comp_pool        = nreq - MSIX_LEGACY_SZ;
			dev->caps.num_comp_vectors = MSIX_LEGACY_SZ - 1;
		}
		for (i = 0; i < nreq; ++i)
			priv->eq_table.eq[i].irq = entries[i].vector;

		dev->flags |= MLX4_FLAG_MSI_X;

		kfree(entries);
		return;
	}

no_msi:
	dev->caps.num_comp_vectors = 1;
	dev->caps.comp_pool	   = 0;

	for (i = 0; i < 2; ++i)
		priv->eq_table.eq[i].irq = dev->pdev->irq;
}
static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
{
	struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
	int err = 0;

	info->dev = dev;
	info->port = port;
	if (!mlx4_is_slave(dev)) {
		mlx4_init_mac_table(dev, &info->mac_table);
		mlx4_init_vlan_table(dev, &info->vlan_table);
		mlx4_init_roce_gid_table(dev, &info->gid_table);
		info->base_qpn = mlx4_get_base_qpn(dev, port);
	}

	sprintf(info->dev_name, "mlx4_port%d", port);
	info->port_attr.attr.name = info->dev_name;
	if (mlx4_is_mfunc(dev)) {
		info->port_attr.attr.mode = S_IRUGO;
	} else {
		info->port_attr.attr.mode = S_IRUGO | S_IWUSR;
		info->port_attr.store     = set_port_type;
	}
	info->port_attr.show      = show_port_type;
	sysfs_attr_init(&info->port_attr.attr);

	err = device_create_file(&dev->pdev->dev, &info->port_attr);
	if (err) {
		mlx4_err(dev, "Failed to create file for port %d\n", port);
		info->port = -1;
	}

	sprintf(info->dev_mtu_name, "mlx4_port%d_mtu", port);
	info->port_mtu_attr.attr.name = info->dev_mtu_name;
	if (mlx4_is_mfunc(dev)) {
		info->port_mtu_attr.attr.mode = S_IRUGO;
	} else {
		info->port_mtu_attr.attr.mode = S_IRUGO | S_IWUSR;
		info->port_mtu_attr.store     = set_port_ib_mtu;
	}
	info->port_mtu_attr.show      = show_port_ib_mtu;
	sysfs_attr_init(&info->port_mtu_attr.attr);

	err = device_create_file(&dev->pdev->dev, &info->port_mtu_attr);
	if (err) {
		mlx4_err(dev, "Failed to create mtu file for port %d\n", port);
		device_remove_file(&info->dev->pdev->dev, &info->port_attr);
		info->port = -1;
	}

	return err;
}

static void mlx4_cleanup_port_info(struct mlx4_port_info *info)
{
	if (info->port < 0)
		return;

	device_remove_file(&info->dev->pdev->dev, &info->port_attr);
	device_remove_file(&info->dev->pdev->dev, &info->port_mtu_attr);
}
static int mlx4_init_steering(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int num_entries = dev->caps.num_ports;
	int i, j;

	priv->steer = kzalloc(sizeof(struct mlx4_steer) * num_entries, GFP_KERNEL);
	if (!priv->steer)
		return -ENOMEM;

	for (i = 0; i < num_entries; i++)
		for (j = 0; j < MLX4_NUM_STEERS; j++) {
			INIT_LIST_HEAD(&priv->steer[i].promisc_qps[j]);
			INIT_LIST_HEAD(&priv->steer[i].steer_entries[j]);
		}
	return 0;
}

static void mlx4_clear_steering(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_steer_index *entry, *tmp_entry;
	struct mlx4_promisc_qp *pqp, *tmp_pqp;
	int num_entries = dev->caps.num_ports;
	int i, j;

	for (i = 0; i < num_entries; i++) {
		for (j = 0; j < MLX4_NUM_STEERS; j++) {
			list_for_each_entry_safe(pqp, tmp_pqp,
						 &priv->steer[i].promisc_qps[j],
						 list) {
				list_del(&pqp->list);
				kfree(pqp);
			}
			list_for_each_entry_safe(entry, tmp_entry,
						 &priv->steer[i].steer_entries[j],
						 list) {
				list_del(&entry->list);
				list_for_each_entry_safe(pqp, tmp_pqp,
							 &entry->duplicates,
							 list) {
					list_del(&pqp->list);
					kfree(pqp);
				}
				kfree(entry);
			}
		}
	}
	kfree(priv->steer);
}

static int extended_func_num(struct pci_dev *pdev)
{
	return PCI_SLOT(pdev->devfn) * 8 + PCI_FUNC(pdev->devfn);
}
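
/*
 * PCI encodes device and function in a single byte: devfn = slot * 8 +
 * function. extended_func_num() above linearizes that encoding so VFs
 * spilling over into higher slots still get consecutive numbers. For
 * example, devfn 0x11 (slot 2, function 1) yields 2 * 8 + 1 = 17.
 */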
#define MLX4_OWNER_BASE	0x8069c
#define MLX4_OWNER_SIZE	4

static int mlx4_get_ownership(struct mlx4_dev *dev)
{
	void __iomem *owner;
	u32 ret;

	if (pci_channel_offline(dev->pdev))
		return -EIO;

	owner = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_OWNER_BASE,
			MLX4_OWNER_SIZE);
	if (!owner) {
		mlx4_err(dev, "Failed to obtain ownership bit\n");
		return -ENOMEM;
	}

	ret = readl(owner);
	iounmap(owner);
	return (int) !!ret;
}

static void mlx4_free_ownership(struct mlx4_dev *dev)
{
	void __iomem *owner;

	if (pci_channel_offline(dev->pdev))
		return;

	owner = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_OWNER_BASE,
			MLX4_OWNER_SIZE);
	if (!owner) {
		mlx4_err(dev, "Failed to obtain ownership bit\n");
		return;
	}

	writel(0, owner);
	msleep(1000);
	iounmap(owner);
}

#define SRIOV_VALID_STATE(flags) (!!((flags) & MLX4_FLAG_SRIOV) ==\
				  !!((flags) & MLX4_FLAG_MASTER))
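
/*
 * The word at MLX4_OWNER_BASE in BAR 0 acts as a hardware semaphore: a
 * non-zero read in mlx4_get_ownership() means another physical function
 * already owns the device, and writing zero releases it.
 * SRIOV_VALID_STATE() accepts only the two consistent flag combinations,
 * SR-IOV enabled on a master or SR-IOV disabled on a non-master; a mixed
 * state indicates a failed enable or fallback and aborts the load.
 */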
static u64 mlx4_enable_sriov(struct mlx4_dev *dev, struct pci_dev *pdev,
			     u8 total_vfs, int existing_vfs)
{
	u64 dev_flags = dev->flags;
	int err = 0;

	atomic_inc(&pf_loading);
	if (dev->flags & MLX4_FLAG_SRIOV) {
		if (existing_vfs != total_vfs) {
			mlx4_err(dev, "SR-IOV was already enabled, but with num_vfs (%d) different than requested (%d)\n",
				 existing_vfs, total_vfs);
			total_vfs = existing_vfs;
		}
	}

	dev->dev_vfs = kzalloc(total_vfs * sizeof(*dev->dev_vfs), GFP_KERNEL);
	if (NULL == dev->dev_vfs) {
		mlx4_err(dev, "Failed to allocate memory for VFs\n");
		goto disable_sriov;
	}

	if (!(dev->flags & MLX4_FLAG_SRIOV)) {
		mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n", total_vfs);
		err = pci_enable_sriov(pdev, total_vfs);
	}
	if (err) {
		mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d)\n",
			 err);
		goto disable_sriov;
	}

	mlx4_warn(dev, "Running in master mode\n");
	dev_flags |= MLX4_FLAG_SRIOV |
		MLX4_FLAG_MASTER;
	dev_flags &= ~MLX4_FLAG_SLAVE;
	dev->num_vfs = total_vfs;
	return dev_flags;

disable_sriov:
	atomic_dec(&pf_loading);
	dev->num_vfs = 0;
	kfree(dev->dev_vfs);
	return dev_flags & ~MLX4_FLAG_MASTER;
}

enum {
	MLX4_DEV_CAP_CHECK_NUM_VFS_ABOVE_64 = -1,
};
static int mlx4_check_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
			      int *nvfs)
{
	int requested_vfs = nvfs[0] + nvfs[1] + nvfs[2];

	/* Checking for 64 VFs as a limitation of CX2 */
	if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_80_VFS) &&
	    requested_vfs >= 64) {
		mlx4_err(dev, "Requested %d VFs, but FW does not support more than 64\n",
			 requested_vfs);
		return MLX4_DEV_CAP_CHECK_NUM_VFS_ABOVE_64;
	}
	return 0;
}
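
/*
 * mlx4_load_one() below drives the whole bring-up: claim ownership and
 * reset the HCA (PF only), init the command interface, query device
 * capabilities, enable SR-IOV when requested, run INIT_HCA through
 * mlx4_init_hca(), and then build the EQ, steering and HCA tables. When
 * mlx4_init_hca() returns -EACCES the function is not the primary PF, so
 * the code drops to slave mode and retries from the slave_start label.
 */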
static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data,
			 int total_vfs, int *nvfs, struct mlx4_priv *priv)
{
	struct mlx4_dev *dev;
	unsigned sum = 0;
	int err;
	int port;
	int i;
	struct mlx4_dev_cap *dev_cap = NULL;
	int existing_vfs = 0;

	dev = &priv->dev;

	INIT_LIST_HEAD(&priv->ctx_list);
	spin_lock_init(&priv->ctx_lock);

	mutex_init(&priv->port_mutex);

	INIT_LIST_HEAD(&priv->pgdir_list);
	mutex_init(&priv->pgdir_mutex);

	INIT_LIST_HEAD(&priv->bf_list);
	mutex_init(&priv->bf_mutex);

	dev->rev_id = pdev->revision;
	dev->numa_node = dev_to_node(&pdev->dev);

	/* Detect if this device is a virtual function */
	if (pci_dev_data & MLX4_PCI_DEV_IS_VF) {
		mlx4_warn(dev, "Detected virtual function - running in slave mode\n");
		dev->flags |= MLX4_FLAG_SLAVE;
	} else {
		/* We reset the device and enable SRIOV only for physical
		 * devices.  Try to claim ownership on the device;
		 * if already taken, skip -- do not allow multiple PFs */
		err = mlx4_get_ownership(dev);
		if (err) {
			if (err < 0)
				return err;
			mlx4_warn(dev, "Multiple PFs not yet supported - Skipping PF\n");
			return -EINVAL;
		}

		atomic_set(&priv->opreq_count, 0);
		INIT_WORK(&priv->opreq_task, mlx4_opreq_action);

		/*
		 * Now reset the HCA before we touch the PCI capabilities or
		 * attempt a firmware command, since a boot ROM may have left
		 * the HCA in an undefined state.
		 */
		err = mlx4_reset(dev);
		if (err) {
			mlx4_err(dev, "Failed to reset HCA, aborting\n");
			goto err_sriov;
		}

		if (total_vfs) {
			dev->flags = MLX4_FLAG_MASTER;
			existing_vfs = pci_num_vf(pdev);
			if (existing_vfs)
				dev->flags |= MLX4_FLAG_SRIOV;
			dev->num_vfs = total_vfs;
		}
	}

slave_start:
	err = mlx4_cmd_init(dev);
	if (err) {
		mlx4_err(dev, "Failed to init command interface, aborting\n");
		goto err_sriov;
	}

	/* In slave functions, the communication channel must be initialized
	 * before posting commands. Also, init num_slaves before calling
	 * mlx4_init_hca */
	if (mlx4_is_mfunc(dev)) {
		if (mlx4_is_master(dev)) {
			dev->num_slaves = MLX4_MAX_NUM_SLAVES;
		} else {
			dev->num_slaves = 0;
			err = mlx4_multi_func_init(dev);
			if (err) {
				mlx4_err(dev, "Failed to init slave mfunc interface, aborting\n");
				goto err_cmd;
			}
		}
	}

	err = mlx4_init_fw(dev);
	if (err) {
		mlx4_err(dev, "Failed to init fw, aborting.\n");
		goto err_mfunc;
	}

	if (mlx4_is_master(dev)) {
		/* when we hit the goto slave_start below, dev_cap already initialized */
		if (!dev_cap) {
			dev_cap = kzalloc(sizeof(*dev_cap), GFP_KERNEL);
			if (!dev_cap) {
				err = -ENOMEM;
				goto err_fw;
			}

			err = mlx4_QUERY_DEV_CAP(dev, dev_cap);
			if (err) {
				mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
				goto err_fw;
			}

			if (mlx4_check_dev_cap(dev, dev_cap, nvfs))
				goto err_fw;

			if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS)) {
				u64 dev_flags = mlx4_enable_sriov(dev, pdev, total_vfs,
								  existing_vfs);

				mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL);
				dev->flags = dev_flags;
				if (!SRIOV_VALID_STATE(dev->flags)) {
					mlx4_err(dev, "Invalid SRIOV state\n");
					goto err_sriov;
				}
				err = mlx4_reset(dev);
				if (err) {
					mlx4_err(dev, "Failed to reset HCA, aborting.\n");
					goto err_sriov;
				}
				goto slave_start;
			}
		} else {
			/* Legacy mode FW requires SRIOV to be enabled before
			 * doing QUERY_DEV_CAP, since max_eq's value is different if
			 * SRIOV is enabled.
			 */
			memset(dev_cap, 0, sizeof(*dev_cap));
			err = mlx4_QUERY_DEV_CAP(dev, dev_cap);
			if (err) {
				mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
				goto err_fw;
			}

			if (mlx4_check_dev_cap(dev, dev_cap, nvfs))
				goto err_fw;
		}
	}

	err = mlx4_init_hca(dev);
	if (err) {
		if (err == -EACCES) {
			/* Not primary Physical function
			 * Running in slave mode */
			mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL);
			/* We're not a PF */
			if (dev->flags & MLX4_FLAG_SRIOV) {
				if (!existing_vfs)
					pci_disable_sriov(pdev);
				if (mlx4_is_master(dev))
					atomic_dec(&pf_loading);
				dev->flags &= ~MLX4_FLAG_SRIOV;
			}
			if (!mlx4_is_slave(dev))
				mlx4_free_ownership(dev);
			dev->flags |= MLX4_FLAG_SLAVE;
			dev->flags &= ~MLX4_FLAG_MASTER;
			goto slave_start;
		} else {
			goto err_fw;
		}
	}

	if (mlx4_is_master(dev) && (dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS)) {
		u64 dev_flags = mlx4_enable_sriov(dev, pdev, total_vfs, existing_vfs);

		if ((dev->flags ^ dev_flags) & (MLX4_FLAG_MASTER | MLX4_FLAG_SLAVE)) {
			mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_VHCR);
			dev->flags = dev_flags;
			err = mlx4_cmd_init(dev);
			if (err) {
				/* Only VHCR is cleaned up, so could still
				 * send FW commands
				 */
				mlx4_err(dev, "Failed to init VHCR command interface, aborting\n");
				goto err_close;
			}
		} else {
			dev->flags = dev_flags;
		}

		if (!SRIOV_VALID_STATE(dev->flags)) {
			mlx4_err(dev, "Invalid SRIOV state\n");
			goto err_close;
		}
	}

	/* Check if the device is functioning at its maximum possible speed.
	 * No return code for this call, just warn the user in case the PCI
	 * express device capabilities are under-satisfied by the bus.
	 */
	if (!mlx4_is_slave(dev))
		mlx4_check_pcie_caps(dev);

	/* In master functions, the communication channel must be initialized
	 * after obtaining its address from fw */
	if (mlx4_is_master(dev)) {
		int ib_ports = 0;

		mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
			ib_ports++;

		if (ib_ports &&
		    (num_vfs_argc > 1 || probe_vfs_argc > 1)) {
			mlx4_err(dev,
				 "Invalid syntax of num_vfs/probe_vfs with IB port - single port VFs syntax is only supported when all ports are configured as ethernet\n");
			err = -EINVAL;
			goto err_close;
		}
		if (dev->caps.num_ports < 2 &&
		    num_vfs_argc > 1) {
			err = -EINVAL;
			mlx4_err(dev,
				 "Error: Trying to configure VFs on port 2, but HCA has only %d physical ports\n",
				 dev->caps.num_ports);
			goto err_close;
		}
		memcpy(dev->nvfs, nvfs, sizeof(dev->nvfs));

		for (i = 0; i < sizeof(dev->nvfs)/sizeof(dev->nvfs[0]); i++) {
			unsigned j;

			for (j = 0; j < dev->nvfs[i]; ++sum, ++j) {
				dev->dev_vfs[sum].min_port = i < 2 ? i + 1 : 1;
				dev->dev_vfs[sum].n_ports = i < 2 ? 1 :
					dev->caps.num_ports;
			}
		}

		/* In master functions, the communication channel
		 * must be initialized after obtaining its address from fw
		 */
		err = mlx4_multi_func_init(dev);
		if (err) {
			mlx4_err(dev, "Failed to init master mfunc interface, aborting.\n");
			goto err_close;
		}
	}

	err = mlx4_alloc_eq_table(dev);
	if (err)
		goto err_master_mfunc;

	priv->msix_ctl.pool_bm = 0;
	mutex_init(&priv->msix_ctl.pool_lock);

	mlx4_enable_msi_x(dev);
	if ((mlx4_is_mfunc(dev)) &&
	    !(dev->flags & MLX4_FLAG_MSI_X)) {
		err = -ENOSYS;
		mlx4_err(dev, "INTx is not supported in multi-function mode, aborting\n");
		goto err_free_eq;
	}

	if (!mlx4_is_slave(dev)) {
		err = mlx4_init_steering(dev);
		if (err)
			goto err_disable_msix;
	}

	err = mlx4_setup_hca(dev);
	if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X) &&
	    !mlx4_is_mfunc(dev)) {
		dev->flags &= ~MLX4_FLAG_MSI_X;
		dev->caps.num_comp_vectors = 1;
		dev->caps.comp_pool	   = 0;
		pci_disable_msix(pdev);
		err = mlx4_setup_hca(dev);
	}

	if (err)
		goto err_steer;

	mlx4_init_quotas(dev);

	for (port = 1; port <= dev->caps.num_ports; port++) {
		err = mlx4_init_port_info(dev, port);
		if (err)
			goto err_port;
	}

	err = mlx4_register_device(dev);
	if (err)
		goto err_port;

	mlx4_request_modules(dev);

	mlx4_sense_init(dev);
	mlx4_start_sense(dev);

	priv->removed = 0;

	if (mlx4_is_master(dev) && dev->num_vfs)
		atomic_dec(&pf_loading);

	kfree(dev_cap);
	return 0;

err_port:
	for (--port; port >= 1; --port)
		mlx4_cleanup_port_info(&priv->port[port]);

	mlx4_cleanup_counters_table(dev);
	mlx4_cleanup_qp_table(dev);
	mlx4_cleanup_srq_table(dev);
	mlx4_cleanup_cq_table(dev);
	mlx4_cmd_use_polling(dev);
	mlx4_cleanup_eq_table(dev);
	mlx4_cleanup_mcg_table(dev);
	mlx4_cleanup_mr_table(dev);
	mlx4_cleanup_xrcd_table(dev);
	mlx4_cleanup_pd_table(dev);
	mlx4_cleanup_uar_table(dev);

err_steer:
	if (!mlx4_is_slave(dev))
		mlx4_clear_steering(dev);

err_disable_msix:
	if (dev->flags & MLX4_FLAG_MSI_X)
		pci_disable_msix(pdev);

err_free_eq:
	mlx4_free_eq_table(dev);

err_master_mfunc:
	if (mlx4_is_master(dev))
		mlx4_multi_func_cleanup(dev);

	if (mlx4_is_slave(dev)) {
		kfree(dev->caps.qp0_qkey);
		kfree(dev->caps.qp0_tunnel);
		kfree(dev->caps.qp0_proxy);
		kfree(dev->caps.qp1_tunnel);
		kfree(dev->caps.qp1_proxy);
	}

err_close:
	mlx4_close_hca(dev);

err_fw:
	mlx4_close_fw(dev);

err_mfunc:
	if (mlx4_is_slave(dev))
		mlx4_multi_func_cleanup(dev);

err_cmd:
	mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL);

err_sriov:
	if (dev->flags & MLX4_FLAG_SRIOV && !existing_vfs)
		pci_disable_sriov(pdev);

	if (mlx4_is_master(dev) && dev->num_vfs)
		atomic_dec(&pf_loading);

	kfree(priv->dev.dev_vfs);

	if (!mlx4_is_slave(dev))
		mlx4_free_ownership(dev);

	kfree(dev_cap);
	return err;
}
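
/*
 * The param_map table below translates the positional num_vfs/probe_vf
 * module arguments into per-port slots: nvfs[0] is port 1, nvfs[1] is
 * port 2 and nvfs[2] means "both ports". With a single argument the count
 * lands in slot 2 (dual-port VFs); with two or three arguments they map
 * to port 1, port 2 and both ports, in order. For example (an assumed
 * invocation, not from this file), num_vfs=1,2,3 requests one VF on
 * port 1, two on port 2 and three spanning both ports.
 */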
static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data,
			   struct mlx4_priv *priv)
{
	int err;
	int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
	int prb_vf[MLX4_MAX_PORTS + 1] = {0, 0, 0};
	const int param_map[MLX4_MAX_PORTS + 1][MLX4_MAX_PORTS + 1] = {
		{2, 0, 0}, {0, 1, 2}, {0, 1, 2} };
	unsigned total_vfs = 0;
	unsigned int i;

	pr_info(DRV_NAME ": Initializing %s\n", pci_name(pdev));

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
		return err;
	}

	/* Due to requirement that all VFs and the PF are *guaranteed* 2 MACS
	 * per port, we must limit the number of VFs to 63 (since there are
	 * 128 MACs)
	 */
	for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]) && i < num_vfs_argc;
	     total_vfs += nvfs[param_map[num_vfs_argc - 1][i]], i++) {
		nvfs[param_map[num_vfs_argc - 1][i]] = num_vfs[i];
		if (nvfs[i] < 0) {
			dev_err(&pdev->dev, "num_vfs module parameter cannot be negative\n");
			err = -EINVAL;
			goto err_disable_pdev;
		}
	}
	for (i = 0; i < sizeof(prb_vf)/sizeof(prb_vf[0]) && i < probe_vfs_argc;
	     i++) {
		prb_vf[param_map[probe_vfs_argc - 1][i]] = probe_vf[i];
		if (prb_vf[i] < 0 || prb_vf[i] > nvfs[i]) {
			dev_err(&pdev->dev, "probe_vf module parameter cannot be negative or greater than num_vfs\n");
			err = -EINVAL;
			goto err_disable_pdev;
		}
	}
	if (total_vfs >= MLX4_MAX_NUM_VF) {
		dev_err(&pdev->dev,
			"Requested more VF's (%d) than allowed (%d)\n",
			total_vfs, MLX4_MAX_NUM_VF - 1);
		err = -EINVAL;
		goto err_disable_pdev;
	}

	for (i = 0; i < MLX4_MAX_PORTS; i++) {
		if (nvfs[i] + nvfs[2] >= MLX4_MAX_NUM_VF_P_PORT) {
			dev_err(&pdev->dev,
				"Requested more VF's (%d) for port (%d) than allowed (%d)\n",
				nvfs[i] + nvfs[2], i + 1,
				MLX4_MAX_NUM_VF_P_PORT - 1);
			err = -EINVAL;
			goto err_disable_pdev;
		}
	}

	/* Check for BARs. */
	if (!(pci_dev_data & MLX4_PCI_DEV_IS_VF) &&
	    !(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "Missing DCS, aborting (driver_data: 0x%x, pci_resource_flags(pdev, 0):0x%lx)\n",
			pci_dev_data, pci_resource_flags(pdev, 0));
		err = -ENODEV;
		goto err_disable_pdev;
	}
	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "Missing UAR, aborting\n");
		err = -ENODEV;
		goto err_disable_pdev;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "Couldn't get PCI resources, aborting\n");
		goto err_disable_pdev;
	}

	pci_set_master(pdev);

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask\n");
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting\n");
			goto err_release_regions;
		}
	}
	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		dev_warn(&pdev->dev, "Warning: couldn't set 64-bit consistent PCI DMA mask\n");
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, aborting\n");
			goto err_release_regions;
		}
	}

	/* Allow large DMA segments, up to the firmware limit of 1 GB */
	dma_set_max_seg_size(&pdev->dev, 1024 * 1024 * 1024);

	/* Detect if this device is a virtual function */
	if (pci_dev_data & MLX4_PCI_DEV_IS_VF) {
		/* When acting as pf, we normally skip vfs unless explicitly
		 * requested to probe them.
		 */
		if (total_vfs) {
			unsigned vfs_offset = 0;

			for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]) &&
			     vfs_offset + nvfs[i] < extended_func_num(pdev);
			     vfs_offset += nvfs[i], i++)
				;
			if (i == sizeof(nvfs)/sizeof(nvfs[0])) {
				err = -ENODEV;
				goto err_release_regions;
			}
			if ((extended_func_num(pdev) - vfs_offset)
			    > prb_vf[i]) {
				dev_warn(&pdev->dev, "Skipping virtual function:%d\n",
					 extended_func_num(pdev));
				err = -ENODEV;
				goto err_release_regions;
			}
		}
	}

	err = mlx4_load_one(pdev, pci_dev_data, total_vfs, nvfs, priv);
	if (err)
		goto err_release_regions;
	return 0;

err_release_regions:
	pci_release_regions(pdev);

err_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct mlx4_priv *priv;
	struct mlx4_dev *dev;
	int ret;

	printk_once(KERN_INFO "%s", mlx4_version);

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	dev = &priv->dev;
	pci_set_drvdata(pdev, dev);
	priv->pci_dev_data = id->driver_data;

	ret = __mlx4_init_one(pdev, id->driver_data, priv);
	if (ret)
		kfree(priv);

	return ret;
}
static void mlx4_unload_one(struct pci_dev *pdev)
{
	struct mlx4_dev  *dev  = pci_get_drvdata(pdev);
	struct mlx4_priv *priv = mlx4_priv(dev);
	int               pci_dev_data;
	int p;
	int active_vfs = 0;

	if (priv->removed)
		return;

	pci_dev_data = priv->pci_dev_data;

	/* Disabling SR-IOV is not allowed while there are active vf's */
	if (mlx4_is_master(dev)) {
		active_vfs = mlx4_how_many_lives_vf(dev);
		if (active_vfs) {
			pr_warn("Removing PF when there are active VF's !!\n");
			pr_warn("Will not disable SR-IOV.\n");
		}
	}
	mlx4_stop_sense(dev);
	mlx4_unregister_device(dev);

	for (p = 1; p <= dev->caps.num_ports; p++) {
		mlx4_cleanup_port_info(&priv->port[p]);
		mlx4_CLOSE_PORT(dev, p);
	}

	if (mlx4_is_master(dev))
		mlx4_free_resource_tracker(dev,
					   RES_TR_FREE_SLAVES_ONLY);

	mlx4_cleanup_counters_table(dev);
	mlx4_cleanup_qp_table(dev);
	mlx4_cleanup_srq_table(dev);
	mlx4_cleanup_cq_table(dev);
	mlx4_cmd_use_polling(dev);
	mlx4_cleanup_eq_table(dev);
	mlx4_cleanup_mcg_table(dev);
	mlx4_cleanup_mr_table(dev);
	mlx4_cleanup_xrcd_table(dev);
	mlx4_cleanup_pd_table(dev);

	if (mlx4_is_master(dev))
		mlx4_free_resource_tracker(dev,
					   RES_TR_FREE_STRUCTS_ONLY);

	iounmap(priv->kar);
	mlx4_uar_free(dev, &priv->driver_uar);
	mlx4_cleanup_uar_table(dev);
	if (!mlx4_is_slave(dev))
		mlx4_clear_steering(dev);
	mlx4_free_eq_table(dev);
	if (mlx4_is_master(dev))
		mlx4_multi_func_cleanup(dev);
	mlx4_close_hca(dev);
	mlx4_close_fw(dev);
	if (mlx4_is_slave(dev))
		mlx4_multi_func_cleanup(dev);
	mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL);

	if (dev->flags & MLX4_FLAG_MSI_X)
		pci_disable_msix(pdev);
	if (dev->flags & MLX4_FLAG_SRIOV && !active_vfs) {
		mlx4_warn(dev, "Disabling SR-IOV\n");
		pci_disable_sriov(pdev);
		dev->flags &= ~MLX4_FLAG_SRIOV;
		dev->num_vfs = 0;
	}

	if (!mlx4_is_slave(dev))
		mlx4_free_ownership(dev);

	kfree(dev->caps.qp0_qkey);
	kfree(dev->caps.qp0_tunnel);
	kfree(dev->caps.qp0_proxy);
	kfree(dev->caps.qp1_tunnel);
	kfree(dev->caps.qp1_proxy);
	kfree(dev->dev_vfs);

	memset(priv, 0, sizeof(*priv));
	priv->pci_dev_data = pci_dev_data;
	priv->removed = 1;
}
static void mlx4_remove_one(struct pci_dev *pdev)
{
	struct mlx4_dev  *dev  = pci_get_drvdata(pdev);
	struct mlx4_priv *priv = mlx4_priv(dev);

	mlx4_unload_one(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	kfree(priv);
	pci_set_drvdata(pdev, NULL);
}
int mlx4_restart_one(struct pci_dev *pdev)
{
	struct mlx4_dev	 *dev  = pci_get_drvdata(pdev);
	struct mlx4_priv *priv = mlx4_priv(dev);
	int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
	int pci_dev_data, err, total_vfs;

	pci_dev_data = priv->pci_dev_data;
	total_vfs = dev->num_vfs;
	memcpy(nvfs, dev->nvfs, sizeof(dev->nvfs));

	mlx4_unload_one(pdev);
	err = mlx4_load_one(pdev, pci_dev_data, total_vfs, nvfs, priv);
	if (err) {
		mlx4_err(dev, "%s: ERROR: mlx4_load_one failed, pci_name=%s, err=%d\n",
			 __func__, pci_name(pdev), err);
		return err;
	}

	return err;
}
static const struct pci_device_id mlx4_pci_table[] = {
	/* MT25408 "Hermon" SDR */
	{ PCI_VDEVICE(MELLANOX, 0x6340), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT25408 "Hermon" DDR */
	{ PCI_VDEVICE(MELLANOX, 0x634a), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT25408 "Hermon" QDR */
	{ PCI_VDEVICE(MELLANOX, 0x6354), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT25408 "Hermon" DDR PCIe gen2 */
	{ PCI_VDEVICE(MELLANOX, 0x6732), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT25408 "Hermon" QDR PCIe gen2 */
	{ PCI_VDEVICE(MELLANOX, 0x673c), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT25408 "Hermon" EN 10GigE */
	{ PCI_VDEVICE(MELLANOX, 0x6368), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT25408 "Hermon" EN 10GigE PCIe gen2 */
	{ PCI_VDEVICE(MELLANOX, 0x6750), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT25458 ConnectX EN 10GBASE-T 10GigE */
	{ PCI_VDEVICE(MELLANOX, 0x6372), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT25458 ConnectX EN 10GBASE-T+Gen2 10GigE */
	{ PCI_VDEVICE(MELLANOX, 0x675a), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT26468 ConnectX EN 10GigE PCIe gen2 */
	{ PCI_VDEVICE(MELLANOX, 0x6764), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT26438 ConnectX EN 40GigE PCIe gen2 5GT/s */
	{ PCI_VDEVICE(MELLANOX, 0x6746), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT26478 ConnectX2 40GigE PCIe gen2 */
	{ PCI_VDEVICE(MELLANOX, 0x676e), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT25400 Family [ConnectX-2 Virtual Function] */
	{ PCI_VDEVICE(MELLANOX, 0x1002), MLX4_PCI_DEV_IS_VF },
	/* MT27500 Family [ConnectX-3] */
	{ PCI_VDEVICE(MELLANOX, 0x1003), 0 },
	/* MT27500 Family [ConnectX-3 Virtual Function] */
	{ PCI_VDEVICE(MELLANOX, 0x1004), MLX4_PCI_DEV_IS_VF },
	{ PCI_VDEVICE(MELLANOX, 0x1005), 0 }, /* MT27510 Family */
	{ PCI_VDEVICE(MELLANOX, 0x1006), 0 }, /* MT27511 Family */
	{ PCI_VDEVICE(MELLANOX, 0x1007), 0 }, /* MT27520 Family */
	{ PCI_VDEVICE(MELLANOX, 0x1008), 0 }, /* MT27521 Family */
	{ PCI_VDEVICE(MELLANOX, 0x1009), 0 }, /* MT27530 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100a), 0 }, /* MT27531 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100b), 0 }, /* MT27540 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100c), 0 }, /* MT27541 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100d), 0 }, /* MT27550 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100e), 0 }, /* MT27551 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100f), 0 }, /* MT27560 Family */
	{ PCI_VDEVICE(MELLANOX, 0x1010), 0 }, /* MT27561 Family */
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, mlx4_pci_table);
static pci_ers_result_t mlx4_pci_err_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	mlx4_unload_one(pdev);

	return state == pci_channel_io_perm_failure ?
		PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t mlx4_pci_slot_reset(struct pci_dev *pdev)
{
	struct mlx4_dev	 *dev  = pci_get_drvdata(pdev);
	struct mlx4_priv *priv = mlx4_priv(dev);
	int               ret;

	ret = __mlx4_init_one(pdev, priv->pci_dev_data, priv);

	return ret ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
}
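
/*
 * AER recovery flow: on a reported PCI channel error the device is torn
 * down by mlx4_pci_err_detected(), which requests a slot reset unless the
 * failure is permanent; after the reset, mlx4_pci_slot_reset() re-runs
 * the normal probe path through __mlx4_init_one() and reports DISCONNECT
 * only if that re-initialization fails.
 */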
static const struct pci_error_handlers mlx4_err_handler = {
	.error_detected = mlx4_pci_err_detected,
	.slot_reset     = mlx4_pci_slot_reset,
};

static struct pci_driver mlx4_driver = {
	.name		= DRV_NAME,
	.id_table	= mlx4_pci_table,
	.probe		= mlx4_init_one,
	.shutdown	= mlx4_unload_one,
	.remove		= mlx4_remove_one,
	.err_handler    = &mlx4_err_handler,
};
static int __init mlx4_verify_params(void)
{
	if ((log_num_mac < 0) || (log_num_mac > 7)) {
		pr_warn("mlx4_core: bad num_mac: %d\n", log_num_mac);
		return -1;
	}

	if (log_num_vlan != 0)
		pr_warn("mlx4_core: log_num_vlan - obsolete module param, using %d\n",
			MLX4_LOG_NUM_VLANS);

	if (use_prio != 0)
		pr_warn("mlx4_core: use_prio - obsolete module param, ignored\n");

	if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 7)) {
		pr_warn("mlx4_core: bad log_mtts_per_seg: %d\n",
			log_mtts_per_seg);
		return -1;
	}

	/* Check if module param for ports type has legal combination */
	if (port_type_array[0] == false && port_type_array[1] == true) {
		pr_warn("Module parameter configuration ETH/IB is not supported. Switching to default configuration IB/IB\n");
		port_type_array[0] = true;
	}

	if (mlx4_log_num_mgm_entry_size < -7 ||
	    (mlx4_log_num_mgm_entry_size > 0 &&
	     (mlx4_log_num_mgm_entry_size < MLX4_MIN_MGM_LOG_ENTRY_SIZE ||
	      mlx4_log_num_mgm_entry_size > MLX4_MAX_MGM_LOG_ENTRY_SIZE))) {
		pr_warn("mlx4_core: mlx4_log_num_mgm_entry_size (%d) not in legal range (-7..0 or %d..%d)\n",
			mlx4_log_num_mgm_entry_size,
			MLX4_MIN_MGM_LOG_ENTRY_SIZE,
			MLX4_MAX_MGM_LOG_ENTRY_SIZE);
		return -1;
	}

	return 0;
}
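
/*
 * Worked example for the range check above (an assumed invocation, not
 * from this file):
 *
 *	modprobe mlx4_core log_num_mgm_entry_size=-1
 *
 * is accepted, since any value in -7..0 passes, while a positive value
 * must lie between MLX4_MIN_MGM_LOG_ENTRY_SIZE and
 * MLX4_MAX_MGM_LOG_ENTRY_SIZE or the module refuses to load.
 */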
static int __init mlx4_init(void)
{
	int ret;

	if (mlx4_verify_params())
		return -EINVAL;

	mlx4_wq = create_singlethread_workqueue("mlx4");
	if (!mlx4_wq)
		return -ENOMEM;

	ret = pci_register_driver(&mlx4_driver);
	if (ret < 0)
		destroy_workqueue(mlx4_wq);
	return ret < 0 ? ret : 0;
}

static void __exit mlx4_cleanup(void)
{
	pci_unregister_driver(&mlx4_driver);
	destroy_workqueue(mlx4_wq);
}

module_init(mlx4_init);
module_exit(mlx4_cleanup);