/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/io-mapping.h>
#include <linux/delay.h>

#include <linux/mlx4/device.h>
#include <linux/mlx4/doorbell.h>

#include "mlx4.h"
#include "fw.h"
#include "icm.h"
MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("Mellanox ConnectX HCA low-level driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);

struct workqueue_struct *mlx4_wq;
#ifdef CONFIG_MLX4_DEBUG

int mlx4_debug_level = 0;
module_param_named(debug_level, mlx4_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");

#endif /* CONFIG_MLX4_DEBUG */
#ifdef CONFIG_PCI_MSI

static int msi_x = 1;
module_param(msi_x, int, 0444);
MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero");

#else /* CONFIG_PCI_MSI */

#define msi_x (0)

#endif /* CONFIG_PCI_MSI */
static int num_vfs;
module_param(num_vfs, int, 0444);
MODULE_PARM_DESC(num_vfs, "enable #num_vfs functions if num_vfs > 0");

static int probe_vf;
module_param(probe_vf, int, 0644);
MODULE_PARM_DESC(probe_vf, "number of vfs to probe by pf driver (num_vfs > 0)");
int mlx4_log_num_mgm_entry_size = 10;
module_param_named(log_num_mgm_entry_size,
		   mlx4_log_num_mgm_entry_size, int, 0444);
MODULE_PARM_DESC(log_num_mgm_entry_size, "log mgm size, which defines the"
					 " number of qp per mcg, for example:"
					 " 10 gives 248. range: 9 <="
					 " log_num_mgm_entry_size <= 12");
#define MLX4_VF				(1 << 0)

#define HCA_GLOBAL_CAP_MASK		0
#define PF_CONTEXT_BEHAVIOUR_MASK	0
static char mlx4_version[] __devinitdata =
	DRV_NAME ": Mellanox ConnectX core driver v"
	DRV_VERSION " (" DRV_RELDATE ")\n";

static struct mlx4_profile default_profile = {
	.num_qp		= 1 << 17,
	.num_srq	= 1 << 16,
	.rdmarc_per_qp	= 1 << 4,
	.num_cq		= 1 << 16,
	.num_mcg	= 1 << 13,
	.num_mpt	= 1 << 19,
	.num_mtt	= 1 << 20, /* It is really num mtt segments */
};
static int log_num_mac = 7;
module_param_named(log_num_mac, log_num_mac, int, 0444);
MODULE_PARM_DESC(log_num_mac, "Log2 max number of MACs per ETH port (1-7)");

static int log_num_vlan;
module_param_named(log_num_vlan, log_num_vlan, int, 0444);
MODULE_PARM_DESC(log_num_vlan, "Log2 max number of VLANs per ETH port (0-7)");
/* Log2 max number of VLANs per ETH port (0-7) */
#define MLX4_LOG_NUM_VLANS 7
static bool use_prio;
module_param_named(use_prio, use_prio, bool, 0444);
MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports "
		  "(0/1, default 0)");
int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG);
module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444);
MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-7)");
static int port_type_array[2] = {MLX4_PORT_TYPE_NONE, MLX4_PORT_TYPE_NONE};
static int arr_argc = 2;
module_param_array(port_type_array, int, &arr_argc, 0444);
MODULE_PARM_DESC(port_type_array, "Array of port types: HW_DEFAULT (0) is default "
				  "1 for IB, 2 for Ethernet");
struct mlx4_port_config {
	struct list_head list;
	enum mlx4_port_type port_type[MLX4_MAX_PORTS + 1];
	struct pci_dev *pdev;
};
static inline int mlx4_master_get_num_eqs(struct mlx4_dev *dev)
{
	return dev->caps.reserved_eqs +
		MLX4_MFUNC_EQ_NUM * (dev->num_slaves + 1);
}
int mlx4_check_port_params(struct mlx4_dev *dev,
			   enum mlx4_port_type *port_type)
{
	int i;

	for (i = 0; i < dev->caps.num_ports - 1; i++) {
		if (port_type[i] != port_type[i + 1]) {
			if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) {
				mlx4_err(dev, "Only same port types supported "
					 "on this HCA, aborting.\n");
				return -EINVAL;
			}
			if (port_type[i] == MLX4_PORT_TYPE_ETH &&
			    port_type[i + 1] == MLX4_PORT_TYPE_IB)
				return -EINVAL;
		}
	}

	for (i = 0; i < dev->caps.num_ports; i++) {
		if (!(port_type[i] & dev->caps.supported_type[i+1])) {
			mlx4_err(dev, "Requested port type for port %d is not "
				 "supported on this HCA\n", i + 1);
			return -EINVAL;
		}
	}
	return 0;
}
static void mlx4_set_port_mask(struct mlx4_dev *dev)
{
	int i;

	for (i = 1; i <= dev->caps.num_ports; ++i)
		dev->caps.port_mask[i] = dev->caps.port_type[i];
}
static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
{
	int err;
	int i;

	err = mlx4_QUERY_DEV_CAP(dev, dev_cap);
	if (err) {
		mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
		return err;
	}

	if (dev_cap->min_page_sz > PAGE_SIZE) {
		mlx4_err(dev, "HCA minimum page size of %d bigger than "
			 "kernel PAGE_SIZE of %ld, aborting.\n",
			 dev_cap->min_page_sz, PAGE_SIZE);
		return -ENODEV;
	}
	if (dev_cap->num_ports > MLX4_MAX_PORTS) {
		mlx4_err(dev, "HCA has %d ports, but we only support %d, "
			 "aborting.\n",
			 dev_cap->num_ports, MLX4_MAX_PORTS);
		return -ENODEV;
	}

	if (dev_cap->uar_size > pci_resource_len(dev->pdev, 2)) {
		mlx4_err(dev, "HCA reported UAR size of 0x%x bigger than "
			 "PCI resource 2 size of 0x%llx, aborting.\n",
			 dev_cap->uar_size,
			 (unsigned long long) pci_resource_len(dev->pdev, 2));
		return -ENODEV;
	}

	dev->caps.num_ports	     = dev_cap->num_ports;
	for (i = 1; i <= dev->caps.num_ports; ++i) {
		dev->caps.vl_cap[i]	    = dev_cap->max_vl[i];
		dev->caps.ib_mtu_cap[i]	    = dev_cap->ib_mtu[i];
		dev->caps.gid_table_len[i]  = dev_cap->max_gids[i];
		dev->caps.pkey_table_len[i] = dev_cap->max_pkeys[i];
		dev->caps.port_width_cap[i] = dev_cap->max_port_width[i];
		dev->caps.eth_mtu_cap[i]    = dev_cap->eth_mtu[i];
		dev->caps.def_mac[i]        = dev_cap->def_mac[i];
		dev->caps.supported_type[i] = dev_cap->supported_port_types[i];
		dev->caps.suggested_type[i] = dev_cap->suggested_type[i];
		dev->caps.default_sense[i]  = dev_cap->default_sense[i];
		dev->caps.trans_type[i]	    = dev_cap->trans_type[i];
		dev->caps.vendor_oui[i]     = dev_cap->vendor_oui[i];
		dev->caps.wavelength[i]     = dev_cap->wavelength[i];
		dev->caps.trans_code[i]     = dev_cap->trans_code[i];
	}

	dev->caps.uar_page_size	     = PAGE_SIZE;
	dev->caps.num_uars	     = dev_cap->uar_size / PAGE_SIZE;
	dev->caps.local_ca_ack_delay = dev_cap->local_ca_ack_delay;
	dev->caps.bf_reg_size	     = dev_cap->bf_reg_size;
	dev->caps.bf_regs_per_page   = dev_cap->bf_regs_per_page;
	dev->caps.max_sq_sg	     = dev_cap->max_sq_sg;
	dev->caps.max_rq_sg	     = dev_cap->max_rq_sg;
	dev->caps.max_wqes	     = dev_cap->max_qp_sz;
	dev->caps.max_qp_init_rdma   = dev_cap->max_requester_per_qp;
	dev->caps.max_srq_wqes	     = dev_cap->max_srq_sz;
	dev->caps.max_srq_sge	     = dev_cap->max_rq_sg - 1;
	dev->caps.reserved_srqs	     = dev_cap->reserved_srqs;
	dev->caps.max_sq_desc_sz     = dev_cap->max_sq_desc_sz;
	dev->caps.max_rq_desc_sz     = dev_cap->max_rq_desc_sz;
	dev->caps.num_qp_per_mgm     = mlx4_get_qp_per_mgm(dev);
	/*
	 * Subtract 1 from the limit because we need to allocate a
	 * spare CQE so the HCA HW can tell the difference between an
	 * empty CQ and a full CQ.
	 */
	dev->caps.max_cqes	     = dev_cap->max_cq_sz - 1;
	dev->caps.reserved_cqs	     = dev_cap->reserved_cqs;
	dev->caps.reserved_eqs	     = dev_cap->reserved_eqs;
	dev->caps.reserved_mtts      = dev_cap->reserved_mtts;
	dev->caps.reserved_mrws	     = dev_cap->reserved_mrws;

	/* The first 128 UARs are used for EQ doorbells */
	dev->caps.reserved_uars	     = max_t(int, 128, dev_cap->reserved_uars);
	dev->caps.reserved_pds	     = dev_cap->reserved_pds;
	dev->caps.reserved_xrcds     = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ?
					dev_cap->reserved_xrcds : 0;
	dev->caps.max_xrcds          = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ?
					dev_cap->max_xrcds : 0;
	dev->caps.mtt_entry_sz       = dev_cap->mtt_entry_sz;

	dev->caps.max_msg_sz         = dev_cap->max_msg_sz;
	dev->caps.page_size_cap	     = ~(u32) (dev_cap->min_page_sz - 1);
	dev->caps.flags		     = dev_cap->flags;
	dev->caps.flags2	     = dev_cap->flags2;
	dev->caps.bmme_flags	     = dev_cap->bmme_flags;
	dev->caps.reserved_lkey	     = dev_cap->reserved_lkey;
	dev->caps.stat_rate_support  = dev_cap->stat_rate_support;
	dev->caps.max_gso_sz	     = dev_cap->max_gso_sz;
	dev->caps.max_rss_tbl_sz     = dev_cap->max_rss_tbl_sz;

	/* Sense port is always allowed on supported ConnectX-1 and -2 devices */
	if (dev->pdev->device != 0x1003)
		dev->caps.flags |= MLX4_DEV_CAP_FLAG_SENSE_SUPPORT;

	dev->caps.log_num_macs  = log_num_mac;
	dev->caps.log_num_vlans = MLX4_LOG_NUM_VLANS;
	dev->caps.log_num_prios = use_prio ? 3 : 0;

	for (i = 1; i <= dev->caps.num_ports; ++i) {
		dev->caps.port_type[i] = MLX4_PORT_TYPE_NONE;
		if (dev->caps.supported_type[i]) {
			/* if only ETH is supported - assign ETH */
			if (dev->caps.supported_type[i] == MLX4_PORT_TYPE_ETH)
				dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH;
			/* if only IB is supported,
			 * assign IB only if SRIOV is off */
			else if (dev->caps.supported_type[i] ==
				 MLX4_PORT_TYPE_IB) {
				if (dev->flags & MLX4_FLAG_SRIOV)
					dev->caps.port_type[i] =
						MLX4_PORT_TYPE_NONE;
				else
					dev->caps.port_type[i] =
						MLX4_PORT_TYPE_IB;
			/* if IB and ETH are supported,
			 * first of all check if SRIOV is on */
			} else if (dev->flags & MLX4_FLAG_SRIOV)
				dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH;
			else {
				/* In non-SRIOV mode, we set the port type
				 * according to user selection of port type;
				 * if user selected none, take the FW hint */
				if (port_type_array[i-1] == MLX4_PORT_TYPE_NONE)
					dev->caps.port_type[i] = dev->caps.suggested_type[i] ?
						MLX4_PORT_TYPE_ETH : MLX4_PORT_TYPE_IB;
				else
					dev->caps.port_type[i] = port_type_array[i-1];
			}
		}
		/*
		 * Link sensing is allowed on the port if 3 conditions are true:
		 * 1. Both protocols are supported on the port.
		 * 2. Different types are supported on the port.
		 * 3. FW declared that it supports link sensing.
		 */
		mlx4_priv(dev)->sense.sense_allowed[i] =
			((dev->caps.supported_type[i] == MLX4_PORT_TYPE_AUTO) &&
			 (dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) &&
			 (dev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT));

		/*
		 * If the "default_sense" bit is set, we move the port to "AUTO" mode
		 * and perform the sense_port FW command to try and set the correct
		 * port type from the beginning.
		 */
		if (mlx4_priv(dev)->sense.sense_allowed[i] && dev->caps.default_sense[i]) {
			enum mlx4_port_type sensed_port = MLX4_PORT_TYPE_NONE;
			dev->caps.possible_type[i] = MLX4_PORT_TYPE_AUTO;
			mlx4_SENSE_PORT(dev, i, &sensed_port);
			if (sensed_port != MLX4_PORT_TYPE_NONE)
				dev->caps.port_type[i] = sensed_port;
		} else
			dev->caps.possible_type[i] = dev->caps.port_type[i];

		if (dev->caps.log_num_macs > dev_cap->log_max_macs[i]) {
			dev->caps.log_num_macs = dev_cap->log_max_macs[i];
			mlx4_warn(dev, "Requested number of MACs is too much "
				  "for port %d, reducing to %d.\n",
				  i, 1 << dev->caps.log_num_macs);
		}
		if (dev->caps.log_num_vlans > dev_cap->log_max_vlans[i]) {
			dev->caps.log_num_vlans = dev_cap->log_max_vlans[i];
			mlx4_warn(dev, "Requested number of VLANs is too much "
				  "for port %d, reducing to %d.\n",
				  i, 1 << dev->caps.log_num_vlans);
		}
	}

	dev->caps.max_counters = 1 << ilog2(dev_cap->max_counters);

	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] = dev_cap->reserved_qps;
	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] =
		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] =
		(1 << dev->caps.log_num_macs) *
		(1 << dev->caps.log_num_vlans) *
		(1 << dev->caps.log_num_prios) *
		dev->caps.num_ports;
	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH] = MLX4_NUM_FEXCH;

	dev->caps.reserved_qps = dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] +
		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] +
		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] +
		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH];

	return 0;
}
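/*
 * Worked example (editorial, not from the original source): with the default
 * log_num_macs = 7, log_num_vlans = MLX4_LOG_NUM_VLANS = 7, use_prio off
 * (log_num_prios = 0) and a 2-port HCA, the ETH_ADDR/FC_ADDR regions above
 * each reserve 2^7 * 2^7 * 2^0 * 2 = 32768 QPs.
 */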
/* Check whether any VFs are still live, and return how many there are. */
static int mlx4_how_many_lives_vf(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *s_state;
	int i;
	int ret = 0;

	for (i = 1/*the ppf is 0*/; i < dev->num_slaves; ++i) {
		s_state = &priv->mfunc.master.slave_state[i];
		if (s_state->active && s_state->last_cmd !=
		    MLX4_COMM_CMD_RESET) {
			mlx4_warn(dev, "%s: slave: %d is still active\n",
				  __func__, i);
			ret++;
		}
	}
	return ret;
}
int mlx4_is_slave_active(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *s_slave;

	if (!mlx4_is_master(dev))
		return 0;

	s_slave = &priv->mfunc.master.slave_state[slave];
	return !!s_slave->active;
}
EXPORT_SYMBOL(mlx4_is_slave_active);
static int mlx4_slave_cap(struct mlx4_dev *dev)
{
	int err;
	u32 page_size;
	struct mlx4_dev_cap dev_cap;
	struct mlx4_func_cap func_cap;
	struct mlx4_init_hca_param hca_param;
	int i;

	memset(&hca_param, 0, sizeof(hca_param));
	err = mlx4_QUERY_HCA(dev, &hca_param);
	if (err) {
		mlx4_err(dev, "QUERY_HCA command failed, aborting.\n");
		return err;
	}

	/* fail if the HCA has an unknown capability */
	if ((hca_param.global_caps | HCA_GLOBAL_CAP_MASK) !=
	    HCA_GLOBAL_CAP_MASK) {
		mlx4_err(dev, "Unknown hca global capabilities\n");
		return -ENOSYS;
	}

	mlx4_log_num_mgm_entry_size = hca_param.log_mc_entry_sz;

	memset(&dev_cap, 0, sizeof(dev_cap));
	err = mlx4_dev_cap(dev, &dev_cap);
	if (err) {
		mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
		return err;
	}

	page_size = ~dev->caps.page_size_cap + 1;
	mlx4_warn(dev, "HCA minimum page size:%d\n", page_size);
	if (page_size > PAGE_SIZE) {
		mlx4_err(dev, "HCA minimum page size of %d bigger than "
			 "kernel PAGE_SIZE of %ld, aborting.\n",
			 page_size, PAGE_SIZE);
		return -ENODEV;
	}

	/* slave gets uar page size from QUERY_HCA fw command */
	dev->caps.uar_page_size = 1 << (hca_param.uar_page_sz + 12);
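	/* Editorial note (not from the original source): uar_page_sz is the
	 * log2 page size minus 12, so a reported value of 0 means a UAR page
	 * of 1 << 12 = 4096 bytes, matching a 4 KB kernel PAGE_SIZE. */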
	/* TODO: relax this assumption */
	if (dev->caps.uar_page_size != PAGE_SIZE) {
		mlx4_err(dev, "UAR size:%d != kernel PAGE_SIZE of %ld\n",
			 dev->caps.uar_page_size, PAGE_SIZE);
		return -ENODEV;
	}

	memset(&func_cap, 0, sizeof(func_cap));
	err = mlx4_QUERY_FUNC_CAP(dev, &func_cap);
	if (err) {
		mlx4_err(dev, "QUERY_FUNC_CAP command failed, aborting.\n");
		return err;
	}

	if ((func_cap.pf_context_behaviour | PF_CONTEXT_BEHAVIOUR_MASK) !=
	    PF_CONTEXT_BEHAVIOUR_MASK) {
		mlx4_err(dev, "Unknown pf context behaviour\n");
		return -ENOSYS;
	}

	dev->caps.num_ports		= func_cap.num_ports;
	dev->caps.num_qps		= func_cap.qp_quota;
	dev->caps.num_srqs		= func_cap.srq_quota;
	dev->caps.num_cqs		= func_cap.cq_quota;
	dev->caps.num_eqs		= func_cap.max_eq;
	dev->caps.reserved_eqs		= func_cap.reserved_eq;
	dev->caps.num_mpts		= func_cap.mpt_quota;
	dev->caps.num_mtts		= func_cap.mtt_quota;
	dev->caps.num_pds		= MLX4_NUM_PDS;
	dev->caps.num_mgms		= 0;
	dev->caps.num_amgms		= 0;

	for (i = 1; i <= dev->caps.num_ports; ++i)
		dev->caps.port_mask[i] = dev->caps.port_type[i];

	if (dev->caps.num_ports > MLX4_MAX_PORTS) {
		mlx4_err(dev, "HCA has %d ports, but we only support %d, "
			 "aborting.\n", dev->caps.num_ports, MLX4_MAX_PORTS);
		return -ENODEV;
	}

	if (dev->caps.uar_page_size * (dev->caps.num_uars -
				       dev->caps.reserved_uars) >
	    pci_resource_len(dev->pdev, 2)) {
		mlx4_err(dev, "HCA reported UAR region size of 0x%x bigger than "
			 "PCI resource 2 size of 0x%llx, aborting.\n",
			 dev->caps.uar_page_size * dev->caps.num_uars,
			 (unsigned long long) pci_resource_len(dev->pdev, 2));
		return -ENODEV;
	}

	mlx4_warn(dev, "sqp_demux:%d\n", dev->caps.sqp_demux);
	mlx4_warn(dev, "num_uars:%d reserved_uars:%d uar region:0x%x bar2:0x%llx\n",
		  dev->caps.num_uars, dev->caps.reserved_uars,
		  dev->caps.uar_page_size * dev->caps.num_uars,
		  pci_resource_len(dev->pdev, 2));
	mlx4_warn(dev, "num_eqs:%d reserved_eqs:%d\n", dev->caps.num_eqs,
		  dev->caps.reserved_eqs);
	mlx4_warn(dev, "num_pds:%d reserved_pds:%d slave_pd_shift:%d pd_base:%d\n",
		  dev->caps.num_pds, dev->caps.reserved_pds,
		  dev->caps.slave_pd_shift, dev->caps.pd_base);

	return 0;
}
/*
 * Change the port configuration of the device.
 * Every user of this function must hold the port mutex.
 */
int mlx4_change_port_types(struct mlx4_dev *dev,
			   enum mlx4_port_type *port_types)
{
	int err = 0;
	int change = 0;
	int port;

	for (port = 0; port < dev->caps.num_ports; port++) {
		/* Change the port type only if the new type is different
		 * from the current, and not set to Auto */
		if (port_types[port] != dev->caps.port_type[port + 1])
			change = 1;
	}
	if (change) {
		mlx4_unregister_device(dev);
		for (port = 1; port <= dev->caps.num_ports; port++) {
			mlx4_CLOSE_PORT(dev, port);
			dev->caps.port_type[port] = port_types[port - 1];
			err = mlx4_SET_PORT(dev, port);
			if (err) {
				mlx4_err(dev, "Failed to set port %d, "
					      "aborting\n", port);
				goto out;
			}
		}
		mlx4_set_port_mask(dev);
		err = mlx4_register_device(dev);
	}

out:
	return err;
}
static ssize_t show_port_type(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
						   port_attr);
	struct mlx4_dev *mdev = info->dev;
	char type[8];

	sprintf(type, "%s",
		(mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_IB) ?
		"ib" : "eth");
	if (mdev->caps.possible_type[info->port] == MLX4_PORT_TYPE_AUTO)
		sprintf(buf, "auto (%s)\n", type);
	else
		sprintf(buf, "%s\n", type);

	return strlen(buf);
}
static ssize_t set_port_type(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
						   port_attr);
	struct mlx4_dev *mdev = info->dev;
	struct mlx4_priv *priv = mlx4_priv(mdev);
	enum mlx4_port_type types[MLX4_MAX_PORTS];
	enum mlx4_port_type new_types[MLX4_MAX_PORTS];
	int i;
	int err = 0;

	if (!strcmp(buf, "ib\n"))
		info->tmp_type = MLX4_PORT_TYPE_IB;
	else if (!strcmp(buf, "eth\n"))
		info->tmp_type = MLX4_PORT_TYPE_ETH;
	else if (!strcmp(buf, "auto\n"))
		info->tmp_type = MLX4_PORT_TYPE_AUTO;
	else {
		mlx4_err(mdev, "%s is not supported port type\n", buf);
		return -EINVAL;
	}

	mlx4_stop_sense(mdev);
	mutex_lock(&priv->port_mutex);
	/* Possible type is always the one that was delivered */
	mdev->caps.possible_type[info->port] = info->tmp_type;

	for (i = 0; i < mdev->caps.num_ports; i++) {
		types[i] = priv->port[i+1].tmp_type ? priv->port[i+1].tmp_type :
					mdev->caps.possible_type[i+1];
		if (types[i] == MLX4_PORT_TYPE_AUTO)
			types[i] = mdev->caps.port_type[i+1];
	}

	if (!(mdev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) &&
	    !(mdev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT)) {
		for (i = 1; i <= mdev->caps.num_ports; i++) {
			if (mdev->caps.possible_type[i] == MLX4_PORT_TYPE_AUTO) {
				mdev->caps.possible_type[i] = mdev->caps.port_type[i];
				err = -EINVAL;
			}
		}
	}
	if (err) {
		mlx4_err(mdev, "Auto sensing is not supported on this HCA. "
			       "Set only 'eth' or 'ib' for both ports "
			       "(should be the same)\n");
		goto out;
	}

	mlx4_do_sense_ports(mdev, new_types, types);

	err = mlx4_check_port_params(mdev, new_types);
	if (err)
		goto out;

	/* We are about to apply the changes after the configuration
	 * was verified, no need to remember the temporary types
	 * any more */
	for (i = 0; i < mdev->caps.num_ports; i++)
		priv->port[i + 1].tmp_type = 0;

	err = mlx4_change_port_types(mdev, new_types);

out:
	mlx4_start_sense(mdev);
	mutex_unlock(&priv->port_mutex);
	return err ? err : count;
}
static inline int int_to_ibta_mtu(int mtu)
{
	switch (mtu) {
	case 256:  return IB_MTU_256;
	case 512:  return IB_MTU_512;
	case 1024: return IB_MTU_1024;
	case 2048: return IB_MTU_2048;
	case 4096: return IB_MTU_4096;
	default: return -1;
	}
}

static inline int ibta_mtu_to_int(enum ibta_mtu mtu)
{
	switch (mtu) {
	case IB_MTU_256:  return  256;
	case IB_MTU_512:  return  512;
	case IB_MTU_1024: return 1024;
	case IB_MTU_2048: return 2048;
	case IB_MTU_4096: return 4096;
	default: return -1;
	}
}
static ssize_t show_port_ib_mtu(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
						   port_mtu_attr);
	struct mlx4_dev *mdev = info->dev;

	if (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_ETH)
		mlx4_warn(mdev, "port level mtu is only used for IB ports\n");

	sprintf(buf, "%d\n",
		ibta_mtu_to_int(mdev->caps.port_ib_mtu[info->port]));
	return strlen(buf);
}
static ssize_t set_port_ib_mtu(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t count)
{
	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
						   port_mtu_attr);
	struct mlx4_dev *mdev = info->dev;
	struct mlx4_priv *priv = mlx4_priv(mdev);
	int err, port, mtu, ibta_mtu = -1;

	if (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_ETH) {
		mlx4_warn(mdev, "port level mtu is only used for IB ports\n");
		return -EINVAL;
	}

	err = sscanf(buf, "%d", &mtu);
	if (err > 0)
		ibta_mtu = int_to_ibta_mtu(mtu);

	if (err <= 0 || ibta_mtu < 0) {
		mlx4_err(mdev, "%s is invalid IBTA mtu\n", buf);
		return -EINVAL;
	}

	mdev->caps.port_ib_mtu[info->port] = ibta_mtu;

	mlx4_stop_sense(mdev);
	mutex_lock(&priv->port_mutex);
	mlx4_unregister_device(mdev);
	for (port = 1; port <= mdev->caps.num_ports; port++) {
		mlx4_CLOSE_PORT(mdev, port);
		err = mlx4_SET_PORT(mdev, port);
		if (err) {
			mlx4_err(mdev, "Failed to set port %d, "
				       "aborting\n", port);
			goto err_set_port;
		}
	}
	err = mlx4_register_device(mdev);
err_set_port:
	mutex_unlock(&priv->port_mutex);
	mlx4_start_sense(mdev);
	return err ? err : count;
}
static int mlx4_load_fw(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;

	priv->fw.fw_icm = mlx4_alloc_icm(dev, priv->fw.fw_pages,
					 GFP_HIGHUSER | __GFP_NOWARN, 0);
	if (!priv->fw.fw_icm) {
		mlx4_err(dev, "Couldn't allocate FW area, aborting.\n");
		return -ENOMEM;
	}

	err = mlx4_MAP_FA(dev, priv->fw.fw_icm);
	if (err) {
		mlx4_err(dev, "MAP_FA command failed, aborting.\n");
		goto err_free;
	}

	err = mlx4_RUN_FW(dev);
	if (err) {
		mlx4_err(dev, "RUN_FW command failed, aborting.\n");
		goto err_unmap_fa;
	}

	return 0;

err_unmap_fa:
	mlx4_UNMAP_FA(dev);

err_free:
	mlx4_free_icm(dev, priv->fw.fw_icm, 0);
	return err;
}
static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base,
				int cmpt_entry_sz)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;
	int num_eqs;

	err = mlx4_init_icm_table(dev, &priv->qp_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_QP *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err)
		goto err;

	err = mlx4_init_icm_table(dev, &priv->srq_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_SRQ *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, dev->caps.num_srqs,
				  dev->caps.reserved_srqs, 0, 0);
	if (err)
		goto err_qp;

	err = mlx4_init_icm_table(dev, &priv->cq_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_CQ *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, dev->caps.num_cqs,
				  dev->caps.reserved_cqs, 0, 0);
	if (err)
		goto err_srq;

	num_eqs = (mlx4_is_master(dev)) ?
		roundup_pow_of_two(mlx4_master_get_num_eqs(dev)) :
		dev->caps.num_eqs;
	err = mlx4_init_icm_table(dev, &priv->eq_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_EQ *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, num_eqs, num_eqs, 0, 0);
	if (err)
		goto err_cq;

	return 0;

err_cq:
	mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);

err_srq:
	mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);

err_qp:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);

err:
	return err;
}
static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
			 struct mlx4_init_hca_param *init_hca, u64 icm_size)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u64 aux_pages;
	int num_eqs;
	int err;

	err = mlx4_SET_ICM_SIZE(dev, icm_size, &aux_pages);
	if (err) {
		mlx4_err(dev, "SET_ICM_SIZE command failed, aborting.\n");
		return err;
	}

	mlx4_dbg(dev, "%lld KB of HCA context requires %lld KB aux memory.\n",
		 (unsigned long long) icm_size >> 10,
		 (unsigned long long) aux_pages << 2);

	priv->fw.aux_icm = mlx4_alloc_icm(dev, aux_pages,
					  GFP_HIGHUSER | __GFP_NOWARN, 0);
	if (!priv->fw.aux_icm) {
		mlx4_err(dev, "Couldn't allocate aux memory, aborting.\n");
		return -ENOMEM;
	}

	err = mlx4_MAP_ICM_AUX(dev, priv->fw.aux_icm);
	if (err) {
		mlx4_err(dev, "MAP_ICM_AUX command failed, aborting.\n");
		goto err_free_aux;
	}

	err = mlx4_init_cmpt_table(dev, init_hca->cmpt_base, dev_cap->cmpt_entry_sz);
	if (err) {
		mlx4_err(dev, "Failed to map cMPT context memory, aborting.\n");
		goto err_unmap_aux;
	}

	num_eqs = (mlx4_is_master(dev)) ?
		roundup_pow_of_two(mlx4_master_get_num_eqs(dev)) :
		dev->caps.num_eqs;
	err = mlx4_init_icm_table(dev, &priv->eq_table.table,
				  init_hca->eqc_base, dev_cap->eqc_entry_sz,
				  num_eqs, num_eqs, 0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map EQ context memory, aborting.\n");
		goto err_unmap_cmpt;
	}

	/*
	 * Reserved MTT entries must be aligned up to a cacheline
	 * boundary, since the FW will write to them, while the driver
	 * writes to all other MTT entries. (The variable
	 * dev->caps.mtt_entry_sz below is really the MTT segment
	 * size, not the raw entry size)
	 */
	dev->caps.reserved_mtts =
		ALIGN(dev->caps.reserved_mtts * dev->caps.mtt_entry_sz,
		      dma_get_cache_alignment()) / dev->caps.mtt_entry_sz;
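	/* Editorial example (not from the original source): with 64-byte
	 * cachelines and an illustrative 8-byte mtt_entry_sz, the ALIGN()
	 * above rounds reserved_mtts up to a multiple of 64 / 8 = 8 entries,
	 * so e.g. 10 reserved entries would become 16. */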
	err = mlx4_init_icm_table(dev, &priv->mr_table.mtt_table,
				  init_hca->mtt_base,
				  dev->caps.mtt_entry_sz,
				  dev->caps.num_mtts,
				  dev->caps.reserved_mtts, 1, 0);
	if (err) {
		mlx4_err(dev, "Failed to map MTT context memory, aborting.\n");
		goto err_unmap_eq;
	}

	err = mlx4_init_icm_table(dev, &priv->mr_table.dmpt_table,
				  init_hca->dmpt_base,
				  dev_cap->dmpt_entry_sz,
				  dev->caps.num_mpts,
				  dev->caps.reserved_mrws, 1, 1);
	if (err) {
		mlx4_err(dev, "Failed to map dMPT context memory, aborting.\n");
		goto err_unmap_mtt;
	}

	err = mlx4_init_icm_table(dev, &priv->qp_table.qp_table,
				  init_hca->qpc_base,
				  dev_cap->qpc_entry_sz,
				  dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map QP context memory, aborting.\n");
		goto err_unmap_dmpt;
	}

	err = mlx4_init_icm_table(dev, &priv->qp_table.auxc_table,
				  init_hca->auxc_base,
				  dev_cap->aux_entry_sz,
				  dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map AUXC context memory, aborting.\n");
		goto err_unmap_qp;
	}

	err = mlx4_init_icm_table(dev, &priv->qp_table.altc_table,
				  init_hca->altc_base,
				  dev_cap->altc_entry_sz,
				  dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map ALTC context memory, aborting.\n");
		goto err_unmap_auxc;
	}

	err = mlx4_init_icm_table(dev, &priv->qp_table.rdmarc_table,
				  init_hca->rdmarc_base,
				  dev_cap->rdmarc_entry_sz << priv->qp_table.rdmarc_shift,
				  dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map RDMARC context memory, aborting\n");
		goto err_unmap_altc;
	}

	err = mlx4_init_icm_table(dev, &priv->cq_table.table,
				  init_hca->cqc_base,
				  dev_cap->cqc_entry_sz,
				  dev->caps.num_cqs,
				  dev->caps.reserved_cqs, 0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map CQ context memory, aborting.\n");
		goto err_unmap_rdmarc;
	}

	err = mlx4_init_icm_table(dev, &priv->srq_table.table,
				  init_hca->srqc_base,
				  dev_cap->srq_entry_sz,
				  dev->caps.num_srqs,
				  dev->caps.reserved_srqs, 0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map SRQ context memory, aborting.\n");
		goto err_unmap_cq;
	}

	/*
	 * It's not strictly required, but for simplicity just map the
	 * whole multicast group table now.  The table isn't very big
	 * and it's a lot easier than trying to track ref counts.
	 */
	err = mlx4_init_icm_table(dev, &priv->mcg_table.table,
				  init_hca->mc_base,
				  mlx4_get_mgm_entry_size(dev),
				  dev->caps.num_mgms + dev->caps.num_amgms,
				  dev->caps.num_mgms + dev->caps.num_amgms,
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map MCG context memory, aborting.\n");
		goto err_unmap_srq;
	}

	return 0;

err_unmap_srq:
	mlx4_cleanup_icm_table(dev, &priv->srq_table.table);

err_unmap_cq:
	mlx4_cleanup_icm_table(dev, &priv->cq_table.table);

err_unmap_rdmarc:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table);

err_unmap_altc:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table);

err_unmap_auxc:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table);

err_unmap_qp:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);

err_unmap_dmpt:
	mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);

err_unmap_mtt:
	mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);

err_unmap_eq:
	mlx4_cleanup_icm_table(dev, &priv->eq_table.table);

err_unmap_cmpt:
	mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);

err_unmap_aux:
	mlx4_UNMAP_ICM_AUX(dev);

err_free_aux:
	mlx4_free_icm(dev, priv->fw.aux_icm, 0);

	return err;
}
static void mlx4_free_icms(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	mlx4_cleanup_icm_table(dev, &priv->mcg_table.table);
	mlx4_cleanup_icm_table(dev, &priv->srq_table.table);
	mlx4_cleanup_icm_table(dev, &priv->cq_table.table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);
	mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);
	mlx4_cleanup_icm_table(dev, &priv->eq_table.table);
	mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);

	mlx4_UNMAP_ICM_AUX(dev);
	mlx4_free_icm(dev, priv->fw.aux_icm, 0);
}
static void mlx4_slave_exit(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	down(&priv->cmd.slave_sem);
	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_TIME))
		mlx4_warn(dev, "Failed to close slave function.\n");
	up(&priv->cmd.slave_sem);
}
static int map_bf_area(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	resource_size_t bf_start;
	resource_size_t bf_len;
	int err = 0;

	if (!dev->caps.bf_reg_size)
		return -ENXIO;

	bf_start = pci_resource_start(dev->pdev, 2) +
			(dev->caps.num_uars << PAGE_SHIFT);
	bf_len = pci_resource_len(dev->pdev, 2) -
			(dev->caps.num_uars << PAGE_SHIFT);
	priv->bf_mapping = io_mapping_create_wc(bf_start, bf_len);
	if (!priv->bf_mapping)
		err = -ENOMEM;

	return err;
}

static void unmap_bf_area(struct mlx4_dev *dev)
{
	if (mlx4_priv(dev)->bf_mapping)
		io_mapping_free(mlx4_priv(dev)->bf_mapping);
}
static void mlx4_close_hca(struct mlx4_dev *dev)
{
	unmap_bf_area(dev);
	if (mlx4_is_slave(dev))
		mlx4_slave_exit(dev);
	else {
		mlx4_CLOSE_HCA(dev, 0);
		mlx4_free_icms(dev);
		mlx4_UNMAP_FA(dev);
		mlx4_free_icm(dev, mlx4_priv(dev)->fw.fw_icm, 0);
	}
}
static int mlx4_init_slave(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u64 dma = (u64) priv->mfunc.vhcr_dma;
	int num_of_reset_retries = NUM_OF_RESET_RETRIES;
	int ret_from_reset = 0;
	u32 slave_read;
	u32 cmd_channel_ver;

	down(&priv->cmd.slave_sem);
	priv->cmd.max_cmds = 1;
	mlx4_warn(dev, "Sending reset\n");
	ret_from_reset = mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0,
				       MLX4_COMM_TIME);
	/* if we are in the middle of flr the slave will try
	 * NUM_OF_RESET_RETRIES times before leaving.*/
	if (ret_from_reset) {
		if (MLX4_DELAY_RESET_SLAVE == ret_from_reset) {
			msleep(SLEEP_TIME_IN_RESET);
			while (ret_from_reset && num_of_reset_retries) {
				mlx4_warn(dev, "slave is currently in the "
					  "middle of FLR. retrying..."
					  "(try num:%d)\n",
					  (NUM_OF_RESET_RETRIES -
					   num_of_reset_retries + 1));
				ret_from_reset =
					mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET,
						      0, MLX4_COMM_TIME);
				num_of_reset_retries = num_of_reset_retries - 1;
			}
		} else
			goto err;
	}

	/* check the driver version - the slave I/F revision
	 * must match the master's */
	slave_read = swab32(readl(&priv->mfunc.comm->slave_read));
	cmd_channel_ver = mlx4_comm_get_version();

	if (MLX4_COMM_GET_IF_REV(cmd_channel_ver) !=
		MLX4_COMM_GET_IF_REV(slave_read)) {
		mlx4_err(dev, "slave driver version is not supported"
			 " by the master\n");
		goto err;
	}

	mlx4_warn(dev, "Sending vhcr0\n");
	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR0, dma >> 48,
						    MLX4_COMM_TIME))
		goto err;
	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR1, dma >> 32,
						    MLX4_COMM_TIME))
		goto err;
	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR2, dma >> 16,
						    MLX4_COMM_TIME))
		goto err;
	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_EN, dma, MLX4_COMM_TIME))
		goto err;
	up(&priv->cmd.slave_sem);
	return 0;

err:
	mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, 0);
	up(&priv->cmd.slave_sem);
	return -EIO;
}
static int mlx4_init_hca(struct mlx4_dev *dev)
{
	struct mlx4_priv	  *priv = mlx4_priv(dev);
	struct mlx4_adapter	   adapter;
	struct mlx4_dev_cap	   dev_cap;
	struct mlx4_mod_stat_cfg   mlx4_cfg;
	struct mlx4_profile	   profile;
	struct mlx4_init_hca_param init_hca;
	u64 icm_size;
	int err;

	if (!mlx4_is_slave(dev)) {
		err = mlx4_QUERY_FW(dev);
		if (err) {
			if (err == -EACCES)
				mlx4_info(dev, "non-primary physical function, skipping.\n");
			else
				mlx4_err(dev, "QUERY_FW command failed, aborting.\n");
			return err;
		}

		err = mlx4_load_fw(dev);
		if (err) {
			mlx4_err(dev, "Failed to start FW, aborting.\n");
			return err;
		}

		mlx4_cfg.log_pg_sz_m = 1;
		mlx4_cfg.log_pg_sz = 0;
		err = mlx4_MOD_STAT_CFG(dev, &mlx4_cfg);
		if (err)
			mlx4_warn(dev, "Failed to override log_pg_sz parameter\n");

		err = mlx4_dev_cap(dev, &dev_cap);
		if (err) {
			mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
			goto err_stop_fw;
		}

		profile = default_profile;

		icm_size = mlx4_make_profile(dev, &profile, &dev_cap,
					     &init_hca);
		if ((long long) icm_size < 0) {
			err = icm_size;
			goto err_stop_fw;
		}

		dev->caps.max_fmr_maps = (1 << (32 - ilog2(dev->caps.num_mpts))) - 1;

		init_hca.log_uar_sz = ilog2(dev->caps.num_uars);
		init_hca.uar_page_sz = PAGE_SHIFT - 12;

		err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size);
		if (err)
			goto err_stop_fw;

		err = mlx4_INIT_HCA(dev, &init_hca);
		if (err) {
			mlx4_err(dev, "INIT_HCA command failed, aborting.\n");
			goto err_free_icm;
		}
	} else {
		err = mlx4_init_slave(dev);
		if (err) {
			mlx4_err(dev, "Failed to initialize slave\n");
			return err;
		}

		err = mlx4_slave_cap(dev);
		if (err) {
			mlx4_err(dev, "Failed to obtain slave caps\n");
			goto err_close;
		}
	}

	if (map_bf_area(dev))
		mlx4_dbg(dev, "Failed to map blue flame area\n");

	/* Only the master sets the ports; all the rest get them from it. */
	if (!mlx4_is_slave(dev))
		mlx4_set_port_mask(dev);

	err = mlx4_QUERY_ADAPTER(dev, &adapter);
	if (err) {
		mlx4_err(dev, "QUERY_ADAPTER command failed, aborting.\n");
		goto err_close;
	}

	priv->eq_table.inta_pin = adapter.inta_pin;
	memcpy(dev->board_id, adapter.board_id, sizeof dev->board_id);

	return 0;

err_close:
	mlx4_close_hca(dev);

err_free_icm:
	if (!mlx4_is_slave(dev))
		mlx4_free_icms(dev);

err_stop_fw:
	if (!mlx4_is_slave(dev)) {
		mlx4_UNMAP_FA(dev);
		mlx4_free_icm(dev, priv->fw.fw_icm, 0);
	}
	return err;
}
static int mlx4_init_counters_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int nent;

	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
		return -ENOENT;

	nent = dev->caps.max_counters;
	return mlx4_bitmap_init(&priv->counters_bitmap, nent, nent - 1, 0, 0);
}

static void mlx4_cleanup_counters_table(struct mlx4_dev *dev)
{
	mlx4_bitmap_cleanup(&mlx4_priv(dev)->counters_bitmap);
}
int __mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
		return -ENOENT;

	*idx = mlx4_bitmap_alloc(&priv->counters_bitmap);
	if (*idx == -1)
		return -ENOMEM;

	return 0;
}

int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx)
{
	u64 out_param;
	int err;

	if (mlx4_is_mfunc(dev)) {
		err = mlx4_cmd_imm(dev, 0, &out_param, RES_COUNTER,
				   RES_OP_RESERVE, MLX4_CMD_ALLOC_RES,
				   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
		if (!err)
			*idx = get_param_l(&out_param);

		return err;
	}
	return __mlx4_counter_alloc(dev, idx);
}
EXPORT_SYMBOL_GPL(mlx4_counter_alloc);
void __mlx4_counter_free(struct mlx4_dev *dev, u32 idx)
{
	mlx4_bitmap_free(&mlx4_priv(dev)->counters_bitmap, idx);
}

void mlx4_counter_free(struct mlx4_dev *dev, u32 idx)
{
	u64 in_param;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, idx);
		mlx4_cmd(dev, in_param, RES_COUNTER, RES_OP_RESERVE,
			 MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
			 MLX4_CMD_WRAPPED);
		return;
	}
	__mlx4_counter_free(dev, idx);
}
EXPORT_SYMBOL_GPL(mlx4_counter_free);
static int mlx4_setup_hca(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;
	int port;
	__be32 ib_port_default_caps;

	err = mlx4_init_uar_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "user access region table, aborting.\n");
		return err;
	}

	err = mlx4_uar_alloc(dev, &priv->driver_uar);
	if (err) {
		mlx4_err(dev, "Failed to allocate driver access region, "
			 "aborting.\n");
		goto err_uar_table_free;
	}

	priv->kar = ioremap((phys_addr_t) priv->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
	if (!priv->kar) {
		mlx4_err(dev, "Couldn't map kernel access region, "
			 "aborting.\n");
		err = -ENOMEM;
		goto err_uar_free;
	}

	err = mlx4_init_pd_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "protection domain table, aborting.\n");
		goto err_kar_unmap;
	}

	err = mlx4_init_xrcd_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "reliable connection domain table, aborting.\n");
		goto err_pd_table_free;
	}

	err = mlx4_init_mr_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "memory region table, aborting.\n");
		goto err_xrcd_table_free;
	}

	err = mlx4_init_eq_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "event queue table, aborting.\n");
		goto err_mr_table_free;
	}

	err = mlx4_cmd_use_events(dev);
	if (err) {
		mlx4_err(dev, "Failed to switch to event-driven "
			 "firmware commands, aborting.\n");
		goto err_eq_table_free;
	}

	err = mlx4_NOP(dev);
	if (err) {
		if (dev->flags & MLX4_FLAG_MSI_X) {
			mlx4_warn(dev, "NOP command failed to generate MSI-X "
				  "interrupt (IRQ %d).\n",
				  priv->eq_table.eq[dev->caps.num_comp_vectors].irq);
			mlx4_warn(dev, "Trying again without MSI-X.\n");
		} else {
			mlx4_err(dev, "NOP command failed to generate interrupt "
				 "(IRQ %d), aborting.\n",
				 priv->eq_table.eq[dev->caps.num_comp_vectors].irq);
			mlx4_err(dev, "BIOS or ACPI interrupt routing problem?\n");
		}

		goto err_cmd_poll;
	}

	mlx4_dbg(dev, "NOP command IRQ test passed\n");

	err = mlx4_init_cq_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "completion queue table, aborting.\n");
		goto err_cmd_poll;
	}

	err = mlx4_init_srq_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "shared receive queue table, aborting.\n");
		goto err_cq_table_free;
	}

	err = mlx4_init_qp_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "queue pair table, aborting.\n");
		goto err_srq_table_free;
	}

	if (!mlx4_is_slave(dev)) {
		err = mlx4_init_mcg_table(dev);
		if (err) {
			mlx4_err(dev, "Failed to initialize "
				 "multicast group table, aborting.\n");
			goto err_qp_table_free;
		}
	}

	err = mlx4_init_counters_table(dev);
	if (err && err != -ENOENT) {
		mlx4_err(dev, "Failed to initialize counters table, aborting.\n");
		goto err_mcg_table_free;
	}

	if (!mlx4_is_slave(dev)) {
		for (port = 1; port <= dev->caps.num_ports; port++) {
			ib_port_default_caps = 0;
			err = mlx4_get_port_ib_caps(dev, port,
						    &ib_port_default_caps);
			if (err)
				mlx4_warn(dev, "failed to get port %d default "
					  "ib capabilities (%d). Continuing "
					  "with caps = 0\n", port, err);
			dev->caps.ib_port_def_cap[port] = ib_port_default_caps;

			if (mlx4_is_mfunc(dev))
				dev->caps.port_ib_mtu[port] = IB_MTU_2048;
			else
				dev->caps.port_ib_mtu[port] = IB_MTU_4096;

			err = mlx4_SET_PORT(dev, port);
			if (err) {
				mlx4_err(dev, "Failed to set port %d, aborting\n",
					 port);
				goto err_counters_table_free;
			}
		}
	}

	return 0;

err_counters_table_free:
	mlx4_cleanup_counters_table(dev);

err_mcg_table_free:
	mlx4_cleanup_mcg_table(dev);

err_qp_table_free:
	mlx4_cleanup_qp_table(dev);

err_srq_table_free:
	mlx4_cleanup_srq_table(dev);

err_cq_table_free:
	mlx4_cleanup_cq_table(dev);

err_cmd_poll:
	mlx4_cmd_use_polling(dev);

err_eq_table_free:
	mlx4_cleanup_eq_table(dev);

err_mr_table_free:
	mlx4_cleanup_mr_table(dev);

err_xrcd_table_free:
	mlx4_cleanup_xrcd_table(dev);

err_pd_table_free:
	mlx4_cleanup_pd_table(dev);

err_kar_unmap:
	iounmap(priv->kar);

err_uar_free:
	mlx4_uar_free(dev, &priv->driver_uar);

err_uar_table_free:
	mlx4_cleanup_uar_table(dev);
	return err;
}
static void mlx4_enable_msi_x(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct msix_entry *entries;
	int nreq = min_t(int, dev->caps.num_ports *
			 min_t(int, num_online_cpus() + 1, MAX_MSIX_P_PORT)
				+ MSIX_LEGACY_SZ, MAX_MSIX);
	int err;
	int i;

	if (msi_x) {
		/* In multifunction mode each function gets 2 MSI-X vectors:
		 * one for data path completions and the other for async events
		 * or command completions */
		if (mlx4_is_mfunc(dev)) {
			nreq = 2;
		} else {
			nreq = min_t(int, dev->caps.num_eqs -
				     dev->caps.reserved_eqs, nreq);
		}

		entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL);
		if (!entries)
			goto no_msi;

		for (i = 0; i < nreq; ++i)
			entries[i].entry = i;

	retry:
		err = pci_enable_msix(dev->pdev, entries, nreq);
		if (err) {
			/* Try again if at least 2 vectors are available */
			if (err > 1) {
				mlx4_info(dev, "Requested %d vectors, "
					  "but only %d MSI-X vectors available, "
					  "trying again\n", nreq, err);
				nreq = err;
				goto retry;
			}
			kfree(entries);
			goto no_msi;
		}

		if (nreq <
		    MSIX_LEGACY_SZ + dev->caps.num_ports * MIN_MSIX_P_PORT) {
			/* Working in legacy mode, all EQs are shared */
			dev->caps.comp_pool	   = 0;
			dev->caps.num_comp_vectors = nreq - 1;
		} else {
			dev->caps.comp_pool	   = nreq - MSIX_LEGACY_SZ;
			dev->caps.num_comp_vectors = MSIX_LEGACY_SZ - 1;
		}
		for (i = 0; i < nreq; ++i)
			priv->eq_table.eq[i].irq = entries[i].vector;

		dev->flags |= MLX4_FLAG_MSI_X;

		kfree(entries);
		return;
	}

no_msi:
	dev->caps.num_comp_vectors = 1;
	dev->caps.comp_pool	   = 0;

	for (i = 0; i < 2; ++i)
		priv->eq_table.eq[i].irq = dev->pdev->irq;
}
;
1620 static int mlx4_init_port_info(struct mlx4_dev
*dev
, int port
)
1622 struct mlx4_port_info
*info
= &mlx4_priv(dev
)->port
[port
];
1627 if (!mlx4_is_slave(dev
)) {
1628 INIT_RADIX_TREE(&info
->mac_tree
, GFP_KERNEL
);
1629 mlx4_init_mac_table(dev
, &info
->mac_table
);
1630 mlx4_init_vlan_table(dev
, &info
->vlan_table
);
1632 dev
->caps
.reserved_qps_base
[MLX4_QP_REGION_ETH_ADDR
] +
1633 (port
- 1) * (1 << log_num_mac
);
1636 sprintf(info
->dev_name
, "mlx4_port%d", port
);
1637 info
->port_attr
.attr
.name
= info
->dev_name
;
1638 if (mlx4_is_mfunc(dev
))
1639 info
->port_attr
.attr
.mode
= S_IRUGO
;
1641 info
->port_attr
.attr
.mode
= S_IRUGO
| S_IWUSR
;
1642 info
->port_attr
.store
= set_port_type
;
1644 info
->port_attr
.show
= show_port_type
;
1645 sysfs_attr_init(&info
->port_attr
.attr
);
1647 err
= device_create_file(&dev
->pdev
->dev
, &info
->port_attr
);
1649 mlx4_err(dev
, "Failed to create file for port %d\n", port
);
1653 sprintf(info
->dev_mtu_name
, "mlx4_port%d_mtu", port
);
1654 info
->port_mtu_attr
.attr
.name
= info
->dev_mtu_name
;
1655 if (mlx4_is_mfunc(dev
))
1656 info
->port_mtu_attr
.attr
.mode
= S_IRUGO
;
1658 info
->port_mtu_attr
.attr
.mode
= S_IRUGO
| S_IWUSR
;
1659 info
->port_mtu_attr
.store
= set_port_ib_mtu
;
1661 info
->port_mtu_attr
.show
= show_port_ib_mtu
;
1662 sysfs_attr_init(&info
->port_mtu_attr
.attr
);
1664 err
= device_create_file(&dev
->pdev
->dev
, &info
->port_mtu_attr
);
1666 mlx4_err(dev
, "Failed to create mtu file for port %d\n", port
);
1667 device_remove_file(&info
->dev
->pdev
->dev
, &info
->port_attr
);
1674 static void mlx4_cleanup_port_info(struct mlx4_port_info
*info
)
1679 device_remove_file(&info
->dev
->pdev
->dev
, &info
->port_attr
);
1680 device_remove_file(&info
->dev
->pdev
->dev
, &info
->port_mtu_attr
);
1683 static int mlx4_init_steering(struct mlx4_dev
*dev
)
1685 struct mlx4_priv
*priv
= mlx4_priv(dev
);
1686 int num_entries
= dev
->caps
.num_ports
;
1689 priv
->steer
= kzalloc(sizeof(struct mlx4_steer
) * num_entries
, GFP_KERNEL
);
1693 for (i
= 0; i
< num_entries
; i
++)
1694 for (j
= 0; j
< MLX4_NUM_STEERS
; j
++) {
1695 INIT_LIST_HEAD(&priv
->steer
[i
].promisc_qps
[j
]);
1696 INIT_LIST_HEAD(&priv
->steer
[i
].steer_entries
[j
]);
1701 static void mlx4_clear_steering(struct mlx4_dev
*dev
)
1703 struct mlx4_priv
*priv
= mlx4_priv(dev
);
1704 struct mlx4_steer_index
*entry
, *tmp_entry
;
1705 struct mlx4_promisc_qp
*pqp
, *tmp_pqp
;
1706 int num_entries
= dev
->caps
.num_ports
;
1709 for (i
= 0; i
< num_entries
; i
++) {
1710 for (j
= 0; j
< MLX4_NUM_STEERS
; j
++) {
1711 list_for_each_entry_safe(pqp
, tmp_pqp
,
1712 &priv
->steer
[i
].promisc_qps
[j
],
1714 list_del(&pqp
->list
);
1717 list_for_each_entry_safe(entry
, tmp_entry
,
1718 &priv
->steer
[i
].steer_entries
[j
],
1720 list_del(&entry
->list
);
1721 list_for_each_entry_safe(pqp
, tmp_pqp
,
1724 list_del(&pqp
->list
);
1734 static int extended_func_num(struct pci_dev
*pdev
)
1736 return PCI_SLOT(pdev
->devfn
) * 8 + PCI_FUNC(pdev
->devfn
);
1739 #define MLX4_OWNER_BASE 0x8069c
1740 #define MLX4_OWNER_SIZE 4
1742 static int mlx4_get_ownership(struct mlx4_dev
*dev
)
1744 void __iomem
*owner
;
1747 owner
= ioremap(pci_resource_start(dev
->pdev
, 0) + MLX4_OWNER_BASE
,
1750 mlx4_err(dev
, "Failed to obtain ownership bit\n");
1759 static void mlx4_free_ownership(struct mlx4_dev
*dev
)
1761 void __iomem
*owner
;
1763 owner
= ioremap(pci_resource_start(dev
->pdev
, 0) + MLX4_OWNER_BASE
,
1766 mlx4_err(dev
, "Failed to obtain ownership bit\n");
1774 static int __mlx4_init_one(struct pci_dev
*pdev
, const struct pci_device_id
*id
)
1776 struct mlx4_priv
*priv
;
1777 struct mlx4_dev
*dev
;
1781 pr_info(DRV_NAME
": Initializing %s\n", pci_name(pdev
));
1783 err
= pci_enable_device(pdev
);
1785 dev_err(&pdev
->dev
, "Cannot enable PCI device, "
1789 if (num_vfs
> MLX4_MAX_NUM_VF
) {
1790 printk(KERN_ERR
"There are more VF's (%d) than allowed(%d)\n",
1791 num_vfs
, MLX4_MAX_NUM_VF
);
1797 if (((id
== NULL
) || !(id
->driver_data
& MLX4_VF
)) &&
1798 !(pci_resource_flags(pdev
, 0) & IORESOURCE_MEM
)) {
1799 dev_err(&pdev
->dev
, "Missing DCS, aborting."
1800 "(id == 0X%p, id->driver_data: 0x%lx,"
1801 " pci_resource_flags(pdev, 0):0x%lx)\n", id
,
1802 id
? id
->driver_data
: 0, pci_resource_flags(pdev
, 0));
1804 goto err_disable_pdev
;
1806 if (!(pci_resource_flags(pdev
, 2) & IORESOURCE_MEM
)) {
1807 dev_err(&pdev
->dev
, "Missing UAR, aborting.\n");
1809 goto err_disable_pdev
;
1812 err
= pci_request_regions(pdev
, DRV_NAME
);
1814 dev_err(&pdev
->dev
, "Couldn't get PCI resources, aborting\n");
1815 goto err_disable_pdev
;
1818 pci_set_master(pdev
);
1820 err
= pci_set_dma_mask(pdev
, DMA_BIT_MASK(64));
1822 dev_warn(&pdev
->dev
, "Warning: couldn't set 64-bit PCI DMA mask.\n");
1823 err
= pci_set_dma_mask(pdev
, DMA_BIT_MASK(32));
1825 dev_err(&pdev
->dev
, "Can't set PCI DMA mask, aborting.\n");
1826 goto err_release_regions
;
1829 err
= pci_set_consistent_dma_mask(pdev
, DMA_BIT_MASK(64));
1831 dev_warn(&pdev
->dev
, "Warning: couldn't set 64-bit "
1832 "consistent PCI DMA mask.\n");
1833 err
= pci_set_consistent_dma_mask(pdev
, DMA_BIT_MASK(32));
1835 dev_err(&pdev
->dev
, "Can't set consistent PCI DMA mask, "
1837 goto err_release_regions
;
1841 /* Allow large DMA segments, up to the firmware limit of 1 GB */
1842 dma_set_max_seg_size(&pdev
->dev
, 1024 * 1024 * 1024);
1844 priv
= kzalloc(sizeof *priv
, GFP_KERNEL
);
1846 dev_err(&pdev
->dev
, "Device struct alloc failed, "
1849 goto err_release_regions
;
1854 INIT_LIST_HEAD(&priv
->ctx_list
);
1855 spin_lock_init(&priv
->ctx_lock
);
1857 mutex_init(&priv
->port_mutex
);
1859 INIT_LIST_HEAD(&priv
->pgdir_list
);
1860 mutex_init(&priv
->pgdir_mutex
);
1862 INIT_LIST_HEAD(&priv
->bf_list
);
1863 mutex_init(&priv
->bf_mutex
);
1865 dev
->rev_id
= pdev
->revision
;
1866 /* Detect if this device is a virtual function */
1867 if (id
&& id
->driver_data
& MLX4_VF
) {
1868 /* When acting as pf, we normally skip vfs unless explicitly
1869 * requested to probe them. */
1870 if (num_vfs
&& extended_func_num(pdev
) > probe_vf
) {
1871 mlx4_warn(dev
, "Skipping virtual function:%d\n",
1872 extended_func_num(pdev
));
1876 mlx4_warn(dev
, "Detected virtual function - running in slave mode\n");
1877 dev
->flags
|= MLX4_FLAG_SLAVE
;
1879 /* We reset the device and enable SRIOV only for physical
1880 * devices. Try to claim ownership on the device;
1881 * if already taken, skip -- do not allow multiple PFs */
1882 err
= mlx4_get_ownership(dev
);
1887 mlx4_warn(dev
, "Multiple PFs not yet supported."
1895 mlx4_warn(dev
, "Enabling sriov with:%d vfs\n", num_vfs
);
1896 err
= pci_enable_sriov(pdev
, num_vfs
);
1898 mlx4_err(dev
, "Failed to enable sriov,"
1899 "continuing without sriov enabled"
1900 " (err = %d).\n", err
);
1903 mlx4_warn(dev
, "Running in master mode\n");
1904 dev
->flags
|= MLX4_FLAG_SRIOV
|
1906 dev
->num_vfs
= num_vfs
;
1911 * Now reset the HCA before we touch the PCI capabilities or
1912 * attempt a firmware command, since a boot ROM may have left
1913 * the HCA in an undefined state.
1915 err
= mlx4_reset(dev
);
1917 mlx4_err(dev
, "Failed to reset HCA, aborting.\n");
1923 if (mlx4_cmd_init(dev
)) {
1924 mlx4_err(dev
, "Failed to init command interface, aborting.\n");
1928 /* In slave functions, the communication channel must be initialized
1929 * before posting commands. Also, init num_slaves before calling
1931 if (mlx4_is_mfunc(dev
)) {
1932 if (mlx4_is_master(dev
))
1933 dev
->num_slaves
= MLX4_MAX_NUM_SLAVES
;
1935 dev
->num_slaves
= 0;
1936 if (mlx4_multi_func_init(dev
)) {
1937 mlx4_err(dev
, "Failed to init slave mfunc"
1938 " interface, aborting.\n");
1944 err
= mlx4_init_hca(dev
);
1946 if (err
== -EACCES
) {
1947 /* Not primary Physical function
1948 * Running in slave mode */
1949 mlx4_cmd_cleanup(dev
);
1950 dev
->flags
|= MLX4_FLAG_SLAVE
;
1951 dev
->flags
&= ~MLX4_FLAG_MASTER
;
1957 /* In master functions, the communication channel must be initialized
1958 * after obtaining its address from fw */
1959 if (mlx4_is_master(dev
)) {
1960 if (mlx4_multi_func_init(dev
)) {
1961 mlx4_err(dev
, "Failed to init master mfunc"
1962 "interface, aborting.\n");
1967 err
= mlx4_alloc_eq_table(dev
);
1969 goto err_master_mfunc
;
1971 priv
->msix_ctl
.pool_bm
= 0;
1972 mutex_init(&priv
->msix_ctl
.pool_lock
);
1974 mlx4_enable_msi_x(dev
);
1975 if ((mlx4_is_mfunc(dev
)) &&
1976 !(dev
->flags
& MLX4_FLAG_MSI_X
)) {
1977 mlx4_err(dev
, "INTx is not supported in multi-function mode."
1982 if (!mlx4_is_slave(dev
)) {
1983 err
= mlx4_init_steering(dev
);
1988 err
= mlx4_setup_hca(dev
);
1989 if (err
== -EBUSY
&& (dev
->flags
& MLX4_FLAG_MSI_X
) &&
1990 !mlx4_is_mfunc(dev
)) {
1991 dev
->flags
&= ~MLX4_FLAG_MSI_X
;
1992 pci_disable_msix(pdev
);
1993 err
= mlx4_setup_hca(dev
);
1999 for (port
= 1; port
<= dev
->caps
.num_ports
; port
++) {
2000 err
= mlx4_init_port_info(dev
, port
);
2005 err
= mlx4_register_device(dev
);
2009 mlx4_sense_init(dev
);
2010 mlx4_start_sense(dev
);
2012 pci_set_drvdata(pdev
, dev
);
2017 for (--port
; port
>= 1; --port
)
2018 mlx4_cleanup_port_info(&priv
->port
[port
]);
2020 mlx4_cleanup_counters_table(dev
);
2021 mlx4_cleanup_mcg_table(dev
);
2022 mlx4_cleanup_qp_table(dev
);
2023 mlx4_cleanup_srq_table(dev
);
2024 mlx4_cleanup_cq_table(dev
);
2025 mlx4_cmd_use_polling(dev
);
2026 mlx4_cleanup_eq_table(dev
);
2027 mlx4_cleanup_mr_table(dev
);
2028 mlx4_cleanup_xrcd_table(dev
);
2029 mlx4_cleanup_pd_table(dev
);
2030 mlx4_cleanup_uar_table(dev
);
2033 if (!mlx4_is_slave(dev
))
2034 mlx4_clear_steering(dev
);
2037 mlx4_free_eq_table(dev
);
2040 if (mlx4_is_master(dev
))
2041 mlx4_multi_func_cleanup(dev
);
2044 if (dev
->flags
& MLX4_FLAG_MSI_X
)
2045 pci_disable_msix(pdev
);
2047 mlx4_close_hca(dev
);
2050 if (mlx4_is_slave(dev
))
2051 mlx4_multi_func_cleanup(dev
);
2054 mlx4_cmd_cleanup(dev
);
2057 if (dev
->flags
& MLX4_FLAG_SRIOV
)
2058 pci_disable_sriov(pdev
);
2061 if (!mlx4_is_slave(dev
))
2062 mlx4_free_ownership(dev
);
2067 err_release_regions
:
2068 pci_release_regions(pdev
);
2071 pci_disable_device(pdev
);
2072 pci_set_drvdata(pdev
, NULL
);
2076 static int __devinit
mlx4_init_one(struct pci_dev
*pdev
,
2077 const struct pci_device_id
*id
)
2079 printk_once(KERN_INFO
"%s", mlx4_version
);
2081 return __mlx4_init_one(pdev
, id
);
2084 static void mlx4_remove_one(struct pci_dev
*pdev
)
2086 struct mlx4_dev
*dev
= pci_get_drvdata(pdev
);
2087 struct mlx4_priv
*priv
= mlx4_priv(dev
);
2091 /* in SRIOV it is not allowed to unload the pf's
2092 * driver while there are alive vf's */
2093 if (mlx4_is_master(dev
)) {
2094 if (mlx4_how_many_lives_vf(dev
))
2095 printk(KERN_ERR
"Removing PF when there are assigned VF's !!!\n");
2097 mlx4_stop_sense(dev
);
2098 mlx4_unregister_device(dev
);
2100 for (p
= 1; p
<= dev
->caps
.num_ports
; p
++) {
2101 mlx4_cleanup_port_info(&priv
->port
[p
]);
2102 mlx4_CLOSE_PORT(dev
, p
);
2105 if (mlx4_is_master(dev
))
2106 mlx4_free_resource_tracker(dev
,
2107 RES_TR_FREE_SLAVES_ONLY
);
2109 mlx4_cleanup_counters_table(dev
);
2110 mlx4_cleanup_mcg_table(dev
);
2111 mlx4_cleanup_qp_table(dev
);
2112 mlx4_cleanup_srq_table(dev
);
2113 mlx4_cleanup_cq_table(dev
);
2114 mlx4_cmd_use_polling(dev
);
2115 mlx4_cleanup_eq_table(dev
);
2116 mlx4_cleanup_mr_table(dev
);
2117 mlx4_cleanup_xrcd_table(dev
);
2118 mlx4_cleanup_pd_table(dev
);
2120 if (mlx4_is_master(dev
))
2121 mlx4_free_resource_tracker(dev
,
2122 RES_TR_FREE_STRUCTS_ONLY
);
2125 mlx4_uar_free(dev
, &priv
->driver_uar
);
2126 mlx4_cleanup_uar_table(dev
);
2127 if (!mlx4_is_slave(dev
))
2128 mlx4_clear_steering(dev
);
2129 mlx4_free_eq_table(dev
);
2130 if (mlx4_is_master(dev
))
2131 mlx4_multi_func_cleanup(dev
);
2132 mlx4_close_hca(dev
);
2133 if (mlx4_is_slave(dev
))
2134 mlx4_multi_func_cleanup(dev
);
2135 mlx4_cmd_cleanup(dev
);
2137 if (dev
->flags
& MLX4_FLAG_MSI_X
)
2138 pci_disable_msix(pdev
);
2139 if (dev
->flags
& MLX4_FLAG_SRIOV
) {
2140 mlx4_warn(dev
, "Disabling sriov\n");
2141 pci_disable_sriov(pdev
);
2144 if (!mlx4_is_slave(dev
))
2145 mlx4_free_ownership(dev
);
2147 pci_release_regions(pdev
);
2148 pci_disable_device(pdev
);
2149 pci_set_drvdata(pdev
, NULL
);
2153 int mlx4_restart_one(struct pci_dev
*pdev
)
2155 mlx4_remove_one(pdev
);
2156 return __mlx4_init_one(pdev
, NULL
);
2159 static DEFINE_PCI_DEVICE_TABLE(mlx4_pci_table
) = {
2160 /* MT25408 "Hermon" SDR */
2161 { PCI_VDEVICE(MELLANOX
, 0x6340), 0 },
2162 /* MT25408 "Hermon" DDR */
2163 { PCI_VDEVICE(MELLANOX
, 0x634a), 0 },
2164 /* MT25408 "Hermon" QDR */
2165 { PCI_VDEVICE(MELLANOX
, 0x6354), 0 },
2166 /* MT25408 "Hermon" DDR PCIe gen2 */
2167 { PCI_VDEVICE(MELLANOX
, 0x6732), 0 },
2168 /* MT25408 "Hermon" QDR PCIe gen2 */
2169 { PCI_VDEVICE(MELLANOX
, 0x673c), 0 },
2170 /* MT25408 "Hermon" EN 10GigE */
2171 { PCI_VDEVICE(MELLANOX
, 0x6368), 0 },
2172 /* MT25408 "Hermon" EN 10GigE PCIe gen2 */
2173 { PCI_VDEVICE(MELLANOX
, 0x6750), 0 },
2174 /* MT25458 ConnectX EN 10GBASE-T 10GigE */
2175 { PCI_VDEVICE(MELLANOX
, 0x6372), 0 },
2176 /* MT25458 ConnectX EN 10GBASE-T+Gen2 10GigE */
2177 { PCI_VDEVICE(MELLANOX
, 0x675a), 0 },
2178 /* MT26468 ConnectX EN 10GigE PCIe gen2*/
2179 { PCI_VDEVICE(MELLANOX
, 0x6764), 0 },
2180 /* MT26438 ConnectX EN 40GigE PCIe gen2 5GT/s */
2181 { PCI_VDEVICE(MELLANOX
, 0x6746), 0 },
2182 /* MT26478 ConnectX2 40GigE PCIe gen2 */
2183 { PCI_VDEVICE(MELLANOX
, 0x676e), 0 },
2184 /* MT25400 Family [ConnectX-2 Virtual Function] */
2185 { PCI_VDEVICE(MELLANOX
, 0x1002), MLX4_VF
},
2186 /* MT27500 Family [ConnectX-3] */
2187 { PCI_VDEVICE(MELLANOX
, 0x1003), 0 },
2188 /* MT27500 Family [ConnectX-3 Virtual Function] */
2189 { PCI_VDEVICE(MELLANOX
, 0x1004), MLX4_VF
},
2190 { PCI_VDEVICE(MELLANOX
, 0x1005), 0 }, /* MT27510 Family */
2191 { PCI_VDEVICE(MELLANOX
, 0x1006), 0 }, /* MT27511 Family */
2192 { PCI_VDEVICE(MELLANOX
, 0x1007), 0 }, /* MT27520 Family */
2193 { PCI_VDEVICE(MELLANOX
, 0x1008), 0 }, /* MT27521 Family */
2194 { PCI_VDEVICE(MELLANOX
, 0x1009), 0 }, /* MT27530 Family */
2195 { PCI_VDEVICE(MELLANOX
, 0x100a), 0 }, /* MT27531 Family */
2196 { PCI_VDEVICE(MELLANOX
, 0x100b), 0 }, /* MT27540 Family */
2197 { PCI_VDEVICE(MELLANOX
, 0x100c), 0 }, /* MT27541 Family */
2198 { PCI_VDEVICE(MELLANOX
, 0x100d), 0 }, /* MT27550 Family */
2199 { PCI_VDEVICE(MELLANOX
, 0x100e), 0 }, /* MT27551 Family */
2200 { PCI_VDEVICE(MELLANOX
, 0x100f), 0 }, /* MT27560 Family */
2201 { PCI_VDEVICE(MELLANOX
, 0x1010), 0 }, /* MT27561 Family */
2205 MODULE_DEVICE_TABLE(pci
, mlx4_pci_table
);
2207 static struct pci_driver mlx4_driver
= {
2209 .id_table
= mlx4_pci_table
,
2210 .probe
= mlx4_init_one
,
2211 .remove
= __devexit_p(mlx4_remove_one
)
2214 static int __init
mlx4_verify_params(void)
2216 if ((log_num_mac
< 0) || (log_num_mac
> 7)) {
2217 pr_warning("mlx4_core: bad num_mac: %d\n", log_num_mac
);
2221 if (log_num_vlan
!= 0)
2222 pr_warning("mlx4_core: log_num_vlan - obsolete module param, using %d\n",
2223 MLX4_LOG_NUM_VLANS
);
2225 if ((log_mtts_per_seg
< 1) || (log_mtts_per_seg
> 7)) {
2226 pr_warning("mlx4_core: bad log_mtts_per_seg: %d\n", log_mtts_per_seg
);
2230 /* Check if module param for ports type has legal combination */
2231 if (port_type_array
[0] == false && port_type_array
[1] == true) {
2232 printk(KERN_WARNING
"Module parameter configuration ETH/IB is not supported. Switching to default configuration IB/IB\n");
2233 port_type_array
[0] = true;
static int __init mlx4_init(void)
{
	int ret;

	if (mlx4_verify_params())
		return -EINVAL;

	mlx4_catas_init();

	mlx4_wq = create_singlethread_workqueue("mlx4");
	if (!mlx4_wq)
		return -ENOMEM;

	ret = pci_register_driver(&mlx4_driver);
	return ret < 0 ? ret : 0;
}

static void __exit mlx4_cleanup(void)
{
	pci_unregister_driver(&mlx4_driver);
	destroy_workqueue(mlx4_wq);
}

module_init(mlx4_init);
module_exit(mlx4_cleanup);