/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/io-mapping.h>
#include <linux/delay.h>
#include <linux/kmod.h>

#include <linux/mlx4/device.h>
#include <linux/mlx4/doorbell.h>

#include "mlx4.h"
#include "fw.h"
#include "icm.h"
MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("Mellanox ConnectX HCA low-level driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);

struct workqueue_struct *mlx4_wq;
#ifdef CONFIG_MLX4_DEBUG

int mlx4_debug_level = 0;
module_param_named(debug_level, mlx4_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");

#endif /* CONFIG_MLX4_DEBUG */
#ifdef CONFIG_PCI_MSI

static int msi_x = 1;
module_param(msi_x, int, 0444);
MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero");

#else /* CONFIG_PCI_MSI */

#define msi_x (0)

#endif /* CONFIG_PCI_MSI */
static uint8_t num_vfs[3] = {0, 0, 0};
static int num_vfs_argc;
module_param_array(num_vfs, byte, &num_vfs_argc, 0444);
MODULE_PARM_DESC(num_vfs, "enable #num_vfs functions if num_vfs > 0\n"
			  "num_vfs=port1,port2,port1+2");
static uint8_t probe_vf[3] = {0, 0, 0};
static int probe_vfs_argc;
module_param_array(probe_vf, byte, &probe_vfs_argc, 0444);
MODULE_PARM_DESC(probe_vf, "number of vfs to probe by pf driver (num_vfs > 0)\n"
			   "probe_vf=port1,port2,port1+2");
int mlx4_log_num_mgm_entry_size = MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE;
module_param_named(log_num_mgm_entry_size,
		   mlx4_log_num_mgm_entry_size, int, 0444);
MODULE_PARM_DESC(log_num_mgm_entry_size, "log mgm size, that defines the num"
					 " of qp per mcg, for example:"
					 " 10 gives 248. range: 7 <="
					 " log_num_mgm_entry_size <= 12."
					 " To activate device managed"
					 " flow steering when available, set to -1");
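/*
 * A worked example for the description above, assuming the
 * 4 * ((1 << log_mc_entry_sz) / 16 - 2) sizing that
 * slave_adjust_steering_mode() uses below: with
 * log_num_mgm_entry_size = 10, 4 * (1024 / 16 - 2) = 4 * 62 = 248
 * QPs per MCG entry.
 */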
static bool enable_64b_cqe_eqe = true;
module_param(enable_64b_cqe_eqe, bool, 0444);
MODULE_PARM_DESC(enable_64b_cqe_eqe,
		 "Enable 64 byte CQEs/EQEs when the FW supports this (default: True)");
#define PF_CONTEXT_BEHAVIOUR_MASK	(MLX4_FUNC_CAP_64B_EQE_CQE | \
					 MLX4_FUNC_CAP_EQE_CQE_STRIDE)
static char mlx4_version[] =
	DRV_NAME ": Mellanox ConnectX core driver v"
	DRV_VERSION " (" DRV_RELDATE ")\n";
static struct mlx4_profile default_profile = {
	.rdmarc_per_qp	= 1 << 4,
	.num_mtt	= 1 << 20, /* It is really num mtt segments */
};

static struct mlx4_profile low_mem_profile = {
	.rdmarc_per_qp	= 1 << 4,
};
static int log_num_mac = 7;
module_param_named(log_num_mac, log_num_mac, int, 0444);
MODULE_PARM_DESC(log_num_mac, "Log2 max number of MACs per ETH port (1-7)");
static int log_num_vlan;
module_param_named(log_num_vlan, log_num_vlan, int, 0444);
MODULE_PARM_DESC(log_num_vlan, "Log2 max number of VLANs per ETH port (0-7)");
/* Log2 max number of VLANs per ETH port (0-7) */
#define MLX4_LOG_NUM_VLANS 7
#define MLX4_MIN_LOG_NUM_VLANS 0
#define MLX4_MIN_LOG_NUM_MAC 1
static bool use_prio;
module_param_named(use_prio, use_prio, bool, 0444);
MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports (deprecated)");
int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG);
module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444);
MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-7)");
static int port_type_array[2] = {MLX4_PORT_TYPE_NONE, MLX4_PORT_TYPE_NONE};
static int arr_argc = 2;
module_param_array(port_type_array, int, &arr_argc, 0444);
MODULE_PARM_DESC(port_type_array, "Array of port types: HW_DEFAULT (0) is default, "
				  "1 for IB, 2 for Ethernet");
struct mlx4_port_config {
	struct list_head list;
	enum mlx4_port_type port_type[MLX4_MAX_PORTS + 1];
	struct pci_dev *pdev;
};

static atomic_t pf_loading = ATOMIC_INIT(0);
int mlx4_check_port_params(struct mlx4_dev *dev,
			   enum mlx4_port_type *port_type)
{
	int i;

	for (i = 0; i < dev->caps.num_ports - 1; i++) {
		if (port_type[i] != port_type[i + 1]) {
			if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) {
				mlx4_err(dev, "Only same port types supported on this HCA, aborting\n");
				return -EINVAL;
			}
		}
	}

	for (i = 0; i < dev->caps.num_ports; i++) {
		if (!(port_type[i] & dev->caps.supported_type[i+1])) {
			mlx4_err(dev, "Requested port type for port %d is not supported on this HCA\n",
				 i + 1);
			return -EINVAL;
		}
	}
	return 0;
}
static void mlx4_set_port_mask(struct mlx4_dev *dev)
{
	int i;

	for (i = 1; i <= dev->caps.num_ports; ++i)
		dev->caps.port_mask[i] = dev->caps.port_type[i];
}
enum {
	MLX4_QUERY_FUNC_NUM_SYS_EQS = 1 << 0,
};
static int mlx4_query_func(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
{
	int err = 0;
	struct mlx4_func func;

	if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) {
		err = mlx4_QUERY_FUNC(dev, &func, 0);
		if (err) {
			mlx4_err(dev, "QUERY_FUNC command failed, aborting.\n");
			return err;
		}
		dev_cap->max_eqs = func.max_eq;
		dev_cap->reserved_eqs = func.rsvd_eqs;
		dev_cap->reserved_uars = func.rsvd_uars;
		err |= MLX4_QUERY_FUNC_NUM_SYS_EQS;
	}

	return err;
}
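/*
 * Note on the return convention above: a negative value is a real
 * failure, while a non-negative value doubles as a bitmask of which
 * dev_cap fields were refreshed (currently only
 * MLX4_QUERY_FUNC_NUM_SYS_EQS), which is why callers check err < 0
 * before testing individual bits.
 */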
static void mlx4_enable_cqe_eqe_stride(struct mlx4_dev *dev)
{
	struct mlx4_caps *dev_cap = &dev->caps;

	/* FW not supporting or cancelled by user */
	if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_EQE_STRIDE) ||
	    !(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_CQE_STRIDE))
		return;

	/* Must have 64B CQE_EQE enabled by FW to use bigger stride
	 * When FW has NCSI it may decide not to report 64B CQE/EQEs
	 */
	if (!(dev_cap->flags & MLX4_DEV_CAP_FLAG_64B_EQE) ||
	    !(dev_cap->flags & MLX4_DEV_CAP_FLAG_64B_CQE)) {
		dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE;
		dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE;
		return;
	}

	if (cache_line_size() == 128 || cache_line_size() == 256) {
		mlx4_dbg(dev, "Enabling CQE stride cacheLine supported\n");
		/* Changing the real data inside CQE size to 32B */
		dev_cap->flags &= ~MLX4_DEV_CAP_FLAG_64B_CQE;
		dev_cap->flags &= ~MLX4_DEV_CAP_FLAG_64B_EQE;

		if (mlx4_is_master(dev))
			dev_cap->function_caps |= MLX4_FUNC_CAP_EQE_CQE_STRIDE;
	} else {
		mlx4_dbg(dev, "Disabling CQE stride cacheLine unsupported\n");
		dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE;
		dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE;
	}
}
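/*
 * Intent, as far as can be inferred from the code above: on CPUs with
 * 128B or 256B cache lines, a 64B-strided entry carrying only 32B of
 * real CQE/EQE data keeps each entry within one cache line while
 * preserving the 64B spacing the FW reports; on smaller cache lines
 * the stride capability is simply masked out.
 */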
static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
{
	int err;
	int i;

	err = mlx4_QUERY_DEV_CAP(dev, dev_cap);
	if (err) {
		mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
		return err;
	}

	if (dev_cap->min_page_sz > PAGE_SIZE) {
		mlx4_err(dev, "HCA minimum page size of %d bigger than kernel PAGE_SIZE of %ld, aborting\n",
			 dev_cap->min_page_sz, PAGE_SIZE);
		return -ENODEV;
	}
	if (dev_cap->num_ports > MLX4_MAX_PORTS) {
		mlx4_err(dev, "HCA has %d ports, but we only support %d, aborting\n",
			 dev_cap->num_ports, MLX4_MAX_PORTS);
		return -ENODEV;
	}

	if (dev_cap->uar_size > pci_resource_len(dev->pdev, 2)) {
		mlx4_err(dev, "HCA reported UAR size of 0x%x bigger than PCI resource 2 size of 0x%llx, aborting\n",
			 dev_cap->uar_size,
			 (unsigned long long) pci_resource_len(dev->pdev, 2));
		return -ENODEV;
	}

	dev->caps.num_ports	     = dev_cap->num_ports;
	dev->caps.num_sys_eqs	     = dev_cap->num_sys_eqs;
	dev->phys_caps.num_phys_eqs  = dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS ?
				       dev->caps.num_sys_eqs :
				       MLX4_MAX_EQ_NUM;
	for (i = 1; i <= dev->caps.num_ports; ++i) {
		dev->caps.vl_cap[i]	    = dev_cap->max_vl[i];
		dev->caps.ib_mtu_cap[i]	    = dev_cap->ib_mtu[i];
		dev->phys_caps.gid_phys_table_len[i]  = dev_cap->max_gids[i];
		dev->phys_caps.pkey_phys_table_len[i] = dev_cap->max_pkeys[i];
		/* set gid and pkey table operating lengths by default
		 * to non-sriov values */
		dev->caps.gid_table_len[i]  = dev_cap->max_gids[i];
		dev->caps.pkey_table_len[i] = dev_cap->max_pkeys[i];
		dev->caps.port_width_cap[i] = dev_cap->max_port_width[i];
		dev->caps.eth_mtu_cap[i]    = dev_cap->eth_mtu[i];
		dev->caps.def_mac[i]	    = dev_cap->def_mac[i];
		dev->caps.supported_type[i] = dev_cap->supported_port_types[i];
		dev->caps.suggested_type[i] = dev_cap->suggested_type[i];
		dev->caps.default_sense[i]  = dev_cap->default_sense[i];
		dev->caps.trans_type[i]	    = dev_cap->trans_type[i];
		dev->caps.vendor_oui[i]	    = dev_cap->vendor_oui[i];
		dev->caps.wavelength[i]	    = dev_cap->wavelength[i];
		dev->caps.trans_code[i]	    = dev_cap->trans_code[i];
	}

	dev->caps.uar_page_size	     = PAGE_SIZE;
	dev->caps.num_uars	     = dev_cap->uar_size / PAGE_SIZE;
	dev->caps.local_ca_ack_delay = dev_cap->local_ca_ack_delay;
	dev->caps.bf_reg_size	     = dev_cap->bf_reg_size;
	dev->caps.bf_regs_per_page   = dev_cap->bf_regs_per_page;
	dev->caps.max_sq_sg	     = dev_cap->max_sq_sg;
	dev->caps.max_rq_sg	     = dev_cap->max_rq_sg;
	dev->caps.max_wqes	     = dev_cap->max_qp_sz;
	dev->caps.max_qp_init_rdma   = dev_cap->max_requester_per_qp;
	dev->caps.max_srq_wqes	     = dev_cap->max_srq_sz;
	dev->caps.max_srq_sge	     = dev_cap->max_rq_sg - 1;
	dev->caps.reserved_srqs	     = dev_cap->reserved_srqs;
	dev->caps.max_sq_desc_sz     = dev_cap->max_sq_desc_sz;
	dev->caps.max_rq_desc_sz     = dev_cap->max_rq_desc_sz;
	/*
	 * Subtract 1 from the limit because we need to allocate a
	 * spare CQE so the HCA HW can tell the difference between an
	 * empty CQ and a full CQ.
	 */
	dev->caps.max_cqes	     = dev_cap->max_cq_sz - 1;
	dev->caps.reserved_cqs	     = dev_cap->reserved_cqs;
	dev->caps.reserved_eqs	     = dev_cap->reserved_eqs;
	dev->caps.reserved_mtts	     = dev_cap->reserved_mtts;
	dev->caps.reserved_mrws	     = dev_cap->reserved_mrws;

	/* The first 128 UARs are used for EQ doorbells */
	dev->caps.reserved_uars	     = max_t(int, 128, dev_cap->reserved_uars);
	dev->caps.reserved_pds	     = dev_cap->reserved_pds;
	dev->caps.reserved_xrcds     = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ?
					dev_cap->reserved_xrcds : 0;
	dev->caps.max_xrcds	     = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ?
					dev_cap->max_xrcds : 0;
	dev->caps.mtt_entry_sz	     = dev_cap->mtt_entry_sz;

	dev->caps.max_msg_sz	     = dev_cap->max_msg_sz;
	dev->caps.page_size_cap	     = ~(u32) (dev_cap->min_page_sz - 1);
	dev->caps.flags		     = dev_cap->flags;
	dev->caps.flags2	     = dev_cap->flags2;
	dev->caps.bmme_flags	     = dev_cap->bmme_flags;
	dev->caps.reserved_lkey	     = dev_cap->reserved_lkey;
	dev->caps.stat_rate_support  = dev_cap->stat_rate_support;
	dev->caps.max_gso_sz	     = dev_cap->max_gso_sz;
	dev->caps.max_rss_tbl_sz     = dev_cap->max_rss_tbl_sz;

	/* Sense port always allowed on supported devices for ConnectX-1 and -2 */
	if (mlx4_priv(dev)->pci_dev_data & MLX4_PCI_DEV_FORCE_SENSE_PORT)
		dev->caps.flags |= MLX4_DEV_CAP_FLAG_SENSE_SUPPORT;
	/* Don't do sense port on multifunction devices (for now at least) */
	if (mlx4_is_mfunc(dev))
		dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_SENSE_SUPPORT;

	if (mlx4_low_memory_profile()) {
		dev->caps.log_num_macs	= MLX4_MIN_LOG_NUM_MAC;
		dev->caps.log_num_vlans	= MLX4_MIN_LOG_NUM_VLANS;
	} else {
		dev->caps.log_num_macs	= log_num_mac;
		dev->caps.log_num_vlans	= MLX4_LOG_NUM_VLANS;
	}

	for (i = 1; i <= dev->caps.num_ports; ++i) {
		dev->caps.port_type[i] = MLX4_PORT_TYPE_NONE;
		if (dev->caps.supported_type[i]) {
			/* if only ETH is supported - assign ETH */
			if (dev->caps.supported_type[i] == MLX4_PORT_TYPE_ETH)
				dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH;
			/* if only IB is supported, assign IB */
			else if (dev->caps.supported_type[i] ==
				 MLX4_PORT_TYPE_IB)
				dev->caps.port_type[i] = MLX4_PORT_TYPE_IB;
			else {
				/* if IB and ETH are supported, we set the port
				 * type according to user selection of port type;
				 * if user selected none, take the FW hint */
				if (port_type_array[i - 1] == MLX4_PORT_TYPE_NONE)
					dev->caps.port_type[i] = dev->caps.suggested_type[i] ?
						MLX4_PORT_TYPE_ETH : MLX4_PORT_TYPE_IB;
				else
					dev->caps.port_type[i] = port_type_array[i - 1];
			}
		}
		/*
		 * Link sensing is allowed on the port if 3 conditions are true:
		 * 1. Both protocols are supported on the port.
		 * 2. Different types are supported on the port
		 * 3. FW declared that it supports link sensing
		 */
		mlx4_priv(dev)->sense.sense_allowed[i] =
			((dev->caps.supported_type[i] == MLX4_PORT_TYPE_AUTO) &&
			 (dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) &&
			 (dev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT));

		/*
		 * If "default_sense" bit is set, we move the port to "AUTO" mode
		 * and perform sense_port FW command to try and set the correct
		 * port type from beginning
		 */
		if (mlx4_priv(dev)->sense.sense_allowed[i] && dev->caps.default_sense[i]) {
			enum mlx4_port_type sensed_port = MLX4_PORT_TYPE_NONE;
			dev->caps.possible_type[i] = MLX4_PORT_TYPE_AUTO;
			mlx4_SENSE_PORT(dev, i, &sensed_port);
			if (sensed_port != MLX4_PORT_TYPE_NONE)
				dev->caps.port_type[i] = sensed_port;
		} else {
			dev->caps.possible_type[i] = dev->caps.port_type[i];
		}

		if (dev->caps.log_num_macs > dev_cap->log_max_macs[i]) {
			dev->caps.log_num_macs = dev_cap->log_max_macs[i];
			mlx4_warn(dev, "Requested number of MACs is too much for port %d, reducing to %d\n",
				  i, 1 << dev->caps.log_num_macs);
		}
		if (dev->caps.log_num_vlans > dev_cap->log_max_vlans[i]) {
			dev->caps.log_num_vlans = dev_cap->log_max_vlans[i];
			mlx4_warn(dev, "Requested number of VLANs is too much for port %d, reducing to %d\n",
				  i, 1 << dev->caps.log_num_vlans);
		}
	}

	dev->caps.max_counters = 1 << ilog2(dev_cap->max_counters);

	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] = dev_cap->reserved_qps;
	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] =
		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] =
		(1 << dev->caps.log_num_macs) *
		(1 << dev->caps.log_num_vlans) *
		dev->caps.num_ports;
	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH] = MLX4_NUM_FEXCH;

	dev->caps.reserved_qps = dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] +
		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] +
		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] +
		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH];

	dev->caps.sqp_demux = (mlx4_is_master(dev)) ? MLX4_MAX_NUM_SLAVES : 0;

	if (!enable_64b_cqe_eqe && !mlx4_is_slave(dev)) {
		if (dev_cap->flags &
		    (MLX4_DEV_CAP_FLAG_64B_CQE | MLX4_DEV_CAP_FLAG_64B_EQE)) {
			mlx4_warn(dev, "64B EQEs/CQEs supported by the device but not enabled\n");
			dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_64B_CQE;
			dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_64B_EQE;
		}

		if (dev_cap->flags2 &
		    (MLX4_DEV_CAP_FLAG2_CQE_STRIDE |
		     MLX4_DEV_CAP_FLAG2_EQE_STRIDE)) {
			mlx4_warn(dev, "Disabling EQE/CQE stride per user request\n");
			dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE;
			dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE;
		}
	}

	if ((dev->caps.flags &
	    (MLX4_DEV_CAP_FLAG_64B_CQE | MLX4_DEV_CAP_FLAG_64B_EQE)) &&
	    mlx4_is_master(dev))
		dev->caps.function_caps |= MLX4_FUNC_CAP_64B_EQE_CQE;

	if (!mlx4_is_slave(dev))
		mlx4_enable_cqe_eqe_stride(dev);

	return 0;
}
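/*
 * Sizing illustration (not from the original source): with the default
 * log_num_macs = 7, MLX4_LOG_NUM_VLANS = 7 and a 2-port HCA, the
 * MLX4_QP_REGION_ETH_ADDR (and FC_ADDR) count computed above is
 * (1 << 7) * (1 << 7) * 2 = 32768 reserved QPs each, in addition to
 * the FW region reported by QUERY_DEV_CAP.
 */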
static int mlx4_get_pcie_dev_link_caps(struct mlx4_dev *dev,
				       enum pci_bus_speed *speed,
				       enum pcie_link_width *width)
{
	u32 lnkcap1, lnkcap2;
	int err1, err2;

#define  PCIE_MLW_CAP_SHIFT 4	/* start of MLW mask in link capabilities */

	*speed = PCI_SPEED_UNKNOWN;
	*width = PCIE_LNK_WIDTH_UNKNOWN;

	err1 = pcie_capability_read_dword(dev->pdev, PCI_EXP_LNKCAP, &lnkcap1);
	err2 = pcie_capability_read_dword(dev->pdev, PCI_EXP_LNKCAP2, &lnkcap2);
	if (!err2 && lnkcap2) { /* PCIe r3.0-compliant */
		if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB)
			*speed = PCIE_SPEED_8_0GT;
		else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB)
			*speed = PCIE_SPEED_5_0GT;
		else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB)
			*speed = PCIE_SPEED_2_5GT;
	}
	if (!err1) {
		*width = (lnkcap1 & PCI_EXP_LNKCAP_MLW) >> PCIE_MLW_CAP_SHIFT;
		if (!lnkcap2) { /* pre-r3.0 */
			if (lnkcap1 & PCI_EXP_LNKCAP_SLS_5_0GB)
				*speed = PCIE_SPEED_5_0GT;
			else if (lnkcap1 & PCI_EXP_LNKCAP_SLS_2_5GB)
				*speed = PCIE_SPEED_2_5GT;
		}
	}

	if (*speed == PCI_SPEED_UNKNOWN || *width == PCIE_LNK_WIDTH_UNKNOWN) {
		return err1 ? err1 :
			err2 ? err2 : -EINVAL;
	}
	return 0;
}
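/*
 * Note: the LNKCAP2 register only exists on PCIe r3.0-compliant
 * devices, hence the fallback above that decodes the supported link
 * speeds from LNKCAP on pre-r3.0 parts; the link width is always taken
 * from LNKCAP's MLW field.
 */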
static void mlx4_check_pcie_caps(struct mlx4_dev *dev)
{
	enum pcie_link_width width, width_cap;
	enum pci_bus_speed speed, speed_cap;
	int err;

#define PCIE_SPEED_STR(speed) \
	(speed == PCIE_SPEED_8_0GT ? "8.0GT/s" : \
	 speed == PCIE_SPEED_5_0GT ? "5.0GT/s" : \
	 speed == PCIE_SPEED_2_5GT ? "2.5GT/s" : \
	 "Unknown")

	err = mlx4_get_pcie_dev_link_caps(dev, &speed_cap, &width_cap);
	if (err) {
		mlx4_warn(dev,
			  "Unable to determine PCIe device BW capabilities\n");
		return;
	}

	err = pcie_get_minimum_link(dev->pdev, &speed, &width);
	if (err || speed == PCI_SPEED_UNKNOWN ||
	    width == PCIE_LNK_WIDTH_UNKNOWN) {
		mlx4_warn(dev,
			  "Unable to determine PCI device chain minimum BW\n");
		return;
	}

	if (width != width_cap || speed != speed_cap)
		mlx4_warn(dev,
			  "PCIe BW is different than device's capability\n");

	mlx4_info(dev, "PCIe link speed is %s, device supports %s\n",
		  PCIE_SPEED_STR(speed), PCIE_SPEED_STR(speed_cap));
	mlx4_info(dev, "PCIe link width is x%d, device supports x%d\n",
		  width, width_cap);
}
/* Check whether any VFs are still live, and return how many there are */
static int mlx4_how_many_lives_vf(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *s_state;
	int i;
	int ret = 0;

	for (i = 1/*the ppf is 0*/; i < dev->num_slaves; ++i) {
		s_state = &priv->mfunc.master.slave_state[i];
		if (s_state->active && s_state->last_cmd !=
		    MLX4_COMM_CMD_RESET) {
			mlx4_warn(dev, "%s: slave: %d is still active\n",
				  __func__, i);
			ret++;
		}
	}
	return ret;
}
int mlx4_get_parav_qkey(struct mlx4_dev *dev, u32 qpn, u32 *qkey)
{
	u32 qk = MLX4_RESERVED_QKEY_BASE;

	if (qpn >= dev->phys_caps.base_tunnel_sqpn + 8 * MLX4_MFUNC_MAX ||
	    qpn < dev->phys_caps.base_proxy_sqpn)
		return -EINVAL;

	if (qpn >= dev->phys_caps.base_tunnel_sqpn)
		/* tunnel qp */
		qk += qpn - dev->phys_caps.base_tunnel_sqpn;
	else
		qk += qpn - dev->phys_caps.base_proxy_sqpn;
	*qkey = qk;
	return 0;
}
EXPORT_SYMBOL(mlx4_get_parav_qkey);
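/*
 * Layout sketch (as implied by the bounds check above): proxy SQPs
 * occupy [base_proxy_sqpn, base_tunnel_sqpn) and tunnel SQPs occupy
 * [base_tunnel_sqpn, base_tunnel_sqpn + 8 * MLX4_MFUNC_MAX); each QP's
 * qkey is MLX4_RESERVED_QKEY_BASE plus its offset within its own range.
 */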
void mlx4_sync_pkey_table(struct mlx4_dev *dev, int slave, int port, int i, int val)
{
	struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev);

	if (!mlx4_is_master(dev))
		return;

	priv->virt2phys_pkey[slave][port - 1][i] = val;
}
EXPORT_SYMBOL(mlx4_sync_pkey_table);
void mlx4_put_slave_node_guid(struct mlx4_dev *dev, int slave, __be64 guid)
{
	struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev);

	if (!mlx4_is_master(dev))
		return;

	priv->slave_node_guids[slave] = guid;
}
EXPORT_SYMBOL(mlx4_put_slave_node_guid);
__be64 mlx4_get_slave_node_guid(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev);

	if (!mlx4_is_master(dev))
		return 0;

	return priv->slave_node_guids[slave];
}
EXPORT_SYMBOL(mlx4_get_slave_node_guid);
int mlx4_is_slave_active(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *s_slave;

	if (!mlx4_is_master(dev))
		return 0;

	s_slave = &priv->mfunc.master.slave_state[slave];
	return !!s_slave->active;
}
EXPORT_SYMBOL(mlx4_is_slave_active);
static void slave_adjust_steering_mode(struct mlx4_dev *dev,
				       struct mlx4_dev_cap *dev_cap,
				       struct mlx4_init_hca_param *hca_param)
{
	dev->caps.steering_mode = hca_param->steering_mode;
	if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
		dev->caps.num_qp_per_mgm = dev_cap->fs_max_num_qp_per_entry;
		dev->caps.fs_log_max_ucast_qp_range_size =
			dev_cap->fs_log_max_ucast_qp_range_size;
	} else
		dev->caps.num_qp_per_mgm =
			4 * ((1 << hca_param->log_mc_entry_sz)/16 - 2);

	mlx4_dbg(dev, "Steering mode is: %s\n",
		 mlx4_steering_mode_str(dev->caps.steering_mode));
}
static int mlx4_slave_cap(struct mlx4_dev *dev)
{
	int			   err;
	u32			   page_size;
	struct mlx4_dev_cap	   dev_cap;
	struct mlx4_func_cap	   func_cap;
	struct mlx4_init_hca_param hca_param;
	int			   i;

	memset(&hca_param, 0, sizeof(hca_param));
	err = mlx4_QUERY_HCA(dev, &hca_param);
	if (err) {
		mlx4_err(dev, "QUERY_HCA command failed, aborting\n");
		return err;
	}

	/* fail if the hca has an unknown global capability
	 * at this time global_caps should be always zeroed
	 */
	if (hca_param.global_caps) {
		mlx4_err(dev, "Unknown hca global capabilities\n");
		return -ENOSYS;
	}

	mlx4_log_num_mgm_entry_size = hca_param.log_mc_entry_sz;

	dev->caps.hca_core_clock = hca_param.hca_core_clock;

	memset(&dev_cap, 0, sizeof(dev_cap));
	dev->caps.max_qp_dest_rdma = 1 << hca_param.log_rd_per_qp;
	err = mlx4_dev_cap(dev, &dev_cap);
	if (err) {
		mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
		return err;
	}

	err = mlx4_QUERY_FW(dev);
	if (err)
		mlx4_err(dev, "QUERY_FW command failed: could not get FW version\n");

	page_size = ~dev->caps.page_size_cap + 1;
	mlx4_warn(dev, "HCA minimum page size:%d\n", page_size);
	if (page_size > PAGE_SIZE) {
		mlx4_err(dev, "HCA minimum page size of %d bigger than kernel PAGE_SIZE of %ld, aborting\n",
			 page_size, PAGE_SIZE);
		return -ENODEV;
	}

	/* slave gets uar page size from QUERY_HCA fw command */
	dev->caps.uar_page_size = 1 << (hca_param.uar_page_sz + 12);

	/* TODO: relax this assumption */
	if (dev->caps.uar_page_size != PAGE_SIZE) {
		mlx4_err(dev, "UAR size:%d != kernel PAGE_SIZE of %ld\n",
			 dev->caps.uar_page_size, PAGE_SIZE);
		return -ENODEV;
	}

	memset(&func_cap, 0, sizeof(func_cap));
	err = mlx4_QUERY_FUNC_CAP(dev, 0, &func_cap);
	if (err) {
		mlx4_err(dev, "QUERY_FUNC_CAP general command failed, aborting (%d)\n",
			 err);
		return err;
	}

	if ((func_cap.pf_context_behaviour | PF_CONTEXT_BEHAVIOUR_MASK) !=
	    PF_CONTEXT_BEHAVIOUR_MASK) {
		mlx4_err(dev, "Unknown pf context behaviour\n");
		return -ENOSYS;
	}

	dev->caps.num_ports		= func_cap.num_ports;
	dev->quotas.qp			= func_cap.qp_quota;
	dev->quotas.srq			= func_cap.srq_quota;
	dev->quotas.cq			= func_cap.cq_quota;
	dev->quotas.mpt			= func_cap.mpt_quota;
	dev->quotas.mtt			= func_cap.mtt_quota;
	dev->caps.num_qps		= 1 << hca_param.log_num_qps;
	dev->caps.num_srqs		= 1 << hca_param.log_num_srqs;
	dev->caps.num_cqs		= 1 << hca_param.log_num_cqs;
	dev->caps.num_mpts		= 1 << hca_param.log_mpt_sz;
	dev->caps.num_eqs		= func_cap.max_eq;
	dev->caps.reserved_eqs		= func_cap.reserved_eq;
	dev->caps.num_pds		= MLX4_NUM_PDS;
	dev->caps.num_mgms		= 0;
	dev->caps.num_amgms		= 0;

	if (dev->caps.num_ports > MLX4_MAX_PORTS) {
		mlx4_err(dev, "HCA has %d ports, but we only support %d, aborting\n",
			 dev->caps.num_ports, MLX4_MAX_PORTS);
		return -ENODEV;
	}

	dev->caps.qp0_qkey = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL);
	dev->caps.qp0_tunnel = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL);
	dev->caps.qp0_proxy = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL);
	dev->caps.qp1_tunnel = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL);
	dev->caps.qp1_proxy = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL);

	if (!dev->caps.qp0_tunnel || !dev->caps.qp0_proxy ||
	    !dev->caps.qp1_tunnel || !dev->caps.qp1_proxy ||
	    !dev->caps.qp0_qkey) {
		err = -ENOMEM;
		goto err_mem;
	}

	for (i = 1; i <= dev->caps.num_ports; ++i) {
		err = mlx4_QUERY_FUNC_CAP(dev, i, &func_cap);
		if (err) {
			mlx4_err(dev, "QUERY_FUNC_CAP port command failed for port %d, aborting (%d)\n",
				 i, err);
			goto err_mem;
		}
		dev->caps.qp0_qkey[i - 1] = func_cap.qp0_qkey;
		dev->caps.qp0_tunnel[i - 1] = func_cap.qp0_tunnel_qpn;
		dev->caps.qp0_proxy[i - 1] = func_cap.qp0_proxy_qpn;
		dev->caps.qp1_tunnel[i - 1] = func_cap.qp1_tunnel_qpn;
		dev->caps.qp1_proxy[i - 1] = func_cap.qp1_proxy_qpn;
		dev->caps.port_mask[i] = dev->caps.port_type[i];
		dev->caps.phys_port_id[i] = func_cap.phys_port_id;
		if (mlx4_get_slave_pkey_gid_tbl_len(dev, i,
						    &dev->caps.gid_table_len[i],
						    &dev->caps.pkey_table_len[i])) {
			err = -ENODEV;
			goto err_mem;
		}
	}

	if (dev->caps.uar_page_size * (dev->caps.num_uars -
				       dev->caps.reserved_uars) >
				       pci_resource_len(dev->pdev, 2)) {
		mlx4_err(dev, "HCA reported UAR region size of 0x%x bigger than PCI resource 2 size of 0x%llx, aborting\n",
			 dev->caps.uar_page_size * dev->caps.num_uars,
			 (unsigned long long) pci_resource_len(dev->pdev, 2));
		err = -ENOMEM;
		goto err_mem;
	}

	if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_64B_EQE_ENABLED) {
		dev->caps.eqe_size   = 64;
		dev->caps.eqe_factor = 1;
	} else {
		dev->caps.eqe_size   = 32;
		dev->caps.eqe_factor = 0;
	}

	if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_64B_CQE_ENABLED) {
		dev->caps.cqe_size   = 64;
		dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE;
	} else {
		dev->caps.cqe_size   = 32;
	}

	if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_EQE_STRIDE_ENABLED) {
		dev->caps.eqe_size = hca_param.eqe_size;
		dev->caps.eqe_factor = 0;
	}

	if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_CQE_STRIDE_ENABLED) {
		dev->caps.cqe_size = hca_param.cqe_size;
		/* User still need to know when CQE > 32B */
		dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE;
	}

	dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
	mlx4_warn(dev, "Timestamping is not supported in slave mode\n");

	slave_adjust_steering_mode(dev, &dev_cap, &hca_param);

	return 0;

err_mem:
	kfree(dev->caps.qp0_qkey);
	kfree(dev->caps.qp0_tunnel);
	kfree(dev->caps.qp0_proxy);
	kfree(dev->caps.qp1_tunnel);
	kfree(dev->caps.qp1_proxy);
	dev->caps.qp0_qkey = NULL;
	dev->caps.qp0_tunnel = NULL;
	dev->caps.qp0_proxy = NULL;
	dev->caps.qp1_tunnel = NULL;
	dev->caps.qp1_proxy = NULL;

	return err;
}
static void mlx4_request_modules(struct mlx4_dev *dev)
{
	int port;
	int has_ib_port = false;
	int has_eth_port = false;
#define EN_DRV_NAME	"mlx4_en"
#define IB_DRV_NAME	"mlx4_ib"

	for (port = 1; port <= dev->caps.num_ports; port++) {
		if (dev->caps.port_type[port] == MLX4_PORT_TYPE_IB)
			has_ib_port = true;
		else if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH)
			has_eth_port = true;
	}

	if (has_eth_port)
		request_module_nowait(EN_DRV_NAME);
	if (has_ib_port || (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE))
		request_module_nowait(IB_DRV_NAME);
}
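/*
 * request_module_nowait() only schedules the protocol driver load and
 * does not wait for it to finish, so this can safely be called from
 * probe context without blocking on the module loader.
 */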
/*
 * Change the port configuration of the device.
 * Every user of this function must hold the port mutex.
 */
int mlx4_change_port_types(struct mlx4_dev *dev,
			   enum mlx4_port_type *port_types)
{
	int err = 0;
	int change = 0;
	int port;

	for (port = 0; port < dev->caps.num_ports; port++) {
		/* Change the port type only if the new type is different
		 * from the current, and not set to Auto */
		if (port_types[port] != dev->caps.port_type[port + 1])
			change = 1;
	}
	if (change) {
		mlx4_unregister_device(dev);
		for (port = 1; port <= dev->caps.num_ports; port++) {
			mlx4_CLOSE_PORT(dev, port);
			dev->caps.port_type[port] = port_types[port - 1];
			err = mlx4_SET_PORT(dev, port, -1);
			if (err) {
				mlx4_err(dev, "Failed to set port %d, aborting\n",
					 port);
				goto out;
			}
		}
		mlx4_set_port_mask(dev);
		err = mlx4_register_device(dev);
		if (err) {
			mlx4_err(dev, "Failed to register device\n");
			goto out;
		}
		mlx4_request_modules(dev);
	}

out:
	return err;
}
static ssize_t show_port_type(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
						   port_attr);
	struct mlx4_dev *mdev = info->dev;
	char type[8];

	sprintf(type, "%s",
		(mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_IB) ?
		"ib" : "eth");
	if (mdev->caps.possible_type[info->port] == MLX4_PORT_TYPE_AUTO)
		sprintf(buf, "auto (%s)\n", type);
	else
		sprintf(buf, "%s\n", type);

	return strlen(buf);
}
static ssize_t set_port_type(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
						   port_attr);
	struct mlx4_dev *mdev = info->dev;
	struct mlx4_priv *priv = mlx4_priv(mdev);
	enum mlx4_port_type types[MLX4_MAX_PORTS];
	enum mlx4_port_type new_types[MLX4_MAX_PORTS];
	static DEFINE_MUTEX(set_port_type_mutex);
	int i;
	int err = 0;

	mutex_lock(&set_port_type_mutex);

	if (!strcmp(buf, "ib\n"))
		info->tmp_type = MLX4_PORT_TYPE_IB;
	else if (!strcmp(buf, "eth\n"))
		info->tmp_type = MLX4_PORT_TYPE_ETH;
	else if (!strcmp(buf, "auto\n"))
		info->tmp_type = MLX4_PORT_TYPE_AUTO;
	else {
		mlx4_err(mdev, "%s is not supported port type\n", buf);
		err = -EINVAL;
		goto err_out;
	}

	mlx4_stop_sense(mdev);
	mutex_lock(&priv->port_mutex);
	/* Possible type is always the one that was delivered */
	mdev->caps.possible_type[info->port] = info->tmp_type;

	for (i = 0; i < mdev->caps.num_ports; i++) {
		types[i] = priv->port[i+1].tmp_type ? priv->port[i+1].tmp_type :
					mdev->caps.possible_type[i+1];
		if (types[i] == MLX4_PORT_TYPE_AUTO)
			types[i] = mdev->caps.port_type[i+1];
	}

	if (!(mdev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) &&
	    !(mdev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT)) {
		for (i = 1; i <= mdev->caps.num_ports; i++) {
			if (mdev->caps.possible_type[i] == MLX4_PORT_TYPE_AUTO) {
				mdev->caps.possible_type[i] = mdev->caps.port_type[i];
				err = -EINVAL;
			}
		}
	}
	if (err) {
		mlx4_err(mdev, "Auto sensing is not supported on this HCA. Set only 'eth' or 'ib' for both ports (should be the same)\n");
		goto out;
	}

	mlx4_do_sense_ports(mdev, new_types, types);

	err = mlx4_check_port_params(mdev, new_types);
	if (err)
		goto out;

	/* We are about to apply the changes after the configuration
	 * was verified, no need to remember the temporary types
	 * any more */
	for (i = 0; i < mdev->caps.num_ports; i++)
		priv->port[i + 1].tmp_type = 0;

	err = mlx4_change_port_types(mdev, new_types);

out:
	mlx4_start_sense(mdev);
	mutex_unlock(&priv->port_mutex);
err_out:
	mutex_unlock(&set_port_type_mutex);

	return err ? err : count;
}
enum ibta_mtu {
	IB_MTU_256  = 1,
	IB_MTU_512  = 2,
	IB_MTU_1024 = 3,
	IB_MTU_2048 = 4,
	IB_MTU_4096 = 5
};

static inline int int_to_ibta_mtu(int mtu)
{
	switch (mtu) {
	case 256:  return IB_MTU_256;
	case 512:  return IB_MTU_512;
	case 1024: return IB_MTU_1024;
	case 2048: return IB_MTU_2048;
	case 4096: return IB_MTU_4096;
	default: return -1;
	}
}

static inline int ibta_mtu_to_int(enum ibta_mtu mtu)
{
	switch (mtu) {
	case IB_MTU_256:  return  256;
	case IB_MTU_512:  return  512;
	case IB_MTU_1024: return 1024;
	case IB_MTU_2048: return 2048;
	case IB_MTU_4096: return 4096;
	default: return -1;
	}
}
static ssize_t show_port_ib_mtu(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
						   port_mtu_attr);
	struct mlx4_dev *mdev = info->dev;

	if (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_ETH)
		mlx4_warn(mdev, "port level mtu is only used for IB ports\n");

	sprintf(buf, "%d\n",
		ibta_mtu_to_int(mdev->caps.port_ib_mtu[info->port]));
	return strlen(buf);
}
static ssize_t set_port_ib_mtu(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t count)
{
	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
						   port_mtu_attr);
	struct mlx4_dev *mdev = info->dev;
	struct mlx4_priv *priv = mlx4_priv(mdev);
	int err, port, mtu, ibta_mtu = -1;

	if (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_ETH) {
		mlx4_warn(mdev, "port level mtu is only used for IB ports\n");
		return -EINVAL;
	}

	err = kstrtoint(buf, 0, &mtu);
	if (!err)
		ibta_mtu = int_to_ibta_mtu(mtu);

	if (err || ibta_mtu < 0) {
		mlx4_err(mdev, "%s is invalid IBTA mtu\n", buf);
		return -EINVAL;
	}

	mdev->caps.port_ib_mtu[info->port] = ibta_mtu;

	mlx4_stop_sense(mdev);
	mutex_lock(&priv->port_mutex);
	mlx4_unregister_device(mdev);
	for (port = 1; port <= mdev->caps.num_ports; port++) {
		mlx4_CLOSE_PORT(mdev, port);
		err = mlx4_SET_PORT(mdev, port, -1);
		if (err) {
			mlx4_err(mdev, "Failed to set port %d, aborting\n",
				 port);
			goto err_set_port;
		}
	}
	err = mlx4_register_device(mdev);
err_set_port:
	mutex_unlock(&priv->port_mutex);
	mlx4_start_sense(mdev);
	return err ? err : count;
}
static int mlx4_load_fw(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;

	priv->fw.fw_icm = mlx4_alloc_icm(dev, priv->fw.fw_pages,
					 GFP_HIGHUSER | __GFP_NOWARN, 0);
	if (!priv->fw.fw_icm) {
		mlx4_err(dev, "Couldn't allocate FW area, aborting\n");
		return -ENOMEM;
	}

	err = mlx4_MAP_FA(dev, priv->fw.fw_icm);
	if (err) {
		mlx4_err(dev, "MAP_FA command failed, aborting\n");
		goto err_free;
	}

	err = mlx4_RUN_FW(dev);
	if (err) {
		mlx4_err(dev, "RUN_FW command failed, aborting\n");
		goto err_unmap_fa;
	}

	return 0;

err_unmap_fa:
	mlx4_UNMAP_FA(dev);

err_free:
	mlx4_free_icm(dev, priv->fw.fw_icm, 0);
	return err;
}
static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base,
				int cmpt_entry_sz)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;
	int num_eqs;

	err = mlx4_init_icm_table(dev, &priv->qp_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_QP *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err)
		goto err;

	err = mlx4_init_icm_table(dev, &priv->srq_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_SRQ *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, dev->caps.num_srqs,
				  dev->caps.reserved_srqs, 0, 0);
	if (err)
		goto err_qp;

	err = mlx4_init_icm_table(dev, &priv->cq_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_CQ *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, dev->caps.num_cqs,
				  dev->caps.reserved_cqs, 0, 0);
	if (err)
		goto err_srq;

	num_eqs = dev->phys_caps.num_phys_eqs;
	err = mlx4_init_icm_table(dev, &priv->eq_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_EQ *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, num_eqs, num_eqs, 0, 0);
	if (err)
		goto err_cq;

	return 0;

err_cq:
	mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);

err_srq:
	mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);

err_qp:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);

err:
	return err;
}
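/*
 * cMPT address map, following the offsets computed above: each object
 * class (QP, SRQ, CQ, EQ) gets its own slice of the cMPT area at
 * cmpt_base + ((type * cmpt_entry_sz) << MLX4_CMPT_SHIFT), so the four
 * tables never overlap in ICM.
 */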
static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
			 struct mlx4_init_hca_param *init_hca, u64 icm_size)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u64 aux_pages;
	int num_eqs;
	int err;

	err = mlx4_SET_ICM_SIZE(dev, icm_size, &aux_pages);
	if (err) {
		mlx4_err(dev, "SET_ICM_SIZE command failed, aborting\n");
		return err;
	}

	mlx4_dbg(dev, "%lld KB of HCA context requires %lld KB aux memory\n",
		 (unsigned long long) icm_size >> 10,
		 (unsigned long long) aux_pages << 2);

	priv->fw.aux_icm = mlx4_alloc_icm(dev, aux_pages,
					  GFP_HIGHUSER | __GFP_NOWARN, 0);
	if (!priv->fw.aux_icm) {
		mlx4_err(dev, "Couldn't allocate aux memory, aborting\n");
		return -ENOMEM;
	}

	err = mlx4_MAP_ICM_AUX(dev, priv->fw.aux_icm);
	if (err) {
		mlx4_err(dev, "MAP_ICM_AUX command failed, aborting\n");
		goto err_free_aux;
	}

	err = mlx4_init_cmpt_table(dev, init_hca->cmpt_base, dev_cap->cmpt_entry_sz);
	if (err) {
		mlx4_err(dev, "Failed to map cMPT context memory, aborting\n");
		goto err_unmap_aux;
	}

	num_eqs = dev->phys_caps.num_phys_eqs;
	err = mlx4_init_icm_table(dev, &priv->eq_table.table,
				  init_hca->eqc_base, dev_cap->eqc_entry_sz,
				  num_eqs, num_eqs, 0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map EQ context memory, aborting\n");
		goto err_unmap_cmpt;
	}

	/*
	 * Reserved MTT entries must be aligned up to a cacheline
	 * boundary, since the FW will write to them, while the driver
	 * writes to all other MTT entries. (The variable
	 * dev->caps.mtt_entry_sz below is really the MTT segment
	 * size, not the raw entry size)
	 */
	dev->caps.reserved_mtts =
		ALIGN(dev->caps.reserved_mtts * dev->caps.mtt_entry_sz,
		      dma_get_cache_alignment()) / dev->caps.mtt_entry_sz;

	err = mlx4_init_icm_table(dev, &priv->mr_table.mtt_table,
				  init_hca->mtt_base,
				  dev->caps.mtt_entry_sz,
				  dev->caps.num_mtts,
				  dev->caps.reserved_mtts, 1, 0);
	if (err) {
		mlx4_err(dev, "Failed to map MTT context memory, aborting\n");
		goto err_unmap_eq;
	}

	err = mlx4_init_icm_table(dev, &priv->mr_table.dmpt_table,
				  init_hca->dmpt_base,
				  dev_cap->dmpt_entry_sz,
				  dev->caps.num_mpts,
				  dev->caps.reserved_mrws, 1, 1);
	if (err) {
		mlx4_err(dev, "Failed to map dMPT context memory, aborting\n");
		goto err_unmap_mtt;
	}

	err = mlx4_init_icm_table(dev, &priv->qp_table.qp_table,
				  init_hca->qpc_base,
				  dev_cap->qpc_entry_sz,
				  dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map QP context memory, aborting\n");
		goto err_unmap_dmpt;
	}

	err = mlx4_init_icm_table(dev, &priv->qp_table.auxc_table,
				  init_hca->auxc_base,
				  dev_cap->aux_entry_sz,
				  dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map AUXC context memory, aborting\n");
		goto err_unmap_qp;
	}

	err = mlx4_init_icm_table(dev, &priv->qp_table.altc_table,
				  init_hca->altc_base,
				  dev_cap->altc_entry_sz,
				  dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map ALTC context memory, aborting\n");
		goto err_unmap_auxc;
	}

	err = mlx4_init_icm_table(dev, &priv->qp_table.rdmarc_table,
				  init_hca->rdmarc_base,
				  dev_cap->rdmarc_entry_sz << priv->qp_table.rdmarc_shift,
				  dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map RDMARC context memory, aborting\n");
		goto err_unmap_altc;
	}

	err = mlx4_init_icm_table(dev, &priv->cq_table.table,
				  init_hca->cqc_base,
				  dev_cap->cqc_entry_sz,
				  dev->caps.num_cqs,
				  dev->caps.reserved_cqs, 0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map CQ context memory, aborting\n");
		goto err_unmap_rdmarc;
	}

	err = mlx4_init_icm_table(dev, &priv->srq_table.table,
				  init_hca->srqc_base,
				  dev_cap->srq_entry_sz,
				  dev->caps.num_srqs,
				  dev->caps.reserved_srqs, 0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map SRQ context memory, aborting\n");
		goto err_unmap_cq;
	}

	/*
	 * For flow steering device managed mode it is required to use
	 * mlx4_init_icm_table. For B0 steering mode it's not strictly
	 * required, but for simplicity just map the whole multicast
	 * group table now.  The table isn't very big and it's a lot
	 * easier than trying to track ref counts.
	 */
	err = mlx4_init_icm_table(dev, &priv->mcg_table.table,
				  init_hca->mc_base,
				  mlx4_get_mgm_entry_size(dev),
				  dev->caps.num_mgms + dev->caps.num_amgms,
				  dev->caps.num_mgms + dev->caps.num_amgms,
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map MCG context memory, aborting\n");
		goto err_unmap_srq;
	}

	return 0;

err_unmap_srq:
	mlx4_cleanup_icm_table(dev, &priv->srq_table.table);

err_unmap_cq:
	mlx4_cleanup_icm_table(dev, &priv->cq_table.table);

err_unmap_rdmarc:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table);

err_unmap_altc:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table);

err_unmap_auxc:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table);

err_unmap_qp:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);

err_unmap_dmpt:
	mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);

err_unmap_mtt:
	mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);

err_unmap_eq:
	mlx4_cleanup_icm_table(dev, &priv->eq_table.table);

err_unmap_cmpt:
	mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);

err_unmap_aux:
	mlx4_UNMAP_ICM_AUX(dev);

err_free_aux:
	mlx4_free_icm(dev, priv->fw.aux_icm, 0);

	return err;
}
static void mlx4_free_icms(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	mlx4_cleanup_icm_table(dev, &priv->mcg_table.table);
	mlx4_cleanup_icm_table(dev, &priv->srq_table.table);
	mlx4_cleanup_icm_table(dev, &priv->cq_table.table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);
	mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);
	mlx4_cleanup_icm_table(dev, &priv->eq_table.table);
	mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);

	mlx4_UNMAP_ICM_AUX(dev);
	mlx4_free_icm(dev, priv->fw.aux_icm, 0);
}
static void mlx4_slave_exit(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	mutex_lock(&priv->cmd.slave_cmd_mutex);
	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_TIME))
		mlx4_warn(dev, "Failed to close slave function\n");
	mutex_unlock(&priv->cmd.slave_cmd_mutex);
}
static int map_bf_area(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	resource_size_t bf_start;
	resource_size_t bf_len;
	int err = 0;

	if (!dev->caps.bf_reg_size)
		return -ENXIO;

	bf_start = pci_resource_start(dev->pdev, 2) +
			(dev->caps.num_uars << PAGE_SHIFT);
	bf_len = pci_resource_len(dev->pdev, 2) -
			(dev->caps.num_uars << PAGE_SHIFT);
	priv->bf_mapping = io_mapping_create_wc(bf_start, bf_len);
	if (!priv->bf_mapping)
		err = -ENOMEM;

	return err;
}

static void unmap_bf_area(struct mlx4_dev *dev)
{
	if (mlx4_priv(dev)->bf_mapping)
		io_mapping_free(mlx4_priv(dev)->bf_mapping);
}
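/*
 * Note: BAR 2 holds num_uars UAR pages followed by the BlueFlame
 * registers, which is why the write-combining mapping above starts at
 * pci_resource_start() + (num_uars << PAGE_SHIFT) and spans the rest
 * of the BAR.
 */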
cycle_t mlx4_read_clock(struct mlx4_dev *dev)
{
	u32 clockhi, clocklo, clockhi1;
	cycle_t cycles;
	int i;
	struct mlx4_priv *priv = mlx4_priv(dev);

	for (i = 0; i < 10; i++) {
		clockhi = swab32(readl(priv->clock_mapping));
		clocklo = swab32(readl(priv->clock_mapping + 4));
		clockhi1 = swab32(readl(priv->clock_mapping));
		if (clockhi == clockhi1)
			break;
	}

	cycles = (u64) clockhi << 32 | (u64) clocklo;

	return cycles;
}
EXPORT_SYMBOL_GPL(mlx4_read_clock);
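/*
 * The hi/lo/hi sequence above reads a 64-bit free-running counter that
 * the device exposes as two 32-bit words: if the high word changed
 * while the low word was being read, the low word may have wrapped, so
 * the read is retried (up to 10 times) until two consecutive high-word
 * reads agree.
 */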
static int map_internal_clock(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	priv->clock_mapping =
		ioremap(pci_resource_start(dev->pdev, priv->fw.clock_bar) +
			priv->fw.clock_offset, MLX4_CLOCK_SIZE);

	if (!priv->clock_mapping)
		return -ENOMEM;

	return 0;
}

static void unmap_internal_clock(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	if (priv->clock_mapping)
		iounmap(priv->clock_mapping);
}
static void mlx4_close_hca(struct mlx4_dev *dev)
{
	unmap_internal_clock(dev);
	unmap_bf_area(dev);
	if (mlx4_is_slave(dev))
		mlx4_slave_exit(dev);
	else {
		mlx4_CLOSE_HCA(dev, 0);
		mlx4_free_icms(dev);
	}
}

static void mlx4_close_fw(struct mlx4_dev *dev)
{
	if (!mlx4_is_slave(dev)) {
		mlx4_UNMAP_FA(dev);
		mlx4_free_icm(dev, mlx4_priv(dev)->fw.fw_icm, 0);
	}
}
static int mlx4_init_slave(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u64 dma = (u64) priv->mfunc.vhcr_dma;
	int ret_from_reset = 0;
	u32 slave_read;
	u32 cmd_channel_ver;

	if (atomic_read(&pf_loading)) {
		mlx4_warn(dev, "PF is not ready - Deferring probe\n");
		return -EPROBE_DEFER;
	}

	mutex_lock(&priv->cmd.slave_cmd_mutex);
	priv->cmd.max_cmds = 1;
	mlx4_warn(dev, "Sending reset\n");
	ret_from_reset = mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0,
				       MLX4_COMM_TIME);
	/* if we are in the middle of flr the slave will try
	 * NUM_OF_RESET_RETRIES times before leaving.*/
	if (ret_from_reset) {
		if (MLX4_DELAY_RESET_SLAVE == ret_from_reset) {
			mlx4_warn(dev, "slave is currently in the middle of FLR - Deferring probe\n");
			mutex_unlock(&priv->cmd.slave_cmd_mutex);
			return -EPROBE_DEFER;
		} else
			goto err;
	}

	/* check the driver version - the slave I/F revision
	 * must match the master's */
	slave_read = swab32(readl(&priv->mfunc.comm->slave_read));
	cmd_channel_ver = mlx4_comm_get_version();

	if (MLX4_COMM_GET_IF_REV(cmd_channel_ver) !=
	    MLX4_COMM_GET_IF_REV(slave_read)) {
		mlx4_err(dev, "slave driver version is not supported by the master\n");
		goto err;
	}

	mlx4_warn(dev, "Sending vhcr0\n");
	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR0, dma >> 48,
			  MLX4_COMM_TIME))
		goto err;
	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR1, dma >> 32,
			  MLX4_COMM_TIME))
		goto err;
	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR2, dma >> 16,
			  MLX4_COMM_TIME))
		goto err;
	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_EN, dma, MLX4_COMM_TIME))
		goto err;

	mutex_unlock(&priv->cmd.slave_cmd_mutex);
	return 0;

err:
	mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, 0);
	mutex_unlock(&priv->cmd.slave_cmd_mutex);
	return -EIO;
}
static void mlx4_parav_master_pf_caps(struct mlx4_dev *dev)
{
	int i;

	for (i = 1; i <= dev->caps.num_ports; i++) {
		if (dev->caps.port_type[i] == MLX4_PORT_TYPE_ETH)
			dev->caps.gid_table_len[i] =
				mlx4_get_slave_num_gids(dev, 0, i);
		else
			dev->caps.gid_table_len[i] = 1;
		dev->caps.pkey_table_len[i] =
			dev->phys_caps.pkey_phys_table_len[i] - 1;
	}
}
static int choose_log_fs_mgm_entry_size(int qp_per_entry)
{
	int i = MLX4_MIN_MGM_LOG_ENTRY_SIZE;

	for (i = MLX4_MIN_MGM_LOG_ENTRY_SIZE; i <= MLX4_MAX_MGM_LOG_ENTRY_SIZE;
	     i++) {
		if (qp_per_entry <= 4 * ((1 << i) / 16 - 2))
			break;
	}

	return (i <= MLX4_MAX_MGM_LOG_ENTRY_SIZE) ? i : -1;
}
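/*
 * Worked example (illustrative): for qp_per_entry = 100, i = 8 gives
 * 4 * (256 / 16 - 2) = 56, too small, while i = 9 gives
 * 4 * (512 / 16 - 2) = 120 >= 100, so 9 is chosen; -1 means no entry
 * size up to MLX4_MAX_MGM_LOG_ENTRY_SIZE can hold that many QPs.
 */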
static void choose_steering_mode(struct mlx4_dev *dev,
				 struct mlx4_dev_cap *dev_cap)
{
	if (mlx4_log_num_mgm_entry_size == -1 &&
	    dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_FS_EN &&
	    (!mlx4_is_mfunc(dev) ||
	     (dev_cap->fs_max_num_qp_per_entry >= (dev->num_vfs + 1))) &&
	    choose_log_fs_mgm_entry_size(dev_cap->fs_max_num_qp_per_entry) >=
		MLX4_MIN_MGM_LOG_ENTRY_SIZE) {
		dev->oper_log_mgm_entry_size =
			choose_log_fs_mgm_entry_size(dev_cap->fs_max_num_qp_per_entry);
		dev->caps.steering_mode = MLX4_STEERING_MODE_DEVICE_MANAGED;
		dev->caps.num_qp_per_mgm = dev_cap->fs_max_num_qp_per_entry;
		dev->caps.fs_log_max_ucast_qp_range_size =
			dev_cap->fs_log_max_ucast_qp_range_size;
	} else {
		if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER &&
		    dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)
			dev->caps.steering_mode = MLX4_STEERING_MODE_B0;
		else {
			dev->caps.steering_mode = MLX4_STEERING_MODE_A0;

			if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER ||
			    dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)
				mlx4_warn(dev, "Must have both UC_STEER and MC_STEER flags set to use B0 steering - falling back to A0 steering mode\n");
		}
		dev->oper_log_mgm_entry_size =
			mlx4_log_num_mgm_entry_size > 0 ?
			mlx4_log_num_mgm_entry_size :
			MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE;
		dev->caps.num_qp_per_mgm = mlx4_get_qp_per_mgm(dev);
	}
	mlx4_dbg(dev, "Steering mode is: %s, oper_log_mgm_entry_size = %d, modparam log_num_mgm_entry_size = %d\n",
		 mlx4_steering_mode_str(dev->caps.steering_mode),
		 dev->oper_log_mgm_entry_size,
		 mlx4_log_num_mgm_entry_size);
}
static void choose_tunnel_offload_mode(struct mlx4_dev *dev,
				       struct mlx4_dev_cap *dev_cap)
{
	if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED &&
	    dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS)
		dev->caps.tunnel_offload_mode = MLX4_TUNNEL_OFFLOAD_MODE_VXLAN;
	else
		dev->caps.tunnel_offload_mode = MLX4_TUNNEL_OFFLOAD_MODE_NONE;

	mlx4_dbg(dev, "Tunneling offload mode is: %s\n", (dev->caps.tunnel_offload_mode
		 == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) ? "vxlan" : "none");
}
static int mlx4_init_fw(struct mlx4_dev *dev)
{
	struct mlx4_mod_stat_cfg   mlx4_cfg;
	int err = 0;

	if (!mlx4_is_slave(dev)) {
		err = mlx4_QUERY_FW(dev);
		if (err) {
			if (err == -EACCES)
				mlx4_info(dev, "non-primary physical function, skipping\n");
			else
				mlx4_err(dev, "QUERY_FW command failed, aborting\n");
			return err;
		}

		err = mlx4_load_fw(dev);
		if (err) {
			mlx4_err(dev, "Failed to start FW, aborting\n");
			return err;
		}

		mlx4_cfg.log_pg_sz_m = 1;
		mlx4_cfg.log_pg_sz = 0;
		err = mlx4_MOD_STAT_CFG(dev, &mlx4_cfg);
		if (err)
			mlx4_warn(dev, "Failed to override log_pg_sz parameter\n");
	}

	return err;
}
static int mlx4_init_hca(struct mlx4_dev *dev)
{
	struct mlx4_priv	  *priv = mlx4_priv(dev);
	struct mlx4_adapter	   adapter;
	struct mlx4_dev_cap	   dev_cap;
	struct mlx4_profile	   profile;
	struct mlx4_init_hca_param init_hca;
	u64 icm_size;
	struct mlx4_config_dev_params params;
	int err;

	if (!mlx4_is_slave(dev)) {
		err = mlx4_dev_cap(dev, &dev_cap);
		if (err) {
			mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
			return err;
		}

		choose_steering_mode(dev, &dev_cap);
		choose_tunnel_offload_mode(dev, &dev_cap);

		err = mlx4_get_phys_port_id(dev);
		if (err)
			mlx4_err(dev, "Fail to get physical port id\n");

		if (mlx4_is_master(dev))
			mlx4_parav_master_pf_caps(dev);

		if (mlx4_low_memory_profile()) {
			mlx4_info(dev, "Running from within kdump kernel. Using low memory profile\n");
			profile = low_mem_profile;
		} else {
			profile = default_profile;
		}
		if (dev->caps.steering_mode ==
		    MLX4_STEERING_MODE_DEVICE_MANAGED)
			profile.num_mcg = MLX4_FS_NUM_MCG;

		icm_size = mlx4_make_profile(dev, &profile, &dev_cap,
					     &init_hca);
		if ((long long) icm_size < 0) {
			err = icm_size;
			goto err_stop_fw;
		}

		dev->caps.max_fmr_maps = (1 << (32 - ilog2(dev->caps.num_mpts))) - 1;

		init_hca.log_uar_sz = ilog2(dev->caps.num_uars);
		init_hca.uar_page_sz = PAGE_SHIFT - 12;
		init_hca.mw_enabled = 0;
		if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW ||
		    dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN)
			init_hca.mw_enabled = INIT_HCA_TPT_MW_ENABLE;

		err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size);
		if (err)
			goto err_stop_fw;

		err = mlx4_INIT_HCA(dev, &init_hca);
		if (err) {
			mlx4_err(dev, "INIT_HCA command failed, aborting\n");
			goto err_free_icm;
		}

		if (dev_cap.flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) {
			err = mlx4_query_func(dev, &dev_cap);
			if (err < 0) {
				mlx4_err(dev, "QUERY_FUNC command failed, aborting.\n");
				goto err_close;
			} else if (err & MLX4_QUERY_FUNC_NUM_SYS_EQS) {
				dev->caps.num_eqs = dev_cap.max_eqs;
				dev->caps.reserved_eqs = dev_cap.reserved_eqs;
				dev->caps.reserved_uars = dev_cap.reserved_uars;
			}
		}

		/*
		 * If TS is supported by FW
		 * read HCA frequency by QUERY_HCA command
		 */
		if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) {
			memset(&init_hca, 0, sizeof(init_hca));
			err = mlx4_QUERY_HCA(dev, &init_hca);
			if (err) {
				mlx4_err(dev, "QUERY_HCA command failed, disable timestamp\n");
				dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
			} else {
				dev->caps.hca_core_clock =
					init_hca.hca_core_clock;
			}

			/* In case we got HCA frequency 0 - disable timestamping
			 * to avoid dividing by zero
			 */
			if (!dev->caps.hca_core_clock) {
				dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
				mlx4_err(dev,
					 "HCA frequency is 0 - timestamping is not supported\n");
			} else if (map_internal_clock(dev)) {
				/*
				 * Map internal clock,
				 * in case of failure disable timestamping
				 */
				dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
				mlx4_err(dev, "Failed to map internal clock. Timestamping is not supported\n");
			}
		}
	} else {
		err = mlx4_init_slave(dev);
		if (err) {
			if (err != -EPROBE_DEFER)
				mlx4_err(dev, "Failed to initialize slave\n");
			return err;
		}

		err = mlx4_slave_cap(dev);
		if (err) {
			mlx4_err(dev, "Failed to obtain slave caps\n");
			goto err_close;
		}
	}

	if (map_bf_area(dev))
		mlx4_dbg(dev, "Failed to map blue flame area\n");

	/* Only the master set the ports, all the rest got it from it.*/
	if (!mlx4_is_slave(dev))
		mlx4_set_port_mask(dev);

	err = mlx4_QUERY_ADAPTER(dev, &adapter);
	if (err) {
		mlx4_err(dev, "QUERY_ADAPTER command failed, aborting\n");
		goto unmap_bf;
	}

	/* Query CONFIG_DEV parameters */
	err = mlx4_config_dev_retrieval(dev, &params);
	if (err && err != -ENOTSUPP) {
		mlx4_err(dev, "Failed to query CONFIG_DEV parameters\n");
	} else if (!err) {
		dev->caps.rx_checksum_flags_port[1] = params.rx_csum_flags_port_1;
		dev->caps.rx_checksum_flags_port[2] = params.rx_csum_flags_port_2;
	}
	priv->eq_table.inta_pin = adapter.inta_pin;
	memcpy(dev->board_id, adapter.board_id, sizeof dev->board_id);

	return 0;

unmap_bf:
	unmap_internal_clock(dev);
	unmap_bf_area(dev);

	if (mlx4_is_slave(dev)) {
		kfree(dev->caps.qp0_qkey);
		kfree(dev->caps.qp0_tunnel);
		kfree(dev->caps.qp0_proxy);
		kfree(dev->caps.qp1_tunnel);
		kfree(dev->caps.qp1_proxy);
	}

err_close:
	if (mlx4_is_slave(dev))
		mlx4_slave_exit(dev);
	else
		mlx4_CLOSE_HCA(dev, 0);

err_free_icm:
	if (!mlx4_is_slave(dev))
		mlx4_free_icms(dev);

err_stop_fw:
	if (!mlx4_is_slave(dev)) {
		mlx4_UNMAP_FA(dev);
		mlx4_free_icm(dev, priv->fw.fw_icm, 0);
	}
	return err;
}
static int mlx4_init_counters_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int nent;

	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
		return -ENOENT;

	nent = dev->caps.max_counters;
	return mlx4_bitmap_init(&priv->counters_bitmap, nent, nent - 1, 0, 0);
}

static void mlx4_cleanup_counters_table(struct mlx4_dev *dev)
{
	mlx4_bitmap_cleanup(&mlx4_priv(dev)->counters_bitmap);
}
int __mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
		return -ENOENT;

	*idx = mlx4_bitmap_alloc(&priv->counters_bitmap);
	if (*idx == -1)
		return -ENOMEM;

	return 0;
}

int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx)
{
	u64 out_param;
	int err;

	if (mlx4_is_mfunc(dev)) {
		err = mlx4_cmd_imm(dev, 0, &out_param, RES_COUNTER,
				   RES_OP_RESERVE, MLX4_CMD_ALLOC_RES,
				   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
		if (!err)
			*idx = get_param_l(&out_param);

		return err;
	}
	return __mlx4_counter_alloc(dev, idx);
}
EXPORT_SYMBOL_GPL(mlx4_counter_alloc);

void __mlx4_counter_free(struct mlx4_dev *dev, u32 idx)
{
	mlx4_bitmap_free(&mlx4_priv(dev)->counters_bitmap, idx, MLX4_USE_RR);
}

void mlx4_counter_free(struct mlx4_dev *dev, u32 idx)
{
	u64 in_param = 0;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, idx);
		mlx4_cmd(dev, in_param, RES_COUNTER, RES_OP_RESERVE,
			 MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
			 MLX4_CMD_WRAPPED);
		return;
	}
	__mlx4_counter_free(dev, idx);
}
EXPORT_SYMBOL_GPL(mlx4_counter_free);
static int mlx4_setup_hca(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;
	int port;
	__be32 ib_port_default_caps;

	err = mlx4_init_uar_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize user access region table, aborting\n");
		return err;
	}

	err = mlx4_uar_alloc(dev, &priv->driver_uar);
	if (err) {
		mlx4_err(dev, "Failed to allocate driver access region, aborting\n");
		goto err_uar_table_free;
	}

	priv->kar = ioremap((phys_addr_t) priv->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
	if (!priv->kar) {
		mlx4_err(dev, "Couldn't map kernel access region, aborting\n");
		err = -ENOMEM;
		goto err_uar_free;
	}

	err = mlx4_init_pd_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize protection domain table, aborting\n");
		goto err_kar_unmap;
	}

	err = mlx4_init_xrcd_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize reliable connection domain table, aborting\n");
		goto err_pd_table_free;
	}

	err = mlx4_init_mr_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize memory region table, aborting\n");
		goto err_xrcd_table_free;
	}

	if (!mlx4_is_slave(dev)) {
		err = mlx4_init_mcg_table(dev);
		if (err) {
			mlx4_err(dev, "Failed to initialize multicast group table, aborting\n");
			goto err_mr_table_free;
		}
		err = mlx4_config_mad_demux(dev);
		if (err) {
			mlx4_err(dev, "Failed in config_mad_demux, aborting\n");
			goto err_mcg_table_free;
		}
	}

	err = mlx4_init_eq_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize event queue table, aborting\n");
		goto err_mcg_table_free;
	}

	err = mlx4_cmd_use_events(dev);
	if (err) {
		mlx4_err(dev, "Failed to switch to event-driven firmware commands, aborting\n");
		goto err_eq_table_free;
	}

	err = mlx4_NOP(dev);
	if (err) {
		if (dev->flags & MLX4_FLAG_MSI_X) {
			mlx4_warn(dev, "NOP command failed to generate MSI-X interrupt IRQ %d)\n",
				  priv->eq_table.eq[dev->caps.num_comp_vectors].irq);
			mlx4_warn(dev, "Trying again without MSI-X\n");
		} else {
			mlx4_err(dev, "NOP command failed to generate interrupt (IRQ %d), aborting\n",
				 priv->eq_table.eq[dev->caps.num_comp_vectors].irq);
			mlx4_err(dev, "BIOS or ACPI interrupt routing problem?\n");
		}

		goto err_cmd_poll;
	}

	mlx4_dbg(dev, "NOP command IRQ test passed\n");

	err = mlx4_init_cq_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize completion queue table, aborting\n");
		goto err_cmd_poll;
	}

	err = mlx4_init_srq_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize shared receive queue table, aborting\n");
		goto err_cq_table_free;
	}

	err = mlx4_init_qp_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize queue pair table, aborting\n");
		goto err_srq_table_free;
	}

	err = mlx4_init_counters_table(dev);
	if (err && err != -ENOENT) {
		mlx4_err(dev, "Failed to initialize counters table, aborting\n");
		goto err_qp_table_free;
	}

	if (!mlx4_is_slave(dev)) {
		for (port = 1; port <= dev->caps.num_ports; port++) {
			ib_port_default_caps = 0;
			err = mlx4_get_port_ib_caps(dev, port,
						    &ib_port_default_caps);
			if (err)
				mlx4_warn(dev, "failed to get port %d default ib capabilities (%d). Continuing with caps = 0\n",
					  port, err);
			dev->caps.ib_port_def_cap[port] = ib_port_default_caps;

			/* initialize per-slave default ib port capabilities */
			if (mlx4_is_master(dev)) {
				int i;

				for (i = 0; i < dev->num_slaves; i++) {
					if (i == mlx4_master_func_num(dev))
						continue;
					priv->mfunc.master.slave_state[i].ib_cap_mask[port] =
						ib_port_default_caps;
				}
			}

			if (mlx4_is_mfunc(dev))
				dev->caps.port_ib_mtu[port] = IB_MTU_2048;
			else
				dev->caps.port_ib_mtu[port] = IB_MTU_4096;

			err = mlx4_SET_PORT(dev, port, mlx4_is_master(dev) ?
					    dev->caps.pkey_table_len[port] : -1);
			if (err) {
				mlx4_err(dev, "Failed to set port %d, aborting\n",
					 port);
				goto err_counters_table_free;
			}
		}
	}

	return 0;

err_counters_table_free:
	mlx4_cleanup_counters_table(dev);

err_qp_table_free:
	mlx4_cleanup_qp_table(dev);

err_srq_table_free:
	mlx4_cleanup_srq_table(dev);

err_cq_table_free:
	mlx4_cleanup_cq_table(dev);

err_cmd_poll:
	mlx4_cmd_use_polling(dev);

err_eq_table_free:
	mlx4_cleanup_eq_table(dev);

err_mcg_table_free:
	if (!mlx4_is_slave(dev))
		mlx4_cleanup_mcg_table(dev);

err_mr_table_free:
	mlx4_cleanup_mr_table(dev);

err_xrcd_table_free:
	mlx4_cleanup_xrcd_table(dev);

err_pd_table_free:
	mlx4_cleanup_pd_table(dev);

err_kar_unmap:
	iounmap(priv->kar);

err_uar_free:
	mlx4_uar_free(dev, &priv->driver_uar);

err_uar_table_free:
	mlx4_cleanup_uar_table(dev);
	return err;
}
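/*
 * MSI-X sizing in mlx4_enable_msi_x() below: the initial request is one
 * completion vector per port per online CPU plus MSIX_LEGACY_SZ control
 * vectors, clamped to what the firmware leaves available
 * (num_eqs - reserved_eqs).  For example, 2 ports and 8 online CPUs
 * yield a request of 2 * 8 + MSIX_LEGACY_SZ vectors.
 * pci_enable_msix_range() may grant fewer; anything below
 * MSIX_LEGACY_SZ + num_ports * MIN_MSIX_P_PORT drops the device into
 * legacy mode, where all EQs are shared and no completion vector pool
 * is exposed.
 */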
static void mlx4_enable_msi_x(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct msix_entry *entries;
	int i;

	if (msi_x) {
		int nreq = dev->caps.num_ports * num_online_cpus() + MSIX_LEGACY_SZ;

		nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs,
			     nreq);

		entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL);
		if (!entries)
			goto no_msi;

		for (i = 0; i < nreq; ++i)
			entries[i].entry = i;

		nreq = pci_enable_msix_range(dev->pdev, entries, 2, nreq);

		if (nreq < 0) {
			kfree(entries);
			goto no_msi;
		} else if (nreq < MSIX_LEGACY_SZ +
			   dev->caps.num_ports * MIN_MSIX_P_PORT) {
			/* Working in legacy mode, all EQ's shared */
			dev->caps.comp_pool        = 0;
			dev->caps.num_comp_vectors = nreq - 1;
		} else {
			dev->caps.comp_pool        = nreq - MSIX_LEGACY_SZ;
			dev->caps.num_comp_vectors = MSIX_LEGACY_SZ - 1;
		}
		for (i = 0; i < nreq; ++i)
			priv->eq_table.eq[i].irq = entries[i].vector;

		dev->flags |= MLX4_FLAG_MSI_X;

		kfree(entries);
		return;
	}

no_msi:
	dev->caps.num_comp_vectors = 1;
	dev->caps.comp_pool	   = 0;

	for (i = 0; i < 2; ++i)
		priv->eq_table.eq[i].irq = dev->pdev->irq;
}
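/*
 * Per-port sysfs attributes: mlx4_init_port_info() below publishes
 * "mlx4_port<N>" (link type) and "mlx4_port<N>_mtu" files under the PCI
 * device.  They are read-only on multi-function devices, where only the
 * PF owns port configuration, and writable otherwise.  Illustrative
 * shell usage (paths depend on the system):
 *
 *	cat /sys/bus/pci/devices/0000:81:00.0/mlx4_port1
 *	echo eth > /sys/bus/pci/devices/0000:81:00.0/mlx4_port1
 */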
static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
{
	struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
	int err = 0;

	info->dev = dev;
	info->port = port;
	if (!mlx4_is_slave(dev)) {
		mlx4_init_mac_table(dev, &info->mac_table);
		mlx4_init_vlan_table(dev, &info->vlan_table);
		mlx4_init_roce_gid_table(dev, &info->gid_table);
		info->base_qpn = mlx4_get_base_qpn(dev, port);
	}

	sprintf(info->dev_name, "mlx4_port%d", port);
	info->port_attr.attr.name = info->dev_name;
	if (mlx4_is_mfunc(dev))
		info->port_attr.attr.mode = S_IRUGO;
	else {
		info->port_attr.attr.mode = S_IRUGO | S_IWUSR;
		info->port_attr.store     = set_port_type;
	}
	info->port_attr.show      = show_port_type;
	sysfs_attr_init(&info->port_attr.attr);

	err = device_create_file(&dev->pdev->dev, &info->port_attr);
	if (err) {
		mlx4_err(dev, "Failed to create file for port %d\n", port);
		info->port = -1;
	}

	sprintf(info->dev_mtu_name, "mlx4_port%d_mtu", port);
	info->port_mtu_attr.attr.name = info->dev_mtu_name;
	if (mlx4_is_mfunc(dev))
		info->port_mtu_attr.attr.mode = S_IRUGO;
	else {
		info->port_mtu_attr.attr.mode = S_IRUGO | S_IWUSR;
		info->port_mtu_attr.store     = set_port_ib_mtu;
	}
	info->port_mtu_attr.show      = show_port_ib_mtu;
	sysfs_attr_init(&info->port_mtu_attr.attr);

	err = device_create_file(&dev->pdev->dev, &info->port_mtu_attr);
	if (err) {
		mlx4_err(dev, "Failed to create mtu file for port %d\n", port);
		device_remove_file(&info->dev->pdev->dev, &info->port_attr);
		info->port = -1;
	}

	return err;
}
static void mlx4_cleanup_port_info(struct mlx4_port_info *info)
{
	if (info->port < 0)
		return;

	device_remove_file(&info->dev->pdev->dev, &info->port_attr);
	device_remove_file(&info->dev->pdev->dev, &info->port_mtu_attr);
}
static int mlx4_init_steering(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int num_entries = dev->caps.num_ports;
	int i, j;

	priv->steer = kzalloc(sizeof(struct mlx4_steer) * num_entries, GFP_KERNEL);
	if (!priv->steer)
		return -ENOMEM;

	for (i = 0; i < num_entries; i++)
		for (j = 0; j < MLX4_NUM_STEERS; j++) {
			INIT_LIST_HEAD(&priv->steer[i].promisc_qps[j]);
			INIT_LIST_HEAD(&priv->steer[i].steer_entries[j]);
		}
	return 0;
}
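/*
 * The steering state set up above is a per-port array of struct
 * mlx4_steer, each holding one promiscuous-QP list and one
 * steering-entry list per steer type (MLX4_NUM_STEERS).
 * mlx4_clear_steering() below must unwind both levels, including the
 * duplicate-QP entries hanging off each steering entry, before freeing
 * the array itself.
 */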
static void mlx4_clear_steering(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_steer_index *entry, *tmp_entry;
	struct mlx4_promisc_qp *pqp, *tmp_pqp;
	int num_entries = dev->caps.num_ports;
	int i, j;

	for (i = 0; i < num_entries; i++) {
		for (j = 0; j < MLX4_NUM_STEERS; j++) {
			list_for_each_entry_safe(pqp, tmp_pqp,
						 &priv->steer[i].promisc_qps[j],
						 list) {
				list_del(&pqp->list);
				kfree(pqp);
			}
			list_for_each_entry_safe(entry, tmp_entry,
						 &priv->steer[i].steer_entries[j],
						 list) {
				list_del(&entry->list);
				list_for_each_entry_safe(pqp, tmp_pqp,
							 &entry->duplicates,
							 list) {
					list_del(&pqp->list);
					kfree(pqp);
				}
				kfree(entry);
			}
		}
	}
	kfree(priv->steer);
}
static int extended_func_num(struct pci_dev *pdev)
{
	return PCI_SLOT(pdev->devfn) * 8 + PCI_FUNC(pdev->devfn);
}
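/*
 * extended_func_num() flattens (slot, function) into a single function
 * index so VFs that spill over into neighboring slots can be matched
 * against the per-port VF counts.  For example, devfn 0x09 (slot 1,
 * function 1) maps to 1 * 8 + 1 = 9.
 */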
#define MLX4_OWNER_BASE	0x8069c
#define MLX4_OWNER_SIZE	4

static int mlx4_get_ownership(struct mlx4_dev *dev)
{
	void __iomem *owner;
	u32 ret;

	if (pci_channel_offline(dev->pdev))
		return -EIO;

	owner = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_OWNER_BASE,
			MLX4_OWNER_SIZE);
	if (!owner) {
		mlx4_err(dev, "Failed to obtain ownership bit\n");
		return -ENOMEM;
	}

	ret = readl(owner);
	iounmap(owner);
	return (int) !!ret;
}
static void mlx4_free_ownership(struct mlx4_dev *dev)
{
	void __iomem *owner;

	if (pci_channel_offline(dev->pdev))
		return;

	owner = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_OWNER_BASE,
			MLX4_OWNER_SIZE);
	if (!owner) {
		mlx4_err(dev, "Failed to obtain ownership bit\n");
		return;
	}

	writel(0, owner);
	msleep(1000);
	iounmap(owner);
}
#define SRIOV_VALID_STATE(flags) (!!((flags) & MLX4_FLAG_SRIOV) ==\
				  !!((flags) & MLX4_FLAG_MASTER))

static u64 mlx4_enable_sriov(struct mlx4_dev *dev, struct pci_dev *pdev,
			     u8 total_vfs, int existing_vfs)
{
	u64 dev_flags = dev->flags;

	dev->dev_vfs = kzalloc(
			total_vfs * sizeof(*dev->dev_vfs),
			GFP_KERNEL);
	if (NULL == dev->dev_vfs) {
		mlx4_err(dev, "Failed to allocate memory for VFs\n");
		goto disable_sriov;
	} else if (!(dev->flags & MLX4_FLAG_SRIOV)) {
		int err = 0;

		atomic_inc(&pf_loading);
		if (existing_vfs) {
			if (existing_vfs != total_vfs)
				mlx4_err(dev, "SR-IOV was already enabled, but with num_vfs (%d) different than requested (%d)\n",
					 existing_vfs, total_vfs);
		} else {
			mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n", total_vfs);
			err = pci_enable_sriov(pdev, total_vfs);
		}
		if (err) {
			mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d)\n",
				 err);
			atomic_dec(&pf_loading);
			goto disable_sriov;
		}
	}

	mlx4_warn(dev, "Running in master mode\n");
	dev_flags |= MLX4_FLAG_SRIOV |
		MLX4_FLAG_MASTER;
	dev_flags &= ~MLX4_FLAG_SLAVE;
	dev->num_vfs = total_vfs;

	return dev_flags;

disable_sriov:
	kfree(dev->dev_vfs);
	return dev_flags & ~MLX4_FLAG_MASTER;
}
enum {
	MLX4_DEV_CAP_CHECK_NUM_VFS_ABOVE_64 = -1,
};

static int mlx4_check_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
			      int *nvfs)
{
	int requested_vfs = nvfs[0] + nvfs[1] + nvfs[2];

	/* Checking for 64 VFs as a limitation of CX2 */
	if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_80_VFS) &&
	    requested_vfs >= 64) {
		mlx4_err(dev, "Requested %d VFs, but FW does not support more than 64\n",
			 requested_vfs);
		return MLX4_DEV_CAP_CHECK_NUM_VFS_ABOVE_64;
	}
	return 0;
}
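/*
 * mlx4_load_one() below is the core of probe: reset the HCA, bring up
 * the command interface, query capabilities, enable SR-IOV when
 * requested, then build all resource tables via mlx4_setup_hca() and
 * register the device.  The slave_start label is re-entered when a
 * function discovers mid-initialization that it is not the primary PF
 * and must restart in slave mode.
 */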
static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data,
			 int total_vfs, int *nvfs, struct mlx4_priv *priv)
{
	struct mlx4_dev *dev;
	unsigned sum = 0;
	int err;
	int port;
	int i;
	struct mlx4_dev_cap *dev_cap = NULL;
	int existing_vfs = 0;

	dev = &priv->dev;

	INIT_LIST_HEAD(&priv->ctx_list);
	spin_lock_init(&priv->ctx_lock);

	mutex_init(&priv->port_mutex);

	INIT_LIST_HEAD(&priv->pgdir_list);
	mutex_init(&priv->pgdir_mutex);

	INIT_LIST_HEAD(&priv->bf_list);
	mutex_init(&priv->bf_mutex);

	dev->rev_id = pdev->revision;
	dev->numa_node = dev_to_node(&pdev->dev);

	/* Detect if this device is a virtual function */
	if (pci_dev_data & MLX4_PCI_DEV_IS_VF) {
		mlx4_warn(dev, "Detected virtual function - running in slave mode\n");
		dev->flags |= MLX4_FLAG_SLAVE;
	} else {
		/* We reset the device and enable SRIOV only for physical
		 * devices.  Try to claim ownership on the device;
		 * if already taken, skip -- do not allow multiple PFs */
		err = mlx4_get_ownership(dev);
		if (err) {
			if (err < 0)
				return err;
			else {
				mlx4_warn(dev, "Multiple PFs not yet supported - Skipping PF\n");
				return -EINVAL;
			}
		}

		atomic_set(&priv->opreq_count, 0);
		INIT_WORK(&priv->opreq_task, mlx4_opreq_action);

		/*
		 * Now reset the HCA before we touch the PCI capabilities or
		 * attempt a firmware command, since a boot ROM may have left
		 * the HCA in an undefined state.
		 */
		err = mlx4_reset(dev);
		if (err) {
			mlx4_err(dev, "Failed to reset HCA, aborting\n");
			goto err_sriov;
		}

		if (total_vfs) {
			existing_vfs = pci_num_vf(pdev);
			dev->flags = MLX4_FLAG_MASTER;
			dev->num_vfs = total_vfs;
		}
	}

slave_start:
	err = mlx4_cmd_init(dev);
	if (err) {
		mlx4_err(dev, "Failed to init command interface, aborting\n");
		goto err_sriov;
	}

	/* In slave functions, the communication channel must be initialized
	 * before posting commands. Also, init num_slaves before calling
	 * mlx4_init_hca */
	if (mlx4_is_mfunc(dev)) {
		if (mlx4_is_master(dev)) {
			dev->num_slaves = MLX4_MAX_NUM_SLAVES;
		} else {
			dev->num_slaves = 0;
			err = mlx4_multi_func_init(dev);
			if (err) {
				mlx4_err(dev, "Failed to init slave mfunc interface, aborting\n");
				goto err_cmd;
			}
		}
	}

	err = mlx4_init_fw(dev);
	if (err) {
		mlx4_err(dev, "Failed to init fw, aborting.\n");
		goto err_mfunc;
	}

	if (mlx4_is_master(dev)) {
		/* when we hit the goto slave_start below, dev_cap already initialized */
		if (!dev_cap) {
			dev_cap = kzalloc(sizeof(*dev_cap), GFP_KERNEL);
			if (!dev_cap) {
				err = -ENOMEM;
				goto err_fw;
			}

			err = mlx4_QUERY_DEV_CAP(dev, dev_cap);
			if (err) {
				mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
				goto err_fw;
			}

			if (mlx4_check_dev_cap(dev, dev_cap, nvfs))
				goto err_fw;

			if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS)) {
				u64 dev_flags = mlx4_enable_sriov(dev, pdev, total_vfs,
								  existing_vfs);

				mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL);
				dev->flags = dev_flags;
				if (!SRIOV_VALID_STATE(dev->flags)) {
					mlx4_err(dev, "Invalid SRIOV state\n");
					goto err_sriov;
				}
				err = mlx4_reset(dev);
				if (err) {
					mlx4_err(dev, "Failed to reset HCA, aborting.\n");
					goto err_sriov;
				}
				goto slave_start;
			}
		} else {
			/* Legacy mode FW requires SRIOV to be enabled before
			 * doing QUERY_DEV_CAP, since max_eq's value is different if
			 * SRIOV is enabled.
			 */
			memset(dev_cap, 0, sizeof(*dev_cap));
			err = mlx4_QUERY_DEV_CAP(dev, dev_cap);
			if (err) {
				mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
				goto err_fw;
			}

			if (mlx4_check_dev_cap(dev, dev_cap, nvfs))
				goto err_fw;
		}
	}

	err = mlx4_init_hca(dev);
	if (err) {
		if (err == -EACCES) {
			/* Not primary Physical function
			 * Running in slave mode */
			mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL);
			/* We're not a PF */
			if (dev->flags & MLX4_FLAG_SRIOV) {
				if (!existing_vfs)
					pci_disable_sriov(pdev);
				if (mlx4_is_master(dev))
					atomic_dec(&pf_loading);
				dev->flags &= ~MLX4_FLAG_SRIOV;
			}
			if (!mlx4_is_slave(dev))
				mlx4_free_ownership(dev);
			dev->flags |= MLX4_FLAG_SLAVE;
			dev->flags &= ~MLX4_FLAG_MASTER;
			goto slave_start;
		} else
			goto err_fw;
	}

	if (mlx4_is_master(dev) && (dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS)) {
		u64 dev_flags = mlx4_enable_sriov(dev, pdev, total_vfs, existing_vfs);

		if ((dev->flags ^ dev_flags) & (MLX4_FLAG_MASTER | MLX4_FLAG_SLAVE)) {
			mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_VHCR);
			dev->flags = dev_flags;
			err = mlx4_cmd_init(dev);
			if (err) {
				/* Only VHCR is cleaned up, so could still
				 * send FW commands
				 */
				mlx4_err(dev, "Failed to init VHCR command interface, aborting\n");
				goto err_close;
			}
		} else {
			dev->flags = dev_flags;
		}

		if (!SRIOV_VALID_STATE(dev->flags)) {
			mlx4_err(dev, "Invalid SRIOV state\n");
			goto err_close;
		}
	}

	/* check if the device is functioning at its maximum possible speed.
	 * No return code for this call, just warn the user in case of PCI
	 * express device capabilities are under-satisfied by the bus.
	 */
	if (!mlx4_is_slave(dev))
		mlx4_check_pcie_caps(dev);

	/* In master functions, the communication channel must be initialized
	 * after obtaining its address from fw */
	if (mlx4_is_master(dev)) {
		int ib_ports = 0;

		mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
			ib_ports++;

		if (ib_ports &&
		    (num_vfs_argc > 1 || probe_vfs_argc > 1)) {
			mlx4_err(dev,
				 "Invalid syntax of num_vfs/probe_vfs with IB port - single port VFs syntax is only supported when all ports are configured as ethernet\n");
			err = -EINVAL;
			goto err_close;
		}
		if (dev->caps.num_ports < 2 &&
		    num_vfs_argc > 1) {
			err = -EINVAL;
			mlx4_err(dev,
				 "Error: Trying to configure VFs on port 2, but HCA has only %d physical ports\n",
				 dev->caps.num_ports);
			goto err_close;
		}
		memcpy(dev->nvfs, nvfs, sizeof(dev->nvfs));

		for (i = 0; i < sizeof(dev->nvfs)/sizeof(dev->nvfs[0]); i++) {
			unsigned j;

			for (j = 0; j < dev->nvfs[i]; ++sum, ++j) {
				dev->dev_vfs[sum].min_port = i < 2 ? i + 1 : 1;
				dev->dev_vfs[sum].n_ports = i < 2 ? 1 :
					dev->caps.num_ports;
			}
		}

		/* In master functions, the communication channel
		 * must be initialized after obtaining its address from fw
		 */
		err = mlx4_multi_func_init(dev);
		if (err) {
			mlx4_err(dev, "Failed to init master mfunc interface, aborting.\n");
			goto err_close;
		}
	}

	err = mlx4_alloc_eq_table(dev);
	if (err)
		goto err_master_mfunc;

	priv->msix_ctl.pool_bm = 0;
	mutex_init(&priv->msix_ctl.pool_lock);

	mlx4_enable_msi_x(dev);
	if ((mlx4_is_mfunc(dev)) &&
	    !(dev->flags & MLX4_FLAG_MSI_X)) {
		err = -ENOSYS;
		mlx4_err(dev, "INTx is not supported in multi-function mode, aborting\n");
		goto err_free_eq;
	}

	if (!mlx4_is_slave(dev)) {
		err = mlx4_init_steering(dev);
		if (err)
			goto err_disable_msix;
	}

	err = mlx4_setup_hca(dev);
	if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X) &&
	    !mlx4_is_mfunc(dev)) {
		dev->flags &= ~MLX4_FLAG_MSI_X;
		dev->caps.num_comp_vectors = 1;
		dev->caps.comp_pool	   = 0;
		pci_disable_msix(pdev);
		err = mlx4_setup_hca(dev);
	}

	if (err)
		goto err_steer;

	mlx4_init_quotas(dev);

	for (port = 1; port <= dev->caps.num_ports; port++) {
		err = mlx4_init_port_info(dev, port);
		if (err)
			goto err_port;
	}

	err = mlx4_register_device(dev);
	if (err)
		goto err_port;

	mlx4_request_modules(dev);

	mlx4_sense_init(dev);
	mlx4_start_sense(dev);

	priv->removed = 0;

	if (mlx4_is_master(dev) && dev->num_vfs)
		atomic_dec(&pf_loading);

	kfree(dev_cap);
	return 0;

err_port:
	for (--port; port >= 1; --port)
		mlx4_cleanup_port_info(&priv->port[port]);

	mlx4_cleanup_counters_table(dev);
	mlx4_cleanup_qp_table(dev);
	mlx4_cleanup_srq_table(dev);
	mlx4_cleanup_cq_table(dev);
	mlx4_cmd_use_polling(dev);
	mlx4_cleanup_eq_table(dev);
	mlx4_cleanup_mcg_table(dev);
	mlx4_cleanup_mr_table(dev);
	mlx4_cleanup_xrcd_table(dev);
	mlx4_cleanup_pd_table(dev);
	mlx4_cleanup_uar_table(dev);

err_steer:
	if (!mlx4_is_slave(dev))
		mlx4_clear_steering(dev);

err_disable_msix:
	if (dev->flags & MLX4_FLAG_MSI_X)
		pci_disable_msix(pdev);

err_free_eq:
	mlx4_free_eq_table(dev);

err_master_mfunc:
	if (mlx4_is_master(dev))
		mlx4_multi_func_cleanup(dev);

	if (mlx4_is_slave(dev)) {
		kfree(dev->caps.qp0_qkey);
		kfree(dev->caps.qp0_tunnel);
		kfree(dev->caps.qp0_proxy);
		kfree(dev->caps.qp1_tunnel);
		kfree(dev->caps.qp1_proxy);
	}

err_close:
	mlx4_close_hca(dev);

err_fw:
	mlx4_close_fw(dev);

err_mfunc:
	if (mlx4_is_slave(dev))
		mlx4_multi_func_cleanup(dev);

err_cmd:
	mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL);

err_sriov:
	if (dev->flags & MLX4_FLAG_SRIOV && !existing_vfs)
		pci_disable_sriov(pdev);

	if (mlx4_is_master(dev) && dev->num_vfs)
		atomic_dec(&pf_loading);

	kfree(priv->dev.dev_vfs);

	if (!mlx4_is_slave(dev))
		mlx4_free_ownership(dev);

	kfree(dev_cap);
	return err;
}
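/*
 * The param_map table in __mlx4_init_one() below routes positional
 * num_vfs/probe_vf arguments to ports.  With a single argument the
 * count applies to dual-port VFs (row {2, 0, 0}); with two or three
 * arguments the values map to port 1, port 2, and dual-port VFs
 * respectively.  For example, num_vfs=1,2,3 requests one single-port VF
 * on port 1, two on port 2, and three dual-port VFs.
 */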
static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data,
			   struct mlx4_priv *priv)
{
	int err;
	int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
	int prb_vf[MLX4_MAX_PORTS + 1] = {0, 0, 0};
	const int param_map[MLX4_MAX_PORTS + 1][MLX4_MAX_PORTS + 1] = {
		{2, 0, 0}, {0, 1, 2}, {0, 1, 2} };
	unsigned total_vfs = 0;
	unsigned int i;

	pr_info(DRV_NAME ": Initializing %s\n", pci_name(pdev));

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
		return err;
	}

	/* Due to requirement that all VFs and the PF are *guaranteed* 2 MACS
	 * per port, we must limit the number of VFs to 63 (since their are
	 * 128 MACs)
	 */
	for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]) && i < num_vfs_argc;
	     total_vfs += nvfs[param_map[num_vfs_argc - 1][i]], i++) {
		nvfs[param_map[num_vfs_argc - 1][i]] = num_vfs[i];
		if (nvfs[i] < 0) {
			dev_err(&pdev->dev, "num_vfs module parameter cannot be negative\n");
			err = -EINVAL;
			goto err_disable_pdev;
		}
	}
	for (i = 0; i < sizeof(prb_vf)/sizeof(prb_vf[0]) && i < probe_vfs_argc;
	     i++) {
		prb_vf[param_map[probe_vfs_argc - 1][i]] = probe_vf[i];
		if (prb_vf[i] < 0 || prb_vf[i] > nvfs[i]) {
			dev_err(&pdev->dev, "probe_vf module parameter cannot be negative or greater than num_vfs\n");
			err = -EINVAL;
			goto err_disable_pdev;
		}
	}
	if (total_vfs >= MLX4_MAX_NUM_VF) {
		dev_err(&pdev->dev,
			"Requested more VF's (%d) than allowed (%d)\n",
			total_vfs, MLX4_MAX_NUM_VF - 1);
		err = -EINVAL;
		goto err_disable_pdev;
	}

	for (i = 0; i < MLX4_MAX_PORTS; i++) {
		if (nvfs[i] + nvfs[2] >= MLX4_MAX_NUM_VF_P_PORT) {
			dev_err(&pdev->dev,
				"Requested more VF's (%d) for port (%d) than allowed (%d)\n",
				nvfs[i] + nvfs[2], i + 1,
				MLX4_MAX_NUM_VF_P_PORT - 1);
			err = -EINVAL;
			goto err_disable_pdev;
		}
	}

	/* Check for BARs. */
	if (!(pci_dev_data & MLX4_PCI_DEV_IS_VF) &&
	    !(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "Missing DCS, aborting (driver_data: 0x%x, pci_resource_flags(pdev, 0):0x%lx)\n",
			pci_dev_data, pci_resource_flags(pdev, 0));
		err = -ENODEV;
		goto err_disable_pdev;
	}
	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "Missing UAR, aborting\n");
		err = -ENODEV;
		goto err_disable_pdev;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "Couldn't get PCI resources, aborting\n");
		goto err_disable_pdev;
	}

	pci_set_master(pdev);

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask\n");
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting\n");
			goto err_release_regions;
		}
	}
	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		dev_warn(&pdev->dev, "Warning: couldn't set 64-bit consistent PCI DMA mask\n");
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, aborting\n");
			goto err_release_regions;
		}
	}

	/* Allow large DMA segments, up to the firmware limit of 1 GB */
	dma_set_max_seg_size(&pdev->dev, 1024 * 1024 * 1024);

	/* Detect if this device is a virtual function */
	if (pci_dev_data & MLX4_PCI_DEV_IS_VF) {
		/* When acting as pf, we normally skip vfs unless explicitly
		 * requested to probe them.
		 */
		if (total_vfs) {
			unsigned vfs_offset = 0;

			for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]) &&
			     vfs_offset + nvfs[i] < extended_func_num(pdev);
			     vfs_offset += nvfs[i], i++)
				;
			if (i == sizeof(nvfs)/sizeof(nvfs[0])) {
				err = -ENODEV;
				goto err_release_regions;
			}
			if ((extended_func_num(pdev) - vfs_offset)
			    > prb_vf[i]) {
				dev_warn(&pdev->dev, "Skipping virtual function:%d\n",
					 extended_func_num(pdev));
				err = -ENODEV;
				goto err_release_regions;
			}
		}
	}

	err = mlx4_load_one(pdev, pci_dev_data, total_vfs, nvfs, priv);
	if (err)
		goto err_release_regions;
	return 0;

err_release_regions:
	pci_release_regions(pdev);

err_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct mlx4_priv *priv;
	struct mlx4_dev *dev;
	int ret;

	printk_once(KERN_INFO "%s", mlx4_version);

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	dev = &priv->dev;
	pci_set_drvdata(pdev, dev);
	priv->pci_dev_data = id->driver_data;

	ret = __mlx4_init_one(pdev, id->driver_data, priv);
	if (ret)
		kfree(priv);

	return ret;
}
static void mlx4_unload_one(struct pci_dev *pdev)
{
	struct mlx4_dev	 *dev  = pci_get_drvdata(pdev);
	struct mlx4_priv *priv = mlx4_priv(dev);
	int               pci_dev_data;
	int p;
	int active_vfs = 0;

	if (priv->removed)
		return;

	pci_dev_data = priv->pci_dev_data;

	/* Disabling SR-IOV is not allowed while there are active vf's */
	if (mlx4_is_master(dev)) {
		active_vfs = mlx4_how_many_lives_vf(dev);
		if (active_vfs) {
			pr_warn("Removing PF when there are active VF's !!\n");
			pr_warn("Will not disable SR-IOV.\n");
		}
	}
	mlx4_stop_sense(dev);
	mlx4_unregister_device(dev);

	for (p = 1; p <= dev->caps.num_ports; p++) {
		mlx4_cleanup_port_info(&priv->port[p]);
		mlx4_CLOSE_PORT(dev, p);
	}

	if (mlx4_is_master(dev))
		mlx4_free_resource_tracker(dev,
					   RES_TR_FREE_SLAVES_ONLY);

	mlx4_cleanup_counters_table(dev);
	mlx4_cleanup_qp_table(dev);
	mlx4_cleanup_srq_table(dev);
	mlx4_cleanup_cq_table(dev);
	mlx4_cmd_use_polling(dev);
	mlx4_cleanup_eq_table(dev);
	mlx4_cleanup_mcg_table(dev);
	mlx4_cleanup_mr_table(dev);
	mlx4_cleanup_xrcd_table(dev);
	mlx4_cleanup_pd_table(dev);

	if (mlx4_is_master(dev))
		mlx4_free_resource_tracker(dev,
					   RES_TR_FREE_STRUCTS_ONLY);

	iounmap(priv->kar);
	mlx4_uar_free(dev, &priv->driver_uar);
	mlx4_cleanup_uar_table(dev);
	if (!mlx4_is_slave(dev))
		mlx4_clear_steering(dev);
	mlx4_free_eq_table(dev);
	if (mlx4_is_master(dev))
		mlx4_multi_func_cleanup(dev);
	mlx4_close_hca(dev);
	mlx4_close_fw(dev);
	if (mlx4_is_slave(dev))
		mlx4_multi_func_cleanup(dev);
	mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL);

	if (dev->flags & MLX4_FLAG_MSI_X)
		pci_disable_msix(pdev);
	if (dev->flags & MLX4_FLAG_SRIOV && !active_vfs) {
		mlx4_warn(dev, "Disabling SR-IOV\n");
		pci_disable_sriov(pdev);
		dev->flags &= ~MLX4_FLAG_SRIOV;
		dev->num_vfs = 0;
	}

	if (!mlx4_is_slave(dev))
		mlx4_free_ownership(dev);

	kfree(dev->caps.qp0_qkey);
	kfree(dev->caps.qp0_tunnel);
	kfree(dev->caps.qp0_proxy);
	kfree(dev->caps.qp1_tunnel);
	kfree(dev->caps.qp1_proxy);
	kfree(dev->dev_vfs);

	memset(priv, 0, sizeof(*priv));
	priv->pci_dev_data = pci_dev_data;
	priv->removed = 1;
}
static void mlx4_remove_one(struct pci_dev *pdev)
{
	struct mlx4_dev	 *dev  = pci_get_drvdata(pdev);
	struct mlx4_priv *priv = mlx4_priv(dev);

	mlx4_unload_one(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	kfree(priv);
	pci_set_drvdata(pdev, NULL);
}
int mlx4_restart_one(struct pci_dev *pdev)
{
	struct mlx4_dev	 *dev  = pci_get_drvdata(pdev);
	struct mlx4_priv *priv = mlx4_priv(dev);
	int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
	int pci_dev_data, err, total_vfs;

	pci_dev_data = priv->pci_dev_data;
	total_vfs = dev->num_vfs;
	memcpy(nvfs, dev->nvfs, sizeof(dev->nvfs));

	mlx4_unload_one(pdev);
	err = mlx4_load_one(pdev, pci_dev_data, total_vfs, nvfs, priv);
	if (err) {
		mlx4_err(dev, "%s: ERROR: mlx4_load_one failed, pci_name=%s, err=%d\n",
			 __func__, pci_name(pdev), err);
		return err;
	}

	return err;
}
static const struct pci_device_id mlx4_pci_table[] = {
	/* MT25408 "Hermon" SDR */
	{ PCI_VDEVICE(MELLANOX, 0x6340), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT25408 "Hermon" DDR */
	{ PCI_VDEVICE(MELLANOX, 0x634a), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT25408 "Hermon" QDR */
	{ PCI_VDEVICE(MELLANOX, 0x6354), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT25408 "Hermon" DDR PCIe gen2 */
	{ PCI_VDEVICE(MELLANOX, 0x6732), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT25408 "Hermon" QDR PCIe gen2 */
	{ PCI_VDEVICE(MELLANOX, 0x673c), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT25408 "Hermon" EN 10GigE */
	{ PCI_VDEVICE(MELLANOX, 0x6368), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT25408 "Hermon" EN 10GigE PCIe gen2 */
	{ PCI_VDEVICE(MELLANOX, 0x6750), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT25458 ConnectX EN 10GBASE-T 10GigE */
	{ PCI_VDEVICE(MELLANOX, 0x6372), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT25458 ConnectX EN 10GBASE-T+Gen2 10GigE */
	{ PCI_VDEVICE(MELLANOX, 0x675a), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT26468 ConnectX EN 10GigE PCIe gen2*/
	{ PCI_VDEVICE(MELLANOX, 0x6764), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT26438 ConnectX EN 40GigE PCIe gen2 5GT/s */
	{ PCI_VDEVICE(MELLANOX, 0x6746), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT26478 ConnectX2 40GigE PCIe gen2 */
	{ PCI_VDEVICE(MELLANOX, 0x676e), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT25400 Family [ConnectX-2 Virtual Function] */
	{ PCI_VDEVICE(MELLANOX, 0x1002), MLX4_PCI_DEV_IS_VF },
	/* MT27500 Family [ConnectX-3] */
	{ PCI_VDEVICE(MELLANOX, 0x1003), 0 },
	/* MT27500 Family [ConnectX-3 Virtual Function] */
	{ PCI_VDEVICE(MELLANOX, 0x1004), MLX4_PCI_DEV_IS_VF },
	{ PCI_VDEVICE(MELLANOX, 0x1005), 0 }, /* MT27510 Family */
	{ PCI_VDEVICE(MELLANOX, 0x1006), 0 }, /* MT27511 Family */
	{ PCI_VDEVICE(MELLANOX, 0x1007), 0 }, /* MT27520 Family */
	{ PCI_VDEVICE(MELLANOX, 0x1008), 0 }, /* MT27521 Family */
	{ PCI_VDEVICE(MELLANOX, 0x1009), 0 }, /* MT27530 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100a), 0 }, /* MT27531 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100b), 0 }, /* MT27540 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100c), 0 }, /* MT27541 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100d), 0 }, /* MT27550 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100e), 0 }, /* MT27551 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100f), 0 }, /* MT27560 Family */
	{ PCI_VDEVICE(MELLANOX, 0x1010), 0 }, /* MT27561 Family */
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, mlx4_pci_table);
static pci_ers_result_t mlx4_pci_err_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	mlx4_unload_one(pdev);

	return state == pci_channel_io_perm_failure ?
		PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
}
static pci_ers_result_t mlx4_pci_slot_reset(struct pci_dev *pdev)
{
	struct mlx4_dev	 *dev  = pci_get_drvdata(pdev);
	struct mlx4_priv *priv = mlx4_priv(dev);
	int               ret;

	ret = __mlx4_init_one(pdev, priv->pci_dev_data, priv);

	return ret ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
}
static const struct pci_error_handlers mlx4_err_handler = {
	.error_detected = mlx4_pci_err_detected,
	.slot_reset     = mlx4_pci_slot_reset,
};

static struct pci_driver mlx4_driver = {
	.name		= DRV_NAME,
	.id_table	= mlx4_pci_table,
	.probe		= mlx4_init_one,
	.shutdown	= mlx4_unload_one,
	.remove		= mlx4_remove_one,
	.err_handler    = &mlx4_err_handler,
};
static int __init mlx4_verify_params(void)
{
	if ((log_num_mac < 0) || (log_num_mac > 7)) {
		pr_warn("mlx4_core: bad num_mac: %d\n", log_num_mac);
		return -1;
	}

	if (log_num_vlan != 0)
		pr_warn("mlx4_core: log_num_vlan - obsolete module param, using %d\n",
			MLX4_LOG_NUM_VLANS);

	if (use_prio != 0)
		pr_warn("mlx4_core: use_prio - obsolete module param, ignored\n");

	if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 7)) {
		pr_warn("mlx4_core: bad log_mtts_per_seg: %d\n",
			log_mtts_per_seg);
		return -1;
	}

	/* Check if module param for ports type has legal combination */
	if (port_type_array[0] == false && port_type_array[1] == true) {
		pr_warn("Module parameter configuration ETH/IB is not supported. Switching to default configuration IB/IB\n");
		port_type_array[0] = true;
	}

	if (mlx4_log_num_mgm_entry_size != -1 &&
	    (mlx4_log_num_mgm_entry_size < MLX4_MIN_MGM_LOG_ENTRY_SIZE ||
	     mlx4_log_num_mgm_entry_size > MLX4_MAX_MGM_LOG_ENTRY_SIZE)) {
		pr_warn("mlx4_core: mlx4_log_num_mgm_entry_size (%d) not in legal range (-1 or %d..%d)\n",
			mlx4_log_num_mgm_entry_size,
			MLX4_MIN_MGM_LOG_ENTRY_SIZE,
			MLX4_MAX_MGM_LOG_ENTRY_SIZE);
		return -1;
	}

	return 0;
}
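/*
 * Illustrative module load (parameter values are examples only):
 *
 *	modprobe mlx4_core debug_level=1 num_vfs=2,2,0 probe_vf=1,1,0
 *
 * mlx4_verify_params() above rejects out-of-range values before the PCI
 * driver is registered, so a bad parameter fails the module load rather
 * than a later device probe.
 */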
static int __init mlx4_init(void)
{
	int ret;

	if (mlx4_verify_params())
		return -EINVAL;

	mlx4_catas_init();

	mlx4_wq = create_singlethread_workqueue("mlx4");
	if (!mlx4_wq)
		return -ENOMEM;

	ret = pci_register_driver(&mlx4_driver);
	if (ret < 0)
		destroy_workqueue(mlx4_wq);
	return ret < 0 ? ret : 0;
}

static void __exit mlx4_cleanup(void)
{
	pci_unregister_driver(&mlx4_driver);
	destroy_workqueue(mlx4_wq);
}

module_init(mlx4_init);
module_exit(mlx4_cleanup);