/*
 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/io-mapping.h>
#include <linux/delay.h>
#include <linux/kmod.h>

#include <linux/mlx4/device.h>
#include <linux/mlx4/doorbell.h>

#include "mlx4.h"
#include "fw.h"
#include "icm.h"
MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("Mellanox ConnectX HCA low-level driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);

struct workqueue_struct *mlx4_wq;
#ifdef CONFIG_MLX4_DEBUG

int mlx4_debug_level = 0;
module_param_named(debug_level, mlx4_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");

#endif /* CONFIG_MLX4_DEBUG */
#ifdef CONFIG_PCI_MSI

static int msi_x = 1;
module_param(msi_x, int, 0444);
MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero");

#else /* CONFIG_PCI_MSI */

#define msi_x (0)

#endif /* CONFIG_PCI_MSI */
static uint8_t num_vfs[3] = {0, 0, 0};
static int num_vfs_argc = 3;
module_param_array(num_vfs, byte, &num_vfs_argc, 0444);
MODULE_PARM_DESC(num_vfs, "enable #num_vfs functions if num_vfs > 0\n"
			  "num_vfs=port1,port2,port1+2");

static uint8_t probe_vf[3] = {0, 0, 0};
static int probe_vfs_argc = 3;
module_param_array(probe_vf, byte, &probe_vfs_argc, 0444);
MODULE_PARM_DESC(probe_vf, "number of vfs to probe by pf driver (num_vfs > 0)\n"
			   "probe_vf=port1,port2,port1+2");
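/*
 * Illustrative usage sketch only (the concrete values below are
 * hypothetical; the parameter names and per-port ordering come from the
 * MODULE_PARM_DESC strings above).  To expose four single-port VFs on
 * port 1, two on port 2 and one dual-port VF, with the PF driver itself
 * probing one VF on each single port, the module could be loaded with:
 *
 *	modprobe mlx4_core num_vfs=4,2,1 probe_vf=1,1,0
 *
 * Later in __mlx4_init_one() each probe_vf entry is checked to be
 * non-negative and no larger than the matching num_vfs entry.
 */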
int mlx4_log_num_mgm_entry_size = MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE;
module_param_named(log_num_mgm_entry_size,
		   mlx4_log_num_mgm_entry_size, int, 0444);
MODULE_PARM_DESC(log_num_mgm_entry_size, "log mgm size, that defines the num"
					 " of qp per mcg, for example:"
					 " 10 gives 248. range: 7 <="
					 " log_num_mgm_entry_size <= 12."
					 " To activate device managed"
					 " flow steering when available, set to -1");
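/*
 * Worked example for the "10 gives 248" note above, using the same
 * relation that slave_adjust_steering_mode() applies further down:
 *
 *	num_qp_per_mgm = 4 * ((1 << log_num_mgm_entry_size) / 16 - 2)
 *	               = 4 * (1024 / 16 - 2)	for log_num_mgm_entry_size = 10
 *	               = 4 * 62 = 248
 *
 * i.e. a 1 KB MGM entry holds 248 member QPNs once the leading header
 * chunks of the entry are subtracted.
 */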
static bool enable_64b_cqe_eqe = true;
module_param(enable_64b_cqe_eqe, bool, 0444);
MODULE_PARM_DESC(enable_64b_cqe_eqe,
		 "Enable 64 byte CQEs/EQEs when the FW supports this (default: True)");
#define PF_CONTEXT_BEHAVIOUR_MASK	MLX4_FUNC_CAP_64B_EQE_CQE

static char mlx4_version[] =
	DRV_NAME ": Mellanox ConnectX core driver v"
	DRV_VERSION " (" DRV_RELDATE ")\n";
static struct mlx4_profile default_profile = {
	.rdmarc_per_qp	= 1 << 4,
	.num_mtt	= 1 << 20, /* It is really num mtt segments */
};
static int log_num_mac = 7;
module_param_named(log_num_mac, log_num_mac, int, 0444);
MODULE_PARM_DESC(log_num_mac, "Log2 max number of MACs per ETH port (1-7)");

static int log_num_vlan;
module_param_named(log_num_vlan, log_num_vlan, int, 0444);
MODULE_PARM_DESC(log_num_vlan, "Log2 max number of VLANs per ETH port (0-7)");
/* Log2 max number of VLANs per ETH port (0-7) */
#define MLX4_LOG_NUM_VLANS 7

static bool use_prio;
module_param_named(use_prio, use_prio, bool, 0444);
MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports");

int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG);
module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444);
MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-7)");

static int port_type_array[2] = {MLX4_PORT_TYPE_NONE, MLX4_PORT_TYPE_NONE};
static int arr_argc = 2;
module_param_array(port_type_array, int, &arr_argc, 0444);
MODULE_PARM_DESC(port_type_array, "Array of port types: HW_DEFAULT (0) is default, "
				  "1 for IB, 2 for Ethernet");
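/*
 * Illustrative example (the values are hypothetical, the option name is
 * the parameter above): forcing port 1 to InfiniBand and port 2 to
 * Ethernet on a dual-port HCA would look like
 *
 *	modprobe mlx4_core port_type_array=1,2
 *
 * Leaving an entry at 0 keeps the firmware/HW default for that port,
 * which mlx4_dev_cap() below may further refine via port sensing.
 */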
struct mlx4_port_config {
	struct list_head list;
	enum mlx4_port_type port_type[MLX4_MAX_PORTS + 1];
	struct pci_dev *pdev;
};

static atomic_t pf_loading = ATOMIC_INIT(0);
int mlx4_check_port_params(struct mlx4_dev *dev,
			   enum mlx4_port_type *port_type)
{
	int i;

	for (i = 0; i < dev->caps.num_ports - 1; i++) {
		if (port_type[i] != port_type[i + 1]) {
			if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) {
				mlx4_err(dev, "Only same port types supported on this HCA, aborting\n");
				return -EINVAL;
			}
		}
	}

	for (i = 0; i < dev->caps.num_ports; i++) {
		if (!(port_type[i] & dev->caps.supported_type[i+1])) {
			mlx4_err(dev, "Requested port type for port %d is not supported on this HCA\n",
				 i + 1);
			return -EINVAL;
		}
	}
	return 0;
}
static void mlx4_set_port_mask(struct mlx4_dev *dev)
{
	int i;

	for (i = 1; i <= dev->caps.num_ports; ++i)
		dev->caps.port_mask[i] = dev->caps.port_type[i];
}
188 static int mlx4_dev_cap(struct mlx4_dev
*dev
, struct mlx4_dev_cap
*dev_cap
)
193 err
= mlx4_QUERY_DEV_CAP(dev
, dev_cap
);
195 mlx4_err(dev
, "QUERY_DEV_CAP command failed, aborting\n");
199 if (dev_cap
->min_page_sz
> PAGE_SIZE
) {
200 mlx4_err(dev
, "HCA minimum page size of %d bigger than kernel PAGE_SIZE of %ld, aborting\n",
201 dev_cap
->min_page_sz
, PAGE_SIZE
);
204 if (dev_cap
->num_ports
> MLX4_MAX_PORTS
) {
205 mlx4_err(dev
, "HCA has %d ports, but we only support %d, aborting\n",
206 dev_cap
->num_ports
, MLX4_MAX_PORTS
);
210 if (dev_cap
->uar_size
> pci_resource_len(dev
->pdev
, 2)) {
211 mlx4_err(dev
, "HCA reported UAR size of 0x%x bigger than PCI resource 2 size of 0x%llx, aborting\n",
213 (unsigned long long) pci_resource_len(dev
->pdev
, 2));
217 dev
->caps
.num_ports
= dev_cap
->num_ports
;
218 dev
->phys_caps
.num_phys_eqs
= MLX4_MAX_EQ_NUM
;
219 for (i
= 1; i
<= dev
->caps
.num_ports
; ++i
) {
220 dev
->caps
.vl_cap
[i
] = dev_cap
->max_vl
[i
];
221 dev
->caps
.ib_mtu_cap
[i
] = dev_cap
->ib_mtu
[i
];
222 dev
->phys_caps
.gid_phys_table_len
[i
] = dev_cap
->max_gids
[i
];
223 dev
->phys_caps
.pkey_phys_table_len
[i
] = dev_cap
->max_pkeys
[i
];
224 /* set gid and pkey table operating lengths by default
225 * to non-sriov values */
226 dev
->caps
.gid_table_len
[i
] = dev_cap
->max_gids
[i
];
227 dev
->caps
.pkey_table_len
[i
] = dev_cap
->max_pkeys
[i
];
228 dev
->caps
.port_width_cap
[i
] = dev_cap
->max_port_width
[i
];
229 dev
->caps
.eth_mtu_cap
[i
] = dev_cap
->eth_mtu
[i
];
230 dev
->caps
.def_mac
[i
] = dev_cap
->def_mac
[i
];
231 dev
->caps
.supported_type
[i
] = dev_cap
->supported_port_types
[i
];
232 dev
->caps
.suggested_type
[i
] = dev_cap
->suggested_type
[i
];
233 dev
->caps
.default_sense
[i
] = dev_cap
->default_sense
[i
];
234 dev
->caps
.trans_type
[i
] = dev_cap
->trans_type
[i
];
235 dev
->caps
.vendor_oui
[i
] = dev_cap
->vendor_oui
[i
];
236 dev
->caps
.wavelength
[i
] = dev_cap
->wavelength
[i
];
237 dev
->caps
.trans_code
[i
] = dev_cap
->trans_code
[i
];
240 dev
->caps
.uar_page_size
= PAGE_SIZE
;
241 dev
->caps
.num_uars
= dev_cap
->uar_size
/ PAGE_SIZE
;
242 dev
->caps
.local_ca_ack_delay
= dev_cap
->local_ca_ack_delay
;
243 dev
->caps
.bf_reg_size
= dev_cap
->bf_reg_size
;
244 dev
->caps
.bf_regs_per_page
= dev_cap
->bf_regs_per_page
;
245 dev
->caps
.max_sq_sg
= dev_cap
->max_sq_sg
;
246 dev
->caps
.max_rq_sg
= dev_cap
->max_rq_sg
;
247 dev
->caps
.max_wqes
= dev_cap
->max_qp_sz
;
248 dev
->caps
.max_qp_init_rdma
= dev_cap
->max_requester_per_qp
;
249 dev
->caps
.max_srq_wqes
= dev_cap
->max_srq_sz
;
250 dev
->caps
.max_srq_sge
= dev_cap
->max_rq_sg
- 1;
251 dev
->caps
.reserved_srqs
= dev_cap
->reserved_srqs
;
252 dev
->caps
.max_sq_desc_sz
= dev_cap
->max_sq_desc_sz
;
253 dev
->caps
.max_rq_desc_sz
= dev_cap
->max_rq_desc_sz
;
255 * Subtract 1 from the limit because we need to allocate a
256 * spare CQE so the HCA HW can tell the difference between an
257 * empty CQ and a full CQ.
259 dev
->caps
.max_cqes
= dev_cap
->max_cq_sz
- 1;
260 dev
->caps
.reserved_cqs
= dev_cap
->reserved_cqs
;
261 dev
->caps
.reserved_eqs
= dev_cap
->reserved_eqs
;
262 dev
->caps
.reserved_mtts
= dev_cap
->reserved_mtts
;
263 dev
->caps
.reserved_mrws
= dev_cap
->reserved_mrws
;
265 /* The first 128 UARs are used for EQ doorbells */
266 dev
->caps
.reserved_uars
= max_t(int, 128, dev_cap
->reserved_uars
);
267 dev
->caps
.reserved_pds
= dev_cap
->reserved_pds
;
268 dev
->caps
.reserved_xrcds
= (dev
->caps
.flags
& MLX4_DEV_CAP_FLAG_XRC
) ?
269 dev_cap
->reserved_xrcds
: 0;
270 dev
->caps
.max_xrcds
= (dev
->caps
.flags
& MLX4_DEV_CAP_FLAG_XRC
) ?
271 dev_cap
->max_xrcds
: 0;
272 dev
->caps
.mtt_entry_sz
= dev_cap
->mtt_entry_sz
;
274 dev
->caps
.max_msg_sz
= dev_cap
->max_msg_sz
;
275 dev
->caps
.page_size_cap
= ~(u32
) (dev_cap
->min_page_sz
- 1);
276 dev
->caps
.flags
= dev_cap
->flags
;
277 dev
->caps
.flags2
= dev_cap
->flags2
;
278 dev
->caps
.bmme_flags
= dev_cap
->bmme_flags
;
279 dev
->caps
.reserved_lkey
= dev_cap
->reserved_lkey
;
280 dev
->caps
.stat_rate_support
= dev_cap
->stat_rate_support
;
281 dev
->caps
.max_gso_sz
= dev_cap
->max_gso_sz
;
282 dev
->caps
.max_rss_tbl_sz
= dev_cap
->max_rss_tbl_sz
;
284 /* Sense port always allowed on supported devices for ConnectX-1 and -2 */
285 if (mlx4_priv(dev
)->pci_dev_data
& MLX4_PCI_DEV_FORCE_SENSE_PORT
)
286 dev
->caps
.flags
|= MLX4_DEV_CAP_FLAG_SENSE_SUPPORT
;
287 /* Don't do sense port on multifunction devices (for now at least) */
288 if (mlx4_is_mfunc(dev
))
289 dev
->caps
.flags
&= ~MLX4_DEV_CAP_FLAG_SENSE_SUPPORT
;
291 dev
->caps
.log_num_macs
= log_num_mac
;
292 dev
->caps
.log_num_vlans
= MLX4_LOG_NUM_VLANS
;
293 dev
->caps
.log_num_prios
= use_prio
? 3 : 0;
295 for (i
= 1; i
<= dev
->caps
.num_ports
; ++i
) {
296 dev
->caps
.port_type
[i
] = MLX4_PORT_TYPE_NONE
;
297 if (dev
->caps
.supported_type
[i
]) {
298 /* if only ETH is supported - assign ETH */
299 if (dev
->caps
.supported_type
[i
] == MLX4_PORT_TYPE_ETH
)
300 dev
->caps
.port_type
[i
] = MLX4_PORT_TYPE_ETH
;
301 /* if only IB is supported, assign IB */
302 else if (dev
->caps
.supported_type
[i
] ==
304 dev
->caps
.port_type
[i
] = MLX4_PORT_TYPE_IB
;
306 /* if IB and ETH are supported, we set the port
307 * type according to user selection of port type;
308 * if user selected none, take the FW hint */
309 if (port_type_array
[i
- 1] == MLX4_PORT_TYPE_NONE
)
310 dev
->caps
.port_type
[i
] = dev
->caps
.suggested_type
[i
] ?
311 MLX4_PORT_TYPE_ETH
: MLX4_PORT_TYPE_IB
;
313 dev
->caps
.port_type
[i
] = port_type_array
[i
- 1];
317 * Link sensing is allowed on the port if 3 conditions are true:
318 * 1. Both protocols are supported on the port.
319 * 2. Different types are supported on the port
320 * 3. FW declared that it supports link sensing
322 mlx4_priv(dev
)->sense
.sense_allowed
[i
] =
323 ((dev
->caps
.supported_type
[i
] == MLX4_PORT_TYPE_AUTO
) &&
324 (dev
->caps
.flags
& MLX4_DEV_CAP_FLAG_DPDP
) &&
325 (dev
->caps
.flags
& MLX4_DEV_CAP_FLAG_SENSE_SUPPORT
));
328 * If "default_sense" bit is set, we move the port to "AUTO" mode
329 * and perform sense_port FW command to try and set the correct
330 * port type from beginning
332 if (mlx4_priv(dev
)->sense
.sense_allowed
[i
] && dev
->caps
.default_sense
[i
]) {
333 enum mlx4_port_type sensed_port
= MLX4_PORT_TYPE_NONE
;
334 dev
->caps
.possible_type
[i
] = MLX4_PORT_TYPE_AUTO
;
335 mlx4_SENSE_PORT(dev
, i
, &sensed_port
);
336 if (sensed_port
!= MLX4_PORT_TYPE_NONE
)
337 dev
->caps
.port_type
[i
] = sensed_port
;
339 dev
->caps
.possible_type
[i
] = dev
->caps
.port_type
[i
];
342 if (dev
->caps
.log_num_macs
> dev_cap
->log_max_macs
[i
]) {
343 dev
->caps
.log_num_macs
= dev_cap
->log_max_macs
[i
];
344 mlx4_warn(dev
, "Requested number of MACs is too much for port %d, reducing to %d\n",
345 i
, 1 << dev
->caps
.log_num_macs
);
347 if (dev
->caps
.log_num_vlans
> dev_cap
->log_max_vlans
[i
]) {
348 dev
->caps
.log_num_vlans
= dev_cap
->log_max_vlans
[i
];
349 mlx4_warn(dev
, "Requested number of VLANs is too much for port %d, reducing to %d\n",
350 i
, 1 << dev
->caps
.log_num_vlans
);
354 dev
->caps
.max_counters
= 1 << ilog2(dev_cap
->max_counters
);
356 dev
->caps
.reserved_qps_cnt
[MLX4_QP_REGION_FW
] = dev_cap
->reserved_qps
;
357 dev
->caps
.reserved_qps_cnt
[MLX4_QP_REGION_ETH_ADDR
] =
358 dev
->caps
.reserved_qps_cnt
[MLX4_QP_REGION_FC_ADDR
] =
359 (1 << dev
->caps
.log_num_macs
) *
360 (1 << dev
->caps
.log_num_vlans
) *
361 (1 << dev
->caps
.log_num_prios
) *
363 dev
->caps
.reserved_qps_cnt
[MLX4_QP_REGION_FC_EXCH
] = MLX4_NUM_FEXCH
;
365 dev
->caps
.reserved_qps
= dev
->caps
.reserved_qps_cnt
[MLX4_QP_REGION_FW
] +
366 dev
->caps
.reserved_qps_cnt
[MLX4_QP_REGION_ETH_ADDR
] +
367 dev
->caps
.reserved_qps_cnt
[MLX4_QP_REGION_FC_ADDR
] +
368 dev
->caps
.reserved_qps_cnt
[MLX4_QP_REGION_FC_EXCH
];
370 dev
->caps
.sqp_demux
= (mlx4_is_master(dev
)) ? MLX4_MAX_NUM_SLAVES
: 0;
372 if (!enable_64b_cqe_eqe
&& !mlx4_is_slave(dev
)) {
374 (MLX4_DEV_CAP_FLAG_64B_CQE
| MLX4_DEV_CAP_FLAG_64B_EQE
)) {
375 mlx4_warn(dev
, "64B EQEs/CQEs supported by the device but not enabled\n");
376 dev
->caps
.flags
&= ~MLX4_DEV_CAP_FLAG_64B_CQE
;
377 dev
->caps
.flags
&= ~MLX4_DEV_CAP_FLAG_64B_EQE
;
381 if ((dev
->caps
.flags
&
382 (MLX4_DEV_CAP_FLAG_64B_CQE
| MLX4_DEV_CAP_FLAG_64B_EQE
)) &&
384 dev
->caps
.function_caps
|= MLX4_FUNC_CAP_64B_EQE_CQE
;
389 static int mlx4_get_pcie_dev_link_caps(struct mlx4_dev
*dev
,
390 enum pci_bus_speed
*speed
,
391 enum pcie_link_width
*width
)
393 u32 lnkcap1
, lnkcap2
;
396 #define PCIE_MLW_CAP_SHIFT 4 /* start of MLW mask in link capabilities */
398 *speed
= PCI_SPEED_UNKNOWN
;
399 *width
= PCIE_LNK_WIDTH_UNKNOWN
;
401 err1
= pcie_capability_read_dword(dev
->pdev
, PCI_EXP_LNKCAP
, &lnkcap1
);
402 err2
= pcie_capability_read_dword(dev
->pdev
, PCI_EXP_LNKCAP2
, &lnkcap2
);
403 if (!err2
&& lnkcap2
) { /* PCIe r3.0-compliant */
404 if (lnkcap2
& PCI_EXP_LNKCAP2_SLS_8_0GB
)
405 *speed
= PCIE_SPEED_8_0GT
;
406 else if (lnkcap2
& PCI_EXP_LNKCAP2_SLS_5_0GB
)
407 *speed
= PCIE_SPEED_5_0GT
;
408 else if (lnkcap2
& PCI_EXP_LNKCAP2_SLS_2_5GB
)
409 *speed
= PCIE_SPEED_2_5GT
;
412 *width
= (lnkcap1
& PCI_EXP_LNKCAP_MLW
) >> PCIE_MLW_CAP_SHIFT
;
413 if (!lnkcap2
) { /* pre-r3.0 */
414 if (lnkcap1
& PCI_EXP_LNKCAP_SLS_5_0GB
)
415 *speed
= PCIE_SPEED_5_0GT
;
416 else if (lnkcap1
& PCI_EXP_LNKCAP_SLS_2_5GB
)
417 *speed
= PCIE_SPEED_2_5GT
;
421 if (*speed
== PCI_SPEED_UNKNOWN
|| *width
== PCIE_LNK_WIDTH_UNKNOWN
) {
423 err2
? err2
: -EINVAL
;
428 static void mlx4_check_pcie_caps(struct mlx4_dev
*dev
)
430 enum pcie_link_width width
, width_cap
;
431 enum pci_bus_speed speed
, speed_cap
;
434 #define PCIE_SPEED_STR(speed) \
435 (speed == PCIE_SPEED_8_0GT ? "8.0GT/s" : \
436 speed == PCIE_SPEED_5_0GT ? "5.0GT/s" : \
437 speed == PCIE_SPEED_2_5GT ? "2.5GT/s" : \
440 err
= mlx4_get_pcie_dev_link_caps(dev
, &speed_cap
, &width_cap
);
443 "Unable to determine PCIe device BW capabilities\n");
447 err
= pcie_get_minimum_link(dev
->pdev
, &speed
, &width
);
448 if (err
|| speed
== PCI_SPEED_UNKNOWN
||
449 width
== PCIE_LNK_WIDTH_UNKNOWN
) {
451 "Unable to determine PCI device chain minimum BW\n");
455 if (width
!= width_cap
|| speed
!= speed_cap
)
457 "PCIe BW is different than device's capability\n");
459 mlx4_info(dev
, "PCIe link speed is %s, device supports %s\n",
460 PCIE_SPEED_STR(speed
), PCIE_SPEED_STR(speed_cap
));
461 mlx4_info(dev
, "PCIe link width is x%d, device supports x%d\n",
/* The function checks if there are live vfs, returns the num of them */
static int mlx4_how_many_lives_vf(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *s_state;
	int i;
	int ret = 0;

	for (i = 1/*the ppf is 0*/; i < dev->num_slaves; ++i) {
		s_state = &priv->mfunc.master.slave_state[i];
		if (s_state->active && s_state->last_cmd !=
		    MLX4_COMM_CMD_RESET) {
			mlx4_warn(dev, "%s: slave: %d is still active\n",
				  __func__, i);
			ret++;
		}
	}
	return ret;
}
int mlx4_get_parav_qkey(struct mlx4_dev *dev, u32 qpn, u32 *qkey)
{
	u32 qk = MLX4_RESERVED_QKEY_BASE;

	if (qpn >= dev->phys_caps.base_tunnel_sqpn + 8 * MLX4_MFUNC_MAX ||
	    qpn < dev->phys_caps.base_proxy_sqpn)
		return -EINVAL;

	if (qpn >= dev->phys_caps.base_tunnel_sqpn)
		/* tunnel qp */
		qk += qpn - dev->phys_caps.base_tunnel_sqpn;
	else
		qk += qpn - dev->phys_caps.base_proxy_sqpn;
	*qkey = qk;
	return 0;
}
EXPORT_SYMBOL(mlx4_get_parav_qkey);
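/*
 * Small worked example, derived directly from the function above (the
 * concrete QPN is hypothetical): for a proxy SQP with
 * qpn = base_proxy_sqpn + 3 (still below base_tunnel_sqpn), the returned
 * qkey is MLX4_RESERVED_QKEY_BASE + 3; tunnel SQPs are offset from
 * base_tunnel_sqpn in the same reserved qkey range instead.
 */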
void mlx4_sync_pkey_table(struct mlx4_dev *dev, int slave, int port, int i, int val)
{
	struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev);

	if (!mlx4_is_master(dev))
		return;

	priv->virt2phys_pkey[slave][port - 1][i] = val;
}
EXPORT_SYMBOL(mlx4_sync_pkey_table);

void mlx4_put_slave_node_guid(struct mlx4_dev *dev, int slave, __be64 guid)
{
	struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev);

	if (!mlx4_is_master(dev))
		return;

	priv->slave_node_guids[slave] = guid;
}
EXPORT_SYMBOL(mlx4_put_slave_node_guid);

__be64 mlx4_get_slave_node_guid(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev);

	if (!mlx4_is_master(dev))
		return 0;

	return priv->slave_node_guids[slave];
}
EXPORT_SYMBOL(mlx4_get_slave_node_guid);

int mlx4_is_slave_active(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *s_slave;

	if (!mlx4_is_master(dev))
		return 0;

	s_slave = &priv->mfunc.master.slave_state[slave];
	return !!s_slave->active;
}
EXPORT_SYMBOL(mlx4_is_slave_active);
550 static void slave_adjust_steering_mode(struct mlx4_dev
*dev
,
551 struct mlx4_dev_cap
*dev_cap
,
552 struct mlx4_init_hca_param
*hca_param
)
554 dev
->caps
.steering_mode
= hca_param
->steering_mode
;
555 if (dev
->caps
.steering_mode
== MLX4_STEERING_MODE_DEVICE_MANAGED
) {
556 dev
->caps
.num_qp_per_mgm
= dev_cap
->fs_max_num_qp_per_entry
;
557 dev
->caps
.fs_log_max_ucast_qp_range_size
=
558 dev_cap
->fs_log_max_ucast_qp_range_size
;
560 dev
->caps
.num_qp_per_mgm
=
561 4 * ((1 << hca_param
->log_mc_entry_sz
)/16 - 2);
563 mlx4_dbg(dev
, "Steering mode is: %s\n",
564 mlx4_steering_mode_str(dev
->caps
.steering_mode
));
567 static int mlx4_slave_cap(struct mlx4_dev
*dev
)
571 struct mlx4_dev_cap dev_cap
;
572 struct mlx4_func_cap func_cap
;
573 struct mlx4_init_hca_param hca_param
;
576 memset(&hca_param
, 0, sizeof(hca_param
));
577 err
= mlx4_QUERY_HCA(dev
, &hca_param
);
579 mlx4_err(dev
, "QUERY_HCA command failed, aborting\n");
583 /* fail if the hca has an unknown global capability
584 * at this time global_caps should be always zeroed
586 if (hca_param
.global_caps
) {
587 mlx4_err(dev
, "Unknown hca global capabilities\n");
591 mlx4_log_num_mgm_entry_size
= hca_param
.log_mc_entry_sz
;
593 dev
->caps
.hca_core_clock
= hca_param
.hca_core_clock
;
595 memset(&dev_cap
, 0, sizeof(dev_cap
));
596 dev
->caps
.max_qp_dest_rdma
= 1 << hca_param
.log_rd_per_qp
;
597 err
= mlx4_dev_cap(dev
, &dev_cap
);
599 mlx4_err(dev
, "QUERY_DEV_CAP command failed, aborting\n");
603 err
= mlx4_QUERY_FW(dev
);
605 mlx4_err(dev
, "QUERY_FW command failed: could not get FW version\n");
607 page_size
= ~dev
->caps
.page_size_cap
+ 1;
608 mlx4_warn(dev
, "HCA minimum page size:%d\n", page_size
);
609 if (page_size
> PAGE_SIZE
) {
610 mlx4_err(dev
, "HCA minimum page size of %d bigger than kernel PAGE_SIZE of %ld, aborting\n",
611 page_size
, PAGE_SIZE
);
615 /* slave gets uar page size from QUERY_HCA fw command */
616 dev
->caps
.uar_page_size
= 1 << (hca_param
.uar_page_sz
+ 12);
618 /* TODO: relax this assumption */
619 if (dev
->caps
.uar_page_size
!= PAGE_SIZE
) {
620 mlx4_err(dev
, "UAR size:%d != kernel PAGE_SIZE of %ld\n",
621 dev
->caps
.uar_page_size
, PAGE_SIZE
);
625 memset(&func_cap
, 0, sizeof(func_cap
));
626 err
= mlx4_QUERY_FUNC_CAP(dev
, 0, &func_cap
);
628 mlx4_err(dev
, "QUERY_FUNC_CAP general command failed, aborting (%d)\n",
633 if ((func_cap
.pf_context_behaviour
| PF_CONTEXT_BEHAVIOUR_MASK
) !=
634 PF_CONTEXT_BEHAVIOUR_MASK
) {
635 mlx4_err(dev
, "Unknown pf context behaviour\n");
639 dev
->caps
.num_ports
= func_cap
.num_ports
;
640 dev
->quotas
.qp
= func_cap
.qp_quota
;
641 dev
->quotas
.srq
= func_cap
.srq_quota
;
642 dev
->quotas
.cq
= func_cap
.cq_quota
;
643 dev
->quotas
.mpt
= func_cap
.mpt_quota
;
644 dev
->quotas
.mtt
= func_cap
.mtt_quota
;
645 dev
->caps
.num_qps
= 1 << hca_param
.log_num_qps
;
646 dev
->caps
.num_srqs
= 1 << hca_param
.log_num_srqs
;
647 dev
->caps
.num_cqs
= 1 << hca_param
.log_num_cqs
;
648 dev
->caps
.num_mpts
= 1 << hca_param
.log_mpt_sz
;
649 dev
->caps
.num_eqs
= func_cap
.max_eq
;
650 dev
->caps
.reserved_eqs
= func_cap
.reserved_eq
;
651 dev
->caps
.num_pds
= MLX4_NUM_PDS
;
652 dev
->caps
.num_mgms
= 0;
653 dev
->caps
.num_amgms
= 0;
655 if (dev
->caps
.num_ports
> MLX4_MAX_PORTS
) {
656 mlx4_err(dev
, "HCA has %d ports, but we only support %d, aborting\n",
657 dev
->caps
.num_ports
, MLX4_MAX_PORTS
);
661 dev
->caps
.qp0_tunnel
= kcalloc(dev
->caps
.num_ports
, sizeof (u32
), GFP_KERNEL
);
662 dev
->caps
.qp0_proxy
= kcalloc(dev
->caps
.num_ports
, sizeof (u32
), GFP_KERNEL
);
663 dev
->caps
.qp1_tunnel
= kcalloc(dev
->caps
.num_ports
, sizeof (u32
), GFP_KERNEL
);
664 dev
->caps
.qp1_proxy
= kcalloc(dev
->caps
.num_ports
, sizeof (u32
), GFP_KERNEL
);
666 if (!dev
->caps
.qp0_tunnel
|| !dev
->caps
.qp0_proxy
||
667 !dev
->caps
.qp1_tunnel
|| !dev
->caps
.qp1_proxy
) {
672 for (i
= 1; i
<= dev
->caps
.num_ports
; ++i
) {
673 err
= mlx4_QUERY_FUNC_CAP(dev
, (u32
) i
, &func_cap
);
675 mlx4_err(dev
, "QUERY_FUNC_CAP port command failed for port %d, aborting (%d)\n",
679 dev
->caps
.qp0_tunnel
[i
- 1] = func_cap
.qp0_tunnel_qpn
;
680 dev
->caps
.qp0_proxy
[i
- 1] = func_cap
.qp0_proxy_qpn
;
681 dev
->caps
.qp1_tunnel
[i
- 1] = func_cap
.qp1_tunnel_qpn
;
682 dev
->caps
.qp1_proxy
[i
- 1] = func_cap
.qp1_proxy_qpn
;
683 dev
->caps
.port_mask
[i
] = dev
->caps
.port_type
[i
];
684 dev
->caps
.phys_port_id
[i
] = func_cap
.phys_port_id
;
685 if (mlx4_get_slave_pkey_gid_tbl_len(dev
, i
,
686 &dev
->caps
.gid_table_len
[i
],
687 &dev
->caps
.pkey_table_len
[i
]))
691 if (dev
->caps
.uar_page_size
* (dev
->caps
.num_uars
-
692 dev
->caps
.reserved_uars
) >
693 pci_resource_len(dev
->pdev
, 2)) {
694 mlx4_err(dev
, "HCA reported UAR region size of 0x%x bigger than PCI resource 2 size of 0x%llx, aborting\n",
695 dev
->caps
.uar_page_size
* dev
->caps
.num_uars
,
696 (unsigned long long) pci_resource_len(dev
->pdev
, 2));
700 if (hca_param
.dev_cap_enabled
& MLX4_DEV_CAP_64B_EQE_ENABLED
) {
701 dev
->caps
.eqe_size
= 64;
702 dev
->caps
.eqe_factor
= 1;
704 dev
->caps
.eqe_size
= 32;
705 dev
->caps
.eqe_factor
= 0;
708 if (hca_param
.dev_cap_enabled
& MLX4_DEV_CAP_64B_CQE_ENABLED
) {
709 dev
->caps
.cqe_size
= 64;
710 dev
->caps
.userspace_caps
|= MLX4_USER_DEV_CAP_64B_CQE
;
712 dev
->caps
.cqe_size
= 32;
715 dev
->caps
.flags2
&= ~MLX4_DEV_CAP_FLAG2_TS
;
716 mlx4_warn(dev
, "Timestamping is not supported in slave mode\n");
718 slave_adjust_steering_mode(dev
, &dev_cap
, &hca_param
);
723 kfree(dev
->caps
.qp0_tunnel
);
724 kfree(dev
->caps
.qp0_proxy
);
725 kfree(dev
->caps
.qp1_tunnel
);
726 kfree(dev
->caps
.qp1_proxy
);
727 dev
->caps
.qp0_tunnel
= dev
->caps
.qp0_proxy
=
728 dev
->caps
.qp1_tunnel
= dev
->caps
.qp1_proxy
= NULL
;
733 static void mlx4_request_modules(struct mlx4_dev
*dev
)
736 int has_ib_port
= false;
737 int has_eth_port
= false;
738 #define EN_DRV_NAME "mlx4_en"
739 #define IB_DRV_NAME "mlx4_ib"
741 for (port
= 1; port
<= dev
->caps
.num_ports
; port
++) {
742 if (dev
->caps
.port_type
[port
] == MLX4_PORT_TYPE_IB
)
744 else if (dev
->caps
.port_type
[port
] == MLX4_PORT_TYPE_ETH
)
749 request_module_nowait(EN_DRV_NAME
);
750 if (has_ib_port
|| (dev
->caps
.flags
& MLX4_DEV_CAP_FLAG_IBOE
))
751 request_module_nowait(IB_DRV_NAME
);
755 * Change the port configuration of the device.
756 * Every user of this function must hold the port mutex.
758 int mlx4_change_port_types(struct mlx4_dev
*dev
,
759 enum mlx4_port_type
*port_types
)
765 for (port
= 0; port
< dev
->caps
.num_ports
; port
++) {
766 /* Change the port type only if the new type is different
767 * from the current, and not set to Auto */
768 if (port_types
[port
] != dev
->caps
.port_type
[port
+ 1])
772 mlx4_unregister_device(dev
);
773 for (port
= 1; port
<= dev
->caps
.num_ports
; port
++) {
774 mlx4_CLOSE_PORT(dev
, port
);
775 dev
->caps
.port_type
[port
] = port_types
[port
- 1];
776 err
= mlx4_SET_PORT(dev
, port
, -1);
778 mlx4_err(dev
, "Failed to set port %d, aborting\n",
783 mlx4_set_port_mask(dev
);
784 err
= mlx4_register_device(dev
);
786 mlx4_err(dev
, "Failed to register device\n");
789 mlx4_request_modules(dev
);
796 static ssize_t
show_port_type(struct device
*dev
,
797 struct device_attribute
*attr
,
800 struct mlx4_port_info
*info
= container_of(attr
, struct mlx4_port_info
,
802 struct mlx4_dev
*mdev
= info
->dev
;
806 (mdev
->caps
.port_type
[info
->port
] == MLX4_PORT_TYPE_IB
) ?
808 if (mdev
->caps
.possible_type
[info
->port
] == MLX4_PORT_TYPE_AUTO
)
809 sprintf(buf
, "auto (%s)\n", type
);
811 sprintf(buf
, "%s\n", type
);
816 static ssize_t
set_port_type(struct device
*dev
,
817 struct device_attribute
*attr
,
818 const char *buf
, size_t count
)
820 struct mlx4_port_info
*info
= container_of(attr
, struct mlx4_port_info
,
822 struct mlx4_dev
*mdev
= info
->dev
;
823 struct mlx4_priv
*priv
= mlx4_priv(mdev
);
824 enum mlx4_port_type types
[MLX4_MAX_PORTS
];
825 enum mlx4_port_type new_types
[MLX4_MAX_PORTS
];
829 if (!strcmp(buf
, "ib\n"))
830 info
->tmp_type
= MLX4_PORT_TYPE_IB
;
831 else if (!strcmp(buf
, "eth\n"))
832 info
->tmp_type
= MLX4_PORT_TYPE_ETH
;
833 else if (!strcmp(buf
, "auto\n"))
834 info
->tmp_type
= MLX4_PORT_TYPE_AUTO
;
836 mlx4_err(mdev
, "%s is not supported port type\n", buf
);
840 mlx4_stop_sense(mdev
);
841 mutex_lock(&priv
->port_mutex
);
842 /* Possible type is always the one that was delivered */
843 mdev
->caps
.possible_type
[info
->port
] = info
->tmp_type
;
845 for (i
= 0; i
< mdev
->caps
.num_ports
; i
++) {
846 types
[i
] = priv
->port
[i
+1].tmp_type
? priv
->port
[i
+1].tmp_type
:
847 mdev
->caps
.possible_type
[i
+1];
848 if (types
[i
] == MLX4_PORT_TYPE_AUTO
)
849 types
[i
] = mdev
->caps
.port_type
[i
+1];
852 if (!(mdev
->caps
.flags
& MLX4_DEV_CAP_FLAG_DPDP
) &&
853 !(mdev
->caps
.flags
& MLX4_DEV_CAP_FLAG_SENSE_SUPPORT
)) {
854 for (i
= 1; i
<= mdev
->caps
.num_ports
; i
++) {
855 if (mdev
->caps
.possible_type
[i
] == MLX4_PORT_TYPE_AUTO
) {
856 mdev
->caps
.possible_type
[i
] = mdev
->caps
.port_type
[i
];
862 mlx4_err(mdev
, "Auto sensing is not supported on this HCA. Set only 'eth' or 'ib' for both ports (should be the same)\n");
866 mlx4_do_sense_ports(mdev
, new_types
, types
);
868 err
= mlx4_check_port_params(mdev
, new_types
);
872 /* We are about to apply the changes after the configuration
873 * was verified, no need to remember the temporary types
875 for (i
= 0; i
< mdev
->caps
.num_ports
; i
++)
876 priv
->port
[i
+ 1].tmp_type
= 0;
878 err
= mlx4_change_port_types(mdev
, new_types
);
881 mlx4_start_sense(mdev
);
882 mutex_unlock(&priv
->port_mutex
);
883 return err
? err
: count
;
static inline int int_to_ibta_mtu(int mtu)
{
	switch (mtu) {
	case 256:  return IB_MTU_256;
	case 512:  return IB_MTU_512;
	case 1024: return IB_MTU_1024;
	case 2048: return IB_MTU_2048;
	case 4096: return IB_MTU_4096;
	default: return -1;
	}
}

static inline int ibta_mtu_to_int(enum ibta_mtu mtu)
{
	switch (mtu) {
	case IB_MTU_256:  return  256;
	case IB_MTU_512:  return  512;
	case IB_MTU_1024: return 1024;
	case IB_MTU_2048: return 2048;
	case IB_MTU_4096: return 4096;
	default: return -1;
	}
}
918 static ssize_t
show_port_ib_mtu(struct device
*dev
,
919 struct device_attribute
*attr
,
922 struct mlx4_port_info
*info
= container_of(attr
, struct mlx4_port_info
,
924 struct mlx4_dev
*mdev
= info
->dev
;
926 if (mdev
->caps
.port_type
[info
->port
] == MLX4_PORT_TYPE_ETH
)
927 mlx4_warn(mdev
, "port level mtu is only used for IB ports\n");
930 ibta_mtu_to_int(mdev
->caps
.port_ib_mtu
[info
->port
]));
934 static ssize_t
set_port_ib_mtu(struct device
*dev
,
935 struct device_attribute
*attr
,
936 const char *buf
, size_t count
)
938 struct mlx4_port_info
*info
= container_of(attr
, struct mlx4_port_info
,
940 struct mlx4_dev
*mdev
= info
->dev
;
941 struct mlx4_priv
*priv
= mlx4_priv(mdev
);
942 int err
, port
, mtu
, ibta_mtu
= -1;
944 if (mdev
->caps
.port_type
[info
->port
] == MLX4_PORT_TYPE_ETH
) {
945 mlx4_warn(mdev
, "port level mtu is only used for IB ports\n");
949 err
= kstrtoint(buf
, 0, &mtu
);
951 ibta_mtu
= int_to_ibta_mtu(mtu
);
953 if (err
|| ibta_mtu
< 0) {
954 mlx4_err(mdev
, "%s is invalid IBTA mtu\n", buf
);
958 mdev
->caps
.port_ib_mtu
[info
->port
] = ibta_mtu
;
960 mlx4_stop_sense(mdev
);
961 mutex_lock(&priv
->port_mutex
);
962 mlx4_unregister_device(mdev
);
963 for (port
= 1; port
<= mdev
->caps
.num_ports
; port
++) {
964 mlx4_CLOSE_PORT(mdev
, port
);
965 err
= mlx4_SET_PORT(mdev
, port
, -1);
967 mlx4_err(mdev
, "Failed to set port %d, aborting\n",
972 err
= mlx4_register_device(mdev
);
974 mutex_unlock(&priv
->port_mutex
);
975 mlx4_start_sense(mdev
);
976 return err
? err
: count
;
979 static int mlx4_load_fw(struct mlx4_dev
*dev
)
981 struct mlx4_priv
*priv
= mlx4_priv(dev
);
984 priv
->fw
.fw_icm
= mlx4_alloc_icm(dev
, priv
->fw
.fw_pages
,
985 GFP_HIGHUSER
| __GFP_NOWARN
, 0);
986 if (!priv
->fw
.fw_icm
) {
987 mlx4_err(dev
, "Couldn't allocate FW area, aborting\n");
991 err
= mlx4_MAP_FA(dev
, priv
->fw
.fw_icm
);
993 mlx4_err(dev
, "MAP_FA command failed, aborting\n");
997 err
= mlx4_RUN_FW(dev
);
999 mlx4_err(dev
, "RUN_FW command failed, aborting\n");
1009 mlx4_free_icm(dev
, priv
->fw
.fw_icm
, 0);
1013 static int mlx4_init_cmpt_table(struct mlx4_dev
*dev
, u64 cmpt_base
,
1016 struct mlx4_priv
*priv
= mlx4_priv(dev
);
1020 err
= mlx4_init_icm_table(dev
, &priv
->qp_table
.cmpt_table
,
1022 ((u64
) (MLX4_CMPT_TYPE_QP
*
1023 cmpt_entry_sz
) << MLX4_CMPT_SHIFT
),
1024 cmpt_entry_sz
, dev
->caps
.num_qps
,
1025 dev
->caps
.reserved_qps_cnt
[MLX4_QP_REGION_FW
],
1030 err
= mlx4_init_icm_table(dev
, &priv
->srq_table
.cmpt_table
,
1032 ((u64
) (MLX4_CMPT_TYPE_SRQ
*
1033 cmpt_entry_sz
) << MLX4_CMPT_SHIFT
),
1034 cmpt_entry_sz
, dev
->caps
.num_srqs
,
1035 dev
->caps
.reserved_srqs
, 0, 0);
1039 err
= mlx4_init_icm_table(dev
, &priv
->cq_table
.cmpt_table
,
1041 ((u64
) (MLX4_CMPT_TYPE_CQ
*
1042 cmpt_entry_sz
) << MLX4_CMPT_SHIFT
),
1043 cmpt_entry_sz
, dev
->caps
.num_cqs
,
1044 dev
->caps
.reserved_cqs
, 0, 0);
1048 num_eqs
= (mlx4_is_master(dev
)) ? dev
->phys_caps
.num_phys_eqs
:
1050 err
= mlx4_init_icm_table(dev
, &priv
->eq_table
.cmpt_table
,
1052 ((u64
) (MLX4_CMPT_TYPE_EQ
*
1053 cmpt_entry_sz
) << MLX4_CMPT_SHIFT
),
1054 cmpt_entry_sz
, num_eqs
, num_eqs
, 0, 0);
1061 mlx4_cleanup_icm_table(dev
, &priv
->cq_table
.cmpt_table
);
1064 mlx4_cleanup_icm_table(dev
, &priv
->srq_table
.cmpt_table
);
1067 mlx4_cleanup_icm_table(dev
, &priv
->qp_table
.cmpt_table
);
1073 static int mlx4_init_icm(struct mlx4_dev
*dev
, struct mlx4_dev_cap
*dev_cap
,
1074 struct mlx4_init_hca_param
*init_hca
, u64 icm_size
)
1076 struct mlx4_priv
*priv
= mlx4_priv(dev
);
1081 err
= mlx4_SET_ICM_SIZE(dev
, icm_size
, &aux_pages
);
1083 mlx4_err(dev
, "SET_ICM_SIZE command failed, aborting\n");
1087 mlx4_dbg(dev
, "%lld KB of HCA context requires %lld KB aux memory\n",
1088 (unsigned long long) icm_size
>> 10,
1089 (unsigned long long) aux_pages
<< 2);
1091 priv
->fw
.aux_icm
= mlx4_alloc_icm(dev
, aux_pages
,
1092 GFP_HIGHUSER
| __GFP_NOWARN
, 0);
1093 if (!priv
->fw
.aux_icm
) {
1094 mlx4_err(dev
, "Couldn't allocate aux memory, aborting\n");
1098 err
= mlx4_MAP_ICM_AUX(dev
, priv
->fw
.aux_icm
);
1100 mlx4_err(dev
, "MAP_ICM_AUX command failed, aborting\n");
1104 err
= mlx4_init_cmpt_table(dev
, init_hca
->cmpt_base
, dev_cap
->cmpt_entry_sz
);
1106 mlx4_err(dev
, "Failed to map cMPT context memory, aborting\n");
1111 num_eqs
= (mlx4_is_master(dev
)) ? dev
->phys_caps
.num_phys_eqs
:
1113 err
= mlx4_init_icm_table(dev
, &priv
->eq_table
.table
,
1114 init_hca
->eqc_base
, dev_cap
->eqc_entry_sz
,
1115 num_eqs
, num_eqs
, 0, 0);
1117 mlx4_err(dev
, "Failed to map EQ context memory, aborting\n");
1118 goto err_unmap_cmpt
;
1122 * Reserved MTT entries must be aligned up to a cacheline
1123 * boundary, since the FW will write to them, while the driver
1124 * writes to all other MTT entries. (The variable
1125 * dev->caps.mtt_entry_sz below is really the MTT segment
1126 * size, not the raw entry size)
1128 dev
->caps
.reserved_mtts
=
1129 ALIGN(dev
->caps
.reserved_mtts
* dev
->caps
.mtt_entry_sz
,
1130 dma_get_cache_alignment()) / dev
->caps
.mtt_entry_sz
;
1132 err
= mlx4_init_icm_table(dev
, &priv
->mr_table
.mtt_table
,
1134 dev
->caps
.mtt_entry_sz
,
1136 dev
->caps
.reserved_mtts
, 1, 0);
1138 mlx4_err(dev
, "Failed to map MTT context memory, aborting\n");
1142 err
= mlx4_init_icm_table(dev
, &priv
->mr_table
.dmpt_table
,
1143 init_hca
->dmpt_base
,
1144 dev_cap
->dmpt_entry_sz
,
1146 dev
->caps
.reserved_mrws
, 1, 1);
1148 mlx4_err(dev
, "Failed to map dMPT context memory, aborting\n");
1152 err
= mlx4_init_icm_table(dev
, &priv
->qp_table
.qp_table
,
1154 dev_cap
->qpc_entry_sz
,
1156 dev
->caps
.reserved_qps_cnt
[MLX4_QP_REGION_FW
],
1159 mlx4_err(dev
, "Failed to map QP context memory, aborting\n");
1160 goto err_unmap_dmpt
;
1163 err
= mlx4_init_icm_table(dev
, &priv
->qp_table
.auxc_table
,
1164 init_hca
->auxc_base
,
1165 dev_cap
->aux_entry_sz
,
1167 dev
->caps
.reserved_qps_cnt
[MLX4_QP_REGION_FW
],
1170 mlx4_err(dev
, "Failed to map AUXC context memory, aborting\n");
1174 err
= mlx4_init_icm_table(dev
, &priv
->qp_table
.altc_table
,
1175 init_hca
->altc_base
,
1176 dev_cap
->altc_entry_sz
,
1178 dev
->caps
.reserved_qps_cnt
[MLX4_QP_REGION_FW
],
1181 mlx4_err(dev
, "Failed to map ALTC context memory, aborting\n");
1182 goto err_unmap_auxc
;
1185 err
= mlx4_init_icm_table(dev
, &priv
->qp_table
.rdmarc_table
,
1186 init_hca
->rdmarc_base
,
1187 dev_cap
->rdmarc_entry_sz
<< priv
->qp_table
.rdmarc_shift
,
1189 dev
->caps
.reserved_qps_cnt
[MLX4_QP_REGION_FW
],
1192 mlx4_err(dev
, "Failed to map RDMARC context memory, aborting\n");
1193 goto err_unmap_altc
;
1196 err
= mlx4_init_icm_table(dev
, &priv
->cq_table
.table
,
1198 dev_cap
->cqc_entry_sz
,
1200 dev
->caps
.reserved_cqs
, 0, 0);
1202 mlx4_err(dev
, "Failed to map CQ context memory, aborting\n");
1203 goto err_unmap_rdmarc
;
1206 err
= mlx4_init_icm_table(dev
, &priv
->srq_table
.table
,
1207 init_hca
->srqc_base
,
1208 dev_cap
->srq_entry_sz
,
1210 dev
->caps
.reserved_srqs
, 0, 0);
1212 mlx4_err(dev
, "Failed to map SRQ context memory, aborting\n");
1217 * For flow steering device managed mode it is required to use
1218 * mlx4_init_icm_table. For B0 steering mode it's not strictly
1219 * required, but for simplicity just map the whole multicast
1220 * group table now. The table isn't very big and it's a lot
1221 * easier than trying to track ref counts.
1223 err
= mlx4_init_icm_table(dev
, &priv
->mcg_table
.table
,
1225 mlx4_get_mgm_entry_size(dev
),
1226 dev
->caps
.num_mgms
+ dev
->caps
.num_amgms
,
1227 dev
->caps
.num_mgms
+ dev
->caps
.num_amgms
,
1230 mlx4_err(dev
, "Failed to map MCG context memory, aborting\n");
1237 mlx4_cleanup_icm_table(dev
, &priv
->srq_table
.table
);
1240 mlx4_cleanup_icm_table(dev
, &priv
->cq_table
.table
);
1243 mlx4_cleanup_icm_table(dev
, &priv
->qp_table
.rdmarc_table
);
1246 mlx4_cleanup_icm_table(dev
, &priv
->qp_table
.altc_table
);
1249 mlx4_cleanup_icm_table(dev
, &priv
->qp_table
.auxc_table
);
1252 mlx4_cleanup_icm_table(dev
, &priv
->qp_table
.qp_table
);
1255 mlx4_cleanup_icm_table(dev
, &priv
->mr_table
.dmpt_table
);
1258 mlx4_cleanup_icm_table(dev
, &priv
->mr_table
.mtt_table
);
1261 mlx4_cleanup_icm_table(dev
, &priv
->eq_table
.table
);
1264 mlx4_cleanup_icm_table(dev
, &priv
->eq_table
.cmpt_table
);
1265 mlx4_cleanup_icm_table(dev
, &priv
->cq_table
.cmpt_table
);
1266 mlx4_cleanup_icm_table(dev
, &priv
->srq_table
.cmpt_table
);
1267 mlx4_cleanup_icm_table(dev
, &priv
->qp_table
.cmpt_table
);
1270 mlx4_UNMAP_ICM_AUX(dev
);
1273 mlx4_free_icm(dev
, priv
->fw
.aux_icm
, 0);
1278 static void mlx4_free_icms(struct mlx4_dev
*dev
)
1280 struct mlx4_priv
*priv
= mlx4_priv(dev
);
1282 mlx4_cleanup_icm_table(dev
, &priv
->mcg_table
.table
);
1283 mlx4_cleanup_icm_table(dev
, &priv
->srq_table
.table
);
1284 mlx4_cleanup_icm_table(dev
, &priv
->cq_table
.table
);
1285 mlx4_cleanup_icm_table(dev
, &priv
->qp_table
.rdmarc_table
);
1286 mlx4_cleanup_icm_table(dev
, &priv
->qp_table
.altc_table
);
1287 mlx4_cleanup_icm_table(dev
, &priv
->qp_table
.auxc_table
);
1288 mlx4_cleanup_icm_table(dev
, &priv
->qp_table
.qp_table
);
1289 mlx4_cleanup_icm_table(dev
, &priv
->mr_table
.dmpt_table
);
1290 mlx4_cleanup_icm_table(dev
, &priv
->mr_table
.mtt_table
);
1291 mlx4_cleanup_icm_table(dev
, &priv
->eq_table
.table
);
1292 mlx4_cleanup_icm_table(dev
, &priv
->eq_table
.cmpt_table
);
1293 mlx4_cleanup_icm_table(dev
, &priv
->cq_table
.cmpt_table
);
1294 mlx4_cleanup_icm_table(dev
, &priv
->srq_table
.cmpt_table
);
1295 mlx4_cleanup_icm_table(dev
, &priv
->qp_table
.cmpt_table
);
1297 mlx4_UNMAP_ICM_AUX(dev
);
1298 mlx4_free_icm(dev
, priv
->fw
.aux_icm
, 0);
1301 static void mlx4_slave_exit(struct mlx4_dev
*dev
)
1303 struct mlx4_priv
*priv
= mlx4_priv(dev
);
1305 mutex_lock(&priv
->cmd
.slave_cmd_mutex
);
1306 if (mlx4_comm_cmd(dev
, MLX4_COMM_CMD_RESET
, 0, MLX4_COMM_TIME
))
1307 mlx4_warn(dev
, "Failed to close slave function\n");
1308 mutex_unlock(&priv
->cmd
.slave_cmd_mutex
);
1311 static int map_bf_area(struct mlx4_dev
*dev
)
1313 struct mlx4_priv
*priv
= mlx4_priv(dev
);
1314 resource_size_t bf_start
;
1315 resource_size_t bf_len
;
1318 if (!dev
->caps
.bf_reg_size
)
1321 bf_start
= pci_resource_start(dev
->pdev
, 2) +
1322 (dev
->caps
.num_uars
<< PAGE_SHIFT
);
1323 bf_len
= pci_resource_len(dev
->pdev
, 2) -
1324 (dev
->caps
.num_uars
<< PAGE_SHIFT
);
1325 priv
->bf_mapping
= io_mapping_create_wc(bf_start
, bf_len
);
1326 if (!priv
->bf_mapping
)
1332 static void unmap_bf_area(struct mlx4_dev
*dev
)
1334 if (mlx4_priv(dev
)->bf_mapping
)
1335 io_mapping_free(mlx4_priv(dev
)->bf_mapping
);
1338 cycle_t
mlx4_read_clock(struct mlx4_dev
*dev
)
1340 u32 clockhi
, clocklo
, clockhi1
;
1343 struct mlx4_priv
*priv
= mlx4_priv(dev
);
1345 for (i
= 0; i
< 10; i
++) {
1346 clockhi
= swab32(readl(priv
->clock_mapping
));
1347 clocklo
= swab32(readl(priv
->clock_mapping
+ 4));
1348 clockhi1
= swab32(readl(priv
->clock_mapping
));
1349 if (clockhi
== clockhi1
)
1353 cycles
= (u64
) clockhi
<< 32 | (u64
) clocklo
;
1357 EXPORT_SYMBOL_GPL(mlx4_read_clock
);
1360 static int map_internal_clock(struct mlx4_dev
*dev
)
1362 struct mlx4_priv
*priv
= mlx4_priv(dev
);
1364 priv
->clock_mapping
=
1365 ioremap(pci_resource_start(dev
->pdev
, priv
->fw
.clock_bar
) +
1366 priv
->fw
.clock_offset
, MLX4_CLOCK_SIZE
);
1368 if (!priv
->clock_mapping
)
1374 static void unmap_internal_clock(struct mlx4_dev
*dev
)
1376 struct mlx4_priv
*priv
= mlx4_priv(dev
);
1378 if (priv
->clock_mapping
)
1379 iounmap(priv
->clock_mapping
);
1382 static void mlx4_close_hca(struct mlx4_dev
*dev
)
1384 unmap_internal_clock(dev
);
1386 if (mlx4_is_slave(dev
))
1387 mlx4_slave_exit(dev
);
1389 mlx4_CLOSE_HCA(dev
, 0);
1390 mlx4_free_icms(dev
);
1392 mlx4_free_icm(dev
, mlx4_priv(dev
)->fw
.fw_icm
, 0);
1396 static int mlx4_init_slave(struct mlx4_dev
*dev
)
1398 struct mlx4_priv
*priv
= mlx4_priv(dev
);
1399 u64 dma
= (u64
) priv
->mfunc
.vhcr_dma
;
1400 int ret_from_reset
= 0;
1402 u32 cmd_channel_ver
;
1404 if (atomic_read(&pf_loading
)) {
1405 mlx4_warn(dev
, "PF is not ready - Deferring probe\n");
1406 return -EPROBE_DEFER
;
1409 mutex_lock(&priv
->cmd
.slave_cmd_mutex
);
1410 priv
->cmd
.max_cmds
= 1;
1411 mlx4_warn(dev
, "Sending reset\n");
1412 ret_from_reset
= mlx4_comm_cmd(dev
, MLX4_COMM_CMD_RESET
, 0,
1414 /* if we are in the middle of flr the slave will try
1415 * NUM_OF_RESET_RETRIES times before leaving.*/
1416 if (ret_from_reset
) {
1417 if (MLX4_DELAY_RESET_SLAVE
== ret_from_reset
) {
1418 mlx4_warn(dev
, "slave is currently in the middle of FLR - Deferring probe\n");
1419 mutex_unlock(&priv
->cmd
.slave_cmd_mutex
);
1420 return -EPROBE_DEFER
;
1425 /* check the driver version - the slave I/F revision
1426 * must match the master's */
1427 slave_read
= swab32(readl(&priv
->mfunc
.comm
->slave_read
));
1428 cmd_channel_ver
= mlx4_comm_get_version();
1430 if (MLX4_COMM_GET_IF_REV(cmd_channel_ver
) !=
1431 MLX4_COMM_GET_IF_REV(slave_read
)) {
1432 mlx4_err(dev
, "slave driver version is not supported by the master\n");
1436 mlx4_warn(dev
, "Sending vhcr0\n");
1437 if (mlx4_comm_cmd(dev
, MLX4_COMM_CMD_VHCR0
, dma
>> 48,
1440 if (mlx4_comm_cmd(dev
, MLX4_COMM_CMD_VHCR1
, dma
>> 32,
1443 if (mlx4_comm_cmd(dev
, MLX4_COMM_CMD_VHCR2
, dma
>> 16,
1446 if (mlx4_comm_cmd(dev
, MLX4_COMM_CMD_VHCR_EN
, dma
, MLX4_COMM_TIME
))
1449 mutex_unlock(&priv
->cmd
.slave_cmd_mutex
);
1453 mlx4_comm_cmd(dev
, MLX4_COMM_CMD_RESET
, 0, 0);
1454 mutex_unlock(&priv
->cmd
.slave_cmd_mutex
);
1458 static void mlx4_parav_master_pf_caps(struct mlx4_dev
*dev
)
1462 for (i
= 1; i
<= dev
->caps
.num_ports
; i
++) {
1463 if (dev
->caps
.port_type
[i
] == MLX4_PORT_TYPE_ETH
)
1464 dev
->caps
.gid_table_len
[i
] =
1465 mlx4_get_slave_num_gids(dev
, 0, i
);
1467 dev
->caps
.gid_table_len
[i
] = 1;
1468 dev
->caps
.pkey_table_len
[i
] =
1469 dev
->phys_caps
.pkey_phys_table_len
[i
] - 1;
1473 static int choose_log_fs_mgm_entry_size(int qp_per_entry
)
1475 int i
= MLX4_MIN_MGM_LOG_ENTRY_SIZE
;
1477 for (i
= MLX4_MIN_MGM_LOG_ENTRY_SIZE
; i
<= MLX4_MAX_MGM_LOG_ENTRY_SIZE
;
1479 if (qp_per_entry
<= 4 * ((1 << i
) / 16 - 2))
1483 return (i
<= MLX4_MAX_MGM_LOG_ENTRY_SIZE
) ? i
: -1;
1486 static void choose_steering_mode(struct mlx4_dev
*dev
,
1487 struct mlx4_dev_cap
*dev_cap
)
1489 if (mlx4_log_num_mgm_entry_size
== -1 &&
1490 dev_cap
->flags2
& MLX4_DEV_CAP_FLAG2_FS_EN
&&
1491 (!mlx4_is_mfunc(dev
) ||
1492 (dev_cap
->fs_max_num_qp_per_entry
>= (dev
->num_vfs
+ 1))) &&
1493 choose_log_fs_mgm_entry_size(dev_cap
->fs_max_num_qp_per_entry
) >=
1494 MLX4_MIN_MGM_LOG_ENTRY_SIZE
) {
1495 dev
->oper_log_mgm_entry_size
=
1496 choose_log_fs_mgm_entry_size(dev_cap
->fs_max_num_qp_per_entry
);
1497 dev
->caps
.steering_mode
= MLX4_STEERING_MODE_DEVICE_MANAGED
;
1498 dev
->caps
.num_qp_per_mgm
= dev_cap
->fs_max_num_qp_per_entry
;
1499 dev
->caps
.fs_log_max_ucast_qp_range_size
=
1500 dev_cap
->fs_log_max_ucast_qp_range_size
;
1502 if (dev
->caps
.flags
& MLX4_DEV_CAP_FLAG_VEP_UC_STEER
&&
1503 dev
->caps
.flags
& MLX4_DEV_CAP_FLAG_VEP_MC_STEER
)
1504 dev
->caps
.steering_mode
= MLX4_STEERING_MODE_B0
;
1506 dev
->caps
.steering_mode
= MLX4_STEERING_MODE_A0
;
1508 if (dev
->caps
.flags
& MLX4_DEV_CAP_FLAG_VEP_UC_STEER
||
1509 dev
->caps
.flags
& MLX4_DEV_CAP_FLAG_VEP_MC_STEER
)
1510 mlx4_warn(dev
, "Must have both UC_STEER and MC_STEER flags set to use B0 steering - falling back to A0 steering mode\n");
1512 dev
->oper_log_mgm_entry_size
=
1513 mlx4_log_num_mgm_entry_size
> 0 ?
1514 mlx4_log_num_mgm_entry_size
:
1515 MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE
;
1516 dev
->caps
.num_qp_per_mgm
= mlx4_get_qp_per_mgm(dev
);
1518 mlx4_dbg(dev
, "Steering mode is: %s, oper_log_mgm_entry_size = %d, modparam log_num_mgm_entry_size = %d\n",
1519 mlx4_steering_mode_str(dev
->caps
.steering_mode
),
1520 dev
->oper_log_mgm_entry_size
,
1521 mlx4_log_num_mgm_entry_size
);
1524 static void choose_tunnel_offload_mode(struct mlx4_dev
*dev
,
1525 struct mlx4_dev_cap
*dev_cap
)
1527 if (dev
->caps
.steering_mode
== MLX4_STEERING_MODE_DEVICE_MANAGED
&&
1528 dev_cap
->flags2
& MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS
)
1529 dev
->caps
.tunnel_offload_mode
= MLX4_TUNNEL_OFFLOAD_MODE_VXLAN
;
1531 dev
->caps
.tunnel_offload_mode
= MLX4_TUNNEL_OFFLOAD_MODE_NONE
;
1533 mlx4_dbg(dev
, "Tunneling offload mode is: %s\n", (dev
->caps
.tunnel_offload_mode
1534 == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN
) ? "vxlan" : "none");
1537 static int mlx4_init_hca(struct mlx4_dev
*dev
)
1539 struct mlx4_priv
*priv
= mlx4_priv(dev
);
1540 struct mlx4_adapter adapter
;
1541 struct mlx4_dev_cap dev_cap
;
1542 struct mlx4_mod_stat_cfg mlx4_cfg
;
1543 struct mlx4_profile profile
;
1544 struct mlx4_init_hca_param init_hca
;
1548 if (!mlx4_is_slave(dev
)) {
1549 err
= mlx4_QUERY_FW(dev
);
1552 mlx4_info(dev
, "non-primary physical function, skipping\n");
1554 mlx4_err(dev
, "QUERY_FW command failed, aborting\n");
1558 err
= mlx4_load_fw(dev
);
1560 mlx4_err(dev
, "Failed to start FW, aborting\n");
1564 mlx4_cfg
.log_pg_sz_m
= 1;
1565 mlx4_cfg
.log_pg_sz
= 0;
1566 err
= mlx4_MOD_STAT_CFG(dev
, &mlx4_cfg
);
1568 mlx4_warn(dev
, "Failed to override log_pg_sz parameter\n");
1570 err
= mlx4_dev_cap(dev
, &dev_cap
);
1572 mlx4_err(dev
, "QUERY_DEV_CAP command failed, aborting\n");
1576 choose_steering_mode(dev
, &dev_cap
);
1577 choose_tunnel_offload_mode(dev
, &dev_cap
);
1579 err
= mlx4_get_phys_port_id(dev
);
1581 mlx4_err(dev
, "Fail to get physical port id\n");
1583 if (mlx4_is_master(dev
))
1584 mlx4_parav_master_pf_caps(dev
);
1586 profile
= default_profile
;
1587 if (dev
->caps
.steering_mode
==
1588 MLX4_STEERING_MODE_DEVICE_MANAGED
)
1589 profile
.num_mcg
= MLX4_FS_NUM_MCG
;
1591 icm_size
= mlx4_make_profile(dev
, &profile
, &dev_cap
,
1593 if ((long long) icm_size
< 0) {
1598 dev
->caps
.max_fmr_maps
= (1 << (32 - ilog2(dev
->caps
.num_mpts
))) - 1;
1600 init_hca
.log_uar_sz
= ilog2(dev
->caps
.num_uars
);
1601 init_hca
.uar_page_sz
= PAGE_SHIFT
- 12;
1602 init_hca
.mw_enabled
= 0;
1603 if (dev
->caps
.flags
& MLX4_DEV_CAP_FLAG_MEM_WINDOW
||
1604 dev
->caps
.bmme_flags
& MLX4_BMME_FLAG_TYPE_2_WIN
)
1605 init_hca
.mw_enabled
= INIT_HCA_TPT_MW_ENABLE
;
1607 err
= mlx4_init_icm(dev
, &dev_cap
, &init_hca
, icm_size
);
1611 err
= mlx4_INIT_HCA(dev
, &init_hca
);
1613 mlx4_err(dev
, "INIT_HCA command failed, aborting\n");
1617 * If TS is supported by FW
1618 * read HCA frequency by QUERY_HCA command
1620 if (dev
->caps
.flags2
& MLX4_DEV_CAP_FLAG2_TS
) {
1621 memset(&init_hca
, 0, sizeof(init_hca
));
1622 err
= mlx4_QUERY_HCA(dev
, &init_hca
);
1624 mlx4_err(dev
, "QUERY_HCA command failed, disable timestamp\n");
1625 dev
->caps
.flags2
&= ~MLX4_DEV_CAP_FLAG2_TS
;
1627 dev
->caps
.hca_core_clock
=
1628 init_hca
.hca_core_clock
;
1631 /* In case we got HCA frequency 0 - disable timestamping
1632 * to avoid dividing by zero
1634 if (!dev
->caps
.hca_core_clock
) {
1635 dev
->caps
.flags2
&= ~MLX4_DEV_CAP_FLAG2_TS
;
1637 "HCA frequency is 0 - timestamping is not supported\n");
1638 } else if (map_internal_clock(dev
)) {
1640 * Map internal clock,
1641 * in case of failure disable timestamping
1643 dev
->caps
.flags2
&= ~MLX4_DEV_CAP_FLAG2_TS
;
1644 mlx4_err(dev
, "Failed to map internal clock. Timestamping is not supported\n");
1648 err
= mlx4_init_slave(dev
);
1650 if (err
!= -EPROBE_DEFER
)
1651 mlx4_err(dev
, "Failed to initialize slave\n");
1655 err
= mlx4_slave_cap(dev
);
1657 mlx4_err(dev
, "Failed to obtain slave caps\n");
1662 if (map_bf_area(dev
))
1663 mlx4_dbg(dev
, "Failed to map blue flame area\n");
1665 /*Only the master set the ports, all the rest got it from it.*/
1666 if (!mlx4_is_slave(dev
))
1667 mlx4_set_port_mask(dev
);
1669 err
= mlx4_QUERY_ADAPTER(dev
, &adapter
);
1671 mlx4_err(dev
, "QUERY_ADAPTER command failed, aborting\n");
1675 priv
->eq_table
.inta_pin
= adapter
.inta_pin
;
1676 memcpy(dev
->board_id
, adapter
.board_id
, sizeof dev
->board_id
);
1681 unmap_internal_clock(dev
);
1685 if (mlx4_is_slave(dev
))
1686 mlx4_slave_exit(dev
);
1688 mlx4_CLOSE_HCA(dev
, 0);
1691 if (!mlx4_is_slave(dev
))
1692 mlx4_free_icms(dev
);
1695 if (!mlx4_is_slave(dev
)) {
1697 mlx4_free_icm(dev
, priv
->fw
.fw_icm
, 0);
1702 static int mlx4_init_counters_table(struct mlx4_dev
*dev
)
1704 struct mlx4_priv
*priv
= mlx4_priv(dev
);
1707 if (!(dev
->caps
.flags
& MLX4_DEV_CAP_FLAG_COUNTERS
))
1710 nent
= dev
->caps
.max_counters
;
1711 return mlx4_bitmap_init(&priv
->counters_bitmap
, nent
, nent
- 1, 0, 0);
1714 static void mlx4_cleanup_counters_table(struct mlx4_dev
*dev
)
1716 mlx4_bitmap_cleanup(&mlx4_priv(dev
)->counters_bitmap
);
1719 int __mlx4_counter_alloc(struct mlx4_dev
*dev
, u32
*idx
)
1721 struct mlx4_priv
*priv
= mlx4_priv(dev
);
1723 if (!(dev
->caps
.flags
& MLX4_DEV_CAP_FLAG_COUNTERS
))
1726 *idx
= mlx4_bitmap_alloc(&priv
->counters_bitmap
);
1733 int mlx4_counter_alloc(struct mlx4_dev
*dev
, u32
*idx
)
1738 if (mlx4_is_mfunc(dev
)) {
1739 err
= mlx4_cmd_imm(dev
, 0, &out_param
, RES_COUNTER
,
1740 RES_OP_RESERVE
, MLX4_CMD_ALLOC_RES
,
1741 MLX4_CMD_TIME_CLASS_A
, MLX4_CMD_WRAPPED
);
1743 *idx
= get_param_l(&out_param
);
1747 return __mlx4_counter_alloc(dev
, idx
);
1749 EXPORT_SYMBOL_GPL(mlx4_counter_alloc
);
1751 void __mlx4_counter_free(struct mlx4_dev
*dev
, u32 idx
)
1753 mlx4_bitmap_free(&mlx4_priv(dev
)->counters_bitmap
, idx
, MLX4_USE_RR
);
1757 void mlx4_counter_free(struct mlx4_dev
*dev
, u32 idx
)
1761 if (mlx4_is_mfunc(dev
)) {
1762 set_param_l(&in_param
, idx
);
1763 mlx4_cmd(dev
, in_param
, RES_COUNTER
, RES_OP_RESERVE
,
1764 MLX4_CMD_FREE_RES
, MLX4_CMD_TIME_CLASS_A
,
1768 __mlx4_counter_free(dev
, idx
);
1770 EXPORT_SYMBOL_GPL(mlx4_counter_free
);
1772 static int mlx4_setup_hca(struct mlx4_dev
*dev
)
1774 struct mlx4_priv
*priv
= mlx4_priv(dev
);
1777 __be32 ib_port_default_caps
;
1779 err
= mlx4_init_uar_table(dev
);
1781 mlx4_err(dev
, "Failed to initialize user access region table, aborting\n");
1785 err
= mlx4_uar_alloc(dev
, &priv
->driver_uar
);
1787 mlx4_err(dev
, "Failed to allocate driver access region, aborting\n");
1788 goto err_uar_table_free
;
1791 priv
->kar
= ioremap((phys_addr_t
) priv
->driver_uar
.pfn
<< PAGE_SHIFT
, PAGE_SIZE
);
1793 mlx4_err(dev
, "Couldn't map kernel access region, aborting\n");
1798 err
= mlx4_init_pd_table(dev
);
1800 mlx4_err(dev
, "Failed to initialize protection domain table, aborting\n");
1804 err
= mlx4_init_xrcd_table(dev
);
1806 mlx4_err(dev
, "Failed to initialize reliable connection domain table, aborting\n");
1807 goto err_pd_table_free
;
1810 err
= mlx4_init_mr_table(dev
);
1812 mlx4_err(dev
, "Failed to initialize memory region table, aborting\n");
1813 goto err_xrcd_table_free
;
1816 if (!mlx4_is_slave(dev
)) {
1817 err
= mlx4_init_mcg_table(dev
);
1819 mlx4_err(dev
, "Failed to initialize multicast group table, aborting\n");
1820 goto err_mr_table_free
;
1824 err
= mlx4_init_eq_table(dev
);
1826 mlx4_err(dev
, "Failed to initialize event queue table, aborting\n");
1827 goto err_mcg_table_free
;
1830 err
= mlx4_cmd_use_events(dev
);
1832 mlx4_err(dev
, "Failed to switch to event-driven firmware commands, aborting\n");
1833 goto err_eq_table_free
;
1836 err
= mlx4_NOP(dev
);
1838 if (dev
->flags
& MLX4_FLAG_MSI_X
) {
1839 mlx4_warn(dev
, "NOP command failed to generate MSI-X interrupt IRQ %d)\n",
1840 priv
->eq_table
.eq
[dev
->caps
.num_comp_vectors
].irq
);
1841 mlx4_warn(dev
, "Trying again without MSI-X\n");
1843 mlx4_err(dev
, "NOP command failed to generate interrupt (IRQ %d), aborting\n",
1844 priv
->eq_table
.eq
[dev
->caps
.num_comp_vectors
].irq
);
1845 mlx4_err(dev
, "BIOS or ACPI interrupt routing problem?\n");
1851 mlx4_dbg(dev
, "NOP command IRQ test passed\n");
1853 err
= mlx4_init_cq_table(dev
);
1855 mlx4_err(dev
, "Failed to initialize completion queue table, aborting\n");
1859 err
= mlx4_init_srq_table(dev
);
1861 mlx4_err(dev
, "Failed to initialize shared receive queue table, aborting\n");
1862 goto err_cq_table_free
;
1865 err
= mlx4_init_qp_table(dev
);
1867 mlx4_err(dev
, "Failed to initialize queue pair table, aborting\n");
1868 goto err_srq_table_free
;
1871 err
= mlx4_init_counters_table(dev
);
1872 if (err
&& err
!= -ENOENT
) {
1873 mlx4_err(dev
, "Failed to initialize counters table, aborting\n");
1874 goto err_qp_table_free
;
1877 if (!mlx4_is_slave(dev
)) {
1878 for (port
= 1; port
<= dev
->caps
.num_ports
; port
++) {
1879 ib_port_default_caps
= 0;
1880 err
= mlx4_get_port_ib_caps(dev
, port
,
1881 &ib_port_default_caps
);
1883 mlx4_warn(dev
, "failed to get port %d default ib capabilities (%d). Continuing with caps = 0\n",
1885 dev
->caps
.ib_port_def_cap
[port
] = ib_port_default_caps
;
1887 /* initialize per-slave default ib port capabilities */
1888 if (mlx4_is_master(dev
)) {
1890 for (i
= 0; i
< dev
->num_slaves
; i
++) {
1891 if (i
== mlx4_master_func_num(dev
))
1893 priv
->mfunc
.master
.slave_state
[i
].ib_cap_mask
[port
] =
1894 ib_port_default_caps
;
1898 if (mlx4_is_mfunc(dev
))
1899 dev
->caps
.port_ib_mtu
[port
] = IB_MTU_2048
;
1901 dev
->caps
.port_ib_mtu
[port
] = IB_MTU_4096
;
1903 err
= mlx4_SET_PORT(dev
, port
, mlx4_is_master(dev
) ?
1904 dev
->caps
.pkey_table_len
[port
] : -1);
1906 mlx4_err(dev
, "Failed to set port %d, aborting\n",
1908 goto err_counters_table_free
;
1915 err_counters_table_free
:
1916 mlx4_cleanup_counters_table(dev
);
1919 mlx4_cleanup_qp_table(dev
);
1922 mlx4_cleanup_srq_table(dev
);
1925 mlx4_cleanup_cq_table(dev
);
1928 mlx4_cmd_use_polling(dev
);
1931 mlx4_cleanup_eq_table(dev
);
1934 if (!mlx4_is_slave(dev
))
1935 mlx4_cleanup_mcg_table(dev
);
1938 mlx4_cleanup_mr_table(dev
);
1940 err_xrcd_table_free
:
1941 mlx4_cleanup_xrcd_table(dev
);
1944 mlx4_cleanup_pd_table(dev
);
1950 mlx4_uar_free(dev
, &priv
->driver_uar
);
1953 mlx4_cleanup_uar_table(dev
);
1957 static void mlx4_enable_msi_x(struct mlx4_dev
*dev
)
1959 struct mlx4_priv
*priv
= mlx4_priv(dev
);
1960 struct msix_entry
*entries
;
1961 int nreq
= min_t(int, dev
->caps
.num_ports
*
1962 min_t(int, num_online_cpus() + 1,
1963 MAX_MSIX_P_PORT
) + MSIX_LEGACY_SZ
, MAX_MSIX
);
1967 nreq
= min_t(int, dev
->caps
.num_eqs
- dev
->caps
.reserved_eqs
,
1970 entries
= kcalloc(nreq
, sizeof *entries
, GFP_KERNEL
);
1974 for (i
= 0; i
< nreq
; ++i
)
1975 entries
[i
].entry
= i
;
1977 nreq
= pci_enable_msix_range(dev
->pdev
, entries
, 2, nreq
);
1982 } else if (nreq
< MSIX_LEGACY_SZ
+
1983 dev
->caps
.num_ports
* MIN_MSIX_P_PORT
) {
1984 /*Working in legacy mode , all EQ's shared*/
1985 dev
->caps
.comp_pool
= 0;
1986 dev
->caps
.num_comp_vectors
= nreq
- 1;
1988 dev
->caps
.comp_pool
= nreq
- MSIX_LEGACY_SZ
;
1989 dev
->caps
.num_comp_vectors
= MSIX_LEGACY_SZ
- 1;
1991 for (i
= 0; i
< nreq
; ++i
)
1992 priv
->eq_table
.eq
[i
].irq
= entries
[i
].vector
;
1994 dev
->flags
|= MLX4_FLAG_MSI_X
;
2001 dev
->caps
.num_comp_vectors
= 1;
2002 dev
->caps
.comp_pool
= 0;
2004 for (i
= 0; i
< 2; ++i
)
2005 priv
->eq_table
.eq
[i
].irq
= dev
->pdev
->irq
;
static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
{
	struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
	int err = 0;

	info->dev = dev;
	info->port = port;
	if (!mlx4_is_slave(dev)) {
		mlx4_init_mac_table(dev, &info->mac_table);
		mlx4_init_vlan_table(dev, &info->vlan_table);
		info->base_qpn = mlx4_get_base_qpn(dev, port);
	}

	sprintf(info->dev_name, "mlx4_port%d", port);
	info->port_attr.attr.name = info->dev_name;
	if (mlx4_is_mfunc(dev))
		info->port_attr.attr.mode = S_IRUGO;
	else {
		info->port_attr.attr.mode = S_IRUGO | S_IWUSR;
		info->port_attr.store     = set_port_type;
	}
	info->port_attr.show      = show_port_type;
	sysfs_attr_init(&info->port_attr.attr);

	err = device_create_file(&dev->pdev->dev, &info->port_attr);
	if (err) {
		mlx4_err(dev, "Failed to create file for port %d\n", port);
		info->port = -1;
	}

	sprintf(info->dev_mtu_name, "mlx4_port%d_mtu", port);
	info->port_mtu_attr.attr.name = info->dev_mtu_name;
	if (mlx4_is_mfunc(dev))
		info->port_mtu_attr.attr.mode = S_IRUGO;
	else {
		info->port_mtu_attr.attr.mode = S_IRUGO | S_IWUSR;
		info->port_mtu_attr.store     = set_port_ib_mtu;
	}
	info->port_mtu_attr.show      = show_port_ib_mtu;
	sysfs_attr_init(&info->port_mtu_attr.attr);

	err = device_create_file(&dev->pdev->dev, &info->port_mtu_attr);
	if (err) {
		mlx4_err(dev, "Failed to create mtu file for port %d\n", port);
		device_remove_file(&info->dev->pdev->dev, &info->port_attr);
		info->port = -1;
	}

	return err;
}

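/*
 * The two attributes above are created directly on the PCI device, so for a
 * hypothetical HCA at 0000:04:00.0 they would appear as
 * /sys/bus/pci/devices/0000:04:00.0/mlx4_port1 and mlx4_port1_mtu.  They are
 * writable (port type / IB MTU) only when this function is not running in
 * multi-function mode; otherwise they are exposed read-only.
 */
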
static void mlx4_cleanup_port_info(struct mlx4_port_info *info)
{
	if (info->port < 0)
		return;

	device_remove_file(&info->dev->pdev->dev, &info->port_attr);
	device_remove_file(&info->dev->pdev->dev, &info->port_mtu_attr);
}

static int mlx4_init_steering(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int num_entries = dev->caps.num_ports;
	int i, j;

	priv->steer = kzalloc(sizeof(struct mlx4_steer) * num_entries, GFP_KERNEL);
	if (!priv->steer)
		return -ENOMEM;

	for (i = 0; i < num_entries; i++)
		for (j = 0; j < MLX4_NUM_STEERS; j++) {
			INIT_LIST_HEAD(&priv->steer[i].promisc_qps[j]);
			INIT_LIST_HEAD(&priv->steer[i].steer_entries[j]);
		}
	return 0;
}

static void mlx4_clear_steering(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_steer_index *entry, *tmp_entry;
	struct mlx4_promisc_qp *pqp, *tmp_pqp;
	int num_entries = dev->caps.num_ports;
	int i, j;

	for (i = 0; i < num_entries; i++) {
		for (j = 0; j < MLX4_NUM_STEERS; j++) {
			list_for_each_entry_safe(pqp, tmp_pqp,
						 &priv->steer[i].promisc_qps[j],
						 list) {
				list_del(&pqp->list);
				kfree(pqp);
			}
			list_for_each_entry_safe(entry, tmp_entry,
						 &priv->steer[i].steer_entries[j],
						 list) {
				list_del(&entry->list);
				list_for_each_entry_safe(pqp, tmp_pqp,
							 &entry->duplicates,
							 list) {
					list_del(&pqp->list);
					kfree(pqp);
				}
				kfree(entry);
			}
		}
	}
	kfree(priv->steer);
}

static int extended_func_num(struct pci_dev *pdev)
{
	return PCI_SLOT(pdev->devfn) * 8 + PCI_FUNC(pdev->devfn);
}

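/*
 * Illustration: devfn packs the PCI slot number in its upper five bits and
 * the function number in its lower three, so the expression above simply
 * flattens them into one index.  A PF at slot 0, function 0 yields 0, while
 * a hypothetical VF exposed at slot 1, function 2 yields 1 * 8 + 2 = 10;
 * this flat index is what the probe path below compares against the
 * cumulative per-port VF counts when deciding which VFs to skip.
 */
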
#define MLX4_OWNER_BASE	0x8069c
#define MLX4_OWNER_SIZE	4

static int mlx4_get_ownership(struct mlx4_dev *dev)
{
	void __iomem *owner;
	u32 ret;

	if (pci_channel_offline(dev->pdev))
		return -EIO;

	owner = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_OWNER_BASE,
			MLX4_OWNER_SIZE);
	if (!owner) {
		mlx4_err(dev, "Failed to obtain ownership bit\n");
		return -ENOMEM;
	}

	ret = readl(owner);
	iounmap(owner);
	return (int) !!ret;
}

static void mlx4_free_ownership(struct mlx4_dev *dev)
{
	void __iomem *owner;

	if (pci_channel_offline(dev->pdev))
		return;

	owner = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_OWNER_BASE,
			MLX4_OWNER_SIZE);
	if (!owner) {
		mlx4_err(dev, "Failed to obtain ownership bit\n");
		return;
	}

	writel(0, owner);
	msleep(1000);
	iounmap(owner);
}

static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
{
	struct mlx4_priv *priv;
	struct mlx4_dev *dev;
	int err;
	int port;
	int i;
	int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
	int prb_vf[MLX4_MAX_PORTS + 1] = {0, 0, 0};
	const int param_map[MLX4_MAX_PORTS + 1][MLX4_MAX_PORTS + 1] = {
		{2, 0, 0}, {0, 1, 2}, {0, 1, 2} };
	unsigned total_vfs = 0;
	int sriov_initialized = 0;

	pr_info(DRV_NAME ": Initializing %s\n", pci_name(pdev));

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
		return err;
	}

	/* Due to requirement that all VFs and the PF are *guaranteed* 2 MACS
	 * per port, we must limit the number of VFs to 63 (since there are
	 * 128 MACs)
	 */
	for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]) && i < num_vfs_argc;
	     total_vfs += nvfs[param_map[num_vfs_argc - 1][i]], i++) {
		nvfs[param_map[num_vfs_argc - 1][i]] = num_vfs[i];
		if (nvfs[i] < 0) {
			dev_err(&pdev->dev, "num_vfs module parameter cannot be negative\n");
			err = -EINVAL;
			goto err_disable_pdev;
		}
	}
	for (i = 0; i < sizeof(prb_vf)/sizeof(prb_vf[0]) && i < probe_vfs_argc;
	     i++) {
		prb_vf[param_map[probe_vfs_argc - 1][i]] = probe_vf[i];
		if (prb_vf[i] < 0 || prb_vf[i] > nvfs[i]) {
			dev_err(&pdev->dev, "probe_vf module parameter cannot be negative or greater than num_vfs\n");
			err = -EINVAL;
			goto err_disable_pdev;
		}
	}
	if (total_vfs >= MLX4_MAX_NUM_VF) {
		dev_err(&pdev->dev,
			"Requested more VF's (%d) than allowed (%d)\n",
			total_vfs, MLX4_MAX_NUM_VF - 1);
		err = -EINVAL;
		goto err_disable_pdev;
	}

	for (i = 0; i < MLX4_MAX_PORTS; i++) {
		if (nvfs[i] + nvfs[2] >= MLX4_MAX_NUM_VF_P_PORT) {
			dev_err(&pdev->dev,
				"Requested more VF's (%d) for port (%d) than allowed (%d)\n",
				nvfs[i] + nvfs[2], i + 1,
				MLX4_MAX_NUM_VF_P_PORT - 1);
			err = -EINVAL;
			goto err_disable_pdev;
		}
	}

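	/*
	 * Illustration of the nvfs[]/param_map mapping built above (the
	 * concrete numbers are only examples): nvfs[0] and nvfs[1] count VFs
	 * bound to port 1 or port 2 only, while nvfs[2] counts dual-port VFs.
	 * A single-valued "num_vfs=5" therefore lands in nvfs[2] (5 dual-port
	 * VFs), whereas "num_vfs=1,2,3" yields one VF on port 1, two on
	 * port 2 and three dual-port VFs, for a total_vfs of 6; probe_vf is
	 * spread over prb_vf[] in the same way.
	 */
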
	/* Check for BARs. */
	if (!(pci_dev_data & MLX4_PCI_DEV_IS_VF) &&
	    !(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "Missing DCS, aborting (driver_data: 0x%x, pci_resource_flags(pdev, 0):0x%lx)\n",
			pci_dev_data, pci_resource_flags(pdev, 0));
		err = -ENODEV;
		goto err_disable_pdev;
	}
	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "Missing UAR, aborting\n");
		err = -ENODEV;
		goto err_disable_pdev;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "Couldn't get PCI resources, aborting\n");
		goto err_disable_pdev;
	}

	pci_set_master(pdev);

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask\n");
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting\n");
			goto err_release_regions;
		}
	}
	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		dev_warn(&pdev->dev, "Warning: couldn't set 64-bit consistent PCI DMA mask\n");
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, aborting\n");
			goto err_release_regions;
		}
	}

	/* Allow large DMA segments, up to the firmware limit of 1 GB */
	dma_set_max_seg_size(&pdev->dev, 1024 * 1024 * 1024);

	dev       = pci_get_drvdata(pdev);
	priv      = mlx4_priv(dev);
	dev->pdev = pdev;
	INIT_LIST_HEAD(&priv->ctx_list);
	spin_lock_init(&priv->ctx_lock);

	mutex_init(&priv->port_mutex);

	INIT_LIST_HEAD(&priv->pgdir_list);
	mutex_init(&priv->pgdir_mutex);

	INIT_LIST_HEAD(&priv->bf_list);
	mutex_init(&priv->bf_mutex);

	dev->rev_id = pdev->revision;
	dev->numa_node = dev_to_node(&pdev->dev);
	/* Detect if this device is a virtual function */
	if (pci_dev_data & MLX4_PCI_DEV_IS_VF) {
		/* When acting as pf, we normally skip vfs unless explicitly
		 * requested to probe them. */
		if (total_vfs) {
			unsigned vfs_offset = 0;

			for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]) &&
			     vfs_offset + nvfs[i] < extended_func_num(pdev);
			     vfs_offset += nvfs[i], i++)
				;
			if (i == sizeof(nvfs)/sizeof(nvfs[0])) {
				err = -ENODEV;
				goto err_release_regions;
			}
			if ((extended_func_num(pdev) - vfs_offset)
			    > prb_vf[i]) {
				mlx4_warn(dev, "Skipping virtual function:%d\n",
					  extended_func_num(pdev));
				err = -ENODEV;
				goto err_release_regions;
			}
		}
		mlx4_warn(dev, "Detected virtual function - running in slave mode\n");
		dev->flags |= MLX4_FLAG_SLAVE;
	} else {
		/* We reset the device and enable SRIOV only for physical
		 * devices.  Try to claim ownership on the device;
		 * if already taken, skip -- do not allow multiple PFs */
		err = mlx4_get_ownership(dev);
		if (err) {
			if (err < 0)
				goto err_release_regions;
			else {
				mlx4_warn(dev, "Multiple PFs not yet supported - Skipping PF\n");
				err = -EINVAL;
				goto err_release_regions;
			}
		}

		if (total_vfs) {
			mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n",
				  total_vfs);
			dev->dev_vfs = kzalloc(
					total_vfs * sizeof(*dev->dev_vfs),
					GFP_KERNEL);
			if (NULL == dev->dev_vfs) {
				mlx4_err(dev, "Failed to allocate memory for VFs\n");
				err = 0;
			} else {
				atomic_inc(&pf_loading);
				err = pci_enable_sriov(pdev, total_vfs);
				if (err) {
					mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d)\n",
						 err);
					atomic_dec(&pf_loading);
					err = 0;
				} else {
					mlx4_warn(dev, "Running in master mode\n");
					dev->flags |= MLX4_FLAG_SRIOV |
						      MLX4_FLAG_MASTER;
					dev->num_vfs = total_vfs;
					sriov_initialized = 1;
				}
			}
		}

		atomic_set(&priv->opreq_count, 0);
		INIT_WORK(&priv->opreq_task, mlx4_opreq_action);

		/*
		 * Now reset the HCA before we touch the PCI capabilities or
		 * attempt a firmware command, since a boot ROM may have left
		 * the HCA in an undefined state.
		 */
		err = mlx4_reset(dev);
		if (err) {
			mlx4_err(dev, "Failed to reset HCA, aborting\n");
			goto err_rel_own;
		}
	}

slave_start:
	err = mlx4_cmd_init(dev);
	if (err) {
		mlx4_err(dev, "Failed to init command interface, aborting\n");
		goto err_sriov;
	}

	/* In slave functions, the communication channel must be initialized
	 * before posting commands. Also, init num_slaves before calling
	 * mlx4_init_hca */
	if (mlx4_is_mfunc(dev)) {
		if (mlx4_is_master(dev))
			dev->num_slaves = MLX4_MAX_NUM_SLAVES;
		else {
			dev->num_slaves = 0;
			err = mlx4_multi_func_init(dev);
			if (err) {
				mlx4_err(dev, "Failed to init slave mfunc interface, aborting\n");
				goto err_cmd;
			}
		}
	}

	err = mlx4_init_hca(dev);
	if (err) {
		if (err == -EACCES) {
			/* Not primary Physical function
			 * Running in slave mode */
			mlx4_cmd_cleanup(dev);
			dev->flags |= MLX4_FLAG_SLAVE;
			dev->flags &= ~MLX4_FLAG_MASTER;
			goto slave_start;
		} else
			goto err_mfunc;
	}

	/* check if the device is functioning at its maximum possible speed.
	 * No return code for this call, just warn the user in case the PCI
	 * express device capabilities are under-satisfied by the bus.
	 */
	if (!mlx4_is_slave(dev))
		mlx4_check_pcie_caps(dev);

	/* In master functions, the communication channel must be initialized
	 * after obtaining its address from fw */
	if (mlx4_is_master(dev)) {
		unsigned sum = 0;

		err = mlx4_multi_func_init(dev);
		if (err) {
			mlx4_err(dev, "Failed to init master mfunc interface, aborting\n");
			goto err_close;
		}
		if (sriov_initialized) {
			int ib_ports = 0;

			mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
				ib_ports++;
			if (ib_ports &&
			    (num_vfs_argc > 1 || probe_vfs_argc > 1)) {
				mlx4_err(dev,
					 "Invalid syntax of num_vfs/probe_vfs with IB port - single port VFs syntax is only supported when all ports are configured as ethernet\n");
				err = -EINVAL;
				goto err_master_mfunc;
			}
			for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]); i++) {
				unsigned j;

				for (j = 0; j < nvfs[i]; ++sum, ++j) {
					dev->dev_vfs[sum].min_port = i < 2 ? i + 1 : 1;
					dev->dev_vfs[sum].n_ports = i < 2 ? 1 :
						dev->caps.num_ports;
				}
			}
		}
	}

	err = mlx4_alloc_eq_table(dev);
	if (err)
		goto err_master_mfunc;

	priv->msix_ctl.pool_bm = 0;
	mutex_init(&priv->msix_ctl.pool_lock);

	mlx4_enable_msi_x(dev);
	if ((mlx4_is_mfunc(dev)) &&
	    !(dev->flags & MLX4_FLAG_MSI_X)) {
		err = -ENOSYS;
		mlx4_err(dev, "INTx is not supported in multi-function mode, aborting\n");
		goto err_free_eq;
	}

	if (!mlx4_is_slave(dev)) {
		err = mlx4_init_steering(dev);
		if (err)
			goto err_free_eq;
	}

	err = mlx4_setup_hca(dev);
	if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X) &&
	    !mlx4_is_mfunc(dev)) {
		dev->flags &= ~MLX4_FLAG_MSI_X;
		dev->caps.num_comp_vectors = 1;
		dev->caps.comp_pool	   = 0;
		pci_disable_msix(pdev);
		err = mlx4_setup_hca(dev);
	}

	if (err)
		goto err_steer;

	mlx4_init_quotas(dev);

	for (port = 1; port <= dev->caps.num_ports; port++) {
		err = mlx4_init_port_info(dev, port);
		if (err)
			goto err_port;
	}

	err = mlx4_register_device(dev);
	if (err)
		goto err_port;

	mlx4_request_modules(dev);

	mlx4_sense_init(dev);
	mlx4_start_sense(dev);

	priv->removed = 0;

	if (mlx4_is_master(dev) && dev->num_vfs)
		atomic_dec(&pf_loading);

	return 0;

err_port:
	for (--port; port >= 1; --port)
		mlx4_cleanup_port_info(&priv->port[port]);

	mlx4_cleanup_counters_table(dev);
	mlx4_cleanup_qp_table(dev);
	mlx4_cleanup_srq_table(dev);
	mlx4_cleanup_cq_table(dev);
	mlx4_cmd_use_polling(dev);
	mlx4_cleanup_eq_table(dev);
	mlx4_cleanup_mcg_table(dev);
	mlx4_cleanup_mr_table(dev);
	mlx4_cleanup_xrcd_table(dev);
	mlx4_cleanup_pd_table(dev);
	mlx4_cleanup_uar_table(dev);

err_steer:
	if (!mlx4_is_slave(dev))
		mlx4_clear_steering(dev);

err_free_eq:
	mlx4_free_eq_table(dev);

err_master_mfunc:
	if (mlx4_is_master(dev))
		mlx4_multi_func_cleanup(dev);

err_close:
	if (dev->flags & MLX4_FLAG_MSI_X)
		pci_disable_msix(pdev);

	mlx4_close_hca(dev);

err_mfunc:
	if (mlx4_is_slave(dev))
		mlx4_multi_func_cleanup(dev);

err_cmd:
	mlx4_cmd_cleanup(dev);

err_sriov:
	if (dev->flags & MLX4_FLAG_SRIOV)
		pci_disable_sriov(pdev);

err_rel_own:
	if (!mlx4_is_slave(dev))
		mlx4_free_ownership(dev);

	if (mlx4_is_master(dev) && dev->num_vfs)
		atomic_dec(&pf_loading);

	kfree(priv->dev.dev_vfs);

err_release_regions:
	pci_release_regions(pdev);

err_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}

static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct mlx4_priv *priv;
	struct mlx4_dev *dev;

	printk_once(KERN_INFO "%s", mlx4_version);

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	dev = &priv->dev;
	pci_set_drvdata(pdev, dev);
	priv->pci_dev_data = id->driver_data;

	return __mlx4_init_one(pdev, id->driver_data);
}

static void __mlx4_remove_one(struct pci_dev *pdev)
{
	struct mlx4_dev  *dev  = pci_get_drvdata(pdev);
	struct mlx4_priv *priv = mlx4_priv(dev);
	int               pci_dev_data;
	int p;

	if (priv->removed)
		return;

	pci_dev_data = priv->pci_dev_data;

	/* in SRIOV it is not allowed to unload the pf's
	 * driver while there are alive vf's */
	if (mlx4_is_master(dev) && mlx4_how_many_lives_vf(dev))
		printk(KERN_ERR "Removing PF when there are assigned VF's !!!\n");
	mlx4_stop_sense(dev);
	mlx4_unregister_device(dev);

	for (p = 1; p <= dev->caps.num_ports; p++) {
		mlx4_cleanup_port_info(&priv->port[p]);
		mlx4_CLOSE_PORT(dev, p);
	}

	if (mlx4_is_master(dev))
		mlx4_free_resource_tracker(dev,
					   RES_TR_FREE_SLAVES_ONLY);

	mlx4_cleanup_counters_table(dev);
	mlx4_cleanup_qp_table(dev);
	mlx4_cleanup_srq_table(dev);
	mlx4_cleanup_cq_table(dev);
	mlx4_cmd_use_polling(dev);
	mlx4_cleanup_eq_table(dev);
	mlx4_cleanup_mcg_table(dev);
	mlx4_cleanup_mr_table(dev);
	mlx4_cleanup_xrcd_table(dev);
	mlx4_cleanup_pd_table(dev);

	if (mlx4_is_master(dev))
		mlx4_free_resource_tracker(dev,
					   RES_TR_FREE_STRUCTS_ONLY);

	iounmap(priv->kar);
	mlx4_uar_free(dev, &priv->driver_uar);
	mlx4_cleanup_uar_table(dev);
	if (!mlx4_is_slave(dev))
		mlx4_clear_steering(dev);
	mlx4_free_eq_table(dev);
	if (mlx4_is_master(dev))
		mlx4_multi_func_cleanup(dev);
	mlx4_close_hca(dev);
	if (mlx4_is_slave(dev))
		mlx4_multi_func_cleanup(dev);
	mlx4_cmd_cleanup(dev);

	if (dev->flags & MLX4_FLAG_MSI_X)
		pci_disable_msix(pdev);
	if (dev->flags & MLX4_FLAG_SRIOV) {
		mlx4_warn(dev, "Disabling SR-IOV\n");
		pci_disable_sriov(pdev);
		dev->num_vfs = 0;
	}

	if (!mlx4_is_slave(dev))
		mlx4_free_ownership(dev);

	kfree(dev->caps.qp0_tunnel);
	kfree(dev->caps.qp0_proxy);
	kfree(dev->caps.qp1_tunnel);
	kfree(dev->caps.qp1_proxy);
	kfree(dev->dev_vfs);

	pci_release_regions(pdev);
	pci_disable_device(pdev);
	memset(priv, 0, sizeof(*priv));
	priv->pci_dev_data = pci_dev_data;
	priv->removed = 1;
}

static void mlx4_remove_one(struct pci_dev *pdev)
{
	struct mlx4_dev  *dev  = pci_get_drvdata(pdev);
	struct mlx4_priv *priv = mlx4_priv(dev);

	__mlx4_remove_one(pdev);
	kfree(priv);
	pci_set_drvdata(pdev, NULL);
}

int mlx4_restart_one(struct pci_dev *pdev)
{
	struct mlx4_dev	 *dev  = pci_get_drvdata(pdev);
	struct mlx4_priv *priv = mlx4_priv(dev);
	int		  pci_dev_data;

	pci_dev_data = priv->pci_dev_data;
	__mlx4_remove_one(pdev);
	return __mlx4_init_one(pdev, pci_dev_data);
}

static DEFINE_PCI_DEVICE_TABLE(mlx4_pci_table) = {
	/* MT25408 "Hermon" SDR */
	{ PCI_VDEVICE(MELLANOX, 0x6340), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT25408 "Hermon" DDR */
	{ PCI_VDEVICE(MELLANOX, 0x634a), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT25408 "Hermon" QDR */
	{ PCI_VDEVICE(MELLANOX, 0x6354), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT25408 "Hermon" DDR PCIe gen2 */
	{ PCI_VDEVICE(MELLANOX, 0x6732), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT25408 "Hermon" QDR PCIe gen2 */
	{ PCI_VDEVICE(MELLANOX, 0x673c), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT25408 "Hermon" EN 10GigE */
	{ PCI_VDEVICE(MELLANOX, 0x6368), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT25408 "Hermon" EN 10GigE PCIe gen2 */
	{ PCI_VDEVICE(MELLANOX, 0x6750), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT25458 ConnectX EN 10GBASE-T 10GigE */
	{ PCI_VDEVICE(MELLANOX, 0x6372), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT25458 ConnectX EN 10GBASE-T+Gen2 10GigE */
	{ PCI_VDEVICE(MELLANOX, 0x675a), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT26468 ConnectX EN 10GigE PCIe gen2 */
	{ PCI_VDEVICE(MELLANOX, 0x6764), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT26438 ConnectX EN 40GigE PCIe gen2 5GT/s */
	{ PCI_VDEVICE(MELLANOX, 0x6746), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT26478 ConnectX2 40GigE PCIe gen2 */
	{ PCI_VDEVICE(MELLANOX, 0x676e), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT25400 Family [ConnectX-2 Virtual Function] */
	{ PCI_VDEVICE(MELLANOX, 0x1002), MLX4_PCI_DEV_IS_VF },
	/* MT27500 Family [ConnectX-3] */
	{ PCI_VDEVICE(MELLANOX, 0x1003), 0 },
	/* MT27500 Family [ConnectX-3 Virtual Function] */
	{ PCI_VDEVICE(MELLANOX, 0x1004), MLX4_PCI_DEV_IS_VF },
	{ PCI_VDEVICE(MELLANOX, 0x1005), 0 }, /* MT27510 Family */
	{ PCI_VDEVICE(MELLANOX, 0x1006), 0 }, /* MT27511 Family */
	{ PCI_VDEVICE(MELLANOX, 0x1007), 0 }, /* MT27520 Family */
	{ PCI_VDEVICE(MELLANOX, 0x1008), 0 }, /* MT27521 Family */
	{ PCI_VDEVICE(MELLANOX, 0x1009), 0 }, /* MT27530 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100a), 0 }, /* MT27531 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100b), 0 }, /* MT27540 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100c), 0 }, /* MT27541 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100d), 0 }, /* MT27550 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100e), 0 }, /* MT27551 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100f), 0 }, /* MT27560 Family */
	{ PCI_VDEVICE(MELLANOX, 0x1010), 0 }, /* MT27561 Family */
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, mlx4_pci_table);

static pci_ers_result_t mlx4_pci_err_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	__mlx4_remove_one(pdev);

	return state == pci_channel_io_perm_failure ?
		PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t mlx4_pci_slot_reset(struct pci_dev *pdev)
{
	struct mlx4_dev	 *dev  = pci_get_drvdata(pdev);
	struct mlx4_priv *priv = mlx4_priv(dev);
	int               ret;

	ret = __mlx4_init_one(pdev, priv->pci_dev_data);

	return ret ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
}

static const struct pci_error_handlers mlx4_err_handler = {
	.error_detected = mlx4_pci_err_detected,
	.slot_reset     = mlx4_pci_slot_reset,
};

static struct pci_driver mlx4_driver = {
	.name		= DRV_NAME,
	.id_table	= mlx4_pci_table,
	.probe		= mlx4_init_one,
	.shutdown	= mlx4_remove_one,
	.remove		= mlx4_remove_one,
	.err_handler	= &mlx4_err_handler,
};

static int __init mlx4_verify_params(void)
{
	if ((log_num_mac < 0) || (log_num_mac > 7)) {
		pr_warning("mlx4_core: bad num_mac: %d\n", log_num_mac);
		return -1;
	}

	if (log_num_vlan != 0)
		pr_warning("mlx4_core: log_num_vlan - obsolete module param, using %d\n",
			   MLX4_LOG_NUM_VLANS);

	if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 7)) {
		pr_warning("mlx4_core: bad log_mtts_per_seg: %d\n", log_mtts_per_seg);
		return -1;
	}

	/* Check if module param for ports type has legal combination */
	if (port_type_array[0] == false && port_type_array[1] == true) {
		printk(KERN_WARNING "Module parameter configuration ETH/IB is not supported. Switching to default configuration IB/IB\n");
		port_type_array[0] = true;
	}

	if (mlx4_log_num_mgm_entry_size != -1 &&
	    (mlx4_log_num_mgm_entry_size < MLX4_MIN_MGM_LOG_ENTRY_SIZE ||
	     mlx4_log_num_mgm_entry_size > MLX4_MAX_MGM_LOG_ENTRY_SIZE)) {
		pr_warn("mlx4_core: mlx4_log_num_mgm_entry_size (%d) not in legal range (-1 or %d..%d)\n",
			mlx4_log_num_mgm_entry_size,
			MLX4_MIN_MGM_LOG_ENTRY_SIZE,
			MLX4_MAX_MGM_LOG_ENTRY_SIZE);
		return -1;
	}

	return 0;
}

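/*
 * For reference, a module invocation that satisfies every check above might
 * look like (values purely illustrative):
 *
 *   modprobe mlx4_core log_num_mac=7 log_mtts_per_seg=3 \
 *            log_num_mgm_entry_size=-1
 *
 * i.e. log_num_mac in 0..7, log_mtts_per_seg in 1..7, and
 * log_num_mgm_entry_size either -1 or within
 * [MLX4_MIN_MGM_LOG_ENTRY_SIZE, MLX4_MAX_MGM_LOG_ENTRY_SIZE].
 */
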
static int __init mlx4_init(void)
{
	int ret;

	if (mlx4_verify_params())
		return -EINVAL;

	mlx4_catas_init();

	mlx4_wq = create_singlethread_workqueue("mlx4");
	if (!mlx4_wq)
		return -ENOMEM;

	ret = pci_register_driver(&mlx4_driver);
	if (ret < 0)
		destroy_workqueue(mlx4_wq);
	return ret < 0 ? ret : 0;
}

static void __exit mlx4_cleanup(void)
{
	pci_unregister_driver(&mlx4_driver);
	destroy_workqueue(mlx4_wq);
}

module_init(mlx4_init);
module_exit(mlx4_cleanup);