net/mlx4_core: Don't disable SRIOV if there are active VFs
drivers/net/ethernet/mellanox/mlx4/main.c
/*
 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/io-mapping.h>
#include <linux/delay.h>
#include <linux/kmod.h>

#include <linux/mlx4/device.h>
#include <linux/mlx4/doorbell.h>

#include "mlx4.h"
#include "fw.h"
#include "icm.h"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("Mellanox ConnectX HCA low-level driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);

struct workqueue_struct *mlx4_wq;

#ifdef CONFIG_MLX4_DEBUG

int mlx4_debug_level = 0;
module_param_named(debug_level, mlx4_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");

#endif /* CONFIG_MLX4_DEBUG */

#ifdef CONFIG_PCI_MSI

static int msi_x = 1;
module_param(msi_x, int, 0444);
MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero");

#else /* CONFIG_PCI_MSI */

#define msi_x (0)

#endif /* CONFIG_PCI_MSI */

static uint8_t num_vfs[3] = {0, 0, 0};
static int num_vfs_argc = 3;
module_param_array(num_vfs, byte, &num_vfs_argc, 0444);
MODULE_PARM_DESC(num_vfs, "enable #num_vfs functions if num_vfs > 0\n"
			  "num_vfs=port1,port2,port1+2");

static uint8_t probe_vf[3] = {0, 0, 0};
static int probe_vfs_argc = 3;
module_param_array(probe_vf, byte, &probe_vfs_argc, 0444);
MODULE_PARM_DESC(probe_vf, "number of vfs to probe by pf driver (num_vfs > 0)\n"
			   "probe_vf=port1,port2,port1+2");

int mlx4_log_num_mgm_entry_size = MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE;
module_param_named(log_num_mgm_entry_size,
		   mlx4_log_num_mgm_entry_size, int, 0444);
MODULE_PARM_DESC(log_num_mgm_entry_size, "log mgm size, that defines the num"
					 " of qp per mcg, for example:"
					 " 10 gives 248. range: 7 <="
					 " log_num_mgm_entry_size <= 12."
					 " To activate device managed"
					 " flow steering when available, set to -1");

static bool enable_64b_cqe_eqe = true;
module_param(enable_64b_cqe_eqe, bool, 0444);
MODULE_PARM_DESC(enable_64b_cqe_eqe,
		 "Enable 64 byte CQEs/EQEs when the FW supports this (default: True)");

#define PF_CONTEXT_BEHAVIOUR_MASK	(MLX4_FUNC_CAP_64B_EQE_CQE | \
					 MLX4_FUNC_CAP_EQE_CQE_STRIDE)

static char mlx4_version[] =
	DRV_NAME ": Mellanox ConnectX core driver v"
	DRV_VERSION " (" DRV_RELDATE ")\n";

static struct mlx4_profile default_profile = {
	.num_qp		= 1 << 18,
	.num_srq	= 1 << 16,
	.rdmarc_per_qp	= 1 << 4,
	.num_cq		= 1 << 16,
	.num_mcg	= 1 << 13,
	.num_mpt	= 1 << 19,
	.num_mtt	= 1 << 20, /* It is really num mtt segments */
};

static struct mlx4_profile low_mem_profile = {
	.num_qp		= 1 << 17,
	.num_srq	= 1 << 6,
	.rdmarc_per_qp	= 1 << 4,
	.num_cq		= 1 << 8,
	.num_mcg	= 1 << 8,
	.num_mpt	= 1 << 9,
	.num_mtt	= 1 << 7,
};
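/* The low-memory profile trims every ICM table (e.g. 2^8 CQs instead of
 * 2^16) and is selected via mlx4_low_memory_profile() when running in a
 * kdump kernel, as done in mlx4_dev_cap() and mlx4_init_hca() below.
 */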

static int log_num_mac = 7;
module_param_named(log_num_mac, log_num_mac, int, 0444);
MODULE_PARM_DESC(log_num_mac, "Log2 max number of MACs per ETH port (1-7)");

static int log_num_vlan;
module_param_named(log_num_vlan, log_num_vlan, int, 0444);
MODULE_PARM_DESC(log_num_vlan, "Log2 max number of VLANs per ETH port (0-7)");
/* Log2 max number of VLANs per ETH port (0-7) */
#define MLX4_LOG_NUM_VLANS 7
#define MLX4_MIN_LOG_NUM_VLANS 0
#define MLX4_MIN_LOG_NUM_MAC 1

static bool use_prio;
module_param_named(use_prio, use_prio, bool, 0444);
MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports (deprecated)");

int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG);
module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444);
MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-7)");

static int port_type_array[2] = {MLX4_PORT_TYPE_NONE, MLX4_PORT_TYPE_NONE};
static int arr_argc = 2;
module_param_array(port_type_array, int, &arr_argc, 0444);
MODULE_PARM_DESC(port_type_array, "Array of port types: HW_DEFAULT (0) is default "
				  "1 for IB, 2 for Ethernet");

struct mlx4_port_config {
	struct list_head list;
	enum mlx4_port_type port_type[MLX4_MAX_PORTS + 1];
	struct pci_dev *pdev;
};

static atomic_t pf_loading = ATOMIC_INIT(0);
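/* Non-zero while a PF is initializing; mlx4_init_slave() checks this and
 * returns -EPROBE_DEFER so that a VF probe is retried once the PF is ready.
 */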

int mlx4_check_port_params(struct mlx4_dev *dev,
			   enum mlx4_port_type *port_type)
{
	int i;

	for (i = 0; i < dev->caps.num_ports - 1; i++) {
		if (port_type[i] != port_type[i + 1]) {
			if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) {
				mlx4_err(dev, "Only same port types supported on this HCA, aborting\n");
				return -EINVAL;
			}
		}
	}

	for (i = 0; i < dev->caps.num_ports; i++) {
		if (!(port_type[i] & dev->caps.supported_type[i+1])) {
			mlx4_err(dev, "Requested port type for port %d is not supported on this HCA\n",
				 i + 1);
			return -EINVAL;
		}
	}
	return 0;
}

static void mlx4_set_port_mask(struct mlx4_dev *dev)
{
	int i;

	for (i = 1; i <= dev->caps.num_ports; ++i)
		dev->caps.port_mask[i] = dev->caps.port_type[i];
}

static void mlx4_enable_cqe_eqe_stride(struct mlx4_dev *dev)
{
	struct mlx4_caps *dev_cap = &dev->caps;

	/* FW not supporting or cancelled by user */
	if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_EQE_STRIDE) ||
	    !(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_CQE_STRIDE))
		return;

	/* Must have 64B CQE_EQE enabled by FW to use bigger stride
	 * When FW has NCSI it may decide not to report 64B CQE/EQEs
	 */
	if (!(dev_cap->flags & MLX4_DEV_CAP_FLAG_64B_EQE) ||
	    !(dev_cap->flags & MLX4_DEV_CAP_FLAG_64B_CQE)) {
		dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE;
		dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE;
		return;
	}

	if (cache_line_size() == 128 || cache_line_size() == 256) {
		mlx4_dbg(dev, "Enabling CQE stride cacheLine supported\n");
		/* Changing the real data inside CQE size to 32B */
		dev_cap->flags &= ~MLX4_DEV_CAP_FLAG_64B_CQE;
		dev_cap->flags &= ~MLX4_DEV_CAP_FLAG_64B_EQE;

		if (mlx4_is_master(dev))
			dev_cap->function_caps |= MLX4_FUNC_CAP_EQE_CQE_STRIDE;
	} else {
		mlx4_dbg(dev, "Disabling CQE stride cacheLine unsupported\n");
		dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE;
		dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE;
	}
}

static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
{
	int err;
	int i;

	err = mlx4_QUERY_DEV_CAP(dev, dev_cap);
	if (err) {
		mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
		return err;
	}

	if (dev_cap->min_page_sz > PAGE_SIZE) {
		mlx4_err(dev, "HCA minimum page size of %d bigger than kernel PAGE_SIZE of %ld, aborting\n",
			 dev_cap->min_page_sz, PAGE_SIZE);
		return -ENODEV;
	}
	if (dev_cap->num_ports > MLX4_MAX_PORTS) {
		mlx4_err(dev, "HCA has %d ports, but we only support %d, aborting\n",
			 dev_cap->num_ports, MLX4_MAX_PORTS);
		return -ENODEV;
	}

	if (dev_cap->uar_size > pci_resource_len(dev->pdev, 2)) {
		mlx4_err(dev, "HCA reported UAR size of 0x%x bigger than PCI resource 2 size of 0x%llx, aborting\n",
			 dev_cap->uar_size,
			 (unsigned long long) pci_resource_len(dev->pdev, 2));
		return -ENODEV;
	}

	dev->caps.num_ports = dev_cap->num_ports;
	dev->phys_caps.num_phys_eqs = MLX4_MAX_EQ_NUM;
	for (i = 1; i <= dev->caps.num_ports; ++i) {
		dev->caps.vl_cap[i] = dev_cap->max_vl[i];
		dev->caps.ib_mtu_cap[i] = dev_cap->ib_mtu[i];
		dev->phys_caps.gid_phys_table_len[i] = dev_cap->max_gids[i];
		dev->phys_caps.pkey_phys_table_len[i] = dev_cap->max_pkeys[i];
		/* set gid and pkey table operating lengths by default
		 * to non-sriov values */
		dev->caps.gid_table_len[i] = dev_cap->max_gids[i];
		dev->caps.pkey_table_len[i] = dev_cap->max_pkeys[i];
		dev->caps.port_width_cap[i] = dev_cap->max_port_width[i];
		dev->caps.eth_mtu_cap[i] = dev_cap->eth_mtu[i];
		dev->caps.def_mac[i] = dev_cap->def_mac[i];
		dev->caps.supported_type[i] = dev_cap->supported_port_types[i];
		dev->caps.suggested_type[i] = dev_cap->suggested_type[i];
		dev->caps.default_sense[i] = dev_cap->default_sense[i];
		dev->caps.trans_type[i] = dev_cap->trans_type[i];
		dev->caps.vendor_oui[i] = dev_cap->vendor_oui[i];
		dev->caps.wavelength[i] = dev_cap->wavelength[i];
		dev->caps.trans_code[i] = dev_cap->trans_code[i];
	}

	dev->caps.uar_page_size = PAGE_SIZE;
	dev->caps.num_uars = dev_cap->uar_size / PAGE_SIZE;
	dev->caps.local_ca_ack_delay = dev_cap->local_ca_ack_delay;
	dev->caps.bf_reg_size = dev_cap->bf_reg_size;
	dev->caps.bf_regs_per_page = dev_cap->bf_regs_per_page;
	dev->caps.max_sq_sg = dev_cap->max_sq_sg;
	dev->caps.max_rq_sg = dev_cap->max_rq_sg;
	dev->caps.max_wqes = dev_cap->max_qp_sz;
	dev->caps.max_qp_init_rdma = dev_cap->max_requester_per_qp;
	dev->caps.max_srq_wqes = dev_cap->max_srq_sz;
	dev->caps.max_srq_sge = dev_cap->max_rq_sg - 1;
	dev->caps.reserved_srqs = dev_cap->reserved_srqs;
	dev->caps.max_sq_desc_sz = dev_cap->max_sq_desc_sz;
	dev->caps.max_rq_desc_sz = dev_cap->max_rq_desc_sz;
	/*
	 * Subtract 1 from the limit because we need to allocate a
	 * spare CQE so the HCA HW can tell the difference between an
	 * empty CQ and a full CQ.
	 */
	dev->caps.max_cqes = dev_cap->max_cq_sz - 1;
	dev->caps.reserved_cqs = dev_cap->reserved_cqs;
	dev->caps.reserved_eqs = dev_cap->reserved_eqs;
	dev->caps.reserved_mtts = dev_cap->reserved_mtts;
	dev->caps.reserved_mrws = dev_cap->reserved_mrws;

	/* The first 128 UARs are used for EQ doorbells */
	dev->caps.reserved_uars = max_t(int, 128, dev_cap->reserved_uars);
	dev->caps.reserved_pds = dev_cap->reserved_pds;
	dev->caps.reserved_xrcds = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ?
					dev_cap->reserved_xrcds : 0;
	dev->caps.max_xrcds = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ?
					dev_cap->max_xrcds : 0;
	dev->caps.mtt_entry_sz = dev_cap->mtt_entry_sz;

	dev->caps.max_msg_sz = dev_cap->max_msg_sz;
	dev->caps.page_size_cap = ~(u32) (dev_cap->min_page_sz - 1);
	dev->caps.flags = dev_cap->flags;
	dev->caps.flags2 = dev_cap->flags2;
	dev->caps.bmme_flags = dev_cap->bmme_flags;
	dev->caps.reserved_lkey = dev_cap->reserved_lkey;
	dev->caps.stat_rate_support = dev_cap->stat_rate_support;
	dev->caps.max_gso_sz = dev_cap->max_gso_sz;
	dev->caps.max_rss_tbl_sz = dev_cap->max_rss_tbl_sz;

	/* Sense port always allowed on supported devices for ConnectX-1 and -2 */
	if (mlx4_priv(dev)->pci_dev_data & MLX4_PCI_DEV_FORCE_SENSE_PORT)
		dev->caps.flags |= MLX4_DEV_CAP_FLAG_SENSE_SUPPORT;
	/* Don't do sense port on multifunction devices (for now at least) */
	if (mlx4_is_mfunc(dev))
		dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_SENSE_SUPPORT;

	if (mlx4_low_memory_profile()) {
		dev->caps.log_num_macs = MLX4_MIN_LOG_NUM_MAC;
		dev->caps.log_num_vlans = MLX4_MIN_LOG_NUM_VLANS;
	} else {
		dev->caps.log_num_macs = log_num_mac;
		dev->caps.log_num_vlans = MLX4_LOG_NUM_VLANS;
	}

	for (i = 1; i <= dev->caps.num_ports; ++i) {
		dev->caps.port_type[i] = MLX4_PORT_TYPE_NONE;
		if (dev->caps.supported_type[i]) {
			/* if only ETH is supported - assign ETH */
			if (dev->caps.supported_type[i] == MLX4_PORT_TYPE_ETH)
				dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH;
			/* if only IB is supported, assign IB */
			else if (dev->caps.supported_type[i] ==
				 MLX4_PORT_TYPE_IB)
				dev->caps.port_type[i] = MLX4_PORT_TYPE_IB;
			else {
				/* if IB and ETH are supported, we set the port
				 * type according to user selection of port type;
				 * if user selected none, take the FW hint */
				if (port_type_array[i - 1] == MLX4_PORT_TYPE_NONE)
					dev->caps.port_type[i] = dev->caps.suggested_type[i] ?
						MLX4_PORT_TYPE_ETH : MLX4_PORT_TYPE_IB;
				else
					dev->caps.port_type[i] = port_type_array[i - 1];
			}
		}
		/*
		 * Link sensing is allowed on the port if 3 conditions are true:
		 * 1. Both protocols are supported on the port.
		 * 2. Different types are supported on the port
		 * 3. FW declared that it supports link sensing
		 */
		mlx4_priv(dev)->sense.sense_allowed[i] =
			((dev->caps.supported_type[i] == MLX4_PORT_TYPE_AUTO) &&
			 (dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) &&
			 (dev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT));

		/*
		 * If "default_sense" bit is set, we move the port to "AUTO" mode
		 * and perform sense_port FW command to try and set the correct
		 * port type from beginning
		 */
		if (mlx4_priv(dev)->sense.sense_allowed[i] && dev->caps.default_sense[i]) {
			enum mlx4_port_type sensed_port = MLX4_PORT_TYPE_NONE;
			dev->caps.possible_type[i] = MLX4_PORT_TYPE_AUTO;
			mlx4_SENSE_PORT(dev, i, &sensed_port);
			if (sensed_port != MLX4_PORT_TYPE_NONE)
				dev->caps.port_type[i] = sensed_port;
		} else {
			dev->caps.possible_type[i] = dev->caps.port_type[i];
		}

		if (dev->caps.log_num_macs > dev_cap->log_max_macs[i]) {
			dev->caps.log_num_macs = dev_cap->log_max_macs[i];
			mlx4_warn(dev, "Requested number of MACs is too much for port %d, reducing to %d\n",
				  i, 1 << dev->caps.log_num_macs);
		}
		if (dev->caps.log_num_vlans > dev_cap->log_max_vlans[i]) {
			dev->caps.log_num_vlans = dev_cap->log_max_vlans[i];
			mlx4_warn(dev, "Requested number of VLANs is too much for port %d, reducing to %d\n",
				  i, 1 << dev->caps.log_num_vlans);
		}
	}

	dev->caps.max_counters = 1 << ilog2(dev_cap->max_counters);

	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] = dev_cap->reserved_qps;
	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] =
		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] =
		(1 << dev->caps.log_num_macs) *
		(1 << dev->caps.log_num_vlans) *
		dev->caps.num_ports;
	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH] = MLX4_NUM_FEXCH;

	dev->caps.reserved_qps = dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] +
		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] +
		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] +
		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH];

	dev->caps.sqp_demux = (mlx4_is_master(dev)) ? MLX4_MAX_NUM_SLAVES : 0;

	if (!enable_64b_cqe_eqe && !mlx4_is_slave(dev)) {
		if (dev_cap->flags &
		    (MLX4_DEV_CAP_FLAG_64B_CQE | MLX4_DEV_CAP_FLAG_64B_EQE)) {
			mlx4_warn(dev, "64B EQEs/CQEs supported by the device but not enabled\n");
			dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_64B_CQE;
			dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_64B_EQE;
		}

		if (dev_cap->flags2 &
		    (MLX4_DEV_CAP_FLAG2_CQE_STRIDE |
		     MLX4_DEV_CAP_FLAG2_EQE_STRIDE)) {
			mlx4_warn(dev, "Disabling EQE/CQE stride per user request\n");
			dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE;
			dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE;
		}
	}

	if ((dev->caps.flags &
	    (MLX4_DEV_CAP_FLAG_64B_CQE | MLX4_DEV_CAP_FLAG_64B_EQE)) &&
	    mlx4_is_master(dev))
		dev->caps.function_caps |= MLX4_FUNC_CAP_64B_EQE_CQE;

	if (!mlx4_is_slave(dev))
		mlx4_enable_cqe_eqe_stride(dev);

	return 0;
}

static int mlx4_get_pcie_dev_link_caps(struct mlx4_dev *dev,
				       enum pci_bus_speed *speed,
				       enum pcie_link_width *width)
{
	u32 lnkcap1, lnkcap2;
	int err1, err2;

#define PCIE_MLW_CAP_SHIFT 4	/* start of MLW mask in link capabilities */

	*speed = PCI_SPEED_UNKNOWN;
	*width = PCIE_LNK_WIDTH_UNKNOWN;

	err1 = pcie_capability_read_dword(dev->pdev, PCI_EXP_LNKCAP, &lnkcap1);
	err2 = pcie_capability_read_dword(dev->pdev, PCI_EXP_LNKCAP2, &lnkcap2);
	if (!err2 && lnkcap2) { /* PCIe r3.0-compliant */
		if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB)
			*speed = PCIE_SPEED_8_0GT;
		else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB)
			*speed = PCIE_SPEED_5_0GT;
		else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB)
			*speed = PCIE_SPEED_2_5GT;
	}
	if (!err1) {
		*width = (lnkcap1 & PCI_EXP_LNKCAP_MLW) >> PCIE_MLW_CAP_SHIFT;
		if (!lnkcap2) { /* pre-r3.0 */
			if (lnkcap1 & PCI_EXP_LNKCAP_SLS_5_0GB)
				*speed = PCIE_SPEED_5_0GT;
			else if (lnkcap1 & PCI_EXP_LNKCAP_SLS_2_5GB)
				*speed = PCIE_SPEED_2_5GT;
		}
	}

	if (*speed == PCI_SPEED_UNKNOWN || *width == PCIE_LNK_WIDTH_UNKNOWN) {
		return err1 ? err1 :
			err2 ? err2 : -EINVAL;
	}
	return 0;
}
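/* Note the precedence above: an LNKCAP2 speed (PCIe r3.0+) wins; the
 * LNKCAP SLS bits are consulted only when LNKCAP2 reads back as zero,
 * i.e. on pre-r3.0 devices.
 */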

static void mlx4_check_pcie_caps(struct mlx4_dev *dev)
{
	enum pcie_link_width width, width_cap;
	enum pci_bus_speed speed, speed_cap;
	int err;

#define PCIE_SPEED_STR(speed) \
	(speed == PCIE_SPEED_8_0GT ? "8.0GT/s" : \
	 speed == PCIE_SPEED_5_0GT ? "5.0GT/s" : \
	 speed == PCIE_SPEED_2_5GT ? "2.5GT/s" : \
	 "Unknown")

	err = mlx4_get_pcie_dev_link_caps(dev, &speed_cap, &width_cap);
	if (err) {
		mlx4_warn(dev,
			  "Unable to determine PCIe device BW capabilities\n");
		return;
	}

	err = pcie_get_minimum_link(dev->pdev, &speed, &width);
	if (err || speed == PCI_SPEED_UNKNOWN ||
	    width == PCIE_LNK_WIDTH_UNKNOWN) {
		mlx4_warn(dev,
			  "Unable to determine PCI device chain minimum BW\n");
		return;
	}

	if (width != width_cap || speed != speed_cap)
		mlx4_warn(dev,
			  "PCIe BW is different than device's capability\n");

	mlx4_info(dev, "PCIe link speed is %s, device supports %s\n",
		  PCIE_SPEED_STR(speed), PCIE_SPEED_STR(speed_cap));
	mlx4_info(dev, "PCIe link width is x%d, device supports x%d\n",
		  width, width_cap);
	return;
}

/* Check whether there are live VFs and return the number of them */
static int mlx4_how_many_lives_vf(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *s_state;
	int i;
	int ret = 0;

	for (i = 1/*the ppf is 0*/; i < dev->num_slaves; ++i) {
		s_state = &priv->mfunc.master.slave_state[i];
		if (s_state->active && s_state->last_cmd !=
		    MLX4_COMM_CMD_RESET) {
			mlx4_warn(dev, "%s: slave: %d is still active\n",
				  __func__, i);
			ret++;
		}
	}
	return ret;
}
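/* Per the patch subject ("Don't disable SRIOV if there are active VFs"),
 * this live-VF count is what lets the PF teardown path skip
 * pci_disable_sriov() while VFs are still in use, rather than pulling
 * SRIOV out from under them (the teardown caller is outside this section).
 */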

int mlx4_get_parav_qkey(struct mlx4_dev *dev, u32 qpn, u32 *qkey)
{
	u32 qk = MLX4_RESERVED_QKEY_BASE;

	if (qpn >= dev->phys_caps.base_tunnel_sqpn + 8 * MLX4_MFUNC_MAX ||
	    qpn < dev->phys_caps.base_proxy_sqpn)
		return -EINVAL;

	if (qpn >= dev->phys_caps.base_tunnel_sqpn)
		/* tunnel qp */
		qk += qpn - dev->phys_caps.base_tunnel_sqpn;
	else
		qk += qpn - dev->phys_caps.base_proxy_sqpn;
	*qkey = qk;
	return 0;
}
EXPORT_SYMBOL(mlx4_get_parav_qkey);
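/* In other words: a valid qpn lies in [base_proxy_sqpn,
 * base_tunnel_sqpn + 8 * MLX4_MFUNC_MAX), and the returned qkey is
 * MLX4_RESERVED_QKEY_BASE plus the qpn's offset into its own range,
 * proxy or tunnel.
 */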

void mlx4_sync_pkey_table(struct mlx4_dev *dev, int slave, int port, int i, int val)
{
	struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev);

	if (!mlx4_is_master(dev))
		return;

	priv->virt2phys_pkey[slave][port - 1][i] = val;
}
EXPORT_SYMBOL(mlx4_sync_pkey_table);

void mlx4_put_slave_node_guid(struct mlx4_dev *dev, int slave, __be64 guid)
{
	struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev);

	if (!mlx4_is_master(dev))
		return;

	priv->slave_node_guids[slave] = guid;
}
EXPORT_SYMBOL(mlx4_put_slave_node_guid);

__be64 mlx4_get_slave_node_guid(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev);

	if (!mlx4_is_master(dev))
		return 0;

	return priv->slave_node_guids[slave];
}
EXPORT_SYMBOL(mlx4_get_slave_node_guid);

int mlx4_is_slave_active(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *s_slave;

	if (!mlx4_is_master(dev))
		return 0;

	s_slave = &priv->mfunc.master.slave_state[slave];
	return !!s_slave->active;
}
EXPORT_SYMBOL(mlx4_is_slave_active);

static void slave_adjust_steering_mode(struct mlx4_dev *dev,
				       struct mlx4_dev_cap *dev_cap,
				       struct mlx4_init_hca_param *hca_param)
{
	dev->caps.steering_mode = hca_param->steering_mode;
	if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
		dev->caps.num_qp_per_mgm = dev_cap->fs_max_num_qp_per_entry;
		dev->caps.fs_log_max_ucast_qp_range_size =
			dev_cap->fs_log_max_ucast_qp_range_size;
	} else
		dev->caps.num_qp_per_mgm =
			4 * ((1 << hca_param->log_mc_entry_sz)/16 - 2);

	mlx4_dbg(dev, "Steering mode is: %s\n",
		 mlx4_steering_mode_str(dev->caps.steering_mode));
}

static int mlx4_slave_cap(struct mlx4_dev *dev)
{
	int err;
	u32 page_size;
	struct mlx4_dev_cap dev_cap;
	struct mlx4_func_cap func_cap;
	struct mlx4_init_hca_param hca_param;
	int i;

	memset(&hca_param, 0, sizeof(hca_param));
	err = mlx4_QUERY_HCA(dev, &hca_param);
	if (err) {
		mlx4_err(dev, "QUERY_HCA command failed, aborting\n");
		return err;
	}

	/* fail if the hca has an unknown global capability
	 * at this time global_caps should be always zeroed
	 */
	if (hca_param.global_caps) {
		mlx4_err(dev, "Unknown hca global capabilities\n");
		return -ENOSYS;
	}

	mlx4_log_num_mgm_entry_size = hca_param.log_mc_entry_sz;

	dev->caps.hca_core_clock = hca_param.hca_core_clock;

	memset(&dev_cap, 0, sizeof(dev_cap));
	dev->caps.max_qp_dest_rdma = 1 << hca_param.log_rd_per_qp;
	err = mlx4_dev_cap(dev, &dev_cap);
	if (err) {
		mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
		return err;
	}

	err = mlx4_QUERY_FW(dev);
	if (err)
		mlx4_err(dev, "QUERY_FW command failed: could not get FW version\n");

	page_size = ~dev->caps.page_size_cap + 1;
	mlx4_warn(dev, "HCA minimum page size:%d\n", page_size);
	if (page_size > PAGE_SIZE) {
		mlx4_err(dev, "HCA minimum page size of %d bigger than kernel PAGE_SIZE of %ld, aborting\n",
			 page_size, PAGE_SIZE);
		return -ENODEV;
	}

	/* slave gets uar page size from QUERY_HCA fw command */
	dev->caps.uar_page_size = 1 << (hca_param.uar_page_sz + 12);

	/* TODO: relax this assumption */
	if (dev->caps.uar_page_size != PAGE_SIZE) {
		mlx4_err(dev, "UAR size:%d != kernel PAGE_SIZE of %ld\n",
			 dev->caps.uar_page_size, PAGE_SIZE);
		return -ENODEV;
	}

	memset(&func_cap, 0, sizeof(func_cap));
	err = mlx4_QUERY_FUNC_CAP(dev, 0, &func_cap);
	if (err) {
		mlx4_err(dev, "QUERY_FUNC_CAP general command failed, aborting (%d)\n",
			 err);
		return err;
	}

	if ((func_cap.pf_context_behaviour | PF_CONTEXT_BEHAVIOUR_MASK) !=
	    PF_CONTEXT_BEHAVIOUR_MASK) {
		mlx4_err(dev, "Unknown pf context behaviour\n");
		return -ENOSYS;
	}

	dev->caps.num_ports = func_cap.num_ports;
	dev->quotas.qp = func_cap.qp_quota;
	dev->quotas.srq = func_cap.srq_quota;
	dev->quotas.cq = func_cap.cq_quota;
	dev->quotas.mpt = func_cap.mpt_quota;
	dev->quotas.mtt = func_cap.mtt_quota;
	dev->caps.num_qps = 1 << hca_param.log_num_qps;
	dev->caps.num_srqs = 1 << hca_param.log_num_srqs;
	dev->caps.num_cqs = 1 << hca_param.log_num_cqs;
	dev->caps.num_mpts = 1 << hca_param.log_mpt_sz;
	dev->caps.num_eqs = func_cap.max_eq;
	dev->caps.reserved_eqs = func_cap.reserved_eq;
	dev->caps.num_pds = MLX4_NUM_PDS;
	dev->caps.num_mgms = 0;
	dev->caps.num_amgms = 0;

	if (dev->caps.num_ports > MLX4_MAX_PORTS) {
		mlx4_err(dev, "HCA has %d ports, but we only support %d, aborting\n",
			 dev->caps.num_ports, MLX4_MAX_PORTS);
		return -ENODEV;
	}

	dev->caps.qp0_qkey = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL);
	dev->caps.qp0_tunnel = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL);
	dev->caps.qp0_proxy = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL);
	dev->caps.qp1_tunnel = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL);
	dev->caps.qp1_proxy = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL);

	if (!dev->caps.qp0_tunnel || !dev->caps.qp0_proxy ||
	    !dev->caps.qp1_tunnel || !dev->caps.qp1_proxy ||
	    !dev->caps.qp0_qkey) {
		err = -ENOMEM;
		goto err_mem;
	}

	for (i = 1; i <= dev->caps.num_ports; ++i) {
		err = mlx4_QUERY_FUNC_CAP(dev, (u32) i, &func_cap);
		if (err) {
			mlx4_err(dev, "QUERY_FUNC_CAP port command failed for port %d, aborting (%d)\n",
				 i, err);
			goto err_mem;
		}
		dev->caps.qp0_qkey[i - 1] = func_cap.qp0_qkey;
		dev->caps.qp0_tunnel[i - 1] = func_cap.qp0_tunnel_qpn;
		dev->caps.qp0_proxy[i - 1] = func_cap.qp0_proxy_qpn;
		dev->caps.qp1_tunnel[i - 1] = func_cap.qp1_tunnel_qpn;
		dev->caps.qp1_proxy[i - 1] = func_cap.qp1_proxy_qpn;
		dev->caps.port_mask[i] = dev->caps.port_type[i];
		dev->caps.phys_port_id[i] = func_cap.phys_port_id;
		if (mlx4_get_slave_pkey_gid_tbl_len(dev, i,
						    &dev->caps.gid_table_len[i],
						    &dev->caps.pkey_table_len[i]))
			goto err_mem;
	}

	if (dev->caps.uar_page_size * (dev->caps.num_uars -
				       dev->caps.reserved_uars) >
	    pci_resource_len(dev->pdev, 2)) {
		mlx4_err(dev, "HCA reported UAR region size of 0x%x bigger than PCI resource 2 size of 0x%llx, aborting\n",
			 dev->caps.uar_page_size * dev->caps.num_uars,
			 (unsigned long long) pci_resource_len(dev->pdev, 2));
		goto err_mem;
	}

	if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_64B_EQE_ENABLED) {
		dev->caps.eqe_size = 64;
		dev->caps.eqe_factor = 1;
	} else {
		dev->caps.eqe_size = 32;
		dev->caps.eqe_factor = 0;
	}

	if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_64B_CQE_ENABLED) {
		dev->caps.cqe_size = 64;
		dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE;
	} else {
		dev->caps.cqe_size = 32;
	}

	if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_EQE_STRIDE_ENABLED) {
		dev->caps.eqe_size = hca_param.eqe_size;
		dev->caps.eqe_factor = 0;
	}

	if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_CQE_STRIDE_ENABLED) {
		dev->caps.cqe_size = hca_param.cqe_size;
		/* User still need to know when CQE > 32B */
		dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE;
	}

	dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
	mlx4_warn(dev, "Timestamping is not supported in slave mode\n");

	slave_adjust_steering_mode(dev, &dev_cap, &hca_param);

	return 0;

err_mem:
	kfree(dev->caps.qp0_qkey);
	kfree(dev->caps.qp0_tunnel);
	kfree(dev->caps.qp0_proxy);
	kfree(dev->caps.qp1_tunnel);
	kfree(dev->caps.qp1_proxy);
	dev->caps.qp0_qkey = NULL;
	dev->caps.qp0_tunnel = NULL;
	dev->caps.qp0_proxy = NULL;
	dev->caps.qp1_tunnel = NULL;
	dev->caps.qp1_proxy = NULL;

	return err;
}

static void mlx4_request_modules(struct mlx4_dev *dev)
{
	int port;
	int has_ib_port = false;
	int has_eth_port = false;
#define EN_DRV_NAME	"mlx4_en"
#define IB_DRV_NAME	"mlx4_ib"

	for (port = 1; port <= dev->caps.num_ports; port++) {
		if (dev->caps.port_type[port] == MLX4_PORT_TYPE_IB)
			has_ib_port = true;
		else if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH)
			has_eth_port = true;
	}

	if (has_eth_port)
		request_module_nowait(EN_DRV_NAME);
	if (has_ib_port || (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE))
		request_module_nowait(IB_DRV_NAME);
}

/*
 * Change the port configuration of the device.
 * Every user of this function must hold the port mutex.
 */
int mlx4_change_port_types(struct mlx4_dev *dev,
			   enum mlx4_port_type *port_types)
{
	int err = 0;
	int change = 0;
	int port;

	for (port = 0; port < dev->caps.num_ports; port++) {
		/* Change the port type only if the new type is different
		 * from the current, and not set to Auto */
		if (port_types[port] != dev->caps.port_type[port + 1])
			change = 1;
	}
	if (change) {
		mlx4_unregister_device(dev);
		for (port = 1; port <= dev->caps.num_ports; port++) {
			mlx4_CLOSE_PORT(dev, port);
			dev->caps.port_type[port] = port_types[port - 1];
			err = mlx4_SET_PORT(dev, port, -1);
			if (err) {
				mlx4_err(dev, "Failed to set port %d, aborting\n",
					 port);
				goto out;
			}
		}
		mlx4_set_port_mask(dev);
		err = mlx4_register_device(dev);
		if (err) {
			mlx4_err(dev, "Failed to register device\n");
			goto out;
		}
		mlx4_request_modules(dev);
	}

out:
	return err;
}

static ssize_t show_port_type(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
						   port_attr);
	struct mlx4_dev *mdev = info->dev;
	char type[8];

	sprintf(type, "%s",
		(mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_IB) ?
		"ib" : "eth");
	if (mdev->caps.possible_type[info->port] == MLX4_PORT_TYPE_AUTO)
		sprintf(buf, "auto (%s)\n", type);
	else
		sprintf(buf, "%s\n", type);

	return strlen(buf);
}

static ssize_t set_port_type(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
						   port_attr);
	struct mlx4_dev *mdev = info->dev;
	struct mlx4_priv *priv = mlx4_priv(mdev);
	enum mlx4_port_type types[MLX4_MAX_PORTS];
	enum mlx4_port_type new_types[MLX4_MAX_PORTS];
	int i;
	int err = 0;

	if (!strcmp(buf, "ib\n"))
		info->tmp_type = MLX4_PORT_TYPE_IB;
	else if (!strcmp(buf, "eth\n"))
		info->tmp_type = MLX4_PORT_TYPE_ETH;
	else if (!strcmp(buf, "auto\n"))
		info->tmp_type = MLX4_PORT_TYPE_AUTO;
	else {
		mlx4_err(mdev, "%s is not supported port type\n", buf);
		return -EINVAL;
	}

	mlx4_stop_sense(mdev);
	mutex_lock(&priv->port_mutex);
	/* Possible type is always the one that was delivered */
	mdev->caps.possible_type[info->port] = info->tmp_type;

	for (i = 0; i < mdev->caps.num_ports; i++) {
		types[i] = priv->port[i+1].tmp_type ? priv->port[i+1].tmp_type :
					mdev->caps.possible_type[i+1];
		if (types[i] == MLX4_PORT_TYPE_AUTO)
			types[i] = mdev->caps.port_type[i+1];
	}

	if (!(mdev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) &&
	    !(mdev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT)) {
		for (i = 1; i <= mdev->caps.num_ports; i++) {
			if (mdev->caps.possible_type[i] == MLX4_PORT_TYPE_AUTO) {
				mdev->caps.possible_type[i] = mdev->caps.port_type[i];
				err = -EINVAL;
			}
		}
	}
	if (err) {
		mlx4_err(mdev, "Auto sensing is not supported on this HCA. Set only 'eth' or 'ib' for both ports (should be the same)\n");
		goto out;
	}

	mlx4_do_sense_ports(mdev, new_types, types);

	err = mlx4_check_port_params(mdev, new_types);
	if (err)
		goto out;

	/* We are about to apply the changes after the configuration
	 * was verified, no need to remember the temporary types
	 * any more */
	for (i = 0; i < mdev->caps.num_ports; i++)
		priv->port[i + 1].tmp_type = 0;

	err = mlx4_change_port_types(mdev, new_types);

out:
	mlx4_start_sense(mdev);
	mutex_unlock(&priv->port_mutex);
	return err ? err : count;
}

enum ibta_mtu {
	IB_MTU_256  = 1,
	IB_MTU_512  = 2,
	IB_MTU_1024 = 3,
	IB_MTU_2048 = 4,
	IB_MTU_4096 = 5
};

static inline int int_to_ibta_mtu(int mtu)
{
	switch (mtu) {
	case 256:  return IB_MTU_256;
	case 512:  return IB_MTU_512;
	case 1024: return IB_MTU_1024;
	case 2048: return IB_MTU_2048;
	case 4096: return IB_MTU_4096;
	default: return -1;
	}
}

static inline int ibta_mtu_to_int(enum ibta_mtu mtu)
{
	switch (mtu) {
	case IB_MTU_256:  return  256;
	case IB_MTU_512:  return  512;
	case IB_MTU_1024: return 1024;
	case IB_MTU_2048: return 2048;
	case IB_MTU_4096: return 4096;
	default: return -1;
	}
}

static ssize_t show_port_ib_mtu(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
						   port_mtu_attr);
	struct mlx4_dev *mdev = info->dev;

	if (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_ETH)
		mlx4_warn(mdev, "port level mtu is only used for IB ports\n");

	sprintf(buf, "%d\n",
		ibta_mtu_to_int(mdev->caps.port_ib_mtu[info->port]));
	return strlen(buf);
}

static ssize_t set_port_ib_mtu(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t count)
{
	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
						   port_mtu_attr);
	struct mlx4_dev *mdev = info->dev;
	struct mlx4_priv *priv = mlx4_priv(mdev);
	int err, port, mtu, ibta_mtu = -1;

	if (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_ETH) {
		mlx4_warn(mdev, "port level mtu is only used for IB ports\n");
		return -EINVAL;
	}

	err = kstrtoint(buf, 0, &mtu);
	if (!err)
		ibta_mtu = int_to_ibta_mtu(mtu);

	if (err || ibta_mtu < 0) {
		mlx4_err(mdev, "%s is invalid IBTA mtu\n", buf);
		return -EINVAL;
	}

	mdev->caps.port_ib_mtu[info->port] = ibta_mtu;

	mlx4_stop_sense(mdev);
	mutex_lock(&priv->port_mutex);
	mlx4_unregister_device(mdev);
	for (port = 1; port <= mdev->caps.num_ports; port++) {
		mlx4_CLOSE_PORT(mdev, port);
		err = mlx4_SET_PORT(mdev, port, -1);
		if (err) {
			mlx4_err(mdev, "Failed to set port %d, aborting\n",
				 port);
			goto err_set_port;
		}
	}
	err = mlx4_register_device(mdev);
err_set_port:
	mutex_unlock(&priv->port_mutex);
	mlx4_start_sense(mdev);
	return err ? err : count;
}

static int mlx4_load_fw(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;

	priv->fw.fw_icm = mlx4_alloc_icm(dev, priv->fw.fw_pages,
					 GFP_HIGHUSER | __GFP_NOWARN, 0);
	if (!priv->fw.fw_icm) {
		mlx4_err(dev, "Couldn't allocate FW area, aborting\n");
		return -ENOMEM;
	}

	err = mlx4_MAP_FA(dev, priv->fw.fw_icm);
	if (err) {
		mlx4_err(dev, "MAP_FA command failed, aborting\n");
		goto err_free;
	}

	err = mlx4_RUN_FW(dev);
	if (err) {
		mlx4_err(dev, "RUN_FW command failed, aborting\n");
		goto err_unmap_fa;
	}

	return 0;

err_unmap_fa:
	mlx4_UNMAP_FA(dev);

err_free:
	mlx4_free_icm(dev, priv->fw.fw_icm, 0);
	return err;
}

static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base,
				int cmpt_entry_sz)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;
	int num_eqs;

	err = mlx4_init_icm_table(dev, &priv->qp_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_QP *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err)
		goto err;

	err = mlx4_init_icm_table(dev, &priv->srq_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_SRQ *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, dev->caps.num_srqs,
				  dev->caps.reserved_srqs, 0, 0);
	if (err)
		goto err_qp;

	err = mlx4_init_icm_table(dev, &priv->cq_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_CQ *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, dev->caps.num_cqs,
				  dev->caps.reserved_cqs, 0, 0);
	if (err)
		goto err_srq;

	num_eqs = (mlx4_is_master(dev)) ? dev->phys_caps.num_phys_eqs :
		  dev->caps.num_eqs;
	err = mlx4_init_icm_table(dev, &priv->eq_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_EQ *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, num_eqs, num_eqs, 0, 0);
	if (err)
		goto err_cq;

	return 0;

err_cq:
	mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);

err_srq:
	mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);

err_qp:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);

err:
	return err;
}

static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
			 struct mlx4_init_hca_param *init_hca, u64 icm_size)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u64 aux_pages;
	int num_eqs;
	int err;

	err = mlx4_SET_ICM_SIZE(dev, icm_size, &aux_pages);
	if (err) {
		mlx4_err(dev, "SET_ICM_SIZE command failed, aborting\n");
		return err;
	}

	mlx4_dbg(dev, "%lld KB of HCA context requires %lld KB aux memory\n",
		 (unsigned long long) icm_size >> 10,
		 (unsigned long long) aux_pages << 2);

	priv->fw.aux_icm = mlx4_alloc_icm(dev, aux_pages,
					  GFP_HIGHUSER | __GFP_NOWARN, 0);
	if (!priv->fw.aux_icm) {
		mlx4_err(dev, "Couldn't allocate aux memory, aborting\n");
		return -ENOMEM;
	}

	err = mlx4_MAP_ICM_AUX(dev, priv->fw.aux_icm);
	if (err) {
		mlx4_err(dev, "MAP_ICM_AUX command failed, aborting\n");
		goto err_free_aux;
	}

	err = mlx4_init_cmpt_table(dev, init_hca->cmpt_base, dev_cap->cmpt_entry_sz);
	if (err) {
		mlx4_err(dev, "Failed to map cMPT context memory, aborting\n");
		goto err_unmap_aux;
	}

	num_eqs = (mlx4_is_master(dev)) ? dev->phys_caps.num_phys_eqs :
		  dev->caps.num_eqs;
	err = mlx4_init_icm_table(dev, &priv->eq_table.table,
				  init_hca->eqc_base, dev_cap->eqc_entry_sz,
				  num_eqs, num_eqs, 0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map EQ context memory, aborting\n");
		goto err_unmap_cmpt;
	}

	/*
	 * Reserved MTT entries must be aligned up to a cacheline
	 * boundary, since the FW will write to them, while the driver
	 * writes to all other MTT entries. (The variable
	 * dev->caps.mtt_entry_sz below is really the MTT segment
	 * size, not the raw entry size)
	 */
	dev->caps.reserved_mtts =
		ALIGN(dev->caps.reserved_mtts * dev->caps.mtt_entry_sz,
		      dma_get_cache_alignment()) / dev->caps.mtt_entry_sz;

	err = mlx4_init_icm_table(dev, &priv->mr_table.mtt_table,
				  init_hca->mtt_base,
				  dev->caps.mtt_entry_sz,
				  dev->caps.num_mtts,
				  dev->caps.reserved_mtts, 1, 0);
	if (err) {
		mlx4_err(dev, "Failed to map MTT context memory, aborting\n");
		goto err_unmap_eq;
	}

	err = mlx4_init_icm_table(dev, &priv->mr_table.dmpt_table,
				  init_hca->dmpt_base,
				  dev_cap->dmpt_entry_sz,
				  dev->caps.num_mpts,
				  dev->caps.reserved_mrws, 1, 1);
	if (err) {
		mlx4_err(dev, "Failed to map dMPT context memory, aborting\n");
		goto err_unmap_mtt;
	}

	err = mlx4_init_icm_table(dev, &priv->qp_table.qp_table,
				  init_hca->qpc_base,
				  dev_cap->qpc_entry_sz,
				  dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map QP context memory, aborting\n");
		goto err_unmap_dmpt;
	}

	err = mlx4_init_icm_table(dev, &priv->qp_table.auxc_table,
				  init_hca->auxc_base,
				  dev_cap->aux_entry_sz,
				  dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map AUXC context memory, aborting\n");
		goto err_unmap_qp;
	}

	err = mlx4_init_icm_table(dev, &priv->qp_table.altc_table,
				  init_hca->altc_base,
				  dev_cap->altc_entry_sz,
				  dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map ALTC context memory, aborting\n");
		goto err_unmap_auxc;
	}

	err = mlx4_init_icm_table(dev, &priv->qp_table.rdmarc_table,
				  init_hca->rdmarc_base,
				  dev_cap->rdmarc_entry_sz << priv->qp_table.rdmarc_shift,
				  dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map RDMARC context memory, aborting\n");
		goto err_unmap_altc;
	}

	err = mlx4_init_icm_table(dev, &priv->cq_table.table,
				  init_hca->cqc_base,
				  dev_cap->cqc_entry_sz,
				  dev->caps.num_cqs,
				  dev->caps.reserved_cqs, 0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map CQ context memory, aborting\n");
		goto err_unmap_rdmarc;
	}

	err = mlx4_init_icm_table(dev, &priv->srq_table.table,
				  init_hca->srqc_base,
				  dev_cap->srq_entry_sz,
				  dev->caps.num_srqs,
				  dev->caps.reserved_srqs, 0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map SRQ context memory, aborting\n");
		goto err_unmap_cq;
	}

	/*
	 * For flow steering device managed mode it is required to use
	 * mlx4_init_icm_table. For B0 steering mode it's not strictly
	 * required, but for simplicity just map the whole multicast
	 * group table now.  The table isn't very big and it's a lot
	 * easier than trying to track ref counts.
	 */
	err = mlx4_init_icm_table(dev, &priv->mcg_table.table,
				  init_hca->mc_base,
				  mlx4_get_mgm_entry_size(dev),
				  dev->caps.num_mgms + dev->caps.num_amgms,
				  dev->caps.num_mgms + dev->caps.num_amgms,
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map MCG context memory, aborting\n");
		goto err_unmap_srq;
	}

	return 0;

err_unmap_srq:
	mlx4_cleanup_icm_table(dev, &priv->srq_table.table);

err_unmap_cq:
	mlx4_cleanup_icm_table(dev, &priv->cq_table.table);

err_unmap_rdmarc:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table);

err_unmap_altc:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table);

err_unmap_auxc:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table);

err_unmap_qp:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);

err_unmap_dmpt:
	mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);

err_unmap_mtt:
	mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);

err_unmap_eq:
	mlx4_cleanup_icm_table(dev, &priv->eq_table.table);

err_unmap_cmpt:
	mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);

err_unmap_aux:
	mlx4_UNMAP_ICM_AUX(dev);

err_free_aux:
	mlx4_free_icm(dev, priv->fw.aux_icm, 0);

	return err;
}

static void mlx4_free_icms(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	mlx4_cleanup_icm_table(dev, &priv->mcg_table.table);
	mlx4_cleanup_icm_table(dev, &priv->srq_table.table);
	mlx4_cleanup_icm_table(dev, &priv->cq_table.table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);
	mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);
	mlx4_cleanup_icm_table(dev, &priv->eq_table.table);
	mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);

	mlx4_UNMAP_ICM_AUX(dev);
	mlx4_free_icm(dev, priv->fw.aux_icm, 0);
}

static void mlx4_slave_exit(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	mutex_lock(&priv->cmd.slave_cmd_mutex);
	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_TIME))
		mlx4_warn(dev, "Failed to close slave function\n");
	mutex_unlock(&priv->cmd.slave_cmd_mutex);
}

static int map_bf_area(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	resource_size_t bf_start;
	resource_size_t bf_len;
	int err = 0;

	if (!dev->caps.bf_reg_size)
		return -ENXIO;

	bf_start = pci_resource_start(dev->pdev, 2) +
			(dev->caps.num_uars << PAGE_SHIFT);
	bf_len = pci_resource_len(dev->pdev, 2) -
			(dev->caps.num_uars << PAGE_SHIFT);
	priv->bf_mapping = io_mapping_create_wc(bf_start, bf_len);
	if (!priv->bf_mapping)
		err = -ENOMEM;

	return err;
}

static void unmap_bf_area(struct mlx4_dev *dev)
{
	if (mlx4_priv(dev)->bf_mapping)
		io_mapping_free(mlx4_priv(dev)->bf_mapping);
}

cycle_t mlx4_read_clock(struct mlx4_dev *dev)
{
	u32 clockhi, clocklo, clockhi1;
	cycle_t cycles;
	int i;
	struct mlx4_priv *priv = mlx4_priv(dev);

	for (i = 0; i < 10; i++) {
		clockhi = swab32(readl(priv->clock_mapping));
		clocklo = swab32(readl(priv->clock_mapping + 4));
		clockhi1 = swab32(readl(priv->clock_mapping));
		if (clockhi == clockhi1)
			break;
	}

	cycles = (u64) clockhi << 32 | (u64) clocklo;

	return cycles;
}
EXPORT_SYMBOL_GPL(mlx4_read_clock);
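/* The hi/lo/hi read sequence above guards against a torn 64-bit read:
 * if the high word changed between the two reads, the counter wrapped
 * mid-sample and the loop retries (up to 10 times) until it sees a
 * consistent pair.
 */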

static int map_internal_clock(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	priv->clock_mapping =
		ioremap(pci_resource_start(dev->pdev, priv->fw.clock_bar) +
			priv->fw.clock_offset, MLX4_CLOCK_SIZE);

	if (!priv->clock_mapping)
		return -ENOMEM;

	return 0;
}

static void unmap_internal_clock(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	if (priv->clock_mapping)
		iounmap(priv->clock_mapping);
}

static void mlx4_close_hca(struct mlx4_dev *dev)
{
	unmap_internal_clock(dev);
	unmap_bf_area(dev);
	if (mlx4_is_slave(dev))
		mlx4_slave_exit(dev);
	else {
		mlx4_CLOSE_HCA(dev, 0);
		mlx4_free_icms(dev);
		mlx4_UNMAP_FA(dev);
		mlx4_free_icm(dev, mlx4_priv(dev)->fw.fw_icm, 0);
	}
}

static int mlx4_init_slave(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u64 dma = (u64) priv->mfunc.vhcr_dma;
	int ret_from_reset = 0;
	u32 slave_read;
	u32 cmd_channel_ver;

	if (atomic_read(&pf_loading)) {
		mlx4_warn(dev, "PF is not ready - Deferring probe\n");
		return -EPROBE_DEFER;
	}

	mutex_lock(&priv->cmd.slave_cmd_mutex);
	priv->cmd.max_cmds = 1;
	mlx4_warn(dev, "Sending reset\n");
	ret_from_reset = mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0,
				       MLX4_COMM_TIME);
	/* if we are in the middle of flr the slave will try
	 * NUM_OF_RESET_RETRIES times before leaving.*/
	if (ret_from_reset) {
		if (MLX4_DELAY_RESET_SLAVE == ret_from_reset) {
			mlx4_warn(dev, "slave is currently in the middle of FLR - Deferring probe\n");
			mutex_unlock(&priv->cmd.slave_cmd_mutex);
			return -EPROBE_DEFER;
		} else
			goto err;
	}

	/* check the driver version - the slave I/F revision
	 * must match the master's */
	slave_read = swab32(readl(&priv->mfunc.comm->slave_read));
	cmd_channel_ver = mlx4_comm_get_version();

	if (MLX4_COMM_GET_IF_REV(cmd_channel_ver) !=
	    MLX4_COMM_GET_IF_REV(slave_read)) {
		mlx4_err(dev, "slave driver version is not supported by the master\n");
		goto err;
	}

	mlx4_warn(dev, "Sending vhcr0\n");
	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR0, dma >> 48,
			  MLX4_COMM_TIME))
		goto err;
	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR1, dma >> 32,
			  MLX4_COMM_TIME))
		goto err;
	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR2, dma >> 16,
			  MLX4_COMM_TIME))
		goto err;
	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_EN, dma, MLX4_COMM_TIME))
		goto err;

	mutex_unlock(&priv->cmd.slave_cmd_mutex);
	return 0;

err:
	mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, 0);
	mutex_unlock(&priv->cmd.slave_cmd_mutex);
	return -EIO;
}
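/* The four VHCR commands above hand the 64-bit DMA address of the slave's
 * virtual HCR to the master in pieces (shifting in bits 63:48 down through
 * 15:0, the comm channel effectively carrying 16 bits per command), with
 * VHCR_EN both delivering the low bits and arming the channel.
 */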

static void mlx4_parav_master_pf_caps(struct mlx4_dev *dev)
{
	int i;

	for (i = 1; i <= dev->caps.num_ports; i++) {
		if (dev->caps.port_type[i] == MLX4_PORT_TYPE_ETH)
			dev->caps.gid_table_len[i] =
				mlx4_get_slave_num_gids(dev, 0, i);
		else
			dev->caps.gid_table_len[i] = 1;
		dev->caps.pkey_table_len[i] =
			dev->phys_caps.pkey_phys_table_len[i] - 1;
	}
}

static int choose_log_fs_mgm_entry_size(int qp_per_entry)
{
	int i = MLX4_MIN_MGM_LOG_ENTRY_SIZE;

	for (i = MLX4_MIN_MGM_LOG_ENTRY_SIZE; i <= MLX4_MAX_MGM_LOG_ENTRY_SIZE;
	      i++) {
		if (qp_per_entry <= 4 * ((1 << i) / 16 - 2))
			break;
	}

	return (i <= MLX4_MAX_MGM_LOG_ENTRY_SIZE) ? i : -1;
}
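/* Worked example: for qp_per_entry = 100 the loop stops at i = 9, since
 * 4 * (512/16 - 2) = 120 >= 100 while i = 8 only yields 56; 9 falls
 * inside the 7..12 range quoted in the log_num_mgm_entry_size parameter
 * description above.
 */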

static void choose_steering_mode(struct mlx4_dev *dev,
				 struct mlx4_dev_cap *dev_cap)
{
	if (mlx4_log_num_mgm_entry_size == -1 &&
	    dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_FS_EN &&
	    (!mlx4_is_mfunc(dev) ||
	     (dev_cap->fs_max_num_qp_per_entry >= (dev->num_vfs + 1))) &&
	    choose_log_fs_mgm_entry_size(dev_cap->fs_max_num_qp_per_entry) >=
		MLX4_MIN_MGM_LOG_ENTRY_SIZE) {
		dev->oper_log_mgm_entry_size =
			choose_log_fs_mgm_entry_size(dev_cap->fs_max_num_qp_per_entry);
		dev->caps.steering_mode = MLX4_STEERING_MODE_DEVICE_MANAGED;
		dev->caps.num_qp_per_mgm = dev_cap->fs_max_num_qp_per_entry;
		dev->caps.fs_log_max_ucast_qp_range_size =
			dev_cap->fs_log_max_ucast_qp_range_size;
	} else {
		if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER &&
		    dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)
			dev->caps.steering_mode = MLX4_STEERING_MODE_B0;
		else {
			dev->caps.steering_mode = MLX4_STEERING_MODE_A0;

			if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER ||
			    dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)
				mlx4_warn(dev, "Must have both UC_STEER and MC_STEER flags set to use B0 steering - falling back to A0 steering mode\n");
		}
		dev->oper_log_mgm_entry_size =
			mlx4_log_num_mgm_entry_size > 0 ?
			mlx4_log_num_mgm_entry_size :
			MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE;
		dev->caps.num_qp_per_mgm = mlx4_get_qp_per_mgm(dev);
	}
	mlx4_dbg(dev, "Steering mode is: %s, oper_log_mgm_entry_size = %d, modparam log_num_mgm_entry_size = %d\n",
		 mlx4_steering_mode_str(dev->caps.steering_mode),
		 dev->oper_log_mgm_entry_size,
		 mlx4_log_num_mgm_entry_size);
}
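
/*
 * Net effect of choose_steering_mode() above: device-managed flow steering
 * (DMFS) is preferred when the firmware advertises FS_EN and an MGM entry
 * can hold a QP for every function (PF plus all VFs); B0 steering requires
 * both the UC_STEER and MC_STEER flags; anything else falls back to the
 * original A0 mode.
 */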

static void choose_tunnel_offload_mode(struct mlx4_dev *dev,
				       struct mlx4_dev_cap *dev_cap)
{
	if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED &&
	    dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS)
		dev->caps.tunnel_offload_mode = MLX4_TUNNEL_OFFLOAD_MODE_VXLAN;
	else
		dev->caps.tunnel_offload_mode = MLX4_TUNNEL_OFFLOAD_MODE_NONE;

	mlx4_dbg(dev, "Tunneling offload mode is: %s\n",
		 (dev->caps.tunnel_offload_mode ==
		  MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) ? "vxlan" : "none");
}

static int mlx4_init_hca(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_adapter adapter;
	struct mlx4_dev_cap dev_cap;
	struct mlx4_mod_stat_cfg mlx4_cfg;
	struct mlx4_profile profile;
	struct mlx4_init_hca_param init_hca;
	u64 icm_size;
	int err;

	if (!mlx4_is_slave(dev)) {
		err = mlx4_QUERY_FW(dev);
		if (err) {
			if (err == -EACCES)
				mlx4_info(dev, "non-primary physical function, skipping\n");
			else
				mlx4_err(dev, "QUERY_FW command failed, aborting\n");
			return err;
		}

		err = mlx4_load_fw(dev);
		if (err) {
			mlx4_err(dev, "Failed to start FW, aborting\n");
			return err;
		}

		mlx4_cfg.log_pg_sz_m = 1;
		mlx4_cfg.log_pg_sz = 0;
		err = mlx4_MOD_STAT_CFG(dev, &mlx4_cfg);
		if (err)
			mlx4_warn(dev, "Failed to override log_pg_sz parameter\n");

		err = mlx4_dev_cap(dev, &dev_cap);
		if (err) {
			mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
			goto err_stop_fw;
		}

		choose_steering_mode(dev, &dev_cap);
		choose_tunnel_offload_mode(dev, &dev_cap);

		err = mlx4_get_phys_port_id(dev);
		if (err)
			mlx4_err(dev, "Failed to get physical port id\n");

		if (mlx4_is_master(dev))
			mlx4_parav_master_pf_caps(dev);

		if (mlx4_low_memory_profile()) {
			mlx4_info(dev, "Running from within kdump kernel. Using low memory profile\n");
			profile = low_mem_profile;
		} else {
			profile = default_profile;
		}
		if (dev->caps.steering_mode ==
		    MLX4_STEERING_MODE_DEVICE_MANAGED)
			profile.num_mcg = MLX4_FS_NUM_MCG;

		icm_size = mlx4_make_profile(dev, &profile, &dev_cap,
					     &init_hca);
		if ((long long) icm_size < 0) {
			err = icm_size;
			goto err_stop_fw;
		}

		dev->caps.max_fmr_maps = (1 << (32 - ilog2(dev->caps.num_mpts))) - 1;

		init_hca.log_uar_sz = ilog2(dev->caps.num_uars);
		init_hca.uar_page_sz = PAGE_SHIFT - 12;
		init_hca.mw_enabled = 0;
		if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW ||
		    dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN)
			init_hca.mw_enabled = INIT_HCA_TPT_MW_ENABLE;

		err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size);
		if (err)
			goto err_stop_fw;

		err = mlx4_INIT_HCA(dev, &init_hca);
		if (err) {
			mlx4_err(dev, "INIT_HCA command failed, aborting\n");
			goto err_free_icm;
		}
		/*
		 * If TS is supported by FW,
		 * read the HCA frequency using the QUERY_HCA command.
		 */
		if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) {
			memset(&init_hca, 0, sizeof(init_hca));
			err = mlx4_QUERY_HCA(dev, &init_hca);
			if (err) {
				mlx4_err(dev, "QUERY_HCA command failed, disable timestamp\n");
				dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
			} else {
				dev->caps.hca_core_clock =
					init_hca.hca_core_clock;
			}

			/* In case we got HCA frequency 0 - disable timestamping
			 * to avoid dividing by zero
			 */
			if (!dev->caps.hca_core_clock) {
				dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
				mlx4_err(dev,
					 "HCA frequency is 0 - timestamping is not supported\n");
			} else if (map_internal_clock(dev)) {
				/*
				 * Map internal clock;
				 * in case of failure disable timestamping
				 */
				dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
				mlx4_err(dev, "Failed to map internal clock. Timestamping is not supported\n");
			}
		}
	} else {
		err = mlx4_init_slave(dev);
		if (err) {
			if (err != -EPROBE_DEFER)
				mlx4_err(dev, "Failed to initialize slave\n");
			return err;
		}

		err = mlx4_slave_cap(dev);
		if (err) {
			mlx4_err(dev, "Failed to obtain slave caps\n");
			goto err_close;
		}
	}

	if (map_bf_area(dev))
		mlx4_dbg(dev, "Failed to map blue flame area\n");

	/* Only the master sets the ports; all the rest get them from it. */
	if (!mlx4_is_slave(dev))
		mlx4_set_port_mask(dev);

	err = mlx4_QUERY_ADAPTER(dev, &adapter);
	if (err) {
		mlx4_err(dev, "QUERY_ADAPTER command failed, aborting\n");
		goto unmap_bf;
	}

	priv->eq_table.inta_pin = adapter.inta_pin;
	memcpy(dev->board_id, adapter.board_id, sizeof dev->board_id);

	return 0;

unmap_bf:
	unmap_internal_clock(dev);
	unmap_bf_area(dev);

	if (mlx4_is_slave(dev)) {
		kfree(dev->caps.qp0_qkey);
		kfree(dev->caps.qp0_tunnel);
		kfree(dev->caps.qp0_proxy);
		kfree(dev->caps.qp1_tunnel);
		kfree(dev->caps.qp1_proxy);
	}

err_close:
	if (mlx4_is_slave(dev))
		mlx4_slave_exit(dev);
	else
		mlx4_CLOSE_HCA(dev, 0);

err_free_icm:
	if (!mlx4_is_slave(dev))
		mlx4_free_icms(dev);

err_stop_fw:
	if (!mlx4_is_slave(dev)) {
		mlx4_UNMAP_FA(dev);
		mlx4_free_icm(dev, priv->fw.fw_icm, 0);
	}
	return err;
}

static int mlx4_init_counters_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int nent;

	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
		return -ENOENT;

	nent = dev->caps.max_counters;
	return mlx4_bitmap_init(&priv->counters_bitmap, nent, nent - 1, 0, 0);
}

static void mlx4_cleanup_counters_table(struct mlx4_dev *dev)
{
	mlx4_bitmap_cleanup(&mlx4_priv(dev)->counters_bitmap);
}

int __mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
		return -ENOENT;

	*idx = mlx4_bitmap_alloc(&priv->counters_bitmap);
	if (*idx == -1)
		return -ENOMEM;

	return 0;
}

int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx)
{
	u64 out_param;
	int err;

	if (mlx4_is_mfunc(dev)) {
		err = mlx4_cmd_imm(dev, 0, &out_param, RES_COUNTER,
				   RES_OP_RESERVE, MLX4_CMD_ALLOC_RES,
				   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
		if (!err)
			*idx = get_param_l(&out_param);

		return err;
	}
	return __mlx4_counter_alloc(dev, idx);
}
EXPORT_SYMBOL_GPL(mlx4_counter_alloc);

void __mlx4_counter_free(struct mlx4_dev *dev, u32 idx)
{
	mlx4_bitmap_free(&mlx4_priv(dev)->counters_bitmap, idx, MLX4_USE_RR);
}

void mlx4_counter_free(struct mlx4_dev *dev, u32 idx)
{
	u64 in_param = 0;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, idx);
		mlx4_cmd(dev, in_param, RES_COUNTER, RES_OP_RESERVE,
			 MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
			 MLX4_CMD_WRAPPED);
		return;
	}
	__mlx4_counter_free(dev, idx);
}
EXPORT_SYMBOL_GPL(mlx4_counter_free);
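
/*
 * Hypothetical usage sketch (not part of this file): a consumer such as the
 * mlx4 Ethernet or IB driver would pair the two exported helpers above like
 * this:
 *
 *	u32 idx;
 *	int err = mlx4_counter_alloc(dev, &idx);
 *
 *	if (err && err != -ENOENT)
 *		return err;
 *	// ... attach idx to a QP context and read stats through it ...
 *	mlx4_counter_free(dev, idx);
 *
 * In multi-function mode the calls are transparently forwarded to the
 * resource wrapper via ALLOC_RES/FREE_RES commands.
 */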

static int mlx4_setup_hca(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;
	int port;
	__be32 ib_port_default_caps;

	err = mlx4_init_uar_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize user access region table, aborting\n");
		return err;
	}

	err = mlx4_uar_alloc(dev, &priv->driver_uar);
	if (err) {
		mlx4_err(dev, "Failed to allocate driver access region, aborting\n");
		goto err_uar_table_free;
	}

	priv->kar = ioremap((phys_addr_t) priv->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
	if (!priv->kar) {
		mlx4_err(dev, "Couldn't map kernel access region, aborting\n");
		err = -ENOMEM;
		goto err_uar_free;
	}

	err = mlx4_init_pd_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize protection domain table, aborting\n");
		goto err_kar_unmap;
	}

	err = mlx4_init_xrcd_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize reliable connection domain table, aborting\n");
		goto err_pd_table_free;
	}

	err = mlx4_init_mr_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize memory region table, aborting\n");
		goto err_xrcd_table_free;
	}

	if (!mlx4_is_slave(dev)) {
		err = mlx4_init_mcg_table(dev);
		if (err) {
			mlx4_err(dev, "Failed to initialize multicast group table, aborting\n");
			goto err_mr_table_free;
		}
		err = mlx4_config_mad_demux(dev);
		if (err) {
			mlx4_err(dev, "Failed in config_mad_demux, aborting\n");
			goto err_mcg_table_free;
		}
	}

	err = mlx4_init_eq_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize event queue table, aborting\n");
		goto err_mcg_table_free;
	}

	err = mlx4_cmd_use_events(dev);
	if (err) {
		mlx4_err(dev, "Failed to switch to event-driven firmware commands, aborting\n");
		goto err_eq_table_free;
	}

	err = mlx4_NOP(dev);
	if (err) {
		if (dev->flags & MLX4_FLAG_MSI_X) {
			mlx4_warn(dev, "NOP command failed to generate MSI-X interrupt (IRQ %d)\n",
				  priv->eq_table.eq[dev->caps.num_comp_vectors].irq);
			mlx4_warn(dev, "Trying again without MSI-X\n");
		} else {
			mlx4_err(dev, "NOP command failed to generate interrupt (IRQ %d), aborting\n",
				 priv->eq_table.eq[dev->caps.num_comp_vectors].irq);
			mlx4_err(dev, "BIOS or ACPI interrupt routing problem?\n");
		}

		goto err_cmd_poll;
	}

	mlx4_dbg(dev, "NOP command IRQ test passed\n");

	err = mlx4_init_cq_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize completion queue table, aborting\n");
		goto err_cmd_poll;
	}

	err = mlx4_init_srq_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize shared receive queue table, aborting\n");
		goto err_cq_table_free;
	}

	err = mlx4_init_qp_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize queue pair table, aborting\n");
		goto err_srq_table_free;
	}

	err = mlx4_init_counters_table(dev);
	if (err && err != -ENOENT) {
		mlx4_err(dev, "Failed to initialize counters table, aborting\n");
		goto err_qp_table_free;
	}

	if (!mlx4_is_slave(dev)) {
		for (port = 1; port <= dev->caps.num_ports; port++) {
			ib_port_default_caps = 0;
			err = mlx4_get_port_ib_caps(dev, port,
						    &ib_port_default_caps);
			if (err)
				mlx4_warn(dev, "failed to get port %d default ib capabilities (%d). Continuing with caps = 0\n",
					  port, err);
			dev->caps.ib_port_def_cap[port] = ib_port_default_caps;

			/* initialize per-slave default ib port capabilities */
			if (mlx4_is_master(dev)) {
				int i;
				for (i = 0; i < dev->num_slaves; i++) {
					if (i == mlx4_master_func_num(dev))
						continue;
					priv->mfunc.master.slave_state[i].ib_cap_mask[port] =
						ib_port_default_caps;
				}
			}

			if (mlx4_is_mfunc(dev))
				dev->caps.port_ib_mtu[port] = IB_MTU_2048;
			else
				dev->caps.port_ib_mtu[port] = IB_MTU_4096;

			err = mlx4_SET_PORT(dev, port, mlx4_is_master(dev) ?
					    dev->caps.pkey_table_len[port] : -1);
			if (err) {
				mlx4_err(dev, "Failed to set port %d, aborting\n",
					 port);
				goto err_counters_table_free;
			}
		}
	}

	return 0;

err_counters_table_free:
	mlx4_cleanup_counters_table(dev);

err_qp_table_free:
	mlx4_cleanup_qp_table(dev);

err_srq_table_free:
	mlx4_cleanup_srq_table(dev);

err_cq_table_free:
	mlx4_cleanup_cq_table(dev);

err_cmd_poll:
	mlx4_cmd_use_polling(dev);

err_eq_table_free:
	mlx4_cleanup_eq_table(dev);

err_mcg_table_free:
	if (!mlx4_is_slave(dev))
		mlx4_cleanup_mcg_table(dev);

err_mr_table_free:
	mlx4_cleanup_mr_table(dev);

err_xrcd_table_free:
	mlx4_cleanup_xrcd_table(dev);

err_pd_table_free:
	mlx4_cleanup_pd_table(dev);

err_kar_unmap:
	iounmap(priv->kar);

err_uar_free:
	mlx4_uar_free(dev, &priv->driver_uar);

err_uar_table_free:
	mlx4_cleanup_uar_table(dev);
	return err;
}

static void mlx4_enable_msi_x(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct msix_entry *entries;
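	/*
	 * Vector budget (reading of the expression below): one completion
	 * vector per online CPU plus one, per port, capped at
	 * MAX_MSIX_P_PORT per port, plus MSIX_LEGACY_SZ vectors kept for the
	 * shared legacy/async set, all capped at MAX_MSIX overall.
	 */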
	int nreq = min_t(int, dev->caps.num_ports *
			 min_t(int, num_online_cpus() + 1,
			       MAX_MSIX_P_PORT) + MSIX_LEGACY_SZ, MAX_MSIX);
	int i;

	if (msi_x) {
		nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs,
			     nreq);

		entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL);
		if (!entries)
			goto no_msi;

		for (i = 0; i < nreq; ++i)
			entries[i].entry = i;

		nreq = pci_enable_msix_range(dev->pdev, entries, 2, nreq);

		if (nreq < 0) {
			kfree(entries);
			goto no_msi;
		} else if (nreq < MSIX_LEGACY_SZ +
			   dev->caps.num_ports * MIN_MSIX_P_PORT) {
			/* Working in legacy mode, all EQs shared */
			dev->caps.comp_pool = 0;
			dev->caps.num_comp_vectors = nreq - 1;
		} else {
			dev->caps.comp_pool = nreq - MSIX_LEGACY_SZ;
			dev->caps.num_comp_vectors = MSIX_LEGACY_SZ - 1;
		}
		for (i = 0; i < nreq; ++i)
			priv->eq_table.eq[i].irq = entries[i].vector;

		dev->flags |= MLX4_FLAG_MSI_X;

		kfree(entries);
		return;
	}

no_msi:
	dev->caps.num_comp_vectors = 1;
	dev->caps.comp_pool = 0;

	for (i = 0; i < 2; ++i)
		priv->eq_table.eq[i].irq = dev->pdev->irq;
}

static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
{
	struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
	int err = 0;

	info->dev = dev;
	info->port = port;
	if (!mlx4_is_slave(dev)) {
		mlx4_init_mac_table(dev, &info->mac_table);
		mlx4_init_vlan_table(dev, &info->vlan_table);
		mlx4_init_roce_gid_table(dev, &info->gid_table);
		info->base_qpn = mlx4_get_base_qpn(dev, port);
	}

	sprintf(info->dev_name, "mlx4_port%d", port);
	info->port_attr.attr.name = info->dev_name;
	if (mlx4_is_mfunc(dev))
		info->port_attr.attr.mode = S_IRUGO;
	else {
		info->port_attr.attr.mode = S_IRUGO | S_IWUSR;
		info->port_attr.store = set_port_type;
	}
	info->port_attr.show = show_port_type;
	sysfs_attr_init(&info->port_attr.attr);

	err = device_create_file(&dev->pdev->dev, &info->port_attr);
	if (err) {
		mlx4_err(dev, "Failed to create file for port %d\n", port);
		info->port = -1;
	}

	sprintf(info->dev_mtu_name, "mlx4_port%d_mtu", port);
	info->port_mtu_attr.attr.name = info->dev_mtu_name;
	if (mlx4_is_mfunc(dev))
		info->port_mtu_attr.attr.mode = S_IRUGO;
	else {
		info->port_mtu_attr.attr.mode = S_IRUGO | S_IWUSR;
		info->port_mtu_attr.store = set_port_ib_mtu;
	}
	info->port_mtu_attr.show = show_port_ib_mtu;
	sysfs_attr_init(&info->port_mtu_attr.attr);

	err = device_create_file(&dev->pdev->dev, &info->port_mtu_attr);
	if (err) {
		mlx4_err(dev, "Failed to create mtu file for port %d\n", port);
		device_remove_file(&info->dev->pdev->dev, &info->port_attr);
		info->port = -1;
	}

	return err;
}

static void mlx4_cleanup_port_info(struct mlx4_port_info *info)
{
	if (info->port < 0)
		return;

	device_remove_file(&info->dev->pdev->dev, &info->port_attr);
	device_remove_file(&info->dev->pdev->dev, &info->port_mtu_attr);
}

static int mlx4_init_steering(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int num_entries = dev->caps.num_ports;
	int i, j;

	priv->steer = kzalloc(sizeof(struct mlx4_steer) * num_entries, GFP_KERNEL);
	if (!priv->steer)
		return -ENOMEM;

	for (i = 0; i < num_entries; i++)
		for (j = 0; j < MLX4_NUM_STEERS; j++) {
			INIT_LIST_HEAD(&priv->steer[i].promisc_qps[j]);
			INIT_LIST_HEAD(&priv->steer[i].steer_entries[j]);
		}
	return 0;
}

static void mlx4_clear_steering(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_steer_index *entry, *tmp_entry;
	struct mlx4_promisc_qp *pqp, *tmp_pqp;
	int num_entries = dev->caps.num_ports;
	int i, j;

	for (i = 0; i < num_entries; i++) {
		for (j = 0; j < MLX4_NUM_STEERS; j++) {
			list_for_each_entry_safe(pqp, tmp_pqp,
						 &priv->steer[i].promisc_qps[j],
						 list) {
				list_del(&pqp->list);
				kfree(pqp);
			}
			list_for_each_entry_safe(entry, tmp_entry,
						 &priv->steer[i].steer_entries[j],
						 list) {
				list_del(&entry->list);
				list_for_each_entry_safe(pqp, tmp_pqp,
							 &entry->duplicates,
							 list) {
					list_del(&pqp->list);
					kfree(pqp);
				}
				kfree(entry);
			}
		}
	}
	kfree(priv->steer);
}

static int extended_func_num(struct pci_dev *pdev)
{
	return PCI_SLOT(pdev->devfn) * 8 + PCI_FUNC(pdev->devfn);
}
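
/*
 * Interpretation note: PCI_SLOT() * 8 + PCI_FUNC() simply reconstructs the
 * raw 8-bit devfn.  With ARI-style SR-IOV addressing the slot bits act as
 * an extension of the function number, so this yields a flat per-device
 * function index that the probe path compares against the num_vfs/probe_vf
 * layout.
 */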

#define MLX4_OWNER_BASE	0x8069c
#define MLX4_OWNER_SIZE	4

static int mlx4_get_ownership(struct mlx4_dev *dev)
{
	void __iomem *owner;
	u32 ret;

	if (pci_channel_offline(dev->pdev))
		return -EIO;

	owner = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_OWNER_BASE,
			MLX4_OWNER_SIZE);
	if (!owner) {
		mlx4_err(dev, "Failed to obtain ownership bit\n");
		return -ENOMEM;
	}

	ret = readl(owner);
	iounmap(owner);
	return (int) !!ret;
}

static void mlx4_free_ownership(struct mlx4_dev *dev)
{
	void __iomem *owner;

	if (pci_channel_offline(dev->pdev))
		return;

	owner = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_OWNER_BASE,
			MLX4_OWNER_SIZE);
	if (!owner) {
		mlx4_err(dev, "Failed to obtain ownership bit\n");
		return;
	}
	writel(0, owner);
	msleep(1000);
	iounmap(owner);
}
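
/*
 * Reading of the two helpers above (an interpretation, not documented
 * behavior): the word at BAR0 + MLX4_OWNER_BASE appears to be a
 * read-to-acquire hardware semaphore.  A read of 0 means this function just
 * took ownership; nonzero means another PF already owns the device.
 * Writing 0 (followed by a one-second grace period) releases it.
 */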

static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
{
	struct mlx4_priv *priv;
	struct mlx4_dev *dev;
	int err;
	int port;
	int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
	int prb_vf[MLX4_MAX_PORTS + 1] = {0, 0, 0};
	const int param_map[MLX4_MAX_PORTS + 1][MLX4_MAX_PORTS + 1] = {
		{2, 0, 0}, {0, 1, 2}, {0, 1, 2} };
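	/*
	 * Slots in nvfs[]/prb_vf[]: [0] = port 1 only, [1] = port 2 only,
	 * [2] = dual-port VFs.  param_map[argc - 1][i] routes the i-th
	 * module-parameter value into the right slot: a single value means
	 * dual-port VFs; two or three values fill the slots in order.
	 */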
	unsigned total_vfs = 0;
	int sriov_initialized = 0;
	unsigned int i;
	int existing_vfs = 0;

	pr_info(DRV_NAME ": Initializing %s\n", pci_name(pdev));

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
		return err;
	}

	/* Due to the requirement that all VFs and the PF are *guaranteed* 2
	 * MACs per port, we must limit the number of VFs to 63 (since there
	 * are 128 MACs)
	 */
	for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]) && i < num_vfs_argc;
	     total_vfs += nvfs[param_map[num_vfs_argc - 1][i]], i++) {
		nvfs[param_map[num_vfs_argc - 1][i]] = num_vfs[i];
		if (nvfs[i] < 0) {
			dev_err(&pdev->dev, "num_vfs module parameter cannot be negative\n");
			return -EINVAL;
		}
	}
	for (i = 0; i < sizeof(prb_vf)/sizeof(prb_vf[0]) && i < probe_vfs_argc;
	     i++) {
		prb_vf[param_map[probe_vfs_argc - 1][i]] = probe_vf[i];
		if (prb_vf[i] < 0 || prb_vf[i] > nvfs[i]) {
			dev_err(&pdev->dev, "probe_vf module parameter cannot be negative or greater than num_vfs\n");
			return -EINVAL;
		}
	}
	if (total_vfs >= MLX4_MAX_NUM_VF) {
		dev_err(&pdev->dev,
			"Requested more VFs (%d) than allowed (%d)\n",
			total_vfs, MLX4_MAX_NUM_VF - 1);
		return -EINVAL;
	}

	for (i = 0; i < MLX4_MAX_PORTS; i++) {
		if (nvfs[i] + nvfs[2] >= MLX4_MAX_NUM_VF_P_PORT) {
			dev_err(&pdev->dev,
				"Requested more VFs (%d) for port (%d) than allowed (%d)\n",
				nvfs[i] + nvfs[2], i + 1,
				MLX4_MAX_NUM_VF_P_PORT - 1);
			return -EINVAL;
		}
	}

	/*
	 * Check for BARs.
	 */
	if (!(pci_dev_data & MLX4_PCI_DEV_IS_VF) &&
	    !(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "Missing DCS, aborting (driver_data: 0x%x, pci_resource_flags(pdev, 0):0x%lx)\n",
			pci_dev_data, pci_resource_flags(pdev, 0));
		err = -ENODEV;
		goto err_disable_pdev;
	}
	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "Missing UAR, aborting\n");
		err = -ENODEV;
		goto err_disable_pdev;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "Couldn't get PCI resources, aborting\n");
		goto err_disable_pdev;
	}

	pci_set_master(pdev);

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask\n");
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting\n");
			goto err_release_regions;
		}
	}
	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		dev_warn(&pdev->dev, "Warning: couldn't set 64-bit consistent PCI DMA mask\n");
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, aborting\n");
			goto err_release_regions;
		}
	}

	/* Allow large DMA segments, up to the firmware limit of 1 GB */
	dma_set_max_seg_size(&pdev->dev, 1024 * 1024 * 1024);

	dev = pci_get_drvdata(pdev);
	priv = mlx4_priv(dev);
	dev->pdev = pdev;
	INIT_LIST_HEAD(&priv->ctx_list);
	spin_lock_init(&priv->ctx_lock);

	mutex_init(&priv->port_mutex);

	INIT_LIST_HEAD(&priv->pgdir_list);
	mutex_init(&priv->pgdir_mutex);

	INIT_LIST_HEAD(&priv->bf_list);
	mutex_init(&priv->bf_mutex);

	dev->rev_id = pdev->revision;
	dev->numa_node = dev_to_node(&pdev->dev);
	/* Detect if this device is a virtual function */
	if (pci_dev_data & MLX4_PCI_DEV_IS_VF) {
		/* When acting as pf, we normally skip vfs unless explicitly
		 * requested to probe them.
		 */
		if (total_vfs) {
			unsigned vfs_offset = 0;
			for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]) &&
			     vfs_offset + nvfs[i] < extended_func_num(pdev);
			     vfs_offset += nvfs[i], i++)
				;
			if (i == sizeof(nvfs)/sizeof(nvfs[0])) {
				err = -ENODEV;
				goto err_free_dev;
			}
			if ((extended_func_num(pdev) - vfs_offset)
			    > prb_vf[i]) {
				mlx4_warn(dev, "Skipping virtual function:%d\n",
					  extended_func_num(pdev));
				err = -ENODEV;
				goto err_free_dev;
			}
		}
		mlx4_warn(dev, "Detected virtual function - running in slave mode\n");
		dev->flags |= MLX4_FLAG_SLAVE;
	} else {
		/* We reset the device and enable SRIOV only for physical
		 * devices. Try to claim ownership on the device;
		 * if already taken, skip -- do not allow multiple PFs
		 */
		err = mlx4_get_ownership(dev);
		if (err) {
			if (err < 0)
				goto err_free_dev;
			else {
				mlx4_warn(dev, "Multiple PFs not yet supported - Skipping PF\n");
				err = -EINVAL;
				goto err_free_dev;
			}
		}

		if (total_vfs) {
			mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n",
				  total_vfs);
			dev->dev_vfs = kzalloc(
					total_vfs * sizeof(*dev->dev_vfs),
					GFP_KERNEL);
			if (NULL == dev->dev_vfs) {
				mlx4_err(dev, "Failed to allocate memory for VFs\n");
				err = 0;
			} else {
				atomic_inc(&pf_loading);
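				/*
				 * SR-IOV may have been left enabled by a
				 * previous instance of the driver (it is not
				 * disabled on remove while VFs are still
				 * active).  In that case reuse the existing
				 * VFs instead of enabling SR-IOV again.
				 */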
				existing_vfs = pci_num_vf(pdev);
				if (existing_vfs) {
					err = 0;
					if (existing_vfs != total_vfs)
						mlx4_err(dev, "SR-IOV was already enabled, but with num_vfs (%d) different than requested (%d)\n",
							 existing_vfs, total_vfs);
				} else {
					err = pci_enable_sriov(pdev, total_vfs);
				}
				if (err) {
					mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d)\n",
						 err);
					atomic_dec(&pf_loading);
					err = 0;
				} else {
					mlx4_warn(dev, "Running in master mode\n");
					dev->flags |= MLX4_FLAG_SRIOV |
						      MLX4_FLAG_MASTER;
					dev->num_vfs = total_vfs;
					sriov_initialized = 1;
				}
			}
		}

		atomic_set(&priv->opreq_count, 0);
		INIT_WORK(&priv->opreq_task, mlx4_opreq_action);

		/*
		 * Now reset the HCA before we touch the PCI capabilities or
		 * attempt a firmware command, since a boot ROM may have left
		 * the HCA in an undefined state.
		 */
		err = mlx4_reset(dev);
		if (err) {
			mlx4_err(dev, "Failed to reset HCA, aborting\n");
			goto err_rel_own;
		}
	}

slave_start:
	err = mlx4_cmd_init(dev);
	if (err) {
		mlx4_err(dev, "Failed to init command interface, aborting\n");
		goto err_sriov;
	}

	/* In slave functions, the communication channel must be initialized
	 * before posting commands. Also, init num_slaves before calling
	 * mlx4_init_hca */
	if (mlx4_is_mfunc(dev)) {
		if (mlx4_is_master(dev))
			dev->num_slaves = MLX4_MAX_NUM_SLAVES;
		else {
			dev->num_slaves = 0;
			err = mlx4_multi_func_init(dev);
			if (err) {
				mlx4_err(dev, "Failed to init slave mfunc interface, aborting\n");
				goto err_cmd;
			}
		}
	}

	err = mlx4_init_hca(dev);
	if (err) {
		if (err == -EACCES) {
			/* Not primary Physical function
			 * Running in slave mode */
			mlx4_cmd_cleanup(dev);
			dev->flags |= MLX4_FLAG_SLAVE;
			dev->flags &= ~MLX4_FLAG_MASTER;
			goto slave_start;
		} else
			goto err_mfunc;
	}

	/* Check if the device is functioning at its maximum possible speed.
	 * No return code for this call; just warn the user in case the PCI
	 * express device capabilities are under-satisfied by the bus.
	 */
	if (!mlx4_is_slave(dev))
		mlx4_check_pcie_caps(dev);

	/* In master functions, the communication channel must be initialized
	 * after obtaining its address from fw */
	if (mlx4_is_master(dev)) {
		unsigned sum = 0;
		err = mlx4_multi_func_init(dev);
		if (err) {
			mlx4_err(dev, "Failed to init master mfunc interface, aborting\n");
			goto err_close;
		}
		if (sriov_initialized) {
			int ib_ports = 0;
			mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
				ib_ports++;

			if (ib_ports &&
			    (num_vfs_argc > 1 || probe_vfs_argc > 1)) {
				mlx4_err(dev,
					 "Invalid syntax of num_vfs/probe_vfs with IB port - single port VFs syntax is only supported when all ports are configured as ethernet\n");
				err = -EINVAL;
				goto err_master_mfunc;
			}
			for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]); i++) {
				unsigned j;
				for (j = 0; j < nvfs[i]; ++sum, ++j) {
					dev->dev_vfs[sum].min_port =
						i < 2 ? i + 1 : 1;
					dev->dev_vfs[sum].n_ports = i < 2 ? 1 :
						dev->caps.num_ports;
				}
			}
		}
	}

	err = mlx4_alloc_eq_table(dev);
	if (err)
		goto err_master_mfunc;

	priv->msix_ctl.pool_bm = 0;
	mutex_init(&priv->msix_ctl.pool_lock);

	mlx4_enable_msi_x(dev);
	if ((mlx4_is_mfunc(dev)) &&
	    !(dev->flags & MLX4_FLAG_MSI_X)) {
		err = -ENOSYS;
		mlx4_err(dev, "INTx is not supported in multi-function mode, aborting\n");
		goto err_free_eq;
	}

	if (!mlx4_is_slave(dev)) {
		err = mlx4_init_steering(dev);
		if (err)
			goto err_free_eq;
	}

	err = mlx4_setup_hca(dev);
	if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X) &&
	    !mlx4_is_mfunc(dev)) {
		dev->flags &= ~MLX4_FLAG_MSI_X;
		dev->caps.num_comp_vectors = 1;
		dev->caps.comp_pool = 0;
		pci_disable_msix(pdev);
		err = mlx4_setup_hca(dev);
	}

	if (err)
		goto err_steer;

	mlx4_init_quotas(dev);

	for (port = 1; port <= dev->caps.num_ports; port++) {
		err = mlx4_init_port_info(dev, port);
		if (err)
			goto err_port;
	}

	err = mlx4_register_device(dev);
	if (err)
		goto err_port;

	mlx4_request_modules(dev);

	mlx4_sense_init(dev);
	mlx4_start_sense(dev);

	priv->removed = 0;

	if (mlx4_is_master(dev) && dev->num_vfs)
		atomic_dec(&pf_loading);

	return 0;

err_port:
	for (--port; port >= 1; --port)
		mlx4_cleanup_port_info(&priv->port[port]);

	mlx4_cleanup_counters_table(dev);
	mlx4_cleanup_qp_table(dev);
	mlx4_cleanup_srq_table(dev);
	mlx4_cleanup_cq_table(dev);
	mlx4_cmd_use_polling(dev);
	mlx4_cleanup_eq_table(dev);
	mlx4_cleanup_mcg_table(dev);
	mlx4_cleanup_mr_table(dev);
	mlx4_cleanup_xrcd_table(dev);
	mlx4_cleanup_pd_table(dev);
	mlx4_cleanup_uar_table(dev);

err_steer:
	if (!mlx4_is_slave(dev))
		mlx4_clear_steering(dev);

err_free_eq:
	mlx4_free_eq_table(dev);

err_master_mfunc:
	if (mlx4_is_master(dev))
		mlx4_multi_func_cleanup(dev);

	if (mlx4_is_slave(dev)) {
		kfree(dev->caps.qp0_qkey);
		kfree(dev->caps.qp0_tunnel);
		kfree(dev->caps.qp0_proxy);
		kfree(dev->caps.qp1_tunnel);
		kfree(dev->caps.qp1_proxy);
	}

err_close:
	if (dev->flags & MLX4_FLAG_MSI_X)
		pci_disable_msix(pdev);

	mlx4_close_hca(dev);

err_mfunc:
	if (mlx4_is_slave(dev))
		mlx4_multi_func_cleanup(dev);

err_cmd:
	mlx4_cmd_cleanup(dev);

err_sriov:
	if (dev->flags & MLX4_FLAG_SRIOV && !existing_vfs)
		pci_disable_sriov(pdev);

err_rel_own:
	if (!mlx4_is_slave(dev))
		mlx4_free_ownership(dev);

	if (mlx4_is_master(dev) && dev->num_vfs)
		atomic_dec(&pf_loading);

	kfree(priv->dev.dev_vfs);

err_free_dev:
	kfree(priv);

err_release_regions:
	pci_release_regions(pdev);

err_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}

static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct mlx4_priv *priv;
	struct mlx4_dev *dev;

	printk_once(KERN_INFO "%s", mlx4_version);

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	dev = &priv->dev;
	pci_set_drvdata(pdev, dev);
	priv->pci_dev_data = id->driver_data;

	return __mlx4_init_one(pdev, id->driver_data);
}

static void __mlx4_remove_one(struct pci_dev *pdev)
{
	struct mlx4_dev *dev = pci_get_drvdata(pdev);
	struct mlx4_priv *priv = mlx4_priv(dev);
	int pci_dev_data;
	int p;
	int active_vfs = 0;

	if (priv->removed)
		return;

	pci_dev_data = priv->pci_dev_data;

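	/*
	 * If any VF is still active (e.g. assigned to a guest), SR-IOV must
	 * stay enabled on the way out: disabling it would yank the VFs out
	 * from under their drivers.  That is what the check below and the
	 * matching guard at the pci_disable_sriov() call protect against.
	 */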
	/* Disabling SR-IOV is not allowed while there are active VFs */
	if (mlx4_is_master(dev)) {
		active_vfs = mlx4_how_many_lives_vf(dev);
		if (active_vfs) {
			pr_warn("Removing PF when there are active VFs!\n");
			pr_warn("Will not disable SR-IOV.\n");
		}
	}
	mlx4_stop_sense(dev);
	mlx4_unregister_device(dev);

	for (p = 1; p <= dev->caps.num_ports; p++) {
		mlx4_cleanup_port_info(&priv->port[p]);
		mlx4_CLOSE_PORT(dev, p);
	}

	if (mlx4_is_master(dev))
		mlx4_free_resource_tracker(dev,
					   RES_TR_FREE_SLAVES_ONLY);

	mlx4_cleanup_counters_table(dev);
	mlx4_cleanup_qp_table(dev);
	mlx4_cleanup_srq_table(dev);
	mlx4_cleanup_cq_table(dev);
	mlx4_cmd_use_polling(dev);
	mlx4_cleanup_eq_table(dev);
	mlx4_cleanup_mcg_table(dev);
	mlx4_cleanup_mr_table(dev);
	mlx4_cleanup_xrcd_table(dev);
	mlx4_cleanup_pd_table(dev);

	if (mlx4_is_master(dev))
		mlx4_free_resource_tracker(dev,
					   RES_TR_FREE_STRUCTS_ONLY);

	iounmap(priv->kar);
	mlx4_uar_free(dev, &priv->driver_uar);
	mlx4_cleanup_uar_table(dev);
	if (!mlx4_is_slave(dev))
		mlx4_clear_steering(dev);
	mlx4_free_eq_table(dev);
	if (mlx4_is_master(dev))
		mlx4_multi_func_cleanup(dev);
	mlx4_close_hca(dev);
	if (mlx4_is_slave(dev))
		mlx4_multi_func_cleanup(dev);
	mlx4_cmd_cleanup(dev);

	if (dev->flags & MLX4_FLAG_MSI_X)
		pci_disable_msix(pdev);
	if (dev->flags & MLX4_FLAG_SRIOV && !active_vfs) {
		mlx4_warn(dev, "Disabling SR-IOV\n");
		pci_disable_sriov(pdev);
		dev->num_vfs = 0;
	}

	if (!mlx4_is_slave(dev))
		mlx4_free_ownership(dev);

	kfree(dev->caps.qp0_qkey);
	kfree(dev->caps.qp0_tunnel);
	kfree(dev->caps.qp0_proxy);
	kfree(dev->caps.qp1_tunnel);
	kfree(dev->caps.qp1_proxy);
	kfree(dev->dev_vfs);

	pci_release_regions(pdev);
	pci_disable_device(pdev);
	memset(priv, 0, sizeof(*priv));
	priv->pci_dev_data = pci_dev_data;
	priv->removed = 1;
}

static void mlx4_remove_one(struct pci_dev *pdev)
{
	struct mlx4_dev *dev = pci_get_drvdata(pdev);
	struct mlx4_priv *priv = mlx4_priv(dev);

	__mlx4_remove_one(pdev);
	kfree(priv);
	pci_set_drvdata(pdev, NULL);
}

int mlx4_restart_one(struct pci_dev *pdev)
{
	struct mlx4_dev *dev = pci_get_drvdata(pdev);
	struct mlx4_priv *priv = mlx4_priv(dev);
	int pci_dev_data;

	pci_dev_data = priv->pci_dev_data;
	__mlx4_remove_one(pdev);
	return __mlx4_init_one(pdev, pci_dev_data);
}

static const struct pci_device_id mlx4_pci_table[] = {
	/* MT25408 "Hermon" SDR */
	{ PCI_VDEVICE(MELLANOX, 0x6340), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT25408 "Hermon" DDR */
	{ PCI_VDEVICE(MELLANOX, 0x634a), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT25408 "Hermon" QDR */
	{ PCI_VDEVICE(MELLANOX, 0x6354), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT25408 "Hermon" DDR PCIe gen2 */
	{ PCI_VDEVICE(MELLANOX, 0x6732), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT25408 "Hermon" QDR PCIe gen2 */
	{ PCI_VDEVICE(MELLANOX, 0x673c), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT25408 "Hermon" EN 10GigE */
	{ PCI_VDEVICE(MELLANOX, 0x6368), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT25408 "Hermon" EN 10GigE PCIe gen2 */
	{ PCI_VDEVICE(MELLANOX, 0x6750), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT25458 ConnectX EN 10GBASE-T 10GigE */
	{ PCI_VDEVICE(MELLANOX, 0x6372), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT25458 ConnectX EN 10GBASE-T+Gen2 10GigE */
	{ PCI_VDEVICE(MELLANOX, 0x675a), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT26468 ConnectX EN 10GigE PCIe gen2 */
	{ PCI_VDEVICE(MELLANOX, 0x6764), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT26438 ConnectX EN 40GigE PCIe gen2 5GT/s */
	{ PCI_VDEVICE(MELLANOX, 0x6746), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT26478 ConnectX2 40GigE PCIe gen2 */
	{ PCI_VDEVICE(MELLANOX, 0x676e), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT25400 Family [ConnectX-2 Virtual Function] */
	{ PCI_VDEVICE(MELLANOX, 0x1002), MLX4_PCI_DEV_IS_VF },
	/* MT27500 Family [ConnectX-3] */
	{ PCI_VDEVICE(MELLANOX, 0x1003), 0 },
	/* MT27500 Family [ConnectX-3 Virtual Function] */
	{ PCI_VDEVICE(MELLANOX, 0x1004), MLX4_PCI_DEV_IS_VF },
	{ PCI_VDEVICE(MELLANOX, 0x1005), 0 }, /* MT27510 Family */
	{ PCI_VDEVICE(MELLANOX, 0x1006), 0 }, /* MT27511 Family */
	{ PCI_VDEVICE(MELLANOX, 0x1007), 0 }, /* MT27520 Family */
	{ PCI_VDEVICE(MELLANOX, 0x1008), 0 }, /* MT27521 Family */
	{ PCI_VDEVICE(MELLANOX, 0x1009), 0 }, /* MT27530 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100a), 0 }, /* MT27531 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100b), 0 }, /* MT27540 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100c), 0 }, /* MT27541 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100d), 0 }, /* MT27550 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100e), 0 }, /* MT27551 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100f), 0 }, /* MT27560 Family */
	{ PCI_VDEVICE(MELLANOX, 0x1010), 0 }, /* MT27561 Family */
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, mlx4_pci_table);

static pci_ers_result_t mlx4_pci_err_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	__mlx4_remove_one(pdev);

	return state == pci_channel_io_perm_failure ?
		PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t mlx4_pci_slot_reset(struct pci_dev *pdev)
{
	struct mlx4_dev *dev = pci_get_drvdata(pdev);
	struct mlx4_priv *priv = mlx4_priv(dev);
	int ret;

	ret = __mlx4_init_one(pdev, priv->pci_dev_data);

	return ret ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
}

static const struct pci_error_handlers mlx4_err_handler = {
	.error_detected = mlx4_pci_err_detected,
	.slot_reset = mlx4_pci_slot_reset,
};
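
/*
 * AER recovery strategy above: a detected channel error tears the device
 * down exactly as a hot remove would, and a subsequent slot reset simply
 * re-runs the normal probe path with the saved pci_dev_data.
 */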

static struct pci_driver mlx4_driver = {
	.name		= DRV_NAME,
	.id_table	= mlx4_pci_table,
	.probe		= mlx4_init_one,
	.shutdown	= __mlx4_remove_one,
	.remove		= mlx4_remove_one,
	.err_handler	= &mlx4_err_handler,
};

static int __init mlx4_verify_params(void)
{
	if ((log_num_mac < 0) || (log_num_mac > 7)) {
		pr_warn("mlx4_core: bad num_mac: %d\n", log_num_mac);
		return -1;
	}

	if (log_num_vlan != 0)
		pr_warn("mlx4_core: log_num_vlan - obsolete module param, using %d\n",
			MLX4_LOG_NUM_VLANS);

	if (use_prio != 0)
		pr_warn("mlx4_core: use_prio - obsolete module param, ignored\n");

	if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 7)) {
		pr_warn("mlx4_core: bad log_mtts_per_seg: %d\n",
			log_mtts_per_seg);
		return -1;
	}

	/* Check if module param for ports type has legal combination */
	if (port_type_array[0] == false && port_type_array[1] == true) {
		pr_warn("Module parameter configuration ETH/IB is not supported. Switching to default configuration IB/IB\n");
		port_type_array[0] = true;
	}

	if (mlx4_log_num_mgm_entry_size != -1 &&
	    (mlx4_log_num_mgm_entry_size < MLX4_MIN_MGM_LOG_ENTRY_SIZE ||
	     mlx4_log_num_mgm_entry_size > MLX4_MAX_MGM_LOG_ENTRY_SIZE)) {
		pr_warn("mlx4_core: mlx4_log_num_mgm_entry_size (%d) not in legal range (-1 or %d..%d)\n",
			mlx4_log_num_mgm_entry_size,
			MLX4_MIN_MGM_LOG_ENTRY_SIZE,
			MLX4_MAX_MGM_LOG_ENTRY_SIZE);
		return -1;
	}

	return 0;
}

static int __init mlx4_init(void)
{
	int ret;

	if (mlx4_verify_params())
		return -EINVAL;

	mlx4_catas_init();

	mlx4_wq = create_singlethread_workqueue("mlx4");
	if (!mlx4_wq)
		return -ENOMEM;

	ret = pci_register_driver(&mlx4_driver);
	if (ret < 0)
		destroy_workqueue(mlx4_wq);
	return ret < 0 ? ret : 0;
}

static void __exit mlx4_cleanup(void)
{
	pci_unregister_driver(&mlx4_driver);
	destroy_workqueue(mlx4_wq);
}

module_init(mlx4_init);
module_exit(mlx4_cleanup);