/*
 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <net/ipv6.h>
#include <net/addrconf.h>

#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_addr.h>

#include <linux/mlx4/driver.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>

#include "mlx4_ib.h"
#include "user.h"
#define DRV_NAME	MLX4_IB_DRV_NAME
#define DRV_VERSION	"2.2-1"
#define DRV_RELDATE	"Feb 2014"

#define MLX4_IB_FLOW_MAX_PRIO 0xFFF
#define MLX4_IB_FLOW_QPN_MASK 0xFFFFFF
MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("Mellanox ConnectX HCA InfiniBand driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);

int mlx4_ib_sm_guid_assign = 1;
module_param_named(sm_guid_assign, mlx4_ib_sm_guid_assign, int, 0444);
MODULE_PARM_DESC(sm_guid_assign, "Enable SM alias_GUID assignment if sm_guid_assign > 0 (Default: 1)");
static const char mlx4_ib_version[] =
	DRV_NAME ": Mellanox ConnectX InfiniBand driver v"
	DRV_VERSION " (" DRV_RELDATE ")\n";
struct update_gid_work {
	struct work_struct	work;
	union ib_gid		gids[128];
	struct mlx4_ib_dev     *dev;
	int			port;
};
static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init);

static struct workqueue_struct *wq;
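
/*
 * Prepare a LID-routed SubnGet() MAD skeleton; callers then set attr_id
 * (and attr_mod where needed) before issuing it through mlx4_MAD_IFC().
 */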
static void init_query_mad(struct ib_smp *mad)
{
	mad->base_version  = 1;
	mad->mgmt_class    = IB_MGMT_CLASS_SUBN_LID_ROUTED;
	mad->class_version = 1;
	mad->method	   = IB_MGMT_METHOD_GET;
}
static union ib_gid zgid;
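
/*
 * Device-managed flow steering (DMFS) is only usable when the device is
 * in MLX4_STEERING_MODE_DEVICE_MANAGED and firmware advertises the flags
 * required by every port type in use; IB ports additionally need
 * DMFS_IPOIB support and must not be in a multifunction environment.
 */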
static int check_flow_steering_support(struct mlx4_dev *dev)
{
	int eth_num_ports = 0;
	int ib_num_ports = 0;

	int dmfs = dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED;

	if (dmfs) {
		int i;
		mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH)
			eth_num_ports++;
		mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
			ib_num_ports++;
		dmfs &= (!ib_num_ports ||
			 (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DMFS_IPOIB)) &&
			(!eth_num_ports ||
			 (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FS_EN));
		if (ib_num_ports && mlx4_is_mfunc(dev)) {
			pr_warn("Device managed flow steering is unavailable for IB port in multifunction env.\n");
			dmfs = 0;
		}
	}
	return dmfs;
}
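
/*
 * Build the ib_device_attr for this HCA: static limits come from the
 * cached device caps/quotas, while the vendor and system-image
 * identifiers are read from the NodeInfo attribute via a MAD.
 */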
static int mlx4_ib_query_device(struct ib_device *ibdev,
				struct ib_device_attr *props)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

	err = mlx4_MAD_IFC(to_mdev(ibdev), MLX4_MAD_IFC_IGNORE_KEYS,
			   1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memset(props, 0, sizeof *props);

	props->fw_ver = dev->dev->caps.fw_ver;
	props->device_cap_flags    = IB_DEVICE_CHANGE_PHY_PORT |
		IB_DEVICE_PORT_ACTIVE_EVENT		|
		IB_DEVICE_SYS_IMAGE_GUID		|
		IB_DEVICE_RC_RNR_NAK_GEN		|
		IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR)
		props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR)
		props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_APM)
		props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UD_AV_PORT)
		props->device_cap_flags |= IB_DEVICE_UD_AV_PORT_ENFORCE;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM)
		props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
	if (dev->dev->caps.max_gso_sz && dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BLH)
		props->device_cap_flags |= IB_DEVICE_UD_TSO;
	if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_RESERVED_LKEY)
		props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY;
	if ((dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_LOCAL_INV) &&
	    (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_REMOTE_INV) &&
	    (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_FAST_REG_WR))
		props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC)
		props->device_cap_flags |= IB_DEVICE_XRC;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW)
		props->device_cap_flags |= IB_DEVICE_MEM_WINDOW;
	if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN) {
		if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_WIN_TYPE_2B)
			props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2B;
		else
			props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2A;
	}
	if (dev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED)
		props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING;

	props->vendor_id	   = be32_to_cpup((__be32 *) (out_mad->data + 36)) &
		0xffffff;
	props->vendor_part_id	   = dev->dev->pdev->device;
	props->hw_ver		   = be32_to_cpup((__be32 *) (out_mad->data + 32));
	memcpy(&props->sys_image_guid, out_mad->data + 4, 8);

	props->max_mr_size	   = ~0ull;
	props->page_size_cap	   = dev->dev->caps.page_size_cap;
	props->max_qp		   = dev->dev->quotas.qp;
	props->max_qp_wr	   = dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE;
	props->max_sge		   = min(dev->dev->caps.max_sq_sg,
					 dev->dev->caps.max_rq_sg);
	props->max_cq		   = dev->dev->quotas.cq;
	props->max_cqe		   = dev->dev->caps.max_cqes;
	props->max_mr		   = dev->dev->quotas.mpt;
	props->max_pd		   = dev->dev->caps.num_pds - dev->dev->caps.reserved_pds;
	props->max_qp_rd_atom	   = dev->dev->caps.max_qp_dest_rdma;
	props->max_qp_init_rd_atom = dev->dev->caps.max_qp_init_rdma;
	props->max_res_rd_atom	   = props->max_qp_rd_atom * props->max_qp;
	props->max_srq		   = dev->dev->quotas.srq;
	props->max_srq_wr	   = dev->dev->caps.max_srq_wqes - 1;
	props->max_srq_sge	   = dev->dev->caps.max_srq_sge;
	props->max_fast_reg_page_list_len = MLX4_MAX_FAST_REG_PAGES;
	props->local_ca_ack_delay  = dev->dev->caps.local_ca_ack_delay;
	props->atomic_cap	   = dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_ATOMIC ?
		IB_ATOMIC_HCA : IB_ATOMIC_NONE;
	props->masked_atomic_cap   = props->atomic_cap;
	props->max_pkeys	   = dev->dev->caps.pkey_table_len[1];
	props->max_mcast_grp	   = dev->dev->caps.num_mgms + dev->dev->caps.num_amgms;
	props->max_mcast_qp_attach = dev->dev->caps.num_qp_per_mgm;
	props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
					   props->max_mcast_grp;
	props->max_map_per_fmr = dev->dev->caps.max_fmr_maps;

out:
	kfree(in_mad);
	kfree(out_mad);

	return err;
}
static enum rdma_link_layer
mlx4_ib_port_link_layer(struct ib_device *device, u8 port_num)
{
	struct mlx4_dev *dev = to_mdev(device)->dev;

	return dev->caps.port_mask[port_num] == MLX4_PORT_TYPE_IB ?
		IB_LINK_LAYER_INFINIBAND : IB_LINK_LAYER_ETHERNET;
}
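
/*
 * Query an IB link: fetch PortInfo (and, for QDR links, ExtendedPortInfo
 * to detect FDR-10) through the MAD interface and translate the result
 * into ib_port_attr fields.
 */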
static int ib_link_query_port(struct ib_device *ibdev, u8 port,
			      struct ib_port_attr *props, int netw_view)
{
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int ext_active_speed;
	int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
	int err = -ENOMEM;

	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
	in_mad->attr_mod = cpu_to_be32(port);

	if (mlx4_is_mfunc(to_mdev(ibdev)->dev) && netw_view)
		mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;

	err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL,
			   in_mad, out_mad);
	if (err)
		goto out;

	props->lid		= be16_to_cpup((__be16 *) (out_mad->data + 16));
	props->lmc		= out_mad->data[34] & 0x7;
	props->sm_lid		= be16_to_cpup((__be16 *) (out_mad->data + 18));
	props->sm_sl		= out_mad->data[36] & 0xf;
	props->state		= out_mad->data[32] & 0xf;
	props->phys_state	= out_mad->data[33] >> 4;
	props->port_cap_flags	= be32_to_cpup((__be32 *) (out_mad->data + 20));
	if (netw_view)
		props->gid_tbl_len = out_mad->data[50];
	else
		props->gid_tbl_len = to_mdev(ibdev)->dev->caps.gid_table_len[port];
	props->max_msg_sz	= to_mdev(ibdev)->dev->caps.max_msg_sz;
	props->pkey_tbl_len	= to_mdev(ibdev)->dev->caps.pkey_table_len[port];
	props->bad_pkey_cntr	= be16_to_cpup((__be16 *) (out_mad->data + 46));
	props->qkey_viol_cntr	= be16_to_cpup((__be16 *) (out_mad->data + 48));
	props->active_width	= out_mad->data[31] & 0xf;
	props->active_speed	= out_mad->data[35] >> 4;
	props->max_mtu		= out_mad->data[41] & 0xf;
	props->active_mtu	= out_mad->data[36] >> 4;
	props->subnet_timeout	= out_mad->data[51] & 0x1f;
	props->max_vl_num	= out_mad->data[37] >> 4;
	props->init_type_reply	= out_mad->data[41] >> 4;

	/* Check if extended speeds (EDR/FDR/...) are supported */
	if (props->port_cap_flags & IB_PORT_EXTENDED_SPEEDS_SUP) {
		ext_active_speed = out_mad->data[62] >> 4;

		switch (ext_active_speed) {
		case 1:
			props->active_speed = IB_SPEED_FDR;
			break;
		case 2:
			props->active_speed = IB_SPEED_EDR;
			break;
		}
	}

	/* If reported active speed is QDR, check if is FDR-10 */
	if (props->active_speed == IB_SPEED_QDR) {
		init_query_mad(in_mad);
		in_mad->attr_id = MLX4_ATTR_EXTENDED_PORT_INFO;
		in_mad->attr_mod = cpu_to_be32(port);

		err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port,
				   NULL, NULL, in_mad, out_mad);
		if (err)
			goto out;

		/* Checking LinkSpeedActive for FDR-10 */
		if (out_mad->data[15] & 0x1)
			props->active_speed = IB_SPEED_FDR10;
	}

	/* Avoid wrong speed value returned by FW if the IB link is down. */
	if (props->state == IB_PORT_DOWN)
		props->active_speed = IB_SPEED_SDR;

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}
static u8 state_to_phys_state(enum ib_port_state state)
{
	return state == IB_PORT_ACTIVE ? 5 : 3;
}
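
/*
 * Query an Ethernet (RoCE) link: port attributes are synthesized from
 * QUERY_PORT firmware output plus the state of the paired net_device,
 * since there is no subnet management agent on an Ethernet port.
 */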
static int eth_link_query_port(struct ib_device *ibdev, u8 port,
			       struct ib_port_attr *props, int netw_view)
{
	struct mlx4_ib_dev *mdev = to_mdev(ibdev);
	struct mlx4_ib_iboe *iboe = &mdev->iboe;
	struct net_device *ndev;
	enum ib_mtu tmp;
	struct mlx4_cmd_mailbox *mailbox;
	int err = 0;

	mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, port, 0,
			   MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
			   MLX4_CMD_WRAPPED);
	if (err)
		goto out;

	props->active_width	= (((u8 *)mailbox->buf)[5] == 0x40) ?
				  IB_WIDTH_4X : IB_WIDTH_1X;
	props->active_speed	= IB_SPEED_QDR;
	props->port_cap_flags	= IB_PORT_CM_SUP | IB_PORT_IP_BASED_GIDS;
	props->gid_tbl_len	= mdev->dev->caps.gid_table_len[port];
	props->max_msg_sz	= mdev->dev->caps.max_msg_sz;
	props->pkey_tbl_len	= 1;
	props->max_mtu		= IB_MTU_4096;
	props->max_vl_num	= 2;
	props->state		= IB_PORT_DOWN;
	props->phys_state	= state_to_phys_state(props->state);
	props->active_mtu	= IB_MTU_256;
	spin_lock(&iboe->lock);
	ndev = iboe->netdevs[port - 1];
	if (!ndev)
		goto out_unlock;

	tmp = iboe_get_mtu(ndev->mtu);
	props->active_mtu = tmp ? min(props->max_mtu, tmp) : IB_MTU_256;

	props->state		= (netif_running(ndev) && netif_carrier_ok(ndev)) ?
				  IB_PORT_ACTIVE : IB_PORT_DOWN;
	props->phys_state	= state_to_phys_state(props->state);
out_unlock:
	spin_unlock(&iboe->lock);
out:
	mlx4_free_cmd_mailbox(mdev->dev, mailbox);
	return err;
}
int __mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
			 struct ib_port_attr *props, int netw_view)
{
	int err;

	memset(props, 0, sizeof *props);

	err = mlx4_ib_port_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND ?
		ib_link_query_port(ibdev, port, props, netw_view) :
		eth_link_query_port(ibdev, port, props, netw_view);

	return err;
}
static int mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
			      struct ib_port_attr *props)
{
	/* returns host view */
	return __mlx4_ib_query_port(ibdev, port, props, 0);
}
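
/*
 * Read a GID from the port's table via PortInfo/GuidInfo MADs.  For a
 * multifunction host view only index 0 carries a real GUID; any other
 * index returns the subnet prefix with a zeroed GUID.
 */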
int __mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
			union ib_gid *gid, int netw_view)
{
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	int clear = 0;
	int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;

	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
	in_mad->attr_mod = cpu_to_be32(port);

	if (mlx4_is_mfunc(dev->dev) && netw_view)
		mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;

	err = mlx4_MAD_IFC(dev, mad_ifc_flags, port, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(gid->raw, out_mad->data + 8, 8);

	if (mlx4_is_mfunc(dev->dev) && !netw_view) {
		if (index) {
			/* For any index > 0, return the null guid */
			err = 0;
			clear = 1;
			goto out;
		}
	}

	init_query_mad(in_mad);
	in_mad->attr_id  = IB_SMP_ATTR_GUID_INFO;
	in_mad->attr_mod = cpu_to_be32(index / 8);

	err = mlx4_MAD_IFC(dev, mad_ifc_flags, port,
			   NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8);

out:
	if (clear)
		memset(gid->raw + 8, 0, 8);
	kfree(in_mad);
	kfree(out_mad);
	return err;
}
static int iboe_query_gid(struct ib_device *ibdev, u8 port, int index,
			  union ib_gid *gid)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);

	*gid = dev->iboe.gid_table[port - 1][index];

	return 0;
}
static int mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
			     union ib_gid *gid)
{
	if (rdma_port_get_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND)
		return __mlx4_ib_query_gid(ibdev, port, index, gid, 0);
	else
		return iboe_query_gid(ibdev, port, index, gid);
}
int __mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
			 u16 *pkey, int netw_view)
{
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
	int err = -ENOMEM;

	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id  = IB_SMP_ATTR_PKEY_TABLE;
	in_mad->attr_mod = cpu_to_be32(index / 32);

	if (mlx4_is_mfunc(to_mdev(ibdev)->dev) && netw_view)
		mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;

	err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL,
			   in_mad, out_mad);
	if (err)
		goto out;

	*pkey = be16_to_cpu(((__be16 *) out_mad->data)[index % 32]);

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

static int mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
{
	return __mlx4_ib_query_pkey(ibdev, port, index, pkey, 0);
}
static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask,
				 struct ib_device_modify *props)
{
	struct mlx4_cmd_mailbox *mailbox;
	unsigned long flags;

	if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
		return -EOPNOTSUPP;

	if (!(mask & IB_DEVICE_MODIFY_NODE_DESC))
		return 0;

	if (mlx4_is_slave(to_mdev(ibdev)->dev))
		return -EOPNOTSUPP;

	spin_lock_irqsave(&to_mdev(ibdev)->sm_lock, flags);
	memcpy(ibdev->node_desc, props->node_desc, 64);
	spin_unlock_irqrestore(&to_mdev(ibdev)->sm_lock, flags);

	/*
	 * If possible, pass node desc to FW, so it can generate
	 * a 144 trap.  If cmd fails, just ignore.
	 */
	mailbox = mlx4_alloc_cmd_mailbox(to_mdev(ibdev)->dev);
	if (IS_ERR(mailbox))
		return 0;

	memcpy(mailbox->buf, props->node_desc, 64);
	mlx4_cmd(to_mdev(ibdev)->dev, mailbox->dma, 1, 0,
		 MLX4_CMD_SET_NODE, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);

	mlx4_free_cmd_mailbox(to_mdev(ibdev)->dev, mailbox);

	return 0;
}
static int mlx4_SET_PORT(struct mlx4_ib_dev *dev, u8 port, int reset_qkey_viols,
			 u32 cap_mask)
{
	struct mlx4_cmd_mailbox *mailbox;
	int err;
	u8 is_eth = dev->dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH;

	mailbox = mlx4_alloc_cmd_mailbox(dev->dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	if (dev->dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
		*(u8 *) mailbox->buf	     = !!reset_qkey_viols << 6;
		((__be32 *) mailbox->buf)[2] = cpu_to_be32(cap_mask);
	} else {
		((u8 *) mailbox->buf)[3]     = !!reset_qkey_viols;
		((__be32 *) mailbox->buf)[1] = cpu_to_be32(cap_mask);
	}

	err = mlx4_cmd(dev->dev, mailbox->dma, port, is_eth, MLX4_CMD_SET_PORT,
		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);

	mlx4_free_cmd_mailbox(dev->dev, mailbox);
	return err;
}
static int mlx4_ib_modify_port(struct ib_device *ibdev, u8 port, int mask,
			       struct ib_port_modify *props)
{
	struct ib_port_attr attr;
	u32 cap_mask;
	int err;

	mutex_lock(&to_mdev(ibdev)->cap_mask_mutex);

	err = mlx4_ib_query_port(ibdev, port, &attr);
	if (err)
		goto out;

	cap_mask = (attr.port_cap_flags | props->set_port_cap_mask) &
		~props->clr_port_cap_mask;

	err = mlx4_SET_PORT(to_mdev(ibdev), port,
			    !!(mask & IB_PORT_RESET_QKEY_CNTR),
			    cap_mask);

out:
	mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex);
	return err;
}
static struct ib_ucontext *mlx4_ib_alloc_ucontext(struct ib_device *ibdev,
						  struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct mlx4_ib_ucontext *context;
	struct mlx4_ib_alloc_ucontext_resp_v3 resp_v3;
	struct mlx4_ib_alloc_ucontext_resp resp;
	int err;

	if (!dev->ib_active)
		return ERR_PTR(-EAGAIN);

	if (ibdev->uverbs_abi_ver == MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION) {
		resp_v3.qp_tab_size      = dev->dev->caps.num_qps;
		resp_v3.bf_reg_size      = dev->dev->caps.bf_reg_size;
		resp_v3.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
	} else {
		resp.dev_caps	      = dev->dev->caps.userspace_caps;
		resp.qp_tab_size      = dev->dev->caps.num_qps;
		resp.bf_reg_size      = dev->dev->caps.bf_reg_size;
		resp.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
		resp.cqe_size	      = dev->dev->caps.cqe_size;
	}

	context = kmalloc(sizeof *context, GFP_KERNEL);
	if (!context)
		return ERR_PTR(-ENOMEM);

	err = mlx4_uar_alloc(to_mdev(ibdev)->dev, &context->uar);
	if (err) {
		kfree(context);
		return ERR_PTR(err);
	}

	INIT_LIST_HEAD(&context->db_page_list);
	mutex_init(&context->db_page_mutex);

	if (ibdev->uverbs_abi_ver == MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION)
		err = ib_copy_to_udata(udata, &resp_v3, sizeof(resp_v3));
	else
		err = ib_copy_to_udata(udata, &resp, sizeof(resp));

	if (err) {
		mlx4_uar_free(to_mdev(ibdev)->dev, &context->uar);
		kfree(context);
		return ERR_PTR(-EFAULT);
	}

	return &context->ibucontext;
}
static int mlx4_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
{
	struct mlx4_ib_ucontext *context = to_mucontext(ibcontext);

	mlx4_uar_free(to_mdev(ibcontext->device)->dev, &context->uar);
	kfree(context);

	return 0;
}
static int mlx4_ib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	struct mlx4_ib_dev *dev = to_mdev(context->device);

	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
		return -EINVAL;

	if (vma->vm_pgoff == 0) {
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

		if (io_remap_pfn_range(vma, vma->vm_start,
				       to_mucontext(context)->uar.pfn,
				       PAGE_SIZE, vma->vm_page_prot))
			return -EAGAIN;
	} else if (vma->vm_pgoff == 1 && dev->dev->caps.bf_reg_size != 0) {
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

		if (io_remap_pfn_range(vma, vma->vm_start,
				       to_mucontext(context)->uar.pfn +
				       dev->dev->caps.num_uars,
				       PAGE_SIZE, vma->vm_page_prot))
			return -EAGAIN;
	} else
		return -EINVAL;

	return 0;
}
static struct ib_pd *mlx4_ib_alloc_pd(struct ib_device *ibdev,
				      struct ib_ucontext *context,
				      struct ib_udata *udata)
{
	struct mlx4_ib_pd *pd;
	int err;

	pd = kmalloc(sizeof *pd, GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);

	err = mlx4_pd_alloc(to_mdev(ibdev)->dev, &pd->pdn);
	if (err) {
		kfree(pd);
		return ERR_PTR(err);
	}

	if (context)
		if (ib_copy_to_udata(udata, &pd->pdn, sizeof (__u32))) {
			mlx4_pd_free(to_mdev(ibdev)->dev, pd->pdn);
			kfree(pd);
			return ERR_PTR(-EFAULT);
		}

	return &pd->ibpd;
}
static int mlx4_ib_dealloc_pd(struct ib_pd *pd)
{
	mlx4_pd_free(to_mdev(pd->device)->dev, to_mpd(pd)->pdn);
	kfree(pd);

	return 0;
}
static struct ib_xrcd *mlx4_ib_alloc_xrcd(struct ib_device *ibdev,
					  struct ib_ucontext *context,
					  struct ib_udata *udata)
{
	struct mlx4_ib_xrcd *xrcd;
	int err;

	if (!(to_mdev(ibdev)->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC))
		return ERR_PTR(-ENOSYS);

	xrcd = kmalloc(sizeof *xrcd, GFP_KERNEL);
	if (!xrcd)
		return ERR_PTR(-ENOMEM);

	err = mlx4_xrcd_alloc(to_mdev(ibdev)->dev, &xrcd->xrcdn);
	if (err)
		goto err1;

	xrcd->pd = ib_alloc_pd(ibdev);
	if (IS_ERR(xrcd->pd)) {
		err = PTR_ERR(xrcd->pd);
		goto err2;
	}

	xrcd->cq = ib_create_cq(ibdev, NULL, NULL, xrcd, 1, 0);
	if (IS_ERR(xrcd->cq)) {
		err = PTR_ERR(xrcd->cq);
		goto err3;
	}

	return &xrcd->ibxrcd;

err3:
	ib_dealloc_pd(xrcd->pd);
err2:
	mlx4_xrcd_free(to_mdev(ibdev)->dev, xrcd->xrcdn);
err1:
	kfree(xrcd);
	return ERR_PTR(err);
}
static int mlx4_ib_dealloc_xrcd(struct ib_xrcd *xrcd)
{
	ib_destroy_cq(to_mxrcd(xrcd)->cq);
	ib_dealloc_pd(to_mxrcd(xrcd)->pd);
	mlx4_xrcd_free(to_mdev(xrcd->device)->dev, to_mxrcd(xrcd)->xrcdn);
	kfree(xrcd);

	return 0;
}
static int add_gid_entry(struct ib_qp *ibqp, union ib_gid *gid)
{
	struct mlx4_ib_qp *mqp = to_mqp(ibqp);
	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
	struct mlx4_ib_gid_entry *ge;

	ge = kzalloc(sizeof *ge, GFP_KERNEL);
	if (!ge)
		return -ENOMEM;

	ge->gid = *gid;
	if (mlx4_ib_add_mc(mdev, mqp, gid)) {
		ge->port = mqp->port;
		ge->added = 1;
	}

	mutex_lock(&mqp->mutex);
	list_add_tail(&ge->list, &mqp->gid_list);
	mutex_unlock(&mqp->mutex);

	return 0;
}
int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
		   union ib_gid *gid)
{
	struct net_device *ndev;
	int ret = 0;

	if (!mqp->port)
		return 0;

	spin_lock(&mdev->iboe.lock);
	ndev = mdev->iboe.netdevs[mqp->port - 1];
	if (ndev)
		dev_hold(ndev);
	spin_unlock(&mdev->iboe.lock);

	if (ndev) {
		ret = 1;
		dev_put(ndev);
	}

	return ret;
}
struct mlx4_ib_steering {
	struct list_head list;
	u64 reg_id;
	union ib_gid gid;
};
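
/*
 * Translate one verbs flow spec into the firmware's _rule_hw layout.
 * Returns the size in bytes consumed in the rule buffer, or -EINVAL for
 * an unsupported spec type.
 */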
static int parse_flow_attr(struct mlx4_dev *dev,
			   u32 qp_num,
			   union ib_flow_spec *ib_spec,
			   struct _rule_hw *mlx4_spec)
{
	enum mlx4_net_trans_rule_id type;

	switch (ib_spec->type) {
	case IB_FLOW_SPEC_ETH:
		type = MLX4_NET_TRANS_RULE_ID_ETH;
		memcpy(mlx4_spec->eth.dst_mac, ib_spec->eth.val.dst_mac,
		       ETH_ALEN);
		memcpy(mlx4_spec->eth.dst_mac_msk, ib_spec->eth.mask.dst_mac,
		       ETH_ALEN);
		mlx4_spec->eth.vlan_tag = ib_spec->eth.val.vlan_tag;
		mlx4_spec->eth.vlan_tag_msk = ib_spec->eth.mask.vlan_tag;
		break;
	case IB_FLOW_SPEC_IB:
		type = MLX4_NET_TRANS_RULE_ID_IB;
		mlx4_spec->ib.l3_qpn =
			cpu_to_be32(qp_num);
		mlx4_spec->ib.qpn_mask =
			cpu_to_be32(MLX4_IB_FLOW_QPN_MASK);
		break;

	case IB_FLOW_SPEC_IPV4:
		type = MLX4_NET_TRANS_RULE_ID_IPV4;
		mlx4_spec->ipv4.src_ip = ib_spec->ipv4.val.src_ip;
		mlx4_spec->ipv4.src_ip_msk = ib_spec->ipv4.mask.src_ip;
		mlx4_spec->ipv4.dst_ip = ib_spec->ipv4.val.dst_ip;
		mlx4_spec->ipv4.dst_ip_msk = ib_spec->ipv4.mask.dst_ip;
		break;

	case IB_FLOW_SPEC_TCP:
	case IB_FLOW_SPEC_UDP:
		type = ib_spec->type == IB_FLOW_SPEC_TCP ?
					MLX4_NET_TRANS_RULE_ID_TCP :
					MLX4_NET_TRANS_RULE_ID_UDP;
		mlx4_spec->tcp_udp.dst_port = ib_spec->tcp_udp.val.dst_port;
		mlx4_spec->tcp_udp.dst_port_msk = ib_spec->tcp_udp.mask.dst_port;
		mlx4_spec->tcp_udp.src_port = ib_spec->tcp_udp.val.src_port;
		mlx4_spec->tcp_udp.src_port_msk = ib_spec->tcp_udp.mask.src_port;
		break;

	default:
		return -EINVAL;
	}
	if (mlx4_map_sw_to_hw_steering_id(dev, type) < 0 ||
	    mlx4_hw_rule_sz(dev, type) < 0)
		return -EINVAL;
	mlx4_spec->id = cpu_to_be16(mlx4_map_sw_to_hw_steering_id(dev, type));
	mlx4_spec->size = mlx4_hw_rule_sz(dev, type) >> 2;
	return mlx4_hw_rule_sz(dev, type);
}
struct default_rules {
	__u32 mandatory_fields[IB_FLOW_SPEC_SUPPORT_LAYERS];
	__u32 mandatory_not_fields[IB_FLOW_SPEC_SUPPORT_LAYERS];
	__u32 rules_create_list[IB_FLOW_SPEC_SUPPORT_LAYERS];
	__u8  link_layer;
};
static const struct default_rules default_table[] = {
	{
		.mandatory_fields = {IB_FLOW_SPEC_IPV4},
		.mandatory_not_fields = {IB_FLOW_SPEC_ETH},
		.rules_create_list = {IB_FLOW_SPEC_IB},
		.link_layer = IB_LINK_LAYER_INFINIBAND
	}
};
static int __mlx4_ib_default_rules_match(struct ib_qp *qp,
					 struct ib_flow_attr *flow_attr)
{
	int i, j, k;
	void *ib_flow;
	const struct default_rules *pdefault_rules = default_table;
	u8 link_layer = rdma_port_get_link_layer(qp->device, flow_attr->port);

	for (i = 0; i < sizeof(default_table)/sizeof(default_table[0]); i++,
	     pdefault_rules++) {
		__u32 field_types[IB_FLOW_SPEC_SUPPORT_LAYERS];
		memset(&field_types, 0, sizeof(field_types));

		if (link_layer != pdefault_rules->link_layer)
			continue;

		ib_flow = flow_attr + 1;
		/* we assume the specs are sorted */
		for (j = 0, k = 0; k < IB_FLOW_SPEC_SUPPORT_LAYERS &&
		     j < flow_attr->num_of_specs; k++) {
			union ib_flow_spec *current_flow =
				(union ib_flow_spec *)ib_flow;

			/* same layer but different type */
			if (((current_flow->type & IB_FLOW_SPEC_LAYER_MASK) ==
			     (pdefault_rules->mandatory_fields[k] &
			      IB_FLOW_SPEC_LAYER_MASK)) &&
			    (current_flow->type !=
			     pdefault_rules->mandatory_fields[k]))
				goto out;

			/* same layer, try match next one */
			if (current_flow->type ==
			    pdefault_rules->mandatory_fields[k]) {
				j++;
				ib_flow +=
					((union ib_flow_spec *)ib_flow)->size;
			}
		}

		ib_flow = flow_attr + 1;
		for (j = 0; j < flow_attr->num_of_specs;
		     j++, ib_flow += ((union ib_flow_spec *)ib_flow)->size)
			for (k = 0; k < IB_FLOW_SPEC_SUPPORT_LAYERS; k++)
				/* same layer and same type */
				if (((union ib_flow_spec *)ib_flow)->type ==
				    pdefault_rules->mandatory_not_fields[k])
					goto out;

		return i;
	}
out:
	return -1;
}
static int __mlx4_ib_create_default_rules(
		struct mlx4_ib_dev *mdev,
		struct ib_qp *qp,
		const struct default_rules *pdefault_rules,
		struct _rule_hw *mlx4_spec) {
	int size = 0;
	int i;

	for (i = 0; i < sizeof(pdefault_rules->rules_create_list)/
			sizeof(pdefault_rules->rules_create_list[0]); i++) {
		int ret;
		union ib_flow_spec ib_spec;
		switch (pdefault_rules->rules_create_list[i]) {
		case 0:
			/* no rule */
			continue;
		case IB_FLOW_SPEC_IB:
			ib_spec.type = IB_FLOW_SPEC_IB;
			ib_spec.size = sizeof(struct ib_flow_spec_ib);

			break;
		default:
			/* invalid rule */
			return -EINVAL;
		}
		/* We must put empty rule, qpn is being ignored */
		ret = parse_flow_attr(mdev->dev, 0, &ib_spec,
				      mlx4_spec);
		if (ret < 0) {
			pr_info("invalid parsing\n");
			return -EINVAL;
		}

		mlx4_spec = (void *)mlx4_spec + ret;
		size += ret;
	}
	return size;
}
static int __mlx4_ib_create_flow(struct ib_qp *qp, struct ib_flow_attr *flow_attr,
			  int domain,
			  enum mlx4_net_trans_promisc_mode flow_type,
			  u64 *reg_id)
{
	int ret, i;
	int size = 0;
	void *ib_flow;
	struct mlx4_ib_dev *mdev = to_mdev(qp->device);
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_net_trans_rule_hw_ctrl *ctrl;
	int default_flow;

	static const u16 __mlx4_domain[] = {
		[IB_FLOW_DOMAIN_USER] = MLX4_DOMAIN_UVERBS,
		[IB_FLOW_DOMAIN_ETHTOOL] = MLX4_DOMAIN_ETHTOOL,
		[IB_FLOW_DOMAIN_RFS] = MLX4_DOMAIN_RFS,
		[IB_FLOW_DOMAIN_NIC] = MLX4_DOMAIN_NIC,
	};

	if (flow_attr->priority > MLX4_IB_FLOW_MAX_PRIO) {
		pr_err("Invalid priority value %d\n", flow_attr->priority);
		return -EINVAL;
	}

	if (domain >= IB_FLOW_DOMAIN_NUM) {
		pr_err("Invalid domain value %d\n", domain);
		return -EINVAL;
	}

	if (mlx4_map_sw_to_hw_steering_mode(mdev->dev, flow_type) < 0)
		return -EINVAL;

	mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	ctrl = mailbox->buf;

	ctrl->prio = cpu_to_be16(__mlx4_domain[domain] |
				 flow_attr->priority);
	ctrl->type = mlx4_map_sw_to_hw_steering_mode(mdev->dev, flow_type);
	ctrl->port = flow_attr->port;
	ctrl->qpn = cpu_to_be32(qp->qp_num);

	ib_flow = flow_attr + 1;
	size += sizeof(struct mlx4_net_trans_rule_hw_ctrl);
	/* Add default flows */
	default_flow = __mlx4_ib_default_rules_match(qp, flow_attr);
	if (default_flow >= 0) {
		ret = __mlx4_ib_create_default_rules(
				mdev, qp, default_table + default_flow,
				mailbox->buf + size);
		if (ret < 0) {
			mlx4_free_cmd_mailbox(mdev->dev, mailbox);
			return -EINVAL;
		}
		size += ret;
	}
	for (i = 0; i < flow_attr->num_of_specs; i++) {
		ret = parse_flow_attr(mdev->dev, qp->qp_num, ib_flow,
				      mailbox->buf + size);
		if (ret < 0) {
			mlx4_free_cmd_mailbox(mdev->dev, mailbox);
			return -EINVAL;
		}
		ib_flow += ((union ib_flow_spec *) ib_flow)->size;
		size += ret;
	}

	ret = mlx4_cmd_imm(mdev->dev, mailbox->dma, reg_id, size >> 2, 0,
			   MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
			   MLX4_CMD_NATIVE);
	if (ret == -ENOMEM)
		pr_err("mcg table is full. Fail to register network rule.\n");
	else if (ret == -ENXIO)
		pr_err("Device managed flow steering is disabled. Fail to register network rule.\n");
	else if (ret)
		pr_err("Invalid argument. Fail to register network rule.\n");

	mlx4_free_cmd_mailbox(mdev->dev, mailbox);
	return ret;
}
static int __mlx4_ib_destroy_flow(struct mlx4_dev *dev, u64 reg_id)
{
	int err;
	err = mlx4_cmd(dev, reg_id, 0, 0,
		       MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
		       MLX4_CMD_NATIVE);
	if (err)
		pr_err("Fail to detach network rule. registration id = 0x%llx\n",
		       reg_id);
	return err;
}
static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
				    struct ib_flow_attr *flow_attr,
				    int domain)
{
	int err = 0, i = 0;
	struct mlx4_ib_flow *mflow;
	enum mlx4_net_trans_promisc_mode type[2];

	memset(type, 0, sizeof(type));

	mflow = kzalloc(sizeof(*mflow), GFP_KERNEL);
	if (!mflow) {
		err = -ENOMEM;
		goto err_free;
	}

	switch (flow_attr->type) {
	case IB_FLOW_ATTR_NORMAL:
		type[0] = MLX4_FS_REGULAR;
		break;

	case IB_FLOW_ATTR_ALL_DEFAULT:
		type[0] = MLX4_FS_ALL_DEFAULT;
		break;

	case IB_FLOW_ATTR_MC_DEFAULT:
		type[0] = MLX4_FS_MC_DEFAULT;
		break;

	case IB_FLOW_ATTR_SNIFFER:
		type[0] = MLX4_FS_UC_SNIFFER;
		type[1] = MLX4_FS_MC_SNIFFER;
		break;

	default:
		err = -EINVAL;
		goto err_free;
	}

	while (i < ARRAY_SIZE(type) && type[i]) {
		err = __mlx4_ib_create_flow(qp, flow_attr, domain, type[i],
					    &mflow->reg_id[i]);
		if (err)
			goto err_free;
		i++;
	}

	return &mflow->ibflow;

err_free:
	kfree(mflow);
	return ERR_PTR(err);
}
static int mlx4_ib_destroy_flow(struct ib_flow *flow_id)
{
	int err, ret = 0;
	int i = 0;
	struct mlx4_ib_dev *mdev = to_mdev(flow_id->qp->device);
	struct mlx4_ib_flow *mflow = to_mflow(flow_id);

	while (i < ARRAY_SIZE(mflow->reg_id) && mflow->reg_id[i]) {
		err = __mlx4_ib_destroy_flow(mdev->dev, mflow->reg_id[i]);
		if (err)
			ret = err;
		i++;
	}

	kfree(mflow);
	return ret;
}
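
/*
 * Multicast attach: register the QP with the firmware multicast group
 * and remember the GID.  Under device-managed steering the returned
 * reg_id is kept on mqp->steering_rules so detach can find it later.
 */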
static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	int err;
	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
	struct mlx4_ib_qp *mqp = to_mqp(ibqp);
	u64 reg_id;
	struct mlx4_ib_steering *ib_steering = NULL;
	enum mlx4_protocol prot = (gid->raw[1] == 0x0e) ?
		MLX4_PROT_IB_IPV4 : MLX4_PROT_IB_IPV6;

	if (mdev->dev->caps.steering_mode ==
	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
		ib_steering = kmalloc(sizeof(*ib_steering), GFP_KERNEL);
		if (!ib_steering)
			return -ENOMEM;
	}

	err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw, mqp->port,
				    !!(mqp->flags &
				       MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
				    prot, &reg_id);
	if (err)
		goto err_malloc;

	err = add_gid_entry(ibqp, gid);
	if (err)
		goto err_add;

	if (ib_steering) {
		memcpy(ib_steering->gid.raw, gid->raw, 16);
		ib_steering->reg_id = reg_id;
		mutex_lock(&mqp->mutex);
		list_add(&ib_steering->list, &mqp->steering_rules);
		mutex_unlock(&mqp->mutex);
	}
	return 0;

err_add:
	mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
			      prot, reg_id);
err_malloc:
	kfree(ib_steering);

	return err;
}
static struct mlx4_ib_gid_entry *find_gid_entry(struct mlx4_ib_qp *qp, u8 *raw)
{
	struct mlx4_ib_gid_entry *ge;
	struct mlx4_ib_gid_entry *tmp;
	struct mlx4_ib_gid_entry *ret = NULL;

	list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) {
		if (!memcmp(raw, ge->gid.raw, 16)) {
			ret = ge;
			break;
		}
	}

	return ret;
}
static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	int err;
	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
	struct mlx4_ib_qp *mqp = to_mqp(ibqp);
	struct net_device *ndev;
	struct mlx4_ib_gid_entry *ge;
	u64 reg_id = 0;
	enum mlx4_protocol prot = (gid->raw[1] == 0x0e) ?
		MLX4_PROT_IB_IPV4 : MLX4_PROT_IB_IPV6;

	if (mdev->dev->caps.steering_mode ==
	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
		struct mlx4_ib_steering *ib_steering;

		mutex_lock(&mqp->mutex);
		list_for_each_entry(ib_steering, &mqp->steering_rules, list) {
			if (!memcmp(ib_steering->gid.raw, gid->raw, 16)) {
				list_del(&ib_steering->list);
				break;
			}
		}
		mutex_unlock(&mqp->mutex);
		if (&ib_steering->list == &mqp->steering_rules) {
			pr_err("Couldn't find reg_id for mgid. Steering rule is left attached\n");
			return -EINVAL;
		}
		reg_id = ib_steering->reg_id;
		kfree(ib_steering);
	}

	err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
				    prot, reg_id);
	if (err)
		return err;

	mutex_lock(&mqp->mutex);
	ge = find_gid_entry(mqp, gid->raw);
	if (ge) {
		spin_lock(&mdev->iboe.lock);
		ndev = ge->added ? mdev->iboe.netdevs[ge->port - 1] : NULL;
		if (ndev)
			dev_hold(ndev);
		spin_unlock(&mdev->iboe.lock);
		if (ndev)
			dev_put(ndev);
		list_del(&ge->list);
		kfree(ge);
	} else
		pr_warn("could not find mgid entry\n");

	mutex_unlock(&mqp->mutex);

	return 0;
}
static int init_node_data(struct mlx4_ib_dev *dev)
{
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
	int err = -ENOMEM;

	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;
	if (mlx4_is_master(dev->dev))
		mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;

	err = mlx4_MAD_IFC(dev, mad_ifc_flags, 1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(dev->ib_dev.node_desc, out_mad->data, 64);

	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

	err = mlx4_MAD_IFC(dev, mad_ifc_flags, 1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	dev->dev->rev_id = be32_to_cpup((__be32 *) (out_mad->data + 32));
	memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8);

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}
static ssize_t show_hca(struct device *device, struct device_attribute *attr,
			char *buf)
{
	struct mlx4_ib_dev *dev =
		container_of(device, struct mlx4_ib_dev, ib_dev.dev);
	return sprintf(buf, "MT%d\n", dev->dev->pdev->device);
}

static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr,
			   char *buf)
{
	struct mlx4_ib_dev *dev =
		container_of(device, struct mlx4_ib_dev, ib_dev.dev);
	return sprintf(buf, "%d.%d.%d\n", (int) (dev->dev->caps.fw_ver >> 32),
		       (int) (dev->dev->caps.fw_ver >> 16) & 0xffff,
		       (int) dev->dev->caps.fw_ver & 0xffff);
}

static ssize_t show_rev(struct device *device, struct device_attribute *attr,
			char *buf)
{
	struct mlx4_ib_dev *dev =
		container_of(device, struct mlx4_ib_dev, ib_dev.dev);
	return sprintf(buf, "%x\n", dev->dev->rev_id);
}

static ssize_t show_board(struct device *device, struct device_attribute *attr,
			  char *buf)
{
	struct mlx4_ib_dev *dev =
		container_of(device, struct mlx4_ib_dev, ib_dev.dev);
	return sprintf(buf, "%.*s\n", MLX4_BOARD_ID_LEN,
		       dev->dev->board_id);
}

static DEVICE_ATTR(hw_rev,   S_IRUGO, show_rev,    NULL);
static DEVICE_ATTR(fw_ver,   S_IRUGO, show_fw_ver, NULL);
static DEVICE_ATTR(hca_type, S_IRUGO, show_hca,    NULL);
static DEVICE_ATTR(board_id, S_IRUGO, show_board,  NULL);

static struct device_attribute *mlx4_class_attributes[] = {
	&dev_attr_hw_rev,
	&dev_attr_fw_ver,
	&dev_attr_hca_type,
	&dev_attr_board_id
};
static void mlx4_addrconf_ifid_eui48(u8 *eui, u16 vlan_id,
				     struct net_device *dev)
{
	memcpy(eui, dev->dev_addr, 3);
	memcpy(eui + 5, dev->dev_addr + 3, 3);
	if (vlan_id < 0x1000) {
		eui[3] = vlan_id >> 8;
		eui[4] = vlan_id & 0xff;
	} else {
		eui[3] = 0xff;
		eui[4] = 0xfe;
	}
	eui[0] ^= 2;
}
static void update_gids_task(struct work_struct *work)
{
	struct update_gid_work *gw = container_of(work, struct update_gid_work, work);
	struct mlx4_cmd_mailbox *mailbox;
	union ib_gid *gids;
	int err;
	struct mlx4_dev	*dev = gw->dev->dev;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		pr_warn("update gid table failed %ld\n", PTR_ERR(mailbox));
		return;
	}

	gids = mailbox->buf;
	memcpy(gids, gw->gids, sizeof gw->gids);

	err = mlx4_cmd(dev, mailbox->dma, MLX4_SET_PORT_GID_TABLE << 8 | gw->port,
		       1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
		       MLX4_CMD_WRAPPED);
	if (err)
		pr_warn("set port command failed\n");
	else
		mlx4_ib_dispatch_event(gw->dev, gw->port, IB_EVENT_GID_CHANGE);

	mlx4_free_cmd_mailbox(dev, mailbox);
	kfree(gw);
}
static void reset_gids_task(struct work_struct *work)
{
	struct update_gid_work *gw =
			container_of(work, struct update_gid_work, work);
	struct mlx4_cmd_mailbox *mailbox;
	union ib_gid *gids;
	int err;
	struct mlx4_dev	*dev = gw->dev->dev;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		pr_warn("reset gid table failed\n");
		goto free;
	}

	gids = mailbox->buf;
	memcpy(gids, gw->gids, sizeof(gw->gids));

	if (mlx4_ib_port_link_layer(&gw->dev->ib_dev, gw->port) ==
				    IB_LINK_LAYER_ETHERNET) {
		err = mlx4_cmd(dev, mailbox->dma,
			       MLX4_SET_PORT_GID_TABLE << 8 | gw->port,
			       1, MLX4_CMD_SET_PORT,
			       MLX4_CMD_TIME_CLASS_B,
			       MLX4_CMD_WRAPPED);
		if (err)
			pr_warn("set port %d command failed\n", gw->port);
	}

	mlx4_free_cmd_mailbox(dev, mailbox);
free:
	kfree(gw);
}
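
/*
 * Update the cached Ethernet GID table for a port (add a GID, or clear
 * it when 'clear' is set) and, if anything changed, push the whole table
 * to firmware from a workqueue, since this may run in atomic context.
 */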
static int update_gid_table(struct mlx4_ib_dev *dev, int port,
			    union ib_gid *gid, int clear,
			    int default_gid)
{
	struct update_gid_work *work;
	int i;
	int need_update = 0;
	int free = -1;
	int found = -1;
	int max_gids;

	if (default_gid) {
		free = 0;
	} else {
		max_gids = dev->dev->caps.gid_table_len[port];
		for (i = 1; i < max_gids; ++i) {
			if (!memcmp(&dev->iboe.gid_table[port - 1][i], gid,
				    sizeof(*gid)))
				found = i;

			if (clear) {
				if (found >= 0) {
					need_update = 1;
					dev->iboe.gid_table[port - 1][found] =
						zgid;
					break;
				}
			} else {
				if (found >= 0)
					break;

				if (free < 0 &&
				    !memcmp(&dev->iboe.gid_table[port - 1][i],
					    &zgid, sizeof(*gid)))
					free = i;
			}
		}
	}

	if (found == -1 && !clear && free >= 0) {
		dev->iboe.gid_table[port - 1][free] = *gid;
		need_update = 1;
	}

	if (!need_update)
		return 0;

	work = kzalloc(sizeof(*work), GFP_ATOMIC);
	if (!work)
		return -ENOMEM;

	memcpy(work->gids, dev->iboe.gid_table[port - 1], sizeof(work->gids));
	INIT_WORK(&work->work, update_gids_task);
	work->port = port;
	work->dev = dev;
	queue_work(wq, &work->work);

	return 0;
}
static void mlx4_make_default_gid(struct net_device *dev, union ib_gid *gid)
{
	gid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
	mlx4_addrconf_ifid_eui48(&gid->raw[8], 0xffff, dev);
}
static int reset_gid_table(struct mlx4_ib_dev *dev, u8 port)
{
	struct update_gid_work *work;

	work = kzalloc(sizeof(*work), GFP_ATOMIC);
	if (!work)
		return -ENOMEM;

	memset(dev->iboe.gid_table[port - 1], 0, sizeof(work->gids));
	memset(work->gids, 0, sizeof(work->gids));
	INIT_WORK(&work->work, reset_gids_task);
	work->dev = dev;
	work->port = port;
	queue_work(wq, &work->work);
	return 0;
}
static int mlx4_ib_addr_event(int event, struct net_device *event_netdev,
			      struct mlx4_ib_dev *ibdev, union ib_gid *gid)
{
	struct mlx4_ib_iboe *iboe;
	int port = 0;
	struct net_device *real_dev = rdma_vlan_dev_real_dev(event_netdev) ?
				rdma_vlan_dev_real_dev(event_netdev) :
				event_netdev;
	union ib_gid default_gid;

	mlx4_make_default_gid(real_dev, &default_gid);

	if (!memcmp(gid, &default_gid, sizeof(*gid)))
		return 0;

	if (event != NETDEV_DOWN && event != NETDEV_UP)
		return 0;

	if ((real_dev != event_netdev) &&
	    (event == NETDEV_DOWN) &&
	    rdma_link_local_addr((struct in6_addr *)gid))
		return 0;

	iboe = &ibdev->iboe;
	spin_lock(&iboe->lock);

	for (port = 1; port <= ibdev->dev->caps.num_ports; ++port)
		if ((netif_is_bond_master(real_dev) &&
		     (real_dev == iboe->masters[port - 1])) ||
		    (!netif_is_bond_master(real_dev) &&
		     (real_dev == iboe->netdevs[port - 1])))
			update_gid_table(ibdev, port, gid,
					 event == NETDEV_DOWN, 0);

	spin_unlock(&iboe->lock);
	return 0;
}
static u8 mlx4_ib_get_dev_port(struct net_device *dev,
			       struct mlx4_ib_dev *ibdev)
{
	u8 port = 0;
	struct mlx4_ib_iboe *iboe;
	struct net_device *real_dev = rdma_vlan_dev_real_dev(dev) ?
				rdma_vlan_dev_real_dev(dev) : dev;

	iboe = &ibdev->iboe;

	for (port = 1; port <= ibdev->dev->caps.num_ports; ++port)
		if ((netif_is_bond_master(real_dev) &&
		     (real_dev == iboe->masters[port - 1])) ||
		    (!netif_is_bond_master(real_dev) &&
		     (real_dev == iboe->netdevs[port - 1])))
			break;

	if ((port == 0) || (port > ibdev->dev->caps.num_ports))
		return 0;
	else
		return port;
}
static int mlx4_ib_inet_event(struct notifier_block *this, unsigned long event,
			      void *ptr)
{
	struct mlx4_ib_dev *ibdev;
	struct in_ifaddr *ifa = ptr;
	union ib_gid gid;
	struct net_device *event_netdev = ifa->ifa_dev->dev;

	ipv6_addr_set_v4mapped(ifa->ifa_address, (struct in6_addr *)&gid);

	ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb_inet);

	mlx4_ib_addr_event(event, event_netdev, ibdev, &gid);
	return NOTIFY_DONE;
}
#if IS_ENABLED(CONFIG_IPV6)
static int mlx4_ib_inet6_event(struct notifier_block *this, unsigned long event,
			       void *ptr)
{
	struct mlx4_ib_dev *ibdev;
	struct inet6_ifaddr *ifa = ptr;
	union ib_gid *gid = (union ib_gid *)&ifa->addr;
	struct net_device *event_netdev = ifa->idev->dev;

	ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb_inet6);

	mlx4_ib_addr_event(event, event_netdev, ibdev, gid);
	return NOTIFY_DONE;
}
#endif
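
/*
 * React to a MAC address change on the paired net_device: register the
 * new source MAC, repoint the proxy QP1's primary path at it via
 * UPDATE_QP, and unregister whichever MAC (old or new) ends up unused.
 */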
#define MLX4_IB_INVALID_MAC	((u64)-1)
static void mlx4_ib_update_qps(struct mlx4_ib_dev *ibdev,
			       struct net_device *dev,
			       int port)
{
	u64 new_smac = 0;
	u64 release_mac = MLX4_IB_INVALID_MAC;
	struct mlx4_ib_qp *qp;

	read_lock(&dev_base_lock);
	new_smac = mlx4_mac_to_u64(dev->dev_addr);
	read_unlock(&dev_base_lock);

	mutex_lock(&ibdev->qp1_proxy_lock[port - 1]);
	qp = ibdev->qp1_proxy[port - 1];
	if (qp) {
		int new_smac_index;
		u64 old_smac = qp->pri.smac;
		struct mlx4_update_qp_params update_params;

		if (new_smac == old_smac)
			goto unlock;

		new_smac_index = mlx4_register_mac(ibdev->dev, port, new_smac);

		if (new_smac_index < 0)
			goto unlock;

		update_params.smac_index = new_smac_index;
		if (mlx4_update_qp(ibdev->dev, &qp->mqp, MLX4_UPDATE_QP_SMAC,
				   &update_params)) {
			release_mac = new_smac;
			goto unlock;
		}

		qp->pri.smac = new_smac;
		qp->pri.smac_index = new_smac_index;

		release_mac = old_smac;
	}

unlock:
	mutex_unlock(&ibdev->qp1_proxy_lock[port - 1]);
	if (release_mac != MLX4_IB_INVALID_MAC)
		mlx4_unregister_mac(ibdev->dev, port, release_mac);
}
static void mlx4_ib_get_dev_addr(struct net_device *dev,
				 struct mlx4_ib_dev *ibdev, u8 port)
{
	struct in_device *in_dev;
#if IS_ENABLED(CONFIG_IPV6)
	struct inet6_dev *in6_dev;
	union ib_gid  *pgid;
	struct inet6_ifaddr *ifp;
#endif
	union ib_gid gid;

	if ((port == 0) || (port > ibdev->dev->caps.num_ports))
		return;

	/* IPv4 gids */
	in_dev = in_dev_get(dev);
	if (in_dev) {
		for_ifa(in_dev) {
			/*ifa->ifa_address;*/
			ipv6_addr_set_v4mapped(ifa->ifa_address,
					       (struct in6_addr *)&gid);
			update_gid_table(ibdev, port, &gid, 0, 0);
		}
		endfor_ifa(in_dev);
		in_dev_put(in_dev);
	}
#if IS_ENABLED(CONFIG_IPV6)
	/* IPv6 gids */
	in6_dev = in6_dev_get(dev);
	if (in6_dev) {
		read_lock_bh(&in6_dev->lock);
		list_for_each_entry(ifp, &in6_dev->addr_list, if_list) {
			pgid = (union ib_gid *)&ifp->addr;
			update_gid_table(ibdev, port, pgid, 0, 0);
		}
		read_unlock_bh(&in6_dev->lock);
		in6_dev_put(in6_dev);
	}
#endif
}
static void mlx4_ib_set_default_gid(struct mlx4_ib_dev *ibdev,
				    struct net_device *dev, u8 port)
{
	union ib_gid gid;
	mlx4_make_default_gid(dev, &gid);
	update_gid_table(ibdev, port, &gid, 0, 1);
}
static int mlx4_ib_init_gid_table(struct mlx4_ib_dev *ibdev)
{
	struct net_device *dev;
	struct mlx4_ib_iboe *iboe = &ibdev->iboe;
	int i;

	for (i = 1; i <= ibdev->num_ports; ++i)
		if (reset_gid_table(ibdev, i))
			return -1;

	read_lock(&dev_base_lock);
	spin_lock(&iboe->lock);

	for_each_netdev(&init_net, dev) {
		u8 port = mlx4_ib_get_dev_port(dev, ibdev);
		if (port)
			mlx4_ib_get_dev_addr(dev, ibdev, port);
	}

	spin_unlock(&iboe->lock);
	read_unlock(&dev_base_lock);

	return 0;
}
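
/*
 * Resynchronize per-port netdev/bonding-master pointers and GID tables
 * after a netdev event, and note whether the event requires a QP1
 * source-MAC update on some port.
 */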
static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev,
				 struct net_device *dev,
				 unsigned long event)
{
	struct mlx4_ib_iboe *iboe;
	int update_qps_port = -1;
	int port;

	iboe = &ibdev->iboe;

	spin_lock(&iboe->lock);
	mlx4_foreach_ib_transport_port(port, ibdev->dev) {
		enum ib_port_state	port_state = IB_PORT_NOP;
		struct net_device *old_master = iboe->masters[port - 1];
		struct net_device *curr_netdev;
		struct net_device *curr_master;

		iboe->netdevs[port - 1] =
			mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port);
		if (iboe->netdevs[port - 1])
			mlx4_ib_set_default_gid(ibdev,
						iboe->netdevs[port - 1], port);
		curr_netdev = iboe->netdevs[port - 1];

		if (iboe->netdevs[port - 1] &&
		    netif_is_bond_slave(iboe->netdevs[port - 1])) {
			iboe->masters[port - 1] = netdev_master_upper_dev_get(
				iboe->netdevs[port - 1]);
		} else {
			iboe->masters[port - 1] = NULL;
		}
		curr_master = iboe->masters[port - 1];

		if (dev == iboe->netdevs[port - 1] &&
		    (event == NETDEV_CHANGEADDR || event == NETDEV_REGISTER ||
		     event == NETDEV_UP || event == NETDEV_CHANGE))
			update_qps_port = port;

		if (curr_netdev) {
			port_state = (netif_running(curr_netdev) && netif_carrier_ok(curr_netdev)) ?
						IB_PORT_ACTIVE : IB_PORT_DOWN;
			mlx4_ib_set_default_gid(ibdev, curr_netdev, port);
		} else {
			reset_gid_table(ibdev, port);
		}
		/* if using bonding/team and a slave port is down, we don't want the bond IP
		 * based gids in the table since flows that select port by gid may get
		 * the down port.
		 */
		if (curr_master && (port_state == IB_PORT_DOWN)) {
			reset_gid_table(ibdev, port);
			mlx4_ib_set_default_gid(ibdev, curr_netdev, port);
		}
		/* if bonding is used it is possible that we add it to masters
		 * only after IP address is assigned to the net bonding
		 * interface.
		 */
		if (curr_master && (old_master != curr_master)) {
			reset_gid_table(ibdev, port);
			mlx4_ib_set_default_gid(ibdev, curr_netdev, port);
			mlx4_ib_get_dev_addr(curr_master, ibdev, port);
		}

		if (!curr_master && (old_master != curr_master)) {
			reset_gid_table(ibdev, port);
			mlx4_ib_set_default_gid(ibdev, curr_netdev, port);
			mlx4_ib_get_dev_addr(curr_netdev, ibdev, port);
		}
	}

	spin_unlock(&iboe->lock);

	if (update_qps_port > 0)
		mlx4_ib_update_qps(ibdev, dev, update_qps_port);
}
static int mlx4_ib_netdev_event(struct notifier_block *this,
				unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct mlx4_ib_dev *ibdev;

	if (!net_eq(dev_net(dev), &init_net))
		return NOTIFY_DONE;

	ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb);
	mlx4_ib_scan_netdevs(ibdev, dev, event);

	return NOTIFY_DONE;
}
static void init_pkeys(struct mlx4_ib_dev *ibdev)
{
	int port;
	int slave;
	int i;

	if (mlx4_is_master(ibdev->dev)) {
		for (slave = 0; slave <= ibdev->dev->num_vfs; ++slave) {
			for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) {
				for (i = 0;
				     i < ibdev->dev->phys_caps.pkey_phys_table_len[port];
				     ++i) {
					ibdev->pkeys.virt2phys_pkey[slave][port - 1][i] =
					/* master has the identity virt2phys pkey mapping */
						(slave == mlx4_master_func_num(ibdev->dev) || !i) ? i :
							ibdev->dev->phys_caps.pkey_phys_table_len[port] - 1;
					mlx4_sync_pkey_table(ibdev->dev, slave, port, i,
							     ibdev->pkeys.virt2phys_pkey[slave][port - 1][i]);
				}
			}
		}
		/* initialize pkey cache */
		for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) {
			for (i = 0;
			     i < ibdev->dev->phys_caps.pkey_phys_table_len[port];
			     ++i)
				ibdev->pkeys.phys_pkey_cache[port-1][i] =
					(i) ? 0 : 0xFFFF;
		}
	}
}
static void mlx4_ib_alloc_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
{
	char name[80];
	int eq_per_port = 0;
	int added_eqs = 0;
	int total_eqs = 0;
	int i, j, eq;

	/* Legacy mode or comp_pool is not large enough */
	if (dev->caps.comp_pool == 0 ||
	    dev->caps.num_ports > dev->caps.comp_pool)
		return;

	eq_per_port = rounddown_pow_of_two(dev->caps.comp_pool/
					   dev->caps.num_ports);

	/* Init eq table */
	added_eqs = 0;
	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
		added_eqs += eq_per_port;

	total_eqs = dev->caps.num_comp_vectors + added_eqs;

	ibdev->eq_table = kzalloc(total_eqs * sizeof(int), GFP_KERNEL);
	if (!ibdev->eq_table)
		return;

	ibdev->eq_added = added_eqs;

	eq = 0;
	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB) {
		for (j = 0; j < eq_per_port; j++) {
			snprintf(name, sizeof(name), "mlx4-ib-%d-%d@%s",
				 i, j, dev->pdev->bus->name);
			/* Set IRQ for specific name (per ring) */
			if (mlx4_assign_eq(dev, name, NULL,
					   &ibdev->eq_table[eq])) {
				/* Use legacy (same as mlx4_en driver) */
				pr_warn("Can't allocate EQ %d; reverting to legacy\n", eq);
				ibdev->eq_table[eq] =
					(eq % dev->caps.num_comp_vectors);
			}
			eq++;
		}
	}

	/* Fill the rest of the vector with legacy EQ */
	for (i = 0, eq = added_eqs; i < dev->caps.num_comp_vectors; i++)
		ibdev->eq_table[eq++] = i;

	/* Advertise the new number of EQs to clients */
	ibdev->ib_dev.num_comp_vectors = total_eqs;
}
static void mlx4_ib_free_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
{
	int i;

	/* no additional eqs were added */
	if (!ibdev->eq_table)
		return;

	/* Reset the advertised EQ number */
	ibdev->ib_dev.num_comp_vectors = dev->caps.num_comp_vectors;

	/* Free only the added eqs */
	for (i = 0; i < ibdev->eq_added; i++) {
		/* Don't free legacy eqs if used */
		if (ibdev->eq_table[i] <= dev->caps.num_comp_vectors)
			continue;
		mlx4_release_eq(dev, ibdev->eq_table[i]);
	}

	kfree(ibdev->eq_table);
}
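
/*
 * mlx4 interface entry point: allocate and register the IB device for a
 * probed HCA, wiring up verbs ops, EQs, counters, steering resources,
 * SR-IOV support and the RoCE netdev/inet notifiers.
 */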
1940 static void *mlx4_ib_add(struct mlx4_dev
*dev
)
1942 struct mlx4_ib_dev
*ibdev
;
1946 struct mlx4_ib_iboe
*iboe
;
1947 int ib_num_ports
= 0;
1949 pr_info_once("%s", mlx4_ib_version
);
1952 mlx4_foreach_ib_transport_port(i
, dev
)
1955 /* No point in registering a device with no ports... */
1959 ibdev
= (struct mlx4_ib_dev
*) ib_alloc_device(sizeof *ibdev
);
1961 dev_err(&dev
->pdev
->dev
, "Device struct alloc failed\n");
1965 iboe
= &ibdev
->iboe
;
1967 if (mlx4_pd_alloc(dev
, &ibdev
->priv_pdn
))
1970 if (mlx4_uar_alloc(dev
, &ibdev
->priv_uar
))
1973 ibdev
->uar_map
= ioremap((phys_addr_t
) ibdev
->priv_uar
.pfn
<< PAGE_SHIFT
,
1975 if (!ibdev
->uar_map
)
1977 MLX4_INIT_DOORBELL_LOCK(&ibdev
->uar_lock
);
1981 strlcpy(ibdev
->ib_dev
.name
, "mlx4_%d", IB_DEVICE_NAME_MAX
);
1982 ibdev
->ib_dev
.owner
= THIS_MODULE
;
1983 ibdev
->ib_dev
.node_type
= RDMA_NODE_IB_CA
;
1984 ibdev
->ib_dev
.local_dma_lkey
= dev
->caps
.reserved_lkey
;
1985 ibdev
->num_ports
= num_ports
;
1986 ibdev
->ib_dev
.phys_port_cnt
= ibdev
->num_ports
;
1987 ibdev
->ib_dev
.num_comp_vectors
= dev
->caps
.num_comp_vectors
;
1988 ibdev
->ib_dev
.dma_device
= &dev
->pdev
->dev
;
1990 if (dev
->caps
.userspace_caps
)
1991 ibdev
->ib_dev
.uverbs_abi_ver
= MLX4_IB_UVERBS_ABI_VERSION
;
1993 ibdev
->ib_dev
.uverbs_abi_ver
= MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION
;
1995 ibdev
->ib_dev
.uverbs_cmd_mask
=
1996 (1ull << IB_USER_VERBS_CMD_GET_CONTEXT
) |
1997 (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE
) |
1998 (1ull << IB_USER_VERBS_CMD_QUERY_PORT
) |
1999 (1ull << IB_USER_VERBS_CMD_ALLOC_PD
) |
2000 (1ull << IB_USER_VERBS_CMD_DEALLOC_PD
) |
2001 (1ull << IB_USER_VERBS_CMD_REG_MR
) |
2002 (1ull << IB_USER_VERBS_CMD_DEREG_MR
) |
2003 (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL
) |
2004 (1ull << IB_USER_VERBS_CMD_CREATE_CQ
) |
2005 (1ull << IB_USER_VERBS_CMD_RESIZE_CQ
) |
2006 (1ull << IB_USER_VERBS_CMD_DESTROY_CQ
) |
2007 (1ull << IB_USER_VERBS_CMD_CREATE_QP
) |
2008 (1ull << IB_USER_VERBS_CMD_MODIFY_QP
) |
2009 (1ull << IB_USER_VERBS_CMD_QUERY_QP
) |
2010 (1ull << IB_USER_VERBS_CMD_DESTROY_QP
) |
2011 (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST
) |
2012 (1ull << IB_USER_VERBS_CMD_DETACH_MCAST
) |
2013 (1ull << IB_USER_VERBS_CMD_CREATE_SRQ
) |
2014 (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ
) |
2015 (1ull << IB_USER_VERBS_CMD_QUERY_SRQ
) |
2016 (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ
) |
2017 (1ull << IB_USER_VERBS_CMD_CREATE_XSRQ
) |
2018 (1ull << IB_USER_VERBS_CMD_OPEN_QP
);
2020 ibdev
->ib_dev
.query_device
= mlx4_ib_query_device
;
2021 ibdev
->ib_dev
.query_port
= mlx4_ib_query_port
;
2022 ibdev
->ib_dev
.get_link_layer
= mlx4_ib_port_link_layer
;
2023 ibdev
->ib_dev
.query_gid
= mlx4_ib_query_gid
;
2024 ibdev
->ib_dev
.query_pkey
= mlx4_ib_query_pkey
;
2025 ibdev
->ib_dev
.modify_device
= mlx4_ib_modify_device
;
2026 ibdev
->ib_dev
.modify_port
= mlx4_ib_modify_port
;
2027 ibdev
->ib_dev
.alloc_ucontext
= mlx4_ib_alloc_ucontext
;
2028 ibdev
->ib_dev
.dealloc_ucontext
= mlx4_ib_dealloc_ucontext
;
2029 ibdev
->ib_dev
.mmap
= mlx4_ib_mmap
;
2030 ibdev
->ib_dev
.alloc_pd
= mlx4_ib_alloc_pd
;
2031 ibdev
->ib_dev
.dealloc_pd
= mlx4_ib_dealloc_pd
;
2032 ibdev
->ib_dev
.create_ah
= mlx4_ib_create_ah
;
2033 ibdev
->ib_dev
.query_ah
= mlx4_ib_query_ah
;
2034 ibdev
->ib_dev
.destroy_ah
= mlx4_ib_destroy_ah
;
2035 ibdev
->ib_dev
.create_srq
= mlx4_ib_create_srq
;
2036 ibdev
->ib_dev
.modify_srq
= mlx4_ib_modify_srq
;
2037 ibdev
->ib_dev
.query_srq
= mlx4_ib_query_srq
;
2038 ibdev
->ib_dev
.destroy_srq
= mlx4_ib_destroy_srq
;
2039 ibdev
->ib_dev
.post_srq_recv
= mlx4_ib_post_srq_recv
;
2040 ibdev
->ib_dev
.create_qp
= mlx4_ib_create_qp
;
2041 ibdev
->ib_dev
.modify_qp
= mlx4_ib_modify_qp
;
2042 ibdev
->ib_dev
.query_qp
= mlx4_ib_query_qp
;
2043 ibdev
->ib_dev
.destroy_qp
= mlx4_ib_destroy_qp
;
2044 ibdev
->ib_dev
.post_send
= mlx4_ib_post_send
;
2045 ibdev
->ib_dev
.post_recv
= mlx4_ib_post_recv
;
2046 ibdev
->ib_dev
.create_cq
= mlx4_ib_create_cq
;
2047 ibdev
->ib_dev
.modify_cq
= mlx4_ib_modify_cq
;
2048 ibdev
->ib_dev
.resize_cq
= mlx4_ib_resize_cq
;
2049 ibdev
->ib_dev
.destroy_cq
= mlx4_ib_destroy_cq
;
2050 ibdev
->ib_dev
.poll_cq
= mlx4_ib_poll_cq
;
2051 ibdev
->ib_dev
.req_notify_cq
= mlx4_ib_arm_cq
;
2052 ibdev
->ib_dev
.get_dma_mr
= mlx4_ib_get_dma_mr
;
2053 ibdev
->ib_dev
.reg_user_mr
= mlx4_ib_reg_user_mr
;
2054 ibdev
->ib_dev
.dereg_mr
= mlx4_ib_dereg_mr
;
2055 ibdev
->ib_dev
.alloc_fast_reg_mr
= mlx4_ib_alloc_fast_reg_mr
;
2056 ibdev
->ib_dev
.alloc_fast_reg_page_list
= mlx4_ib_alloc_fast_reg_page_list
;
2057 ibdev
->ib_dev
.free_fast_reg_page_list
= mlx4_ib_free_fast_reg_page_list
;
2058 ibdev
->ib_dev
.attach_mcast
= mlx4_ib_mcg_attach
;
2059 ibdev
->ib_dev
.detach_mcast
= mlx4_ib_mcg_detach
;
2060 ibdev
->ib_dev
.process_mad
= mlx4_ib_process_mad
;
2062 if (!mlx4_is_slave(ibdev
->dev
)) {
2063 ibdev
->ib_dev
.alloc_fmr
= mlx4_ib_fmr_alloc
;
2064 ibdev
->ib_dev
.map_phys_fmr
= mlx4_ib_map_phys_fmr
;
2065 ibdev
->ib_dev
.unmap_fmr
= mlx4_ib_unmap_fmr
;
2066 ibdev
->ib_dev
.dealloc_fmr
= mlx4_ib_fmr_dealloc
;
2069 if (dev
->caps
.flags
& MLX4_DEV_CAP_FLAG_MEM_WINDOW
||
2070 dev
->caps
.bmme_flags
& MLX4_BMME_FLAG_TYPE_2_WIN
) {
2071 ibdev
->ib_dev
.alloc_mw
= mlx4_ib_alloc_mw
;
2072 ibdev
->ib_dev
.bind_mw
= mlx4_ib_bind_mw
;
2073 ibdev
->ib_dev
.dealloc_mw
= mlx4_ib_dealloc_mw
;
2075 ibdev
->ib_dev
.uverbs_cmd_mask
|=
2076 (1ull << IB_USER_VERBS_CMD_ALLOC_MW
) |
2077 (1ull << IB_USER_VERBS_CMD_DEALLOC_MW
);
2080 if (dev
->caps
.flags
& MLX4_DEV_CAP_FLAG_XRC
) {
2081 ibdev
->ib_dev
.alloc_xrcd
= mlx4_ib_alloc_xrcd
;
2082 ibdev
->ib_dev
.dealloc_xrcd
= mlx4_ib_dealloc_xrcd
;
2083 ibdev
->ib_dev
.uverbs_cmd_mask
|=
2084 (1ull << IB_USER_VERBS_CMD_OPEN_XRCD
) |
2085 (1ull << IB_USER_VERBS_CMD_CLOSE_XRCD
);
2088 if (check_flow_steering_support(dev
)) {
2089 ibdev
->steering_support
= MLX4_STEERING_MODE_DEVICE_MANAGED
;
2090 ibdev
->ib_dev
.create_flow
= mlx4_ib_create_flow
;
2091 ibdev
->ib_dev
.destroy_flow
= mlx4_ib_destroy_flow
;
2093 ibdev
->ib_dev
.uverbs_ex_cmd_mask
|=
2094 (1ull << IB_USER_VERBS_EX_CMD_CREATE_FLOW
) |
2095 (1ull << IB_USER_VERBS_EX_CMD_DESTROY_FLOW
);
2098 mlx4_ib_alloc_eqs(dev
, ibdev
);
2100 spin_lock_init(&iboe
->lock
);
2102 if (init_node_data(ibdev
))
2105 for (i
= 0; i
< ibdev
->num_ports
; ++i
) {
2106 mutex_init(&ibdev
->qp1_proxy_lock
[i
]);
2107 if (mlx4_ib_port_link_layer(&ibdev
->ib_dev
, i
+ 1) ==
2108 IB_LINK_LAYER_ETHERNET
) {
2109 err
= mlx4_counter_alloc(ibdev
->dev
, &ibdev
->counters
[i
]);
2111 ibdev
->counters
[i
] = -1;
2113 ibdev
->counters
[i
] = -1;
2117 mlx4_foreach_port(i
, dev
, MLX4_PORT_TYPE_IB
)
2120 spin_lock_init(&ibdev
->sm_lock
);
2121 mutex_init(&ibdev
->cap_mask_mutex
);
2123 if (ibdev
->steering_support
== MLX4_STEERING_MODE_DEVICE_MANAGED
&&
2125 ibdev
->steer_qpn_count
= MLX4_IB_UC_MAX_NUM_QPS
;
2126 err
= mlx4_qp_reserve_range(dev
, ibdev
->steer_qpn_count
,
2127 MLX4_IB_UC_STEER_QPN_ALIGN
,
2128 &ibdev
->steer_qpn_base
);
2132 ibdev
->ib_uc_qpns_bitmap
=
2133 kmalloc(BITS_TO_LONGS(ibdev
->steer_qpn_count
) *
2136 if (!ibdev
->ib_uc_qpns_bitmap
) {
2137 dev_err(&dev
->pdev
->dev
, "bit map alloc failed\n");
2138 goto err_steer_qp_release
;
2141 bitmap_zero(ibdev
->ib_uc_qpns_bitmap
, ibdev
->steer_qpn_count
);
2143 err
= mlx4_FLOW_STEERING_IB_UC_QP_RANGE(
2144 dev
, ibdev
->steer_qpn_base
,
2145 ibdev
->steer_qpn_base
+
2146 ibdev
->steer_qpn_count
- 1);
2148 goto err_steer_free_bitmap
;
	if (ib_register_device(&ibdev->ib_dev, NULL))
		goto err_steer_free_bitmap;

	if (mlx4_ib_mad_init(ibdev))
		goto err_reg;

	if (mlx4_ib_init_sriov(ibdev))
		goto err_mad;

	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE) {
		if (!iboe->nb.notifier_call) {
			iboe->nb.notifier_call = mlx4_ib_netdev_event;
			err = register_netdevice_notifier(&iboe->nb);
			if (err) {
				iboe->nb.notifier_call = NULL;
				goto err_notif;
			}
		}
		if (!iboe->nb_inet.notifier_call) {
			iboe->nb_inet.notifier_call = mlx4_ib_inet_event;
			err = register_inetaddr_notifier(&iboe->nb_inet);
			if (err) {
				iboe->nb_inet.notifier_call = NULL;
				goto err_notif;
			}
		}
#if IS_ENABLED(CONFIG_IPV6)
		if (!iboe->nb_inet6.notifier_call) {
			iboe->nb_inet6.notifier_call = mlx4_ib_inet6_event;
			err = register_inet6addr_notifier(&iboe->nb_inet6);
			if (err) {
				iboe->nb_inet6.notifier_call = NULL;
				goto err_notif;
			}
		}
#endif

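		/*
		 * With the notifiers in place, (re)build the RoCE GID
		 * tables from the current netdev and IP address state.
		 */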
		for (i = 1 ; i <= ibdev->num_ports ; ++i)
			reset_gid_table(ibdev, i);
		rtnl_lock();
		mlx4_ib_scan_netdevs(ibdev, NULL, 0);
		rtnl_unlock();
		mlx4_ib_init_gid_table(ibdev);
	}

	for (j = 0; j < ARRAY_SIZE(mlx4_class_attributes); ++j) {
		if (device_create_file(&ibdev->ib_dev.dev,
				       mlx4_class_attributes[j]))
			goto err_notif;
	}

	ibdev->ib_active = true;

	if (mlx4_is_mfunc(ibdev->dev))
		init_pkeys(ibdev);

	/* create paravirt contexts for any VFs which are active */
	if (mlx4_is_master(ibdev->dev)) {
		for (j = 0; j < MLX4_MFUNC_MAX; j++) {
			if (j == mlx4_master_func_num(ibdev->dev))
				continue;
			if (mlx4_is_slave_active(ibdev->dev, j))
				do_slave_init(ibdev, j, 1);
		}
	}
	return ibdev;

err_notif:
	if (ibdev->iboe.nb.notifier_call) {
		if (unregister_netdevice_notifier(&ibdev->iboe.nb))
			pr_warn("failure unregistering notifier\n");
		ibdev->iboe.nb.notifier_call = NULL;
	}
	if (ibdev->iboe.nb_inet.notifier_call) {
		if (unregister_inetaddr_notifier(&ibdev->iboe.nb_inet))
			pr_warn("failure unregistering notifier\n");
		ibdev->iboe.nb_inet.notifier_call = NULL;
	}
#if IS_ENABLED(CONFIG_IPV6)
	if (ibdev->iboe.nb_inet6.notifier_call) {
		if (unregister_inet6addr_notifier(&ibdev->iboe.nb_inet6))
			pr_warn("failure unregistering notifier\n");
		ibdev->iboe.nb_inet6.notifier_call = NULL;
	}
#endif
	flush_workqueue(wq);

	mlx4_ib_close_sriov(ibdev);

err_mad:
	mlx4_ib_mad_cleanup(ibdev);

err_reg:
	ib_unregister_device(&ibdev->ib_dev);

err_steer_free_bitmap:
	kfree(ibdev->ib_uc_qpns_bitmap);

err_steer_qp_release:
	if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED)
		mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
				      ibdev->steer_qpn_count);
err_counter:
	for (; i; --i)
		if (ibdev->counters[i - 1] != -1)
			mlx4_counter_free(ibdev->dev, ibdev->counters[i - 1]);

err_map:
	iounmap(ibdev->uar_map);

err_uar:
	mlx4_uar_free(dev, &ibdev->priv_uar);

err_pd:
	mlx4_pd_free(dev, ibdev->priv_pdn);

err_dealloc:
	ib_dealloc_device(&ibdev->ib_dev);

	return NULL;
}

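/*
 * Helpers for the device-managed steering QPN pool reserved in the add path
 * above.  An illustrative caller sketch (not taken from this file):
 *
 *	int qpn, err;
 *
 *	err = mlx4_ib_steer_qp_alloc(dev, 1, &qpn);
 *	if (err)
 *		return err;
 *	...use qpn...
 *	mlx4_ib_steer_qp_free(dev, qpn, 1);
 */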
int mlx4_ib_steer_qp_alloc(struct mlx4_ib_dev *dev, int count, int *qpn)
{
	int offset;

	WARN_ON(!dev->ib_uc_qpns_bitmap);

	offset = bitmap_find_free_region(dev->ib_uc_qpns_bitmap,
					 dev->steer_qpn_count,
					 get_count_order(count));
	if (offset < 0)
		return offset;

	*qpn = dev->steer_qpn_base + offset;
	return 0;
}

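/*
 * Note that bitmap_find_free_region() allocates power-of-two regions, so
 * get_count_order(count) rounds the request up; the matching free must be
 * called with the same count.
 */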
void mlx4_ib_steer_qp_free(struct mlx4_ib_dev *dev, u32 qpn, int count)
{
	if (!qpn ||
	    dev->steering_support != MLX4_STEERING_MODE_DEVICE_MANAGED)
		return;

	BUG_ON(qpn < dev->steer_qpn_base);

	bitmap_release_region(dev->ib_uc_qpns_bitmap,
			      qpn - dev->steer_qpn_base,
			      get_count_order(count));
}

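/*
 * mlx4_ib_steer_qp_reg() attaches (is_attach != 0) or detaches an empty
 * (wildcard) IB L2 steering rule for the QP, so traffic steered to the QPN
 * actually reaches it under device-managed steering.
 */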
int mlx4_ib_steer_qp_reg(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
			 int is_attach)
{
	int err;
	size_t flow_size;
	struct ib_flow_attr *flow = NULL;
	struct ib_flow_spec_ib *ib_spec;

	if (is_attach) {
		flow_size = sizeof(struct ib_flow_attr) +
			    sizeof(struct ib_flow_spec_ib);
		flow = kzalloc(flow_size, GFP_KERNEL);
		if (!flow)
			return -ENOMEM;
		flow->port = mqp->port;
		flow->num_of_specs = 1;
		flow->size = flow_size;
		ib_spec = (struct ib_flow_spec_ib *)(flow + 1);
		ib_spec->type = IB_FLOW_SPEC_IB;
		ib_spec->size = sizeof(struct ib_flow_spec_ib);
		/* Add an empty rule for IB L2 */
		memset(&ib_spec->mask, 0, sizeof(ib_spec->mask));

		err = __mlx4_ib_create_flow(&mqp->ibqp, flow,
					    IB_FLOW_DOMAIN_NIC,
					    MLX4_FS_REGULAR,
					    &mqp->reg_id);
	} else {
		err = __mlx4_ib_destroy_flow(mdev->dev, mqp->reg_id);
	}
	kfree(flow);
	return err;
}

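/*
 * Device removal: tear down in roughly the reverse order of the add path --
 * SR-IOV demux, MAD agents, the registered ib_device, notifiers, steering
 * resources, counters, EQs and finally the UAR and PD.
 */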
static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
{
	struct mlx4_ib_dev *ibdev = ibdev_ptr;
	int p;

	mlx4_ib_close_sriov(ibdev);
	mlx4_ib_mad_cleanup(ibdev);
	ib_unregister_device(&ibdev->ib_dev);
	if (ibdev->iboe.nb.notifier_call) {
		if (unregister_netdevice_notifier(&ibdev->iboe.nb))
			pr_warn("failure unregistering notifier\n");
		ibdev->iboe.nb.notifier_call = NULL;
	}

	if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED) {
		mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
				      ibdev->steer_qpn_count);
		kfree(ibdev->ib_uc_qpns_bitmap);
	}

	if (ibdev->iboe.nb_inet.notifier_call) {
		if (unregister_inetaddr_notifier(&ibdev->iboe.nb_inet))
			pr_warn("failure unregistering notifier\n");
		ibdev->iboe.nb_inet.notifier_call = NULL;
	}
#if IS_ENABLED(CONFIG_IPV6)
	if (ibdev->iboe.nb_inet6.notifier_call) {
		if (unregister_inet6addr_notifier(&ibdev->iboe.nb_inet6))
			pr_warn("failure unregistering notifier\n");
		ibdev->iboe.nb_inet6.notifier_call = NULL;
	}
#endif

	iounmap(ibdev->uar_map);
	for (p = 0; p < ibdev->num_ports; ++p)
		if (ibdev->counters[p] != -1)
			mlx4_counter_free(ibdev->dev, ibdev->counters[p]);
	mlx4_foreach_port(p, dev, MLX4_PORT_TYPE_IB)
		mlx4_CLOSE_PORT(dev, p);

	mlx4_ib_free_eqs(dev, ibdev);

	mlx4_uar_free(dev, &ibdev->priv_uar);
	mlx4_pd_free(dev, ibdev->priv_pdn);
	ib_dealloc_device(&ibdev->ib_dev);
}

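/*
 * do_slave_init() queues one mlx4_ib_demux_work item per active port of the
 * given slave to set up (do_init = 1) or tear down (do_init = 0) its tunnel
 * QPs; each item runs on the per-port demux workqueue unless SR-IOV teardown
 * is already in progress.
 */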
static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init)
{
	struct mlx4_ib_demux_work **dm = NULL;
	struct mlx4_dev *dev = ibdev->dev;
	int i;
	unsigned long flags;
	struct mlx4_active_ports actv_ports;
	unsigned int ports;
	unsigned int first_port;

	if (!mlx4_is_master(dev))
		return;

	actv_ports = mlx4_get_active_ports(dev, slave);
	ports = bitmap_weight(actv_ports.ports, dev->caps.num_ports);
	first_port = find_first_bit(actv_ports.ports, dev->caps.num_ports);

	dm = kcalloc(ports, sizeof(*dm), GFP_ATOMIC);
	if (!dm) {
		pr_err("failed to allocate memory for tunneling qp update\n");
		goto out;
	}

	for (i = 0; i < ports; i++) {
		dm[i] = kmalloc(sizeof (struct mlx4_ib_demux_work), GFP_ATOMIC);
		if (!dm[i]) {
			pr_err("failed to allocate memory for tunneling qp update work struct\n");
			/* free only the entries allocated so far; walking all
			 * dev->caps.num_ports here would overrun the array */
			while (--i >= 0)
				kfree(dm[i]);
			goto out;
		}
	}
	/* initialize or tear down tunnel QPs for the slave */
	for (i = 0; i < ports; i++) {
		INIT_WORK(&dm[i]->work, mlx4_ib_tunnels_update_work);
		dm[i]->port = first_port + i + 1;
		dm[i]->slave = slave;
		dm[i]->do_init = do_init;
		spin_lock_irqsave(&ibdev->sriov.going_down_lock, flags);
		if (!ibdev->sriov.is_going_down)
			queue_work(ibdev->sriov.demux[i].ud_wq, &dm[i]->work);
		spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags);
	}
out:
	kfree(dm);
	return;
}

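/*
 * Event dispatcher for the mlx4 core driver: translate low-level device
 * events into ib_events.  For most events 'param' carries the port number;
 * for PORT_MGMT_CHANGE it carries a pointer to the EQE, and for the SLAVE_*
 * events it carries the slave id.
 */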
static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
			  enum mlx4_dev_event event, unsigned long param)
{
	struct ib_event ibev;
	struct mlx4_ib_dev *ibdev = to_mdev((struct ib_device *) ibdev_ptr);
	struct mlx4_eqe *eqe = NULL;
	struct ib_event_work *ew;
	int p = 0;

	if (event == MLX4_DEV_EVENT_PORT_MGMT_CHANGE)
		eqe = (struct mlx4_eqe *)param;
	else
		p = (int) param;

	switch (event) {
	case MLX4_DEV_EVENT_PORT_UP:
		if (p > ibdev->num_ports)
			return;
		if (mlx4_is_master(dev) &&
		    rdma_port_get_link_layer(&ibdev->ib_dev, p) ==
			IB_LINK_LAYER_INFINIBAND) {
			mlx4_ib_invalidate_all_guid_record(ibdev, p);
		}
		ibev.event = IB_EVENT_PORT_ACTIVE;
		break;

	case MLX4_DEV_EVENT_PORT_DOWN:
		if (p > ibdev->num_ports)
			return;
		ibev.event = IB_EVENT_PORT_ERR;
		break;

	case MLX4_DEV_EVENT_CATASTROPHIC_ERROR:
		ibdev->ib_active = false;
		ibev.event = IB_EVENT_DEVICE_FATAL;
		break;

	case MLX4_DEV_EVENT_PORT_MGMT_CHANGE:
		ew = kmalloc(sizeof *ew, GFP_ATOMIC);
		if (!ew) {
			pr_err("failed to allocate memory for events work\n");
			break;
		}

		INIT_WORK(&ew->work, handle_port_mgmt_change_event);
		memcpy(&ew->ib_eqe, eqe, sizeof *eqe);
		ew->ib = ibdev;
		/* need to queue only for port owner, which uses GEN_EQE */
		if (mlx4_is_master(dev))
			queue_work(wq, &ew->work);
		else
			handle_port_mgmt_change_event(&ew->work);
		return;

	case MLX4_DEV_EVENT_SLAVE_INIT:
		/* here, p is the slave id */
		do_slave_init(ibdev, p, 1);
		return;

	case MLX4_DEV_EVENT_SLAVE_SHUTDOWN:
		/* here, p is the slave id */
		do_slave_init(ibdev, p, 0);
		return;

	default:
		return;
	}

	ibev.device	      = ibdev_ptr;
	ibev.element.port_num = (u8) p;

	ib_dispatch_event(&ibev);
}

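/*
 * Hook the driver into the mlx4 core: .add/.remove are invoked as ConnectX
 * devices come and go, and .event feeds mlx4_ib_event() above.
 */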
static struct mlx4_interface mlx4_ib_interface = {
	.add		= mlx4_ib_add,
	.remove		= mlx4_ib_remove,
	.event		= mlx4_ib_event,
	.protocol	= MLX4_PROT_IB_IPV6
};

static int __init mlx4_ib_init(void)
{
	int err;

	wq = create_singlethread_workqueue("mlx4_ib");
	if (!wq)
		return -ENOMEM;

	err = mlx4_ib_mcg_init();
	if (err)
		goto clean_wq;

	err = mlx4_register_interface(&mlx4_ib_interface);
	if (err)
		goto clean_mcg;

	return 0;

clean_mcg:
	mlx4_ib_mcg_destroy();

clean_wq:
	destroy_workqueue(wq);
	return err;
}

static void __exit mlx4_ib_cleanup(void)
{
	mlx4_unregister_interface(&mlx4_ib_interface);
	mlx4_ib_mcg_destroy();
	destroy_workqueue(wq);
}

module_init(mlx4_ib_init);
module_exit(mlx4_ib_cleanup);