/*
 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>

#include <net/addrconf.h>

#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_addr.h>

#include <linux/mlx4/driver.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>

#include "mlx4_ib.h"
#include "user.h"
#define DRV_NAME	MLX4_IB_DRV_NAME
#define DRV_VERSION	"2.2-1"
#define DRV_RELDATE	"Feb 2014"

#define MLX4_IB_FLOW_MAX_PRIO 0xFFF
#define MLX4_IB_FLOW_QPN_MASK 0xFFFFFF
#define MLX4_IB_CARD_REV_A0   0xA0
MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("Mellanox ConnectX HCA InfiniBand driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);

int mlx4_ib_sm_guid_assign = 0;
module_param_named(sm_guid_assign, mlx4_ib_sm_guid_assign, int, 0444);
MODULE_PARM_DESC(sm_guid_assign, "Enable SM alias_GUID assignment if sm_guid_assign > 0 (Default: 0)");

static const char mlx4_ib_version[] =
	DRV_NAME ": Mellanox ConnectX InfiniBand driver v"
	DRV_VERSION " (" DRV_RELDATE ")\n";
struct update_gid_work {
	struct work_struct	work;
	union ib_gid		gids[128];
	struct mlx4_ib_dev     *dev;
	int			port;
};

static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init);

static struct workqueue_struct *wq;
static void init_query_mad(struct ib_smp *mad)
{
	mad->base_version  = 1;
	mad->mgmt_class    = IB_MGMT_CLASS_SUBN_LID_ROUTED;
	mad->class_version = 1;
	mad->method	   = IB_MGMT_METHOD_GET;
}
static union ib_gid zgid;
static int check_flow_steering_support(struct mlx4_dev *dev)
{
	int eth_num_ports = 0;
	int ib_num_ports = 0;

	int dmfs = dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED;

	if (dmfs) {
		int i;
		mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH)
			eth_num_ports++;
		mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
			ib_num_ports++;
		dmfs &= (!ib_num_ports ||
			 (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DMFS_IPOIB)) &&
			(!eth_num_ports ||
			 (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FS_EN));
		if (ib_num_ports && mlx4_is_mfunc(dev)) {
			pr_warn("Device managed flow steering is unavailable for IB port in multifunction env.\n");
			dmfs = 0;
		}
	}
	return dmfs;
}
static int num_ib_ports(struct mlx4_dev *dev)
{
	int ib_num_ports = 0;
	int i;

	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
		ib_num_ports++;

	return ib_num_ports;
}
static int mlx4_ib_query_device(struct ib_device *ibdev,
				struct ib_device_attr *props,
				struct ib_udata *uhw)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;
	int have_ib_ports;
	struct mlx4_uverbs_ex_query_device cmd;
	struct mlx4_uverbs_ex_query_device_resp resp = {.comp_mask = 0};
	struct mlx4_clock_params clock_params;

	if (uhw->inlen) {
		if (uhw->inlen < sizeof(cmd))
			return -EINVAL;

		err = ib_copy_from_udata(&cmd, uhw, sizeof(cmd));
		if (err)
			return err;

		if (cmd.comp_mask)
			return -EINVAL;

		if (cmd.reserved)
			return -EINVAL;
	}

	resp.response_length = offsetof(typeof(resp), response_length) +
		sizeof(resp.response_length);
	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

	err = mlx4_MAD_IFC(to_mdev(ibdev), MLX4_MAD_IFC_IGNORE_KEYS,
			   1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memset(props, 0, sizeof *props);

	have_ib_ports = num_ib_ports(dev->dev);

	props->fw_ver = dev->dev->caps.fw_ver;
	props->device_cap_flags    = IB_DEVICE_CHANGE_PHY_PORT |
		IB_DEVICE_PORT_ACTIVE_EVENT		|
		IB_DEVICE_SYS_IMAGE_GUID		|
		IB_DEVICE_RC_RNR_NAK_GEN		|
		IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR)
		props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR)
		props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_APM && have_ib_ports)
		props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UD_AV_PORT)
		props->device_cap_flags |= IB_DEVICE_UD_AV_PORT_ENFORCE;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM)
		props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
	if (dev->dev->caps.max_gso_sz &&
	    (dev->dev->rev_id != MLX4_IB_CARD_REV_A0) &&
	    (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BLH))
		props->device_cap_flags |= IB_DEVICE_UD_TSO;
	if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_RESERVED_LKEY)
		props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY;
	if ((dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_LOCAL_INV) &&
	    (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_REMOTE_INV) &&
	    (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_FAST_REG_WR))
		props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC)
		props->device_cap_flags |= IB_DEVICE_XRC;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW)
		props->device_cap_flags |= IB_DEVICE_MEM_WINDOW;
	if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN) {
		if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_WIN_TYPE_2B)
			props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2B;
		else
			props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2A;
	}
	if (dev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED)
		props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING;

	props->vendor_id	   = be32_to_cpup((__be32 *) (out_mad->data + 36)) &
		0xffffff;
	props->vendor_part_id	   = dev->dev->persist->pdev->device;
	props->hw_ver		   = be32_to_cpup((__be32 *) (out_mad->data + 32));
	memcpy(&props->sys_image_guid, out_mad->data + 4, 8);

	props->max_mr_size	   = ~0ull;
	props->page_size_cap	   = dev->dev->caps.page_size_cap;
	props->max_qp		   = dev->dev->quotas.qp;
	props->max_qp_wr	   = dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE;
	props->max_sge		   = min(dev->dev->caps.max_sq_sg,
					 dev->dev->caps.max_rq_sg);
	props->max_cq		   = dev->dev->quotas.cq;
	props->max_cqe		   = dev->dev->caps.max_cqes;
	props->max_mr		   = dev->dev->quotas.mpt;
	props->max_pd		   = dev->dev->caps.num_pds - dev->dev->caps.reserved_pds;
	props->max_qp_rd_atom	   = dev->dev->caps.max_qp_dest_rdma;
	props->max_qp_init_rd_atom = dev->dev->caps.max_qp_init_rdma;
	props->max_res_rd_atom	   = props->max_qp_rd_atom * props->max_qp;
	props->max_srq		   = dev->dev->quotas.srq;
	props->max_srq_wr	   = dev->dev->caps.max_srq_wqes - 1;
	props->max_srq_sge	   = dev->dev->caps.max_srq_sge;
	props->max_fast_reg_page_list_len = MLX4_MAX_FAST_REG_PAGES;
	props->local_ca_ack_delay  = dev->dev->caps.local_ca_ack_delay;
	props->atomic_cap	   = dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_ATOMIC ?
		IB_ATOMIC_HCA : IB_ATOMIC_NONE;
	props->masked_atomic_cap   = props->atomic_cap;
	props->max_pkeys	   = dev->dev->caps.pkey_table_len[1];
	props->max_mcast_grp	   = dev->dev->caps.num_mgms + dev->dev->caps.num_amgms;
	props->max_mcast_qp_attach = dev->dev->caps.num_qp_per_mgm;
	props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
					   props->max_mcast_grp;
	props->max_map_per_fmr = dev->dev->caps.max_fmr_maps;
	props->hca_core_clock = dev->dev->caps.hca_core_clock * 1000UL;
	props->timestamp_mask = 0xFFFFFFFFFFFFULL;

	if (!mlx4_is_slave(dev->dev))
		err = mlx4_get_internal_clock_params(dev->dev, &clock_params);

	if (uhw->outlen >= resp.response_length + sizeof(resp.hca_core_clock_offset)) {
		resp.response_length += sizeof(resp.hca_core_clock_offset);
		if (!err && !mlx4_is_slave(dev->dev)) {
			resp.comp_mask |= QUERY_DEVICE_RESP_MASK_TIMESTAMP;
			resp.hca_core_clock_offset = clock_params.offset % PAGE_SIZE;
		}
	}

	if (uhw->outlen) {
		err = ib_copy_to_udata(uhw, &resp, resp.response_length);
		if (err)
			goto out;
	}
out:
	kfree(in_mad);
	kfree(out_mad);

	return err;
}
static enum rdma_link_layer
mlx4_ib_port_link_layer(struct ib_device *device, u8 port_num)
{
	struct mlx4_dev *dev = to_mdev(device)->dev;

	return dev->caps.port_mask[port_num] == MLX4_PORT_TYPE_IB ?
		IB_LINK_LAYER_INFINIBAND : IB_LINK_LAYER_ETHERNET;
}
static int ib_link_query_port(struct ib_device *ibdev, u8 port,
			      struct ib_port_attr *props, int netw_view)
{
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int ext_active_speed;
	int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
	int err = -ENOMEM;

	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
	in_mad->attr_mod = cpu_to_be32(port);

	if (mlx4_is_mfunc(to_mdev(ibdev)->dev) && netw_view)
		mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;

	err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL,
			   in_mad, out_mad);
	if (err)
		goto out;

	props->lid		= be16_to_cpup((__be16 *) (out_mad->data + 16));
	props->lmc		= out_mad->data[34] & 0x7;
	props->sm_lid		= be16_to_cpup((__be16 *) (out_mad->data + 18));
	props->sm_sl		= out_mad->data[36] & 0xf;
	props->state		= out_mad->data[32] & 0xf;
	props->phys_state	= out_mad->data[33] >> 4;
	props->port_cap_flags	= be32_to_cpup((__be32 *) (out_mad->data + 20));
	if (netw_view)
		props->gid_tbl_len = out_mad->data[50];
	else
		props->gid_tbl_len = to_mdev(ibdev)->dev->caps.gid_table_len[port];
	props->max_msg_sz	= to_mdev(ibdev)->dev->caps.max_msg_sz;
	props->pkey_tbl_len	= to_mdev(ibdev)->dev->caps.pkey_table_len[port];
	props->bad_pkey_cntr	= be16_to_cpup((__be16 *) (out_mad->data + 46));
	props->qkey_viol_cntr	= be16_to_cpup((__be16 *) (out_mad->data + 48));
	props->active_width	= out_mad->data[31] & 0xf;
	props->active_speed	= out_mad->data[35] >> 4;
	props->max_mtu		= out_mad->data[41] & 0xf;
	props->active_mtu	= out_mad->data[36] >> 4;
	props->subnet_timeout	= out_mad->data[51] & 0x1f;
	props->max_vl_num	= out_mad->data[37] >> 4;
	props->init_type_reply	= out_mad->data[41] >> 4;

	/* Check if extended speeds (EDR/FDR/...) are supported */
	if (props->port_cap_flags & IB_PORT_EXTENDED_SPEEDS_SUP) {
		ext_active_speed = out_mad->data[62] >> 4;

		switch (ext_active_speed) {
		case 1:
			props->active_speed = IB_SPEED_FDR;
			break;
		case 2:
			props->active_speed = IB_SPEED_EDR;
			break;
		}
	}

	/* If reported active speed is QDR, check if is FDR-10 */
	if (props->active_speed == IB_SPEED_QDR) {
		init_query_mad(in_mad);
		in_mad->attr_id = MLX4_ATTR_EXTENDED_PORT_INFO;
		in_mad->attr_mod = cpu_to_be32(port);

		err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port,
				   NULL, NULL, in_mad, out_mad);
		if (err)
			goto out;

		/* Checking LinkSpeedActive for FDR-10 */
		if (out_mad->data[15] & 0x1)
			props->active_speed = IB_SPEED_FDR10;
	}

	/* Avoid wrong speed value returned by FW if the IB link is down. */
	if (props->state == IB_PORT_DOWN)
		props->active_speed = IB_SPEED_SDR;

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}
static u8 state_to_phys_state(enum ib_port_state state)
{
	return state == IB_PORT_ACTIVE ? 5 : 3;
}
static int eth_link_query_port(struct ib_device *ibdev, u8 port,
			       struct ib_port_attr *props, int netw_view)
{

	struct mlx4_ib_dev *mdev = to_mdev(ibdev);
	struct mlx4_ib_iboe *iboe = &mdev->iboe;
	struct net_device *ndev;
	enum ib_mtu tmp;
	struct mlx4_cmd_mailbox *mailbox;
	int err = 0;
	int is_bonded = mlx4_is_bonded(mdev->dev);

	mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, port, 0,
			   MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
			   MLX4_CMD_WRAPPED);
	if (err)
		goto out;

	props->active_width	=  (((u8 *)mailbox->buf)[5] == 0x40) ?
						IB_WIDTH_4X : IB_WIDTH_1X;
	props->active_speed	= IB_SPEED_QDR;
	props->port_cap_flags	= IB_PORT_CM_SUP | IB_PORT_IP_BASED_GIDS;
	props->gid_tbl_len	= mdev->dev->caps.gid_table_len[port];
	props->max_msg_sz	= mdev->dev->caps.max_msg_sz;
	props->pkey_tbl_len	= 1;
	props->max_mtu		= IB_MTU_4096;
	props->max_vl_num	= 2;
	props->state		= IB_PORT_DOWN;
	props->phys_state	= state_to_phys_state(props->state);
	props->active_mtu	= IB_MTU_256;

	rtnl_lock(); /* required to get upper dev */
	spin_lock_bh(&iboe->lock);
	ndev = iboe->netdevs[port - 1];
	if (ndev && is_bonded)
		ndev = netdev_master_upper_dev_get(ndev);
	if (!ndev)
		goto out_unlock;

	tmp = iboe_get_mtu(ndev->mtu);
	props->active_mtu = tmp ? min(props->max_mtu, tmp) : IB_MTU_256;

	props->state		= (netif_running(ndev) && netif_carrier_ok(ndev)) ?
					IB_PORT_ACTIVE : IB_PORT_DOWN;
	props->phys_state	= state_to_phys_state(props->state);
out_unlock:
	spin_unlock_bh(&iboe->lock);
	rtnl_unlock();
out:
	mlx4_free_cmd_mailbox(mdev->dev, mailbox);
	return err;
}
int __mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
			 struct ib_port_attr *props, int netw_view)
{
	int err;

	memset(props, 0, sizeof *props);

	err = mlx4_ib_port_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND ?
		ib_link_query_port(ibdev, port, props, netw_view) :
		eth_link_query_port(ibdev, port, props, netw_view);

	return err;
}
static int mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
			      struct ib_port_attr *props)
{
	/* returns host view */
	return __mlx4_ib_query_port(ibdev, port, props, 0);
}
int __mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
			union ib_gid *gid, int netw_view)
{
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	int clear = 0;
	int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;

	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
	in_mad->attr_mod = cpu_to_be32(port);

	if (mlx4_is_mfunc(dev->dev) && netw_view)
		mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;

	err = mlx4_MAD_IFC(dev, mad_ifc_flags, port, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(gid->raw, out_mad->data + 8, 8);

	if (mlx4_is_mfunc(dev->dev) && !netw_view) {
		if (index) {
			/* For any index > 0, return the null guid */
			err = 0;
			clear = 1;
			goto out;
		}
	}

	init_query_mad(in_mad);
	in_mad->attr_id  = IB_SMP_ATTR_GUID_INFO;
	in_mad->attr_mod = cpu_to_be32(index / 8);

	err = mlx4_MAD_IFC(dev, mad_ifc_flags, port,
			   NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8);

out:
	if (clear)
		memset(gid->raw + 8, 0, 8);
	kfree(in_mad);
	kfree(out_mad);
	return err;
}
static int iboe_query_gid(struct ib_device *ibdev, u8 port, int index,
			  union ib_gid *gid)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);

	*gid = dev->iboe.gid_table[port - 1][index];

	return 0;
}
static int mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
			     union ib_gid *gid)
{
	if (rdma_port_get_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND)
		return __mlx4_ib_query_gid(ibdev, port, index, gid, 0);
	else
		return iboe_query_gid(ibdev, port, index, gid);
}
int __mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
			 u16 *pkey, int netw_view)
{
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
	int err = -ENOMEM;

	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id  = IB_SMP_ATTR_PKEY_TABLE;
	in_mad->attr_mod = cpu_to_be32(index / 32);

	if (mlx4_is_mfunc(to_mdev(ibdev)->dev) && netw_view)
		mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;

	err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL,
			   in_mad, out_mad);
	if (err)
		goto out;

	*pkey = be16_to_cpu(((__be16 *) out_mad->data)[index % 32]);

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}
static int mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
{
	return __mlx4_ib_query_pkey(ibdev, port, index, pkey, 0);
}
static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask,
				 struct ib_device_modify *props)
{
	struct mlx4_cmd_mailbox *mailbox;
	unsigned long flags;

	if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
		return -EOPNOTSUPP;

	if (!(mask & IB_DEVICE_MODIFY_NODE_DESC))
		return 0;

	if (mlx4_is_slave(to_mdev(ibdev)->dev))
		return -EOPNOTSUPP;

	spin_lock_irqsave(&to_mdev(ibdev)->sm_lock, flags);
	memcpy(ibdev->node_desc, props->node_desc, 64);
	spin_unlock_irqrestore(&to_mdev(ibdev)->sm_lock, flags);

	/*
	 * If possible, pass node desc to FW, so it can generate
	 * a 144 trap.  If cmd fails, just ignore.
	 */
	mailbox = mlx4_alloc_cmd_mailbox(to_mdev(ibdev)->dev);
	if (IS_ERR(mailbox))
		return 0;

	memcpy(mailbox->buf, props->node_desc, 64);
	mlx4_cmd(to_mdev(ibdev)->dev, mailbox->dma, 1, 0,
		 MLX4_CMD_SET_NODE, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);

	mlx4_free_cmd_mailbox(to_mdev(ibdev)->dev, mailbox);

	return 0;
}
static int mlx4_ib_SET_PORT(struct mlx4_ib_dev *dev, u8 port, int reset_qkey_viols,
			    u32 cap_mask)
{
	struct mlx4_cmd_mailbox *mailbox;
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(dev->dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	if (dev->dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
		*(u8 *) mailbox->buf	     = !!reset_qkey_viols << 6;
		((__be32 *) mailbox->buf)[2] = cpu_to_be32(cap_mask);
	} else {
		((u8 *) mailbox->buf)[3]     = !!reset_qkey_viols;
		((__be32 *) mailbox->buf)[1] = cpu_to_be32(cap_mask);
	}

	err = mlx4_cmd(dev->dev, mailbox->dma, port, MLX4_SET_PORT_IB_OPCODE,
		       MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
		       MLX4_CMD_WRAPPED);

	mlx4_free_cmd_mailbox(dev->dev, mailbox);
	return err;
}
static int mlx4_ib_modify_port(struct ib_device *ibdev, u8 port, int mask,
			       struct ib_port_modify *props)
{
	struct mlx4_ib_dev *mdev = to_mdev(ibdev);
	u8 is_eth = mdev->dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH;
	struct ib_port_attr attr;
	u32 cap_mask;
	int err;

	/* return OK if this is RoCE. CM calls ib_modify_port() regardless
	 * of whether port link layer is ETH or IB. For ETH ports, qkey
	 * violations and port capabilities are not meaningful.
	 */
	if (is_eth)
		return 0;

	mutex_lock(&mdev->cap_mask_mutex);

	err = mlx4_ib_query_port(ibdev, port, &attr);
	if (err)
		goto out;

	cap_mask = (attr.port_cap_flags | props->set_port_cap_mask) &
		~props->clr_port_cap_mask;

	err = mlx4_ib_SET_PORT(mdev, port,
			       !!(mask & IB_PORT_RESET_QKEY_CNTR),
			       cap_mask);

out:
	mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex);
	return err;
}
static struct ib_ucontext *mlx4_ib_alloc_ucontext(struct ib_device *ibdev,
						  struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct mlx4_ib_ucontext *context;
	struct mlx4_ib_alloc_ucontext_resp_v3 resp_v3;
	struct mlx4_ib_alloc_ucontext_resp resp;
	int err;

	if (!dev->ib_active)
		return ERR_PTR(-EAGAIN);

	if (ibdev->uverbs_abi_ver == MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION) {
		resp_v3.qp_tab_size      = dev->dev->caps.num_qps;
		resp_v3.bf_reg_size      = dev->dev->caps.bf_reg_size;
		resp_v3.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
	} else {
		resp.dev_caps	      = dev->dev->caps.userspace_caps;
		resp.qp_tab_size      = dev->dev->caps.num_qps;
		resp.bf_reg_size      = dev->dev->caps.bf_reg_size;
		resp.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
		resp.cqe_size	      = dev->dev->caps.cqe_size;
	}

	context = kmalloc(sizeof *context, GFP_KERNEL);
	if (!context)
		return ERR_PTR(-ENOMEM);

	err = mlx4_uar_alloc(to_mdev(ibdev)->dev, &context->uar);
	if (err) {
		kfree(context);
		return ERR_PTR(err);
	}

	INIT_LIST_HEAD(&context->db_page_list);
	mutex_init(&context->db_page_mutex);

	if (ibdev->uverbs_abi_ver == MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION)
		err = ib_copy_to_udata(udata, &resp_v3, sizeof(resp_v3));
	else
		err = ib_copy_to_udata(udata, &resp, sizeof(resp));

	if (err) {
		mlx4_uar_free(to_mdev(ibdev)->dev, &context->uar);
		kfree(context);
		return ERR_PTR(-EFAULT);
	}

	return &context->ibucontext;
}
static int mlx4_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
{
	struct mlx4_ib_ucontext *context = to_mucontext(ibcontext);

	mlx4_uar_free(to_mdev(ibcontext->device)->dev, &context->uar);
	kfree(context);

	return 0;
}
static int mlx4_ib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	struct mlx4_ib_dev *dev = to_mdev(context->device);

	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
		return -EINVAL;

	if (vma->vm_pgoff == 0) {
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

		if (io_remap_pfn_range(vma, vma->vm_start,
				       to_mucontext(context)->uar.pfn,
				       PAGE_SIZE, vma->vm_page_prot))
			return -EAGAIN;
	} else if (vma->vm_pgoff == 1 && dev->dev->caps.bf_reg_size != 0) {
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

		if (io_remap_pfn_range(vma, vma->vm_start,
				       to_mucontext(context)->uar.pfn +
				       dev->dev->caps.num_uars,
				       PAGE_SIZE, vma->vm_page_prot))
			return -EAGAIN;
	} else if (vma->vm_pgoff == 3) {
		struct mlx4_clock_params params;
		int ret = mlx4_get_internal_clock_params(dev->dev, &params);

		if (ret)
			return ret;

		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		if (io_remap_pfn_range(vma, vma->vm_start,
				       (pci_resource_start(dev->dev->persist->pdev,
							   params.bar) +
					params.offset)
				       >> PAGE_SHIFT,
				       PAGE_SIZE, vma->vm_page_prot))
			return -EAGAIN;
	} else {
		return -EINVAL;
	}

	return 0;
}
static struct ib_pd *mlx4_ib_alloc_pd(struct ib_device *ibdev,
				      struct ib_ucontext *context,
				      struct ib_udata *udata)
{
	struct mlx4_ib_pd *pd;
	int err;

	pd = kmalloc(sizeof *pd, GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);

	err = mlx4_pd_alloc(to_mdev(ibdev)->dev, &pd->pdn);
	if (err) {
		kfree(pd);
		return ERR_PTR(err);
	}

	if (context)
		if (ib_copy_to_udata(udata, &pd->pdn, sizeof (__u32))) {
			mlx4_pd_free(to_mdev(ibdev)->dev, pd->pdn);
			kfree(pd);
			return ERR_PTR(-EFAULT);
		}

	return &pd->ibpd;
}
static int mlx4_ib_dealloc_pd(struct ib_pd *pd)
{
	mlx4_pd_free(to_mdev(pd->device)->dev, to_mpd(pd)->pdn);
	kfree(pd);

	return 0;
}
static struct ib_xrcd *mlx4_ib_alloc_xrcd(struct ib_device *ibdev,
					  struct ib_ucontext *context,
					  struct ib_udata *udata)
{
	struct mlx4_ib_xrcd *xrcd;
	struct ib_cq_init_attr cq_attr = {};
	int err;

	if (!(to_mdev(ibdev)->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC))
		return ERR_PTR(-ENOSYS);

	xrcd = kmalloc(sizeof *xrcd, GFP_KERNEL);
	if (!xrcd)
		return ERR_PTR(-ENOMEM);

	err = mlx4_xrcd_alloc(to_mdev(ibdev)->dev, &xrcd->xrcdn);
	if (err)
		goto err1;

	xrcd->pd = ib_alloc_pd(ibdev);
	if (IS_ERR(xrcd->pd)) {
		err = PTR_ERR(xrcd->pd);
		goto err2;
	}

	cq_attr.cqe = 1;
	xrcd->cq = ib_create_cq(ibdev, NULL, NULL, xrcd, &cq_attr);
	if (IS_ERR(xrcd->cq)) {
		err = PTR_ERR(xrcd->cq);
		goto err3;
	}

	return &xrcd->ibxrcd;

err3:
	ib_dealloc_pd(xrcd->pd);
err2:
	mlx4_xrcd_free(to_mdev(ibdev)->dev, xrcd->xrcdn);
err1:
	kfree(xrcd);
	return ERR_PTR(err);
}
static int mlx4_ib_dealloc_xrcd(struct ib_xrcd *xrcd)
{
	ib_destroy_cq(to_mxrcd(xrcd)->cq);
	ib_dealloc_pd(to_mxrcd(xrcd)->pd);
	mlx4_xrcd_free(to_mdev(xrcd->device)->dev, to_mxrcd(xrcd)->xrcdn);
	kfree(xrcd);

	return 0;
}
static int add_gid_entry(struct ib_qp *ibqp, union ib_gid *gid)
{
	struct mlx4_ib_qp *mqp = to_mqp(ibqp);
	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
	struct mlx4_ib_gid_entry *ge;

	ge = kzalloc(sizeof *ge, GFP_KERNEL);
	if (!ge)
		return -ENOMEM;

	ge->gid = *gid;
	if (mlx4_ib_add_mc(mdev, mqp, gid)) {
		ge->port = mqp->port;
		ge->added = 1;
	}

	mutex_lock(&mqp->mutex);
	list_add_tail(&ge->list, &mqp->gid_list);
	mutex_unlock(&mqp->mutex);

	return 0;
}
int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
		   union ib_gid *gid)
{
	struct net_device *ndev;
	int ret = 0;

	if (!mqp->port)
		return 0;

	spin_lock_bh(&mdev->iboe.lock);
	ndev = mdev->iboe.netdevs[mqp->port - 1];
	if (ndev)
		dev_hold(ndev);
	spin_unlock_bh(&mdev->iboe.lock);

	if (ndev) {
		ret = 1;
		dev_put(ndev);
	}

	return ret;
}
struct mlx4_ib_steering {
	struct list_head list;
	struct mlx4_flow_reg_id reg_id;
	union ib_gid gid;
};
static int parse_flow_attr(struct mlx4_dev *dev,
			   u32 qp_num,
			   union ib_flow_spec *ib_spec,
			   struct _rule_hw *mlx4_spec)
{
	enum mlx4_net_trans_rule_id type;

	switch (ib_spec->type) {
	case IB_FLOW_SPEC_ETH:
		type = MLX4_NET_TRANS_RULE_ID_ETH;
		memcpy(mlx4_spec->eth.dst_mac, ib_spec->eth.val.dst_mac,
		       ETH_ALEN);
		memcpy(mlx4_spec->eth.dst_mac_msk, ib_spec->eth.mask.dst_mac,
		       ETH_ALEN);
		mlx4_spec->eth.vlan_tag = ib_spec->eth.val.vlan_tag;
		mlx4_spec->eth.vlan_tag_msk = ib_spec->eth.mask.vlan_tag;
		break;
	case IB_FLOW_SPEC_IB:
		type = MLX4_NET_TRANS_RULE_ID_IB;
		mlx4_spec->ib.l3_qpn =
			cpu_to_be32(qp_num);
		mlx4_spec->ib.qpn_mask =
			cpu_to_be32(MLX4_IB_FLOW_QPN_MASK);
		break;


	case IB_FLOW_SPEC_IPV4:
		type = MLX4_NET_TRANS_RULE_ID_IPV4;
		mlx4_spec->ipv4.src_ip = ib_spec->ipv4.val.src_ip;
		mlx4_spec->ipv4.src_ip_msk = ib_spec->ipv4.mask.src_ip;
		mlx4_spec->ipv4.dst_ip = ib_spec->ipv4.val.dst_ip;
		mlx4_spec->ipv4.dst_ip_msk = ib_spec->ipv4.mask.dst_ip;
		break;

	case IB_FLOW_SPEC_TCP:
	case IB_FLOW_SPEC_UDP:
		type = ib_spec->type == IB_FLOW_SPEC_TCP ?
					MLX4_NET_TRANS_RULE_ID_TCP :
					MLX4_NET_TRANS_RULE_ID_UDP;
		mlx4_spec->tcp_udp.dst_port = ib_spec->tcp_udp.val.dst_port;
		mlx4_spec->tcp_udp.dst_port_msk = ib_spec->tcp_udp.mask.dst_port;
		mlx4_spec->tcp_udp.src_port = ib_spec->tcp_udp.val.src_port;
		mlx4_spec->tcp_udp.src_port_msk = ib_spec->tcp_udp.mask.src_port;
		break;

	default:
		return -EINVAL;
	}
	if (mlx4_map_sw_to_hw_steering_id(dev, type) < 0 ||
	    mlx4_hw_rule_sz(dev, type) < 0)
		return -EINVAL;
	mlx4_spec->id = cpu_to_be16(mlx4_map_sw_to_hw_steering_id(dev, type));
	mlx4_spec->size = mlx4_hw_rule_sz(dev, type) >> 2;
	return mlx4_hw_rule_sz(dev, type);
}
struct default_rules {
	__u32 mandatory_fields[IB_FLOW_SPEC_SUPPORT_LAYERS];
	__u32 mandatory_not_fields[IB_FLOW_SPEC_SUPPORT_LAYERS];
	__u32 rules_create_list[IB_FLOW_SPEC_SUPPORT_LAYERS];
	__u8  link_layer;
};
static const struct default_rules default_table[] = {
	{
		.mandatory_fields = {IB_FLOW_SPEC_IPV4},
		.mandatory_not_fields = {IB_FLOW_SPEC_ETH},
		.rules_create_list = {IB_FLOW_SPEC_IB},
		.link_layer = IB_LINK_LAYER_INFINIBAND
	}
};
*qp
,
988 struct ib_flow_attr
*flow_attr
)
992 const struct default_rules
*pdefault_rules
= default_table
;
993 u8 link_layer
= rdma_port_get_link_layer(qp
->device
, flow_attr
->port
);
995 for (i
= 0; i
< ARRAY_SIZE(default_table
); i
++, pdefault_rules
++) {
996 __u32 field_types
[IB_FLOW_SPEC_SUPPORT_LAYERS
];
997 memset(&field_types
, 0, sizeof(field_types
));
999 if (link_layer
!= pdefault_rules
->link_layer
)
1002 ib_flow
= flow_attr
+ 1;
1003 /* we assume the specs are sorted */
1004 for (j
= 0, k
= 0; k
< IB_FLOW_SPEC_SUPPORT_LAYERS
&&
1005 j
< flow_attr
->num_of_specs
; k
++) {
1006 union ib_flow_spec
*current_flow
=
1007 (union ib_flow_spec
*)ib_flow
;
1009 /* same layer but different type */
1010 if (((current_flow
->type
& IB_FLOW_SPEC_LAYER_MASK
) ==
1011 (pdefault_rules
->mandatory_fields
[k
] &
1012 IB_FLOW_SPEC_LAYER_MASK
)) &&
1013 (current_flow
->type
!=
1014 pdefault_rules
->mandatory_fields
[k
]))
1017 /* same layer, try match next one */
1018 if (current_flow
->type
==
1019 pdefault_rules
->mandatory_fields
[k
]) {
1022 ((union ib_flow_spec
*)ib_flow
)->size
;
1026 ib_flow
= flow_attr
+ 1;
1027 for (j
= 0; j
< flow_attr
->num_of_specs
;
1028 j
++, ib_flow
+= ((union ib_flow_spec
*)ib_flow
)->size
)
1029 for (k
= 0; k
< IB_FLOW_SPEC_SUPPORT_LAYERS
; k
++)
1030 /* same layer and same type */
1031 if (((union ib_flow_spec
*)ib_flow
)->type
==
1032 pdefault_rules
->mandatory_not_fields
[k
])
static int __mlx4_ib_create_default_rules(
		struct mlx4_ib_dev *mdev,
		struct ib_qp *qp,
		const struct default_rules *pdefault_rules,
		struct _rule_hw *mlx4_spec) {
	int size = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(pdefault_rules->rules_create_list); i++) {
		int ret;
		union ib_flow_spec ib_spec;
		switch (pdefault_rules->rules_create_list[i]) {
		case 0:
			/* no rule */
			continue;
		case IB_FLOW_SPEC_IB:
			ib_spec.type = IB_FLOW_SPEC_IB;
			ib_spec.size = sizeof(struct ib_flow_spec_ib);

			break;
		default:
			/* invalid rule */
			return -EINVAL;
		}
		/* We must put empty rule, qpn is being ignored */
		ret = parse_flow_attr(mdev->dev, 0, &ib_spec,
				      mlx4_spec);
		if (ret < 0) {
			pr_info("invalid parsing\n");
			return -EINVAL;
		}

		mlx4_spec = (void *)mlx4_spec + ret;
		size += ret;
	}
	return size;
}
static int __mlx4_ib_create_flow(struct ib_qp *qp, struct ib_flow_attr *flow_attr,
			  int domain,
			  enum mlx4_net_trans_promisc_mode flow_type,
			  u64 *reg_id)
{
	int ret, i;
	int size = 0;
	void *ib_flow;
	struct mlx4_ib_dev *mdev = to_mdev(qp->device);
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_net_trans_rule_hw_ctrl *ctrl;
	int default_flow;

	static const u16 __mlx4_domain[] = {
		[IB_FLOW_DOMAIN_USER] = MLX4_DOMAIN_UVERBS,
		[IB_FLOW_DOMAIN_ETHTOOL] = MLX4_DOMAIN_ETHTOOL,
		[IB_FLOW_DOMAIN_RFS] = MLX4_DOMAIN_RFS,
		[IB_FLOW_DOMAIN_NIC] = MLX4_DOMAIN_NIC,
	};

	if (flow_attr->priority > MLX4_IB_FLOW_MAX_PRIO) {
		pr_err("Invalid priority value %d\n", flow_attr->priority);
		return -EINVAL;
	}

	if (domain >= IB_FLOW_DOMAIN_NUM) {
		pr_err("Invalid domain value %d\n", domain);
		return -EINVAL;
	}

	if (mlx4_map_sw_to_hw_steering_mode(mdev->dev, flow_type) < 0)
		return -EINVAL;

	mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	ctrl = mailbox->buf;

	ctrl->prio = cpu_to_be16(__mlx4_domain[domain] |
				 flow_attr->priority);
	ctrl->type = mlx4_map_sw_to_hw_steering_mode(mdev->dev, flow_type);
	ctrl->port = flow_attr->port;
	ctrl->qpn = cpu_to_be32(qp->qp_num);

	ib_flow = flow_attr + 1;
	size += sizeof(struct mlx4_net_trans_rule_hw_ctrl);
	/* Add default flows */
	default_flow = __mlx4_ib_default_rules_match(qp, flow_attr);
	if (default_flow >= 0) {
		ret = __mlx4_ib_create_default_rules(
				mdev, qp, default_table + default_flow,
				mailbox->buf + size);
		if (ret < 0) {
			mlx4_free_cmd_mailbox(mdev->dev, mailbox);
			return -EINVAL;
		}
		size += ret;
	}
	for (i = 0; i < flow_attr->num_of_specs; i++) {
		ret = parse_flow_attr(mdev->dev, qp->qp_num, ib_flow,
				      mailbox->buf + size);
		if (ret < 0) {
			mlx4_free_cmd_mailbox(mdev->dev, mailbox);
			return -EINVAL;
		}
		ib_flow += ((union ib_flow_spec *) ib_flow)->size;
		size += ret;
	}

	ret = mlx4_cmd_imm(mdev->dev, mailbox->dma, reg_id, size >> 2, 0,
			   MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
			   MLX4_CMD_WRAPPED);
	if (ret == -ENOMEM)
		pr_err("mcg table is full. Fail to register network rule.\n");
	else if (ret == -ENXIO)
		pr_err("Device managed flow steering is disabled. Fail to register network rule.\n");
	else if (ret)
		pr_err("Invalid argument. Fail to register network rule.\n");

	mlx4_free_cmd_mailbox(mdev->dev, mailbox);
	return ret;
}
static int __mlx4_ib_destroy_flow(struct mlx4_dev *dev, u64 reg_id)
{
	int err;
	err = mlx4_cmd(dev, reg_id, 0, 0,
		       MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
		       MLX4_CMD_WRAPPED);
	if (err)
		pr_err("Fail to detach network rule. registration id = 0x%llx\n",
		       reg_id);
	return err;
}
static int mlx4_ib_tunnel_steer_add(struct ib_qp *qp, struct ib_flow_attr *flow_attr,
				    u64 *reg_id)
{
	void *ib_flow;
	union ib_flow_spec *ib_spec;
	struct mlx4_dev	*dev = to_mdev(qp->device)->dev;
	int err = 0;

	if (dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN ||
	    dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC)
		return 0; /* do nothing */

	ib_flow = flow_attr + 1;
	ib_spec = (union ib_flow_spec *)ib_flow;

	if (ib_spec->type !=  IB_FLOW_SPEC_ETH || flow_attr->num_of_specs != 1)
		return 0; /* do nothing */

	err = mlx4_tunnel_steer_add(to_mdev(qp->device)->dev, ib_spec->eth.val.dst_mac,
				    flow_attr->port, qp->qp_num,
				    MLX4_DOMAIN_UVERBS | (flow_attr->priority & 0xff),
				    reg_id);
	return err;
}
static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
				    struct ib_flow_attr *flow_attr,
				    int domain)
{
	int err = 0, i = 0, j = 0;
	struct mlx4_ib_flow *mflow;
	enum mlx4_net_trans_promisc_mode type[2];
	struct mlx4_dev *dev = (to_mdev(qp->device))->dev;
	int is_bonded = mlx4_is_bonded(dev);

	memset(type, 0, sizeof(type));

	mflow = kzalloc(sizeof(*mflow), GFP_KERNEL);
	if (!mflow) {
		err = -ENOMEM;
		goto err_free;
	}

	switch (flow_attr->type) {
	case IB_FLOW_ATTR_NORMAL:
		type[0] = MLX4_FS_REGULAR;
		break;

	case IB_FLOW_ATTR_ALL_DEFAULT:
		type[0] = MLX4_FS_ALL_DEFAULT;
		break;

	case IB_FLOW_ATTR_MC_DEFAULT:
		type[0] = MLX4_FS_MC_DEFAULT;
		break;

	case IB_FLOW_ATTR_SNIFFER:
		type[0] = MLX4_FS_UC_SNIFFER;
		type[1] = MLX4_FS_MC_SNIFFER;
		break;

	default:
		err = -EINVAL;
		goto err_free;
	}

	while (i < ARRAY_SIZE(type) && type[i]) {
		err = __mlx4_ib_create_flow(qp, flow_attr, domain, type[i],
					    &mflow->reg_id[i].id);
		if (err)
			goto err_create_flow;
		if (is_bonded) {
			/* Application always sees one port so the mirror rule
			 * must be on port #2
			 */
			flow_attr->port = 2;
			err = __mlx4_ib_create_flow(qp, flow_attr,
						    domain, type[j],
						    &mflow->reg_id[j].mirror);
			flow_attr->port = 1;
			if (err)
				goto err_create_flow;
			j++;
		}

		i++;
	}

	if (i < ARRAY_SIZE(type) && flow_attr->type == IB_FLOW_ATTR_NORMAL) {
		err = mlx4_ib_tunnel_steer_add(qp, flow_attr,
					       &mflow->reg_id[i].id);
		if (err)
			goto err_create_flow;
		if (is_bonded) {
			flow_attr->port = 2;
			err = mlx4_ib_tunnel_steer_add(qp, flow_attr,
						       &mflow->reg_id[j].mirror);
			flow_attr->port = 1;
			if (err)
				goto err_create_flow;
			j++;
		}
		/* function to create mirror rule */
		i++;
	}

	return &mflow->ibflow;

err_create_flow:
	while (i) {
		(void)__mlx4_ib_destroy_flow(to_mdev(qp->device)->dev,
					     mflow->reg_id[i].id);
		i--;
	}

	while (j) {
		(void)__mlx4_ib_destroy_flow(to_mdev(qp->device)->dev,
					     mflow->reg_id[j].mirror);
		j--;
	}
err_free:
	kfree(mflow);
	return ERR_PTR(err);
}
static int mlx4_ib_destroy_flow(struct ib_flow *flow_id)
{
	int err, ret = 0;
	int i = 0;
	struct mlx4_ib_dev *mdev = to_mdev(flow_id->qp->device);
	struct mlx4_ib_flow *mflow = to_mflow(flow_id);

	while (i < ARRAY_SIZE(mflow->reg_id) && mflow->reg_id[i].id) {
		err = __mlx4_ib_destroy_flow(mdev->dev, mflow->reg_id[i].id);
		if (err)
			ret = err;
		if (mflow->reg_id[i].mirror) {
			err = __mlx4_ib_destroy_flow(mdev->dev,
						     mflow->reg_id[i].mirror);
			if (err)
				ret = err;
		}
		i++;
	}

	kfree(mflow);
	return ret;
}
static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	int err;
	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
	struct mlx4_dev	*dev = mdev->dev;
	struct mlx4_ib_qp *mqp = to_mqp(ibqp);
	struct mlx4_ib_steering *ib_steering = NULL;
	enum mlx4_protocol prot = MLX4_PROT_IB_IPV6;
	struct mlx4_flow_reg_id	reg_id;

	if (mdev->dev->caps.steering_mode ==
	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
		ib_steering = kmalloc(sizeof(*ib_steering), GFP_KERNEL);
		if (!ib_steering)
			return -ENOMEM;
	}

	err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw, mqp->port,
				    !!(mqp->flags &
				       MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
				    prot, &reg_id.id);
	if (err) {
		pr_err("multicast attach op failed, err %d\n", err);
		goto err_malloc;
	}

	reg_id.mirror = 0;
	if (mlx4_is_bonded(dev)) {
		err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw,
					    (mqp->port == 1) ? 2 : 1,
					    !!(mqp->flags &
					    MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
					    prot, &reg_id.mirror);
		if (err)
			goto err_add;
	}

	err = add_gid_entry(ibqp, gid);
	if (err)
		goto err_add;

	if (ib_steering) {
		memcpy(ib_steering->gid.raw, gid->raw, 16);
		ib_steering->reg_id = reg_id;
		mutex_lock(&mqp->mutex);
		list_add(&ib_steering->list, &mqp->steering_rules);
		mutex_unlock(&mqp->mutex);
	}
	return 0;

err_add:
	mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
			      prot, reg_id.id);
	if (reg_id.mirror)
		mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
				      prot, reg_id.mirror);
err_malloc:
	kfree(ib_steering);

	return err;
}
static struct mlx4_ib_gid_entry *find_gid_entry(struct mlx4_ib_qp *qp, u8 *raw)
{
	struct mlx4_ib_gid_entry *ge;
	struct mlx4_ib_gid_entry *tmp;
	struct mlx4_ib_gid_entry *ret = NULL;

	list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) {
		if (!memcmp(raw, ge->gid.raw, 16)) {
			ret = ge;
			break;
		}
	}

	return ret;
}
static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	int err;
	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
	struct mlx4_dev *dev = mdev->dev;
	struct mlx4_ib_qp *mqp = to_mqp(ibqp);
	struct net_device *ndev;
	struct mlx4_ib_gid_entry *ge;
	struct mlx4_flow_reg_id reg_id = {0, 0};
	enum mlx4_protocol prot =  MLX4_PROT_IB_IPV6;

	if (mdev->dev->caps.steering_mode ==
	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
		struct mlx4_ib_steering *ib_steering;

		mutex_lock(&mqp->mutex);
		list_for_each_entry(ib_steering, &mqp->steering_rules, list) {
			if (!memcmp(ib_steering->gid.raw, gid->raw, 16)) {
				list_del(&ib_steering->list);
				break;
			}
		}
		mutex_unlock(&mqp->mutex);
		if (&ib_steering->list == &mqp->steering_rules) {
			pr_err("Couldn't find reg_id for mgid. Steering rule is left attached\n");
			return -EINVAL;
		}
		reg_id = ib_steering->reg_id;
		kfree(ib_steering);
	}

	err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
				    prot, reg_id.id);
	if (err)
		return err;

	if (mlx4_is_bonded(dev)) {
		err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
					    prot, reg_id.mirror);
		if (err)
			return err;
	}

	mutex_lock(&mqp->mutex);
	ge = find_gid_entry(mqp, gid->raw);
	if (ge) {
		spin_lock_bh(&mdev->iboe.lock);
		ndev = ge->added ? mdev->iboe.netdevs[ge->port - 1] : NULL;
		if (ndev)
			dev_hold(ndev);
		spin_unlock_bh(&mdev->iboe.lock);
		if (ndev)
			dev_put(ndev);
		list_del(&ge->list);
		kfree(ge);
	} else
		pr_warn("could not find mgid entry\n");

	mutex_unlock(&mqp->mutex);

	return 0;
}
static int init_node_data(struct mlx4_ib_dev *dev)
{
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
	int err = -ENOMEM;

	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;
	if (mlx4_is_master(dev->dev))
		mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;

	err = mlx4_MAD_IFC(dev, mad_ifc_flags, 1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(dev->ib_dev.node_desc, out_mad->data, 64);

	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

	err = mlx4_MAD_IFC(dev, mad_ifc_flags, 1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	dev->dev->rev_id = be32_to_cpup((__be32 *) (out_mad->data + 32));
	memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8);

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}
static ssize_t show_hca(struct device *device, struct device_attribute *attr,
			char *buf)
{
	struct mlx4_ib_dev *dev =
		container_of(device, struct mlx4_ib_dev, ib_dev.dev);
	return sprintf(buf, "MT%d\n", dev->dev->persist->pdev->device);
}

static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr,
			   char *buf)
{
	struct mlx4_ib_dev *dev =
		container_of(device, struct mlx4_ib_dev, ib_dev.dev);
	return sprintf(buf, "%d.%d.%d\n", (int) (dev->dev->caps.fw_ver >> 32),
		       (int) (dev->dev->caps.fw_ver >> 16) & 0xffff,
		       (int) dev->dev->caps.fw_ver & 0xffff);
}

static ssize_t show_rev(struct device *device, struct device_attribute *attr,
			char *buf)
{
	struct mlx4_ib_dev *dev =
		container_of(device, struct mlx4_ib_dev, ib_dev.dev);
	return sprintf(buf, "%x\n", dev->dev->rev_id);
}

static ssize_t show_board(struct device *device, struct device_attribute *attr,
			  char *buf)
{
	struct mlx4_ib_dev *dev =
		container_of(device, struct mlx4_ib_dev, ib_dev.dev);
	return sprintf(buf, "%.*s\n", MLX4_BOARD_ID_LEN,
		       dev->dev->board_id);
}

static DEVICE_ATTR(hw_rev,   S_IRUGO, show_rev,    NULL);
static DEVICE_ATTR(fw_ver,   S_IRUGO, show_fw_ver, NULL);
static DEVICE_ATTR(hca_type, S_IRUGO, show_hca,    NULL);
static DEVICE_ATTR(board_id, S_IRUGO, show_board,  NULL);
static struct device_attribute *mlx4_class_attributes[] = {
	&dev_attr_hw_rev,
	&dev_attr_fw_ver,
	&dev_attr_hca_type,
	&dev_attr_board_id
};
static void mlx4_addrconf_ifid_eui48(u8 *eui, u16 vlan_id,
				     struct net_device *dev)
{
	memcpy(eui, dev->dev_addr, 3);
	memcpy(eui + 5, dev->dev_addr + 3, 3);
	if (vlan_id < 0x1000) {
		eui[3] = vlan_id >> 8;
		eui[4] = vlan_id & 0xff;
	} else {
		eui[3] = 0xff;
		eui[4] = 0xfe;
	}
	eui[0] ^= 2;
}
static void update_gids_task(struct work_struct *work)
{
	struct update_gid_work *gw = container_of(work, struct update_gid_work, work);
	struct mlx4_cmd_mailbox *mailbox;
	union ib_gid *gids;
	int err;
	struct mlx4_dev	*dev = gw->dev->dev;
	int is_bonded = mlx4_is_bonded(dev);

	if (!gw->dev->ib_active)
		return;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		pr_warn("update gid table failed %ld\n", PTR_ERR(mailbox));
		return;
	}

	gids = mailbox->buf;
	memcpy(gids, gw->gids, sizeof gw->gids);

	err = mlx4_cmd(dev, mailbox->dma, MLX4_SET_PORT_GID_TABLE << 8 | gw->port,
		       MLX4_SET_PORT_ETH_OPCODE, MLX4_CMD_SET_PORT,
		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
	if (err)
		pr_warn("set port command failed\n");
	else
		if ((gw->port == 1) || !is_bonded)
			mlx4_ib_dispatch_event(gw->dev,
					       is_bonded ? 1 : gw->port,
					       IB_EVENT_GID_CHANGE);

	mlx4_free_cmd_mailbox(dev, mailbox);
	kfree(gw);
}
static void reset_gids_task(struct work_struct *work)
{
	struct update_gid_work *gw =
			container_of(work, struct update_gid_work, work);
	struct mlx4_cmd_mailbox *mailbox;
	union ib_gid *gids;
	int err;
	struct mlx4_dev	*dev = gw->dev->dev;

	if (!gw->dev->ib_active)
		return;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		pr_warn("reset gid table failed\n");
		goto free;
	}

	gids = mailbox->buf;
	memcpy(gids, gw->gids, sizeof(gw->gids));

	if (mlx4_ib_port_link_layer(&gw->dev->ib_dev, gw->port) ==
				    IB_LINK_LAYER_ETHERNET) {
		err = mlx4_cmd(dev, mailbox->dma,
			       MLX4_SET_PORT_GID_TABLE << 8 | gw->port,
			       MLX4_SET_PORT_ETH_OPCODE, MLX4_CMD_SET_PORT,
			       MLX4_CMD_TIME_CLASS_B,
			       MLX4_CMD_WRAPPED);
		if (err)
			pr_warn("set port %d command failed\n", gw->port);
	}

	mlx4_free_cmd_mailbox(dev, mailbox);
free:
	kfree(gw);
}
static int update_gid_table(struct mlx4_ib_dev *dev, int port,
			    union ib_gid *gid, int clear,
			    int default_gid)
{
	struct update_gid_work *work;
	int i;
	int need_update = 0;
	int free = -1;
	int found = -1;
	int max_gids;

	if (default_gid) {
		free = 0;
	} else {
		max_gids = dev->dev->caps.gid_table_len[port];
		for (i = 1; i < max_gids; ++i) {
			if (!memcmp(&dev->iboe.gid_table[port - 1][i], gid,
				    sizeof(*gid)))
				found = i;

			if (clear) {
				if (found >= 0) {
					need_update = 1;
					dev->iboe.gid_table[port - 1][found] =
						zgid;
					break;
				}
			} else {
				if (found >= 0)
					break;

				if (free < 0 &&
				    !memcmp(&dev->iboe.gid_table[port - 1][i],
					    &zgid, sizeof(*gid)))
					free = i;
			}
		}
	}

	if (found == -1 && !clear && free >= 0) {
		dev->iboe.gid_table[port - 1][free] = *gid;
		need_update = 1;
	}

	if (!need_update)
		return 0;

	work = kzalloc(sizeof(*work), GFP_ATOMIC);
	if (!work)
		return -ENOMEM;

	memcpy(work->gids, dev->iboe.gid_table[port - 1], sizeof(work->gids));
	INIT_WORK(&work->work, update_gids_task);
	work->port = port;
	work->dev = dev;
	queue_work(wq, &work->work);

	return 0;
}
static void mlx4_make_default_gid(struct net_device *dev, union ib_gid *gid)
{
	gid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
	mlx4_addrconf_ifid_eui48(&gid->raw[8], 0xffff, dev);
}
static int reset_gid_table(struct mlx4_ib_dev *dev, u8 port)
{
	struct update_gid_work *work;

	work = kzalloc(sizeof(*work), GFP_ATOMIC);
	if (!work)
		return -ENOMEM;

	memset(dev->iboe.gid_table[port - 1], 0, sizeof(work->gids));
	memset(work->gids, 0, sizeof(work->gids));
	INIT_WORK(&work->work, reset_gids_task);
	work->dev = dev;
	work->port = port;
	queue_work(wq, &work->work);
	return 0;
}
static int mlx4_ib_addr_event(int event, struct net_device *event_netdev,
			      struct mlx4_ib_dev *ibdev, union ib_gid *gid)
{
	struct mlx4_ib_iboe *iboe;
	int port = 0;
	struct net_device *real_dev = rdma_vlan_dev_real_dev(event_netdev) ?
				rdma_vlan_dev_real_dev(event_netdev) :
				event_netdev;
	union ib_gid default_gid;

	mlx4_make_default_gid(real_dev, &default_gid);

	if (!memcmp(gid, &default_gid, sizeof(*gid)))
		return 0;

	if (event != NETDEV_DOWN && event != NETDEV_UP)
		return 0;

	if ((real_dev != event_netdev) &&
	    (event == NETDEV_DOWN) &&
	    rdma_link_local_addr((struct in6_addr *)gid))
		return 0;

	iboe = &ibdev->iboe;
	spin_lock_bh(&iboe->lock);

	for (port = 1; port <= ibdev->dev->caps.num_ports; ++port)
		if ((netif_is_bond_master(real_dev) &&
		     (real_dev == iboe->masters[port - 1])) ||
		     (!netif_is_bond_master(real_dev) &&
		     (real_dev == iboe->netdevs[port - 1])))
			update_gid_table(ibdev, port, gid,
					 event == NETDEV_DOWN, 0);

	spin_unlock_bh(&iboe->lock);
	return 0;

}
static u8 mlx4_ib_get_dev_port(struct net_device *dev,
			       struct mlx4_ib_dev *ibdev)
{
	u8 port = 0;
	struct mlx4_ib_iboe *iboe;
	struct net_device *real_dev = rdma_vlan_dev_real_dev(dev) ?
				rdma_vlan_dev_real_dev(dev) : dev;

	iboe = &ibdev->iboe;

	for (port = 1; port <= ibdev->dev->caps.num_ports; ++port)
		if ((netif_is_bond_master(real_dev) &&
		     (real_dev == iboe->masters[port - 1])) ||
		     (!netif_is_bond_master(real_dev) &&
		     (real_dev == iboe->netdevs[port - 1])))
			break;

	if ((port == 0) || (port > ibdev->dev->caps.num_ports))
		return 0;
	else
		return port;
}
static int mlx4_ib_inet_event(struct notifier_block *this, unsigned long event,
				void *ptr)
{
	struct mlx4_ib_dev *ibdev;
	struct in_ifaddr *ifa = ptr;
	union ib_gid gid;
	struct net_device *event_netdev = ifa->ifa_dev->dev;

	ipv6_addr_set_v4mapped(ifa->ifa_address, (struct in6_addr *)&gid);

	ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb_inet);

	mlx4_ib_addr_event(event, event_netdev, ibdev, &gid);
	return NOTIFY_DONE;
}
#if IS_ENABLED(CONFIG_IPV6)
static int mlx4_ib_inet6_event(struct notifier_block *this, unsigned long event,
				void *ptr)
{
	struct mlx4_ib_dev *ibdev;
	struct inet6_ifaddr *ifa = ptr;
	union  ib_gid *gid = (union ib_gid *)&ifa->addr;
	struct net_device *event_netdev = ifa->idev->dev;

	ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb_inet6);

	mlx4_ib_addr_event(event, event_netdev, ibdev, gid);
	return NOTIFY_DONE;
}
#endif
#define MLX4_IB_INVALID_MAC	((u64)-1)
static void mlx4_ib_update_qps(struct mlx4_ib_dev *ibdev,
			       struct net_device *dev,
			       int port)
{
	u64 new_smac = 0;
	u64 release_mac = MLX4_IB_INVALID_MAC;
	struct mlx4_ib_qp *qp;

	read_lock(&dev_base_lock);
	new_smac = mlx4_mac_to_u64(dev->dev_addr);
	read_unlock(&dev_base_lock);

	atomic64_set(&ibdev->iboe.mac[port - 1], new_smac);

	/* no need for update QP1 and mac registration in non-SRIOV */
	if (!mlx4_is_mfunc(ibdev->dev))
		return;

	mutex_lock(&ibdev->qp1_proxy_lock[port - 1]);
	qp = ibdev->qp1_proxy[port - 1];
	if (qp) {
		int new_smac_index;
		u64 old_smac;
		struct mlx4_update_qp_params update_params;

		mutex_lock(&qp->mutex);
		old_smac = qp->pri.smac;
		if (new_smac == old_smac)
			goto unlock;

		new_smac_index = mlx4_register_mac(ibdev->dev, port, new_smac);

		if (new_smac_index < 0)
			goto unlock;

		update_params.smac_index = new_smac_index;
		if (mlx4_update_qp(ibdev->dev, qp->mqp.qpn, MLX4_UPDATE_QP_SMAC,
				   &update_params)) {
			release_mac = new_smac;
			goto unlock;
		}
		/* if old port was zero, no mac was yet registered for this QP */
		if (qp->pri.smac_port)
			release_mac = old_smac;
		qp->pri.smac = new_smac;
		qp->pri.smac_port = port;
		qp->pri.smac_index = new_smac_index;
	}

unlock:
	if (release_mac != MLX4_IB_INVALID_MAC)
		mlx4_unregister_mac(ibdev->dev, port, release_mac);
	if (qp)
		mutex_unlock(&qp->mutex);
	mutex_unlock(&ibdev->qp1_proxy_lock[port - 1]);
}
static void mlx4_ib_get_dev_addr(struct net_device *dev,
				 struct mlx4_ib_dev *ibdev, u8 port)
{
	struct in_device *in_dev;
#if IS_ENABLED(CONFIG_IPV6)
	struct inet6_dev *in6_dev;
	union ib_gid  *pgid;
	struct inet6_ifaddr *ifp;
	union ib_gid default_gid;
#endif
	union ib_gid gid;


	if ((port == 0) || (port > ibdev->dev->caps.num_ports))
		return;

	/* IPv4 gids */
	in_dev = in_dev_get(dev);
	if (in_dev) {
		for_ifa(in_dev) {
			/*ifa->ifa_address;*/
			ipv6_addr_set_v4mapped(ifa->ifa_address,
					       (struct in6_addr *)&gid);
			update_gid_table(ibdev, port, &gid, 0, 0);
		}
		endfor_ifa(in_dev);
		in_dev_put(in_dev);
	}
#if IS_ENABLED(CONFIG_IPV6)
	mlx4_make_default_gid(dev, &default_gid);
	/* IPv6 gids */
	in6_dev = in6_dev_get(dev);
	if (in6_dev) {
		read_lock_bh(&in6_dev->lock);
		list_for_each_entry(ifp, &in6_dev->addr_list, if_list) {
			pgid = (union ib_gid *)&ifp->addr;
			if (!memcmp(pgid, &default_gid, sizeof(*pgid)))
				continue;
			update_gid_table(ibdev, port, pgid, 0, 0);
		}
		read_unlock_bh(&in6_dev->lock);
		in6_dev_put(in6_dev);
	}
#endif
}
static void mlx4_ib_set_default_gid(struct mlx4_ib_dev *ibdev,
				    struct net_device *dev, u8 port)
{
	union ib_gid gid;
	mlx4_make_default_gid(dev, &gid);
	update_gid_table(ibdev, port, &gid, 0, 1);
}
static int mlx4_ib_init_gid_table(struct mlx4_ib_dev *ibdev)
{
	struct	net_device *dev;
	struct mlx4_ib_iboe *iboe = &ibdev->iboe;
	int i;
	int err = 0;

	for (i = 1; i <= ibdev->num_ports; ++i) {
		if (rdma_port_get_link_layer(&ibdev->ib_dev, i) ==
		    IB_LINK_LAYER_ETHERNET) {
			err = reset_gid_table(ibdev, i);
			if (err)
				goto out;
		}
	}

	read_lock(&dev_base_lock);
	spin_lock_bh(&iboe->lock);

	for_each_netdev(&init_net, dev) {
		u8 port = mlx4_ib_get_dev_port(dev, ibdev);
		/* port will be non-zero only for ETH ports */
		if (port) {
			mlx4_ib_set_default_gid(ibdev, dev, port);
			mlx4_ib_get_dev_addr(dev, ibdev, port);
		}
	}

	spin_unlock_bh(&iboe->lock);
	read_unlock(&dev_base_lock);
out:
	return err;
}
static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev,
				 struct net_device *dev,
				 unsigned long event)

{
	struct mlx4_ib_iboe *iboe;
	int update_qps_port = -1;
	int port;

	iboe = &ibdev->iboe;

	spin_lock_bh(&iboe->lock);
	mlx4_foreach_ib_transport_port(port, ibdev->dev) {
		enum ib_port_state	port_state = IB_PORT_NOP;
		struct net_device *old_master = iboe->masters[port - 1];
		struct net_device *curr_netdev;
		struct net_device *curr_master;

		iboe->netdevs[port - 1] =
			mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port);
		if (iboe->netdevs[port - 1])
			mlx4_ib_set_default_gid(ibdev,
						iboe->netdevs[port - 1], port);
		curr_netdev = iboe->netdevs[port - 1];

		if (iboe->netdevs[port - 1] &&
		    netif_is_bond_slave(iboe->netdevs[port - 1])) {
			iboe->masters[port - 1] = netdev_master_upper_dev_get(
				iboe->netdevs[port - 1]);
		} else {
			iboe->masters[port - 1] = NULL;
		}
		curr_master = iboe->masters[port - 1];

		if (dev == iboe->netdevs[port - 1] &&
		    (event == NETDEV_CHANGEADDR || event == NETDEV_REGISTER ||
		     event == NETDEV_UP || event == NETDEV_CHANGE))
			update_qps_port = port;

		if (curr_netdev) {
			port_state = (netif_running(curr_netdev) && netif_carrier_ok(curr_netdev)) ?
						IB_PORT_ACTIVE : IB_PORT_DOWN;
			mlx4_ib_set_default_gid(ibdev, curr_netdev, port);
			if (curr_master) {
				/* if using bonding/team and a slave port is down, we
				 * don't want the bond IP based gids in the table since
				 * flows that select port by gid may get the down port.
				 */
				if (port_state == IB_PORT_DOWN &&
				    !mlx4_is_bonded(ibdev->dev)) {
					reset_gid_table(ibdev, port);
					mlx4_ib_set_default_gid(ibdev,
								curr_netdev,
								port);
				} else {
					/* gids from the upper dev (bond/team)
					 * should appear in port's gid table
					 */
					mlx4_ib_get_dev_addr(curr_master,
							     ibdev, port);
				}
			}
			/* if bonding is used it is possible that we add it to
			 * masters only after IP address is assigned to the
			 * net bonding interface.
			 */
			if (curr_master && (old_master != curr_master)) {
				reset_gid_table(ibdev, port);
				mlx4_ib_set_default_gid(ibdev,
							curr_netdev, port);
				mlx4_ib_get_dev_addr(curr_master, ibdev, port);
			}

			if (!curr_master && (old_master != curr_master)) {
				reset_gid_table(ibdev, port);
				mlx4_ib_set_default_gid(ibdev,
							curr_netdev, port);
				mlx4_ib_get_dev_addr(curr_netdev, ibdev, port);
			}
		} else {
			reset_gid_table(ibdev, port);
		}
	}

	spin_unlock_bh(&iboe->lock);

	if (update_qps_port > 0)
		mlx4_ib_update_qps(ibdev, dev, update_qps_port);
}
static int mlx4_ib_netdev_event(struct notifier_block *this,
				unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct mlx4_ib_dev *ibdev;

	if (!net_eq(dev_net(dev), &init_net))
		return NOTIFY_DONE;

	ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb);
	mlx4_ib_scan_netdevs(ibdev, dev, event);

	return NOTIFY_DONE;
}
static void init_pkeys(struct mlx4_ib_dev *ibdev)
{
	int port;
	int slave;
	int i;

	if (mlx4_is_master(ibdev->dev)) {
		for (slave = 0; slave <= ibdev->dev->persist->num_vfs;
		     ++slave) {
			for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) {
				for (i = 0;
				     i < ibdev->dev->phys_caps.pkey_phys_table_len[port];
				     ++i) {
					ibdev->pkeys.virt2phys_pkey[slave][port - 1][i] =
					/* master has the identity virt2phys pkey mapping */
						(slave == mlx4_master_func_num(ibdev->dev) || !i) ? i :
							ibdev->dev->phys_caps.pkey_phys_table_len[port] - 1;
					mlx4_sync_pkey_table(ibdev->dev, slave, port, i,
							     ibdev->pkeys.virt2phys_pkey[slave][port - 1][i]);
				}
			}
		}
		/* initialize pkey cache */
		for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) {
			for (i = 0;
			     i < ibdev->dev->phys_caps.pkey_phys_table_len[port];
			     ++i)
				ibdev->pkeys.phys_pkey_cache[port-1][i] =
					get_phys_pkey(ibdev, port, i);
		}
	}
}
static void mlx4_ib_alloc_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
{
	int i, j, eq = 0, total_eqs = 0;

	ibdev->eq_table = kcalloc(dev->caps.num_comp_vectors,
				  sizeof(ibdev->eq_table[0]), GFP_KERNEL);
	if (!ibdev->eq_table)
		return;

	for (i = 1; i <= dev->caps.num_ports; i++) {
		for (j = 0; j < mlx4_get_eqs_per_port(dev, i);
		     j++, total_eqs++) {
			if (i > 1 &&  mlx4_is_eq_shared(dev, total_eqs))
				continue;
			ibdev->eq_table[eq] = total_eqs;
			if (!mlx4_assign_eq(dev, i,
					    &ibdev->eq_table[eq]))
				eq++;
			else
				ibdev->eq_table[eq] = -1;
		}
	}

	for (i = eq; i < dev->caps.num_comp_vectors;
	     ibdev->eq_table[i++] = -1)
		;

	/* Advertise the new number of EQs to clients */
	ibdev->ib_dev.num_comp_vectors = eq;
}
static void mlx4_ib_free_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
{
	int i;
	int total_eqs = ibdev->ib_dev.num_comp_vectors;

	/* no eqs were allocated */
	if (!ibdev->eq_table)
		return;

	/* Reset the advertised EQ number */
	ibdev->ib_dev.num_comp_vectors = 0;

	for (i = 0; i < total_eqs; i++)
		mlx4_release_eq(dev, ibdev->eq_table[i]);

	kfree(ibdev->eq_table);
	ibdev->eq_table = NULL;
}
static int mlx4_port_immutable(struct ib_device *ibdev, u8 port_num,
			       struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	int err;

	err = mlx4_ib_query_port(ibdev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;

	if (mlx4_ib_port_link_layer(ibdev, port_num) == IB_LINK_LAYER_INFINIBAND)
		immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB;
	else
		immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;

	immutable->max_mad_size = IB_MGMT_MAD_SIZE;

	return 0;
}
2173 static void *mlx4_ib_add(struct mlx4_dev
*dev
)
2175 struct mlx4_ib_dev
*ibdev
;
2179 struct mlx4_ib_iboe
*iboe
;
2180 int ib_num_ports
= 0;
2181 int num_req_counters
;
2185 pr_info_once("%s", mlx4_ib_version
);
2188 mlx4_foreach_ib_transport_port(i
, dev
)
2191 /* No point in registering a device with no ports... */
2195 ibdev
= (struct mlx4_ib_dev
*) ib_alloc_device(sizeof *ibdev
);
2197 dev_err(&dev
->persist
->pdev
->dev
,
2198 "Device struct alloc failed\n");
2202 iboe
= &ibdev
->iboe
;
2204 if (mlx4_pd_alloc(dev
, &ibdev
->priv_pdn
))
2207 if (mlx4_uar_alloc(dev
, &ibdev
->priv_uar
))
2210 ibdev
->uar_map
= ioremap((phys_addr_t
) ibdev
->priv_uar
.pfn
<< PAGE_SHIFT
,
2212 if (!ibdev
->uar_map
)
2214 MLX4_INIT_DOORBELL_LOCK(&ibdev
->uar_lock
);
	ibdev->dev = dev;
	ibdev->bond_next_port	= 0;

	strlcpy(ibdev->ib_dev.name, "mlx4_%d", IB_DEVICE_NAME_MAX);
	ibdev->ib_dev.owner		= THIS_MODULE;
	ibdev->ib_dev.node_type		= RDMA_NODE_IB_CA;
	ibdev->ib_dev.local_dma_lkey	= dev->caps.reserved_lkey;
	ibdev->num_ports		= num_ports;
	ibdev->ib_dev.phys_port_cnt     = mlx4_is_bonded(dev) ?
						1 : ibdev->num_ports;
	ibdev->ib_dev.num_comp_vectors	= dev->caps.num_comp_vectors;
	ibdev->ib_dev.dma_device	= &dev->persist->pdev->dev;

	if (dev->caps.userspace_caps)
		ibdev->ib_dev.uverbs_abi_ver = MLX4_IB_UVERBS_ABI_VERSION;
	else
		ibdev->ib_dev.uverbs_abi_ver = MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION;

	ibdev->ib_dev.uverbs_cmd_mask	=
		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT)		|
		(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE)	|
		(1ull << IB_USER_VERBS_CMD_QUERY_PORT)		|
		(1ull << IB_USER_VERBS_CMD_ALLOC_PD)		|
		(1ull << IB_USER_VERBS_CMD_DEALLOC_PD)		|
		(1ull << IB_USER_VERBS_CMD_REG_MR)		|
		(1ull << IB_USER_VERBS_CMD_REREG_MR)		|
		(1ull << IB_USER_VERBS_CMD_DEREG_MR)		|
		(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL)	|
		(1ull << IB_USER_VERBS_CMD_CREATE_CQ)		|
		(1ull << IB_USER_VERBS_CMD_RESIZE_CQ)		|
		(1ull << IB_USER_VERBS_CMD_DESTROY_CQ)		|
		(1ull << IB_USER_VERBS_CMD_CREATE_QP)		|
		(1ull << IB_USER_VERBS_CMD_MODIFY_QP)		|
		(1ull << IB_USER_VERBS_CMD_QUERY_QP)		|
		(1ull << IB_USER_VERBS_CMD_DESTROY_QP)		|
		(1ull << IB_USER_VERBS_CMD_ATTACH_MCAST)	|
		(1ull << IB_USER_VERBS_CMD_DETACH_MCAST)	|
		(1ull << IB_USER_VERBS_CMD_CREATE_SRQ)		|
		(1ull << IB_USER_VERBS_CMD_MODIFY_SRQ)		|
		(1ull << IB_USER_VERBS_CMD_QUERY_SRQ)		|
		(1ull << IB_USER_VERBS_CMD_DESTROY_SRQ)		|
		(1ull << IB_USER_VERBS_CMD_CREATE_XSRQ)		|
		(1ull << IB_USER_VERBS_CMD_OPEN_QP);
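
	/* Verbs dispatch table: the entry points the RDMA core may call. */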
	ibdev->ib_dev.query_device	= mlx4_ib_query_device;
	ibdev->ib_dev.query_port	= mlx4_ib_query_port;
	ibdev->ib_dev.get_link_layer	= mlx4_ib_port_link_layer;
	ibdev->ib_dev.query_gid		= mlx4_ib_query_gid;
	ibdev->ib_dev.query_pkey	= mlx4_ib_query_pkey;
	ibdev->ib_dev.modify_device	= mlx4_ib_modify_device;
	ibdev->ib_dev.modify_port	= mlx4_ib_modify_port;
	ibdev->ib_dev.alloc_ucontext	= mlx4_ib_alloc_ucontext;
	ibdev->ib_dev.dealloc_ucontext	= mlx4_ib_dealloc_ucontext;
	ibdev->ib_dev.mmap		= mlx4_ib_mmap;
	ibdev->ib_dev.alloc_pd		= mlx4_ib_alloc_pd;
	ibdev->ib_dev.dealloc_pd	= mlx4_ib_dealloc_pd;
	ibdev->ib_dev.create_ah		= mlx4_ib_create_ah;
	ibdev->ib_dev.query_ah		= mlx4_ib_query_ah;
	ibdev->ib_dev.destroy_ah	= mlx4_ib_destroy_ah;
	ibdev->ib_dev.create_srq	= mlx4_ib_create_srq;
	ibdev->ib_dev.modify_srq	= mlx4_ib_modify_srq;
	ibdev->ib_dev.query_srq		= mlx4_ib_query_srq;
	ibdev->ib_dev.destroy_srq	= mlx4_ib_destroy_srq;
	ibdev->ib_dev.post_srq_recv	= mlx4_ib_post_srq_recv;
	ibdev->ib_dev.create_qp		= mlx4_ib_create_qp;
	ibdev->ib_dev.modify_qp		= mlx4_ib_modify_qp;
	ibdev->ib_dev.query_qp		= mlx4_ib_query_qp;
	ibdev->ib_dev.destroy_qp	= mlx4_ib_destroy_qp;
	ibdev->ib_dev.post_send		= mlx4_ib_post_send;
	ibdev->ib_dev.post_recv		= mlx4_ib_post_recv;
	ibdev->ib_dev.create_cq		= mlx4_ib_create_cq;
	ibdev->ib_dev.modify_cq		= mlx4_ib_modify_cq;
	ibdev->ib_dev.resize_cq		= mlx4_ib_resize_cq;
	ibdev->ib_dev.destroy_cq	= mlx4_ib_destroy_cq;
	ibdev->ib_dev.poll_cq		= mlx4_ib_poll_cq;
	ibdev->ib_dev.req_notify_cq	= mlx4_ib_arm_cq;
	ibdev->ib_dev.get_dma_mr	= mlx4_ib_get_dma_mr;
	ibdev->ib_dev.reg_user_mr	= mlx4_ib_reg_user_mr;
	ibdev->ib_dev.rereg_user_mr	= mlx4_ib_rereg_user_mr;
	ibdev->ib_dev.dereg_mr		= mlx4_ib_dereg_mr;
	ibdev->ib_dev.alloc_fast_reg_mr = mlx4_ib_alloc_fast_reg_mr;
	ibdev->ib_dev.alloc_fast_reg_page_list = mlx4_ib_alloc_fast_reg_page_list;
	ibdev->ib_dev.free_fast_reg_page_list  = mlx4_ib_free_fast_reg_page_list;
	ibdev->ib_dev.attach_mcast	= mlx4_ib_mcg_attach;
	ibdev->ib_dev.detach_mcast	= mlx4_ib_mcg_detach;
	ibdev->ib_dev.process_mad	= mlx4_ib_process_mad;
	ibdev->ib_dev.get_port_immutable = mlx4_port_immutable;
	if (!mlx4_is_slave(ibdev->dev)) {
		ibdev->ib_dev.alloc_fmr		= mlx4_ib_fmr_alloc;
		ibdev->ib_dev.map_phys_fmr	= mlx4_ib_map_phys_fmr;
		ibdev->ib_dev.unmap_fmr		= mlx4_ib_unmap_fmr;
		ibdev->ib_dev.dealloc_fmr	= mlx4_ib_fmr_dealloc;
	}

	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW ||
	    dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN) {
		ibdev->ib_dev.alloc_mw = mlx4_ib_alloc_mw;
		ibdev->ib_dev.bind_mw = mlx4_ib_bind_mw;
		ibdev->ib_dev.dealloc_mw = mlx4_ib_dealloc_mw;

		ibdev->ib_dev.uverbs_cmd_mask |=
			(1ull << IB_USER_VERBS_CMD_ALLOC_MW) |
			(1ull << IB_USER_VERBS_CMD_DEALLOC_MW);
	}

	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) {
		ibdev->ib_dev.alloc_xrcd = mlx4_ib_alloc_xrcd;
		ibdev->ib_dev.dealloc_xrcd = mlx4_ib_dealloc_xrcd;
		ibdev->ib_dev.uverbs_cmd_mask |=
			(1ull << IB_USER_VERBS_CMD_OPEN_XRCD) |
			(1ull << IB_USER_VERBS_CMD_CLOSE_XRCD);
	}

	if (check_flow_steering_support(dev)) {
		ibdev->steering_support = MLX4_STEERING_MODE_DEVICE_MANAGED;
		ibdev->ib_dev.create_flow	= mlx4_ib_create_flow;
		ibdev->ib_dev.destroy_flow	= mlx4_ib_destroy_flow;

		ibdev->ib_dev.uverbs_ex_cmd_mask	|=
			(1ull << IB_USER_VERBS_EX_CMD_CREATE_FLOW) |
			(1ull << IB_USER_VERBS_EX_CMD_DESTROY_FLOW);
	}

	ibdev->ib_dev.uverbs_ex_cmd_mask |=
		(1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE) |
		(1ull << IB_USER_VERBS_EX_CMD_CREATE_CQ);
	mlx4_ib_alloc_eqs(dev, ibdev);

	spin_lock_init(&iboe->lock);

	if (init_node_data(ibdev))
		goto err_map;

	num_req_counters = mlx4_is_bonded(dev) ? 1 : ibdev->num_ports;
	for (i = 0; i < num_req_counters; ++i) {
		mutex_init(&ibdev->qp1_proxy_lock[i]);
		allocated = 0;
		if (mlx4_ib_port_link_layer(&ibdev->ib_dev, i + 1) ==
						IB_LINK_LAYER_ETHERNET) {
			err = mlx4_counter_alloc(ibdev->dev, &counter_index);
			/* if failed to allocate a new counter, use default */
			if (err)
				counter_index =
					mlx4_get_default_counter_index(dev,
								       i + 1);
			else
				allocated = 1;
		} else { /* IB_LINK_LAYER_INFINIBAND use the default counter */
			counter_index = mlx4_get_default_counter_index(dev,
								       i + 1);
		}
		ibdev->counters[i].index = counter_index;
		ibdev->counters[i].allocated = allocated;
		pr_info("counter index %d for port %d allocated %d\n",
			counter_index, i + 1, allocated);
	}
	if (mlx4_is_bonded(dev))
		for (i = 1; i < ibdev->num_ports; ++i) {
			ibdev->counters[i].index = ibdev->counters[0].index;
			ibdev->counters[i].allocated = 0;
		}
	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
		ib_num_ports++;

	spin_lock_init(&ibdev->sm_lock);
	mutex_init(&ibdev->cap_mask_mutex);
	INIT_LIST_HEAD(&ibdev->qp_list);
	spin_lock_init(&ibdev->reset_flow_resource_lock);

	if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED &&
	    ib_num_ports) {
		ibdev->steer_qpn_count = MLX4_IB_UC_MAX_NUM_QPS;
		err = mlx4_qp_reserve_range(dev, ibdev->steer_qpn_count,
					    MLX4_IB_UC_STEER_QPN_ALIGN,
					    &ibdev->steer_qpn_base, 0);
		if (err)
			goto err_counter;

		ibdev->ib_uc_qpns_bitmap =
			kmalloc(BITS_TO_LONGS(ibdev->steer_qpn_count) *
				sizeof(long),
				GFP_KERNEL);
		if (!ibdev->ib_uc_qpns_bitmap) {
			dev_err(&dev->persist->pdev->dev,
				"bit map alloc failed\n");
			goto err_steer_qp_release;
		}

		bitmap_zero(ibdev->ib_uc_qpns_bitmap, ibdev->steer_qpn_count);

		err = mlx4_FLOW_STEERING_IB_UC_QP_RANGE(
				dev, ibdev->steer_qpn_base,
				ibdev->steer_qpn_base +
				ibdev->steer_qpn_count - 1);
		if (err)
			goto err_steer_free_bitmap;
	}

	for (j = 1; j <= ibdev->dev->caps.num_ports; j++)
		atomic64_set(&iboe->mac[j - 1], ibdev->dev->caps.def_mac[j]);
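
	/*
	 * From ib_register_device() onward the device is visible to ULPs,
	 * so everything reachable through the verbs table must already be
	 * initialized.
	 */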
	if (ib_register_device(&ibdev->ib_dev, NULL))
		goto err_steer_free_bitmap;

	if (mlx4_ib_mad_init(ibdev))
		goto err_reg;

	if (mlx4_ib_init_sriov(ibdev))
		goto err_mad;

	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE) {
		if (!iboe->nb.notifier_call) {
			iboe->nb.notifier_call = mlx4_ib_netdev_event;
			err = register_netdevice_notifier(&iboe->nb);
			if (err) {
				iboe->nb.notifier_call = NULL;
				goto err_notif;
			}
		}
		if (!iboe->nb_inet.notifier_call) {
			iboe->nb_inet.notifier_call = mlx4_ib_inet_event;
			err = register_inetaddr_notifier(&iboe->nb_inet);
			if (err) {
				iboe->nb_inet.notifier_call = NULL;
				goto err_notif;
			}
		}
#if IS_ENABLED(CONFIG_IPV6)
		if (!iboe->nb_inet6.notifier_call) {
			iboe->nb_inet6.notifier_call = mlx4_ib_inet6_event;
			err = register_inet6addr_notifier(&iboe->nb_inet6);
			if (err) {
				iboe->nb_inet6.notifier_call = NULL;
				goto err_notif;
			}
		}
#endif
		if (mlx4_ib_init_gid_table(ibdev))
			goto err_notif;
	}

	for (j = 0; j < ARRAY_SIZE(mlx4_class_attributes); ++j) {
		if (device_create_file(&ibdev->ib_dev.dev,
				       mlx4_class_attributes[j]))
			goto err_notif;
	}

	ibdev->ib_active = true;

	if (mlx4_is_mfunc(ibdev->dev))
		init_pkeys(ibdev);

	/* create paravirt contexts for any VFs which are active */
	if (mlx4_is_master(ibdev->dev)) {
		for (j = 0; j < MLX4_MFUNC_MAX; j++) {
			if (j == mlx4_master_func_num(ibdev->dev))
				continue;
			if (mlx4_is_slave_active(ibdev->dev, j))
				do_slave_init(ibdev, j, 1);
		}
	}
	return ibdev;
err_notif:
	if (ibdev->iboe.nb.notifier_call) {
		if (unregister_netdevice_notifier(&ibdev->iboe.nb))
			pr_warn("failure unregistering notifier\n");
		ibdev->iboe.nb.notifier_call = NULL;
	}
	if (ibdev->iboe.nb_inet.notifier_call) {
		if (unregister_inetaddr_notifier(&ibdev->iboe.nb_inet))
			pr_warn("failure unregistering notifier\n");
		ibdev->iboe.nb_inet.notifier_call = NULL;
	}
#if IS_ENABLED(CONFIG_IPV6)
	if (ibdev->iboe.nb_inet6.notifier_call) {
		if (unregister_inet6addr_notifier(&ibdev->iboe.nb_inet6))
			pr_warn("failure unregistering notifier\n");
		ibdev->iboe.nb_inet6.notifier_call = NULL;
	}
#endif
	flush_workqueue(wq);

	mlx4_ib_close_sriov(ibdev);

err_mad:
	mlx4_ib_mad_cleanup(ibdev);

err_reg:
	ib_unregister_device(&ibdev->ib_dev);

err_steer_free_bitmap:
	kfree(ibdev->ib_uc_qpns_bitmap);

err_steer_qp_release:
	if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED)
		mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
				      ibdev->steer_qpn_count);
err_counter:
	for (i = 0; i < ibdev->num_ports; ++i) {
		if (ibdev->counters[i].index != -1 &&
		    ibdev->counters[i].allocated)
			mlx4_counter_free(ibdev->dev,
					  ibdev->counters[i].index);
	}

err_map:
	iounmap(ibdev->uar_map);

err_uar:
	mlx4_uar_free(dev, &ibdev->priv_uar);

err_pd:
	mlx4_pd_free(dev, ibdev->priv_pdn);

err_dealloc:
	ib_dealloc_device(&ibdev->ib_dev);

	return NULL;
}
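
/*
 * Steering QPN allocator: hands out naturally aligned power-of-two blocks
 * from the range reserved in mlx4_ib_add(). get_count_order() rounds the
 * request up to the next power of two to match the order-based semantics
 * of bitmap_find_free_region(). A hypothetical caller (illustration only,
 * not taken from this file) would pair the calls like this:
 *
 *	int qpn;
 *
 *	if (!mlx4_ib_steer_qp_alloc(mdev, 1, &qpn)) {
 *		(use qpn for a UC steering QP)
 *		mlx4_ib_steer_qp_free(mdev, qpn, 1);
 *	}
 */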
int mlx4_ib_steer_qp_alloc(struct mlx4_ib_dev *dev, int count, int *qpn)
{
	int offset;

	WARN_ON(!dev->ib_uc_qpns_bitmap);

	offset = bitmap_find_free_region(dev->ib_uc_qpns_bitmap,
					 dev->steer_qpn_count,
					 get_count_order(count));
	if (offset < 0)
		return offset;

	*qpn = dev->steer_qpn_base + offset;
	return 0;
}
void mlx4_ib_steer_qp_free(struct mlx4_ib_dev *dev, u32 qpn, int count)
{
	if (!qpn ||
	    dev->steering_support != MLX4_STEERING_MODE_DEVICE_MANAGED)
		return;

	BUG_ON(qpn < dev->steer_qpn_base);

	bitmap_release_region(dev->ib_uc_qpns_bitmap,
			      qpn - dev->steer_qpn_base,
			      get_count_order(count));
}
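
/*
 * Attach or detach the default flow rule for a UC QP. The all-zero mask
 * below makes the IB L2 spec match any traffic on the QP's port, which
 * appears to be the catch-all rule device-managed steering expects.
 */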
int mlx4_ib_steer_qp_reg(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
			 int is_attach)
{
	int err;
	size_t flow_size;
	struct ib_flow_attr *flow = NULL;
	struct ib_flow_spec_ib *ib_spec;

	if (is_attach) {
		flow_size = sizeof(struct ib_flow_attr) +
			    sizeof(struct ib_flow_spec_ib);
		flow = kzalloc(flow_size, GFP_KERNEL);
		if (!flow)
			return -ENOMEM;
		flow->port = mqp->port;
		flow->num_of_specs = 1;
		flow->size = flow_size;
		ib_spec = (struct ib_flow_spec_ib *)(flow + 1);
		ib_spec->type = IB_FLOW_SPEC_IB;
		ib_spec->size = sizeof(struct ib_flow_spec_ib);
		/* Add an empty rule for IB L2 */
		memset(&ib_spec->mask, 0, sizeof(ib_spec->mask));

		err = __mlx4_ib_create_flow(&mqp->ibqp, flow,
					    IB_FLOW_DOMAIN_NIC,
					    MLX4_FS_REGULAR,
					    &mqp->reg_id);
	} else {
		err = __mlx4_ib_destroy_flow(mdev->dev, mqp->reg_id);
	}
	kfree(flow);
	return err;
}
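
/*
 * Teardown mirrors mlx4_ib_add(): the device is marked inactive and the
 * workqueue drained first, then resources are released in reverse order
 * of setup.
 */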
static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
{
	struct mlx4_ib_dev *ibdev = ibdev_ptr;
	int p;

	ibdev->ib_active = false;
	flush_workqueue(wq);

	mlx4_ib_close_sriov(ibdev);
	mlx4_ib_mad_cleanup(ibdev);
	ib_unregister_device(&ibdev->ib_dev);
	if (ibdev->iboe.nb.notifier_call) {
		if (unregister_netdevice_notifier(&ibdev->iboe.nb))
			pr_warn("failure unregistering notifier\n");
		ibdev->iboe.nb.notifier_call = NULL;
	}

	if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED) {
		mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
				      ibdev->steer_qpn_count);
		kfree(ibdev->ib_uc_qpns_bitmap);
	}

	if (ibdev->iboe.nb_inet.notifier_call) {
		if (unregister_inetaddr_notifier(&ibdev->iboe.nb_inet))
			pr_warn("failure unregistering notifier\n");
		ibdev->iboe.nb_inet.notifier_call = NULL;
	}
#if IS_ENABLED(CONFIG_IPV6)
	if (ibdev->iboe.nb_inet6.notifier_call) {
		if (unregister_inet6addr_notifier(&ibdev->iboe.nb_inet6))
			pr_warn("failure unregistering notifier\n");
		ibdev->iboe.nb_inet6.notifier_call = NULL;
	}
#endif

	iounmap(ibdev->uar_map);
	for (p = 0; p < ibdev->num_ports; ++p)
		if (ibdev->counters[p].index != -1 &&
		    ibdev->counters[p].allocated)
			mlx4_counter_free(ibdev->dev, ibdev->counters[p].index);
	mlx4_foreach_port(p, dev, MLX4_PORT_TYPE_IB)
		mlx4_CLOSE_PORT(dev, p);

	mlx4_ib_free_eqs(dev, ibdev);

	mlx4_uar_free(dev, &ibdev->priv_uar);
	mlx4_pd_free(dev, ibdev->priv_pdn);
	ib_dealloc_device(&ibdev->ib_dev);
}
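
/*
 * Create or destroy the paravirtual tunnel QPs for one slave (VF). The
 * per-port work items are queued under going_down_lock so they cannot
 * race with SR-IOV teardown.
 */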
static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init)
{
	struct mlx4_ib_demux_work **dm = NULL;
	struct mlx4_dev *dev = ibdev->dev;
	int i;
	unsigned long flags;
	struct mlx4_active_ports actv_ports;
	unsigned int ports;
	unsigned int first_port;

	if (!mlx4_is_master(dev))
		return;

	actv_ports = mlx4_get_active_ports(dev, slave);
	ports = bitmap_weight(actv_ports.ports, dev->caps.num_ports);
	first_port = find_first_bit(actv_ports.ports, dev->caps.num_ports);

	dm = kcalloc(ports, sizeof(*dm), GFP_ATOMIC);
	if (!dm) {
		pr_err("failed to allocate memory for tunneling qp update\n");
		return;
	}

	for (i = 0; i < ports; i++) {
		dm[i] = kmalloc(sizeof (struct mlx4_ib_demux_work), GFP_ATOMIC);
		if (!dm[i]) {
			pr_err("failed to allocate memory for tunneling qp update work struct\n");
			while (--i >= 0)
				kfree(dm[i]);
			goto out;
		}
		INIT_WORK(&dm[i]->work, mlx4_ib_tunnels_update_work);
		dm[i]->port = first_port + i + 1;
		dm[i]->slave = slave;
		dm[i]->do_init = do_init;
		dm[i]->dev = ibdev;
	}
	/* initialize or tear down tunnel QPs for the slave */
	spin_lock_irqsave(&ibdev->sriov.going_down_lock, flags);
	if (!ibdev->sriov.is_going_down) {
		for (i = 0; i < ports; i++)
			queue_work(ibdev->sriov.demux[i].ud_wq, &dm[i]->work);
		spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags);
	} else {
		spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags);
		for (i = 0; i < ports; i++)
			kfree(dm[i]);
	}
out:
	kfree(dm);
	return;
}
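
/*
 * After a catastrophic error the hardware can no longer be relied on to
 * generate completions, so walk the QP list and manually invoke the
 * completion handler of every CQ that still has outstanding work.
 * reset_flow_resource_lock keeps the QP list stable during the walk.
 */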
static void mlx4_ib_handle_catas_error(struct mlx4_ib_dev *ibdev)
{
	struct mlx4_ib_qp *mqp;
	unsigned long flags_qp;
	unsigned long flags_cq;
	struct mlx4_ib_cq *send_mcq, *recv_mcq;
	struct list_head cq_notify_list;
	struct mlx4_cq *mcq;
	unsigned long flags;

	pr_warn("mlx4_ib_handle_catas_error was started\n");
	INIT_LIST_HEAD(&cq_notify_list);

	/* Go over qp list reside on that ibdev, sync with create/destroy qp.*/
	spin_lock_irqsave(&ibdev->reset_flow_resource_lock, flags);

	list_for_each_entry(mqp, &ibdev->qp_list, qps_list) {
		spin_lock_irqsave(&mqp->sq.lock, flags_qp);
		if (mqp->sq.tail != mqp->sq.head) {
			send_mcq = to_mcq(mqp->ibqp.send_cq);
			spin_lock_irqsave(&send_mcq->lock, flags_cq);
			if (send_mcq->mcq.comp &&
			    mqp->ibqp.send_cq->comp_handler) {
				if (!send_mcq->mcq.reset_notify_added) {
					send_mcq->mcq.reset_notify_added = 1;
					list_add_tail(&send_mcq->mcq.reset_notify,
						      &cq_notify_list);
				}
			}
			spin_unlock_irqrestore(&send_mcq->lock, flags_cq);
		}
		spin_unlock_irqrestore(&mqp->sq.lock, flags_qp);
		/* Now, handle the QP's receive queue */
		spin_lock_irqsave(&mqp->rq.lock, flags_qp);
		/* no handling is needed for SRQ */
		if (!mqp->ibqp.srq) {
			if (mqp->rq.tail != mqp->rq.head) {
				recv_mcq = to_mcq(mqp->ibqp.recv_cq);
				spin_lock_irqsave(&recv_mcq->lock, flags_cq);
				if (recv_mcq->mcq.comp &&
				    mqp->ibqp.recv_cq->comp_handler) {
					if (!recv_mcq->mcq.reset_notify_added) {
						recv_mcq->mcq.reset_notify_added = 1;
						list_add_tail(&recv_mcq->mcq.reset_notify,
							      &cq_notify_list);
					}
				}
				spin_unlock_irqrestore(&recv_mcq->lock,
						       flags_cq);
			}
		}
		spin_unlock_irqrestore(&mqp->rq.lock, flags_qp);
	}

	list_for_each_entry(mcq, &cq_notify_list, reset_notify) {
		mcq->comp(mcq);
	}
	spin_unlock_irqrestore(&ibdev->reset_flow_resource_lock, flags);
	pr_warn("mlx4_ib_handle_catas_error ended\n");
}
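
/*
 * With port bonding the two physical ports are presented to ULPs as a
 * single logical port 1, which is reported ACTIVE as long as at least one
 * underlying netdev is running with carrier.
 */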
static void handle_bonded_port_state_event(struct work_struct *work)
{
	struct ib_event_work *ew =
		container_of(work, struct ib_event_work, work);
	struct mlx4_ib_dev *ibdev = ew->ib_dev;
	enum ib_port_state bonded_port_state = IB_PORT_NOP;
	int i;
	struct ib_event ibev;

	kfree(ew);
	spin_lock_bh(&ibdev->iboe.lock);
	for (i = 0; i < MLX4_MAX_PORTS; ++i) {
		struct net_device *curr_netdev = ibdev->iboe.netdevs[i];
		enum ib_port_state curr_port_state;

		if (!curr_netdev)
			continue;

		curr_port_state =
			(netif_running(curr_netdev) &&
			 netif_carrier_ok(curr_netdev)) ?
			IB_PORT_ACTIVE : IB_PORT_DOWN;

		bonded_port_state = (bonded_port_state != IB_PORT_ACTIVE) ?
			curr_port_state : IB_PORT_ACTIVE;
	}
	spin_unlock_bh(&ibdev->iboe.lock);

	ibev.device = &ibdev->ib_dev;
	ibev.element.port_num = 1;
	ibev.event = (bonded_port_state == IB_PORT_ACTIVE) ?
		IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;

	ib_dispatch_event(&ibev);
}
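
/*
 * Asynchronous event demultiplexer. Bonded port up/down and port
 * management changes are deferred to the workqueue; most other events are
 * translated into an ib_event and dispatched directly.
 */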
static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
			  enum mlx4_dev_event event, unsigned long param)
{
	struct ib_event ibev;
	struct mlx4_ib_dev *ibdev = to_mdev((struct ib_device *) ibdev_ptr);
	struct mlx4_eqe *eqe = NULL;
	struct ib_event_work *ew;
	int p = 0;

	if (mlx4_is_bonded(dev) &&
	    ((event == MLX4_DEV_EVENT_PORT_UP) ||
	     (event == MLX4_DEV_EVENT_PORT_DOWN))) {
		ew = kmalloc(sizeof(*ew), GFP_ATOMIC);
		if (!ew)
			return;
		INIT_WORK(&ew->work, handle_bonded_port_state_event);
		ew->ib_dev = ibdev;
		queue_work(wq, &ew->work);
		return;
	}

	if (event == MLX4_DEV_EVENT_PORT_MGMT_CHANGE)
		eqe = (struct mlx4_eqe *)param;
	else
		p = (int) param;

	switch (event) {
	case MLX4_DEV_EVENT_PORT_UP:
		if (p > ibdev->num_ports)
			return;
		if (mlx4_is_master(dev) &&
		    rdma_port_get_link_layer(&ibdev->ib_dev, p) ==
			IB_LINK_LAYER_INFINIBAND) {
			mlx4_ib_invalidate_all_guid_record(ibdev, p);
		}
		ibev.event = IB_EVENT_PORT_ACTIVE;
		break;

	case MLX4_DEV_EVENT_PORT_DOWN:
		if (p > ibdev->num_ports)
			return;
		ibev.event = IB_EVENT_PORT_ERR;
		break;

	case MLX4_DEV_EVENT_CATASTROPHIC_ERROR:
		ibdev->ib_active = false;
		ibev.event = IB_EVENT_DEVICE_FATAL;
		mlx4_ib_handle_catas_error(ibdev);
		break;

	case MLX4_DEV_EVENT_PORT_MGMT_CHANGE:
		ew = kmalloc(sizeof *ew, GFP_ATOMIC);
		if (!ew) {
			pr_err("failed to allocate memory for events work\n");
			break;
		}

		INIT_WORK(&ew->work, handle_port_mgmt_change_event);
		memcpy(&ew->ib_eqe, eqe, sizeof *eqe);
		ew->ib_dev = ibdev;
		/* need to queue only for port owner, which uses GEN_EQE */
		if (mlx4_is_master(dev))
			queue_work(wq, &ew->work);
		else
			handle_port_mgmt_change_event(&ew->work);
		return;

	case MLX4_DEV_EVENT_SLAVE_INIT:
		/* here, p is the slave id */
		do_slave_init(ibdev, p, 1);
		if (mlx4_is_master(dev)) {
			int i;

			for (i = 1; i <= ibdev->num_ports; i++) {
				if (rdma_port_get_link_layer(&ibdev->ib_dev, i)
				    == IB_LINK_LAYER_INFINIBAND)
					mlx4_ib_slave_alias_guid_event(ibdev,
								       p, i,
								       1);
			}
		}
		return;

	case MLX4_DEV_EVENT_SLAVE_SHUTDOWN:
		if (mlx4_is_master(dev)) {
			int i;

			for (i = 1; i <= ibdev->num_ports; i++) {
				if (rdma_port_get_link_layer(&ibdev->ib_dev, i)
				    == IB_LINK_LAYER_INFINIBAND)
					mlx4_ib_slave_alias_guid_event(ibdev,
								       p, i,
								       0);
			}
		}
		/* here, p is the slave id */
		do_slave_init(ibdev, p, 0);
		return;

	default:
		return;
	}

	ibev.device	      = ibdev_ptr;
	ibev.element.port_num = mlx4_is_bonded(ibdev->dev) ? 1 : (u8)p;

	ib_dispatch_event(&ibev);
}
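
/*
 * Interface registration with the mlx4 core: the core calls .add/.remove
 * per device and .event for asynchronous events. MLX4_INTFF_BONDING lets
 * the core present a bonded two-port device as a single port to this
 * driver.
 */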
static struct mlx4_interface mlx4_ib_interface = {
	.add		= mlx4_ib_add,
	.remove		= mlx4_ib_remove,
	.event		= mlx4_ib_event,
	.protocol	= MLX4_PROT_IB_IPV6,
	.flags		= MLX4_INTFF_BONDING
};
static int __init mlx4_ib_init(void)
{
	int err;

	wq = create_singlethread_workqueue("mlx4_ib");
	if (!wq)
		return -ENOMEM;

	err = mlx4_ib_mcg_init();
	if (err)
		goto clean_wq;

	err = mlx4_register_interface(&mlx4_ib_interface);
	if (err)
		goto clean_mcg;

	return 0;

clean_mcg:
	mlx4_ib_mcg_destroy();

clean_wq:
	destroy_workqueue(wq);
	return err;
}
static void __exit mlx4_ib_cleanup(void)
{
	mlx4_unregister_interface(&mlx4_ib_interface);
	mlx4_ib_mcg_destroy();
	destroy_workqueue(wq);
}
module_init(mlx4_ib_init);
module_exit(mlx4_ib_cleanup);