/*
 * Copyright (c) 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>

#include <net/addrconf.h>

#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>

#include <net/bonding.h>

#include <linux/mlx4/driver.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>

#include "mlx4_ib.h"
#define DRV_NAME	MLX4_IB_DRV_NAME
#define DRV_VERSION	"2.2-1"
#define DRV_RELDATE	"Feb 2014"

#define MLX4_IB_FLOW_MAX_PRIO 0xFFF
#define MLX4_IB_FLOW_QPN_MASK 0xFFFFFF
#define MLX4_IB_CARD_REV_A0   0xA0

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("Mellanox ConnectX HCA InfiniBand driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);

int mlx4_ib_sm_guid_assign = 0;
module_param_named(sm_guid_assign, mlx4_ib_sm_guid_assign, int, 0444);
MODULE_PARM_DESC(sm_guid_assign, "Enable SM alias_GUID assignment if sm_guid_assign > 0 (Default: 0)");

static const char mlx4_ib_version[] =
	DRV_NAME ": Mellanox ConnectX InfiniBand driver v"
	DRV_VERSION " (" DRV_RELDATE ")\n";
static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init);

static struct workqueue_struct *wq;

static void init_query_mad(struct ib_smp *mad)
{
	mad->base_version  = 1;
	mad->mgmt_class    = IB_MGMT_CLASS_SUBN_LID_ROUTED;
	mad->class_version = 1;
	mad->method	   = IB_MGMT_METHOD_GET;
}
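
/*
 * Device-managed flow steering (DMFS) is only usable when the capability
 * bits matching the port types present on the device are set, and it is
 * not available for IB ports in a multi-function (SR-IOV) configuration.
 */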
static int check_flow_steering_support(struct mlx4_dev *dev)
{
	int eth_num_ports = 0;
	int ib_num_ports = 0;

	int dmfs = dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED;

	if (dmfs) {
		int i;
		mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH)
			eth_num_ports++;
		mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
			ib_num_ports++;
		dmfs &= (!ib_num_ports ||
			 (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DMFS_IPOIB)) &&
			(!eth_num_ports ||
			 (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FS_EN));
		if (ib_num_ports && mlx4_is_mfunc(dev)) {
			pr_warn("Device managed flow steering is unavailable for IB port in multifunction env.\n");
			dmfs = 0;
		}
	}
	return dmfs;
}
static int num_ib_ports(struct mlx4_dev *dev)
{
	int ib_ports = 0;
	int i;

	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
		ib_ports++;

	return ib_ports;
}
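
/*
 * Return the Ethernet netdev backing an IB (RoCE) port.  When the device
 * is bonded, the active slave of the bond master is returned rather than
 * the raw port netdev.
 */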
static struct net_device *mlx4_ib_get_netdev(struct ib_device *device, u8 port_num)
{
	struct mlx4_ib_dev *ibdev = to_mdev(device);
	struct net_device *dev;

	rcu_read_lock();
	dev = mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port_num);

	if (dev) {
		if (mlx4_is_bonded(ibdev->dev)) {
			struct net_device *upper = NULL;

			upper = netdev_master_upper_dev_get_rcu(dev);
			if (upper) {
				struct net_device *active;

				active = bond_option_active_slave_get_rcu(netdev_priv(upper));
				if (active)
					dev = active;
			}
		}
	}
	if (dev)
		dev_hold(dev);

	rcu_read_unlock();
	return dev;
}
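
/*
 * Push the software GID table to firmware via SET_PORT.  The _v1 variant
 * programs plain RoCE v1 GIDs; the _v1_v2 variant below also encodes the
 * RoCE version and IPv4-mapped layout for RoCE v2 entries.
 */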
static int mlx4_ib_update_gids_v1(struct gid_entry *gids,
				  struct mlx4_ib_dev *ibdev,
				  u8 port_num)
{
	struct mlx4_cmd_mailbox *mailbox;
	int err;
	struct mlx4_dev *dev = ibdev->dev;
	int i;
	union ib_gid *gid_tbl;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return -ENOMEM;

	gid_tbl = mailbox->buf;

	for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i)
		memcpy(&gid_tbl[i], &gids[i].gid, sizeof(union ib_gid));

	err = mlx4_cmd(dev, mailbox->dma,
		       MLX4_SET_PORT_GID_TABLE << 8 | port_num,
		       1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
		       MLX4_CMD_WRAPPED);
	if (mlx4_is_bonded(dev))
		err += mlx4_cmd(dev, mailbox->dma,
				MLX4_SET_PORT_GID_TABLE << 8 | 2,
				1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
				MLX4_CMD_WRAPPED);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
static int mlx4_ib_update_gids_v1_v2(struct gid_entry *gids,
				     struct mlx4_ib_dev *ibdev,
				     u8 port_num)
{
	struct mlx4_cmd_mailbox *mailbox;
	int err;
	struct mlx4_dev *dev = ibdev->dev;
	int i;
	struct {
		union ib_gid	gid;
		__be32		rsrvd1[2];
		__be16		rsrvd2;
		u8		type;
		u8		version;
		__be32		rsrvd3;
	} *gid_tbl;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return -ENOMEM;

	gid_tbl = mailbox->buf;
	for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i) {
		memcpy(&gid_tbl[i].gid, &gids[i].gid, sizeof(union ib_gid));
		if (gids[i].gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) {
			gid_tbl[i].version = 2;
			if (!ipv6_addr_v4mapped((struct in6_addr *)&gids[i].gid))
				gid_tbl[i].type = 1;
			else
				memset(&gid_tbl[i].gid, 0, 12);
		}
	}

	err = mlx4_cmd(dev, mailbox->dma,
		       MLX4_SET_PORT_ROCE_ADDR << 8 | port_num,
		       1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
		       MLX4_CMD_WRAPPED);
	if (mlx4_is_bonded(dev))
		err += mlx4_cmd(dev, mailbox->dma,
				MLX4_SET_PORT_ROCE_ADDR << 8 | 2,
				1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
				MLX4_CMD_WRAPPED);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
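
/* Pick the SET_PORT layout based on the RoCE v1/v2 capability bit. */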
static int mlx4_ib_update_gids(struct gid_entry *gids,
			       struct mlx4_ib_dev *ibdev,
			       u8 port_num)
{
	if (ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2)
		return mlx4_ib_update_gids_v1_v2(gids, ibdev, port_num);

	return mlx4_ib_update_gids_v1(gids, ibdev, port_num);
}
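
/*
 * add_gid/del_gid maintain a per-port software copy of the hardware GID
 * table.  Entries are reference counted, and the firmware table is only
 * rewritten (through mlx4_ib_update_gids) when an entry is actually added
 * to or removed from the hardware table.
 */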
static int mlx4_ib_add_gid(struct ib_device *device,
			   u8 port_num,
			   unsigned int index,
			   const union ib_gid *gid,
			   const struct ib_gid_attr *attr,
			   void **context)
{
	struct mlx4_ib_dev *ibdev = to_mdev(device);
	struct mlx4_ib_iboe *iboe = &ibdev->iboe;
	struct mlx4_port_gid_table *port_gid_table;
	int free = -1, found = -1;
	int ret = 0;
	int hw_update = 0;
	int i;
	struct gid_entry *gids = NULL;

	if (!rdma_cap_roce_gid_table(device, port_num))
		return -EINVAL;

	if (port_num > MLX4_MAX_PORTS)
		return -EINVAL;

	port_gid_table = &iboe->gids[port_num - 1];
	spin_lock_bh(&iboe->lock);
	for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i) {
		if (!memcmp(&port_gid_table->gids[i].gid, gid, sizeof(*gid)) &&
		    (port_gid_table->gids[i].gid_type == attr->gid_type)) {
			found = i;
			break;
		}
		if (free < 0 && !memcmp(&port_gid_table->gids[i].gid, &zgid, sizeof(*gid)))
			free = i; /* HW has space */
	}

	if (found < 0) {
		if (free < 0) {
			ret = -ENOSPC;
		} else {
			port_gid_table->gids[free].ctx = kmalloc(sizeof(*port_gid_table->gids[free].ctx), GFP_ATOMIC);
			if (!port_gid_table->gids[free].ctx) {
				ret = -ENOMEM;
			} else {
				*context = port_gid_table->gids[free].ctx;
				memcpy(&port_gid_table->gids[free].gid, gid, sizeof(*gid));
				port_gid_table->gids[free].gid_type = attr->gid_type;
				port_gid_table->gids[free].ctx->real_index = free;
				port_gid_table->gids[free].ctx->refcount = 1;
				hw_update = 1;
			}
		}
	} else {
		struct gid_cache_context *ctx = port_gid_table->gids[found].ctx;

		*context = ctx;
		ctx->refcount++;
	}
	if (!ret && hw_update) {
		gids = kmalloc(sizeof(*gids) * MLX4_MAX_PORT_GIDS, GFP_ATOMIC);
		if (!gids) {
			ret = -ENOMEM;
		} else {
			for (i = 0; i < MLX4_MAX_PORT_GIDS; i++) {
				memcpy(&gids[i].gid, &port_gid_table->gids[i].gid, sizeof(union ib_gid));
				gids[i].gid_type = port_gid_table->gids[i].gid_type;
			}
		}
	}
	spin_unlock_bh(&iboe->lock);

	if (!ret && hw_update) {
		ret = mlx4_ib_update_gids(gids, ibdev, port_num);
		kfree(gids);
	}

	return ret;
}
static int mlx4_ib_del_gid(struct ib_device *device,
			   u8 port_num,
			   unsigned int index,
			   void **context)
{
	struct gid_cache_context *ctx = *context;
	struct mlx4_ib_dev *ibdev = to_mdev(device);
	struct mlx4_ib_iboe *iboe = &ibdev->iboe;
	struct mlx4_port_gid_table *port_gid_table;
	int ret = 0;
	int hw_update = 0;
	struct gid_entry *gids = NULL;

	if (!rdma_cap_roce_gid_table(device, port_num))
		return -EINVAL;

	if (port_num > MLX4_MAX_PORTS)
		return -EINVAL;

	port_gid_table = &iboe->gids[port_num - 1];
	spin_lock_bh(&iboe->lock);
	if (ctx) {
		ctx->refcount--;
		if (!ctx->refcount) {
			unsigned int real_index = ctx->real_index;

			memcpy(&port_gid_table->gids[real_index].gid, &zgid, sizeof(zgid));
			kfree(port_gid_table->gids[real_index].ctx);
			port_gid_table->gids[real_index].ctx = NULL;
			hw_update = 1;
		}
	}
	if (!ret && hw_update) {
		int i;

		gids = kmalloc(sizeof(*gids) * MLX4_MAX_PORT_GIDS, GFP_ATOMIC);
		if (!gids) {
			ret = -ENOMEM;
		} else {
			for (i = 0; i < MLX4_MAX_PORT_GIDS; i++)
				memcpy(&gids[i].gid, &port_gid_table->gids[i].gid, sizeof(union ib_gid));
		}
	}
	spin_unlock_bh(&iboe->lock);

	if (!ret && hw_update) {
		ret = mlx4_ib_update_gids(gids, ibdev, port_num);
		kfree(gids);
	}
	return ret;
}
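
/*
 * Translate an index in the ib_core GID cache into the index the hardware
 * table actually uses for the same GID/type pair on this port.
 */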
int mlx4_ib_gid_index_to_real_index(struct mlx4_ib_dev *ibdev,
				    u8 port_num, int index)
{
	struct mlx4_ib_iboe *iboe = &ibdev->iboe;
	struct gid_cache_context *ctx = NULL;
	union ib_gid gid;
	struct mlx4_port_gid_table *port_gid_table;
	int real_index = -EINVAL;
	int i;
	int ret;
	unsigned long flags;
	struct ib_gid_attr attr;

	if (port_num > MLX4_MAX_PORTS)
		return -EINVAL;

	if (mlx4_is_bonded(ibdev->dev))
		port_num = 1;

	if (!rdma_cap_roce_gid_table(&ibdev->ib_dev, port_num))
		return index;

	ret = ib_get_cached_gid(&ibdev->ib_dev, port_num, index, &gid, &attr);
	if (ret)
		return ret;

	if (attr.ndev)
		dev_put(attr.ndev);

	if (!memcmp(&gid, &zgid, sizeof(gid)))
		return -EINVAL;

	spin_lock_irqsave(&iboe->lock, flags);
	port_gid_table = &iboe->gids[port_num - 1];

	for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i)
		if (!memcmp(&port_gid_table->gids[i].gid, &gid, sizeof(gid)) &&
		    attr.gid_type == port_gid_table->gids[i].gid_type) {
			ctx = port_gid_table->gids[i].ctx;
			break;
		}
	if (ctx)
		real_index = ctx->real_index;
	spin_unlock_irqrestore(&iboe->lock, flags);
	return real_index;
}
425 static int mlx4_ib_query_device(struct ib_device
*ibdev
,
426 struct ib_device_attr
*props
,
427 struct ib_udata
*uhw
)
429 struct mlx4_ib_dev
*dev
= to_mdev(ibdev
);
430 struct ib_smp
*in_mad
= NULL
;
431 struct ib_smp
*out_mad
= NULL
;
434 struct mlx4_uverbs_ex_query_device cmd
;
435 struct mlx4_uverbs_ex_query_device_resp resp
= {.comp_mask
= 0};
436 struct mlx4_clock_params clock_params
;
439 if (uhw
->inlen
< sizeof(cmd
))
442 err
= ib_copy_from_udata(&cmd
, uhw
, sizeof(cmd
));
453 resp
.response_length
= offsetof(typeof(resp
), response_length
) +
454 sizeof(resp
.response_length
);
455 in_mad
= kzalloc(sizeof *in_mad
, GFP_KERNEL
);
456 out_mad
= kmalloc(sizeof *out_mad
, GFP_KERNEL
);
457 if (!in_mad
|| !out_mad
)
460 init_query_mad(in_mad
);
461 in_mad
->attr_id
= IB_SMP_ATTR_NODE_INFO
;
463 err
= mlx4_MAD_IFC(to_mdev(ibdev
), MLX4_MAD_IFC_IGNORE_KEYS
,
464 1, NULL
, NULL
, in_mad
, out_mad
);
468 memset(props
, 0, sizeof *props
);
470 have_ib_ports
= num_ib_ports(dev
->dev
);
472 props
->fw_ver
= dev
->dev
->caps
.fw_ver
;
473 props
->device_cap_flags
= IB_DEVICE_CHANGE_PHY_PORT
|
474 IB_DEVICE_PORT_ACTIVE_EVENT
|
475 IB_DEVICE_SYS_IMAGE_GUID
|
476 IB_DEVICE_RC_RNR_NAK_GEN
|
477 IB_DEVICE_BLOCK_MULTICAST_LOOPBACK
;
478 if (dev
->dev
->caps
.flags
& MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR
)
479 props
->device_cap_flags
|= IB_DEVICE_BAD_PKEY_CNTR
;
480 if (dev
->dev
->caps
.flags
& MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR
)
481 props
->device_cap_flags
|= IB_DEVICE_BAD_QKEY_CNTR
;
482 if (dev
->dev
->caps
.flags
& MLX4_DEV_CAP_FLAG_APM
&& have_ib_ports
)
483 props
->device_cap_flags
|= IB_DEVICE_AUTO_PATH_MIG
;
484 if (dev
->dev
->caps
.flags
& MLX4_DEV_CAP_FLAG_UD_AV_PORT
)
485 props
->device_cap_flags
|= IB_DEVICE_UD_AV_PORT_ENFORCE
;
486 if (dev
->dev
->caps
.flags
& MLX4_DEV_CAP_FLAG_IPOIB_CSUM
)
487 props
->device_cap_flags
|= IB_DEVICE_UD_IP_CSUM
;
488 if (dev
->dev
->caps
.max_gso_sz
&&
489 (dev
->dev
->rev_id
!= MLX4_IB_CARD_REV_A0
) &&
490 (dev
->dev
->caps
.flags
& MLX4_DEV_CAP_FLAG_BLH
))
491 props
->device_cap_flags
|= IB_DEVICE_UD_TSO
;
492 if (dev
->dev
->caps
.bmme_flags
& MLX4_BMME_FLAG_RESERVED_LKEY
)
493 props
->device_cap_flags
|= IB_DEVICE_LOCAL_DMA_LKEY
;
494 if ((dev
->dev
->caps
.bmme_flags
& MLX4_BMME_FLAG_LOCAL_INV
) &&
495 (dev
->dev
->caps
.bmme_flags
& MLX4_BMME_FLAG_REMOTE_INV
) &&
496 (dev
->dev
->caps
.bmme_flags
& MLX4_BMME_FLAG_FAST_REG_WR
))
497 props
->device_cap_flags
|= IB_DEVICE_MEM_MGT_EXTENSIONS
;
498 if (dev
->dev
->caps
.flags
& MLX4_DEV_CAP_FLAG_XRC
)
499 props
->device_cap_flags
|= IB_DEVICE_XRC
;
500 if (dev
->dev
->caps
.flags
& MLX4_DEV_CAP_FLAG_MEM_WINDOW
)
501 props
->device_cap_flags
|= IB_DEVICE_MEM_WINDOW
;
502 if (dev
->dev
->caps
.bmme_flags
& MLX4_BMME_FLAG_TYPE_2_WIN
) {
503 if (dev
->dev
->caps
.bmme_flags
& MLX4_BMME_FLAG_WIN_TYPE_2B
)
504 props
->device_cap_flags
|= IB_DEVICE_MEM_WINDOW_TYPE_2B
;
506 props
->device_cap_flags
|= IB_DEVICE_MEM_WINDOW_TYPE_2A
;
507 if (dev
->steering_support
== MLX4_STEERING_MODE_DEVICE_MANAGED
)
508 props
->device_cap_flags
|= IB_DEVICE_MANAGED_FLOW_STEERING
;
511 props
->device_cap_flags
|= IB_DEVICE_RAW_IP_CSUM
;
513 props
->vendor_id
= be32_to_cpup((__be32
*) (out_mad
->data
+ 36)) &
515 props
->vendor_part_id
= dev
->dev
->persist
->pdev
->device
;
516 props
->hw_ver
= be32_to_cpup((__be32
*) (out_mad
->data
+ 32));
517 memcpy(&props
->sys_image_guid
, out_mad
->data
+ 4, 8);
519 props
->max_mr_size
= ~0ull;
520 props
->page_size_cap
= dev
->dev
->caps
.page_size_cap
;
521 props
->max_qp
= dev
->dev
->quotas
.qp
;
522 props
->max_qp_wr
= dev
->dev
->caps
.max_wqes
- MLX4_IB_SQ_MAX_SPARE
;
523 props
->max_sge
= min(dev
->dev
->caps
.max_sq_sg
,
524 dev
->dev
->caps
.max_rq_sg
);
525 props
->max_sge_rd
= MLX4_MAX_SGE_RD
;
526 props
->max_cq
= dev
->dev
->quotas
.cq
;
527 props
->max_cqe
= dev
->dev
->caps
.max_cqes
;
528 props
->max_mr
= dev
->dev
->quotas
.mpt
;
529 props
->max_pd
= dev
->dev
->caps
.num_pds
- dev
->dev
->caps
.reserved_pds
;
530 props
->max_qp_rd_atom
= dev
->dev
->caps
.max_qp_dest_rdma
;
531 props
->max_qp_init_rd_atom
= dev
->dev
->caps
.max_qp_init_rdma
;
532 props
->max_res_rd_atom
= props
->max_qp_rd_atom
* props
->max_qp
;
533 props
->max_srq
= dev
->dev
->quotas
.srq
;
534 props
->max_srq_wr
= dev
->dev
->caps
.max_srq_wqes
- 1;
535 props
->max_srq_sge
= dev
->dev
->caps
.max_srq_sge
;
536 props
->max_fast_reg_page_list_len
= MLX4_MAX_FAST_REG_PAGES
;
537 props
->local_ca_ack_delay
= dev
->dev
->caps
.local_ca_ack_delay
;
538 props
->atomic_cap
= dev
->dev
->caps
.flags
& MLX4_DEV_CAP_FLAG_ATOMIC
?
539 IB_ATOMIC_HCA
: IB_ATOMIC_NONE
;
540 props
->masked_atomic_cap
= props
->atomic_cap
;
541 props
->max_pkeys
= dev
->dev
->caps
.pkey_table_len
[1];
542 props
->max_mcast_grp
= dev
->dev
->caps
.num_mgms
+ dev
->dev
->caps
.num_amgms
;
543 props
->max_mcast_qp_attach
= dev
->dev
->caps
.num_qp_per_mgm
;
544 props
->max_total_mcast_qp_attach
= props
->max_mcast_qp_attach
*
545 props
->max_mcast_grp
;
546 props
->max_map_per_fmr
= dev
->dev
->caps
.max_fmr_maps
;
547 props
->hca_core_clock
= dev
->dev
->caps
.hca_core_clock
* 1000UL;
548 props
->timestamp_mask
= 0xFFFFFFFFFFFFULL
;
550 if (!mlx4_is_slave(dev
->dev
))
551 err
= mlx4_get_internal_clock_params(dev
->dev
, &clock_params
);
553 if (uhw
->outlen
>= resp
.response_length
+ sizeof(resp
.hca_core_clock_offset
)) {
554 resp
.response_length
+= sizeof(resp
.hca_core_clock_offset
);
555 if (!err
&& !mlx4_is_slave(dev
->dev
)) {
556 resp
.comp_mask
|= QUERY_DEVICE_RESP_MASK_TIMESTAMP
;
557 resp
.hca_core_clock_offset
= clock_params
.offset
% PAGE_SIZE
;
562 err
= ib_copy_to_udata(uhw
, &resp
, resp
.response_length
);
static enum rdma_link_layer
mlx4_ib_port_link_layer(struct ib_device *device, u8 port_num)
{
	struct mlx4_dev *dev = to_mdev(device)->dev;

	return dev->caps.port_mask[port_num] == MLX4_PORT_TYPE_IB ?
		IB_LINK_LAYER_INFINIBAND : IB_LINK_LAYER_ETHERNET;
}
582 static int ib_link_query_port(struct ib_device
*ibdev
, u8 port
,
583 struct ib_port_attr
*props
, int netw_view
)
585 struct ib_smp
*in_mad
= NULL
;
586 struct ib_smp
*out_mad
= NULL
;
587 int ext_active_speed
;
588 int mad_ifc_flags
= MLX4_MAD_IFC_IGNORE_KEYS
;
591 in_mad
= kzalloc(sizeof *in_mad
, GFP_KERNEL
);
592 out_mad
= kmalloc(sizeof *out_mad
, GFP_KERNEL
);
593 if (!in_mad
|| !out_mad
)
596 init_query_mad(in_mad
);
597 in_mad
->attr_id
= IB_SMP_ATTR_PORT_INFO
;
598 in_mad
->attr_mod
= cpu_to_be32(port
);
600 if (mlx4_is_mfunc(to_mdev(ibdev
)->dev
) && netw_view
)
601 mad_ifc_flags
|= MLX4_MAD_IFC_NET_VIEW
;
603 err
= mlx4_MAD_IFC(to_mdev(ibdev
), mad_ifc_flags
, port
, NULL
, NULL
,
609 props
->lid
= be16_to_cpup((__be16
*) (out_mad
->data
+ 16));
610 props
->lmc
= out_mad
->data
[34] & 0x7;
611 props
->sm_lid
= be16_to_cpup((__be16
*) (out_mad
->data
+ 18));
612 props
->sm_sl
= out_mad
->data
[36] & 0xf;
613 props
->state
= out_mad
->data
[32] & 0xf;
614 props
->phys_state
= out_mad
->data
[33] >> 4;
615 props
->port_cap_flags
= be32_to_cpup((__be32
*) (out_mad
->data
+ 20));
617 props
->gid_tbl_len
= out_mad
->data
[50];
619 props
->gid_tbl_len
= to_mdev(ibdev
)->dev
->caps
.gid_table_len
[port
];
620 props
->max_msg_sz
= to_mdev(ibdev
)->dev
->caps
.max_msg_sz
;
621 props
->pkey_tbl_len
= to_mdev(ibdev
)->dev
->caps
.pkey_table_len
[port
];
622 props
->bad_pkey_cntr
= be16_to_cpup((__be16
*) (out_mad
->data
+ 46));
623 props
->qkey_viol_cntr
= be16_to_cpup((__be16
*) (out_mad
->data
+ 48));
624 props
->active_width
= out_mad
->data
[31] & 0xf;
625 props
->active_speed
= out_mad
->data
[35] >> 4;
626 props
->max_mtu
= out_mad
->data
[41] & 0xf;
627 props
->active_mtu
= out_mad
->data
[36] >> 4;
628 props
->subnet_timeout
= out_mad
->data
[51] & 0x1f;
629 props
->max_vl_num
= out_mad
->data
[37] >> 4;
630 props
->init_type_reply
= out_mad
->data
[41] >> 4;
632 /* Check if extended speeds (EDR/FDR/...) are supported */
633 if (props
->port_cap_flags
& IB_PORT_EXTENDED_SPEEDS_SUP
) {
634 ext_active_speed
= out_mad
->data
[62] >> 4;
636 switch (ext_active_speed
) {
638 props
->active_speed
= IB_SPEED_FDR
;
641 props
->active_speed
= IB_SPEED_EDR
;
646 /* If reported active speed is QDR, check if is FDR-10 */
647 if (props
->active_speed
== IB_SPEED_QDR
) {
648 init_query_mad(in_mad
);
649 in_mad
->attr_id
= MLX4_ATTR_EXTENDED_PORT_INFO
;
650 in_mad
->attr_mod
= cpu_to_be32(port
);
652 err
= mlx4_MAD_IFC(to_mdev(ibdev
), mad_ifc_flags
, port
,
653 NULL
, NULL
, in_mad
, out_mad
);
657 /* Checking LinkSpeedActive for FDR-10 */
658 if (out_mad
->data
[15] & 0x1)
659 props
->active_speed
= IB_SPEED_FDR10
;
662 /* Avoid wrong speed value returned by FW if the IB link is down. */
663 if (props
->state
== IB_PORT_DOWN
)
664 props
->active_speed
= IB_SPEED_SDR
;
static u8 state_to_phys_state(enum ib_port_state state)
{
	return state == IB_PORT_ACTIVE ? 5 : 3;
}
677 static int eth_link_query_port(struct ib_device
*ibdev
, u8 port
,
678 struct ib_port_attr
*props
, int netw_view
)
681 struct mlx4_ib_dev
*mdev
= to_mdev(ibdev
);
682 struct mlx4_ib_iboe
*iboe
= &mdev
->iboe
;
683 struct net_device
*ndev
;
685 struct mlx4_cmd_mailbox
*mailbox
;
687 int is_bonded
= mlx4_is_bonded(mdev
->dev
);
689 mailbox
= mlx4_alloc_cmd_mailbox(mdev
->dev
);
691 return PTR_ERR(mailbox
);
693 err
= mlx4_cmd_box(mdev
->dev
, 0, mailbox
->dma
, port
, 0,
694 MLX4_CMD_QUERY_PORT
, MLX4_CMD_TIME_CLASS_B
,
699 props
->active_width
= (((u8
*)mailbox
->buf
)[5] == 0x40) ?
700 IB_WIDTH_4X
: IB_WIDTH_1X
;
701 props
->active_speed
= IB_SPEED_QDR
;
702 props
->port_cap_flags
= IB_PORT_CM_SUP
| IB_PORT_IP_BASED_GIDS
;
703 props
->gid_tbl_len
= mdev
->dev
->caps
.gid_table_len
[port
];
704 props
->max_msg_sz
= mdev
->dev
->caps
.max_msg_sz
;
705 props
->pkey_tbl_len
= 1;
706 props
->max_mtu
= IB_MTU_4096
;
707 props
->max_vl_num
= 2;
708 props
->state
= IB_PORT_DOWN
;
709 props
->phys_state
= state_to_phys_state(props
->state
);
710 props
->active_mtu
= IB_MTU_256
;
711 spin_lock_bh(&iboe
->lock
);
712 ndev
= iboe
->netdevs
[port
- 1];
713 if (ndev
&& is_bonded
) {
714 rcu_read_lock(); /* required to get upper dev */
715 ndev
= netdev_master_upper_dev_get_rcu(ndev
);
721 tmp
= iboe_get_mtu(ndev
->mtu
);
722 props
->active_mtu
= tmp
? min(props
->max_mtu
, tmp
) : IB_MTU_256
;
724 props
->state
= (netif_running(ndev
) && netif_carrier_ok(ndev
)) ?
725 IB_PORT_ACTIVE
: IB_PORT_DOWN
;
726 props
->phys_state
= state_to_phys_state(props
->state
);
728 spin_unlock_bh(&iboe
->lock
);
730 mlx4_free_cmd_mailbox(mdev
->dev
, mailbox
);
int __mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
			 struct ib_port_attr *props, int netw_view)
{
	int err;

	memset(props, 0, sizeof *props);

	err = mlx4_ib_port_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND ?
		ib_link_query_port(ibdev, port, props, netw_view) :
		eth_link_query_port(ibdev, port, props, netw_view);

	return err;
}

static int mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
			      struct ib_port_attr *props)
{
	/* returns host view */
	return __mlx4_ib_query_port(ibdev, port, props, 0);
}
755 int __mlx4_ib_query_gid(struct ib_device
*ibdev
, u8 port
, int index
,
756 union ib_gid
*gid
, int netw_view
)
758 struct ib_smp
*in_mad
= NULL
;
759 struct ib_smp
*out_mad
= NULL
;
761 struct mlx4_ib_dev
*dev
= to_mdev(ibdev
);
763 int mad_ifc_flags
= MLX4_MAD_IFC_IGNORE_KEYS
;
765 in_mad
= kzalloc(sizeof *in_mad
, GFP_KERNEL
);
766 out_mad
= kmalloc(sizeof *out_mad
, GFP_KERNEL
);
767 if (!in_mad
|| !out_mad
)
770 init_query_mad(in_mad
);
771 in_mad
->attr_id
= IB_SMP_ATTR_PORT_INFO
;
772 in_mad
->attr_mod
= cpu_to_be32(port
);
774 if (mlx4_is_mfunc(dev
->dev
) && netw_view
)
775 mad_ifc_flags
|= MLX4_MAD_IFC_NET_VIEW
;
777 err
= mlx4_MAD_IFC(dev
, mad_ifc_flags
, port
, NULL
, NULL
, in_mad
, out_mad
);
781 memcpy(gid
->raw
, out_mad
->data
+ 8, 8);
783 if (mlx4_is_mfunc(dev
->dev
) && !netw_view
) {
785 /* For any index > 0, return the null guid */
792 init_query_mad(in_mad
);
793 in_mad
->attr_id
= IB_SMP_ATTR_GUID_INFO
;
794 in_mad
->attr_mod
= cpu_to_be32(index
/ 8);
796 err
= mlx4_MAD_IFC(dev
, mad_ifc_flags
, port
,
797 NULL
, NULL
, in_mad
, out_mad
);
801 memcpy(gid
->raw
+ 8, out_mad
->data
+ (index
% 8) * 8, 8);
805 memset(gid
->raw
+ 8, 0, 8);
811 static int mlx4_ib_query_gid(struct ib_device
*ibdev
, u8 port
, int index
,
816 if (rdma_protocol_ib(ibdev
, port
))
817 return __mlx4_ib_query_gid(ibdev
, port
, index
, gid
, 0);
819 if (!rdma_protocol_roce(ibdev
, port
))
822 if (!rdma_cap_roce_gid_table(ibdev
, port
))
825 ret
= ib_get_cached_gid(ibdev
, port
, index
, gid
, NULL
);
826 if (ret
== -EAGAIN
) {
827 memcpy(gid
, &zgid
, sizeof(*gid
));
834 int __mlx4_ib_query_pkey(struct ib_device
*ibdev
, u8 port
, u16 index
,
835 u16
*pkey
, int netw_view
)
837 struct ib_smp
*in_mad
= NULL
;
838 struct ib_smp
*out_mad
= NULL
;
839 int mad_ifc_flags
= MLX4_MAD_IFC_IGNORE_KEYS
;
842 in_mad
= kzalloc(sizeof *in_mad
, GFP_KERNEL
);
843 out_mad
= kmalloc(sizeof *out_mad
, GFP_KERNEL
);
844 if (!in_mad
|| !out_mad
)
847 init_query_mad(in_mad
);
848 in_mad
->attr_id
= IB_SMP_ATTR_PKEY_TABLE
;
849 in_mad
->attr_mod
= cpu_to_be32(index
/ 32);
851 if (mlx4_is_mfunc(to_mdev(ibdev
)->dev
) && netw_view
)
852 mad_ifc_flags
|= MLX4_MAD_IFC_NET_VIEW
;
854 err
= mlx4_MAD_IFC(to_mdev(ibdev
), mad_ifc_flags
, port
, NULL
, NULL
,
859 *pkey
= be16_to_cpu(((__be16
*) out_mad
->data
)[index
% 32]);
static int mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
{
	return __mlx4_ib_query_pkey(ibdev, port, index, pkey, 0);
}
872 static int mlx4_ib_modify_device(struct ib_device
*ibdev
, int mask
,
873 struct ib_device_modify
*props
)
875 struct mlx4_cmd_mailbox
*mailbox
;
878 if (mask
& ~IB_DEVICE_MODIFY_NODE_DESC
)
881 if (!(mask
& IB_DEVICE_MODIFY_NODE_DESC
))
884 if (mlx4_is_slave(to_mdev(ibdev
)->dev
))
887 spin_lock_irqsave(&to_mdev(ibdev
)->sm_lock
, flags
);
888 memcpy(ibdev
->node_desc
, props
->node_desc
, 64);
889 spin_unlock_irqrestore(&to_mdev(ibdev
)->sm_lock
, flags
);
892 * If possible, pass node desc to FW, so it can generate
893 * a 144 trap. If cmd fails, just ignore.
895 mailbox
= mlx4_alloc_cmd_mailbox(to_mdev(ibdev
)->dev
);
899 memcpy(mailbox
->buf
, props
->node_desc
, 64);
900 mlx4_cmd(to_mdev(ibdev
)->dev
, mailbox
->dma
, 1, 0,
901 MLX4_CMD_SET_NODE
, MLX4_CMD_TIME_CLASS_A
, MLX4_CMD_NATIVE
);
903 mlx4_free_cmd_mailbox(to_mdev(ibdev
)->dev
, mailbox
);
908 static int mlx4_ib_SET_PORT(struct mlx4_ib_dev
*dev
, u8 port
, int reset_qkey_viols
,
911 struct mlx4_cmd_mailbox
*mailbox
;
914 mailbox
= mlx4_alloc_cmd_mailbox(dev
->dev
);
916 return PTR_ERR(mailbox
);
918 if (dev
->dev
->flags
& MLX4_FLAG_OLD_PORT_CMDS
) {
919 *(u8
*) mailbox
->buf
= !!reset_qkey_viols
<< 6;
920 ((__be32
*) mailbox
->buf
)[2] = cpu_to_be32(cap_mask
);
922 ((u8
*) mailbox
->buf
)[3] = !!reset_qkey_viols
;
923 ((__be32
*) mailbox
->buf
)[1] = cpu_to_be32(cap_mask
);
926 err
= mlx4_cmd(dev
->dev
, mailbox
->dma
, port
, MLX4_SET_PORT_IB_OPCODE
,
927 MLX4_CMD_SET_PORT
, MLX4_CMD_TIME_CLASS_B
,
930 mlx4_free_cmd_mailbox(dev
->dev
, mailbox
);
934 static int mlx4_ib_modify_port(struct ib_device
*ibdev
, u8 port
, int mask
,
935 struct ib_port_modify
*props
)
937 struct mlx4_ib_dev
*mdev
= to_mdev(ibdev
);
938 u8 is_eth
= mdev
->dev
->caps
.port_type
[port
] == MLX4_PORT_TYPE_ETH
;
939 struct ib_port_attr attr
;
943 /* return OK if this is RoCE. CM calls ib_modify_port() regardless
944 * of whether port link layer is ETH or IB. For ETH ports, qkey
945 * violations and port capabilities are not meaningful.
950 mutex_lock(&mdev
->cap_mask_mutex
);
952 err
= mlx4_ib_query_port(ibdev
, port
, &attr
);
956 cap_mask
= (attr
.port_cap_flags
| props
->set_port_cap_mask
) &
957 ~props
->clr_port_cap_mask
;
959 err
= mlx4_ib_SET_PORT(mdev
, port
,
960 !!(mask
& IB_PORT_RESET_QKEY_CNTR
),
964 mutex_unlock(&to_mdev(ibdev
)->cap_mask_mutex
);
968 static struct ib_ucontext
*mlx4_ib_alloc_ucontext(struct ib_device
*ibdev
,
969 struct ib_udata
*udata
)
971 struct mlx4_ib_dev
*dev
= to_mdev(ibdev
);
972 struct mlx4_ib_ucontext
*context
;
973 struct mlx4_ib_alloc_ucontext_resp_v3 resp_v3
;
974 struct mlx4_ib_alloc_ucontext_resp resp
;
978 return ERR_PTR(-EAGAIN
);
980 if (ibdev
->uverbs_abi_ver
== MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION
) {
981 resp_v3
.qp_tab_size
= dev
->dev
->caps
.num_qps
;
982 resp_v3
.bf_reg_size
= dev
->dev
->caps
.bf_reg_size
;
983 resp_v3
.bf_regs_per_page
= dev
->dev
->caps
.bf_regs_per_page
;
985 resp
.dev_caps
= dev
->dev
->caps
.userspace_caps
;
986 resp
.qp_tab_size
= dev
->dev
->caps
.num_qps
;
987 resp
.bf_reg_size
= dev
->dev
->caps
.bf_reg_size
;
988 resp
.bf_regs_per_page
= dev
->dev
->caps
.bf_regs_per_page
;
989 resp
.cqe_size
= dev
->dev
->caps
.cqe_size
;
992 context
= kzalloc(sizeof(*context
), GFP_KERNEL
);
994 return ERR_PTR(-ENOMEM
);
996 err
= mlx4_uar_alloc(to_mdev(ibdev
)->dev
, &context
->uar
);
1002 INIT_LIST_HEAD(&context
->db_page_list
);
1003 mutex_init(&context
->db_page_mutex
);
1005 if (ibdev
->uverbs_abi_ver
== MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION
)
1006 err
= ib_copy_to_udata(udata
, &resp_v3
, sizeof(resp_v3
));
1008 err
= ib_copy_to_udata(udata
, &resp
, sizeof(resp
));
1011 mlx4_uar_free(to_mdev(ibdev
)->dev
, &context
->uar
);
1013 return ERR_PTR(-EFAULT
);
1016 return &context
->ibucontext
;
1019 static int mlx4_ib_dealloc_ucontext(struct ib_ucontext
*ibcontext
)
1021 struct mlx4_ib_ucontext
*context
= to_mucontext(ibcontext
);
1023 mlx4_uar_free(to_mdev(ibcontext
->device
)->dev
, &context
->uar
);
1029 static void mlx4_ib_vma_open(struct vm_area_struct
*area
)
1031 /* vma_open is called when a new VMA is created on top of our VMA.
1032 * This is done through either mremap flow or split_vma (usually due
1033 * to mlock, madvise, munmap, etc.). We do not support a clone of the
1034 * vma, as this VMA is strongly hardware related. Therefore we set the
1035 * vm_ops of the newly created/cloned VMA to NULL, to prevent it from
1036 * calling us again and trying to do incorrect actions. We assume that
1037 * the original vma size is exactly a single page that there will be no
1038 * "splitting" operations on.
1040 area
->vm_ops
= NULL
;
1043 static void mlx4_ib_vma_close(struct vm_area_struct
*area
)
1045 struct mlx4_ib_vma_private_data
*mlx4_ib_vma_priv_data
;
1047 /* It's guaranteed that all VMAs opened on a FD are closed before the
1048 * file itself is closed, therefore no sync is needed with the regular
1049 * closing flow. (e.g. mlx4_ib_dealloc_ucontext) However need a sync
1050 * with accessing the vma as part of mlx4_ib_disassociate_ucontext.
1051 * The close operation is usually called under mm->mmap_sem except when
1052 * process is exiting. The exiting case is handled explicitly as part
1053 * of mlx4_ib_disassociate_ucontext.
1055 mlx4_ib_vma_priv_data
= (struct mlx4_ib_vma_private_data
*)
1056 area
->vm_private_data
;
1058 /* set the vma context pointer to null in the mlx4_ib driver's private
1059 * data to protect against a race condition in mlx4_ib_dissassociate_ucontext().
1061 mlx4_ib_vma_priv_data
->vma
= NULL
;
1064 static const struct vm_operations_struct mlx4_ib_vm_ops
= {
1065 .open
= mlx4_ib_vma_open
,
1066 .close
= mlx4_ib_vma_close
1069 static void mlx4_ib_disassociate_ucontext(struct ib_ucontext
*ibcontext
)
1073 struct vm_area_struct
*vma
;
1074 struct mlx4_ib_ucontext
*context
= to_mucontext(ibcontext
);
1075 struct task_struct
*owning_process
= NULL
;
1076 struct mm_struct
*owning_mm
= NULL
;
1078 owning_process
= get_pid_task(ibcontext
->tgid
, PIDTYPE_PID
);
1079 if (!owning_process
)
1082 owning_mm
= get_task_mm(owning_process
);
1084 pr_info("no mm, disassociate ucontext is pending task termination\n");
1086 /* make sure that task is dead before returning, it may
1087 * prevent a rare case of module down in parallel to a
1088 * call to mlx4_ib_vma_close.
1090 put_task_struct(owning_process
);
1092 owning_process
= get_pid_task(ibcontext
->tgid
,
1094 if (!owning_process
||
1095 owning_process
->state
== TASK_DEAD
) {
1096 pr_info("disassociate ucontext done, task was terminated\n");
1097 /* in case task was dead need to release the task struct */
1099 put_task_struct(owning_process
);
1105 /* need to protect from a race on closing the vma as part of
1106 * mlx4_ib_vma_close().
1108 down_read(&owning_mm
->mmap_sem
);
1109 for (i
= 0; i
< HW_BAR_COUNT
; i
++) {
1110 vma
= context
->hw_bar_info
[i
].vma
;
1114 ret
= zap_vma_ptes(context
->hw_bar_info
[i
].vma
,
1115 context
->hw_bar_info
[i
].vma
->vm_start
,
1118 pr_err("Error: zap_vma_ptes failed for index=%d, ret=%d\n", i
, ret
);
1122 /* context going to be destroyed, should not access ops any more */
1123 context
->hw_bar_info
[i
].vma
->vm_ops
= NULL
;
1126 up_read(&owning_mm
->mmap_sem
);
1128 put_task_struct(owning_process
);
1131 static void mlx4_ib_set_vma_data(struct vm_area_struct
*vma
,
1132 struct mlx4_ib_vma_private_data
*vma_private_data
)
1134 vma_private_data
->vma
= vma
;
1135 vma
->vm_private_data
= vma_private_data
;
1136 vma
->vm_ops
= &mlx4_ib_vm_ops
;
1139 static int mlx4_ib_mmap(struct ib_ucontext
*context
, struct vm_area_struct
*vma
)
1141 struct mlx4_ib_dev
*dev
= to_mdev(context
->device
);
1142 struct mlx4_ib_ucontext
*mucontext
= to_mucontext(context
);
1144 if (vma
->vm_end
- vma
->vm_start
!= PAGE_SIZE
)
1147 if (vma
->vm_pgoff
== 0) {
1148 /* We prevent double mmaping on same context */
1149 if (mucontext
->hw_bar_info
[HW_BAR_DB
].vma
)
1152 vma
->vm_page_prot
= pgprot_noncached(vma
->vm_page_prot
);
1154 if (io_remap_pfn_range(vma
, vma
->vm_start
,
1155 to_mucontext(context
)->uar
.pfn
,
1156 PAGE_SIZE
, vma
->vm_page_prot
))
1159 mlx4_ib_set_vma_data(vma
, &mucontext
->hw_bar_info
[HW_BAR_DB
]);
1161 } else if (vma
->vm_pgoff
== 1 && dev
->dev
->caps
.bf_reg_size
!= 0) {
1162 /* We prevent double mmaping on same context */
1163 if (mucontext
->hw_bar_info
[HW_BAR_BF
].vma
)
1166 vma
->vm_page_prot
= pgprot_writecombine(vma
->vm_page_prot
);
1168 if (io_remap_pfn_range(vma
, vma
->vm_start
,
1169 to_mucontext(context
)->uar
.pfn
+
1170 dev
->dev
->caps
.num_uars
,
1171 PAGE_SIZE
, vma
->vm_page_prot
))
1174 mlx4_ib_set_vma_data(vma
, &mucontext
->hw_bar_info
[HW_BAR_BF
]);
1176 } else if (vma
->vm_pgoff
== 3) {
1177 struct mlx4_clock_params params
;
1180 /* We prevent double mmaping on same context */
1181 if (mucontext
->hw_bar_info
[HW_BAR_CLOCK
].vma
)
1184 ret
= mlx4_get_internal_clock_params(dev
->dev
, ¶ms
);
1189 vma
->vm_page_prot
= pgprot_noncached(vma
->vm_page_prot
);
1190 if (io_remap_pfn_range(vma
, vma
->vm_start
,
1191 (pci_resource_start(dev
->dev
->persist
->pdev
,
1195 PAGE_SIZE
, vma
->vm_page_prot
))
1198 mlx4_ib_set_vma_data(vma
,
1199 &mucontext
->hw_bar_info
[HW_BAR_CLOCK
]);
1207 static struct ib_pd
*mlx4_ib_alloc_pd(struct ib_device
*ibdev
,
1208 struct ib_ucontext
*context
,
1209 struct ib_udata
*udata
)
1211 struct mlx4_ib_pd
*pd
;
1214 pd
= kmalloc(sizeof *pd
, GFP_KERNEL
);
1216 return ERR_PTR(-ENOMEM
);
1218 err
= mlx4_pd_alloc(to_mdev(ibdev
)->dev
, &pd
->pdn
);
1221 return ERR_PTR(err
);
1225 if (ib_copy_to_udata(udata
, &pd
->pdn
, sizeof (__u32
))) {
1226 mlx4_pd_free(to_mdev(ibdev
)->dev
, pd
->pdn
);
1228 return ERR_PTR(-EFAULT
);
static int mlx4_ib_dealloc_pd(struct ib_pd *pd)
{
	mlx4_pd_free(to_mdev(pd->device)->dev, to_mpd(pd)->pdn);
	kfree(pd);

	return 0;
}
1242 static struct ib_xrcd
*mlx4_ib_alloc_xrcd(struct ib_device
*ibdev
,
1243 struct ib_ucontext
*context
,
1244 struct ib_udata
*udata
)
1246 struct mlx4_ib_xrcd
*xrcd
;
1247 struct ib_cq_init_attr cq_attr
= {};
1250 if (!(to_mdev(ibdev
)->dev
->caps
.flags
& MLX4_DEV_CAP_FLAG_XRC
))
1251 return ERR_PTR(-ENOSYS
);
1253 xrcd
= kmalloc(sizeof *xrcd
, GFP_KERNEL
);
1255 return ERR_PTR(-ENOMEM
);
1257 err
= mlx4_xrcd_alloc(to_mdev(ibdev
)->dev
, &xrcd
->xrcdn
);
1261 xrcd
->pd
= ib_alloc_pd(ibdev
);
1262 if (IS_ERR(xrcd
->pd
)) {
1263 err
= PTR_ERR(xrcd
->pd
);
1268 xrcd
->cq
= ib_create_cq(ibdev
, NULL
, NULL
, xrcd
, &cq_attr
);
1269 if (IS_ERR(xrcd
->cq
)) {
1270 err
= PTR_ERR(xrcd
->cq
);
1274 return &xrcd
->ibxrcd
;
1277 ib_dealloc_pd(xrcd
->pd
);
1279 mlx4_xrcd_free(to_mdev(ibdev
)->dev
, xrcd
->xrcdn
);
1282 return ERR_PTR(err
);
1285 static int mlx4_ib_dealloc_xrcd(struct ib_xrcd
*xrcd
)
1287 ib_destroy_cq(to_mxrcd(xrcd
)->cq
);
1288 ib_dealloc_pd(to_mxrcd(xrcd
)->pd
);
1289 mlx4_xrcd_free(to_mdev(xrcd
->device
)->dev
, to_mxrcd(xrcd
)->xrcdn
);
1295 static int add_gid_entry(struct ib_qp
*ibqp
, union ib_gid
*gid
)
1297 struct mlx4_ib_qp
*mqp
= to_mqp(ibqp
);
1298 struct mlx4_ib_dev
*mdev
= to_mdev(ibqp
->device
);
1299 struct mlx4_ib_gid_entry
*ge
;
1301 ge
= kzalloc(sizeof *ge
, GFP_KERNEL
);
1306 if (mlx4_ib_add_mc(mdev
, mqp
, gid
)) {
1307 ge
->port
= mqp
->port
;
1311 mutex_lock(&mqp
->mutex
);
1312 list_add_tail(&ge
->list
, &mqp
->gid_list
);
1313 mutex_unlock(&mqp
->mutex
);
static void mlx4_ib_delete_counters_table(struct mlx4_ib_dev *ibdev,
					  struct mlx4_ib_counters *ctr_table)
{
	struct counter_index *counter, *tmp_count;

	mutex_lock(&ctr_table->mutex);
	list_for_each_entry_safe(counter, tmp_count, &ctr_table->counters_list,
				 list) {
		if (counter->allocated)
			mlx4_counter_free(ibdev->dev, counter->index);
		list_del(&counter->list);
		kfree(counter);
	}
	mutex_unlock(&ctr_table->mutex);
}
1334 int mlx4_ib_add_mc(struct mlx4_ib_dev
*mdev
, struct mlx4_ib_qp
*mqp
,
1337 struct net_device
*ndev
;
1343 spin_lock_bh(&mdev
->iboe
.lock
);
1344 ndev
= mdev
->iboe
.netdevs
[mqp
->port
- 1];
1347 spin_unlock_bh(&mdev
->iboe
.lock
);
1357 struct mlx4_ib_steering
{
1358 struct list_head list
;
1359 struct mlx4_flow_reg_id reg_id
;
1363 static int parse_flow_attr(struct mlx4_dev
*dev
,
1365 union ib_flow_spec
*ib_spec
,
1366 struct _rule_hw
*mlx4_spec
)
1368 enum mlx4_net_trans_rule_id type
;
1370 switch (ib_spec
->type
) {
1371 case IB_FLOW_SPEC_ETH
:
1372 type
= MLX4_NET_TRANS_RULE_ID_ETH
;
1373 memcpy(mlx4_spec
->eth
.dst_mac
, ib_spec
->eth
.val
.dst_mac
,
1375 memcpy(mlx4_spec
->eth
.dst_mac_msk
, ib_spec
->eth
.mask
.dst_mac
,
1377 mlx4_spec
->eth
.vlan_tag
= ib_spec
->eth
.val
.vlan_tag
;
1378 mlx4_spec
->eth
.vlan_tag_msk
= ib_spec
->eth
.mask
.vlan_tag
;
1380 case IB_FLOW_SPEC_IB
:
1381 type
= MLX4_NET_TRANS_RULE_ID_IB
;
1382 mlx4_spec
->ib
.l3_qpn
=
1383 cpu_to_be32(qp_num
);
1384 mlx4_spec
->ib
.qpn_mask
=
1385 cpu_to_be32(MLX4_IB_FLOW_QPN_MASK
);
1389 case IB_FLOW_SPEC_IPV4
:
1390 type
= MLX4_NET_TRANS_RULE_ID_IPV4
;
1391 mlx4_spec
->ipv4
.src_ip
= ib_spec
->ipv4
.val
.src_ip
;
1392 mlx4_spec
->ipv4
.src_ip_msk
= ib_spec
->ipv4
.mask
.src_ip
;
1393 mlx4_spec
->ipv4
.dst_ip
= ib_spec
->ipv4
.val
.dst_ip
;
1394 mlx4_spec
->ipv4
.dst_ip_msk
= ib_spec
->ipv4
.mask
.dst_ip
;
1397 case IB_FLOW_SPEC_TCP
:
1398 case IB_FLOW_SPEC_UDP
:
1399 type
= ib_spec
->type
== IB_FLOW_SPEC_TCP
?
1400 MLX4_NET_TRANS_RULE_ID_TCP
:
1401 MLX4_NET_TRANS_RULE_ID_UDP
;
1402 mlx4_spec
->tcp_udp
.dst_port
= ib_spec
->tcp_udp
.val
.dst_port
;
1403 mlx4_spec
->tcp_udp
.dst_port_msk
= ib_spec
->tcp_udp
.mask
.dst_port
;
1404 mlx4_spec
->tcp_udp
.src_port
= ib_spec
->tcp_udp
.val
.src_port
;
1405 mlx4_spec
->tcp_udp
.src_port_msk
= ib_spec
->tcp_udp
.mask
.src_port
;
1411 if (mlx4_map_sw_to_hw_steering_id(dev
, type
) < 0 ||
1412 mlx4_hw_rule_sz(dev
, type
) < 0)
1414 mlx4_spec
->id
= cpu_to_be16(mlx4_map_sw_to_hw_steering_id(dev
, type
));
1415 mlx4_spec
->size
= mlx4_hw_rule_sz(dev
, type
) >> 2;
1416 return mlx4_hw_rule_sz(dev
, type
);
1419 struct default_rules
{
1420 __u32 mandatory_fields
[IB_FLOW_SPEC_SUPPORT_LAYERS
];
1421 __u32 mandatory_not_fields
[IB_FLOW_SPEC_SUPPORT_LAYERS
];
1422 __u32 rules_create_list
[IB_FLOW_SPEC_SUPPORT_LAYERS
];
1425 static const struct default_rules default_table
[] = {
1427 .mandatory_fields
= {IB_FLOW_SPEC_IPV4
},
1428 .mandatory_not_fields
= {IB_FLOW_SPEC_ETH
},
1429 .rules_create_list
= {IB_FLOW_SPEC_IB
},
1430 .link_layer
= IB_LINK_LAYER_INFINIBAND
1434 static int __mlx4_ib_default_rules_match(struct ib_qp
*qp
,
1435 struct ib_flow_attr
*flow_attr
)
1439 const struct default_rules
*pdefault_rules
= default_table
;
1440 u8 link_layer
= rdma_port_get_link_layer(qp
->device
, flow_attr
->port
);
1442 for (i
= 0; i
< ARRAY_SIZE(default_table
); i
++, pdefault_rules
++) {
1443 __u32 field_types
[IB_FLOW_SPEC_SUPPORT_LAYERS
];
1444 memset(&field_types
, 0, sizeof(field_types
));
1446 if (link_layer
!= pdefault_rules
->link_layer
)
1449 ib_flow
= flow_attr
+ 1;
1450 /* we assume the specs are sorted */
1451 for (j
= 0, k
= 0; k
< IB_FLOW_SPEC_SUPPORT_LAYERS
&&
1452 j
< flow_attr
->num_of_specs
; k
++) {
1453 union ib_flow_spec
*current_flow
=
1454 (union ib_flow_spec
*)ib_flow
;
1456 /* same layer but different type */
1457 if (((current_flow
->type
& IB_FLOW_SPEC_LAYER_MASK
) ==
1458 (pdefault_rules
->mandatory_fields
[k
] &
1459 IB_FLOW_SPEC_LAYER_MASK
)) &&
1460 (current_flow
->type
!=
1461 pdefault_rules
->mandatory_fields
[k
]))
1464 /* same layer, try match next one */
1465 if (current_flow
->type
==
1466 pdefault_rules
->mandatory_fields
[k
]) {
1469 ((union ib_flow_spec
*)ib_flow
)->size
;
1473 ib_flow
= flow_attr
+ 1;
1474 for (j
= 0; j
< flow_attr
->num_of_specs
;
1475 j
++, ib_flow
+= ((union ib_flow_spec
*)ib_flow
)->size
)
1476 for (k
= 0; k
< IB_FLOW_SPEC_SUPPORT_LAYERS
; k
++)
1477 /* same layer and same type */
1478 if (((union ib_flow_spec
*)ib_flow
)->type
==
1479 pdefault_rules
->mandatory_not_fields
[k
])
1488 static int __mlx4_ib_create_default_rules(
1489 struct mlx4_ib_dev
*mdev
,
1491 const struct default_rules
*pdefault_rules
,
1492 struct _rule_hw
*mlx4_spec
) {
1496 for (i
= 0; i
< ARRAY_SIZE(pdefault_rules
->rules_create_list
); i
++) {
1498 union ib_flow_spec ib_spec
;
1499 switch (pdefault_rules
->rules_create_list
[i
]) {
1503 case IB_FLOW_SPEC_IB
:
1504 ib_spec
.type
= IB_FLOW_SPEC_IB
;
1505 ib_spec
.size
= sizeof(struct ib_flow_spec_ib
);
1512 /* We must put empty rule, qpn is being ignored */
1513 ret
= parse_flow_attr(mdev
->dev
, 0, &ib_spec
,
1516 pr_info("invalid parsing\n");
1520 mlx4_spec
= (void *)mlx4_spec
+ ret
;
1526 static int __mlx4_ib_create_flow(struct ib_qp
*qp
, struct ib_flow_attr
*flow_attr
,
1528 enum mlx4_net_trans_promisc_mode flow_type
,
1534 struct mlx4_ib_dev
*mdev
= to_mdev(qp
->device
);
1535 struct mlx4_cmd_mailbox
*mailbox
;
1536 struct mlx4_net_trans_rule_hw_ctrl
*ctrl
;
1539 static const u16 __mlx4_domain
[] = {
1540 [IB_FLOW_DOMAIN_USER
] = MLX4_DOMAIN_UVERBS
,
1541 [IB_FLOW_DOMAIN_ETHTOOL
] = MLX4_DOMAIN_ETHTOOL
,
1542 [IB_FLOW_DOMAIN_RFS
] = MLX4_DOMAIN_RFS
,
1543 [IB_FLOW_DOMAIN_NIC
] = MLX4_DOMAIN_NIC
,
1546 if (flow_attr
->priority
> MLX4_IB_FLOW_MAX_PRIO
) {
1547 pr_err("Invalid priority value %d\n", flow_attr
->priority
);
1551 if (domain
>= IB_FLOW_DOMAIN_NUM
) {
1552 pr_err("Invalid domain value %d\n", domain
);
1556 if (mlx4_map_sw_to_hw_steering_mode(mdev
->dev
, flow_type
) < 0)
1559 mailbox
= mlx4_alloc_cmd_mailbox(mdev
->dev
);
1560 if (IS_ERR(mailbox
))
1561 return PTR_ERR(mailbox
);
1562 ctrl
= mailbox
->buf
;
1564 ctrl
->prio
= cpu_to_be16(__mlx4_domain
[domain
] |
1565 flow_attr
->priority
);
1566 ctrl
->type
= mlx4_map_sw_to_hw_steering_mode(mdev
->dev
, flow_type
);
1567 ctrl
->port
= flow_attr
->port
;
1568 ctrl
->qpn
= cpu_to_be32(qp
->qp_num
);
1570 ib_flow
= flow_attr
+ 1;
1571 size
+= sizeof(struct mlx4_net_trans_rule_hw_ctrl
);
1572 /* Add default flows */
1573 default_flow
= __mlx4_ib_default_rules_match(qp
, flow_attr
);
1574 if (default_flow
>= 0) {
1575 ret
= __mlx4_ib_create_default_rules(
1576 mdev
, qp
, default_table
+ default_flow
,
1577 mailbox
->buf
+ size
);
1579 mlx4_free_cmd_mailbox(mdev
->dev
, mailbox
);
1584 for (i
= 0; i
< flow_attr
->num_of_specs
; i
++) {
1585 ret
= parse_flow_attr(mdev
->dev
, qp
->qp_num
, ib_flow
,
1586 mailbox
->buf
+ size
);
1588 mlx4_free_cmd_mailbox(mdev
->dev
, mailbox
);
1591 ib_flow
+= ((union ib_flow_spec
*) ib_flow
)->size
;
1595 ret
= mlx4_cmd_imm(mdev
->dev
, mailbox
->dma
, reg_id
, size
>> 2, 0,
1596 MLX4_QP_FLOW_STEERING_ATTACH
, MLX4_CMD_TIME_CLASS_A
,
1599 pr_err("mcg table is full. Fail to register network rule.\n");
1600 else if (ret
== -ENXIO
)
1601 pr_err("Device managed flow steering is disabled. Fail to register network rule.\n");
1603 pr_err("Invalid argumant. Fail to register network rule.\n");
1605 mlx4_free_cmd_mailbox(mdev
->dev
, mailbox
);
1609 static int __mlx4_ib_destroy_flow(struct mlx4_dev
*dev
, u64 reg_id
)
1612 err
= mlx4_cmd(dev
, reg_id
, 0, 0,
1613 MLX4_QP_FLOW_STEERING_DETACH
, MLX4_CMD_TIME_CLASS_A
,
1616 pr_err("Fail to detach network rule. registration id = 0x%llx\n",
1621 static int mlx4_ib_tunnel_steer_add(struct ib_qp
*qp
, struct ib_flow_attr
*flow_attr
,
1625 union ib_flow_spec
*ib_spec
;
1626 struct mlx4_dev
*dev
= to_mdev(qp
->device
)->dev
;
1629 if (dev
->caps
.tunnel_offload_mode
!= MLX4_TUNNEL_OFFLOAD_MODE_VXLAN
||
1630 dev
->caps
.dmfs_high_steer_mode
== MLX4_STEERING_DMFS_A0_STATIC
)
1631 return 0; /* do nothing */
1633 ib_flow
= flow_attr
+ 1;
1634 ib_spec
= (union ib_flow_spec
*)ib_flow
;
1636 if (ib_spec
->type
!= IB_FLOW_SPEC_ETH
|| flow_attr
->num_of_specs
!= 1)
1637 return 0; /* do nothing */
1639 err
= mlx4_tunnel_steer_add(to_mdev(qp
->device
)->dev
, ib_spec
->eth
.val
.dst_mac
,
1640 flow_attr
->port
, qp
->qp_num
,
1641 MLX4_DOMAIN_UVERBS
| (flow_attr
->priority
& 0xff),
1646 static struct ib_flow
*mlx4_ib_create_flow(struct ib_qp
*qp
,
1647 struct ib_flow_attr
*flow_attr
,
1650 int err
= 0, i
= 0, j
= 0;
1651 struct mlx4_ib_flow
*mflow
;
1652 enum mlx4_net_trans_promisc_mode type
[2];
1653 struct mlx4_dev
*dev
= (to_mdev(qp
->device
))->dev
;
1654 int is_bonded
= mlx4_is_bonded(dev
);
1656 memset(type
, 0, sizeof(type
));
1658 mflow
= kzalloc(sizeof(*mflow
), GFP_KERNEL
);
1664 switch (flow_attr
->type
) {
1665 case IB_FLOW_ATTR_NORMAL
:
1666 type
[0] = MLX4_FS_REGULAR
;
1669 case IB_FLOW_ATTR_ALL_DEFAULT
:
1670 type
[0] = MLX4_FS_ALL_DEFAULT
;
1673 case IB_FLOW_ATTR_MC_DEFAULT
:
1674 type
[0] = MLX4_FS_MC_DEFAULT
;
1677 case IB_FLOW_ATTR_SNIFFER
:
1678 type
[0] = MLX4_FS_UC_SNIFFER
;
1679 type
[1] = MLX4_FS_MC_SNIFFER
;
1687 while (i
< ARRAY_SIZE(type
) && type
[i
]) {
1688 err
= __mlx4_ib_create_flow(qp
, flow_attr
, domain
, type
[i
],
1689 &mflow
->reg_id
[i
].id
);
1691 goto err_create_flow
;
1693 /* Application always sees one port so the mirror rule
1694 * must be on port #2
1696 flow_attr
->port
= 2;
1697 err
= __mlx4_ib_create_flow(qp
, flow_attr
,
1699 &mflow
->reg_id
[j
].mirror
);
1700 flow_attr
->port
= 1;
1702 goto err_create_flow
;
1709 if (i
< ARRAY_SIZE(type
) && flow_attr
->type
== IB_FLOW_ATTR_NORMAL
) {
1710 err
= mlx4_ib_tunnel_steer_add(qp
, flow_attr
,
1711 &mflow
->reg_id
[i
].id
);
1713 goto err_create_flow
;
1716 flow_attr
->port
= 2;
1717 err
= mlx4_ib_tunnel_steer_add(qp
, flow_attr
,
1718 &mflow
->reg_id
[j
].mirror
);
1719 flow_attr
->port
= 1;
1721 goto err_create_flow
;
1724 /* function to create mirror rule */
1728 return &mflow
->ibflow
;
1732 (void)__mlx4_ib_destroy_flow(to_mdev(qp
->device
)->dev
,
1733 mflow
->reg_id
[i
].id
);
1738 (void)__mlx4_ib_destroy_flow(to_mdev(qp
->device
)->dev
,
1739 mflow
->reg_id
[j
].mirror
);
1744 return ERR_PTR(err
);
1747 static int mlx4_ib_destroy_flow(struct ib_flow
*flow_id
)
1751 struct mlx4_ib_dev
*mdev
= to_mdev(flow_id
->qp
->device
);
1752 struct mlx4_ib_flow
*mflow
= to_mflow(flow_id
);
1754 while (i
< ARRAY_SIZE(mflow
->reg_id
) && mflow
->reg_id
[i
].id
) {
1755 err
= __mlx4_ib_destroy_flow(mdev
->dev
, mflow
->reg_id
[i
].id
);
1758 if (mflow
->reg_id
[i
].mirror
) {
1759 err
= __mlx4_ib_destroy_flow(mdev
->dev
,
1760 mflow
->reg_id
[i
].mirror
);
1771 static int mlx4_ib_mcg_attach(struct ib_qp
*ibqp
, union ib_gid
*gid
, u16 lid
)
1774 struct mlx4_ib_dev
*mdev
= to_mdev(ibqp
->device
);
1775 struct mlx4_dev
*dev
= mdev
->dev
;
1776 struct mlx4_ib_qp
*mqp
= to_mqp(ibqp
);
1777 struct mlx4_ib_steering
*ib_steering
= NULL
;
1778 enum mlx4_protocol prot
= MLX4_PROT_IB_IPV6
;
1779 struct mlx4_flow_reg_id reg_id
;
1781 if (mdev
->dev
->caps
.steering_mode
==
1782 MLX4_STEERING_MODE_DEVICE_MANAGED
) {
1783 ib_steering
= kmalloc(sizeof(*ib_steering
), GFP_KERNEL
);
1788 err
= mlx4_multicast_attach(mdev
->dev
, &mqp
->mqp
, gid
->raw
, mqp
->port
,
1790 MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK
),
1793 pr_err("multicast attach op failed, err %d\n", err
);
1798 if (mlx4_is_bonded(dev
)) {
1799 err
= mlx4_multicast_attach(mdev
->dev
, &mqp
->mqp
, gid
->raw
,
1800 (mqp
->port
== 1) ? 2 : 1,
1802 MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK
),
1803 prot
, ®_id
.mirror
);
1808 err
= add_gid_entry(ibqp
, gid
);
1813 memcpy(ib_steering
->gid
.raw
, gid
->raw
, 16);
1814 ib_steering
->reg_id
= reg_id
;
1815 mutex_lock(&mqp
->mutex
);
1816 list_add(&ib_steering
->list
, &mqp
->steering_rules
);
1817 mutex_unlock(&mqp
->mutex
);
1822 mlx4_multicast_detach(mdev
->dev
, &mqp
->mqp
, gid
->raw
,
1825 mlx4_multicast_detach(mdev
->dev
, &mqp
->mqp
, gid
->raw
,
1826 prot
, reg_id
.mirror
);
static struct mlx4_ib_gid_entry *find_gid_entry(struct mlx4_ib_qp *qp, u8 *raw)
{
	struct mlx4_ib_gid_entry *ge;
	struct mlx4_ib_gid_entry *tmp;
	struct mlx4_ib_gid_entry *ret = NULL;

	list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) {
		if (!memcmp(raw, ge->gid.raw, 16)) {
			ret = ge;
			break;
		}
	}

	return ret;
}
1849 static int mlx4_ib_mcg_detach(struct ib_qp
*ibqp
, union ib_gid
*gid
, u16 lid
)
1852 struct mlx4_ib_dev
*mdev
= to_mdev(ibqp
->device
);
1853 struct mlx4_dev
*dev
= mdev
->dev
;
1854 struct mlx4_ib_qp
*mqp
= to_mqp(ibqp
);
1855 struct net_device
*ndev
;
1856 struct mlx4_ib_gid_entry
*ge
;
1857 struct mlx4_flow_reg_id reg_id
= {0, 0};
1858 enum mlx4_protocol prot
= MLX4_PROT_IB_IPV6
;
1860 if (mdev
->dev
->caps
.steering_mode
==
1861 MLX4_STEERING_MODE_DEVICE_MANAGED
) {
1862 struct mlx4_ib_steering
*ib_steering
;
1864 mutex_lock(&mqp
->mutex
);
1865 list_for_each_entry(ib_steering
, &mqp
->steering_rules
, list
) {
1866 if (!memcmp(ib_steering
->gid
.raw
, gid
->raw
, 16)) {
1867 list_del(&ib_steering
->list
);
1871 mutex_unlock(&mqp
->mutex
);
1872 if (&ib_steering
->list
== &mqp
->steering_rules
) {
1873 pr_err("Couldn't find reg_id for mgid. Steering rule is left attached\n");
1876 reg_id
= ib_steering
->reg_id
;
1880 err
= mlx4_multicast_detach(mdev
->dev
, &mqp
->mqp
, gid
->raw
,
1885 if (mlx4_is_bonded(dev
)) {
1886 err
= mlx4_multicast_detach(mdev
->dev
, &mqp
->mqp
, gid
->raw
,
1887 prot
, reg_id
.mirror
);
1892 mutex_lock(&mqp
->mutex
);
1893 ge
= find_gid_entry(mqp
, gid
->raw
);
1895 spin_lock_bh(&mdev
->iboe
.lock
);
1896 ndev
= ge
->added
? mdev
->iboe
.netdevs
[ge
->port
- 1] : NULL
;
1899 spin_unlock_bh(&mdev
->iboe
.lock
);
1902 list_del(&ge
->list
);
1905 pr_warn("could not find mgid entry\n");
1907 mutex_unlock(&mqp
->mutex
);
static int init_node_data(struct mlx4_ib_dev *dev)
{
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
	int err = -ENOMEM;

	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;
	if (mlx4_is_master(dev->dev))
		mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;

	err = mlx4_MAD_IFC(dev, mad_ifc_flags, 1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(dev->ib_dev.node_desc, out_mad->data, 64);

	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

	err = mlx4_MAD_IFC(dev, mad_ifc_flags, 1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	dev->dev->rev_id = be32_to_cpup((__be32 *) (out_mad->data + 32));
	memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8);

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}
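
/* sysfs attributes exposed for the device: hw_rev, fw_ver, hca_type and board_id. */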
static ssize_t show_hca(struct device *device, struct device_attribute *attr,
			char *buf)
{
	struct mlx4_ib_dev *dev =
		container_of(device, struct mlx4_ib_dev, ib_dev.dev);
	return sprintf(buf, "MT%d\n", dev->dev->persist->pdev->device);
}

static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr,
			   char *buf)
{
	struct mlx4_ib_dev *dev =
		container_of(device, struct mlx4_ib_dev, ib_dev.dev);
	return sprintf(buf, "%d.%d.%d\n", (int) (dev->dev->caps.fw_ver >> 32),
		       (int) (dev->dev->caps.fw_ver >> 16) & 0xffff,
		       (int) dev->dev->caps.fw_ver & 0xffff);
}

static ssize_t show_rev(struct device *device, struct device_attribute *attr,
			char *buf)
{
	struct mlx4_ib_dev *dev =
		container_of(device, struct mlx4_ib_dev, ib_dev.dev);
	return sprintf(buf, "%x\n", dev->dev->rev_id);
}

static ssize_t show_board(struct device *device, struct device_attribute *attr,
			  char *buf)
{
	struct mlx4_ib_dev *dev =
		container_of(device, struct mlx4_ib_dev, ib_dev.dev);
	return sprintf(buf, "%.*s\n", MLX4_BOARD_ID_LEN,
		       dev->dev->board_id);
}

static DEVICE_ATTR(hw_rev,   S_IRUGO, show_rev,    NULL);
static DEVICE_ATTR(fw_ver,   S_IRUGO, show_fw_ver, NULL);
static DEVICE_ATTR(hca_type, S_IRUGO, show_hca,    NULL);
static DEVICE_ATTR(board_id, S_IRUGO, show_board,  NULL);

static struct device_attribute *mlx4_class_attributes[] = {
	&dev_attr_hw_rev,
	&dev_attr_fw_ver,
	&dev_attr_hca_type,
	&dev_attr_board_id
};
1997 #define MLX4_IB_INVALID_MAC ((u64)-1)
1998 static void mlx4_ib_update_qps(struct mlx4_ib_dev
*ibdev
,
1999 struct net_device
*dev
,
2003 u64 release_mac
= MLX4_IB_INVALID_MAC
;
2004 struct mlx4_ib_qp
*qp
;
2006 read_lock(&dev_base_lock
);
2007 new_smac
= mlx4_mac_to_u64(dev
->dev_addr
);
2008 read_unlock(&dev_base_lock
);
2010 atomic64_set(&ibdev
->iboe
.mac
[port
- 1], new_smac
);
2012 /* no need for update QP1 and mac registration in non-SRIOV */
2013 if (!mlx4_is_mfunc(ibdev
->dev
))
2016 mutex_lock(&ibdev
->qp1_proxy_lock
[port
- 1]);
2017 qp
= ibdev
->qp1_proxy
[port
- 1];
2021 struct mlx4_update_qp_params update_params
;
2023 mutex_lock(&qp
->mutex
);
2024 old_smac
= qp
->pri
.smac
;
2025 if (new_smac
== old_smac
)
2028 new_smac_index
= mlx4_register_mac(ibdev
->dev
, port
, new_smac
);
2030 if (new_smac_index
< 0)
2033 update_params
.smac_index
= new_smac_index
;
2034 if (mlx4_update_qp(ibdev
->dev
, qp
->mqp
.qpn
, MLX4_UPDATE_QP_SMAC
,
2036 release_mac
= new_smac
;
2039 /* if old port was zero, no mac was yet registered for this QP */
2040 if (qp
->pri
.smac_port
)
2041 release_mac
= old_smac
;
2042 qp
->pri
.smac
= new_smac
;
2043 qp
->pri
.smac_port
= port
;
2044 qp
->pri
.smac_index
= new_smac_index
;
2048 if (release_mac
!= MLX4_IB_INVALID_MAC
)
2049 mlx4_unregister_mac(ibdev
->dev
, port
, release_mac
);
2051 mutex_unlock(&qp
->mutex
);
2052 mutex_unlock(&ibdev
->qp1_proxy_lock
[port
- 1]);
2055 static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev
*ibdev
,
2056 struct net_device
*dev
,
2057 unsigned long event
)
2060 struct mlx4_ib_iboe
*iboe
;
2061 int update_qps_port
= -1;
2066 iboe
= &ibdev
->iboe
;
2068 spin_lock_bh(&iboe
->lock
);
2069 mlx4_foreach_ib_transport_port(port
, ibdev
->dev
) {
2071 iboe
->netdevs
[port
- 1] =
2072 mlx4_get_protocol_dev(ibdev
->dev
, MLX4_PROT_ETH
, port
);
2074 if (dev
== iboe
->netdevs
[port
- 1] &&
2075 (event
== NETDEV_CHANGEADDR
|| event
== NETDEV_REGISTER
||
2076 event
== NETDEV_UP
|| event
== NETDEV_CHANGE
))
2077 update_qps_port
= port
;
2080 spin_unlock_bh(&iboe
->lock
);
2082 if (update_qps_port
> 0)
2083 mlx4_ib_update_qps(ibdev
, dev
, update_qps_port
);
static int mlx4_ib_netdev_event(struct notifier_block *this,
				unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct mlx4_ib_dev *ibdev;

	if (!net_eq(dev_net(dev), &init_net))
		return NOTIFY_DONE;

	ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb);
	mlx4_ib_scan_netdevs(ibdev, dev, event);

	return NOTIFY_DONE;
}
2101 static void init_pkeys(struct mlx4_ib_dev
*ibdev
)
2107 if (mlx4_is_master(ibdev
->dev
)) {
2108 for (slave
= 0; slave
<= ibdev
->dev
->persist
->num_vfs
;
2110 for (port
= 1; port
<= ibdev
->dev
->caps
.num_ports
; ++port
) {
2112 i
< ibdev
->dev
->phys_caps
.pkey_phys_table_len
[port
];
2114 ibdev
->pkeys
.virt2phys_pkey
[slave
][port
- 1][i
] =
2115 /* master has the identity virt2phys pkey mapping */
2116 (slave
== mlx4_master_func_num(ibdev
->dev
) || !i
) ? i
:
2117 ibdev
->dev
->phys_caps
.pkey_phys_table_len
[port
] - 1;
2118 mlx4_sync_pkey_table(ibdev
->dev
, slave
, port
, i
,
2119 ibdev
->pkeys
.virt2phys_pkey
[slave
][port
- 1][i
]);
2123 /* initialize pkey cache */
2124 for (port
= 1; port
<= ibdev
->dev
->caps
.num_ports
; ++port
) {
2126 i
< ibdev
->dev
->phys_caps
.pkey_phys_table_len
[port
];
2128 ibdev
->pkeys
.phys_pkey_cache
[port
-1][i
] =
2134 static void mlx4_ib_alloc_eqs(struct mlx4_dev
*dev
, struct mlx4_ib_dev
*ibdev
)
2136 int i
, j
, eq
= 0, total_eqs
= 0;
2138 ibdev
->eq_table
= kcalloc(dev
->caps
.num_comp_vectors
,
2139 sizeof(ibdev
->eq_table
[0]), GFP_KERNEL
);
2140 if (!ibdev
->eq_table
)
2143 for (i
= 1; i
<= dev
->caps
.num_ports
; i
++) {
2144 for (j
= 0; j
< mlx4_get_eqs_per_port(dev
, i
);
2146 if (i
> 1 && mlx4_is_eq_shared(dev
, total_eqs
))
2148 ibdev
->eq_table
[eq
] = total_eqs
;
2149 if (!mlx4_assign_eq(dev
, i
,
2150 &ibdev
->eq_table
[eq
]))
2153 ibdev
->eq_table
[eq
] = -1;
2157 for (i
= eq
; i
< dev
->caps
.num_comp_vectors
;
2158 ibdev
->eq_table
[i
++] = -1)
2161 /* Advertise the new number of EQs to clients */
2162 ibdev
->ib_dev
.num_comp_vectors
= eq
;
static void mlx4_ib_free_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
{
	int i;
	int total_eqs = ibdev->ib_dev.num_comp_vectors;

	/* no eqs were allocated */
	if (!ibdev->eq_table)
		return;

	/* Reset the advertised EQ number */
	ibdev->ib_dev.num_comp_vectors = 0;

	for (i = 0; i < total_eqs; i++)
		mlx4_release_eq(dev, ibdev->eq_table[i]);

	kfree(ibdev->eq_table);
	ibdev->eq_table = NULL;
}
static int mlx4_port_immutable(struct ib_device *ibdev, u8 port_num,
			       struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	struct mlx4_ib_dev *mdev = to_mdev(ibdev);
	int err;

	err = mlx4_ib_query_port(ibdev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;

	if (mlx4_ib_port_link_layer(ibdev, port_num) == IB_LINK_LAYER_INFINIBAND) {
		immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB;
	} else {
		if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE)
			immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
		if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2)
			immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE |
				RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
	}

	immutable->max_mad_size = IB_MGMT_MAD_SIZE;

	return 0;
}
static void *mlx4_ib_add(struct mlx4_dev *dev)
{
	struct mlx4_ib_dev *ibdev;
	int num_ports = 0;
	int i, j;
	int err;
	struct mlx4_ib_iboe *iboe;
	int ib_num_ports = 0;
	int num_req_counters;
	int allocated;
	u32 counter_index;
	struct counter_index *new_counter_index = NULL;

	pr_info_once("%s", mlx4_ib_version);

	num_ports = 0;
	mlx4_foreach_ib_transport_port(i, dev)
		num_ports++;

	/* No point in registering a device with no ports... */
	if (num_ports == 0)
		return NULL;

	ibdev = (struct mlx4_ib_dev *) ib_alloc_device(sizeof *ibdev);
	if (!ibdev) {
		dev_err(&dev->persist->pdev->dev,
			"Device struct alloc failed\n");
		return NULL;
	}

	iboe = &ibdev->iboe;

	if (mlx4_pd_alloc(dev, &ibdev->priv_pdn))
		goto err_dealloc;

	if (mlx4_uar_alloc(dev, &ibdev->priv_uar))
		goto err_pd;

	ibdev->uar_map = ioremap((phys_addr_t) ibdev->priv_uar.pfn << PAGE_SHIFT,
				 PAGE_SIZE);
	if (!ibdev->uar_map)
		goto err_uar;
	MLX4_INIT_DOORBELL_LOCK(&ibdev->uar_lock);

	ibdev->dev = dev;
	ibdev->bond_next_port	= 0;

	strlcpy(ibdev->ib_dev.name, "mlx4_%d", IB_DEVICE_NAME_MAX);
	ibdev->ib_dev.owner		= THIS_MODULE;
	ibdev->ib_dev.node_type		= RDMA_NODE_IB_CA;
	ibdev->ib_dev.local_dma_lkey	= dev->caps.reserved_lkey;
	ibdev->num_ports		= num_ports;
	ibdev->ib_dev.phys_port_cnt     = mlx4_is_bonded(dev) ?
						1 : ibdev->num_ports;
	ibdev->ib_dev.num_comp_vectors	= dev->caps.num_comp_vectors;
	ibdev->ib_dev.dma_device	= &dev->persist->pdev->dev;
	ibdev->ib_dev.get_netdev	= mlx4_ib_get_netdev;
	ibdev->ib_dev.add_gid		= mlx4_ib_add_gid;
	ibdev->ib_dev.del_gid		= mlx4_ib_del_gid;

	if (dev->caps.userspace_caps)
		ibdev->ib_dev.uverbs_abi_ver = MLX4_IB_UVERBS_ABI_VERSION;
	else
		ibdev->ib_dev.uverbs_abi_ver = MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION;

	ibdev->ib_dev.uverbs_cmd_mask	=
		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT)		|
		(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE)	|
		(1ull << IB_USER_VERBS_CMD_QUERY_PORT)		|
		(1ull << IB_USER_VERBS_CMD_ALLOC_PD)		|
		(1ull << IB_USER_VERBS_CMD_DEALLOC_PD)		|
		(1ull << IB_USER_VERBS_CMD_REG_MR)		|
		(1ull << IB_USER_VERBS_CMD_REREG_MR)		|
		(1ull << IB_USER_VERBS_CMD_DEREG_MR)		|
		(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL)	|
		(1ull << IB_USER_VERBS_CMD_CREATE_CQ)		|
		(1ull << IB_USER_VERBS_CMD_RESIZE_CQ)		|
		(1ull << IB_USER_VERBS_CMD_DESTROY_CQ)		|
		(1ull << IB_USER_VERBS_CMD_CREATE_QP)		|
		(1ull << IB_USER_VERBS_CMD_MODIFY_QP)		|
		(1ull << IB_USER_VERBS_CMD_QUERY_QP)		|
		(1ull << IB_USER_VERBS_CMD_DESTROY_QP)		|
		(1ull << IB_USER_VERBS_CMD_ATTACH_MCAST)	|
		(1ull << IB_USER_VERBS_CMD_DETACH_MCAST)	|
		(1ull << IB_USER_VERBS_CMD_CREATE_SRQ)		|
		(1ull << IB_USER_VERBS_CMD_MODIFY_SRQ)		|
		(1ull << IB_USER_VERBS_CMD_QUERY_SRQ)		|
		(1ull << IB_USER_VERBS_CMD_DESTROY_SRQ)		|
		(1ull << IB_USER_VERBS_CMD_CREATE_XSRQ)		|
		(1ull << IB_USER_VERBS_CMD_OPEN_QP);

	ibdev->ib_dev.query_device	= mlx4_ib_query_device;
	ibdev->ib_dev.query_port	= mlx4_ib_query_port;
	ibdev->ib_dev.get_link_layer	= mlx4_ib_port_link_layer;
	ibdev->ib_dev.query_gid		= mlx4_ib_query_gid;
	ibdev->ib_dev.query_pkey	= mlx4_ib_query_pkey;
	ibdev->ib_dev.modify_device	= mlx4_ib_modify_device;
	ibdev->ib_dev.modify_port	= mlx4_ib_modify_port;
	ibdev->ib_dev.alloc_ucontext	= mlx4_ib_alloc_ucontext;
	ibdev->ib_dev.dealloc_ucontext	= mlx4_ib_dealloc_ucontext;
	ibdev->ib_dev.mmap		= mlx4_ib_mmap;
	ibdev->ib_dev.alloc_pd		= mlx4_ib_alloc_pd;
	ibdev->ib_dev.dealloc_pd	= mlx4_ib_dealloc_pd;
	ibdev->ib_dev.create_ah		= mlx4_ib_create_ah;
	ibdev->ib_dev.query_ah		= mlx4_ib_query_ah;
	ibdev->ib_dev.destroy_ah	= mlx4_ib_destroy_ah;
	ibdev->ib_dev.create_srq	= mlx4_ib_create_srq;
	ibdev->ib_dev.modify_srq	= mlx4_ib_modify_srq;
	ibdev->ib_dev.query_srq		= mlx4_ib_query_srq;
	ibdev->ib_dev.destroy_srq	= mlx4_ib_destroy_srq;
	ibdev->ib_dev.post_srq_recv	= mlx4_ib_post_srq_recv;
	ibdev->ib_dev.create_qp		= mlx4_ib_create_qp;
	ibdev->ib_dev.modify_qp		= mlx4_ib_modify_qp;
	ibdev->ib_dev.query_qp		= mlx4_ib_query_qp;
	ibdev->ib_dev.destroy_qp	= mlx4_ib_destroy_qp;
	ibdev->ib_dev.post_send		= mlx4_ib_post_send;
	ibdev->ib_dev.post_recv		= mlx4_ib_post_recv;
	ibdev->ib_dev.create_cq		= mlx4_ib_create_cq;
	ibdev->ib_dev.modify_cq		= mlx4_ib_modify_cq;
	ibdev->ib_dev.resize_cq		= mlx4_ib_resize_cq;
	ibdev->ib_dev.destroy_cq	= mlx4_ib_destroy_cq;
	ibdev->ib_dev.poll_cq		= mlx4_ib_poll_cq;
	ibdev->ib_dev.req_notify_cq	= mlx4_ib_arm_cq;
	ibdev->ib_dev.get_dma_mr	= mlx4_ib_get_dma_mr;
	ibdev->ib_dev.reg_user_mr	= mlx4_ib_reg_user_mr;
	ibdev->ib_dev.rereg_user_mr	= mlx4_ib_rereg_user_mr;
	ibdev->ib_dev.dereg_mr		= mlx4_ib_dereg_mr;
	ibdev->ib_dev.alloc_mr		= mlx4_ib_alloc_mr;
	ibdev->ib_dev.map_mr_sg		= mlx4_ib_map_mr_sg;
	ibdev->ib_dev.attach_mcast	= mlx4_ib_mcg_attach;
	ibdev->ib_dev.detach_mcast	= mlx4_ib_mcg_detach;
	ibdev->ib_dev.process_mad	= mlx4_ib_process_mad;
	ibdev->ib_dev.get_port_immutable = mlx4_port_immutable;
	ibdev->ib_dev.disassociate_ucontext = mlx4_ib_disassociate_ucontext;
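
	/* FMR verbs are exposed only on the PF; SR-IOV slaves do not get them. */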
	if (!mlx4_is_slave(ibdev->dev)) {
		ibdev->ib_dev.alloc_fmr		= mlx4_ib_fmr_alloc;
		ibdev->ib_dev.map_phys_fmr	= mlx4_ib_map_phys_fmr;
		ibdev->ib_dev.unmap_fmr		= mlx4_ib_unmap_fmr;
		ibdev->ib_dev.dealloc_fmr	= mlx4_ib_fmr_dealloc;
	}

	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW ||
	    dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN) {
		ibdev->ib_dev.alloc_mw = mlx4_ib_alloc_mw;
		ibdev->ib_dev.dealloc_mw = mlx4_ib_dealloc_mw;

		ibdev->ib_dev.uverbs_cmd_mask |=
			(1ull << IB_USER_VERBS_CMD_ALLOC_MW) |
			(1ull << IB_USER_VERBS_CMD_DEALLOC_MW);
	}

	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) {
		ibdev->ib_dev.alloc_xrcd = mlx4_ib_alloc_xrcd;
		ibdev->ib_dev.dealloc_xrcd = mlx4_ib_dealloc_xrcd;
		ibdev->ib_dev.uverbs_cmd_mask |=
			(1ull << IB_USER_VERBS_CMD_OPEN_XRCD) |
			(1ull << IB_USER_VERBS_CMD_CLOSE_XRCD);
	}

	if (check_flow_steering_support(dev)) {
		ibdev->steering_support = MLX4_STEERING_MODE_DEVICE_MANAGED;
		ibdev->ib_dev.create_flow	= mlx4_ib_create_flow;
		ibdev->ib_dev.destroy_flow	= mlx4_ib_destroy_flow;

		ibdev->ib_dev.uverbs_ex_cmd_mask	|=
			(1ull << IB_USER_VERBS_EX_CMD_CREATE_FLOW) |
			(1ull << IB_USER_VERBS_EX_CMD_DESTROY_FLOW);
	}

	ibdev->ib_dev.uverbs_ex_cmd_mask |=
		(1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE) |
		(1ull << IB_USER_VERBS_EX_CMD_CREATE_CQ) |
		(1ull << IB_USER_VERBS_EX_CMD_CREATE_QP);
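
	/* Per-device EQ assignment, node data and per-port counter setup. */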
	mlx4_ib_alloc_eqs(dev, ibdev);

	spin_lock_init(&iboe->lock);

	if (init_node_data(ibdev))
		goto err_map;

	for (i = 0; i < ibdev->num_ports; ++i) {
		mutex_init(&ibdev->counters_table[i].mutex);
		INIT_LIST_HEAD(&ibdev->counters_table[i].counters_list);
	}

	num_req_counters = mlx4_is_bonded(dev) ? 1 : ibdev->num_ports;
	for (i = 0; i < num_req_counters; ++i) {
		mutex_init(&ibdev->qp1_proxy_lock[i]);
		allocated = 0;
		if (mlx4_ib_port_link_layer(&ibdev->ib_dev, i + 1) ==
						IB_LINK_LAYER_ETHERNET) {
			err = mlx4_counter_alloc(ibdev->dev, &counter_index);
			/* if failed to allocate a new counter, use default */
			if (err)
				counter_index =
					mlx4_get_default_counter_index(dev,
								       i + 1);
			else
				allocated = 1;
		} else { /* IB_LINK_LAYER_INFINIBAND use the default counter */
			counter_index = mlx4_get_default_counter_index(dev,
								       i + 1);
		}
		new_counter_index = kmalloc(sizeof(*new_counter_index),
					    GFP_KERNEL);
		if (!new_counter_index) {
			if (allocated)
				mlx4_counter_free(ibdev->dev, counter_index);
			goto err_counter;
		}
		new_counter_index->index = counter_index;
		new_counter_index->allocated = allocated;
		list_add_tail(&new_counter_index->list,
			      &ibdev->counters_table[i].counters_list);
		ibdev->counters_table[i].default_counter = counter_index;
		pr_info("counter index %d for port %d allocated %d\n",
			counter_index, i + 1, allocated);
	}
	if (mlx4_is_bonded(dev))
		for (i = 1; i < ibdev->num_ports ; ++i) {
			new_counter_index =
					kmalloc(sizeof(struct counter_index),
						GFP_KERNEL);
			if (!new_counter_index)
				goto err_counter;
			new_counter_index->index = counter_index;
			new_counter_index->allocated = 0;
			list_add_tail(&new_counter_index->list,
				      &ibdev->counters_table[i].counters_list);
			ibdev->counters_table[i].default_counter =
								counter_index;
		}
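
	/* With device-managed flow steering on an IB port, reserve a dedicated
	 * QPN range for userspace UC QPs and report it to the firmware.
	 */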
	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
		ib_num_ports++;

	spin_lock_init(&ibdev->sm_lock);
	mutex_init(&ibdev->cap_mask_mutex);
	INIT_LIST_HEAD(&ibdev->qp_list);
	spin_lock_init(&ibdev->reset_flow_resource_lock);

	if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED &&
	    ib_num_ports) {
		ibdev->steer_qpn_count = MLX4_IB_UC_MAX_NUM_QPS;
		err = mlx4_qp_reserve_range(dev, ibdev->steer_qpn_count,
					    MLX4_IB_UC_STEER_QPN_ALIGN,
					    &ibdev->steer_qpn_base, 0);
		if (err)
			goto err_counter;

		ibdev->ib_uc_qpns_bitmap =
			kmalloc(BITS_TO_LONGS(ibdev->steer_qpn_count) *
				sizeof(long),
				GFP_KERNEL);
		if (!ibdev->ib_uc_qpns_bitmap) {
			dev_err(&dev->persist->pdev->dev,
				"bit map alloc failed\n");
			goto err_steer_qp_release;
		}

		bitmap_zero(ibdev->ib_uc_qpns_bitmap, ibdev->steer_qpn_count);

		err = mlx4_FLOW_STEERING_IB_UC_QP_RANGE(
				dev, ibdev->steer_qpn_base,
				ibdev->steer_qpn_base +
				ibdev->steer_qpn_count - 1);
		if (err)
			goto err_steer_free_bitmap;
	}

	for (j = 1; j <= ibdev->dev->caps.num_ports; j++)
		atomic64_set(&iboe->mac[j - 1], ibdev->dev->caps.def_mac[j]);

	if (ib_register_device(&ibdev->ib_dev, NULL))
		goto err_steer_free_bitmap;
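
	/* MAD/SMI handling, SR-IOV demux contexts and (for RoCE) the netdev
	 * notifier must be in place before the device is marked active.
	 */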
	if (mlx4_ib_mad_init(ibdev))
		goto err_reg;

	if (mlx4_ib_init_sriov(ibdev))
		goto err_mad;

	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE ||
	    dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2) {
		if (!iboe->nb.notifier_call) {
			iboe->nb.notifier_call = mlx4_ib_netdev_event;
			err = register_netdevice_notifier(&iboe->nb);
			if (err) {
				iboe->nb.notifier_call = NULL;
				goto err_notif;
			}
		}
		if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2) {
			err = mlx4_config_roce_v2_port(dev, ROCE_V2_UDP_DPORT);
			if (err)
				goto err_notif;
		}
	}

	for (j = 0; j < ARRAY_SIZE(mlx4_class_attributes); ++j) {
		if (device_create_file(&ibdev->ib_dev.dev,
				       mlx4_class_attributes[j]))
			goto err_notif;
	}

	ibdev->ib_active = true;

	if (mlx4_is_mfunc(ibdev->dev))
		init_pkeys(ibdev);

	/* create paravirt contexts for any VFs which are active */
	if (mlx4_is_master(ibdev->dev)) {
		for (j = 0; j < MLX4_MFUNC_MAX; j++) {
			if (j == mlx4_master_func_num(ibdev->dev))
				continue;
			if (mlx4_is_slave_active(ibdev->dev, j))
				do_slave_init(ibdev, j, 1);
		}
	}
	return ibdev;
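
/* Error unwind: release everything acquired above, in reverse order. */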
err_notif:
	if (ibdev->iboe.nb.notifier_call) {
		if (unregister_netdevice_notifier(&ibdev->iboe.nb))
			pr_warn("failure unregistering notifier\n");
		ibdev->iboe.nb.notifier_call = NULL;
	}
	flush_workqueue(wq);

	mlx4_ib_close_sriov(ibdev);

err_mad:
	mlx4_ib_mad_cleanup(ibdev);

err_reg:
	ib_unregister_device(&ibdev->ib_dev);

err_steer_free_bitmap:
	kfree(ibdev->ib_uc_qpns_bitmap);

err_steer_qp_release:
	if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED)
		mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
				      ibdev->steer_qpn_count);
err_counter:
	for (i = 0; i < ibdev->num_ports; ++i)
		mlx4_ib_delete_counters_table(ibdev, &ibdev->counters_table[i]);

err_map:
	iounmap(ibdev->uar_map);

err_uar:
	mlx4_uar_free(dev, &ibdev->priv_uar);

err_pd:
	mlx4_pd_free(dev, ibdev->priv_pdn);

err_dealloc:
	ib_dealloc_device(&ibdev->ib_dev);

	return NULL;
}
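
/*
 * Allocate a naturally aligned block of QPNs out of the range reserved for
 * device-managed flow steering; mlx4_ib_steer_qp_free() returns it.
 */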
int mlx4_ib_steer_qp_alloc(struct mlx4_ib_dev *dev, int count, int *qpn)
{
	int offset;

	WARN_ON(!dev->ib_uc_qpns_bitmap);

	offset = bitmap_find_free_region(dev->ib_uc_qpns_bitmap,
					 dev->steer_qpn_count,
					 get_count_order(count));
	if (offset < 0)
		return offset;

	*qpn = dev->steer_qpn_base + offset;
	return 0;
}
void mlx4_ib_steer_qp_free(struct mlx4_ib_dev *dev, u32 qpn, int count)
{
	if (!qpn ||
	    dev->steering_support != MLX4_STEERING_MODE_DEVICE_MANAGED)
		return;

	BUG_ON(qpn < dev->steer_qpn_base);

	bitmap_release_region(dev->ib_uc_qpns_bitmap,
			      qpn - dev->steer_qpn_base,
			      get_count_order(count));
}
int mlx4_ib_steer_qp_reg(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
			 int is_attach)
{
	int err;
	size_t flow_size;
	struct ib_flow_attr *flow = NULL;
	struct ib_flow_spec_ib *ib_spec;

	if (is_attach) {
		flow_size = sizeof(struct ib_flow_attr) +
			    sizeof(struct ib_flow_spec_ib);
		flow = kzalloc(flow_size, GFP_KERNEL);
		if (!flow)
			return -ENOMEM;
		flow->port = mqp->port;
		flow->num_of_specs = 1;
		flow->size = flow_size;
		ib_spec = (struct ib_flow_spec_ib *)(flow + 1);
		ib_spec->type = IB_FLOW_SPEC_IB;
		ib_spec->size = sizeof(struct ib_flow_spec_ib);
		/* Add an empty rule for IB L2 */
		memset(&ib_spec->mask, 0, sizeof(ib_spec->mask));

		err = __mlx4_ib_create_flow(&mqp->ibqp, flow,
					    IB_FLOW_DOMAIN_NIC,
					    MLX4_FS_REGULAR,
					    &mqp->reg_id);
	} else {
		err = __mlx4_ib_destroy_flow(mdev->dev, mqp->reg_id);
	}
	kfree(flow);
	return err;
}
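
/*
 * mlx4_ib_remove() - tear down the IB device: unregister from the IB core,
 * drop the netdev notifier, and release steering QPNs, counters, EQs, the UAR
 * and the PD.
 */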
static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
{
	struct mlx4_ib_dev *ibdev = ibdev_ptr;
	int p;

	ibdev->ib_active = false;
	flush_workqueue(wq);

	mlx4_ib_close_sriov(ibdev);
	mlx4_ib_mad_cleanup(ibdev);
	ib_unregister_device(&ibdev->ib_dev);
	if (ibdev->iboe.nb.notifier_call) {
		if (unregister_netdevice_notifier(&ibdev->iboe.nb))
			pr_warn("failure unregistering notifier\n");
		ibdev->iboe.nb.notifier_call = NULL;
	}

	if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED) {
		mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
				      ibdev->steer_qpn_count);
		kfree(ibdev->ib_uc_qpns_bitmap);
	}

	iounmap(ibdev->uar_map);
	for (p = 0; p < ibdev->num_ports; ++p)
		mlx4_ib_delete_counters_table(ibdev, &ibdev->counters_table[p]);

	mlx4_foreach_port(p, dev, MLX4_PORT_TYPE_IB)
		mlx4_CLOSE_PORT(dev, p);

	mlx4_ib_free_eqs(dev, ibdev);

	mlx4_uar_free(dev, &ibdev->priv_uar);
	mlx4_pd_free(dev, ibdev->priv_pdn);
	ib_dealloc_device(&ibdev->ib_dev);
}
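
/*
 * do_slave_init() - queue work to create (do_init=1) or destroy (do_init=0)
 * the paravirtualized tunnel QPs for one slave, one work item per active port.
 */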
static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init)
{
	struct mlx4_ib_demux_work **dm = NULL;
	struct mlx4_dev *dev = ibdev->dev;
	int i;
	unsigned long flags;
	struct mlx4_active_ports actv_ports;
	unsigned int ports;
	unsigned int first_port;

	if (!mlx4_is_master(dev))
		return;

	actv_ports = mlx4_get_active_ports(dev, slave);
	ports = bitmap_weight(actv_ports.ports, dev->caps.num_ports);
	first_port = find_first_bit(actv_ports.ports, dev->caps.num_ports);

	dm = kcalloc(ports, sizeof(*dm), GFP_ATOMIC);
	if (!dm) {
		pr_err("failed to allocate memory for tunneling qp update\n");
		return;
	}

	for (i = 0; i < ports; i++) {
		dm[i] = kmalloc(sizeof (struct mlx4_ib_demux_work), GFP_ATOMIC);
		if (!dm[i]) {
			pr_err("failed to allocate memory for tunneling qp update work struct\n");
			while (--i >= 0)
				kfree(dm[i]);
			goto out;
		}
		INIT_WORK(&dm[i]->work, mlx4_ib_tunnels_update_work);
		dm[i]->port = first_port + i + 1;
		dm[i]->slave = slave;
		dm[i]->do_init = do_init;
		dm[i]->dev = ibdev;
	}
	/* initialize or tear down tunnel QPs for the slave */
	spin_lock_irqsave(&ibdev->sriov.going_down_lock, flags);
	if (!ibdev->sriov.is_going_down) {
		for (i = 0; i < ports; i++)
			queue_work(ibdev->sriov.demux[i].ud_wq, &dm[i]->work);
		spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags);
	} else {
		spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags);
		for (i = 0; i < ports; i++)
			kfree(dm[i]);
	}
out:
	kfree(dm);
	return;
}
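
/*
 * On a catastrophic error, walk every QP on this device and schedule a
 * completion notification on any CQ that still has outstanding work, so that
 * consumers can observe the failure and clean up.
 */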
static void mlx4_ib_handle_catas_error(struct mlx4_ib_dev *ibdev)
{
	struct mlx4_ib_qp *mqp;
	unsigned long flags_qp;
	unsigned long flags_cq;
	struct mlx4_ib_cq *send_mcq, *recv_mcq;
	struct list_head cq_notify_list;
	struct mlx4_cq *mcq;
	unsigned long flags;

	pr_warn("mlx4_ib_handle_catas_error was started\n");
	INIT_LIST_HEAD(&cq_notify_list);

	/* Go over qp list reside on that ibdev, sync with create/destroy qp.*/
	spin_lock_irqsave(&ibdev->reset_flow_resource_lock, flags);

	list_for_each_entry(mqp, &ibdev->qp_list, qps_list) {
		spin_lock_irqsave(&mqp->sq.lock, flags_qp);
		if (mqp->sq.tail != mqp->sq.head) {
			send_mcq = to_mcq(mqp->ibqp.send_cq);
			spin_lock_irqsave(&send_mcq->lock, flags_cq);
			if (send_mcq->mcq.comp &&
			    mqp->ibqp.send_cq->comp_handler) {
				if (!send_mcq->mcq.reset_notify_added) {
					send_mcq->mcq.reset_notify_added = 1;
					list_add_tail(&send_mcq->mcq.reset_notify,
						      &cq_notify_list);
				}
			}
			spin_unlock_irqrestore(&send_mcq->lock, flags_cq);
		}
		spin_unlock_irqrestore(&mqp->sq.lock, flags_qp);
		/* Now, handle the QP's receive queue */
		spin_lock_irqsave(&mqp->rq.lock, flags_qp);
		/* no handling is needed for SRQ */
		if (!mqp->ibqp.srq) {
			if (mqp->rq.tail != mqp->rq.head) {
				recv_mcq = to_mcq(mqp->ibqp.recv_cq);
				spin_lock_irqsave(&recv_mcq->lock, flags_cq);
				if (recv_mcq->mcq.comp &&
				    mqp->ibqp.recv_cq->comp_handler) {
					if (!recv_mcq->mcq.reset_notify_added) {
						recv_mcq->mcq.reset_notify_added = 1;
						list_add_tail(&recv_mcq->mcq.reset_notify,
							      &cq_notify_list);
					}
				}
				spin_unlock_irqrestore(&recv_mcq->lock,
						       flags_cq);
			}
		}
		spin_unlock_irqrestore(&mqp->rq.lock, flags_qp);
	}

	list_for_each_entry(mcq, &cq_notify_list, reset_notify) {
		mcq->comp(mcq);
	}
	spin_unlock_irqrestore(&ibdev->reset_flow_resource_lock, flags);
	pr_warn("mlx4_ib_handle_catas_error ended\n");
}
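
/*
 * For a bonded (LAG) device a single IB port is exposed; report it ACTIVE if
 * any slave netdev is running with carrier, otherwise report PORT_ERR.
 */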
static void handle_bonded_port_state_event(struct work_struct *work)
{
	struct ib_event_work *ew =
		container_of(work, struct ib_event_work, work);
	struct mlx4_ib_dev *ibdev = ew->ib_dev;
	enum ib_port_state bonded_port_state = IB_PORT_NOP;
	int i;
	struct ib_event ibev;

	kfree(ew);
	spin_lock_bh(&ibdev->iboe.lock);
	for (i = 0; i < MLX4_MAX_PORTS; ++i) {
		struct net_device *curr_netdev = ibdev->iboe.netdevs[i];
		enum ib_port_state curr_port_state;

		if (!curr_netdev)
			continue;

		curr_port_state =
			(netif_running(curr_netdev) &&
			 netif_carrier_ok(curr_netdev)) ?
			IB_PORT_ACTIVE : IB_PORT_DOWN;

		bonded_port_state = (bonded_port_state != IB_PORT_ACTIVE) ?
			curr_port_state : IB_PORT_ACTIVE;
	}
	spin_unlock_bh(&ibdev->iboe.lock);

	ibev.device = &ibdev->ib_dev;
	ibev.element.port_num = 1;
	ibev.event = (bonded_port_state == IB_PORT_ACTIVE) ?
		IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;

	ib_dispatch_event(&ibev);
}
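
/*
 * mlx4_ib_event() - translate mlx4 core events (port up/down, catastrophic
 * error, port management change, slave init/shutdown) into IB events or into
 * deferred work on the driver workqueue.
 */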
static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
			  enum mlx4_dev_event event, unsigned long param)
{
	struct ib_event ibev;
	struct mlx4_ib_dev *ibdev = to_mdev((struct ib_device *) ibdev_ptr);
	struct mlx4_eqe *eqe = NULL;
	struct ib_event_work *ew;
	int p = 0;

	if (mlx4_is_bonded(dev) &&
	    ((event == MLX4_DEV_EVENT_PORT_UP) ||
	     (event == MLX4_DEV_EVENT_PORT_DOWN))) {
		ew = kmalloc(sizeof(*ew), GFP_ATOMIC);
		if (!ew)
			return;
		INIT_WORK(&ew->work, handle_bonded_port_state_event);
		ew->ib_dev = ibdev;
		queue_work(wq, &ew->work);
		return;
	}

	if (event == MLX4_DEV_EVENT_PORT_MGMT_CHANGE)
		eqe = (struct mlx4_eqe *)param;
	else
		p = (int) param;

	switch (event) {
	case MLX4_DEV_EVENT_PORT_UP:
		if (p > ibdev->num_ports)
			return;
		if (mlx4_is_master(dev) &&
		    rdma_port_get_link_layer(&ibdev->ib_dev, p) ==
			IB_LINK_LAYER_INFINIBAND) {
			mlx4_ib_invalidate_all_guid_record(ibdev, p);
		}
		ibev.event = IB_EVENT_PORT_ACTIVE;
		break;

	case MLX4_DEV_EVENT_PORT_DOWN:
		if (p > ibdev->num_ports)
			return;
		ibev.event = IB_EVENT_PORT_ERR;
		break;

	case MLX4_DEV_EVENT_CATASTROPHIC_ERROR:
		ibdev->ib_active = false;
		ibev.event = IB_EVENT_DEVICE_FATAL;
		mlx4_ib_handle_catas_error(ibdev);
		break;

	case MLX4_DEV_EVENT_PORT_MGMT_CHANGE:
		ew = kmalloc(sizeof *ew, GFP_ATOMIC);
		if (!ew) {
			pr_err("failed to allocate memory for events work\n");
			break;
		}

		INIT_WORK(&ew->work, handle_port_mgmt_change_event);
		memcpy(&ew->ib_eqe, eqe, sizeof *eqe);
		ew->ib_dev = ibdev;
		/* need to queue only for port owner, which uses GEN_EQE */
		if (mlx4_is_master(dev))
			queue_work(wq, &ew->work);
		else
			handle_port_mgmt_change_event(&ew->work);
		return;

	case MLX4_DEV_EVENT_SLAVE_INIT:
		/* here, p is the slave id */
		do_slave_init(ibdev, p, 1);
		if (mlx4_is_master(dev)) {
			int i;

			for (i = 1; i <= ibdev->num_ports; i++) {
				if (rdma_port_get_link_layer(&ibdev->ib_dev, i)
					== IB_LINK_LAYER_INFINIBAND)
					mlx4_ib_slave_alias_guid_event(ibdev,
								       p, i,
								       1);
			}
		}
		return;

	case MLX4_DEV_EVENT_SLAVE_SHUTDOWN:
		if (mlx4_is_master(dev)) {
			int i;

			for (i = 1; i <= ibdev->num_ports; i++) {
				if (rdma_port_get_link_layer(&ibdev->ib_dev, i)
					== IB_LINK_LAYER_INFINIBAND)
					mlx4_ib_slave_alias_guid_event(ibdev,
								       p, i,
								       0);
			}
		}
		/* here, p is the slave id */
		do_slave_init(ibdev, p, 0);
		return;

	default:
		return;
	}

	ibev.device	      = ibdev_ptr;
	ibev.element.port_num = mlx4_is_bonded(ibdev->dev) ? 1 : (u8)p;

	ib_dispatch_event(&ibev);
}
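
/*
 * Interface registered with the mlx4 core; the MLX4_INTFF_BONDING flag marks
 * it as bonding-aware so the core can rebuild the device around LAG changes.
 */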
static struct mlx4_interface mlx4_ib_interface = {
	.add		= mlx4_ib_add,
	.remove		= mlx4_ib_remove,
	.event		= mlx4_ib_event,
	.protocol	= MLX4_PROT_IB_IPV6,
	.flags		= MLX4_INTFF_BONDING
};
static int __init mlx4_ib_init(void)
{
	int err;

	wq = create_singlethread_workqueue("mlx4_ib");
	if (!wq)
		return -ENOMEM;

	err = mlx4_ib_mcg_init();
	if (err)
		goto clean_wq;

	err = mlx4_register_interface(&mlx4_ib_interface);
	if (err)
		goto clean_mcg;

	return 0;

clean_mcg:
	mlx4_ib_mcg_destroy();

clean_wq:
	destroy_workqueue(wq);
	return err;
}
static void __exit mlx4_ib_cleanup(void)
{
	mlx4_unregister_interface(&mlx4_ib_interface);
	mlx4_ib_mcg_destroy();
	destroy_workqueue(wq);
}

module_init(mlx4_ib_init);
module_exit(mlx4_ib_cleanup);