/*
 * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <rdma/ib_mad.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_sa.h>
#include <rdma/ib_cache.h>

#include <linux/random.h>
#include <linux/mlx4/cmd.h>
#include <linux/gfp.h>
#include <rdma/ib_pma.h>

#include "mlx4_ib.h"

enum {
	MLX4_IB_VENDOR_CLASS1 = 0x9,
	MLX4_IB_VENDOR_CLASS2 = 0xa
};
#define MLX4_TUN_SEND_WRID_SHIFT 34
#define MLX4_TUN_QPN_SHIFT 32
#define MLX4_TUN_WRID_RECV   (((u64) 1) << MLX4_TUN_SEND_WRID_SHIFT)
#define MLX4_TUN_SET_WRID_QPN(a) (((u64) ((a) & 0x3)) << MLX4_TUN_QPN_SHIFT)

#define MLX4_TUN_IS_RECV(a)  (((a) >>  MLX4_TUN_SEND_WRID_SHIFT) & 0x1)
#define MLX4_TUN_WRID_QPN(a) (((a) >> MLX4_TUN_QPN_SHIFT) & 0x3)
/* Port mgmt change event handling */

#define GET_BLK_PTR_FROM_EQE(eqe) be32_to_cpu(eqe->event.port_mgmt_change.params.tbl_change_info.block_ptr)
#define GET_MASK_FROM_EQE(eqe) be32_to_cpu(eqe->event.port_mgmt_change.params.tbl_change_info.tbl_entries_mask)
#define NUM_IDX_IN_PKEY_TBL_BLK 32
#define GUID_TBL_ENTRY_SIZE 8	   /* size in bytes */
#define GUID_TBL_BLK_NUM_ENTRIES 8
#define GUID_TBL_BLK_SIZE (GUID_TBL_ENTRY_SIZE * GUID_TBL_BLK_NUM_ENTRIES)
/* Counters should saturate once they reach their maximum value */
#define ASSIGN_32BIT_COUNTER(counter, value) do {	\
	if ((value) > U32_MAX)				\
		counter = cpu_to_be32(U32_MAX);		\
	else						\
		counter = cpu_to_be32(value);		\
} while (0)
struct mlx4_mad_rcv_buf {
	struct ib_grh grh;
	u8 payload[256];
} __packed;

struct mlx4_mad_snd_buf {
	u8 payload[256];
} __packed;

struct mlx4_tunnel_mad {
	struct ib_grh grh;
	struct mlx4_ib_tunnel_header hdr;
	struct ib_mad mad;
} __packed;

struct mlx4_rcv_tunnel_mad {
	struct mlx4_rcv_tunnel_hdr hdr;
	struct ib_grh grh;
	struct ib_mad mad;
} __packed;
static void handle_client_rereg_event(struct mlx4_ib_dev *dev, u8 port_num);
static void handle_lid_change_event(struct mlx4_ib_dev *dev, u8 port_num);
static void __propagate_pkey_ev(struct mlx4_ib_dev *dev, int port_num,
				int block, u32 change_bitmap);
__be64 mlx4_ib_gen_node_guid(void)
{
#define NODE_GUID_HI	((u64) (((u64)IB_OPENIB_OUI) << 40))
	return cpu_to_be64(NODE_GUID_HI | prandom_u32());
}
__be64 mlx4_ib_get_new_demux_tid(struct mlx4_ib_demux_ctx *ctx)
{
	return cpu_to_be64(atomic_inc_return(&ctx->tid)) |
		cpu_to_be64(0xff00000000000000LL);
}
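
/*
 * Execute a MAD_IFC firmware command: the request MAD is copied into a
 * command mailbox, optional work-completion details are appended so the
 * firmware can do M_Key/B_Key checks and direct traps, and the 256-byte
 * response MAD is copied back to the caller.
 */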
int mlx4_MAD_IFC(struct mlx4_ib_dev *dev, int mad_ifc_flags,
		 int port, struct ib_wc *in_wc, struct ib_grh *in_grh,
		 void *in_mad, void *response_mad)
{
	struct mlx4_cmd_mailbox *inmailbox, *outmailbox;
	void *inbox;
	int err;
	u32 in_modifier = port;
	u8 op_modifier = 0;

	inmailbox = mlx4_alloc_cmd_mailbox(dev->dev);
	if (IS_ERR(inmailbox))
		return PTR_ERR(inmailbox);
	inbox = inmailbox->buf;

	outmailbox = mlx4_alloc_cmd_mailbox(dev->dev);
	if (IS_ERR(outmailbox)) {
		mlx4_free_cmd_mailbox(dev->dev, inmailbox);
		return PTR_ERR(outmailbox);
	}

	memcpy(inbox, in_mad, 256);

	/*
	 * Key check traps can't be generated unless we have in_wc to
	 * tell us where to send the trap.
	 */
	if ((mad_ifc_flags & MLX4_MAD_IFC_IGNORE_MKEY) || !in_wc)
		op_modifier |= 0x1;
	if ((mad_ifc_flags & MLX4_MAD_IFC_IGNORE_BKEY) || !in_wc)
		op_modifier |= 0x2;
	if (mlx4_is_mfunc(dev->dev) &&
	    (mad_ifc_flags & MLX4_MAD_IFC_NET_VIEW || in_wc))
		op_modifier |= 0x8;

	if (in_wc) {
		struct {
			__be32		my_qpn;
			u32		reserved1;
			__be32		rqpn;
			u8		sl;
			u8		g_path;
			u16		reserved2[2];
			__be16		pkey;
			u32		reserved3[11];
			u8		grh[40];
		} *ext_info;

		memset(inbox + 256, 0, 256);
		ext_info = inbox + 256;

		ext_info->my_qpn = cpu_to_be32(in_wc->qp->qp_num);
		ext_info->rqpn   = cpu_to_be32(in_wc->src_qp);
		ext_info->sl     = in_wc->sl << 4;
		ext_info->g_path = in_wc->dlid_path_bits |
			(in_wc->wc_flags & IB_WC_GRH ? 0x80 : 0);
		ext_info->pkey   = cpu_to_be16(in_wc->pkey_index);

		if (in_grh)
			memcpy(ext_info->grh, in_grh, 40);

		op_modifier |= 0x4;

		in_modifier |= in_wc->slid << 16;
	}

	err = mlx4_cmd_box(dev->dev, inmailbox->dma, outmailbox->dma, in_modifier,
			   mlx4_is_master(dev->dev) ? (op_modifier & ~0x8) : op_modifier,
			   MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
			   (op_modifier & 0x8) ? MLX4_CMD_NATIVE : MLX4_CMD_WRAPPED);

	if (!err)
		memcpy(response_mad, outmailbox->buf, 256);

	mlx4_free_cmd_mailbox(dev->dev, inmailbox);
	mlx4_free_cmd_mailbox(dev->dev, outmailbox);

	return err;
}
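
/* Cache an address handle to the subnet manager so that traps can later be forwarded to it */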
static void update_sm_ah(struct mlx4_ib_dev *dev, u8 port_num, u16 lid, u8 sl)
{
	struct ib_ah *new_ah;
	struct ib_ah_attr ah_attr;
	unsigned long flags;

	if (!dev->send_agent[port_num - 1][0])
		return;

	memset(&ah_attr, 0, sizeof ah_attr);
	ah_attr.dlid     = lid;
	ah_attr.sl       = sl;
	ah_attr.port_num = port_num;

	new_ah = ib_create_ah(dev->send_agent[port_num - 1][0]->qp->pd,
			      &ah_attr);
	if (IS_ERR(new_ah))
		return;

	spin_lock_irqsave(&dev->sm_lock, flags);
	if (dev->sm_ah[port_num - 1])
		ib_destroy_ah(dev->sm_ah[port_num - 1]);
	dev->sm_ah[port_num - 1] = new_ah;
	spin_unlock_irqrestore(&dev->sm_lock, flags);
}
/*
 * Snoop SM MADs for port info, GUID info, and P_Key table sets, so we can
 * synthesize LID change, Client-Rereg, GID change, and P_Key change events.
 */
static void smp_snoop(struct ib_device *ibdev, u8 port_num, struct ib_mad *mad,
		      u16 prev_lid)
{
	struct ib_port_info *pinfo;
	u16 lid;
	__be16 *base;
	u32 bn, pkey_change_bitmap;
	int i;

	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
	     mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
	    mad->mad_hdr.method == IB_MGMT_METHOD_SET)
		switch (mad->mad_hdr.attr_id) {
		case IB_SMP_ATTR_PORT_INFO:
			pinfo = (struct ib_port_info *) ((struct ib_smp *) mad)->data;
			lid = be16_to_cpu(pinfo->lid);

			update_sm_ah(dev, port_num,
				     be16_to_cpu(pinfo->sm_lid),
				     pinfo->neighbormtu_mastersmsl & 0xf);

			if (pinfo->clientrereg_resv_subnetto & 0x80)
				handle_client_rereg_event(dev, port_num);

			if (prev_lid != lid)
				handle_lid_change_event(dev, port_num);
			break;

		case IB_SMP_ATTR_PKEY_TABLE:
			if (!mlx4_is_mfunc(dev->dev)) {
				mlx4_ib_dispatch_event(dev, port_num,
						       IB_EVENT_PKEY_CHANGE);
				break;
			}

			/* at this point, we are running in the master.
			 * Slaves do not receive SMPs.
			 */
			bn  = be32_to_cpu(((struct ib_smp *)mad)->attr_mod) & 0xFFFF;
			base = (__be16 *) &(((struct ib_smp *)mad)->data[0]);
			pkey_change_bitmap = 0;
			for (i = 0; i < 32; i++) {
				pr_debug("PKEY[%d] = x%x\n",
					 i + bn*32, be16_to_cpu(base[i]));
				if (be16_to_cpu(base[i]) !=
				    dev->pkeys.phys_pkey_cache[port_num - 1][i + bn*32]) {
					pkey_change_bitmap |= (1 << i);
					dev->pkeys.phys_pkey_cache[port_num - 1][i + bn*32] =
						be16_to_cpu(base[i]);
				}
			}
			pr_debug("PKEY Change event: port=%d, "
				 "block=0x%x, change_bitmap=0x%x\n",
				 port_num, bn, pkey_change_bitmap);

			if (pkey_change_bitmap) {
				mlx4_ib_dispatch_event(dev, port_num,
						       IB_EVENT_PKEY_CHANGE);
				if (!dev->sriov.is_going_down)
					__propagate_pkey_ev(dev, port_num, bn,
							    pkey_change_bitmap);
			}
			break;

		case IB_SMP_ATTR_GUID_INFO:
			/* paravirtualized master's guid is guid 0 -- does not change */
			if (!mlx4_is_master(dev->dev))
				mlx4_ib_dispatch_event(dev, port_num,
						       IB_EVENT_GID_CHANGE);
			/* if master, notify relevant slaves */
			if (mlx4_is_master(dev->dev) &&
			    !dev->sriov.is_going_down) {
				bn = be32_to_cpu(((struct ib_smp *)mad)->attr_mod);
				mlx4_ib_update_cache_on_guid_change(dev, bn, port_num,
							(u8 *)(&((struct ib_smp *)mad)->data));
				mlx4_ib_notify_slaves_on_guid_change(dev, bn, port_num,
							(u8 *)(&((struct ib_smp *)mad)->data));
			}
			break;

		default:
			break;
		}
}
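
/*
 * Propagate a P_Key table change to every active slave whose virtual
 * P_Key table maps an index inside the changed 32-entry block.
 */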
static void __propagate_pkey_ev(struct mlx4_ib_dev *dev, int port_num,
				int block, u32 change_bitmap)
{
	int i, ix, slave, err;

	for (slave = 0; slave < dev->dev->caps.sqp_demux; slave++) {
		if (slave == mlx4_master_func_num(dev->dev))
			continue;
		if (!mlx4_is_slave_active(dev->dev, slave))
			continue;

		for (i = 0; i < 32; i++) {
			if (!(change_bitmap & (1 << i)))
				continue;
			for (ix = 0;
			     ix < dev->dev->caps.pkey_table_len[port_num]; ix++) {
				if (dev->pkeys.virt2phys_pkey[slave][port_num - 1]
				    [ix] == i + 32 * block) {
					err = mlx4_gen_pkey_eqe(dev->dev, slave, port_num);
					pr_debug("propagate_pkey_ev: slave %d,"
						 " port %d, ix %d (%d)\n",
						 slave, port_num, ix, err);
					break;
				}
			}
		}
	}
}
static void node_desc_override(struct ib_device *dev,
			       struct ib_mad *mad)
{
	unsigned long flags;

	if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
	     mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
	    mad->mad_hdr.method == IB_MGMT_METHOD_GET_RESP &&
	    mad->mad_hdr.attr_id == IB_SMP_ATTR_NODE_DESC) {
		spin_lock_irqsave(&to_mdev(dev)->sm_lock, flags);
		memcpy(((struct ib_smp *) mad)->data, dev->node_desc, 64);
		spin_unlock_irqrestore(&to_mdev(dev)->sm_lock, flags);
	}
}
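
/* Re-send an SM trap received on QP0/QP1 to the subnet manager using the cached SM address handle */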
static void forward_trap(struct mlx4_ib_dev *dev, u8 port_num, struct ib_mad *mad)
{
	int qpn = mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_SUBN_LID_ROUTED;
	struct ib_mad_send_buf *send_buf;
	struct ib_mad_agent *agent = dev->send_agent[port_num - 1][qpn];
	int ret;
	unsigned long flags;

	if (agent) {
		send_buf = ib_create_send_mad(agent, qpn, 0, 0, IB_MGMT_MAD_HDR,
					      IB_MGMT_MAD_DATA, GFP_ATOMIC);
		if (IS_ERR(send_buf))
			return;
		/*
		 * We rely here on the fact that MLX QPs don't use the
		 * address handle after the send is posted (this is
		 * wrong following the IB spec strictly, but we know
		 * it's OK for our devices).
		 */
		spin_lock_irqsave(&dev->sm_lock, flags);
		memcpy(send_buf->mad, mad, sizeof *mad);
		if ((send_buf->ah = dev->sm_ah[port_num - 1]))
			ret = ib_post_send_mad(send_buf, NULL);
		else
			ret = -EINVAL;
		spin_unlock_irqrestore(&dev->sm_lock, flags);

		if (ret)
			ib_free_send_mad(send_buf);
	}
}
static int mlx4_ib_demux_sa_handler(struct ib_device *ibdev, int port, int slave,
				    struct ib_sa_mad *sa_mad)
{
	int ret = 0;

	/* dispatch to different sa handlers */
	switch (be16_to_cpu(sa_mad->mad_hdr.attr_id)) {
	case IB_SA_ATTR_MC_MEMBER_REC:
		ret = mlx4_ib_mcg_demux_handler(ibdev, port, slave, sa_mad);
		break;
	default:
		break;
	}
	return ret;
}
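
/* Look up the slave whose cached (paravirtualized) port GUID matches the given GID interface id */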
int mlx4_ib_find_real_gid(struct ib_device *ibdev, u8 port, __be64 guid)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	int i;

	for (i = 0; i < dev->dev->caps.sqp_demux; i++) {
		if (dev->sriov.demux[port - 1].guid_cache[i] == guid)
			return i;
	}
	return -1;
}
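
/*
 * Translate a P_Key value into the given slave's virtual P_Key index,
 * preferring a full-membership match over a partial-membership one.
 */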
static int find_slave_port_pkey_ix(struct mlx4_ib_dev *dev, int slave,
				   u8 port, u16 pkey, u16 *ix)
{
	int i, ret;
	u8 unassigned_pkey_ix, pkey_ix, partial_ix = 0xFF;
	u16 slot_pkey;

	if (slave == mlx4_master_func_num(dev->dev))
		return ib_find_cached_pkey(&dev->ib_dev, port, pkey, ix);

	unassigned_pkey_ix = dev->dev->phys_caps.pkey_phys_table_len[port] - 1;

	for (i = 0; i < dev->dev->caps.pkey_table_len[port]; i++) {
		if (dev->pkeys.virt2phys_pkey[slave][port - 1][i] == unassigned_pkey_ix)
			continue;

		pkey_ix = dev->pkeys.virt2phys_pkey[slave][port - 1][i];

		ret = ib_get_cached_pkey(&dev->ib_dev, port, pkey_ix, &slot_pkey);
		if (ret)
			continue;
		if ((slot_pkey & 0x7FFF) == (pkey & 0x7FFF)) {
			if (slot_pkey & 0x8000) {
				*ix = (u16) pkey_ix;
				return 0;
			} else {
				/* take first partial pkey index found */
				if (partial_ix == 0xFF)
					partial_ix = pkey_ix;
			}
		}
	}

	if (partial_ix < 0xFF) {
		*ix = (u16) partial_ix;
		return 0;
	}

	return -EINVAL;
}
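
/*
 * Tunnel a MAD received from the wire to a slave's proxy QP: wrap the MAD
 * in a tunnel header that carries the relevant work-completion fields and
 * post it on the slave's tunnel QP.
 */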
int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
			  enum ib_qp_type dest_qpt, struct ib_wc *wc,
			  struct ib_grh *grh, struct ib_mad *mad)
{
	struct ib_sge list;
	struct ib_send_wr wr, *bad_wr;
	struct mlx4_ib_demux_pv_ctx *tun_ctx;
	struct mlx4_ib_demux_pv_qp *tun_qp;
	struct mlx4_rcv_tunnel_mad *tun_mad;
	struct ib_ah_attr attr;
	struct ib_ah *ah;
	struct ib_qp *src_qp = NULL;
	unsigned tun_tx_ix = 0;
	int dqpn;
	int ret = 0;
	u16 tun_pkey_ix;
	u16 cached_pkey;
	u8 is_eth = dev->dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH;

	if (dest_qpt > IB_QPT_GSI)
		return -EINVAL;

	tun_ctx = dev->sriov.demux[port-1].tun[slave];

	/* check if proxy qp created */
	if (!tun_ctx || tun_ctx->state != DEMUX_PV_STATE_ACTIVE)
		return -EAGAIN;

	if (!dest_qpt)
		tun_qp = &tun_ctx->qp[0];
	else
		tun_qp = &tun_ctx->qp[1];

	/* compute P_Key index to put in tunnel header for slave */
	if (dest_qpt) {
		u16 pkey_ix;
		ret = ib_get_cached_pkey(&dev->ib_dev, port, wc->pkey_index, &cached_pkey);
		if (ret)
			return -EINVAL;

		ret = find_slave_port_pkey_ix(dev, slave, port, cached_pkey, &pkey_ix);
		if (ret)
			return -EINVAL;
		tun_pkey_ix = pkey_ix;
	} else
		tun_pkey_ix = dev->pkeys.virt2phys_pkey[slave][port - 1][0];

	dqpn = dev->dev->phys_caps.base_proxy_sqpn + 8 * slave + port + (dest_qpt * 2) - 1;

	/* get tunnel tx data buf for slave */
	src_qp = tun_qp->qp;

	/* create ah. Just need an empty one with the port num for the post send.
	 * The driver will set the force loopback bit in post_send */
	memset(&attr, 0, sizeof attr);
	attr.port_num = port;
	if (is_eth) {
		memcpy(&attr.grh.dgid.raw[0], &grh->dgid.raw[0], 16);
		attr.ah_flags = IB_AH_GRH;
	}
	ah = ib_create_ah(tun_ctx->pd, &attr);
	if (IS_ERR(ah))
		return -ENOMEM;

	/* allocate tunnel tx buf after pass failure returns */
	spin_lock(&tun_qp->tx_lock);
	if (tun_qp->tx_ix_head - tun_qp->tx_ix_tail >=
	    (MLX4_NUM_TUNNEL_BUFS - 1))
		ret = -EAGAIN;
	else
		tun_tx_ix = (++tun_qp->tx_ix_head) & (MLX4_NUM_TUNNEL_BUFS - 1);
	spin_unlock(&tun_qp->tx_lock);
	if (ret)
		goto out;

	tun_mad = (struct mlx4_rcv_tunnel_mad *) (tun_qp->tx_ring[tun_tx_ix].buf.addr);
	if (tun_qp->tx_ring[tun_tx_ix].ah)
		ib_destroy_ah(tun_qp->tx_ring[tun_tx_ix].ah);
	tun_qp->tx_ring[tun_tx_ix].ah = ah;
	ib_dma_sync_single_for_cpu(&dev->ib_dev,
				   tun_qp->tx_ring[tun_tx_ix].buf.map,
				   sizeof (struct mlx4_rcv_tunnel_mad),
				   DMA_TO_DEVICE);

	/* copy over to tunnel buffer */
	if (grh)
		memcpy(&tun_mad->grh, grh, sizeof *grh);
	memcpy(&tun_mad->mad, mad, sizeof *mad);

	/* adjust tunnel data */
	tun_mad->hdr.pkey_index = cpu_to_be16(tun_pkey_ix);
	tun_mad->hdr.flags_src_qp = cpu_to_be32(wc->src_qp & 0xFFFFFF);
	tun_mad->hdr.g_ml_path = (grh && (wc->wc_flags & IB_WC_GRH)) ? 0x80 : 0;

	if (is_eth) {
		u16 vlan = 0;
		if (mlx4_get_slave_default_vlan(dev->dev, port, slave, &vlan,
						NULL)) {
			/* VST mode */
			if (vlan != wc->vlan_id)
				/* Packet vlan is not the VST-assigned vlan.
				 * Drop the packet.
				 */
				goto out;
			else
				/* Remove the vlan tag before forwarding
				 * the packet to the VF.
				 */
				vlan = 0xffff;
		} else {
			vlan = wc->vlan_id;
		}

		tun_mad->hdr.sl_vid = cpu_to_be16(vlan);
		memcpy((char *)&tun_mad->hdr.mac_31_0, &(wc->smac[0]), 4);
		memcpy((char *)&tun_mad->hdr.slid_mac_47_32, &(wc->smac[4]), 2);
	} else {
		tun_mad->hdr.sl_vid = cpu_to_be16(((u16)(wc->sl)) << 12);
		tun_mad->hdr.slid_mac_47_32 = cpu_to_be16(wc->slid);
	}

	ib_dma_sync_single_for_device(&dev->ib_dev,
				      tun_qp->tx_ring[tun_tx_ix].buf.map,
				      sizeof (struct mlx4_rcv_tunnel_mad),
				      DMA_TO_DEVICE);

	list.addr = tun_qp->tx_ring[tun_tx_ix].buf.map;
	list.length = sizeof (struct mlx4_rcv_tunnel_mad);
	list.lkey = tun_ctx->mr->lkey;

	wr.wr.ud.ah = ah;
	wr.wr.ud.port_num = port;
	wr.wr.ud.remote_qkey = IB_QP_SET_QKEY;
	wr.wr.ud.remote_qpn = dqpn;
	wr.next = NULL;
	wr.wr_id = ((u64) tun_tx_ix) | MLX4_TUN_SET_WRID_QPN(dest_qpt);
	wr.sg_list = &list;
	wr.num_sge = 1;
	wr.opcode = IB_WR_SEND;
	wr.send_flags = IB_SEND_SIGNALED;

	ret = ib_post_send(src_qp, &wr, &bad_wr);
out:
	if (ret)
		ib_destroy_ah(ah);
	return ret;
}
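
/*
 * Demultiplex a MAD arriving on the real QP0/QP1: determine the owning
 * slave (from the GRH, the TID, or a class-specific handler) and forward
 * the MAD to that slave over its tunnel QP.
 */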
static int mlx4_ib_demux_mad(struct ib_device *ibdev, u8 port,
			     struct ib_wc *wc, struct ib_grh *grh,
			     struct ib_mad *mad)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	int err;
	int slave;
	u8 *slave_id;
	int is_eth = 0;

	if (rdma_port_get_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND)
		is_eth = 0;
	else
		is_eth = 1;

	if (is_eth) {
		if (!(wc->wc_flags & IB_WC_GRH)) {
			mlx4_ib_warn(ibdev, "RoCE grh not present.\n");
			return -EINVAL;
		}
		if (mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_CM) {
			mlx4_ib_warn(ibdev, "RoCE mgmt class is not CM\n");
			return -EINVAL;
		}
		if (mlx4_get_slave_from_roce_gid(dev->dev, port, grh->dgid.raw, &slave)) {
			mlx4_ib_warn(ibdev, "failed matching grh\n");
			return -ENOENT;
		}
		if (slave >= dev->dev->caps.sqp_demux) {
			mlx4_ib_warn(ibdev, "slave id: %d is bigger than allowed:%d\n",
				     slave, dev->dev->caps.sqp_demux);
			return -ENOENT;
		}

		if (mlx4_ib_demux_cm_handler(ibdev, port, NULL, mad))
			return 0;

		err = mlx4_ib_send_to_slave(dev, slave, port, wc->qp->qp_type, wc, grh, mad);
		if (err)
			pr_debug("failed sending to slave %d via tunnel qp (%d)\n",
				 slave, err);
		return 0;
	}

	/* Initially assume that this mad is for us */
	slave = mlx4_master_func_num(dev->dev);

	/* See if the slave id is encoded in a response mad */
	if (mad->mad_hdr.method & 0x80) {
		slave_id = (u8 *) &mad->mad_hdr.tid;
		slave = *slave_id;
		if (slave != 255) /* 255 indicates the dom0 */
			*slave_id = 0; /* remap tid */
	}

	/* If a grh is present, we demux according to it */
	if (wc->wc_flags & IB_WC_GRH) {
		slave = mlx4_ib_find_real_gid(ibdev, port, grh->dgid.global.interface_id);
		if (slave < 0) {
			mlx4_ib_warn(ibdev, "failed matching grh\n");
			return -ENOENT;
		}
	}
	/* Class-specific handling */
	switch (mad->mad_hdr.mgmt_class) {
	case IB_MGMT_CLASS_SUBN_LID_ROUTED:
	case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
		/* 255 indicates the dom0 */
		if (slave != 255 && slave != mlx4_master_func_num(dev->dev)) {
			if (!mlx4_vf_smi_enabled(dev->dev, slave, port))
				return -EPERM;
			/* for a VF. drop unsolicited MADs */
			if (!(mad->mad_hdr.method & IB_MGMT_METHOD_RESP)) {
				mlx4_ib_warn(ibdev, "demux QP0. rejecting unsolicited mad for slave %d class 0x%x, method 0x%x\n",
					     slave, mad->mad_hdr.mgmt_class,
					     mad->mad_hdr.method);
				return -EINVAL;
			}
		}
		break;
	case IB_MGMT_CLASS_SUBN_ADM:
		if (mlx4_ib_demux_sa_handler(ibdev, port, slave,
					     (struct ib_sa_mad *) mad))
			return 0;
		break;
	case IB_MGMT_CLASS_CM:
		if (mlx4_ib_demux_cm_handler(ibdev, port, &slave, mad))
			return 0;
		break;
	case IB_MGMT_CLASS_DEVICE_MGMT:
		if (mad->mad_hdr.method != IB_MGMT_METHOD_GET_RESP)
			return 0;
		break;
	default:
		/* Drop unsupported classes for slaves in tunnel mode */
		if (slave != mlx4_master_func_num(dev->dev)) {
			pr_debug("dropping unsupported ingress mad from class:%d "
				 "for slave:%d\n", mad->mad_hdr.mgmt_class, slave);
			return 0;
		}
	}
	/* make sure that a slave id of 255 (dom0 marker) did not slip through unhandled */
	if (slave >= dev->dev->caps.sqp_demux) {
		mlx4_ib_warn(ibdev, "slave id: %d is bigger than allowed:%d\n",
			     slave, dev->dev->caps.sqp_demux);
		return -ENOENT;
	}

	err = mlx4_ib_send_to_slave(dev, slave, port, wc->qp->qp_type, wc, grh, mad);
	if (err)
		pr_debug("failed sending to slave %d via tunnel qp (%d)\n",
			 slave, err);
	return 0;
}
*ibdev
, int mad_flags
, u8 port_num
,
725 struct ib_wc
*in_wc
, struct ib_grh
*in_grh
,
726 struct ib_mad
*in_mad
, struct ib_mad
*out_mad
)
728 u16 slid
, prev_lid
= 0;
730 struct ib_port_attr pattr
;
732 if (in_wc
&& in_wc
->qp
->qp_num
) {
733 pr_debug("received MAD: slid:%d sqpn:%d "
734 "dlid_bits:%d dqpn:%d wc_flags:0x%x, cls %x, mtd %x, atr %x\n",
735 in_wc
->slid
, in_wc
->src_qp
,
736 in_wc
->dlid_path_bits
,
739 in_mad
->mad_hdr
.mgmt_class
, in_mad
->mad_hdr
.method
,
740 be16_to_cpu(in_mad
->mad_hdr
.attr_id
));
741 if (in_wc
->wc_flags
& IB_WC_GRH
) {
742 pr_debug("sgid_hi:0x%016llx sgid_lo:0x%016llx\n",
743 be64_to_cpu(in_grh
->sgid
.global
.subnet_prefix
),
744 be64_to_cpu(in_grh
->sgid
.global
.interface_id
));
745 pr_debug("dgid_hi:0x%016llx dgid_lo:0x%016llx\n",
746 be64_to_cpu(in_grh
->dgid
.global
.subnet_prefix
),
747 be64_to_cpu(in_grh
->dgid
.global
.interface_id
));
751 slid
= in_wc
? in_wc
->slid
: be16_to_cpu(IB_LID_PERMISSIVE
);
753 if (in_mad
->mad_hdr
.method
== IB_MGMT_METHOD_TRAP
&& slid
== 0) {
754 forward_trap(to_mdev(ibdev
), port_num
, in_mad
);
755 return IB_MAD_RESULT_SUCCESS
| IB_MAD_RESULT_CONSUMED
;
758 if (in_mad
->mad_hdr
.mgmt_class
== IB_MGMT_CLASS_SUBN_LID_ROUTED
||
759 in_mad
->mad_hdr
.mgmt_class
== IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
) {
760 if (in_mad
->mad_hdr
.method
!= IB_MGMT_METHOD_GET
&&
761 in_mad
->mad_hdr
.method
!= IB_MGMT_METHOD_SET
&&
762 in_mad
->mad_hdr
.method
!= IB_MGMT_METHOD_TRAP_REPRESS
)
763 return IB_MAD_RESULT_SUCCESS
;
766 * Don't process SMInfo queries -- the SMA can't handle them.
768 if (in_mad
->mad_hdr
.attr_id
== IB_SMP_ATTR_SM_INFO
)
769 return IB_MAD_RESULT_SUCCESS
;
770 } else if (in_mad
->mad_hdr
.mgmt_class
== IB_MGMT_CLASS_PERF_MGMT
||
771 in_mad
->mad_hdr
.mgmt_class
== MLX4_IB_VENDOR_CLASS1
||
772 in_mad
->mad_hdr
.mgmt_class
== MLX4_IB_VENDOR_CLASS2
||
773 in_mad
->mad_hdr
.mgmt_class
== IB_MGMT_CLASS_CONG_MGMT
) {
774 if (in_mad
->mad_hdr
.method
!= IB_MGMT_METHOD_GET
&&
775 in_mad
->mad_hdr
.method
!= IB_MGMT_METHOD_SET
)
776 return IB_MAD_RESULT_SUCCESS
;
778 return IB_MAD_RESULT_SUCCESS
;
780 if ((in_mad
->mad_hdr
.mgmt_class
== IB_MGMT_CLASS_SUBN_LID_ROUTED
||
781 in_mad
->mad_hdr
.mgmt_class
== IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
) &&
782 in_mad
->mad_hdr
.method
== IB_MGMT_METHOD_SET
&&
783 in_mad
->mad_hdr
.attr_id
== IB_SMP_ATTR_PORT_INFO
&&
784 !ib_query_port(ibdev
, port_num
, &pattr
))
785 prev_lid
= pattr
.lid
;
787 err
= mlx4_MAD_IFC(to_mdev(ibdev
),
788 (mad_flags
& IB_MAD_IGNORE_MKEY
? MLX4_MAD_IFC_IGNORE_MKEY
: 0) |
789 (mad_flags
& IB_MAD_IGNORE_BKEY
? MLX4_MAD_IFC_IGNORE_BKEY
: 0) |
790 MLX4_MAD_IFC_NET_VIEW
,
791 port_num
, in_wc
, in_grh
, in_mad
, out_mad
);
793 return IB_MAD_RESULT_FAILURE
;
795 if (!out_mad
->mad_hdr
.status
) {
796 if (!(to_mdev(ibdev
)->dev
->caps
.flags
& MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV
))
797 smp_snoop(ibdev
, port_num
, in_mad
, prev_lid
);
798 /* slaves get node desc from FW */
799 if (!mlx4_is_slave(to_mdev(ibdev
)->dev
))
800 node_desc_override(ibdev
, out_mad
);
803 /* set return bit in status of directed route responses */
804 if (in_mad
->mad_hdr
.mgmt_class
== IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
)
805 out_mad
->mad_hdr
.status
|= cpu_to_be16(1 << 15);
807 if (in_mad
->mad_hdr
.method
== IB_MGMT_METHOD_TRAP_REPRESS
)
808 /* no response for trap repress */
809 return IB_MAD_RESULT_SUCCESS
| IB_MAD_RESULT_CONSUMED
;
811 return IB_MAD_RESULT_SUCCESS
| IB_MAD_RESULT_REPLY
;
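
/* Translate the 64-bit hardware flow counters into the 32-bit PMA PortCounters format */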
static void edit_counter(struct mlx4_counter *cnt,
			 struct ib_pma_portcounters *pma_cnt)
{
	ASSIGN_32BIT_COUNTER(pma_cnt->port_xmit_data,
			     (be64_to_cpu(cnt->tx_bytes) >> 2));
	ASSIGN_32BIT_COUNTER(pma_cnt->port_rcv_data,
			     (be64_to_cpu(cnt->rx_bytes) >> 2));
	ASSIGN_32BIT_COUNTER(pma_cnt->port_xmit_packets,
			     be64_to_cpu(cnt->tx_frames));
	ASSIGN_32BIT_COUNTER(pma_cnt->port_rcv_packets,
			     be64_to_cpu(cnt->rx_frames));
}
static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
			    struct ib_wc *in_wc, struct ib_grh *in_grh,
			    struct ib_mad *in_mad, struct ib_mad *out_mad)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	int err;
	u32 inmod = dev->counters[port_num - 1] & 0xffff;
	u8 mode;

	if (in_mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_PERF_MGMT)
		return -EINVAL;

	mailbox = mlx4_alloc_cmd_mailbox(dev->dev);
	if (IS_ERR(mailbox))
		return IB_MAD_RESULT_FAILURE;

	err = mlx4_cmd_box(dev->dev, 0, mailbox->dma, inmod, 0,
			   MLX4_CMD_QUERY_IF_STAT, MLX4_CMD_TIME_CLASS_C,
			   MLX4_CMD_WRAPPED);
	if (err)
		err = IB_MAD_RESULT_FAILURE;
	else {
		memset(out_mad->data, 0, sizeof out_mad->data);
		mode = ((struct mlx4_counter *)mailbox->buf)->counter_mode;
		switch (mode & 0xf) {
		case 0:
			edit_counter(mailbox->buf,
				     (void *)(out_mad->data + 40));
			err = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
			break;
		default:
			err = IB_MAD_RESULT_FAILURE;
		}
	}

	mlx4_free_cmd_mailbox(dev->dev, mailbox);

	return err;
}
int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
			struct ib_wc *in_wc, struct ib_grh *in_grh,
			struct ib_mad *in_mad, struct ib_mad *out_mad)
{
	switch (rdma_port_get_link_layer(ibdev, port_num)) {
	case IB_LINK_LAYER_INFINIBAND:
		return ib_process_mad(ibdev, mad_flags, port_num, in_wc,
				      in_grh, in_mad, out_mad);
	case IB_LINK_LAYER_ETHERNET:
		return iboe_process_mad(ibdev, mad_flags, port_num, in_wc,
					in_grh, in_mad, out_mad);
	default:
		return -EINVAL;
	}
}
static void send_handler(struct ib_mad_agent *agent,
			 struct ib_mad_send_wc *mad_send_wc)
{
	if (mad_send_wc->send_buf->context[0])
		ib_destroy_ah(mad_send_wc->send_buf->context[0]);
	ib_free_send_mad(mad_send_wc->send_buf);
}
int mlx4_ib_mad_init(struct mlx4_ib_dev *dev)
{
	struct ib_mad_agent *agent;
	int p, q;
	int ret;
	enum rdma_link_layer ll;

	for (p = 0; p < dev->num_ports; ++p) {
		ll = rdma_port_get_link_layer(&dev->ib_dev, p + 1);
		for (q = 0; q <= 1; ++q) {
			if (ll == IB_LINK_LAYER_INFINIBAND) {
				agent = ib_register_mad_agent(&dev->ib_dev, p + 1,
							      q ? IB_QPT_GSI : IB_QPT_SMI,
							      NULL, 0, send_handler,
							      NULL, NULL, 0);
				if (IS_ERR(agent)) {
					ret = PTR_ERR(agent);
					goto err;
				}
				dev->send_agent[p][q] = agent;
			} else
				dev->send_agent[p][q] = NULL;
		}
	}

	return 0;

err:
	for (p = 0; p < dev->num_ports; ++p)
		for (q = 0; q <= 1; ++q)
			if (dev->send_agent[p][q])
				ib_unregister_mad_agent(dev->send_agent[p][q]);

	return ret;
}
void mlx4_ib_mad_cleanup(struct mlx4_ib_dev *dev)
{
	struct ib_mad_agent *agent;
	int p, q;

	for (p = 0; p < dev->num_ports; ++p) {
		for (q = 0; q <= 1; ++q) {
			agent = dev->send_agent[p][q];
			if (agent) {
				dev->send_agent[p][q] = NULL;
				ib_unregister_mad_agent(agent);
			}
		}

		if (dev->sm_ah[p])
			ib_destroy_ah(dev->sm_ah[p]);
	}
}
static void handle_lid_change_event(struct mlx4_ib_dev *dev, u8 port_num)
{
	mlx4_ib_dispatch_event(dev, port_num, IB_EVENT_LID_CHANGE);

	if (mlx4_is_master(dev->dev) && !dev->sriov.is_going_down)
		mlx4_gen_slaves_port_mgt_ev(dev->dev, port_num,
					    MLX4_EQ_PORT_INFO_LID_CHANGE_MASK);
}
static void handle_client_rereg_event(struct mlx4_ib_dev *dev, u8 port_num)
{
	/* re-configure the alias-guid and mcg's */
	if (mlx4_is_master(dev->dev)) {
		mlx4_ib_invalidate_all_guid_record(dev, port_num);

		if (!dev->sriov.is_going_down) {
			mlx4_ib_mcg_port_cleanup(&dev->sriov.demux[port_num - 1], 0);
			mlx4_gen_slaves_port_mgt_ev(dev->dev, port_num,
						    MLX4_EQ_PORT_INFO_CLIENT_REREG_MASK);
		}
	}
	mlx4_ib_dispatch_event(dev, port_num, IB_EVENT_CLIENT_REREGISTER);
}
static void propagate_pkey_ev(struct mlx4_ib_dev *dev, int port_num,
			      struct mlx4_eqe *eqe)
{
	__propagate_pkey_ev(dev, port_num, GET_BLK_PTR_FROM_EQE(eqe),
			    GET_MASK_FROM_EQE(eqe));
}
static void handle_slaves_guid_change(struct mlx4_ib_dev *dev, u8 port_num,
				      u32 guid_tbl_blk_num, u32 change_bitmap)
{
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int i;

	if (!mlx4_is_mfunc(dev->dev) || !mlx4_is_master(dev->dev))
		return;

	in_mad  = kmalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad) {
		mlx4_ib_warn(&dev->ib_dev, "failed to allocate memory for guid info mads\n");
		goto out;
	}

	guid_tbl_blk_num *= 4;

	for (i = 0; i < 4; i++) {
		if (change_bitmap && (!((change_bitmap >> (8 * i)) & 0xff)))
			continue;
		memset(in_mad, 0, sizeof *in_mad);
		memset(out_mad, 0, sizeof *out_mad);

		in_mad->base_version  = 1;
		in_mad->mgmt_class    = IB_MGMT_CLASS_SUBN_LID_ROUTED;
		in_mad->class_version = 1;
		in_mad->method        = IB_MGMT_METHOD_GET;
		in_mad->attr_id       = IB_SMP_ATTR_GUID_INFO;
		in_mad->attr_mod      = cpu_to_be32(guid_tbl_blk_num + i);

		if (mlx4_MAD_IFC(dev,
				 MLX4_MAD_IFC_IGNORE_KEYS | MLX4_MAD_IFC_NET_VIEW,
				 port_num, NULL, NULL, in_mad, out_mad)) {
			mlx4_ib_warn(&dev->ib_dev, "Failed in get GUID INFO MAD_IFC\n");
			goto out;
		}

		mlx4_ib_update_cache_on_guid_change(dev, guid_tbl_blk_num + i,
						    port_num,
						    (u8 *)(&((struct ib_smp *)out_mad)->data));
		mlx4_ib_notify_slaves_on_guid_change(dev, guid_tbl_blk_num + i,
						     port_num,
						     (u8 *)(&((struct ib_smp *)out_mad)->data));
	}

out:
	kfree(in_mad);
	kfree(out_mad);
	return;
}
*work
)
1033 struct ib_event_work
*ew
= container_of(work
, struct ib_event_work
, work
);
1034 struct mlx4_ib_dev
*dev
= ew
->ib_dev
;
1035 struct mlx4_eqe
*eqe
= &(ew
->ib_eqe
);
1036 u8 port
= eqe
->event
.port_mgmt_change
.port
;
1041 switch (eqe
->subtype
) {
1042 case MLX4_DEV_PMC_SUBTYPE_PORT_INFO
:
1043 changed_attr
= be32_to_cpu(eqe
->event
.port_mgmt_change
.params
.port_info
.changed_attr
);
1045 /* Update the SM ah - This should be done before handling
1046 the other changed attributes so that MADs can be sent to the SM */
1047 if (changed_attr
& MSTR_SM_CHANGE_MASK
) {
1048 u16 lid
= be16_to_cpu(eqe
->event
.port_mgmt_change
.params
.port_info
.mstr_sm_lid
);
1049 u8 sl
= eqe
->event
.port_mgmt_change
.params
.port_info
.mstr_sm_sl
& 0xf;
1050 update_sm_ah(dev
, port
, lid
, sl
);
1053 /* Check if it is a lid change event */
1054 if (changed_attr
& MLX4_EQ_PORT_INFO_LID_CHANGE_MASK
)
1055 handle_lid_change_event(dev
, port
);
1057 /* Generate GUID changed event */
1058 if (changed_attr
& MLX4_EQ_PORT_INFO_GID_PFX_CHANGE_MASK
) {
1059 mlx4_ib_dispatch_event(dev
, port
, IB_EVENT_GID_CHANGE
);
1060 /*if master, notify all slaves*/
1061 if (mlx4_is_master(dev
->dev
))
1062 mlx4_gen_slaves_port_mgt_ev(dev
->dev
, port
,
1063 MLX4_EQ_PORT_INFO_GID_PFX_CHANGE_MASK
);
1066 if (changed_attr
& MLX4_EQ_PORT_INFO_CLIENT_REREG_MASK
)
1067 handle_client_rereg_event(dev
, port
);
1070 case MLX4_DEV_PMC_SUBTYPE_PKEY_TABLE
:
1071 mlx4_ib_dispatch_event(dev
, port
, IB_EVENT_PKEY_CHANGE
);
1072 if (mlx4_is_master(dev
->dev
) && !dev
->sriov
.is_going_down
)
1073 propagate_pkey_ev(dev
, port
, eqe
);
1075 case MLX4_DEV_PMC_SUBTYPE_GUID_INFO
:
1076 /* paravirtualized master's guid is guid 0 -- does not change */
1077 if (!mlx4_is_master(dev
->dev
))
1078 mlx4_ib_dispatch_event(dev
, port
, IB_EVENT_GID_CHANGE
);
1079 /*if master, notify relevant slaves*/
1080 else if (!dev
->sriov
.is_going_down
) {
1081 tbl_block
= GET_BLK_PTR_FROM_EQE(eqe
);
1082 change_bitmap
= GET_MASK_FROM_EQE(eqe
);
1083 handle_slaves_guid_change(dev
, port
, tbl_block
, change_bitmap
);
1087 pr_warn("Unsupported subtype 0x%x for "
1088 "Port Management Change event\n", eqe
->subtype
);
void mlx4_ib_dispatch_event(struct mlx4_ib_dev *dev, u8 port_num,
			    enum ib_event_type type)
{
	struct ib_event event;

	event.device		= &dev->ib_dev;
	event.element.port_num	= port_num;
	event.event		= type;

	ib_dispatch_event(&event);
}
static void mlx4_ib_tunnel_comp_handler(struct ib_cq *cq, void *arg)
{
	unsigned long flags;
	struct mlx4_ib_demux_pv_ctx *ctx = cq->cq_context;
	struct mlx4_ib_dev *dev = to_mdev(ctx->ib_dev);
	spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
	if (!dev->sriov.is_going_down && ctx->state == DEMUX_PV_STATE_ACTIVE)
		queue_work(ctx->wq, &ctx->work);
	spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
}
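
/* Repost a receive buffer on a tunnel or proxy special QP */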
static int mlx4_ib_post_pv_qp_buf(struct mlx4_ib_demux_pv_ctx *ctx,
				  struct mlx4_ib_demux_pv_qp *tun_qp,
				  int index)
{
	struct ib_sge sg_list;
	struct ib_recv_wr recv_wr, *bad_recv_wr;
	int size;

	size = (tun_qp->qp->qp_type == IB_QPT_UD) ?
		sizeof (struct mlx4_tunnel_mad) : sizeof (struct mlx4_mad_rcv_buf);

	sg_list.addr = tun_qp->ring[index].map;
	sg_list.length = size;
	sg_list.lkey = ctx->mr->lkey;

	recv_wr.next = NULL;
	recv_wr.sg_list = &sg_list;
	recv_wr.num_sge = 1;
	recv_wr.wr_id = (u64) index | MLX4_TUN_WRID_RECV |
		MLX4_TUN_SET_WRID_QPN(tun_qp->proxy_qpt);
	ib_dma_sync_single_for_device(ctx->ib_dev, tun_qp->ring[index].map,
				      size, DMA_FROM_DEVICE);
	return ib_post_recv(tun_qp->qp, &recv_wr, &bad_recv_wr);
}
static int mlx4_ib_multiplex_sa_handler(struct ib_device *ibdev, int port,
					int slave, struct ib_sa_mad *sa_mad)
{
	int ret = 0;

	/* dispatch to different sa handlers */
	switch (be16_to_cpu(sa_mad->mad_hdr.attr_id)) {
	case IB_SA_ATTR_MC_MEMBER_REC:
		ret = mlx4_ib_mcg_multiplex_handler(ibdev, port, slave, sa_mad);
		break;
	default:
		break;
	}
	return ret;
}
static int is_proxy_qp0(struct mlx4_ib_dev *dev, int qpn, int slave)
{
	int proxy_start = dev->dev->phys_caps.base_proxy_sqpn + 8 * slave;

	return (qpn >= proxy_start && qpn <= proxy_start + 1);
}
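
/*
 * Send a MAD generated by a slave onto the wire through the real QP0/QP1,
 * using the P_Key index and address-handle attributes requested by the slave.
 */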
int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port,
			 enum ib_qp_type dest_qpt, u16 pkey_index,
			 u32 remote_qpn, u32 qkey, struct ib_ah_attr *attr,
			 u8 *s_mac, struct ib_mad *mad)
{
	struct ib_sge list;
	struct ib_send_wr wr, *bad_wr;
	struct mlx4_ib_demux_pv_ctx *sqp_ctx;
	struct mlx4_ib_demux_pv_qp *sqp;
	struct mlx4_mad_snd_buf *sqp_mad;
	struct ib_ah *ah;
	struct ib_qp *send_qp = NULL;
	unsigned wire_tx_ix = 0;
	int ret = 0;
	u16 wire_pkey_ix;
	int src_qpnum;
	u8 sgid_index;

	sqp_ctx = dev->sriov.sqps[port-1];

	/* check if proxy qp created */
	if (!sqp_ctx || sqp_ctx->state != DEMUX_PV_STATE_ACTIVE)
		return -EAGAIN;

	if (dest_qpt == IB_QPT_SMI) {
		src_qpnum = 0;
		sqp = &sqp_ctx->qp[0];
		wire_pkey_ix = dev->pkeys.virt2phys_pkey[slave][port - 1][0];
	} else {
		src_qpnum = 1;
		sqp = &sqp_ctx->qp[1];
		wire_pkey_ix = dev->pkeys.virt2phys_pkey[slave][port - 1][pkey_index];
	}

	send_qp = sqp->qp;

	/* create ah */
	sgid_index = attr->grh.sgid_index;
	attr->grh.sgid_index = 0;
	ah = ib_create_ah(sqp_ctx->pd, attr);
	if (IS_ERR(ah))
		return -ENOMEM;
	attr->grh.sgid_index = sgid_index;
	to_mah(ah)->av.ib.gid_index = sgid_index;
	/* get rid of force-loopback bit */
	to_mah(ah)->av.ib.port_pd &= cpu_to_be32(0x7FFFFFFF);
	spin_lock(&sqp->tx_lock);
	if (sqp->tx_ix_head - sqp->tx_ix_tail >=
	    (MLX4_NUM_TUNNEL_BUFS - 1))
		ret = -EAGAIN;
	else
		wire_tx_ix = (++sqp->tx_ix_head) & (MLX4_NUM_TUNNEL_BUFS - 1);
	spin_unlock(&sqp->tx_lock);
	if (ret)
		goto out;

	sqp_mad = (struct mlx4_mad_snd_buf *) (sqp->tx_ring[wire_tx_ix].buf.addr);
	if (sqp->tx_ring[wire_tx_ix].ah)
		ib_destroy_ah(sqp->tx_ring[wire_tx_ix].ah);
	sqp->tx_ring[wire_tx_ix].ah = ah;
	ib_dma_sync_single_for_cpu(&dev->ib_dev,
				   sqp->tx_ring[wire_tx_ix].buf.map,
				   sizeof (struct mlx4_mad_snd_buf),
				   DMA_TO_DEVICE);

	memcpy(&sqp_mad->payload, mad, sizeof *mad);

	ib_dma_sync_single_for_device(&dev->ib_dev,
				      sqp->tx_ring[wire_tx_ix].buf.map,
				      sizeof (struct mlx4_mad_snd_buf),
				      DMA_TO_DEVICE);

	list.addr = sqp->tx_ring[wire_tx_ix].buf.map;
	list.length = sizeof (struct mlx4_mad_snd_buf);
	list.lkey = sqp_ctx->mr->lkey;

	wr.wr.ud.ah = ah;
	wr.wr.ud.port_num = port;
	wr.wr.ud.pkey_index = wire_pkey_ix;
	wr.wr.ud.remote_qkey = qkey;
	wr.wr.ud.remote_qpn = remote_qpn;
	wr.next = NULL;
	wr.wr_id = ((u64) wire_tx_ix) | MLX4_TUN_SET_WRID_QPN(src_qpnum);
	wr.sg_list = &list;
	wr.num_sge = 1;
	wr.opcode = IB_WR_SEND;
	wr.send_flags = IB_SEND_SIGNALED;
	if (s_mac)
		memcpy(to_mah(ah)->av.eth.s_mac, s_mac, 6);

	ret = ib_post_send(send_qp, &wr, &bad_wr);
out:
	if (ret)
		ib_destroy_ah(ah);
	return ret;
}
static int get_slave_base_gid_ix(struct mlx4_ib_dev *dev, int slave, int port)
{
	if (rdma_port_get_link_layer(&dev->ib_dev, port) == IB_LINK_LAYER_INFINIBAND)
		return slave;
	return mlx4_get_base_gid_ix(dev->dev, slave, port);
}
static void fill_in_real_sgid_index(struct mlx4_ib_dev *dev, int slave, int port,
				    struct ib_ah_attr *ah_attr)
{
	if (rdma_port_get_link_layer(&dev->ib_dev, port) == IB_LINK_LAYER_INFINIBAND)
		ah_attr->grh.sgid_index = slave;
	else
		ah_attr->grh.sgid_index += get_slave_base_gid_ix(dev, slave, port);
}
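
/*
 * Multiplex a MAD that a slave posted on its tunnel QP: validate the
 * source proxy QP, stamp the slave id into the transaction ID, rebuild a
 * real address handle from the tunnelled fields and send the MAD to the wire.
 */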
static void mlx4_ib_multiplex_mad(struct mlx4_ib_demux_pv_ctx *ctx, struct ib_wc *wc)
{
	struct mlx4_ib_dev *dev = to_mdev(ctx->ib_dev);
	struct mlx4_ib_demux_pv_qp *tun_qp = &ctx->qp[MLX4_TUN_WRID_QPN(wc->wr_id)];
	int wr_ix = wc->wr_id & (MLX4_NUM_TUNNEL_BUFS - 1);
	struct mlx4_tunnel_mad *tunnel = tun_qp->ring[wr_ix].addr;
	struct mlx4_ib_ah ah;
	struct ib_ah_attr ah_attr;
	u8 *slave_id;
	int slave;
	int port;

	/* Get slave that sent this packet */
	if (wc->src_qp < dev->dev->phys_caps.base_proxy_sqpn ||
	    wc->src_qp >= dev->dev->phys_caps.base_proxy_sqpn + 8 * MLX4_MFUNC_MAX ||
	    (wc->src_qp & 0x1) != ctx->port - 1 ||
	    wc->src_qp & 0x4) {
		mlx4_ib_warn(ctx->ib_dev, "can't multiplex bad sqp:%d\n", wc->src_qp);
		return;
	}
	slave = ((wc->src_qp & ~0x7) - dev->dev->phys_caps.base_proxy_sqpn) / 8;
	if (slave != ctx->slave) {
		mlx4_ib_warn(ctx->ib_dev, "can't multiplex bad sqp:%d: "
			     "belongs to another slave\n", wc->src_qp);
		return;
	}

	/* Map transaction ID */
	ib_dma_sync_single_for_cpu(ctx->ib_dev, tun_qp->ring[wr_ix].map,
				   sizeof (struct mlx4_tunnel_mad),
				   DMA_FROM_DEVICE);
	switch (tunnel->mad.mad_hdr.method) {
	case IB_MGMT_METHOD_SET:
	case IB_MGMT_METHOD_GET:
	case IB_MGMT_METHOD_REPORT:
	case IB_SA_METHOD_GET_TABLE:
	case IB_SA_METHOD_DELETE:
	case IB_SA_METHOD_GET_MULTI:
	case IB_SA_METHOD_GET_TRACE_TBL:
		slave_id = (u8 *) &tunnel->mad.mad_hdr.tid;
		if (*slave_id) {
			mlx4_ib_warn(ctx->ib_dev, "egress mad has non-null tid msb:%d "
				     "class:%d slave:%d\n", *slave_id,
				     tunnel->mad.mad_hdr.mgmt_class, slave);
			return;
		} else
			*slave_id = slave;
		break;
	default:
		/* nothing */;
	}

	/* Class-specific handling */
	switch (tunnel->mad.mad_hdr.mgmt_class) {
	case IB_MGMT_CLASS_SUBN_LID_ROUTED:
	case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
		if (slave != mlx4_master_func_num(dev->dev) &&
		    !mlx4_vf_smi_enabled(dev->dev, slave, ctx->port))
			return;
		break;
	case IB_MGMT_CLASS_SUBN_ADM:
		if (mlx4_ib_multiplex_sa_handler(ctx->ib_dev, ctx->port, slave,
			      (struct ib_sa_mad *) &tunnel->mad))
			return;
		break;
	case IB_MGMT_CLASS_CM:
		if (mlx4_ib_multiplex_cm_handler(ctx->ib_dev, ctx->port, slave,
			      (struct ib_mad *) &tunnel->mad))
			return;
		break;
	case IB_MGMT_CLASS_DEVICE_MGMT:
		if (tunnel->mad.mad_hdr.method != IB_MGMT_METHOD_GET &&
		    tunnel->mad.mad_hdr.method != IB_MGMT_METHOD_SET)
			return;
		break;
	default:
		/* Drop unsupported classes for slaves in tunnel mode */
		if (slave != mlx4_master_func_num(dev->dev)) {
			mlx4_ib_warn(ctx->ib_dev, "dropping unsupported egress mad from class:%d "
				     "for slave:%d\n", tunnel->mad.mad_hdr.mgmt_class, slave);
			return;
		}
	}

	/* We are using standard ib_core services to send the mad, so generate a
	 * standard address handle by decoding the tunnelled mlx4_ah fields */
	memcpy(&ah.av, &tunnel->hdr.av, sizeof (struct mlx4_av));
	ah.ibah.device = ctx->ib_dev;
	mlx4_ib_query_ah(&ah.ibah, &ah_attr);
	if (ah_attr.ah_flags & IB_AH_GRH)
		fill_in_real_sgid_index(dev, slave, ctx->port, &ah_attr);

	port = mlx4_slave_convert_port(dev->dev, slave, ah_attr.port_num);
	if (port < 0)
		return;
	ah_attr.port_num = port;
	memcpy(ah_attr.dmac, tunnel->hdr.mac, 6);
	ah_attr.vlan_id = be16_to_cpu(tunnel->hdr.vlan);
	/* if the slave has a default vlan, use it */
	mlx4_get_slave_default_vlan(dev->dev, ctx->port, slave,
				    &ah_attr.vlan_id, &ah_attr.sl);

	mlx4_ib_send_to_wire(dev, slave, ctx->port,
			     is_proxy_qp0(dev, wc->src_qp, slave) ?
			     IB_QPT_SMI : IB_QPT_GSI,
			     be16_to_cpu(tunnel->hdr.pkey_index),
			     be32_to_cpu(tunnel->hdr.remote_qpn),
			     be32_to_cpu(tunnel->hdr.qkey),
			     &ah_attr, wc->smac, &tunnel->mad);
}
static int mlx4_ib_alloc_pv_bufs(struct mlx4_ib_demux_pv_ctx *ctx,
				 enum ib_qp_type qp_type, int is_tun)
{
	int i;
	struct mlx4_ib_demux_pv_qp *tun_qp;
	int rx_buf_size, tx_buf_size;

	if (qp_type > IB_QPT_GSI)
		return -EINVAL;

	tun_qp = &ctx->qp[qp_type];

	tun_qp->ring = kzalloc(sizeof (struct mlx4_ib_buf) * MLX4_NUM_TUNNEL_BUFS,
			       GFP_KERNEL);
	if (!tun_qp->ring)
		return -ENOMEM;

	tun_qp->tx_ring = kcalloc(MLX4_NUM_TUNNEL_BUFS,
				  sizeof (struct mlx4_ib_tun_tx_buf),
				  GFP_KERNEL);
	if (!tun_qp->tx_ring) {
		kfree(tun_qp->ring);
		tun_qp->ring = NULL;
		return -ENOMEM;
	}

	if (is_tun) {
		rx_buf_size = sizeof (struct mlx4_tunnel_mad);
		tx_buf_size = sizeof (struct mlx4_rcv_tunnel_mad);
	} else {
		rx_buf_size = sizeof (struct mlx4_mad_rcv_buf);
		tx_buf_size = sizeof (struct mlx4_mad_snd_buf);
	}

	for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
		tun_qp->ring[i].addr = kmalloc(rx_buf_size, GFP_KERNEL);
		if (!tun_qp->ring[i].addr)
			goto err;
		tun_qp->ring[i].map = ib_dma_map_single(ctx->ib_dev,
							tun_qp->ring[i].addr,
							rx_buf_size,
							DMA_FROM_DEVICE);
	}

	for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
		tun_qp->tx_ring[i].buf.addr =
			kmalloc(tx_buf_size, GFP_KERNEL);
		if (!tun_qp->tx_ring[i].buf.addr)
			goto tx_err;
		tun_qp->tx_ring[i].buf.map =
			ib_dma_map_single(ctx->ib_dev,
					  tun_qp->tx_ring[i].buf.addr,
					  tx_buf_size,
					  DMA_TO_DEVICE);
		tun_qp->tx_ring[i].ah = NULL;
	}
	spin_lock_init(&tun_qp->tx_lock);
	tun_qp->tx_ix_head = 0;
	tun_qp->tx_ix_tail = 0;
	tun_qp->proxy_qpt = qp_type;

	return 0;

tx_err:
	while (i > 0) {
		--i;
		ib_dma_unmap_single(ctx->ib_dev, tun_qp->tx_ring[i].buf.map,
				    tx_buf_size, DMA_TO_DEVICE);
		kfree(tun_qp->tx_ring[i].buf.addr);
	}
	kfree(tun_qp->tx_ring);
	tun_qp->tx_ring = NULL;
	i = MLX4_NUM_TUNNEL_BUFS;
err:
	while (i > 0) {
		--i;
		ib_dma_unmap_single(ctx->ib_dev, tun_qp->ring[i].map,
				    rx_buf_size, DMA_FROM_DEVICE);
		kfree(tun_qp->ring[i].addr);
	}
	kfree(tun_qp->ring);
	tun_qp->ring = NULL;
	return -ENOMEM;
}
static void mlx4_ib_free_pv_qp_bufs(struct mlx4_ib_demux_pv_ctx *ctx,
				    enum ib_qp_type qp_type, int is_tun)
{
	int i;
	struct mlx4_ib_demux_pv_qp *tun_qp;
	int rx_buf_size, tx_buf_size;

	if (qp_type > IB_QPT_GSI)
		return;

	tun_qp = &ctx->qp[qp_type];
	if (is_tun) {
		rx_buf_size = sizeof (struct mlx4_tunnel_mad);
		tx_buf_size = sizeof (struct mlx4_rcv_tunnel_mad);
	} else {
		rx_buf_size = sizeof (struct mlx4_mad_rcv_buf);
		tx_buf_size = sizeof (struct mlx4_mad_snd_buf);
	}

	for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
		ib_dma_unmap_single(ctx->ib_dev, tun_qp->ring[i].map,
				    rx_buf_size, DMA_FROM_DEVICE);
		kfree(tun_qp->ring[i].addr);
	}

	for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
		ib_dma_unmap_single(ctx->ib_dev, tun_qp->tx_ring[i].buf.map,
				    tx_buf_size, DMA_TO_DEVICE);
		kfree(tun_qp->tx_ring[i].buf.addr);
		if (tun_qp->tx_ring[i].ah)
			ib_destroy_ah(tun_qp->tx_ring[i].ah);
	}
	kfree(tun_qp->tx_ring);
	kfree(tun_qp->ring);
}
static void mlx4_ib_tunnel_comp_worker(struct work_struct *work)
{
	struct mlx4_ib_demux_pv_ctx *ctx;
	struct mlx4_ib_demux_pv_qp *tun_qp;
	struct ib_wc wc;
	int ret;
	ctx = container_of(work, struct mlx4_ib_demux_pv_ctx, work);
	ib_req_notify_cq(ctx->cq, IB_CQ_NEXT_COMP);

	while (ib_poll_cq(ctx->cq, 1, &wc) == 1) {
		tun_qp = &ctx->qp[MLX4_TUN_WRID_QPN(wc.wr_id)];
		if (wc.status == IB_WC_SUCCESS) {
			switch (wc.opcode) {
			case IB_WC_RECV:
				mlx4_ib_multiplex_mad(ctx, &wc);
				ret = mlx4_ib_post_pv_qp_buf(ctx, tun_qp,
							     wc.wr_id &
							     (MLX4_NUM_TUNNEL_BUFS - 1));
				if (ret)
					pr_err("Failed reposting tunnel "
					       "buf:%lld\n", wc.wr_id);
				break;
			case IB_WC_SEND:
				pr_debug("received tunnel send completion:"
					 "wrid=0x%llx, status=0x%x\n",
					 wc.wr_id, wc.status);
				ib_destroy_ah(tun_qp->tx_ring[wc.wr_id &
					      (MLX4_NUM_TUNNEL_BUFS - 1)].ah);
				tun_qp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
					= NULL;
				spin_lock(&tun_qp->tx_lock);
				tun_qp->tx_ix_tail++;
				spin_unlock(&tun_qp->tx_lock);

				break;
			default:
				break;
			}
		} else {
			pr_debug("mlx4_ib: completion error in tunnel: %d."
				 " status = %d, wrid = 0x%llx\n",
				 ctx->slave, wc.status, wc.wr_id);
			if (!MLX4_TUN_IS_RECV(wc.wr_id)) {
				ib_destroy_ah(tun_qp->tx_ring[wc.wr_id &
					      (MLX4_NUM_TUNNEL_BUFS - 1)].ah);
				tun_qp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
					= NULL;
				spin_lock(&tun_qp->tx_lock);
				tun_qp->tx_ix_tail++;
				spin_unlock(&tun_qp->tx_lock);
			}
		}
	}
}
static void pv_qp_event_handler(struct ib_event *event, void *qp_context)
{
	struct mlx4_ib_demux_pv_ctx *sqp = qp_context;

	/* It's worse than that! He's dead, Jim! */
	pr_err("Fatal error (%d) on a MAD QP on port %d\n",
	       event->event, sqp->port);
}
*ctx
,
1578 enum ib_qp_type qp_type
, int create_tun
)
1581 struct mlx4_ib_demux_pv_qp
*tun_qp
;
1582 struct mlx4_ib_qp_tunnel_init_attr qp_init_attr
;
1583 struct ib_qp_attr attr
;
1584 int qp_attr_mask_INIT
;
1586 if (qp_type
> IB_QPT_GSI
)
1589 tun_qp
= &ctx
->qp
[qp_type
];
1591 memset(&qp_init_attr
, 0, sizeof qp_init_attr
);
1592 qp_init_attr
.init_attr
.send_cq
= ctx
->cq
;
1593 qp_init_attr
.init_attr
.recv_cq
= ctx
->cq
;
1594 qp_init_attr
.init_attr
.sq_sig_type
= IB_SIGNAL_ALL_WR
;
1595 qp_init_attr
.init_attr
.cap
.max_send_wr
= MLX4_NUM_TUNNEL_BUFS
;
1596 qp_init_attr
.init_attr
.cap
.max_recv_wr
= MLX4_NUM_TUNNEL_BUFS
;
1597 qp_init_attr
.init_attr
.cap
.max_send_sge
= 1;
1598 qp_init_attr
.init_attr
.cap
.max_recv_sge
= 1;
1600 qp_init_attr
.init_attr
.qp_type
= IB_QPT_UD
;
1601 qp_init_attr
.init_attr
.create_flags
= MLX4_IB_SRIOV_TUNNEL_QP
;
1602 qp_init_attr
.port
= ctx
->port
;
1603 qp_init_attr
.slave
= ctx
->slave
;
1604 qp_init_attr
.proxy_qp_type
= qp_type
;
1605 qp_attr_mask_INIT
= IB_QP_STATE
| IB_QP_PKEY_INDEX
|
1606 IB_QP_QKEY
| IB_QP_PORT
;
1608 qp_init_attr
.init_attr
.qp_type
= qp_type
;
1609 qp_init_attr
.init_attr
.create_flags
= MLX4_IB_SRIOV_SQP
;
1610 qp_attr_mask_INIT
= IB_QP_STATE
| IB_QP_PKEY_INDEX
| IB_QP_QKEY
;
1612 qp_init_attr
.init_attr
.port_num
= ctx
->port
;
1613 qp_init_attr
.init_attr
.qp_context
= ctx
;
1614 qp_init_attr
.init_attr
.event_handler
= pv_qp_event_handler
;
1615 tun_qp
->qp
= ib_create_qp(ctx
->pd
, &qp_init_attr
.init_attr
);
1616 if (IS_ERR(tun_qp
->qp
)) {
1617 ret
= PTR_ERR(tun_qp
->qp
);
1619 pr_err("Couldn't create %s QP (%d)\n",
1620 create_tun
? "tunnel" : "special", ret
);
1624 memset(&attr
, 0, sizeof attr
);
1625 attr
.qp_state
= IB_QPS_INIT
;
1628 ret
= find_slave_port_pkey_ix(to_mdev(ctx
->ib_dev
), ctx
->slave
,
1629 ctx
->port
, IB_DEFAULT_PKEY_FULL
,
1631 if (ret
|| !create_tun
)
1633 to_mdev(ctx
->ib_dev
)->pkeys
.virt2phys_pkey
[ctx
->slave
][ctx
->port
- 1][0];
1634 attr
.qkey
= IB_QP1_QKEY
;
1635 attr
.port_num
= ctx
->port
;
1636 ret
= ib_modify_qp(tun_qp
->qp
, &attr
, qp_attr_mask_INIT
);
1638 pr_err("Couldn't change %s qp state to INIT (%d)\n",
1639 create_tun
? "tunnel" : "special", ret
);
1642 attr
.qp_state
= IB_QPS_RTR
;
1643 ret
= ib_modify_qp(tun_qp
->qp
, &attr
, IB_QP_STATE
);
1645 pr_err("Couldn't change %s qp state to RTR (%d)\n",
1646 create_tun
? "tunnel" : "special", ret
);
1649 attr
.qp_state
= IB_QPS_RTS
;
1651 ret
= ib_modify_qp(tun_qp
->qp
, &attr
, IB_QP_STATE
| IB_QP_SQ_PSN
);
1653 pr_err("Couldn't change %s qp state to RTS (%d)\n",
1654 create_tun
? "tunnel" : "special", ret
);
1658 for (i
= 0; i
< MLX4_NUM_TUNNEL_BUFS
; i
++) {
1659 ret
= mlx4_ib_post_pv_qp_buf(ctx
, tun_qp
, i
);
1661 pr_err(" mlx4_ib_post_pv_buf error"
1662 " (err = %d, i = %d)\n", ret
, i
);
1669 ib_destroy_qp(tun_qp
->qp
);
/*
 * IB MAD completion callback for real SQPs
 */
static void mlx4_ib_sqp_comp_worker(struct work_struct *work)
{
	struct mlx4_ib_demux_pv_ctx *ctx;
	struct mlx4_ib_demux_pv_qp *sqp;
	struct ib_wc wc;
	struct ib_grh *grh;
	struct ib_mad *mad;

	ctx = container_of(work, struct mlx4_ib_demux_pv_ctx, work);
	ib_req_notify_cq(ctx->cq, IB_CQ_NEXT_COMP);

	while (mlx4_ib_poll_cq(ctx->cq, 1, &wc) == 1) {
		sqp = &ctx->qp[MLX4_TUN_WRID_QPN(wc.wr_id)];
		if (wc.status == IB_WC_SUCCESS) {
			switch (wc.opcode) {
			case IB_WC_SEND:
				ib_destroy_ah(sqp->tx_ring[wc.wr_id &
					      (MLX4_NUM_TUNNEL_BUFS - 1)].ah);
				sqp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
					= NULL;
				spin_lock(&sqp->tx_lock);
				sqp->tx_ix_tail++;
				spin_unlock(&sqp->tx_lock);
				break;
			case IB_WC_RECV:
				mad = (struct ib_mad *) &(((struct mlx4_mad_rcv_buf *)
						(sqp->ring[wc.wr_id &
						(MLX4_NUM_TUNNEL_BUFS - 1)].addr))->payload);
				grh = &(((struct mlx4_mad_rcv_buf *)
						(sqp->ring[wc.wr_id &
						(MLX4_NUM_TUNNEL_BUFS - 1)].addr))->grh);
				mlx4_ib_demux_mad(ctx->ib_dev, ctx->port, &wc, grh, mad);
				if (mlx4_ib_post_pv_qp_buf(ctx, sqp, wc.wr_id &
							   (MLX4_NUM_TUNNEL_BUFS - 1)))
					pr_err("Failed reposting SQP "
					       "buf:%lld\n", wc.wr_id);
				break;
			default:
				break;
			}
		} else {
			pr_debug("mlx4_ib: completion error in tunnel: %d."
				 " status = %d, wrid = 0x%llx\n",
				 ctx->slave, wc.status, wc.wr_id);
			if (!MLX4_TUN_IS_RECV(wc.wr_id)) {
				ib_destroy_ah(sqp->tx_ring[wc.wr_id &
					      (MLX4_NUM_TUNNEL_BUFS - 1)].ah);
				sqp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
					= NULL;
				spin_lock(&sqp->tx_lock);
				sqp->tx_ix_tail++;
				spin_unlock(&sqp->tx_lock);
			}
		}
	}
}
*dev
, int slave
, int port
,
1736 struct mlx4_ib_demux_pv_ctx
**ret_ctx
)
1738 struct mlx4_ib_demux_pv_ctx
*ctx
;
1741 ctx
= kzalloc(sizeof (struct mlx4_ib_demux_pv_ctx
), GFP_KERNEL
);
1743 pr_err("failed allocating pv resource context "
1744 "for port %d, slave %d\n", port
, slave
);
1748 ctx
->ib_dev
= &dev
->ib_dev
;
static void free_pv_object(struct mlx4_ib_dev *dev, int slave, int port)
{
	if (dev->sriov.demux[port - 1].tun[slave]) {
		kfree(dev->sriov.demux[port - 1].tun[slave]);
		dev->sriov.demux[port - 1].tun[slave] = NULL;
	}
}
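
/* Allocate the CQ, PD, MR, buffers and QPs that make up one para-virtual tunnel/proxy context */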
static int create_pv_resources(struct ib_device *ibdev, int slave, int port,
			       int create_tun, struct mlx4_ib_demux_pv_ctx *ctx)
{
	int ret, cq_size;

	if (ctx->state != DEMUX_PV_STATE_DOWN)
		return -EEXIST;

	ctx->state = DEMUX_PV_STATE_STARTING;
	/* have QP0 only if link layer is IB */
	if (rdma_port_get_link_layer(ibdev, ctx->port) ==
	    IB_LINK_LAYER_INFINIBAND)
		ctx->has_smi = 1;

	if (ctx->has_smi) {
		ret = mlx4_ib_alloc_pv_bufs(ctx, IB_QPT_SMI, create_tun);
		if (ret) {
			pr_err("Failed allocating qp0 tunnel bufs (%d)\n", ret);
			goto err_out;
		}
	}

	ret = mlx4_ib_alloc_pv_bufs(ctx, IB_QPT_GSI, create_tun);
	if (ret) {
		pr_err("Failed allocating qp1 tunnel bufs (%d)\n", ret);
		goto err_out_qp0;
	}

	cq_size = 2 * MLX4_NUM_TUNNEL_BUFS;
	if (ctx->has_smi)
		cq_size *= 2;

	ctx->cq = ib_create_cq(ctx->ib_dev, mlx4_ib_tunnel_comp_handler,
			       NULL, ctx, cq_size, 0);
	if (IS_ERR(ctx->cq)) {
		ret = PTR_ERR(ctx->cq);
		pr_err("Couldn't create tunnel CQ (%d)\n", ret);
		goto err_buf;
	}

	ctx->pd = ib_alloc_pd(ctx->ib_dev);
	if (IS_ERR(ctx->pd)) {
		ret = PTR_ERR(ctx->pd);
		pr_err("Couldn't create tunnel PD (%d)\n", ret);
		goto err_cq;
	}

	ctx->mr = ib_get_dma_mr(ctx->pd, IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(ctx->mr)) {
		ret = PTR_ERR(ctx->mr);
		pr_err("Couldn't get tunnel DMA MR (%d)\n", ret);
		goto err_pd;
	}

	if (ctx->has_smi) {
		ret = create_pv_sqp(ctx, IB_QPT_SMI, create_tun);
		if (ret) {
			pr_err("Couldn't create %s QP0 (%d)\n",
			       create_tun ? "tunnel for" : "", ret);
			goto err_mr;
		}
	}

	ret = create_pv_sqp(ctx, IB_QPT_GSI, create_tun);
	if (ret) {
		pr_err("Couldn't create %s QP1 (%d)\n",
		       create_tun ? "tunnel for" : "", ret);
		goto err_qp0;
	}

	if (create_tun)
		INIT_WORK(&ctx->work, mlx4_ib_tunnel_comp_worker);
	else
		INIT_WORK(&ctx->work, mlx4_ib_sqp_comp_worker);

	ctx->wq = to_mdev(ibdev)->sriov.demux[port - 1].wq;

	ret = ib_req_notify_cq(ctx->cq, IB_CQ_NEXT_COMP);
	if (ret) {
		pr_err("Couldn't arm tunnel cq (%d)\n", ret);
		goto err_wq;
	}
	ctx->state = DEMUX_PV_STATE_ACTIVE;
	return 0;

err_wq:
	ctx->wq = NULL;
	ib_destroy_qp(ctx->qp[1].qp);
	ctx->qp[1].qp = NULL;

err_qp0:
	if (ctx->has_smi)
		ib_destroy_qp(ctx->qp[0].qp);
	ctx->qp[0].qp = NULL;

err_mr:
	ib_dereg_mr(ctx->mr);
	ctx->mr = NULL;

err_pd:
	ib_dealloc_pd(ctx->pd);
	ctx->pd = NULL;

err_cq:
	ib_destroy_cq(ctx->cq);
	ctx->cq = NULL;

err_buf:
	mlx4_ib_free_pv_qp_bufs(ctx, IB_QPT_GSI, create_tun);

err_out_qp0:
	if (ctx->has_smi)
		mlx4_ib_free_pv_qp_bufs(ctx, IB_QPT_SMI, create_tun);
err_out:
	ctx->state = DEMUX_PV_STATE_DOWN;
	return ret;
}
static void destroy_pv_resources(struct mlx4_ib_dev *dev, int slave, int port,
				 struct mlx4_ib_demux_pv_ctx *ctx, int flush)
{
	if (!ctx)
		return;
	if (ctx->state > DEMUX_PV_STATE_DOWN) {
		ctx->state = DEMUX_PV_STATE_DOWNING;
		if (flush)
			flush_workqueue(ctx->wq);
		if (ctx->has_smi) {
			ib_destroy_qp(ctx->qp[0].qp);
			ctx->qp[0].qp = NULL;
			mlx4_ib_free_pv_qp_bufs(ctx, IB_QPT_SMI, 1);
		}
		ib_destroy_qp(ctx->qp[1].qp);
		ctx->qp[1].qp = NULL;
		mlx4_ib_free_pv_qp_bufs(ctx, IB_QPT_GSI, 1);
		ib_dereg_mr(ctx->mr);
		ctx->mr = NULL;
		ib_dealloc_pd(ctx->pd);
		ctx->pd = NULL;
		ib_destroy_cq(ctx->cq);
		ctx->cq = NULL;
		ctx->state = DEMUX_PV_STATE_DOWN;
	}
}
static int mlx4_ib_tunnels_update(struct mlx4_ib_dev *dev, int slave,
				  int port, int do_init)
{
	int ret = 0;

	if (!do_init) {
		clean_vf_mcast(&dev->sriov.demux[port - 1], slave);
		/* for master, destroy real sqp resources */
		if (slave == mlx4_master_func_num(dev->dev))
			destroy_pv_resources(dev, slave, port,
					     dev->sriov.sqps[port - 1], 1);
		/* destroy the tunnel qp resources */
		destroy_pv_resources(dev, slave, port,
				     dev->sriov.demux[port - 1].tun[slave], 1);
		return 0;
	}

	/* create the tunnel qp resources */
	ret = create_pv_resources(&dev->ib_dev, slave, port, 1,
				  dev->sriov.demux[port - 1].tun[slave]);

	/* for master, create the real sqp resources */
	if (!ret && slave == mlx4_master_func_num(dev->dev))
		ret = create_pv_resources(&dev->ib_dev, slave, port, 0,
					  dev->sriov.sqps[port - 1]);
	return ret;
}
*work
)
1939 struct mlx4_ib_demux_work
*dmxw
;
1941 dmxw
= container_of(work
, struct mlx4_ib_demux_work
, work
);
1942 mlx4_ib_tunnels_update(dmxw
->dev
, dmxw
->slave
, (int) dmxw
->port
,
1948 static int mlx4_ib_alloc_demux_ctx(struct mlx4_ib_dev
*dev
,
1949 struct mlx4_ib_demux_ctx
*ctx
,
1956 ctx
->tun
= kcalloc(dev
->dev
->caps
.sqp_demux
,
1957 sizeof (struct mlx4_ib_demux_pv_ctx
*), GFP_KERNEL
);
1963 ctx
->ib_dev
= &dev
->ib_dev
;
1966 i
< min(dev
->dev
->caps
.sqp_demux
,
1967 (u16
)(dev
->dev
->persist
->num_vfs
+ 1));
1969 struct mlx4_active_ports actv_ports
=
1970 mlx4_get_active_ports(dev
->dev
, i
);
1972 if (!test_bit(port
- 1, actv_ports
.ports
))
1975 ret
= alloc_pv_object(dev
, i
, port
, &ctx
->tun
[i
]);
1982 ret
= mlx4_ib_mcg_port_init(ctx
);
1984 pr_err("Failed initializing mcg para-virt (%d)\n", ret
);
1988 snprintf(name
, sizeof name
, "mlx4_ibt%d", port
);
1989 ctx
->wq
= create_singlethread_workqueue(name
);
1991 pr_err("Failed to create tunnelling WQ for port %d\n", port
);
1996 snprintf(name
, sizeof name
, "mlx4_ibud%d", port
);
1997 ctx
->ud_wq
= create_singlethread_workqueue(name
);
1999 pr_err("Failed to create up/down WQ for port %d\n", port
);
2007 destroy_workqueue(ctx
->wq
);
2011 mlx4_ib_mcg_port_cleanup(ctx
, 1);
2013 for (i
= 0; i
< dev
->dev
->caps
.sqp_demux
; i
++)
2014 free_pv_object(dev
, i
, port
);
static void mlx4_ib_free_sqp_ctx(struct mlx4_ib_demux_pv_ctx *sqp_ctx)
{
	if (sqp_ctx->state > DEMUX_PV_STATE_DOWN) {
		sqp_ctx->state = DEMUX_PV_STATE_DOWNING;
		flush_workqueue(sqp_ctx->wq);
		if (sqp_ctx->has_smi) {
			ib_destroy_qp(sqp_ctx->qp[0].qp);
			sqp_ctx->qp[0].qp = NULL;
			mlx4_ib_free_pv_qp_bufs(sqp_ctx, IB_QPT_SMI, 0);
		}
		ib_destroy_qp(sqp_ctx->qp[1].qp);
		sqp_ctx->qp[1].qp = NULL;
		mlx4_ib_free_pv_qp_bufs(sqp_ctx, IB_QPT_GSI, 0);
		ib_dereg_mr(sqp_ctx->mr);
		sqp_ctx->mr = NULL;
		ib_dealloc_pd(sqp_ctx->pd);
		sqp_ctx->pd = NULL;
		ib_destroy_cq(sqp_ctx->cq);
		sqp_ctx->cq = NULL;
		sqp_ctx->state = DEMUX_PV_STATE_DOWN;
	}
}
static void mlx4_ib_free_demux_ctx(struct mlx4_ib_demux_ctx *ctx)
{
	int i;
	if (ctx) {
		struct mlx4_ib_dev *dev = to_mdev(ctx->ib_dev);
		mlx4_ib_mcg_port_cleanup(ctx, 1);
		for (i = 0; i < dev->dev->caps.sqp_demux; i++) {
			if (!ctx->tun[i])
				continue;
			if (ctx->tun[i]->state > DEMUX_PV_STATE_DOWN)
				ctx->tun[i]->state = DEMUX_PV_STATE_DOWNING;
		}
		flush_workqueue(ctx->wq);
		for (i = 0; i < dev->dev->caps.sqp_demux; i++) {
			destroy_pv_resources(dev, i, ctx->port, ctx->tun[i], 0);
			free_pv_object(dev, i, ctx->port);
		}
		kfree(ctx->tun);
		destroy_workqueue(ctx->ud_wq);
		destroy_workqueue(ctx->wq);
	}
}
static void mlx4_ib_master_tunnels(struct mlx4_ib_dev *dev, int do_init)
{
	int i;

	if (!mlx4_is_master(dev->dev))
		return;
	/* initialize or tear down tunnel QPs for the master */
	for (i = 0; i < dev->dev->caps.num_ports; i++)
		mlx4_ib_tunnels_update(dev, mlx4_master_func_num(dev->dev), i + 1, do_init);
	return;
}
int mlx4_ib_init_sriov(struct mlx4_ib_dev *dev)
{
	int i = 0;
	int err;

	if (!mlx4_is_mfunc(dev->dev))
		return 0;

	dev->sriov.is_going_down = 0;
	spin_lock_init(&dev->sriov.going_down_lock);
	mlx4_ib_cm_paravirt_init(dev);

	mlx4_ib_warn(&dev->ib_dev, "multi-function enabled\n");

	if (mlx4_is_slave(dev->dev)) {
		mlx4_ib_warn(&dev->ib_dev, "operating in qp1 tunnel mode\n");
		return 0;
	}

	for (i = 0; i < dev->dev->caps.sqp_demux; i++) {
		if (i == mlx4_master_func_num(dev->dev))
			mlx4_put_slave_node_guid(dev->dev, i, dev->ib_dev.node_guid);
		else
			mlx4_put_slave_node_guid(dev->dev, i, mlx4_ib_gen_node_guid());
	}

	err = mlx4_ib_init_alias_guid_service(dev);
	if (err) {
		mlx4_ib_warn(&dev->ib_dev, "Failed init alias guid process.\n");
		goto paravirt_err;
	}
	err = mlx4_ib_device_register_sysfs(dev);
	if (err) {
		mlx4_ib_warn(&dev->ib_dev, "Failed to register sysfs\n");
		goto sysfs_err;
	}

	mlx4_ib_warn(&dev->ib_dev, "initializing demux service for %d qp1 clients\n",
		     dev->dev->caps.sqp_demux);
	for (i = 0; i < dev->num_ports; i++) {
		union ib_gid gid;

		err = __mlx4_ib_query_gid(&dev->ib_dev, i + 1, 0, &gid, 1);
		if (err)
			goto demux_err;
		dev->sriov.demux[i].guid_cache[0] = gid.global.interface_id;
		err = alloc_pv_object(dev, mlx4_master_func_num(dev->dev), i + 1,
				      &dev->sriov.sqps[i]);
		if (err)
			goto demux_err;
		err = mlx4_ib_alloc_demux_ctx(dev, &dev->sriov.demux[i], i + 1);
		if (err)
			goto free_pv;
	}
	mlx4_ib_master_tunnels(dev, 1);
	return 0;

free_pv:
	free_pv_object(dev, mlx4_master_func_num(dev->dev), i + 1);
demux_err:
	while (--i >= 0) {
		free_pv_object(dev, mlx4_master_func_num(dev->dev), i + 1);
		mlx4_ib_free_demux_ctx(&dev->sriov.demux[i]);
	}
	mlx4_ib_device_unregister_sysfs(dev);

sysfs_err:
	mlx4_ib_destroy_alias_guid_service(dev);

paravirt_err:
	mlx4_ib_cm_paravirt_clean(dev, -1);

	return err;
}
void mlx4_ib_close_sriov(struct mlx4_ib_dev *dev)
{
	int i;
	unsigned long flags;

	if (!mlx4_is_mfunc(dev->dev))
		return;

	spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
	dev->sriov.is_going_down = 1;
	spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
	if (mlx4_is_master(dev->dev)) {
		for (i = 0; i < dev->num_ports; i++) {
			flush_workqueue(dev->sriov.demux[i].ud_wq);
			mlx4_ib_free_sqp_ctx(dev->sriov.sqps[i]);
			kfree(dev->sriov.sqps[i]);
			dev->sriov.sqps[i] = NULL;
			mlx4_ib_free_demux_ctx(&dev->sriov.demux[i]);
		}

		mlx4_ib_cm_paravirt_clean(dev, -1);
		mlx4_ib_destroy_alias_guid_service(dev);
		mlx4_ib_device_unregister_sysfs(dev);
	}
}