/*
 * Copyright (c) 2012 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <rdma/ib_mad.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_sa.h>

#include <linux/mlx4/cmd.h>
#include <linux/rbtree.h>
#include <linux/delay.h>

#include "mlx4_ib.h"

#define MAX_VFS			80
#define MAX_PEND_REQS_PER_FUNC	4
#define MAD_TIMEOUT_MS		2000
#define mcg_warn(fmt, arg...)	pr_warn("MCG WARNING: " fmt, ##arg)
#define mcg_error(fmt, arg...)	pr_err(fmt, ##arg)
#define mcg_warn_group(group, format, arg...) \
	pr_warn("%s-%d: %16s (port %d): WARNING: " format, __func__, __LINE__,\
	(group)->name, group->demux->port, ## arg)

#define mcg_error_group(group, format, arg...) \
	pr_err("  %16s: " format, (group)->name, ## arg)
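
/* mgid0 stays all-zero: it is compared against group MGIDs to recognize
 * joins that arrive with a zero MGID (where the SM assigns the MGID in
 * its response). clean_wq runs the deferred port-cleanup tasks. */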
static union ib_gid mgid0;

static struct workqueue_struct *clean_wq;
enum mcast_state {
	MCAST_NOT_MEMBER = 0,
	MCAST_MEMBER,
};

enum mcast_group_state {
	MCAST_IDLE,
	MCAST_JOIN_SENT,
	MCAST_LEAVE_SENT,
	MCAST_RESP_READY
};

struct mcast_member {
	enum mcast_state	state;
	u8			join_state;
	int			num_pend_reqs;
	struct list_head	pending;
};

struct ib_sa_mcmember_data {
	union ib_gid	mgid;
	union ib_gid	port_gid;
	__be32		qkey;
	__be16		mlid;
	u8		mtusel_mtu;
	u8		tclass;
	__be16		pkey;
	u8		ratesel_rate;
	u8		lifetmsel_lifetm;
	__be32		sl_flowlabel_hoplimit;
	u8		scope_join_state;
	u8		proxy_join;
	u8		reserved[2];
};

struct mcast_group {
	struct ib_sa_mcmember_data rec;
	struct rb_node		node;
	struct list_head	mgid0_list;
	struct mlx4_ib_demux_ctx *demux;
	struct mcast_member	func[MAX_VFS];
	struct mutex		lock;
	struct work_struct	work;
	struct list_head	pending_list;
	int			members[3];
	enum mcast_group_state	state;
	enum mcast_group_state	prev_state;
	struct ib_sa_mad	response_sa_mad;
	__be64			last_req_tid;

	char			name[33]; /* MGID string */

	/* refcount is the reference count for the following:
	   1. Each queued request
	   2. Each invocation of the worker thread
	   3. Membership of the port at the SA
	*/
	atomic_t		refcount;

	/* delayed work to clean pending SM request */
	struct delayed_work	timeout_work;
	struct list_head	cleanup_list;
};

struct mcast_req {
	int			func;
	struct ib_sa_mad	sa_mad;
	struct list_head	group_list;
	struct list_head	func_list;
	struct mcast_group	*group;
	int			clean;
};
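
/* safe_atomic_dec() relies on a "group" variable being in scope at the
 * call site; it drops a reference and warns if the count reaches zero,
 * since it is only used where another reference must still be held. */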
#define safe_atomic_dec(ref) \
	do {\
		if (atomic_dec_and_test(ref)) \
			mcg_warn_group(group, "did not expect to reach zero\n"); \
	} while (0)

static const char *get_state_string(enum mcast_group_state state)
{
	switch (state) {
	case MCAST_IDLE:
		return "MCAST_IDLE";
	case MCAST_JOIN_SENT:
		return "MCAST_JOIN_SENT";
	case MCAST_LEAVE_SENT:
		return "MCAST_LEAVE_SENT";
	case MCAST_RESP_READY:
		return "MCAST_RESP_READY";
	}
	return "Invalid State";
}
static struct mcast_group *mcast_find(struct mlx4_ib_demux_ctx *ctx,
				      union ib_gid *mgid)
{
	struct rb_node *node = ctx->mcg_table.rb_node;
	struct mcast_group *group;
	int ret;

	while (node) {
		group = rb_entry(node, struct mcast_group, node);
		ret = memcmp(mgid->raw, group->rec.mgid.raw, sizeof *mgid);
		if (!ret)
			return group;

		if (ret < 0)
			node = node->rb_left;
		else
			node = node->rb_right;
	}
	return NULL;
}

static struct mcast_group *mcast_insert(struct mlx4_ib_demux_ctx *ctx,
					struct mcast_group *group)
{
	struct rb_node **link = &ctx->mcg_table.rb_node;
	struct rb_node *parent = NULL;
	struct mcast_group *cur_group;
	int ret;

	while (*link) {
		parent = *link;
		cur_group = rb_entry(parent, struct mcast_group, node);

		ret = memcmp(group->rec.mgid.raw, cur_group->rec.mgid.raw,
			     sizeof group->rec.mgid);
		if (ret < 0)
			link = &(*link)->rb_left;
		else if (ret > 0)
			link = &(*link)->rb_right;
		else
			return cur_group;
	}
	rb_link_node(&group->node, parent, link);
	rb_insert_color(&group->node, &ctx->mcg_table);
	return NULL;
}
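
/* Send a MAD to the SA on the wire on behalf of the master (slave 0).
 * Fails with -EAGAIN while the port is not yet Active and no SM address
 * handle has been cached yet. */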
static int send_mad_to_wire(struct mlx4_ib_demux_ctx *ctx, struct ib_mad *mad)
{
	struct mlx4_ib_dev *dev = ctx->dev;
	struct ib_ah_attr	ah_attr;

	spin_lock(&dev->sm_lock);
	if (!dev->sm_ah[ctx->port - 1]) {
		/* port is not yet Active, sm_ah not ready */
		spin_unlock(&dev->sm_lock);
		return -EAGAIN;
	}
	mlx4_ib_query_ah(dev->sm_ah[ctx->port - 1], &ah_attr);
	spin_unlock(&dev->sm_lock);
	return mlx4_ib_send_to_wire(dev, mlx4_master_func_num(dev->dev), ctx->port,
				    IB_QPT_GSI, 0, 1, IB_QP1_QKEY, &ah_attr, mad);
}

static int send_mad_to_slave(int slave, struct mlx4_ib_demux_ctx *ctx,
			     struct ib_mad *mad)
{
	struct mlx4_ib_dev *dev = ctx->dev;
	struct ib_mad_agent *agent = dev->send_agent[ctx->port - 1][1];
	struct ib_wc wc;
	struct ib_ah_attr ah_attr;

	/* Our agent might not yet be registered when mads start to arrive */
	if (!agent)
		return -EAGAIN;

	ib_query_ah(dev->sm_ah[ctx->port - 1], &ah_attr);

	wc.pkey_index = 0;
	wc.sl = 0;
	wc.dlid_path_bits = 0;
	wc.port_num = ctx->port;
	wc.slid = ah_attr.dlid;  /* opensm lid */
	wc.src_qp = 1;
	return mlx4_ib_send_to_slave(dev, slave, ctx->port, IB_QPT_GSI, &wc, NULL, mad);
}
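
/* Forward a VF's join to the SA: the VF's request is sent out almost
 * as-is, but with the real port GID (slave 0) and a TID of our own, so
 * that the SA's answer can be matched back to this group. */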
static int send_join_to_wire(struct mcast_group *group, struct ib_sa_mad *sa_mad)
{
	struct ib_sa_mad mad;
	struct ib_sa_mcmember_data *sa_mad_data = (struct ib_sa_mcmember_data *)&mad.data;
	int ret;

	/* we rely on a mad request as arrived from a VF */
	memcpy(&mad, sa_mad, sizeof mad);

	/* fix port GID to be the real one (slave 0) */
	sa_mad_data->port_gid.global.interface_id = group->demux->guid_cache[0];

	/* assign our own TID */
	mad.mad_hdr.tid = mlx4_ib_get_new_demux_tid(group->demux);
	group->last_req_tid = mad.mad_hdr.tid; /* keep it for later validation */

	ret = send_mad_to_wire(group->demux, (struct ib_mad *)&mad);
	/* set timeout handler */
	if (!ret) {
		/* calls mlx4_ib_mcg_timeout_handler */
		queue_delayed_work(group->demux->mcg_wq, &group->timeout_work,
				msecs_to_jiffies(MAD_TIMEOUT_MS));
	}

	return ret;
}

static int send_leave_to_wire(struct mcast_group *group, u8 join_state)
{
	struct ib_sa_mad mad;
	struct ib_sa_mcmember_data *sa_data = (struct ib_sa_mcmember_data *)&mad.data;
	int ret;

	memset(&mad, 0, sizeof mad);
	mad.mad_hdr.base_version = 1;
	mad.mad_hdr.mgmt_class = IB_MGMT_CLASS_SUBN_ADM;
	mad.mad_hdr.class_version = 2;
	mad.mad_hdr.method = IB_SA_METHOD_DELETE;
	mad.mad_hdr.status = cpu_to_be16(0);
	mad.mad_hdr.class_specific = cpu_to_be16(0);
	mad.mad_hdr.tid = mlx4_ib_get_new_demux_tid(group->demux);
	group->last_req_tid = mad.mad_hdr.tid; /* keep it for later validation */
	mad.mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_MC_MEMBER_REC);
	mad.mad_hdr.attr_mod = cpu_to_be32(0);
	mad.sa_hdr.sm_key = 0x0;
	mad.sa_hdr.attr_offset = cpu_to_be16(7);
	mad.sa_hdr.comp_mask = IB_SA_MCMEMBER_REC_MGID |
		IB_SA_MCMEMBER_REC_PORT_GID | IB_SA_MCMEMBER_REC_JOIN_STATE;

	*sa_data = group->rec;
	sa_data->scope_join_state = join_state;

	ret = send_mad_to_wire(group->demux, (struct ib_mad *)&mad);
	if (ret)
		group->state = MCAST_IDLE;

	/* set timeout handler */
	if (!ret) {
		/* calls mlx4_ib_mcg_timeout_handler */
		queue_delayed_work(group->demux->mcg_wq, &group->timeout_work,
				msecs_to_jiffies(MAD_TIMEOUT_MS));
	}

	return ret;
}
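
/* Answer a VF with a GetResp built from the group's own record, with
 * the join_state and port_gid the VF originally requested restored, so
 * the reply looks as if it came straight from the SA. */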
static int send_reply_to_slave(int slave, struct mcast_group *group,
			       struct ib_sa_mad *req_sa_mad, u16 status)
{
	struct ib_sa_mad mad;
	struct ib_sa_mcmember_data *sa_data = (struct ib_sa_mcmember_data *)&mad.data;
	struct ib_sa_mcmember_data *req_sa_data = (struct ib_sa_mcmember_data *)&req_sa_mad->data;
	int ret;

	memset(&mad, 0, sizeof mad);
	mad.mad_hdr.base_version = 1;
	mad.mad_hdr.mgmt_class = IB_MGMT_CLASS_SUBN_ADM;
	mad.mad_hdr.class_version = 2;
	mad.mad_hdr.method = IB_MGMT_METHOD_GET_RESP;
	mad.mad_hdr.status = cpu_to_be16(status);
	mad.mad_hdr.class_specific = cpu_to_be16(0);
	mad.mad_hdr.tid = req_sa_mad->mad_hdr.tid;
	*(u8 *)&mad.mad_hdr.tid = 0; /* resetting tid to 0 */
	mad.mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_MC_MEMBER_REC);
	mad.mad_hdr.attr_mod = cpu_to_be32(0);
	mad.sa_hdr.sm_key = req_sa_mad->sa_hdr.sm_key;
	mad.sa_hdr.attr_offset = cpu_to_be16(7);
	mad.sa_hdr.comp_mask = 0; /* ignored on responses, see IBTA spec */

	*sa_data = group->rec;

	/* reconstruct VF's requested join_state and port_gid */
	sa_data->scope_join_state &= 0xf0;
	sa_data->scope_join_state |= (group->func[slave].join_state & 0x0f);
	memcpy(&sa_data->port_gid, &req_sa_data->port_gid, sizeof req_sa_data->port_gid);

	ret = send_mad_to_slave(slave, group->demux, (struct ib_mad *)&mad);

	return ret;
}
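
/* check_selector() validates one selector-qualified field of a request
 * (dst) against the group record (src); it returns nonzero when the
 * requested greater-than/less-than/exact constraint is violated. */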
static int check_selector(ib_sa_comp_mask comp_mask,
			  ib_sa_comp_mask selector_mask,
			  ib_sa_comp_mask value_mask,
			  u8 src_value, u8 dst_value)
{
	int err;
	u8 selector = dst_value >> 6;

	dst_value &= 0x3f;
	src_value &= 0x3f;

	if (!(comp_mask & selector_mask) || !(comp_mask & value_mask))
		return 0;

	switch (selector) {
	case IB_SA_GT:
		err = (src_value <= dst_value);
		break;
	case IB_SA_LT:
		err = (src_value >= dst_value);
		break;
	case IB_SA_EQ:
		err = (src_value != dst_value);
		break;
	default:
		err = 0;
		break;
	}

	return err;
}

static u16 cmp_rec(struct ib_sa_mcmember_data *src,
		   struct ib_sa_mcmember_data *dst, ib_sa_comp_mask comp_mask)
{
	/* src is group record, dst is request record */
	/* MGID must already match */
	/* Port_GID we always replace to our Port_GID, so it is a match */

#define MAD_STATUS_REQ_INVALID 0x0200
	if (comp_mask & IB_SA_MCMEMBER_REC_QKEY && src->qkey != dst->qkey)
		return MAD_STATUS_REQ_INVALID;
	if (comp_mask & IB_SA_MCMEMBER_REC_MLID && src->mlid != dst->mlid)
		return MAD_STATUS_REQ_INVALID;
	if (check_selector(comp_mask, IB_SA_MCMEMBER_REC_MTU_SELECTOR,
			   IB_SA_MCMEMBER_REC_MTU,
			   src->mtusel_mtu, dst->mtusel_mtu))
		return MAD_STATUS_REQ_INVALID;
	if (comp_mask & IB_SA_MCMEMBER_REC_TRAFFIC_CLASS &&
	    src->tclass != dst->tclass)
		return MAD_STATUS_REQ_INVALID;
	if (comp_mask & IB_SA_MCMEMBER_REC_PKEY && src->pkey != dst->pkey)
		return MAD_STATUS_REQ_INVALID;
	if (check_selector(comp_mask, IB_SA_MCMEMBER_REC_RATE_SELECTOR,
			   IB_SA_MCMEMBER_REC_RATE,
			   src->ratesel_rate, dst->ratesel_rate))
		return MAD_STATUS_REQ_INVALID;
	if (check_selector(comp_mask,
			   IB_SA_MCMEMBER_REC_PACKET_LIFE_TIME_SELECTOR,
			   IB_SA_MCMEMBER_REC_PACKET_LIFE_TIME,
			   src->lifetmsel_lifetm, dst->lifetmsel_lifetm))
		return MAD_STATUS_REQ_INVALID;
	if (comp_mask & IB_SA_MCMEMBER_REC_SL &&
	    (be32_to_cpu(src->sl_flowlabel_hoplimit) & 0xf0000000) !=
	    (be32_to_cpu(dst->sl_flowlabel_hoplimit) & 0xf0000000))
		return MAD_STATUS_REQ_INVALID;
	if (comp_mask & IB_SA_MCMEMBER_REC_FLOW_LABEL &&
	    (be32_to_cpu(src->sl_flowlabel_hoplimit) & 0x0fffff00) !=
	    (be32_to_cpu(dst->sl_flowlabel_hoplimit) & 0x0fffff00))
		return MAD_STATUS_REQ_INVALID;
	if (comp_mask & IB_SA_MCMEMBER_REC_HOP_LIMIT &&
	    (be32_to_cpu(src->sl_flowlabel_hoplimit) & 0x000000ff) !=
	    (be32_to_cpu(dst->sl_flowlabel_hoplimit) & 0x000000ff))
		return MAD_STATUS_REQ_INVALID;
	if (comp_mask & IB_SA_MCMEMBER_REC_SCOPE &&
	    (src->scope_join_state & 0xf0) !=
	    (dst->scope_join_state & 0xf0))
		return MAD_STATUS_REQ_INVALID;

	/* join_state checked separately, proxy_join ignored */

	return 0;
}
/* release group, return 1 if this was last release and group is destroyed
 * timeout work is canceled sync */
static int release_group(struct mcast_group *group, int from_timeout_handler)
{
	struct mlx4_ib_demux_ctx *ctx = group->demux;
	int nzgroup;

	mutex_lock(&ctx->mcg_table_lock);
	mutex_lock(&group->lock);
	if (atomic_dec_and_test(&group->refcount)) {
		if (!from_timeout_handler) {
			if (group->state != MCAST_IDLE &&
			    !cancel_delayed_work(&group->timeout_work)) {
				atomic_inc(&group->refcount);
				mutex_unlock(&group->lock);
				mutex_unlock(&ctx->mcg_table_lock);
				return 0;
			}
		}

		nzgroup = memcmp(&group->rec.mgid, &mgid0, sizeof mgid0);
		if (!list_empty(&group->pending_list))
			mcg_warn_group(group, "releasing a group with non empty pending list\n");
		if (nzgroup)
			rb_erase(&group->node, &ctx->mcg_table);
		list_del_init(&group->mgid0_list);
		mutex_unlock(&group->lock);
		mutex_unlock(&ctx->mcg_table_lock);
		kfree(group);
		return 1;
	}

	mutex_unlock(&group->lock);
	mutex_unlock(&ctx->mcg_table_lock);
	return 0;
}
static void adjust_membership(struct mcast_group *group, u8 join_state, int inc)
{
	int i;

	for (i = 0; i < 3; i++, join_state >>= 1)
		if (join_state & 0x1)
			group->members[i] += inc;
}

static u8 get_leave_state(struct mcast_group *group)
{
	u8 leave_state = 0;
	int i;

	for (i = 0; i < 3; i++)
		if (!group->members[i])
			leave_state |= (1 << i);

	return leave_state & (group->rec.scope_join_state & 7);
}

static int join_group(struct mcast_group *group, int slave, u8 join_mask)
{
	int ret = 0;
	u8 join_state;

	/* remove bits that slave is already member of, and adjust */
	join_state = join_mask & (~group->func[slave].join_state);
	adjust_membership(group, join_state, 1);
	group->func[slave].join_state |= join_state;
	if (group->func[slave].state != MCAST_MEMBER && join_state) {
		group->func[slave].state = MCAST_MEMBER;
		ret = 1;
	}
	return ret;
}

static int leave_group(struct mcast_group *group, int slave, u8 leave_state)
{
	int ret = 0;

	adjust_membership(group, leave_state, -1);
	group->func[slave].join_state &= ~leave_state;
	if (!group->func[slave].join_state) {
		group->func[slave].state = MCAST_NOT_MEMBER;
		ret = 1;
	}
	return ret;
}

static int check_leave(struct mcast_group *group, int slave, u8 leave_mask)
{
	if (group->func[slave].state != MCAST_MEMBER)
		return MAD_STATUS_REQ_INVALID;

	/* make sure we're not deleting unset bits */
	if (~group->func[slave].join_state & leave_mask)
		return MAD_STATUS_REQ_INVALID;

	if (!leave_mask)
		return MAD_STATUS_REQ_INVALID;

	return 0;
}
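
/* Runs when the SA did not answer a join/leave within MAD_TIMEOUT_MS:
 * drop the request that timed out, put the group back to MCAST_IDLE and
 * requeue the worker so the remaining pending requests are served. */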
static void mlx4_ib_mcg_timeout_handler(struct work_struct *work)
{
	struct delayed_work *delay = to_delayed_work(work);
	struct mcast_group *group;
	struct mcast_req *req = NULL;

	group = container_of(delay, typeof(*group), timeout_work);

	mutex_lock(&group->lock);
	if (group->state == MCAST_JOIN_SENT) {
		if (!list_empty(&group->pending_list)) {
			req = list_first_entry(&group->pending_list, struct mcast_req, group_list);
			list_del(&req->group_list);
			list_del(&req->func_list);
			--group->func[req->func].num_pend_reqs;
			mutex_unlock(&group->lock);
			kfree(req);
			if (memcmp(&group->rec.mgid, &mgid0, sizeof mgid0)) {
				if (release_group(group, 1))
					return;
			} else {
				kfree(group);
				return;
			}
			mutex_lock(&group->lock);
		} else
			mcg_warn_group(group, "DRIVER BUG\n");
	} else if (group->state == MCAST_LEAVE_SENT) {
		if (group->rec.scope_join_state & 7)
			group->rec.scope_join_state &= 0xf8;
		group->state = MCAST_IDLE;
		mutex_unlock(&group->lock);
		if (release_group(group, 1))
			return;
		mutex_lock(&group->lock);
	} else
		mcg_warn_group(group, "invalid state %s\n", get_state_string(group->state));
	group->state = MCAST_IDLE;
	atomic_inc(&group->refcount);
	if (!queue_work(group->demux->mcg_wq, &group->work))
		safe_atomic_dec(&group->refcount);

	mutex_unlock(&group->lock);
}
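
/* Leave requests are answered immediately and only update the local
 * membership counters; a real leave goes to the SA later, and only if
 * the last member of some join state is gone (see get_leave_state()). */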
static int handle_leave_req(struct mcast_group *group, u8 leave_mask,
			    struct mcast_req *req)
{
	u16 status;

	if (req->clean)
		leave_mask = group->func[req->func].join_state;

	status = check_leave(group, req->func, leave_mask);
	if (!status)
		leave_group(group, req->func, leave_mask);

	if (!req->clean)
		send_reply_to_slave(req->func, group, &req->sa_mad, status);
	--group->func[req->func].num_pend_reqs;
	list_del(&req->group_list);
	list_del(&req->func_list);
	kfree(req);
	return 1;
}

static int handle_join_req(struct mcast_group *group, u8 join_mask,
			   struct mcast_req *req)
{
	u8 group_join_state = group->rec.scope_join_state & 7;
	int ref = 0;
	u16 status;
	struct ib_sa_mcmember_data *sa_data = (struct ib_sa_mcmember_data *)req->sa_mad.data;

	if (join_mask == (group_join_state & join_mask)) {
		/* port's membership need not change */
		status = cmp_rec(&group->rec, sa_data, req->sa_mad.sa_hdr.comp_mask);
		if (!status)
			join_group(group, req->func, join_mask);

		--group->func[req->func].num_pend_reqs;
		send_reply_to_slave(req->func, group, &req->sa_mad, status);
		list_del(&req->group_list);
		list_del(&req->func_list);
		kfree(req);
		++ref;
	} else {
		/* port's membership needs to be updated */
		group->prev_state = group->state;
		if (send_join_to_wire(group, &req->sa_mad)) {
			--group->func[req->func].num_pend_reqs;
			list_del(&req->group_list);
			list_del(&req->func_list);
			kfree(req);
			ref = 1;
			group->state = group->prev_state;
		} else
			group->state = MCAST_JOIN_SENT;
	}

	return ref;
}
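
/* Per-group worker: consume a pending SA response if one is ready, then
 * drain the pending join/leave queue while the group stays IDLE, and
 * finally send a leave to the SA if no members remain. Each unit of rc
 * corresponds to one group reference that is dropped on exit. */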
static void mlx4_ib_mcg_work_handler(struct work_struct *work)
{
	struct mcast_group *group;
	struct mcast_req *req = NULL;
	struct ib_sa_mcmember_data *sa_data;
	u8 req_join_state;
	int rc = 1; /* release_count - this is for the scheduled work */
	u16 status;
	u8 method;
	u8 resp_join_state;
	u8 cur_join_state;

	group = container_of(work, typeof(*group), work);

	mutex_lock(&group->lock);

	/* First, let's see if a response from SM is waiting regarding this group.
	 * If so, we need to update the group's REC. If this is a bad response, we
	 * may need to send a bad response to a VF waiting for it. If VF is waiting
	 * and this is a good response, the VF will be answered later in this func. */
	if (group->state == MCAST_RESP_READY) {
		/* cancels mlx4_ib_mcg_timeout_handler */
		cancel_delayed_work(&group->timeout_work);
		status = be16_to_cpu(group->response_sa_mad.mad_hdr.status);
		method = group->response_sa_mad.mad_hdr.method;
		if (group->last_req_tid != group->response_sa_mad.mad_hdr.tid) {
			mcg_warn_group(group, "Got MAD response to existing MGID but wrong TID, dropping. Resp TID=%llx, group TID=%llx\n",
				be64_to_cpu(group->response_sa_mad.mad_hdr.tid),
				be64_to_cpu(group->last_req_tid));
			group->state = group->prev_state;
			goto process_requests;
		}
		if (status) {
			if (!list_empty(&group->pending_list))
				req = list_first_entry(&group->pending_list,
						struct mcast_req, group_list);
			if (method == IB_MGMT_METHOD_GET_RESP) {
				if (req) {
					send_reply_to_slave(req->func, group, &req->sa_mad, status);
					--group->func[req->func].num_pend_reqs;
					list_del(&req->group_list);
					list_del(&req->func_list);
					kfree(req);
					++rc;
				} else
					mcg_warn_group(group, "no request for failed join\n");
			} else if (method == IB_SA_METHOD_DELETE_RESP && group->demux->flushing)
				++rc;
		} else {
			resp_join_state = ((struct ib_sa_mcmember_data *)
					group->response_sa_mad.data)->scope_join_state & 7;
			cur_join_state = group->rec.scope_join_state & 7;

			if (method == IB_MGMT_METHOD_GET_RESP) {
				/* successful join */
				if (!cur_join_state && resp_join_state)
					--rc;
			} else if (!resp_join_state)
				++rc;
			memcpy(&group->rec, group->response_sa_mad.data, sizeof group->rec);
		}
		group->state = MCAST_IDLE;
	}

process_requests:
	/* We should now go over pending join/leave requests, as long as we are idle. */
	while (!list_empty(&group->pending_list) && group->state == MCAST_IDLE) {
		req = list_first_entry(&group->pending_list, struct mcast_req,
				       group_list);
		sa_data = (struct ib_sa_mcmember_data *)req->sa_mad.data;
		req_join_state = sa_data->scope_join_state & 0x7;

		/* For a leave request, we will immediately answer the VF, and
		 * update our internal counters. The actual leave will be sent
		 * to SM later, if at all needed. We dequeue the request now. */
		if (req->sa_mad.mad_hdr.method == IB_SA_METHOD_DELETE)
			rc += handle_leave_req(group, req_join_state, req);
		else
			rc += handle_join_req(group, req_join_state, req);
	}

	/* Handle leaves */
	if (group->state == MCAST_IDLE) {
		req_join_state = get_leave_state(group);
		if (req_join_state) {
			group->rec.scope_join_state &= ~req_join_state;
			group->prev_state = group->state;
			if (send_leave_to_wire(group, req_join_state)) {
				group->state = group->prev_state;
				++rc;
			} else
				group->state = MCAST_LEAVE_SENT;
		}
	}

	if (!list_empty(&group->pending_list) && group->state == MCAST_IDLE)
		goto process_requests;
	mutex_unlock(&group->lock);

	while (rc--)
		release_group(group, 0);
}
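
/* A join was sent with a zero MGID and the SA has answered: locate the
 * waiting group by the TID of that request, assign it the newly provided
 * MGID and move it from the mgid0 list into the rb-tree. If the SA still
 * returned a zero MGID, the group and its requests are discarded. */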
static struct mcast_group *search_relocate_mgid0_group(struct mlx4_ib_demux_ctx *ctx,
						       __be64 tid,
						       union ib_gid *new_mgid)
{
	struct mcast_group *group = NULL, *cur_group;
	struct mcast_req *req;
	struct list_head *pos;
	struct list_head *n;

	mutex_lock(&ctx->mcg_table_lock);
	list_for_each_safe(pos, n, &ctx->mcg_mgid0_list) {
		group = list_entry(pos, struct mcast_group, mgid0_list);
		mutex_lock(&group->lock);
		if (group->last_req_tid == tid) {
			if (memcmp(new_mgid, &mgid0, sizeof mgid0)) {
				group->rec.mgid = *new_mgid;
				sprintf(group->name, "%016llx%016llx",
						be64_to_cpu(group->rec.mgid.global.subnet_prefix),
						be64_to_cpu(group->rec.mgid.global.interface_id));
				list_del_init(&group->mgid0_list);
				cur_group = mcast_insert(ctx, group);
				if (cur_group) {
					/* A race between our code and SM. Silently cleaning the new one */
					req = list_first_entry(&group->pending_list,
							       struct mcast_req, group_list);
					--group->func[req->func].num_pend_reqs;
					list_del(&req->group_list);
					list_del(&req->func_list);
					kfree(req);
					mutex_unlock(&group->lock);
					mutex_unlock(&ctx->mcg_table_lock);
					release_group(group, 0);
					return NULL;
				}

				atomic_inc(&group->refcount);
				mutex_unlock(&group->lock);
				mutex_unlock(&ctx->mcg_table_lock);
				return group;
			} else {
				struct mcast_req *tmp1, *tmp2;

				list_del(&group->mgid0_list);
				if (!list_empty(&group->pending_list) && group->state != MCAST_IDLE)
					cancel_delayed_work_sync(&group->timeout_work);

				list_for_each_entry_safe(tmp1, tmp2, &group->pending_list, group_list) {
					list_del(&tmp1->group_list);
					kfree(tmp1);
				}
				mutex_unlock(&group->lock);
				mutex_unlock(&ctx->mcg_table_lock);
				kfree(group);
				return NULL;
			}
		}
		mutex_unlock(&group->lock);
	}
	mutex_unlock(&ctx->mcg_table_lock);

	return NULL;
}
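
/* Find a group by MGID and take a reference, optionally creating it.
 * Zero-MGID groups are parked on the mgid0 list until the SA assigns
 * them a real MGID. Caller must hold ctx->mcg_table_lock. */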
static struct mcast_group *acquire_group(struct mlx4_ib_demux_ctx *ctx,
					 union ib_gid *mgid, int create,
					 gfp_t gfp_mask)
{
	struct mcast_group *group, *cur_group;
	int is_mgid0;
	int i;

	is_mgid0 = !memcmp(&mgid0, mgid, sizeof mgid0);
	if (!is_mgid0) {
		group = mcast_find(ctx, mgid);
		if (group)
			goto found;
	}

	if (!create)
		return ERR_PTR(-ENOENT);

	group = kzalloc(sizeof *group, gfp_mask);
	if (!group)
		return ERR_PTR(-ENOMEM);

	group->demux = ctx;
	group->rec.mgid = *mgid;
	INIT_LIST_HEAD(&group->pending_list);
	INIT_LIST_HEAD(&group->mgid0_list);
	for (i = 0; i < MAX_VFS; ++i)
		INIT_LIST_HEAD(&group->func[i].pending);
	INIT_WORK(&group->work, mlx4_ib_mcg_work_handler);
	INIT_DELAYED_WORK(&group->timeout_work, mlx4_ib_mcg_timeout_handler);
	mutex_init(&group->lock);
	sprintf(group->name, "%016llx%016llx",
			be64_to_cpu(group->rec.mgid.global.subnet_prefix),
			be64_to_cpu(group->rec.mgid.global.interface_id));
	group->state = MCAST_IDLE;

	if (is_mgid0) {
		list_add(&group->mgid0_list, &ctx->mcg_mgid0_list);
		goto found;
	}

	cur_group = mcast_insert(ctx, group);
	if (cur_group) {
		mcg_warn("group just showed up %s - confused\n", cur_group->name);
		kfree(group);
		return ERR_PTR(-EINVAL);
	}

found:
	atomic_inc(&group->refcount);
	return group;
}
static void queue_req(struct mcast_req *req)
{
	struct mcast_group *group = req->group;

	atomic_inc(&group->refcount); /* for the request */
	atomic_inc(&group->refcount); /* for scheduling the work */
	list_add_tail(&req->group_list, &group->pending_list);
	list_add_tail(&req->func_list, &group->func[req->func].pending);
	/* calls mlx4_ib_mcg_work_handler */
	if (!queue_work(group->demux->mcg_wq, &group->work))
		safe_atomic_dec(&group->refcount);
}
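
/* Demux path: a MCMember MAD arrived from the wire (the SA). Responses
 * to our own requests are matched to their group and handed to the
 * worker; table queries and the like are tunneled to the guest. */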
int mlx4_ib_mcg_demux_handler(struct ib_device *ibdev, int port, int slave,
			      struct ib_sa_mad *mad)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct ib_sa_mcmember_data *rec = (struct ib_sa_mcmember_data *)mad->data;
	struct mlx4_ib_demux_ctx *ctx = &dev->sriov.demux[port - 1];
	struct mcast_group *group;

	switch (mad->mad_hdr.method) {
	case IB_MGMT_METHOD_GET_RESP:
	case IB_SA_METHOD_DELETE_RESP:
		mutex_lock(&ctx->mcg_table_lock);
		group = acquire_group(ctx, &rec->mgid, 0, GFP_KERNEL);
		mutex_unlock(&ctx->mcg_table_lock);
		if (IS_ERR(group)) {
			if (mad->mad_hdr.method == IB_MGMT_METHOD_GET_RESP) {
				__be64 tid = mad->mad_hdr.tid;
				*(u8 *)(&tid) = (u8)slave; /* in group we kept the modified TID */
				group = search_relocate_mgid0_group(ctx, tid, &rec->mgid);
			} else
				group = NULL;
		}

		if (!group)
			return 1; /* consumed */

		mutex_lock(&group->lock);
		group->response_sa_mad = *mad;
		group->prev_state = group->state;
		group->state = MCAST_RESP_READY;
		/* calls mlx4_ib_mcg_work_handler */
		atomic_inc(&group->refcount);
		if (!queue_work(ctx->mcg_wq, &group->work))
			safe_atomic_dec(&group->refcount);
		mutex_unlock(&group->lock);
		release_group(group, 0);
		return 1; /* consumed */
	case IB_MGMT_METHOD_SET:
	case IB_SA_METHOD_GET_TABLE:
	case IB_SA_METHOD_GET_TABLE_RESP:
	case IB_SA_METHOD_DELETE:
		return 0; /* not consumed, pass-through to guest over tunnel */
	default:
		mcg_warn("In demux, port %d: unexpected MCMember method: 0x%x, dropping\n",
			port, mad->mad_hdr.method);
		return 1; /* consumed */
	}
}
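
/* Multiplex path: a MCMember MAD arrived from a VF. Joins and leaves
 * are queued on the (possibly newly created) group and processed by the
 * worker; query methods pass through to the wire unmodified. */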
int mlx4_ib_mcg_multiplex_handler(struct ib_device *ibdev, int port,
				  int slave, struct ib_sa_mad *sa_mad)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct ib_sa_mcmember_data *rec = (struct ib_sa_mcmember_data *)sa_mad->data;
	struct mlx4_ib_demux_ctx *ctx = &dev->sriov.demux[port - 1];
	struct mcast_group *group;
	struct mcast_req *req;
	int may_create = 0;

	if (ctx->flushing)
		return -EAGAIN;

	switch (sa_mad->mad_hdr.method) {
	case IB_MGMT_METHOD_SET:
		may_create = 1;
		/* fall through */
	case IB_SA_METHOD_DELETE:
		req = kzalloc(sizeof *req, GFP_KERNEL);
		if (!req)
			return -ENOMEM;

		req->func = slave;
		req->sa_mad = *sa_mad;

		mutex_lock(&ctx->mcg_table_lock);
		group = acquire_group(ctx, &rec->mgid, may_create, GFP_KERNEL);
		mutex_unlock(&ctx->mcg_table_lock);
		if (IS_ERR(group)) {
			kfree(req);
			return PTR_ERR(group);
		}
		mutex_lock(&group->lock);
		if (group->func[slave].num_pend_reqs > MAX_PEND_REQS_PER_FUNC) {
			mutex_unlock(&group->lock);
			mcg_warn_group(group, "Port %d, Func %d has too many pending requests (%d), dropping\n",
				       port, slave, MAX_PEND_REQS_PER_FUNC);
			release_group(group, 0);
			kfree(req);
			return -ENOMEM;
		}
		++group->func[slave].num_pend_reqs;
		req->group = group;
		queue_req(req);
		mutex_unlock(&group->lock);
		release_group(group, 0);
		return 1; /* consumed */
	case IB_SA_METHOD_GET_TABLE:
	case IB_MGMT_METHOD_GET_RESP:
	case IB_SA_METHOD_GET_TABLE_RESP:
	case IB_SA_METHOD_DELETE_RESP:
		return 0; /* not consumed, pass-through */
	default:
		mcg_warn("In multiplex, port %d, func %d: unexpected MCMember method: 0x%x, dropping\n",
			port, slave, sa_mad->mad_hdr.method);
		return 1; /* consumed */
	}
}
int mlx4_ib_mcg_port_init(struct mlx4_ib_demux_ctx *ctx)
{
	char name[20];

	atomic_set(&ctx->tid, 0);
	sprintf(name, "mlx4_ib_mcg%d", ctx->port);
	ctx->mcg_wq = create_singlethread_workqueue(name);
	if (!ctx->mcg_wq)
		return -ENOMEM;

	mutex_init(&ctx->mcg_table_lock);
	ctx->mcg_table = RB_ROOT;
	INIT_LIST_HEAD(&ctx->mcg_mgid0_list);
	ctx->flushing = 0;

	return 0;
}
static void force_clean_group(struct mcast_group *group)
{
	struct mcast_req *req, *tmp;

	list_for_each_entry_safe(req, tmp, &group->pending_list, group_list) {
		list_del(&req->group_list);
		kfree(req);
	}
	rb_erase(&group->node, &group->demux->mcg_table);
	kfree(group);
}

static void _mlx4_ib_mcg_port_cleanup(struct mlx4_ib_demux_ctx *ctx, int destroy_wq)
{
	int i;
	struct rb_node *p;
	struct mcast_group *group;
	unsigned long end;
	int count;

	for (i = 0; i < MAX_VFS; ++i)
		clean_vf_mcast(ctx, i);

	end = jiffies + msecs_to_jiffies(MAD_TIMEOUT_MS + 3000);
	do {
		count = 0;
		mutex_lock(&ctx->mcg_table_lock);
		for (p = rb_first(&ctx->mcg_table); p; p = rb_next(p))
			++count;
		mutex_unlock(&ctx->mcg_table_lock);
		if (!count)
			break;

		msleep(1);
	} while (time_after(end, jiffies));

	flush_workqueue(ctx->mcg_wq);
	if (destroy_wq)
		destroy_workqueue(ctx->mcg_wq);

	mutex_lock(&ctx->mcg_table_lock);
	while ((p = rb_first(&ctx->mcg_table)) != NULL) {
		group = rb_entry(p, struct mcast_group, node);
		if (atomic_read(&group->refcount))
			mcg_warn_group(group, "group refcount %d!!! (pointer %p)\n", atomic_read(&group->refcount), group);

		force_clean_group(group);
	}
	mutex_unlock(&ctx->mcg_table_lock);
}
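
/* Port cleanup may sleep (group mutexes, delayed-work cancellation), so
 * callers that cannot block hand the sweep off to the global clean_wq
 * via a struct clean_work. */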
struct clean_work {
	struct work_struct work;
	struct mlx4_ib_demux_ctx *ctx;
	int destroy_wq;
};

static void mcg_clean_task(struct work_struct *work)
{
	struct clean_work *cw = container_of(work, struct clean_work, work);

	_mlx4_ib_mcg_port_cleanup(cw->ctx, cw->destroy_wq);
	cw->ctx->flushing = 0;
	kfree(cw);
}

void mlx4_ib_mcg_port_cleanup(struct mlx4_ib_demux_ctx *ctx, int destroy_wq)
{
	struct clean_work *work;

	ctx->flushing = 1;

	if (destroy_wq) {
		_mlx4_ib_mcg_port_cleanup(ctx, destroy_wq);
		ctx->flushing = 0;
		return;
	}

	work = kmalloc(sizeof *work, GFP_KERNEL);
	if (!work) {
		ctx->flushing = 0;
		mcg_warn("failed allocating work for cleanup\n");
		return;
	}

	work->ctx = ctx;
	work->destroy_wq = destroy_wq;
	INIT_WORK(&work->work, mcg_clean_task);
	queue_work(clean_wq, &work->work);
}
static void build_leave_mad(struct mcast_req *req)
{
	struct ib_sa_mad *mad = &req->sa_mad;

	mad->mad_hdr.method = IB_SA_METHOD_DELETE;
}
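
/* Drop all requests a VF still has pending on this group. If the head
 * of the group queue belongs to this VF and is already out on the wire,
 * try to cancel its timeout and force the group back to MCAST_IDLE. */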
static void clear_pending_reqs(struct mcast_group *group, int vf)
{
	struct mcast_req *req, *tmp, *group_first = NULL;
	int clear;
	int pend = 0;

	if (!list_empty(&group->pending_list))
		group_first = list_first_entry(&group->pending_list, struct mcast_req, group_list);

	list_for_each_entry_safe(req, tmp, &group->func[vf].pending, func_list) {
		clear = 1;
		if (group_first == req &&
		    (group->state == MCAST_JOIN_SENT ||
		     group->state == MCAST_LEAVE_SENT)) {
			clear = cancel_delayed_work(&group->timeout_work);
			pend = !clear;
			group->state = MCAST_IDLE;
		}
		if (clear) {
			--group->func[vf].num_pend_reqs;
			list_del(&req->group_list);
			list_del(&req->func_list);
			kfree(req);
			atomic_dec(&group->refcount);
		}
	}

	if (!pend && (!list_empty(&group->func[vf].pending) || group->func[vf].num_pend_reqs)) {
		mcg_warn_group(group, "DRIVER BUG: list_empty %d, num_pend_reqs %d\n",
			       list_empty(&group->func[vf].pending), group->func[vf].num_pend_reqs);
	}
}
static int push_deleting_req(struct mcast_group *group, int slave)
{
	struct mcast_req *req;
	struct mcast_req *pend_req;

	if (!group->func[slave].join_state)
		return 0;

	req = kzalloc(sizeof *req, GFP_KERNEL);
	if (!req) {
		mcg_warn_group(group, "failed allocation - may leave stale groups\n");
		return -ENOMEM;
	}

	if (!list_empty(&group->func[slave].pending)) {
		pend_req = list_entry(group->func[slave].pending.prev, struct mcast_req, group_list);
		if (pend_req->clean) {
			kfree(req);
			return 0;
		}
	}

	req->clean = 1;
	req->func = slave;
	req->group = group;
	++group->func[slave].num_pend_reqs;
	build_leave_mad(req);
	queue_req(req);
	return 0;
}
void clean_vf_mcast(struct mlx4_ib_demux_ctx *ctx, int slave)
{
	struct mcast_group *group;
	struct rb_node *p;

	mutex_lock(&ctx->mcg_table_lock);
	for (p = rb_first(&ctx->mcg_table); p; p = rb_next(p)) {
		group = rb_entry(p, struct mcast_group, node);
		mutex_lock(&group->lock);
		if (atomic_read(&group->refcount)) {
			/* clear pending requests of this VF */
			clear_pending_reqs(group, slave);
			push_deleting_req(group, slave);
		}
		mutex_unlock(&group->lock);
	}
	mutex_unlock(&ctx->mcg_table_lock);
}

int mlx4_ib_mcg_init(void)
{
	clean_wq = create_singlethread_workqueue("mlx4_ib_mcg");
	if (!clean_wq)
		return -ENOMEM;

	return 0;
}

void mlx4_ib_mcg_destroy(void)
{
	destroy_workqueue(clean_wq);
}