/*
 * Copyright (c) 2006 Intel Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/random.h>

#include <rdma/ib_cache.h>
#include "sa.h"

static void mcast_add_one(struct ib_device *device);
static void mcast_remove_one(struct ib_device *device, void *client_data);

static struct ib_client mcast_client = {
	.name   = "ib_multicast",
	.add    = mcast_add_one,
	.remove = mcast_remove_one
};

static struct ib_sa_client	sa_client;
static struct workqueue_struct	*mcast_wq;
static union ib_gid mgid0;

struct mcast_device;

struct mcast_port {
	struct mcast_device	*dev;
	spinlock_t		lock;
	struct rb_root		table;
	atomic_t		refcount;
	struct completion	comp;
	u8			port_num;
};

struct mcast_device {
	struct ib_device	*device;
	struct ib_event_handler	event_handler;
	int			start_port;
	int			end_port;
	struct mcast_port	port[0];
};

enum mcast_state {
	MCAST_JOINING,
	MCAST_MEMBER,
	MCAST_ERROR,
};

enum mcast_group_state {
	MCAST_IDLE,
	MCAST_BUSY,
	MCAST_GROUP_ERROR,
	MCAST_PKEY_EVENT
};

enum {
	MCAST_INVALID_PKEY_INDEX = 0xFFFF
};

struct mcast_member;

struct mcast_group {
	struct ib_sa_mcmember_rec rec;
	struct rb_node		node;
	struct mcast_port	*port;
	spinlock_t		lock;
	struct work_struct	work;
	struct list_head	pending_list;
	struct list_head	active_list;
	struct mcast_member	*last_join;
	int			members[3];
	atomic_t		refcount;
	enum mcast_group_state	state;
	struct ib_sa_query	*query;
	int			query_id;
	u16			pkey_index;
	u8			leave_state;
	int			retries;
};

struct mcast_member {
	struct ib_sa_multicast	multicast;
	struct ib_sa_client	*client;
	struct mcast_group	*group;
	struct list_head	list;
	enum mcast_state	state;
	atomic_t		refcount;
	struct completion	comp;
};

static void join_handler(int status, struct ib_sa_mcmember_rec *rec,
			 void *context);
static void leave_handler(int status, struct ib_sa_mcmember_rec *rec,
			  void *context);

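/*
 * Look up the multicast group with the given MGID in the port's rb-tree.
 * The caller must hold port->lock.
 */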
static struct mcast_group *mcast_find(struct mcast_port *port,
				      union ib_gid *mgid)
{
	struct rb_node *node = port->table.rb_node;
	struct mcast_group *group;
	int ret;

	while (node) {
		group = rb_entry(node, struct mcast_group, node);
		ret = memcmp(mgid->raw, group->rec.mgid.raw, sizeof *mgid);
		if (!ret)
			return group;

		if (ret < 0)
			node = node->rb_left;
		else
			node = node->rb_right;
	}
	return NULL;
}

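/*
 * Insert a group into the port's rb-tree, keyed by MGID.  Returns NULL on
 * success, or the existing group if one with the same MGID is already
 * present and duplicates are not allowed.  The caller must hold port->lock.
 */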
static struct mcast_group *mcast_insert(struct mcast_port *port,
					struct mcast_group *group,
					int allow_duplicates)
{
	struct rb_node **link = &port->table.rb_node;
	struct rb_node *parent = NULL;
	struct mcast_group *cur_group;
	int ret;

	while (*link) {
		parent = *link;
		cur_group = rb_entry(parent, struct mcast_group, node);

		ret = memcmp(group->rec.mgid.raw, cur_group->rec.mgid.raw,
			     sizeof group->rec.mgid);
		if (ret < 0)
			link = &(*link)->rb_left;
		else if (ret > 0)
			link = &(*link)->rb_right;
		else if (allow_duplicates)
			link = &(*link)->rb_left;
		else
			return cur_group;
	}
	rb_link_node(&group->node, parent, link);
	rb_insert_color(&group->node, &port->table);
	return NULL;
}

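/* Drop a port reference; the final put wakes up mcast_remove_one(). */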
static void deref_port(struct mcast_port *port)
{
	if (atomic_dec_and_test(&port->refcount))
		complete(&port->comp);
}

static void release_group(struct mcast_group *group)
{
	struct mcast_port *port = group->port;
	unsigned long flags;

	spin_lock_irqsave(&port->lock, flags);
	if (atomic_dec_and_test(&group->refcount)) {
		rb_erase(&group->node, &port->table);
		spin_unlock_irqrestore(&port->lock, flags);
		kfree(group);
		deref_port(port);
	} else
		spin_unlock_irqrestore(&port->lock, flags);
}

static void deref_member(struct mcast_member *member)
{
	if (atomic_dec_and_test(&member->refcount))
		complete(&member->comp);
}

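/*
 * Add a member to the group's pending list and, if the group is idle, take a
 * group reference and schedule the group's work item to process the request.
 */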
static void queue_join(struct mcast_member *member)
{
	struct mcast_group *group = member->group;
	unsigned long flags;

	spin_lock_irqsave(&group->lock, flags);
	list_add_tail(&member->list, &group->pending_list);
	if (group->state == MCAST_IDLE) {
		group->state = MCAST_BUSY;
		atomic_inc(&group->refcount);
		queue_work(mcast_wq, &group->work);
	}
	spin_unlock_irqrestore(&group->lock, flags);
}

/*
 * A multicast group has three types of members: full member, non member, and
 * send only member.  We need to keep track of the number of members of each
 * type based on their join state.  Adjust the number of members that belong
 * to the specified join states.
 */
static void adjust_membership(struct mcast_group *group, u8 join_state, int inc)
{
	int i;

	for (i = 0; i < 3; i++, join_state >>= 1)
		if (join_state & 0x1)
			group->members[i] += inc;
}

/*
 * If a multicast group has zero members left for a particular join state, but
 * the group is still a member with the SA, we need to leave that join state.
 * Determine which join states we still belong to, but that do not have any
 * active members.
 */
static u8 get_leave_state(struct mcast_group *group)
{
	u8 leave_state = 0;
	int i;

	for (i = 0; i < 3; i++)
		if (!group->members[i])
			leave_state |= (0x1 << i);

	return leave_state & group->rec.join_state;
}

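/*
 * Compare src_value against dst_value using the comparison named by
 * selector.  Returns non-zero if both the selector and the value are present
 * in comp_mask and src_value does not satisfy the requested comparison.
 */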
static int check_selector(ib_sa_comp_mask comp_mask,
			  ib_sa_comp_mask selector_mask,
			  ib_sa_comp_mask value_mask,
			  u8 selector, u8 src_value, u8 dst_value)
{
	int err;

	if (!(comp_mask & selector_mask) || !(comp_mask & value_mask))
		return 0;

	switch (selector) {
	case IB_SA_GT:
		err = (src_value <= dst_value);
		break;
	case IB_SA_LT:
		err = (src_value >= dst_value);
		break;
	case IB_SA_EQ:
		err = (src_value != dst_value);
		break;
	default:
		err = 0;
		break;
	}

	return err;
}

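/*
 * Compare an existing group record (src) against a join request (dst) for
 * every component named in comp_mask.  Returns 0 if the request is
 * compatible with the group, -EINVAL otherwise.
 */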
static int cmp_rec(struct ib_sa_mcmember_rec *src,
		   struct ib_sa_mcmember_rec *dst, ib_sa_comp_mask comp_mask)
{
	/* MGID must already match */

	if (comp_mask & IB_SA_MCMEMBER_REC_PORT_GID &&
	    memcmp(&src->port_gid, &dst->port_gid, sizeof src->port_gid))
		return -EINVAL;
	if (comp_mask & IB_SA_MCMEMBER_REC_QKEY && src->qkey != dst->qkey)
		return -EINVAL;
	if (comp_mask & IB_SA_MCMEMBER_REC_MLID && src->mlid != dst->mlid)
		return -EINVAL;
	if (check_selector(comp_mask, IB_SA_MCMEMBER_REC_MTU_SELECTOR,
			   IB_SA_MCMEMBER_REC_MTU, dst->mtu_selector,
			   src->mtu, dst->mtu))
		return -EINVAL;
	if (comp_mask & IB_SA_MCMEMBER_REC_TRAFFIC_CLASS &&
	    src->traffic_class != dst->traffic_class)
		return -EINVAL;
	if (comp_mask & IB_SA_MCMEMBER_REC_PKEY && src->pkey != dst->pkey)
		return -EINVAL;
	if (check_selector(comp_mask, IB_SA_MCMEMBER_REC_RATE_SELECTOR,
			   IB_SA_MCMEMBER_REC_RATE, dst->rate_selector,
			   src->rate, dst->rate))
		return -EINVAL;
	if (check_selector(comp_mask,
			   IB_SA_MCMEMBER_REC_PACKET_LIFE_TIME_SELECTOR,
			   IB_SA_MCMEMBER_REC_PACKET_LIFE_TIME,
			   dst->packet_life_time_selector,
			   src->packet_life_time, dst->packet_life_time))
		return -EINVAL;
	if (comp_mask & IB_SA_MCMEMBER_REC_SL && src->sl != dst->sl)
		return -EINVAL;
	if (comp_mask & IB_SA_MCMEMBER_REC_FLOW_LABEL &&
	    src->flow_label != dst->flow_label)
		return -EINVAL;
	if (comp_mask & IB_SA_MCMEMBER_REC_HOP_LIMIT &&
	    src->hop_limit != dst->hop_limit)
		return -EINVAL;
	if (comp_mask & IB_SA_MCMEMBER_REC_SCOPE && src->scope != dst->scope)
		return -EINVAL;

	/* join_state checked separately, proxy_join ignored */

	return 0;
}

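/*
 * Issue an SA MCMemberRecord Set (join) request on behalf of the given
 * member.  join_handler() runs when the query completes.
 */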
static int send_join(struct mcast_group *group, struct mcast_member *member)
{
	struct mcast_port *port = group->port;
	int ret;

	group->last_join = member;
	ret = ib_sa_mcmember_rec_query(&sa_client, port->dev->device,
				       port->port_num, IB_MGMT_METHOD_SET,
				       &member->multicast.rec,
				       member->multicast.comp_mask,
				       3000, GFP_KERNEL, join_handler, group,
				       &group->query);
	if (ret >= 0) {
		group->query_id = ret;
		ret = 0;
	}
	return ret;
}

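/*
 * Issue an SA MCMemberRecord Delete (leave) request for the join states the
 * group no longer needs.  leave_handler() runs when the query completes.
 */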
static int send_leave(struct mcast_group *group, u8 leave_state)
{
	struct mcast_port *port = group->port;
	struct ib_sa_mcmember_rec rec;
	int ret;

	rec = group->rec;
	rec.join_state = leave_state;
	group->leave_state = leave_state;

	ret = ib_sa_mcmember_rec_query(&sa_client, port->dev->device,
				       port->port_num, IB_SA_METHOD_DELETE, &rec,
				       IB_SA_MCMEMBER_REC_MGID |
				       IB_SA_MCMEMBER_REC_PORT_GID |
				       IB_SA_MCMEMBER_REC_JOIN_STATE,
				       3000, GFP_KERNEL, leave_handler,
				       group, &group->query);
	if (ret >= 0) {
		group->query_id = ret;
		ret = 0;
	}
	return ret;
}

static void join_group(struct mcast_group *group, struct mcast_member *member,
		       u8 join_state)
{
	member->state = MCAST_MEMBER;
	adjust_membership(group, join_state, 1);
	group->rec.join_state |= join_state;
	member->multicast.rec = group->rec;
	member->multicast.rec.join_state = join_state;
	list_move(&member->list, &group->active_list);
}

static int fail_join(struct mcast_group *group, struct mcast_member *member,
		     int status)
{
	spin_lock_irq(&group->lock);
	list_del_init(&member->list);
	spin_unlock_irq(&group->lock);
	return member->multicast.callback(status, &member->multicast);
}

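/*
 * Handle a port event that affected the group.  Unless the event was a pkey
 * change and the group's pkey index is still valid, notify every active
 * member with -ENETRESET and clear the group's join state.
 */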
static void process_group_error(struct mcast_group *group)
{
	struct mcast_member *member;
	int ret = 0;
	u16 pkey_index;

	if (group->state == MCAST_PKEY_EVENT)
		ret = ib_find_pkey(group->port->dev->device,
				   group->port->port_num,
				   be16_to_cpu(group->rec.pkey), &pkey_index);

	spin_lock_irq(&group->lock);
	if (group->state == MCAST_PKEY_EVENT && !ret &&
	    group->pkey_index == pkey_index)
		goto out;

	while (!list_empty(&group->active_list)) {
		member = list_entry(group->active_list.next,
				    struct mcast_member, list);
		atomic_inc(&member->refcount);
		list_del_init(&member->list);
		adjust_membership(group, member->multicast.rec.join_state, -1);
		member->state = MCAST_ERROR;
		spin_unlock_irq(&group->lock);

		ret = member->multicast.callback(-ENETRESET,
						 &member->multicast);
		deref_member(member);
		if (ret)
			ib_sa_free_multicast(&member->multicast);
		spin_lock_irq(&group->lock);
	}

	group->rec.join_state = 0;
out:
	group->state = MCAST_BUSY;
	spin_unlock_irq(&group->lock);
}

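/*
 * Group state machine.  Flush group errors, process pending join requests
 * one at a time, and finally leave any join states that no longer have
 * members, going idle once there is nothing left to do.
 */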
static void mcast_work_handler(struct work_struct *work)
{
	struct mcast_group *group;
	struct mcast_member *member;
	struct ib_sa_multicast *multicast;
	int status, ret;
	u8 join_state;

	group = container_of(work, typeof(*group), work);
retest:
	spin_lock_irq(&group->lock);
	while (!list_empty(&group->pending_list) ||
	       (group->state != MCAST_BUSY)) {

		if (group->state != MCAST_BUSY) {
			spin_unlock_irq(&group->lock);
			process_group_error(group);
			goto retest;
		}

		member = list_entry(group->pending_list.next,
				    struct mcast_member, list);
		multicast = &member->multicast;
		join_state = multicast->rec.join_state;
		atomic_inc(&member->refcount);

		if (join_state == (group->rec.join_state & join_state)) {
			status = cmp_rec(&group->rec, &multicast->rec,
					 multicast->comp_mask);
			if (!status)
				join_group(group, member, join_state);
			else
				list_del_init(&member->list);
			spin_unlock_irq(&group->lock);
			ret = multicast->callback(status, multicast);
		} else {
			spin_unlock_irq(&group->lock);
			status = send_join(group, member);
			if (!status) {
				deref_member(member);
				return;
			}
			ret = fail_join(group, member, status);
		}

		deref_member(member);
		if (ret)
			ib_sa_free_multicast(&member->multicast);
		spin_lock_irq(&group->lock);
	}

	join_state = get_leave_state(group);
	if (join_state) {
		group->rec.join_state &= ~join_state;
		spin_unlock_irq(&group->lock);
		if (send_leave(group, join_state))
			goto retest;
	} else {
		group->state = MCAST_IDLE;
		spin_unlock_irq(&group->lock);
		release_group(group);
	}
}

/*
 * Fail a join request if it is still active - at the head of the pending queue.
 */
static void process_join_error(struct mcast_group *group, int status)
{
	struct mcast_member *member;
	int ret;

	spin_lock_irq(&group->lock);
	member = list_entry(group->pending_list.next,
			    struct mcast_member, list);
	if (group->last_join == member) {
		atomic_inc(&member->refcount);
		list_del_init(&member->list);
		spin_unlock_irq(&group->lock);
		ret = member->multicast.callback(status, &member->multicast);
		deref_member(member);
		if (ret)
			ib_sa_free_multicast(&member->multicast);
	} else
		spin_unlock_irq(&group->lock);
}

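/*
 * SA join query completion.  On success, record the pkey index and the
 * member record returned by the SA, re-inserting the group in the port
 * table if the MGID changed, then resume the group work handler.
 */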
static void join_handler(int status, struct ib_sa_mcmember_rec *rec,
			 void *context)
{
	struct mcast_group *group = context;
	u16 pkey_index = MCAST_INVALID_PKEY_INDEX;

	if (status)
		process_join_error(group, status);
	else {
		int mgids_changed, is_mgid0;
		ib_find_pkey(group->port->dev->device, group->port->port_num,
			     be16_to_cpu(rec->pkey), &pkey_index);

		spin_lock_irq(&group->port->lock);
		if (group->state == MCAST_BUSY &&
		    group->pkey_index == MCAST_INVALID_PKEY_INDEX)
			group->pkey_index = pkey_index;
		mgids_changed = memcmp(&rec->mgid, &group->rec.mgid,
				       sizeof(group->rec.mgid));
		group->rec = *rec;
		if (mgids_changed) {
			rb_erase(&group->node, &group->port->table);
			is_mgid0 = !memcmp(&mgid0, &group->rec.mgid,
					   sizeof(mgid0));
			mcast_insert(group->port, group, is_mgid0);
		}
		spin_unlock_irq(&group->port->lock);
	}
	mcast_work_handler(&group->work);
}

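/*
 * SA leave query completion.  Retry a failed leave a limited number of times
 * before resuming the group work handler.
 */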
static void leave_handler(int status, struct ib_sa_mcmember_rec *rec,
			  void *context)
{
	struct mcast_group *group = context;

	if (status && group->retries > 0 &&
	    !send_leave(group, group->leave_state))
		group->retries--;
	else
		mcast_work_handler(&group->work);
}

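/*
 * Find the group for the given MGID on a port, allocating and inserting a
 * new one if needed, and return it with an additional reference held.
 * A zero MGID always allocates a new group and lets the SA assign the MGID.
 */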
static struct mcast_group *acquire_group(struct mcast_port *port,
					 union ib_gid *mgid, gfp_t gfp_mask)
{
	struct mcast_group *group, *cur_group;
	unsigned long flags;
	int is_mgid0;

	is_mgid0 = !memcmp(&mgid0, mgid, sizeof mgid0);
	if (!is_mgid0) {
		spin_lock_irqsave(&port->lock, flags);
		group = mcast_find(port, mgid);
		if (group)
			goto found;
		spin_unlock_irqrestore(&port->lock, flags);
	}

	group = kzalloc(sizeof *group, gfp_mask);
	if (!group)
		return NULL;

	group->retries = 3;
	group->port = port;
	group->rec.mgid = *mgid;
	group->pkey_index = MCAST_INVALID_PKEY_INDEX;
	INIT_LIST_HEAD(&group->pending_list);
	INIT_LIST_HEAD(&group->active_list);
	INIT_WORK(&group->work, mcast_work_handler);
	spin_lock_init(&group->lock);

	spin_lock_irqsave(&port->lock, flags);
	cur_group = mcast_insert(port, group, is_mgid0);
	if (cur_group) {
		kfree(group);
		group = cur_group;
	} else
		atomic_inc(&port->refcount);
found:
	atomic_inc(&group->refcount);
	spin_unlock_irqrestore(&port->lock, flags);
	return group;
}

/*
 * We serialize all join requests to a single group to make our lives much
 * easier.  Otherwise, two users could try to join the same group
 * simultaneously, with different configurations, one could leave while the
 * join is in progress, etc., which makes locking around error recovery
 * difficult.
 */
struct ib_sa_multicast *
ib_sa_join_multicast(struct ib_sa_client *client,
		     struct ib_device *device, u8 port_num,
		     struct ib_sa_mcmember_rec *rec,
		     ib_sa_comp_mask comp_mask, gfp_t gfp_mask,
		     int (*callback)(int status,
				     struct ib_sa_multicast *multicast),
		     void *context)
{
	struct mcast_device *dev;
	struct mcast_member *member;
	struct ib_sa_multicast *multicast;
	int ret;

	dev = ib_get_client_data(device, &mcast_client);
	if (!dev)
		return ERR_PTR(-ENODEV);

	member = kmalloc(sizeof *member, gfp_mask);
	if (!member)
		return ERR_PTR(-ENOMEM);

	ib_sa_client_get(client);
	member->client = client;
	member->multicast.rec = *rec;
	member->multicast.comp_mask = comp_mask;
	member->multicast.callback = callback;
	member->multicast.context = context;
	init_completion(&member->comp);
	atomic_set(&member->refcount, 1);
	member->state = MCAST_JOINING;

	member->group = acquire_group(&dev->port[port_num - dev->start_port],
				      &rec->mgid, gfp_mask);
	if (!member->group) {
		ret = -ENOMEM;
		goto err;
	}

	/*
	 * The user will get the multicast structure in their callback.  They
	 * could then free the multicast structure before we can return from
	 * this routine.  So we save the pointer to return before queuing
	 * any callback.
	 */
	multicast = &member->multicast;
	queue_join(member);
	return multicast;

err:
	ib_sa_client_put(client);
	kfree(member);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(ib_sa_join_multicast);

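/*
 * Illustrative caller-side sketch (hypothetical names, not part of this
 * file): join asynchronously, finish setup in the callback, and drop the
 * membership either by returning non-zero from the callback or by calling
 * ib_sa_free_multicast() later.
 *
 *	static int my_join_done(int status, struct ib_sa_multicast *mc)
 *	{
 *		if (status)
 *			return status;	// non-zero return frees mc
 *		// mc->rec now holds the joined MCMemberRecord
 *		return 0;
 *	}
 *
 *	mc = ib_sa_join_multicast(&my_sa_client, device, port_num, &rec,
 *				  comp_mask, GFP_KERNEL, my_join_done, ctx);
 *	if (IS_ERR(mc))
 *		return PTR_ERR(mc);
 *	...
 *	ib_sa_free_multicast(mc);
 */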
void ib_sa_free_multicast(struct ib_sa_multicast *multicast)
{
	struct mcast_member *member;
	struct mcast_group *group;

	member = container_of(multicast, struct mcast_member, multicast);
	group = member->group;

	spin_lock_irq(&group->lock);
	if (member->state == MCAST_MEMBER)
		adjust_membership(group, multicast->rec.join_state, -1);

	list_del_init(&member->list);

	if (group->state == MCAST_IDLE) {
		group->state = MCAST_BUSY;
		spin_unlock_irq(&group->lock);
		/* Continue to hold reference on group until callback */
		queue_work(mcast_wq, &group->work);
	} else {
		spin_unlock_irq(&group->lock);
		release_group(group);
	}

	deref_member(member);
	wait_for_completion(&member->comp);
	ib_sa_client_put(member->client);
	kfree(member);
}
EXPORT_SYMBOL(ib_sa_free_multicast);

int ib_sa_get_mcmember_rec(struct ib_device *device, u8 port_num,
			   union ib_gid *mgid, struct ib_sa_mcmember_rec *rec)
{
	struct mcast_device *dev;
	struct mcast_port *port;
	struct mcast_group *group;
	unsigned long flags;
	int ret = 0;

	dev = ib_get_client_data(device, &mcast_client);
	if (!dev)
		return -ENODEV;

	port = &dev->port[port_num - dev->start_port];
	spin_lock_irqsave(&port->lock, flags);
	group = mcast_find(port, mgid);
	if (group)
		*rec = group->rec;
	else
		ret = -EADDRNOTAVAIL;
	spin_unlock_irqrestore(&port->lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_sa_get_mcmember_rec);

int ib_init_ah_from_mcmember(struct ib_device *device, u8 port_num,
			     struct ib_sa_mcmember_rec *rec,
			     struct net_device *ndev,
			     enum ib_gid_type gid_type,
			     struct ib_ah_attr *ah_attr)
{
	int ret;
	u16 gid_index;
	u8 p;

	if (rdma_protocol_roce(device, port_num)) {
		ret = ib_find_cached_gid_by_port(device, &rec->port_gid,
						 gid_type, port_num,
						 ndev,
						 &gid_index);
	} else if (rdma_protocol_ib(device, port_num)) {
		ret = ib_find_cached_gid(device, &rec->port_gid,
					 IB_GID_TYPE_IB, NULL, &p,
					 &gid_index);
	} else {
		ret = -EINVAL;
	}

	if (ret)
		return ret;

	memset(ah_attr, 0, sizeof *ah_attr);
	ah_attr->dlid = be16_to_cpu(rec->mlid);
	ah_attr->sl = rec->sl;
	ah_attr->port_num = port_num;
	ah_attr->static_rate = rec->rate;

	ah_attr->ah_flags = IB_AH_GRH;
	ah_attr->grh.dgid = rec->mgid;

	ah_attr->grh.sgid_index = (u8) gid_index;
	ah_attr->grh.flow_label = be32_to_cpu(rec->flow_label);
	ah_attr->grh.hop_limit = rec->hop_limit;
	ah_attr->grh.traffic_class = rec->traffic_class;

	return 0;
}
EXPORT_SYMBOL(ib_init_ah_from_mcmember);

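/*
 * Mark every group on a port with the given event state and schedule its
 * work item so the event is processed, leaving groups already in
 * MCAST_GROUP_ERROR untouched.
 */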
static void mcast_groups_event(struct mcast_port *port,
			       enum mcast_group_state state)
{
	struct mcast_group *group;
	struct rb_node *node;
	unsigned long flags;

	spin_lock_irqsave(&port->lock, flags);
	for (node = rb_first(&port->table); node; node = rb_next(node)) {
		group = rb_entry(node, struct mcast_group, node);
		spin_lock(&group->lock);
		if (group->state == MCAST_IDLE) {
			atomic_inc(&group->refcount);
			queue_work(mcast_wq, &group->work);
		}
		if (group->state != MCAST_GROUP_ERROR)
			group->state = state;
		spin_unlock(&group->lock);
	}
	spin_unlock_irqrestore(&port->lock, flags);
}

static void mcast_event_handler(struct ib_event_handler *handler,
				struct ib_event *event)
{
	struct mcast_device *dev;
	int index;

	dev = container_of(handler, struct mcast_device, event_handler);
	if (!rdma_cap_ib_mcast(dev->device, event->element.port_num))
		return;

	index = event->element.port_num - dev->start_port;

	switch (event->event) {
	case IB_EVENT_PORT_ERR:
	case IB_EVENT_LID_CHANGE:
	case IB_EVENT_SM_CHANGE:
	case IB_EVENT_CLIENT_REREGISTER:
		mcast_groups_event(&dev->port[index], MCAST_GROUP_ERROR);
		break;
	case IB_EVENT_PKEY_CHANGE:
		mcast_groups_event(&dev->port[index], MCAST_PKEY_EVENT);
		break;
	default:
		break;
	}
}

static void mcast_add_one(struct ib_device *device)
{
	struct mcast_device *dev;
	struct mcast_port *port;
	int i;
	int count = 0;

	dev = kmalloc(sizeof *dev + device->phys_port_cnt * sizeof *port,
		      GFP_KERNEL);
	if (!dev)
		return;

	dev->start_port = rdma_start_port(device);
	dev->end_port = rdma_end_port(device);

	for (i = 0; i <= dev->end_port - dev->start_port; i++) {
		if (!rdma_cap_ib_mcast(device, dev->start_port + i))
			continue;
		port = &dev->port[i];
		port->dev = dev;
		port->port_num = dev->start_port + i;
		spin_lock_init(&port->lock);
		port->table = RB_ROOT;
		init_completion(&port->comp);
		atomic_set(&port->refcount, 1);
		++count;
	}

	if (!count) {
		kfree(dev);
		return;
	}

	dev->device = device;
	ib_set_client_data(device, &mcast_client, dev);

	INIT_IB_EVENT_HANDLER(&dev->event_handler, device, mcast_event_handler);
	ib_register_event_handler(&dev->event_handler);
}

static void mcast_remove_one(struct ib_device *device, void *client_data)
{
	struct mcast_device *dev = client_data;
	struct mcast_port *port;
	int i;

	if (!dev)
		return;

	ib_unregister_event_handler(&dev->event_handler);
	flush_workqueue(mcast_wq);

	for (i = 0; i <= dev->end_port - dev->start_port; i++) {
		if (rdma_cap_ib_mcast(device, dev->start_port + i)) {
			port = &dev->port[i];
			deref_port(port);
			wait_for_completion(&port->comp);
		}
	}

	kfree(dev);
}

int mcast_init(void)
{
	int ret;

	mcast_wq = create_singlethread_workqueue("ib_mcast");
	if (!mcast_wq)
		return -ENOMEM;

	ib_sa_register_client(&sa_client);

	ret = ib_register_client(&mcast_client);
	if (ret)
		goto err;
	return 0;

err:
	ib_sa_unregister_client(&sa_client);
	destroy_workqueue(mcast_wq);
	return ret;
}

void mcast_cleanup(void)
{
	ib_unregister_client(&mcast_client);
	ib_sa_unregister_client(&sa_client);
	destroy_workqueue(mcast_wq);
}