/*
 * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/rculist.h>

#include "qib.h"
38 | /** | |
39 | * qib_mcast_qp_alloc - alloc a struct to link a QP to mcast GID struct | |
40 | * @qp: the QP to link | |
41 | */ | |
7c2e11fe | 42 | static struct qib_mcast_qp *qib_mcast_qp_alloc(struct rvt_qp *qp) |
f931551b RC |
43 | { |
44 | struct qib_mcast_qp *mqp; | |
45 | ||
041af0bb | 46 | mqp = kmalloc(sizeof(*mqp), GFP_KERNEL); |
f931551b RC |
47 | if (!mqp) |
48 | goto bail; | |
49 | ||
50 | mqp->qp = qp; | |
51 | atomic_inc(&qp->refcount); | |
52 | ||
53 | bail: | |
54 | return mqp; | |
55 | } | |
56 | ||
57 | static void qib_mcast_qp_free(struct qib_mcast_qp *mqp) | |
58 | { | |
7c2e11fe | 59 | struct rvt_qp *qp = mqp->qp; |
f931551b RC |
60 | |
61 | /* Notify qib_destroy_qp() if it is waiting. */ | |
62 | if (atomic_dec_and_test(&qp->refcount)) | |
63 | wake_up(&qp->wait); | |
64 | ||
65 | kfree(mqp); | |
66 | } | |
67 | ||
68 | /** | |
69 | * qib_mcast_alloc - allocate the multicast GID structure | |
70 | * @mgid: the multicast GID | |
71 | * | |
72 | * A list of QPs will be attached to this structure. | |
73 | */ | |
74 | static struct qib_mcast *qib_mcast_alloc(union ib_gid *mgid) | |
75 | { | |
76 | struct qib_mcast *mcast; | |
77 | ||
041af0bb | 78 | mcast = kmalloc(sizeof(*mcast), GFP_KERNEL); |
f931551b RC |
79 | if (!mcast) |
80 | goto bail; | |
81 | ||
82 | mcast->mgid = *mgid; | |
83 | INIT_LIST_HEAD(&mcast->qp_list); | |
84 | init_waitqueue_head(&mcast->wait); | |
85 | atomic_set(&mcast->refcount, 0); | |
86 | mcast->n_attached = 0; | |
87 | ||
88 | bail: | |
89 | return mcast; | |
90 | } | |
91 | ||
92 | static void qib_mcast_free(struct qib_mcast *mcast) | |
93 | { | |
94 | struct qib_mcast_qp *p, *tmp; | |
95 | ||
96 | list_for_each_entry_safe(p, tmp, &mcast->qp_list, list) | |
97 | qib_mcast_qp_free(p); | |
98 | ||
99 | kfree(mcast); | |
100 | } | |
101 | ||
102 | /** | |
103 | * qib_mcast_find - search the global table for the given multicast GID | |
104 | * @ibp: the IB port structure | |
105 | * @mgid: the multicast GID to search for | |
106 | * | |
107 | * Returns NULL if not found. | |
108 | * | |
109 | * The caller is responsible for decrementing the reference count if found. | |
110 | */ | |
111 | struct qib_mcast *qib_mcast_find(struct qib_ibport *ibp, union ib_gid *mgid) | |
112 | { | |
113 | struct rb_node *n; | |
114 | unsigned long flags; | |
115 | struct qib_mcast *mcast; | |
116 | ||
f24a6d48 HC |
117 | spin_lock_irqsave(&ibp->rvp.lock, flags); |
118 | n = ibp->rvp.mcast_tree.rb_node; | |
f931551b RC |
119 | while (n) { |
120 | int ret; | |
121 | ||
122 | mcast = rb_entry(n, struct qib_mcast, rb_node); | |
123 | ||
124 | ret = memcmp(mgid->raw, mcast->mgid.raw, | |
125 | sizeof(union ib_gid)); | |
126 | if (ret < 0) | |
127 | n = n->rb_left; | |
128 | else if (ret > 0) | |
129 | n = n->rb_right; | |
130 | else { | |
131 | atomic_inc(&mcast->refcount); | |
f24a6d48 | 132 | spin_unlock_irqrestore(&ibp->rvp.lock, flags); |
f931551b RC |
133 | goto bail; |
134 | } | |
135 | } | |
f24a6d48 | 136 | spin_unlock_irqrestore(&ibp->rvp.lock, flags); |
f931551b RC |
137 | |
138 | mcast = NULL; | |
139 | ||
140 | bail: | |
141 | return mcast; | |
142 | } | |
143 | ||
144 | /** | |
145 | * qib_mcast_add - insert mcast GID into table and attach QP struct | |
146 | * @mcast: the mcast GID table | |
147 | * @mqp: the QP to attach | |
148 | * | |
149 | * Return zero if both were added. Return EEXIST if the GID was already in | |
150 | * the table but the QP was added. Return ESRCH if the QP was already | |
151 | * attached and neither structure was added. | |
152 | */ | |
153 | static int qib_mcast_add(struct qib_ibdev *dev, struct qib_ibport *ibp, | |
154 | struct qib_mcast *mcast, struct qib_mcast_qp *mqp) | |
155 | { | |
f24a6d48 | 156 | struct rb_node **n = &ibp->rvp.mcast_tree.rb_node; |
f931551b RC |
157 | struct rb_node *pn = NULL; |
158 | int ret; | |
159 | ||
f24a6d48 | 160 | spin_lock_irq(&ibp->rvp.lock); |
f931551b RC |
161 | |
162 | while (*n) { | |
163 | struct qib_mcast *tmcast; | |
164 | struct qib_mcast_qp *p; | |
165 | ||
166 | pn = *n; | |
167 | tmcast = rb_entry(pn, struct qib_mcast, rb_node); | |
168 | ||
169 | ret = memcmp(mcast->mgid.raw, tmcast->mgid.raw, | |
170 | sizeof(union ib_gid)); | |
171 | if (ret < 0) { | |
172 | n = &pn->rb_left; | |
173 | continue; | |
174 | } | |
175 | if (ret > 0) { | |
176 | n = &pn->rb_right; | |
177 | continue; | |
178 | } | |
179 | ||
180 | /* Search the QP list to see if this is already there. */ | |
181 | list_for_each_entry_rcu(p, &tmcast->qp_list, list) { | |
182 | if (p->qp == mqp->qp) { | |
183 | ret = ESRCH; | |
184 | goto bail; | |
185 | } | |
186 | } | |
187 | if (tmcast->n_attached == ib_qib_max_mcast_qp_attached) { | |
188 | ret = ENOMEM; | |
189 | goto bail; | |
190 | } | |
191 | ||
192 | tmcast->n_attached++; | |
193 | ||
194 | list_add_tail_rcu(&mqp->list, &tmcast->qp_list); | |
195 | ret = EEXIST; | |
196 | goto bail; | |
197 | } | |
198 | ||
199 | spin_lock(&dev->n_mcast_grps_lock); | |
200 | if (dev->n_mcast_grps_allocated == ib_qib_max_mcast_grps) { | |
201 | spin_unlock(&dev->n_mcast_grps_lock); | |
202 | ret = ENOMEM; | |
203 | goto bail; | |
204 | } | |
205 | ||
206 | dev->n_mcast_grps_allocated++; | |
207 | spin_unlock(&dev->n_mcast_grps_lock); | |
208 | ||
209 | mcast->n_attached++; | |
210 | ||
211 | list_add_tail_rcu(&mqp->list, &mcast->qp_list); | |
212 | ||
213 | atomic_inc(&mcast->refcount); | |
214 | rb_link_node(&mcast->rb_node, pn, n); | |
f24a6d48 | 215 | rb_insert_color(&mcast->rb_node, &ibp->rvp.mcast_tree); |
f931551b RC |
216 | |
217 | ret = 0; | |
218 | ||
219 | bail: | |
f24a6d48 | 220 | spin_unlock_irq(&ibp->rvp.lock); |
f931551b RC |
221 | |
222 | return ret; | |
223 | } | |
224 | ||
225 | int qib_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) | |
226 | { | |
7c2e11fe | 227 | struct rvt_qp *qp = to_iqp(ibqp); |
f931551b RC |
228 | struct qib_ibdev *dev = to_idev(ibqp->device); |
229 | struct qib_ibport *ibp; | |
230 | struct qib_mcast *mcast; | |
231 | struct qib_mcast_qp *mqp; | |
232 | int ret; | |
233 | ||
234 | if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET) { | |
235 | ret = -EINVAL; | |
236 | goto bail; | |
237 | } | |
238 | ||
239 | /* | |
240 | * Allocate data structures since its better to do this outside of | |
241 | * spin locks and it will most likely be needed. | |
242 | */ | |
243 | mcast = qib_mcast_alloc(gid); | |
244 | if (mcast == NULL) { | |
245 | ret = -ENOMEM; | |
246 | goto bail; | |
247 | } | |
248 | mqp = qib_mcast_qp_alloc(qp); | |
249 | if (mqp == NULL) { | |
250 | qib_mcast_free(mcast); | |
251 | ret = -ENOMEM; | |
252 | goto bail; | |
253 | } | |
254 | ibp = to_iport(ibqp->device, qp->port_num); | |
255 | switch (qib_mcast_add(dev, ibp, mcast, mqp)) { | |
256 | case ESRCH: | |
257 | /* Neither was used: OK to attach the same QP twice. */ | |
258 | qib_mcast_qp_free(mqp); | |
259 | qib_mcast_free(mcast); | |
260 | break; | |
261 | ||
262 | case EEXIST: /* The mcast wasn't used */ | |
263 | qib_mcast_free(mcast); | |
264 | break; | |
265 | ||
266 | case ENOMEM: | |
267 | /* Exceeded the maximum number of mcast groups. */ | |
268 | qib_mcast_qp_free(mqp); | |
269 | qib_mcast_free(mcast); | |
270 | ret = -ENOMEM; | |
271 | goto bail; | |
272 | ||
273 | default: | |
274 | break; | |
275 | } | |
276 | ||
277 | ret = 0; | |
278 | ||
279 | bail: | |
280 | return ret; | |
281 | } | |
282 | ||
283 | int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) | |
284 | { | |
7c2e11fe | 285 | struct rvt_qp *qp = to_iqp(ibqp); |
f931551b RC |
286 | struct qib_ibdev *dev = to_idev(ibqp->device); |
287 | struct qib_ibport *ibp = to_iport(ibqp->device, qp->port_num); | |
288 | struct qib_mcast *mcast = NULL; | |
09dc9cd6 | 289 | struct qib_mcast_qp *p, *tmp, *delp = NULL; |
f931551b RC |
290 | struct rb_node *n; |
291 | int last = 0; | |
292 | int ret; | |
293 | ||
09dc9cd6 MM |
294 | if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET) |
295 | return -EINVAL; | |
f931551b | 296 | |
f24a6d48 | 297 | spin_lock_irq(&ibp->rvp.lock); |
f931551b RC |
298 | |
299 | /* Find the GID in the mcast table. */ | |
f24a6d48 | 300 | n = ibp->rvp.mcast_tree.rb_node; |
f931551b RC |
301 | while (1) { |
302 | if (n == NULL) { | |
f24a6d48 | 303 | spin_unlock_irq(&ibp->rvp.lock); |
09dc9cd6 | 304 | return -EINVAL; |
f931551b RC |
305 | } |
306 | ||
307 | mcast = rb_entry(n, struct qib_mcast, rb_node); | |
308 | ret = memcmp(gid->raw, mcast->mgid.raw, | |
309 | sizeof(union ib_gid)); | |
310 | if (ret < 0) | |
311 | n = n->rb_left; | |
312 | else if (ret > 0) | |
313 | n = n->rb_right; | |
314 | else | |
315 | break; | |
316 | } | |
317 | ||
318 | /* Search the QP list. */ | |
319 | list_for_each_entry_safe(p, tmp, &mcast->qp_list, list) { | |
320 | if (p->qp != qp) | |
321 | continue; | |
322 | /* | |
323 | * We found it, so remove it, but don't poison the forward | |
324 | * link until we are sure there are no list walkers. | |
325 | */ | |
326 | list_del_rcu(&p->list); | |
327 | mcast->n_attached--; | |
09dc9cd6 | 328 | delp = p; |
f931551b RC |
329 | |
330 | /* If this was the last attached QP, remove the GID too. */ | |
331 | if (list_empty(&mcast->qp_list)) { | |
f24a6d48 | 332 | rb_erase(&mcast->rb_node, &ibp->rvp.mcast_tree); |
f931551b RC |
333 | last = 1; |
334 | } | |
335 | break; | |
336 | } | |
337 | ||
f24a6d48 | 338 | spin_unlock_irq(&ibp->rvp.lock); |
09dc9cd6 MM |
339 | /* QP not attached */ |
340 | if (!delp) | |
341 | return -EINVAL; | |
342 | /* | |
343 | * Wait for any list walkers to finish before freeing the | |
344 | * list element. | |
345 | */ | |
346 | wait_event(mcast->wait, atomic_read(&mcast->refcount) <= 1); | |
347 | qib_mcast_qp_free(delp); | |
f931551b | 348 | |
f931551b RC |
349 | if (last) { |
350 | atomic_dec(&mcast->refcount); | |
351 | wait_event(mcast->wait, !atomic_read(&mcast->refcount)); | |
352 | qib_mcast_free(mcast); | |
353 | spin_lock_irq(&dev->n_mcast_grps_lock); | |
354 | dev->n_mcast_grps_allocated--; | |
355 | spin_unlock_irq(&dev->n_mcast_grps_lock); | |
356 | } | |
09dc9cd6 | 357 | return 0; |
f931551b RC |
358 | } |
359 | ||
360 | int qib_mcast_tree_empty(struct qib_ibport *ibp) | |
361 | { | |
f24a6d48 | 362 | return !(ibp->rvp.mcast_tree.rb_node); |
f931551b | 363 | } |