[PATCH] IB: Introduce RMPP APIs
drivers/infiniband/core/verbs.c
/*
 * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
 * Copyright (c) 2004 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: verbs.c 1349 2004-12-16 21:09:43Z roland $
 */

#include <linux/errno.h>
#include <linux/err.h>

#include <ib_verbs.h>
#include <ib_cache.h>

/* Protection domains */

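/**
 * ib_alloc_pd - Allocates an unused protection domain.
 * @device: The device on which to allocate the protection domain.
 *
 * A protection domain provides an association between QPs, address
 * handles, memory regions, and memory windows.
 *
 * A minimal usage sketch (hypothetical caller; error handling shown):
 *
 *	struct ib_pd *pd = ib_alloc_pd(device);
 *	if (IS_ERR(pd))
 *		return PTR_ERR(pd);
 */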
struct ib_pd *ib_alloc_pd(struct ib_device *device)
{
        struct ib_pd *pd;

        pd = device->alloc_pd(device, NULL, NULL);

        if (!IS_ERR(pd)) {
                pd->device = device;
                pd->uobject = NULL;
                atomic_set(&pd->usecnt, 0);
        }

        return pd;
}
EXPORT_SYMBOL(ib_alloc_pd);

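/**
 * ib_dealloc_pd - Deallocates a protection domain.
 * @pd: The protection domain to deallocate.
 *
 * Fails with -EBUSY while any object allocated from the domain still
 * holds a reference on it.
 */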
int ib_dealloc_pd(struct ib_pd *pd)
{
        if (atomic_read(&pd->usecnt))
                return -EBUSY;

        return pd->device->dealloc_pd(pd);
}
EXPORT_SYMBOL(ib_dealloc_pd);

/* Address handles */

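/**
 * ib_create_ah - Creates an address handle for the given address vector.
 * @pd: The protection domain associated with the address handle.
 * @ah_attr: The attributes of the address vector.
 */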
struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
{
        struct ib_ah *ah;

        ah = pd->device->create_ah(pd, ah_attr);

        if (!IS_ERR(ah)) {
                ah->device = pd->device;
                ah->pd = pd;
                ah->uobject = NULL;
                atomic_inc(&pd->usecnt);
        }

        return ah;
}
EXPORT_SYMBOL(ib_create_ah);

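/**
 * ib_create_ah_from_wc - Creates an address handle that can be used to
 *   reply to the sender of the message described by a work completion.
 * @pd: The protection domain associated with the address handle.
 * @wc: Work completion information for a received message.
 * @grh: The received GRH buffer; examined only when @wc indicates that
 *   a GRH is present (IB_WC_GRH).
 * @port_num: The outbound port number to associate with the address.
 *
 * Usage sketch (hypothetical responder; my_pd and my_port are
 * placeholders):
 *
 *	ah = ib_create_ah_from_wc(my_pd, wc, grh, my_port);
 *	if (IS_ERR(ah))
 *		return PTR_ERR(ah);
 *	... post the reply using ah, then ib_destroy_ah(ah) ...
 */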
struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, struct ib_wc *wc,
                                   struct ib_grh *grh, u8 port_num)
{
        struct ib_ah_attr ah_attr;
        u32 flow_class;
        u16 gid_index;
        int ret;

        memset(&ah_attr, 0, sizeof ah_attr);
        ah_attr.dlid = wc->slid;
        ah_attr.sl = wc->sl;
        ah_attr.src_path_bits = wc->dlid_path_bits;
        ah_attr.port_num = port_num;

        if (wc->wc_flags & IB_WC_GRH) {
                ah_attr.ah_flags = IB_AH_GRH;

                /*
                 * For a reply, the destination GID is the remote
                 * sender's GID (the received sgid), and the local GID
                 * index is looked up from the GID the packet was
                 * addressed to (the received dgid).
                 */
                ah_attr.grh.dgid = grh->sgid;

                ret = ib_find_cached_gid(pd->device, &grh->dgid, &port_num,
                                         &gid_index);
                if (ret)
                        return ERR_PTR(ret);

                ah_attr.grh.sgid_index = (u8) gid_index;
                flow_class = be32_to_cpu(grh->version_tclass_flow);
                ah_attr.grh.flow_label = flow_class & 0xFFFFF;
                ah_attr.grh.traffic_class = (flow_class >> 20) & 0xFF;
                ah_attr.grh.hop_limit = grh->hop_limit;
        }

        return ib_create_ah(pd, &ah_attr);
}
EXPORT_SYMBOL(ib_create_ah_from_wc);

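/**
 * ib_modify_ah - Modifies the address vector of an address handle.
 * @ah: The address handle to modify.
 * @ah_attr: The new address vector attributes.
 *
 * Optional per device; returns -ENOSYS when unsupported.
 */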
int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
{
        return ah->device->modify_ah ?
                ah->device->modify_ah(ah, ah_attr) :
                -ENOSYS;
}
EXPORT_SYMBOL(ib_modify_ah);

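/**
 * ib_query_ah - Queries the address vector associated with an address
 *   handle.
 * @ah: The address handle to query.
 * @ah_attr: Returned attributes of the address vector.
 *
 * Optional per device; returns -ENOSYS when unsupported.
 */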
int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
{
        return ah->device->query_ah ?
                ah->device->query_ah(ah, ah_attr) :
                -ENOSYS;
}
EXPORT_SYMBOL(ib_query_ah);

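/**
 * ib_destroy_ah - Destroys an address handle and drops its reference
 *   on the parent protection domain.
 * @ah: The address handle to destroy.
 */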
int ib_destroy_ah(struct ib_ah *ah)
{
        struct ib_pd *pd;
        int ret;

        pd = ah->pd;
        ret = ah->device->destroy_ah(ah);
        if (!ret)
                atomic_dec(&pd->usecnt);

        return ret;
}
EXPORT_SYMBOL(ib_destroy_ah);

/* Queue pairs */

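/**
 * ib_create_qp - Creates a QP associated with the specified protection
 *   domain, taking references on the PD, the CQs, and the SRQ (if any).
 * @pd: The protection domain associated with the QP.
 * @qp_init_attr: Initial attributes required to create the QP.
 *
 * A minimal sketch (field values are illustrative only):
 *
 *	struct ib_qp_init_attr init_attr = {
 *		.send_cq = send_cq,
 *		.recv_cq = recv_cq,
 *		.cap     = { .max_send_wr = 16, .max_recv_wr = 16,
 *			     .max_send_sge = 1, .max_recv_sge = 1 },
 *		.sq_sig_type = IB_SIGNAL_ALL_WR,
 *		.qp_type = IB_QPT_UD,
 *	};
 *	struct ib_qp *qp = ib_create_qp(pd, &init_attr);
 */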
struct ib_qp *ib_create_qp(struct ib_pd *pd,
                           struct ib_qp_init_attr *qp_init_attr)
{
        struct ib_qp *qp;

        qp = pd->device->create_qp(pd, qp_init_attr, NULL);

        if (!IS_ERR(qp)) {
                qp->device = pd->device;
                qp->pd = pd;
                qp->send_cq = qp_init_attr->send_cq;
                qp->recv_cq = qp_init_attr->recv_cq;
                qp->srq = qp_init_attr->srq;
                qp->uobject = NULL;
                qp->event_handler = qp_init_attr->event_handler;
                qp->qp_context = qp_init_attr->qp_context;
                qp->qp_type = qp_init_attr->qp_type;
                atomic_inc(&pd->usecnt);
                atomic_inc(&qp_init_attr->send_cq->usecnt);
                atomic_inc(&qp_init_attr->recv_cq->usecnt);
                if (qp_init_attr->srq)
                        atomic_inc(&qp_init_attr->srq->usecnt);
        }

        return qp;
}
EXPORT_SYMBOL(ib_create_qp);

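/**
 * ib_modify_qp - Modifies the attributes of the given QP, typically to
 *   transition it through the RESET -> INIT -> RTR -> RTS state machine.
 * @qp: The QP to modify.
 * @qp_attr: The attributes to apply.
 * @qp_attr_mask: Bit-mask of IB_QP_* flags selecting which fields of
 *   @qp_attr are valid.
 */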
int ib_modify_qp(struct ib_qp *qp,
                 struct ib_qp_attr *qp_attr,
                 int qp_attr_mask)
{
        return qp->device->modify_qp(qp, qp_attr, qp_attr_mask);
}
EXPORT_SYMBOL(ib_modify_qp);

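/**
 * ib_query_qp - Returns the attribute list and current values of the
 *   specified QP.
 * @qp: The QP to query.
 * @qp_attr: Returned QP attributes.
 * @qp_attr_mask: Bit-mask of the attributes to query.
 * @qp_init_attr: Returned creation-time attributes.
 *
 * Optional per device; returns -ENOSYS when unsupported.
 */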
int ib_query_qp(struct ib_qp *qp,
                struct ib_qp_attr *qp_attr,
                int qp_attr_mask,
                struct ib_qp_init_attr *qp_init_attr)
{
        return qp->device->query_qp ?
                qp->device->query_qp(qp, qp_attr, qp_attr_mask, qp_init_attr) :
                -ENOSYS;
}
EXPORT_SYMBOL(ib_query_qp);

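/**
 * ib_destroy_qp - Destroys the specified QP and drops its references
 *   on the associated PD, CQs, and SRQ (if any).
 * @qp: The QP to destroy.
 */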
int ib_destroy_qp(struct ib_qp *qp)
{
        struct ib_pd *pd;
        struct ib_cq *scq, *rcq;
        struct ib_srq *srq;
        int ret;

        pd = qp->pd;
        scq = qp->send_cq;
        rcq = qp->recv_cq;
        srq = qp->srq;

        ret = qp->device->destroy_qp(qp);
        if (!ret) {
                atomic_dec(&pd->usecnt);
                atomic_dec(&scq->usecnt);
                atomic_dec(&rcq->usecnt);
                if (srq)
                        atomic_dec(&srq->usecnt);
        }

        return ret;
}
EXPORT_SYMBOL(ib_destroy_qp);

/* Completion queues */

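/**
 * ib_create_cq - Creates a CQ on the specified device.
 * @device: The device on which to create the CQ.
 * @comp_handler: Callback invoked when a completion event occurs on
 *   the CQ.
 * @event_handler: Callback invoked when an asynchronous event occurs
 *   on the CQ.
 * @cq_context: User context returned through both handlers.
 * @cqe: The minimum size of the CQ.
 */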
struct ib_cq *ib_create_cq(struct ib_device *device,
                           ib_comp_handler comp_handler,
                           void (*event_handler)(struct ib_event *, void *),
                           void *cq_context, int cqe)
{
        struct ib_cq *cq;

        cq = device->create_cq(device, cqe, NULL, NULL);

        if (!IS_ERR(cq)) {
                cq->device = device;
                cq->uobject = NULL;
                cq->comp_handler = comp_handler;
                cq->event_handler = event_handler;
                cq->cq_context = cq_context;
                atomic_set(&cq->usecnt, 0);
        }

        return cq;
}
EXPORT_SYMBOL(ib_create_cq);

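/**
 * ib_destroy_cq - Destroys the specified CQ.
 * @cq: The CQ to destroy.  Fails with -EBUSY while any QP still holds
 *   a reference on the CQ.
 */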
int ib_destroy_cq(struct ib_cq *cq)
{
        if (atomic_read(&cq->usecnt))
                return -EBUSY;

        return cq->device->destroy_cq(cq);
}
EXPORT_SYMBOL(ib_destroy_cq);

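/**
 * ib_resize_cq - Modifies the capacity of the CQ.
 * @cq: The CQ to resize.
 * @cqe: The minimum size of the CQ.
 *
 * Optional per device; returns -ENOSYS when unsupported.  On success,
 * cq->cqe is updated with the size actually provided by the device.
 */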
int ib_resize_cq(struct ib_cq *cq,
                 int cqe)
{
        int ret;

        if (!cq->device->resize_cq)
                return -ENOSYS;

        ret = cq->device->resize_cq(cq, &cqe);
        if (!ret)
                cq->cqe = cqe;

        return ret;
}
EXPORT_SYMBOL(ib_resize_cq);

/* Memory regions */

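/**
 * ib_get_dma_mr - Returns a memory region for system memory that is
 *   usable for DMA.
 * @pd: The protection domain associated with the memory region.
 * @mr_access_flags: Specifies the memory access rights (IB_ACCESS_*).
 */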
struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags)
{
        struct ib_mr *mr;

        mr = pd->device->get_dma_mr(pd, mr_access_flags);

        if (!IS_ERR(mr)) {
                mr->device = pd->device;
                mr->pd = pd;
                mr->uobject = NULL;
                atomic_inc(&pd->usecnt);
                atomic_set(&mr->usecnt, 0);
        }

        return mr;
}
EXPORT_SYMBOL(ib_get_dma_mr);

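/**
 * ib_reg_phys_mr - Registers a list of physical buffers as a single
 *   memory region usable by the HCA.
 * @pd: The protection domain assigned to the registered region.
 * @phys_buf_array: The list of physical buffers making up the region.
 * @num_phys_buf: The number of entries in @phys_buf_array.
 * @mr_access_flags: Specifies the memory access rights (IB_ACCESS_*).
 * @iova_start: The requested starting I/O virtual address of the region.
 */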
struct ib_mr *ib_reg_phys_mr(struct ib_pd *pd,
                             struct ib_phys_buf *phys_buf_array,
                             int num_phys_buf,
                             int mr_access_flags,
                             u64 *iova_start)
{
        struct ib_mr *mr;

        mr = pd->device->reg_phys_mr(pd, phys_buf_array, num_phys_buf,
                                     mr_access_flags, iova_start);

        if (!IS_ERR(mr)) {
                mr->device = pd->device;
                mr->pd = pd;
                mr->uobject = NULL;
                atomic_inc(&pd->usecnt);
                atomic_set(&mr->usecnt, 0);
        }

        return mr;
}
EXPORT_SYMBOL(ib_reg_phys_mr);

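/**
 * ib_rereg_phys_mr - Modifies the attributes of an existing memory region.
 * @mr: The memory region to modify; fails with -EBUSY while the region
 *   is in use.
 * @mr_rereg_mask: IB_MR_REREG_* bits selecting which attributes change.
 * @pd: The new protection domain, if IB_MR_REREG_PD is set.
 * @phys_buf_array: The new physical buffer list, if IB_MR_REREG_TRANS
 *   is set.
 * @num_phys_buf: The number of entries in @phys_buf_array.
 * @mr_access_flags: The new access rights, if IB_MR_REREG_ACCESS is set.
 * @iova_start: The new starting I/O virtual address.
 *
 * Optional per device; returns -ENOSYS when unsupported.
 */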
int ib_rereg_phys_mr(struct ib_mr *mr,
                     int mr_rereg_mask,
                     struct ib_pd *pd,
                     struct ib_phys_buf *phys_buf_array,
                     int num_phys_buf,
                     int mr_access_flags,
                     u64 *iova_start)
{
        struct ib_pd *old_pd;
        int ret;

        if (!mr->device->rereg_phys_mr)
                return -ENOSYS;

        if (atomic_read(&mr->usecnt))
                return -EBUSY;

        old_pd = mr->pd;

        ret = mr->device->rereg_phys_mr(mr, mr_rereg_mask, pd,
                                        phys_buf_array, num_phys_buf,
                                        mr_access_flags, iova_start);

        if (!ret && (mr_rereg_mask & IB_MR_REREG_PD)) {
                atomic_dec(&old_pd->usecnt);
                atomic_inc(&pd->usecnt);
        }

        return ret;
}
EXPORT_SYMBOL(ib_rereg_phys_mr);

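/**
 * ib_query_mr - Retrieves the attributes of the specified memory region.
 * @mr: The memory region to query.
 * @mr_attr: Returned attributes of the memory region.
 *
 * Optional per device; returns -ENOSYS when unsupported.
 */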
int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr)
{
        return mr->device->query_mr ?
                mr->device->query_mr(mr, mr_attr) : -ENOSYS;
}
EXPORT_SYMBOL(ib_query_mr);

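/**
 * ib_dereg_mr - Deregisters a memory region and drops its reference on
 *   the parent protection domain.
 * @mr: The memory region to deregister; fails with -EBUSY while in use.
 */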
int ib_dereg_mr(struct ib_mr *mr)
{
        struct ib_pd *pd;
        int ret;

        if (atomic_read(&mr->usecnt))
                return -EBUSY;

        pd = mr->pd;
        ret = mr->device->dereg_mr(mr);
        if (!ret)
                atomic_dec(&pd->usecnt);

        return ret;
}
EXPORT_SYMBOL(ib_dereg_mr);

/* Memory windows */

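/**
 * ib_alloc_mw - Allocates a memory window.
 * @pd: The protection domain associated with the memory window.
 *
 * Optional per device; returns ERR_PTR(-ENOSYS) when unsupported.
 */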
struct ib_mw *ib_alloc_mw(struct ib_pd *pd)
{
        struct ib_mw *mw;

        if (!pd->device->alloc_mw)
                return ERR_PTR(-ENOSYS);

        mw = pd->device->alloc_mw(pd);
        if (!IS_ERR(mw)) {
                mw->device = pd->device;
                mw->pd = pd;
                mw->uobject = NULL;
                atomic_inc(&pd->usecnt);
        }

        return mw;
}
EXPORT_SYMBOL(ib_alloc_mw);

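/**
 * ib_dealloc_mw - Deallocates a memory window and drops its reference
 *   on the parent protection domain.
 * @mw: The memory window to deallocate.
 */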
int ib_dealloc_mw(struct ib_mw *mw)
{
        struct ib_pd *pd;
        int ret;

        pd = mw->pd;
        ret = mw->device->dealloc_mw(mw);
        if (!ret)
                atomic_dec(&pd->usecnt);

        return ret;
}
EXPORT_SYMBOL(ib_dealloc_mw);

/* "Fast" memory regions */

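/**
 * ib_alloc_fmr - Allocates an unmapped fast memory region.
 * @pd: The protection domain associated with the region.
 * @mr_access_flags: Specifies the memory access rights (IB_ACCESS_*).
 * @fmr_attr: Attributes of the unmapped region, including its maximum
 *   number of pages and page size.
 *
 * Optional per device; returns ERR_PTR(-ENOSYS) when unsupported.
 */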
struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
                            int mr_access_flags,
                            struct ib_fmr_attr *fmr_attr)
{
        struct ib_fmr *fmr;

        if (!pd->device->alloc_fmr)
                return ERR_PTR(-ENOSYS);

        fmr = pd->device->alloc_fmr(pd, mr_access_flags, fmr_attr);
        if (!IS_ERR(fmr)) {
                fmr->device = pd->device;
                fmr->pd = pd;
                atomic_inc(&pd->usecnt);
        }

        return fmr;
}
EXPORT_SYMBOL(ib_alloc_fmr);

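/**
 * ib_unmap_fmr - Removes the mappings from a list of fast memory regions.
 * @fmr_list: The FMRs to unmap.  All entries must belong to the same
 *   device, since the first entry's device method is used for the
 *   whole batch.
 */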
int ib_unmap_fmr(struct list_head *fmr_list)
{
        struct ib_fmr *fmr;

        if (list_empty(fmr_list))
                return 0;

        fmr = list_entry(fmr_list->next, struct ib_fmr, list);
        return fmr->device->unmap_fmr(fmr_list);
}
EXPORT_SYMBOL(ib_unmap_fmr);

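/**
 * ib_dealloc_fmr - Deallocates an FMR and drops its reference on the
 *   parent protection domain.
 * @fmr: The FMR to deallocate.
 */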
int ib_dealloc_fmr(struct ib_fmr *fmr)
{
        struct ib_pd *pd;
        int ret;

        pd = fmr->pd;
        ret = fmr->device->dealloc_fmr(fmr);
        if (!ret)
                atomic_dec(&pd->usecnt);

        return ret;
}
EXPORT_SYMBOL(ib_dealloc_fmr);

/* Multicast groups */

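/**
 * ib_attach_mcast - Attaches the specified QP to a multicast group.
 * @qp: The QP to attach; must be a UD QP.
 * @gid: The multicast group GID.
 * @lid: The multicast group LID in host byte order.
 *
 * Optional per device; returns -ENOSYS when unsupported.
 */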
int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
{
        return qp->device->attach_mcast ?
                qp->device->attach_mcast(qp, gid, lid) :
                -ENOSYS;
}
EXPORT_SYMBOL(ib_attach_mcast);

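/**
 * ib_detach_mcast - Detaches the specified QP from a multicast group.
 * @qp: The QP to detach.
 * @gid: The multicast group GID.
 * @lid: The multicast group LID in host byte order.
 *
 * Optional per device; returns -ENOSYS when unsupported.
 */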
int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
{
        return qp->device->detach_mcast ?
                qp->device->detach_mcast(qp, gid, lid) :
                -ENOSYS;
}
EXPORT_SYMBOL(ib_detach_mcast);