/*
 * Copyright (c) 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems.  All rights reserved.
 * Copyright (c) 2005 PathScale, Inc.  All rights reserved.
 * Copyright (c) 2006 Mellanox Technologies.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/slab.h>

#include <asm/uaccess.h>

#include "uverbs.h"
struct uverbs_lock_class {
	struct lock_class_key	key;
	char			*name;
};
static struct uverbs_lock_class pd_lock_class	= { .name = "PD-uobj" };
static struct uverbs_lock_class mr_lock_class	= { .name = "MR-uobj" };
static struct uverbs_lock_class mw_lock_class	= { .name = "MW-uobj" };
static struct uverbs_lock_class cq_lock_class	= { .name = "CQ-uobj" };
static struct uverbs_lock_class qp_lock_class	= { .name = "QP-uobj" };
static struct uverbs_lock_class ah_lock_class	= { .name = "AH-uobj" };
static struct uverbs_lock_class srq_lock_class	= { .name = "SRQ-uobj" };
static struct uverbs_lock_class xrcd_lock_class = { .name = "XRCD-uobj" };
static struct uverbs_lock_class rule_lock_class = { .name = "RULE-uobj" };
/*
 * The ib_uobject locking scheme is as follows:
 *
 * - ib_uverbs_idr_lock protects the uverbs idrs themselves, so it
 *   needs to be held during all idr operations.  When an object is
 *   looked up, a reference must be taken on the object's kref before
 *   dropping this lock.
 *
 * - Each object also has an rwsem.  This rwsem must be held for
 *   reading while an operation that uses the object is performed.
 *   For example, while registering an MR, the associated PD's
 *   uobject.mutex must be held for reading.  The rwsem must be held
 *   for writing while initializing or destroying an object.
 *
 * - In addition, each object has a "live" flag.  If this flag is not
 *   set, then lookups of the object will fail even if it is found in
 *   the idr.  This handles a reader that blocks and does not acquire
 *   the rwsem until after the object is destroyed.  The destroy
 *   operation will set the live flag to 0 and then drop the rwsem;
 *   this will allow the reader to acquire the rwsem, see that the
 *   live flag is 0, and then drop the rwsem and its reference to the
 *   object.  The underlying storage will not be freed until the last
 *   reference to the object is dropped.
 */
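
/*
 * A minimal sketch of the read-side pattern this scheme implies, using
 * the helpers defined below.  The handle/context names and the error
 * value are illustrative only, not taken from this file:
 *
 *	cq = idr_read_cq(handle, context, 0);	(kref_get + down_read;
 *						 NULL if absent or !live)
 *	if (!cq)
 *		return -EINVAL;
 *	... use cq while holding the uobject rwsem for reading ...
 *	put_cq_read(cq);			(up_read + kref_put)
 */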

static void init_uobj(struct ib_uobject *uobj, u64 user_handle,
		      struct ib_ucontext *context, struct uverbs_lock_class *c)
{
	uobj->user_handle = user_handle;
	uobj->context     = context;
	kref_init(&uobj->ref);
	init_rwsem(&uobj->mutex);
	lockdep_set_class_and_name(&uobj->mutex, &c->key, c->name);
	uobj->live = 0;
}

static void release_uobj(struct kref *kref)
{
	kfree(container_of(kref, struct ib_uobject, ref));
}

static void put_uobj(struct ib_uobject *uobj)
{
	kref_put(&uobj->ref, release_uobj);
}

static void put_uobj_read(struct ib_uobject *uobj)
{
	up_read(&uobj->mutex);
	put_uobj(uobj);
}

static void put_uobj_write(struct ib_uobject *uobj)
{
	up_write(&uobj->mutex);
	put_uobj(uobj);
}

static int idr_add_uobj(struct idr *idr, struct ib_uobject *uobj)
{
	int ret;

	idr_preload(GFP_KERNEL);
	spin_lock(&ib_uverbs_idr_lock);

	ret = idr_alloc(idr, uobj, 0, 0, GFP_NOWAIT);
	if (ret >= 0)
		uobj->id = ret;

	spin_unlock(&ib_uverbs_idr_lock);
	idr_preload_end();

	return ret < 0 ? ret : 0;
}

void idr_remove_uobj(struct idr *idr, struct ib_uobject *uobj)
{
	spin_lock(&ib_uverbs_idr_lock);
	idr_remove(idr, uobj->id);
	spin_unlock(&ib_uverbs_idr_lock);
}

static struct ib_uobject *__idr_get_uobj(struct idr *idr, int id,
					 struct ib_ucontext *context)
{
	struct ib_uobject *uobj;

	spin_lock(&ib_uverbs_idr_lock);
	uobj = idr_find(idr, id);
	if (uobj) {
		if (uobj->context == context)
			kref_get(&uobj->ref);
		else
			uobj = NULL;
	}
	spin_unlock(&ib_uverbs_idr_lock);

	return uobj;
}

static struct ib_uobject *idr_read_uobj(struct idr *idr, int id,
					struct ib_ucontext *context, int nested)
{
	struct ib_uobject *uobj;

	uobj = __idr_get_uobj(idr, id, context);
	if (!uobj)
		return NULL;

	if (nested)
		down_read_nested(&uobj->mutex, SINGLE_DEPTH_NESTING);
	else
		down_read(&uobj->mutex);
	if (!uobj->live) {
		put_uobj_read(uobj);
		return NULL;
	}

	return uobj;
}

static struct ib_uobject *idr_write_uobj(struct idr *idr, int id,
					 struct ib_ucontext *context)
{
	struct ib_uobject *uobj;

	uobj = __idr_get_uobj(idr, id, context);
	if (!uobj)
		return NULL;

	down_write(&uobj->mutex);
	if (!uobj->live) {
		put_uobj_write(uobj);
		return NULL;
	}

	return uobj;
}

static void *idr_read_obj(struct idr *idr, int id, struct ib_ucontext *context,
			  int nested)
{
	struct ib_uobject *uobj;

	uobj = idr_read_uobj(idr, id, context, nested);
	return uobj ? uobj->object : NULL;
}

static struct ib_pd *idr_read_pd(int pd_handle, struct ib_ucontext *context)
{
	return idr_read_obj(&ib_uverbs_pd_idr, pd_handle, context, 0);
}

static void put_pd_read(struct ib_pd *pd)
{
	put_uobj_read(pd->uobject);
}

static struct ib_cq *idr_read_cq(int cq_handle, struct ib_ucontext *context, int nested)
{
	return idr_read_obj(&ib_uverbs_cq_idr, cq_handle, context, nested);
}

static void put_cq_read(struct ib_cq *cq)
{
	put_uobj_read(cq->uobject);
}

static struct ib_ah *idr_read_ah(int ah_handle, struct ib_ucontext *context)
{
	return idr_read_obj(&ib_uverbs_ah_idr, ah_handle, context, 0);
}

static void put_ah_read(struct ib_ah *ah)
{
	put_uobj_read(ah->uobject);
}

static struct ib_qp *idr_read_qp(int qp_handle, struct ib_ucontext *context)
{
	return idr_read_obj(&ib_uverbs_qp_idr, qp_handle, context, 0);
}

static struct ib_qp *idr_write_qp(int qp_handle, struct ib_ucontext *context)
{
	struct ib_uobject *uobj;

	uobj = idr_write_uobj(&ib_uverbs_qp_idr, qp_handle, context);
	return uobj ? uobj->object : NULL;
}

static void put_qp_read(struct ib_qp *qp)
{
	put_uobj_read(qp->uobject);
}

static void put_qp_write(struct ib_qp *qp)
{
	put_uobj_write(qp->uobject);
}

static struct ib_srq *idr_read_srq(int srq_handle, struct ib_ucontext *context)
{
	return idr_read_obj(&ib_uverbs_srq_idr, srq_handle, context, 0);
}

static void put_srq_read(struct ib_srq *srq)
{
	put_uobj_read(srq->uobject);
}

static struct ib_xrcd *idr_read_xrcd(int xrcd_handle, struct ib_ucontext *context,
				     struct ib_uobject **uobj)
{
	*uobj = idr_read_uobj(&ib_uverbs_xrcd_idr, xrcd_handle, context, 0);
	return *uobj ? (*uobj)->object : NULL;
}

static void put_xrcd_read(struct ib_uobject *uobj)
{
	put_uobj_read(uobj);
}

ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
			      const char __user *buf,
			      int in_len, int out_len)
{
	struct ib_uverbs_get_context      cmd;
	struct ib_uverbs_get_context_resp resp;
	struct ib_udata                   udata;
	struct ib_device                 *ibdev = file->device->ib_dev;
	struct ib_ucontext               *ucontext;
	struct file                      *filp;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	mutex_lock(&file->mutex);

	if (file->ucontext) {
		ret = -EINVAL;
		goto err;
	}

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	ucontext = ibdev->alloc_ucontext(ibdev, &udata);
	if (IS_ERR(ucontext)) {
		ret = PTR_ERR(ucontext);
		goto err;
	}

	ucontext->device = ibdev;
	INIT_LIST_HEAD(&ucontext->pd_list);
	INIT_LIST_HEAD(&ucontext->mr_list);
	INIT_LIST_HEAD(&ucontext->mw_list);
	INIT_LIST_HEAD(&ucontext->cq_list);
	INIT_LIST_HEAD(&ucontext->qp_list);
	INIT_LIST_HEAD(&ucontext->srq_list);
	INIT_LIST_HEAD(&ucontext->ah_list);
	INIT_LIST_HEAD(&ucontext->xrcd_list);
	INIT_LIST_HEAD(&ucontext->rule_list);
	ucontext->closing = 0;

	resp.num_comp_vectors = file->device->num_comp_vectors;

	ret = get_unused_fd_flags(O_CLOEXEC);
	if (ret < 0)
		goto err_free;
	resp.async_fd = ret;

	filp = ib_uverbs_alloc_event_file(file, 1);
	if (IS_ERR(filp)) {
		ret = PTR_ERR(filp);
		goto err_fd;
	}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_file;
	}

	file->async_file = filp->private_data;

	INIT_IB_EVENT_HANDLER(&file->event_handler, file->device->ib_dev,
			      ib_uverbs_event_handler);
	ret = ib_register_event_handler(&file->event_handler);
	if (ret)
		goto err_file;

	kref_get(&file->async_file->ref);
	kref_get(&file->ref);
	file->ucontext = ucontext;

	fd_install(resp.async_fd, filp);

	mutex_unlock(&file->mutex);

	return in_len;

err_file:
	fput(filp);

err_fd:
	put_unused_fd(resp.async_fd);

err_free:
	ibdev->dealloc_ucontext(ucontext);

err:
	mutex_unlock(&file->mutex);
	return ret;
}

ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
			       const char __user *buf,
			       int in_len, int out_len)
{
	struct ib_uverbs_query_device      cmd;
	struct ib_uverbs_query_device_resp resp;
	struct ib_device_attr              attr;
	int                                ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ret = ib_query_device(file->device->ib_dev, &attr);
	if (ret)
		return ret;

	memset(&resp, 0, sizeof resp);

	resp.fw_ver		       = attr.fw_ver;
	resp.node_guid		       = file->device->ib_dev->node_guid;
	resp.sys_image_guid	       = attr.sys_image_guid;
	resp.max_mr_size	       = attr.max_mr_size;
	resp.page_size_cap	       = attr.page_size_cap;
	resp.vendor_id		       = attr.vendor_id;
	resp.vendor_part_id	       = attr.vendor_part_id;
	resp.hw_ver		       = attr.hw_ver;
	resp.max_qp		       = attr.max_qp;
	resp.max_qp_wr		       = attr.max_qp_wr;
	resp.device_cap_flags	       = attr.device_cap_flags;
	resp.max_sge		       = attr.max_sge;
	resp.max_sge_rd		       = attr.max_sge_rd;
	resp.max_cq		       = attr.max_cq;
	resp.max_cqe		       = attr.max_cqe;
	resp.max_mr		       = attr.max_mr;
	resp.max_pd		       = attr.max_pd;
	resp.max_qp_rd_atom	       = attr.max_qp_rd_atom;
	resp.max_ee_rd_atom	       = attr.max_ee_rd_atom;
	resp.max_res_rd_atom	       = attr.max_res_rd_atom;
	resp.max_qp_init_rd_atom       = attr.max_qp_init_rd_atom;
	resp.max_ee_init_rd_atom       = attr.max_ee_init_rd_atom;
	resp.atomic_cap		       = attr.atomic_cap;
	resp.max_ee		       = attr.max_ee;
	resp.max_rdd		       = attr.max_rdd;
	resp.max_mw		       = attr.max_mw;
	resp.max_raw_ipv6_qp	       = attr.max_raw_ipv6_qp;
	resp.max_raw_ethy_qp	       = attr.max_raw_ethy_qp;
	resp.max_mcast_grp	       = attr.max_mcast_grp;
	resp.max_mcast_qp_attach       = attr.max_mcast_qp_attach;
	resp.max_total_mcast_qp_attach = attr.max_total_mcast_qp_attach;
	resp.max_ah		       = attr.max_ah;
	resp.max_fmr		       = attr.max_fmr;
	resp.max_map_per_fmr	       = attr.max_map_per_fmr;
	resp.max_srq		       = attr.max_srq;
	resp.max_srq_wr		       = attr.max_srq_wr;
	resp.max_srq_sge	       = attr.max_srq_sge;
	resp.max_pkeys		       = attr.max_pkeys;
	resp.local_ca_ack_delay	       = attr.local_ca_ack_delay;
	resp.phys_port_cnt	       = file->device->ib_dev->phys_port_cnt;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}

ssize_t ib_uverbs_query_port(struct ib_uverbs_file *file,
			     const char __user *buf,
			     int in_len, int out_len)
{
	struct ib_uverbs_query_port      cmd;
	struct ib_uverbs_query_port_resp resp;
	struct ib_port_attr              attr;
	int                              ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ret = ib_query_port(file->device->ib_dev, cmd.port_num, &attr);
	if (ret)
		return ret;

	memset(&resp, 0, sizeof resp);

	resp.state	     = attr.state;
	resp.max_mtu	     = attr.max_mtu;
	resp.active_mtu	     = attr.active_mtu;
	resp.gid_tbl_len     = attr.gid_tbl_len;
	resp.port_cap_flags  = attr.port_cap_flags;
	resp.max_msg_sz	     = attr.max_msg_sz;
	resp.bad_pkey_cntr   = attr.bad_pkey_cntr;
	resp.qkey_viol_cntr  = attr.qkey_viol_cntr;
	resp.pkey_tbl_len    = attr.pkey_tbl_len;
	resp.lid	     = attr.lid;
	resp.sm_lid	     = attr.sm_lid;
	resp.lmc	     = attr.lmc;
	resp.max_vl_num	     = attr.max_vl_num;
	resp.sm_sl	     = attr.sm_sl;
	resp.subnet_timeout  = attr.subnet_timeout;
	resp.init_type_reply = attr.init_type_reply;
	resp.active_width    = attr.active_width;
	resp.active_speed    = attr.active_speed;
	resp.phys_state	     = attr.phys_state;
	resp.link_layer	     = rdma_port_get_link_layer(file->device->ib_dev,
							cmd.port_num);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}

ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file,
			   const char __user *buf,
			   int in_len, int out_len)
{
	struct ib_uverbs_alloc_pd      cmd;
	struct ib_uverbs_alloc_pd_resp resp;
	struct ib_udata                udata;
	struct ib_uobject             *uobj;
	struct ib_pd                  *pd;
	int                            ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	init_uobj(uobj, 0, file->ucontext, &pd_lock_class);
	down_write(&uobj->mutex);

	pd = file->device->ib_dev->alloc_pd(file->device->ib_dev,
					    file->ucontext, &udata);
	if (IS_ERR(pd)) {
		ret = PTR_ERR(pd);
		goto err;
	}

	pd->device  = file->device->ib_dev;
	pd->uobject = uobj;
	atomic_set(&pd->usecnt, 0);

	uobj->object = pd;
	ret = idr_add_uobj(&ib_uverbs_pd_idr, uobj);
	if (ret)
		goto err_idr;

	memset(&resp, 0, sizeof resp);
	resp.pd_handle = uobj->id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	mutex_lock(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->pd_list);
	mutex_unlock(&file->mutex);

	uobj->live = 1;

	up_write(&uobj->mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_pd_idr, uobj);

err_idr:
	ib_dealloc_pd(pd);

err:
	put_uobj_write(uobj);
	return ret;
}

ssize_t ib_uverbs_dealloc_pd(struct ib_uverbs_file *file,
			     const char __user *buf,
			     int in_len, int out_len)
{
	struct ib_uverbs_dealloc_pd cmd;
	struct ib_uobject          *uobj;
	int                         ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_pd_idr, cmd.pd_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;

	ret = ib_dealloc_pd(uobj->object);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_pd_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);

	return in_len;
}

struct xrcd_table_entry {
	struct rb_node  node;
	struct ib_xrcd *xrcd;
	struct inode   *inode;
};

static int xrcd_table_insert(struct ib_uverbs_device *dev,
			     struct inode *inode,
			     struct ib_xrcd *xrcd)
{
	struct xrcd_table_entry *entry, *scan;
	struct rb_node **p = &dev->xrcd_tree.rb_node;
	struct rb_node *parent = NULL;

	entry = kmalloc(sizeof *entry, GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->xrcd  = xrcd;
	entry->inode = inode;

	while (*p) {
		parent = *p;
		scan = rb_entry(parent, struct xrcd_table_entry, node);

		if (inode < scan->inode) {
			p = &(*p)->rb_left;
		} else if (inode > scan->inode) {
			p = &(*p)->rb_right;
		} else {
			kfree(entry);
			return -EEXIST;
		}
	}

	rb_link_node(&entry->node, parent, p);
	rb_insert_color(&entry->node, &dev->xrcd_tree);
	igrab(inode);
	return 0;
}

static struct xrcd_table_entry *xrcd_table_search(struct ib_uverbs_device *dev,
						  struct inode *inode)
{
	struct xrcd_table_entry *entry;
	struct rb_node *p = dev->xrcd_tree.rb_node;

	while (p) {
		entry = rb_entry(p, struct xrcd_table_entry, node);

		if (inode < entry->inode)
			p = p->rb_left;
		else if (inode > entry->inode)
			p = p->rb_right;
		else
			return entry;
	}

	return NULL;
}

static struct ib_xrcd *find_xrcd(struct ib_uverbs_device *dev, struct inode *inode)
{
	struct xrcd_table_entry *entry;

	entry = xrcd_table_search(dev, inode);
	if (!entry)
		return NULL;

	return entry->xrcd;
}

static void xrcd_table_delete(struct ib_uverbs_device *dev,
			      struct inode *inode)
{
	struct xrcd_table_entry *entry;

	entry = xrcd_table_search(dev, inode);
	if (entry) {
		iput(inode);
		rb_erase(&entry->node, &dev->xrcd_tree);
		kfree(entry);
	}
}

ssize_t ib_uverbs_open_xrcd(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_open_xrcd      cmd;
	struct ib_uverbs_open_xrcd_resp resp;
	struct ib_udata                 udata;
	struct ib_uxrcd_object         *obj;
	struct ib_xrcd                 *xrcd = NULL;
	struct fd                       f = {NULL, 0};
	struct inode                   *inode = NULL;
	int                             ret = 0;
	int                             new_xrcd = 0;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	mutex_lock(&file->device->xrcd_tree_mutex);

	if (cmd.fd != -1) {
		/* search for file descriptor */
		f = fdget(cmd.fd);
		if (!f.file) {
			ret = -EBADF;
			goto err_tree_mutex_unlock;
		}

		inode = file_inode(f.file);
		xrcd = find_xrcd(file->device, inode);
		if (!xrcd && !(cmd.oflags & O_CREAT)) {
			/* no file descriptor. Need CREATE flag */
			ret = -EAGAIN;
			goto err_tree_mutex_unlock;
		}

		if (xrcd && cmd.oflags & O_EXCL) {
			ret = -EINVAL;
			goto err_tree_mutex_unlock;
		}
	}

	obj = kmalloc(sizeof *obj, GFP_KERNEL);
	if (!obj) {
		ret = -ENOMEM;
		goto err_tree_mutex_unlock;
	}

	init_uobj(&obj->uobject, 0, file->ucontext, &xrcd_lock_class);

	down_write(&obj->uobject.mutex);

	if (!xrcd) {
		xrcd = file->device->ib_dev->alloc_xrcd(file->device->ib_dev,
							file->ucontext, &udata);
		if (IS_ERR(xrcd)) {
			ret = PTR_ERR(xrcd);
			goto err;
		}

		xrcd->inode  = inode;
		xrcd->device = file->device->ib_dev;
		atomic_set(&xrcd->usecnt, 0);
		mutex_init(&xrcd->tgt_qp_mutex);
		INIT_LIST_HEAD(&xrcd->tgt_qp_list);
		new_xrcd = 1;
	}

	atomic_set(&obj->refcnt, 0);
	obj->uobject.object = xrcd;
	ret = idr_add_uobj(&ib_uverbs_xrcd_idr, &obj->uobject);
	if (ret)
		goto err_idr;

	memset(&resp, 0, sizeof resp);
	resp.xrcd_handle = obj->uobject.id;

	if (inode) {
		if (new_xrcd) {
			/* create new inode/xrcd table entry */
			ret = xrcd_table_insert(file->device, inode, xrcd);
			if (ret)
				goto err_insert_xrcd;
		}
		atomic_inc(&xrcd->usecnt);
	}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	if (f.file)
		fdput(f);

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uobject.list, &file->ucontext->xrcd_list);
	mutex_unlock(&file->mutex);

	obj->uobject.live = 1;
	up_write(&obj->uobject.mutex);

	mutex_unlock(&file->device->xrcd_tree_mutex);
	return in_len;

err_copy:
	if (inode) {
		if (new_xrcd)
			xrcd_table_delete(file->device, inode);
		atomic_dec(&xrcd->usecnt);
	}

err_insert_xrcd:
	idr_remove_uobj(&ib_uverbs_xrcd_idr, &obj->uobject);

err_idr:
	ib_dealloc_xrcd(xrcd);

err:
	put_uobj_write(&obj->uobject);

err_tree_mutex_unlock:
	if (f.file)
		fdput(f);

	mutex_unlock(&file->device->xrcd_tree_mutex);

	return ret;
}

ssize_t ib_uverbs_close_xrcd(struct ib_uverbs_file *file,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_close_xrcd cmd;
	struct ib_uobject          *uobj;
	struct ib_xrcd             *xrcd = NULL;
	struct inode               *inode = NULL;
	struct ib_uxrcd_object     *obj;
	int                         live;
	int                         ret = 0;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	mutex_lock(&file->device->xrcd_tree_mutex);
	uobj = idr_write_uobj(&ib_uverbs_xrcd_idr, cmd.xrcd_handle, file->ucontext);
	if (!uobj) {
		ret = -EINVAL;
		goto out;
	}

	xrcd  = uobj->object;
	inode = xrcd->inode;
	obj   = container_of(uobj, struct ib_uxrcd_object, uobject);
	if (atomic_read(&obj->refcnt)) {
		put_uobj_write(uobj);
		ret = -EBUSY;
		goto out;
	}

	if (!inode || atomic_dec_and_test(&xrcd->usecnt)) {
		ret = ib_dealloc_xrcd(uobj->object);
		if (!ret)
			uobj->live = 0;
	}

	live = uobj->live;
	if (inode && ret)
		atomic_inc(&xrcd->usecnt);

	put_uobj_write(uobj);

	if (ret)
		goto out;

	if (inode && !live)
		xrcd_table_delete(file->device, inode);

	idr_remove_uobj(&ib_uverbs_xrcd_idr, uobj);
	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);
	ret = in_len;

out:
	mutex_unlock(&file->device->xrcd_tree_mutex);
	return ret;
}

void ib_uverbs_dealloc_xrcd(struct ib_uverbs_device *dev,
			    struct ib_xrcd *xrcd)
{
	struct inode *inode;

	inode = xrcd->inode;
	if (inode && !atomic_dec_and_test(&xrcd->usecnt))
		return;

	ib_dealloc_xrcd(xrcd);

	if (inode)
		xrcd_table_delete(dev, inode);
}

ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
			 const char __user *buf, int in_len,
			 int out_len)
{
	struct ib_uverbs_reg_mr      cmd;
	struct ib_uverbs_reg_mr_resp resp;
	struct ib_udata              udata;
	struct ib_uobject           *uobj;
	struct ib_pd                *pd;
	struct ib_mr                *mr;
	int                          ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	if ((cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK))
		return -EINVAL;

	ret = ib_check_mr_access(cmd.access_flags);
	if (ret)
		return ret;

	uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	init_uobj(uobj, 0, file->ucontext, &mr_lock_class);
	down_write(&uobj->mutex);

	pd = idr_read_pd(cmd.pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err_free;
	}

	mr = pd->device->reg_user_mr(pd, cmd.start, cmd.length, cmd.hca_va,
				     cmd.access_flags, &udata);
	if (IS_ERR(mr)) {
		ret = PTR_ERR(mr);
		goto err_put;
	}

	mr->device  = pd->device;
	mr->pd      = pd;
	mr->uobject = uobj;
	atomic_inc(&pd->usecnt);
	atomic_set(&mr->usecnt, 0);

	uobj->object = mr;
	ret = idr_add_uobj(&ib_uverbs_mr_idr, uobj);
	if (ret)
		goto err_unreg;

	memset(&resp, 0, sizeof resp);
	resp.lkey      = mr->lkey;
	resp.rkey      = mr->rkey;
	resp.mr_handle = uobj->id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	put_pd_read(pd);

	mutex_lock(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->mr_list);
	mutex_unlock(&file->mutex);

	uobj->live = 1;

	up_write(&uobj->mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_mr_idr, uobj);

err_unreg:
	ib_dereg_mr(mr);

err_put:
	put_pd_read(pd);

err_free:
	put_uobj_write(uobj);
	return ret;
}

ssize_t ib_uverbs_dereg_mr(struct ib_uverbs_file *file,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_dereg_mr cmd;
	struct ib_mr             *mr;
	struct ib_uobject        *uobj;
	int                       ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_mr_idr, cmd.mr_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;

	mr = uobj->object;

	ret = ib_dereg_mr(mr);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_mr_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);

	return in_len;
}

ssize_t ib_uverbs_alloc_mw(struct ib_uverbs_file *file,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_alloc_mw      cmd;
	struct ib_uverbs_alloc_mw_resp resp;
	struct ib_uobject             *uobj;
	struct ib_pd                  *pd;
	struct ib_mw                  *mw;
	int                            ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	uobj = kmalloc(sizeof(*uobj), GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	init_uobj(uobj, 0, file->ucontext, &mw_lock_class);
	down_write(&uobj->mutex);

	pd = idr_read_pd(cmd.pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err_free;
	}

	mw = pd->device->alloc_mw(pd, cmd.mw_type);
	if (IS_ERR(mw)) {
		ret = PTR_ERR(mw);
		goto err_put;
	}

	mw->device  = pd->device;
	mw->pd      = pd;
	mw->uobject = uobj;
	atomic_inc(&pd->usecnt);

	uobj->object = mw;
	ret = idr_add_uobj(&ib_uverbs_mw_idr, uobj);
	if (ret)
		goto err_unalloc;

	memset(&resp, 0, sizeof(resp));
	resp.rkey      = mw->rkey;
	resp.mw_handle = uobj->id;

	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp))) {
		ret = -EFAULT;
		goto err_copy;
	}

	put_pd_read(pd);

	mutex_lock(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->mw_list);
	mutex_unlock(&file->mutex);

	uobj->live = 1;

	up_write(&uobj->mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_mw_idr, uobj);

err_unalloc:
	ib_dealloc_mw(mw);

err_put:
	put_pd_read(pd);

err_free:
	put_uobj_write(uobj);
	return ret;
}

ssize_t ib_uverbs_dealloc_mw(struct ib_uverbs_file *file,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_dealloc_mw cmd;
	struct ib_mw               *mw;
	struct ib_uobject          *uobj;
	int                         ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_mw_idr, cmd.mw_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;

	mw = uobj->object;

	ret = ib_dealloc_mw(mw);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_mw_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);

	return in_len;
}

ssize_t ib_uverbs_create_comp_channel(struct ib_uverbs_file *file,
				      const char __user *buf, int in_len,
				      int out_len)
{
	struct ib_uverbs_create_comp_channel      cmd;
	struct ib_uverbs_create_comp_channel_resp resp;
	struct file                              *filp;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ret = get_unused_fd_flags(O_CLOEXEC);
	if (ret < 0)
		return ret;
	resp.fd = ret;

	filp = ib_uverbs_alloc_event_file(file, 0);
	if (IS_ERR(filp)) {
		put_unused_fd(resp.fd);
		return PTR_ERR(filp);
	}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		put_unused_fd(resp.fd);
		fput(filp);
		return -EFAULT;
	}

	fd_install(resp.fd, filp);
	return in_len;
}

ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_cq      cmd;
	struct ib_uverbs_create_cq_resp resp;
	struct ib_udata                 udata;
	struct ib_ucq_object           *obj;
	struct ib_uverbs_event_file    *ev_file = NULL;
	struct ib_cq                   *cq;
	int                             ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	if (cmd.comp_vector >= file->device->num_comp_vectors)
		return -EINVAL;

	obj = kmalloc(sizeof *obj, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	init_uobj(&obj->uobject, cmd.user_handle, file->ucontext, &cq_lock_class);
	down_write(&obj->uobject.mutex);

	if (cmd.comp_channel >= 0) {
		ev_file = ib_uverbs_lookup_comp_file(cmd.comp_channel);
		if (!ev_file) {
			ret = -EINVAL;
			goto err;
		}
	}

	obj->uverbs_file	   = file;
	obj->comp_events_reported  = 0;
	obj->async_events_reported = 0;
	INIT_LIST_HEAD(&obj->comp_list);
	INIT_LIST_HEAD(&obj->async_list);

	cq = file->device->ib_dev->create_cq(file->device->ib_dev, cmd.cqe,
					     cmd.comp_vector,
					     file->ucontext, &udata);
	if (IS_ERR(cq)) {
		ret = PTR_ERR(cq);
		goto err_file;
	}

	cq->device        = file->device->ib_dev;
	cq->uobject       = &obj->uobject;
	cq->comp_handler  = ib_uverbs_comp_handler;
	cq->event_handler = ib_uverbs_cq_event_handler;
	cq->cq_context    = ev_file;
	atomic_set(&cq->usecnt, 0);

	obj->uobject.object = cq;
	ret = idr_add_uobj(&ib_uverbs_cq_idr, &obj->uobject);
	if (ret)
		goto err_free;

	memset(&resp, 0, sizeof resp);
	resp.cq_handle = obj->uobject.id;
	resp.cqe       = cq->cqe;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uobject.list, &file->ucontext->cq_list);
	mutex_unlock(&file->mutex);

	obj->uobject.live = 1;

	up_write(&obj->uobject.mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_cq_idr, &obj->uobject);

err_free:
	ib_destroy_cq(cq);

err_file:
	if (ev_file)
		ib_uverbs_release_ucq(file, ev_file, obj);

err:
	put_uobj_write(&obj->uobject);
	return ret;
}

ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_resize_cq      cmd;
	struct ib_uverbs_resize_cq_resp resp;
	struct ib_udata                 udata;
	struct ib_cq                   *cq;
	int                             ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
	if (!cq)
		return -EINVAL;

	ret = cq->device->resize_cq(cq, cmd.cqe, &udata);
	if (ret)
		goto out;

	resp.cqe = cq->cqe;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp.cqe))
		ret = -EFAULT;

out:
	put_cq_read(cq);

	return ret ? ret : in_len;
}

static int copy_wc_to_user(void __user *dest, struct ib_wc *wc)
{
	struct ib_uverbs_wc tmp;

	tmp.wr_id          = wc->wr_id;
	tmp.status         = wc->status;
	tmp.opcode         = wc->opcode;
	tmp.vendor_err     = wc->vendor_err;
	tmp.byte_len       = wc->byte_len;
	tmp.ex.imm_data    = (__u32 __force) wc->ex.imm_data;
	tmp.qp_num         = wc->qp->qp_num;
	tmp.src_qp         = wc->src_qp;
	tmp.wc_flags       = wc->wc_flags;
	tmp.pkey_index     = wc->pkey_index;
	tmp.slid           = wc->slid;
	tmp.sl             = wc->sl;
	tmp.dlid_path_bits = wc->dlid_path_bits;
	tmp.port_num       = wc->port_num;
	tmp.reserved       = 0;

	if (copy_to_user(dest, &tmp, sizeof tmp))
		return -EFAULT;

	return 0;
}

ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file,
			  const char __user *buf, int in_len,
			  int out_len)
{
	struct ib_uverbs_poll_cq      cmd;
	struct ib_uverbs_poll_cq_resp resp;
	u8 __user                    *header_ptr;
	u8 __user                    *data_ptr;
	struct ib_cq                 *cq;
	struct ib_wc                  wc;
	int                           ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
	if (!cq)
		return -EINVAL;

	/* we copy a struct ib_uverbs_poll_cq_resp to user space */
	header_ptr = (void __user *)(unsigned long) cmd.response;
	data_ptr = header_ptr + sizeof resp;

	memset(&resp, 0, sizeof resp);
	while (resp.count < cmd.ne) {
		ret = ib_poll_cq(cq, 1, &wc);
		if (ret < 0)
			goto out_put;
		if (!ret)
			break;

		ret = copy_wc_to_user(data_ptr, &wc);
		if (ret)
			goto out_put;

		data_ptr += sizeof(struct ib_uverbs_wc);
		++resp.count;
	}

	if (copy_to_user(header_ptr, &resp, sizeof resp)) {
		ret = -EFAULT;
		goto out_put;
	}

	ret = in_len;

out_put:
	put_cq_read(cq);
	return ret;
}

ssize_t ib_uverbs_req_notify_cq(struct ib_uverbs_file *file,
				const char __user *buf, int in_len,
				int out_len)
{
	struct ib_uverbs_req_notify_cq cmd;
	struct ib_cq                  *cq;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
	if (!cq)
		return -EINVAL;

	ib_req_notify_cq(cq, cmd.solicited_only ?
			 IB_CQ_SOLICITED : IB_CQ_NEXT_COMP);

	put_cq_read(cq);

	return in_len;
}

ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_destroy_cq      cmd;
	struct ib_uverbs_destroy_cq_resp resp;
	struct ib_uobject               *uobj;
	struct ib_cq                    *cq;
	struct ib_ucq_object            *obj;
	struct ib_uverbs_event_file     *ev_file;
	int                              ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_cq_idr, cmd.cq_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;
	cq      = uobj->object;
	ev_file = cq->cq_context;
	obj     = container_of(cq->uobject, struct ib_ucq_object, uobject);

	ret = ib_destroy_cq(cq);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_cq_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	ib_uverbs_release_ucq(file, ev_file, obj);

	memset(&resp, 0, sizeof resp);
	resp.comp_events_reported  = obj->comp_events_reported;
	resp.async_events_reported = obj->async_events_reported;

	put_uobj(uobj);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}

ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_qp      cmd;
	struct ib_uverbs_create_qp_resp resp;
	struct ib_udata                 udata;
	struct ib_uqp_object           *obj;
	struct ib_device               *device;
	struct ib_pd                   *pd = NULL;
	struct ib_xrcd                 *xrcd = NULL;
	struct ib_uobject              *uninitialized_var(xrcd_uobj);
	struct ib_cq                   *scq = NULL, *rcq = NULL;
	struct ib_srq                  *srq = NULL;
	struct ib_qp                   *qp;
	struct ib_qp_init_attr          attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	if (cmd.qp_type == IB_QPT_RAW_PACKET && !capable(CAP_NET_RAW))
		return -EPERM;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	obj = kzalloc(sizeof *obj, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext, &qp_lock_class);
	down_write(&obj->uevent.uobject.mutex);

	if (cmd.qp_type == IB_QPT_XRC_TGT) {
		xrcd = idr_read_xrcd(cmd.pd_handle, file->ucontext, &xrcd_uobj);
		if (!xrcd) {
			ret = -EINVAL;
			goto err_put;
		}
		device = xrcd->device;
	} else {
		if (cmd.qp_type == IB_QPT_XRC_INI) {
			cmd.max_recv_wr = cmd.max_recv_sge = 0;
		} else {
			if (cmd.is_srq) {
				srq = idr_read_srq(cmd.srq_handle, file->ucontext);
				if (!srq || srq->srq_type != IB_SRQT_BASIC) {
					ret = -EINVAL;
					goto err_put;
				}
			}

			if (cmd.recv_cq_handle != cmd.send_cq_handle) {
				rcq = idr_read_cq(cmd.recv_cq_handle, file->ucontext, 0);
				if (!rcq) {
					ret = -EINVAL;
					goto err_put;
				}
			}
		}

		scq = idr_read_cq(cmd.send_cq_handle, file->ucontext, !!rcq);
		rcq = rcq ?: scq;
		pd  = idr_read_pd(cmd.pd_handle, file->ucontext);
		if (!pd || !scq) {
			ret = -EINVAL;
			goto err_put;
		}

		device = pd->device;
	}

	attr.event_handler = ib_uverbs_qp_event_handler;
	attr.qp_context    = file;
	attr.send_cq       = scq;
	attr.recv_cq       = rcq;
	attr.srq           = srq;
	attr.xrcd          = xrcd;
	attr.sq_sig_type   = cmd.sq_sig_all ? IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;
	attr.qp_type       = cmd.qp_type;
	attr.create_flags  = 0;

	attr.cap.max_send_wr     = cmd.max_send_wr;
	attr.cap.max_recv_wr     = cmd.max_recv_wr;
	attr.cap.max_send_sge    = cmd.max_send_sge;
	attr.cap.max_recv_sge    = cmd.max_recv_sge;
	attr.cap.max_inline_data = cmd.max_inline_data;

	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);
	INIT_LIST_HEAD(&obj->mcast_list);

	if (cmd.qp_type == IB_QPT_XRC_TGT)
		qp = ib_create_qp(pd, &attr);
	else
		qp = device->create_qp(pd, &attr, &udata);

	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_put;
	}

	if (cmd.qp_type != IB_QPT_XRC_TGT) {
		qp->real_qp       = qp;
		qp->device        = device;
		qp->pd            = pd;
		qp->send_cq       = attr.send_cq;
		qp->recv_cq       = attr.recv_cq;
		qp->srq           = attr.srq;
		qp->event_handler = attr.event_handler;
		qp->qp_context    = attr.qp_context;
		qp->qp_type       = attr.qp_type;
		atomic_set(&qp->usecnt, 0);
		atomic_inc(&pd->usecnt);
		atomic_inc(&attr.send_cq->usecnt);
		if (attr.recv_cq)
			atomic_inc(&attr.recv_cq->usecnt);
		if (attr.srq)
			atomic_inc(&attr.srq->usecnt);
	}
	qp->uobject = &obj->uevent.uobject;

	obj->uevent.uobject.object = qp;
	ret = idr_add_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);
	if (ret)
		goto err_destroy;

	memset(&resp, 0, sizeof resp);
	resp.qpn             = qp->qp_num;
	resp.qp_handle       = obj->uevent.uobject.id;
	resp.max_recv_sge    = attr.cap.max_recv_sge;
	resp.max_send_sge    = attr.cap.max_send_sge;
	resp.max_recv_wr     = attr.cap.max_recv_wr;
	resp.max_send_wr     = attr.cap.max_send_wr;
	resp.max_inline_data = attr.cap.max_inline_data;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	if (xrcd) {
		obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object,
					  uobject);
		atomic_inc(&obj->uxrcd->refcnt);
		put_xrcd_read(xrcd_uobj);
	}

	if (pd)
		put_pd_read(pd);
	if (scq)
		put_cq_read(scq);
	if (rcq && rcq != scq)
		put_cq_read(rcq);
	if (srq)
		put_srq_read(srq);

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uevent.uobject.list, &file->ucontext->qp_list);
	mutex_unlock(&file->mutex);

	obj->uevent.uobject.live = 1;

	up_write(&obj->uevent.uobject.mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);

err_destroy:
	ib_destroy_qp(qp);

err_put:
	if (xrcd)
		put_xrcd_read(xrcd_uobj);
	if (pd)
		put_pd_read(pd);
	if (scq)
		put_cq_read(scq);
	if (rcq && rcq != scq)
		put_cq_read(rcq);
	if (srq)
		put_srq_read(srq);

	put_uobj_write(&obj->uevent.uobject);
	return ret;
}

ssize_t ib_uverbs_open_qp(struct ib_uverbs_file *file,
			  const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_open_qp        cmd;
	struct ib_uverbs_create_qp_resp resp;
	struct ib_udata                 udata;
	struct ib_uqp_object           *obj;
	struct ib_xrcd                 *xrcd;
	struct ib_uobject              *uninitialized_var(xrcd_uobj);
	struct ib_qp                   *qp;
	struct ib_qp_open_attr          attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	obj = kmalloc(sizeof *obj, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext, &qp_lock_class);
	down_write(&obj->uevent.uobject.mutex);

	xrcd = idr_read_xrcd(cmd.pd_handle, file->ucontext, &xrcd_uobj);
	if (!xrcd) {
		ret = -EINVAL;
		goto err_put;
	}

	attr.event_handler = ib_uverbs_qp_event_handler;
	attr.qp_context    = file;
	attr.qp_num        = cmd.qpn;
	attr.qp_type       = cmd.qp_type;

	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);
	INIT_LIST_HEAD(&obj->mcast_list);

	qp = ib_open_qp(xrcd, &attr);
	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_put;
	}

	qp->uobject = &obj->uevent.uobject;

	obj->uevent.uobject.object = qp;
	ret = idr_add_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);
	if (ret)
		goto err_destroy;

	memset(&resp, 0, sizeof resp);
	resp.qpn       = qp->qp_num;
	resp.qp_handle = obj->uevent.uobject.id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_remove;
	}

	obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
	atomic_inc(&obj->uxrcd->refcnt);
	put_xrcd_read(xrcd_uobj);

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uevent.uobject.list, &file->ucontext->qp_list);
	mutex_unlock(&file->mutex);

	obj->uevent.uobject.live = 1;

	up_write(&obj->uevent.uobject.mutex);

	return in_len;

err_remove:
	idr_remove_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);

err_destroy:
	ib_destroy_qp(qp);

err_put:
	put_xrcd_read(xrcd_uobj);
	put_uobj_write(&obj->uevent.uobject);
	return ret;
}

ssize_t ib_uverbs_query_qp(struct ib_uverbs_file *file,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_query_qp      cmd;
	struct ib_uverbs_query_qp_resp resp;
	struct ib_qp                  *qp;
	struct ib_qp_attr             *attr;
	struct ib_qp_init_attr        *init_attr;
	int                            ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	attr      = kmalloc(sizeof *attr, GFP_KERNEL);
	init_attr = kmalloc(sizeof *init_attr, GFP_KERNEL);
	if (!attr || !init_attr) {
		ret = -ENOMEM;
		goto out;
	}

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp) {
		ret = -EINVAL;
		goto out;
	}

	ret = ib_query_qp(qp, attr, cmd.attr_mask, init_attr);

	put_qp_read(qp);

	if (ret)
		goto out;

	memset(&resp, 0, sizeof resp);

	resp.qp_state           = attr->qp_state;
	resp.cur_qp_state       = attr->cur_qp_state;
	resp.path_mtu           = attr->path_mtu;
	resp.path_mig_state     = attr->path_mig_state;
	resp.qkey               = attr->qkey;
	resp.rq_psn             = attr->rq_psn;
	resp.sq_psn             = attr->sq_psn;
	resp.dest_qp_num        = attr->dest_qp_num;
	resp.qp_access_flags    = attr->qp_access_flags;
	resp.pkey_index         = attr->pkey_index;
	resp.alt_pkey_index     = attr->alt_pkey_index;
	resp.sq_draining        = attr->sq_draining;
	resp.max_rd_atomic      = attr->max_rd_atomic;
	resp.max_dest_rd_atomic = attr->max_dest_rd_atomic;
	resp.min_rnr_timer      = attr->min_rnr_timer;
	resp.port_num           = attr->port_num;
	resp.timeout            = attr->timeout;
	resp.retry_cnt          = attr->retry_cnt;
	resp.rnr_retry          = attr->rnr_retry;
	resp.alt_port_num       = attr->alt_port_num;
	resp.alt_timeout        = attr->alt_timeout;

	memcpy(resp.dest.dgid, attr->ah_attr.grh.dgid.raw, 16);
	resp.dest.flow_label    = attr->ah_attr.grh.flow_label;
	resp.dest.sgid_index    = attr->ah_attr.grh.sgid_index;
	resp.dest.hop_limit     = attr->ah_attr.grh.hop_limit;
	resp.dest.traffic_class = attr->ah_attr.grh.traffic_class;
	resp.dest.dlid          = attr->ah_attr.dlid;
	resp.dest.sl            = attr->ah_attr.sl;
	resp.dest.src_path_bits = attr->ah_attr.src_path_bits;
	resp.dest.static_rate   = attr->ah_attr.static_rate;
	resp.dest.is_global     = !!(attr->ah_attr.ah_flags & IB_AH_GRH);
	resp.dest.port_num      = attr->ah_attr.port_num;

	memcpy(resp.alt_dest.dgid, attr->alt_ah_attr.grh.dgid.raw, 16);
	resp.alt_dest.flow_label    = attr->alt_ah_attr.grh.flow_label;
	resp.alt_dest.sgid_index    = attr->alt_ah_attr.grh.sgid_index;
	resp.alt_dest.hop_limit     = attr->alt_ah_attr.grh.hop_limit;
	resp.alt_dest.traffic_class = attr->alt_ah_attr.grh.traffic_class;
	resp.alt_dest.dlid          = attr->alt_ah_attr.dlid;
	resp.alt_dest.sl            = attr->alt_ah_attr.sl;
	resp.alt_dest.src_path_bits = attr->alt_ah_attr.src_path_bits;
	resp.alt_dest.static_rate   = attr->alt_ah_attr.static_rate;
	resp.alt_dest.is_global     = !!(attr->alt_ah_attr.ah_flags & IB_AH_GRH);
	resp.alt_dest.port_num      = attr->alt_ah_attr.port_num;

	resp.max_send_wr     = init_attr->cap.max_send_wr;
	resp.max_recv_wr     = init_attr->cap.max_recv_wr;
	resp.max_send_sge    = init_attr->cap.max_send_sge;
	resp.max_recv_sge    = init_attr->cap.max_recv_sge;
	resp.max_inline_data = init_attr->cap.max_inline_data;
	resp.sq_sig_all      = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out:
	kfree(attr);
	kfree(init_attr);

	return ret ? ret : in_len;
}

/* Remove ignored fields set in the attribute mask */
static int modify_qp_mask(enum ib_qp_type qp_type, int mask)
{
	switch (qp_type) {
	case IB_QPT_XRC_INI:
		return mask & ~(IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER);
	case IB_QPT_XRC_TGT:
		return mask & ~(IB_QP_MAX_QP_RD_ATOMIC | IB_QP_RETRY_CNT |
				IB_QP_RNR_RETRY);
	default:
		return mask;
	}
}

ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_modify_qp cmd;
	struct ib_udata            udata;
	struct ib_qp              *qp;
	struct ib_qp_attr         *attr;
	int                        ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
		   out_len);

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp) {
		ret = -EINVAL;
		goto out;
	}

	attr->qp_state            = cmd.qp_state;
	attr->cur_qp_state        = cmd.cur_qp_state;
	attr->path_mtu            = cmd.path_mtu;
	attr->path_mig_state      = cmd.path_mig_state;
	attr->qkey                = cmd.qkey;
	attr->rq_psn              = cmd.rq_psn;
	attr->sq_psn              = cmd.sq_psn;
	attr->dest_qp_num         = cmd.dest_qp_num;
	attr->qp_access_flags     = cmd.qp_access_flags;
	attr->pkey_index          = cmd.pkey_index;
	attr->alt_pkey_index      = cmd.alt_pkey_index;
	attr->en_sqd_async_notify = cmd.en_sqd_async_notify;
	attr->max_rd_atomic       = cmd.max_rd_atomic;
	attr->max_dest_rd_atomic  = cmd.max_dest_rd_atomic;
	attr->min_rnr_timer       = cmd.min_rnr_timer;
	attr->port_num            = cmd.port_num;
	attr->timeout             = cmd.timeout;
	attr->retry_cnt           = cmd.retry_cnt;
	attr->rnr_retry           = cmd.rnr_retry;
	attr->alt_port_num        = cmd.alt_port_num;
	attr->alt_timeout         = cmd.alt_timeout;

	memcpy(attr->ah_attr.grh.dgid.raw, cmd.dest.dgid, 16);
	attr->ah_attr.grh.flow_label    = cmd.dest.flow_label;
	attr->ah_attr.grh.sgid_index    = cmd.dest.sgid_index;
	attr->ah_attr.grh.hop_limit     = cmd.dest.hop_limit;
	attr->ah_attr.grh.traffic_class = cmd.dest.traffic_class;
	attr->ah_attr.dlid              = cmd.dest.dlid;
	attr->ah_attr.sl                = cmd.dest.sl;
	attr->ah_attr.src_path_bits     = cmd.dest.src_path_bits;
	attr->ah_attr.static_rate       = cmd.dest.static_rate;
	attr->ah_attr.ah_flags          = cmd.dest.is_global ? IB_AH_GRH : 0;
	attr->ah_attr.port_num          = cmd.dest.port_num;

	memcpy(attr->alt_ah_attr.grh.dgid.raw, cmd.alt_dest.dgid, 16);
	attr->alt_ah_attr.grh.flow_label    = cmd.alt_dest.flow_label;
	attr->alt_ah_attr.grh.sgid_index    = cmd.alt_dest.sgid_index;
	attr->alt_ah_attr.grh.hop_limit     = cmd.alt_dest.hop_limit;
	attr->alt_ah_attr.grh.traffic_class = cmd.alt_dest.traffic_class;
	attr->alt_ah_attr.dlid              = cmd.alt_dest.dlid;
	attr->alt_ah_attr.sl                = cmd.alt_dest.sl;
	attr->alt_ah_attr.src_path_bits     = cmd.alt_dest.src_path_bits;
	attr->alt_ah_attr.static_rate       = cmd.alt_dest.static_rate;
	attr->alt_ah_attr.ah_flags          = cmd.alt_dest.is_global ? IB_AH_GRH : 0;
	attr->alt_ah_attr.port_num          = cmd.alt_dest.port_num;

	if (qp->real_qp == qp) {
		ret = qp->device->modify_qp(qp, attr,
			modify_qp_mask(qp->qp_type, cmd.attr_mask), &udata);
	} else {
		ret = ib_modify_qp(qp, attr, modify_qp_mask(qp->qp_type, cmd.attr_mask));
	}

	put_qp_read(qp);

	if (ret)
		goto out;

	ret = in_len;

out:
	kfree(attr);

	return ret;
}

ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_destroy_qp      cmd;
	struct ib_uverbs_destroy_qp_resp resp;
	struct ib_uobject               *uobj;
	struct ib_qp                    *qp;
	struct ib_uqp_object            *obj;
	int                              ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	memset(&resp, 0, sizeof resp);

	uobj = idr_write_uobj(&ib_uverbs_qp_idr, cmd.qp_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;
	qp  = uobj->object;
	obj = container_of(uobj, struct ib_uqp_object, uevent.uobject);

	if (!list_empty(&obj->mcast_list)) {
		put_uobj_write(uobj);
		return -EBUSY;
	}

	ret = ib_destroy_qp(qp);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	if (obj->uxrcd)
		atomic_dec(&obj->uxrcd->refcnt);

	idr_remove_uobj(&ib_uverbs_qp_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	ib_uverbs_release_uevent(file, &obj->uevent);

	resp.events_reported = obj->uevent.events_reported;

	put_uobj(uobj);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}

ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_post_send      cmd;
	struct ib_uverbs_post_send_resp resp;
	struct ib_uverbs_send_wr       *user_wr;
	struct ib_send_wr              *wr = NULL, *last, *next, *bad_wr;
	struct ib_qp                   *qp;
	int                             i, sg_ind;
	int                             is_ud;
	ssize_t                         ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	if (in_len < sizeof cmd + cmd.wqe_size * cmd.wr_count +
	    cmd.sge_count * sizeof (struct ib_uverbs_sge))
		return -EINVAL;

	if (cmd.wqe_size < sizeof (struct ib_uverbs_send_wr))
		return -EINVAL;

	user_wr = kmalloc(cmd.wqe_size, GFP_KERNEL);
	if (!user_wr)
		return -ENOMEM;

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp)
		goto out;

	is_ud = qp->qp_type == IB_QPT_UD;
	sg_ind = 0;
	last = NULL;
	for (i = 0; i < cmd.wr_count; ++i) {
		if (copy_from_user(user_wr,
				   buf + sizeof cmd + i * cmd.wqe_size,
				   cmd.wqe_size)) {
			ret = -EFAULT;
			goto out_put;
		}

		if (user_wr->num_sge + sg_ind > cmd.sge_count) {
			ret = -EINVAL;
			goto out_put;
		}

		next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) +
			       user_wr->num_sge * sizeof (struct ib_sge),
			       GFP_KERNEL);
		if (!next) {
			ret = -ENOMEM;
			goto out_put;
		}

		if (!last)
			wr = next;
		else
			last->next = next;
		last = next;

		next->next       = NULL;
		next->wr_id      = user_wr->wr_id;
		next->num_sge    = user_wr->num_sge;
		next->opcode     = user_wr->opcode;
		next->send_flags = user_wr->send_flags;

		if (is_ud) {
			next->wr.ud.ah = idr_read_ah(user_wr->wr.ud.ah,
						     file->ucontext);
			if (!next->wr.ud.ah) {
				ret = -EINVAL;
				goto out_put;
			}
			next->wr.ud.remote_qpn  = user_wr->wr.ud.remote_qpn;
			next->wr.ud.remote_qkey = user_wr->wr.ud.remote_qkey;
			if (next->opcode == IB_WR_SEND_WITH_IMM)
				next->ex.imm_data =
					(__be32 __force) user_wr->ex.imm_data;
		} else {
			switch (next->opcode) {
			case IB_WR_RDMA_WRITE_WITH_IMM:
				next->ex.imm_data =
					(__be32 __force) user_wr->ex.imm_data;
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_READ:
				next->wr.rdma.remote_addr =
					user_wr->wr.rdma.remote_addr;
				next->wr.rdma.rkey        =
					user_wr->wr.rdma.rkey;
				break;
			case IB_WR_SEND_WITH_IMM:
				next->ex.imm_data =
					(__be32 __force) user_wr->ex.imm_data;
				break;
			case IB_WR_SEND_WITH_INV:
				next->ex.invalidate_rkey =
					user_wr->ex.invalidate_rkey;
				break;
			case IB_WR_ATOMIC_CMP_AND_SWP:
			case IB_WR_ATOMIC_FETCH_AND_ADD:
				next->wr.atomic.remote_addr =
					user_wr->wr.atomic.remote_addr;
				next->wr.atomic.compare_add =
					user_wr->wr.atomic.compare_add;
				next->wr.atomic.swap = user_wr->wr.atomic.swap;
				next->wr.atomic.rkey = user_wr->wr.atomic.rkey;
				break;
			default:
				break;
			}
		}

		if (next->num_sge) {
			next->sg_list = (void *) next +
				ALIGN(sizeof *next, sizeof (struct ib_sge));
			if (copy_from_user(next->sg_list,
					   buf + sizeof cmd +
					   cmd.wr_count * cmd.wqe_size +
					   sg_ind * sizeof (struct ib_sge),
					   next->num_sge * sizeof (struct ib_sge))) {
				ret = -EFAULT;
				goto out_put;
			}
			sg_ind += next->num_sge;
		} else
			next->sg_list = NULL;
	}

	resp.bad_wr = 0;
	ret = qp->device->post_send(qp->real_qp, wr, &bad_wr);
	if (ret)
		for (next = wr; next; next = next->next) {
			++resp.bad_wr;
			if (next == bad_wr)
				break;
		}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out_put:
	put_qp_read(qp);

	while (wr) {
		if (is_ud && wr->wr.ud.ah)
			put_ah_read(wr->wr.ud.ah);
		next = wr->next;
		kfree(wr);
		wr = next;
	}

out:
	kfree(user_wr);

	return ret ? ret : in_len;
}

static struct ib_recv_wr *ib_uverbs_unmarshall_recv(const char __user *buf,
						    int in_len,
						    u32 wr_count,
						    u32 sge_count,
						    u32 wqe_size)
{
	struct ib_uverbs_recv_wr *user_wr;
	struct ib_recv_wr        *wr = NULL, *last, *next;
	int                       sg_ind;
	int                       i;
	int                       ret;

	if (in_len < wqe_size * wr_count +
	    sge_count * sizeof (struct ib_uverbs_sge))
		return ERR_PTR(-EINVAL);

	if (wqe_size < sizeof (struct ib_uverbs_recv_wr))
		return ERR_PTR(-EINVAL);

	user_wr = kmalloc(wqe_size, GFP_KERNEL);
	if (!user_wr)
		return ERR_PTR(-ENOMEM);

	sg_ind = 0;
	last = NULL;
	for (i = 0; i < wr_count; ++i) {
		if (copy_from_user(user_wr, buf + i * wqe_size,
				   wqe_size)) {
			ret = -EFAULT;
			goto err;
		}

		if (user_wr->num_sge + sg_ind > sge_count) {
			ret = -EINVAL;
			goto err;
		}

		next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) +
			       user_wr->num_sge * sizeof (struct ib_sge),
			       GFP_KERNEL);
		if (!next) {
			ret = -ENOMEM;
			goto err;
		}

		if (!last)
			wr = next;
		else
			last->next = next;
		last = next;

		next->next    = NULL;
		next->wr_id   = user_wr->wr_id;
		next->num_sge = user_wr->num_sge;

		if (next->num_sge) {
			next->sg_list = (void *) next +
				ALIGN(sizeof *next, sizeof (struct ib_sge));
			if (copy_from_user(next->sg_list,
					   buf + wr_count * wqe_size +
					   sg_ind * sizeof (struct ib_sge),
					   next->num_sge * sizeof (struct ib_sge))) {
				ret = -EFAULT;
				goto err;
			}
			sg_ind += next->num_sge;
		} else
			next->sg_list = NULL;
	}

	kfree(user_wr);
	return wr;

err:
	kfree(user_wr);

	while (wr) {
		next = wr->next;
		kfree(wr);
		wr = next;
	}

	return ERR_PTR(ret);
}

ssize_t ib_uverbs_post_recv(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_post_recv      cmd;
	struct ib_uverbs_post_recv_resp resp;
	struct ib_recv_wr              *wr, *next, *bad_wr;
	struct ib_qp                   *qp;
	ssize_t                         ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
				       in_len - sizeof cmd, cmd.wr_count,
				       cmd.sge_count, cmd.wqe_size);
	if (IS_ERR(wr))
		return PTR_ERR(wr);

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp)
		goto out;

	resp.bad_wr = 0;
	ret = qp->device->post_recv(qp->real_qp, wr, &bad_wr);

	put_qp_read(qp);

	if (ret)
		for (next = wr; next; next = next->next) {
			++resp.bad_wr;
			if (next == bad_wr)
				break;
		}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out:
	while (wr) {
		next = wr->next;
		kfree(wr);
		wr = next;
	}

	return ret ? ret : in_len;
}

ssize_t ib_uverbs_post_srq_recv(struct ib_uverbs_file *file,
				const char __user *buf, int in_len,
				int out_len)
{
	struct ib_uverbs_post_srq_recv      cmd;
	struct ib_uverbs_post_srq_recv_resp resp;
	struct ib_recv_wr                  *wr, *next, *bad_wr;
	struct ib_srq                      *srq;
	ssize_t                             ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
				       in_len - sizeof cmd, cmd.wr_count,
				       cmd.sge_count, cmd.wqe_size);
	if (IS_ERR(wr))
		return PTR_ERR(wr);

	srq = idr_read_srq(cmd.srq_handle, file->ucontext);
	if (!srq)
		goto out;

	resp.bad_wr = 0;
	ret = srq->device->post_srq_recv(srq, wr, &bad_wr);

	put_srq_read(srq);

	if (ret)
		for (next = wr; next; next = next->next) {
			++resp.bad_wr;
			if (next == bad_wr)
				break;
		}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out:
	while (wr) {
		next = wr->next;
		kfree(wr);
		wr = next;
	}

	return ret ? ret : in_len;
}

ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_ah      cmd;
	struct ib_uverbs_create_ah_resp resp;
	struct ib_uobject              *uobj;
	struct ib_pd                   *pd;
	struct ib_ah                   *ah;
	struct ib_ah_attr               attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	init_uobj(uobj, cmd.user_handle, file->ucontext, &ah_lock_class);
	down_write(&uobj->mutex);

	pd = idr_read_pd(cmd.pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err;
	}

	attr.dlid              = cmd.attr.dlid;
	attr.sl                = cmd.attr.sl;
	attr.src_path_bits     = cmd.attr.src_path_bits;
	attr.static_rate       = cmd.attr.static_rate;
	attr.ah_flags          = cmd.attr.is_global ? IB_AH_GRH : 0;
	attr.port_num          = cmd.attr.port_num;
	attr.grh.flow_label    = cmd.attr.grh.flow_label;
	attr.grh.sgid_index    = cmd.attr.grh.sgid_index;
	attr.grh.hop_limit     = cmd.attr.grh.hop_limit;
	attr.grh.traffic_class = cmd.attr.grh.traffic_class;
	memcpy(attr.grh.dgid.raw, cmd.attr.grh.dgid, 16);

	ah = ib_create_ah(pd, &attr);
	if (IS_ERR(ah)) {
		ret = PTR_ERR(ah);
		goto err_put;
	}

	ah->uobject  = uobj;
	uobj->object = ah;

	ret = idr_add_uobj(&ib_uverbs_ah_idr, uobj);
	if (ret)
		goto err_destroy;

	resp.ah_handle = uobj->id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	put_pd_read(pd);

	mutex_lock(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->ah_list);
	mutex_unlock(&file->mutex);

	uobj->live = 1;

	up_write(&uobj->mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_ah_idr, uobj);

err_destroy:
	ib_destroy_ah(ah);

err_put:
	put_pd_read(pd);

err:
	put_uobj_write(uobj);
	return ret;
}

ssize_t ib_uverbs_destroy_ah(struct ib_uverbs_file *file,
			     const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_destroy_ah cmd;
	struct ib_ah               *ah;
	struct ib_uobject          *uobj;
	int                         ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_ah_idr, cmd.ah_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;
	ah = uobj->object;

	ret = ib_destroy_ah(ah);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_ah_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);

	return in_len;
}

ssize_t ib_uverbs_attach_mcast(struct ib_uverbs_file *file,
			       const char __user *buf, int in_len,
			       int out_len)
{
	struct ib_uverbs_attach_mcast cmd;
	struct ib_qp                 *qp;
	struct ib_uqp_object         *obj;
	struct ib_uverbs_mcast_entry *mcast;
	int                           ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	qp = idr_write_qp(cmd.qp_handle, file->ucontext);
	if (!qp)
		return -EINVAL;

	obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);

	list_for_each_entry(mcast, &obj->mcast_list, list)
		if (cmd.mlid == mcast->lid &&
		    !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
			ret = 0;
			goto out_put;
		}

	mcast = kmalloc(sizeof *mcast, GFP_KERNEL);
	if (!mcast) {
		ret = -ENOMEM;
		goto out_put;
	}

	mcast->lid = cmd.mlid;
	memcpy(mcast->gid.raw, cmd.gid, sizeof mcast->gid.raw);

	ret = ib_attach_mcast(qp, &mcast->gid, cmd.mlid);
	if (!ret)
		list_add_tail(&mcast->list, &obj->mcast_list);
	else
		kfree(mcast);

out_put:
	put_qp_write(qp);

	return ret ? ret : in_len;
}

ssize_t ib_uverbs_detach_mcast(struct ib_uverbs_file *file,
			       const char __user *buf, int in_len,
			       int out_len)
{
	struct ib_uverbs_detach_mcast cmd;
	struct ib_uqp_object         *obj;
	struct ib_qp                 *qp;
	struct ib_uverbs_mcast_entry *mcast;
	int                           ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	qp = idr_write_qp(cmd.qp_handle, file->ucontext);
	if (!qp)
		return -EINVAL;

	ret = ib_detach_mcast(qp, (union ib_gid *) cmd.gid, cmd.mlid);
	if (ret)
		goto out_put;

	obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);

	list_for_each_entry(mcast, &obj->mcast_list, list)
		if (cmd.mlid == mcast->lid &&
		    !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
			list_del(&mcast->list);
			kfree(mcast);
			break;
		}

out_put:
	put_qp_write(qp);

	return ret ? ret : in_len;
}

static int kern_spec_to_ib_spec(struct ib_uverbs_flow_spec *kern_spec,
				union ib_flow_spec *ib_spec)
{
	if (kern_spec->reserved)
		return -EINVAL;

	ib_spec->type = kern_spec->type;

	switch (ib_spec->type) {
	case IB_FLOW_SPEC_ETH:
		ib_spec->eth.size = sizeof(struct ib_flow_spec_eth);
		if (ib_spec->eth.size != kern_spec->eth.size)
			return -EINVAL;
		memcpy(&ib_spec->eth.val, &kern_spec->eth.val,
		       sizeof(struct ib_flow_eth_filter));
		memcpy(&ib_spec->eth.mask, &kern_spec->eth.mask,
		       sizeof(struct ib_flow_eth_filter));
		break;
	case IB_FLOW_SPEC_IPV4:
		ib_spec->ipv4.size = sizeof(struct ib_flow_spec_ipv4);
		if (ib_spec->ipv4.size != kern_spec->ipv4.size)
			return -EINVAL;
		memcpy(&ib_spec->ipv4.val, &kern_spec->ipv4.val,
		       sizeof(struct ib_flow_ipv4_filter));
		memcpy(&ib_spec->ipv4.mask, &kern_spec->ipv4.mask,
		       sizeof(struct ib_flow_ipv4_filter));
		break;
	case IB_FLOW_SPEC_TCP:
	case IB_FLOW_SPEC_UDP:
		ib_spec->tcp_udp.size = sizeof(struct ib_flow_spec_tcp_udp);
		if (ib_spec->tcp_udp.size != kern_spec->tcp_udp.size)
			return -EINVAL;
		memcpy(&ib_spec->tcp_udp.val, &kern_spec->tcp_udp.val,
		       sizeof(struct ib_flow_tcp_udp_filter));
		memcpy(&ib_spec->tcp_udp.mask, &kern_spec->tcp_udp.mask,
		       sizeof(struct ib_flow_tcp_udp_filter));
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
			     struct ib_udata *ucore,
			     struct ib_udata *uhw)
{
	struct ib_uverbs_create_flow	  cmd;
	struct ib_uverbs_create_flow_resp resp;
	struct ib_uobject		  *uobj;
	struct ib_flow			  *flow_id;
	struct ib_uverbs_flow_attr	  *kern_flow_attr;
	struct ib_flow_attr		  *flow_attr;
	struct ib_qp			  *qp;
	int err = 0;
	void *kern_spec;
	void *ib_spec;
	int i;

	if (ucore->inlen < sizeof(cmd))
		return -EINVAL;

	if (ucore->outlen < sizeof(resp))
		return -ENOSPC;

	err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
	if (err)
		return err;

	ucore->inbuf += sizeof(cmd);
	ucore->inlen -= sizeof(cmd);

	if (cmd.comp_mask)
		return -EINVAL;

	if ((cmd.flow_attr.type == IB_FLOW_ATTR_SNIFFER &&
	     !capable(CAP_NET_ADMIN)) || !capable(CAP_NET_RAW))
		return -EPERM;

	if (cmd.flow_attr.num_of_specs > IB_FLOW_SPEC_SUPPORT_LAYERS)
		return -EINVAL;

	if (cmd.flow_attr.size > ucore->inlen ||
	    cmd.flow_attr.size >
	    (cmd.flow_attr.num_of_specs * sizeof(struct ib_uverbs_flow_spec)))
		return -EINVAL;

	if (cmd.flow_attr.reserved[0] ||
	    cmd.flow_attr.reserved[1])
		return -EINVAL;

	if (cmd.flow_attr.num_of_specs) {
		kern_flow_attr = kmalloc(sizeof(*kern_flow_attr) + cmd.flow_attr.size,
					 GFP_KERNEL);
		if (!kern_flow_attr)
			return -ENOMEM;

		memcpy(kern_flow_attr, &cmd.flow_attr, sizeof(*kern_flow_attr));
		err = ib_copy_from_udata(kern_flow_attr + 1, ucore,
					 cmd.flow_attr.size);
		if (err)
			goto err_free_attr;
	} else {
		kern_flow_attr = &cmd.flow_attr;
	}

	uobj = kmalloc(sizeof(*uobj), GFP_KERNEL);
	if (!uobj) {
		err = -ENOMEM;
		goto err_free_attr;
	}
	init_uobj(uobj, 0, file->ucontext, &rule_lock_class);
	down_write(&uobj->mutex);

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp) {
		err = -EINVAL;
		goto err_uobj;
	}

	flow_attr = kmalloc(sizeof(*flow_attr) + cmd.flow_attr.size, GFP_KERNEL);
	if (!flow_attr) {
		err = -ENOMEM;
		goto err_put;
	}

	flow_attr->type = kern_flow_attr->type;
	flow_attr->priority = kern_flow_attr->priority;
	flow_attr->num_of_specs = kern_flow_attr->num_of_specs;
	flow_attr->port = kern_flow_attr->port;
	flow_attr->flags = kern_flow_attr->flags;
	flow_attr->size = sizeof(*flow_attr);

	/* Convert each userspace flow spec, bounds-checking as we go. */
	kern_spec = kern_flow_attr + 1;
	ib_spec = flow_attr + 1;
	for (i = 0; i < flow_attr->num_of_specs &&
	     cmd.flow_attr.size > offsetof(struct ib_uverbs_flow_spec, reserved) &&
	     cmd.flow_attr.size >=
	     ((struct ib_uverbs_flow_spec *)kern_spec)->size; i++) {
		err = kern_spec_to_ib_spec(kern_spec, ib_spec);
		if (err)
			goto err_free;
		flow_attr->size +=
			((union ib_flow_spec *) ib_spec)->size;
		cmd.flow_attr.size -= ((struct ib_uverbs_flow_spec *)kern_spec)->size;
		kern_spec += ((struct ib_uverbs_flow_spec *) kern_spec)->size;
		ib_spec += ((union ib_flow_spec *) ib_spec)->size;
	}
	if (cmd.flow_attr.size || (i != flow_attr->num_of_specs)) {
		pr_warn("create flow failed, flow %d: %d bytes left from uverb cmd\n",
			i, cmd.flow_attr.size);
		err = -EINVAL;
		goto err_free;
	}
	flow_id = ib_create_flow(qp, flow_attr, IB_FLOW_DOMAIN_USER);
	if (IS_ERR(flow_id)) {
		err = PTR_ERR(flow_id);
		goto err_free;
	}
	flow_id->qp = qp;
	flow_id->uobject = uobj;
	uobj->object = flow_id;

	err = idr_add_uobj(&ib_uverbs_rule_idr, uobj);
	if (err)
		goto destroy_flow;

	memset(&resp, 0, sizeof(resp));
	resp.flow_handle = uobj->id;

	err = ib_copy_to_udata(ucore,
			       &resp, sizeof(resp));
	if (err)
		goto err_copy;

	put_qp_read(qp);
	mutex_lock(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->rule_list);
	mutex_unlock(&file->mutex);

	uobj->live = 1;

	up_write(&uobj->mutex);
	kfree(flow_attr);
	if (cmd.flow_attr.num_of_specs)
		kfree(kern_flow_attr);
	return 0;
err_copy:
	idr_remove_uobj(&ib_uverbs_rule_idr, uobj);
destroy_flow:
	ib_destroy_flow(flow_id);
err_free:
	kfree(flow_attr);
err_put:
	put_qp_read(qp);
err_uobj:
	put_uobj_write(uobj);
err_free_attr:
	if (cmd.flow_attr.num_of_specs)
		kfree(kern_flow_attr);
	return err;
}
int ib_uverbs_ex_destroy_flow(struct ib_uverbs_file *file,
			      struct ib_udata *ucore,
			      struct ib_udata *uhw)
{
	struct ib_uverbs_destroy_flow	cmd;
	struct ib_flow			*flow_id;
	struct ib_uobject		*uobj;
	int				ret;

	if (ucore->inlen < sizeof(cmd))
		return -EINVAL;

	ret = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
	if (ret)
		return ret;

	if (cmd.comp_mask)
		return -EINVAL;

	uobj = idr_write_uobj(&ib_uverbs_rule_idr, cmd.flow_handle,
			      file->ucontext);
	if (!uobj)
		return -EINVAL;
	flow_id = uobj->object;

	ret = ib_destroy_flow(flow_id);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	idr_remove_uobj(&ib_uverbs_rule_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);

	return ret;
}
static int __uverbs_create_xsrq(struct ib_uverbs_file *file,
				struct ib_uverbs_create_xsrq *cmd,
				struct ib_udata *udata)
{
	struct ib_uverbs_create_srq_resp resp;
	struct ib_usrq_object           *obj;
	struct ib_pd                    *pd;
	struct ib_srq                   *srq;
	struct ib_uobject               *uninitialized_var(xrcd_uobj);
	struct ib_srq_init_attr          attr;
	int ret;

	obj = kmalloc(sizeof *obj, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	init_uobj(&obj->uevent.uobject, cmd->user_handle, file->ucontext, &srq_lock_class);
	down_write(&obj->uevent.uobject.mutex);

	if (cmd->srq_type == IB_SRQT_XRC) {
		attr.ext.xrc.xrcd  = idr_read_xrcd(cmd->xrcd_handle, file->ucontext, &xrcd_uobj);
		if (!attr.ext.xrc.xrcd) {
			ret = -EINVAL;
			goto err;
		}

		obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
		atomic_inc(&obj->uxrcd->refcnt);

		attr.ext.xrc.cq  = idr_read_cq(cmd->cq_handle, file->ucontext, 0);
		if (!attr.ext.xrc.cq) {
			ret = -EINVAL;
			goto err_put_xrcd;
		}
	}

	pd  = idr_read_pd(cmd->pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err_put_cq;
	}

	attr.event_handler  = ib_uverbs_srq_event_handler;
	attr.srq_context    = file;
	attr.srq_type       = cmd->srq_type;
	attr.attr.max_wr    = cmd->max_wr;
	attr.attr.max_sge   = cmd->max_sge;
	attr.attr.srq_limit = cmd->srq_limit;

	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);

	srq = pd->device->create_srq(pd, &attr, udata);
	if (IS_ERR(srq)) {
		ret = PTR_ERR(srq);
		goto err_put;
	}

	srq->device        = pd->device;
	srq->pd            = pd;
	srq->srq_type	   = cmd->srq_type;
	srq->uobject       = &obj->uevent.uobject;
	srq->event_handler = attr.event_handler;
	srq->srq_context   = attr.srq_context;

	if (cmd->srq_type == IB_SRQT_XRC) {
		srq->ext.xrc.cq   = attr.ext.xrc.cq;
		srq->ext.xrc.xrcd = attr.ext.xrc.xrcd;
		atomic_inc(&attr.ext.xrc.cq->usecnt);
		atomic_inc(&attr.ext.xrc.xrcd->usecnt);
	}

	atomic_inc(&pd->usecnt);
	atomic_set(&srq->usecnt, 0);

	obj->uevent.uobject.object = srq;
	ret = idr_add_uobj(&ib_uverbs_srq_idr, &obj->uevent.uobject);
	if (ret)
		goto err_destroy;

	memset(&resp, 0, sizeof resp);
	resp.srq_handle = obj->uevent.uobject.id;
	resp.max_wr     = attr.attr.max_wr;
	resp.max_sge    = attr.attr.max_sge;
	if (cmd->srq_type == IB_SRQT_XRC)
		resp.srqn = srq->ext.xrc.srq_num;

	if (copy_to_user((void __user *) (unsigned long) cmd->response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	if (cmd->srq_type == IB_SRQT_XRC) {
		put_uobj_read(xrcd_uobj);
		put_cq_read(attr.ext.xrc.cq);
	}
	put_pd_read(pd);

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uevent.uobject.list, &file->ucontext->srq_list);
	mutex_unlock(&file->mutex);

	obj->uevent.uobject.live = 1;

	up_write(&obj->uevent.uobject.mutex);

	return 0;

err_copy:
	idr_remove_uobj(&ib_uverbs_srq_idr, &obj->uevent.uobject);

err_destroy:
	ib_destroy_srq(srq);

err_put:
	put_pd_read(pd);

err_put_cq:
	if (cmd->srq_type == IB_SRQT_XRC)
		put_cq_read(attr.ext.xrc.cq);

err_put_xrcd:
	if (cmd->srq_type == IB_SRQT_XRC) {
		atomic_dec(&obj->uxrcd->refcnt);
		put_uobj_read(xrcd_uobj);
	}

err:
	put_uobj_write(&obj->uevent.uobject);
	return ret;
}
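
/*
 * Legacy create_srq entry point: repackage the basic command as an
 * ib_uverbs_create_xsrq with srq_type == IB_SRQT_BASIC and reuse the
 * common worker above.
 */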
ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_create_srq      cmd;
	struct ib_uverbs_create_xsrq     xcmd;
	struct ib_uverbs_create_srq_resp resp;
	struct ib_udata                  udata;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	xcmd.response	 = cmd.response;
	xcmd.user_handle = cmd.user_handle;
	xcmd.srq_type	 = IB_SRQT_BASIC;
	xcmd.pd_handle	 = cmd.pd_handle;
	xcmd.max_wr	 = cmd.max_wr;
	xcmd.max_sge	 = cmd.max_sge;
	xcmd.srq_limit	 = cmd.srq_limit;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	ret = __uverbs_create_xsrq(file, &xcmd, &udata);
	if (ret)
		return ret;

	return in_len;
}
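
/* Extended entry point: the command already has the xsrq layout. */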
ssize_t ib_uverbs_create_xsrq(struct ib_uverbs_file *file,
			      const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_create_xsrq     cmd;
	struct ib_uverbs_create_srq_resp resp;
	struct ib_udata                  udata;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	ret = __uverbs_create_xsrq(file, &cmd, &udata);
	if (ret)
		return ret;

	return in_len;
}
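
/*
 * Modify an existing SRQ.  Only max_wr and srq_limit can change;
 * cmd.attr_mask tells the driver which of the two to apply.
 */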
ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_modify_srq cmd;
	struct ib_udata             udata;
	struct ib_srq              *srq;
	struct ib_srq_attr          attr;
	int                         ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
		   out_len);

	srq = idr_read_srq(cmd.srq_handle, file->ucontext);
	if (!srq)
		return -EINVAL;

	attr.max_wr    = cmd.max_wr;
	attr.srq_limit = cmd.srq_limit;

	ret = srq->device->modify_srq(srq, &attr, cmd.attr_mask, &udata);

	put_srq_read(srq);

	return ret ? ret : in_len;
}
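
/* Query current SRQ attributes and copy them back to userspace. */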
ssize_t ib_uverbs_query_srq(struct ib_uverbs_file *file,
			    const char __user *buf,
			    int in_len, int out_len)
{
	struct ib_uverbs_query_srq      cmd;
	struct ib_uverbs_query_srq_resp resp;
	struct ib_srq_attr              attr;
	struct ib_srq                   *srq;
	int                             ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	srq = idr_read_srq(cmd.srq_handle, file->ucontext);
	if (!srq)
		return -EINVAL;

	ret = ib_query_srq(srq, &attr);

	put_srq_read(srq);

	if (ret)
		return ret;

	memset(&resp, 0, sizeof resp);

	resp.max_wr    = attr.max_wr;
	resp.max_sge   = attr.max_sge;
	resp.srq_limit = attr.srq_limit;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}
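
/*
 * Destroy an SRQ.  For an XRC SRQ the XRCD reference taken at create
 * time is dropped, and any events still queued for the SRQ are
 * released along with the uobject.
 */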
ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
			      const char __user *buf, int in_len,
			      int out_len)
{
	struct ib_uverbs_destroy_srq      cmd;
	struct ib_uverbs_destroy_srq_resp resp;
	struct ib_uobject		 *uobj;
	struct ib_srq			 *srq;
	struct ib_uevent_object		 *obj;
	int				  ret = -EINVAL;
	struct ib_usrq_object		 *us;
	enum ib_srq_type		  srq_type;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_srq_idr, cmd.srq_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;
	srq = uobj->object;
	obj = container_of(uobj, struct ib_uevent_object, uobject);
	srq_type = srq->srq_type;

	ret = ib_destroy_srq(srq);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	if (srq_type == IB_SRQT_XRC) {
		us = container_of(obj, struct ib_usrq_object, uevent);
		atomic_dec(&us->uxrcd->refcnt);
	}

	idr_remove_uobj(&ib_uverbs_srq_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	ib_uverbs_release_uevent(file, obj);

	memset(&resp, 0, sizeof resp);
	resp.events_reported = obj->events_reported;

	put_uobj(uobj);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

	return ret ? ret : in_len;
}