IB/uverbs: Explicitly pass ib_dev to uverbs commands
drivers/infiniband/core/uverbs_cmd.c
1 /*
2 * Copyright (c) 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005, 2006, 2007 Cisco Systems. All rights reserved.
4 * Copyright (c) 2005 PathScale, Inc. All rights reserved.
5 * Copyright (c) 2006 Mellanox Technologies. All rights reserved.
6 *
7 * This software is available to you under a choice of one of two
8 * licenses. You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
11 * OpenIB.org BSD license below:
12 *
13 * Redistribution and use in source and binary forms, with or
14 * without modification, are permitted provided that the following
15 * conditions are met:
16 *
17 * - Redistributions of source code must retain the above
18 * copyright notice, this list of conditions and the following
19 * disclaimer.
20 *
21 * - Redistributions in binary form must reproduce the above
22 * copyright notice, this list of conditions and the following
23 * disclaimer in the documentation and/or other materials
24 * provided with the distribution.
25 *
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 * SOFTWARE.
34 */
35
36 #include <linux/file.h>
37 #include <linux/fs.h>
38 #include <linux/slab.h>
39 #include <linux/sched.h>
40
41 #include <asm/uaccess.h>
42
43 #include "uverbs.h"
44 #include "core_priv.h"
45
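/*
 * Each uobject type gets its own lockdep class below, so that holding
 * one object's rwsem while taking another type's (say a QP's while
 * holding its PD's) is not reported as recursive locking. Nesting two
 * objects of the same class (e.g. the send and receive CQs of a QP)
 * goes through down_read_nested() instead.
 */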
46 struct uverbs_lock_class {
47 struct lock_class_key key;
48 char name[16];
49 };
50
51 static struct uverbs_lock_class pd_lock_class = { .name = "PD-uobj" };
52 static struct uverbs_lock_class mr_lock_class = { .name = "MR-uobj" };
53 static struct uverbs_lock_class mw_lock_class = { .name = "MW-uobj" };
54 static struct uverbs_lock_class cq_lock_class = { .name = "CQ-uobj" };
55 static struct uverbs_lock_class qp_lock_class = { .name = "QP-uobj" };
56 static struct uverbs_lock_class ah_lock_class = { .name = "AH-uobj" };
57 static struct uverbs_lock_class srq_lock_class = { .name = "SRQ-uobj" };
58 static struct uverbs_lock_class xrcd_lock_class = { .name = "XRCD-uobj" };
59 static struct uverbs_lock_class rule_lock_class = { .name = "RULE-uobj" };
60
61 /*
62 * The ib_uobject locking scheme is as follows:
63 *
64 * - ib_uverbs_idr_lock protects the uverbs idrs themselves, so it
65 * needs to be held during all idr operations. When an object is
66 * looked up, a reference must be taken on the object's kref before
67 * dropping this lock.
68 *
69 * - Each object also has an rwsem. This rwsem must be held for
70 * reading while an operation that uses the object is performed.
71 * For example, while registering an MR, the associated PD's
72 * uobject.mutex (an rwsem, despite its name) must be held for
73 * reading, and for writing while initializing or destroying an object.
74 *
75 * - In addition, each object has a "live" flag. If this flag is not
76 * set, then lookups of the object will fail even if it is found in
77 * the idr. This handles a reader that blocks and does not acquire
78 * the rwsem until after the object is destroyed. The destroy
79 * operation will set the live flag to 0 and then drop the rwsem;
80 * this will allow the reader to acquire the rwsem, see that the
81 * live flag is 0, and then drop the rwsem and its reference to
82 * object. The underlying storage will not be freed until the last
83 * reference to the object is dropped.
84 */
85
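/*
 * A sketch of the lookup pattern the scheme above implies (the helper
 * names are the ones defined below):
 *
 *	uobj = idr_read_uobj(idr, handle, ucontext, 0);
 *	if (!uobj)
 *		return -EINVAL;		(stale handle or dying object)
 *	... use uobj->object under the read lock ...
 *	put_uobj_read(uobj);		(drops the rwsem and the kref)
 */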
86 static void init_uobj(struct ib_uobject *uobj, u64 user_handle,
87 struct ib_ucontext *context, struct uverbs_lock_class *c)
88 {
89 uobj->user_handle = user_handle;
90 uobj->context = context;
91 kref_init(&uobj->ref);
92 init_rwsem(&uobj->mutex);
93 lockdep_set_class_and_name(&uobj->mutex, &c->key, c->name);
94 uobj->live = 0;
95 }
96
97 static void release_uobj(struct kref *kref)
98 {
99 kfree(container_of(kref, struct ib_uobject, ref));
100 }
101
102 static void put_uobj(struct ib_uobject *uobj)
103 {
104 kref_put(&uobj->ref, release_uobj);
105 }
106
107 static void put_uobj_read(struct ib_uobject *uobj)
108 {
109 up_read(&uobj->mutex);
110 put_uobj(uobj);
111 }
112
113 static void put_uobj_write(struct ib_uobject *uobj)
114 {
115 up_write(&uobj->mutex);
116 put_uobj(uobj);
117 }
118
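/*
 * Allocate an idr slot for a new uobject. idr_preload() reserves
 * memory up front so that idr_alloc() can run with GFP_NOWAIT while
 * ib_uverbs_idr_lock is held.
 */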
119 static int idr_add_uobj(struct idr *idr, struct ib_uobject *uobj)
120 {
121 int ret;
122
123 idr_preload(GFP_KERNEL);
124 spin_lock(&ib_uverbs_idr_lock);
125
126 ret = idr_alloc(idr, uobj, 0, 0, GFP_NOWAIT);
127 if (ret >= 0)
128 uobj->id = ret;
129
130 spin_unlock(&ib_uverbs_idr_lock);
131 idr_preload_end();
132
133 return ret < 0 ? ret : 0;
134 }
135
136 void idr_remove_uobj(struct idr *idr, struct ib_uobject *uobj)
137 {
138 spin_lock(&ib_uverbs_idr_lock);
139 idr_remove(idr, uobj->id);
140 spin_unlock(&ib_uverbs_idr_lock);
141 }
142
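/*
 * Look up a uobject by id and take a kref on it, all under
 * ib_uverbs_idr_lock. The ucontext comparison keeps a process from
 * reaching handles that belong to another process's context.
 */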
143 static struct ib_uobject *__idr_get_uobj(struct idr *idr, int id,
144 struct ib_ucontext *context)
145 {
146 struct ib_uobject *uobj;
147
148 spin_lock(&ib_uverbs_idr_lock);
149 uobj = idr_find(idr, id);
150 if (uobj) {
151 if (uobj->context == context)
152 kref_get(&uobj->ref);
153 else
154 uobj = NULL;
155 }
156 spin_unlock(&ib_uverbs_idr_lock);
157
158 return uobj;
159 }
160
161 static struct ib_uobject *idr_read_uobj(struct idr *idr, int id,
162 struct ib_ucontext *context, int nested)
163 {
164 struct ib_uobject *uobj;
165
166 uobj = __idr_get_uobj(idr, id, context);
167 if (!uobj)
168 return NULL;
169
170 if (nested)
171 down_read_nested(&uobj->mutex, SINGLE_DEPTH_NESTING);
172 else
173 down_read(&uobj->mutex);
174 if (!uobj->live) {
175 put_uobj_read(uobj);
176 return NULL;
177 }
178
179 return uobj;
180 }
181
182 static struct ib_uobject *idr_write_uobj(struct idr *idr, int id,
183 struct ib_ucontext *context)
184 {
185 struct ib_uobject *uobj;
186
187 uobj = __idr_get_uobj(idr, id, context);
188 if (!uobj)
189 return NULL;
190
191 down_write(&uobj->mutex);
192 if (!uobj->live) {
193 put_uobj_write(uobj);
194 return NULL;
195 }
196
197 return uobj;
198 }
199
200 static void *idr_read_obj(struct idr *idr, int id, struct ib_ucontext *context,
201 int nested)
202 {
203 struct ib_uobject *uobj;
204
205 uobj = idr_read_uobj(idr, id, context, nested);
206 return uobj ? uobj->object : NULL;
207 }
208
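/*
 * Typed wrappers around idr_read_uobj()/idr_write_uobj(): each takes
 * a user handle and returns the underlying verbs object with its
 * uobject rwsem held for reading (or writing, for the _write variants).
 */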
209 static struct ib_pd *idr_read_pd(int pd_handle, struct ib_ucontext *context)
210 {
211 return idr_read_obj(&ib_uverbs_pd_idr, pd_handle, context, 0);
212 }
213
214 static void put_pd_read(struct ib_pd *pd)
215 {
216 put_uobj_read(pd->uobject);
217 }
218
219 static struct ib_cq *idr_read_cq(int cq_handle, struct ib_ucontext *context, int nested)
220 {
221 return idr_read_obj(&ib_uverbs_cq_idr, cq_handle, context, nested);
222 }
223
224 static void put_cq_read(struct ib_cq *cq)
225 {
226 put_uobj_read(cq->uobject);
227 }
228
229 static struct ib_ah *idr_read_ah(int ah_handle, struct ib_ucontext *context)
230 {
231 return idr_read_obj(&ib_uverbs_ah_idr, ah_handle, context, 0);
232 }
233
234 static void put_ah_read(struct ib_ah *ah)
235 {
236 put_uobj_read(ah->uobject);
237 }
238
239 static struct ib_qp *idr_read_qp(int qp_handle, struct ib_ucontext *context)
240 {
241 return idr_read_obj(&ib_uverbs_qp_idr, qp_handle, context, 0);
242 }
243
244 static struct ib_qp *idr_write_qp(int qp_handle, struct ib_ucontext *context)
245 {
246 struct ib_uobject *uobj;
247
248 uobj = idr_write_uobj(&ib_uverbs_qp_idr, qp_handle, context);
249 return uobj ? uobj->object : NULL;
250 }
251
252 static void put_qp_read(struct ib_qp *qp)
253 {
254 put_uobj_read(qp->uobject);
255 }
256
257 static void put_qp_write(struct ib_qp *qp)
258 {
259 put_uobj_write(qp->uobject);
260 }
261
262 static struct ib_srq *idr_read_srq(int srq_handle, struct ib_ucontext *context)
263 {
264 return idr_read_obj(&ib_uverbs_srq_idr, srq_handle, context, 0);
265 }
266
267 static void put_srq_read(struct ib_srq *srq)
268 {
269 put_uobj_read(srq->uobject);
270 }
271
272 static struct ib_xrcd *idr_read_xrcd(int xrcd_handle, struct ib_ucontext *context,
273 struct ib_uobject **uobj)
274 {
275 *uobj = idr_read_uobj(&ib_uverbs_xrcd_idr, xrcd_handle, context, 0);
276 return *uobj ? (*uobj)->object : NULL;
277 }
278
279 static void put_xrcd_read(struct ib_uobject *uobj)
280 {
281 put_uobj_read(uobj);
282 }
283
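/*
 * The command handlers below share one calling convention: the fixed
 * command struct is copied in from 'buf', any response is written to
 * the user pointer carried in cmd.response, and a successful handler
 * returns in_len. INIT_UDATA() describes whatever driver-private bytes
 * follow the fixed command and response structs.
 */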
284 ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
285 struct ib_device *ib_dev,
286 const char __user *buf,
287 int in_len, int out_len)
288 {
289 struct ib_uverbs_get_context cmd;
290 struct ib_uverbs_get_context_resp resp;
291 struct ib_udata udata;
292 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
293 struct ib_device_attr dev_attr;
294 #endif
295 struct ib_ucontext *ucontext;
296 struct file *filp;
297 int ret;
298
299 if (out_len < sizeof resp)
300 return -ENOSPC;
301
302 if (copy_from_user(&cmd, buf, sizeof cmd))
303 return -EFAULT;
304
305 mutex_lock(&file->mutex);
306
307 if (file->ucontext) {
308 ret = -EINVAL;
309 goto err;
310 }
311
312 INIT_UDATA(&udata, buf + sizeof cmd,
313 (unsigned long) cmd.response + sizeof resp,
314 in_len - sizeof cmd, out_len - sizeof resp);
315
316 ucontext = ib_dev->alloc_ucontext(ib_dev, &udata);
317 if (IS_ERR(ucontext)) {
318 ret = PTR_ERR(ucontext);
319 goto err;
320 }
321
322 ucontext->device = ib_dev;
323 INIT_LIST_HEAD(&ucontext->pd_list);
324 INIT_LIST_HEAD(&ucontext->mr_list);
325 INIT_LIST_HEAD(&ucontext->mw_list);
326 INIT_LIST_HEAD(&ucontext->cq_list);
327 INIT_LIST_HEAD(&ucontext->qp_list);
328 INIT_LIST_HEAD(&ucontext->srq_list);
329 INIT_LIST_HEAD(&ucontext->ah_list);
330 INIT_LIST_HEAD(&ucontext->xrcd_list);
331 INIT_LIST_HEAD(&ucontext->rule_list);
332 rcu_read_lock();
333 ucontext->tgid = get_task_pid(current->group_leader, PIDTYPE_PID);
334 rcu_read_unlock();
335 ucontext->closing = 0;
336
337 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
338 ucontext->umem_tree = RB_ROOT;
339 init_rwsem(&ucontext->umem_rwsem);
340 ucontext->odp_mrs_count = 0;
341 INIT_LIST_HEAD(&ucontext->no_private_counters);
342
343 ret = ib_query_device(ib_dev, &dev_attr);
344 if (ret)
345 goto err_free;
346 if (!(dev_attr.device_cap_flags & IB_DEVICE_ON_DEMAND_PAGING))
347 ucontext->invalidate_range = NULL;
348
349 #endif
350
351 resp.num_comp_vectors = file->device->num_comp_vectors;
352
353 ret = get_unused_fd_flags(O_CLOEXEC);
354 if (ret < 0)
355 goto err_free;
356 resp.async_fd = ret;
357
358 filp = ib_uverbs_alloc_event_file(file, ib_dev, 1);
359 if (IS_ERR(filp)) {
360 ret = PTR_ERR(filp);
361 goto err_fd;
362 }
363
364 if (copy_to_user((void __user *) (unsigned long) cmd.response,
365 &resp, sizeof resp)) {
366 ret = -EFAULT;
367 goto err_file;
368 }
369
370 file->ucontext = ucontext;
371
372 fd_install(resp.async_fd, filp);
373
374 mutex_unlock(&file->mutex);
375
376 return in_len;
377
378 err_file:
379 ib_uverbs_free_async_event_file(file);
380 fput(filp);
381
382 err_fd:
383 put_unused_fd(resp.async_fd);
384
385 err_free:
386 put_pid(ucontext->tgid);
387 ib_dev->dealloc_ucontext(ucontext);
388
389 err:
390 mutex_unlock(&file->mutex);
391 return ret;
392 }
393
394 static void copy_query_dev_fields(struct ib_uverbs_file *file,
395 struct ib_device *ib_dev,
396 struct ib_uverbs_query_device_resp *resp,
397 struct ib_device_attr *attr)
398 {
399 resp->fw_ver = attr->fw_ver;
400 resp->node_guid = ib_dev->node_guid;
401 resp->sys_image_guid = attr->sys_image_guid;
402 resp->max_mr_size = attr->max_mr_size;
403 resp->page_size_cap = attr->page_size_cap;
404 resp->vendor_id = attr->vendor_id;
405 resp->vendor_part_id = attr->vendor_part_id;
406 resp->hw_ver = attr->hw_ver;
407 resp->max_qp = attr->max_qp;
408 resp->max_qp_wr = attr->max_qp_wr;
409 resp->device_cap_flags = attr->device_cap_flags;
410 resp->max_sge = attr->max_sge;
411 resp->max_sge_rd = attr->max_sge_rd;
412 resp->max_cq = attr->max_cq;
413 resp->max_cqe = attr->max_cqe;
414 resp->max_mr = attr->max_mr;
415 resp->max_pd = attr->max_pd;
416 resp->max_qp_rd_atom = attr->max_qp_rd_atom;
417 resp->max_ee_rd_atom = attr->max_ee_rd_atom;
418 resp->max_res_rd_atom = attr->max_res_rd_atom;
419 resp->max_qp_init_rd_atom = attr->max_qp_init_rd_atom;
420 resp->max_ee_init_rd_atom = attr->max_ee_init_rd_atom;
421 resp->atomic_cap = attr->atomic_cap;
422 resp->max_ee = attr->max_ee;
423 resp->max_rdd = attr->max_rdd;
424 resp->max_mw = attr->max_mw;
425 resp->max_raw_ipv6_qp = attr->max_raw_ipv6_qp;
426 resp->max_raw_ethy_qp = attr->max_raw_ethy_qp;
427 resp->max_mcast_grp = attr->max_mcast_grp;
428 resp->max_mcast_qp_attach = attr->max_mcast_qp_attach;
429 resp->max_total_mcast_qp_attach = attr->max_total_mcast_qp_attach;
430 resp->max_ah = attr->max_ah;
431 resp->max_fmr = attr->max_fmr;
432 resp->max_map_per_fmr = attr->max_map_per_fmr;
433 resp->max_srq = attr->max_srq;
434 resp->max_srq_wr = attr->max_srq_wr;
435 resp->max_srq_sge = attr->max_srq_sge;
436 resp->max_pkeys = attr->max_pkeys;
437 resp->local_ca_ack_delay = attr->local_ca_ack_delay;
438 resp->phys_port_cnt = ib_dev->phys_port_cnt;
439 }
440
441 ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
442 struct ib_device *ib_dev,
443 const char __user *buf,
444 int in_len, int out_len)
445 {
446 struct ib_uverbs_query_device cmd;
447 struct ib_uverbs_query_device_resp resp;
448 struct ib_device_attr attr;
449 int ret;
450
451 if (out_len < sizeof resp)
452 return -ENOSPC;
453
454 if (copy_from_user(&cmd, buf, sizeof cmd))
455 return -EFAULT;
456
457 ret = ib_query_device(ib_dev, &attr);
458 if (ret)
459 return ret;
460
461 memset(&resp, 0, sizeof resp);
462 copy_query_dev_fields(file, ib_dev, &resp, &attr);
463
464 if (copy_to_user((void __user *) (unsigned long) cmd.response,
465 &resp, sizeof resp))
466 return -EFAULT;
467
468 return in_len;
469 }
470
471 ssize_t ib_uverbs_query_port(struct ib_uverbs_file *file,
472 struct ib_device *ib_dev,
473 const char __user *buf,
474 int in_len, int out_len)
475 {
476 struct ib_uverbs_query_port cmd;
477 struct ib_uverbs_query_port_resp resp;
478 struct ib_port_attr attr;
479 int ret;
480
481 if (out_len < sizeof resp)
482 return -ENOSPC;
483
484 if (copy_from_user(&cmd, buf, sizeof cmd))
485 return -EFAULT;
486
487 ret = ib_query_port(ib_dev, cmd.port_num, &attr);
488 if (ret)
489 return ret;
490
491 memset(&resp, 0, sizeof resp);
492
493 resp.state = attr.state;
494 resp.max_mtu = attr.max_mtu;
495 resp.active_mtu = attr.active_mtu;
496 resp.gid_tbl_len = attr.gid_tbl_len;
497 resp.port_cap_flags = attr.port_cap_flags;
498 resp.max_msg_sz = attr.max_msg_sz;
499 resp.bad_pkey_cntr = attr.bad_pkey_cntr;
500 resp.qkey_viol_cntr = attr.qkey_viol_cntr;
501 resp.pkey_tbl_len = attr.pkey_tbl_len;
502 resp.lid = attr.lid;
503 resp.sm_lid = attr.sm_lid;
504 resp.lmc = attr.lmc;
505 resp.max_vl_num = attr.max_vl_num;
506 resp.sm_sl = attr.sm_sl;
507 resp.subnet_timeout = attr.subnet_timeout;
508 resp.init_type_reply = attr.init_type_reply;
509 resp.active_width = attr.active_width;
510 resp.active_speed = attr.active_speed;
511 resp.phys_state = attr.phys_state;
512 resp.link_layer = rdma_port_get_link_layer(ib_dev,
513 cmd.port_num);
514
515 if (copy_to_user((void __user *) (unsigned long) cmd.response,
516 &resp, sizeof resp))
517 return -EFAULT;
518
519 return in_len;
520 }
521
522 ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file,
523 struct ib_device *ib_dev,
524 const char __user *buf,
525 int in_len, int out_len)
526 {
527 struct ib_uverbs_alloc_pd cmd;
528 struct ib_uverbs_alloc_pd_resp resp;
529 struct ib_udata udata;
530 struct ib_uobject *uobj;
531 struct ib_pd *pd;
532 int ret;
533
534 if (out_len < sizeof resp)
535 return -ENOSPC;
536
537 if (copy_from_user(&cmd, buf, sizeof cmd))
538 return -EFAULT;
539
540 INIT_UDATA(&udata, buf + sizeof cmd,
541 (unsigned long) cmd.response + sizeof resp,
542 in_len - sizeof cmd, out_len - sizeof resp);
543
544 uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
545 if (!uobj)
546 return -ENOMEM;
547
548 init_uobj(uobj, 0, file->ucontext, &pd_lock_class);
549 down_write(&uobj->mutex);
550
551 pd = ib_dev->alloc_pd(ib_dev, file->ucontext, &udata);
552 if (IS_ERR(pd)) {
553 ret = PTR_ERR(pd);
554 goto err;
555 }
556
557 pd->device = ib_dev;
558 pd->uobject = uobj;
559 pd->local_mr = NULL;
560 atomic_set(&pd->usecnt, 0);
561
562 uobj->object = pd;
563 ret = idr_add_uobj(&ib_uverbs_pd_idr, uobj);
564 if (ret)
565 goto err_idr;
566
567 memset(&resp, 0, sizeof resp);
568 resp.pd_handle = uobj->id;
569
570 if (copy_to_user((void __user *) (unsigned long) cmd.response,
571 &resp, sizeof resp)) {
572 ret = -EFAULT;
573 goto err_copy;
574 }
575
576 mutex_lock(&file->mutex);
577 list_add_tail(&uobj->list, &file->ucontext->pd_list);
578 mutex_unlock(&file->mutex);
579
580 uobj->live = 1;
581
582 up_write(&uobj->mutex);
583
584 return in_len;
585
586 err_copy:
587 idr_remove_uobj(&ib_uverbs_pd_idr, uobj);
588
589 err_idr:
590 ib_dealloc_pd(pd);
591
592 err:
593 put_uobj_write(uobj);
594 return ret;
595 }
596
597 ssize_t ib_uverbs_dealloc_pd(struct ib_uverbs_file *file,
598 struct ib_device *ib_dev,
599 const char __user *buf,
600 int in_len, int out_len)
601 {
602 struct ib_uverbs_dealloc_pd cmd;
603 struct ib_uobject *uobj;
604 struct ib_pd *pd;
605 int ret;
606
607 if (copy_from_user(&cmd, buf, sizeof cmd))
608 return -EFAULT;
609
610 uobj = idr_write_uobj(&ib_uverbs_pd_idr, cmd.pd_handle, file->ucontext);
611 if (!uobj)
612 return -EINVAL;
613 pd = uobj->object;
614
615 if (atomic_read(&pd->usecnt)) {
616 ret = -EBUSY;
617 goto err_put;
618 }
619
620 ret = pd->device->dealloc_pd(uobj->object);
621 	WARN_ONCE(ret, "InfiniBand HW driver failed dealloc_pd");
622 if (ret)
623 goto err_put;
624
625 uobj->live = 0;
626 put_uobj_write(uobj);
627
628 idr_remove_uobj(&ib_uverbs_pd_idr, uobj);
629
630 mutex_lock(&file->mutex);
631 list_del(&uobj->list);
632 mutex_unlock(&file->mutex);
633
634 put_uobj(uobj);
635
636 return in_len;
637
638 err_put:
639 put_uobj_write(uobj);
640 return ret;
641 }
642
643 struct xrcd_table_entry {
644 struct rb_node node;
645 struct ib_xrcd *xrcd;
646 struct inode *inode;
647 };
648
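/*
 * XRCDs opened through a file descriptor are shared per inode: the
 * rbtree below maps an inode to its XRCD, so a second open of the same
 * file finds the existing object instead of allocating a new one.
 * igrab()/iput() keep the inode pinned while an entry exists.
 */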
649 static int xrcd_table_insert(struct ib_uverbs_device *dev,
650 struct inode *inode,
651 struct ib_xrcd *xrcd)
652 {
653 struct xrcd_table_entry *entry, *scan;
654 struct rb_node **p = &dev->xrcd_tree.rb_node;
655 struct rb_node *parent = NULL;
656
657 entry = kmalloc(sizeof *entry, GFP_KERNEL);
658 if (!entry)
659 return -ENOMEM;
660
661 entry->xrcd = xrcd;
662 entry->inode = inode;
663
664 while (*p) {
665 parent = *p;
666 scan = rb_entry(parent, struct xrcd_table_entry, node);
667
668 if (inode < scan->inode) {
669 p = &(*p)->rb_left;
670 } else if (inode > scan->inode) {
671 p = &(*p)->rb_right;
672 } else {
673 kfree(entry);
674 return -EEXIST;
675 }
676 }
677
678 rb_link_node(&entry->node, parent, p);
679 rb_insert_color(&entry->node, &dev->xrcd_tree);
680 igrab(inode);
681 return 0;
682 }
683
684 static struct xrcd_table_entry *xrcd_table_search(struct ib_uverbs_device *dev,
685 struct inode *inode)
686 {
687 struct xrcd_table_entry *entry;
688 struct rb_node *p = dev->xrcd_tree.rb_node;
689
690 while (p) {
691 entry = rb_entry(p, struct xrcd_table_entry, node);
692
693 if (inode < entry->inode)
694 p = p->rb_left;
695 else if (inode > entry->inode)
696 p = p->rb_right;
697 else
698 return entry;
699 }
700
701 return NULL;
702 }
703
704 static struct ib_xrcd *find_xrcd(struct ib_uverbs_device *dev, struct inode *inode)
705 {
706 struct xrcd_table_entry *entry;
707
708 entry = xrcd_table_search(dev, inode);
709 if (!entry)
710 return NULL;
711
712 return entry->xrcd;
713 }
714
715 static void xrcd_table_delete(struct ib_uverbs_device *dev,
716 struct inode *inode)
717 {
718 struct xrcd_table_entry *entry;
719
720 entry = xrcd_table_search(dev, inode);
721 if (entry) {
722 iput(inode);
723 rb_erase(&entry->node, &dev->xrcd_tree);
724 kfree(entry);
725 }
726 }
727
728 ssize_t ib_uverbs_open_xrcd(struct ib_uverbs_file *file,
729 struct ib_device *ib_dev,
730 const char __user *buf, int in_len,
731 int out_len)
732 {
733 struct ib_uverbs_open_xrcd cmd;
734 struct ib_uverbs_open_xrcd_resp resp;
735 struct ib_udata udata;
736 struct ib_uxrcd_object *obj;
737 struct ib_xrcd *xrcd = NULL;
738 struct fd f = {NULL, 0};
739 struct inode *inode = NULL;
740 int ret = 0;
741 int new_xrcd = 0;
742
743 if (out_len < sizeof resp)
744 return -ENOSPC;
745
746 if (copy_from_user(&cmd, buf, sizeof cmd))
747 return -EFAULT;
748
749 INIT_UDATA(&udata, buf + sizeof cmd,
750 (unsigned long) cmd.response + sizeof resp,
751 in_len - sizeof cmd, out_len - sizeof resp);
752
753 mutex_lock(&file->device->xrcd_tree_mutex);
754
755 if (cmd.fd != -1) {
756 		/* look up the inode behind the user-supplied fd */
757 f = fdget(cmd.fd);
758 if (!f.file) {
759 ret = -EBADF;
760 goto err_tree_mutex_unlock;
761 }
762
763 inode = file_inode(f.file);
764 xrcd = find_xrcd(file->device, inode);
765 if (!xrcd && !(cmd.oflags & O_CREAT)) {
766 			/* no XRCD for this inode yet; O_CREAT is required to create one */
767 ret = -EAGAIN;
768 goto err_tree_mutex_unlock;
769 }
770
771 if (xrcd && cmd.oflags & O_EXCL) {
772 ret = -EINVAL;
773 goto err_tree_mutex_unlock;
774 }
775 }
776
777 obj = kmalloc(sizeof *obj, GFP_KERNEL);
778 if (!obj) {
779 ret = -ENOMEM;
780 goto err_tree_mutex_unlock;
781 }
782
783 init_uobj(&obj->uobject, 0, file->ucontext, &xrcd_lock_class);
784
785 down_write(&obj->uobject.mutex);
786
787 if (!xrcd) {
788 xrcd = ib_dev->alloc_xrcd(ib_dev, file->ucontext, &udata);
789 if (IS_ERR(xrcd)) {
790 ret = PTR_ERR(xrcd);
791 goto err;
792 }
793
794 xrcd->inode = inode;
795 xrcd->device = ib_dev;
796 atomic_set(&xrcd->usecnt, 0);
797 mutex_init(&xrcd->tgt_qp_mutex);
798 INIT_LIST_HEAD(&xrcd->tgt_qp_list);
799 new_xrcd = 1;
800 }
801
802 atomic_set(&obj->refcnt, 0);
803 obj->uobject.object = xrcd;
804 ret = idr_add_uobj(&ib_uverbs_xrcd_idr, &obj->uobject);
805 if (ret)
806 goto err_idr;
807
808 memset(&resp, 0, sizeof resp);
809 resp.xrcd_handle = obj->uobject.id;
810
811 if (inode) {
812 if (new_xrcd) {
813 /* create new inode/xrcd table entry */
814 ret = xrcd_table_insert(file->device, inode, xrcd);
815 if (ret)
816 goto err_insert_xrcd;
817 }
818 atomic_inc(&xrcd->usecnt);
819 }
820
821 if (copy_to_user((void __user *) (unsigned long) cmd.response,
822 &resp, sizeof resp)) {
823 ret = -EFAULT;
824 goto err_copy;
825 }
826
827 if (f.file)
828 fdput(f);
829
830 mutex_lock(&file->mutex);
831 list_add_tail(&obj->uobject.list, &file->ucontext->xrcd_list);
832 mutex_unlock(&file->mutex);
833
834 obj->uobject.live = 1;
835 up_write(&obj->uobject.mutex);
836
837 mutex_unlock(&file->device->xrcd_tree_mutex);
838 return in_len;
839
840 err_copy:
841 if (inode) {
842 if (new_xrcd)
843 xrcd_table_delete(file->device, inode);
844 atomic_dec(&xrcd->usecnt);
845 }
846
847 err_insert_xrcd:
848 idr_remove_uobj(&ib_uverbs_xrcd_idr, &obj->uobject);
849
850 err_idr:
851 ib_dealloc_xrcd(xrcd);
852
853 err:
854 put_uobj_write(&obj->uobject);
855
856 err_tree_mutex_unlock:
857 if (f.file)
858 fdput(f);
859
860 mutex_unlock(&file->device->xrcd_tree_mutex);
861
862 return ret;
863 }
864
865 ssize_t ib_uverbs_close_xrcd(struct ib_uverbs_file *file,
866 struct ib_device *ib_dev,
867 const char __user *buf, int in_len,
868 int out_len)
869 {
870 struct ib_uverbs_close_xrcd cmd;
871 struct ib_uobject *uobj;
872 struct ib_xrcd *xrcd = NULL;
873 struct inode *inode = NULL;
874 struct ib_uxrcd_object *obj;
875 int live;
876 int ret = 0;
877
878 if (copy_from_user(&cmd, buf, sizeof cmd))
879 return -EFAULT;
880
881 mutex_lock(&file->device->xrcd_tree_mutex);
882 uobj = idr_write_uobj(&ib_uverbs_xrcd_idr, cmd.xrcd_handle, file->ucontext);
883 if (!uobj) {
884 ret = -EINVAL;
885 goto out;
886 }
887
888 xrcd = uobj->object;
889 inode = xrcd->inode;
890 obj = container_of(uobj, struct ib_uxrcd_object, uobject);
891 if (atomic_read(&obj->refcnt)) {
892 put_uobj_write(uobj);
893 ret = -EBUSY;
894 goto out;
895 }
896
897 if (!inode || atomic_dec_and_test(&xrcd->usecnt)) {
898 ret = ib_dealloc_xrcd(uobj->object);
899 if (!ret)
900 uobj->live = 0;
901 }
902
903 live = uobj->live;
904 if (inode && ret)
905 atomic_inc(&xrcd->usecnt);
906
907 put_uobj_write(uobj);
908
909 if (ret)
910 goto out;
911
912 if (inode && !live)
913 xrcd_table_delete(file->device, inode);
914
915 idr_remove_uobj(&ib_uverbs_xrcd_idr, uobj);
916 mutex_lock(&file->mutex);
917 list_del(&uobj->list);
918 mutex_unlock(&file->mutex);
919
920 put_uobj(uobj);
921 ret = in_len;
922
923 out:
924 mutex_unlock(&file->device->xrcd_tree_mutex);
925 return ret;
926 }
927
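/*
 * Shared teardown helper for fd-backed XRCDs: drop one reference and
 * free the XRCD only when the last user is gone; until then the table
 * entry keeps the inode pinned.
 */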
928 void ib_uverbs_dealloc_xrcd(struct ib_uverbs_device *dev,
929 struct ib_xrcd *xrcd)
930 {
931 struct inode *inode;
932
933 inode = xrcd->inode;
934 if (inode && !atomic_dec_and_test(&xrcd->usecnt))
935 return;
936
937 ib_dealloc_xrcd(xrcd);
938
939 if (inode)
940 xrcd_table_delete(dev, inode);
941 }
942
943 ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
944 struct ib_device *ib_dev,
945 const char __user *buf, int in_len,
946 int out_len)
947 {
948 struct ib_uverbs_reg_mr cmd;
949 struct ib_uverbs_reg_mr_resp resp;
950 struct ib_udata udata;
951 struct ib_uobject *uobj;
952 struct ib_pd *pd;
953 struct ib_mr *mr;
954 int ret;
955
956 if (out_len < sizeof resp)
957 return -ENOSPC;
958
959 if (copy_from_user(&cmd, buf, sizeof cmd))
960 return -EFAULT;
961
962 INIT_UDATA(&udata, buf + sizeof cmd,
963 (unsigned long) cmd.response + sizeof resp,
964 in_len - sizeof cmd, out_len - sizeof resp);
965
966 if ((cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK))
967 return -EINVAL;
968
969 ret = ib_check_mr_access(cmd.access_flags);
970 if (ret)
971 return ret;
972
973 uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
974 if (!uobj)
975 return -ENOMEM;
976
977 init_uobj(uobj, 0, file->ucontext, &mr_lock_class);
978 down_write(&uobj->mutex);
979
980 pd = idr_read_pd(cmd.pd_handle, file->ucontext);
981 if (!pd) {
982 ret = -EINVAL;
983 goto err_free;
984 }
985
986 if (cmd.access_flags & IB_ACCESS_ON_DEMAND) {
987 struct ib_device_attr attr;
988
989 ret = ib_query_device(pd->device, &attr);
990 if (ret || !(attr.device_cap_flags &
991 IB_DEVICE_ON_DEMAND_PAGING)) {
992 pr_debug("ODP support not available\n");
993 ret = -EINVAL;
994 goto err_put;
995 }
996 }
997
998 mr = pd->device->reg_user_mr(pd, cmd.start, cmd.length, cmd.hca_va,
999 cmd.access_flags, &udata);
1000 if (IS_ERR(mr)) {
1001 ret = PTR_ERR(mr);
1002 goto err_put;
1003 }
1004
1005 mr->device = pd->device;
1006 mr->pd = pd;
1007 mr->uobject = uobj;
1008 atomic_inc(&pd->usecnt);
1009 atomic_set(&mr->usecnt, 0);
1010
1011 uobj->object = mr;
1012 ret = idr_add_uobj(&ib_uverbs_mr_idr, uobj);
1013 if (ret)
1014 goto err_unreg;
1015
1016 memset(&resp, 0, sizeof resp);
1017 resp.lkey = mr->lkey;
1018 resp.rkey = mr->rkey;
1019 resp.mr_handle = uobj->id;
1020
1021 if (copy_to_user((void __user *) (unsigned long) cmd.response,
1022 &resp, sizeof resp)) {
1023 ret = -EFAULT;
1024 goto err_copy;
1025 }
1026
1027 put_pd_read(pd);
1028
1029 mutex_lock(&file->mutex);
1030 list_add_tail(&uobj->list, &file->ucontext->mr_list);
1031 mutex_unlock(&file->mutex);
1032
1033 uobj->live = 1;
1034
1035 up_write(&uobj->mutex);
1036
1037 return in_len;
1038
1039 err_copy:
1040 idr_remove_uobj(&ib_uverbs_mr_idr, uobj);
1041
1042 err_unreg:
1043 ib_dereg_mr(mr);
1044
1045 err_put:
1046 put_pd_read(pd);
1047
1048 err_free:
1049 put_uobj_write(uobj);
1050 return ret;
1051 }
1052
1053 ssize_t ib_uverbs_rereg_mr(struct ib_uverbs_file *file,
1054 struct ib_device *ib_dev,
1055 const char __user *buf, int in_len,
1056 int out_len)
1057 {
1058 struct ib_uverbs_rereg_mr cmd;
1059 struct ib_uverbs_rereg_mr_resp resp;
1060 struct ib_udata udata;
1061 struct ib_pd *pd = NULL;
1062 struct ib_mr *mr;
1063 struct ib_pd *old_pd;
1064 int ret;
1065 struct ib_uobject *uobj;
1066
1067 if (out_len < sizeof(resp))
1068 return -ENOSPC;
1069
1070 if (copy_from_user(&cmd, buf, sizeof(cmd)))
1071 return -EFAULT;
1072
1073 INIT_UDATA(&udata, buf + sizeof(cmd),
1074 (unsigned long) cmd.response + sizeof(resp),
1075 in_len - sizeof(cmd), out_len - sizeof(resp));
1076
1077 if (cmd.flags & ~IB_MR_REREG_SUPPORTED || !cmd.flags)
1078 return -EINVAL;
1079
1080 if ((cmd.flags & IB_MR_REREG_TRANS) &&
1081 (!cmd.start || !cmd.hca_va || 0 >= cmd.length ||
1082 (cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK)))
1083 return -EINVAL;
1084
1085 uobj = idr_write_uobj(&ib_uverbs_mr_idr, cmd.mr_handle,
1086 file->ucontext);
1087
1088 if (!uobj)
1089 return -EINVAL;
1090
1091 mr = uobj->object;
1092
1093 if (cmd.flags & IB_MR_REREG_ACCESS) {
1094 ret = ib_check_mr_access(cmd.access_flags);
1095 if (ret)
1096 goto put_uobjs;
1097 }
1098
1099 if (cmd.flags & IB_MR_REREG_PD) {
1100 pd = idr_read_pd(cmd.pd_handle, file->ucontext);
1101 if (!pd) {
1102 ret = -EINVAL;
1103 goto put_uobjs;
1104 }
1105 }
1106
1107 if (atomic_read(&mr->usecnt)) {
1108 ret = -EBUSY;
1109 goto put_uobj_pd;
1110 }
1111
1112 old_pd = mr->pd;
1113 ret = mr->device->rereg_user_mr(mr, cmd.flags, cmd.start,
1114 cmd.length, cmd.hca_va,
1115 cmd.access_flags, pd, &udata);
1116 if (!ret) {
1117 if (cmd.flags & IB_MR_REREG_PD) {
1118 atomic_inc(&pd->usecnt);
1119 mr->pd = pd;
1120 atomic_dec(&old_pd->usecnt);
1121 }
1122 } else {
1123 goto put_uobj_pd;
1124 }
1125
1126 memset(&resp, 0, sizeof(resp));
1127 resp.lkey = mr->lkey;
1128 resp.rkey = mr->rkey;
1129
1130 if (copy_to_user((void __user *)(unsigned long)cmd.response,
1131 &resp, sizeof(resp)))
1132 ret = -EFAULT;
1133 else
1134 ret = in_len;
1135
1136 put_uobj_pd:
1137 if (cmd.flags & IB_MR_REREG_PD)
1138 put_pd_read(pd);
1139
1140 put_uobjs:
1141
1142 put_uobj_write(mr->uobject);
1143
1144 return ret;
1145 }
1146
1147 ssize_t ib_uverbs_dereg_mr(struct ib_uverbs_file *file,
1148 struct ib_device *ib_dev,
1149 const char __user *buf, int in_len,
1150 int out_len)
1151 {
1152 struct ib_uverbs_dereg_mr cmd;
1153 struct ib_mr *mr;
1154 struct ib_uobject *uobj;
1155 int ret = -EINVAL;
1156
1157 if (copy_from_user(&cmd, buf, sizeof cmd))
1158 return -EFAULT;
1159
1160 uobj = idr_write_uobj(&ib_uverbs_mr_idr, cmd.mr_handle, file->ucontext);
1161 if (!uobj)
1162 return -EINVAL;
1163
1164 mr = uobj->object;
1165
1166 ret = ib_dereg_mr(mr);
1167 if (!ret)
1168 uobj->live = 0;
1169
1170 put_uobj_write(uobj);
1171
1172 if (ret)
1173 return ret;
1174
1175 idr_remove_uobj(&ib_uverbs_mr_idr, uobj);
1176
1177 mutex_lock(&file->mutex);
1178 list_del(&uobj->list);
1179 mutex_unlock(&file->mutex);
1180
1181 put_uobj(uobj);
1182
1183 return in_len;
1184 }
1185
1186 ssize_t ib_uverbs_alloc_mw(struct ib_uverbs_file *file,
1187 struct ib_device *ib_dev,
1188 const char __user *buf, int in_len,
1189 int out_len)
1190 {
1191 struct ib_uverbs_alloc_mw cmd;
1192 struct ib_uverbs_alloc_mw_resp resp;
1193 struct ib_uobject *uobj;
1194 struct ib_pd *pd;
1195 struct ib_mw *mw;
1196 int ret;
1197
1198 if (out_len < sizeof(resp))
1199 return -ENOSPC;
1200
1201 if (copy_from_user(&cmd, buf, sizeof(cmd)))
1202 return -EFAULT;
1203
1204 uobj = kmalloc(sizeof(*uobj), GFP_KERNEL);
1205 if (!uobj)
1206 return -ENOMEM;
1207
1208 init_uobj(uobj, 0, file->ucontext, &mw_lock_class);
1209 down_write(&uobj->mutex);
1210
1211 pd = idr_read_pd(cmd.pd_handle, file->ucontext);
1212 if (!pd) {
1213 ret = -EINVAL;
1214 goto err_free;
1215 }
1216
1217 mw = pd->device->alloc_mw(pd, cmd.mw_type);
1218 if (IS_ERR(mw)) {
1219 ret = PTR_ERR(mw);
1220 goto err_put;
1221 }
1222
1223 mw->device = pd->device;
1224 mw->pd = pd;
1225 mw->uobject = uobj;
1226 atomic_inc(&pd->usecnt);
1227
1228 uobj->object = mw;
1229 ret = idr_add_uobj(&ib_uverbs_mw_idr, uobj);
1230 if (ret)
1231 goto err_unalloc;
1232
1233 memset(&resp, 0, sizeof(resp));
1234 resp.rkey = mw->rkey;
1235 resp.mw_handle = uobj->id;
1236
1237 if (copy_to_user((void __user *)(unsigned long)cmd.response,
1238 &resp, sizeof(resp))) {
1239 ret = -EFAULT;
1240 goto err_copy;
1241 }
1242
1243 put_pd_read(pd);
1244
1245 mutex_lock(&file->mutex);
1246 list_add_tail(&uobj->list, &file->ucontext->mw_list);
1247 mutex_unlock(&file->mutex);
1248
1249 uobj->live = 1;
1250
1251 up_write(&uobj->mutex);
1252
1253 return in_len;
1254
1255 err_copy:
1256 idr_remove_uobj(&ib_uverbs_mw_idr, uobj);
1257
1258 err_unalloc:
1259 ib_dealloc_mw(mw);
1260
1261 err_put:
1262 put_pd_read(pd);
1263
1264 err_free:
1265 put_uobj_write(uobj);
1266 return ret;
1267 }
1268
1269 ssize_t ib_uverbs_dealloc_mw(struct ib_uverbs_file *file,
1270 struct ib_device *ib_dev,
1271 const char __user *buf, int in_len,
1272 int out_len)
1273 {
1274 struct ib_uverbs_dealloc_mw cmd;
1275 struct ib_mw *mw;
1276 struct ib_uobject *uobj;
1277 int ret = -EINVAL;
1278
1279 if (copy_from_user(&cmd, buf, sizeof(cmd)))
1280 return -EFAULT;
1281
1282 uobj = idr_write_uobj(&ib_uverbs_mw_idr, cmd.mw_handle, file->ucontext);
1283 if (!uobj)
1284 return -EINVAL;
1285
1286 mw = uobj->object;
1287
1288 ret = ib_dealloc_mw(mw);
1289 if (!ret)
1290 uobj->live = 0;
1291
1292 put_uobj_write(uobj);
1293
1294 if (ret)
1295 return ret;
1296
1297 idr_remove_uobj(&ib_uverbs_mw_idr, uobj);
1298
1299 mutex_lock(&file->mutex);
1300 list_del(&uobj->list);
1301 mutex_unlock(&file->mutex);
1302
1303 put_uobj(uobj);
1304
1305 return in_len;
1306 }
1307
1308 ssize_t ib_uverbs_create_comp_channel(struct ib_uverbs_file *file,
1309 struct ib_device *ib_dev,
1310 const char __user *buf, int in_len,
1311 int out_len)
1312 {
1313 struct ib_uverbs_create_comp_channel cmd;
1314 struct ib_uverbs_create_comp_channel_resp resp;
1315 struct file *filp;
1316 int ret;
1317
1318 if (out_len < sizeof resp)
1319 return -ENOSPC;
1320
1321 if (copy_from_user(&cmd, buf, sizeof cmd))
1322 return -EFAULT;
1323
1324 ret = get_unused_fd_flags(O_CLOEXEC);
1325 if (ret < 0)
1326 return ret;
1327 resp.fd = ret;
1328
1329 filp = ib_uverbs_alloc_event_file(file, ib_dev, 0);
1330 if (IS_ERR(filp)) {
1331 put_unused_fd(resp.fd);
1332 return PTR_ERR(filp);
1333 }
1334
1335 if (copy_to_user((void __user *) (unsigned long) cmd.response,
1336 &resp, sizeof resp)) {
1337 put_unused_fd(resp.fd);
1338 fput(filp);
1339 return -EFAULT;
1340 }
1341
1342 fd_install(resp.fd, filp);
1343 return in_len;
1344 }
1345
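/*
 * Common CQ creation shared by the legacy write() path and the
 * extended command. The caller-supplied 'cb' copies the response back
 * to user space, so ib_uverbs_create_cq() can return just the base
 * fields while ib_uverbs_ex_create_cq() returns the full extended
 * response.
 */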
1346 static struct ib_ucq_object *create_cq(struct ib_uverbs_file *file,
1347 struct ib_device *ib_dev,
1348 struct ib_udata *ucore,
1349 struct ib_udata *uhw,
1350 struct ib_uverbs_ex_create_cq *cmd,
1351 size_t cmd_sz,
1352 int (*cb)(struct ib_uverbs_file *file,
1353 struct ib_ucq_object *obj,
1354 struct ib_uverbs_ex_create_cq_resp *resp,
1355 struct ib_udata *udata,
1356 void *context),
1357 void *context)
1358 {
1359 struct ib_ucq_object *obj;
1360 struct ib_uverbs_event_file *ev_file = NULL;
1361 struct ib_cq *cq;
1362 int ret;
1363 struct ib_uverbs_ex_create_cq_resp resp;
1364 struct ib_cq_init_attr attr = {};
1365
1366 if (cmd->comp_vector >= file->device->num_comp_vectors)
1367 return ERR_PTR(-EINVAL);
1368
1369 obj = kmalloc(sizeof *obj, GFP_KERNEL);
1370 if (!obj)
1371 return ERR_PTR(-ENOMEM);
1372
1373 init_uobj(&obj->uobject, cmd->user_handle, file->ucontext, &cq_lock_class);
1374 down_write(&obj->uobject.mutex);
1375
1376 if (cmd->comp_channel >= 0) {
1377 ev_file = ib_uverbs_lookup_comp_file(cmd->comp_channel);
1378 if (!ev_file) {
1379 ret = -EINVAL;
1380 goto err;
1381 }
1382 }
1383
1384 obj->uverbs_file = file;
1385 obj->comp_events_reported = 0;
1386 obj->async_events_reported = 0;
1387 INIT_LIST_HEAD(&obj->comp_list);
1388 INIT_LIST_HEAD(&obj->async_list);
1389
1390 attr.cqe = cmd->cqe;
1391 attr.comp_vector = cmd->comp_vector;
1392
1393 if (cmd_sz > offsetof(typeof(*cmd), flags) + sizeof(cmd->flags))
1394 attr.flags = cmd->flags;
1395
1396 cq = ib_dev->create_cq(ib_dev, &attr,
1397 file->ucontext, uhw);
1398 if (IS_ERR(cq)) {
1399 ret = PTR_ERR(cq);
1400 goto err_file;
1401 }
1402
1403 cq->device = ib_dev;
1404 cq->uobject = &obj->uobject;
1405 cq->comp_handler = ib_uverbs_comp_handler;
1406 cq->event_handler = ib_uverbs_cq_event_handler;
1407 cq->cq_context = ev_file;
1408 atomic_set(&cq->usecnt, 0);
1409
1410 obj->uobject.object = cq;
1411 ret = idr_add_uobj(&ib_uverbs_cq_idr, &obj->uobject);
1412 if (ret)
1413 goto err_free;
1414
1415 memset(&resp, 0, sizeof resp);
1416 resp.base.cq_handle = obj->uobject.id;
1417 resp.base.cqe = cq->cqe;
1418
1419 resp.response_length = offsetof(typeof(resp), response_length) +
1420 sizeof(resp.response_length);
1421
1422 ret = cb(file, obj, &resp, ucore, context);
1423 if (ret)
1424 goto err_cb;
1425
1426 mutex_lock(&file->mutex);
1427 list_add_tail(&obj->uobject.list, &file->ucontext->cq_list);
1428 mutex_unlock(&file->mutex);
1429
1430 obj->uobject.live = 1;
1431
1432 up_write(&obj->uobject.mutex);
1433
1434 return obj;
1435
1436 err_cb:
1437 idr_remove_uobj(&ib_uverbs_cq_idr, &obj->uobject);
1438
1439 err_free:
1440 ib_destroy_cq(cq);
1441
1442 err_file:
1443 if (ev_file)
1444 ib_uverbs_release_ucq(file, ev_file, obj);
1445
1446 err:
1447 put_uobj_write(&obj->uobject);
1448
1449 return ERR_PTR(ret);
1450 }
1451
1452 static int ib_uverbs_create_cq_cb(struct ib_uverbs_file *file,
1453 struct ib_ucq_object *obj,
1454 struct ib_uverbs_ex_create_cq_resp *resp,
1455 struct ib_udata *ucore, void *context)
1456 {
1457 if (ib_copy_to_udata(ucore, &resp->base, sizeof(resp->base)))
1458 return -EFAULT;
1459
1460 return 0;
1461 }
1462
1463 ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
1464 struct ib_device *ib_dev,
1465 const char __user *buf, int in_len,
1466 int out_len)
1467 {
1468 struct ib_uverbs_create_cq cmd;
1469 struct ib_uverbs_ex_create_cq cmd_ex;
1470 struct ib_uverbs_create_cq_resp resp;
1471 struct ib_udata ucore;
1472 struct ib_udata uhw;
1473 struct ib_ucq_object *obj;
1474
1475 if (out_len < sizeof(resp))
1476 return -ENOSPC;
1477
1478 if (copy_from_user(&cmd, buf, sizeof(cmd)))
1479 return -EFAULT;
1480
1481 	INIT_UDATA(&ucore, buf, (unsigned long) cmd.response, sizeof(cmd), sizeof(resp));
1482
1483 INIT_UDATA(&uhw, buf + sizeof(cmd),
1484 (unsigned long)cmd.response + sizeof(resp),
1485 in_len - sizeof(cmd), out_len - sizeof(resp));
1486
1487 memset(&cmd_ex, 0, sizeof(cmd_ex));
1488 cmd_ex.user_handle = cmd.user_handle;
1489 cmd_ex.cqe = cmd.cqe;
1490 cmd_ex.comp_vector = cmd.comp_vector;
1491 cmd_ex.comp_channel = cmd.comp_channel;
1492
1493 obj = create_cq(file, ib_dev, &ucore, &uhw, &cmd_ex,
1494 offsetof(typeof(cmd_ex), comp_channel) +
1495 sizeof(cmd.comp_channel), ib_uverbs_create_cq_cb,
1496 NULL);
1497
1498 if (IS_ERR(obj))
1499 return PTR_ERR(obj);
1500
1501 return in_len;
1502 }
1503
1504 static int ib_uverbs_ex_create_cq_cb(struct ib_uverbs_file *file,
1505 struct ib_ucq_object *obj,
1506 struct ib_uverbs_ex_create_cq_resp *resp,
1507 struct ib_udata *ucore, void *context)
1508 {
1509 if (ib_copy_to_udata(ucore, resp, resp->response_length))
1510 return -EFAULT;
1511
1512 return 0;
1513 }
1514
1515 int ib_uverbs_ex_create_cq(struct ib_uverbs_file *file,
1516 struct ib_device *ib_dev,
1517 struct ib_udata *ucore,
1518 struct ib_udata *uhw)
1519 {
1520 struct ib_uverbs_ex_create_cq_resp resp;
1521 struct ib_uverbs_ex_create_cq cmd;
1522 struct ib_ucq_object *obj;
1523 int err;
1524
1525 if (ucore->inlen < sizeof(cmd))
1526 return -EINVAL;
1527
1528 err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
1529 if (err)
1530 return err;
1531
1532 if (cmd.comp_mask)
1533 return -EINVAL;
1534
1535 if (cmd.reserved)
1536 return -EINVAL;
1537
1538 if (ucore->outlen < (offsetof(typeof(resp), response_length) +
1539 sizeof(resp.response_length)))
1540 return -ENOSPC;
1541
1542 obj = create_cq(file, ib_dev, ucore, uhw, &cmd,
1543 min(ucore->inlen, sizeof(cmd)),
1544 ib_uverbs_ex_create_cq_cb, NULL);
1545
1546 if (IS_ERR(obj))
1547 return PTR_ERR(obj);
1548
1549 return 0;
1550 }
1551
1552 ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file,
1553 struct ib_device *ib_dev,
1554 const char __user *buf, int in_len,
1555 int out_len)
1556 {
1557 struct ib_uverbs_resize_cq cmd;
1558 struct ib_uverbs_resize_cq_resp resp;
1559 struct ib_udata udata;
1560 struct ib_cq *cq;
1561 int ret = -EINVAL;
1562
1563 if (copy_from_user(&cmd, buf, sizeof cmd))
1564 return -EFAULT;
1565
1566 INIT_UDATA(&udata, buf + sizeof cmd,
1567 (unsigned long) cmd.response + sizeof resp,
1568 in_len - sizeof cmd, out_len - sizeof resp);
1569
1570 cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
1571 if (!cq)
1572 return -EINVAL;
1573
1574 ret = cq->device->resize_cq(cq, cmd.cqe, &udata);
1575 if (ret)
1576 goto out;
1577
1578 resp.cqe = cq->cqe;
1579
1580 if (copy_to_user((void __user *) (unsigned long) cmd.response,
1581 &resp, sizeof resp.cqe))
1582 ret = -EFAULT;
1583
1584 out:
1585 put_cq_read(cq);
1586
1587 return ret ? ret : in_len;
1588 }
1589
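/*
 * Translate a kernel ib_wc into the fixed-layout ib_uverbs_wc exposed
 * to user space, field by field, so kernel-internal members (the qp
 * pointer, reported here only as qp_num) never leak to the user.
 */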
1590 static int copy_wc_to_user(void __user *dest, struct ib_wc *wc)
1591 {
1592 struct ib_uverbs_wc tmp;
1593
1594 tmp.wr_id = wc->wr_id;
1595 tmp.status = wc->status;
1596 tmp.opcode = wc->opcode;
1597 tmp.vendor_err = wc->vendor_err;
1598 tmp.byte_len = wc->byte_len;
1599 tmp.ex.imm_data = (__u32 __force) wc->ex.imm_data;
1600 tmp.qp_num = wc->qp->qp_num;
1601 tmp.src_qp = wc->src_qp;
1602 tmp.wc_flags = wc->wc_flags;
1603 tmp.pkey_index = wc->pkey_index;
1604 tmp.slid = wc->slid;
1605 tmp.sl = wc->sl;
1606 tmp.dlid_path_bits = wc->dlid_path_bits;
1607 tmp.port_num = wc->port_num;
1608 tmp.reserved = 0;
1609
1610 if (copy_to_user(dest, &tmp, sizeof tmp))
1611 return -EFAULT;
1612
1613 return 0;
1614 }
1615
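/*
 * Poll up to cmd.ne completions one at a time, appending each
 * converted ib_uverbs_wc after the ib_uverbs_poll_cq_resp header in
 * the user buffer; resp.count reports how many were written.
 */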
1616 ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file,
1617 struct ib_device *ib_dev,
1618 const char __user *buf, int in_len,
1619 int out_len)
1620 {
1621 struct ib_uverbs_poll_cq cmd;
1622 struct ib_uverbs_poll_cq_resp resp;
1623 u8 __user *header_ptr;
1624 u8 __user *data_ptr;
1625 struct ib_cq *cq;
1626 struct ib_wc wc;
1627 int ret;
1628
1629 if (copy_from_user(&cmd, buf, sizeof cmd))
1630 return -EFAULT;
1631
1632 cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
1633 if (!cq)
1634 return -EINVAL;
1635
1636 /* we copy a struct ib_uverbs_poll_cq_resp to user space */
1637 header_ptr = (void __user *)(unsigned long) cmd.response;
1638 data_ptr = header_ptr + sizeof resp;
1639
1640 memset(&resp, 0, sizeof resp);
1641 while (resp.count < cmd.ne) {
1642 ret = ib_poll_cq(cq, 1, &wc);
1643 if (ret < 0)
1644 goto out_put;
1645 if (!ret)
1646 break;
1647
1648 ret = copy_wc_to_user(data_ptr, &wc);
1649 if (ret)
1650 goto out_put;
1651
1652 data_ptr += sizeof(struct ib_uverbs_wc);
1653 ++resp.count;
1654 }
1655
1656 if (copy_to_user(header_ptr, &resp, sizeof resp)) {
1657 ret = -EFAULT;
1658 goto out_put;
1659 }
1660
1661 ret = in_len;
1662
1663 out_put:
1664 put_cq_read(cq);
1665 return ret;
1666 }
1667
1668 ssize_t ib_uverbs_req_notify_cq(struct ib_uverbs_file *file,
1669 struct ib_device *ib_dev,
1670 const char __user *buf, int in_len,
1671 int out_len)
1672 {
1673 struct ib_uverbs_req_notify_cq cmd;
1674 struct ib_cq *cq;
1675
1676 if (copy_from_user(&cmd, buf, sizeof cmd))
1677 return -EFAULT;
1678
1679 cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
1680 if (!cq)
1681 return -EINVAL;
1682
1683 ib_req_notify_cq(cq, cmd.solicited_only ?
1684 IB_CQ_SOLICITED : IB_CQ_NEXT_COMP);
1685
1686 put_cq_read(cq);
1687
1688 return in_len;
1689 }
1690
1691 ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file,
1692 struct ib_device *ib_dev,
1693 const char __user *buf, int in_len,
1694 int out_len)
1695 {
1696 struct ib_uverbs_destroy_cq cmd;
1697 struct ib_uverbs_destroy_cq_resp resp;
1698 struct ib_uobject *uobj;
1699 struct ib_cq *cq;
1700 struct ib_ucq_object *obj;
1701 struct ib_uverbs_event_file *ev_file;
1702 int ret = -EINVAL;
1703
1704 if (copy_from_user(&cmd, buf, sizeof cmd))
1705 return -EFAULT;
1706
1707 uobj = idr_write_uobj(&ib_uverbs_cq_idr, cmd.cq_handle, file->ucontext);
1708 if (!uobj)
1709 return -EINVAL;
1710 cq = uobj->object;
1711 ev_file = cq->cq_context;
1712 obj = container_of(cq->uobject, struct ib_ucq_object, uobject);
1713
1714 ret = ib_destroy_cq(cq);
1715 if (!ret)
1716 uobj->live = 0;
1717
1718 put_uobj_write(uobj);
1719
1720 if (ret)
1721 return ret;
1722
1723 idr_remove_uobj(&ib_uverbs_cq_idr, uobj);
1724
1725 mutex_lock(&file->mutex);
1726 list_del(&uobj->list);
1727 mutex_unlock(&file->mutex);
1728
1729 ib_uverbs_release_ucq(file, ev_file, obj);
1730
1731 memset(&resp, 0, sizeof resp);
1732 resp.comp_events_reported = obj->comp_events_reported;
1733 resp.async_events_reported = obj->async_events_reported;
1734
1735 put_uobj(uobj);
1736
1737 if (copy_to_user((void __user *) (unsigned long) cmd.response,
1738 &resp, sizeof resp))
1739 return -EFAULT;
1740
1741 return in_len;
1742 }
1743
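/*
 * QP creation resolves its attached objects according to the QP type:
 * XRC_TGT takes an XRCD (cmd.pd_handle doubles as the XRCD handle)
 * and no PD or CQs, XRC_INI has no receive side, and every other type
 * needs a PD, send/receive CQs and optionally an SRQ.
 */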
1744 ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
1745 struct ib_device *ib_dev,
1746 const char __user *buf, int in_len,
1747 int out_len)
1748 {
1749 struct ib_uverbs_create_qp cmd;
1750 struct ib_uverbs_create_qp_resp resp;
1751 struct ib_udata udata;
1752 struct ib_uqp_object *obj;
1753 struct ib_device *device;
1754 struct ib_pd *pd = NULL;
1755 struct ib_xrcd *xrcd = NULL;
1756 struct ib_uobject *uninitialized_var(xrcd_uobj);
1757 struct ib_cq *scq = NULL, *rcq = NULL;
1758 struct ib_srq *srq = NULL;
1759 struct ib_qp *qp;
1760 struct ib_qp_init_attr attr;
1761 int ret;
1762
1763 if (out_len < sizeof resp)
1764 return -ENOSPC;
1765
1766 if (copy_from_user(&cmd, buf, sizeof cmd))
1767 return -EFAULT;
1768
1769 if (cmd.qp_type == IB_QPT_RAW_PACKET && !capable(CAP_NET_RAW))
1770 return -EPERM;
1771
1772 INIT_UDATA(&udata, buf + sizeof cmd,
1773 (unsigned long) cmd.response + sizeof resp,
1774 in_len - sizeof cmd, out_len - sizeof resp);
1775
1776 obj = kzalloc(sizeof *obj, GFP_KERNEL);
1777 if (!obj)
1778 return -ENOMEM;
1779
1780 init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext, &qp_lock_class);
1781 down_write(&obj->uevent.uobject.mutex);
1782
1783 if (cmd.qp_type == IB_QPT_XRC_TGT) {
1784 xrcd = idr_read_xrcd(cmd.pd_handle, file->ucontext, &xrcd_uobj);
1785 if (!xrcd) {
1786 ret = -EINVAL;
1787 goto err_put;
1788 }
1789 device = xrcd->device;
1790 } else {
1791 if (cmd.qp_type == IB_QPT_XRC_INI) {
1792 cmd.max_recv_wr = cmd.max_recv_sge = 0;
1793 } else {
1794 if (cmd.is_srq) {
1795 srq = idr_read_srq(cmd.srq_handle, file->ucontext);
1796 if (!srq || srq->srq_type != IB_SRQT_BASIC) {
1797 ret = -EINVAL;
1798 goto err_put;
1799 }
1800 }
1801
1802 if (cmd.recv_cq_handle != cmd.send_cq_handle) {
1803 rcq = idr_read_cq(cmd.recv_cq_handle, file->ucontext, 0);
1804 if (!rcq) {
1805 ret = -EINVAL;
1806 goto err_put;
1807 }
1808 }
1809 }
1810
1811 scq = idr_read_cq(cmd.send_cq_handle, file->ucontext, !!rcq);
1812 rcq = rcq ?: scq;
1813 pd = idr_read_pd(cmd.pd_handle, file->ucontext);
1814 if (!pd || !scq) {
1815 ret = -EINVAL;
1816 goto err_put;
1817 }
1818
1819 device = pd->device;
1820 }
1821
1822 attr.event_handler = ib_uverbs_qp_event_handler;
1823 attr.qp_context = file;
1824 attr.send_cq = scq;
1825 attr.recv_cq = rcq;
1826 attr.srq = srq;
1827 attr.xrcd = xrcd;
1828 attr.sq_sig_type = cmd.sq_sig_all ? IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;
1829 attr.qp_type = cmd.qp_type;
1830 attr.create_flags = 0;
1831
1832 attr.cap.max_send_wr = cmd.max_send_wr;
1833 attr.cap.max_recv_wr = cmd.max_recv_wr;
1834 attr.cap.max_send_sge = cmd.max_send_sge;
1835 attr.cap.max_recv_sge = cmd.max_recv_sge;
1836 attr.cap.max_inline_data = cmd.max_inline_data;
1837
1838 obj->uevent.events_reported = 0;
1839 INIT_LIST_HEAD(&obj->uevent.event_list);
1840 INIT_LIST_HEAD(&obj->mcast_list);
1841
1842 if (cmd.qp_type == IB_QPT_XRC_TGT)
1843 qp = ib_create_qp(pd, &attr);
1844 else
1845 qp = device->create_qp(pd, &attr, &udata);
1846
1847 if (IS_ERR(qp)) {
1848 ret = PTR_ERR(qp);
1849 goto err_put;
1850 }
1851
1852 if (cmd.qp_type != IB_QPT_XRC_TGT) {
1853 qp->real_qp = qp;
1854 qp->device = device;
1855 qp->pd = pd;
1856 qp->send_cq = attr.send_cq;
1857 qp->recv_cq = attr.recv_cq;
1858 qp->srq = attr.srq;
1859 qp->event_handler = attr.event_handler;
1860 qp->qp_context = attr.qp_context;
1861 qp->qp_type = attr.qp_type;
1862 atomic_set(&qp->usecnt, 0);
1863 atomic_inc(&pd->usecnt);
1864 atomic_inc(&attr.send_cq->usecnt);
1865 if (attr.recv_cq)
1866 atomic_inc(&attr.recv_cq->usecnt);
1867 if (attr.srq)
1868 atomic_inc(&attr.srq->usecnt);
1869 }
1870 qp->uobject = &obj->uevent.uobject;
1871
1872 obj->uevent.uobject.object = qp;
1873 ret = idr_add_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);
1874 if (ret)
1875 goto err_destroy;
1876
1877 memset(&resp, 0, sizeof resp);
1878 resp.qpn = qp->qp_num;
1879 resp.qp_handle = obj->uevent.uobject.id;
1880 resp.max_recv_sge = attr.cap.max_recv_sge;
1881 resp.max_send_sge = attr.cap.max_send_sge;
1882 resp.max_recv_wr = attr.cap.max_recv_wr;
1883 resp.max_send_wr = attr.cap.max_send_wr;
1884 resp.max_inline_data = attr.cap.max_inline_data;
1885
1886 if (copy_to_user((void __user *) (unsigned long) cmd.response,
1887 &resp, sizeof resp)) {
1888 ret = -EFAULT;
1889 goto err_copy;
1890 }
1891
1892 if (xrcd) {
1893 obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object,
1894 uobject);
1895 atomic_inc(&obj->uxrcd->refcnt);
1896 put_xrcd_read(xrcd_uobj);
1897 }
1898
1899 if (pd)
1900 put_pd_read(pd);
1901 if (scq)
1902 put_cq_read(scq);
1903 if (rcq && rcq != scq)
1904 put_cq_read(rcq);
1905 if (srq)
1906 put_srq_read(srq);
1907
1908 mutex_lock(&file->mutex);
1909 list_add_tail(&obj->uevent.uobject.list, &file->ucontext->qp_list);
1910 mutex_unlock(&file->mutex);
1911
1912 obj->uevent.uobject.live = 1;
1913
1914 up_write(&obj->uevent.uobject.mutex);
1915
1916 return in_len;
1917
1918 err_copy:
1919 idr_remove_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);
1920
1921 err_destroy:
1922 ib_destroy_qp(qp);
1923
1924 err_put:
1925 if (xrcd)
1926 put_xrcd_read(xrcd_uobj);
1927 if (pd)
1928 put_pd_read(pd);
1929 if (scq)
1930 put_cq_read(scq);
1931 if (rcq && rcq != scq)
1932 put_cq_read(rcq);
1933 if (srq)
1934 put_srq_read(srq);
1935
1936 put_uobj_write(&obj->uevent.uobject);
1937 return ret;
1938 }
1939
1940 ssize_t ib_uverbs_open_qp(struct ib_uverbs_file *file,
1941 struct ib_device *ib_dev,
1942 const char __user *buf, int in_len, int out_len)
1943 {
1944 struct ib_uverbs_open_qp cmd;
1945 struct ib_uverbs_create_qp_resp resp;
1946 struct ib_udata udata;
1947 struct ib_uqp_object *obj;
1948 struct ib_xrcd *xrcd;
1949 struct ib_uobject *uninitialized_var(xrcd_uobj);
1950 struct ib_qp *qp;
1951 struct ib_qp_open_attr attr;
1952 int ret;
1953
1954 if (out_len < sizeof resp)
1955 return -ENOSPC;
1956
1957 if (copy_from_user(&cmd, buf, sizeof cmd))
1958 return -EFAULT;
1959
1960 INIT_UDATA(&udata, buf + sizeof cmd,
1961 (unsigned long) cmd.response + sizeof resp,
1962 in_len - sizeof cmd, out_len - sizeof resp);
1963
1964 obj = kmalloc(sizeof *obj, GFP_KERNEL);
1965 if (!obj)
1966 return -ENOMEM;
1967
1968 init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext, &qp_lock_class);
1969 down_write(&obj->uevent.uobject.mutex);
1970
1971 xrcd = idr_read_xrcd(cmd.pd_handle, file->ucontext, &xrcd_uobj);
1972 if (!xrcd) {
1973 ret = -EINVAL;
1974 goto err_put;
1975 }
1976
1977 attr.event_handler = ib_uverbs_qp_event_handler;
1978 attr.qp_context = file;
1979 attr.qp_num = cmd.qpn;
1980 attr.qp_type = cmd.qp_type;
1981
1982 obj->uevent.events_reported = 0;
1983 INIT_LIST_HEAD(&obj->uevent.event_list);
1984 INIT_LIST_HEAD(&obj->mcast_list);
1985
1986 qp = ib_open_qp(xrcd, &attr);
1987 if (IS_ERR(qp)) {
1988 ret = PTR_ERR(qp);
1989 goto err_put;
1990 }
1991
1992 qp->uobject = &obj->uevent.uobject;
1993
1994 obj->uevent.uobject.object = qp;
1995 ret = idr_add_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);
1996 if (ret)
1997 goto err_destroy;
1998
1999 memset(&resp, 0, sizeof resp);
2000 resp.qpn = qp->qp_num;
2001 resp.qp_handle = obj->uevent.uobject.id;
2002
2003 if (copy_to_user((void __user *) (unsigned long) cmd.response,
2004 &resp, sizeof resp)) {
2005 ret = -EFAULT;
2006 goto err_remove;
2007 }
2008
2009 obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
2010 atomic_inc(&obj->uxrcd->refcnt);
2011 put_xrcd_read(xrcd_uobj);
2012
2013 mutex_lock(&file->mutex);
2014 list_add_tail(&obj->uevent.uobject.list, &file->ucontext->qp_list);
2015 mutex_unlock(&file->mutex);
2016
2017 obj->uevent.uobject.live = 1;
2018
2019 up_write(&obj->uevent.uobject.mutex);
2020
2021 return in_len;
2022
2023 err_remove:
2024 idr_remove_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);
2025
2026 err_destroy:
2027 ib_destroy_qp(qp);
2028
2029 err_put:
2030 put_xrcd_read(xrcd_uobj);
2031 put_uobj_write(&obj->uevent.uobject);
2032 return ret;
2033 }
2034
2035 ssize_t ib_uverbs_query_qp(struct ib_uverbs_file *file,
2036 struct ib_device *ib_dev,
2037 const char __user *buf, int in_len,
2038 int out_len)
2039 {
2040 struct ib_uverbs_query_qp cmd;
2041 struct ib_uverbs_query_qp_resp resp;
2042 struct ib_qp *qp;
2043 struct ib_qp_attr *attr;
2044 struct ib_qp_init_attr *init_attr;
2045 int ret;
2046
2047 if (copy_from_user(&cmd, buf, sizeof cmd))
2048 return -EFAULT;
2049
2050 attr = kmalloc(sizeof *attr, GFP_KERNEL);
2051 init_attr = kmalloc(sizeof *init_attr, GFP_KERNEL);
2052 if (!attr || !init_attr) {
2053 ret = -ENOMEM;
2054 goto out;
2055 }
2056
2057 qp = idr_read_qp(cmd.qp_handle, file->ucontext);
2058 if (!qp) {
2059 ret = -EINVAL;
2060 goto out;
2061 }
2062
2063 ret = ib_query_qp(qp, attr, cmd.attr_mask, init_attr);
2064
2065 put_qp_read(qp);
2066
2067 if (ret)
2068 goto out;
2069
2070 memset(&resp, 0, sizeof resp);
2071
2072 resp.qp_state = attr->qp_state;
2073 resp.cur_qp_state = attr->cur_qp_state;
2074 resp.path_mtu = attr->path_mtu;
2075 resp.path_mig_state = attr->path_mig_state;
2076 resp.qkey = attr->qkey;
2077 resp.rq_psn = attr->rq_psn;
2078 resp.sq_psn = attr->sq_psn;
2079 resp.dest_qp_num = attr->dest_qp_num;
2080 resp.qp_access_flags = attr->qp_access_flags;
2081 resp.pkey_index = attr->pkey_index;
2082 resp.alt_pkey_index = attr->alt_pkey_index;
2083 resp.sq_draining = attr->sq_draining;
2084 resp.max_rd_atomic = attr->max_rd_atomic;
2085 resp.max_dest_rd_atomic = attr->max_dest_rd_atomic;
2086 resp.min_rnr_timer = attr->min_rnr_timer;
2087 resp.port_num = attr->port_num;
2088 resp.timeout = attr->timeout;
2089 resp.retry_cnt = attr->retry_cnt;
2090 resp.rnr_retry = attr->rnr_retry;
2091 resp.alt_port_num = attr->alt_port_num;
2092 resp.alt_timeout = attr->alt_timeout;
2093
2094 memcpy(resp.dest.dgid, attr->ah_attr.grh.dgid.raw, 16);
2095 resp.dest.flow_label = attr->ah_attr.grh.flow_label;
2096 resp.dest.sgid_index = attr->ah_attr.grh.sgid_index;
2097 resp.dest.hop_limit = attr->ah_attr.grh.hop_limit;
2098 resp.dest.traffic_class = attr->ah_attr.grh.traffic_class;
2099 resp.dest.dlid = attr->ah_attr.dlid;
2100 resp.dest.sl = attr->ah_attr.sl;
2101 resp.dest.src_path_bits = attr->ah_attr.src_path_bits;
2102 resp.dest.static_rate = attr->ah_attr.static_rate;
2103 resp.dest.is_global = !!(attr->ah_attr.ah_flags & IB_AH_GRH);
2104 resp.dest.port_num = attr->ah_attr.port_num;
2105
2106 memcpy(resp.alt_dest.dgid, attr->alt_ah_attr.grh.dgid.raw, 16);
2107 resp.alt_dest.flow_label = attr->alt_ah_attr.grh.flow_label;
2108 resp.alt_dest.sgid_index = attr->alt_ah_attr.grh.sgid_index;
2109 resp.alt_dest.hop_limit = attr->alt_ah_attr.grh.hop_limit;
2110 resp.alt_dest.traffic_class = attr->alt_ah_attr.grh.traffic_class;
2111 resp.alt_dest.dlid = attr->alt_ah_attr.dlid;
2112 resp.alt_dest.sl = attr->alt_ah_attr.sl;
2113 resp.alt_dest.src_path_bits = attr->alt_ah_attr.src_path_bits;
2114 resp.alt_dest.static_rate = attr->alt_ah_attr.static_rate;
2115 resp.alt_dest.is_global = !!(attr->alt_ah_attr.ah_flags & IB_AH_GRH);
2116 resp.alt_dest.port_num = attr->alt_ah_attr.port_num;
2117
2118 resp.max_send_wr = init_attr->cap.max_send_wr;
2119 resp.max_recv_wr = init_attr->cap.max_recv_wr;
2120 resp.max_send_sge = init_attr->cap.max_send_sge;
2121 resp.max_recv_sge = init_attr->cap.max_recv_sge;
2122 resp.max_inline_data = init_attr->cap.max_inline_data;
2123 resp.sq_sig_all = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR;
2124
2125 if (copy_to_user((void __user *) (unsigned long) cmd.response,
2126 &resp, sizeof resp))
2127 ret = -EFAULT;
2128
2129 out:
2130 kfree(attr);
2131 kfree(init_attr);
2132
2133 return ret ? ret : in_len;
2134 }
2135
2136 /* Remove ignored fields set in the attribute mask */
2137 static int modify_qp_mask(enum ib_qp_type qp_type, int mask)
2138 {
2139 switch (qp_type) {
2140 case IB_QPT_XRC_INI:
2141 return mask & ~(IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER);
2142 case IB_QPT_XRC_TGT:
2143 return mask & ~(IB_QP_MAX_QP_RD_ATOMIC | IB_QP_RETRY_CNT |
2144 IB_QP_RNR_RETRY);
2145 default:
2146 return mask;
2147 }
2148 }
2149
2150 ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
2151 struct ib_device *ib_dev,
2152 const char __user *buf, int in_len,
2153 int out_len)
2154 {
2155 struct ib_uverbs_modify_qp cmd;
2156 struct ib_udata udata;
2157 struct ib_qp *qp;
2158 struct ib_qp_attr *attr;
2159 int ret;
2160
2161 if (copy_from_user(&cmd, buf, sizeof cmd))
2162 return -EFAULT;
2163
2164 INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
2165 out_len);
2166
2167 attr = kmalloc(sizeof *attr, GFP_KERNEL);
2168 if (!attr)
2169 return -ENOMEM;
2170
2171 qp = idr_read_qp(cmd.qp_handle, file->ucontext);
2172 if (!qp) {
2173 ret = -EINVAL;
2174 goto out;
2175 }
2176
2177 attr->qp_state = cmd.qp_state;
2178 attr->cur_qp_state = cmd.cur_qp_state;
2179 attr->path_mtu = cmd.path_mtu;
2180 attr->path_mig_state = cmd.path_mig_state;
2181 attr->qkey = cmd.qkey;
2182 attr->rq_psn = cmd.rq_psn;
2183 attr->sq_psn = cmd.sq_psn;
2184 attr->dest_qp_num = cmd.dest_qp_num;
2185 attr->qp_access_flags = cmd.qp_access_flags;
2186 attr->pkey_index = cmd.pkey_index;
2187 attr->alt_pkey_index = cmd.alt_pkey_index;
2188 attr->en_sqd_async_notify = cmd.en_sqd_async_notify;
2189 attr->max_rd_atomic = cmd.max_rd_atomic;
2190 attr->max_dest_rd_atomic = cmd.max_dest_rd_atomic;
2191 attr->min_rnr_timer = cmd.min_rnr_timer;
2192 attr->port_num = cmd.port_num;
2193 attr->timeout = cmd.timeout;
2194 attr->retry_cnt = cmd.retry_cnt;
2195 attr->rnr_retry = cmd.rnr_retry;
2196 attr->alt_port_num = cmd.alt_port_num;
2197 attr->alt_timeout = cmd.alt_timeout;
2198
2199 memcpy(attr->ah_attr.grh.dgid.raw, cmd.dest.dgid, 16);
2200 attr->ah_attr.grh.flow_label = cmd.dest.flow_label;
2201 attr->ah_attr.grh.sgid_index = cmd.dest.sgid_index;
2202 attr->ah_attr.grh.hop_limit = cmd.dest.hop_limit;
2203 attr->ah_attr.grh.traffic_class = cmd.dest.traffic_class;
2204 attr->ah_attr.dlid = cmd.dest.dlid;
2205 attr->ah_attr.sl = cmd.dest.sl;
2206 attr->ah_attr.src_path_bits = cmd.dest.src_path_bits;
2207 attr->ah_attr.static_rate = cmd.dest.static_rate;
2208 attr->ah_attr.ah_flags = cmd.dest.is_global ? IB_AH_GRH : 0;
2209 attr->ah_attr.port_num = cmd.dest.port_num;
2210
2211 memcpy(attr->alt_ah_attr.grh.dgid.raw, cmd.alt_dest.dgid, 16);
2212 attr->alt_ah_attr.grh.flow_label = cmd.alt_dest.flow_label;
2213 attr->alt_ah_attr.grh.sgid_index = cmd.alt_dest.sgid_index;
2214 attr->alt_ah_attr.grh.hop_limit = cmd.alt_dest.hop_limit;
2215 attr->alt_ah_attr.grh.traffic_class = cmd.alt_dest.traffic_class;
2216 attr->alt_ah_attr.dlid = cmd.alt_dest.dlid;
2217 attr->alt_ah_attr.sl = cmd.alt_dest.sl;
2218 attr->alt_ah_attr.src_path_bits = cmd.alt_dest.src_path_bits;
2219 attr->alt_ah_attr.static_rate = cmd.alt_dest.static_rate;
2220 attr->alt_ah_attr.ah_flags = cmd.alt_dest.is_global ? IB_AH_GRH : 0;
2221 attr->alt_ah_attr.port_num = cmd.alt_dest.port_num;
2222
2223 if (qp->real_qp == qp) {
2224 ret = ib_resolve_eth_l2_attrs(qp, attr, &cmd.attr_mask);
2225 if (ret)
2226 goto release_qp;
2227 ret = qp->device->modify_qp(qp, attr,
2228 modify_qp_mask(qp->qp_type, cmd.attr_mask), &udata);
2229 } else {
2230 ret = ib_modify_qp(qp, attr, modify_qp_mask(qp->qp_type, cmd.attr_mask));
2231 }
2232
2233 if (ret)
2234 goto release_qp;
2235
2236 ret = in_len;
2237
2238 release_qp:
2239 put_qp_read(qp);
2240
2241 out:
2242 kfree(attr);
2243
2244 return ret;
2245 }
2246
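/*
 * Destroy a QP.  Fails with -EBUSY while multicast groups are still
 * attached.  The uobject is marked dead under its write lock before
 * being removed from the idr and the context's list, so concurrent
 * lookups cannot revive it; for XRC targets the XRCD refcount taken
 * at creation is dropped here.
 */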
2247 ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
2248 struct ib_device *ib_dev,
2249 const char __user *buf, int in_len,
2250 int out_len)
2251 {
2252 struct ib_uverbs_destroy_qp cmd;
2253 struct ib_uverbs_destroy_qp_resp resp;
2254 struct ib_uobject *uobj;
2255 struct ib_qp *qp;
2256 struct ib_uqp_object *obj;
2257 int ret = -EINVAL;
2258
2259 if (copy_from_user(&cmd, buf, sizeof cmd))
2260 return -EFAULT;
2261
2262 memset(&resp, 0, sizeof resp);
2263
2264 uobj = idr_write_uobj(&ib_uverbs_qp_idr, cmd.qp_handle, file->ucontext);
2265 if (!uobj)
2266 return -EINVAL;
2267 qp = uobj->object;
2268 obj = container_of(uobj, struct ib_uqp_object, uevent.uobject);
2269
2270 if (!list_empty(&obj->mcast_list)) {
2271 put_uobj_write(uobj);
2272 return -EBUSY;
2273 }
2274
2275 ret = ib_destroy_qp(qp);
2276 if (!ret)
2277 uobj->live = 0;
2278
2279 put_uobj_write(uobj);
2280
2281 if (ret)
2282 return ret;
2283
2284 if (obj->uxrcd)
2285 atomic_dec(&obj->uxrcd->refcnt);
2286
2287 idr_remove_uobj(&ib_uverbs_qp_idr, uobj);
2288
2289 mutex_lock(&file->mutex);
2290 list_del(&uobj->list);
2291 mutex_unlock(&file->mutex);
2292
2293 ib_uverbs_release_uevent(file, &obj->uevent);
2294
2295 resp.events_reported = obj->uevent.events_reported;
2296
2297 put_uobj(uobj);
2298
2299 if (copy_to_user((void __user *) (unsigned long) cmd.response,
2300 &resp, sizeof resp))
2301 return -EFAULT;
2302
2303 return in_len;
2304 }
2305
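/*
 * Post a chain of send work requests.  Each user WR is unmarshalled
 * into a kernel ib_send_wr whose scatter/gather list lives in the
 * same allocation, directly after the WR.  For UD QPs the address
 * handle of every WR is looked up and held until the chain is freed.
 * On failure, resp.bad_wr reports how many WRs precede and include
 * the one the driver rejected.
 */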
2306 ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
2307 struct ib_device *ib_dev,
2308 const char __user *buf, int in_len,
2309 int out_len)
2310 {
2311 struct ib_uverbs_post_send cmd;
2312 struct ib_uverbs_post_send_resp resp;
2313 struct ib_uverbs_send_wr *user_wr;
2314 struct ib_send_wr *wr = NULL, *last, *next, *bad_wr;
2315 struct ib_qp *qp;
2316 int i, sg_ind;
2317 int is_ud;
2318 ssize_t ret = -EINVAL;
2319
2320 if (copy_from_user(&cmd, buf, sizeof cmd))
2321 return -EFAULT;
2322
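/*
 * Note: wr_count, sge_count and wqe_size come straight from user
 * space, and the wqe_size * wr_count product below is evaluated in
 * 32-bit arithmetic and can wrap, so this in_len test is a sanity
 * check rather than a hard bound on the sizes that follow.
 */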
2323 if (in_len < sizeof cmd + cmd.wqe_size * cmd.wr_count +
2324 cmd.sge_count * sizeof (struct ib_uverbs_sge))
2325 return -EINVAL;
2326
2327 if (cmd.wqe_size < sizeof (struct ib_uverbs_send_wr))
2328 return -EINVAL;
2329
2330 user_wr = kmalloc(cmd.wqe_size, GFP_KERNEL);
2331 if (!user_wr)
2332 return -ENOMEM;
2333
2334 qp = idr_read_qp(cmd.qp_handle, file->ucontext);
2335 if (!qp)
2336 goto out;
2337
2338 is_ud = qp->qp_type == IB_QPT_UD;
2339 sg_ind = 0;
2340 last = NULL;
2341 for (i = 0; i < cmd.wr_count; ++i) {
2342 if (copy_from_user(user_wr,
2343 buf + sizeof cmd + i * cmd.wqe_size,
2344 cmd.wqe_size)) {
2345 ret = -EFAULT;
2346 goto out_put;
2347 }
2348
2349 if (user_wr->num_sge + sg_ind > cmd.sge_count) {
2350 ret = -EINVAL;
2351 goto out_put;
2352 }
2353
2354 next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) +
2355 user_wr->num_sge * sizeof (struct ib_sge),
2356 GFP_KERNEL);
2357 if (!next) {
2358 ret = -ENOMEM;
2359 goto out_put;
2360 }
2361
2362 if (!last)
2363 wr = next;
2364 else
2365 last->next = next;
2366 last = next;
2367
2368 next->next = NULL;
2369 next->wr_id = user_wr->wr_id;
2370 next->num_sge = user_wr->num_sge;
2371 next->opcode = user_wr->opcode;
2372 next->send_flags = user_wr->send_flags;
2373
2374 if (is_ud) {
2375 next->wr.ud.ah = idr_read_ah(user_wr->wr.ud.ah,
2376 file->ucontext);
2377 if (!next->wr.ud.ah) {
2378 ret = -EINVAL;
2379 goto out_put;
2380 }
2381 next->wr.ud.remote_qpn = user_wr->wr.ud.remote_qpn;
2382 next->wr.ud.remote_qkey = user_wr->wr.ud.remote_qkey;
2383 if (next->opcode == IB_WR_SEND_WITH_IMM)
2384 next->ex.imm_data =
2385 (__be32 __force) user_wr->ex.imm_data;
2386 } else {
2387 switch (next->opcode) {
2388 case IB_WR_RDMA_WRITE_WITH_IMM:
2389 next->ex.imm_data =
2390 (__be32 __force) user_wr->ex.imm_data;
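/* fall through - RDMA write with immediate also needs remote_addr/rkey */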
2391 case IB_WR_RDMA_WRITE:
2392 case IB_WR_RDMA_READ:
2393 next->wr.rdma.remote_addr =
2394 user_wr->wr.rdma.remote_addr;
2395 next->wr.rdma.rkey =
2396 user_wr->wr.rdma.rkey;
2397 break;
2398 case IB_WR_SEND_WITH_IMM:
2399 next->ex.imm_data =
2400 (__be32 __force) user_wr->ex.imm_data;
2401 break;
2402 case IB_WR_SEND_WITH_INV:
2403 next->ex.invalidate_rkey =
2404 user_wr->ex.invalidate_rkey;
2405 break;
2406 case IB_WR_ATOMIC_CMP_AND_SWP:
2407 case IB_WR_ATOMIC_FETCH_AND_ADD:
2408 next->wr.atomic.remote_addr =
2409 user_wr->wr.atomic.remote_addr;
2410 next->wr.atomic.compare_add =
2411 user_wr->wr.atomic.compare_add;
2412 next->wr.atomic.swap = user_wr->wr.atomic.swap;
2413 next->wr.atomic.rkey = user_wr->wr.atomic.rkey;
2414 break;
2415 default:
2416 break;
2417 }
2418 }
2419
2420 if (next->num_sge) {
2421 next->sg_list = (void *) next +
2422 ALIGN(sizeof *next, sizeof (struct ib_sge));
2423 if (copy_from_user(next->sg_list,
2424 buf + sizeof cmd +
2425 cmd.wr_count * cmd.wqe_size +
2426 sg_ind * sizeof (struct ib_sge),
2427 next->num_sge * sizeof (struct ib_sge))) {
2428 ret = -EFAULT;
2429 goto out_put;
2430 }
2431 sg_ind += next->num_sge;
2432 } else
2433 next->sg_list = NULL;
2434 }
2435
2436 resp.bad_wr = 0;
2437 ret = qp->device->post_send(qp->real_qp, wr, &bad_wr);
2438 if (ret)
2439 for (next = wr; next; next = next->next) {
2440 ++resp.bad_wr;
2441 if (next == bad_wr)
2442 break;
2443 }
2444
2445 if (copy_to_user((void __user *) (unsigned long) cmd.response,
2446 &resp, sizeof resp))
2447 ret = -EFAULT;
2448
2449 out_put:
2450 put_qp_read(qp);
2451
2452 while (wr) {
2453 if (is_ud && wr->wr.ud.ah)
2454 put_ah_read(wr->wr.ud.ah);
2455 next = wr->next;
2456 kfree(wr);
2457 wr = next;
2458 }
2459
2460 out:
2461 kfree(user_wr);
2462
2463 return ret ? ret : in_len;
2464 }
2465
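/*
 * Common unmarshalling of receive work requests, shared by post_recv
 * and post_srq_recv.  Returns a kmalloc'ed chain of ib_recv_wr (each
 * with its sg_list in the same allocation) or an ERR_PTR; the caller
 * is responsible for freeing the chain.
 */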
2466 static struct ib_recv_wr *ib_uverbs_unmarshall_recv(const char __user *buf,
2467 int in_len,
2468 u32 wr_count,
2469 u32 sge_count,
2470 u32 wqe_size)
2471 {
2472 struct ib_uverbs_recv_wr *user_wr;
2473 struct ib_recv_wr *wr = NULL, *last, *next;
2474 int sg_ind;
2475 int i;
2476 int ret;
2477
2478 if (in_len < wqe_size * wr_count +
2479 sge_count * sizeof (struct ib_uverbs_sge))
2480 return ERR_PTR(-EINVAL);
2481
2482 if (wqe_size < sizeof (struct ib_uverbs_recv_wr))
2483 return ERR_PTR(-EINVAL);
2484
2485 user_wr = kmalloc(wqe_size, GFP_KERNEL);
2486 if (!user_wr)
2487 return ERR_PTR(-ENOMEM);
2488
2489 sg_ind = 0;
2490 last = NULL;
2491 for (i = 0; i < wr_count; ++i) {
2492 if (copy_from_user(user_wr, buf + i * wqe_size,
2493 wqe_size)) {
2494 ret = -EFAULT;
2495 goto err;
2496 }
2497
2498 if (user_wr->num_sge + sg_ind > sge_count) {
2499 ret = -EINVAL;
2500 goto err;
2501 }
2502
2503 next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) +
2504 user_wr->num_sge * sizeof (struct ib_sge),
2505 GFP_KERNEL);
2506 if (!next) {
2507 ret = -ENOMEM;
2508 goto err;
2509 }
2510
2511 if (!last)
2512 wr = next;
2513 else
2514 last->next = next;
2515 last = next;
2516
2517 next->next = NULL;
2518 next->wr_id = user_wr->wr_id;
2519 next->num_sge = user_wr->num_sge;
2520
2521 if (next->num_sge) {
2522 next->sg_list = (void *) next +
2523 ALIGN(sizeof *next, sizeof (struct ib_sge));
2524 if (copy_from_user(next->sg_list,
2525 buf + wr_count * wqe_size +
2526 sg_ind * sizeof (struct ib_sge),
2527 next->num_sge * sizeof (struct ib_sge))) {
2528 ret = -EFAULT;
2529 goto err;
2530 }
2531 sg_ind += next->num_sge;
2532 } else
2533 next->sg_list = NULL;
2534 }
2535
2536 kfree(user_wr);
2537 return wr;
2538
2539 err:
2540 kfree(user_wr);
2541
2542 while (wr) {
2543 next = wr->next;
2544 kfree(wr);
2545 wr = next;
2546 }
2547
2548 return ERR_PTR(ret);
2549 }
2550
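/* Post receive work requests to a QP's receive queue. */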
2551 ssize_t ib_uverbs_post_recv(struct ib_uverbs_file *file,
2552 struct ib_device *ib_dev,
2553 const char __user *buf, int in_len,
2554 int out_len)
2555 {
2556 struct ib_uverbs_post_recv cmd;
2557 struct ib_uverbs_post_recv_resp resp;
2558 struct ib_recv_wr *wr, *next, *bad_wr;
2559 struct ib_qp *qp;
2560 ssize_t ret = -EINVAL;
2561
2562 if (copy_from_user(&cmd, buf, sizeof cmd))
2563 return -EFAULT;
2564
2565 wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
2566 in_len - sizeof cmd, cmd.wr_count,
2567 cmd.sge_count, cmd.wqe_size);
2568 if (IS_ERR(wr))
2569 return PTR_ERR(wr);
2570
2571 qp = idr_read_qp(cmd.qp_handle, file->ucontext);
2572 if (!qp)
2573 goto out;
2574
2575 resp.bad_wr = 0;
2576 ret = qp->device->post_recv(qp->real_qp, wr, &bad_wr);
2577
2578 put_qp_read(qp);
2579
2580 if (ret)
2581 for (next = wr; next; next = next->next) {
2582 ++resp.bad_wr;
2583 if (next == bad_wr)
2584 break;
2585 }
2586
2587 if (copy_to_user((void __user *) (unsigned long) cmd.response,
2588 &resp, sizeof resp))
2589 ret = -EFAULT;
2590
2591 out:
2592 while (wr) {
2593 next = wr->next;
2594 kfree(wr);
2595 wr = next;
2596 }
2597
2598 return ret ? ret : in_len;
2599 }
2600
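/* Post receive work requests to a shared receive queue. */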
2601 ssize_t ib_uverbs_post_srq_recv(struct ib_uverbs_file *file,
2602 struct ib_device *ib_dev,
2603 const char __user *buf, int in_len,
2604 int out_len)
2605 {
2606 struct ib_uverbs_post_srq_recv cmd;
2607 struct ib_uverbs_post_srq_recv_resp resp;
2608 struct ib_recv_wr *wr, *next, *bad_wr;
2609 struct ib_srq *srq;
2610 ssize_t ret = -EINVAL;
2611
2612 if (copy_from_user(&cmd, buf, sizeof cmd))
2613 return -EFAULT;
2614
2615 wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
2616 in_len - sizeof cmd, cmd.wr_count,
2617 cmd.sge_count, cmd.wqe_size);
2618 if (IS_ERR(wr))
2619 return PTR_ERR(wr);
2620
2621 srq = idr_read_srq(cmd.srq_handle, file->ucontext);
2622 if (!srq)
2623 goto out;
2624
2625 resp.bad_wr = 0;
2626 ret = srq->device->post_srq_recv(srq, wr, &bad_wr);
2627
2628 put_srq_read(srq);
2629
2630 if (ret)
2631 for (next = wr; next; next = next->next) {
2632 ++resp.bad_wr;
2633 if (next == bad_wr)
2634 break;
2635 }
2636
2637 if (copy_to_user((void __user *) (unsigned long) cmd.response,
2638 &resp, sizeof resp))
2639 ret = -EFAULT;
2640
2641 out:
2642 while (wr) {
2643 next = wr->next;
2644 kfree(wr);
2645 wr = next;
2646 }
2647
2648 return ret ? ret : in_len;
2649 }
2650
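/*
 * Create an address handle on the given PD.  The ib_ah_attr is built
 * verbatim from the command; vlan_id and dmac are zeroed here rather
 * than taken from user space.
 */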
2651 ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
2652 struct ib_device *ib_dev,
2653 const char __user *buf, int in_len,
2654 int out_len)
2655 {
2656 struct ib_uverbs_create_ah cmd;
2657 struct ib_uverbs_create_ah_resp resp;
2658 struct ib_uobject *uobj;
2659 struct ib_pd *pd;
2660 struct ib_ah *ah;
2661 struct ib_ah_attr attr;
2662 int ret;
2663
2664 if (out_len < sizeof resp)
2665 return -ENOSPC;
2666
2667 if (copy_from_user(&cmd, buf, sizeof cmd))
2668 return -EFAULT;
2669
2670 uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
2671 if (!uobj)
2672 return -ENOMEM;
2673
2674 init_uobj(uobj, cmd.user_handle, file->ucontext, &ah_lock_class);
2675 down_write(&uobj->mutex);
2676
2677 pd = idr_read_pd(cmd.pd_handle, file->ucontext);
2678 if (!pd) {
2679 ret = -EINVAL;
2680 goto err;
2681 }
2682
2683 attr.dlid = cmd.attr.dlid;
2684 attr.sl = cmd.attr.sl;
2685 attr.src_path_bits = cmd.attr.src_path_bits;
2686 attr.static_rate = cmd.attr.static_rate;
2687 attr.ah_flags = cmd.attr.is_global ? IB_AH_GRH : 0;
2688 attr.port_num = cmd.attr.port_num;
2689 attr.grh.flow_label = cmd.attr.grh.flow_label;
2690 attr.grh.sgid_index = cmd.attr.grh.sgid_index;
2691 attr.grh.hop_limit = cmd.attr.grh.hop_limit;
2692 attr.grh.traffic_class = cmd.attr.grh.traffic_class;
2693 attr.vlan_id = 0;
2694 memset(&attr.dmac, 0, sizeof(attr.dmac));
2695 memcpy(attr.grh.dgid.raw, cmd.attr.grh.dgid, 16);
2696
2697 ah = ib_create_ah(pd, &attr);
2698 if (IS_ERR(ah)) {
2699 ret = PTR_ERR(ah);
2700 goto err_put;
2701 }
2702
2703 ah->uobject = uobj;
2704 uobj->object = ah;
2705
2706 ret = idr_add_uobj(&ib_uverbs_ah_idr, uobj);
2707 if (ret)
2708 goto err_destroy;
2709
2710 resp.ah_handle = uobj->id;
2711
2712 if (copy_to_user((void __user *) (unsigned long) cmd.response,
2713 &resp, sizeof resp)) {
2714 ret = -EFAULT;
2715 goto err_copy;
2716 }
2717
2718 put_pd_read(pd);
2719
2720 mutex_lock(&file->mutex);
2721 list_add_tail(&uobj->list, &file->ucontext->ah_list);
2722 mutex_unlock(&file->mutex);
2723
2724 uobj->live = 1;
2725
2726 up_write(&uobj->mutex);
2727
2728 return in_len;
2729
2730 err_copy:
2731 idr_remove_uobj(&ib_uverbs_ah_idr, uobj);
2732
2733 err_destroy:
2734 ib_destroy_ah(ah);
2735
2736 err_put:
2737 put_pd_read(pd);
2738
2739 err:
2740 put_uobj_write(uobj);
2741 return ret;
2742 }
2743
2744 ssize_t ib_uverbs_destroy_ah(struct ib_uverbs_file *file,
2745 struct ib_device *ib_dev,
2746 const char __user *buf, int in_len, int out_len)
2747 {
2748 struct ib_uverbs_destroy_ah cmd;
2749 struct ib_ah *ah;
2750 struct ib_uobject *uobj;
2751 int ret;
2752
2753 if (copy_from_user(&cmd, buf, sizeof cmd))
2754 return -EFAULT;
2755
2756 uobj = idr_write_uobj(&ib_uverbs_ah_idr, cmd.ah_handle, file->ucontext);
2757 if (!uobj)
2758 return -EINVAL;
2759 ah = uobj->object;
2760
2761 ret = ib_destroy_ah(ah);
2762 if (!ret)
2763 uobj->live = 0;
2764
2765 put_uobj_write(uobj);
2766
2767 if (ret)
2768 return ret;
2769
2770 idr_remove_uobj(&ib_uverbs_ah_idr, uobj);
2771
2772 mutex_lock(&file->mutex);
2773 list_del(&uobj->list);
2774 mutex_unlock(&file->mutex);
2775
2776 put_uobj(uobj);
2777
2778 return in_len;
2779 }
2780
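/*
 * Attach a QP to a multicast group.  Each (gid, mlid) pair is
 * tracked on the uobject's mcast_list so the attachment can be torn
 * down when the QP is destroyed; attaching an already-attached group
 * succeeds without a second hardware attach.
 */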
2781 ssize_t ib_uverbs_attach_mcast(struct ib_uverbs_file *file,
2782 struct ib_device *ib_dev,
2783 const char __user *buf, int in_len,
2784 int out_len)
2785 {
2786 struct ib_uverbs_attach_mcast cmd;
2787 struct ib_qp *qp;
2788 struct ib_uqp_object *obj;
2789 struct ib_uverbs_mcast_entry *mcast;
2790 int ret;
2791
2792 if (copy_from_user(&cmd, buf, sizeof cmd))
2793 return -EFAULT;
2794
2795 qp = idr_write_qp(cmd.qp_handle, file->ucontext);
2796 if (!qp)
2797 return -EINVAL;
2798
2799 obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);
2800
2801 list_for_each_entry(mcast, &obj->mcast_list, list)
2802 if (cmd.mlid == mcast->lid &&
2803 !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
2804 ret = 0;
2805 goto out_put;
2806 }
2807
2808 mcast = kmalloc(sizeof *mcast, GFP_KERNEL);
2809 if (!mcast) {
2810 ret = -ENOMEM;
2811 goto out_put;
2812 }
2813
2814 mcast->lid = cmd.mlid;
2815 memcpy(mcast->gid.raw, cmd.gid, sizeof mcast->gid.raw);
2816
2817 ret = ib_attach_mcast(qp, &mcast->gid, cmd.mlid);
2818 if (!ret)
2819 list_add_tail(&mcast->list, &obj->mcast_list);
2820 else
2821 kfree(mcast);
2822
2823 out_put:
2824 put_qp_write(qp);
2825
2826 return ret ? ret : in_len;
2827 }
2828
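/* Detach a QP from a multicast group and drop its tracking entry. */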
2829 ssize_t ib_uverbs_detach_mcast(struct ib_uverbs_file *file,
2830 struct ib_device *ib_dev,
2831 const char __user *buf, int in_len,
2832 int out_len)
2833 {
2834 struct ib_uverbs_detach_mcast cmd;
2835 struct ib_uqp_object *obj;
2836 struct ib_qp *qp;
2837 struct ib_uverbs_mcast_entry *mcast;
2838 int ret = -EINVAL;
2839
2840 if (copy_from_user(&cmd, buf, sizeof cmd))
2841 return -EFAULT;
2842
2843 qp = idr_write_qp(cmd.qp_handle, file->ucontext);
2844 if (!qp)
2845 return -EINVAL;
2846
2847 ret = ib_detach_mcast(qp, (union ib_gid *) cmd.gid, cmd.mlid);
2848 if (ret)
2849 goto out_put;
2850
2851 obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);
2852
2853 list_for_each_entry(mcast, &obj->mcast_list, list)
2854 if (cmd.mlid == mcast->lid &&
2855 !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
2856 list_del(&mcast->list);
2857 kfree(mcast);
2858 break;
2859 }
2860
2861 out_put:
2862 put_qp_write(qp);
2863
2864 return ret ? ret : in_len;
2865 }
2866
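/*
 * Convert a single user-space flow specification into its kernel
 * counterpart.  The sizes must match exactly so that a layout
 * mismatch between user and kernel headers is rejected rather than
 * partially copied.
 */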
2867 static int kern_spec_to_ib_spec(struct ib_uverbs_flow_spec *kern_spec,
2868 union ib_flow_spec *ib_spec)
2869 {
2870 if (kern_spec->reserved)
2871 return -EINVAL;
2872
2873 ib_spec->type = kern_spec->type;
2874
2875 switch (ib_spec->type) {
2876 case IB_FLOW_SPEC_ETH:
2877 ib_spec->eth.size = sizeof(struct ib_flow_spec_eth);
2878 if (ib_spec->eth.size != kern_spec->eth.size)
2879 return -EINVAL;
2880 memcpy(&ib_spec->eth.val, &kern_spec->eth.val,
2881 sizeof(struct ib_flow_eth_filter));
2882 memcpy(&ib_spec->eth.mask, &kern_spec->eth.mask,
2883 sizeof(struct ib_flow_eth_filter));
2884 break;
2885 case IB_FLOW_SPEC_IPV4:
2886 ib_spec->ipv4.size = sizeof(struct ib_flow_spec_ipv4);
2887 if (ib_spec->ipv4.size != kern_spec->ipv4.size)
2888 return -EINVAL;
2889 memcpy(&ib_spec->ipv4.val, &kern_spec->ipv4.val,
2890 sizeof(struct ib_flow_ipv4_filter));
2891 memcpy(&ib_spec->ipv4.mask, &kern_spec->ipv4.mask,
2892 sizeof(struct ib_flow_ipv4_filter));
2893 break;
2894 case IB_FLOW_SPEC_TCP:
2895 case IB_FLOW_SPEC_UDP:
2896 ib_spec->tcp_udp.size = sizeof(struct ib_flow_spec_tcp_udp);
2897 if (ib_spec->tcp_udp.size != kern_spec->tcp_udp.size)
2898 return -EINVAL;
2899 memcpy(&ib_spec->tcp_udp.val, &kern_spec->tcp_udp.val,
2900 sizeof(struct ib_flow_tcp_udp_filter));
2901 memcpy(&ib_spec->tcp_udp.mask, &kern_spec->tcp_udp.mask,
2902 sizeof(struct ib_flow_tcp_udp_filter));
2903 break;
2904 default:
2905 return -EINVAL;
2906 }
2907 return 0;
2908 }
2909
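/*
 * Create a flow steering rule (extended command).  The variable-size
 * attribute block is validated spec by spec: the loop below walks
 * the user buffer and must consume cmd.flow_attr.size exactly, with
 * one conversion per declared spec, or the command is rejected.
 */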
2910 int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
2911 struct ib_device *ib_dev,
2912 struct ib_udata *ucore,
2913 struct ib_udata *uhw)
2914 {
2915 struct ib_uverbs_create_flow cmd;
2916 struct ib_uverbs_create_flow_resp resp;
2917 struct ib_uobject *uobj;
2918 struct ib_flow *flow_id;
2919 struct ib_uverbs_flow_attr *kern_flow_attr;
2920 struct ib_flow_attr *flow_attr;
2921 struct ib_qp *qp;
2922 int err = 0;
2923 void *kern_spec;
2924 void *ib_spec;
2925 int i;
2926
2927 if (ucore->inlen < sizeof(cmd))
2928 return -EINVAL;
2929
2930 if (ucore->outlen < sizeof(resp))
2931 return -ENOSPC;
2932
2933 err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
2934 if (err)
2935 return err;
2936
2937 ucore->inbuf += sizeof(cmd);
2938 ucore->inlen -= sizeof(cmd);
2939
2940 if (cmd.comp_mask)
2941 return -EINVAL;
2942
2943 if ((cmd.flow_attr.type == IB_FLOW_ATTR_SNIFFER &&
2944 !capable(CAP_NET_ADMIN)) || !capable(CAP_NET_RAW))
2945 return -EPERM;
2946
2947 if (cmd.flow_attr.num_of_specs > IB_FLOW_SPEC_SUPPORT_LAYERS)
2948 return -EINVAL;
2949
2950 if (cmd.flow_attr.size > ucore->inlen ||
2951 cmd.flow_attr.size >
2952 (cmd.flow_attr.num_of_specs * sizeof(struct ib_uverbs_flow_spec)))
2953 return -EINVAL;
2954
2955 if (cmd.flow_attr.reserved[0] ||
2956 cmd.flow_attr.reserved[1])
2957 return -EINVAL;
2958
2959 if (cmd.flow_attr.num_of_specs) {
2960 kern_flow_attr = kmalloc(sizeof(*kern_flow_attr) + cmd.flow_attr.size,
2961 GFP_KERNEL);
2962 if (!kern_flow_attr)
2963 return -ENOMEM;
2964
2965 memcpy(kern_flow_attr, &cmd.flow_attr, sizeof(*kern_flow_attr));
2966 err = ib_copy_from_udata(kern_flow_attr + 1, ucore,
2967 cmd.flow_attr.size);
2968 if (err)
2969 goto err_free_attr;
2970 } else {
2971 kern_flow_attr = &cmd.flow_attr;
2972 }
2973
2974 uobj = kmalloc(sizeof(*uobj), GFP_KERNEL);
2975 if (!uobj) {
2976 err = -ENOMEM;
2977 goto err_free_attr;
2978 }
2979 init_uobj(uobj, 0, file->ucontext, &rule_lock_class);
2980 down_write(&uobj->mutex);
2981
2982 qp = idr_read_qp(cmd.qp_handle, file->ucontext);
2983 if (!qp) {
2984 err = -EINVAL;
2985 goto err_uobj;
2986 }
2987
2988 flow_attr = kmalloc(sizeof(*flow_attr) + cmd.flow_attr.size, GFP_KERNEL);
2989 if (!flow_attr) {
2990 err = -ENOMEM;
2991 goto err_put;
2992 }
2993
2994 flow_attr->type = kern_flow_attr->type;
2995 flow_attr->priority = kern_flow_attr->priority;
2996 flow_attr->num_of_specs = kern_flow_attr->num_of_specs;
2997 flow_attr->port = kern_flow_attr->port;
2998 flow_attr->flags = kern_flow_attr->flags;
2999 flow_attr->size = sizeof(*flow_attr);
3000
3001 kern_spec = kern_flow_attr + 1;
3002 ib_spec = flow_attr + 1;
3003 for (i = 0; i < flow_attr->num_of_specs &&
3004 cmd.flow_attr.size > offsetof(struct ib_uverbs_flow_spec, reserved) &&
3005 cmd.flow_attr.size >=
3006 ((struct ib_uverbs_flow_spec *)kern_spec)->size; i++) {
3007 err = kern_spec_to_ib_spec(kern_spec, ib_spec);
3008 if (err)
3009 goto err_free;
3010 flow_attr->size +=
3011 ((union ib_flow_spec *) ib_spec)->size;
3012 cmd.flow_attr.size -= ((struct ib_uverbs_flow_spec *)kern_spec)->size;
3013 kern_spec += ((struct ib_uverbs_flow_spec *) kern_spec)->size;
3014 ib_spec += ((union ib_flow_spec *) ib_spec)->size;
3015 }
3016 if (cmd.flow_attr.size || (i != flow_attr->num_of_specs)) {
3017 pr_warn("create flow failed, flow %d: %d bytes left from uverb cmd\n",
3018 i, cmd.flow_attr.size);
3019 err = -EINVAL;
3020 goto err_free;
3021 }
3022 flow_id = ib_create_flow(qp, flow_attr, IB_FLOW_DOMAIN_USER);
3023 if (IS_ERR(flow_id)) {
3024 err = PTR_ERR(flow_id);
3025 goto err_free;
3026 }
3027 flow_id->qp = qp;
3028 flow_id->uobject = uobj;
3029 uobj->object = flow_id;
3030
3031 err = idr_add_uobj(&ib_uverbs_rule_idr, uobj);
3032 if (err)
3033 goto destroy_flow;
3034
3035 memset(&resp, 0, sizeof(resp));
3036 resp.flow_handle = uobj->id;
3037
3038 err = ib_copy_to_udata(ucore,
3039 &resp, sizeof(resp));
3040 if (err)
3041 goto err_copy;
3042
3043 put_qp_read(qp);
3044 mutex_lock(&file->mutex);
3045 list_add_tail(&uobj->list, &file->ucontext->rule_list);
3046 mutex_unlock(&file->mutex);
3047
3048 uobj->live = 1;
3049
3050 up_write(&uobj->mutex);
3051 kfree(flow_attr);
3052 if (cmd.flow_attr.num_of_specs)
3053 kfree(kern_flow_attr);
3054 return 0;
3055 err_copy:
3056 idr_remove_uobj(&ib_uverbs_rule_idr, uobj);
3057 destroy_flow:
3058 ib_destroy_flow(flow_id);
3059 err_free:
3060 kfree(flow_attr);
3061 err_put:
3062 put_qp_read(qp);
3063 err_uobj:
3064 put_uobj_write(uobj);
3065 err_free_attr:
3066 if (cmd.flow_attr.num_of_specs)
3067 kfree(kern_flow_attr);
3068 return err;
3069 }
3070
3071 int ib_uverbs_ex_destroy_flow(struct ib_uverbs_file *file,
3072 struct ib_device *ib_dev,
3073 struct ib_udata *ucore,
3074 struct ib_udata *uhw)
3075 {
3076 struct ib_uverbs_destroy_flow cmd;
3077 struct ib_flow *flow_id;
3078 struct ib_uobject *uobj;
3079 int ret;
3080
3081 if (ucore->inlen < sizeof(cmd))
3082 return -EINVAL;
3083
3084 ret = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
3085 if (ret)
3086 return ret;
3087
3088 if (cmd.comp_mask)
3089 return -EINVAL;
3090
3091 uobj = idr_write_uobj(&ib_uverbs_rule_idr, cmd.flow_handle,
3092 file->ucontext);
3093 if (!uobj)
3094 return -EINVAL;
3095 flow_id = uobj->object;
3096
3097 ret = ib_destroy_flow(flow_id);
3098 if (!ret)
3099 uobj->live = 0;
3100
3101 put_uobj_write(uobj);
3102
3103 idr_remove_uobj(&ib_uverbs_rule_idr, uobj);
3104
3105 mutex_lock(&file->mutex);
3106 list_del(&uobj->list);
3107 mutex_unlock(&file->mutex);
3108
3109 put_uobj(uobj);
3110
3111 return ret;
3112 }
3113
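/*
 * Common SRQ creation path shared by the basic and extended
 * commands.  For XRC SRQs the XRCD and completion queue are looked
 * up and reference-counted before the driver's create_srq entry
 * point is called.
 */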
3114 static int __uverbs_create_xsrq(struct ib_uverbs_file *file,
3115 struct ib_device *ib_dev,
3116 struct ib_uverbs_create_xsrq *cmd,
3117 struct ib_udata *udata)
3118 {
3119 struct ib_uverbs_create_srq_resp resp;
3120 struct ib_usrq_object *obj;
3121 struct ib_pd *pd;
3122 struct ib_srq *srq;
3123 struct ib_uobject *uninitialized_var(xrcd_uobj);
3124 struct ib_srq_init_attr attr;
3125 int ret;
3126
3127 obj = kmalloc(sizeof *obj, GFP_KERNEL);
3128 if (!obj)
3129 return -ENOMEM;
3130
3131 init_uobj(&obj->uevent.uobject, cmd->user_handle, file->ucontext, &srq_lock_class);
3132 down_write(&obj->uevent.uobject.mutex);
3133
3134 if (cmd->srq_type == IB_SRQT_XRC) {
3135 attr.ext.xrc.xrcd = idr_read_xrcd(cmd->xrcd_handle, file->ucontext, &xrcd_uobj);
3136 if (!attr.ext.xrc.xrcd) {
3137 ret = -EINVAL;
3138 goto err;
3139 }
3140
3141 obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
3142 atomic_inc(&obj->uxrcd->refcnt);
3143
3144 attr.ext.xrc.cq = idr_read_cq(cmd->cq_handle, file->ucontext, 0);
3145 if (!attr.ext.xrc.cq) {
3146 ret = -EINVAL;
3147 goto err_put_xrcd;
3148 }
3149 }
3150
3151 pd = idr_read_pd(cmd->pd_handle, file->ucontext);
3152 if (!pd) {
3153 ret = -EINVAL;
3154 goto err_put_cq;
3155 }
3156
3157 attr.event_handler = ib_uverbs_srq_event_handler;
3158 attr.srq_context = file;
3159 attr.srq_type = cmd->srq_type;
3160 attr.attr.max_wr = cmd->max_wr;
3161 attr.attr.max_sge = cmd->max_sge;
3162 attr.attr.srq_limit = cmd->srq_limit;
3163
3164 obj->uevent.events_reported = 0;
3165 INIT_LIST_HEAD(&obj->uevent.event_list);
3166
3167 srq = pd->device->create_srq(pd, &attr, udata);
3168 if (IS_ERR(srq)) {
3169 ret = PTR_ERR(srq);
3170 goto err_put;
3171 }
3172
3173 srq->device = pd->device;
3174 srq->pd = pd;
3175 srq->srq_type = cmd->srq_type;
3176 srq->uobject = &obj->uevent.uobject;
3177 srq->event_handler = attr.event_handler;
3178 srq->srq_context = attr.srq_context;
3179
3180 if (cmd->srq_type == IB_SRQT_XRC) {
3181 srq->ext.xrc.cq = attr.ext.xrc.cq;
3182 srq->ext.xrc.xrcd = attr.ext.xrc.xrcd;
3183 atomic_inc(&attr.ext.xrc.cq->usecnt);
3184 atomic_inc(&attr.ext.xrc.xrcd->usecnt);
3185 }
3186
3187 atomic_inc(&pd->usecnt);
3188 atomic_set(&srq->usecnt, 0);
3189
3190 obj->uevent.uobject.object = srq;
3191 ret = idr_add_uobj(&ib_uverbs_srq_idr, &obj->uevent.uobject);
3192 if (ret)
3193 goto err_destroy;
3194
3195 memset(&resp, 0, sizeof resp);
3196 resp.srq_handle = obj->uevent.uobject.id;
3197 resp.max_wr = attr.attr.max_wr;
3198 resp.max_sge = attr.attr.max_sge;
3199 if (cmd->srq_type == IB_SRQT_XRC)
3200 resp.srqn = srq->ext.xrc.srq_num;
3201
3202 if (copy_to_user((void __user *) (unsigned long) cmd->response,
3203 &resp, sizeof resp)) {
3204 ret = -EFAULT;
3205 goto err_copy;
3206 }
3207
3208 if (cmd->srq_type == IB_SRQT_XRC) {
3209 put_uobj_read(xrcd_uobj);
3210 put_cq_read(attr.ext.xrc.cq);
3211 }
3212 put_pd_read(pd);
3213
3214 mutex_lock(&file->mutex);
3215 list_add_tail(&obj->uevent.uobject.list, &file->ucontext->srq_list);
3216 mutex_unlock(&file->mutex);
3217
3218 obj->uevent.uobject.live = 1;
3219
3220 up_write(&obj->uevent.uobject.mutex);
3221
3222 return 0;
3223
3224 err_copy:
3225 idr_remove_uobj(&ib_uverbs_srq_idr, &obj->uevent.uobject);
3226
3227 err_destroy:
3228 ib_destroy_srq(srq);
3229
3230 err_put:
3231 put_pd_read(pd);
3232
3233 err_put_cq:
3234 if (cmd->srq_type == IB_SRQT_XRC)
3235 put_cq_read(attr.ext.xrc.cq);
3236
3237 err_put_xrcd:
3238 if (cmd->srq_type == IB_SRQT_XRC) {
3239 atomic_dec(&obj->uxrcd->refcnt);
3240 put_uobj_read(xrcd_uobj);
3241 }
3242
3243 err:
3244 put_uobj_write(&obj->uevent.uobject);
3245 return ret;
3246 }
3247
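/*
 * Create a basic SRQ by translating the legacy command into the
 * extended form (IB_SRQT_BASIC) and deferring to
 * __uverbs_create_xsrq().
 */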
3248 ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
3249 struct ib_device *ib_dev,
3250 const char __user *buf, int in_len,
3251 int out_len)
3252 {
3253 struct ib_uverbs_create_srq cmd;
3254 struct ib_uverbs_create_xsrq xcmd;
3255 struct ib_uverbs_create_srq_resp resp;
3256 struct ib_udata udata;
3257 int ret;
3258
3259 if (out_len < sizeof resp)
3260 return -ENOSPC;
3261
3262 if (copy_from_user(&cmd, buf, sizeof cmd))
3263 return -EFAULT;
3264
3265 xcmd.response = cmd.response;
3266 xcmd.user_handle = cmd.user_handle;
3267 xcmd.srq_type = IB_SRQT_BASIC;
3268 xcmd.pd_handle = cmd.pd_handle;
3269 xcmd.max_wr = cmd.max_wr;
3270 xcmd.max_sge = cmd.max_sge;
3271 xcmd.srq_limit = cmd.srq_limit;
3272
3273 INIT_UDATA(&udata, buf + sizeof cmd,
3274 (unsigned long) cmd.response + sizeof resp,
3275 in_len - sizeof cmd, out_len - sizeof resp);
3276
3277 ret = __uverbs_create_xsrq(file, ib_dev, &xcmd, &udata);
3278 if (ret)
3279 return ret;
3280
3281 return in_len;
3282 }
3283
3284 ssize_t ib_uverbs_create_xsrq(struct ib_uverbs_file *file,
3285 struct ib_device *ib_dev,
3286 const char __user *buf, int in_len, int out_len)
3287 {
3288 struct ib_uverbs_create_xsrq cmd;
3289 struct ib_uverbs_create_srq_resp resp;
3290 struct ib_udata udata;
3291 int ret;
3292
3293 if (out_len < sizeof resp)
3294 return -ENOSPC;
3295
3296 if (copy_from_user(&cmd, buf, sizeof cmd))
3297 return -EFAULT;
3298
3299 INIT_UDATA(&udata, buf + sizeof cmd,
3300 (unsigned long) cmd.response + sizeof resp,
3301 in_len - sizeof cmd, out_len - sizeof resp);
3302
3303 ret = __uverbs_create_xsrq(file, ib_dev, &cmd, &udata);
3304 if (ret)
3305 return ret;
3306
3307 return in_len;
3308 }
3309
3310 ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file,
3311 struct ib_device *ib_dev,
3312 const char __user *buf, int in_len,
3313 int out_len)
3314 {
3315 struct ib_uverbs_modify_srq cmd;
3316 struct ib_udata udata;
3317 struct ib_srq *srq;
3318 struct ib_srq_attr attr;
3319 int ret;
3320
3321 if (copy_from_user(&cmd, buf, sizeof cmd))
3322 return -EFAULT;
3323
3324 INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
3325 out_len);
3326
3327 srq = idr_read_srq(cmd.srq_handle, file->ucontext);
3328 if (!srq)
3329 return -EINVAL;
3330
3331 attr.max_wr = cmd.max_wr;
3332 attr.srq_limit = cmd.srq_limit;
3333
3334 ret = srq->device->modify_srq(srq, &attr, cmd.attr_mask, &udata);
3335
3336 put_srq_read(srq);
3337
3338 return ret ? ret : in_len;
3339 }
3340
3341 ssize_t ib_uverbs_query_srq(struct ib_uverbs_file *file,
3342 struct ib_device *ib_dev,
3343 const char __user *buf,
3344 int in_len, int out_len)
3345 {
3346 struct ib_uverbs_query_srq cmd;
3347 struct ib_uverbs_query_srq_resp resp;
3348 struct ib_srq_attr attr;
3349 struct ib_srq *srq;
3350 int ret;
3351
3352 if (out_len < sizeof resp)
3353 return -ENOSPC;
3354
3355 if (copy_from_user(&cmd, buf, sizeof cmd))
3356 return -EFAULT;
3357
3358 srq = idr_read_srq(cmd.srq_handle, file->ucontext);
3359 if (!srq)
3360 return -EINVAL;
3361
3362 ret = ib_query_srq(srq, &attr);
3363
3364 put_srq_read(srq);
3365
3366 if (ret)
3367 return ret;
3368
3369 memset(&resp, 0, sizeof resp);
3370
3371 resp.max_wr = attr.max_wr;
3372 resp.max_sge = attr.max_sge;
3373 resp.srq_limit = attr.srq_limit;
3374
3375 if (copy_to_user((void __user *) (unsigned long) cmd.response,
3376 &resp, sizeof resp))
3377 return -EFAULT;
3378
3379 return in_len;
3380 }
3381
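/*
 * Destroy an SRQ, dropping the XRCD reference for XRC SRQs, and
 * report back the number of async events delivered on it.
 */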
3382 ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
3383 struct ib_device *ib_dev,
3384 const char __user *buf, int in_len,
3385 int out_len)
3386 {
3387 struct ib_uverbs_destroy_srq cmd;
3388 struct ib_uverbs_destroy_srq_resp resp;
3389 struct ib_uobject *uobj;
3390 struct ib_srq *srq;
3391 struct ib_uevent_object *obj;
3392 int ret = -EINVAL;
3393 struct ib_usrq_object *us;
3394 enum ib_srq_type srq_type;
3395
3396 if (copy_from_user(&cmd, buf, sizeof cmd))
3397 return -EFAULT;
3398
3399 uobj = idr_write_uobj(&ib_uverbs_srq_idr, cmd.srq_handle, file->ucontext);
3400 if (!uobj)
3401 return -EINVAL;
3402 srq = uobj->object;
3403 obj = container_of(uobj, struct ib_uevent_object, uobject);
3404 srq_type = srq->srq_type;
3405
3406 ret = ib_destroy_srq(srq);
3407 if (!ret)
3408 uobj->live = 0;
3409
3410 put_uobj_write(uobj);
3411
3412 if (ret)
3413 return ret;
3414
3415 if (srq_type == IB_SRQT_XRC) {
3416 us = container_of(obj, struct ib_usrq_object, uevent);
3417 atomic_dec(&us->uxrcd->refcnt);
3418 }
3419
3420 idr_remove_uobj(&ib_uverbs_srq_idr, uobj);
3421
3422 mutex_lock(&file->mutex);
3423 list_del(&uobj->list);
3424 mutex_unlock(&file->mutex);
3425
3426 ib_uverbs_release_uevent(file, obj);
3427
3428 memset(&resp, 0, sizeof resp);
3429 resp.events_reported = obj->events_reported;
3430
3431 put_uobj(uobj);
3432
3433 if (copy_to_user((void __user *) (unsigned long) cmd.response,
3434 &resp, sizeof resp))
3435 ret = -EFAULT;
3436
3437 return ret ? ret : in_len;
3438 }
3439
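/*
 * Extended query_device.  The response is built incrementally: each
 * optional block (ODP caps, timestamp mask, HCA core clock) is
 * appended only if user space supplied enough output room, and
 * response_length tells user space how much was actually written.
 */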
3440 int ib_uverbs_ex_query_device(struct ib_uverbs_file *file,
3441 struct ib_device *ib_dev,
3442 struct ib_udata *ucore,
3443 struct ib_udata *uhw)
3444 {
3445 struct ib_uverbs_ex_query_device_resp resp;
3446 struct ib_uverbs_ex_query_device cmd;
3447 struct ib_device_attr attr;
3448 int err;
3449
3450 if (ucore->inlen < sizeof(cmd))
3451 return -EINVAL;
3452
3453 err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
3454 if (err)
3455 return err;
3456
3457 if (cmd.comp_mask)
3458 return -EINVAL;
3459
3460 if (cmd.reserved)
3461 return -EINVAL;
3462
3463 resp.response_length = offsetof(typeof(resp), odp_caps);
3464
3465 if (ucore->outlen < resp.response_length)
3466 return -ENOSPC;
3467
3468 memset(&attr, 0, sizeof(attr));
3469
3470 err = ib_dev->query_device(ib_dev, &attr, uhw);
3471 if (err)
3472 return err;
3473
3474 copy_query_dev_fields(file, ib_dev, &resp.base, &attr);
3475 resp.comp_mask = 0;
3476
3477 if (ucore->outlen < resp.response_length + sizeof(resp.odp_caps))
3478 goto end;
3479
3480 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
3481 resp.odp_caps.general_caps = attr.odp_caps.general_caps;
3482 resp.odp_caps.per_transport_caps.rc_odp_caps =
3483 attr.odp_caps.per_transport_caps.rc_odp_caps;
3484 resp.odp_caps.per_transport_caps.uc_odp_caps =
3485 attr.odp_caps.per_transport_caps.uc_odp_caps;
3486 resp.odp_caps.per_transport_caps.ud_odp_caps =
3487 attr.odp_caps.per_transport_caps.ud_odp_caps;
3488 resp.odp_caps.reserved = 0;
3489 #else
3490 memset(&resp.odp_caps, 0, sizeof(resp.odp_caps));
3491 #endif
3492 resp.response_length += sizeof(resp.odp_caps);
3493
3494 if (ucore->outlen < resp.response_length + sizeof(resp.timestamp_mask))
3495 goto end;
3496
3497 resp.timestamp_mask = attr.timestamp_mask;
3498 resp.response_length += sizeof(resp.timestamp_mask);
3499
3500 if (ucore->outlen < resp.response_length + sizeof(resp.hca_core_clock))
3501 goto end;
3502
3503 resp.hca_core_clock = attr.hca_core_clock;
3504 resp.response_length += sizeof(resp.hca_core_clock);
3505
3506 end:
3507 err = ib_copy_to_udata(ucore, &resp, resp.response_length);
3508 if (err)
3509 return err;
3510
3511 return 0;
3512 }