/* drivers/infiniband/core/uverbs_cmd.c */
/*
 * Copyright (c) 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems.  All rights reserved.
 * Copyright (c) 2005 PathScale, Inc.  All rights reserved.
 * Copyright (c) 2006 Mellanox Technologies.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
35
6b73597e 36#include <linux/file.h>
70a30e16 37#include <linux/fs.h>
5a0e3ad6 38#include <linux/slab.h>
8ada2c1c 39#include <linux/sched.h>
6b73597e 40
bc38a6ab
RD
41#include <asm/uaccess.h>
42
43#include "uverbs.h"
ed4c54e5 44#include "core_priv.h"
bc38a6ab 45
3bea57a5
RD
46struct uverbs_lock_class {
47 struct lock_class_key key;
48 char name[16];
49};
50
51static struct uverbs_lock_class pd_lock_class = { .name = "PD-uobj" };
52static struct uverbs_lock_class mr_lock_class = { .name = "MR-uobj" };
6b52a12b 53static struct uverbs_lock_class mw_lock_class = { .name = "MW-uobj" };
3bea57a5
RD
54static struct uverbs_lock_class cq_lock_class = { .name = "CQ-uobj" };
55static struct uverbs_lock_class qp_lock_class = { .name = "QP-uobj" };
56static struct uverbs_lock_class ah_lock_class = { .name = "AH-uobj" };
57static struct uverbs_lock_class srq_lock_class = { .name = "SRQ-uobj" };
58static struct uverbs_lock_class xrcd_lock_class = { .name = "XRCD-uobj" };
436f2ad0 59static struct uverbs_lock_class rule_lock_class = { .name = "RULE-uobj" };
bc38a6ab 60
9ead190b
RD
61/*
62 * The ib_uobject locking scheme is as follows:
63 *
64 * - ib_uverbs_idr_lock protects the uverbs idrs themselves, so it
65 * needs to be held during all idr operations. When an object is
66 * looked up, a reference must be taken on the object's kref before
67 * dropping this lock.
68 *
69 * - Each object also has an rwsem. This rwsem must be held for
70 * reading while an operation that uses the object is performed.
71 * For example, while registering an MR, the associated PD's
72 * uobject.mutex must be held for reading. The rwsem must be held
73 * for writing while initializing or destroying an object.
74 *
75 * - In addition, each object has a "live" flag. If this flag is not
76 * set, then lookups of the object will fail even if it is found in
77 * the idr. This handles a reader that blocks and does not acquire
78 * the rwsem until after the object is destroyed. The destroy
79 * operation will set the live flag to 0 and then drop the rwsem;
80 * this will allow the reader to acquire the rwsem, see that the
81 * live flag is 0, and then drop the rwsem and its reference to
82 * object. The underlying storage will not be freed until the last
83 * reference to the object is dropped.
84 */
85
86static void init_uobj(struct ib_uobject *uobj, u64 user_handle,
3bea57a5 87 struct ib_ucontext *context, struct uverbs_lock_class *c)
9ead190b
RD
88{
89 uobj->user_handle = user_handle;
90 uobj->context = context;
91 kref_init(&uobj->ref);
92 init_rwsem(&uobj->mutex);
3bea57a5 93 lockdep_set_class_and_name(&uobj->mutex, &c->key, c->name);
9ead190b
RD
94 uobj->live = 0;
95}
96
97static void release_uobj(struct kref *kref)
98{
99 kfree(container_of(kref, struct ib_uobject, ref));
100}
101
102static void put_uobj(struct ib_uobject *uobj)
103{
104 kref_put(&uobj->ref, release_uobj);
105}
106
107static void put_uobj_read(struct ib_uobject *uobj)
108{
109 up_read(&uobj->mutex);
110 put_uobj(uobj);
111}
112
113static void put_uobj_write(struct ib_uobject *uobj)
114{
115 up_write(&uobj->mutex);
116 put_uobj(uobj);
117}
118
119static int idr_add_uobj(struct idr *idr, struct ib_uobject *uobj)
3463175d
RD
120{
121 int ret;
122
3b069c5d 123 idr_preload(GFP_KERNEL);
9ead190b 124 spin_lock(&ib_uverbs_idr_lock);
3463175d 125
3b069c5d
TH
126 ret = idr_alloc(idr, uobj, 0, 0, GFP_NOWAIT);
127 if (ret >= 0)
128 uobj->id = ret;
129
130 spin_unlock(&ib_uverbs_idr_lock);
131 idr_preload_end();
3463175d 132
3b069c5d 133 return ret < 0 ? ret : 0;
3463175d
RD
134}
135
9ead190b
RD
136void idr_remove_uobj(struct idr *idr, struct ib_uobject *uobj)
137{
138 spin_lock(&ib_uverbs_idr_lock);
139 idr_remove(idr, uobj->id);
140 spin_unlock(&ib_uverbs_idr_lock);
141}
142
143static struct ib_uobject *__idr_get_uobj(struct idr *idr, int id,
144 struct ib_ucontext *context)
145{
146 struct ib_uobject *uobj;
147
148 spin_lock(&ib_uverbs_idr_lock);
149 uobj = idr_find(idr, id);
cbfb50e6
RD
150 if (uobj) {
151 if (uobj->context == context)
152 kref_get(&uobj->ref);
153 else
154 uobj = NULL;
155 }
9ead190b
RD
156 spin_unlock(&ib_uverbs_idr_lock);
157
158 return uobj;
159}
160
161static struct ib_uobject *idr_read_uobj(struct idr *idr, int id,
1ccf6aa1 162 struct ib_ucontext *context, int nested)
9ead190b
RD
163{
164 struct ib_uobject *uobj;
165
166 uobj = __idr_get_uobj(idr, id, context);
167 if (!uobj)
168 return NULL;
169
1ccf6aa1
RD
170 if (nested)
171 down_read_nested(&uobj->mutex, SINGLE_DEPTH_NESTING);
172 else
173 down_read(&uobj->mutex);
9ead190b
RD
174 if (!uobj->live) {
175 put_uobj_read(uobj);
176 return NULL;
177 }
178
179 return uobj;
180}
181
182static struct ib_uobject *idr_write_uobj(struct idr *idr, int id,
183 struct ib_ucontext *context)
184{
185 struct ib_uobject *uobj;
186
187 uobj = __idr_get_uobj(idr, id, context);
188 if (!uobj)
189 return NULL;
190
191 down_write(&uobj->mutex);
192 if (!uobj->live) {
193 put_uobj_write(uobj);
194 return NULL;
195 }
196
197 return uobj;
198}
199
1ccf6aa1
RD
200static void *idr_read_obj(struct idr *idr, int id, struct ib_ucontext *context,
201 int nested)
9ead190b
RD
202{
203 struct ib_uobject *uobj;
204
1ccf6aa1 205 uobj = idr_read_uobj(idr, id, context, nested);
9ead190b
RD
206 return uobj ? uobj->object : NULL;
207}
208
209static struct ib_pd *idr_read_pd(int pd_handle, struct ib_ucontext *context)
210{
1ccf6aa1 211 return idr_read_obj(&ib_uverbs_pd_idr, pd_handle, context, 0);
9ead190b
RD
212}
213
214static void put_pd_read(struct ib_pd *pd)
215{
216 put_uobj_read(pd->uobject);
217}
218
1ccf6aa1 219static struct ib_cq *idr_read_cq(int cq_handle, struct ib_ucontext *context, int nested)
9ead190b 220{
1ccf6aa1 221 return idr_read_obj(&ib_uverbs_cq_idr, cq_handle, context, nested);
9ead190b
RD
222}
223
224static void put_cq_read(struct ib_cq *cq)
225{
226 put_uobj_read(cq->uobject);
227}
228
229static struct ib_ah *idr_read_ah(int ah_handle, struct ib_ucontext *context)
230{
1ccf6aa1 231 return idr_read_obj(&ib_uverbs_ah_idr, ah_handle, context, 0);
9ead190b
RD
232}
233
234static void put_ah_read(struct ib_ah *ah)
235{
236 put_uobj_read(ah->uobject);
237}
238
239static struct ib_qp *idr_read_qp(int qp_handle, struct ib_ucontext *context)
240{
1ccf6aa1 241 return idr_read_obj(&ib_uverbs_qp_idr, qp_handle, context, 0);
9ead190b
RD
242}
243
e214a0fe
EC
244static struct ib_qp *idr_write_qp(int qp_handle, struct ib_ucontext *context)
245{
246 struct ib_uobject *uobj;
247
248 uobj = idr_write_uobj(&ib_uverbs_qp_idr, qp_handle, context);
249 return uobj ? uobj->object : NULL;
250}
251
9ead190b
RD
252static void put_qp_read(struct ib_qp *qp)
253{
254 put_uobj_read(qp->uobject);
255}
256
e214a0fe
EC
257static void put_qp_write(struct ib_qp *qp)
258{
259 put_uobj_write(qp->uobject);
260}
261
9ead190b
RD
262static struct ib_srq *idr_read_srq(int srq_handle, struct ib_ucontext *context)
263{
1ccf6aa1 264 return idr_read_obj(&ib_uverbs_srq_idr, srq_handle, context, 0);
9ead190b
RD
265}
266
267static void put_srq_read(struct ib_srq *srq)
268{
269 put_uobj_read(srq->uobject);
270}
271
53d0bd1e
SH
272static struct ib_xrcd *idr_read_xrcd(int xrcd_handle, struct ib_ucontext *context,
273 struct ib_uobject **uobj)
274{
275 *uobj = idr_read_uobj(&ib_uverbs_xrcd_idr, xrcd_handle, context, 0);
276 return *uobj ? (*uobj)->object : NULL;
277}
278
279static void put_xrcd_read(struct ib_uobject *uobj)
280{
281 put_uobj_read(uobj);
282}
283
bc38a6ab 284ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
057aec0d 285 struct ib_device *ib_dev,
bc38a6ab
RD
286 const char __user *buf,
287 int in_len, int out_len)
288{
289 struct ib_uverbs_get_context cmd;
290 struct ib_uverbs_get_context_resp resp;
291 struct ib_udata udata;
882214e2
HE
292#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
293 struct ib_device_attr dev_attr;
294#endif
63c47c28 295 struct ib_ucontext *ucontext;
6b73597e 296 struct file *filp;
63c47c28 297 int ret;
bc38a6ab
RD
298
299 if (out_len < sizeof resp)
300 return -ENOSPC;
301
302 if (copy_from_user(&cmd, buf, sizeof cmd))
303 return -EFAULT;
304
95ed644f 305 mutex_lock(&file->mutex);
63c47c28
RD
306
307 if (file->ucontext) {
308 ret = -EINVAL;
309 goto err;
310 }
311
bc38a6ab
RD
312 INIT_UDATA(&udata, buf + sizeof cmd,
313 (unsigned long) cmd.response + sizeof resp,
314 in_len - sizeof cmd, out_len - sizeof resp);
315
057aec0d 316 ucontext = ib_dev->alloc_ucontext(ib_dev, &udata);
77f76013 317 if (IS_ERR(ucontext)) {
df42245a 318 ret = PTR_ERR(ucontext);
77f76013
GC
319 goto err;
320 }
bc38a6ab 321
057aec0d 322 ucontext->device = ib_dev;
63c47c28
RD
323 INIT_LIST_HEAD(&ucontext->pd_list);
324 INIT_LIST_HEAD(&ucontext->mr_list);
325 INIT_LIST_HEAD(&ucontext->mw_list);
326 INIT_LIST_HEAD(&ucontext->cq_list);
327 INIT_LIST_HEAD(&ucontext->qp_list);
328 INIT_LIST_HEAD(&ucontext->srq_list);
329 INIT_LIST_HEAD(&ucontext->ah_list);
53d0bd1e 330 INIT_LIST_HEAD(&ucontext->xrcd_list);
436f2ad0 331 INIT_LIST_HEAD(&ucontext->rule_list);
8ada2c1c
SR
332 rcu_read_lock();
333 ucontext->tgid = get_task_pid(current->group_leader, PIDTYPE_PID);
334 rcu_read_unlock();
f7c6a7b5 335 ucontext->closing = 0;
bc38a6ab 336
882214e2
HE
337#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
338 ucontext->umem_tree = RB_ROOT;
339 init_rwsem(&ucontext->umem_rwsem);
340 ucontext->odp_mrs_count = 0;
341 INIT_LIST_HEAD(&ucontext->no_private_counters);
342
057aec0d 343 ret = ib_query_device(ib_dev, &dev_attr);
882214e2
HE
344 if (ret)
345 goto err_free;
346 if (!(dev_attr.device_cap_flags & IB_DEVICE_ON_DEMAND_PAGING))
347 ucontext->invalidate_range = NULL;
348
349#endif
350
6b73597e
RD
351 resp.num_comp_vectors = file->device->num_comp_vectors;
352
da183c7a 353 ret = get_unused_fd_flags(O_CLOEXEC);
b1e4594b
AV
354 if (ret < 0)
355 goto err_free;
356 resp.async_fd = ret;
357
057aec0d 358 filp = ib_uverbs_alloc_event_file(file, ib_dev, 1);
6b73597e
RD
359 if (IS_ERR(filp)) {
360 ret = PTR_ERR(filp);
b1e4594b 361 goto err_fd;
6b73597e 362 }
bc38a6ab
RD
363
364 if (copy_to_user((void __user *) (unsigned long) cmd.response,
63c47c28
RD
365 &resp, sizeof resp)) {
366 ret = -EFAULT;
6b73597e 367 goto err_file;
63c47c28
RD
368 }
369
70a30e16 370 file->ucontext = ucontext;
6b73597e
RD
371
372 fd_install(resp.async_fd, filp);
373
95ed644f 374 mutex_unlock(&file->mutex);
bc38a6ab
RD
375
376 return in_len;
377
6b73597e 378err_file:
03c40442 379 ib_uverbs_free_async_event_file(file);
6b73597e
RD
380 fput(filp);
381
b1e4594b
AV
382err_fd:
383 put_unused_fd(resp.async_fd);
384
63c47c28 385err_free:
8ada2c1c 386 put_pid(ucontext->tgid);
057aec0d 387 ib_dev->dealloc_ucontext(ucontext);
bc38a6ab 388
63c47c28 389err:
95ed644f 390 mutex_unlock(&file->mutex);
63c47c28 391 return ret;
bc38a6ab
RD
392}
393
02d1aa7a 394static void copy_query_dev_fields(struct ib_uverbs_file *file,
057aec0d 395 struct ib_device *ib_dev,
02d1aa7a
EC
396 struct ib_uverbs_query_device_resp *resp,
397 struct ib_device_attr *attr)
398{
399 resp->fw_ver = attr->fw_ver;
057aec0d 400 resp->node_guid = ib_dev->node_guid;
02d1aa7a
EC
401 resp->sys_image_guid = attr->sys_image_guid;
402 resp->max_mr_size = attr->max_mr_size;
403 resp->page_size_cap = attr->page_size_cap;
404 resp->vendor_id = attr->vendor_id;
405 resp->vendor_part_id = attr->vendor_part_id;
406 resp->hw_ver = attr->hw_ver;
407 resp->max_qp = attr->max_qp;
408 resp->max_qp_wr = attr->max_qp_wr;
409 resp->device_cap_flags = attr->device_cap_flags;
410 resp->max_sge = attr->max_sge;
411 resp->max_sge_rd = attr->max_sge_rd;
412 resp->max_cq = attr->max_cq;
413 resp->max_cqe = attr->max_cqe;
414 resp->max_mr = attr->max_mr;
415 resp->max_pd = attr->max_pd;
416 resp->max_qp_rd_atom = attr->max_qp_rd_atom;
417 resp->max_ee_rd_atom = attr->max_ee_rd_atom;
418 resp->max_res_rd_atom = attr->max_res_rd_atom;
419 resp->max_qp_init_rd_atom = attr->max_qp_init_rd_atom;
420 resp->max_ee_init_rd_atom = attr->max_ee_init_rd_atom;
421 resp->atomic_cap = attr->atomic_cap;
422 resp->max_ee = attr->max_ee;
423 resp->max_rdd = attr->max_rdd;
424 resp->max_mw = attr->max_mw;
425 resp->max_raw_ipv6_qp = attr->max_raw_ipv6_qp;
426 resp->max_raw_ethy_qp = attr->max_raw_ethy_qp;
427 resp->max_mcast_grp = attr->max_mcast_grp;
428 resp->max_mcast_qp_attach = attr->max_mcast_qp_attach;
429 resp->max_total_mcast_qp_attach = attr->max_total_mcast_qp_attach;
430 resp->max_ah = attr->max_ah;
431 resp->max_fmr = attr->max_fmr;
432 resp->max_map_per_fmr = attr->max_map_per_fmr;
433 resp->max_srq = attr->max_srq;
434 resp->max_srq_wr = attr->max_srq_wr;
435 resp->max_srq_sge = attr->max_srq_sge;
436 resp->max_pkeys = attr->max_pkeys;
437 resp->local_ca_ack_delay = attr->local_ca_ack_delay;
057aec0d 438 resp->phys_port_cnt = ib_dev->phys_port_cnt;
02d1aa7a
EC
439}
440
bc38a6ab 441ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
057aec0d 442 struct ib_device *ib_dev,
bc38a6ab
RD
443 const char __user *buf,
444 int in_len, int out_len)
445{
446 struct ib_uverbs_query_device cmd;
447 struct ib_uverbs_query_device_resp resp;
448 struct ib_device_attr attr;
449 int ret;
450
451 if (out_len < sizeof resp)
452 return -ENOSPC;
453
454 if (copy_from_user(&cmd, buf, sizeof cmd))
455 return -EFAULT;
456
057aec0d 457 ret = ib_query_device(ib_dev, &attr);
bc38a6ab
RD
458 if (ret)
459 return ret;
460
461 memset(&resp, 0, sizeof resp);
057aec0d 462 copy_query_dev_fields(file, ib_dev, &resp, &attr);
bc38a6ab
RD
463
464 if (copy_to_user((void __user *) (unsigned long) cmd.response,
465 &resp, sizeof resp))
466 return -EFAULT;
467
468 return in_len;
469}
470
471ssize_t ib_uverbs_query_port(struct ib_uverbs_file *file,
057aec0d 472 struct ib_device *ib_dev,
bc38a6ab
RD
473 const char __user *buf,
474 int in_len, int out_len)
475{
476 struct ib_uverbs_query_port cmd;
477 struct ib_uverbs_query_port_resp resp;
478 struct ib_port_attr attr;
479 int ret;
480
481 if (out_len < sizeof resp)
482 return -ENOSPC;
483
484 if (copy_from_user(&cmd, buf, sizeof cmd))
485 return -EFAULT;
486
057aec0d 487 ret = ib_query_port(ib_dev, cmd.port_num, &attr);
bc38a6ab
RD
488 if (ret)
489 return ret;
490
491 memset(&resp, 0, sizeof resp);
492
493 resp.state = attr.state;
494 resp.max_mtu = attr.max_mtu;
495 resp.active_mtu = attr.active_mtu;
496 resp.gid_tbl_len = attr.gid_tbl_len;
497 resp.port_cap_flags = attr.port_cap_flags;
498 resp.max_msg_sz = attr.max_msg_sz;
499 resp.bad_pkey_cntr = attr.bad_pkey_cntr;
500 resp.qkey_viol_cntr = attr.qkey_viol_cntr;
501 resp.pkey_tbl_len = attr.pkey_tbl_len;
502 resp.lid = attr.lid;
503 resp.sm_lid = attr.sm_lid;
504 resp.lmc = attr.lmc;
505 resp.max_vl_num = attr.max_vl_num;
506 resp.sm_sl = attr.sm_sl;
507 resp.subnet_timeout = attr.subnet_timeout;
508 resp.init_type_reply = attr.init_type_reply;
509 resp.active_width = attr.active_width;
510 resp.active_speed = attr.active_speed;
511 resp.phys_state = attr.phys_state;
057aec0d 512 resp.link_layer = rdma_port_get_link_layer(ib_dev,
2420b60b 513 cmd.port_num);
bc38a6ab
RD
514
515 if (copy_to_user((void __user *) (unsigned long) cmd.response,
516 &resp, sizeof resp))
517 return -EFAULT;
518
519 return in_len;
520}
521
bc38a6ab 522ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file,
057aec0d 523 struct ib_device *ib_dev,
bc38a6ab
RD
524 const char __user *buf,
525 int in_len, int out_len)
526{
527 struct ib_uverbs_alloc_pd cmd;
528 struct ib_uverbs_alloc_pd_resp resp;
529 struct ib_udata udata;
530 struct ib_uobject *uobj;
531 struct ib_pd *pd;
532 int ret;
533
534 if (out_len < sizeof resp)
535 return -ENOSPC;
536
537 if (copy_from_user(&cmd, buf, sizeof cmd))
538 return -EFAULT;
539
540 INIT_UDATA(&udata, buf + sizeof cmd,
541 (unsigned long) cmd.response + sizeof resp,
542 in_len - sizeof cmd, out_len - sizeof resp);
543
544 uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
545 if (!uobj)
546 return -ENOMEM;
547
3bea57a5 548 init_uobj(uobj, 0, file->ucontext, &pd_lock_class);
9ead190b 549 down_write(&uobj->mutex);
bc38a6ab 550
057aec0d 551 pd = ib_dev->alloc_pd(ib_dev, file->ucontext, &udata);
bc38a6ab
RD
552 if (IS_ERR(pd)) {
553 ret = PTR_ERR(pd);
554 goto err;
555 }
556
057aec0d 557 pd->device = ib_dev;
bc38a6ab 558 pd->uobject = uobj;
96249d70 559 pd->local_mr = NULL;
bc38a6ab
RD
560 atomic_set(&pd->usecnt, 0);
561
9ead190b
RD
562 uobj->object = pd;
563 ret = idr_add_uobj(&ib_uverbs_pd_idr, uobj);
bc38a6ab 564 if (ret)
9ead190b 565 goto err_idr;
bc38a6ab
RD
566
567 memset(&resp, 0, sizeof resp);
568 resp.pd_handle = uobj->id;
569
570 if (copy_to_user((void __user *) (unsigned long) cmd.response,
571 &resp, sizeof resp)) {
572 ret = -EFAULT;
9ead190b 573 goto err_copy;
bc38a6ab
RD
574 }
575
95ed644f 576 mutex_lock(&file->mutex);
eb9d3cd5 577 list_add_tail(&uobj->list, &file->ucontext->pd_list);
95ed644f 578 mutex_unlock(&file->mutex);
bc38a6ab 579
9ead190b
RD
580 uobj->live = 1;
581
582 up_write(&uobj->mutex);
bc38a6ab 583
eb9d3cd5
RD
584 return in_len;
585
9ead190b
RD
586err_copy:
587 idr_remove_uobj(&ib_uverbs_pd_idr, uobj);
eb9d3cd5 588
9ead190b 589err_idr:
bc38a6ab
RD
590 ib_dealloc_pd(pd);
591
592err:
9ead190b 593 put_uobj_write(uobj);
bc38a6ab
RD
594 return ret;
595}
596
597ssize_t ib_uverbs_dealloc_pd(struct ib_uverbs_file *file,
057aec0d 598 struct ib_device *ib_dev,
bc38a6ab
RD
599 const char __user *buf,
600 int in_len, int out_len)
601{
602 struct ib_uverbs_dealloc_pd cmd;
bc38a6ab 603 struct ib_uobject *uobj;
7dd78647 604 struct ib_pd *pd;
9ead190b 605 int ret;
bc38a6ab
RD
606
607 if (copy_from_user(&cmd, buf, sizeof cmd))
608 return -EFAULT;
609
9ead190b
RD
610 uobj = idr_write_uobj(&ib_uverbs_pd_idr, cmd.pd_handle, file->ucontext);
611 if (!uobj)
612 return -EINVAL;
7dd78647 613 pd = uobj->object;
bc38a6ab 614
7dd78647
JG
615 if (atomic_read(&pd->usecnt)) {
616 ret = -EBUSY;
617 goto err_put;
618 }
bc38a6ab 619
7dd78647
JG
620 ret = pd->device->dealloc_pd(uobj->object);
621 WARN_ONCE(ret, "Infiniband HW driver failed dealloc_pd");
bc38a6ab 622 if (ret)
7dd78647
JG
623 goto err_put;
624
625 uobj->live = 0;
626 put_uobj_write(uobj);
bc38a6ab 627
9ead190b 628 idr_remove_uobj(&ib_uverbs_pd_idr, uobj);
bc38a6ab 629
95ed644f 630 mutex_lock(&file->mutex);
bc38a6ab 631 list_del(&uobj->list);
95ed644f 632 mutex_unlock(&file->mutex);
bc38a6ab 633
9ead190b 634 put_uobj(uobj);
bc38a6ab 635
9ead190b 636 return in_len;
7dd78647
JG
637
638err_put:
639 put_uobj_write(uobj);
640 return ret;
bc38a6ab
RD
641}
642
53d0bd1e
SH
643struct xrcd_table_entry {
644 struct rb_node node;
645 struct ib_xrcd *xrcd;
646 struct inode *inode;
647};
648
649static int xrcd_table_insert(struct ib_uverbs_device *dev,
650 struct inode *inode,
651 struct ib_xrcd *xrcd)
652{
653 struct xrcd_table_entry *entry, *scan;
654 struct rb_node **p = &dev->xrcd_tree.rb_node;
655 struct rb_node *parent = NULL;
656
657 entry = kmalloc(sizeof *entry, GFP_KERNEL);
658 if (!entry)
659 return -ENOMEM;
660
661 entry->xrcd = xrcd;
662 entry->inode = inode;
663
664 while (*p) {
665 parent = *p;
666 scan = rb_entry(parent, struct xrcd_table_entry, node);
667
668 if (inode < scan->inode) {
669 p = &(*p)->rb_left;
670 } else if (inode > scan->inode) {
671 p = &(*p)->rb_right;
672 } else {
673 kfree(entry);
674 return -EEXIST;
675 }
676 }
677
678 rb_link_node(&entry->node, parent, p);
679 rb_insert_color(&entry->node, &dev->xrcd_tree);
680 igrab(inode);
681 return 0;
682}
683
684static struct xrcd_table_entry *xrcd_table_search(struct ib_uverbs_device *dev,
685 struct inode *inode)
686{
687 struct xrcd_table_entry *entry;
688 struct rb_node *p = dev->xrcd_tree.rb_node;
689
690 while (p) {
691 entry = rb_entry(p, struct xrcd_table_entry, node);
692
693 if (inode < entry->inode)
694 p = p->rb_left;
695 else if (inode > entry->inode)
696 p = p->rb_right;
697 else
698 return entry;
699 }
700
701 return NULL;
702}
703
704static struct ib_xrcd *find_xrcd(struct ib_uverbs_device *dev, struct inode *inode)
705{
706 struct xrcd_table_entry *entry;
707
708 entry = xrcd_table_search(dev, inode);
709 if (!entry)
710 return NULL;
711
712 return entry->xrcd;
713}
714
715static void xrcd_table_delete(struct ib_uverbs_device *dev,
716 struct inode *inode)
717{
718 struct xrcd_table_entry *entry;
719
720 entry = xrcd_table_search(dev, inode);
721 if (entry) {
722 iput(inode);
723 rb_erase(&entry->node, &dev->xrcd_tree);
724 kfree(entry);
725 }
726}
727
728ssize_t ib_uverbs_open_xrcd(struct ib_uverbs_file *file,
057aec0d 729 struct ib_device *ib_dev,
53d0bd1e
SH
730 const char __user *buf, int in_len,
731 int out_len)
732{
733 struct ib_uverbs_open_xrcd cmd;
734 struct ib_uverbs_open_xrcd_resp resp;
735 struct ib_udata udata;
736 struct ib_uxrcd_object *obj;
737 struct ib_xrcd *xrcd = NULL;
2903ff01 738 struct fd f = {NULL, 0};
53d0bd1e 739 struct inode *inode = NULL;
2903ff01 740 int ret = 0;
53d0bd1e
SH
741 int new_xrcd = 0;
742
743 if (out_len < sizeof resp)
744 return -ENOSPC;
745
746 if (copy_from_user(&cmd, buf, sizeof cmd))
747 return -EFAULT;
748
749 INIT_UDATA(&udata, buf + sizeof cmd,
750 (unsigned long) cmd.response + sizeof resp,
751 in_len - sizeof cmd, out_len - sizeof resp);
752
753 mutex_lock(&file->device->xrcd_tree_mutex);
754
755 if (cmd.fd != -1) {
756 /* search for file descriptor */
2903ff01
AV
757 f = fdget(cmd.fd);
758 if (!f.file) {
53d0bd1e
SH
759 ret = -EBADF;
760 goto err_tree_mutex_unlock;
761 }
762
496ad9aa 763 inode = file_inode(f.file);
53d0bd1e
SH
764 xrcd = find_xrcd(file->device, inode);
765 if (!xrcd && !(cmd.oflags & O_CREAT)) {
766 /* no file descriptor. Need CREATE flag */
767 ret = -EAGAIN;
768 goto err_tree_mutex_unlock;
769 }
770
771 if (xrcd && cmd.oflags & O_EXCL) {
772 ret = -EINVAL;
773 goto err_tree_mutex_unlock;
774 }
775 }
776
777 obj = kmalloc(sizeof *obj, GFP_KERNEL);
778 if (!obj) {
779 ret = -ENOMEM;
780 goto err_tree_mutex_unlock;
781 }
782
3bea57a5 783 init_uobj(&obj->uobject, 0, file->ucontext, &xrcd_lock_class);
53d0bd1e
SH
784
785 down_write(&obj->uobject.mutex);
786
787 if (!xrcd) {
057aec0d 788 xrcd = ib_dev->alloc_xrcd(ib_dev, file->ucontext, &udata);
53d0bd1e
SH
789 if (IS_ERR(xrcd)) {
790 ret = PTR_ERR(xrcd);
791 goto err;
792 }
793
794 xrcd->inode = inode;
057aec0d 795 xrcd->device = ib_dev;
53d0bd1e
SH
796 atomic_set(&xrcd->usecnt, 0);
797 mutex_init(&xrcd->tgt_qp_mutex);
798 INIT_LIST_HEAD(&xrcd->tgt_qp_list);
799 new_xrcd = 1;
800 }
801
802 atomic_set(&obj->refcnt, 0);
803 obj->uobject.object = xrcd;
804 ret = idr_add_uobj(&ib_uverbs_xrcd_idr, &obj->uobject);
805 if (ret)
806 goto err_idr;
807
808 memset(&resp, 0, sizeof resp);
809 resp.xrcd_handle = obj->uobject.id;
810
811 if (inode) {
812 if (new_xrcd) {
813 /* create new inode/xrcd table entry */
814 ret = xrcd_table_insert(file->device, inode, xrcd);
815 if (ret)
816 goto err_insert_xrcd;
817 }
818 atomic_inc(&xrcd->usecnt);
819 }
820
821 if (copy_to_user((void __user *) (unsigned long) cmd.response,
822 &resp, sizeof resp)) {
823 ret = -EFAULT;
824 goto err_copy;
825 }
826
2903ff01
AV
827 if (f.file)
828 fdput(f);
53d0bd1e
SH
829
830 mutex_lock(&file->mutex);
831 list_add_tail(&obj->uobject.list, &file->ucontext->xrcd_list);
832 mutex_unlock(&file->mutex);
833
834 obj->uobject.live = 1;
835 up_write(&obj->uobject.mutex);
836
837 mutex_unlock(&file->device->xrcd_tree_mutex);
838 return in_len;
839
840err_copy:
841 if (inode) {
842 if (new_xrcd)
843 xrcd_table_delete(file->device, inode);
844 atomic_dec(&xrcd->usecnt);
845 }
846
847err_insert_xrcd:
848 idr_remove_uobj(&ib_uverbs_xrcd_idr, &obj->uobject);
849
850err_idr:
851 ib_dealloc_xrcd(xrcd);
852
853err:
854 put_uobj_write(&obj->uobject);
855
856err_tree_mutex_unlock:
2903ff01
AV
857 if (f.file)
858 fdput(f);
53d0bd1e
SH
859
860 mutex_unlock(&file->device->xrcd_tree_mutex);
861
862 return ret;
863}
864
865ssize_t ib_uverbs_close_xrcd(struct ib_uverbs_file *file,
057aec0d 866 struct ib_device *ib_dev,
53d0bd1e
SH
867 const char __user *buf, int in_len,
868 int out_len)
869{
870 struct ib_uverbs_close_xrcd cmd;
871 struct ib_uobject *uobj;
872 struct ib_xrcd *xrcd = NULL;
873 struct inode *inode = NULL;
874 struct ib_uxrcd_object *obj;
875 int live;
876 int ret = 0;
877
878 if (copy_from_user(&cmd, buf, sizeof cmd))
879 return -EFAULT;
880
881 mutex_lock(&file->device->xrcd_tree_mutex);
882 uobj = idr_write_uobj(&ib_uverbs_xrcd_idr, cmd.xrcd_handle, file->ucontext);
883 if (!uobj) {
884 ret = -EINVAL;
885 goto out;
886 }
887
888 xrcd = uobj->object;
889 inode = xrcd->inode;
890 obj = container_of(uobj, struct ib_uxrcd_object, uobject);
891 if (atomic_read(&obj->refcnt)) {
892 put_uobj_write(uobj);
893 ret = -EBUSY;
894 goto out;
895 }
896
897 if (!inode || atomic_dec_and_test(&xrcd->usecnt)) {
898 ret = ib_dealloc_xrcd(uobj->object);
899 if (!ret)
900 uobj->live = 0;
901 }
902
903 live = uobj->live;
904 if (inode && ret)
905 atomic_inc(&xrcd->usecnt);
906
907 put_uobj_write(uobj);
908
909 if (ret)
910 goto out;
911
912 if (inode && !live)
913 xrcd_table_delete(file->device, inode);
914
915 idr_remove_uobj(&ib_uverbs_xrcd_idr, uobj);
916 mutex_lock(&file->mutex);
917 list_del(&uobj->list);
918 mutex_unlock(&file->mutex);
919
920 put_uobj(uobj);
921 ret = in_len;
922
923out:
924 mutex_unlock(&file->device->xrcd_tree_mutex);
925 return ret;
926}
927
928void ib_uverbs_dealloc_xrcd(struct ib_uverbs_device *dev,
929 struct ib_xrcd *xrcd)
930{
931 struct inode *inode;
932
933 inode = xrcd->inode;
934 if (inode && !atomic_dec_and_test(&xrcd->usecnt))
935 return;
936
937 ib_dealloc_xrcd(xrcd);
938
939 if (inode)
940 xrcd_table_delete(dev, inode);
941}
942
bc38a6ab 943ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
057aec0d 944 struct ib_device *ib_dev,
bc38a6ab
RD
945 const char __user *buf, int in_len,
946 int out_len)
947{
948 struct ib_uverbs_reg_mr cmd;
949 struct ib_uverbs_reg_mr_resp resp;
950 struct ib_udata udata;
f7c6a7b5 951 struct ib_uobject *uobj;
bc38a6ab
RD
952 struct ib_pd *pd;
953 struct ib_mr *mr;
954 int ret;
955
956 if (out_len < sizeof resp)
957 return -ENOSPC;
958
959 if (copy_from_user(&cmd, buf, sizeof cmd))
960 return -EFAULT;
961
962 INIT_UDATA(&udata, buf + sizeof cmd,
963 (unsigned long) cmd.response + sizeof resp,
964 in_len - sizeof cmd, out_len - sizeof resp);
965
966 if ((cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK))
967 return -EINVAL;
968
1c636f80
EC
969 ret = ib_check_mr_access(cmd.access_flags);
970 if (ret)
971 return ret;
f575394f 972
f7c6a7b5
RD
973 uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
974 if (!uobj)
bc38a6ab
RD
975 return -ENOMEM;
976
3bea57a5 977 init_uobj(uobj, 0, file->ucontext, &mr_lock_class);
f7c6a7b5 978 down_write(&uobj->mutex);
bc38a6ab 979
9ead190b 980 pd = idr_read_pd(cmd.pd_handle, file->ucontext);
aaf1aef5
RD
981 if (!pd) {
982 ret = -EINVAL;
f7c6a7b5 983 goto err_free;
aaf1aef5 984 }
bc38a6ab 985
860f10a7
SG
986 if (cmd.access_flags & IB_ACCESS_ON_DEMAND) {
987 struct ib_device_attr attr;
988
989 ret = ib_query_device(pd->device, &attr);
990 if (ret || !(attr.device_cap_flags &
991 IB_DEVICE_ON_DEMAND_PAGING)) {
992 pr_debug("ODP support not available\n");
993 ret = -EINVAL;
994 goto err_put;
995 }
996 }
997
f7c6a7b5
RD
998 mr = pd->device->reg_user_mr(pd, cmd.start, cmd.length, cmd.hca_va,
999 cmd.access_flags, &udata);
bc38a6ab
RD
1000 if (IS_ERR(mr)) {
1001 ret = PTR_ERR(mr);
9ead190b 1002 goto err_put;
bc38a6ab
RD
1003 }
1004
1005 mr->device = pd->device;
1006 mr->pd = pd;
f7c6a7b5 1007 mr->uobject = uobj;
bc38a6ab
RD
1008 atomic_inc(&pd->usecnt);
1009 atomic_set(&mr->usecnt, 0);
1010
f7c6a7b5
RD
1011 uobj->object = mr;
1012 ret = idr_add_uobj(&ib_uverbs_mr_idr, uobj);
bc38a6ab
RD
1013 if (ret)
1014 goto err_unreg;
1015
9ead190b
RD
1016 memset(&resp, 0, sizeof resp);
1017 resp.lkey = mr->lkey;
1018 resp.rkey = mr->rkey;
f7c6a7b5 1019 resp.mr_handle = uobj->id;
bc38a6ab 1020
bc38a6ab
RD
1021 if (copy_to_user((void __user *) (unsigned long) cmd.response,
1022 &resp, sizeof resp)) {
1023 ret = -EFAULT;
9ead190b 1024 goto err_copy;
bc38a6ab
RD
1025 }
1026
9ead190b
RD
1027 put_pd_read(pd);
1028
95ed644f 1029 mutex_lock(&file->mutex);
f7c6a7b5 1030 list_add_tail(&uobj->list, &file->ucontext->mr_list);
95ed644f 1031 mutex_unlock(&file->mutex);
eb9d3cd5 1032
f7c6a7b5 1033 uobj->live = 1;
9ead190b 1034
f7c6a7b5 1035 up_write(&uobj->mutex);
bc38a6ab
RD
1036
1037 return in_len;
1038
9ead190b 1039err_copy:
f7c6a7b5 1040 idr_remove_uobj(&ib_uverbs_mr_idr, uobj);
bc38a6ab
RD
1041
1042err_unreg:
1043 ib_dereg_mr(mr);
1044
9ead190b
RD
1045err_put:
1046 put_pd_read(pd);
bc38a6ab 1047
bc38a6ab 1048err_free:
f7c6a7b5 1049 put_uobj_write(uobj);
bc38a6ab
RD
1050 return ret;
1051}
1052
/*
 * IB_USER_VERBS_CMD_REREG_MR handler: re-register an existing user MR,
 * optionally changing its translation (start/length/hca_va), its access
 * flags, and/or its PD, as selected by cmd.flags.  Returns in_len on
 * success or a negative errno.
 */
ssize_t ib_uverbs_rereg_mr(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_rereg_mr      cmd;
	struct ib_uverbs_rereg_mr_resp resp;
	struct ib_udata                udata;
	struct ib_pd                  *pd = NULL;
	struct ib_mr                  *mr;
	struct ib_pd                  *old_pd;
	int                            ret;
	struct ib_uobject             *uobj;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof(cmd),
		   (unsigned long) cmd.response + sizeof(resp),
		   in_len - sizeof(cmd), out_len - sizeof(resp));

	/* At least one supported rereg action must be requested. */
	if (cmd.flags & ~IB_MR_REREG_SUPPORTED || !cmd.flags)
		return -EINVAL;

	/*
	 * For a translation change, start/hca_va must be non-zero, length
	 * positive, and both addresses must share the same in-page offset.
	 */
	if ((cmd.flags & IB_MR_REREG_TRANS) &&
	    (!cmd.start || !cmd.hca_va || 0 >= cmd.length ||
	     (cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK)))
			return -EINVAL;

	/* Take the MR uobject write-locked; released via put_uobj_write(). */
	uobj = idr_write_uobj(&ib_uverbs_mr_idr, cmd.mr_handle,
			      file->ucontext);

	if (!uobj)
		return -EINVAL;

	mr = uobj->object;

	if (cmd.flags & IB_MR_REREG_ACCESS) {
		ret = ib_check_mr_access(cmd.access_flags);
		if (ret)
			goto put_uobjs;
	}

	if (cmd.flags & IB_MR_REREG_PD) {
		pd = idr_read_pd(cmd.pd_handle, file->ucontext);
		if (!pd) {
			ret = -EINVAL;
			goto put_uobjs;
		}
	}

	/* An MR with active users (e.g. bound MWs) cannot be re-registered. */
	if (atomic_read(&mr->usecnt)) {
		ret = -EBUSY;
		goto put_uobj_pd;
	}

	old_pd = mr->pd;
	ret = mr->device->rereg_user_mr(mr, cmd.flags, cmd.start,
					cmd.length, cmd.hca_va,
					cmd.access_flags, pd, &udata);
	if (!ret) {
		if (cmd.flags & IB_MR_REREG_PD) {
			/* Move the usecnt reference from the old PD to the new one. */
			atomic_inc(&pd->usecnt);
			mr->pd = pd;
			atomic_dec(&old_pd->usecnt);
		}
	} else {
		goto put_uobj_pd;
	}

	memset(&resp, 0, sizeof(resp));
	resp.lkey = mr->lkey;
	resp.rkey = mr->rkey;

	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;
	else
		ret = in_len;

put_uobj_pd:
	if (cmd.flags & IB_MR_REREG_PD)
		put_pd_read(pd);

put_uobjs:

	put_uobj_write(mr->uobject);

	return ret;
}
1146
/*
 * IB_USER_VERBS_CMD_DEREG_MR handler: deregister a user MR, then tear
 * down its uobject (IDR entry, per-context list, final reference).
 */
ssize_t ib_uverbs_dereg_mr(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_dereg_mr cmd;
	struct ib_mr             *mr;
	struct ib_uobject	 *uobj;
	int                       ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_mr_idr, cmd.mr_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;

	mr = uobj->object;

	ret = ib_dereg_mr(mr);
	if (!ret)
		/* Mark dead while still write-locked so new lookups fail. */
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_mr_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);

	return in_len;
}
1185
/*
 * IB_USER_VERBS_CMD_ALLOC_MW handler: allocate a memory window on the
 * given PD and return its rkey and handle to user space.
 */
ssize_t ib_uverbs_alloc_mw(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_alloc_mw      cmd;
	struct ib_uverbs_alloc_mw_resp resp;
	struct ib_uobject             *uobj;
	struct ib_pd                  *pd;
	struct ib_mw                  *mw;
	int                            ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	uobj = kmalloc(sizeof(*uobj), GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	/* New uobject stays write-locked (and !live) until fully set up. */
	init_uobj(uobj, 0, file->ucontext, &mw_lock_class);
	down_write(&uobj->mutex);

	pd = idr_read_pd(cmd.pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err_free;
	}

	mw = pd->device->alloc_mw(pd, cmd.mw_type);
	if (IS_ERR(mw)) {
		ret = PTR_ERR(mw);
		goto err_put;
	}

	mw->device  = pd->device;
	mw->pd      = pd;
	mw->uobject = uobj;
	atomic_inc(&pd->usecnt);

	uobj->object = mw;
	ret = idr_add_uobj(&ib_uverbs_mw_idr, uobj);
	if (ret)
		goto err_unalloc;

	memset(&resp, 0, sizeof(resp));
	resp.rkey      = mw->rkey;
	resp.mw_handle = uobj->id;

	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp))) {
		ret = -EFAULT;
		goto err_copy;
	}

	put_pd_read(pd);

	mutex_lock(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->mw_list);
	mutex_unlock(&file->mutex);

	/* Publish: the MW is now visible to lookups. */
	uobj->live = 1;

	up_write(&uobj->mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_mw_idr, uobj);

err_unalloc:
	ib_dealloc_mw(mw);

err_put:
	put_pd_read(pd);

err_free:
	put_uobj_write(uobj);
	return ret;
}
1268
/*
 * IB_USER_VERBS_CMD_DEALLOC_MW handler: destroy a memory window and its
 * uobject (mirror of ib_uverbs_alloc_mw()).
 */
ssize_t ib_uverbs_dealloc_mw(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_dealloc_mw cmd;
	struct ib_mw               *mw;
	struct ib_uobject	   *uobj;
	int                         ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_mw_idr, cmd.mw_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;

	mw = uobj->object;

	ret = ib_dealloc_mw(mw);
	if (!ret)
		/* Mark dead while still write-locked so new lookups fail. */
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_mw_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);

	return in_len;
}
1307
/*
 * IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL handler: allocate a completion
 * event file and return a new file descriptor for it to user space.
 */
ssize_t ib_uverbs_create_comp_channel(struct ib_uverbs_file *file,
				      struct ib_device *ib_dev,
				      const char __user *buf, int in_len,
				      int out_len)
{
	struct ib_uverbs_create_comp_channel	   cmd;
	struct ib_uverbs_create_comp_channel_resp  resp;
	struct file				  *filp;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ret = get_unused_fd_flags(O_CLOEXEC);
	if (ret < 0)
		return ret;
	resp.fd = ret;

	filp = ib_uverbs_alloc_event_file(file, ib_dev, 0);
	if (IS_ERR(filp)) {
		put_unused_fd(resp.fd);
		return PTR_ERR(filp);
	}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		put_unused_fd(resp.fd);
		fput(filp);
		return -EFAULT;
	}

	/* Install the fd only after the response reached user space. */
	fd_install(resp.fd, filp);
	return in_len;
}
1345
/*
 * Common implementation for the legacy and extended create-CQ commands.
 * Allocates and initializes an ib_ucq_object, creates the CQ via the
 * driver, and calls @cb to copy the (possibly extended) response to
 * user space.  @cmd_sz says how much of the extended command structure
 * the caller actually received.  Returns the new ucq object, or an
 * ERR_PTR on failure.
 */
static struct ib_ucq_object *create_cq(struct ib_uverbs_file *file,
				       struct ib_device *ib_dev,
				       struct ib_udata *ucore,
				       struct ib_udata *uhw,
				       struct ib_uverbs_ex_create_cq *cmd,
				       size_t cmd_sz,
				       int (*cb)(struct ib_uverbs_file *file,
						 struct ib_ucq_object *obj,
						 struct ib_uverbs_ex_create_cq_resp *resp,
						 struct ib_udata *udata,
						 void *context),
				       void *context)
{
	struct ib_ucq_object           *obj;
	struct ib_uverbs_event_file    *ev_file = NULL;
	struct ib_cq                   *cq;
	int                             ret;
	struct ib_uverbs_ex_create_cq_resp resp;
	struct ib_cq_init_attr attr = {};

	if (cmd->comp_vector >= file->device->num_comp_vectors)
		return ERR_PTR(-EINVAL);

	obj = kmalloc(sizeof *obj, GFP_KERNEL);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	/* New uobject stays write-locked (and !live) until fully set up. */
	init_uobj(&obj->uobject, cmd->user_handle, file->ucontext, &cq_lock_class);
	down_write(&obj->uobject.mutex);

	/* comp_channel < 0 means no completion channel is attached. */
	if (cmd->comp_channel >= 0) {
		ev_file = ib_uverbs_lookup_comp_file(cmd->comp_channel);
		if (!ev_file) {
			ret = -EINVAL;
			goto err;
		}
	}

	obj->uverbs_file	   = file;
	obj->comp_events_reported  = 0;
	obj->async_events_reported = 0;
	INIT_LIST_HEAD(&obj->comp_list);
	INIT_LIST_HEAD(&obj->async_list);

	attr.cqe = cmd->cqe;
	attr.comp_vector = cmd->comp_vector;

	/* Only callers passing the extended command provide the flags field. */
	if (cmd_sz > offsetof(typeof(*cmd), flags) + sizeof(cmd->flags))
		attr.flags = cmd->flags;

	cq = ib_dev->create_cq(ib_dev, &attr,
			       file->ucontext, uhw);
	if (IS_ERR(cq)) {
		ret = PTR_ERR(cq);
		goto err_file;
	}

	cq->device        = ib_dev;
	cq->uobject       = &obj->uobject;
	cq->comp_handler  = ib_uverbs_comp_handler;
	cq->event_handler = ib_uverbs_cq_event_handler;
	cq->cq_context    = ev_file;
	atomic_set(&cq->usecnt, 0);

	obj->uobject.object = cq;
	ret = idr_add_uobj(&ib_uverbs_cq_idr, &obj->uobject);
	if (ret)
		goto err_free;

	memset(&resp, 0, sizeof resp);
	resp.base.cq_handle = obj->uobject.id;
	resp.base.cqe       = cq->cqe;

	resp.response_length = offsetof(typeof(resp), response_length) +
		sizeof(resp.response_length);

	/* Let the caller-specific callback copy the response to user space. */
	ret = cb(file, obj, &resp, ucore, context);
	if (ret)
		goto err_cb;

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uobject.list, &file->ucontext->cq_list);
	mutex_unlock(&file->mutex);

	/* Publish: the CQ is now visible to lookups. */
	obj->uobject.live = 1;

	up_write(&obj->uobject.mutex);

	return obj;

err_cb:
	idr_remove_uobj(&ib_uverbs_cq_idr, &obj->uobject);

err_free:
	ib_destroy_cq(cq);

err_file:
	if (ev_file)
		ib_uverbs_release_ucq(file, ev_file, obj);

err:
	put_uobj_write(&obj->uobject);

	return ERR_PTR(ret);
}
1451
1452static int ib_uverbs_create_cq_cb(struct ib_uverbs_file *file,
1453 struct ib_ucq_object *obj,
1454 struct ib_uverbs_ex_create_cq_resp *resp,
1455 struct ib_udata *ucore, void *context)
1456{
1457 if (ib_copy_to_udata(ucore, &resp->base, sizeof(resp->base)))
1458 return -EFAULT;
1459
1460 return 0;
1461}
1462
/*
 * IB_USER_VERBS_CMD_CREATE_CQ handler: translate the legacy command
 * into the extended form and delegate to create_cq().
 */
ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_cq      cmd;
	struct ib_uverbs_ex_create_cq	cmd_ex;
	struct ib_uverbs_create_cq_resp resp;
	struct ib_udata                 ucore;
	struct ib_udata                 uhw;
	struct ib_ucq_object           *obj;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	INIT_UDATA(&ucore, buf, cmd.response, sizeof(cmd), sizeof(resp));

	/* Driver-private data follows the fixed command/response parts. */
	INIT_UDATA(&uhw, buf + sizeof(cmd),
		   (unsigned long)cmd.response + sizeof(resp),
		   in_len - sizeof(cmd), out_len - sizeof(resp));

	memset(&cmd_ex, 0, sizeof(cmd_ex));
	cmd_ex.user_handle = cmd.user_handle;
	cmd_ex.cqe = cmd.cqe;
	cmd_ex.comp_vector = cmd.comp_vector;
	cmd_ex.comp_channel = cmd.comp_channel;

	/* cmd_sz covers only the legacy fields, so flags stays unused. */
	obj = create_cq(file, ib_dev, &ucore, &uhw, &cmd_ex,
			offsetof(typeof(cmd_ex), comp_channel) +
			sizeof(cmd.comp_channel), ib_uverbs_create_cq_cb,
			NULL);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	return in_len;
}
1503
1504static int ib_uverbs_ex_create_cq_cb(struct ib_uverbs_file *file,
1505 struct ib_ucq_object *obj,
1506 struct ib_uverbs_ex_create_cq_resp *resp,
1507 struct ib_udata *ucore, void *context)
1508{
1509 if (ib_copy_to_udata(ucore, resp, resp->response_length))
1510 return -EFAULT;
1511
1512 return 0;
1513}
1514
/*
 * IB_USER_VERBS_EX_CMD_CREATE_CQ handler: validate the extended command
 * and delegate to create_cq().
 */
int ib_uverbs_ex_create_cq(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   struct ib_udata *ucore,
			   struct ib_udata *uhw)
{
	struct ib_uverbs_ex_create_cq_resp resp;
	struct ib_uverbs_ex_create_cq  cmd;
	struct ib_ucq_object           *obj;
	int err;

	if (ucore->inlen < sizeof(cmd))
		return -EINVAL;

	err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
	if (err)
		return err;

	/* No extension bits are defined yet; reject unknown ones. */
	if (cmd.comp_mask)
		return -EINVAL;

	if (cmd.reserved)
		return -EINVAL;

	/* Need room for at least the base response plus response_length. */
	if (ucore->outlen < (offsetof(typeof(resp), response_length) +
			     sizeof(resp.response_length)))
		return -ENOSPC;

	obj = create_cq(file, ib_dev, ucore, uhw, &cmd,
			min(ucore->inlen, sizeof(cmd)),
			ib_uverbs_ex_create_cq_cb, NULL);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	return 0;
}
1551
/*
 * IB_USER_VERBS_CMD_RESIZE_CQ handler: resize a CQ via the driver and
 * return the new cqe count to user space.
 */
ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_resize_cq	cmd;
	struct ib_uverbs_resize_cq_resp	resp;
	struct ib_udata                 udata;
	struct ib_cq			*cq;
	int				ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
	if (!cq)
		return -EINVAL;

	ret = cq->device->resize_cq(cq, cmd.cqe, &udata);
	if (ret)
		goto out;

	resp.cqe = cq->cqe;

	/* Note: deliberately copies only the cqe field of the response. */
	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp.cqe))
		ret = -EFAULT;

out:
	put_cq_read(cq);

	return ret ? ret : in_len;
}
1589
7182afea
DC
/*
 * Translate a kernel ib_wc into the userspace ABI struct ib_uverbs_wc
 * and copy it to @dest.  Returns 0 on success or -EFAULT.
 */
static int copy_wc_to_user(void __user *dest, struct ib_wc *wc)
{
	struct ib_uverbs_wc tmp;

	tmp.wr_id		= wc->wr_id;
	tmp.status		= wc->status;
	tmp.opcode		= wc->opcode;
	tmp.vendor_err		= wc->vendor_err;
	tmp.byte_len		= wc->byte_len;
	tmp.ex.imm_data		= (__u32 __force) wc->ex.imm_data;
	tmp.qp_num		= wc->qp->qp_num;
	tmp.src_qp		= wc->src_qp;
	tmp.wc_flags		= wc->wc_flags;
	tmp.pkey_index		= wc->pkey_index;
	tmp.slid		= wc->slid;
	tmp.sl			= wc->sl;
	tmp.dlid_path_bits	= wc->dlid_path_bits;
	tmp.port_num		= wc->port_num;
	tmp.reserved		= 0;	/* don't leak stack to user space */

	if (copy_to_user(dest, &tmp, sizeof tmp))
		return -EFAULT;

	return 0;
}
1615
/*
 * IB_USER_VERBS_CMD_POLL_CQ handler: poll up to cmd.ne completions one
 * at a time, streaming each ib_uverbs_wc to user space after the
 * response header, then write the header with the final count.
 */
ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file,
			  struct ib_device *ib_dev,
			  const char __user *buf, int in_len,
			  int out_len)
{
	struct ib_uverbs_poll_cq       cmd;
	struct ib_uverbs_poll_cq_resp  resp;
	u8 __user                     *header_ptr;
	u8 __user                     *data_ptr;
	struct ib_cq                  *cq;
	struct ib_wc                   wc;
	int                            ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
	if (!cq)
		return -EINVAL;

	/* we copy a struct ib_uverbs_poll_cq_resp to user space */
	header_ptr = (void __user *)(unsigned long) cmd.response;
	data_ptr   = header_ptr + sizeof resp;

	memset(&resp, 0, sizeof resp);
	while (resp.count < cmd.ne) {
		ret = ib_poll_cq(cq, 1, &wc);
		if (ret < 0)
			goto out_put;
		if (!ret)
			break;	/* CQ drained */

		ret = copy_wc_to_user(data_ptr, &wc);
		if (ret)
			goto out_put;

		data_ptr += sizeof(struct ib_uverbs_wc);
		++resp.count;
	}

	if (copy_to_user(header_ptr, &resp, sizeof resp)) {
		ret = -EFAULT;
		goto out_put;
	}

	ret = in_len;

out_put:
	put_cq_read(cq);
	return ret;
}
1667
/*
 * IB_USER_VERBS_CMD_REQ_NOTIFY_CQ handler: arm the CQ for the next
 * (optionally solicited-only) completion event.
 */
ssize_t ib_uverbs_req_notify_cq(struct ib_uverbs_file *file,
				struct ib_device *ib_dev,
				const char __user *buf, int in_len,
				int out_len)
{
	struct ib_uverbs_req_notify_cq cmd;
	struct ib_cq                  *cq;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
	if (!cq)
		return -EINVAL;

	ib_req_notify_cq(cq, cmd.solicited_only ?
			 IB_CQ_SOLICITED : IB_CQ_NEXT_COMP);

	put_cq_read(cq);

	return in_len;
}
1690
/*
 * IB_USER_VERBS_CMD_DESTROY_CQ handler: destroy the CQ, tear down its
 * uobject, and report how many comp/async events were delivered.
 */
ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_destroy_cq      cmd;
	struct ib_uverbs_destroy_cq_resp resp;
	struct ib_uobject		*uobj;
	struct ib_cq               	*cq;
	struct ib_ucq_object        	*obj;
	struct ib_uverbs_event_file	*ev_file;
	int                        	 ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_cq_idr, cmd.cq_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;
	cq      = uobj->object;
	ev_file = cq->cq_context;
	obj     = container_of(cq->uobject, struct ib_ucq_object, uobject);

	ret = ib_destroy_cq(cq);
	if (!ret)
		/* Mark dead while still write-locked so new lookups fail. */
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_cq_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	/* Flush pending events and drop the comp channel reference. */
	ib_uverbs_release_ucq(file, ev_file, obj);

	/* Read the counters before the final reference may free obj. */
	memset(&resp, 0, sizeof resp);
	resp.comp_events_reported  = obj->comp_events_reported;
	resp.async_events_reported = obj->async_events_reported;

	put_uobj(uobj);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}
1743
/*
 * IB_USER_VERBS_CMD_CREATE_QP handler: create a QP of the requested
 * type.  XRC_TGT QPs hang off an XRCD (no PD/CQs); all other types
 * need a PD and send/recv CQs (optionally shared) and possibly an SRQ.
 */
ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_qp      cmd;
	struct ib_uverbs_create_qp_resp resp;
	struct ib_udata                 udata;
	struct ib_uqp_object           *obj;
	struct ib_device	       *device;
	struct ib_pd                   *pd = NULL;
	struct ib_xrcd		       *xrcd = NULL;
	struct ib_uobject	       *uninitialized_var(xrcd_uobj);
	struct ib_cq                   *scq = NULL, *rcq = NULL;
	struct ib_srq                  *srq = NULL;
	struct ib_qp                   *qp;
	struct ib_qp_init_attr          attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	/* Raw packet QPs can sniff traffic; require CAP_NET_RAW. */
	if (cmd.qp_type == IB_QPT_RAW_PACKET && !capable(CAP_NET_RAW))
		return -EPERM;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	obj = kzalloc(sizeof *obj, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	/* New uobject stays write-locked (and !live) until fully set up. */
	init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext, &qp_lock_class);
	down_write(&obj->uevent.uobject.mutex);

	if (cmd.qp_type == IB_QPT_XRC_TGT) {
		/* For XRC_TGT, cmd.pd_handle actually carries the XRCD handle. */
		xrcd = idr_read_xrcd(cmd.pd_handle, file->ucontext, &xrcd_uobj);
		if (!xrcd) {
			ret = -EINVAL;
			goto err_put;
		}
		device = xrcd->device;
	} else {
		if (cmd.qp_type == IB_QPT_XRC_INI) {
			/* XRC initiators have no receive queue. */
			cmd.max_recv_wr = cmd.max_recv_sge = 0;
		} else {
			if (cmd.is_srq) {
				srq = idr_read_srq(cmd.srq_handle, file->ucontext);
				if (!srq || srq->srq_type != IB_SRQT_BASIC) {
					ret = -EINVAL;
					goto err_put;
				}
			}

			if (cmd.recv_cq_handle != cmd.send_cq_handle) {
				rcq = idr_read_cq(cmd.recv_cq_handle, file->ucontext, 0);
				if (!rcq) {
					ret = -EINVAL;
					goto err_put;
				}
			}
		}

		/* Same CQ for send and recv needs a nested lock annotation. */
		scq = idr_read_cq(cmd.send_cq_handle, file->ucontext, !!rcq);
		rcq = rcq ?: scq;
		pd  = idr_read_pd(cmd.pd_handle, file->ucontext);
		if (!pd || !scq) {
			ret = -EINVAL;
			goto err_put;
		}

		device = pd->device;
	}

	attr.event_handler = ib_uverbs_qp_event_handler;
	attr.qp_context    = file;
	attr.send_cq       = scq;
	attr.recv_cq       = rcq;
	attr.srq           = srq;
	attr.xrcd	   = xrcd;
	attr.sq_sig_type   = cmd.sq_sig_all ? IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;
	attr.qp_type       = cmd.qp_type;
	attr.create_flags  = 0;

	attr.cap.max_send_wr     = cmd.max_send_wr;
	attr.cap.max_recv_wr     = cmd.max_recv_wr;
	attr.cap.max_send_sge    = cmd.max_send_sge;
	attr.cap.max_recv_sge    = cmd.max_recv_sge;
	attr.cap.max_inline_data = cmd.max_inline_data;

	obj->uevent.events_reported     = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);
	INIT_LIST_HEAD(&obj->mcast_list);

	if (cmd.qp_type == IB_QPT_XRC_TGT)
		qp = ib_create_qp(pd, &attr);
	else
		qp = device->create_qp(pd, &attr, &udata);

	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_put;
	}

	if (cmd.qp_type != IB_QPT_XRC_TGT) {
		/*
		 * ib_create_qp() does this initialization itself for the
		 * XRC_TGT case; do it here for driver-created QPs.
		 */
		qp->real_qp	  = qp;
		qp->device	  = device;
		qp->pd		  = pd;
		qp->send_cq	  = attr.send_cq;
		qp->recv_cq	  = attr.recv_cq;
		qp->srq		  = attr.srq;
		qp->event_handler = attr.event_handler;
		qp->qp_context	  = attr.qp_context;
		qp->qp_type	  = attr.qp_type;
		atomic_set(&qp->usecnt, 0);
		atomic_inc(&pd->usecnt);
		atomic_inc(&attr.send_cq->usecnt);
		if (attr.recv_cq)
			atomic_inc(&attr.recv_cq->usecnt);
		if (attr.srq)
			atomic_inc(&attr.srq->usecnt);
	}
	qp->uobject = &obj->uevent.uobject;

	obj->uevent.uobject.object = qp;
	ret = idr_add_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);
	if (ret)
		goto err_destroy;

	memset(&resp, 0, sizeof resp);
	resp.qpn             = qp->qp_num;
	resp.qp_handle       = obj->uevent.uobject.id;
	resp.max_recv_sge    = attr.cap.max_recv_sge;
	resp.max_send_sge    = attr.cap.max_send_sge;
	resp.max_recv_wr     = attr.cap.max_recv_wr;
	resp.max_send_wr     = attr.cap.max_send_wr;
	resp.max_inline_data = attr.cap.max_inline_data;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	if (xrcd) {
		/* Keep the XRCD pinned for the lifetime of the QP. */
		obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object,
					  uobject);
		atomic_inc(&obj->uxrcd->refcnt);
		put_xrcd_read(xrcd_uobj);
	}

	if (pd)
		put_pd_read(pd);
	if (scq)
		put_cq_read(scq);
	if (rcq && rcq != scq)
		put_cq_read(rcq);
	if (srq)
		put_srq_read(srq);

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uevent.uobject.list, &file->ucontext->qp_list);
	mutex_unlock(&file->mutex);

	/* Publish: the QP is now visible to lookups. */
	obj->uevent.uobject.live = 1;

	up_write(&obj->uevent.uobject.mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);

err_destroy:
	ib_destroy_qp(qp);

err_put:
	if (xrcd)
		put_xrcd_read(xrcd_uobj);
	if (pd)
		put_pd_read(pd);
	if (scq)
		put_cq_read(scq);
	if (rcq && rcq != scq)
		put_cq_read(rcq);
	if (srq)
		put_srq_read(srq);

	put_uobj_write(&obj->uevent.uobject);
	return ret;
}
1939
/*
 * IB_USER_VERBS_CMD_OPEN_QP handler: open a shareable (XRC_TGT) QP that
 * already exists in the given XRCD and create a local uobject for it.
 */
ssize_t ib_uverbs_open_qp(struct ib_uverbs_file *file,
			  struct ib_device *ib_dev,
			  const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_open_qp        cmd;
	struct ib_uverbs_create_qp_resp resp;
	struct ib_udata                 udata;
	struct ib_uqp_object           *obj;
	struct ib_xrcd		       *xrcd;
	struct ib_uobject	       *uninitialized_var(xrcd_uobj);
	struct ib_qp                   *qp;
	struct ib_qp_open_attr          attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	obj = kmalloc(sizeof *obj, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	/* New uobject stays write-locked (and !live) until fully set up. */
	init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext, &qp_lock_class);
	down_write(&obj->uevent.uobject.mutex);

	/* cmd.pd_handle carries the XRCD handle for this command. */
	xrcd = idr_read_xrcd(cmd.pd_handle, file->ucontext, &xrcd_uobj);
	if (!xrcd) {
		ret = -EINVAL;
		goto err_put;
	}

	attr.event_handler = ib_uverbs_qp_event_handler;
	attr.qp_context    = file;
	attr.qp_num        = cmd.qpn;
	attr.qp_type       = cmd.qp_type;

	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);
	INIT_LIST_HEAD(&obj->mcast_list);

	qp = ib_open_qp(xrcd, &attr);
	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_put;
	}

	qp->uobject = &obj->uevent.uobject;

	obj->uevent.uobject.object = qp;
	ret = idr_add_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);
	if (ret)
		goto err_destroy;

	memset(&resp, 0, sizeof resp);
	resp.qpn       = qp->qp_num;
	resp.qp_handle = obj->uevent.uobject.id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_remove;
	}

	/* Keep the XRCD pinned for the lifetime of the opened QP. */
	obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
	atomic_inc(&obj->uxrcd->refcnt);
	put_xrcd_read(xrcd_uobj);

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uevent.uobject.list, &file->ucontext->qp_list);
	mutex_unlock(&file->mutex);

	/* Publish: the QP is now visible to lookups. */
	obj->uevent.uobject.live = 1;

	up_write(&obj->uevent.uobject.mutex);

	return in_len;

err_remove:
	idr_remove_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);

err_destroy:
	ib_destroy_qp(qp);

err_put:
	put_xrcd_read(xrcd_uobj);
	put_uobj_write(&obj->uevent.uobject);
	return ret;
}
2034
/*
 * IB_USER_VERBS_CMD_QUERY_QP handler: query QP attributes via
 * ib_query_qp() and flatten them into the userspace response layout.
 */
ssize_t ib_uverbs_query_qp(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_query_qp      cmd;
	struct ib_uverbs_query_qp_resp resp;
	struct ib_qp                   *qp;
	struct ib_qp_attr              *attr;
	struct ib_qp_init_attr         *init_attr;
	int                            ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	/* Both attr structs are large; allocate rather than use the stack. */
	attr      = kmalloc(sizeof *attr, GFP_KERNEL);
	init_attr = kmalloc(sizeof *init_attr, GFP_KERNEL);
	if (!attr || !init_attr) {
		ret = -ENOMEM;
		goto out;
	}

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp) {
		ret = -EINVAL;
		goto out;
	}

	ret = ib_query_qp(qp, attr, cmd.attr_mask, init_attr);

	put_qp_read(qp);

	if (ret)
		goto out;

	memset(&resp, 0, sizeof resp);

	resp.qp_state               = attr->qp_state;
	resp.cur_qp_state           = attr->cur_qp_state;
	resp.path_mtu               = attr->path_mtu;
	resp.path_mig_state         = attr->path_mig_state;
	resp.qkey                   = attr->qkey;
	resp.rq_psn                 = attr->rq_psn;
	resp.sq_psn                 = attr->sq_psn;
	resp.dest_qp_num            = attr->dest_qp_num;
	resp.qp_access_flags        = attr->qp_access_flags;
	resp.pkey_index             = attr->pkey_index;
	resp.alt_pkey_index         = attr->alt_pkey_index;
	resp.sq_draining            = attr->sq_draining;
	resp.max_rd_atomic          = attr->max_rd_atomic;
	resp.max_dest_rd_atomic     = attr->max_dest_rd_atomic;
	resp.min_rnr_timer          = attr->min_rnr_timer;
	resp.port_num               = attr->port_num;
	resp.timeout                = attr->timeout;
	resp.retry_cnt              = attr->retry_cnt;
	resp.rnr_retry              = attr->rnr_retry;
	resp.alt_port_num           = attr->alt_port_num;
	resp.alt_timeout            = attr->alt_timeout;

	/* Primary path address vector. */
	memcpy(resp.dest.dgid, attr->ah_attr.grh.dgid.raw, 16);
	resp.dest.flow_label        = attr->ah_attr.grh.flow_label;
	resp.dest.sgid_index        = attr->ah_attr.grh.sgid_index;
	resp.dest.hop_limit         = attr->ah_attr.grh.hop_limit;
	resp.dest.traffic_class     = attr->ah_attr.grh.traffic_class;
	resp.dest.dlid              = attr->ah_attr.dlid;
	resp.dest.sl                = attr->ah_attr.sl;
	resp.dest.src_path_bits     = attr->ah_attr.src_path_bits;
	resp.dest.static_rate       = attr->ah_attr.static_rate;
	resp.dest.is_global         = !!(attr->ah_attr.ah_flags & IB_AH_GRH);
	resp.dest.port_num          = attr->ah_attr.port_num;

	/* Alternate path address vector. */
	memcpy(resp.alt_dest.dgid, attr->alt_ah_attr.grh.dgid.raw, 16);
	resp.alt_dest.flow_label    = attr->alt_ah_attr.grh.flow_label;
	resp.alt_dest.sgid_index    = attr->alt_ah_attr.grh.sgid_index;
	resp.alt_dest.hop_limit     = attr->alt_ah_attr.grh.hop_limit;
	resp.alt_dest.traffic_class = attr->alt_ah_attr.grh.traffic_class;
	resp.alt_dest.dlid          = attr->alt_ah_attr.dlid;
	resp.alt_dest.sl            = attr->alt_ah_attr.sl;
	resp.alt_dest.src_path_bits = attr->alt_ah_attr.src_path_bits;
	resp.alt_dest.static_rate   = attr->alt_ah_attr.static_rate;
	resp.alt_dest.is_global     = !!(attr->alt_ah_attr.ah_flags & IB_AH_GRH);
	resp.alt_dest.port_num      = attr->alt_ah_attr.port_num;

	resp.max_send_wr            = init_attr->cap.max_send_wr;
	resp.max_recv_wr            = init_attr->cap.max_recv_wr;
	resp.max_send_sge           = init_attr->cap.max_send_sge;
	resp.max_recv_sge           = init_attr->cap.max_recv_sge;
	resp.max_inline_data        = init_attr->cap.max_inline_data;
	resp.sq_sig_all             = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out:
	kfree(attr);
	kfree(init_attr);

	return ret ? ret : in_len;
}
2135
9977f4f6
SH
2136/* Remove ignored fields set in the attribute mask */
2137static int modify_qp_mask(enum ib_qp_type qp_type, int mask)
2138{
2139 switch (qp_type) {
2140 case IB_QPT_XRC_INI:
2141 return mask & ~(IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER);
b93f3c18
SH
2142 case IB_QPT_XRC_TGT:
2143 return mask & ~(IB_QP_MAX_QP_RD_ATOMIC | IB_QP_RETRY_CNT |
2144 IB_QP_RNR_RETRY);
9977f4f6
SH
2145 default:
2146 return mask;
2147 }
2148}
2149
/*
 * IB_USER_VERBS_CMD_MODIFY_QP handler: translate the userspace modify
 * request into an ib_qp_attr and apply it to the (real) QP.
 */
ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_modify_qp cmd;
	struct ib_udata            udata;
	struct ib_qp              *qp;
	struct ib_qp_attr         *attr;
	int                        ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
		   out_len);

	/* ib_qp_attr is large; allocate rather than use the stack. */
	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp) {
		ret = -EINVAL;
		goto out;
	}

	attr->qp_state 		  = cmd.qp_state;
	attr->cur_qp_state 	  = cmd.cur_qp_state;
	attr->path_mtu 		  = cmd.path_mtu;
	attr->path_mig_state 	  = cmd.path_mig_state;
	attr->qkey 		  = cmd.qkey;
	attr->rq_psn 		  = cmd.rq_psn;
	attr->sq_psn 		  = cmd.sq_psn;
	attr->dest_qp_num 	  = cmd.dest_qp_num;
	attr->qp_access_flags 	  = cmd.qp_access_flags;
	attr->pkey_index 	  = cmd.pkey_index;
	attr->alt_pkey_index 	  = cmd.alt_pkey_index;
	attr->en_sqd_async_notify = cmd.en_sqd_async_notify;
	attr->max_rd_atomic 	  = cmd.max_rd_atomic;
	attr->max_dest_rd_atomic  = cmd.max_dest_rd_atomic;
	attr->min_rnr_timer 	  = cmd.min_rnr_timer;
	attr->port_num 		  = cmd.port_num;
	attr->timeout 		  = cmd.timeout;
	attr->retry_cnt 	  = cmd.retry_cnt;
	attr->rnr_retry 	  = cmd.rnr_retry;
	attr->alt_port_num 	  = cmd.alt_port_num;
	attr->alt_timeout 	  = cmd.alt_timeout;

	/* Primary path address vector. */
	memcpy(attr->ah_attr.grh.dgid.raw, cmd.dest.dgid, 16);
	attr->ah_attr.grh.flow_label        = cmd.dest.flow_label;
	attr->ah_attr.grh.sgid_index        = cmd.dest.sgid_index;
	attr->ah_attr.grh.hop_limit         = cmd.dest.hop_limit;
	attr->ah_attr.grh.traffic_class     = cmd.dest.traffic_class;
	attr->ah_attr.dlid 	    	    = cmd.dest.dlid;
	attr->ah_attr.sl   	    	    = cmd.dest.sl;
	attr->ah_attr.src_path_bits 	    = cmd.dest.src_path_bits;
	attr->ah_attr.static_rate   	    = cmd.dest.static_rate;
	attr->ah_attr.ah_flags 	    	    = cmd.dest.is_global ? IB_AH_GRH : 0;
	attr->ah_attr.port_num 	    	    = cmd.dest.port_num;

	/* Alternate path address vector. */
	memcpy(attr->alt_ah_attr.grh.dgid.raw, cmd.alt_dest.dgid, 16);
	attr->alt_ah_attr.grh.flow_label    = cmd.alt_dest.flow_label;
	attr->alt_ah_attr.grh.sgid_index    = cmd.alt_dest.sgid_index;
	attr->alt_ah_attr.grh.hop_limit     = cmd.alt_dest.hop_limit;
	attr->alt_ah_attr.grh.traffic_class = cmd.alt_dest.traffic_class;
	attr->alt_ah_attr.dlid 	    	    = cmd.alt_dest.dlid;
	attr->alt_ah_attr.sl   	    	    = cmd.alt_dest.sl;
	attr->alt_ah_attr.src_path_bits     = cmd.alt_dest.src_path_bits;
	attr->alt_ah_attr.static_rate       = cmd.alt_dest.static_rate;
	attr->alt_ah_attr.ah_flags 	    = cmd.alt_dest.is_global ? IB_AH_GRH : 0;
	attr->alt_ah_attr.port_num 	    = cmd.alt_dest.port_num;

	if (qp->real_qp == qp) {
		/* Resolve RoCE L2 addresses before calling into the driver. */
		ret = ib_resolve_eth_l2_attrs(qp, attr, &cmd.attr_mask);
		if (ret)
			goto release_qp;
		ret = qp->device->modify_qp(qp, attr,
			modify_qp_mask(qp->qp_type, cmd.attr_mask), &udata);
	} else {
		/* Shared (opened) QP: modify through the owning real QP. */
		ret = ib_modify_qp(qp, attr, modify_qp_mask(qp->qp_type, cmd.attr_mask));
	}

	if (ret)
		goto release_qp;

	ret = in_len;

release_qp:
	put_qp_read(qp);

out:
	kfree(attr);

	return ret;
}
2246
/*
 * ib_uverbs_destroy_qp() - handle the DESTROY_QP uverbs command.
 *
 * Destroys the QP named by cmd.qp_handle, tears down its userspace object
 * and reports the number of asynchronous events that were delivered for it.
 *
 * Fails with -EBUSY while multicast groups are still attached (the
 * attach/detach bookkeeping in obj->mcast_list must be empty first).
 *
 * Returns in_len on success, or a negative errno.
 */
ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_destroy_qp      cmd;
	struct ib_uverbs_destroy_qp_resp resp;
	struct ib_uobject               *uobj;
	struct ib_qp                    *qp;
	struct ib_uqp_object            *obj;
	int                              ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	memset(&resp, 0, sizeof resp);

	/* Take the uobject write-locked so no other command can use it. */
	uobj = idr_write_uobj(&ib_uverbs_qp_idr, cmd.qp_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;
	qp  = uobj->object;
	obj = container_of(uobj, struct ib_uqp_object, uevent.uobject);

	if (!list_empty(&obj->mcast_list)) {
		put_uobj_write(uobj);
		return -EBUSY;
	}

	ret = ib_destroy_qp(qp);
	if (!ret)
		uobj->live = 0;	/* mark dead while still holding the lock */

	put_uobj_write(uobj);

	if (ret)
		return ret;

	/* Release the XRC domain reference taken at QP creation, if any. */
	if (obj->uxrcd)
		atomic_dec(&obj->uxrcd->refcnt);

	idr_remove_uobj(&ib_uverbs_qp_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	/* Flush any queued async events before reporting their count. */
	ib_uverbs_release_uevent(file, &obj->uevent);

	resp.events_reported = obj->uevent.events_reported;

	put_uobj(uobj);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}
2305
/*
 * ib_uverbs_post_send() - handle the POST_SEND uverbs command.
 *
 * Unmarshals an array of user work requests (each cmd.wqe_size bytes,
 * followed by a flat array of SGEs) into a kernel ib_send_wr chain and
 * posts it to the QP's send queue. On a posting error, resp.bad_wr tells
 * userspace how many WRs were consumed up to and including the failing one.
 *
 * For UD QPs each WR holds a read reference on its address handle, which
 * is dropped when the chain is freed.
 *
 * Returns in_len on success, or a negative errno.
 */
ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_post_send      cmd;
	struct ib_uverbs_post_send_resp resp;
	struct ib_uverbs_send_wr       *user_wr;
	struct ib_send_wr              *wr = NULL, *last, *next, *bad_wr;
	struct ib_qp                   *qp;
	int                             i, sg_ind;
	int                             is_ud;
	ssize_t                         ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	/*
	 * NOTE(review): wqe_size * wr_count multiplies user-controlled
	 * 32-bit values and could wrap before the comparison; the per-WR
	 * copy_from_user below still bounds every access to user address
	 * space — confirm this is the intended protection.
	 */
	if (in_len < sizeof cmd + cmd.wqe_size * cmd.wr_count +
	    cmd.sge_count * sizeof (struct ib_uverbs_sge))
		return -EINVAL;

	if (cmd.wqe_size < sizeof (struct ib_uverbs_send_wr))
		return -EINVAL;

	/* Scratch buffer: one user-format WR is staged here per iteration. */
	user_wr = kmalloc(cmd.wqe_size, GFP_KERNEL);
	if (!user_wr)
		return -ENOMEM;

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp)
		goto out;

	is_ud = qp->qp_type == IB_QPT_UD;
	sg_ind = 0;
	last = NULL;
	for (i = 0; i < cmd.wr_count; ++i) {
		if (copy_from_user(user_wr,
				   buf + sizeof cmd + i * cmd.wqe_size,
				   cmd.wqe_size)) {
			ret = -EFAULT;
			goto out_put;
		}

		if (user_wr->num_sge + sg_ind > cmd.sge_count) {
			ret = -EINVAL;
			goto out_put;
		}

		/* WR and its SGE array in one allocation, SGEs aligned. */
		next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) +
			       user_wr->num_sge * sizeof (struct ib_sge),
			       GFP_KERNEL);
		if (!next) {
			ret = -ENOMEM;
			goto out_put;
		}

		if (!last)
			wr = next;
		else
			last->next = next;
		last = next;

		next->next       = NULL;
		next->wr_id      = user_wr->wr_id;
		next->num_sge    = user_wr->num_sge;
		next->opcode     = user_wr->opcode;
		next->send_flags = user_wr->send_flags;

		if (is_ud) {
			/* UD QPs only support SEND/SEND_WITH_IMM. */
			if (next->opcode != IB_WR_SEND &&
			    next->opcode != IB_WR_SEND_WITH_IMM) {
				ret = -EINVAL;
				goto out_put;
			}

			/* Read reference; released in the cleanup loop. */
			next->wr.ud.ah = idr_read_ah(user_wr->wr.ud.ah,
						     file->ucontext);
			if (!next->wr.ud.ah) {
				ret = -EINVAL;
				goto out_put;
			}
			next->wr.ud.remote_qpn  = user_wr->wr.ud.remote_qpn;
			next->wr.ud.remote_qkey = user_wr->wr.ud.remote_qkey;
			if (next->opcode == IB_WR_SEND_WITH_IMM)
				next->ex.imm_data =
					(__be32 __force) user_wr->ex.imm_data;
		} else {
			switch (next->opcode) {
			case IB_WR_RDMA_WRITE_WITH_IMM:
				next->ex.imm_data =
					(__be32 __force) user_wr->ex.imm_data;
				/* fall through: RDMA fields follow */
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_READ:
				next->wr.rdma.remote_addr =
					user_wr->wr.rdma.remote_addr;
				next->wr.rdma.rkey        =
					user_wr->wr.rdma.rkey;
				break;
			case IB_WR_SEND_WITH_IMM:
				next->ex.imm_data =
					(__be32 __force) user_wr->ex.imm_data;
				break;
			case IB_WR_SEND_WITH_INV:
				next->ex.invalidate_rkey =
					user_wr->ex.invalidate_rkey;
				break;
			case IB_WR_ATOMIC_CMP_AND_SWP:
			case IB_WR_ATOMIC_FETCH_AND_ADD:
				next->wr.atomic.remote_addr =
					user_wr->wr.atomic.remote_addr;
				next->wr.atomic.compare_add =
					user_wr->wr.atomic.compare_add;
				next->wr.atomic.swap = user_wr->wr.atomic.swap;
				next->wr.atomic.rkey = user_wr->wr.atomic.rkey;
				/* fall through: plain SEND needs no fields */
			case IB_WR_SEND:
				break;
			default:
				ret = -EINVAL;
				goto out_put;
			}
		}

		if (next->num_sge) {
			/* SGE array lives right after the WR (see kmalloc). */
			next->sg_list = (void *) next +
				ALIGN(sizeof *next, sizeof (struct ib_sge));
			if (copy_from_user(next->sg_list,
					   buf + sizeof cmd +
					   cmd.wr_count * cmd.wqe_size +
					   sg_ind * sizeof (struct ib_sge),
					   next->num_sge * sizeof (struct ib_sge))) {
				ret = -EFAULT;
				goto out_put;
			}
			sg_ind += next->num_sge;
		} else
			next->sg_list = NULL;
	}

	resp.bad_wr = 0;
	/* Post against the real QP so shared (XRC) QPs work too. */
	ret = qp->device->post_send(qp->real_qp, wr, &bad_wr);
	if (ret)
		for (next = wr; next; next = next->next) {
			++resp.bad_wr;
			if (next == bad_wr)
				break;
		}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out_put:
	put_qp_read(qp);

	/* Free the (possibly partial) chain, dropping UD AH references. */
	while (wr) {
		if (is_ud && wr->wr.ud.ah)
			put_ah_read(wr->wr.ud.ah);
		next = wr->next;
		kfree(wr);
		wr = next;
	}

out:
	kfree(user_wr);

	return ret ? ret : in_len;
}
2473
/*
 * ib_uverbs_unmarshall_recv() - convert a userspace array of receive work
 * requests into a kernel linked list of struct ib_recv_wr.
 *
 * The user buffer layout is wr_count records of wqe_size bytes each,
 * followed by a flat array of sge_count struct ib_uverbs_sge. Each kernel
 * WR is allocated together with its SGE array in a single kmalloc.
 *
 * Returns the list head, or an ERR_PTR on failure. The caller owns the
 * list and must kfree() every element.
 */
static struct ib_recv_wr *ib_uverbs_unmarshall_recv(const char __user *buf,
						    int in_len,
						    u32 wr_count,
						    u32 sge_count,
						    u32 wqe_size)
{
	struct ib_uverbs_recv_wr *user_wr;
	struct ib_recv_wr        *wr = NULL, *last, *next;
	int                       sg_ind;
	int                       i;
	int                       ret;

	if (in_len < wqe_size * wr_count +
	    sge_count * sizeof (struct ib_uverbs_sge))
		return ERR_PTR(-EINVAL);

	if (wqe_size < sizeof (struct ib_uverbs_recv_wr))
		return ERR_PTR(-EINVAL);

	/* Scratch buffer for one user-format WR at a time. */
	user_wr = kmalloc(wqe_size, GFP_KERNEL);
	if (!user_wr)
		return ERR_PTR(-ENOMEM);

	sg_ind = 0;
	last = NULL;
	for (i = 0; i < wr_count; ++i) {
		if (copy_from_user(user_wr, buf + i * wqe_size,
				   wqe_size)) {
			ret = -EFAULT;
			goto err;
		}

		/* Total SGE usage across all WRs must not exceed sge_count. */
		if (user_wr->num_sge + sg_ind > sge_count) {
			ret = -EINVAL;
			goto err;
		}

		/* WR plus its SGE array in one allocation, SGEs aligned. */
		next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) +
			       user_wr->num_sge * sizeof (struct ib_sge),
			       GFP_KERNEL);
		if (!next) {
			ret = -ENOMEM;
			goto err;
		}

		if (!last)
			wr = next;
		else
			last->next = next;
		last = next;

		next->next    = NULL;
		next->wr_id   = user_wr->wr_id;
		next->num_sge = user_wr->num_sge;

		if (next->num_sge) {
			next->sg_list = (void *) next +
				ALIGN(sizeof *next, sizeof (struct ib_sge));
			if (copy_from_user(next->sg_list,
					   buf + wr_count * wqe_size +
					   sg_ind * sizeof (struct ib_sge),
					   next->num_sge * sizeof (struct ib_sge))) {
				ret = -EFAULT;
				goto err;
			}
			sg_ind += next->num_sge;
		} else
			next->sg_list = NULL;
	}

	kfree(user_wr);
	return wr;

err:
	kfree(user_wr);

	/* Free whatever part of the chain was already built. */
	while (wr) {
		next = wr->next;
		kfree(wr);
		wr = next;
	}

	return ERR_PTR(ret);
}
2558
2559ssize_t ib_uverbs_post_recv(struct ib_uverbs_file *file,
057aec0d 2560 struct ib_device *ib_dev,
a74cd4af
RD
2561 const char __user *buf, int in_len,
2562 int out_len)
67cdb40c
RD
2563{
2564 struct ib_uverbs_post_recv cmd;
2565 struct ib_uverbs_post_recv_resp resp;
2566 struct ib_recv_wr *wr, *next, *bad_wr;
2567 struct ib_qp *qp;
2568 ssize_t ret = -EINVAL;
2569
2570 if (copy_from_user(&cmd, buf, sizeof cmd))
2571 return -EFAULT;
2572
2573 wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
2574 in_len - sizeof cmd, cmd.wr_count,
2575 cmd.sge_count, cmd.wqe_size);
2576 if (IS_ERR(wr))
2577 return PTR_ERR(wr);
2578
9ead190b
RD
2579 qp = idr_read_qp(cmd.qp_handle, file->ucontext);
2580 if (!qp)
67cdb40c
RD
2581 goto out;
2582
2583 resp.bad_wr = 0;
0e0ec7e0 2584 ret = qp->device->post_recv(qp->real_qp, wr, &bad_wr);
9ead190b
RD
2585
2586 put_qp_read(qp);
2587
67cdb40c
RD
2588 if (ret)
2589 for (next = wr; next; next = next->next) {
2590 ++resp.bad_wr;
2591 if (next == bad_wr)
2592 break;
2593 }
2594
67cdb40c
RD
2595 if (copy_to_user((void __user *) (unsigned long) cmd.response,
2596 &resp, sizeof resp))
2597 ret = -EFAULT;
2598
2599out:
67cdb40c
RD
2600 while (wr) {
2601 next = wr->next;
2602 kfree(wr);
2603 wr = next;
2604 }
2605
2606 return ret ? ret : in_len;
2607}
2608
2609ssize_t ib_uverbs_post_srq_recv(struct ib_uverbs_file *file,
057aec0d 2610 struct ib_device *ib_dev,
a74cd4af
RD
2611 const char __user *buf, int in_len,
2612 int out_len)
67cdb40c
RD
2613{
2614 struct ib_uverbs_post_srq_recv cmd;
2615 struct ib_uverbs_post_srq_recv_resp resp;
2616 struct ib_recv_wr *wr, *next, *bad_wr;
2617 struct ib_srq *srq;
2618 ssize_t ret = -EINVAL;
2619
2620 if (copy_from_user(&cmd, buf, sizeof cmd))
2621 return -EFAULT;
2622
2623 wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
2624 in_len - sizeof cmd, cmd.wr_count,
2625 cmd.sge_count, cmd.wqe_size);
2626 if (IS_ERR(wr))
2627 return PTR_ERR(wr);
2628
9ead190b
RD
2629 srq = idr_read_srq(cmd.srq_handle, file->ucontext);
2630 if (!srq)
67cdb40c
RD
2631 goto out;
2632
2633 resp.bad_wr = 0;
2634 ret = srq->device->post_srq_recv(srq, wr, &bad_wr);
9ead190b
RD
2635
2636 put_srq_read(srq);
2637
67cdb40c
RD
2638 if (ret)
2639 for (next = wr; next; next = next->next) {
2640 ++resp.bad_wr;
2641 if (next == bad_wr)
2642 break;
2643 }
2644
67cdb40c
RD
2645 if (copy_to_user((void __user *) (unsigned long) cmd.response,
2646 &resp, sizeof resp))
2647 ret = -EFAULT;
2648
2649out:
67cdb40c
RD
2650 while (wr) {
2651 next = wr->next;
2652 kfree(wr);
2653 wr = next;
2654 }
2655
2656 return ret ? ret : in_len;
2657}
2658
/*
 * ib_uverbs_create_ah() - handle the CREATE_AH uverbs command.
 *
 * Creates an address handle on the PD named by cmd.pd_handle from the
 * user-supplied address vector and installs it in the AH idr, returning
 * the new handle in resp.ah_handle.
 *
 * Returns in_len on success, or a negative errno.
 */
ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_ah      cmd;
	struct ib_uverbs_create_ah_resp resp;
	struct ib_uobject              *uobj;
	struct ib_pd                   *pd;
	struct ib_ah                   *ah;
	struct ib_ah_attr               attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	/* New uobject is created write-locked until fully initialized. */
	init_uobj(uobj, cmd.user_handle, file->ucontext, &ah_lock_class);
	down_write(&uobj->mutex);

	/* Read reference on the PD; dropped after publication (or error). */
	pd = idr_read_pd(cmd.pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err;
	}

	attr.dlid              = cmd.attr.dlid;
	attr.sl                = cmd.attr.sl;
	attr.src_path_bits     = cmd.attr.src_path_bits;
	attr.static_rate       = cmd.attr.static_rate;
	attr.ah_flags          = cmd.attr.is_global ? IB_AH_GRH : 0;
	attr.port_num          = cmd.attr.port_num;
	attr.grh.flow_label    = cmd.attr.grh.flow_label;
	attr.grh.sgid_index    = cmd.attr.grh.sgid_index;
	attr.grh.hop_limit     = cmd.attr.grh.hop_limit;
	attr.grh.traffic_class = cmd.attr.grh.traffic_class;
	/* L2 fields are resolved by the core, not supplied by userspace. */
	attr.vlan_id           = 0;
	memset(&attr.dmac, 0, sizeof(attr.dmac));
	memcpy(attr.grh.dgid.raw, cmd.attr.grh.dgid, 16);

	ah = ib_create_ah(pd, &attr);
	if (IS_ERR(ah)) {
		ret = PTR_ERR(ah);
		goto err_put;
	}

	ah->uobject  = uobj;
	uobj->object = ah;

	ret = idr_add_uobj(&ib_uverbs_ah_idr, uobj);
	if (ret)
		goto err_destroy;

	resp.ah_handle = uobj->id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	put_pd_read(pd);

	mutex_lock(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->ah_list);
	mutex_unlock(&file->mutex);

	/* Publish: other commands may now look the AH up. */
	uobj->live = 1;

	up_write(&uobj->mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_ah_idr, uobj);

err_destroy:
	ib_destroy_ah(ah);

err_put:
	put_pd_read(pd);

err:
	put_uobj_write(uobj);
	return ret;
}
2751
/*
 * ib_uverbs_destroy_ah() - handle the DESTROY_AH uverbs command.
 *
 * Destroys the address handle named by cmd.ah_handle and removes its
 * userspace object from the idr and the context's AH list.
 *
 * Returns in_len on success, or a negative errno.
 */
ssize_t ib_uverbs_destroy_ah(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_destroy_ah cmd;
	struct ib_ah               *ah;
	struct ib_uobject          *uobj;
	int                         ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	/* Take the uobject write-locked so no other command can use it. */
	uobj = idr_write_uobj(&ib_uverbs_ah_idr, cmd.ah_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;
	ah = uobj->object;

	ret = ib_destroy_ah(ah);
	if (!ret)
		uobj->live = 0;	/* mark dead while still holding the lock */

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_ah_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);

	return in_len;
}
2788
/*
 * ib_uverbs_attach_mcast() - handle the ATTACH_MCAST uverbs command.
 *
 * Attaches the QP named by cmd.qp_handle to the multicast group
 * (cmd.gid, cmd.mlid) and records the attachment on the QP's uobject so
 * destroy_qp can refuse while attachments remain.
 *
 * Returns in_len on success, or a negative errno.
 */
ssize_t ib_uverbs_attach_mcast(struct ib_uverbs_file *file,
			       struct ib_device *ib_dev,
			       const char __user *buf, int in_len,
			       int out_len)
{
	struct ib_uverbs_attach_mcast cmd;
	struct ib_qp                 *qp;
	struct ib_uqp_object         *obj;
	struct ib_uverbs_mcast_entry *mcast;
	int                           ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	/* Write reference: the uobject's mcast_list may be modified. */
	qp = idr_write_qp(cmd.qp_handle, file->ucontext);
	if (!qp)
		return -EINVAL;

	obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);

	/* Attaching the same (GID, MLID) twice is a successful no-op. */
	list_for_each_entry(mcast, &obj->mcast_list, list)
		if (cmd.mlid == mcast->lid &&
		    !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
			ret = 0;
			goto out_put;
		}

	mcast = kmalloc(sizeof *mcast, GFP_KERNEL);
	if (!mcast) {
		ret = -ENOMEM;
		goto out_put;
	}

	mcast->lid = cmd.mlid;
	memcpy(mcast->gid.raw, cmd.gid, sizeof mcast->gid.raw);

	ret = ib_attach_mcast(qp, &mcast->gid, cmd.mlid);
	if (!ret)
		list_add_tail(&mcast->list, &obj->mcast_list);
	else
		kfree(mcast);

out_put:
	put_qp_write(qp);

	return ret ? ret : in_len;
}
2836
/*
 * ib_uverbs_detach_mcast() - handle the DETACH_MCAST uverbs command.
 *
 * Detaches the QP named by cmd.qp_handle from the multicast group
 * (cmd.gid, cmd.mlid) and drops the matching bookkeeping entry from the
 * QP uobject's mcast_list, if one exists.
 *
 * Returns in_len on success, or a negative errno.
 */
ssize_t ib_uverbs_detach_mcast(struct ib_uverbs_file *file,
			       struct ib_device *ib_dev,
			       const char __user *buf, int in_len,
			       int out_len)
{
	struct ib_uverbs_detach_mcast cmd;
	struct ib_uqp_object         *obj;
	struct ib_qp                 *qp;
	struct ib_uverbs_mcast_entry *mcast;
	int                           ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	/* Write reference: the uobject's mcast_list may be modified. */
	qp = idr_write_qp(cmd.qp_handle, file->ucontext);
	if (!qp)
		return -EINVAL;

	ret = ib_detach_mcast(qp, (union ib_gid *) cmd.gid, cmd.mlid);
	if (ret)
		goto out_put;

	obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);

	/*
	 * Drop the bookkeeping entry; a detach of a group that was never
	 * recorded here still succeeds if ib_detach_mcast() did.
	 */
	list_for_each_entry(mcast, &obj->mcast_list, list)
		if (cmd.mlid == mcast->lid &&
		    !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
			list_del(&mcast->list);
			kfree(mcast);
			break;
		}

out_put:
	put_qp_write(qp);

	return ret ? ret : in_len;
}
f520ba5a 2874
b68c9560 2875static int kern_spec_to_ib_spec(struct ib_uverbs_flow_spec *kern_spec,
436f2ad0
HHZ
2876 union ib_flow_spec *ib_spec)
2877{
c780d82a
YD
2878 if (kern_spec->reserved)
2879 return -EINVAL;
2880
436f2ad0
HHZ
2881 ib_spec->type = kern_spec->type;
2882
2883 switch (ib_spec->type) {
2884 case IB_FLOW_SPEC_ETH:
2885 ib_spec->eth.size = sizeof(struct ib_flow_spec_eth);
2886 if (ib_spec->eth.size != kern_spec->eth.size)
2887 return -EINVAL;
2888 memcpy(&ib_spec->eth.val, &kern_spec->eth.val,
2889 sizeof(struct ib_flow_eth_filter));
2890 memcpy(&ib_spec->eth.mask, &kern_spec->eth.mask,
2891 sizeof(struct ib_flow_eth_filter));
2892 break;
2893 case IB_FLOW_SPEC_IPV4:
2894 ib_spec->ipv4.size = sizeof(struct ib_flow_spec_ipv4);
2895 if (ib_spec->ipv4.size != kern_spec->ipv4.size)
2896 return -EINVAL;
2897 memcpy(&ib_spec->ipv4.val, &kern_spec->ipv4.val,
2898 sizeof(struct ib_flow_ipv4_filter));
2899 memcpy(&ib_spec->ipv4.mask, &kern_spec->ipv4.mask,
2900 sizeof(struct ib_flow_ipv4_filter));
2901 break;
2902 case IB_FLOW_SPEC_TCP:
2903 case IB_FLOW_SPEC_UDP:
2904 ib_spec->tcp_udp.size = sizeof(struct ib_flow_spec_tcp_udp);
2905 if (ib_spec->tcp_udp.size != kern_spec->tcp_udp.size)
2906 return -EINVAL;
2907 memcpy(&ib_spec->tcp_udp.val, &kern_spec->tcp_udp.val,
2908 sizeof(struct ib_flow_tcp_udp_filter));
2909 memcpy(&ib_spec->tcp_udp.mask, &kern_spec->tcp_udp.mask,
2910 sizeof(struct ib_flow_tcp_udp_filter));
2911 break;
2912 default:
2913 return -EINVAL;
2914 }
2915 return 0;
2916}
2917
/*
 * ib_uverbs_ex_create_flow() - handle the extended CREATE_FLOW command.
 *
 * Validates and copies a variable-length flow attribute (header plus
 * num_of_specs individual specs) from userspace, converts each spec via
 * kern_spec_to_ib_spec(), attaches the resulting steering rule to the QP
 * named by cmd.qp_handle, and returns the new rule handle.
 *
 * Requires CAP_NET_RAW; sniffer rules additionally require CAP_NET_ADMIN.
 *
 * Returns 0 on success, or a negative errno.
 */
int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     struct ib_udata *ucore,
			     struct ib_udata *uhw)
{
	struct ib_uverbs_create_flow      cmd;
	struct ib_uverbs_create_flow_resp resp;
	struct ib_uobject                *uobj;
	struct ib_flow                   *flow_id;
	struct ib_uverbs_flow_attr       *kern_flow_attr;
	struct ib_flow_attr              *flow_attr;
	struct ib_qp                     *qp;
	int err = 0;
	void *kern_spec;
	void *ib_spec;
	int i;

	if (ucore->inlen < sizeof(cmd))
		return -EINVAL;

	if (ucore->outlen < sizeof(resp))
		return -ENOSPC;

	err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
	if (err)
		return err;

	/* Advance past the fixed header; the specs follow in the udata. */
	ucore->inbuf += sizeof(cmd);
	ucore->inlen -= sizeof(cmd);

	if (cmd.comp_mask)
		return -EINVAL;

	if ((cmd.flow_attr.type == IB_FLOW_ATTR_SNIFFER &&
	     !capable(CAP_NET_ADMIN)) || !capable(CAP_NET_RAW))
		return -EPERM;

	if (cmd.flow_attr.num_of_specs > IB_FLOW_SPEC_SUPPORT_LAYERS)
		return -EINVAL;

	/* The declared spec area must fit the input and be plausible. */
	if (cmd.flow_attr.size > ucore->inlen ||
	    cmd.flow_attr.size >
	    (cmd.flow_attr.num_of_specs * sizeof(struct ib_uverbs_flow_spec)))
		return -EINVAL;

	if (cmd.flow_attr.reserved[0] ||
	    cmd.flow_attr.reserved[1])
		return -EINVAL;

	if (cmd.flow_attr.num_of_specs) {
		/* Header plus the raw spec bytes in one allocation. */
		kern_flow_attr = kmalloc(sizeof(*kern_flow_attr) + cmd.flow_attr.size,
					 GFP_KERNEL);
		if (!kern_flow_attr)
			return -ENOMEM;

		memcpy(kern_flow_attr, &cmd.flow_attr, sizeof(*kern_flow_attr));
		err = ib_copy_from_udata(kern_flow_attr + 1, ucore,
					 cmd.flow_attr.size);
		if (err)
			goto err_free_attr;
	} else {
		/* No specs: the header copied into cmd is all we need. */
		kern_flow_attr = &cmd.flow_attr;
	}

	uobj = kmalloc(sizeof(*uobj), GFP_KERNEL);
	if (!uobj) {
		err = -ENOMEM;
		goto err_free_attr;
	}
	/* New uobject is created write-locked until fully initialized. */
	init_uobj(uobj, 0, file->ucontext, &rule_lock_class);
	down_write(&uobj->mutex);

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp) {
		err = -EINVAL;
		goto err_uobj;
	}

	flow_attr = kmalloc(sizeof(*flow_attr) + cmd.flow_attr.size, GFP_KERNEL);
	if (!flow_attr) {
		err = -ENOMEM;
		goto err_put;
	}

	flow_attr->type = kern_flow_attr->type;
	flow_attr->priority = kern_flow_attr->priority;
	flow_attr->num_of_specs = kern_flow_attr->num_of_specs;
	flow_attr->port = kern_flow_attr->port;
	flow_attr->flags = kern_flow_attr->flags;
	flow_attr->size = sizeof(*flow_attr);

	/* Walk the user specs, converting each in place after the header. */
	kern_spec = kern_flow_attr + 1;
	ib_spec = flow_attr + 1;
	for (i = 0; i < flow_attr->num_of_specs &&
	     cmd.flow_attr.size > offsetof(struct ib_uverbs_flow_spec, reserved) &&
	     cmd.flow_attr.size >=
	     ((struct ib_uverbs_flow_spec *)kern_spec)->size; i++) {
		err = kern_spec_to_ib_spec(kern_spec, ib_spec);
		if (err)
			goto err_free;
		flow_attr->size +=
			((union ib_flow_spec *) ib_spec)->size;
		cmd.flow_attr.size -= ((struct ib_uverbs_flow_spec *)kern_spec)->size;
		kern_spec += ((struct ib_uverbs_flow_spec *) kern_spec)->size;
		ib_spec += ((union ib_flow_spec *) ib_spec)->size;
	}
	/* Leftover bytes or a short spec count means a malformed command. */
	if (cmd.flow_attr.size || (i != flow_attr->num_of_specs)) {
		pr_warn("create flow failed, flow %d: %d bytes left from uverb cmd\n",
			i, cmd.flow_attr.size);
		err = -EINVAL;
		goto err_free;
	}
	flow_id = ib_create_flow(qp, flow_attr, IB_FLOW_DOMAIN_USER);
	if (IS_ERR(flow_id)) {
		err = PTR_ERR(flow_id);
		goto err_free;
	}
	flow_id->qp = qp;
	flow_id->uobject = uobj;
	uobj->object = flow_id;

	err = idr_add_uobj(&ib_uverbs_rule_idr, uobj);
	if (err)
		goto destroy_flow;

	memset(&resp, 0, sizeof(resp));
	resp.flow_handle = uobj->id;

	err = ib_copy_to_udata(ucore,
			       &resp, sizeof(resp));
	if (err)
		goto err_copy;

	put_qp_read(qp);
	mutex_lock(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->rule_list);
	mutex_unlock(&file->mutex);

	/* Publish: other commands may now look the rule up. */
	uobj->live = 1;

	up_write(&uobj->mutex);
	kfree(flow_attr);
	if (cmd.flow_attr.num_of_specs)
		kfree(kern_flow_attr);
	return 0;
err_copy:
	idr_remove_uobj(&ib_uverbs_rule_idr, uobj);
destroy_flow:
	ib_destroy_flow(flow_id);
err_free:
	kfree(flow_attr);
err_put:
	put_qp_read(qp);
err_uobj:
	put_uobj_write(uobj);
err_free_attr:
	/* kern_flow_attr was only allocated when specs were present. */
	if (cmd.flow_attr.num_of_specs)
		kfree(kern_flow_attr);
	return err;
}
3078
/*
 * ib_uverbs_ex_destroy_flow() - handle the extended DESTROY_FLOW command.
 *
 * Destroys the flow steering rule named by cmd.flow_handle and removes
 * its userspace object from the idr and the context's rule list.
 *
 * Returns 0 on success, or a negative errno.
 */
int ib_uverbs_ex_destroy_flow(struct ib_uverbs_file *file,
			      struct ib_device *ib_dev,
			      struct ib_udata *ucore,
			      struct ib_udata *uhw)
{
	struct ib_uverbs_destroy_flow cmd;
	struct ib_flow               *flow_id;
	struct ib_uobject            *uobj;
	int                           ret;

	if (ucore->inlen < sizeof(cmd))
		return -EINVAL;

	ret = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
	if (ret)
		return ret;

	if (cmd.comp_mask)
		return -EINVAL;

	/* Take the uobject write-locked so no other command can use it. */
	uobj = idr_write_uobj(&ib_uverbs_rule_idr, cmd.flow_handle,
			      file->ucontext);
	if (!uobj)
		return -EINVAL;
	flow_id = uobj->object;

	ret = ib_destroy_flow(flow_id);
	if (!ret)
		uobj->live = 0;	/* mark dead while still holding the lock */

	put_uobj_write(uobj);

	idr_remove_uobj(&ib_uverbs_rule_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);

	return ret;
}
3121
/*
 * __uverbs_create_xsrq() - common worker for the CREATE_SRQ and
 * CREATE_XSRQ commands.
 *
 * Builds a struct ib_srq_init_attr from the (already copied-in) command,
 * creates the SRQ on the PD named by cmd->pd_handle, installs it in the
 * SRQ idr and writes the response (handle, max_wr, max_sge, and for XRC
 * SRQs the SRQ number) back to cmd->response.
 *
 * For IB_SRQT_XRC the XRCD and completion queue handles are resolved as
 * well, and a reference on the XRCD uobject is held for the SRQ lifetime.
 *
 * Returns 0 on success, or a negative errno.
 */
static int __uverbs_create_xsrq(struct ib_uverbs_file *file,
				struct ib_device *ib_dev,
				struct ib_uverbs_create_xsrq *cmd,
				struct ib_udata *udata)
{
	struct ib_uverbs_create_srq_resp resp;
	struct ib_usrq_object           *obj;
	struct ib_pd                    *pd;
	struct ib_srq                   *srq;
	struct ib_uobject               *uninitialized_var(xrcd_uobj);
	struct ib_srq_init_attr          attr;
	int ret;

	obj = kmalloc(sizeof *obj, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	/* New uobject is created write-locked until fully initialized. */
	init_uobj(&obj->uevent.uobject, cmd->user_handle, file->ucontext, &srq_lock_class);
	down_write(&obj->uevent.uobject.mutex);

	if (cmd->srq_type == IB_SRQT_XRC) {
		attr.ext.xrc.xrcd = idr_read_xrcd(cmd->xrcd_handle, file->ucontext, &xrcd_uobj);
		if (!attr.ext.xrc.xrcd) {
			ret = -EINVAL;
			goto err;
		}

		/* Hold the XRC domain for as long as the SRQ exists. */
		obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
		atomic_inc(&obj->uxrcd->refcnt);

		attr.ext.xrc.cq = idr_read_cq(cmd->cq_handle, file->ucontext, 0);
		if (!attr.ext.xrc.cq) {
			ret = -EINVAL;
			goto err_put_xrcd;
		}
	}

	pd = idr_read_pd(cmd->pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err_put_cq;
	}

	attr.event_handler  = ib_uverbs_srq_event_handler;
	attr.srq_context    = file;
	attr.srq_type       = cmd->srq_type;
	attr.attr.max_wr    = cmd->max_wr;
	attr.attr.max_sge   = cmd->max_sge;
	attr.attr.srq_limit = cmd->srq_limit;

	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);

	srq = pd->device->create_srq(pd, &attr, udata);
	if (IS_ERR(srq)) {
		ret = PTR_ERR(srq);
		goto err_put;
	}

	srq->device        = pd->device;
	srq->pd            = pd;
	srq->srq_type      = cmd->srq_type;
	srq->uobject       = &obj->uevent.uobject;
	srq->event_handler = attr.event_handler;
	srq->srq_context   = attr.srq_context;

	if (cmd->srq_type == IB_SRQT_XRC) {
		srq->ext.xrc.cq   = attr.ext.xrc.cq;
		srq->ext.xrc.xrcd = attr.ext.xrc.xrcd;
		atomic_inc(&attr.ext.xrc.cq->usecnt);
		atomic_inc(&attr.ext.xrc.xrcd->usecnt);
	}

	atomic_inc(&pd->usecnt);
	atomic_set(&srq->usecnt, 0);

	obj->uevent.uobject.object = srq;
	ret = idr_add_uobj(&ib_uverbs_srq_idr, &obj->uevent.uobject);
	if (ret)
		goto err_destroy;

	memset(&resp, 0, sizeof resp);
	resp.srq_handle = obj->uevent.uobject.id;
	resp.max_wr     = attr.attr.max_wr;
	resp.max_sge    = attr.attr.max_sge;
	if (cmd->srq_type == IB_SRQT_XRC)
		resp.srqn = srq->ext.xrc.srq_num;

	if (copy_to_user((void __user *) (unsigned long) cmd->response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	/* Success: drop the lookup references taken above. */
	if (cmd->srq_type == IB_SRQT_XRC) {
		put_uobj_read(xrcd_uobj);
		put_cq_read(attr.ext.xrc.cq);
	}
	put_pd_read(pd);

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uevent.uobject.list, &file->ucontext->srq_list);
	mutex_unlock(&file->mutex);

	/* Publish: other commands may now look the SRQ up. */
	obj->uevent.uobject.live = 1;

	up_write(&obj->uevent.uobject.mutex);

	return 0;

err_copy:
	idr_remove_uobj(&ib_uverbs_srq_idr, &obj->uevent.uobject);

err_destroy:
	ib_destroy_srq(srq);

err_put:
	put_pd_read(pd);

err_put_cq:
	if (cmd->srq_type == IB_SRQT_XRC)
		put_cq_read(attr.ext.xrc.cq);

err_put_xrcd:
	if (cmd->srq_type == IB_SRQT_XRC) {
		atomic_dec(&obj->uxrcd->refcnt);
		put_uobj_read(xrcd_uobj);
	}

err:
	put_uobj_write(&obj->uevent.uobject);
	return ret;
}
3255
8541f8de 3256ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
057aec0d 3257 struct ib_device *ib_dev,
8541f8de
SH
3258 const char __user *buf, int in_len,
3259 int out_len)
3260{
3261 struct ib_uverbs_create_srq cmd;
3262 struct ib_uverbs_create_xsrq xcmd;
3263 struct ib_uverbs_create_srq_resp resp;
3264 struct ib_udata udata;
3265 int ret;
3266
3267 if (out_len < sizeof resp)
3268 return -ENOSPC;
3269
3270 if (copy_from_user(&cmd, buf, sizeof cmd))
3271 return -EFAULT;
3272
3273 xcmd.response = cmd.response;
3274 xcmd.user_handle = cmd.user_handle;
3275 xcmd.srq_type = IB_SRQT_BASIC;
3276 xcmd.pd_handle = cmd.pd_handle;
3277 xcmd.max_wr = cmd.max_wr;
3278 xcmd.max_sge = cmd.max_sge;
3279 xcmd.srq_limit = cmd.srq_limit;
3280
3281 INIT_UDATA(&udata, buf + sizeof cmd,
3282 (unsigned long) cmd.response + sizeof resp,
3283 in_len - sizeof cmd, out_len - sizeof resp);
3284
057aec0d 3285 ret = __uverbs_create_xsrq(file, ib_dev, &xcmd, &udata);
8541f8de
SH
3286 if (ret)
3287 return ret;
3288
3289 return in_len;
3290}
3291
3292ssize_t ib_uverbs_create_xsrq(struct ib_uverbs_file *file,
057aec0d 3293 struct ib_device *ib_dev,
8541f8de
SH
3294 const char __user *buf, int in_len, int out_len)
3295{
3296 struct ib_uverbs_create_xsrq cmd;
3297 struct ib_uverbs_create_srq_resp resp;
3298 struct ib_udata udata;
3299 int ret;
3300
3301 if (out_len < sizeof resp)
3302 return -ENOSPC;
3303
3304 if (copy_from_user(&cmd, buf, sizeof cmd))
3305 return -EFAULT;
3306
3307 INIT_UDATA(&udata, buf + sizeof cmd,
3308 (unsigned long) cmd.response + sizeof resp,
3309 in_len - sizeof cmd, out_len - sizeof resp);
3310
057aec0d 3311 ret = __uverbs_create_xsrq(file, ib_dev, &cmd, &udata);
8541f8de
SH
3312 if (ret)
3313 return ret;
3314
3315 return in_len;
3316}
3317
f520ba5a 3318ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file,
057aec0d 3319 struct ib_device *ib_dev,
f520ba5a
RD
3320 const char __user *buf, int in_len,
3321 int out_len)
3322{
3323 struct ib_uverbs_modify_srq cmd;
9bc57e2d 3324 struct ib_udata udata;
f520ba5a
RD
3325 struct ib_srq *srq;
3326 struct ib_srq_attr attr;
3327 int ret;
3328
3329 if (copy_from_user(&cmd, buf, sizeof cmd))
3330 return -EFAULT;
3331
9bc57e2d
RC
3332 INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
3333 out_len);
3334
9ead190b
RD
3335 srq = idr_read_srq(cmd.srq_handle, file->ucontext);
3336 if (!srq)
3337 return -EINVAL;
f520ba5a
RD
3338
3339 attr.max_wr = cmd.max_wr;
f520ba5a
RD
3340 attr.srq_limit = cmd.srq_limit;
3341
9bc57e2d 3342 ret = srq->device->modify_srq(srq, &attr, cmd.attr_mask, &udata);
f520ba5a 3343
9ead190b 3344 put_srq_read(srq);
f520ba5a
RD
3345
3346 return ret ? ret : in_len;
3347}
3348
8bdb0e86 3349ssize_t ib_uverbs_query_srq(struct ib_uverbs_file *file,
057aec0d 3350 struct ib_device *ib_dev,
8bdb0e86
DB
3351 const char __user *buf,
3352 int in_len, int out_len)
3353{
3354 struct ib_uverbs_query_srq cmd;
3355 struct ib_uverbs_query_srq_resp resp;
3356 struct ib_srq_attr attr;
3357 struct ib_srq *srq;
3358 int ret;
3359
3360 if (out_len < sizeof resp)
3361 return -ENOSPC;
3362
3363 if (copy_from_user(&cmd, buf, sizeof cmd))
3364 return -EFAULT;
3365
9ead190b
RD
3366 srq = idr_read_srq(cmd.srq_handle, file->ucontext);
3367 if (!srq)
3368 return -EINVAL;
8bdb0e86 3369
9ead190b 3370 ret = ib_query_srq(srq, &attr);
8bdb0e86 3371
9ead190b 3372 put_srq_read(srq);
8bdb0e86
DB
3373
3374 if (ret)
9ead190b 3375 return ret;
8bdb0e86
DB
3376
3377 memset(&resp, 0, sizeof resp);
3378
3379 resp.max_wr = attr.max_wr;
3380 resp.max_sge = attr.max_sge;
3381 resp.srq_limit = attr.srq_limit;
3382
3383 if (copy_to_user((void __user *) (unsigned long) cmd.response,
3384 &resp, sizeof resp))
9ead190b 3385 return -EFAULT;
8bdb0e86 3386
9ead190b 3387 return in_len;
8bdb0e86
DB
3388}
3389
/*
 * Handle IB_USER_VERBS_CMD_DESTROY_SRQ: destroy the SRQ, tear down its
 * userspace object bookkeeping (idr entry, per-file list, pending
 * events), and report the number of events delivered for it.
 *
 * Returns in_len on success (uverbs convention) or a negative errno.
 */
ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
			      struct ib_device *ib_dev,
			      const char __user *buf, int in_len,
			      int out_len)
{
	struct ib_uverbs_destroy_srq      cmd;
	struct ib_uverbs_destroy_srq_resp resp;
	struct ib_uobject		 *uobj;
	struct ib_srq               	 *srq;
	struct ib_uevent_object        	 *obj;
	int                         	  ret = -EINVAL;
	struct ib_usrq_object		 *us;
	enum ib_srq_type		  srq_type;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	/* Take the uobject write-locked so no one else can use it while
	 * it is being destroyed. */
	uobj = idr_write_uobj(&ib_uverbs_srq_idr, cmd.srq_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;
	srq = uobj->object;
	obj = container_of(uobj, struct ib_uevent_object, uobject);
	/* Remember the type before the srq is freed by ib_destroy_srq(). */
	srq_type = srq->srq_type;

	ret = ib_destroy_srq(srq);
	/* Mark the uobject dead only if the destroy actually succeeded. */
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	/* An XRC SRQ pinned its XRC domain; drop that reference now. */
	if (srq_type == IB_SRQT_XRC) {
		us = container_of(obj, struct ib_usrq_object, uevent);
		atomic_dec(&us->uxrcd->refcnt);
	}

	idr_remove_uobj(&ib_uverbs_srq_idr, uobj);

	/* Unlink from the per-file resource list under file->mutex. */
	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	/* Flush any still-queued async events for this SRQ. */
	ib_uverbs_release_uevent(file, obj);

	memset(&resp, 0, sizeof resp);
	/* Read the event count before dropping our final reference. */
	resp.events_reported = obj->events_reported;

	put_uobj(uobj);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

	return ret ? ret : in_len;
}
02d1aa7a
EC
3447
/*
 * Handle the extended QUERY_DEVICE command. The response is extensible:
 * each optional trailing field (odp_caps, timestamp_mask,
 * hca_core_clock) is appended only if the caller's output buffer is
 * large enough for it, and resp.response_length reports exactly how
 * many bytes were filled in, so old userspace keeps working unchanged.
 *
 * Returns 0 on success or a negative errno.
 */
int ib_uverbs_ex_query_device(struct ib_uverbs_file *file,
			      struct ib_device *ib_dev,
			      struct ib_udata *ucore,
			      struct ib_udata *uhw)
{
	struct ib_uverbs_ex_query_device_resp resp;
	struct ib_uverbs_ex_query_device  cmd;
	struct ib_device_attr attr;
	int err;

	if (ucore->inlen < sizeof(cmd))
		return -EINVAL;

	err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
	if (err)
		return err;

	/* No comp_mask bits are defined yet; reject anything set. */
	if (cmd.comp_mask)
		return -EINVAL;

	if (cmd.reserved)
		return -EINVAL;

	/* Mandatory portion of the response: everything before odp_caps. */
	resp.response_length = offsetof(typeof(resp), odp_caps);

	if (ucore->outlen < resp.response_length)
		return -ENOSPC;

	memset(&attr, 0, sizeof(attr));

	err = ib_dev->query_device(ib_dev, &attr, uhw);
	if (err)
		return err;

	copy_query_dev_fields(file, ib_dev, &resp.base, &attr);
	resp.comp_mask = 0;

	/* Each optional field below is only added if the user buffer can
	 * hold the response grown by that field. */
	if (ucore->outlen < resp.response_length + sizeof(resp.odp_caps))
		goto end;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	resp.odp_caps.general_caps = attr.odp_caps.general_caps;
	resp.odp_caps.per_transport_caps.rc_odp_caps =
		attr.odp_caps.per_transport_caps.rc_odp_caps;
	resp.odp_caps.per_transport_caps.uc_odp_caps =
		attr.odp_caps.per_transport_caps.uc_odp_caps;
	resp.odp_caps.per_transport_caps.ud_odp_caps =
		attr.odp_caps.per_transport_caps.ud_odp_caps;
	resp.odp_caps.reserved = 0;
#else
	/* ODP not compiled in: report all-zero capabilities. */
	memset(&resp.odp_caps, 0, sizeof(resp.odp_caps));
#endif
	resp.response_length += sizeof(resp.odp_caps);

	if (ucore->outlen < resp.response_length + sizeof(resp.timestamp_mask))
		goto end;

	resp.timestamp_mask = attr.timestamp_mask;
	resp.response_length += sizeof(resp.timestamp_mask);

	if (ucore->outlen < resp.response_length + sizeof(resp.hca_core_clock))
		goto end;

	resp.hca_core_clock = attr.hca_core_clock;
	resp.response_length += sizeof(resp.hca_core_clock);

end:
	/* Copy back only the bytes that were actually populated. */
	err = ib_copy_to_udata(ucore, &resp, resp.response_length);
	if (err)
		return err;

	return 0;
}
This page took 0.893595 seconds and 5 git commands to generate.