/* drivers/infiniband/core/ucma.c */

/*
 * Copyright (c) 2005-2006 Intel Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/completion.h>
#include <linux/file.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/idr.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/module.h>

#include <rdma/rdma_user_cm.h>
#include <rdma/ib_marshall.h>
#include <rdma/rdma_cm.h>
#include <rdma/rdma_cm_ib.h>
#include <rdma/ib_addr.h>
#include <rdma/ib.h>

MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("RDMA Userspace Connection Manager Access");
MODULE_LICENSE("Dual BSD/GPL");

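/*
 * Listen backlogs above this value are clamped; the limit is tunable at
 * runtime through /proc/sys/net/rdma_ucm/max_backlog (see ucma_listen()
 * and the register_net_sysctl() call in ucma_init() below).
 */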
static unsigned int max_backlog = 1024;

static struct ctl_table_header *ucma_ctl_table_hdr;
static struct ctl_table ucma_ctl_table[] = {
	{
		.procname	= "max_backlog",
		.data		= &max_backlog,
		.maxlen		= sizeof max_backlog,
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ }
};

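/*
 * Roughly: one ucma_file exists per open of /dev/infiniband/rdma_cm and
 * owns its contexts plus the queue of events not yet read by userspace.
 * A ucma_context pairs a kernel rdma_cm_id with the small integer id
 * userspace uses to name it; ucma_multicast does the same for
 * multicast joins.
 */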
struct ucma_file {
	struct mutex		mut;
	struct file		*filp;
	struct list_head	ctx_list;
	struct list_head	event_list;
	wait_queue_head_t	poll_wait;
};

struct ucma_context {
	int			id;
	struct completion	comp;
	atomic_t		ref;
	int			events_reported;
	int			backlog;

	struct ucma_file	*file;
	struct rdma_cm_id	*cm_id;
	u64			uid;

	struct list_head	list;
	struct list_head	mc_list;
};

struct ucma_multicast {
	struct ucma_context	*ctx;
	int			id;
	int			events_reported;

	u64			uid;
	struct list_head	list;
	struct sockaddr_storage	addr;
};

struct ucma_event {
	struct ucma_context	*ctx;
	struct ucma_multicast	*mc;
	struct list_head	list;
	struct rdma_cm_id	*cm_id;
	struct rdma_ucm_event_resp resp;
};

static DEFINE_MUTEX(mut);
static DEFINE_IDR(ctx_idr);
static DEFINE_IDR(multicast_idr);

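/*
 * Context lookup and reference counting: ids are resolved under the
 * global 'mut' mutex, ucma_get_ctx() takes a reference, and the final
 * ucma_put_ctx() completes ctx->comp so that ucma_destroy_id() can wait
 * for all users to drain before freeing the context.
 */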
static inline struct ucma_context *_ucma_find_context(int id,
						      struct ucma_file *file)
{
	struct ucma_context *ctx;

	ctx = idr_find(&ctx_idr, id);
	if (!ctx)
		ctx = ERR_PTR(-ENOENT);
	else if (ctx->file != file)
		ctx = ERR_PTR(-EINVAL);
	return ctx;
}

static struct ucma_context *ucma_get_ctx(struct ucma_file *file, int id)
{
	struct ucma_context *ctx;

	mutex_lock(&mut);
	ctx = _ucma_find_context(id, file);
	if (!IS_ERR(ctx))
		atomic_inc(&ctx->ref);
	mutex_unlock(&mut);
	return ctx;
}

static void ucma_put_ctx(struct ucma_context *ctx)
{
	if (atomic_dec_and_test(&ctx->ref))
		complete(&ctx->comp);
}

static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file)
{
	struct ucma_context *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	atomic_set(&ctx->ref, 1);
	init_completion(&ctx->comp);
	INIT_LIST_HEAD(&ctx->mc_list);
	ctx->file = file;

	mutex_lock(&mut);
	ctx->id = idr_alloc(&ctx_idr, ctx, 0, 0, GFP_KERNEL);
	mutex_unlock(&mut);
	if (ctx->id < 0)
		goto error;

	list_add_tail(&ctx->list, &file->ctx_list);
	return ctx;

error:
	kfree(ctx);
	return NULL;
}

static struct ucma_multicast* ucma_alloc_multicast(struct ucma_context *ctx)
{
	struct ucma_multicast *mc;

	mc = kzalloc(sizeof(*mc), GFP_KERNEL);
	if (!mc)
		return NULL;

	mutex_lock(&mut);
	mc->id = idr_alloc(&multicast_idr, mc, 0, 0, GFP_KERNEL);
	mutex_unlock(&mut);
	if (mc->id < 0)
		goto error;

	mc->ctx = ctx;
	list_add_tail(&mc->list, &ctx->mc_list);
	return mc;

error:
	kfree(mc);
	return NULL;
}

static void ucma_copy_conn_event(struct rdma_ucm_conn_param *dst,
				 struct rdma_conn_param *src)
{
	if (src->private_data_len)
		memcpy(dst->private_data, src->private_data,
		       src->private_data_len);
	dst->private_data_len = src->private_data_len;
	dst->responder_resources = src->responder_resources;
	dst->initiator_depth = src->initiator_depth;
	dst->flow_control = src->flow_control;
	dst->retry_count = src->retry_count;
	dst->rnr_retry_count = src->rnr_retry_count;
	dst->srq = src->srq;
	dst->qp_num = src->qp_num;
}

static void ucma_copy_ud_event(struct rdma_ucm_ud_param *dst,
			       struct rdma_ud_param *src)
{
	if (src->private_data_len)
		memcpy(dst->private_data, src->private_data,
		       src->private_data_len);
	dst->private_data_len = src->private_data_len;
	ib_copy_ah_attr_to_user(&dst->ah_attr, &src->ah_attr);
	dst->qp_num = src->qp_num;
	dst->qkey = src->qkey;
}

static void ucma_set_event_context(struct ucma_context *ctx,
				   struct rdma_cm_event *event,
				   struct ucma_event *uevent)
{
	uevent->ctx = ctx;
	switch (event->event) {
	case RDMA_CM_EVENT_MULTICAST_JOIN:
	case RDMA_CM_EVENT_MULTICAST_ERROR:
		uevent->mc = (struct ucma_multicast *)
			     event->param.ud.private_data;
		uevent->resp.uid = uevent->mc->uid;
		uevent->resp.id = uevent->mc->id;
		break;
	default:
		uevent->resp.uid = ctx->uid;
		uevent->resp.id = ctx->id;
		break;
	}
}

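/*
 * rdma_cm event callback. Events are queued on the owning file and poll
 * waiters are woken; userspace retrieves them via ucma_get_event(). For
 * a connect request that cannot be queued (no memory, or no backlog
 * slots left), the nonzero return tells the rdma_cm to tear down the
 * new connection.
 */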
static int ucma_event_handler(struct rdma_cm_id *cm_id,
			      struct rdma_cm_event *event)
{
	struct ucma_event *uevent;
	struct ucma_context *ctx = cm_id->context;
	int ret = 0;

	uevent = kzalloc(sizeof(*uevent), GFP_KERNEL);
	if (!uevent)
		return event->event == RDMA_CM_EVENT_CONNECT_REQUEST;

	mutex_lock(&ctx->file->mut);
	uevent->cm_id = cm_id;
	ucma_set_event_context(ctx, event, uevent);
	uevent->resp.event = event->event;
	uevent->resp.status = event->status;
	if (cm_id->qp_type == IB_QPT_UD)
		ucma_copy_ud_event(&uevent->resp.param.ud, &event->param.ud);
	else
		ucma_copy_conn_event(&uevent->resp.param.conn,
				     &event->param.conn);

	if (event->event == RDMA_CM_EVENT_CONNECT_REQUEST) {
		if (!ctx->backlog) {
			ret = -ENOMEM;
			kfree(uevent);
			goto out;
		}
		ctx->backlog--;
	} else if (!ctx->uid || ctx->cm_id != cm_id) {
		/*
		 * We ignore events for new connections until userspace has set
		 * their context. This can only happen if an error occurs on a
		 * new connection before the user accepts it. This is okay,
		 * since the accept will just fail later.
		 */
		kfree(uevent);
		goto out;
	}

	list_add_tail(&uevent->list, &ctx->file->event_list);
	wake_up_interruptible(&ctx->file->poll_wait);
out:
	mutex_unlock(&ctx->file->mut);
	return ret;
}

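/*
 * Blocks (unless the file is O_NONBLOCK) until an event is available.
 * A connect request is bound to a freshly allocated context here, so
 * that userspace receives an id for the incoming connection.
 */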
static ssize_t ucma_get_event(struct ucma_file *file, const char __user *inbuf,
			      int in_len, int out_len)
{
	struct ucma_context *ctx;
	struct rdma_ucm_get_event cmd;
	struct ucma_event *uevent;
	int ret = 0;

	if (out_len < sizeof uevent->resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	mutex_lock(&file->mut);
	while (list_empty(&file->event_list)) {
		mutex_unlock(&file->mut);

		if (file->filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		if (wait_event_interruptible(file->poll_wait,
					     !list_empty(&file->event_list)))
			return -ERESTARTSYS;

		mutex_lock(&file->mut);
	}

	uevent = list_entry(file->event_list.next, struct ucma_event, list);

	if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST) {
		ctx = ucma_alloc_ctx(file);
		if (!ctx) {
			ret = -ENOMEM;
			goto done;
		}
		uevent->ctx->backlog++;
		ctx->cm_id = uevent->cm_id;
		ctx->cm_id->context = ctx;
		uevent->resp.id = ctx->id;
	}

	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &uevent->resp, sizeof uevent->resp)) {
		ret = -EFAULT;
		goto done;
	}

	list_del(&uevent->list);
	uevent->ctx->events_reported++;
	if (uevent->mc)
		uevent->mc->events_reported++;
	kfree(uevent);
done:
	mutex_unlock(&file->mut);
	return ret;
}

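/*
 * The port space determines the QP type: RDMA_PS_TCP maps to RC,
 * RDMA_PS_UDP and RDMA_PS_IPOIB map to UD, and RDMA_PS_IB lets the
 * caller choose.
 */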
static int ucma_get_qp_type(struct rdma_ucm_create_id *cmd, enum ib_qp_type *qp_type)
{
	switch (cmd->ps) {
	case RDMA_PS_TCP:
		*qp_type = IB_QPT_RC;
		return 0;
	case RDMA_PS_UDP:
	case RDMA_PS_IPOIB:
		*qp_type = IB_QPT_UD;
		return 0;
	case RDMA_PS_IB:
		*qp_type = cmd->qp_type;
		return 0;
	default:
		return -EINVAL;
	}
}

static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf,
			      int in_len, int out_len)
{
	struct rdma_ucm_create_id cmd;
	struct rdma_ucm_create_id_resp resp;
	struct ucma_context *ctx;
	enum ib_qp_type qp_type;
	int ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ret = ucma_get_qp_type(&cmd, &qp_type);
	if (ret)
		return ret;

	mutex_lock(&file->mut);
	ctx = ucma_alloc_ctx(file);
	mutex_unlock(&file->mut);
	if (!ctx)
		return -ENOMEM;

	ctx->uid = cmd.uid;
	ctx->cm_id = rdma_create_id(ucma_event_handler, ctx, cmd.ps, qp_type);
	if (IS_ERR(ctx->cm_id)) {
		ret = PTR_ERR(ctx->cm_id);
		goto err1;
	}

	resp.id = ctx->id;
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp))) {
		ret = -EFAULT;
		goto err2;
	}
	return 0;

err2:
	rdma_destroy_id(ctx->cm_id);
err1:
	mutex_lock(&mut);
	idr_remove(&ctx_idr, ctx->id);
	mutex_unlock(&mut);
	kfree(ctx);
	return ret;
}

static void ucma_cleanup_multicast(struct ucma_context *ctx)
{
	struct ucma_multicast *mc, *tmp;

	mutex_lock(&mut);
	list_for_each_entry_safe(mc, tmp, &ctx->mc_list, list) {
		list_del(&mc->list);
		idr_remove(&multicast_idr, mc->id);
		kfree(mc);
	}
	mutex_unlock(&mut);
}

static void ucma_cleanup_mc_events(struct ucma_multicast *mc)
{
	struct ucma_event *uevent, *tmp;

	list_for_each_entry_safe(uevent, tmp, &mc->ctx->file->event_list, list) {
		if (uevent->mc != mc)
			continue;

		list_del(&uevent->list);
		kfree(uevent);
	}
}

/*
 * We cannot hold file->mut when calling rdma_destroy_id() or we can
 * deadlock. We also acquire file->mut in ucma_event_handler(), and
 * rdma_destroy_id() will wait until all callbacks have completed.
 */
static int ucma_free_ctx(struct ucma_context *ctx)
{
	int events_reported;
	struct ucma_event *uevent, *tmp;
	LIST_HEAD(list);

	/* No new events will be generated after destroying the id. */
	rdma_destroy_id(ctx->cm_id);

	ucma_cleanup_multicast(ctx);

	/* Cleanup events not yet reported to the user. */
	mutex_lock(&ctx->file->mut);
	list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list) {
		if (uevent->ctx == ctx)
			list_move_tail(&uevent->list, &list);
	}
	list_del(&ctx->list);
	mutex_unlock(&ctx->file->mut);

	list_for_each_entry_safe(uevent, tmp, &list, list) {
		list_del(&uevent->list);
		if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST)
			rdma_destroy_id(uevent->cm_id);
		kfree(uevent);
	}

	events_reported = ctx->events_reported;
	kfree(ctx);
	return events_reported;
}

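/*
 * Teardown order matters here: the id is first removed from the idr so
 * no new references can be taken, the caller's reference is dropped,
 * and ucma_free_ctx() runs only after the completion signals that all
 * outstanding references are gone.
 */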
static ssize_t ucma_destroy_id(struct ucma_file *file, const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_destroy_id cmd;
	struct rdma_ucm_destroy_id_resp resp;
	struct ucma_context *ctx;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	mutex_lock(&mut);
	ctx = _ucma_find_context(cmd.id, file);
	if (!IS_ERR(ctx))
		idr_remove(&ctx_idr, ctx->id);
	mutex_unlock(&mut);

	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ucma_put_ctx(ctx);
	wait_for_completion(&ctx->comp);
	resp.events_reported = ucma_free_ctx(ctx);

	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;

	return ret;
}

static ssize_t ucma_bind_ip(struct ucma_file *file, const char __user *inbuf,
			    int in_len, int out_len)
{
	struct rdma_ucm_bind_ip cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_bind(struct ucma_file *file, const char __user *inbuf,
			 int in_len, int out_len)
{
	struct rdma_ucm_bind cmd;
	struct sockaddr *addr;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	addr = (struct sockaddr *) &cmd.addr;
	if (cmd.reserved || !cmd.addr_size || (cmd.addr_size != rdma_addr_size(addr)))
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_bind_addr(ctx->cm_id, addr);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_resolve_ip(struct ucma_file *file,
			       const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_resolve_ip cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr,
				(struct sockaddr *) &cmd.dst_addr,
				cmd.timeout_ms);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_resolve_addr(struct ucma_file *file,
				 const char __user *inbuf,
				 int in_len, int out_len)
{
	struct rdma_ucm_resolve_addr cmd;
	struct sockaddr *src, *dst;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	src = (struct sockaddr *) &cmd.src_addr;
	dst = (struct sockaddr *) &cmd.dst_addr;
	if (cmd.reserved || (cmd.src_size && (cmd.src_size != rdma_addr_size(src))) ||
	    !cmd.dst_size || (cmd.dst_size != rdma_addr_size(dst)))
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_resolve_addr(ctx->cm_id, src, dst, cmd.timeout_ms);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_resolve_route(struct ucma_file *file,
				  const char __user *inbuf,
				  int in_len, int out_len)
{
	struct rdma_ucm_resolve_route cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_resolve_route(ctx->cm_id, cmd.timeout_ms);
	ucma_put_ctx(ctx);
	return ret;
}

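/*
 * num_paths encodes how much of the route is known: 0 means no path
 * records were resolved (only the GIDs from the device address are
 * reported), 1 is the primary path, and 2 adds an alternate path.
 */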
static void ucma_copy_ib_route(struct rdma_ucm_query_route_resp *resp,
			       struct rdma_route *route)
{
	struct rdma_dev_addr *dev_addr;

	resp->num_paths = route->num_paths;
	switch (route->num_paths) {
	case 0:
		dev_addr = &route->addr.dev_addr;
		rdma_addr_get_dgid(dev_addr,
				   (union ib_gid *) &resp->ib_route[0].dgid);
		rdma_addr_get_sgid(dev_addr,
				   (union ib_gid *) &resp->ib_route[0].sgid);
		resp->ib_route[0].pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
		break;
	case 2:
		ib_copy_path_rec_to_user(&resp->ib_route[1],
					 &route->path_rec[1]);
		/* fall through */
	case 1:
		ib_copy_path_rec_to_user(&resp->ib_route[0],
					 &route->path_rec[0]);
		break;
	default:
		break;
	}
}

static void ucma_copy_iboe_route(struct rdma_ucm_query_route_resp *resp,
				 struct rdma_route *route)
{
	resp->num_paths = route->num_paths;
	switch (route->num_paths) {
	case 0:
		rdma_ip2gid((struct sockaddr *)&route->addr.dst_addr,
			    (union ib_gid *)&resp->ib_route[0].dgid);
		rdma_ip2gid((struct sockaddr *)&route->addr.src_addr,
			    (union ib_gid *)&resp->ib_route[0].sgid);
		resp->ib_route[0].pkey = cpu_to_be16(0xffff);
		break;
	case 2:
		ib_copy_path_rec_to_user(&resp->ib_route[1],
					 &route->path_rec[1]);
		/* fall through */
	case 1:
		ib_copy_path_rec_to_user(&resp->ib_route[0],
					 &route->path_rec[0]);
		break;
	default:
		break;
	}
}

static void ucma_copy_iw_route(struct rdma_ucm_query_route_resp *resp,
			       struct rdma_route *route)
{
	struct rdma_dev_addr *dev_addr;

	dev_addr = &route->addr.dev_addr;
	rdma_addr_get_dgid(dev_addr, (union ib_gid *) &resp->ib_route[0].dgid);
	rdma_addr_get_sgid(dev_addr, (union ib_gid *) &resp->ib_route[0].sgid);
}

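/*
 * The route reported to userspace depends on the transport: IB ports
 * with an SA use the resolved path records, RoCE ports derive GIDs from
 * the IP addresses, and iWarp ports report the address GIDs only.
 */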
static ssize_t ucma_query_route(struct ucma_file *file,
				const char __user *inbuf,
				int in_len, int out_len)
{
	struct rdma_ucm_query cmd;
	struct rdma_ucm_query_route_resp resp;
	struct ucma_context *ctx;
	struct sockaddr *addr;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	memset(&resp, 0, sizeof resp);
	addr = (struct sockaddr *) &ctx->cm_id->route.addr.src_addr;
	memcpy(&resp.src_addr, addr, addr->sa_family == AF_INET ?
				     sizeof(struct sockaddr_in) :
				     sizeof(struct sockaddr_in6));
	addr = (struct sockaddr *) &ctx->cm_id->route.addr.dst_addr;
	memcpy(&resp.dst_addr, addr, addr->sa_family == AF_INET ?
				     sizeof(struct sockaddr_in) :
				     sizeof(struct sockaddr_in6));
	if (!ctx->cm_id->device)
		goto out;

	resp.node_guid = (__force __u64) ctx->cm_id->device->node_guid;
	resp.port_num = ctx->cm_id->port_num;

	if (rdma_cap_ib_sa(ctx->cm_id->device, ctx->cm_id->port_num))
		ucma_copy_ib_route(&resp, &ctx->cm_id->route);
	else if (rdma_protocol_roce(ctx->cm_id->device, ctx->cm_id->port_num))
		ucma_copy_iboe_route(&resp, &ctx->cm_id->route);
	else if (rdma_protocol_iwarp(ctx->cm_id->device, ctx->cm_id->port_num))
		ucma_copy_iw_route(&resp, &ctx->cm_id->route);

out:
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;

	ucma_put_ctx(ctx);
	return ret;
}

static void ucma_query_device_addr(struct rdma_cm_id *cm_id,
				   struct rdma_ucm_query_addr_resp *resp)
{
	if (!cm_id->device)
		return;

	resp->node_guid = (__force __u64) cm_id->device->node_guid;
	resp->port_num = cm_id->port_num;
	resp->pkey = (__force __u16) cpu_to_be16(
		     ib_addr_get_pkey(&cm_id->route.addr.dev_addr));
}

static ssize_t ucma_query_addr(struct ucma_context *ctx,
			       void __user *response, int out_len)
{
	struct rdma_ucm_query_addr_resp resp;
	struct sockaddr *addr;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	memset(&resp, 0, sizeof resp);

	addr = (struct sockaddr *) &ctx->cm_id->route.addr.src_addr;
	resp.src_size = rdma_addr_size(addr);
	memcpy(&resp.src_addr, addr, resp.src_size);

	addr = (struct sockaddr *) &ctx->cm_id->route.addr.dst_addr;
	resp.dst_size = rdma_addr_size(addr);
	memcpy(&resp.dst_addr, addr, resp.dst_size);

	ucma_query_device_addr(ctx->cm_id, &resp);

	if (copy_to_user(response, &resp, sizeof(resp)))
		ret = -EFAULT;

	return ret;
}

static ssize_t ucma_query_path(struct ucma_context *ctx,
			       void __user *response, int out_len)
{
	struct rdma_ucm_query_path_resp *resp;
	int i, ret = 0;

	if (out_len < sizeof(*resp))
		return -ENOSPC;

	resp = kzalloc(out_len, GFP_KERNEL);
	if (!resp)
		return -ENOMEM;

	resp->num_paths = ctx->cm_id->route.num_paths;
	for (i = 0, out_len -= sizeof(*resp);
	     i < resp->num_paths && out_len > sizeof(struct ib_path_rec_data);
	     i++, out_len -= sizeof(struct ib_path_rec_data)) {

		resp->path_data[i].flags = IB_PATH_GMP | IB_PATH_PRIMARY |
					   IB_PATH_BIDIRECTIONAL;
		ib_sa_pack_path(&ctx->cm_id->route.path_rec[i],
				&resp->path_data[i].path_rec);
	}

	if (copy_to_user(response, resp,
			 sizeof(*resp) + (i * sizeof(struct ib_path_rec_data))))
		ret = -EFAULT;

	kfree(resp);
	return ret;
}

static ssize_t ucma_query_gid(struct ucma_context *ctx,
			      void __user *response, int out_len)
{
	struct rdma_ucm_query_addr_resp resp;
	struct sockaddr_ib *addr;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	memset(&resp, 0, sizeof resp);

	ucma_query_device_addr(ctx->cm_id, &resp);

	addr = (struct sockaddr_ib *) &resp.src_addr;
	resp.src_size = sizeof(*addr);
	if (ctx->cm_id->route.addr.src_addr.ss_family == AF_IB) {
		memcpy(addr, &ctx->cm_id->route.addr.src_addr, resp.src_size);
	} else {
		addr->sib_family = AF_IB;
		addr->sib_pkey = (__force __be16) resp.pkey;
		rdma_addr_get_sgid(&ctx->cm_id->route.addr.dev_addr,
				   (union ib_gid *) &addr->sib_addr);
		addr->sib_sid = rdma_get_service_id(ctx->cm_id, (struct sockaddr *)
						    &ctx->cm_id->route.addr.src_addr);
	}

	addr = (struct sockaddr_ib *) &resp.dst_addr;
	resp.dst_size = sizeof(*addr);
	if (ctx->cm_id->route.addr.dst_addr.ss_family == AF_IB) {
		memcpy(addr, &ctx->cm_id->route.addr.dst_addr, resp.dst_size);
	} else {
		addr->sib_family = AF_IB;
		addr->sib_pkey = (__force __be16) resp.pkey;
		rdma_addr_get_dgid(&ctx->cm_id->route.addr.dev_addr,
				   (union ib_gid *) &addr->sib_addr);
		addr->sib_sid = rdma_get_service_id(ctx->cm_id, (struct sockaddr *)
						    &ctx->cm_id->route.addr.dst_addr);
	}

	if (copy_to_user(response, &resp, sizeof(resp)))
		ret = -EFAULT;

	return ret;
}

static ssize_t ucma_query(struct ucma_file *file,
			  const char __user *inbuf,
			  int in_len, int out_len)
{
	struct rdma_ucm_query cmd;
	struct ucma_context *ctx;
	void __user *response;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	response = (void __user *)(unsigned long) cmd.response;
	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	switch (cmd.option) {
	case RDMA_USER_CM_QUERY_ADDR:
		ret = ucma_query_addr(ctx, response, out_len);
		break;
	case RDMA_USER_CM_QUERY_PATH:
		ret = ucma_query_path(ctx, response, out_len);
		break;
	case RDMA_USER_CM_QUERY_GID:
		ret = ucma_query_gid(ctx, response, out_len);
		break;
	default:
		ret = -ENOSYS;
		break;
	}

	ucma_put_ctx(ctx);
	return ret;
}

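/*
 * Note that a user-supplied qkey is honored only for AF_IB addresses;
 * for IP-based ids the qkey is zeroed here.
 */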
static void ucma_copy_conn_param(struct rdma_cm_id *id,
				 struct rdma_conn_param *dst,
				 struct rdma_ucm_conn_param *src)
{
	dst->private_data = src->private_data;
	dst->private_data_len = src->private_data_len;
	dst->responder_resources = src->responder_resources;
	dst->initiator_depth = src->initiator_depth;
	dst->flow_control = src->flow_control;
	dst->retry_count = src->retry_count;
	dst->rnr_retry_count = src->rnr_retry_count;
	dst->srq = src->srq;
	dst->qp_num = src->qp_num;
	dst->qkey = (id->route.addr.src_addr.ss_family == AF_IB) ? src->qkey : 0;
}

static ssize_t ucma_connect(struct ucma_file *file, const char __user *inbuf,
			    int in_len, int out_len)
{
	struct rdma_ucm_connect cmd;
	struct rdma_conn_param conn_param;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if (!cmd.conn_param.valid)
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ucma_copy_conn_param(ctx->cm_id, &conn_param, &cmd.conn_param);
	ret = rdma_connect(ctx->cm_id, &conn_param);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_listen(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_listen cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ctx->backlog = cmd.backlog > 0 && cmd.backlog < max_backlog ?
		       cmd.backlog : max_backlog;
	ret = rdma_listen(ctx->cm_id, ctx->backlog);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_accept(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_accept cmd;
	struct rdma_conn_param conn_param;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	if (cmd.conn_param.valid) {
		ucma_copy_conn_param(ctx->cm_id, &conn_param, &cmd.conn_param);
		mutex_lock(&file->mut);
		ret = rdma_accept(ctx->cm_id, &conn_param);
		if (!ret)
			ctx->uid = cmd.uid;
		mutex_unlock(&file->mut);
	} else
		ret = rdma_accept(ctx->cm_id, NULL);

	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_reject(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_reject cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_reject(ctx->cm_id, cmd.private_data, cmd.private_data_len);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_disconnect(struct ucma_file *file, const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_disconnect cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_disconnect(ctx->cm_id);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_init_qp_attr(struct ucma_file *file,
				 const char __user *inbuf,
				 int in_len, int out_len)
{
	struct rdma_ucm_init_qp_attr cmd;
	struct ib_uverbs_qp_attr resp;
	struct ucma_context *ctx;
	struct ib_qp_attr qp_attr;
	int ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	resp.qp_attr_mask = 0;
	memset(&qp_attr, 0, sizeof qp_attr);
	qp_attr.qp_state = cmd.qp_state;
	ret = rdma_init_qp_attr(ctx->cm_id, &qp_attr, &resp.qp_attr_mask);
	if (ret)
		goto out;

	ib_copy_qp_attr_to_user(&resp, &qp_attr);
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;

out:
	ucma_put_ctx(ctx);
	return ret;
}

static int ucma_set_option_id(struct ucma_context *ctx, int optname,
			      void *optval, size_t optlen)
{
	int ret = 0;

	switch (optname) {
	case RDMA_OPTION_ID_TOS:
		if (optlen != sizeof(u8)) {
			ret = -EINVAL;
			break;
		}
		rdma_set_service_type(ctx->cm_id, *((u8 *) optval));
		break;
	case RDMA_OPTION_ID_REUSEADDR:
		if (optlen != sizeof(int)) {
			ret = -EINVAL;
			break;
		}
		ret = rdma_set_reuseaddr(ctx->cm_id, *((int *) optval) ? 1 : 0);
		break;
	case RDMA_OPTION_ID_AFONLY:
		if (optlen != sizeof(int)) {
			ret = -EINVAL;
			break;
		}
		ret = rdma_set_afonly(ctx->cm_id, *((int *) optval) ? 1 : 0);
		break;
	default:
		ret = -ENOSYS;
	}

	return ret;
}

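/*
 * Userspace can inject a pre-resolved IB path instead of going through
 * route resolution; on success a synthetic RDMA_CM_EVENT_ROUTE_RESOLVED
 * event is reported back through the normal event path.
 */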
static int ucma_set_ib_path(struct ucma_context *ctx,
			    struct ib_path_rec_data *path_data, size_t optlen)
{
	struct ib_sa_path_rec sa_path;
	struct rdma_cm_event event;
	int ret;

	if (optlen % sizeof(*path_data))
		return -EINVAL;

	for (; optlen; optlen -= sizeof(*path_data), path_data++) {
		if (path_data->flags == (IB_PATH_GMP | IB_PATH_PRIMARY |
					 IB_PATH_BIDIRECTIONAL))
			break;
	}

	if (!optlen)
		return -EINVAL;

	memset(&sa_path, 0, sizeof(sa_path));
	sa_path.vlan_id = 0xffff;

	ib_sa_unpack_path(path_data->path_rec, &sa_path);
	ret = rdma_set_ib_paths(ctx->cm_id, &sa_path, 1);
	if (ret)
		return ret;

	memset(&event, 0, sizeof event);
	event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
	return ucma_event_handler(ctx->cm_id, &event);
}

static int ucma_set_option_ib(struct ucma_context *ctx, int optname,
			      void *optval, size_t optlen)
{
	int ret;

	switch (optname) {
	case RDMA_OPTION_IB_PATH:
		ret = ucma_set_ib_path(ctx, optval, optlen);
		break;
	default:
		ret = -ENOSYS;
	}

	return ret;
}

static int ucma_set_option_level(struct ucma_context *ctx, int level,
				 int optname, void *optval, size_t optlen)
{
	int ret;

	switch (level) {
	case RDMA_OPTION_ID:
		ret = ucma_set_option_id(ctx, optname, optval, optlen);
		break;
	case RDMA_OPTION_IB:
		ret = ucma_set_option_ib(ctx, optname, optval, optlen);
		break;
	default:
		ret = -ENOSYS;
	}

	return ret;
}

static ssize_t ucma_set_option(struct ucma_file *file, const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_set_option cmd;
	struct ucma_context *ctx;
	void *optval;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	optval = memdup_user((void __user *) (unsigned long) cmd.optval,
			     cmd.optlen);
	if (IS_ERR(optval)) {
		ret = PTR_ERR(optval);
		goto out;
	}

	ret = ucma_set_option_level(ctx, cmd.level, cmd.optname, optval,
				    cmd.optlen);
	kfree(optval);

out:
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_notify(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_notify cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_notify(ctx->cm_id, (enum ib_event_type) cmd.event);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_process_join(struct ucma_file *file,
				 struct rdma_ucm_join_mcast *cmd, int out_len)
{
	struct rdma_ucm_create_id_resp resp;
	struct ucma_context *ctx;
	struct ucma_multicast *mc;
	struct sockaddr *addr;
	int ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	addr = (struct sockaddr *) &cmd->addr;
	if (cmd->reserved || !cmd->addr_size || (cmd->addr_size != rdma_addr_size(addr)))
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd->id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	mutex_lock(&file->mut);
	mc = ucma_alloc_multicast(ctx);
	if (!mc) {
		ret = -ENOMEM;
		goto err1;
	}

	mc->uid = cmd->uid;
	memcpy(&mc->addr, addr, cmd->addr_size);
	ret = rdma_join_multicast(ctx->cm_id, (struct sockaddr *) &mc->addr, mc);
	if (ret)
		goto err2;

	resp.id = mc->id;
	if (copy_to_user((void __user *)(unsigned long) cmd->response,
			 &resp, sizeof(resp))) {
		ret = -EFAULT;
		goto err3;
	}

	mutex_unlock(&file->mut);
	ucma_put_ctx(ctx);
	return 0;

err3:
	rdma_leave_multicast(ctx->cm_id, (struct sockaddr *) &mc->addr);
	ucma_cleanup_mc_events(mc);
err2:
	mutex_lock(&mut);
	idr_remove(&multicast_idr, mc->id);
	mutex_unlock(&mut);
	list_del(&mc->list);
	kfree(mc);
err1:
	mutex_unlock(&file->mut);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_join_ip_multicast(struct ucma_file *file,
				      const char __user *inbuf,
				      int in_len, int out_len)
{
	struct rdma_ucm_join_ip_mcast cmd;
	struct rdma_ucm_join_mcast join_cmd;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	join_cmd.response = cmd.response;
	join_cmd.uid = cmd.uid;
	join_cmd.id = cmd.id;
	join_cmd.addr_size = rdma_addr_size((struct sockaddr *) &cmd.addr);
	join_cmd.reserved = 0;
	memcpy(&join_cmd.addr, &cmd.addr, join_cmd.addr_size);

	return ucma_process_join(file, &join_cmd, out_len);
}

static ssize_t ucma_join_multicast(struct ucma_file *file,
				   const char __user *inbuf,
				   int in_len, int out_len)
{
	struct rdma_ucm_join_mcast cmd;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	return ucma_process_join(file, &cmd, out_len);
}

static ssize_t ucma_leave_multicast(struct ucma_file *file,
				    const char __user *inbuf,
				    int in_len, int out_len)
{
	struct rdma_ucm_destroy_id cmd;
	struct rdma_ucm_destroy_id_resp resp;
	struct ucma_multicast *mc;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	mutex_lock(&mut);
	mc = idr_find(&multicast_idr, cmd.id);
	if (!mc)
		mc = ERR_PTR(-ENOENT);
	else if (mc->ctx->file != file)
		mc = ERR_PTR(-EINVAL);
	else {
		idr_remove(&multicast_idr, mc->id);
		atomic_inc(&mc->ctx->ref);
	}
	mutex_unlock(&mut);

	if (IS_ERR(mc)) {
		ret = PTR_ERR(mc);
		goto out;
	}

	rdma_leave_multicast(mc->ctx->cm_id, (struct sockaddr *) &mc->addr);
	mutex_lock(&mc->ctx->file->mut);
	ucma_cleanup_mc_events(mc);
	list_del(&mc->list);
	mutex_unlock(&mc->ctx->file->mut);

	ucma_put_ctx(mc->ctx);
	resp.events_reported = mc->events_reported;
	kfree(mc);

	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;
out:
	return ret;
}

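/*
 * ucma_migrate_id() moves an id (and its unreported events) from the
 * file it was created on to the calling file, which is what lets
 * userspace hand an id over to a different open instance of the device
 * (a different event channel, in librdmacm terms).
 */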
static void ucma_lock_files(struct ucma_file *file1, struct ucma_file *file2)
{
	/* Acquire mutexes based on pointer comparison to prevent deadlock. */
	if (file1 < file2) {
		mutex_lock(&file1->mut);
		mutex_lock_nested(&file2->mut, SINGLE_DEPTH_NESTING);
	} else {
		mutex_lock(&file2->mut);
		mutex_lock_nested(&file1->mut, SINGLE_DEPTH_NESTING);
	}
}

static void ucma_unlock_files(struct ucma_file *file1, struct ucma_file *file2)
{
	if (file1 < file2) {
		mutex_unlock(&file2->mut);
		mutex_unlock(&file1->mut);
	} else {
		mutex_unlock(&file1->mut);
		mutex_unlock(&file2->mut);
	}
}

static void ucma_move_events(struct ucma_context *ctx, struct ucma_file *file)
{
	struct ucma_event *uevent, *tmp;

	list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list)
		if (uevent->ctx == ctx)
			list_move_tail(&uevent->list, &file->event_list);
}

static ssize_t ucma_migrate_id(struct ucma_file *new_file,
			       const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_migrate_id cmd;
	struct rdma_ucm_migrate_resp resp;
	struct ucma_context *ctx;
	struct fd f;
	struct ucma_file *cur_file;
	int ret = 0;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	/* Get current fd to protect against it being closed */
	f = fdget(cmd.fd);
	if (!f.file)
		return -ENOENT;

	/* Validate current fd and prevent destruction of id. */
	ctx = ucma_get_ctx(f.file->private_data, cmd.id);
	if (IS_ERR(ctx)) {
		ret = PTR_ERR(ctx);
		goto file_put;
	}

	cur_file = ctx->file;
	if (cur_file == new_file) {
		resp.events_reported = ctx->events_reported;
		goto response;
	}

	/*
	 * Migrate events between fd's, maintaining order, and avoiding new
	 * events being added before existing events.
	 */
	ucma_lock_files(cur_file, new_file);
	mutex_lock(&mut);

	list_move_tail(&ctx->list, &new_file->ctx_list);
	ucma_move_events(ctx, new_file);
	ctx->file = new_file;
	resp.events_reported = ctx->events_reported;

	mutex_unlock(&mut);
	ucma_unlock_files(cur_file, new_file);

response:
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;

	ucma_put_ctx(ctx);
file_put:
	fdput(f);
	return ret;
}

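/*
 * write()-based command ABI: each write carries a struct
 * rdma_ucm_cmd_hdr followed by the command payload, and hdr.cmd indexes
 * this table. A rough sketch of a userspace caller (hypothetical buffer
 * handling, for illustration only):
 *
 *	struct rdma_ucm_cmd_hdr *hdr = (void *) buf;
 *	hdr->cmd = RDMA_USER_CM_CMD_CREATE_ID;
 *	hdr->in  = sizeof(struct rdma_ucm_create_id);
 *	hdr->out = sizeof(struct rdma_ucm_create_id_resp);
 *	memcpy(buf + sizeof(*hdr), &create_cmd, hdr->in);
 *	write(fd, buf, sizeof(*hdr) + hdr->in);
 */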
static ssize_t (*ucma_cmd_table[])(struct ucma_file *file,
				   const char __user *inbuf,
				   int in_len, int out_len) = {
	[RDMA_USER_CM_CMD_CREATE_ID]	 = ucma_create_id,
	[RDMA_USER_CM_CMD_DESTROY_ID]	 = ucma_destroy_id,
	[RDMA_USER_CM_CMD_BIND_IP]	 = ucma_bind_ip,
	[RDMA_USER_CM_CMD_RESOLVE_IP]	 = ucma_resolve_ip,
	[RDMA_USER_CM_CMD_RESOLVE_ROUTE] = ucma_resolve_route,
	[RDMA_USER_CM_CMD_QUERY_ROUTE]	 = ucma_query_route,
	[RDMA_USER_CM_CMD_CONNECT]	 = ucma_connect,
	[RDMA_USER_CM_CMD_LISTEN]	 = ucma_listen,
	[RDMA_USER_CM_CMD_ACCEPT]	 = ucma_accept,
	[RDMA_USER_CM_CMD_REJECT]	 = ucma_reject,
	[RDMA_USER_CM_CMD_DISCONNECT]	 = ucma_disconnect,
	[RDMA_USER_CM_CMD_INIT_QP_ATTR]	 = ucma_init_qp_attr,
	[RDMA_USER_CM_CMD_GET_EVENT]	 = ucma_get_event,
	[RDMA_USER_CM_CMD_GET_OPTION]	 = NULL,
	[RDMA_USER_CM_CMD_SET_OPTION]	 = ucma_set_option,
	[RDMA_USER_CM_CMD_NOTIFY]	 = ucma_notify,
	[RDMA_USER_CM_CMD_JOIN_IP_MCAST] = ucma_join_ip_multicast,
	[RDMA_USER_CM_CMD_LEAVE_MCAST]	 = ucma_leave_multicast,
	[RDMA_USER_CM_CMD_MIGRATE_ID]	 = ucma_migrate_id,
	[RDMA_USER_CM_CMD_QUERY]	 = ucma_query,
	[RDMA_USER_CM_CMD_BIND]		 = ucma_bind,
	[RDMA_USER_CM_CMD_RESOLVE_ADDR]	 = ucma_resolve_addr,
	[RDMA_USER_CM_CMD_JOIN_MCAST]	 = ucma_join_multicast
};

static ssize_t ucma_write(struct file *filp, const char __user *buf,
			  size_t len, loff_t *pos)
{
	struct ucma_file *file = filp->private_data;
	struct rdma_ucm_cmd_hdr hdr;
	ssize_t ret;

	if (len < sizeof(hdr))
		return -EINVAL;

	if (copy_from_user(&hdr, buf, sizeof(hdr)))
		return -EFAULT;

	if (hdr.cmd >= ARRAY_SIZE(ucma_cmd_table))
		return -EINVAL;

	if (hdr.in + sizeof(hdr) > len)
		return -EINVAL;

	if (!ucma_cmd_table[hdr.cmd])
		return -ENOSYS;

	ret = ucma_cmd_table[hdr.cmd](file, buf + sizeof(hdr), hdr.in, hdr.out);
	if (!ret)
		ret = len;

	return ret;
}

static unsigned int ucma_poll(struct file *filp, struct poll_table_struct *wait)
{
	struct ucma_file *file = filp->private_data;
	unsigned int mask = 0;

	poll_wait(filp, &file->poll_wait, wait);

	if (!list_empty(&file->event_list))
		mask = POLLIN | POLLRDNORM;

	return mask;
}

/*
 * ucma_open() does not need the BKL:
 *
 *  - no global state is referred to;
 *  - there is no ioctl method to race against;
 *  - no further module initialization is required for open to work
 *    after the device is registered.
 */
static int ucma_open(struct inode *inode, struct file *filp)
{
	struct ucma_file *file;

	file = kmalloc(sizeof *file, GFP_KERNEL);
	if (!file)
		return -ENOMEM;

	INIT_LIST_HEAD(&file->event_list);
	INIT_LIST_HEAD(&file->ctx_list);
	init_waitqueue_head(&file->poll_wait);
	mutex_init(&file->mut);

	filp->private_data = file;
	file->filp = filp;

	return nonseekable_open(inode, filp);
}

static int ucma_close(struct inode *inode, struct file *filp)
{
	struct ucma_file *file = filp->private_data;
	struct ucma_context *ctx, *tmp;

	mutex_lock(&file->mut);
	list_for_each_entry_safe(ctx, tmp, &file->ctx_list, list) {
		mutex_unlock(&file->mut);

		mutex_lock(&mut);
		idr_remove(&ctx_idr, ctx->id);
		mutex_unlock(&mut);

		ucma_free_ctx(ctx);
		mutex_lock(&file->mut);
	}
	mutex_unlock(&file->mut);
	kfree(file);
	return 0;
}

static const struct file_operations ucma_fops = {
	.owner	 = THIS_MODULE,
	.open	 = ucma_open,
	.release = ucma_close,
	.write	 = ucma_write,
	.poll	 = ucma_poll,
	.llseek	 = no_llseek,
};

static struct miscdevice ucma_misc = {
	.minor		= MISC_DYNAMIC_MINOR,
	.name		= "rdma_cm",
	.nodename	= "infiniband/rdma_cm",
	.mode		= 0666,
	.fops		= &ucma_fops,
};

static ssize_t show_abi_version(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	return sprintf(buf, "%d\n", RDMA_USER_CM_ABI_VERSION);
}
static DEVICE_ATTR(abi_version, S_IRUGO, show_abi_version, NULL);

static int __init ucma_init(void)
{
	int ret;

	ret = misc_register(&ucma_misc);
	if (ret)
		return ret;

	ret = device_create_file(ucma_misc.this_device, &dev_attr_abi_version);
	if (ret) {
		printk(KERN_ERR "rdma_ucm: couldn't create abi_version attr\n");
		goto err1;
	}

	ucma_ctl_table_hdr = register_net_sysctl(&init_net, "net/rdma_ucm", ucma_ctl_table);
	if (!ucma_ctl_table_hdr) {
		printk(KERN_ERR "rdma_ucm: couldn't register sysctl paths\n");
		ret = -ENOMEM;
		goto err2;
	}
	return 0;
err2:
	device_remove_file(ucma_misc.this_device, &dev_attr_abi_version);
err1:
	misc_deregister(&ucma_misc);
	return ret;
}

static void __exit ucma_cleanup(void)
{
	unregister_net_sysctl_table(ucma_ctl_table_hdr);
	device_remove_file(ucma_misc.this_device, &dev_attr_abi_version);
	misc_deregister(&ucma_misc);
	idr_destroy(&ctx_idr);
	idr_destroy(&multicast_idr);
}

module_init(ucma_init);
module_exit(ucma_cleanup);