infiniband: Fix up module files that need to include module.h
drivers/infiniband/core/ucma.c
/*
 * Copyright (c) 2005-2006 Intel Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/completion.h>
#include <linux/file.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/idr.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/module.h>

#include <rdma/rdma_user_cm.h>
#include <rdma/ib_marshall.h>
#include <rdma/rdma_cm.h>
#include <rdma/rdma_cm_ib.h>

MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("RDMA Userspace Connection Manager Access");
MODULE_LICENSE("Dual BSD/GPL");

static unsigned int max_backlog = 1024;

static struct ctl_table_header *ucma_ctl_table_hdr;
static ctl_table ucma_ctl_table[] = {
	{
		.procname	= "max_backlog",
		.data		= &max_backlog,
		.maxlen		= sizeof max_backlog,
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ }
};

static struct ctl_path ucma_ctl_path[] = {
	{ .procname = "net" },
	{ .procname = "rdma_ucm" },
	{ }
};

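/*
 * Per-file and per-id bookkeeping: a ucma_file tracks everything created
 * through one open of the device node, a ucma_context wraps a single
 * rdma_cm_id on behalf of userspace, a ucma_multicast records one joined
 * multicast group, and a ucma_event queues a CM event until the process
 * collects it with RDMA_USER_CM_CMD_GET_EVENT.
 */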
struct ucma_file {
	struct mutex		mut;
	struct file		*filp;
	struct list_head	ctx_list;
	struct list_head	event_list;
	wait_queue_head_t	poll_wait;
};

struct ucma_context {
	int			id;
	struct completion	comp;
	atomic_t		ref;
	int			events_reported;
	int			backlog;

	struct ucma_file	*file;
	struct rdma_cm_id	*cm_id;
	u64			uid;

	struct list_head	list;
	struct list_head	mc_list;
};

struct ucma_multicast {
	struct ucma_context	*ctx;
	int			id;
	int			events_reported;

	u64			uid;
	struct list_head	list;
	struct sockaddr_storage	addr;
};

struct ucma_event {
	struct ucma_context	*ctx;
	struct ucma_multicast	*mc;
	struct list_head	list;
	struct rdma_cm_id	*cm_id;
	struct rdma_ucm_event_resp resp;
};

static DEFINE_MUTEX(mut);
static DEFINE_IDR(ctx_idr);
static DEFINE_IDR(multicast_idr);

static inline struct ucma_context *_ucma_find_context(int id,
						       struct ucma_file *file)
{
	struct ucma_context *ctx;

	ctx = idr_find(&ctx_idr, id);
	if (!ctx)
		ctx = ERR_PTR(-ENOENT);
	else if (ctx->file != file)
		ctx = ERR_PTR(-EINVAL);
	return ctx;
}

static struct ucma_context *ucma_get_ctx(struct ucma_file *file, int id)
{
	struct ucma_context *ctx;

	mutex_lock(&mut);
	ctx = _ucma_find_context(id, file);
	if (!IS_ERR(ctx))
		atomic_inc(&ctx->ref);
	mutex_unlock(&mut);
	return ctx;
}

static void ucma_put_ctx(struct ucma_context *ctx)
{
	if (atomic_dec_and_test(&ctx->ref))
		complete(&ctx->comp);
}

static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file)
{
	struct ucma_context *ctx;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	atomic_set(&ctx->ref, 1);
	init_completion(&ctx->comp);
	INIT_LIST_HEAD(&ctx->mc_list);
	ctx->file = file;

	do {
		ret = idr_pre_get(&ctx_idr, GFP_KERNEL);
		if (!ret)
			goto error;

		mutex_lock(&mut);
		ret = idr_get_new(&ctx_idr, ctx, &ctx->id);
		mutex_unlock(&mut);
	} while (ret == -EAGAIN);

	if (ret)
		goto error;

	list_add_tail(&ctx->list, &file->ctx_list);
	return ctx;

error:
	kfree(ctx);
	return NULL;
}

static struct ucma_multicast* ucma_alloc_multicast(struct ucma_context *ctx)
{
	struct ucma_multicast *mc;
	int ret;

	mc = kzalloc(sizeof(*mc), GFP_KERNEL);
	if (!mc)
		return NULL;

	do {
		ret = idr_pre_get(&multicast_idr, GFP_KERNEL);
		if (!ret)
			goto error;

		mutex_lock(&mut);
		ret = idr_get_new(&multicast_idr, mc, &mc->id);
		mutex_unlock(&mut);
	} while (ret == -EAGAIN);

	if (ret)
		goto error;

	mc->ctx = ctx;
	list_add_tail(&mc->list, &ctx->mc_list);
	return mc;

error:
	kfree(mc);
	return NULL;
}

static void ucma_copy_conn_event(struct rdma_ucm_conn_param *dst,
				 struct rdma_conn_param *src)
{
	if (src->private_data_len)
		memcpy(dst->private_data, src->private_data,
		       src->private_data_len);
	dst->private_data_len = src->private_data_len;
	dst->responder_resources = src->responder_resources;
	dst->initiator_depth = src->initiator_depth;
	dst->flow_control = src->flow_control;
	dst->retry_count = src->retry_count;
	dst->rnr_retry_count = src->rnr_retry_count;
	dst->srq = src->srq;
	dst->qp_num = src->qp_num;
}

static void ucma_copy_ud_event(struct rdma_ucm_ud_param *dst,
			       struct rdma_ud_param *src)
{
	if (src->private_data_len)
		memcpy(dst->private_data, src->private_data,
		       src->private_data_len);
	dst->private_data_len = src->private_data_len;
	ib_copy_ah_attr_to_user(&dst->ah_attr, &src->ah_attr);
	dst->qp_num = src->qp_num;
	dst->qkey = src->qkey;
}

static void ucma_set_event_context(struct ucma_context *ctx,
				   struct rdma_cm_event *event,
				   struct ucma_event *uevent)
{
	uevent->ctx = ctx;
	switch (event->event) {
	case RDMA_CM_EVENT_MULTICAST_JOIN:
	case RDMA_CM_EVENT_MULTICAST_ERROR:
		uevent->mc = (struct ucma_multicast *)
			     event->param.ud.private_data;
		uevent->resp.uid = uevent->mc->uid;
		uevent->resp.id = uevent->mc->id;
		break;
	default:
		uevent->resp.uid = ctx->uid;
		uevent->resp.id = ctx->id;
		break;
	}
}

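/*
 * rdma_cm callback: package the event for userspace and queue it on the
 * owning file's event list.  Returning non-zero for a CONNECT_REQUEST that
 * cannot be queued tells the rdma_cm core to destroy the new id.
 */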
static int ucma_event_handler(struct rdma_cm_id *cm_id,
			      struct rdma_cm_event *event)
{
	struct ucma_event *uevent;
	struct ucma_context *ctx = cm_id->context;
	int ret = 0;

	uevent = kzalloc(sizeof(*uevent), GFP_KERNEL);
	if (!uevent)
		return event->event == RDMA_CM_EVENT_CONNECT_REQUEST;

	uevent->cm_id = cm_id;
	ucma_set_event_context(ctx, event, uevent);
	uevent->resp.event = event->event;
	uevent->resp.status = event->status;
	if (cm_id->ps == RDMA_PS_UDP || cm_id->ps == RDMA_PS_IPOIB)
		ucma_copy_ud_event(&uevent->resp.param.ud, &event->param.ud);
	else
		ucma_copy_conn_event(&uevent->resp.param.conn,
				     &event->param.conn);

	mutex_lock(&ctx->file->mut);
	if (event->event == RDMA_CM_EVENT_CONNECT_REQUEST) {
		if (!ctx->backlog) {
			ret = -ENOMEM;
			kfree(uevent);
			goto out;
		}
		ctx->backlog--;
	} else if (!ctx->uid) {
		/*
		 * We ignore events for new connections until userspace has set
		 * their context.  This can only happen if an error occurs on a
		 * new connection before the user accepts it.  This is okay,
		 * since the accept will just fail later.
		 */
		kfree(uevent);
		goto out;
	}

	list_add_tail(&uevent->list, &ctx->file->event_list);
	wake_up_interruptible(&ctx->file->poll_wait);
out:
	mutex_unlock(&ctx->file->mut);
	return ret;
}

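/*
 * Hand the oldest queued event to userspace.  Blocks unless the file was
 * opened O_NONBLOCK; a CONNECT_REQUEST event also allocates a new context
 * for the incoming rdma_cm_id before the event is reported.
 */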
static ssize_t ucma_get_event(struct ucma_file *file, const char __user *inbuf,
			      int in_len, int out_len)
{
	struct ucma_context *ctx;
	struct rdma_ucm_get_event cmd;
	struct ucma_event *uevent;
	int ret = 0;
	DEFINE_WAIT(wait);

	if (out_len < sizeof uevent->resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	mutex_lock(&file->mut);
	while (list_empty(&file->event_list)) {
		mutex_unlock(&file->mut);

		if (file->filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		if (wait_event_interruptible(file->poll_wait,
					     !list_empty(&file->event_list)))
			return -ERESTARTSYS;

		mutex_lock(&file->mut);
	}

	uevent = list_entry(file->event_list.next, struct ucma_event, list);

	if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST) {
		ctx = ucma_alloc_ctx(file);
		if (!ctx) {
			ret = -ENOMEM;
			goto done;
		}
		uevent->ctx->backlog++;
		ctx->cm_id = uevent->cm_id;
		ctx->cm_id->context = ctx;
		uevent->resp.id = ctx->id;
	}

	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &uevent->resp, sizeof uevent->resp)) {
		ret = -EFAULT;
		goto done;
	}

	list_del(&uevent->list);
	uevent->ctx->events_reported++;
	if (uevent->mc)
		uevent->mc->events_reported++;
	kfree(uevent);
done:
	mutex_unlock(&file->mut);
	return ret;
}

static int ucma_get_qp_type(struct rdma_ucm_create_id *cmd, enum ib_qp_type *qp_type)
{
	switch (cmd->ps) {
	case RDMA_PS_TCP:
		*qp_type = IB_QPT_RC;
		return 0;
	case RDMA_PS_UDP:
	case RDMA_PS_IPOIB:
		*qp_type = IB_QPT_UD;
		return 0;
	default:
		return -EINVAL;
	}
}

static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf,
			      int in_len, int out_len)
{
	struct rdma_ucm_create_id cmd;
	struct rdma_ucm_create_id_resp resp;
	struct ucma_context *ctx;
	enum ib_qp_type qp_type;
	int ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ret = ucma_get_qp_type(&cmd, &qp_type);
	if (ret)
		return ret;

	mutex_lock(&file->mut);
	ctx = ucma_alloc_ctx(file);
	mutex_unlock(&file->mut);
	if (!ctx)
		return -ENOMEM;

	ctx->uid = cmd.uid;
	ctx->cm_id = rdma_create_id(ucma_event_handler, ctx, cmd.ps, qp_type);
	if (IS_ERR(ctx->cm_id)) {
		ret = PTR_ERR(ctx->cm_id);
		goto err1;
	}

	resp.id = ctx->id;
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp))) {
		ret = -EFAULT;
		goto err2;
	}
	return 0;

err2:
	rdma_destroy_id(ctx->cm_id);
err1:
	mutex_lock(&mut);
	idr_remove(&ctx_idr, ctx->id);
	mutex_unlock(&mut);
	kfree(ctx);
	return ret;
}

static void ucma_cleanup_multicast(struct ucma_context *ctx)
{
	struct ucma_multicast *mc, *tmp;

	mutex_lock(&mut);
	list_for_each_entry_safe(mc, tmp, &ctx->mc_list, list) {
		list_del(&mc->list);
		idr_remove(&multicast_idr, mc->id);
		kfree(mc);
	}
	mutex_unlock(&mut);
}

static void ucma_cleanup_events(struct ucma_context *ctx)
{
	struct ucma_event *uevent, *tmp;

	list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list) {
		if (uevent->ctx != ctx)
			continue;

		list_del(&uevent->list);

		/* clear incoming connections. */
		if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST)
			rdma_destroy_id(uevent->cm_id);

		kfree(uevent);
	}
}

static void ucma_cleanup_mc_events(struct ucma_multicast *mc)
{
	struct ucma_event *uevent, *tmp;

	list_for_each_entry_safe(uevent, tmp, &mc->ctx->file->event_list, list) {
		if (uevent->mc != mc)
			continue;

		list_del(&uevent->list);
		kfree(uevent);
	}
}

static int ucma_free_ctx(struct ucma_context *ctx)
{
	int events_reported;

	/* No new events will be generated after destroying the id. */
	rdma_destroy_id(ctx->cm_id);

	ucma_cleanup_multicast(ctx);

	/* Cleanup events not yet reported to the user. */
	mutex_lock(&ctx->file->mut);
	ucma_cleanup_events(ctx);
	list_del(&ctx->list);
	mutex_unlock(&ctx->file->mut);

	events_reported = ctx->events_reported;
	kfree(ctx);
	return events_reported;
}

static ssize_t ucma_destroy_id(struct ucma_file *file, const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_destroy_id cmd;
	struct rdma_ucm_destroy_id_resp resp;
	struct ucma_context *ctx;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	mutex_lock(&mut);
	ctx = _ucma_find_context(cmd.id, file);
	if (!IS_ERR(ctx))
		idr_remove(&ctx_idr, ctx->id);
	mutex_unlock(&mut);

	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ucma_put_ctx(ctx);
	wait_for_completion(&ctx->comp);
	resp.events_reported = ucma_free_ctx(ctx);

	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;

	return ret;
}

static ssize_t ucma_bind_addr(struct ucma_file *file, const char __user *inbuf,
			      int in_len, int out_len)
{
	struct rdma_ucm_bind_addr cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_resolve_addr(struct ucma_file *file,
				 const char __user *inbuf,
				 int in_len, int out_len)
{
	struct rdma_ucm_resolve_addr cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr,
				(struct sockaddr *) &cmd.dst_addr,
				cmd.timeout_ms);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_resolve_route(struct ucma_file *file,
				  const char __user *inbuf,
				  int in_len, int out_len)
{
	struct rdma_ucm_resolve_route cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_resolve_route(ctx->cm_id, cmd.timeout_ms);
	ucma_put_ctx(ctx);
	return ret;
}

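/*
 * The route reported back to userspace depends on the transport: plain IB,
 * RoCE (IBoE, where the GIDs are derived from the MAC and VLAN), or iWARP,
 * which only has source and destination GIDs to report.
 */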
static void ucma_copy_ib_route(struct rdma_ucm_query_route_resp *resp,
			       struct rdma_route *route)
{
	struct rdma_dev_addr *dev_addr;

	resp->num_paths = route->num_paths;
	switch (route->num_paths) {
	case 0:
		dev_addr = &route->addr.dev_addr;
		rdma_addr_get_dgid(dev_addr,
				   (union ib_gid *) &resp->ib_route[0].dgid);
		rdma_addr_get_sgid(dev_addr,
				   (union ib_gid *) &resp->ib_route[0].sgid);
		resp->ib_route[0].pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
		break;
	case 2:
		ib_copy_path_rec_to_user(&resp->ib_route[1],
					 &route->path_rec[1]);
		/* fall through */
	case 1:
		ib_copy_path_rec_to_user(&resp->ib_route[0],
					 &route->path_rec[0]);
		break;
	default:
		break;
	}
}

static void ucma_copy_iboe_route(struct rdma_ucm_query_route_resp *resp,
				 struct rdma_route *route)
{
	struct rdma_dev_addr *dev_addr;
	struct net_device *dev;
	u16 vid = 0;

	resp->num_paths = route->num_paths;
	switch (route->num_paths) {
	case 0:
		dev_addr = &route->addr.dev_addr;
		dev = dev_get_by_index(&init_net, dev_addr->bound_dev_if);
		if (dev) {
			vid = rdma_vlan_dev_vlan_id(dev);
			dev_put(dev);
		}

		iboe_mac_vlan_to_ll((union ib_gid *) &resp->ib_route[0].dgid,
				    dev_addr->dst_dev_addr, vid);
		iboe_addr_get_sgid(dev_addr,
				   (union ib_gid *) &resp->ib_route[0].sgid);
		resp->ib_route[0].pkey = cpu_to_be16(0xffff);
		break;
	case 2:
		ib_copy_path_rec_to_user(&resp->ib_route[1],
					 &route->path_rec[1]);
		/* fall through */
	case 1:
		ib_copy_path_rec_to_user(&resp->ib_route[0],
					 &route->path_rec[0]);
		break;
	default:
		break;
	}
}

static void ucma_copy_iw_route(struct rdma_ucm_query_route_resp *resp,
			       struct rdma_route *route)
{
	struct rdma_dev_addr *dev_addr;

	dev_addr = &route->addr.dev_addr;
	rdma_addr_get_dgid(dev_addr, (union ib_gid *) &resp->ib_route[0].dgid);
	rdma_addr_get_sgid(dev_addr, (union ib_gid *) &resp->ib_route[0].sgid);
}

static ssize_t ucma_query_route(struct ucma_file *file,
				const char __user *inbuf,
				int in_len, int out_len)
{
	struct rdma_ucm_query_route cmd;
	struct rdma_ucm_query_route_resp resp;
	struct ucma_context *ctx;
	struct sockaddr *addr;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	memset(&resp, 0, sizeof resp);
	addr = (struct sockaddr *) &ctx->cm_id->route.addr.src_addr;
	memcpy(&resp.src_addr, addr, addr->sa_family == AF_INET ?
				     sizeof(struct sockaddr_in) :
				     sizeof(struct sockaddr_in6));
	addr = (struct sockaddr *) &ctx->cm_id->route.addr.dst_addr;
	memcpy(&resp.dst_addr, addr, addr->sa_family == AF_INET ?
				     sizeof(struct sockaddr_in) :
				     sizeof(struct sockaddr_in6));
	if (!ctx->cm_id->device)
		goto out;

	resp.node_guid = (__force __u64) ctx->cm_id->device->node_guid;
	resp.port_num = ctx->cm_id->port_num;
	switch (rdma_node_get_transport(ctx->cm_id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		switch (rdma_port_get_link_layer(ctx->cm_id->device,
			ctx->cm_id->port_num)) {
		case IB_LINK_LAYER_INFINIBAND:
			ucma_copy_ib_route(&resp, &ctx->cm_id->route);
			break;
		case IB_LINK_LAYER_ETHERNET:
			ucma_copy_iboe_route(&resp, &ctx->cm_id->route);
			break;
		default:
			break;
		}
		break;
	case RDMA_TRANSPORT_IWARP:
		ucma_copy_iw_route(&resp, &ctx->cm_id->route);
		break;
	default:
		break;
	}

out:
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;

	ucma_put_ctx(ctx);
	return ret;
}

static void ucma_copy_conn_param(struct rdma_conn_param *dst,
				 struct rdma_ucm_conn_param *src)
{
	dst->private_data = src->private_data;
	dst->private_data_len = src->private_data_len;
	dst->responder_resources = src->responder_resources;
	dst->initiator_depth = src->initiator_depth;
	dst->flow_control = src->flow_control;
	dst->retry_count = src->retry_count;
	dst->rnr_retry_count = src->rnr_retry_count;
	dst->srq = src->srq;
	dst->qp_num = src->qp_num;
}

static ssize_t ucma_connect(struct ucma_file *file, const char __user *inbuf,
			    int in_len, int out_len)
{
	struct rdma_ucm_connect cmd;
	struct rdma_conn_param conn_param;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if (!cmd.conn_param.valid)
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ucma_copy_conn_param(&conn_param, &cmd.conn_param);
	ret = rdma_connect(ctx->cm_id, &conn_param);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_listen(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_listen cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ctx->backlog = cmd.backlog > 0 && cmd.backlog < max_backlog ?
		       cmd.backlog : max_backlog;
	ret = rdma_listen(ctx->cm_id, ctx->backlog);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_accept(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_accept cmd;
	struct rdma_conn_param conn_param;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	if (cmd.conn_param.valid) {
		ctx->uid = cmd.uid;
		ucma_copy_conn_param(&conn_param, &cmd.conn_param);
		ret = rdma_accept(ctx->cm_id, &conn_param);
	} else
		ret = rdma_accept(ctx->cm_id, NULL);

	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_reject(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_reject cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_reject(ctx->cm_id, cmd.private_data, cmd.private_data_len);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_disconnect(struct ucma_file *file, const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_disconnect cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_disconnect(ctx->cm_id);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_init_qp_attr(struct ucma_file *file,
				 const char __user *inbuf,
				 int in_len, int out_len)
{
	struct rdma_ucm_init_qp_attr cmd;
	struct ib_uverbs_qp_attr resp;
	struct ucma_context *ctx;
	struct ib_qp_attr qp_attr;
	int ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	resp.qp_attr_mask = 0;
	memset(&qp_attr, 0, sizeof qp_attr);
	qp_attr.qp_state = cmd.qp_state;
	ret = rdma_init_qp_attr(ctx->cm_id, &qp_attr, &resp.qp_attr_mask);
	if (ret)
		goto out;

	ib_copy_qp_attr_to_user(&resp, &qp_attr);
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;

out:
	ucma_put_ctx(ctx);
	return ret;
}

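/*
 * Option handling is split by level: RDMA_OPTION_ID options act on the
 * rdma_cm_id itself (TOS, address reuse), while RDMA_OPTION_IB options
 * let userspace supply explicit IB path records.
 */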
static int ucma_set_option_id(struct ucma_context *ctx, int optname,
			      void *optval, size_t optlen)
{
	int ret = 0;

	switch (optname) {
	case RDMA_OPTION_ID_TOS:
		if (optlen != sizeof(u8)) {
			ret = -EINVAL;
			break;
		}
		rdma_set_service_type(ctx->cm_id, *((u8 *) optval));
		break;
	case RDMA_OPTION_ID_REUSEADDR:
		if (optlen != sizeof(int)) {
			ret = -EINVAL;
			break;
		}
		ret = rdma_set_reuseaddr(ctx->cm_id, *((int *) optval) ? 1 : 0);
		break;
	default:
		ret = -ENOSYS;
	}

	return ret;
}

static int ucma_set_ib_path(struct ucma_context *ctx,
			    struct ib_path_rec_data *path_data, size_t optlen)
{
	struct ib_sa_path_rec sa_path;
	struct rdma_cm_event event;
	int ret;

	if (optlen % sizeof(*path_data))
		return -EINVAL;

	for (; optlen; optlen -= sizeof(*path_data), path_data++) {
		if (path_data->flags == (IB_PATH_GMP | IB_PATH_PRIMARY |
					 IB_PATH_BIDIRECTIONAL))
			break;
	}

	if (!optlen)
		return -EINVAL;

	ib_sa_unpack_path(path_data->path_rec, &sa_path);
	ret = rdma_set_ib_paths(ctx->cm_id, &sa_path, 1);
	if (ret)
		return ret;

	memset(&event, 0, sizeof event);
	event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
	return ucma_event_handler(ctx->cm_id, &event);
}

static int ucma_set_option_ib(struct ucma_context *ctx, int optname,
			      void *optval, size_t optlen)
{
	int ret;

	switch (optname) {
	case RDMA_OPTION_IB_PATH:
		ret = ucma_set_ib_path(ctx, optval, optlen);
		break;
	default:
		ret = -ENOSYS;
	}

	return ret;
}

static int ucma_set_option_level(struct ucma_context *ctx, int level,
				 int optname, void *optval, size_t optlen)
{
	int ret;

	switch (level) {
	case RDMA_OPTION_ID:
		ret = ucma_set_option_id(ctx, optname, optval, optlen);
		break;
	case RDMA_OPTION_IB:
		ret = ucma_set_option_ib(ctx, optname, optval, optlen);
		break;
	default:
		ret = -ENOSYS;
	}

	return ret;
}

static ssize_t ucma_set_option(struct ucma_file *file, const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_set_option cmd;
	struct ucma_context *ctx;
	void *optval;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	optval = kmalloc(cmd.optlen, GFP_KERNEL);
	if (!optval) {
		ret = -ENOMEM;
		goto out1;
	}

	if (copy_from_user(optval, (void __user *) (unsigned long) cmd.optval,
			   cmd.optlen)) {
		ret = -EFAULT;
		goto out2;
	}

	ret = ucma_set_option_level(ctx, cmd.level, cmd.optname, optval,
				    cmd.optlen);
out2:
	kfree(optval);
out1:
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_notify(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_notify cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_notify(ctx->cm_id, (enum ib_event_type) cmd.event);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_join_multicast(struct ucma_file *file,
				   const char __user *inbuf,
				   int in_len, int out_len)
{
	struct rdma_ucm_join_mcast cmd;
	struct rdma_ucm_create_id_resp resp;
	struct ucma_context *ctx;
	struct ucma_multicast *mc;
	int ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	mutex_lock(&file->mut);
	mc = ucma_alloc_multicast(ctx);
	if (!mc) {
		ret = -ENOMEM;
		goto err1;
	}

	mc->uid = cmd.uid;
	memcpy(&mc->addr, &cmd.addr, sizeof cmd.addr);
	ret = rdma_join_multicast(ctx->cm_id, (struct sockaddr *) &mc->addr, mc);
	if (ret)
		goto err2;

	resp.id = mc->id;
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp))) {
		ret = -EFAULT;
		goto err3;
	}

	mutex_unlock(&file->mut);
	ucma_put_ctx(ctx);
	return 0;

err3:
	rdma_leave_multicast(ctx->cm_id, (struct sockaddr *) &mc->addr);
	ucma_cleanup_mc_events(mc);
err2:
	mutex_lock(&mut);
	idr_remove(&multicast_idr, mc->id);
	mutex_unlock(&mut);
	list_del(&mc->list);
	kfree(mc);
err1:
	mutex_unlock(&file->mut);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_leave_multicast(struct ucma_file *file,
				    const char __user *inbuf,
				    int in_len, int out_len)
{
	struct rdma_ucm_destroy_id cmd;
	struct rdma_ucm_destroy_id_resp resp;
	struct ucma_multicast *mc;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	mutex_lock(&mut);
	mc = idr_find(&multicast_idr, cmd.id);
	if (!mc)
		mc = ERR_PTR(-ENOENT);
	else if (mc->ctx->file != file)
		mc = ERR_PTR(-EINVAL);
	else {
		idr_remove(&multicast_idr, mc->id);
		atomic_inc(&mc->ctx->ref);
	}
	mutex_unlock(&mut);

	if (IS_ERR(mc)) {
		ret = PTR_ERR(mc);
		goto out;
	}

	rdma_leave_multicast(mc->ctx->cm_id, (struct sockaddr *) &mc->addr);
	mutex_lock(&mc->ctx->file->mut);
	ucma_cleanup_mc_events(mc);
	list_del(&mc->list);
	mutex_unlock(&mc->ctx->file->mut);

	ucma_put_ctx(mc->ctx);
	resp.events_reported = mc->events_reported;
	kfree(mc);

	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;
out:
	return ret;
}

static void ucma_lock_files(struct ucma_file *file1, struct ucma_file *file2)
{
	/* Acquire mutex's based on pointer comparison to prevent deadlock. */
	if (file1 < file2) {
		mutex_lock(&file1->mut);
		mutex_lock(&file2->mut);
	} else {
		mutex_lock(&file2->mut);
		mutex_lock(&file1->mut);
	}
}

static void ucma_unlock_files(struct ucma_file *file1, struct ucma_file *file2)
{
	if (file1 < file2) {
		mutex_unlock(&file2->mut);
		mutex_unlock(&file1->mut);
	} else {
		mutex_unlock(&file1->mut);
		mutex_unlock(&file2->mut);
	}
}

static void ucma_move_events(struct ucma_context *ctx, struct ucma_file *file)
{
	struct ucma_event *uevent, *tmp;

	list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list)
		if (uevent->ctx == ctx)
			list_move_tail(&uevent->list, &file->event_list);
}

static ssize_t ucma_migrate_id(struct ucma_file *new_file,
			       const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_migrate_id cmd;
	struct rdma_ucm_migrate_resp resp;
	struct ucma_context *ctx;
	struct file *filp;
	struct ucma_file *cur_file;
	int ret = 0;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	/* Get current fd to protect against it being closed */
	filp = fget(cmd.fd);
	if (!filp)
		return -ENOENT;

	/* Validate current fd and prevent destruction of id. */
	ctx = ucma_get_ctx(filp->private_data, cmd.id);
	if (IS_ERR(ctx)) {
		ret = PTR_ERR(ctx);
		goto file_put;
	}

	cur_file = ctx->file;
	if (cur_file == new_file) {
		resp.events_reported = ctx->events_reported;
		goto response;
	}

	/*
	 * Migrate events between fd's, maintaining order, and avoiding new
	 * events being added before existing events.
	 */
	ucma_lock_files(cur_file, new_file);
	mutex_lock(&mut);

	list_move_tail(&ctx->list, &new_file->ctx_list);
	ucma_move_events(ctx, new_file);
	ctx->file = new_file;
	resp.events_reported = ctx->events_reported;

	mutex_unlock(&mut);
	ucma_unlock_files(cur_file, new_file);

response:
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;

	ucma_put_ctx(ctx);
file_put:
	fput(filp);
	return ret;
}

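/*
 * Dispatch table for ucma_write(): the rdma_ucm_cmd_hdr written by userspace
 * carries the index into this array along with the input and output sizes.
 */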
static ssize_t (*ucma_cmd_table[])(struct ucma_file *file,
				   const char __user *inbuf,
				   int in_len, int out_len) = {
	[RDMA_USER_CM_CMD_CREATE_ID]	= ucma_create_id,
	[RDMA_USER_CM_CMD_DESTROY_ID]	= ucma_destroy_id,
	[RDMA_USER_CM_CMD_BIND_ADDR]	= ucma_bind_addr,
	[RDMA_USER_CM_CMD_RESOLVE_ADDR]	= ucma_resolve_addr,
	[RDMA_USER_CM_CMD_RESOLVE_ROUTE]= ucma_resolve_route,
	[RDMA_USER_CM_CMD_QUERY_ROUTE]	= ucma_query_route,
	[RDMA_USER_CM_CMD_CONNECT]	= ucma_connect,
	[RDMA_USER_CM_CMD_LISTEN]	= ucma_listen,
	[RDMA_USER_CM_CMD_ACCEPT]	= ucma_accept,
	[RDMA_USER_CM_CMD_REJECT]	= ucma_reject,
	[RDMA_USER_CM_CMD_DISCONNECT]	= ucma_disconnect,
	[RDMA_USER_CM_CMD_INIT_QP_ATTR]	= ucma_init_qp_attr,
	[RDMA_USER_CM_CMD_GET_EVENT]	= ucma_get_event,
	[RDMA_USER_CM_CMD_GET_OPTION]	= NULL,
	[RDMA_USER_CM_CMD_SET_OPTION]	= ucma_set_option,
	[RDMA_USER_CM_CMD_NOTIFY]	= ucma_notify,
	[RDMA_USER_CM_CMD_JOIN_MCAST]	= ucma_join_multicast,
	[RDMA_USER_CM_CMD_LEAVE_MCAST]	= ucma_leave_multicast,
	[RDMA_USER_CM_CMD_MIGRATE_ID]	= ucma_migrate_id
};

static ssize_t ucma_write(struct file *filp, const char __user *buf,
			  size_t len, loff_t *pos)
{
	struct ucma_file *file = filp->private_data;
	struct rdma_ucm_cmd_hdr hdr;
	ssize_t ret;

	if (len < sizeof(hdr))
		return -EINVAL;

	if (copy_from_user(&hdr, buf, sizeof(hdr)))
		return -EFAULT;

	if (hdr.cmd < 0 || hdr.cmd >= ARRAY_SIZE(ucma_cmd_table))
		return -EINVAL;

	if (hdr.in + sizeof(hdr) > len)
		return -EINVAL;

	if (!ucma_cmd_table[hdr.cmd])
		return -ENOSYS;

	ret = ucma_cmd_table[hdr.cmd](file, buf + sizeof(hdr), hdr.in, hdr.out);
	if (!ret)
		ret = len;

	return ret;
}

static unsigned int ucma_poll(struct file *filp, struct poll_table_struct *wait)
{
	struct ucma_file *file = filp->private_data;
	unsigned int mask = 0;

	poll_wait(filp, &file->poll_wait, wait);

	if (!list_empty(&file->event_list))
		mask = POLLIN | POLLRDNORM;

	return mask;
}

/*
 * ucma_open() does not need the BKL:
 *
 *  - no global state is referred to;
 *  - there is no ioctl method to race against;
 *  - no further module initialization is required for open to work
 *    after the device is registered.
 */
static int ucma_open(struct inode *inode, struct file *filp)
{
	struct ucma_file *file;

	file = kmalloc(sizeof *file, GFP_KERNEL);
	if (!file)
		return -ENOMEM;

	INIT_LIST_HEAD(&file->event_list);
	INIT_LIST_HEAD(&file->ctx_list);
	init_waitqueue_head(&file->poll_wait);
	mutex_init(&file->mut);

	filp->private_data = file;
	file->filp = filp;

	return nonseekable_open(inode, filp);
}

static int ucma_close(struct inode *inode, struct file *filp)
{
	struct ucma_file *file = filp->private_data;
	struct ucma_context *ctx, *tmp;

	mutex_lock(&file->mut);
	list_for_each_entry_safe(ctx, tmp, &file->ctx_list, list) {
		mutex_unlock(&file->mut);

		mutex_lock(&mut);
		idr_remove(&ctx_idr, ctx->id);
		mutex_unlock(&mut);

		ucma_free_ctx(ctx);
		mutex_lock(&file->mut);
	}
	mutex_unlock(&file->mut);
	kfree(file);
	return 0;
}

static const struct file_operations ucma_fops = {
	.owner	 = THIS_MODULE,
	.open	 = ucma_open,
	.release = ucma_close,
	.write	 = ucma_write,
	.poll	 = ucma_poll,
	.llseek	 = no_llseek,
};

static struct miscdevice ucma_misc = {
	.minor		= MISC_DYNAMIC_MINOR,
	.name		= "rdma_cm",
	.nodename	= "infiniband/rdma_cm",
	.mode		= 0666,
	.fops		= &ucma_fops,
};

static ssize_t show_abi_version(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	return sprintf(buf, "%d\n", RDMA_USER_CM_ABI_VERSION);
}
static DEVICE_ATTR(abi_version, S_IRUGO, show_abi_version, NULL);

static int __init ucma_init(void)
{
	int ret;

	ret = misc_register(&ucma_misc);
	if (ret)
		return ret;

	ret = device_create_file(ucma_misc.this_device, &dev_attr_abi_version);
	if (ret) {
		printk(KERN_ERR "rdma_ucm: couldn't create abi_version attr\n");
		goto err1;
	}

	ucma_ctl_table_hdr = register_sysctl_paths(ucma_ctl_path, ucma_ctl_table);
	if (!ucma_ctl_table_hdr) {
		printk(KERN_ERR "rdma_ucm: couldn't register sysctl paths\n");
		ret = -ENOMEM;
		goto err2;
	}
	return 0;
err2:
	device_remove_file(ucma_misc.this_device, &dev_attr_abi_version);
err1:
	misc_deregister(&ucma_misc);
	return ret;
}

static void __exit ucma_cleanup(void)
{
	unregister_sysctl_table(ucma_ctl_table_hdr);
	device_remove_file(ucma_misc.this_device, &dev_attr_abi_version);
	misc_deregister(&ucma_misc);
	idr_destroy(&ctx_idr);
}

module_init(ucma_init);
module_exit(ucma_cleanup);
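
For orientation, here is a minimal userspace sketch of the write() ABI implemented above: each request is a struct rdma_ucm_cmd_hdr followed by hdr.in bytes of command payload, and the kernel copies the reply through the user-supplied response pointer. It is only an illustration against the structures declared in <rdma/rdma_user_cm.h> (assumed to be installed); the fallback RDMA_PS_TCP definition and the lack of any further cleanup are hypothetical conveniences, not part of this file.

#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <rdma/rdma_user_cm.h>

#ifndef RDMA_PS_TCP
#define RDMA_PS_TCP 0x0106	/* historical port-space value; only a fallback for old headers */
#endif

int main(void)
{
	struct rdma_ucm_create_id_resp resp;
	struct {
		struct rdma_ucm_cmd_hdr hdr;
		struct rdma_ucm_create_id cmd;
	} req;
	int fd;

	fd = open("/dev/infiniband/rdma_cm", O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&req, 0, sizeof req);
	memset(&resp, 0, sizeof resp);
	req.hdr.cmd = RDMA_USER_CM_CMD_CREATE_ID;	/* index into ucma_cmd_table */
	req.hdr.in = sizeof req.cmd;			/* payload bytes after the header */
	req.hdr.out = sizeof resp;			/* size of the response buffer */
	req.cmd.uid = 1;				/* opaque cookie echoed back in events */
	req.cmd.ps = RDMA_PS_TCP;			/* mapped to IB_QPT_RC by ucma_get_qp_type() */
	req.cmd.response = (uintptr_t) &resp;		/* kernel copies the new id here */

	if (write(fd, &req, sizeof req) != (ssize_t) sizeof req) {
		perror("RDMA_USER_CM_CMD_CREATE_ID");
		close(fd);
		return 1;
	}
	printf("created rdma_cm id %u\n", resp.id);
	close(fd);
	return 0;
}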