Commit | Line | Data |
---|---|---|
bc38a6ab RD |
1 | /* |
2 | * Copyright (c) 2005 Topspin Communications. All rights reserved. | |
33b9b3ee | 3 | * Copyright (c) 2005, 2006 Cisco Systems. All rights reserved. |
2a1d9b7f RD |
4 | * Copyright (c) 2005 Mellanox Technologies. All rights reserved. |
5 | * Copyright (c) 2005 Voltaire, Inc. All rights reserved. | |
67cdb40c | 6 | * Copyright (c) 2005 PathScale, Inc. All rights reserved. |
bc38a6ab RD |
7 | * |
8 | * This software is available to you under a choice of one of two | |
9 | * licenses. You may choose to be licensed under the terms of the GNU | |
10 | * General Public License (GPL) Version 2, available from the file | |
11 | * COPYING in the main directory of this source tree, or the | |
12 | * OpenIB.org BSD license below: | |
13 | * | |
14 | * Redistribution and use in source and binary forms, with or | |
15 | * without modification, are permitted provided that the following | |
16 | * conditions are met: | |
17 | * | |
18 | * - Redistributions of source code must retain the above | |
19 | * copyright notice, this list of conditions and the following | |
20 | * disclaimer. | |
21 | * | |
22 | * - Redistributions in binary form must reproduce the above | |
23 | * copyright notice, this list of conditions and the following | |
24 | * disclaimer in the documentation and/or other materials | |
25 | * provided with the distribution. | |
26 | * | |
27 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | |
28 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | |
29 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | |
30 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | |
31 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | |
32 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | |
33 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | |
34 | * SOFTWARE. | |
35 | * | |
36 | * $Id: uverbs_main.c 2733 2005-06-28 19:14:34Z roland $ | |
37 | */ | |
38 | ||
39 | #include <linux/module.h> | |
40 | #include <linux/init.h> | |
41 | #include <linux/device.h> | |
42 | #include <linux/err.h> | |
43 | #include <linux/fs.h> | |
44 | #include <linux/poll.h> | |
45 | #include <linux/file.h> | |
46 | #include <linux/mount.h> | |
70a30e16 | 47 | #include <linux/cdev.h> |
bc38a6ab RD |
48 | |
49 | #include <asm/uaccess.h> | |
50 | ||
51 | #include "uverbs.h" | |
52 | ||
MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand userspace verbs access");
MODULE_LICENSE("Dual BSD/GPL");

/* Magic number for the anonymous event-file pseudo-filesystem ("IBev"). */
#define INFINIBANDEVENTFS_MAGIC	0x49426576

enum {
	IB_UVERBS_MAJOR       = 231,	/* fixed char-device major */
	IB_UVERBS_BASE_MINOR  = 192,	/* first minor used for uverbs */
	IB_UVERBS_MAX_DEVICES = 32	/* at most one cdev per IB device */
};

#define IB_UVERBS_BASE_DEV	MKDEV(IB_UVERBS_MAJOR, IB_UVERBS_BASE_MINOR)

static struct class *uverbs_class;

/*
 * Single mutex serializing all lookups/insertions/removals in the
 * per-object-type IDR tables below.
 */
DEFINE_MUTEX(ib_uverbs_idr_mutex);
DEFINE_IDR(ib_uverbs_pd_idr);
DEFINE_IDR(ib_uverbs_mr_idr);
DEFINE_IDR(ib_uverbs_mw_idr);
DEFINE_IDR(ib_uverbs_ah_idr);
DEFINE_IDR(ib_uverbs_cq_idr);
DEFINE_IDR(ib_uverbs_qp_idr);
DEFINE_IDR(ib_uverbs_srq_idr);

/* Protects dev_map and dev_table; initialized in ib_uverbs_init(). */
static spinlock_t map_lock;
static struct ib_uverbs_device *dev_table[IB_UVERBS_MAX_DEVICES];
static DECLARE_BITMAP(dev_map, IB_UVERBS_MAX_DEVICES);
81 | ||
/*
 * Dispatch table for userspace verbs commands, indexed by the command
 * number from struct ib_uverbs_cmd_hdr.  A NULL slot means the command
 * is not implemented; ib_uverbs_write() checks this before dispatching.
 */
static ssize_t (*uverbs_cmd_table[])(struct ib_uverbs_file *file,
				     const char __user *buf, int in_len,
				     int out_len) = {
	[IB_USER_VERBS_CMD_GET_CONTEXT]   	= ib_uverbs_get_context,
	[IB_USER_VERBS_CMD_QUERY_DEVICE]  	= ib_uverbs_query_device,
	[IB_USER_VERBS_CMD_QUERY_PORT]    	= ib_uverbs_query_port,
	[IB_USER_VERBS_CMD_ALLOC_PD]      	= ib_uverbs_alloc_pd,
	[IB_USER_VERBS_CMD_DEALLOC_PD]    	= ib_uverbs_dealloc_pd,
	[IB_USER_VERBS_CMD_REG_MR]        	= ib_uverbs_reg_mr,
	[IB_USER_VERBS_CMD_DEREG_MR]      	= ib_uverbs_dereg_mr,
	[IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL] = ib_uverbs_create_comp_channel,
	[IB_USER_VERBS_CMD_CREATE_CQ]     	= ib_uverbs_create_cq,
	[IB_USER_VERBS_CMD_RESIZE_CQ]     	= ib_uverbs_resize_cq,
	[IB_USER_VERBS_CMD_POLL_CQ]     	= ib_uverbs_poll_cq,
	[IB_USER_VERBS_CMD_REQ_NOTIFY_CQ]     	= ib_uverbs_req_notify_cq,
	[IB_USER_VERBS_CMD_DESTROY_CQ]    	= ib_uverbs_destroy_cq,
	[IB_USER_VERBS_CMD_CREATE_QP]     	= ib_uverbs_create_qp,
	[IB_USER_VERBS_CMD_QUERY_QP]     	= ib_uverbs_query_qp,
	[IB_USER_VERBS_CMD_MODIFY_QP]     	= ib_uverbs_modify_qp,
	[IB_USER_VERBS_CMD_DESTROY_QP]    	= ib_uverbs_destroy_qp,
	[IB_USER_VERBS_CMD_POST_SEND]    	= ib_uverbs_post_send,
	[IB_USER_VERBS_CMD_POST_RECV]    	= ib_uverbs_post_recv,
	[IB_USER_VERBS_CMD_POST_SRQ_RECV]    	= ib_uverbs_post_srq_recv,
	[IB_USER_VERBS_CMD_CREATE_AH]    	= ib_uverbs_create_ah,
	[IB_USER_VERBS_CMD_DESTROY_AH]    	= ib_uverbs_destroy_ah,
	[IB_USER_VERBS_CMD_ATTACH_MCAST]  	= ib_uverbs_attach_mcast,
	[IB_USER_VERBS_CMD_DETACH_MCAST]  	= ib_uverbs_detach_mcast,
	[IB_USER_VERBS_CMD_CREATE_SRQ]    	= ib_uverbs_create_srq,
	[IB_USER_VERBS_CMD_MODIFY_SRQ]    	= ib_uverbs_modify_srq,
	[IB_USER_VERBS_CMD_DESTROY_SRQ]   	= ib_uverbs_destroy_srq,
};
113 | ||
/* Kernel-internal mount of infinibandeventfs, backing event-file inodes. */
static struct vfsmount *uverbs_event_mnt;

static void ib_uverbs_add_one(struct ib_device *device);
static void ib_uverbs_remove_one(struct ib_device *device);
118 | ||
70a30e16 RD |
/* kref release callback: last reference to the uverbs device dropped. */
static void ib_uverbs_release_dev(struct kref *ref)
{
	struct ib_uverbs_device *dev =
		container_of(ref, struct ib_uverbs_device, ref);

	kfree(dev);
}
126 | ||
/*
 * Free all pending events queued against a userspace CQ object:
 * completion events on its (optional) completion channel and async
 * events on the file's async event queue.  Drops the ref this CQ held
 * on its completion event file.
 *
 * NOTE(review): assumes file->async_file is non-NULL here — appears to
 * hold because a ucontext (and hence its objects) only exists after
 * GET_CONTEXT set up the async file; confirm against the callers.
 */
void ib_uverbs_release_ucq(struct ib_uverbs_file *file,
			   struct ib_uverbs_event_file *ev_file,
			   struct ib_ucq_object *uobj)
{
	struct ib_uverbs_event *evt, *tmp;

	/* ev_file is NULL when the CQ was created without a comp channel. */
	if (ev_file) {
		spin_lock_irq(&ev_file->lock);
		list_for_each_entry_safe(evt, tmp, &uobj->comp_list, obj_list) {
			list_del(&evt->list);
			kfree(evt);
		}
		spin_unlock_irq(&ev_file->lock);

		kref_put(&ev_file->ref, ib_uverbs_release_event_file);
	}

	spin_lock_irq(&file->async_file->lock);
	list_for_each_entry_safe(evt, tmp, &uobj->async_list, obj_list) {
		list_del(&evt->list);
		kfree(evt);
	}
	spin_unlock_irq(&file->async_file->lock);
}
151 | ||
/*
 * Free all async events still queued for a QP/SRQ userspace object.
 * NOTE(review): like ib_uverbs_release_ucq(), relies on
 * file->async_file being set whenever such objects exist.
 */
void ib_uverbs_release_uevent(struct ib_uverbs_file *file,
			      struct ib_uevent_object *uobj)
{
	struct ib_uverbs_event *evt, *tmp;

	spin_lock_irq(&file->async_file->lock);
	list_for_each_entry_safe(evt, tmp, &uobj->event_list, obj_list) {
		list_del(&evt->list);
		kfree(evt);
	}
	spin_unlock_irq(&file->async_file->lock);
}
164 | ||
f4e40156 JM |
165 | static void ib_uverbs_detach_umcast(struct ib_qp *qp, |
166 | struct ib_uqp_object *uobj) | |
167 | { | |
168 | struct ib_uverbs_mcast_entry *mcast, *tmp; | |
169 | ||
170 | list_for_each_entry_safe(mcast, tmp, &uobj->mcast_list, list) { | |
171 | ib_detach_mcast(qp, &mcast->gid, mcast->lid); | |
172 | list_del(&mcast->list); | |
173 | kfree(mcast); | |
174 | } | |
175 | } | |
176 | ||
70a30e16 RD |
/*
 * Destroy every userspace object still attached to a context, in
 * dependency order (AH/QP/CQ/SRQ before MR before PD), removing each
 * from its IDR table, then ask the driver to free the context itself.
 * The whole walk runs under ib_uverbs_idr_mutex so no command can race
 * with teardown.  Returns the driver's dealloc_ucontext() result, or 0
 * if there was no context.
 */
static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file,
				      struct ib_ucontext *context)
{
	struct ib_uobject *uobj, *tmp;

	if (!context)
		return 0;

	mutex_lock(&ib_uverbs_idr_mutex);

	list_for_each_entry_safe(uobj, tmp, &context->ah_list, list) {
		struct ib_ah *ah = idr_find(&ib_uverbs_ah_idr, uobj->id);
		idr_remove(&ib_uverbs_ah_idr, uobj->id);
		ib_destroy_ah(ah);
		list_del(&uobj->list);
		kfree(uobj);
	}

	list_for_each_entry_safe(uobj, tmp, &context->qp_list, list) {
		struct ib_qp *qp = idr_find(&ib_uverbs_qp_idr, uobj->id);
		struct ib_uqp_object *uqp =
			container_of(uobj, struct ib_uqp_object, uevent.uobject);
		idr_remove(&ib_uverbs_qp_idr, uobj->id);
		/* Multicast attachments must go before the QP itself. */
		ib_uverbs_detach_umcast(qp, uqp);
		ib_destroy_qp(qp);
		list_del(&uobj->list);
		/* Drop any events still queued for this QP. */
		ib_uverbs_release_uevent(file, &uqp->uevent);
		kfree(uqp);
	}

	list_for_each_entry_safe(uobj, tmp, &context->cq_list, list) {
		struct ib_cq *cq = idr_find(&ib_uverbs_cq_idr, uobj->id);
		/* cq_context holds the comp channel (may be NULL). */
		struct ib_uverbs_event_file *ev_file = cq->cq_context;
		struct ib_ucq_object *ucq =
			container_of(uobj, struct ib_ucq_object, uobject);
		idr_remove(&ib_uverbs_cq_idr, uobj->id);
		ib_destroy_cq(cq);
		list_del(&uobj->list);
		ib_uverbs_release_ucq(file, ev_file, ucq);
		kfree(ucq);
	}

	list_for_each_entry_safe(uobj, tmp, &context->srq_list, list) {
		struct ib_srq *srq = idr_find(&ib_uverbs_srq_idr, uobj->id);
		struct ib_uevent_object *uevent =
			container_of(uobj, struct ib_uevent_object, uobject);
		idr_remove(&ib_uverbs_srq_idr, uobj->id);
		ib_destroy_srq(srq);
		list_del(&uobj->list);
		ib_uverbs_release_uevent(file, uevent);
		kfree(uevent);
	}

	/* XXX Free MWs */

	list_for_each_entry_safe(uobj, tmp, &context->mr_list, list) {
		struct ib_mr *mr = idr_find(&ib_uverbs_mr_idr, uobj->id);
		/* Save the device: mr is gone after ib_dereg_mr(). */
		struct ib_device *mrdev = mr->device;
		struct ib_umem_object *memobj;

		idr_remove(&ib_uverbs_mr_idr, uobj->id);
		ib_dereg_mr(mr);

		memobj = container_of(uobj, struct ib_umem_object, uobject);
		ib_umem_release_on_close(mrdev, &memobj->umem);

		list_del(&uobj->list);
		kfree(memobj);
	}

	list_for_each_entry_safe(uobj, tmp, &context->pd_list, list) {
		struct ib_pd *pd = idr_find(&ib_uverbs_pd_idr, uobj->id);
		idr_remove(&ib_uverbs_pd_idr, uobj->id);
		ib_dealloc_pd(pd);
		list_del(&uobj->list);
		kfree(uobj);
	}

	mutex_unlock(&ib_uverbs_idr_mutex);

	return context->device->dealloc_ucontext(context);
}
259 | ||
/*
 * kref release for an open uverbs file: drop the module and device
 * references taken in ib_uverbs_open(), then free the file struct.
 */
static void ib_uverbs_release_file(struct kref *ref)
{
	struct ib_uverbs_file *file =
		container_of(ref, struct ib_uverbs_file, ref);

	module_put(file->device->ib_dev->owner);
	kref_put(&file->device->ref, ib_uverbs_release_dev);

	kfree(file);
}
270 | ||
/*
 * read() on an event file: block (unless O_NONBLOCK) until an event is
 * queued, then copy one event descriptor to userspace and dequeue it.
 * If the user buffer is too small for a whole descriptor the event is
 * left queued and -EINVAL is returned.
 */
static ssize_t ib_uverbs_event_read(struct file *filp, char __user *buf,
				    size_t count, loff_t *pos)
{
	struct ib_uverbs_event_file *file = filp->private_data;
	struct ib_uverbs_event *event;
	int eventsz;
	int ret = 0;

	spin_lock_irq(&file->lock);

	while (list_empty(&file->event_list)) {
		/* Drop the lock before sleeping or returning. */
		spin_unlock_irq(&file->lock);

		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		if (wait_event_interruptible(file->poll_wait,
					     !list_empty(&file->event_list)))
			return -ERESTARTSYS;

		/* Re-check the list under the lock (wakeup may race). */
		spin_lock_irq(&file->lock);
	}

	event = list_entry(file->event_list.next, struct ib_uverbs_event, list);

	/* Async and completion events have different descriptor sizes. */
	if (file->is_async)
		eventsz = sizeof (struct ib_uverbs_async_event_desc);
	else
		eventsz = sizeof (struct ib_uverbs_comp_event_desc);

	if (eventsz > count) {
		ret   = -EINVAL;
		event = NULL;	/* leave it queued; nothing to free below */
	} else {
		list_del(file->event_list.next);
		if (event->counter) {
			/* Record delivery and unlink from the object list. */
			++(*event->counter);
			list_del(&event->obj_list);
		}
	}

	spin_unlock_irq(&file->lock);

	/* Copy outside the spinlock; copy_to_user() may sleep. */
	if (event) {
		if (copy_to_user(buf, event, eventsz))
			ret = -EFAULT;
		else
			ret = eventsz;
	}

	kfree(event);	/* kfree(NULL) is a no-op */

	return ret;
}
325 | ||
326 | static unsigned int ib_uverbs_event_poll(struct file *filp, | |
327 | struct poll_table_struct *wait) | |
328 | { | |
329 | unsigned int pollflags = 0; | |
330 | struct ib_uverbs_event_file *file = filp->private_data; | |
331 | ||
332 | poll_wait(filp, &file->poll_wait, wait); | |
333 | ||
334 | spin_lock_irq(&file->lock); | |
6b73597e | 335 | if (!list_empty(&file->event_list)) |
bc38a6ab RD |
336 | pollflags = POLLIN | POLLRDNORM; |
337 | spin_unlock_irq(&file->lock); | |
338 | ||
339 | return pollflags; | |
340 | } | |
341 | ||
6b73597e | 342 | void ib_uverbs_release_event_file(struct kref *ref) |
bc38a6ab | 343 | { |
6b73597e RD |
344 | struct ib_uverbs_event_file *file = |
345 | container_of(ref, struct ib_uverbs_event_file, ref); | |
bc38a6ab | 346 | |
6b73597e | 347 | kfree(file); |
bc38a6ab RD |
348 | } |
349 | ||
abdf119b GN |
/* fasync() hook: register/unregister for SIGIO on event arrival. */
static int ib_uverbs_event_fasync(int fd, struct file *filp, int on)
{
	struct ib_uverbs_event_file *file = filp->private_data;

	return fasync_helper(fd, filp, on, &file->async_queue);
}
356 | ||
bc38a6ab RD |
/*
 * release() on an event file: mark it closed (file->file = NULL stops
 * handlers from queuing more events), discard pending events, tear
 * down fasync state, and drop references — including the async file's
 * event handler registration and its ref on the uverbs file.
 */
static int ib_uverbs_event_close(struct inode *inode, struct file *filp)
{
	struct ib_uverbs_event_file *file = filp->private_data;
	struct ib_uverbs_event *entry, *tmp;

	spin_lock_irq(&file->lock);
	file->file = NULL;
	list_for_each_entry_safe(entry, tmp, &file->event_list, list) {
		if (entry->counter)
			list_del(&entry->obj_list);
		kfree(entry);
	}
	spin_unlock_irq(&file->lock);

	ib_uverbs_event_fasync(-1, filp, 0);

	if (file->is_async) {
		ib_unregister_event_handler(&file->uverbs_file->event_handler);
		kref_put(&file->uverbs_file->ref, ib_uverbs_release_file);
	}
	kref_put(&file->ref, ib_uverbs_release_event_file);

	return 0;
}
381 | ||
/* File operations for both async and completion event files. */
static struct file_operations uverbs_event_fops = {
	.owner	 = THIS_MODULE,
	.read 	 = ib_uverbs_event_read,
	.poll    = ib_uverbs_event_poll,
	.release = ib_uverbs_event_close,
	.fasync  = ib_uverbs_event_fasync
};
389 | ||
/*
 * CQ completion callback (may run in interrupt context, hence
 * GFP_ATOMIC and irqsave locking): queue a completion event on the
 * CQ's completion channel and wake readers.  Silently dropped if the
 * channel is absent, already closed, or allocation fails.
 */
void ib_uverbs_comp_handler(struct ib_cq *cq, void *cq_context)
{
	struct ib_uverbs_event_file    *file = cq_context;
	struct ib_ucq_object	       *uobj;
	struct ib_uverbs_event	       *entry;
	unsigned long			flags;

	if (!file)
		return;

	spin_lock_irqsave(&file->lock, flags);
	if (!file->file) {
		/* Channel was closed; don't queue to a dead file. */
		spin_unlock_irqrestore(&file->lock, flags);
		return;
	}

	entry = kmalloc(sizeof *entry, GFP_ATOMIC);
	if (!entry) {
		spin_unlock_irqrestore(&file->lock, flags);
		return;
	}

	uobj = container_of(cq->uobject, struct ib_ucq_object, uobject);

	entry->desc.comp.cq_handle = cq->uobject->user_handle;
	entry->counter		   = &uobj->comp_events_reported;

	list_add_tail(&entry->list, &file->event_list);
	list_add_tail(&entry->obj_list, &uobj->comp_list);
	spin_unlock_irqrestore(&file->lock, flags);

	wake_up_interruptible(&file->poll_wait);
	kill_fasync(&file->async_queue, SIGIO, POLL_IN);
}
424 | ||
/*
 * Queue an async event (CQ/QP/SRQ/port) on the file's async event
 * queue and wake readers.  obj_list/counter are NULL for port events,
 * which are not tied to a userspace object.  Dropped silently if the
 * async file is closed or allocation fails.
 */
static void ib_uverbs_async_handler(struct ib_uverbs_file *file,
				    __u64 element, __u64 event,
				    struct list_head *obj_list,
				    u32 *counter)
{
	struct ib_uverbs_event *entry;
	unsigned long flags;

	spin_lock_irqsave(&file->async_file->lock, flags);
	if (!file->async_file->file) {
		spin_unlock_irqrestore(&file->async_file->lock, flags);
		return;
	}

	entry = kmalloc(sizeof *entry, GFP_ATOMIC);
	if (!entry) {
		spin_unlock_irqrestore(&file->async_file->lock, flags);
		return;
	}

	entry->desc.async.element    = element;
	entry->desc.async.event_type = event;
	entry->counter               = counter;

	list_add_tail(&entry->list, &file->async_file->event_list);
	if (obj_list)
		list_add_tail(&entry->obj_list, obj_list);
	spin_unlock_irqrestore(&file->async_file->lock, flags);

	wake_up_interruptible(&file->async_file->poll_wait);
	kill_fasync(&file->async_file->async_queue, SIGIO, POLL_IN);
}
457 | ||
/* Async (error) event on a CQ: forward to the owning file's queue. */
void ib_uverbs_cq_event_handler(struct ib_event *event, void *context_ptr)
{
	struct ib_ucq_object *uobj = container_of(event->element.cq->uobject,
						  struct ib_ucq_object, uobject);

	ib_uverbs_async_handler(uobj->uverbs_file, uobj->uobject.user_handle,
				event->event, &uobj->async_list,
				&uobj->async_events_reported);
}
467 | ||
468 | void ib_uverbs_qp_event_handler(struct ib_event *event, void *context_ptr) | |
469 | { | |
63aaf647 RD |
470 | struct ib_uevent_object *uobj; |
471 | ||
472 | uobj = container_of(event->element.qp->uobject, | |
473 | struct ib_uevent_object, uobject); | |
474 | ||
475 | ib_uverbs_async_handler(context_ptr, uobj->uobject.user_handle, | |
476 | event->event, &uobj->event_list, | |
477 | &uobj->events_reported); | |
bc38a6ab RD |
478 | } |
479 | ||
f520ba5a RD |
480 | void ib_uverbs_srq_event_handler(struct ib_event *event, void *context_ptr) |
481 | { | |
63aaf647 RD |
482 | struct ib_uevent_object *uobj; |
483 | ||
484 | uobj = container_of(event->element.srq->uobject, | |
485 | struct ib_uevent_object, uobject); | |
486 | ||
487 | ib_uverbs_async_handler(context_ptr, uobj->uobject.user_handle, | |
488 | event->event, &uobj->event_list, | |
489 | &uobj->events_reported); | |
f520ba5a RD |
490 | } |
491 | ||
6b73597e RD |
/*
 * Device-level async event handler (port events etc.): no object list
 * or delivery counter, just the port number as the element.
 */
void ib_uverbs_event_handler(struct ib_event_handler *handler,
			     struct ib_event *event)
{
	struct ib_uverbs_file *file =
		container_of(handler, struct ib_uverbs_file, event_handler);

	ib_uverbs_async_handler(file, event->element.port_num, event->event,
				NULL, NULL);
}
501 | ||
6b73597e RD |
/*
 * Allocate an event file (async if is_async, else a completion
 * channel) plus an unused fd for it, returned through *fd.  The caller
 * is responsible for fd_install()-ing the returned struct file or
 * cleaning up on its own error path.  Returns ERR_PTR() on failure.
 */
struct file *ib_uverbs_alloc_event_file(struct ib_uverbs_file *uverbs_file,
					int is_async, int *fd)
{
	struct ib_uverbs_event_file *ev_file;
	struct file *filp;
	int ret;

	ev_file = kmalloc(sizeof *ev_file, GFP_KERNEL);
	if (!ev_file)
		return ERR_PTR(-ENOMEM);

	kref_init(&ev_file->ref);
	spin_lock_init(&ev_file->lock);
	INIT_LIST_HEAD(&ev_file->event_list);
	init_waitqueue_head(&ev_file->poll_wait);
	ev_file->uverbs_file = uverbs_file;
	ev_file->async_queue = NULL;
	ev_file->is_async    = is_async;

	*fd = get_unused_fd();
	if (*fd < 0) {
		ret = *fd;
		goto err;
	}

	/* Hand-build the struct file on the infinibandeventfs mount. */
	filp = get_empty_filp();
	if (!filp) {
		ret = -ENFILE;
		goto err_fd;
	}

	ev_file->file      = filp;

	/*
	 * fops_get() can't fail here, because we're coming from a
	 * system call on a uverbs file, which will already have a
	 * module reference.
	 */
	filp->f_op 	   = fops_get(&uverbs_event_fops);
	filp->f_vfsmnt 	   = mntget(uverbs_event_mnt);
	filp->f_dentry 	   = dget(uverbs_event_mnt->mnt_root);
	filp->f_mapping    = filp->f_dentry->d_inode->i_mapping;
	filp->f_flags      = O_RDONLY;
	filp->f_mode       = FMODE_READ;
	filp->private_data = ev_file;

	return filp;

err_fd:
	put_unused_fd(*fd);

err:
	kfree(ev_file);
	return ERR_PTR(ret);
}
557 | ||
/*
 * Look up a completion event file by FD. If lookup is successful,
 * takes a ref to the event file struct that it returns; if
 * unsuccessful, returns NULL.
 */
struct ib_uverbs_event_file *ib_uverbs_lookup_comp_file(int fd)
{
	struct ib_uverbs_event_file *ev_file = NULL;
	struct file *filp;

	filp = fget(fd);
	if (!filp)
		return NULL;

	/* Reject fds that are not uverbs event files at all. */
	if (filp->f_op != &uverbs_event_fops)
		goto out;

	ev_file = filp->private_data;
	if (ev_file->is_async) {
		/* Async event files are not valid completion channels. */
		ev_file = NULL;
		goto out;
	}

	kref_get(&ev_file->ref);

out:
	fput(filp);
	return ev_file;
}
587 | ||
/*
 * write() entry point: every uverbs command is a header followed by a
 * command-specific body.  Validate the header, check the command is
 * implemented and enabled in the device's uverbs_cmd_mask, require a
 * context for everything but GET_CONTEXT, then dispatch.
 */
static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
			     size_t count, loff_t *pos)
{
	struct ib_uverbs_file *file = filp->private_data;
	struct ib_uverbs_cmd_hdr hdr;

	if (count < sizeof hdr)
		return -EINVAL;

	if (copy_from_user(&hdr, buf, sizeof hdr))
		return -EFAULT;

	/* Declared length must match what the user actually wrote. */
	if (hdr.in_words * 4 != count)
		return -EINVAL;

	/*
	 * NOTE(review): if hdr.command is __u32 (as in the uverbs ABI
	 * header), the < 0 comparison is always false and could be
	 * dropped — confirm the field's type before removing it.
	 */
	if (hdr.command < 0				||
	    hdr.command >= ARRAY_SIZE(uverbs_cmd_table) ||
	    !uverbs_cmd_table[hdr.command]		||
	    !(file->device->ib_dev->uverbs_cmd_mask & (1ull << hdr.command)))
		return -EINVAL;

	if (!file->ucontext &&
	    hdr.command != IB_USER_VERBS_CMD_GET_CONTEXT)
		return -EINVAL;

	return uverbs_cmd_table[hdr.command](file, buf + sizeof hdr,
					     hdr.in_words * 4, hdr.out_words * 4);
}
616 | ||
/* mmap() is delegated to the driver; requires an established context. */
static int ib_uverbs_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct ib_uverbs_file *file = filp->private_data;

	if (!file->ucontext)
		return -ENODEV;
	else
		return file->device->ib_dev->mmap(file->ucontext, vma);
}
626 | ||
/*
 * open() on /dev/infiniband/uverbsN: look the device up by minor under
 * map_lock (taking a device ref so it can't vanish), pin the driver
 * module, and allocate the per-open file state.  The ucontext itself
 * is created later by the GET_CONTEXT command.
 */
static int ib_uverbs_open(struct inode *inode, struct file *filp)
{
	struct ib_uverbs_device *dev;
	struct ib_uverbs_file *file;
	int ret;

	spin_lock(&map_lock);
	dev = dev_table[iminor(inode) - IB_UVERBS_BASE_MINOR];
	if (dev)
		kref_get(&dev->ref);
	spin_unlock(&map_lock);

	if (!dev)
		return -ENXIO;

	if (!try_module_get(dev->ib_dev->owner)) {
		ret = -ENODEV;
		goto err;
	}

	file = kmalloc(sizeof *file, GFP_KERNEL);
	if (!file) {
		ret = -ENOMEM;
		goto err_module;
	}

	file->device	 = dev;
	file->ucontext	 = NULL;
	file->async_file = NULL;
	kref_init(&file->ref);
	mutex_init(&file->mutex);

	filp->private_data = file;

	return 0;

err_module:
	module_put(dev->ib_dev->owner);

err:
	kref_put(&dev->ref, ib_uverbs_release_dev);

	return ret;
}
671 | ||
/*
 * release() on a uverbs fd: destroy every object in the context, drop
 * our ref on the async event file (if one was created), and drop the
 * file ref (the async event file may still hold another ref).
 */
static int ib_uverbs_close(struct inode *inode, struct file *filp)
{
	struct ib_uverbs_file *file = filp->private_data;

	ib_uverbs_cleanup_ucontext(file, file->ucontext);

	if (file->async_file)
		kref_put(&file->async_file->ref, ib_uverbs_release_event_file);

	kref_put(&file->ref, ib_uverbs_release_file);

	return 0;
}
685 | ||
/* fops for devices whose driver does not implement mmap... */
static struct file_operations uverbs_fops = {
	.owner 	 = THIS_MODULE,
	.write 	 = ib_uverbs_write,
	.open 	 = ib_uverbs_open,
	.release = ib_uverbs_close
};

/* ...and for those that do (selected per-device in ib_uverbs_add_one). */
static struct file_operations uverbs_mmap_fops = {
	.owner 	 = THIS_MODULE,
	.write 	 = ib_uverbs_write,
	.mmap    = ib_uverbs_mmap,
	.open 	 = ib_uverbs_open,
	.release = ib_uverbs_close
};

/* IB core client: called for each IB device added/removed. */
static struct ib_client uverbs_client = {
	.name   = "uverbs",
	.add    = ib_uverbs_add_one,
	.remove = ib_uverbs_remove_one
};
706 | ||
/* sysfs "ibdev" attribute: the underlying IB device's name. */
static ssize_t show_ibdev(struct class_device *class_dev, char *buf)
{
	struct ib_uverbs_device *dev = class_get_devdata(class_dev);

	/* devdata is cleared during remove; guard against that race. */
	if (!dev)
		return -ENODEV;

	return sprintf(buf, "%s\n", dev->ib_dev->name);
}
static CLASS_DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
717 | ||
274c0891 RD |
/* sysfs "abi_version" per-device attribute: driver's uverbs ABI. */
static ssize_t show_dev_abi_version(struct class_device *class_dev, char *buf)
{
	struct ib_uverbs_device *dev = class_get_devdata(class_dev);

	/* devdata is cleared during remove; guard against that race. */
	if (!dev)
		return -ENODEV;

	return sprintf(buf, "%d\n", dev->ib_dev->uverbs_abi_ver);
}
static CLASS_DEVICE_ATTR(abi_version, S_IRUGO, show_dev_abi_version, NULL);
728 | ||
bc38a6ab RD |
/* Class-wide sysfs attribute: the core userspace-verbs ABI version. */
static ssize_t show_abi_version(struct class *class, char *buf)
{
	return sprintf(buf, "%d\n", IB_USER_VERBS_ABI_VERSION);
}
static CLASS_ATTR(abi_version, S_IRUGO, show_abi_version, NULL);
734 | ||
/*
 * IB client "add" callback: create the uverbs char device and sysfs
 * entries for a newly registered IB device.  Devices without
 * alloc_ucontext don't support userspace verbs and are skipped.
 * On any failure we unwind in reverse order and simply don't expose
 * the device (the function returns void).
 */
static void ib_uverbs_add_one(struct ib_device *device)
{
	struct ib_uverbs_device *uverbs_dev;

	if (!device->alloc_ucontext)
		return;

	uverbs_dev = kzalloc(sizeof *uverbs_dev, GFP_KERNEL);
	if (!uverbs_dev)
		return;

	kref_init(&uverbs_dev->ref);

	/* Claim a free minor number in the shared bitmap. */
	spin_lock(&map_lock);
	uverbs_dev->devnum = find_first_zero_bit(dev_map, IB_UVERBS_MAX_DEVICES);
	if (uverbs_dev->devnum >= IB_UVERBS_MAX_DEVICES) {
		spin_unlock(&map_lock);
		goto err;
	}
	set_bit(uverbs_dev->devnum, dev_map);
	spin_unlock(&map_lock);

	uverbs_dev->ib_dev           = device;
	uverbs_dev->num_comp_vectors = 1;

	uverbs_dev->dev = cdev_alloc();
	if (!uverbs_dev->dev)
		goto err;
	uverbs_dev->dev->owner = THIS_MODULE;
	/* Only advertise mmap if the driver implements it. */
	uverbs_dev->dev->ops = device->mmap ? &uverbs_mmap_fops : &uverbs_fops;
	kobject_set_name(&uverbs_dev->dev->kobj, "uverbs%d", uverbs_dev->devnum);
	if (cdev_add(uverbs_dev->dev, IB_UVERBS_BASE_DEV + uverbs_dev->devnum, 1))
		goto err_cdev;

	uverbs_dev->class_dev = class_device_create(uverbs_class, NULL,
						    uverbs_dev->dev->dev,
						    device->dma_device,
						    "uverbs%d", uverbs_dev->devnum);
	if (IS_ERR(uverbs_dev->class_dev))
		goto err_cdev;

	class_set_devdata(uverbs_dev->class_dev, uverbs_dev);

	if (class_device_create_file(uverbs_dev->class_dev, &class_device_attr_ibdev))
		goto err_class;
	if (class_device_create_file(uverbs_dev->class_dev, &class_device_attr_abi_version))
		goto err_class;

	/* Publish last, so open() can never see a half-set-up device. */
	spin_lock(&map_lock);
	dev_table[uverbs_dev->devnum] = uverbs_dev;
	spin_unlock(&map_lock);

	ib_set_client_data(device, &uverbs_client, uverbs_dev);

	return;

err_class:
	class_device_destroy(uverbs_class, uverbs_dev->dev->dev);

err_cdev:
	cdev_del(uverbs_dev->dev);
	clear_bit(uverbs_dev->devnum, dev_map);

err:
	kref_put(&uverbs_dev->ref, ib_uverbs_release_dev);
	return;
}
802 | ||
/*
 * IB client "remove" callback: tear down sysfs/cdev, unpublish the
 * device from dev_table (so new open()s fail), free the minor, and
 * drop the initial ref.  Open files keep their own ref so the struct
 * survives until the last close.
 */
static void ib_uverbs_remove_one(struct ib_device *device)
{
	struct ib_uverbs_device *uverbs_dev = ib_get_client_data(device, &uverbs_client);

	if (!uverbs_dev)
		return;

	/* Make the sysfs show functions return -ENODEV from now on. */
	class_set_devdata(uverbs_dev->class_dev, NULL);
	class_device_destroy(uverbs_class, uverbs_dev->dev->dev);
	cdev_del(uverbs_dev->dev);

	spin_lock(&map_lock);
	dev_table[uverbs_dev->devnum] = NULL;
	spin_unlock(&map_lock);

	clear_bit(uverbs_dev->devnum, dev_map);
	kref_put(&uverbs_dev->ref, ib_uverbs_release_dev);
}
821 | ||
/* Mount helper for the anonymous pseudo-fs backing event files. */
static struct super_block *uverbs_event_get_sb(struct file_system_type *fs_type, int flags,
					       const char *dev_name, void *data)
{
	return get_sb_pseudo(fs_type, "infinibandevent:", NULL,
			     INFINIBANDEVENTFS_MAGIC);
}

static struct file_system_type uverbs_event_fs = {
	/* No owner field so module can be unloaded */
	.name    = "infinibandeventfs",
	.get_sb  = uverbs_event_get_sb,
	.kill_sb = kill_litter_super
};
835 | ||
/*
 * Module init: reserve the char-device region, create the sysfs class
 * and its abi_version attribute, register and kern-mount the event
 * pseudo-fs, and finally register as an IB client (which triggers
 * add_one for every existing device).  Errors unwind in reverse order.
 */
static int __init ib_uverbs_init(void)
{
	int ret;

	spin_lock_init(&map_lock);

	ret = register_chrdev_region(IB_UVERBS_BASE_DEV, IB_UVERBS_MAX_DEVICES,
				     "infiniband_verbs");
	if (ret) {
		printk(KERN_ERR "user_verbs: couldn't register device number\n");
		goto out;
	}

	uverbs_class = class_create(THIS_MODULE, "infiniband_verbs");
	if (IS_ERR(uverbs_class)) {
		ret = PTR_ERR(uverbs_class);
		printk(KERN_ERR "user_verbs: couldn't create class infiniband_verbs\n");
		goto out_chrdev;
	}

	ret = class_create_file(uverbs_class, &class_attr_abi_version);
	if (ret) {
		printk(KERN_ERR "user_verbs: couldn't create abi_version attribute\n");
		goto out_class;
	}

	ret = register_filesystem(&uverbs_event_fs);
	if (ret) {
		printk(KERN_ERR "user_verbs: couldn't register infinibandeventfs\n");
		goto out_class;
	}

	uverbs_event_mnt = kern_mount(&uverbs_event_fs);
	if (IS_ERR(uverbs_event_mnt)) {
		ret = PTR_ERR(uverbs_event_mnt);
		printk(KERN_ERR "user_verbs: couldn't mount infinibandeventfs\n");
		goto out_fs;
	}

	ret = ib_register_client(&uverbs_client);
	if (ret) {
		printk(KERN_ERR "user_verbs: couldn't register client\n");
		goto out_mnt;
	}

	return 0;

out_mnt:
	mntput(uverbs_event_mnt);

out_fs:
	unregister_filesystem(&uverbs_event_fs);

out_class:
	class_destroy(uverbs_class);

out_chrdev:
	unregister_chrdev_region(IB_UVERBS_BASE_DEV, IB_UVERBS_MAX_DEVICES);

out:
	return ret;
}
898 | ||
/*
 * Module exit: undo ib_uverbs_init() in reverse order, flush any
 * pending deferred work, then free the IDR tables' internal storage.
 */
static void __exit ib_uverbs_cleanup(void)
{
	ib_unregister_client(&uverbs_client);
	mntput(uverbs_event_mnt);
	unregister_filesystem(&uverbs_event_fs);
	class_destroy(uverbs_class);
	unregister_chrdev_region(IB_UVERBS_BASE_DEV, IB_UVERBS_MAX_DEVICES);
	flush_scheduled_work();
	idr_destroy(&ib_uverbs_pd_idr);
	idr_destroy(&ib_uverbs_mr_idr);
	idr_destroy(&ib_uverbs_mw_idr);
	idr_destroy(&ib_uverbs_ah_idr);
	idr_destroy(&ib_uverbs_cq_idr);
	idr_destroy(&ib_uverbs_qp_idr);
	idr_destroy(&ib_uverbs_srq_idr);
}
915 | ||
/* Module entry/exit hooks. */
module_init(ib_uverbs_init);
module_exit(ib_uverbs_cleanup);