Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * An async IO implementation for Linux | |
3 | * Written by Benjamin LaHaise <bcrl@kvack.org> | |
4 | * | |
5 | * Implements an efficient asynchronous io interface. | |
6 | * | |
7 | * Copyright 2000, 2001, 2002 Red Hat, Inc. All Rights Reserved. | |
8 | * | |
9 | * See ../COPYING for licensing terms. | |
10 | */ | |
11 | #include <linux/kernel.h> | |
12 | #include <linux/init.h> | |
13 | #include <linux/errno.h> | |
14 | #include <linux/time.h> | |
15 | #include <linux/aio_abi.h> | |
630d9c47 | 16 | #include <linux/export.h> |
1da177e4 | 17 | #include <linux/syscalls.h> |
b9d128f1 | 18 | #include <linux/backing-dev.h> |
027445c3 | 19 | #include <linux/uio.h> |
1da177e4 LT |
20 | |
21 | #define DEBUG 0 | |
22 | ||
23 | #include <linux/sched.h> | |
24 | #include <linux/fs.h> | |
25 | #include <linux/file.h> | |
26 | #include <linux/mm.h> | |
27 | #include <linux/mman.h> | |
3d2d827f | 28 | #include <linux/mmu_context.h> |
1da177e4 LT |
29 | #include <linux/slab.h> |
30 | #include <linux/timer.h> | |
31 | #include <linux/aio.h> | |
32 | #include <linux/highmem.h> | |
33 | #include <linux/workqueue.h> | |
34 | #include <linux/security.h> | |
9c3060be | 35 | #include <linux/eventfd.h> |
cfb1e33e | 36 | #include <linux/blkdev.h> |
9d85cba7 | 37 | #include <linux/compat.h> |
1da177e4 LT |
38 | |
39 | #include <asm/kmap_types.h> | |
40 | #include <asm/uaccess.h> | |
1da177e4 LT |
41 | |
42 | #if DEBUG > 1 | |
43 | #define dprintk printk | |
44 | #else | |
45 | #define dprintk(x...) do { ; } while (0) | |
46 | #endif | |
47 | ||
4e179bca KO |
48 | #define AIO_RING_MAGIC 0xa10a10a1 |
49 | #define AIO_RING_COMPAT_FEATURES 1 | |
50 | #define AIO_RING_INCOMPAT_FEATURES 0 | |
51 | struct aio_ring { | |
52 | unsigned id; /* kernel internal index number */ | |
53 | unsigned nr; /* number of io_events */ | |
54 | unsigned head; | |
55 | unsigned tail; | |
56 | ||
57 | unsigned magic; | |
58 | unsigned compat_features; | |
59 | unsigned incompat_features; | |
60 | unsigned header_length; /* size of aio_ring */ | |
61 | ||
62 | ||
63 | struct io_event io_events[0]; | |
64 | }; /* 128 bytes + ring size */ | |
65 | ||
66 | #define AIO_RING_PAGES 8 | |
67 | struct aio_ring_info { | |
68 | unsigned long mmap_base; | |
69 | unsigned long mmap_size; | |
70 | ||
71 | struct page **ring_pages; | |
72 | spinlock_t ring_lock; | |
73 | long nr_pages; | |
74 | ||
75 | unsigned nr, tail; | |
76 | ||
77 | struct page *internal_pages[AIO_RING_PAGES]; | |
78 | }; | |
79 | ||
80 | static inline unsigned aio_ring_avail(struct aio_ring_info *info, | |
81 | struct aio_ring *ring) | |
82 | { | |
83 | return (ring->head + info->nr - 1 - ring->tail) % info->nr; | |
84 | } | |
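The expression above leaves one ring slot unused so that a completely full ring can be distinguished from an empty one (head == tail always means empty). A worked example with hypothetical values, assuming a 128-entry ring:

```c
/* Worked example of aio_ring_avail() with nr = 128 (hypothetical values):
 *
 *	head == 0, tail == 0	(empty ring)
 *	avail = (0 + 128 - 1 - 0) % 128   = 127	// nr - 1 usable slots
 *
 *	head == 0, tail == 126	(126 completions not yet reaped)
 *	avail = (0 + 128 - 1 - 126) % 128 = 1	// one free slot left
 *
 * The "- 1" reserves the sentinel slot, so head == tail is
 * unambiguously "empty" rather than "full".
 */
```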
85 | ||
86 | struct kioctx { | |
87 | atomic_t users; | |
88 | int dead; | |
89 | ||
90 | /* This needs improving */ | |
91 | unsigned long user_id; | |
92 | struct hlist_node list; | |
93 | ||
94 | wait_queue_head_t wait; | |
95 | ||
96 | spinlock_t ctx_lock; | |
97 | ||
98 | int reqs_active; | |
99 | struct list_head active_reqs; /* used for cancellation */ | |
100 | ||
101 | /* sys_io_setup currently limits this to an unsigned int */ | |
102 | unsigned max_reqs; | |
103 | ||
104 | struct aio_ring_info ring_info; | |
105 | ||
106 | struct rcu_head rcu_head; | |
107 | }; | |
108 | ||
1da177e4 | 109 | /*------ sysctl variables----*/ |
d55b5fda ZB |
110 | static DEFINE_SPINLOCK(aio_nr_lock); |
111 | unsigned long aio_nr; /* current system wide number of aio requests */ | |
112 | unsigned long aio_max_nr = 0x10000; /* system wide maximum number of aio requests */ | |
1da177e4 LT |
113 | /*----end sysctl variables---*/ |
114 | ||
e18b890b CL |
115 | static struct kmem_cache *kiocb_cachep; |
116 | static struct kmem_cache *kioctx_cachep; | |
1da177e4 | 117 | |
1da177e4 LT |
118 | /* aio_setup |
119 | * Creates the slab caches used by the aio routines, panic on | |
120 | * failure as this is done early during the boot sequence. | |
121 | */ | |
122 | static int __init aio_setup(void) | |
123 | { | |
0a31bd5f CL |
124 | kiocb_cachep = KMEM_CACHE(kiocb, SLAB_HWCACHE_ALIGN|SLAB_PANIC); |
125 | kioctx_cachep = KMEM_CACHE(kioctx,SLAB_HWCACHE_ALIGN|SLAB_PANIC); | |
1da177e4 | 126 | |
1da177e4 LT |
127 | pr_debug("aio_setup: sizeof(struct page) = %d\n", (int)sizeof(struct page)); |
128 | ||
129 | return 0; | |
130 | } | |
385773e0 | 131 | __initcall(aio_setup); |
1da177e4 LT |
132 | |
133 | static void aio_free_ring(struct kioctx *ctx) | |
134 | { | |
135 | struct aio_ring_info *info = &ctx->ring_info; | |
136 | long i; | |
137 | ||
138 | for (i=0; i<info->nr_pages; i++) | |
139 | put_page(info->ring_pages[i]); | |
140 | ||
936af157 | 141 | if (info->mmap_size) { |
bfce281c | 142 | vm_munmap(info->mmap_base, info->mmap_size); |
936af157 | 143 | } |
1da177e4 LT |
144 | |
145 | if (info->ring_pages && info->ring_pages != info->internal_pages) | |
146 | kfree(info->ring_pages); | |
147 | info->ring_pages = NULL; | |
148 | info->nr = 0; | |
149 | } | |
150 | ||
151 | static int aio_setup_ring(struct kioctx *ctx) | |
152 | { | |
153 | struct aio_ring *ring; | |
154 | struct aio_ring_info *info = &ctx->ring_info; | |
155 | unsigned nr_events = ctx->max_reqs; | |
41003a7b | 156 | struct mm_struct *mm = current->mm; |
41badc15 | 157 | unsigned long size, populate; |
1da177e4 LT |
158 | int nr_pages; |
159 | ||
160 | /* Compensate for the ring buffer's head/tail overlap entry */ | |
161 | nr_events += 2; /* 1 is required, 2 for good luck */ | |
162 | ||
163 | size = sizeof(struct aio_ring); | |
164 | size += sizeof(struct io_event) * nr_events; | |
165 | nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT; | |
166 | ||
167 | if (nr_pages < 0) | |
168 | return -EINVAL; | |
169 | ||
170 | nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event); | |
171 | ||
172 | info->nr = 0; | |
173 | info->ring_pages = info->internal_pages; | |
174 | if (nr_pages > AIO_RING_PAGES) { | |
11b0b5ab | 175 | info->ring_pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL); |
1da177e4 LT |
176 | if (!info->ring_pages) |
177 | return -ENOMEM; | |
1da177e4 LT |
178 | } |
179 | ||
180 | info->mmap_size = nr_pages * PAGE_SIZE; | |
181 | dprintk("attempting mmap of %lu bytes\n", info->mmap_size); | |
41003a7b | 182 | down_write(&mm->mmap_sem); |
e3fc629d AV |
183 | info->mmap_base = do_mmap_pgoff(NULL, 0, info->mmap_size, |
184 | PROT_READ|PROT_WRITE, | |
bebeb3d6 ML |
185 | MAP_ANONYMOUS|MAP_PRIVATE, 0, |
186 | &populate); | |
1da177e4 | 187 | if (IS_ERR((void *)info->mmap_base)) { |
41003a7b | 188 | up_write(&mm->mmap_sem); |
1da177e4 LT |
189 | info->mmap_size = 0; |
190 | aio_free_ring(ctx); | |
191 | return -EAGAIN; | |
192 | } | |
193 | ||
194 | dprintk("mmap address: 0x%08lx\n", info->mmap_base); | |
41003a7b | 195 | info->nr_pages = get_user_pages(current, mm, info->mmap_base, nr_pages, |
1da177e4 | 196 | 1, 0, info->ring_pages, NULL); |
41003a7b | 197 | up_write(&mm->mmap_sem); |
1da177e4 LT |
198 | |
199 | if (unlikely(info->nr_pages != nr_pages)) { | |
200 | aio_free_ring(ctx); | |
201 | return -EAGAIN; | |
202 | } | |
bebeb3d6 | 203 | if (populate) |
41badc15 | 204 | mm_populate(info->mmap_base, populate); |
1da177e4 LT |
205 | |
206 | ctx->user_id = info->mmap_base; | |
207 | ||
208 | info->nr = nr_events; /* trusted copy */ | |
209 | ||
e8e3c3d6 | 210 | ring = kmap_atomic(info->ring_pages[0]); |
1da177e4 LT |
211 | ring->nr = nr_events; /* user copy */ |
212 | ring->id = ctx->user_id; | |
213 | ring->head = ring->tail = 0; | |
214 | ring->magic = AIO_RING_MAGIC; | |
215 | ring->compat_features = AIO_RING_COMPAT_FEATURES; | |
216 | ring->incompat_features = AIO_RING_INCOMPAT_FEATURES; | |
217 | ring->header_length = sizeof(struct aio_ring); | |
e8e3c3d6 | 218 | kunmap_atomic(ring); |
1da177e4 LT |
219 | |
220 | return 0; | |
221 | } | |
222 | ||
223 | ||
224 | /* aio_ring_event: returns a pointer to the event at the given index from | |
e8e3c3d6 | 225 | * kmap_atomic(). Release the pointer with put_aio_ring_event(); |
1da177e4 LT |
226 | */ |
227 | #define AIO_EVENTS_PER_PAGE (PAGE_SIZE / sizeof(struct io_event)) | |
228 | #define AIO_EVENTS_FIRST_PAGE ((PAGE_SIZE - sizeof(struct aio_ring)) / sizeof(struct io_event)) | |
229 | #define AIO_EVENTS_OFFSET (AIO_EVENTS_PER_PAGE - AIO_EVENTS_FIRST_PAGE) | |
230 | ||
e8e3c3d6 | 231 | #define aio_ring_event(info, nr) ({ \ |
1da177e4 LT |
232 | unsigned pos = (nr) + AIO_EVENTS_OFFSET; \ |
233 | struct io_event *__event; \ | |
234 | __event = kmap_atomic( \ | |
e8e3c3d6 | 235 | (info)->ring_pages[pos / AIO_EVENTS_PER_PAGE]); \ |
1da177e4 LT |
236 | __event += pos % AIO_EVENTS_PER_PAGE; \ |
237 | __event; \ | |
238 | }) | |
239 | ||
e8e3c3d6 | 240 | #define put_aio_ring_event(event) do { \ |
1da177e4 LT |
241 | struct io_event *__event = (event); \ |
242 | (void)__event; \ | |
e8e3c3d6 | 243 | kunmap_atomic((void *)((unsigned long)__event & PAGE_MASK)); \ |
1da177e4 LT |
244 | } while(0) |
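Plugging concrete sizes into these macros makes the index arithmetic easier to follow. The figures below assume 4 KiB pages, a 32-byte struct io_event and a 32-byte struct aio_ring header (the eight unsigned fields above); they are illustrative rather than guaranteed for every configuration:

```c
/* Worked example of the aio_ring_event() arithmetic (assumed sizes):
 *
 *	AIO_EVENTS_PER_PAGE   = 4096 / 32	 = 128
 *	AIO_EVENTS_FIRST_PAGE = (4096 - 32) / 32 = 127
 *	AIO_EVENTS_OFFSET     = 128 - 127	 = 1
 *
 * Event nr is shifted by AIO_EVENTS_OFFSET so that event 0 lands just
 * after the ring header on the first page:
 *
 *	pos  = nr + 1;
 *	page = info->ring_pages[pos / 128];	// page holding the event
 *	slot = pos % 128;			// index within that page
 */
```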
245 | ||
abf137dd JA |
246 | static void ctx_rcu_free(struct rcu_head *head) |
247 | { | |
248 | struct kioctx *ctx = container_of(head, struct kioctx, rcu_head); | |
abf137dd | 249 | kmem_cache_free(kioctx_cachep, ctx); |
abf137dd | 250 | } |
d5470b59 AB |
251 | |
252 | /* __put_ioctx | |
253 | * Called when the last user of an aio context has gone away, | |
254 | * and the struct needs to be freed. | |
255 | */ | |
256 | static void __put_ioctx(struct kioctx *ctx) | |
257 | { | |
2dd542b7 | 258 | unsigned nr_events = ctx->max_reqs; |
d5470b59 AB |
259 | BUG_ON(ctx->reqs_active); |
260 | ||
d5470b59 | 261 | aio_free_ring(ctx); |
2dd542b7 AV |
262 | if (nr_events) { |
263 | spin_lock(&aio_nr_lock); | |
264 | BUG_ON(aio_nr - nr_events > aio_nr); | |
265 | aio_nr -= nr_events; | |
266 | spin_unlock(&aio_nr_lock); | |
267 | } | |
d5470b59 | 268 | pr_debug("__put_ioctx: freeing %p\n", ctx); |
abf137dd | 269 | call_rcu(&ctx->rcu_head, ctx_rcu_free); |
d5470b59 AB |
270 | } |
271 | ||
3bd9a5d7 NP |
272 | static inline int try_get_ioctx(struct kioctx *kioctx) |
273 | { | |
274 | return atomic_inc_not_zero(&kioctx->users); | |
275 | } | |
276 | ||
277 | static inline void put_ioctx(struct kioctx *kioctx) | |
278 | { | |
279 | BUG_ON(atomic_read(&kioctx->users) <= 0); | |
280 | if (unlikely(atomic_dec_and_test(&kioctx->users))) | |
281 | __put_ioctx(kioctx); | |
282 | } | |
d5470b59 | 283 | |
906b973c KO |
284 | static int kiocb_cancel(struct kioctx *ctx, struct kiocb *kiocb, |
285 | struct io_event *res) | |
286 | { | |
287 | int (*cancel)(struct kiocb *, struct io_event *); | |
288 | int ret = -EINVAL; | |
289 | ||
290 | cancel = kiocb->ki_cancel; | |
291 | kiocbSetCancelled(kiocb); | |
292 | if (cancel) { | |
293 | kiocb->ki_users++; | |
294 | spin_unlock_irq(&ctx->ctx_lock); | |
295 | ||
296 | memset(res, 0, sizeof(*res)); | |
297 | res->obj = (u64)(unsigned long)kiocb->ki_obj.user; | |
298 | res->data = kiocb->ki_user_data; | |
299 | ret = cancel(kiocb, res); | |
300 | ||
301 | spin_lock_irq(&ctx->ctx_lock); | |
302 | } | |
303 | ||
304 | return ret; | |
305 | } | |
306 | ||
1da177e4 LT |
307 | /* ioctx_alloc |
308 | * Allocates and initializes an ioctx. Returns an ERR_PTR if it failed. | |
309 | */ | |
310 | static struct kioctx *ioctx_alloc(unsigned nr_events) | |
311 | { | |
41003a7b | 312 | struct mm_struct *mm = current->mm; |
1da177e4 | 313 | struct kioctx *ctx; |
e23754f8 | 314 | int err = -ENOMEM; |
1da177e4 LT |
315 | |
316 | /* Prevent overflows */ | |
317 | if ((nr_events > (0x10000000U / sizeof(struct io_event))) || | |
318 | (nr_events > (0x10000000U / sizeof(struct kiocb)))) { | |
319 | pr_debug("EINVAL: nr_events too high\n"); | |
320 | return ERR_PTR(-EINVAL); | |
321 | } | |
322 | ||
2dd542b7 | 323 | if (!nr_events || (unsigned long)nr_events > aio_max_nr) |
1da177e4 LT |
324 | return ERR_PTR(-EAGAIN); |
325 | ||
c3762229 | 326 | ctx = kmem_cache_zalloc(kioctx_cachep, GFP_KERNEL); |
1da177e4 LT |
327 | if (!ctx) |
328 | return ERR_PTR(-ENOMEM); | |
329 | ||
1da177e4 | 330 | ctx->max_reqs = nr_events; |
1da177e4 | 331 | |
86b62a2c | 332 | atomic_set(&ctx->users, 2); |
1da177e4 LT |
333 | spin_lock_init(&ctx->ctx_lock); |
334 | spin_lock_init(&ctx->ring_info.ring_lock); | |
335 | init_waitqueue_head(&ctx->wait); | |
336 | ||
337 | INIT_LIST_HEAD(&ctx->active_reqs); | |
1da177e4 LT |
338 | |
339 | if (aio_setup_ring(ctx) < 0) | |
340 | goto out_freectx; | |
341 | ||
342 | /* limit the number of system wide aios */ | |
9fa1cb39 | 343 | spin_lock(&aio_nr_lock); |
2dd542b7 AV |
344 | if (aio_nr + nr_events > aio_max_nr || |
345 | aio_nr + nr_events < aio_nr) { | |
9fa1cb39 | 346 | spin_unlock(&aio_nr_lock); |
1da177e4 | 347 | goto out_cleanup; |
2dd542b7 AV |
348 | } |
349 | aio_nr += ctx->max_reqs; | |
9fa1cb39 | 350 | spin_unlock(&aio_nr_lock); |
1da177e4 | 351 | |
39fa0031 | 352 | /* now link into global list. */ |
abf137dd JA |
353 | spin_lock(&mm->ioctx_lock); |
354 | hlist_add_head_rcu(&ctx->list, &mm->ioctx_list); | |
355 | spin_unlock(&mm->ioctx_lock); | |
1da177e4 LT |
356 | |
357 | dprintk("aio: allocated ioctx %p[%ld]: mm=%p mask=0x%x\n", | |
41003a7b | 358 | ctx, ctx->user_id, mm, ctx->ring_info.nr); |
1da177e4 LT |
359 | return ctx; |
360 | ||
361 | out_cleanup: | |
e23754f8 AV |
362 | err = -EAGAIN; |
363 | aio_free_ring(ctx); | |
1da177e4 | 364 | out_freectx: |
1da177e4 | 365 | kmem_cache_free(kioctx_cachep, ctx); |
e23754f8 AV |
366 | dprintk("aio: error allocating ioctx %d\n", err); |
367 | return ERR_PTR(err); | |
1da177e4 LT |
368 | } |
369 | ||
06af121e | 370 | /* kill_ctx |
1da177e4 LT |
371 | * Cancels all outstanding aio requests on an aio context. Used |
372 | * when the processes owning a context have all exited to encourage | |
373 | * the rapid destruction of the kioctx. | |
374 | */ | |
06af121e | 375 | static void kill_ctx(struct kioctx *ctx) |
1da177e4 | 376 | { |
06af121e AV |
377 | struct task_struct *tsk = current; |
378 | DECLARE_WAITQUEUE(wait, tsk); | |
1da177e4 | 379 | struct io_event res; |
906b973c | 380 | struct kiocb *req; |
06af121e | 381 | |
1da177e4 LT |
382 | spin_lock_irq(&ctx->ctx_lock); |
383 | ctx->dead = 1; | |
384 | while (!list_empty(&ctx->active_reqs)) { | |
906b973c KO |
385 | req = list_first_entry(&ctx->active_reqs, |
386 | struct kiocb, ki_list); | |
387 | ||
388 | list_del_init(&req->ki_list); | |
389 | kiocb_cancel(ctx, req, &res); | |
1da177e4 | 390 | } |
1da177e4 | 391 | |
1da177e4 | 392 | if (!ctx->reqs_active) |
dee11c23 | 393 | goto out; |
1da177e4 LT |
394 | |
395 | add_wait_queue(&ctx->wait, &wait); | |
396 | set_task_state(tsk, TASK_UNINTERRUPTIBLE); | |
397 | while (ctx->reqs_active) { | |
dee11c23 | 398 | spin_unlock_irq(&ctx->ctx_lock); |
41d10da3 | 399 | io_schedule(); |
1da177e4 | 400 | set_task_state(tsk, TASK_UNINTERRUPTIBLE); |
dee11c23 | 401 | spin_lock_irq(&ctx->ctx_lock); |
1da177e4 LT |
402 | } |
403 | __set_task_state(tsk, TASK_RUNNING); | |
404 | remove_wait_queue(&ctx->wait, &wait); | |
dee11c23 KC |
405 | |
406 | out: | |
407 | spin_unlock_irq(&ctx->ctx_lock); | |
1da177e4 LT |
408 | } |
409 | ||
410 | /* wait_on_sync_kiocb: | |
411 | * Waits on the given sync kiocb to complete. | |
412 | */ | |
fc9b52cd | 413 | ssize_t wait_on_sync_kiocb(struct kiocb *iocb) |
1da177e4 LT |
414 | { |
415 | while (iocb->ki_users) { | |
416 | set_current_state(TASK_UNINTERRUPTIBLE); | |
417 | if (!iocb->ki_users) | |
418 | break; | |
41d10da3 | 419 | io_schedule(); |
1da177e4 LT |
420 | } |
421 | __set_current_state(TASK_RUNNING); | |
422 | return iocb->ki_user_data; | |
423 | } | |
385773e0 | 424 | EXPORT_SYMBOL(wait_on_sync_kiocb); |
1da177e4 LT |
425 | |
426 | /* exit_aio: called when the last user of mm goes away. At this point, | |
427 | * there is no way for any new requests to be submitted or any of the | |
428 | * io_* syscalls to be called on the context. However, there may be | |
429 | * outstanding requests which hold references to the context; as they | |
430 | * go away, they will call put_ioctx and release any pinned memory | |
431 | * associated with the request (held via struct page * references). | |
432 | */ | |
fc9b52cd | 433 | void exit_aio(struct mm_struct *mm) |
1da177e4 | 434 | { |
abf137dd JA |
435 | struct kioctx *ctx; |
436 | ||
437 | while (!hlist_empty(&mm->ioctx_list)) { | |
438 | ctx = hlist_entry(mm->ioctx_list.first, struct kioctx, list); | |
439 | hlist_del_rcu(&ctx->list); | |
440 | ||
06af121e | 441 | kill_ctx(ctx); |
1da177e4 LT |
442 | |
443 | if (1 != atomic_read(&ctx->users)) | |
444 | printk(KERN_DEBUG | |
445 | "exit_aio:ioctx still alive: %d %d %d\n", | |
446 | atomic_read(&ctx->users), ctx->dead, | |
447 | ctx->reqs_active); | |
936af157 AV |
448 | /* |
449 | * We don't need to bother with munmap() here - | |
450 | * exit_mmap(mm) is coming and it'll unmap everything. | |
451 | * Since aio_free_ring() uses non-zero ->mmap_size | |
452 | * as indicator that it needs to unmap the area, | |
453 | * just set it to 0; aio_free_ring() is the only | |
454 | * place that uses ->mmap_size, so it's safe. | |
936af157 AV |
455 | */ |
456 | ctx->ring_info.mmap_size = 0; | |
1da177e4 | 457 | put_ioctx(ctx); |
1da177e4 LT |
458 | } |
459 | } | |
460 | ||
1da177e4 LT |
461 | /* aio_get_req |
462 | * Allocate a slot for an aio request. Increments the users count | |
463 | * of the kioctx so that the kioctx stays around until all requests are | |
464 | * complete. Returns NULL if no requests are free. | |
465 | * | |
466 | * Returns with kiocb->users set to 2. The io submit code path holds | |
467 | * an extra reference while submitting the i/o. | |
468 | * This prevents races between the aio code path referencing the | |
469 | * req (after submitting it) and aio_complete() freeing the req. | |
470 | */ | |
fc9b52cd | 471 | static struct kiocb *__aio_get_req(struct kioctx *ctx) |
1da177e4 LT |
472 | { |
473 | struct kiocb *req = NULL; | |
1da177e4 LT |
474 | |
475 | req = kmem_cache_alloc(kiocb_cachep, GFP_KERNEL); | |
476 | if (unlikely(!req)) | |
477 | return NULL; | |
478 | ||
4faa5285 | 479 | req->ki_flags = 0; |
1da177e4 LT |
480 | req->ki_users = 2; |
481 | req->ki_key = 0; | |
482 | req->ki_ctx = ctx; | |
483 | req->ki_cancel = NULL; | |
484 | req->ki_retry = NULL; | |
1da177e4 LT |
485 | req->ki_dtor = NULL; |
486 | req->private = NULL; | |
eed4e51f | 487 | req->ki_iovec = NULL; |
87c3a86e | 488 | req->ki_eventfd = NULL; |
1da177e4 | 489 | |
080d676d JM |
490 | return req; |
491 | } | |
492 | ||
493 | /* | |
494 | * struct kiocb's are allocated in batches to reduce the number of | |
495 | * times the ctx lock is acquired and released. | |
496 | */ | |
497 | #define KIOCB_BATCH_SIZE 32L | |
498 | struct kiocb_batch { | |
499 | struct list_head head; | |
500 | long count; /* number of requests left to allocate */ | |
501 | }; | |
502 | ||
503 | static void kiocb_batch_init(struct kiocb_batch *batch, long total) | |
504 | { | |
505 | INIT_LIST_HEAD(&batch->head); | |
506 | batch->count = total; | |
507 | } | |
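The batch is meant to live for the duration of a single io_submit() call. Below is a condensed outline of the call pattern, mirroring what do_io_submit() further down in this file actually does (not additional kernel code):

```c
/* Sketch of the kiocb batch lifecycle, one batch per io_submit():
 *
 *	struct kiocb_batch batch;
 *
 *	kiocb_batch_init(&batch, nr);		// nr = iocbs from userspace
 *	for each user iocb:
 *		req = aio_get_req(ctx, &batch);	// pops a kiocb; calls
 *						// kiocb_batch_refill() (one
 *						// ctx_lock round trip) only
 *						// when the batch is empty
 *		...set up and submit req...
 *	kiocb_batch_free(ctx, &batch);		// return any unused kiocbs
 */
```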
508 | ||
69e4747e | 509 | static void kiocb_batch_free(struct kioctx *ctx, struct kiocb_batch *batch) |
080d676d JM |
510 | { |
511 | struct kiocb *req, *n; | |
512 | ||
69e4747e GN |
513 | if (list_empty(&batch->head)) |
514 | return; | |
515 | ||
516 | spin_lock_irq(&ctx->ctx_lock); | |
080d676d JM |
517 | list_for_each_entry_safe(req, n, &batch->head, ki_batch) { |
518 | list_del(&req->ki_batch); | |
69e4747e | 519 | list_del(&req->ki_list); |
080d676d | 520 | kmem_cache_free(kiocb_cachep, req); |
69e4747e | 521 | ctx->reqs_active--; |
080d676d | 522 | } |
880641bb JM |
523 | if (unlikely(!ctx->reqs_active && ctx->dead)) |
524 | wake_up_all(&ctx->wait); | |
69e4747e | 525 | spin_unlock_irq(&ctx->ctx_lock); |
080d676d JM |
526 | } |
527 | ||
528 | /* | |
529 | * Allocate a batch of kiocbs. This avoids taking and dropping the | |
530 | * context lock a lot during setup. | |
531 | */ | |
532 | static int kiocb_batch_refill(struct kioctx *ctx, struct kiocb_batch *batch) | |
533 | { | |
534 | unsigned short allocated, to_alloc; | |
535 | long avail; | |
080d676d JM |
536 | struct kiocb *req, *n; |
537 | struct aio_ring *ring; | |
538 | ||
539 | to_alloc = min(batch->count, KIOCB_BATCH_SIZE); | |
540 | for (allocated = 0; allocated < to_alloc; allocated++) { | |
541 | req = __aio_get_req(ctx); | |
542 | if (!req) | |
543 | /* allocation failed, go with what we've got */ | |
544 | break; | |
545 | list_add(&req->ki_batch, &batch->head); | |
546 | } | |
547 | ||
548 | if (allocated == 0) | |
549 | goto out; | |
550 | ||
1da177e4 | 551 | spin_lock_irq(&ctx->ctx_lock); |
080d676d JM |
552 | ring = kmap_atomic(ctx->ring_info.ring_pages[0]); |
553 | ||
554 | avail = aio_ring_avail(&ctx->ring_info, ring) - ctx->reqs_active; | |
555 | BUG_ON(avail < 0); | |
080d676d JM |
556 | if (avail < allocated) { |
557 | /* Trim back the number of requests. */ | |
558 | list_for_each_entry_safe(req, n, &batch->head, ki_batch) { | |
559 | list_del(&req->ki_batch); | |
560 | kmem_cache_free(kiocb_cachep, req); | |
561 | if (--allocated <= avail) | |
562 | break; | |
563 | } | |
564 | } | |
565 | ||
566 | batch->count -= allocated; | |
567 | list_for_each_entry(req, &batch->head, ki_batch) { | |
1da177e4 | 568 | list_add(&req->ki_list, &ctx->active_reqs); |
1da177e4 | 569 | ctx->reqs_active++; |
1da177e4 | 570 | } |
1da177e4 | 571 | |
080d676d JM |
572 | kunmap_atomic(ring); |
573 | spin_unlock_irq(&ctx->ctx_lock); | |
1da177e4 | 574 | |
080d676d JM |
575 | out: |
576 | return allocated; | |
1da177e4 LT |
577 | } |
578 | ||
080d676d JM |
579 | static inline struct kiocb *aio_get_req(struct kioctx *ctx, |
580 | struct kiocb_batch *batch) | |
1da177e4 LT |
581 | { |
582 | struct kiocb *req; | |
080d676d JM |
583 | |
584 | if (list_empty(&batch->head)) | |
585 | if (kiocb_batch_refill(ctx, batch) == 0) | |
586 | return NULL; | |
587 | req = list_first_entry(&batch->head, struct kiocb, ki_batch); | |
588 | list_del(&req->ki_batch); | |
1da177e4 LT |
589 | return req; |
590 | } | |
591 | ||
592 | static inline void really_put_req(struct kioctx *ctx, struct kiocb *req) | |
593 | { | |
d00689af ZB |
594 | assert_spin_locked(&ctx->ctx_lock); |
595 | ||
13389010 DL |
596 | if (req->ki_eventfd != NULL) |
597 | eventfd_ctx_put(req->ki_eventfd); | |
1da177e4 LT |
598 | if (req->ki_dtor) |
599 | req->ki_dtor(req); | |
eed4e51f BP |
600 | if (req->ki_iovec != &req->ki_inline_vec) |
601 | kfree(req->ki_iovec); | |
1da177e4 LT |
602 | kmem_cache_free(kiocb_cachep, req); |
603 | ctx->reqs_active--; | |
604 | ||
605 | if (unlikely(!ctx->reqs_active && ctx->dead)) | |
e91f90bb | 606 | wake_up_all(&ctx->wait); |
1da177e4 LT |
607 | } |
608 | ||
1da177e4 LT |
609 | /* __aio_put_req |
610 | * Drops a reference to the request; it is freed once the last user puts it. | |
611 | */ | |
2d68449e | 612 | static void __aio_put_req(struct kioctx *ctx, struct kiocb *req) |
1da177e4 | 613 | { |
516e0cc5 AV |
614 | dprintk(KERN_DEBUG "aio_put(%p): f_count=%ld\n", |
615 | req, atomic_long_read(&req->ki_filp->f_count)); | |
1da177e4 | 616 | |
d00689af ZB |
617 | assert_spin_locked(&ctx->ctx_lock); |
618 | ||
87c3a86e | 619 | req->ki_users--; |
93e06b41 | 620 | BUG_ON(req->ki_users < 0); |
1da177e4 | 621 | if (likely(req->ki_users)) |
2d68449e | 622 | return; |
1da177e4 LT |
623 | list_del(&req->ki_list); /* remove from active_reqs */ |
624 | req->ki_cancel = NULL; | |
625 | req->ki_retry = NULL; | |
626 | ||
3ffa3c0e AV |
627 | fput(req->ki_filp); |
628 | req->ki_filp = NULL; | |
629 | really_put_req(ctx, req); | |
1da177e4 LT |
630 | } |
631 | ||
632 | /* aio_put_req | |
633 | * Drops a reference to the kiocb; the request is freed once the | |
634 | * last user has put it. | |
635 | */ | |
2d68449e | 636 | void aio_put_req(struct kiocb *req) |
1da177e4 LT |
637 | { |
638 | struct kioctx *ctx = req->ki_ctx; | |
1da177e4 | 639 | spin_lock_irq(&ctx->ctx_lock); |
2d68449e | 640 | __aio_put_req(ctx, req); |
1da177e4 | 641 | spin_unlock_irq(&ctx->ctx_lock); |
1da177e4 | 642 | } |
385773e0 | 643 | EXPORT_SYMBOL(aio_put_req); |
1da177e4 | 644 | |
d5470b59 | 645 | static struct kioctx *lookup_ioctx(unsigned long ctx_id) |
1da177e4 | 646 | { |
abf137dd | 647 | struct mm_struct *mm = current->mm; |
65c24491 | 648 | struct kioctx *ctx, *ret = NULL; |
1da177e4 | 649 | |
abf137dd JA |
650 | rcu_read_lock(); |
651 | ||
b67bfe0d | 652 | hlist_for_each_entry_rcu(ctx, &mm->ioctx_list, list) { |
3bd9a5d7 NP |
653 | /* |
654 | * RCU protects us against accessing freed memory but | |
655 | * we have to be careful not to get a reference when the | |
656 | * reference count already dropped to 0 (ctx->dead test | |
657 | * is unreliable because of races). | |
658 | */ | |
659 | if (ctx->user_id == ctx_id && !ctx->dead && try_get_ioctx(ctx)){ | |
65c24491 | 660 | ret = ctx; |
1da177e4 LT |
661 | break; |
662 | } | |
abf137dd | 663 | } |
1da177e4 | 664 | |
abf137dd | 665 | rcu_read_unlock(); |
65c24491 | 666 | return ret; |
1da177e4 LT |
667 | } |
668 | ||
1da177e4 LT |
669 | /* aio_complete |
670 | * Called when the io request on the given iocb is complete. | |
1da177e4 | 671 | */ |
2d68449e | 672 | void aio_complete(struct kiocb *iocb, long res, long res2) |
1da177e4 LT |
673 | { |
674 | struct kioctx *ctx = iocb->ki_ctx; | |
675 | struct aio_ring_info *info; | |
676 | struct aio_ring *ring; | |
677 | struct io_event *event; | |
678 | unsigned long flags; | |
679 | unsigned long tail; | |
1da177e4 | 680 | |
20dcae32 ZB |
681 | /* |
682 | * Special case handling for sync iocbs: | |
683 | * - events go directly into the iocb for fast handling | |
684 | * - the sync task with the iocb in its stack holds the single iocb | |
685 | * ref, no other paths have a way to get another ref | |
686 | * - the sync task helpfully left a reference to itself in the iocb | |
1da177e4 LT |
687 | */ |
688 | if (is_sync_kiocb(iocb)) { | |
20dcae32 | 689 | BUG_ON(iocb->ki_users != 1); |
1da177e4 | 690 | iocb->ki_user_data = res; |
20dcae32 | 691 | iocb->ki_users = 0; |
1da177e4 | 692 | wake_up_process(iocb->ki_obj.tsk); |
2d68449e | 693 | return; |
1da177e4 LT |
694 | } |
695 | ||
696 | info = &ctx->ring_info; | |
697 | ||
698 | /* add a completion event to the ring buffer. | |
699 | * must be done holding ctx->ctx_lock to prevent | |
700 | * other code from messing with the tail | |
701 | * pointer since we might be called from irq | |
702 | * context. | |
703 | */ | |
704 | spin_lock_irqsave(&ctx->ctx_lock, flags); | |
705 | ||
1da177e4 LT |
706 | /* |
707 | * cancelled requests don't get events, userland was given one | |
708 | * when the event got cancelled. | |
709 | */ | |
710 | if (kiocbIsCancelled(iocb)) | |
711 | goto put_rq; | |
712 | ||
e8e3c3d6 | 713 | ring = kmap_atomic(info->ring_pages[0]); |
1da177e4 LT |
714 | |
715 | tail = info->tail; | |
e8e3c3d6 | 716 | event = aio_ring_event(info, tail); |
4bf69b2a KC |
717 | if (++tail >= info->nr) |
718 | tail = 0; | |
1da177e4 LT |
719 | |
720 | event->obj = (u64)(unsigned long)iocb->ki_obj.user; | |
721 | event->data = iocb->ki_user_data; | |
722 | event->res = res; | |
723 | event->res2 = res2; | |
724 | ||
725 | dprintk("aio_complete: %p[%lu]: %p: %p %Lx %lx %lx\n", | |
726 | ctx, tail, iocb, iocb->ki_obj.user, iocb->ki_user_data, | |
727 | res, res2); | |
728 | ||
729 | /* after flagging the request as done, we | |
730 | * must never even look at it again | |
731 | */ | |
732 | smp_wmb(); /* make event visible before updating tail */ | |
733 | ||
734 | info->tail = tail; | |
735 | ring->tail = tail; | |
736 | ||
e8e3c3d6 CW |
737 | put_aio_ring_event(event); |
738 | kunmap_atomic(ring); | |
1da177e4 LT |
739 | |
740 | pr_debug("added to ring %p at [%lu]\n", iocb, tail); | |
8d1c98b0 DL |
741 | |
742 | /* | |
743 | * Check if the user asked us to deliver the result through an | |
744 | * eventfd. The eventfd_signal() function is safe to be called | |
745 | * from IRQ context. | |
746 | */ | |
87c3a86e | 747 | if (iocb->ki_eventfd != NULL) |
8d1c98b0 DL |
748 | eventfd_signal(iocb->ki_eventfd, 1); |
749 | ||
1da177e4 LT |
750 | put_rq: |
751 | /* everything turned out well, dispose of the aiocb. */ | |
2d68449e | 752 | __aio_put_req(ctx, iocb); |
1da177e4 | 753 | |
6cb2a210 QB |
754 | /* |
755 | * We have to order our ring_info tail store above and test | |
756 | * of the wait list below outside the wait lock. This is | |
757 | * like in wake_up_bit() where clearing a bit has to be | |
758 | * ordered with the unlocked test. | |
759 | */ | |
760 | smp_mb(); | |
761 | ||
1da177e4 LT |
762 | if (waitqueue_active(&ctx->wait)) |
763 | wake_up(&ctx->wait); | |
764 | ||
dee11c23 | 765 | spin_unlock_irqrestore(&ctx->ctx_lock, flags); |
1da177e4 | 766 | } |
385773e0 | 767 | EXPORT_SYMBOL(aio_complete); |
1da177e4 LT |
768 | |
769 | /* aio_read_evt | |
770 | * Pull an event off of the ioctx's event ring. Returns the number of | |
771 | * events fetched (0 or 1 ;-) | |
772 | * FIXME: make this use cmpxchg. | |
773 | * TODO: make the ringbuffer user mmap()able (requires FIXME). | |
774 | */ | |
775 | static int aio_read_evt(struct kioctx *ioctx, struct io_event *ent) | |
776 | { | |
777 | struct aio_ring_info *info = &ioctx->ring_info; | |
778 | struct aio_ring *ring; | |
779 | unsigned long head; | |
780 | int ret = 0; | |
781 | ||
e8e3c3d6 | 782 | ring = kmap_atomic(info->ring_pages[0]); |
1da177e4 LT |
783 | dprintk("in aio_read_evt h%lu t%lu m%lu\n", |
784 | (unsigned long)ring->head, (unsigned long)ring->tail, | |
785 | (unsigned long)ring->nr); | |
786 | ||
787 | if (ring->head == ring->tail) | |
788 | goto out; | |
789 | ||
790 | spin_lock(&info->ring_lock); | |
791 | ||
792 | head = ring->head % info->nr; | |
793 | if (head != ring->tail) { | |
e8e3c3d6 | 794 | struct io_event *evp = aio_ring_event(info, head); |
1da177e4 LT |
795 | *ent = *evp; |
796 | head = (head + 1) % info->nr; | |
797 | smp_mb(); /* finish reading the event before updating the head */ | |
798 | ring->head = head; | |
799 | ret = 1; | |
e8e3c3d6 | 800 | put_aio_ring_event(evp); |
1da177e4 LT |
801 | } |
802 | spin_unlock(&info->ring_lock); | |
803 | ||
804 | out: | |
1da177e4 LT |
805 | dprintk("leaving aio_read_evt: %d h%lu t%lu\n", ret, |
806 | (unsigned long)ring->head, (unsigned long)ring->tail); | |
91d80a84 | 807 | kunmap_atomic(ring); |
1da177e4 LT |
808 | return ret; |
809 | } | |
810 | ||
811 | struct aio_timeout { | |
812 | struct timer_list timer; | |
813 | int timed_out; | |
814 | struct task_struct *p; | |
815 | }; | |
816 | ||
817 | static void timeout_func(unsigned long data) | |
818 | { | |
819 | struct aio_timeout *to = (struct aio_timeout *)data; | |
820 | ||
821 | to->timed_out = 1; | |
822 | wake_up_process(to->p); | |
823 | } | |
824 | ||
825 | static inline void init_timeout(struct aio_timeout *to) | |
826 | { | |
c6f3a97f | 827 | setup_timer_on_stack(&to->timer, timeout_func, (unsigned long) to); |
1da177e4 LT |
828 | to->timed_out = 0; |
829 | to->p = current; | |
830 | } | |
831 | ||
832 | static inline void set_timeout(long start_jiffies, struct aio_timeout *to, | |
833 | const struct timespec *ts) | |
834 | { | |
835 | to->timer.expires = start_jiffies + timespec_to_jiffies(ts); | |
836 | if (time_after(to->timer.expires, jiffies)) | |
837 | add_timer(&to->timer); | |
838 | else | |
839 | to->timed_out = 1; | |
840 | } | |
841 | ||
842 | static inline void clear_timeout(struct aio_timeout *to) | |
843 | { | |
844 | del_singleshot_timer_sync(&to->timer); | |
845 | } | |
846 | ||
847 | static int read_events(struct kioctx *ctx, | |
848 | long min_nr, long nr, | |
849 | struct io_event __user *event, | |
850 | struct timespec __user *timeout) | |
851 | { | |
852 | long start_jiffies = jiffies; | |
853 | struct task_struct *tsk = current; | |
854 | DECLARE_WAITQUEUE(wait, tsk); | |
855 | int ret; | |
856 | int i = 0; | |
857 | struct io_event ent; | |
858 | struct aio_timeout to; | |
1da177e4 LT |
859 | |
860 | /* needed to zero any padding within an entry (there shouldn't be | |
861 | * any, but C is fun!) | |
862 | */ | |
863 | memset(&ent, 0, sizeof(ent)); | |
1da177e4 LT |
864 | ret = 0; |
865 | while (likely(i < nr)) { | |
866 | ret = aio_read_evt(ctx, &ent); | |
867 | if (unlikely(ret <= 0)) | |
868 | break; | |
869 | ||
870 | dprintk("read event: %Lx %Lx %Lx %Lx\n", | |
871 | ent.data, ent.obj, ent.res, ent.res2); | |
872 | ||
873 | /* Could we split the check in two? */ | |
874 | ret = -EFAULT; | |
875 | if (unlikely(copy_to_user(event, &ent, sizeof(ent)))) { | |
876 | dprintk("aio: lost an event due to EFAULT.\n"); | |
877 | break; | |
878 | } | |
879 | ret = 0; | |
880 | ||
881 | /* Good, event copied to userland, update counts. */ | |
882 | event ++; | |
883 | i ++; | |
884 | } | |
885 | ||
886 | if (min_nr <= i) | |
887 | return i; | |
888 | if (ret) | |
889 | return ret; | |
890 | ||
891 | /* End fast path */ | |
892 | ||
1da177e4 LT |
893 | init_timeout(&to); |
894 | if (timeout) { | |
895 | struct timespec ts; | |
896 | ret = -EFAULT; | |
897 | if (unlikely(copy_from_user(&ts, timeout, sizeof(ts)))) | |
898 | goto out; | |
899 | ||
900 | set_timeout(start_jiffies, &to, &ts); | |
901 | } | |
902 | ||
903 | while (likely(i < nr)) { | |
904 | add_wait_queue_exclusive(&ctx->wait, &wait); | |
905 | do { | |
906 | set_task_state(tsk, TASK_INTERRUPTIBLE); | |
907 | ret = aio_read_evt(ctx, &ent); | |
908 | if (ret) | |
909 | break; | |
910 | if (min_nr <= i) | |
911 | break; | |
e92adcba JM |
912 | if (unlikely(ctx->dead)) { |
913 | ret = -EINVAL; | |
914 | break; | |
915 | } | |
1da177e4 LT |
916 | if (to.timed_out) /* Only check after read evt */ |
917 | break; | |
e00ba3da JM |
918 | /* Try to only show up in io wait if there are ops |
919 | * in flight */ | |
920 | if (ctx->reqs_active) | |
921 | io_schedule(); | |
922 | else | |
923 | schedule(); | |
1da177e4 LT |
924 | if (signal_pending(tsk)) { |
925 | ret = -EINTR; | |
926 | break; | |
927 | } | |
928 | /*ret = aio_read_evt(ctx, &ent);*/ | |
929 | } while (1) ; | |
930 | ||
931 | set_task_state(tsk, TASK_RUNNING); | |
932 | remove_wait_queue(&ctx->wait, &wait); | |
933 | ||
934 | if (unlikely(ret <= 0)) | |
935 | break; | |
936 | ||
937 | ret = -EFAULT; | |
938 | if (unlikely(copy_to_user(event, &ent, sizeof(ent)))) { | |
939 | dprintk("aio: lost an event due to EFAULT.\n"); | |
940 | break; | |
941 | } | |
942 | ||
943 | /* Good, event copied to userland, update counts. */ | |
944 | event ++; | |
945 | i ++; | |
946 | } | |
947 | ||
948 | if (timeout) | |
949 | clear_timeout(&to); | |
950 | out: | |
c6f3a97f | 951 | destroy_timer_on_stack(&to.timer); |
1da177e4 LT |
952 | return i ? i : ret; |
953 | } | |
954 | ||
955 | /* Take an ioctx and remove it from the list of ioctx's. Protects | |
956 | * against races with itself via ->dead. | |
957 | */ | |
958 | static void io_destroy(struct kioctx *ioctx) | |
959 | { | |
960 | struct mm_struct *mm = current->mm; | |
1da177e4 LT |
961 | int was_dead; |
962 | ||
963 | /* delete the entry from the list if someone else hasn't already */ | |
abf137dd | 964 | spin_lock(&mm->ioctx_lock); |
1da177e4 LT |
965 | was_dead = ioctx->dead; |
966 | ioctx->dead = 1; | |
abf137dd JA |
967 | hlist_del_rcu(&ioctx->list); |
968 | spin_unlock(&mm->ioctx_lock); | |
1da177e4 LT |
969 | |
970 | dprintk("aio_release(%p)\n", ioctx); | |
971 | if (likely(!was_dead)) | |
972 | put_ioctx(ioctx); /* twice for the list */ | |
973 | ||
06af121e | 974 | kill_ctx(ioctx); |
e92adcba JM |
975 | |
976 | /* | |
977 | * Wake up any waiters. The setting of ctx->dead must be seen | |
978 | * by other CPUs at this point. Right now, we rely on the | |
979 | * locking done by the above calls to ensure this consistency. | |
980 | */ | |
e91f90bb | 981 | wake_up_all(&ioctx->wait); |
1da177e4 LT |
982 | } |
983 | ||
984 | /* sys_io_setup: | |
985 | * Create an aio_context capable of receiving at least nr_events. | |
986 | * ctxp must not point to an aio_context that already exists, and | |
987 | * must be initialized to 0 prior to the call. On successful | |
988 | * creation of the aio_context, *ctxp is filled in with the resulting | |
989 | * handle. May fail with -EINVAL if *ctxp is not initialized, | |
990 | * if the specified nr_events exceeds internal limits. May fail | |
991 | * with -EAGAIN if the specified nr_events exceeds the user's limit | |
992 | * of available events. May fail with -ENOMEM if insufficient kernel | |
993 | * resources are available. May fail with -EFAULT if an invalid | |
994 | * pointer is passed for ctxp. Will fail with -ENOSYS if not | |
995 | * implemented. | |
996 | */ | |
002c8976 | 997 | SYSCALL_DEFINE2(io_setup, unsigned, nr_events, aio_context_t __user *, ctxp) |
1da177e4 LT |
998 | { |
999 | struct kioctx *ioctx = NULL; | |
1000 | unsigned long ctx; | |
1001 | long ret; | |
1002 | ||
1003 | ret = get_user(ctx, ctxp); | |
1004 | if (unlikely(ret)) | |
1005 | goto out; | |
1006 | ||
1007 | ret = -EINVAL; | |
d55b5fda ZB |
1008 | if (unlikely(ctx || nr_events == 0)) { |
1009 | pr_debug("EINVAL: io_setup: ctx %lu nr_events %u\n", | |
1010 | ctx, nr_events); | |
1da177e4 LT |
1011 | goto out; |
1012 | } | |
1013 | ||
1014 | ioctx = ioctx_alloc(nr_events); | |
1015 | ret = PTR_ERR(ioctx); | |
1016 | if (!IS_ERR(ioctx)) { | |
1017 | ret = put_user(ioctx->user_id, ctxp); | |
a2e1859a AV |
1018 | if (ret) |
1019 | io_destroy(ioctx); | |
1020 | put_ioctx(ioctx); | |
1da177e4 LT |
1021 | } |
1022 | ||
1023 | out: | |
1024 | return ret; | |
1025 | } | |
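Glibc does not wrap this syscall, so userspace usually reaches it through syscall(2) or libaio's io_setup(). A minimal sketch of the former (error handling reduced to a perror(); the matching teardown is syscall(SYS_io_destroy, ctx)):

```c
#include <linux/aio_abi.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Userspace sketch: create an AIO context able to hold 128 in-flight
 * events. libaio's io_setup() wraps the same system call. */
static aio_context_t setup_ctx(void)
{
	aio_context_t ctx = 0;		/* must be zeroed before the call */

	if (syscall(SYS_io_setup, 128, &ctx) < 0)
		perror("io_setup");	/* EINVAL/EAGAIN/ENOMEM/EFAULT */
	return ctx;
}
```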
1026 | ||
1027 | /* sys_io_destroy: | |
1028 | * Destroy the aio_context specified. May cancel any outstanding | |
1029 | * AIOs and block on completion. Will fail with -ENOSYS if not | |
642b5123 | 1030 | * implemented. May fail with -EINVAL if the context pointed to |
1da177e4 LT |
1031 | * is invalid. |
1032 | */ | |
002c8976 | 1033 | SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx) |
1da177e4 LT |
1034 | { |
1035 | struct kioctx *ioctx = lookup_ioctx(ctx); | |
1036 | if (likely(NULL != ioctx)) { | |
1037 | io_destroy(ioctx); | |
a2e1859a | 1038 | put_ioctx(ioctx); |
1da177e4 LT |
1039 | return 0; |
1040 | } | |
1041 | pr_debug("EINVAL: io_destroy: invalid context id\n"); | |
1042 | return -EINVAL; | |
1043 | } | |
1044 | ||
eed4e51f | 1045 | static void aio_advance_iovec(struct kiocb *iocb, ssize_t ret) |
1da177e4 | 1046 | { |
eed4e51f BP |
1047 | struct iovec *iov = &iocb->ki_iovec[iocb->ki_cur_seg]; |
1048 | ||
1049 | BUG_ON(ret <= 0); | |
1050 | ||
1051 | while (iocb->ki_cur_seg < iocb->ki_nr_segs && ret > 0) { | |
1052 | ssize_t this = min((ssize_t)iov->iov_len, ret); | |
1053 | iov->iov_base += this; | |
1054 | iov->iov_len -= this; | |
1055 | iocb->ki_left -= this; | |
1056 | ret -= this; | |
1057 | if (iov->iov_len == 0) { | |
1058 | iocb->ki_cur_seg++; | |
1059 | iov++; | |
897f15fb | 1060 | } |
eed4e51f | 1061 | } |
1da177e4 | 1062 | |
eed4e51f BP |
1063 | /* the caller should not have done more io than what fit in |
1064 | * the remaining iovecs */ | |
1065 | BUG_ON(ret > 0 && iocb->ki_left == 0); | |
1da177e4 LT |
1066 | } |
1067 | ||
eed4e51f | 1068 | static ssize_t aio_rw_vect_retry(struct kiocb *iocb) |
1da177e4 LT |
1069 | { |
1070 | struct file *file = iocb->ki_filp; | |
eed4e51f BP |
1071 | struct address_space *mapping = file->f_mapping; |
1072 | struct inode *inode = mapping->host; | |
1073 | ssize_t (*rw_op)(struct kiocb *, const struct iovec *, | |
1074 | unsigned long, loff_t); | |
1da177e4 | 1075 | ssize_t ret = 0; |
eed4e51f BP |
1076 | unsigned short opcode; |
1077 | ||
1078 | if ((iocb->ki_opcode == IOCB_CMD_PREADV) || | |
1079 | (iocb->ki_opcode == IOCB_CMD_PREAD)) { | |
1080 | rw_op = file->f_op->aio_read; | |
1081 | opcode = IOCB_CMD_PREADV; | |
1082 | } else { | |
1083 | rw_op = file->f_op->aio_write; | |
1084 | opcode = IOCB_CMD_PWRITEV; | |
1085 | } | |
1da177e4 | 1086 | |
c2ec6682 RR |
1087 | /* This matches the pread()/pwrite() logic */ |
1088 | if (iocb->ki_pos < 0) | |
1089 | return -EINVAL; | |
1090 | ||
8d71db4f AV |
1091 | if (opcode == IOCB_CMD_PWRITEV) |
1092 | file_start_write(file); | |
897f15fb | 1093 | do { |
eed4e51f BP |
1094 | ret = rw_op(iocb, &iocb->ki_iovec[iocb->ki_cur_seg], |
1095 | iocb->ki_nr_segs - iocb->ki_cur_seg, | |
1096 | iocb->ki_pos); | |
1097 | if (ret > 0) | |
1098 | aio_advance_iovec(iocb, ret); | |
1099 | ||
1100 | /* retry all partial writes. retry partial reads as long as it's a | |
1101 | * regular file. */ | |
1102 | } while (ret > 0 && iocb->ki_left > 0 && | |
1103 | (opcode == IOCB_CMD_PWRITEV || | |
1104 | (!S_ISFIFO(inode->i_mode) && !S_ISSOCK(inode->i_mode)))); | |
8d71db4f AV |
1105 | if (opcode == IOCB_CMD_PWRITEV) |
1106 | file_end_write(file); | |
1da177e4 | 1107 | |
eed4e51f BP |
1108 | /* This means we must have transferred all that we could */ |
1109 | /* No need to retry anymore */ | |
1da177e4 LT |
1110 | if ((ret == 0) || (iocb->ki_left == 0)) |
1111 | ret = iocb->ki_nbytes - iocb->ki_left; | |
1112 | ||
7adfa2ff RR |
1113 | /* If we managed to write some out we return that, rather than |
1114 | * the eventual error. */ | |
1115 | if (opcode == IOCB_CMD_PWRITEV | |
41003a7b | 1116 | && ret < 0 && ret != -EIOCBQUEUED |
7adfa2ff RR |
1117 | && iocb->ki_nbytes - iocb->ki_left) |
1118 | ret = iocb->ki_nbytes - iocb->ki_left; | |
1119 | ||
1da177e4 LT |
1120 | return ret; |
1121 | } | |
1122 | ||
1123 | static ssize_t aio_fdsync(struct kiocb *iocb) | |
1124 | { | |
1125 | struct file *file = iocb->ki_filp; | |
1126 | ssize_t ret = -EINVAL; | |
1127 | ||
1128 | if (file->f_op->aio_fsync) | |
1129 | ret = file->f_op->aio_fsync(iocb, 1); | |
1130 | return ret; | |
1131 | } | |
1132 | ||
1133 | static ssize_t aio_fsync(struct kiocb *iocb) | |
1134 | { | |
1135 | struct file *file = iocb->ki_filp; | |
1136 | ssize_t ret = -EINVAL; | |
1137 | ||
1138 | if (file->f_op->aio_fsync) | |
1139 | ret = file->f_op->aio_fsync(iocb, 0); | |
1140 | return ret; | |
1141 | } | |
1142 | ||
9d85cba7 | 1143 | static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat) |
eed4e51f BP |
1144 | { |
1145 | ssize_t ret; | |
1146 | ||
9d85cba7 JM |
1147 | #ifdef CONFIG_COMPAT |
1148 | if (compat) | |
1149 | ret = compat_rw_copy_check_uvector(type, | |
1150 | (struct compat_iovec __user *)kiocb->ki_buf, | |
1151 | kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec, | |
ac34ebb3 | 1152 | &kiocb->ki_iovec); |
9d85cba7 JM |
1153 | else |
1154 | #endif | |
1155 | ret = rw_copy_check_uvector(type, | |
1156 | (struct iovec __user *)kiocb->ki_buf, | |
1157 | kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec, | |
ac34ebb3 | 1158 | &kiocb->ki_iovec); |
eed4e51f BP |
1159 | if (ret < 0) |
1160 | goto out; | |
1161 | ||
a70b52ec LT |
1162 | ret = rw_verify_area(type, kiocb->ki_filp, &kiocb->ki_pos, ret); |
1163 | if (ret < 0) | |
1164 | goto out; | |
1165 | ||
eed4e51f BP |
1166 | kiocb->ki_nr_segs = kiocb->ki_nbytes; |
1167 | kiocb->ki_cur_seg = 0; | |
1168 | /* ki_nbytes/left now reflect bytes instead of segs */ | |
1169 | kiocb->ki_nbytes = ret; | |
1170 | kiocb->ki_left = ret; | |
1171 | ||
1172 | ret = 0; | |
1173 | out: | |
1174 | return ret; | |
1175 | } | |
1176 | ||
a70b52ec | 1177 | static ssize_t aio_setup_single_vector(int type, struct file * file, struct kiocb *kiocb) |
eed4e51f | 1178 | { |
a70b52ec LT |
1179 | int bytes; |
1180 | ||
1181 | bytes = rw_verify_area(type, file, &kiocb->ki_pos, kiocb->ki_left); | |
1182 | if (bytes < 0) | |
1183 | return bytes; | |
1184 | ||
eed4e51f BP |
1185 | kiocb->ki_iovec = &kiocb->ki_inline_vec; |
1186 | kiocb->ki_iovec->iov_base = kiocb->ki_buf; | |
a70b52ec | 1187 | kiocb->ki_iovec->iov_len = bytes; |
eed4e51f BP |
1188 | kiocb->ki_nr_segs = 1; |
1189 | kiocb->ki_cur_seg = 0; | |
eed4e51f BP |
1190 | return 0; |
1191 | } | |
1192 | ||
1da177e4 LT |
1193 | /* |
1194 | * aio_setup_iocb: | |
1195 | * Performs the initial checks and aio retry method | |
1196 | * setup for the kiocb at the time of io submission. | |
1197 | */ | |
9d85cba7 | 1198 | static ssize_t aio_setup_iocb(struct kiocb *kiocb, bool compat) |
1da177e4 LT |
1199 | { |
1200 | struct file *file = kiocb->ki_filp; | |
1201 | ssize_t ret = 0; | |
1202 | ||
1203 | switch (kiocb->ki_opcode) { | |
1204 | case IOCB_CMD_PREAD: | |
1205 | ret = -EBADF; | |
1206 | if (unlikely(!(file->f_mode & FMODE_READ))) | |
1207 | break; | |
1208 | ret = -EFAULT; | |
1209 | if (unlikely(!access_ok(VERIFY_WRITE, kiocb->ki_buf, | |
1210 | kiocb->ki_left))) | |
1211 | break; | |
a70b52ec | 1212 | ret = aio_setup_single_vector(READ, file, kiocb); |
eed4e51f BP |
1213 | if (ret) |
1214 | break; | |
1da177e4 LT |
1215 | ret = -EINVAL; |
1216 | if (file->f_op->aio_read) | |
eed4e51f | 1217 | kiocb->ki_retry = aio_rw_vect_retry; |
1da177e4 LT |
1218 | break; |
1219 | case IOCB_CMD_PWRITE: | |
1220 | ret = -EBADF; | |
1221 | if (unlikely(!(file->f_mode & FMODE_WRITE))) | |
1222 | break; | |
1223 | ret = -EFAULT; | |
1224 | if (unlikely(!access_ok(VERIFY_READ, kiocb->ki_buf, | |
1225 | kiocb->ki_left))) | |
1226 | break; | |
a70b52ec | 1227 | ret = aio_setup_single_vector(WRITE, file, kiocb); |
eed4e51f BP |
1228 | if (ret) |
1229 | break; | |
1230 | ret = -EINVAL; | |
1231 | if (file->f_op->aio_write) | |
1232 | kiocb->ki_retry = aio_rw_vect_retry; | |
1233 | break; | |
1234 | case IOCB_CMD_PREADV: | |
1235 | ret = -EBADF; | |
1236 | if (unlikely(!(file->f_mode & FMODE_READ))) | |
1237 | break; | |
9d85cba7 | 1238 | ret = aio_setup_vectored_rw(READ, kiocb, compat); |
eed4e51f BP |
1239 | if (ret) |
1240 | break; | |
1241 | ret = -EINVAL; | |
1242 | if (file->f_op->aio_read) | |
1243 | kiocb->ki_retry = aio_rw_vect_retry; | |
1244 | break; | |
1245 | case IOCB_CMD_PWRITEV: | |
1246 | ret = -EBADF; | |
1247 | if (unlikely(!(file->f_mode & FMODE_WRITE))) | |
1248 | break; | |
9d85cba7 | 1249 | ret = aio_setup_vectored_rw(WRITE, kiocb, compat); |
eed4e51f BP |
1250 | if (ret) |
1251 | break; | |
1da177e4 LT |
1252 | ret = -EINVAL; |
1253 | if (file->f_op->aio_write) | |
eed4e51f | 1254 | kiocb->ki_retry = aio_rw_vect_retry; |
1da177e4 LT |
1255 | break; |
1256 | case IOCB_CMD_FDSYNC: | |
1257 | ret = -EINVAL; | |
1258 | if (file->f_op->aio_fsync) | |
1259 | kiocb->ki_retry = aio_fdsync; | |
1260 | break; | |
1261 | case IOCB_CMD_FSYNC: | |
1262 | ret = -EINVAL; | |
1263 | if (file->f_op->aio_fsync) | |
1264 | kiocb->ki_retry = aio_fsync; | |
1265 | break; | |
1266 | default: | |
1267 | dprintk("EINVAL: io_submit: no operation provided\n"); | |
1268 | ret = -EINVAL; | |
1269 | } | |
1270 | ||
1271 | if (!kiocb->ki_retry) | |
1272 | return ret; | |
1273 | ||
1274 | return 0; | |
1275 | } | |
1276 | ||
d5470b59 | 1277 | static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb, |
080d676d JM |
1278 | struct iocb *iocb, struct kiocb_batch *batch, |
1279 | bool compat) | |
1da177e4 LT |
1280 | { |
1281 | struct kiocb *req; | |
1282 | struct file *file; | |
1283 | ssize_t ret; | |
1284 | ||
1285 | /* enforce forwards compatibility on users */ | |
9c3060be | 1286 | if (unlikely(iocb->aio_reserved1 || iocb->aio_reserved2)) { |
1da177e4 LT |
1287 | pr_debug("EINVAL: io_submit: reserve field set\n"); |
1288 | return -EINVAL; | |
1289 | } | |
1290 | ||
1291 | /* prevent overflows */ | |
1292 | if (unlikely( | |
1293 | (iocb->aio_buf != (unsigned long)iocb->aio_buf) || | |
1294 | (iocb->aio_nbytes != (size_t)iocb->aio_nbytes) || | |
1295 | ((ssize_t)iocb->aio_nbytes < 0) | |
1296 | )) { | |
1297 | pr_debug("EINVAL: io_submit: overflow check\n"); | |
1298 | return -EINVAL; | |
1299 | } | |
1300 | ||
1301 | file = fget(iocb->aio_fildes); | |
1302 | if (unlikely(!file)) | |
1303 | return -EBADF; | |
1304 | ||
080d676d | 1305 | req = aio_get_req(ctx, batch); /* returns with 2 references to req */ |
1da177e4 LT |
1306 | if (unlikely(!req)) { |
1307 | fput(file); | |
1308 | return -EAGAIN; | |
1309 | } | |
87e2831c | 1310 | req->ki_filp = file; |
9c3060be DL |
1311 | if (iocb->aio_flags & IOCB_FLAG_RESFD) { |
1312 | /* | |
1313 | * If the IOCB_FLAG_RESFD flag of aio_flags is set, get an | |
1314 | * instance of the file* now. The file descriptor must be | |
1315 | * an eventfd() fd, and will be signaled for each completed | |
1316 | * event using the eventfd_signal() function. | |
1317 | */ | |
13389010 | 1318 | req->ki_eventfd = eventfd_ctx_fdget((int) iocb->aio_resfd); |
801678c5 | 1319 | if (IS_ERR(req->ki_eventfd)) { |
9c3060be | 1320 | ret = PTR_ERR(req->ki_eventfd); |
87c3a86e | 1321 | req->ki_eventfd = NULL; |
9c3060be DL |
1322 | goto out_put_req; |
1323 | } | |
1324 | } | |
1da177e4 | 1325 | |
212079cf | 1326 | ret = put_user(req->ki_key, &user_iocb->aio_key); |
1da177e4 LT |
1327 | if (unlikely(ret)) { |
1328 | dprintk("EFAULT: aio_key\n"); | |
1329 | goto out_put_req; | |
1330 | } | |
1331 | ||
1332 | req->ki_obj.user = user_iocb; | |
1333 | req->ki_user_data = iocb->aio_data; | |
1334 | req->ki_pos = iocb->aio_offset; | |
1335 | ||
1336 | req->ki_buf = (char __user *)(unsigned long)iocb->aio_buf; | |
1337 | req->ki_left = req->ki_nbytes = iocb->aio_nbytes; | |
1338 | req->ki_opcode = iocb->aio_lio_opcode; | |
1da177e4 | 1339 | |
9d85cba7 | 1340 | ret = aio_setup_iocb(req, compat); |
1da177e4 LT |
1341 | |
1342 | if (ret) | |
1343 | goto out_put_req; | |
1344 | ||
1345 | spin_lock_irq(&ctx->ctx_lock); | |
7137c6bd JK |
1346 | /* |
1347 | * We could have raced with io_destroy() and are currently holding a | |
1348 | * reference to ctx which should be destroyed. We cannot submit IO | |
1349 | * since ctx gets freed as soon as io_submit() puts its reference. The | |
1350 | * check here is reliable: io_destroy() sets ctx->dead before waiting | |
1351 | * for outstanding IO and the barrier between these two is realized by | |
1352 | * unlock of mm->ioctx_lock and lock of ctx->ctx_lock. Analogously we | |
1353 | * increment ctx->reqs_active before checking for ctx->dead and the | |
1354 | * barrier is realized by unlock and lock of ctx->ctx_lock. Thus if we | |
1355 | * don't see ctx->dead set here, io_destroy() waits for our IO to | |
1356 | * finish. | |
1357 | */ | |
41003a7b | 1358 | if (ctx->dead) |
7137c6bd | 1359 | ret = -EINVAL; |
41003a7b ZB |
1360 | spin_unlock_irq(&ctx->ctx_lock); |
1361 | if (ret) | |
7137c6bd | 1362 | goto out_put_req; |
41003a7b ZB |
1363 | |
1364 | if (unlikely(kiocbIsCancelled(req))) | |
1365 | ret = -EINTR; | |
1366 | else | |
1367 | ret = req->ki_retry(req); | |
1368 | ||
1369 | if (ret != -EIOCBQUEUED) { | |
1370 | /* | |
1371 | * There's no easy way to restart the syscall since other AIO's | |
1372 | * may be already running. Just fail this IO with EINTR. | |
1373 | */ | |
1374 | if (unlikely(ret == -ERESTARTSYS || ret == -ERESTARTNOINTR || | |
1375 | ret == -ERESTARTNOHAND || | |
1376 | ret == -ERESTART_RESTARTBLOCK)) | |
1377 | ret = -EINTR; | |
1378 | aio_complete(req, ret, 0); | |
7137c6bd | 1379 | } |
cfb1e33e | 1380 | |
1da177e4 LT |
1381 | aio_put_req(req); /* drop extra ref to req */ |
1382 | return 0; | |
1383 | ||
1384 | out_put_req: | |
1385 | aio_put_req(req); /* drop extra ref to req */ | |
1386 | aio_put_req(req); /* drop i/o ref to req */ | |
1387 | return ret; | |
1388 | } | |
1389 | ||
9d85cba7 JM |
1390 | long do_io_submit(aio_context_t ctx_id, long nr, |
1391 | struct iocb __user *__user *iocbpp, bool compat) | |
1da177e4 LT |
1392 | { |
1393 | struct kioctx *ctx; | |
1394 | long ret = 0; | |
080d676d | 1395 | int i = 0; |
9f5b9425 | 1396 | struct blk_plug plug; |
080d676d | 1397 | struct kiocb_batch batch; |
1da177e4 LT |
1398 | |
1399 | if (unlikely(nr < 0)) | |
1400 | return -EINVAL; | |
1401 | ||
75e1c70f JM |
1402 | if (unlikely(nr > LONG_MAX/sizeof(*iocbpp))) |
1403 | nr = LONG_MAX/sizeof(*iocbpp); | |
1404 | ||
1da177e4 LT |
1405 | if (unlikely(!access_ok(VERIFY_READ, iocbpp, (nr*sizeof(*iocbpp))))) |
1406 | return -EFAULT; | |
1407 | ||
1408 | ctx = lookup_ioctx(ctx_id); | |
1409 | if (unlikely(!ctx)) { | |
1410 | pr_debug("EINVAL: io_submit: invalid context id\n"); | |
1411 | return -EINVAL; | |
1412 | } | |
1413 | ||
080d676d JM |
1414 | kiocb_batch_init(&batch, nr); |
1415 | ||
9f5b9425 SL |
1416 | blk_start_plug(&plug); |
1417 | ||
1da177e4 LT |
1418 | /* |
1419 | * AKPM: should this return a partial result if some of the IOs were | |
1420 | * successfully submitted? | |
1421 | */ | |
1422 | for (i=0; i<nr; i++) { | |
1423 | struct iocb __user *user_iocb; | |
1424 | struct iocb tmp; | |
1425 | ||
1426 | if (unlikely(__get_user(user_iocb, iocbpp + i))) { | |
1427 | ret = -EFAULT; | |
1428 | break; | |
1429 | } | |
1430 | ||
1431 | if (unlikely(copy_from_user(&tmp, user_iocb, sizeof(tmp)))) { | |
1432 | ret = -EFAULT; | |
1433 | break; | |
1434 | } | |
1435 | ||
080d676d | 1436 | ret = io_submit_one(ctx, user_iocb, &tmp, &batch, compat); |
1da177e4 LT |
1437 | if (ret) |
1438 | break; | |
1439 | } | |
9f5b9425 | 1440 | blk_finish_plug(&plug); |
1da177e4 | 1441 | |
69e4747e | 1442 | kiocb_batch_free(ctx, &batch); |
1da177e4 LT |
1443 | put_ioctx(ctx); |
1444 | return i ? i : ret; | |
1445 | } | |
1446 | ||
9d85cba7 JM |
1447 | /* sys_io_submit: |
1448 | * Queue the nr iocbs pointed to by iocbpp for processing. Returns | |
1449 | * the number of iocbs queued. May return -EINVAL if the aio_context | |
1450 | * specified by ctx_id is invalid, if nr is < 0, if the iocb at | |
1451 | * *iocbpp[0] is not properly initialized, if the operation specified | |
1452 | * is invalid for the file descriptor in the iocb. May fail with | |
1453 | * -EFAULT if any of the data structures point to invalid data. May | |
1454 | * fail with -EBADF if the file descriptor specified in the first | |
1455 | * iocb is invalid. May fail with -EAGAIN if insufficient resources | |
1456 | * are available to queue any iocbs. Will return 0 if nr is 0. Will | |
1457 | * fail with -ENOSYS if not implemented. | |
1458 | */ | |
1459 | SYSCALL_DEFINE3(io_submit, aio_context_t, ctx_id, long, nr, | |
1460 | struct iocb __user * __user *, iocbpp) | |
1461 | { | |
1462 | return do_io_submit(ctx_id, nr, iocbpp, 0); | |
1463 | } | |
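From userspace each request is described by the ABI type struct iocb from <linux/aio_abi.h> (distinct from the kernel-internal struct kiocb used throughout this file). A minimal sketch queuing one read; fd and buf are assumed to be provided by the caller:

```c
#include <linux/aio_abi.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Userspace sketch: submit a single 4096-byte read at offset 0. */
static long submit_one_read(aio_context_t ctx, int fd, void *buf)
{
	struct iocb cb;
	struct iocb *cbs[1] = { &cb };

	memset(&cb, 0, sizeof(cb));	/* reserved fields must be zero */
	cb.aio_lio_opcode = IOCB_CMD_PREAD;
	cb.aio_fildes	  = fd;
	cb.aio_buf	  = (__u64)(unsigned long)buf;
	cb.aio_nbytes	  = 4096;
	cb.aio_offset	  = 0;

	/* returns the number of iocbs queued (0 or 1 here), or -1 */
	return syscall(SYS_io_submit, ctx, 1, cbs);
}
```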
1464 | ||
1da177e4 LT |
1465 | /* lookup_kiocb |
1466 | * Finds a given iocb for cancellation. | |
1da177e4 | 1467 | */ |
25ee7e38 AB |
1468 | static struct kiocb *lookup_kiocb(struct kioctx *ctx, struct iocb __user *iocb, |
1469 | u32 key) | |
1da177e4 LT |
1470 | { |
1471 | struct list_head *pos; | |
d00689af ZB |
1472 | |
1473 | assert_spin_locked(&ctx->ctx_lock); | |
1474 | ||
1da177e4 LT |
1475 | /* TODO: use a hash or array, this sucks. */ |
1476 | list_for_each(pos, &ctx->active_reqs) { | |
1477 | struct kiocb *kiocb = list_kiocb(pos); | |
1478 | if (kiocb->ki_obj.user == iocb && kiocb->ki_key == key) | |
1479 | return kiocb; | |
1480 | } | |
1481 | return NULL; | |
1482 | } | |
1483 | ||
1484 | /* sys_io_cancel: | |
1485 | * Attempts to cancel an iocb previously passed to io_submit. If | |
1486 | * the operation is successfully cancelled, the resulting event is | |
1487 | * copied into the memory pointed to by result without being placed | |
1488 | * into the completion queue and 0 is returned. May fail with | |
1489 | * -EFAULT if any of the data structures pointed to are invalid. | |
1490 | * May fail with -EINVAL if aio_context specified by ctx_id is | |
1491 | * invalid. May fail with -EAGAIN if the iocb specified was not | |
1492 | * cancelled. Will fail with -ENOSYS if not implemented. | |
1493 | */ | |
002c8976 HC |
1494 | SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb, |
1495 | struct io_event __user *, result) | |
1da177e4 | 1496 | { |
906b973c | 1497 | struct io_event res; |
1da177e4 LT |
1498 | struct kioctx *ctx; |
1499 | struct kiocb *kiocb; | |
1500 | u32 key; | |
1501 | int ret; | |
1502 | ||
1503 | ret = get_user(key, &iocb->aio_key); | |
1504 | if (unlikely(ret)) | |
1505 | return -EFAULT; | |
1506 | ||
1507 | ctx = lookup_ioctx(ctx_id); | |
1508 | if (unlikely(!ctx)) | |
1509 | return -EINVAL; | |
1510 | ||
1511 | spin_lock_irq(&ctx->ctx_lock); | |
906b973c | 1512 | |
1da177e4 | 1513 | kiocb = lookup_kiocb(ctx, iocb, key); |
906b973c KO |
1514 | if (kiocb) |
1515 | ret = kiocb_cancel(ctx, kiocb, &res); | |
1516 | else | |
1517 | ret = -EINVAL; | |
1518 | ||
1da177e4 LT |
1519 | spin_unlock_irq(&ctx->ctx_lock); |
1520 | ||
906b973c KO |
1521 | if (!ret) { |
1522 | /* Cancellation succeeded -- copy the result | |
1523 | * into the user's buffer. | |
1524 | */ | |
1525 | if (copy_to_user(result, &res, sizeof(res))) | |
1526 | ret = -EFAULT; | |
1527 | } | |
1da177e4 LT |
1528 | |
1529 | put_ioctx(ctx); | |
1530 | ||
1531 | return ret; | |
1532 | } | |
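A userspace cancellation sketch: the caller passes the same struct iocb it handed to io_submit (io_submit_one() wrote the request's aio_key back into it), and on success the completion lands in res instead of the ring:

```c
#include <linux/aio_abi.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Userspace sketch: try to cancel the request described by cb. */
static long cancel_one(aio_context_t ctx, struct iocb *cb,
		       struct io_event *res)
{
	return syscall(SYS_io_cancel, ctx, cb, res);
}
```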
1533 | ||
1534 | /* io_getevents: | |
1535 | * Attempts to read at least min_nr events and up to nr events from | |
642b5123 ST |
1536 | * the completion queue for the aio_context specified by ctx_id. If |
1537 | * it succeeds, the number of read events is returned. May fail with | |
1538 | * -EINVAL if ctx_id is invalid, if min_nr is out of range, if nr is | |
1539 | * out of range, if timeout is out of range. May fail with -EFAULT | |
1540 | * if any of the memory specified is invalid. May return 0 or | |
1541 | * < min_nr if the timeout specified by timeout has elapsed | |
1542 | * before sufficient events are available, where timeout == NULL | |
1543 | * specifies an infinite timeout. Note that the timeout pointed to by | |
1544 | * timeout is relative and will be updated if not NULL and the | |
1545 | * operation blocks. Will fail with -ENOSYS if not implemented. | |
1da177e4 | 1546 | */ |
002c8976 HC |
1547 | SYSCALL_DEFINE5(io_getevents, aio_context_t, ctx_id, |
1548 | long, min_nr, | |
1549 | long, nr, | |
1550 | struct io_event __user *, events, | |
1551 | struct timespec __user *, timeout) | |
1da177e4 LT |
1552 | { |
1553 | struct kioctx *ioctx = lookup_ioctx(ctx_id); | |
1554 | long ret = -EINVAL; | |
1555 | ||
1556 | if (likely(ioctx)) { | |
2e410255 | 1557 | if (likely(min_nr <= nr && min_nr >= 0)) |
1da177e4 LT |
1558 | ret = read_events(ioctx, min_nr, nr, events, timeout); |
1559 | put_ioctx(ioctx); | |
1560 | } | |
1da177e4 LT |
1561 | return ret; |
1562 | } |
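Completions are then reaped from userspace with io_getevents. A minimal sketch that blocks until at least one of up to eight events is available (the caller supplies the events array; a NULL timeout means wait indefinitely):

```c
#include <linux/aio_abi.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Userspace sketch: wait for 1..8 completions from the ring. */
static long reap_events(aio_context_t ctx, struct io_event *events)
{
	return syscall(SYS_io_getevents, ctx, 1 /* min_nr */, 8 /* nr */,
		       events, NULL /* timeout */);
}
```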