Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * An async IO implementation for Linux | |
3 | * Written by Benjamin LaHaise <bcrl@kvack.org> | |
4 | * | |
5 | * Implements an efficient asynchronous io interface. | |
6 | * | |
7 | * Copyright 2000, 2001, 2002 Red Hat, Inc. All Rights Reserved. | |
8 | * | |
9 | * See ../COPYING for licensing terms. | |
10 | */ | |
caf4167a KO |
11 | #define pr_fmt(fmt) "%s: " fmt, __func__ |
12 | ||
1da177e4 LT |
13 | #include <linux/kernel.h> |
14 | #include <linux/init.h> | |
15 | #include <linux/errno.h> | |
16 | #include <linux/time.h> | |
17 | #include <linux/aio_abi.h> | |
630d9c47 | 18 | #include <linux/export.h> |
1da177e4 | 19 | #include <linux/syscalls.h> |
b9d128f1 | 20 | #include <linux/backing-dev.h> |
027445c3 | 21 | #include <linux/uio.h> |
1da177e4 | 22 | |
1da177e4 LT |
23 | #include <linux/sched.h> |
24 | #include <linux/fs.h> | |
25 | #include <linux/file.h> | |
26 | #include <linux/mm.h> | |
27 | #include <linux/mman.h> | |
3d2d827f | 28 | #include <linux/mmu_context.h> |
1da177e4 LT |
29 | #include <linux/slab.h> |
30 | #include <linux/timer.h> | |
31 | #include <linux/aio.h> | |
32 | #include <linux/highmem.h> | |
33 | #include <linux/workqueue.h> | |
34 | #include <linux/security.h> | |
9c3060be | 35 | #include <linux/eventfd.h> |
cfb1e33e | 36 | #include <linux/blkdev.h> |
9d85cba7 | 37 | #include <linux/compat.h> |
3c2a0909 | 38 | #include <linux/personality.h> |
1da177e4 LT |
39 | |
40 | #include <asm/kmap_types.h> | |
41 | #include <asm/uaccess.h> | |
1da177e4 | 42 | |
4e179bca KO |
43 | #define AIO_RING_MAGIC 0xa10a10a1 |
44 | #define AIO_RING_COMPAT_FEATURES 1 | |
45 | #define AIO_RING_INCOMPAT_FEATURES 0 | |
46 | struct aio_ring { | |
47 | unsigned id; /* kernel internal index number */ | |
48 | unsigned nr; /* number of io_events */ | |
49 | unsigned head; | |
50 | unsigned tail; | |
51 | ||
52 | unsigned magic; | |
53 | unsigned compat_features; | |
54 | unsigned incompat_features; | |
55 | unsigned header_length; /* size of aio_ring */ | |
56 | ||
57 | ||
58 | struct io_event io_events[0]; | |
59 | }; /* 128 bytes + ring size */ | |
60 | ||
61 | #define AIO_RING_PAGES 8 | |
4e179bca | 62 | |
4e179bca KO |
63 | struct kioctx { |
64 | atomic_t users; | |
36f55889 | 65 | atomic_t dead; |
4e179bca KO |
66 | |
67 | /* This needs improving */ | |
68 | unsigned long user_id; | |
69 | struct hlist_node list; | |
70 | ||
3e845ce0 KO |
71 | /* |
72 | * This is what userspace passed to io_setup(); it's not used for | |
73 | * anything but counting against the global max_reqs quota. | |
74 | * | |
58c85dc2 | 75 | * The real limit is nr_events - 1, which will be larger (see |
3e845ce0 KO |
76 | * aio_setup_ring()) |
77 | */ | |
4e179bca KO |
78 | unsigned max_reqs; |
79 | ||
58c85dc2 KO |
80 | /* Size of ringbuffer, in units of struct io_event */ |
81 | unsigned nr_events; | |
4e179bca | 82 | |
58c85dc2 KO |
83 | unsigned long mmap_base; |
84 | unsigned long mmap_size; | |
85 | ||
86 | struct page **ring_pages; | |
87 | long nr_pages; | |
88 | ||
4e23bcae KO |
89 | struct rcu_head rcu_head; |
90 | struct work_struct rcu_work; | |
91 | ||
92 | struct { | |
93 | atomic_t reqs_active; | |
94 | } ____cacheline_aligned_in_smp; | |
95 | ||
96 | struct { | |
97 | spinlock_t ctx_lock; | |
98 | struct list_head active_reqs; /* used for cancellation */ | |
99 | } ____cacheline_aligned_in_smp; | |
100 | ||
58c85dc2 KO |
101 | struct { |
102 | struct mutex ring_lock; | |
4e23bcae KO |
103 | wait_queue_head_t wait; |
104 | } ____cacheline_aligned_in_smp; | |
58c85dc2 KO |
105 | |
106 | struct { | |
107 | unsigned tail; | |
108 | spinlock_t completion_lock; | |
4e23bcae | 109 | } ____cacheline_aligned_in_smp; |
58c85dc2 KO |
110 | |
111 | struct page *internal_pages[AIO_RING_PAGES]; | |
4e179bca KO |
112 | }; |
113 | ||
1da177e4 | 114 | /*------ sysctl variables----*/ |
d55b5fda ZB |
115 | static DEFINE_SPINLOCK(aio_nr_lock); |
116 | unsigned long aio_nr; /* current system wide number of aio requests */ | |
117 | unsigned long aio_max_nr = 0x10000; /* system wide maximum number of aio requests */ | |
1da177e4 LT |
118 | /*----end sysctl variables---*/ |
119 | ||
e18b890b CL |
120 | static struct kmem_cache *kiocb_cachep; |
121 | static struct kmem_cache *kioctx_cachep; | |
1da177e4 | 122 | |
1da177e4 LT |
123 | /* aio_setup |
124 | * Creates the slab caches used by the aio routines, panicking on | |
125 | * failure since this runs early in the boot sequence. | |
126 | */ | |
127 | static int __init aio_setup(void) | |
128 | { | |
0a31bd5f CL |
129 | kiocb_cachep = KMEM_CACHE(kiocb, SLAB_HWCACHE_ALIGN|SLAB_PANIC); |
130 | kioctx_cachep = KMEM_CACHE(kioctx, SLAB_HWCACHE_ALIGN|SLAB_PANIC); | |
1da177e4 | 131 | |
caf4167a | 132 | pr_debug("sizeof(struct page) = %zu\n", sizeof(struct page)); |
1da177e4 LT |
133 | |
134 | return 0; | |
135 | } | |
385773e0 | 136 | __initcall(aio_setup); |
1da177e4 LT |
137 | |
138 | static void aio_free_ring(struct kioctx *ctx) | |
139 | { | |
1da177e4 LT |
140 | long i; |
141 | ||
58c85dc2 KO |
142 | for (i = 0; i < ctx->nr_pages; i++) |
143 | put_page(ctx->ring_pages[i]); | |
1da177e4 | 144 | |
58c85dc2 KO |
145 | if (ctx->ring_pages && ctx->ring_pages != ctx->internal_pages) |
146 | kfree(ctx->ring_pages); | |
1da177e4 LT |
147 | } |
148 | ||
149 | static int aio_setup_ring(struct kioctx *ctx) | |
150 | { | |
151 | struct aio_ring *ring; | |
1da177e4 | 152 | unsigned nr_events = ctx->max_reqs; |
41003a7b | 153 | struct mm_struct *mm = current->mm; |
41badc15 | 154 | unsigned long size, populate; |
1da177e4 LT |
155 | int nr_pages; |
156 | ||
3c2a0909 S |
157 | if (current->personality & READ_IMPLIES_EXEC) |
158 | return -EPERM; | |
159 | ||
1da177e4 LT |
160 | /* Compensate for the ring buffer's head/tail overlap entry */ |
161 | nr_events += 2; /* 1 is required, 2 for good luck */ | |
162 | ||
163 | size = sizeof(struct aio_ring); | |
164 | size += sizeof(struct io_event) * nr_events; | |
165 | nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT; | |
166 | ||
167 | if (nr_pages < 0) | |
168 | return -EINVAL; | |
169 | ||
170 | nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event); | |
171 | ||
58c85dc2 KO |
172 | ctx->nr_events = 0; |
173 | ctx->ring_pages = ctx->internal_pages; | |
1da177e4 | 174 | if (nr_pages > AIO_RING_PAGES) { |
58c85dc2 KO |
175 | ctx->ring_pages = kcalloc(nr_pages, sizeof(struct page *), |
176 | GFP_KERNEL); | |
177 | if (!ctx->ring_pages) | |
1da177e4 | 178 | return -ENOMEM; |
1da177e4 LT |
179 | } |
180 | ||
58c85dc2 KO |
181 | ctx->mmap_size = nr_pages * PAGE_SIZE; |
182 | pr_debug("attempting mmap of %lu bytes\n", ctx->mmap_size); | |
41003a7b | 183 | down_write(&mm->mmap_sem); |
58c85dc2 KO |
184 | ctx->mmap_base = do_mmap_pgoff(NULL, 0, ctx->mmap_size, |
185 | PROT_READ|PROT_WRITE, | |
186 | MAP_ANONYMOUS|MAP_PRIVATE, 0, &populate); | |
187 | if (IS_ERR((void *)ctx->mmap_base)) { | |
41003a7b | 188 | up_write(&mm->mmap_sem); |
58c85dc2 | 189 | ctx->mmap_size = 0; |
1da177e4 LT |
190 | aio_free_ring(ctx); |
191 | return -EAGAIN; | |
192 | } | |
193 | ||
58c85dc2 KO |
194 | pr_debug("mmap address: 0x%08lx\n", ctx->mmap_base); |
195 | ctx->nr_pages = get_user_pages(current, mm, ctx->mmap_base, nr_pages, | |
196 | 1, 0, ctx->ring_pages, NULL); | |
41003a7b | 197 | up_write(&mm->mmap_sem); |
1da177e4 | 198 | |
58c85dc2 | 199 | if (unlikely(ctx->nr_pages != nr_pages)) { |
1da177e4 LT |
200 | aio_free_ring(ctx); |
201 | return -EAGAIN; | |
202 | } | |
bebeb3d6 | 203 | if (populate) |
58c85dc2 | 204 | mm_populate(ctx->mmap_base, populate); |
1da177e4 | 205 | |
58c85dc2 KO |
206 | ctx->user_id = ctx->mmap_base; |
207 | ctx->nr_events = nr_events; /* trusted copy */ | |
1da177e4 | 208 | |
58c85dc2 | 209 | ring = kmap_atomic(ctx->ring_pages[0]); |
1da177e4 LT |
210 | ring->nr = nr_events; /* user copy */ |
211 | ring->id = ctx->user_id; | |
212 | ring->head = ring->tail = 0; | |
213 | ring->magic = AIO_RING_MAGIC; | |
214 | ring->compat_features = AIO_RING_COMPAT_FEATURES; | |
215 | ring->incompat_features = AIO_RING_INCOMPAT_FEATURES; | |
216 | ring->header_length = sizeof(struct aio_ring); | |
e8e3c3d6 | 217 | kunmap_atomic(ring); |
58c85dc2 | 218 | flush_dcache_page(ctx->ring_pages[0]); |
1da177e4 LT |
219 | |
220 | return 0; | |
221 | } | |
222 | ||
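The sizing logic above over-allocates deliberately: it pads the request by two slots, rounds the byte total up to whole pages, then recomputes the usable capacity from the page count, so callers normally end up with more event slots than they asked io_setup() for. A quick userspace check of that arithmetic; the 4 KiB page and 32-byte struct sizes are assumptions matching common x86-64 builds, not values exported by the kernel:

```c
#include <stdio.h>

int main(void)
{
	unsigned page_sz = 4096, ev_sz = 32, hdr_sz = 32;
	unsigned nr_events = 128 + 2;	/* io_setup(128) plus the "+2" above */
	unsigned size = hdr_sz + ev_sz * nr_events;          /* 4192 bytes */
	unsigned nr_pages = (size + page_sz - 1) / page_sz;  /* -> 2 pages */

	/* recompute the "trusted copy", as aio_setup_ring() does */
	nr_events = (page_sz * nr_pages - hdr_sz) / ev_sz;
	printf("pages=%u capacity=%u\n", nr_pages, nr_events); /* 2, 255 */
	return 0;
}
```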
1da177e4 LT |
223 | #define AIO_EVENTS_PER_PAGE (PAGE_SIZE / sizeof(struct io_event)) |
224 | #define AIO_EVENTS_FIRST_PAGE ((PAGE_SIZE - sizeof(struct aio_ring)) / sizeof(struct io_event)) | |
225 | #define AIO_EVENTS_OFFSET (AIO_EVENTS_PER_PAGE - AIO_EVENTS_FIRST_PAGE) | |
226 | ||
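These macros encode the ring's split layout: page 0 carries the struct aio_ring header plus AIO_EVENTS_FIRST_PAGE events, every later page is events only, and AIO_EVENTS_OFFSET is the number of phantom slots the header occupies. A minimal sketch of the index-to-page translation that aio_complete() and aio_read_events_ring() perform below, using the same assumed sizes as the previous example:

```c
#include <stdio.h>

#define PAGE_SZ           4096u
#define EVENT_SZ          32u	/* assumed sizeof(struct io_event) */
#define RING_HDR_SZ       32u	/* assumed sizeof(struct aio_ring) */
#define EVENTS_PER_PAGE   (PAGE_SZ / EVENT_SZ)                  /* 128 */
#define EVENTS_FIRST_PAGE ((PAGE_SZ - RING_HDR_SZ) / EVENT_SZ)  /* 127 */
#define EVENTS_OFFSET     (EVENTS_PER_PAGE - EVENTS_FIRST_PAGE) /*   1 */

/* Same pos / AIO_EVENTS_PER_PAGE and pos % AIO_EVENTS_PER_PAGE math
 * the kernel uses to find an event's page and slot within that page. */
static void event_location(unsigned idx, unsigned *page, unsigned *slot)
{
	unsigned pos = idx + EVENTS_OFFSET;	/* skip the header's slots */

	*page = pos / EVENTS_PER_PAGE;
	*slot = pos % EVENTS_PER_PAGE;
}

int main(void)
{
	unsigned page, slot;

	event_location(0, &page, &slot);
	printf("event 0   -> page %u, slot %u\n", page, slot);	/* 0, 1 */
	event_location(127, &page, &slot);
	printf("event 127 -> page %u, slot %u\n", page, slot);	/* 1, 0 */
	return 0;
}
```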
0460fef2 KO |
227 | void kiocb_set_cancel_fn(struct kiocb *req, kiocb_cancel_fn *cancel) |
228 | { | |
229 | struct kioctx *ctx = req->ki_ctx; | |
230 | unsigned long flags; | |
231 | ||
232 | spin_lock_irqsave(&ctx->ctx_lock, flags); | |
233 | ||
234 | if (!req->ki_list.next) | |
235 | list_add(&req->ki_list, &ctx->active_reqs); | |
236 | ||
237 | req->ki_cancel = cancel; | |
238 | ||
239 | spin_unlock_irqrestore(&ctx->ctx_lock, flags); | |
240 | } | |
241 | EXPORT_SYMBOL(kiocb_set_cancel_fn); | |
242 | ||
906b973c KO |
243 | static int kiocb_cancel(struct kioctx *ctx, struct kiocb *kiocb, |
244 | struct io_event *res) | |
245 | { | |
0460fef2 | 246 | kiocb_cancel_fn *old, *cancel; |
906b973c KO |
247 | int ret = -EINVAL; |
248 | ||
0460fef2 KO |
249 | /* |
250 | * Don't want to set kiocb->ki_cancel = KIOCB_CANCELLED unless it | |
251 | * actually has a cancel function, hence the cmpxchg() | |
252 | */ | |
253 | ||
254 | cancel = ACCESS_ONCE(kiocb->ki_cancel); | |
255 | do { | |
256 | if (!cancel || cancel == KIOCB_CANCELLED) | |
257 | return ret; | |
906b973c | 258 | |
0460fef2 KO |
259 | old = cancel; |
260 | cancel = cmpxchg(&kiocb->ki_cancel, old, KIOCB_CANCELLED); | |
261 | } while (cancel != old); | |
906b973c | 262 | |
0460fef2 KO |
263 | atomic_inc(&kiocb->ki_users); |
264 | spin_unlock_irq(&ctx->ctx_lock); | |
265 | ||
266 | memset(res, 0, sizeof(*res)); | |
267 | res->obj = (u64)(unsigned long)kiocb->ki_obj.user; | |
268 | res->data = kiocb->ki_user_data; | |
269 | ret = cancel(kiocb, res); | |
270 | ||
271 | spin_lock_irq(&ctx->ctx_lock); | |
906b973c KO |
272 | |
273 | return ret; | |
274 | } | |
275 | ||
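The cmpxchg() loop above implements a single-winner claim: whichever path atomically swings ki_cancel from a real callback to KIOCB_CANCELLED gets to call it exactly once, while late arrivals and never-set slots bail out with -EINVAL. A compressed userspace rendition with C11 atomics; the names and the stub sentinel are this sketch's own, not kernel API:

```c
#include <stdatomic.h>
#include <stdio.h>

typedef int (*cancel_fn)(void *req);

/* Sentinel standing in for KIOCB_CANCELLED; any unique pointer works. */
static int cancelled_stub(void *req) { (void)req; return -1; }

/* Atomically claim the cancel callback: returns it if this caller won,
 * NULL if none was ever set or another caller claimed it first. */
static cancel_fn claim_cancel(_Atomic(cancel_fn) *slot)
{
	cancel_fn cur = atomic_load(slot);

	do {
		if (!cur || cur == cancelled_stub)
			return NULL;
		/* on failure, cur is refreshed with the current value */
	} while (!atomic_compare_exchange_weak(slot, &cur, cancelled_stub));

	return cur;	/* winner: safe to invoke exactly once */
}

static int my_cancel(void *req) { (void)req; puts("cancelled"); return 0; }

int main(void)
{
	_Atomic(cancel_fn) ki_cancel = my_cancel;

	cancel_fn fn = claim_cancel(&ki_cancel);
	if (fn)
		fn(NULL);
	if (!claim_cancel(&ki_cancel))	/* the second claim must lose */
		puts("already claimed");
	return 0;
}
```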
36f55889 KO |
276 | static void free_ioctx_rcu(struct rcu_head *head) |
277 | { | |
278 | struct kioctx *ctx = container_of(head, struct kioctx, rcu_head); | |
279 | kmem_cache_free(kioctx_cachep, ctx); | |
280 | } | |
281 | ||
282 | /* | |
283 | * When this function runs, the kioctx has been removed from the "hash table" | |
284 | * and ctx->users has dropped to 0, so we know no more kiocbs can be submitted - | |
285 | * now it's safe to cancel any that need to be. | |
286 | */ | |
287 | static void free_ioctx(struct kioctx *ctx) | |
288 | { | |
3e845ce0 | 289 | struct aio_ring *ring; |
36f55889 KO |
290 | struct io_event res; |
291 | struct kiocb *req; | |
3e845ce0 | 292 | unsigned head, avail; |
36f55889 KO |
293 | |
294 | spin_lock_irq(&ctx->ctx_lock); | |
295 | ||
296 | while (!list_empty(&ctx->active_reqs)) { | |
297 | req = list_first_entry(&ctx->active_reqs, | |
298 | struct kiocb, ki_list); | |
299 | ||
300 | list_del_init(&req->ki_list); | |
301 | kiocb_cancel(ctx, req, &res); | |
302 | } | |
303 | ||
304 | spin_unlock_irq(&ctx->ctx_lock); | |
305 | ||
58c85dc2 | 306 | ring = kmap_atomic(ctx->ring_pages[0]); |
3e845ce0 KO |
307 | head = ring->head; |
308 | kunmap_atomic(ring); | |
309 | ||
310 | while (atomic_read(&ctx->reqs_active) > 0) { | |
03e04f04 BL |
311 | wait_event(ctx->wait, |
312 | head != ctx->tail || | |
313 | atomic_read(&ctx->reqs_active) <= 0); | |
3e845ce0 | 314 | |
58c85dc2 | 315 | avail = (head <= ctx->tail ? ctx->tail : ctx->nr_events) - head; |
3e845ce0 | 316 | |
3e845ce0 | 317 | head += avail; |
58c85dc2 | 318 | head %= ctx->nr_events; |
3e845ce0 KO |
319 | } |
320 | ||
321 | WARN_ON(atomic_read(&ctx->reqs_active) < 0); | |
36f55889 KO |
322 | |
323 | aio_free_ring(ctx); | |
324 | ||
36f55889 KO |
325 | pr_debug("freeing %p\n", ctx); |
326 | ||
327 | /* | |
328 | * Here the call_rcu() is between the wait_event() for reqs_active to | |
329 | * hit 0, and freeing the ioctx. | |
330 | * | |
331 | * aio_complete() decrements reqs_active, but it has to touch the ioctx | |
332 | * afterwards to issue a wakeup, so we use RCU. | |
333 | */ | |
334 | call_rcu(&ctx->rcu_head, free_ioctx_rcu); | |
335 | } | |
336 | ||
337 | static void put_ioctx(struct kioctx *ctx) | |
338 | { | |
339 | if (unlikely(atomic_dec_and_test(&ctx->users))) | |
340 | free_ioctx(ctx); | |
341 | } | |
342 | ||
1da177e4 LT |
343 | /* ioctx_alloc |
344 | * Allocates and initializes an ioctx. Returns an ERR_PTR if it failed. | |
345 | */ | |
346 | static struct kioctx *ioctx_alloc(unsigned nr_events) | |
347 | { | |
41003a7b | 348 | struct mm_struct *mm = current->mm; |
1da177e4 | 349 | struct kioctx *ctx; |
e23754f8 | 350 | int err = -ENOMEM; |
1da177e4 LT |
351 | |
352 | /* Prevent overflows */ | |
353 | if ((nr_events > (0x10000000U / sizeof(struct io_event))) || | |
354 | (nr_events > (0x10000000U / sizeof(struct kiocb)))) { | |
355 | pr_debug("ENOMEM: nr_events too high\n"); | |
356 | return ERR_PTR(-EINVAL); | |
357 | } | |
358 | ||
2dd542b7 | 359 | if (!nr_events || (unsigned long)nr_events > aio_max_nr) |
1da177e4 LT |
360 | return ERR_PTR(-EAGAIN); |
361 | ||
c3762229 | 362 | ctx = kmem_cache_zalloc(kioctx_cachep, GFP_KERNEL); |
1da177e4 LT |
363 | if (!ctx) |
364 | return ERR_PTR(-ENOMEM); | |
365 | ||
1da177e4 | 366 | ctx->max_reqs = nr_events; |
1da177e4 | 367 | |
86b62a2c | 368 | atomic_set(&ctx->users, 2); |
36f55889 | 369 | atomic_set(&ctx->dead, 0); |
1da177e4 | 370 | spin_lock_init(&ctx->ctx_lock); |
0460fef2 | 371 | spin_lock_init(&ctx->completion_lock); |
58c85dc2 | 372 | mutex_init(&ctx->ring_lock); |
1da177e4 LT |
373 | init_waitqueue_head(&ctx->wait); |
374 | ||
375 | INIT_LIST_HEAD(&ctx->active_reqs); | |
1da177e4 LT |
376 | |
377 | if (aio_setup_ring(ctx) < 0) | |
378 | goto out_freectx; | |
379 | ||
380 | /* limit the number of system wide aios */ | |
9fa1cb39 | 381 | spin_lock(&aio_nr_lock); |
2dd542b7 AV |
382 | if (aio_nr + nr_events > aio_max_nr || |
383 | aio_nr + nr_events < aio_nr) { | |
9fa1cb39 | 384 | spin_unlock(&aio_nr_lock); |
1da177e4 | 385 | goto out_cleanup; |
2dd542b7 AV |
386 | } |
387 | aio_nr += ctx->max_reqs; | |
9fa1cb39 | 388 | spin_unlock(&aio_nr_lock); |
1da177e4 | 389 | |
39fa0031 | 390 | /* now link into global list. */ |
abf137dd JA |
391 | spin_lock(&mm->ioctx_lock); |
392 | hlist_add_head_rcu(&ctx->list, &mm->ioctx_list); | |
393 | spin_unlock(&mm->ioctx_lock); | |
1da177e4 | 394 | |
caf4167a | 395 | pr_debug("allocated ioctx %p[%ld]: mm=%p mask=0x%x\n", |
58c85dc2 | 396 | ctx, ctx->user_id, mm, ctx->nr_events); |
1da177e4 LT |
397 | return ctx; |
398 | ||
399 | out_cleanup: | |
e23754f8 AV |
400 | err = -EAGAIN; |
401 | aio_free_ring(ctx); | |
1da177e4 | 402 | out_freectx: |
1da177e4 | 403 | kmem_cache_free(kioctx_cachep, ctx); |
caf4167a | 404 | pr_debug("error allocating ioctx %d\n", err); |
e23754f8 | 405 | return ERR_PTR(err); |
1da177e4 LT |
406 | } |
407 | ||
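The two-clause test used when charging aio_nr above is the standard unsigned overflow guard: if the addition wrapped past ULONG_MAX, the sum compares less than the old value. The same check in isolation, with illustrative names:

```c
#include <limits.h>
#include <stdio.h>

/* Mirrors ioctx_alloc()'s "aio_nr + nr_events > aio_max_nr ||
 * aio_nr + nr_events < aio_nr" test. */
static int charge_would_overflow(unsigned long cur, unsigned long add,
				 unsigned long max)
{
	return cur + add > max || cur + add < cur;
}

int main(void)
{
	printf("%d\n", charge_would_overflow(ULONG_MAX - 1, 2, ULONG_MAX)); /* 1 */
	printf("%d\n", charge_would_overflow(100, 28, 0x10000));            /* 0 */
	return 0;
}
```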
36f55889 | 408 | static void kill_ioctx_work(struct work_struct *work) |
1da177e4 | 409 | { |
36f55889 | 410 | struct kioctx *ctx = container_of(work, struct kioctx, rcu_work); |
06af121e | 411 | |
36f55889 KO |
412 | wake_up_all(&ctx->wait); |
413 | put_ioctx(ctx); | |
414 | } | |
906b973c | 415 | |
36f55889 KO |
416 | static void kill_ioctx_rcu(struct rcu_head *head) |
417 | { | |
418 | struct kioctx *ctx = container_of(head, struct kioctx, rcu_head); | |
1da177e4 | 419 | |
36f55889 KO |
420 | INIT_WORK(&ctx->rcu_work, kill_ioctx_work); |
421 | schedule_work(&ctx->rcu_work); | |
422 | } | |
1da177e4 | 423 | |
36f55889 KO |
424 | /* kill_ioctx |
425 | * Cancels all outstanding aio requests on an aio context. Used | |
426 | * when the processes owning a context have all exited to encourage | |
427 | * the rapid destruction of the kioctx. | |
428 | */ | |
f8715e7d | 429 | static void kill_ioctx(struct mm_struct *mm, struct kioctx *ctx) |
36f55889 KO |
430 | { |
431 | if (!atomic_xchg(&ctx->dead, 1)) { | |
f8715e7d | 432 | spin_lock(&mm->ioctx_lock); |
36f55889 | 433 | hlist_del_rcu(&ctx->list); |
f8715e7d | 434 | spin_unlock(&mm->ioctx_lock); |
dee11c23 | 435 | |
36f55889 | 436 | /* |
4fcc712f KO |
437 | * It'd be more correct to do this in free_ioctx(), after all |
438 | * the outstanding kiocbs have finished - but by then io_destroy | |
439 | * has already returned, so io_setup() could potentially return | |
440 | * -EAGAIN with no ioctxs actually in use (as far as userspace | |
441 | * could tell). | |
36f55889 | 442 | */ |
4fcc712f KO |
443 | spin_lock(&aio_nr_lock); |
444 | BUG_ON(aio_nr - ctx->max_reqs > aio_nr); | |
445 | aio_nr -= ctx->max_reqs; | |
446 | spin_unlock(&aio_nr_lock); | |
447 | ||
448 | if (ctx->mmap_size) | |
449 | vm_munmap(ctx->mmap_base, ctx->mmap_size); | |
450 | ||
451 | /* Between hlist_del_rcu() and dropping the initial ref */ | |
452 | call_rcu(&ctx->rcu_head, kill_ioctx_rcu); | |
36f55889 | 453 | } |
1da177e4 LT |
454 | } |
455 | ||
456 | /* wait_on_sync_kiocb: | |
457 | * Waits on the given sync kiocb to complete. | |
458 | */ | |
fc9b52cd | 459 | ssize_t wait_on_sync_kiocb(struct kiocb *iocb) |
1da177e4 | 460 | { |
11599eba | 461 | while (atomic_read(&iocb->ki_users)) { |
1da177e4 | 462 | set_current_state(TASK_UNINTERRUPTIBLE); |
11599eba | 463 | if (!atomic_read(&iocb->ki_users)) |
1da177e4 | 464 | break; |
41d10da3 | 465 | io_schedule(); |
1da177e4 LT |
466 | } |
467 | __set_current_state(TASK_RUNNING); | |
468 | return iocb->ki_user_data; | |
469 | } | |
385773e0 | 470 | EXPORT_SYMBOL(wait_on_sync_kiocb); |
1da177e4 | 471 | |
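For context, this is roughly how the sync path reaches wait_on_sync_kiocb(): a kiocb lives on the caller's stack, the file's aio_read() is invoked directly, and the wait only happens when the operation queued asynchronously. Condensed from the do_sync_read() of the same era; treat it as a sketch rather than verbatim source:

```c
static ssize_t sync_read_sketch(struct file *filp, char __user *buf,
				size_t len, loff_t *ppos)
{
	struct iovec iov = { .iov_base = buf, .iov_len = len };
	struct kiocb kiocb;
	ssize_t ret;

	init_sync_kiocb(&kiocb, filp);	/* ki_users = 1, ki_obj.tsk = current */
	kiocb.ki_pos = *ppos;
	kiocb.ki_left = len;
	kiocb.ki_nbytes = len;

	ret = filp->f_op->aio_read(&kiocb, &iov, 1, kiocb.ki_pos);
	if (ret == -EIOCBQUEUED)
		ret = wait_on_sync_kiocb(&kiocb);
	*ppos = kiocb.ki_pos;
	return ret;
}
```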
36f55889 KO |
472 | /* |
473 | * exit_aio: called when the last user of mm goes away. At this point, there is | |
474 | * no way for any new requests to be submitted or any of the io_* syscalls to be | |
475 | * called on the context. | |
476 | * | |
477 | * There may be outstanding kiocbs, but free_ioctx() will explicitly wait on | |
478 | * them. | |
1da177e4 | 479 | */ |
fc9b52cd | 480 | void exit_aio(struct mm_struct *mm) |
1da177e4 | 481 | { |
abf137dd | 482 | struct kioctx *ctx; |
36f55889 | 483 | struct hlist_node *n; |
abf137dd | 484 | |
36f55889 | 485 | hlist_for_each_entry_safe(ctx, n, &mm->ioctx_list, list) { |
1da177e4 LT |
486 | if (1 != atomic_read(&ctx->users)) |
487 | printk(KERN_DEBUG | |
488 | "exit_aio:ioctx still alive: %d %d %d\n", | |
36f55889 KO |
489 | atomic_read(&ctx->users), |
490 | atomic_read(&ctx->dead), | |
11599eba | 491 | atomic_read(&ctx->reqs_active)); |
936af157 AV |
492 | /* |
493 | * We don't need to bother with munmap() here - | |
494 | * exit_mmap(mm) is coming and it'll unmap everything. | |
495 | * Since aio_free_ring() uses non-zero ->mmap_size | |
496 | * as indicator that it needs to unmap the area, | |
497 | * just set it to 0; aio_free_ring() is the only | |
498 | * place that uses ->mmap_size, so it's safe. | |
936af157 | 499 | */ |
58c85dc2 | 500 | ctx->mmap_size = 0; |
36f55889 | 501 | |
f8715e7d | 502 | kill_ioctx(mm, ctx); |
1da177e4 LT |
503 | } |
504 | } | |
505 | ||
1da177e4 | 506 | /* aio_get_req |
11599eba | 507 | * Allocate a slot for an aio request. Increments the reqs_active count |
1da177e4 LT |
508 | * of the kioctx so that the kioctx stays around until all requests are |
509 | * complete. Returns NULL if no requests are free. | |
510 | * | |
11599eba | 511 | * Returns with kiocb->ki_users set to 2. The io submit code path holds |
1da177e4 LT |
512 | * an extra reference while submitting the i/o. |
513 | * This prevents races between the aio code path referencing the | |
514 | * req (after submitting it) and aio_complete() freeing the req. | |
515 | */ | |
a1c8eae7 | 516 | static inline struct kiocb *aio_get_req(struct kioctx *ctx) |
1da177e4 | 517 | { |
a1c8eae7 KO |
518 | struct kiocb *req; |
519 | ||
58c85dc2 | 520 | if (atomic_read(&ctx->reqs_active) >= ctx->nr_events) |
a1c8eae7 KO |
521 | return NULL; |
522 | ||
58c85dc2 | 523 | if (atomic_inc_return(&ctx->reqs_active) > ctx->nr_events - 1) |
a1c8eae7 | 524 | goto out_put; |
1da177e4 | 525 | |
0460fef2 | 526 | req = kmem_cache_alloc(kiocb_cachep, GFP_KERNEL|__GFP_ZERO); |
1da177e4 | 527 | if (unlikely(!req)) |
a1c8eae7 | 528 | goto out_put; |
1da177e4 | 529 | |
11599eba | 530 | atomic_set(&req->ki_users, 2); |
1da177e4 | 531 | req->ki_ctx = ctx; |
1da177e4 | 532 | |
080d676d | 533 | return req; |
a1c8eae7 KO |
534 | out_put: |
535 | atomic_dec(&ctx->reqs_active); | |
536 | return NULL; | |
1da177e4 LT |
537 | } |
538 | ||
11599eba | 539 | static void kiocb_free(struct kiocb *req) |
1da177e4 | 540 | { |
1d98ebfc KO |
541 | if (req->ki_filp) |
542 | fput(req->ki_filp); | |
13389010 DL |
543 | if (req->ki_eventfd != NULL) |
544 | eventfd_ctx_put(req->ki_eventfd); | |
1da177e4 LT |
545 | if (req->ki_dtor) |
546 | req->ki_dtor(req); | |
eed4e51f BP |
547 | if (req->ki_iovec != &req->ki_inline_vec) |
548 | kfree(req->ki_iovec); | |
1da177e4 | 549 | kmem_cache_free(kiocb_cachep, req); |
1da177e4 LT |
550 | } |
551 | ||
2d68449e | 552 | void aio_put_req(struct kiocb *req) |
1da177e4 | 553 | { |
11599eba KO |
554 | if (atomic_dec_and_test(&req->ki_users)) |
555 | kiocb_free(req); | |
1da177e4 | 556 | } |
385773e0 | 557 | EXPORT_SYMBOL(aio_put_req); |
1da177e4 | 558 | |
d5470b59 | 559 | static struct kioctx *lookup_ioctx(unsigned long ctx_id) |
1da177e4 | 560 | { |
abf137dd | 561 | struct mm_struct *mm = current->mm; |
65c24491 | 562 | struct kioctx *ctx, *ret = NULL; |
1da177e4 | 563 | |
abf137dd JA |
564 | rcu_read_lock(); |
565 | ||
b67bfe0d | 566 | hlist_for_each_entry_rcu(ctx, &mm->ioctx_list, list) { |
36f55889 KO |
567 | if (ctx->user_id == ctx_id) { |
568 | atomic_inc(&ctx->users); | |
65c24491 | 569 | ret = ctx; |
1da177e4 LT |
570 | break; |
571 | } | |
abf137dd | 572 | } |
1da177e4 | 573 | |
abf137dd | 574 | rcu_read_unlock(); |
65c24491 | 575 | return ret; |
1da177e4 LT |
576 | } |
577 | ||
1da177e4 LT |
578 | /* aio_complete |
579 | * Called when the io request on the given iocb is complete. | |
1da177e4 | 580 | */ |
2d68449e | 581 | void aio_complete(struct kiocb *iocb, long res, long res2) |
1da177e4 LT |
582 | { |
583 | struct kioctx *ctx = iocb->ki_ctx; | |
1da177e4 | 584 | struct aio_ring *ring; |
21b40200 | 585 | struct io_event *ev_page, *event; |
1da177e4 | 586 | unsigned long flags; |
21b40200 | 587 | unsigned tail, pos; |
1da177e4 | 588 | |
20dcae32 ZB |
589 | /* |
590 | * Special case handling for sync iocbs: | |
591 | * - events go directly into the iocb for fast handling | |
592 | * - the sync task with the iocb in its stack holds the single iocb | |
593 | * ref, no other paths have a way to get another ref | |
594 | * - the sync task helpfully left a reference to itself in the iocb | |
1da177e4 LT |
595 | */ |
596 | if (is_sync_kiocb(iocb)) { | |
11599eba | 597 | BUG_ON(atomic_read(&iocb->ki_users) != 1); |
1da177e4 | 598 | iocb->ki_user_data = res; |
11599eba | 599 | atomic_set(&iocb->ki_users, 0); |
1da177e4 | 600 | wake_up_process(iocb->ki_obj.tsk); |
2d68449e | 601 | return; |
1da177e4 LT |
602 | } |
603 | ||
36f55889 | 604 | /* |
36f55889 KO |
605 | * Take rcu_read_lock() in case the kioctx is being destroyed, as we |
606 | * need to issue a wakeup after decrementing reqs_active. | |
1da177e4 | 607 | */ |
36f55889 | 608 | rcu_read_lock(); |
1da177e4 | 609 | |
0460fef2 KO |
610 | if (iocb->ki_list.next) { |
611 | unsigned long flags; | |
612 | ||
613 | spin_lock_irqsave(&ctx->ctx_lock, flags); | |
614 | list_del(&iocb->ki_list); | |
615 | spin_unlock_irqrestore(&ctx->ctx_lock, flags); | |
616 | } | |
11599eba | 617 | |
1da177e4 LT |
618 | /* |
619 | * cancelled requests don't get events, userland was given one | |
620 | * when the event got cancelled. | |
621 | */ | |
0460fef2 | 622 | if (unlikely(xchg(&iocb->ki_cancel, |
3e845ce0 KO |
623 | KIOCB_CANCELLED) == KIOCB_CANCELLED)) { |
624 | atomic_dec(&ctx->reqs_active); | |
625 | /* Still need the wake_up in case free_ioctx is waiting */ | |
1da177e4 | 626 | goto put_rq; |
3e845ce0 | 627 | } |
1da177e4 | 628 | |
0460fef2 KO |
629 | /* |
630 | * Add a completion event to the ring buffer. Must be done holding | |
631 | * ctx->completion_lock to prevent other code from messing with the tail | |
632 | * pointer since we might be called from irq context. | |
633 | */ | |
634 | spin_lock_irqsave(&ctx->completion_lock, flags); | |
635 | ||
58c85dc2 | 636 | tail = ctx->tail; |
21b40200 KO |
637 | pos = tail + AIO_EVENTS_OFFSET; |
638 | ||
58c85dc2 | 639 | if (++tail >= ctx->nr_events) |
4bf69b2a | 640 | tail = 0; |
1da177e4 | 641 | |
58c85dc2 | 642 | ev_page = kmap_atomic(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]); |
21b40200 KO |
643 | event = ev_page + pos % AIO_EVENTS_PER_PAGE; |
644 | ||
1da177e4 LT |
645 | event->obj = (u64)(unsigned long)iocb->ki_obj.user; |
646 | event->data = iocb->ki_user_data; | |
647 | event->res = res; | |
648 | event->res2 = res2; | |
649 | ||
21b40200 | 650 | kunmap_atomic(ev_page); |
58c85dc2 | 651 | flush_dcache_page(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]); |
21b40200 KO |
652 | |
653 | pr_debug("%p[%u]: %p: %p %Lx %lx %lx\n", | |
caf4167a KO |
654 | ctx, tail, iocb, iocb->ki_obj.user, iocb->ki_user_data, |
655 | res, res2); | |
1da177e4 LT |
656 | |
657 | /* after flagging the request as done, we | |
658 | * must never even look at it again | |
659 | */ | |
660 | smp_wmb(); /* make event visible before updating tail */ | |
661 | ||
58c85dc2 | 662 | ctx->tail = tail; |
1da177e4 | 663 | |
58c85dc2 | 664 | ring = kmap_atomic(ctx->ring_pages[0]); |
21b40200 | 665 | ring->tail = tail; |
e8e3c3d6 | 666 | kunmap_atomic(ring); |
58c85dc2 | 667 | flush_dcache_page(ctx->ring_pages[0]); |
1da177e4 | 668 | |
0460fef2 KO |
669 | spin_unlock_irqrestore(&ctx->completion_lock, flags); |
670 | ||
21b40200 | 671 | pr_debug("added to ring %p at [%u]\n", iocb, tail); |
8d1c98b0 DL |
672 | |
673 | /* | |
674 | * Check if the user asked us to deliver the result through an | |
675 | * eventfd. The eventfd_signal() function is safe to be called | |
676 | * from IRQ context. | |
677 | */ | |
87c3a86e | 678 | if (iocb->ki_eventfd != NULL) |
8d1c98b0 DL |
679 | eventfd_signal(iocb->ki_eventfd, 1); |
680 | ||
1da177e4 LT |
681 | put_rq: |
682 | /* everything turned out well, dispose of the aiocb. */ | |
11599eba | 683 | aio_put_req(iocb); |
6745cb91 | 684 | atomic_dec(&ctx->reqs_active); |
1da177e4 | 685 | |
6cb2a210 QB |
686 | /* |
687 | * We have to order our ring tail store above and test | |
688 | * of the wait list below outside the wait lock. This is | |
689 | * like in wake_up_bit() where clearing a bit has to be | |
690 | * ordered with the unlocked test. | |
691 | */ | |
692 | smp_mb(); | |
693 | ||
1da177e4 LT |
694 | if (waitqueue_active(&ctx->wait)) |
695 | wake_up(&ctx->wait); | |
696 | ||
36f55889 | 697 | rcu_read_unlock(); |
1da177e4 | 698 | } |
385773e0 | 699 | EXPORT_SYMBOL(aio_complete); |
1da177e4 | 700 | |
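Because the smp_wmb() above publishes the event before the tail, and ring->head is only ever advanced by consumers, a userspace reaper can drain completions lock-free straight out of the mmap'd ring with no syscall. A hedged sketch of that consumer side; the userspace struct mirrors the struct aio_ring at the top of this file (aio_abi.h does not export it, so the layout here is an assumption about this implementation), and the whole trick relies on user_id being the ring's mmap address (ctx->user_id = ctx->mmap_base above):

```c
#include <linux/aio_abi.h>

/* Userspace mirror of this implementation's struct aio_ring. */
struct aio_ring {
	unsigned id, nr, head, tail;
	unsigned magic, compat_features, incompat_features, header_length;
	struct io_event io_events[];
};

#define AIO_RING_MAGIC	0xa10a10a1
#define rmb()		__sync_synchronize() /* pairs with kernel smp_wmb() */

/* Reap up to max completed events without entering the kernel. */
static int user_reap(aio_context_t ctx, struct io_event *out, int max)
{
	struct aio_ring *ring = (struct aio_ring *)ctx;
	unsigned head = ring->head;
	int n = 0;

	if (ring->magic != AIO_RING_MAGIC || ring->incompat_features)
		return -1;	/* layout changed: use real io_getevents() */

	while (n < max && head != ring->tail) {
		rmb();		/* read the event only after seeing the tail */
		out[n++] = ring->io_events[head];
		head = (head + 1) % ring->nr;
	}
	ring->head = head;	/* hand the consumed slots back */
	return n;
}
```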
a31ad380 KO |
701 | /* aio_read_events |
702 | * Pull events off the ioctx's event ring. Returns the number of | |
703 | * events fetched. | |
1da177e4 | 704 | */ |
a31ad380 KO |
705 | static long aio_read_events_ring(struct kioctx *ctx, |
706 | struct io_event __user *event, long nr) | |
1da177e4 | 707 | { |
1da177e4 | 708 | struct aio_ring *ring; |
a31ad380 KO |
709 | unsigned head, pos; |
710 | long ret = 0; | |
711 | int copy_ret; | |
712 | ||
58c85dc2 | 713 | mutex_lock(&ctx->ring_lock); |
1da177e4 | 714 | |
58c85dc2 | 715 | ring = kmap_atomic(ctx->ring_pages[0]); |
a31ad380 KO |
716 | head = ring->head; |
717 | kunmap_atomic(ring); | |
718 | ||
58c85dc2 | 719 | pr_debug("h%u t%u m%u\n", head, ctx->tail, ctx->nr_events); |
1da177e4 | 720 | |
58c85dc2 | 721 | if (head == ctx->tail) |
1da177e4 LT |
722 | goto out; |
723 | ||
d36db46c BL |
724 | head %= ctx->nr_events; |
725 | ||
a31ad380 KO |
726 | while (ret < nr) { |
727 | long avail; | |
728 | struct io_event *ev; | |
729 | struct page *page; | |
730 | ||
58c85dc2 KO |
731 | avail = (head <= ctx->tail ? ctx->tail : ctx->nr_events) - head; |
732 | if (head == ctx->tail) | |
a31ad380 KO |
733 | break; |
734 | ||
735 | avail = min(avail, nr - ret); | |
736 | avail = min_t(long, avail, AIO_EVENTS_PER_PAGE - | |
737 | ((head + AIO_EVENTS_OFFSET) % AIO_EVENTS_PER_PAGE)); | |
738 | ||
739 | pos = head + AIO_EVENTS_OFFSET; | |
58c85dc2 | 740 | page = ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]; |
a31ad380 KO |
741 | pos %= AIO_EVENTS_PER_PAGE; |
742 | ||
743 | ev = kmap(page); | |
744 | copy_ret = copy_to_user(event + ret, ev + pos, | |
745 | sizeof(*ev) * avail); | |
746 | kunmap(page); | |
747 | ||
748 | if (unlikely(copy_ret)) { | |
749 | ret = -EFAULT; | |
750 | goto out; | |
751 | } | |
752 | ||
753 | ret += avail; | |
754 | head += avail; | |
58c85dc2 | 755 | head %= ctx->nr_events; |
1da177e4 | 756 | } |
1da177e4 | 757 | |
58c85dc2 | 758 | ring = kmap_atomic(ctx->ring_pages[0]); |
a31ad380 | 759 | ring->head = head; |
91d80a84 | 760 | kunmap_atomic(ring); |
58c85dc2 | 761 | flush_dcache_page(ctx->ring_pages[0]); |
a31ad380 | 762 | |
58c85dc2 | 763 | pr_debug("%li h%u t%u\n", ret, head, ctx->tail); |
a31ad380 | 764 | out: |
58c85dc2 | 765 | mutex_unlock(&ctx->ring_lock); |
a31ad380 | 766 | |
1da177e4 LT |
767 | return ret; |
768 | } | |
769 | ||
a31ad380 KO |
770 | static bool aio_read_events(struct kioctx *ctx, long min_nr, long nr, |
771 | struct io_event __user *event, long *i) | |
1da177e4 | 772 | { |
a31ad380 | 773 | long ret = aio_read_events_ring(ctx, event + *i, nr - *i); |
1da177e4 | 774 | |
a31ad380 KO |
775 | if (ret > 0) |
776 | *i += ret; | |
1da177e4 | 777 | |
a31ad380 KO |
778 | if (unlikely(atomic_read(&ctx->dead))) |
779 | ret = -EINVAL; | |
1da177e4 | 780 | |
a31ad380 KO |
781 | if (!*i) |
782 | *i = ret; | |
1da177e4 | 783 | |
a31ad380 | 784 | return ret < 0 || *i >= min_nr; |
1da177e4 LT |
785 | } |
786 | ||
a31ad380 | 787 | static long read_events(struct kioctx *ctx, long min_nr, long nr, |
1da177e4 LT |
788 | struct io_event __user *event, |
789 | struct timespec __user *timeout) | |
790 | { | |
a31ad380 KO |
791 | ktime_t until = { .tv64 = KTIME_MAX }; |
792 | long ret = 0; | |
1da177e4 | 793 | |
1da177e4 LT |
794 | if (timeout) { |
795 | struct timespec ts; | |
a31ad380 | 796 | |
1da177e4 | 797 | if (unlikely(copy_from_user(&ts, timeout, sizeof(ts)))) |
a31ad380 | 798 | return -EFAULT; |
1da177e4 | 799 | |
a31ad380 | 800 | until = timespec_to_ktime(ts); |
1da177e4 LT |
801 | } |
802 | ||
a31ad380 KO |
803 | /* |
804 | * Note that aio_read_events() is being called as the conditional - i.e. | |
805 | * we're calling it after prepare_to_wait() has set task state to | |
806 | * TASK_INTERRUPTIBLE. | |
807 | * | |
808 | * But aio_read_events() can block, and if it blocks it's going to flip | |
809 | * the task state back to TASK_RUNNING. | |
810 | * | |
811 | * This should be ok, provided it doesn't flip the state back to | |
812 | * TASK_RUNNING and return 0 too much - that causes us to spin. That | |
813 | * will only happen if the mutex_lock() call blocks, and we then find | |
814 | * the ringbuffer empty. So in practice we should be ok, but it's | |
815 | * something to be aware of when touching this code. | |
816 | */ | |
817 | wait_event_interruptible_hrtimeout(ctx->wait, | |
818 | aio_read_events(ctx, min_nr, nr, event, &ret), until); | |
1da177e4 | 819 | |
a31ad380 KO |
820 | if (!ret && signal_pending(current)) |
821 | ret = -EINTR; | |
1da177e4 | 822 | |
a31ad380 | 823 | return ret; |
1da177e4 LT |
824 | } |
825 | ||
1da177e4 LT |
826 | /* sys_io_setup: |
827 | * Create an aio_context capable of receiving at least nr_events. | |
828 | * ctxp must not point to an aio_context that already exists, and | |
829 | * must be initialized to 0 prior to the call. On successful | |
830 | * creation of the aio_context, *ctxp is filled in with the resulting | |
831 | * handle. May fail with -EINVAL if *ctxp is not initialized, | |
832 | * or if the specified nr_events exceeds internal limits. May fail | |
833 | * with -EAGAIN if the specified nr_events exceeds the user's limit | |
834 | * of available events. May fail with -ENOMEM if insufficient kernel | |
835 | * resources are available. May fail with -EFAULT if an invalid | |
836 | * pointer is passed for ctxp. Will fail with -ENOSYS if not | |
837 | * implemented. | |
838 | */ | |
002c8976 | 839 | SYSCALL_DEFINE2(io_setup, unsigned, nr_events, aio_context_t __user *, ctxp) |
1da177e4 LT |
840 | { |
841 | struct kioctx *ioctx = NULL; | |
842 | unsigned long ctx; | |
843 | long ret; | |
844 | ||
845 | ret = get_user(ctx, ctxp); | |
846 | if (unlikely(ret)) | |
847 | goto out; | |
848 | ||
849 | ret = -EINVAL; | |
d55b5fda ZB |
850 | if (unlikely(ctx || nr_events == 0)) { |
851 | pr_debug("EINVAL: io_setup: ctx %lu nr_events %u\n", | |
852 | ctx, nr_events); | |
1da177e4 LT |
853 | goto out; |
854 | } | |
855 | ||
856 | ioctx = ioctx_alloc(nr_events); | |
857 | ret = PTR_ERR(ioctx); | |
858 | if (!IS_ERR(ioctx)) { | |
859 | ret = put_user(ioctx->user_id, ctxp); | |
a2e1859a | 860 | if (ret) |
f8715e7d | 861 | kill_ioctx(current->mm, ioctx); |
a2e1859a | 862 | put_ioctx(ioctx); |
1da177e4 LT |
863 | } |
864 | ||
865 | out: | |
866 | return ret; | |
867 | } | |
868 | ||
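Since glibc ships no wrappers for the io_* family, userspace calls them through syscall(2). A minimal round trip over the two syscalls documented above, assuming nothing beyond <linux/aio_abi.h>:

```c
#include <linux/aio_abi.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	aio_context_t ctx = 0;	/* must start zeroed, per the comment above */

	if (syscall(SYS_io_setup, 128, &ctx) < 0) {
		perror("io_setup");
		return 1;
	}
	/* ctx is the ring's mmap address in this implementation */
	printf("ctx = %#lx\n", (unsigned long)ctx);

	syscall(SYS_io_destroy, ctx);
	return 0;
}
```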
869 | /* sys_io_destroy: | |
870 | * Destroy the aio_context specified. May cancel any outstanding | |
871 | * AIOs and block on completion. Will fail with -ENOSYS if not | |
642b5123 | 872 | * implemented. May fail with -EINVAL if the context pointed to |
1da177e4 LT |
873 | * is invalid. |
874 | */ | |
002c8976 | 875 | SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx) |
1da177e4 LT |
876 | { |
877 | struct kioctx *ioctx = lookup_ioctx(ctx); | |
878 | if (likely(NULL != ioctx)) { | |
f8715e7d | 879 | kill_ioctx(current->mm, ioctx); |
a2e1859a | 880 | put_ioctx(ioctx); |
1da177e4 LT |
881 | return 0; |
882 | } | |
883 | pr_debug("EINVAL: io_destroy: invalid context id\n"); | |
884 | return -EINVAL; | |
885 | } | |
886 | ||
eed4e51f | 887 | static void aio_advance_iovec(struct kiocb *iocb, ssize_t ret) |
1da177e4 | 888 | { |
eed4e51f BP |
889 | struct iovec *iov = &iocb->ki_iovec[iocb->ki_cur_seg]; |
890 | ||
891 | BUG_ON(ret <= 0); | |
892 | ||
893 | while (iocb->ki_cur_seg < iocb->ki_nr_segs && ret > 0) { | |
894 | ssize_t this = min((ssize_t)iov->iov_len, ret); | |
895 | iov->iov_base += this; | |
896 | iov->iov_len -= this; | |
897 | iocb->ki_left -= this; | |
898 | ret -= this; | |
899 | if (iov->iov_len == 0) { | |
900 | iocb->ki_cur_seg++; | |
901 | iov++; | |
897f15fb | 902 | } |
eed4e51f | 903 | } |
1da177e4 | 904 | |
eed4e51f BP |
905 | /* the caller should not have done more io than what fits in | |
906 | * the remaining iovecs */ | |
907 | BUG_ON(ret > 0 && iocb->ki_left == 0); | |
1da177e4 LT |
908 | } |
909 | ||
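The same cursor-advance pattern shows up anywhere partial vectored I/O has to be resumed. In plain userspace terms, with hypothetical names:

```c
#include <stdio.h>
#include <sys/uio.h>

/* Consume `ret` bytes from an iovec array after a partial readv/writev,
 * advancing base/len exactly as aio_advance_iovec() does above. */
static void advance_iovec(struct iovec **iovp, int *nrp, size_t ret)
{
	struct iovec *iov = *iovp;
	int nr = *nrp;

	while (nr > 0 && ret > 0) {
		size_t this = iov->iov_len < ret ? iov->iov_len : ret;

		iov->iov_base = (char *)iov->iov_base + this;
		iov->iov_len -= this;
		ret -= this;
		if (iov->iov_len == 0) {
			iov++;
			nr--;
		}
	}
	*iovp = iov;
	*nrp = nr;
}

int main(void)
{
	char a[8], b[8];
	struct iovec vec[2] = { { a, sizeof(a) }, { b, sizeof(b) } };
	struct iovec *iov = vec;
	int nr = 2;

	advance_iovec(&iov, &nr, 10);	/* 8 bytes from a, 2 from b */
	printf("segs left=%d, next seg has %zu bytes\n", nr, iov->iov_len);
	return 0;
}
```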
41ef4eb8 KO |
910 | typedef ssize_t (aio_rw_op)(struct kiocb *, const struct iovec *, |
911 | unsigned long, loff_t); | |
912 | ||
913 | static ssize_t aio_rw_vect_retry(struct kiocb *iocb, int rw, aio_rw_op *rw_op) | |
1da177e4 LT |
914 | { |
915 | struct file *file = iocb->ki_filp; | |
eed4e51f BP |
916 | struct address_space *mapping = file->f_mapping; |
917 | struct inode *inode = mapping->host; | |
1da177e4 LT |
918 | ssize_t ret = 0; |
919 | ||
c2ec6682 RR |
920 | /* This matches the pread()/pwrite() logic */ |
921 | if (iocb->ki_pos < 0) | |
922 | return -EINVAL; | |
923 | ||
41ef4eb8 | 924 | if (rw == WRITE) |
8d71db4f | 925 | file_start_write(file); |
897f15fb | 926 | do { |
eed4e51f BP |
927 | ret = rw_op(iocb, &iocb->ki_iovec[iocb->ki_cur_seg], |
928 | iocb->ki_nr_segs - iocb->ki_cur_seg, | |
929 | iocb->ki_pos); | |
930 | if (ret > 0) | |
931 | aio_advance_iovec(iocb, ret); | |
932 | ||
933 | /* retry all partial writes. retry partial reads as long as it's a | |
934 | * regular file. */ | |
935 | } while (ret > 0 && iocb->ki_left > 0 && | |
41ef4eb8 | 936 | (rw == WRITE || |
eed4e51f | 937 | (!S_ISFIFO(inode->i_mode) && !S_ISSOCK(inode->i_mode)))); |
41ef4eb8 | 938 | if (rw == WRITE) |
8d71db4f | 939 | file_end_write(file); |
1da177e4 | 940 | |
eed4e51f BP |
941 | /* This means we must have transferred all that we could */ |
942 | /* No need to retry anymore */ | |
1da177e4 LT |
943 | if ((ret == 0) || (iocb->ki_left == 0)) |
944 | ret = iocb->ki_nbytes - iocb->ki_left; | |
945 | ||
7adfa2ff RR |
946 | /* If we managed to write some out we return that, rather than |
947 | * the eventual error. */ | |
41ef4eb8 | 948 | if (rw == WRITE |
41003a7b | 949 | && ret < 0 && ret != -EIOCBQUEUED |
7adfa2ff RR |
950 | && iocb->ki_nbytes - iocb->ki_left) |
951 | ret = iocb->ki_nbytes - iocb->ki_left; | |
952 | ||
1da177e4 LT |
953 | return ret; |
954 | } | |
955 | ||
41ef4eb8 | 956 | static ssize_t aio_setup_vectored_rw(int rw, struct kiocb *kiocb, bool compat) |
eed4e51f BP |
957 | { |
958 | ssize_t ret; | |
959 | ||
41ef4eb8 KO |
960 | kiocb->ki_nr_segs = kiocb->ki_nbytes; |
961 | ||
9d85cba7 JM |
962 | #ifdef CONFIG_COMPAT |
963 | if (compat) | |
41ef4eb8 | 964 | ret = compat_rw_copy_check_uvector(rw, |
9d85cba7 | 965 | (struct compat_iovec __user *)kiocb->ki_buf, |
41ef4eb8 | 966 | kiocb->ki_nr_segs, 1, &kiocb->ki_inline_vec, |
ac34ebb3 | 967 | &kiocb->ki_iovec); |
9d85cba7 JM |
968 | else |
969 | #endif | |
41ef4eb8 | 970 | ret = rw_copy_check_uvector(rw, |
9d85cba7 | 971 | (struct iovec __user *)kiocb->ki_buf, |
41ef4eb8 | 972 | kiocb->ki_nr_segs, 1, &kiocb->ki_inline_vec, |
ac34ebb3 | 973 | &kiocb->ki_iovec); |
eed4e51f | 974 | if (ret < 0) |
41ef4eb8 | 975 | return ret; |
a70b52ec | 976 | |
41ef4eb8 | 977 | /* ki_nbytes now reflect bytes instead of segs */ |
eed4e51f | 978 | kiocb->ki_nbytes = ret; |
41ef4eb8 | 979 | return 0; |
eed4e51f BP |
980 | } |
981 | ||
41ef4eb8 | 982 | static ssize_t aio_setup_single_vector(int rw, struct kiocb *kiocb) |
eed4e51f | 983 | { |
3c2a0909 S |
984 | size_t len = kiocb->ki_nbytes; |
985 | ||
986 | if (len > MAX_RW_COUNT) | |
987 | len = MAX_RW_COUNT; | |
988 | ||
989 | if (unlikely(!access_ok(!rw, kiocb->ki_buf, len))) | |
990 | return -EFAULT; | |
a70b52ec | 991 | |
eed4e51f BP |
992 | kiocb->ki_iovec = &kiocb->ki_inline_vec; |
993 | kiocb->ki_iovec->iov_base = kiocb->ki_buf; | |
3c2a0909 | 994 | kiocb->ki_iovec->iov_len = len; |
eed4e51f | 995 | kiocb->ki_nr_segs = 1; |
eed4e51f BP |
996 | return 0; |
997 | } | |
998 | ||
1da177e4 LT |
999 | /* |
1000 | * aio_setup_iocb: | |
1001 | * Performs the initial checks and aio retry method | |
1002 | * setup for the kiocb at the time of io submission. | |
1003 | */ | |
41ef4eb8 | 1004 | static ssize_t aio_run_iocb(struct kiocb *req, bool compat) |
1da177e4 | 1005 | { |
41ef4eb8 KO |
1006 | struct file *file = req->ki_filp; |
1007 | ssize_t ret; | |
1008 | int rw; | |
1009 | fmode_t mode; | |
1010 | aio_rw_op *rw_op; | |
1da177e4 | 1011 | |
41ef4eb8 | 1012 | switch (req->ki_opcode) { |
1da177e4 | 1013 | case IOCB_CMD_PREAD: |
eed4e51f | 1014 | case IOCB_CMD_PREADV: |
41ef4eb8 KO |
1015 | mode = FMODE_READ; |
1016 | rw = READ; | |
1017 | rw_op = file->f_op->aio_read; | |
1018 | goto rw_common; | |
1019 | ||
1020 | case IOCB_CMD_PWRITE: | |
eed4e51f | 1021 | case IOCB_CMD_PWRITEV: |
41ef4eb8 KO |
1022 | mode = FMODE_WRITE; |
1023 | rw = WRITE; | |
1024 | rw_op = file->f_op->aio_write; | |
1025 | goto rw_common; | |
1026 | rw_common: | |
1027 | if (unlikely(!(file->f_mode & mode))) | |
1028 | return -EBADF; | |
1029 | ||
1030 | if (!rw_op) | |
1031 | return -EINVAL; | |
1032 | ||
1033 | ret = (req->ki_opcode == IOCB_CMD_PREADV || | |
1034 | req->ki_opcode == IOCB_CMD_PWRITEV) | |
1035 | ? aio_setup_vectored_rw(rw, req, compat) | |
1036 | : aio_setup_single_vector(rw, req); | |
eed4e51f | 1037 | if (ret) |
41ef4eb8 KO |
1038 | return ret; |
1039 | ||
1040 | ret = rw_verify_area(rw, file, &req->ki_pos, req->ki_nbytes); | |
1041 | if (ret < 0) | |
1042 | return ret; | |
1043 | ||
1044 | req->ki_nbytes = ret; | |
1045 | req->ki_left = ret; | |
1046 | ||
1047 | ret = aio_rw_vect_retry(req, rw, rw_op); | |
1da177e4 | 1048 | break; |
41ef4eb8 | 1049 | |
1da177e4 | 1050 | case IOCB_CMD_FDSYNC: |
41ef4eb8 KO |
1051 | if (!file->f_op->aio_fsync) |
1052 | return -EINVAL; | |
1053 | ||
1054 | ret = file->f_op->aio_fsync(req, 1); | |
1da177e4 | 1055 | break; |
41ef4eb8 | 1056 | |
1da177e4 | 1057 | case IOCB_CMD_FSYNC: |
41ef4eb8 KO |
1058 | if (!file->f_op->aio_fsync) |
1059 | return -EINVAL; | |
1060 | ||
1061 | ret = file->f_op->aio_fsync(req, 0); | |
1da177e4 | 1062 | break; |
41ef4eb8 | 1063 | |
1da177e4 | 1064 | default: |
caf4167a | 1065 | pr_debug("EINVAL: no operation provided\n"); |
41ef4eb8 | 1066 | return -EINVAL; |
1da177e4 LT |
1067 | } |
1068 | ||
41ef4eb8 KO |
1069 | if (ret != -EIOCBQUEUED) { |
1070 | /* | |
1071 | * There's no easy way to restart the syscall since other AIO's | |
1072 | * may be already running. Just fail this IO with EINTR. | |
1073 | */ | |
1074 | if (unlikely(ret == -ERESTARTSYS || ret == -ERESTARTNOINTR || | |
1075 | ret == -ERESTARTNOHAND || | |
1076 | ret == -ERESTART_RESTARTBLOCK)) | |
1077 | ret = -EINTR; | |
1078 | aio_complete(req, ret, 0); | |
1079 | } | |
1da177e4 LT |
1080 | |
1081 | return 0; | |
1082 | } | |
1083 | ||
d5470b59 | 1084 | static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb, |
a1c8eae7 | 1085 | struct iocb *iocb, bool compat) |
1da177e4 LT |
1086 | { |
1087 | struct kiocb *req; | |
1da177e4 LT |
1088 | ssize_t ret; |
1089 | ||
1090 | /* enforce forwards compatibility on users */ | |
9c3060be | 1091 | if (unlikely(iocb->aio_reserved1 || iocb->aio_reserved2)) { |
caf4167a | 1092 | pr_debug("EINVAL: reserve field set\n"); |
1da177e4 LT |
1093 | return -EINVAL; |
1094 | } | |
1095 | ||
1096 | /* prevent overflows */ | |
1097 | if (unlikely( | |
1098 | (iocb->aio_buf != (unsigned long)iocb->aio_buf) || | |
1099 | (iocb->aio_nbytes != (size_t)iocb->aio_nbytes) || | |
1100 | ((ssize_t)iocb->aio_nbytes < 0) | |
1101 | )) { | |
1102 | pr_debug("EINVAL: io_submit: overflow check\n"); | |
1103 | return -EINVAL; | |
1104 | } | |
1105 | ||
41ef4eb8 | 1106 | req = aio_get_req(ctx); |
1d98ebfc | 1107 | if (unlikely(!req)) |
1da177e4 | 1108 | return -EAGAIN; |
1d98ebfc KO |
1109 | |
1110 | req->ki_filp = fget(iocb->aio_fildes); | |
1111 | if (unlikely(!req->ki_filp)) { | |
1112 | ret = -EBADF; | |
1113 | goto out_put_req; | |
1da177e4 | 1114 | } |
1d98ebfc | 1115 | |
9c3060be DL |
1116 | if (iocb->aio_flags & IOCB_FLAG_RESFD) { |
1117 | /* | |
1118 | * If the IOCB_FLAG_RESFD flag of aio_flags is set, get an | |
1119 | * instance of the file* now. The file descriptor must be | |
1120 | * an eventfd() fd, and will be signaled for each completed | |
1121 | * event using the eventfd_signal() function. | |
1122 | */ | |
13389010 | 1123 | req->ki_eventfd = eventfd_ctx_fdget((int) iocb->aio_resfd); |
801678c5 | 1124 | if (IS_ERR(req->ki_eventfd)) { |
9c3060be | 1125 | ret = PTR_ERR(req->ki_eventfd); |
87c3a86e | 1126 | req->ki_eventfd = NULL; |
9c3060be DL |
1127 | goto out_put_req; |
1128 | } | |
1129 | } | |
1da177e4 | 1130 | |
8a660890 | 1131 | ret = put_user(KIOCB_KEY, &user_iocb->aio_key); |
1da177e4 | 1132 | if (unlikely(ret)) { |
caf4167a | 1133 | pr_debug("EFAULT: aio_key\n"); |
1da177e4 LT |
1134 | goto out_put_req; |
1135 | } | |
1136 | ||
1137 | req->ki_obj.user = user_iocb; | |
1138 | req->ki_user_data = iocb->aio_data; | |
1139 | req->ki_pos = iocb->aio_offset; | |
1140 | ||
1141 | req->ki_buf = (char __user *)(unsigned long)iocb->aio_buf; | |
1142 | req->ki_left = req->ki_nbytes = iocb->aio_nbytes; | |
1143 | req->ki_opcode = iocb->aio_lio_opcode; | |
1da177e4 | 1144 | |
41ef4eb8 | 1145 | ret = aio_run_iocb(req, compat); |
41003a7b | 1146 | if (ret) |
7137c6bd | 1147 | goto out_put_req; |
41003a7b | 1148 | |
1da177e4 LT |
1149 | aio_put_req(req); /* drop extra ref to req */ |
1150 | return 0; | |
1da177e4 | 1151 | out_put_req: |
11599eba | 1152 | atomic_dec(&ctx->reqs_active); |
1da177e4 LT |
1153 | aio_put_req(req); /* drop extra ref to req */ |
1154 | aio_put_req(req); /* drop i/o ref to req */ | |
1155 | return ret; | |
1156 | } | |
1157 | ||
9d85cba7 JM |
1158 | long do_io_submit(aio_context_t ctx_id, long nr, |
1159 | struct iocb __user *__user *iocbpp, bool compat) | |
1da177e4 LT |
1160 | { |
1161 | struct kioctx *ctx; | |
1162 | long ret = 0; | |
080d676d | 1163 | int i = 0; |
9f5b9425 | 1164 | struct blk_plug plug; |
1da177e4 LT |
1165 | |
1166 | if (unlikely(nr < 0)) | |
1167 | return -EINVAL; | |
1168 | ||
75e1c70f JM |
1169 | if (unlikely(nr > LONG_MAX/sizeof(*iocbpp))) |
1170 | nr = LONG_MAX/sizeof(*iocbpp); | |
1171 | ||
1da177e4 LT |
1172 | if (unlikely(!access_ok(VERIFY_READ, iocbpp, (nr*sizeof(*iocbpp))))) |
1173 | return -EFAULT; | |
1174 | ||
1175 | ctx = lookup_ioctx(ctx_id); | |
1176 | if (unlikely(!ctx)) { | |
caf4167a | 1177 | pr_debug("EINVAL: invalid context id\n"); |
1da177e4 LT |
1178 | return -EINVAL; |
1179 | } | |
1180 | ||
9f5b9425 SL |
1181 | blk_start_plug(&plug); |
1182 | ||
1da177e4 LT |
1183 | /* |
1184 | * AKPM: should this return a partial result if some of the IOs were | |
1185 | * successfully submitted? | |
1186 | */ | |
1187 | for (i = 0; i < nr; i++) { | |
1188 | struct iocb __user *user_iocb; | |
1189 | struct iocb tmp; | |
1190 | ||
1191 | if (unlikely(__get_user(user_iocb, iocbpp + i))) { | |
1192 | ret = -EFAULT; | |
1193 | break; | |
1194 | } | |
1195 | ||
1196 | if (unlikely(copy_from_user(&tmp, user_iocb, sizeof(tmp)))) { | |
1197 | ret = -EFAULT; | |
1198 | break; | |
1199 | } | |
1200 | ||
a1c8eae7 | 1201 | ret = io_submit_one(ctx, user_iocb, &tmp, compat); |
1da177e4 LT |
1202 | if (ret) |
1203 | break; | |
1204 | } | |
9f5b9425 | 1205 | blk_finish_plug(&plug); |
1da177e4 LT |
1206 | |
1207 | put_ioctx(ctx); | |
1208 | return i ? i : ret; | |
1209 | } | |
1210 | ||
9d85cba7 JM |
1211 | /* sys_io_submit: |
1212 | * Queue the nr iocbs pointed to by iocbpp for processing. Returns | |
1213 | * the number of iocbs queued. May return -EINVAL if the aio_context | |
1214 | * specified by ctx_id is invalid, if nr is < 0, if the iocb at | |
1216 | * *iocbpp[0] is not properly initialized, or if the operation specified | |
1216 | * is invalid for the file descriptor in the iocb. May fail with | |
1217 | * -EFAULT if any of the data structures point to invalid data. May | |
1218 | * fail with -EBADF if the file descriptor specified in the first | |
1219 | * iocb is invalid. May fail with -EAGAIN if insufficient resources | |
1220 | * are available to queue any iocbs. Will return 0 if nr is 0. Will | |
1221 | * fail with -ENOSYS if not implemented. | |
1222 | */ | |
1223 | SYSCALL_DEFINE3(io_submit, aio_context_t, ctx_id, long, nr, | |
1224 | struct iocb __user * __user *, iocbpp) | |
1225 | { | |
1226 | return do_io_submit(ctx_id, nr, iocbpp, 0); | |
1227 | } | |
1228 | ||
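A submission sketch to go with the syscall above: zero the iocb (the reserved-field check in io_submit_one() rejects stray bits), point it at a buffer, and hand the kernel an array of one pointer. The file descriptor and sizes are this example's assumptions:

```c
#include <linux/aio_abi.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static char buf[4096];

/* Queue one 4 KiB read at offset 0; returns 1 on success. */
static long submit_one_read(aio_context_t ctx, int fd)
{
	struct iocb cb;
	struct iocb *cbs[1] = { &cb };

	memset(&cb, 0, sizeof(cb));	/* reserved fields must be zero */
	cb.aio_fildes     = fd;
	cb.aio_lio_opcode = IOCB_CMD_PREAD;
	cb.aio_buf        = (unsigned long)buf;
	cb.aio_nbytes     = sizeof(buf);
	cb.aio_offset     = 0;
	cb.aio_data       = 0x12345678;	/* echoed back in io_event.data */

	return syscall(SYS_io_submit, ctx, 1L, cbs);
}
```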
1da177e4 LT |
1229 | /* lookup_kiocb |
1230 | * Finds a given iocb for cancellation. | |
1da177e4 | 1231 | */ |
25ee7e38 AB |
1232 | static struct kiocb *lookup_kiocb(struct kioctx *ctx, struct iocb __user *iocb, |
1233 | u32 key) | |
1da177e4 LT |
1234 | { |
1235 | struct list_head *pos; | |
d00689af ZB |
1236 | |
1237 | assert_spin_locked(&ctx->ctx_lock); | |
1238 | ||
8a660890 KO |
1239 | if (key != KIOCB_KEY) |
1240 | return NULL; | |
1241 | ||
1da177e4 LT |
1242 | /* TODO: use a hash or array, this sucks. */ |
1243 | list_for_each(pos, &ctx->active_reqs) { | |
1244 | struct kiocb *kiocb = list_kiocb(pos); | |
8a660890 | 1245 | if (kiocb->ki_obj.user == iocb) |
1da177e4 LT |
1246 | return kiocb; |
1247 | } | |
1248 | return NULL; | |
1249 | } | |
1250 | ||
1251 | /* sys_io_cancel: | |
1252 | * Attempts to cancel an iocb previously passed to io_submit. If | |
1253 | * the operation is successfully cancelled, the resulting event is | |
1254 | * copied into the memory pointed to by result without being placed | |
1255 | * into the completion queue and 0 is returned. May fail with | |
1256 | * -EFAULT if any of the data structures pointed to are invalid. | |
1257 | * May fail with -EINVAL if aio_context specified by ctx_id is | |
1258 | * invalid. May fail with -EAGAIN if the iocb specified was not | |
1259 | * cancelled. Will fail with -ENOSYS if not implemented. | |
1260 | */ | |
002c8976 HC |
1261 | SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb, |
1262 | struct io_event __user *, result) | |
1da177e4 | 1263 | { |
906b973c | 1264 | struct io_event res; |
1da177e4 LT |
1265 | struct kioctx *ctx; |
1266 | struct kiocb *kiocb; | |
1267 | u32 key; | |
1268 | int ret; | |
1269 | ||
1270 | ret = get_user(key, &iocb->aio_key); | |
1271 | if (unlikely(ret)) | |
1272 | return -EFAULT; | |
1273 | ||
1274 | ctx = lookup_ioctx(ctx_id); | |
1275 | if (unlikely(!ctx)) | |
1276 | return -EINVAL; | |
1277 | ||
1278 | spin_lock_irq(&ctx->ctx_lock); | |
906b973c | 1279 | |
1da177e4 | 1280 | kiocb = lookup_kiocb(ctx, iocb, key); |
906b973c KO |
1281 | if (kiocb) |
1282 | ret = kiocb_cancel(ctx, kiocb, &res); | |
1283 | else | |
1284 | ret = -EINVAL; | |
1285 | ||
1da177e4 LT |
1286 | spin_unlock_irq(&ctx->ctx_lock); |
1287 | ||
906b973c KO |
1288 | if (!ret) { |
1289 | /* Cancellation succeeded -- copy the result | |
1290 | * into the user's buffer. | |
1291 | */ | |
1292 | if (copy_to_user(result, &res, sizeof(res))) | |
1293 | ret = -EFAULT; | |
1294 | } | |
1da177e4 LT |
1295 | |
1296 | put_ioctx(ctx); | |
1297 | ||
1298 | return ret; | |
1299 | } | |
1300 | ||
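The user-side shape of a cancellation attempt follows. Note that only drivers which install a ki_cancel callback (USB gadget I/O is the classic case) can actually be cancelled, so for ordinary file AIO the kiocb_cancel() above returns -EINVAL and that is what the caller sees:

```c
#include <linux/aio_abi.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Try to cancel a previously submitted iocb. On success (0), the
 * completion event is written to *ev instead of the ring. */
static long try_cancel(aio_context_t ctx, struct iocb *cb, struct io_event *ev)
{
	return syscall(SYS_io_cancel, ctx, cb, ev);
}
```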
1301 | /* io_getevents: | |
1302 | * Attempts to read at least min_nr events and up to nr events from | |
642b5123 ST |
1303 | * the completion queue for the aio_context specified by ctx_id. If |
1304 | * it succeeds, the number of read events is returned. May fail with | |
1305 | * -EINVAL if ctx_id is invalid, if min_nr is out of range, if nr is | |
1306 | * out of range, or if timeout is out of range. May fail with -EFAULT | |
1307 | * if any of the memory specified is invalid. May return 0 or | |
1308 | * < min_nr if the timeout specified by timeout has elapsed | |
1309 | * before sufficient events are available, where timeout == NULL | |
1310 | * specifies an infinite timeout. Note that the timeout pointed to by | |
6900807c | 1311 | * timeout is relative. Will fail with -ENOSYS if not implemented. |
1da177e4 | 1312 | */ |
002c8976 HC |
1313 | SYSCALL_DEFINE5(io_getevents, aio_context_t, ctx_id, |
1314 | long, min_nr, | |
1315 | long, nr, | |
1316 | struct io_event __user *, events, | |
1317 | struct timespec __user *, timeout) | |
1da177e4 LT |
1318 | { |
1319 | struct kioctx *ioctx = lookup_ioctx(ctx_id); | |
1320 | long ret = -EINVAL; | |
1321 | ||
1322 | if (likely(ioctx)) { | |
2e410255 | 1323 | if (likely(min_nr <= nr && min_nr >= 0)) |
1da177e4 LT |
1324 | ret = read_events(ioctx, min_nr, nr, events, timeout); |
1325 | put_ioctx(ioctx); | |
1326 | } | |
1da177e4 LT |
1327 | return ret; |
1328 | } |
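Finally, the reaping side from userspace: wait up to one second for at least one of up to eight completions and print them. The relative-timeout semantics are exactly those in the comment above; the wrapper and formats are this sketch's own:

```c
#include <linux/aio_abi.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <time.h>
#include <unistd.h>

static long reap(aio_context_t ctx)
{
	struct io_event events[8];
	struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
	long n, i;

	n = syscall(SYS_io_getevents, ctx, 1L, 8L, events, &ts);
	for (i = 0; i < n; i++)
		printf("iocb %#llx: res=%lld res2=%lld data=%#llx\n",
		       (unsigned long long)events[i].obj,
		       (long long)events[i].res,
		       (long long)events[i].res2,
		       (unsigned long long)events[i].data);
	return n;	/* 0 means the timeout elapsed first */
}
```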