kunmap_atomic((void *)((unsigned long)__event & PAGE_MASK), km); \
} while(0)
+
+/* __put_ioctx
+ * Called when the last user of an aio context has gone away,
+ * and the struct needs to be freed.
+ */
+static void __put_ioctx(struct kioctx *ctx)
+{
+ unsigned nr_events = ctx->max_reqs;
+
+ BUG_ON(ctx->reqs_active);
+
+ cancel_delayed_work(&ctx->wq);
+ cancel_work_sync(&ctx->wq.work);
+ aio_free_ring(ctx);
+ mmdrop(ctx->mm);
+ ctx->mm = NULL;
+ pr_debug("__put_ioctx: freeing %p\n", ctx);
+ kmem_cache_free(kioctx_cachep, ctx);
+
+ if (nr_events) {
+ spin_lock(&aio_nr_lock);
+ BUG_ON(aio_nr - nr_events > aio_nr);
+ aio_nr -= nr_events;
+ spin_unlock(&aio_nr_lock);
+ }
+}
+
+#define get_ioctx(kioctx) do { \
+ BUG_ON(atomic_read(&(kioctx)->users) <= 0); \
+ atomic_inc(&(kioctx)->users); \
+} while (0)
+#define put_ioctx(kioctx) do { \
+ BUG_ON(atomic_read(&(kioctx)->users) <= 0); \
+ if (unlikely(atomic_dec_and_test(&(kioctx)->users))) \
+ __put_ioctx(kioctx); \
+} while (0)
+
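/*
 * Illustrative sketch (not part of this patch): the pairing these macros
 * enforce, at a hypothetical call site. lookup_ioctx() returns with the
 * ->users count elevated, so every successful lookup must be balanced by
 * a put_ioctx(); the final put frees the context via __put_ioctx() above.
 */
static long example_ioctx_user(unsigned long ctx_id)	/* hypothetical */
{
	struct kioctx *ctx = lookup_ioctx(ctx_id);	/* takes a reference */
	long ret = -EINVAL;

	if (ctx) {
		/* ... the held reference pins ctx across this region ... */
		ret = 0;
		put_ioctx(ctx);		/* last put calls __put_ioctx() */
	}
	return ret;
}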
/* ioctx_alloc
* Allocates and initializes an ioctx. Returns an ERR_PTR if it failed.
*/
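/*
 * Illustrative sketch (not from this patch): since ioctx_alloc() reports
 * failure with an ERR_PTR rather than NULL, callers check it like so:
 *
 *	ctx = ioctx_alloc(nr_events);
 *	if (IS_ERR(ctx))
 *		return PTR_ERR(ctx);
 */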
}
}
-/* __put_ioctx
- * Called when the last user of an aio context has gone away,
- * and the struct needs to be freed.
- */
-void __put_ioctx(struct kioctx *ctx)
-{
- unsigned nr_events = ctx->max_reqs;
-
- BUG_ON(ctx->reqs_active);
-
- cancel_delayed_work(&ctx->wq);
- cancel_work_sync(&ctx->wq.work);
- aio_free_ring(ctx);
- mmdrop(ctx->mm);
- ctx->mm = NULL;
- pr_debug("__put_ioctx: freeing %p\n", ctx);
- kmem_cache_free(kioctx_cachep, ctx);
-
- if (nr_events) {
- spin_lock(&aio_nr_lock);
- BUG_ON(aio_nr - nr_events > aio_nr);
- aio_nr -= nr_events;
- spin_unlock(&aio_nr_lock);
- }
-}
-
/* aio_get_req
* Allocate a slot for an aio request. Increments the users count
* of the kioctx so that the kioctx stays around until all requests are
/* Lookup an ioctx id. ioctx_list is lockless for reads.
* FIXME: this is O(n) and is only suitable for development.
*/
-struct kioctx *lookup_ioctx(unsigned long ctx_id)
+static struct kioctx *lookup_ioctx(unsigned long ctx_id)
{
struct kioctx *ioctx;
struct mm_struct *mm;
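/*
 * Illustrative sketch of the O(n) walk the FIXME above refers to; the
 * body is truncated in this hunk. The field names (mm->ioctx_list,
 * ->user_id, ->next) are assumptions from context, and any read-side
 * protection or liveness checks are omitted here.
 */
	mm = current->mm;
	for (ioctx = mm->ioctx_list; ioctx; ioctx = ioctx->next) {
		if (ioctx->user_id == ctx_id) {
			get_ioctx(ioctx);	/* pin before returning */
			return ioctx;
		}
	}
	return NULL;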
return 1;
}
-int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
+static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
struct iocb *iocb)
{
struct kiocb *req;
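/*
 * Illustrative sketch (not from this patch): with lookup_ioctx() and
 * io_submit_one() now static, their only callers live in fs/aio.c
 * itself, e.g. a sys_io_submit()-style loop (simplified, error
 * handling trimmed):
 */
	ctx = lookup_ioctx(ctx_id);
	if (unlikely(!ctx))
		return -EINVAL;
	for (i = 0; i < nr; i++) {
		struct iocb __user *user_iocb;
		struct iocb tmp;

		if (get_user(user_iocb, iocbpp + i) ||
		    copy_from_user(&tmp, user_iocb, sizeof(tmp)))
			break;
		if (io_submit_one(ctx, user_iocb, &tmp))
			break;
	}
	put_ioctx(ctx);		/* balance the reference the lookup took */
	return i;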
extern int aio_put_req(struct kiocb *iocb);
extern void kick_iocb(struct kiocb *iocb);
extern int aio_complete(struct kiocb *iocb, long res, long res2);
-extern void __put_ioctx(struct kioctx *ctx);
struct mm_struct;
extern void exit_aio(struct mm_struct *mm);
-extern struct kioctx *lookup_ioctx(unsigned long ctx_id);
-extern int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
- struct iocb *iocb);
-
-/* semi private, but used by the 32bit emulations: */
-struct kioctx *lookup_ioctx(unsigned long ctx_id);
-int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
- struct iocb *iocb);
-
-#define get_ioctx(kioctx) do { \
- BUG_ON(atomic_read(&(kioctx)->users) <= 0); \
- atomic_inc(&(kioctx)->users); \
-} while (0)
-#define put_ioctx(kioctx) do { \
- BUG_ON(atomic_read(&(kioctx)->users) <= 0); \
- if (unlikely(atomic_dec_and_test(&(kioctx)->users))) \
- __put_ioctx(kioctx); \
-} while (0)
#define io_wait_to_kiocb(wait) container_of(wait, struct kiocb, ki_wait)
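/*
 * Illustrative userspace sketch (not part of this patch): the raw AIO
 * syscalls drive the lifecycle above. io_setup() creates the kioctx
 * whose ->users count get_ioctx()/put_ioctx() manage, io_submit()
 * reaches io_submit_one() via lookup_ioctx(), and io_destroy() drops
 * the last reference so __put_ioctx() frees the context.
 */
#include <linux/aio_abi.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <fcntl.h>
#include <string.h>

int main(void)
{
	aio_context_t ctx = 0;
	struct iocb cb;
	struct iocb *cbs[1] = { &cb };
	char buf[4096];
	int fd = open("/etc/hostname", O_RDONLY);

	if (fd < 0 || syscall(SYS_io_setup, 128, &ctx))
		return 1;

	memset(&cb, 0, sizeof(cb));
	cb.aio_fildes = fd;
	cb.aio_lio_opcode = IOCB_CMD_PREAD;
	cb.aio_buf = (unsigned long)buf;
	cb.aio_nbytes = sizeof(buf);

	syscall(SYS_io_submit, ctx, 1, cbs);	/* -> io_submit_one() */
	syscall(SYS_io_destroy, ctx);		/* final put -> __put_ioctx() */
	close(fd);
	return 0;
}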