/*
* Copyright (C) 2003 Christophe Saout <christophe@saout.de>
* Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org>
- * Copyright (C) 2006-2007 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2006-2008 Red Hat, Inc. All rights reserved.
*
* This file is released under the GPL.
*/
struct workqueue_struct *io_queue;
struct workqueue_struct *crypt_queue;
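+ /* writers wait here for in-flight async crypto when a clone allocation fails */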
+ wait_queue_head_t writeq;
+
/*
* crypto related data
*/
ctx->idx_out = bio_out ? bio_out->bi_idx : 0;
ctx->sector = sector + cc->iv_offset;
init_completion(&ctx->restart);
- /*
- * Crypto operation can be asynchronous,
- * ctx->pending is increased after request submission.
- * We need to ensure that we don't call the crypt finish
- * operation before pending got incremented
- * (dependent on crypt submission return code).
- */
- atomic_set(&ctx->pending, 2);
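+ /*
+  * The counter starts at one so that async completions cannot finish
+  * the context before crypt_convert() returns; the caller drops this
+  * initial reference with atomic_dec_and_test().
+  */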
+ atomic_set(&ctx->pending, 1);
}
static int crypt_convert_block(struct crypt_config *cc,
static int crypt_convert(struct crypt_config *cc,
struct convert_context *ctx)
{
- int r = 0;
+ int r;
while(ctx->idx_in < ctx->bio_in->bi_vcnt &&
ctx->idx_out < ctx->bio_out->bi_vcnt) {
crypt_alloc_req(cc, ctx);
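+ /*
+  * Take a reference for this block: dropped below in the sync and
+  * error cases, otherwise when the async request completes.
+  */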
+ atomic_inc(&ctx->pending);
+
r = crypt_convert_block(cc, ctx, cc->req);
switch (r) {
+ /* async */
case -EBUSY:
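+ /* crypto layer busy: wait for ctx->restart, then handle like -EINPROGRESS */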
wait_for_completion(&ctx->restart);
INIT_COMPLETION(ctx->restart);
/* fall through*/
case -EINPROGRESS:
- atomic_inc(&ctx->pending);
cc->req = NULL;
- r = 0;
- /* fall through*/
+ ctx->sector++;
+ continue;
+
+ /* sync */
case 0:
+ atomic_dec(&ctx->pending);
ctx->sector++;
continue;
- }
- break;
+ /* error */
+ default:
+ atomic_dec(&ctx->pending);
+ return r;
+ }
}
- /*
- * If there are pending crypto operation run async
- * code. Otherwise process return code synchronously.
- * The step of 2 ensures that async finish doesn't
- * call crypto finish too early.
- */
- if (atomic_sub_return(2, &ctx->pending))
- return -EINPROGRESS;
-
- return r;
+ return 0;
}
static void dm_crypt_bio_destructor(struct bio *bio)
static void kcryptd_io_write(struct dm_crypt_io *io)
{
struct bio *clone = io->ctx.bio_out;
+ struct crypt_config *cc = io->target->private;
generic_make_request(clone);
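+ /* let a writer blocked in the out-of-memory path make progress */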
+ wake_up(&cc->writeq);
}
static void kcryptd_io(struct work_struct *work)
r = crypt_convert(cc, &io->ctx);
- if (r != -EINPROGRESS) {
+ if (atomic_dec_and_test(&io->ctx.pending)) {
+ /* processed, no running async crypto */
kcryptd_crypt_write_io_submit(io, r, 0);
if (unlikely(r < 0))
return;
atomic_inc(&io->pending);
/* out of memory -> run queues */
- if (unlikely(remaining))
+ if (unlikely(remaining)) {
+ /* wait for async crypto then reinitialize pending */
+ wait_event(cc->writeq, !atomic_read(&io->ctx.pending));
+ atomic_set(&io->ctx.pending, 1);
congestion_wait(WRITE, HZ/100);
+ }
}
}
r = crypt_convert(cc, &io->ctx);
- if (r != -EINPROGRESS)
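+ /* processed, no running async crypto */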
+ if (atomic_dec_and_test(&io->ctx.pending))
kcryptd_crypt_read_done(io, r);
crypt_dec_pending(io);
goto bad_crypt_queue;
}
+ init_waitqueue_head(&cc->writeq);
ti->private = cc;
return 0;