static struct kmem_cache *_crypt_io_pool;
+static void clone_init(struct crypt_io *, struct bio *);
+
/*
 * Different IV generation algorithms:
 *

@@ ... @@
 * May return a smaller bio when running out of pages
 */
static struct bio *
-crypt_alloc_buffer(struct crypt_config *cc, unsigned int size,
+crypt_alloc_buffer(struct crypt_io *io, unsigned int size,
		   struct bio *base_bio, unsigned int *bio_vec_idx)
{
+	struct crypt_config *cc = io->target->private;
	struct bio *clone;
	unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
@@ ... @@
	if (!clone)
		return NULL;

-	clone->bi_destructor = dm_crypt_bio_destructor;
+	clone_init(io, clone);

	/* if the last bio was not complete, continue where that one ended */
	clone->bi_idx = *bio_vec_idx;
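
Two details make the early clone_init() call above safe: the forward
declaration added at the top of the file (clone_init() is defined after
crypt_alloc_buffer()), and the bi_destructor assignment that moves into
clone_init() in the next hunk. The error path this protects sits slightly
further down in crypt_alloc_buffer(): when no pages could be allocated at
all, the clone is dropped with bio_put(), which invokes
dm_crypt_bio_destructor(), and that destructor dereferences bi_private. A
sketch of that tail (the page-allocation loop is elided; the exact code in
dm-crypt.c may differ slightly):

	/* ... mempool page-allocation loop fills clone->bi_io_vec ... */

	if (!clone->bi_size) {
		/*
		 * Only safe because clone_init() already ran: bio_put()
		 * ends up in dm_crypt_bio_destructor(), which reads
		 * clone->bi_private.
		 */
		bio_put(clone);
		return NULL;
	}
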
@@ ... @@
	clone->bi_end_io  = crypt_endio;
	clone->bi_bdev    = cc->dev->bdev;
	clone->bi_rw      = io->base_bio->bi_rw;
+	clone->bi_destructor = dm_crypt_bio_destructor;
}
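
Pieced together from the hunk above, clone_init() after this patch reads
roughly as follows. The bi_private assignment is not visible in this
excerpt and is assumed here, since dm_crypt_bio_destructor() needs it:

static void clone_init(struct crypt_io *io, struct bio *clone)
{
	struct crypt_config *cc = io->target->private;

	clone->bi_private = io;	/* assumed: the destructor reads this */
	clone->bi_end_io  = crypt_endio;
	clone->bi_bdev    = cc->dev->bdev;
	clone->bi_rw      = io->base_bio->bi_rw;
	clone->bi_destructor = dm_crypt_bio_destructor;
}

With bi_destructor set here, every clone is safe to bio_put() as soon as
clone_init() has run, which is what lets crypt_alloc_buffer() call it
first thing after allocation.
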
static void process_read(struct crypt_io *io)

@@ ... @@
	}

	clone_init(io, clone);
-	clone->bi_destructor = dm_crypt_bio_destructor;
	clone->bi_idx = 0;
	clone->bi_vcnt = bio_segments(base_bio);
	clone->bi_size = base_bio->bi_size;
@@ ... @@
	 * so repeat the whole process until all the data can be handled.
	 */
	while (remaining) {
-		clone = crypt_alloc_buffer(cc, base_bio->bi_size,
+		clone = crypt_alloc_buffer(io, base_bio->bi_size,
					   io->first_clone, &bvec_idx);
		if (unlikely(!clone)) {
			dec_pending(io, -ENOMEM);
			return;
		}

-		clone_init(io, clone);
		clone->bi_sector = cc->start + sector;

		if (!io->first_clone) {
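
As an aside, the lifetime rule behind this patch is easy to demonstrate
outside the kernel. The sketch below is a self-contained userspace analog,
not dm-crypt code; every name in it is invented. A destructor that
dereferences a private pointer forces the same discipline: initialize that
pointer before any error path can drop the last reference.

#include <stdio.h>
#include <stdlib.h>

/* Userspace analog of the bug this patch fixes; all names are
 * illustrative. The destructor dereferences a "private" pointer, so
 * that pointer must be set before any error path drops the last
 * reference. */

struct io_ctx {
	const char *name;
};

struct buf {
	int refcnt;
	struct io_ctx *private;			/* analog of bio->bi_private */
	void (*destructor)(struct buf *);	/* analog of bio->bi_destructor */
};

static void buf_destructor(struct buf *b)
{
	/* Would crash here if b->private were still NULL. */
	printf("releasing buffer owned by %s\n", b->private->name);
	free(b);
}

/* Analog of clone_init(): set private data and destructor together,
 * immediately after allocation. */
static void buf_init(struct io_ctx *io, struct buf *b)
{
	b->private = io;
	b->destructor = buf_destructor;
}

/* Analog of bio_put(). */
static void buf_put(struct buf *b)
{
	if (--b->refcnt == 0)
		b->destructor(b);
}

/* Analog of crypt_alloc_buffer(): allocation can fail after the
 * object already exists. */
static struct buf *alloc_buffer(struct io_ctx *io, int fail_later)
{
	struct buf *b = calloc(1, sizeof(*b));

	if (!b)
		return NULL;
	b->refcnt = 1;

	buf_init(io, b);	/* the fix: initialize before any buf_put() */

	if (fail_later) {
		buf_put(b);	/* safe: destructor and private are set */
		return NULL;
	}
	return b;
}

int main(void)
{
	struct io_ctx io = { "demo" };
	struct buf *b;

	b = alloc_buffer(&io, 0);
	if (b)
		buf_put(b);
	alloc_buffer(&io, 1);	/* exercises the error path safely */
	return 0;
}

If buf_init() ran only after the failure check, the error path would call
through an uninitialized destructor or dereference a NULL private pointer,
which is the same class of crash this patch prevents in dm-crypt.
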