unsigned int enabled_steps;
};
+/*
+ * Device unit number (DUN) used as the per-sector IV seed:
+ * high 32 bits = inode number, low 32 bits = page index in the file.
+ * The u64 cast is required: i_ino is unsigned long, which is 32 bits on
+ * 32-bit kernels, so shifting it left by 32 without widening is undefined
+ * behavior and would drop the inode number from the DUN.
+ */
+#define PG_DUN(i,p) \
+ (((((u64)(i)->i_ino) & 0xffffffff) << 32) | ((p)->index & 0xffffffff))
+
+
+/*
+ * Decide whether this I/O should be encrypted inline by the disk cipher
+ * (hardware crypto path) instead of by per-page software fscrypt.
+ *
+ * @inode: inode the I/O targets
+ * @fio:   f2fs I/O descriptor; NULL on the buffered read path
+ *
+ * Returns true only when CONFIG_CRYPTO_DISKCIPHER is built in, the file
+ * is fscrypt-encrypted, and fscrypt reports a disk-cipher policy for it.
+ * Always false when diskcipher support is compiled out.
+ */
+static inline bool f2fs_may_encrypt_bio(struct inode *inode,
+ struct f2fs_io_info *fio)
+{
+#ifdef CONFIG_CRYPTO_DISKCIPHER
+ /*
+ * Writes: only plain DATA pages qualify; NODE/META I/O and pages
+ * already encrypted in software must not be re-encrypted inline.
+ */
+ if (fio && (fio->type != DATA || fio->encrypted_page))
+ return false;
+
+ return (f2fs_encrypted_file(inode) &&
+ fscrypt_disk_encrypted(inode));
+#else
+ return false;
+#endif
+}
+
+/*
+ * Report whether a bio's op flags mark it for inline disk encryption
+ * (the REQ_CRYPT flag set by the diskcipher path).
+ */
+static inline bool f2fs_bio_disk_encrypted(unsigned int bi_opf)
+{
+ return (bi_opf & REQ_CRYPT) != 0;
+}
+
+/*
+ * Decide whether a new page may be merged into an in-flight bio without
+ * violating inline-encryption constraints.
+ *
+ * @bio:           the bio being built; NULL means "nothing to conflict with"
+ * @dun:           device unit number (IV seed) of the page to add
+ * @ci:            the inode's diskcipher handle (fscrypt_get_diskcipher())
+ * @bio_encrypted: whether the new page wants inline encryption
+ *
+ * With CONFIG_CRYPTO_DISKCIPHER_DUN, pages may only merge if they share
+ * the same cipher AND their DUNs are contiguous (the new DUN must equal
+ * the DUN one past the bio's current end). Without per-DUN IVs, sharing
+ * the same cipher handle is sufficient. With diskcipher compiled out,
+ * everything is mergeable.
+ */
+static bool f2fs_mergeable_bio(struct bio *bio, u64 dun, void *ci, bool bio_encrypted)
+{
+#ifdef CONFIG_CRYPTO_DISKCIPHER
+ if (!bio)
+ return true;
+
+ /* if both of them are not encrypted, no further check is needed */
+ if (!f2fs_bio_disk_encrypted(bio->bi_opf) && !bio_encrypted)
+ return true;
+
+#ifdef CONFIG_CRYPTO_DISKCIPHER_DUN
+ /* same cipher: mergeable only if the IV sequence stays contiguous */
+ if (bio->bi_aux_private == ci)
+ return bio_end_dun(bio) == dun;
+ else
+ return false;
+#else
+ /* no per-DUN IVs: same cipher handle is enough to merge */
+ return bio->bi_aux_private == ci;
+#endif
+#else
+ return true;
+#endif
+}
+
static void __read_end_io(struct bio *bio)
{
struct page *page;
}
#endif
+ if(f2fs_bio_disk_encrypted(bio->bi_opf))
+ goto end_io;
+
if (f2fs_bio_post_read_required(bio)) {
struct bio_post_read_ctx *ctx = bio->bi_private;
return;
}
+end_io:
__read_end_io(bio);
}
if (!io->bio)
return;
- bio_set_op_attrs(io->bio, fio->op, fio->op_flags);
+ if (f2fs_bio_disk_encrypted(io->bio->bi_opf))
+ bio_set_op_attrs(io->bio, fio->op, fio->op_flags | REQ_CRYPT);
+ else
+ bio_set_op_attrs(io->bio, fio->op, fio->op_flags);
if (is_read_io(fio->op))
trace_f2fs_prepare_read_bio(io->sbi->sb, fio->type, io->bio);
struct bio *bio;
struct page *page = fio->encrypted_page ?
fio->encrypted_page : fio->page;
+ struct inode *inode = fio->page->mapping->host;
verify_block_addr(fio, fio->new_blkaddr);
trace_f2fs_submit_page_bio(page, fio);
bio_put(bio);
return -EFAULT;
}
+ fio->op_flags |= fio->encrypted_page ? REQ_NOENCRYPT : 0;
bio_set_op_attrs(bio, fio->op, fio->op_flags);
+ if (f2fs_may_encrypt_bio(inode, fio))
+ fscrypt_set_bio(inode, bio, PG_DUN(inode, fio->page));
+
__submit_bio(fio->sbi, bio, fio->type);
if (!is_read_io(fio->op))
struct f2fs_bio_info *io = sbi->write_io[btype] + fio->temp;
struct page *bio_page;
int err = 0;
+ struct inode *inode;
+ bool bio_encrypted;
+ u64 dun;
f2fs_bug_on(sbi, is_read_io(fio->op));
verify_block_addr(fio, fio->new_blkaddr);
bio_page = fio->encrypted_page ? fio->encrypted_page : fio->page;
+ inode = fio->page->mapping->host;
+ dun = PG_DUN(inode, fio->page);
+ bio_encrypted = f2fs_may_encrypt_bio(inode, fio);
+ fio->op_flags |= fio->encrypted_page ? REQ_NOENCRYPT : 0;
/* set submitted = true as a return value */
fio->submitted = true;
(io->fio.op != fio->op || io->fio.op_flags != fio->op_flags) ||
!__same_bdev(sbi, fio->new_blkaddr, io->bio)))
__submit_merged_bio(io);
+
+ if (!f2fs_mergeable_bio(io->bio, dun, fscrypt_get_diskcipher(inode), bio_encrypted))
+ __submit_merged_bio(io);
+
alloc_new:
if (io->bio == NULL) {
if ((fio->type == DATA || fio->type == NODE) &&
io->bio = __bio_alloc(sbi, fio->new_blkaddr, fio->io_wbc,
BIO_MAX_PAGES, false,
fio->type, fio->temp);
+ if (bio_encrypted)
+ fscrypt_set_bio(inode, io->bio, dun);
+
io->fio = *fio;
}
bio->bi_end_io = f2fs_read_end_io;
bio_set_op_attrs(bio, REQ_OP_READ, 0);
- if (f2fs_encrypted_file(inode))
+ if (f2fs_encrypted_file(inode) && !fscrypt_disk_encrypted(inode))
post_read_steps |= 1 << STEP_DECRYPT;
if (post_read_steps) {
ctx = mempool_alloc(bio_post_read_ctx_pool, GFP_NOFS);
ctx->bio = bio;
ctx->enabled_steps = post_read_steps;
bio->bi_private = ctx;
+ bio_set_op_attrs(bio, REQ_OP_READ,
+ (f2fs_encrypted_inode(inode) ?
+ REQ_NOENCRYPT :
+ 0));
/* wait the page to be moved by cleaning */
f2fs_wait_on_block_writeback(sbi, blkaddr);
bio_put(bio);
return -EFAULT;
}
+
+ if (f2fs_may_encrypt_bio(inode, NULL))
+ fscrypt_set_bio(inode, bio, PG_DUN(inode, page));
+
__submit_bio(F2FS_I_SB(inode), bio, DATA);
return 0;
}
sector_t last_block_in_file;
sector_t block_nr;
struct f2fs_map_blocks map;
+ bool bio_encrypted;
+ u64 dun;
map.m_pblk = 0;
map.m_lblk = 0;
__submit_bio(F2FS_I_SB(inode), bio, DATA);
bio = NULL;
}
+
+ dun = PG_DUN(inode, page);
+ bio_encrypted = f2fs_may_encrypt_bio(inode, NULL);
+ if (!f2fs_mergeable_bio(bio, dun, fscrypt_get_diskcipher(inode), bio_encrypted)) {
+ __submit_bio(F2FS_I_SB(inode), bio, DATA);
+ bio = NULL;
+ }
+
if (bio == NULL) {
bio = f2fs_grab_read_bio(inode, block_nr, nr_pages);
if (IS_ERR(bio)) {
bio = NULL;
goto set_error_page;
}
+ if (f2fs_may_encrypt_bio(inode, NULL))
+ fscrypt_set_bio(inode, bio, dun);
}
if (bio_add_page(bio, page, blocksize, 0) < blocksize)
f2fs_wait_on_block_writeback(fio->sbi, fio->old_blkaddr);
retry_encrypt:
+ if (fscrypt_disk_encrypted(inode))
+ return 0;
+
fio->encrypted_page = fscrypt_encrypt_page(inode, fio->page,
PAGE_SIZE, 0, fio->page->index, gfp_flags);
if (!IS_ERR(fio->encrypted_page))