unsigned int enabled_steps;
};
+static inline bool f2fs_may_encrypt_bio(struct inode *inode,
+ struct f2fs_io_info *fio)
+{
+ /*
+ * Decide whether this bio may use inline (hardware) encryption.
+ * Only plain DATA pages qualify; a request that already carries an
+ * encrypted_page was encrypted in software and must not be tagged.
+ */
+ if (fio) {
+ if (fio->type != DATA)
+ return false;
+ if (fio->encrypted_page)
+ return false;
+ }
+
+ /* The inode itself must be encrypted and use disk-level encryption. */
+ return f2fs_encrypted_file(inode) && fscrypt_disk_encrypted(inode);
+}
+
+/*
+ * Report whether a bio is tagged for inline disk encryption, i.e. whether
+ * REQ_CRYPT is set in its op flags. Collapsed from an if/else that
+ * returned true/false into a direct boolean expression.
+ */
+static inline bool f2fs_bio_disk_encrypted(unsigned int bi_opf)
+{
+ return (bi_opf & REQ_CRYPT) != 0;
+}
+
+static bool f2fs_mergeable_bio(struct bio *bio, void *ci, bool bio_encrypted)
+{
+ bool cur_encrypted;
+
+ /* An absent bio can merge with anything. */
+ if (!bio)
+ return true;
+
+ cur_encrypted = f2fs_bio_disk_encrypted(bio->bi_opf);
+
+ /* When neither side is encrypted, no further check is needed. */
+ if (!cur_encrypted && !bio_encrypted)
+ return true;
+
+ /*
+ * ICE allows only a consecutive iv_key stream: the pending bio may
+ * only be extended with pages using the same cipher context.
+ */
+ return bio->bi_aux_private == ci;
+}
+
static void __read_end_io(struct bio *bio)
{
struct page *page;
}
#endif
+ if(f2fs_bio_disk_encrypted(bio->bi_opf))
+ goto end_io;
+
if (f2fs_bio_post_read_required(bio)) {
struct bio_post_read_ctx *ctx = bio->bi_private;
return;
}
+end_io:
__read_end_io(bio);
}
if (!io->bio)
return;
- bio_set_op_attrs(io->bio, fio->op, fio->op_flags);
+ if (f2fs_bio_disk_encrypted(io->bio->bi_opf))
+ bio_set_op_attrs(io->bio, fio->op, fio->op_flags | REQ_CRYPT);
+ else
+ bio_set_op_attrs(io->bio, fio->op, fio->op_flags);
if (is_read_io(fio->op))
trace_f2fs_prepare_read_bio(io->sbi->sb, fio->type, io->bio);
struct bio *bio;
struct page *page = fio->encrypted_page ?
fio->encrypted_page : fio->page;
+ struct inode *inode = fio->page->mapping->host;
verify_block_addr(fio, fio->new_blkaddr);
trace_f2fs_submit_page_bio(page, fio);
fio->op_flags |= fio->encrypted_page ? REQ_NOENCRYPT : 0;
bio_set_op_attrs(bio, fio->op, fio->op_flags);
+ if (f2fs_may_encrypt_bio(inode, fio))
+ fscrypt_set_bio(inode, bio);
+
__submit_bio(fio->sbi, bio, fio->type);
if (!is_read_io(fio->op))
struct f2fs_bio_info *io = sbi->write_io[btype] + fio->temp;
struct page *bio_page;
int err = 0;
+ struct inode *inode;
+ bool bio_encrypted;
f2fs_bug_on(sbi, is_read_io(fio->op));
verify_block_addr(fio, fio->new_blkaddr);
bio_page = fio->encrypted_page ? fio->encrypted_page : fio->page;
+ inode = fio->page->mapping->host;
+ bio_encrypted = f2fs_may_encrypt_bio(inode, fio);
fio->op_flags |= fio->encrypted_page ? REQ_NOENCRYPT : 0;
/* set submitted = true as a return value */
(io->fio.op != fio->op || io->fio.op_flags != fio->op_flags) ||
!__same_bdev(sbi, fio->new_blkaddr, io->bio)))
__submit_merged_bio(io);
+
+ if (!f2fs_mergeable_bio(io->bio, fscrypt_get_diskcipher(inode), bio_encrypted))
+ __submit_merged_bio(io);
+
alloc_new:
if (io->bio == NULL) {
if ((fio->type == DATA || fio->type == NODE) &&
io->bio = __bio_alloc(sbi, fio->new_blkaddr, fio->io_wbc,
BIO_MAX_PAGES, false,
fio->type, fio->temp);
+ if (bio_encrypted)
+ fscrypt_set_bio(inode, io->bio);
+
io->fio = *fio;
}
bio->bi_end_io = f2fs_read_end_io;
bio_set_op_attrs(bio, REQ_OP_READ, 0);
- if (f2fs_encrypted_file(inode))
+ if (f2fs_encrypted_file(inode) && !fscrypt_disk_encrypted(inode))
post_read_steps |= 1 << STEP_DECRYPT;
if (post_read_steps) {
ctx = mempool_alloc(bio_post_read_ctx_pool, GFP_NOFS);
bio_put(bio);
return -EFAULT;
}
+
+ if (f2fs_may_encrypt_bio(inode, NULL))
+ fscrypt_set_bio(inode, bio);
+
__submit_bio(F2FS_I_SB(inode), bio, DATA);
return 0;
}
sector_t last_block_in_file;
sector_t block_nr;
struct f2fs_map_blocks map;
+ bool bio_encrypted;
map.m_pblk = 0;
map.m_lblk = 0;
__submit_bio(F2FS_I_SB(inode), bio, DATA);
bio = NULL;
}
+
+ bio_encrypted = f2fs_may_encrypt_bio(inode, NULL);
+ if (!f2fs_mergeable_bio(bio, fscrypt_get_diskcipher(inode), bio_encrypted)) {
+ __submit_bio(F2FS_I_SB(inode), bio, DATA);
+ bio = NULL;
+ }
+
if (bio == NULL) {
bio = f2fs_grab_read_bio(inode, block_nr, nr_pages);
if (IS_ERR(bio)) {
bio = NULL;
goto set_error_page;
}
+ if (f2fs_may_encrypt_bio(inode, NULL))
+ fscrypt_set_bio(inode, bio);
}
if (bio_add_page(bio, page, blocksize, 0) < blocksize)
f2fs_wait_on_block_writeback(fio->sbi, fio->old_blkaddr);
retry_encrypt:
+ if (fscrypt_disk_encrypted(inode))
+ return 0;
+
fio->encrypted_page = fscrypt_encrypt_page(inode, fio->page,
PAGE_SIZE, 0, fio->page->index, gfp_flags);
if (!IS_ERR(fio->encrypted_page))