  	return bio;
  }
  
- static inline void __submit_bio(struct f2fs_sb_info *sbi, struct bio *bio)
 -static inline void __submit_bio(struct f2fs_sb_info *sbi, int rw,
 -			struct bio *bio, enum page_type type)
++static inline void __submit_bio(struct f2fs_sb_info *sbi,
++			struct bio *bio, enum page_type type)
  {
- 	if (!is_read_io(bio_op(bio)))
 -	if (!is_read_io(rw)) {
++	if (!is_read_io(bio_op(bio))) {
  		atomic_inc(&sbi->nr_wb_bios);
+ 		if (f2fs_sb_mounted_hmsmr(sbi->sb) &&
+ 			current->plug && (type == DATA || type == NODE))
+ 			blk_finish_plug(current->plug);
+ 	}
 +	submit_bio(bio);
  }
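
The resolved __submit_bio() above reconciles two independent changes: the 4.8 block API, where submit_bio() loses its rw argument and the operation is carried in the bio itself (stamped with bio_set_op_attrs(), read back with bio_op()), and the f2fs host-managed SMR support, which flushes the caller's plug list before queuing DATA or NODE writes, evidently to keep writes flowing to the device in submission order. A minimal sketch of the calling-convention change, with illustrative flag values rather than anything taken from this patch:

	/* pre-4.8 block API: the direction and flags ride alongside the bio */
	submit_bio(WRITE_SYNC, bio);

	/* 4.8+ block API: op and flags are stamped on the bio, then submitted */
	bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_SYNC);
	submit_bio(bio);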
@@@ ... @@@ static void __submit_merged_bio(struct f2fs_bio_info *io)
  	else
  		trace_f2fs_submit_write_bio(io->sbi->sb, fio, io->bio);
  
 -	__submit_bio(io->sbi, fio->rw, io->bio, fio->type);
 +	bio_set_op_attrs(io->bio, fio->op, fio->op_flags);
 +
- 	__submit_bio(io->sbi, io->bio);
++	__submit_bio(io->sbi, io->bio, fio->type);
  	io->bio = NULL;
  }
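
__submit_merged_bio() shows the same split on the f2fs side: the old fio->rw field becomes the fio->op / fio->op_flags pair, and the op is applied to the bio only at submission time. Sketched against the 4.8-era struct f2fs_io_info; the WRITE values here are illustrative, while a real read initializer appears verbatim in the f2fs_write_begin() hunk further down:

	struct f2fs_io_info fio = {
		.sbi = sbi,
		.type = DATA,
		.op = REQ_OP_WRITE,	/* was: .rw = WRITE_SYNC */
		.op_flags = WRITE_SYNC,	/* flags now travel separately from the op */
	};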
  		bio_put(bio);
  		return -EFAULT;
  	}
 +	bio->bi_rw = fio->op_flags;
 +	bio_set_op_attrs(bio, fio->op, fio->op_flags);
  
- 	__submit_bio(fio->sbi, bio);
 -	__submit_bio(fio->sbi, fio->rw, bio, fio->type);
++	__submit_bio(fio->sbi, bio, fio->type);
  	return 0;
  }
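
One subtlety the resolution preserves in f2fs_submit_page_bio(): __submit_bio() now decides read versus write by calling bio_op(bio), so the op must be encoded into the bio before submission, whereas the old code simply forwarded rw. A sketch of the required ordering, assuming the usual caller context (fio filled in, bio allocated and populated):

	bio_set_op_attrs(bio, fio->op, fio->op_flags);	/* encode the op first... */
	__submit_bio(fio->sbi, bio, fio->type);		/* ...so is_read_io(bio_op(bio)) sees it */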
@@@ ... @@@ static int f2fs_mpage_readpages(struct address_space *mapping,
  		 */
  		if (bio && (last_block_in_bio != block_nr - 1)) {
  submit_and_realloc:
- 			__submit_bio(F2FS_I_SB(inode), bio);
 -			__submit_bio(F2FS_I_SB(inode), READ, bio, DATA);
++			__submit_bio(F2FS_I_SB(inode), bio, DATA);
  			bio = NULL;
  		}
  		if (bio == NULL) {
- 			struct fscrypt_ctx *ctx = NULL;
- 
- 			if (f2fs_encrypted_inode(inode) &&
- 					S_ISREG(inode->i_mode)) {
- 
- 				ctx = fscrypt_get_ctx(inode, GFP_NOFS);
- 				if (IS_ERR(ctx))
- 					goto set_error_page;
- 
- 				/* wait the page to be moved by cleaning */
- 				f2fs_wait_on_encrypted_page_writeback(
- 						F2FS_I_SB(inode), block_nr);
- 			}
- 
- 			bio = bio_alloc(GFP_KERNEL,
- 				min_t(int, nr_pages, BIO_MAX_PAGES));
- 			if (!bio) {
- 				if (ctx)
- 					fscrypt_release_ctx(ctx);
+ 			bio = f2fs_grab_bio(inode, block_nr, nr_pages);
+ 			if (IS_ERR(bio)) {
+ 				bio = NULL;
  				goto set_error_page;
  			}
- 			bio->bi_bdev = bdev;
- 			bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(block_nr);
- 			bio->bi_end_io = f2fs_read_end_io;
- 			bio->bi_private = ctx;
 +			bio_set_op_attrs(bio, REQ_OP_READ, 0);
  		}
  
  		if (bio_add_page(bio, page, blocksize, 0) < blocksize)
@@@ ... @@@ static int f2fs_mpage_readpages(struct address_space *mapping,
  		goto next_page;
  confused:
  		if (bio) {
- 			__submit_bio(F2FS_I_SB(inode), bio);
 -			__submit_bio(F2FS_I_SB(inode), READ, bio, DATA);
++			__submit_bio(F2FS_I_SB(inode), bio, DATA);
  			bio = NULL;
  		}
  		unlock_page(page);
@@@ ... @@@ static int f2fs_mpage_readpages(struct address_space *mapping,
  	}
  	BUG_ON(pages && !list_empty(pages));
  	if (bio)
- 		__submit_bio(F2FS_I_SB(inode), bio);
 -		__submit_bio(F2FS_I_SB(inode), READ, bio, DATA);
++		__submit_bio(F2FS_I_SB(inode), bio, DATA);
  	return 0;
  }
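
All three read-path call sites above now go through f2fs_grab_bio(), which this merge takes from the f2fs side. It bundles the fscrypt context setup, the wait for encrypted-page writeback, and the bio allocation that used to be open-coded here, and it reports failure as an ERR_PTR rather than NULL, which is why the hunk resets bio = NULL before goto set_error_page and why f2fs_write_begin() below can propagate PTR_ERR(bio). A plausible shape for the helper, reconstructed from the open-coded block this hunk deletes rather than quoted from the patch:

	static struct bio *f2fs_grab_bio(struct inode *inode, block_t blkaddr,
							unsigned nr_pages)
	{
		struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
		struct fscrypt_ctx *ctx = NULL;
		struct bio *bio;

		if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
			ctx = fscrypt_get_ctx(inode, GFP_NOFS);
			if (IS_ERR(ctx))
				return ERR_CAST(ctx);

			/* wait the page to be moved by cleaning */
			f2fs_wait_on_encrypted_page_writeback(sbi, blkaddr);
		}

		bio = bio_alloc(GFP_KERNEL, min_t(int, nr_pages, BIO_MAX_PAGES));
		if (!bio) {
			if (ctx)
				fscrypt_release_ctx(ctx);
			return ERR_PTR(-ENOMEM);
		}
		bio->bi_bdev = sbi->sb->s_bdev;
		bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blkaddr);
		bio->bi_end_io = f2fs_read_end_io;
		bio->bi_private = ctx;

		return bio;
	}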
@@@ ... @@@ static int f2fs_write_begin(struct file *file, struct address_space *mapping,
  	if (blkaddr == NEW_ADDR) {
  		zero_user_segment(page, 0, PAGE_SIZE);
  	} else {
- 		struct f2fs_io_info fio = {
- 			.sbi = sbi,
- 			.type = DATA,
- 			.op = REQ_OP_READ,
- 			.op_flags = READ_SYNC,
- 			.old_blkaddr = blkaddr,
- 			.new_blkaddr = blkaddr,
- 			.page = page,
- 			.encrypted_page = NULL,
- 		};
- 		err = f2fs_submit_page_bio(&fio);
- 		if (err)
- 			goto fail;
+ 		struct bio *bio;
  
- 		lock_page(page);
- 		if (unlikely(!PageUptodate(page))) {
- 			err = -EIO;
+ 		bio = f2fs_grab_bio(inode, blkaddr, 1);
+ 		if (IS_ERR(bio)) {
+ 			err = PTR_ERR(bio);
  			goto fail;
  		}
- 
++		bio_set_op_attrs(bio, REQ_OP_READ, READ_SYNC);
+ 		if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
+ 			bio_put(bio);
+ 			err = -EFAULT;
+ 			goto fail;
+ 		}
+ 
 -		__submit_bio(sbi, READ_SYNC, bio, DATA);
++		__submit_bio(sbi, bio, DATA);
+ 
+ 		lock_page(page);
  		if (unlikely(page->mapping != mapping)) {
  			f2fs_put_page(page, 1);
  			goto repeat;