BUG_ON(path[depth].p_hdr == NULL);
/* try to insert block into found extent and return */
- if (ex && (flag != EXT4_GET_BLOCKS_PRE_IO)
+ if (ex && !(flag & EXT4_GET_BLOCKS_PRE_IO)
&& ext4_can_extents_be_merged(inode, ex, newext)) {
ext_debug("append [%d]%d block to %d:[%d]%d (from %llu)\n",
ext4_ext_is_uninitialized(newext),
merge:
/* try to merge extents to the right */
- if (flag != EXT4_GET_BLOCKS_PRE_IO)
+ if (!(flag & EXT4_GET_BLOCKS_PRE_IO))
ext4_ext_try_to_merge(inode, path, nearex);
/* try to merge extents to the left */
ext4_ext_show_leaf(inode, path);
/* get_block() before submitting the IO, split the extent */
- if (flags == EXT4_GET_BLOCKS_PRE_IO) {
+ if (flags & EXT4_GET_BLOCKS_PRE_IO) {
ret = ext4_split_unwritten_extents(handle,
inode, path, iblock,
max_blocks, flags);
if (io)
io->flag = EXT4_IO_UNWRITTEN;
else
ext4_set_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);
+ if (ext4_should_dioread_nolock(inode))
+ set_buffer_uninit(bh_result);
goto out;
}
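The change from an equality test to a bitmask test is the heart of these hunks:
EXT4_GET_BLOCKS_PRE_IO is one bit in a flags word, and callers may OR it
together with other bits, so "flags == EXT4_GET_BLOCKS_PRE_IO" stops matching
as soon as any other bit is set. A minimal userspace sketch of the difference
(the flag values below are hypothetical stand-ins, not the real ext4
definitions):

	#include <assert.h>

	#define GB_CREATE 0x01	/* stand-in for EXT4_GET_BLOCKS_CREATE */
	#define GB_PRE_IO 0x08	/* stand-in for EXT4_GET_BLOCKS_PRE_IO */

	int main(void)
	{
		int flags = GB_CREATE | GB_PRE_IO;

		assert(flags != GB_PRE_IO);	/* equality misses the combined case */
		assert(flags & GB_PRE_IO);	/* the mask test still sees the bit */
		return 0;
	}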
/* IO end_io complete, convert the filled extent to written */
- if (flags == EXT4_GET_BLOCKS_CONVERT) {
+ if (flags & EXT4_GET_BLOCKS_CONVERT) {
ret = ext4_convert_unwritten_extents_endio(handle, inode,
path);
if (ret >= 0)
if (flags & EXT4_GET_BLOCKS_UNINIT_EXT){
ext4_ext_mark_uninitialized(&newex);
/*
- * io_end structure was created for every async
- * direct IO write to the middle of the file.
- * To avoid unecessary convertion for every aio dio rewrite
- * to the mid of file, here we flag the IO that is really
- * need the convertion.
+ * io_end structure was created for every IO write to an
+ * uninitialized extent. To avoid unnecessary conversion,
+ * here we flag the IO that really needs the conversion.
* For the non-async direct IO case, flag the inode state
* that we need to perform the conversion when IO is done.
*/
- if (flags == EXT4_GET_BLOCKS_PRE_IO) {
+ if (flags & EXT4_GET_BLOCKS_PRE_IO) {
if (io)
io->flag = EXT4_IO_UNWRITTEN;
else
ext4_set_inode_state(inode,
EXT4_STATE_DIO_UNWRITTEN);
}
+ if (ext4_should_dioread_nolock(inode))
+ set_buffer_uninit(bh_result);
}
if (unlikely(EXT4_I(inode)->i_flags & EXT4_EOFBLOCKS_FL)) {
#include <linux/uio.h>
#include <linux/bio.h>
#include <linux/workqueue.h>
+#include <linux/kernel.h>
#include "ext4_jbd2.h"
#include "xattr.h"
ext4_truncate(inode);
}
+static int ext4_get_block_write(struct inode *inode, sector_t iblock,
+ struct buffer_head *bh_result, int create);
static int ext4_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
struct page **pagep, void **fsdata)
}
*pagep = page;
- ret = block_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
- ext4_get_block);
+ if (ext4_should_dioread_nolock(inode))
+ ret = block_write_begin(file, mapping, pos, len, flags, pagep,
+ fsdata, ext4_get_block_write);
+ else
+ ret = block_write_begin(file, mapping, pos, len, flags, pagep,
+ fsdata, ext4_get_block);
if (!ret && ext4_should_journal_data(inode)) {
ret = walk_page_buffers(handle, page_buffers(page),
} else if (buffer_mapped(bh))
BUG_ON(bh->b_blocknr != pblock);
+ if (buffer_uninit(exbh))
+ set_buffer_uninit(bh);
cur_logical++;
pblock++;
} while ((bh = bh->b_this_page) != head);
*/
new.b_state = 0;
get_blocks_flags = EXT4_GET_BLOCKS_CREATE;
+ if (ext4_should_dioread_nolock(mpd->inode))
+ get_blocks_flags |= EXT4_GET_BLOCKS_IO_CREATE_EXT;
if (mpd->b_state & (1 << BH_Delay))
get_blocks_flags |= EXT4_GET_BLOCKS_DELALLOC_RESERVE;
return ret;
}
+static int ext4_set_bh_endio(struct buffer_head *bh, struct inode *inode);
+static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate);
+
/*
* Note that we don't need to start a transaction unless we're journaling data
* because we should have holes filled from ext4_page_mkwrite(). We don't even
int ret = 0;
loff_t size;
unsigned int len;
- struct buffer_head *page_bufs;
+ struct buffer_head *page_bufs = NULL;
struct inode *inode = page->mapping->host;
trace_ext4_writepage(inode, page);
if (test_opt(inode->i_sb, NOBH) && ext4_should_writeback_data(inode))
ret = nobh_writepage(page, noalloc_get_block_write, wbc);
- else
+ else if (page_bufs && buffer_uninit(page_bufs)) {
+ ext4_set_bh_endio(page_bufs, inode);
+ ret = block_write_full_page_endio(page, noalloc_get_block_write,
+ wbc, ext4_end_io_buffer_write);
+ } else
ret = block_write_full_page(page, noalloc_get_block_write,
wbc);
return mpage_readpages(mapping, pages, nr_pages, ext4_get_block);
}
+static void ext4_free_io_end(ext4_io_end_t *io)
+{
+ BUG_ON(!io);
+ if (io->page)
+ put_page(io->page);
+ iput(io->inode);
+ kfree(io);
+}
+
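ext4_free_io_end() is the single teardown point for an io_end: it must drop
exactly the references that setup took, and no more. iput() pairs with the
igrab() in ext4_init_io_end(), and the new put_page() pairs with the
get_page() in ext4_set_bh_endio() below, which only the buffered writeback
path performs. A hedged userspace model of that pairing (all names are
illustrative; none of this is kernel API):

	#include <stdlib.h>

	struct ref { int count; };

	static void ref_get(struct ref *r) { r->count++; }
	static void ref_put(struct ref *r) { r->count--; }

	struct io_end_model {
		struct ref *inode;	/* always pinned at init time */
		struct ref *page;	/* pinned only for buffered writeback */
	};

	static void free_io_end_model(struct io_end_model *io)
	{
		if (io->page)		/* the optional reference */
			ref_put(io->page);
		ref_put(io->inode);	/* the unconditional reference */
		free(io);
	}

	int main(void)
	{
		struct ref inode = { 1 }, page = { 1 };
		struct io_end_model *io = malloc(sizeof(*io));

		if (!io)
			return 1;
		io->inode = &inode;
		io->page = &page;
		ref_get(io->inode);	/* what init takes ... */
		ref_get(io->page);
		free_io_end_model(io);	/* ... teardown gives back */
		return (inode.count - 1) + (page.count - 1);	/* 0 if balanced */
	}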
+static void ext4_invalidatepage_free_endio(struct page *page, unsigned long offset)
+{
+ struct buffer_head *head, *bh;
+ unsigned int curr_off = 0;
+
+ if (!page_has_buffers(page))
+ return;
+ head = bh = page_buffers(page);
+ do {
+ if (offset <= curr_off && test_clear_buffer_uninit(bh)
+ && bh->b_private) {
+ ext4_free_io_end(bh->b_private);
+ bh->b_private = NULL;
+ bh->b_end_io = NULL;
+ }
+ curr_off = curr_off + bh->b_size;
+ bh = bh->b_this_page;
+ } while (bh != head);
+}
+
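The walk above uses the standard page-buffer idiom: a page's buffer_heads form
a circular singly linked ring through b_this_page, so the loop is a do/while
that stops once it returns to the head, and curr_off accumulates b_size so
only buffers at or past the invalidated offset are cleaned up. A stripped-down
userspace model of the traversal (struct and fields are stand-ins, assuming
512-byte buffers):

	#include <stdlib.h>

	struct bh_model {
		struct bh_model *this_page;	/* last buffer points back to the first */
		unsigned int size;
		void *private;			/* an orphaned io_end in the patch */
	};

	static void walk_ring(struct bh_model *head, unsigned long offset)
	{
		struct bh_model *bh = head;
		unsigned long curr_off = 0;

		do {
			if (offset <= curr_off && bh->private) {
				free(bh->private);
				bh->private = NULL;
			}
			curr_off += bh->size;
			bh = bh->this_page;
		} while (bh != head);		/* stop once we come back around */
	}

	int main(void)
	{
		struct bh_model a = { 0 }, b = { 0 };

		a.this_page = &b; a.size = 512; a.private = malloc(8);
		b.this_page = &a; b.size = 512; b.private = malloc(8);
		walk_ring(&a, 512);	/* frees b's private data, keeps a's */
		free(a.private);
		return 0;
	}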
static void ext4_invalidatepage(struct page *page, unsigned long offset)
{
journal_t *journal = EXT4_JOURNAL(page->mapping->host);
+ /*
+ * free any io_end structure allocated for buffers to be discarded
+ */
+ if (ext4_should_dioread_nolock(page->mapping->host))
+ ext4_invalidatepage_free_endio(page, offset);
/*
* If it's a full truncate we just forget about the pending dirtying
*/
static int ext4_get_block_write(struct inode *inode, sector_t iblock,
struct buffer_head *bh_result, int create)
{
- handle_t *handle = NULL;
+ handle_t *handle = ext4_journal_current_handle();
int ret = 0;
unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;
int dio_credits;
+ int started = 0;
ext4_debug("ext4_get_block_write: inode %lu, create flag %d\n",
inode->i_ino, create);
*/
create = EXT4_GET_BLOCKS_IO_CREATE_EXT;
- if (max_blocks > DIO_MAX_BLOCKS)
- max_blocks = DIO_MAX_BLOCKS;
- dio_credits = ext4_chunk_trans_blocks(inode, max_blocks);
- handle = ext4_journal_start(inode, dio_credits);
- if (IS_ERR(handle)) {
- ret = PTR_ERR(handle);
- goto out;
+ if (!handle) {
+ if (max_blocks > DIO_MAX_BLOCKS)
+ max_blocks = DIO_MAX_BLOCKS;
+ dio_credits = ext4_chunk_trans_blocks(inode, max_blocks);
+ handle = ext4_journal_start(inode, dio_credits);
+ if (IS_ERR(handle)) {
+ ret = PTR_ERR(handle);
+ goto out;
+ }
+ started = 1;
}
+
ret = ext4_get_blocks(handle, inode, iblock, max_blocks, bh_result,
create);
if (ret > 0) {
bh_result->b_size = (ret << inode->i_blkbits);
ret = 0;
}
- ext4_journal_stop(handle);
+ if (started)
+ ext4_journal_stop(handle);
out:
return ret;
}
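The rewrite above lets ext4_get_block_write() run both from direct IO, where
no transaction is running so it opens one, and from ext4_write_begin() on the
dioread_nolock path, where a handle is already current and must be reused
and, critically, not stopped. A userspace sketch of the reuse-or-start-and-stop
pattern (all names hypothetical; current_handle stands in for
ext4_journal_current_handle()):

	#include <stdlib.h>

	struct handle_model { int unused; };

	static struct handle_model *current_handle;

	static struct handle_model *journal_start(void)
	{
		struct handle_model *h = calloc(1, sizeof(*h));

		if (h)
			current_handle = h;
		return h;
	}

	static void journal_stop(struct handle_model *h)
	{
		current_handle = NULL;
		free(h);
	}

	static int get_block_model(void)
	{
		struct handle_model *handle = current_handle;	/* reuse if running */
		int started = 0;

		if (!handle) {
			handle = journal_start();	/* otherwise start our own */
			if (!handle)
				return -1;
			started = 1;
		}

		/* ... map blocks under "handle" ... */

		if (started)	/* stop only the handle we ourselves started */
			journal_stop(handle);
		return 0;
	}

	int main(void) { return get_block_model(); }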
-static void ext4_free_io_end(ext4_io_end_t *io)
-{
- BUG_ON(!io);
- iput(io->inode);
- kfree(io);
-}
-
static void dump_completed_IO(struct inode * inode)
{
#ifdef EXT4_DEBUG
struct list_head *cur, *before, *after;
ext4_io_end_t *io, *io0, *io1;
+ unsigned long flags;
if (list_empty(&EXT4_I(inode)->i_completed_io_list)){
ext4_debug("inode %lu completed_io list is empty\n", inode->i_ino);
}
ext4_debug("Dump inode %lu completed_io list \n", inode->i_ino);
+ spin_lock_irqsave(&EXT4_I(inode)->i_completed_io_lock, flags);
list_for_each_entry(io, &EXT4_I(inode)->i_completed_io_list, list){
cur = &io->list;
before = cur->prev;
ext4_debug("io 0x%p from inode %lu,prev 0x%p,next 0x%p\n",
io, inode->i_ino, io0, io1);
}
+ spin_unlock_irqrestore(&EXT4_I(inode)->i_completed_io_lock, flags);
#endif
}
if (io->flag != EXT4_IO_UNWRITTEN)
return ret;
- if (offset + size <= i_size_read(inode))
- ret = ext4_convert_unwritten_extents(inode, offset, size);
-
+ ret = ext4_convert_unwritten_extents(inode, offset, size);
if (ret < 0) {
printk(KERN_EMERG "%s: failed to convert unwritten "
"extents to written extents, error is %d"
*/
static void ext4_end_io_work(struct work_struct *work)
{
- ext4_io_end_t *io = container_of(work, ext4_io_end_t, work);
- struct inode *inode = io->inode;
- int ret = 0;
+ ext4_io_end_t *io = container_of(work, ext4_io_end_t, work);
+ struct inode *inode = io->inode;
+ struct ext4_inode_info *ei = EXT4_I(inode);
+ unsigned long flags;
+ int ret;
mutex_lock(&inode->i_mutex);
ret = ext4_end_io_nolock(io);
- if (ret >= 0) {
- if (!list_empty(&io->list))
- list_del_init(&io->list);
- ext4_free_io_end(io);
+ if (ret < 0) {
+ mutex_unlock(&inode->i_mutex);
+ return;
}
+
+ spin_lock_irqsave(&ei->i_completed_io_lock, flags);
+ if (!list_empty(&io->list))
+ list_del_init(&io->list);
+ spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
mutex_unlock(&inode->i_mutex);
+ ext4_free_io_end(io);
}
/*
int flush_completed_IO(struct inode *inode)
{
ext4_io_end_t *io;
+ struct ext4_inode_info *ei = EXT4_I(inode);
+ unsigned long flags;
int ret = 0;
int ret2 = 0;
- if (list_empty(&EXT4_I(inode)->i_completed_io_list))
+ if (list_empty(&ei->i_completed_io_list))
return ret;
dump_completed_IO(inode);
- while (!list_empty(&EXT4_I(inode)->i_completed_io_list)){
- io = list_entry(EXT4_I(inode)->i_completed_io_list.next,
+ spin_lock_irqsave(&ei->i_completed_io_lock, flags);
+ while (!list_empty(&ei->i_completed_io_list)) {
+ io = list_entry(ei->i_completed_io_list.next,
ext4_io_end_t, list);
/*
* Calling ext4_end_io_nolock() to convert completed
* avoid double converting from both fsync and background work
* queue work.
*/
+ spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
ret = ext4_end_io_nolock(io);
+ spin_lock_irqsave(&ei->i_completed_io_lock, flags);
if (ret < 0)
ret2 = ret;
else
list_del_init(&io->list);
}
+ spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
return (ret2 < 0) ? ret2 : 0;
}
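The locking dance in flush_completed_IO() is deliberate: i_completed_io_lock
is a spinlock that the IO-completion side also takes, so it must be dropped
around ext4_end_io_nolock(), which can sleep (extent conversion needs a
transaction), then retaken before the list is touched again. Double processing
of an entry is prevented by i_mutex serialization against the work item, not
by the spinlock. A minimal pthread model of the pattern (a mutex stands in
for the spinlock, convert() for the sleeping conversion; assumes one flusher
at a time):

	#include <pthread.h>

	struct node_model { struct node_model *next; };

	static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
	static struct node_model *completed;	/* producers also take list_lock */

	static void convert(struct node_model *n) { (void)n; /* may sleep */ }

	static void flush_completed(void)
	{
		pthread_mutex_lock(&list_lock);
		while (completed) {
			struct node_model *n = completed;

			/* drop the lock across the blocking call */
			pthread_mutex_unlock(&list_lock);
			convert(n);
			pthread_mutex_lock(&list_lock);

			completed = n->next;	/* unlink under the lock */
		}
		pthread_mutex_unlock(&list_lock);
	}

	int main(void) { flush_completed(); return 0; }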
-static ext4_io_end_t *ext4_init_io_end (struct inode *inode)
+static ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags)
{
ext4_io_end_t *io = NULL;
- io = kmalloc(sizeof(*io), GFP_NOFS);
+ io = kmalloc(sizeof(*io), flags);
if (io) {
igrab(inode);
io->flag = 0;
io->offset = 0;
io->size = 0;
- io->error = 0;
+ io->page = NULL;
INIT_WORK(&io->work, ext4_end_io_work);
INIT_LIST_HEAD(&io->list);
}
{
ext4_io_end_t *io_end = iocb->private;
struct workqueue_struct *wq;
+ unsigned long flags;
+ struct ext4_inode_info *ei;
/* if not async direct IO or a dio write of zero bytes, just return */
if (!io_end || !size)
io_end->offset = offset;
io_end->size = size;
+ io_end->flag = EXT4_IO_UNWRITTEN;
wq = EXT4_SB(io_end->inode->i_sb)->dio_unwritten_wq;
/* queue the work to convert unwritten extents to written */
queue_work(wq, &io_end->work);
/* Add the io_end to per-inode completed aio dio list*/
- list_add_tail(&io_end->list,
- &EXT4_I(io_end->inode)->i_completed_io_list);
+ ei = EXT4_I(io_end->inode);
+ spin_lock_irqsave(&ei->i_completed_io_lock, flags);
+ list_add_tail(&io_end->list, &ei->i_completed_io_list);
+ spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
iocb->private = NULL;
}
+static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate)
+{
+ ext4_io_end_t *io_end = bh->b_private;
+ struct workqueue_struct *wq;
+ struct inode *inode;
+ unsigned long flags;
+
+ if (!test_clear_buffer_uninit(bh) || !io_end)
+ goto out;
+
+ if (!(io_end->inode->i_sb->s_flags & MS_ACTIVE)) {
+ printk("sb umounted, discard end_io request for inode %lu\n",
+ io_end->inode->i_ino);
+ ext4_free_io_end(io_end);
+ goto out;
+ }
+
+ io_end->flag = EXT4_IO_UNWRITTEN;
+ inode = io_end->inode;
+
+ /* Add the io_end to per-inode completed io list*/
+ spin_lock_irqsave(&EXT4_I(inode)->i_completed_io_lock, flags);
+ list_add_tail(&io_end->list, &EXT4_I(inode)->i_completed_io_list);
+ spin_unlock_irqrestore(&EXT4_I(inode)->i_completed_io_lock, flags);
+
+ wq = EXT4_SB(inode->i_sb)->dio_unwritten_wq;
+ /* queue the work to convert unwritten extents to written */
+ queue_work(wq, &io_end->work);
+out:
+ bh->b_private = NULL;
+ bh->b_end_io = NULL;
+ clear_buffer_uninit(bh);
+ end_buffer_async_write(bh, uptodate);
+}
+
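ext4_end_io_buffer_write() runs as a block-IO completion callback, a context
that cannot start a journal transaction, so the actual unwritten-to-written
conversion is handed off to the ext4-dio-unwritten workqueue and the buffer
itself is finished with the stock end_buffer_async_write(). A small pthread
model of that handoff (a condvar and worker thread stand in for the kernel
workqueue; names are illustrative):

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
	static int pending;

	/* "completion context": only record the work and wake the worker */
	static void end_io(void)
	{
		pthread_mutex_lock(&lock);
		pending++;
		pthread_cond_signal(&cond);
		pthread_mutex_unlock(&lock);
	}

	/* "workqueue context": where sleeping and transactions are allowed */
	static void *worker(void *arg)
	{
		pthread_mutex_lock(&lock);
		while (!pending)
			pthread_cond_wait(&cond, &lock);
		pending--;
		pthread_mutex_unlock(&lock);
		printf("convert unwritten extent to written\n");
		return arg;
	}

	int main(void)
	{
		pthread_t t;

		if (pthread_create(&t, NULL, worker, NULL))
			return 1;
		end_io();
		pthread_join(t, NULL);
		return 0;
	}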
+static int ext4_set_bh_endio(struct buffer_head *bh, struct inode *inode)
+{
+ ext4_io_end_t *io_end;
+ struct page *page = bh->b_page;
+ loff_t offset = (sector_t)page->index << PAGE_CACHE_SHIFT;
+ size_t size = bh->b_size;
+
+retry:
+ io_end = ext4_init_io_end(inode, GFP_ATOMIC);
+ if (!io_end) {
+ if (printk_ratelimit())
+ printk(KERN_WARNING "%s: allocation failed\n", __func__);
+ schedule();
+ goto retry;
+ }
+ io_end->offset = offset;
+ io_end->size = size;
+ /*
+ * We need to hold a reference to the page to make sure it
+ * doesn't get evicted before ext4_end_io_work() has a chance
+ * to convert the extent from unwritten to written.
+ */
+ io_end->page = page;
+ get_page(io_end->page);
+
+ bh->b_private = io_end;
+ bh->b_end_io = ext4_end_io_buffer_write;
+ return 0;
+}
+
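Note the allocation strategy in ext4_set_bh_endio(): it runs from the
writeback path with the page locked, so it asks for GFP_ATOMIC (presumably to
keep the allocator from recursing into filesystem reclaim) and, rather than
failing the writeout, yields and retries until the small io_end arrives. A
userspace sketch of that retry idiom (malloc and sched_yield() stand in for
kmalloc(GFP_ATOMIC) and schedule()):

	#include <sched.h>
	#include <stdlib.h>

	static void *alloc_io_end_retry(size_t size)
	{
		void *p;

		while (!(p = malloc(size)))
			sched_yield();	/* back off briefly, then try again */
		return p;
	}

	int main(void)
	{
		free(alloc_io_end_retry(64));
		return 0;
	}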
/*
* For ext4 extent files, ext4 will do direct-io write to holes,
* preallocated extents, and writes that extend the file; no need to
iocb->private = NULL;
EXT4_I(inode)->cur_aio_dio = NULL;
if (!is_sync_kiocb(iocb)) {
- iocb->private = ext4_init_io_end(inode);
+ iocb->private = ext4_init_io_end(inode, GFP_NOFS);
if (!iocb->private)
return -ENOMEM;
/*
ei->i_reserved_quota = 0;
#endif
INIT_LIST_HEAD(&ei->i_completed_io_list);
+ spin_lock_init(&ei->i_completed_io_lock);
ei->cur_aio_dio = NULL;
ei->i_sync_tid = 0;
ei->i_datasync_tid = 0;
if (test_opt(sb, NOLOAD))
seq_puts(seq, ",norecovery");
+ if (test_opt(sb, DIOREAD_NOLOCK))
+ seq_puts(seq, ",dioread_nolock");
+
ext4_show_quota_options(seq, sb);
return 0;
Opt_stripe, Opt_delalloc, Opt_nodelalloc,
Opt_block_validity, Opt_noblock_validity,
Opt_inode_readahead_blks, Opt_journal_ioprio,
+ Opt_dioread_nolock, Opt_dioread_lock,
Opt_discard, Opt_nodiscard,
};
{Opt_auto_da_alloc, "auto_da_alloc=%u"},
{Opt_auto_da_alloc, "auto_da_alloc"},
{Opt_noauto_da_alloc, "noauto_da_alloc"},
+ {Opt_dioread_nolock, "dioread_nolock"},
+ {Opt_dioread_lock, "dioread_lock"},
{Opt_discard, "discard"},
{Opt_nodiscard, "nodiscard"},
{Opt_err, NULL},
case Opt_nodiscard:
clear_opt(sbi->s_mount_opt, DISCARD);
break;
+ case Opt_dioread_nolock:
+ set_opt(sbi->s_mount_opt, DIOREAD_NOLOCK);
+ break;
+ case Opt_dioread_lock:
+ clear_opt(sbi->s_mount_opt, DIOREAD_NOLOCK);
+ break;
default:
ext4_msg(sb, KERN_ERR,
"Unrecognized mount option \"%s\" "
EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER)) {
ext4_msg(sb, KERN_ERR, "required journal recovery "
"suppressed and not mounted read-only");
- goto failed_mount4;
+ goto failed_mount_wq;
} else {
clear_opt(sbi->s_mount_opt, DATA_FLAGS);
set_opt(sbi->s_mount_opt, WRITEBACK_DATA);
!jbd2_journal_set_features(EXT4_SB(sb)->s_journal, 0, 0,
JBD2_FEATURE_INCOMPAT_64BIT)) {
ext4_msg(sb, KERN_ERR, "Failed to set 64-bit journal feature");
- goto failed_mount4;
+ goto failed_mount_wq;
}
if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
(sbi->s_journal, 0, 0, JBD2_FEATURE_INCOMPAT_REVOKE)) {
ext4_msg(sb, KERN_ERR, "Journal does not support "
"requested data journaling mode");
- goto failed_mount4;
+ goto failed_mount_wq;
}
default:
break;
set_task_ioprio(sbi->s_journal->j_task, journal_ioprio);
no_journal:
-
if (test_opt(sb, NOBH)) {
if (!(test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_WRITEBACK_DATA)) {
ext4_msg(sb, KERN_WARNING, "Ignoring nobh option - "
"its supported only with writeback mode");
clear_opt(sbi->s_mount_opt, NOBH);
}
+ if (test_opt(sb, DIOREAD_NOLOCK)) {
+ ext4_msg(sb, KERN_WARNING, "dioread_nolock option is "
+ "not supported with nobh mode");
+ goto failed_mount_wq;
+ }
}
EXT4_SB(sb)->dio_unwritten_wq = create_workqueue("ext4-dio-unwritten");
if (!EXT4_SB(sb)->dio_unwritten_wq) {
"requested data journaling mode");
clear_opt(sbi->s_mount_opt, DELALLOC);
}
+ if (test_opt(sb, DIOREAD_NOLOCK)) {
+ if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) {
+ ext4_msg(sb, KERN_WARNING, "Ignoring dioread_nolock "
+ "option - requested data journaling mode");
+ clear_opt(sbi->s_mount_opt, DIOREAD_NOLOCK);
+ }
+ if (sb->s_blocksize < PAGE_SIZE) {
+ ext4_msg(sb, KERN_WARNING, "Ignoring dioread_nolock "
+ "option - block size is too small");
+ clear_opt(sbi->s_mount_opt, DIOREAD_NOLOCK);
+ }
+ }
err = ext4_setup_system_zone(sb);
if (err) {