goto out;
}
- iov_iter_init(&i, iov, nr_segs, count, 0);
+ iov_iter_init(&i, WRITE, iov, nr_segs, count);
err = file_remove_suid(file);
if (err) {
	CEPH_OSD_FLAG_ONDISK |
	CEPH_OSD_FLAG_WRITE;
- iov_iter_init(&i, iov, nr_segs, count, 0);
+ iov_iter_init(&i, WRITE, iov, nr_segs, count);
while (iov_iter_count(&i) > 0) {
	void __user *data = i.iov->iov_base + i.iov_offset;
	CEPH_OSD_FLAG_WRITE |
	CEPH_OSD_FLAG_ACK;
- iov_iter_init(&i, iov, nr_segs, count, 0);
+ iov_iter_init(&i, WRITE, iov, nr_segs, count);
while ((len = iov_iter_count(&i)) > 0) {
	size_t left;
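
The three ceph write hunks above keep walking the iovec by hand through the
iterator's public fields. As a point of reference, a minimal sketch of that
consumption pattern, with a hypothetical do_one_chunk() standing in for the
OSD request ceph actually issues:

	while (iov_iter_count(&i) > 0) {
		void __user *data = i.iov->iov_base + i.iov_offset;
		size_t len = min(iov_iter_count(&i),
				 i.iov->iov_len - i.iov_offset);
		/* do_one_chunk() is hypothetical, not part of the patch */
		ssize_t done = do_one_chunk(data, len);
		if (done < 0)
			break;
		/* advance updates iov, iov_offset and count together */
		iov_iter_advance(&i, done);
	}
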
int checkeof = 0, read = 0;
struct iov_iter i;
- iov_iter_init(&i, iov, nr_segs, len, 0);
+ iov_iter_init(&i, READ, iov, nr_segs, len);
again:
dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n",
 * are pending vmtruncate. So write and vmtruncate
 * can not run at the same time
 */
- iov_iter_init(&from, iov, nr_segs, count, 0);
+ iov_iter_init(&from, WRITE, iov, nr_segs, count);
written = generic_perform_write(file, &from, pos);
if (likely(written >= 0))
	iocb->ki_pos = pos + written;
else
pid = current->tgid;
- iov_iter_init(&it, iov, nr_segs, len, 0);
+ iov_iter_init(&it, WRITE, iov, nr_segs, len);
do {
size_t save_len;
if (!len)
	return 0;
- iov_iter_init(&to, iov, nr_segs, len, 0);
+ iov_iter_init(&to, READ, iov, nr_segs, len);
INIT_LIST_HEAD(&rdata_list);
cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
if (err)
	goto out;
- iov_iter_init(&i, iov, nr_segs, count, 0);
+ iov_iter_init(&i, WRITE, iov, nr_segs, count);
if (count == 0)
	goto out;
struct fuse_req *req;
struct iov_iter ii;
- iov_iter_init(&ii, iov, nr_segs, count, 0);
+ iov_iter_init(&ii, write ? WRITE : READ, iov, nr_segs, count);
if (io->async)
	req = fuse_get_req_for_background(fc, fuse_iter_npages(&ii));
if (!bytes)
	return 0;
- iov_iter_init(&ii, iov, nr_segs, bytes, 0);
+ iov_iter_init(&ii, to_user ? READ : WRITE, iov, nr_segs, bytes);
while (iov_iter_count(&ii)) {
	struct page *page = pages[page_idx++];
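
The fuse conversions spell out the tagging rule: the direction describes
which way data flows through the iovec, not which syscall is running, hence
the write ? WRITE : READ and to_user ? READ : WRITE above. A sketch, under
the assumption that a page filled by the kernel must be copied out through a
READ-tagged iterator using the copy_page_to_iter() helper declared in the
uio.h hunk below:

	size_t copied;

	iov_iter_init(&ii, READ, iov, nr_segs, bytes);
	copied = copy_page_to_iter(page, 0, PAGE_SIZE, &ii);
	if (copied != PAGE_SIZE)
		return -EFAULT;	/* short copy: user buffer faulted */
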
ssize_t result;
struct iov_iter to;
- iov_iter_init(&to, iov, nr_segs, count, 0);
+ iov_iter_init(&to, READ, iov, nr_segs, count);
if (iocb->ki_filp->f_flags & O_DIRECT)
	return nfs_file_direct_read(iocb, &to, pos, true);
ssize_t result;
size_t count = iov_length(iov, nr_segs);
struct iov_iter from;
- iov_iter_init(&from, iov, nr_segs, count, 0);
+ iov_iter_init(&from, WRITE, iov, nr_segs, count);
result = nfs_key_timeout_notify(file, inode);
if (result)
if (ret)
	goto out_dio;
- iov_iter_init(&from, iov, nr_segs, count, 0);
+ iov_iter_init(&from, WRITE, iov, nr_segs, count);
if (direct_io) {
	written = generic_file_direct_write(iocb, &from, *ppos,
					    count, ocount);
if (unlikely(total_len == 0))
	return 0;
- iov_iter_init(&iter, iov, nr_segs, total_len, 0);
+ iov_iter_init(&iter, READ, iov, nr_segs, total_len);
do_wakeup = 0;
ret = 0;
if (ret <= 0)
	return ret;
- iov_iter_init(&iter, iov, nr_segs, count, 0);
+ iov_iter_init(&iter, READ, iov, nr_segs, count);
sd.len = 0;
sd.total_len = count;
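
Both splice hunks tag the iterator READ for the same reason: pipe contents
are being copied out to the caller's iovec. A hedged sketch of draining one
pipe buffer through such an iterator (buf is assumed to be a struct
pipe_buffer *; this is not the patch's own code):

	size_t n = copy_page_to_iter(buf->page, buf->offset, sd.len, &iter);
	if (n != sd.len)
		return -EFAULT;	/* short copy */
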
}
trace_xfs_file_direct_write(ip, count, iocb->ki_pos, 0);
- iov_iter_init(&from, iovp, nr_segs, count, 0);
+ iov_iter_init(&from, WRITE, iovp, nr_segs, count);
ret = generic_file_direct_write(iocb, &from, pos, count, ocount);
out:
if (ret)
	goto out;
- iov_iter_init(&from, iovp, nr_segs, count, 0);
+ iov_iter_init(&from, WRITE, iovp, nr_segs, count);
/* We can write back this queue in page reclaim */
current->backing_dev_info = mapping->backing_dev_info;
};
struct iov_iter {
+	int type;
	const struct iovec *iov;
	unsigned long nr_segs;
	size_t iov_offset;
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i);
unsigned long iov_iter_alignment(const struct iov_iter *i);
-
-static inline void iov_iter_init(struct iov_iter *i,
-			const struct iovec *iov, unsigned long nr_segs,
-			size_t count, size_t written)
-{
-	i->iov = iov;
-	i->nr_segs = nr_segs;
-	i->iov_offset = 0;
-	i->count = count + written;
-
-	iov_iter_advance(i, written);
-}
+void iov_iter_init(struct iov_iter *i, int direction, const struct iovec *iov,
+			unsigned long nr_segs, size_t count);
static inline size_t iov_iter_count(struct iov_iter *i)
{
size_t count = iov_length(iov, nr_segs);
struct iov_iter i;
- iov_iter_init(&i, iov, nr_segs, count, 0);
+ iov_iter_init(&i, READ, iov, nr_segs, count);
return generic_file_read_iter(iocb, &i);
}
EXPORT_SYMBOL(generic_file_aio_read);
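
The generic_file_aio_read() conversion above is the template for every
caller in this patch. Note that the inline removed from uio.h took a
trailing written argument, added it to count and then advanced past it;
every call site converted here passed written == 0, so the change is
mechanical. Where the old behaviour is ever wanted, it can be reproduced
explicitly under the new API:

	/* old: iov_iter_init(&i, iov, nr_segs, count, written); */
	iov_iter_init(&i, READ, iov, nr_segs, count + written);
	iov_iter_advance(&i, written);	/* no-op when written == 0 */
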
if (err)
	goto out;
- iov_iter_init(&from, iov, nr_segs, count, 0);
+ iov_iter_init(&from, WRITE, iov, nr_segs, count);
/* coalesce the iovecs and go direct-to-BIO for O_DIRECT */
if (unlikely(file->f_flags & O_DIRECT)) {
return res;
}
EXPORT_SYMBOL(iov_iter_alignment);
+
+void iov_iter_init(struct iov_iter *i, int direction,
+			const struct iovec *iov, unsigned long nr_segs,
+			size_t count)
+{
+	/* It will get better.  Eventually... */
+	if (segment_eq(get_fs(), KERNEL_DS))
+		direction |= REQ_KERNEL;
+	i->type = direction;
+	i->iov = iov;
+	i->nr_segs = nr_segs;
+	i->iov_offset = 0;
+	i->count = count;
+}
+EXPORT_SYMBOL(iov_iter_init);
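
The get_fs() test above is worth a gloss: a caller that has switched to
KERNEL_DS is feeding kernel pointers through the iovec, so the iterator's
type quietly picks up the REQ_KERNEL bit. A sketch of the pattern this is
meant to catch (assuming KERNEL_WRITE is WRITE | REQ_KERNEL, as defined in
fs.h of this era):

	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);
	/* i->type ends up as KERNEL_WRITE even though WRITE was passed */
	iov_iter_init(&from, WRITE, &iov, 1, len);
	set_fs(old_fs);
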
init_sync_kiocb(&kiocb, swap_file);
kiocb.ki_pos = page_file_offset(page);
kiocb.ki_nbytes = PAGE_SIZE;
- iov_iter_init(&from, &iov, 1, PAGE_SIZE, 0);
+ iov_iter_init(&from, KERNEL_WRITE, &iov, 1, PAGE_SIZE);
set_page_writeback(page);
unlock_page(page);
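
The swap-out path is the one caller that says so explicitly: its iovec
points at kernel memory whatever get_fs() happens to be, so it passes
KERNEL_WRITE itself rather than relying on the KERNEL_DS check in
iov_iter_init(). A sketch of the setup this hunk sits in (the kmap() is an
assumption, not shown in the hunk):

	struct iovec iov = {
		.iov_base = kmap(page),	/* kernel pointer, not __user */
		.iov_len  = PAGE_SIZE,
	};

	iov_iter_init(&from, KERNEL_WRITE, &iov, 1, PAGE_SIZE);
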
if (rc <= 0)
	goto free_iovecs;
- iov_iter_init(&iter, iov_l, liovcnt, rc, 0);
+ iov_iter_init(&iter, vm_write ? WRITE : READ, iov_l, liovcnt, rc);
rc = rw_copy_check_uvector(CHECK_IOVEC_ONLY, rvec, riovcnt, UIO_FASTIOV,
			   iovstack_r, &iov_r);
&iov_l);
if (rc <= 0)
	goto free_iovecs;
- iov_iter_init(&iter, iov_l, liovcnt, rc, 0);
+ iov_iter_init(&iter, vm_write ? WRITE : READ, iov_l, liovcnt, rc);
rc = compat_rw_copy_check_uvector(CHECK_IOVEC_ONLY, rvec, riovcnt,
				  UIO_FASTIOV, iovstack_r,
				  &iov_r);
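
Both process_vm hunks tag the local iovec by its role in the transfer:
process_vm_writev() reads from local memory, so the local vector is a WRITE
source; process_vm_readv() fills it, so it is a READ destination. Spelled
out:

	if (vm_write)
		/* local iovec supplies the bytes sent to the remote task */
		iov_iter_init(&iter, WRITE, iov_l, liovcnt, rc);
	else
		/* local iovec receives the bytes read from the remote task */
		iov_iter_init(&iter, READ, iov_l, liovcnt, rc);
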
loff_t *ppos = &iocb->ki_pos;
struct iov_iter iter;
- iov_iter_init(&iter, iov, nr_segs, count, 0);
+ iov_iter_init(&iter, READ, iov, nr_segs, count);
/*
* Might this read be for a stacking filesystem? Then when reading