#include <linux/security.h>
#include <linux/magic.h>
#include <linux/migrate.h>
+#include <linux/uio.h>
#include <asm/uaccess.h>
-static int
+static size_t
hugetlbfs_read_actor(struct page *page, unsigned long offset,
- char __user *buf, unsigned long count,
- unsigned long size)
+ struct iov_iter *to, unsigned long size)
{
- char *kaddr;
- unsigned long left, copied = 0;
+ size_t copied = 0;
int i, chunksize;
- if (size > count)
- size = count;
-
	/* Find which 4k chunk and offset within that chunk */
i = offset >> PAGE_CACHE_SHIFT;
offset = offset & ~PAGE_CACHE_MASK;
while (size) {
+ size_t n;
chunksize = PAGE_CACHE_SIZE;
if (offset)
chunksize -= offset;
if (chunksize > size)
chunksize = size;
- kaddr = kmap(&page[i]);
- left = __copy_to_user(buf, kaddr + offset, chunksize);
- kunmap(&page[i]);
- if (left) {
- copied += (chunksize - left);
- break;
- }
+ n = copy_page_to_iter(&page[i], offset, chunksize, to);
+ copied += n;
+ if (n != chunksize)
+ return copied;
offset = 0;
size -= chunksize;
- buf += chunksize;
- copied += chunksize;
i++;
}
- return copied ? copied : -EFAULT;
+ return copied;
}
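
[ Note: the kmap()/__copy_to_user()/kunmap() sequence removed above is
  replaced by copy_page_to_iter(), which maps the page itself and returns
  the number of bytes it actually managed to copy.  As an illustration of
  the new calling convention (not part of this patch; demo_copy_one_page()
  is a made-up helper), copying a single up-to-date page cache page with
  the old "-EFAULT if nothing was copied" semantics would look roughly
  like:

	#include <linux/uio.h>
	#include <linux/pagemap.h>

	static ssize_t demo_copy_one_page(struct page *page, size_t offset,
					  size_t len, struct iov_iter *to)
	{
		/* copies at most len bytes; kmaps highmem pages internally */
		size_t copied = copy_page_to_iter(page, offset, len, to);

		/* a short copy means the iterator faulted part-way through */
		return copied ? copied : -EFAULT;
	}
]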
/*
 * Support for read() - Find the page attached to f_mapping and copy out the
 * data. It's *very* similar to do_generic_mapping_read(), we can't use that
 * since it has PAGE_CACHE_SIZE assumptions.
 */
-static ssize_t hugetlbfs_read(struct file *filp, char __user *buf,
- size_t len, loff_t *ppos)
+static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
- struct hstate *h = hstate_file(filp);
- struct address_space *mapping = filp->f_mapping;
+ struct file *file = iocb->ki_filp;
+ struct hstate *h = hstate_file(file);
+ struct address_space *mapping = file->f_mapping;
struct inode *inode = mapping->host;
- unsigned long index = *ppos >> huge_page_shift(h);
- unsigned long offset = *ppos & ~huge_page_mask(h);
+ unsigned long index = iocb->ki_pos >> huge_page_shift(h);
+ unsigned long offset = iocb->ki_pos & ~huge_page_mask(h);
unsigned long end_index;
loff_t isize;
ssize_t retval = 0;
- /* validate length */
- if (len == 0)
- goto out;
-
- for (;;) {
+ while (iov_iter_count(to)) {
struct page *page;
- unsigned long nr, ret;
- int ra;
+ size_t nr, copied;
/* nr is the maximum number of bytes to copy from this page */
nr = huge_page_size(h);
isize = i_size_read(inode);
if (!isize)
- goto out;
+ break;
end_index = (isize - 1) >> huge_page_shift(h);
- if (index >= end_index) {
- if (index > end_index)
- goto out;
+ if (index > end_index)
+ break;
+ if (index == end_index) {
nr = ((isize - 1) & ~huge_page_mask(h)) + 1;
if (nr <= offset)
- goto out;
+ break;
}
nr = nr - offset;
		/* Find the page */
		page = find_lock_page(mapping, index);
		if (unlikely(page == NULL)) {
			/*
			 * We have a HOLE, zero out the user-buffer for the
			 * length of the hole or request.
			 */
- ret = len < nr ? len : nr;
- if (clear_user(buf, ret))
- ra = -EFAULT;
- else
- ra = 0;
+ copied = iov_iter_zero(nr, to);
} else {
unlock_page(page);
/*
* We have the page, copy it to user space buffer.
*/
- ra = hugetlbfs_read_actor(page, offset, buf, len, nr);
- ret = ra;
+ copied = hugetlbfs_read_actor(page, offset, to, nr);
page_cache_release(page);
}
- if (ra < 0) {
- if (retval == 0)
- retval = ra;
- goto out;
+ offset += copied;
+ retval += copied;
+ if (copied != nr && iov_iter_count(to)) {
+ if (!retval)
+ retval = -EFAULT;
+ break;
}
-
- offset += ret;
- retval += ret;
- len -= ret;
index += offset >> huge_page_shift(h);
offset &= ~huge_page_mask(h);
-
- /* short read or no more work */
- if ((ret != nr) || (len == 0))
- break;
}
-out:
- *ppos = ((loff_t)index << huge_page_shift(h)) + offset;
+ iocb->ki_pos = ((loff_t)index << huge_page_shift(h)) + offset;
return retval;
}
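
[ Note: with ->read_iter() provided, the legacy read(2) path is served by
  new_sync_read() below, which wraps the (buf, len) pair in an iov_iter
  plus a kiocb and then calls ->read_iter().  A simplified sketch of that
  fs/read_write.c helper from this era (not its exact body; error and
  iocb-flag handling omitted):

	static ssize_t sketch_sync_read(struct file *filp, char __user *buf,
					size_t len, loff_t *ppos)
	{
		struct iovec iov = { .iov_base = buf, .iov_len = len };
		struct kiocb kiocb;
		struct iov_iter iter;
		ssize_t ret;

		/* set up a synchronous kiocb at the current file position */
		init_sync_kiocb(&kiocb, filp);
		kiocb.ki_pos = *ppos;
		/* describe the single user buffer as a READ-direction iterator */
		iov_iter_init(&iter, READ, &iov, 1, len);

		ret = filp->f_op->read_iter(&kiocb, &iter);
		*ppos = kiocb.ki_pos;
		return ret;
	}
]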
const struct file_operations hugetlbfs_file_operations = {
- .read = hugetlbfs_read,
+ .read = new_sync_read,
+ .read_iter = hugetlbfs_read_iter,
.mmap = hugetlbfs_file_mmap,
.fsync = noop_fsync,
.get_unmapped_area = hugetlb_get_unmapped_area,