/*
 *  linux/fs/ext4/file.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 * from
 *
 *  linux/fs/minix/file.c
 *
 * Copyright (C) 1991, 1992  Linus Torvalds
 *
 * ext4 fs regular file handling primitives
 *
 * 64-bit file support on 64-bit platforms by Jakub Jelinek
 *	(jj@sunsite.ms.mff.cuni.cz)
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/path.h>
#include <linux/dax.h>
#include <linux/quotaops.h>
#include <linux/pagevec.h>
#include <linux/uio.h>
#include "ext4.h"
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"

/*
 * Called when an inode is released. Note that this is different
 * from ext4_file_open: open gets called at every open, but release
 * gets called only when /all/ the files are closed.
 */
static int ext4_release_file(struct inode *inode, struct file *filp)
{
	if (ext4_test_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE)) {
		ext4_alloc_da_blocks(inode);
		ext4_clear_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
	}
	/* if we are the last writer on the inode, drop the block reservation */
	if ((filp->f_mode & FMODE_WRITE) &&
			(atomic_read(&inode->i_writecount) == 1) &&
			!EXT4_I(inode)->i_reserved_data_blocks)
	{
		down_write(&EXT4_I(inode)->i_data_sem);
		ext4_discard_preallocations(inode);
		up_write(&EXT4_I(inode)->i_data_sem);
	}
	if (is_dx(inode) && filp->private_data)
		ext4_htree_free_dir_info(filp->private_data);

	return 0;
}
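
/*
 * Editor's illustration (a sketch, not part of the original source):
 * ->release fires when the last reference to a struct file is dropped,
 * not at every close(2). For a regular ext4 file:
 *
 *	int fd1 = open("f", O_RDWR);	// ext4_file_open() runs
 *	int fd2 = dup(fd1);		// same struct file, no open call
 *	close(fd1);			// no release yet, fd2 still live
 *	close(fd2);			// last ref: ext4_release_file() runs
 */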

static void ext4_unwritten_wait(struct inode *inode)
{
	wait_queue_head_t *wq = ext4_ioend_wq(inode);

	wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_unwritten) == 0));
}

/*
 * This tests whether the IO in question is block-aligned or not.
 * Ext4 utilizes unwritten extents when hole-filling during direct IO, and they
 * are converted to written only after the IO is complete. Until they are
 * mapped, these blocks appear as holes, so dio_zero_block() will assume that
 * it needs to zero out portions of the start and/or end block. If 2 AIO
 * threads are at work on the same unwritten block, they must be synchronized
 * or one thread will zero the other's data, causing corruption.
 */
static int
ext4_unaligned_aio(struct inode *inode, struct iov_iter *from, loff_t pos)
{
	struct super_block *sb = inode->i_sb;
	int blockmask = sb->s_blocksize - 1;

	if (pos >= i_size_read(inode))
		return 0;

	if ((pos | iov_iter_alignment(from)) & blockmask)
		return 1;

	return 0;
}
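
/*
 * Editor's worked example (a sketch, not part of the original source):
 * with a 4096-byte block size, blockmask == 0x0fff. An AIO write at
 * pos == 512 whose iov_iter_alignment() is 512 (e.g. a single 512-byte
 * segment at a page-aligned user buffer) is unaligned, since
 * (512 | 512) & 0x0fff != 0, so it must take the serializing mutex in
 * ext4_file_write_iter() below. A write at pos == 4096 with alignment
 * 4096 satisfies (4096 | 4096) & 0x0fff == 0 and proceeds without
 * serialization; writes at or beyond i_size are likewise exempt, per
 * the i_size_read() check above.
 */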

static ssize_t
ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(iocb->ki_filp);
	struct mutex *aio_mutex = NULL;
	struct blk_plug plug;
	int o_direct = iocb->ki_flags & IOCB_DIRECT;
	int overwrite = 0;
	ssize_t ret;

	/*
	 * Unaligned direct AIO must be serialized; see the comment above.
	 * In the case of O_APPEND, assume that we must always serialize.
	 */
	if (o_direct &&
	    ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) &&
	    !is_sync_kiocb(iocb) &&
	    (iocb->ki_flags & IOCB_APPEND ||
	     ext4_unaligned_aio(inode, from, iocb->ki_pos))) {
		aio_mutex = ext4_aio_mutex(inode);
		mutex_lock(aio_mutex);
		ext4_unwritten_wait(inode);
	}

	mutex_lock(&inode->i_mutex);
	ret = generic_write_checks(iocb, from);
	if (ret <= 0)
		goto out;

	/*
	 * If we have encountered a bitmap-format file, the size limit
	 * is smaller than s_maxbytes, which is for extent-mapped files.
	 */
	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

		if (iocb->ki_pos >= sbi->s_bitmap_maxbytes) {
			ret = -EFBIG;
			goto out;
		}
		iov_iter_truncate(from, sbi->s_bitmap_maxbytes - iocb->ki_pos);
	}

	iocb->private = &overwrite;
	if (o_direct) {
		size_t length = iov_iter_count(from);
		loff_t pos = iocb->ki_pos;
		blk_start_plug(&plug);

		/* check whether we do a DIO overwrite or not */
		if (ext4_should_dioread_nolock(inode) && !aio_mutex &&
		    !file->f_mapping->nrpages && pos + length <= i_size_read(inode)) {
			struct ext4_map_blocks map;
			unsigned int blkbits = inode->i_blkbits;
			int err, len;

			map.m_lblk = pos >> blkbits;
			map.m_len = (EXT4_BLOCK_ALIGN(pos + length, blkbits) >> blkbits)
				- map.m_lblk;
			len = map.m_len;

			err = ext4_map_blocks(NULL, inode, &map, 0);
			/*
			 * 'err == len' means that all of the blocks have
			 * been preallocated, whether or not they are
			 * initialized. To exclude unwritten extents, we
			 * still need to check m_flags. There are two
			 * conditions that indicate an initialized extent:
			 * 1) if we hit the extent cache, the
			 * EXT4_MAP_MAPPED flag is returned; 2) if we do a
			 * real lookup, no flags are returned. So we should
			 * check both of these conditions.
			 */
			if (err == len && (map.m_flags & EXT4_MAP_MAPPED))
				overwrite = 1;
		}
	}

	ret = __generic_file_write_iter(iocb, from);
	mutex_unlock(&inode->i_mutex);

	if (ret > 0) {
		ssize_t err;

		err = generic_write_sync(file, iocb->ki_pos - ret, ret);
		if (err < 0)
			ret = err;
	}
	if (o_direct)
		blk_finish_plug(&plug);

	if (aio_mutex)
		mutex_unlock(aio_mutex);
	return ret;

out:
	mutex_unlock(&inode->i_mutex);
	if (aio_mutex)
		mutex_unlock(aio_mutex);
	return ret;
}
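
/*
 * Editor's worked example for the overwrite check above (a sketch, not
 * part of the original source): with blkbits == 12 (4 KiB blocks), a
 * direct write of length == 6144 at pos == 4096 gives
 *
 *	map.m_lblk = 4096 >> 12                        = 1
 *	map.m_len  = (ALIGN(4096 + 6144, 4096) >> 12) - 1 = 3 - 1 = 2
 *
 * If ext4_map_blocks() then reports two mapped, initialized blocks
 * (err == 2 == len, with EXT4_MAP_MAPPED set), the write is treated as
 * a pure overwrite, letting the direct IO path skip locking it would
 * otherwise need.
 */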

#ifdef CONFIG_FS_DAX
static void ext4_end_io_unwritten(struct buffer_head *bh, int uptodate)
{
	struct inode *inode = bh->b_assoc_map->host;
	/* XXX: breaks on 32-bit > 16TB. Is that even supported? */
	loff_t offset = (loff_t)(uintptr_t)bh->b_private << inode->i_blkbits;
	int err;
	if (!uptodate)
		return;
	WARN_ON(!buffer_unwritten(bh));
	err = ext4_convert_unwritten_extents(NULL, inode, offset, bh->b_size);
}

static int ext4_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	int result;
	handle_t *handle = NULL;
	struct inode *inode = file_inode(vma->vm_file);
	struct super_block *sb = inode->i_sb;
	bool write = vmf->flags & FAULT_FLAG_WRITE;

	if (write) {
		sb_start_pagefault(sb);
		file_update_time(vma->vm_file);
		down_read(&EXT4_I(inode)->i_mmap_sem);
		handle = ext4_journal_start_sb(sb, EXT4_HT_WRITE_PAGE,
						EXT4_DATA_TRANS_BLOCKS(sb));
	} else
		down_read(&EXT4_I(inode)->i_mmap_sem);

	if (IS_ERR(handle))
		result = VM_FAULT_SIGBUS;
	else
		result = __dax_fault(vma, vmf, ext4_get_block_dax,
						ext4_end_io_unwritten);

	if (write) {
		if (!IS_ERR(handle))
			ext4_journal_stop(handle);
		up_read(&EXT4_I(inode)->i_mmap_sem);
		sb_end_pagefault(sb);
	} else
		up_read(&EXT4_I(inode)->i_mmap_sem);

	return result;
}

static int ext4_dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
						pmd_t *pmd, unsigned int flags)
{
	int result;
	handle_t *handle = NULL;
	struct inode *inode = file_inode(vma->vm_file);
	struct super_block *sb = inode->i_sb;
	bool write = flags & FAULT_FLAG_WRITE;

	if (write) {
		sb_start_pagefault(sb);
		file_update_time(vma->vm_file);
		down_read(&EXT4_I(inode)->i_mmap_sem);
		handle = ext4_journal_start_sb(sb, EXT4_HT_WRITE_PAGE,
				ext4_chunk_trans_blocks(inode,
							PMD_SIZE / PAGE_SIZE));
	} else
		down_read(&EXT4_I(inode)->i_mmap_sem);

	if (IS_ERR(handle))
		result = VM_FAULT_SIGBUS;
	else
		result = __dax_pmd_fault(vma, addr, pmd, flags,
				ext4_get_block_dax, ext4_end_io_unwritten);

	if (write) {
		if (!IS_ERR(handle))
			ext4_journal_stop(handle);
		up_read(&EXT4_I(inode)->i_mmap_sem);
		sb_end_pagefault(sb);
	} else
		up_read(&EXT4_I(inode)->i_mmap_sem);

	return result;
}

static int ext4_dax_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	int err;
	struct inode *inode = file_inode(vma->vm_file);

	sb_start_pagefault(inode->i_sb);
	file_update_time(vma->vm_file);
	down_read(&EXT4_I(inode)->i_mmap_sem);
	err = __dax_mkwrite(vma, vmf, ext4_get_block_dax,
			    ext4_end_io_unwritten);
	up_read(&EXT4_I(inode)->i_mmap_sem);
	sb_end_pagefault(inode->i_sb);

	return err;
}

/*
 * Handle write faults for VM_MIXEDMAP mappings. As in the
 * ext4_dax_mkwrite() handler, we check for races against truncate.
 * Note that since we cycle through i_mmap_sem, we are sure that any
 * hole punching that began before we were called has finished by now,
 * so if it covered part of the file we are working on, our pte will
 * get unmapped and the pte_same() check in wp_pfn_shared() will fail.
 * Thus the fault gets retried and things work out as desired.
 */
static int ext4_dax_pfn_mkwrite(struct vm_area_struct *vma,
				struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vma->vm_file);
	struct super_block *sb = inode->i_sb;
	int ret = VM_FAULT_NOPAGE;
	loff_t size;

	sb_start_pagefault(sb);
	file_update_time(vma->vm_file);
	down_read(&EXT4_I(inode)->i_mmap_sem);
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (vmf->pgoff >= size)
		ret = VM_FAULT_SIGBUS;
	up_read(&EXT4_I(inode)->i_mmap_sem);
	sb_end_pagefault(sb);

	return ret;
}
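
/*
 * Editor's note on the EOF check above (a sketch, not part of the
 * original source): "size" is i_size rounded up to whole pages. With
 * PAGE_SIZE == 4096 and i_size == 5000, size == (5000 + 4095) >> 12
 * == 2 pages, so a fault at vmf->pgoff == 1 (bytes 4096..8191, which
 * partially overlap the file) is still served, while pgoff == 2 lies
 * wholly past EOF and gets VM_FAULT_SIGBUS.
 */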

static const struct vm_operations_struct ext4_dax_vm_ops = {
	.fault		= ext4_dax_fault,
	.pmd_fault	= ext4_dax_pmd_fault,
	.page_mkwrite	= ext4_dax_mkwrite,
	.pfn_mkwrite	= ext4_dax_pfn_mkwrite,
};
#else
#define ext4_dax_vm_ops	ext4_file_vm_ops
#endif

static const struct vm_operations_struct ext4_file_vm_ops = {
	.fault		= ext4_filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= ext4_page_mkwrite,
};

static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file->f_mapping->host;

	if (ext4_encrypted_inode(inode)) {
		int err = ext4_get_encryption_info(inode);
		if (err)
			return 0;
		if (ext4_encryption_info(inode) == NULL)
			return -ENOKEY;
	}
	file_accessed(file);
	if (IS_DAX(file_inode(file))) {
		vma->vm_ops = &ext4_dax_vm_ops;
		vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE;
	} else {
		vma->vm_ops = &ext4_file_vm_ops;
	}
	return 0;
}

static int ext4_file_open(struct inode *inode, struct file *filp)
{
	struct super_block *sb = inode->i_sb;
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct vfsmount *mnt = filp->f_path.mnt;
	struct dentry *dir;
	struct path path;
	char buf[64], *cp;
	int ret;

	if (unlikely(!(sbi->s_mount_flags & EXT4_MF_MNTDIR_SAMPLED) &&
		     !(sb->s_flags & MS_RDONLY))) {
		sbi->s_mount_flags |= EXT4_MF_MNTDIR_SAMPLED;
		/*
		 * Sample where the filesystem has been mounted and
		 * store it in the superblock for sysadmin convenience
		 * when trying to sort through large numbers of block
		 * devices or filesystem images.
		 */
		memset(buf, 0, sizeof(buf));
		path.mnt = mnt;
		path.dentry = mnt->mnt_root;
		cp = d_path(&path, buf, sizeof(buf));
		if (!IS_ERR(cp)) {
			handle_t *handle;
			int err;

			handle = ext4_journal_start_sb(sb, EXT4_HT_MISC, 1);
			if (IS_ERR(handle))
				return PTR_ERR(handle);
			BUFFER_TRACE(sbi->s_sbh, "get_write_access");
			err = ext4_journal_get_write_access(handle, sbi->s_sbh);
			if (err) {
				ext4_journal_stop(handle);
				return err;
			}
			strlcpy(sbi->s_es->s_last_mounted, cp,
				sizeof(sbi->s_es->s_last_mounted));
			ext4_handle_dirty_super(handle, sb);
			ext4_journal_stop(handle);
		}
	}
	if (ext4_encrypted_inode(inode)) {
		ret = ext4_get_encryption_info(inode);
		if (ret)
			return -EACCES;
		if (ext4_encryption_info(inode) == NULL)
			return -ENOKEY;
	}

	dir = dget_parent(filp->f_path.dentry);
	if (ext4_encrypted_inode(d_inode(dir)) &&
	    !ext4_is_child_context_consistent_with_parent(d_inode(dir), inode)) {
		ext4_warning(inode->i_sb,
			     "Inconsistent encryption contexts: %lu/%lu\n",
			     (unsigned long) d_inode(dir)->i_ino,
			     (unsigned long) inode->i_ino);
		dput(dir);
		return -EPERM;
	}
	dput(dir);
	/*
	 * Set up the jbd2_inode if we are opening the inode for
	 * writing and the journal is present
	 */
	if (filp->f_mode & FMODE_WRITE) {
		ret = ext4_inode_attach_jinode(inode);
		if (ret < 0)
			return ret;
	}
	return dquot_file_open(inode, filp);
}

/*
 * Here we use ext4_map_blocks() to get a block mapping for an
 * extent-based file rather than ext4_ext_walk_space(), because this
 * lets us handle SEEK_DATA/SEEK_HOLE for block-mapped and
 * extent-mapped files in the same function. Once the extent status
 * tree has been fully implemented, it will track all extent status
 * for a file and we can directly use it to retrieve the offset for
 * SEEK_DATA/SEEK_HOLE.
 */

/*
 * When we retrieve the offset for SEEK_DATA/SEEK_HOLE, we need to
 * look up the page cache to check whether there is data in the range
 * [startoff, endoff]: if this range contains an unwritten extent, we
 * report that extent as data or as a hole depending on whether the
 * page cache has data for it.
 */
static int ext4_find_unwritten_pgoff(struct inode *inode,
				     int whence,
				     struct ext4_map_blocks *map,
				     loff_t *offset)
{
	struct pagevec pvec;
	unsigned int blkbits;
	pgoff_t index;
	pgoff_t end;
	loff_t endoff;
	loff_t startoff;
	loff_t lastoff;
	int found = 0;

	blkbits = inode->i_sb->s_blocksize_bits;
	startoff = *offset;
	lastoff = startoff;
	endoff = (loff_t)(map->m_lblk + map->m_len) << blkbits;

	index = startoff >> PAGE_CACHE_SHIFT;
	end = endoff >> PAGE_CACHE_SHIFT;

	pagevec_init(&pvec, 0);
	do {
		int i, num;
		unsigned long nr_pages;

		num = min_t(pgoff_t, end - index, PAGEVEC_SIZE);
		nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index,
					  (pgoff_t)num);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];
			struct buffer_head *bh, *head;

			/*
			 * If the current offset is smaller than the page
			 * offset, there is a hole at this offset.
			 */
			if (whence == SEEK_HOLE && lastoff < endoff &&
			    lastoff < page_offset(pvec.pages[i])) {
				found = 1;
				*offset = lastoff;
				goto out;
			}

			if (page->index > end)
				goto out;

			lock_page(page);

			if (unlikely(page->mapping != inode->i_mapping)) {
				unlock_page(page);
				continue;
			}

			if (!page_has_buffers(page)) {
				unlock_page(page);
				continue;
			}

			lastoff = page_offset(page);
			bh = head = page_buffers(page);
			do {
				if (lastoff + bh->b_size <= startoff)
					goto next;
				if (buffer_uptodate(bh) ||
				    buffer_unwritten(bh)) {
					if (whence == SEEK_DATA)
						found = 1;
				} else {
					if (whence == SEEK_HOLE)
						found = 1;
				}
				if (found) {
					*offset = max_t(loff_t,
							startoff, lastoff);
					unlock_page(page);
					goto out;
				}
next:
				lastoff += bh->b_size;
				bh = bh->b_this_page;
			} while (bh != head);

			lastoff = page_offset(page) + PAGE_SIZE;
			unlock_page(page);
		}

		/* Fewer pages were returned than requested; we are done. */
		if (nr_pages < num)
			break;

		index = pvec.pages[i - 1]->index + 1;
		pagevec_release(&pvec);
	} while (index <= end);

	if (whence == SEEK_HOLE && lastoff < endoff) {
		found = 1;
		*offset = lastoff;
	}
out:
	pagevec_release(&pvec);
	return found;
}
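
/*
 * Editor's illustration (a sketch, not part of the original source,
 * assuming 4 KiB blocks equal to the page size): suppose blocks 10..13
 * of a file form an unwritten extent and only block 11 has been written
 * through the page cache but not yet flushed and converted. A SEEK_DATA
 * scan over that range finds the cached page for block 11 with an
 * uptodate buffer and reports block 11's offset as data; a SEEK_HOLE
 * scan starting at block 10 reports block 10's offset as a hole, since
 * no page backs it.
 */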

/*
 * ext4_seek_data() retrieves the offset for SEEK_DATA.
 */
static loff_t ext4_seek_data(struct file *file, loff_t offset, loff_t maxsize)
{
	struct inode *inode = file->f_mapping->host;
	struct ext4_map_blocks map;
	struct extent_status es;
	ext4_lblk_t start, last, end;
	loff_t dataoff, isize;
	int blkbits;
	int ret = 0;

	mutex_lock(&inode->i_mutex);

	isize = i_size_read(inode);
	if (offset < 0 || offset >= isize) {
		mutex_unlock(&inode->i_mutex);
		return -ENXIO;
	}

	blkbits = inode->i_sb->s_blocksize_bits;
	start = offset >> blkbits;
	last = start;
	end = isize >> blkbits;
	dataoff = offset;

	do {
		map.m_lblk = last;
		map.m_len = end - last + 1;
		ret = ext4_map_blocks(NULL, inode, &map, 0);
		if (ret > 0 && !(map.m_flags & EXT4_MAP_UNWRITTEN)) {
			if (last != start)
				dataoff = (loff_t)last << blkbits;
			break;
		}

		/*
		 * If there is a delayed extent at this offset, treat
		 * it as data.
		 */
		ext4_es_find_delayed_extent_range(inode, last, last, &es);
		if (es.es_len != 0 && in_range(last, es.es_lblk, es.es_len)) {
			if (last != start)
				dataoff = (loff_t)last << blkbits;
			break;
		}

		/*
		 * If there is an unwritten extent at this offset, it
		 * counts as data or as a hole depending on whether the
		 * page cache has data for it.
		 */
		if (map.m_flags & EXT4_MAP_UNWRITTEN) {
			int unwritten;
			unwritten = ext4_find_unwritten_pgoff(inode, SEEK_DATA,
							      &map, &dataoff);
			if (unwritten)
				break;
		}

		last++;
		dataoff = (loff_t)last << blkbits;
	} while (last <= end);

	mutex_unlock(&inode->i_mutex);

	if (dataoff > isize)
		return -ENXIO;

	return vfs_setpos(file, dataoff, maxsize);
}

/*
 * ext4_seek_hole() retrieves the offset for SEEK_HOLE.
 */
static loff_t ext4_seek_hole(struct file *file, loff_t offset, loff_t maxsize)
{
	struct inode *inode = file->f_mapping->host;
	struct ext4_map_blocks map;
	struct extent_status es;
	ext4_lblk_t start, last, end;
	loff_t holeoff, isize;
	int blkbits;
	int ret = 0;

	mutex_lock(&inode->i_mutex);

	isize = i_size_read(inode);
	if (offset < 0 || offset >= isize) {
		mutex_unlock(&inode->i_mutex);
		return -ENXIO;
	}

	blkbits = inode->i_sb->s_blocksize_bits;
	start = offset >> blkbits;
	last = start;
	end = isize >> blkbits;
	holeoff = offset;

	do {
		map.m_lblk = last;
		map.m_len = end - last + 1;
		ret = ext4_map_blocks(NULL, inode, &map, 0);
		if (ret > 0 && !(map.m_flags & EXT4_MAP_UNWRITTEN)) {
			last += ret;
			holeoff = (loff_t)last << blkbits;
			continue;
		}

		/*
		 * If there is a delayed extent at this offset, skip
		 * over it.
		 */
		ext4_es_find_delayed_extent_range(inode, last, last, &es);
		if (es.es_len != 0 && in_range(last, es.es_lblk, es.es_len)) {
			last = es.es_lblk + es.es_len;
			holeoff = (loff_t)last << blkbits;
			continue;
		}

		/*
		 * If there is an unwritten extent at this offset, it
		 * counts as data or as a hole depending on whether the
		 * page cache has data for it.
		 */
		if (map.m_flags & EXT4_MAP_UNWRITTEN) {
			int unwritten;
			unwritten = ext4_find_unwritten_pgoff(inode, SEEK_HOLE,
							      &map, &holeoff);
			if (!unwritten) {
				last += ret;
				holeoff = (loff_t)last << blkbits;
				continue;
			}
		}

		/* found a hole */
		break;
	} while (last <= end);

	mutex_unlock(&inode->i_mutex);

	if (holeoff > isize)
		holeoff = isize;

	return vfs_setpos(file, holeoff, maxsize);
}

/*
 * ext4_llseek() handles both block-mapped and extent-mapped maxbytes values
 * by calling generic_file_llseek_size() with the appropriate maxbytes
 * value for each.
 */
loff_t ext4_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes;

	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
		maxbytes = EXT4_SB(inode->i_sb)->s_bitmap_maxbytes;
	else
		maxbytes = inode->i_sb->s_maxbytes;

	switch (whence) {
	case SEEK_SET:
	case SEEK_CUR:
	case SEEK_END:
		return generic_file_llseek_size(file, offset, whence,
						maxbytes, i_size_read(inode));
	case SEEK_DATA:
		return ext4_seek_data(file, offset, maxbytes);
	case SEEK_HOLE:
		return ext4_seek_hole(file, offset, maxbytes);
	}

	return -EINVAL;
}
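
/*
 * Editor's usage sketch (userspace view, not part of the original
 * source): the SEEK_DATA/SEEK_HOLE cases above are what a hole-aware
 * copy tool exercises through lseek(2), e.g.:
 *
 *	off_t data = lseek(fd, 0, SEEK_DATA);	  // first data offset
 *	off_t hole = lseek(fd, data, SEEK_HOLE);  // end of that data run
 *
 * Both calls return -1 with errno == ENXIO when the requested offset
 * is at or beyond the end of the file, matching the i_size checks in
 * ext4_seek_data() and ext4_seek_hole().
 */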

const struct file_operations ext4_file_operations = {
	.llseek		= ext4_llseek,
	.read_iter	= generic_file_read_iter,
	.write_iter	= ext4_file_write_iter,
	.unlocked_ioctl = ext4_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= ext4_compat_ioctl,
#endif
	.mmap		= ext4_file_mmap,
	.open		= ext4_file_open,
	.release	= ext4_release_file,
	.fsync		= ext4_sync_file,
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.fallocate	= ext4_fallocate,
};

const struct inode_operations ext4_file_inode_operations = {
	.setattr	= ext4_setattr,
	.getattr	= ext4_getattr,
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= ext4_listxattr,
	.removexattr	= generic_removexattr,
	.get_acl	= ext4_get_acl,
	.set_acl	= ext4_set_acl,
	.fiemap		= ext4_fiemap,
};