/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_log.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_trans.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_bmap.h"
#include "xfs_error.h"
#include "xfs_vnodeops.h"
#include "xfs_da_btree.h"
#include "xfs_dir2_format.h"
#include "xfs_dir2_priv.h"
#include "xfs_ioctl.h"
#include "xfs_trace.h"

#include <linux/aio.h>
#include <linux/dcache.h>
#include <linux/falloc.h>
#include <linux/pagevec.h>

static const struct vm_operations_struct xfs_file_vm_ops;

/*
 * Locking primitives for read and write IO paths to ensure we consistently use
 * and order the inode->i_mutex, ip->i_lock and ip->i_iolock.
 */
static inline void
xfs_rw_ilock(
	struct xfs_inode	*ip,
	int			type)
{
	if (type & XFS_IOLOCK_EXCL)
		mutex_lock(&VFS_I(ip)->i_mutex);
	xfs_ilock(ip, type);
}

static inline void
xfs_rw_iunlock(
	struct xfs_inode	*ip,
	int			type)
{
	xfs_iunlock(ip, type);
	if (type & XFS_IOLOCK_EXCL)
		mutex_unlock(&VFS_I(ip)->i_mutex);
}

static inline void
xfs_rw_ilock_demote(
	struct xfs_inode	*ip,
	int			type)
{
	xfs_ilock_demote(ip, type);
	if (type & XFS_IOLOCK_EXCL)
		mutex_unlock(&VFS_I(ip)->i_mutex);
}

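/*
 * Illustrative sketch (not part of the original file): a typical
 * exclusive critical section built from the helpers above.  The lock
 * order is i_mutex -> i_iolock/i_lock, and unlock is the reverse,
 * which is exactly what these wrappers encapsulate:
 *
 *	xfs_rw_ilock(ip, XFS_IOLOCK_EXCL);
 *	... flush and invalidate the page cache, set up the IO ...
 *	xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL);
 *	... issue the IO under the shared lock ...
 *	xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
 */
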
/*
 * xfs_iozero
 *
 * xfs_iozero clears the specified range of the buffer supplied,
 * and marks all the affected blocks as valid and modified.  If
 * an affected block is not allocated, it will be allocated.  If
 * an affected block is not completely overwritten, and is not
 * valid before the operation, it will be read from disk before
 * being partially zeroed.
 */
int
xfs_iozero(
	struct xfs_inode	*ip,	/* inode */
	loff_t			pos,	/* offset in file */
	size_t			count)	/* size of data to zero */
{
	struct page		*page;
	struct address_space	*mapping;
	int			status;

	mapping = VFS_I(ip)->i_mapping;
	do {
		unsigned offset, bytes;
		void *fsdata;

		offset = (pos & (PAGE_CACHE_SIZE - 1)); /* Within page */
		bytes = PAGE_CACHE_SIZE - offset;
		if (bytes > count)
			bytes = count;

		status = pagecache_write_begin(NULL, mapping, pos, bytes,
					AOP_FLAG_UNINTERRUPTIBLE,
					&page, &fsdata);
		if (status)
			break;

		zero_user(page, offset, bytes);

		status = pagecache_write_end(NULL, mapping, pos, bytes, bytes,
					page, fsdata);
		WARN_ON(status <= 0); /* can't return less than zero! */
		pos += bytes;
		count -= bytes;
		status = 0;
	} while (count);

	return (-status);
}

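/*
 * Example (illustrative sketch, assuming a 4096 byte block size):
 * zeroing the tail of the EOF block after an extending operation
 * where the old size was 6000 bytes:
 *
 *	loff_t	isize = 6000;
 *	size_t	tail = 8192 - 6000;	(bytes left in the EOF block)
 *
 *	error = xfs_iozero(ip, isize, tail);
 *
 * A multi-page range simply repeats the write_begin/zero_user/
 * write_end loop above once per page.
 */
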
/*
 * Fsync operations on directories are much simpler than on regular files,
 * as there is no file data to flush, and thus also no need for explicit
 * cache flush operations, and there are no non-transaction metadata updates
 * on directories either.
 */
STATIC int
xfs_dir_fsync(
	struct file		*file,
	loff_t			start,
	loff_t			end,
	int			datasync)
{
	struct xfs_inode	*ip = XFS_I(file->f_mapping->host);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_lsn_t		lsn = 0;

	trace_xfs_dir_fsync(ip);

	xfs_ilock(ip, XFS_ILOCK_SHARED);
	if (xfs_ipincount(ip))
		lsn = ip->i_itemp->ili_last_lsn;
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	if (!lsn)
		return 0;
	return _xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, NULL);
}

STATIC int
xfs_file_fsync(
	struct file		*file,
	loff_t			start,
	loff_t			end,
	int			datasync)
{
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	int			error = 0;
	int			log_flushed = 0;
	xfs_lsn_t		lsn = 0;

	trace_xfs_file_fsync(ip);

	error = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (error)
		return error;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -XFS_ERROR(EIO);

	xfs_iflags_clear(ip, XFS_ITRUNCATED);

	if (mp->m_flags & XFS_MOUNT_BARRIER) {
		/*
		 * If we have an RT and/or log subvolume we need to make sure
		 * to flush the write cache of the device used for file data
		 * first.  This is to ensure newly written file data makes
		 * it to disk before logging the new inode size in case of
		 * an extending write.
		 */
		if (XFS_IS_REALTIME_INODE(ip))
			xfs_blkdev_issue_flush(mp->m_rtdev_targp);
		else if (mp->m_logdev_targp != mp->m_ddev_targp)
			xfs_blkdev_issue_flush(mp->m_ddev_targp);
	}

	/*
	 * All metadata updates are logged, which means that we just have
	 * to flush the log up to the latest LSN that touched the inode.
	 */
	xfs_ilock(ip, XFS_ILOCK_SHARED);
	if (xfs_ipincount(ip)) {
		if (!datasync ||
		    (ip->i_itemp->ili_fields & ~XFS_ILOG_TIMESTAMP))
			lsn = ip->i_itemp->ili_last_lsn;
	}
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	if (lsn)
		error = _xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, &log_flushed);

	/*
	 * If we only have a single device, and the log force above was
	 * a no-op, we might have to flush the data device cache here.
	 * This can only happen for fdatasync/O_DSYNC if we were overwriting
	 * an already allocated file and thus do not have any metadata to
	 * commit.
	 */
	if ((mp->m_flags & XFS_MOUNT_BARRIER) &&
	    mp->m_logdev_targp == mp->m_ddev_targp &&
	    !XFS_IS_REALTIME_INODE(ip) &&
	    !log_flushed)
		xfs_blkdev_issue_flush(mp->m_ddev_targp);

	return -error;
}

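/*
 * Illustrative note (assumption, not from the original file): from
 * userspace, fdatasync(fd) reaches this handler with datasync == 1.
 * If the only logged change on the inode is a timestamp update, no
 * log force is needed at all:
 *
 *	fd = open("file", O_WRONLY);
 *	pwrite(fd, buf, len, 0);	(overwrite, no size change)
 *	fdatasync(fd);			(may reduce to a cache flush)
 */
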
STATIC ssize_t
xfs_file_aio_read(
	struct kiocb		*iocb,
	const struct iovec	*iovp,
	unsigned long		nr_segs,
	loff_t			pos)
{
	struct file		*file = iocb->ki_filp;
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	size_t			size = 0;
	ssize_t			ret = 0;
	int			ioflags = 0;
	xfs_fsize_t		n;

	XFS_STATS_INC(xs_read_calls);

	BUG_ON(iocb->ki_pos != pos);

	if (unlikely(file->f_flags & O_DIRECT))
		ioflags |= IO_ISDIRECT;
	if (file->f_mode & FMODE_NOCMTIME)
		ioflags |= IO_INVIS;

	ret = generic_segment_checks(iovp, &nr_segs, &size, VERIFY_WRITE);
	if (ret < 0)
		return ret;

	if (unlikely(ioflags & IO_ISDIRECT)) {
		xfs_buftarg_t	*target =
			XFS_IS_REALTIME_INODE(ip) ?
				mp->m_rtdev_targp : mp->m_ddev_targp;
		if ((pos & target->bt_smask) || (size & target->bt_smask)) {
			if (pos == i_size_read(inode))
				return 0;
			return -XFS_ERROR(EINVAL);
		}
	}

	n = mp->m_super->s_maxbytes - pos;
	if (n <= 0 || size == 0)
		return 0;

	if (n < size)
		size = n;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	/*
	 * Locking is a bit tricky here.  If we take an exclusive lock
	 * for direct IO, we effectively serialise all new concurrent
	 * read IO to this file and block it behind IO that is currently in
	 * progress because IO in progress holds the IO lock shared.  We only
	 * need to hold the lock exclusive to blow away the page cache, so
	 * only take the lock exclusively if the page cache needs invalidation.
	 * This allows the normal direct IO case of no page cache pages to
	 * proceed concurrently without serialisation.
	 */
	xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);
	if ((ioflags & IO_ISDIRECT) && inode->i_mapping->nrpages) {
		xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
		xfs_rw_ilock(ip, XFS_IOLOCK_EXCL);

		if (inode->i_mapping->nrpages) {
			ret = -filemap_write_and_wait_range(
					VFS_I(ip)->i_mapping,
					pos, -1);
			if (ret) {
				xfs_rw_iunlock(ip, XFS_IOLOCK_EXCL);
				return ret;
			}
			truncate_pagecache_range(VFS_I(ip), pos, -1);
		}
		xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL);
	}

	trace_xfs_file_read(ip, size, pos, ioflags);

	ret = generic_file_aio_read(iocb, iovp, nr_segs, pos);
	if (ret > 0)
		XFS_STATS_ADD(xs_read_bytes, ret);

	xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
	return ret;
}

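/*
 * Example (userspace sketch, illustrative only): the alignment rule
 * enforced above requires both the file offset and the total length
 * of a direct read to be multiples of the buftarg sector mask,
 * commonly 512 bytes:
 *
 *	void *buf;
 *	posix_memalign(&buf, 512, 4096);
 *	fd = open("file", O_RDONLY | O_DIRECT);
 *	pread(fd, buf, 4096, 512);	(aligned: proceeds)
 *	pread(fd, buf, 100, 512);	(-EINVAL unless pos == EOF)
 */
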
STATIC ssize_t
xfs_file_splice_read(
	struct file		*infilp,
	loff_t			*ppos,
	struct pipe_inode_info	*pipe,
	size_t			count,
	unsigned int		flags)
{
	struct xfs_inode	*ip = XFS_I(infilp->f_mapping->host);
	int			ioflags = 0;
	ssize_t			ret;

	XFS_STATS_INC(xs_read_calls);

	if (infilp->f_mode & FMODE_NOCMTIME)
		ioflags |= IO_INVIS;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);

	trace_xfs_file_splice_read(ip, count, *ppos, ioflags);

	ret = generic_file_splice_read(infilp, ppos, pipe, count, flags);
	if (ret > 0)
		XFS_STATS_ADD(xs_read_bytes, ret);

	xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
	return ret;
}

/*
 * xfs_file_splice_write() does not use xfs_rw_ilock() because
 * generic_file_splice_write() takes the i_mutex itself.  This, in theory,
 * could cause lock inversions between the aio_write path and the splice path
 * if someone is doing concurrent splice(2) based writes and write(2) based
 * writes to the same inode.  The only real way to fix this is to re-implement
 * the generic code here with correct locking orders.
 */
STATIC ssize_t
xfs_file_splice_write(
	struct pipe_inode_info	*pipe,
	struct file		*outfilp,
	loff_t			*ppos,
	size_t			count,
	unsigned int		flags)
{
	struct inode		*inode = outfilp->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	int			ioflags = 0;
	ssize_t			ret;

	XFS_STATS_INC(xs_write_calls);

	if (outfilp->f_mode & FMODE_NOCMTIME)
		ioflags |= IO_INVIS;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	xfs_ilock(ip, XFS_IOLOCK_EXCL);

	trace_xfs_file_splice_write(ip, count, *ppos, ioflags);

	ret = generic_file_splice_write(pipe, outfilp, ppos, count, flags);
	if (ret > 0)
		XFS_STATS_ADD(xs_write_bytes, ret);

	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
	return ret;
}

/*
 * This routine is called to handle zeroing any space in the last block of the
 * file that is beyond the EOF.  We do this since the size is being increased
 * without writing anything to that block and we don't want to read the
 * garbage on the disk.
 */
STATIC int				/* error (positive) */
xfs_zero_last_block(
	struct xfs_inode	*ip,
	xfs_fsize_t		offset,
	xfs_fsize_t		isize)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		last_fsb = XFS_B_TO_FSBT(mp, isize);
	int			zero_offset = XFS_B_FSB_OFFSET(mp, isize);
	int			zero_len;
	int			nimaps = 1;
	int			error = 0;
	struct xfs_bmbt_irec	imap;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	error = xfs_bmapi_read(ip, last_fsb, 1, &imap, &nimaps, 0);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	if (error)
		return error;

	ASSERT(nimaps > 0);

	/*
	 * If the block underlying isize is just a hole, then there
	 * is nothing to zero.
	 */
	if (imap.br_startblock == HOLESTARTBLOCK)
		return 0;

	zero_len = mp->m_sb.sb_blocksize - zero_offset;
	if (isize + zero_len > offset)
		zero_len = offset - isize;
	return xfs_iozero(ip, isize, zero_len);
}

/*
 * Zero any on disk space between the current EOF and the new, larger EOF.
 *
 * This handles the normal case of zeroing the remainder of the last block in
 * the file and the unusual case of zeroing blocks out beyond the size of the
 * file.  This second case only happens with fixed size extents and when the
 * system crashes before the inode size was updated but after blocks were
 * allocated.
 *
 * Expects the iolock to be held exclusive, and will take the ilock internally.
 */
int					/* error (positive) */
xfs_zero_eof(
	struct xfs_inode	*ip,
	xfs_off_t		offset,		/* starting I/O offset */
	xfs_fsize_t		isize)		/* current inode size */
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		start_zero_fsb;
	xfs_fileoff_t		end_zero_fsb;
	xfs_fileoff_t		zero_count_fsb;
	xfs_fileoff_t		last_fsb;
	xfs_fileoff_t		zero_off;
	xfs_fsize_t		zero_len;
	int			nimaps;
	int			error = 0;
	struct xfs_bmbt_irec	imap;

	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
	ASSERT(offset > isize);

	/*
	 * First handle zeroing the block on which isize resides.
	 *
	 * We only zero a part of that block so it is handled specially.
	 */
	if (XFS_B_FSB_OFFSET(mp, isize) != 0) {
		error = xfs_zero_last_block(ip, offset, isize);
		if (error)
			return error;
	}

	/*
	 * Calculate the range between the new size and the old where blocks
	 * needing to be zeroed may exist.
	 *
	 * To get the block where the last byte in the file currently resides,
	 * we need to subtract one from the size and truncate back to a block
	 * boundary.  We subtract 1 in case the size is exactly on a block
	 * boundary.
	 */
	last_fsb = isize ? XFS_B_TO_FSBT(mp, isize - 1) : (xfs_fileoff_t)-1;
	start_zero_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)isize);
	end_zero_fsb = XFS_B_TO_FSBT(mp, offset - 1);
	ASSERT((xfs_sfiloff_t)last_fsb < (xfs_sfiloff_t)start_zero_fsb);
	if (last_fsb == end_zero_fsb) {
		/*
		 * The size was only incremented on its last block.
		 * We took care of that above, so just return.
		 */
		return 0;
	}

	ASSERT(start_zero_fsb <= end_zero_fsb);
	while (start_zero_fsb <= end_zero_fsb) {
		nimaps = 1;
		zero_count_fsb = end_zero_fsb - start_zero_fsb + 1;

		xfs_ilock(ip, XFS_ILOCK_EXCL);
		error = xfs_bmapi_read(ip, start_zero_fsb, zero_count_fsb,
				       &imap, &nimaps, 0);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		if (error)
			return error;

		ASSERT(nimaps > 0);

		if (imap.br_state == XFS_EXT_UNWRITTEN ||
		    imap.br_startblock == HOLESTARTBLOCK) {
			start_zero_fsb = imap.br_startoff + imap.br_blockcount;
			ASSERT(start_zero_fsb <= (end_zero_fsb + 1));
			continue;
		}

		/*
		 * There are blocks we need to zero.
		 */
		zero_off = XFS_FSB_TO_B(mp, start_zero_fsb);
		zero_len = XFS_FSB_TO_B(mp, imap.br_blockcount);

		if ((zero_off + zero_len) > offset)
			zero_len = offset - zero_off;

		error = xfs_iozero(ip, zero_off, zero_len);
		if (error)
			return error;

		start_zero_fsb = imap.br_startoff + imap.br_blockcount;
		ASSERT(start_zero_fsb <= (end_zero_fsb + 1));
	}

	return 0;
}

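/*
 * Worked example (illustrative, assuming 4096 byte blocks): extending
 * a file from isize = 6000 to offset = 20000.
 *
 *	xfs_zero_last_block() zeroes bytes 6000..8191 of fsb 1.
 *	start_zero_fsb = XFS_B_TO_FSB(mp, 6000)   = 2
 *	end_zero_fsb   = XFS_B_TO_FSBT(mp, 19999) = 4
 *
 * The loop then zeroes written, allocated blocks in the fsb range
 * 2..4 (clamped to byte 20000); holes and unwritten extents are
 * skipped because they already read back as zeroes.
 */
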
/*
 * Common pre-write limit and setup checks.
 *
 * Called with the iolock held either shared or exclusive according to
 * @iolock, and returns with it held.  Might upgrade the iolock to exclusive
 * if called for a direct write beyond i_size.
 */
STATIC ssize_t
xfs_file_aio_write_checks(
	struct file		*file,
	loff_t			*pos,
	size_t			*count,
	int			*iolock)
{
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	int			error = 0;

restart:
	error = generic_write_checks(file, pos, count, S_ISBLK(inode->i_mode));
	if (error)
		return error;

	/*
	 * If the offset is beyond the size of the file, we need to zero any
	 * blocks that fall between the existing EOF and the start of this
	 * write.  If zeroing is needed and we are currently holding the
	 * iolock shared, we need to update it to exclusive, which implies
	 * having to redo all the checks made before.
	 */
	if (*pos > i_size_read(inode)) {
		if (*iolock == XFS_IOLOCK_SHARED) {
			xfs_rw_iunlock(ip, *iolock);
			*iolock = XFS_IOLOCK_EXCL;
			xfs_rw_ilock(ip, *iolock);
			goto restart;
		}
		error = -xfs_zero_eof(ip, *pos, i_size_read(inode));
		if (error)
			return error;
	}

	/*
	 * Updating the timestamps will grab the ilock again from
	 * xfs_fs_dirty_inode, so we have to call it after dropping the
	 * lock above.  Eventually we should look into a way to avoid
	 * the pointless lock roundtrip.
	 */
	if (likely(!(file->f_mode & FMODE_NOCMTIME))) {
		error = file_update_time(file);
		if (error)
			return error;
	}

	/*
	 * If we're writing the file then make sure to clear the setuid and
	 * setgid bits if the process is not being run by root.  This keeps
	 * people from modifying setuid and setgid binaries.
	 */
	return file_remove_suid(file);
}

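/*
 * Caller contract sketch (illustrative): a direct write that starts
 * under the shared iolock but turns out to extend the file is
 * restarted under the exclusive lock:
 *
 *	iolock = XFS_IOLOCK_SHARED;
 *	xfs_rw_ilock(ip, iolock);
 *	ret = xfs_file_aio_write_checks(file, &pos, &count, &iolock);
 *	(on return, iolock may have become XFS_IOLOCK_EXCL)
 *	...
 *	xfs_rw_iunlock(ip, iolock);
 */
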
/*
 * xfs_file_dio_aio_write - handle direct IO writes
 *
 * Lock the inode appropriately to prepare for and issue a direct IO write.
 * By separating it from the buffered write path we remove all the
 * tricky-to-follow locking changes and looping.
 *
 * If there are cached pages or we're extending the file, we need IOLOCK_EXCL
 * until we're sure the bytes at the new EOF have been zeroed and/or the cached
 * pages are flushed out.
 *
 * In most cases the direct IO writes will be done holding IOLOCK_SHARED,
 * allowing them to be done in parallel with reads and other direct IO writes.
 * However, if the IO is not aligned to filesystem blocks, the direct IO layer
 * needs to do sub-block zeroing and that requires serialisation against other
 * direct IOs to the same block.  In this case we need to serialise the
 * submission of the unaligned IOs so that we don't get racing block zeroing in
 * the dio layer.  To avoid the problem with aio, we also need to wait for
 * outstanding IOs to complete so that unwritten extent conversion is completed
 * before we try to map the overlapping block.  This is currently implemented
 * by hitting it with a big hammer (i.e. inode_dio_wait()).
 *
 * Returns with locks held as indicated by @iolock and errors indicated by
 * negative return values.
 */
STATIC ssize_t
xfs_file_dio_aio_write(
	struct kiocb		*iocb,
	const struct iovec	*iovp,
	unsigned long		nr_segs,
	loff_t			pos,
	size_t			ocount)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	ssize_t			ret = 0;
	size_t			count = ocount;
	int			unaligned_io = 0;
	int			iolock;
	struct xfs_buftarg	*target = XFS_IS_REALTIME_INODE(ip) ?
					mp->m_rtdev_targp : mp->m_ddev_targp;

	if ((pos & target->bt_smask) || (count & target->bt_smask))
		return -XFS_ERROR(EINVAL);

	if ((pos & mp->m_blockmask) || ((pos + count) & mp->m_blockmask))
		unaligned_io = 1;

	/*
	 * We don't need to take an exclusive lock unless the page cache needs
	 * to be invalidated or unaligned IO is being executed.  We don't need
	 * to consider the EOF extension case here because
	 * xfs_file_aio_write_checks() will relock the inode as necessary for
	 * EOF zeroing cases and fill out the new inode size as appropriate.
	 */
	if (unaligned_io || mapping->nrpages)
		iolock = XFS_IOLOCK_EXCL;
	else
		iolock = XFS_IOLOCK_SHARED;
	xfs_rw_ilock(ip, iolock);

	/*
	 * Recheck if there are cached pages that need to be invalidated
	 * after we got the iolock, to protect against other threads adding
	 * new pages while we were waiting for the iolock.
	 */
	if (mapping->nrpages && iolock == XFS_IOLOCK_SHARED) {
		xfs_rw_iunlock(ip, iolock);
		iolock = XFS_IOLOCK_EXCL;
		xfs_rw_ilock(ip, iolock);
	}

	ret = xfs_file_aio_write_checks(file, &pos, &count, &iolock);
	if (ret)
		goto out;

	if (mapping->nrpages) {
		ret = -filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
						    pos, -1);
		if (ret)
			goto out;
		truncate_pagecache_range(VFS_I(ip), pos, -1);
	}

	/*
	 * If we are doing unaligned IO, wait for all other IO to drain,
	 * otherwise demote the lock if we had to flush cached pages
	 */
	if (unaligned_io)
		inode_dio_wait(inode);
	else if (iolock == XFS_IOLOCK_EXCL) {
		xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL);
		iolock = XFS_IOLOCK_SHARED;
	}

	trace_xfs_file_direct_write(ip, count, iocb->ki_pos, 0);
	ret = generic_file_direct_write(iocb, iovp,
			&nr_segs, pos, &iocb->ki_pos, count, ocount);

out:
	xfs_rw_iunlock(ip, iolock);

	/* No fallback to buffered IO on errors for XFS. */
	ASSERT(ret < 0 || ret == count);
	return ret;
}

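/*
 * Example (illustrative, assuming a 4096 byte block size so that
 * m_blockmask == 0xfff): a 2048 byte direct write at offset 4096 ends
 * at byte 6144, which is not block aligned, so unaligned_io is set
 * and the write takes IOLOCK_EXCL plus inode_dio_wait().  A 4096 byte
 * write at offset 8192 is block aligned on both ends and can run
 * under IOLOCK_SHARED, in parallel with other aligned direct IO.
 */
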
STATIC ssize_t
xfs_file_buffered_aio_write(
	struct kiocb		*iocb,
	const struct iovec	*iovp,
	unsigned long		nr_segs,
	loff_t			pos,
	size_t			ocount)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			ret;
	int			enospc = 0;
	int			iolock = XFS_IOLOCK_EXCL;
	size_t			count = ocount;

	xfs_rw_ilock(ip, iolock);

	ret = xfs_file_aio_write_checks(file, &pos, &count, &iolock);
	if (ret)
		goto out;

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = mapping->backing_dev_info;

write_retry:
	trace_xfs_file_buffered_write(ip, count, iocb->ki_pos, 0);
	ret = generic_file_buffered_write(iocb, iovp, nr_segs,
			pos, &iocb->ki_pos, count, 0);

	/*
	 * If we just got an ENOSPC, try to write back all dirty inodes to
	 * convert delalloc space, freeing up some of the excess reserved
	 * metadata space.
	 */
	if (ret == -ENOSPC && !enospc) {
		enospc = 1;
		xfs_flush_inodes(ip->i_mount);
		goto write_retry;
	}

	current->backing_dev_info = NULL;
out:
	xfs_rw_iunlock(ip, iolock);
	return ret;
}

STATIC ssize_t
xfs_file_aio_write(
	struct kiocb		*iocb,
	const struct iovec	*iovp,
	unsigned long		nr_segs,
	loff_t			pos)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			ret;
	size_t			ocount = 0;

	XFS_STATS_INC(xs_write_calls);

	BUG_ON(iocb->ki_pos != pos);

	ret = generic_segment_checks(iovp, &nr_segs, &ocount, VERIFY_READ);
	if (ret)
		return ret;

	if (ocount == 0)
		return 0;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
		ret = -EIO;
		goto out;
	}

	if (unlikely(file->f_flags & O_DIRECT))
		ret = xfs_file_dio_aio_write(iocb, iovp, nr_segs, pos, ocount);
	else
		ret = xfs_file_buffered_aio_write(iocb, iovp, nr_segs, pos,
						  ocount);

	if (ret > 0) {
		ssize_t err;

		XFS_STATS_ADD(xs_write_bytes, ret);

		/* Handle various SYNC-type writes */
		err = generic_write_sync(file, pos, ret);
		if (err < 0)
			ret = err;
	}

out:
	return ret;
}

STATIC long
xfs_file_fallocate(
	struct file	*file,
	int		mode,
	loff_t		offset,
	loff_t		len)
{
	struct inode	*inode = file_inode(file);
	long		error;
	loff_t		new_size = 0;
	xfs_flock64_t	bf;
	xfs_inode_t	*ip = XFS_I(inode);
	int		cmd = XFS_IOC_RESVSP;
	int		attr_flags = XFS_ATTR_NOLOCK;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;

	bf.l_whence = 0;
	bf.l_start = offset;
	bf.l_len = len;

	xfs_ilock(ip, XFS_IOLOCK_EXCL);

	if (mode & FALLOC_FL_PUNCH_HOLE)
		cmd = XFS_IOC_UNRESVSP;

	/* check the new inode size is valid before allocating */
	if (!(mode & FALLOC_FL_KEEP_SIZE) &&
	    offset + len > i_size_read(inode)) {
		new_size = offset + len;
		error = inode_newsize_ok(inode, new_size);
		if (error)
			goto out_unlock;
	}

	if (file->f_flags & O_DSYNC)
		attr_flags |= XFS_ATTR_SYNC;

	error = -xfs_change_file_space(ip, cmd, &bf, 0, attr_flags);
	if (error)
		goto out_unlock;

	/* Change file size if needed */
	if (new_size) {
		struct iattr iattr;

		iattr.ia_valid = ATTR_SIZE;
		iattr.ia_size = new_size;
		error = -xfs_setattr_size(ip, &iattr, XFS_ATTR_NOLOCK);
	}

out_unlock:
	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
	return error;
}

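/*
 * Usage sketch (userspace, illustrative only): both supported modes
 * map to xfs_change_file_space() requests:
 *
 *	fallocate(fd, 0, 0, 1 << 20);			(XFS_IOC_RESVSP)
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE |
 *		      FALLOC_FL_KEEP_SIZE, 0, 65536);	(XFS_IOC_UNRESVSP)
 *
 * The VFS normally requires FALLOC_FL_KEEP_SIZE to accompany
 * FALLOC_FL_PUNCH_HOLE; any other mode bit is rejected with
 * EOPNOTSUPP above.
 */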

STATIC int
xfs_file_open(
	struct inode	*inode,
	struct file	*file)
{
	if (!(file->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS)
		return -EFBIG;
	if (XFS_FORCED_SHUTDOWN(XFS_M(inode->i_sb)))
		return -EIO;
	return 0;
}

STATIC int
xfs_dir_open(
	struct inode	*inode,
	struct file	*file)
{
	struct xfs_inode *ip = XFS_I(inode);
	int		mode;
	int		error;

	error = xfs_file_open(inode, file);
	if (error)
		return error;

	/*
	 * If there are any blocks, read-ahead block 0 as we're almost
	 * certain to have the next operation be a read there.
	 */
	mode = xfs_ilock_map_shared(ip);
	if (ip->i_d.di_nextents > 0)
		xfs_dir3_data_readahead(NULL, ip, 0, -1);
	xfs_iunlock(ip, mode);
	return 0;
}

STATIC int
xfs_file_release(
	struct inode	*inode,
	struct file	*filp)
{
	return -xfs_release(XFS_I(inode));
}

STATIC int
xfs_file_readdir(
	struct file	*filp,
	void		*dirent,
	filldir_t	filldir)
{
	struct inode	*inode = file_inode(filp);
	xfs_inode_t	*ip = XFS_I(inode);
	int		error;
	size_t		bufsize;

	/*
	 * The Linux API doesn't pass the total size of the buffer we read
	 * into down to the filesystem.  With the filldir concept it's not
	 * needed for correct information, but the XFS dir2 leaf code wants
	 * an estimate of the buffer size to calculate its readahead window
	 * and size the buffers used for mapping to physical blocks.
	 *
	 * Try to give it an estimate that's good enough, maybe at some
	 * point we can change the ->readdir prototype to include the
	 * buffer size.  For now we use the current glibc buffer size.
	 */
	bufsize = (size_t)min_t(loff_t, 32768, ip->i_d.di_size);

	error = xfs_readdir(ip, dirent, bufsize,
			    (xfs_off_t *)&filp->f_pos, filldir);
	if (error)
		return -error;
	return 0;
}

STATIC int
xfs_file_mmap(
	struct file	*filp,
	struct vm_area_struct *vma)
{
	vma->vm_ops = &xfs_file_vm_ops;

	file_accessed(filp);
	return 0;
}

/*
 * An mmap()ed file has taken a write protection fault and is being made
 * writable.  We can set the page state up correctly for a writable
 * page, which means we can do correct delalloc accounting (ENOSPC
 * checking!) and unwritten extent mapping.
 */
STATIC int
xfs_vm_page_mkwrite(
	struct vm_area_struct	*vma,
	struct vm_fault		*vmf)
{
	return block_page_mkwrite(vma, vmf, xfs_get_blocks);
}

/*
 * This enum indicates the type of offset to search for in the page
 * cache, for either xfs_seek_data() or xfs_seek_hole().
 */
enum {
	HOLE_OFF = 0,
	DATA_OFF,
};

/*
 * Lookup the desired type of offset from the given page.
 *
 * On success, return true and the offset argument will point to the
 * start of the region that was found.  Otherwise this function will
 * return false and keep the offset argument unchanged.
 */
STATIC bool
xfs_lookup_buffer_offset(
	struct page		*page,
	loff_t			*offset,
	unsigned int		type)
{
	loff_t			lastoff = page_offset(page);
	bool			found = false;
	struct buffer_head	*bh, *head;

	bh = head = page_buffers(page);
	do {
		/*
		 * Unwritten extents that have data in the page
		 * cache covering them can be identified by the
		 * BH_Unwritten state flag.  Pages with multiple
		 * buffers might have a mix of holes, data and
		 * unwritten extents - any buffer with valid
		 * data in it should have BH_Uptodate flag set
		 * on it.
		 */
		if (buffer_unwritten(bh) ||
		    buffer_uptodate(bh)) {
			if (type == DATA_OFF)
				found = true;
		} else {
			if (type == HOLE_OFF)
				found = true;
		}

		if (found) {
			*offset = lastoff;
			break;
		}
		lastoff += bh->b_size;
	} while ((bh = bh->b_this_page) != head);

	return found;
}

/*
 * This routine is called to find out and return a data or hole offset
 * from the page cache for unwritten extents according to the desired
 * type for xfs_seek_data() or xfs_seek_hole().
 *
 * The argument offset tells where to start searching the page cache.
 * The map argument is used to figure out the end points of the range
 * over which to look up pages.
 *
 * Return true if the desired type of offset was found, and the argument
 * offset is filled with that address.  Otherwise, return false and keep
 * offset unchanged.
 */
STATIC bool
xfs_find_get_desired_pgoff(
	struct inode		*inode,
	struct xfs_bmbt_irec	*map,
	unsigned int		type,
	loff_t			*offset)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	struct pagevec		pvec;
	pgoff_t			index;
	pgoff_t			end;
	loff_t			endoff;
	loff_t			startoff = *offset;
	loff_t			lastoff = startoff;
	bool			found = false;

	pagevec_init(&pvec, 0);

	index = startoff >> PAGE_CACHE_SHIFT;
	endoff = XFS_FSB_TO_B(mp, map->br_startoff + map->br_blockcount);
	end = endoff >> PAGE_CACHE_SHIFT;
	do {
		int		want;
		unsigned	nr_pages;
		unsigned int	i;

		want = min_t(pgoff_t, end - index, PAGEVEC_SIZE);
		nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index,
					  want);
		/*
		 * No page mapped into the given range.  If we are searching
		 * for holes and this is the first pass through the loop, the
		 * given offset lands in a hole, so return it.
		 *
		 * If we have already stepped through some block buffers but
		 * they all contained data, the last offset has been updated
		 * to point past the end of the last mapped page; if it has
		 * not yet reached the endpoint of the search, there must be
		 * a hole between them.
		 */
		if (nr_pages == 0) {
			/* Data search found nothing */
			if (type == DATA_OFF)
				break;

			ASSERT(type == HOLE_OFF);
			if (lastoff == startoff || lastoff < endoff) {
				found = true;
				*offset = lastoff;
			}
			break;
		}

		/*
		 * At least we found one page.  If this is the first time we
		 * step into the loop, and if the first page index offset is
		 * greater than the given search offset, a hole was found.
		 */
		if (type == HOLE_OFF && lastoff == startoff &&
		    lastoff < page_offset(pvec.pages[0])) {
			found = true;
			break;
		}

		for (i = 0; i < nr_pages; i++) {
			struct page	*page = pvec.pages[i];
			loff_t		b_offset;

			/*
			 * At this point, the page may be truncated or
			 * invalidated (changing page->mapping to NULL),
			 * or even swizzled back from swapper_space to tmpfs
			 * file mapping.  However, page->index will not change
			 * because we have a reference on the page.
			 *
			 * Searching is done if the page index is out of
			 * range.  If the current offset has not yet reached
			 * the end of the specified search range, there
			 * should be a hole between them.
			 */
			if (page->index > end) {
				if (type == HOLE_OFF && lastoff < endoff) {
					*offset = lastoff;
					found = true;
				}
				goto out;
			}

			lock_page(page);
			/*
			 * Page truncated or invalidated (page->mapping ==
			 * NULL).  We can freely skip it and proceed to check
			 * the next page.
			 */
			if (unlikely(page->mapping != inode->i_mapping)) {
				unlock_page(page);
				continue;
			}

			if (!page_has_buffers(page)) {
				unlock_page(page);
				continue;
			}

			found = xfs_lookup_buffer_offset(page, &b_offset, type);
			if (found) {
				/*
				 * The found offset may be less than the
				 * starting point of the search if this is
				 * the first pass through this code.
				 */
				*offset = max_t(loff_t, startoff, b_offset);
				unlock_page(page);
				goto out;
			}

			/*
			 * Either we were searching for data and found none,
			 * or we were searching for a hole and found a data
			 * buffer.  In either case, the next page probably
			 * contains what we want, so update the last offset
			 * to point to it.
			 */
			lastoff = page_offset(page) + PAGE_SIZE;
			unlock_page(page);
		}

		/*
		 * If the number of returned pages is less than we asked for,
		 * the search is done.  In that case nothing was found when
		 * searching for data, but for a hole search we found a hole
		 * behind the last offset.
		 */
		if (nr_pages < want) {
			if (type == HOLE_OFF) {
				*offset = lastoff;
				found = true;
			}
			break;
		}

		index = pvec.pages[i - 1]->index + 1;
		pagevec_release(&pvec);
	} while (index <= end);

out:
	pagevec_release(&pvec);
	return found;
}

STATIC loff_t
xfs_seek_data(
	struct file		*file,
	loff_t			start)
{
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	loff_t			uninitialized_var(offset);
	xfs_fsize_t		isize;
	xfs_fileoff_t		fsbno;
	xfs_filblks_t		end;
	uint			lock;
	int			error;

	lock = xfs_ilock_map_shared(ip);

	isize = i_size_read(inode);
	if (start >= isize) {
		error = ENXIO;
		goto out_unlock;
	}

	/*
	 * Try to read extents from the first block indicated
	 * by fsbno to the end block of the file.
	 */
	fsbno = XFS_B_TO_FSBT(mp, start);
	end = XFS_B_TO_FSB(mp, isize);
	for (;;) {
		struct xfs_bmbt_irec	map[2];
		int			nmap = 2;
		unsigned int		i;

		error = xfs_bmapi_read(ip, fsbno, end - fsbno, map, &nmap,
				       XFS_BMAPI_ENTIRE);
		if (error)
			goto out_unlock;

		/* No extents at given offset, must be beyond EOF */
		if (nmap == 0) {
			error = ENXIO;
			goto out_unlock;
		}

		for (i = 0; i < nmap; i++) {
			offset = max_t(loff_t, start,
				       XFS_FSB_TO_B(mp, map[i].br_startoff));

			/* Landed in a data extent */
			if (map[i].br_startblock == DELAYSTARTBLOCK ||
			    (map[i].br_state == XFS_EXT_NORM &&
			     !isnullstartblock(map[i].br_startblock)))
				goto out;

			/*
			 * Landed in an unwritten extent, try to search data
			 * from the page cache.
			 */
			if (map[i].br_state == XFS_EXT_UNWRITTEN) {
				if (xfs_find_get_desired_pgoff(inode, &map[i],
							DATA_OFF, &offset))
					goto out;
			}
		}

		/*
		 * map[0] is a hole or an unwritten extent without data in
		 * the page cache.  If there is nothing in map[1], we are
		 * probably reading after EOF.
		 */
		if (nmap == 1) {
			error = ENXIO;
			goto out_unlock;
		}

		ASSERT(i > 1);

		/*
		 * Nothing was found, so proceed to the next round of the
		 * search if the current offset has not passed or hit EOF.
		 */
		fsbno = map[i - 1].br_startoff + map[i - 1].br_blockcount;
		start = XFS_FSB_TO_B(mp, fsbno);
		if (start >= isize) {
			error = ENXIO;
			goto out_unlock;
		}
	}

out:
	if (offset != file->f_pos)
		file->f_pos = offset;

out_unlock:
	xfs_iunlock_map_shared(ip, lock);

	if (error)
		return -error;
	return offset;
}

STATIC loff_t
xfs_seek_hole(
	struct file		*file,
	loff_t			start)
{
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	loff_t			uninitialized_var(offset);
	xfs_fsize_t		isize;
	xfs_fileoff_t		fsbno;
	xfs_filblks_t		end;
	uint			lock;
	int			error;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -XFS_ERROR(EIO);

	lock = xfs_ilock_map_shared(ip);

	isize = i_size_read(inode);
	if (start >= isize) {
		error = ENXIO;
		goto out_unlock;
	}

	fsbno = XFS_B_TO_FSBT(mp, start);
	end = XFS_B_TO_FSB(mp, isize);

	for (;;) {
		struct xfs_bmbt_irec	map[2];
		int			nmap = 2;
		unsigned int		i;

		error = xfs_bmapi_read(ip, fsbno, end - fsbno, map, &nmap,
				       XFS_BMAPI_ENTIRE);
		if (error)
			goto out_unlock;

		/* No extents at given offset, must be beyond EOF */
		if (nmap == 0) {
			error = ENXIO;
			goto out_unlock;
		}

		for (i = 0; i < nmap; i++) {
			offset = max_t(loff_t, start,
				       XFS_FSB_TO_B(mp, map[i].br_startoff));

			/* Landed in a hole */
			if (map[i].br_startblock == HOLESTARTBLOCK)
				goto out;

			/*
			 * Landed in an unwritten extent, try to search for
			 * a hole in the page cache.
			 */
			if (map[i].br_state == XFS_EXT_UNWRITTEN) {
				if (xfs_find_get_desired_pgoff(inode, &map[i],
							HOLE_OFF, &offset))
					goto out;
			}
		}

		/*
		 * map[0] contains data, or is unwritten but has data in the
		 * page cache, which probably means that we are reading after
		 * EOF.  We should fix the offset to point to the end of the
		 * file (i.e., there is an implicit hole at the end of any
		 * file).
		 */
		if (nmap == 1) {
			offset = isize;
			break;
		}

		ASSERT(i > 1);

		/*
		 * Both mappings contain data; proceed to the next round of
		 * the search if the current offset has not passed or hit
		 * EOF.
		 */
		fsbno = map[i - 1].br_startoff + map[i - 1].br_blockcount;
		start = XFS_FSB_TO_B(mp, fsbno);
		if (start >= isize) {
			offset = isize;
			break;
		}
	}

out:
	/*
	 * At this point, we must have found a hole.  However, the returned
	 * offset may be bigger than the file size, as it may be aligned to
	 * a page boundary for unwritten extents.  We need to deal with this
	 * situation in particular.
	 */
	offset = min_t(loff_t, offset, isize);
	if (offset != file->f_pos)
		file->f_pos = offset;

out_unlock:
	xfs_iunlock_map_shared(ip, lock);

	if (error)
		return -error;
	return offset;
}

STATIC loff_t
xfs_file_llseek(
	struct file	*file,
	loff_t		offset,
	int		origin)
{
	switch (origin) {
	case SEEK_END:
	case SEEK_CUR:
	case SEEK_SET:
		return generic_file_llseek(file, offset, origin);
	case SEEK_DATA:
		return xfs_seek_data(file, offset);
	case SEEK_HOLE:
		return xfs_seek_hole(file, offset);
	default:
		return -EINVAL;
	}
}

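/*
 * Usage sketch (userspace, illustrative only): walking the data
 * extents of a sparse file with the SEEK_DATA/SEEK_HOLE support
 * wired up above:
 *
 *	off_t data = lseek(fd, 0, SEEK_DATA);
 *	while (data >= 0) {
 *		off_t hole = lseek(fd, data, SEEK_HOLE);
 *		printf("data: %lld..%lld\n",
 *		       (long long)data, (long long)hole - 1);
 *		data = lseek(fd, hole, SEEK_DATA);
 *	}
 *	(lseek() returns -1 with errno == ENXIO past the last extent)
 */
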
const struct file_operations xfs_file_operations = {
	.llseek		= xfs_file_llseek,
	.read		= do_sync_read,
	.write		= do_sync_write,
	.aio_read	= xfs_file_aio_read,
	.aio_write	= xfs_file_aio_write,
	.splice_read	= xfs_file_splice_read,
	.splice_write	= xfs_file_splice_write,
	.unlocked_ioctl	= xfs_file_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= xfs_file_compat_ioctl,
#endif
	.mmap		= xfs_file_mmap,
	.open		= xfs_file_open,
	.release	= xfs_file_release,
	.fsync		= xfs_file_fsync,
	.fallocate	= xfs_file_fallocate,
};

const struct file_operations xfs_dir_file_operations = {
	.open		= xfs_dir_open,
	.read		= generic_read_dir,
	.readdir	= xfs_file_readdir,
	.llseek		= generic_file_llseek,
	.unlocked_ioctl	= xfs_file_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= xfs_file_compat_ioctl,
#endif
	.fsync		= xfs_dir_fsync,
};

static const struct vm_operations_struct xfs_file_vm_ops = {
	.fault		= filemap_fault,
	.page_mkwrite	= xfs_vm_page_mkwrite,
	.remap_pages	= generic_file_remap_pages,
};