/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_log.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_trans.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_bmap.h"
#include "xfs_error.h"
#include "xfs_vnodeops.h"
#include "xfs_da_btree.h"
#include "xfs_ioctl.h"
#include "xfs_trace.h"

#include <linux/dcache.h>
#include <linux/falloc.h>

static const struct vm_operations_struct xfs_file_vm_ops;

/*
 * Locking primitives for read and write IO paths to ensure we consistently use
 * and order the inode->i_mutex, ip->i_lock and ip->i_iolock.
 */
static inline void
xfs_rw_ilock(
        struct xfs_inode        *ip,
        int                     type)
{
        if (type & XFS_IOLOCK_EXCL)
                mutex_lock(&VFS_I(ip)->i_mutex);
        xfs_ilock(ip, type);
}

static inline void
xfs_rw_iunlock(
        struct xfs_inode        *ip,
        int                     type)
{
        xfs_iunlock(ip, type);
        if (type & XFS_IOLOCK_EXCL)
                mutex_unlock(&VFS_I(ip)->i_mutex);
}

static inline void
xfs_rw_ilock_demote(
        struct xfs_inode        *ip,
        int                     type)
{
        xfs_ilock_demote(ip, type);
        if (type & XFS_IOLOCK_EXCL)
                mutex_unlock(&VFS_I(ip)->i_mutex);
}

/*
 * xfs_iozero
 *
 * xfs_iozero clears the specified range of the buffer supplied,
 * and marks all the affected blocks as valid and modified.  If
 * an affected block is not allocated, it will be allocated.  If
 * an affected block is not completely overwritten, and is not
 * valid before the operation, it will be read from disk before
 * being partially zeroed.
 */
STATIC int
xfs_iozero(
        struct xfs_inode        *ip,    /* inode */
        loff_t                  pos,    /* offset in file */
        size_t                  count)  /* size of data to zero */
{
        struct page             *page;
        struct address_space    *mapping;
        int                     status;

        mapping = VFS_I(ip)->i_mapping;
        do {
                unsigned offset, bytes;
                void *fsdata;

                offset = (pos & (PAGE_CACHE_SIZE - 1)); /* Within page */
                bytes = PAGE_CACHE_SIZE - offset;
                if (bytes > count)
                        bytes = count;

                status = pagecache_write_begin(NULL, mapping, pos, bytes,
                                        AOP_FLAG_UNINTERRUPTIBLE,
                                        &page, &fsdata);
                if (status)
                        break;

                zero_user(page, offset, bytes);

                status = pagecache_write_end(NULL, mapping, pos, bytes, bytes,
                                        page, fsdata);
                WARN_ON(status <= 0); /* can't return less than zero! */
                pos += bytes;
                count -= bytes;
                status = 0;
        } while (count);

        return (-status);
}

/*
 * Fsync operations on directories are much simpler than on regular files,
 * as there is no file data to flush, and thus also no need for explicit
 * cache flush operations, and there are no non-transaction metadata updates
 * on directories either.
 */
STATIC int
xfs_dir_fsync(
        struct file             *file,
        loff_t                  start,
        loff_t                  end,
        int                     datasync)
{
        struct xfs_inode        *ip = XFS_I(file->f_mapping->host);
        struct xfs_mount        *mp = ip->i_mount;
        xfs_lsn_t               lsn = 0;

        trace_xfs_dir_fsync(ip);

        xfs_ilock(ip, XFS_ILOCK_SHARED);
        if (xfs_ipincount(ip))
                lsn = ip->i_itemp->ili_last_lsn;
        xfs_iunlock(ip, XFS_ILOCK_SHARED);

        if (!lsn)
                return 0;
        return _xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, NULL);
}

STATIC int
xfs_file_fsync(
        struct file             *file,
        loff_t                  start,
        loff_t                  end,
        int                     datasync)
{
        struct inode            *inode = file->f_mapping->host;
        struct xfs_inode        *ip = XFS_I(inode);
        struct xfs_mount        *mp = ip->i_mount;
        int                     error = 0;
        int                     log_flushed = 0;
        xfs_lsn_t               lsn = 0;

        trace_xfs_file_fsync(ip);

        error = filemap_write_and_wait_range(inode->i_mapping, start, end);
        if (error)
                return error;

        if (XFS_FORCED_SHUTDOWN(mp))
                return -XFS_ERROR(EIO);

        xfs_iflags_clear(ip, XFS_ITRUNCATED);

        if (mp->m_flags & XFS_MOUNT_BARRIER) {
                /*
                 * If we have an RT and/or log subvolume we need to make sure
                 * to flush the write cache of the device used for file data
                 * first.  This is to ensure newly written file data makes
                 * it to disk before logging the new inode size in case of
                 * an extending write.
                 */
                if (XFS_IS_REALTIME_INODE(ip))
                        xfs_blkdev_issue_flush(mp->m_rtdev_targp);
                else if (mp->m_logdev_targp != mp->m_ddev_targp)
                        xfs_blkdev_issue_flush(mp->m_ddev_targp);
        }

        /*
         * All metadata updates are logged, which means that we just have
         * to flush the log up to the latest LSN that touched the inode.
         */
        xfs_ilock(ip, XFS_ILOCK_SHARED);
        if (xfs_ipincount(ip)) {
                if (!datasync ||
                    (ip->i_itemp->ili_fields & ~XFS_ILOG_TIMESTAMP))
                        lsn = ip->i_itemp->ili_last_lsn;
        }
        xfs_iunlock(ip, XFS_ILOCK_SHARED);

        if (lsn)
                error = _xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, &log_flushed);

        /*
         * If we only have a single device, and the log force above was
         * a no-op, we might have to flush the data device cache here.
         * This can only happen for fdatasync/O_DSYNC if we were overwriting
         * an already allocated file and thus do not have any metadata to
         * commit.
         */
        if ((mp->m_flags & XFS_MOUNT_BARRIER) &&
            mp->m_logdev_targp == mp->m_ddev_targp &&
            !XFS_IS_REALTIME_INODE(ip) &&
            !log_flushed)
                xfs_blkdev_issue_flush(mp->m_ddev_targp);

        return -error;
}

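/*
 * Read path: validate the iovec and direct IO alignment, then hand off to
 * the generic read code with the IO lock held shared.  Direct IO only takes
 * the lock exclusive long enough to invalidate any cached pages, then
 * demotes it back to shared.
 */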
STATIC ssize_t
xfs_file_aio_read(
        struct kiocb            *iocb,
        const struct iovec      *iovp,
        unsigned long           nr_segs,
        loff_t                  pos)
{
        struct file             *file = iocb->ki_filp;
        struct inode            *inode = file->f_mapping->host;
        struct xfs_inode        *ip = XFS_I(inode);
        struct xfs_mount        *mp = ip->i_mount;
        size_t                  size = 0;
        ssize_t                 ret = 0;
        int                     ioflags = 0;
        xfs_fsize_t             n;
        unsigned long           seg;

        XFS_STATS_INC(xs_read_calls);

        BUG_ON(iocb->ki_pos != pos);

        if (unlikely(file->f_flags & O_DIRECT))
                ioflags |= IO_ISDIRECT;
        if (file->f_mode & FMODE_NOCMTIME)
                ioflags |= IO_INVIS;

        /* START copy & waste from filemap.c */
        for (seg = 0; seg < nr_segs; seg++) {
                const struct iovec *iv = &iovp[seg];

                /*
                 * If any segment has a negative length, or the cumulative
                 * length ever wraps negative then return -EINVAL.
                 */
                size += iv->iov_len;
                if (unlikely((ssize_t)(size|iv->iov_len) < 0))
                        return XFS_ERROR(-EINVAL);
        }
        /* END copy & waste from filemap.c */

        if (unlikely(ioflags & IO_ISDIRECT)) {
                xfs_buftarg_t   *target =
                        XFS_IS_REALTIME_INODE(ip) ?
                                mp->m_rtdev_targp : mp->m_ddev_targp;
                if ((iocb->ki_pos & target->bt_smask) ||
                    (size & target->bt_smask)) {
                        if (iocb->ki_pos == i_size_read(inode))
                                return 0;
                        return -XFS_ERROR(EINVAL);
                }
        }

        n = XFS_MAXIOFFSET(mp) - iocb->ki_pos;
        if (n <= 0 || size == 0)
                return 0;

        if (n < size)
                size = n;

        if (XFS_FORCED_SHUTDOWN(mp))
                return -EIO;

        /*
         * Locking is a bit tricky here.  If we take an exclusive lock
         * for direct IO, we effectively serialise all new concurrent
         * read IO to this file and block it behind IO that is currently in
         * progress because IO in progress holds the IO lock shared.  We only
         * need to hold the lock exclusive to blow away the page cache, so
         * only take the lock exclusively if the page cache needs invalidation.
         * This allows the normal direct IO case of no page cache pages to
         * proceed concurrently without serialisation.
         */
        xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);
        if ((ioflags & IO_ISDIRECT) && inode->i_mapping->nrpages) {
                xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
                xfs_rw_ilock(ip, XFS_IOLOCK_EXCL);

                if (inode->i_mapping->nrpages) {
                        ret = -xfs_flushinval_pages(ip,
                                        (iocb->ki_pos & PAGE_CACHE_MASK),
                                        -1, FI_REMAPF_LOCKED);
                        if (ret) {
                                xfs_rw_iunlock(ip, XFS_IOLOCK_EXCL);
                                return ret;
                        }
                }
                xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL);
        }

        trace_xfs_file_read(ip, size, iocb->ki_pos, ioflags);

        ret = generic_file_aio_read(iocb, iovp, nr_segs, iocb->ki_pos);
        if (ret > 0)
                XFS_STATS_ADD(xs_read_bytes, ret);

        xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
        return ret;
}

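/*
 * Splice data out of a file into a pipe, holding the IO lock shared around
 * the call into the generic splice code.
 */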
STATIC ssize_t
xfs_file_splice_read(
        struct file             *infilp,
        loff_t                  *ppos,
        struct pipe_inode_info  *pipe,
        size_t                  count,
        unsigned int            flags)
{
        struct xfs_inode        *ip = XFS_I(infilp->f_mapping->host);
        int                     ioflags = 0;
        ssize_t                 ret;

        XFS_STATS_INC(xs_read_calls);

        if (infilp->f_mode & FMODE_NOCMTIME)
                ioflags |= IO_INVIS;

        if (XFS_FORCED_SHUTDOWN(ip->i_mount))
                return -EIO;

        xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);

        trace_xfs_file_splice_read(ip, count, *ppos, ioflags);

        ret = generic_file_splice_read(infilp, ppos, pipe, count, flags);
        if (ret > 0)
                XFS_STATS_ADD(xs_read_bytes, ret);

        xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
        return ret;
}

/*
 * xfs_file_splice_write() does not use xfs_rw_ilock() because
 * generic_file_splice_write() takes the i_mutex itself.  This, in theory,
 * could cause lock inversions between the aio_write path and the splice path
 * if someone is doing concurrent splice(2) based writes and write(2) based
 * writes to the same inode.  The only real way to fix this is to re-implement
 * the generic code here with correct locking orders.
 */
STATIC ssize_t
xfs_file_splice_write(
        struct pipe_inode_info  *pipe,
        struct file             *outfilp,
        loff_t                  *ppos,
        size_t                  count,
        unsigned int            flags)
{
        struct inode            *inode = outfilp->f_mapping->host;
        struct xfs_inode        *ip = XFS_I(inode);
        int                     ioflags = 0;
        ssize_t                 ret;

        XFS_STATS_INC(xs_write_calls);

        if (outfilp->f_mode & FMODE_NOCMTIME)
                ioflags |= IO_INVIS;

        if (XFS_FORCED_SHUTDOWN(ip->i_mount))
                return -EIO;

        xfs_ilock(ip, XFS_IOLOCK_EXCL);

        trace_xfs_file_splice_write(ip, count, *ppos, ioflags);

        ret = generic_file_splice_write(pipe, outfilp, ppos, count, flags);
        if (ret > 0)
                XFS_STATS_ADD(xs_write_bytes, ret);

        xfs_iunlock(ip, XFS_IOLOCK_EXCL);
        return ret;
}

/*
 * This routine is called to handle zeroing any space in the last block of the
 * file that is beyond the EOF.  We do this since the size is being increased
 * without writing anything to that block and we don't want to read the
 * garbage on the disk.
 */
STATIC int                              /* error (positive) */
xfs_zero_last_block(
        struct xfs_inode        *ip,
        xfs_fsize_t             offset,
        xfs_fsize_t             isize)
{
        struct xfs_mount        *mp = ip->i_mount;
        xfs_fileoff_t           last_fsb = XFS_B_TO_FSBT(mp, isize);
        int                     zero_offset = XFS_B_FSB_OFFSET(mp, isize);
        int                     zero_len;
        int                     nimaps = 1;
        int                     error = 0;
        struct xfs_bmbt_irec    imap;

        xfs_ilock(ip, XFS_ILOCK_EXCL);
        error = xfs_bmapi_read(ip, last_fsb, 1, &imap, &nimaps, 0);
        xfs_iunlock(ip, XFS_ILOCK_EXCL);
        if (error)
                return error;

        ASSERT(nimaps > 0);

        /*
         * If the block underlying isize is just a hole, then there
         * is nothing to zero.
         */
        if (imap.br_startblock == HOLESTARTBLOCK)
                return 0;

        zero_len = mp->m_sb.sb_blocksize - zero_offset;
        if (isize + zero_len > offset)
                zero_len = offset - isize;
        return xfs_iozero(ip, isize, zero_len);
}

/*
 * Zero any on disk space between the current EOF and the new, larger EOF.
 *
 * This handles the normal case of zeroing the remainder of the last block in
 * the file and the unusual case of zeroing blocks out beyond the size of the
 * file.  This second case only happens with fixed size extents and when the
 * system crashes before the inode size was updated but after blocks were
 * allocated.
 *
 * Expects the iolock to be held exclusive, and will take the ilock internally.
 */
int                                     /* error (positive) */
xfs_zero_eof(
        struct xfs_inode        *ip,
        xfs_off_t               offset,         /* starting I/O offset */
        xfs_fsize_t             isize)          /* current inode size */
{
        struct xfs_mount        *mp = ip->i_mount;
        xfs_fileoff_t           start_zero_fsb;
        xfs_fileoff_t           end_zero_fsb;
        xfs_fileoff_t           zero_count_fsb;
        xfs_fileoff_t           last_fsb;
        xfs_fileoff_t           zero_off;
        xfs_fsize_t             zero_len;
        int                     nimaps;
        int                     error = 0;
        struct xfs_bmbt_irec    imap;

        ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
        ASSERT(offset > isize);

        /*
         * First handle zeroing the block on which isize resides.
         *
         * We only zero a part of that block so it is handled specially.
         */
        if (XFS_B_FSB_OFFSET(mp, isize) != 0) {
                error = xfs_zero_last_block(ip, offset, isize);
                if (error)
                        return error;
        }

        /*
         * Calculate the range between the new size and the old where blocks
         * needing to be zeroed may exist.
         *
         * To get the block where the last byte in the file currently resides,
         * we need to subtract one from the size and truncate back to a block
         * boundary.  We subtract 1 in case the size is exactly on a block
         * boundary.
         */
        last_fsb = isize ? XFS_B_TO_FSBT(mp, isize - 1) : (xfs_fileoff_t)-1;
        start_zero_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)isize);
        end_zero_fsb = XFS_B_TO_FSBT(mp, offset - 1);
        ASSERT((xfs_sfiloff_t)last_fsb < (xfs_sfiloff_t)start_zero_fsb);
        if (last_fsb == end_zero_fsb) {
                /*
                 * The size was only incremented on its last block.
                 * We took care of that above, so just return.
                 */
                return 0;
        }

        ASSERT(start_zero_fsb <= end_zero_fsb);
        while (start_zero_fsb <= end_zero_fsb) {
                nimaps = 1;
                zero_count_fsb = end_zero_fsb - start_zero_fsb + 1;

                xfs_ilock(ip, XFS_ILOCK_EXCL);
                error = xfs_bmapi_read(ip, start_zero_fsb, zero_count_fsb,
                                          &imap, &nimaps, 0);
                xfs_iunlock(ip, XFS_ILOCK_EXCL);
                if (error)
                        return error;

                ASSERT(nimaps > 0);

                if (imap.br_state == XFS_EXT_UNWRITTEN ||
                    imap.br_startblock == HOLESTARTBLOCK) {
                        start_zero_fsb = imap.br_startoff + imap.br_blockcount;
                        ASSERT(start_zero_fsb <= (end_zero_fsb + 1));
                        continue;
                }

                /*
                 * There are blocks we need to zero.
                 */
                zero_off = XFS_FSB_TO_B(mp, start_zero_fsb);
                zero_len = XFS_FSB_TO_B(mp, imap.br_blockcount);

                if ((zero_off + zero_len) > offset)
                        zero_len = offset - zero_off;

                error = xfs_iozero(ip, zero_off, zero_len);
                if (error)
                        return error;

                start_zero_fsb = imap.br_startoff + imap.br_blockcount;
                ASSERT(start_zero_fsb <= (end_zero_fsb + 1));
        }

        return 0;
}

/*
 * Common pre-write limit and setup checks.
 *
 * Called with the iolock held either shared or exclusive according to
 * @iolock, and returns with it held.  Might upgrade the iolock to exclusive
 * if called for a direct write beyond i_size.
 */
STATIC ssize_t
xfs_file_aio_write_checks(
        struct file             *file,
        loff_t                  *pos,
        size_t                  *count,
        int                     *iolock)
{
        struct inode            *inode = file->f_mapping->host;
        struct xfs_inode        *ip = XFS_I(inode);
        int                     error = 0;

restart:
        error = generic_write_checks(file, pos, count, S_ISBLK(inode->i_mode));
        if (error)
                return error;

        /*
         * If the offset is beyond the size of the file, we need to zero any
         * blocks that fall between the existing EOF and the start of this
         * write.  If zeroing is needed and we are currently holding the
         * iolock shared, we need to update it to exclusive, which means we
         * have to redo all the checks above.
         */
        if (*pos > i_size_read(inode)) {
                if (*iolock == XFS_IOLOCK_SHARED) {
                        xfs_rw_iunlock(ip, *iolock);
                        *iolock = XFS_IOLOCK_EXCL;
                        xfs_rw_ilock(ip, *iolock);
                        goto restart;
                }
                error = -xfs_zero_eof(ip, *pos, i_size_read(inode));
                if (error)
                        return error;
        }

        /*
         * Updating the timestamps will grab the ilock again from
         * xfs_fs_dirty_inode, so we have to call it after dropping the
         * lock above.  Eventually we should look into a way to avoid
         * the pointless lock roundtrip.
         */
        if (likely(!(file->f_mode & FMODE_NOCMTIME)))
                file_update_time(file);

        /*
         * If we're writing the file then make sure to clear the setuid and
         * setgid bits if the process is not being run by root.  This keeps
         * people from modifying setuid and setgid binaries.
         */
        return file_remove_suid(file);
}

/*
 * xfs_file_dio_aio_write - handle direct IO writes
 *
 * Lock the inode appropriately to prepare for and issue a direct IO write.
 * By separating it from the buffered write path we remove all the tricky to
 * follow locking changes and looping.
 *
 * If there are cached pages or we're extending the file, we need IOLOCK_EXCL
 * until we're sure the bytes at the new EOF have been zeroed and/or the cached
 * pages are flushed out.
 *
 * In most cases the direct IO writes will be done holding IOLOCK_SHARED
 * allowing them to be done in parallel with reads and other direct IO writes.
 * However, if the IO is not aligned to filesystem blocks, the direct IO layer
 * needs to do sub-block zeroing and that requires serialisation against other
 * direct IOs to the same block.  In this case we need to serialise the
 * submission of the unaligned IOs so that we don't get racing block zeroing in
 * the dio layer.  To avoid the problem with aio, we also need to wait for
 * outstanding IOs to complete so that unwritten extent conversion is completed
 * before we try to map the overlapping block.  This is currently implemented
 * by hitting it with a big hammer (i.e. inode_dio_wait()).
 *
 * Returns with locks held indicated by @iolock and errors indicated by
 * negative return values.
 */
STATIC ssize_t
xfs_file_dio_aio_write(
        struct kiocb            *iocb,
        const struct iovec      *iovp,
        unsigned long           nr_segs,
        loff_t                  pos,
        size_t                  ocount)
{
        struct file             *file = iocb->ki_filp;
        struct address_space    *mapping = file->f_mapping;
        struct inode            *inode = mapping->host;
        struct xfs_inode        *ip = XFS_I(inode);
        struct xfs_mount        *mp = ip->i_mount;
        ssize_t                 ret = 0;
        size_t                  count = ocount;
        int                     unaligned_io = 0;
        int                     iolock;
        struct xfs_buftarg      *target = XFS_IS_REALTIME_INODE(ip) ?
                                        mp->m_rtdev_targp : mp->m_ddev_targp;

        if ((pos & target->bt_smask) || (count & target->bt_smask))
                return -XFS_ERROR(EINVAL);

        if ((pos & mp->m_blockmask) || ((pos + count) & mp->m_blockmask))
                unaligned_io = 1;

        /*
         * We don't need to take an exclusive lock unless the page cache needs
         * to be invalidated or unaligned IO is being executed.  We don't need
         * to consider the EOF extension case here because
         * xfs_file_aio_write_checks() will relock the inode as necessary for
         * EOF zeroing cases and fill out the new inode size as appropriate.
         */
        if (unaligned_io || mapping->nrpages)
                iolock = XFS_IOLOCK_EXCL;
        else
                iolock = XFS_IOLOCK_SHARED;
        xfs_rw_ilock(ip, iolock);

        /*
         * Recheck if there are cached pages that need invalidate after we got
         * the iolock to protect against other threads adding new pages while
         * we were waiting for the iolock.
         */
        if (mapping->nrpages && iolock == XFS_IOLOCK_SHARED) {
                xfs_rw_iunlock(ip, iolock);
                iolock = XFS_IOLOCK_EXCL;
                xfs_rw_ilock(ip, iolock);
        }

        ret = xfs_file_aio_write_checks(file, &pos, &count, &iolock);
        if (ret)
                goto out;

        if (mapping->nrpages) {
                ret = -xfs_flushinval_pages(ip, (pos & PAGE_CACHE_MASK), -1,
                                                        FI_REMAPF_LOCKED);
                if (ret)
                        goto out;
        }

        /*
         * If we are doing unaligned IO, wait for all other IO to drain,
         * otherwise demote the lock if we had to flush cached pages.
         */
        if (unaligned_io)
                inode_dio_wait(inode);
        else if (iolock == XFS_IOLOCK_EXCL) {
                xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL);
                iolock = XFS_IOLOCK_SHARED;
        }

        trace_xfs_file_direct_write(ip, count, iocb->ki_pos, 0);
        ret = generic_file_direct_write(iocb, iovp,
                        &nr_segs, pos, &iocb->ki_pos, count, ocount);

out:
        xfs_rw_iunlock(ip, iolock);

        /* No fallback to buffered IO on errors for XFS. */
        ASSERT(ret < 0 || ret == count);
        return ret;
}

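/*
 * Buffered write path: always takes the IO lock exclusive, writes through
 * the page cache, and retries once after flushing the inode if the first
 * attempt returns ENOSPC.
 */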
STATIC ssize_t
xfs_file_buffered_aio_write(
        struct kiocb            *iocb,
        const struct iovec      *iovp,
        unsigned long           nr_segs,
        loff_t                  pos,
        size_t                  ocount)
{
        struct file             *file = iocb->ki_filp;
        struct address_space    *mapping = file->f_mapping;
        struct inode            *inode = mapping->host;
        struct xfs_inode        *ip = XFS_I(inode);
        ssize_t                 ret;
        int                     enospc = 0;
        int                     iolock = XFS_IOLOCK_EXCL;
        size_t                  count = ocount;

        xfs_rw_ilock(ip, iolock);

        ret = xfs_file_aio_write_checks(file, &pos, &count, &iolock);
        if (ret)
                goto out;

        /* We can write back this queue in page reclaim */
        current->backing_dev_info = mapping->backing_dev_info;

write_retry:
        trace_xfs_file_buffered_write(ip, count, iocb->ki_pos, 0);
        ret = generic_file_buffered_write(iocb, iovp, nr_segs,
                        pos, &iocb->ki_pos, count, ret);
        /*
         * If we just got an ENOSPC, flush the inode now that we aren't
         * holding any page locks and retry *once*.
         */
        if (ret == -ENOSPC && !enospc) {
                enospc = 1;
                ret = -xfs_flush_pages(ip, 0, -1, 0, FI_NONE);
                if (!ret)
                        goto write_retry;
        }

        current->backing_dev_info = NULL;
out:
        xfs_rw_iunlock(ip, iolock);
        return ret;
}

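/*
 * Top-level write entry point: validate the iovec, dispatch to the direct
 * or buffered write path, and handle any O_SYNC/O_DSYNC requirements on
 * the way out.
 */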
STATIC ssize_t
xfs_file_aio_write(
        struct kiocb            *iocb,
        const struct iovec      *iovp,
        unsigned long           nr_segs,
        loff_t                  pos)
{
        struct file             *file = iocb->ki_filp;
        struct address_space    *mapping = file->f_mapping;
        struct inode            *inode = mapping->host;
        struct xfs_inode        *ip = XFS_I(inode);
        ssize_t                 ret;
        size_t                  ocount = 0;

        XFS_STATS_INC(xs_write_calls);

        BUG_ON(iocb->ki_pos != pos);

        ret = generic_segment_checks(iovp, &nr_segs, &ocount, VERIFY_READ);
        if (ret)
                return ret;

        if (ocount == 0)
                return 0;

        xfs_wait_for_freeze(ip->i_mount, SB_FREEZE_WRITE);

        if (XFS_FORCED_SHUTDOWN(ip->i_mount))
                return -EIO;

        if (unlikely(file->f_flags & O_DIRECT))
                ret = xfs_file_dio_aio_write(iocb, iovp, nr_segs, pos, ocount);
        else
                ret = xfs_file_buffered_aio_write(iocb, iovp, nr_segs, pos,
                                                  ocount);

        if (ret > 0) {
                ssize_t err;

                XFS_STATS_ADD(xs_write_bytes, ret);

                /* Handle various SYNC-type writes */
                err = generic_write_sync(file, pos, ret);
                if (err < 0)
                        ret = err;
        }

        return ret;
}

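/*
 * fallocate: preallocate space (or punch a hole for FALLOC_FL_PUNCH_HOLE)
 * via xfs_change_file_space(), and update the inode size if the range
 * extends beyond the current EOF without FALLOC_FL_KEEP_SIZE.
 */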
STATIC long
xfs_file_fallocate(
        struct file     *file,
        int             mode,
        loff_t          offset,
        loff_t          len)
{
        struct inode    *inode = file->f_path.dentry->d_inode;
        long            error;
        loff_t          new_size = 0;
        xfs_flock64_t   bf;
        xfs_inode_t     *ip = XFS_I(inode);
        int             cmd = XFS_IOC_RESVSP;
        int             attr_flags = XFS_ATTR_NOLOCK;

        if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
                return -EOPNOTSUPP;

        bf.l_whence = 0;
        bf.l_start = offset;
        bf.l_len = len;

        xfs_ilock(ip, XFS_IOLOCK_EXCL);

        if (mode & FALLOC_FL_PUNCH_HOLE)
                cmd = XFS_IOC_UNRESVSP;

        /* check the new inode size is valid before allocating */
        if (!(mode & FALLOC_FL_KEEP_SIZE) &&
            offset + len > i_size_read(inode)) {
                new_size = offset + len;
                error = inode_newsize_ok(inode, new_size);
                if (error)
                        goto out_unlock;
        }

        if (file->f_flags & O_DSYNC)
                attr_flags |= XFS_ATTR_SYNC;

        error = -xfs_change_file_space(ip, cmd, &bf, 0, attr_flags);
        if (error)
                goto out_unlock;

        /* Change file size if needed */
        if (new_size) {
                struct iattr iattr;

                iattr.ia_valid = ATTR_SIZE;
                iattr.ia_size = new_size;
                error = -xfs_setattr_size(ip, &iattr, XFS_ATTR_NOLOCK);
        }

out_unlock:
        xfs_iunlock(ip, XFS_IOLOCK_EXCL);
        return error;
}

STATIC int
xfs_file_open(
        struct inode    *inode,
        struct file     *file)
{
        if (!(file->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS)
                return -EFBIG;
        if (XFS_FORCED_SHUTDOWN(XFS_M(inode->i_sb)))
                return -EIO;
        return 0;
}

STATIC int
xfs_dir_open(
        struct inode    *inode,
        struct file     *file)
{
        struct xfs_inode *ip = XFS_I(inode);
        int             mode;
        int             error;

        error = xfs_file_open(inode, file);
        if (error)
                return error;

        /*
         * If there are any blocks, read-ahead block 0 as we're almost
         * certain to have the next operation be a read there.
         */
        mode = xfs_ilock_map_shared(ip);
        if (ip->i_d.di_nextents > 0)
                xfs_da_reada_buf(NULL, ip, 0, XFS_DATA_FORK);
        xfs_iunlock(ip, mode);
        return 0;
}

STATIC int
xfs_file_release(
        struct inode    *inode,
        struct file     *filp)
{
        return -xfs_release(XFS_I(inode));
}

STATIC int
xfs_file_readdir(
        struct file     *filp,
        void            *dirent,
        filldir_t       filldir)
{
        struct inode    *inode = filp->f_path.dentry->d_inode;
        xfs_inode_t     *ip = XFS_I(inode);
        int             error;
        size_t          bufsize;

        /*
         * The Linux API doesn't pass the total size of the buffer we read
         * into down to the filesystem.  With the filldir concept it's not
         * needed for correct information, but the XFS dir2 leaf code wants
         * an estimate of the buffer size to calculate its readahead window
         * and size the buffers used for mapping to physical blocks.
         *
         * Try to give it an estimate that's good enough, maybe at some
         * point we can change the ->readdir prototype to include the
         * buffer size.  For now we use the current glibc buffer size.
         */
        bufsize = (size_t)min_t(loff_t, 32768, ip->i_d.di_size);

        error = xfs_readdir(ip, dirent, bufsize,
                                (xfs_off_t *)&filp->f_pos, filldir);
        if (error)
                return -error;
        return 0;
}

STATIC int
xfs_file_mmap(
        struct file     *filp,
        struct vm_area_struct *vma)
{
        vma->vm_ops = &xfs_file_vm_ops;
        vma->vm_flags |= VM_CAN_NONLINEAR;

        file_accessed(filp);
        return 0;
}

/*
 * An mmap()d file has taken a write protection fault and is being made
 * writable.  We can set the page state up correctly for a writable page,
 * which means we can do correct delalloc accounting (ENOSPC checking!)
 * and unwritten extent mapping.
 */
STATIC int
xfs_vm_page_mkwrite(
        struct vm_area_struct   *vma,
        struct vm_fault         *vmf)
{
        return block_page_mkwrite(vma, vmf, xfs_get_blocks);
}

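/*
 * SEEK_DATA/SEEK_HOLE support: locate the next data extent or hole at or
 * after the supplied offset by inspecting the file's block mappings.
 */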
STATIC loff_t
xfs_seek_data(
        struct file             *file,
        loff_t                  start,
        u32                     type)
{
        struct inode            *inode = file->f_mapping->host;
        struct xfs_inode        *ip = XFS_I(inode);
        struct xfs_mount        *mp = ip->i_mount;
        struct xfs_bmbt_irec    map[2];
        int                     nmap = 2;
        loff_t                  uninitialized_var(offset);
        xfs_fsize_t             isize;
        xfs_fileoff_t           fsbno;
        xfs_filblks_t           end;
        uint                    lock;
        int                     error;

        lock = xfs_ilock_map_shared(ip);

        isize = i_size_read(inode);
        if (start >= isize) {
                error = ENXIO;
                goto out_unlock;
        }

        fsbno = XFS_B_TO_FSBT(mp, start);

        /*
         * Try to read extents from the first block indicated
         * by fsbno to the end block of the file.
         */
        end = XFS_B_TO_FSB(mp, isize);

        error = xfs_bmapi_read(ip, fsbno, end - fsbno, map, &nmap,
                               XFS_BMAPI_ENTIRE);
        if (error)
                goto out_unlock;

        /*
         * Treat unwritten extents as data extents since they might
         * contain dirty data in the page cache.
         */
        if (map[0].br_startblock != HOLESTARTBLOCK) {
                offset = max_t(loff_t, start,
                               XFS_FSB_TO_B(mp, map[0].br_startoff));
        } else {
                if (nmap == 1) {
                        error = ENXIO;
                        goto out_unlock;
                }

                offset = max_t(loff_t, start,
                               XFS_FSB_TO_B(mp, map[1].br_startoff));
        }

        if (offset != file->f_pos)
                file->f_pos = offset;

out_unlock:
        xfs_iunlock_map_shared(ip, lock);

        if (error)
                return -error;
        return offset;
}

STATIC loff_t
xfs_seek_hole(
        struct file             *file,
        loff_t                  start,
        u32                     type)
{
        struct inode            *inode = file->f_mapping->host;
        struct xfs_inode        *ip = XFS_I(inode);
        struct xfs_mount        *mp = ip->i_mount;
        loff_t                  uninitialized_var(offset);
        loff_t                  holeoff;
        xfs_fsize_t             isize;
        xfs_fileoff_t           fsbno;
        uint                    lock;
        int                     error;

        if (XFS_FORCED_SHUTDOWN(mp))
                return -XFS_ERROR(EIO);

        lock = xfs_ilock_map_shared(ip);

        isize = i_size_read(inode);
        if (start >= isize) {
                error = ENXIO;
                goto out_unlock;
        }

        fsbno = XFS_B_TO_FSBT(mp, start);
        error = xfs_bmap_first_unused(NULL, ip, 1, &fsbno, XFS_DATA_FORK);
        if (error)
                goto out_unlock;

        holeoff = XFS_FSB_TO_B(mp, fsbno);
        if (holeoff <= start)
                offset = start;
        else {
                /*
                 * xfs_bmap_first_unused() could return a value bigger than
                 * isize if there are no more holes past the supplied offset.
                 */
                offset = min_t(loff_t, holeoff, isize);
        }

        if (offset != file->f_pos)
                file->f_pos = offset;

out_unlock:
        xfs_iunlock_map_shared(ip, lock);

        if (error)
                return -error;
        return offset;
}

STATIC loff_t
xfs_file_llseek(
        struct file     *file,
        loff_t          offset,
        int             origin)
{
        switch (origin) {
        case SEEK_END:
        case SEEK_CUR:
        case SEEK_SET:
                return generic_file_llseek(file, offset, origin);
        case SEEK_DATA:
                return xfs_seek_data(file, offset, origin);
        case SEEK_HOLE:
                return xfs_seek_hole(file, offset, origin);
        default:
                return -EINVAL;
        }
}

const struct file_operations xfs_file_operations = {
        .llseek         = xfs_file_llseek,
        .read           = do_sync_read,
        .write          = do_sync_write,
        .aio_read       = xfs_file_aio_read,
        .aio_write      = xfs_file_aio_write,
        .splice_read    = xfs_file_splice_read,
        .splice_write   = xfs_file_splice_write,
        .unlocked_ioctl = xfs_file_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = xfs_file_compat_ioctl,
#endif
        .mmap           = xfs_file_mmap,
        .open           = xfs_file_open,
        .release        = xfs_file_release,
        .fsync          = xfs_file_fsync,
        .fallocate      = xfs_file_fallocate,
};

const struct file_operations xfs_dir_file_operations = {
        .open           = xfs_dir_open,
        .read           = generic_read_dir,
        .readdir        = xfs_file_readdir,
        .llseek         = generic_file_llseek,
        .unlocked_ioctl = xfs_file_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = xfs_file_compat_ioctl,
#endif
        .fsync          = xfs_dir_fsync,
};

static const struct vm_operations_struct xfs_file_vm_ops = {
        .fault          = filemap_fault,
        .page_mkwrite   = xfs_vm_page_mkwrite,
};