Commit | Line | Data |
---|---|---|
1da177e4 | 1 | /* |
7b718769 NS |
2 | * Copyright (c) 2000-2005 Silicon Graphics, Inc. |
3 | * All Rights Reserved. | |
1da177e4 | 4 | * |
7b718769 NS |
5 | * This program is free software; you can redistribute it and/or |
6 | * modify it under the terms of the GNU General Public License as | |
1da177e4 LT |
7 | * published by the Free Software Foundation. |
8 | * | |
7b718769 NS |
9 | * This program is distributed in the hope that it would be useful, |
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
12 | * GNU General Public License for more details. | |
1da177e4 | 13 | * |
7b718769 NS |
14 | * You should have received a copy of the GNU General Public License |
15 | * along with this program; if not, write the Free Software Foundation, | |
16 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | |
1da177e4 | 17 | */ |
1da177e4 | 18 | #include "xfs.h" |
dda35b8f | 19 | #include "xfs_fs.h" |
a844f451 | 20 | #include "xfs_bit.h" |
1da177e4 | 21 | #include "xfs_log.h" |
a844f451 | 22 | #include "xfs_inum.h" |
1da177e4 | 23 | #include "xfs_sb.h" |
a844f451 | 24 | #include "xfs_ag.h" |
1da177e4 | 25 | #include "xfs_trans.h" |
1da177e4 LT |
26 | #include "xfs_mount.h" |
27 | #include "xfs_bmap_btree.h" | |
1da177e4 | 28 | #include "xfs_alloc.h" |
1da177e4 LT |
29 | #include "xfs_dinode.h" |
30 | #include "xfs_inode.h" | |
fd3200be | 31 | #include "xfs_inode_item.h" |
dda35b8f | 32 | #include "xfs_bmap.h" |
1da177e4 | 33 | #include "xfs_error.h" |
739bfb2a | 34 | #include "xfs_vnodeops.h" |
f999a5bf | 35 | #include "xfs_da_btree.h" |
ddcd856d | 36 | #include "xfs_ioctl.h" |
dda35b8f | 37 | #include "xfs_trace.h" |
1da177e4 LT |
38 | |
39 | #include <linux/dcache.h> | |
1da177e4 | 40 | |
f0f37e2f | 41 | static const struct vm_operations_struct xfs_file_vm_ops; |
1da177e4 | 42 | |
487f84f3 DC |
43 | /* |
44 | * Locking primitives for read and write IO paths to ensure we consistently use | |
45 | * and order the inode->i_mutex, ip->i_lock and ip->i_iolock. | |
46 | */ | |
47 | static inline void | |
48 | xfs_rw_ilock( | |
49 | struct xfs_inode *ip, | |
50 | int type) | |
51 | { | |
52 | if (type & XFS_IOLOCK_EXCL) | |
53 | mutex_lock(&VFS_I(ip)->i_mutex); | |
54 | xfs_ilock(ip, type); | |
55 | } | |
56 | ||
57 | static inline void | |
58 | xfs_rw_iunlock( | |
59 | struct xfs_inode *ip, | |
60 | int type) | |
61 | { | |
62 | xfs_iunlock(ip, type); | |
63 | if (type & XFS_IOLOCK_EXCL) | |
64 | mutex_unlock(&VFS_I(ip)->i_mutex); | |
65 | } | |
66 | ||
67 | static inline void | |
68 | xfs_rw_ilock_demote( | |
69 | struct xfs_inode *ip, | |
70 | int type) | |
71 | { | |
72 | xfs_ilock_demote(ip, type); | |
73 | if (type & XFS_IOLOCK_EXCL) | |
74 | mutex_unlock(&VFS_I(ip)->i_mutex); | |
75 | } | |
76 | ||
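The read and write paths further down use these helpers to keep a fixed i_mutex-before-iolock order. A usage sketch of the common flush-then-demote pattern (not part of the original file; it mirrors the direct IO read path below):

```c
/*
 * Usage sketch only (not in the original source): take everything
 * exclusively while the page cache is flushed, then demote so the
 * actual IO can run in parallel with other readers.
 */
xfs_rw_ilock(ip, XFS_IOLOCK_EXCL);        /* i_mutex first, then the iolock */
/* ... flush/invalidate cached pages while fully serialised ... */
xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL); /* drops i_mutex, demotes iolock to shared */
/* ... issue the IO ... */
xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
```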
dda35b8f CH |
77 | /* |
78 | * xfs_iozero | |
79 | * | |
80 | * xfs_iozero clears the specified range of buffer supplied, | |
81 | * and marks all the affected blocks as valid and modified. If | |
82 | * an affected block is not allocated, it will be allocated. If | |
83 | * an affected block is not completely overwritten, and is not | |
84 | * valid before the operation, it will be read from disk before | |
85 | * being partially zeroed. | |
86 | */ | |
87 | STATIC int | |
88 | xfs_iozero( | |
89 | struct xfs_inode *ip, /* inode */ | |
90 | loff_t pos, /* offset in file */ | |
91 | size_t count) /* size of data to zero */ | |
92 | { | |
93 | struct page *page; | |
94 | struct address_space *mapping; | |
95 | int status; | |
96 | ||
97 | mapping = VFS_I(ip)->i_mapping; | |
98 | do { | |
99 | unsigned offset, bytes; | |
100 | void *fsdata; | |
101 | ||
102 | offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */ | |
103 | bytes = PAGE_CACHE_SIZE - offset; | |
104 | if (bytes > count) | |
105 | bytes = count; | |
106 | ||
107 | status = pagecache_write_begin(NULL, mapping, pos, bytes, | |
108 | AOP_FLAG_UNINTERRUPTIBLE, | |
109 | &page, &fsdata); | |
110 | if (status) | |
111 | break; | |
112 | ||
113 | zero_user(page, offset, bytes); | |
114 | ||
115 | status = pagecache_write_end(NULL, mapping, pos, bytes, bytes, | |
116 | page, fsdata); | |
117 | WARN_ON(status <= 0); /* can't return less than zero! */ | |
118 | pos += bytes; | |
119 | count -= bytes; | |
120 | status = 0; | |
121 | } while (count); | |
122 | ||
123 | return (-status); | |
124 | } | |
125 | ||
fd3200be CH |
126 | STATIC int |
127 | xfs_file_fsync( | |
128 | struct file *file, | |
fd3200be CH |
129 | int datasync) |
130 | { | |
7ea80859 CH |
131 | struct inode *inode = file->f_mapping->host; |
132 | struct xfs_inode *ip = XFS_I(inode); | |
fd3200be CH |
133 | struct xfs_trans *tp; |
134 | int error = 0; | |
135 | int log_flushed = 0; | |
136 | ||
cca28fb8 | 137 | trace_xfs_file_fsync(ip); |
fd3200be CH |
138 | |
139 | if (XFS_FORCED_SHUTDOWN(ip->i_mount)) | |
140 | return -XFS_ERROR(EIO); | |
141 | ||
142 | xfs_iflags_clear(ip, XFS_ITRUNCATED); | |
143 | ||
37bc5743 CH |
144 | xfs_ioend_wait(ip); |
145 | ||
fd3200be CH |
146 | /* |
147 | * We always need to make sure that the required inode state is safe on | |
148 | * disk. The inode might be clean but we still might need to force the | |
149 | * log because of committed transactions that haven't hit the disk yet. | |
150 | * Likewise, there could be unflushed non-transactional changes to the | |
151 | * inode core that have to go to disk and this requires us to issue | |
152 | * a synchronous transaction to capture these changes correctly. | |
153 | * | |
154 | * This code relies on the assumption that if the i_update_core field | |
155 | * of the inode is clear and the inode is unpinned then it is clean | |
156 | * and no action is required. | |
157 | */ | |
158 | xfs_ilock(ip, XFS_ILOCK_SHARED); | |
159 | ||
66d834ea CH |
160 | /* |
161 | * First check if the VFS inode is marked dirty. All the dirtying | |
162 | * of non-transactional updates now goes through mark_inode_dirty*,
163 | * which allows us to distinguish between pure timestamp updates
164 | * and i_size updates which need to be caught for fdatasync. | |
165 | * After that also check for the dirty state in the XFS inode, which
166 | * might get cleared when the inode gets written out via the AIL
167 | * or xfs_iflush_cluster. | |
168 | */ | |
7ea80859 CH |
169 | if (((inode->i_state & I_DIRTY_DATASYNC) || |
170 | ((inode->i_state & I_DIRTY_SYNC) && !datasync)) && | |
66d834ea | 171 | ip->i_update_core) { |
fd3200be CH |
172 | /* |
173 | * Kick off a transaction to log the inode core to get the | |
174 | * updates. The sync transaction will also force the log. | |
175 | */ | |
176 | xfs_iunlock(ip, XFS_ILOCK_SHARED); | |
177 | tp = xfs_trans_alloc(ip->i_mount, XFS_TRANS_FSYNC_TS); | |
178 | error = xfs_trans_reserve(tp, 0, | |
179 | XFS_FSYNC_TS_LOG_RES(ip->i_mount), 0, 0, 0); | |
180 | if (error) { | |
181 | xfs_trans_cancel(tp, 0); | |
182 | return -error; | |
183 | } | |
184 | xfs_ilock(ip, XFS_ILOCK_EXCL); | |
185 | ||
186 | /* | |
187 | * Note - it's possible that we might have pushed ourselves out | |
188 | * of the way during trans_reserve which would flush the inode. | |
189 | * But there's no guarantee that the inode buffer has actually | |
190 | * gone out yet (it's delwri). Plus the buffer could be pinned | |
191 | * anyway if it's part of an inode in another recent | |
192 | * transaction. So we play it safe and fire off the | |
193 | * transaction anyway. | |
194 | */ | |
898621d5 | 195 | xfs_trans_ijoin(tp, ip); |
fd3200be CH |
196 | xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); |
197 | xfs_trans_set_sync(tp); | |
198 | error = _xfs_trans_commit(tp, 0, &log_flushed); | |
199 | ||
200 | xfs_iunlock(ip, XFS_ILOCK_EXCL); | |
201 | } else { | |
202 | /* | |
203 | * Timestamps/size haven't changed since last inode flush or | |
204 | * inode transaction commit. That means either nothing got | |
205 | * written or a transaction committed which caught the updates. | |
206 | * If the latter happened and the transaction hasn't hit the | |
207 | * disk yet, the inode will still be pinned. If it is,
208 | * force the log. | |
209 | */ | |
fd3200be | 210 | if (xfs_ipincount(ip)) { |
024910cb CH |
211 | error = _xfs_log_force_lsn(ip->i_mount, |
212 | ip->i_itemp->ili_last_lsn, | |
213 | XFS_LOG_SYNC, &log_flushed); | |
fd3200be | 214 | } |
024910cb | 215 | xfs_iunlock(ip, XFS_ILOCK_SHARED); |
fd3200be CH |
216 | } |
217 | ||
218 | if (ip->i_mount->m_flags & XFS_MOUNT_BARRIER) { | |
219 | /* | |
220 | * If the log write didn't issue an ordered tag we need | |
221 | * to flush the disk cache for the data device now. | |
222 | */ | |
223 | if (!log_flushed) | |
224 | xfs_blkdev_issue_flush(ip->i_mount->m_ddev_targp); | |
225 | ||
226 | /* | |
227 | * If this inode is on the RT dev we need to flush that | |
228 | * cache as well. | |
229 | */ | |
230 | if (XFS_IS_REALTIME_INODE(ip)) | |
231 | xfs_blkdev_issue_flush(ip->i_mount->m_rtdev_targp); | |
232 | } | |
233 | ||
234 | return -error; | |
235 | } | |
236 | ||
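For context, the datasync argument above comes straight from the VFS: fsync(2) reaches ->fsync() with datasync == 0 and fdatasync(2) with datasync == 1. A minimal userspace sketch (not part of this file) of how that maps onto the branch above:

```c
#include <unistd.h>

/* Sketch: how the two syscalls map onto the datasync parameter above. */
int flush_data(int fd, int need_timestamps)
{
	/*
	 * fdatasync() arrives here as xfs_file_fsync(file, 1): an inode that
	 * is only timestamp-dirty (I_DIRTY_SYNC) skips the inode-core
	 * transaction.  fsync() arrives as xfs_file_fsync(file, 0) and logs
	 * the inode core for timestamp-only changes as well.
	 */
	return need_timestamps ? fsync(fd) : fdatasync(fd);
}
```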
00258e36 CH |
237 | STATIC ssize_t |
238 | xfs_file_aio_read( | |
dda35b8f CH |
239 | struct kiocb *iocb, |
240 | const struct iovec *iovp, | |
00258e36 CH |
241 | unsigned long nr_segs, |
242 | loff_t pos) | |
dda35b8f CH |
243 | { |
244 | struct file *file = iocb->ki_filp; | |
245 | struct inode *inode = file->f_mapping->host; | |
00258e36 CH |
246 | struct xfs_inode *ip = XFS_I(inode); |
247 | struct xfs_mount *mp = ip->i_mount; | |
dda35b8f CH |
248 | size_t size = 0; |
249 | ssize_t ret = 0; | |
00258e36 | 250 | int ioflags = 0; |
dda35b8f CH |
251 | xfs_fsize_t n; |
252 | unsigned long seg; | |
253 | ||
dda35b8f CH |
254 | XFS_STATS_INC(xs_read_calls); |
255 | ||
00258e36 CH |
256 | BUG_ON(iocb->ki_pos != pos); |
257 | ||
258 | if (unlikely(file->f_flags & O_DIRECT)) | |
259 | ioflags |= IO_ISDIRECT; | |
260 | if (file->f_mode & FMODE_NOCMTIME) | |
261 | ioflags |= IO_INVIS; | |
262 | ||
dda35b8f | 263 | /* START copy & waste from filemap.c */ |
00258e36 | 264 | for (seg = 0; seg < nr_segs; seg++) { |
dda35b8f CH |
265 | const struct iovec *iv = &iovp[seg]; |
266 | ||
267 | /* | |
268 | * If any segment has a negative length, or the cumulative | |
269 | * length ever wraps negative then return -EINVAL. | |
270 | */ | |
271 | size += iv->iov_len; | |
272 | if (unlikely((ssize_t)(size|iv->iov_len) < 0)) | |
273 | return XFS_ERROR(-EINVAL); | |
274 | } | |
275 | /* END copy & waste from filemap.c */ | |
276 | ||
277 | if (unlikely(ioflags & IO_ISDIRECT)) { | |
278 | xfs_buftarg_t *target = | |
279 | XFS_IS_REALTIME_INODE(ip) ? | |
280 | mp->m_rtdev_targp : mp->m_ddev_targp; | |
00258e36 | 281 | if ((iocb->ki_pos & target->bt_smask) || |
dda35b8f | 282 | (size & target->bt_smask)) { |
00258e36 CH |
283 | if (iocb->ki_pos == ip->i_size) |
284 | return 0; | |
dda35b8f CH |
285 | return -XFS_ERROR(EINVAL); |
286 | } | |
287 | } | |
288 | ||
00258e36 CH |
289 | n = XFS_MAXIOFFSET(mp) - iocb->ki_pos; |
290 | if (n <= 0 || size == 0) | |
dda35b8f CH |
291 | return 0; |
292 | ||
293 | if (n < size) | |
294 | size = n; | |
295 | ||
296 | if (XFS_FORCED_SHUTDOWN(mp)) | |
297 | return -EIO; | |
298 | ||
dda35b8f | 299 | if (unlikely(ioflags & IO_ISDIRECT)) { |
487f84f3 DC |
300 | xfs_rw_ilock(ip, XFS_IOLOCK_EXCL); |
301 | ||
00258e36 CH |
302 | if (inode->i_mapping->nrpages) { |
303 | ret = -xfs_flushinval_pages(ip, | |
304 | (iocb->ki_pos & PAGE_CACHE_MASK), | |
305 | -1, FI_REMAPF_LOCKED); | |
487f84f3 DC |
306 | if (ret) { |
307 | xfs_rw_iunlock(ip, XFS_IOLOCK_EXCL); | |
308 | return ret; | |
309 | } | |
00258e36 | 310 | } |
487f84f3 DC |
311 | xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL); |
312 | } else | |
313 | xfs_rw_ilock(ip, XFS_IOLOCK_SHARED); | |
dda35b8f | 314 | |
00258e36 | 315 | trace_xfs_file_read(ip, size, iocb->ki_pos, ioflags); |
dda35b8f | 316 | |
00258e36 | 317 | ret = generic_file_aio_read(iocb, iovp, nr_segs, iocb->ki_pos); |
dda35b8f CH |
318 | if (ret > 0) |
319 | XFS_STATS_ADD(xs_read_bytes, ret); | |
320 | ||
487f84f3 | 321 | xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED); |
dda35b8f CH |
322 | return ret; |
323 | } | |
324 | ||
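The bt_smask test above is what makes misaligned O_DIRECT reads fail with EINVAL. A hedged userspace sketch (not part of this file; the 512-byte sector size is an assumption, real code should query the device):

```c
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdlib.h>
#include <unistd.h>

/*
 * Sketch: keep the offset, the length and (for the lower layers) the
 * buffer itself sector-aligned so the checks in xfs_file_aio_read pass.
 */
ssize_t direct_read(const char *path, off_t offset, size_t len)
{
	void *buf = NULL;
	ssize_t ret = -1;
	int fd = open(path, O_RDONLY | O_DIRECT);

	if (fd < 0)
		return -1;
	if (posix_memalign(&buf, 512, len) == 0)
		ret = pread(fd, buf, len, offset);  /* offset/len: sector multiples */
	free(buf);
	close(fd);
	return ret;
}
```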
00258e36 CH |
325 | STATIC ssize_t |
326 | xfs_file_splice_read( | |
dda35b8f CH |
327 | struct file *infilp, |
328 | loff_t *ppos, | |
329 | struct pipe_inode_info *pipe, | |
330 | size_t count, | |
00258e36 | 331 | unsigned int flags) |
dda35b8f | 332 | { |
00258e36 | 333 | struct xfs_inode *ip = XFS_I(infilp->f_mapping->host); |
00258e36 | 334 | int ioflags = 0; |
dda35b8f CH |
335 | ssize_t ret; |
336 | ||
337 | XFS_STATS_INC(xs_read_calls); | |
00258e36 CH |
338 | |
339 | if (infilp->f_mode & FMODE_NOCMTIME) | |
340 | ioflags |= IO_INVIS; | |
341 | ||
dda35b8f CH |
342 | if (XFS_FORCED_SHUTDOWN(ip->i_mount)) |
343 | return -EIO; | |
344 | ||
487f84f3 | 345 | xfs_rw_ilock(ip, XFS_IOLOCK_SHARED); |
dda35b8f | 346 | |
dda35b8f CH |
347 | trace_xfs_file_splice_read(ip, count, *ppos, ioflags); |
348 | ||
349 | ret = generic_file_splice_read(infilp, ppos, pipe, count, flags); | |
350 | if (ret > 0) | |
351 | XFS_STATS_ADD(xs_read_bytes, ret); | |
352 | ||
487f84f3 | 353 | xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED); |
dda35b8f CH |
354 | return ret; |
355 | } | |
356 | ||
edafb6da DC |
357 | STATIC void |
358 | xfs_aio_write_isize_update( | |
359 | struct inode *inode, | |
360 | loff_t *ppos, | |
361 | ssize_t bytes_written) | |
362 | { | |
363 | struct xfs_inode *ip = XFS_I(inode); | |
364 | xfs_fsize_t isize = i_size_read(inode); | |
365 | ||
366 | if (bytes_written > 0) | |
367 | XFS_STATS_ADD(xs_write_bytes, bytes_written); | |
368 | ||
369 | if (unlikely(bytes_written < 0 && bytes_written != -EFAULT && | |
370 | *ppos > isize)) | |
371 | *ppos = isize; | |
372 | ||
373 | if (*ppos > ip->i_size) { | |
487f84f3 | 374 | xfs_rw_ilock(ip, XFS_ILOCK_EXCL); |
edafb6da DC |
375 | if (*ppos > ip->i_size) |
376 | ip->i_size = *ppos; | |
487f84f3 | 377 | xfs_rw_iunlock(ip, XFS_ILOCK_EXCL); |
edafb6da DC |
378 | } |
379 | } | |
380 | ||
4c5cfd1b DC |
381 | /* |
382 | * If this was a direct or synchronous I/O that failed (such as ENOSPC) then | |
383 | * part of the I/O may have been written to disk before the error occurred. In
384 | * this case the on-disk file size may have been adjusted beyond the in-memory | |
385 | * file size and now needs to be truncated back. | |
386 | */ | |
387 | STATIC void | |
388 | xfs_aio_write_newsize_update( | |
389 | struct xfs_inode *ip) | |
390 | { | |
391 | if (ip->i_new_size) { | |
487f84f3 | 392 | xfs_rw_ilock(ip, XFS_ILOCK_EXCL); |
4c5cfd1b DC |
393 | ip->i_new_size = 0; |
394 | if (ip->i_d.di_size > ip->i_size) | |
395 | ip->i_d.di_size = ip->i_size; | |
487f84f3 | 396 | xfs_rw_iunlock(ip, XFS_ILOCK_EXCL); |
4c5cfd1b DC |
397 | } |
398 | } | |
399 | ||
487f84f3 DC |
400 | /* |
401 | * xfs_file_splice_write() does not use xfs_rw_ilock() because | |
402 | * generic_file_splice_write() takes the i_mutex itself. This, in theory, | |
403 | * could cause lock inversions between the aio_write path and the splice path
404 | * if someone is doing concurrent splice(2) based writes and write(2) based | |
405 | * writes to the same inode. The only real way to fix this is to re-implement | |
406 | * the generic code here with correct locking orders. | |
407 | */ | |
00258e36 CH |
408 | STATIC ssize_t |
409 | xfs_file_splice_write( | |
dda35b8f CH |
410 | struct pipe_inode_info *pipe, |
411 | struct file *outfilp, | |
412 | loff_t *ppos, | |
413 | size_t count, | |
00258e36 | 414 | unsigned int flags) |
dda35b8f | 415 | { |
dda35b8f | 416 | struct inode *inode = outfilp->f_mapping->host; |
00258e36 | 417 | struct xfs_inode *ip = XFS_I(inode); |
edafb6da | 418 | xfs_fsize_t new_size; |
00258e36 CH |
419 | int ioflags = 0; |
420 | ssize_t ret; | |
dda35b8f CH |
421 | |
422 | XFS_STATS_INC(xs_write_calls); | |
00258e36 CH |
423 | |
424 | if (outfilp->f_mode & FMODE_NOCMTIME) | |
425 | ioflags |= IO_INVIS; | |
426 | ||
dda35b8f CH |
427 | if (XFS_FORCED_SHUTDOWN(ip->i_mount)) |
428 | return -EIO; | |
429 | ||
430 | xfs_ilock(ip, XFS_IOLOCK_EXCL); | |
431 | ||
dda35b8f CH |
432 | new_size = *ppos + count; |
433 | ||
434 | xfs_ilock(ip, XFS_ILOCK_EXCL); | |
435 | if (new_size > ip->i_size) | |
436 | ip->i_new_size = new_size; | |
437 | xfs_iunlock(ip, XFS_ILOCK_EXCL); | |
438 | ||
439 | trace_xfs_file_splice_write(ip, count, *ppos, ioflags); | |
440 | ||
441 | ret = generic_file_splice_write(pipe, outfilp, ppos, count, flags); | |
dda35b8f | 442 | |
edafb6da | 443 | xfs_aio_write_isize_update(inode, ppos, ret); |
4c5cfd1b | 444 | xfs_aio_write_newsize_update(ip); |
dda35b8f CH |
445 | xfs_iunlock(ip, XFS_IOLOCK_EXCL); |
446 | return ret; | |
447 | } | |
448 | ||
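To make the inversion described in the comment above xfs_file_splice_write() concrete, here is the acquisition order each path ends up with (an illustration, not part of the file):

```c
/*
 * write(2)/aio path:  xfs_rw_ilock(ip, XFS_IOLOCK_EXCL)
 *                       -> mutex_lock(&VFS_I(ip)->i_mutex)    i_mutex first
 *                       -> xfs_ilock(ip, XFS_IOLOCK_EXCL)     iolock second
 *
 * splice(2) path:     xfs_ilock(ip, XFS_IOLOCK_EXCL)          iolock first
 *                     generic_file_splice_write()
 *                       -> mutex_lock(&inode->i_mutex)        i_mutex second
 *
 * Run concurrently against the same inode, these opposite orders are the
 * theoretical deadlock the comment warns about.
 */
```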
449 | /* | |
450 | * This routine is called to handle zeroing any space in the last | |
451 | * block of the file that is beyond the EOF. We do this since the | |
452 | * size is being increased without writing anything to that block | |
453 | * and we don't want anyone to read the garbage on the disk. | |
454 | */ | |
455 | STATIC int /* error (positive) */ | |
456 | xfs_zero_last_block( | |
457 | xfs_inode_t *ip, | |
458 | xfs_fsize_t offset, | |
459 | xfs_fsize_t isize) | |
460 | { | |
461 | xfs_fileoff_t last_fsb; | |
462 | xfs_mount_t *mp = ip->i_mount; | |
463 | int nimaps; | |
464 | int zero_offset; | |
465 | int zero_len; | |
466 | int error = 0; | |
467 | xfs_bmbt_irec_t imap; | |
468 | ||
469 | ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); | |
470 | ||
471 | zero_offset = XFS_B_FSB_OFFSET(mp, isize); | |
472 | if (zero_offset == 0) { | |
473 | /* | |
474 | * There are no extra bytes in the last block on disk to | |
475 | * zero, so return. | |
476 | */ | |
477 | return 0; | |
478 | } | |
479 | ||
480 | last_fsb = XFS_B_TO_FSBT(mp, isize); | |
481 | nimaps = 1; | |
482 | error = xfs_bmapi(NULL, ip, last_fsb, 1, 0, NULL, 0, &imap, | |
b4e9181e | 483 | &nimaps, NULL); |
dda35b8f CH |
484 | if (error) { |
485 | return error; | |
486 | } | |
487 | ASSERT(nimaps > 0); | |
488 | /* | |
489 | * If the block underlying isize is just a hole, then there | |
490 | * is nothing to zero. | |
491 | */ | |
492 | if (imap.br_startblock == HOLESTARTBLOCK) { | |
493 | return 0; | |
494 | } | |
495 | /* | |
496 | * Zero the part of the last block beyond the EOF, and write it | |
497 | * out sync. We need to drop the ilock while we do this so we | |
498 | * don't deadlock when the buffer cache calls back to us. | |
499 | */ | |
500 | xfs_iunlock(ip, XFS_ILOCK_EXCL); | |
501 | ||
502 | zero_len = mp->m_sb.sb_blocksize - zero_offset; | |
503 | if (isize + zero_len > offset) | |
504 | zero_len = offset - isize; | |
505 | error = xfs_iozero(ip, isize, zero_len); | |
506 | ||
507 | xfs_ilock(ip, XFS_ILOCK_EXCL); | |
508 | ASSERT(error >= 0); | |
509 | return error; | |
510 | } | |
511 | ||
512 | /* | |
513 | * Zero any on disk space between the current EOF and the new, | |
514 | * larger EOF. This handles the normal case of zeroing the remainder | |
515 | * of the last block in the file and the unusual case of zeroing blocks | |
516 | * out beyond the size of the file. This second case only happens | |
517 | * with fixed size extents and when the system crashes before the inode | |
518 | * size was updated but after blocks were allocated. If fill is set, | |
519 | * then any holes in the range are filled and zeroed. If not, the holes | |
520 | * are left alone as holes. | |
521 | */ | |
522 | ||
523 | int /* error (positive) */ | |
524 | xfs_zero_eof( | |
525 | xfs_inode_t *ip, | |
526 | xfs_off_t offset, /* starting I/O offset */ | |
527 | xfs_fsize_t isize) /* current inode size */ | |
528 | { | |
529 | xfs_mount_t *mp = ip->i_mount; | |
530 | xfs_fileoff_t start_zero_fsb; | |
531 | xfs_fileoff_t end_zero_fsb; | |
532 | xfs_fileoff_t zero_count_fsb; | |
533 | xfs_fileoff_t last_fsb; | |
534 | xfs_fileoff_t zero_off; | |
535 | xfs_fsize_t zero_len; | |
536 | int nimaps; | |
537 | int error = 0; | |
538 | xfs_bmbt_irec_t imap; | |
539 | ||
540 | ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL)); | |
541 | ASSERT(offset > isize); | |
542 | ||
543 | /* | |
544 | * First handle zeroing the block on which isize resides. | |
545 | * We only zero a part of that block so it is handled specially. | |
546 | */ | |
547 | error = xfs_zero_last_block(ip, offset, isize); | |
548 | if (error) { | |
549 | ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL)); | |
550 | return error; | |
551 | } | |
552 | ||
553 | /* | |
554 | * Calculate the range between the new size and the old | |
555 | * where blocks needing to be zeroed may exist. To get the | |
556 | * block where the last byte in the file currently resides, | |
557 | * we need to subtract one from the size and truncate back | |
558 | * to a block boundary. We subtract 1 in case the size is | |
559 | * exactly on a block boundary. | |
560 | */ | |
561 | last_fsb = isize ? XFS_B_TO_FSBT(mp, isize - 1) : (xfs_fileoff_t)-1; | |
562 | start_zero_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)isize); | |
563 | end_zero_fsb = XFS_B_TO_FSBT(mp, offset - 1); | |
564 | ASSERT((xfs_sfiloff_t)last_fsb < (xfs_sfiloff_t)start_zero_fsb); | |
565 | if (last_fsb == end_zero_fsb) { | |
566 | /* | |
567 | * The size was only incremented on its last block. | |
568 | * We took care of that above, so just return. | |
569 | */ | |
570 | return 0; | |
571 | } | |
572 | ||
573 | ASSERT(start_zero_fsb <= end_zero_fsb); | |
574 | while (start_zero_fsb <= end_zero_fsb) { | |
575 | nimaps = 1; | |
576 | zero_count_fsb = end_zero_fsb - start_zero_fsb + 1; | |
577 | error = xfs_bmapi(NULL, ip, start_zero_fsb, zero_count_fsb, | |
b4e9181e | 578 | 0, NULL, 0, &imap, &nimaps, NULL); |
dda35b8f CH |
579 | if (error) { |
580 | ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL)); | |
581 | return error; | |
582 | } | |
583 | ASSERT(nimaps > 0); | |
584 | ||
585 | if (imap.br_state == XFS_EXT_UNWRITTEN || | |
586 | imap.br_startblock == HOLESTARTBLOCK) { | |
587 | /* | |
588 | * This loop handles initializing pages that were | |
589 | * partially initialized by the code below this | |
590 | * loop. It basically zeroes the part of the page | |
591 | * that sits on a hole and sets the page as P_HOLE | |
592 | * and calls remapf if it is a mapped file. | |
593 | */ | |
594 | start_zero_fsb = imap.br_startoff + imap.br_blockcount; | |
595 | ASSERT(start_zero_fsb <= (end_zero_fsb + 1)); | |
596 | continue; | |
597 | } | |
598 | ||
599 | /* | |
600 | * There are blocks we need to zero. | |
601 | * Drop the inode lock while we're doing the I/O. | |
602 | * We'll still have the iolock to protect us. | |
603 | */ | |
604 | xfs_iunlock(ip, XFS_ILOCK_EXCL); | |
605 | ||
606 | zero_off = XFS_FSB_TO_B(mp, start_zero_fsb); | |
607 | zero_len = XFS_FSB_TO_B(mp, imap.br_blockcount); | |
608 | ||
609 | if ((zero_off + zero_len) > offset) | |
610 | zero_len = offset - zero_off; | |
611 | ||
612 | error = xfs_iozero(ip, zero_off, zero_len); | |
613 | if (error) { | |
614 | goto out_lock; | |
615 | } | |
616 | ||
617 | start_zero_fsb = imap.br_startoff + imap.br_blockcount; | |
618 | ASSERT(start_zero_fsb <= (end_zero_fsb + 1)); | |
619 | ||
620 | xfs_ilock(ip, XFS_ILOCK_EXCL); | |
621 | } | |
622 | ||
623 | return 0; | |
624 | ||
625 | out_lock: | |
626 | xfs_ilock(ip, XFS_ILOCK_EXCL); | |
627 | ASSERT(error >= 0); | |
628 | return error; | |
629 | } | |
630 | ||
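A worked example (not from the source, assuming 4096-byte blocks): extending a file whose size is 6000 bytes with a write at offset 10000 first has xfs_zero_last_block() zero bytes 6000-8191 of the partially used block, then the loop above maps block 2 (bytes 8192-12287), finds it allocated and written, and calls xfs_iozero() for bytes 8192-9999 only, since zero_len is clamped to stop at the new write offset; holes and unwritten extents in that range would simply be skipped.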
4d8d1581 DC |
631 | /* |
632 | * Common pre-write limit and setup checks. | |
633 | * | |
634 | * Returns with iolock held according to @iolock. | |
635 | */ | |
636 | STATIC ssize_t | |
637 | xfs_file_aio_write_checks( | |
638 | struct file *file, | |
639 | loff_t *pos, | |
640 | size_t *count, | |
641 | int *iolock) | |
642 | { | |
643 | struct inode *inode = file->f_mapping->host; | |
644 | struct xfs_inode *ip = XFS_I(inode); | |
645 | xfs_fsize_t new_size; | |
646 | int error = 0; | |
647 | ||
648 | error = generic_write_checks(file, pos, count, S_ISBLK(inode->i_mode)); | |
649 | if (error) { | |
650 | xfs_rw_iunlock(ip, XFS_ILOCK_EXCL | *iolock); | |
651 | *iolock = 0; | |
652 | return error; | |
653 | } | |
654 | ||
655 | new_size = *pos + *count; | |
656 | if (new_size > ip->i_size) | |
657 | ip->i_new_size = new_size; | |
658 | ||
659 | if (likely(!(file->f_mode & FMODE_NOCMTIME))) | |
660 | file_update_time(file); | |
661 | ||
662 | /* | |
663 | * If the offset is beyond the size of the file, we need to zero any | |
664 | * blocks that fall between the existing EOF and the start of this | |
665 | * write. | |
666 | */ | |
667 | if (*pos > ip->i_size) | |
668 | error = -xfs_zero_eof(ip, *pos, ip->i_size); | |
669 | ||
670 | xfs_rw_iunlock(ip, XFS_ILOCK_EXCL); | |
671 | if (error) | |
672 | return error; | |
673 | ||
674 | /* | |
675 | * If we're writing the file then make sure to clear the setuid and | |
676 | * setgid bits if the process is not being run by root. This keeps | |
677 | * people from modifying setuid and setgid binaries. | |
678 | */ | |
679 | return file_remove_suid(file); | |
680 | ||
681 | } | |
682 | ||
f0d26e86 DC |
683 | /* |
684 | * xfs_file_dio_aio_write - handle direct IO writes | |
685 | * | |
686 | * Lock the inode appropriately to prepare for and issue a direct IO write. | |
eda77982 | 687 | * By separating it from the buffered write path we remove all the tricky to |
f0d26e86 DC |
688 | * follow locking changes and looping. |
689 | * | |
eda77982 DC |
690 | * If there are cached pages or we're extending the file, we need IOLOCK_EXCL |
691 | * until we're sure the bytes at the new EOF have been zeroed and/or the cached | |
692 | * pages are flushed out. | |
693 | * | |
694 | * In most cases the direct IO writes will be done holding IOLOCK_SHARED | |
695 | * allowing them to be done in parallel with reads and other direct IO writes. | |
696 | * However, if the IO is not aligned to filesystem blocks, the direct IO layer | |
697 | * needs to do sub-block zeroing and that requires serialisation against other | |
698 | * direct IOs to the same block. In this case we need to serialise the | |
699 | * submission of the unaligned IOs so that we don't get racing block zeroing in | |
700 | * the dio layer. To avoid the problem with aio, we also need to wait for | |
701 | * outstanding IOs to complete so that unwritten extent conversion is completed | |
702 | * before we try to map the overlapping block. This is currently implemented by | |
703 | * hitting it with a big hammer (i.e. xfs_ioend_wait()). | |
704 | * | |
f0d26e86 DC |
705 | * Returns with locks held indicated by @iolock and errors indicated by |
706 | * negative return values. | |
707 | */ | |
708 | STATIC ssize_t | |
709 | xfs_file_dio_aio_write( | |
710 | struct kiocb *iocb, | |
711 | const struct iovec *iovp, | |
712 | unsigned long nr_segs, | |
713 | loff_t pos, | |
714 | size_t ocount, | |
715 | int *iolock) | |
716 | { | |
717 | struct file *file = iocb->ki_filp; | |
718 | struct address_space *mapping = file->f_mapping; | |
719 | struct inode *inode = mapping->host; | |
720 | struct xfs_inode *ip = XFS_I(inode); | |
721 | struct xfs_mount *mp = ip->i_mount; | |
722 | ssize_t ret = 0; | |
f0d26e86 | 723 | size_t count = ocount; |
eda77982 | 724 | int unaligned_io = 0; |
f0d26e86 DC |
725 | struct xfs_buftarg *target = XFS_IS_REALTIME_INODE(ip) ? |
726 | mp->m_rtdev_targp : mp->m_ddev_targp; | |
727 | ||
728 | *iolock = 0; | |
729 | if ((pos & target->bt_smask) || (count & target->bt_smask)) | |
730 | return -XFS_ERROR(EINVAL); | |
731 | ||
eda77982 DC |
732 | if ((pos & mp->m_blockmask) || ((pos + count) & mp->m_blockmask)) |
733 | unaligned_io = 1; | |
734 | ||
735 | if (unaligned_io || mapping->nrpages || pos > ip->i_size) | |
f0d26e86 DC |
736 | *iolock = XFS_IOLOCK_EXCL; |
737 | else | |
738 | *iolock = XFS_IOLOCK_SHARED; | |
739 | xfs_rw_ilock(ip, XFS_ILOCK_EXCL | *iolock); | |
740 | ||
4d8d1581 DC |
741 | ret = xfs_file_aio_write_checks(file, &pos, &count, iolock); |
742 | if (ret) | |
f0d26e86 DC |
743 | return ret; |
744 | ||
745 | if (mapping->nrpages) { | |
746 | WARN_ON(*iolock != XFS_IOLOCK_EXCL); | |
747 | ret = -xfs_flushinval_pages(ip, (pos & PAGE_CACHE_MASK), -1, | |
748 | FI_REMAPF_LOCKED); | |
749 | if (ret) | |
750 | return ret; | |
751 | } | |
752 | ||
eda77982 DC |
753 | /* |
754 | * If we are doing unaligned IO, wait for all other IO to drain, | |
755 | * otherwise demote the lock if we had to flush cached pages | |
756 | */ | |
757 | if (unaligned_io) | |
758 | xfs_ioend_wait(ip); | |
759 | else if (*iolock == XFS_IOLOCK_EXCL) { | |
f0d26e86 DC |
760 | xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL); |
761 | *iolock = XFS_IOLOCK_SHARED; | |
762 | } | |
763 | ||
764 | trace_xfs_file_direct_write(ip, count, iocb->ki_pos, 0); | |
765 | ret = generic_file_direct_write(iocb, iovp, | |
766 | &nr_segs, pos, &iocb->ki_pos, count, ocount); | |
767 | ||
768 | /* No fallback to buffered IO on errors for XFS. */ | |
769 | ASSERT(ret < 0 || ret == count); | |
770 | return ret; | |
771 | } | |
772 | ||
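For instance (an illustrative case, not from the source, assuming 4096-byte blocks and 512-byte sectors): a direct write of 2048 bytes at offset 512 passes the bt_smask sector check, but 512 & m_blockmask is non-zero, so unaligned_io is set, the iolock is taken exclusively and xfs_ioend_wait() drains in-flight IO before the write is issued; the same write at offset 0 with a 4096-byte length, no cached pages and no file extension would run under IOLOCK_SHARED.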
00258e36 | 773 | STATIC ssize_t |
637bbc75 | 774 | xfs_file_buffered_aio_write( |
dda35b8f CH |
775 | struct kiocb *iocb, |
776 | const struct iovec *iovp, | |
00258e36 | 777 | unsigned long nr_segs, |
637bbc75 DC |
778 | loff_t pos, |
779 | size_t ocount, | |
780 | int *iolock) | |
dda35b8f CH |
781 | { |
782 | struct file *file = iocb->ki_filp; | |
783 | struct address_space *mapping = file->f_mapping; | |
784 | struct inode *inode = mapping->host; | |
00258e36 | 785 | struct xfs_inode *ip = XFS_I(inode); |
637bbc75 DC |
786 | ssize_t ret; |
787 | int enospc = 0; | |
637bbc75 | 788 | size_t count = ocount; |
dda35b8f | 789 | |
637bbc75 DC |
790 | *iolock = XFS_IOLOCK_EXCL; |
791 | xfs_rw_ilock(ip, XFS_ILOCK_EXCL | *iolock); | |
dda35b8f | 792 | |
4d8d1581 DC |
793 | ret = xfs_file_aio_write_checks(file, &pos, &count, iolock); |
794 | if (ret) | |
637bbc75 | 795 | return ret; |
dda35b8f CH |
796 | |
797 | /* We can write back this queue in page reclaim */ | |
798 | current->backing_dev_info = mapping->backing_dev_info; | |
799 | ||
dda35b8f | 800 | write_retry: |
637bbc75 DC |
801 | trace_xfs_file_buffered_write(ip, count, iocb->ki_pos, 0); |
802 | ret = generic_file_buffered_write(iocb, iovp, nr_segs, | |
803 | pos, &iocb->ki_pos, count, ret); | |
804 | /* | |
805 | * if we just got an ENOSPC, flush the inode now we aren't holding any | |
806 | * page locks and retry *once* | |
807 | */ | |
808 | if (ret == -ENOSPC && !enospc) { | |
809 | ret = -xfs_flush_pages(ip, 0, -1, 0, FI_NONE); | |
810 | if (ret) | |
811 | return ret; | |
812 | enospc = 1; | |
813 | goto write_retry; | |
dda35b8f | 814 | } |
dda35b8f | 815 | current->backing_dev_info = NULL; |
637bbc75 DC |
816 | return ret; |
817 | } | |
818 | ||
819 | STATIC ssize_t | |
820 | xfs_file_aio_write( | |
821 | struct kiocb *iocb, | |
822 | const struct iovec *iovp, | |
823 | unsigned long nr_segs, | |
824 | loff_t pos) | |
825 | { | |
826 | struct file *file = iocb->ki_filp; | |
827 | struct address_space *mapping = file->f_mapping; | |
828 | struct inode *inode = mapping->host; | |
829 | struct xfs_inode *ip = XFS_I(inode); | |
830 | ssize_t ret; | |
831 | int iolock; | |
832 | size_t ocount = 0; | |
833 | ||
834 | XFS_STATS_INC(xs_write_calls); | |
835 | ||
836 | BUG_ON(iocb->ki_pos != pos); | |
837 | ||
838 | ret = generic_segment_checks(iovp, &nr_segs, &ocount, VERIFY_READ); | |
839 | if (ret) | |
840 | return ret; | |
841 | ||
842 | if (ocount == 0) | |
843 | return 0; | |
844 | ||
845 | xfs_wait_for_freeze(ip->i_mount, SB_FREEZE_WRITE); | |
846 | ||
847 | if (XFS_FORCED_SHUTDOWN(ip->i_mount)) | |
848 | return -EIO; | |
849 | ||
850 | if (unlikely(file->f_flags & O_DIRECT)) | |
851 | ret = xfs_file_dio_aio_write(iocb, iovp, nr_segs, pos, | |
852 | ocount, &iolock); | |
853 | else | |
854 | ret = xfs_file_buffered_aio_write(iocb, iovp, nr_segs, pos, | |
855 | ocount, &iolock); | |
dda35b8f | 856 | |
edafb6da | 857 | xfs_aio_write_isize_update(inode, &iocb->ki_pos, ret); |
dda35b8f | 858 | |
dda35b8f | 859 | if (ret <= 0) |
637bbc75 | 860 | goto out_unlock; |
dda35b8f | 861 | |
dda35b8f CH |
862 | /* Handle various SYNC-type writes */ |
863 | if ((file->f_flags & O_DSYNC) || IS_SYNC(inode)) { | |
864 | loff_t end = pos + ret - 1; | |
a363f0c2 | 865 | int error, error2; |
dda35b8f | 866 | |
487f84f3 | 867 | xfs_rw_iunlock(ip, iolock); |
a363f0c2 | 868 | error = filemap_write_and_wait_range(mapping, pos, end); |
487f84f3 | 869 | xfs_rw_ilock(ip, iolock); |
dda35b8f | 870 | |
7ea80859 | 871 | error2 = -xfs_file_fsync(file, |
fd3200be | 872 | (file->f_flags & __O_SYNC) ? 0 : 1); |
a363f0c2 DC |
873 | if (error) |
874 | ret = error; | |
875 | else if (error2) | |
876 | ret = error2; | |
dda35b8f CH |
877 | } |
878 | ||
637bbc75 | 879 | out_unlock: |
4c5cfd1b | 880 | xfs_aio_write_newsize_update(ip); |
487f84f3 | 881 | xfs_rw_iunlock(ip, iolock); |
a363f0c2 | 882 | return ret; |
dda35b8f CH |
883 | } |
884 | ||
1da177e4 | 885 | STATIC int |
3562fd45 | 886 | xfs_file_open( |
1da177e4 | 887 | struct inode *inode, |
f999a5bf | 888 | struct file *file) |
1da177e4 | 889 | { |
f999a5bf | 890 | if (!(file->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS) |
1da177e4 | 891 | return -EFBIG; |
f999a5bf CH |
892 | if (XFS_FORCED_SHUTDOWN(XFS_M(inode->i_sb))) |
893 | return -EIO; | |
894 | return 0; | |
895 | } | |
896 | ||
897 | STATIC int | |
898 | xfs_dir_open( | |
899 | struct inode *inode, | |
900 | struct file *file) | |
901 | { | |
902 | struct xfs_inode *ip = XFS_I(inode); | |
903 | int mode; | |
904 | int error; | |
905 | ||
906 | error = xfs_file_open(inode, file); | |
907 | if (error) | |
908 | return error; | |
909 | ||
910 | /* | |
911 | * If there are any blocks, read-ahead block 0 as we're almost | |
912 | * certain to have the next operation be a read there. | |
913 | */ | |
914 | mode = xfs_ilock_map_shared(ip); | |
915 | if (ip->i_d.di_nextents > 0) | |
916 | xfs_da_reada_buf(NULL, ip, 0, XFS_DATA_FORK); | |
917 | xfs_iunlock(ip, mode); | |
918 | return 0; | |
1da177e4 LT |
919 | } |
920 | ||
1da177e4 | 921 | STATIC int |
3562fd45 | 922 | xfs_file_release( |
1da177e4 LT |
923 | struct inode *inode, |
924 | struct file *filp) | |
925 | { | |
739bfb2a | 926 | return -xfs_release(XFS_I(inode)); |
1da177e4 LT |
927 | } |
928 | ||
1da177e4 | 929 | STATIC int |
3562fd45 | 930 | xfs_file_readdir( |
1da177e4 LT |
931 | struct file *filp, |
932 | void *dirent, | |
933 | filldir_t filldir) | |
934 | { | |
051e7cd4 | 935 | struct inode *inode = filp->f_path.dentry->d_inode; |
739bfb2a | 936 | xfs_inode_t *ip = XFS_I(inode); |
051e7cd4 CH |
937 | int error; |
938 | size_t bufsize; | |
939 | ||
940 | /* | |
941 | * The Linux API doesn't pass the total size of the buffer
942 | * we read into down to the filesystem. With the filldir concept | |
943 | * it's not needed for correct information, but the XFS dir2 leaf | |
944 | * code wants an estimate of the buffer size to calculate its
945 | * readahead window and size the buffers used for mapping to | |
946 | * physical blocks. | |
947 | * | |
948 | * Try to give it an estimate that's good enough, maybe at some | |
949 | * point we can change the ->readdir prototype to include the | |
a9cc799e | 950 | * buffer size. For now we use the current glibc buffer size. |
051e7cd4 | 951 | */ |
a9cc799e | 952 | bufsize = (size_t)min_t(loff_t, 32768, ip->i_d.di_size); |
051e7cd4 | 953 | |
739bfb2a | 954 | error = xfs_readdir(ip, dirent, bufsize, |
051e7cd4 CH |
955 | (xfs_off_t *)&filp->f_pos, filldir); |
956 | if (error) | |
957 | return -error; | |
958 | return 0; | |
1da177e4 LT |
959 | } |
960 | ||
1da177e4 | 961 | STATIC int |
3562fd45 | 962 | xfs_file_mmap( |
1da177e4 LT |
963 | struct file *filp, |
964 | struct vm_area_struct *vma) | |
965 | { | |
3562fd45 | 966 | vma->vm_ops = &xfs_file_vm_ops; |
d0217ac0 | 967 | vma->vm_flags |= VM_CAN_NONLINEAR; |
6fac0cb4 | 968 | |
fbc1462b | 969 | file_accessed(filp); |
1da177e4 LT |
970 | return 0; |
971 | } | |
972 | ||
4f57dbc6 DC |
973 | /* |
974 | * An mmap()d file has taken a write protection fault and is being made
975 | * writable. We can set the page state up correctly for a writable | |
976 | * page, which means we can do correct delalloc accounting (ENOSPC | |
977 | * checking!) and unwritten extent mapping. | |
978 | */ | |
979 | STATIC int | |
980 | xfs_vm_page_mkwrite( | |
981 | struct vm_area_struct *vma, | |
c2ec175c | 982 | struct vm_fault *vmf) |
4f57dbc6 | 983 | { |
c2ec175c | 984 | return block_page_mkwrite(vma, vmf, xfs_get_blocks); |
4f57dbc6 DC |
985 | } |
986 | ||
4b6f5d20 | 987 | const struct file_operations xfs_file_operations = { |
1da177e4 LT |
988 | .llseek = generic_file_llseek, |
989 | .read = do_sync_read, | |
bb3f724e | 990 | .write = do_sync_write, |
3562fd45 NS |
991 | .aio_read = xfs_file_aio_read, |
992 | .aio_write = xfs_file_aio_write, | |
1b895840 NS |
993 | .splice_read = xfs_file_splice_read, |
994 | .splice_write = xfs_file_splice_write, | |
3562fd45 | 995 | .unlocked_ioctl = xfs_file_ioctl, |
1da177e4 | 996 | #ifdef CONFIG_COMPAT |
3562fd45 | 997 | .compat_ioctl = xfs_file_compat_ioctl, |
1da177e4 | 998 | #endif |
3562fd45 NS |
999 | .mmap = xfs_file_mmap, |
1000 | .open = xfs_file_open, | |
1001 | .release = xfs_file_release, | |
1002 | .fsync = xfs_file_fsync, | |
1da177e4 LT |
1003 | }; |
1004 | ||
4b6f5d20 | 1005 | const struct file_operations xfs_dir_file_operations = { |
f999a5bf | 1006 | .open = xfs_dir_open, |
1da177e4 | 1007 | .read = generic_read_dir, |
3562fd45 | 1008 | .readdir = xfs_file_readdir, |
59af1584 | 1009 | .llseek = generic_file_llseek, |
3562fd45 | 1010 | .unlocked_ioctl = xfs_file_ioctl, |
d3870398 | 1011 | #ifdef CONFIG_COMPAT |
3562fd45 | 1012 | .compat_ioctl = xfs_file_compat_ioctl, |
d3870398 | 1013 | #endif |
3562fd45 | 1014 | .fsync = xfs_file_fsync, |
1da177e4 LT |
1015 | }; |
1016 | ||
f0f37e2f | 1017 | static const struct vm_operations_struct xfs_file_vm_ops = { |
54cb8821 | 1018 | .fault = filemap_fault, |
4f57dbc6 | 1019 | .page_mkwrite = xfs_vm_page_mkwrite, |
6fac0cb4 | 1020 | }; |