Commit | Line | Data |
---|---|---|
f79e2abb AM |
1 | /* |
2 | * High-level sync()-related operations | |
3 | */ | |
4 | ||
5 | #include <linux/kernel.h> | |
6 | #include <linux/file.h> | |
7 | #include <linux/fs.h> | |
5a0e3ad6 | 8 | #include <linux/slab.h> |
630d9c47 | 9 | #include <linux/export.h> |
b7ed78f5 | 10 | #include <linux/namei.h> |
914e2637 | 11 | #include <linux/sched.h> |
f79e2abb AM |
12 | #include <linux/writeback.h> |
13 | #include <linux/syscalls.h> | |
14 | #include <linux/linkage.h> | |
15 | #include <linux/pagemap.h> | |
cf9a2ae8 | 16 | #include <linux/quotaops.h> |
5129a469 | 17 | #include <linux/backing-dev.h> |
5a3e5cb8 | 18 | #include "internal.h" |
f79e2abb AM |
19 | |
20 | #define VALID_FLAGS (SYNC_FILE_RANGE_WAIT_BEFORE|SYNC_FILE_RANGE_WRITE| \ | |
21 | SYNC_FILE_RANGE_WAIT_AFTER) | |
22 | ||
1cac41cb MB |
23 | /* Interruptible sync for Samsung Mobile Device */ |
24 | #ifdef CONFIG_INTERRUPTIBLE_SYNC | |
25 | ||
26 | #include <linux/workqueue.h> | |
27 | #include <linux/suspend.h> | |
28 | #include <linux/delay.h> | |
29 | ||
30 | //#define CONFIG_INTR_SYNC_DEBUG | |
31 | ||
32 | #ifdef CONFIG_INTR_SYNC_DEBUG | |
33 | #define dbg_print printk | |
34 | #else | |
35 | #define dbg_print(...) | |
36 | #endif | |
37 | ||
/* Lifecycle states of an interruptible_sync_work instance. */
enum {
	INTR_SYNC_STATE_IDLE = 0,	/* no sync queued or running */
	INTR_SYNC_STATE_QUEUED,		/* queued on intr_sync_wq, not yet started */
	INTR_SYNC_STATE_RUNNING,	/* do_intr_sync() is executing */
	INTR_SYNC_STATE_MAX
};
44 | ||
/* Per-slot bookkeeping for one interruptible sync request.
 * Two instances exist (intr_sync_work[2]) so a new request can be
 * queued while the other slot is still running. */
struct interruptible_sync_work {
	int id;			/* index of this slot in intr_sync_work[] (0 or 1) */
	int ret;		/* sys_sync() result of the most recent run */
	unsigned int waiter;	/* number of threads waiting on @done */
	unsigned int state;	/* INTR_SYNC_STATE_* */
	unsigned long version;	/* incremented when a run finishes; lets waiters
				 * detect completion by comparing snapshots */
	spinlock_t lock;	/* protects all fields above */
	struct completion done;	/* completed (complete_all) at end of a run */
	struct work_struct work;	/* runs do_intr_sync() on intr_sync_wq */
};
55 | ||
/* Initially, intr_sync_work has zero pending */
static struct interruptible_sync_work intr_sync_work[2];

/* Index of the slot most recently marked RUNNING (set in do_intr_sync();
 * intr_sync() always targets the opposite slot). */
static atomic_t running_work_idx;

/* intr_sync_wq will be created when intr_sync() is called at first time.
 * And it is alive till system shutdown */
static struct workqueue_struct *intr_sync_wq;

/* It prevents double allocation of intr_sync_wq */
static DEFINE_MUTEX(intr_sync_wq_lock);
68 | ||
69 | static inline struct interruptible_sync_work *INTR_SYNC_WORK(struct work_struct *work) | |
70 | { | |
71 | return container_of(work, struct interruptible_sync_work, work); | |
72 | } | |
73 | ||
/* Workqueue handler: run sys_sync() on behalf of intr_sync() callers.
 * Skips the sync entirely if every waiter has already bailed out. */
static void do_intr_sync(struct work_struct *work)
{
	struct interruptible_sync_work *sync_work = INTR_SYNC_WORK(work);
	int ret = 0;
	unsigned int waiter;

	spin_lock(&sync_work->lock);
	/* Publish which slot is running so intr_sync() picks the other one. */
	atomic_set(&running_work_idx, sync_work->id);
	sync_work->state = INTR_SYNC_STATE_RUNNING;
	waiter = sync_work->waiter;
	spin_unlock(&sync_work->lock);

	dbg_print("\nintr_sync: %s: call sys_sync on work[%d]-%ld\n",
			__func__, sync_work->id, sync_work->version);

	/* if no one waits, do not call sync() */
	if (waiter) {
		ret = sys_sync();
		dbg_print("\nintr_sync: %s: done sys_sync on work[%d]-%ld\n",
			__func__, sync_work->id, sync_work->version);
	} else {
		dbg_print("\nintr_sync: %s: cancel,no_wait on work[%d]-%ld\n",
			__func__, sync_work->id, sync_work->version);
	}

	spin_lock(&sync_work->lock);
	/* Bump version before completing so a waiter that misses the
	 * completion wakeup can still detect the finished run by
	 * comparing against its snapshot taken at enqueue time. */
	sync_work->version++;
	sync_work->ret = ret;
	sync_work->state = INTR_SYNC_STATE_IDLE;
	complete_all(&sync_work->done);
	spin_unlock(&sync_work->lock);
}
106 | ||
107 | /* wakeup functions that depend on PM facilities | |
108 | * | |
109 | * struct intr_wakeup_data : wrapper structure for variables for PM | |
110 | * each thread has own instance of it | |
111 | * __prepare_wakeup_event() : prepare and check intr_wakeup_data | |
112 | * __check_wakeup_event() : check wakeup-event with intr_wakeup_data | |
113 | */ | |
struct intr_wakeup_data {
	unsigned int cnt;	/* wakeup-event count snapshot from pm_get_wakeup_count() */
};
117 | ||
118 | static inline int __prepare_wakeup_event(struct intr_wakeup_data *wd) | |
119 | { | |
120 | if (pm_get_wakeup_count(&wd->cnt, false)) | |
121 | return 0; | |
122 | ||
123 | pr_info("intr_sync: detected wakeup events before sync\n"); | |
124 | pm_print_active_wakeup_sources(); | |
125 | return -EBUSY; | |
126 | } | |
127 | ||
128 | static inline int __check_wakeup_event(struct intr_wakeup_data *wd) | |
129 | { | |
130 | unsigned int cnt, no_inpr; | |
131 | ||
132 | no_inpr = pm_get_wakeup_count(&cnt, false); | |
133 | if (no_inpr && (cnt == wd->cnt)) | |
134 | return 0; | |
135 | ||
136 | pr_info("intr_sync: detected wakeup events(no_inpr: %u cnt: %u->%u)\n", | |
137 | no_inpr, wd->cnt, cnt); | |
138 | pm_print_active_wakeup_sources(); | |
139 | return -EBUSY; | |
140 | } | |
141 | ||
142 | /* Interruptible Sync | |
143 | * | |
144 | * intr_sync() is same function as sys_sync() except that it can wakeup. | |
145 | * It's possible because of inter_syncd workqueue. | |
146 | * | |
147 | * If system gets wakeup event while sync_work is running, | |
148 | * just return -EBUSY, otherwise 0. | |
149 | * | |
150 | * If intr_sync() is called again while sync_work is running, it will enqueue | |
151 | * idle sync_work to work_queue and wait the completion of it. | |
152 | * If there is not idle sync_work but queued one, it just increases waiter by 1, | |
153 | * and waits the completion of queued sync_work. | |
154 | * | |
155 | * If you want to know returned value of sys_sync(), | |
156 | * you can get it from the argument, sync_ret | |
157 | */ | |
158 | ||
int intr_sync(int *sync_ret)
{
	int ret;
enqueue_sync_wait:
	/* If the workqueue exists, try to enqueue work and wait */
	if (likely(intr_sync_wq)) {
		struct interruptible_sync_work *sync_work;
		struct intr_wakeup_data wd;
		int work_idx;
		int work_ver;
find_idle:
		/* Target the slot that is NOT currently running. */
		work_idx = !atomic_read(&running_work_idx);
		sync_work = &intr_sync_work[work_idx];

		/* Prepare intr_wakeup_data and check wakeup event:
		 * If a wakeup-event is detected, wake up right now
		 */
		if (__prepare_wakeup_event(&wd)) {
			dbg_print("intr_sync: detect wakeup event "
				"before waiting work[%d]\n", work_idx);
			return -EBUSY;
		}

		dbg_print("\nintr_sync: try to wait work[%d]\n", work_idx);

		spin_lock(&sync_work->lock);
		/* Snapshot version under the lock; a later mismatch means the
		 * run we enqueued behind has finished. */
		work_ver = sync_work->version;
		if (sync_work->state == INTR_SYNC_STATE_RUNNING) {
			/* Raced with do_intr_sync(): retry with the other slot. */
			spin_unlock(&sync_work->lock);
			dbg_print("intr_sync: work[%d] is already running, "
				"find idle work\n", work_idx);
			goto find_idle;
		}

		sync_work->waiter++;
		if (sync_work->state == INTR_SYNC_STATE_IDLE) {
			/* First waiter on an idle slot enqueues the work;
			 * later waiters just piggyback on the same run. */
			dbg_print("intr_sync: enqueue work[%d]\n", work_idx);
			sync_work->state = INTR_SYNC_STATE_QUEUED;
			reinit_completion(&sync_work->done);
			queue_work(intr_sync_wq, &sync_work->work);
		}
		spin_unlock(&sync_work->lock);

		/* Poll in 100ms slices so a wakeup event can abort the wait. */
		do {
			/* Check wakeup event first before waiting:
			 * If a wakeup-event is detected, wake up right now
			 */
			if (__check_wakeup_event(&wd)) {
				spin_lock(&sync_work->lock);
				sync_work->waiter--;
				spin_unlock(&sync_work->lock);
				dbg_print("intr_sync: detect wakeup event "
					"while waiting work[%d]\n", work_idx);
				return -EBUSY;
			}

			/* Return 0 if timed out, or positive if completed. */
			ret = wait_for_completion_io_timeout(
					&sync_work->done, HZ/10);
			/* A work that we are waiting for has done. */
			/* NOTE(review): version is read here without
			 * sync_work->lock; relies on the unlocked read
			 * observing do_intr_sync()'s increment — verify. */
			if ((ret > 0) || (sync_work->version != work_ver))
				break;
		} while (1);

		spin_lock(&sync_work->lock);
		sync_work->waiter--;
		if (sync_ret)
			*sync_ret = sync_work->ret;
		spin_unlock(&sync_work->lock);
		dbg_print("intr_sync: sync work[%d] is done with ret(%d)\n",
				work_idx, sync_work->ret);
		return 0;
	}

	/* check whether a workqueue exists or not under locked state.
	 * Create new one if a workqueue is not created yet.
	 */
	mutex_lock(&intr_sync_wq_lock);
	if (likely(!intr_sync_wq)) {
		intr_sync_work[0].id = 0;
		intr_sync_work[1].id = 1;
		INIT_WORK(&intr_sync_work[0].work, do_intr_sync);
		INIT_WORK(&intr_sync_work[1].work, do_intr_sync);
		spin_lock_init(&intr_sync_work[0].lock);
		spin_lock_init(&intr_sync_work[1].lock);
		init_completion(&intr_sync_work[0].done);
		init_completion(&intr_sync_work[1].done);
		intr_sync_wq = alloc_ordered_workqueue("intr_syncd", WQ_MEM_RECLAIM);
		dbg_print("\nintr_sync: try to allocate intr_sync_queue\n");
	}
	mutex_unlock(&intr_sync_wq_lock);

	/* try to enqueue work again if the workqueue is created successfully */
	if (likely(intr_sync_wq))
		goto enqueue_sync_wait;

	/* Workqueue allocation failed: fall back to a plain, uninterruptible
	 * sync so the caller still gets its data flushed. */
	printk("\nintr_sync: allocation failed, just call sync()\n");
	ret = sys_sync();
	if (sync_ret)
		*sync_ret = ret;
	return 0;
}
263 | #else /* CONFIG_INTERRUPTIBLE_SYNC */ | |
/* CONFIG_INTERRUPTIBLE_SYNC disabled: plain sys_sync() wrapper with the
 * same signature — result reported via @sync_ret, always returns 0. */
int intr_sync(int *sync_ret)
{
	int ret;

	ret = sys_sync();
	if (sync_ret)
		*sync_ret = ret;

	return 0;
}
271 | #endif /* CONFIG_INTERRUPTIBLE_SYNC */ | |
272 | ||
c15c54f5 | 273 | /* |
d8a8559c JA |
274 | * Do the filesystem syncing work. For simple filesystems |
275 | * writeback_inodes_sb(sb) just dirties buffers with inodes so we have to | |
276 | * submit IO for these buffers via __sync_blockdev(). This also speeds up the | |
277 | * wait == 1 case since in that case write_inode() functions do | |
278 | * sync_dirty_buffer() and thus effectively write one block at a time. | |
c15c54f5 | 279 | */ |
0dc83bd3 | 280 | static int __sync_filesystem(struct super_block *sb, int wait) |
c15c54f5 | 281 | { |
5fb324ad | 282 | if (wait) |
0dc83bd3 | 283 | sync_inodes_sb(sb); |
5fb324ad | 284 | else |
0e175a18 | 285 | writeback_inodes_sb(sb, WB_REASON_SYNC); |
5fb324ad | 286 | |
c15c54f5 JK |
287 | if (sb->s_op->sync_fs) |
288 | sb->s_op->sync_fs(sb, wait); | |
289 | return __sync_blockdev(sb->s_bdev, wait); | |
290 | } | |
291 | ||
292 | /* | |
293 | * Write out and wait upon all dirty data associated with this | |
294 | * superblock. Filesystem data as well as the underlying block | |
295 | * device. Takes the superblock lock. | |
296 | */ | |
60b0680f | 297 | int sync_filesystem(struct super_block *sb) |
c15c54f5 JK |
298 | { |
299 | int ret; | |
300 | ||
5af7926f CH |
301 | /* |
302 | * We need to be protected against the filesystem going from | |
303 | * r/o to r/w or vice versa. | |
304 | */ | |
305 | WARN_ON(!rwsem_is_locked(&sb->s_umount)); | |
306 | ||
307 | /* | |
308 | * No point in syncing out anything if the filesystem is read-only. | |
309 | */ | |
310 | if (sb->s_flags & MS_RDONLY) | |
311 | return 0; | |
312 | ||
0dc83bd3 | 313 | ret = __sync_filesystem(sb, 0); |
c15c54f5 JK |
314 | if (ret < 0) |
315 | return ret; | |
0dc83bd3 | 316 | return __sync_filesystem(sb, 1); |
c15c54f5 | 317 | } |
10096fb1 | 318 | EXPORT_SYMBOL(sync_filesystem); |
c15c54f5 | 319 | |
b3de6531 | 320 | static void sync_inodes_one_sb(struct super_block *sb, void *arg) |
01a05b33 | 321 | { |
95f28604 | 322 | if (!(sb->s_flags & MS_RDONLY)) |
0dc83bd3 | 323 | sync_inodes_sb(sb); |
01a05b33 | 324 | } |
b3de6531 | 325 | |
b3de6531 JK |
326 | static void sync_fs_one_sb(struct super_block *sb, void *arg) |
327 | { | |
328 | if (!(sb->s_flags & MS_RDONLY) && sb->s_op->sync_fs) | |
329 | sb->s_op->sync_fs(sb, *(int *)arg); | |
330 | } | |
331 | ||
/* iterate_bdevs() callback: start writeback of one block device's page cache. */
static void fdatawrite_one_bdev(struct block_device *bdev, void *arg)
{
	filemap_fdatawrite(bdev->bd_inode->i_mapping);
}
336 | ||
/* iterate_bdevs() callback: wait for writeback of one block device's pages. */
static void fdatawait_one_bdev(struct block_device *bdev, void *arg)
{
	/*
	 * We keep the error status of individual mapping so that
	 * applications can catch the writeback error using fsync(2).
	 * See filemap_fdatawait_keep_errors() for details.
	 */
	filemap_fdatawait_keep_errors(bdev->bd_inode->i_mapping);
}
346 | ||
3beab0b4 | 347 | /* |
4ea425b6 JK |
348 | * Sync everything. We start by waking flusher threads so that most of |
349 | * writeback runs on all devices in parallel. Then we sync all inodes reliably | |
350 | * which effectively also waits for all flusher threads to finish doing | |
351 | * writeback. At this point all data is on disk so metadata should be stable | |
352 | * and we tell filesystems to sync their metadata via ->sync_fs() calls. | |
353 | * Finally, we writeout all block devices because some filesystems (e.g. ext2) | |
354 | * just write metadata (such as inodes or bitmaps) to block device page cache | |
355 | * and do not sync it on their own in ->sync_fs(). | |
3beab0b4 | 356 | */ |
SYSCALL_DEFINE0(sync)
{
	int nowait = 0, wait = 1;

	/* Kick flusher threads first so writeback runs on all devices in
	 * parallel (see the block comment above). */
	wakeup_flusher_threads(0, WB_REASON_SYNC);
	/* Reliably write out and wait on all inodes. */
	iterate_supers(sync_inodes_one_sb, NULL);
	/* ->sync_fs(): non-waiting pass first, then a waiting pass. */
	iterate_supers(sync_fs_one_sb, &nowait);
	iterate_supers(sync_fs_one_sb, &wait);
	/* Flush block-device page cache (metadata of e.g. ext2) and wait. */
	iterate_bdevs(fdatawrite_one_bdev, NULL);
	iterate_bdevs(fdatawait_one_bdev, NULL);
	if (unlikely(laptop_mode))
		laptop_sync_completion();
	return 0;
}
371 | ||
a2a9537a JA |
372 | static void do_sync_work(struct work_struct *work) |
373 | { | |
b3de6531 JK |
374 | int nowait = 0; |
375 | ||
5cee5815 JK |
376 | /* |
377 | * Sync twice to reduce the possibility we skipped some inodes / pages | |
378 | * because they were temporarily locked | |
379 | */ | |
b3de6531 JK |
380 | iterate_supers(sync_inodes_one_sb, &nowait); |
381 | iterate_supers(sync_fs_one_sb, &nowait); | |
d0e91b13 | 382 | iterate_bdevs(fdatawrite_one_bdev, NULL); |
b3de6531 JK |
383 | iterate_supers(sync_inodes_one_sb, &nowait); |
384 | iterate_supers(sync_fs_one_sb, &nowait); | |
d0e91b13 | 385 | iterate_bdevs(fdatawrite_one_bdev, NULL); |
5cee5815 | 386 | printk("Emergency Sync complete\n"); |
a2a9537a JA |
387 | kfree(work); |
388 | } | |
389 | ||
cf9a2ae8 DH |
390 | void emergency_sync(void) |
391 | { | |
a2a9537a JA |
392 | struct work_struct *work; |
393 | ||
394 | work = kmalloc(sizeof(*work), GFP_ATOMIC); | |
395 | if (work) { | |
396 | INIT_WORK(work, do_sync_work); | |
397 | schedule_work(work); | |
398 | } | |
cf9a2ae8 DH |
399 | } |
400 | ||
b7ed78f5 SW |
401 | /* |
402 | * sync a single super | |
403 | */ | |
SYSCALL_DEFINE1(syncfs, int, fd)
{
	struct fd f = fdget(fd);
	struct super_block *sb;
	int ret;

	if (!f.file)
		return -EBADF;
	sb = f.file->f_path.dentry->d_sb;

	/* s_umount guards against a concurrent remount r/o <-> r/w
	 * (see the WARN_ON in sync_filesystem()). */
	down_read(&sb->s_umount);
	ret = sync_filesystem(sb);
	up_read(&sb->s_umount);

	fdput(f);
	return ret;
}
421 | ||
4c728ef5 | 422 | /** |
148f948b | 423 | * vfs_fsync_range - helper to sync a range of data & metadata to disk |
4c728ef5 | 424 | * @file: file to sync |
148f948b JK |
425 | * @start: offset in bytes of the beginning of data range to sync |
426 | * @end: offset in bytes of the end of data range (inclusive) | |
427 | * @datasync: perform only datasync | |
4c728ef5 | 428 | * |
148f948b JK |
429 | * Write back data in range @start..@end and metadata for @file to disk. If |
430 | * @datasync is set only metadata needed to access modified file data is | |
431 | * written. | |
4c728ef5 | 432 | */ |
int vfs_fsync_range(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct inode *inode = file->f_mapping->host;

	if (!file->f_op->fsync)
		return -EINVAL;
	/*
	 * A full fsync must also persist timestamp updates that were
	 * deferred via I_DIRTY_TIME: clear the flag under i_lock and
	 * redirty the inode the ordinary way before calling ->fsync().
	 */
	if (!datasync && (inode->i_state & I_DIRTY_TIME)) {
		spin_lock(&inode->i_lock);
		inode->i_state &= ~I_DIRTY_TIME;
		spin_unlock(&inode->i_lock);
		mark_inode_dirty_sync(inode);
	}
	return file->f_op->fsync(file, start, end, datasync);
}
EXPORT_SYMBOL(vfs_fsync_range);
448 | ||
449 | /** | |
450 | * vfs_fsync - perform a fsync or fdatasync on a file | |
451 | * @file: file to sync | |
148f948b JK |
452 | * @datasync: only perform a fdatasync operation |
453 | * | |
454 | * Write back data and metadata for @file to disk. If @datasync is | |
455 | * set only metadata needed to access modified file data is written. | |
148f948b | 456 | */ |
int vfs_fsync(struct file *file, int datasync)
{
	/* Sync the file's entire byte range. */
	return vfs_fsync_range(file, 0, LLONG_MAX, datasync);
}
EXPORT_SYMBOL(vfs_fsync);
cf9a2ae8 | 462 | |
4c728ef5 | 463 | static int do_fsync(unsigned int fd, int datasync) |
cf9a2ae8 | 464 | { |
2903ff01 | 465 | struct fd f = fdget(fd); |
cf9a2ae8 DH |
466 | int ret = -EBADF; |
467 | ||
2903ff01 AV |
468 | if (f.file) { |
469 | ret = vfs_fsync(f.file, datasync); | |
470 | fdput(f); | |
a4f5f251 | 471 | inc_syscfs(current); |
cf9a2ae8 DH |
472 | } |
473 | return ret; | |
474 | } | |
475 | ||
SYSCALL_DEFINE1(fsync, unsigned int, fd)
{
	/* fsync(2): flush file data and all metadata. */
	return do_fsync(fd, 0);
}
480 | ||
SYSCALL_DEFINE1(fdatasync, unsigned int, fd)
{
	/* fdatasync(2): flush data plus only metadata needed to access it. */
	return do_fsync(fd, 1);
}
485 | ||
f79e2abb AM |
486 | /* |
487 | * sys_sync_file_range() permits finely controlled syncing over a segment of | |
488 | * a file in the range offset .. (offset+nbytes-1) inclusive. If nbytes is | |
489 | * zero then sys_sync_file_range() will operate from offset out to EOF. | |
490 | * | |
491 | * The flag bits are: | |
492 | * | |
493 | * SYNC_FILE_RANGE_WAIT_BEFORE: wait upon writeout of all pages in the range | |
494 | * before performing the write. | |
495 | * | |
496 | * SYNC_FILE_RANGE_WRITE: initiate writeout of all those dirty pages in the | |
cce77081 PM |
497 | * range which are not presently under writeback. Note that this may block for |
498 | * significant periods due to exhaustion of disk request structures. | |
f79e2abb AM |
499 | * |
500 | * SYNC_FILE_RANGE_WAIT_AFTER: wait upon writeout of all pages in the range | |
501 | * after performing the write. | |
502 | * | |
503 | * Useful combinations of the flag bits are: | |
504 | * | |
505 | * SYNC_FILE_RANGE_WAIT_BEFORE|SYNC_FILE_RANGE_WRITE: ensures that all pages | |
506 | * in the range which were dirty on entry to sys_sync_file_range() are placed | |
507 | * under writeout. This is a start-write-for-data-integrity operation. | |
508 | * | |
509 | * SYNC_FILE_RANGE_WRITE: start writeout of all dirty pages in the range which | |
510 | * are not presently under writeout. This is an asynchronous flush-to-disk | |
511 | * operation. Not suitable for data integrity operations. | |
512 | * | |
513 | * SYNC_FILE_RANGE_WAIT_BEFORE (or SYNC_FILE_RANGE_WAIT_AFTER): wait for | |
514 | * completion of writeout of all pages in the range. This will be used after an | |
515 | * earlier SYNC_FILE_RANGE_WAIT_BEFORE|SYNC_FILE_RANGE_WRITE operation to wait | |
516 | * for that operation to complete and to return the result. | |
517 | * | |
518 | * SYNC_FILE_RANGE_WAIT_BEFORE|SYNC_FILE_RANGE_WRITE|SYNC_FILE_RANGE_WAIT_AFTER: | |
519 | * a traditional sync() operation. This is a write-for-data-integrity operation | |
520 | * which will ensure that all pages in the range which were dirty on entry to | |
521 | * sys_sync_file_range() are committed to disk. | |
522 | * | |
523 | * | |
524 | * SYNC_FILE_RANGE_WAIT_BEFORE and SYNC_FILE_RANGE_WAIT_AFTER will detect any | |
525 | * I/O errors or ENOSPC conditions and will return those to the caller, after | |
526 | * clearing the EIO and ENOSPC flags in the address_space. | |
527 | * | |
528 | * It should be noted that none of these operations write out the file's | |
529 | * metadata. So unless the application is strictly performing overwrites of | |
530 | * already-instantiated disk blocks, there are no guarantees here that the data | |
531 | * will be available after a crash. | |
532 | */ | |
4a0fd5bf AV |
SYSCALL_DEFINE4(sync_file_range, int, fd, loff_t, offset, loff_t, nbytes,
		unsigned int, flags)
{
	int ret;
	struct fd f;
	struct address_space *mapping;
	loff_t endbyte;			/* inclusive */
	umode_t i_mode;

	ret = -EINVAL;
	if (flags & ~VALID_FLAGS)
		goto out;

	endbyte = offset + nbytes;

	/* Reject negative offsets and ranges that wrap on signed addition. */
	if ((s64)offset < 0)
		goto out;
	if ((s64)endbyte < 0)
		goto out;
	if (endbyte < offset)
		goto out;

	if (sizeof(pgoff_t) == 4) {
		if (offset >= (0x100000000ULL << PAGE_CACHE_SHIFT)) {
			/*
			 * The range starts outside a 32 bit machine's
			 * pagecache addressing capabilities. Let it "succeed"
			 */
			ret = 0;
			goto out;
		}
		if (endbyte >= (0x100000000ULL << PAGE_CACHE_SHIFT)) {
			/*
			 * Out to EOF
			 */
			nbytes = 0;
		}
	}

	/* nbytes == 0 means "from offset out to EOF" (see comment above). */
	if (nbytes == 0)
		endbyte = LLONG_MAX;
	else
		endbyte--;		/* inclusive */

	ret = -EBADF;
	f = fdget(fd);
	if (!f.file)
		goto out;

	i_mode = file_inode(f.file)->i_mode;
	ret = -ESPIPE;
	if (!S_ISREG(i_mode) && !S_ISBLK(i_mode) && !S_ISDIR(i_mode) &&
			!S_ISLNK(i_mode))
		goto out_put;

	mapping = f.file->f_mapping;
	if (!mapping) {
		ret = -EINVAL;
		goto out_put;
	}

	/* Apply the three phases in order: wait-before, write, wait-after. */
	ret = 0;
	if (flags & SYNC_FILE_RANGE_WAIT_BEFORE) {
		ret = filemap_fdatawait_range(mapping, offset, endbyte);
		if (ret < 0)
			goto out_put;
	}

	if (flags & SYNC_FILE_RANGE_WRITE) {
		ret = __filemap_fdatawrite_range(mapping, offset, endbyte,
						 WB_SYNC_NONE);
		if (ret < 0)
			goto out_put;
	}

	if (flags & SYNC_FILE_RANGE_WAIT_AFTER)
		ret = filemap_fdatawait_range(mapping, offset, endbyte);

out_put:
	fdput(f);
out:
	return ret;
}
616 | ||
edd5cd4a DW |
617 | /* It would be nice if people remember that not all the world's an i386 |
618 | when they introduce new system calls */ | |
4a0fd5bf AV |
SYSCALL_DEFINE4(sync_file_range2, int, fd, unsigned int, flags,
		loff_t, offset, loff_t, nbytes)
{
	/* Same operation as sync_file_range(); the argument order differs —
	 * presumably so 64-bit args land in aligned register pairs on some
	 * non-x86 ABIs (see comment above) — forward with args reordered. */
	return sys_sync_file_range(fd, offset, nbytes, flags);
}