/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * file.c
 *
 * File open, close, extend, truncate
 *
 * Copyright (C) 2002, 2004 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/uio.h>
#include <linux/sched.h>
#include <linux/splice.h>
#include <linux/mount.h>
#include <linux/writeback.h>
#include <linux/falloc.h>
#include <linux/quotaops.h>
#include <linux/blkdev.h>

#define MLOG_MASK_PREFIX ML_INODE
#include <cluster/masklog.h>

#include "ocfs2.h"

#include "alloc.h"
#include "aops.h"
#include "dir.h"
#include "dlmglue.h"
#include "extent_map.h"
#include "file.h"
#include "sysfile.h"
#include "inode.h"
#include "ioctl.h"
#include "journal.h"
#include "locks.h"
#include "mmap.h"
#include "suballoc.h"
#include "super.h"
#include "xattr.h"
#include "acl.h"
#include "quota.h"
#include "refcounttree.h"

#include "buffer_head_io.h"

static int ocfs2_init_file_private(struct inode *inode, struct file *file)
{
	struct ocfs2_file_private *fp;

	fp = kzalloc(sizeof(struct ocfs2_file_private), GFP_KERNEL);
	if (!fp)
		return -ENOMEM;

	fp->fp_file = file;
	mutex_init(&fp->fp_mutex);
	ocfs2_file_lock_res_init(&fp->fp_flock, fp);
	file->private_data = fp;

	return 0;
}

static void ocfs2_free_file_private(struct inode *inode, struct file *file)
{
	struct ocfs2_file_private *fp = file->private_data;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	if (fp) {
		ocfs2_simple_drop_lockres(osb, &fp->fp_flock);
		ocfs2_lock_res_free(&fp->fp_flock);
		kfree(fp);
		file->private_data = NULL;
	}
}

static int ocfs2_file_open(struct inode *inode, struct file *file)
{
	int status;
	int mode = file->f_flags;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);

	mlog_entry("(0x%p, 0x%p, '%.*s')\n", inode, file,
		   file->f_path.dentry->d_name.len, file->f_path.dentry->d_name.name);

	if (file->f_mode & FMODE_WRITE)
		dquot_initialize(inode);

	spin_lock(&oi->ip_lock);

	/* Check that the inode hasn't been wiped from disk by another
	 * node. If it hasn't then we're safe as long as we hold the
	 * spin lock until our increment of open count. */
	if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_DELETED) {
		spin_unlock(&oi->ip_lock);

		status = -ENOENT;
		goto leave;
	}

	if (mode & O_DIRECT)
		oi->ip_flags |= OCFS2_INODE_OPEN_DIRECT;

	oi->ip_open_count++;
	spin_unlock(&oi->ip_lock);

	status = ocfs2_init_file_private(inode, file);
	if (status) {
		/*
		 * We want to set open count back if we're failing the
		 * open.
		 */
		spin_lock(&oi->ip_lock);
		oi->ip_open_count--;
		spin_unlock(&oi->ip_lock);
	}

leave:
	mlog_exit(status);
	return status;
}

static int ocfs2_file_release(struct inode *inode, struct file *file)
{
	struct ocfs2_inode_info *oi = OCFS2_I(inode);

	mlog_entry("(0x%p, 0x%p, '%.*s')\n", inode, file,
		   file->f_path.dentry->d_name.len,
		   file->f_path.dentry->d_name.name);

	spin_lock(&oi->ip_lock);
	if (!--oi->ip_open_count)
		oi->ip_flags &= ~OCFS2_INODE_OPEN_DIRECT;
	spin_unlock(&oi->ip_lock);

	ocfs2_free_file_private(inode, file);

	mlog_exit(0);

	return 0;
}

static int ocfs2_dir_open(struct inode *inode, struct file *file)
{
	return ocfs2_init_file_private(inode, file);
}

static int ocfs2_dir_release(struct inode *inode, struct file *file)
{
	ocfs2_free_file_private(inode, file);
	return 0;
}

static int ocfs2_sync_file(struct file *file, int datasync)
{
	int err = 0;
	journal_t *journal;
	struct inode *inode = file->f_mapping->host;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	mlog_entry("(0x%p, %d, 0x%p, '%.*s')\n", file, datasync,
		   file->f_path.dentry, file->f_path.dentry->d_name.len,
		   file->f_path.dentry->d_name.name);

	if (datasync && !(inode->i_state & I_DIRTY_DATASYNC)) {
		/*
		 * We still have to flush drive's caches to get data to the
		 * platter
		 */
		if (osb->s_mount_opt & OCFS2_MOUNT_BARRIER)
			blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
		goto bail;
	}

	journal = osb->journal->j_journal;
	err = jbd2_journal_force_commit(journal);

bail:
	mlog_exit(err);

	return (err < 0) ? -EIO : 0;
}

int ocfs2_should_update_atime(struct inode *inode,
			      struct vfsmount *vfsmnt)
{
	struct timespec now;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
		return 0;

	if ((inode->i_flags & S_NOATIME) ||
	    ((inode->i_sb->s_flags & MS_NODIRATIME) && S_ISDIR(inode->i_mode)))
		return 0;

	/*
	 * We can be called with no vfsmnt structure - NFSD will
	 * sometimes do this.
	 *
	 * Note that our action here is different than touch_atime() -
	 * if we can't tell whether this is a noatime mount, then we
	 * don't know whether to trust the value of s_atime_quantum.
	 */
	if (vfsmnt == NULL)
		return 0;

	if ((vfsmnt->mnt_flags & MNT_NOATIME) ||
	    ((vfsmnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode)))
		return 0;

	if (vfsmnt->mnt_flags & MNT_RELATIME) {
		if ((timespec_compare(&inode->i_atime, &inode->i_mtime) <= 0) ||
		    (timespec_compare(&inode->i_atime, &inode->i_ctime) <= 0))
			return 1;

		return 0;
	}

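	/*
	 * Illustration: with a 60 second atime quantum (the usual
	 * default, tunable via the atime_quantum mount option), an
	 * atime less than a minute stale is left alone, trading strict
	 * atime accuracy for fewer cluster-wide disk updates.
	 */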
	now = CURRENT_TIME;
	if ((now.tv_sec - inode->i_atime.tv_sec <= osb->s_atime_quantum))
		return 0;
	else
		return 1;
}

int ocfs2_update_inode_atime(struct inode *inode,
			     struct buffer_head *bh)
{
	int ret;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	handle_t *handle;
	struct ocfs2_dinode *di = (struct ocfs2_dinode *) bh->b_data;

	mlog_entry_void();

	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out_commit;
	}

	/*
	 * Don't use ocfs2_mark_inode_dirty() here as we don't always
	 * have i_mutex to guard against concurrent changes to other
	 * inode fields.
	 */
	inode->i_atime = CURRENT_TIME;
	di->i_atime = cpu_to_le64(inode->i_atime.tv_sec);
	di->i_atime_nsec = cpu_to_le32(inode->i_atime.tv_nsec);
	ocfs2_journal_dirty(handle, bh);

out_commit:
	ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);
out:
	mlog_exit(ret);
	return ret;
}

static int ocfs2_set_inode_size(handle_t *handle,
				struct inode *inode,
				struct buffer_head *fe_bh,
				u64 new_i_size)
{
	int status;

	mlog_entry_void();
	i_size_write(inode, new_i_size);
	inode->i_blocks = ocfs2_inode_sector_count(inode);
	inode->i_ctime = inode->i_mtime = CURRENT_TIME;

	status = ocfs2_mark_inode_dirty(handle, inode, fe_bh);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

bail:
	mlog_exit(status);
	return status;
}

int ocfs2_simple_size_update(struct inode *inode,
			     struct buffer_head *di_bh,
			     u64 new_i_size)
{
	int ret;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	handle_t *handle = NULL;

	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_set_inode_size(handle, inode, di_bh,
				   new_i_size);
	if (ret < 0)
		mlog_errno(ret);

	ocfs2_commit_trans(osb, handle);
out:
	return ret;
}

static int ocfs2_cow_file_pos(struct inode *inode,
			      struct buffer_head *fe_bh,
			      u64 offset)
{
	int status;
	u32 phys, cpos = offset >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
	unsigned int num_clusters = 0;
	unsigned int ext_flags = 0;

	/*
	 * If the new offset is cluster-aligned, there is no space for
	 * ocfs2_zero_range_for_truncate to fill, so there is no need
	 * to CoW either.
	 */
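	/*
	 * Worked example (illustrative 64KB cluster size): truncating
	 * to offset 196608 (3 * 64KB) is cluster-aligned, so we return
	 * early; truncating to offset 200000 lands inside cluster 3,
	 * whose tail must be zeroed, so that cluster is CoWed below if
	 * it is refcounted.
	 */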
	if ((offset & (OCFS2_SB(inode->i_sb)->s_clustersize - 1)) == 0)
		return 0;

	status = ocfs2_get_clusters(inode, cpos, &phys,
				    &num_clusters, &ext_flags);
	if (status) {
		mlog_errno(status);
		goto out;
	}

	if (!(ext_flags & OCFS2_EXT_REFCOUNTED))
		goto out;

	return ocfs2_refcount_cow(inode, NULL, fe_bh, cpos, 1, cpos+1);

out:
	return status;
}

static int ocfs2_orphan_for_truncate(struct ocfs2_super *osb,
				     struct inode *inode,
				     struct buffer_head *fe_bh,
				     u64 new_i_size)
{
	int status;
	handle_t *handle;
	struct ocfs2_dinode *di;
	u64 cluster_bytes;

	mlog_entry_void();

	/*
	 * We need to CoW the cluster that contains the offset if it is
	 * reflinked, since we will later call
	 * ocfs2_zero_range_for_truncate, which writes "0" from the
	 * offset to the end of the cluster.
	 */
	status = ocfs2_cow_file_pos(inode, fe_bh, new_i_size);
	if (status) {
		mlog_errno(status);
		return status;
	}

	/* TODO: This needs to actually orphan the inode in this
	 * transaction. */

	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
		mlog_errno(status);
		goto out;
	}

	status = ocfs2_journal_access_di(handle, INODE_CACHE(inode), fe_bh,
					 OCFS2_JOURNAL_ACCESS_WRITE);
	if (status < 0) {
		mlog_errno(status);
		goto out_commit;
	}

	/*
	 * Do this before setting i_size.
	 */
	cluster_bytes = ocfs2_align_bytes_to_clusters(inode->i_sb, new_i_size);
	status = ocfs2_zero_range_for_truncate(inode, handle, new_i_size,
					       cluster_bytes);
	if (status) {
		mlog_errno(status);
		goto out_commit;
	}

	i_size_write(inode, new_i_size);
	inode->i_ctime = inode->i_mtime = CURRENT_TIME;

	di = (struct ocfs2_dinode *) fe_bh->b_data;
	di->i_size = cpu_to_le64(new_i_size);
	di->i_ctime = di->i_mtime = cpu_to_le64(inode->i_ctime.tv_sec);
	di->i_ctime_nsec = di->i_mtime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);

	ocfs2_journal_dirty(handle, fe_bh);

out_commit:
	ocfs2_commit_trans(osb, handle);
out:

	mlog_exit(status);
	return status;
}

static int ocfs2_truncate_file(struct inode *inode,
			       struct buffer_head *di_bh,
			       u64 new_i_size)
{
	int status = 0;
	struct ocfs2_dinode *fe = NULL;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	mlog_entry("(inode = %llu, new_i_size = %llu)\n",
		   (unsigned long long)OCFS2_I(inode)->ip_blkno,
		   (unsigned long long)new_i_size);

	/* We trust di_bh because it comes from ocfs2_inode_lock(), which
	 * already validated it */
	fe = (struct ocfs2_dinode *) di_bh->b_data;

	mlog_bug_on_msg(le64_to_cpu(fe->i_size) != i_size_read(inode),
			"Inode %llu, inode i_size = %lld != di "
			"i_size = %llu, i_flags = 0x%x\n",
			(unsigned long long)OCFS2_I(inode)->ip_blkno,
			i_size_read(inode),
			(unsigned long long)le64_to_cpu(fe->i_size),
			le32_to_cpu(fe->i_flags));

	if (new_i_size > le64_to_cpu(fe->i_size)) {
		mlog(0, "asked to truncate file with size (%llu) to size (%llu)!\n",
		     (unsigned long long)le64_to_cpu(fe->i_size),
		     (unsigned long long)new_i_size);
		status = -EINVAL;
		mlog_errno(status);
		goto bail;
	}

	mlog(0, "inode %llu, i_size = %llu, new_i_size = %llu\n",
	     (unsigned long long)le64_to_cpu(fe->i_blkno),
	     (unsigned long long)le64_to_cpu(fe->i_size),
	     (unsigned long long)new_i_size);

	/* let's handle the simple truncate cases before doing any more
	 * cluster locking. */
	if (new_i_size == le64_to_cpu(fe->i_size))
		goto bail;

	down_write(&OCFS2_I(inode)->ip_alloc_sem);

	ocfs2_resv_discard(&osb->osb_la_resmap,
			   &OCFS2_I(inode)->ip_la_data_resv);

	/*
	 * The inode lock forced other nodes to sync and drop their
	 * pages, which (correctly) happens even if we have a truncate
	 * without allocation change - ocfs2 cluster sizes can be much
	 * greater than page size, so we have to truncate them
	 * anyway.
	 */
	unmap_mapping_range(inode->i_mapping, new_i_size + PAGE_SIZE - 1, 0, 1);
	truncate_inode_pages(inode->i_mapping, new_i_size);

	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
		status = ocfs2_truncate_inline(inode, di_bh, new_i_size,
					       i_size_read(inode), 1);
		if (status)
			mlog_errno(status);

		goto bail_unlock_sem;
	}

	/* alright, we're going to need to do a full blown alloc size
	 * change. Orphan the inode so that recovery can complete the
	 * truncate if necessary. This does the task of marking
	 * i_size. */
	status = ocfs2_orphan_for_truncate(osb, inode, di_bh, new_i_size);
	if (status < 0) {
		mlog_errno(status);
		goto bail_unlock_sem;
	}

	status = ocfs2_commit_truncate(osb, inode, di_bh);
	if (status < 0) {
		mlog_errno(status);
		goto bail_unlock_sem;
	}

	/* TODO: orphan dir cleanup here. */
bail_unlock_sem:
	up_write(&OCFS2_I(inode)->ip_alloc_sem);

bail:
	if (!status && OCFS2_I(inode)->ip_clusters == 0)
		status = ocfs2_try_remove_refcount_tree(inode, di_bh);

	mlog_exit(status);
	return status;
}

/*
 * extend file allocation only here.
 * we'll update all the disk stuff, and oip->alloc_size
 *
 * expect stuff to be locked, a transaction started and enough data /
 * metadata reservations in the contexts.
 *
 * Will return -EAGAIN, and a reason if a restart is needed.
 * If passed in, *reason will always be set, even in error.
 */
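/*
 * A sketch of the restart protocol (see the loop in
 * __ocfs2_extend_allocation() below): on -EAGAIN with *reason_ret set
 * to RESTART_TRANS, the caller extends the current transaction and
 * retries; with RESTART_META, it commits, drops its reservations and
 * starts the whole allocation over.
 */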
int ocfs2_add_inode_data(struct ocfs2_super *osb,
			 struct inode *inode,
			 u32 *logical_offset,
			 u32 clusters_to_add,
			 int mark_unwritten,
			 struct buffer_head *fe_bh,
			 handle_t *handle,
			 struct ocfs2_alloc_context *data_ac,
			 struct ocfs2_alloc_context *meta_ac,
			 enum ocfs2_alloc_restarted *reason_ret)
{
	int ret;
	struct ocfs2_extent_tree et;

	ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), fe_bh);
	ret = ocfs2_add_clusters_in_btree(handle, &et, logical_offset,
					  clusters_to_add, mark_unwritten,
					  data_ac, meta_ac, reason_ret);

	return ret;
}

static int __ocfs2_extend_allocation(struct inode *inode, u32 logical_start,
				     u32 clusters_to_add, int mark_unwritten)
{
	int status = 0;
	int restart_func = 0;
	int credits;
	u32 prev_clusters;
	struct buffer_head *bh = NULL;
	struct ocfs2_dinode *fe = NULL;
	handle_t *handle = NULL;
	struct ocfs2_alloc_context *data_ac = NULL;
	struct ocfs2_alloc_context *meta_ac = NULL;
	enum ocfs2_alloc_restarted why;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct ocfs2_extent_tree et;
	int did_quota = 0;

	mlog_entry("(clusters_to_add = %u)\n", clusters_to_add);

	/*
	 * This function only exists for file systems which don't
	 * support holes.
	 */
	BUG_ON(mark_unwritten && !ocfs2_sparse_alloc(osb));

	status = ocfs2_read_inode_block(inode, &bh);
	if (status < 0) {
		mlog_errno(status);
		goto leave;
	}
	fe = (struct ocfs2_dinode *) bh->b_data;

restart_all:
	BUG_ON(le32_to_cpu(fe->i_clusters) != OCFS2_I(inode)->ip_clusters);

	mlog(0, "extend inode %llu, i_size = %lld, di->i_clusters = %u, "
	     "clusters_to_add = %u\n",
	     (unsigned long long)OCFS2_I(inode)->ip_blkno,
	     (long long)i_size_read(inode), le32_to_cpu(fe->i_clusters),
	     clusters_to_add);
	ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), bh);
	status = ocfs2_lock_allocators(inode, &et, clusters_to_add, 0,
				       &data_ac, &meta_ac);
	if (status) {
		mlog_errno(status);
		goto leave;
	}

	credits = ocfs2_calc_extend_credits(osb->sb, &fe->id2.i_list,
					    clusters_to_add);
	handle = ocfs2_start_trans(osb, credits);
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
		handle = NULL;
		mlog_errno(status);
		goto leave;
	}

restarted_transaction:
	status = dquot_alloc_space_nodirty(inode,
			ocfs2_clusters_to_bytes(osb->sb, clusters_to_add));
	if (status)
		goto leave;
	did_quota = 1;

	/* reserve a write to the file entry early on - so that if we
	 * run out of credits in the allocation path, we can still
	 * update i_size. */
	status = ocfs2_journal_access_di(handle, INODE_CACHE(inode), bh,
					 OCFS2_JOURNAL_ACCESS_WRITE);
	if (status < 0) {
		mlog_errno(status);
		goto leave;
	}

	prev_clusters = OCFS2_I(inode)->ip_clusters;

	status = ocfs2_add_inode_data(osb,
				      inode,
				      &logical_start,
				      clusters_to_add,
				      mark_unwritten,
				      bh,
				      handle,
				      data_ac,
				      meta_ac,
				      &why);
	if ((status < 0) && (status != -EAGAIN)) {
		if (status != -ENOSPC)
			mlog_errno(status);
		goto leave;
	}

	ocfs2_journal_dirty(handle, bh);

	spin_lock(&OCFS2_I(inode)->ip_lock);
	clusters_to_add -= (OCFS2_I(inode)->ip_clusters - prev_clusters);
	spin_unlock(&OCFS2_I(inode)->ip_lock);
	/* Release unused quota reservation */
	dquot_free_space(inode,
			ocfs2_clusters_to_bytes(osb->sb, clusters_to_add));
	did_quota = 0;

	if (why != RESTART_NONE && clusters_to_add) {
		if (why == RESTART_META) {
			mlog(0, "restarting function.\n");
			restart_func = 1;
			status = 0;
		} else {
			BUG_ON(why != RESTART_TRANS);

			mlog(0, "restarting transaction.\n");
			/* TODO: This can be more intelligent. */
			credits = ocfs2_calc_extend_credits(osb->sb,
							    &fe->id2.i_list,
							    clusters_to_add);
			status = ocfs2_extend_trans(handle, credits);
			if (status < 0) {
				/* handle still has to be committed at
				 * this point. */
				status = -ENOMEM;
				mlog_errno(status);
				goto leave;
			}
			goto restarted_transaction;
		}
	}

	mlog(0, "fe: i_clusters = %u, i_size=%llu\n",
	     le32_to_cpu(fe->i_clusters),
	     (unsigned long long)le64_to_cpu(fe->i_size));
	mlog(0, "inode: ip_clusters=%u, i_size=%lld\n",
	     OCFS2_I(inode)->ip_clusters, (long long)i_size_read(inode));

leave:
	if (status < 0 && did_quota)
		dquot_free_space(inode,
			ocfs2_clusters_to_bytes(osb->sb, clusters_to_add));
	if (handle) {
		ocfs2_commit_trans(osb, handle);
		handle = NULL;
	}
	if (data_ac) {
		ocfs2_free_alloc_context(data_ac);
		data_ac = NULL;
	}
	if (meta_ac) {
		ocfs2_free_alloc_context(meta_ac);
		meta_ac = NULL;
	}
	if ((!status) && restart_func) {
		restart_func = 0;
		goto restart_all;
	}
	brelse(bh);
	bh = NULL;

	mlog_exit(status);
	return status;
}

/*
 * While a write will already be ordering the data, a truncate will not.
 * Thus, we need to explicitly order the zeroed pages.
 */
static handle_t *ocfs2_zero_start_ordered_transaction(struct inode *inode)
{
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	handle_t *handle = NULL;
	int ret = 0;

	if (!ocfs2_should_order_data(inode))
		goto out;

	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
	if (IS_ERR(handle)) {
		ret = -ENOMEM;
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_jbd2_file_inode(handle, inode);
	if (ret < 0)
		mlog_errno(ret);

out:
	if (ret) {
		if (!IS_ERR(handle))
			ocfs2_commit_trans(osb, handle);
		handle = ERR_PTR(ret);
	}
	return handle;
}

/* Some parts of this were taken from generic_cont_expand, which turned out
 * to be too fragile to do exactly what we need without us having to
 * worry about recursive locking in ->write_begin() and ->write_end(). */
static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from,
				 u64 abs_to)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	unsigned long index = abs_from >> PAGE_CACHE_SHIFT;
	handle_t *handle = NULL;
	int ret = 0;
	unsigned zero_from, zero_to, block_start, block_end;

	BUG_ON(abs_from >= abs_to);
	BUG_ON(abs_to > (((u64)index + 1) << PAGE_CACHE_SHIFT));
	BUG_ON(abs_from & (inode->i_blkbits - 1));

	page = find_or_create_page(mapping, index, GFP_NOFS);
	if (!page) {
		ret = -ENOMEM;
		mlog_errno(ret);
		goto out;
	}

	/* Get the offsets within the page that we want to zero */
	zero_from = abs_from & (PAGE_CACHE_SIZE - 1);
	zero_to = abs_to & (PAGE_CACHE_SIZE - 1);
	if (!zero_to)
		zero_to = PAGE_CACHE_SIZE;

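	/*
	 * Example, assuming (illustrative) 4KB pages: abs_from = 70144
	 * and abs_to = 71680 give index = 17, zero_from = 512 and
	 * zero_to = 2048, i.e. bytes [512, 2048) of page 17 get zeroed.
	 */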
	mlog(0,
	     "abs_from = %llu, abs_to = %llu, index = %lu, zero_from = %u, zero_to = %u\n",
	     (unsigned long long)abs_from, (unsigned long long)abs_to,
	     index, zero_from, zero_to);

	/* We know that zero_from is block aligned */
	for (block_start = zero_from; block_start < zero_to;
	     block_start = block_end) {
		block_end = block_start + (1 << inode->i_blkbits);

		/*
		 * block_start is block-aligned. Bump it by one to force
		 * __block_write_begin and block_commit_write to zero the
		 * whole block.
		 */
		ret = __block_write_begin(page, block_start + 1, 0,
					  ocfs2_get_block);
		if (ret < 0) {
			mlog_errno(ret);
			goto out_unlock;
		}

		if (!handle) {
			handle = ocfs2_zero_start_ordered_transaction(inode);
			if (IS_ERR(handle)) {
				ret = PTR_ERR(handle);
				handle = NULL;
				break;
			}
		}

		/* must not update i_size! */
		ret = block_commit_write(page, block_start + 1,
					 block_start + 1);
		if (ret < 0)
			mlog_errno(ret);
		else
			ret = 0;
	}

	if (handle)
		ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);

out_unlock:
	unlock_page(page);
	page_cache_release(page);
out:
	return ret;
}

/*
 * Find the next range to zero. We do this in terms of bytes because
 * that's what ocfs2_zero_extend() wants, and it is dealing with the
 * pagecache. We may return multiple extents.
 *
 * zero_start and zero_end are ocfs2_zero_extend()'s current idea of what
 * needs to be zeroed. range_start and range_end return the next zeroing
 * range. A subsequent call should pass the previous range_end as its
 * zero_start. If range_end is 0, there's nothing to do.
 *
 * Unwritten extents are skipped over. Refcounted extents are CoW'd.
 */
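/*
 * Example of the calling protocol: with an allocation laid out as
 * [written][hole][written], the first call returns a byte range
 * covering the first written region; passing its range_end back as
 * zero_start skips the hole and returns the second region. Once only
 * holes or unwritten extents remain, *range_end is set to 0 and the
 * walk ends.
 */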
static int ocfs2_zero_extend_get_range(struct inode *inode,
				       struct buffer_head *di_bh,
				       u64 zero_start, u64 zero_end,
				       u64 *range_start, u64 *range_end)
{
	int rc = 0, needs_cow = 0;
	u32 p_cpos, zero_clusters = 0;
	u32 zero_cpos =
		zero_start >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
	u32 last_cpos = ocfs2_clusters_for_bytes(inode->i_sb, zero_end);
	unsigned int num_clusters = 0;
	unsigned int ext_flags = 0;

	while (zero_cpos < last_cpos) {
		rc = ocfs2_get_clusters(inode, zero_cpos, &p_cpos,
					&num_clusters, &ext_flags);
		if (rc) {
			mlog_errno(rc);
			goto out;
		}

		if (p_cpos && !(ext_flags & OCFS2_EXT_UNWRITTEN)) {
			zero_clusters = num_clusters;
			if (ext_flags & OCFS2_EXT_REFCOUNTED)
				needs_cow = 1;
			break;
		}

		zero_cpos += num_clusters;
	}
	if (!zero_clusters) {
		*range_end = 0;
		goto out;
	}

	while ((zero_cpos + zero_clusters) < last_cpos) {
		rc = ocfs2_get_clusters(inode, zero_cpos + zero_clusters,
					&p_cpos, &num_clusters,
					&ext_flags);
		if (rc) {
			mlog_errno(rc);
			goto out;
		}

		if (!p_cpos || (ext_flags & OCFS2_EXT_UNWRITTEN))
			break;
		if (ext_flags & OCFS2_EXT_REFCOUNTED)
			needs_cow = 1;
		zero_clusters += num_clusters;
	}
	if ((zero_cpos + zero_clusters) > last_cpos)
		zero_clusters = last_cpos - zero_cpos;

	if (needs_cow) {
		rc = ocfs2_refcount_cow(inode, NULL, di_bh, zero_cpos,
					zero_clusters, UINT_MAX);
		if (rc) {
			mlog_errno(rc);
			goto out;
		}
	}

	*range_start = ocfs2_clusters_to_bytes(inode->i_sb, zero_cpos);
	*range_end = ocfs2_clusters_to_bytes(inode->i_sb,
					     zero_cpos + zero_clusters);

out:
	return rc;
}

/*
 * Zero one range returned from ocfs2_zero_extend_get_range(). The caller
 * has made sure that the entire range needs zeroing.
 */
static int ocfs2_zero_extend_range(struct inode *inode, u64 range_start,
				   u64 range_end)
{
	int rc = 0;
	u64 next_pos;
	u64 zero_pos = range_start;

	mlog(0, "range_start = %llu, range_end = %llu\n",
	     (unsigned long long)range_start,
	     (unsigned long long)range_end);
	BUG_ON(range_start >= range_end);

	while (zero_pos < range_end) {
		next_pos = (zero_pos & PAGE_CACHE_MASK) + PAGE_CACHE_SIZE;
		if (next_pos > range_end)
			next_pos = range_end;
		rc = ocfs2_write_zero_page(inode, zero_pos, next_pos);
		if (rc < 0) {
			mlog_errno(rc);
			break;
		}
		zero_pos = next_pos;

		/*
		 * Very large extends have the potential to lock up
		 * the cpu for extended periods of time.
		 */
		cond_resched();
	}

	return rc;
}

int ocfs2_zero_extend(struct inode *inode, struct buffer_head *di_bh,
		      loff_t zero_to_size)
{
	int ret = 0;
	u64 zero_start, range_start = 0, range_end = 0;
	struct super_block *sb = inode->i_sb;

	zero_start = ocfs2_align_bytes_to_blocks(sb, i_size_read(inode));
	mlog(0, "zero_start %llu for i_size %llu\n",
	     (unsigned long long)zero_start,
	     (unsigned long long)i_size_read(inode));
	while (zero_start < zero_to_size) {
		ret = ocfs2_zero_extend_get_range(inode, di_bh, zero_start,
						  zero_to_size,
						  &range_start,
						  &range_end);
		if (ret) {
			mlog_errno(ret);
			break;
		}
		if (!range_end)
			break;
		/* Trim the ends */
		if (range_start < zero_start)
			range_start = zero_start;
		if (range_end > zero_to_size)
			range_end = zero_to_size;

		ret = ocfs2_zero_extend_range(inode, range_start,
					      range_end);
		if (ret) {
			mlog_errno(ret);
			break;
		}
		zero_start = range_end;
	}

	return ret;
}

int ocfs2_extend_no_holes(struct inode *inode, struct buffer_head *di_bh,
			  u64 new_i_size, u64 zero_to)
{
	int ret;
	u32 clusters_to_add;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);

	/*
	 * Only quota files call this without a bh, and they can't be
	 * refcounted.
	 */
	BUG_ON(!di_bh && (oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL));
	BUG_ON(!di_bh && !(oi->ip_flags & OCFS2_INODE_SYSTEM_FILE));

	clusters_to_add = ocfs2_clusters_for_bytes(inode->i_sb, new_i_size);
	if (clusters_to_add < oi->ip_clusters)
		clusters_to_add = 0;
	else
		clusters_to_add -= oi->ip_clusters;

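	/*
	 * E.g. with 4KB clusters (illustrative), new_i_size = 10000
	 * needs 3 clusters; if ip_clusters is already 1, we add 2.
	 */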
	if (clusters_to_add) {
		ret = __ocfs2_extend_allocation(inode, oi->ip_clusters,
						clusters_to_add, 0);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
	}

	/*
	 * Call this even if we don't add any clusters to the tree. We
	 * still need to zero the area between the old i_size and the
	 * new i_size.
	 */
	ret = ocfs2_zero_extend(inode, di_bh, zero_to);
	if (ret < 0)
		mlog_errno(ret);

out:
	return ret;
}

static int ocfs2_extend_file(struct inode *inode,
			     struct buffer_head *di_bh,
			     u64 new_i_size)
{
	int ret = 0;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);

	BUG_ON(!di_bh);

	/* setattr sometimes calls us like this. */
	if (new_i_size == 0)
		goto out;

	if (i_size_read(inode) == new_i_size)
		goto out;
	BUG_ON(new_i_size < i_size_read(inode));

	/*
	 * The alloc sem blocks people in read/write from reading our
	 * allocation until we're done changing it. We depend on
	 * i_mutex to block other extend/truncate calls while we're
	 * here. We even have to hold it for sparse files because there
	 * might be some tail zeroing.
	 */
	down_write(&oi->ip_alloc_sem);

	if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
		/*
		 * We can optimize small extends by keeping the inode's
		 * inline data.
		 */
		if (ocfs2_size_fits_inline_data(di_bh, new_i_size)) {
			up_write(&oi->ip_alloc_sem);
			goto out_update_size;
		}

		ret = ocfs2_convert_inline_data_to_extents(inode, di_bh);
		if (ret) {
			up_write(&oi->ip_alloc_sem);
			mlog_errno(ret);
			goto out;
		}
	}

	if (ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)))
		ret = ocfs2_zero_extend(inode, di_bh, new_i_size);
	else
		ret = ocfs2_extend_no_holes(inode, di_bh, new_i_size,
					    new_i_size);

	up_write(&oi->ip_alloc_sem);

	if (ret < 0) {
		mlog_errno(ret);
		goto out;
	}

out_update_size:
	ret = ocfs2_simple_size_update(inode, di_bh, new_i_size);
	if (ret < 0)
		mlog_errno(ret);

out:
	return ret;
}

int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
{
	int status = 0, size_change;
	struct inode *inode = dentry->d_inode;
	struct super_block *sb = inode->i_sb;
	struct ocfs2_super *osb = OCFS2_SB(sb);
	struct buffer_head *bh = NULL;
	handle_t *handle = NULL;
	struct dquot *transfer_to[MAXQUOTAS] = { };
	int qtype;

	mlog_entry("(0x%p, '%.*s')\n", dentry,
		   dentry->d_name.len, dentry->d_name.name);

	/* ensuring we don't even attempt to truncate a symlink */
	if (S_ISLNK(inode->i_mode))
		attr->ia_valid &= ~ATTR_SIZE;

	if (attr->ia_valid & ATTR_MODE)
		mlog(0, "mode change: %d\n", attr->ia_mode);
	if (attr->ia_valid & ATTR_UID)
		mlog(0, "uid change: %d\n", attr->ia_uid);
	if (attr->ia_valid & ATTR_GID)
		mlog(0, "gid change: %d\n", attr->ia_gid);
	if (attr->ia_valid & ATTR_SIZE)
		mlog(0, "size change...\n");
	if (attr->ia_valid & (ATTR_ATIME | ATTR_MTIME | ATTR_CTIME))
		mlog(0, "time change...\n");

#define OCFS2_VALID_ATTRS (ATTR_ATIME | ATTR_MTIME | ATTR_CTIME | ATTR_SIZE \
			   | ATTR_GID | ATTR_UID | ATTR_MODE)
	if (!(attr->ia_valid & OCFS2_VALID_ATTRS)) {
		mlog(0, "can't handle attrs: 0x%x\n", attr->ia_valid);
		return 0;
	}

	status = inode_change_ok(inode, attr);
	if (status)
		return status;

	if (is_quota_modification(inode, attr))
		dquot_initialize(inode);
	size_change = S_ISREG(inode->i_mode) && attr->ia_valid & ATTR_SIZE;
	if (size_change) {
		status = ocfs2_rw_lock(inode, 1);
		if (status < 0) {
			mlog_errno(status);
			goto bail;
		}
	}

	status = ocfs2_inode_lock(inode, &bh, 1);
	if (status < 0) {
		if (status != -ENOENT)
			mlog_errno(status);
		goto bail_unlock_rw;
	}

	if (size_change && attr->ia_size != i_size_read(inode)) {
		status = inode_newsize_ok(inode, attr->ia_size);
		if (status)
			goto bail_unlock;

		if (i_size_read(inode) > attr->ia_size) {
			if (ocfs2_should_order_data(inode)) {
				status = ocfs2_begin_ordered_truncate(inode,
								      attr->ia_size);
				if (status)
					goto bail_unlock;
			}
			status = ocfs2_truncate_file(inode, bh, attr->ia_size);
		} else
			status = ocfs2_extend_file(inode, bh, attr->ia_size);
		if (status < 0) {
			if (status != -ENOSPC)
				mlog_errno(status);
			status = -ENOSPC;
			goto bail_unlock;
		}
	}

	if ((attr->ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
	    (attr->ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) {
		/*
		 * Gather pointers to quota structures so that allocation /
		 * freeing of quota structures happens here and not inside
		 * dquot_transfer() where we have problems with lock ordering
		 */
		if (attr->ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid
		    && OCFS2_HAS_RO_COMPAT_FEATURE(sb,
		    OCFS2_FEATURE_RO_COMPAT_USRQUOTA)) {
			transfer_to[USRQUOTA] = dqget(sb, attr->ia_uid,
						      USRQUOTA);
			if (!transfer_to[USRQUOTA]) {
				status = -ESRCH;
				goto bail_unlock;
			}
		}
		if (attr->ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid
		    && OCFS2_HAS_RO_COMPAT_FEATURE(sb,
		    OCFS2_FEATURE_RO_COMPAT_GRPQUOTA)) {
			transfer_to[GRPQUOTA] = dqget(sb, attr->ia_gid,
						      GRPQUOTA);
			if (!transfer_to[GRPQUOTA]) {
				status = -ESRCH;
				goto bail_unlock;
			}
		}
		handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS +
					   2 * ocfs2_quota_trans_credits(sb));
		if (IS_ERR(handle)) {
			status = PTR_ERR(handle);
			mlog_errno(status);
			goto bail_unlock;
		}
		status = __dquot_transfer(inode, transfer_to);
		if (status < 0)
			goto bail_commit;
	} else {
		handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
		if (IS_ERR(handle)) {
			status = PTR_ERR(handle);
			mlog_errno(status);
			goto bail_unlock;
		}
	}

	/*
	 * This will intentionally not wind up calling truncate_setsize(),
	 * since all the work for a size change has been done above.
	 * Otherwise, we could get into problems with truncate as
	 * ip_alloc_sem is used there to protect against i_size
	 * changes.
	 *
	 * XXX: this means the conditional below can probably be removed.
	 */
	if ((attr->ia_valid & ATTR_SIZE) &&
	    attr->ia_size != i_size_read(inode)) {
		status = vmtruncate(inode, attr->ia_size);
		if (status) {
			mlog_errno(status);
			goto bail_commit;
		}
	}

	setattr_copy(inode, attr);
	mark_inode_dirty(inode);

	status = ocfs2_mark_inode_dirty(handle, inode, bh);
	if (status < 0)
		mlog_errno(status);

bail_commit:
	ocfs2_commit_trans(osb, handle);
bail_unlock:
	ocfs2_inode_unlock(inode, 1);
bail_unlock_rw:
	if (size_change)
		ocfs2_rw_unlock(inode, 1);
bail:
	brelse(bh);

	/* Release quota pointers in case we acquired them */
	for (qtype = 0; qtype < MAXQUOTAS; qtype++)
		dqput(transfer_to[qtype]);

	if (!status && attr->ia_valid & ATTR_MODE) {
		status = ocfs2_acl_chmod(inode);
		if (status < 0)
			mlog_errno(status);
	}

	mlog_exit(status);
	return status;
}

int ocfs2_getattr(struct vfsmount *mnt,
		  struct dentry *dentry,
		  struct kstat *stat)
{
	struct inode *inode = dentry->d_inode;
	struct super_block *sb = dentry->d_inode->i_sb;
	struct ocfs2_super *osb = sb->s_fs_info;
	int err;

	mlog_entry_void();

	err = ocfs2_inode_revalidate(dentry);
	if (err) {
		if (err != -ENOENT)
			mlog_errno(err);
		goto bail;
	}

	generic_fillattr(inode, stat);

	/* We set the blksize from the cluster size for performance */
	stat->blksize = osb->s_clustersize;

bail:
	mlog_exit(err);

	return err;
}

int ocfs2_permission(struct inode *inode, int mask)
{
	int ret;

	mlog_entry_void();

	ret = ocfs2_inode_lock(inode, NULL, 0);
	if (ret) {
		if (ret != -ENOENT)
			mlog_errno(ret);
		goto out;
	}

	ret = generic_permission(inode, mask, ocfs2_check_acl);

	ocfs2_inode_unlock(inode, 0);
out:
	mlog_exit(ret);
	return ret;
}

static int __ocfs2_write_remove_suid(struct inode *inode,
				     struct buffer_head *bh)
{
	int ret;
	handle_t *handle;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct ocfs2_dinode *di;

	mlog_entry("(Inode %llu, mode 0%o)\n",
		   (unsigned long long)OCFS2_I(inode)->ip_blkno, inode->i_mode);

	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret < 0) {
		mlog_errno(ret);
		goto out_trans;
	}

	inode->i_mode &= ~S_ISUID;
	if ((inode->i_mode & S_ISGID) && (inode->i_mode & S_IXGRP))
		inode->i_mode &= ~S_ISGID;

	di = (struct ocfs2_dinode *) bh->b_data;
	di->i_mode = cpu_to_le16(inode->i_mode);

	ocfs2_journal_dirty(handle, bh);

out_trans:
	ocfs2_commit_trans(osb, handle);
out:
	mlog_exit(ret);
	return ret;
}

/*
 * Will look for holes and unwritten extents in the range starting at
 * pos for count bytes (inclusive).
 */
static int ocfs2_check_range_for_holes(struct inode *inode, loff_t pos,
				       size_t count)
{
	int ret = 0;
	unsigned int extent_flags;
	u32 cpos, clusters, extent_len, phys_cpos;
	struct super_block *sb = inode->i_sb;

	cpos = pos >> OCFS2_SB(sb)->s_clustersize_bits;
	clusters = ocfs2_clusters_for_bytes(sb, pos + count) - cpos;

	while (clusters) {
		ret = ocfs2_get_clusters(inode, cpos, &phys_cpos, &extent_len,
					 &extent_flags);
		if (ret < 0) {
			mlog_errno(ret);
			goto out;
		}

		if (phys_cpos == 0 || (extent_flags & OCFS2_EXT_UNWRITTEN)) {
			ret = 1;
			break;
		}

		if (extent_len > clusters)
			extent_len = clusters;

		clusters -= extent_len;
		cpos += extent_len;
	}
out:
	return ret;
}

static int ocfs2_write_remove_suid(struct inode *inode)
{
	int ret;
	struct buffer_head *bh = NULL;

	ret = ocfs2_read_inode_block(inode, &bh);
	if (ret < 0) {
		mlog_errno(ret);
		goto out;
	}

	ret = __ocfs2_write_remove_suid(inode, bh);
out:
	brelse(bh);
	return ret;
}

/*
 * Allocate enough extents to cover the region starting at byte offset
 * start for len bytes. Existing extents are skipped; any extents
 * added are marked as "unwritten".
 */
static int ocfs2_allocate_unwritten_extents(struct inode *inode,
					    u64 start, u64 len)
{
	int ret;
	u32 cpos, phys_cpos, clusters, alloc_size;
	u64 end = start + len;
	struct buffer_head *di_bh = NULL;

	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
		ret = ocfs2_read_inode_block(inode, &di_bh);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		/*
		 * Nothing to do if the requested reservation range
		 * fits within the inode.
		 */
		if (ocfs2_size_fits_inline_data(di_bh, end))
			goto out;

		ret = ocfs2_convert_inline_data_to_extents(inode, di_bh);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
	}

	/*
	 * We consider both start and len to be inclusive.
	 */
	cpos = start >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
	clusters = ocfs2_clusters_for_bytes(inode->i_sb, start + len);
	clusters -= cpos;

	while (clusters) {
		ret = ocfs2_get_clusters(inode, cpos, &phys_cpos,
					 &alloc_size, NULL);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		/*
		 * Hole or existing extent len can be arbitrary, so
		 * cap it to our own allocation request.
		 */
		if (alloc_size > clusters)
			alloc_size = clusters;

		if (phys_cpos) {
			/*
			 * We already have an allocation at this
			 * region so we can safely skip it.
			 */
			goto next;
		}

		ret = __ocfs2_extend_allocation(inode, cpos, alloc_size, 1);
		if (ret) {
			if (ret != -ENOSPC)
				mlog_errno(ret);
			goto out;
		}

next:
		cpos += alloc_size;
		clusters -= alloc_size;
	}

	ret = 0;
out:

	brelse(di_bh);
	return ret;
}

/*
 * Truncate a byte range, avoiding pages within partial clusters. This
 * preserves those pages for the zeroing code to write to.
 */
static void ocfs2_truncate_cluster_pages(struct inode *inode, u64 byte_start,
					 u64 byte_len)
{
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	loff_t start, end;
	struct address_space *mapping = inode->i_mapping;

	start = (loff_t)ocfs2_align_bytes_to_clusters(inode->i_sb, byte_start);
	end = byte_start + byte_len;
	end = end & ~(osb->s_clustersize - 1);

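	/*
	 * E.g. with 32KB clusters (illustrative), byte_start = 10000
	 * and byte_len = 100000: start rounds up to 32768 and end
	 * rounds down to 98304, so only the wholly-covered middle
	 * clusters are dropped; the partial edge clusters stay cached
	 * for the zeroing code.
	 */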
	if (start < end) {
		unmap_mapping_range(mapping, start, end - start, 0);
		truncate_inode_pages_range(mapping, start, end - 1);
	}
}

static int ocfs2_zero_partial_clusters(struct inode *inode,
				       u64 start, u64 len)
{
	int ret = 0;
	u64 tmpend, end = start + len;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	unsigned int csize = osb->s_clustersize;
	handle_t *handle;

	/*
	 * The "start" and "end" values are NOT necessarily part of
	 * the range whose allocation is being deleted. Rather, this
	 * is what the user passed in with the request. We must zero
	 * partial clusters here. There's no need to worry about
	 * physical allocation - the zeroing code knows to skip holes.
	 */
	mlog(0, "byte start: %llu, end: %llu\n",
	     (unsigned long long)start, (unsigned long long)end);

	/*
	 * If both edges are on a cluster boundary then there's no
	 * zeroing required as the region is part of the allocation to
	 * be truncated.
	 */
	if ((start & (csize - 1)) == 0 && (end & (csize - 1)) == 0)
		goto out;

	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		mlog_errno(ret);
		goto out;
	}

	/*
	 * We want to get the byte offset of the end of the 1st cluster.
	 */
	tmpend = (u64)osb->s_clustersize + (start & ~(osb->s_clustersize - 1));
	if (tmpend > end)
		tmpend = end;

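	/*
	 * Illustration with 32KB clusters: start = 10000, end = 90000
	 * gives tmpend = 32768, so this pass zeroes [10000, 32768) and
	 * the second pass below zeroes [65536, 90000).
	 */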
1572 mlog(0, "1st range: start: %llu, tmpend: %llu\n",
1573 (unsigned long long)start, (unsigned long long)tmpend);
1574
1575 ret = ocfs2_zero_range_for_truncate(inode, handle, start, tmpend);
1576 if (ret)
1577 mlog_errno(ret);
1578
1579 if (tmpend < end) {
1580 /*
1581 * This may make start and end equal, but the zeroing
1582 * code will skip any work in that case so there's no
1583 * need to catch it up here.
1584 */
1585 start = end & ~(osb->s_clustersize - 1);
1586
1587 mlog(0, "2nd range: start: %llu, end: %llu\n",
1588 (unsigned long long)start, (unsigned long long)end);
1589
1590 ret = ocfs2_zero_range_for_truncate(inode, handle, start, end);
1591 if (ret)
1592 mlog_errno(ret);
1593 }
1594
1595 ocfs2_commit_trans(osb, handle);
1596 out:
1597 return ret;
1598 }
1599
1600 static int ocfs2_find_rec(struct ocfs2_extent_list *el, u32 pos)
1601 {
1602 int i;
1603 struct ocfs2_extent_rec *rec = NULL;
1604
1605 for (i = le16_to_cpu(el->l_next_free_rec) - 1; i >= 0; i--) {
1606
1607 rec = &el->l_recs[i];
1608
1609 if (le32_to_cpu(rec->e_cpos) < pos)
1610 break;
1611 }
1612
1613 return i;
1614 }
1615
1616 /*
1617 * Helper to calculate the punching pos and length in one run, we handle the
1618 * following three cases in order:
1619 *
1620 * - remove the entire record
1621 * - remove a partial record
1622 * - no record needs to be removed (hole-punching completed)
1623 */
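/*
 * The caller walks from the right edge of the hole back towards
 * trunc_start, so *trunc_end moves leftwards on each call. E.g.
 * punching clusters [3, 9) out of records covering [0, 5) and [5, 10)
 * first removes [5, 9) from the second record, then [3, 5) from the
 * first.
 */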
static void ocfs2_calc_trunc_pos(struct inode *inode,
				 struct ocfs2_extent_list *el,
				 struct ocfs2_extent_rec *rec,
				 u32 trunc_start, u32 *trunc_cpos,
				 u32 *trunc_len, u32 *trunc_end,
				 u64 *blkno, int *done)
{
	int ret = 0;
	u32 coff, range;

	range = le32_to_cpu(rec->e_cpos) + ocfs2_rec_clusters(el, rec);

	if (le32_to_cpu(rec->e_cpos) >= trunc_start) {
		*trunc_cpos = le32_to_cpu(rec->e_cpos);
		/*
		 * Skip holes if any.
		 */
		if (range < *trunc_end)
			*trunc_end = range;
		*trunc_len = *trunc_end - le32_to_cpu(rec->e_cpos);
		*blkno = le64_to_cpu(rec->e_blkno);
		*trunc_end = le32_to_cpu(rec->e_cpos);
	} else if (range > trunc_start) {
		*trunc_cpos = trunc_start;
		*trunc_len = *trunc_end - trunc_start;
		coff = trunc_start - le32_to_cpu(rec->e_cpos);
		*blkno = le64_to_cpu(rec->e_blkno) +
			 ocfs2_clusters_to_blocks(inode->i_sb, coff);
		*trunc_end = trunc_start;
	} else {
		/*
		 * There are two possibilities here:
		 *
		 * - the last record has been removed
		 * - trunc_start was within a hole
		 *
		 * Either case means hole punching is complete.
		 */
		ret = 1;
	}

	*done = ret;
}

static int ocfs2_remove_inode_range(struct inode *inode,
				    struct buffer_head *di_bh, u64 byte_start,
				    u64 byte_len)
{
	int ret = 0, flags = 0, done = 0, i;
	u32 trunc_start, trunc_len, trunc_end, trunc_cpos, phys_cpos;
	u32 cluster_in_el;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct ocfs2_cached_dealloc_ctxt dealloc;
	struct address_space *mapping = inode->i_mapping;
	struct ocfs2_extent_tree et;
	struct ocfs2_path *path = NULL;
	struct ocfs2_extent_list *el = NULL;
	struct ocfs2_extent_rec *rec = NULL;
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
	u64 blkno, refcount_loc = le64_to_cpu(di->i_refcount_loc);

	ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), di_bh);
	ocfs2_init_dealloc_ctxt(&dealloc);

	if (byte_len == 0)
		return 0;

	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
		ret = ocfs2_truncate_inline(inode, di_bh, byte_start,
					    byte_start + byte_len, 0);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
		/*
		 * There's no need to get fancy with the page cache
		 * truncate of an inline-data inode. We're talking
		 * about less than a page here, which will be cached
		 * in the dinode buffer anyway.
		 */
		unmap_mapping_range(mapping, 0, 0, 0);
		truncate_inode_pages(mapping, 0);
		goto out;
	}

	/*
	 * For reflinks, we may need to CoW the two clusters which might
	 * be partially zeroed later, if the hole's start and end offsets
	 * fall within a cluster (i.e. they are not exactly aligned to
	 * the cluster size).
	 */

	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL) {

		ret = ocfs2_cow_file_pos(inode, di_bh, byte_start);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		ret = ocfs2_cow_file_pos(inode, di_bh, byte_start + byte_len);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
	}

	trunc_start = ocfs2_clusters_for_bytes(osb->sb, byte_start);
	trunc_end = (byte_start + byte_len) >> osb->s_clustersize_bits;
	cluster_in_el = trunc_end;

	mlog(0, "Inode: %llu, start: %llu, len: %llu, cstart: %u, cend: %u\n",
	     (unsigned long long)OCFS2_I(inode)->ip_blkno,
	     (unsigned long long)byte_start,
	     (unsigned long long)byte_len, trunc_start, trunc_end);

	ret = ocfs2_zero_partial_clusters(inode, byte_start, byte_len);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	path = ocfs2_new_path_from_et(&et);
	if (!path) {
		ret = -ENOMEM;
		mlog_errno(ret);
		goto out;
	}

	while (trunc_end > trunc_start) {

		ret = ocfs2_find_path(INODE_CACHE(inode), path,
				      cluster_in_el);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		el = path_leaf_el(path);

		i = ocfs2_find_rec(el, trunc_end);
		/*
		 * Need to go to previous extent block.
		 */
		if (i < 0) {
			if (path->p_tree_depth == 0)
				break;

			ret = ocfs2_find_cpos_for_left_leaf(inode->i_sb,
							    path,
							    &cluster_in_el);
			if (ret) {
				mlog_errno(ret);
				goto out;
			}

			/*
			 * We've reached the leftmost extent block,
			 * it's safe to leave.
			 */
			if (cluster_in_el == 0)
				break;

			/*
			 * The 'pos' searched for the previous extent block
			 * is always one cluster less than the actual
			 * trunc_end.
			 */
			trunc_end = cluster_in_el + 1;

			ocfs2_reinit_path(path, 1);

			continue;

		} else
			rec = &el->l_recs[i];

		ocfs2_calc_trunc_pos(inode, el, rec, trunc_start, &trunc_cpos,
				     &trunc_len, &trunc_end, &blkno, &done);
		if (done)
			break;

		flags = rec->e_flags;
		phys_cpos = ocfs2_blocks_to_clusters(inode->i_sb, blkno);

		ret = ocfs2_remove_btree_range(inode, &et, trunc_cpos,
					       phys_cpos, trunc_len, flags,
					       &dealloc, refcount_loc);
		if (ret < 0) {
			mlog_errno(ret);
			goto out;
		}

		cluster_in_el = trunc_end;

		ocfs2_reinit_path(path, 1);
	}

	ocfs2_truncate_cluster_pages(inode, byte_start, byte_len);

out:
	ocfs2_schedule_truncate_log_flush(osb, 1);
	ocfs2_run_deallocs(osb, &dealloc);

	return ret;
}

/*
 * Parts of this function were taken from xfs_change_file_space()
 */
static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
				     loff_t f_pos, unsigned int cmd,
				     struct ocfs2_space_resv *sr,
				     int change_size)
{
	int ret;
	s64 llen;
	loff_t size;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct buffer_head *di_bh = NULL;
	handle_t *handle;
	unsigned long long max_off = inode->i_sb->s_maxbytes;

	if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
		return -EROFS;

	mutex_lock(&inode->i_mutex);

	/*
	 * This prevents concurrent writes on other nodes
	 */
	ret = ocfs2_rw_lock(inode, 1);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_inode_lock(inode, &di_bh, 1);
	if (ret) {
		mlog_errno(ret);
		goto out_rw_unlock;
	}

	if (inode->i_flags & (S_IMMUTABLE|S_APPEND)) {
		ret = -EPERM;
		goto out_inode_unlock;
	}

	switch (sr->l_whence) {
	case 0: /*SEEK_SET*/
		break;
	case 1: /*SEEK_CUR*/
		sr->l_start += f_pos;
		break;
	case 2: /*SEEK_END*/
		sr->l_start += i_size_read(inode);
		break;
	default:
		ret = -EINVAL;
		goto out_inode_unlock;
	}
	sr->l_whence = 0;

	llen = sr->l_len > 0 ? sr->l_len - 1 : sr->l_len;

	if (sr->l_start < 0
	    || sr->l_start > max_off
	    || (sr->l_start + llen) < 0
	    || (sr->l_start + llen) > max_off) {
		ret = -EINVAL;
		goto out_inode_unlock;
	}
	size = sr->l_start + sr->l_len;

	if (cmd == OCFS2_IOC_RESVSP || cmd == OCFS2_IOC_RESVSP64) {
		if (sr->l_len <= 0) {
			ret = -EINVAL;
			goto out_inode_unlock;
		}
	}

	if (file && should_remove_suid(file->f_path.dentry)) {
		ret = __ocfs2_write_remove_suid(inode, di_bh);
		if (ret) {
			mlog_errno(ret);
			goto out_inode_unlock;
		}
	}

	down_write(&OCFS2_I(inode)->ip_alloc_sem);
	switch (cmd) {
	case OCFS2_IOC_RESVSP:
	case OCFS2_IOC_RESVSP64:
		/*
		 * This takes unsigned offsets, but the signed ones we
		 * pass have been checked against overflow above.
		 */
		ret = ocfs2_allocate_unwritten_extents(inode, sr->l_start,
						       sr->l_len);
		break;
	case OCFS2_IOC_UNRESVSP:
	case OCFS2_IOC_UNRESVSP64:
		ret = ocfs2_remove_inode_range(inode, di_bh, sr->l_start,
					       sr->l_len);
		break;
	default:
		ret = -EINVAL;
	}
	up_write(&OCFS2_I(inode)->ip_alloc_sem);
	if (ret) {
		mlog_errno(ret);
		goto out_inode_unlock;
	}

	/*
	 * We update c/mtime for these changes
	 */
	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		mlog_errno(ret);
		goto out_inode_unlock;
	}

	if (change_size && i_size_read(inode) < size)
		i_size_write(inode, size);

	inode->i_ctime = inode->i_mtime = CURRENT_TIME;
	ret = ocfs2_mark_inode_dirty(handle, inode, di_bh);
	if (ret < 0)
		mlog_errno(ret);

	ocfs2_commit_trans(osb, handle);

out_inode_unlock:
	brelse(di_bh);
	ocfs2_inode_unlock(inode, 1);
out_rw_unlock:
	ocfs2_rw_unlock(inode, 1);

out:
	mutex_unlock(&inode->i_mutex);
	return ret;
}

int ocfs2_change_file_space(struct file *file, unsigned int cmd,
			    struct ocfs2_space_resv *sr)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	if ((cmd == OCFS2_IOC_RESVSP || cmd == OCFS2_IOC_RESVSP64) &&
	    !ocfs2_writes_unwritten_extents(osb))
		return -ENOTTY;
	else if ((cmd == OCFS2_IOC_UNRESVSP || cmd == OCFS2_IOC_UNRESVSP64) &&
		 !ocfs2_sparse_alloc(osb))
		return -ENOTTY;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	if (!(file->f_mode & FMODE_WRITE))
		return -EBADF;

	return __ocfs2_change_file_space(file, inode, file->f_pos, cmd, sr, 0);
}

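/*
 * fallocate() backend: the request is funneled into the same
 * reservation path as the OCFS2_IOC_RESVSP64 ioctl, covering
 * [offset, offset + len); FALLOC_FL_KEEP_SIZE simply clears
 * change_size so that i_size is left untouched.
 */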
static long ocfs2_fallocate(struct inode *inode, int mode, loff_t offset,
			    loff_t len)
{
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct ocfs2_space_resv sr;
	int change_size = 1;

	if (!ocfs2_writes_unwritten_extents(osb))
		return -EOPNOTSUPP;

	if (S_ISDIR(inode->i_mode))
		return -ENODEV;

	if (mode & FALLOC_FL_KEEP_SIZE)
		change_size = 0;

	sr.l_whence = 0;
	sr.l_start = (s64)offset;
	sr.l_len = (s64)len;

	return __ocfs2_change_file_space(NULL, inode, offset,
					 OCFS2_IOC_RESVSP64, &sr, change_size);
}

int ocfs2_check_range_for_refcount(struct inode *inode, loff_t pos,
				   size_t count)
{
	int ret = 0;
	unsigned int extent_flags;
	u32 cpos, clusters, extent_len, phys_cpos;
	struct super_block *sb = inode->i_sb;

	if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb)) ||
	    !(OCFS2_I(inode)->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL) ||
	    OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
		return 0;

	cpos = pos >> OCFS2_SB(sb)->s_clustersize_bits;
	clusters = ocfs2_clusters_for_bytes(sb, pos + count) - cpos;

	while (clusters) {
		ret = ocfs2_get_clusters(inode, cpos, &phys_cpos, &extent_len,
					 &extent_flags);
		if (ret < 0) {
			mlog_errno(ret);
			goto out;
		}

		if (phys_cpos && (extent_flags & OCFS2_EXT_REFCOUNTED)) {
			ret = 1;
			break;
		}

		if (extent_len > clusters)
			extent_len = clusters;

		clusters -= extent_len;
		cpos += extent_len;
	}
out:
	return ret;
}

static int ocfs2_prepare_inode_for_refcount(struct inode *inode,
					    struct file *file,
					    loff_t pos, size_t count,
					    int *meta_level)
{
	int ret;
	struct buffer_head *di_bh = NULL;
	u32 cpos = pos >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
	u32 clusters =
		ocfs2_clusters_for_bytes(inode->i_sb, pos + count) - cpos;

	ret = ocfs2_inode_lock(inode, &di_bh, 1);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	*meta_level = 1;

	ret = ocfs2_refcount_cow(inode, file, di_bh, cpos, clusters, UINT_MAX);
	if (ret)
		mlog_errno(ret);
out:
	brelse(di_bh);
	return ret;
}

static int ocfs2_prepare_inode_for_write(struct file *file,
					 loff_t *ppos,
					 size_t count,
					 int appending,
					 int *direct_io,
					 int *has_refcount)
{
	int ret = 0, meta_level = 0;
	struct dentry *dentry = file->f_path.dentry;
	struct inode *inode = dentry->d_inode;
	loff_t saved_pos, end;

	/*
	 * We start with a read level meta lock and only jump to an ex
	 * if we need to make modifications here.
	 */
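	/*
	 * Escalation example: if should_remove_suid() is true while we
	 * only hold the lock at read level (meta_level == 0), we drop
	 * it, retake at write level (meta_level == 1) and loop,
	 * re-running every check since the inode may have changed in
	 * between.
	 */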
2095 for(;;) {
2096 ret = ocfs2_inode_lock(inode, NULL, meta_level);
2097 if (ret < 0) {
2098 meta_level = -1;
2099 mlog_errno(ret);
2100 goto out;
2101 }
2102
2103 /* Clear suid / sgid if necessary. We do this here
2104 * instead of later in the write path because
2105 * remove_suid() calls ->setattr without any hint that
2106 * we may have already done our cluster locking. Since
2107 * ocfs2_setattr() *must* take cluster locks to
2108 * proceeed, this will lead us to recursively lock the
2109 * inode. There's also the dinode i_size state which
2110 * can be lost via setattr during extending writes (we
2111 * set inode->i_size at the end of a write. */
2112 if (should_remove_suid(dentry)) {
2113 if (meta_level == 0) {
2114 ocfs2_inode_unlock(inode, meta_level);
2115 meta_level = 1;
2116 continue;
2117 }
2118
2119 ret = ocfs2_write_remove_suid(inode);
2120 if (ret < 0) {
2121 mlog_errno(ret);
2122 goto out_unlock;
2123 }
2124 }
2125
2126 /* work on a copy of ppos until we're sure that we won't have
2127 * to recalculate it due to relocking. */
2128 if (appending) {
2129 saved_pos = i_size_read(inode);
2130 mlog(0, "O_APPEND: inode->i_size=%llu\n", saved_pos);
2131 } else {
2132 saved_pos = *ppos;
2133 }
2134
2135 end = saved_pos + count;
2136
2137 ret = ocfs2_check_range_for_refcount(inode, saved_pos, count);
2138 if (ret == 1) {
2139 ocfs2_inode_unlock(inode, meta_level);
2140 meta_level = -1;
2141
2142 ret = ocfs2_prepare_inode_for_refcount(inode,
2143 file,
2144 saved_pos,
2145 count,
2146 &meta_level);
2147 if (has_refcount)
2148 *has_refcount = 1;
2149 if (direct_io)
2150 *direct_io = 0;
2151 }
2152
2153 if (ret < 0) {
2154 mlog_errno(ret);
2155 goto out_unlock;
2156 }
2157
2158 /*
2159 * Skip the O_DIRECT checks if we don't need
2160 * them.
2161 */
2162 if (!direct_io || !(*direct_io))
2163 break;
2164
2165 /*
2166 * There's no sane way to do direct writes to an inode
2167 * with inline data.
2168 */
2169 if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
2170 *direct_io = 0;
2171 break;
2172 }
2173
2174 /*
2175 * Allowing concurrent direct writes means
2176 * i_size changes wouldn't be synchronized, so
2177 * one node could wind up truncating another
2178 * nodes writes.
2179 */
2180 if (end > i_size_read(inode)) {
2181 *direct_io = 0;
2182 break;
2183 }
2184
2185 /*
2186 * We don't fill holes during direct io, so
2187 * check for them here. If any are found, the
2188 * caller will have to retake some cluster
2189 * locks and initiate the io as buffered.
2190 */
2191 ret = ocfs2_check_range_for_holes(inode, saved_pos, count);
2192 if (ret == 1) {
2193 *direct_io = 0;
2194 ret = 0;
2195 } else if (ret < 0)
2196 mlog_errno(ret);
2197 break;
2198 }
2199
2200 if (appending)
2201 *ppos = saved_pos;
2202
2203 out_unlock:
2204 if (meta_level >= 0)
2205 ocfs2_inode_unlock(inode, meta_level);
2206
2207 out:
2208 return ret;
2209 }
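/*
 * Write path. Lock ordering here is i_mutex -> i_alloc_sem -> rw_lock,
 * matching ocfs2_setattr(). If ocfs2_prepare_inode_for_write() vetoes
 * direct I/O, everything is dropped and we relock for a buffered
 * write. The tail handles the sync cases (O_DSYNC, O_SYNC, or an
 * O_DIRECT write that fell back to buffered) by flushing the written
 * range and forcing a journal commit when i_size, the cluster count,
 * or the refcount state changed under us.
 */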
static ssize_t ocfs2_file_aio_write(struct kiocb *iocb,
				    const struct iovec *iov,
				    unsigned long nr_segs,
				    loff_t pos)
{
	int ret, direct_io, appending, rw_level, have_alloc_sem = 0;
	int can_do_direct, has_refcount = 0;
	ssize_t written = 0;
	size_t ocount;		/* original count */
	size_t count;		/* after file limit checks */
	loff_t old_size, *ppos = &iocb->ki_pos;
	u32 old_clusters;
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_path.dentry->d_inode;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	int full_coherency = !(osb->s_mount_opt &
			       OCFS2_MOUNT_COHERENCY_BUFFERED);

	mlog_entry("(0x%p, %u, '%.*s')\n", file,
		   (unsigned int)nr_segs,
		   file->f_path.dentry->d_name.len,
		   file->f_path.dentry->d_name.name);

	if (iocb->ki_left == 0)
		return 0;

	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);

	appending = file->f_flags & O_APPEND ? 1 : 0;
	direct_io = file->f_flags & O_DIRECT ? 1 : 0;

	mutex_lock(&inode->i_mutex);

relock:
	/* to match setattr's i_mutex -> i_alloc_sem -> rw_lock ordering */
	if (direct_io) {
		down_read(&inode->i_alloc_sem);
		have_alloc_sem = 1;
	}

	/*
	 * Concurrent O_DIRECT writes are allowed with the
	 * mount option "coherency=buffered".
	 */
	rw_level = (!direct_io || full_coherency);

	ret = ocfs2_rw_lock(inode, rw_level);
	if (ret < 0) {
		mlog_errno(ret);
		goto out_sems;
	}

	/*
	 * O_DIRECT writes with "coherency=full" need to take EX cluster
	 * inode_lock to guarantee coherency.
	 */
	if (direct_io && full_coherency) {
		/*
		 * We need to take and drop the inode lock to force
		 * other nodes to drop their caches. Buffered I/O
		 * already does this in write_begin().
		 */
		ret = ocfs2_inode_lock(inode, NULL, 1);
		if (ret < 0) {
			mlog_errno(ret);
			goto out_sems;
		}

		ocfs2_inode_unlock(inode, 1);
	}

	can_do_direct = direct_io;
	ret = ocfs2_prepare_inode_for_write(file, ppos,
					    iocb->ki_left, appending,
					    &can_do_direct, &has_refcount);
	if (ret < 0) {
		mlog_errno(ret);
		goto out;
	}

	/*
	 * We can't complete the direct I/O as requested, fall back to
	 * buffered I/O.
	 */
	if (direct_io && !can_do_direct) {
		ocfs2_rw_unlock(inode, rw_level);
		up_read(&inode->i_alloc_sem);

		have_alloc_sem = 0;
		rw_level = -1;

		direct_io = 0;
		goto relock;
	}

	/*
	 * To later detect whether a journal commit for sync writes is
	 * necessary, we sample i_size and cluster count here.
	 */
	old_size = i_size_read(inode);
	old_clusters = OCFS2_I(inode)->ip_clusters;

	/* communicate with ocfs2_dio_end_io */
	ocfs2_iocb_set_rw_locked(iocb, rw_level);

	ret = generic_segment_checks(iov, &nr_segs, &ocount,
				     VERIFY_READ);
	if (ret)
		goto out_dio;

	count = ocount;
	ret = generic_write_checks(file, ppos, &count,
				   S_ISBLK(inode->i_mode));
	if (ret)
		goto out_dio;

	if (direct_io) {
		written = generic_file_direct_write(iocb, iov, &nr_segs, *ppos,
						    ppos, count, ocount);
		if (written < 0) {
			ret = written;
			goto out_dio;
		}
	} else {
		current->backing_dev_info = file->f_mapping->backing_dev_info;
		written = generic_file_buffered_write(iocb, iov, nr_segs, *ppos,
						      ppos, count, 0);
		current->backing_dev_info = NULL;
	}

out_dio:
	/* buffered aio wouldn't have proper lock coverage today */
	BUG_ON(ret == -EIOCBQUEUED && !(file->f_flags & O_DIRECT));

	if (((file->f_flags & O_DSYNC) && !direct_io) || IS_SYNC(inode) ||
	    ((file->f_flags & O_DIRECT) && !direct_io)) {
		ret = filemap_fdatawrite_range(file->f_mapping, pos,
					       pos + count - 1);
		if (ret < 0)
			written = ret;

		if (!ret && ((old_size != i_size_read(inode)) ||
			     (old_clusters != OCFS2_I(inode)->ip_clusters) ||
			     has_refcount)) {
			ret = jbd2_journal_force_commit(osb->journal->j_journal);
			if (ret < 0)
				written = ret;
		}

		if (!ret)
			ret = filemap_fdatawait_range(file->f_mapping, pos,
						      pos + count - 1);
	}

	/*
	 * Deep in g_f_a_w_n()->ocfs2_direct_IO we pass in an
	 * ocfs2_dio_end_io function pointer which is called when
	 * O_DIRECT I/O completes so that it can unlock our rw lock.
	 * (It's the clustered equivalent of i_alloc_sem; it protects
	 * truncate from racing with pending I/Os.)
	 * Unfortunately there are error cases which call end_io and
	 * others that don't, so we don't have to unlock the rw_lock
	 * if either an async dio is going to do it in the future or an
	 * end_io after an error has already done it.
	 */
	if ((ret == -EIOCBQUEUED) || (!ocfs2_iocb_is_rw_locked(iocb))) {
		rw_level = -1;
		have_alloc_sem = 0;
	}

out:
	if (rw_level != -1)
		ocfs2_rw_unlock(inode, rw_level);

out_sems:
	if (have_alloc_sem)
		up_read(&inode->i_alloc_sem);

	mutex_unlock(&inode->i_mutex);

	if (written)
		ret = written;
	mlog_exit(ret);
	return ret;
}
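/*
 * Actor for ocfs2_file_splice_write(): run the usual pre-write checks,
 * then feed pipe buffers into the page cache via pipe_to_file.
 */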
static int ocfs2_splice_to_file(struct pipe_inode_info *pipe,
				struct file *out,
				struct splice_desc *sd)
{
	int ret;

	ret = ocfs2_prepare_inode_for_write(out, &sd->pos,
					    sd->total_len, 0, NULL, NULL);
	if (ret < 0) {
		mlog_errno(ret);
		return ret;
	}

	return splice_from_pipe_feed(pipe, sd, pipe_to_file);
}
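/*
 * ->splice_write(). Unlike the generic helper, i_mutex and the rw lock
 * are taken and dropped around each batch of pipe buffers (I_MUTEX_CHILD
 * nesting under the pipe's I_MUTEX_PARENT), and dirty pages are
 * balanced after the spliced data has been synced.
 */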
static ssize_t ocfs2_file_splice_write(struct pipe_inode_info *pipe,
				       struct file *out,
				       loff_t *ppos,
				       size_t len,
				       unsigned int flags)
{
	int ret;
	struct address_space *mapping = out->f_mapping;
	struct inode *inode = mapping->host;
	struct splice_desc sd = {
		.total_len = len,
		.flags = flags,
		.pos = *ppos,
		.u.file = out,
	};

	mlog_entry("(0x%p, 0x%p, %u, '%.*s')\n", out, pipe,
		   (unsigned int)len,
		   out->f_path.dentry->d_name.len,
		   out->f_path.dentry->d_name.name);

	if (pipe->inode)
		mutex_lock_nested(&pipe->inode->i_mutex, I_MUTEX_PARENT);

	splice_from_pipe_begin(&sd);
	do {
		ret = splice_from_pipe_next(pipe, &sd);
		if (ret <= 0)
			break;

		mutex_lock_nested(&inode->i_mutex, I_MUTEX_CHILD);
		ret = ocfs2_rw_lock(inode, 1);
		if (ret < 0)
			mlog_errno(ret);
		else {
			ret = ocfs2_splice_to_file(pipe, out, &sd);
			ocfs2_rw_unlock(inode, 1);
		}
		mutex_unlock(&inode->i_mutex);
	} while (ret > 0);
	splice_from_pipe_end(pipe, &sd);

	if (pipe->inode)
		mutex_unlock(&pipe->inode->i_mutex);

	if (sd.num_spliced)
		ret = sd.num_spliced;

	if (ret > 0) {
		unsigned long nr_pages;
		int err;

		nr_pages = (ret + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;

		err = generic_write_sync(out, *ppos, ret);
		if (err)
			ret = err;
		else
			*ppos += ret;

		balance_dirty_pages_ratelimited_nr(mapping, nr_pages);
	}

	mlog_exit(ret);
	return ret;
}
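/*
 * ->splice_read(). Only the atime-updating inode lock is cycled before
 * handing off to generic_file_splice_read(); buffered reads protect
 * themselves in ->readpage().
 */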
static ssize_t ocfs2_file_splice_read(struct file *in,
				      loff_t *ppos,
				      struct pipe_inode_info *pipe,
				      size_t len,
				      unsigned int flags)
{
	int ret = 0, lock_level = 0;
	struct inode *inode = in->f_path.dentry->d_inode;

	mlog_entry("(0x%p, 0x%p, %u, '%.*s')\n", in, pipe,
		   (unsigned int)len,
		   in->f_path.dentry->d_name.len,
		   in->f_path.dentry->d_name.name);

	/*
	 * See the comment in ocfs2_file_aio_read()
	 */
	ret = ocfs2_inode_lock_atime(inode, in->f_vfsmnt, &lock_level);
	if (ret < 0) {
		mlog_errno(ret);
		goto bail;
	}
	ocfs2_inode_unlock(inode, lock_level);

	ret = generic_file_splice_read(in, ppos, pipe, len, flags);

bail:
	mlog_exit(ret);
	return ret;
}
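/*
 * Read path. O_DIRECT reads take i_alloc_sem and a shared rw lock so
 * pending reads can't race truncate; buffered reads rely on
 * ->readpage() locking instead. As in the write path, a queued AIO
 * hands rw lock release off to ocfs2_dio_end_io().
 */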
static ssize_t ocfs2_file_aio_read(struct kiocb *iocb,
				   const struct iovec *iov,
				   unsigned long nr_segs,
				   loff_t pos)
{
	int ret = 0, rw_level = -1, have_alloc_sem = 0, lock_level = 0;
	struct file *filp = iocb->ki_filp;
	struct inode *inode = filp->f_path.dentry->d_inode;

	mlog_entry("(0x%p, %u, '%.*s')\n", filp,
		   (unsigned int)nr_segs,
		   filp->f_path.dentry->d_name.len,
		   filp->f_path.dentry->d_name.name);

	if (!inode) {
		ret = -EINVAL;
		mlog_errno(ret);
		goto bail;
	}

	/*
	 * Buffered reads protect themselves in ->readpage(). O_DIRECT reads
	 * need locks to protect pending reads from racing with truncate.
	 */
	if (filp->f_flags & O_DIRECT) {
		down_read(&inode->i_alloc_sem);
		have_alloc_sem = 1;

		ret = ocfs2_rw_lock(inode, 0);
		if (ret < 0) {
			mlog_errno(ret);
			goto bail;
		}
		rw_level = 0;
		/* communicate with ocfs2_dio_end_io */
		ocfs2_iocb_set_rw_locked(iocb, rw_level);
	}

	/*
	 * We're fine letting folks race truncates and extending
	 * writes with read across the cluster, just like they can
	 * locally. Hence no rw_lock during read.
	 *
	 * Take and drop the metadata lock to update inode fields
	 * like i_size. This gives the checks down in
	 * generic_file_aio_read() a chance of actually working.
	 */
	ret = ocfs2_inode_lock_atime(inode, filp->f_vfsmnt, &lock_level);
	if (ret < 0) {
		mlog_errno(ret);
		goto bail;
	}
	ocfs2_inode_unlock(inode, lock_level);

	ret = generic_file_aio_read(iocb, iov, nr_segs, iocb->ki_pos);
	if (ret == -EINVAL)
		mlog(0, "generic_file_aio_read returned -EINVAL\n");

	/* buffered aio wouldn't have proper lock coverage today */
	BUG_ON(ret == -EIOCBQUEUED && !(filp->f_flags & O_DIRECT));

	/* see ocfs2_file_aio_write */
	if (ret == -EIOCBQUEUED || !ocfs2_iocb_is_rw_locked(iocb)) {
		rw_level = -1;
		have_alloc_sem = 0;
	}

bail:
	if (have_alloc_sem)
		up_read(&inode->i_alloc_sem);
	if (rw_level != -1)
		ocfs2_rw_unlock(inode, rw_level);
	mlog_exit(ret);

	return ret;
}

const struct inode_operations ocfs2_file_iops = {
	.setattr	= ocfs2_setattr,
	.getattr	= ocfs2_getattr,
	.permission	= ocfs2_permission,
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= ocfs2_listxattr,
	.removexattr	= generic_removexattr,
	.fallocate	= ocfs2_fallocate,
	.fiemap		= ocfs2_fiemap,
};

const struct inode_operations ocfs2_special_file_iops = {
	.setattr	= ocfs2_setattr,
	.getattr	= ocfs2_getattr,
	.permission	= ocfs2_permission,
};

/*
 * Other than ->lock, keep ocfs2_fops and ocfs2_dops in sync with
 * ocfs2_fops_no_plocks and ocfs2_dops_no_plocks!
 */
const struct file_operations ocfs2_fops = {
	.llseek		= generic_file_llseek,
	.read		= do_sync_read,
	.write		= do_sync_write,
	.mmap		= ocfs2_mmap,
	.fsync		= ocfs2_sync_file,
	.release	= ocfs2_file_release,
	.open		= ocfs2_file_open,
	.aio_read	= ocfs2_file_aio_read,
	.aio_write	= ocfs2_file_aio_write,
	.unlocked_ioctl	= ocfs2_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= ocfs2_compat_ioctl,
#endif
	.lock		= ocfs2_lock,
	.flock		= ocfs2_flock,
	.splice_read	= ocfs2_file_splice_read,
	.splice_write	= ocfs2_file_splice_write,
};

const struct file_operations ocfs2_dops = {
	.llseek		= generic_file_llseek,
	.read		= generic_read_dir,
	.readdir	= ocfs2_readdir,
	.fsync		= ocfs2_sync_file,
	.release	= ocfs2_dir_release,
	.open		= ocfs2_dir_open,
	.unlocked_ioctl	= ocfs2_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= ocfs2_compat_ioctl,
#endif
	.lock		= ocfs2_lock,
	.flock		= ocfs2_flock,
};

/*
 * POSIX-lockless variants of our file_operations.
 *
 * These will be used if the underlying cluster stack does not support
 * posix file locking, if the user passes the "localflocks" mount
 * option, or if we have a local-only fs.
 *
 * ocfs2_flock is in here because all stacks handle UNIX file locks,
 * so we still want it in the case of no stack support for
 * plocks. Internally, it will do the right thing when asked to ignore
 * the cluster.
 */
const struct file_operations ocfs2_fops_no_plocks = {
	.llseek		= generic_file_llseek,
	.read		= do_sync_read,
	.write		= do_sync_write,
	.mmap		= ocfs2_mmap,
	.fsync		= ocfs2_sync_file,
	.release	= ocfs2_file_release,
	.open		= ocfs2_file_open,
	.aio_read	= ocfs2_file_aio_read,
	.aio_write	= ocfs2_file_aio_write,
	.unlocked_ioctl	= ocfs2_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= ocfs2_compat_ioctl,
#endif
	.flock		= ocfs2_flock,
	.splice_read	= ocfs2_file_splice_read,
	.splice_write	= ocfs2_file_splice_write,
};

const struct file_operations ocfs2_dops_no_plocks = {
	.llseek		= generic_file_llseek,
	.read		= generic_read_dir,
	.readdir	= ocfs2_readdir,
	.fsync		= ocfs2_sync_file,
	.release	= ocfs2_dir_release,
	.open		= ocfs2_dir_open,
	.unlocked_ioctl	= ocfs2_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= ocfs2_compat_ioctl,
#endif
	.flock		= ocfs2_flock,
};