/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/uio.h>
#include <linux/blkdev.h>
#include <linux/mm.h>
#include <linux/mount.h>
#include <linux/fs.h>
#include <linux/gfs2_ondisk.h>
#include <linux/falloc.h>
#include <linux/swap.h>
#include <linux/crc32.h>
#include <linux/writeback.h>
#include <asm/uaccess.h>
#include <linux/dlm.h>
#include <linux/dlm_plock.h>
#include <linux/aio.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "dir.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "rgrp.h"
#include "trans.h"
#include "util.h"
/**
 * gfs2_llseek - seek to a location in a file
 * @file: the file
 * @offset: the offset to seek to
 * @whence: Where to seek from (SEEK_SET, SEEK_CUR, or SEEK_END)
 *
 * SEEK_END requires the glock for the file because it references the
 * file's size.
 *
 * Returns: The new offset, or errno
 */

static loff_t gfs2_llseek(struct file *file, loff_t offset, int whence)
{
	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
	struct gfs2_holder i_gh;
	loff_t error;

	switch (whence) {
	case SEEK_END: /* These reference inode->i_size */
	case SEEK_DATA:
	case SEEK_HOLE:
		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
					   &i_gh);
		if (!error) {
			error = generic_file_llseek(file, offset, whence);
			gfs2_glock_dq_uninit(&i_gh);
		}
		break;
	case SEEK_CUR:
	case SEEK_SET:
		error = generic_file_llseek(file, offset, whence);
		break;
	default:
		error = -EINVAL;
	}

	return error;
}
/**
 * gfs2_readdir - Read directory entries from a directory
 * @file: The directory to read from
 * @dirent: Buffer for dirents
 * @filldir: Function used to do the copying
 *
 * Returns: errno
 */

static int gfs2_readdir(struct file *file, void *dirent, filldir_t filldir)
{
	struct inode *dir = file->f_mapping->host;
	struct gfs2_inode *dip = GFS2_I(dir);
	struct gfs2_holder d_gh;
	u64 offset = file->f_pos;
	int error;

	gfs2_holder_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh);
	error = gfs2_glock_nq(&d_gh);
	if (error) {
		gfs2_holder_uninit(&d_gh);
		return error;
	}

	error = gfs2_dir_read(dir, &offset, dirent, filldir, &file->f_ra);

	gfs2_glock_dq_uninit(&d_gh);

	file->f_pos = offset;

	return error;
}
/**
 * fsflags_cvt - convert a set of flag bits via a lookup table
 * @table: A table of 32 u32 flags
 * @val: a 32 bit value to convert
 *
 * This function can be used to convert between fsflags values and
 * GFS2's own flags values.
 *
 * Returns: the converted flags
 */
static u32 fsflags_cvt(const u32 *table, u32 val)
{
	u32 res = 0;
	while (val) {
		if (val & 1)
			res |= *table;
		table++;
		val >>= 1;
	}
	return res;
}
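
/*
 * For example, fsflags_cvt(gfs2_to_fsflags, val) tests each set bit of
 * @val in turn and ORs in the table entry at that bit position, so a
 * value with only gfs2fl_Immutable set comes back as FS_IMMUTABLE_FL.
 * Unknown bits simply map through the table's zero entries.
 */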
static const u32 fsflags_to_gfs2[32] = {
	[3] = GFS2_DIF_SYNC,
	[4] = GFS2_DIF_IMMUTABLE,
	[5] = GFS2_DIF_APPENDONLY,
	[7] = GFS2_DIF_NOATIME,
	[12] = GFS2_DIF_EXHASH,
	[14] = GFS2_DIF_INHERIT_JDATA,
	[17] = GFS2_DIF_TOPDIR,
};
static const u32 gfs2_to_fsflags[32] = {
	[gfs2fl_Sync] = FS_SYNC_FL,
	[gfs2fl_Immutable] = FS_IMMUTABLE_FL,
	[gfs2fl_AppendOnly] = FS_APPEND_FL,
	[gfs2fl_NoAtime] = FS_NOATIME_FL,
	[gfs2fl_ExHash] = FS_INDEX_FL,
	[gfs2fl_TopLevel] = FS_TOPDIR_FL,
	[gfs2fl_InheritJdata] = FS_JOURNAL_DATA_FL,
};
static int gfs2_get_flags(struct file *filp, u32 __user *ptr)
{
	struct inode *inode = file_inode(filp);
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder gh;
	int error;
	u32 fsflags;

	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
	error = gfs2_glock_nq(&gh);
	if (error)
		return error;

	fsflags = fsflags_cvt(gfs2_to_fsflags, ip->i_diskflags);
	if (!S_ISDIR(inode->i_mode) && ip->i_diskflags & GFS2_DIF_JDATA)
		fsflags |= FS_JOURNAL_DATA_FL;
	if (put_user(fsflags, ptr))
		error = -EFAULT;

	gfs2_glock_dq(&gh);
	gfs2_holder_uninit(&gh);
	return error;
}
void gfs2_set_inode_flags(struct inode *inode)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	unsigned int flags = inode->i_flags;

	flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|S_NOSEC);
	if ((ip->i_eattr == 0) && !is_sxid(inode->i_mode))
		inode->i_flags |= S_NOSEC;
	if (ip->i_diskflags & GFS2_DIF_IMMUTABLE)
		flags |= S_IMMUTABLE;
	if (ip->i_diskflags & GFS2_DIF_APPENDONLY)
		flags |= S_APPEND;
	if (ip->i_diskflags & GFS2_DIF_NOATIME)
		flags |= S_NOATIME;
	if (ip->i_diskflags & GFS2_DIF_SYNC)
		flags |= S_SYNC;
	inode->i_flags = flags;
}
/* Flags that can be set by user space */
#define GFS2_FLAGS_USER_SET (GFS2_DIF_JDATA|			\
			     GFS2_DIF_IMMUTABLE|		\
			     GFS2_DIF_APPENDONLY|		\
			     GFS2_DIF_NOATIME|			\
			     GFS2_DIF_SYNC|			\
			     GFS2_DIF_TOPDIR|			\
			     GFS2_DIF_INHERIT_JDATA)
/**
 * do_gfs2_set_flags - set flags on an inode
 * @filp: file pointer
 * @reqflags: The flags to set
 * @mask: Indicates which flags are valid
 *
 */
static int do_gfs2_set_flags(struct file *filp, u32 reqflags, u32 mask)
{
	struct inode *inode = file_inode(filp);
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct buffer_head *bh;
	struct gfs2_holder gh;
	int error;
	u32 new_flags, flags;

	error = mnt_want_write_file(filp);
	if (error)
		return error;

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
	if (error)
		goto out_drop_write;

	error = -EACCES;
	if (!inode_owner_or_capable(inode))
		goto out;

	error = 0;
	flags = ip->i_diskflags;
	new_flags = (flags & ~mask) | (reqflags & mask);
	if ((new_flags ^ flags) == 0)
		goto out;

	error = -EINVAL;
	if ((new_flags ^ flags) & ~GFS2_FLAGS_USER_SET)
		goto out;

	error = -EPERM;
	if (IS_IMMUTABLE(inode) && (new_flags & GFS2_DIF_IMMUTABLE))
		goto out;
	if (IS_APPEND(inode) && (new_flags & GFS2_DIF_APPENDONLY))
		goto out;
	if (((new_flags ^ flags) & GFS2_DIF_IMMUTABLE) &&
	    !capable(CAP_LINUX_IMMUTABLE))
		goto out;
	if (!IS_IMMUTABLE(inode)) {
		error = gfs2_permission(inode, MAY_WRITE);
		if (error)
			goto out;
	}
	if ((flags ^ new_flags) & GFS2_DIF_JDATA) {
		if (flags & GFS2_DIF_JDATA)
			gfs2_log_flush(sdp, ip->i_gl);
		error = filemap_fdatawrite(inode->i_mapping);
		if (error)
			goto out;
		error = filemap_fdatawait(inode->i_mapping);
		if (error)
			goto out;
	}
	error = gfs2_trans_begin(sdp, RES_DINODE, 0);
	if (error)
		goto out;
	error = gfs2_meta_inode_buffer(ip, &bh);
	if (error)
		goto out_trans_end;
	gfs2_trans_add_meta(ip->i_gl, bh);
	ip->i_diskflags = new_flags;
	gfs2_dinode_out(ip, bh->b_data);
	brelse(bh);
	gfs2_set_inode_flags(inode);
	gfs2_set_aops(inode);
out_trans_end:
	gfs2_trans_end(sdp);
out:
	gfs2_glock_dq_uninit(&gh);
out_drop_write:
	mnt_drop_write_file(filp);
	return error;
}
static int gfs2_set_flags(struct file *filp, u32 __user *ptr)
{
	struct inode *inode = file_inode(filp);
	u32 fsflags, gfsflags;

	if (get_user(fsflags, ptr))
		return -EFAULT;

	gfsflags = fsflags_cvt(fsflags_to_gfs2, fsflags);
	if (!S_ISDIR(inode->i_mode)) {
		gfsflags &= ~GFS2_DIF_TOPDIR;
		if (gfsflags & GFS2_DIF_INHERIT_JDATA)
			gfsflags ^= (GFS2_DIF_JDATA | GFS2_DIF_INHERIT_JDATA);
		return do_gfs2_set_flags(filp, gfsflags, ~0);
	}
	return do_gfs2_set_flags(filp, gfsflags, ~GFS2_DIF_JDATA);
}
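
/*
 * The two flag paths above are reached through the standard attribute
 * ioctls dispatched by gfs2_ioctl() below.  Illustrative userspace
 * usage (generic VFS ioctl API, error handling elided):
 *
 *	unsigned int fsflags;
 *	ioctl(fd, FS_IOC_GETFLAGS, &fsflags);
 *	fsflags |= FS_JOURNAL_DATA_FL;
 *	ioctl(fd, FS_IOC_SETFLAGS, &fsflags);
 *
 * which is what chattr(1) and lsattr(1) do under the hood.
 */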
static long gfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	switch(cmd) {
	case FS_IOC_GETFLAGS:
		return gfs2_get_flags(filp, (u32 __user *)arg);
	case FS_IOC_SETFLAGS:
		return gfs2_set_flags(filp, (u32 __user *)arg);
	case FITRIM:
		return gfs2_fitrim(filp, (void __user *)arg);
	}

	return -ENOTTY;
}
/**
 * gfs2_size_hint - Give a hint to the size of a write request
 * @filep: The struct file
 * @offset: The file offset of the write
 * @size: The length of the write
 *
 * When we are about to do a write, this function records the total
 * write size in order to provide a suitable hint to the lower layers
 * about how many blocks will be required.
 *
 */
static void gfs2_size_hint(struct file *filep, loff_t offset, size_t size)
{
	struct inode *inode = file_inode(filep);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_inode *ip = GFS2_I(inode);
	size_t blks = (size + sdp->sd_sb.sb_bsize - 1) >> sdp->sd_sb.sb_bsize_shift;
	int hint = min_t(size_t, INT_MAX, blks);

	atomic_set(&ip->i_res->rs_sizehint, hint);
}
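
/*
 * Note the rounding above: adding (sb_bsize - 1) before shifting by
 * sb_bsize_shift is the usual round-up division, so e.g. a 1-byte
 * write on a 4096-byte block filesystem still hints one block rather
 * than zero.
 */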
/**
 * gfs2_allocate_page_backing - Use bmap to allocate blocks
 * @page: The (locked) page to allocate backing for
 *
 * We try to allocate all the blocks required for the page in
 * one go. This might fail for various reasons, so we keep
 * trying until all the blocks to back this page are allocated.
 * If some of the blocks are already allocated, that's ok too.
 */

static int gfs2_allocate_page_backing(struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct buffer_head bh;
	unsigned long size = PAGE_CACHE_SIZE;
	u64 lblock = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);

	do {
		bh.b_state = 0;
		bh.b_size = size;
		gfs2_block_map(inode, lblock, &bh, 1);
		if (!buffer_mapped(&bh))
			return -EIO;
		size -= bh.b_size;
		lblock += (bh.b_size >> inode->i_blkbits);
	} while(size > 0);
	return 0;
}
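
/*
 * gfs2_block_map() may map fewer bytes than the requested bh.b_size in
 * a single call, which is why the function above loops: each pass
 * advances lblock by however many blocks were actually mapped and
 * retries the remainder.
 */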
/**
 * gfs2_page_mkwrite - Make a shared, mmap()ed, page writable
 * @vma: The virtual memory area
 * @vmf: The virtual memory fault containing the page to become writable
 *
 * When the page becomes writable, we need to ensure that we have
 * blocks allocated on disk to back that page.
 */

static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vma->vm_file);
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	unsigned long last_index;
	u64 pos = page->index << PAGE_CACHE_SHIFT;
	unsigned int data_blocks, ind_blocks, rblocks;
	struct gfs2_holder gh;
	loff_t size;
	int ret;

	sb_start_pagefault(inode->i_sb);

	/* Update file times before taking page lock */
	file_update_time(vma->vm_file);

	ret = gfs2_rs_alloc(ip);
	if (ret)
		return ret;

	gfs2_size_hint(vma->vm_file, pos, PAGE_CACHE_SIZE);

	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
	ret = gfs2_glock_nq(&gh);
	if (ret)
		goto out;

	set_bit(GLF_DIRTY, &ip->i_gl->gl_flags);
	set_bit(GIF_SW_PAGED, &ip->i_flags);

	if (!gfs2_write_alloc_required(ip, pos, PAGE_CACHE_SIZE)) {
		lock_page(page);
		if (!PageUptodate(page) || page->mapping != inode->i_mapping) {
			ret = -EAGAIN;
			unlock_page(page);
		}
		goto out_unlock;
	}

	ret = gfs2_rindex_update(sdp);
	if (ret)
		goto out_unlock;

	ret = gfs2_quota_lock_check(ip);
	if (ret)
		goto out_unlock;
	gfs2_write_calc_reserv(ip, PAGE_CACHE_SIZE, &data_blocks, &ind_blocks);
	ret = gfs2_inplace_reserve(ip, data_blocks + ind_blocks, 0);
	if (ret)
		goto out_quota_unlock;

	rblocks = RES_DINODE + ind_blocks;
	if (gfs2_is_jdata(ip))
		rblocks += data_blocks ? data_blocks : 1;
	if (ind_blocks || data_blocks) {
		rblocks += RES_STATFS + RES_QUOTA;
		rblocks += gfs2_rg_blocks(ip, data_blocks + ind_blocks);
	}
	ret = gfs2_trans_begin(sdp, rblocks, 0);
	if (ret)
		goto out_trans_fail;

	lock_page(page);
	ret = -EINVAL;
	size = i_size_read(inode);
	last_index = (size - 1) >> PAGE_CACHE_SHIFT;
	/* Check page index against inode size */
	if (size == 0 || (page->index > last_index))
		goto out_trans_end;

	ret = -EAGAIN;
	/* If truncated, we must retry the operation, we may have raced
	 * with the glock demotion code.
	 */
	if (!PageUptodate(page) || page->mapping != inode->i_mapping)
		goto out_trans_end;

	/* Unstuff, if required, and allocate backing blocks for page */
	ret = 0;
	if (gfs2_is_stuffed(ip))
		ret = gfs2_unstuff_dinode(ip, page);
	if (ret == 0)
		ret = gfs2_allocate_page_backing(page);

out_trans_end:
	if (ret)
		unlock_page(page);
	gfs2_trans_end(sdp);
out_trans_fail:
	gfs2_inplace_release(ip);
out_quota_unlock:
	gfs2_quota_unlock(ip);
out_unlock:
	gfs2_glock_dq(&gh);
out:
	gfs2_holder_uninit(&gh);
	if (ret == 0) {
		set_page_dirty(page);
		wait_for_stable_page(page);
	}
	sb_end_pagefault(inode->i_sb);
	return block_page_mkwrite_return(ret);
}
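
/*
 * block_page_mkwrite_return() maps the errno from the allocation path
 * above onto the VM_FAULT_* codes the fault handler expects: 0 becomes
 * VM_FAULT_LOCKED, -ENOMEM becomes VM_FAULT_OOM, and most other errors
 * become VM_FAULT_SIGBUS.
 */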
static const struct vm_operations_struct gfs2_vm_ops = {
	.fault = filemap_fault,
	.page_mkwrite = gfs2_page_mkwrite,
	.remap_pages = generic_file_remap_pages,
};
/**
 * gfs2_mmap -
 * @file: The file to map
 * @vma: The VMA which describes the mapping
 *
 * There is no need to get a lock here unless we should be updating
 * atime. We ignore any locking errors since the only consequence is
 * a missed atime update (which will just be deferred until later).
 *
 * Returns: 0
 */

static int gfs2_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);

	if (!(file->f_flags & O_NOATIME) &&
	    !IS_NOATIME(&ip->i_inode)) {
		struct gfs2_holder i_gh;
		int error;

		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
					   &i_gh);
		if (error)
			return error;
		/* grab lock to update inode */
		gfs2_glock_dq_uninit(&i_gh);
		file_accessed(file);
	}
	vma->vm_ops = &gfs2_vm_ops;

	return 0;
}
/**
 * gfs2_open - open a file
 * @inode: the inode to open
 * @file: the struct file for this opening
 *
 * Returns: errno
 */

static int gfs2_open(struct inode *inode, struct file *file)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder i_gh;
	struct gfs2_file *fp;
	int error;

	fp = kzalloc(sizeof(struct gfs2_file), GFP_KERNEL);
	if (!fp)
		return -ENOMEM;

	mutex_init(&fp->f_fl_mutex);

	gfs2_assert_warn(GFS2_SB(inode), !file->private_data);
	file->private_data = fp;

	if (S_ISREG(ip->i_inode.i_mode)) {
		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
					   &i_gh);
		if (error)
			goto fail;

		if (!(file->f_flags & O_LARGEFILE) &&
		    i_size_read(inode) > MAX_NON_LFS) {
			error = -EOVERFLOW;
			goto fail_gunlock;
		}

		gfs2_glock_dq_uninit(&i_gh);
	}

	return 0;

fail_gunlock:
	gfs2_glock_dq_uninit(&i_gh);
fail:
	file->private_data = NULL;
	kfree(fp);
	return error;
}
/**
 * gfs2_release - called to close a struct file
 * @inode: the inode the struct file belongs to
 * @file: the struct file being closed
 *
 * Returns: errno
 */

static int gfs2_release(struct inode *inode, struct file *file)
{
	struct gfs2_inode *ip = GFS2_I(inode);

	kfree(file->private_data);
	file->private_data = NULL;

	if ((file->f_mode & FMODE_WRITE) &&
	    (atomic_read(&inode->i_writecount) == 1))
		gfs2_rs_delete(ip);

	return 0;
}
/**
 * gfs2_fsync - sync the dirty data for a file (across the cluster)
 * @file: the file that points to the dentry
 * @start: the start position in the file to sync
 * @end: the end position in the file to sync
 * @datasync: set if we can ignore timestamp changes
 *
 * We split the data flushing here so that we don't wait for the data
 * until after we've also sent the metadata to disk. Note that for
 * data=ordered, we will write & wait for the data at the log flush
 * stage anyway, so this is unlikely to make much of a difference
 * except in the data=writeback case.
 *
 * If the fdatawrite fails due to any reason except -EIO, we will
 * continue the remainder of the fsync, although we'll still report
 * the error at the end. This is to match filemap_write_and_wait_range()
 * behaviour.
 *
 * Returns: errno
 */

static int gfs2_fsync(struct file *file, loff_t start, loff_t end,
		      int datasync)
{
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	int sync_state = inode->i_state & (I_DIRTY_SYNC|I_DIRTY_DATASYNC);
	struct gfs2_inode *ip = GFS2_I(inode);
	int ret = 0, ret1 = 0;

	if (mapping->nrpages) {
		ret1 = filemap_fdatawrite_range(mapping, start, end);
		if (ret1 == -EIO)
			return ret1;
	}

	if (datasync)
		sync_state &= ~I_DIRTY_SYNC;

	if (sync_state) {
		ret = sync_inode_metadata(inode, 1);
		if (ret)
			return ret;
		if (gfs2_is_jdata(ip))
			filemap_write_and_wait(mapping);
		gfs2_ail_flush(ip->i_gl, 1);
	}

	if (mapping->nrpages)
		ret = filemap_fdatawait_range(mapping, start, end);

	return ret ? ret : ret1;
}
/**
 * gfs2_file_aio_write - Perform a write to a file
 * @iocb: The io context
 * @iov: The data to write
 * @nr_segs: Number of @iov segments
 * @pos: The file position
 *
 * We have to do a lock/unlock here to refresh the inode size for
 * O_APPEND writes, otherwise we can land up writing at the wrong
 * offset. There is still a race, but provided the app is using its
 * own file locking, this will make O_APPEND work as expected.
 *
 */

static ssize_t gfs2_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
				   unsigned long nr_segs, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	size_t writesize = iov_length(iov, nr_segs);
	struct gfs2_inode *ip = GFS2_I(file_inode(file));
	int ret;

	ret = gfs2_rs_alloc(ip);
	if (ret)
		return ret;

	gfs2_size_hint(file, pos, writesize);

	if (file->f_flags & O_APPEND) {
		struct gfs2_holder gh;

		ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
		if (ret)
			return ret;
		gfs2_glock_dq_uninit(&gh);
	}

	return generic_file_aio_write(iocb, iov, nr_segs, pos);
}
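
/*
 * The O_APPEND dance above only refreshes the cached inode size under a
 * shared glock; the glock is dropped again straight away, and
 * generic_file_aio_write() then performs the actual positioning and
 * write under the inode mutex.
 */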
static int fallocate_chunk(struct inode *inode, loff_t offset, loff_t len,
			   int mode)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct buffer_head *dibh;
	int error;
	loff_t size = len;
	unsigned int nr_blks;
	sector_t lblock = offset >> inode->i_blkbits;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (unlikely(error))
		return error;

	gfs2_trans_add_meta(ip->i_gl, dibh);

	if (gfs2_is_stuffed(ip)) {
		error = gfs2_unstuff_dinode(ip, NULL);
		if (unlikely(error))
			goto out;
	}

	while (len) {
		struct buffer_head bh_map = { .b_state = 0, .b_blocknr = 0 };
		bh_map.b_size = len;
		set_buffer_zeronew(&bh_map);

		error = gfs2_block_map(inode, lblock, &bh_map, 1);
		if (unlikely(error))
			goto out;
		len -= bh_map.b_size;
		nr_blks = bh_map.b_size >> inode->i_blkbits;
		lblock += nr_blks;
		if (!buffer_new(&bh_map))
			continue;
		if (unlikely(!buffer_zeronew(&bh_map))) {
			error = -EIO;
			goto out;
		}
	}
	if (offset + size > inode->i_size && !(mode & FALLOC_FL_KEEP_SIZE))
		i_size_write(inode, offset + size);

	mark_inode_dirty(inode);

out:
	brelse(dibh);
	return error;
}
static void calc_max_reserv(struct gfs2_inode *ip, loff_t max, loff_t *len,
			    unsigned int *data_blocks, unsigned int *ind_blocks)
{
	const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	unsigned int max_blocks = ip->i_rgd->rd_free_clone;
	unsigned int tmp, max_data = max_blocks - 3 * (sdp->sd_max_height - 1);

	for (tmp = max_data; tmp > sdp->sd_diptrs;) {
		tmp = DIV_ROUND_UP(tmp, sdp->sd_inptrs);
		max_data -= tmp;
	}
	/* This calculation isn't the exact reverse of gfs2_write_calc_reserv,
	   so it might end up with fewer data blocks */
	if (max_data <= *data_blocks)
		return;
	*data_blocks = max_data;
	*ind_blocks = max_blocks - max_data;
	*len = ((loff_t)max_data - 3) << sdp->sd_sb.sb_bsize_shift;
	if (*len > max) {
		*len = max;
		gfs2_write_calc_reserv(ip, max, data_blocks, ind_blocks);
	}
}
static long gfs2_fallocate(struct file *file, int mode, loff_t offset,
			   loff_t len)
{
	struct inode *inode = file_inode(file);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_inode *ip = GFS2_I(inode);
	unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
	loff_t bytes, max_bytes;
	int error;
	const loff_t pos = offset;
	const loff_t count = len;
	loff_t bsize_mask = ~((loff_t)sdp->sd_sb.sb_bsize - 1);
	loff_t next = (offset + len - 1) >> sdp->sd_sb.sb_bsize_shift;
	loff_t max_chunk_size = UINT_MAX & bsize_mask;
	next = (next + 1) << sdp->sd_sb.sb_bsize_shift;

	/* We only support the FALLOC_FL_KEEP_SIZE mode */
	if (mode & ~FALLOC_FL_KEEP_SIZE)
		return -EOPNOTSUPP;

	offset &= bsize_mask;

	len = next - offset;
	bytes = sdp->sd_max_rg_data * sdp->sd_sb.sb_bsize / 2;
	if (!bytes)
		bytes = UINT_MAX;
	bytes &= bsize_mask;
	if (bytes == 0)
		bytes = sdp->sd_sb.sb_bsize;

	error = gfs2_rs_alloc(ip);
	if (error)
		return error;

	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &ip->i_gh);
	error = gfs2_glock_nq(&ip->i_gh);
	if (unlikely(error))
		goto out_uninit;

	gfs2_size_hint(file, offset, len);

	while (len > 0) {
		if (len < bytes)
			bytes = len;
		if (!gfs2_write_alloc_required(ip, offset, bytes)) {
			len -= bytes;
			offset += bytes;
			continue;
		}
		error = gfs2_quota_lock_check(ip);
		if (error)
			goto out_unlock;

retry:
		gfs2_write_calc_reserv(ip, bytes, &data_blocks, &ind_blocks);

		error = gfs2_inplace_reserve(ip, data_blocks + ind_blocks, 0);
		if (error) {
			if (error == -ENOSPC && bytes > sdp->sd_sb.sb_bsize) {
				bytes >>= 1;
				bytes &= bsize_mask;
				if (bytes == 0)
					bytes = sdp->sd_sb.sb_bsize;
				goto retry;
			}
			goto out_qunlock;
		}
		max_bytes = bytes;
		calc_max_reserv(ip, (len > max_chunk_size)? max_chunk_size : len,
				&max_bytes, &data_blocks, &ind_blocks);

		rblocks = RES_DINODE + ind_blocks + RES_STATFS + RES_QUOTA +
			  RES_RG_HDR + gfs2_rg_blocks(ip, data_blocks + ind_blocks);
		if (gfs2_is_jdata(ip))
			rblocks += data_blocks ? data_blocks : 1;

		error = gfs2_trans_begin(sdp, rblocks,
					 PAGE_CACHE_SIZE/sdp->sd_sb.sb_bsize);
		if (error)
			goto out_trans_fail;

		error = fallocate_chunk(inode, offset, max_bytes, mode);
		gfs2_trans_end(sdp);

		if (error)
			goto out_trans_fail;

		len -= max_bytes;
		offset += max_bytes;
		gfs2_inplace_release(ip);
		gfs2_quota_unlock(ip);
	}

	if (error == 0)
		error = generic_write_sync(file, pos, count);
	goto out_unlock;

out_trans_fail:
	gfs2_inplace_release(ip);
out_qunlock:
	gfs2_quota_unlock(ip);
out_unlock:
	gfs2_glock_dq(&ip->i_gh);
out_uninit:
	gfs2_holder_uninit(&ip->i_gh);
	return error;
}
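
/*
 * Only mode 0 (allocate and grow i_size) and FALLOC_FL_KEEP_SIZE are
 * accepted above; any other mode bits fail with -EOPNOTSUPP.
 * Illustrative userspace call preallocating space without changing the
 * file size:
 *
 *	fallocate(fd, FALLOC_FL_KEEP_SIZE, offset, len);
 */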
#ifdef CONFIG_GFS2_FS_LOCKING_DLM

/**
 * gfs2_setlease - acquire/release a file lease
 * @file: the file pointer
 * @arg: lease type
 * @fl: file lock
 *
 * We don't currently have a way to enforce a lease across the whole
 * cluster; until we do, disable leases (by just returning -EINVAL),
 * unless the administrator has requested purely local locking.
 *
 * Locking: called under lock_flocks
 *
 * Returns: errno
 */

static int gfs2_setlease(struct file *file, long arg, struct file_lock **fl)
{
	return -EINVAL;
}
/**
 * gfs2_lock - acquire/release a posix lock on a file
 * @file: the file pointer
 * @cmd: either modify or retrieve lock state, possibly wait
 * @fl: type and range of lock
 *
 * Returns: errno
 */

static int gfs2_lock(struct file *file, int cmd, struct file_lock *fl)
{
	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(file->f_mapping->host);
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;

	if (!(fl->fl_flags & FL_POSIX))
		return -ENOLCK;
	if (__mandatory_lock(&ip->i_inode) && fl->fl_type != F_UNLCK)
		return -ENOLCK;

	if (cmd == F_CANCELLK) {
		/* Hack: */
		cmd = F_SETLK;
		fl->fl_type = F_UNLCK;
	}
	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
		if (fl->fl_type == F_UNLCK)
			posix_lock_file_wait(file, fl);
		return -EIO;
	}
	if (IS_GETLK(cmd))
		return dlm_posix_get(ls->ls_dlm, ip->i_no_addr, file, fl);
	else if (fl->fl_type == F_UNLCK)
		return dlm_posix_unlock(ls->ls_dlm, ip->i_no_addr, file, fl);
	else
		return dlm_posix_lock(ls->ls_dlm, ip->i_no_addr, file, cmd, fl);
}
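
/*
 * The dlm_posix_* calls above hand POSIX locks to the DLM's plock
 * layer (linux/dlm_plock.h) rather than implementing them with glocks,
 * so POSIX lock state is maintained cluster-wide by the lock manager.
 */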
static int do_flock(struct file *file, int cmd, struct file_lock *fl)
{
	struct gfs2_file *fp = file->private_data;
	struct gfs2_holder *fl_gh = &fp->f_fl_gh;
	struct gfs2_inode *ip = GFS2_I(file_inode(file));
	struct gfs2_glock *gl;
	unsigned int state;
	int flags;
	int error = 0;

	state = (fl->fl_type == F_WRLCK) ? LM_ST_EXCLUSIVE : LM_ST_SHARED;
	flags = (IS_SETLKW(cmd) ? 0 : LM_FLAG_TRY) | GL_EXACT | GL_NOCACHE;

	mutex_lock(&fp->f_fl_mutex);

	gl = fl_gh->gh_gl;
	if (gl) {
		if (fl_gh->gh_state == state)
			goto out;
		flock_lock_file_wait(file,
				     &(struct file_lock){.fl_type = F_UNLCK});
		gfs2_glock_dq_wait(fl_gh);
		gfs2_holder_reinit(state, flags, fl_gh);
	} else {
		error = gfs2_glock_get(GFS2_SB(&ip->i_inode), ip->i_no_addr,
				       &gfs2_flock_glops, CREATE, &gl);
		if (error)
			goto out;
		gfs2_holder_init(gl, state, flags, fl_gh);
		gfs2_glock_put(gl);
	}
	error = gfs2_glock_nq(fl_gh);
	if (error) {
		gfs2_holder_uninit(fl_gh);
		if (error == GLR_TRYFAILED)
			error = -EAGAIN;
	} else {
		error = flock_lock_file_wait(file, fl);
		gfs2_assert_warn(GFS2_SB(&ip->i_inode), !error);
	}

out:
	mutex_unlock(&fp->f_fl_mutex);
	return error;
}
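
/*
 * Each open file carries at most one flock glock (f_fl_gh in struct
 * gfs2_file), so a flock taken here is enforced across the cluster.
 * GL_EXACT requires the glock in exactly the requested state, and
 * GL_NOCACHE drops it as soon as the holder is dequeued, since there
 * is no benefit in caching flock state.
 */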
static void do_unflock(struct file *file, struct file_lock *fl)
{
	struct gfs2_file *fp = file->private_data;
	struct gfs2_holder *fl_gh = &fp->f_fl_gh;

	mutex_lock(&fp->f_fl_mutex);
	flock_lock_file_wait(file, fl);
	if (fl_gh->gh_gl) {
		gfs2_glock_dq_wait(fl_gh);
		gfs2_holder_uninit(fl_gh);
	}
	mutex_unlock(&fp->f_fl_mutex);
}
/**
 * gfs2_flock - acquire/release a flock lock on a file
 * @file: the file pointer
 * @cmd: either modify or retrieve lock state, possibly wait
 * @fl: type and range of lock
 *
 * Returns: errno
 */

static int gfs2_flock(struct file *file, int cmd, struct file_lock *fl)
{
	if (!(fl->fl_flags & FL_FLOCK))
		return -ENOLCK;
	if (fl->fl_type & LOCK_MAND)
		return -EOPNOTSUPP;

	if (fl->fl_type == F_UNLCK) {
		do_unflock(file, fl);
		return 0;
	} else {
		return do_flock(file, cmd, fl);
	}
}
const struct file_operations gfs2_file_fops = {
	.llseek		= gfs2_llseek,
	.read		= do_sync_read,
	.aio_read	= generic_file_aio_read,
	.write		= do_sync_write,
	.aio_write	= gfs2_file_aio_write,
	.unlocked_ioctl	= gfs2_ioctl,
	.mmap		= gfs2_mmap,
	.open		= gfs2_open,
	.release	= gfs2_release,
	.fsync		= gfs2_fsync,
	.lock		= gfs2_lock,
	.flock		= gfs2_flock,
	.splice_read	= generic_file_splice_read,
	.splice_write	= generic_file_splice_write,
	.setlease	= gfs2_setlease,
	.fallocate	= gfs2_fallocate,
};
const struct file_operations gfs2_dir_fops = {
	.readdir	= gfs2_readdir,
	.unlocked_ioctl	= gfs2_ioctl,
	.open		= gfs2_open,
	.release	= gfs2_release,
	.fsync		= gfs2_fsync,
	.lock		= gfs2_lock,
	.flock		= gfs2_flock,
	.llseek		= default_llseek,
};

#endif /* CONFIG_GFS2_FS_LOCKING_DLM */
const struct file_operations gfs2_file_fops_nolock = {
	.llseek		= gfs2_llseek,
	.read		= do_sync_read,
	.aio_read	= generic_file_aio_read,
	.write		= do_sync_write,
	.aio_write	= gfs2_file_aio_write,
	.unlocked_ioctl	= gfs2_ioctl,
	.mmap		= gfs2_mmap,
	.open		= gfs2_open,
	.release	= gfs2_release,
	.fsync		= gfs2_fsync,
	.splice_read	= generic_file_splice_read,
	.splice_write	= generic_file_splice_write,
	.setlease	= generic_setlease,
	.fallocate	= gfs2_fallocate,
};
const struct file_operations gfs2_dir_fops_nolock = {
	.readdir	= gfs2_readdir,
	.unlocked_ioctl	= gfs2_ioctl,
	.open		= gfs2_open,
	.release	= gfs2_release,
	.fsync		= gfs2_fsync,
	.llseek		= default_llseek,
};