/*
 * linux/fs/ext4/file.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 * from
 *
 * linux/fs/minix/file.c
 *
 * Copyright (C) 1991, 1992  Linus Torvalds
 *
 * ext4 fs regular file handling primitives
 *
 * 64-bit file support on 64-bit platforms by Jakub Jelinek
 *	(jj@sunsite.ms.mff.cuni.cz)
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/mount.h>
#include <linux/path.h>
#include <linux/quotaops.h>
#include <linux/pagevec.h>
#include "ext4.h"
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"

/*
 * Called when an inode is released. Note that this is different
 * from ext4_file_open: open gets called at every open, but release
 * gets called only when /all/ the files are closed.
 */
static int ext4_release_file(struct inode *inode, struct file *filp)
{
	if (ext4_test_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE)) {
		ext4_alloc_da_blocks(inode);
		ext4_clear_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
	}
	/* if we are the last writer on the inode, drop the block reservation */
	if ((filp->f_mode & FMODE_WRITE) &&
			(atomic_read(&inode->i_writecount) == 1) &&
			!EXT4_I(inode)->i_reserved_data_blocks)
	{
		down_write(&EXT4_I(inode)->i_data_sem);
		ext4_discard_preallocations(inode);
		up_write(&EXT4_I(inode)->i_data_sem);
	}
	if (is_dx(inode) && filp->private_data)
		ext4_htree_free_dir_info(filp->private_data);

	return 0;
}

void ext4_unwritten_wait(struct inode *inode)
{
	wait_queue_head_t *wq = ext4_ioend_wq(inode);

	wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_unwritten) == 0));
}

/*
 * This tests whether the IO in question is block-aligned or not.
 * Ext4 utilizes unwritten extents when hole-filling during direct IO, and they
 * are converted to written only after the IO is complete.  Until they are
 * mapped, these blocks appear as holes, so dio_zero_block() will assume that
 * it needs to zero out portions of the start and/or end block.  If 2 AIO
 * threads are at work on the same unwritten block, they must be synchronized
 * or one thread will zero the other's data, causing corruption.
 */
static int
ext4_unaligned_aio(struct inode *inode, const struct iovec *iov,
		   unsigned long nr_segs, loff_t pos)
{
	struct super_block *sb = inode->i_sb;
	int blockmask = sb->s_blocksize - 1;
	size_t count = iov_length(iov, nr_segs);
	loff_t final_size = pos + count;

	if (pos >= inode->i_size)
		return 0;

	if ((pos & blockmask) || (final_size & blockmask))
		return 1;

	return 0;
}
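
/*
 * Illustrative example (annotation, not in the original source):
 * with a 4 KiB block size, blockmask is 0xfff.  A 1024-byte AIO write
 * at pos 2048 (below i_size) has pos & blockmask == 0x800, so
 * ext4_unaligned_aio() returns 1 and the request must be serialized.
 * A 4096-byte write at pos 4096 leaves both pos and final_size
 * block-aligned, so it may proceed without the per-inode AIO mutex.
 */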

static ssize_t
ext4_file_dio_write(struct kiocb *iocb, const struct iovec *iov,
		    unsigned long nr_segs, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	struct blk_plug plug;
	int unaligned_aio = 0;
	ssize_t ret;
	int overwrite = 0;
	size_t length = iov_length(iov, nr_segs);

	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) &&
	    !is_sync_kiocb(iocb))
		unaligned_aio = ext4_unaligned_aio(inode, iov, nr_segs, pos);

	/* Unaligned direct AIO must be serialized; see comment above */
	if (unaligned_aio) {
		mutex_lock(ext4_aio_mutex(inode));
		ext4_unwritten_wait(inode);
	}

	BUG_ON(iocb->ki_pos != pos);

	mutex_lock(&inode->i_mutex);
	blk_start_plug(&plug);

	iocb->private = &overwrite;

	/* check whether we do a DIO overwrite or not */
	if (ext4_should_dioread_nolock(inode) && !unaligned_aio &&
	    !file->f_mapping->nrpages && pos + length <= i_size_read(inode)) {
		struct ext4_map_blocks map;
		unsigned int blkbits = inode->i_blkbits;
		int err, len;

		map.m_lblk = pos >> blkbits;
		map.m_len = (EXT4_BLOCK_ALIGN(pos + length, blkbits) >> blkbits)
			- map.m_lblk;
		len = map.m_len;

		err = ext4_map_blocks(NULL, inode, &map, 0);
		/*
		 * 'err == len' means that all of the blocks have been
		 * preallocated, whether or not they are initialized.  To
		 * exclude uninitialized extents we also need to check
		 * m_flags.  Two conditions indicate an initialized extent:
		 * 1) if we hit the extent cache, the EXT4_MAP_MAPPED flag
		 *    is returned;
		 * 2) if we do a real lookup, no flags are returned.
		 * So we check both conditions here.
		 */
		if (err == len && (map.m_flags & EXT4_MAP_MAPPED))
			overwrite = 1;
	}

	ret = __generic_file_aio_write(iocb, iov, nr_segs, &iocb->ki_pos);
	mutex_unlock(&inode->i_mutex);

	if (ret > 0 || ret == -EIOCBQUEUED) {
		ssize_t err;

		err = generic_write_sync(file, pos, ret);
		if (err < 0 && ret > 0)
			ret = err;
	}
	blk_finish_plug(&plug);

	if (unaligned_aio)
		mutex_unlock(ext4_aio_mutex(inode));

	return ret;
}
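
/*
 * Note (annotation, not in the original source): the "overwrite" flag
 * computed above is handed to the direct I/O path via iocb->private.
 * An aligned write that lands entirely in already-mapped, initialized
 * blocks below i_size needs no block allocation or unwritten-extent
 * conversion, which lets the DIO code relax its locking for that
 * request.
 */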

static ssize_t
ext4_file_write(struct kiocb *iocb, const struct iovec *iov,
		unsigned long nr_segs, loff_t pos)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	/*
	 * If we have encountered a bitmap-format file, the size limit
	 * is smaller than s_maxbytes, which is for extent-mapped files.
	 */

	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
		size_t length = iov_length(iov, nr_segs);

		if ((pos > sbi->s_bitmap_maxbytes ||
		    (pos == sbi->s_bitmap_maxbytes && length > 0)))
			return -EFBIG;

		if (pos + length > sbi->s_bitmap_maxbytes) {
			nr_segs = iov_shorten((struct iovec *)iov, nr_segs,
					      sbi->s_bitmap_maxbytes - pos);
		}
	}

	if (unlikely(iocb->ki_filp->f_flags & O_DIRECT))
		ret = ext4_file_dio_write(iocb, iov, nr_segs, pos);
	else
		ret = generic_file_aio_write(iocb, iov, nr_segs, pos);

	return ret;
}

static const struct vm_operations_struct ext4_file_vm_ops = {
	.fault		= filemap_fault,
	.page_mkwrite	= ext4_page_mkwrite,
	.remap_pages	= generic_file_remap_pages,
};

static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct address_space *mapping = file->f_mapping;

	if (!mapping->a_ops->readpage)
		return -ENOEXEC;
	file_accessed(file);
	vma->vm_ops = &ext4_file_vm_ops;
	return 0;
}

static int ext4_file_open(struct inode *inode, struct file *filp)
{
	struct super_block *sb = inode->i_sb;
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct vfsmount *mnt = filp->f_path.mnt;
	struct path path;
	char buf[64], *cp;

	if (unlikely(!(sbi->s_mount_flags & EXT4_MF_MNTDIR_SAMPLED) &&
		     !(sb->s_flags & MS_RDONLY))) {
		sbi->s_mount_flags |= EXT4_MF_MNTDIR_SAMPLED;
		/*
		 * Sample where the filesystem has been mounted and
		 * store it in the superblock for sysadmin convenience
		 * when trying to sort through large numbers of block
		 * devices or filesystem images.
		 */
		memset(buf, 0, sizeof(buf));
		path.mnt = mnt;
		path.dentry = mnt->mnt_root;
		cp = d_path(&path, buf, sizeof(buf));
		if (!IS_ERR(cp)) {
			handle_t *handle;
			int err;

			handle = ext4_journal_start_sb(sb, EXT4_HT_MISC, 1);
			if (IS_ERR(handle))
				return PTR_ERR(handle);
			err = ext4_journal_get_write_access(handle, sbi->s_sbh);
			if (err) {
				ext4_journal_stop(handle);
				return err;
			}
			strlcpy(sbi->s_es->s_last_mounted, cp,
				sizeof(sbi->s_es->s_last_mounted));
			ext4_handle_dirty_super(handle, sb);
			ext4_journal_stop(handle);
		}
	}
	/*
	 * Set up the jbd2_inode if we are opening the inode for
	 * writing and the journal is present
	 */
	if (sbi->s_journal && !ei->jinode && (filp->f_mode & FMODE_WRITE)) {
		struct jbd2_inode *jinode = jbd2_alloc_inode(GFP_KERNEL);

		spin_lock(&inode->i_lock);
		if (!ei->jinode) {
			if (!jinode) {
				spin_unlock(&inode->i_lock);
				return -ENOMEM;
			}
			ei->jinode = jinode;
			jbd2_journal_init_jbd_inode(ei->jinode, inode);
			jinode = NULL;
		}
		spin_unlock(&inode->i_lock);
		if (unlikely(jinode != NULL))
			jbd2_free_inode(jinode);
	}
	return dquot_file_open(inode, filp);
}
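
/*
 * Note (annotation, not in the original source): the jinode setup above
 * uses the common allocate-then-recheck pattern: the jbd2_inode is
 * allocated outside i_lock (the GFP_KERNEL allocation may sleep), the
 * ei->jinode test is repeated under the lock, and the allocation is
 * freed if another opener won the race.
 */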
/*
 * Here we use ext4_map_blocks() to get a block mapping for an
 * extent-based file rather than ext4_ext_walk_space(), because this
 * lets us handle SEEK_DATA/SEEK_HOLE for both block-mapped and
 * extent-mapped files in the same function.  Once the extent status
 * tree is fully implemented, it will track all extent status for a
 * file and we can use it directly to retrieve the offset for
 * SEEK_DATA/SEEK_HOLE.
 */

/*
 * When we retrieve the offset for SEEK_DATA/SEEK_HOLE, we need to
 * look up the page cache to check whether there is any data in
 * [startoff, endoff], because if this range contains an unwritten
 * extent, we treat the extent as data or as a hole according to
 * whether the page cache has data or not.
 */
static int ext4_find_unwritten_pgoff(struct inode *inode,
				     int whence,
				     struct ext4_map_blocks *map,
				     loff_t *offset)
{
	struct pagevec pvec;
	unsigned int blkbits;
	pgoff_t index;
	pgoff_t end;
	loff_t endoff;
	loff_t startoff;
	loff_t lastoff;
	int found = 0;

	blkbits = inode->i_sb->s_blocksize_bits;
	startoff = *offset;
	lastoff = startoff;
	endoff = (map->m_lblk + map->m_len) << blkbits;

	index = startoff >> PAGE_CACHE_SHIFT;
	end = endoff >> PAGE_CACHE_SHIFT;

	pagevec_init(&pvec, 0);
	do {
		int i, num;
		unsigned long nr_pages;

		num = min_t(pgoff_t, end - index, PAGEVEC_SIZE);
		nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index,
					  (pgoff_t)num);
		if (nr_pages == 0) {
			if (whence == SEEK_DATA)
				break;

			BUG_ON(whence != SEEK_HOLE);
			/*
			 * If this is the first pass through the loop and
			 * the offset is not beyond the end offset, there
			 * is a hole at this offset.
			 */
			if (lastoff == startoff || lastoff < endoff)
				found = 1;
			break;
		}

		/*
		 * If this is the first pass through the loop and the offset
		 * is smaller than the first page offset, there is a hole at
		 * this offset.
		 */
		if (lastoff == startoff && whence == SEEK_HOLE &&
		    lastoff < page_offset(pvec.pages[0])) {
			found = 1;
			break;
		}

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];
			struct buffer_head *bh, *head;

			/*
			 * If the current offset is not beyond the end of the
			 * given range, it is a hole.
			 */
			if (lastoff < endoff && whence == SEEK_HOLE &&
			    page->index > end) {
				found = 1;
				*offset = lastoff;
				goto out;
			}

			lock_page(page);

			if (unlikely(page->mapping != inode->i_mapping)) {
				unlock_page(page);
				continue;
			}

			if (!page_has_buffers(page)) {
				unlock_page(page);
				continue;
			}

			lastoff = page_offset(page);
			bh = head = page_buffers(page);
			do {
				if (buffer_uptodate(bh) ||
				    buffer_unwritten(bh)) {
					if (whence == SEEK_DATA)
						found = 1;
				} else {
					if (whence == SEEK_HOLE)
						found = 1;
				}
				if (found) {
					*offset = max_t(loff_t,
							startoff, lastoff);
					unlock_page(page);
					goto out;
				}
				lastoff += bh->b_size;
				bh = bh->b_this_page;
			} while (bh != head);

			lastoff = page_offset(page) + PAGE_SIZE;
			unlock_page(page);
		}

		/*
		 * We found fewer pages than we asked for, so there must
		 * be a hole in that range.
		 */
		if (nr_pages < num && whence == SEEK_HOLE) {
			found = 1;
			*offset = lastoff;
			break;
		}

		index = pvec.pages[i - 1]->index + 1;
		pagevec_release(&pvec);
	} while (index <= end);

out:
	pagevec_release(&pvec);
	return found;
}
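
/*
 * Note (annotation, not in the original source): an unwritten extent
 * (e.g. from fallocate) reads as zeroes from disk, but a recent
 * buffered write into it may still live only in the page cache.  The
 * buffer-head walk above is what allows SEEK_DATA to report such a
 * page as data even though the on-disk extent is still marked
 * unwritten.
 */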

/*
 * ext4_seek_data() retrieves the offset for SEEK_DATA.
 */
static loff_t ext4_seek_data(struct file *file, loff_t offset, loff_t maxsize)
{
	struct inode *inode = file->f_mapping->host;
	struct ext4_map_blocks map;
	struct extent_status es;
	ext4_lblk_t start, last, end;
	loff_t dataoff, isize;
	int blkbits;
	int ret = 0;

	mutex_lock(&inode->i_mutex);

	isize = i_size_read(inode);
	if (offset >= isize) {
		mutex_unlock(&inode->i_mutex);
		return -ENXIO;
	}

	blkbits = inode->i_sb->s_blocksize_bits;
	start = offset >> blkbits;
	last = start;
	end = isize >> blkbits;
	dataoff = offset;

	do {
		map.m_lblk = last;
		map.m_len = end - last + 1;
		ret = ext4_map_blocks(NULL, inode, &map, 0);
		if (ret > 0 && !(map.m_flags & EXT4_MAP_UNWRITTEN)) {
			if (last != start)
				dataoff = last << blkbits;
			break;
		}

		/*
		 * If there is a delayed extent at this offset, it is
		 * treated as data.
		 */
		ext4_es_find_delayed_extent(inode, last, &es);
		if (es.es_len != 0 && in_range(last, es.es_lblk, es.es_len)) {
			if (last != start)
				dataoff = last << blkbits;
			break;
		}

		/*
		 * If there is an unwritten extent at this offset, it is
		 * treated as data or as a hole according to whether the
		 * page cache has data for it.
		 */
		if (map.m_flags & EXT4_MAP_UNWRITTEN) {
			int unwritten;
			unwritten = ext4_find_unwritten_pgoff(inode, SEEK_DATA,
							      &map, &dataoff);
			if (unwritten)
				break;
		}

		last++;
		dataoff = last << blkbits;
	} while (last <= end);

	mutex_unlock(&inode->i_mutex);

	if (dataoff > isize)
		return -ENXIO;

	if (dataoff < 0 && !(file->f_mode & FMODE_UNSIGNED_OFFSET))
		return -EINVAL;
	if (dataoff > maxsize)
		return -EINVAL;

	if (dataoff != file->f_pos) {
		file->f_pos = dataoff;
		file->f_version = 0;
	}

	return dataoff;
}

/*
 * ext4_seek_hole() retrieves the offset for SEEK_HOLE.
 */
static loff_t ext4_seek_hole(struct file *file, loff_t offset, loff_t maxsize)
{
	struct inode *inode = file->f_mapping->host;
	struct ext4_map_blocks map;
	struct extent_status es;
	ext4_lblk_t start, last, end;
	loff_t holeoff, isize;
	int blkbits;
	int ret = 0;

	mutex_lock(&inode->i_mutex);

	isize = i_size_read(inode);
	if (offset >= isize) {
		mutex_unlock(&inode->i_mutex);
		return -ENXIO;
	}

	blkbits = inode->i_sb->s_blocksize_bits;
	start = offset >> blkbits;
	last = start;
	end = isize >> blkbits;
	holeoff = offset;

	do {
		map.m_lblk = last;
		map.m_len = end - last + 1;
		ret = ext4_map_blocks(NULL, inode, &map, 0);
		if (ret > 0 && !(map.m_flags & EXT4_MAP_UNWRITTEN)) {
			last += ret;
			holeoff = last << blkbits;
			continue;
		}

		/*
		 * If there is a delayed extent at this offset, we skip
		 * over it; it cannot be a hole.
		 */
		ext4_es_find_delayed_extent(inode, last, &es);
		if (es.es_len != 0 && in_range(last, es.es_lblk, es.es_len)) {
			last = es.es_lblk + es.es_len;
			holeoff = last << blkbits;
			continue;
		}

		/*
		 * If there is an unwritten extent at this offset, it is
		 * treated as data or as a hole according to whether the
		 * page cache has data for it.
		 */
		if (map.m_flags & EXT4_MAP_UNWRITTEN) {
			int unwritten;
			unwritten = ext4_find_unwritten_pgoff(inode, SEEK_HOLE,
							      &map, &holeoff);
			if (!unwritten) {
				last += ret;
				holeoff = last << blkbits;
				continue;
			}
		}

		/* found a hole */
		break;
	} while (last <= end);

	mutex_unlock(&inode->i_mutex);

	if (holeoff > isize)
		holeoff = isize;

	if (holeoff < 0 && !(file->f_mode & FMODE_UNSIGNED_OFFSET))
		return -EINVAL;
	if (holeoff > maxsize)
		return -EINVAL;

	if (holeoff != file->f_pos) {
		file->f_pos = holeoff;
		file->f_version = 0;
	}

	return holeoff;
}

/*
 * ext4_llseek() handles both block-mapped and extent-mapped maxbytes values
 * by calling generic_file_llseek_size() with the appropriate maxbytes
 * value for each.
 */
loff_t ext4_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes;

	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
		maxbytes = EXT4_SB(inode->i_sb)->s_bitmap_maxbytes;
	else
		maxbytes = inode->i_sb->s_maxbytes;

	switch (whence) {
	case SEEK_SET:
	case SEEK_CUR:
	case SEEK_END:
		return generic_file_llseek_size(file, offset, whence,
						maxbytes, i_size_read(inode));
	case SEEK_DATA:
		return ext4_seek_data(file, offset, maxbytes);
	case SEEK_HOLE:
		return ext4_seek_hole(file, offset, maxbytes);
	}

	return -EINVAL;
}
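
/*
 * Userspace sketch (annotation, not in the original source) of the
 * SEEK_DATA/SEEK_HOLE interface implemented above:
 *
 *	off_t data = lseek(fd, 0, SEEK_DATA);
 *	off_t hole = lseek(fd, data, SEEK_HOLE);
 *
 * Each call returns -1 with errno set to ENXIO once the offset reaches
 * or passes end-of-file, matching the i_size checks in
 * ext4_seek_data() and ext4_seek_hole().
 */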

const struct file_operations ext4_file_operations = {
	.llseek		= ext4_llseek,
	.read		= do_sync_read,
	.write		= do_sync_write,
	.aio_read	= generic_file_aio_read,
	.aio_write	= ext4_file_write,
	.unlocked_ioctl = ext4_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= ext4_compat_ioctl,
#endif
	.mmap		= ext4_file_mmap,
	.open		= ext4_file_open,
	.release	= ext4_release_file,
	.fsync		= ext4_sync_file,
	.splice_read	= generic_file_splice_read,
	.splice_write	= generic_file_splice_write,
	.fallocate	= ext4_fallocate,
};

const struct inode_operations ext4_file_inode_operations = {
	.setattr	= ext4_setattr,
	.getattr	= ext4_getattr,
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= ext4_listxattr,
	.removexattr	= generic_removexattr,
	.get_acl	= ext4_get_acl,
	.fiemap		= ext4_fiemap,
};