/*
 * fs/xfs/xfs_iops.c — VFS inode operations for XFS.
 * Snapshot taken during the tree-wide "switch ->create() to umode_t"
 * conversion series.
 */
1 /*
2 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17 */
18 #include "xfs.h"
19 #include "xfs_fs.h"
20 #include "xfs_acl.h"
21 #include "xfs_bit.h"
22 #include "xfs_log.h"
23 #include "xfs_inum.h"
24 #include "xfs_trans.h"
25 #include "xfs_sb.h"
26 #include "xfs_ag.h"
27 #include "xfs_alloc.h"
28 #include "xfs_quota.h"
29 #include "xfs_mount.h"
30 #include "xfs_bmap_btree.h"
31 #include "xfs_dinode.h"
32 #include "xfs_inode.h"
33 #include "xfs_bmap.h"
34 #include "xfs_rtalloc.h"
35 #include "xfs_error.h"
36 #include "xfs_itable.h"
37 #include "xfs_rw.h"
38 #include "xfs_attr.h"
39 #include "xfs_buf_item.h"
40 #include "xfs_utils.h"
41 #include "xfs_vnodeops.h"
42 #include "xfs_inode_item.h"
43 #include "xfs_trace.h"
44
45 #include <linux/capability.h>
46 #include <linux/xattr.h>
47 #include <linux/namei.h>
48 #include <linux/posix_acl.h>
49 #include <linux/security.h>
50 #include <linux/fiemap.h>
51 #include <linux/slab.h>
52
53 /*
54 * Bring the timestamps in the XFS inode uptodate.
55 *
56 * Used before writing the inode to disk.
57 */
58 void
59 xfs_synchronize_times(
60 xfs_inode_t *ip)
61 {
62 struct inode *inode = VFS_I(ip);
63
64 ip->i_d.di_atime.t_sec = (__int32_t)inode->i_atime.tv_sec;
65 ip->i_d.di_atime.t_nsec = (__int32_t)inode->i_atime.tv_nsec;
66 ip->i_d.di_ctime.t_sec = (__int32_t)inode->i_ctime.tv_sec;
67 ip->i_d.di_ctime.t_nsec = (__int32_t)inode->i_ctime.tv_nsec;
68 ip->i_d.di_mtime.t_sec = (__int32_t)inode->i_mtime.tv_sec;
69 ip->i_d.di_mtime.t_nsec = (__int32_t)inode->i_mtime.tv_nsec;
70 }
71
/*
 * If the linux inode is valid, mark it dirty, else mark the dirty state
 * in the XFS inode to make sure we pick it up when reclaiming the inode.
 */
void
xfs_mark_inode_dirty_sync(
	xfs_inode_t	*ip)
{
	struct inode	*inode = VFS_I(ip);

	if (!(inode->i_state & (I_WILL_FREE|I_FREEING)))
		mark_inode_dirty_sync(inode);
	else {
		/*
		 * Inode is being torn down by the VFS: record the dirty
		 * state in the XFS inode instead.  The compiler barrier
		 * keeps the i_state check ordered before the store so
		 * reclaim is guaranteed to observe i_update_core.
		 */
		barrier();
		ip->i_update_core = 1;
	}
}
89
/*
 * Non-sync variant of xfs_mark_inode_dirty_sync(): mark the Linux inode
 * dirty if it is still live, otherwise record the dirty state in the
 * XFS inode so reclaim writes it back.
 */
void
xfs_mark_inode_dirty(
	xfs_inode_t	*ip)
{
	struct inode	*inode = VFS_I(ip);

	if (!(inode->i_state & (I_WILL_FREE|I_FREEING)))
		mark_inode_dirty(inode);
	else {
		/* see xfs_mark_inode_dirty_sync() for the ordering note */
		barrier();
		ip->i_update_core = 1;
	}

}
104
105
106 int xfs_initxattrs(struct inode *inode, const struct xattr *xattr_array,
107 void *fs_info)
108 {
109 const struct xattr *xattr;
110 struct xfs_inode *ip = XFS_I(inode);
111 int error = 0;
112
113 for (xattr = xattr_array; xattr->name != NULL; xattr++) {
114 error = xfs_attr_set(ip, xattr->name, xattr->value,
115 xattr->value_len, ATTR_SECURE);
116 if (error < 0)
117 break;
118 }
119 return error;
120 }
121
/*
 * Hook in SELinux.  This is not quite correct yet, what we really need
 * here (as we do for default ACLs) is a mechanism by which creation of
 * these attrs can be journalled at inode creation time (along with the
 * inode, of course, such that log replay can't cause these to be lost).
 */

STATIC int
xfs_init_security(
	struct inode	*inode,
	struct inode	*dir,
	const struct qstr *qstr)
{
	/* xfs_initxattrs persists whatever the LSM hands back */
	return security_inode_init_security(inode, dir, qstr,
					    &xfs_initxattrs, NULL);
}
138
/*
 * Fill an xfs_name from a dentry's qstr.  No copy is made: namep
 * borrows the dentry's name storage, so it is only valid as long as
 * the dentry is.
 */
static void
xfs_dentry_to_name(
	struct xfs_name	*namep,
	struct dentry	*dentry)
{
	namep->name = dentry->d_name.name;
	namep->len = dentry->d_name.len;
}
147
/*
 * Undo a partially completed create: remove the just-created directory
 * entry again and drop the inode reference taken at creation.
 */
STATIC void
xfs_cleanup_inode(
	struct inode	*dir,
	struct inode	*inode,
	struct dentry	*dentry)
{
	struct xfs_name	teardown;

	/* Oh, the horror.
	 * If we can't add the ACL or we fail in
	 * xfs_init_security we must back out.
	 * ENOSPC can hit here, among other things.
	 */
	xfs_dentry_to_name(&teardown, dentry);

	xfs_remove(XFS_I(dir), &teardown, XFS_I(inode));
	iput(inode);
}
166
167 STATIC int
168 xfs_vn_mknod(
169 struct inode *dir,
170 struct dentry *dentry,
171 int mode,
172 dev_t rdev)
173 {
174 struct inode *inode;
175 struct xfs_inode *ip = NULL;
176 struct posix_acl *default_acl = NULL;
177 struct xfs_name name;
178 int error;
179
180 /*
181 * Irix uses Missed'em'V split, but doesn't want to see
182 * the upper 5 bits of (14bit) major.
183 */
184 if (S_ISCHR(mode) || S_ISBLK(mode)) {
185 if (unlikely(!sysv_valid_dev(rdev) || MAJOR(rdev) & ~0x1ff))
186 return -EINVAL;
187 rdev = sysv_encode_dev(rdev);
188 } else {
189 rdev = 0;
190 }
191
192 if (IS_POSIXACL(dir)) {
193 default_acl = xfs_get_acl(dir, ACL_TYPE_DEFAULT);
194 if (IS_ERR(default_acl))
195 return PTR_ERR(default_acl);
196
197 if (!default_acl)
198 mode &= ~current_umask();
199 }
200
201 xfs_dentry_to_name(&name, dentry);
202 error = xfs_create(XFS_I(dir), &name, mode, rdev, &ip);
203 if (unlikely(error))
204 goto out_free_acl;
205
206 inode = VFS_I(ip);
207
208 error = xfs_init_security(inode, dir, &dentry->d_name);
209 if (unlikely(error))
210 goto out_cleanup_inode;
211
212 if (default_acl) {
213 error = -xfs_inherit_acl(inode, default_acl);
214 default_acl = NULL;
215 if (unlikely(error))
216 goto out_cleanup_inode;
217 }
218
219
220 d_instantiate(dentry, inode);
221 return -error;
222
223 out_cleanup_inode:
224 xfs_cleanup_inode(dir, inode, dentry);
225 out_free_acl:
226 posix_acl_release(default_acl);
227 return -error;
228 }
229
/*
 * ->create: regular-file creation is just mknod without a device
 * number; mode is umode_t per the VFS prototype.
 */
STATIC int
xfs_vn_create(
	struct inode	*dir,
	struct dentry	*dentry,
	umode_t		mode,
	struct nameidata *nd)
{
	return xfs_vn_mknod(dir, dentry, mode, 0);
}
239
/*
 * ->mkdir: delegate to mknod with S_IFDIR or'd into the mode and no
 * device number.
 */
STATIC int
xfs_vn_mkdir(
	struct inode	*dir,
	struct dentry	*dentry,
	umode_t		mode)
{
	return xfs_vn_mknod(dir, dentry, mode|S_IFDIR, 0);
}
248
249 STATIC struct dentry *
250 xfs_vn_lookup(
251 struct inode *dir,
252 struct dentry *dentry,
253 struct nameidata *nd)
254 {
255 struct xfs_inode *cip;
256 struct xfs_name name;
257 int error;
258
259 if (dentry->d_name.len >= MAXNAMELEN)
260 return ERR_PTR(-ENAMETOOLONG);
261
262 xfs_dentry_to_name(&name, dentry);
263 error = xfs_lookup(XFS_I(dir), &name, &cip, NULL);
264 if (unlikely(error)) {
265 if (unlikely(error != ENOENT))
266 return ERR_PTR(-error);
267 d_add(dentry, NULL);
268 return NULL;
269 }
270
271 return d_splice_alias(VFS_I(cip), dentry);
272 }
273
/*
 * ->lookup for case-insensitive (ASCII-CI) filesystems.  The name
 * stored on disk may differ in case from the name being looked up;
 * when it does, splice in a dentry carrying the on-disk spelling
 * (returned by xfs_lookup() in ci_name).
 */
STATIC struct dentry *
xfs_vn_ci_lookup(
	struct inode	*dir,
	struct dentry	*dentry,
	struct nameidata *nd)
{
	struct xfs_inode *ip;
	struct xfs_name	xname;
	struct xfs_name ci_name;
	struct qstr	dname;
	int		error;

	if (dentry->d_name.len >= MAXNAMELEN)
		return ERR_PTR(-ENAMETOOLONG);

	xfs_dentry_to_name(&xname, dentry);
	error = xfs_lookup(XFS_I(dir), &xname, &ip, &ci_name);
	if (unlikely(error)) {
		if (unlikely(error != ENOENT))
			return ERR_PTR(-error);
		/*
		 * call d_add(dentry, NULL) here when d_drop_negative_children
		 * is called in xfs_vn_mknod (ie. allow negative dentries
		 * with CI filesystems).
		 */
		return NULL;
	}

	/* if exact match, just splice and exit */
	if (!ci_name.name)
		return d_splice_alias(VFS_I(ip), dentry);

	/* else case-insensitive match... */
	dname.name = ci_name.name;
	dname.len = ci_name.len;
	dentry = d_add_ci(dentry, VFS_I(ip), &dname);
	/* d_add_ci copied the name; free the lookup's allocation */
	kmem_free(ci_name.name);
	return dentry;
}
313
/*
 * ->link: create a hard link to old_dentry's inode under @dir.  Takes
 * an extra inode reference for the new dentry on success.
 */
STATIC int
xfs_vn_link(
	struct dentry	*old_dentry,
	struct inode	*dir,
	struct dentry	*dentry)
{
	struct inode	*inode = old_dentry->d_inode;
	struct xfs_name	name;
	int		error;

	xfs_dentry_to_name(&name, dentry);

	error = xfs_link(XFS_I(dir), XFS_I(inode), &name);
	if (unlikely(error))
		return -error;	/* positive XFS error -> negative errno */

	ihold(inode);
	d_instantiate(dentry, inode);
	return 0;
}
334
/*
 * ->unlink (and ->rmdir, see the inode_operations tables): remove the
 * directory entry for @dentry from @dir.
 */
STATIC int
xfs_vn_unlink(
	struct inode	*dir,
	struct dentry	*dentry)
{
	struct xfs_name	name;
	int		error;

	xfs_dentry_to_name(&name, dentry);

	error = -xfs_remove(XFS_I(dir), &name, XFS_I(dentry->d_inode));
	if (error)
		return error;

	/*
	 * With unlink, the VFS makes the dentry "negative": no inode,
	 * but still hashed. This is incompatible with case-insensitive
	 * mode, so invalidate (unhash) the dentry in CI-mode.
	 */
	if (xfs_sb_version_hasasciici(&XFS_M(dir->i_sb)->m_sb))
		d_invalidate(dentry);
	return 0;
}
358
359 STATIC int
360 xfs_vn_symlink(
361 struct inode *dir,
362 struct dentry *dentry,
363 const char *symname)
364 {
365 struct inode *inode;
366 struct xfs_inode *cip = NULL;
367 struct xfs_name name;
368 int error;
369 mode_t mode;
370
371 mode = S_IFLNK |
372 (irix_symlink_mode ? 0777 & ~current_umask() : S_IRWXUGO);
373 xfs_dentry_to_name(&name, dentry);
374
375 error = xfs_symlink(XFS_I(dir), &name, symname, mode, &cip);
376 if (unlikely(error))
377 goto out;
378
379 inode = VFS_I(cip);
380
381 error = xfs_init_security(inode, dir, &dentry->d_name);
382 if (unlikely(error))
383 goto out_cleanup_inode;
384
385 d_instantiate(dentry, inode);
386 return 0;
387
388 out_cleanup_inode:
389 xfs_cleanup_inode(dir, inode, dentry);
390 out:
391 return -error;
392 }
393
394 STATIC int
395 xfs_vn_rename(
396 struct inode *odir,
397 struct dentry *odentry,
398 struct inode *ndir,
399 struct dentry *ndentry)
400 {
401 struct inode *new_inode = ndentry->d_inode;
402 struct xfs_name oname;
403 struct xfs_name nname;
404
405 xfs_dentry_to_name(&oname, odentry);
406 xfs_dentry_to_name(&nname, ndentry);
407
408 return -xfs_rename(XFS_I(odir), &oname, XFS_I(odentry->d_inode),
409 XFS_I(ndir), &nname, new_inode ?
410 XFS_I(new_inode) : NULL);
411 }
412
/*
 * careful here - this function can get called recursively, so
 * we need to be very careful about how much stack we use.
 * uio is kmalloced for this reason...
 */
STATIC void *
xfs_vn_follow_link(
	struct dentry	*dentry,
	struct nameidata *nd)
{
	char		*link;
	int		error = -ENOMEM;

	/* heap-allocate the target buffer to keep stack usage low */
	link = kmalloc(MAXPATHLEN+1, GFP_KERNEL);
	if (!link)
		goto out_err;

	error = -xfs_readlink(XFS_I(dentry->d_inode), link);
	if (unlikely(error))
		goto out_kfree;

	/* buffer ownership passes to nd; freed in xfs_vn_put_link() */
	nd_set_link(nd, link);
	return NULL;

 out_kfree:
	kfree(link);
 out_err:
	/* stash the error for the VFS to pick out of the nameidata */
	nd_set_link(nd, ERR_PTR(error));
	return NULL;
}
443
444 STATIC void
445 xfs_vn_put_link(
446 struct dentry *dentry,
447 struct nameidata *nd,
448 void *p)
449 {
450 char *s = nd_get_link(nd);
451
452 if (!IS_ERR(s))
453 kfree(s);
454 }
455
/*
 * ->getattr: fill a kstat from the XFS inode core, bypassing
 * generic_fillattr so we can report XFS-specific block sizes and the
 * Missed'em'V-encoded device numbers.
 */
STATIC int
xfs_vn_getattr(
	struct vfsmount	*mnt,
	struct dentry	*dentry,
	struct kstat	*stat)
{
	struct inode	*inode = dentry->d_inode;
	struct xfs_inode *ip = XFS_I(inode);
	struct xfs_mount *mp = ip->i_mount;

	trace_xfs_getattr(ip);

	if (XFS_FORCED_SHUTDOWN(mp))
		return -XFS_ERROR(EIO);

	stat->size = XFS_ISIZE(ip);
	stat->dev = inode->i_sb->s_dev;
	stat->mode = ip->i_d.di_mode;
	stat->nlink = ip->i_d.di_nlink;
	stat->uid = ip->i_d.di_uid;
	stat->gid = ip->i_d.di_gid;
	stat->ino = ip->i_ino;
	stat->atime = inode->i_atime;
	stat->mtime = inode->i_mtime;
	stat->ctime = inode->i_ctime;
	/* include delayed-allocation blocks not yet on disk */
	stat->blocks =
		XFS_FSB_TO_BB(mp, ip->i_d.di_nblocks + ip->i_delayed_blks);


	switch (inode->i_mode & S_IFMT) {
	case S_IFBLK:
	case S_IFCHR:
		stat->blksize = BLKDEV_IOSIZE;
		/* decode the sysv-packed dev number stored in the fork */
		stat->rdev = MKDEV(sysv_major(ip->i_df.if_u2.if_rdev) & 0x1ff,
				   sysv_minor(ip->i_df.if_u2.if_rdev));
		break;
	default:
		if (XFS_IS_REALTIME_INODE(ip)) {
			/*
			 * If the file blocks are being allocated from a
			 * realtime volume, then return the inode's realtime
			 * extent size or the realtime volume's extent size.
			 */
			stat->blksize =
				xfs_get_extsz_hint(ip) << mp->m_sb.sb_blocklog;
		} else
			stat->blksize = xfs_preferred_iosize(mp);
		stat->rdev = 0;
		break;
	}

	return 0;
}
509
/*
 * Change everything about an inode except its size: ownership, mode
 * and timestamps, including the associated quota bookkeeping.
 *
 * Returns 0 or a positive XFS error code (callers negate for the VFS).
 * @flags may contain XFS_ATTR_NOACL to suppress the ACL chmod that
 * normally follows a mode change.
 */
int
xfs_setattr_nonsize(
	struct xfs_inode	*ip,
	struct iattr		*iattr,
	int			flags)
{
	xfs_mount_t		*mp = ip->i_mount;
	struct inode		*inode = VFS_I(ip);
	int			mask = iattr->ia_valid;
	xfs_trans_t		*tp;
	int			error;
	uid_t			uid = 0, iuid = 0;
	gid_t			gid = 0, igid = 0;
	struct xfs_dquot	*udqp = NULL, *gdqp = NULL;
	struct xfs_dquot	*olddquot1 = NULL, *olddquot2 = NULL;

	trace_xfs_setattr(ip);

	if (mp->m_flags & XFS_MOUNT_RDONLY)
		return XFS_ERROR(EROFS);

	if (XFS_FORCED_SHUTDOWN(mp))
		return XFS_ERROR(EIO);

	/* inode_change_ok() returns a negative errno; flip to positive */
	error = -inode_change_ok(inode, iattr);
	if (error)
		return XFS_ERROR(error);

	/* size changes must go through xfs_setattr_size() */
	ASSERT((mask & ATTR_SIZE) == 0);

	/*
	 * If disk quotas is on, we make sure that the dquots do exist on disk,
	 * before we start any other transactions. Trying to do this later
	 * is messy. We don't care to take a readlock to look at the ids
	 * in inode here, because we can't hold it across the trans_reserve.
	 * If the IDs do change before we take the ilock, we're covered
	 * because the i_*dquot fields will get updated anyway.
	 */
	if (XFS_IS_QUOTA_ON(mp) && (mask & (ATTR_UID|ATTR_GID))) {
		uint	qflags = 0;

		if ((mask & ATTR_UID) && XFS_IS_UQUOTA_ON(mp)) {
			uid = iattr->ia_uid;
			qflags |= XFS_QMOPT_UQUOTA;
		} else {
			uid = ip->i_d.di_uid;
		}
		if ((mask & ATTR_GID) && XFS_IS_GQUOTA_ON(mp)) {
			gid = iattr->ia_gid;
			qflags |= XFS_QMOPT_GQUOTA;
		} else {
			gid = ip->i_d.di_gid;
		}

		/*
		 * We take a reference when we initialize udqp and gdqp,
		 * so it is important that we never blindly double trip on
		 * the same variable. See xfs_create() for an example.
		 */
		ASSERT(udqp == NULL);
		ASSERT(gdqp == NULL);
		error = xfs_qm_vop_dqalloc(ip, uid, gid, xfs_get_projid(ip),
					 qflags, &udqp, &gdqp);
		if (error)
			return error;
	}

	tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_NOT_SIZE);
	error = xfs_trans_reserve(tp, 0, XFS_ICHANGE_LOG_RES(mp), 0, 0, 0);
	if (error)
		goto out_dqrele;

	xfs_ilock(ip, XFS_ILOCK_EXCL);

	/*
	 * Change file ownership.  Must be the owner or privileged.
	 */
	if (mask & (ATTR_UID|ATTR_GID)) {
		/*
		 * These IDs could have changed since we last looked at them.
		 * But, we're assured that if the ownership did change
		 * while we didn't have the inode locked, inode's dquot(s)
		 * would have changed also.
		 */
		iuid = ip->i_d.di_uid;
		igid = ip->i_d.di_gid;
		gid = (mask & ATTR_GID) ? iattr->ia_gid : igid;
		uid = (mask & ATTR_UID) ? iattr->ia_uid : iuid;

		/*
		 * Do a quota reservation only if uid/gid is actually
		 * going to change.
		 */
		if (XFS_IS_QUOTA_RUNNING(mp) &&
		    ((XFS_IS_UQUOTA_ON(mp) && iuid != uid) ||
		     (XFS_IS_GQUOTA_ON(mp) && igid != gid))) {
			ASSERT(tp);
			error = xfs_qm_vop_chown_reserve(tp, ip, udqp, gdqp,
						capable(CAP_FOWNER) ?
						XFS_QMOPT_FORCE_RES : 0);
			if (error)	/* out of quota */
				goto out_trans_cancel;
		}
	}

	xfs_trans_ijoin(tp, ip, 0);

	/*
	 * Change file ownership.  Must be the owner or privileged.
	 */
	if (mask & (ATTR_UID|ATTR_GID)) {
		/*
		 * CAP_FSETID overrides the following restrictions:
		 *
		 * The set-user-ID and set-group-ID bits of a file will be
		 * cleared upon successful return from chown()
		 */
		if ((ip->i_d.di_mode & (S_ISUID|S_ISGID)) &&
		    !capable(CAP_FSETID))
			ip->i_d.di_mode &= ~(S_ISUID|S_ISGID);

		/*
		 * Change the ownerships and register quota modifications
		 * in the transaction.
		 */
		if (iuid != uid) {
			if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_UQUOTA_ON(mp)) {
				ASSERT(mask & ATTR_UID);
				ASSERT(udqp);
				olddquot1 = xfs_qm_vop_chown(tp, ip,
							&ip->i_udquot, udqp);
			}
			ip->i_d.di_uid = uid;
			inode->i_uid = uid;
		}
		if (igid != gid) {
			if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_GQUOTA_ON(mp)) {
				ASSERT(!XFS_IS_PQUOTA_ON(mp));
				ASSERT(mask & ATTR_GID);
				ASSERT(gdqp);
				olddquot2 = xfs_qm_vop_chown(tp, ip,
							&ip->i_gdquot, gdqp);
			}
			ip->i_d.di_gid = gid;
			inode->i_gid = gid;
		}
	}

	/*
	 * Change file access modes.
	 */
	if (mask & ATTR_MODE) {
		umode_t mode = iattr->ia_mode;

		/* clear sgid unless the caller may keep it (chmod(2)) */
		if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
			mode &= ~S_ISGID;

		/* preserve the file-type bits, replace the permission bits */
		ip->i_d.di_mode &= S_IFMT;
		ip->i_d.di_mode |= mode & ~S_IFMT;

		inode->i_mode &= S_IFMT;
		inode->i_mode |= mode & ~S_IFMT;
	}

	/*
	 * Change file access or modified times.
	 */
	if (mask & ATTR_ATIME) {
		inode->i_atime = iattr->ia_atime;
		ip->i_d.di_atime.t_sec = iattr->ia_atime.tv_sec;
		ip->i_d.di_atime.t_nsec = iattr->ia_atime.tv_nsec;
		ip->i_update_core = 1;
	}
	if (mask & ATTR_CTIME) {
		inode->i_ctime = iattr->ia_ctime;
		ip->i_d.di_ctime.t_sec = iattr->ia_ctime.tv_sec;
		ip->i_d.di_ctime.t_nsec = iattr->ia_ctime.tv_nsec;
		ip->i_update_core = 1;
	}
	if (mask & ATTR_MTIME) {
		inode->i_mtime = iattr->ia_mtime;
		ip->i_d.di_mtime.t_sec = iattr->ia_mtime.tv_sec;
		ip->i_d.di_mtime.t_nsec = iattr->ia_mtime.tv_nsec;
		ip->i_update_core = 1;
	}

	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	XFS_STATS_INC(xs_ig_attrchg);

	if (mp->m_flags & XFS_MOUNT_WSYNC)
		xfs_trans_set_sync(tp);
	error = xfs_trans_commit(tp, 0);

	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	/*
	 * Release any dquot(s) the inode had kept before chown.
	 */
	xfs_qm_dqrele(olddquot1);
	xfs_qm_dqrele(olddquot2);
	xfs_qm_dqrele(udqp);
	xfs_qm_dqrele(gdqp);

	if (error)
		return XFS_ERROR(error);

	/*
	 * XXX(hch): Updating the ACL entries is not atomic vs the i_mode
	 * 	     update.  We could avoid this with linked transactions
	 * 	     and passing down the transaction pointer all the way
	 * 	     to attr_set.  No previous user of the generic
	 * 	     Posix ACL code seems to care about this issue either.
	 */
	if ((mask & ATTR_MODE) && !(flags & XFS_ATTR_NOACL)) {
		error = -xfs_acl_chmod(inode);
		if (error)
			return XFS_ERROR(error);
	}

	return 0;

out_trans_cancel:
	xfs_trans_cancel(tp, 0);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
out_dqrele:
	xfs_qm_dqrele(udqp);
	xfs_qm_dqrele(gdqp);
	return error;
}
740
/*
 * Truncate file.  Must have write permission and not be a directory.
 *
 * Handles both shrinking and growing the file.  Returns 0 or an error
 * (note: may be positive XFS-style or negative where flipped inline;
 * callers such as xfs_vn_setattr() negate the result for the VFS).
 * @flags may contain XFS_ATTR_NOLOCK when the caller already holds the
 * iolock.
 */
int
xfs_setattr_size(
	struct xfs_inode	*ip,
	struct iattr		*iattr,
	int			flags)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct inode		*inode = VFS_I(ip);
	int			mask = iattr->ia_valid;
	struct xfs_trans	*tp;
	int			error;
	uint			lock_flags;
	uint			commit_flags = 0;

	trace_xfs_setattr(ip);

	if (mp->m_flags & XFS_MOUNT_RDONLY)
		return XFS_ERROR(EROFS);

	if (XFS_FORCED_SHUTDOWN(mp))
		return XFS_ERROR(EIO);

	error = -inode_change_ok(inode, iattr);
	if (error)
		return XFS_ERROR(error);

	/* only regular files get here; everything else is nonsize-only */
	ASSERT(S_ISREG(ip->i_d.di_mode));
	ASSERT((mask & (ATTR_MODE|ATTR_UID|ATTR_GID|ATTR_ATIME|ATTR_ATIME_SET|
			ATTR_MTIME_SET|ATTR_KILL_SUID|ATTR_KILL_SGID|
			ATTR_KILL_PRIV|ATTR_TIMES_SET)) == 0);

	lock_flags = XFS_ILOCK_EXCL;
	if (!(flags & XFS_ATTR_NOLOCK))
		lock_flags |= XFS_IOLOCK_EXCL;
	xfs_ilock(ip, lock_flags);

	/*
	 * Short circuit the truncate case for zero length files.
	 */
	if (iattr->ia_size == 0 &&
	    ip->i_size == 0 && ip->i_d.di_nextents == 0) {
		if (!(mask & (ATTR_CTIME|ATTR_MTIME)))
			goto out_unlock;

		/*
		 * Use the regular setattr path to update the timestamps.
		 */
		xfs_iunlock(ip, lock_flags);
		iattr->ia_valid &= ~ATTR_SIZE;
		return xfs_setattr_nonsize(ip, iattr, 0);
	}

	/*
	 * Make sure that the dquots are attached to the inode.
	 */
	error = xfs_qm_dqattach_locked(ip, 0);
	if (error)
		goto out_unlock;

	/*
	 * Now we can make the changes.  Before we join the inode to the
	 * transaction, take care of the part of the truncation that must be
	 * done without the inode lock.  This needs to be done before joining
	 * the inode to the transaction, because the inode cannot be unlocked
	 * once it is a part of the transaction.
	 */
	if (iattr->ia_size > ip->i_size) {
		/*
		 * Do the first part of growing a file: zero any data in the
		 * last block that is beyond the old EOF.  We need to do this
		 * before the inode is joined to the transaction to modify
		 * i_size.
		 */
		error = xfs_zero_eof(ip, iattr->ia_size, ip->i_size);
		if (error)
			goto out_unlock;
	}
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	lock_flags &= ~XFS_ILOCK_EXCL;

	/*
	 * We are going to log the inode size change in this transaction so
	 * any previous writes that are beyond the on disk EOF and the new
	 * EOF that have not been written out need to be written here.  If we
	 * do not write the data out, we expose ourselves to the null files
	 * problem.
	 *
	 * Only flush from the on disk size to the smaller of the in memory
	 * file size or the new size as that's the range we really care about
	 * here and prevents waiting for other data not within the range we
	 * care about here.
	 */
	if (ip->i_size != ip->i_d.di_size && iattr->ia_size > ip->i_d.di_size) {
		error = xfs_flush_pages(ip, ip->i_d.di_size, iattr->ia_size, 0,
					FI_NONE);
		if (error)
			goto out_unlock;
	}

	/*
	 * Wait for all direct I/O to complete.
	 */
	inode_dio_wait(inode);

	/* zero the partial tail page of the new EOF block */
	error = -block_truncate_page(inode->i_mapping, iattr->ia_size,
				     xfs_get_blocks);
	if (error)
		goto out_unlock;

	tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_SIZE);
	error = xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0,
				  XFS_TRANS_PERM_LOG_RES,
				  XFS_ITRUNCATE_LOG_COUNT);
	if (error)
		goto out_trans_cancel;

	truncate_setsize(inode, iattr->ia_size);

	commit_flags = XFS_TRANS_RELEASE_LOG_RES;
	lock_flags |= XFS_ILOCK_EXCL;

	xfs_ilock(ip, XFS_ILOCK_EXCL);

	xfs_trans_ijoin(tp, ip, 0);

	/*
	 * Only change the c/mtime if we are changing the size or we are
	 * explicitly asked to change it.  This handles the semantic difference
	 * between truncate() and ftruncate() as implemented in the VFS.
	 *
	 * The regular truncate() case without ATTR_CTIME and ATTR_MTIME is a
	 * special case where we need to update the times despite not having
	 * these flags set.  For all other operations the VFS set these flags
	 * explicitly if it wants a timestamp update.
	 */
	if (iattr->ia_size != ip->i_size &&
	    (!(mask & (ATTR_CTIME | ATTR_MTIME)))) {
		iattr->ia_ctime = iattr->ia_mtime =
			current_fs_time(inode->i_sb);
		mask |= ATTR_CTIME | ATTR_MTIME;
	}

	if (iattr->ia_size > ip->i_size) {
		ip->i_d.di_size = iattr->ia_size;
		ip->i_size = iattr->ia_size;
	} else if (iattr->ia_size <= ip->i_size ||
		   (iattr->ia_size == 0 && ip->i_d.di_nextents)) {
		error = xfs_itruncate_data(&tp, ip, iattr->ia_size);
		if (error)
			goto out_trans_abort;

		/*
		 * Truncated "down", so we're removing references to old data
		 * here - if we delay flushing for a long time, we expose
		 * ourselves unduly to the notorious NULL files problem.  So,
		 * we mark this inode and flush it when the file is closed,
		 * and do not wait the usual (long) time for writeout.
		 */
		xfs_iflags_set(ip, XFS_ITRUNCATED);
	}

	if (mask & ATTR_CTIME) {
		inode->i_ctime = iattr->ia_ctime;
		ip->i_d.di_ctime.t_sec = iattr->ia_ctime.tv_sec;
		ip->i_d.di_ctime.t_nsec = iattr->ia_ctime.tv_nsec;
		ip->i_update_core = 1;
	}
	if (mask & ATTR_MTIME) {
		inode->i_mtime = iattr->ia_mtime;
		ip->i_d.di_mtime.t_sec = iattr->ia_mtime.tv_sec;
		ip->i_d.di_mtime.t_nsec = iattr->ia_mtime.tv_nsec;
		ip->i_update_core = 1;
	}

	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	XFS_STATS_INC(xs_ig_attrchg);

	if (mp->m_flags & XFS_MOUNT_WSYNC)
		xfs_trans_set_sync(tp);

	error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
out_unlock:
	if (lock_flags)
		xfs_iunlock(ip, lock_flags);
	return error;

out_trans_abort:
	commit_flags |= XFS_TRANS_ABORT;
out_trans_cancel:
	xfs_trans_cancel(tp, commit_flags);
	goto out_unlock;
}
937
938 STATIC int
939 xfs_vn_setattr(
940 struct dentry *dentry,
941 struct iattr *iattr)
942 {
943 if (iattr->ia_valid & ATTR_SIZE)
944 return -xfs_setattr_size(XFS_I(dentry->d_inode), iattr, 0);
945 return -xfs_setattr_nonsize(XFS_I(dentry->d_inode), iattr, 0);
946 }
947
#define XFS_FIEMAP_FLAGS	(FIEMAP_FLAG_SYNC|FIEMAP_FLAG_XATTR)

/*
 * Call fiemap helper to fill in user data.
 * Returns positive errors to xfs_getbmap.
 */
STATIC int
xfs_fiemap_format(
	void			**arg,
	struct getbmapx		*bmv,
	int			*full)
{
	int			error;
	struct fiemap_extent_info *fieinfo = *arg;
	u32			fiemap_flags = 0;
	u64			logical, physical, length;

	/* Do nothing for a hole */
	if (bmv->bmv_block == -1LL)
		return 0;

	/* getbmap speaks in 512-byte basic blocks; fiemap wants bytes */
	logical = BBTOB(bmv->bmv_offset);
	physical = BBTOB(bmv->bmv_block);
	length = BBTOB(bmv->bmv_length);

	if (bmv->bmv_oflags & BMV_OF_PREALLOC)
		fiemap_flags |= FIEMAP_EXTENT_UNWRITTEN;
	else if (bmv->bmv_oflags & BMV_OF_DELALLOC) {
		fiemap_flags |= FIEMAP_EXTENT_DELALLOC;
		physical = 0;	/* no block yet */
	}
	if (bmv->bmv_oflags & BMV_OF_LAST)
		fiemap_flags |= FIEMAP_EXTENT_LAST;

	error = fiemap_fill_next_extent(fieinfo, logical, physical,
					length, fiemap_flags);
	if (error > 0) {
		/* positive return means "stop": not an error condition */
		error = 0;
		*full = 1;	/* user array now full */
	}

	return -error;
}
991
/*
 * ->fiemap: map the file's extents for userspace by driving the XFS
 * getbmap machinery with xfs_fiemap_format() as the per-extent
 * callback.
 */
STATIC int
xfs_vn_fiemap(
	struct inode		*inode,
	struct fiemap_extent_info *fieinfo,
	u64			start,
	u64			length)
{
	xfs_inode_t		*ip = XFS_I(inode);
	struct getbmapx		bm;
	int			error;

	error = fiemap_check_flags(fieinfo, XFS_FIEMAP_FLAGS);
	if (error)
		return error;

	/* Set up bmap header for xfs internal routine */
	bm.bmv_offset = BTOBB(start);
	/* Special case for whole file */
	if (length == FIEMAP_MAX_OFFSET)
		bm.bmv_length = -1LL;
	else
		bm.bmv_length = BTOBB(length);

	/* We add one because in getbmap world count includes the header */
	bm.bmv_count = !fieinfo->fi_extents_max ? MAXEXTNUM :
					fieinfo->fi_extents_max + 1;
	/* cap the per-call extent count to bound kernel memory usage */
	bm.bmv_count = min_t(__s32, bm.bmv_count,
			     (PAGE_SIZE * 16 / sizeof(struct getbmapx)));
	bm.bmv_iflags = BMV_IF_PREALLOC | BMV_IF_NO_HOLES;
	if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR)
		bm.bmv_iflags |= BMV_IF_ATTRFORK;
	if (!(fieinfo->fi_flags & FIEMAP_FLAG_SYNC))
		bm.bmv_iflags |= BMV_IF_DELALLOC;

	error = xfs_getbmap(ip, &bm, xfs_fiemap_format, fieinfo);
	if (error)
		return -error;	/* positive XFS error -> negative errno */

	return 0;
}
1032
/* Inode operations for regular files. */
static const struct inode_operations xfs_inode_operations = {
	.get_acl		= xfs_get_acl,
	.getattr		= xfs_vn_getattr,
	.setattr		= xfs_vn_setattr,
	.setxattr		= generic_setxattr,
	.getxattr		= generic_getxattr,
	.removexattr		= generic_removexattr,
	.listxattr		= xfs_vn_listxattr,
	.fiemap			= xfs_vn_fiemap,
};
1043
/* Inode operations for directories (case-sensitive lookup). */
static const struct inode_operations xfs_dir_inode_operations = {
	.create			= xfs_vn_create,
	.lookup			= xfs_vn_lookup,
	.link			= xfs_vn_link,
	.unlink			= xfs_vn_unlink,
	.symlink		= xfs_vn_symlink,
	.mkdir			= xfs_vn_mkdir,
	/*
	 * Yes, XFS uses the same method for rmdir and unlink.
	 *
	 * There are some subtile differences deeper in the code,
	 * but we use S_ISDIR to check for those.
	 */
	.rmdir			= xfs_vn_unlink,
	.mknod			= xfs_vn_mknod,
	.rename			= xfs_vn_rename,
	.get_acl		= xfs_get_acl,
	.getattr		= xfs_vn_getattr,
	.setattr		= xfs_vn_setattr,
	.setxattr		= generic_setxattr,
	.getxattr		= generic_getxattr,
	.removexattr		= generic_removexattr,
	.listxattr		= xfs_vn_listxattr,
};
1068
/*
 * Inode operations for directories on ASCII case-insensitive
 * filesystems; identical to the table above except for ->lookup.
 */
static const struct inode_operations xfs_dir_ci_inode_operations = {
	.create			= xfs_vn_create,
	.lookup			= xfs_vn_ci_lookup,
	.link			= xfs_vn_link,
	.unlink			= xfs_vn_unlink,
	.symlink		= xfs_vn_symlink,
	.mkdir			= xfs_vn_mkdir,
	/*
	 * Yes, XFS uses the same method for rmdir and unlink.
	 *
	 * There are some subtile differences deeper in the code,
	 * but we use S_ISDIR to check for those.
	 */
	.rmdir			= xfs_vn_unlink,
	.mknod			= xfs_vn_mknod,
	.rename			= xfs_vn_rename,
	.get_acl		= xfs_get_acl,
	.getattr		= xfs_vn_getattr,
	.setattr		= xfs_vn_setattr,
	.setxattr		= generic_setxattr,
	.getxattr		= generic_getxattr,
	.removexattr		= generic_removexattr,
	.listxattr		= xfs_vn_listxattr,
};
1093
/* Inode operations for symbolic links. */
static const struct inode_operations xfs_symlink_inode_operations = {
	.readlink		= generic_readlink,
	.follow_link		= xfs_vn_follow_link,
	.put_link		= xfs_vn_put_link,
	.get_acl		= xfs_get_acl,
	.getattr		= xfs_vn_getattr,
	.setattr		= xfs_vn_setattr,
	.setxattr		= generic_setxattr,
	.getxattr		= generic_getxattr,
	.removexattr		= generic_removexattr,
	.listxattr		= xfs_vn_listxattr,
};
1106
/*
 * Propagate the XFS on-disk inode flags (di_flags) into the matching
 * VFS inode flags.  Each flag is both set and cleared so the Linux
 * inode exactly mirrors the on-disk state after any flag change.
 */
STATIC void
xfs_diflags_to_iflags(
	struct inode		*inode,
	struct xfs_inode	*ip)
{
	if (ip->i_d.di_flags & XFS_DIFLAG_IMMUTABLE)
		inode->i_flags |= S_IMMUTABLE;
	else
		inode->i_flags &= ~S_IMMUTABLE;
	if (ip->i_d.di_flags & XFS_DIFLAG_APPEND)
		inode->i_flags |= S_APPEND;
	else
		inode->i_flags &= ~S_APPEND;
	if (ip->i_d.di_flags & XFS_DIFLAG_SYNC)
		inode->i_flags |= S_SYNC;
	else
		inode->i_flags &= ~S_SYNC;
	if (ip->i_d.di_flags & XFS_DIFLAG_NOATIME)
		inode->i_flags |= S_NOATIME;
	else
		inode->i_flags &= ~S_NOATIME;
}
1129
/*
 * Initialize the Linux inode, set up the operation vectors and
 * unlock the inode.
 *
 * When reading existing inodes from disk this is called directly
 * from xfs_iget, when creating a new inode it is called from
 * xfs_ialloc after setting up the inode.
 *
 * We are always called with an uninitialised linux inode here.
 * We need to initialise the necessary fields and take a reference
 * on it.
 */
void
xfs_setup_inode(
	struct xfs_inode	*ip)
{
	struct inode		*inode = &ip->i_vnode;

	inode->i_ino = ip->i_ino;
	inode->i_state = I_NEW;

	inode_sb_list_add(inode);
	/* make the inode look hashed for the writeback code */
	hlist_add_fake(&inode->i_hash);

	/* copy the on-disk inode core into the VFS inode */
	inode->i_mode	= ip->i_d.di_mode;
	set_nlink(inode, ip->i_d.di_nlink);
	inode->i_uid	= ip->i_d.di_uid;
	inode->i_gid	= ip->i_d.di_gid;

	switch (inode->i_mode & S_IFMT) {
	case S_IFBLK:
	case S_IFCHR:
		/* decode the sysv-packed dev number stored in the fork */
		inode->i_rdev =
			MKDEV(sysv_major(ip->i_df.if_u2.if_rdev) & 0x1ff,
			      sysv_minor(ip->i_df.if_u2.if_rdev));
		break;
	default:
		inode->i_rdev = 0;
		break;
	}

	inode->i_generation = ip->i_d.di_gen;
	i_size_write(inode, ip->i_d.di_size);
	inode->i_atime.tv_sec	= ip->i_d.di_atime.t_sec;
	inode->i_atime.tv_nsec	= ip->i_d.di_atime.t_nsec;
	inode->i_mtime.tv_sec	= ip->i_d.di_mtime.t_sec;
	inode->i_mtime.tv_nsec	= ip->i_d.di_mtime.t_nsec;
	inode->i_ctime.tv_sec	= ip->i_d.di_ctime.t_sec;
	inode->i_ctime.tv_nsec	= ip->i_d.di_ctime.t_nsec;
	xfs_diflags_to_iflags(inode, ip);

	/* wire up the per-file-type operation vectors */
	switch (inode->i_mode & S_IFMT) {
	case S_IFREG:
		inode->i_op = &xfs_inode_operations;
		inode->i_fop = &xfs_file_operations;
		inode->i_mapping->a_ops = &xfs_address_space_operations;
		break;
	case S_IFDIR:
		if (xfs_sb_version_hasasciici(&XFS_M(inode->i_sb)->m_sb))
			inode->i_op = &xfs_dir_ci_inode_operations;
		else
			inode->i_op = &xfs_dir_inode_operations;
		inode->i_fop = &xfs_dir_file_operations;
		break;
	case S_IFLNK:
		inode->i_op = &xfs_symlink_inode_operations;
		/* inline symlinks live in the inode; no address space needed */
		if (!(ip->i_df.if_flags & XFS_IFINLINE))
			inode->i_mapping->a_ops = &xfs_address_space_operations;
		break;
	default:
		inode->i_op = &xfs_inode_operations;
		init_special_inode(inode, inode->i_mode, inode->i_rdev);
		break;
	}

	/*
	 * If there is no attribute fork no ACL can exist on this inode,
	 * and it can't have any file capabilities attached to it either.
	 */
	if (!XFS_IFORK_Q(ip)) {
		inode_has_no_xattr(inode);
		cache_no_acl(inode);
	}

	xfs_iflags_clear(ip, XFS_INEW);
	/* keep the INEW clear ordered before publishing the unlocked inode */
	barrier();

	unlock_new_inode(inode);
}