 * Copyright (c) 2000-2003 Silicon Graphics, Inc.  All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * Further, this software is distributed without any warranty that it is
 * free of the rightful claim of any third person regarding infringement
 * or the like.  Any license provided herein, whether implied or
 * otherwise, applies only to this software file.  Patent licenses, if
 * any, provided herein do not apply to combinations of this program with
 * other software, or any other product whatsoever.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston MA 02111-1307, USA.
 *
 * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
 * Mountain View, CA 94043, or:
 *
 * For further information regarding this notice, see:
 *
 * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
37 #include "xfs_trans.h"
42 #include "xfs_alloc.h"
43 #include "xfs_dmapi.h"
44 #include "xfs_mount.h"
45 #include "xfs_bmap_btree.h"
46 #include "xfs_alloc_btree.h"
47 #include "xfs_ialloc_btree.h"
48 #include "xfs_dir_sf.h"
49 #include "xfs_attr_sf.h"
50 #include "xfs_dir2_sf.h"
51 #include "xfs_dinode.h"
52 #include "xfs_inode.h"
53 #include "xfs_btree.h"
54 #include "xfs_ialloc.h"
55 #include "xfs_rtalloc.h"
56 #include "xfs_itable.h"
57 #include "xfs_error.h"
64 #include "xfs_buf_item.h"
65 #include "xfs_utils.h"
66 #include "xfs_dfrag.h"
67 #include "xfs_fsops.h"
69 #include <linux/dcache.h>
70 #include <linux/mount.h>
71 #include <linux/namei.h>
72 #include <linux/pagemap.h>
 * xfs_find_handle maps from userspace xfs_fsop_handlereq structure to
 * a file or fs handle.
 *
 * XFS_IOC_PATH_TO_FSHANDLE
 *    returns fs handle for a mount point or path within that mount point
 * XFS_IOC_FD_TO_HANDLE
 *    returns full handle for a FD opened in user space
 * XFS_IOC_PATH_TO_HANDLE
 *    returns full handle for a path
92 xfs_fsop_handlereq_t hreq
;
96 if (copy_from_user(&hreq
, arg
, sizeof(hreq
)))
97 return -XFS_ERROR(EFAULT
);
99 memset((char *)&handle
, 0, sizeof(handle
));
102 case XFS_IOC_PATH_TO_FSHANDLE
:
103 case XFS_IOC_PATH_TO_HANDLE
: {
107 error
= user_path_walk_link((const char __user
*)hreq
.path
, &nd
);
112 ASSERT(nd
.dentry
->d_inode
);
113 inode
= igrab(nd
.dentry
->d_inode
);
118 case XFS_IOC_FD_TO_HANDLE
: {
121 file
= fget(hreq
.fd
);
125 ASSERT(file
->f_dentry
);
126 ASSERT(file
->f_dentry
->d_inode
);
127 inode
= igrab(file
->f_dentry
->d_inode
);
134 return -XFS_ERROR(EINVAL
);
137 if (inode
->i_sb
->s_magic
!= XFS_SB_MAGIC
) {
138 /* we're not in XFS anymore, Toto */
140 return -XFS_ERROR(EINVAL
);
143 switch (inode
->i_mode
& S_IFMT
) {
150 return -XFS_ERROR(EBADF
);
153 /* we need the vnode */
154 vp
= LINVFS_GET_VP(inode
);
156 /* now we can grab the fsid */
157 memcpy(&handle
.ha_fsid
, vp
->v_vfsp
->vfs_altfsid
, sizeof(xfs_fsid_t
));
158 hsize
= sizeof(xfs_fsid_t
);
160 if (cmd
!= XFS_IOC_PATH_TO_FSHANDLE
) {
165 /* need to get access to the xfs_inode to read the generation */
166 bhv
= vn_bhv_lookup_unlocked(VN_BHV_HEAD(vp
), &xfs_vnodeops
);
168 ip
= XFS_BHVTOI(bhv
);
170 lock_mode
= xfs_ilock_map_shared(ip
);
172 /* fill in fid section of handle from inode */
173 handle
.ha_fid
.xfs_fid_len
= sizeof(xfs_fid_t
) -
174 sizeof(handle
.ha_fid
.xfs_fid_len
);
175 handle
.ha_fid
.xfs_fid_pad
= 0;
176 handle
.ha_fid
.xfs_fid_gen
= ip
->i_d
.di_gen
;
177 handle
.ha_fid
.xfs_fid_ino
= ip
->i_ino
;
179 xfs_iunlock_map_shared(ip
, lock_mode
);
181 hsize
= XFS_HSIZE(handle
);
184 /* now copy our handle into the user buffer & write out the size */
185 if (copy_to_user(hreq
.ohandle
, &handle
, hsize
) ||
186 copy_to_user(hreq
.ohandlen
, &hsize
, sizeof(__s32
))) {
188 return -XFS_ERROR(EFAULT
);
 * Convert userspace handle data into vnode (and inode).
 * We [ab]use the fact that all the fsop_handlereq ioctl calls
 * have a data structure argument whose first component is always
 * a xfs_fsop_handlereq_t, so we can cast to and from this type.
 * This allows us to optimise the copy_from_user calls and gives
 * a handy, shared routine.
 *
 * If no error, caller must always VN_RELE the returned vp.
207 xfs_vget_fsop_handlereq(
209 struct inode
*parinode
, /* parent inode pointer */
210 xfs_fsop_handlereq_t
*hreq
,
212 struct inode
**inode
)
217 xfs_handle_t
*handlep
;
220 struct inode
*inodep
;
227 * Only allow handle opens under a directory.
229 if (!S_ISDIR(parinode
->i_mode
))
230 return XFS_ERROR(ENOTDIR
);
232 hanp
= hreq
->ihandle
;
233 hlen
= hreq
->ihandlen
;
236 if (hlen
< sizeof(handlep
->ha_fsid
) || hlen
> sizeof(*handlep
))
237 return XFS_ERROR(EINVAL
);
238 if (copy_from_user(handlep
, hanp
, hlen
))
239 return XFS_ERROR(EFAULT
);
240 if (hlen
< sizeof(*handlep
))
241 memset(((char *)handlep
) + hlen
, 0, sizeof(*handlep
) - hlen
);
242 if (hlen
> sizeof(handlep
->ha_fsid
)) {
243 if (handlep
->ha_fid
.xfs_fid_len
!=
244 (hlen
- sizeof(handlep
->ha_fsid
)
245 - sizeof(handlep
->ha_fid
.xfs_fid_len
))
246 || handlep
->ha_fid
.xfs_fid_pad
)
247 return XFS_ERROR(EINVAL
);
251 * Crack the handle, obtain the inode # & generation #
253 xfid
= (struct xfs_fid
*)&handlep
->ha_fid
;
254 if (xfid
->xfs_fid_len
== sizeof(*xfid
) - sizeof(xfid
->xfs_fid_len
)) {
255 ino
= xfid
->xfs_fid_ino
;
256 igen
= xfid
->xfs_fid_gen
;
258 return XFS_ERROR(EINVAL
);
262 * Get the XFS inode, building a vnode to go with it.
264 error
= xfs_iget(mp
, NULL
, ino
, 0, XFS_ILOCK_SHARED
, &ip
, 0);
268 return XFS_ERROR(EIO
);
269 if (ip
->i_d
.di_mode
== 0 || ip
->i_d
.di_gen
!= igen
) {
270 xfs_iput_new(ip
, XFS_ILOCK_SHARED
);
271 return XFS_ERROR(ENOENT
);
275 inodep
= LINVFS_GET_IP(vpp
);
276 xfs_iunlock(ip
, XFS_ILOCK_SHARED
);
287 struct file
*parfilp
,
288 struct inode
*parinode
)
295 struct dentry
*dentry
;
297 xfs_fsop_handlereq_t hreq
;
299 if (!capable(CAP_SYS_ADMIN
))
300 return -XFS_ERROR(EPERM
);
301 if (copy_from_user(&hreq
, arg
, sizeof(xfs_fsop_handlereq_t
)))
302 return -XFS_ERROR(EFAULT
);
304 error
= xfs_vget_fsop_handlereq(mp
, parinode
, &hreq
, &vp
, &inode
);
308 /* Restrict xfs_open_by_handle to directories & regular files. */
309 if (!(S_ISREG(inode
->i_mode
) || S_ISDIR(inode
->i_mode
))) {
311 return -XFS_ERROR(EINVAL
);
314 #if BITS_PER_LONG != 32
315 hreq
.oflags
|= O_LARGEFILE
;
317 /* Put open permission in namei format. */
318 permflag
= hreq
.oflags
;
319 if ((permflag
+1) & O_ACCMODE
)
321 if (permflag
& O_TRUNC
)
324 if ((!(permflag
& O_APPEND
) || (permflag
& O_TRUNC
)) &&
325 (permflag
& FMODE_WRITE
) && IS_APPEND(inode
)) {
327 return -XFS_ERROR(EPERM
);
330 if ((permflag
& FMODE_WRITE
) && IS_IMMUTABLE(inode
)) {
332 return -XFS_ERROR(EACCES
);
335 /* Can't write directories. */
336 if ( S_ISDIR(inode
->i_mode
) && (permflag
& FMODE_WRITE
)) {
338 return -XFS_ERROR(EISDIR
);
341 if ((new_fd
= get_unused_fd()) < 0) {
346 dentry
= d_alloc_anon(inode
);
347 if (dentry
== NULL
) {
349 put_unused_fd(new_fd
);
350 return -XFS_ERROR(ENOMEM
);
353 /* Ensure umount returns EBUSY on umounts while this file is open. */
354 mntget(parfilp
->f_vfsmnt
);
356 /* Create file pointer. */
357 filp
= dentry_open(dentry
, parfilp
->f_vfsmnt
, hreq
.oflags
);
359 put_unused_fd(new_fd
);
360 return -XFS_ERROR(-PTR_ERR(filp
));
362 if (inode
->i_mode
& S_IFREG
)
363 filp
->f_op
= &linvfs_invis_file_operations
;
365 fd_install(new_fd
, filp
);
370 xfs_readlink_by_handle(
373 struct file
*parfilp
,
374 struct inode
*parinode
)
380 xfs_fsop_handlereq_t hreq
;
384 if (!capable(CAP_SYS_ADMIN
))
385 return -XFS_ERROR(EPERM
);
386 if (copy_from_user(&hreq
, arg
, sizeof(xfs_fsop_handlereq_t
)))
387 return -XFS_ERROR(EFAULT
);
389 error
= xfs_vget_fsop_handlereq(mp
, parinode
, &hreq
, &vp
, &inode
);
393 /* Restrict this handle operation to symlinks only. */
394 if (!S_ISLNK(inode
->i_mode
)) {
396 return -XFS_ERROR(EINVAL
);
399 if (copy_from_user(&olen
, hreq
.ohandlen
, sizeof(__u32
))) {
401 return -XFS_ERROR(EFAULT
);
404 aiov
.iov_base
= hreq
.ohandle
;
406 auio
.uio_iov
= &aiov
;
409 auio
.uio_segflg
= UIO_USERSPACE
;
410 auio
.uio_resid
= olen
;
412 VOP_READLINK(vp
, &auio
, IO_INVIS
, NULL
, error
);
415 return (olen
- auio
.uio_resid
);
419 xfs_fssetdm_by_handle(
422 struct file
*parfilp
,
423 struct inode
*parinode
)
426 struct fsdmidata fsd
;
427 xfs_fsop_setdm_handlereq_t dmhreq
;
432 if (!capable(CAP_MKNOD
))
433 return -XFS_ERROR(EPERM
);
434 if (copy_from_user(&dmhreq
, arg
, sizeof(xfs_fsop_setdm_handlereq_t
)))
435 return -XFS_ERROR(EFAULT
);
437 error
= xfs_vget_fsop_handlereq(mp
, parinode
, &dmhreq
.hreq
, &vp
, &inode
);
441 if (IS_IMMUTABLE(inode
) || IS_APPEND(inode
)) {
443 return -XFS_ERROR(EPERM
);
446 if (copy_from_user(&fsd
, dmhreq
.data
, sizeof(fsd
))) {
448 return -XFS_ERROR(EFAULT
);
451 bdp
= bhv_base_unlocked(VN_BHV_HEAD(vp
));
452 error
= xfs_set_dmattrs(bdp
, fsd
.fsd_dmevmask
, fsd
.fsd_dmstate
, NULL
);
461 xfs_attrlist_by_handle(
464 struct file
*parfilp
,
465 struct inode
*parinode
)
468 attrlist_cursor_kern_t
*cursor
;
469 xfs_fsop_attrlist_handlereq_t al_hreq
;
474 if (!capable(CAP_SYS_ADMIN
))
475 return -XFS_ERROR(EPERM
);
476 if (copy_from_user(&al_hreq
, arg
, sizeof(xfs_fsop_attrlist_handlereq_t
)))
477 return -XFS_ERROR(EFAULT
);
478 if (al_hreq
.buflen
> XATTR_LIST_MAX
)
479 return -XFS_ERROR(EINVAL
);
481 error
= xfs_vget_fsop_handlereq(mp
, parinode
, &al_hreq
.hreq
,
486 kbuf
= kmalloc(al_hreq
.buflen
, GFP_KERNEL
);
490 cursor
= (attrlist_cursor_kern_t
*)&al_hreq
.pos
;
491 VOP_ATTR_LIST(vp
, kbuf
, al_hreq
.buflen
, al_hreq
.flags
,
492 cursor
, NULL
, error
);
496 if (copy_to_user(al_hreq
.buffer
, kbuf
, al_hreq
.buflen
))
508 xfs_attrmulti_attr_get(
518 if (*len
> XATTR_SIZE_MAX
)
520 kbuf
= kmalloc(*len
, GFP_KERNEL
);
524 VOP_ATTR_GET(vp
, name
, kbuf
, len
, flags
, NULL
, error
);
528 if (copy_to_user(ubuf
, kbuf
, *len
))
537 xfs_attrmulti_attr_set(
540 const char __user
*ubuf
,
547 if (IS_IMMUTABLE(&vp
->v_inode
) || IS_APPEND(&vp
->v_inode
))
549 if (len
> XATTR_SIZE_MAX
)
552 kbuf
= kmalloc(len
, GFP_KERNEL
);
556 if (copy_from_user(kbuf
, ubuf
, len
))
559 VOP_ATTR_SET(vp
, name
, kbuf
, len
, flags
, NULL
, error
);
567 xfs_attrmulti_attr_remove(
574 if (IS_IMMUTABLE(&vp
->v_inode
) || IS_APPEND(&vp
->v_inode
))
577 VOP_ATTR_REMOVE(vp
, name
, flags
, NULL
, error
);
582 xfs_attrmulti_by_handle(
585 struct file
*parfilp
,
586 struct inode
*parinode
)
589 xfs_attr_multiop_t
*ops
;
590 xfs_fsop_attrmulti_handlereq_t am_hreq
;
593 unsigned int i
, size
;
596 if (!capable(CAP_SYS_ADMIN
))
597 return -XFS_ERROR(EPERM
);
598 if (copy_from_user(&am_hreq
, arg
, sizeof(xfs_fsop_attrmulti_handlereq_t
)))
599 return -XFS_ERROR(EFAULT
);
601 error
= xfs_vget_fsop_handlereq(mp
, parinode
, &am_hreq
.hreq
, &vp
, &inode
);
606 size
= am_hreq
.opcount
* sizeof(attr_multiop_t
);
607 if (!size
|| size
> 16 * PAGE_SIZE
)
611 ops
= kmalloc(size
, GFP_KERNEL
);
616 if (copy_from_user(ops
, am_hreq
.ops
, size
))
619 attr_name
= kmalloc(MAXNAMELEN
, GFP_KERNEL
);
625 for (i
= 0; i
< am_hreq
.opcount
; i
++) {
626 ops
[i
].am_error
= strncpy_from_user(attr_name
,
627 ops
[i
].am_attrname
, MAXNAMELEN
);
628 if (ops
[i
].am_error
== 0 || ops
[i
].am_error
== MAXNAMELEN
)
630 if (ops
[i
].am_error
< 0)
633 switch (ops
[i
].am_opcode
) {
635 ops
[i
].am_error
= xfs_attrmulti_attr_get(vp
,
636 attr_name
, ops
[i
].am_attrvalue
,
637 &ops
[i
].am_length
, ops
[i
].am_flags
);
640 ops
[i
].am_error
= xfs_attrmulti_attr_set(vp
,
641 attr_name
, ops
[i
].am_attrvalue
,
642 ops
[i
].am_length
, ops
[i
].am_flags
);
645 ops
[i
].am_error
= xfs_attrmulti_attr_remove(vp
,
646 attr_name
, ops
[i
].am_flags
);
649 ops
[i
].am_error
= EINVAL
;
653 if (copy_to_user(am_hreq
.ops
, ops
, size
))
654 error
= XFS_ERROR(EFAULT
);
/* prototypes for a few of the stack-hungry cases that have
 * their own functions.  Functions are defined after their use
 * so gcc doesn't get fancy and inline them with -O3 */
685 xfs_ioc_fsgeometry_v1(
729 vp
= LINVFS_GET_VP(inode
);
731 vn_trace_entry(vp
, "xfs_ioctl", (inst_t
*)__return_address
);
733 ip
= XFS_BHVTOI(bdp
);
738 case XFS_IOC_ALLOCSP
:
741 case XFS_IOC_UNRESVSP
:
742 case XFS_IOC_ALLOCSP64
:
743 case XFS_IOC_FREESP64
:
744 case XFS_IOC_RESVSP64
:
745 case XFS_IOC_UNRESVSP64
:
747 * Only allow the sys admin to reserve space unless
748 * unwritten extents are enabled.
750 if (!XFS_SB_VERSION_HASEXTFLGBIT(&mp
->m_sb
) &&
751 !capable(CAP_SYS_ADMIN
))
754 return xfs_ioc_space(bdp
, vp
, filp
, ioflags
, cmd
, arg
);
756 case XFS_IOC_DIOINFO
: {
758 xfs_buftarg_t
*target
=
759 (ip
->i_d
.di_flags
& XFS_DIFLAG_REALTIME
) ?
760 mp
->m_rtdev_targp
: mp
->m_ddev_targp
;
762 da
.d_mem
= da
.d_miniosz
= 1 << target
->pbr_sshift
;
763 /* The size dio will do in one go */
764 da
.d_maxiosz
= 64 * PAGE_CACHE_SIZE
;
766 if (copy_to_user(arg
, &da
, sizeof(da
)))
767 return -XFS_ERROR(EFAULT
);
771 case XFS_IOC_FSBULKSTAT_SINGLE
:
772 case XFS_IOC_FSBULKSTAT
:
773 case XFS_IOC_FSINUMBERS
:
774 return xfs_ioc_bulkstat(mp
, cmd
, arg
);
776 case XFS_IOC_FSGEOMETRY_V1
:
777 return xfs_ioc_fsgeometry_v1(mp
, arg
);
779 case XFS_IOC_FSGEOMETRY
:
780 return xfs_ioc_fsgeometry(mp
, arg
);
782 case XFS_IOC_GETVERSION
:
783 case XFS_IOC_GETXFLAGS
:
784 case XFS_IOC_SETXFLAGS
:
785 case XFS_IOC_FSGETXATTR
:
786 case XFS_IOC_FSSETXATTR
:
787 case XFS_IOC_FSGETXATTRA
:
788 return xfs_ioc_xattr(vp
, ip
, filp
, cmd
, arg
);
790 case XFS_IOC_FSSETDM
: {
791 struct fsdmidata dmi
;
793 if (copy_from_user(&dmi
, arg
, sizeof(dmi
)))
794 return -XFS_ERROR(EFAULT
);
796 error
= xfs_set_dmattrs(bdp
, dmi
.fsd_dmevmask
, dmi
.fsd_dmstate
,
801 case XFS_IOC_GETBMAP
:
802 case XFS_IOC_GETBMAPA
:
803 return xfs_ioc_getbmap(bdp
, filp
, ioflags
, cmd
, arg
);
805 case XFS_IOC_GETBMAPX
:
806 return xfs_ioc_getbmapx(bdp
, arg
);
808 case XFS_IOC_FD_TO_HANDLE
:
809 case XFS_IOC_PATH_TO_HANDLE
:
810 case XFS_IOC_PATH_TO_FSHANDLE
:
811 return xfs_find_handle(cmd
, arg
);
813 case XFS_IOC_OPEN_BY_HANDLE
:
814 return xfs_open_by_handle(mp
, arg
, filp
, inode
);
816 case XFS_IOC_FSSETDM_BY_HANDLE
:
817 return xfs_fssetdm_by_handle(mp
, arg
, filp
, inode
);
819 case XFS_IOC_READLINK_BY_HANDLE
:
820 return xfs_readlink_by_handle(mp
, arg
, filp
, inode
);
822 case XFS_IOC_ATTRLIST_BY_HANDLE
:
823 return xfs_attrlist_by_handle(mp
, arg
, filp
, inode
);
825 case XFS_IOC_ATTRMULTI_BY_HANDLE
:
826 return xfs_attrmulti_by_handle(mp
, arg
, filp
, inode
);
828 case XFS_IOC_SWAPEXT
: {
829 error
= xfs_swapext((struct xfs_swapext __user
*)arg
);
833 case XFS_IOC_FSCOUNTS
: {
834 xfs_fsop_counts_t out
;
836 error
= xfs_fs_counts(mp
, &out
);
840 if (copy_to_user(arg
, &out
, sizeof(out
)))
841 return -XFS_ERROR(EFAULT
);
845 case XFS_IOC_SET_RESBLKS
: {
846 xfs_fsop_resblks_t inout
;
849 if (!capable(CAP_SYS_ADMIN
))
852 if (copy_from_user(&inout
, arg
, sizeof(inout
)))
853 return -XFS_ERROR(EFAULT
);
855 /* input parameter is passed in resblks field of structure */
857 error
= xfs_reserve_blocks(mp
, &in
, &inout
);
861 if (copy_to_user(arg
, &inout
, sizeof(inout
)))
862 return -XFS_ERROR(EFAULT
);
866 case XFS_IOC_GET_RESBLKS
: {
867 xfs_fsop_resblks_t out
;
869 if (!capable(CAP_SYS_ADMIN
))
872 error
= xfs_reserve_blocks(mp
, NULL
, &out
);
876 if (copy_to_user(arg
, &out
, sizeof(out
)))
877 return -XFS_ERROR(EFAULT
);
882 case XFS_IOC_FSGROWFSDATA
: {
883 xfs_growfs_data_t in
;
885 if (!capable(CAP_SYS_ADMIN
))
888 if (copy_from_user(&in
, arg
, sizeof(in
)))
889 return -XFS_ERROR(EFAULT
);
891 error
= xfs_growfs_data(mp
, &in
);
895 case XFS_IOC_FSGROWFSLOG
: {
898 if (!capable(CAP_SYS_ADMIN
))
901 if (copy_from_user(&in
, arg
, sizeof(in
)))
902 return -XFS_ERROR(EFAULT
);
904 error
= xfs_growfs_log(mp
, &in
);
908 case XFS_IOC_FSGROWFSRT
: {
911 if (!capable(CAP_SYS_ADMIN
))
914 if (copy_from_user(&in
, arg
, sizeof(in
)))
915 return -XFS_ERROR(EFAULT
);
917 error
= xfs_growfs_rt(mp
, &in
);
922 if (!capable(CAP_SYS_ADMIN
))
925 if (inode
->i_sb
->s_frozen
== SB_UNFROZEN
)
926 freeze_bdev(inode
->i_sb
->s_bdev
);
930 if (!capable(CAP_SYS_ADMIN
))
932 if (inode
->i_sb
->s_frozen
!= SB_UNFROZEN
)
933 thaw_bdev(inode
->i_sb
->s_bdev
, inode
->i_sb
);
936 case XFS_IOC_GOINGDOWN
: {
939 if (!capable(CAP_SYS_ADMIN
))
942 if (get_user(in
, (__uint32_t __user
*)arg
))
943 return -XFS_ERROR(EFAULT
);
945 error
= xfs_fs_goingdown(mp
, in
);
949 case XFS_IOC_ERROR_INJECTION
: {
950 xfs_error_injection_t in
;
952 if (!capable(CAP_SYS_ADMIN
))
955 if (copy_from_user(&in
, arg
, sizeof(in
)))
956 return -XFS_ERROR(EFAULT
);
958 error
= xfs_errortag_add(in
.errtag
, mp
);
962 case XFS_IOC_ERROR_CLEARALL
:
963 if (!capable(CAP_SYS_ADMIN
))
966 error
= xfs_errortag_clearall(mp
);
987 if (vp
->v_inode
.i_flags
& (S_IMMUTABLE
|S_APPEND
))
988 return -XFS_ERROR(EPERM
);
990 if (!(filp
->f_mode
& FMODE_WRITE
))
991 return -XFS_ERROR(EBADF
);
994 return -XFS_ERROR(EINVAL
);
996 if (copy_from_user(&bf
, arg
, sizeof(bf
)))
997 return -XFS_ERROR(EFAULT
);
999 if (filp
->f_flags
& (O_NDELAY
|O_NONBLOCK
))
1000 attr_flags
|= ATTR_NONBLOCK
;
1001 if (ioflags
& IO_INVIS
)
1002 attr_flags
|= ATTR_DMI
;
1004 error
= xfs_change_file_space(bdp
, cmd
, &bf
, filp
->f_pos
,
1015 xfs_fsop_bulkreq_t bulkreq
;
1016 int count
; /* # of records returned */
1017 xfs_ino_t inlast
; /* last inode number */
1021 /* done = 1 if there are more stats to get and if bulkstat */
1022 /* should be called again (unused here, but used in dmapi) */
1024 if (!capable(CAP_SYS_ADMIN
))
1027 if (XFS_FORCED_SHUTDOWN(mp
))
1028 return -XFS_ERROR(EIO
);
1030 if (copy_from_user(&bulkreq
, arg
, sizeof(xfs_fsop_bulkreq_t
)))
1031 return -XFS_ERROR(EFAULT
);
1033 if (copy_from_user(&inlast
, bulkreq
.lastip
, sizeof(__s64
)))
1034 return -XFS_ERROR(EFAULT
);
1036 if ((count
= bulkreq
.icount
) <= 0)
1037 return -XFS_ERROR(EINVAL
);
1039 if (cmd
== XFS_IOC_FSINUMBERS
)
1040 error
= xfs_inumbers(mp
, &inlast
, &count
,
1042 else if (cmd
== XFS_IOC_FSBULKSTAT_SINGLE
)
1043 error
= xfs_bulkstat_single(mp
, &inlast
,
1044 bulkreq
.ubuffer
, &done
);
1045 else { /* XFS_IOC_FSBULKSTAT */
1046 if (count
== 1 && inlast
!= 0) {
1048 error
= xfs_bulkstat_single(mp
, &inlast
,
1049 bulkreq
.ubuffer
, &done
);
1051 error
= xfs_bulkstat(mp
, &inlast
, &count
,
1052 (bulkstat_one_pf
)xfs_bulkstat_one
, NULL
,
1053 sizeof(xfs_bstat_t
), bulkreq
.ubuffer
,
1054 BULKSTAT_FG_QUICK
, &done
);
1061 if (bulkreq
.ocount
!= NULL
) {
1062 if (copy_to_user(bulkreq
.lastip
, &inlast
,
1064 return -XFS_ERROR(EFAULT
);
1066 if (copy_to_user(bulkreq
.ocount
, &count
, sizeof(count
)))
1067 return -XFS_ERROR(EFAULT
);
1074 xfs_ioc_fsgeometry_v1(
1078 xfs_fsop_geom_v1_t fsgeo
;
1081 error
= xfs_fs_geometry(mp
, (xfs_fsop_geom_t
*)&fsgeo
, 3);
1085 if (copy_to_user(arg
, &fsgeo
, sizeof(fsgeo
)))
1086 return -XFS_ERROR(EFAULT
);
1095 xfs_fsop_geom_t fsgeo
;
1098 error
= xfs_fs_geometry(mp
, &fsgeo
, 4);
1102 if (copy_to_user(arg
, &fsgeo
, sizeof(fsgeo
)))
1103 return -XFS_ERROR(EFAULT
);
1108 * Linux extended inode flags interface.
1110 #define LINUX_XFLAG_SYNC 0x00000008 /* Synchronous updates */
1111 #define LINUX_XFLAG_IMMUTABLE 0x00000010 /* Immutable file */
1112 #define LINUX_XFLAG_APPEND 0x00000020 /* writes to file may only append */
1113 #define LINUX_XFLAG_NODUMP 0x00000040 /* do not dump file */
1114 #define LINUX_XFLAG_NOATIME 0x00000080 /* do not update atime */
1117 xfs_merge_ioc_xflags(
1121 unsigned int xflags
= start
;
1123 if (flags
& LINUX_XFLAG_IMMUTABLE
)
1124 xflags
|= XFS_XFLAG_IMMUTABLE
;
1126 xflags
&= ~XFS_XFLAG_IMMUTABLE
;
1127 if (flags
& LINUX_XFLAG_APPEND
)
1128 xflags
|= XFS_XFLAG_APPEND
;
1130 xflags
&= ~XFS_XFLAG_APPEND
;
1131 if (flags
& LINUX_XFLAG_SYNC
)
1132 xflags
|= XFS_XFLAG_SYNC
;
1134 xflags
&= ~XFS_XFLAG_SYNC
;
1135 if (flags
& LINUX_XFLAG_NOATIME
)
1136 xflags
|= XFS_XFLAG_NOATIME
;
1138 xflags
&= ~XFS_XFLAG_NOATIME
;
1139 if (flags
& LINUX_XFLAG_NODUMP
)
1140 xflags
|= XFS_XFLAG_NODUMP
;
1142 xflags
&= ~XFS_XFLAG_NODUMP
;
1149 __uint16_t di_flags
)
1151 unsigned int flags
= 0;
1153 if (di_flags
& XFS_DIFLAG_IMMUTABLE
)
1154 flags
|= LINUX_XFLAG_IMMUTABLE
;
1155 if (di_flags
& XFS_DIFLAG_APPEND
)
1156 flags
|= LINUX_XFLAG_APPEND
;
1157 if (di_flags
& XFS_DIFLAG_SYNC
)
1158 flags
|= LINUX_XFLAG_SYNC
;
1159 if (di_flags
& XFS_DIFLAG_NOATIME
)
1160 flags
|= LINUX_XFLAG_NOATIME
;
1161 if (di_flags
& XFS_DIFLAG_NODUMP
)
1162 flags
|= LINUX_XFLAG_NODUMP
;
1181 case XFS_IOC_FSGETXATTR
: {
1182 va
.va_mask
= XFS_AT_XFLAGS
| XFS_AT_EXTSIZE
| \
1183 XFS_AT_NEXTENTS
| XFS_AT_PROJID
;
1184 VOP_GETATTR(vp
, &va
, 0, NULL
, error
);
1188 fa
.fsx_xflags
= va
.va_xflags
;
1189 fa
.fsx_extsize
= va
.va_extsize
;
1190 fa
.fsx_nextents
= va
.va_nextents
;
1191 fa
.fsx_projid
= va
.va_projid
;
1193 if (copy_to_user(arg
, &fa
, sizeof(fa
)))
1194 return -XFS_ERROR(EFAULT
);
1198 case XFS_IOC_FSSETXATTR
: {
1199 if (copy_from_user(&fa
, arg
, sizeof(fa
)))
1200 return -XFS_ERROR(EFAULT
);
1203 if (filp
->f_flags
& (O_NDELAY
|O_NONBLOCK
))
1204 attr_flags
|= ATTR_NONBLOCK
;
1206 va
.va_mask
= XFS_AT_XFLAGS
| XFS_AT_EXTSIZE
| XFS_AT_PROJID
;
1207 va
.va_xflags
= fa
.fsx_xflags
;
1208 va
.va_extsize
= fa
.fsx_extsize
;
1209 va
.va_projid
= fa
.fsx_projid
;
1211 VOP_SETATTR(vp
, &va
, attr_flags
, NULL
, error
);
1213 vn_revalidate(vp
); /* update Linux inode flags */
1217 case XFS_IOC_FSGETXATTRA
: {
1218 va
.va_mask
= XFS_AT_XFLAGS
| XFS_AT_EXTSIZE
| \
1219 XFS_AT_ANEXTENTS
| XFS_AT_PROJID
;
1220 VOP_GETATTR(vp
, &va
, 0, NULL
, error
);
1224 fa
.fsx_xflags
= va
.va_xflags
;
1225 fa
.fsx_extsize
= va
.va_extsize
;
1226 fa
.fsx_nextents
= va
.va_anextents
;
1227 fa
.fsx_projid
= va
.va_projid
;
1229 if (copy_to_user(arg
, &fa
, sizeof(fa
)))
1230 return -XFS_ERROR(EFAULT
);
1234 case XFS_IOC_GETXFLAGS
: {
1235 flags
= xfs_di2lxflags(ip
->i_d
.di_flags
);
1236 if (copy_to_user(arg
, &flags
, sizeof(flags
)))
1237 return -XFS_ERROR(EFAULT
);
1241 case XFS_IOC_SETXFLAGS
: {
1242 if (copy_from_user(&flags
, arg
, sizeof(flags
)))
1243 return -XFS_ERROR(EFAULT
);
1245 if (flags
& ~(LINUX_XFLAG_IMMUTABLE
| LINUX_XFLAG_APPEND
| \
1246 LINUX_XFLAG_NOATIME
| LINUX_XFLAG_NODUMP
| \
1248 return -XFS_ERROR(EOPNOTSUPP
);
1251 if (filp
->f_flags
& (O_NDELAY
|O_NONBLOCK
))
1252 attr_flags
|= ATTR_NONBLOCK
;
1254 va
.va_mask
= XFS_AT_XFLAGS
;
1255 va
.va_xflags
= xfs_merge_ioc_xflags(flags
,
1258 VOP_SETATTR(vp
, &va
, attr_flags
, NULL
, error
);
1260 vn_revalidate(vp
); /* update Linux inode flags */
1264 case XFS_IOC_GETVERSION
: {
1265 flags
= LINVFS_GET_IP(vp
)->i_generation
;
1266 if (copy_to_user(arg
, &flags
, sizeof(flags
)))
1267 return -XFS_ERROR(EFAULT
);
1288 if (copy_from_user(&bm
, arg
, sizeof(bm
)))
1289 return -XFS_ERROR(EFAULT
);
1291 if (bm
.bmv_count
< 2)
1292 return -XFS_ERROR(EINVAL
);
1294 iflags
= (cmd
== XFS_IOC_GETBMAPA
? BMV_IF_ATTRFORK
: 0);
1295 if (ioflags
& IO_INVIS
)
1296 iflags
|= BMV_IF_NO_DMAPI_READ
;
1298 error
= xfs_getbmap(bdp
, &bm
, (struct getbmap __user
*)arg
+1, iflags
);
1302 if (copy_to_user(arg
, &bm
, sizeof(bm
)))
1303 return -XFS_ERROR(EFAULT
);
1312 struct getbmapx bmx
;
1317 if (copy_from_user(&bmx
, arg
, sizeof(bmx
)))
1318 return -XFS_ERROR(EFAULT
);
1320 if (bmx
.bmv_count
< 2)
1321 return -XFS_ERROR(EINVAL
);
1324 * Map input getbmapx structure to a getbmap
1325 * structure for xfs_getbmap.
1327 GETBMAP_CONVERT(bmx
, bm
);
1329 iflags
= bmx
.bmv_iflags
;
1331 if (iflags
& (~BMV_IF_VALID
))
1332 return -XFS_ERROR(EINVAL
);
1334 iflags
|= BMV_IF_EXTENDED
;
1336 error
= xfs_getbmap(bdp
, &bm
, (struct getbmapx __user
*)arg
+1, iflags
);
1340 GETBMAP_CONVERT(bm
, bmx
);
1342 if (copy_to_user(arg
, &bmx
, sizeof(bmx
)))
1343 return -XFS_ERROR(EFAULT
);