/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_alloc.h"
#include "xfs_rtalloc.h"
#include "xfs_bmap.h"
#include "xfs_error.h"
#include "xfs_rw.h"
#include "xfs_quota.h"
#include "xfs_fsops.h"
#include "xfs_utils.h"
#include "xfs_trace.h"
STATIC void	xfs_unmountfs_wait(xfs_mount_t *);

#ifdef HAVE_PERCPU_SB
STATIC void	xfs_icsb_balance_counter(xfs_mount_t *, xfs_sb_field_t,
						int);
STATIC void	xfs_icsb_balance_counter_locked(xfs_mount_t *, xfs_sb_field_t,
						int);
STATIC void	xfs_icsb_disable_counter(xfs_mount_t *, xfs_sb_field_t);
#else

#define xfs_icsb_balance_counter(mp, a, b)		do { } while (0)
#define xfs_icsb_balance_counter_locked(mp, a, b)	do { } while (0)
#endif
static const struct {
	short offset;
	short type;	/* 0 = integer
			 * 1 = binary / string (no translation) */
} xfs_sb_info[] = {
    { offsetof(xfs_sb_t, sb_magicnum),   0 },
    { offsetof(xfs_sb_t, sb_blocksize),  0 },
    { offsetof(xfs_sb_t, sb_dblocks),    0 },
    { offsetof(xfs_sb_t, sb_rblocks),    0 },
    { offsetof(xfs_sb_t, sb_rextents),   0 },
    { offsetof(xfs_sb_t, sb_uuid),       1 },
    { offsetof(xfs_sb_t, sb_logstart),   0 },
    { offsetof(xfs_sb_t, sb_rootino),    0 },
    { offsetof(xfs_sb_t, sb_rbmino),     0 },
    { offsetof(xfs_sb_t, sb_rsumino),    0 },
    { offsetof(xfs_sb_t, sb_rextsize),   0 },
    { offsetof(xfs_sb_t, sb_agblocks),   0 },
    { offsetof(xfs_sb_t, sb_agcount),    0 },
    { offsetof(xfs_sb_t, sb_rbmblocks),  0 },
    { offsetof(xfs_sb_t, sb_logblocks),  0 },
    { offsetof(xfs_sb_t, sb_versionnum), 0 },
    { offsetof(xfs_sb_t, sb_sectsize),   0 },
    { offsetof(xfs_sb_t, sb_inodesize),  0 },
    { offsetof(xfs_sb_t, sb_inopblock),  0 },
    { offsetof(xfs_sb_t, sb_fname[0]),   1 },
    { offsetof(xfs_sb_t, sb_blocklog),   0 },
    { offsetof(xfs_sb_t, sb_sectlog),    0 },
    { offsetof(xfs_sb_t, sb_inodelog),   0 },
    { offsetof(xfs_sb_t, sb_inopblog),   0 },
    { offsetof(xfs_sb_t, sb_agblklog),   0 },
    { offsetof(xfs_sb_t, sb_rextslog),   0 },
    { offsetof(xfs_sb_t, sb_inprogress), 0 },
    { offsetof(xfs_sb_t, sb_imax_pct),   0 },
    { offsetof(xfs_sb_t, sb_icount),     0 },
    { offsetof(xfs_sb_t, sb_ifree),      0 },
    { offsetof(xfs_sb_t, sb_fdblocks),   0 },
    { offsetof(xfs_sb_t, sb_frextents),  0 },
    { offsetof(xfs_sb_t, sb_uquotino),   0 },
    { offsetof(xfs_sb_t, sb_gquotino),   0 },
    { offsetof(xfs_sb_t, sb_qflags),     0 },
    { offsetof(xfs_sb_t, sb_flags),      0 },
    { offsetof(xfs_sb_t, sb_shared_vn),  0 },
    { offsetof(xfs_sb_t, sb_inoalignmt), 0 },
    { offsetof(xfs_sb_t, sb_unit),       0 },
    { offsetof(xfs_sb_t, sb_width),      0 },
    { offsetof(xfs_sb_t, sb_dirblklog),  0 },
    { offsetof(xfs_sb_t, sb_logsectlog), 0 },
    { offsetof(xfs_sb_t, sb_logsectsize), 0 },
    { offsetof(xfs_sb_t, sb_logsunit),   0 },
    { offsetof(xfs_sb_t, sb_features2),  0 },
    { offsetof(xfs_sb_t, sb_bad_features2), 0 },
    { sizeof(xfs_sb_t),                  0 }
};
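/*
 * Note: the table above carries no explicit field sizes. xfs_sb_to_disk()
 * below derives the byte size of field f from the offset of the next
 * entry: size = xfs_sb_info[f + 1].offset - xfs_sb_info[f].offset. For
 * example, sb_blocksize is a 32-bit field, so its entry and the following
 * sb_dblocks entry differ by 4 bytes; the trailing { sizeof(xfs_sb_t), 0 }
 * sentinel terminates the size calculation for the last field.
 */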
static DEFINE_MUTEX(xfs_uuid_table_mutex);
static int xfs_uuid_table_size;
static uuid_t *xfs_uuid_table;
/*
 * See if the UUID is unique among mounted XFS filesystems.
 * Mount fails if UUID is nil or a FS with the same UUID is already mounted.
 */
STATIC int
xfs_uuid_mount(
	struct xfs_mount	*mp)
{
	uuid_t			*uuid = &mp->m_sb.sb_uuid;
	int			hole, i;

	if (mp->m_flags & XFS_MOUNT_NOUUID)
		return 0;

	if (uuid_is_nil(uuid)) {
		cmn_err(CE_WARN,
			"XFS: Filesystem %s has nil UUID - can't mount",
			mp->m_fsname);
		return XFS_ERROR(EINVAL);
	}

	mutex_lock(&xfs_uuid_table_mutex);
	for (i = 0, hole = -1; i < xfs_uuid_table_size; i++) {
		if (uuid_is_nil(&xfs_uuid_table[i])) {
			hole = i;
			continue;
		}
		if (uuid_equal(uuid, &xfs_uuid_table[i]))
			goto out_duplicate;
	}

	if (hole < 0) {
		xfs_uuid_table = kmem_realloc(xfs_uuid_table,
			(xfs_uuid_table_size + 1) * sizeof(*xfs_uuid_table),
			xfs_uuid_table_size * sizeof(*xfs_uuid_table),
			KM_SLEEP);
		hole = xfs_uuid_table_size++;
	}
	xfs_uuid_table[hole] = *uuid;
	mutex_unlock(&xfs_uuid_table_mutex);
	return 0;

 out_duplicate:
	mutex_unlock(&xfs_uuid_table_mutex);
	cmn_err(CE_WARN, "XFS: Filesystem %s has duplicate UUID - can't mount",
		mp->m_fsname);
	return XFS_ERROR(EINVAL);
}
STATIC void
xfs_uuid_unmount(
	struct xfs_mount	*mp)
{
	uuid_t			*uuid = &mp->m_sb.sb_uuid;
	int			i;

	if (mp->m_flags & XFS_MOUNT_NOUUID)
		return;

	mutex_lock(&xfs_uuid_table_mutex);
	for (i = 0; i < xfs_uuid_table_size; i++) {
		if (uuid_is_nil(&xfs_uuid_table[i]))
			continue;
		if (!uuid_equal(uuid, &xfs_uuid_table[i]))
			continue;
		memset(&xfs_uuid_table[i], 0, sizeof(uuid_t));
		break;
	}
	ASSERT(i < xfs_uuid_table_size);
	mutex_unlock(&xfs_uuid_table_mutex);
}
/*
 * Reference counting access wrappers to the perag structures.
 * Because we never free per-ag structures, the only thing we
 * have to protect against changes is the tree structure itself.
 */
struct xfs_perag *
xfs_perag_get(struct xfs_mount *mp, xfs_agnumber_t agno)
{
	struct xfs_perag	*pag;
	int			ref = 0;

	rcu_read_lock();
	pag = radix_tree_lookup(&mp->m_perag_tree, agno);
	if (pag) {
		ASSERT(atomic_read(&pag->pag_ref) >= 0);
		ref = atomic_inc_return(&pag->pag_ref);
	}
	rcu_read_unlock();
	trace_xfs_perag_get(mp, agno, ref, _RET_IP_);
	return pag;
}
/*
 * search from @first to find the next perag with the given tag set.
 */
struct xfs_perag *
xfs_perag_get_tag(
	struct xfs_mount	*mp,
	xfs_agnumber_t		first,
	int			tag)
{
	struct xfs_perag	*pag;
	int			found;
	int			ref;

	rcu_read_lock();
	found = radix_tree_gang_lookup_tag(&mp->m_perag_tree,
					(void **)&pag, first, 1, tag);
	if (found <= 0) {
		rcu_read_unlock();
		return NULL;
	}
	ref = atomic_inc_return(&pag->pag_ref);
	rcu_read_unlock();
	trace_xfs_perag_get_tag(mp, pag->pag_agno, ref, _RET_IP_);
	return pag;
}
void
xfs_perag_put(struct xfs_perag *pag)
{
	int	ref;

	ASSERT(atomic_read(&pag->pag_ref) > 0);
	ref = atomic_dec_return(&pag->pag_ref);
	trace_xfs_perag_put(pag->pag_mount, pag->pag_agno, ref, _RET_IP_);
}
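/*
 * Typical usage of the wrappers above (as seen elsewhere in this file):
 *
 *	pag = xfs_perag_get(mp, agno);
 *	... use pag->pagi_freecount, pag->pagf_freeblks, etc ...
 *	xfs_perag_put(pag);
 *
 * The reference count keeps the perag structure pinned while it is in use.
 */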
STATIC void
__xfs_free_perag(
	struct rcu_head	*head)
{
	struct xfs_perag *pag = container_of(head, struct xfs_perag, rcu_head);

	ASSERT(atomic_read(&pag->pag_ref) == 0);
	kmem_free(pag);
}
/*
 * Free up the per-ag resources associated with the mount structure.
 */
STATIC void
xfs_free_perag(
	xfs_mount_t	*mp)
{
	xfs_agnumber_t	agno;
	struct xfs_perag *pag;

	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
		spin_lock(&mp->m_perag_lock);
		pag = radix_tree_delete(&mp->m_perag_tree, agno);
		spin_unlock(&mp->m_perag_lock);
		ASSERT(pag);
		call_rcu(&pag->rcu_head, __xfs_free_perag);
	}
}
/*
 * Check size of device based on the (data/realtime) block count.
 * Note: this check is used by the growfs code as well as mount.
 */
int
xfs_sb_validate_fsb_count(
	xfs_sb_t	*sbp,
	__uint64_t	nblocks)
{
	ASSERT(PAGE_SHIFT >= sbp->sb_blocklog);
	ASSERT(sbp->sb_blocklog >= BBSHIFT);

#if XFS_BIG_BLKNOS     /* Limited by ULONG_MAX of page cache index */
	if (nblocks >> (PAGE_CACHE_SHIFT - sbp->sb_blocklog) > ULONG_MAX)
		return EFBIG;
#else                  /* Limited by UINT_MAX of sectors */
	if (nblocks << (sbp->sb_blocklog - BBSHIFT) > UINT_MAX)
		return EFBIG;
#endif
	return 0;
}
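/*
 * Worked example (illustrative): on a 32-bit kernel without
 * XFS_BIG_BLKNOS, a 4096 byte block is 8 sectors (sb_blocklog = 12,
 * BBSHIFT = 9), so nblocks << 3 must fit in UINT_MAX. That limits the
 * device to roughly 2^29 blocks, i.e. about 2TB.
 */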
/*
 * Check the validity of the SB found.
 */
STATIC int
xfs_mount_validate_sb(
	xfs_mount_t	*mp,
	xfs_sb_t	*sbp,
	int		flags)
{
	/*
	 * If the log device and data device have the
	 * same device number, the log is internal.
	 * Consequently, the sb_logstart should be non-zero.  If
	 * we have a zero sb_logstart in this case, we may be trying to mount
	 * a volume filesystem in a non-volume manner.
	 */
	if (sbp->sb_magicnum != XFS_SB_MAGIC) {
		xfs_fs_mount_cmn_err(flags, "bad magic number");
		return XFS_ERROR(EWRONGFS);
	}

	if (!xfs_sb_good_version(sbp)) {
		xfs_fs_mount_cmn_err(flags, "bad version");
		return XFS_ERROR(EWRONGFS);
	}

	if (unlikely(
	    sbp->sb_logstart == 0 && mp->m_logdev_targp == mp->m_ddev_targp)) {
		xfs_fs_mount_cmn_err(flags,
			"filesystem is marked as having an external log; "
			"specify logdev on the\nmount command line.");
		return XFS_ERROR(EINVAL);
	}

	if (unlikely(
	    sbp->sb_logstart != 0 && mp->m_logdev_targp != mp->m_ddev_targp)) {
		xfs_fs_mount_cmn_err(flags,
			"filesystem is marked as having an internal log; "
			"do not specify logdev on\nthe mount command line.");
		return XFS_ERROR(EINVAL);
	}

	/*
	 * More sanity checking.  These were stolen directly from
	 * xfs_repair.
	 */
	if (unlikely(
	    sbp->sb_agcount <= 0					||
	    sbp->sb_sectsize < XFS_MIN_SECTORSIZE			||
	    sbp->sb_sectsize > XFS_MAX_SECTORSIZE			||
	    sbp->sb_sectlog < XFS_MIN_SECTORSIZE_LOG			||
	    sbp->sb_sectlog > XFS_MAX_SECTORSIZE_LOG			||
	    sbp->sb_sectsize != (1 << sbp->sb_sectlog)			||
	    sbp->sb_blocksize < XFS_MIN_BLOCKSIZE			||
	    sbp->sb_blocksize > XFS_MAX_BLOCKSIZE			||
	    sbp->sb_blocklog < XFS_MIN_BLOCKSIZE_LOG			||
	    sbp->sb_blocklog > XFS_MAX_BLOCKSIZE_LOG			||
	    sbp->sb_blocksize != (1 << sbp->sb_blocklog)		||
	    sbp->sb_inodesize < XFS_DINODE_MIN_SIZE			||
	    sbp->sb_inodesize > XFS_DINODE_MAX_SIZE			||
	    sbp->sb_inodelog < XFS_DINODE_MIN_LOG			||
	    sbp->sb_inodelog > XFS_DINODE_MAX_LOG			||
	    sbp->sb_inodesize != (1 << sbp->sb_inodelog)		||
	    (sbp->sb_blocklog - sbp->sb_inodelog != sbp->sb_inopblog)	||
	    (sbp->sb_rextsize * sbp->sb_blocksize > XFS_MAX_RTEXTSIZE)	||
	    (sbp->sb_rextsize * sbp->sb_blocksize < XFS_MIN_RTEXTSIZE)	||
	    (sbp->sb_imax_pct > 100 /* zero sb_imax_pct is valid */))) {
		xfs_fs_mount_cmn_err(flags, "SB sanity check 1 failed");
		return XFS_ERROR(EFSCORRUPTED);
	}

	/*
	 * Sanity check AG count, size fields against data size field
	 */
	if (unlikely(
	    sbp->sb_dblocks == 0 ||
	    sbp->sb_dblocks >
	     (xfs_drfsbno_t)sbp->sb_agcount * sbp->sb_agblocks ||
	    sbp->sb_dblocks < (xfs_drfsbno_t)(sbp->sb_agcount - 1) *
			      sbp->sb_agblocks + XFS_MIN_AG_BLOCKS)) {
		xfs_fs_mount_cmn_err(flags, "SB sanity check 2 failed");
		return XFS_ERROR(EFSCORRUPTED);
	}

	/*
	 * Until this is fixed only page-sized or smaller data blocks work.
	 */
	if (unlikely(sbp->sb_blocksize > PAGE_SIZE)) {
		xfs_fs_mount_cmn_err(flags,
			"file system with blocksize %d bytes",
			sbp->sb_blocksize);
		xfs_fs_mount_cmn_err(flags,
			"only pagesize (%ld) or less will currently work.",
			PAGE_SIZE);
		return XFS_ERROR(ENOSYS);
	}

	/*
	 * Currently only very few inode sizes are supported.
	 */
	switch (sbp->sb_inodesize) {
	case 256:
	case 512:
	case 1024:
	case 2048:
		break;
	default:
		xfs_fs_mount_cmn_err(flags,
			"inode size of %d bytes not supported",
			sbp->sb_inodesize);
		return XFS_ERROR(ENOSYS);
	}

	if (xfs_sb_validate_fsb_count(sbp, sbp->sb_dblocks) ||
	    xfs_sb_validate_fsb_count(sbp, sbp->sb_rblocks)) {
		xfs_fs_mount_cmn_err(flags,
			"file system too large to be mounted on this system.");
		return XFS_ERROR(EFBIG);
	}

	if (unlikely(sbp->sb_inprogress)) {
		xfs_fs_mount_cmn_err(flags, "file system busy");
		return XFS_ERROR(EFSCORRUPTED);
	}

	/*
	 * Version 1 directory format has never worked on Linux.
	 */
	if (unlikely(!xfs_sb_version_hasdirv2(sbp))) {
		xfs_fs_mount_cmn_err(flags,
			"file system using version 1 directory format");
		return XFS_ERROR(ENOSYS);
	}

	return 0;
}
int
xfs_initialize_perag(
	xfs_mount_t	*mp,
	xfs_agnumber_t	agcount,
	xfs_agnumber_t	*maxagi)
{
	xfs_agnumber_t	index, max_metadata;
	xfs_agnumber_t	first_initialised = 0;
	xfs_perag_t	*pag;
	xfs_agino_t	agino;
	xfs_ino_t	ino;
	xfs_sb_t	*sbp = &mp->m_sb;
	int		error = -ENOMEM;

	/*
	 * Walk the current per-ag tree so we don't try to initialise AGs
	 * that already exist (growfs case). Allocate and insert all the
	 * AGs we don't find ready for initialisation.
	 */
	for (index = 0; index < agcount; index++) {
		pag = xfs_perag_get(mp, index);
		if (pag) {
			xfs_perag_put(pag);
			continue;
		}
		if (!first_initialised)
			first_initialised = index;

		pag = kmem_zalloc(sizeof(*pag), KM_MAYFAIL);
		if (!pag)
			goto out_unwind;
		pag->pag_agno = index;
		pag->pag_mount = mp;
		rwlock_init(&pag->pag_ici_lock);
		mutex_init(&pag->pag_ici_reclaim_lock);
		INIT_RADIX_TREE(&pag->pag_ici_root, GFP_ATOMIC);
		spin_lock_init(&pag->pag_buf_lock);
		pag->pag_buf_tree = RB_ROOT;

		if (radix_tree_preload(GFP_NOFS))
			goto out_unwind;

		spin_lock(&mp->m_perag_lock);
		if (radix_tree_insert(&mp->m_perag_tree, index, pag)) {
			BUG();
			spin_unlock(&mp->m_perag_lock);
			radix_tree_preload_end();
			error = -EEXIST;
			goto out_unwind;
		}
		spin_unlock(&mp->m_perag_lock);
		radix_tree_preload_end();
	}

	/*
	 * If we mount with the inode64 option, or no inode overflows
	 * the legacy 32-bit address space clear the inode32 option.
	 */
	agino = XFS_OFFBNO_TO_AGINO(mp, sbp->sb_agblocks - 1, 0);
	ino = XFS_AGINO_TO_INO(mp, agcount - 1, agino);

	if ((mp->m_flags & XFS_MOUNT_SMALL_INUMS) && ino > XFS_MAXINUMBER_32)
		mp->m_flags |= XFS_MOUNT_32BITINODES;
	else
		mp->m_flags &= ~XFS_MOUNT_32BITINODES;

	if (mp->m_flags & XFS_MOUNT_32BITINODES) {
		/*
		 * Calculate how much should be reserved for inodes to meet
		 * the max inode percentage.
		 */
		if (mp->m_maxicount) {
			__uint64_t	icount;

			icount = sbp->sb_dblocks * sbp->sb_imax_pct;
			do_div(icount, 100);
			icount += sbp->sb_agblocks - 1;
			do_div(icount, sbp->sb_agblocks);
			max_metadata = icount;
		} else {
			max_metadata = agcount;
		}

		for (index = 0; index < agcount; index++) {
			ino = XFS_AGINO_TO_INO(mp, index, agino);
			if (ino > XFS_MAXINUMBER_32) {
				index++;
				break;
			}

			pag = xfs_perag_get(mp, index);
			pag->pagi_inodeok = 1;
			if (index < max_metadata)
				pag->pagf_metadata = 1;
			xfs_perag_put(pag);
		}
	} else {
		for (index = 0; index < agcount; index++) {
			pag = xfs_perag_get(mp, index);
			pag->pagi_inodeok = 1;
			xfs_perag_put(pag);
		}
	}

	if (maxagi)
		*maxagi = index;
	return 0;

out_unwind:
	kmem_free(pag);
	for (; index > first_initialised; index--) {
		pag = radix_tree_delete(&mp->m_perag_tree, index);
		kmem_free(pag);
	}
	return error;
}
void
xfs_sb_from_disk(
	xfs_sb_t	*to,
	xfs_dsb_t	*from)
{
	to->sb_magicnum = be32_to_cpu(from->sb_magicnum);
	to->sb_blocksize = be32_to_cpu(from->sb_blocksize);
	to->sb_dblocks = be64_to_cpu(from->sb_dblocks);
	to->sb_rblocks = be64_to_cpu(from->sb_rblocks);
	to->sb_rextents = be64_to_cpu(from->sb_rextents);
	memcpy(&to->sb_uuid, &from->sb_uuid, sizeof(to->sb_uuid));
	to->sb_logstart = be64_to_cpu(from->sb_logstart);
	to->sb_rootino = be64_to_cpu(from->sb_rootino);
	to->sb_rbmino = be64_to_cpu(from->sb_rbmino);
	to->sb_rsumino = be64_to_cpu(from->sb_rsumino);
	to->sb_rextsize = be32_to_cpu(from->sb_rextsize);
	to->sb_agblocks = be32_to_cpu(from->sb_agblocks);
	to->sb_agcount = be32_to_cpu(from->sb_agcount);
	to->sb_rbmblocks = be32_to_cpu(from->sb_rbmblocks);
	to->sb_logblocks = be32_to_cpu(from->sb_logblocks);
	to->sb_versionnum = be16_to_cpu(from->sb_versionnum);
	to->sb_sectsize = be16_to_cpu(from->sb_sectsize);
	to->sb_inodesize = be16_to_cpu(from->sb_inodesize);
	to->sb_inopblock = be16_to_cpu(from->sb_inopblock);
	memcpy(&to->sb_fname, &from->sb_fname, sizeof(to->sb_fname));
	to->sb_blocklog = from->sb_blocklog;
	to->sb_sectlog = from->sb_sectlog;
	to->sb_inodelog = from->sb_inodelog;
	to->sb_inopblog = from->sb_inopblog;
	to->sb_agblklog = from->sb_agblklog;
	to->sb_rextslog = from->sb_rextslog;
	to->sb_inprogress = from->sb_inprogress;
	to->sb_imax_pct = from->sb_imax_pct;
	to->sb_icount = be64_to_cpu(from->sb_icount);
	to->sb_ifree = be64_to_cpu(from->sb_ifree);
	to->sb_fdblocks = be64_to_cpu(from->sb_fdblocks);
	to->sb_frextents = be64_to_cpu(from->sb_frextents);
	to->sb_uquotino = be64_to_cpu(from->sb_uquotino);
	to->sb_gquotino = be64_to_cpu(from->sb_gquotino);
	to->sb_qflags = be16_to_cpu(from->sb_qflags);
	to->sb_flags = from->sb_flags;
	to->sb_shared_vn = from->sb_shared_vn;
	to->sb_inoalignmt = be32_to_cpu(from->sb_inoalignmt);
	to->sb_unit = be32_to_cpu(from->sb_unit);
	to->sb_width = be32_to_cpu(from->sb_width);
	to->sb_dirblklog = from->sb_dirblklog;
	to->sb_logsectlog = from->sb_logsectlog;
	to->sb_logsectsize = be16_to_cpu(from->sb_logsectsize);
	to->sb_logsunit = be32_to_cpu(from->sb_logsunit);
	to->sb_features2 = be32_to_cpu(from->sb_features2);
	to->sb_bad_features2 = be32_to_cpu(from->sb_bad_features2);
}
/*
 * Copy in core superblock to ondisk one.
 *
 * The fields argument is mask of superblock fields to copy.
 */
void
xfs_sb_to_disk(
	xfs_dsb_t	*to,
	xfs_sb_t	*from,
	__int64_t	fields)
{
	xfs_caddr_t	to_ptr = (xfs_caddr_t)to;
	xfs_caddr_t	from_ptr = (xfs_caddr_t)from;
	xfs_sb_field_t	f;
	int		first;
	int		size;

	ASSERT(fields);
	if (!fields)
		return;

	while (fields) {
		f = (xfs_sb_field_t)xfs_lowbit64((__uint64_t)fields);
		first = xfs_sb_info[f].offset;
		size = xfs_sb_info[f + 1].offset - first;

		ASSERT(xfs_sb_info[f].type == 0 || xfs_sb_info[f].type == 1);

		if (size == 1 || xfs_sb_info[f].type == 1) {
			memcpy(to_ptr + first, from_ptr + first, size);
		} else {
			switch (size) {
			case 2:
				*(__be16 *)(to_ptr + first) =
					cpu_to_be16(*(__u16 *)(from_ptr + first));
				break;
			case 4:
				*(__be32 *)(to_ptr + first) =
					cpu_to_be32(*(__u32 *)(from_ptr + first));
				break;
			case 8:
				*(__be64 *)(to_ptr + first) =
					cpu_to_be64(*(__u64 *)(from_ptr + first));
				break;
			default:
				ASSERT(0);
			}
		}

		fields &= ~(1LL << f);
	}
}
/*
 * Does the initial read of the superblock.
 */
int
xfs_readsb(xfs_mount_t *mp, int flags)
{
	unsigned int	sector_size;
	xfs_buf_t	*bp;
	int		error;

	ASSERT(mp->m_sb_bp == NULL);
	ASSERT(mp->m_ddev_targp != NULL);

	/*
	 * Allocate a (locked) buffer to hold the superblock.
	 * This will be kept around at all times to optimize
	 * access to the superblock.
	 */
	sector_size = xfs_getsize_buftarg(mp->m_ddev_targp);

reread:
	bp = xfs_buf_read_uncached(mp, mp->m_ddev_targp,
					XFS_SB_DADDR, sector_size, 0);
	if (!bp) {
		xfs_fs_mount_cmn_err(flags, "SB buffer read failed");
		return EIO;
	}

	/*
	 * Initialize the mount structure from the superblock.
	 * But first do some basic consistency checking.
	 */
	xfs_sb_from_disk(&mp->m_sb, XFS_BUF_TO_SBP(bp));
	error = xfs_mount_validate_sb(mp, &(mp->m_sb), flags);
	if (error) {
		xfs_fs_mount_cmn_err(flags, "SB validate failed");
		goto release_buf;
	}

	/*
	 * We must be able to do sector-sized and sector-aligned IO.
	 */
	if (sector_size > mp->m_sb.sb_sectsize) {
		xfs_fs_mount_cmn_err(flags,
			"device supports only %u byte sectors (not %u)",
			sector_size, mp->m_sb.sb_sectsize);
		error = ENOSYS;
		goto release_buf;
	}

	/*
	 * If device sector size is smaller than the superblock size,
	 * re-read the superblock so the buffer is correctly sized.
	 */
	if (sector_size < mp->m_sb.sb_sectsize) {
		xfs_buf_relse(bp);
		sector_size = mp->m_sb.sb_sectsize;
		goto reread;
	}

	/* Initialize per-cpu counters */
	xfs_icsb_reinit_counters(mp);

	mp->m_sb_bp = bp;
	xfs_buf_relse(bp);
	return 0;

release_buf:
	xfs_buf_relse(bp);
	return error;
}
/*
 * xfs_mount_common
 *
 * Mount initialization code establishing various mount
 * fields from the superblock associated with the given
 * mount structure
 */
STATIC void
xfs_mount_common(xfs_mount_t *mp, xfs_sb_t *sbp)
{
	mp->m_agfrotor = mp->m_agirotor = 0;
	spin_lock_init(&mp->m_agirotor_lock);
	mp->m_maxagi = mp->m_sb.sb_agcount;
	mp->m_blkbit_log = sbp->sb_blocklog + XFS_NBBYLOG;
	mp->m_blkbb_log = sbp->sb_blocklog - BBSHIFT;
	mp->m_sectbb_log = sbp->sb_sectlog - BBSHIFT;
	mp->m_agno_log = xfs_highbit32(sbp->sb_agcount - 1) + 1;
	mp->m_agino_log = sbp->sb_inopblog + sbp->sb_agblklog;
	mp->m_blockmask = sbp->sb_blocksize - 1;
	mp->m_blockwsize = sbp->sb_blocksize >> XFS_WORDLOG;
	mp->m_blockwmask = mp->m_blockwsize - 1;

	mp->m_alloc_mxr[0] = xfs_allocbt_maxrecs(mp, sbp->sb_blocksize, 1);
	mp->m_alloc_mxr[1] = xfs_allocbt_maxrecs(mp, sbp->sb_blocksize, 0);
	mp->m_alloc_mnr[0] = mp->m_alloc_mxr[0] / 2;
	mp->m_alloc_mnr[1] = mp->m_alloc_mxr[1] / 2;

	mp->m_inobt_mxr[0] = xfs_inobt_maxrecs(mp, sbp->sb_blocksize, 1);
	mp->m_inobt_mxr[1] = xfs_inobt_maxrecs(mp, sbp->sb_blocksize, 0);
	mp->m_inobt_mnr[0] = mp->m_inobt_mxr[0] / 2;
	mp->m_inobt_mnr[1] = mp->m_inobt_mxr[1] / 2;

	mp->m_bmap_dmxr[0] = xfs_bmbt_maxrecs(mp, sbp->sb_blocksize, 1);
	mp->m_bmap_dmxr[1] = xfs_bmbt_maxrecs(mp, sbp->sb_blocksize, 0);
	mp->m_bmap_dmnr[0] = mp->m_bmap_dmxr[0] / 2;
	mp->m_bmap_dmnr[1] = mp->m_bmap_dmxr[1] / 2;

	mp->m_bsize = XFS_FSB_TO_BB(mp, 1);
	mp->m_ialloc_inos = (int)MAX((__uint16_t)XFS_INODES_PER_CHUNK,
					sbp->sb_inopblock);
	mp->m_ialloc_blks = mp->m_ialloc_inos >> sbp->sb_inopblog;
}
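/*
 * Worked example (illustrative): for a filesystem with 4096 byte blocks
 * and 512 byte sectors, sb_blocklog = 12 and sb_sectlog = 9, so
 * m_blkbb_log = 12 - BBSHIFT = 3 (8 basic blocks per fs block),
 * m_sectbb_log = 0 and m_blockmask = 0xfff.
 */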
/*
 * xfs_initialize_perag_data
 *
 * Read in each per-ag structure so we can count up the number of
 * allocated inodes, free inodes and used filesystem blocks as this
 * information is no longer persistent in the superblock. Once we have
 * this information, write it into the in-core superblock structure.
 */
STATIC int
xfs_initialize_perag_data(xfs_mount_t *mp, xfs_agnumber_t agcount)
{
	xfs_agnumber_t	index;
	xfs_perag_t	*pag;
	xfs_sb_t	*sbp = &mp->m_sb;
	uint64_t	ifree = 0;
	uint64_t	ialloc = 0;
	uint64_t	bfree = 0;
	uint64_t	bfreelst = 0;
	uint64_t	btree = 0;
	int		error;

	for (index = 0; index < agcount; index++) {
		/*
		 * read the agf, then the agi. This gets us
		 * all the information we need and populates the
		 * per-ag structures for us.
		 */
		error = xfs_alloc_pagf_init(mp, NULL, index, 0);
		if (error)
			return error;

		error = xfs_ialloc_pagi_init(mp, NULL, index);
		if (error)
			return error;
		pag = xfs_perag_get(mp, index);
		ifree += pag->pagi_freecount;
		ialloc += pag->pagi_count;
		bfree += pag->pagf_freeblks;
		bfreelst += pag->pagf_flcount;
		btree += pag->pagf_btreeblks;
		xfs_perag_put(pag);
	}
	/*
	 * Overwrite incore superblock counters with just-read data
	 */
	spin_lock(&mp->m_sb_lock);
	sbp->sb_ifree = ifree;
	sbp->sb_icount = ialloc;
	sbp->sb_fdblocks = bfree + bfreelst + btree;
	spin_unlock(&mp->m_sb_lock);

	/* Fixup the per-cpu counters as well. */
	xfs_icsb_reinit_counters(mp);

	return 0;
}
/*
 * Update alignment values based on mount options and sb values
 */
STATIC int
xfs_update_alignment(xfs_mount_t *mp)
{
	xfs_sb_t	*sbp = &(mp->m_sb);

	if (mp->m_dalign) {
		/*
		 * If stripe unit and stripe width are not multiples
		 * of the fs blocksize turn off alignment.
		 */
		if ((BBTOB(mp->m_dalign) & mp->m_blockmask) ||
		    (BBTOB(mp->m_swidth) & mp->m_blockmask)) {
			if (mp->m_flags & XFS_MOUNT_RETERR) {
				cmn_err(CE_WARN,
					"XFS: alignment check 1 failed");
				return XFS_ERROR(EINVAL);
			}
			mp->m_dalign = mp->m_swidth = 0;
		} else {
			/*
			 * Convert the stripe unit and width to FSBs.
			 */
			mp->m_dalign = XFS_BB_TO_FSBT(mp, mp->m_dalign);
			if (mp->m_dalign && (sbp->sb_agblocks % mp->m_dalign)) {
				if (mp->m_flags & XFS_MOUNT_RETERR) {
					return XFS_ERROR(EINVAL);
				}
				xfs_fs_cmn_err(CE_WARN, mp,
"stripe alignment turned off: sunit(%d)/swidth(%d) incompatible with agsize(%d)",
					mp->m_dalign, mp->m_swidth,
					sbp->sb_agblocks);

				mp->m_dalign = 0;
				mp->m_swidth = 0;
			} else if (mp->m_dalign) {
				mp->m_swidth = XFS_BB_TO_FSBT(mp, mp->m_swidth);
			} else {
				if (mp->m_flags & XFS_MOUNT_RETERR) {
					xfs_fs_cmn_err(CE_WARN, mp,
"stripe alignment turned off: sunit(%d) less than bsize(%d)",
						mp->m_dalign,
						mp->m_blockmask + 1);
					return XFS_ERROR(EINVAL);
				}
				mp->m_swidth = 0;
			}
		}

		/*
		 * Update superblock with new values
		 * and log changes
		 */
		if (xfs_sb_version_hasdalign(sbp)) {
			if (sbp->sb_unit != mp->m_dalign) {
				sbp->sb_unit = mp->m_dalign;
				mp->m_update_flags |= XFS_SB_UNIT;
			}
			if (sbp->sb_width != mp->m_swidth) {
				sbp->sb_width = mp->m_swidth;
				mp->m_update_flags |= XFS_SB_WIDTH;
			}
		}
	} else if ((mp->m_flags & XFS_MOUNT_NOALIGN) != XFS_MOUNT_NOALIGN &&
		    xfs_sb_version_hasdalign(&mp->m_sb)) {
			mp->m_dalign = sbp->sb_unit;
			mp->m_swidth = sbp->sb_width;
	}

	return 0;
}
/*
 * Set the maximum inode count for this filesystem
 */
STATIC void
xfs_set_maxicount(xfs_mount_t *mp)
{
	xfs_sb_t	*sbp = &(mp->m_sb);
	__uint64_t	icount;

	if (sbp->sb_imax_pct) {
		/*
		 * Make sure the maximum inode count is a multiple
		 * of the units we allocate inodes in.
		 */
		icount = sbp->sb_dblocks * sbp->sb_imax_pct;
		do_div(icount, 100);
		do_div(icount, mp->m_ialloc_blks);
		mp->m_maxicount = (icount * mp->m_ialloc_blks) <<
				   sbp->sb_inopblog;
	} else {
		mp->m_maxicount = 0;
	}
}
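/*
 * Worked example (illustrative): with sb_imax_pct = 25 and
 * sb_dblocks = 1000000, icount is 250000 blocks; rounding that down to a
 * multiple of m_ialloc_blks and shifting by sb_inopblog converts the
 * block budget into a maximum inode count.
 */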
/*
 * Set the default minimum read and write sizes unless
 * already specified in a mount option.
 * We use smaller I/O sizes when the file system
 * is being used for NFS service (wsync mount option).
 */
STATIC void
xfs_set_rw_sizes(xfs_mount_t *mp)
{
	xfs_sb_t	*sbp = &(mp->m_sb);
	int		readio_log, writeio_log;

	if (!(mp->m_flags & XFS_MOUNT_DFLT_IOSIZE)) {
		if (mp->m_flags & XFS_MOUNT_WSYNC) {
			readio_log = XFS_WSYNC_READIO_LOG;
			writeio_log = XFS_WSYNC_WRITEIO_LOG;
		} else {
			readio_log = XFS_READIO_LOG_LARGE;
			writeio_log = XFS_WRITEIO_LOG_LARGE;
		}
	} else {
		readio_log = mp->m_readio_log;
		writeio_log = mp->m_writeio_log;
	}

	if (sbp->sb_blocklog > readio_log) {
		mp->m_readio_log = sbp->sb_blocklog;
	} else {
		mp->m_readio_log = readio_log;
	}
	mp->m_readio_blocks = 1 << (mp->m_readio_log - sbp->sb_blocklog);
	if (sbp->sb_blocklog > writeio_log) {
		mp->m_writeio_log = sbp->sb_blocklog;
	} else {
		mp->m_writeio_log = writeio_log;
	}
	mp->m_writeio_blocks = 1 << (mp->m_writeio_log - sbp->sb_blocklog);
}
/*
 * Set whether we're using inode alignment.
 */
STATIC void
xfs_set_inoalignment(xfs_mount_t *mp)
{
	if (xfs_sb_version_hasalign(&mp->m_sb) &&
	    mp->m_sb.sb_inoalignmt >=
	    XFS_B_TO_FSBT(mp, mp->m_inode_cluster_size))
		mp->m_inoalign_mask = mp->m_sb.sb_inoalignmt - 1;
	else
		mp->m_inoalign_mask = 0;
	/*
	 * If we are using stripe alignment, check whether
	 * the stripe unit is a multiple of the inode alignment
	 */
	if (mp->m_dalign && mp->m_inoalign_mask &&
	    !(mp->m_dalign & mp->m_inoalign_mask))
		mp->m_sinoalign = mp->m_dalign;
	else
		mp->m_sinoalign = 0;
}
/*
 * Check that the data (and log if separate) are an ok size.
 */
STATIC int
xfs_check_sizes(xfs_mount_t *mp)
{
	xfs_buf_t	*bp;
	xfs_daddr_t	d;

	d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks);
	if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_dblocks) {
		cmn_err(CE_WARN, "XFS: filesystem size mismatch detected");
		return XFS_ERROR(EFBIG);
	}
	bp = xfs_buf_read_uncached(mp, mp->m_ddev_targp,
					d - XFS_FSS_TO_BB(mp, 1),
					BBTOB(XFS_FSS_TO_BB(mp, 1)), 0);
	if (!bp) {
		cmn_err(CE_WARN, "XFS: last sector read failed");
		return EIO;
	}
	xfs_buf_relse(bp);

	if (mp->m_logdev_targp != mp->m_ddev_targp) {
		d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_logblocks);
		if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_logblocks) {
			cmn_err(CE_WARN, "XFS: log size mismatch detected");
			return XFS_ERROR(EFBIG);
		}
		bp = xfs_buf_read_uncached(mp, mp->m_logdev_targp,
					d - XFS_FSB_TO_BB(mp, 1),
					XFS_FSB_TO_B(mp, 1), 0);
		if (!bp) {
			cmn_err(CE_WARN, "XFS: log device read failed");
			return EIO;
		}
		xfs_buf_relse(bp);
	}
	return 0;
}
/*
 * Clear the quotaflags in memory and in the superblock.
 */
int
xfs_mount_reset_sbqflags(
	struct xfs_mount	*mp)
{
	int			error;
	struct xfs_trans	*tp;

	mp->m_qflags = 0;

	/*
	 * It is OK to look at sb_qflags here in mount path,
	 * without m_sb_lock.
	 */
	if (mp->m_sb.sb_qflags == 0)
		return 0;
	spin_lock(&mp->m_sb_lock);
	mp->m_sb.sb_qflags = 0;
	spin_unlock(&mp->m_sb_lock);

	/*
	 * If the fs is readonly, let the incore superblock run
	 * with quotas off but don't flush the update out to disk
	 */
	if (mp->m_flags & XFS_MOUNT_RDONLY)
		return 0;

	xfs_fs_cmn_err(CE_NOTE, mp, "Writing superblock quota changes");

	tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SBCHANGE);
	error = xfs_trans_reserve(tp, 0, mp->m_sb.sb_sectsize + 128, 0, 0,
				  XFS_DEFAULT_LOG_COUNT);
	if (error) {
		xfs_trans_cancel(tp, 0);
		xfs_fs_cmn_err(CE_ALERT, mp,
			"xfs_mount_reset_sbqflags: Superblock update failed!");
		return error;
	}

	xfs_mod_sb(tp, XFS_SB_QFLAGS);
	return xfs_trans_commit(tp, 0);
}
__uint64_t
xfs_default_resblks(xfs_mount_t *mp)
{
	__uint64_t resblks;

	/*
	 * We default to 5% or 8192 fsbs of space reserved, whichever is
	 * smaller.  This is intended to cover concurrent allocation
	 * transactions when we initially hit enospc. These each require a 4
	 * block reservation. Hence by default we cover roughly 2000 concurrent
	 * allocation reservations.
	 */
	resblks = mp->m_sb.sb_dblocks;
	do_div(resblks, 20);
	resblks = min_t(__uint64_t, resblks, 8192);
	return resblks;
}
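/*
 * Worked example (illustrative): a 1TB filesystem with 4K blocks has
 * sb_dblocks = 268435456, so 5% is well over the 8192-block cap and
 * resblks = 8192. At 4 blocks per allocation transaction that covers
 * roughly 2000 concurrent reservations, as noted above.
 */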
/*
 * This function does the following on an initial mount of a file system:
 *	- reads the superblock from disk and init the mount struct
 *	- if we're a 32-bit kernel, do a size check on the superblock
 *		so we don't mount terabyte filesystems
 *	- init mount struct realtime fields
 *	- allocate inode hash table for fs
 *	- init directory manager
 *	- perform recovery and init the log manager
 */
int
xfs_mountfs(
	xfs_mount_t	*mp)
{
	xfs_sb_t	*sbp = &(mp->m_sb);
	xfs_inode_t	*rip;
	__uint64_t	resblks;
	uint		quotamount = 0;
	uint		quotaflags = 0;
	int		error = 0;

	xfs_mount_common(mp, sbp);

	/*
	 * Check for a mismatched features2 values.  Older kernels
	 * read & wrote into the wrong sb offset for sb_features2
	 * on some platforms due to xfs_sb_t not being 64bit size aligned
	 * when sb_features2 was added, which made older superblock
	 * reading/writing routines swap it as a 64-bit value.
	 *
	 * For backwards compatibility, we make both slots equal.
	 *
	 * If we detect a mismatched field, we OR the set bits into the
	 * existing features2 field in case it has already been modified; we
	 * don't want to lose any features.  We then update the bad location
	 * with the ORed value so that older kernels will see any features2
	 * flags, and mark the two fields as needing updates once the
	 * transaction subsystem is online.
	 */
	if (xfs_sb_has_mismatched_features2(sbp)) {
		cmn_err(CE_WARN,
			"XFS: correcting sb_features alignment problem");
		sbp->sb_features2 |= sbp->sb_bad_features2;
		sbp->sb_bad_features2 = sbp->sb_features2;
		mp->m_update_flags |= XFS_SB_FEATURES2 | XFS_SB_BAD_FEATURES2;

		/*
		 * Re-check for ATTR2 in case it was found in bad_features2
		 * slot.
		 */
		if (xfs_sb_version_hasattr2(&mp->m_sb) &&
		   !(mp->m_flags & XFS_MOUNT_NOATTR2))
			mp->m_flags |= XFS_MOUNT_ATTR2;
	}

	if (xfs_sb_version_hasattr2(&mp->m_sb) &&
	   (mp->m_flags & XFS_MOUNT_NOATTR2)) {
		xfs_sb_version_removeattr2(&mp->m_sb);
		mp->m_update_flags |= XFS_SB_FEATURES2;

		/* update sb_versionnum for the clearing of the morebits */
		if (!sbp->sb_features2)
			mp->m_update_flags |= XFS_SB_VERSIONNUM;
	}

	/*
	 * Check if sb_agblocks is aligned at stripe boundary
	 * If sb_agblocks is NOT aligned turn off m_dalign since
	 * allocator alignment is within an ag, therefore ag has
	 * to be aligned at stripe boundary.
	 */
	error = xfs_update_alignment(mp);
	if (error)
		goto out;

	xfs_alloc_compute_maxlevels(mp);
	xfs_bmap_compute_maxlevels(mp, XFS_DATA_FORK);
	xfs_bmap_compute_maxlevels(mp, XFS_ATTR_FORK);
	xfs_ialloc_compute_maxlevels(mp);

	xfs_set_maxicount(mp);

	mp->m_maxioffset = xfs_max_file_offset(sbp->sb_blocklog);

	error = xfs_uuid_mount(mp);
	if (error)
		goto out;

	/*
	 * Set the minimum read and write sizes
	 */
	xfs_set_rw_sizes(mp);

	/*
	 * Set the inode cluster size.
	 * This may still be overridden by the file system
	 * block size if it is larger than the chosen cluster size.
	 */
	mp->m_inode_cluster_size = XFS_INODE_BIG_CLUSTER_SIZE;

	/*
	 * Set inode alignment fields
	 */
	xfs_set_inoalignment(mp);

	/*
	 * Check that the data (and log if separate) are an ok size.
	 */
	error = xfs_check_sizes(mp);
	if (error)
		goto out_remove_uuid;

	/*
	 * Initialize realtime fields in the mount structure
	 */
	error = xfs_rtmount_init(mp);
	if (error) {
		cmn_err(CE_WARN, "XFS: RT mount failed");
		goto out_remove_uuid;
	}

	/*
	 *  Copies the low order bits of the timestamp and the randomly
	 *  set "sequence" number out of a UUID.
	 */
	uuid_getnodeuniq(&sbp->sb_uuid, mp->m_fixedfsid);

	mp->m_dmevmask = 0;	/* not persistent; set after each mount */

	xfs_dir_mount(mp);

	/*
	 * Initialize the attribute manager's entries.
	 */
	mp->m_attr_magicpct = (mp->m_sb.sb_blocksize * 37) / 100;

	/*
	 * Initialize the precomputed transaction reservations values.
	 */
	xfs_trans_init(mp);

	/*
	 * Allocate and initialize the per-ag data.
	 */
	spin_lock_init(&mp->m_perag_lock);
	INIT_RADIX_TREE(&mp->m_perag_tree, GFP_ATOMIC);
	error = xfs_initialize_perag(mp, sbp->sb_agcount, &mp->m_maxagi);
	if (error) {
		cmn_err(CE_WARN, "XFS: Failed per-ag init: %d", error);
		goto out_remove_uuid;
	}

	if (!sbp->sb_logblocks) {
		cmn_err(CE_WARN, "XFS: no log defined");
		XFS_ERROR_REPORT("xfs_mountfs", XFS_ERRLEVEL_LOW, mp);
		error = XFS_ERROR(EFSCORRUPTED);
		goto out_free_perag;
	}

	/*
	 * log's mount-time initialization. Perform 1st part recovery if needed
	 */
	error = xfs_log_mount(mp, mp->m_logdev_targp,
			      XFS_FSB_TO_DADDR(mp, sbp->sb_logstart),
			      XFS_FSB_TO_BB(mp, sbp->sb_logblocks));
	if (error) {
		cmn_err(CE_WARN, "XFS: log mount failed");
		goto out_free_perag;
	}

	/*
	 * Now the log is mounted, we know if it was an unclean shutdown or
	 * not. If it was, with the first phase of recovery has completed, we
	 * have consistent AG blocks on disk. We have not recovered EFIs yet,
	 * but they are recovered transactionally in the second recovery phase
	 * later.
	 *
	 * Hence we can safely re-initialise incore superblock counters from
	 * the per-ag data. These may not be correct if the filesystem was not
	 * cleanly unmounted, so we need to wait for recovery to finish before
	 * doing this.
	 *
	 * If the filesystem was cleanly unmounted, then we can trust the
	 * values in the superblock to be correct and we don't need to do
	 * anything here.
	 *
	 * If we are currently making the filesystem, the initialisation will
	 * fail as the perag data is in an undefined state.
	 */
	if (xfs_sb_version_haslazysbcount(&mp->m_sb) &&
	    !XFS_LAST_UNMOUNT_WAS_CLEAN(mp) &&
	     !mp->m_sb.sb_inprogress) {
		error = xfs_initialize_perag_data(mp, sbp->sb_agcount);
		if (error)
			goto out_free_perag;
	}

	/*
	 * Get and sanity-check the root inode.
	 * Save the pointer to it in the mount structure.
	 */
	error = xfs_iget(mp, NULL, sbp->sb_rootino, 0, XFS_ILOCK_EXCL, &rip);
	if (error) {
		cmn_err(CE_WARN, "XFS: failed to read root inode");
		goto out_log_dealloc;
	}

	ASSERT(rip != NULL);

	if (unlikely((rip->i_d.di_mode & S_IFMT) != S_IFDIR)) {
		cmn_err(CE_WARN, "XFS: corrupted root inode");
		cmn_err(CE_WARN, "Device %s - root %llu is not a directory",
			XFS_BUFTARG_NAME(mp->m_ddev_targp),
			(unsigned long long)rip->i_ino);
		xfs_iunlock(rip, XFS_ILOCK_EXCL);
		XFS_ERROR_REPORT("xfs_mountfs_int(2)", XFS_ERRLEVEL_LOW,
				 mp);
		error = XFS_ERROR(EFSCORRUPTED);
		goto out_rele_rip;
	}

	mp->m_rootip = rip;	/* save it */

	xfs_iunlock(rip, XFS_ILOCK_EXCL);

	/*
	 * Initialize realtime inode pointers in the mount structure
	 */
	error = xfs_rtmount_inodes(mp);
	if (error) {
		/*
		 * Free up the root inode.
		 */
		cmn_err(CE_WARN, "XFS: failed to read RT inodes");
		goto out_rele_rip;
	}

	/*
	 * If this is a read-only mount defer the superblock updates until
	 * the next remount into writeable mode.  Otherwise we would never
	 * perform the update e.g. for the root filesystem.
	 */
	if (mp->m_update_flags && !(mp->m_flags & XFS_MOUNT_RDONLY)) {
		error = xfs_mount_log_sb(mp, mp->m_update_flags);
		if (error) {
			cmn_err(CE_WARN, "XFS: failed to write sb changes");
			goto out_rtunmount;
		}
	}

	/*
	 * Initialise the XFS quota management subsystem for this mount
	 */
	if (XFS_IS_QUOTA_RUNNING(mp)) {
		error = xfs_qm_newmount(mp, &quotamount, &quotaflags);
		if (error)
			goto out_rtunmount;
	} else {
		ASSERT(!XFS_IS_QUOTA_ON(mp));

		/*
		 * If a file system had quotas running earlier, but decided to
		 * mount without -o uquota/pquota/gquota options, revoke the
		 * quotachecked license.
		 */
		if (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_ACCT) {
			cmn_err(CE_NOTE,
				"XFS: resetting qflags for filesystem %s",
				mp->m_fsname);

			error = xfs_mount_reset_sbqflags(mp);
			if (error)
				return error;
		}
	}

	/*
	 * Finish recovering the file system.  This part needed to be
	 * delayed until after the root and real-time bitmap inodes
	 * were consistently read in.
	 */
	error = xfs_log_mount_finish(mp);
	if (error) {
		cmn_err(CE_WARN, "XFS: log mount finish failed");
		goto out_rtunmount;
	}

	/*
	 * Complete the quota initialisation, post-log-replay component.
	 */
	if (quotamount) {
		ASSERT(mp->m_qflags == 0);
		mp->m_qflags = quotaflags;

		xfs_qm_mount_quotas(mp);
	}

	/*
	 * Now we are mounted, reserve a small amount of unused space for
	 * privileged transactions. This is needed so that transaction
	 * space required for critical operations can dip into this pool
	 * when at ENOSPC. This is needed for operations like create with
	 * attr, unwritten extent conversion at ENOSPC, etc. Data allocations
	 * are not allowed to use this reserved space.
	 *
	 * This may drive us straight to ENOSPC on mount, but that implies
	 * we were already there on the last unmount. Warn if this occurs.
	 */
	if (!(mp->m_flags & XFS_MOUNT_RDONLY)) {
		resblks = xfs_default_resblks(mp);
		error = xfs_reserve_blocks(mp, &resblks, NULL);
		if (error)
			cmn_err(CE_WARN, "XFS: Unable to allocate reserve "
				"blocks. Continuing without a reserve pool.");
	}

	return 0;

 out_rtunmount:
	xfs_rtunmount_inodes(mp);
 out_rele_rip:
	IRELE(rip);
 out_log_dealloc:
	xfs_log_unmount(mp);
 out_free_perag:
	xfs_free_perag(mp);
 out_remove_uuid:
	xfs_uuid_unmount(mp);
 out:
	return error;
}
/*
 * This flushes out the inodes, dquots and the superblock, unmounts the
 * log and makes sure that incore structures are freed.
 */
void
xfs_unmountfs(
	struct xfs_mount	*mp)
{
	__uint64_t		resblks;
	int			error;

	xfs_qm_unmount_quotas(mp);
	xfs_rtunmount_inodes(mp);
	IRELE(mp->m_rootip);

	/*
	 * We can potentially deadlock here if we have an inode cluster
	 * that has been freed but whose buffer is still pinned in memory
	 * because the transaction is still sitting in a iclog. The stale
	 * inodes on that buffer will have their flush locks held until the
	 * transaction hits the disk and the callbacks run. The inode
	 * flush takes the flush lock unconditionally, and with nothing to
	 * push out the iclog we will never get that unlocked. Hence we
	 * need to force the log first.
	 */
	xfs_log_force(mp, XFS_LOG_SYNC);

	/*
	 * Do a delwri reclaim pass first so that as many dirty inodes are
	 * queued up for IO as possible. Then flush the buffers before making
	 * a synchronous path to catch all the remaining inodes are reclaimed.
	 * This makes the reclaim process as quick as possible by avoiding
	 * synchronous writeout and blocking on inodes already in the delwri
	 * state as much as possible.
	 */
	xfs_reclaim_inodes(mp, 0);
	XFS_bflush(mp->m_ddev_targp);
	xfs_reclaim_inodes(mp, SYNC_WAIT);

	xfs_qm_unmount(mp);

	/*
	 * Flush out the log synchronously so that we know for sure
	 * that nothing is pinned.  This is important because bflush()
	 * will skip pinned buffers.
	 */
	xfs_log_force(mp, XFS_LOG_SYNC);

	xfs_binval(mp->m_ddev_targp);
	if (mp->m_rtdev_targp) {
		xfs_binval(mp->m_rtdev_targp);
	}

	/*
	 * Unreserve any blocks we have so that when we unmount we don't account
	 * the reserved free space as used. This is really only necessary for
	 * lazy superblock counting because it trusts the incore superblock
	 * counters to be absolutely correct on clean unmount.
	 *
	 * We don't bother correcting this elsewhere for lazy superblock
	 * counting because on mount of an unclean filesystem we reconstruct the
	 * correct counter value and this is irrelevant.
	 *
	 * For non-lazy counter filesystems, this doesn't matter at all because
	 * we only ever apply deltas to the superblock and hence the incore
	 * value does not matter....
	 */
	resblks = 0;
	error = xfs_reserve_blocks(mp, &resblks, NULL);
	if (error)
		cmn_err(CE_WARN, "XFS: Unable to free reserved block pool. "
				"Freespace may not be correct on next mount.");

	error = xfs_log_sbcount(mp, 1);
	if (error)
		cmn_err(CE_WARN, "XFS: Unable to update superblock counters. "
				"Freespace may not be correct on next mount.");
	xfs_unmountfs_writesb(mp);
	xfs_unmountfs_wait(mp); 		/* wait for async bufs */
	xfs_log_unmount_write(mp);
	xfs_log_unmount(mp);
	xfs_uuid_unmount(mp);

#if defined(DEBUG)
	xfs_errortag_clearall(mp, 0);
#endif
	xfs_free_perag(mp);
}
STATIC void
xfs_unmountfs_wait(xfs_mount_t *mp)
{
	if (mp->m_logdev_targp != mp->m_ddev_targp)
		xfs_wait_buftarg(mp->m_logdev_targp);
	if (mp->m_rtdev_targp)
		xfs_wait_buftarg(mp->m_rtdev_targp);
	xfs_wait_buftarg(mp->m_ddev_targp);
}
int
xfs_fs_writable(xfs_mount_t *mp)
{
	return !(xfs_test_for_freeze(mp) || XFS_FORCED_SHUTDOWN(mp) ||
		(mp->m_flags & XFS_MOUNT_RDONLY));
}
/*
 * xfs_log_sbcount
 *
 * Called either periodically to keep the on disk superblock values
 * roughly up to date or from unmount to make sure the values are
 * correct on a clean unmount.
 *
 * Note this code can be called during the process of freezing, so
 * we may need to use the transaction allocator which does not
 * block when the transaction subsystem is in its frozen state.
 */
int
xfs_log_sbcount(
	xfs_mount_t	*mp,
	uint		sync)
{
	xfs_trans_t	*tp;
	int		error;

	if (!xfs_fs_writable(mp))
		return 0;

	xfs_icsb_sync_counters(mp, 0);

	/*
	 * we don't need to do this if we are updating the superblock
	 * counters on every modification.
	 */
	if (!xfs_sb_version_haslazysbcount(&mp->m_sb))
		return 0;

	tp = _xfs_trans_alloc(mp, XFS_TRANS_SB_COUNT, KM_SLEEP);
	error = xfs_trans_reserve(tp, 0, mp->m_sb.sb_sectsize + 128, 0, 0,
					XFS_DEFAULT_LOG_COUNT);
	if (error) {
		xfs_trans_cancel(tp, 0);
		return error;
	}

	xfs_mod_sb(tp, XFS_SB_IFREE | XFS_SB_ICOUNT | XFS_SB_FDBLOCKS);
	if (sync)
		xfs_trans_set_sync(tp);
	error = xfs_trans_commit(tp, 0);
	return error;
}
int
xfs_unmountfs_writesb(xfs_mount_t *mp)
{
	xfs_buf_t	*sbp;
	int		error = 0;

	/*
	 * skip superblock write if fs is read-only, or
	 * if we are doing a forced umount.
	 */
	if (!((mp->m_flags & XFS_MOUNT_RDONLY) ||
		XFS_FORCED_SHUTDOWN(mp))) {

		sbp = xfs_getsb(mp, 0);

		XFS_BUF_UNDONE(sbp);
		XFS_BUF_UNREAD(sbp);
		XFS_BUF_UNDELAYWRITE(sbp);
		XFS_BUF_WRITE(sbp);
		XFS_BUF_UNASYNC(sbp);
		ASSERT(XFS_BUF_TARGET(sbp) == mp->m_ddev_targp);
		xfsbdstrat(mp, sbp);
		error = xfs_buf_iowait(sbp);
		if (error)
			xfs_ioerror_alert("xfs_unmountfs_writesb",
					  mp, sbp, XFS_BUF_ADDR(sbp));
		xfs_buf_relse(sbp);
	}
	return error;
}
/*
 * xfs_mod_sb() can be used to copy arbitrary changes to the
 * in-core superblock into the superblock buffer to be logged.
 * It does not provide the higher level of locking that is
 * needed to protect the in-core superblock from concurrent
 * access.
 */
void
xfs_mod_sb(xfs_trans_t *tp, __int64_t fields)
{
	xfs_buf_t	*bp;
	int		first;
	int		last;
	xfs_mount_t	*mp;
	xfs_sb_field_t	f;

	ASSERT(fields);
	if (!fields)
		return;
	mp = tp->t_mountp;
	bp = xfs_trans_getsb(tp, mp, 0);
	first = sizeof(xfs_sb_t);
	last = 0;

	/* translate/copy */

	xfs_sb_to_disk(XFS_BUF_TO_SBP(bp), &mp->m_sb, fields);

	/* find modified range */
	f = (xfs_sb_field_t)xfs_highbit64((__uint64_t)fields);
	ASSERT((1LL << f) & XFS_SB_MOD_BITS);
	last = xfs_sb_info[f + 1].offset - 1;

	f = (xfs_sb_field_t)xfs_lowbit64((__uint64_t)fields);
	ASSERT((1LL << f) & XFS_SB_MOD_BITS);
	first = xfs_sb_info[f].offset;

	xfs_trans_log_buf(tp, bp, first, last);
}
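/*
 * Example (illustrative): xfs_mod_sb(tp, XFS_SB_QFLAGS), as used by
 * xfs_mount_reset_sbqflags() above, resolves both the lowest and the
 * highest set bit to sb_qflags, so only the bytes spanning that one
 * field are logged rather than the whole superblock.
 */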
/*
 * xfs_mod_incore_sb_unlocked() is a utility routine commonly used to apply
 * a delta to a specified field in the in-core superblock.  Simply
 * switch on the field indicated and apply the delta to that field.
 * Fields are not allowed to dip below zero, so if the delta would
 * do this do not apply it and return EINVAL.
 *
 * The m_sb_lock must be held when this routine is called.
 */
STATIC int
xfs_mod_incore_sb_unlocked(
	xfs_mount_t	*mp,
	xfs_sb_field_t	field,
	int64_t		delta,
	int		rsvd)
{
	int		scounter;	/* short counter for 32 bit fields */
	long long	lcounter;	/* long counter for 64 bit fields */
	long long	res_used, rem;

	/*
	 * With the in-core superblock spin lock held, switch
	 * on the indicated field.  Apply the delta to the
	 * proper field.  If the fields value would dip below
	 * 0, then do not apply the delta and return EINVAL.
	 */
	switch (field) {
	case XFS_SBS_ICOUNT:
		lcounter = (long long)mp->m_sb.sb_icount;
		lcounter += delta;
		if (lcounter < 0) {
			ASSERT(0);
			return XFS_ERROR(EINVAL);
		}
		mp->m_sb.sb_icount = lcounter;
		return 0;
	case XFS_SBS_IFREE:
		lcounter = (long long)mp->m_sb.sb_ifree;
		lcounter += delta;
		if (lcounter < 0) {
			ASSERT(0);
			return XFS_ERROR(EINVAL);
		}
		mp->m_sb.sb_ifree = lcounter;
		return 0;
	case XFS_SBS_FDBLOCKS:
		lcounter = (long long)
			mp->m_sb.sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp);
		res_used = (long long)(mp->m_resblks - mp->m_resblks_avail);

		if (delta > 0) {		/* Putting blocks back */
			if (res_used > delta) {
				mp->m_resblks_avail += delta;
			} else {
				rem = delta - res_used;
				mp->m_resblks_avail = mp->m_resblks;
				lcounter += rem;
			}
		} else {			/* Taking blocks away */
			lcounter += delta;
			if (lcounter >= 0) {
				mp->m_sb.sb_fdblocks = lcounter +
							XFS_ALLOC_SET_ASIDE(mp);
				return 0;
			}

			/*
			 * We are out of blocks, use any available reserved
			 * blocks if were allowed to.
			 */
			if (!rsvd)
				return XFS_ERROR(ENOSPC);

			lcounter = (long long)mp->m_resblks_avail + delta;
			if (lcounter >= 0) {
				mp->m_resblks_avail = lcounter;
				return 0;
			}
			printk_once(KERN_WARNING
				"Filesystem \"%s\": reserve blocks depleted! "
				"Consider increasing reserve pool size.",
				mp->m_fsname);
			return XFS_ERROR(ENOSPC);
		}

		mp->m_sb.sb_fdblocks = lcounter + XFS_ALLOC_SET_ASIDE(mp);
		return 0;
	case XFS_SBS_FREXTENTS:
		lcounter = (long long)mp->m_sb.sb_frextents;
		lcounter += delta;
		if (lcounter < 0) {
			return XFS_ERROR(ENOSPC);
		}
		mp->m_sb.sb_frextents = lcounter;
		return 0;
	case XFS_SBS_DBLOCKS:
		lcounter = (long long)mp->m_sb.sb_dblocks;
		lcounter += delta;
		if (lcounter < 0) {
			ASSERT(0);
			return XFS_ERROR(EINVAL);
		}
		mp->m_sb.sb_dblocks = lcounter;
		return 0;
	case XFS_SBS_AGCOUNT:
		scounter = mp->m_sb.sb_agcount;
		scounter += delta;
		if (scounter < 0) {
			ASSERT(0);
			return XFS_ERROR(EINVAL);
		}
		mp->m_sb.sb_agcount = scounter;
		return 0;
	case XFS_SBS_IMAX_PCT:
		scounter = mp->m_sb.sb_imax_pct;
		scounter += delta;
		if (scounter < 0) {
			ASSERT(0);
			return XFS_ERROR(EINVAL);
		}
		mp->m_sb.sb_imax_pct = scounter;
		return 0;
	case XFS_SBS_REXTSIZE:
		scounter = mp->m_sb.sb_rextsize;
		scounter += delta;
		if (scounter < 0) {
			ASSERT(0);
			return XFS_ERROR(EINVAL);
		}
		mp->m_sb.sb_rextsize = scounter;
		return 0;
	case XFS_SBS_RBMBLOCKS:
		scounter = mp->m_sb.sb_rbmblocks;
		scounter += delta;
		if (scounter < 0) {
			ASSERT(0);
			return XFS_ERROR(EINVAL);
		}
		mp->m_sb.sb_rbmblocks = scounter;
		return 0;
	case XFS_SBS_RBLOCKS:
		lcounter = (long long)mp->m_sb.sb_rblocks;
		lcounter += delta;
		if (lcounter < 0) {
			ASSERT(0);
			return XFS_ERROR(EINVAL);
		}
		mp->m_sb.sb_rblocks = lcounter;
		return 0;
	case XFS_SBS_REXTENTS:
		lcounter = (long long)mp->m_sb.sb_rextents;
		lcounter += delta;
		if (lcounter < 0) {
			ASSERT(0);
			return XFS_ERROR(EINVAL);
		}
		mp->m_sb.sb_rextents = lcounter;
		return 0;
	case XFS_SBS_REXTSLOG:
		scounter = mp->m_sb.sb_rextslog;
		scounter += delta;
		if (scounter < 0) {
			ASSERT(0);
			return XFS_ERROR(EINVAL);
		}
		mp->m_sb.sb_rextslog = scounter;
		return 0;
	default:
		ASSERT(0);
		return XFS_ERROR(EINVAL);
	}
}
/*
 * xfs_mod_incore_sb() is used to change a field in the in-core
 * superblock structure by the specified delta.  This modification
 * is protected by the m_sb_lock.  Just use the xfs_mod_incore_sb_unlocked()
 * routine to do the work.
 */
int
xfs_mod_incore_sb(
	struct xfs_mount	*mp,
	xfs_sb_field_t		field,
	int64_t			delta,
	int			rsvd)
{
	int			status;

#ifdef HAVE_PERCPU_SB
	ASSERT(field < XFS_SBS_ICOUNT || field > XFS_SBS_FDBLOCKS);
#endif
	spin_lock(&mp->m_sb_lock);
	status = xfs_mod_incore_sb_unlocked(mp, field, delta, rsvd);
	spin_unlock(&mp->m_sb_lock);

	return status;
}
/*
 * Change more than one field in the in-core superblock structure at a time.
 *
 * The fields and changes to those fields are specified in the array of
 * xfs_mod_sb structures passed in.  Either all of the specified deltas
 * will be applied or none of them will.  If any modified field dips below 0,
 * then all modifications will be backed out and EINVAL will be returned.
 *
 * Note that this function may not be used for the superblock values that
 * are tracked with the in-memory per-cpu counters - a direct call to
 * xfs_icsb_modify_counters is required for these.
 */
int
xfs_mod_incore_sb_batch(
	struct xfs_mount	*mp,
	xfs_mod_sb_t		*msb,
	uint			nmsb,
	int			rsvd)
{
	xfs_mod_sb_t		*msbp = &msb[0];
	int			error = 0;

	/*
	 * Loop through the array of mod structures and apply each individually.
	 * If any fail, then back out all those which have already been applied.
	 * Do all of this within the scope of the m_sb_lock so that all of the
	 * changes will be atomic.
	 */
	spin_lock(&mp->m_sb_lock);
	for (msbp = &msbp[0]; msbp < (msb + nmsb); msbp++) {
		ASSERT(msbp->msb_field < XFS_SBS_ICOUNT ||
		       msbp->msb_field > XFS_SBS_FDBLOCKS);

		error = xfs_mod_incore_sb_unlocked(mp, msbp->msb_field,
						   msbp->msb_delta, rsvd);
		if (error)
			goto unwind;
	}
	spin_unlock(&mp->m_sb_lock);
	return 0;

unwind:
	while (--msbp >= msb) {
		error = xfs_mod_incore_sb_unlocked(mp, msbp->msb_field,
						   -msbp->msb_delta, rsvd);
		ASSERT(error == 0);
	}
	spin_unlock(&mp->m_sb_lock);
	return error;
}
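/*
 * Example usage (an illustrative sketch, not taken from a real caller):
 *
 *	xfs_mod_sb_t	msb[2];
 *
 *	msb[0].msb_field = XFS_SBS_DBLOCKS;
 *	msb[0].msb_delta = new_blocks;
 *	msb[1].msb_field = XFS_SBS_AGCOUNT;
 *	msb[1].msb_delta = new_ags;
 *	error = xfs_mod_incore_sb_batch(mp, msb, 2, 0);
 *
 * Either both deltas are applied or, on failure, neither is.
 */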
/*
 * xfs_getsb() is called to obtain the buffer for the superblock.
 * The buffer is returned locked and read in from disk.
 * The buffer should be released with a call to xfs_brelse().
 *
 * If the flags parameter is BUF_TRYLOCK, then we'll only return
 * the superblock buffer if it can be locked without sleeping.
 * If it can't then we'll return NULL.
 */
xfs_buf_t *
xfs_getsb(
	xfs_mount_t	*mp,
	int		flags)
{
	xfs_buf_t	*bp;

	ASSERT(mp->m_sb_bp != NULL);
	bp = mp->m_sb_bp;
	if (flags & XBF_TRYLOCK) {
		if (!XFS_BUF_CPSEMA(bp)) {
			return NULL;
		}
	} else {
		XFS_BUF_PSEMA(bp, PRIBIO);
	}
	XFS_BUF_HOLD(bp);
	ASSERT(XFS_BUF_ISDONE(bp));
	return bp;
}
/*
 * Used to free the superblock along various error paths.
 */
void
xfs_freesb(
	struct xfs_mount	*mp)
{
	struct xfs_buf		*bp = mp->m_sb_bp;

	xfs_buf_lock(bp);
	mp->m_sb_bp = NULL;
	xfs_buf_relse(bp);
}
/*
 * Used to log changes to the superblock unit and width fields which could
 * be altered by the mount options, as well as any potential sb_features2
 * fixup. Only the first superblock is updated.
 */
int
xfs_mount_log_sb(
	xfs_mount_t	*mp,
	__int64_t	fields)
{
	xfs_trans_t	*tp;
	int		error;

	ASSERT(fields & (XFS_SB_UNIT | XFS_SB_WIDTH | XFS_SB_UUID |
			 XFS_SB_FEATURES2 | XFS_SB_BAD_FEATURES2 |
			 XFS_SB_VERSIONNUM));

	tp = xfs_trans_alloc(mp, XFS_TRANS_SB_UNIT);
	error = xfs_trans_reserve(tp, 0, mp->m_sb.sb_sectsize + 128, 0, 0,
				XFS_DEFAULT_LOG_COUNT);
	if (error) {
		xfs_trans_cancel(tp, 0);
		return error;
	}
	xfs_mod_sb(tp, fields);
	error = xfs_trans_commit(tp, 0);
	return error;
}
/*
 * If the underlying (data/log/rt) device is readonly, there are some
 * operations that cannot proceed.
 */
int
xfs_dev_is_read_only(
	struct xfs_mount	*mp,
	char			*message)
{
	if (xfs_readonly_buftarg(mp->m_ddev_targp) ||
	    xfs_readonly_buftarg(mp->m_logdev_targp) ||
	    (mp->m_rtdev_targp && xfs_readonly_buftarg(mp->m_rtdev_targp))) {
		cmn_err(CE_NOTE,
			"XFS: %s required on read-only device.", message);
		cmn_err(CE_NOTE,
			"XFS: write access unavailable, cannot proceed.");
		return EROFS;
	}
	return 0;
}
#ifdef HAVE_PERCPU_SB
/*
 * Per-cpu incore superblock counters
 *
 * Simple concept, difficult implementation
 *
 * Basically, replace the incore superblock counters with a distributed per cpu
 * counter for contended fields (e.g. free block count).
 *
 * Difficulties arise in that the incore sb is used for ENOSPC checking, and
 * hence needs to be accurately read when we are running low on space. Hence
 * there is a method to enable and disable the per-cpu counters based on how
 * much "stuff" is available in them.
 *
 * Basically, a counter is enabled if there is enough free resource to justify
 * running a per-cpu fast-path. If the per-cpu counter runs out (i.e. a local
 * ENOSPC), then we disable the counters to synchronise all callers and
 * re-distribute the available resources.
 *
 * If, once we redistributed the available resources, we still get a failure,
 * we disable the per-cpu counter and go through the slow path.
 *
 * The slow path is the current xfs_mod_incore_sb() function.  This means that
 * when we disable a per-cpu counter, we need to drain its resources back to
 * the global superblock. We do this after disabling the counter to prevent
 * more threads from queueing up on the counter.
 *
 * Essentially, this means that we still need a lock in the fast path to enable
 * synchronisation between the global counters and the per-cpu counters. This
 * is not a problem because the lock will be local to a CPU almost all the time
 * and have little contention except when we get to ENOSPC conditions.
 *
 * Basically, this lock becomes a barrier that enables us to lock out the fast
 * path while we do things like enabling and disabling counters and
 * synchronising the counters.
 *
 * Locking rules:
 *
 *	1. m_sb_lock before picking up per-cpu locks
 *	2. per-cpu locks always picked up via for_each_online_cpu() order
 *	3. accurate counter sync requires m_sb_lock + per cpu locks
 *	4. modifying per-cpu counters requires holding per-cpu lock
 *	5. modifying global counters requires holding m_sb_lock
 *	6. enabling or disabling a counter requires holding the m_sb_lock
 *	   and _none_ of the per-cpu locks.
 *
 * Disabled counters are only ever re-enabled by a balance operation
 * that results in more free resources per CPU than a given threshold.
 * To ensure counters don't remain disabled, they are rebalanced when
 * the global resource goes above a higher threshold (i.e. some hysteresis
 * is present to prevent thrashing).
 */
#ifdef CONFIG_HOTPLUG_CPU
/*
 * hot-plug CPU notifier support.
 *
 * We need a notifier per filesystem as we need to be able to identify
 * the filesystem to balance the counters out. This is achieved by
 * having a notifier block embedded in the xfs_mount_t and doing pointer
 * magic to get the mount pointer from the notifier block address.
 */
STATIC int
xfs_icsb_cpu_notify(
	struct notifier_block *nfb,
	unsigned long action,
	void *hcpu)
{
	xfs_icsb_cnts_t *cntp;
	xfs_mount_t	*mp;

	mp = (xfs_mount_t *)container_of(nfb, xfs_mount_t, m_icsb_notifier);
	cntp = (xfs_icsb_cnts_t *)
			per_cpu_ptr(mp->m_sb_cnts, (unsigned long)hcpu);
	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		/* Easy Case - initialize the area and locks, and
		 * then rebalance when online does everything else for us. */
		memset(cntp, 0, sizeof(xfs_icsb_cnts_t));
		break;
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		xfs_icsb_lock(mp);
		xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT, 0);
		xfs_icsb_balance_counter(mp, XFS_SBS_IFREE, 0);
		xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS, 0);
		xfs_icsb_unlock(mp);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		/* Disable all the counters, then fold the dead cpu's
		 * count into the total on the global superblock and
		 * re-enable the counters. */
		xfs_icsb_lock(mp);
		spin_lock(&mp->m_sb_lock);
		xfs_icsb_disable_counter(mp, XFS_SBS_ICOUNT);
		xfs_icsb_disable_counter(mp, XFS_SBS_IFREE);
		xfs_icsb_disable_counter(mp, XFS_SBS_FDBLOCKS);

		mp->m_sb.sb_icount += cntp->icsb_icount;
		mp->m_sb.sb_ifree += cntp->icsb_ifree;
		mp->m_sb.sb_fdblocks += cntp->icsb_fdblocks;

		memset(cntp, 0, sizeof(xfs_icsb_cnts_t));

		xfs_icsb_balance_counter_locked(mp, XFS_SBS_ICOUNT, 0);
		xfs_icsb_balance_counter_locked(mp, XFS_SBS_IFREE, 0);
		xfs_icsb_balance_counter_locked(mp, XFS_SBS_FDBLOCKS, 0);
		spin_unlock(&mp->m_sb_lock);
		xfs_icsb_unlock(mp);
		break;
	}

	return NOTIFY_OK;
}
#endif /* CONFIG_HOTPLUG_CPU */
int
xfs_icsb_init_counters(
	xfs_mount_t	*mp)
{
	xfs_icsb_cnts_t *cntp;
	int		i;

	mp->m_sb_cnts = alloc_percpu(xfs_icsb_cnts_t);
	if (mp->m_sb_cnts == NULL)
		return -ENOMEM;

#ifdef CONFIG_HOTPLUG_CPU
	mp->m_icsb_notifier.notifier_call = xfs_icsb_cpu_notify;
	mp->m_icsb_notifier.priority = 0;
	register_hotcpu_notifier(&mp->m_icsb_notifier);
#endif /* CONFIG_HOTPLUG_CPU */

	for_each_online_cpu(i) {
		cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i);
		memset(cntp, 0, sizeof(xfs_icsb_cnts_t));
	}

	mutex_init(&mp->m_icsb_mutex);

	/*
	 * start with all counters disabled so that the
	 * initial balance kicks us off correctly
	 */
	mp->m_icsb_counters = -1;
	return 0;
}
void
xfs_icsb_reinit_counters(
	xfs_mount_t	*mp)
{
	xfs_icsb_lock(mp);
	/*
	 * start with all counters disabled so that the
	 * initial balance kicks us off correctly
	 */
	mp->m_icsb_counters = -1;
	xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT, 0);
	xfs_icsb_balance_counter(mp, XFS_SBS_IFREE, 0);
	xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS, 0);
	xfs_icsb_unlock(mp);
}
void
xfs_icsb_destroy_counters(
	xfs_mount_t	*mp)
{
	if (mp->m_sb_cnts) {
		unregister_hotcpu_notifier(&mp->m_icsb_notifier);
		free_percpu(mp->m_sb_cnts);
	}
	mutex_destroy(&mp->m_icsb_mutex);
}
STATIC void
xfs_icsb_lock_cntr(
	xfs_icsb_cnts_t	*icsbp)
{
	while (test_and_set_bit(XFS_ICSB_FLAG_LOCK, &icsbp->icsb_flags)) {
		ndelay(1000);
	}
}

STATIC void
xfs_icsb_unlock_cntr(
	xfs_icsb_cnts_t	*icsbp)
{
	clear_bit(XFS_ICSB_FLAG_LOCK, &icsbp->icsb_flags);
}
STATIC void
xfs_icsb_lock_all_counters(
	xfs_mount_t	*mp)
{
	xfs_icsb_cnts_t *cntp;
	int		i;

	for_each_online_cpu(i) {
		cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i);
		xfs_icsb_lock_cntr(cntp);
	}
}

STATIC void
xfs_icsb_unlock_all_counters(
	xfs_mount_t	*mp)
{
	xfs_icsb_cnts_t *cntp;
	int		i;

	for_each_online_cpu(i) {
		cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i);
		xfs_icsb_unlock_cntr(cntp);
	}
}
STATIC void
xfs_icsb_count(
	xfs_mount_t	*mp,
	xfs_icsb_cnts_t	*cnt,
	int		flags)
{
	xfs_icsb_cnts_t *cntp;
	int		i;

	memset(cnt, 0, sizeof(xfs_icsb_cnts_t));

	if (!(flags & XFS_ICSB_LAZY_COUNT))
		xfs_icsb_lock_all_counters(mp);

	for_each_online_cpu(i) {
		cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i);
		cnt->icsb_icount += cntp->icsb_icount;
		cnt->icsb_ifree += cntp->icsb_ifree;
		cnt->icsb_fdblocks += cntp->icsb_fdblocks;
	}

	if (!(flags & XFS_ICSB_LAZY_COUNT))
		xfs_icsb_unlock_all_counters(mp);
}
STATIC int
xfs_icsb_counter_disabled(
	xfs_mount_t	*mp,
	xfs_sb_field_t	field)
{
	ASSERT((field >= XFS_SBS_ICOUNT) && (field <= XFS_SBS_FDBLOCKS));
	return test_bit(field, &mp->m_icsb_counters);
}
STATIC void
xfs_icsb_disable_counter(
	xfs_mount_t	*mp,
	xfs_sb_field_t	field)
{
	xfs_icsb_cnts_t	cnt;

	ASSERT((field >= XFS_SBS_ICOUNT) && (field <= XFS_SBS_FDBLOCKS));

	/*
	 * If we are already disabled, then there is nothing to do
	 * here. We check before locking all the counters to avoid
	 * the expensive lock operation when being called in the
	 * slow path and the counter is already disabled. This is
	 * safe because the only time we set or clear this state is under
	 * the m_icsb_mutex.
	 */
	if (xfs_icsb_counter_disabled(mp, field))
		return;

	xfs_icsb_lock_all_counters(mp);
	if (!test_and_set_bit(field, &mp->m_icsb_counters)) {
		/* drain back to superblock */

		xfs_icsb_count(mp, &cnt, XFS_ICSB_LAZY_COUNT);
		switch (field) {
		case XFS_SBS_ICOUNT:
			mp->m_sb.sb_icount = cnt.icsb_icount;
			break;
		case XFS_SBS_IFREE:
			mp->m_sb.sb_ifree = cnt.icsb_ifree;
			break;
		case XFS_SBS_FDBLOCKS:
			mp->m_sb.sb_fdblocks = cnt.icsb_fdblocks;
			break;
		default:
			BUG();
		}
	}

	xfs_icsb_unlock_all_counters(mp);
}
STATIC void
xfs_icsb_enable_counter(
	xfs_mount_t	*mp,
	xfs_sb_field_t	field,
	uint64_t	count,
	uint64_t	resid)
{
	xfs_icsb_cnts_t	*cntp;
	int		i;

	ASSERT((field >= XFS_SBS_ICOUNT) && (field <= XFS_SBS_FDBLOCKS));

	xfs_icsb_lock_all_counters(mp);
	for_each_online_cpu(i) {
		cntp = per_cpu_ptr(mp->m_sb_cnts, i);
		switch (field) {
		case XFS_SBS_ICOUNT:
			cntp->icsb_icount = count + resid;
			break;
		case XFS_SBS_IFREE:
			cntp->icsb_ifree = count + resid;
			break;
		case XFS_SBS_FDBLOCKS:
			cntp->icsb_fdblocks = count + resid;
			break;
		default:
			BUG();
			break;
		}
		resid = 0;
	}
	clear_bit(field, &mp->m_icsb_counters);
	xfs_icsb_unlock_all_counters(mp);
}
void
xfs_icsb_sync_counters_locked(
	xfs_mount_t	*mp,
	int		flags)
{
	xfs_icsb_cnts_t	cnt;

	xfs_icsb_count(mp, &cnt, flags);

	if (!xfs_icsb_counter_disabled(mp, XFS_SBS_ICOUNT))
		mp->m_sb.sb_icount = cnt.icsb_icount;
	if (!xfs_icsb_counter_disabled(mp, XFS_SBS_IFREE))
		mp->m_sb.sb_ifree = cnt.icsb_ifree;
	if (!xfs_icsb_counter_disabled(mp, XFS_SBS_FDBLOCKS))
		mp->m_sb.sb_fdblocks = cnt.icsb_fdblocks;
}
/*
 * Accurate update of per-cpu counters to incore superblock
 */
void
xfs_icsb_sync_counters(
	xfs_mount_t	*mp,
	int		flags)
{
	spin_lock(&mp->m_sb_lock);
	xfs_icsb_sync_counters_locked(mp, flags);
	spin_unlock(&mp->m_sb_lock);
}
/*
 * Balance and enable/disable counters as necessary.
 *
 * Thresholds for re-enabling counters are somewhat magic.  inode counts are
 * chosen to be the same number as single on disk allocation chunk per CPU, and
 * free blocks is something far enough from zero that we aren't going to thrash
 * when we get near ENOSPC. We also need to supply a minimum we require per cpu
 * to prevent looping endlessly when xfs_alloc_space asks for more than will
 * be distributed to a single CPU but each CPU has enough blocks to be
 * reenabled.
 *
 * Note that we can be called when counters are already disabled.
 * xfs_icsb_disable_counter() optimises the counter locking in this case to
 * prevent locking every per-cpu counter needlessly.
 */

#define XFS_ICSB_INO_CNTR_REENABLE	(uint64_t)64
#define XFS_ICSB_FDBLK_CNTR_REENABLE(mp) \
		(uint64_t)(512 + XFS_ALLOC_SET_ASIDE(mp))
STATIC void
xfs_icsb_balance_counter_locked(
	xfs_mount_t	*mp,
	xfs_sb_field_t	field,
	int		min_per_cpu)
{
	uint64_t	count, resid;
	int		weight = num_online_cpus();
	uint64_t	min = (uint64_t)min_per_cpu;

	/* disable counter and sync counter */
	xfs_icsb_disable_counter(mp, field);

	/* update counters  - first CPU gets residual*/
	switch (field) {
	case XFS_SBS_ICOUNT:
		count = mp->m_sb.sb_icount;
		resid = do_div(count, weight);
		if (count < max(min, XFS_ICSB_INO_CNTR_REENABLE))
			return;
		break;
	case XFS_SBS_IFREE:
		count = mp->m_sb.sb_ifree;
		resid = do_div(count, weight);
		if (count < max(min, XFS_ICSB_INO_CNTR_REENABLE))
			return;
		break;
	case XFS_SBS_FDBLOCKS:
		count = mp->m_sb.sb_fdblocks;
		resid = do_div(count, weight);
		if (count < max(min, XFS_ICSB_FDBLK_CNTR_REENABLE(mp)))
			return;
		break;
	default:
		BUG();
		count = resid = 0;	/* quiet, gcc */
		break;
	}

	xfs_icsb_enable_counter(mp, field, count, resid);
}

STATIC void
xfs_icsb_balance_counter(
	xfs_mount_t	*mp,
	xfs_sb_field_t  fields,
	int		min_per_cpu)
{
	spin_lock(&mp->m_sb_lock);
	xfs_icsb_balance_counter_locked(mp, fields, min_per_cpu);
	spin_unlock(&mp->m_sb_lock);
}
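/*
 * Worked example (illustrative): with 8 online CPUs and
 * sb_icount = 80000, the balance above computes count = 10000 and
 * resid = 0; 10000 >= XFS_ICSB_INO_CNTR_REENABLE (64), so each CPU is
 * given 10000 inodes (the first CPU would also receive any residual)
 * and the counter is re-enabled.
 */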
int
xfs_icsb_modify_counters(
	xfs_mount_t	*mp,
	xfs_sb_field_t	field,
	int64_t		delta,
	int		rsvd)
{
	xfs_icsb_cnts_t	*icsbp;
	long long	lcounter;	/* long counter for 64 bit fields */
	int		ret = 0;

	might_sleep();
again:
	preempt_disable();
	icsbp = this_cpu_ptr(mp->m_sb_cnts);

	/*
	 * if the counter is disabled, go to slow path
	 */
	if (unlikely(xfs_icsb_counter_disabled(mp, field)))
		goto slow_path;
	xfs_icsb_lock_cntr(icsbp);
	if (unlikely(xfs_icsb_counter_disabled(mp, field))) {
		xfs_icsb_unlock_cntr(icsbp);
		goto slow_path;
	}

	switch (field) {
	case XFS_SBS_ICOUNT:
		lcounter = icsbp->icsb_icount;
		lcounter += delta;
		if (unlikely(lcounter < 0))
			goto balance_counter;
		icsbp->icsb_icount = lcounter;
		break;

	case XFS_SBS_IFREE:
		lcounter = icsbp->icsb_ifree;
		lcounter += delta;
		if (unlikely(lcounter < 0))
			goto balance_counter;
		icsbp->icsb_ifree = lcounter;
		break;

	case XFS_SBS_FDBLOCKS:
		BUG_ON((mp->m_resblks - mp->m_resblks_avail) != 0);

		lcounter = icsbp->icsb_fdblocks - XFS_ALLOC_SET_ASIDE(mp);
		lcounter += delta;
		if (unlikely(lcounter < 0))
			goto balance_counter;
		icsbp->icsb_fdblocks = lcounter + XFS_ALLOC_SET_ASIDE(mp);
		break;
	default:
		BUG();
		break;
	}
	xfs_icsb_unlock_cntr(icsbp);
	preempt_enable();
	return 0;

slow_path:
	preempt_enable();

	/*
	 * serialise with a mutex so we don't burn lots of cpu on
	 * the superblock lock. We still need to hold the superblock
	 * lock, however, when we modify the global structures.
	 */
	xfs_icsb_lock(mp);

	/*
	 * Now running atomically.
	 *
	 * If the counter is enabled, someone has beaten us to rebalancing.
	 * Drop the lock and try again in the fast path....
	 */
	if (!(xfs_icsb_counter_disabled(mp, field))) {
		xfs_icsb_unlock(mp);
		goto again;
	}

	/*
	 * The counter is currently disabled. Because we are
	 * running atomically here, we know a rebalance cannot
	 * be in progress. Hence we can go straight to operating
	 * on the global superblock. We do not call xfs_mod_incore_sb()
	 * here even though we need to get the m_sb_lock. Doing so
	 * will cause us to re-enter this function and deadlock.
	 * Hence we get the m_sb_lock ourselves and then call
	 * xfs_mod_incore_sb_unlocked() as the unlocked path operates
	 * directly on the global counters.
	 */
	spin_lock(&mp->m_sb_lock);
	ret = xfs_mod_incore_sb_unlocked(mp, field, delta, rsvd);
	spin_unlock(&mp->m_sb_lock);

	/*
	 * Now that we've modified the global superblock, we
	 * may be able to re-enable the distributed counters
	 * (e.g. lots of space just got freed). After that
	 * we are done.
	 */
	if (ret != ENOSPC)
		xfs_icsb_balance_counter(mp, field, 0);
	xfs_icsb_unlock(mp);
	return ret;

balance_counter:
	xfs_icsb_unlock_cntr(icsbp);
	preempt_enable();

	/*
	 * We may have multiple threads here if multiple per-cpu
	 * counters run dry at the same time. This will mean we can
	 * do more balances than strictly necessary but it is not
	 * the common slowpath case.
	 */
	xfs_icsb_lock(mp);

	/*
	 * running atomically.
	 *
	 * This will leave the counter in the correct state for future
	 * accesses. After the rebalance, we simply try again and our retry
	 * will either succeed through the fast path or slow path without
	 * another balance operation being required.
	 */
	xfs_icsb_balance_counter(mp, field, delta);
	xfs_icsb_unlock(mp);
	goto again;
}