/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs_types.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_alloc.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_fsops.h"
#include "xfs_utils.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_cksum.h"
#include "xfs_buf_item.h"
#ifdef HAVE_PERCPU_SB
STATIC void	xfs_icsb_balance_counter(xfs_mount_t *, xfs_sb_field_t,
						int);
STATIC void	xfs_icsb_balance_counter_locked(xfs_mount_t *, xfs_sb_field_t,
						int);
STATIC void	xfs_icsb_disable_counter(xfs_mount_t *, xfs_sb_field_t);
#else

#define xfs_icsb_balance_counter(mp, a, b)		do { } while (0)
#define xfs_icsb_balance_counter_locked(mp, a, b)	do { } while (0)
#endif
static const struct {
	short offset;
	short type;	/* 0 = integer
			 * 1 = binary / string (no translation)
			 */
} xfs_sb_info[] = {
	{ offsetof(xfs_sb_t, sb_magicnum), 0 },
	{ offsetof(xfs_sb_t, sb_blocksize), 0 },
	{ offsetof(xfs_sb_t, sb_dblocks), 0 },
	{ offsetof(xfs_sb_t, sb_rblocks), 0 },
	{ offsetof(xfs_sb_t, sb_rextents), 0 },
	{ offsetof(xfs_sb_t, sb_uuid), 1 },
	{ offsetof(xfs_sb_t, sb_logstart), 0 },
	{ offsetof(xfs_sb_t, sb_rootino), 0 },
	{ offsetof(xfs_sb_t, sb_rbmino), 0 },
	{ offsetof(xfs_sb_t, sb_rsumino), 0 },
	{ offsetof(xfs_sb_t, sb_rextsize), 0 },
	{ offsetof(xfs_sb_t, sb_agblocks), 0 },
	{ offsetof(xfs_sb_t, sb_agcount), 0 },
	{ offsetof(xfs_sb_t, sb_rbmblocks), 0 },
	{ offsetof(xfs_sb_t, sb_logblocks), 0 },
	{ offsetof(xfs_sb_t, sb_versionnum), 0 },
	{ offsetof(xfs_sb_t, sb_sectsize), 0 },
	{ offsetof(xfs_sb_t, sb_inodesize), 0 },
	{ offsetof(xfs_sb_t, sb_inopblock), 0 },
	{ offsetof(xfs_sb_t, sb_fname[0]), 1 },
	{ offsetof(xfs_sb_t, sb_blocklog), 0 },
	{ offsetof(xfs_sb_t, sb_sectlog), 0 },
	{ offsetof(xfs_sb_t, sb_inodelog), 0 },
	{ offsetof(xfs_sb_t, sb_inopblog), 0 },
	{ offsetof(xfs_sb_t, sb_agblklog), 0 },
	{ offsetof(xfs_sb_t, sb_rextslog), 0 },
	{ offsetof(xfs_sb_t, sb_inprogress), 0 },
	{ offsetof(xfs_sb_t, sb_imax_pct), 0 },
	{ offsetof(xfs_sb_t, sb_icount), 0 },
	{ offsetof(xfs_sb_t, sb_ifree), 0 },
	{ offsetof(xfs_sb_t, sb_fdblocks), 0 },
	{ offsetof(xfs_sb_t, sb_frextents), 0 },
	{ offsetof(xfs_sb_t, sb_uquotino), 0 },
	{ offsetof(xfs_sb_t, sb_gquotino), 0 },
	{ offsetof(xfs_sb_t, sb_qflags), 0 },
	{ offsetof(xfs_sb_t, sb_flags), 0 },
	{ offsetof(xfs_sb_t, sb_shared_vn), 0 },
	{ offsetof(xfs_sb_t, sb_inoalignmt), 0 },
	{ offsetof(xfs_sb_t, sb_unit), 0 },
	{ offsetof(xfs_sb_t, sb_width), 0 },
	{ offsetof(xfs_sb_t, sb_dirblklog), 0 },
	{ offsetof(xfs_sb_t, sb_logsectlog), 0 },
	{ offsetof(xfs_sb_t, sb_logsectsize), 0 },
	{ offsetof(xfs_sb_t, sb_logsunit), 0 },
	{ offsetof(xfs_sb_t, sb_features2), 0 },
	{ offsetof(xfs_sb_t, sb_bad_features2), 0 },
	{ offsetof(xfs_sb_t, sb_features_compat), 0 },
	{ offsetof(xfs_sb_t, sb_features_ro_compat), 0 },
	{ offsetof(xfs_sb_t, sb_features_incompat), 0 },
	{ offsetof(xfs_sb_t, sb_features_log_incompat), 0 },
	{ offsetof(xfs_sb_t, sb_crc), 0 },
	{ offsetof(xfs_sb_t, sb_pad), 0 },
	{ offsetof(xfs_sb_t, sb_pquotino), 0 },
	{ offsetof(xfs_sb_t, sb_lsn), 0 },
	{ sizeof(xfs_sb_t), 0 }
};
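
/*
 * No field size is stored in this table; xfs_sb_to_disk() and
 * xfs_mod_sb() below derive it from consecutive entries, i.e. for
 * field f:
 *
 *	size = xfs_sb_info[f + 1].offset - xfs_sb_info[f].offset;
 *
 * which is why the table must stay in on-disk field order and is
 * terminated by the sizeof(xfs_sb_t) sentinel entry.
 */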
static DEFINE_MUTEX(xfs_uuid_table_mutex);
static int xfs_uuid_table_size;
static uuid_t *xfs_uuid_table;
/*
 * See if the UUID is unique among mounted XFS filesystems.
 * Mount fails if UUID is nil or a FS with the same UUID is already mounted.
 */
STATIC int
xfs_uuid_mount(
	struct xfs_mount	*mp)
{
	uuid_t			*uuid = &mp->m_sb.sb_uuid;
	int			hole, i;

	if (mp->m_flags & XFS_MOUNT_NOUUID)
		return 0;

	if (uuid_is_nil(uuid)) {
		xfs_warn(mp, "Filesystem has nil UUID - can't mount");
		return XFS_ERROR(EINVAL);
	}

	mutex_lock(&xfs_uuid_table_mutex);
	for (i = 0, hole = -1; i < xfs_uuid_table_size; i++) {
		if (uuid_is_nil(&xfs_uuid_table[i])) {
			hole = i;
			continue;
		}
		if (uuid_equal(uuid, &xfs_uuid_table[i]))
			goto out_duplicate;
	}

	if (hole < 0) {
		xfs_uuid_table = kmem_realloc(xfs_uuid_table,
			(xfs_uuid_table_size + 1) * sizeof(*xfs_uuid_table),
			xfs_uuid_table_size  * sizeof(*xfs_uuid_table),
			KM_SLEEP);
		hole = xfs_uuid_table_size++;
	}
	xfs_uuid_table[hole] = *uuid;
	mutex_unlock(&xfs_uuid_table_mutex);

	return 0;

 out_duplicate:
	mutex_unlock(&xfs_uuid_table_mutex);
	xfs_warn(mp, "Filesystem has duplicate UUID %pU - can't mount", uuid);
	return XFS_ERROR(EINVAL);
}
STATIC void
xfs_uuid_unmount(
	struct xfs_mount	*mp)
{
	uuid_t			*uuid = &mp->m_sb.sb_uuid;
	int			i;

	if (mp->m_flags & XFS_MOUNT_NOUUID)
		return;

	mutex_lock(&xfs_uuid_table_mutex);
	for (i = 0; i < xfs_uuid_table_size; i++) {
		if (uuid_is_nil(&xfs_uuid_table[i]))
			continue;
		if (!uuid_equal(uuid, &xfs_uuid_table[i]))
			continue;
		memset(&xfs_uuid_table[i], 0, sizeof(uuid_t));
		break;
	}
	ASSERT(i < xfs_uuid_table_size);
	mutex_unlock(&xfs_uuid_table_mutex);
}
/*
 * Reference counting access wrappers to the perag structures.
 * Because we never free per-ag structures, the only thing we
 * have to protect against changes is the tree structure itself.
 */
struct xfs_perag *
xfs_perag_get(struct xfs_mount *mp, xfs_agnumber_t agno)
{
	struct xfs_perag	*pag;
	int			ref = 0;

	rcu_read_lock();
	pag = radix_tree_lookup(&mp->m_perag_tree, agno);
	if (pag) {
		ASSERT(atomic_read(&pag->pag_ref) >= 0);
		ref = atomic_inc_return(&pag->pag_ref);
	}
	rcu_read_unlock();
	trace_xfs_perag_get(mp, agno, ref, _RET_IP_);
	return pag;
}
/*
 * search from @first to find the next perag with the given tag set.
 */
struct xfs_perag *
xfs_perag_get_tag(
	struct xfs_mount	*mp,
	xfs_agnumber_t		first,
	int			tag)
{
	struct xfs_perag	*pag;
	int			found;
	int			ref;

	rcu_read_lock();
	found = radix_tree_gang_lookup_tag(&mp->m_perag_tree,
					(void **)&pag, first, 1, tag);
	if (found <= 0) {
		rcu_read_unlock();
		return NULL;
	}
	ref = atomic_inc_return(&pag->pag_ref);
	rcu_read_unlock();
	trace_xfs_perag_get_tag(mp, pag->pag_agno, ref, _RET_IP_);
	return pag;
}
void
xfs_perag_put(struct xfs_perag *pag)
{
	int	ref;

	ASSERT(atomic_read(&pag->pag_ref) > 0);
	ref = atomic_dec_return(&pag->pag_ref);
	trace_xfs_perag_put(pag->pag_mount, pag->pag_agno, ref, _RET_IP_);
}
STATIC void
__xfs_free_perag(
	struct rcu_head	*head)
{
	struct xfs_perag *pag = container_of(head, struct xfs_perag, rcu_head);

	ASSERT(atomic_read(&pag->pag_ref) == 0);
	kmem_free(pag);
}
/*
 * Free up the per-ag resources associated with the mount structure.
 */
STATIC void
xfs_free_perag(
	xfs_mount_t	*mp)
{
	xfs_agnumber_t	agno;
	struct xfs_perag *pag;

	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
		spin_lock(&mp->m_perag_lock);
		pag = radix_tree_delete(&mp->m_perag_tree, agno);
		spin_unlock(&mp->m_perag_lock);
		ASSERT(pag);
		ASSERT(atomic_read(&pag->pag_ref) == 0);
		call_rcu(&pag->rcu_head, __xfs_free_perag);
	}
}
/*
 * Check size of device based on the (data/realtime) block count.
 * Note: this check is used by the growfs code as well as mount.
 */
int
xfs_sb_validate_fsb_count(
	xfs_sb_t	*sbp,
	__uint64_t	nblocks)
{
	ASSERT(PAGE_SHIFT >= sbp->sb_blocklog);
	ASSERT(sbp->sb_blocklog >= BBSHIFT);

#if XFS_BIG_BLKNOS     /* Limited by ULONG_MAX of page cache index */
	if (nblocks >> (PAGE_CACHE_SHIFT - sbp->sb_blocklog) > ULONG_MAX)
		return EFBIG;
#else                  /* Limited by UINT_MAX of sectors */
	if (nblocks << (sbp->sb_blocklog - BBSHIFT) > UINT_MAX)
		return EFBIG;
#endif
	return 0;
}
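
/*
 * Worked example: with 4k blocks and 4k pages, sb_blocklog equals
 * PAGE_CACHE_SHIFT, so with XFS_BIG_BLKNOS the limit is ULONG_MAX
 * blocks (effectively unlimited on 64 bit, ~16TB on 32 bit), while
 * without it the block count expressed in 512-byte sectors
 * (nblocks << 3) must fit in UINT_MAX, i.e. ~2TB.
 */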
/*
 * Check the validity of the SB found.
 */
STATIC int
xfs_mount_validate_sb(
	xfs_mount_t	*mp,
	xfs_sb_t	*sbp,
	bool		check_inprogress)
{
	/*
	 * If the log device and data device have the
	 * same device number, the log is internal.
	 * Consequently, the sb_logstart should be non-zero.  If
	 * we have a zero sb_logstart in this case, we may be trying to mount
	 * a volume filesystem in a non-volume manner.
	 */
	if (sbp->sb_magicnum != XFS_SB_MAGIC) {
		xfs_warn(mp, "bad magic number");
		return XFS_ERROR(EWRONGFS);
	}

	if (!xfs_sb_good_version(sbp)) {
		xfs_warn(mp, "bad version");
		return XFS_ERROR(EWRONGFS);
	}

	/*
	 * Version 5 superblock feature mask validation. Reject combinations the
	 * kernel cannot support up front before checking anything else.
	 */
	if (XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5) {
		xfs_alert(mp,
"Version 5 superblock detected. This kernel has EXPERIMENTAL support enabled!\n"
"Use of these features in this kernel is at your own risk!");

		if (xfs_sb_has_compat_feature(sbp,
					XFS_SB_FEAT_COMPAT_UNKNOWN)) {
			xfs_warn(mp,
"Superblock has unknown compatible features (0x%x) enabled.\n"
"Using a more recent kernel is recommended.",
				(sbp->sb_features_compat &
						XFS_SB_FEAT_COMPAT_UNKNOWN));
		}

		if (xfs_sb_has_ro_compat_feature(sbp,
					XFS_SB_FEAT_RO_COMPAT_UNKNOWN)) {
			xfs_alert(mp,
"Superblock has unknown read-only compatible features (0x%x) enabled.",
				(sbp->sb_features_ro_compat &
						XFS_SB_FEAT_RO_COMPAT_UNKNOWN));
			if (!(mp->m_flags & XFS_MOUNT_RDONLY)) {
				xfs_warn(mp,
"Attempted to mount read-only compatible filesystem read-write.\n"
"Filesystem can only be safely mounted read only.");
				return XFS_ERROR(EINVAL);
			}
		}
		if (xfs_sb_has_incompat_feature(sbp,
					XFS_SB_FEAT_INCOMPAT_UNKNOWN)) {
			xfs_warn(mp,
"Superblock has unknown incompatible features (0x%x) enabled.\n"
"Filesystem can not be safely mounted by this kernel.",
				(sbp->sb_features_incompat &
						XFS_SB_FEAT_INCOMPAT_UNKNOWN));
			return XFS_ERROR(EINVAL);
		}
	}

	if (unlikely(
	    sbp->sb_logstart == 0 && mp->m_logdev_targp == mp->m_ddev_targp)) {
		xfs_warn(mp,
		"filesystem is marked as having an external log; "
		"specify logdev on the mount command line.");
		return XFS_ERROR(EINVAL);
	}

	if (unlikely(
	    sbp->sb_logstart != 0 && mp->m_logdev_targp != mp->m_ddev_targp)) {
		xfs_warn(mp,
		"filesystem is marked as having an internal log; "
		"do not specify logdev on the mount command line.");
		return XFS_ERROR(EINVAL);
	}

	/*
	 * More sanity checking.  Most of these were stolen directly from
	 * xfs_repair.
	 */
	if (unlikely(
	    sbp->sb_agcount <= 0					||
	    sbp->sb_sectsize < XFS_MIN_SECTORSIZE			||
	    sbp->sb_sectsize > XFS_MAX_SECTORSIZE			||
	    sbp->sb_sectlog < XFS_MIN_SECTORSIZE_LOG			||
	    sbp->sb_sectlog > XFS_MAX_SECTORSIZE_LOG			||
	    sbp->sb_sectsize != (1 << sbp->sb_sectlog)			||
	    sbp->sb_blocksize < XFS_MIN_BLOCKSIZE			||
	    sbp->sb_blocksize > XFS_MAX_BLOCKSIZE			||
	    sbp->sb_blocklog < XFS_MIN_BLOCKSIZE_LOG			||
	    sbp->sb_blocklog > XFS_MAX_BLOCKSIZE_LOG			||
	    sbp->sb_blocksize != (1 << sbp->sb_blocklog)		||
	    sbp->sb_inodesize < XFS_DINODE_MIN_SIZE			||
	    sbp->sb_inodesize > XFS_DINODE_MAX_SIZE			||
	    sbp->sb_inodelog < XFS_DINODE_MIN_LOG			||
	    sbp->sb_inodelog > XFS_DINODE_MAX_LOG			||
	    sbp->sb_inodesize != (1 << sbp->sb_inodelog)		||
	    (sbp->sb_blocklog - sbp->sb_inodelog != sbp->sb_inopblog)	||
	    (sbp->sb_rextsize * sbp->sb_blocksize > XFS_MAX_RTEXTSIZE)	||
	    (sbp->sb_rextsize * sbp->sb_blocksize < XFS_MIN_RTEXTSIZE)	||
	    (sbp->sb_imax_pct > 100 /* zero sb_imax_pct is valid */)	||
	    sbp->sb_dblocks == 0					||
	    sbp->sb_dblocks > XFS_MAX_DBLOCKS(sbp)			||
	    sbp->sb_dblocks < XFS_MIN_DBLOCKS(sbp))) {
		XFS_CORRUPTION_ERROR("SB sanity check failed",
				XFS_ERRLEVEL_LOW, mp, sbp);
		return XFS_ERROR(EFSCORRUPTED);
	}

	/*
	 * Until this is fixed only page-sized or smaller data blocks work.
	 */
	if (unlikely(sbp->sb_blocksize > PAGE_SIZE)) {
		xfs_warn(mp,
		"File system with blocksize %d bytes. "
		"Only pagesize (%ld) or less will currently work.",
				sbp->sb_blocksize, PAGE_SIZE);
		return XFS_ERROR(ENOSYS);
	}

	/*
	 * Currently only very few inode sizes are supported.
	 */
	switch (sbp->sb_inodesize) {
	case 256:
	case 512:
	case 1024:
	case 2048:
		break;
	default:
		xfs_warn(mp, "inode size of %d bytes not supported",
				sbp->sb_inodesize);
		return XFS_ERROR(ENOSYS);
	}

	if (xfs_sb_validate_fsb_count(sbp, sbp->sb_dblocks) ||
	    xfs_sb_validate_fsb_count(sbp, sbp->sb_rblocks)) {
		xfs_warn(mp,
		"file system too large to be mounted on this system.");
		return XFS_ERROR(EFBIG);
	}

	if (check_inprogress && sbp->sb_inprogress) {
		xfs_warn(mp, "Offline file system operation in progress!");
		return XFS_ERROR(EFSCORRUPTED);
	}

	/*
	 * Version 1 directory format has never worked on Linux.
	 */
	if (unlikely(!xfs_sb_version_hasdirv2(sbp))) {
		xfs_warn(mp, "file system using version 1 directory format");
		return XFS_ERROR(ENOSYS);
	}

	return 0;
}
int
xfs_initialize_perag(
	xfs_mount_t	*mp,
	xfs_agnumber_t	agcount,
	xfs_agnumber_t	*maxagi)
{
	xfs_agnumber_t	index;
	xfs_agnumber_t	first_initialised = 0;
	xfs_perag_t	*pag;
	xfs_agino_t	agino;
	xfs_ino_t	ino;
	xfs_sb_t	*sbp = &mp->m_sb;
	int		error = -ENOMEM;

	/*
	 * Walk the current per-ag tree so we don't try to initialise AGs
	 * that already exist (growfs case). Allocate and insert all the
	 * AGs we don't find ready for initialisation.
	 */
	for (index = 0; index < agcount; index++) {
		pag = xfs_perag_get(mp, index);
		if (pag) {
			xfs_perag_put(pag);
			continue;
		}
		if (!first_initialised)
			first_initialised = index;

		pag = kmem_zalloc(sizeof(*pag), KM_MAYFAIL);
		if (!pag)
			goto out_unwind;
		pag->pag_agno = index;
		pag->pag_mount = mp;
		spin_lock_init(&pag->pag_ici_lock);
		mutex_init(&pag->pag_ici_reclaim_lock);
		INIT_RADIX_TREE(&pag->pag_ici_root, GFP_ATOMIC);
		spin_lock_init(&pag->pag_buf_lock);
		pag->pag_buf_tree = RB_ROOT;

		if (radix_tree_preload(GFP_NOFS))
			goto out_unwind;

		spin_lock(&mp->m_perag_lock);
		if (radix_tree_insert(&mp->m_perag_tree, index, pag)) {
			BUG();
			spin_unlock(&mp->m_perag_lock);
			radix_tree_preload_end();
			error = -EEXIST;
			goto out_unwind;
		}
		spin_unlock(&mp->m_perag_lock);
		radix_tree_preload_end();
	}

	/*
	 * If we mount with the inode64 option, or no inode overflows
	 * the legacy 32-bit address space clear the inode32 option.
	 */
	agino = XFS_OFFBNO_TO_AGINO(mp, sbp->sb_agblocks - 1, 0);
	ino = XFS_AGINO_TO_INO(mp, agcount - 1, agino);

	if ((mp->m_flags & XFS_MOUNT_SMALL_INUMS) && ino > XFS_MAXINUMBER_32)
		mp->m_flags |= XFS_MOUNT_32BITINODES;
	else
		mp->m_flags &= ~XFS_MOUNT_32BITINODES;

	if (mp->m_flags & XFS_MOUNT_32BITINODES)
		index = xfs_set_inode32(mp);
	else
		index = xfs_set_inode64(mp);

	if (maxagi)
		*maxagi = index;
	return 0;

out_unwind:
	kmem_free(pag);
	for (; index > first_initialised; index--) {
		pag = radix_tree_delete(&mp->m_perag_tree, index);
		kmem_free(pag);
	}
	return error;
}
void
xfs_sb_from_disk(
	struct xfs_sb	*to,
	xfs_dsb_t	*from)
{
	to->sb_magicnum = be32_to_cpu(from->sb_magicnum);
	to->sb_blocksize = be32_to_cpu(from->sb_blocksize);
	to->sb_dblocks = be64_to_cpu(from->sb_dblocks);
	to->sb_rblocks = be64_to_cpu(from->sb_rblocks);
	to->sb_rextents = be64_to_cpu(from->sb_rextents);
	memcpy(&to->sb_uuid, &from->sb_uuid, sizeof(to->sb_uuid));
	to->sb_logstart = be64_to_cpu(from->sb_logstart);
	to->sb_rootino = be64_to_cpu(from->sb_rootino);
	to->sb_rbmino = be64_to_cpu(from->sb_rbmino);
	to->sb_rsumino = be64_to_cpu(from->sb_rsumino);
	to->sb_rextsize = be32_to_cpu(from->sb_rextsize);
	to->sb_agblocks = be32_to_cpu(from->sb_agblocks);
	to->sb_agcount = be32_to_cpu(from->sb_agcount);
	to->sb_rbmblocks = be32_to_cpu(from->sb_rbmblocks);
	to->sb_logblocks = be32_to_cpu(from->sb_logblocks);
	to->sb_versionnum = be16_to_cpu(from->sb_versionnum);
	to->sb_sectsize = be16_to_cpu(from->sb_sectsize);
	to->sb_inodesize = be16_to_cpu(from->sb_inodesize);
	to->sb_inopblock = be16_to_cpu(from->sb_inopblock);
	memcpy(&to->sb_fname, &from->sb_fname, sizeof(to->sb_fname));
	to->sb_blocklog = from->sb_blocklog;
	to->sb_sectlog = from->sb_sectlog;
	to->sb_inodelog = from->sb_inodelog;
	to->sb_inopblog = from->sb_inopblog;
	to->sb_agblklog = from->sb_agblklog;
	to->sb_rextslog = from->sb_rextslog;
	to->sb_inprogress = from->sb_inprogress;
	to->sb_imax_pct = from->sb_imax_pct;
	to->sb_icount = be64_to_cpu(from->sb_icount);
	to->sb_ifree = be64_to_cpu(from->sb_ifree);
	to->sb_fdblocks = be64_to_cpu(from->sb_fdblocks);
	to->sb_frextents = be64_to_cpu(from->sb_frextents);
	to->sb_uquotino = be64_to_cpu(from->sb_uquotino);
	to->sb_gquotino = be64_to_cpu(from->sb_gquotino);
	to->sb_qflags = be16_to_cpu(from->sb_qflags);
	to->sb_flags = from->sb_flags;
	to->sb_shared_vn = from->sb_shared_vn;
	to->sb_inoalignmt = be32_to_cpu(from->sb_inoalignmt);
	to->sb_unit = be32_to_cpu(from->sb_unit);
	to->sb_width = be32_to_cpu(from->sb_width);
	to->sb_dirblklog = from->sb_dirblklog;
	to->sb_logsectlog = from->sb_logsectlog;
	to->sb_logsectsize = be16_to_cpu(from->sb_logsectsize);
	to->sb_logsunit = be32_to_cpu(from->sb_logsunit);
	to->sb_features2 = be32_to_cpu(from->sb_features2);
	to->sb_bad_features2 = be32_to_cpu(from->sb_bad_features2);
	to->sb_features_compat = be32_to_cpu(from->sb_features_compat);
	to->sb_features_ro_compat = be32_to_cpu(from->sb_features_ro_compat);
	to->sb_features_incompat = be32_to_cpu(from->sb_features_incompat);
	to->sb_features_log_incompat =
				be32_to_cpu(from->sb_features_log_incompat);
	to->sb_pad = 0;
	to->sb_pquotino = be64_to_cpu(from->sb_pquotino);
	to->sb_lsn = be64_to_cpu(from->sb_lsn);
}
/*
 * Copy in core superblock to ondisk one.
 *
 * The fields argument is mask of superblock fields to copy.
 */
void
xfs_sb_to_disk(
	xfs_dsb_t	*to,
	xfs_sb_t	*from,
	__int64_t	fields)
{
	xfs_caddr_t	to_ptr = (xfs_caddr_t)to;
	xfs_caddr_t	from_ptr = (xfs_caddr_t)from;
	xfs_sb_field_t	f;
	int		first;
	int		size;

	ASSERT(fields);
	if (!fields)
		return;

	while (fields) {
		f = (xfs_sb_field_t)xfs_lowbit64((__uint64_t)fields);
		first = xfs_sb_info[f].offset;
		size = xfs_sb_info[f + 1].offset - first;

		ASSERT(xfs_sb_info[f].type == 0 || xfs_sb_info[f].type == 1);

		if (size == 1 || xfs_sb_info[f].type == 1) {
			memcpy(to_ptr + first, from_ptr + first, size);
		} else {
			switch (size) {
			case 2:
				*(__be16 *)(to_ptr + first) =
					cpu_to_be16(*(__u16 *)(from_ptr + first));
				break;
			case 4:
				*(__be32 *)(to_ptr + first) =
					cpu_to_be32(*(__u32 *)(from_ptr + first));
				break;
			case 8:
				*(__be64 *)(to_ptr + first) =
					cpu_to_be64(*(__u64 *)(from_ptr + first));
				break;
			default:
				ASSERT(0);
			}
		}

		fields &= ~(1LL << f);
	}
}
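
/*
 * Example: for fields == (XFS_SB_UNIT | XFS_SB_WIDTH) the loop above
 * iterates twice. xfs_lowbit64() picks out sb_unit first, its 4-byte
 * value is byte-swapped into the on-disk buffer, the bit is cleared
 * from the mask, and then sb_width is handled the same way.
 */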
static int
xfs_sb_verify(
	struct xfs_buf	*bp)
{
	struct xfs_mount *mp = bp->b_target->bt_mount;
	struct xfs_sb	sb;

	xfs_sb_from_disk(&sb, XFS_BUF_TO_SBP(bp));

	/*
	 * Only check the in progress field for the primary superblock as
	 * mkfs.xfs doesn't clear it from secondary superblocks.
	 */
	return xfs_mount_validate_sb(mp, &sb, bp->b_bn == XFS_SB_DADDR);
}
/*
 * If the superblock has the CRC feature bit set or the CRC field is non-null,
 * check that the CRC is valid.  We check the CRC field is non-null because a
 * single bit error could clear the feature bit and unused parts of the
 * superblock are supposed to be zero. Hence a non-null crc field indicates that
 * we've potentially lost a feature bit and we should check it anyway.
 */
static void
xfs_sb_read_verify(
	struct xfs_buf	*bp)
{
	struct xfs_mount *mp = bp->b_target->bt_mount;
	struct xfs_dsb	*dsb = XFS_BUF_TO_SBP(bp);
	int		error;

	/*
	 * open code the version check to avoid needing to convert the entire
	 * superblock from disk order just to check the version number
	 */
	if (dsb->sb_magicnum == cpu_to_be32(XFS_SB_MAGIC) &&
	    (((be16_to_cpu(dsb->sb_versionnum) & XFS_SB_VERSION_NUMBITS) ==
						XFS_SB_VERSION_5) ||
	     dsb->sb_crc != 0)) {

		if (!xfs_verify_cksum(bp->b_addr, be16_to_cpu(dsb->sb_sectsize),
				      offsetof(struct xfs_sb, sb_crc))) {
			error = EFSCORRUPTED;
			goto out_error;
		}
	}
	error = xfs_sb_verify(bp);

out_error:
	if (error) {
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, bp->b_addr);
		xfs_buf_ioerror(bp, error);
	}
}
/*
 * We may be probed for a filesystem match, so we may not want to emit
 * messages when the superblock buffer is not actually an XFS superblock.
 * If we find an XFS superblock, then run a normal, noisy mount because we are
 * really going to mount it and want to know about errors.
 */
static void
xfs_sb_quiet_read_verify(
	struct xfs_buf	*bp)
{
	struct xfs_dsb	*dsb = XFS_BUF_TO_SBP(bp);

	if (dsb->sb_magicnum == cpu_to_be32(XFS_SB_MAGIC)) {
		/* XFS filesystem, verify noisily! */
		xfs_sb_read_verify(bp);
		return;
	}
	/* quietly fail */
	xfs_buf_ioerror(bp, EWRONGFS);
}
static void
xfs_sb_write_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_target->bt_mount;
	struct xfs_buf_log_item	*bip = bp->b_fspriv;
	int			error;

	error = xfs_sb_verify(bp);
	if (error) {
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, bp->b_addr);
		xfs_buf_ioerror(bp, error);
		return;
	}

	if (!xfs_sb_version_hascrc(&mp->m_sb))
		return;

	if (bip)
		XFS_BUF_TO_SBP(bp)->sb_lsn = cpu_to_be64(bip->bli_item.li_lsn);

	xfs_update_cksum(bp->b_addr, BBTOB(bp->b_length),
			 offsetof(struct xfs_sb, sb_crc));
}

const struct xfs_buf_ops xfs_sb_buf_ops = {
	.verify_read = xfs_sb_read_verify,
	.verify_write = xfs_sb_write_verify,
};

static const struct xfs_buf_ops xfs_sb_quiet_buf_ops = {
	.verify_read = xfs_sb_quiet_read_verify,
	.verify_write = xfs_sb_write_verify,
};
/*
 * xfs_readsb
 *
 * Does the initial read of the superblock.
 */
int
xfs_readsb(xfs_mount_t *mp, int flags)
{
	unsigned int	sector_size;
	struct xfs_buf	*bp;
	struct xfs_sb	*sbp = &mp->m_sb;
	int		error;
	int		loud = !(flags & XFS_MFSI_QUIET);

	ASSERT(mp->m_sb_bp == NULL);
	ASSERT(mp->m_ddev_targp != NULL);

	/*
	 * Allocate a (locked) buffer to hold the superblock.
	 * This will be kept around at all times to optimize
	 * access to the superblock.
	 */
	sector_size = xfs_getsize_buftarg(mp->m_ddev_targp);

reread:
	bp = xfs_buf_read_uncached(mp->m_ddev_targp, XFS_SB_DADDR,
				   BTOBB(sector_size), 0,
				   loud ? &xfs_sb_buf_ops
					: &xfs_sb_quiet_buf_ops);
	if (!bp) {
		if (loud)
			xfs_warn(mp, "SB buffer read failed");
		return EIO;
	}
	if (bp->b_error) {
		error = bp->b_error;
		if (loud)
			xfs_warn(mp, "SB validate failed with error %d.", error);
		goto release_buf;
	}

	/*
	 * Initialize the mount structure from the superblock.
	 */
	xfs_sb_from_disk(&mp->m_sb, XFS_BUF_TO_SBP(bp));

	/*
	 * We must be able to do sector-sized and sector-aligned IO.
	 */
	if (sector_size > sbp->sb_sectsize) {
		if (loud)
			xfs_warn(mp, "device supports %u byte sectors (not %u)",
				sector_size, sbp->sb_sectsize);
		error = ENOSYS;
		goto release_buf;
	}

	/*
	 * If device sector size is smaller than the superblock size,
	 * re-read the superblock so the buffer is correctly sized.
	 */
	if (sector_size < sbp->sb_sectsize) {
		xfs_buf_relse(bp);
		sector_size = sbp->sb_sectsize;
		goto reread;
	}

	/* Initialize per-cpu counters */
	xfs_icsb_reinit_counters(mp);

	/* no need to be quiet anymore, so reset the buf ops */
	bp->b_ops = &xfs_sb_buf_ops;

	mp->m_sb_bp = bp;
	xfs_buf_unlock(bp);
	return 0;

release_buf:
	xfs_buf_relse(bp);
	return error;
}
/*
 * xfs_mount_common
 *
 * Mount initialization code establishing various mount
 * fields from the superblock associated with the given
 * mount structure
 */
STATIC void
xfs_mount_common(xfs_mount_t *mp, xfs_sb_t *sbp)
{
	mp->m_agfrotor = mp->m_agirotor = 0;
	spin_lock_init(&mp->m_agirotor_lock);
	mp->m_maxagi = mp->m_sb.sb_agcount;
	mp->m_blkbit_log = sbp->sb_blocklog + XFS_NBBYLOG;
	mp->m_blkbb_log = sbp->sb_blocklog - BBSHIFT;
	mp->m_sectbb_log = sbp->sb_sectlog - BBSHIFT;
	mp->m_agno_log = xfs_highbit32(sbp->sb_agcount - 1) + 1;
	mp->m_agino_log = sbp->sb_inopblog + sbp->sb_agblklog;
	mp->m_blockmask = sbp->sb_blocksize - 1;
	mp->m_blockwsize = sbp->sb_blocksize >> XFS_WORDLOG;
	mp->m_blockwmask = mp->m_blockwsize - 1;

	mp->m_alloc_mxr[0] = xfs_allocbt_maxrecs(mp, sbp->sb_blocksize, 1);
	mp->m_alloc_mxr[1] = xfs_allocbt_maxrecs(mp, sbp->sb_blocksize, 0);
	mp->m_alloc_mnr[0] = mp->m_alloc_mxr[0] / 2;
	mp->m_alloc_mnr[1] = mp->m_alloc_mxr[1] / 2;

	mp->m_inobt_mxr[0] = xfs_inobt_maxrecs(mp, sbp->sb_blocksize, 1);
	mp->m_inobt_mxr[1] = xfs_inobt_maxrecs(mp, sbp->sb_blocksize, 0);
	mp->m_inobt_mnr[0] = mp->m_inobt_mxr[0] / 2;
	mp->m_inobt_mnr[1] = mp->m_inobt_mxr[1] / 2;

	mp->m_bmap_dmxr[0] = xfs_bmbt_maxrecs(mp, sbp->sb_blocksize, 1);
	mp->m_bmap_dmxr[1] = xfs_bmbt_maxrecs(mp, sbp->sb_blocksize, 0);
	mp->m_bmap_dmnr[0] = mp->m_bmap_dmxr[0] / 2;
	mp->m_bmap_dmnr[1] = mp->m_bmap_dmxr[1] / 2;

	mp->m_bsize = XFS_FSB_TO_BB(mp, 1);
	mp->m_ialloc_inos = (int)MAX((__uint16_t)XFS_INODES_PER_CHUNK,
					sbp->sb_inopblock);
	mp->m_ialloc_blks = mp->m_ialloc_inos >> sbp->sb_inopblog;
}
/*
 * xfs_initialize_perag_data
 *
 * Read in each per-ag structure so we can count up the number of
 * allocated inodes, free inodes and used filesystem blocks as this
 * information is no longer persistent in the superblock. Once we have
 * this information, write it into the in-core superblock structure.
 */
STATIC int
xfs_initialize_perag_data(xfs_mount_t *mp, xfs_agnumber_t agcount)
{
	xfs_agnumber_t	index;
	xfs_perag_t	*pag;
	xfs_sb_t	*sbp = &mp->m_sb;
	uint64_t	ifree = 0;
	uint64_t	ialloc = 0;
	uint64_t	bfree = 0;
	uint64_t	bfreelst = 0;
	uint64_t	btree = 0;
	int		error;

	for (index = 0; index < agcount; index++) {
		/*
		 * read the agf, then the agi. This gets us
		 * all the information we need and populates the
		 * per-ag structures for us.
		 */
		error = xfs_alloc_pagf_init(mp, NULL, index, 0);
		if (error)
			return error;

		error = xfs_ialloc_pagi_init(mp, NULL, index);
		if (error)
			return error;
		pag = xfs_perag_get(mp, index);
		ifree += pag->pagi_freecount;
		ialloc += pag->pagi_count;
		bfree += pag->pagf_freeblks;
		bfreelst += pag->pagf_flcount;
		btree += pag->pagf_btreeblks;
		xfs_perag_put(pag);
	}
	/*
	 * Overwrite incore superblock counters with just-read data
	 */
	spin_lock(&mp->m_sb_lock);
	sbp->sb_ifree = ifree;
	sbp->sb_icount = ialloc;
	sbp->sb_fdblocks = bfree + bfreelst + btree;
	spin_unlock(&mp->m_sb_lock);

	/* Fixup the per-cpu counters as well. */
	xfs_icsb_reinit_counters(mp);

	return 0;
}
/*
 * Update alignment values based on mount options and sb values
 */
STATIC int
xfs_update_alignment(xfs_mount_t *mp)
{
	xfs_sb_t	*sbp = &(mp->m_sb);

	if (mp->m_dalign) {
		/*
		 * If stripe unit and stripe width are not multiples
		 * of the fs blocksize turn off alignment.
		 */
		if ((BBTOB(mp->m_dalign) & mp->m_blockmask) ||
		    (BBTOB(mp->m_swidth) & mp->m_blockmask)) {
			if (mp->m_flags & XFS_MOUNT_RETERR) {
				xfs_warn(mp, "alignment check failed: "
					 "(sunit/swidth vs. blocksize)");
				return XFS_ERROR(EINVAL);
			}
			mp->m_dalign = mp->m_swidth = 0;
		} else {
			/*
			 * Convert the stripe unit and width to FSBs.
			 */
			mp->m_dalign = XFS_BB_TO_FSBT(mp, mp->m_dalign);
			if (mp->m_dalign && (sbp->sb_agblocks % mp->m_dalign)) {
				if (mp->m_flags & XFS_MOUNT_RETERR) {
					xfs_warn(mp, "alignment check failed: "
						 "(sunit/swidth vs. ag size)");
					return XFS_ERROR(EINVAL);
				}
				xfs_warn(mp,
		"stripe alignment turned off: sunit(%d)/swidth(%d) "
		"incompatible with agsize(%d)",
					mp->m_dalign, mp->m_swidth,
					sbp->sb_agblocks);

				mp->m_dalign = 0;
				mp->m_swidth = 0;
			} else if (mp->m_dalign) {
				mp->m_swidth = XFS_BB_TO_FSBT(mp, mp->m_swidth);
			} else {
				if (mp->m_flags & XFS_MOUNT_RETERR) {
					xfs_warn(mp, "alignment check failed: "
						"sunit(%d) less than bsize(%d)",
						mp->m_dalign,
						mp->m_blockmask +1);
					return XFS_ERROR(EINVAL);
				}
				mp->m_swidth = 0;
			}
		}

		/*
		 * Update superblock with new values
		 * and log changes
		 */
		if (xfs_sb_version_hasdalign(sbp)) {
			if (sbp->sb_unit != mp->m_dalign) {
				sbp->sb_unit = mp->m_dalign;
				mp->m_update_flags |= XFS_SB_UNIT;
			}
			if (sbp->sb_width != mp->m_swidth) {
				sbp->sb_width = mp->m_swidth;
				mp->m_update_flags |= XFS_SB_WIDTH;
			}
		}
	} else if ((mp->m_flags & XFS_MOUNT_NOALIGN) != XFS_MOUNT_NOALIGN &&
		    xfs_sb_version_hasdalign(&mp->m_sb)) {
			mp->m_dalign = sbp->sb_unit;
			mp->m_swidth = sbp->sb_width;
	}

	return 0;
}
/*
 * Set the maximum inode count for this filesystem
 */
STATIC void
xfs_set_maxicount(xfs_mount_t *mp)
{
	xfs_sb_t	*sbp = &(mp->m_sb);
	__uint64_t	icount;

	if (sbp->sb_imax_pct) {
		/*
		 * Make sure the maximum inode count is a multiple
		 * of the units we allocate inodes in.
		 */
		icount = sbp->sb_dblocks * sbp->sb_imax_pct;
		do_div(icount, 100);
		do_div(icount, mp->m_ialloc_blks);
		mp->m_maxicount = (icount * mp->m_ialloc_blks)  <<
				   sbp->sb_inopblog;
	} else {
		mp->m_maxicount = 0;
	}
}
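
/*
 * Example: with 256-byte inodes in 4k blocks (sb_inopblog == 4), a
 * 64-inode chunk (XFS_INODES_PER_CHUNK) spans 4 blocks, so the two
 * do_div() calls round the imax_pct block budget down to a whole
 * number of chunks before the << sb_inopblog shift converts blocks
 * back into a maximum inode count.
 */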
/*
 * Set the default minimum read and write sizes unless
 * already specified in a mount option.
 * We use smaller I/O sizes when the file system
 * is being used for NFS service (wsync mount option).
 */
STATIC void
xfs_set_rw_sizes(xfs_mount_t *mp)
{
	xfs_sb_t	*sbp = &(mp->m_sb);
	int		readio_log, writeio_log;

	if (!(mp->m_flags & XFS_MOUNT_DFLT_IOSIZE)) {
		if (mp->m_flags & XFS_MOUNT_WSYNC) {
			readio_log = XFS_WSYNC_READIO_LOG;
			writeio_log = XFS_WSYNC_WRITEIO_LOG;
		} else {
			readio_log = XFS_READIO_LOG_LARGE;
			writeio_log = XFS_WRITEIO_LOG_LARGE;
		}
	} else {
		readio_log = mp->m_readio_log;
		writeio_log = mp->m_writeio_log;
	}

	if (sbp->sb_blocklog > readio_log) {
		mp->m_readio_log = sbp->sb_blocklog;
	} else {
		mp->m_readio_log = readio_log;
	}
	mp->m_readio_blocks = 1 << (mp->m_readio_log - sbp->sb_blocklog);
	if (sbp->sb_blocklog > writeio_log) {
		mp->m_writeio_log = sbp->sb_blocklog;
	} else {
		mp->m_writeio_log = writeio_log;
	}
	mp->m_writeio_blocks = 1 << (mp->m_writeio_log - sbp->sb_blocklog);
}
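
/*
 * The clamping above guarantees that m_readio_log/m_writeio_log are
 * never smaller than sb_blocklog, so m_readio_blocks and
 * m_writeio_blocks are always at least one filesystem block (e.g.
 * sb_blocklog 12 with a default I/O size log of 16 yields 16 blocks).
 */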
/*
 * precalculate the low space thresholds for dynamic speculative preallocation.
 */
void
xfs_set_low_space_thresholds(
	struct xfs_mount	*mp)
{
	int i;

	for (i = 0; i < XFS_LOWSP_MAX; i++) {
		__uint64_t space = mp->m_sb.sb_dblocks;

		do_div(space, 100);
		mp->m_low_space[i] = space * (i + 1);
	}
}
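
/*
 * With XFS_LOWSP_MAX of five this sets m_low_space[0..4] to 1%..5% of
 * sb_dblocks; these are the free space trigger points at which
 * speculative preallocation is progressively scaled back.
 */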
/*
 * Set whether we're using inode alignment.
 */
STATIC void
xfs_set_inoalignment(xfs_mount_t *mp)
{
	if (xfs_sb_version_hasalign(&mp->m_sb) &&
	    mp->m_sb.sb_inoalignmt >=
	    XFS_B_TO_FSBT(mp, mp->m_inode_cluster_size))
		mp->m_inoalign_mask = mp->m_sb.sb_inoalignmt - 1;
	else
		mp->m_inoalign_mask = 0;
	/*
	 * If we are using stripe alignment, check whether
	 * the stripe unit is a multiple of the inode alignment
	 */
	if (mp->m_dalign && mp->m_inoalign_mask &&
	    !(mp->m_dalign & mp->m_inoalign_mask))
		mp->m_sinoalign = mp->m_dalign;
	else
		mp->m_sinoalign = 0;
}
/*
 * Check that the data (and log if separate) are an ok size.
 */
STATIC int
xfs_check_sizes(xfs_mount_t *mp)
{
	xfs_buf_t	*bp;
	xfs_daddr_t	d;

	d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks);
	if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_dblocks) {
		xfs_warn(mp, "filesystem size mismatch detected");
		return XFS_ERROR(EFBIG);
	}
	bp = xfs_buf_read_uncached(mp->m_ddev_targp,
					d - XFS_FSS_TO_BB(mp, 1),
					XFS_FSS_TO_BB(mp, 1), 0, NULL);
	if (!bp) {
		xfs_warn(mp, "last sector read failed");
		return EIO;
	}
	xfs_buf_relse(bp);

	if (mp->m_logdev_targp != mp->m_ddev_targp) {
		d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_logblocks);
		if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_logblocks) {
			xfs_warn(mp, "log size mismatch detected");
			return XFS_ERROR(EFBIG);
		}
		bp = xfs_buf_read_uncached(mp->m_logdev_targp,
					d - XFS_FSB_TO_BB(mp, 1),
					XFS_FSB_TO_BB(mp, 1), 0, NULL);
		if (!bp) {
			xfs_warn(mp, "log device read failed");
			return EIO;
		}
		xfs_buf_relse(bp);
	}
	return 0;
}
/*
 * Clear the quotaflags in memory and in the superblock.
 */
int
xfs_mount_reset_sbqflags(
	struct xfs_mount	*mp)
{
	int			error;
	struct xfs_trans	*tp;

	mp->m_qflags = 0;

	/*
	 * It is OK to look at sb_qflags here in mount path,
	 * without m_sb_lock.
	 */
	if (mp->m_sb.sb_qflags == 0)
		return 0;
	spin_lock(&mp->m_sb_lock);
	mp->m_sb.sb_qflags = 0;
	spin_unlock(&mp->m_sb_lock);

	/*
	 * If the fs is readonly, let the incore superblock run
	 * with quotas off but don't flush the update out to disk
	 */
	if (mp->m_flags & XFS_MOUNT_RDONLY)
		return 0;

	tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SBCHANGE);
	error = xfs_trans_reserve(tp, 0, XFS_QM_SBCHANGE_LOG_RES(mp),
				  0, 0, XFS_DEFAULT_LOG_COUNT);
	if (error) {
		xfs_trans_cancel(tp, 0);
		xfs_alert(mp, "%s: Superblock update failed!", __func__);
		return error;
	}

	xfs_mod_sb(tp, XFS_SB_QFLAGS);
	return xfs_trans_commit(tp, 0);
}
__uint64_t
xfs_default_resblks(xfs_mount_t *mp)
{
	__uint64_t resblks;

	/*
	 * We default to 5% or 8192 fsbs of space reserved, whichever is
	 * smaller.  This is intended to cover concurrent allocation
	 * transactions when we initially hit enospc. These each require a 4
	 * block reservation. Hence by default we cover roughly 2000 concurrent
	 * allocation reservations.
	 */
	resblks = mp->m_sb.sb_dblocks;
	do_div(resblks, 20);
	resblks = min_t(__uint64_t, resblks, 8192);
	return resblks;
}
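
/*
 * Example: a 1TB data device with 4k blocks has sb_dblocks of about
 * 268 million, so 5% would be ~13.4 million blocks and the reserve is
 * capped at 8192 fsbs (32MB); only filesystems smaller than ~670MB
 * actually get the 5% figure.
 */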
/*
 * This function does the following on an initial mount of a file system:
 *	- reads the superblock from disk and init the mount struct
 *	- if we're a 32-bit kernel, do a size check on the superblock
 *		so we don't mount terabyte filesystems
 *	- init mount struct realtime fields
 *	- allocate inode hash table for fs
 *	- init directory manager
 *	- perform recovery and init the log manager
 */
int
xfs_mountfs(
	xfs_mount_t	*mp)
{
	xfs_sb_t	*sbp = &(mp->m_sb);
	xfs_inode_t	*rip;
	__uint64_t	resblks;
	uint		quotamount = 0;
	uint		quotaflags = 0;
	int		error = 0;

	xfs_mount_common(mp, sbp);

	/*
	 * Check for a mismatched features2 values. Older kernels
	 * read & wrote into the wrong sb offset for sb_features2
	 * on some platforms due to xfs_sb_t not being 64bit size aligned
	 * when sb_features2 was added, which made older superblock
	 * reading/writing routines swap it as a 64-bit value.
	 *
	 * For backwards compatibility, we make both slots equal.
	 *
	 * If we detect a mismatched field, we OR the set bits into the
	 * existing features2 field in case it has already been modified; we
	 * don't want to lose any features.  We then update the bad location
	 * with the ORed value so that older kernels will see any features2
	 * flags, and mark the two fields as needing updates once the
	 * transaction subsystem is online.
	 */
	if (xfs_sb_has_mismatched_features2(sbp)) {
		xfs_warn(mp, "correcting sb_features alignment problem");
		sbp->sb_features2 |= sbp->sb_bad_features2;
		sbp->sb_bad_features2 = sbp->sb_features2;
		mp->m_update_flags |= XFS_SB_FEATURES2 | XFS_SB_BAD_FEATURES2;

		/*
		 * Re-check for ATTR2 in case it was found in bad_features2
		 * slot.
		 */
		if (xfs_sb_version_hasattr2(&mp->m_sb) &&
		   !(mp->m_flags & XFS_MOUNT_NOATTR2))
			mp->m_flags |= XFS_MOUNT_ATTR2;
	}

	if (xfs_sb_version_hasattr2(&mp->m_sb) &&
	   (mp->m_flags & XFS_MOUNT_NOATTR2)) {
		xfs_sb_version_removeattr2(&mp->m_sb);
		mp->m_update_flags |= XFS_SB_FEATURES2;

		/* update sb_versionnum for the clearing of the morebits */
		if (!sbp->sb_features2)
			mp->m_update_flags |= XFS_SB_VERSIONNUM;
	}

	/*
	 * Check if sb_agblocks is aligned at stripe boundary
	 * If sb_agblocks is NOT aligned turn off m_dalign since
	 * allocator alignment is within an ag, therefore ag has
	 * to be aligned at stripe boundary.
	 */
	error = xfs_update_alignment(mp);
	if (error)
		goto out;

	xfs_alloc_compute_maxlevels(mp);
	xfs_bmap_compute_maxlevels(mp, XFS_DATA_FORK);
	xfs_bmap_compute_maxlevels(mp, XFS_ATTR_FORK);
	xfs_ialloc_compute_maxlevels(mp);

	xfs_set_maxicount(mp);

	error = xfs_uuid_mount(mp);
	if (error)
		goto out;

	/*
	 * Set the minimum read and write sizes
	 */
	xfs_set_rw_sizes(mp);

	/* set the low space thresholds for dynamic preallocation */
	xfs_set_low_space_thresholds(mp);

	/*
	 * Set the inode cluster size.
	 * This may still be overridden by the file system
	 * block size if it is larger than the chosen cluster size.
	 */
	mp->m_inode_cluster_size = XFS_INODE_BIG_CLUSTER_SIZE;

	/*
	 * Set inode alignment fields
	 */
	xfs_set_inoalignment(mp);

	/*
	 * Check that the data (and log if separate) are an ok size.
	 */
	error = xfs_check_sizes(mp);
	if (error)
		goto out_remove_uuid;

	/*
	 * Initialize realtime fields in the mount structure
	 */
	error = xfs_rtmount_init(mp);
	if (error) {
		xfs_warn(mp, "RT mount failed");
		goto out_remove_uuid;
	}

	/*
	 *  Copies the low order bits of the timestamp and the randomly
	 *  set "sequence" number out of a UUID.
	 */
	uuid_getnodeuniq(&sbp->sb_uuid, mp->m_fixedfsid);

	mp->m_dmevmask = 0;	/* not persistent; set after each mount */

	/*
	 * Initialize the attribute manager's entries.
	 */
	mp->m_attr_magicpct = (mp->m_sb.sb_blocksize * 37) / 100;

	/*
	 * Initialize the precomputed transaction reservations values.
	 */

	/*
	 * Allocate and initialize the per-ag data.
	 */
	spin_lock_init(&mp->m_perag_lock);
	INIT_RADIX_TREE(&mp->m_perag_tree, GFP_ATOMIC);
	error = xfs_initialize_perag(mp, sbp->sb_agcount, &mp->m_maxagi);
	if (error) {
		xfs_warn(mp, "Failed per-ag init: %d", error);
		goto out_remove_uuid;
	}

	if (!sbp->sb_logblocks) {
		xfs_warn(mp, "no log defined");
		XFS_ERROR_REPORT("xfs_mountfs", XFS_ERRLEVEL_LOW, mp);
		error = XFS_ERROR(EFSCORRUPTED);
		goto out_free_perag;
	}

	/*
	 * log's mount-time initialization. Perform 1st part recovery if needed
	 */
	error = xfs_log_mount(mp, mp->m_logdev_targp,
			      XFS_FSB_TO_DADDR(mp, sbp->sb_logstart),
			      XFS_FSB_TO_BB(mp, sbp->sb_logblocks));
	if (error) {
		xfs_warn(mp, "log mount failed");
		goto out_fail_wait;
	}

	/*
	 * Now the log is mounted, we know if it was an unclean shutdown or
	 * not. If it was, with the first phase of recovery has completed, we
	 * have consistent AG blocks on disk. We have not recovered EFIs yet,
	 * but they are recovered transactionally in the second recovery phase
	 * later.
	 *
	 * Hence we can safely re-initialise incore superblock counters from
	 * the per-ag data. These may not be correct if the filesystem was not
	 * cleanly unmounted, so we need to wait for recovery to finish before
	 * doing this.
	 *
	 * If the filesystem was cleanly unmounted, then we can trust the
	 * values in the superblock to be correct and we don't need to do
	 * anything here.
	 *
	 * If we are currently making the filesystem, the initialisation will
	 * fail as the perag data is in an undefined state.
	 */
	if (xfs_sb_version_haslazysbcount(&mp->m_sb) &&
	    !XFS_LAST_UNMOUNT_WAS_CLEAN(mp) &&
	     !mp->m_sb.sb_inprogress) {
		error = xfs_initialize_perag_data(mp, sbp->sb_agcount);
		if (error)
			goto out_fail_wait;
	}

	/*
	 * Get and sanity-check the root inode.
	 * Save the pointer to it in the mount structure.
	 */
	error = xfs_iget(mp, NULL, sbp->sb_rootino, 0, XFS_ILOCK_EXCL, &rip);
	if (error) {
		xfs_warn(mp, "failed to read root inode");
		goto out_log_dealloc;
	}

	ASSERT(rip != NULL);

	if (unlikely(!S_ISDIR(rip->i_d.di_mode))) {
		xfs_warn(mp, "corrupted root inode %llu: not a directory",
			(unsigned long long)rip->i_ino);
		xfs_iunlock(rip, XFS_ILOCK_EXCL);
		XFS_ERROR_REPORT("xfs_mountfs_int(2)", XFS_ERRLEVEL_LOW,
				 mp);
		error = XFS_ERROR(EFSCORRUPTED);
		goto out_rele_rip;
	}

	mp->m_rootip = rip;	/* save it */

	xfs_iunlock(rip, XFS_ILOCK_EXCL);

	/*
	 * Initialize realtime inode pointers in the mount structure
	 */
	error = xfs_rtmount_inodes(mp);
	if (error) {
		/*
		 * Free up the root inode.
		 */
		xfs_warn(mp, "failed to read RT inodes");
		goto out_rele_rip;
	}

	/*
	 * If this is a read-only mount defer the superblock updates until
	 * the next remount into writeable mode.  Otherwise we would never
	 * perform the update e.g. for the root filesystem.
	 */
	if (mp->m_update_flags && !(mp->m_flags & XFS_MOUNT_RDONLY)) {
		error = xfs_mount_log_sb(mp, mp->m_update_flags);
		if (error) {
			xfs_warn(mp, "failed to write sb changes");
			goto out_rtunmount;
		}
	}

	/*
	 * Initialise the XFS quota management subsystem for this mount
	 */
	if (XFS_IS_QUOTA_RUNNING(mp)) {
		error = xfs_qm_newmount(mp, &quotamount, &quotaflags);
		if (error)
			goto out_rtunmount;
	} else {
		ASSERT(!XFS_IS_QUOTA_ON(mp));

		/*
		 * If a file system had quotas running earlier, but decided to
		 * mount without -o uquota/pquota/gquota options, revoke the
		 * quotachecked license.
		 */
		if (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_ACCT) {
			xfs_notice(mp, "resetting quota flags");
			error = xfs_mount_reset_sbqflags(mp);
			if (error)
				return error;
		}
	}

	/*
	 * Finish recovering the file system.  This part needed to be
	 * delayed until after the root and real-time bitmap inodes
	 * were consistently read in.
	 */
	error = xfs_log_mount_finish(mp);
	if (error) {
		xfs_warn(mp, "log mount finish failed");
		goto out_rtunmount;
	}

	/*
	 * Complete the quota initialisation, post-log-replay component.
	 */
	if (quotamount) {
		ASSERT(mp->m_qflags == 0);
		mp->m_qflags = quotaflags;

		xfs_qm_mount_quotas(mp);
	}

	/*
	 * Now we are mounted, reserve a small amount of unused space for
	 * privileged transactions. This is needed so that transaction
	 * space required for critical operations can dip into this pool
	 * when at ENOSPC. This is needed for operations like create with
	 * attr, unwritten extent conversion at ENOSPC, etc. Data allocations
	 * are not allowed to use this reserved space.
	 *
	 * This may drive us straight to ENOSPC on mount, but that implies
	 * we were already there on the last unmount. Warn if this occurs.
	 */
	if (!(mp->m_flags & XFS_MOUNT_RDONLY)) {
		resblks = xfs_default_resblks(mp);
		error = xfs_reserve_blocks(mp, &resblks, NULL);
		if (error)
			xfs_warn(mp,
	"Unable to allocate reserve blocks. Continuing without reserve pool.");
	}

	return 0;

 out_rtunmount:
	xfs_rtunmount_inodes(mp);
 out_rele_rip:
	IRELE(rip);
 out_log_dealloc:
	xfs_log_unmount(mp);
 out_fail_wait:
	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp)
		xfs_wait_buftarg(mp->m_logdev_targp);
	xfs_wait_buftarg(mp->m_ddev_targp);
 out_free_perag:
	xfs_free_perag(mp);
 out_remove_uuid:
	xfs_uuid_unmount(mp);
 out:
	return error;
}
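
/*
 * Note that the error labels above unwind strictly in reverse order of
 * setup: realtime inodes, then the root inode, then the log (waiting
 * for outstanding buffer I/O), then the per-ag tree and finally the
 * uuid table entry.
 */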
/*
 * This flushes out the inodes, dquots and the superblock, unmounts the
 * log and makes sure that incore structures are freed.
 */
void
xfs_unmountfs(
	struct xfs_mount	*mp)
{
	__uint64_t		resblks;
	int			error;

	cancel_delayed_work_sync(&mp->m_eofblocks_work);

	xfs_qm_unmount_quotas(mp);
	xfs_rtunmount_inodes(mp);
	IRELE(mp->m_rootip);

	/*
	 * We can potentially deadlock here if we have an inode cluster
	 * that has been freed has its buffer still pinned in memory because
	 * the transaction is still sitting in a iclog. The stale inodes
	 * on that buffer will have their flush locks held until the
	 * transaction hits the disk and the callbacks run. the inode
	 * flush takes the flush lock unconditionally and with nothing to
	 * push out the iclog we will never get that unlocked. hence we
	 * need to force the log first.
	 */
	xfs_log_force(mp, XFS_LOG_SYNC);

	/*
	 * Flush all pending changes from the AIL.
	 */
	xfs_ail_push_all_sync(mp->m_ail);

	/*
	 * And reclaim all inodes.  At this point there should be no dirty
	 * inodes and none should be pinned or locked, but use synchronous
	 * reclaim just to be sure. We can stop background inode reclaim
	 * here as well if it is still running.
	 */
	cancel_delayed_work_sync(&mp->m_reclaim_work);
	xfs_reclaim_inodes(mp, SYNC_WAIT);

	/*
	 * Unreserve any blocks we have so that when we unmount we don't account
	 * the reserved free space as used. This is really only necessary for
	 * lazy superblock counting because it trusts the incore superblock
	 * counters to be absolutely correct on clean unmount.
	 *
	 * We don't bother correcting this elsewhere for lazy superblock
	 * counting because on mount of an unclean filesystem we reconstruct the
	 * correct counter value and this is irrelevant.
	 *
	 * For non-lazy counter filesystems, this doesn't matter at all because
	 * we only ever apply deltas to the superblock and hence the incore
	 * value does not matter....
	 */
	resblks = 0;
	error = xfs_reserve_blocks(mp, &resblks, NULL);
	if (error)
		xfs_warn(mp, "Unable to free reserved block pool. "
				"Freespace may not be correct on next mount.");

	error = xfs_log_sbcount(mp);
	if (error)
		xfs_warn(mp, "Unable to update superblock counters. "
				"Freespace may not be correct on next mount.");

	xfs_log_unmount(mp);
	xfs_uuid_unmount(mp);

	xfs_errortag_clearall(mp, 0);
	xfs_free_perag(mp);
}
int
xfs_fs_writable(xfs_mount_t *mp)
{
	return !(mp->m_super->s_writers.frozen || XFS_FORCED_SHUTDOWN(mp) ||
		(mp->m_flags & XFS_MOUNT_RDONLY));
}
/*
 * xfs_log_sbcount
 *
 * Sync the superblock counters to disk.
 *
 * Note this code can be called during the process of freezing, so
 * we may need to use the transaction allocator which does not
 * block when the transaction subsystem is in its frozen state.
 */
int
xfs_log_sbcount(xfs_mount_t *mp)
{
	xfs_trans_t	*tp;
	int		error;

	if (!xfs_fs_writable(mp))
		return 0;

	xfs_icsb_sync_counters(mp, 0);

	/*
	 * we don't need to do this if we are updating the superblock
	 * counters on every modification.
	 */
	if (!xfs_sb_version_haslazysbcount(&mp->m_sb))
		return 0;

	tp = _xfs_trans_alloc(mp, XFS_TRANS_SB_COUNT, KM_SLEEP);
	error = xfs_trans_reserve(tp, 0, XFS_SB_LOG_RES(mp), 0, 0,
				  XFS_DEFAULT_LOG_COUNT);
	if (error) {
		xfs_trans_cancel(tp, 0);
		return error;
	}

	xfs_mod_sb(tp, XFS_SB_IFREE | XFS_SB_ICOUNT | XFS_SB_FDBLOCKS);
	xfs_trans_set_sync(tp);
	error = xfs_trans_commit(tp, 0);
	return error;
}
/*
 * xfs_mod_sb() can be used to copy arbitrary changes to the
 * in-core superblock into the superblock buffer to be logged.
 * It does not provide the higher level of locking that is
 * needed to protect the in-core superblock from concurrent
 * access.
 */
void
xfs_mod_sb(xfs_trans_t *tp, __int64_t fields)
{
	xfs_buf_t	*bp;
	int		first;
	int		last;
	xfs_mount_t	*mp;
	xfs_sb_field_t	f;

	ASSERT(fields);
	if (!fields)
		return;
	mp = tp->t_mountp;
	bp = xfs_trans_getsb(tp, mp, 0);
	first = sizeof(xfs_sb_t);
	last = 0;

	/* translate/copy */

	xfs_sb_to_disk(XFS_BUF_TO_SBP(bp), &mp->m_sb, fields);

	/* find modified range */
	f = (xfs_sb_field_t)xfs_highbit64((__uint64_t)fields);
	ASSERT((1LL << f) & XFS_SB_MOD_BITS);
	last = xfs_sb_info[f + 1].offset - 1;

	f = (xfs_sb_field_t)xfs_lowbit64((__uint64_t)fields);
	ASSERT((1LL << f) & XFS_SB_MOD_BITS);
	first = xfs_sb_info[f].offset;

	xfs_trans_buf_set_type(tp, bp, XFS_BLFT_SB_BUF);
	xfs_trans_log_buf(tp, bp, first, last);
}
/*
 * xfs_mod_incore_sb_unlocked() is a utility routine commonly used to apply
 * a delta to a specified field in the in-core superblock.  Simply
 * switch on the field indicated and apply the delta to that field.
 * Fields are not allowed to dip below zero, so if the delta would
 * do this do not apply it and return EINVAL.
 *
 * The m_sb_lock must be held when this routine is called.
 */
STATIC int
xfs_mod_incore_sb_unlocked(
	xfs_mount_t	*mp,
	xfs_sb_field_t	field,
	int64_t		delta,
	int		rsvd)
{
	int		scounter;	/* short counter for 32 bit fields */
	long long	lcounter;	/* long counter for 64 bit fields */
	long long	res_used, rem;

	/*
	 * With the in-core superblock spin lock held, switch
	 * on the indicated field.  Apply the delta to the
	 * proper field.  If the fields value would dip below
	 * 0, then do not apply the delta and return EINVAL.
	 */
	switch (field) {
	case XFS_SBS_ICOUNT:
		lcounter = (long long)mp->m_sb.sb_icount;
		lcounter += delta;
		if (lcounter < 0) {
			ASSERT(0);
			return XFS_ERROR(EINVAL);
		}
		mp->m_sb.sb_icount = lcounter;
		return 0;
	case XFS_SBS_IFREE:
		lcounter = (long long)mp->m_sb.sb_ifree;
		lcounter += delta;
		if (lcounter < 0) {
			ASSERT(0);
			return XFS_ERROR(EINVAL);
		}
		mp->m_sb.sb_ifree = lcounter;
		return 0;
	case XFS_SBS_FDBLOCKS:
		lcounter = (long long)
			mp->m_sb.sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp);
		res_used = (long long)(mp->m_resblks - mp->m_resblks_avail);

		if (delta > 0) {		/* Putting blocks back */
			if (res_used > delta) {
				mp->m_resblks_avail += delta;
			} else {
				rem = delta - res_used;
				mp->m_resblks_avail = mp->m_resblks;
				lcounter += rem;
			}
		} else {			/* Taking blocks away */
			lcounter += delta;
			if (lcounter >= 0) {
				mp->m_sb.sb_fdblocks = lcounter +
							XFS_ALLOC_SET_ASIDE(mp);
				return 0;
			}

			/*
			 * We are out of blocks, use any available reserved
			 * blocks if were allowed to.
			 */
			if (!rsvd)
				return XFS_ERROR(ENOSPC);

			lcounter = (long long)mp->m_resblks_avail + delta;
			if (lcounter >= 0) {
				mp->m_resblks_avail = lcounter;
				return 0;
			}
			printk_once(KERN_WARNING
				"Filesystem \"%s\": reserve blocks depleted! "
				"Consider increasing reserve pool size.",
				mp->m_fsname);
			return XFS_ERROR(ENOSPC);
		}

		mp->m_sb.sb_fdblocks = lcounter + XFS_ALLOC_SET_ASIDE(mp);
		return 0;
	case XFS_SBS_FREXTENTS:
		lcounter = (long long)mp->m_sb.sb_frextents;
		lcounter += delta;
		if (lcounter < 0)
			return XFS_ERROR(ENOSPC);
		mp->m_sb.sb_frextents = lcounter;
		return 0;
	case XFS_SBS_DBLOCKS:
		lcounter = (long long)mp->m_sb.sb_dblocks;
		lcounter += delta;
		if (lcounter < 0) {
			ASSERT(0);
			return XFS_ERROR(EINVAL);
		}
		mp->m_sb.sb_dblocks = lcounter;
		return 0;
	case XFS_SBS_AGCOUNT:
		scounter = mp->m_sb.sb_agcount;
		scounter += delta;
		if (scounter < 0) {
			ASSERT(0);
			return XFS_ERROR(EINVAL);
		}
		mp->m_sb.sb_agcount = scounter;
		return 0;
	case XFS_SBS_IMAX_PCT:
		scounter = mp->m_sb.sb_imax_pct;
		scounter += delta;
		if (scounter < 0) {
			ASSERT(0);
			return XFS_ERROR(EINVAL);
		}
		mp->m_sb.sb_imax_pct = scounter;
		return 0;
	case XFS_SBS_REXTSIZE:
		scounter = mp->m_sb.sb_rextsize;
		scounter += delta;
		if (scounter < 0) {
			ASSERT(0);
			return XFS_ERROR(EINVAL);
		}
		mp->m_sb.sb_rextsize = scounter;
		return 0;
	case XFS_SBS_RBMBLOCKS:
		scounter = mp->m_sb.sb_rbmblocks;
		scounter += delta;
		if (scounter < 0) {
			ASSERT(0);
			return XFS_ERROR(EINVAL);
		}
		mp->m_sb.sb_rbmblocks = scounter;
		return 0;
	case XFS_SBS_RBLOCKS:
		lcounter = (long long)mp->m_sb.sb_rblocks;
		lcounter += delta;
		if (lcounter < 0) {
			ASSERT(0);
			return XFS_ERROR(EINVAL);
		}
		mp->m_sb.sb_rblocks = lcounter;
		return 0;
	case XFS_SBS_REXTENTS:
		lcounter = (long long)mp->m_sb.sb_rextents;
		lcounter += delta;
		if (lcounter < 0) {
			ASSERT(0);
			return XFS_ERROR(EINVAL);
		}
		mp->m_sb.sb_rextents = lcounter;
		return 0;
	case XFS_SBS_REXTSLOG:
		scounter = mp->m_sb.sb_rextslog;
		scounter += delta;
		if (scounter < 0) {
			ASSERT(0);
			return XFS_ERROR(EINVAL);
		}
		mp->m_sb.sb_rextslog = scounter;
		return 0;
	default:
		ASSERT(0);
		return XFS_ERROR(EINVAL);
	}
}
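
/*
 * Example of the XFS_SBS_FDBLOCKS reserve pool interaction above:
 * returned blocks (delta > 0) first refill m_resblks_avail up to
 * m_resblks and only the remainder reaches sb_fdblocks, while an
 * allocation that would drive the free count negative can, if rsvd is
 * set, be satisfied from m_resblks_avail instead of failing ENOSPC.
 */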
/*
 * xfs_mod_incore_sb() is used to change a field in the in-core
 * superblock structure by the specified delta.  This modification
 * is protected by the m_sb_lock.  Just use the xfs_mod_incore_sb_unlocked()
 * routine to do the work.
 */
int
xfs_mod_incore_sb(
	struct xfs_mount	*mp,
	xfs_sb_field_t		field,
	int64_t			delta,
	int			rsvd)
{
	int			status;

#ifdef HAVE_PERCPU_SB
	ASSERT(field < XFS_SBS_ICOUNT || field > XFS_SBS_FDBLOCKS);
#endif
	spin_lock(&mp->m_sb_lock);
	status = xfs_mod_incore_sb_unlocked(mp, field, delta, rsvd);
	spin_unlock(&mp->m_sb_lock);

	return status;
}
/*
 * Change more than one field in the in-core superblock structure at a time.
 *
 * The fields and changes to those fields are specified in the array of
 * xfs_mod_sb structures passed in.  Either all of the specified deltas
 * will be applied or none of them will.  If any modified field dips below 0,
 * then all modifications will be backed out and EINVAL will be returned.
 *
 * Note that this function may not be used for the superblock values that
 * are tracked with the in-memory per-cpu counters - a direct call to
 * xfs_icsb_modify_counters is required for these.
 */
int
xfs_mod_incore_sb_batch(
	struct xfs_mount	*mp,
	xfs_mod_sb_t		*msb,
	uint			nmsb,
	int			rsvd)
{
	xfs_mod_sb_t		*msbp;
	int			error = 0;

	/*
	 * Loop through the array of mod structures and apply each individually.
	 * If any fail, then back out all those which have already been applied.
	 * Do all of this within the scope of the m_sb_lock so that all of the
	 * changes will be atomic.
	 */
	spin_lock(&mp->m_sb_lock);
	for (msbp = msb; msbp < (msb + nmsb); msbp++) {
		ASSERT(msbp->msb_field < XFS_SBS_ICOUNT ||
		       msbp->msb_field > XFS_SBS_FDBLOCKS);

		error = xfs_mod_incore_sb_unlocked(mp, msbp->msb_field,
						   msbp->msb_delta, rsvd);
		if (error)
			goto unwind;
	}
	spin_unlock(&mp->m_sb_lock);
	return 0;

unwind:
	while (--msbp >= msb) {
		error = xfs_mod_incore_sb_unlocked(mp, msbp->msb_field,
						   -msbp->msb_delta, rsvd);
		ASSERT(error == 0);
	}
	spin_unlock(&mp->m_sb_lock);
	return error;
}
/*
 * xfs_getsb() is called to obtain the buffer for the superblock.
 * The buffer is returned locked and read in from disk.
 * The buffer should be released with a call to xfs_brelse().
 *
 * If the flags parameter is BUF_TRYLOCK, then we'll only return
 * the superblock buffer if it can be locked without sleeping.
 * If it can't then we'll return NULL.
 */
struct xfs_buf *
xfs_getsb(
	struct xfs_mount	*mp,
	int			flags)
{
	struct xfs_buf		*bp = mp->m_sb_bp;

	if (!xfs_buf_trylock(bp)) {
		if (flags & XBF_TRYLOCK)
			return NULL;
		xfs_buf_lock(bp);
	}

	xfs_buf_hold(bp);
	ASSERT(XFS_BUF_ISDONE(bp));
	return bp;
}
/*
 * Used to free the superblock along various error paths.
 */
void
xfs_freesb(
	struct xfs_mount	*mp)
{
	struct xfs_buf		*bp = mp->m_sb_bp;

	xfs_buf_lock(bp);
	mp->m_sb_bp = NULL;
	xfs_buf_relse(bp);
}
/*
 * Used to log changes to the superblock unit and width fields which could
 * be altered by the mount options, as well as any potential sb_features2
 * fixup. Only the first superblock is updated.
 */
int
xfs_mount_log_sb(
	xfs_mount_t	*mp,
	__int64_t	fields)
{
	xfs_trans_t	*tp;
	int		error;

	ASSERT(fields & (XFS_SB_UNIT | XFS_SB_WIDTH | XFS_SB_UUID |
			 XFS_SB_FEATURES2 | XFS_SB_BAD_FEATURES2 |
			 XFS_SB_VERSIONNUM));

	tp = xfs_trans_alloc(mp, XFS_TRANS_SB_UNIT);
	error = xfs_trans_reserve(tp, 0, XFS_SB_LOG_RES(mp), 0, 0,
				  XFS_DEFAULT_LOG_COUNT);
	if (error) {
		xfs_trans_cancel(tp, 0);
		return error;
	}
	xfs_mod_sb(tp, fields);
	error = xfs_trans_commit(tp, 0);
	return error;
}
/*
 * If the underlying (data/log/rt) device is readonly, there are some
 * operations that cannot proceed.
 */
int
xfs_dev_is_read_only(
	struct xfs_mount	*mp,
	char			*message)
{
	if (xfs_readonly_buftarg(mp->m_ddev_targp) ||
	    xfs_readonly_buftarg(mp->m_logdev_targp) ||
	    (mp->m_rtdev_targp && xfs_readonly_buftarg(mp->m_rtdev_targp))) {
		xfs_notice(mp, "%s required on read-only device.", message);
		xfs_notice(mp, "write access unavailable, cannot proceed.");
		return EROFS;
	}
	return 0;
}
#ifdef HAVE_PERCPU_SB
/*
 * Per-cpu incore superblock counters
 *
 * Simple concept, difficult implementation
 *
 * Basically, replace the incore superblock counters with a distributed per cpu
 * counter for contended fields (e.g. free block count).
 *
 * Difficulties arise in that the incore sb is used for ENOSPC checking, and
 * hence needs to be accurately read when we are running low on space. Hence
 * there is a method to enable and disable the per-cpu counters based on how
 * much "stuff" is available in them.
 *
 * Basically, a counter is enabled if there is enough free resource to justify
 * running a per-cpu fast-path. If the per-cpu counter runs out (i.e. a local
 * ENOSPC), then we disable the counters to synchronise all callers and
 * re-distribute the available resources.
 *
 * If, once we redistributed the available resources, we still get a failure,
 * we disable the per-cpu counter and go through the slow path.
 *
 * The slow path is the current xfs_mod_incore_sb() function.  This means that
 * when we disable a per-cpu counter, we need to drain its resources back to
 * the global superblock. We do this after disabling the counter to prevent
 * more threads from queueing up on the counter.
 *
 * Essentially, this means that we still need a lock in the fast path to enable
 * synchronisation between the global counters and the per-cpu counters. This
 * is not a problem because the lock will be local to a CPU almost all the time
 * and have little contention except when we get to ENOSPC conditions.
 *
 * Basically, this lock becomes a barrier that enables us to lock out the fast
 * path while we do things like enabling and disabling counters and
 * synchronising the counters.
 *
 * Locking rules:
 *
 *	1. m_sb_lock before picking up per-cpu locks
 *	2. per-cpu locks always picked up via for_each_online_cpu() order
 *	3. accurate counter sync requires m_sb_lock + per cpu locks
 *	4. modifying per-cpu counters requires holding per-cpu lock
 *	5. modifying global counters requires holding m_sb_lock
 *	6. enabling or disabling a counter requires holding the m_sb_lock
 *	   and _none_ of the per-cpu locks.
 *
 * Disabled counters are only ever re-enabled by a balance operation
 * that results in more free resources per CPU than a given threshold.
 * To ensure counters don't remain disabled, they are rebalanced when
 * the global resource goes above a higher threshold (i.e. some hysteresis
 * is present to prevent thrashing).
 */
#ifdef CONFIG_HOTPLUG_CPU
/*
 * hot-plug CPU notifier support.
 *
 * We need a notifier per filesystem as we need to be able to identify
 * the filesystem to balance the counters out. This is achieved by
 * having a notifier block embedded in the xfs_mount_t and doing pointer
 * magic to get the mount pointer from the notifier block address.
 */
STATIC int
xfs_icsb_cpu_notify(
	struct notifier_block *nfb,
	unsigned long		action,
	void			*hcpu)
{
	xfs_icsb_cnts_t *cntp;
	xfs_mount_t	*mp;

	mp = (xfs_mount_t *)container_of(nfb, xfs_mount_t, m_icsb_notifier);
	cntp = (xfs_icsb_cnts_t *)
			per_cpu_ptr(mp->m_sb_cnts, (unsigned long)hcpu);
	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		/* Easy Case - initialize the area and locks, and
		 * then rebalance when online does everything else for us. */
		memset(cntp, 0, sizeof(xfs_icsb_cnts_t));
		break;
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		xfs_icsb_lock(mp);
		xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT, 0);
		xfs_icsb_balance_counter(mp, XFS_SBS_IFREE, 0);
		xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS, 0);
		xfs_icsb_unlock(mp);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		/* Disable all the counters, then fold the dead cpu's
		 * count into the total on the global superblock and
		 * re-enable the counters. */
		xfs_icsb_lock(mp);
		spin_lock(&mp->m_sb_lock);
		xfs_icsb_disable_counter(mp, XFS_SBS_ICOUNT);
		xfs_icsb_disable_counter(mp, XFS_SBS_IFREE);
		xfs_icsb_disable_counter(mp, XFS_SBS_FDBLOCKS);

		mp->m_sb.sb_icount += cntp->icsb_icount;
		mp->m_sb.sb_ifree += cntp->icsb_ifree;
		mp->m_sb.sb_fdblocks += cntp->icsb_fdblocks;

		memset(cntp, 0, sizeof(xfs_icsb_cnts_t));

		xfs_icsb_balance_counter_locked(mp, XFS_SBS_ICOUNT, 0);
		xfs_icsb_balance_counter_locked(mp, XFS_SBS_IFREE, 0);
		xfs_icsb_balance_counter_locked(mp, XFS_SBS_FDBLOCKS, 0);
		spin_unlock(&mp->m_sb_lock);
		xfs_icsb_unlock(mp);
		break;
	}

	return NOTIFY_OK;
}
#endif /* CONFIG_HOTPLUG_CPU */
int
xfs_icsb_init_counters(
	xfs_mount_t	*mp)
{
	xfs_icsb_cnts_t *cntp;
	int		i;

	mp->m_sb_cnts = alloc_percpu(xfs_icsb_cnts_t);
	if (mp->m_sb_cnts == NULL)
		return -ENOMEM;

#ifdef CONFIG_HOTPLUG_CPU
	mp->m_icsb_notifier.notifier_call = xfs_icsb_cpu_notify;
	mp->m_icsb_notifier.priority = 0;
	register_hotcpu_notifier(&mp->m_icsb_notifier);
#endif /* CONFIG_HOTPLUG_CPU */

	for_each_online_cpu(i) {
		cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i);
		memset(cntp, 0, sizeof(xfs_icsb_cnts_t));
	}

	mutex_init(&mp->m_icsb_mutex);

	/*
	 * start with all counters disabled so that the
	 * initial balance kicks us off correctly
	 */
	mp->m_icsb_counters = -1;
	return 0;
}
void
xfs_icsb_reinit_counters(
	xfs_mount_t	*mp)
{
	xfs_icsb_lock(mp);
	/*
	 * start with all counters disabled so that the
	 * initial balance kicks us off correctly
	 */
	mp->m_icsb_counters = -1;
	xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT, 0);
	xfs_icsb_balance_counter(mp, XFS_SBS_IFREE, 0);
	xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS, 0);
	xfs_icsb_unlock(mp);
}
void
xfs_icsb_destroy_counters(
	xfs_mount_t	*mp)
{
	if (mp->m_sb_cnts) {
		unregister_hotcpu_notifier(&mp->m_icsb_notifier);
		free_percpu(mp->m_sb_cnts);
	}
	mutex_destroy(&mp->m_icsb_mutex);
}
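/*
 * The expected calling sequence over a mount's lifetime is roughly the
 * following (the exact call sites live in the mount and superblock read
 * paths):
 *
 *	error = xfs_icsb_init_counters(mp);	allocate, counters disabled
 *	...on-disk superblock read into mp->m_sb...
 *	xfs_icsb_reinit_counters(mp);		initial balance re-enables them
 *	...filesystem runs...
 *	xfs_icsb_destroy_counters(mp);		teardown at unmount/mount failure
 */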
STATIC void
xfs_icsb_lock_cntr(
	xfs_icsb_cnts_t	*icsbp)
{
	while (test_and_set_bit(XFS_ICSB_FLAG_LOCK, &icsbp->icsb_flags)) {
		ndelay(1000);
	}
}
STATIC void
xfs_icsb_unlock_cntr(
	xfs_icsb_cnts_t	*icsbp)
{
	clear_bit(XFS_ICSB_FLAG_LOCK, &icsbp->icsb_flags);
}
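/*
 * These two helpers form a hand-rolled bit spinlock: xfs_icsb_lock_cntr()
 * spins until the atomic test_and_set_bit() observes XFS_ICSB_FLAG_LOCK
 * clear, and xfs_icsb_unlock_cntr() releases the lock by clearing the bit.
 * The delay in the spin loop stops a contended waiter from hammering the
 * cacheline while the owner works.
 */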
STATIC void
xfs_icsb_lock_all_counters(
	xfs_mount_t	*mp)
{
	xfs_icsb_cnts_t *cntp;
	int		i;

	for_each_online_cpu(i) {
		cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i);
		xfs_icsb_lock_cntr(cntp);
	}
}
STATIC void
xfs_icsb_unlock_all_counters(
	xfs_mount_t	*mp)
{
	xfs_icsb_cnts_t *cntp;
	int		i;

	for_each_online_cpu(i) {
		cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i);
		xfs_icsb_unlock_cntr(cntp);
	}
}
STATIC void
xfs_icsb_count(
	xfs_mount_t	*mp,
	xfs_icsb_cnts_t	*cnt,
	int		flags)
{
	xfs_icsb_cnts_t *cntp;
	int		i;

	memset(cnt, 0, sizeof(xfs_icsb_cnts_t));

	if (!(flags & XFS_ICSB_LAZY_COUNT))
		xfs_icsb_lock_all_counters(mp);

	for_each_online_cpu(i) {
		cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i);
		cnt->icsb_icount += cntp->icsb_icount;
		cnt->icsb_ifree += cntp->icsb_ifree;
		cnt->icsb_fdblocks += cntp->icsb_fdblocks;
	}

	if (!(flags & XFS_ICSB_LAZY_COUNT))
		xfs_icsb_unlock_all_counters(mp);
}
STATIC int
xfs_icsb_counter_disabled(
	xfs_mount_t	*mp,
	xfs_sb_field_t	field)
{
	ASSERT((field >= XFS_SBS_ICOUNT) && (field <= XFS_SBS_FDBLOCKS));
	return test_bit(field, &mp->m_icsb_counters);
}
STATIC void
xfs_icsb_disable_counter(
	xfs_mount_t	*mp,
	xfs_sb_field_t	field)
{
	xfs_icsb_cnts_t	cnt;

	ASSERT((field >= XFS_SBS_ICOUNT) && (field <= XFS_SBS_FDBLOCKS));

	/*
	 * If we are already disabled, then there is nothing to do
	 * here. We check before locking all the counters to avoid
	 * the expensive lock operation when being called in the
	 * slow path and the counter is already disabled. This is
	 * safe because the only time we set or clear this state is under
	 * the m_icsb_mutex.
	 */
	if (xfs_icsb_counter_disabled(mp, field))
		return;

	xfs_icsb_lock_all_counters(mp);
	if (!test_and_set_bit(field, &mp->m_icsb_counters)) {
		/* drain back to superblock */
		xfs_icsb_count(mp, &cnt, XFS_ICSB_LAZY_COUNT);
		switch (field) {
		case XFS_SBS_ICOUNT:
			mp->m_sb.sb_icount = cnt.icsb_icount;
			break;
		case XFS_SBS_IFREE:
			mp->m_sb.sb_ifree = cnt.icsb_ifree;
			break;
		case XFS_SBS_FDBLOCKS:
			mp->m_sb.sb_fdblocks = cnt.icsb_fdblocks;
			break;
		default:
			BUG();
		}
	}

	xfs_icsb_unlock_all_counters(mp);
}
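/*
 * After a successful disable the per-cpu values have been drained into the
 * corresponding mp->m_sb field, so the fast path in
 * xfs_icsb_modify_counters() sees the disabled bit and falls back to
 * modifying the global counter under m_sb_lock until a later balance
 * re-enables the field.
 */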
STATIC void
xfs_icsb_enable_counter(
	xfs_mount_t	*mp,
	xfs_sb_field_t	field,
	uint64_t	count,
	uint64_t	resid)
{
	xfs_icsb_cnts_t	*cntp;
	int		i;

	ASSERT((field >= XFS_SBS_ICOUNT) && (field <= XFS_SBS_FDBLOCKS));

	xfs_icsb_lock_all_counters(mp);
	for_each_online_cpu(i) {
		cntp = per_cpu_ptr(mp->m_sb_cnts, i);
		switch (field) {
		case XFS_SBS_ICOUNT:
			cntp->icsb_icount = count + resid;
			break;
		case XFS_SBS_IFREE:
			cntp->icsb_ifree = count + resid;
			break;
		case XFS_SBS_FDBLOCKS:
			cntp->icsb_fdblocks = count + resid;
			break;
		default:
			BUG();
			break;
		}
		/* only the first CPU picks up the residual */
		resid = 0;
	}
	clear_bit(field, &mp->m_icsb_counters);
	xfs_icsb_unlock_all_counters(mp);
}
void
xfs_icsb_sync_counters_locked(
	xfs_mount_t	*mp,
	int		flags)
{
	xfs_icsb_cnts_t	cnt;

	xfs_icsb_count(mp, &cnt, flags);

	if (!xfs_icsb_counter_disabled(mp, XFS_SBS_ICOUNT))
		mp->m_sb.sb_icount = cnt.icsb_icount;
	if (!xfs_icsb_counter_disabled(mp, XFS_SBS_IFREE))
		mp->m_sb.sb_ifree = cnt.icsb_ifree;
	if (!xfs_icsb_counter_disabled(mp, XFS_SBS_FDBLOCKS))
		mp->m_sb.sb_fdblocks = cnt.icsb_fdblocks;
}
/*
 * Accurate update of per-cpu counters to incore superblock
 */
void
xfs_icsb_sync_counters(
	xfs_mount_t	*mp,
	int		flags)
{
	spin_lock(&mp->m_sb_lock);
	xfs_icsb_sync_counters_locked(mp, flags);
	spin_unlock(&mp->m_sb_lock);
}
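/*
 * A representative caller is the statfs path, which wants a cheap snapshot
 * of the counters rather than a perfectly stable one:
 *
 *	xfs_icsb_sync_counters(mp, XFS_ICSB_LAZY_COUNT);
 */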
/*
 * Balance and enable/disable counters as necessary.
 *
 * Thresholds for re-enabling counters are somewhat magic. Inode counts are
 * chosen to be the same number as a single on-disk allocation chunk per CPU,
 * and free blocks have to be far enough above zero that we aren't going to
 * thrash when we get near ENOSPC. We also need to supply a minimum we require
 * per cpu to prevent looping endlessly when xfs_alloc_space asks for more
 * than will be distributed to a single CPU but each CPU has enough blocks to
 * be reenabled.
 *
 * Note that we can be called when counters are already disabled.
 * xfs_icsb_disable_counter() optimises the counter locking in this case to
 * prevent locking every per-cpu counter needlessly.
 */

#define XFS_ICSB_INO_CNTR_REENABLE	(uint64_t)64
#define XFS_ICSB_FDBLK_CNTR_REENABLE(mp) \
		(uint64_t)(512 + XFS_ALLOC_SET_ASIDE(mp))
STATIC void
xfs_icsb_balance_counter_locked(
	xfs_mount_t	*mp,
	xfs_sb_field_t	field,
	int		min_per_cpu)
{
	uint64_t	count, resid;
	int		weight = num_online_cpus();
	uint64_t	min = (uint64_t)min_per_cpu;

	/* disable counter and sync counter */
	xfs_icsb_disable_counter(mp, field);

	/* update counters - first CPU gets residual */
	switch (field) {
	case XFS_SBS_ICOUNT:
		count = mp->m_sb.sb_icount;
		resid = do_div(count, weight);
		if (count < max(min, XFS_ICSB_INO_CNTR_REENABLE))
			return;
		break;
	case XFS_SBS_IFREE:
		count = mp->m_sb.sb_ifree;
		resid = do_div(count, weight);
		if (count < max(min, XFS_ICSB_INO_CNTR_REENABLE))
			return;
		break;
	case XFS_SBS_FDBLOCKS:
		count = mp->m_sb.sb_fdblocks;
		resid = do_div(count, weight);
		if (count < max(min, XFS_ICSB_FDBLK_CNTR_REENABLE(mp)))
			return;
		break;
	default:
		BUG();
		count = resid = 0;	/* quiet, gcc */
		break;
	}

	xfs_icsb_enable_counter(mp, field, count, resid);
}
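/*
 * Worked example with illustrative numbers: with 4 CPUs online and
 * mp->m_sb.sb_fdblocks == 40003, do_div() leaves count == 10000 and
 * resid == 3. That clears the 512 + XFS_ALLOC_SET_ASIDE(mp) re-enable
 * threshold, so the counter is re-enabled with the first online CPU
 * holding 10003 free blocks and the other three holding 10000 each.
 * Were sb_fdblocks only 1000, count would be 250, the threshold check
 * would fail and the counter would stay disabled.
 */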
STATIC void
xfs_icsb_balance_counter(
	xfs_mount_t	*mp,
	xfs_sb_field_t	fields,
	int		min_per_cpu)
{
	spin_lock(&mp->m_sb_lock);
	xfs_icsb_balance_counter_locked(mp, fields, min_per_cpu);
	spin_unlock(&mp->m_sb_lock);
}
int
xfs_icsb_modify_counters(
	xfs_mount_t	*mp,
	xfs_sb_field_t	field,
	int64_t		delta,
	int		rsvd)
{
	xfs_icsb_cnts_t	*icsbp;
	long long	lcounter;	/* long counter for 64 bit fields */
	int		ret = 0;

	might_sleep();
again:
	preempt_disable();
	icsbp = this_cpu_ptr(mp->m_sb_cnts);

	/*
	 * if the counter is disabled, go to the slow path
	 */
	if (unlikely(xfs_icsb_counter_disabled(mp, field)))
		goto slow_path;
	xfs_icsb_lock_cntr(icsbp);
	if (unlikely(xfs_icsb_counter_disabled(mp, field))) {
		xfs_icsb_unlock_cntr(icsbp);
		goto slow_path;
	}

	switch (field) {
	case XFS_SBS_ICOUNT:
		lcounter = icsbp->icsb_icount;
		lcounter += delta;
		if (unlikely(lcounter < 0))
			goto balance_counter;
		icsbp->icsb_icount = lcounter;
		break;

	case XFS_SBS_IFREE:
		lcounter = icsbp->icsb_ifree;
		lcounter += delta;
		if (unlikely(lcounter < 0))
			goto balance_counter;
		icsbp->icsb_ifree = lcounter;
		break;

	case XFS_SBS_FDBLOCKS:
		BUG_ON((mp->m_resblks - mp->m_resblks_avail) != 0);

		lcounter = icsbp->icsb_fdblocks - XFS_ALLOC_SET_ASIDE(mp);
		lcounter += delta;
		if (unlikely(lcounter < 0))
			goto balance_counter;
		icsbp->icsb_fdblocks = lcounter + XFS_ALLOC_SET_ASIDE(mp);
		break;
	default:
		BUG();
		break;
	}
	xfs_icsb_unlock_cntr(icsbp);
	preempt_enable();
	return 0;

slow_path:
	preempt_enable();

	/*
	 * serialise with a mutex so we don't burn lots of cpu on
	 * the superblock lock. We still need to hold the superblock
	 * lock, however, when we modify the global structures.
	 */
	xfs_icsb_lock(mp);

	/*
	 * Now running atomically.
	 *
	 * If the counter is enabled, someone has beaten us to rebalancing.
	 * Drop the lock and try again in the fast path....
	 */
	if (!(xfs_icsb_counter_disabled(mp, field))) {
		xfs_icsb_unlock(mp);
		goto again;
	}

	/*
	 * The counter is currently disabled. Because we are
	 * running atomically here, we know a rebalance cannot
	 * be in progress. Hence we can go straight to operating
	 * on the global superblock. We do not call xfs_mod_incore_sb()
	 * here even though we need to get the m_sb_lock. Doing so
	 * will cause us to re-enter this function and deadlock.
	 * Hence we get the m_sb_lock ourselves and then call
	 * xfs_mod_incore_sb_unlocked() as the unlocked path operates
	 * directly on the global counters.
	 */
	spin_lock(&mp->m_sb_lock);
	ret = xfs_mod_incore_sb_unlocked(mp, field, delta, rsvd);
	spin_unlock(&mp->m_sb_lock);

	/*
	 * Now that we've modified the global superblock, we
	 * may be able to re-enable the distributed counters
	 * (e.g. lots of space just got freed). After that
	 * we are done.
	 */
	if (ret != ENOSPC)
		xfs_icsb_balance_counter(mp, field, 0);
	xfs_icsb_unlock(mp);
	return ret;

balance_counter:
	xfs_icsb_unlock_cntr(icsbp);
	preempt_enable();

	/*
	 * We may have multiple threads here if multiple per-cpu
	 * counters run dry at the same time. This will mean we can
	 * do more balances than strictly necessary but it is not
	 * the common slowpath case.
	 */
	xfs_icsb_lock(mp);

	/*
	 * Now running atomically.
	 *
	 * This will leave the counter in the correct state for future
	 * accesses. After the rebalance, we simply try again and our retry
	 * will either succeed through the fast path or slow path without
	 * another balance operation being required.
	 */
	xfs_icsb_balance_counter(mp, field, delta);
	xfs_icsb_unlock(mp);
	goto again;
}
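/*
 * Callers pass signed deltas. The transaction reservation path, for
 * example, pulls blocks out of the free pool with a call of this shape:
 *
 *	error = xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS,
 *					 -((int64_t)blocks), rsvd);
 *
 * and sees ENOSPC back when even the global pool cannot cover the request.
 */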