/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_quota.h"
#include "xfs_utils.h"
#include "xfs_trans_priv.h"
#include "xfs_inode_item.h"
#include "xfs_bmap.h"
#include "xfs_btree_trace.h"
#include "xfs_dir2_trace.h"

/*
 * Allocate and initialise an xfs_inode.
 */
STATIC struct xfs_inode *
xfs_inode_alloc(
	struct xfs_mount	*mp,
	xfs_ino_t		ino)
{
	struct xfs_inode	*ip;

	/*
	 * if this didn't occur in transactions, we could use
	 * KM_MAYFAIL and return NULL here on ENOMEM. Set the
	 * code up to do this anyway.
	 */
	ip = kmem_zone_alloc(xfs_inode_zone, KM_SLEEP);
	if (!ip)
		return NULL;
	if (inode_init_always(mp->m_super, VFS_I(ip))) {
		kmem_zone_free(xfs_inode_zone, ip);
		return NULL;
	}

	ASSERT(atomic_read(&ip->i_iocount) == 0);
	ASSERT(atomic_read(&ip->i_pincount) == 0);
	ASSERT(!spin_is_locked(&ip->i_flags_lock));
	ASSERT(completion_done(&ip->i_flush));

	/* initialise the xfs inode */
	ip->i_ino = ino;
	ip->i_mount = mp;
	memset(&ip->i_imap, 0, sizeof(struct xfs_imap));
	memset(&ip->i_df, 0, sizeof(xfs_ifork_t));
	ip->i_update_core = 0;
	ip->i_update_size = 0;
	ip->i_delayed_blks = 0;
	memset(&ip->i_d, 0, sizeof(xfs_icdinode_t));

	/*
	 * Initialize inode's trace buffers.
	 */
#ifdef	XFS_INODE_TRACE
	ip->i_trace = ktrace_alloc(INODE_TRACE_SIZE, KM_NOFS);
#endif
#ifdef XFS_BMAP_TRACE
	ip->i_xtrace = ktrace_alloc(XFS_BMAP_KTRACE_SIZE, KM_NOFS);
#endif
#ifdef XFS_BTREE_TRACE
	ip->i_btrace = ktrace_alloc(XFS_BMBT_KTRACE_SIZE, KM_NOFS);
#endif
#ifdef XFS_RW_TRACE
	ip->i_rwtrace = ktrace_alloc(XFS_RW_KTRACE_SIZE, KM_NOFS);
#endif
#ifdef XFS_ILOCK_TRACE
	ip->i_lock_trace = ktrace_alloc(XFS_ILOCK_KTRACE_SIZE, KM_NOFS);
#endif
#ifdef XFS_DIR2_TRACE
	ip->i_dir_trace = ktrace_alloc(XFS_DIR2_KTRACE_SIZE, KM_NOFS);
#endif

	/* prevent anyone from using this yet */
	VFS_I(ip)->i_state = I_NEW|I_LOCK;

	return ip;
}
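
/*
 * Illustrative life cycle of an allocation (a sketch only, not part of this
 * file): the cache-miss path below pairs xfs_inode_alloc() with
 * xfs_destroy_inode() if the on-disk read fails before the inode is visible
 * in the per-AG radix tree.
 *
 *	ip = xfs_inode_alloc(mp, ino);
 *	if (!ip)
 *		return ENOMEM;
 *	error = xfs_iread(mp, tp, ip, bno, flags);
 *	if (error) {
 *		xfs_destroy_inode(ip);
 *		return error;
 *	}
 */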

/*
 * Check the validity of the inode we just found in the cache
 */
STATIC int
xfs_iget_cache_hit(
	struct xfs_perag	*pag,
	struct xfs_inode	*ip,
	int			flags,
	int			lock_flags) __releases(pag->pag_ici_lock)
{
	struct xfs_mount	*mp = ip->i_mount;
	int			error = EAGAIN;

	/*
	 * If INEW is set this inode is being set up
	 * If IRECLAIM is set this inode is being torn down
	 * Pause and try again.
	 */
	if (xfs_iflags_test(ip, (XFS_INEW|XFS_IRECLAIM))) {
		XFS_STATS_INC(xs_ig_frecycle);
		goto out_error;
	}

	/* If IRECLAIMABLE is set, we've torn down the vfs inode part */
	if (xfs_iflags_test(ip, XFS_IRECLAIMABLE)) {

		/*
		 * If lookup is racing with unlink, then we should return an
		 * error immediately so we don't remove it from the reclaim
		 * list and potentially leak the inode.
		 */
		if ((ip->i_d.di_mode == 0) && !(flags & XFS_IGET_CREATE)) {
			error = ENOENT;
			goto out_error;
		}

		xfs_itrace_exit_tag(ip, "xfs_iget.alloc");

		/*
		 * We need to re-initialise the VFS inode as it has been
		 * 'freed' by the VFS. Do this here so we can deal with
		 * errors cleanly, then tag it so it can be set up correctly
		 * later.
		 */
		if (inode_init_always(mp->m_super, VFS_I(ip))) {
			error = ENOMEM;
			goto out_error;
		}

		/*
		 * We must set the XFS_INEW flag before clearing the
		 * XFS_IRECLAIMABLE flag so that if a racing lookup does
		 * not find the XFS_IRECLAIMABLE above but has the igrab()
		 * below succeed we can safely check XFS_INEW to detect
		 * that this inode is still being initialised.
		 */
		xfs_iflags_set(ip, XFS_INEW);
		xfs_iflags_clear(ip, XFS_IRECLAIMABLE);

		/* clear the radix tree reclaim flag as well. */
		__xfs_inode_clear_reclaim_tag(mp, pag, ip);
	} else if (!igrab(VFS_I(ip))) {
		/* If the VFS inode is being torn down, pause and try again. */
		XFS_STATS_INC(xs_ig_frecycle);
		goto out_error;
	} else if (xfs_iflags_test(ip, XFS_INEW)) {
		/*
		 * We are racing with another cache hit that is
		 * currently recycling this inode out of the XFS_IRECLAIMABLE
		 * state. Wait for the initialisation to complete before
		 * continuing.
		 */
		wait_on_inode(VFS_I(ip));
	}

	if (ip->i_d.di_mode == 0 && !(flags & XFS_IGET_CREATE)) {
		error = ENOENT;
		iput(VFS_I(ip));
		goto out_error;
	}

	/* We've got a live one. */
	read_unlock(&pag->pag_ici_lock);

	if (lock_flags != 0)
		xfs_ilock(ip, lock_flags);

	xfs_iflags_clear(ip, XFS_ISTALE);
	xfs_itrace_exit_tag(ip, "xfs_iget.found");
	XFS_STATS_INC(xs_ig_found);
	return 0;

out_error:
	read_unlock(&pag->pag_ici_lock);
	return error;
}

STATIC int
xfs_iget_cache_miss(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	xfs_trans_t		*tp,
	xfs_ino_t		ino,
	struct xfs_inode	**ipp,
	xfs_daddr_t		bno,
	int			flags,
	int			lock_flags) __releases(pag->pag_ici_lock)
{
	struct xfs_inode	*ip;
	int			error;
	unsigned long		first_index, mask;
	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, ino);

	ip = xfs_inode_alloc(mp, ino);
	if (!ip)
		return ENOMEM;

	error = xfs_iread(mp, tp, ip, bno, flags);
	if (error)
		goto out_destroy;

	xfs_itrace_exit_tag(ip, "xfs_iget.alloc");

	if ((ip->i_d.di_mode == 0) && !(flags & XFS_IGET_CREATE)) {
		error = ENOENT;
		goto out_destroy;
	}

	/*
	 * Preload the radix tree so we can insert safely under the
	 * write spinlock. Note that we cannot sleep inside the preload
	 * region.
	 */
	if (radix_tree_preload(GFP_KERNEL)) {
		error = EAGAIN;
		goto out_destroy;
	}

	/*
	 * Because the inode hasn't been added to the radix-tree yet it can't
	 * be found by another thread, so we can do the non-sleeping lock here.
	 */
	if (lock_flags) {
		if (!xfs_ilock_nowait(ip, lock_flags))
			BUG();
	}

	mask = ~(((XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog)) - 1);
	first_index = agino & mask;
	write_lock(&pag->pag_ici_lock);

	/* insert the new inode */
	error = radix_tree_insert(&pag->pag_ici_root, agino, ip);
	if (unlikely(error)) {
		WARN_ON(error != -EEXIST);
		XFS_STATS_INC(xs_ig_dup);
		error = EAGAIN;
		goto out_preload_end;
	}

	/* These values _must_ be set before releasing the radix tree lock! */
	ip->i_udquot = ip->i_gdquot = NULL;
	xfs_iflags_set(ip, XFS_INEW);

	write_unlock(&pag->pag_ici_lock);
	radix_tree_preload_end();
	*ipp = ip;
	return 0;

out_preload_end:
	write_unlock(&pag->pag_ici_lock);
	radix_tree_preload_end();
	if (lock_flags)
		xfs_iunlock(ip, lock_flags);
out_destroy:
	xfs_destroy_inode(ip);
	return error;
}

/*
 * Look up an inode by number in the given file system.
 * The inode is looked up in the cache held in each AG.
 * If the inode is found in the cache, initialise the vfs inode
 * if necessary.
 *
 * If it is not in core, read it in from the file system's device,
 * add it to the cache and initialise the vfs inode.
 *
 * The inode is locked according to the value of the lock_flags parameter.
 * This flag parameter indicates how and if the inode's IO lock and inode lock
 * should be taken.
 *
 * mp -- the mount point structure for the current file system.  It points
 *       to the inode hash table.
 * tp -- a pointer to the current transaction if there is one.  This is
 *       simply passed through to the xfs_iread() call.
 * ino -- the number of the inode desired.  This is the unique identifier
 *        within the file system for the inode being requested.
 * lock_flags -- flags indicating how to lock the inode.  See the comment
 *		 for xfs_ilock() for a list of valid values.
 * bno -- the block number starting the buffer containing the inode,
 *	  if known (as by bulkstat), else 0.
 */
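
/*
 * Typical caller pattern (a sketch only, not part of this file): look the
 * inode up with the lock mode the caller needs, use it, then drop the
 * reference and lock with xfs_iput().  The flags shown are the ones this
 * file documents above; the surrounding context is hypothetical.
 *
 *	xfs_inode_t	*ip;
 *	int		error;
 *
 *	error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_SHARED, &ip, 0);
 *	if (error)
 *		return error;
 *	... read fields from ip->i_d ...
 *	xfs_iput(ip, XFS_ILOCK_SHARED);
 */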
int
xfs_iget(
	xfs_mount_t	*mp,
	xfs_trans_t	*tp,
	xfs_ino_t	ino,
	uint		flags,
	uint		lock_flags,
	xfs_inode_t	**ipp,
	xfs_daddr_t	bno)
{
	xfs_inode_t	*ip;
	int		error;
	xfs_perag_t	*pag;
	xfs_agino_t	agino;

	/* the radix tree exists only in inode capable AGs */
	if (XFS_INO_TO_AGNO(mp, ino) >= mp->m_maxagi)
		return EINVAL;

	/* get the perag structure and ensure that it's inode capable */
	pag = xfs_get_perag(mp, ino);
	if (!pag->pagi_inodeok)
		return EINVAL;
	ASSERT(pag->pag_ici_init);
	agino = XFS_INO_TO_AGINO(mp, ino);

again:
	read_lock(&pag->pag_ici_lock);
	ip = radix_tree_lookup(&pag->pag_ici_root, agino);

	if (ip) {
		error = xfs_iget_cache_hit(pag, ip, flags, lock_flags);
		if (error)
			goto out_error_or_again;
	} else {
		read_unlock(&pag->pag_ici_lock);
		XFS_STATS_INC(xs_ig_missed);

		error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip, bno,
							flags, lock_flags);
		if (error)
			goto out_error_or_again;
	}
	xfs_put_perag(mp, pag);

	*ipp = ip;

	ASSERT(ip->i_df.if_ext_max ==
	       XFS_IFORK_DSIZE(ip) / sizeof(xfs_bmbt_rec_t));
	/*
	 * If we have a real type for an on-disk inode, we can set ops(&unlock)
	 * now.  If it's a new inode being created, xfs_ialloc will handle it.
	 */
	if (xfs_iflags_test(ip, XFS_INEW) && ip->i_d.di_mode != 0)
		xfs_setup_inode(ip);
	return 0;

out_error_or_again:
	if (error == EAGAIN) {
		delay(1);
		goto again;
	}
	xfs_put_perag(mp, pag);
	return error;
}

/*
 * Look for the inode corresponding to the given ino in the hash table.
 * If it is there and its i_transp pointer matches tp, return it.
 * Otherwise, return NULL.
 */
xfs_inode_t *
xfs_inode_incore(xfs_mount_t	*mp,
		 xfs_ino_t	ino,
		 xfs_trans_t	*tp)
{
	xfs_inode_t	*ip;
	xfs_perag_t	*pag;

	pag = xfs_get_perag(mp, ino);
	read_lock(&pag->pag_ici_lock);
	ip = radix_tree_lookup(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, ino));
	read_unlock(&pag->pag_ici_lock);
	xfs_put_perag(mp, pag);

	/* the returned inode must match the transaction */
	if (ip && (ip->i_transp != tp))
		return NULL;
	return ip;
}

/*
 * Decrement reference count of an inode structure and unlock it.
 *
 * ip -- the inode being released
 * lock_flags -- this parameter indicates the inode's locks to be
 *	 released.  See the comment on xfs_iunlock() for a list
 *	 of valid values.
 */
void
xfs_iput(xfs_inode_t	*ip,
	 uint		lock_flags)
{
	xfs_itrace_entry(ip);
	xfs_iunlock(ip, lock_flags);
	IRELE(ip);
}

/*
 * Special iput for brand-new inodes that are still locked
 */
void
xfs_iput_new(
	xfs_inode_t	*ip,
	uint		lock_flags)
{
	struct inode	*inode = VFS_I(ip);

	xfs_itrace_entry(ip);

	if (ip->i_d.di_mode == 0) {
		ASSERT(!xfs_iflags_test(ip, XFS_IRECLAIMABLE));
		make_bad_inode(inode);
	}
	if (inode->i_state & I_NEW)
		unlock_new_inode(inode);
	if (lock_flags)
		xfs_iunlock(ip, lock_flags);
	IRELE(ip);
}

/*
 * This is called to free all the memory associated with an inode.
 * It must free the inode itself and any buffers allocated for
 * if_extents/if_data and if_broot.  It must also free the lock
 * associated with the inode.
 *
 * Note: because we don't initialise everything on reallocation out
 * of the zone, we must ensure we nullify everything correctly before
 * freeing the structure.
 */
void
xfs_ireclaim(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_perag	*pag;

	XFS_STATS_INC(xs_ig_reclaims);

	/*
	 * Remove the inode from the per-AG radix tree.  It doesn't matter
	 * if it was never added to it because radix_tree_delete can deal
	 * with that case just fine.
	 */
	pag = xfs_get_perag(mp, ip->i_ino);
	write_lock(&pag->pag_ici_lock);
	radix_tree_delete(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, ip->i_ino));
	write_unlock(&pag->pag_ici_lock);
	xfs_put_perag(mp, pag);

	/*
	 * Here we do an (almost) spurious inode lock in order to coordinate
	 * with inode cache radix tree lookups.  This is because the lookup
	 * can reference the inodes in the cache without taking references.
	 *
	 * We make that OK here by ensuring that we wait until the inode is
	 * unlocked after the lookup before we go ahead and free it.  We get
	 * both the ilock and the iolock because the code may need to drop the
	 * ilock one but will still hold the iolock.
	 */
	xfs_ilock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
	xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);

	switch (ip->i_d.di_mode & S_IFMT) {
	case S_IFREG:
	case S_IFDIR:
	case S_IFLNK:
		xfs_idestroy_fork(ip, XFS_DATA_FORK);
		break;
	}

	if (ip->i_afp)
		xfs_idestroy_fork(ip, XFS_ATTR_FORK);

#ifdef XFS_INODE_TRACE
	ktrace_free(ip->i_trace);
#endif
#ifdef XFS_BMAP_TRACE
	ktrace_free(ip->i_xtrace);
#endif
#ifdef XFS_BTREE_TRACE
	ktrace_free(ip->i_btrace);
#endif
#ifdef XFS_RW_TRACE
	ktrace_free(ip->i_rwtrace);
#endif
#ifdef XFS_ILOCK_TRACE
	ktrace_free(ip->i_lock_trace);
#endif
#ifdef XFS_DIR2_TRACE
	ktrace_free(ip->i_dir_trace);
#endif
	if (ip->i_itemp) {
		/*
		 * Only if we are shutting down the fs will we see an
		 * inode still in the AIL. If it is there, we should remove
		 * it to prevent a use-after-free from occurring.
		 */
		xfs_log_item_t	*lip = &ip->i_itemp->ili_item;
		struct xfs_ail	*ailp = lip->li_ailp;

		ASSERT(((lip->li_flags & XFS_LI_IN_AIL) == 0) ||
				       XFS_FORCED_SHUTDOWN(ip->i_mount));
		if (lip->li_flags & XFS_LI_IN_AIL) {
			spin_lock(&ailp->xa_lock);
			if (lip->li_flags & XFS_LI_IN_AIL)
				xfs_trans_ail_delete(ailp, lip);
			else
				spin_unlock(&ailp->xa_lock);
		}
		xfs_inode_item_destroy(ip);
		ip->i_itemp = NULL;
	}

	/* asserts to verify all state is correct here */
	ASSERT(atomic_read(&ip->i_iocount) == 0);
	ASSERT(atomic_read(&ip->i_pincount) == 0);
	ASSERT(!spin_is_locked(&ip->i_flags_lock));
	ASSERT(completion_done(&ip->i_flush));
	kmem_zone_free(xfs_inode_zone, ip);
}
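
/*
 * Illustrative ordering (a sketch only, not part of this file) of how the
 * "spurious" lock/unlock above lets a lookup that references the inode
 * without holding a reference finish before the memory is freed:
 *
 *	lookup thread				reclaim thread
 *	-------------				--------------
 *	finds ip in the radix tree
 *	xfs_ilock(ip, ...)
 *						radix_tree_delete(...)
 *						xfs_ilock(ip, ...)   <- blocks
 *	... uses ip ...
 *	xfs_iunlock(ip, ...)
 *						lock granted, then dropped
 *						kmem_zone_free(xfs_inode_zone, ip)
 */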

/*
 * This is a wrapper routine around the xfs_ilock() routine
 * used to centralize some grungy code.  It is used in places
 * that wish to lock the inode solely for reading the extents.
 * The reason these places can't just call xfs_ilock(SHARED)
 * is that the inode lock also guards the bringing in of the
 * extents from disk for a file in b-tree format.  If the inode
 * is in b-tree format, then we need to lock the inode exclusively
 * until the extents are read in.  Locking it exclusively all
 * the time would limit our parallelism unnecessarily, though.
 * What we do instead is check to see if the extents have been
 * read in yet, and only lock the inode exclusively if they
 * have not.
 *
 * The function returns a value which should be given to the
 * corresponding xfs_iunlock_map_shared().  This value is
 * the mode in which the lock was actually taken.
 */
uint
xfs_ilock_map_shared(
	xfs_inode_t	*ip)
{
	uint	lock_mode;

	if ((ip->i_d.di_format == XFS_DINODE_FMT_BTREE) &&
	    ((ip->i_df.if_flags & XFS_IFEXTENTS) == 0)) {
		lock_mode = XFS_ILOCK_EXCL;
	} else {
		lock_mode = XFS_ILOCK_SHARED;
	}

	xfs_ilock(ip, lock_mode);

	return lock_mode;
}
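
/*
 * Illustrative pairing (a sketch only, not part of this file): callers that
 * only need to read the extent list take whatever mode this helper chose
 * and hand that same mode back when unlocking.
 *
 *	uint	lock_mode;
 *
 *	lock_mode = xfs_ilock_map_shared(ip);
 *	... read the extent list via ip->i_df ...
 *	xfs_iunlock_map_shared(ip, lock_mode);
 */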

/*
 * This is simply the unlock routine to go with xfs_ilock_map_shared().
 * All it does is call xfs_iunlock() with the given lock_mode.
 */
void
xfs_iunlock_map_shared(
	xfs_inode_t	*ip,
	unsigned int	lock_mode)
{
	xfs_iunlock(ip, lock_mode);
}

/*
 * The xfs inode contains 2 locks: a multi-reader lock called the
 * i_iolock and a multi-reader lock called the i_lock.  This routine
 * allows either or both of the locks to be obtained.
 *
 * The 2 locks should always be ordered so that the IO lock is
 * obtained first in order to prevent deadlock.
 *
 * ip -- the inode being locked
 * lock_flags -- this parameter indicates the inode's locks
 *       to be locked.  It can be:
 *		XFS_IOLOCK_SHARED,
 *		XFS_IOLOCK_EXCL,
 *		XFS_ILOCK_SHARED,
 *		XFS_ILOCK_EXCL,
 *		XFS_IOLOCK_SHARED | XFS_ILOCK_SHARED,
 *		XFS_IOLOCK_SHARED | XFS_ILOCK_EXCL,
 *		XFS_IOLOCK_EXCL | XFS_ILOCK_SHARED,
 *		XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL
 */
void
xfs_ilock(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);

	if (lock_flags & XFS_IOLOCK_EXCL)
		mrupdate_nested(&ip->i_iolock, XFS_IOLOCK_DEP(lock_flags));
	else if (lock_flags & XFS_IOLOCK_SHARED)
		mraccess_nested(&ip->i_iolock, XFS_IOLOCK_DEP(lock_flags));

	if (lock_flags & XFS_ILOCK_EXCL)
		mrupdate_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
	else if (lock_flags & XFS_ILOCK_SHARED)
		mraccess_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));

	xfs_ilock_trace(ip, 1, lock_flags, (inst_t *)__return_address);
}
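
/*
 * Illustrative usage (a sketch only, not part of this file): both locks can
 * be taken in one call, the IO lock always ordering before the inode lock,
 * and the matching unlock passes the same flags back.
 *
 *	xfs_ilock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
 *	... perform the update ...
 *	xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
 */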

/*
 * This is just like xfs_ilock(), except that the caller
 * is guaranteed not to sleep.  It returns 1 if it gets
 * the requested locks and 0 otherwise.  If the IO lock is
 * obtained but the inode lock cannot be, then the IO lock
 * is dropped before returning.
 *
 * ip -- the inode being locked
 * lock_flags -- this parameter indicates the inode's locks to be
 *	 locked.  See the comment for xfs_ilock() for a list
 *	 of valid values.
 */
int
xfs_ilock_nowait(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);

	if (lock_flags & XFS_IOLOCK_EXCL) {
		if (!mrtryupdate(&ip->i_iolock))
			goto out;
	} else if (lock_flags & XFS_IOLOCK_SHARED) {
		if (!mrtryaccess(&ip->i_iolock))
			goto out;
	}
	if (lock_flags & XFS_ILOCK_EXCL) {
		if (!mrtryupdate(&ip->i_lock))
			goto out_undo_iolock;
	} else if (lock_flags & XFS_ILOCK_SHARED) {
		if (!mrtryaccess(&ip->i_lock))
			goto out_undo_iolock;
	}
	xfs_ilock_trace(ip, 2, lock_flags, (inst_t *)__return_address);
	return 1;

 out_undo_iolock:
	if (lock_flags & XFS_IOLOCK_EXCL)
		mrunlock_excl(&ip->i_iolock);
	else if (lock_flags & XFS_IOLOCK_SHARED)
		mrunlock_shared(&ip->i_iolock);
 out:
	return 0;
}
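
/*
 * Illustrative trylock pattern (a sketch only, not part of this file):
 * callers that must not sleep, such as the cache-miss path above, either
 * get all of the requested locks or none of them.
 *
 *	if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
 *		... back off and retry, or fail with EAGAIN ...
 *	}
 *	... locked section ...
 *	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 */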

/*
 * xfs_iunlock() is used to drop the inode locks acquired with
 * xfs_ilock() and xfs_ilock_nowait().  The caller must pass
 * in the flags given to xfs_ilock() or xfs_ilock_nowait() so
 * that we know which locks to drop.
 *
 * ip -- the inode being unlocked
 * lock_flags -- this parameter indicates the inode's locks to be
 *	 unlocked.  See the comment for xfs_ilock() for a list
 *	 of valid values for this parameter.
 */
void
xfs_iunlock(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_IUNLOCK_NONOTIFY |
			XFS_LOCK_DEP_MASK)) == 0);
	ASSERT(lock_flags != 0);

	if (lock_flags & XFS_IOLOCK_EXCL)
		mrunlock_excl(&ip->i_iolock);
	else if (lock_flags & XFS_IOLOCK_SHARED)
		mrunlock_shared(&ip->i_iolock);

	if (lock_flags & XFS_ILOCK_EXCL)
		mrunlock_excl(&ip->i_lock);
	else if (lock_flags & XFS_ILOCK_SHARED)
		mrunlock_shared(&ip->i_lock);

	if ((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) &&
	    !(lock_flags & XFS_IUNLOCK_NONOTIFY) && ip->i_itemp) {
		/*
		 * Let the AIL know that this item has been unlocked in case
		 * it is in the AIL and anyone is waiting on it.  Don't do
		 * this if the caller has asked us not to.
		 */
		xfs_trans_unlocked_item(ip->i_itemp->ili_item.li_ailp,
					(xfs_log_item_t*)(ip->i_itemp));
	}
	xfs_ilock_trace(ip, 3, lock_flags, (inst_t *)__return_address);
}

/*
 * give up write locks.  the i/o lock cannot be held nested
 * if it is being demoted.
 */
void
xfs_ilock_demote(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	ASSERT(lock_flags & (XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL)) == 0);

	if (lock_flags & XFS_ILOCK_EXCL)
		mrdemote(&ip->i_lock);
	if (lock_flags & XFS_IOLOCK_EXCL)
		mrdemote(&ip->i_iolock);
}
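
/*
 * Illustrative usage (a sketch only, not part of this file): take the lock
 * exclusively for the modification, then demote to shared so other readers
 * can proceed while the caller keeps the inode locked.
 *
 *	xfs_ilock(ip, XFS_ILOCK_EXCL);
 *	... modify the inode ...
 *	xfs_ilock_demote(ip, XFS_ILOCK_EXCL);
 *	... continue with shared access only ...
 *	xfs_iunlock(ip, XFS_ILOCK_SHARED);
 */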

#ifdef DEBUG
/*
 * Debug-only routine, without additional rw_semaphore APIs, we can
 * now only answer requests regarding whether we hold the lock for write
 * (reader state is outside our visibility, we only track writer state).
 *
 * Note: this means !xfs_isilocked would give false positives, so don't do that.
 */
int
xfs_isilocked(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	if ((lock_flags & (XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)) ==
	    XFS_ILOCK_EXCL) {
		if (!ip->i_lock.mr_writer)
			return 0;
	}

	if ((lock_flags & (XFS_IOLOCK_EXCL|XFS_IOLOCK_SHARED)) ==
	    XFS_IOLOCK_EXCL) {
		if (!ip->i_iolock.mr_writer)
			return 0;
	}

	return 1;
}
#endif	/* DEBUG */
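
/*
 * Illustrative usage (a sketch only, not part of this file): because only
 * the writer state is tracked, assert that a lock is held, never that it is
 * not held.
 *
 *	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
 */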

#ifdef	XFS_INODE_TRACE

#define KTRACE_ENTER(ip, vk, s, line, ra)			\
	ktrace_enter((ip)->i_trace,				\
/*  0 */		(void *)(__psint_t)(vk),		\
/*  1 */		(void *)(s),				\
/*  2 */		(void *)(__psint_t) line,		\
/*  3 */		(void *)(__psint_t)atomic_read(&VFS_I(ip)->i_count), \
/*  4 */		(void *)(ra),				\
/*  5 */		NULL,					\
/*  6 */		(void *)(__psint_t)current_cpu(),	\
/*  7 */		(void *)(__psint_t)current_pid(),	\
/*  8 */		(void *)__return_address,		\
/*  9 */		NULL, NULL, NULL, NULL, NULL, NULL, NULL)

/*
 * Vnode tracing code.
 */
void
_xfs_itrace_entry(xfs_inode_t *ip, const char *func, inst_t *ra)
{
	KTRACE_ENTER(ip, INODE_KTRACE_ENTRY, func, 0, ra);
}

void
_xfs_itrace_exit(xfs_inode_t *ip, const char *func, inst_t *ra)
{
	KTRACE_ENTER(ip, INODE_KTRACE_EXIT, func, 0, ra);
}

void
xfs_itrace_hold(xfs_inode_t *ip, char *file, int line, inst_t *ra)
{
	KTRACE_ENTER(ip, INODE_KTRACE_HOLD, file, line, ra);
}

void
_xfs_itrace_ref(xfs_inode_t *ip, char *file, int line, inst_t *ra)
{
	KTRACE_ENTER(ip, INODE_KTRACE_REF, file, line, ra);
}

void
xfs_itrace_rele(xfs_inode_t *ip, char *file, int line, inst_t *ra)
{
	KTRACE_ENTER(ip, INODE_KTRACE_RELE, file, line, ra);
}
#endif	/* XFS_INODE_TRACE */