/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include <linux/log2.h>

#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_buf_item.h"
#include "xfs_inode_item.h"
#include "xfs_btree.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_bmap.h"
#include "xfs_error.h"
#include "xfs_utils.h"
#include "xfs_quota.h"
#include "xfs_filestream.h"
#include "xfs_vnodeops.h"
#include "xfs_cksum.h"
#include "xfs_trace.h"
#include "xfs_icache.h"

kmem_zone_t *xfs_ifork_zone;
kmem_zone_t *xfs_inode_zone;

/*
 * Used in xfs_itruncate_extents().  This is the maximum number of extents
 * freed from a file in a single transaction.
 */
#define	XFS_ITRUNC_MAX_EXTENTS	2

STATIC int xfs_iflush_int(xfs_inode_t *, xfs_buf_t *);
STATIC int xfs_iformat_local(xfs_inode_t *, xfs_dinode_t *, int, int);
STATIC int xfs_iformat_extents(xfs_inode_t *, xfs_dinode_t *, int);
STATIC int xfs_iformat_btree(xfs_inode_t *, xfs_dinode_t *, int);

/*
 * helper function to extract extent size hint from inode
 */
xfs_extlen_t
xfs_get_extsz_hint(
	struct xfs_inode	*ip)
{
	if ((ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE) && ip->i_d.di_extsize)
		return ip->i_d.di_extsize;
	if (XFS_IS_REALTIME_INODE(ip))
		return ip->i_mount->m_sb.sb_rextsize;
	return 0;
}
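
/*
 * Illustrative sketch, not part of the original file: a hypothetical
 * caller honouring the hint by rounding an allocation length up to it.
 * The variable names here are made up for illustration.
 *
 *	xfs_extlen_t	extsz = xfs_get_extsz_hint(ip);
 *
 *	if (extsz)
 *		alen = roundup(alen, extsz);
 */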

/*
 * This is a wrapper routine around the xfs_ilock() routine used to centralize
 * some grungy code.  It is used in places that wish to lock the inode solely
 * for reading the extents.  The reason these places can't just call
 * xfs_ilock(SHARED) is that the inode lock also guards the bringing in of the
 * extents from disk for a file in b-tree format.  If the inode is in b-tree
 * format, then we need to lock the inode exclusively until the extents are read
 * in.  Locking it exclusively all the time would limit our parallelism
 * unnecessarily, though.  What we do instead is check to see if the extents
 * have been read in yet, and only lock the inode exclusively if they have not.
 *
 * The function returns a value which should be given to the corresponding
 * xfs_iunlock_map_shared().  This value is the mode in which the lock was
 * actually taken.
 */
uint
xfs_ilock_map_shared(
	xfs_inode_t	*ip)
{
	uint	lock_mode;

	if ((ip->i_d.di_format == XFS_DINODE_FMT_BTREE) &&
	    ((ip->i_df.if_flags & XFS_IFEXTENTS) == 0)) {
		lock_mode = XFS_ILOCK_EXCL;
	} else {
		lock_mode = XFS_ILOCK_SHARED;
	}

	xfs_ilock(ip, lock_mode);

	return lock_mode;
}

/*
 * This is simply the unlock routine to go with xfs_ilock_map_shared().
 * All it does is call xfs_iunlock() with the given lock_mode.
 */
void
xfs_iunlock_map_shared(
	xfs_inode_t	*ip,
	unsigned int	lock_mode)
{
	xfs_iunlock(ip, lock_mode);
}

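/*
 * Illustrative sketch, not part of the original file: how a hypothetical
 * extent reader pairs the two helpers above, handing the mode returned
 * by the lock side back to the unlock side.
 *
 *	uint	lock_mode;
 *
 *	lock_mode = xfs_ilock_map_shared(ip);
 *	(read-only walk of the extent list)
 *	xfs_iunlock_map_shared(ip, lock_mode);
 */
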
/*
 * The xfs inode contains 2 locks: a multi-reader lock called the
 * i_iolock and a multi-reader lock called the i_lock.  This routine
 * allows either or both of the locks to be obtained.
 *
 * The 2 locks should always be ordered so that the IO lock is
 * obtained first in order to prevent deadlock.
 *
 * ip -- the inode being locked
 * lock_flags -- this parameter indicates the inode's locks
 *	 to be locked.  It can be:
 *		XFS_IOLOCK_SHARED,
 *		XFS_IOLOCK_EXCL,
 *		XFS_ILOCK_SHARED,
 *		XFS_ILOCK_EXCL,
 *		XFS_IOLOCK_SHARED | XFS_ILOCK_SHARED,
 *		XFS_IOLOCK_SHARED | XFS_ILOCK_EXCL,
 *		XFS_IOLOCK_EXCL | XFS_ILOCK_SHARED,
 *		XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL
 */
void
xfs_ilock(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	trace_xfs_ilock(ip, lock_flags, _RET_IP_);

	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);

	if (lock_flags & XFS_IOLOCK_EXCL)
		mrupdate_nested(&ip->i_iolock, XFS_IOLOCK_DEP(lock_flags));
	else if (lock_flags & XFS_IOLOCK_SHARED)
		mraccess_nested(&ip->i_iolock, XFS_IOLOCK_DEP(lock_flags));

	if (lock_flags & XFS_ILOCK_EXCL)
		mrupdate_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
	else if (lock_flags & XFS_ILOCK_SHARED)
		mraccess_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
}

/*
 * This is just like xfs_ilock(), except that the caller
 * is guaranteed not to sleep.  It returns 1 if it gets
 * the requested locks and 0 otherwise.  If the IO lock is
 * obtained but the inode lock cannot be, then the IO lock
 * is dropped before returning.
 *
 * ip -- the inode being locked
 * lock_flags -- this parameter indicates the inode's locks
 *	 to be locked.  See the comment for xfs_ilock() for a list
 *	 of valid values.
 */
int
xfs_ilock_nowait(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	trace_xfs_ilock_nowait(ip, lock_flags, _RET_IP_);

	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);

	if (lock_flags & XFS_IOLOCK_EXCL) {
		if (!mrtryupdate(&ip->i_iolock))
			goto out;
	} else if (lock_flags & XFS_IOLOCK_SHARED) {
		if (!mrtryaccess(&ip->i_iolock))
			goto out;
	}
	if (lock_flags & XFS_ILOCK_EXCL) {
		if (!mrtryupdate(&ip->i_lock))
			goto out_undo_iolock;
	} else if (lock_flags & XFS_ILOCK_SHARED) {
		if (!mrtryaccess(&ip->i_lock))
			goto out_undo_iolock;
	}
	return 1;

 out_undo_iolock:
	if (lock_flags & XFS_IOLOCK_EXCL)
		mrunlock_excl(&ip->i_iolock);
	else if (lock_flags & XFS_IOLOCK_SHARED)
		mrunlock_shared(&ip->i_iolock);
 out:
	return 0;
}

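/*
 * Illustrative sketch, not part of the original file: a hypothetical
 * caller that must not sleep, backing off when the locks are contended.
 * On failure nothing is held, so no unwinding is needed.
 *
 *	if (!xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED | XFS_ILOCK_SHARED))
 *		return EAGAIN;
 *	(fast-path work)
 *	xfs_iunlock(ip, XFS_IOLOCK_SHARED | XFS_ILOCK_SHARED);
 */
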
/*
 * xfs_iunlock() is used to drop the inode locks acquired with
 * xfs_ilock() and xfs_ilock_nowait().  The caller must pass
 * in the flags given to xfs_ilock() or xfs_ilock_nowait() so
 * that we know which locks to drop.
 *
 * ip -- the inode being unlocked
 * lock_flags -- this parameter indicates the inode's locks
 *	 to be unlocked.  See the comment for xfs_ilock() for a list
 *	 of valid values for this parameter.
 */
void
xfs_iunlock(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);
	ASSERT(lock_flags != 0);

	if (lock_flags & XFS_IOLOCK_EXCL)
		mrunlock_excl(&ip->i_iolock);
	else if (lock_flags & XFS_IOLOCK_SHARED)
		mrunlock_shared(&ip->i_iolock);

	if (lock_flags & XFS_ILOCK_EXCL)
		mrunlock_excl(&ip->i_lock);
	else if (lock_flags & XFS_ILOCK_SHARED)
		mrunlock_shared(&ip->i_lock);

	trace_xfs_iunlock(ip, lock_flags, _RET_IP_);
}

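/*
 * Illustrative sketch, not part of the original file: the usual pairing,
 * passing xfs_iunlock() exactly the flags that xfs_ilock() was given.
 *
 *	xfs_ilock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
 *	(modify the inode)
 *	xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
 */
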
/*
 * Give up write locks.  The i/o lock cannot be held nested
 * if it is being demoted.
 */
void
xfs_ilock_demote(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	ASSERT(lock_flags & (XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL)) == 0);

	if (lock_flags & XFS_ILOCK_EXCL)
		mrdemote(&ip->i_lock);
	if (lock_flags & XFS_IOLOCK_EXCL)
		mrdemote(&ip->i_iolock);

	trace_xfs_ilock_demote(ip, lock_flags, _RET_IP_);
}

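/*
 * Illustrative sketch, not part of the original file: dropping from
 * exclusive to shared once the exclusive-only phase is finished, so
 * that concurrent readers can make progress.
 *
 *	xfs_ilock(ip, XFS_ILOCK_EXCL);
 *	(work requiring exclusive access)
 *	xfs_ilock_demote(ip, XFS_ILOCK_EXCL);
 *	(work that only needs shared access)
 *	xfs_iunlock(ip, XFS_ILOCK_SHARED);
 */
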
#if defined(DEBUG) || defined(XFS_WARN)
int
xfs_isilocked(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	if (lock_flags & (XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)) {
		if (!(lock_flags & XFS_ILOCK_SHARED))
			return !!ip->i_lock.mr_writer;
		return rwsem_is_locked(&ip->i_lock.mr_lock);
	}

	if (lock_flags & (XFS_IOLOCK_EXCL|XFS_IOLOCK_SHARED)) {
		if (!(lock_flags & XFS_IOLOCK_SHARED))
			return !!ip->i_iolock.mr_writer;
		return rwsem_is_locked(&ip->i_iolock.mr_lock);
	}

	ASSERT(0);
	return 0;
}
#endif

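/*
 * Illustrative sketch, not part of the original file: xfs_isilocked() is
 * consumed from debug assertions in functions that require the caller to
 * already hold a lock, as xfs_itruncate_extents() does below:
 *
 *	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
 */
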
void
__xfs_iflock(
	struct xfs_inode	*ip)
{
	wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_IFLOCK_BIT);
	DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_IFLOCK_BIT);

	do {
		prepare_to_wait_exclusive(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
		if (xfs_isiflocked(ip))
			io_schedule();
	} while (!xfs_iflock_nowait(ip));

	finish_wait(wq, &wait.wait);
}

#ifdef DEBUG
/*
 * Make sure that the extents in the given memory buffer
 * are valid.
 */
STATIC void
xfs_validate_extents(
	xfs_ifork_t		*ifp,
	int			nrecs,
	xfs_exntfmt_t		fmt)
{
	xfs_bmbt_irec_t		irec;
	xfs_bmbt_rec_host_t	rec;
	int			i;

	for (i = 0; i < nrecs; i++) {
		xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, i);
		rec.l0 = get_unaligned(&ep->l0);
		rec.l1 = get_unaligned(&ep->l1);
		xfs_bmbt_get_all(&rec, &irec);
		if (fmt == XFS_EXTFMT_NOSTATE)
			ASSERT(irec.br_state == XFS_EXT_NORM);
	}
}
#else /* DEBUG */
#define xfs_validate_extents(ifp, nrecs, fmt)
#endif /* DEBUG */

/*
 * Check that none of the inodes in the buffer have a next
 * unlinked field of 0.
 */
#if defined(DEBUG)
void
xfs_inobp_check(
	xfs_mount_t	*mp,
	xfs_buf_t	*bp)
{
	int		i;
	int		j;
	xfs_dinode_t	*dip;

	j = mp->m_inode_cluster_size >> mp->m_sb.sb_inodelog;

	for (i = 0; i < j; i++) {
		dip = (xfs_dinode_t *)xfs_buf_offset(bp,
					i * mp->m_sb.sb_inodesize);
		if (!dip->di_next_unlinked)  {
			xfs_alert(mp,
	"Detected bogus zero next_unlinked field in incore inode buffer 0x%p.",
				bp);
			ASSERT(dip->di_next_unlinked);
		}
	}
}
#endif

static void
xfs_inode_buf_verify(
	struct xfs_buf	*bp)
{
	struct xfs_mount *mp = bp->b_target->bt_mount;
	int		i;
	int		ni;

	/*
	 * Validate the magic number and version of every inode in the buffer
	 */
	ni = XFS_BB_TO_FSB(mp, bp->b_length) * mp->m_sb.sb_inopblock;
	for (i = 0; i < ni; i++) {
		int		di_ok;
		xfs_dinode_t	*dip;

		dip = (struct xfs_dinode *)xfs_buf_offset(bp,
					(i << mp->m_sb.sb_inodelog));
		di_ok = dip->di_magic == cpu_to_be16(XFS_DINODE_MAGIC) &&
			    XFS_DINODE_GOOD_VERSION(dip->di_version);
		if (unlikely(XFS_TEST_ERROR(!di_ok, mp,
						XFS_ERRTAG_ITOBP_INOTOBP,
						XFS_RANDOM_ITOBP_INOTOBP))) {
			xfs_buf_ioerror(bp, EFSCORRUPTED);
			XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_HIGH,
					     mp, dip);
#ifdef DEBUG
			xfs_emerg(mp,
				"bad inode magic/vsn daddr %lld #%d (magic=%x)",
				(unsigned long long)bp->b_bn, i,
				be16_to_cpu(dip->di_magic));
			ASSERT(0);
#endif
		}
	}
	xfs_inobp_check(mp, bp);
}

static void
xfs_inode_buf_read_verify(
	struct xfs_buf	*bp)
{
	xfs_inode_buf_verify(bp);
}

static void
xfs_inode_buf_write_verify(
	struct xfs_buf	*bp)
{
	xfs_inode_buf_verify(bp);
}

const struct xfs_buf_ops xfs_inode_buf_ops = {
	.verify_read = xfs_inode_buf_read_verify,
	.verify_write = xfs_inode_buf_write_verify,
};

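/*
 * Illustrative sketch, not part of the original file: the ops table is
 * consumed by the generic buffer I/O code once attached, either when a
 * read supplies it (as xfs_imap_to_bp() does below):
 *
 *	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, blkno, len,
 *				   flags, &bp, &xfs_inode_buf_ops);
 *
 * or when it is hung directly off a buffer that was never read from
 * disk (as xfs_ifree_cluster() does below):
 *
 *	bp->b_ops = &xfs_inode_buf_ops;
 */
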
/*
 * This routine is called to map an inode to the buffer containing the on-disk
 * version of the inode.  It returns a pointer to the buffer containing the
 * on-disk inode in the bpp parameter, and in the dipp parameter it returns a
 * pointer to the on-disk inode within that buffer.
 *
 * If a non-zero error is returned, then the contents of bpp and dipp are
 * undefined.
 */
int
xfs_imap_to_bp(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_imap		*imap,
	struct xfs_dinode	**dipp,
	struct xfs_buf		**bpp,
	uint			buf_flags,
	uint			iget_flags)
{
	struct xfs_buf		*bp;
	int			error;

	buf_flags |= XBF_UNMAPPED;
	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, imap->im_blkno,
				   (int)imap->im_len, buf_flags, &bp,
				   &xfs_inode_buf_ops);
	if (error) {
		if (error == EAGAIN) {
			ASSERT(buf_flags & XBF_TRYLOCK);
			return error;
		}

		if (error == EFSCORRUPTED &&
		    (iget_flags & XFS_IGET_UNTRUSTED))
			return XFS_ERROR(EINVAL);

		xfs_warn(mp, "%s: xfs_trans_read_buf() returned error %d.",
			__func__, error);
		return error;
	}

	*bpp = bp;
	*dipp = (struct xfs_dinode *)xfs_buf_offset(bp, imap->im_boffset);
	return 0;
}

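/*
 * Illustrative sketch, not part of the original file: the usual calling
 * sequence, mirroring xfs_iread() below. Map the inode location first,
 * then fetch the buffer and the on-disk inode pointer together.
 *
 *	struct xfs_dinode	*dip;
 *	struct xfs_buf		*bp;
 *
 *	error = xfs_imap(mp, tp, ip->i_ino, &ip->i_imap, 0);
 *	if (!error)
 *		error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &bp, 0, 0);
 */
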
/*
 * Move inode type and inode format specific information from the
 * on-disk inode to the in-core inode.  For fifos, devs, and sockets
 * this means set if_rdev to the proper value.  For files, directories,
 * and symlinks this means to bring in the in-line data or extent
 * pointers.  For a file in B-tree format, only the root is immediately
 * brought in-core.  The rest will be in-lined in if_extents when it
 * is first referenced (see xfs_iread_extents()).
 */
STATIC int
xfs_iformat(
	xfs_inode_t		*ip,
	xfs_dinode_t		*dip)
{
	xfs_attr_shortform_t	*atp;
	int			size;
	int			error = 0;
	xfs_fsize_t		di_size;

	if (unlikely(be32_to_cpu(dip->di_nextents) +
		     be16_to_cpu(dip->di_anextents) >
		     be64_to_cpu(dip->di_nblocks))) {
		xfs_warn(ip->i_mount,
			"corrupt dinode %Lu, extent total = %d, nblocks = %Lu.",
			(unsigned long long)ip->i_ino,
			(int)(be32_to_cpu(dip->di_nextents) +
			      be16_to_cpu(dip->di_anextents)),
			(unsigned long long)
				be64_to_cpu(dip->di_nblocks));
		XFS_CORRUPTION_ERROR("xfs_iformat(1)", XFS_ERRLEVEL_LOW,
				     ip->i_mount, dip);
		return XFS_ERROR(EFSCORRUPTED);
	}

	if (unlikely(dip->di_forkoff > ip->i_mount->m_sb.sb_inodesize)) {
		xfs_warn(ip->i_mount, "corrupt dinode %Lu, forkoff = 0x%x.",
			(unsigned long long)ip->i_ino,
			dip->di_forkoff);
		XFS_CORRUPTION_ERROR("xfs_iformat(2)", XFS_ERRLEVEL_LOW,
				     ip->i_mount, dip);
		return XFS_ERROR(EFSCORRUPTED);
	}

	if (unlikely((ip->i_d.di_flags & XFS_DIFLAG_REALTIME) &&
		     !ip->i_mount->m_rtdev_targp)) {
		xfs_warn(ip->i_mount,
			"corrupt dinode %Lu, has realtime flag set.",
			ip->i_ino);
		XFS_CORRUPTION_ERROR("xfs_iformat(realtime)",
				     XFS_ERRLEVEL_LOW, ip->i_mount, dip);
		return XFS_ERROR(EFSCORRUPTED);
	}

	switch (ip->i_d.di_mode & S_IFMT) {
	case S_IFIFO:
	case S_IFCHR:
	case S_IFBLK:
	case S_IFSOCK:
		if (unlikely(dip->di_format != XFS_DINODE_FMT_DEV)) {
			XFS_CORRUPTION_ERROR("xfs_iformat(3)", XFS_ERRLEVEL_LOW,
					      ip->i_mount, dip);
			return XFS_ERROR(EFSCORRUPTED);
		}
		ip->i_d.di_size = 0;
		ip->i_df.if_u2.if_rdev = xfs_dinode_get_rdev(dip);
		break;

	case S_IFREG:
	case S_IFLNK:
	case S_IFDIR:
		switch (dip->di_format) {
		case XFS_DINODE_FMT_LOCAL:
			/*
			 * no local regular files yet
			 */
			if (unlikely(S_ISREG(be16_to_cpu(dip->di_mode)))) {
				xfs_warn(ip->i_mount,
			"corrupt inode %Lu (local format for regular file).",
					(unsigned long long) ip->i_ino);
				XFS_CORRUPTION_ERROR("xfs_iformat(4)",
						     XFS_ERRLEVEL_LOW,
						     ip->i_mount, dip);
				return XFS_ERROR(EFSCORRUPTED);
			}

			di_size = be64_to_cpu(dip->di_size);
			if (unlikely(di_size > XFS_DFORK_DSIZE(dip, ip->i_mount))) {
				xfs_warn(ip->i_mount,
			"corrupt inode %Lu (bad size %Ld for local inode).",
					(unsigned long long) ip->i_ino,
					(long long) di_size);
				XFS_CORRUPTION_ERROR("xfs_iformat(5)",
						     XFS_ERRLEVEL_LOW,
						     ip->i_mount, dip);
				return XFS_ERROR(EFSCORRUPTED);
			}

			size = (int)di_size;
			error = xfs_iformat_local(ip, dip, XFS_DATA_FORK, size);
			break;
		case XFS_DINODE_FMT_EXTENTS:
			error = xfs_iformat_extents(ip, dip, XFS_DATA_FORK);
			break;
		case XFS_DINODE_FMT_BTREE:
			error = xfs_iformat_btree(ip, dip, XFS_DATA_FORK);
			break;
		default:
			XFS_ERROR_REPORT("xfs_iformat(6)", XFS_ERRLEVEL_LOW,
					 ip->i_mount);
			return XFS_ERROR(EFSCORRUPTED);
		}
		break;

	default:
		XFS_ERROR_REPORT("xfs_iformat(7)", XFS_ERRLEVEL_LOW, ip->i_mount);
		return XFS_ERROR(EFSCORRUPTED);
	}
	if (error) {
		return error;
	}
	if (!XFS_DFORK_Q(dip))
		return 0;

	ASSERT(ip->i_afp == NULL);
	ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_SLEEP | KM_NOFS);

	switch (dip->di_aformat) {
	case XFS_DINODE_FMT_LOCAL:
		atp = (xfs_attr_shortform_t *)XFS_DFORK_APTR(dip);
		size = be16_to_cpu(atp->hdr.totsize);

		if (unlikely(size < sizeof(struct xfs_attr_sf_hdr))) {
			xfs_warn(ip->i_mount,
				"corrupt inode %Lu (bad attr fork size %Ld).",
				(unsigned long long) ip->i_ino,
				(long long) size);
			XFS_CORRUPTION_ERROR("xfs_iformat(8)",
					     XFS_ERRLEVEL_LOW,
					     ip->i_mount, dip);
			return XFS_ERROR(EFSCORRUPTED);
		}

		error = xfs_iformat_local(ip, dip, XFS_ATTR_FORK, size);
		break;
	case XFS_DINODE_FMT_EXTENTS:
		error = xfs_iformat_extents(ip, dip, XFS_ATTR_FORK);
		break;
	case XFS_DINODE_FMT_BTREE:
		error = xfs_iformat_btree(ip, dip, XFS_ATTR_FORK);
		break;
	default:
		error = XFS_ERROR(EFSCORRUPTED);
		break;
	}
	if (error) {
		kmem_zone_free(xfs_ifork_zone, ip->i_afp);
		ip->i_afp = NULL;
		xfs_idestroy_fork(ip, XFS_DATA_FORK);
	}
	return error;
}

/*
 * The file is in-lined in the on-disk inode.
 * If it fits into if_inline_data, then copy
 * it there, otherwise allocate a buffer for it
 * and copy the data there.  Either way, set
 * if_data to point at the data.
 * If we allocate a buffer for the data, make
 * sure that its size is a multiple of 4 and
 * record the real size in if_real_bytes.
 */
STATIC int
xfs_iformat_local(
	xfs_inode_t	*ip,
	xfs_dinode_t	*dip,
	int		whichfork,
	int		size)
{
	xfs_ifork_t	*ifp;
	int		real_size;

	/*
	 * If the size is unreasonable, then something
	 * is wrong and we just bail out rather than crash in
	 * kmem_alloc() or memcpy() below.
	 */
	if (unlikely(size > XFS_DFORK_SIZE(dip, ip->i_mount, whichfork))) {
		xfs_warn(ip->i_mount,
			"corrupt inode %Lu (bad size %d for local fork, size = %d).",
			(unsigned long long) ip->i_ino, size,
			XFS_DFORK_SIZE(dip, ip->i_mount, whichfork));
		XFS_CORRUPTION_ERROR("xfs_iformat_local", XFS_ERRLEVEL_LOW,
				     ip->i_mount, dip);
		return XFS_ERROR(EFSCORRUPTED);
	}
	ifp = XFS_IFORK_PTR(ip, whichfork);
	real_size = 0;
	if (size == 0)
		ifp->if_u1.if_data = NULL;
	else if (size <= sizeof(ifp->if_u2.if_inline_data))
		ifp->if_u1.if_data = ifp->if_u2.if_inline_data;
	else {
		real_size = roundup(size, 4);
		ifp->if_u1.if_data = kmem_alloc(real_size, KM_SLEEP | KM_NOFS);
	}
	ifp->if_bytes = size;
	ifp->if_real_bytes = real_size;
	if (size)
		memcpy(ifp->if_u1.if_data, XFS_DFORK_PTR(dip, whichfork), size);
	ifp->if_flags &= ~XFS_IFEXTENTS;
	ifp->if_flags |= XFS_IFINLINE;
	return 0;
}

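/*
 * Illustrative sketch, not part of the original file: the sizing rule
 * above in concrete numbers. A 37-byte local fork too large for
 * if_inline_data is heap allocated, rounded up to a multiple of 4:
 *
 *	real_size = roundup(37, 4);	(gives 40)
 *	ifp->if_bytes = 37;		(logical size)
 *	ifp->if_real_bytes = 40;	(allocated size)
 */
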
/*
 * The file consists of a set of extents all
 * of which fit into the on-disk inode.
 * If there are few enough extents to fit into
 * the if_inline_ext, then copy them there.
 * Otherwise allocate a buffer for them and copy
 * them into it.  Either way, set if_extents
 * to point at the extents.
 */
STATIC int
xfs_iformat_extents(
	xfs_inode_t	*ip,
	xfs_dinode_t	*dip,
	int		whichfork)
{
	xfs_bmbt_rec_t	*dp;
	xfs_ifork_t	*ifp;
	int		nex;
	int		size;
	int		i;

	ifp = XFS_IFORK_PTR(ip, whichfork);
	nex = XFS_DFORK_NEXTENTS(dip, whichfork);
	size = nex * (uint)sizeof(xfs_bmbt_rec_t);

	/*
	 * If the number of extents is unreasonable, then something
	 * is wrong and we just bail out rather than crash in
	 * kmem_alloc() or memcpy() below.
	 */
	if (unlikely(size < 0 || size > XFS_DFORK_SIZE(dip, ip->i_mount, whichfork))) {
		xfs_warn(ip->i_mount, "corrupt inode %Lu ((a)extents = %d).",
			(unsigned long long) ip->i_ino, nex);
		XFS_CORRUPTION_ERROR("xfs_iformat_extents(1)", XFS_ERRLEVEL_LOW,
				     ip->i_mount, dip);
		return XFS_ERROR(EFSCORRUPTED);
	}

	ifp->if_real_bytes = 0;
	if (nex == 0)
		ifp->if_u1.if_extents = NULL;
	else if (nex <= XFS_INLINE_EXTS)
		ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext;
	else
		xfs_iext_add(ifp, 0, nex);

	ifp->if_bytes = size;
	if (size) {
		dp = (xfs_bmbt_rec_t *) XFS_DFORK_PTR(dip, whichfork);
		xfs_validate_extents(ifp, nex, XFS_EXTFMT_INODE(ip));
		for (i = 0; i < nex; i++, dp++) {
			xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, i);
			ep->l0 = get_unaligned_be64(&dp->l0);
			ep->l1 = get_unaligned_be64(&dp->l1);
		}
		XFS_BMAP_TRACE_EXLIST(ip, nex, whichfork);
		if (whichfork != XFS_DATA_FORK ||
			XFS_EXTFMT_INODE(ip) == XFS_EXTFMT_NOSTATE)
				if (unlikely(xfs_check_nostate_extents(
				    ifp, 0, nex))) {
					XFS_ERROR_REPORT("xfs_iformat_extents(2)",
							 XFS_ERRLEVEL_LOW,
							 ip->i_mount);
					return XFS_ERROR(EFSCORRUPTED);
				}
	}
	ifp->if_flags |= XFS_IFEXTENTS;
	return 0;
}

/*
 * The file has too many extents to fit into
 * the inode, so they are in B-tree format.
 * Allocate a buffer for the root of the B-tree
 * and copy the root into it.  The i_extents
 * field will remain NULL until all of the
 * extents are read in (when they are needed).
 */
STATIC int
xfs_iformat_btree(
	xfs_inode_t		*ip,
	xfs_dinode_t		*dip,
	int			whichfork)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_bmdr_block_t	*dfp;
	xfs_ifork_t		*ifp;
	/* REFERENCED */
	int			nrecs;
	int			size;

	ifp = XFS_IFORK_PTR(ip, whichfork);
	dfp = (xfs_bmdr_block_t *)XFS_DFORK_PTR(dip, whichfork);
	size = XFS_BMAP_BROOT_SPACE(mp, dfp);
	nrecs = be16_to_cpu(dfp->bb_numrecs);

	/*
	 * blow out if -- fork has fewer extents than can fit in
	 * fork (fork shouldn't be a btree format), root btree
	 * block has more records than can fit into the fork,
	 * or the number of extents is greater than the number of
	 * blocks.
	 */
	if (unlikely(XFS_IFORK_NEXTENTS(ip, whichfork) <=
					XFS_IFORK_MAXEXT(ip, whichfork) ||
		     XFS_BMDR_SPACE_CALC(nrecs) >
					XFS_DFORK_SIZE(dip, mp, whichfork) ||
		     XFS_IFORK_NEXTENTS(ip, whichfork) > ip->i_d.di_nblocks)) {
		xfs_warn(mp, "corrupt inode %Lu (btree).",
					(unsigned long long) ip->i_ino);
		XFS_CORRUPTION_ERROR("xfs_iformat_btree", XFS_ERRLEVEL_LOW,
				     mp, dip);
		return XFS_ERROR(EFSCORRUPTED);
	}

	ifp->if_broot_bytes = size;
	ifp->if_broot = kmem_alloc(size, KM_SLEEP | KM_NOFS);
	ASSERT(ifp->if_broot != NULL);
	/*
	 * Copy and convert from the on-disk structure
	 * to the in-memory structure.
	 */
	xfs_bmdr_to_bmbt(ip, dfp, XFS_DFORK_SIZE(dip, ip->i_mount, whichfork),
			 ifp->if_broot, size);
	ifp->if_flags &= ~XFS_IFEXTENTS;
	ifp->if_flags |= XFS_IFBROOT;

	return 0;
}

STATIC void
xfs_dinode_from_disk(
	xfs_icdinode_t		*to,
	xfs_dinode_t		*from)
{
	to->di_magic = be16_to_cpu(from->di_magic);
	to->di_mode = be16_to_cpu(from->di_mode);
	to->di_version = from->di_version;
	to->di_format = from->di_format;
	to->di_onlink = be16_to_cpu(from->di_onlink);
	to->di_uid = be32_to_cpu(from->di_uid);
	to->di_gid = be32_to_cpu(from->di_gid);
	to->di_nlink = be32_to_cpu(from->di_nlink);
	to->di_projid_lo = be16_to_cpu(from->di_projid_lo);
	to->di_projid_hi = be16_to_cpu(from->di_projid_hi);
	memcpy(to->di_pad, from->di_pad, sizeof(to->di_pad));
	to->di_flushiter = be16_to_cpu(from->di_flushiter);
	to->di_atime.t_sec = be32_to_cpu(from->di_atime.t_sec);
	to->di_atime.t_nsec = be32_to_cpu(from->di_atime.t_nsec);
	to->di_mtime.t_sec = be32_to_cpu(from->di_mtime.t_sec);
	to->di_mtime.t_nsec = be32_to_cpu(from->di_mtime.t_nsec);
	to->di_ctime.t_sec = be32_to_cpu(from->di_ctime.t_sec);
	to->di_ctime.t_nsec = be32_to_cpu(from->di_ctime.t_nsec);
	to->di_size = be64_to_cpu(from->di_size);
	to->di_nblocks = be64_to_cpu(from->di_nblocks);
	to->di_extsize = be32_to_cpu(from->di_extsize);
	to->di_nextents = be32_to_cpu(from->di_nextents);
	to->di_anextents = be16_to_cpu(from->di_anextents);
	to->di_forkoff = from->di_forkoff;
	to->di_aformat	= from->di_aformat;
	to->di_dmevmask	= be32_to_cpu(from->di_dmevmask);
	to->di_dmstate	= be16_to_cpu(from->di_dmstate);
	to->di_flags	= be16_to_cpu(from->di_flags);
	to->di_gen	= be32_to_cpu(from->di_gen);

	if (to->di_version == 3) {
		to->di_changecount = be64_to_cpu(from->di_changecount);
		to->di_crtime.t_sec = be32_to_cpu(from->di_crtime.t_sec);
		to->di_crtime.t_nsec = be32_to_cpu(from->di_crtime.t_nsec);
		to->di_flags2 = be64_to_cpu(from->di_flags2);
		to->di_ino = be64_to_cpu(from->di_ino);
		to->di_lsn = be64_to_cpu(from->di_lsn);
		memcpy(to->di_pad2, from->di_pad2, sizeof(to->di_pad2));
		uuid_copy(&to->di_uuid, &from->di_uuid);
	}
}

void
xfs_dinode_to_disk(
	xfs_dinode_t		*to,
	xfs_icdinode_t		*from)
{
	to->di_magic = cpu_to_be16(from->di_magic);
	to->di_mode = cpu_to_be16(from->di_mode);
	to->di_version = from->di_version;
	to->di_format = from->di_format;
	to->di_onlink = cpu_to_be16(from->di_onlink);
	to->di_uid = cpu_to_be32(from->di_uid);
	to->di_gid = cpu_to_be32(from->di_gid);
	to->di_nlink = cpu_to_be32(from->di_nlink);
	to->di_projid_lo = cpu_to_be16(from->di_projid_lo);
	to->di_projid_hi = cpu_to_be16(from->di_projid_hi);
	memcpy(to->di_pad, from->di_pad, sizeof(to->di_pad));
	to->di_flushiter = cpu_to_be16(from->di_flushiter);
	to->di_atime.t_sec = cpu_to_be32(from->di_atime.t_sec);
	to->di_atime.t_nsec = cpu_to_be32(from->di_atime.t_nsec);
	to->di_mtime.t_sec = cpu_to_be32(from->di_mtime.t_sec);
	to->di_mtime.t_nsec = cpu_to_be32(from->di_mtime.t_nsec);
	to->di_ctime.t_sec = cpu_to_be32(from->di_ctime.t_sec);
	to->di_ctime.t_nsec = cpu_to_be32(from->di_ctime.t_nsec);
	to->di_size = cpu_to_be64(from->di_size);
	to->di_nblocks = cpu_to_be64(from->di_nblocks);
	to->di_extsize = cpu_to_be32(from->di_extsize);
	to->di_nextents = cpu_to_be32(from->di_nextents);
	to->di_anextents = cpu_to_be16(from->di_anextents);
	to->di_forkoff = from->di_forkoff;
	to->di_aformat = from->di_aformat;
	to->di_dmevmask = cpu_to_be32(from->di_dmevmask);
	to->di_dmstate = cpu_to_be16(from->di_dmstate);
	to->di_flags = cpu_to_be16(from->di_flags);
	to->di_gen = cpu_to_be32(from->di_gen);

	if (from->di_version == 3) {
		to->di_changecount = cpu_to_be64(from->di_changecount);
		to->di_crtime.t_sec = cpu_to_be32(from->di_crtime.t_sec);
		to->di_crtime.t_nsec = cpu_to_be32(from->di_crtime.t_nsec);
		to->di_flags2 = cpu_to_be64(from->di_flags2);
		to->di_ino = cpu_to_be64(from->di_ino);
		to->di_lsn = cpu_to_be64(from->di_lsn);
		memcpy(to->di_pad2, from->di_pad2, sizeof(to->di_pad2));
		uuid_copy(&to->di_uuid, &from->di_uuid);
	}
}

STATIC uint
_xfs_dic2xflags(
	__uint16_t		di_flags)
{
	uint			flags = 0;

	if (di_flags & XFS_DIFLAG_ANY) {
		if (di_flags & XFS_DIFLAG_REALTIME)
			flags |= XFS_XFLAG_REALTIME;
		if (di_flags & XFS_DIFLAG_PREALLOC)
			flags |= XFS_XFLAG_PREALLOC;
		if (di_flags & XFS_DIFLAG_IMMUTABLE)
			flags |= XFS_XFLAG_IMMUTABLE;
		if (di_flags & XFS_DIFLAG_APPEND)
			flags |= XFS_XFLAG_APPEND;
		if (di_flags & XFS_DIFLAG_SYNC)
			flags |= XFS_XFLAG_SYNC;
		if (di_flags & XFS_DIFLAG_NOATIME)
			flags |= XFS_XFLAG_NOATIME;
		if (di_flags & XFS_DIFLAG_NODUMP)
			flags |= XFS_XFLAG_NODUMP;
		if (di_flags & XFS_DIFLAG_RTINHERIT)
			flags |= XFS_XFLAG_RTINHERIT;
		if (di_flags & XFS_DIFLAG_PROJINHERIT)
			flags |= XFS_XFLAG_PROJINHERIT;
		if (di_flags & XFS_DIFLAG_NOSYMLINKS)
			flags |= XFS_XFLAG_NOSYMLINKS;
		if (di_flags & XFS_DIFLAG_EXTSIZE)
			flags |= XFS_XFLAG_EXTSIZE;
		if (di_flags & XFS_DIFLAG_EXTSZINHERIT)
			flags |= XFS_XFLAG_EXTSZINHERIT;
		if (di_flags & XFS_DIFLAG_NODEFRAG)
			flags |= XFS_XFLAG_NODEFRAG;
		if (di_flags & XFS_DIFLAG_FILESTREAM)
			flags |= XFS_XFLAG_FILESTREAM;
	}

	return flags;
}

uint
xfs_ip2xflags(
	xfs_inode_t		*ip)
{
	xfs_icdinode_t		*dic = &ip->i_d;

	return _xfs_dic2xflags(dic->di_flags) |
				(XFS_IFORK_Q(ip) ? XFS_XFLAG_HASATTR : 0);
}

uint
xfs_dic2xflags(
	xfs_dinode_t		*dip)
{
	return _xfs_dic2xflags(be16_to_cpu(dip->di_flags)) |
				(XFS_DFORK_Q(dip) ? XFS_XFLAG_HASATTR : 0);
}

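/*
 * Illustrative sketch, not part of the original file: a hypothetical
 * consumer testing the translated flags, for example while filling a
 * bulkstat or fsxattr-style structure:
 *
 *	uint	xflags = xfs_ip2xflags(ip);
 *
 *	if (xflags & XFS_XFLAG_REALTIME)
 *		(the file data lives on the realtime device)
 */
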
static bool
xfs_dinode_verify(
	struct xfs_mount	*mp,
	struct xfs_inode	*ip,
	struct xfs_dinode	*dip)
{
	if (dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC))
		return false;

	/* only version 3 or greater inodes are extensively verified here */
	if (dip->di_version < 3)
		return true;

	if (!xfs_sb_version_hascrc(&mp->m_sb))
		return false;
	if (!xfs_verify_cksum((char *)dip, mp->m_sb.sb_inodesize,
			      offsetof(struct xfs_dinode, di_crc)))
		return false;
	if (be64_to_cpu(dip->di_ino) != ip->i_ino)
		return false;
	if (!uuid_equal(&dip->di_uuid, &mp->m_sb.sb_uuid))
		return false;
	return true;
}

void
xfs_dinode_calc_crc(
	struct xfs_mount	*mp,
	struct xfs_dinode	*dip)
{
	__uint32_t		crc;

	if (dip->di_version < 3)
		return;

	ASSERT(xfs_sb_version_hascrc(&mp->m_sb));
	crc = xfs_start_cksum((char *)dip, mp->m_sb.sb_inodesize,
			      offsetof(struct xfs_dinode, di_crc));
	dip->di_crc = xfs_end_cksum(crc);
}

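/*
 * Illustrative sketch, not part of the original file: the verify/calc
 * pairing for v3 inodes. xfs_dinode_verify() checks the CRC on read, so
 * any path that modifies an on-disk inode in its buffer must recompute
 * it before logging, as the unlinked-list code below does:
 *
 *	dip->di_next_unlinked = cpu_to_be32(NULLAGINO);
 *	xfs_dinode_calc_crc(mp, dip);
 *	xfs_trans_log_buf(tp, ibp, offset,
 *			  offset + sizeof(xfs_agino_t) - 1);
 */
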
/*
 * Read the disk inode attributes into the in-core inode structure.
 */
int
xfs_iread(
	xfs_mount_t	*mp,
	xfs_trans_t	*tp,
	xfs_inode_t	*ip,
	uint		iget_flags)
{
	xfs_buf_t	*bp;
	xfs_dinode_t	*dip;
	int		error;

	/*
	 * Fill in the location information in the in-core inode.
	 */
	error = xfs_imap(mp, tp, ip->i_ino, &ip->i_imap, iget_flags);
	if (error)
		return error;

	/*
	 * Get pointers to the on-disk inode and the buffer containing it.
	 */
	error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &bp, 0, iget_flags);
	if (error)
		return error;

	/* even unallocated inodes are verified */
	if (!xfs_dinode_verify(mp, ip, dip)) {
		xfs_alert(mp, "%s: validation failed for inode %lld",
			__func__, ip->i_ino);

		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, dip);
		error = XFS_ERROR(EFSCORRUPTED);
		goto out_brelse;
	}

	/*
	 * If the on-disk inode is already linked to a directory
	 * entry, copy all of the inode into the in-core inode.
	 * xfs_iformat() handles copying in the inode format
	 * specific information.
	 * Otherwise, just get the truly permanent information.
	 */
	if (dip->di_mode) {
		xfs_dinode_from_disk(&ip->i_d, dip);
		error = xfs_iformat(ip, dip);
		if (error)  {
#ifdef DEBUG
			xfs_alert(mp, "%s: xfs_iformat() returned error %d",
				__func__, error);
#endif /* DEBUG */
			goto out_brelse;
		}
	} else {
		/*
		 * Partial initialisation of the in-core inode. Just the bits
		 * that xfs_ialloc won't overwrite or relies on being correct.
		 */
		ip->i_d.di_magic = be16_to_cpu(dip->di_magic);
		ip->i_d.di_version = dip->di_version;
		ip->i_d.di_gen = be32_to_cpu(dip->di_gen);
		ip->i_d.di_flushiter = be16_to_cpu(dip->di_flushiter);

		if (dip->di_version == 3) {
			ip->i_d.di_ino = be64_to_cpu(dip->di_ino);
			uuid_copy(&ip->i_d.di_uuid, &dip->di_uuid);
		}

		/*
		 * Make sure to pull in the mode here as well in
		 * case the inode is released without being used.
		 * This ensures that xfs_inactive() will see that
		 * the inode is already free and not try to mess
		 * with the uninitialized part of it.
		 */
		ip->i_d.di_mode = 0;
	}

	/*
	 * The inode format changed when we moved the link count and
	 * made it 32 bits long.  If this is an old format inode,
	 * convert it in memory to look like a new one.  If it gets
	 * flushed to disk we will convert back before flushing or
	 * logging it.  We zero out the new projid field and the old link
	 * count field.  We'll handle clearing the pad field (the remains
	 * of the old uuid field) when we actually convert the inode to
	 * the new format. We don't change the version number so that we
	 * can distinguish this from a real new format inode.
	 */
	if (ip->i_d.di_version == 1) {
		ip->i_d.di_nlink = ip->i_d.di_onlink;
		ip->i_d.di_onlink = 0;
		xfs_set_projid(ip, 0);
	}

	ip->i_delayed_blks = 0;

	/*
	 * Mark the buffer containing the inode as something to keep
	 * around for a while.  This helps to keep recently accessed
	 * meta-data in-core longer.
	 */
	xfs_buf_set_ref(bp, XFS_INO_REF);

	/*
	 * Use xfs_trans_brelse() to release the buffer containing the
	 * on-disk inode, because it was acquired with xfs_trans_read_buf()
	 * in xfs_imap_to_bp() above.  If tp is NULL, this is just a normal
	 * brelse().  If we're within a transaction, then xfs_trans_brelse()
	 * will only release the buffer if it is not dirty within the
	 * transaction.  It will be OK to release the buffer in this case,
	 * because inodes on disk are never destroyed and we will be
	 * locking the new in-core inode before putting it in the hash
	 * table where other processes can find it.  Thus we don't have
	 * to worry about the inode being changed just because we released
	 * the buffer.
	 */
 out_brelse:
	xfs_trans_brelse(tp, bp);
	return error;
}

/*
 * Read in extents from a btree-format inode.
 * Allocate and fill in if_extents.  Real work is done in xfs_bmap.c.
 */
int
xfs_iread_extents(
	xfs_trans_t	*tp,
	xfs_inode_t	*ip,
	int		whichfork)
{
	int		error;
	xfs_ifork_t	*ifp;
	xfs_extnum_t	nextents;

	if (unlikely(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) {
		XFS_ERROR_REPORT("xfs_iread_extents", XFS_ERRLEVEL_LOW,
				 ip->i_mount);
		return XFS_ERROR(EFSCORRUPTED);
	}
	nextents = XFS_IFORK_NEXTENTS(ip, whichfork);
	ifp = XFS_IFORK_PTR(ip, whichfork);

	/*
	 * We know that the size is valid (it's checked in iformat_btree)
	 */
	ifp->if_bytes = ifp->if_real_bytes = 0;
	ifp->if_flags |= XFS_IFEXTENTS;
	xfs_iext_add(ifp, 0, nextents);
	error = xfs_bmap_read_extents(tp, ip, whichfork);
	if (error) {
		xfs_iext_destroy(ifp);
		ifp->if_flags &= ~XFS_IFEXTENTS;
		return error;
	}
	xfs_validate_extents(ifp, nextents, XFS_EXTFMT_INODE(ip));
	return 0;
}

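/*
 * Illustrative sketch, not part of the original file: the lazy-read
 * pattern used by callers. Extents of a btree-format fork are only
 * pulled in the first time they are needed:
 *
 *	ifp = XFS_IFORK_PTR(ip, whichfork);
 *	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
 *		error = xfs_iread_extents(tp, ip, whichfork);
 *		if (error)
 *			return error;
 *	}
 */
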
/*
 * Allocate an inode on disk and return a copy of its in-core version.
 * The in-core inode is locked exclusively.  Set mode, nlink, and rdev
 * appropriately within the inode.  The uid and gid for the inode are
 * set according to the contents of the given cred structure.
 *
 * Use xfs_dialloc() to allocate the on-disk inode. If xfs_dialloc()
 * has a free inode available, call xfs_iget() to obtain the in-core
 * version of the allocated inode.  Finally, fill in the inode and
 * log its initial contents.  In this case, ialloc_context would be
 * set to NULL.
 *
 * If xfs_dialloc() does not have an available inode, it will replenish
 * its supply by doing an allocation. Since we can only do one
 * allocation within a transaction without deadlocks, we must commit
 * the current transaction before returning the inode itself.
 * In this case, therefore, we will set ialloc_context and return.
 * The caller should then commit the current transaction, start a new
 * transaction, and call xfs_ialloc() again to actually get the inode.
 *
 * To ensure that some other process does not grab the inode that
 * was allocated during the first call to xfs_ialloc(), this routine
 * also returns the [locked] bp pointing to the head of the freelist
 * as ialloc_context.  The caller should hold this buffer across
 * the commit and pass it back into this routine on the second call.
 *
 * If we are allocating quota inodes, we do not have a parent inode
 * to attach to or associate with (i.e. pip == NULL) because they
 * are not linked into the directory structure - they are attached
 * directly to the superblock - and so have no parent.
 */
int
xfs_ialloc(
	xfs_trans_t	*tp,
	xfs_inode_t	*pip,
	umode_t		mode,
	xfs_nlink_t	nlink,
	xfs_dev_t	rdev,
	prid_t		prid,
	int		okalloc,
	xfs_buf_t	**ialloc_context,
	xfs_inode_t	**ipp)
{
	struct xfs_mount *mp = tp->t_mountp;
	xfs_ino_t	ino;
	xfs_inode_t	*ip;
	uint		flags;
	int		error;
	timespec_t	tv;
	int		filestreams = 0;

	/*
	 * Call the space management code to pick
	 * the on-disk inode to be allocated.
	 */
	error = xfs_dialloc(tp, pip ? pip->i_ino : 0, mode, okalloc,
			    ialloc_context, &ino);
	if (error)
		return error;
	if (*ialloc_context || ino == NULLFSINO) {
		*ipp = NULL;
		return 0;
	}
	ASSERT(*ialloc_context == NULL);

	/*
	 * Get the in-core inode with the lock held exclusively.
	 * This is because we're setting fields here we need
	 * to prevent others from looking at until we're done.
	 */
	error = xfs_iget(mp, tp, ino, XFS_IGET_CREATE,
			 XFS_ILOCK_EXCL, &ip);
	if (error)
		return error;
	ASSERT(ip != NULL);

	ip->i_d.di_mode = mode;
	ip->i_d.di_onlink = 0;
	ip->i_d.di_nlink = nlink;
	ASSERT(ip->i_d.di_nlink == nlink);
	ip->i_d.di_uid = current_fsuid();
	ip->i_d.di_gid = current_fsgid();
	xfs_set_projid(ip, prid);
	memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad));

	/*
	 * If the superblock version supports new format inodes and this
	 * is currently an old format inode, then change the inode version
	 * number now.  This way we only do the conversion here rather
	 * than here and in the flush/logging code.
	 */
	if (xfs_sb_version_hasnlink(&mp->m_sb) &&
	    ip->i_d.di_version == 1) {
		ip->i_d.di_version = 2;
		/*
		 * We've already zeroed the old link count, the projid field,
		 * and the pad field.
		 */
	}

	/*
	 * Project ids won't be stored on disk if we are using a version 1 inode.
	 */
	if ((prid != 0) && (ip->i_d.di_version == 1))
		xfs_bump_ino_vers2(tp, ip);

	if (pip && XFS_INHERIT_GID(pip)) {
		ip->i_d.di_gid = pip->i_d.di_gid;
		if ((pip->i_d.di_mode & S_ISGID) && S_ISDIR(mode)) {
			ip->i_d.di_mode |= S_ISGID;
		}
	}

	/*
	 * If the group ID of the new file does not match the effective group
	 * ID or one of the supplementary group IDs, the S_ISGID bit is cleared
	 * (and only if the irix_sgid_inherit compatibility variable is set).
	 */
	if ((irix_sgid_inherit) &&
	    (ip->i_d.di_mode & S_ISGID) &&
	    (!in_group_p((gid_t)ip->i_d.di_gid))) {
		ip->i_d.di_mode &= ~S_ISGID;
	}

	ip->i_d.di_size = 0;
	ip->i_d.di_nextents = 0;
	ASSERT(ip->i_d.di_nblocks == 0);

	nanotime(&tv);
	ip->i_d.di_mtime.t_sec = (__int32_t)tv.tv_sec;
	ip->i_d.di_mtime.t_nsec = (__int32_t)tv.tv_nsec;
	ip->i_d.di_atime = ip->i_d.di_mtime;
	ip->i_d.di_ctime = ip->i_d.di_mtime;

	/*
	 * di_gen will have been taken care of in xfs_iread.
	 */
	ip->i_d.di_extsize = 0;
	ip->i_d.di_dmevmask = 0;
	ip->i_d.di_dmstate = 0;
	ip->i_d.di_flags = 0;

	if (ip->i_d.di_version == 3) {
		ASSERT(ip->i_d.di_ino == ino);
		ASSERT(uuid_equal(&ip->i_d.di_uuid, &mp->m_sb.sb_uuid));
		ip->i_d.di_crc = 0;
		ip->i_d.di_changecount = 1;
		ip->i_d.di_lsn = 0;
		ip->i_d.di_flags2 = 0;
		memset(&(ip->i_d.di_pad2[0]), 0, sizeof(ip->i_d.di_pad2));
		ip->i_d.di_crtime = ip->i_d.di_mtime;
	}

	flags = XFS_ILOG_CORE;
	switch (mode & S_IFMT) {
	case S_IFIFO:
	case S_IFCHR:
	case S_IFBLK:
	case S_IFSOCK:
		ip->i_d.di_format = XFS_DINODE_FMT_DEV;
		ip->i_df.if_u2.if_rdev = rdev;
		ip->i_df.if_flags = 0;
		flags |= XFS_ILOG_DEV;
		break;
	case S_IFREG:
		/*
		 * we can't set up filestreams until after the VFS inode
		 * is set up properly.
		 */
		if (pip && xfs_inode_is_filestream(pip))
			filestreams = 1;
		/* fall through */
	case S_IFDIR:
		if (pip && (pip->i_d.di_flags & XFS_DIFLAG_ANY)) {
			uint	di_flags = 0;

			if (S_ISDIR(mode)) {
				if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT)
					di_flags |= XFS_DIFLAG_RTINHERIT;
				if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
					di_flags |= XFS_DIFLAG_EXTSZINHERIT;
					ip->i_d.di_extsize = pip->i_d.di_extsize;
				}
			} else if (S_ISREG(mode)) {
				if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT)
					di_flags |= XFS_DIFLAG_REALTIME;
				if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
					di_flags |= XFS_DIFLAG_EXTSIZE;
					ip->i_d.di_extsize = pip->i_d.di_extsize;
				}
			}
			if ((pip->i_d.di_flags & XFS_DIFLAG_NOATIME) &&
			    xfs_inherit_noatime)
				di_flags |= XFS_DIFLAG_NOATIME;
			if ((pip->i_d.di_flags & XFS_DIFLAG_NODUMP) &&
			    xfs_inherit_nodump)
				di_flags |= XFS_DIFLAG_NODUMP;
			if ((pip->i_d.di_flags & XFS_DIFLAG_SYNC) &&
			    xfs_inherit_sync)
				di_flags |= XFS_DIFLAG_SYNC;
			if ((pip->i_d.di_flags & XFS_DIFLAG_NOSYMLINKS) &&
			    xfs_inherit_nosymlinks)
				di_flags |= XFS_DIFLAG_NOSYMLINKS;
			if (pip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT)
				di_flags |= XFS_DIFLAG_PROJINHERIT;
			if ((pip->i_d.di_flags & XFS_DIFLAG_NODEFRAG) &&
			    xfs_inherit_nodefrag)
				di_flags |= XFS_DIFLAG_NODEFRAG;
			if (pip->i_d.di_flags & XFS_DIFLAG_FILESTREAM)
				di_flags |= XFS_DIFLAG_FILESTREAM;
			ip->i_d.di_flags |= di_flags;
		}
		/* FALLTHROUGH */
	case S_IFLNK:
		ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS;
		ip->i_df.if_flags = XFS_IFEXTENTS;
		ip->i_df.if_bytes = ip->i_df.if_real_bytes = 0;
		ip->i_df.if_u1.if_extents = NULL;
		break;
	default:
		ASSERT(0);
	}
	/*
	 * Attribute fork settings for new inode.
	 */
	ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
	ip->i_d.di_anextents = 0;

	/*
	 * Log the new values stuffed into the inode.
	 */
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, flags);

	/* now that we have an i_mode we can setup inode ops and unlock */
	xfs_setup_inode(ip);

	/* now we have set up the vfs inode we can associate the filestream */
	if (filestreams) {
		error = xfs_filestream_associate(pip, ip);
		if (error < 0)
			return -error;
		if (!error)
			xfs_iflags_set(ip, XFS_IFILESTREAM);
	}

	*ipp = ip;
	return 0;
}

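/*
 * Illustrative sketch, not part of the original file: the two-phase
 * retry loop described in the comment above xfs_ialloc(). A caller
 * holds the returned ialloc_context buffer across a transaction roll
 * and then calls back in; compare xfs_dir_ialloc() in xfs_utils.c.
 *
 *	error = xfs_ialloc(tp, dp, mode, nlink, rdev, prid, okalloc,
 *			   &ialloc_context, &ip);
 *	if (!error && ip == NULL && ialloc_context) {
 *		(commit tp while holding ialloc_context; start a new tp)
 *		error = xfs_ialloc(tp, dp, mode, nlink, rdev, prid,
 *				   okalloc, &ialloc_context, &ip);
 *	}
 */
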
/*
 * Free up the underlying blocks past new_size.  The new size must be smaller
 * than the current size.  This routine can be used both for the attribute and
 * data fork, and does not modify the inode size, which is left to the caller.
 *
 * The transaction passed to this routine must have made a permanent log
 * reservation of at least XFS_ITRUNCATE_LOG_RES.  This routine may commit the
 * given transaction and start new ones, so make sure everything involved in
 * the transaction is tidy before calling here.  Some transaction will be
 * returned to the caller to be committed.  The incoming transaction must
 * already include the inode, and both inode locks must be held exclusively.
 * The inode must also be "held" within the transaction.  On return the inode
 * will be "held" within the returned transaction.  This routine does NOT
 * require any disk space to be reserved for it within the transaction.
 *
 * If we get an error, we must return with the inode locked and linked into the
 * current transaction. This keeps things simple for the higher level code,
 * because it always knows that the inode is locked and held in the transaction
 * that returns to it whether errors occur or not.  We don't mark the inode
 * dirty on error so that transactions can be easily aborted if possible.
 */
int
xfs_itruncate_extents(
	struct xfs_trans	**tpp,
	struct xfs_inode	*ip,
	int			whichfork,
	xfs_fsize_t		new_size)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp = *tpp;
	struct xfs_trans	*ntp;
	xfs_bmap_free_t		free_list;
	xfs_fsblock_t		first_block;
	xfs_fileoff_t		first_unmap_block;
	xfs_fileoff_t		last_block;
	xfs_filblks_t		unmap_len;
	int			committed;
	int			error = 0;
	int			done = 0;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(!atomic_read(&VFS_I(ip)->i_count) ||
	       xfs_isilocked(ip, XFS_IOLOCK_EXCL));
	ASSERT(new_size <= XFS_ISIZE(ip));
	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
	ASSERT(ip->i_itemp != NULL);
	ASSERT(ip->i_itemp->ili_lock_flags == 0);
	ASSERT(!XFS_NOT_DQATTACHED(mp, ip));

	trace_xfs_itruncate_extents_start(ip, new_size);

	/*
	 * Since it is possible for space to become allocated beyond
	 * the end of the file (in a crash where the space is allocated
	 * but the inode size is not yet updated), simply remove any
	 * blocks which show up between the new EOF and the maximum
	 * possible file size.  If the first block to be removed is
	 * beyond the maximum file size (ie it is the same as last_block),
	 * then there is nothing to do.
	 */
	first_unmap_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size);
	last_block = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
	if (first_unmap_block == last_block)
		return 0;

	ASSERT(first_unmap_block < last_block);
	unmap_len = last_block - first_unmap_block + 1;
	while (!done) {
		xfs_bmap_init(&free_list, &first_block);
		error = xfs_bunmapi(tp, ip,
				    first_unmap_block, unmap_len,
				    xfs_bmapi_aflag(whichfork),
				    XFS_ITRUNC_MAX_EXTENTS,
				    &first_block, &free_list,
				    &done);
		if (error)
			goto out_bmap_cancel;

		/*
		 * Duplicate the transaction that has the permanent
		 * reservation and commit the old transaction.
		 */
		error = xfs_bmap_finish(&tp, &free_list, &committed);
		if (committed)
			xfs_trans_ijoin(tp, ip, 0);
		if (error)
			goto out_bmap_cancel;

		if (committed) {
			/*
			 * Mark the inode dirty so it will be logged and
			 * moved forward in the log as part of every commit.
			 */
			xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
		}

		ntp = xfs_trans_dup(tp);
		error = xfs_trans_commit(tp, 0);
		tp = ntp;

		xfs_trans_ijoin(tp, ip, 0);

		if (error)
			goto out;

		/*
		 * Transaction commit worked ok so we can drop the extra ticket
		 * reference that we gained in xfs_trans_dup()
		 */
		xfs_log_ticket_put(tp->t_ticket);
		error = xfs_trans_reserve(tp, 0,
					XFS_ITRUNCATE_LOG_RES(mp), 0,
					XFS_TRANS_PERM_LOG_RES,
					XFS_ITRUNCATE_LOG_COUNT);
		if (error)
			goto out;
	}

	/*
	 * Always re-log the inode so that our permanent transaction can keep
	 * on rolling it forward in the log.
	 */
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	trace_xfs_itruncate_extents_end(ip, new_size);

out:
	*tpp = tp;
	return error;
out_bmap_cancel:
	/*
	 * If the bunmapi call encounters an error, return to the caller where
	 * the transaction can be properly aborted.  We just need to make sure
	 * we're not holding any resources that we were not when we came in.
	 */
	xfs_bmap_cancel(&free_list);
	goto out;
}

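/*
 * Illustrative sketch, not part of the original file: a hypothetical
 * truncate-to-zero caller honouring the contract above. The inode is
 * locked and joined, and the transaction handed back through tpp may
 * differ from the one passed in:
 *
 *	xfs_ilock(ip, XFS_ILOCK_EXCL);
 *	xfs_trans_ijoin(tp, ip, 0);
 *	error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, 0);
 *	(commit or cancel tp, then unlock ip)
 */
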
/*
 * This is called when the inode's link count goes to 0.
 * We place the on-disk inode on a list in the AGI.  It
 * will be pulled from this list when the inode is freed.
 */
int
xfs_iunlink(
	xfs_trans_t	*tp,
	xfs_inode_t	*ip)
{
	xfs_mount_t	*mp;
	xfs_agi_t	*agi;
	xfs_dinode_t	*dip;
	xfs_buf_t	*agibp;
	xfs_buf_t	*ibp;
	xfs_agino_t	agino;
	short		bucket_index;
	int		offset;
	int		error;

	ASSERT(ip->i_d.di_nlink == 0);
	ASSERT(ip->i_d.di_mode != 0);

	mp = tp->t_mountp;

	/*
	 * Get the agi buffer first.  It ensures lock ordering
	 * on the list.
	 */
	error = xfs_read_agi(mp, tp, XFS_INO_TO_AGNO(mp, ip->i_ino), &agibp);
	if (error)
		return error;
	agi = XFS_BUF_TO_AGI(agibp);

	/*
	 * Get the index into the agi hash table for the
	 * list this inode will go on.
	 */
	agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
	ASSERT(agino != 0);
	bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
	ASSERT(agi->agi_unlinked[bucket_index]);
	ASSERT(be32_to_cpu(agi->agi_unlinked[bucket_index]) != agino);

	if (agi->agi_unlinked[bucket_index] != cpu_to_be32(NULLAGINO)) {
		/*
		 * There is already another inode in the bucket we need
		 * to add ourselves to.  Add us at the front of the list.
		 * Here we put the head pointer into our next pointer,
		 * and then we fall through to point the head at us.
		 */
		error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &ibp,
				       0, 0);
		if (error)
			return error;

		ASSERT(dip->di_next_unlinked == cpu_to_be32(NULLAGINO));
		dip->di_next_unlinked = agi->agi_unlinked[bucket_index];
		offset = ip->i_imap.im_boffset +
			 offsetof(xfs_dinode_t, di_next_unlinked);

		/* need to recalc the inode CRC if appropriate */
		xfs_dinode_calc_crc(mp, dip);

		xfs_trans_inode_buf(tp, ibp);
		xfs_trans_log_buf(tp, ibp, offset,
				  (offset + sizeof(xfs_agino_t) - 1));
		xfs_inobp_check(mp, ibp);
	}

	/*
	 * Point the bucket head pointer at the inode being inserted.
	 */
	ASSERT(agino != 0);
	agi->agi_unlinked[bucket_index] = cpu_to_be32(agino);
	offset = offsetof(xfs_agi_t, agi_unlinked) +
		 (sizeof(xfs_agino_t) * bucket_index);
	xfs_trans_log_buf(tp, agibp, offset,
			  (offset + sizeof(xfs_agino_t) - 1));
	return 0;
}

/*
 * Pull the on-disk inode from the AGI unlinked list.
 */
1666 STATIC int
1667 xfs_iunlink_remove(
1668 xfs_trans_t *tp,
1669 xfs_inode_t *ip)
1670 {
1671 xfs_ino_t next_ino;
1672 xfs_mount_t *mp;
1673 xfs_agi_t *agi;
1674 xfs_dinode_t *dip;
1675 xfs_buf_t *agibp;
1676 xfs_buf_t *ibp;
1677 xfs_agnumber_t agno;
1678 xfs_agino_t agino;
1679 xfs_agino_t next_agino;
1680 xfs_buf_t *last_ibp;
1681 xfs_dinode_t *last_dip = NULL;
1682 short bucket_index;
1683 int offset, last_offset = 0;
1684 int error;
1685
1686 mp = tp->t_mountp;
1687 agno = XFS_INO_TO_AGNO(mp, ip->i_ino);
1688
1689 /*
1690 * Get the agi buffer first. It ensures lock ordering
1691 * on the list.
1692 */
1693 error = xfs_read_agi(mp, tp, agno, &agibp);
1694 if (error)
1695 return error;
1696
1697 agi = XFS_BUF_TO_AGI(agibp);
1698
1699 /*
1700 * Get the index into the agi hash table for the
1701 * list this inode will go on.
1702 */
1703 agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
1704 ASSERT(agino != 0);
1705 bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
1706 ASSERT(agi->agi_unlinked[bucket_index] != cpu_to_be32(NULLAGINO));
1707 ASSERT(agi->agi_unlinked[bucket_index]);
1708
1709 if (be32_to_cpu(agi->agi_unlinked[bucket_index]) == agino) {
1710 /*
1711 * We're at the head of the list. Get the inode's on-disk
1712 * buffer to see if there is anyone after us on the list.
1713 * Only modify our next pointer if it is not already NULLAGINO.
1714 * This saves us the overhead of dealing with the buffer when
1715 * there is no need to change it.
1716 */
1717 error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &ibp,
1718 0, 0);
1719 if (error) {
1720 xfs_warn(mp, "%s: xfs_imap_to_bp returned error %d.",
1721 __func__, error);
1722 return error;
1723 }
1724 next_agino = be32_to_cpu(dip->di_next_unlinked);
1725 ASSERT(next_agino != 0);
1726 if (next_agino != NULLAGINO) {
1727 dip->di_next_unlinked = cpu_to_be32(NULLAGINO);
1728 offset = ip->i_imap.im_boffset +
1729 offsetof(xfs_dinode_t, di_next_unlinked);
1730
1731 /* need to recalc the inode CRC if appropriate */
1732 xfs_dinode_calc_crc(mp, dip);
1733
1734 xfs_trans_inode_buf(tp, ibp);
1735 xfs_trans_log_buf(tp, ibp, offset,
1736 (offset + sizeof(xfs_agino_t) - 1));
1737 xfs_inobp_check(mp, ibp);
1738 } else {
1739 xfs_trans_brelse(tp, ibp);
1740 }
1741 /*
1742 * Point the bucket head pointer at the next inode.
1743 */
1744 ASSERT(next_agino != 0);
1745 ASSERT(next_agino != agino);
1746 agi->agi_unlinked[bucket_index] = cpu_to_be32(next_agino);
1747 offset = offsetof(xfs_agi_t, agi_unlinked) +
1748 (sizeof(xfs_agino_t) * bucket_index);
1749 xfs_trans_log_buf(tp, agibp, offset,
1750 (offset + sizeof(xfs_agino_t) - 1));
1751 } else {
1752 /*
1753 * We need to search the list for the inode being freed.
1754 */
1755 next_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]);
1756 last_ibp = NULL;
1757 while (next_agino != agino) {
1758 struct xfs_imap imap;
1759
1760 if (last_ibp)
1761 xfs_trans_brelse(tp, last_ibp);
1762
1763 imap.im_blkno = 0;
1764 next_ino = XFS_AGINO_TO_INO(mp, agno, next_agino);
1765
1766 error = xfs_imap(mp, tp, next_ino, &imap, 0);
1767 if (error) {
1768 xfs_warn(mp,
1769 "%s: xfs_imap returned error %d.",
1770 __func__, error);
1771 return error;
1772 }
1773
1774 error = xfs_imap_to_bp(mp, tp, &imap, &last_dip,
1775 &last_ibp, 0, 0);
1776 if (error) {
1777 xfs_warn(mp,
1778 "%s: xfs_imap_to_bp returned error %d.",
1779 __func__, error);
1780 return error;
1781 }
1782
1783 last_offset = imap.im_boffset;
1784 next_agino = be32_to_cpu(last_dip->di_next_unlinked);
1785 ASSERT(next_agino != NULLAGINO);
1786 ASSERT(next_agino != 0);
1787 }
1788
1789 /*
1790 * Now last_ibp points to the buffer previous to us on the
1791 * unlinked list. Pull us from the list.
1792 */
1793 error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &ibp,
1794 0, 0);
1795 if (error) {
1796 xfs_warn(mp, "%s: xfs_imap_to_bp(2) returned error %d.",
1797 __func__, error);
1798 return error;
1799 }
1800 next_agino = be32_to_cpu(dip->di_next_unlinked);
1801 ASSERT(next_agino != 0);
1802 ASSERT(next_agino != agino);
1803 if (next_agino != NULLAGINO) {
1804 dip->di_next_unlinked = cpu_to_be32(NULLAGINO);
1805 offset = ip->i_imap.im_boffset +
1806 offsetof(xfs_dinode_t, di_next_unlinked);
1807
1808 /* need to recalc the inode CRC if appropriate */
1809 xfs_dinode_calc_crc(mp, dip);
1810
1811 xfs_trans_inode_buf(tp, ibp);
1812 xfs_trans_log_buf(tp, ibp, offset,
1813 (offset + sizeof(xfs_agino_t) - 1));
1814 xfs_inobp_check(mp, ibp);
1815 } else {
1816 xfs_trans_brelse(tp, ibp);
1817 }
1818 /*
1819 * Point the previous inode on the list to the next inode.
1820 */
1821 last_dip->di_next_unlinked = cpu_to_be32(next_agino);
1822 ASSERT(next_agino != 0);
1823 offset = last_offset + offsetof(xfs_dinode_t, di_next_unlinked);
1824
1825 /* need to recalc the inode CRC if appropriate */
1826 xfs_dinode_calc_crc(mp, last_dip);
1827
1828 xfs_trans_inode_buf(tp, last_ibp);
1829 xfs_trans_log_buf(tp, last_ibp, offset,
1830 (offset + sizeof(xfs_agino_t) - 1));
1831 xfs_inobp_check(mp, last_ibp);
1832 }
1833 return 0;
1834 }
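
/*
 * In essence, the routine above is a classic singly linked list removal,
 * with the links stored as AG inode numbers in di_next_unlinked and the
 * list heads kept in the agi_unlinked[] hash buckets. A minimal in-memory
 * sketch of the same logic, ignoring the buffer management, logging and
 * error handling the real code must do (node() and next stand in for the
 * on-disk dinode and its di_next_unlinked field):
 *
 *	if (head == agino) {				we are the head
 *		head = node(agino)->next;
 *	} else {
 *		prev = head;				linear search
 *		while (node(prev)->next != agino)
 *			prev = node(prev)->next;
 *		node(prev)->next = node(agino)->next;
 *	}
 */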
1835
1836 /*
1837  * A big issue when freeing the inode cluster is that we _cannot_ skip any
1838 * inodes that are in memory - they all must be marked stale and attached to
1839 * the cluster buffer.
1840 */
1841 STATIC int
1842 xfs_ifree_cluster(
1843 xfs_inode_t *free_ip,
1844 xfs_trans_t *tp,
1845 xfs_ino_t inum)
1846 {
1847 xfs_mount_t *mp = free_ip->i_mount;
1848 int blks_per_cluster;
1849 int nbufs;
1850 int ninodes;
1851 int i, j;
1852 xfs_daddr_t blkno;
1853 xfs_buf_t *bp;
1854 xfs_inode_t *ip;
1855 xfs_inode_log_item_t *iip;
1856 xfs_log_item_t *lip;
1857 struct xfs_perag *pag;
1858
1859 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, inum));
1860 if (mp->m_sb.sb_blocksize >= XFS_INODE_CLUSTER_SIZE(mp)) {
1861 blks_per_cluster = 1;
1862 ninodes = mp->m_sb.sb_inopblock;
1863 nbufs = XFS_IALLOC_BLOCKS(mp);
1864 } else {
1865 blks_per_cluster = XFS_INODE_CLUSTER_SIZE(mp) /
1866 mp->m_sb.sb_blocksize;
1867 ninodes = blks_per_cluster * mp->m_sb.sb_inopblock;
1868 nbufs = XFS_IALLOC_BLOCKS(mp) / blks_per_cluster;
1869 }
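
	/*
	 * For example, assuming 4k filesystem blocks, an 8k inode cluster
	 * and 512 byte inodes (sb_inopblock = 8), the else branch above
	 * gives blks_per_cluster = 2 and ninodes = 16; with the usual
	 * 64-inode allocation chunk (8 blocks), nbufs = 8 / 2 = 4, so the
	 * loop below walks four 16-inode cluster buffers per chunk.
	 */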
1870
1871 for (j = 0; j < nbufs; j++, inum += ninodes) {
1872 blkno = XFS_AGB_TO_DADDR(mp, XFS_INO_TO_AGNO(mp, inum),
1873 XFS_INO_TO_AGBNO(mp, inum));
1874
1875 /*
1876 * We obtain and lock the backing buffer first in the process
1877 * here, as we have to ensure that any dirty inode that we
1878 * can't get the flush lock on is attached to the buffer.
1879 * If we scan the in-memory inodes first, then buffer IO can
1880 * complete before we get a lock on it, and hence we may fail
1881 * to mark all the active inodes on the buffer stale.
1882 */
1883 bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno,
1884 mp->m_bsize * blks_per_cluster,
1885 XBF_UNMAPPED);
1886
1887 if (!bp)
1888 return ENOMEM;
1889
1890 /*
1891 * This buffer may not have been correctly initialised as we
1892 * didn't read it from disk. That's not important because we are
1893 		 * only using it to mark the buffer as stale in the log, and to
1894 		 * attach stale cached inodes on it. That means it will never be
1895 		 * dispatched for IO. If it is, we want to know about it, and we
1896 		 * want it to fail. We can achieve this by adding a write
1897 * verifier to the buffer.
1898 */
1899 bp->b_ops = &xfs_inode_buf_ops;
1900
1901 /*
1902 * Walk the inodes already attached to the buffer and mark them
1903 * stale. These will all have the flush locks held, so an
1904 * in-memory inode walk can't lock them. By marking them all
1905 * stale first, we will not attempt to lock them in the loop
1906 * below as the XFS_ISTALE flag will be set.
1907 */
1908 lip = bp->b_fspriv;
1909 while (lip) {
1910 if (lip->li_type == XFS_LI_INODE) {
1911 iip = (xfs_inode_log_item_t *)lip;
1912 ASSERT(iip->ili_logged == 1);
1913 lip->li_cb = xfs_istale_done;
1914 xfs_trans_ail_copy_lsn(mp->m_ail,
1915 &iip->ili_flush_lsn,
1916 &iip->ili_item.li_lsn);
1917 xfs_iflags_set(iip->ili_inode, XFS_ISTALE);
1918 }
1919 lip = lip->li_bio_list;
1920 }
1921
1922
1923 /*
1924 * For each inode in memory attempt to add it to the inode
1925 * buffer and set it up for being staled on buffer IO
1926 * completion. This is safe as we've locked out tail pushing
1927 * and flushing by locking the buffer.
1928 *
1929 * We have already marked every inode that was part of a
1930 * transaction stale above, which means there is no point in
1931 * even trying to lock them.
1932 */
1933 for (i = 0; i < ninodes; i++) {
1934 retry:
1935 rcu_read_lock();
1936 ip = radix_tree_lookup(&pag->pag_ici_root,
1937 XFS_INO_TO_AGINO(mp, (inum + i)));
1938
1939 /* Inode not in memory, nothing to do */
1940 if (!ip) {
1941 rcu_read_unlock();
1942 continue;
1943 }
1944
1945 /*
1946 * because this is an RCU protected lookup, we could
1947 * find a recently freed or even reallocated inode
1948 * during the lookup. We need to check under the
1949 * i_flags_lock for a valid inode here. Skip it if it
1950 * is not valid, the wrong inode or stale.
1951 */
1952 spin_lock(&ip->i_flags_lock);
1953 if (ip->i_ino != inum + i ||
1954 __xfs_iflags_test(ip, XFS_ISTALE)) {
1955 spin_unlock(&ip->i_flags_lock);
1956 rcu_read_unlock();
1957 continue;
1958 }
1959 spin_unlock(&ip->i_flags_lock);
1960
1961 /*
1962 * Don't try to lock/unlock the current inode, but we
1963 * _cannot_ skip the other inodes that we did not find
1964 * in the list attached to the buffer and are not
1965 * already marked stale. If we can't lock it, back off
1966 * and retry.
1967 */
1968 if (ip != free_ip &&
1969 !xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
1970 rcu_read_unlock();
1971 delay(1);
1972 goto retry;
1973 }
1974 rcu_read_unlock();
1975
1976 xfs_iflock(ip);
1977 xfs_iflags_set(ip, XFS_ISTALE);
1978
1979 /*
1980 * we don't need to attach clean inodes or those only
1981 * with unlogged changes (which we throw away, anyway).
1982 */
1983 iip = ip->i_itemp;
1984 if (!iip || xfs_inode_clean(ip)) {
1985 ASSERT(ip != free_ip);
1986 xfs_ifunlock(ip);
1987 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1988 continue;
1989 }
1990
1991 iip->ili_last_fields = iip->ili_fields;
1992 iip->ili_fields = 0;
1993 iip->ili_logged = 1;
1994 xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn,
1995 &iip->ili_item.li_lsn);
1996
1997 xfs_buf_attach_iodone(bp, xfs_istale_done,
1998 &iip->ili_item);
1999
2000 if (ip != free_ip)
2001 xfs_iunlock(ip, XFS_ILOCK_EXCL);
2002 }
2003
2004 xfs_trans_stale_inode_buf(tp, bp);
2005 xfs_trans_binval(tp, bp);
2006 }
2007
2008 xfs_perag_put(pag);
2009 return 0;
2010 }
2011
2012 /*
2013 * This is called to return an inode to the inode free list.
2014 * The inode should already be truncated to 0 length and have
2015 * no pages associated with it. This routine also assumes that
2016 * the inode is already a part of the transaction.
2017 *
2018 * The on-disk copy of the inode will have been added to the list
2019 * of unlinked inodes in the AGI. We need to remove the inode from
2020 * that list atomically with respect to freeing it here.
2021 */
2022 int
2023 xfs_ifree(
2024 xfs_trans_t *tp,
2025 xfs_inode_t *ip,
2026 xfs_bmap_free_t *flist)
2027 {
2028 int error;
2029 int delete;
2030 xfs_ino_t first_ino;
2031 xfs_dinode_t *dip;
2032 xfs_buf_t *ibp;
2033
2034 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
2035 ASSERT(ip->i_d.di_nlink == 0);
2036 ASSERT(ip->i_d.di_nextents == 0);
2037 ASSERT(ip->i_d.di_anextents == 0);
2038 ASSERT(ip->i_d.di_size == 0 || !S_ISREG(ip->i_d.di_mode));
2039 ASSERT(ip->i_d.di_nblocks == 0);
2040
2041 /*
2042 * Pull the on-disk inode from the AGI unlinked list.
2043 */
2044 error = xfs_iunlink_remove(tp, ip);
2045 if (error != 0) {
2046 return error;
2047 }
2048
2049 error = xfs_difree(tp, ip->i_ino, flist, &delete, &first_ino);
2050 if (error != 0) {
2051 return error;
2052 }
2053 ip->i_d.di_mode = 0; /* mark incore inode as free */
2054 ip->i_d.di_flags = 0;
2055 ip->i_d.di_dmevmask = 0;
2056 ip->i_d.di_forkoff = 0; /* mark the attr fork not in use */
2057 ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS;
2058 ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
2059 /*
2060 * Bump the generation count so no one will be confused
2061 * by reincarnations of this inode.
2062 */
2063 ip->i_d.di_gen++;
2064
2065 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
2066
2067 error = xfs_imap_to_bp(ip->i_mount, tp, &ip->i_imap, &dip, &ibp,
2068 0, 0);
2069 if (error)
2070 return error;
2071
2072 /*
2073 * Clear the on-disk di_mode. This is to prevent xfs_bulkstat
2074 * from picking up this inode when it is reclaimed (its incore state
2075 	 * initialized but not flushed to disk yet). The in-core di_mode is
2076 * already cleared and a corresponding transaction logged.
2077 * The hack here just synchronizes the in-core to on-disk
2078 * di_mode value in advance before the actual inode sync to disk.
2079 * This is OK because the inode is already unlinked and would never
2080 * change its di_mode again for this inode generation.
2081 * This is a temporary hack that would require a proper fix
2082 * in the future.
2083 */
2084 dip->di_mode = 0;
2085
2086 if (delete) {
2087 error = xfs_ifree_cluster(ip, tp, first_ino);
2088 }
2089
2090 return error;
2091 }
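
/*
 * A rough sketch of the calling convention, modelled on (but not copied
 * from) the inactivation path: the inode is joined to a transaction, the
 * blocks released via xfs_difree() are collected on flist, and everything
 * commits atomically:
 *
 *	xfs_bmap_init(&free_list, &first_block);
 *	error = xfs_ifree(tp, ip, &free_list);
 *	if (!error)
 *		error = xfs_bmap_finish(&tp, &free_list, &committed);
 *	... then commit or cancel tp ...
 */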
2092
2093 /*
2094 * Reallocate the space for if_broot based on the number of records
2095 * being added or deleted as indicated in rec_diff. Move the records
2096 * and pointers in if_broot to fit the new size. When shrinking this
2097 * will eliminate holes between the records and pointers created by
2098 * the caller. When growing this will create holes to be filled in
2099 * by the caller.
2100 *
2101 * The caller must not request to add more records than would fit in
2102 * the on-disk inode root. If the if_broot is currently NULL, then
2103  * if we are adding records, one will be allocated. The caller must also
2104 * not request that the number of records go below zero, although
2105 * it can go to zero.
2106 *
2107 * ip -- the inode whose if_broot area is changing
2108 * ext_diff -- the change in the number of records, positive or negative,
2109 * requested for the if_broot array.
2110 */
2111 void
2112 xfs_iroot_realloc(
2113 xfs_inode_t *ip,
2114 int rec_diff,
2115 int whichfork)
2116 {
2117 struct xfs_mount *mp = ip->i_mount;
2118 int cur_max;
2119 xfs_ifork_t *ifp;
2120 struct xfs_btree_block *new_broot;
2121 int new_max;
2122 size_t new_size;
2123 char *np;
2124 char *op;
2125
2126 /*
2127 * Handle the degenerate case quietly.
2128 */
2129 if (rec_diff == 0) {
2130 return;
2131 }
2132
2133 ifp = XFS_IFORK_PTR(ip, whichfork);
2134 if (rec_diff > 0) {
2135 /*
2136 * If there wasn't any memory allocated before, just
2137 * allocate it now and get out.
2138 */
2139 if (ifp->if_broot_bytes == 0) {
2140 new_size = XFS_BMAP_BROOT_SPACE_CALC(mp, rec_diff);
2141 ifp->if_broot = kmem_alloc(new_size, KM_SLEEP | KM_NOFS);
2142 ifp->if_broot_bytes = (int)new_size;
2143 return;
2144 }
2145
2146 /*
2147 * If there is already an existing if_broot, then we need
2148 * to realloc() it and shift the pointers to their new
2149 * location. The records don't change location because
2150 * they are kept butted up against the btree block header.
2151 */
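		/*
		 * Schematically (a sketch, not an exact byte map), the
		 * incore broot keeps the block header and records at the
		 * front and the pointers butted up against the end:
		 *
		 *	+--------+------------+-----+---------+
		 *	| header | records... | gap | ptrs... |
		 *	+--------+------------+-----+---------+
		 *
		 * so growing the buffer only requires the pointer array to
		 * be moved to its new end-of-buffer position.
		 */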
2152 cur_max = xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, 0);
2153 new_max = cur_max + rec_diff;
2154 new_size = XFS_BMAP_BROOT_SPACE_CALC(mp, new_max);
2155 ifp->if_broot = kmem_realloc(ifp->if_broot, new_size,
2156 XFS_BMAP_BROOT_SPACE_CALC(mp, cur_max),
2157 KM_SLEEP | KM_NOFS);
2158 op = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, ifp->if_broot, 1,
2159 ifp->if_broot_bytes);
2160 np = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, ifp->if_broot, 1,
2161 (int)new_size);
2162 ifp->if_broot_bytes = (int)new_size;
2163 ASSERT(ifp->if_broot_bytes <=
2164 XFS_IFORK_SIZE(ip, whichfork) + XFS_BROOT_SIZE_ADJ(ip));
2165 memmove(np, op, cur_max * (uint)sizeof(xfs_dfsbno_t));
2166 return;
2167 }
2168
2169 /*
2170 * rec_diff is less than 0. In this case, we are shrinking the
2171 * if_broot buffer. It must already exist. If we go to zero
2172 * records, just get rid of the root and clear the status bit.
2173 */
2174 ASSERT((ifp->if_broot != NULL) && (ifp->if_broot_bytes > 0));
2175 cur_max = xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, 0);
2176 new_max = cur_max + rec_diff;
2177 ASSERT(new_max >= 0);
2178 if (new_max > 0)
2179 new_size = XFS_BMAP_BROOT_SPACE_CALC(mp, new_max);
2180 else
2181 new_size = 0;
2182 if (new_size > 0) {
2183 new_broot = kmem_alloc(new_size, KM_SLEEP | KM_NOFS);
2184 /*
2185 * First copy over the btree block header.
2186 */
2187 memcpy(new_broot, ifp->if_broot,
2188 XFS_BMBT_BLOCK_LEN(ip->i_mount));
2189 } else {
2190 new_broot = NULL;
2191 ifp->if_flags &= ~XFS_IFBROOT;
2192 }
2193
2194 /*
2195 * Only copy the records and pointers if there are any.
2196 */
2197 if (new_max > 0) {
2198 /*
2199 * First copy the records.
2200 */
2201 op = (char *)XFS_BMBT_REC_ADDR(mp, ifp->if_broot, 1);
2202 np = (char *)XFS_BMBT_REC_ADDR(mp, new_broot, 1);
2203 memcpy(np, op, new_max * (uint)sizeof(xfs_bmbt_rec_t));
2204
2205 /*
2206 * Then copy the pointers.
2207 */
2208 op = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, ifp->if_broot, 1,
2209 ifp->if_broot_bytes);
2210 np = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, new_broot, 1,
2211 (int)new_size);
2212 memcpy(np, op, new_max * (uint)sizeof(xfs_dfsbno_t));
2213 }
2214 kmem_free(ifp->if_broot);
2215 ifp->if_broot = new_broot;
2216 ifp->if_broot_bytes = (int)new_size;
2217 ASSERT(ifp->if_broot_bytes <=
2218 XFS_IFORK_SIZE(ip, whichfork) + XFS_BROOT_SIZE_ADJ(ip));
2219 return;
2220 }
2221
2222
2223 /*
2224 * This is called when the amount of space needed for if_data
2225 * is increased or decreased. The change in size is indicated by
2226 * the number of bytes that need to be added or deleted in the
2227 * byte_diff parameter.
2228 *
2229 * If the amount of space needed has decreased below the size of the
2230 * inline buffer, then switch to using the inline buffer. Otherwise,
2231 * use kmem_realloc() or kmem_alloc() to adjust the size of the buffer
2232 * to what is needed.
2233 *
2234 * ip -- the inode whose if_data area is changing
2235 * byte_diff -- the change in the number of bytes, positive or negative,
2236 * requested for the if_data array.
2237 */
2238 void
2239 xfs_idata_realloc(
2240 xfs_inode_t *ip,
2241 int byte_diff,
2242 int whichfork)
2243 {
2244 xfs_ifork_t *ifp;
2245 int new_size;
2246 int real_size;
2247
2248 if (byte_diff == 0) {
2249 return;
2250 }
2251
2252 ifp = XFS_IFORK_PTR(ip, whichfork);
2253 new_size = (int)ifp->if_bytes + byte_diff;
2254 ASSERT(new_size >= 0);
2255
2256 if (new_size == 0) {
2257 if (ifp->if_u1.if_data != ifp->if_u2.if_inline_data) {
2258 kmem_free(ifp->if_u1.if_data);
2259 }
2260 ifp->if_u1.if_data = NULL;
2261 real_size = 0;
2262 } else if (new_size <= sizeof(ifp->if_u2.if_inline_data)) {
2263 /*
2264 * If the valid extents/data can fit in if_inline_ext/data,
2265 * copy them from the malloc'd vector and free it.
2266 */
2267 if (ifp->if_u1.if_data == NULL) {
2268 ifp->if_u1.if_data = ifp->if_u2.if_inline_data;
2269 } else if (ifp->if_u1.if_data != ifp->if_u2.if_inline_data) {
2270 ASSERT(ifp->if_real_bytes != 0);
2271 memcpy(ifp->if_u2.if_inline_data, ifp->if_u1.if_data,
2272 new_size);
2273 kmem_free(ifp->if_u1.if_data);
2274 ifp->if_u1.if_data = ifp->if_u2.if_inline_data;
2275 }
2276 real_size = 0;
2277 } else {
2278 /*
2279 * Stuck with malloc/realloc.
2280 * For inline data, the underlying buffer must be
2281 * a multiple of 4 bytes in size so that it can be
2282 * logged and stay on word boundaries. We enforce
2283 * that here.
2284 */
2285 real_size = roundup(new_size, 4);
2286 if (ifp->if_u1.if_data == NULL) {
2287 ASSERT(ifp->if_real_bytes == 0);
2288 ifp->if_u1.if_data = kmem_alloc(real_size,
2289 KM_SLEEP | KM_NOFS);
2290 } else if (ifp->if_u1.if_data != ifp->if_u2.if_inline_data) {
2291 /*
2292 * Only do the realloc if the underlying size
2293 * is really changing.
2294 */
2295 if (ifp->if_real_bytes != real_size) {
2296 ifp->if_u1.if_data =
2297 kmem_realloc(ifp->if_u1.if_data,
2298 real_size,
2299 ifp->if_real_bytes,
2300 KM_SLEEP | KM_NOFS);
2301 }
2302 } else {
2303 ASSERT(ifp->if_real_bytes == 0);
2304 ifp->if_u1.if_data = kmem_alloc(real_size,
2305 KM_SLEEP | KM_NOFS);
2306 memcpy(ifp->if_u1.if_data, ifp->if_u2.if_inline_data,
2307 ifp->if_bytes);
2308 }
2309 }
2310 ifp->if_real_bytes = real_size;
2311 ifp->if_bytes = new_size;
2312 ASSERT(ifp->if_bytes <= XFS_IFORK_SIZE(ip, whichfork));
2313 }
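
/*
 * Example of the sizing rules above, assuming the usual 32 byte
 * if_inline_data buffer: a 20 byte fork lives inline with
 * if_real_bytes = 0; growing it to 45 bytes switches to a heap buffer
 * rounded up to real_size = 48, so the logged region always ends on a
 * word boundary.
 */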
2314
2315 void
2316 xfs_idestroy_fork(
2317 xfs_inode_t *ip,
2318 int whichfork)
2319 {
2320 xfs_ifork_t *ifp;
2321
2322 ifp = XFS_IFORK_PTR(ip, whichfork);
2323 if (ifp->if_broot != NULL) {
2324 kmem_free(ifp->if_broot);
2325 ifp->if_broot = NULL;
2326 }
2327
2328 /*
2329 * If the format is local, then we can't have an extents
2330 * array so just look for an inline data array. If we're
2331 * not local then we may or may not have an extents list,
2332 * so check and free it up if we do.
2333 */
2334 if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) {
2335 if ((ifp->if_u1.if_data != ifp->if_u2.if_inline_data) &&
2336 (ifp->if_u1.if_data != NULL)) {
2337 ASSERT(ifp->if_real_bytes != 0);
2338 kmem_free(ifp->if_u1.if_data);
2339 ifp->if_u1.if_data = NULL;
2340 ifp->if_real_bytes = 0;
2341 }
2342 } else if ((ifp->if_flags & XFS_IFEXTENTS) &&
2343 ((ifp->if_flags & XFS_IFEXTIREC) ||
2344 ((ifp->if_u1.if_extents != NULL) &&
2345 (ifp->if_u1.if_extents != ifp->if_u2.if_inline_ext)))) {
2346 ASSERT(ifp->if_real_bytes != 0);
2347 xfs_iext_destroy(ifp);
2348 }
2349 ASSERT(ifp->if_u1.if_extents == NULL ||
2350 ifp->if_u1.if_extents == ifp->if_u2.if_inline_ext);
2351 ASSERT(ifp->if_real_bytes == 0);
2352 if (whichfork == XFS_ATTR_FORK) {
2353 kmem_zone_free(xfs_ifork_zone, ip->i_afp);
2354 ip->i_afp = NULL;
2355 }
2356 }
2357
2358 /*
2359 * This is called to unpin an inode. The caller must have the inode locked
2360 * in at least shared mode so that the buffer cannot be subsequently pinned
2361 * once someone is waiting for it to be unpinned.
2362 */
2363 static void
2364 xfs_iunpin(
2365 struct xfs_inode *ip)
2366 {
2367 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
2368
2369 trace_xfs_inode_unpin_nowait(ip, _RET_IP_);
2370
2371 /* Give the log a push to start the unpinning I/O */
2372 xfs_log_force_lsn(ip->i_mount, ip->i_itemp->ili_last_lsn, 0);
2373
2374 }
2375
2376 static void
2377 __xfs_iunpin_wait(
2378 struct xfs_inode *ip)
2379 {
2380 wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_IPINNED_BIT);
2381 DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_IPINNED_BIT);
2382
2383 xfs_iunpin(ip);
2384
2385 do {
2386 prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
2387 if (xfs_ipincount(ip))
2388 io_schedule();
2389 } while (xfs_ipincount(ip));
2390 finish_wait(wq, &wait.wait);
2391 }
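
/*
 * The wakeup side of this wait lives in the log completion path: when the
 * pin count drops to zero, the inode log item code is expected to issue a
 * wake_up_bit() on __XFS_IPINNED_BIT, pairing with the bit_waitqueue used
 * above. A minimal sketch of that pairing:
 *
 *	if (atomic_dec_and_test(&ip->i_pincount))
 *		wake_up_bit(&ip->i_flags, __XFS_IPINNED_BIT);
 */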
2392
2393 void
2394 xfs_iunpin_wait(
2395 struct xfs_inode *ip)
2396 {
2397 if (xfs_ipincount(ip))
2398 __xfs_iunpin_wait(ip);
2399 }
2400
2401 /*
2402 * xfs_iextents_copy()
2403 *
2404 * This is called to copy the REAL extents (as opposed to the delayed
2405 * allocation extents) from the inode into the given buffer. It
2406 * returns the number of bytes copied into the buffer.
2407 *
2408 * If there are no delayed allocation extents, then we can just
2409 * memcpy() the extents into the buffer. Otherwise, we need to
2410 * examine each extent in turn and skip those which are delayed.
2411 */
2412 int
2413 xfs_iextents_copy(
2414 xfs_inode_t *ip,
2415 xfs_bmbt_rec_t *dp,
2416 int whichfork)
2417 {
2418 int copied;
2419 int i;
2420 xfs_ifork_t *ifp;
2421 int nrecs;
2422 xfs_fsblock_t start_block;
2423
2424 ifp = XFS_IFORK_PTR(ip, whichfork);
2425 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
2426 ASSERT(ifp->if_bytes > 0);
2427
2428 nrecs = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
2429 XFS_BMAP_TRACE_EXLIST(ip, nrecs, whichfork);
2430 ASSERT(nrecs > 0);
2431
2432 /*
2433 	 * Copy the extents one at a time, skipping any delayed
2434 	 * allocation extents (which exist only in memory and have
2435 	 * no real start block yet). There must be at least one
2436 	 * non-delayed extent.
2437 */
2438 copied = 0;
2439 for (i = 0; i < nrecs; i++) {
2440 xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, i);
2441 start_block = xfs_bmbt_get_startblock(ep);
2442 if (isnullstartblock(start_block)) {
2443 /*
2444 * It's a delayed allocation extent, so skip it.
2445 */
2446 continue;
2447 }
2448
2449 /* Translate to on disk format */
2450 put_unaligned(cpu_to_be64(ep->l0), &dp->l0);
2451 put_unaligned(cpu_to_be64(ep->l1), &dp->l1);
2452 dp++;
2453 copied++;
2454 }
2455 ASSERT(copied != 0);
2456 xfs_validate_extents(ifp, copied, XFS_EXTFMT_INODE(ip));
2457
2458 return (copied * (uint)sizeof(xfs_bmbt_rec_t));
2459 }
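
/*
 * A delayed allocation extent is recognisable purely from its start
 * block: isnullstartblock() checks for a bit pattern that real disk
 * addresses never carry. A caller can therefore bound the destination
 * buffer by the incore record count, e.g. (a sketch):
 *
 *	nrecs = ifp->if_bytes / sizeof(xfs_bmbt_rec_t);
 *	dp = kmem_alloc(nrecs * sizeof(xfs_bmbt_rec_t), KM_SLEEP);
 *	size = xfs_iextents_copy(ip, dp, whichfork);	size <= allocation
 */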
2460
2461 /*
2462 * Each of the following cases stores data into the same region
2463 * of the on-disk inode, so only one of them can be valid at
2464 * any given time. While it is possible to have conflicting formats
2465 * and log flags, e.g. having XFS_ILOG_?DATA set when the fork is
2466 * in EXTENTS format, this can only happen when the fork has
2467 * changed formats after being modified but before being flushed.
2468 * In these cases, the format always takes precedence, because the
2469 * format indicates the current state of the fork.
2470 */
2471 /*ARGSUSED*/
2472 STATIC void
2473 xfs_iflush_fork(
2474 xfs_inode_t *ip,
2475 xfs_dinode_t *dip,
2476 xfs_inode_log_item_t *iip,
2477 int whichfork,
2478 xfs_buf_t *bp)
2479 {
2480 char *cp;
2481 xfs_ifork_t *ifp;
2482 xfs_mount_t *mp;
2483 static const short brootflag[2] =
2484 { XFS_ILOG_DBROOT, XFS_ILOG_ABROOT };
2485 static const short dataflag[2] =
2486 { XFS_ILOG_DDATA, XFS_ILOG_ADATA };
2487 static const short extflag[2] =
2488 { XFS_ILOG_DEXT, XFS_ILOG_AEXT };
2489
2490 if (!iip)
2491 return;
2492 ifp = XFS_IFORK_PTR(ip, whichfork);
2493 /*
2494 * This can happen if we gave up in iformat in an error path,
2495 * for the attribute fork.
2496 */
2497 if (!ifp) {
2498 ASSERT(whichfork == XFS_ATTR_FORK);
2499 return;
2500 }
2501 cp = XFS_DFORK_PTR(dip, whichfork);
2502 mp = ip->i_mount;
2503 switch (XFS_IFORK_FORMAT(ip, whichfork)) {
2504 case XFS_DINODE_FMT_LOCAL:
2505 if ((iip->ili_fields & dataflag[whichfork]) &&
2506 (ifp->if_bytes > 0)) {
2507 ASSERT(ifp->if_u1.if_data != NULL);
2508 ASSERT(ifp->if_bytes <= XFS_IFORK_SIZE(ip, whichfork));
2509 memcpy(cp, ifp->if_u1.if_data, ifp->if_bytes);
2510 }
2511 break;
2512
2513 case XFS_DINODE_FMT_EXTENTS:
2514 ASSERT((ifp->if_flags & XFS_IFEXTENTS) ||
2515 !(iip->ili_fields & extflag[whichfork]));
2516 if ((iip->ili_fields & extflag[whichfork]) &&
2517 (ifp->if_bytes > 0)) {
2518 ASSERT(xfs_iext_get_ext(ifp, 0));
2519 ASSERT(XFS_IFORK_NEXTENTS(ip, whichfork) > 0);
2520 (void)xfs_iextents_copy(ip, (xfs_bmbt_rec_t *)cp,
2521 whichfork);
2522 }
2523 break;
2524
2525 case XFS_DINODE_FMT_BTREE:
2526 if ((iip->ili_fields & brootflag[whichfork]) &&
2527 (ifp->if_broot_bytes > 0)) {
2528 ASSERT(ifp->if_broot != NULL);
2529 ASSERT(ifp->if_broot_bytes <=
2530 (XFS_IFORK_SIZE(ip, whichfork) +
2531 XFS_BROOT_SIZE_ADJ(ip)));
2532 xfs_bmbt_to_bmdr(mp, ifp->if_broot, ifp->if_broot_bytes,
2533 (xfs_bmdr_block_t *)cp,
2534 XFS_DFORK_SIZE(dip, mp, whichfork));
2535 }
2536 break;
2537
2538 case XFS_DINODE_FMT_DEV:
2539 if (iip->ili_fields & XFS_ILOG_DEV) {
2540 ASSERT(whichfork == XFS_DATA_FORK);
2541 xfs_dinode_put_rdev(dip, ip->i_df.if_u2.if_rdev);
2542 }
2543 break;
2544
2545 case XFS_DINODE_FMT_UUID:
2546 if (iip->ili_fields & XFS_ILOG_UUID) {
2547 ASSERT(whichfork == XFS_DATA_FORK);
2548 memcpy(XFS_DFORK_DPTR(dip),
2549 &ip->i_df.if_u2.if_uuid,
2550 sizeof(uuid_t));
2551 }
2552 break;
2553
2554 default:
2555 ASSERT(0);
2556 break;
2557 }
2558 }
2559
2560 STATIC int
2561 xfs_iflush_cluster(
2562 xfs_inode_t *ip,
2563 xfs_buf_t *bp)
2564 {
2565 xfs_mount_t *mp = ip->i_mount;
2566 struct xfs_perag *pag;
2567 unsigned long first_index, mask;
2568 unsigned long inodes_per_cluster;
2569 int ilist_size;
2570 xfs_inode_t **ilist;
2571 xfs_inode_t *iq;
2572 int nr_found;
2573 int clcount = 0;
2574 int bufwasdelwri;
2575 int i;
2576
2577 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
2578
2579 inodes_per_cluster = XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog;
2580 ilist_size = inodes_per_cluster * sizeof(xfs_inode_t *);
2581 ilist = kmem_alloc(ilist_size, KM_MAYFAIL|KM_NOFS);
2582 if (!ilist)
2583 goto out_put;
2584
2585 mask = ~(((XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog)) - 1);
2586 first_index = XFS_INO_TO_AGINO(mp, ip->i_ino) & mask;
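	/*
	 * For example, with 16 inodes per cluster the mask above is ~0xf,
	 * so first_index is the AG inode number of the first inode in the
	 * cluster ip belongs to; every inode whose agino matches it under
	 * the mask shares the same cluster buffer.
	 */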
2587 rcu_read_lock();
2588 /* really need a gang lookup range call here */
2589 nr_found = radix_tree_gang_lookup(&pag->pag_ici_root, (void**)ilist,
2590 first_index, inodes_per_cluster);
2591 if (nr_found == 0)
2592 goto out_free;
2593
2594 for (i = 0; i < nr_found; i++) {
2595 iq = ilist[i];
2596 if (iq == ip)
2597 continue;
2598
2599 /*
2600 * because this is an RCU protected lookup, we could find a
2601 * recently freed or even reallocated inode during the lookup.
2602 * We need to check under the i_flags_lock for a valid inode
2603 * here. Skip it if it is not valid or the wrong inode.
2604 */
2605 		spin_lock(&iq->i_flags_lock);
2606 		if (!iq->i_ino ||
2607 		    (XFS_INO_TO_AGINO(mp, iq->i_ino) & mask) != first_index) {
2608 			spin_unlock(&iq->i_flags_lock);
2609 			continue;
2610 		}
2611 		spin_unlock(&iq->i_flags_lock);
2612
2613 /*
2614 * Do an un-protected check to see if the inode is dirty and
2615 * is a candidate for flushing. These checks will be repeated
2616 * later after the appropriate locks are acquired.
2617 */
2618 if (xfs_inode_clean(iq) && xfs_ipincount(iq) == 0)
2619 continue;
2620
2621 /*
2622 * Try to get locks. If any are unavailable or it is pinned,
2623 * then this inode cannot be flushed and is skipped.
2624 */
2625
2626 if (!xfs_ilock_nowait(iq, XFS_ILOCK_SHARED))
2627 continue;
2628 if (!xfs_iflock_nowait(iq)) {
2629 xfs_iunlock(iq, XFS_ILOCK_SHARED);
2630 continue;
2631 }
2632 if (xfs_ipincount(iq)) {
2633 xfs_ifunlock(iq);
2634 xfs_iunlock(iq, XFS_ILOCK_SHARED);
2635 continue;
2636 }
2637
2638 /*
2639 * arriving here means that this inode can be flushed. First
2640 * re-check that it's dirty before flushing.
2641 */
2642 if (!xfs_inode_clean(iq)) {
2643 int error;
2644 error = xfs_iflush_int(iq, bp);
2645 if (error) {
2646 xfs_iunlock(iq, XFS_ILOCK_SHARED);
2647 goto cluster_corrupt_out;
2648 }
2649 clcount++;
2650 } else {
2651 xfs_ifunlock(iq);
2652 }
2653 xfs_iunlock(iq, XFS_ILOCK_SHARED);
2654 }
2655
2656 if (clcount) {
2657 XFS_STATS_INC(xs_icluster_flushcnt);
2658 XFS_STATS_ADD(xs_icluster_flushinode, clcount);
2659 }
2660
2661 out_free:
2662 rcu_read_unlock();
2663 kmem_free(ilist);
2664 out_put:
2665 xfs_perag_put(pag);
2666 return 0;
2667
2668
2669 cluster_corrupt_out:
2670 /*
2671 * Corruption detected in the clustering loop. Invalidate the
2672 * inode buffer and shut down the filesystem.
2673 */
2674 rcu_read_unlock();
2675 /*
2676 * Clean up the buffer. If it was delwri, just release it --
2677 * brelse can handle it with no problems. If not, shut down the
2678 * filesystem before releasing the buffer.
2679 */
2680 bufwasdelwri = (bp->b_flags & _XBF_DELWRI_Q);
2681 if (bufwasdelwri)
2682 xfs_buf_relse(bp);
2683
2684 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
2685
2686 if (!bufwasdelwri) {
2687 /*
2688 * Just like incore_relse: if we have b_iodone functions,
2689 * mark the buffer as an error and call them. Otherwise
2690 * mark it as stale and brelse.
2691 */
2692 if (bp->b_iodone) {
2693 XFS_BUF_UNDONE(bp);
2694 xfs_buf_stale(bp);
2695 xfs_buf_ioerror(bp, EIO);
2696 xfs_buf_ioend(bp, 0);
2697 } else {
2698 xfs_buf_stale(bp);
2699 xfs_buf_relse(bp);
2700 }
2701 }
2702
2703 /*
2704 * Unlocks the flush lock
2705 */
2706 xfs_iflush_abort(iq, false);
2707 kmem_free(ilist);
2708 xfs_perag_put(pag);
2709 return XFS_ERROR(EFSCORRUPTED);
2710 }
2711
2712 /*
2713 * Flush dirty inode metadata into the backing buffer.
2714 *
2715 * The caller must have the inode lock and the inode flush lock held. The
2716 * inode lock will still be held upon return to the caller, and the inode
2717 * flush lock will be released after the inode has reached the disk.
2718 *
2719 * The caller must write out the buffer returned in *bpp and release it.
2720 */
2721 int
2722 xfs_iflush(
2723 struct xfs_inode *ip,
2724 struct xfs_buf **bpp)
2725 {
2726 struct xfs_mount *mp = ip->i_mount;
2727 struct xfs_buf *bp;
2728 struct xfs_dinode *dip;
2729 int error;
2730
2731 XFS_STATS_INC(xs_iflush_count);
2732
2733 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
2734 ASSERT(xfs_isiflocked(ip));
2735 ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
2736 ip->i_d.di_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK));
2737
2738 *bpp = NULL;
2739
2740 xfs_iunpin_wait(ip);
2741
2742 /*
2743 * For stale inodes we cannot rely on the backing buffer remaining
2744 * stale in cache for the remaining life of the stale inode and so
2745 * xfs_imap_to_bp() below may give us a buffer that no longer contains
2746 * inodes below. We have to check this after ensuring the inode is
2747 	 * inodes. We have to check this after ensuring the inode is
2748 * flush call.
2749 */
2750 if (xfs_iflags_test(ip, XFS_ISTALE)) {
2751 xfs_ifunlock(ip);
2752 return 0;
2753 }
2754
2755 /*
2756 * This may have been unpinned because the filesystem is shutting
2757 * down forcibly. If that's the case we must not write this inode
2758 * to disk, because the log record didn't make it to disk.
2759 *
2760 * We also have to remove the log item from the AIL in this case,
2761 * as we wait for an empty AIL as part of the unmount process.
2762 */
2763 if (XFS_FORCED_SHUTDOWN(mp)) {
2764 error = XFS_ERROR(EIO);
2765 goto abort_out;
2766 }
2767
2768 /*
2769 * Get the buffer containing the on-disk inode.
2770 */
2771 error = xfs_imap_to_bp(mp, NULL, &ip->i_imap, &dip, &bp, XBF_TRYLOCK,
2772 0);
2773 if (error || !bp) {
2774 xfs_ifunlock(ip);
2775 return error;
2776 }
2777
2778 /*
2779 * First flush out the inode that xfs_iflush was called with.
2780 */
2781 error = xfs_iflush_int(ip, bp);
2782 if (error)
2783 goto corrupt_out;
2784
2785 /*
2786 * If the buffer is pinned then push on the log now so we won't
2787 * get stuck waiting in the write for too long.
2788 */
2789 if (xfs_buf_ispinned(bp))
2790 xfs_log_force(mp, 0);
2791
2792 /*
2793 * inode clustering:
2794 * see if other inodes can be gathered into this write
2795 */
2796 error = xfs_iflush_cluster(ip, bp);
2797 if (error)
2798 goto cluster_corrupt_out;
2799
2800 *bpp = bp;
2801 return 0;
2802
2803 corrupt_out:
2804 xfs_buf_relse(bp);
2805 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
2806 cluster_corrupt_out:
2807 error = XFS_ERROR(EFSCORRUPTED);
2808 abort_out:
2809 /*
2810 * Unlocks the flush lock
2811 */
2812 xfs_iflush_abort(ip, false);
2813 return error;
2814 }
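
/*
 * The expected calling pattern (a sketch in the style of the AIL push
 * code, not a verbatim copy of it): flush the inode, queue the returned
 * buffer for delayed write, then drop the buffer reference:
 *
 *	error = xfs_iflush(ip, &bp);
 *	if (!error) {
 *		xfs_buf_delwri_queue(bp, buffer_list);
 *		xfs_buf_relse(bp);
 *	}
 */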
2815
2816
2817 STATIC int
2818 xfs_iflush_int(
2819 struct xfs_inode *ip,
2820 struct xfs_buf *bp)
2821 {
2822 struct xfs_inode_log_item *iip = ip->i_itemp;
2823 struct xfs_dinode *dip;
2824 struct xfs_mount *mp = ip->i_mount;
2825
2826 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
2827 ASSERT(xfs_isiflocked(ip));
2828 ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
2829 ip->i_d.di_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK));
2830 ASSERT(iip != NULL && iip->ili_fields != 0);
2831
2832 /* set *dip = inode's place in the buffer */
2833 dip = (xfs_dinode_t *)xfs_buf_offset(bp, ip->i_imap.im_boffset);
2834
2835 if (XFS_TEST_ERROR(dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC),
2836 mp, XFS_ERRTAG_IFLUSH_1, XFS_RANDOM_IFLUSH_1)) {
2837 xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
2838 "%s: Bad inode %Lu magic number 0x%x, ptr 0x%p",
2839 __func__, ip->i_ino, be16_to_cpu(dip->di_magic), dip);
2840 goto corrupt_out;
2841 }
2842 if (XFS_TEST_ERROR(ip->i_d.di_magic != XFS_DINODE_MAGIC,
2843 mp, XFS_ERRTAG_IFLUSH_2, XFS_RANDOM_IFLUSH_2)) {
2844 xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
2845 "%s: Bad inode %Lu, ptr 0x%p, magic number 0x%x",
2846 __func__, ip->i_ino, ip, ip->i_d.di_magic);
2847 goto corrupt_out;
2848 }
2849 if (S_ISREG(ip->i_d.di_mode)) {
2850 if (XFS_TEST_ERROR(
2851 (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) &&
2852 (ip->i_d.di_format != XFS_DINODE_FMT_BTREE),
2853 mp, XFS_ERRTAG_IFLUSH_3, XFS_RANDOM_IFLUSH_3)) {
2854 xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
2855 "%s: Bad regular inode %Lu, ptr 0x%p",
2856 __func__, ip->i_ino, ip);
2857 goto corrupt_out;
2858 }
2859 } else if (S_ISDIR(ip->i_d.di_mode)) {
2860 if (XFS_TEST_ERROR(
2861 (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) &&
2862 (ip->i_d.di_format != XFS_DINODE_FMT_BTREE) &&
2863 (ip->i_d.di_format != XFS_DINODE_FMT_LOCAL),
2864 mp, XFS_ERRTAG_IFLUSH_4, XFS_RANDOM_IFLUSH_4)) {
2865 xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
2866 "%s: Bad directory inode %Lu, ptr 0x%p",
2867 __func__, ip->i_ino, ip);
2868 goto corrupt_out;
2869 }
2870 }
2871 if (XFS_TEST_ERROR(ip->i_d.di_nextents + ip->i_d.di_anextents >
2872 ip->i_d.di_nblocks, mp, XFS_ERRTAG_IFLUSH_5,
2873 XFS_RANDOM_IFLUSH_5)) {
2874 xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
2875 "%s: detected corrupt incore inode %Lu, "
2876 "total extents = %d, nblocks = %Ld, ptr 0x%p",
2877 __func__, ip->i_ino,
2878 ip->i_d.di_nextents + ip->i_d.di_anextents,
2879 ip->i_d.di_nblocks, ip);
2880 goto corrupt_out;
2881 }
2882 if (XFS_TEST_ERROR(ip->i_d.di_forkoff > mp->m_sb.sb_inodesize,
2883 mp, XFS_ERRTAG_IFLUSH_6, XFS_RANDOM_IFLUSH_6)) {
2884 xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
2885 "%s: bad inode %Lu, forkoff 0x%x, ptr 0x%p",
2886 __func__, ip->i_ino, ip->i_d.di_forkoff, ip);
2887 goto corrupt_out;
2888 }
2889 /*
2890 * bump the flush iteration count, used to detect flushes which
2891 * postdate a log record during recovery. This is redundant as we now
2892 * log every change and hence this can't happen. Still, it doesn't hurt.
2893 */
2894 ip->i_d.di_flushiter++;
2895
2896 /*
2897 * Copy the dirty parts of the inode into the on-disk
2898 * inode. We always copy out the core of the inode,
2899 * because if the inode is dirty at all the core must
2900 * be.
2901 */
2902 xfs_dinode_to_disk(dip, &ip->i_d);
2903
2904 /* Wrap, we never let the log put out DI_MAX_FLUSH */
2905 if (ip->i_d.di_flushiter == DI_MAX_FLUSH)
2906 ip->i_d.di_flushiter = 0;
2907
2908 /*
2909 * If this is really an old format inode and the superblock version
2910 * has not been updated to support only new format inodes, then
2911 * convert back to the old inode format. If the superblock version
2912 * has been updated, then make the conversion permanent.
2913 */
2914 ASSERT(ip->i_d.di_version == 1 || xfs_sb_version_hasnlink(&mp->m_sb));
2915 if (ip->i_d.di_version == 1) {
2916 if (!xfs_sb_version_hasnlink(&mp->m_sb)) {
2917 /*
2918 * Convert it back.
2919 */
2920 ASSERT(ip->i_d.di_nlink <= XFS_MAXLINK_1);
2921 dip->di_onlink = cpu_to_be16(ip->i_d.di_nlink);
2922 } else {
2923 /*
2924 * The superblock version has already been bumped,
2925 * so just make the conversion to the new inode
2926 * format permanent.
2927 */
2928 ip->i_d.di_version = 2;
2929 dip->di_version = 2;
2930 ip->i_d.di_onlink = 0;
2931 dip->di_onlink = 0;
2932 memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad));
2933 memset(&(dip->di_pad[0]), 0,
2934 sizeof(dip->di_pad));
2935 ASSERT(xfs_get_projid(ip) == 0);
2936 }
2937 }
2938
2939 xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK, bp);
2940 if (XFS_IFORK_Q(ip))
2941 xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK, bp);
2942 xfs_inobp_check(mp, bp);
2943
2944 /*
2945 * We've recorded everything logged in the inode, so we'd like to clear
2946 * the ili_fields bits so we don't log and flush things unnecessarily.
2947 * However, we can't stop logging all this information until the data
2948 * we've copied into the disk buffer is written to disk. If we did we
2949 * might overwrite the copy of the inode in the log with all the data
2950 * after re-logging only part of it, and in the face of a crash we
2951 * wouldn't have all the data we need to recover.
2952 *
2953 * What we do is move the bits to the ili_last_fields field. When
2954 * logging the inode, these bits are moved back to the ili_fields field.
2955 * In the xfs_iflush_done() routine we clear ili_last_fields, since we
2956 * know that the information those bits represent is permanently on
2957 * disk. As long as the flush completes before the inode is logged
2958 * again, then both ili_fields and ili_last_fields will be cleared.
2959 *
2960 * We can play with the ili_fields bits here, because the inode lock
2961 * must be held exclusively in order to set bits there and the flush
2962 * lock protects the ili_last_fields bits. Set ili_logged so the flush
2963 * done routine can tell whether or not to look in the AIL. Also, store
2964 * the current LSN of the inode so that we can tell whether the item has
2965 * moved in the AIL from xfs_iflush_done(). In order to read the lsn we
2966 * need the AIL lock, because it is a 64 bit value that cannot be read
2967 * atomically.
2968 */
2969 iip->ili_last_fields = iip->ili_fields;
2970 iip->ili_fields = 0;
2971 iip->ili_logged = 1;
2972
2973 xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn,
2974 &iip->ili_item.li_lsn);
2975
2976 /*
2977 * Attach the function xfs_iflush_done to the inode's
2978 * buffer. This will remove the inode from the AIL
2979 * and unlock the inode's flush lock when the inode is
2980 * completely written to disk.
2981 */
2982 xfs_buf_attach_iodone(bp, xfs_iflush_done, &iip->ili_item);
2983
2984 /* update the lsn in the on disk inode if required */
2985 if (ip->i_d.di_version == 3)
2986 dip->di_lsn = cpu_to_be64(iip->ili_item.li_lsn);
2987
2988 /* generate the checksum. */
2989 xfs_dinode_calc_crc(mp, dip);
2990
2991 ASSERT(bp->b_fspriv != NULL);
2992 ASSERT(bp->b_iodone != NULL);
2993 return 0;
2994
2995 corrupt_out:
2996 return XFS_ERROR(EFSCORRUPTED);
2997 }
2998
2999 /*
3000 * Return a pointer to the extent record at file index idx.
3001 */
3002 xfs_bmbt_rec_host_t *
3003 xfs_iext_get_ext(
3004 xfs_ifork_t *ifp, /* inode fork pointer */
3005 xfs_extnum_t idx) /* index of target extent */
3006 {
3007 ASSERT(idx >= 0);
3008 ASSERT(idx < ifp->if_bytes / sizeof(xfs_bmbt_rec_t));
3009
3010 if ((ifp->if_flags & XFS_IFEXTIREC) && (idx == 0)) {
3011 return ifp->if_u1.if_ext_irec->er_extbuf;
3012 } else if (ifp->if_flags & XFS_IFEXTIREC) {
3013 xfs_ext_irec_t *erp; /* irec pointer */
3014 int erp_idx = 0; /* irec index */
3015 xfs_extnum_t page_idx = idx; /* ext index in target list */
3016
3017 erp = xfs_iext_idx_to_irec(ifp, &page_idx, &erp_idx, 0);
3018 return &erp->er_extbuf[page_idx];
3019 } else if (ifp->if_bytes) {
3020 return &ifp->if_u1.if_extents[idx];
3021 } else {
3022 return NULL;
3023 }
3024 }
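
/*
 * With the usual 4k indirection buffers, each er_extbuf holds
 * XFS_LINEAR_EXTS = 4096 / 16 = 256 records, so (as a sketch, assuming
 * fully populated pages) extent index 700 resolves to irec page 2,
 * offset 188:
 *
 *	erp_idx  = idx / XFS_LINEAR_EXTS;	700 / 256 = 2
 *	page_idx = idx % XFS_LINEAR_EXTS;	700 % 256 = 188
 *
 * xfs_iext_idx_to_irec() does this properly via the per-page er_extoff
 * offsets, since pages need not be full.
 */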
3025
3026 /*
3027 * Insert new item(s) into the extent records for incore inode
3028 * fork 'ifp'. 'count' new items are inserted at index 'idx'.
3029 */
3030 void
3031 xfs_iext_insert(
3032 xfs_inode_t *ip, /* incore inode pointer */
3033 xfs_extnum_t idx, /* starting index of new items */
3034 xfs_extnum_t count, /* number of inserted items */
3035 xfs_bmbt_irec_t *new, /* items to insert */
3036 int state) /* type of extent conversion */
3037 {
3038 xfs_ifork_t *ifp = (state & BMAP_ATTRFORK) ? ip->i_afp : &ip->i_df;
3039 xfs_extnum_t i; /* extent record index */
3040
3041 trace_xfs_iext_insert(ip, idx, new, state, _RET_IP_);
3042
3043 ASSERT(ifp->if_flags & XFS_IFEXTENTS);
3044 xfs_iext_add(ifp, idx, count);
3045 for (i = idx; i < idx + count; i++, new++)
3046 xfs_bmbt_set_all(xfs_iext_get_ext(ifp, i), new);
3047 }
3048
3049 /*
3050 * This is called when the amount of space required for incore file
3051 * extents needs to be increased. The ext_diff parameter stores the
3052 * number of new extents being added and the idx parameter contains
3053 * the extent index where the new extents will be added. If the new
3054 * extents are being appended, then we just need to (re)allocate and
3055 * initialize the space. Otherwise, if the new extents are being
3056 * inserted into the middle of the existing entries, a bit more work
3057 * is required to make room for the new extents to be inserted. The
3058 * caller is responsible for filling in the new extent entries upon
3059 * return.
3060 */
3061 void
3062 xfs_iext_add(
3063 xfs_ifork_t *ifp, /* inode fork pointer */
3064 xfs_extnum_t idx, /* index to begin adding exts */
3065 int ext_diff) /* number of extents to add */
3066 {
3067 int byte_diff; /* new bytes being added */
3068 int new_size; /* size of extents after adding */
3069 xfs_extnum_t nextents; /* number of extents in file */
3070
3071 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
3072 ASSERT((idx >= 0) && (idx <= nextents));
3073 byte_diff = ext_diff * sizeof(xfs_bmbt_rec_t);
3074 new_size = ifp->if_bytes + byte_diff;
3075 /*
3076 * If the new number of extents (nextents + ext_diff)
3077 * fits inside the inode, then continue to use the inline
3078 * extent buffer.
3079 */
3080 if (nextents + ext_diff <= XFS_INLINE_EXTS) {
3081 if (idx < nextents) {
3082 memmove(&ifp->if_u2.if_inline_ext[idx + ext_diff],
3083 &ifp->if_u2.if_inline_ext[idx],
3084 (nextents - idx) * sizeof(xfs_bmbt_rec_t));
3085 memset(&ifp->if_u2.if_inline_ext[idx], 0, byte_diff);
3086 }
3087 ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext;
3088 ifp->if_real_bytes = 0;
3089 }
3090 /*
3091 * Otherwise use a linear (direct) extent list.
3092 * If the extents are currently inside the inode,
3093 * xfs_iext_realloc_direct will switch us from
3094 * inline to direct extent allocation mode.
3095 */
3096 else if (nextents + ext_diff <= XFS_LINEAR_EXTS) {
3097 xfs_iext_realloc_direct(ifp, new_size);
3098 if (idx < nextents) {
3099 memmove(&ifp->if_u1.if_extents[idx + ext_diff],
3100 &ifp->if_u1.if_extents[idx],
3101 (nextents - idx) * sizeof(xfs_bmbt_rec_t));
3102 memset(&ifp->if_u1.if_extents[idx], 0, byte_diff);
3103 }
3104 }
3105 /* Indirection array */
3106 else {
3107 xfs_ext_irec_t *erp;
3108 int erp_idx = 0;
3109 int page_idx = idx;
3110
3111 ASSERT(nextents + ext_diff > XFS_LINEAR_EXTS);
3112 if (ifp->if_flags & XFS_IFEXTIREC) {
3113 erp = xfs_iext_idx_to_irec(ifp, &page_idx, &erp_idx, 1);
3114 } else {
3115 xfs_iext_irec_init(ifp);
3116 ASSERT(ifp->if_flags & XFS_IFEXTIREC);
3117 erp = ifp->if_u1.if_ext_irec;
3118 }
3119 /* Extents fit in target extent page */
3120 if (erp && erp->er_extcount + ext_diff <= XFS_LINEAR_EXTS) {
3121 if (page_idx < erp->er_extcount) {
3122 memmove(&erp->er_extbuf[page_idx + ext_diff],
3123 &erp->er_extbuf[page_idx],
3124 (erp->er_extcount - page_idx) *
3125 sizeof(xfs_bmbt_rec_t));
3126 memset(&erp->er_extbuf[page_idx], 0, byte_diff);
3127 }
3128 erp->er_extcount += ext_diff;
3129 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, ext_diff);
3130 }
3131 /* Insert a new extent page */
3132 else if (erp) {
3133 xfs_iext_add_indirect_multi(ifp,
3134 erp_idx, page_idx, ext_diff);
3135 }
3136 /*
3137 * If extent(s) are being appended to the last page in
3138 * the indirection array and the new extent(s) don't fit
3139 * in the page, then erp is NULL and erp_idx is set to
3140 * the next index needed in the indirection array.
3141 */
3142 else {
3143 int count = ext_diff;
3144
3145 while (count) {
3146 erp = xfs_iext_irec_new(ifp, erp_idx);
3147 erp->er_extcount = count;
3148 count -= MIN(count, (int)XFS_LINEAR_EXTS);
3149 if (count) {
3150 erp_idx++;
3151 }
3152 }
3153 }
3154 }
3155 ifp->if_bytes = new_size;
3156 }
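
/*
 * To summarise the three regimes above with concrete numbers (the inline
 * and buffer sizes are the usual compile-time values, shown only as an
 * illustration):
 *
 *	nextents <= XFS_INLINE_EXTS (2)		inline in the fork
 *	nextents <= XFS_LINEAR_EXTS (256)	one direct kmem buffer
 *	otherwise				indirection array of pages
 */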
3157
3158 /*
3159 * This is called when incore extents are being added to the indirection
3160 * array and the new extents do not fit in the target extent list. The
3161 * erp_idx parameter contains the irec index for the target extent list
3162 * in the indirection array, and the idx parameter contains the extent
3163 * index within the list. The number of extents being added is stored
3164 * in the count parameter.
3165 *
3166 * |-------| |-------|
3167 * | | | | idx - number of extents before idx
3168 * | idx | | count |
3169 * | | | | count - number of extents being inserted at idx
3170 * |-------| |-------|
3171 * | count | | nex2 | nex2 - number of extents after idx + count
3172 * |-------| |-------|
3173 */
3174 void
3175 xfs_iext_add_indirect_multi(
3176 xfs_ifork_t *ifp, /* inode fork pointer */
3177 int erp_idx, /* target extent irec index */
3178 xfs_extnum_t idx, /* index within target list */
3179 int count) /* new extents being added */
3180 {
3181 int byte_diff; /* new bytes being added */
3182 xfs_ext_irec_t *erp; /* pointer to irec entry */
3183 xfs_extnum_t ext_diff; /* number of extents to add */
3184 xfs_extnum_t ext_cnt; /* new extents still needed */
3185 xfs_extnum_t nex2; /* extents after idx + count */
3186 xfs_bmbt_rec_t *nex2_ep = NULL; /* temp list for nex2 extents */
3187 int nlists; /* number of irec's (lists) */
3188
3189 ASSERT(ifp->if_flags & XFS_IFEXTIREC);
3190 erp = &ifp->if_u1.if_ext_irec[erp_idx];
3191 nex2 = erp->er_extcount - idx;
3192 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
3193
3194 /*
3195 	 * Save the second part of the target extent list
3196 	 * (all extents at and after idx). */
3197 if (nex2) {
3198 byte_diff = nex2 * sizeof(xfs_bmbt_rec_t);
3199 nex2_ep = (xfs_bmbt_rec_t *) kmem_alloc(byte_diff, KM_NOFS);
3200 memmove(nex2_ep, &erp->er_extbuf[idx], byte_diff);
3201 erp->er_extcount -= nex2;
3202 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, -nex2);
3203 memset(&erp->er_extbuf[idx], 0, byte_diff);
3204 }
3205
3206 /*
3207 * Add the new extents to the end of the target
3208 * list, then allocate new irec record(s) and
3209 * extent buffer(s) as needed to store the rest
3210 * of the new extents.
3211 */
3212 ext_cnt = count;
3213 ext_diff = MIN(ext_cnt, (int)XFS_LINEAR_EXTS - erp->er_extcount);
3214 if (ext_diff) {
3215 erp->er_extcount += ext_diff;
3216 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, ext_diff);
3217 ext_cnt -= ext_diff;
3218 }
3219 while (ext_cnt) {
3220 erp_idx++;
3221 erp = xfs_iext_irec_new(ifp, erp_idx);
3222 ext_diff = MIN(ext_cnt, (int)XFS_LINEAR_EXTS);
3223 erp->er_extcount = ext_diff;
3224 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, ext_diff);
3225 ext_cnt -= ext_diff;
3226 }
3227
3228 /* Add nex2 extents back to indirection array */
3229 if (nex2) {
3230 xfs_extnum_t ext_avail;
3231 int i;
3232
3233 byte_diff = nex2 * sizeof(xfs_bmbt_rec_t);
3234 ext_avail = XFS_LINEAR_EXTS - erp->er_extcount;
3235 i = 0;
3236 /*
3237 * If nex2 extents fit in the current page, append
3238 * nex2_ep after the new extents.
3239 */
3240 if (nex2 <= ext_avail) {
3241 i = erp->er_extcount;
3242 }
3243 /*
3244 * Otherwise, check if space is available in the
3245 * next page.
3246 */
3247 else if ((erp_idx < nlists - 1) &&
3248 (nex2 <= (ext_avail = XFS_LINEAR_EXTS -
3249 ifp->if_u1.if_ext_irec[erp_idx+1].er_extcount))) {
3250 erp_idx++;
3251 erp++;
3252 /* Create a hole for nex2 extents */
3253 memmove(&erp->er_extbuf[nex2], erp->er_extbuf,
3254 erp->er_extcount * sizeof(xfs_bmbt_rec_t));
3255 }
3256 /*
3257 * Final choice, create a new extent page for
3258 * nex2 extents.
3259 */
3260 else {
3261 erp_idx++;
3262 erp = xfs_iext_irec_new(ifp, erp_idx);
3263 }
3264 memmove(&erp->er_extbuf[i], nex2_ep, byte_diff);
3265 kmem_free(nex2_ep);
3266 erp->er_extcount += nex2;
3267 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, nex2);
3268 }
3269 }
3270
3271 /*
3272 * This is called when the amount of space required for incore file
3273 * extents needs to be decreased. The ext_diff parameter stores the
3274 * number of extents to be removed and the idx parameter contains
3275 * the extent index where the extents will be removed from.
3276 *
3277 * If the amount of space needed has decreased below the linear
3278 * limit, XFS_IEXT_BUFSZ, then switch to using the contiguous
3279 * extent array. Otherwise, use kmem_realloc() to adjust the
3280 * size to what is needed.
3281 */
3282 void
3283 xfs_iext_remove(
3284 xfs_inode_t *ip, /* incore inode pointer */
3285 xfs_extnum_t idx, /* index to begin removing exts */
3286 int ext_diff, /* number of extents to remove */
3287 int state) /* type of extent conversion */
3288 {
3289 xfs_ifork_t *ifp = (state & BMAP_ATTRFORK) ? ip->i_afp : &ip->i_df;
3290 xfs_extnum_t nextents; /* number of extents in file */
3291 int new_size; /* size of extents after removal */
3292
3293 trace_xfs_iext_remove(ip, idx, state, _RET_IP_);
3294
3295 ASSERT(ext_diff > 0);
3296 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
3297 new_size = (nextents - ext_diff) * sizeof(xfs_bmbt_rec_t);
3298
3299 if (new_size == 0) {
3300 xfs_iext_destroy(ifp);
3301 } else if (ifp->if_flags & XFS_IFEXTIREC) {
3302 xfs_iext_remove_indirect(ifp, idx, ext_diff);
3303 } else if (ifp->if_real_bytes) {
3304 xfs_iext_remove_direct(ifp, idx, ext_diff);
3305 } else {
3306 xfs_iext_remove_inline(ifp, idx, ext_diff);
3307 }
3308 ifp->if_bytes = new_size;
3309 }
3310
3311 /*
3312 * This removes ext_diff extents from the inline buffer, beginning
3313 * at extent index idx.
3314 */
3315 void
3316 xfs_iext_remove_inline(
3317 xfs_ifork_t *ifp, /* inode fork pointer */
3318 xfs_extnum_t idx, /* index to begin removing exts */
3319 int ext_diff) /* number of extents to remove */
3320 {
3321 int nextents; /* number of extents in file */
3322
3323 ASSERT(!(ifp->if_flags & XFS_IFEXTIREC));
3324 ASSERT(idx < XFS_INLINE_EXTS);
3325 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
3326 ASSERT(((nextents - ext_diff) > 0) &&
3327 (nextents - ext_diff) < XFS_INLINE_EXTS);
3328
3329 if (idx + ext_diff < nextents) {
3330 memmove(&ifp->if_u2.if_inline_ext[idx],
3331 &ifp->if_u2.if_inline_ext[idx + ext_diff],
3332 (nextents - (idx + ext_diff)) *
3333 sizeof(xfs_bmbt_rec_t));
3334 memset(&ifp->if_u2.if_inline_ext[nextents - ext_diff],
3335 0, ext_diff * sizeof(xfs_bmbt_rec_t));
3336 } else {
3337 memset(&ifp->if_u2.if_inline_ext[idx], 0,
3338 ext_diff * sizeof(xfs_bmbt_rec_t));
3339 }
3340 }
3341
3342 /*
3343 * This removes ext_diff extents from a linear (direct) extent list,
3344 * beginning at extent index idx. If the extents are being removed
3345 * from the end of the list (ie. truncate) then we just need to re-
3346 * allocate the list to remove the extra space. Otherwise, if the
3347 * extents are being removed from the middle of the existing extent
3348 * entries, then we first need to move the extent records beginning
3349 * at idx + ext_diff up in the list to overwrite the records being
3350 * removed, then remove the extra space via kmem_realloc.
3351 */
3352 void
3353 xfs_iext_remove_direct(
3354 xfs_ifork_t *ifp, /* inode fork pointer */
3355 xfs_extnum_t idx, /* index to begin removing exts */
3356 int ext_diff) /* number of extents to remove */
3357 {
3358 xfs_extnum_t nextents; /* number of extents in file */
3359 int new_size; /* size of extents after removal */
3360
3361 ASSERT(!(ifp->if_flags & XFS_IFEXTIREC));
3362 new_size = ifp->if_bytes -
3363 (ext_diff * sizeof(xfs_bmbt_rec_t));
3364 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
3365
3366 if (new_size == 0) {
3367 xfs_iext_destroy(ifp);
3368 return;
3369 }
3370 /* Move extents up in the list (if needed) */
3371 if (idx + ext_diff < nextents) {
3372 memmove(&ifp->if_u1.if_extents[idx],
3373 &ifp->if_u1.if_extents[idx + ext_diff],
3374 (nextents - (idx + ext_diff)) *
3375 sizeof(xfs_bmbt_rec_t));
3376 }
3377 memset(&ifp->if_u1.if_extents[nextents - ext_diff],
3378 0, ext_diff * sizeof(xfs_bmbt_rec_t));
3379 /*
3380 * Reallocate the direct extent list. If the extents
3381 * will fit inside the inode then xfs_iext_realloc_direct
3382 * will switch from direct to inline extent allocation
3383 * mode for us.
3384 */
3385 xfs_iext_realloc_direct(ifp, new_size);
3386 ifp->if_bytes = new_size;
3387 }
3388
3389 /*
3390 * This is called when incore extents are being removed from the
3391 * indirection array and the extents being removed span multiple extent
3392 * buffers. The idx parameter contains the file extent index where we
3393 * want to begin removing extents, and the count parameter contains
3394 * how many extents need to be removed.
3395 *
3396 * |-------| |-------|
3397 * | nex1 | | | nex1 - number of extents before idx
3398 * |-------| | count |
3399 * | | | | count - number of extents being removed at idx
3400 * | count | |-------|
3401 * | | | nex2 | nex2 - number of extents after idx + count
3402 * |-------| |-------|
3403 */
3404 void
3405 xfs_iext_remove_indirect(
3406 xfs_ifork_t *ifp, /* inode fork pointer */
3407 xfs_extnum_t idx, /* index to begin removing extents */
3408 int count) /* number of extents to remove */
3409 {
3410 xfs_ext_irec_t *erp; /* indirection array pointer */
3411 int erp_idx = 0; /* indirection array index */
3412 xfs_extnum_t ext_cnt; /* extents left to remove */
3413 xfs_extnum_t ext_diff; /* extents to remove in current list */
3414 xfs_extnum_t nex1; /* number of extents before idx */
3415 xfs_extnum_t nex2; /* extents after idx + count */
3416 int page_idx = idx; /* index in target extent list */
3417
3418 ASSERT(ifp->if_flags & XFS_IFEXTIREC);
3419 erp = xfs_iext_idx_to_irec(ifp, &page_idx, &erp_idx, 0);
3420 ASSERT(erp != NULL);
3421 nex1 = page_idx;
3422 ext_cnt = count;
3423 while (ext_cnt) {
3424 nex2 = MAX((erp->er_extcount - (nex1 + ext_cnt)), 0);
3425 ext_diff = MIN(ext_cnt, (erp->er_extcount - nex1));
3426 /*
3427 * Check for deletion of entire list;
3428 * xfs_iext_irec_remove() updates extent offsets.
3429 */
3430 if (ext_diff == erp->er_extcount) {
3431 xfs_iext_irec_remove(ifp, erp_idx);
3432 ext_cnt -= ext_diff;
3433 nex1 = 0;
3434 if (ext_cnt) {
3435 ASSERT(erp_idx < ifp->if_real_bytes /
3436 XFS_IEXT_BUFSZ);
3437 erp = &ifp->if_u1.if_ext_irec[erp_idx];
3438 nex1 = 0;
3439 continue;
3440 } else {
3441 break;
3442 }
3443 }
3444 /* Move extents up (if needed) */
3445 if (nex2) {
3446 memmove(&erp->er_extbuf[nex1],
3447 &erp->er_extbuf[nex1 + ext_diff],
3448 nex2 * sizeof(xfs_bmbt_rec_t));
3449 }
3450 /* Zero out rest of page */
3451 memset(&erp->er_extbuf[nex1 + nex2], 0, (XFS_IEXT_BUFSZ -
3452 ((nex1 + nex2) * sizeof(xfs_bmbt_rec_t))));
3453 /* Update remaining counters */
3454 erp->er_extcount -= ext_diff;
3455 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, -ext_diff);
3456 ext_cnt -= ext_diff;
3457 nex1 = 0;
3458 erp_idx++;
3459 erp++;
3460 }
3461 ifp->if_bytes -= count * sizeof(xfs_bmbt_rec_t);
3462 xfs_iext_irec_compact(ifp);
3463 }
3464
3465 /*
3466 * Create, destroy, or resize a linear (direct) block of extents.
3467 */
3468 void
3469 xfs_iext_realloc_direct(
3470 xfs_ifork_t *ifp, /* inode fork pointer */
3471 int new_size) /* new size of extents */
3472 {
3473 int rnew_size; /* real new size of extents */
3474
3475 rnew_size = new_size;
3476
3477 ASSERT(!(ifp->if_flags & XFS_IFEXTIREC) ||
3478 ((new_size >= 0) && (new_size <= XFS_IEXT_BUFSZ) &&
3479 (new_size != ifp->if_real_bytes)));
3480
3481 /* Free extent records */
3482 if (new_size == 0) {
3483 xfs_iext_destroy(ifp);
3484 }
3485 /* Resize direct extent list and zero any new bytes */
3486 else if (ifp->if_real_bytes) {
3487 /* Check if extents will fit inside the inode */
3488 if (new_size <= XFS_INLINE_EXTS * sizeof(xfs_bmbt_rec_t)) {
3489 xfs_iext_direct_to_inline(ifp, new_size /
3490 (uint)sizeof(xfs_bmbt_rec_t));
3491 ifp->if_bytes = new_size;
3492 return;
3493 }
3494 		if (!is_power_of_2(new_size)) {
3495 rnew_size = roundup_pow_of_two(new_size);
3496 }
3497 if (rnew_size != ifp->if_real_bytes) {
3498 ifp->if_u1.if_extents =
3499 kmem_realloc(ifp->if_u1.if_extents,
3500 rnew_size,
3501 ifp->if_real_bytes, KM_NOFS);
3502 }
3503 if (rnew_size > ifp->if_real_bytes) {
3504 memset(&ifp->if_u1.if_extents[ifp->if_bytes /
3505 (uint)sizeof(xfs_bmbt_rec_t)], 0,
3506 rnew_size - ifp->if_real_bytes);
3507 }
3508 }
3509 /*
3510 * Switch from the inline extent buffer to a direct
3511 * extent list. Be sure to include the inline extent
3512 * bytes in new_size.
3513 */
3514 else {
3515 new_size += ifp->if_bytes;
3516 if (!is_power_of_2(new_size)) {
3517 rnew_size = roundup_pow_of_two(new_size);
3518 }
3519 xfs_iext_inline_to_direct(ifp, rnew_size);
3520 }
3521 ifp->if_real_bytes = rnew_size;
3522 ifp->if_bytes = new_size;
3523 }
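
/*
 * Example of the rounding above: growing a direct list to hold 3 records
 * asks for new_size = 3 * 16 = 48 bytes, which roundup_pow_of_two()
 * turns into rnew_size = 64; if_real_bytes (64) and if_bytes (48)
 * deliberately diverge so that repeated small grows avoid a realloc.
 */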
3524
3525 /*
3526 * Switch from linear (direct) extent records to inline buffer.
3527 */
3528 void
3529 xfs_iext_direct_to_inline(
3530 xfs_ifork_t *ifp, /* inode fork pointer */
3531 xfs_extnum_t nextents) /* number of extents in file */
3532 {
3533 ASSERT(ifp->if_flags & XFS_IFEXTENTS);
3534 ASSERT(nextents <= XFS_INLINE_EXTS);
3535 /*
3536 * The inline buffer was zeroed when we switched
3537 * from inline to direct extent allocation mode,
3538 * so we don't need to clear it here.
3539 */
3540 memcpy(ifp->if_u2.if_inline_ext, ifp->if_u1.if_extents,
3541 nextents * sizeof(xfs_bmbt_rec_t));
3542 kmem_free(ifp->if_u1.if_extents);
3543 ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext;
3544 ifp->if_real_bytes = 0;
3545 }

/*
 * Switch from inline buffer to linear (direct) extent records.
 * new_size should already be rounded up to the next power of 2
 * by the caller (when appropriate), so use new_size as it is.
 * However, since new_size may be rounded up, we can't update
 * if_bytes here. It is the caller's responsibility to update
 * if_bytes upon return.
 */
void
xfs_iext_inline_to_direct(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	int		new_size)	/* new extent list size, in bytes */
{
	ifp->if_u1.if_extents = kmem_alloc(new_size, KM_NOFS);
	memset(ifp->if_u1.if_extents, 0, new_size);
	if (ifp->if_bytes) {
		memcpy(ifp->if_u1.if_extents, ifp->if_u2.if_inline_ext,
			ifp->if_bytes);
		memset(ifp->if_u2.if_inline_ext, 0, XFS_INLINE_EXTS *
			sizeof(xfs_bmbt_rec_t));
	}
	ifp->if_real_bytes = new_size;
}

/*
 * Resize an extent indirection array to new_size bytes.
 */
STATIC void
xfs_iext_realloc_indirect(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	int		new_size)	/* new indirection array size */
{
	int		nlists;		/* number of irec's (ex lists) */
	int		size;		/* current indirection array size */

	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
	size = nlists * sizeof(xfs_ext_irec_t);
	ASSERT(ifp->if_real_bytes);
	ASSERT((new_size >= 0) && (new_size != size));
	if (new_size == 0) {
		xfs_iext_destroy(ifp);
	} else {
		ifp->if_u1.if_ext_irec = (xfs_ext_irec_t *)
			kmem_realloc(ifp->if_u1.if_ext_irec,
				new_size, size, KM_NOFS);
	}
}

/*
 * Switch from indirection array to linear (direct) extent allocations.
 */
STATIC void
xfs_iext_indirect_to_direct(
	xfs_ifork_t	*ifp)		/* inode fork pointer */
{
	xfs_bmbt_rec_host_t *ep;	/* extent record pointer */
	xfs_extnum_t	nextents;	/* number of extents in file */
	int		size;		/* size of file extents */

	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
	ASSERT(nextents <= XFS_LINEAR_EXTS);
	size = nextents * sizeof(xfs_bmbt_rec_t);

	xfs_iext_irec_compact_pages(ifp);
	ASSERT(ifp->if_real_bytes == XFS_IEXT_BUFSZ);

	ep = ifp->if_u1.if_ext_irec->er_extbuf;
	kmem_free(ifp->if_u1.if_ext_irec);
	ifp->if_flags &= ~XFS_IFEXTIREC;
	ifp->if_u1.if_extents = ep;
	ifp->if_bytes = size;
	if (nextents < XFS_LINEAR_EXTS) {
		xfs_iext_realloc_direct(ifp, size);
	}
}
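/*
 * Note on the trailing trim: the adopted page still spans a full
 * XFS_IEXT_BUFSZ (the ASSERT above), so when fewer than
 * XFS_LINEAR_EXTS records are live, xfs_iext_realloc_direct() shrinks
 * the buffer to the power-of-two size that actually fits them (or
 * converts to the inline buffer when everything fits in the inode).
 */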

/*
 * Free incore file extents.
 */
void
xfs_iext_destroy(
	xfs_ifork_t	*ifp)		/* inode fork pointer */
{
	if (ifp->if_flags & XFS_IFEXTIREC) {
		int	erp_idx;
		int	nlists;

		nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
		for (erp_idx = nlists - 1; erp_idx >= 0; erp_idx--) {
			xfs_iext_irec_remove(ifp, erp_idx);
		}
		ifp->if_flags &= ~XFS_IFEXTIREC;
	} else if (ifp->if_real_bytes) {
		kmem_free(ifp->if_u1.if_extents);
	} else if (ifp->if_bytes) {
		memset(ifp->if_u2.if_inline_ext, 0, XFS_INLINE_EXTS *
			sizeof(xfs_bmbt_rec_t));
	}
	ifp->if_u1.if_extents = NULL;
	ifp->if_real_bytes = 0;
	ifp->if_bytes = 0;
}

/*
 * Return a pointer to the extent record for file system block bno.
 */
xfs_bmbt_rec_host_t *			/* pointer to found extent record */
xfs_iext_bno_to_ext(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	xfs_fileoff_t	bno,		/* block number to search for */
	xfs_extnum_t	*idxp)		/* index of target extent */
{
	xfs_bmbt_rec_host_t *base;	/* pointer to first extent */
	xfs_filblks_t	blockcount = 0;	/* number of blocks in extent */
	xfs_bmbt_rec_host_t *ep = NULL;	/* pointer to target extent */
	xfs_ext_irec_t	*erp = NULL;	/* indirection array pointer */
	int		high;		/* upper boundary in search */
	xfs_extnum_t	idx = 0;	/* index of target extent */
	int		low;		/* lower boundary in search */
	xfs_extnum_t	nextents;	/* number of file extents */
	xfs_fileoff_t	startoff = 0;	/* start offset of extent */

	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
	if (nextents == 0) {
		*idxp = 0;
		return NULL;
	}
	low = 0;
	if (ifp->if_flags & XFS_IFEXTIREC) {
		/* Find target extent list */
		int	erp_idx = 0;
		erp = xfs_iext_bno_to_irec(ifp, bno, &erp_idx);
		base = erp->er_extbuf;
		high = erp->er_extcount - 1;
	} else {
		base = ifp->if_u1.if_extents;
		high = nextents - 1;
	}
	/* Binary search extent records */
	while (low <= high) {
		idx = (low + high) >> 1;
		ep = base + idx;
		startoff = xfs_bmbt_get_startoff(ep);
		blockcount = xfs_bmbt_get_blockcount(ep);
		if (bno < startoff) {
			high = idx - 1;
		} else if (bno >= startoff + blockcount) {
			low = idx + 1;
		} else {
			/* Convert back to file-based extent index */
			if (ifp->if_flags & XFS_IFEXTIREC) {
				idx += erp->er_extoff;
			}
			*idxp = idx;
			return ep;
		}
	}
	/* Convert back to file-based extent index */
	if (ifp->if_flags & XFS_IFEXTIREC) {
		idx += erp->er_extoff;
	}
	if (bno >= startoff + blockcount) {
		if (++idx == nextents) {
			ep = NULL;
		} else {
			ep = xfs_iext_get_ext(ifp, idx);
		}
	}
	*idxp = idx;
	return ep;
}
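/*
 * Miss semantics, per the fall-through path above: when bno lands in a
 * hole between extents, the function returns the first extent that
 * starts after bno and stores its index in *idxp; past the last extent
 * it returns NULL with *idxp == nextents. An illustrative caller
 * (a sketch, not from this file) can therefore distinguish the cases:
 *
 *	ep = xfs_iext_bno_to_ext(ifp, bno, &idx);
 *	if (ep && xfs_bmbt_get_startoff(ep) <= bno)
 *		... bno is mapped by this extent ...
 *	else
 *		... bno sits in a hole or beyond the last extent ...
 */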

/*
 * Return a pointer to the indirection array entry containing the
 * extent record for filesystem block bno. Store the index of the
 * target irec in *erp_idxp.
 */
xfs_ext_irec_t *			/* pointer to found extent record */
xfs_iext_bno_to_irec(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	xfs_fileoff_t	bno,		/* block number to search for */
	int		*erp_idxp)	/* irec index of target ext list */
{
	xfs_ext_irec_t	*erp = NULL;	/* indirection array pointer */
	xfs_ext_irec_t	*erp_next;	/* next indirection array entry */
	int		erp_idx;	/* indirection array index */
	int		nlists;		/* number of extent irec's (lists) */
	int		high;		/* binary search upper limit */
	int		low;		/* binary search lower limit */

	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
	erp_idx = 0;
	low = 0;
	high = nlists - 1;
	while (low <= high) {
		erp_idx = (low + high) >> 1;
		erp = &ifp->if_u1.if_ext_irec[erp_idx];
		erp_next = erp_idx < nlists - 1 ? erp + 1 : NULL;
		if (bno < xfs_bmbt_get_startoff(erp->er_extbuf)) {
			high = erp_idx - 1;
		} else if (erp_next && bno >=
			   xfs_bmbt_get_startoff(erp_next->er_extbuf)) {
			low = erp_idx + 1;
		} else {
			break;
		}
	}
	*erp_idxp = erp_idx;
	return erp;
}
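/*
 * This search keys each irec page by the startoff of its first record,
 * so it settles on the last page whose first extent starts at or
 * before bno; the caller (e.g. xfs_iext_bno_to_ext() above) then
 * binary-searches within that page. Unlike the per-extent search it
 * cannot miss: some page is always returned, even when bno precedes
 * every extent.
 */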

/*
 * Return a pointer to the indirection array entry containing the
 * extent record at file extent index *idxp. Store the index of the
 * target irec in *erp_idxp and store the page index of the target
 * extent record in *idxp.
 */
xfs_ext_irec_t *
xfs_iext_idx_to_irec(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	xfs_extnum_t	*idxp,		/* extent index (file -> page) */
	int		*erp_idxp,	/* pointer to target irec */
	int		realloc)	/* new bytes were just added */
{
	xfs_ext_irec_t	*prev;		/* pointer to previous irec */
	xfs_ext_irec_t	*erp = NULL;	/* pointer to current irec */
	int		erp_idx;	/* indirection array index */
	int		nlists;		/* number of irec's (ex lists) */
	int		high;		/* binary search upper limit */
	int		low;		/* binary search lower limit */
	xfs_extnum_t	page_idx = *idxp; /* extent index in target list */

	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
	ASSERT(page_idx >= 0);
	ASSERT(page_idx <= ifp->if_bytes / sizeof(xfs_bmbt_rec_t));
	ASSERT(page_idx < ifp->if_bytes / sizeof(xfs_bmbt_rec_t) || realloc);

	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
	erp_idx = 0;
	low = 0;
	high = nlists - 1;

	/* Binary search extent irec's */
	while (low <= high) {
		erp_idx = (low + high) >> 1;
		erp = &ifp->if_u1.if_ext_irec[erp_idx];
		prev = erp_idx > 0 ? erp - 1 : NULL;
		if (page_idx < erp->er_extoff || (page_idx == erp->er_extoff &&
		    realloc && prev && prev->er_extcount < XFS_LINEAR_EXTS)) {
			high = erp_idx - 1;
		} else if (page_idx > erp->er_extoff + erp->er_extcount ||
			   (page_idx == erp->er_extoff + erp->er_extcount &&
			    !realloc)) {
			low = erp_idx + 1;
		} else if (page_idx == erp->er_extoff + erp->er_extcount &&
			   erp->er_extcount == XFS_LINEAR_EXTS) {
			ASSERT(realloc);
			page_idx = 0;
			erp_idx++;
			erp = erp_idx < nlists ? erp + 1 : NULL;
			break;
		} else {
			page_idx -= erp->er_extoff;
			break;
		}
	}
	*idxp = page_idx;
	*erp_idxp = erp_idx;
	return erp;
}
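/*
 * Worked example of the realloc-mode edge cases (illustrative numbers,
 * taking XFS_LINEAR_EXTS as 256 records per page, i.e. XFS_IEXT_BUFSZ
 * over the record size): with two pages holding 256 + 10 records, an
 * insert at file index 256 cannot go into the full first page, so the
 * third branch advances to page 1 and returns *idxp == 0; a lookup for
 * an insert at file index 266 instead returns page 1 with *idxp == 10,
 * an append at the end of that page's records.
 */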

/*
 * Allocate and initialize an indirection array once the space needed
 * for incore extents increases above XFS_IEXT_BUFSZ.
 */
void
xfs_iext_irec_init(
	xfs_ifork_t	*ifp)		/* inode fork pointer */
{
	xfs_ext_irec_t	*erp;		/* indirection array pointer */
	xfs_extnum_t	nextents;	/* number of extents in file */

	ASSERT(!(ifp->if_flags & XFS_IFEXTIREC));
	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
	ASSERT(nextents <= XFS_LINEAR_EXTS);

	erp = kmem_alloc(sizeof(xfs_ext_irec_t), KM_NOFS);

	if (nextents == 0) {
		ifp->if_u1.if_extents = kmem_alloc(XFS_IEXT_BUFSZ, KM_NOFS);
	} else if (!ifp->if_real_bytes) {
		xfs_iext_inline_to_direct(ifp, XFS_IEXT_BUFSZ);
	} else if (ifp->if_real_bytes < XFS_IEXT_BUFSZ) {
		xfs_iext_realloc_direct(ifp, XFS_IEXT_BUFSZ);
	}
	erp->er_extbuf = ifp->if_u1.if_extents;
	erp->er_extcount = nextents;
	erp->er_extoff = 0;

	ifp->if_flags |= XFS_IFEXTIREC;
	ifp->if_real_bytes = XFS_IEXT_BUFSZ;
	ifp->if_bytes = nextents * sizeof(xfs_bmbt_rec_t);
	ifp->if_u1.if_ext_irec = erp;
}

/*
 * Allocate and initialize a new entry in the indirection array.
 */
xfs_ext_irec_t *
xfs_iext_irec_new(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	int		erp_idx)	/* index for new irec */
{
	xfs_ext_irec_t	*erp;		/* indirection array pointer */
	int		i;		/* loop counter */
	int		nlists;		/* number of irec's (ex lists) */

	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;

	/* Resize indirection array */
	xfs_iext_realloc_indirect(ifp, ++nlists *
				  sizeof(xfs_ext_irec_t));
	/*
	 * Move records down in the array so the
	 * new page can use erp_idx.
	 */
	erp = ifp->if_u1.if_ext_irec;
	for (i = nlists - 1; i > erp_idx; i--) {
		memmove(&erp[i], &erp[i-1], sizeof(xfs_ext_irec_t));
	}
	ASSERT(i == erp_idx);

	/* Initialize new extent record */
	erp = ifp->if_u1.if_ext_irec;
	erp[erp_idx].er_extbuf = kmem_alloc(XFS_IEXT_BUFSZ, KM_NOFS);
	ifp->if_real_bytes = nlists * XFS_IEXT_BUFSZ;
	memset(erp[erp_idx].er_extbuf, 0, XFS_IEXT_BUFSZ);
	erp[erp_idx].er_extcount = 0;
	erp[erp_idx].er_extoff = erp_idx > 0 ?
		erp[erp_idx-1].er_extoff + erp[erp_idx-1].er_extcount : 0;
	return &erp[erp_idx];
}
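/*
 * Insertion example (illustrative numbers): with pages of 256, 40 and
 * 12 records, xfs_iext_irec_new(ifp, 1) shifts the 40- and 12-record
 * entries to slots 2 and 3 and creates an empty page at slot 1 with
 * er_extoff = 256, the count of everything before it. The new page
 * starts with er_extcount = 0; the caller accounts for the records it
 * then adds, updating later er_extoffs via
 * xfs_iext_irec_update_extoffs() as needed.
 */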

/*
 * Remove a record from the indirection array.
 */
void
xfs_iext_irec_remove(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	int		erp_idx)	/* irec index to remove */
{
	xfs_ext_irec_t	*erp;		/* indirection array pointer */
	int		i;		/* loop counter */
	int		nlists;		/* number of irec's (ex lists) */

	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
	erp = &ifp->if_u1.if_ext_irec[erp_idx];
	if (erp->er_extbuf) {
		xfs_iext_irec_update_extoffs(ifp, erp_idx + 1,
			-erp->er_extcount);
		kmem_free(erp->er_extbuf);
	}
	/* Compact extent records */
	erp = ifp->if_u1.if_ext_irec;
	for (i = erp_idx; i < nlists - 1; i++) {
		memmove(&erp[i], &erp[i+1], sizeof(xfs_ext_irec_t));
	}
	/*
	 * Manually free the last extent record from the indirection
	 * array. A call to xfs_iext_realloc_indirect() with a size
	 * of zero would result in a call to xfs_iext_destroy() which
	 * would in turn call this function again, creating a nasty
	 * infinite loop.
	 */
	if (--nlists) {
		xfs_iext_realloc_indirect(ifp,
			nlists * sizeof(xfs_ext_irec_t));
	} else {
		kmem_free(ifp->if_u1.if_ext_irec);
	}
	ifp->if_real_bytes = nlists * XFS_IEXT_BUFSZ;
}
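/*
 * The er_extbuf NULL check above is not defensive noise: a caller may
 * free and NULL the page buffer first precisely so that this function
 * skips the er_extoff adjustment, which is exactly what
 * xfs_iext_irec_compact_pages() below does after merging a page into
 * its predecessor.
 */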

/*
 * This is called to clean up large amounts of unused memory allocated
 * by the indirection array. Before compacting anything though, verify
 * that the indirection array is still needed and switch back to the
 * linear extent list (or even the inline buffer) if possible. The
 * compaction policy is as follows:
 *
 * Full Compaction: Extents fit into a single page (or inline buffer)
 * Partial Compaction: Extents occupy less than 50% of allocated space
 * No Compaction: Extents occupy at least 50% of allocated space
 */
void
xfs_iext_irec_compact(
	xfs_ifork_t	*ifp)		/* inode fork pointer */
{
	xfs_extnum_t	nextents;	/* number of extents in file */
	int		nlists;		/* number of irec's (ex lists) */

	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);

	if (nextents == 0) {
		xfs_iext_destroy(ifp);
	} else if (nextents <= XFS_INLINE_EXTS) {
		xfs_iext_indirect_to_direct(ifp);
		xfs_iext_direct_to_inline(ifp, nextents);
	} else if (nextents <= XFS_LINEAR_EXTS) {
		xfs_iext_indirect_to_direct(ifp);
	} else if (nextents < (nlists * XFS_LINEAR_EXTS) >> 1) {
		xfs_iext_irec_compact_pages(ifp);
	}
}
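/*
 * Policy arithmetic, worked through (again taking XFS_LINEAR_EXTS as
 * 256 records per page): with nlists = 4 pages the capacity is 1024
 * records, so 300 live extents (below the 50% mark of 512) trigger the
 * page merge, while 600 leave the layout alone; 200 extents would
 * instead fall into the single-page case and drop the indirection
 * array entirely.
 */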

/*
 * Combine extents from neighboring extent pages.
 */
void
xfs_iext_irec_compact_pages(
	xfs_ifork_t	*ifp)		/* inode fork pointer */
{
	xfs_ext_irec_t	*erp, *erp_next;/* pointers to irec entries */
	int		erp_idx = 0;	/* indirection array index */
	int		nlists;		/* number of irec's (ex lists) */

	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
	while (erp_idx < nlists - 1) {
		erp = &ifp->if_u1.if_ext_irec[erp_idx];
		erp_next = erp + 1;
		if (erp_next->er_extcount <=
		    (XFS_LINEAR_EXTS - erp->er_extcount)) {
			memcpy(&erp->er_extbuf[erp->er_extcount],
				erp_next->er_extbuf, erp_next->er_extcount *
				sizeof(xfs_bmbt_rec_t));
			erp->er_extcount += erp_next->er_extcount;
			/*
			 * Free page before removing extent record
			 * so er_extoffs don't get modified in
			 * xfs_iext_irec_remove.
			 */
			kmem_free(erp_next->er_extbuf);
			erp_next->er_extbuf = NULL;
			xfs_iext_irec_remove(ifp, erp_idx + 1);
			nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
		} else {
			erp_idx++;
		}
	}
}
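/*
 * erp_idx deliberately stays put after a successful merge so the grown
 * page can absorb its new neighbour too: with pages of 100, 60 and 50
 * records (256-record capacity, as in the earlier examples), page 1
 * folds into page 0 to give 160, and the next pass folds page 2's 50
 * records into the same page, ending with a single page of 210.
 */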

/*
 * This is called to update the er_extoff field in the indirection
 * array when extents have been added or removed from one of the
 * extent lists. erp_idx contains the irec index to begin updating
 * at and ext_diff contains the number of extents that were added
 * or removed.
 */
void
xfs_iext_irec_update_extoffs(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	int		erp_idx,	/* irec index to update */
	int		ext_diff)	/* number of new extents */
{
	int		i;		/* loop counter */
	int		nlists;		/* number of irec's (ex lists) */

	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
	for (i = erp_idx; i < nlists; i++) {
		ifp->if_u1.if_ext_irec[i].er_extoff += ext_diff;
	}
}
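/*
 * ext_diff is signed, which lets this one helper serve both
 * directions: after removing three extents from page 1, for example,
 * the call is xfs_iext_irec_update_extoffs(ifp, 2, -3), the shape
 * used by xfs_iext_irec_remove() and the indirect removal path above.
 */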

/*
 * Test whether it is appropriate to check an inode for and free post-EOF
 * blocks. The 'force' parameter determines whether we should also consider
 * regular files that are marked preallocated or append-only.
 */
bool
xfs_can_free_eofblocks(struct xfs_inode *ip, bool force)
{
	/* prealloc/delalloc exists only on regular files */
	if (!S_ISREG(ip->i_d.di_mode))
		return false;

	/*
	 * Zero-sized files with no cached pages and no delalloc blocks
	 * will not have speculative prealloc/delalloc blocks to remove.
	 */
	if (VFS_I(ip)->i_size == 0 &&
	    VN_CACHED(VFS_I(ip)) == 0 &&
	    ip->i_delayed_blks == 0)
		return false;

	/* If we haven't read in the extent list, then don't do it now. */
	if (!(ip->i_df.if_flags & XFS_IFEXTENTS))
		return false;

	/*
	 * Do not free real preallocated or append-only files unless the file
	 * has delalloc blocks and we are forced to remove them.
	 */
	if (ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND))
		if (!force || ip->i_delayed_blks == 0)
			return false;

	return true;
}
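/*
 * Illustrative caller pattern (a sketch; xfs_free_eofblocks() and its
 * argument list are assumptions about the surrounding codebase, not
 * taken from this function): trimming speculative preallocation on
 * close might look like
 *
 *	if (xfs_can_free_eofblocks(ip, false))
 *		error = xfs_free_eofblocks(mp, ip, true);
 *
 * with force = true reserved for contexts such as inode inactivation,
 * where even PREALLOC/APPEND files must give up their delalloc blocks.
 */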