/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_inode.h"
#include "xfs_dinode.h"
#include "xfs_error.h"
#include "xfs_filestream.h"
#include "xfs_vnodeops.h"
#include "xfs_inode_item.h"
#include "xfs_quota.h"
#include "xfs_trace.h"
#include "xfs_fsops.h"

#include <linux/kthread.h>
#include <linux/freezer.h>

struct workqueue_struct	*xfs_syncd_wq;	/* sync workqueue */

/*
 * The inode lookup is done in batches to keep the amount of lock traffic and
 * radix tree lookups to a minimum. The batch size is a trade off between
 * lookup reduction and stack usage. This is in the reclaim path, so we can't
 * be too greedy.
 */
#define XFS_LOOKUP_BATCH	32
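
/*
 * Sizing note (a sketch of the trade-off named above): with a batch of 32,
 * the on-stack inode pointer array used by the walkers below costs
 * 32 * sizeof(void *) - 256 bytes on a 64-bit kernel - which is the stack
 * footprint being traded against per-inode radix tree lookup overhead.
 */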

STATIC int
xfs_inode_ag_walk_grab(
	struct xfs_inode	*ip)
{
	struct inode		*inode = VFS_I(ip);

	ASSERT(rcu_read_lock_held());

	/*
	 * check for stale RCU freed inode
	 *
	 * If the inode has been reallocated, it doesn't matter if it's not in
	 * the AG we are walking - we are walking for writeback, so if it
	 * passes all the "valid inode" checks and is dirty, then we'll write
	 * it back anyway.  If it has been reallocated and is still being
	 * initialised, the XFS_INEW check below will catch it.
	 */
	spin_lock(&ip->i_flags_lock);
	if (!ip->i_ino)
		goto out_unlock_noent;

	/* avoid new or reclaimable inodes. Leave for reclaim code to flush */
	if (__xfs_iflags_test(ip, XFS_INEW | XFS_IRECLAIMABLE | XFS_IRECLAIM))
		goto out_unlock_noent;
	spin_unlock(&ip->i_flags_lock);

	/* nothing to sync during shutdown */
	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return EFSCORRUPTED;

	/* If we can't grab the inode, it must be on its way to reclaim. */
	if (!igrab(inode))
		return ENOENT;

	if (is_bad_inode(inode)) {
		IRELE(ip);
		return ENOENT;
	}

	/* inode is valid */
	return 0;

out_unlock_noent:
	spin_unlock(&ip->i_flags_lock);
	return ENOENT;
}
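
/*
 * Convention note: XFS of this era returns positive errno values from its
 * internal functions (hence the bare ENOENT/EFSCORRUPTED returns above);
 * they are only negated at the boundary to the VFS.
 */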

STATIC int
xfs_inode_ag_walk(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	int			(*execute)(struct xfs_inode *ip,
					   struct xfs_perag *pag, int flags),
	int			flags)
{
	uint32_t		first_index;
	int			last_error = 0;
	int			skipped;
	int			done;
	int			nr_found;

restart:
	done = 0;
	skipped = 0;
	first_index = 0;
	nr_found = 0;
	do {
		struct xfs_inode *batch[XFS_LOOKUP_BATCH];
		int		error = 0;
		int		i;

		rcu_read_lock();
		nr_found = radix_tree_gang_lookup(&pag->pag_ici_root,
					(void **)batch, first_index,
					XFS_LOOKUP_BATCH);
		if (!nr_found) {
			rcu_read_unlock();
			break;
		}

		/*
		 * Grab the inodes before we drop the lock. If we found
		 * nothing, nr == 0 and the loop will be skipped.
		 */
		for (i = 0; i < nr_found; i++) {
			struct xfs_inode *ip = batch[i];

			if (done || xfs_inode_ag_walk_grab(ip))
				batch[i] = NULL;

			/*
			 * Update the index for the next lookup. Catch
			 * overflows into the next AG range which can occur if
			 * we have inodes in the last block of the AG and we
			 * are currently pointing to the last inode.
			 *
			 * Because we may see inodes that are from the wrong AG
			 * due to RCU freeing and reallocation, only update the
			 * index if it lies in this AG. It was a race that led
			 * us to see this inode, so another lookup from the
			 * same index will not find it again.
			 */
			if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag->pag_agno)
				continue;
			first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
			if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
				done = 1;
		}

		/* unlock now we've grabbed the inodes. */
		rcu_read_unlock();

		for (i = 0; i < nr_found; i++) {
			if (!batch[i])
				continue;
			error = execute(batch[i], pag, flags);
			IRELE(batch[i]);
			if (error == EAGAIN) {
				skipped++;
				continue;
			}
			if (error && last_error != EFSCORRUPTED)
				last_error = error;
		}

		/* bail out if the filesystem is corrupted. */
		if (error == EFSCORRUPTED)
			break;

	} while (nr_found && !done);

	if (skipped) {
		delay(1);
		goto restart;
	}
	return last_error;
}
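
/*
 * Worked example for the overflow check above: if the inode just grabbed has
 * the largest possible AG-relative index, XFS_INO_TO_AGINO(mp, ip->i_ino + 1)
 * wraps the agino back to 0. Without the first_index < current-agino test the
 * walk would restart this AG from index 0 and never terminate; with it, the
 * wrap is detected and the walk finishes via done = 1.
 */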

int
xfs_inode_ag_iterator(
	struct xfs_mount	*mp,
	int			(*execute)(struct xfs_inode *ip,
					   struct xfs_perag *pag, int flags),
	int			flags)
{
	struct xfs_perag	*pag;
	int			error = 0;
	int			last_error = 0;
	xfs_agnumber_t		ag;

	ag = 0;
	while ((pag = xfs_perag_get(mp, ag))) {
		ag = pag->pag_agno + 1;
		error = xfs_inode_ag_walk(mp, pag, execute, flags);
		xfs_perag_put(pag);
		if (error) {
			last_error = error;
			if (error == EFSCORRUPTED)
				break;
		}
	}
	return XFS_ERROR(last_error);
}
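
/*
 * Usage sketch (illustrative only, not an in-tree callback): walk callbacks
 * have the shape
 *
 *	STATIC int
 *	xfs_example_cb(struct xfs_inode *ip, struct xfs_perag *pag, int flags)
 *	{
 *		// ip is referenced via igrab(); do per-inode work here.
 *		return 0;	// or EAGAIN to be retried,
 *				// EFSCORRUPTED to abort the walk
 *	}
 *
 * and are driven as xfs_inode_ag_iterator(mp, xfs_example_cb, flags), as
 * xfs_sync_data() and xfs_sync_attr() do below.
 */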

STATIC int
xfs_sync_inode_data(
	struct xfs_inode	*ip,
	struct xfs_perag	*pag,
	int			flags)
{
	struct inode		*inode = VFS_I(ip);
	struct address_space	*mapping = inode->i_mapping;
	int			error = 0;

	if (!mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
		goto out_wait;

	if (!xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED)) {
		if (flags & SYNC_TRYLOCK)
			goto out_wait;
		xfs_ilock(ip, XFS_IOLOCK_SHARED);
	}

	error = xfs_flush_pages(ip, 0, -1, (flags & SYNC_WAIT) ?
				0 : XBF_ASYNC, FI_NONE);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

out_wait:
	if (flags & SYNC_WAIT)
		xfs_ioend_wait(ip);
	return error;
}
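
/*
 * Note the flag interplay above: with SYNC_TRYLOCK set, a contended iolock
 * means we skip flushing this inode entirely, but a SYNC_WAIT caller still
 * waits for any I/O already in flight via xfs_ioend_wait().
 */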

STATIC int
xfs_sync_inode_attr(
	struct xfs_inode	*ip,
	struct xfs_perag	*pag,
	int			flags)
{
	int			error = 0;

	xfs_ilock(ip, XFS_ILOCK_SHARED);
	if (xfs_inode_clean(ip))
		goto out_unlock;
	if (!xfs_iflock_nowait(ip)) {
		if (!(flags & SYNC_WAIT))
			goto out_unlock;
		xfs_iflock(ip);
	}

	if (xfs_inode_clean(ip)) {
		xfs_ifunlock(ip);
		goto out_unlock;
	}

	error = xfs_iflush(ip, flags);

out_unlock:
	xfs_iunlock(ip, XFS_ILOCK_SHARED);
	return error;
}
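
/*
 * The second xfs_inode_clean() check above is not redundant: if we slept in
 * xfs_iflock(), another thread may have flushed the inode while we waited,
 * so recheck under the flush lock before issuing another flush.
 */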

/*
 * Write out pagecache data for the whole filesystem.
 */
STATIC int
xfs_sync_data(
	struct xfs_mount	*mp,
	int			flags)
{
	int			error;

	ASSERT((flags & ~(SYNC_TRYLOCK|SYNC_WAIT)) == 0);

	error = xfs_inode_ag_iterator(mp, xfs_sync_inode_data, flags);
	if (error)
		return XFS_ERROR(error);

	xfs_log_force(mp, (flags & SYNC_WAIT) ? XFS_LOG_SYNC : 0);
	return 0;
}

/*
 * Write out inode metadata (attributes) for the whole filesystem.
 */
STATIC int
xfs_sync_attr(
	struct xfs_mount	*mp,
	int			flags)
{
	ASSERT((flags & ~SYNC_WAIT) == 0);

	return xfs_inode_ag_iterator(mp, xfs_sync_inode_attr, flags);
}

STATIC int
xfs_sync_fsdata(
	struct xfs_mount	*mp)
{
	struct xfs_buf		*bp;

	/*
	 * If the buffer is pinned then push on the log so we won't get stuck
	 * waiting in the write for someone, maybe ourselves, to flush the log.
	 *
	 * Even though we just pushed the log above, we did not have the
	 * superblock buffer locked at that point so it can become pinned in
	 * between there and here.
	 */
	bp = xfs_getsb(mp, 0);
	if (XFS_BUF_ISPINNED(bp))
		xfs_log_force(mp, 0);

	return xfs_bwrite(mp, bp);
}

/*
 * When remounting a filesystem read-only or freezing the filesystem, we have
 * two phases to execute. This first phase is syncing the data before we
 * quiesce the filesystem, and the second is flushing all the inodes out after
 * we've waited for all the transactions created by the first phase to
 * complete. The second phase ensures that the inodes are written to their
 * location on disk rather than just existing in transactions in the log. This
 * means after a quiesce there is no log replay required to write the inodes to
 * disk (this is the main difference between a sync and a quiesce).
 */
/*
 * First stage of freeze - no writers will make progress now we are here,
 * so we flush delwri and delalloc buffers here, then wait for all I/O to
 * complete.  Data is frozen at that point. Metadata is not frozen,
 * transactions can still occur here so don't bother flushing the buftarg
 * because it'll just get dirty again.
 */
int
xfs_quiesce_data(
	struct xfs_mount	*mp)
{
	int			error, error2 = 0;

	/* push non-blocking */
	xfs_sync_data(mp, 0);
	xfs_qm_sync(mp, SYNC_TRYLOCK);

	/* push and block till complete */
	xfs_sync_data(mp, SYNC_WAIT);
	xfs_qm_sync(mp, SYNC_WAIT);

	/* write superblock and hoover up shutdown errors */
	error = xfs_sync_fsdata(mp);

	/* make sure all delwri buffers are written out */
	xfs_flush_buftarg(mp->m_ddev_targp, 1);

	/* mark the log as covered if needed */
	if (xfs_log_need_covered(mp))
		error2 = xfs_fs_log_dummy(mp);

	/* flush data-only devices */
	if (mp->m_rtdev_targp)
		XFS_bflush(mp->m_rtdev_targp);

	return error ? error : error2;
}
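
/*
 * Ordering recap for the above: data first (non-blocking, then blocking),
 * quotas alongside, then the superblock, then any remaining delwri metadata
 * buffers, and finally a dummy transaction to cover the log if it is idle.
 */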

STATIC void
xfs_quiesce_fs(
	struct xfs_mount	*mp)
{
	int	count = 0, pincount;

	xfs_reclaim_inodes(mp, 0);
	xfs_flush_buftarg(mp->m_ddev_targp, 0);

	/*
	 * This loop must run at least twice. The first instance of the loop
	 * will flush most metadata but that will generate more metadata
	 * (typically directory updates), which then must be flushed and
	 * logged before we can write the unmount record. We also do sync
	 * reclaim of inodes to catch any that the above delwri flush skipped.
	 */
	do {
		xfs_reclaim_inodes(mp, SYNC_WAIT);
		xfs_sync_attr(mp, SYNC_WAIT);
		pincount = xfs_flush_buftarg(mp->m_ddev_targp, 1);
		if (!pincount) {
			delay(50);
			count++;
		}
	} while (count < 2);
}

/*
 * Second stage of a quiesce. The data is already synced, now we have to take
 * care of the metadata. New transactions are already blocked, so we need to
 * wait for any remaining transactions to drain out before proceeding.
 */
void
xfs_quiesce_attr(
	struct xfs_mount	*mp)
{
	int	error = 0;

	/* wait for all modifications to complete */
	while (atomic_read(&mp->m_active_trans) > 0)
		delay(100);

	/* flush inodes and push all remaining buffers out to disk */
	xfs_quiesce_fs(mp);

	/*
	 * Just warn here till VFS can correctly support
	 * read-only remount without racing.
	 */
	WARN_ON(atomic_read(&mp->m_active_trans) != 0);

	/* Push the superblock and write an unmount record */
	error = xfs_log_sbcount(mp, 1);
	if (error)
		xfs_warn(mp, "xfs_attr_quiesce: failed to log sb changes. "
				"Frozen image may not be consistent.");
	xfs_log_unmount_write(mp);
	xfs_unmountfs_writesb(mp);
}

static void
xfs_syncd_queue_sync(
	struct xfs_mount	*mp)
{
	queue_delayed_work(xfs_syncd_wq, &mp->m_sync_work,
				msecs_to_jiffies(xfs_syncd_centisecs * 10));
}
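
/*
 * Timing example: xfs_syncd_centisecs defaults to 3000, so the sync worker
 * above runs every 3000 * 10 ms = 30 seconds, and the reclaim worker below,
 * requeued at centisecs / 6, runs every 5 seconds by default.
 */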

/*
 * Every sync period we need to unpin all items, reclaim inodes and sync
 * disk quotas.  We might need to cover the log to indicate that the
 * filesystem is idle and not frozen.
 */
STATIC void
xfs_sync_worker(
	struct work_struct *work)
{
	struct xfs_mount *mp = container_of(to_delayed_work(work),
					struct xfs_mount, m_sync_work);
	int		error;

	if (!(mp->m_flags & XFS_MOUNT_RDONLY)) {
		/* dgc: errors ignored here */
		if (mp->m_super->s_frozen == SB_UNFROZEN &&
		    xfs_log_need_covered(mp))
			error = xfs_fs_log_dummy(mp);
		else
			xfs_log_force(mp, 0);
		error = xfs_qm_sync(mp, SYNC_TRYLOCK);

		/* start pushing all the metadata that is currently dirty */
		xfs_ail_push_all(mp->m_ail);
	}

	/* queue us up again */
	xfs_syncd_queue_sync(mp);
}

/*
 * Queue a new inode reclaim pass if there are reclaimable inodes and there
 * isn't a reclaim pass already in progress. By default it runs every 5s based
 * on the xfs syncd work default of 30s. Perhaps this should have its own
 * tunable, but that can be done if this method proves to be ineffective or too
 * aggressive.
 */
static void
xfs_syncd_queue_reclaim(
	struct xfs_mount	*mp)
{
	/*
	 * We can have inodes enter reclaim after we've shut down the syncd
	 * workqueue during unmount, so don't allow reclaim work to be queued
	 * during unmount.
	 */
	if (!(mp->m_super->s_flags & MS_ACTIVE))
		return;

	rcu_read_lock();
	if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
		queue_delayed_work(xfs_syncd_wq, &mp->m_reclaim_work,
			msecs_to_jiffies(xfs_syncd_centisecs / 6 * 10));
	}
	rcu_read_unlock();
}

/*
 * This is a fast pass over the inode cache to try to get reclaim moving on as
 * many inodes as possible in a short period of time. It kicks itself every few
 * seconds, as well as being kicked by the inode cache shrinker when memory
 * goes low. It scans as quickly as possible avoiding locked inodes or those
 * already being flushed, and once done schedules a future pass.
 */
STATIC void
xfs_reclaim_worker(
	struct work_struct *work)
{
	struct xfs_mount *mp = container_of(to_delayed_work(work),
					struct xfs_mount, m_reclaim_work);

	xfs_reclaim_inodes(mp, SYNC_TRYLOCK);
	xfs_syncd_queue_reclaim(mp);
}

/*
 * Flush delayed allocate data, attempting to free up reserved space
 * from existing allocations.  At this point a new allocation attempt
 * has failed with ENOSPC and we are in the process of scratching our
 * heads, looking about for more room.
 *
 * Queue a new data flush if there isn't one already in progress and
 * wait for completion of the flush. This means that we only ever have one
 * inode flush in progress no matter how many ENOSPC events are occurring and
 * so will prevent the system from bogging down due to every concurrent
 * ENOSPC event scanning all the active inodes in the system for writeback.
 */
void
xfs_flush_inodes(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;

	queue_work(xfs_syncd_wq, &mp->m_flush_work);
	flush_work_sync(&mp->m_flush_work);
}

STATIC void
xfs_flush_worker(
	struct work_struct *work)
{
	struct xfs_mount *mp = container_of(work,
					struct xfs_mount, m_flush_work);

	xfs_sync_data(mp, SYNC_TRYLOCK);
	xfs_sync_data(mp, SYNC_TRYLOCK | SYNC_WAIT);
}
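
/*
 * Design note: queue_work() does nothing if m_flush_work is already pending,
 * so concurrent ENOSPC hitters coalesce onto a single flush and all block in
 * flush_work_sync() until that one flush completes - this is how the "only
 * one inode flush in progress" promise above is kept.
 */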

int
xfs_syncd_init(
	struct xfs_mount	*mp)
{
	INIT_WORK(&mp->m_flush_work, xfs_flush_worker);
	INIT_DELAYED_WORK(&mp->m_sync_work, xfs_sync_worker);
	INIT_DELAYED_WORK(&mp->m_reclaim_work, xfs_reclaim_worker);

	xfs_syncd_queue_sync(mp);
	xfs_syncd_queue_reclaim(mp);

	return 0;
}

void
xfs_syncd_stop(
	struct xfs_mount	*mp)
{
	cancel_delayed_work_sync(&mp->m_sync_work);
	cancel_delayed_work_sync(&mp->m_reclaim_work);
	cancel_work_sync(&mp->m_flush_work);
}

void
__xfs_inode_set_reclaim_tag(
	struct xfs_perag	*pag,
	struct xfs_inode	*ip)
{
	radix_tree_tag_set(&pag->pag_ici_root,
			   XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino),
			   XFS_ICI_RECLAIM_TAG);

	if (!pag->pag_ici_reclaimable) {
		/* propagate the reclaim tag up into the perag radix tree */
		spin_lock(&ip->i_mount->m_perag_lock);
		radix_tree_tag_set(&ip->i_mount->m_perag_tree,
				XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
				XFS_ICI_RECLAIM_TAG);
		spin_unlock(&ip->i_mount->m_perag_lock);

		/* schedule periodic background inode reclaim */
		xfs_syncd_queue_reclaim(ip->i_mount);

		trace_xfs_perag_set_reclaim(ip->i_mount, pag->pag_agno,
							-1, _RET_IP_);
	}
	pag->pag_ici_reclaimable++;
}
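
/*
 * The reclaim tag is kept at two levels: per-inode in the AG's pag_ici_root
 * tree and per-AG in the mount's m_perag_tree. Propagating it only on the
 * 0 -> 1 transition of pag_ici_reclaimable lets xfs_perag_get_tag() skip
 * whole AGs that have nothing to reclaim.
 */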

/*
 * We set the inode flag atomically with the radix tree tag.
 * Once we get tag lookups on the radix tree, this inode flag
 * can go away.
 */
void
xfs_inode_set_reclaim_tag(
	xfs_inode_t	*ip)
{
	struct xfs_mount *mp = ip->i_mount;
	struct xfs_perag *pag;

	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
	spin_lock(&pag->pag_ici_lock);
	spin_lock(&ip->i_flags_lock);
	__xfs_inode_set_reclaim_tag(pag, ip);
	__xfs_iflags_set(ip, XFS_IRECLAIMABLE);
	spin_unlock(&ip->i_flags_lock);
	spin_unlock(&pag->pag_ici_lock);
	xfs_perag_put(pag);
}

STATIC void
__xfs_inode_clear_reclaim(
	xfs_perag_t	*pag,
	xfs_inode_t	*ip)
{
	pag->pag_ici_reclaimable--;
	if (!pag->pag_ici_reclaimable) {
		/* clear the reclaim tag from the perag radix tree */
		spin_lock(&ip->i_mount->m_perag_lock);
		radix_tree_tag_clear(&ip->i_mount->m_perag_tree,
				XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
				XFS_ICI_RECLAIM_TAG);
		spin_unlock(&ip->i_mount->m_perag_lock);
		trace_xfs_perag_clear_reclaim(ip->i_mount, pag->pag_agno,
							-1, _RET_IP_);
	}
}

void
__xfs_inode_clear_reclaim_tag(
	xfs_mount_t	*mp,
	xfs_perag_t	*pag,
	xfs_inode_t	*ip)
{
	radix_tree_tag_clear(&pag->pag_ici_root,
			XFS_INO_TO_AGINO(mp, ip->i_ino), XFS_ICI_RECLAIM_TAG);
	__xfs_inode_clear_reclaim(pag, ip);
}

/*
 * Grab the inode for reclaim exclusively.
 * Return 0 if we grabbed it, non-zero otherwise.
 */
STATIC int
xfs_reclaim_inode_grab(
	struct xfs_inode	*ip,
	int			flags)
{
	ASSERT(rcu_read_lock_held());

	/* quick check for stale RCU freed inode */
	if (!ip->i_ino)
		return 1;

	/*
	 * do some unlocked checks first to avoid unnecessary lock traffic.
	 * The first is a flush lock check, the second is an already-in-reclaim
	 * check. Only do these checks if we are not going to block on locks.
	 */
	if ((flags & SYNC_TRYLOCK) &&
	    (!ip->i_flush.done || __xfs_iflags_test(ip, XFS_IRECLAIM))) {
		return 1;
	}

	/*
	 * The radix tree lock here protects a thread in xfs_iget from racing
	 * with us starting reclaim on the inode.  Once we have the
	 * XFS_IRECLAIM flag set it will not touch us.
	 *
	 * Due to RCU lookup, we may find inodes that have been freed and only
	 * have XFS_IRECLAIM set. Indeed, we may see reallocated inodes that
	 * aren't candidates for reclaim at all, so we must check that
	 * XFS_IRECLAIMABLE is set first before proceeding to reclaim.
	 */
	spin_lock(&ip->i_flags_lock);
	if (!__xfs_iflags_test(ip, XFS_IRECLAIMABLE) ||
	    __xfs_iflags_test(ip, XFS_IRECLAIM)) {
		/* not a reclaim candidate. */
		spin_unlock(&ip->i_flags_lock);
		return 1;
	}
	__xfs_iflags_set(ip, XFS_IRECLAIM);
	spin_unlock(&ip->i_flags_lock);
	return 0;
}

/*
 * Inodes in different states need to be treated differently, and the return
 * value of xfs_iflush is not sufficient to get this right. The following table
 * lists the inode states and the reclaim actions necessary for non-blocking
 * reclaim:
 *
 *
 *	inode state		iflush ret	required action
 *	---------------		----------	---------------
 *	bad			-		reclaim
 *	shutdown		EIO		unpin and reclaim
 *	clean, unpinned		0		reclaim
 *	stale, unpinned		0		reclaim
 *	clean, pinned(*)	0		requeue
 *	stale, pinned		EAGAIN		requeue
 *	dirty, delwri ok	0		requeue
 *	dirty, delwri blocked	EAGAIN		requeue
 *	dirty, sync flush	0		reclaim
 *
 * (*) dgc: I don't think the clean, pinned state is possible but it gets
 * handled anyway given the order of checks implemented.
 *
 * As can be seen from the table, the return value of xfs_iflush() is not
 * sufficient to correctly decide the reclaim action here. The checks in
 * xfs_iflush() might look like duplicates, but they are not.
 *
 * Also, because we get the flush lock first, we know that any inode that has
 * been flushed delwri has had the flush completed by the time we check that
 * the inode is clean. The clean inode check needs to be done before flushing
 * the inode delwri otherwise we would loop forever requeuing clean inodes as
 * we cannot tell apart a successful delwri flush and a clean inode from the
 * return value of xfs_iflush().
 *
 * Note that because the inode is flushed delayed write by background
 * writeback, the flush lock may already be held here and waiting on it can
 * result in very long latencies. Hence for sync reclaims, where we wait on the
 * flush lock, the caller should push out delayed write inodes first before
 * trying to reclaim them to minimise the amount of time spent waiting. For
 * background reclaim, we just requeue the inode for the next pass.
 *
 * Hence the order of actions after gaining the locks should be:
 *	bad		=> reclaim
 *	shutdown	=> unpin and reclaim
 *	pinned, delwri	=> requeue
 *	pinned, sync	=> unpin
 *	stale		=> reclaim
 *	clean		=> reclaim
 *	dirty, delwri	=> flush and requeue
 *	dirty, sync	=> flush, wait and reclaim
 */
STATIC int
xfs_reclaim_inode(
	struct xfs_inode	*ip,
	struct xfs_perag	*pag,
	int			sync_mode)
{
	int	error;

restart:
	error = 0;
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	if (!xfs_iflock_nowait(ip)) {
		if (!(sync_mode & SYNC_WAIT))
			goto out;
		xfs_iflock(ip);
	}

	if (is_bad_inode(VFS_I(ip)))
		goto reclaim;
	if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
		xfs_iunpin_wait(ip);
		goto reclaim;
	}
	if (xfs_ipincount(ip)) {
		if (!(sync_mode & SYNC_WAIT)) {
			xfs_ifunlock(ip);
			goto out;
		}
		xfs_iunpin_wait(ip);
	}
	if (xfs_iflags_test(ip, XFS_ISTALE))
		goto reclaim;
	if (xfs_inode_clean(ip))
		goto reclaim;

	/*
	 * Now we have an inode that needs flushing.
	 *
	 * We do a nonblocking flush here even if we are doing a SYNC_WAIT
	 * reclaim as we can deadlock with inode cluster removal.
	 * xfs_ifree_cluster() can lock the inode buffer before it locks the
	 * ip->i_lock, and we are doing the exact opposite here. As a result,
	 * doing a blocking xfs_itobp() to get the cluster buffer will result
	 * in an ABBA deadlock with xfs_ifree_cluster().
	 *
	 * As xfs_ifree_cluster() must gather all inodes that are active in the
	 * cache to mark them stale, if we hit this case we don't actually want
	 * to do IO here - we want the inode marked stale so we can simply
	 * reclaim it. Hence if we get an EAGAIN error on a SYNC_WAIT flush,
	 * just unlock the inode, back off and try again. Hopefully the next
	 * pass through will see the stale flag set on the inode.
	 */
	error = xfs_iflush(ip, SYNC_TRYLOCK | sync_mode);
	if (sync_mode & SYNC_WAIT) {
		if (error == EAGAIN) {
			xfs_iunlock(ip, XFS_ILOCK_EXCL);
			/* backoff longer than in xfs_ifree_cluster */
			delay(2);
			goto restart;
		}
		xfs_iflock(ip);
		goto reclaim;
	}

	/*
	 * When we have to flush an inode but don't have SYNC_WAIT set, we
	 * flush the inode out using a delwri buffer and wait for the next
	 * call into reclaim to find it in a clean state instead of waiting for
	 * it now. We also don't return errors here - if the error is transient
	 * then the next reclaim pass will flush the inode, and if the error
	 * is permanent then the next sync reclaim will reclaim the inode and
	 * pass on the error.
	 */
	if (error && error != EAGAIN && !XFS_FORCED_SHUTDOWN(ip->i_mount)) {
		xfs_warn(ip->i_mount,
			"inode 0x%llx background reclaim flush failed with %d",
			(long long)ip->i_ino, error);
	}
out:
	xfs_iflags_clear(ip, XFS_IRECLAIM);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	/*
	 * We could return EAGAIN here to make reclaim rescan the inode tree in
	 * a short while. However, this just burns CPU time scanning the tree
	 * waiting for IO to complete and xfssyncd never goes back to the idle
	 * state. Instead, return 0 to let the next scheduled background reclaim
	 * attempt to reclaim the inode again.
	 */
	return 0;

reclaim:
	xfs_ifunlock(ip);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	XFS_STATS_INC(xs_ig_reclaims);
	/*
	 * Remove the inode from the per-AG radix tree.
	 *
	 * Because radix_tree_delete won't complain even if the item was never
	 * added to the tree assert that it's been there before to catch
	 * problems with the inode life time early on.
	 */
	spin_lock(&pag->pag_ici_lock);
	if (!radix_tree_delete(&pag->pag_ici_root,
				XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino)))
		ASSERT(0);
	__xfs_inode_clear_reclaim(pag, ip);
	spin_unlock(&pag->pag_ici_lock);

	/*
	 * Here we do an (almost) spurious inode lock in order to coordinate
	 * with inode cache radix tree lookups. This is because the lookup
	 * can reference the inodes in the cache without taking references.
	 *
	 * We make that OK here by ensuring that we wait until the inode is
	 * unlocked after the lookup before we go ahead and free it. We get
	 * both the ilock and the iolock because the code may need to drop the
	 * ilock one but will still hold the iolock.
	 */
	xfs_ilock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
	xfs_qm_dqdetach(ip);
	xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);

	xfs_inode_free(ip);
	return error;
}
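
/*
 * Note (an assumption about this era's code layout, not verified here):
 * xfs_inode_free() releases the inode via an RCU callback, which is what
 * makes the rcu_read_lock()-only radix tree walks above safe - an inode seen
 * by a walker cannot be freed before the RCU grace period expires.
 */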

/*
 * Walk the AGs and reclaim the inodes in them. Even if the filesystem is
 * corrupted, we still want to try to reclaim all the inodes. If we don't,
 * then a shutdown during a filesystem unmount reclaim walk will leak all the
 * unreclaimed inodes.
 */
int
xfs_reclaim_inodes_ag(
	struct xfs_mount	*mp,
	int			flags,
	int			*nr_to_scan)
{
	struct xfs_perag	*pag;
	int			error = 0;
	int			last_error = 0;
	xfs_agnumber_t		ag;
	int			trylock = flags & SYNC_TRYLOCK;
	int			skipped;

restart:
	ag = 0;
	skipped = 0;
	while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
		unsigned long	first_index = 0;
		int		done = 0;
		int		nr_found = 0;

		ag = pag->pag_agno + 1;

		if (trylock) {
			if (!mutex_trylock(&pag->pag_ici_reclaim_lock)) {
				skipped++;
				xfs_perag_put(pag);
				continue;
			}
			first_index = pag->pag_ici_reclaim_cursor;
		} else
			mutex_lock(&pag->pag_ici_reclaim_lock);

		do {
			struct xfs_inode *batch[XFS_LOOKUP_BATCH];
			int	i;

			rcu_read_lock();
			nr_found = radix_tree_gang_lookup_tag(
					&pag->pag_ici_root,
					(void **)batch, first_index,
					XFS_LOOKUP_BATCH,
					XFS_ICI_RECLAIM_TAG);
			if (!nr_found) {
				done = 1;
				rcu_read_unlock();
				break;
			}

			/*
			 * Grab the inodes before we drop the lock. If we found
			 * nothing, nr == 0 and the loop will be skipped.
			 */
			for (i = 0; i < nr_found; i++) {
				struct xfs_inode *ip = batch[i];

				if (done || xfs_reclaim_inode_grab(ip, flags))
					batch[i] = NULL;

				/*
				 * Update the index for the next lookup. Catch
				 * overflows into the next AG range which can
				 * occur if we have inodes in the last block of
				 * the AG and we are currently pointing to the
				 * last inode.
				 *
				 * Because we may see inodes that are from the
				 * wrong AG due to RCU freeing and
				 * reallocation, only update the index if it
				 * lies in this AG. It was a race that led us
				 * to see this inode, so another lookup from
				 * the same index will not find it again.
				 */
				if (XFS_INO_TO_AGNO(mp, ip->i_ino) !=
								pag->pag_agno)
					continue;
				first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
				if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
					done = 1;
			}

			/* unlock now we've grabbed the inodes. */
			rcu_read_unlock();

			for (i = 0; i < nr_found; i++) {
				if (!batch[i])
					continue;
				error = xfs_reclaim_inode(batch[i], pag, flags);
				if (error && last_error != EFSCORRUPTED)
					last_error = error;
			}

			*nr_to_scan -= XFS_LOOKUP_BATCH;

		} while (nr_found && !done && *nr_to_scan > 0);

		if (trylock && !done)
			pag->pag_ici_reclaim_cursor = first_index;
		else
			pag->pag_ici_reclaim_cursor = 0;
		mutex_unlock(&pag->pag_ici_reclaim_lock);
		xfs_perag_put(pag);
	}

	/*
	 * if we skipped any AG, and we still have scan count remaining, do
	 * another pass this time using blocking reclaim semantics (i.e
	 * waiting on the reclaim locks and ignoring the reclaim cursors). This
	 * ensures that when we get more reclaimers than AGs we block rather
	 * than spin trying to execute reclaim.
	 */
	if (trylock && skipped && *nr_to_scan > 0) {
		trylock = 0;
		goto restart;
	}
	return XFS_ERROR(last_error);
}
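
/*
 * Concurrency note: under SYNC_TRYLOCK the per-AG reclaim cursor is saved
 * when an AG is left unfinished, so overlapping background passes resume
 * where the previous one stopped rather than rescanning from index 0; the
 * cursor is reset once an AG has been walked to completion or when a
 * blocking pass covers it.
 */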

int
xfs_reclaim_inodes(
	xfs_mount_t	*mp,
	int		mode)
{
	int		nr_to_scan = INT_MAX;

	return xfs_reclaim_inodes_ag(mp, mode, &nr_to_scan);
}

/*
 * Inode cache shrinker.
 *
 * When called we make sure that there is a background (fast) inode reclaim in
 * progress, while we will throttle the speed of reclaim via doing synchronous
 * reclaim of inodes. That means if we come across dirty inodes, we wait for
 * them to be cleaned, which we hope will not be very long due to the
 * background walker having already kicked the IO off on those dirty inodes.
 */
static int
xfs_reclaim_inode_shrink(
	struct shrinker	*shrink,
	int		nr_to_scan,
	gfp_t		gfp_mask)
{
	struct xfs_mount *mp;
	struct xfs_perag *pag;
	xfs_agnumber_t	ag;
	int		reclaimable;

	mp = container_of(shrink, struct xfs_mount, m_inode_shrink);
	if (nr_to_scan) {
		/* kick background reclaimer and push the AIL */
		xfs_syncd_queue_reclaim(mp);
		xfs_ail_push_all(mp->m_ail);

		if (!(gfp_mask & __GFP_FS))
			return -1;

		xfs_reclaim_inodes_ag(mp, SYNC_TRYLOCK | SYNC_WAIT,
					&nr_to_scan);
		/* terminate if we don't exhaust the scan */
		if (nr_to_scan > 0)
			return -1;
	}

	reclaimable = 0;
	ag = 0;
	while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
		ag = pag->pag_agno + 1;
		reclaimable += pag->pag_ici_reclaimable;
		xfs_perag_put(pag);
	}
	return reclaimable;
}
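
/*
 * Shrinker contract (old-style API, as we understand it): a zero nr_to_scan
 * is a count query; returning -1 tells the VM to back off, which we do when
 * we cannot recurse into the filesystem (!__GFP_FS) or when the scan was not
 * exhausted (we are blocked on dirty inodes); otherwise we report how many
 * inodes remain reclaimable so the VM can size its next pass.
 */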

void
xfs_inode_shrinker_register(
	struct xfs_mount	*mp)
{
	mp->m_inode_shrink.shrink = xfs_reclaim_inode_shrink;
	mp->m_inode_shrink.seeks = DEFAULT_SEEKS;
	register_shrinker(&mp->m_inode_shrink);
}

void
xfs_inode_shrinker_unregister(
	struct xfs_mount	*mp)
{
	unregister_shrinker(&mp->m_inode_shrink);
}