fs/xfs/xfs_aops.c
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_alloc.h"
#include "xfs_error.h"
#include "xfs_iomap.h"
#include "xfs_trace.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_bmap_btree.h"
#include "xfs_reflink.h"
#include <linux/gfp.h>
#include <linux/mpage.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>

/*
 * structure owned by writepages passed to individual writepage calls
 */
struct xfs_writepage_ctx {
	struct xfs_bmbt_irec	imap;
	bool			imap_valid;
	unsigned int		io_type;
	struct xfs_ioend	*ioend;
	sector_t		last_block;
};

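/*
 * Walk all the buffers attached to a page and report whether any of them
 * are in the delalloc or unwritten state.
 */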
void
xfs_count_page_state(
	struct page		*page,
	int			*delalloc,
	int			*unwritten)
{
	struct buffer_head	*bh, *head;

	*delalloc = *unwritten = 0;

	bh = head = page_buffers(page);
	do {
		if (buffer_unwritten(bh))
			(*unwritten) = 1;
		else if (buffer_delay(bh))
			(*delalloc) = 1;
	} while ((bh = bh->b_this_page) != head);
}

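/*
 * Return the block device backing this inode's data: the realtime device
 * for realtime inodes, the main data device for everything else.
 */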
struct block_device *
xfs_find_bdev_for_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;

	if (XFS_IS_REALTIME_INODE(ip))
		return mp->m_rtdev_targp->bt_bdev;
	else
		return mp->m_ddev_targp->bt_bdev;
}

/*
 * We're now finished for good with this page.  Update the page state via the
 * associated buffer_heads, paying attention to the start and end offsets that
 * we need to process on the page.
 *
 * Landmine Warning: bh->b_end_io() will call end_page_writeback() on the last
 * buffer in the IO. Once it does this, it is unsafe to access the bufferhead or
 * the page at all, as we may be racing with memory reclaim and it can free both
 * the bufferhead chain and the page as it will see the page as clean and
 * unused.
 */
static void
xfs_finish_page_writeback(
	struct inode		*inode,
	struct bio_vec		*bvec,
	int			error)
{
	unsigned int		end = bvec->bv_offset + bvec->bv_len - 1;
	struct buffer_head	*head, *bh, *next;
	unsigned int		off = 0;
	unsigned int		bsize;

	ASSERT(bvec->bv_offset < PAGE_SIZE);
	ASSERT((bvec->bv_offset & ((1 << inode->i_blkbits) - 1)) == 0);
	ASSERT(end < PAGE_SIZE);
	ASSERT((bvec->bv_len & ((1 << inode->i_blkbits) - 1)) == 0);

	bh = head = page_buffers(bvec->bv_page);

	bsize = bh->b_size;
	do {
		next = bh->b_this_page;
		if (off < bvec->bv_offset)
			goto next_bh;
		if (off > end)
			break;
		bh->b_end_io(bh, !error);
next_bh:
		off += bsize;
	} while ((bh = next) != head);
}

/*
 * We're now finished for good with this ioend structure.  Update the page
 * state, release holds on bios, and finally free up memory.  Do not use the
 * ioend after this.
 */
STATIC void
xfs_destroy_ioend(
	struct xfs_ioend	*ioend,
	int			error)
{
	struct inode		*inode = ioend->io_inode;
	struct bio		*last = ioend->io_bio;
	struct bio		*bio, *next;

	for (bio = &ioend->io_inline_bio; bio; bio = next) {
		struct bio_vec	*bvec;
		int		i;

		/*
		 * For the last bio, bi_private points to the ioend, so we
		 * need to explicitly end the iteration here.
		 */
		if (bio == last)
			next = NULL;
		else
			next = bio->bi_private;

		/* walk each page on bio, ending page IO on them */
		bio_for_each_segment_all(bvec, bio, i)
			xfs_finish_page_writeback(inode, bvec, error);

		bio_put(bio);
	}
}

/*
 * Fast and loose check if this write could update the on-disk inode size.
 */
static inline bool xfs_ioend_is_append(struct xfs_ioend *ioend)
{
	return ioend->io_offset + ioend->io_size >
		XFS_I(ioend->io_inode)->i_d.di_size;
}

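/*
 * Allocate the transaction for an on-disk file size update ahead of time, at
 * IO submission, and attach it to the ioend.  It is committed (or cancelled)
 * later by the completion worker in xfs_setfilesize_ioend().
 */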
STATIC int
xfs_setfilesize_trans_alloc(
	struct xfs_ioend	*ioend)
{
	struct xfs_mount	*mp = XFS_I(ioend->io_inode)->i_mount;
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp);
	if (error)
		return error;

	ioend->io_append_trans = tp;

	/*
	 * We may pass freeze protection with a transaction.  So tell lockdep
	 * we released it.
	 */
	__sb_writers_release(ioend->io_inode->i_sb, SB_FREEZE_FS);
	/*
	 * We hand off the transaction to the completion thread now, so
	 * clear the flag here.
	 */
	current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
	return 0;
}

/*
 * Update on-disk file size now that data has been written to disk.
 */
STATIC int
__xfs_setfilesize(
	struct xfs_inode	*ip,
	struct xfs_trans	*tp,
	xfs_off_t		offset,
	size_t			size)
{
	xfs_fsize_t		isize;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	isize = xfs_new_eof(ip, offset + size);
	if (!isize) {
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		xfs_trans_cancel(tp);
		return 0;
	}

	trace_xfs_setfilesize(ip, offset, size);

	ip->i_d.di_size = isize;
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	return xfs_trans_commit(tp);
}

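/*
 * Variant of the above that allocates its own transaction, for callers that
 * do not have one pre-allocated on an ioend.
 */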
int
xfs_setfilesize(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	size_t			size)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp);
	if (error)
		return error;

	return __xfs_setfilesize(ip, tp, offset, size);
}

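/*
 * Commit the pre-allocated file size update transaction attached to an
 * ioend, or cancel it if the IO failed.
 */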
STATIC int
xfs_setfilesize_ioend(
	struct xfs_ioend	*ioend,
	int			error)
{
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
	struct xfs_trans	*tp = ioend->io_append_trans;

	/*
	 * The transaction may have been allocated in the I/O submission thread,
	 * thus we need to mark ourselves as being in a transaction manually.
	 * Similarly for freeze protection.
	 */
	current_set_flags_nested(&tp->t_pflags, PF_FSTRANS);
	__sb_writers_acquired(VFS_I(ip)->i_sb, SB_FREEZE_FS);

	/* we abort the update if there was an IO error */
	if (error) {
		xfs_trans_cancel(tp);
		return error;
	}

	return __xfs_setfilesize(ip, tp, ioend->io_offset, ioend->io_size);
}

/*
 * IO write completion.
 */
STATIC void
xfs_end_io(
	struct work_struct *work)
{
	struct xfs_ioend	*ioend =
		container_of(work, struct xfs_ioend, io_work);
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
	int			error = ioend->io_bio->bi_error;

	/*
	 * Set an error if the mount has shut down and proceed with end I/O
	 * processing so it can perform whatever cleanups are necessary.
	 */
	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		error = -EIO;

	/*
	 * For a CoW extent, we need to move the mapping from the CoW fork
	 * to the data fork.  If instead an error happened, just dump the
	 * new blocks.
	 */
	if (ioend->io_type == XFS_IO_COW) {
		if (error)
			goto done;
		if (ioend->io_bio->bi_error) {
			error = xfs_reflink_cancel_cow_range(ip,
					ioend->io_offset, ioend->io_size);
			goto done;
		}
		error = xfs_reflink_end_cow(ip, ioend->io_offset,
				ioend->io_size);
		if (error)
			goto done;
	}

	/*
	 * For unwritten extents we need to issue transactions to convert a
	 * range to normal written extents after the data I/O has finished.
	 * Detecting and handling completion IO errors is done individually
	 * for each case as different cleanup operations need to be performed
	 * on error.
	 */
	if (ioend->io_type == XFS_IO_UNWRITTEN) {
		if (error)
			goto done;
		error = xfs_iomap_write_unwritten(ip, ioend->io_offset,
						  ioend->io_size);
	} else if (ioend->io_append_trans) {
		error = xfs_setfilesize_ioend(ioend, error);
	} else {
		ASSERT(!xfs_ioend_is_append(ioend) ||
		       ioend->io_type == XFS_IO_COW);
	}

done:
	xfs_destroy_ioend(ioend, error);
}

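/*
 * Bio completion handler.  Unwritten extent conversion, CoW remapping and
 * on-disk size updates need transactions and so cannot run in bio completion
 * context; punt those ioends to a workqueue and finish everything else
 * right here.
 */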
STATIC void
xfs_end_bio(
	struct bio		*bio)
{
	struct xfs_ioend	*ioend = bio->bi_private;
	struct xfs_mount	*mp = XFS_I(ioend->io_inode)->i_mount;

	if (ioend->io_type == XFS_IO_UNWRITTEN || ioend->io_type == XFS_IO_COW)
		queue_work(mp->m_unwritten_workqueue, &ioend->io_work);
	else if (ioend->io_append_trans)
		queue_work(mp->m_data_workqueue, &ioend->io_work);
	else
		xfs_destroy_ioend(ioend, bio->bi_error);
}

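/*
 * Look up the extent mapping covering @offset for writeback, allocating real
 * blocks for delalloc ranges that do not have them yet.
 */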
STATIC int
xfs_map_blocks(
	struct inode		*inode,
	loff_t			offset,
	struct xfs_bmbt_irec	*imap,
	int			type)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	ssize_t			count = 1 << inode->i_blkbits;
	xfs_fileoff_t		offset_fsb, end_fsb;
	int			error = 0;
	int			bmapi_flags = XFS_BMAPI_ENTIRE;
	int			nimaps = 1;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	ASSERT(type != XFS_IO_COW);
	if (type == XFS_IO_UNWRITTEN)
		bmapi_flags |= XFS_BMAPI_IGSTATE;

	xfs_ilock(ip, XFS_ILOCK_SHARED);
	ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
	       (ip->i_df.if_flags & XFS_IFEXTENTS));
	ASSERT(offset <= mp->m_super->s_maxbytes);

	if (offset + count > mp->m_super->s_maxbytes)
		count = mp->m_super->s_maxbytes - offset;
	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb,
				imap, &nimaps, bmapi_flags);
	/*
	 * Truncate an overwrite extent if there's a pending CoW
	 * reservation before the end of this extent.  This forces us
	 * to come back to writepage to take care of the CoW.
	 */
	if (nimaps && type == XFS_IO_OVERWRITE)
		xfs_reflink_trim_irec_to_next_cow(ip, offset_fsb, imap);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	if (error)
		return error;

	if (type == XFS_IO_DELALLOC &&
	    (!nimaps || isnullstartblock(imap->br_startblock))) {
		error = xfs_iomap_write_allocate(ip, XFS_DATA_FORK, offset,
				imap);
		if (!error)
			trace_xfs_map_blocks_alloc(ip, offset, count, type, imap);
		return error;
	}

#ifdef DEBUG
	if (type == XFS_IO_UNWRITTEN) {
		ASSERT(nimaps);
		ASSERT(imap->br_startblock != HOLESTARTBLOCK);
		ASSERT(imap->br_startblock != DELAYSTARTBLOCK);
	}
#endif
	if (nimaps)
		trace_xfs_map_blocks_found(ip, offset, count, type, imap);
	return 0;
}

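/*
 * Return true if the given file offset falls inside the cached extent
 * mapping.
 */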
STATIC bool
xfs_imap_valid(
	struct inode		*inode,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset)
{
	offset >>= inode->i_blkbits;

	return offset >= imap->br_startoff &&
		offset < imap->br_startoff + imap->br_blockcount;
}

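/*
 * Mark a mapped, locked buffer async-write and clean, ready to be attached
 * to an ioend for IO submission.
 */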
STATIC void
xfs_start_buffer_writeback(
	struct buffer_head	*bh)
{
	ASSERT(buffer_mapped(bh));
	ASSERT(buffer_locked(bh));
	ASSERT(!buffer_delay(bh));
	ASSERT(!buffer_unwritten(bh));

	mark_buffer_async_write(bh);
	set_buffer_uptodate(bh);
	clear_buffer_dirty(bh);
}

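/*
 * Transition a page into the writeback state and unlock it.
 */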
STATIC void
xfs_start_page_writeback(
	struct page		*page,
	int			clear_dirty)
{
	ASSERT(PageLocked(page));
	ASSERT(!PageWriteback(page));

	/*
	 * if the page was not fully cleaned, we need to ensure that the higher
	 * layers come back to it correctly.  That means we need to keep the page
	 * dirty, and for WB_SYNC_ALL writeback we need to ensure the
	 * PAGECACHE_TAG_TOWRITE index mark is not removed so another attempt to
	 * write this page in this writeback sweep will be made.
	 */
	if (clear_dirty) {
		clear_page_dirty_for_io(page);
		set_page_writeback(page);
	} else
		set_page_writeback_keepwrite(page);

	unlock_page(page);
}

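/* Add a buffer's data to a bio; returns the number of bytes actually added. */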
static inline int xfs_bio_add_buffer(struct bio *bio, struct buffer_head *bh)
{
	return bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
}

/*
 * Submit the bio for an ioend. We are passed an ioend with a bio attached to
 * it, and we submit that bio. The ioend may be used for multiple bio
 * submissions, so we only want to allocate an append transaction for the ioend
 * once. In the case of multiple bio submission, each bio will take an IO
 * reference to the ioend to ensure that the ioend completion is only done once
 * all bios have been submitted and the ioend is really done.
 *
 * If @status is non-zero, it means that we have a situation where some part of
 * the submission process has failed after we have marked pages for writeback
 * and unlocked them. In this situation, we need to fail the bio and ioend
 * rather than submit it to IO. This typically only happens on a filesystem
 * shutdown.
 */
STATIC int
xfs_submit_ioend(
	struct writeback_control *wbc,
	struct xfs_ioend	*ioend,
	int			status)
{
	/* Reserve log space if we might write beyond the on-disk inode size. */
	if (!status &&
	    ioend->io_type != XFS_IO_UNWRITTEN &&
	    xfs_ioend_is_append(ioend) &&
	    !ioend->io_append_trans)
		status = xfs_setfilesize_trans_alloc(ioend);

	ioend->io_bio->bi_private = ioend;
	ioend->io_bio->bi_end_io = xfs_end_bio;
	ioend->io_bio->bi_opf = REQ_OP_WRITE | wbc_to_write_flags(wbc);

	/*
	 * If we are failing the IO now, just mark the ioend with an
	 * error and finish it.  This will run IO completion immediately
	 * as there is only one reference to the ioend at this point in
	 * time.
	 */
	if (status) {
		ioend->io_bio->bi_error = status;
		bio_endio(ioend->io_bio);
		return status;
	}

	submit_bio(ioend->io_bio);
	return 0;
}

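/* Initialise a new bio from a buffer_head: target device and start sector. */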
static void
xfs_init_bio_from_bh(
	struct bio		*bio,
	struct buffer_head	*bh)
{
	bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
	bio->bi_bdev = bh->b_bdev;
}

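/*
 * Allocate an ioend for the IO starting at @offset.  The ioend is embedded
 * in its first bio, allocated from xfs_ioend_bioset, which is why it is
 * recovered with container_of() here and in the completion path.
 */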
static struct xfs_ioend *
xfs_alloc_ioend(
	struct inode		*inode,
	unsigned int		type,
	xfs_off_t		offset,
	struct buffer_head	*bh)
{
	struct xfs_ioend	*ioend;
	struct bio		*bio;

	bio = bio_alloc_bioset(GFP_NOFS, BIO_MAX_PAGES, xfs_ioend_bioset);
	xfs_init_bio_from_bh(bio, bh);

	ioend = container_of(bio, struct xfs_ioend, io_inline_bio);
	INIT_LIST_HEAD(&ioend->io_list);
	ioend->io_type = type;
	ioend->io_inode = inode;
	ioend->io_size = 0;
	ioend->io_offset = offset;
	INIT_WORK(&ioend->io_work, xfs_end_io);
	ioend->io_append_trans = NULL;
	ioend->io_bio = bio;
	return ioend;
}

/*
 * Allocate a new bio, and chain the old bio to the new one.
 *
 * Note that we have to perform the chaining in this unintuitive order
 * so that the bi_private linkage is set up in the right direction for the
 * traversal in xfs_destroy_ioend().
 */
static void
xfs_chain_bio(
	struct xfs_ioend	*ioend,
	struct writeback_control *wbc,
	struct buffer_head	*bh)
{
	struct bio *new;

	new = bio_alloc(GFP_NOFS, BIO_MAX_PAGES);
	xfs_init_bio_from_bh(new, bh);

	bio_chain(ioend->io_bio, new);
	bio_get(ioend->io_bio);		/* for xfs_destroy_ioend */
	ioend->io_bio->bi_opf = REQ_OP_WRITE | wbc_to_write_flags(wbc);
	submit_bio(ioend->io_bio);
	ioend->io_bio = new;
}

/*
 * Test to see if we've been building up a completion structure for
 * earlier buffers -- if so, we try to append to this ioend if we
 * can, otherwise we finish off any current ioend and start another.
 * The ioend we finish off is queued on @iolist so that the caller can
 * submit it once it has finished processing the dirty page.
 */
STATIC void
xfs_add_to_ioend(
	struct inode		*inode,
	struct buffer_head	*bh,
	xfs_off_t		offset,
	struct xfs_writepage_ctx *wpc,
	struct writeback_control *wbc,
	struct list_head	*iolist)
{
	if (!wpc->ioend || wpc->io_type != wpc->ioend->io_type ||
	    bh->b_blocknr != wpc->last_block + 1 ||
	    offset != wpc->ioend->io_offset + wpc->ioend->io_size) {
		if (wpc->ioend)
			list_add(&wpc->ioend->io_list, iolist);
		wpc->ioend = xfs_alloc_ioend(inode, wpc->io_type, offset, bh);
	}

	/*
	 * If the buffer doesn't fit into the bio we need to allocate a new
	 * one.  This shouldn't happen more than once for a given buffer.
	 */
	while (xfs_bio_add_buffer(wpc->ioend->io_bio, bh) != bh->b_size)
		xfs_chain_bio(wpc->ioend, wbc, bh);

	wpc->ioend->io_size += bh->b_size;
	wpc->last_block = bh->b_blocknr;
	xfs_start_buffer_writeback(bh);
}

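/*
 * Compute the on-disk block number backing @offset from the extent mapping
 * and attach it to the buffer_head.
 */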
STATIC void
xfs_map_buffer(
	struct inode		*inode,
	struct buffer_head	*bh,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset)
{
	sector_t		bn;
	struct xfs_mount	*m = XFS_I(inode)->i_mount;
	xfs_off_t		iomap_offset = XFS_FSB_TO_B(m, imap->br_startoff);
	xfs_daddr_t		iomap_bn = xfs_fsb_to_db(XFS_I(inode), imap->br_startblock);

	ASSERT(imap->br_startblock != HOLESTARTBLOCK);
	ASSERT(imap->br_startblock != DELAYSTARTBLOCK);

	bn = (iomap_bn >> (inode->i_blkbits - BBSHIFT)) +
	      ((offset - iomap_offset) >> inode->i_blkbits);

	ASSERT(bn || XFS_IS_REALTIME_INODE(XFS_I(inode)));

	bh->b_blocknr = bn;
	set_buffer_mapped(bh);
}

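/*
 * Map the buffer to its on-disk location and clear the delalloc and
 * unwritten buffer state; conversion of unwritten extents happens at IO
 * completion, not here.
 */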
STATIC void
xfs_map_at_offset(
	struct inode		*inode,
	struct buffer_head	*bh,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset)
{
	ASSERT(imap->br_startblock != HOLESTARTBLOCK);
	ASSERT(imap->br_startblock != DELAYSTARTBLOCK);

	xfs_map_buffer(inode, bh, imap, offset);
	set_buffer_mapped(bh);
	clear_buffer_delay(bh);
	clear_buffer_unwritten(bh);
}

/*
 * Test if a given page contains at least one buffer of a given @type.
 * If @check_all_buffers is true, then we walk all the buffers in the page to
 * try to find one of the type passed in. If it is not set, then the caller only
 * needs to check the first buffer on the page for a match.
 */
STATIC bool
xfs_check_page_type(
	struct page		*page,
	unsigned int		type,
	bool			check_all_buffers)
{
	struct buffer_head	*bh;
	struct buffer_head	*head;

	if (PageWriteback(page))
		return false;
	if (!page->mapping)
		return false;
	if (!page_has_buffers(page))
		return false;

	bh = head = page_buffers(page);
	do {
		if (buffer_unwritten(bh)) {
			if (type == XFS_IO_UNWRITTEN)
				return true;
		} else if (buffer_delay(bh)) {
			if (type == XFS_IO_DELALLOC)
				return true;
		} else if (buffer_dirty(bh) && buffer_mapped(bh)) {
			if (type == XFS_IO_OVERWRITE)
				return true;
		}

		/* If we are only checking the first buffer, we are done now. */
		if (!check_all_buffers)
			break;
	} while ((bh = bh->b_this_page) != head);

	return false;
}

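/*
 * ->invalidatepage handler: trace the invalidation, then let the generic
 * buffer_head code do the actual work.
 */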
STATIC void
xfs_vm_invalidatepage(
	struct page		*page,
	unsigned int		offset,
	unsigned int		length)
{
	trace_xfs_invalidatepage(page->mapping->host, page, offset,
				 length);
	block_invalidatepage(page, offset, length);
}

/*
 * If the page has delalloc buffers on it, we need to punch them out before we
 * invalidate the page. If we don't, we leave a stale delalloc mapping on the
 * inode that can trip a BUG() in xfs_get_blocks() later on if a direct IO read
 * is done on that same region - the delalloc extent is returned when none is
 * supposed to be there.
 *
 * We prevent this by truncating away the delalloc regions on the page before
 * invalidating it. Because they are delalloc, we can do this without needing a
 * transaction. Indeed - if we get ENOSPC errors, we have to be able to do this
 * truncation without a transaction as there is no space left for block
 * reservation (typically why we see an ENOSPC in writeback).
 *
 * This is not a performance critical path, so for now just do the punching a
 * buffer head at a time.
 */
STATIC void
xfs_aops_discard_page(
	struct page		*page)
{
	struct inode		*inode = page->mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct buffer_head	*bh, *head;
	loff_t			offset = page_offset(page);

	if (!xfs_check_page_type(page, XFS_IO_DELALLOC, true))
		goto out_invalidate;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		goto out_invalidate;

	xfs_alert(ip->i_mount,
		"page discard on page %p, inode 0x%llx, offset %llu.",
			page, ip->i_ino, offset);

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	bh = head = page_buffers(page);
	do {
		int		error;
		xfs_fileoff_t	start_fsb;

		if (!buffer_delay(bh))
			goto next_buffer;

		start_fsb = XFS_B_TO_FSBT(ip->i_mount, offset);
		error = xfs_bmap_punch_delalloc_range(ip, start_fsb, 1);
		if (error) {
			/* something screwed, just bail */
			if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
				xfs_alert(ip->i_mount,
			"page discard unable to remove delalloc mapping.");
			}
			break;
		}
next_buffer:
		offset += 1 << inode->i_blkbits;

	} while ((bh = bh->b_this_page) != head);

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
out_invalidate:
	xfs_vm_invalidatepage(page, 0, PAGE_SIZE);
	return;
}

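/*
 * Check if writeback at @offset needs to go through the COW fork and, if so,
 * make sure real blocks back it and cache the mapping in the writepage
 * context.
 */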
static int
xfs_map_cow(
	struct xfs_writepage_ctx *wpc,
	struct inode		*inode,
	loff_t			offset,
	unsigned int		*new_type)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_bmbt_irec	imap;
	bool			is_cow = false;
	int			error;

	/*
	 * If we already have a valid COW mapping keep using it.
	 */
	if (wpc->io_type == XFS_IO_COW) {
		wpc->imap_valid = xfs_imap_valid(inode, &wpc->imap, offset);
		if (wpc->imap_valid) {
			*new_type = XFS_IO_COW;
			return 0;
		}
	}

	/*
	 * Else we need to check if there is a COW mapping at this offset.
	 */
	xfs_ilock(ip, XFS_ILOCK_SHARED);
	is_cow = xfs_reflink_find_cow_mapping(ip, offset, &imap);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	if (!is_cow)
		return 0;

	/*
	 * And if the COW mapping has a delayed extent here we need to
	 * allocate real space for it now.
	 */
	if (isnullstartblock(imap.br_startblock)) {
		error = xfs_iomap_write_allocate(ip, XFS_COW_FORK, offset,
				&imap);
		if (error)
			return error;
	}

	wpc->io_type = *new_type = XFS_IO_COW;
	wpc->imap_valid = true;
	wpc->imap = imap;
	return 0;
}

/*
 * We implement an immediate ioend submission policy here to avoid needing to
 * chain multiple ioends and hence nest mempool allocations which can violate
 * forward progress guarantees we need to provide. The current ioend we are
 * adding buffers to is cached on the writepage context, and if the new buffer
 * does not append to the cached ioend it will create a new ioend and cache that
 * instead.
 *
 * If a new ioend is created and cached, the old ioend is returned and queued
 * locally for submission once the entire page is processed or an error has been
 * detected. While ioends are submitted immediately after they are completed,
 * batching optimisations are provided by higher level block plugging.
 *
 * At the end of a writeback pass, there will be a cached ioend remaining on the
 * writepage context that the caller will need to submit.
 */
static int
xfs_writepage_map(
	struct xfs_writepage_ctx *wpc,
	struct writeback_control *wbc,
	struct inode		*inode,
	struct page		*page,
	loff_t			offset,
	__uint64_t		end_offset)
{
	LIST_HEAD(submit_list);
	struct xfs_ioend	*ioend, *next;
	struct buffer_head	*bh, *head;
	ssize_t			len = 1 << inode->i_blkbits;
	int			error = 0;
	int			count = 0;
	int			uptodate = 1;
	unsigned int		new_type;

	bh = head = page_buffers(page);
	offset = page_offset(page);
	do {
		if (offset >= end_offset)
			break;
		if (!buffer_uptodate(bh))
			uptodate = 0;

		/*
		 * set_page_dirty dirties all buffers in a page, independent
		 * of their state.  The dirty state however is entirely
		 * meaningless for holes (!mapped && uptodate), so skip
		 * buffers covering holes here.
		 */
		if (!buffer_mapped(bh) && buffer_uptodate(bh)) {
			wpc->imap_valid = false;
			continue;
		}

		if (buffer_unwritten(bh))
			new_type = XFS_IO_UNWRITTEN;
		else if (buffer_delay(bh))
			new_type = XFS_IO_DELALLOC;
		else if (buffer_uptodate(bh))
			new_type = XFS_IO_OVERWRITE;
		else {
			if (PageUptodate(page))
				ASSERT(buffer_mapped(bh));
			/*
			 * This buffer is not uptodate and will not be
			 * written to disk.  Ensure that we will put any
			 * subsequent writeable buffers into a new
			 * ioend.
			 */
			wpc->imap_valid = false;
			continue;
		}

		if (xfs_is_reflink_inode(XFS_I(inode))) {
			error = xfs_map_cow(wpc, inode, offset, &new_type);
			if (error)
				goto out;
		}

		if (wpc->io_type != new_type) {
			wpc->io_type = new_type;
			wpc->imap_valid = false;
		}

		if (wpc->imap_valid)
			wpc->imap_valid = xfs_imap_valid(inode, &wpc->imap,
							 offset);
		if (!wpc->imap_valid) {
			error = xfs_map_blocks(inode, offset, &wpc->imap,
					     wpc->io_type);
			if (error)
				goto out;
			wpc->imap_valid = xfs_imap_valid(inode, &wpc->imap,
							 offset);
		}
		if (wpc->imap_valid) {
			lock_buffer(bh);
			if (wpc->io_type != XFS_IO_OVERWRITE)
				xfs_map_at_offset(inode, bh, &wpc->imap, offset);
			xfs_add_to_ioend(inode, bh, offset, wpc, wbc, &submit_list);
			count++;
		}

	} while (offset += len, ((bh = bh->b_this_page) != head));

	if (uptodate && bh == head)
		SetPageUptodate(page);

	ASSERT(wpc->ioend || list_empty(&submit_list));

out:
	/*
	 * On error, we have to fail the ioend here because we have locked
	 * buffers in the ioend. If we don't do this, we'll deadlock
	 * invalidating the page as that tries to lock the buffers on the page.
	 * Also, because we may have set pages under writeback, we have to make
	 * sure we run IO completion to mark the error state of the IO
	 * appropriately, so we can't cancel the ioend directly here. That means
	 * we have to mark this page as under writeback if we included any
	 * buffers from it in the ioend chain so that completion treats it
	 * correctly.
	 *
	 * If we didn't include the page in the ioend, then on error we can
	 * simply discard and unlock it as there are no other users of the page
	 * or its buffers right now. The caller will still need to trigger
	 * submission of outstanding ioends on the writepage context so they are
	 * treated correctly on error.
	 */
	if (count) {
		xfs_start_page_writeback(page, !error);

		/*
		 * Preserve the original error if there was one, otherwise catch
		 * submission errors here and propagate into subsequent ioend
		 * submissions.
		 */
		list_for_each_entry_safe(ioend, next, &submit_list, io_list) {
			int error2;

			list_del_init(&ioend->io_list);
			error2 = xfs_submit_ioend(wbc, ioend, error);
			if (error2 && !error)
				error = error2;
		}
	} else if (error) {
		xfs_aops_discard_page(page);
		ClearPageUptodate(page);
		unlock_page(page);
	} else {
		/*
		 * We can end up here with no error and nothing to write if we
		 * race with a partial page truncate on a sub-page block sized
		 * filesystem. In that case we need to mark the page clean.
		 */
		xfs_start_page_writeback(page, 1);
		end_page_writeback(page);
	}

	mapping_set_error(page->mapping, error);
	return error;
}

/*
 * Write out a dirty page.
 *
 * For delalloc space on the page we need to allocate space and flush it.
 * For unwritten space on the page we need to start the conversion to
 * regular allocated space.
 * For any other dirty buffer heads on the page we should flush them.
 */
STATIC int
xfs_do_writepage(
	struct page		*page,
	struct writeback_control *wbc,
	void			*data)
{
	struct xfs_writepage_ctx *wpc = data;
	struct inode		*inode = page->mapping->host;
	loff_t			offset;
	__uint64_t		end_offset;
	pgoff_t			end_index;

	trace_xfs_writepage(inode, page, 0, 0);

	ASSERT(page_has_buffers(page));

	/*
	 * Refuse to write the page out if we are called from reclaim context.
	 *
	 * This avoids stack overflows when called from deeply used stacks in
	 * random callers for direct reclaim or memcg reclaim.  We explicitly
	 * allow reclaim from kswapd as the stack usage there is relatively low.
	 *
	 * This should never happen except in the case of a VM regression so
	 * warn about it.
	 */
	if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) ==
			PF_MEMALLOC))
		goto redirty;

	/*
	 * Given that we do not allow direct reclaim to call us, we should
	 * never be called while in a filesystem transaction.
	 */
	if (WARN_ON_ONCE(current->flags & PF_FSTRANS))
		goto redirty;

	/*
	 * Is this page beyond the end of the file?
	 *
	 * The page index is less than the end_index, adjust the end_offset
	 * to the highest offset that this page should represent.
	 * -----------------------------------------------------
	 * |			file mapping	       | <EOF> |
	 * -----------------------------------------------------
	 * | Page ... | Page N-2 | Page N-1 |  Page N  |       |
	 * ^--------------------------------^----------|--------
	 * |     desired writeback range    |      see else    |
	 * ---------------------------------^------------------|
	 */
	offset = i_size_read(inode);
	end_index = offset >> PAGE_SHIFT;
	if (page->index < end_index)
		end_offset = (xfs_off_t)(page->index + 1) << PAGE_SHIFT;
	else {
		/*
		 * Check whether the page to write out is beyond or straddles
		 * i_size or not.
		 * -------------------------------------------------------
		 * |		file mapping		     | <EOF>    |
		 * -------------------------------------------------------
		 * | Page ... | Page N-2 | Page N-1 |  Page N | Beyond  |
		 * ^--------------------------------^---------|---------
		 * |				    | Straddles         |
		 * ---------------------------------^---------|--------|
		 */
		unsigned offset_into_page = offset & (PAGE_SIZE - 1);

		/*
		 * Skip the page if it is fully outside i_size, e.g. due to a
		 * truncate operation that is in progress. We must redirty the
		 * page so that reclaim stops reclaiming it. Otherwise
		 * xfs_vm_releasepage() is called on it and gets confused.
		 *
		 * Note that the end_index is unsigned long, it would overflow
		 * if the given offset is greater than 16TB on a 32-bit system
		 * and if we do check the page is fully outside i_size or not
		 * via "if (page->index >= end_index + 1)" as "end_index + 1"
		 * will be evaluated to 0.  Hence this page will be redirtied
		 * and written out repeatedly, which would result in an
		 * infinite loop; the user program that performs this operation
		 * will hang.  Instead, we can detect this situation by
		 * checking if the page to write is totally beyond i_size or
		 * if its offset is just equal to the EOF.
		 */
		if (page->index > end_index ||
		    (page->index == end_index && offset_into_page == 0))
			goto redirty;

		/*
		 * The page straddles i_size.  It must be zeroed out on each
		 * and every writepage invocation because it may be mmapped.
		 * "A file is mapped in multiples of the page size.  For a file
		 * that is not a multiple of the page size, the remaining
		 * memory is zeroed when mapped, and writes to that region are
		 * not written out to the file."
		 */
		zero_user_segment(page, offset_into_page, PAGE_SIZE);

		/* Adjust the end_offset to the end of file */
		end_offset = offset;
	}

	return xfs_writepage_map(wpc, wbc, inode, page, offset, end_offset);

redirty:
	redirty_page_for_writepage(wbc, page);
	unlock_page(page);
	return 0;
}

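/*
 * ->writepage handler: write out a single dirty page and submit the ioend
 * built for it.
 */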
STATIC int
xfs_vm_writepage(
	struct page		*page,
	struct writeback_control *wbc)
{
	struct xfs_writepage_ctx wpc = {
		.io_type = XFS_IO_INVALID,
	};
	int			ret;

	ret = xfs_do_writepage(page, wbc, &wpc);
	if (wpc.ioend)
		ret = xfs_submit_ioend(wbc, wpc.ioend, ret);
	return ret;
}

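/*
 * ->writepages handler: write back a range of dirty pages, sharing one
 * writepage context (and thus one cached mapping and ioend) across all of
 * them.
 */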
STATIC int
xfs_vm_writepages(
	struct address_space	*mapping,
	struct writeback_control *wbc)
{
	struct xfs_writepage_ctx wpc = {
		.io_type = XFS_IO_INVALID,
	};
	int			ret;

	xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
	if (dax_mapping(mapping))
		return dax_writeback_mapping_range(mapping,
				xfs_find_bdev_for_inode(mapping->host), wbc);

	ret = write_cache_pages(mapping, wbc, xfs_do_writepage, &wpc);
	if (wpc.ioend)
		ret = xfs_submit_ioend(wbc, wpc.ioend, ret);
	return ret;
}

/*
 * Called to move a page into cleanable state - and from there
 * to be released. The page should already be clean. We always
 * have buffer heads in this call.
 *
 * Returns 1 if the page is ok to release, 0 otherwise.
 */
STATIC int
xfs_vm_releasepage(
	struct page		*page,
	gfp_t			gfp_mask)
{
	int			delalloc, unwritten;

	trace_xfs_releasepage(page->mapping->host, page, 0, 0);

	/*
	 * mm accommodates an old ext3 case where clean pages might not have had
	 * the dirty bit cleared. Thus, it can send actual dirty pages to
	 * ->releasepage() via shrink_active_list(). Conversely,
	 * block_invalidatepage() can send pages that are still marked dirty
	 * but otherwise have invalidated buffers.
	 *
	 * We want to release the latter to avoid unnecessary buildup of the
	 * LRU, skip the former and warn if we've left any lingering
	 * delalloc/unwritten buffers on clean pages. Skip pages with delalloc
	 * or unwritten buffers and warn if the page is not dirty. Otherwise
	 * try to release the buffers.
	 */
	xfs_count_page_state(page, &delalloc, &unwritten);

	if (delalloc) {
		WARN_ON_ONCE(!PageDirty(page));
		return 0;
	}
	if (unwritten) {
		WARN_ON_ONCE(!PageDirty(page));
		return 0;
	}

	return try_to_free_buffers(page);
}

/*
 * If this is O_DIRECT or the mpage code calling, tell them how large the
 * mapping is, so that we can avoid repeated get_blocks calls.
 *
 * If the mapping spans EOF, then we have to break the mapping up as the mapping
 * for blocks beyond EOF must be marked new so that sub block regions can be
 * correctly zeroed. We can't do this for mappings within EOF unless the mapping
 * was just allocated or is unwritten, otherwise the callers would overwrite
 * existing data with zeros. Hence we have to split the mapping into a range up
 * to and including EOF, and a second mapping for beyond EOF.
 */
static void
xfs_map_trim_size(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset,
	ssize_t			size)
{
	xfs_off_t		mapping_size;

	mapping_size = imap->br_startoff + imap->br_blockcount - iblock;
	mapping_size <<= inode->i_blkbits;

	ASSERT(mapping_size > 0);
	if (mapping_size > size)
		mapping_size = size;
	if (offset < i_size_read(inode) &&
	    offset + mapping_size >= i_size_read(inode)) {
		/* limit mapping to block that spans EOF */
		mapping_size = roundup_64(i_size_read(inode) - offset,
					  1 << inode->i_blkbits);
	}
	if (mapping_size > LONG_MAX)
		mapping_size = LONG_MAX;

	bh_result->b_size = mapping_size;
}

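/*
 * get_block_t callback for the buffered read and bmap paths.  Write mappings
 * are never requested through here any more, hence the BUG_ON(create).
 */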
static int
xfs_get_blocks(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		offset_fsb, end_fsb;
	int			error = 0;
	int			lockmode = 0;
	struct xfs_bmbt_irec	imap;
	int			nimaps = 1;
	xfs_off_t		offset;
	ssize_t			size;

	BUG_ON(create);

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	offset = (xfs_off_t)iblock << inode->i_blkbits;
	ASSERT(bh_result->b_size >= (1 << inode->i_blkbits));
	size = bh_result->b_size;

	if (offset >= i_size_read(inode))
		return 0;

	/*
	 * Direct I/O is usually done on preallocated files, so try getting
	 * a block mapping without an exclusive lock first.
	 */
	lockmode = xfs_ilock_data_map_shared(ip);

	ASSERT(offset <= mp->m_super->s_maxbytes);
	if (offset + size > mp->m_super->s_maxbytes)
		size = mp->m_super->s_maxbytes - offset;
	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + size);
	offset_fsb = XFS_B_TO_FSBT(mp, offset);

	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb,
				&imap, &nimaps, XFS_BMAPI_ENTIRE);
	if (error)
		goto out_unlock;

	if (nimaps) {
		trace_xfs_get_blocks_found(ip, offset, size,
			ISUNWRITTEN(&imap) ? XFS_IO_UNWRITTEN
					   : XFS_IO_OVERWRITE, &imap);
		xfs_iunlock(ip, lockmode);
	} else {
		trace_xfs_get_blocks_notfound(ip, offset, size);
		goto out_unlock;
	}

	/* trim mapping down to size requested */
	xfs_map_trim_size(inode, iblock, bh_result, &imap, offset, size);

	/*
	 * For unwritten extents do not report a disk address in the buffered
	 * read case (treat as if we're reading into a hole).
	 */
	if (imap.br_startblock != HOLESTARTBLOCK &&
	    imap.br_startblock != DELAYSTARTBLOCK &&
	    !ISUNWRITTEN(&imap))
		xfs_map_buffer(inode, bh_result, &imap, offset);

	/*
	 * If this is a realtime file, data may be on a different device
	 * to that pointed to from the buffer_head b_bdev currently.
	 */
	bh_result->b_bdev = xfs_find_bdev_for_inode(inode);
	return 0;

out_unlock:
	xfs_iunlock(ip, lockmode);
	return error;
}

STATIC ssize_t
xfs_vm_direct_IO(
	struct kiocb		*iocb,
	struct iov_iter		*iter)
{
	/*
	 * We just need the method present so that open/fcntl allow direct I/O.
	 */
	return -EINVAL;
}

STATIC sector_t
xfs_vm_bmap(
	struct address_space	*mapping,
	sector_t		block)
{
	struct inode		*inode = (struct inode *)mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);

	trace_xfs_vm_bmap(XFS_I(inode));

	/*
	 * The swap code (ab-)uses ->bmap to get a block mapping and then
	 * bypasses the file system for actual I/O.  We really can't allow
	 * that on reflink inodes, so we have to skip out here.  And yes,
	 * 0 is the magic code for a bmap error.
	 */
	if (xfs_is_reflink_inode(ip))
		return 0;

	filemap_write_and_wait(mapping);
	return generic_block_bmap(mapping, block, xfs_get_blocks);
}

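/*
 * Buffered read paths: hand single pages and readahead batches to the
 * generic mpage code, with xfs_get_blocks() supplying the block mapping.
 */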
STATIC int
xfs_vm_readpage(
	struct file		*unused,
	struct page		*page)
{
	trace_xfs_vm_readpage(page->mapping->host, 1);
	return mpage_readpage(page, xfs_get_blocks);
}

STATIC int
xfs_vm_readpages(
	struct file		*unused,
	struct address_space	*mapping,
	struct list_head	*pages,
	unsigned		nr_pages)
{
	trace_xfs_vm_readpages(mapping->host, nr_pages);
	return mpage_readpages(mapping, pages, nr_pages, xfs_get_blocks);
}

/*
 * This is basically a copy of __set_page_dirty_buffers() with one
 * small tweak: buffers beyond EOF do not get marked dirty. If we mark them
 * dirty, we'll never be able to clean them because we don't write buffers
 * beyond EOF, and that means we can't invalidate pages that span EOF
 * that have been marked dirty. Further, the dirty state can leak into
 * the file interior if the file is extended, resulting in all sorts of
 * bad things happening as the state does not match the underlying data.
 *
 * XXX: this really indicates that bufferheads in XFS need to die. Warts like
 * this only exist because of bufferheads and how the generic code manages them.
 */
STATIC int
xfs_vm_set_page_dirty(
	struct page		*page)
{
	struct address_space	*mapping = page->mapping;
	struct inode		*inode = mapping->host;
	loff_t			end_offset;
	loff_t			offset;
	int			newly_dirty;

	if (unlikely(!mapping))
		return !TestSetPageDirty(page);

	end_offset = i_size_read(inode);
	offset = page_offset(page);

	spin_lock(&mapping->private_lock);
	if (page_has_buffers(page)) {
		struct buffer_head *head = page_buffers(page);
		struct buffer_head *bh = head;

		do {
			if (offset < end_offset)
				set_buffer_dirty(bh);
			bh = bh->b_this_page;
			offset += 1 << inode->i_blkbits;
		} while (bh != head);
	}
	/*
	 * Lock out page->mem_cgroup migration to keep PageDirty
	 * synchronized with per-memcg dirty page counters.
	 */
	lock_page_memcg(page);
	newly_dirty = !TestSetPageDirty(page);
	spin_unlock(&mapping->private_lock);

	if (newly_dirty) {
		/* sigh - __set_page_dirty() is static, so copy it here, too */
		unsigned long flags;

		spin_lock_irqsave(&mapping->tree_lock, flags);
		if (page->mapping) {	/* Race with truncate? */
			WARN_ON_ONCE(!PageUptodate(page));
			account_page_dirtied(page, mapping);
			radix_tree_tag_set(&mapping->page_tree,
					page_index(page), PAGECACHE_TAG_DIRTY);
		}
		spin_unlock_irqrestore(&mapping->tree_lock, flags);
	}
	unlock_page_memcg(page);
	if (newly_dirty)
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
	return newly_dirty;
}

const struct address_space_operations xfs_address_space_operations = {
	.readpage		= xfs_vm_readpage,
	.readpages		= xfs_vm_readpages,
	.writepage		= xfs_vm_writepage,
	.writepages		= xfs_vm_writepages,
	.set_page_dirty		= xfs_vm_set_page_dirty,
	.releasepage		= xfs_vm_releasepage,
	.invalidatepage		= xfs_vm_invalidatepage,
	.bmap			= xfs_vm_bmap,
	.direct_IO		= xfs_vm_direct_IO,
	.migratepage		= buffer_migrate_page,
	.is_partially_uptodate	= block_is_partially_uptodate,
	.error_remove_page	= generic_error_remove_page,
};