// SPDX-License-Identifier: GPL-2.0
/*
 * fs/mpage.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * Contains functions related to preparing and submitting BIOs which contain
 * multiple pagecache pages.
 *
 * 15May2002	Andrew Morton
 *		Initial version
 * 27Jun2002	axboe@suse.de
 *		use bio_add_page() to build bio's just the right size
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/kdev_t.h>
#include <linux/gfp.h>
#include <linux/bio.h>
#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/highmem.h>
#include <linux/prefetch.h>
#include <linux/mpage.h>
#include <linux/mm_inline.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/cleancache.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/android_fs.h>

EXPORT_TRACEPOINT_SYMBOL(android_fs_datawrite_start);
EXPORT_TRACEPOINT_SYMBOL(android_fs_datawrite_end);
EXPORT_TRACEPOINT_SYMBOL(android_fs_dataread_start);
EXPORT_TRACEPOINT_SYMBOL(android_fs_dataread_end);

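/*
 * Editor's note (illustration, not from the original source): these
 * android_fs events are ordinary tracepoints, so - assuming the usual
 * tracefs layout - they can be enabled and read like any other event group:
 *
 *	echo 1 > /sys/kernel/debug/tracing/events/android_fs/enable
 *	cat /sys/kernel/debug/tracing/trace_pipe
 */
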
/*
 * I/O completion handler for multipage BIOs.
 *
 * The mpage code never puts partial pages into a BIO (except for end-of-file).
 * If a page does not map to a contiguous run of blocks then it simply falls
 * back to block_read_full_page().
 *
 * Why is this? If a page's completion depends on a number of different BIOs
 * which can complete in any order (or at the same time) then determining the
 * status of that page is hard. See end_buffer_async_read() for the details.
 * There is no point in duplicating all that complexity.
 */
static void mpage_end_io(struct bio *bio)
{
	struct bio_vec *bv;
	int i;

	if (trace_android_fs_dataread_end_enabled() &&
	    (bio_data_dir(bio) == READ)) {
		struct page *first_page = bio->bi_io_vec[0].bv_page;

		if (first_page != NULL)
			trace_android_fs_dataread_end(first_page->mapping->host,
						      page_offset(first_page),
						      bio->bi_iter.bi_size);
	}

	bio_for_each_segment_all(bv, bio, i) {
		struct page *page = bv->bv_page;
		page_endio(page, op_is_write(bio_op(bio)),
			   blk_status_to_errno(bio->bi_status));
	}

	bio_put(bio);
}

static struct bio *mpage_bio_submit(int op, int op_flags, struct bio *bio)
{
	if (trace_android_fs_dataread_start_enabled() && (op == REQ_OP_READ)) {
		struct page *first_page = bio->bi_io_vec[0].bv_page;

		if (first_page != NULL) {
			char *path, pathbuf[MAX_TRACE_PATHBUF_LEN];

			path = android_fstrace_get_pathname(pathbuf,
						    MAX_TRACE_PATHBUF_LEN,
						    first_page->mapping->host);
			trace_android_fs_dataread_start(
				first_page->mapping->host,
				page_offset(first_page),
				bio->bi_iter.bi_size,
				current->pid,
				path,
				current->comm);
		}
	}
	bio->bi_end_io = mpage_end_io;
	bio_set_op_attrs(bio, op, op_flags);
	guard_bio_eod(op, bio);
	submit_bio(bio);
	return NULL;
}

static struct bio *
mpage_alloc(struct block_device *bdev,
		sector_t first_sector, int nr_vecs,
		gfp_t gfp_flags)
{
	struct bio *bio;

	/* Restrict the given (page cache) mask for slab allocations */
	gfp_flags &= GFP_KERNEL;
	bio = bio_alloc(gfp_flags, nr_vecs);

	if (bio == NULL && (current->flags & PF_MEMALLOC)) {
		while (!bio && (nr_vecs /= 2))
			bio = bio_alloc(gfp_flags, nr_vecs);
	}

	if (bio) {
		bio_set_dev(bio, bdev);
		bio->bi_iter.bi_sector = first_sector;
	}
	return bio;
}

/*
 * support function for mpage_readpages.  The fs supplied get_block might
 * return an up to date buffer.  This is used to map that buffer into
 * the page, which allows readpage to avoid triggering a duplicate call
 * to get_block.
 *
 * The idea is to avoid adding buffers to pages that don't already have
 * them.  So when the buffer is up to date and the page size == block size,
 * this marks the page up to date instead of adding new buffers.
 */
static void
map_buffer_to_page(struct page *page, struct buffer_head *bh, int page_block)
{
	struct inode *inode = page->mapping->host;
	struct buffer_head *page_bh, *head;
	int block = 0;

	if (!page_has_buffers(page)) {
		/*
		 * don't make any buffers if there is only one buffer on
		 * the page and the page just needs to be set up to date
		 */
		if (inode->i_blkbits == PAGE_SHIFT &&
		    buffer_uptodate(bh)) {
			SetPageUptodate(page);
			return;
		}
		create_empty_buffers(page, i_blocksize(inode), 0);
	}
	head = page_buffers(page);
	page_bh = head;
	do {
		if (block == page_block) {
			page_bh->b_state = bh->b_state;
			page_bh->b_bdev = bh->b_bdev;
			page_bh->b_blocknr = bh->b_blocknr;
			break;
		}
		page_bh = page_bh->b_this_page;
		block++;
	} while (page_bh != head);
}

/*
 * This is the worker routine which does all the work of mapping the disk
 * blocks and constructing the largest possible bios; it submits them for IO
 * if the blocks are not contiguous on the disk.
 *
 * We pass a buffer_head back and forth and use its buffer_mapped() flag to
 * represent the validity of its disk mapping and to decide when to do the next
 * get_block() call.
 */
static struct bio *
do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
		sector_t *last_block_in_bio, struct buffer_head *map_bh,
		unsigned long *first_logical_block, get_block_t get_block,
		gfp_t gfp)
{
	struct inode *inode = page->mapping->host;
	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocks_per_page = PAGE_SIZE >> blkbits;
	const unsigned blocksize = 1 << blkbits;
	sector_t block_in_file;
	sector_t last_block;
	sector_t last_block_in_file;
	sector_t blocks[MAX_BUF_PER_PAGE];
	unsigned page_block;
	unsigned first_hole = blocks_per_page;
	struct block_device *bdev = NULL;
	int length;
	int fully_mapped = 1;
	unsigned nblocks;
	unsigned relative_block;

	if (page_has_buffers(page))
		goto confused;

	block_in_file = (sector_t)page->index << (PAGE_SHIFT - blkbits);
	last_block = block_in_file + nr_pages * blocks_per_page;
	last_block_in_file = (i_size_read(inode) + blocksize - 1) >> blkbits;
	if (last_block > last_block_in_file)
		last_block = last_block_in_file;
	page_block = 0;

	/*
	 * Map blocks using the result from the previous get_blocks call first.
	 */
	nblocks = map_bh->b_size >> blkbits;
	if (buffer_mapped(map_bh) && block_in_file > *first_logical_block &&
			block_in_file < (*first_logical_block + nblocks)) {
		unsigned map_offset = block_in_file - *first_logical_block;
		unsigned last = nblocks - map_offset;

		for (relative_block = 0; ; relative_block++) {
			if (relative_block == last) {
				clear_buffer_mapped(map_bh);
				break;
			}
			if (page_block == blocks_per_page)
				break;
			blocks[page_block] = map_bh->b_blocknr + map_offset +
						relative_block;
			page_block++;
			block_in_file++;
		}
		bdev = map_bh->b_bdev;
	}

	/*
	 * Then do more get_blocks calls until we are done with this page.
	 */
	map_bh->b_page = page;
	while (page_block < blocks_per_page) {
		map_bh->b_state = 0;
		map_bh->b_size = 0;

		if (block_in_file < last_block) {
			map_bh->b_size = (last_block-block_in_file) << blkbits;
			if (get_block(inode, block_in_file, map_bh, 0))
				goto confused;
			*first_logical_block = block_in_file;
		}

		if (!buffer_mapped(map_bh)) {
			fully_mapped = 0;
			if (first_hole == blocks_per_page)
				first_hole = page_block;
			page_block++;
			block_in_file++;
			continue;
		}

		/* some filesystems will copy data into the page during
		 * the get_block call, in which case we don't want to
		 * read it again.  map_buffer_to_page copies the data
		 * we just collected from get_block into the page's buffers
		 * so readpage doesn't have to repeat the get_block call
		 */
		if (buffer_uptodate(map_bh)) {
			map_buffer_to_page(page, map_bh, page_block);
			goto confused;
		}

		if (first_hole != blocks_per_page)
			goto confused;		/* hole -> non-hole */

		/* Contiguous blocks? */
		if (page_block && blocks[page_block-1] != map_bh->b_blocknr-1)
			goto confused;
		nblocks = map_bh->b_size >> blkbits;
		for (relative_block = 0; ; relative_block++) {
			if (relative_block == nblocks) {
				clear_buffer_mapped(map_bh);
				break;
			} else if (page_block == blocks_per_page)
				break;
			blocks[page_block] = map_bh->b_blocknr+relative_block;
			page_block++;
			block_in_file++;
		}
		bdev = map_bh->b_bdev;
	}

	if (first_hole != blocks_per_page) {
		zero_user_segment(page, first_hole << blkbits, PAGE_SIZE);
		if (first_hole == 0) {
			SetPageUptodate(page);
			unlock_page(page);
			goto out;
		}
	} else if (fully_mapped) {
		SetPageMappedToDisk(page);
	}

	if (fully_mapped && blocks_per_page == 1 && !PageUptodate(page) &&
	    cleancache_get_page(page) == 0) {
		SetPageUptodate(page);
		goto confused;
	}

	/*
	 * This page will go to BIO.  Do we need to send this BIO off first?
	 */
	if (bio && (*last_block_in_bio != blocks[0] - 1))
		bio = mpage_bio_submit(REQ_OP_READ, 0, bio);

alloc_new:
	if (bio == NULL) {
		if (first_hole == blocks_per_page) {
			if (!bdev_read_page(bdev, blocks[0] << (blkbits - 9),
								page))
				goto out;
		}
		bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9),
				min_t(int, nr_pages, BIO_MAX_PAGES), gfp);
		if (bio == NULL)
			goto confused;
	}

	length = first_hole << blkbits;
	if (bio_add_page(bio, page, length, 0) < length) {
		bio = mpage_bio_submit(REQ_OP_READ, 0, bio);
		goto alloc_new;
	}

	relative_block = block_in_file - *first_logical_block;
	nblocks = map_bh->b_size >> blkbits;
	if ((buffer_boundary(map_bh) && relative_block == nblocks) ||
	    (first_hole != blocks_per_page))
		bio = mpage_bio_submit(REQ_OP_READ, 0, bio);
	else
		*last_block_in_bio = blocks[blocks_per_page - 1];
out:
	return bio;

confused:
	if (bio)
		bio = mpage_bio_submit(REQ_OP_READ, 0, bio);
	if (!PageUptodate(page))
		block_read_full_page(page, get_block);
	else
		unlock_page(page);
	goto out;
}

/**
 * mpage_readpages - populate an address space with some pages & start reads against them
 * @mapping: the address_space
 * @pages: The address of a list_head which contains the target pages.  These
 *   pages have their ->index populated and are otherwise uninitialised.
 *   The page at @pages->prev has the lowest file offset, and reads should be
 *   issued in @pages->prev to @pages->next order.
 * @nr_pages: The number of pages at *@pages
 * @get_block: The filesystem's block mapper function.
 *
 * This function walks the pages and the blocks within each page, building and
 * emitting large BIOs.
 *
 * If anything unusual happens, such as:
 *
 * - encountering a page which has buffers
 * - encountering a page which has a non-hole after a hole
 * - encountering a page with non-contiguous blocks
 *
 * then this code just gives up and calls the buffer_head-based read function.
 * It does handle a page which has holes at the end - that is a common case:
 * the end-of-file on blocksize < PAGE_SIZE setups.
 *
 * BH_Boundary explanation:
 *
 * There is a problem.  The mpage read code assembles several pages, gets all
 * their disk mappings, and then submits them all.  That's fine, but obtaining
 * the disk mappings may require I/O.  Reads of indirect blocks, for example.
 *
 * So an mpage read of the first 16 blocks of an ext2 file will cause I/O to be
 * submitted in the following order:
 *
 *	12 0 1 2 3 4 5 6 7 8 9 10 11 13 14 15 16
 *
 * because the indirect block has to be read to get the mappings of blocks
 * 13,14,15,16.  Obviously, this impacts performance.
 *
 * So what we do is allow the filesystem's get_block() function to set
 * BH_Boundary when it maps block 11.  BH_Boundary says: mapping of the block
 * after this one will require I/O against a block which is probably close to
 * this one.  So you should push what I/O you have currently accumulated.
 * (An illustrative get_block sketch follows mpage_readpage() below.)
 *
 * This all causes the disk requests to be issued in the correct order.
 */
int
mpage_readpages(struct address_space *mapping, struct list_head *pages,
				unsigned nr_pages, get_block_t get_block)
{
	struct bio *bio = NULL;
	unsigned page_idx;
	sector_t last_block_in_bio = 0;
	struct buffer_head map_bh;
	unsigned long first_logical_block = 0;
	gfp_t gfp = readahead_gfp_mask(mapping);

	map_bh.b_state = 0;
	map_bh.b_size = 0;
	for (page_idx = 0; page_idx < nr_pages; page_idx++) {
		struct page *page = lru_to_page(pages);

		prefetchw(&page->flags);
		list_del(&page->lru);
		if (!add_to_page_cache_lru(page, mapping,
					page->index,
					gfp)) {
			bio = do_mpage_readpage(bio, page,
					nr_pages - page_idx,
					&last_block_in_bio, &map_bh,
					&first_logical_block,
					get_block, gfp);
		}
		put_page(page);
	}
	BUG_ON(!list_empty(pages));
	if (bio)
		mpage_bio_submit(REQ_OP_READ, 0, bio);
	return 0;
}
EXPORT_SYMBOL(mpage_readpages);
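
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * a filesystem typically wires mpage_readpage()/mpage_readpages() into its
 * address_space_operations with its own get_block_t.  "myfs_get_block" and
 * the myfs_* wrappers are hypothetical names; ext2 and fat are real in-tree
 * examples of this pattern.
 */
#if 0	/* illustrative only */
static int myfs_readpage(struct file *file, struct page *page)
{
	return mpage_readpage(page, myfs_get_block);
}

static int myfs_readpages(struct file *file, struct address_space *mapping,
			  struct list_head *pages, unsigned nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, myfs_get_block);
}

static const struct address_space_operations myfs_aops = {
	.readpage	= myfs_readpage,
	.readpages	= myfs_readpages,
};
#endif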

/*
 * This isn't called much at all
 */
int mpage_readpage(struct page *page, get_block_t get_block)
{
	struct bio *bio = NULL;
	sector_t last_block_in_bio = 0;
	struct buffer_head map_bh;
	unsigned long first_logical_block = 0;
	gfp_t gfp = mapping_gfp_constraint(page->mapping, GFP_KERNEL);

	map_bh.b_state = 0;
	map_bh.b_size = 0;
	bio = do_mpage_readpage(bio, page, 1, &last_block_in_bio,
			&map_bh, &first_logical_block, get_block, gfp);
	if (bio)
		mpage_bio_submit(REQ_OP_READ, 0, bio);
	return 0;
}
EXPORT_SYMBOL(mpage_readpage);
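
/*
 * Sketch of the get_block_t contract the read side relies on (editor's
 * illustration; "myfs_extent_lookup" is a hypothetical mapping helper).
 * For reads (create == 0) the caller pre-sets bh->b_size to the maximum
 * span it wants; get_block may map several contiguous blocks at once by
 * shrinking b_size to what it actually mapped, and sets BH_Boundary when
 * mapping the next block will itself require I/O nearby -- the hint that
 * makes do_mpage_readpage() push its accumulated BIO (see the BH_Boundary
 * explanation above mpage_readpages()).
 */
#if 0	/* illustrative only */
static int myfs_get_block(struct inode *inode, sector_t iblock,
			  struct buffer_head *bh_result, int create)
{
	unsigned long max_blocks = bh_result->b_size >> inode->i_blkbits;
	unsigned long nr_contig;	/* contiguous blocks from iblock */
	sector_t phys;
	bool next_needs_io;		/* e.g. an indirect block follows */

	/* a real implementation would allocate blocks when create != 0 */
	if (myfs_extent_lookup(inode, iblock, &phys, &nr_contig,
			       &next_needs_io))
		return -EIO;	/* for a hole, leave the bh unmapped instead */

	map_bh(bh_result, inode->i_sb, phys);	/* b_bdev, b_blocknr, mapped */
	bh_result->b_size = min_t(unsigned long, nr_contig, max_blocks)
				<< inode->i_blkbits;
	if (next_needs_io)
		set_buffer_boundary(bh_result);
	return 0;
}
#endif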

/*
 * Writing is not so simple.
 *
 * If the page has buffers then they will be used for obtaining the disk
 * mapping.  We only support pages which are fully mapped-and-dirty, with a
 * special case for pages which are unmapped at the end: end-of-file.
 *
 * If the page has no buffers (preferred) then the page is mapped here.
 *
 * If all blocks are found to be contiguous then the page can go into the
 * BIO.  Otherwise fall back to the mapping's writepage().
 *
 * FIXME: This code wants an estimate of how many pages are still to be
 * written, so it can intelligently allocate a suitably-sized BIO.  For now,
 * just allocate full-size (16-page) BIOs.
 */

struct mpage_data {
	struct bio *bio;
	sector_t last_block_in_bio;
	get_block_t *get_block;
	unsigned use_writepage;
};

/*
 * We have our BIO, so we can now mark the buffers clean.  Make
 * sure to only clean buffers which we know we'll be writing.
 */
static void clean_buffers(struct page *page, unsigned first_unmapped)
{
	unsigned buffer_counter = 0;
	struct buffer_head *bh, *head;
	if (!page_has_buffers(page))
		return;
	head = page_buffers(page);
	bh = head;

	do {
		if (buffer_counter++ == first_unmapped)
			break;
		clear_buffer_dirty(bh);
		bh = bh->b_this_page;
	} while (bh != head);

	/*
	 * we cannot drop the bh if the page is not uptodate or a concurrent
	 * readpage would fail to serialize with the bh and it would read from
	 * disk before we reach the platter.
	 */
	if (buffer_heads_over_limit && PageUptodate(page))
		try_to_free_buffers(page);
}

/*
 * For situations where we want to clean all buffers attached to a page.
 * We don't need to calculate how many buffers are attached to the page,
 * we just need to specify a number larger than the maximum number of buffers.
 */
void clean_page_buffers(struct page *page)
{
	clean_buffers(page, ~0U);
}

static int __mpage_writepage(struct page *page, struct writeback_control *wbc,
		      void *data)
{
	struct mpage_data *mpd = data;
	struct bio *bio = mpd->bio;
	struct address_space *mapping = page->mapping;
	struct inode *inode = page->mapping->host;
	const unsigned blkbits = inode->i_blkbits;
	unsigned long end_index;
	const unsigned blocks_per_page = PAGE_SIZE >> blkbits;
	sector_t last_block;
	sector_t block_in_file;
	sector_t blocks[MAX_BUF_PER_PAGE];
	unsigned page_block;
	unsigned first_unmapped = blocks_per_page;
	struct block_device *bdev = NULL;
	int boundary = 0;
	sector_t boundary_block = 0;
	struct block_device *boundary_bdev = NULL;
	int length;
	struct buffer_head map_bh;
	loff_t i_size = i_size_read(inode);
	int ret = 0;
	int op_flags = wbc_to_write_flags(wbc);

	if (page_has_buffers(page)) {
		struct buffer_head *head = page_buffers(page);
		struct buffer_head *bh = head;

		/* If they're all mapped and dirty, do it */
		page_block = 0;
		do {
			BUG_ON(buffer_locked(bh));
			if (!buffer_mapped(bh)) {
				/*
				 * unmapped dirty buffers are created by
				 * __set_page_dirty_buffers -> mmapped data
				 */
				if (buffer_dirty(bh))
					goto confused;
				if (first_unmapped == blocks_per_page)
					first_unmapped = page_block;
				continue;
			}

			if (first_unmapped != blocks_per_page)
				goto confused;	/* hole -> non-hole */

			if (!buffer_dirty(bh) || !buffer_uptodate(bh))
				goto confused;
			if (page_block) {
				if (bh->b_blocknr != blocks[page_block-1] + 1)
					goto confused;
			}
			blocks[page_block++] = bh->b_blocknr;
			boundary = buffer_boundary(bh);
			if (boundary) {
				boundary_block = bh->b_blocknr;
				boundary_bdev = bh->b_bdev;
			}
			bdev = bh->b_bdev;
		} while ((bh = bh->b_this_page) != head);

		if (first_unmapped)
			goto page_is_mapped;

		/*
		 * Page has buffers, but they are all unmapped. The page was
		 * created by pagein or read over a hole which was handled by
		 * block_read_full_page().  If this address_space is also
		 * using mpage_readpages then this can rarely happen.
		 */
		goto confused;
	}

	/*
	 * The page has no buffers: map it to disk
	 */
	BUG_ON(!PageUptodate(page));
	block_in_file = (sector_t)page->index << (PAGE_SHIFT - blkbits);
	last_block = (i_size - 1) >> blkbits;
	map_bh.b_page = page;
	for (page_block = 0; page_block < blocks_per_page; ) {

		map_bh.b_state = 0;
		map_bh.b_size = 1 << blkbits;
		if (mpd->get_block(inode, block_in_file, &map_bh, 1))
			goto confused;
		if (buffer_new(&map_bh))
			clean_bdev_bh_alias(&map_bh);
		if (buffer_boundary(&map_bh)) {
			boundary_block = map_bh.b_blocknr;
			boundary_bdev = map_bh.b_bdev;
		}
		if (page_block) {
			if (map_bh.b_blocknr != blocks[page_block-1] + 1)
				goto confused;
		}
		blocks[page_block++] = map_bh.b_blocknr;
		boundary = buffer_boundary(&map_bh);
		bdev = map_bh.b_bdev;
		if (block_in_file == last_block)
			break;
		block_in_file++;
	}
	BUG_ON(page_block == 0);

	first_unmapped = page_block;

page_is_mapped:
	end_index = i_size >> PAGE_SHIFT;
	if (page->index >= end_index) {
		/*
		 * The page straddles i_size.  It must be zeroed out on each
		 * and every writepage invocation because it may be mmapped.
		 * "A file is mapped in multiples of the page size.  For a file
		 * that is not a multiple of the page size, the remaining memory
		 * is zeroed when mapped, and writes to that region are not
		 * written out to the file."
		 */
		unsigned offset = i_size & (PAGE_SIZE - 1);

		if (page->index > end_index || !offset)
			goto confused;
		zero_user_segment(page, offset, PAGE_SIZE);
	}

	/*
	 * This page will go to BIO.  Do we need to send this BIO off first?
	 */
	if (bio && mpd->last_block_in_bio != blocks[0] - 1)
		bio = mpage_bio_submit(REQ_OP_WRITE, op_flags, bio);

alloc_new:
	if (bio == NULL) {
		if (first_unmapped == blocks_per_page) {
			if (!bdev_write_page(bdev, blocks[0] << (blkbits - 9),
								page, wbc))
				goto out;
		}
		bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9),
				BIO_MAX_PAGES, GFP_NOFS|__GFP_HIGH);
		if (bio == NULL)
			goto confused;

		wbc_init_bio(wbc, bio);
		bio->bi_write_hint = inode->i_write_hint;
	}

	/*
	 * Must try to add the page before marking the buffer clean or
	 * the confused fail path above (OOM) will be very confused when
	 * it finds all bh marked clean (i.e. it will not write anything)
	 */
	wbc_account_io(wbc, page, PAGE_SIZE);
	length = first_unmapped << blkbits;
	if (bio_add_page(bio, page, length, 0) < length) {
		bio = mpage_bio_submit(REQ_OP_WRITE, op_flags, bio);
		goto alloc_new;
	}

	clean_buffers(page, first_unmapped);

	BUG_ON(PageWriteback(page));
	set_page_writeback(page);
	unlock_page(page);
	if (boundary || (first_unmapped != blocks_per_page)) {
		bio = mpage_bio_submit(REQ_OP_WRITE, op_flags, bio);
		if (boundary_block) {
			write_boundary_block(boundary_bdev,
					boundary_block, 1 << blkbits);
		}
	} else {
		mpd->last_block_in_bio = blocks[blocks_per_page - 1];
	}
	goto out;

confused:
	if (bio)
		bio = mpage_bio_submit(REQ_OP_WRITE, op_flags, bio);

	if (mpd->use_writepage) {
		ret = mapping->a_ops->writepage(page, wbc);
	} else {
		ret = -EAGAIN;
		goto out;
	}
	/*
	 * The caller has a ref on the inode, so *mapping is stable
	 */
	mapping_set_error(mapping, ret);
out:
	mpd->bio = bio;
	return ret;
}

/**
 * mpage_writepages - walk the list of dirty pages of the given address space & writepage() all of them
 * @mapping: address space structure to write
 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
 * @get_block: the filesystem's block mapper function.
 *             If this is NULL then use a_ops->writepage.  Otherwise, go
 *             direct-to-BIO.
 *
 * This is a library function, which implements the writepages()
 * address_space_operation.
 *
 * If a page is already under I/O, generic_writepages() skips it, even
 * if it's dirty.  This is desirable behaviour for memory-cleaning writeback,
 * but it is INCORRECT for data-integrity system calls such as fsync().  fsync()
 * and msync() need to guarantee that all the data which was dirty at the time
 * the call was made gets new I/O started against it.  If wbc->sync_mode is
 * WB_SYNC_ALL then we were called for data integrity and we must wait for
 * existing IO to complete.
 */
int
mpage_writepages(struct address_space *mapping,
		struct writeback_control *wbc, get_block_t get_block)
{
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);

	if (!get_block)
		ret = generic_writepages(mapping, wbc);
	else {
		struct mpage_data mpd = {
			.bio = NULL,
			.last_block_in_bio = 0,
			.get_block = get_block,
			.use_writepage = 1,
		};

		ret = write_cache_pages(mapping, wbc, __mpage_writepage, &mpd);
		if (mpd.bio) {
			int op_flags = (wbc->sync_mode == WB_SYNC_ALL ?
					REQ_SYNC : 0);
			mpage_bio_submit(REQ_OP_WRITE, op_flags, mpd.bio);
		}
	}
	blk_finish_plug(&plug);
	return ret;
}
EXPORT_SYMBOL(mpage_writepages);

int mpage_writepage(struct page *page, get_block_t get_block,
	struct writeback_control *wbc)
{
	struct mpage_data mpd = {
		.bio = NULL,
		.last_block_in_bio = 0,
		.get_block = get_block,
		.use_writepage = 0,
	};
	int ret = __mpage_writepage(page, wbc, &mpd);
	if (mpd.bio) {
		int op_flags = (wbc->sync_mode == WB_SYNC_ALL ?
				REQ_SYNC : 0);
		mpage_bio_submit(REQ_OP_WRITE, op_flags, mpd.bio);
	}
	return ret;
}
EXPORT_SYMBOL(mpage_writepage);
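
/*
 * Usage sketch for the write side (editor's illustration, not part of the
 * original file).  Passing a get_block makes mpage_writepages() go
 * direct-to-BIO; "myfs_get_block" is the same hypothetical mapper sketched
 * after mpage_readpage() above.
 */
#if 0	/* illustrative only */
static int myfs_writepage(struct page *page, struct writeback_control *wbc)
{
	return mpage_writepage(page, myfs_get_block, wbc);
}

static int myfs_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	return mpage_writepages(mapping, wbc, myfs_get_block);
}
#endif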