/*
 * fs/mpage.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * Contains functions related to preparing and submitting BIOs which contain
 * multiple pagecache pages.
 *
 * 15May2002	Andrew Morton
 *		Initial version
 * 27Jun2002	axboe@suse.de
 *		use bio_add_page() to build bio's just the right size
 */

/*
 * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
 * MA 02110-1301, USA.
 */

/************************************************************************/
/*                                                                      */
/*  PROJECT : exFAT & FAT12/16/32 File System                           */
/*  FILE    : mpage.c                                                   */
/*  PURPOSE : sdFAT glue layer for supporting VFS                       */
/*                                                                      */
/*----------------------------------------------------------------------*/
/*  NOTES                                                               */
/*                                                                      */
/*                                                                      */
/************************************************************************/

#include <linux/version.h>
#include <linux/module.h>
#include <linux/time.h>
#include <linux/buffer_head.h>
#include <linux/exportfs.h>
#include <linux/mount.h>
#include <linux/vfs.h>
#include <linux/parser.h>
#include <linux/uio.h>
#include <linux/writeback.h>
#include <linux/log2.h>
#include <linux/hash.h>
#include <linux/backing-dev.h>
#include <linux/sched.h>
#include <linux/fs_struct.h>
#include <linux/namei.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/swap.h> /* for mark_page_accessed() */
#include <asm/current.h>
#include <asm/unaligned.h>
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
#include <linux/aio.h>
#endif

#include "sdfat.h"

#ifdef CONFIG_SDFAT_ALIGNED_MPAGE_WRITE

/*************************************************************************
 * INNER FUNCTIONS FOR FUNCTIONS THAT HAVE KERNEL VERSION DEPENDENCIES
 *************************************************************************/
static void __mpage_write_end_io(struct bio *bio, int err);

/*************************************************************************
 * FUNCTIONS THAT HAVE KERNEL VERSION DEPENDENCIES
 *************************************************************************/
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,3,0)
static void mpage_write_end_io(struct bio *bio)
{
	__mpage_write_end_io(bio, bio->bi_error);
}
#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(4,3,0) */
static void mpage_write_end_io(struct bio *bio, int err)
{
	if (test_bit(BIO_UPTODATE, &bio->bi_flags))
		err = 0;
	__mpage_write_end_io(bio, err);
}
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4,3,0) */
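
/*
 * Note: since Linux 4.3 the bio completion callback takes no error argument;
 * the completion status is carried in bio->bi_error instead, and the old
 * BIO_UPTODATE flag test went away.  The two wrappers above only normalize
 * those two conventions into the single __mpage_write_end_io(bio, err)
 * signature used by the rest of this file.
 */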

#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,1,0)
static inline int bio_get_nr_vecs(struct block_device *bdev)
{
	return BIO_MAX_PAGES;
}
#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(4,1,0) */
	/* EMPTY */
#endif
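
/*
 * On newer kernels (the cutoff this driver uses is v4.1) the block layer's
 * bio_get_nr_vecs() helper is no longer relied upon, so a local stand-in
 * simply requests the maximum number of page vectors (BIO_MAX_PAGES).
 * bio_alloc() may still fail for large requests, which mpage_alloc() below
 * handles by retrying with progressively fewer vectors.
 */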

#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0)
static inline sector_t __sdfat_bio_sector(struct bio *bio)
{
	return bio->bi_iter.bi_sector;
}

static inline void __sdfat_set_bio_sector(struct bio *bio, sector_t sector)
{
	bio->bi_iter.bi_sector = sector;
}

static inline unsigned int __sdfat_bio_size(struct bio *bio)
{
	return bio->bi_iter.bi_size;
}

static inline void __sdfat_set_bio_size(struct bio *bio, unsigned int size)
{
	bio->bi_iter.bi_size = size;
}
#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0) */
static inline sector_t __sdfat_bio_sector(struct bio *bio)
{
	return bio->bi_sector;
}

static inline void __sdfat_set_bio_sector(struct bio *bio, sector_t sector)
{
	bio->bi_sector = sector;
}

static inline unsigned int __sdfat_bio_size(struct bio *bio)
{
	return bio->bi_size;
}

static inline void __sdfat_set_bio_size(struct bio *bio, unsigned int size)
{
	bio->bi_size = size;
}
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0) */
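
/*
 * Since v3.14 the current position of a bio lives in its embedded iterator
 * (bio->bi_iter.bi_sector / bi_size) rather than directly in struct bio
 * (bi_sector / bi_size).  The accessors above hide that difference, so e.g.
 * __sdfat_bio_sector(bio) always answers "which 512-byte sector does this
 * bio start at", whichever kernel this file is built against.
 */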

/*
 * __check_dfr_on() and __dfr_writepage_end_io() are copied from sdfat.c.
 * Each copy must stay exactly identical to its original.
 */
static inline int __check_dfr_on(struct inode *inode, loff_t start, loff_t end, const char *fname)
{
#ifdef CONFIG_SDFAT_DFR
	struct defrag_info *ino_dfr = &(SDFAT_I(inode)->dfr_info);

	if ((atomic_read(&ino_dfr->stat) == DFR_INO_STAT_REQ) &&
			fsapi_dfr_check_dfr_on(inode, start, end, 0, fname))
		return 1;
#endif
	return 0;
}

static inline int __dfr_writepage_end_io(struct page *page)
{
#ifdef CONFIG_SDFAT_DFR
	struct defrag_info *ino_dfr = &(SDFAT_I(page->mapping->host)->dfr_info);

	if (atomic_read(&ino_dfr->stat) == DFR_INO_STAT_REQ)
		fsapi_dfr_writepage_endio(page);
#endif
	return 0;
}

static inline unsigned int __calc_size_to_align(struct super_block *sb)
{
	struct block_device *bdev = sb->s_bdev;
	struct gendisk *disk;
	struct request_queue *queue;
	struct queue_limits *limit;
	unsigned int max_sectors;
	unsigned int aligned = 0;

	disk = bdev->bd_disk;
	if (!disk)
		goto out;

	queue = disk->queue;
	if (!queue)
		goto out;

	limit = &queue->limits;
	max_sectors = limit->max_sectors;
	aligned = 1 << ilog2(max_sectors);

	if (aligned && (max_sectors & (aligned - 1)))
		aligned = 0;
out:
	return aligned;
}
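
/*
 * Illustrative example (assumed numbers): if the queue reports
 * limits.max_sectors == 1024, then ilog2(1024) == 10, aligned == 1024 and
 * writes are cut at 1024-sector (512 KiB) boundaries.  If max_sectors is not
 * a power of two, say 768, aligned is first computed as 512, the remainder
 * test (768 & 511) is non-zero, and aligned is reset to 0 so the alignment
 * heuristic is disabled for that device.
 */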

struct mpage_data {
	struct bio *bio;
	sector_t last_block_in_bio;
	get_block_t *get_block;
	unsigned use_writepage;
	unsigned size_to_align;
};
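
/*
 * Cursor state carried across sdfat_mpage_writepage() calls by
 * write_cache_pages(): bio is the in-flight bio being built (NULL if none),
 * last_block_in_bio remembers the last block added to it so contiguity can
 * be checked, and size_to_align is the power-of-two chunk size derived from
 * the queue's max_sectors by __calc_size_to_align() above (0 when the
 * alignment heuristic is disabled).
 */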

/*
 * I/O completion handler for multipage BIOs.
 *
 * The mpage code never puts partial pages into a BIO (except for end-of-file).
 * If a page does not map to a contiguous run of blocks then it simply falls
 * back to block_read_full_page().
 *
 * Why is this? If a page's completion depends on a number of different BIOs
 * which can complete in any order (or at the same time) then determining the
 * status of that page is hard. See end_buffer_async_read() for the details.
 * There is no point in duplicating all that complexity.
 */
static void __mpage_write_end_io(struct bio *bio, int err)
{
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;

	ASSERT(bio_data_dir(bio) == WRITE); /* only write */

	do {
		struct page *page = bvec->bv_page;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);
		if (err) {
			SetPageError(page);
			if (page->mapping)
				mapping_set_error(page->mapping, err);
		}

		__dfr_writepage_end_io(page);

		end_page_writeback(page);
	} while (bvec >= bio->bi_io_vec);
	bio_put(bio);
}
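
/*
 * The walk above deliberately goes backwards through bio->bi_io_vec:
 * starting from the last filled bvec and prefetching the previous page's
 * flags hides a little of the cache-miss latency of end_page_writeback(),
 * mirroring the completion loop older versions of fs/mpage.c used.
 */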

static struct bio *mpage_bio_submit(int rw, struct bio *bio)
{
	bio->bi_end_io = mpage_write_end_io;
	submit_bio(rw, bio);
	return NULL;
}

static struct bio *
mpage_alloc(struct block_device *bdev,
		sector_t first_sector, int nr_vecs,
		gfp_t gfp_flags)
{
	struct bio *bio;

	bio = bio_alloc(gfp_flags, nr_vecs);

	if (bio == NULL && (current->flags & PF_MEMALLOC)) {
		while (!bio && (nr_vecs /= 2))
			bio = bio_alloc(gfp_flags, nr_vecs);
	}

	if (bio) {
		bio->bi_bdev = bdev;
		__sdfat_set_bio_sector(bio, first_sector);
	}
	return bio;
}
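
/*
 * If the first bio_alloc() fails while the caller is itself a memory
 * allocator in distress (PF_MEMALLOC), the request is retried with the
 * vector count halved each time (nr_vecs /= 2) until either an allocation
 * succeeds or nr_vecs reaches zero.  A smaller bio just means fewer pages
 * are batched per submission; correctness does not depend on the size.
 */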

static int sdfat_mpage_writepage(struct page *page,
		struct writeback_control *wbc, void *data)
{
	struct mpage_data *mpd = data;
	struct bio *bio = mpd->bio;
	struct address_space *mapping = page->mapping;
	struct inode *inode = page->mapping->host;
	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocks_per_page = PAGE_CACHE_SIZE >> blkbits;
	sector_t last_block;
	sector_t block_in_file;
	sector_t blocks[MAX_BUF_PER_PAGE];
	unsigned page_block;
	unsigned first_unmapped = blocks_per_page;
	struct block_device *bdev = NULL;
	int boundary = 0;
	sector_t boundary_block = 0;
	struct block_device *boundary_bdev = NULL;
	int length;
	struct buffer_head map_bh;
	loff_t i_size = i_size_read(inode);
	unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
	int ret = 0;

	if (page_has_buffers(page)) {
		struct buffer_head *head = page_buffers(page);
		struct buffer_head *bh = head;

		/* If they're all mapped and dirty, do it */
		page_block = 0;
		do {
			BUG_ON(buffer_locked(bh));
			if (!buffer_mapped(bh)) {
				/*
				 * unmapped dirty buffers are created by
				 * __set_page_dirty_buffers -> mmapped data
				 */
				if (buffer_dirty(bh))
					goto confused;
				if (first_unmapped == blocks_per_page)
					first_unmapped = page_block;
				continue;
			}

			if (first_unmapped != blocks_per_page)
				goto confused;	/* hole -> non-hole */

			if (!buffer_dirty(bh) || !buffer_uptodate(bh))
				goto confused;

			/* bh should be mapped if delay is set */
			if (buffer_delay(bh)) {
				sector_t blk_in_file =
					(sector_t)(page->index << (PAGE_CACHE_SHIFT - blkbits)) + page_block;

				BUG_ON(bh->b_size != (1 << blkbits));

				if (page->index > end_index) {
					MMSG("%s(inode:%p) "
						"over end with delayed buffer"
						"(page_idx:%u, end_idx:%u)\n",
						__func__, inode,
						(u32)page->index,
						(u32)end_index);
					goto confused;
				}

				ret = mpd->get_block(inode, blk_in_file, bh, 1);
				if (ret) {
					MMSG("%s(inode:%p) "
						"failed to getblk(ret:%d)\n",
						__func__, inode, ret);
					goto confused;
				}

				BUG_ON(buffer_delay(bh));

				if (buffer_new(bh)) {
					clear_buffer_new(bh);
					unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
				}
			}

			if (page_block) {
				if (bh->b_blocknr != blocks[page_block-1] + 1) {
					MMSG("%s(inode:%p) pblk(%d) "
						"no_seq(prev:%lld, new:%lld)\n",
						__func__, inode, page_block,
						(u64)blocks[page_block-1],
						(u64)bh->b_blocknr);
					goto confused;
				}
			}
			blocks[page_block++] = bh->b_blocknr;
			boundary = buffer_boundary(bh);
			if (boundary) {
				boundary_block = bh->b_blocknr;
				boundary_bdev = bh->b_bdev;
			}
			bdev = bh->b_bdev;
		} while ((bh = bh->b_this_page) != head);

		if (first_unmapped)
			goto page_is_mapped;

		/*
		 * Page has buffers, but they are all unmapped. The page was
		 * created by pagein or read over a hole which was handled by
		 * block_read_full_page(). If this address_space is also
		 * using mpage_readpages then this can rarely happen.
		 */
		goto confused;
	}

	/*
	 * The page has no buffers: map it to disk
	 */
	BUG_ON(!PageUptodate(page));
	block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
	last_block = (i_size - 1) >> blkbits;
	map_bh.b_page = page;
	for (page_block = 0; page_block < blocks_per_page; ) {

		map_bh.b_state = 0;
		map_bh.b_size = 1 << blkbits;
		if (mpd->get_block(inode, block_in_file, &map_bh, 1))
			goto confused;

		if (buffer_new(&map_bh))
			unmap_underlying_metadata(map_bh.b_bdev,
					map_bh.b_blocknr);
		if (buffer_boundary(&map_bh)) {
			boundary_block = map_bh.b_blocknr;
			boundary_bdev = map_bh.b_bdev;
		}

		if (page_block) {
			if (map_bh.b_blocknr != blocks[page_block-1] + 1)
				goto confused;
		}
		blocks[page_block++] = map_bh.b_blocknr;
		boundary = buffer_boundary(&map_bh);
		bdev = map_bh.b_bdev;
		if (block_in_file == last_block)
			break;
		block_in_file++;
	}
	BUG_ON(page_block == 0);

	first_unmapped = page_block;

page_is_mapped:
	if (page->index >= end_index) {
		/*
		 * The page straddles i_size. It must be zeroed out on each
		 * and every writepage invocation because it may be mmapped.
		 * "A file is mapped in multiples of the page size. For a file
		 * that is not a multiple of the page size, the remaining memory
		 * is zeroed when mapped, and writes to that region are not
		 * written out to the file."
		 */
		unsigned offset = i_size & (PAGE_CACHE_SIZE - 1);

		if (page->index > end_index || !offset) {
			MMSG("%s(inode:%p) over end "
				"(page_idx:%u, end_idx:%u off:%u)\n",
				__func__, inode, (u32)page->index,
				(u32)end_index, (u32)offset);
			goto confused;
		}
		zero_user_segment(page, offset, PAGE_CACHE_SIZE);
	}
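
	/*
	 * Example (assuming 4 KiB pages): with i_size == 10000 bytes,
	 * end_index == 2 and offset == 10000 & 4095 == 1808, so for the page
	 * at index 2 the bytes 1808..4095 are zeroed before the write goes
	 * out.  A page entirely beyond i_size, or one where offset == 0
	 * (i_size exactly page-aligned), should never be written here and is
	 * handed to the confused path instead.
	 */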

	/*
	 * This page will go to BIO.  Do we need to send this BIO off first?
	 *
	 * REMARK: the else-if branch below was added for sdFAT's aligned
	 * mpage write (CONFIG_SDFAT_ALIGNED_MPAGE_WRITE).
	 */
	if (bio) {
		if (mpd->last_block_in_bio != blocks[0] - 1) {
			bio = mpage_bio_submit(WRITE, bio);
		} else if (mpd->size_to_align) {
			unsigned mask = mpd->size_to_align - 1;
			sector_t max_end_block =
				(__sdfat_bio_sector(bio) & ~(mask)) + mask;

			if ((__sdfat_bio_size(bio) != (1 << (mask + 1))) &&
					(mpd->last_block_in_bio == max_end_block)) {
				MMSG("%s(inode:%p) alignment mpage_bio_submit"
					"(start:%u, len:%u aligned:%u)\n",
					__func__, inode,
					(unsigned)__sdfat_bio_sector(bio),
					(unsigned)(mpd->last_block_in_bio -
						__sdfat_bio_sector(bio) + 1),
					(unsigned)mpd->size_to_align);
				bio = mpage_bio_submit(WRITE | REQ_NOMERGE, bio);
			}
		}
	}
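
	/*
	 * In other words: the pending bio is flushed early either because the
	 * new page is not physically contiguous with it, or (when
	 * size_to_align is set) because the bio already runs up to an
	 * alignment boundary of the device, in which case REQ_NOMERGE keeps
	 * the block layer from merging it across that boundary.
	 */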

alloc_new:
	if (!bio) {
		bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9),
				bio_get_nr_vecs(bdev), GFP_NOFS|__GFP_HIGH);
		if (!bio)
			goto confused;
	}

	/*
	 * Must try to add the page before marking the buffer clean or
	 * the confused fail path above (OOM) will be very confused when
	 * it finds all bh marked clean (i.e. it will not write anything)
	 */
	length = first_unmapped << blkbits;
	if (bio_add_page(bio, page, length, 0) < length) {
		bio = mpage_bio_submit(WRITE, bio);
		goto alloc_new;
	}

	/*
	 * OK, we have our BIO, so we can now mark the buffers clean.  Make
	 * sure to only clean buffers which we know we'll be writing.
	 */
	if (page_has_buffers(page)) {
		struct buffer_head *head = page_buffers(page);
		struct buffer_head *bh = head;
		unsigned buffer_counter = 0;

		do {
			if (buffer_counter++ == first_unmapped)
				break;
			clear_buffer_dirty(bh);
			bh = bh->b_this_page;
		} while (bh != head);

		/*
		 * we cannot drop the bh if the page is not uptodate
		 * or a concurrent readpage would fail to serialize with the bh
		 * and it would read from disk before we reach the platter.
		 */
		if (buffer_heads_over_limit && PageUptodate(page))
			try_to_free_buffers(page);
	}

	BUG_ON(PageWriteback(page));
	set_page_writeback(page);

	/*
	 * FIXME FOR DEFRAGMENTATION: CODE REVIEW IS REQUIRED
	 *
	 * Turn off the MAPPED flag in the victim's bh if defrag is on.
	 * Another write_begin can start after get_block has been called for
	 * the defrag victims.  In that case write_begin calls get_block,
	 * gets the original block number, and the previous defrag request
	 * is cancelled.
	 */
	if (unlikely(__check_dfr_on(inode, (loff_t)(page->index << PAGE_SHIFT),
			(loff_t)((page->index + 1) << PAGE_SHIFT), __func__))) {
		struct buffer_head *head = page_buffers(page);
		struct buffer_head *bh = head;

		do {
			clear_buffer_mapped(bh);
			bh = bh->b_this_page;
		} while (bh != head);
	}

	unlock_page(page);
	if (boundary || (first_unmapped != blocks_per_page)) {
		bio = mpage_bio_submit(WRITE, bio);
		if (boundary_block) {
			write_boundary_block(boundary_bdev,
					boundary_block, 1 << blkbits);
		}
	} else {
		mpd->last_block_in_bio = blocks[blocks_per_page - 1];
	}

	goto out;

confused:
	if (bio)
		bio = mpage_bio_submit(WRITE, bio);

	if (mpd->use_writepage) {
		ret = mapping->a_ops->writepage(page, wbc);
	} else {
		ret = -EAGAIN;
		goto out;
	}
	/*
	 * The caller has a ref on the inode, so *mapping is stable
	 */
	mapping_set_error(mapping, ret);
out:
	mpd->bio = bio;
	return ret;
}

int sdfat_mpage_writepages(struct address_space *mapping,
		struct writeback_control *wbc, get_block_t *get_block)
{
	struct blk_plug plug;
	int ret;
	struct mpage_data mpd = {
		.bio = NULL,
		.last_block_in_bio = 0,
		.get_block = get_block,
		.use_writepage = 1,
		.size_to_align = __calc_size_to_align(mapping->host->i_sb),
	};

	BUG_ON(!get_block);

	blk_start_plug(&plug);

	ret = write_cache_pages(mapping, wbc, sdfat_mpage_writepage, &mpd);
	if (mpd.bio)
		mpage_bio_submit(WRITE, mpd.bio);
	blk_finish_plug(&plug);
	return ret;
}
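
/*
 * Usage sketch (illustrative, not necessarily the exact hookup in this
 * tree): the VFS entry point is expected to be an address_space_operations
 * ->writepages callback that simply forwards here with the filesystem's
 * get_block routine, e.g.
 *
 *	static int sdfat_writepages(struct address_space *mapping,
 *				    struct writeback_control *wbc)
 *	{
 *		return sdfat_mpage_writepages(mapping, wbc, sdfat_get_block);
 *	}
 *
 * where sdfat_writepages and sdfat_get_block are assumed names defined in
 * sdfat.c.
 */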

#endif /* CONFIG_SDFAT_ALIGNED_MPAGE_WRITE */
