/*
 * linux/mm/page_io.c
 *
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * Swap reorganised 29.12.95,
 * Asynchronous swapping added 30.12.95. Stephen Tweedie
 * Removed race in async swapping. 14.4.1996. Bruno Haible
 * Add swap of shared pages through the page cache. 20.2.1998. Stephen Tweedie
 * Always use brw_page, life becomes simpler. 12 May 1998 Eric Biederman
 */

#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/swapops.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/frontswap.h>
#include <linux/aio.h>
#include <linux/blkdev.h>
#include <asm/pgtable.h>

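/*
 * Allocate a one-segment bio aimed at the swap slot backing @page.
 * map_swap_page() returns the slot number in page-size units, so it is
 * shifted by (PAGE_SHIFT - 9) into 512-byte sectors for bi_sector.
 * Returns NULL if bio_alloc() fails under @gfp_flags.
 */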
static struct bio *get_swap_bio(gfp_t gfp_flags,
				struct page *page, bio_end_io_t end_io)
{
	struct bio *bio;

	bio = bio_alloc(gfp_flags, 1);
	if (bio) {
		bio->bi_sector = map_swap_page(page, &bio->bi_bdev);
		bio->bi_sector <<= PAGE_SHIFT - 9;
		bio->bi_io_vec[0].bv_page = page;
		bio->bi_io_vec[0].bv_len = PAGE_SIZE;
		bio->bi_io_vec[0].bv_offset = 0;
		bio->bi_vcnt = 1;
		bio->bi_size = PAGE_SIZE;
		bio->bi_end_io = end_io;
	}
	return bio;
}

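/*
 * Completion handler for swap-out bios.  A failed write redirties the
 * page so its contents are not lost to reclaim, clears PG_reclaim, and
 * logs the device and sector, except when the write was issued from the
 * vendor's MTK PASR context.
 */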
void end_swap_bio_write(struct bio *bio, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct page *page = bio->bi_io_vec[0].bv_page;

	if (!uptodate) {
		/* Vendor hook: don't flag errors for MTK PASR-issued writes */
		if (!task_in_mtkpasr(current)) {
			SetPageError(page);
		}
		/*
		 * We failed to write the page out to swap-space.
		 * Re-dirty the page in order to avoid it being reclaimed.
		 * Also print a dire warning that things will go BAD (tm)
		 * very quickly.
		 *
		 * Also clear PG_reclaim to avoid rotate_reclaimable_page()
		 */
		set_page_dirty(page);
		if (!task_in_mtkpasr(current)) {
			printk(KERN_ALERT "Write-error on swap-device (%u:%u:%Lu)\n",
					imajor(bio->bi_bdev->bd_inode),
					iminor(bio->bi_bdev->bd_inode),
					(unsigned long long)bio->bi_sector);
		}
		ClearPageReclaim(page);
	}
	end_page_writeback(page);
	bio_put(bio);
}

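/*
 * Completion handler for swap-in bios.  Marks the page up to date on
 * success; for block-backed swap it additionally gives the device a
 * chance to free its copy of the slot via ->swap_slot_free_notify
 * (see the comment below on zram).
 */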
void end_swap_bio_read(struct bio *bio, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct page *page = bio->bi_io_vec[0].bv_page;

	if (!uptodate) {
		SetPageError(page);
		ClearPageUptodate(page);
		printk(KERN_ALERT "Read-error on swap-device (%u:%u:%Lu)\n",
				imajor(bio->bi_bdev->bd_inode),
				iminor(bio->bi_bdev->bd_inode),
				(unsigned long long)bio->bi_sector);
		goto out;
	}

	SetPageUptodate(page);

	/*
	 * There is no guarantee that the page is in swap cache - the software
	 * suspend code (at least) uses end_swap_bio_read() against a non-
	 * swapcache page.  So we must check PG_swapcache before proceeding with
	 * this optimization.
	 */
	if (likely(PageSwapCache(page))) {
		struct swap_info_struct *sis;

		sis = page_swap_info(page);
		if (sis->flags & SWP_BLKDEV) {
			/*
			 * The swap subsystem performs lazy swap slot freeing,
			 * expecting that the page will be swapped out again.
			 * So we can avoid an unnecessary write if the page
			 * isn't redirtied.
			 * This is good for real swap storage because we can
			 * reduce unnecessary I/O and enhance wear-leveling
			 * if an SSD is used as the swap device.
			 * But if an in-memory swap device (eg zram) is used,
			 * this causes a duplicated copy between uncompressed
			 * data in VM-owned memory and compressed data in
			 * zram-owned memory.  So let's free the zram-owned
			 * memory and make the VM-owned decompressed page
			 * *dirty*, so the page gets swapped out somewhere
			 * again if we ever wish to reclaim it.
			 */
			struct gendisk *disk = sis->bdev->bd_disk;
			if (disk->fops->swap_slot_free_notify) {
				swp_entry_t entry;
				unsigned long offset;

				entry.val = page_private(page);
				offset = swp_offset(entry);

				SetPageDirty(page);
				disk->fops->swap_slot_free_notify(sis->bdev,
						offset);
			}
		}
	}

out:
	unlock_page(page);
	bio_put(bio);
}

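/*
 * Build the extent list for a file-backed swap area by probing the file
 * block by block with bmap().  Only PAGE_SIZE-long, PAGE_SIZE-aligned
 * runs of blocks become extents; a file with holes fails activation
 * with -EINVAL.
 */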
int generic_swapfile_activate(struct swap_info_struct *sis,
				struct file *swap_file,
				sector_t *span)
{
	struct address_space *mapping = swap_file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned blocks_per_page;
	unsigned long page_no;
	unsigned blkbits;
	sector_t probe_block;
	sector_t last_block;
	sector_t lowest_block = -1;
	sector_t highest_block = 0;
	int nr_extents = 0;
	int ret;

	blkbits = inode->i_blkbits;
	blocks_per_page = PAGE_SIZE >> blkbits;

	/*
	 * Map all the blocks into the extent list.  This code doesn't try
	 * to be very smart.
	 */
	probe_block = 0;
	page_no = 0;
	last_block = i_size_read(inode) >> blkbits;
	while ((probe_block + blocks_per_page) <= last_block &&
			page_no < sis->max) {
		unsigned block_in_page;
		sector_t first_block;

		first_block = bmap(inode, probe_block);
		if (first_block == 0)
			goto bad_bmap;

		/*
		 * It must be PAGE_SIZE aligned on-disk
		 */
		if (first_block & (blocks_per_page - 1)) {
			probe_block++;
			goto reprobe;
		}

		for (block_in_page = 1; block_in_page < blocks_per_page;
					block_in_page++) {
			sector_t block;

			block = bmap(inode, probe_block + block_in_page);
			if (block == 0)
				goto bad_bmap;
			if (block != first_block + block_in_page) {
				/* Discontiguity */
				probe_block++;
				goto reprobe;
			}
		}

		first_block >>= (PAGE_SHIFT - blkbits);
		if (page_no) {	/* exclude the header page */
			if (first_block < lowest_block)
				lowest_block = first_block;
			if (first_block > highest_block)
				highest_block = first_block;
		}

		/*
		 * We found a PAGE_SIZE-length, PAGE_SIZE-aligned run of blocks
		 */
		ret = add_swap_extent(sis, page_no, 1, first_block);
		if (ret < 0)
			goto out;
		nr_extents += ret;
		page_no++;
		probe_block += blocks_per_page;
reprobe:
		continue;
	}
	ret = nr_extents;
	*span = 1 + highest_block - lowest_block;
	if (page_no == 0)
		page_no = 1;	/* force Empty message */
	sis->max = page_no;
	sis->pages = page_no - 1;
	sis->highest_bit = page_no - 1;
out:
	return ret;
bad_bmap:
	printk(KERN_ERR "swapon: swapfile has holes\n");
	ret = -EINVAL;
	goto out;
}

/*
 * We may have stale swap cache pages in memory: notice
 * them here and get rid of the unnecessary final write.
 */
int swap_writepage(struct page *page, struct writeback_control *wbc)
{
	int ret = 0;

	if (try_to_free_swap(page)) {
		unlock_page(page);
		goto out;
	}
	if (frontswap_store(page) == 0) {
		set_page_writeback(page);
		unlock_page(page);
		end_page_writeback(page);
		goto out;
	}
	ret = __swap_writepage(page, wbc, end_swap_bio_write);
out:
	return ret;
}

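/*
 * Write @page to its backing swap slot.  SWP_FILE areas (e.g. swap over
 * NFS) go through the filesystem's ->direct_IO; block-backed areas
 * submit a bio completed by @end_write_func.
 */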
int __swap_writepage(struct page *page, struct writeback_control *wbc,
	void (*end_write_func)(struct bio *, int))
{
	struct bio *bio;
	int ret = 0, rw = WRITE;
	struct swap_info_struct *sis = page_swap_info(page);

	if (sis->flags & SWP_FILE) {
		struct kiocb kiocb;
		struct file *swap_file = sis->swap_file;
		struct address_space *mapping = swap_file->f_mapping;
		struct iovec iov = {
			.iov_base = kmap(page),
			.iov_len = PAGE_SIZE,
		};

		init_sync_kiocb(&kiocb, swap_file);
		kiocb.ki_pos = page_file_offset(page);
		kiocb.ki_left = PAGE_SIZE;
		kiocb.ki_nbytes = PAGE_SIZE;

		set_page_writeback(page);
		unlock_page(page);
		ret = mapping->a_ops->direct_IO(KERNEL_WRITE,
						&kiocb, &iov,
						kiocb.ki_pos, 1);
		kunmap(page);
		if (ret == PAGE_SIZE) {
			count_vm_event(PSWPOUT);
			ret = 0;
		} else {
			/*
			 * In the case of swap-over-nfs, this can be a
			 * temporary failure if the system has limited
			 * memory for allocating transmit buffers.
			 * Mark the page dirty and avoid
			 * rotate_reclaimable_page(), and rate-limit the
			 * error messages; unlike the normal direct-to-bio
			 * case, do not flag PageError, as the failure
			 * could be temporary.
			 */
			set_page_dirty(page);
			ClearPageReclaim(page);
			pr_err_ratelimited("Write error on dio swapfile (%Lu)\n",
				page_file_offset(page));
		}
		end_page_writeback(page);
		return ret;
	}

	bio = get_swap_bio(GFP_NOIO, page, end_write_func);
	if (bio == NULL) {
		set_page_dirty(page);
		unlock_page(page);
		ret = -ENOMEM;
		goto out;
	}
	if (wbc->sync_mode == WB_SYNC_ALL)
		rw |= REQ_SYNC;

#ifdef CONFIG_ZRAM
	/* Vendor per-task swap-out accounting */
	current->swap_out++;
#endif
	count_vm_event(PSWPOUT);
	set_page_writeback(page);
	unlock_page(page);
	submit_bio(rw, bio);
out:
	return ret;
}

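/*
 * Read @page back from its swap slot.  frontswap gets first refusal;
 * SWP_FILE areas use the filesystem's ->readpage, block-backed areas
 * submit a READ bio completed by end_swap_bio_read().
 */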
int swap_readpage(struct page *page)
{
	struct bio *bio;
	int ret = 0;
	struct swap_info_struct *sis = page_swap_info(page);

	VM_BUG_ON(!PageLocked(page));
	VM_BUG_ON(PageUptodate(page));
	if (frontswap_load(page) == 0) {
		SetPageUptodate(page);
		unlock_page(page);
		goto out;
	}

	if (sis->flags & SWP_FILE) {
		struct file *swap_file = sis->swap_file;
		struct address_space *mapping = swap_file->f_mapping;

		ret = mapping->a_ops->readpage(swap_file, page);
		if (!ret) {
#ifdef CONFIG_ZRAM
			/* Vendor per-task swap-in accounting */
			current->swap_in++;
#endif
			count_vm_event(PSWPIN);
		}
		return ret;
	}

	bio = get_swap_bio(GFP_KERNEL, page, end_swap_bio_read);
	if (bio == NULL) {
		unlock_page(page);
		ret = -ENOMEM;
		goto out;
	}

#ifdef CONFIG_ZRAM
	/* Vendor per-task swap-in accounting */
	current->swap_in++;
#endif
	count_vm_event(PSWPIN);
	submit_bio(READ, bio);
out:
	return ret;
}

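/*
 * Dirty a swapcache page via the method its backing store requires:
 * SWP_FILE areas delegate to the filesystem's ->set_page_dirty, block
 * devices just set the flag without dirty accounting.
 */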
int swap_set_page_dirty(struct page *page)
{
	struct swap_info_struct *sis = page_swap_info(page);

	if (sis->flags & SWP_FILE) {
		struct address_space *mapping = sis->swap_file->f_mapping;
		return mapping->a_ops->set_page_dirty(page);
	} else {
		return __set_page_dirty_no_writeback(page);
	}
}