/*
 * Squashfs - a compressed read only filesystem for Linux
 *
 * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
 * Phillip Lougher <phillip@squashfs.org.uk>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2,
 * or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * block.c
 */

/*
 * This file implements the low-level routines to read and decompress
 * datablocks and metadata blocks.
 */
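
/*
 * A read is handled in two stages: the block device reads are issued as
 * BIOs against the buffer_heads covering the requested range, and the
 * completed buffers are then either copied or decompressed into the
 * caller's page actor.  Callers may wait for the result
 * (squashfs_read_data()) or have the post-processing performed from
 * workqueue context (squashfs_read_data_async()).
 */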

#include <linux/fs.h>
#include <linux/vfs.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/workqueue.h>

#include "squashfs_fs.h"
#include "squashfs_fs_sb.h"
#include "squashfs.h"
#include "decompressor.h"
#include "page_actor.h"

static struct workqueue_struct *squashfs_read_wq;

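/*
 * Describes a single block read: where the block lives on disk, how the
 * completed buffers should be processed (straight copy, decompression,
 * or metadata header parsing) and the page actor receiving the result.
 */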
struct squashfs_read_request {
	struct super_block *sb;
	u64 index;
	int length;
	int compressed;
	int offset;
	u64 read_end;
	struct squashfs_page_actor *output;
	enum {
		SQUASHFS_COPY,
		SQUASHFS_DECOMPRESS,
		SQUASHFS_METADATA,
	} data_processing;
	bool synchronous;

	/*
	 * If the read is synchronous, it is possible to retrieve information
	 * about the request by setting these pointers.
	 */
	int *res;
	int *bytes_read;
	int *bytes_uncompressed;

	int nr_buffers;
	struct buffer_head **bh;
	struct work_struct offload;
};

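/*
 * Completion context for one BIO: the run of buffer_heads covered by that
 * BIO, so the end_io handler can mark them up to date and unlock them.
 */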
struct squashfs_bio_request {
	struct buffer_head **bh;
	int nr_buffers;
};

static int squashfs_bio_submit(struct squashfs_read_request *req);

int squashfs_init_read_wq(void)
{
	squashfs_read_wq = create_workqueue("SquashFS read wq");
	return !!squashfs_read_wq;
}

void squashfs_destroy_read_wq(void)
{
	flush_workqueue(squashfs_read_wq);
	destroy_workqueue(squashfs_read_wq);
}

static void free_read_request(struct squashfs_read_request *req, int error)
{
	if (!req->synchronous)
		squashfs_page_actor_free(req->output, error);
	if (req->res)
		*(req->res) = error;
	kfree(req->bh);
	kfree(req);
}

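/*
 * Wait for the buffer_heads of a request to complete, then post-process
 * them: for metadata reads, parse the two-byte length header (which may
 * straddle a device block boundary) and resubmit the request if the
 * initial read did not cover the whole block; otherwise copy or
 * decompress the buffers into the output page actor.
 */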
static void squashfs_process_blocks(struct squashfs_read_request *req)
{
	int error = 0;
	int bytes, i, length;
	struct squashfs_sb_info *msblk = req->sb->s_fs_info;
	struct squashfs_page_actor *actor = req->output;
	struct buffer_head **bh = req->bh;
	int nr_buffers = req->nr_buffers;

	for (i = 0; i < nr_buffers; ++i) {
		if (!bh[i])
			continue;
		wait_on_buffer(bh[i]);
		if (!buffer_uptodate(bh[i]))
			error = -EIO;
	}
	if (error)
		goto cleanup;

	if (req->data_processing == SQUASHFS_METADATA) {
		/* Extract the length of the metadata block */
		if (req->offset != msblk->devblksize - 1) {
			length = le16_to_cpup((__le16 *)
					(bh[0]->b_data + req->offset));
		} else {
			length = (unsigned char)bh[0]->b_data[req->offset];
			length |= (unsigned char)bh[1]->b_data[0] << 8;
		}
		req->compressed = SQUASHFS_COMPRESSED(length);
		req->data_processing = req->compressed ? SQUASHFS_DECOMPRESS
						       : SQUASHFS_COPY;
		length = SQUASHFS_COMPRESSED_SIZE(length);
		if (req->index + length + 2 > req->read_end) {
			for (i = 0; i < nr_buffers; ++i)
				put_bh(bh[i]);
			kfree(bh);
			req->length = length;
			req->index += 2;
			squashfs_bio_submit(req);
			return;
		}
		req->length = length;
		req->offset = (req->offset + 2) % PAGE_SIZE;
		if (req->offset < 2) {
			put_bh(bh[0]);
			++bh;
			--nr_buffers;
		}
	}
	if (req->bytes_read)
		*(req->bytes_read) = req->length;

	if (req->data_processing == SQUASHFS_COPY) {
		squashfs_bh_to_actor(bh, nr_buffers, req->output, req->offset,
				     req->length, msblk->devblksize);
	} else if (req->data_processing == SQUASHFS_DECOMPRESS) {
		req->length = squashfs_decompress(msblk, bh, nr_buffers,
				req->offset, req->length, actor);
		if (req->length < 0) {
			error = -EIO;
			goto cleanup;
		}
	}

	/* Last page may have trailing bytes not filled */
	bytes = req->length % PAGE_SIZE;
	if (bytes && actor->page[actor->pages - 1])
		zero_user_segment(actor->page[actor->pages - 1], bytes,
				  PAGE_SIZE);

cleanup:
	if (req->bytes_uncompressed)
		*(req->bytes_uncompressed) = req->length;
	if (error) {
		for (i = 0; i < nr_buffers; ++i)
			if (bh[i])
				put_bh(bh[i]);
	}
	free_read_request(req, error);
}

static void read_wq_handler(struct work_struct *work)
{
	squashfs_process_blocks(container_of(work,
		    struct squashfs_read_request, offload));
}

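/*
 * BIO completion handler: propagate the I/O status to every buffer_head
 * covered by the BIO and unlock them so squashfs_process_blocks() can
 * proceed.
 */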
static void squashfs_bio_end_io(struct bio *bio)
{
	int i;
	blk_status_t error = bio->bi_status;
	struct squashfs_bio_request *bio_req = bio->bi_private;

	bio_put(bio);

	for (i = 0; i < bio_req->nr_buffers; ++i) {
		if (!bio_req->bh[i])
			continue;
		if (!error)
			set_buffer_uptodate(bio_req->bh[i]);
		else
			clear_buffer_uptodate(bio_req->bh[i]);
		unlock_buffer(bio_req->bh[i]);
	}
	kfree(bio_req);
}

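/*
 * Returns true if the device block at index @idx does not overlap any
 * page that is present in the output actor, in which case its
 * buffer_head does not need to be read at all.
 */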
static int bh_is_optional(struct squashfs_read_request *req, int idx)
{
	int start_idx, end_idx;
	struct squashfs_sb_info *msblk = req->sb->s_fs_info;

	start_idx = (idx * msblk->devblksize - req->offset) >> PAGE_SHIFT;
	end_idx = ((idx + 1) * msblk->devblksize - req->offset + 1) >> PAGE_SHIFT;
	if (start_idx >= req->output->pages)
		return 1;
	if (start_idx < 0)
		start_idx = end_idx;
	if (end_idx >= req->output->pages)
		end_idx = start_idx;
	return !req->output->page[start_idx] && !req->output->page[end_idx];
}

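/*
 * Get the buffer_heads covering the requested range, skipping buffers
 * whose data is not needed because the corresponding output pages are
 * missing from the actor.
 */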
static int actor_getblks(struct squashfs_read_request *req, u64 block)
{
	int i;

	req->bh = kmalloc_array(req->nr_buffers, sizeof(*(req->bh)), GFP_NOIO);
	if (!req->bh)
		return -ENOMEM;

	for (i = 0; i < req->nr_buffers; ++i) {
		/*
		 * When dealing with an uncompressed block, the actor may
		 * contain NULL pages. There's no need to read the buffers
		 * associated with these pages.
		 */
		if (!req->compressed && bh_is_optional(req, i)) {
			req->bh[i] = NULL;
			continue;
		}
		req->bh[i] = sb_getblk(req->sb, block + i);
		if (!req->bh[i]) {
			while (--i >= 0) {
				if (req->bh[i])
					put_bh(req->bh[i]);
			}
			return -1;
		}
	}
	return 0;
}

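/*
 * Issue the block device reads for a request: grab the buffer_heads
 * covering the range, batch runs of contiguous, not-yet-uptodate buffers
 * into as few BIOs as possible and submit them, then process the result
 * either inline (synchronous requests) or from workqueue context.
 */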
static int squashfs_bio_submit(struct squashfs_read_request *req)
{
	struct bio *bio = NULL;
	struct buffer_head *bh;
	struct squashfs_bio_request *bio_req = NULL;
	int b = 0, prev_block = 0;
	struct squashfs_sb_info *msblk = req->sb->s_fs_info;

	u64 read_start = round_down(req->index, msblk->devblksize);
	u64 read_end = round_up(req->index + req->length, msblk->devblksize);
	sector_t block = read_start >> msblk->devblksize_log2;
	sector_t block_end = read_end >> msblk->devblksize_log2;
	int offset = read_start - round_down(req->index, PAGE_SIZE);
	int nr_buffers = block_end - block;
	int blksz = msblk->devblksize;
	int bio_max_pages = nr_buffers > BIO_MAX_PAGES ? BIO_MAX_PAGES
						       : nr_buffers;

	/* Setup the request */
	req->read_end = read_end;
	req->offset = req->index - read_start;
	req->nr_buffers = nr_buffers;
	if (actor_getblks(req, block) < 0)
		goto getblk_failed;

	/* Create and submit the BIOs */
	for (b = 0; b < nr_buffers; ++b, offset += blksz) {
		bh = req->bh[b];
		if (!bh || !trylock_buffer(bh))
			continue;
		if (buffer_uptodate(bh)) {
			unlock_buffer(bh);
			continue;
		}
		offset %= PAGE_SIZE;

		/* Append the buffer to the current BIO if it is contiguous */
		if (bio && bio_req && prev_block + 1 == b) {
			if (bio_add_page(bio, bh->b_page, blksz, offset)) {
				bio_req->nr_buffers += 1;
				prev_block = b;
				continue;
			}
		}

		/* Otherwise, submit the current BIO and create a new one */
		if (bio)
			submit_bio(bio);
		bio_req = kcalloc(1, sizeof(struct squashfs_bio_request),
				  GFP_NOIO);
		if (!bio_req)
			goto req_alloc_failed;
		bio_req->bh = &req->bh[b];
		bio = bio_alloc(GFP_NOIO, bio_max_pages);
		if (!bio)
			goto bio_alloc_failed;
		bio_set_dev(bio, req->sb->s_bdev);
		bio->bi_iter.bi_sector = (block + b)
				       << (msblk->devblksize_log2 - 9);
		bio_set_op_attrs(bio, REQ_OP_READ, 0);
		bio->bi_private = bio_req;
		bio->bi_end_io = squashfs_bio_end_io;

		bio_add_page(bio, bh->b_page, blksz, offset);
		bio_req->nr_buffers += 1;
		prev_block = b;
	}
	if (bio)
		submit_bio(bio);

	if (req->synchronous)
		squashfs_process_blocks(req);
	else {
		INIT_WORK(&req->offload, read_wq_handler);
		queue_work(squashfs_read_wq, &req->offload);
	}
	return 0;

bio_alloc_failed:
	kfree(bio_req);
req_alloc_failed:
	unlock_buffer(bh);
	while (--nr_buffers >= b)
		if (req->bh[nr_buffers])
			put_bh(req->bh[nr_buffers]);
	while (--b >= 0)
		if (req->bh[b])
			wait_on_buffer(req->bh[b]);
getblk_failed:
	free_read_request(req, -ENOMEM);
	return -ENOMEM;
}

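/*
 * Read a metadata block.  Only the first two bytes (the compressed
 * length header) are requested up front; squashfs_process_blocks()
 * derives the real block length from them and extends the read if
 * necessary.
 */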
static int read_metadata_block(struct squashfs_read_request *req,
			       u64 *next_index)
{
	int ret, error, bytes_read = 0, bytes_uncompressed = 0;
	struct squashfs_sb_info *msblk = req->sb->s_fs_info;

	if (req->index + 2 > msblk->bytes_used) {
		free_read_request(req, -EINVAL);
		return -EINVAL;
	}
	req->length = 2;

	/* Do not read beyond the end of the device */
	if (req->index + req->length > msblk->bytes_used)
		req->length = msblk->bytes_used - req->index;
	req->data_processing = SQUASHFS_METADATA;

	/*
	 * Reading metadata is always synchronous because we don't know the
	 * length in advance and the function is expected to update
	 * 'next_index' and return the length.
	 */
	req->synchronous = true;
	req->res = &error;
	req->bytes_read = &bytes_read;
	req->bytes_uncompressed = &bytes_uncompressed;

	TRACE("Metadata block @ 0x%llx, %scompressed size %d, src size %d\n",
	      req->index, req->compressed ? "" : "un", bytes_read,
	      req->output->length);

	ret = squashfs_bio_submit(req);
	if (ret)
		return ret;
	if (error)
		return error;
	if (next_index)
		*next_index += 2 + bytes_read;
	return bytes_uncompressed;
}

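/*
 * Read a datablock.  The caller supplies the on-disk length word, which
 * encodes both the size and whether the block is stored compressed.
 */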
static int read_data_block(struct squashfs_read_request *req, int length,
			   u64 *next_index, bool synchronous)
{
	int ret, error = 0, bytes_uncompressed = 0, bytes_read = 0;

	req->compressed = SQUASHFS_COMPRESSED_BLOCK(length);
	req->length = length = SQUASHFS_COMPRESSED_SIZE_BLOCK(length);
	req->data_processing = req->compressed ? SQUASHFS_DECOMPRESS
					       : SQUASHFS_COPY;

	req->synchronous = synchronous;
	if (synchronous) {
		req->res = &error;
		req->bytes_read = &bytes_read;
		req->bytes_uncompressed = &bytes_uncompressed;
	}

	TRACE("Data block @ 0x%llx, %scompressed size %d, src size %d\n",
	      req->index, req->compressed ? "" : "un", req->length,
	      req->output->length);

	ret = squashfs_bio_submit(req);
	if (ret)
		return ret;
	if (synchronous)
		ret = error ? error : bytes_uncompressed;
	if (next_index)
		*next_index += length;
	return ret;
}

/*
 * Read and decompress a metadata block or datablock.  Length is non-zero
 * if a datablock is being read (the size is stored elsewhere in the
 * filesystem), otherwise the length is obtained from the first two bytes of
 * the metadata block.  A bit in the length field indicates if the block
 * is stored uncompressed in the filesystem (usually because compression
 * generated a larger block - this does occasionally happen with compression
 * algorithms).
 */
static int __squashfs_read_data(struct super_block *sb, u64 index, int length,
	u64 *next_index, struct squashfs_page_actor *output, bool sync)
{
	struct squashfs_read_request *req;

	req = kcalloc(1, sizeof(struct squashfs_read_request), GFP_KERNEL);
	if (!req) {
		if (!sync)
			squashfs_page_actor_free(output, -ENOMEM);
		return -ENOMEM;
	}

	req->sb = sb;
	req->index = index;
	req->output = output;

	if (next_index)
		*next_index = index;

	if (length)
		length = read_data_block(req, length, next_index, sync);
	else
		length = read_metadata_block(req, next_index);

	if (length < 0) {
		ERROR("squashfs_read_data failed to read block 0x%llx\n",
		      (unsigned long long)index);
		return -EIO;
	}

	return length;
}

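/*
 * Synchronously read a block: returns the number of bytes produced in
 * @output, or a negative error code.
 */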
int squashfs_read_data(struct super_block *sb, u64 index, int length,
	u64 *next_index, struct squashfs_page_actor *output)
{
	return __squashfs_read_data(sb, index, length, next_index, output,
				    true);
}

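/*
 * Asynchronously read a block: the request is processed and the page
 * actor released from workqueue context, with any error reported through
 * squashfs_page_actor_free().
 */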
int squashfs_read_data_async(struct super_block *sb, u64 index, int length,
	u64 *next_index, struct squashfs_page_actor *output)
{
	return __squashfs_read_data(sb, index, length, next_index, output,
				    false);
}