If unsure, say N.
+choice
+ prompt "File decompression options"
+ depends on SQUASHFS
+ help
+ Squashfs now supports two options for decompressing file
+ data. Traditionally Squashfs has decompressed into an
+ intermediate buffer and then memcopied it into the page cache.
+ It now also supports decompressing directly into the page
+ cache.
+
+ If unsure, select "Decompress file data into an intermediate buffer"
+
+config SQUASHFS_FILE_CACHE
+ bool "Decompress file data into an intermediate buffer"
+ help
+ Decompress file data into an intermediate buffer and then
+ memcopy it into the page cache.
+
+config SQUASHFS_FILE_DIRECT
+ bool "Decompress files directly into the page cache"
+ help
+ Directly decompress file data into the page cache.
+ Doing so can significantly improve performance because
+ it eliminates a memcpy and it also removes the lock contention
+ on the single buffer.
+
+endchoice
+
choice
prompt "Decompressor parallelisation options"
depends on SQUASHFS
obj-$(CONFIG_SQUASHFS) += squashfs.o
squashfs-y += block.o cache.o dir.o export.o file.o fragment.o id.o inode.o
squashfs-y += namei.o super.o symlink.o decompressor.o
-squashfs-y += file_direct.o page_actor.o
+squashfs-$(CONFIG_SQUASHFS_FILE_CACHE) += file_cache.o
+squashfs-$(CONFIG_SQUASHFS_FILE_DIRECT) += file_direct.o page_actor.o
squashfs-$(CONFIG_SQUASHFS_DECOMP_SINGLE) += decompressor_single.o
squashfs-$(CONFIG_SQUASHFS_DECOMP_MULTI) += decompressor_multi.o
squashfs-$(CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU) += decompressor_multi_percpu.o
#include <linux/fs.h>
#include <linux/vfs.h>
-#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/string.h>
-#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/bio.h>
-#include <linux/workqueue.h>
#include "squashfs_fs.h"
#include "squashfs_fs_sb.h"
#include "decompressor.h"
#include "page_actor.h"
-static struct workqueue_struct *squashfs_read_wq;
-
-struct squashfs_read_request {
- struct super_block *sb;
- u64 index;
- int length;
- int compressed;
- int offset;
- u64 read_end;
- struct squashfs_page_actor *output;
- enum {
- SQUASHFS_COPY,
- SQUASHFS_DECOMPRESS,
- SQUASHFS_METADATA,
- } data_processing;
- bool synchronous;
-
- /*
- * If the read is synchronous, it is possible to retrieve information
- * about the request by setting these pointers.
- */
- int *res;
- int *bytes_read;
- int *bytes_uncompressed;
-
- int nr_buffers;
- struct buffer_head **bh;
- struct work_struct offload;
-};
-
-struct squashfs_bio_request {
- struct buffer_head **bh;
- int nr_buffers;
-};
-
-static int squashfs_bio_submit(struct squashfs_read_request *req);
-
-int squashfs_init_read_wq(void)
-{
- squashfs_read_wq = create_workqueue("SquashFS read wq");
- return !!squashfs_read_wq;
-}
-
-void squashfs_destroy_read_wq(void)
-{
- flush_workqueue(squashfs_read_wq);
- destroy_workqueue(squashfs_read_wq);
-}
-
-static void free_read_request(struct squashfs_read_request *req, int error)
-{
- if (!req->synchronous)
- squashfs_page_actor_free(req->output, error);
- if (req->res)
- *(req->res) = error;
- kfree(req->bh);
- kfree(req);
-}
-
-static void squashfs_process_blocks(struct squashfs_read_request *req)
-{
- int error = 0;
- int bytes, i, length;
- struct squashfs_sb_info *msblk = req->sb->s_fs_info;
- struct squashfs_page_actor *actor = req->output;
- struct buffer_head **bh = req->bh;
- int nr_buffers = req->nr_buffers;
-
- for (i = 0; i < nr_buffers; ++i) {
- if (!bh[i])
- continue;
- wait_on_buffer(bh[i]);
- if (!buffer_uptodate(bh[i]))
- error = -EIO;
- }
- if (error)
- goto cleanup;
-
- if (req->data_processing == SQUASHFS_METADATA) {
- /* Extract the length of the metadata block */
- if (req->offset != msblk->devblksize - 1) {
- length = le16_to_cpup((__le16 *)
- (bh[0]->b_data + req->offset));
- } else {
- length = (unsigned char)bh[0]->b_data[req->offset];
- length |= (unsigned char)bh[1]->b_data[0] << 8;
- }
- req->compressed = SQUASHFS_COMPRESSED(length);
- req->data_processing = req->compressed ? SQUASHFS_DECOMPRESS
- : SQUASHFS_COPY;
- length = SQUASHFS_COMPRESSED_SIZE(length);
- if (req->index + length + 2 > req->read_end) {
- for (i = 0; i < nr_buffers; ++i)
- put_bh(bh[i]);
- kfree(bh);
- req->length = length;
- req->index += 2;
- squashfs_bio_submit(req);
- return;
- }
- req->length = length;
- req->offset = (req->offset + 2) % PAGE_SIZE;
- if (req->offset < 2) {
- put_bh(bh[0]);
- ++bh;
- --nr_buffers;
- }
- }
- if (req->bytes_read)
- *(req->bytes_read) = req->length;
-
- if (req->data_processing == SQUASHFS_COPY) {
- squashfs_bh_to_actor(bh, nr_buffers, req->output, req->offset,
- req->length, msblk->devblksize);
- } else if (req->data_processing == SQUASHFS_DECOMPRESS) {
- req->length = squashfs_decompress(msblk, bh, nr_buffers,
- req->offset, req->length, actor);
- if (req->length < 0) {
- error = -EIO;
- goto cleanup;
- }
- }
-
- /* Last page may have trailing bytes not filled */
- bytes = req->length % PAGE_SIZE;
- if (bytes && actor->page[actor->pages - 1])
- zero_user_segment(actor->page[actor->pages - 1], bytes,
- PAGE_SIZE);
-
-cleanup:
- if (req->bytes_uncompressed)
- *(req->bytes_uncompressed) = req->length;
- if (error) {
- for (i = 0; i < nr_buffers; ++i)
- if (bh[i])
- put_bh(bh[i]);
- }
- free_read_request(req, error);
-}
-
-static void read_wq_handler(struct work_struct *work)
-{
- squashfs_process_blocks(container_of(work,
- struct squashfs_read_request, offload));
-}
-
-static void squashfs_bio_end_io(struct bio *bio)
-{
- int i;
- blk_status_t error = bio->bi_status;
- struct squashfs_bio_request *bio_req = bio->bi_private;
-
- bio_put(bio);
-
- for (i = 0; i < bio_req->nr_buffers; ++i) {
- if (!bio_req->bh[i])
- continue;
- if (!error)
- set_buffer_uptodate(bio_req->bh[i]);
- else
- clear_buffer_uptodate(bio_req->bh[i]);
- unlock_buffer(bio_req->bh[i]);
- }
- kfree(bio_req);
-}
-
-static int bh_is_optional(struct squashfs_read_request *req, int idx)
-{
- int start_idx, end_idx;
- struct squashfs_sb_info *msblk = req->sb->s_fs_info;
-
- start_idx = (idx * msblk->devblksize - req->offset) >> PAGE_SHIFT;
- end_idx = ((idx + 1) * msblk->devblksize - req->offset + 1) >> PAGE_SHIFT;
- if (start_idx >= req->output->pages)
- return 1;
- if (start_idx < 0)
- start_idx = end_idx;
- if (end_idx >= req->output->pages)
- end_idx = start_idx;
- return !req->output->page[start_idx] && !req->output->page[end_idx];
-}
-
-static int actor_getblks(struct squashfs_read_request *req, u64 block)
-{
- int i;
-
- req->bh = kmalloc_array(req->nr_buffers, sizeof(*(req->bh)), GFP_NOIO);
- if (!req->bh)
- return -ENOMEM;
-
- for (i = 0; i < req->nr_buffers; ++i) {
- /*
- * When dealing with an uncompressed block, the actor may
- * contains NULL pages. There's no need to read the buffers
- * associated with these pages.
- */
- if (!req->compressed && bh_is_optional(req, i)) {
- req->bh[i] = NULL;
- continue;
- }
- req->bh[i] = sb_getblk(req->sb, block + i);
- if (!req->bh[i]) {
- while (--i) {
- if (req->bh[i])
- put_bh(req->bh[i]);
- }
- return -1;
- }
- }
- return 0;
-}
-
-static int squashfs_bio_submit(struct squashfs_read_request *req)
+/*
+ * Read the metadata block length; this is stored in the first two
+ * bytes of the metadata block.
+ */
+static struct buffer_head *get_block_length(struct super_block *sb,
+ u64 *cur_index, int *offset, int *length)
{
- struct bio *bio = NULL;
+ struct squashfs_sb_info *msblk = sb->s_fs_info;
struct buffer_head *bh;
- struct squashfs_bio_request *bio_req = NULL;
- int b = 0, prev_block = 0;
- struct squashfs_sb_info *msblk = req->sb->s_fs_info;
-
- u64 read_start = round_down(req->index, msblk->devblksize);
- u64 read_end = round_up(req->index + req->length, msblk->devblksize);
- sector_t block = read_start >> msblk->devblksize_log2;
- sector_t block_end = read_end >> msblk->devblksize_log2;
- int offset = read_start - round_down(req->index, PAGE_SIZE);
- int nr_buffers = block_end - block;
- int blksz = msblk->devblksize;
- int bio_max_pages = nr_buffers > BIO_MAX_PAGES ? BIO_MAX_PAGES
- : nr_buffers;
- /* Setup the request */
- req->read_end = read_end;
- req->offset = req->index - read_start;
- req->nr_buffers = nr_buffers;
- if (actor_getblks(req, block) < 0)
- goto getblk_failed;
-
- /* Create and submit the BIOs */
- for (b = 0; b < nr_buffers; ++b, offset += blksz) {
- bh = req->bh[b];
- if (!bh || !trylock_buffer(bh))
- continue;
- if (buffer_uptodate(bh)) {
- unlock_buffer(bh);
- continue;
+ bh = sb_bread(sb, *cur_index);
+ if (bh == NULL)
+ return NULL;
+
+ if (msblk->devblksize - *offset == 1) {
+ *length = (unsigned char) bh->b_data[*offset];
+ put_bh(bh);
+ bh = sb_bread(sb, ++(*cur_index));
+ if (bh == NULL)
+ return NULL;
+ *length |= (unsigned char) bh->b_data[0] << 8;
+ *offset = 1;
+ } else {
+ *length = (unsigned char) bh->b_data[*offset] |
+ (unsigned char) bh->b_data[*offset + 1] << 8;
+ *offset += 2;
+
+ if (*offset == msblk->devblksize) {
+ put_bh(bh);
+ bh = sb_bread(sb, ++(*cur_index));
+ if (bh == NULL)
+ return NULL;
+ *offset = 0;
}
- offset %= PAGE_SIZE;
-
- /* Append the buffer to the current BIO if it is contiguous */
- if (bio && bio_req && prev_block + 1 == b) {
- if (bio_add_page(bio, bh->b_page, blksz, offset)) {
- bio_req->nr_buffers += 1;
- prev_block = b;
- continue;
- }
- }
-
- /* Otherwise, submit the current BIO and create a new one */
- if (bio)
- submit_bio(bio);
- bio_req = kcalloc(1, sizeof(struct squashfs_bio_request),
- GFP_NOIO);
- if (!bio_req)
- goto req_alloc_failed;
- bio_req->bh = &req->bh[b];
- bio = bio_alloc(GFP_NOIO, bio_max_pages);
- if (!bio)
- goto bio_alloc_failed;
- bio_set_dev(bio, req->sb->s_bdev);
- bio->bi_iter.bi_sector = (block + b)
- << (msblk->devblksize_log2 - 9);
- bio_set_op_attrs(bio, REQ_OP_READ, 0);
- bio->bi_private = bio_req;
- bio->bi_end_io = squashfs_bio_end_io;
-
- bio_add_page(bio, bh->b_page, blksz, offset);
- bio_req->nr_buffers += 1;
- prev_block = b;
}
- if (bio)
- submit_bio(bio);
- if (req->synchronous)
- squashfs_process_blocks(req);
- else {
- INIT_WORK(&req->offload, read_wq_handler);
- schedule_work(&req->offload);
- }
- return 0;
-
-bio_alloc_failed:
- kfree(bio_req);
-req_alloc_failed:
- unlock_buffer(bh);
- while (--nr_buffers >= b)
- if (req->bh[nr_buffers])
- put_bh(req->bh[nr_buffers]);
- while (--b >= 0)
- if (req->bh[b])
- wait_on_buffer(req->bh[b]);
-getblk_failed:
- free_read_request(req, -ENOMEM);
- return -ENOMEM;
+ return bh;
}
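
For reference, here is a minimal userspace model of what get_block_length() above does: the length is a two byte little-endian field that can straddle a device block boundary, in which case the low byte is the last byte of one block and the high byte is the first byte of the next. The tiny block size and the model_* names are invented for this sketch; this is not kernel code.

#include <stdio.h>

#define DEVBLKSIZE 4	/* tiny "device block" so the straddle case is easy to hit */

static int model_block_length(const unsigned char *dev,
			      unsigned long long *cur_index, int *offset)
{
	const unsigned char *blk = dev + *cur_index * DEVBLKSIZE;
	int length;

	if (DEVBLKSIZE - *offset == 1) {
		/* length field straddles two device blocks */
		length = blk[*offset];
		blk = dev + ++(*cur_index) * DEVBLKSIZE;
		length |= blk[0] << 8;
		*offset = 1;
	} else {
		length = blk[*offset] | (blk[*offset + 1] << 8);
		*offset += 2;
		if (*offset == DEVBLKSIZE) {
			++(*cur_index);
			*offset = 0;
		}
	}
	return length;
}

int main(void)
{
	/* 0x1234 stored little-endian across the boundary of blocks 0 and 1 */
	unsigned char dev[8] = { 0, 0, 0, 0x34, 0x12, 0, 0, 0 };
	unsigned long long cur_index = 0;
	int offset = 3;
	int length = model_block_length(dev, &cur_index, &offset);

	printf("length 0x%x, now at block %llu offset %d\n",
	       length, cur_index, offset);
	return 0;
}

The kernel function additionally handles sb_bread() failures and hands the current buffer_head back to the caller, which goes on to read the block payload starting at the updated offset.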
-static int read_metadata_block(struct squashfs_read_request *req,
- u64 *next_index)
-{
- int ret, error, bytes_read = 0, bytes_uncompressed = 0;
- struct squashfs_sb_info *msblk = req->sb->s_fs_info;
-
- if (req->index + 2 > msblk->bytes_used) {
- free_read_request(req, -EINVAL);
- return -EINVAL;
- }
- req->length = 2;
-
- /* Do not read beyond the end of the device */
- if (req->index + req->length > msblk->bytes_used)
- req->length = msblk->bytes_used - req->index;
- req->data_processing = SQUASHFS_METADATA;
-
- /*
- * Reading metadata is always synchronous because we don't know the
- * length in advance and the function is expected to update
- * 'next_index' and return the length.
- */
- req->synchronous = true;
- req->res = &error;
- req->bytes_read = &bytes_read;
- req->bytes_uncompressed = &bytes_uncompressed;
-
- TRACE("Metadata block @ 0x%llx, %scompressed size %d, src size %d\n",
- req->index, req->compressed ? "" : "un", bytes_read,
- req->output->length);
-
- ret = squashfs_bio_submit(req);
- if (ret)
- return ret;
- if (error)
- return error;
- if (next_index)
- *next_index += 2 + bytes_read;
- return bytes_uncompressed;
-}
-
-static int read_data_block(struct squashfs_read_request *req, int length,
- u64 *next_index, bool synchronous)
-{
- int ret, error = 0, bytes_uncompressed = 0, bytes_read = 0;
-
- req->compressed = SQUASHFS_COMPRESSED_BLOCK(length);
- req->length = length = SQUASHFS_COMPRESSED_SIZE_BLOCK(length);
- req->data_processing = req->compressed ? SQUASHFS_DECOMPRESS
- : SQUASHFS_COPY;
-
- req->synchronous = synchronous;
- if (synchronous) {
- req->res = &error;
- req->bytes_read = &bytes_read;
- req->bytes_uncompressed = &bytes_uncompressed;
- }
-
- TRACE("Data block @ 0x%llx, %scompressed size %d, src size %d\n",
- req->index, req->compressed ? "" : "un", req->length,
- req->output->length);
-
- ret = squashfs_bio_submit(req);
- if (ret)
- return ret;
- if (synchronous)
- ret = error ? error : bytes_uncompressed;
- if (next_index)
- *next_index += length;
- return ret;
-}
/*
 * Read and decompress a metadata block or datablock.  Length is non-zero
 * if a datablock is being read (the size is stored elsewhere in the
 * filesystem), otherwise the length is obtained from the first two bytes of
 * the metadata block.  A bit in the length field indicates if the block
 * is stored uncompressed in the filesystem (usually because compression
 * generated a larger block - this does occasionally happen with compression
 * algorithms).
*/
-static int __squashfs_read_data(struct super_block *sb, u64 index, int length,
- u64 *next_index, struct squashfs_page_actor *output, bool sync)
+int squashfs_read_data(struct super_block *sb, u64 index, int length,
+ u64 *next_index, struct squashfs_page_actor *output)
{
- struct squashfs_read_request *req;
+ struct squashfs_sb_info *msblk = sb->s_fs_info;
+ struct buffer_head **bh;
+ int offset = index & ((1 << msblk->devblksize_log2) - 1);
+ u64 cur_index = index >> msblk->devblksize_log2;
+ int bytes, compressed, b = 0, k = 0, avail, i;
- req = kcalloc(1, sizeof(struct squashfs_read_request), GFP_KERNEL);
- if (!req) {
- if (!sync)
- squashfs_page_actor_free(output, -ENOMEM);
+ bh = kcalloc(((output->length + msblk->devblksize - 1)
+ >> msblk->devblksize_log2) + 1, sizeof(*bh), GFP_KERNEL);
+ if (bh == NULL)
return -ENOMEM;
- }
- req->sb = sb;
- req->index = index;
- req->output = output;
+ if (length) {
+ /*
+ * Datablock.
+ */
+ bytes = -offset;
+ compressed = SQUASHFS_COMPRESSED_BLOCK(length);
+ length = SQUASHFS_COMPRESSED_SIZE_BLOCK(length);
+ if (next_index)
+ *next_index = index + length;
+
+ TRACE("Block @ 0x%llx, %scompressed size %d, src size %d\n",
+ index, compressed ? "" : "un", length, output->length);
+
+ if (length < 0 || length > output->length ||
+ (index + length) > msblk->bytes_used)
+ goto read_failure;
+
+ for (b = 0; bytes < length; b++, cur_index++) {
+ bh[b] = sb_getblk(sb, cur_index);
+ if (bh[b] == NULL)
+ goto block_release;
+ bytes += msblk->devblksize;
+ }
+ ll_rw_block(REQ_OP_READ, 0, b, bh);
+ } else {
+ /*
+ * Metadata block.
+ */
+ if ((index + 2) > msblk->bytes_used)
+ goto read_failure;
+
+ bh[0] = get_block_length(sb, &cur_index, &offset, &length);
+ if (bh[0] == NULL)
+ goto read_failure;
+ b = 1;
+
+ bytes = msblk->devblksize - offset;
+ compressed = SQUASHFS_COMPRESSED(length);
+ length = SQUASHFS_COMPRESSED_SIZE(length);
+ if (next_index)
+ *next_index = index + length + 2;
- if (next_index)
- *next_index = index;
+ TRACE("Block @ 0x%llx, %scompressed size %d\n", index,
+ compressed ? "" : "un", length);
- if (length)
- length = read_data_block(req, length, next_index, sync);
- else
- length = read_metadata_block(req, next_index);
+ if (length < 0 || length > output->length ||
+ (index + length) > msblk->bytes_used)
+ goto block_release;
- if (length < 0) {
- ERROR("squashfs_read_data failed to read block 0x%llx\n",
- (unsigned long long)index);
- return -EIO;
+ for (; bytes < length; b++) {
+ bh[b] = sb_getblk(sb, ++cur_index);
+ if (bh[b] == NULL)
+ goto block_release;
+ bytes += msblk->devblksize;
+ }
+ ll_rw_block(REQ_OP_READ, 0, b - 1, bh + 1);
}
- return length;
-}
+ for (i = 0; i < b; i++) {
+ wait_on_buffer(bh[i]);
+ if (!buffer_uptodate(bh[i]))
+ goto block_release;
+ }
-int squashfs_read_data(struct super_block *sb, u64 index, int length,
- u64 *next_index, struct squashfs_page_actor *output)
-{
- return __squashfs_read_data(sb, index, length, next_index, output,
- true);
-}
+ if (compressed) {
+ if (!msblk->stream)
+ goto read_failure;
+ length = squashfs_decompress(msblk, bh, b, offset, length,
+ output);
+ if (length < 0)
+ goto read_failure;
+ } else {
+ /*
+ * Block is uncompressed.
+ */
+ int in, pg_offset = 0;
+ void *data = squashfs_first_page(output);
+
+ for (bytes = length; k < b; k++) {
+ in = min(bytes, msblk->devblksize - offset);
+ bytes -= in;
+ while (in) {
+ if (pg_offset == PAGE_SIZE) {
+ data = squashfs_next_page(output);
+ pg_offset = 0;
+ }
+ avail = min_t(int, in, PAGE_SIZE -
+ pg_offset);
+ memcpy(data + pg_offset, bh[k]->b_data + offset,
+ avail);
+ in -= avail;
+ pg_offset += avail;
+ offset += avail;
+ }
+ offset = 0;
+ put_bh(bh[k]);
+ }
+ squashfs_finish_page(output);
+ }
-int squashfs_read_data_async(struct super_block *sb, u64 index, int length,
- u64 *next_index, struct squashfs_page_actor *output)
-{
+ kfree(bh);
+ return length;
+
+block_release:
+ for (; k < b; k++)
+ put_bh(bh[k]);
- return __squashfs_read_data(sb, index, length, next_index, output,
- false);
+read_failure:
+ ERROR("squashfs_read_data failed to read block 0x%llx\n",
+ (unsigned long long) index);
+ kfree(bh);
+ return -EIO;
}
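
As a side note on the comment above squashfs_read_data(): the "bit in the length field" decode is what the SQUASHFS_COMPRESSED()/SQUASHFS_COMPRESSED_SIZE() and SQUASHFS_COMPRESSED_BLOCK()/SQUASHFS_COMPRESSED_SIZE_BLOCK() macros implement. Below is a minimal sketch of that kind of encoding; MODEL_COMPRESSED_BIT and its polarity (set meaning "stored uncompressed") are assumptions for the sketch, and the authoritative definitions are the macros in squashfs_fs.h, not this code.

#include <stdio.h>

#define MODEL_COMPRESSED_BIT	(1 << 15)  /* assumed: set => block stored as-is */

static int model_is_compressed(int field)
{
	return !(field & MODEL_COMPRESSED_BIT);
}

static int model_size(int field)
{
	return field & ~MODEL_COMPRESSED_BIT;
}

int main(void)
{
	int compressed_field = 0x1f2;                              /* flag clear */
	int uncompressed_field = 0x2000 | MODEL_COMPRESSED_BIT;    /* flag set   */

	printf("%scompressed, %d bytes\n",
	       model_is_compressed(compressed_field) ? "" : "un",
	       model_size(compressed_field));
	printf("%scompressed, %d bytes\n",
	       model_is_compressed(uncompressed_field) ? "" : "un",
	       model_size(uncompressed_field));
	return 0;
}

With the flag clear the payload goes through squashfs_decompress(); with the flag set it is copied as-is, which is the "Block is uncompressed" branch in squashfs_read_data() above.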
*/
void squashfs_cache_delete(struct squashfs_cache *cache)
{
- int i;
+ int i, j;
if (cache == NULL)
return;
for (i = 0; i < cache->entries; i++) {
- if (cache->entry[i].page)
- free_page_array(cache->entry[i].page, cache->pages);
+ if (cache->entry[i].data) {
+ for (j = 0; j < cache->pages; j++)
+ kfree(cache->entry[i].data[j]);
+ kfree(cache->entry[i].data);
+ }
kfree(cache->entry[i].actor);
}
struct squashfs_cache *squashfs_cache_init(char *name, int entries,
int block_size)
{
- int i;
+ int i, j;
struct squashfs_cache *cache = kzalloc(sizeof(*cache), GFP_KERNEL);
if (cache == NULL) {
init_waitqueue_head(&cache->entry[i].wait_queue);
entry->cache = cache;
entry->block = SQUASHFS_INVALID_BLK;
- entry->page = alloc_page_array(cache->pages, GFP_KERNEL);
- if (!entry->page) {
+ entry->data = kcalloc(cache->pages, sizeof(void *), GFP_KERNEL);
+ if (entry->data == NULL) {
ERROR("Failed to allocate %s cache entry\n", name);
goto cleanup;
}
- entry->actor = squashfs_page_actor_init(entry->page,
- cache->pages, 0, NULL);
+
+ for (j = 0; j < cache->pages; j++) {
+ entry->data[j] = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (entry->data[j] == NULL) {
+ ERROR("Failed to allocate %s buffer\n", name);
+ goto cleanup;
+ }
+ }
+
+ entry->actor = squashfs_page_actor_init(entry->data,
+ cache->pages, 0);
if (entry->actor == NULL) {
ERROR("Failed to allocate %s cache entry\n", name);
goto cleanup;
return min(length, entry->length - offset);
while (offset < entry->length) {
- void *buff = kmap_atomic(entry->page[offset / PAGE_SIZE])
- + (offset % PAGE_SIZE);
+ void *buff = entry->data[offset / PAGE_SIZE]
+ + (offset % PAGE_SIZE);
int bytes = min_t(int, entry->length - offset,
PAGE_SIZE - (offset % PAGE_SIZE));
if (bytes >= remaining) {
memcpy(buffer, buff, remaining);
- kunmap_atomic(buff);
remaining = 0;
break;
}
memcpy(buffer, buff, bytes);
- kunmap_atomic(buff);
buffer += bytes;
remaining -= bytes;
offset += bytes;
void *squashfs_read_table(struct super_block *sb, u64 block, int length)
{
int pages = (length + PAGE_SIZE - 1) >> PAGE_SHIFT;
- struct page **page;
- void *buff;
- int res;
+ int i, res;
+ void *table, *buffer, **data;
struct squashfs_page_actor *actor;
- page = alloc_page_array(pages, GFP_KERNEL);
- if (!page)
+ table = buffer = kmalloc(length, GFP_KERNEL);
+ if (table == NULL)
return ERR_PTR(-ENOMEM);
- actor = squashfs_page_actor_init(page, pages, length, NULL);
- if (actor == NULL) {
+ data = kcalloc(pages, sizeof(void *), GFP_KERNEL);
+ if (data == NULL) {
res = -ENOMEM;
goto failed;
}
+ actor = squashfs_page_actor_init(data, pages, length);
+ if (actor == NULL) {
+ res = -ENOMEM;
+ goto failed2;
+ }
+
+ for (i = 0; i < pages; i++, buffer += PAGE_SIZE)
+ data[i] = buffer;
+
res = squashfs_read_data(sb, block, length |
SQUASHFS_COMPRESSED_BIT_BLOCK, NULL, actor);
+ kfree(data);
+ kfree(actor);
+
if (res < 0)
- goto failed2;
+ goto failed;
- buff = kmalloc(length, GFP_KERNEL);
- if (!buff)
- goto failed2;
- squashfs_actor_to_buf(actor, buff, length);
- squashfs_page_actor_free(actor, 0);
- free_page_array(page, pages);
- return buff;
+ return table;
failed2:
- squashfs_page_actor_free(actor, 0);
+ kfree(data);
failed:
- free_page_array(page, pages);
+ kfree(table);
return ERR_PTR(res);
}
#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/slab.h>
-#include <linux/highmem.h>
-#include <linux/fs.h>
+#include <linux/buffer_head.h>
#include "squashfs_fs.h"
#include "squashfs_fs_sb.h"
static void *get_comp_opts(struct super_block *sb, unsigned short flags)
{
struct squashfs_sb_info *msblk = sb->s_fs_info;
- void *comp_opts, *buffer = NULL;
- struct page *page;
+ void *buffer = NULL, *comp_opts;
struct squashfs_page_actor *actor = NULL;
int length = 0;
- if (!SQUASHFS_COMP_OPTS(flags))
- return squashfs_comp_opts(msblk, buffer, length);
-
/*
* Read decompressor specific options from file system if present
*/
-
- page = alloc_page(GFP_KERNEL);
- if (!page)
- return ERR_PTR(-ENOMEM);
-
- actor = squashfs_page_actor_init(&page, 1, 0, NULL);
- if (actor == NULL) {
- comp_opts = ERR_PTR(-ENOMEM);
- goto actor_error;
- }
-
- length = squashfs_read_data(sb,
- sizeof(struct squashfs_super_block), 0, NULL, actor);
-
- if (length < 0) {
- comp_opts = ERR_PTR(length);
- goto read_error;
+ if (SQUASHFS_COMP_OPTS(flags)) {
+ buffer = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (buffer == NULL) {
+ comp_opts = ERR_PTR(-ENOMEM);
+ goto out;
+ }
+
+ actor = squashfs_page_actor_init(&buffer, 1, 0);
+ if (actor == NULL) {
+ comp_opts = ERR_PTR(-ENOMEM);
+ goto out;
+ }
+
+ length = squashfs_read_data(sb,
+ sizeof(struct squashfs_super_block), 0, NULL, actor);
+
+ if (length < 0) {
+ comp_opts = ERR_PTR(length);
+ goto out;
+ }
}
- buffer = kmap_atomic(page);
comp_opts = squashfs_comp_opts(msblk, buffer, length);
- kunmap_atomic(buffer);
-read_error:
- squashfs_page_actor_free(actor, 0);
-actor_error:
- __free_page(page);
+out:
+ kfree(actor);
+ kfree(buffer);
return comp_opts;
}
#include <linux/string.h>
#include <linux/pagemap.h>
#include <linux/mutex.h>
-#include <linux/mm_inline.h>
#include "squashfs_fs.h"
#include "squashfs_fs_sb.h"
return res;
}
-static int squashfs_readpages_fragment(struct page *page,
- struct list_head *readahead_pages, struct address_space *mapping,
- int expected)
-{
- if (!page) {
- page = lru_to_page(readahead_pages);
- list_del(&page->lru);
- if (add_to_page_cache_lru(page, mapping, page->index,
- mapping_gfp_constraint(mapping, GFP_KERNEL))) {
- put_page(page);
- return 0;
- }
- }
- return squashfs_readpage_fragment(page, expected);
-}
-
static int squashfs_readpage_sparse(struct page *page, int expected)
{
squashfs_copy_cache(page, NULL, expected, 0);
return 0;
}
-static int squashfs_readpages_sparse(struct page *page,
- struct list_head *readahead_pages, struct address_space *mapping,
- int expected)
-{
- if (!page) {
- page = lru_to_page(readahead_pages);
- list_del(&page->lru);
- if (add_to_page_cache_lru(page, mapping, page->index,
- mapping_gfp_constraint(mapping, GFP_KERNEL))) {
- put_page(page);
- return 0;
- }
- }
- return squashfs_readpage_sparse(page, expected);
-}
-
-static int __squashfs_readpages(struct file *file, struct page *page,
- struct list_head *readahead_pages, unsigned int nr_pages,
- struct address_space *mapping)
+static int squashfs_readpage(struct file *file, struct page *page)
{
- struct inode *inode = mapping->host;
+ struct inode *inode = page->mapping->host;
struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
+ int index = page->index >> (msblk->block_log - PAGE_SHIFT);
int file_end = i_size_read(inode) >> msblk->block_log;
- int res;
-
- do {
- struct page *cur_page = page ? page
- : lru_to_page(readahead_pages);
- int page_index = cur_page->index;
- int index = page_index >> (msblk->block_log - PAGE_SHIFT);
- int expected = index == file_end ?
+ int expected = index == file_end ?
(i_size_read(inode) & (msblk->block_size - 1)) :
msblk->block_size;
-
- if (page_index >= ((i_size_read(inode) + PAGE_SIZE - 1) >>
- PAGE_SHIFT))
- return 1;
-
- if (index < file_end || squashfs_i(inode)->fragment_block ==
- SQUASHFS_INVALID_BLK) {
- u64 block = 0;
- int bsize = read_blocklist(inode, index, &block);
-
- if (bsize < 0)
- return -1;
-
- if (bsize == 0) {
- res = squashfs_readpages_sparse(page,
- readahead_pages, mapping, expected);
- } else {
- res = squashfs_readpages_block(page,
- readahead_pages, &nr_pages, mapping,
- page_index, block, bsize);
- }
- } else {
- res = squashfs_readpages_fragment(page,
- readahead_pages, mapping, expected);
- }
- if (res)
- return 0;
- page = NULL;
- } while (readahead_pages && !list_empty(readahead_pages));
-
- return 0;
-}
-
-static int squashfs_readpage(struct file *file, struct page *page)
-{
- int ret;
+ int res;
+ void *pageaddr;
TRACE("Entered squashfs_readpage, page index %lx, start block %llx\n",
- page->index, squashfs_i(page->mapping->host)->start);
+ page->index, squashfs_i(inode)->start);
+
+ if (page->index >= ((i_size_read(inode) + PAGE_SIZE - 1) >>
+ PAGE_SHIFT))
+ goto out;
- get_page(page);
+ if (index < file_end || squashfs_i(inode)->fragment_block ==
+ SQUASHFS_INVALID_BLK) {
+ u64 block = 0;
+ int bsize = read_blocklist(inode, index, &block);
+ if (bsize < 0)
+ goto error_out;
- ret = __squashfs_readpages(file, page, NULL, 1, page->mapping);
- if (ret) {
- flush_dcache_page(page);
- if (ret < 0)
- SetPageError(page);
+ if (bsize == 0)
+ res = squashfs_readpage_sparse(page, expected);
else
- SetPageUptodate(page);
- zero_user_segment(page, 0, PAGE_SIZE);
- unlock_page(page);
- put_page(page);
- }
+ res = squashfs_readpage_block(page, block, bsize, expected);
+ } else
+ res = squashfs_readpage_fragment(page, expected);
- return 0;
-}
+ if (!res)
+ return 0;
+
+error_out:
+ SetPageError(page);
+out:
+ pageaddr = kmap_atomic(page);
+ memset(pageaddr, 0, PAGE_SIZE);
+ kunmap_atomic(pageaddr);
+ flush_dcache_page(page);
+ if (!PageError(page))
+ SetPageUptodate(page);
+ unlock_page(page);
-static int squashfs_readpages(struct file *file, struct address_space *mapping,
- struct list_head *pages, unsigned int nr_pages)
-{
- TRACE("Entered squashfs_readpages, %u pages, first page index %lx\n",
- nr_pages, lru_to_page(pages)->index);
- __squashfs_readpages(file, NULL, pages, nr_pages, mapping);
return 0;
}
const struct address_space_operations squashfs_aops = {
- .readpage = squashfs_readpage,
- .readpages = squashfs_readpages,
+ .readpage = squashfs_readpage
};
--- /dev/null
+/*
+ * Copyright (c) 2013
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ */
+
+#include <linux/fs.h>
+#include <linux/vfs.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/pagemap.h>
+#include <linux/mutex.h>
+
+#include "squashfs_fs.h"
+#include "squashfs_fs_sb.h"
+#include "squashfs_fs_i.h"
+#include "squashfs.h"
+
+/* Read separately compressed datablock and memcopy into page cache */
+int squashfs_readpage_block(struct page *page, u64 block, int bsize, int expected)
+{
+ struct inode *i = page->mapping->host;
+ struct squashfs_cache_entry *buffer = squashfs_get_datablock(i->i_sb,
+ block, bsize);
+ int res = buffer->error;
+
+ if (res)
+ ERROR("Unable to read page, block %llx, size %x\n", block,
+ bsize);
+ else
+ squashfs_copy_cache(page, buffer, expected, 0);
+
+ squashfs_cache_put(buffer);
+ return res;
+}
#include <linux/string.h>
#include <linux/pagemap.h>
#include <linux/mutex.h>
-#include <linux/mm_inline.h>
#include "squashfs_fs.h"
#include "squashfs_fs_sb.h"
#include "squashfs.h"
#include "page_actor.h"
-static void release_actor_pages(struct page **page, int pages, int error)
-{
- int i;
+static int squashfs_read_cache(struct page *target_page, u64 block, int bsize,
+ int pages, struct page **page, int bytes);
- for (i = 0; i < pages; i++) {
- if (!page[i])
- continue;
- flush_dcache_page(page[i]);
- if (!error)
- SetPageUptodate(page[i]);
- else {
- SetPageError(page[i]);
- zero_user_segment(page[i], 0, PAGE_SIZE);
- }
- unlock_page(page[i]);
- put_page(page[i]);
- }
- kfree(page);
-}
+/* Read separately compressed datablock directly into page cache */
+int squashfs_readpage_block(struct page *target_page, u64 block, int bsize,
+ int expected)
-/*
- * Create a "page actor" which will kmap and kunmap the
- * page cache pages appropriately within the decompressor
- */
-static struct squashfs_page_actor *actor_from_page_cache(
- unsigned int actor_pages, struct page *target_page,
- struct list_head *rpages, unsigned int *nr_pages, int start_index,
- struct address_space *mapping)
{
+ struct inode *inode = target_page->mapping->host;
+ struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
+
+ int file_end = (i_size_read(inode) - 1) >> PAGE_SHIFT;
+ int mask = (1 << (msblk->block_log - PAGE_SHIFT)) - 1;
+ int start_index = target_page->index & ~mask;
+ int end_index = start_index | mask;
+ int i, n, pages, missing_pages, bytes, res = -ENOMEM;
struct page **page;
struct squashfs_page_actor *actor;
- int i, n;
- gfp_t gfp = mapping_gfp_constraint(mapping, GFP_KERNEL);
-
- page = kmalloc_array(actor_pages, sizeof(void *), GFP_KERNEL);
- if (!page)
- return NULL;
-
- for (i = 0, n = start_index; i < actor_pages; i++, n++) {
- if (target_page == NULL && rpages && !list_empty(rpages)) {
- struct page *cur_page = lru_to_page(rpages);
-
- if (cur_page->index < start_index + actor_pages) {
- list_del(&cur_page->lru);
- --(*nr_pages);
- if (add_to_page_cache_lru(cur_page, mapping,
- cur_page->index, gfp))
- put_page(cur_page);
- else
- target_page = cur_page;
- } else
- rpages = NULL;
- }
+ void *pageaddr;
+
+ if (end_index > file_end)
+ end_index = file_end;
+
+ pages = end_index - start_index + 1;
+
+ page = kmalloc_array(pages, sizeof(void *), GFP_KERNEL);
+ if (page == NULL)
+ return res;
+
+ /*
+ * Create a "page actor" which will kmap and kunmap the
+ * page cache pages appropriately within the decompressor
+ */
+ actor = squashfs_page_actor_init_special(page, pages, 0);
+ if (actor == NULL)
+ goto out;
- if (target_page && target_page->index == n) {
- page[i] = target_page;
- target_page = NULL;
- } else {
- page[i] = grab_cache_page_nowait(mapping, n);
- if (page[i] == NULL)
- continue;
+ /* Try to grab all the pages covered by the Squashfs block */
+ for (missing_pages = 0, i = 0, n = start_index; i < pages; i++, n++) {
+ page[i] = (n == target_page->index) ? target_page :
+ grab_cache_page_nowait(target_page->mapping, n);
+
+ if (page[i] == NULL) {
+ missing_pages++;
+ continue;
}
if (PageUptodate(page[i])) {
unlock_page(page[i]);
put_page(page[i]);
page[i] = NULL;
+ missing_pages++;
}
}
- actor = squashfs_page_actor_init(page, actor_pages, 0,
- release_actor_pages);
- if (!actor) {
- release_actor_pages(page, actor_pages, -ENOMEM);
- kfree(page);
- return NULL;
+ if (missing_pages) {
+ /*
+ * Couldn't get one or more pages, this page has either
+ * been VM reclaimed, but others are still in the page cache
+ * and uptodate, or we're racing with another thread in
+ * squashfs_readpage also trying to grab them. Fall back to
+ * using an intermediate buffer.
+ */
+ res = squashfs_read_cache(target_page, block, bsize, pages,
+ page, expected);
+ if (res < 0)
+ goto mark_errored;
+
+ goto out;
}
- return actor;
-}
-int squashfs_readpages_block(struct page *target_page,
- struct list_head *readahead_pages,
- unsigned int *nr_pages,
- struct address_space *mapping,
- int page_index, u64 block, int bsize)
+ /* Decompress directly into the page cache buffers */
+ res = squashfs_read_data(inode->i_sb, block, bsize, NULL, actor);
+ if (res < 0)
+ goto mark_errored;
-{
- struct squashfs_page_actor *actor;
- struct inode *inode = mapping->host;
- struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
- int start_index, end_index, file_end, actor_pages, res;
- int mask = (1 << (msblk->block_log - PAGE_SHIFT)) - 1;
+ if (res != expected) {
+ res = -EIO;
+ goto mark_errored;
+ }
- /*
- * If readpage() is called on an uncompressed datablock, we can just
- * read the pages instead of fetching the whole block.
- * This greatly improves the performance when a process keep doing
- * random reads because we only fetch the necessary data.
- * The readahead algorithm will take care of doing speculative reads
- * if necessary.
- * We can't read more than 1 block even if readahead provides use more
- * pages because we don't know yet if the next block is compressed or
- * not.
+ /* Last page may have trailing bytes not filled */
+ bytes = res % PAGE_SIZE;
+ if (bytes) {
+ pageaddr = kmap_atomic(page[pages - 1]);
+ memset(pageaddr + bytes, 0, PAGE_SIZE - bytes);
+ kunmap_atomic(pageaddr);
+ }
+
+ /* Mark pages as uptodate, unlock and release */
+ for (i = 0; i < pages; i++) {
+ flush_dcache_page(page[i]);
+ SetPageUptodate(page[i]);
+ unlock_page(page[i]);
+ if (page[i] != target_page)
+ put_page(page[i]);
+ }
+
+ kfree(actor);
+ kfree(page);
+
+ return 0;
+
+mark_errored:
+ /* Decompression failed, mark pages as errored. Target_page is
+ * dealt with by the caller
*/
- if (bsize && !SQUASHFS_COMPRESSED_BLOCK(bsize)) {
- u64 block_end = block + msblk->block_size;
-
- block += (page_index & mask) * PAGE_SIZE;
- actor_pages = (block_end - block) / PAGE_SIZE;
- if (*nr_pages < actor_pages)
- actor_pages = *nr_pages;
- start_index = page_index;
- bsize = min_t(int, bsize, (PAGE_SIZE * actor_pages)
- | SQUASHFS_COMPRESSED_BIT_BLOCK);
- } else {
- file_end = (i_size_read(inode) - 1) >> PAGE_SHIFT;
- start_index = page_index & ~mask;
- end_index = start_index | mask;
- if (end_index > file_end)
- end_index = file_end;
- actor_pages = end_index - start_index + 1;
+ for (i = 0; i < pages; i++) {
+ if (page[i] == NULL || page[i] == target_page)
+ continue;
+ flush_dcache_page(page[i]);
+ SetPageError(page[i]);
+ unlock_page(page[i]);
+ put_page(page[i]);
+ }
+
+out:
+ kfree(actor);
+ kfree(page);
+ return res;
+}
+
+
+static int squashfs_read_cache(struct page *target_page, u64 block, int bsize,
+ int pages, struct page **page, int bytes)
+{
+ struct inode *i = target_page->mapping->host;
+ struct squashfs_cache_entry *buffer = squashfs_get_datablock(i->i_sb,
+ block, bsize);
+ int res = buffer->error, n, offset = 0;
+
+ if (res) {
+ ERROR("Unable to read page, block %llx, size %x\n", block,
+ bsize);
+ goto out;
}
- actor = actor_from_page_cache(actor_pages, target_page,
- readahead_pages, nr_pages, start_index,
- mapping);
- if (!actor)
- return -ENOMEM;
+ for (n = 0; n < pages && bytes > 0; n++,
+ bytes -= PAGE_SIZE, offset += PAGE_SIZE) {
+ int avail = min_t(int, bytes, PAGE_SIZE);
+
+ if (page[n] == NULL)
+ continue;
+
+ squashfs_fill_page(page[n], buffer, offset, avail);
+ unlock_page(page[n]);
+ if (page[n] != target_page)
+ put_page(page[n]);
+ }
- res = squashfs_read_data_async(inode->i_sb, block, bsize, NULL,
- actor);
- return res < 0 ? res : 0;
+out:
+ squashfs_cache_put(buffer);
+ return res;
}
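
The fallback logic in squashfs_readpage_block() above boils down to an all-or-nothing policy: decompress directly only if every page cache page covered by the Squashfs block could be grabbed, otherwise read through the intermediate cache buffer. A small userspace model of that policy follows; all names are invented for the sketch and nothing here is kernel API.

#include <stdbool.h>
#include <stdio.h>

#define PAGES_PER_BLOCK 4

/* stand-in for grab_cache_page_nowait(): false when the page is unavailable */
static bool model_grab(const bool available[], int i)
{
	return available[i];
}

static const char *model_readpage_block(const bool available[])
{
	int missing = 0;

	for (int i = 0; i < PAGES_PER_BLOCK; i++)
		if (!model_grab(available, i))
			missing++;

	/* decompress directly only when the whole block's pages were grabbed */
	return missing ? "fall back to intermediate buffer"
		       : "decompress directly into page cache";
}

int main(void)
{
	bool all[PAGES_PER_BLOCK] = { true, true, true, true };
	bool some[PAGES_PER_BLOCK] = { true, false, true, true };

	printf("%s\n", model_readpage_block(all));
	printf("%s\n", model_readpage_block(some));
	return 0;
}

The real code additionally treats target_page specially: it is owned by the caller, so it is never released on the fallback or error paths.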
struct buffer_head **bh, int b, int offset, int length,
struct squashfs_page_actor *output)
{
- int res;
struct squashfs_lz4 *stream = strm;
+ void *buff = stream->input, *data;
+ int avail, i, bytes = length, res;
+
+ for (i = 0; i < b; i++) {
+ avail = min(bytes, msblk->devblksize - offset);
+ memcpy(buff, bh[i]->b_data + offset, avail);
+ buff += avail;
+ bytes -= avail;
+ offset = 0;
+ put_bh(bh[i]);
+ }
- squashfs_bh_to_buf(bh, b, stream->input, offset, length,
- msblk->devblksize);
res = LZ4_decompress_safe(stream->input, stream->output,
length, output->length);
if (res < 0)
return -EIO;
- squashfs_buf_to_actor(stream->output, output, res);
+
+ bytes = res;
+ data = squashfs_first_page(output);
+ buff = stream->output;
+ while (data) {
+ if (bytes <= PAGE_SIZE) {
+ memcpy(data, buff, bytes);
+ break;
+ }
+ memcpy(data, buff, PAGE_SIZE);
+ buff += PAGE_SIZE;
+ bytes -= PAGE_SIZE;
+ data = squashfs_next_page(output);
+ }
+ squashfs_finish_page(output);
return res;
}
struct buffer_head **bh, int b, int offset, int length,
struct squashfs_page_actor *output)
{
- int res;
- size_t out_len = output->length;
struct squashfs_lzo *stream = strm;
+ void *buff = stream->input, *data;
+ int avail, i, bytes = length, res;
+ size_t out_len = output->length;
+
+ for (i = 0; i < b; i++) {
+ avail = min(bytes, msblk->devblksize - offset);
+ memcpy(buff, bh[i]->b_data + offset, avail);
+ buff += avail;
+ bytes -= avail;
+ offset = 0;
+ put_bh(bh[i]);
+ }
- squashfs_bh_to_buf(bh, b, stream->input, offset, length,
- msblk->devblksize);
res = lzo1x_decompress_safe(stream->input, (size_t)length,
stream->output, &out_len);
if (res != LZO_E_OK)
- return -EIO;
- squashfs_buf_to_actor(stream->output, output, out_len);
+ goto failed;
- return out_len;
+ res = bytes = (int)out_len;
+ data = squashfs_first_page(output);
+ buff = stream->output;
+ while (data) {
+ if (bytes <= PAGE_SIZE) {
+ memcpy(data, buff, bytes);
+ break;
+ } else {
+ memcpy(data, buff, PAGE_SIZE);
+ buff += PAGE_SIZE;
+ bytes -= PAGE_SIZE;
+ data = squashfs_next_page(output);
+ }
+ }
+ squashfs_finish_page(output);
+
+ return res;
+
+failed:
+ return -EIO;
}
const struct squashfs_decompressor squashfs_lzo_comp_ops = {
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
-#include <linux/buffer_head.h>
#include "page_actor.h"
-struct squashfs_page_actor *squashfs_page_actor_init(struct page **page,
- int pages, int length, void (*release_pages)(struct page **, int, int))
-{
- struct squashfs_page_actor *actor = kmalloc(sizeof(*actor), GFP_KERNEL);
-
- if (actor == NULL)
- return NULL;
+/*
+ * This file contains implementations of page_actor for decompressing into
+ * an intermediate buffer, and for decompressing directly into the
+ * page cache.
+ *
+ * Calling code should avoid sleeping between calls to squashfs_first_page()
+ * and squashfs_finish_page().
+ */
- actor->length = length ? : pages * PAGE_SIZE;
- actor->page = page;
- actor->pages = pages;
- actor->next_page = 0;
- actor->pageaddr = NULL;
- actor->release_pages = release_pages;
- return actor;
+/* Implementation of page_actor for decompressing into intermediate buffer */
+static void *cache_first_page(struct squashfs_page_actor *actor)
+{
+ actor->next_page = 1;
+ return actor->buffer[0];
}
-void squashfs_page_actor_free(struct squashfs_page_actor *actor, int error)
+static void *cache_next_page(struct squashfs_page_actor *actor)
{
- if (!actor)
- return;
+ if (actor->next_page == actor->pages)
+ return NULL;
- if (actor->release_pages)
- actor->release_pages(actor->page, actor->pages, error);
- kfree(actor);
+ return actor->buffer[actor->next_page++];
}
-void squashfs_actor_to_buf(struct squashfs_page_actor *actor, void *buf,
- int length)
+static void cache_finish_page(struct squashfs_page_actor *actor)
{
- void *pageaddr;
- int pos = 0, avail, i;
-
- for (i = 0; i < actor->pages && pos < length; ++i) {
- avail = min_t(int, length - pos, PAGE_SIZE);
- if (actor->page[i]) {
- pageaddr = kmap_atomic(actor->page[i]);
- memcpy(buf + pos, pageaddr, avail);
- kunmap_atomic(pageaddr);
- }
- pos += avail;
- }
+ /* empty */
}
-void squashfs_buf_to_actor(void *buf, struct squashfs_page_actor *actor,
- int length)
+struct squashfs_page_actor *squashfs_page_actor_init(void **buffer,
+ int pages, int length)
{
- void *pageaddr;
- int pos = 0, avail, i;
-
- for (i = 0; i < actor->pages && pos < length; ++i) {
- avail = min_t(int, length - pos, PAGE_SIZE);
- if (actor->page[i]) {
- pageaddr = kmap_atomic(actor->page[i]);
- memcpy(pageaddr, buf + pos, avail);
- kunmap_atomic(pageaddr);
- }
- pos += avail;
- }
+ struct squashfs_page_actor *actor = kmalloc(sizeof(*actor), GFP_KERNEL);
+
+ if (actor == NULL)
+ return NULL;
+
+ actor->length = length ? : pages * PAGE_SIZE;
+ actor->buffer = buffer;
+ actor->pages = pages;
+ actor->next_page = 0;
+ actor->squashfs_first_page = cache_first_page;
+ actor->squashfs_next_page = cache_next_page;
+ actor->squashfs_finish_page = cache_finish_page;
+ return actor;
}
-void squashfs_bh_to_actor(struct buffer_head **bh, int nr_buffers,
- struct squashfs_page_actor *actor, int offset, int length, int blksz)
+/* Implementation of page_actor for decompressing directly into page cache. */
+static void *direct_first_page(struct squashfs_page_actor *actor)
{
- void *kaddr = NULL;
- int bytes = 0, pgoff = 0, b = 0, p = 0, avail, i;
-
- while (bytes < length) {
- if (actor->page[p]) {
- kaddr = kmap_atomic(actor->page[p]);
- while (pgoff < PAGE_SIZE && bytes < length) {
- avail = min_t(int, blksz - offset,
- PAGE_SIZE - pgoff);
- memcpy(kaddr + pgoff, bh[b]->b_data + offset,
- avail);
- pgoff += avail;
- bytes += avail;
- offset = (offset + avail) % blksz;
- if (!offset) {
- put_bh(bh[b]);
- ++b;
- }
- }
- kunmap_atomic(kaddr);
- pgoff = 0;
- } else {
- for (i = 0; i < PAGE_SIZE / blksz; ++i) {
- if (bh[b])
- put_bh(bh[b]);
- ++b;
- }
- bytes += PAGE_SIZE;
- }
- ++p;
- }
+ actor->next_page = 1;
+ return actor->pageaddr = kmap_atomic(actor->page[0]);
}
-void squashfs_bh_to_buf(struct buffer_head **bh, int nr_buffers, void *buf,
- int offset, int length, int blksz)
+static void *direct_next_page(struct squashfs_page_actor *actor)
{
- int i, avail, bytes = 0;
-
- for (i = 0; i < nr_buffers && bytes < length; ++i) {
- avail = min_t(int, length - bytes, blksz - offset);
- if (bh[i]) {
- memcpy(buf + bytes, bh[i]->b_data + offset, avail);
- put_bh(bh[i]);
- }
- bytes += avail;
- offset = 0;
- }
+ if (actor->pageaddr)
+ kunmap_atomic(actor->pageaddr);
+
+ return actor->pageaddr = actor->next_page == actor->pages ? NULL :
+ kmap_atomic(actor->page[actor->next_page++]);
}
-void free_page_array(struct page **page, int nr_pages)
+static void direct_finish_page(struct squashfs_page_actor *actor)
{
- int i;
-
- for (i = 0; i < nr_pages; ++i)
- __free_page(page[i]);
- kfree(page);
+ if (actor->pageaddr)
+ kunmap_atomic(actor->pageaddr);
}
-struct page **alloc_page_array(int nr_pages, int gfp_mask)
+struct squashfs_page_actor *squashfs_page_actor_init_special(struct page **page,
+ int pages, int length)
{
- int i;
- struct page **page;
+ struct squashfs_page_actor *actor = kmalloc(sizeof(*actor), GFP_KERNEL);
- page = kcalloc(nr_pages, sizeof(struct page *), gfp_mask);
- if (!page)
+ if (actor == NULL)
return NULL;
- for (i = 0; i < nr_pages; ++i) {
- page[i] = alloc_page(gfp_mask);
- if (!page[i]) {
- free_page_array(page, i);
- return NULL;
- }
- }
- return page;
+
+ actor->length = length ? : pages * PAGE_SIZE;
+ actor->page = page;
+ actor->pages = pages;
+ actor->next_page = 0;
+ actor->pageaddr = NULL;
+ actor->squashfs_first_page = direct_first_page;
+ actor->squashfs_next_page = direct_next_page;
+ actor->squashfs_finish_page = direct_finish_page;
+ return actor;
}
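
To illustrate the page actor pattern restored above: the decompressors iterate their output with squashfs_first_page()/squashfs_next_page()/squashfs_finish_page() and never care whether the destination is an intermediate buffer (FILE_CACHE) or kmapped page cache pages (FILE_DIRECT); in this file that choice is hidden behind per-actor function pointers. The userspace model below shows the shape of that dispatch; the types and names are invented for the sketch.

#include <stdio.h>
#include <string.h>

#define MODEL_PAGE_SIZE 8

struct model_actor {
	void **buffer;		/* one pointer per "page" */
	int pages;
	int next_page;
	void *(*first_page)(struct model_actor *);
	void *(*next_pg)(struct model_actor *);
	void (*finish_page)(struct model_actor *);
};

static void *buf_first(struct model_actor *a)
{
	a->next_page = 1;
	return a->buffer[0];
}

static void *buf_next(struct model_actor *a)
{
	return a->next_page == a->pages ? NULL : a->buffer[a->next_page++];
}

static void buf_finish(struct model_actor *a)
{
	(void)a;	/* nothing to unmap in the buffer-backed variant */
}

/* "decompressor": writes length bytes of 'x' through the actor, page by page */
static void model_decompress(struct model_actor *a, int length)
{
	void *data = a->first_page(a);

	while (data && length > 0) {
		int avail = length < MODEL_PAGE_SIZE ? length : MODEL_PAGE_SIZE;

		memset(data, 'x', avail);
		length -= avail;
		data = a->next_pg(a);
	}
	a->finish_page(a);
}

int main(void)
{
	char page0[MODEL_PAGE_SIZE + 1] = { 0 }, page1[MODEL_PAGE_SIZE + 1] = { 0 };
	void *pages[2] = { page0, page1 };
	struct model_actor actor = {
		.buffer = pages, .pages = 2, .next_page = 0,
		.first_page = buf_first, .next_pg = buf_next,
		.finish_page = buf_finish,
	};

	model_decompress(&actor, 12);
	printf("page0=\"%s\" page1=\"%s\"\n", page0, page1);
	return 0;
}

Putting the dispatch inside the actor keeps the decompressors identical for both Kconfig options; only the init function the caller uses changes (squashfs_page_actor_init() for the cache buffer, squashfs_page_actor_init_special() for the page cache).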
* Phillip Lougher <phillip@squashfs.org.uk>
*
* This work is licensed under the terms of the GNU GPL, version 2. See
- * the COPYING file in the top-level squashfsory.
+ * the COPYING file in the top-level directory.
*/
+#ifndef CONFIG_SQUASHFS_FILE_DIRECT
struct squashfs_page_actor {
- struct page **page;
- void *pageaddr;
+ void **page;
int pages;
int length;
int next_page;
- void (*release_pages)(struct page **, int, int);
};
-extern struct squashfs_page_actor *squashfs_page_actor_init(struct page **,
- int, int, void (*)(struct page **, int, int));
-extern void squashfs_page_actor_free(struct squashfs_page_actor *, int);
+static inline struct squashfs_page_actor *squashfs_page_actor_init(void **page,
+ int pages, int length)
+{
+ struct squashfs_page_actor *actor = kmalloc(sizeof(*actor), GFP_KERNEL);
-extern void squashfs_actor_to_buf(struct squashfs_page_actor *, void *, int);
-extern void squashfs_buf_to_actor(void *, struct squashfs_page_actor *, int);
-extern void squashfs_bh_to_actor(struct buffer_head **, int,
- struct squashfs_page_actor *, int, int, int);
-extern void squashfs_bh_to_buf(struct buffer_head **, int, void *, int, int,
- int);
+ if (actor == NULL)
+ return NULL;
+
+ actor->length = length ? : pages * PAGE_SIZE;
+ actor->page = page;
+ actor->pages = pages;
+ actor->next_page = 0;
+ return actor;
+}
-/*
- * Calling code should avoid sleeping between calls to squashfs_first_page()
- * and squashfs_finish_page().
- */
static inline void *squashfs_first_page(struct squashfs_page_actor *actor)
{
actor->next_page = 1;
- return actor->pageaddr = actor->page[0] ? kmap_atomic(actor->page[0])
- : NULL;
+ return actor->page[0];
}
static inline void *squashfs_next_page(struct squashfs_page_actor *actor)
{
- if (!IS_ERR_OR_NULL(actor->pageaddr))
- kunmap_atomic(actor->pageaddr);
-
- if (actor->next_page == actor->pages)
- return actor->pageaddr = ERR_PTR(-ENODATA);
-
- actor->pageaddr = actor->page[actor->next_page] ?
- kmap_atomic(actor->page[actor->next_page]) : NULL;
- ++actor->next_page;
- return actor->pageaddr;
+ return actor->next_page == actor->pages ? NULL :
+ actor->page[actor->next_page++];
}
static inline void squashfs_finish_page(struct squashfs_page_actor *actor)
{
- if (!IS_ERR_OR_NULL(actor->pageaddr))
- kunmap_atomic(actor->pageaddr);
+ /* empty */
}
+#else
+struct squashfs_page_actor {
+ union {
+ void **buffer;
+ struct page **page;
+ };
+ void *pageaddr;
+ void *(*squashfs_first_page)(struct squashfs_page_actor *);
+ void *(*squashfs_next_page)(struct squashfs_page_actor *);
+ void (*squashfs_finish_page)(struct squashfs_page_actor *);
+ int pages;
+ int length;
+ int next_page;
+};
-extern struct page **alloc_page_array(int, int);
-extern void free_page_array(struct page **, int);
-
+extern struct squashfs_page_actor *squashfs_page_actor_init(void **, int, int);
+extern struct squashfs_page_actor *squashfs_page_actor_init_special(struct page
+ **, int, int);
+static inline void *squashfs_first_page(struct squashfs_page_actor *actor)
+{
+ return actor->squashfs_first_page(actor);
+}
+static inline void *squashfs_next_page(struct squashfs_page_actor *actor)
+{
+ return actor->squashfs_next_page(actor);
+}
+static inline void squashfs_finish_page(struct squashfs_page_actor *actor)
+{
+ actor->squashfs_finish_page(actor);
+}
+#endif
#endif
#define WARNING(s, args...) pr_warn("SQUASHFS: "s, ## args)
/* block.c */
-extern int squashfs_init_read_wq(void);
-extern void squashfs_destroy_read_wq(void);
extern int squashfs_read_data(struct super_block *, u64, int, u64 *,
struct squashfs_page_actor *);
-extern int squashfs_read_data_async(struct super_block *, u64, int, u64 *,
- struct squashfs_page_actor *);
/* cache.c */
extern struct squashfs_cache *squashfs_cache_init(char *, int, int);
void squashfs_copy_cache(struct page *, struct squashfs_cache_entry *, int,
int);
-/* file_direct.c */
-extern int squashfs_readpages_block(struct page *, struct list_head *,
- unsigned int *, struct address_space *, int, u64, int);
+/* file_xxx.c */
+extern int squashfs_readpage_block(struct page *, u64, int, int);
/* id.c */
extern int squashfs_get_id(struct super_block *, unsigned int, unsigned int *);
int num_waiters;
wait_queue_head_t wait_queue;
struct squashfs_cache *cache;
- struct page **page;
+ void **data;
struct squashfs_page_actor *actor;
};
if (err)
return err;
- if (!squashfs_init_read_wq()) {
- destroy_inodecache();
- return -ENOMEM;
- }
-
err = register_filesystem(&squashfs_fs_type);
if (err) {
destroy_inodecache();
- squashfs_destroy_read_wq();
return err;
}
{
unregister_filesystem(&squashfs_fs_type);
destroy_inodecache();
- squashfs_destroy_read_wq();
}
struct comp_opts *opts;
int err = 0, n;
- opts = kmalloc(sizeof(*opts), GFP_ATOMIC);
+ opts = kmalloc(sizeof(*opts), GFP_KERNEL);
if (opts == NULL) {
err = -ENOMEM;
goto out2;
enum xz_ret xz_err;
int avail, total = 0, k = 0;
struct squashfs_xz *stream = strm;
- void *buf = NULL;
xz_dec_reset(stream->state);
stream->buf.in_pos = 0;
if (stream->buf.out_pos == stream->buf.out_size) {
stream->buf.out = squashfs_next_page(output);
- if (!IS_ERR(stream->buf.out)) {
+ if (stream->buf.out != NULL) {
stream->buf.out_pos = 0;
total += PAGE_SIZE;
}
}
- if (!stream->buf.out) {
- if (!buf) {
- buf = kmalloc(PAGE_SIZE, GFP_ATOMIC);
- if (!buf)
- goto out;
- }
- stream->buf.out = buf;
- }
xz_err = xz_dec_run(stream->state, &stream->buf);
if (stream->buf.in_pos == stream->buf.in_size && k < b)
if (xz_err != XZ_STREAM_END || k < b)
goto out;
- kfree(buf);
return total + stream->buf.out_pos;
out:
for (; k < b; k++)
put_bh(bh[k]);
- kfree(buf);
return -EIO;
}
struct buffer_head **bh, int b, int offset, int length,
struct squashfs_page_actor *output)
{
- void *buf = NULL;
int zlib_err, zlib_init = 0, k = 0;
z_stream *stream = strm;
if (stream->avail_out == 0) {
stream->next_out = squashfs_next_page(output);
- if (!IS_ERR(stream->next_out))
+ if (stream->next_out != NULL)
stream->avail_out = PAGE_SIZE;
}
- if (!stream->next_out) {
- if (!buf) {
- buf = kmalloc(PAGE_SIZE, GFP_ATOMIC);
- if (!buf)
- goto out;
- }
- stream->next_out = buf;
- }
-
if (!zlib_init) {
zlib_err = zlib_inflateInit(stream);
if (zlib_err != Z_OK) {
if (k < b)
goto out;
- kfree(buf);
return stream->total_out;
out:
for (; k < b; k++)
put_bh(bh[k]);
- kfree(buf);
return -EIO;
}