author     Campbell Suter <campbell@snapit.group>   2020-11-23 15:40:03 +1300
committer  Tom Rini <trini@konsulko.com>            2021-01-20 14:01:44 -0500
commit     9dba07f14304612ed4ee6d827898aa0cb55d4e26
tree       993e262d50a903b112a025f4e5d169407476fa81 /fs/squashfs
parent     3b72612ad191cad29aad3982221ff3355bec798d
Fix squashfs failing to load sparse files
SquashFS supports sparse blocks in files: if a given block is composed
only of zeros, it is not written to the output file to save space, and
its on-disk length field is set to zero to indicate that.
Previously, the SquashFS driver did not recognise this and would attempt
to read and decompress a zero-sized block, which obviously failed.
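As a rough illustration (not the driver's actual code; the helper name and
parameters below are hypothetical), handling a sparse block amounts to skipping
the disk read and decompression entirely and zero-filling the output for the
block's logical size, clamped to however many bytes the caller still wants:

/*
 * Minimal sketch of the sparse-block case, assuming an on-disk block
 * size field of 0 means "this block is all zeros". Names are
 * illustrative only and do not match the U-Boot SquashFS driver.
 */
#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Zero-fill one sparse block, returning how many bytes were produced. */
static size_t emit_sparse_block(uint8_t *dst, size_t remaining,
				uint32_t block_size)
{
	size_t n = block_size < remaining ? (size_t)block_size : remaining;

	/* Nothing to read or decompress: the data is implicitly zero. */
	memset(dst, 0, n);
	return n;
}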
The following command may be used to create a file for testing:
cat <(dd if=/dev/urandom of=/dev/stdout bs=1M count=1) \
<(dd if=/dev/zero of=/dev/stdout bs=1M count=1) \
<(dd if=/dev/urandom of=/dev/stdout bs=1k count=200) >test_file
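Assuming mksquashfs's default 128 KiB block size, the 1 MiB run of zeros starts
on a block boundary, so when the file is packed into a SquashFS image several of
its data blocks consist entirely of zeros and are stored sparse, while the
trailing 200 KiB of random data forces the read to continue past the sparse
region.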
Signed-off-by: Campbell Suter <campbell@snapit.group>
Diffstat (limited to 'fs/squashfs')
-rw-r--r-- | fs/squashfs/sqfs.c | 54
1 file changed, 35 insertions(+), 19 deletions(-)
diff --git a/fs/squashfs/sqfs.c b/fs/squashfs/sqfs.c
index 5de69ac..dca13bd 100644
--- a/fs/squashfs/sqfs.c
+++ b/fs/squashfs/sqfs.c
@@ -1310,7 +1310,7 @@ int sqfs_read(const char *filename, void *buf, loff_t offset, loff_t len,
 {
 	char *dir = NULL, *fragment_block, *datablock = NULL, *data_buffer = NULL;
 	char *fragment = NULL, *file = NULL, *resolved, *data;
-	u64 start, n_blks, table_size, data_offset, table_offset;
+	u64 start, n_blks, table_size, data_offset, table_offset, sparse_size;
 	int ret, j, i_number, datablk_count = 0;
 	struct squashfs_super_block *sblk = ctxt.sblk;
 	struct squashfs_fragment_block_entry frag_entry;
@@ -1444,28 +1444,43 @@ int sqfs_read(const char *filename, void *buf, loff_t offset, loff_t len,
 		n_blks = DIV_ROUND_UP(table_size + table_offset,
 				      ctxt.cur_dev->blksz);

-		data_buffer = malloc_cache_aligned(n_blks * ctxt.cur_dev->blksz);
+		/* Don't load any data for sparse blocks */
+		if (finfo.blk_sizes[j] == 0) {
+			n_blks = 0;
+			table_offset = 0;
+			data_buffer = NULL;
+			data = NULL;
+		} else {
+			data_buffer = malloc_cache_aligned(n_blks * ctxt.cur_dev->blksz);

-		if (!data_buffer) {
-			ret = -ENOMEM;
-			goto out;
-		}
+			if (!data_buffer) {
+				ret = -ENOMEM;
+				goto out;
+			}

-		ret = sqfs_disk_read(start, n_blks, data_buffer);
-		if (ret < 0) {
-			/*
-			 * Possible causes: too many data blocks or too large
-			 * SquashFS block size. Tip: re-compile the SquashFS
-			 * image with mksquashfs's -b <block_size> option.
-			 */
-			printf("Error: too many data blocks to be read.\n");
-			goto out;
-		}
+			ret = sqfs_disk_read(start, n_blks, data_buffer);
+			if (ret < 0) {
+				/*
+				 * Possible causes: too many data blocks or too large
+				 * SquashFS block size. Tip: re-compile the SquashFS
+				 * image with mksquashfs's -b <block_size> option.
+				 */
+				printf("Error: too many data blocks to be read.\n");
+				goto out;
+			}

-		data = data_buffer + table_offset;
+			data = data_buffer + table_offset;
+		}

 		/* Load the data */
-		if (SQFS_COMPRESSED_BLOCK(finfo.blk_sizes[j])) {
+		if (finfo.blk_sizes[j] == 0) {
+			/* This is a sparse block */
+			sparse_size = get_unaligned_le32(&sblk->block_size);
+
+			if ((*actread + sparse_size) > len)
+				sparse_size = len - *actread;
+
+			memset(buf + *actread, 0, sparse_size);
+			*actread += sparse_size;
+		} else if (SQFS_COMPRESSED_BLOCK(finfo.blk_sizes[j])) {
 			dest_len = get_unaligned_le32(&sblk->block_size);
 			ret = sqfs_decompress(&ctxt, datablock, &dest_len,
 					      data, table_size);
@@ -1484,7 +1499,8 @@ int sqfs_read(const char *filename, void *buf, loff_t offset, loff_t len,
 		}

 		data_offset += table_size;
-		free(data_buffer);
+		if (data_buffer)
+			free(data_buffer);
 		data_buffer = NULL;
 		if (*actread >= len)
 			break;