kernel-fxtec-pro1x/fs/squashfs/block.c
Phillip Lougher e0d1f70010 squashfs: fix potential buffer over-run on 4K block file systems
Sizing the buffer based on block size is incorrect, leading
to a potential buffer over-run on 4K block size file systems
(because the metadata block size is always 8K).  This bug
doesn't seem to have triggered because 4K block size file systems
are not default, and also because metadata blocks after
compression tend to be less than 4K.

Signed-off-by: Phillip Lougher <phillip@lougher.demon.co.uk>
2010-04-25 02:09:05 +01:00
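To make the sizing problem concrete, here is a minimal userspace arithmetic sketch (not kernel code). It assumes a 4K filesystem block size, the fixed 8K metadata block size, a 1K device block size, and that the pre-fix allocation was on the order of (block_size >> devblksize_log2) + 1 buffer_heads, which is one reading of the description above.

/*
 * Illustrative sketch only: the constants and the 'old' formula are
 * assumptions; the 'new' formula mirrors the kcalloc() sizing in
 * squashfs_read_data() below.
 */
#include <stdio.h>

int main(void)
{
	int block_size = 4096;		/* 4K block size file system */
	int metadata_size = 8192;	/* metadata blocks are always 8K */
	int devblksize = 1024;		/* assumed device block size */
	int devblksize_log2 = 10;

	/* Assumed pre-fix sizing, based on the filesystem block size. */
	int old_bhs = (block_size >> devblksize_log2) + 1;

	/* Post-fix sizing, based on srclength, which is 8K for a
	 * metadata read. */
	int new_bhs = ((metadata_size + devblksize - 1)
		>> devblksize_log2) + 1;

	/* Device blocks an unaligned 8K metadata block can span. */
	int needed = (metadata_size >> devblksize_log2) + 1;

	printf("old=%d new=%d needed=%d\n", old_bhs, new_bhs, needed);
	return 0;
}

With these numbers the old sizing yields 5 buffer_heads while an unaligned 8K metadata block can span 9 device blocks, hence the potential over-run. On the default 128K block size the old formula already allocates far more than 9, and compressed metadata blocks are usually well under 4K, which is why the bug rarely triggered.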


/*
 * Squashfs - a compressed read only filesystem for Linux
 *
 * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
 * Phillip Lougher <phillip@lougher.demon.co.uk>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2,
 * or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * block.c
 */

/*
 * This file implements the low-level routines to read and decompress
 * datablocks and metadata blocks.
 */

#include <linux/fs.h>
#include <linux/vfs.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include "squashfs_fs.h"
#include "squashfs_fs_sb.h"
#include "squashfs_fs_i.h"
#include "squashfs.h"
#include "decompressor.h"

/*
 * Read the metadata block length, this is stored in the first two
 * bytes of the metadata block.
 */
static struct buffer_head *get_block_length(struct super_block *sb,
			u64 *cur_index, int *offset, int *length)
{
	struct squashfs_sb_info *msblk = sb->s_fs_info;
	struct buffer_head *bh;

	bh = sb_bread(sb, *cur_index);
	if (bh == NULL)
		return NULL;

	if (msblk->devblksize - *offset == 1) {
		/*
		 * The two length bytes straddle a device block boundary:
		 * low byte from this block, high byte from the next.
		 */
		*length = (unsigned char) bh->b_data[*offset];
		put_bh(bh);
		bh = sb_bread(sb, ++(*cur_index));
		if (bh == NULL)
			return NULL;
		*length |= (unsigned char) bh->b_data[0] << 8;
		*offset = 1;
	} else {
		*length = (unsigned char) bh->b_data[*offset] |
			(unsigned char) bh->b_data[*offset + 1] << 8;
		*offset += 2;
	}

	return bh;
}

/*
 * Read and decompress a metadata block or datablock. Length is non-zero
 * if a datablock is being read (the size is stored elsewhere in the
 * filesystem), otherwise the length is obtained from the first two bytes of
 * the metadata block. A bit in the length field indicates if the block
 * is stored uncompressed in the filesystem (usually because compression
 * generated a larger block - this does occasionally happen with zlib).
 */
int squashfs_read_data(struct super_block *sb, void **buffer, u64 index,
			int length, u64 *next_index, int srclength, int pages)
{
	struct squashfs_sb_info *msblk = sb->s_fs_info;
	struct buffer_head **bh;
	int offset = index & ((1 << msblk->devblksize_log2) - 1);
	u64 cur_index = index >> msblk->devblksize_log2;
	int bytes, compressed, b = 0, k = 0, page = 0, avail;

	/*
	 * Size the buffer_head array on srclength: at most srclength
	 * bytes are read, starting at any offset within a device block.
	 */
	bh = kcalloc(((srclength + msblk->devblksize - 1)
		>> msblk->devblksize_log2) + 1, sizeof(*bh), GFP_KERNEL);
	if (bh == NULL)
		return -ENOMEM;

	if (length) {
		/*
		 * Datablock.
		 */
		bytes = -offset;	/* first device block is only partly used */
		compressed = SQUASHFS_COMPRESSED_BLOCK(length);
		length = SQUASHFS_COMPRESSED_SIZE_BLOCK(length);
		if (next_index)
			*next_index = index + length;

		TRACE("Block @ 0x%llx, %scompressed size %d, src size %d\n",
			index, compressed ? "" : "un", length, srclength);

		if (length < 0 || length > srclength ||
				(index + length) > msblk->bytes_used)
			goto read_failure;

		for (b = 0; bytes < length; b++, cur_index++) {
			bh[b] = sb_getblk(sb, cur_index);
			if (bh[b] == NULL)
				goto block_release;
			bytes += msblk->devblksize;
		}
		ll_rw_block(READ, b, bh);
	} else {
		/*
		 * Metadata block.
		 */
		if ((index + 2) > msblk->bytes_used)
			goto read_failure;

		bh[0] = get_block_length(sb, &cur_index, &offset, &length);
		if (bh[0] == NULL)
			goto read_failure;
		b = 1;

		bytes = msblk->devblksize - offset;
		compressed = SQUASHFS_COMPRESSED(length);
		length = SQUASHFS_COMPRESSED_SIZE(length);
		if (next_index)
			*next_index = index + length + 2;

		TRACE("Block @ 0x%llx, %scompressed size %d\n", index,
			compressed ? "" : "un", length);

		if (length < 0 || length > srclength ||
				(index + length) > msblk->bytes_used)
			goto block_release;

		for (; bytes < length; b++) {
			bh[b] = sb_getblk(sb, ++cur_index);
			if (bh[b] == NULL)
				goto block_release;
			bytes += msblk->devblksize;
		}
		/* bh[0] was already read by get_block_length(), submit the rest */
		ll_rw_block(READ, b - 1, bh + 1);
	}

	if (compressed) {
		length = squashfs_decompress(msblk, buffer, bh, b, offset,
			length, srclength, pages);
		if (length < 0)
			goto read_failure;
	} else {
		/*
		 * Block is uncompressed.
		 */
		int i, in, pg_offset = 0;

		for (i = 0; i < b; i++) {
			wait_on_buffer(bh[i]);
			if (!buffer_uptodate(bh[i]))
				goto block_release;
		}

		/*
		 * Copy the raw buffer_head contents straight into the
		 * page-sized output buffers.
		 */
		for (bytes = length; k < b; k++) {
			in = min(bytes, msblk->devblksize - offset);
			bytes -= in;
			while (in) {
				if (pg_offset == PAGE_CACHE_SIZE) {
					page++;
					pg_offset = 0;
				}
				avail = min_t(int, in, PAGE_CACHE_SIZE -
					pg_offset);
				memcpy(buffer[page] + pg_offset,
					bh[k]->b_data + offset, avail);
				in -= avail;
				pg_offset += avail;
				offset += avail;
			}
			offset = 0;
			put_bh(bh[k]);
		}
	}

	kfree(bh);
	return length;

block_release:
	for (; k < b; k++)
		put_bh(bh[k]);

read_failure:
	ERROR("squashfs_read_data failed to read block 0x%llx\n",
		(unsigned long long) index);
	kfree(bh);
	return -EIO;
}
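
For orientation, here is a hedged sketch of the two ways squashfs_read_data() is driven; it is not part of block.c or of the real callers in the squashfs cache layer. The example_read() wrapper, the block offsets, and the datablock length word are hypothetical values.

/*
 * Illustrative only, not kernel source. Shows the two call shapes of
 * squashfs_read_data(); offsets and the length word are made up.
 */
static int example_read(struct super_block *sb, void **output, int pages)
{
	u64 next;
	int n;

	/*
	 * Metadata block: length == 0, so the size is taken from the
	 * 2-byte header on disk; srclength is the worst case 8K
	 * (SQUASHFS_METADATA_SIZE).
	 */
	n = squashfs_read_data(sb, output, 96 /* hypothetical */, 0, &next,
		SQUASHFS_METADATA_SIZE, pages);
	if (n < 0)
		return n;

	/*
	 * Datablock: the length word comes from the inode's block list
	 * (a flag bit in it records whether the block is stored
	 * uncompressed); srclength is the filesystem block size.
	 */
	return squashfs_read_data(sb, output, 65536 /* hypothetical */,
		4096 /* hypothetical length word */, NULL,
		131072 /* e.g. a 128K block size */, pages);
}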