Merge branch 'for_linus' into for_linus_merged

Conflicts:
        fs/ext4/ioctl.c

commit ff9cb1c4ee
16 changed files with 1103 additions and 487 deletions
@@ -581,6 +581,13 @@ Table of Ext4 specific ioctls

                              behaviour may change in the future as it is
                              not necessary and has been done this way only
                              for sake of simplicity.

 EXT4_IOC_RESIZE_FS           Resize the filesystem to a new size. The number
                              of blocks of resized filesystem is passed in via
                              64 bit integer argument. The kernel allocates
                              bitmaps and inode table, the userspace tool thus
                              just passes the new number of blocks.

..............................................................................

References
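Illustrative userspace sketch (not part of the diff): how a tool might invoke the new resize ioctl described above. The request number mirrors the _IOW('f', 16, __u64) definition added in the ext4.h hunks below; the mount point path and block count are placeholder values, not anything taken from the commit.

/* Hedged example: a minimal userspace caller for EXT4_IOC_RESIZE_FS.
 * "/mnt/test" and the block count are placeholders. */
#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>

#ifndef EXT4_IOC_RESIZE_FS
#define EXT4_IOC_RESIZE_FS      _IOW('f', 16, uint64_t)
#endif

int main(int argc, char **argv)
{
        const char *mntpoint = argc > 1 ? argv[1] : "/mnt/test"; /* placeholder */
        uint64_t n_blocks_count = 1048576;      /* new total size in fs blocks */
        int fd = open(mntpoint, O_RDONLY);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        /* The kernel allocates the new bitmaps and inode tables itself;
         * userspace only passes the new block count. */
        if (ioctl(fd, EXT4_IOC_RESIZE_FS, &n_blocks_count) < 0) {
                perror("EXT4_IOC_RESIZE_FS");
                close(fd);
                return 1;
        }
        close(fd);
        return 0;
}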
@@ -23,6 +23,8 @@

#include <trace/events/ext4.h>

static unsigned ext4_num_base_meta_clusters(struct super_block *sb,
                                            ext4_group_t block_group);
/*
 * balloc.c contains the blocks allocation and deallocation routines
 */

@@ -668,7 +670,7 @@ unsigned long ext4_bg_num_gdb(struct super_block *sb, ext4_group_t group)

 * This function returns the number of file system metadata clusters at
 * the beginning of a block group, including the reserved gdt blocks.
 */
unsigned ext4_num_base_meta_clusters(struct super_block *sb,
static unsigned ext4_num_base_meta_clusters(struct super_block *sb,
                                            ext4_group_t block_group)
{
        struct ext4_sb_info *sbi = EXT4_SB(sb);
@@ -511,6 +511,14 @@ struct ext4_new_group_data {

        __u32 free_blocks_count;
};

/* Indexes used to index group tables in ext4_new_group_data */
enum {
        BLOCK_BITMAP = 0,       /* block bitmap */
        INODE_BITMAP,           /* inode bitmap */
        INODE_TABLE,            /* inode tables */
        GROUP_TABLE_COUNT,
};

/*
 * Flags used by ext4_map_blocks()
 */
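A quick illustration of why the enum ends in GROUP_TABLE_COUNT: the values can index a small per-group array, one slot per metadata table. The array and names below are hypothetical, not the ext4 resize code.

/* Hypothetical sketch: indexing a per-group array with the enum above. */
#include <stdio.h>

typedef unsigned long long ext4_fsblk_t;

enum {
        BLOCK_BITMAP = 0,       /* block bitmap */
        INODE_BITMAP,           /* inode bitmap */
        INODE_TABLE,            /* inode tables */
        GROUP_TABLE_COUNT,
};

static const char *group_table_name[GROUP_TABLE_COUNT] = {
        [BLOCK_BITMAP] = "block bitmap",
        [INODE_BITMAP] = "inode bitmap",
        [INODE_TABLE]  = "inode table",
};

int main(void)
{
        /* Made-up starting blocks for one new group's metadata tables. */
        ext4_fsblk_t group_table[GROUP_TABLE_COUNT] = { 1025, 1026, 1027 };

        for (int i = 0; i < GROUP_TABLE_COUNT; i++)
                printf("%-12s starts at block %llu\n",
                       group_table_name[i], group_table[i]);
        return 0;
}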
@@ -575,6 +583,7 @@ struct ext4_new_group_data {

/* note ioctl 11 reserved for filesystem-independent FIEMAP ioctl */
#define EXT4_IOC_ALLOC_DA_BLKS          _IO('f', 12)
#define EXT4_IOC_MOVE_EXT               _IOWR('f', 15, struct move_extent)
#define EXT4_IOC_RESIZE_FS              _IOW('f', 16, __u64)

#if defined(__KERNEL__) && defined(CONFIG_COMPAT)
/*

@@ -957,12 +966,13 @@ struct ext4_inode_info {

#define test_opt2(sb, opt)              (EXT4_SB(sb)->s_mount_opt2 & \
                                         EXT4_MOUNT2_##opt)

#define ext4_set_bit                    __test_and_set_bit_le
#define ext4_test_and_set_bit           __test_and_set_bit_le
#define ext4_set_bit                    __set_bit_le
#define ext4_set_bit_atomic             ext2_set_bit_atomic
#define ext4_clear_bit                  __test_and_clear_bit_le
#define ext4_test_and_clear_bit         __test_and_clear_bit_le
#define ext4_clear_bit                  __clear_bit_le
#define ext4_clear_bit_atomic           ext2_clear_bit_atomic
#define ext4_test_bit                   test_bit_le
#define ext4_find_first_zero_bit        find_first_zero_bit_le
#define ext4_find_next_zero_bit         find_next_zero_bit_le
#define ext4_find_next_bit              find_next_bit_le
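The split between ext4_set_bit and ext4_test_and_set_bit follows the usual bitops distinction: the plain variant only sets the bit, while the test-and-set variant also returns the previous value, which is what lets ext4_claim_inode() (in the ialloc.c hunk further down) detect an already-allocated inode. A plain C sketch of the semantics, not the kernel's little-endian bitops:

/* Semantics sketch only, not the kernel bitops implementation. */
#include <stdio.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))

static void bitmap_set_bit(unsigned int nr, unsigned long *bitmap)
{
        bitmap[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
}

/* Sets the bit and reports whether it was already set. */
static int bitmap_test_and_set_bit(unsigned int nr, unsigned long *bitmap)
{
        unsigned long mask = 1UL << (nr % BITS_PER_LONG);
        unsigned long *word = &bitmap[nr / BITS_PER_LONG];
        int was_set = (*word & mask) != 0;

        *word |= mask;
        return was_set;
}

int main(void)
{
        unsigned long bitmap[4] = { 0 };

        bitmap_set_bit(12, bitmap);     /* no way to tell whether 12 was free */
        printf("first claim of 37:  %d\n", bitmap_test_and_set_bit(37, bitmap));
        printf("second claim of 37: %d\n", bitmap_test_and_set_bit(37, bitmap));
        return 0;
}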
@@ -1397,6 +1407,7 @@ static inline void ext4_clear_state_flags(struct ext4_inode_info *ei)

#define EXT4_FEATURE_RO_COMPAT_EXTRA_ISIZE      0x0040
#define EXT4_FEATURE_RO_COMPAT_QUOTA            0x0100
#define EXT4_FEATURE_RO_COMPAT_BIGALLOC         0x0200
#define EXT4_FEATURE_RO_COMPAT_METADATA_CSUM    0x0400

#define EXT4_FEATURE_INCOMPAT_COMPRESSION       0x0001
#define EXT4_FEATURE_INCOMPAT_FILETYPE          0x0002

@@ -1409,6 +1420,8 @@ static inline void ext4_clear_state_flags(struct ext4_inode_info *ei)

#define EXT4_FEATURE_INCOMPAT_FLEX_BG           0x0200
#define EXT4_FEATURE_INCOMPAT_EA_INODE          0x0400 /* EA in inode */
#define EXT4_FEATURE_INCOMPAT_DIRDATA           0x1000 /* data in dirent */
#define EXT4_FEATURE_INCOMPAT_INLINEDATA        0x2000 /* data in inode */
#define EXT4_FEATURE_INCOMPAT_LARGEDIR          0x4000 /* >2GB or 3-lvl htree */

#define EXT2_FEATURE_COMPAT_SUPP        EXT4_FEATURE_COMPAT_EXT_ATTR
#define EXT2_FEATURE_INCOMPAT_SUPP      (EXT4_FEATURE_INCOMPAT_FILETYPE| \

@@ -1790,8 +1803,6 @@ extern void ext4_init_block_bitmap(struct super_block *sb,

extern unsigned ext4_free_clusters_after_init(struct super_block *sb,
                                              ext4_group_t block_group,
                                              struct ext4_group_desc *gdp);
extern unsigned ext4_num_base_meta_clusters(struct super_block *sb,
                                            ext4_group_t block_group);
extern unsigned ext4_num_overhead_clusters(struct super_block *sb,
                                           ext4_group_t block_group,
                                           struct ext4_group_desc *gdp);

@@ -1880,16 +1891,9 @@ extern int ext4_alloc_da_blocks(struct inode *inode);

extern void ext4_set_aops(struct inode *inode);
extern int ext4_writepage_trans_blocks(struct inode *);
extern int ext4_chunk_trans_blocks(struct inode *, int nrblocks);
extern int ext4_block_truncate_page(handle_t *handle,
                struct address_space *mapping, loff_t from);
extern int ext4_block_zero_page_range(handle_t *handle,
                struct address_space *mapping, loff_t from, loff_t length);
extern int ext4_discard_partial_page_buffers(handle_t *handle,
                struct address_space *mapping, loff_t from,
                loff_t length, int flags);
extern int ext4_discard_partial_page_buffers_no_lock(handle_t *handle,
                struct inode *inode, struct page *page, loff_t from,
                loff_t length, int flags);
extern int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
extern qsize_t *ext4_get_reserved_space(struct inode *inode);
extern void ext4_da_update_reserve_space(struct inode *inode,

@@ -1924,6 +1928,7 @@ extern int ext4_group_add(struct super_block *sb,

extern int ext4_group_extend(struct super_block *sb,
                                struct ext4_super_block *es,
                                ext4_fsblk_t n_blocks_count);
extern int ext4_resize_fs(struct super_block *sb, ext4_fsblk_t n_blocks_count);

/* super.c */
extern void *ext4_kvmalloc(size_t size, gfp_t flags);
@@ -3280,6 +3280,9 @@ static int ext4_find_delalloc_range(struct inode *inode,

        ext4_lblk_t i, pg_lblk;
        pgoff_t index;

        if (!test_opt(inode->i_sb, DELALLOC))
                return 0;

        /* reverse search wont work if fs block size is less than page size */
        if (inode->i_blkbits < PAGE_CACHE_SHIFT)
                search_hint_reverse = 0;

@@ -3452,8 +3455,8 @@ ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,

        int err = 0;
        ext4_io_end_t *io = EXT4_I(inode)->cur_aio_dio;

        ext_debug("ext4_ext_handle_uninitialized_extents: inode %lu, logical"
                  "block %llu, max_blocks %u, flags %d, allocated %u",
        ext_debug("ext4_ext_handle_uninitialized_extents: inode %lu, logical "
                  "block %llu, max_blocks %u, flags %x, allocated %u\n",
                  inode->i_ino, (unsigned long long)map->m_lblk, map->m_len,
                  flags, allocated);
        ext4_ext_show_leaf(inode, path);

@@ -3624,7 +3627,7 @@ static int get_implied_cluster_alloc(struct super_block *sb,

        struct ext4_sb_info *sbi = EXT4_SB(sb);
        ext4_lblk_t c_offset = map->m_lblk & (sbi->s_cluster_ratio-1);
        ext4_lblk_t ex_cluster_start, ex_cluster_end;
        ext4_lblk_t rr_cluster_start, rr_cluster_end;
        ext4_lblk_t rr_cluster_start;
        ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block);
        ext4_fsblk_t ee_start = ext4_ext_pblock(ex);
        unsigned short ee_len = ext4_ext_get_actual_len(ex);

@@ -3635,7 +3638,6 @@ static int get_implied_cluster_alloc(struct super_block *sb,

        /* The requested region passed into ext4_map_blocks() */
        rr_cluster_start = EXT4_B2C(sbi, map->m_lblk);
        rr_cluster_end = EXT4_B2C(sbi, map->m_lblk + map->m_len - 1);

        if ((rr_cluster_start == ex_cluster_end) ||
            (rr_cluster_start == ex_cluster_start)) {
@@ -252,7 +252,7 @@ void ext4_free_inode(handle_t *handle, struct inode *inode)

                fatal = ext4_journal_get_write_access(handle, bh2);
        }
        ext4_lock_group(sb, block_group);
        cleared = ext4_clear_bit(bit, bitmap_bh->b_data);
        cleared = ext4_test_and_clear_bit(bit, bitmap_bh->b_data);
        if (fatal || !cleared) {
                ext4_unlock_group(sb, block_group);
                goto out;

@@ -358,7 +358,7 @@ static int find_group_orlov(struct super_block *sb, struct inode *parent,

        struct ext4_sb_info *sbi = EXT4_SB(sb);
        ext4_group_t real_ngroups = ext4_get_groups_count(sb);
        int inodes_per_group = EXT4_INODES_PER_GROUP(sb);
        unsigned int freei, avefreei;
        unsigned int freei, avefreei, grp_free;
        ext4_fsblk_t freeb, avefreec;
        unsigned int ndirs;
        int max_dirs, min_inodes;

@@ -477,8 +477,8 @@ static int find_group_orlov(struct super_block *sb, struct inode *parent,

        for (i = 0; i < ngroups; i++) {
                grp = (parent_group + i) % ngroups;
                desc = ext4_get_group_desc(sb, grp, NULL);
                if (desc && ext4_free_inodes_count(sb, desc) &&
                    ext4_free_inodes_count(sb, desc) >= avefreei) {
                grp_free = ext4_free_inodes_count(sb, desc);
                if (desc && grp_free && grp_free >= avefreei) {
                        *group = grp;
                        return 0;
                }

@@ -618,7 +618,7 @@ static int ext4_claim_inode(struct super_block *sb,

         */
        down_read(&grp->alloc_sem);
        ext4_lock_group(sb, group);
        if (ext4_set_bit(ino, inode_bitmap_bh->b_data)) {
        if (ext4_test_and_set_bit(ino, inode_bitmap_bh->b_data)) {
                /* not a free inode */
                retval = 1;
                goto err_ret;

@@ -885,8 +885,12 @@ struct inode *ext4_new_inode(handle_t *handle, struct inode *dir, umode_t mode,

        if (IS_DIRSYNC(inode))
                ext4_handle_sync(handle);
        if (insert_inode_locked(inode) < 0) {
                err = -EINVAL;
                goto fail_drop;
                /*
                 * Likely a bitmap corruption causing inode to be allocated
                 * twice.
                 */
                err = -EIO;
                goto fail;
        }
        spin_lock(&sbi->s_next_gen_lock);
        inode->i_generation = sbi->s_next_generation++;
fs/ext4/inode.c (143)
@@ -71,6 +71,9 @@ static int ext4_set_bh_endio(struct buffer_head *bh, struct inode *inode);

static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate);
static int __ext4_journalled_writepage(struct page *page, unsigned int len);
static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh);
static int ext4_discard_partial_page_buffers_no_lock(handle_t *handle,
                struct inode *inode, struct page *page, loff_t from,
                loff_t length, int flags);

/*
 * Test whether an inode is a fast symlink.

@@ -2759,7 +2762,7 @@ static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,

        if (!io_end || !size)
                goto out;

        ext_debug("ext4_end_io_dio(): io_end 0x%p"
        ext_debug("ext4_end_io_dio(): io_end 0x%p "
                  "for inode %lu, iocb 0x%p, offset %llu, size %llu\n",
                  iocb->private, io_end->inode->i_ino, iocb, offset,
                  size);

@@ -3160,7 +3163,7 @@ int ext4_discard_partial_page_buffers(handle_t *handle,

 *
 * Returns zero on sucess or negative on failure.
 */
int ext4_discard_partial_page_buffers_no_lock(handle_t *handle,
static int ext4_discard_partial_page_buffers_no_lock(handle_t *handle,
                struct inode *inode, struct page *page, loff_t from,
                loff_t length, int flags)
{
@@ -3300,126 +3303,6 @@ int ext4_discard_partial_page_buffers_no_lock(handle_t *handle,

        return err;
}

/*
 * ext4_block_truncate_page() zeroes out a mapping from file offset `from'
 * up to the end of the block which corresponds to `from'.
 * This required during truncate. We need to physically zero the tail end
 * of that block so it doesn't yield old data if the file is later grown.
 */
int ext4_block_truncate_page(handle_t *handle,
                struct address_space *mapping, loff_t from)
{
        unsigned offset = from & (PAGE_CACHE_SIZE-1);
        unsigned length;
        unsigned blocksize;
        struct inode *inode = mapping->host;

        blocksize = inode->i_sb->s_blocksize;
        length = blocksize - (offset & (blocksize - 1));

        return ext4_block_zero_page_range(handle, mapping, from, length);
}

/*
 * ext4_block_zero_page_range() zeros out a mapping of length 'length'
 * starting from file offset 'from'. The range to be zero'd must
 * be contained with in one block. If the specified range exceeds
 * the end of the block it will be shortened to end of the block
 * that cooresponds to 'from'
 */
int ext4_block_zero_page_range(handle_t *handle,
                struct address_space *mapping, loff_t from, loff_t length)
{
        ext4_fsblk_t index = from >> PAGE_CACHE_SHIFT;
        unsigned offset = from & (PAGE_CACHE_SIZE-1);
        unsigned blocksize, max, pos;
        ext4_lblk_t iblock;
        struct inode *inode = mapping->host;
        struct buffer_head *bh;
        struct page *page;
        int err = 0;

        page = find_or_create_page(mapping, from >> PAGE_CACHE_SHIFT,
                                   mapping_gfp_mask(mapping) & ~__GFP_FS);
        if (!page)
                return -ENOMEM;

        blocksize = inode->i_sb->s_blocksize;
        max = blocksize - (offset & (blocksize - 1));

        /*
         * correct length if it does not fall between
         * 'from' and the end of the block
         */
        if (length > max || length < 0)
                length = max;

        iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);

        if (!page_has_buffers(page))
                create_empty_buffers(page, blocksize, 0);

        /* Find the buffer that contains "offset" */
        bh = page_buffers(page);
        pos = blocksize;
        while (offset >= pos) {
                bh = bh->b_this_page;
                iblock++;
                pos += blocksize;
        }

        err = 0;
        if (buffer_freed(bh)) {
                BUFFER_TRACE(bh, "freed: skip");
                goto unlock;
        }

        if (!buffer_mapped(bh)) {
                BUFFER_TRACE(bh, "unmapped");
                ext4_get_block(inode, iblock, bh, 0);
                /* unmapped? It's a hole - nothing to do */
                if (!buffer_mapped(bh)) {
                        BUFFER_TRACE(bh, "still unmapped");
                        goto unlock;
                }
        }

        /* Ok, it's mapped. Make sure it's up-to-date */
        if (PageUptodate(page))
                set_buffer_uptodate(bh);

        if (!buffer_uptodate(bh)) {
                err = -EIO;
                ll_rw_block(READ, 1, &bh);
                wait_on_buffer(bh);
                /* Uhhuh. Read error. Complain and punt. */
                if (!buffer_uptodate(bh))
                        goto unlock;
        }

        if (ext4_should_journal_data(inode)) {
                BUFFER_TRACE(bh, "get write access");
                err = ext4_journal_get_write_access(handle, bh);
                if (err)
                        goto unlock;
        }

        zero_user(page, offset, length);

        BUFFER_TRACE(bh, "zeroed end of block");

        err = 0;
        if (ext4_should_journal_data(inode)) {
                err = ext4_handle_dirty_metadata(handle, inode, bh);
        } else
                mark_buffer_dirty(bh);

unlock:
        unlock_page(page);
        page_cache_release(page);
        return err;
}

int ext4_can_truncate(struct inode *inode)
{
        if (S_ISREG(inode->i_mode))
@@ -4646,9 +4529,19 @@ int ext4_change_inode_journal_flag(struct inode *inode, int val)

                return 0;
        if (is_journal_aborted(journal))
                return -EROFS;
        /* We have to allocate physical blocks for delalloc blocks
         * before flushing journal. otherwise delalloc blocks can not
         * be allocated any more. even more truncate on delalloc blocks
         * could trigger BUG by flushing delalloc blocks in journal.
         * There is no delalloc block in non-journal data mode.
         */
        if (val && test_opt(inode->i_sb, DELALLOC)) {
                err = ext4_alloc_da_blocks(inode);
                if (err < 0)
                        return err;
        }

        jbd2_journal_lock_updates(journal);
        jbd2_journal_flush(journal);

        /*
         * OK, there are no updates running now, and all cached data is

@@ -4660,8 +4553,10 @@ int ext4_change_inode_journal_flag(struct inode *inode, int val)

        if (val)
                ext4_set_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
        else
        else {
                jbd2_journal_flush(journal);
                ext4_clear_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
        }
        ext4_set_aops(inode);

        jbd2_journal_unlock_updates(journal);
@@ -18,6 +18,8 @@

#include "ext4_jbd2.h"
#include "ext4.h"

#define MAX_32_NUM ((((unsigned long long) 1) << 32) - 1)

long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
        struct inode *inode = filp->f_dentry->d_inode;

@@ -186,19 +188,22 @@ long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)

                if (err)
                        return err;

                if (get_user(n_blocks_count, (__u32 __user *)arg))
                        return -EFAULT;
                if (get_user(n_blocks_count, (__u32 __user *)arg)) {
                        err = -EFAULT;
                        goto group_extend_out;
                }

                if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
                               EXT4_FEATURE_RO_COMPAT_BIGALLOC)) {
                        ext4_msg(sb, KERN_ERR,
                                 "Online resizing not supported with bigalloc");
                        return -EOPNOTSUPP;
                        err = -EOPNOTSUPP;
                        goto group_extend_out;
                }

                err = mnt_want_write_file(filp);
                if (err)
                        return err;
                        goto group_extend_out;

                err = ext4_group_extend(sb, EXT4_SB(sb)->s_es, n_blocks_count);
                if (EXT4_SB(sb)->s_journal) {

@@ -209,8 +214,8 @@ long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)

                if (err == 0)
                        err = err2;
                mnt_drop_write_file(filp);
group_extend_out:
                ext4_resize_end(sb);

                return err;
        }
@@ -251,8 +256,7 @@ long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)

                err = ext4_move_extents(filp, donor_filp, me.orig_start,
                                        me.donor_start, me.len, &me.moved_len);
                mnt_drop_write_file(filp);
                if (me.moved_len > 0)
                        file_remove_suid(donor_filp);
                mnt_drop_write(filp->f_path.mnt);

                if (copy_to_user((struct move_extent __user *)arg,
                                 &me, sizeof(me)))

@@ -271,19 +275,22 @@ long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)

                        return err;

                if (copy_from_user(&input, (struct ext4_new_group_input __user *)arg,
                                sizeof(input)))
                        return -EFAULT;
                                sizeof(input))) {
                        err = -EFAULT;
                        goto group_add_out;
                }

                if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
                               EXT4_FEATURE_RO_COMPAT_BIGALLOC)) {
                        ext4_msg(sb, KERN_ERR,
                                 "Online resizing not supported with bigalloc");
                        return -EOPNOTSUPP;
                        err = -EOPNOTSUPP;
                        goto group_add_out;
                }

                err = mnt_want_write_file(filp);
                if (err)
                        return err;
                        goto group_add_out;

                err = ext4_group_add(sb, &input);
                if (EXT4_SB(sb)->s_journal) {

@@ -294,8 +301,8 @@ long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)

                if (err == 0)
                        err = err2;
                mnt_drop_write_file(filp);
group_add_out:
                ext4_resize_end(sb);

                return err;
        }
@@ -335,6 +342,60 @@ long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)

                return err;
        }

        case EXT4_IOC_RESIZE_FS: {
                ext4_fsblk_t n_blocks_count;
                struct super_block *sb = inode->i_sb;
                int err = 0, err2 = 0;

                if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
                               EXT4_FEATURE_RO_COMPAT_BIGALLOC)) {
                        ext4_msg(sb, KERN_ERR,
                                 "Online resizing not (yet) supported with bigalloc");
                        return -EOPNOTSUPP;
                }

                if (EXT4_HAS_INCOMPAT_FEATURE(sb,
                               EXT4_FEATURE_INCOMPAT_META_BG)) {
                        ext4_msg(sb, KERN_ERR,
                                 "Online resizing not (yet) supported with meta_bg");
                        return -EOPNOTSUPP;
                }

                if (copy_from_user(&n_blocks_count, (__u64 __user *)arg,
                                   sizeof(__u64))) {
                        return -EFAULT;
                }

                if (n_blocks_count > MAX_32_NUM &&
                    !EXT4_HAS_INCOMPAT_FEATURE(sb,
                                               EXT4_FEATURE_INCOMPAT_64BIT)) {
                        ext4_msg(sb, KERN_ERR,
                                 "File system only supports 32-bit block numbers");
                        return -EOPNOTSUPP;
                }

                err = ext4_resize_begin(sb);
                if (err)
                        return err;

                err = mnt_want_write(filp->f_path.mnt);
                if (err)
                        goto resizefs_out;

                err = ext4_resize_fs(sb, n_blocks_count);
                if (EXT4_SB(sb)->s_journal) {
                        jbd2_journal_lock_updates(EXT4_SB(sb)->s_journal);
                        err2 = jbd2_journal_flush(EXT4_SB(sb)->s_journal);
                        jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal);
                }
                if (err == 0)
                        err = err2;
                mnt_drop_write(filp->f_path.mnt);
resizefs_out:
                ext4_resize_end(sb);
                return err;
        }

        case FITRIM:
        {
                struct request_queue *q = bdev_get_queue(sb->s_bdev);
@@ -433,6 +494,7 @@ long ext4_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)

        }
        case EXT4_IOC_MOVE_EXT:
        case FITRIM:
        case EXT4_IOC_RESIZE_FS:
                break;
        default:
                return -ENOIOCTLCMD;
@@ -3671,7 +3671,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,

        ext4_group_t group;
        ext4_grpblk_t bit;

        trace_ext4_mb_release_group_pa(pa);
        trace_ext4_mb_release_group_pa(sb, pa);
        BUG_ON(pa->pa_deleted == 0);
        ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
        BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
fs/ext4/resize.c (1219): diff suppressed because it is too large
@@ -1095,7 +1095,7 @@ static int ext4_show_options(struct seq_file *seq, struct dentry *root)

        }
        if (sbi->s_max_batch_time != EXT4_DEF_MAX_BATCH_TIME) {
                seq_printf(seq, ",max_batch_time=%u",
                           (unsigned) sbi->s_min_batch_time);
                           (unsigned) sbi->s_max_batch_time);
        }

        /*
@@ -2005,17 +2005,16 @@ static int ext4_fill_flex_info(struct super_block *sb)

        struct ext4_group_desc *gdp = NULL;
        ext4_group_t flex_group_count;
        ext4_group_t flex_group;
        int groups_per_flex = 0;
        unsigned int groups_per_flex = 0;
        size_t size;
        int i;

        sbi->s_log_groups_per_flex = sbi->s_es->s_log_groups_per_flex;
        groups_per_flex = 1 << sbi->s_log_groups_per_flex;

        if (groups_per_flex < 2) {
        if (sbi->s_log_groups_per_flex < 1 || sbi->s_log_groups_per_flex > 31) {
                sbi->s_log_groups_per_flex = 0;
                return 1;
        }
        groups_per_flex = 1 << sbi->s_log_groups_per_flex;

        /* We allocate both existing and potentially added groups */
        flex_group_count = ((sbi->s_groups_count + groups_per_flex - 1) +
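The reworked ext4_fill_flex_info() check validates s_log_groups_per_flex against 1..31 before computing 1 << log, rather than shifting first and testing the result; a shift count of 32 or more on a 32-bit quantity is undefined, so the old "groups_per_flex < 2" test could be fooled by a corrupted superblock field. A small standalone sketch of the same validate-then-shift pattern (the helper and values are illustrative, not the ext4 code):

/* Illustrative only: reject an untrusted log2 value before shifting. */
#include <stdio.h>

static unsigned int groups_per_flex_from_log(unsigned int log_groups_per_flex)
{
        /* 0 means the feature is effectively unused; 32 or more would be
         * an undefined shift on a 32-bit type, so refuse both up front. */
        if (log_groups_per_flex < 1 || log_groups_per_flex > 31)
                return 0;
        return 1U << log_groups_per_flex;
}

int main(void)
{
        unsigned int logs[] = { 0, 4, 31, 36 }; /* 36 models a corrupted field */

        for (unsigned int i = 0; i < sizeof(logs) / sizeof(logs[0]); i++)
                printf("log=%u -> groups_per_flex=%u\n",
                       logs[i], groups_per_flex_from_log(logs[i]));
        return 0;
}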
@@ -3506,7 +3505,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)

         * of the filesystem.
         */
        if (le32_to_cpu(es->s_first_data_block) >= ext4_blocks_count(es)) {
                ext4_msg(sb, KERN_WARNING, "bad geometry: first data"
                ext4_msg(sb, KERN_WARNING, "bad geometry: first data "
                         "block %u is beyond end of filesystem (%llu)",
                         le32_to_cpu(es->s_first_data_block),
                         ext4_blocks_count(es));
@@ -47,8 +47,9 @@ ext4_xattr_security_set(struct dentry *dentry, const char *name,

                              name, value, size, flags);
}

int ext4_initxattrs(struct inode *inode, const struct xattr *xattr_array,
                    void *fs_info)
static int
ext4_initxattrs(struct inode *inode, const struct xattr *xattr_array,
                void *fs_info)
{
        const struct xattr *xattr;
        handle_t *handle = fs_info;
@@ -429,6 +429,12 @@ void jbd2_journal_commit_transaction(journal_t *journal)

        jbd_debug(3, "JBD2: commit phase 1\n");

        /*
         * Clear revoked flag to reflect there is no revoked buffers
         * in the next transaction which is going to be started.
         */
        jbd2_clear_buffer_revoked_flags(journal);

        /*
         * Switch to a new revoke table.
         */

@@ -47,6 +47,10 @@

 * overwriting the new data. We don't even need to clear the revoke
 * bit here.
 *
 * We cache revoke status of a buffer in the current transaction in b_states
 * bits. As the name says, revokevalid flag indicates that the cached revoke
 * status of a buffer is valid and we can rely on the cached status.
 *
 * Revoke information on buffers is a tri-state value:
 *
 * RevokeValid clear: no cached revoke status, need to look it up
@@ -478,6 +482,36 @@ int jbd2_journal_cancel_revoke(handle_t *handle, struct journal_head *jh)

        return did_revoke;
}

/*
 * journal_clear_revoked_flag clears revoked flag of buffers in
 * revoke table to reflect there is no revoked buffers in the next
 * transaction which is going to be started.
 */
void jbd2_clear_buffer_revoked_flags(journal_t *journal)
{
        struct jbd2_revoke_table_s *revoke = journal->j_revoke;
        int i = 0;

        for (i = 0; i < revoke->hash_size; i++) {
                struct list_head *hash_list;
                struct list_head *list_entry;
                hash_list = &revoke->hash_table[i];

                list_for_each(list_entry, hash_list) {
                        struct jbd2_revoke_record_s *record;
                        struct buffer_head *bh;
                        record = (struct jbd2_revoke_record_s *)list_entry;
                        bh = __find_get_block(journal->j_fs_dev,
                                              record->blocknr,
                                              journal->j_blocksize);
                        if (bh) {
                                clear_buffer_revoked(bh);
                                __brelse(bh);
                        }
                }
        }
}

/* journal_switch_revoke table select j_revoke for next transaction
 * we do not want to suspend any processing until all revokes are
 * written -bzzz
@@ -517,12 +517,13 @@ void jbd2_journal_lock_updates(journal_t *journal)

                        break;

                spin_lock(&transaction->t_handle_lock);
                if (!atomic_read(&transaction->t_updates)) {
                        spin_unlock(&transaction->t_handle_lock);
                        break;
                }
                prepare_to_wait(&journal->j_wait_updates, &wait,
                                TASK_UNINTERRUPTIBLE);
                if (!atomic_read(&transaction->t_updates)) {
                        spin_unlock(&transaction->t_handle_lock);
                        finish_wait(&journal->j_wait_updates, &wait);
                        break;
                }
                spin_unlock(&transaction->t_handle_lock);
                write_unlock(&journal->j_state_lock);
                schedule();
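The jbd2_journal_lock_updates() change re-checks t_updates only after prepare_to_wait() has registered the caller on the wait queue, so a final update completing between the check and the sleep can no longer be missed. Outside the kernel, the analogous discipline is re-checking the predicate under the lock around a condition-variable wait; a small pthread sketch of that pattern (illustrative only, not the jbd2 code):

/* Illustrative pthread analogue: the predicate (updates == 0) is re-checked
 * while holding the mutex, so a final wakeup cannot be lost between the
 * check and the sleep. Build with: cc -pthread. Not jbd2 code. */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  wait_updates = PTHREAD_COND_INITIALIZER;
static int updates = 3;

static void *worker(void *arg)
{
        (void)arg;
        for (int i = 0; i < 3; i++) {
                usleep(1000);
                pthread_mutex_lock(&lock);
                if (--updates == 0)
                        pthread_cond_broadcast(&wait_updates);
                pthread_mutex_unlock(&lock);
        }
        return NULL;
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, worker, NULL);

        pthread_mutex_lock(&lock);
        while (updates != 0)            /* re-check after every wakeup */
                pthread_cond_wait(&wait_updates, &lock);
        pthread_mutex_unlock(&lock);

        printf("all updates drained\n");
        pthread_join(t, NULL);
        return 0;
}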
@@ -1151,6 +1151,7 @@ extern int jbd2_journal_set_revoke(journal_t *, unsigned long long, tid_t);

extern int jbd2_journal_test_revoke(journal_t *, unsigned long long, tid_t);
extern void jbd2_journal_clear_revoke(journal_t *);
extern void jbd2_journal_switch_revoke_table(journal_t *journal);
extern void jbd2_clear_buffer_revoked_flags(journal_t *journal);

/*
 * The log thread user interface:
@@ -573,9 +573,9 @@ TRACE_EVENT(ext4_mb_release_inode_pa,

);

TRACE_EVENT(ext4_mb_release_group_pa,
        TP_PROTO(struct ext4_prealloc_space *pa),
        TP_PROTO(struct super_block *sb, struct ext4_prealloc_space *pa),

        TP_ARGS(pa),
        TP_ARGS(sb, pa),

        TP_STRUCT__entry(
                __field(        dev_t,  dev                     )

@@ -585,7 +585,7 @@ TRACE_EVENT(ext4_mb_release_group_pa,

        ),

        TP_fast_assign(
                __entry->dev            = pa->pa_inode->i_sb->s_dev;
                __entry->dev            = sb->s_dev;
                __entry->pa_pstart      = pa->pa_pstart;
                __entry->pa_len         = pa->pa_len;
        ),