for-4.19-tag
-----BEGIN PGP SIGNATURE-----

iQIzBAABCgAdFiEE8rQSAMVO+zA4DBdWxWXV+ddtWDsFAltxe7QACgkQxWXV+ddt
WDswMA//QlRO+Ln5CH+RlT4fyf1RQUQZblWss2zxrmlo1GRI3Ljf2DNsBE3rD7P4
NSiXfHmgkdjcQP6poPLJwHxwkNd4NFXglYg64wWO10RjHGhKglmH6ztU88wsPfr2
2RZv271/NvYIEkEi6kdyy8ilKeWMshOfyj3+PaeapQn67uJfimyiUvDgUgbvwH3c
yj0nVRLP1C7snNj4Atti/rjXMhG+m1UWfjRkZsmqlBp52k2UAcrtiwQK+DS5b9mL
aWLSaGmIcJtSMkNJPQBST9GTWbJfKTpceoCzkT0o3irvQpN2e2flAJ4ireL8q4mN
MvqJ7giPBFHNDcHEzN6VERvsaA1Rx9Vq20ieQl8JAMd4p/bi5ehN3ww+9vau5zCw
Pc8WeKEILKrLYEAgHOnUO1wxHw994Iv5CA26roTQ0HNXQJjyEZ4m40Ch6LzmfKPm
WKcHX14Uw22GKaFEXHTOpRZ0U0d1cMTcn5zaAajGsB9LwcaiLM+OiFSPtDkwUOB9
QGJHklZVXAD1IH9HFPuq85uUtXTLXbxsw1g8phEJGbmaVxxCOAUAXwEk3qxuZNbz
CHL3G5+l3JEXxfoJSbDW60kr8xic7teqQDszqqP2qlqtP15ty2xc9d5Q8MZajSTZ
H1z9+0gfjYYHrGuAp69MtCbdQhhDSqLyivjJJm0HBaKfVNGW2Xg=
=jBaz
-----END PGP SIGNATURE-----

Merge tag 'for-4.19-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux

Pull btrfs updates from David Sterba:
 "Mostly fixes and cleanups, nothing big, though the notable thing is
  the inserted/deleted lines delta -1124.

  User visible changes:

   - allow defrag on opened read-only files that have rw permissions;
     similar to what dedupe will allow on such files

  Core changes:

   - tree checker improvements, reported by fuzzing:
       * more checks for: block group items, essential trees
       * chunk type validation
       * mount time cross-checks that physical and logical chunks match
       * switch more error codes to EUCLEAN aka EFSCORRUPTED

  Fixes:

   - fsync corner case fixes

   - fix send failure when root has deleted files still open

   - send, fix incorrect file layout after hole punching beyond eof

   - fix races between mount and device scan ioctl, found by fuzzing

   - fix deadlock when delayed iput is called from writeback on the
     same inode; rare but has been observed in practice, also removes
     code

   - fix pinned byte accounting, using the right percpu helpers; this
     should avoid some write IO inefficiency during low space conditions

   - don't remove block group that still has pinned bytes

   - reset on-disk device stats value after replace, otherwise this
     would report stale values for the new device

  Cleanups:

   - time64_t/timespec64 cleanups

   - remove remaining dead code in scrub handling NOCOW extents after
     disabling it in previous cycle

   - simplify fsync regarding ordered extents logic and remove all the
     related code

   - remove redundant arguments in order to reduce stack space
     consumption

   - remove support for V0 type of extents, not in use since 2.6.30

   - remove several unused structure members

   - fewer indirect function calls by inlining some callbacks

   - qgroup rescan timing fixes

   - vfs: iget cleanups"

* tag 'for-4.19-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux: (182 commits)
  btrfs: revert fs_devices state on error of btrfs_init_new_device
  btrfs: Exit gracefully when chunk map cannot be inserted to the tree
  btrfs: Introduce mount time chunk <-> dev extent mapping check
  btrfs: Verify that every chunk has corresponding block group at mount time
  btrfs: Check that each block group has corresponding chunk at mount time
  Btrfs: send, fix incorrect file layout after hole punching beyond eof
  btrfs: Use wrapper macro for rcu string to remove duplicate code
  btrfs: simplify btrfs_iget
  btrfs: lift make_bad_inode into btrfs_iget
  btrfs: simplify IS_ERR/PTR_ERR checks
  btrfs: btrfs_iget never returns an is_bad_inode inode
  btrfs: replace: Reset on-disk dev stats value after replace
  btrfs: extent-tree: Remove unused __btrfs_free_block_rsv
  btrfs: backref: Use ERR_CAST to return error code
  btrfs: Remove redundant btrfs_release_path from btrfs_unlink_subvol
  btrfs: Remove root parameter from btrfs_unlink_subvol
  btrfs: Remove fs_info from btrfs_add_root_ref
  btrfs: Remove fs_info from btrfs_del_root_ref
  btrfs: Remove fs_info from btrfs_del_root
  btrfs: Remove fs_info from btrfs_delete_delayed_dir_index
  ...
commit a1a4f841ec
47 changed files with 1819 additions and 2943 deletions
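The "fix pinned byte accounting, using the right percpu helpers" item in
the message above shows up in the hunks below as percpu_counter_compare()
to __percpu_counter_compare() conversions in disk-io.c and as the new
BTRFS_TOTAL_BYTES_PINNED_BATCH constant in ctree.h. A minimal sketch of
that batched percpu-counter API follows; the counter and batch names are
invented for illustration, and this is not the btrfs code itself:

    /* Sketch only: a percpu counter driven with an explicit batch. */
    #include <linux/percpu_counter.h>
    #include <linux/sizes.h>

    #define MY_PINNED_BATCH SZ_128M /* cf. BTRFS_TOTAL_BYTES_PINNED_BATCH */

    static struct percpu_counter pinned; /* percpu_counter_init() omitted */

    static void pin_bytes(s64 bytes)
    {
            /* Cheap: accumulates in the per-CPU delta until it exceeds the batch. */
            percpu_counter_add_batch(&pinned, bytes, MY_PINNED_BATCH);
    }

    static bool pinned_reaches(s64 threshold)
    {
            /*
             * Batched compare: checks against the summed value with a slack of
             * batch * num_online_cpus first, and only falls back to the exact
             * (expensive) per-CPU sum when the result is in doubt.
             */
            return __percpu_counter_compare(&pinned, threshold,
                                            MY_PINNED_BATCH) >= 0;
    }
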
@@ -30,23 +30,22 @@ struct posix_acl *btrfs_get_acl(struct inode *inode, int type)
 		name = XATTR_NAME_POSIX_ACL_DEFAULT;
 		break;
 	default:
-		BUG();
+		return ERR_PTR(-EINVAL);
 	}
 
-	size = btrfs_getxattr(inode, name, "", 0);
+	size = btrfs_getxattr(inode, name, NULL, 0);
 	if (size > 0) {
 		value = kzalloc(size, GFP_KERNEL);
 		if (!value)
 			return ERR_PTR(-ENOMEM);
 		size = btrfs_getxattr(inode, name, value, size);
 	}
-	if (size > 0) {
+	if (size > 0)
 		acl = posix_acl_from_xattr(&init_user_ns, value, size);
-	} else if (size == -ERANGE || size == -ENODATA || size == 0) {
+	else if (size == -ENODATA || size == 0)
 		acl = NULL;
-	} else {
-		acl = ERR_PTR(-EIO);
-	}
+	else
+		acl = ERR_PTR(size);
 	kfree(value);
 
 	return acl;

@@ -925,7 +925,7 @@ static int add_inline_refs(const struct btrfs_fs_info *fs_info,
 		type = btrfs_get_extent_inline_ref_type(leaf, iref,
 							BTRFS_REF_TYPE_ANY);
 		if (type == BTRFS_REF_TYPE_INVALID)
-			return -EINVAL;
+			return -EUCLEAN;
 
 		offset = btrfs_extent_inline_ref_offset(leaf, iref);
 
@@ -1793,7 +1793,7 @@ static int get_extent_inline_ref(unsigned long *ptr,
 	*out_type = btrfs_get_extent_inline_ref_type(eb, *out_eiref,
 						     BTRFS_REF_TYPE_ANY);
 	if (*out_type == BTRFS_REF_TYPE_INVALID)
-		return -EINVAL;
+		return -EUCLEAN;
 
 	*ptr += btrfs_extent_inline_ref_size(*out_type);
 	WARN_ON(*ptr > end);
@@ -2225,7 +2225,7 @@ struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root,
 
 	fspath = init_data_container(total_bytes);
 	if (IS_ERR(fspath))
-		return (void *)fspath;
+		return ERR_CAST(fspath);
 
 	ifp = kmalloc(sizeof(*ifp), GFP_KERNEL);
 	if (!ifp) {

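Both hunks above move to the conventions of <linux/err.h>: an errno is
encoded into the returned pointer with ERR_PTR(), tested with IS_ERR(),
and forwarded across pointer types with ERR_CAST() instead of a bare
(void *) cast. A self-contained sketch of the idiom with hypothetical
foo/bar types -- not the btrfs functions themselves:

    #include <linux/err.h>
    #include <linux/slab.h>

    struct foo { int x; };
    struct bar { struct foo *f; };

    static struct foo *foo_alloc(void)
    {
            struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

            /* Encode the errno in the pointer on failure. */
            return f ? f : ERR_PTR(-ENOMEM);
    }

    static struct bar *bar_alloc(void)
    {
            struct foo *f = foo_alloc();
            struct bar *b;

            if (IS_ERR(f))
                    return ERR_CAST(f); /* propagate without a raw cast */

            b = kzalloc(sizeof(*b), GFP_KERNEL);
            if (!b) {
                    kfree(f);
                    return ERR_PTR(-ENOMEM);
            }
            b->f = f;
            return b;
    }
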
@@ -178,7 +178,7 @@ struct btrfs_inode {
 	struct btrfs_delayed_node *delayed_node;
 
 	/* File creation time. */
-	struct timespec i_otime;
+	struct timespec64 i_otime;
 
 	/* Hook into fs_info->delayed_iputs */
 	struct list_head delayed_iput;

@@ -1539,7 +1539,12 @@ static int btrfsic_map_block(struct btrfsic_state *state, u64 bytenr, u32 len,
 	}
 
 	device = multi->stripes[0].dev;
-	block_ctx_out->dev = btrfsic_dev_state_lookup(device->bdev->bd_dev);
+	if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state) ||
+	    !device->bdev || !device->name)
+		block_ctx_out->dev = NULL;
+	else
+		block_ctx_out->dev = btrfsic_dev_state_lookup(
+							device->bdev->bd_dev);
 	block_ctx_out->dev_bytenr = multi->stripes[0].physical;
 	block_ctx_out->start = bytenr;
 	block_ctx_out->len = len;
@@ -1624,7 +1629,7 @@ static int btrfsic_read_block(struct btrfsic_state *state,
 		bio = btrfs_io_bio_alloc(num_pages - i);
 		bio_set_dev(bio, block_ctx->dev->bdev);
 		bio->bi_iter.bi_sector = dev_bytenr >> 9;
-		bio_set_op_attrs(bio, REQ_OP_READ, 0);
+		bio->bi_opf = REQ_OP_READ;
 
 		for (j = i; j < num_pages; j++) {
 			ret = bio_add_page(bio, block_ctx->pagev[j],

@@ -5,7 +5,6 @@
 
 #include <linux/kernel.h>
 #include <linux/bio.h>
-#include <linux/buffer_head.h>
 #include <linux/file.h>
 #include <linux/fs.h>
 #include <linux/pagemap.h>
@@ -14,10 +13,7 @@
 #include <linux/init.h>
 #include <linux/string.h>
 #include <linux/backing-dev.h>
-#include <linux/mpage.h>
-#include <linux/swap.h>
 #include <linux/writeback.h>
-#include <linux/bit_spinlock.h>
 #include <linux/slab.h>
 #include <linux/sched/mm.h>
 #include <linux/log2.h>
@@ -303,7 +299,6 @@ blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start,
 	struct bio *bio = NULL;
 	struct compressed_bio *cb;
 	unsigned long bytes_left;
-	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
 	int pg_index = 0;
 	struct page *page;
 	u64 first_byte = disk_start;
@@ -342,9 +337,7 @@ blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start,
 		page = compressed_pages[pg_index];
 		page->mapping = inode->i_mapping;
 		if (bio->bi_iter.bi_size)
-			submit = io_tree->ops->merge_bio_hook(page, 0,
-							PAGE_SIZE,
-							bio, 0);
+			submit = btrfs_merge_bio_hook(page, 0, PAGE_SIZE, bio, 0);
 
 		page->mapping = NULL;
 		if (submit || bio_add_page(bio, page, PAGE_SIZE, 0) <
@@ -613,7 +606,7 @@ blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 	cb->len = bio->bi_iter.bi_size;
 
 	comp_bio = btrfs_bio_alloc(bdev, cur_disk_byte);
-	bio_set_op_attrs (comp_bio, REQ_OP_READ, 0);
+	comp_bio->bi_opf = REQ_OP_READ;
 	comp_bio->bi_private = cb;
 	comp_bio->bi_end_io = end_compressed_bio_read;
 	refcount_set(&cb->pending_bios, 1);
@@ -626,9 +619,8 @@ blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 		page->index = em_start >> PAGE_SHIFT;
 
 		if (comp_bio->bi_iter.bi_size)
-			submit = tree->ops->merge_bio_hook(page, 0,
-							PAGE_SIZE,
-							comp_bio, 0);
+			submit = btrfs_merge_bio_hook(page, 0, PAGE_SIZE,
+						      comp_bio, 0);
 
 		page->mapping = NULL;
 		if (submit || bio_add_page(comp_bio, page, PAGE_SIZE, 0) <
@@ -660,7 +652,7 @@ blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 		}
 
 		comp_bio = btrfs_bio_alloc(bdev, cur_disk_byte);
-		bio_set_op_attrs(comp_bio, REQ_OP_READ, 0);
+		comp_bio->bi_opf = REQ_OP_READ;
 		comp_bio->bi_private = cb;
 		comp_bio->bi_end_io = end_compressed_bio_read;
 

@@ -888,11 +888,7 @@ int btrfs_block_can_be_shared(struct btrfs_root *root,
 	    btrfs_root_last_snapshot(&root->root_item) ||
 	    btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)))
 		return 1;
-#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
-	if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
-	    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
-		return 1;
-#endif
 
 	return 0;
 }
 
@@ -3128,8 +3124,7 @@ int btrfs_search_slot_for_read(struct btrfs_root *root,
 * higher levels
 *
 */
-static void fixup_low_keys(struct btrfs_fs_info *fs_info,
-			   struct btrfs_path *path,
+static void fixup_low_keys(struct btrfs_path *path,
			   struct btrfs_disk_key *key, int level)
{
	int i;
@@ -3181,7 +3176,7 @@ void btrfs_set_item_key_safe(struct btrfs_fs_info *fs_info,
 	btrfs_set_item_key(eb, &disk_key, slot);
 	btrfs_mark_buffer_dirty(eb);
 	if (slot == 0)
-		fixup_low_keys(fs_info, path, &disk_key, 1);
+		fixup_low_keys(path, &disk_key, 1);
 }
 
 /*
@@ -3359,17 +3354,7 @@ static noinline int insert_new_root(struct btrfs_trans_handle *trans,
 
 	root_add_used(root, fs_info->nodesize);
 
-	memzero_extent_buffer(c, 0, sizeof(struct btrfs_header));
 	btrfs_set_header_nritems(c, 1);
-	btrfs_set_header_level(c, level);
-	btrfs_set_header_bytenr(c, c->start);
-	btrfs_set_header_generation(c, trans->transid);
-	btrfs_set_header_backref_rev(c, BTRFS_MIXED_BACKREF_REV);
-	btrfs_set_header_owner(c, root->root_key.objectid);
-
-	write_extent_buffer_fsid(c, fs_info->fsid);
-	write_extent_buffer_chunk_tree_uuid(c, fs_info->chunk_tree_uuid);
-
 	btrfs_set_node_key(c, &lower_key, 0);
 	btrfs_set_node_blockptr(c, 0, lower->start);
 	lower_gen = btrfs_header_generation(lower);
@@ -3498,15 +3483,7 @@ static noinline int split_node(struct btrfs_trans_handle *trans,
 		return PTR_ERR(split);
 
 	root_add_used(root, fs_info->nodesize);
-
-	memzero_extent_buffer(split, 0, sizeof(struct btrfs_header));
-	btrfs_set_header_level(split, btrfs_header_level(c));
-	btrfs_set_header_bytenr(split, split->start);
-	btrfs_set_header_generation(split, trans->transid);
-	btrfs_set_header_backref_rev(split, BTRFS_MIXED_BACKREF_REV);
-	btrfs_set_header_owner(split, root->root_key.objectid);
-	write_extent_buffer_fsid(split, fs_info->fsid);
-	write_extent_buffer_chunk_tree_uuid(split, fs_info->chunk_tree_uuid);
+	ASSERT(btrfs_header_level(c) == level);
 
 	ret = tree_mod_log_eb_copy(fs_info, split, c, 0, mid, c_nritems - mid);
 	if (ret) {
@@ -3945,7 +3922,7 @@ static noinline int __push_leaf_left(struct btrfs_fs_info *fs_info,
 		clean_tree_block(fs_info, right);
 
 	btrfs_item_key(right, &disk_key, 0);
-	fixup_low_keys(fs_info, path, &disk_key, 1);
+	fixup_low_keys(path, &disk_key, 1);
 
 	/* then fixup the leaf pointer in the path */
 	if (path->slots[0] < push_items) {
@@ -4292,15 +4269,6 @@ static noinline int split_leaf(struct btrfs_trans_handle *trans,
 
 	root_add_used(root, fs_info->nodesize);
 
-	memzero_extent_buffer(right, 0, sizeof(struct btrfs_header));
-	btrfs_set_header_bytenr(right, right->start);
-	btrfs_set_header_generation(right, trans->transid);
-	btrfs_set_header_backref_rev(right, BTRFS_MIXED_BACKREF_REV);
-	btrfs_set_header_owner(right, root->root_key.objectid);
-	btrfs_set_header_level(right, 0);
-	write_extent_buffer_fsid(right, fs_info->fsid);
-	write_extent_buffer_chunk_tree_uuid(right, fs_info->chunk_tree_uuid);
-
 	if (split == 0) {
 		if (mid <= slot) {
 			btrfs_set_header_nritems(right, 0);
@@ -4320,7 +4288,7 @@ static noinline int split_leaf(struct btrfs_trans_handle *trans,
 			path->nodes[0] = right;
 			path->slots[0] = 0;
 			if (path->slots[1] == 0)
-				fixup_low_keys(fs_info, path, &disk_key, 1);
+				fixup_low_keys(path, &disk_key, 1);
 		}
 		/*
		 * We create a new leaf 'right' for the required ins_len and
@@ -4642,7 +4610,7 @@ void btrfs_truncate_item(struct btrfs_fs_info *fs_info,
 			btrfs_set_disk_key_offset(&disk_key, offset + size_diff);
 			btrfs_set_item_key(leaf, &disk_key, slot);
 			if (slot == 0)
-				fixup_low_keys(fs_info, path, &disk_key, 1);
+				fixup_low_keys(path, &disk_key, 1);
 		}
 
 	item = btrfs_item_nr(slot);
@@ -4744,7 +4712,7 @@ void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path,
 
 	if (path->slots[0] == 0) {
 		btrfs_cpu_key_to_disk(&disk_key, cpu_key);
-		fixup_low_keys(fs_info, path, &disk_key, 1);
+		fixup_low_keys(path, &disk_key, 1);
 	}
 	btrfs_unlock_up_safe(path, 1);
 
@@ -4886,7 +4854,6 @@ int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root *root,
 static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
		    int level, int slot)
{
-	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct extent_buffer *parent = path->nodes[level];
 	u32 nritems;
 	int ret;
@@ -4919,7 +4886,7 @@ static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
 			struct btrfs_disk_key disk_key;
 
 			btrfs_node_key(parent, &disk_key, 0);
-			fixup_low_keys(fs_info, path, &disk_key, level + 1);
+			fixup_low_keys(path, &disk_key, level + 1);
 	}
 	btrfs_mark_buffer_dirty(parent);
}
@@ -5022,7 +4989,7 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
 			struct btrfs_disk_key disk_key;
 
 			btrfs_item_key(leaf, &disk_key, 0);
-			fixup_low_keys(fs_info, path, &disk_key, 1);
+			fixup_low_keys(path, &disk_key, 1);
 		}
 
 		/* delete the leaf if it is mostly empty */

@@ -55,8 +55,6 @@ struct btrfs_ordered_sum;
 
 #define BTRFS_OLDEST_GENERATION	0ULL
 
-#define BTRFS_COMPAT_EXTENT_TREE_V0
-
 /*
  * the max metadata block size.  This limit is somewhat artificial,
  * but the memmove costs go through the roof for larger blocks.
@@ -86,6 +84,14 @@ static const int btrfs_csum_sizes[] = { 4 };
 
 #define BTRFS_DIRTY_METADATA_THRESH	SZ_32M
 
+/*
+ * Use large batch size to reduce overhead of metadata updates. On the reader
+ * side, we only read it when we are close to ENOSPC and the read overhead is
+ * mostly related to the number of CPUs, so it is OK to use arbitrary large
+ * value here.
+ */
+#define BTRFS_TOTAL_BYTES_PINNED_BATCH	SZ_128M
+
 #define BTRFS_MAX_EXTENT_SIZE	SZ_128M
 
 
@@ -342,8 +348,8 @@ struct btrfs_path {
			sizeof(struct btrfs_item))
struct btrfs_dev_replace {
	u64 replace_state;	/* see #define above */
-	u64 time_started;	/* seconds since 1-Jan-1970 */
-	u64 time_stopped;	/* seconds since 1-Jan-1970 */
+	time64_t time_started;	/* seconds since 1-Jan-1970 */
+	time64_t time_stopped;	/* seconds since 1-Jan-1970 */
	atomic64_t num_write_errors;
	atomic64_t num_uncorrectable_read_errors;
 
@@ -359,8 +365,6 @@ struct btrfs_dev_replace {
	struct btrfs_device *srcdev;
	struct btrfs_device *tgtdev;
 
-	pid_t lock_owner;
-	atomic_t nesting_level;
	struct mutex lock_finishing_cancel_unmount;
	rwlock_t lock;
	atomic_t read_locks;
@@ -1213,7 +1217,6 @@ struct btrfs_root {
	u64 defrag_trans_start;
	struct btrfs_key defrag_progress;
	struct btrfs_key defrag_max;
-	char *name;
 
	/* the dirty list is only used by non-reference counted roots */
	struct list_head dirty_list;
@@ -2428,32 +2431,6 @@ static inline u32 btrfs_file_extent_inline_item_len(
	return btrfs_item_size(eb, e) - BTRFS_FILE_EXTENT_INLINE_DATA_START;
}
 
-/* this returns the number of file bytes represented by the inline item.
- * If an item is compressed, this is the uncompressed size
- */
-static inline u32 btrfs_file_extent_inline_len(const struct extent_buffer *eb,
-					       int slot,
-					       const struct btrfs_file_extent_item *fi)
-{
-	struct btrfs_map_token token;
-
-	btrfs_init_map_token(&token);
-	/*
-	 * return the space used on disk if this item isn't
-	 * compressed or encoded
-	 */
-	if (btrfs_token_file_extent_compression(eb, fi, &token) == 0 &&
-	    btrfs_token_file_extent_encryption(eb, fi, &token) == 0 &&
-	    btrfs_token_file_extent_other_encoding(eb, fi, &token) == 0) {
-		return btrfs_file_extent_inline_item_len(eb,
-							 btrfs_item_nr(slot));
-	}
-
-	/* otherwise use the ram bytes field */
-	return btrfs_token_file_extent_ram_bytes(eb, fi, &token);
-}
-
-
/* btrfs_dev_stats_item */
static inline u64 btrfs_dev_stats_value(const struct extent_buffer *eb,
					const struct btrfs_dev_stats_item *ptr,
@@ -2676,7 +2653,6 @@ int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
				     u64 offset, u64 ram_bytes,
				     struct btrfs_key *ins);
int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
-				   struct btrfs_fs_info *fs_info,
				   u64 root_objectid, u64 owner, u64 offset,
				   struct btrfs_key *ins);
int btrfs_reserve_extent(struct btrfs_root *root, u64 ram_bytes, u64 num_bytes,
@@ -2716,15 +2692,14 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info);
int btrfs_read_block_groups(struct btrfs_fs_info *info);
int btrfs_can_relocate(struct btrfs_fs_info *fs_info, u64 bytenr);
int btrfs_make_block_group(struct btrfs_trans_handle *trans,
-			   struct btrfs_fs_info *fs_info, u64 bytes_used,
-			   u64 type, u64 chunk_offset, u64 size);
+			   u64 bytes_used, u64 type, u64 chunk_offset,
+			   u64 size);
void btrfs_add_raid_kobjects(struct btrfs_fs_info *fs_info);
struct btrfs_trans_handle *btrfs_start_trans_remove_block_group(
				struct btrfs_fs_info *fs_info,
				const u64 chunk_offset);
int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
-			     struct btrfs_fs_info *fs_info, u64 group_start,
-			     struct extent_map *em);
+			     u64 group_start, struct extent_map *em);
void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info);
void btrfs_get_block_group_trimming(struct btrfs_block_group_cache *cache);
void btrfs_put_block_group_trimming(struct btrfs_block_group_cache *cache);
@@ -2786,7 +2761,6 @@ void btrfs_init_metadata_block_rsv(struct btrfs_fs_info *fs_info,
			      unsigned short type);
void btrfs_free_block_rsv(struct btrfs_fs_info *fs_info,
			  struct btrfs_block_rsv *rsv);
-void __btrfs_free_block_rsv(struct btrfs_block_rsv *rsv);
int btrfs_block_rsv_add(struct btrfs_root *root,
			struct btrfs_block_rsv *block_rsv, u64 num_bytes,
			enum btrfs_reserve_flush_enum flush);
@@ -2803,8 +2777,7 @@ int btrfs_cond_migrate_bytes(struct btrfs_fs_info *fs_info,
void btrfs_block_rsv_release(struct btrfs_fs_info *fs_info,
			     struct btrfs_block_rsv *block_rsv,
			     u64 num_bytes);
-int btrfs_inc_block_group_ro(struct btrfs_fs_info *fs_info,
-			     struct btrfs_block_group_cache *cache);
+int btrfs_inc_block_group_ro(struct btrfs_block_group_cache *cache);
void btrfs_dec_block_group_ro(struct btrfs_block_group_cache *cache);
void btrfs_put_block_group_cache(struct btrfs_fs_info *info);
u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo);
@@ -2812,8 +2785,7 @@ int btrfs_error_unpin_extent_range(struct btrfs_fs_info *fs_info,
				   u64 start, u64 end);
int btrfs_discard_extent(struct btrfs_fs_info *fs_info, u64 bytenr,
			 u64 num_bytes, u64 *actual_bytes);
-int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
-			    struct btrfs_fs_info *fs_info, u64 type);
+int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans, u64 type);
int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range);
 
int btrfs_init_space_info(struct btrfs_fs_info *fs_info);
@@ -2822,10 +2794,10 @@ int btrfs_delayed_refs_qgroup_accounting(struct btrfs_trans_handle *trans,
int btrfs_start_write_no_snapshotting(struct btrfs_root *root);
void btrfs_end_write_no_snapshotting(struct btrfs_root *root);
void btrfs_wait_for_snapshot_creation(struct btrfs_root *root);
-void check_system_chunk(struct btrfs_trans_handle *trans,
-			struct btrfs_fs_info *fs_info, const u64 type);
+void check_system_chunk(struct btrfs_trans_handle *trans, const u64 type);
u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
		       u64 start, u64 end);
+void btrfs_mark_bg_unused(struct btrfs_block_group_cache *bg);
 
/* ctree.c */
int btrfs_bin_search(struct extent_buffer *eb, const struct btrfs_key *key,
@@ -3011,16 +2983,14 @@ void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,
int btrfs_old_root_level(struct btrfs_root *root, u64 time_seq);
 
/* root-item.c */
-int btrfs_add_root_ref(struct btrfs_trans_handle *trans,
-		       struct btrfs_fs_info *fs_info,
-		       u64 root_id, u64 ref_id, u64 dirid, u64 sequence,
-		       const char *name, int name_len);
-int btrfs_del_root_ref(struct btrfs_trans_handle *trans,
-		       struct btrfs_fs_info *fs_info,
-		       u64 root_id, u64 ref_id, u64 dirid, u64 *sequence,
-		       const char *name, int name_len);
+int btrfs_add_root_ref(struct btrfs_trans_handle *trans, u64 root_id,
+		       u64 ref_id, u64 dirid, u64 sequence, const char *name,
+		       int name_len);
+int btrfs_del_root_ref(struct btrfs_trans_handle *trans, u64 root_id,
+		       u64 ref_id, u64 dirid, u64 *sequence, const char *name,
+		       int name_len);
int btrfs_del_root(struct btrfs_trans_handle *trans,
-		   struct btrfs_fs_info *fs_info, const struct btrfs_key *key);
+		   const struct btrfs_key *key);
int btrfs_insert_root(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		      const struct btrfs_key *key,
		      struct btrfs_root_item *item);
@@ -3196,7 +3166,7 @@ int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
			 size_t size, struct bio *bio,
			 unsigned long bio_flags);
-void btrfs_set_range_writeback(void *private_data, u64 start, u64 end);
+void btrfs_set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end);
vm_fault_t btrfs_page_mkwrite(struct vm_fault *vmf);
int btrfs_readpage(struct file *file, struct page *page);
void btrfs_evict_inode(struct inode *inode);
@@ -3452,7 +3422,7 @@ do {								\
#ifdef CONFIG_BTRFS_ASSERT
 
__cold
-static inline void assfail(char *expr, char *file, int line)
+static inline void assfail(const char *expr, const char *file, int line)
{
	pr_err("assertion failed: %s, file: %s, line: %d\n",
	       expr, file, line);
@@ -3465,6 +3435,13 @@ static inline void assfail(char *expr, char *file, int line)
#define ASSERT(expr)	((void)0)
#endif
 
+__cold
+static inline void btrfs_print_v0_err(struct btrfs_fs_info *fs_info)
+{
+	btrfs_err(fs_info,
+"Unsupported V0 extent filesystem detected. Aborting. Please re-create your filesystem with a newer kernel");
+}
+
__printf(5, 6)
__cold
void __btrfs_handle_fs_error(struct btrfs_fs_info *fs_info, const char *function,

@@ -1222,7 +1222,7 @@ int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
 
int btrfs_commit_inode_delayed_inode(struct btrfs_inode *inode)
{
-	struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
+	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_trans_handle *trans;
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
	struct btrfs_path *path;
@@ -1418,7 +1418,6 @@ void btrfs_balance_delayed_items(struct btrfs_fs_info *fs_info)
 
/* Will return 0 or -ENOMEM */
int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
-				   struct btrfs_fs_info *fs_info,
				   const char *name, int name_len,
				   struct btrfs_inode *dir,
				   struct btrfs_disk_key *disk_key, u8 type,
@@ -1458,11 +1457,10 @@ int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
	 */
	BUG_ON(ret);
 
-
	mutex_lock(&delayed_node->mutex);
	ret = __btrfs_add_delayed_insertion_item(delayed_node, delayed_item);
	if (unlikely(ret)) {
-		btrfs_err(fs_info,
+		btrfs_err(trans->fs_info,
			  "err add delayed dir index item(name: %.*s) into the insertion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
			  name_len, name, delayed_node->root->objectid,
			  delayed_node->inode_id, ret);
@@ -1495,7 +1493,6 @@ static int btrfs_delete_delayed_insertion_item(struct btrfs_fs_info *fs_info,
}
 
int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
-				   struct btrfs_fs_info *fs_info,
				   struct btrfs_inode *dir, u64 index)
{
	struct btrfs_delayed_node *node;
@@ -1511,7 +1508,8 @@ int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
	item_key.type = BTRFS_DIR_INDEX_KEY;
	item_key.offset = index;
 
-	ret = btrfs_delete_delayed_insertion_item(fs_info, node, &item_key);
+	ret = btrfs_delete_delayed_insertion_item(trans->fs_info, node,
+						  &item_key);
	if (!ret)
		goto end;
 
@@ -1533,7 +1531,7 @@ int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
	mutex_lock(&node->mutex);
	ret = __btrfs_add_delayed_deletion_item(node, item);
	if (unlikely(ret)) {
-		btrfs_err(fs_info,
+		btrfs_err(trans->fs_info,
			  "err add delayed dir index item(index: %llu) into the deletion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
			  index, node->root->objectid, node->inode_id, ret);
		BUG();
@@ -1837,7 +1835,7 @@ int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
 
int btrfs_delayed_delete_inode_ref(struct btrfs_inode *inode)
{
-	struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
+	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_delayed_node *delayed_node;
 
	/*

@@ -86,14 +86,12 @@ static inline void btrfs_init_delayed_root(
}
 
int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
-				   struct btrfs_fs_info *fs_info,
				   const char *name, int name_len,
				   struct btrfs_inode *dir,
				   struct btrfs_disk_key *disk_key, u8 type,
				   u64 index);
 
int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
-				   struct btrfs_fs_info *fs_info,
				   struct btrfs_inode *dir, u64 index);
 
int btrfs_inode_delayed_dir_index_count(struct btrfs_inode *inode);

@@ -709,13 +709,13 @@ static void init_delayed_ref_common(struct btrfs_fs_info *fs_info,
 * to make sure the delayed ref is eventually processed before this
 * transaction commits.
 */
-int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
-			       struct btrfs_trans_handle *trans,
+int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes, u64 parent,
			       u64 ref_root, int level, int action,
			       struct btrfs_delayed_extent_op *extent_op,
			       int *old_ref_mod, int *new_ref_mod)
{
+	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_delayed_tree_ref *ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
@@ -730,27 +730,33 @@ int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
	if (!ref)
		return -ENOMEM;
 
-	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
-	if (!head_ref) {
-		kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
-		return -ENOMEM;
-	}
-
-	if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
-	    is_fstree(ref_root)) {
-		record = kmalloc(sizeof(*record), GFP_NOFS);
-		if (!record) {
-			kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
-			kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
-			return -ENOMEM;
-		}
-	}
-
	if (parent)
		ref_type = BTRFS_SHARED_BLOCK_REF_KEY;
	else
		ref_type = BTRFS_TREE_BLOCK_REF_KEY;
+
	init_delayed_ref_common(fs_info, &ref->node, bytenr, num_bytes,
				ref_root, action, ref_type);
	ref->root = ref_root;
	ref->parent = parent;
	ref->level = level;
 
+	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
+	if (!head_ref)
+		goto free_ref;
+
+	if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
+	    is_fstree(ref_root)) {
+		record = kmalloc(sizeof(*record), GFP_NOFS);
+		if (!record)
+			goto free_head_ref;
+	}
+
	init_delayed_ref_head(head_ref, record, bytenr, num_bytes,
			      ref_root, 0, action, false, is_system);
	head_ref->extent_op = extent_op;
@@ -779,25 +785,18 @@ int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
		btrfs_qgroup_trace_extent_post(fs_info, record);
 
	return 0;
+
+free_head_ref:
+	kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
+free_ref:
+	kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
+
+	return -ENOMEM;
}
 
/*
 * add a delayed data ref. it's similar to btrfs_add_delayed_tree_ref.
 */
-int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
-			       struct btrfs_trans_handle *trans,
+int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes,
			       u64 parent, u64 ref_root,
			       u64 owner, u64 offset, u64 reserved, int action,
			       int *old_ref_mod, int *new_ref_mod)
{
+	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_delayed_data_ref *ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;

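The btrfs_add_delayed_tree_ref() hunks above replace duplicated
kmem_cache_free() calls at every failure site with the kernel's usual
goto-unwind layout: allocate in order, and on failure jump to a label
that frees only what already exists. A generic sketch of that shape in
plain C with hypothetical types -- not the btrfs function:

    #include <stdlib.h>

    struct a { int x; };
    struct b { int y; };

    static int setup(struct a **pa, struct b **pb)
    {
            struct a *a = malloc(sizeof(*a));
            struct b *b;

            if (!a)
                    return -1;

            b = malloc(sizeof(*b));
            if (!b)
                    goto free_a; /* only 'a' exists at this point */

            *pa = a;
            *pb = b;
            return 0;

    free_a:
            free(a);
            return -1;
    }
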
@@ -234,14 +234,12 @@ static inline void btrfs_put_delayed_ref_head(struct btrfs_delayed_ref_head *head)
	kmem_cache_free(btrfs_delayed_ref_head_cachep, head);
}
 
-int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
-			       struct btrfs_trans_handle *trans,
+int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes, u64 parent,
			       u64 ref_root, int level, int action,
			       struct btrfs_delayed_extent_op *extent_op,
			       int *old_ref_mod, int *new_ref_mod);
-int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
-			       struct btrfs_trans_handle *trans,
+int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes,
			       u64 parent, u64 ref_root,
			       u64 owner, u64 offset, u64 reserved, int action,

@@ -6,14 +6,9 @@
#include <linux/sched.h>
#include <linux/bio.h>
#include <linux/slab.h>
-#include <linux/buffer_head.h>
#include <linux/blkdev.h>
-#include <linux/random.h>
-#include <linux/iocontext.h>
-#include <linux/capability.h>
#include <linux/kthread.h>
#include <linux/math64.h>
-#include <asm/div64.h>
#include "ctree.h"
#include "extent_map.h"
#include "disk-io.h"
@@ -465,7 +460,7 @@ int btrfs_dev_replace_start(struct btrfs_fs_info *fs_info,
	 * go to the tgtdev as well (refer to btrfs_map_block()).
	 */
	dev_replace->replace_state = BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED;
-	dev_replace->time_started = get_seconds();
+	dev_replace->time_started = ktime_get_real_seconds();
	dev_replace->cursor_left = 0;
	dev_replace->committed_cursor_left = 0;
	dev_replace->cursor_left_last_write_of_item = 0;
@@ -511,7 +506,7 @@ int btrfs_dev_replace_start(struct btrfs_fs_info *fs_info,
	dev_replace->srcdev = NULL;
	dev_replace->tgtdev = NULL;
	btrfs_dev_replace_write_unlock(dev_replace);
-	btrfs_destroy_dev_replace_tgtdev(fs_info, tgt_device);
+	btrfs_destroy_dev_replace_tgtdev(tgt_device);
	return ret;
}
 
@@ -618,7 +613,7 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
		: BTRFS_IOCTL_DEV_REPLACE_STATE_FINISHED;
	dev_replace->tgtdev = NULL;
	dev_replace->srcdev = NULL;
-	dev_replace->time_stopped = get_seconds();
+	dev_replace->time_stopped = ktime_get_real_seconds();
	dev_replace->item_needs_writeback = 1;
 
	/* replace old device with new one in mapping tree */
@@ -637,7 +632,7 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		btrfs_rm_dev_replace_blocked(fs_info);
		if (tgt_device)
-			btrfs_destroy_dev_replace_tgtdev(fs_info, tgt_device);
+			btrfs_destroy_dev_replace_tgtdev(tgt_device);
		btrfs_rm_dev_replace_unblocked(fs_info);
		mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);
 
@@ -663,7 +658,7 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
	tgt_device->commit_total_bytes = src_device->commit_total_bytes;
	tgt_device->commit_bytes_used = src_device->bytes_used;
 
-	btrfs_assign_next_active_device(fs_info, src_device, tgt_device);
+	btrfs_assign_next_active_device(src_device, tgt_device);
 
	list_add(&tgt_device->dev_alloc_list, &fs_info->fs_devices->alloc_list);
	fs_info->fs_devices->rw_devices++;
@@ -672,10 +667,16 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
 
	btrfs_rm_dev_replace_blocked(fs_info);
 
-	btrfs_rm_dev_replace_remove_srcdev(fs_info, src_device);
+	btrfs_rm_dev_replace_remove_srcdev(src_device);
 
	btrfs_rm_dev_replace_unblocked(fs_info);
 
+	/*
+	 * Increment dev_stats_ccnt so that btrfs_run_dev_stats() will
+	 * update on-disk dev stats value during commit transaction
+	 */
+	atomic_inc(&tgt_device->dev_stats_ccnt);
+
	/*
	 * this is again a consistent state where no dev_replace procedure
	 * is running, the target device is part of the filesystem, the
@@ -807,7 +808,7 @@ int btrfs_dev_replace_cancel(struct btrfs_fs_info *fs_info)
		break;
	}
	dev_replace->replace_state = BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED;
-	dev_replace->time_stopped = get_seconds();
+	dev_replace->time_stopped = ktime_get_real_seconds();
	dev_replace->item_needs_writeback = 1;
	btrfs_dev_replace_write_unlock(dev_replace);
	btrfs_scrub_cancel(fs_info);
@@ -826,7 +827,7 @@ int btrfs_dev_replace_cancel(struct btrfs_fs_info *fs_info)
			btrfs_dev_name(tgt_device));
 
		if (tgt_device)
-			btrfs_destroy_dev_replace_tgtdev(fs_info, tgt_device);
+			btrfs_destroy_dev_replace_tgtdev(tgt_device);
 
leave:
	mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);
@@ -848,7 +849,7 @@ void btrfs_dev_replace_suspend_for_unmount(struct btrfs_fs_info *fs_info)
	case BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED:
		dev_replace->replace_state =
				BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED;
-		dev_replace->time_stopped = get_seconds();
+		dev_replace->time_stopped = ktime_get_real_seconds();
		dev_replace->item_needs_writeback = 1;
		btrfs_info(fs_info, "suspending dev_replace for unmount");
		break;

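The get_seconds() to ktime_get_real_seconds() conversions above, together
with the u64 to time64_t fields in ctree.h, belong to the time64_t
cleanups from the pull message. A sketch of the two helpers involved;
both are real kernel APIs, while the struct and function here are
hypothetical. The dev-replace timestamps want wall-clock time because
they are stored on disk, whereas transaction_kthread (in the disk-io.c
hunks below) switches to monotonic ktime_get_seconds() for interval
arithmetic:

    #include <linux/timekeeping.h>

    struct my_replace_stats {
            time64_t time_started; /* wall clock: meaningful on disk */
            time64_t last_checked; /* monotonic: for in-memory intervals */
    };

    static void my_replace_begin(struct my_replace_stats *s)
    {
            /* Wall-clock seconds since the epoch, 64-bit (y2038-safe). */
            s->time_started = ktime_get_real_seconds();
            /* Monotonic seconds since boot, immune to settimeofday(). */
            s->last_checked = ktime_get_seconds();
    }
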
@@ -160,8 +160,8 @@ int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, struct btrfs_root
	}
	btrfs_release_path(path);
 
-	ret2 = btrfs_insert_delayed_dir_index(trans, root->fs_info, name,
-					      name_len, dir, &disk_key, type, index);
+	ret2 = btrfs_insert_delayed_dir_index(trans, name, name_len, dir,
+					      &disk_key, type, index);
out_free:
	btrfs_free_path(path);
	if (ret)

@@ -5,8 +5,6 @@
 
#include <linux/fs.h>
#include <linux/blkdev.h>
-#include <linux/scatterlist.h>
-#include <linux/swap.h>
#include <linux/radix-tree.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h>
@@ -54,7 +52,6 @@
 
static const struct extent_io_ops btree_extent_io_ops;
static void end_workqueue_fn(struct btrfs_work *work);
-static void free_fs_root(struct btrfs_root *root);
static void btrfs_destroy_ordered_extents(struct btrfs_root *root);
static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
				      struct btrfs_fs_info *fs_info);
@@ -108,12 +105,9 @@ void __cold btrfs_end_io_wq_exit(void)
 */
struct async_submit_bio {
	void *private_data;
-	struct btrfs_fs_info *fs_info;
	struct bio *bio;
	extent_submit_bio_start_t *submit_bio_start;
-	extent_submit_bio_done_t *submit_bio_done;
	int mirror_num;
	unsigned long bio_flags;
	/*
	 * bio_offset is optional, can be used if the pages in the bio
	 * can't tell us where in the file the bio should go
@@ -212,7 +206,7 @@ struct extent_map *btree_get_extent(struct btrfs_inode *inode,
		struct page *page, size_t pg_offset, u64 start, u64 len,
		int create)
{
-	struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
+	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct extent_map_tree *em_tree = &inode->extent_tree;
	struct extent_map *em;
	int ret;
@@ -615,8 +609,8 @@ static int btree_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
 
	found_start = btrfs_header_bytenr(eb);
	if (found_start != eb->start) {
-		btrfs_err_rl(fs_info, "bad tree block start %llu %llu",
-			     found_start, eb->start);
+		btrfs_err_rl(fs_info, "bad tree block start, want %llu have %llu",
+			     eb->start, found_start);
		ret = -EIO;
		goto err;
	}
@@ -628,8 +622,8 @@ static int btree_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
	}
	found_level = btrfs_header_level(eb);
	if (found_level >= BTRFS_MAX_LEVEL) {
-		btrfs_err(fs_info, "bad tree block level %d",
-			  (int)btrfs_header_level(eb));
+		btrfs_err(fs_info, "bad tree block level %d on %llu",
+			  (int)btrfs_header_level(eb), eb->start);
		ret = -EIO;
		goto err;
	}
@@ -779,7 +773,7 @@ static void run_one_async_done(struct btrfs_work *work)
		return;
	}
 
-	async->submit_bio_done(async->private_data, async->bio, async->mirror_num);
+	btrfs_submit_bio_done(async->private_data, async->bio, async->mirror_num);
}
 
static void run_one_async_free(struct btrfs_work *work)
@@ -793,8 +787,7 @@ static void run_one_async_free(struct btrfs_work *work)
blk_status_t btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
				 int mirror_num, unsigned long bio_flags,
				 u64 bio_offset, void *private_data,
-				 extent_submit_bio_start_t *submit_bio_start,
-				 extent_submit_bio_done_t *submit_bio_done)
+				 extent_submit_bio_start_t *submit_bio_start)
{
	struct async_submit_bio *async;
 
@@ -803,16 +796,13 @@ blk_status_t btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
		return BLK_STS_RESOURCE;
 
	async->private_data = private_data;
-	async->fs_info = fs_info;
	async->bio = bio;
	async->mirror_num = mirror_num;
	async->submit_bio_start = submit_bio_start;
-	async->submit_bio_done = submit_bio_done;
 
	btrfs_init_work(&async->work, btrfs_worker_helper, run_one_async_start,
			run_one_async_done, run_one_async_free);
 
	async->bio_flags = bio_flags;
	async->bio_offset = bio_offset;
 
	async->status = 0;
@@ -851,24 +841,6 @@ static blk_status_t btree_submit_bio_start(void *private_data, struct bio *bio,
	return btree_csum_one_bio(bio);
}
 
-static blk_status_t btree_submit_bio_done(void *private_data, struct bio *bio,
-					  int mirror_num)
-{
-	struct inode *inode = private_data;
-	blk_status_t ret;
-
-	/*
-	 * when we're called for a write, we're already in the async
-	 * submission context.  Just jump into btrfs_map_bio
-	 */
-	ret = btrfs_map_bio(btrfs_sb(inode->i_sb), bio, mirror_num, 1);
-	if (ret) {
-		bio->bi_status = ret;
-		bio_endio(bio);
-	}
-	return ret;
-}
-
static int check_async_write(struct btrfs_inode *bi)
{
	if (atomic_read(&bi->sync_writers))
@@ -911,8 +883,7 @@ static blk_status_t btree_submit_bio_hook(void *private_data, struct bio *bio,
		 */
		ret = btrfs_wq_submit_bio(fs_info, bio, mirror_num, 0,
					  bio_offset, private_data,
-					  btree_submit_bio_start,
-					  btree_submit_bio_done);
+					  btree_submit_bio_start);
	}
 
	if (ret)
@@ -961,8 +932,9 @@ static int btree_writepages(struct address_space *mapping,
 
		fs_info = BTRFS_I(mapping->host)->root->fs_info;
		/* this is a bit racy, but that's ok */
-		ret = percpu_counter_compare(&fs_info->dirty_metadata_bytes,
-					     BTRFS_DIRTY_METADATA_THRESH);
+		ret = __percpu_counter_compare(&fs_info->dirty_metadata_bytes,
+					       BTRFS_DIRTY_METADATA_THRESH,
+					       fs_info->dirty_metadata_batch);
		if (ret < 0)
			return 0;
	}
@@ -1181,7 +1153,6 @@ static void __setup_root(struct btrfs_root *root, struct btrfs_fs_info *fs_info,
	root->highest_objectid = 0;
	root->nr_delalloc_inodes = 0;
	root->nr_ordered_extents = 0;
-	root->name = NULL;
	root->inode_tree = RB_ROOT;
	INIT_RADIX_TREE(&root->delayed_nodes_tree, GFP_ATOMIC);
	root->block_rsv = NULL;
@@ -1292,15 +1263,7 @@ struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
		goto fail;
	}
 
-	memzero_extent_buffer(leaf, 0, sizeof(struct btrfs_header));
-	btrfs_set_header_bytenr(leaf, leaf->start);
-	btrfs_set_header_generation(leaf, trans->transid);
-	btrfs_set_header_backref_rev(leaf, BTRFS_MIXED_BACKREF_REV);
-	btrfs_set_header_owner(leaf, objectid);
	root->node = leaf;
-
-	write_extent_buffer_fsid(leaf, fs_info->fsid);
-	write_extent_buffer_chunk_tree_uuid(leaf, fs_info->chunk_tree_uuid);
	btrfs_mark_buffer_dirty(leaf);
 
	root->commit_root = btrfs_root_node(root);
@@ -1374,14 +1337,8 @@ static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans,
		return ERR_CAST(leaf);
	}
 
-	memzero_extent_buffer(leaf, 0, sizeof(struct btrfs_header));
-	btrfs_set_header_bytenr(leaf, leaf->start);
-	btrfs_set_header_generation(leaf, trans->transid);
-	btrfs_set_header_backref_rev(leaf, BTRFS_MIXED_BACKREF_REV);
-	btrfs_set_header_owner(leaf, BTRFS_TREE_LOG_OBJECTID);
	root->node = leaf;
 
-	write_extent_buffer_fsid(root->node, fs_info->fsid);
	btrfs_mark_buffer_dirty(root->node);
	btrfs_tree_unlock(root->node);
	return root;
@@ -1546,7 +1503,7 @@ int btrfs_init_fs_root(struct btrfs_root *root)
 
	return 0;
fail:
-	/* the caller is responsible to call free_fs_root */
+	/* The caller is responsible to call btrfs_free_fs_root */
	return ret;
}
 
@@ -1651,14 +1608,14 @@ struct btrfs_root *btrfs_get_fs_root(struct btrfs_fs_info *fs_info,
	ret = btrfs_insert_fs_root(fs_info, root);
	if (ret) {
		if (ret == -EEXIST) {
-			free_fs_root(root);
+			btrfs_free_fs_root(root);
			goto again;
		}
		goto fail;
	}
	return root;
fail:
-	free_fs_root(root);
+	btrfs_free_fs_root(root);
	return ERR_PTR(ret);
}
 
@@ -1803,7 +1760,7 @@ static int transaction_kthread(void *arg)
	struct btrfs_trans_handle *trans;
	struct btrfs_transaction *cur;
	u64 transid;
-	unsigned long now;
+	time64_t now;
	unsigned long delay;
	bool cannot_commit;
 
@@ -1819,7 +1776,7 @@ static int transaction_kthread(void *arg)
			goto sleep;
		}
 
-		now = get_seconds();
+		now = ktime_get_seconds();
		if (cur->state < TRANS_STATE_BLOCKED &&
		    !test_bit(BTRFS_FS_NEED_ASYNC_COMMIT, &fs_info->flags) &&
		    (now < cur->start_time ||
@@ -2196,8 +2153,6 @@ static void btrfs_init_btree_inode(struct btrfs_fs_info *fs_info)
 
static void btrfs_init_dev_replace_locks(struct btrfs_fs_info *fs_info)
{
-	fs_info->dev_replace.lock_owner = 0;
-	atomic_set(&fs_info->dev_replace.nesting_level, 0);
	mutex_init(&fs_info->dev_replace.lock_finishing_cancel_unmount);
	rwlock_init(&fs_info->dev_replace.lock);
	atomic_set(&fs_info->dev_replace.read_locks, 0);
@@ -3075,6 +3030,13 @@ int open_ctree(struct super_block *sb,
	fs_info->generation = generation;
	fs_info->last_trans_committed = generation;
 
+	ret = btrfs_verify_dev_extents(fs_info);
+	if (ret) {
+		btrfs_err(fs_info,
+			  "failed to verify dev extents against chunks: %d",
+			  ret);
+		goto fail_block_groups;
+	}
	ret = btrfs_recover_balance(fs_info);
	if (ret) {
		btrfs_err(fs_info, "failed to recover balance: %d", ret);
@@ -3875,10 +3837,10 @@ void btrfs_drop_and_free_fs_root(struct btrfs_fs_info *fs_info,
		__btrfs_remove_free_space_cache(root->free_ino_pinned);
	if (root->free_ino_ctl)
		__btrfs_remove_free_space_cache(root->free_ino_ctl);
-	free_fs_root(root);
+	btrfs_free_fs_root(root);
}
 
-static void free_fs_root(struct btrfs_root *root)
+void btrfs_free_fs_root(struct btrfs_root *root)
{
	iput(root->ino_cache_inode);
	WARN_ON(!RB_EMPTY_ROOT(&root->inode_tree));
@@ -3890,15 +3852,9 @@ static void free_fs_root(struct btrfs_root *root)
	free_extent_buffer(root->commit_root);
	kfree(root->free_ino_ctl);
	kfree(root->free_ino_pinned);
-	kfree(root->name);
	btrfs_put_fs_root(root);
}
 
-void btrfs_free_fs_root(struct btrfs_root *root)
-{
-	free_fs_root(root);
-}
-
int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info)
{
	u64 root_objectid = 0;
@@ -4104,10 +4060,10 @@ void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
	/*
	 * This is a fast path so only do this check if we have sanity tests
-	 * enabled.  Normal people shouldn't be marking dummy buffers as dirty
+	 * enabled.  Normal people shouldn't be using umapped buffers as dirty
	 * outside of the sanity tests.
	 */
-	if (unlikely(test_bit(EXTENT_BUFFER_DUMMY, &buf->bflags)))
+	if (unlikely(test_bit(EXTENT_BUFFER_UNMAPPED, &buf->bflags)))
		return;
#endif
	root = BTRFS_I(buf->pages[0]->mapping->host)->root;
@@ -4150,8 +4106,9 @@ static void __btrfs_btree_balance_dirty(struct btrfs_fs_info *fs_info,
	if (flush_delayed)
		btrfs_balance_delayed_items(fs_info);
 
-	ret = percpu_counter_compare(&fs_info->dirty_metadata_bytes,
-				     BTRFS_DIRTY_METADATA_THRESH);
+	ret = __percpu_counter_compare(&fs_info->dirty_metadata_bytes,
+				       BTRFS_DIRTY_METADATA_THRESH,
+				       fs_info->dirty_metadata_batch);
	if (ret > 0) {
		balance_dirty_pages_ratelimited(fs_info->btree_inode->i_mapping);
	}
@@ -4563,21 +4520,11 @@ static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info)
	return 0;
}
 
-static struct btrfs_fs_info *btree_fs_info(void *private_data)
-{
-	struct inode *inode = private_data;
-
-	return btrfs_sb(inode->i_sb);
-}
-
static const struct extent_io_ops btree_extent_io_ops = {
	/* mandatory callbacks */
	.submit_bio_hook = btree_submit_bio_hook,
	.readpage_end_io_hook = btree_readpage_end_io_hook,
-	/* note we're sharing with inode.c for the merge bio hook */
-	.merge_bio_hook = btrfs_merge_bio_hook,
	.readpage_io_failed_hook = btree_io_failed_hook,
-	.set_range_writeback = btrfs_set_range_writeback,
-	.tree_fs_info = btree_fs_info,
 
	/* optional callbacks */
};

@@ -120,8 +120,9 @@ blk_status_t btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
blk_status_t btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
			int mirror_num, unsigned long bio_flags,
			u64 bio_offset, void *private_data,
-			extent_submit_bio_start_t *submit_bio_start,
-			extent_submit_bio_done_t *submit_bio_done);
+			extent_submit_bio_start_t *submit_bio_start);
+blk_status_t btrfs_submit_bio_done(void *private_data, struct bio *bio,
+				   int mirror_num);
int btrfs_write_tree_block(struct extent_buffer *buf);
void btrfs_wait_tree_block_writeback(struct extent_buffer *buf);
int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,

(File diff suppressed because it is too large.)

@ -140,14 +140,6 @@ static int add_extent_changeset(struct extent_state *state, unsigned bits,
|
|||
|
||||
static void flush_write_bio(struct extent_page_data *epd);
|
||||
|
||||
static inline struct btrfs_fs_info *
|
||||
tree_fs_info(struct extent_io_tree *tree)
|
||||
{
|
||||
if (tree->ops)
|
||||
return tree->ops->tree_fs_info(tree->private_data);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
int __init extent_io_init(void)
|
||||
{
|
||||
extent_state_cache = kmem_cache_create("btrfs_extent_state",
|
||||
|
@ -564,8 +556,10 @@ alloc_extent_state_atomic(struct extent_state *prealloc)
|
|||
|
||||
static void extent_io_tree_panic(struct extent_io_tree *tree, int err)
|
||||
{
|
||||
btrfs_panic(tree_fs_info(tree), err,
|
||||
"Locking error: Extent tree was modified by another thread while locked.");
|
||||
struct inode *inode = tree->private_data;
|
||||
|
||||
btrfs_panic(btrfs_sb(inode->i_sb), err,
|
||||
"locking error: extent tree was modified by another thread while locked");
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -1386,14 +1380,6 @@ void extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end)
|
|||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* helper function to set both pages and extents in the tree writeback
|
||||
*/
|
||||
static void set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
|
||||
{
|
||||
tree->ops->set_range_writeback(tree->private_data, start, end);
|
||||
}
|
||||
|
||||
/* find the first state struct with 'bits' set after 'start', and
|
||||
* return it. tree->lock must be held. NULL will returned if
|
||||
* nothing was found after 'start'
|
||||
|
@ -2059,7 +2045,7 @@ int repair_eb_io_failure(struct btrfs_fs_info *fs_info,
|
|||
struct extent_buffer *eb, int mirror_num)
|
||||
{
|
||||
u64 start = eb->start;
|
||||
unsigned long i, num_pages = num_extent_pages(eb->start, eb->len);
|
||||
int i, num_pages = num_extent_pages(eb);
|
||||
int ret = 0;
|
||||
|
||||
if (sb_rdonly(fs_info->sb))
|
||||
|
@ -2398,7 +2384,7 @@ static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset,
|
|||
start - page_offset(page),
|
||||
(int)phy_offset, failed_bio->bi_end_io,
|
||||
NULL);
|
||||
bio_set_op_attrs(bio, REQ_OP_READ, read_mode);
|
||||
bio->bi_opf = REQ_OP_READ | read_mode;
|
||||
|
||||
btrfs_debug(btrfs_sb(inode->i_sb),
|
||||
"Repair Read Error: submitting new read[%#x] to this_mirror=%d, in_validation=%d",
|
||||
|
@ -2790,8 +2776,8 @@ static int submit_extent_page(unsigned int opf, struct extent_io_tree *tree,
|
|||
else
|
||||
contig = bio_end_sector(bio) == sector;
|
||||
|
||||
if (tree->ops && tree->ops->merge_bio_hook(page, offset,
|
||||
page_size, bio, bio_flags))
|
||||
if (tree->ops && btrfs_merge_bio_hook(page, offset, page_size,
|
||||
bio, bio_flags))
|
||||
can_merge = false;
|
||||
|
||||
if (prev_bio_flags != bio_flags || !contig || !can_merge ||
|
||||
|
@ -3422,7 +3408,7 @@ static noinline_for_stack int __extent_writepage_io(struct inode *inode,
|
|||
continue;
|
||||
}
|
||||
|
||||
set_range_writeback(tree, cur, cur + iosize - 1);
|
||||
btrfs_set_range_writeback(tree, cur, cur + iosize - 1);
|
||||
if (!PageWriteback(page)) {
|
||||
btrfs_err(BTRFS_I(inode)->root->fs_info,
|
||||
"page %lu not writeback, cur %llu end %llu",
|
||||
|
@ -3538,7 +3524,7 @@ lock_extent_buffer_for_io(struct extent_buffer *eb,
|
|||
struct btrfs_fs_info *fs_info,
|
||||
struct extent_page_data *epd)
|
||||
{
|
||||
unsigned long i, num_pages;
|
||||
int i, num_pages;
|
||||
int flush = 0;
|
||||
int ret = 0;
|
||||
|
||||
|
@ -3588,7 +3574,7 @@ lock_extent_buffer_for_io(struct extent_buffer *eb,
|
|||
if (!ret)
|
||||
return ret;
|
||||
|
||||
num_pages = num_extent_pages(eb->start, eb->len);
|
||||
num_pages = num_extent_pages(eb);
|
||||
for (i = 0; i < num_pages; i++) {
|
||||
struct page *p = eb->pages[i];
|
||||
|
||||
|
@ -3712,13 +3698,13 @@ static noinline_for_stack int write_one_eb(struct extent_buffer *eb,
|
|||
struct extent_io_tree *tree = &BTRFS_I(fs_info->btree_inode)->io_tree;
|
||||
u64 offset = eb->start;
|
||||
u32 nritems;
|
||||
unsigned long i, num_pages;
|
||||
int i, num_pages;
|
||||
unsigned long start, end;
|
||||
unsigned int write_flags = wbc_to_write_flags(wbc) | REQ_META;
|
||||
int ret = 0;
|
||||
|
||||
clear_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags);
|
||||
num_pages = num_extent_pages(eb->start, eb->len);
|
||||
num_pages = num_extent_pages(eb);
|
||||
atomic_set(&eb->io_pages, num_pages);
|
||||
|
||||
/* set btree blocks beyond nritems with 0 to avoid stale content. */
|
||||
|
@@ -4643,23 +4629,20 @@ int extent_buffer_under_io(struct extent_buffer *eb)
 }
 
 /*
- * Helper for releasing extent buffer page.
+ * Release all pages attached to the extent buffer.
  */
-static void btrfs_release_extent_buffer_page(struct extent_buffer *eb)
+static void btrfs_release_extent_buffer_pages(struct extent_buffer *eb)
 {
-	unsigned long index;
-	struct page *page;
-	int mapped = !test_bit(EXTENT_BUFFER_DUMMY, &eb->bflags);
+	int i;
+	int num_pages;
+	int mapped = !test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
 
 	BUG_ON(extent_buffer_under_io(eb));
 
-	index = num_extent_pages(eb->start, eb->len);
-	if (index == 0)
-		return;
+	num_pages = num_extent_pages(eb);
+	for (i = 0; i < num_pages; i++) {
+		struct page *page = eb->pages[i];
 
-	do {
-		index--;
-		page = eb->pages[index];
-		if (!page)
-			continue;
 		if (mapped)
@@ -4691,7 +4674,7 @@ static void btrfs_release_extent_buffer_page(struct extent_buffer *eb)
 
 		/* One for when we allocated the page */
 		put_page(page);
-	} while (index != 0);
+	}
 }
 
 /*
@@ -4699,7 +4682,7 @@ static void btrfs_release_extent_buffer_page(struct extent_buffer *eb)
  */
 static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
 {
-	btrfs_release_extent_buffer_page(eb);
+	btrfs_release_extent_buffer_pages(eb);
 	__free_extent_buffer(eb);
 }
 
@@ -4743,10 +4726,10 @@ __alloc_extent_buffer(struct btrfs_fs_info *fs_info, u64 start,
 
 struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src)
 {
-	unsigned long i;
+	int i;
 	struct page *p;
 	struct extent_buffer *new;
-	unsigned long num_pages = num_extent_pages(src->start, src->len);
+	int num_pages = num_extent_pages(src);
 
 	new = __alloc_extent_buffer(src->fs_info, src->start, src->len);
 	if (new == NULL)
@@ -4766,7 +4749,7 @@ struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src)
 	}
 
 	set_bit(EXTENT_BUFFER_UPTODATE, &new->bflags);
-	set_bit(EXTENT_BUFFER_DUMMY, &new->bflags);
+	set_bit(EXTENT_BUFFER_UNMAPPED, &new->bflags);
 
 	return new;
 }
@@ -4775,15 +4758,14 @@ struct extent_buffer *__alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
 						u64 start, unsigned long len)
 {
 	struct extent_buffer *eb;
-	unsigned long num_pages;
-	unsigned long i;
-
-	num_pages = num_extent_pages(start, len);
+	int num_pages;
+	int i;
 
 	eb = __alloc_extent_buffer(fs_info, start, len);
 	if (!eb)
 		return NULL;
 
+	num_pages = num_extent_pages(eb);
 	for (i = 0; i < num_pages; i++) {
 		eb->pages[i] = alloc_page(GFP_NOFS);
 		if (!eb->pages[i])
@@ -4791,7 +4773,7 @@ struct extent_buffer *__alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
 	}
 	set_extent_buffer_uptodate(eb);
 	btrfs_set_header_nritems(eb, 0);
-	set_bit(EXTENT_BUFFER_DUMMY, &eb->bflags);
+	set_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
 
 	return eb;
 err:
@@ -4843,11 +4825,11 @@ static void check_buffer_tree_ref(struct extent_buffer *eb)
 static void mark_extent_buffer_accessed(struct extent_buffer *eb,
 		struct page *accessed)
 {
-	unsigned long num_pages, i;
+	int num_pages, i;
 
 	check_buffer_tree_ref(eb);
 
-	num_pages = num_extent_pages(eb->start, eb->len);
+	num_pages = num_extent_pages(eb);
 	for (i = 0; i < num_pages; i++) {
 		struct page *p = eb->pages[i];
 
@@ -4944,8 +4926,8 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
 					  u64 start)
 {
 	unsigned long len = fs_info->nodesize;
-	unsigned long num_pages = num_extent_pages(start, len);
-	unsigned long i;
+	int num_pages;
+	int i;
 	unsigned long index = start >> PAGE_SHIFT;
 	struct extent_buffer *eb;
 	struct extent_buffer *exists = NULL;
@@ -4967,6 +4949,7 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
 	if (!eb)
 		return ERR_PTR(-ENOMEM);
 
+	num_pages = num_extent_pages(eb);
 	for (i = 0; i < num_pages; i++, index++) {
 		p = find_or_create_page(mapping, index, GFP_NOFS|__GFP_NOFAIL);
 		if (!p) {
@@ -5009,8 +4992,11 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
 			uptodate = 0;
 
 		/*
-		 * see below about how we avoid a nasty race with release page
-		 * and why we unlock later
+		 * We can't unlock the pages just yet since the extent buffer
+		 * hasn't been properly inserted in the radix tree, this
+		 * opens a race with btree_releasepage which can free a page
+		 * while we are still filling in all pages for the buffer and
+		 * we could crash.
 		 */
 	}
 	if (uptodate)
@@ -5039,21 +5025,12 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
 	set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags);
 
 	/*
-	 * there is a race where release page may have
-	 * tried to find this extent buffer in the radix
-	 * but failed. It will tell the VM it is safe to
-	 * reclaim the, and it will clear the page private bit.
-	 * We must make sure to set the page private bit properly
-	 * after the extent buffer is in the radix tree so
-	 * it doesn't get lost
+	 * Now it's safe to unlock the pages because any calls to
+	 * btree_releasepage will correctly detect that a page belongs to a
+	 * live buffer and won't free them prematurely.
 	 */
-	SetPageChecked(eb->pages[0]);
-	for (i = 1; i < num_pages; i++) {
-		p = eb->pages[i];
-		ClearPageChecked(p);
-		unlock_page(p);
-	}
-	unlock_page(eb->pages[0]);
+	for (i = 0; i < num_pages; i++)
+		unlock_page(eb->pages[i]);
 	return eb;
 
 free_eb:
@@ -5075,9 +5052,10 @@ static inline void btrfs_release_extent_buffer_rcu(struct rcu_head *head)
 	__free_extent_buffer(eb);
 }
 
-/* Expects to have eb->eb_lock already held */
 static int release_extent_buffer(struct extent_buffer *eb)
 {
+	lockdep_assert_held(&eb->refs_lock);
+
 	WARN_ON(atomic_read(&eb->refs) == 0);
 	if (atomic_dec_and_test(&eb->refs)) {
 		if (test_and_clear_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags)) {
@@ -5094,9 +5072,9 @@ static int release_extent_buffer(struct extent_buffer *eb)
 		}
 
 		/* Should be safe to release our pages at this point */
-		btrfs_release_extent_buffer_page(eb);
+		btrfs_release_extent_buffer_pages(eb);
 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
-		if (unlikely(test_bit(EXTENT_BUFFER_DUMMY, &eb->bflags))) {
+		if (unlikely(test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags))) {
 			__free_extent_buffer(eb);
 			return 1;
 		}
@@ -5127,7 +5105,7 @@ void free_extent_buffer(struct extent_buffer *eb)
 
 	spin_lock(&eb->refs_lock);
 	if (atomic_read(&eb->refs) == 2 &&
-	    test_bit(EXTENT_BUFFER_DUMMY, &eb->bflags))
+	    test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags))
 		atomic_dec(&eb->refs);
 
 	if (atomic_read(&eb->refs) == 2 &&
@@ -5159,11 +5137,11 @@ void free_extent_buffer_stale(struct extent_buffer *eb)
 
 void clear_extent_buffer_dirty(struct extent_buffer *eb)
 {
-	unsigned long i;
-	unsigned long num_pages;
+	int i;
+	int num_pages;
 	struct page *page;
 
-	num_pages = num_extent_pages(eb->start, eb->len);
+	num_pages = num_extent_pages(eb);
 
 	for (i = 0; i < num_pages; i++) {
 		page = eb->pages[i];
@@ -5189,15 +5167,15 @@ void clear_extent_buffer_dirty(struct extent_buffer *eb)
 
 int set_extent_buffer_dirty(struct extent_buffer *eb)
 {
-	unsigned long i;
-	unsigned long num_pages;
+	int i;
+	int num_pages;
 	int was_dirty = 0;
 
 	check_buffer_tree_ref(eb);
 
 	was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
 
-	num_pages = num_extent_pages(eb->start, eb->len);
+	num_pages = num_extent_pages(eb);
 	WARN_ON(atomic_read(&eb->refs) == 0);
 	WARN_ON(!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags));
 
@@ -5208,12 +5186,12 @@ int set_extent_buffer_dirty(struct extent_buffer *eb)
 
 void clear_extent_buffer_uptodate(struct extent_buffer *eb)
 {
-	unsigned long i;
+	int i;
 	struct page *page;
-	unsigned long num_pages;
+	int num_pages;
 
 	clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
-	num_pages = num_extent_pages(eb->start, eb->len);
+	num_pages = num_extent_pages(eb);
 	for (i = 0; i < num_pages; i++) {
 		page = eb->pages[i];
 		if (page)
@@ -5223,12 +5201,12 @@ void clear_extent_buffer_uptodate(struct extent_buffer *eb)
 
 void set_extent_buffer_uptodate(struct extent_buffer *eb)
 {
-	unsigned long i;
+	int i;
 	struct page *page;
-	unsigned long num_pages;
+	int num_pages;
 
 	set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
-	num_pages = num_extent_pages(eb->start, eb->len);
+	num_pages = num_extent_pages(eb);
 	for (i = 0; i < num_pages; i++) {
 		page = eb->pages[i];
 		SetPageUptodate(page);
@@ -5238,13 +5216,13 @@ void set_extent_buffer_uptodate(struct extent_buffer *eb)
 int read_extent_buffer_pages(struct extent_io_tree *tree,
 			     struct extent_buffer *eb, int wait, int mirror_num)
 {
-	unsigned long i;
+	int i;
 	struct page *page;
 	int err;
 	int ret = 0;
 	int locked_pages = 0;
 	int all_uptodate = 1;
-	unsigned long num_pages;
+	int num_pages;
 	unsigned long num_reads = 0;
 	struct bio *bio = NULL;
 	unsigned long bio_flags = 0;
@@ -5252,7 +5230,7 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
 	if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
 		return 0;
 
-	num_pages = num_extent_pages(eb->start, eb->len);
+	num_pages = num_extent_pages(eb);
 	for (i = 0; i < num_pages; i++) {
 		page = eb->pages[i];
 		if (wait == WAIT_NONE) {
@@ -5576,11 +5554,11 @@ void copy_extent_buffer_full(struct extent_buffer *dst,
 			     struct extent_buffer *src)
 {
 	int i;
-	unsigned num_pages;
+	int num_pages;
 
 	ASSERT(dst->len == src->len);
 
-	num_pages = num_extent_pages(dst->start, dst->len);
+	num_pages = num_extent_pages(dst);
 	for (i = 0; i < num_pages; i++)
 		copy_page(page_address(dst->pages[i]),
 				page_address(src->pages[i]));

fs/btrfs/extent_io.h
@@ -46,7 +46,7 @@
 #define EXTENT_BUFFER_STALE 6
 #define EXTENT_BUFFER_WRITEBACK 7
 #define EXTENT_BUFFER_READ_ERR 8        /* read IO error */
-#define EXTENT_BUFFER_DUMMY 9
+#define EXTENT_BUFFER_UNMAPPED 9
 #define EXTENT_BUFFER_IN_TREE 10
 #define EXTENT_BUFFER_WRITE_ERR 11    /* write IO error */
 
@@ -92,9 +92,6 @@ typedef blk_status_t (extent_submit_bio_hook_t)(void *private_data, struct bio *
 typedef blk_status_t (extent_submit_bio_start_t)(void *private_data,
 		struct bio *bio, u64 bio_offset);
 
-typedef blk_status_t (extent_submit_bio_done_t)(void *private_data,
-		struct bio *bio, int mirror_num);
-
 struct extent_io_ops {
 	/*
 	 * The following callbacks must be allways defined, the function
@@ -104,12 +101,7 @@ struct extent_io_ops {
 	int (*readpage_end_io_hook)(struct btrfs_io_bio *io_bio, u64 phy_offset,
 				    struct page *page, u64 start, u64 end,
 				    int mirror);
-	int (*merge_bio_hook)(struct page *page, unsigned long offset,
-			      size_t size, struct bio *bio,
-			      unsigned long bio_flags);
 	int (*readpage_io_failed_hook)(struct page *page, int failed_mirror);
-	struct btrfs_fs_info *(*tree_fs_info)(void *private_data);
-	void (*set_range_writeback)(void *private_data, u64 start, u64 end);
 
 	/*
 	 * Optional hooks, called if the pointer is not NULL
@@ -440,10 +432,10 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
 			     int mirror_num);
 void wait_on_extent_buffer_writeback(struct extent_buffer *eb);
 
-static inline unsigned long num_extent_pages(u64 start, u64 len)
+static inline int num_extent_pages(const struct extent_buffer *eb)
 {
-	return ((start + len + PAGE_SIZE - 1) >> PAGE_SHIFT) -
-		(start >> PAGE_SHIFT);
+	return (round_up(eb->start + eb->len, PAGE_SIZE) >> PAGE_SHIFT) -
+	       (eb->start >> PAGE_SHIFT);
}
 
 static inline void extent_buffer_get(struct extent_buffer *eb)
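num_extent_pages() now takes the extent buffer itself instead of a (start, len) pair, and the arithmetic is spelled with round_up(). A small standalone C sketch of the computation (PAGE_SIZE, PAGE_SHIFT and round_up are redefined locally here purely for illustration):

	#include <stdio.h>

	#define PAGE_SIZE  4096UL
	#define PAGE_SHIFT 12
	/* power-of-two round up, matching the kernel macro's behavior */
	#define round_up(x, y) (((x) + (y) - 1) & ~((y) - 1))

	static int num_extent_pages(unsigned long start, unsigned long len)
	{
		return (round_up(start + len, PAGE_SIZE) >> PAGE_SHIFT) -
		       (start >> PAGE_SHIFT);
	}

	int main(void)
	{
		/* a 16KiB node at offset 12KiB covers pages 3..6: 4 pages */
		printf("%d\n", num_extent_pages(12288, 16384));
		return 0;
	}

Both the old open-coded expression and the round_up() form compute the index of the first page past the buffer minus the index of its first page; only the spelling changed.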
fs/btrfs/file-item.c
@@ -922,7 +922,7 @@ void btrfs_extent_item_to_extent_map(struct btrfs_inode *inode,
 				     const bool new_inline,
 				     struct extent_map *em)
 {
-	struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
+	struct btrfs_fs_info *fs_info = inode->root->fs_info;
 	struct btrfs_root *root = inode->root;
 	struct extent_buffer *leaf = path->nodes[0];
 	const int slot = path->slots[0];
@@ -942,7 +942,7 @@ void btrfs_extent_item_to_extent_map(struct btrfs_inode *inode,
 			btrfs_file_extent_num_bytes(leaf, fi);
 	} else if (type == BTRFS_FILE_EXTENT_INLINE) {
 		size_t size;
-		size = btrfs_file_extent_inline_len(leaf, slot, fi);
+		size = btrfs_file_extent_ram_bytes(leaf, fi);
 		extent_end = ALIGN(extent_start + size,
 				   fs_info->sectorsize);
 	}
fs/btrfs/file.c
@@ -5,14 +5,11 @@
 
 #include <linux/fs.h>
 #include <linux/pagemap.h>
-#include <linux/highmem.h>
 #include <linux/time.h>
 #include <linux/init.h>
 #include <linux/string.h>
 #include <linux/backing-dev.h>
-#include <linux/mpage.h>
 #include <linux/falloc.h>
-#include <linux/swap.h>
 #include <linux/writeback.h>
 #include <linux/compat.h>
 #include <linux/slab.h>
@@ -83,7 +80,7 @@ static int __compare_inode_defrag(struct inode_defrag *defrag1,
 static int __btrfs_add_inode_defrag(struct btrfs_inode *inode,
 				    struct inode_defrag *defrag)
 {
-	struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
+	struct btrfs_fs_info *fs_info = inode->root->fs_info;
 	struct inode_defrag *entry;
 	struct rb_node **p;
 	struct rb_node *parent = NULL;
@@ -135,8 +132,8 @@ static inline int __need_auto_defrag(struct btrfs_fs_info *fs_info)
 int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
 			   struct btrfs_inode *inode)
 {
-	struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
 	struct btrfs_root *root = inode->root;
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct inode_defrag *defrag;
 	u64 transid;
 	int ret;
@@ -185,7 +182,7 @@ int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
 static void btrfs_requeue_inode_defrag(struct btrfs_inode *inode,
 				       struct inode_defrag *defrag)
 {
-	struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
+	struct btrfs_fs_info *fs_info = inode->root->fs_info;
 	int ret;
 
 	if (!__need_auto_defrag(fs_info))
@@ -833,8 +830,7 @@ int __btrfs_drop_extents(struct btrfs_trans_handle *trans,
 				btrfs_file_extent_num_bytes(leaf, fi);
 		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
 			extent_end = key.offset +
-				btrfs_file_extent_inline_len(leaf,
-						     path->slots[0], fi);
+				btrfs_file_extent_ram_bytes(leaf, fi);
 		} else {
 			/* can't happen */
 			BUG();
@@ -1133,7 +1129,7 @@ static int extent_mergeable(struct extent_buffer *leaf, int slot,
 int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
 			      struct btrfs_inode *inode, u64 start, u64 end)
 {
-	struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
+	struct btrfs_fs_info *fs_info = trans->fs_info;
 	struct btrfs_root *root = inode->root;
 	struct extent_buffer *leaf;
 	struct btrfs_path *path;
@@ -1470,7 +1466,7 @@ lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct page **pages,
 				u64 *lockstart, u64 *lockend,
 				struct extent_state **cached_state)
 {
-	struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
+	struct btrfs_fs_info *fs_info = inode->root->fs_info;
 	u64 start_pos;
 	u64 last_pos;
 	int i;
@@ -1526,7 +1522,7 @@ lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct page **pages,
 static noinline int check_can_nocow(struct btrfs_inode *inode, loff_t pos,
 				    size_t *write_bytes)
 {
-	struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
+	struct btrfs_fs_info *fs_info = inode->root->fs_info;
 	struct btrfs_root *root = inode->root;
 	struct btrfs_ordered_extent *ordered;
 	u64 lockstart, lockend;
@@ -1569,10 +1565,11 @@ static noinline int check_can_nocow(struct btrfs_inode *inode, loff_t pos,
 	return ret;
 }
 
-static noinline ssize_t __btrfs_buffered_write(struct file *file,
-					       struct iov_iter *i,
-					       loff_t pos)
+static noinline ssize_t btrfs_buffered_write(struct kiocb *iocb,
+					     struct iov_iter *i)
 {
+	struct file *file = iocb->ki_filp;
+	loff_t pos = iocb->ki_pos;
 	struct inode *inode = file_inode(file);
 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	struct btrfs_root *root = BTRFS_I(inode)->root;
@@ -1804,7 +1801,7 @@ static ssize_t __btrfs_direct_write(struct kiocb *iocb, struct iov_iter *from)
 {
 	struct file *file = iocb->ki_filp;
 	struct inode *inode = file_inode(file);
-	loff_t pos = iocb->ki_pos;
+	loff_t pos;
 	ssize_t written;
 	ssize_t written_buffered;
 	loff_t endbyte;
@@ -1815,8 +1812,8 @@ static ssize_t __btrfs_direct_write(struct kiocb *iocb, struct iov_iter *from)
 	if (written < 0 || !iov_iter_count(from))
 		return written;
 
-	pos += written;
-	written_buffered = __btrfs_buffered_write(file, from, pos);
+	pos = iocb->ki_pos;
+	written_buffered = btrfs_buffered_write(iocb, from);
 	if (written_buffered < 0) {
 		err = written_buffered;
 		goto out;
@@ -1953,7 +1950,7 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
 	if (iocb->ki_flags & IOCB_DIRECT) {
 		num_written = __btrfs_direct_write(iocb, from);
 	} else {
-		num_written = __btrfs_buffered_write(file, from, pos);
+		num_written = btrfs_buffered_write(iocb, from);
 		if (num_written > 0)
 			iocb->ki_pos = pos + num_written;
 		if (clean_page)
@@ -2042,7 +2039,6 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
 	struct btrfs_trans_handle *trans;
 	struct btrfs_log_ctx ctx;
 	int ret = 0, err;
-	bool full_sync = false;
 	u64 len;
 
 	/*
@@ -2066,96 +2062,21 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
 
 	inode_lock(inode);
 	atomic_inc(&root->log_batch);
-	full_sync = test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
-			     &BTRFS_I(inode)->runtime_flags);
+
 	/*
-	 * We might have have had more pages made dirty after calling
-	 * start_ordered_ops and before acquiring the inode's i_mutex.
+	 * We have to do this here to avoid the priority inversion of waiting on
+	 * IO of a lower priority task while holding a transaciton open.
 	 */
-	if (full_sync) {
-		/*
-		 * For a full sync, we need to make sure any ordered operations
-		 * start and finish before we start logging the inode, so that
-		 * all extents are persisted and the respective file extent
-		 * items are in the fs/subvol btree.
-		 */
-		ret = btrfs_wait_ordered_range(inode, start, len);
-	} else {
-		/*
-		 * Start any new ordered operations before starting to log the
-		 * inode. We will wait for them to finish in btrfs_sync_log().
-		 *
-		 * Right before acquiring the inode's mutex, we might have new
-		 * writes dirtying pages, which won't immediately start the
-		 * respective ordered operations - that is done through the
-		 * fill_delalloc callbacks invoked from the writepage and
-		 * writepages address space operations. So make sure we start
-		 * all ordered operations before starting to log our inode. Not
-		 * doing this means that while logging the inode, writeback
-		 * could start and invoke writepage/writepages, which would call
-		 * the fill_delalloc callbacks (cow_file_range,
-		 * submit_compressed_extents). These callbacks add first an
-		 * extent map to the modified list of extents and then create
-		 * the respective ordered operation, which means in
-		 * tree-log.c:btrfs_log_inode() we might capture all existing
-		 * ordered operations (with btrfs_get_logged_extents()) before
-		 * the fill_delalloc callback adds its ordered operation, and by
-		 * the time we visit the modified list of extent maps (with
-		 * btrfs_log_changed_extents()), we see and process the extent
-		 * map they created. We then use the extent map to construct a
-		 * file extent item for logging without waiting for the
-		 * respective ordered operation to finish - this file extent
-		 * item points to a disk location that might not have yet been
-		 * written to, containing random data - so after a crash a log
-		 * replay will make our inode have file extent items that point
-		 * to disk locations containing invalid data, as we returned
-		 * success to userspace without waiting for the respective
-		 * ordered operation to finish, because it wasn't captured by
-		 * btrfs_get_logged_extents().
-		 */
-		ret = start_ordered_ops(inode, start, end);
-	}
+	ret = btrfs_wait_ordered_range(inode, start, len);
 	if (ret) {
 		inode_unlock(inode);
 		goto out;
 	}
 	atomic_inc(&root->log_batch);
 
-	/*
-	 * If the last transaction that changed this file was before the current
-	 * transaction and we have the full sync flag set in our inode, we can
-	 * bail out now without any syncing.
-	 *
-	 * Note that we can't bail out if the full sync flag isn't set. This is
-	 * because when the full sync flag is set we start all ordered extents
-	 * and wait for them to fully complete - when they complete they update
-	 * the inode's last_trans field through:
-	 *
-	 *	btrfs_finish_ordered_io() ->
-	 *		btrfs_update_inode_fallback() ->
-	 *			btrfs_update_inode() ->
-	 *				btrfs_set_inode_last_trans()
-	 *
-	 * So we are sure that last_trans is up to date and can do this check to
-	 * bail out safely. For the fast path, when the full sync flag is not
-	 * set in our inode, we can not do it because we start only our ordered
-	 * extents and don't wait for them to complete (that is when
-	 * btrfs_finish_ordered_io runs), so here at this point their last_trans
-	 * value might be less than or equals to fs_info->last_trans_committed,
-	 * and setting a speculative last_trans for an inode when a buffered
-	 * write is made (such as fs_info->generation + 1 for example) would not
-	 * be reliable since after setting the value and before fsync is called
-	 * any number of transactions can start and commit (transaction kthread
-	 * commits the current transaction periodically), and a transaction
-	 * commit does not start nor waits for ordered extents to complete.
-	 */
 	smp_mb();
 	if (btrfs_inode_in_log(BTRFS_I(inode), fs_info->generation) ||
-	    (full_sync && BTRFS_I(inode)->last_trans <=
-	     fs_info->last_trans_committed) ||
-	    (!btrfs_have_ordered_extents_in_range(inode, start, len) &&
-	     BTRFS_I(inode)->last_trans
-	     <= fs_info->last_trans_committed)) {
+	    BTRFS_I(inode)->last_trans <= fs_info->last_trans_committed) {
 		/*
 		 * We've had everything committed since the last time we were
 		 * modified so clear this flag in case it was set for whatever
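The net effect of this hunk: fsync no longer distinguishes the full-sync and fast paths when dealing with ordered extents; it always waits for them before any transaction is opened, which is what removes the priority inversion named in the new comment. A compressed sketch of the resulting control flow (simplified, error label inlined; not the verbatim kernel code):

	inode_lock(inode);
	atomic_inc(&root->log_batch);

	/* flush and wait while no transaction is held open */
	ret = btrfs_wait_ordered_range(inode, start, len);
	if (ret) {
		inode_unlock(inode);
		return ret;
	}
	atomic_inc(&root->log_batch);
	/* only now is last_trans checked and a transaction possibly started */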
@@ -2239,13 +2160,6 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
 				goto out;
 			}
 		}
-		if (!full_sync) {
-			ret = btrfs_wait_ordered_range(inode, start, len);
-			if (ret) {
-				btrfs_end_transaction(trans);
-				goto out;
-			}
-		}
 		ret = btrfs_commit_transaction(trans);
 	} else {
 		ret = btrfs_end_transaction(trans);
@@ -2310,7 +2224,7 @@ static int fill_holes(struct btrfs_trans_handle *trans,
 		      struct btrfs_inode *inode,
 		      struct btrfs_path *path, u64 offset, u64 end)
 {
-	struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
+	struct btrfs_fs_info *fs_info = trans->fs_info;
 	struct btrfs_root *root = inode->root;
 	struct extent_buffer *leaf;
 	struct btrfs_file_extent_item *fi;

fs/btrfs/free-space-cache.c

@@ -71,10 +71,6 @@ static struct inode *__lookup_free_space_inode(struct btrfs_root *root,
 	inode = btrfs_iget(fs_info->sb, &location, root, NULL);
 	if (IS_ERR(inode))
 		return inode;
-	if (is_bad_inode(inode)) {
-		iput(inode);
-		return ERR_PTR(-ENOENT);
-	}
 
 	mapping_set_gfp_mask(inode->i_mapping,
 			mapping_gfp_constraint(inode->i_mapping,
@@ -300,9 +296,9 @@ static int io_ctl_init(struct btrfs_io_ctl *io_ctl, struct inode *inode,
 	if (btrfs_ino(BTRFS_I(inode)) != BTRFS_FREE_INO_OBJECTID)
 		check_crcs = 1;
 
-	/* Make sure we can fit our crcs into the first page */
+	/* Make sure we can fit our crcs and generation into the first page */
 	if (write && check_crcs &&
-	    (num_pages * sizeof(u32)) >= PAGE_SIZE)
+	    (num_pages * sizeof(u32) + sizeof(u64)) > PAGE_SIZE)
 		return -ENOSPC;
 
 	memset(io_ctl, 0, sizeof(struct btrfs_io_ctl));
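The tightened bound accounts for the u64 generation stamp that shares the first page with the per-page crc array. A standalone C sketch of the check, assuming 4KiB pages:

	#include <stdint.h>
	#include <stdio.h>

	#define PAGE_SIZE 4096UL

	/* first page must hold one u32 crc per page plus a u64 generation */
	static int crcs_fit(unsigned long num_pages)
	{
		return num_pages * sizeof(uint32_t) + sizeof(uint64_t) <= PAGE_SIZE;
	}

	int main(void)
	{
		/* 1022 crcs + 8 bytes == 4096 bytes: fits; 1023 crcs do not */
		printf("%d %d\n", crcs_fit(1022), crcs_fit(1023));
		return 0;
	}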
@@ -547,7 +543,7 @@ static int io_ctl_add_bitmap(struct btrfs_io_ctl *io_ctl, void *bitmap)
 		io_ctl_map_page(io_ctl, 0);
 	}
 
-	memcpy(io_ctl->cur, bitmap, PAGE_SIZE);
+	copy_page(io_ctl->cur, bitmap);
 	io_ctl_set_crc(io_ctl, io_ctl->index - 1);
 	if (io_ctl->index < io_ctl->num_pages)
 		io_ctl_map_page(io_ctl, 0);
@@ -607,7 +603,7 @@ static int io_ctl_read_bitmap(struct btrfs_io_ctl *io_ctl,
 	if (ret)
 		return ret;
 
-	memcpy(entry->bitmap, io_ctl->cur, PAGE_SIZE);
+	copy_page(entry->bitmap, io_ctl->cur);
 	io_ctl_unmap_page(io_ctl);
 
 	return 0;
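Both bitmap hunks swap a PAGE_SIZE memcpy for copy_page(), which copies exactly one page and may use an arch-optimized routine. Functionally the two spellings should be interchangeable:

	memcpy(dst, src, PAGE_SIZE);	/* old spelling */
	copy_page(dst, src);		/* same copy, page-sized primitive */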
@ -655,7 +651,7 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
|
|||
struct btrfs_free_space_ctl *ctl,
|
||||
struct btrfs_path *path, u64 offset)
|
||||
{
|
||||
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
|
||||
struct btrfs_fs_info *fs_info = root->fs_info;
|
||||
struct btrfs_free_space_header *header;
|
||||
struct extent_buffer *leaf;
|
||||
struct btrfs_io_ctl io_ctl;
|
||||
|
@ -1123,13 +1119,10 @@ static int __btrfs_wait_cache_io(struct btrfs_root *root,
|
|||
{
|
||||
int ret;
|
||||
struct inode *inode = io_ctl->inode;
|
||||
struct btrfs_fs_info *fs_info;
|
||||
|
||||
if (!inode)
|
||||
return 0;
|
||||
|
||||
fs_info = btrfs_sb(inode->i_sb);
|
||||
|
||||
/* Flush the dirty pages in the cache file. */
|
||||
ret = flush_dirty_cache(inode);
|
||||
if (ret)
|
||||
|
@ -1145,7 +1138,7 @@ static int __btrfs_wait_cache_io(struct btrfs_root *root,
|
|||
BTRFS_I(inode)->generation = 0;
|
||||
if (block_group) {
|
||||
#ifdef DEBUG
|
||||
btrfs_err(fs_info,
|
||||
btrfs_err(root->fs_info,
|
||||
"failed to write free space cache for block group %llu",
|
||||
block_group->key.objectid);
|
||||
#endif
|
||||
|
|
|
@ -1236,7 +1236,7 @@ int btrfs_clear_free_space_tree(struct btrfs_fs_info *fs_info)
|
|||
if (ret)
|
||||
goto abort;
|
||||
|
||||
ret = btrfs_del_root(trans, fs_info, &free_space_root->root_key);
|
||||
ret = btrfs_del_root(trans, &free_space_root->root_key);
|
||||
if (ret)
|
||||
goto abort;
|
||||
|
||||
|
|
|
@ -3,7 +3,6 @@
|
|||
* Copyright (C) 2007 Oracle. All rights reserved.
|
||||
*/
|
||||
|
||||
#include <linux/delay.h>
|
||||
#include <linux/kthread.h>
|
||||
#include <linux/pagemap.h>
|
||||
|
||||
|
@ -244,8 +243,6 @@ void btrfs_unpin_free_ino(struct btrfs_root *root)
|
|||
return;
|
||||
|
||||
while (1) {
|
||||
bool add_to_ctl = true;
|
||||
|
||||
spin_lock(rbroot_lock);
|
||||
n = rb_first(rbroot);
|
||||
if (!n) {
|
||||
|
@@ -257,15 +254,14 @@
 		BUG_ON(info->bitmap); /* Logic error */
 
 		if (info->offset > root->ino_cache_progress)
-			add_to_ctl = false;
-		else if (info->offset + info->bytes > root->ino_cache_progress)
-			count = root->ino_cache_progress - info->offset + 1;
+			count = 0;
 		else
-			count = info->bytes;
+			count = min(root->ino_cache_progress - info->offset + 1,
+				    info->bytes);
 
 		rb_erase(&info->offset_index, rbroot);
 		spin_unlock(rbroot_lock);
-		if (add_to_ctl)
+		if (count)
 			__btrfs_add_free_space(root->fs_info, ctl,
 					       info->offset, count);
 		kmem_cache_free(btrfs_free_space_cachep, info);
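The three-way branch collapses into a single min(): anything past the cache progress contributes nothing, and otherwise the count is capped by both the remaining progress window and the entry size. A userspace check of the equivalence (min() redefined locally for the sketch):

	#include <stdio.h>

	#define min(a, b) ((a) < (b) ? (a) : (b))

	static unsigned long old_count(unsigned long off, unsigned long bytes,
				       unsigned long progress)
	{
		if (off > progress)
			return 0;		/* add_to_ctl = false */
		else if (off + bytes > progress)
			return progress - off + 1;
		else
			return bytes;
	}

	static unsigned long new_count(unsigned long off, unsigned long bytes,
				       unsigned long progress)
	{
		if (off > progress)
			return 0;
		return min(progress - off + 1, bytes);
	}

	int main(void)
	{
		/* straddling and fully-covered entries agree in both forms */
		printf("%lu == %lu\n", old_count(10, 5, 12), new_count(10, 5, 12));
		printf("%lu == %lu\n", old_count(10, 2, 12), new_count(10, 2, 12));
		return 0;
	}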
fs/btrfs/inode.c
@@ -14,17 +14,13 @@
 #include <linux/init.h>
 #include <linux/string.h>
 #include <linux/backing-dev.h>
-#include <linux/mpage.h>
-#include <linux/swap.h>
 #include <linux/writeback.h>
 #include <linux/compat.h>
-#include <linux/bit_spinlock.h>
 #include <linux/xattr.h>
 #include <linux/posix_acl.h>
 #include <linux/falloc.h>
 #include <linux/slab.h>
 #include <linux/ratelimit.h>
-#include <linux/mount.h>
 #include <linux/btrfs.h>
 #include <linux/blkdev.h>
 #include <linux/posix_acl_xattr.h>
@@ -1443,8 +1439,7 @@ static noinline int run_delalloc_nocow(struct inode *inode,
 			nocow = 1;
 		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
 			extent_end = found_key.offset +
-				btrfs_file_extent_inline_len(leaf,
-						path->slots[0], fi);
+				btrfs_file_extent_ram_bytes(leaf, fi);
 			extent_end = ALIGN(extent_end,
 					   fs_info->sectorsize);
 		} else {
@@ -1752,7 +1747,7 @@ static void btrfs_add_delalloc_inodes(struct btrfs_root *root,
 void __btrfs_del_delalloc_inode(struct btrfs_root *root,
 				struct btrfs_inode *inode)
 {
-	struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
+	struct btrfs_fs_info *fs_info = root->fs_info;
 
 	if (!list_empty(&inode->delalloc_inodes)) {
 		list_del_init(&inode->delalloc_inodes);
@@ -1903,8 +1898,8 @@ static void btrfs_clear_bit_hook(void *private_data,
 }
 
 /*
- * extent_io.c merge_bio_hook, this must check the chunk tree to make sure
- * we don't create bios that span stripes or chunks
+ * Merge bio hook, this must check the chunk tree to make sure we don't create
+ * bios that span stripes or chunks
  *
  * return 1 if page cannot be merged to bio
  * return 0 if page can be merged to bio
@@ -1962,7 +1957,7 @@ static blk_status_t btrfs_submit_bio_start(void *private_data, struct bio *bio,
  * At IO completion time the cums attached on the ordered extent record
  * are inserted into the btree
  */
-static blk_status_t btrfs_submit_bio_done(void *private_data, struct bio *bio,
+blk_status_t btrfs_submit_bio_done(void *private_data, struct bio *bio,
 			  int mirror_num)
 {
 	struct inode *inode = private_data;
@@ -2035,8 +2030,7 @@ static blk_status_t btrfs_submit_bio_hook(void *private_data, struct bio *bio,
 		/* we're doing a write, do the async checksumming */
 		ret = btrfs_wq_submit_bio(fs_info, bio, mirror_num, bio_flags,
 					  bio_offset, inode,
-					  btrfs_submit_bio_start,
-					  btrfs_submit_bio_done);
+					  btrfs_submit_bio_start);
 		goto out;
 	} else if (!skip_sum) {
 		ret = btrfs_csum_one_bio(inode, bio, 0, 0);
@@ -3610,18 +3604,15 @@ static int btrfs_read_locked_inode(struct inode *inode)
 		filled = true;
 
 	path = btrfs_alloc_path();
-	if (!path) {
-		ret = -ENOMEM;
-		goto make_bad;
-	}
+	if (!path)
+		return -ENOMEM;
 
 	memcpy(&location, &BTRFS_I(inode)->location, sizeof(location));
 
 	ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
 	if (ret) {
 		if (ret > 0)
 			ret = -ENOENT;
-		goto make_bad;
+		btrfs_free_path(path);
+		return ret;
 	}
 
 	leaf = path->nodes[0];
@@ -3774,11 +3765,6 @@ static int btrfs_read_locked_inode(struct inode *inode)
 
 	btrfs_sync_inode_flags_to_i_flags(inode);
 	return 0;
-
-make_bad:
-	btrfs_free_path(path);
-	make_bad_inode(inode);
-	return ret;
 }
 
 /*
@@ -3984,7 +3970,7 @@ static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans,
 		goto err;
 	}
 skip_backref:
-	ret = btrfs_delete_delayed_dir_index(trans, fs_info, dir, index);
+	ret = btrfs_delete_delayed_dir_index(trans, dir, index);
 	if (ret) {
 		btrfs_abort_transaction(trans, ret);
 		goto err;
@@ -4087,11 +4073,10 @@ static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
 }
 
 static int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
-			       struct btrfs_root *root,
-			       struct inode *dir, u64 objectid,
-			       const char *name, int name_len)
+			       struct inode *dir, u64 objectid,
+			       const char *name, int name_len)
 {
-	struct btrfs_fs_info *fs_info = root->fs_info;
+	struct btrfs_root *root = BTRFS_I(dir)->root;
 	struct btrfs_path *path;
 	struct extent_buffer *leaf;
 	struct btrfs_dir_item *di;
@@ -4124,9 +4109,8 @@ static int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
 	}
 	btrfs_release_path(path);
 
-	ret = btrfs_del_root_ref(trans, fs_info, objectid,
-				 root->root_key.objectid, dir_ino,
-				 &index, name, name_len);
+	ret = btrfs_del_root_ref(trans, objectid, root->root_key.objectid,
+				 dir_ino, &index, name, name_len);
 	if (ret < 0) {
 		if (ret != -ENOENT) {
 			btrfs_abort_transaction(trans, ret);
@@ -4145,12 +4129,11 @@
 
 		leaf = path->nodes[0];
 		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
-		btrfs_release_path(path);
 		index = key.offset;
 	}
+	btrfs_release_path(path);
 
-	ret = btrfs_delete_delayed_dir_index(trans, fs_info, BTRFS_I(dir), index);
+	ret = btrfs_delete_delayed_dir_index(trans, BTRFS_I(dir), index);
 	if (ret) {
 		btrfs_abort_transaction(trans, ret);
 		goto out;
@@ -4243,9 +4226,9 @@ static void btrfs_prune_dentries(struct btrfs_root *root)
 		prev = node;
 		entry = rb_entry(node, struct btrfs_inode, rb_node);
 
-		if (objectid < btrfs_ino(BTRFS_I(&entry->vfs_inode)))
+		if (objectid < btrfs_ino(entry))
 			node = node->rb_left;
-		else if (objectid > btrfs_ino(BTRFS_I(&entry->vfs_inode)))
+		else if (objectid > btrfs_ino(entry))
 			node = node->rb_right;
 		else
 			break;
@@ -4253,7 +4236,7 @@
 	if (!node) {
 		while (prev) {
 			entry = rb_entry(prev, struct btrfs_inode, rb_node);
-			if (objectid <= btrfs_ino(BTRFS_I(&entry->vfs_inode))) {
+			if (objectid <= btrfs_ino(entry)) {
 				node = prev;
 				break;
 			}
@@ -4262,7 +4245,7 @@
 	}
 	while (node) {
 		entry = rb_entry(node, struct btrfs_inode, rb_node);
-		objectid = btrfs_ino(BTRFS_I(&entry->vfs_inode)) + 1;
+		objectid = btrfs_ino(entry) + 1;
 		inode = igrab(&entry->vfs_inode);
 		if (inode) {
 			spin_unlock(&root->inode_lock);
@@ -4343,10 +4326,8 @@ int btrfs_delete_subvolume(struct inode *dir, struct dentry *dentry)
 
 	btrfs_record_snapshot_destroy(trans, BTRFS_I(dir));
 
-	ret = btrfs_unlink_subvol(trans, root, dir,
-				  dest->root_key.objectid,
-				  dentry->d_name.name,
-				  dentry->d_name.len);
+	ret = btrfs_unlink_subvol(trans, dir, dest->root_key.objectid,
+				  dentry->d_name.name, dentry->d_name.len);
 	if (ret) {
 		err = ret;
 		btrfs_abort_transaction(trans, ret);
@@ -4441,7 +4422,7 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
 		return PTR_ERR(trans);
 
 	if (unlikely(btrfs_ino(BTRFS_I(inode)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
-		err = btrfs_unlink_subvol(trans, root, dir,
+		err = btrfs_unlink_subvol(trans, dir,
					  BTRFS_I(inode)->location.objectid,
					  dentry->d_name.name,
					  dentry->d_name.len);
@@ -4643,8 +4624,8 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
 					BTRFS_I(inode), leaf, fi,
 					found_key.offset);
 		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
-			item_end += btrfs_file_extent_inline_len(leaf,
-							 path->slots[0], fi);
+			item_end += btrfs_file_extent_ram_bytes(leaf,
+								fi);
 
 			trace_btrfs_truncate_show_fi_inline(
 				BTRFS_I(inode), leaf, fi, path->slots[0],
@@ -5615,9 +5596,9 @@ static void inode_tree_add(struct inode *inode)
 		parent = *p;
 		entry = rb_entry(parent, struct btrfs_inode, rb_node);
 
-		if (ino < btrfs_ino(BTRFS_I(&entry->vfs_inode)))
+		if (ino < btrfs_ino(entry))
 			p = &parent->rb_left;
-		else if (ino > btrfs_ino(BTRFS_I(&entry->vfs_inode)))
+		else if (ino > btrfs_ino(entry))
 			p = &parent->rb_right;
 		else {
 			WARN_ON(!(entry->vfs_inode.i_state &
@@ -5708,16 +5689,21 @@ struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
 		int ret;
 
 		ret = btrfs_read_locked_inode(inode);
-		if (!is_bad_inode(inode)) {
+		if (!ret) {
 			inode_tree_add(inode);
 			unlock_new_inode(inode);
 			if (new)
 				*new = 1;
 		} else {
-			unlock_new_inode(inode);
-			iput(inode);
-			ASSERT(ret < 0);
-			inode = ERR_PTR(ret < 0 ? ret : -ESTALE);
+			iget_failed(inode);
+			/*
+			 * ret > 0 can come from btrfs_search_slot called by
+			 * btrfs_read_locked_inode, this means the inode item
+			 * was not found.
+			 */
+			if (ret > 0)
+				ret = -ENOENT;
+			inode = ERR_PTR(ret);
 		}
 	}
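With btrfs_read_locked_inode() returning an error code instead of marking the inode bad, callers of btrfs_iget() only ever see a plain ERR_PTR, so the is_bad_inode() checks elsewhere (see the __lookup_free_space_inode hunk above) can go away. A sketch of the simplified caller pattern:

	inode = btrfs_iget(fs_info->sb, &location, root, NULL);
	if (IS_ERR(inode))
		return PTR_ERR(inode);	/* no separate is_bad_inode() check */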
|
@ -5745,7 +5731,7 @@ static struct inode *new_simple_dir(struct super_block *s,
|
|||
inode->i_mtime = current_time(inode);
|
||||
inode->i_atime = inode->i_mtime;
|
||||
inode->i_ctime = inode->i_mtime;
|
||||
BTRFS_I(inode)->i_otime = timespec64_to_timespec(inode->i_mtime);
|
||||
BTRFS_I(inode)->i_otime = inode->i_mtime;
|
||||
|
||||
return inode;
|
||||
}
|
||||
|
@ -6027,32 +6013,6 @@ static int btrfs_real_readdir(struct file *file, struct dir_context *ctx)
|
|||
return ret;
|
||||
}
|
||||
|
||||
int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc)
|
||||
{
|
||||
struct btrfs_root *root = BTRFS_I(inode)->root;
|
||||
struct btrfs_trans_handle *trans;
|
||||
int ret = 0;
|
||||
bool nolock = false;
|
||||
|
||||
if (test_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags))
|
||||
return 0;
|
||||
|
||||
if (btrfs_fs_closing(root->fs_info) &&
|
||||
btrfs_is_free_space_inode(BTRFS_I(inode)))
|
||||
nolock = true;
|
||||
|
||||
if (wbc->sync_mode == WB_SYNC_ALL) {
|
||||
if (nolock)
|
||||
trans = btrfs_join_transaction_nolock(root);
|
||||
else
|
||||
trans = btrfs_join_transaction(root);
|
||||
if (IS_ERR(trans))
|
||||
return PTR_ERR(trans);
|
||||
ret = btrfs_commit_transaction(trans);
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* This is somewhat expensive, updating the tree every time the
|
||||
* inode changes. But, it is most likely to find the inode in cache.
|
||||
|
@ -6351,7 +6311,7 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
|
|||
inode->i_mtime = current_time(inode);
|
||||
inode->i_atime = inode->i_mtime;
|
||||
inode->i_ctime = inode->i_mtime;
|
||||
BTRFS_I(inode)->i_otime = timespec64_to_timespec(inode->i_mtime);
|
||||
BTRFS_I(inode)->i_otime = inode->i_mtime;
|
||||
|
||||
inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
|
||||
struct btrfs_inode_item);
|
||||
|
@ -6420,7 +6380,6 @@ int btrfs_add_link(struct btrfs_trans_handle *trans,
|
|||
struct btrfs_inode *parent_inode, struct btrfs_inode *inode,
|
||||
const char *name, int name_len, int add_backref, u64 index)
|
||||
{
|
||||
struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
|
||||
int ret = 0;
|
||||
struct btrfs_key key;
|
||||
struct btrfs_root *root = parent_inode->root;
|
||||
|
@ -6436,7 +6395,7 @@ int btrfs_add_link(struct btrfs_trans_handle *trans,
|
|||
}
|
||||
|
||||
if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
|
||||
ret = btrfs_add_root_ref(trans, fs_info, key.objectid,
|
||||
ret = btrfs_add_root_ref(trans, key.objectid,
|
||||
root->root_key.objectid, parent_ino,
|
||||
index, name, name_len);
|
||||
} else if (add_backref) {
|
||||
|
@ -6472,7 +6431,7 @@ int btrfs_add_link(struct btrfs_trans_handle *trans,
|
|||
if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
|
||||
u64 local_index;
|
||||
int err;
|
||||
err = btrfs_del_root_ref(trans, fs_info, key.objectid,
|
||||
err = btrfs_del_root_ref(trans, key.objectid,
|
||||
root->root_key.objectid, parent_ino,
|
||||
&local_index, name, name_len);
|
||||
|
||||
|
@ -6832,7 +6791,7 @@ struct extent_map *btrfs_get_extent(struct btrfs_inode *inode,
|
|||
size_t pg_offset, u64 start, u64 len,
|
||||
int create)
|
||||
{
|
||||
struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
|
||||
struct btrfs_fs_info *fs_info = inode->root->fs_info;
|
||||
int ret;
|
||||
int err = 0;
|
||||
u64 extent_start = 0;
|
||||
|
@ -6928,7 +6887,8 @@ struct extent_map *btrfs_get_extent(struct btrfs_inode *inode,
|
|||
extent_start);
|
||||
} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
|
||||
size_t size;
|
||||
size = btrfs_file_extent_inline_len(leaf, path->slots[0], item);
|
||||
|
||||
size = btrfs_file_extent_ram_bytes(leaf, item);
|
||||
extent_end = ALIGN(extent_start + size,
|
||||
fs_info->sectorsize);
|
||||
|
||||
|
@@ -6979,7 +6939,7 @@ struct extent_map *btrfs_get_extent(struct btrfs_inode *inode,
 	if (new_inline)
 		goto out;
 
-	size = btrfs_file_extent_inline_len(leaf, path->slots[0], item);
+	size = btrfs_file_extent_ram_bytes(leaf, item);
 	extent_offset = page_offset(page) + pg_offset - extent_start;
 	copy_size = min_t(u64, PAGE_SIZE - pg_offset,
 			  size - extent_offset);
@@ -7850,7 +7810,7 @@ static blk_status_t dio_read_error(struct inode *inode, struct bio *failed_bio,
 	isector >>= inode->i_sb->s_blocksize_bits;
 	bio = btrfs_create_repair_bio(inode, failed_bio, failrec, page,
 				pgoff, isector, repair_endio, repair_arg);
-	bio_set_op_attrs(bio, REQ_OP_READ, read_mode);
+	bio->bi_opf = REQ_OP_READ | read_mode;
 
 	btrfs_debug(BTRFS_I(inode)->root->fs_info,
 		   "repair DIO read error: submitting new dio read[%#x] to this_mirror=%d, in_validation=%d",
@@ -8284,8 +8244,7 @@ static inline blk_status_t btrfs_submit_dio_bio(struct bio *bio,
 	if (write && async_submit) {
 		ret = btrfs_wq_submit_bio(fs_info, bio, 0, 0,
 					  file_offset, inode,
-					  btrfs_submit_bio_start_direct_io,
-					  btrfs_submit_bio_done);
+					  btrfs_submit_bio_start_direct_io);
 		goto err;
 	} else if (write) {
 		/*
@@ -9525,8 +9484,7 @@ static int btrfs_rename_exchange(struct inode *old_dir,
 	/* src is a subvolume */
 	if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
 		root_objectid = BTRFS_I(old_inode)->root->root_key.objectid;
-		ret = btrfs_unlink_subvol(trans, root, old_dir,
-					  root_objectid,
+		ret = btrfs_unlink_subvol(trans, old_dir, root_objectid,
 					  old_dentry->d_name.name,
 					  old_dentry->d_name.len);
 	} else { /* src is an inode */
@@ -9545,8 +9503,7 @@ static int btrfs_rename_exchange(struct inode *old_dir,
 	/* dest is a subvolume */
 	if (new_ino == BTRFS_FIRST_FREE_OBJECTID) {
 		root_objectid = BTRFS_I(new_inode)->root->root_key.objectid;
-		ret = btrfs_unlink_subvol(trans, dest, new_dir,
-					  root_objectid,
+		ret = btrfs_unlink_subvol(trans, new_dir, root_objectid,
 					  new_dentry->d_name.name,
 					  new_dentry->d_name.len);
 	} else { /* dest is an inode */
@@ -9806,7 +9763,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
 
 	if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
 		root_objectid = BTRFS_I(old_inode)->root->root_key.objectid;
-		ret = btrfs_unlink_subvol(trans, root, old_dir, root_objectid,
+		ret = btrfs_unlink_subvol(trans, old_dir, root_objectid,
 					  old_dentry->d_name.name,
 					  old_dentry->d_name.len);
 	} else {
@@ -9828,8 +9785,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
 		if (unlikely(btrfs_ino(BTRFS_I(new_inode)) ==
 			     BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
 			root_objectid = BTRFS_I(new_inode)->location.objectid;
-			ret = btrfs_unlink_subvol(trans, dest, new_dir,
-						  root_objectid,
+			ret = btrfs_unlink_subvol(trans, new_dir, root_objectid,
 						  new_dentry->d_name.name,
 						  new_dentry->d_name.len);
 			BUG_ON(new_inode->i_nlink == 0);
@@ -10451,12 +10407,6 @@ static int btrfs_readpage_io_failed_hook(struct page *page, int failed_mirror)
 	return -EAGAIN;
 }
 
-static struct btrfs_fs_info *iotree_fs_info(void *private_data)
-{
-	struct inode *inode = private_data;
-	return btrfs_sb(inode->i_sb);
-}
-
 static void btrfs_check_extent_io_range(void *private_data, const char *caller,
 					u64 start, u64 end)
 {
@@ -10471,9 +10421,9 @@ static void btrfs_check_extent_io_range(void *private_data, const char *caller,
 	}
 }
 
-void btrfs_set_range_writeback(void *private_data, u64 start, u64 end)
+void btrfs_set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
 {
-	struct inode *inode = private_data;
+	struct inode *inode = tree->private_data;
 	unsigned long index = start >> PAGE_SHIFT;
 	unsigned long end_index = end >> PAGE_SHIFT;
 	struct page *page;
@@ -10529,10 +10479,7 @@ static const struct extent_io_ops btrfs_extent_io_ops = {
 	/* mandatory callbacks */
 	.submit_bio_hook = btrfs_submit_bio_hook,
 	.readpage_end_io_hook = btrfs_readpage_end_io_hook,
-	.merge_bio_hook = btrfs_merge_bio_hook,
 	.readpage_io_failed_hook = btrfs_readpage_io_failed_hook,
-	.tree_fs_info = iotree_fs_info,
-	.set_range_writeback = btrfs_set_range_writeback,
 
 	/* optional callbacks */
 	.fill_delalloc = run_delalloc_range,

fs/btrfs/ioctl.c
@@ -5,23 +5,18 @@
 
 #include <linux/kernel.h>
 #include <linux/bio.h>
-#include <linux/buffer_head.h>
 #include <linux/file.h>
 #include <linux/fs.h>
 #include <linux/fsnotify.h>
 #include <linux/pagemap.h>
 #include <linux/highmem.h>
 #include <linux/time.h>
-#include <linux/init.h>
 #include <linux/string.h>
 #include <linux/backing-dev.h>
 #include <linux/mount.h>
-#include <linux/mpage.h>
 #include <linux/namei.h>
-#include <linux/swap.h>
 #include <linux/writeback.h>
 #include <linux/compat.h>
-#include <linux/bit_spinlock.h>
 #include <linux/security.h>
 #include <linux/xattr.h>
 #include <linux/mm.h>
@@ -606,7 +601,7 @@ static noinline int create_subvol(struct inode *dir,
 	trans->block_rsv = &block_rsv;
 	trans->bytes_reserved = block_rsv.size;
 
-	ret = btrfs_qgroup_inherit(trans, fs_info, 0, objectid, inherit);
+	ret = btrfs_qgroup_inherit(trans, 0, objectid, inherit);
 	if (ret)
 		goto fail;
 
@@ -616,14 +611,6 @@ static noinline int create_subvol(struct inode *dir,
 		goto fail;
 	}
 
-	memzero_extent_buffer(leaf, 0, sizeof(struct btrfs_header));
-	btrfs_set_header_bytenr(leaf, leaf->start);
-	btrfs_set_header_generation(leaf, trans->transid);
-	btrfs_set_header_backref_rev(leaf, BTRFS_MIXED_BACKREF_REV);
-	btrfs_set_header_owner(leaf, objectid);
-
-	write_extent_buffer_fsid(leaf, fs_info->fsid);
-	write_extent_buffer_chunk_tree_uuid(leaf, fs_info->chunk_tree_uuid);
 	btrfs_mark_buffer_dirty(leaf);
 
 	inode_item = &root_item->inode;
@@ -711,8 +698,7 @@ static noinline int create_subvol(struct inode *dir,
 	ret = btrfs_update_inode(trans, root, dir);
 	BUG_ON(ret);
 
-	ret = btrfs_add_root_ref(trans, fs_info,
-				 objectid, root->root_key.objectid,
+	ret = btrfs_add_root_ref(trans, objectid, root->root_key.objectid,
 				 btrfs_ino(BTRFS_I(dir)), index, name, namelen);
 	BUG_ON(ret);
 
@@ -2507,8 +2493,8 @@ static int btrfs_search_path_in_tree_user(struct inode *inode,
 static noinline int btrfs_ioctl_ino_lookup(struct file *file,
 					   void __user *argp)
 {
-	 struct btrfs_ioctl_ino_lookup_args *args;
-	 struct inode *inode;
+	struct btrfs_ioctl_ino_lookup_args *args;
+	struct inode *inode;
 	int ret = 0;
 
 	args = memdup_user(argp, sizeof(*args));
@@ -2941,8 +2927,14 @@ static int btrfs_ioctl_defrag(struct file *file, void __user *argp)
 		ret = btrfs_defrag_root(root);
 		break;
 	case S_IFREG:
-		if (!(file->f_mode & FMODE_WRITE)) {
-			ret = -EINVAL;
+		/*
+		 * Note that this does not check the file descriptor for write
+		 * access. This prevents defragmenting executables that are
+		 * running and allows defrag on files open in read-only mode.
+		 */
+		if (!capable(CAP_SYS_ADMIN) &&
+		    inode_permission(inode, MAY_WRITE)) {
+			ret = -EPERM;
 			goto out;
 		}
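This is the user-visible defrag change from the merge summary: the decision moves from the open mode of the descriptor to an inode permission check, so a file opened read-only can still be defragmented by a user who could have opened it read-write. A sketch of the resulting policy:

	/* admin may always defrag; others need write permission on the inode */
	if (!capable(CAP_SYS_ADMIN) && inode_permission(inode, MAY_WRITE))
		return -EPERM;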
@@ -3165,10 +3157,8 @@ static long btrfs_ioctl_dev_info(struct btrfs_fs_info *fs_info,
 	di_args->total_bytes = btrfs_device_get_total_bytes(dev);
 	memcpy(di_args->uuid, dev->uuid, sizeof(di_args->uuid));
 	if (dev->name) {
-		struct rcu_string *name;
-
-		name = rcu_dereference(dev->name);
-		strncpy(di_args->path, name->str, sizeof(di_args->path) - 1);
+		strncpy(di_args->path, rcu_str_deref(dev->name),
+			sizeof(di_args->path) - 1);
 		di_args->path[sizeof(di_args->path) - 1] = 0;
 	} else {
 		di_args->path[0] = '\0';
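rcu_str_deref() is the existing helper from fs/btrfs/rcu-string.h; this hunk just stops open-coding it. To a first approximation the macro does the following (paraphrased from memory, not copied verbatim):

	/* dereference the RCU-protected wrapper and hand back its string */
	#define rcu_str_deref(rcu_str) ({				\
		struct rcu_string *__str = rcu_dereference(rcu_str);	\
		__str->str;						\
	})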
@@ -5118,9 +5108,7 @@ static long btrfs_ioctl_quota_ctl(struct file *file, void __user *arg)
 	struct inode *inode = file_inode(file);
 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	struct btrfs_ioctl_quota_ctl_args *sa;
-	struct btrfs_trans_handle *trans = NULL;
 	int ret;
-	int err;
 
 	if (!capable(CAP_SYS_ADMIN))
 		return -EPERM;
@@ -5136,28 +5124,19 @@ static long btrfs_ioctl_quota_ctl(struct file *file, void __user *arg)
 	}
 
 	down_write(&fs_info->subvol_sem);
-	trans = btrfs_start_transaction(fs_info->tree_root, 2);
-	if (IS_ERR(trans)) {
-		ret = PTR_ERR(trans);
-		goto out;
-	}
 
 	switch (sa->cmd) {
 	case BTRFS_QUOTA_CTL_ENABLE:
-		ret = btrfs_quota_enable(trans, fs_info);
+		ret = btrfs_quota_enable(fs_info);
 		break;
 	case BTRFS_QUOTA_CTL_DISABLE:
-		ret = btrfs_quota_disable(trans, fs_info);
+		ret = btrfs_quota_disable(fs_info);
 		break;
 	default:
 		ret = -EINVAL;
 		break;
 	}
 
-	err = btrfs_commit_transaction(trans);
-	if (err && !ret)
-		ret = err;
-out:
 	kfree(sa);
 	up_write(&fs_info->subvol_sem);
 drop_write:
@@ -5195,15 +5174,13 @@ static long btrfs_ioctl_qgroup_assign(struct file *file, void __user *arg)
 	}
 
 	if (sa->assign) {
-		ret = btrfs_add_qgroup_relation(trans, fs_info,
-						sa->src, sa->dst);
+		ret = btrfs_add_qgroup_relation(trans, sa->src, sa->dst);
 	} else {
-		ret = btrfs_del_qgroup_relation(trans, fs_info,
-						sa->src, sa->dst);
+		ret = btrfs_del_qgroup_relation(trans, sa->src, sa->dst);
 	}
 
 	/* update qgroup status and info */
-	err = btrfs_run_qgroups(trans, fs_info);
+	err = btrfs_run_qgroups(trans);
 	if (err < 0)
 		btrfs_handle_fs_error(fs_info, err,
 				      "failed to update qgroup status and info");
@@ -5221,7 +5198,6 @@ static long btrfs_ioctl_qgroup_create(struct file *file, void __user *arg)
 static long btrfs_ioctl_qgroup_create(struct file *file, void __user *arg)
 {
 	struct inode *inode = file_inode(file);
-	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	struct btrfs_root *root = BTRFS_I(inode)->root;
 	struct btrfs_ioctl_qgroup_create_args *sa;
 	struct btrfs_trans_handle *trans;
@@ -5253,9 +5229,9 @@ static long btrfs_ioctl_qgroup_create(struct file *file, void __user *arg)
 	}
 
 	if (sa->create) {
-		ret = btrfs_create_qgroup(trans, fs_info, sa->qgroupid);
+		ret = btrfs_create_qgroup(trans, sa->qgroupid);
 	} else {
-		ret = btrfs_remove_qgroup(trans, fs_info, sa->qgroupid);
+		ret = btrfs_remove_qgroup(trans, sa->qgroupid);
 	}
 
 	err = btrfs_end_transaction(trans);
@@ -5272,7 +5248,6 @@ static long btrfs_ioctl_qgroup_limit(struct file *file, void __user *arg)
 static long btrfs_ioctl_qgroup_limit(struct file *file, void __user *arg)
 {
 	struct inode *inode = file_inode(file);
-	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	struct btrfs_root *root = BTRFS_I(inode)->root;
 	struct btrfs_ioctl_qgroup_limit_args *sa;
 	struct btrfs_trans_handle *trans;
@@ -5305,7 +5280,7 @@ static long btrfs_ioctl_qgroup_limit(struct file *file, void __user *arg)
 		qgroupid = root->root_key.objectid;
 	}
 
-	ret = btrfs_limit_qgroup(trans, fs_info, qgroupid, &sa->lim);
+	ret = btrfs_limit_qgroup(trans, qgroupid, &sa->lim);
 
 	err = btrfs_end_transaction(trans);
 	if (err && !ret)

fs/btrfs/ordered-data.c
@@ -6,7 +6,6 @@
 #include <linux/slab.h>
 #include <linux/blkdev.h>
 #include <linux/writeback.h>
-#include <linux/pagevec.h>
 #include "ctree.h"
 #include "transaction.h"
 #include "btrfs_inode.h"
@@ -421,129 +420,6 @@ int btrfs_dec_test_ordered_pending(struct inode *inode,
 	return ret == 0;
 }

-/* Needs to either be called under a log transaction or the log_mutex */
-void btrfs_get_logged_extents(struct btrfs_inode *inode,
-			      struct list_head *logged_list,
-			      const loff_t start,
-			      const loff_t end)
-{
-	struct btrfs_ordered_inode_tree *tree;
-	struct btrfs_ordered_extent *ordered;
-	struct rb_node *n;
-	struct rb_node *prev;
-
-	tree = &inode->ordered_tree;
-	spin_lock_irq(&tree->lock);
-	n = __tree_search(&tree->tree, end, &prev);
-	if (!n)
-		n = prev;
-	for (; n; n = rb_prev(n)) {
-		ordered = rb_entry(n, struct btrfs_ordered_extent, rb_node);
-		if (ordered->file_offset > end)
-			continue;
-		if (entry_end(ordered) <= start)
-			break;
-		if (test_and_set_bit(BTRFS_ORDERED_LOGGED, &ordered->flags))
-			continue;
-		list_add(&ordered->log_list, logged_list);
-		refcount_inc(&ordered->refs);
-	}
-	spin_unlock_irq(&tree->lock);
-}
-
-void btrfs_put_logged_extents(struct list_head *logged_list)
-{
-	struct btrfs_ordered_extent *ordered;
-
-	while (!list_empty(logged_list)) {
-		ordered = list_first_entry(logged_list,
-					   struct btrfs_ordered_extent,
-					   log_list);
-		list_del_init(&ordered->log_list);
-		btrfs_put_ordered_extent(ordered);
-	}
-}
-
-void btrfs_submit_logged_extents(struct list_head *logged_list,
-				 struct btrfs_root *log)
-{
-	int index = log->log_transid % 2;
-
-	spin_lock_irq(&log->log_extents_lock[index]);
-	list_splice_tail(logged_list, &log->logged_list[index]);
-	spin_unlock_irq(&log->log_extents_lock[index]);
-}
-
-void btrfs_wait_logged_extents(struct btrfs_trans_handle *trans,
-			       struct btrfs_root *log, u64 transid)
-{
-	struct btrfs_ordered_extent *ordered;
-	int index = transid % 2;
-
-	spin_lock_irq(&log->log_extents_lock[index]);
-	while (!list_empty(&log->logged_list[index])) {
-		struct inode *inode;
-		ordered = list_first_entry(&log->logged_list[index],
-					   struct btrfs_ordered_extent,
-					   log_list);
-		list_del_init(&ordered->log_list);
-		inode = ordered->inode;
-		spin_unlock_irq(&log->log_extents_lock[index]);
-
-		if (!test_bit(BTRFS_ORDERED_IO_DONE, &ordered->flags) &&
-		    !test_bit(BTRFS_ORDERED_DIRECT, &ordered->flags)) {
-			u64 start = ordered->file_offset;
-			u64 end = ordered->file_offset + ordered->len - 1;
-
-			WARN_ON(!inode);
-			filemap_fdatawrite_range(inode->i_mapping, start, end);
-		}
-		wait_event(ordered->wait, test_bit(BTRFS_ORDERED_IO_DONE,
-						   &ordered->flags));
-
-		/*
-		 * In order to keep us from losing our ordered extent
-		 * information when committing the transaction we have to make
-		 * sure that any logged extents are completed when we go to
-		 * commit the transaction.  To do this we simply increase the
-		 * current transactions pending_ordered counter and decrement it
-		 * when the ordered extent completes.
-		 */
-		if (!test_bit(BTRFS_ORDERED_COMPLETE, &ordered->flags)) {
-			struct btrfs_ordered_inode_tree *tree;
-
-			tree = &BTRFS_I(inode)->ordered_tree;
-			spin_lock_irq(&tree->lock);
-			if (!test_bit(BTRFS_ORDERED_COMPLETE, &ordered->flags)) {
-				set_bit(BTRFS_ORDERED_PENDING, &ordered->flags);
-				atomic_inc(&trans->transaction->pending_ordered);
-			}
-			spin_unlock_irq(&tree->lock);
-		}
-		btrfs_put_ordered_extent(ordered);
-		spin_lock_irq(&log->log_extents_lock[index]);
-	}
-	spin_unlock_irq(&log->log_extents_lock[index]);
-}
-
-void btrfs_free_logged_extents(struct btrfs_root *log, u64 transid)
-{
-	struct btrfs_ordered_extent *ordered;
-	int index = transid % 2;
-
-	spin_lock_irq(&log->log_extents_lock[index]);
-	while (!list_empty(&log->logged_list[index])) {
-		ordered = list_first_entry(&log->logged_list[index],
-					   struct btrfs_ordered_extent,
-					   log_list);
-		list_del_init(&ordered->log_list);
-		spin_unlock_irq(&log->log_extents_lock[index]);
-		btrfs_put_ordered_extent(ordered);
-		spin_lock_irq(&log->log_extents_lock[index]);
-	}
-	spin_unlock_irq(&log->log_extents_lock[index]);
-}
-
 /*
  * used to drop a reference on an ordered extent. This will free
  * the extent if the last reference is dropped
@@ -913,20 +789,6 @@ struct btrfs_ordered_extent *btrfs_lookup_ordered_range(
 	return entry;
 }

-bool btrfs_have_ordered_extents_in_range(struct inode *inode,
-					 u64 file_offset,
-					 u64 len)
-{
-	struct btrfs_ordered_extent *oe;
-
-	oe = btrfs_lookup_ordered_range(BTRFS_I(inode), file_offset, len);
-	if (oe) {
-		btrfs_put_ordered_extent(oe);
-		return true;
-	}
-	return false;
-}
-
 /*
  * lookup and return any extent before 'file_offset'. NULL is returned
  * if none is found
@@ -54,15 +54,11 @@ struct btrfs_ordered_sum {
 #define BTRFS_ORDERED_UPDATED_ISIZE 7 /* indicates whether this ordered extent
				       * has done its due diligence in updating
				       * the isize. */
-#define BTRFS_ORDERED_LOGGED_CSUM 8 /* We've logged the csums on this ordered
-				       ordered extent */
-#define BTRFS_ORDERED_TRUNCATED 9 /* Set when we have to truncate an extent */
+#define BTRFS_ORDERED_TRUNCATED 8 /* Set when we have to truncate an extent */

-#define BTRFS_ORDERED_LOGGED 10 /* Set when we've waited on this ordered extent
-				 * in the logging code. */
-#define BTRFS_ORDERED_PENDING 11 /* We are waiting for this ordered extent to
+#define BTRFS_ORDERED_PENDING 9 /* We are waiting for this ordered extent to
				  * complete in the current transaction. */
-#define BTRFS_ORDERED_REGULAR 12 /* Regular IO for COW */
+#define BTRFS_ORDERED_REGULAR 10 /* Regular IO for COW */

 struct btrfs_ordered_extent {
 	/* logical offset in the file */
@@ -182,9 +178,6 @@ struct btrfs_ordered_extent *btrfs_lookup_ordered_range(
 		struct btrfs_inode *inode,
 		u64 file_offset,
 		u64 len);
-bool btrfs_have_ordered_extents_in_range(struct inode *inode,
-					 u64 file_offset,
-					 u64 len);
 int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
				struct btrfs_ordered_extent *ordered);
 int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr,
@@ -193,16 +186,6 @@ u64 btrfs_wait_ordered_extents(struct btrfs_root *root, u64 nr,
			       const u64 range_start, const u64 range_len);
 u64 btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr,
			      const u64 range_start, const u64 range_len);
-void btrfs_get_logged_extents(struct btrfs_inode *inode,
-			      struct list_head *logged_list,
-			      const loff_t start,
-			      const loff_t end);
-void btrfs_put_logged_extents(struct list_head *logged_list);
-void btrfs_submit_logged_extents(struct list_head *logged_list,
-				 struct btrfs_root *log);
-void btrfs_wait_logged_extents(struct btrfs_trans_handle *trans,
-			       struct btrfs_root *log, u64 transid);
-void btrfs_free_logged_extents(struct btrfs_root *log, u64 transid);
 int __init ordered_data_init(void);
 void __cold ordered_data_exit(void);
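The renumbering of BTRFS_ORDERED_TRUNCATED, _PENDING and _REGULAR above works because these constants are bit indexes used with set_bit()/test_bit() on an in-kernel flags word, not a stable on-disk or user ABI; once the logged-extents flags are gone the remaining bits can be packed down. A userspace sketch of that usage follows, with hypothetical flag names and toy set_bit/test_bit helpers standing in for the kernel bitops.

/* Illustrative only: how bit-index flags like BTRFS_ORDERED_* are used.
 * The values index into a bitmap, so renumbering is safe as long as
 * nothing outside the kernel depends on them. */
#include <stdio.h>

enum ordered_flags {			/* hypothetical stand-ins */
	ORDERED_TRUNCATED = 8,
	ORDERED_PENDING = 9,
	ORDERED_REGULAR = 10,
};

static int test_bit(int nr, const unsigned long *addr)
{
	return (*addr >> nr) & 1UL;
}

static void set_bit(int nr, unsigned long *addr)
{
	*addr |= 1UL << nr;
}

int main(void)
{
	unsigned long flags = 0;

	set_bit(ORDERED_PENDING, &flags);
	printf("pending=%d regular=%d\n",
	       test_bit(ORDERED_PENDING, &flags),
	       test_bit(ORDERED_REGULAR, &flags));
	return 0;
}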
@@ -52,17 +52,9 @@ static void print_extent_item(struct extent_buffer *eb, int slot, int type)
 	u64 offset;
 	int ref_index = 0;

-	if (item_size < sizeof(*ei)) {
-#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
-		struct btrfs_extent_item_v0 *ei0;
-		BUG_ON(item_size != sizeof(*ei0));
-		ei0 = btrfs_item_ptr(eb, slot, struct btrfs_extent_item_v0);
-		pr_info("\t\textent refs %u\n",
-			btrfs_extent_refs_v0(eb, ei0));
-		return;
-#else
-		BUG();
-#endif
+	if (unlikely(item_size < sizeof(*ei))) {
+		btrfs_print_v0_err(eb->fs_info);
+		btrfs_handle_fs_error(eb->fs_info, -EINVAL, NULL);
 	}

 	ei = btrfs_item_ptr(eb, slot, struct btrfs_extent_item);
@@ -133,20 +125,6 @@ static void print_extent_item(struct extent_buffer *eb, int slot, int type)
 	WARN_ON(ptr > end);
 }

-#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
-static void print_extent_ref_v0(struct extent_buffer *eb, int slot)
-{
-	struct btrfs_extent_ref_v0 *ref0;
-
-	ref0 = btrfs_item_ptr(eb, slot, struct btrfs_extent_ref_v0);
-	printk("\t\textent back ref root %llu gen %llu owner %llu num_refs %lu\n",
-	       btrfs_ref_root_v0(eb, ref0),
-	       btrfs_ref_generation_v0(eb, ref0),
-	       btrfs_ref_objectid_v0(eb, ref0),
-	       (unsigned long)btrfs_ref_count_v0(eb, ref0));
-}
-#endif
-
 static void print_uuid_item(struct extent_buffer *l, unsigned long offset,
			     u32 item_size)
 {
@@ -267,8 +245,8 @@ void btrfs_print_leaf(struct extent_buffer *l)
					    struct btrfs_file_extent_item);
			if (btrfs_file_extent_type(l, fi) ==
			    BTRFS_FILE_EXTENT_INLINE) {
-				pr_info("\t\tinline extent data size %u\n",
-					btrfs_file_extent_inline_len(l, i, fi));
+				pr_info("\t\tinline extent data size %llu\n",
+					btrfs_file_extent_ram_bytes(l, fi));
				break;
			}
			pr_info("\t\textent data disk bytenr %llu nr %llu\n",
@@ -280,11 +258,8 @@ void btrfs_print_leaf(struct extent_buffer *l)
				btrfs_file_extent_ram_bytes(l, fi));
			break;
		case BTRFS_EXTENT_REF_V0_KEY:
-#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
-			print_extent_ref_v0(l, i);
-#else
-			BUG();
-#endif
+			btrfs_print_v0_err(fs_info);
+			btrfs_handle_fs_error(fs_info, -EINVAL, NULL);
			break;
		case BTRFS_BLOCK_GROUP_ITEM_KEY:
			bi = btrfs_item_ptr(l, i,
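Both print-tree.c hunks replace the old "#ifdef V0 compat, else BUG()" branches with btrfs_print_v0_err() plus btrfs_handle_fs_error(): unexpected on-disk data is now reported and the filesystem is flipped into an error state rather than crashing the kernel. A small userspace sketch of that report-and-degrade shape, using made-up names throughout:

/* Userspace sketch of the pattern that replaces BUG(): unsupported input
 * is reported once and the "filesystem" is flagged, instead of crashing. */
#include <stdbool.h>
#include <stdio.h>

struct fsi {
	bool errored;
};

static void handle_fs_error(struct fsi *fs, int err, const char *why)
{
	if (!fs->errored) {
		fprintf(stderr, "forcing read-only: %s (err %d)\n", why, err);
		fs->errored = true;
	}
}

static void print_item(struct fsi *fs, size_t item_size, size_t min_size)
{
	if (item_size < min_size) {	/* e.g. an obsolete V0 extent item */
		handle_fs_error(fs, -22, "unsupported V0 extent item");
		return;			/* keep going, don't BUG() */
	}
	printf("item ok (%zu bytes)\n", item_size);
}

int main(void)
{
	struct fsi fs = { .errored = false };

	print_item(&fs, 4, 16);		/* too small: reported, not fatal */
	print_item(&fs, 24, 16);
	return 0;
}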
@@ -530,11 +530,11 @@ void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info)
 	fs_info->qgroup_ulist = NULL;
 }

-static int add_qgroup_relation_item(struct btrfs_trans_handle *trans,
-				    struct btrfs_root *quota_root,
-				    u64 src, u64 dst)
+static int add_qgroup_relation_item(struct btrfs_trans_handle *trans, u64 src,
+				    u64 dst)
 {
 	int ret;
+	struct btrfs_root *quota_root = trans->fs_info->quota_root;
 	struct btrfs_path *path;
 	struct btrfs_key key;

@@ -554,11 +554,11 @@ static int add_qgroup_relation_item(struct btrfs_trans_handle *trans,
 	return ret;
 }

-static int del_qgroup_relation_item(struct btrfs_trans_handle *trans,
-				    struct btrfs_root *quota_root,
-				    u64 src, u64 dst)
+static int del_qgroup_relation_item(struct btrfs_trans_handle *trans, u64 src,
+				    u64 dst)
 {
 	int ret;
+	struct btrfs_root *quota_root = trans->fs_info->quota_root;
 	struct btrfs_path *path;
 	struct btrfs_key key;

@@ -653,10 +653,10 @@ static int add_qgroup_item(struct btrfs_trans_handle *trans,
 	return ret;
 }

-static int del_qgroup_item(struct btrfs_trans_handle *trans,
-			   struct btrfs_root *quota_root, u64 qgroupid)
+static int del_qgroup_item(struct btrfs_trans_handle *trans, u64 qgroupid)
 {
 	int ret;
+	struct btrfs_root *quota_root = trans->fs_info->quota_root;
 	struct btrfs_path *path;
 	struct btrfs_key key;

@@ -700,9 +700,9 @@ static int del_qgroup_item(struct btrfs_trans_handle *trans,
 }

 static int update_qgroup_limit_item(struct btrfs_trans_handle *trans,
-				    struct btrfs_root *root,
 				    struct btrfs_qgroup *qgroup)
 {
+	struct btrfs_root *quota_root = trans->fs_info->quota_root;
 	struct btrfs_path *path;
 	struct btrfs_key key;
 	struct extent_buffer *l;
@@ -718,7 +718,7 @@ static int update_qgroup_limit_item(struct btrfs_trans_handle *trans,
 	if (!path)
 		return -ENOMEM;

-	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
+	ret = btrfs_search_slot(trans, quota_root, &key, path, 0, 1);
 	if (ret > 0)
 		ret = -ENOENT;

@@ -742,9 +742,10 @@ static int update_qgroup_limit_item(struct btrfs_trans_handle *trans,
 }

 static int update_qgroup_info_item(struct btrfs_trans_handle *trans,
-				   struct btrfs_root *root,
 				   struct btrfs_qgroup *qgroup)
 {
+	struct btrfs_fs_info *fs_info = trans->fs_info;
+	struct btrfs_root *quota_root = fs_info->quota_root;
 	struct btrfs_path *path;
 	struct btrfs_key key;
 	struct extent_buffer *l;
@@ -752,7 +753,7 @@ static int update_qgroup_info_item(struct btrfs_trans_handle *trans,
 	int ret;
 	int slot;

-	if (btrfs_is_testing(root->fs_info))
+	if (btrfs_is_testing(fs_info))
 		return 0;

 	key.objectid = 0;
@@ -763,7 +764,7 @@ static int update_qgroup_info_item(struct btrfs_trans_handle *trans,
 	if (!path)
 		return -ENOMEM;

-	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
+	ret = btrfs_search_slot(trans, quota_root, &key, path, 0, 1);
 	if (ret > 0)
 		ret = -ENOENT;

@@ -786,10 +787,10 @@ static int update_qgroup_info_item(struct btrfs_trans_handle *trans,
 	return ret;
 }

-static int update_qgroup_status_item(struct btrfs_trans_handle *trans,
-				     struct btrfs_fs_info *fs_info,
-				     struct btrfs_root *root)
+static int update_qgroup_status_item(struct btrfs_trans_handle *trans)
 {
+	struct btrfs_fs_info *fs_info = trans->fs_info;
+	struct btrfs_root *quota_root = fs_info->quota_root;
 	struct btrfs_path *path;
 	struct btrfs_key key;
 	struct extent_buffer *l;
@@ -805,7 +806,7 @@ static int update_qgroup_status_item(struct btrfs_trans_handle *trans,
 	if (!path)
 		return -ENOMEM;

-	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
+	ret = btrfs_search_slot(trans, quota_root, &key, path, 0, 1);
 	if (ret > 0)
 		ret = -ENOENT;
@@ -875,8 +876,7 @@ static int btrfs_clean_quota_tree(struct btrfs_trans_handle *trans,
 	return ret;
 }

-int btrfs_quota_enable(struct btrfs_trans_handle *trans,
-		       struct btrfs_fs_info *fs_info)
+int btrfs_quota_enable(struct btrfs_fs_info *fs_info)
 {
 	struct btrfs_root *quota_root;
 	struct btrfs_root *tree_root = fs_info->tree_root;
@@ -886,6 +886,7 @@ int btrfs_quota_enable(struct btrfs_trans_handle *trans,
 	struct btrfs_key key;
 	struct btrfs_key found_key;
 	struct btrfs_qgroup *qgroup = NULL;
+	struct btrfs_trans_handle *trans = NULL;
 	int ret = 0;
 	int slot;

@@ -893,9 +894,25 @@ int btrfs_quota_enable(struct btrfs_trans_handle *trans,
 	if (fs_info->quota_root)
 		goto out;

+	/*
+	 * 1 for quota root item
+	 * 1 for BTRFS_QGROUP_STATUS item
+	 *
+	 * Yet we also need 2*n items for a QGROUP_INFO/QGROUP_LIMIT items
+	 * per subvolume. However those are not currently reserved since it
+	 * would be a lot of overkill.
+	 */
+	trans = btrfs_start_transaction(tree_root, 2);
+	if (IS_ERR(trans)) {
+		ret = PTR_ERR(trans);
+		trans = NULL;
+		goto out;
+	}
+
 	fs_info->qgroup_ulist = ulist_alloc(GFP_KERNEL);
 	if (!fs_info->qgroup_ulist) {
 		ret = -ENOMEM;
+		btrfs_abort_transaction(trans, ret);
 		goto out;
 	}

@@ -906,12 +923,14 @@ int btrfs_quota_enable(struct btrfs_trans_handle *trans,
					BTRFS_QUOTA_TREE_OBJECTID);
 	if (IS_ERR(quota_root)) {
 		ret = PTR_ERR(quota_root);
+		btrfs_abort_transaction(trans, ret);
 		goto out;
 	}

 	path = btrfs_alloc_path();
 	if (!path) {
 		ret = -ENOMEM;
+		btrfs_abort_transaction(trans, ret);
 		goto out_free_root;
 	}

@@ -921,8 +940,10 @@ int btrfs_quota_enable(struct btrfs_trans_handle *trans,

 	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
				      sizeof(*ptr));
-	if (ret)
+	if (ret) {
+		btrfs_abort_transaction(trans, ret);
 		goto out_free_path;
+	}

 	leaf = path->nodes[0];
 	ptr = btrfs_item_ptr(leaf, path->slots[0],
@@ -944,9 +965,10 @@ int btrfs_quota_enable(struct btrfs_trans_handle *trans,
 	ret = btrfs_search_slot_for_read(tree_root, &key, path, 1, 0);
 	if (ret > 0)
 		goto out_add_root;
-	if (ret < 0)
+	if (ret < 0) {
+		btrfs_abort_transaction(trans, ret);
 		goto out_free_path;
+	}

 	while (1) {
 		slot = path->slots[0];
@@ -956,18 +978,23 @@ int btrfs_quota_enable(struct btrfs_trans_handle *trans,
 		if (found_key.type == BTRFS_ROOT_REF_KEY) {
 			ret = add_qgroup_item(trans, quota_root,
					      found_key.offset);
-			if (ret)
+			if (ret) {
+				btrfs_abort_transaction(trans, ret);
 				goto out_free_path;
+			}

 			qgroup = add_qgroup_rb(fs_info, found_key.offset);
 			if (IS_ERR(qgroup)) {
 				ret = PTR_ERR(qgroup);
+				btrfs_abort_transaction(trans, ret);
 				goto out_free_path;
 			}
 		}
 		ret = btrfs_next_item(tree_root, path);
-		if (ret < 0)
+		if (ret < 0) {
+			btrfs_abort_transaction(trans, ret);
 			goto out_free_path;
+		}
 		if (ret)
 			break;
 	}
@@ -975,18 +1002,28 @@ int btrfs_quota_enable(struct btrfs_trans_handle *trans,
 out_add_root:
 	btrfs_release_path(path);
 	ret = add_qgroup_item(trans, quota_root, BTRFS_FS_TREE_OBJECTID);
-	if (ret)
+	if (ret) {
+		btrfs_abort_transaction(trans, ret);
 		goto out_free_path;
+	}

 	qgroup = add_qgroup_rb(fs_info, BTRFS_FS_TREE_OBJECTID);
 	if (IS_ERR(qgroup)) {
 		ret = PTR_ERR(qgroup);
+		btrfs_abort_transaction(trans, ret);
 		goto out_free_path;
 	}
 	spin_lock(&fs_info->qgroup_lock);
 	fs_info->quota_root = quota_root;
 	set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
 	spin_unlock(&fs_info->qgroup_lock);
+
+	ret = btrfs_commit_transaction(trans);
+	if (ret) {
+		trans = NULL;
+		goto out_free_path;
+	}
+
 	ret = qgroup_rescan_init(fs_info, 0, 1);
 	if (!ret) {
 		qgroup_rescan_zero_tracking(fs_info);
@@ -1006,20 +1043,35 @@ int btrfs_quota_enable(struct btrfs_trans_handle *trans,
 	if (ret) {
 		ulist_free(fs_info->qgroup_ulist);
 		fs_info->qgroup_ulist = NULL;
+		if (trans)
+			btrfs_end_transaction(trans);
 	}
 	mutex_unlock(&fs_info->qgroup_ioctl_lock);
 	return ret;
 }

-int btrfs_quota_disable(struct btrfs_trans_handle *trans,
-			struct btrfs_fs_info *fs_info)
+int btrfs_quota_disable(struct btrfs_fs_info *fs_info)
 {
 	struct btrfs_root *quota_root;
+	struct btrfs_trans_handle *trans = NULL;
 	int ret = 0;

 	mutex_lock(&fs_info->qgroup_ioctl_lock);
 	if (!fs_info->quota_root)
 		goto out;
+
+	/*
+	 * 1 For the root item
+	 *
+	 * We should also reserve enough items for the quota tree deletion in
+	 * btrfs_clean_quota_tree but this is not done.
+	 */
+	trans = btrfs_start_transaction(fs_info->tree_root, 1);
+	if (IS_ERR(trans)) {
+		ret = PTR_ERR(trans);
+		goto out;
+	}
+
 	clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
 	btrfs_qgroup_wait_for_completion(fs_info, false);
 	spin_lock(&fs_info->qgroup_lock);
@@ -1031,12 +1083,16 @@ int btrfs_quota_disable(struct btrfs_trans_handle *trans,
 	btrfs_free_qgroup_config(fs_info);

 	ret = btrfs_clean_quota_tree(trans, quota_root);
-	if (ret)
-		goto out;
+	if (ret) {
+		btrfs_abort_transaction(trans, ret);
+		goto end_trans;
+	}

-	ret = btrfs_del_root(trans, fs_info, &quota_root->root_key);
-	if (ret)
-		goto out;
+	ret = btrfs_del_root(trans, &quota_root->root_key);
+	if (ret) {
+		btrfs_abort_transaction(trans, ret);
+		goto end_trans;
+	}

 	list_del(&quota_root->dirty_list);

@@ -1048,6 +1104,9 @@ int btrfs_quota_disable(struct btrfs_trans_handle *trans,
 	free_extent_buffer(quota_root->node);
 	free_extent_buffer(quota_root->commit_root);
 	kfree(quota_root);
+
+end_trans:
+	ret = btrfs_end_transaction(trans);
 out:
 	mutex_unlock(&fs_info->qgroup_ioctl_lock);
 	return ret;
@@ -1177,9 +1236,10 @@ static int quick_update_accounting(struct btrfs_fs_info *fs_info,
 	return ret;
 }

-int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans,
-			      struct btrfs_fs_info *fs_info, u64 src, u64 dst)
+int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
+			      u64 dst)
 {
+	struct btrfs_fs_info *fs_info = trans->fs_info;
 	struct btrfs_root *quota_root;
 	struct btrfs_qgroup *parent;
 	struct btrfs_qgroup *member;
@@ -1216,13 +1276,13 @@ int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans,
 		}
 	}

-	ret = add_qgroup_relation_item(trans, quota_root, src, dst);
+	ret = add_qgroup_relation_item(trans, src, dst);
 	if (ret)
 		goto out;

-	ret = add_qgroup_relation_item(trans, quota_root, dst, src);
+	ret = add_qgroup_relation_item(trans, dst, src);
 	if (ret) {
-		del_qgroup_relation_item(trans, quota_root, src, dst);
+		del_qgroup_relation_item(trans, src, dst);
 		goto out;
 	}

@@ -1240,9 +1300,10 @@ int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans,
 	return ret;
 }

-static int __del_qgroup_relation(struct btrfs_trans_handle *trans,
-				 struct btrfs_fs_info *fs_info, u64 src, u64 dst)
+static int __del_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
+				 u64 dst)
 {
+	struct btrfs_fs_info *fs_info = trans->fs_info;
 	struct btrfs_root *quota_root;
 	struct btrfs_qgroup *parent;
 	struct btrfs_qgroup *member;
@@ -1276,8 +1337,8 @@ static int __del_qgroup_relation(struct btrfs_trans_handle *trans,
 	ret = -ENOENT;
 	goto out;
 exist:
-	ret = del_qgroup_relation_item(trans, quota_root, src, dst);
-	err = del_qgroup_relation_item(trans, quota_root, dst, src);
+	ret = del_qgroup_relation_item(trans, src, dst);
+	err = del_qgroup_relation_item(trans, dst, src);
 	if (err && !ret)
 		ret = err;

@@ -1290,21 +1351,22 @@ static int __del_qgroup_relation(struct btrfs_trans_handle *trans,
 	return ret;
 }

-int btrfs_del_qgroup_relation(struct btrfs_trans_handle *trans,
-			      struct btrfs_fs_info *fs_info, u64 src, u64 dst)
+int btrfs_del_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
+			      u64 dst)
 {
+	struct btrfs_fs_info *fs_info = trans->fs_info;
 	int ret = 0;

 	mutex_lock(&fs_info->qgroup_ioctl_lock);
-	ret = __del_qgroup_relation(trans, fs_info, src, dst);
+	ret = __del_qgroup_relation(trans, src, dst);
 	mutex_unlock(&fs_info->qgroup_ioctl_lock);

 	return ret;
 }

-int btrfs_create_qgroup(struct btrfs_trans_handle *trans,
-			struct btrfs_fs_info *fs_info, u64 qgroupid)
+int btrfs_create_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid)
 {
+	struct btrfs_fs_info *fs_info = trans->fs_info;
 	struct btrfs_root *quota_root;
 	struct btrfs_qgroup *qgroup;
 	int ret = 0;
@@ -1336,9 +1398,9 @@ int btrfs_create_qgroup(struct btrfs_trans_handle *trans,
 	return ret;
 }

-int btrfs_remove_qgroup(struct btrfs_trans_handle *trans,
-			struct btrfs_fs_info *fs_info, u64 qgroupid)
+int btrfs_remove_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid)
 {
+	struct btrfs_fs_info *fs_info = trans->fs_info;
 	struct btrfs_root *quota_root;
 	struct btrfs_qgroup *qgroup;
 	struct btrfs_qgroup_list *list;
@@ -1362,16 +1424,15 @@ int btrfs_remove_qgroup(struct btrfs_trans_handle *trans,
 			goto out;
 		}
 	}
-	ret = del_qgroup_item(trans, quota_root, qgroupid);
+	ret = del_qgroup_item(trans, qgroupid);
 	if (ret && ret != -ENOENT)
 		goto out;

 	while (!list_empty(&qgroup->groups)) {
 		list = list_first_entry(&qgroup->groups,
					struct btrfs_qgroup_list, next_group);
-		ret = __del_qgroup_relation(trans, fs_info,
-					    qgroupid,
-					    list->group->qgroupid);
+		ret = __del_qgroup_relation(trans, qgroupid,
+					    list->group->qgroupid);
 		if (ret)
 			goto out;
 	}
@@ -1384,10 +1445,10 @@ int btrfs_remove_qgroup(struct btrfs_trans_handle *trans,
 	return ret;
 }

-int btrfs_limit_qgroup(struct btrfs_trans_handle *trans,
-		       struct btrfs_fs_info *fs_info, u64 qgroupid,
+int btrfs_limit_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid,
		       struct btrfs_qgroup_limit *limit)
 {
+	struct btrfs_fs_info *fs_info = trans->fs_info;
 	struct btrfs_root *quota_root;
 	struct btrfs_qgroup *qgroup;
 	int ret = 0;
@@ -1451,7 +1512,7 @@ int btrfs_limit_qgroup(struct btrfs_trans_handle *trans,

 	spin_unlock(&fs_info->qgroup_lock);

-	ret = update_qgroup_limit_item(trans, quota_root, qgroup);
+	ret = update_qgroup_limit_item(trans, qgroup);
 	if (ret) {
 		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
 		btrfs_info(fs_info, "unable to update quota limit for %llu",
@@ -1519,10 +1580,10 @@ int btrfs_qgroup_trace_extent_post(struct btrfs_fs_info *fs_info,
 	return 0;
 }

-int btrfs_qgroup_trace_extent(struct btrfs_trans_handle *trans,
-		struct btrfs_fs_info *fs_info, u64 bytenr, u64 num_bytes,
-		gfp_t gfp_flag)
+int btrfs_qgroup_trace_extent(struct btrfs_trans_handle *trans, u64 bytenr,
+			      u64 num_bytes, gfp_t gfp_flag)
 {
+	struct btrfs_fs_info *fs_info = trans->fs_info;
 	struct btrfs_qgroup_extent_record *record;
 	struct btrfs_delayed_ref_root *delayed_refs;
 	int ret;
@@ -1530,8 +1591,6 @@ int btrfs_qgroup_trace_extent(struct btrfs_trans_handle *trans,
 	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)
	    || bytenr == 0 || num_bytes == 0)
 		return 0;
-	if (WARN_ON(trans == NULL))
-		return -EINVAL;
 	record = kmalloc(sizeof(*record), gfp_flag);
 	if (!record)
 		return -ENOMEM;
@@ -1552,9 +1611,9 @@ int btrfs_qgroup_trace_extent(struct btrfs_trans_handle *trans,
 }

 int btrfs_qgroup_trace_leaf_items(struct btrfs_trans_handle *trans,
-				  struct btrfs_fs_info *fs_info,
				  struct extent_buffer *eb)
 {
+	struct btrfs_fs_info *fs_info = trans->fs_info;
 	int nr = btrfs_header_nritems(eb);
 	int i, extent_type, ret;
 	struct btrfs_key key;
@@ -1584,8 +1643,8 @@ int btrfs_qgroup_trace_leaf_items(struct btrfs_trans_handle *trans,

 			num_bytes = btrfs_file_extent_disk_num_bytes(eb, fi);

-		ret = btrfs_qgroup_trace_extent(trans, fs_info, bytenr,
-						num_bytes, GFP_NOFS);
+		ret = btrfs_qgroup_trace_extent(trans, bytenr, num_bytes,
+						GFP_NOFS);
 		if (ret)
 			return ret;
 	}
@@ -1655,11 +1714,10 @@ static int adjust_slots_upwards(struct btrfs_path *path, int root_level)
 }

 int btrfs_qgroup_trace_subtree(struct btrfs_trans_handle *trans,
-			       struct btrfs_root *root,
			       struct extent_buffer *root_eb,
			       u64 root_gen, int root_level)
 {
-	struct btrfs_fs_info *fs_info = root->fs_info;
+	struct btrfs_fs_info *fs_info = trans->fs_info;
 	int ret = 0;
 	int level;
 	struct extent_buffer *eb = root_eb;
@@ -1678,7 +1736,7 @@ int btrfs_qgroup_trace_subtree(struct btrfs_trans_handle *trans,
 	}

 	if (root_level == 0) {
-		ret = btrfs_qgroup_trace_leaf_items(trans, fs_info, root_eb);
+		ret = btrfs_qgroup_trace_leaf_items(trans, root_eb);
 		goto out;
 	}

@@ -1736,8 +1794,7 @@ int btrfs_qgroup_trace_subtree(struct btrfs_trans_handle *trans,
			btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
			path->locks[level] = BTRFS_READ_LOCK_BLOCKING;

-			ret = btrfs_qgroup_trace_extent(trans, fs_info,
-							child_bytenr,
+			ret = btrfs_qgroup_trace_extent(trans, child_bytenr,
							fs_info->nodesize,
							GFP_NOFS);
			if (ret)
@@ -1745,8 +1802,8 @@ int btrfs_qgroup_trace_subtree(struct btrfs_trans_handle *trans,
 		}

 		if (level == 0) {
-			ret = btrfs_qgroup_trace_leaf_items(trans,fs_info,
-					path->nodes[level]);
+			ret = btrfs_qgroup_trace_leaf_items(trans,
-					path->nodes[level]);
			if (ret)
				goto out;
@@ -1981,12 +2038,11 @@ static int maybe_fs_roots(struct ulist *roots)
 	return is_fstree(unode->val);
 }

-int
-btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans,
-			    struct btrfs_fs_info *fs_info,
-			    u64 bytenr, u64 num_bytes,
-			    struct ulist *old_roots, struct ulist *new_roots)
+int btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans, u64 bytenr,
+				u64 num_bytes, struct ulist *old_roots,
+				struct ulist *new_roots)
 {
+	struct btrfs_fs_info *fs_info = trans->fs_info;
 	struct ulist *qgroups = NULL;
 	struct ulist *tmp = NULL;
 	u64 seq;
@@ -2116,9 +2172,10 @@ int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans)
				ulist_del(record->old_roots, qgroup_to_skip,
					  0);
			}
-			ret = btrfs_qgroup_account_extent(trans, fs_info,
-					record->bytenr, record->num_bytes,
-					record->old_roots, new_roots);
+			ret = btrfs_qgroup_account_extent(trans, record->bytenr,
+							  record->num_bytes,
+							  record->old_roots,
+							  new_roots);
			record->old_roots = NULL;
			new_roots = NULL;
		}
@@ -2136,9 +2193,9 @@ int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans)
 /*
  * called from commit_transaction. Writes all changed qgroups to disk.
  */
-int btrfs_run_qgroups(struct btrfs_trans_handle *trans,
-		      struct btrfs_fs_info *fs_info)
+int btrfs_run_qgroups(struct btrfs_trans_handle *trans)
 {
+	struct btrfs_fs_info *fs_info = trans->fs_info;
 	struct btrfs_root *quota_root = fs_info->quota_root;
 	int ret = 0;

@@ -2152,11 +2209,11 @@ int btrfs_run_qgroups(struct btrfs_trans_handle *trans,
				  struct btrfs_qgroup, dirty);
 		list_del_init(&qgroup->dirty);
 		spin_unlock(&fs_info->qgroup_lock);
-		ret = update_qgroup_info_item(trans, quota_root, qgroup);
+		ret = update_qgroup_info_item(trans, qgroup);
 		if (ret)
			fs_info->qgroup_flags |=
					BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
-		ret = update_qgroup_limit_item(trans, quota_root, qgroup);
+		ret = update_qgroup_limit_item(trans, qgroup);
 		if (ret)
			fs_info->qgroup_flags |=
					BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
@@ -2168,7 +2225,7 @@ int btrfs_run_qgroups(struct btrfs_trans_handle *trans,
 		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON;
 	spin_unlock(&fs_info->qgroup_lock);

-	ret = update_qgroup_status_item(trans, fs_info, quota_root);
+	ret = update_qgroup_status_item(trans);
 	if (ret)
 		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;

@@ -2181,13 +2238,13 @@ int btrfs_run_qgroups(struct btrfs_trans_handle *trans,
  * cause a transaction abort so we take extra care here to only error
  * when a readonly fs is a reasonable outcome.
  */
-int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
-			 struct btrfs_fs_info *fs_info, u64 srcid, u64 objectid,
-			 struct btrfs_qgroup_inherit *inherit)
+int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid,
+			 u64 objectid, struct btrfs_qgroup_inherit *inherit)
 {
 	int ret = 0;
 	int i;
 	u64 *i_qgroups;
+	struct btrfs_fs_info *fs_info = trans->fs_info;
 	struct btrfs_root *quota_root = fs_info->quota_root;
 	struct btrfs_qgroup *srcgroup;
 	struct btrfs_qgroup *dstgroup;
@@ -2229,22 +2286,6 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
 	if (ret)
 		goto out;

-	if (srcid) {
-		struct btrfs_root *srcroot;
-		struct btrfs_key srckey;
-
-		srckey.objectid = srcid;
-		srckey.type = BTRFS_ROOT_ITEM_KEY;
-		srckey.offset = (u64)-1;
-		srcroot = btrfs_read_fs_root_no_name(fs_info, &srckey);
-		if (IS_ERR(srcroot)) {
-			ret = PTR_ERR(srcroot);
-			goto out;
-		}
-
-		level_size = fs_info->nodesize;
-	}
-
 	/*
 	 * add qgroup to all inherited groups
 	 */
@@ -2253,12 +2294,12 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
 		for (i = 0; i < inherit->num_qgroups; ++i, ++i_qgroups) {
			if (*i_qgroups == 0)
				continue;
-			ret = add_qgroup_relation_item(trans, quota_root,
-						       objectid, *i_qgroups);
+			ret = add_qgroup_relation_item(trans, objectid,
+						       *i_qgroups);
			if (ret && ret != -EEXIST)
				goto out;
-			ret = add_qgroup_relation_item(trans, quota_root,
-						       *i_qgroups, objectid);
+			ret = add_qgroup_relation_item(trans, *i_qgroups,
+						       objectid);
			if (ret && ret != -EEXIST)
				goto out;
		}
@@ -2281,7 +2322,7 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
 		dstgroup->rsv_rfer = inherit->lim.rsv_rfer;
 		dstgroup->rsv_excl = inherit->lim.rsv_excl;

-		ret = update_qgroup_limit_item(trans, quota_root, dstgroup);
+		ret = update_qgroup_limit_item(trans, dstgroup);
 		if (ret) {
			fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
			btrfs_info(fs_info,
@@ -2301,6 +2342,7 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
		 * our counts don't go crazy, so at this point the only
		 * difference between the two roots should be the root node.
		 */
+		level_size = fs_info->nodesize;
 		dstgroup->rfer = srcgroup->rfer;
 		dstgroup->rfer_cmpr = srcgroup->rfer_cmpr;
 		dstgroup->excl = level_size;
@@ -2598,10 +2640,10 @@ static bool is_last_leaf(struct btrfs_path *path)
  * returns < 0 on error, 0 when more leafs are to be scanned.
  * returns 1 when done.
  */
-static int
-qgroup_rescan_leaf(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
-		   struct btrfs_trans_handle *trans)
+static int qgroup_rescan_leaf(struct btrfs_trans_handle *trans,
+			      struct btrfs_path *path)
 {
+	struct btrfs_fs_info *fs_info = trans->fs_info;
 	struct btrfs_key found;
 	struct extent_buffer *scratch_leaf = NULL;
 	struct ulist *roots = NULL;
@@ -2669,8 +2711,8 @@ qgroup_rescan_leaf(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
 		if (ret < 0)
			goto out;
 		/* For rescan, just pass old_roots as NULL */
-		ret = btrfs_qgroup_account_extent(trans, fs_info,
-				found.objectid, num_bytes, NULL, roots);
+		ret = btrfs_qgroup_account_extent(trans, found.objectid,
+						  num_bytes, NULL, roots);
 		if (ret < 0)
			goto out;
 	}
@@ -2716,7 +2758,7 @@ static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
 		if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) {
			err = -EINTR;
 		} else {
-			err = qgroup_rescan_leaf(fs_info, path, trans);
+			err = qgroup_rescan_leaf(trans, path);
 		}
 		if (err > 0)
			btrfs_commit_transaction(trans);
@@ -2751,7 +2793,7 @@ static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
			  err);
 		goto done;
 	}
-	ret = update_qgroup_status_item(trans, fs_info, fs_info->quota_root);
+	ret = update_qgroup_status_item(trans);
 	if (ret < 0) {
 		err = ret;
 		btrfs_err(fs_info, "fail to update qgroup status: %d", err);
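The quota enable/disable hunks above move transaction management inside the two functions: each now starts its own transaction with an explicit (and, per the in-code comments, knowingly incomplete) item reservation, aborts it on any mid-way failure, and ends or commits it before returning. The self-contained sketch below mirrors that ownership pattern only in shape; start_transaction, abort_transaction and end_transaction here are toy stand-ins, not kernel functions.

/* Standalone sketch of moving transaction ownership into the operation:
 * start internally, abort on failure, never leak the handle to the caller. */
#include <stdio.h>
#include <stdlib.h>

struct trans {
	int aborted;
};

static struct trans *start_transaction(int items)
{
	struct trans *t = calloc(1, sizeof(*t));

	if (t)
		printf("reserved %d items\n", items);
	return t;
}

static void abort_transaction(struct trans *t)
{
	t->aborted = 1;
}

static int end_transaction(struct trans *t)
{
	int ret = t->aborted ? -5 : 0;

	free(t);
	return ret;
}

static int quota_enable(int fail_step)
{
	/* 2 items: one for the root item, one for the status item */
	struct trans *t = start_transaction(2);

	if (!t)
		return -12;
	if (fail_step) {		/* any mid-way failure aborts */
		abort_transaction(t);
		end_transaction(t);
		return -5;
	}
	return end_transaction(t);	/* commit path */
}

int main(void)
{
	printf("ok run: %d\n", quota_enable(0));
	printf("failing run: %d\n", quota_enable(1));
	return 0;
}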
@@ -141,24 +141,19 @@ struct btrfs_qgroup {
 #define QGROUP_RELEASE		(1<<1)
 #define QGROUP_FREE		(1<<2)

-int btrfs_quota_enable(struct btrfs_trans_handle *trans,
-		       struct btrfs_fs_info *fs_info);
-int btrfs_quota_disable(struct btrfs_trans_handle *trans,
-			struct btrfs_fs_info *fs_info);
+int btrfs_quota_enable(struct btrfs_fs_info *fs_info);
+int btrfs_quota_disable(struct btrfs_fs_info *fs_info);
 int btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info);
 void btrfs_qgroup_rescan_resume(struct btrfs_fs_info *fs_info);
 int btrfs_qgroup_wait_for_completion(struct btrfs_fs_info *fs_info,
				     bool interruptible);
-int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans,
-			      struct btrfs_fs_info *fs_info, u64 src, u64 dst);
-int btrfs_del_qgroup_relation(struct btrfs_trans_handle *trans,
-			      struct btrfs_fs_info *fs_info, u64 src, u64 dst);
-int btrfs_create_qgroup(struct btrfs_trans_handle *trans,
-			struct btrfs_fs_info *fs_info, u64 qgroupid);
-int btrfs_remove_qgroup(struct btrfs_trans_handle *trans,
-			struct btrfs_fs_info *fs_info, u64 qgroupid);
-int btrfs_limit_qgroup(struct btrfs_trans_handle *trans,
-		       struct btrfs_fs_info *fs_info, u64 qgroupid,
+int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
+			      u64 dst);
+int btrfs_del_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
+			      u64 dst);
+int btrfs_create_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid);
+int btrfs_remove_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid);
+int btrfs_limit_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid,
		       struct btrfs_qgroup_limit *limit);
 int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info);
 void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info);
@@ -217,9 +212,8 @@ int btrfs_qgroup_trace_extent_post(struct btrfs_fs_info *fs_info,
  * Return <0 for error, like memory allocation failure or invalid parameter
  * (NULL trans)
  */
-int btrfs_qgroup_trace_extent(struct btrfs_trans_handle *trans,
-		struct btrfs_fs_info *fs_info, u64 bytenr, u64 num_bytes,
-		gfp_t gfp_flag);
+int btrfs_qgroup_trace_extent(struct btrfs_trans_handle *trans, u64 bytenr,
+			      u64 num_bytes, gfp_t gfp_flag);

 /*
  * Inform qgroup to trace all leaf items of data
@@ -228,7 +222,6 @@ int btrfs_qgroup_trace_extent(struct btrfs_trans_handle *trans,
  * Return <0 for error(ENOMEM)
  */
 int btrfs_qgroup_trace_leaf_items(struct btrfs_trans_handle *trans,
-				  struct btrfs_fs_info *fs_info,
				  struct extent_buffer *eb);
 /*
  * Inform qgroup to trace a whole subtree, including all its child tree
@@ -241,20 +234,15 @@ int btrfs_qgroup_trace_leaf_items(struct btrfs_trans_handle *trans,
  * Return <0 for error(ENOMEM or tree search error)
  */
 int btrfs_qgroup_trace_subtree(struct btrfs_trans_handle *trans,
-			       struct btrfs_root *root,
			       struct extent_buffer *root_eb,
			       u64 root_gen, int root_level);
-int
-btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans,
-			    struct btrfs_fs_info *fs_info,
-			    u64 bytenr, u64 num_bytes,
-			    struct ulist *old_roots, struct ulist *new_roots);
+int btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans, u64 bytenr,
+				u64 num_bytes, struct ulist *old_roots,
+				struct ulist *new_roots);
 int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans);
-int btrfs_run_qgroups(struct btrfs_trans_handle *trans,
-		      struct btrfs_fs_info *fs_info);
-int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
-			 struct btrfs_fs_info *fs_info, u64 srcid, u64 objectid,
-			 struct btrfs_qgroup_inherit *inherit);
+int btrfs_run_qgroups(struct btrfs_trans_handle *trans);
+int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid,
+			 u64 objectid, struct btrfs_qgroup_inherit *inherit);
 void btrfs_qgroup_free_refroot(struct btrfs_fs_info *fs_info,
			       u64 ref_root, u64 num_bytes,
			       enum btrfs_qgroup_rsv_type type);
@@ -5,32 +5,19 @@
  */

 #include <linux/sched.h>
-#include <linux/wait.h>
 #include <linux/bio.h>
 #include <linux/slab.h>
-#include <linux/buffer_head.h>
 #include <linux/blkdev.h>
-#include <linux/random.h>
-#include <linux/iocontext.h>
-#include <linux/capability.h>
-#include <linux/ratelimit.h>
-#include <linux/kthread.h>
 #include <linux/raid/pq.h>
-#include <linux/hash.h>
-#include <linux/list_sort.h>
 #include <linux/raid/xor.h>
 #include <linux/mm.h>
-#include <asm/div64.h>
 #include "ctree.h"
 #include "extent_map.h"
 #include "disk-io.h"
 #include "transaction.h"
-#include "print-tree.h"
 #include "volumes.h"
 #include "raid56.h"
 #include "async-thread.h"
-#include "check-integrity.h"
-#include "rcu-string.h"

 /* set when additional merges to this rbio are not allowed */
 #define RBIO_RMW_LOCKED_BIT	1
@@ -175,8 +162,6 @@ static int __raid56_parity_recover(struct btrfs_raid_bio *rbio);
 static noinline void finish_rmw(struct btrfs_raid_bio *rbio);
 static void rmw_work(struct btrfs_work *work);
 static void read_rebuild_work(struct btrfs_work *work);
-static void async_rmw_stripe(struct btrfs_raid_bio *rbio);
-static void async_read_rebuild(struct btrfs_raid_bio *rbio);
 static int fail_bio_stripe(struct btrfs_raid_bio *rbio, struct bio *bio);
 static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed);
 static void __free_raid_bio(struct btrfs_raid_bio *rbio);
@@ -185,7 +170,13 @@ static int alloc_rbio_pages(struct btrfs_raid_bio *rbio);

 static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
					 int need_check);
-static void async_scrub_parity(struct btrfs_raid_bio *rbio);
+static void scrub_parity_work(struct btrfs_work *work);
+
+static void start_async_work(struct btrfs_raid_bio *rbio, btrfs_func_t work_func)
+{
+	btrfs_init_work(&rbio->work, btrfs_rmw_helper, work_func, NULL, NULL);
+	btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work);
+}

 /*
  * the stripe hash table is used for locking, and to collect
@@ -260,7 +251,7 @@ static void cache_rbio_pages(struct btrfs_raid_bio *rbio)
 		s = kmap(rbio->bio_pages[i]);
 		d = kmap(rbio->stripe_pages[i]);

-		memcpy(d, s, PAGE_SIZE);
+		copy_page(d, s);

 		kunmap(rbio->bio_pages[i]);
 		kunmap(rbio->stripe_pages[i]);
@@ -516,32 +507,21 @@ static void run_xor(void **pages, int src_cnt, ssize_t len)
 }

 /*
- * returns true if the bio list inside this rbio
- * covers an entire stripe (no rmw required).
- * Must be called with the bio list lock held, or
- * at a time when you know it is impossible to add
- * new bios into the list
+ * Returns true if the bio list inside this rbio covers an entire stripe (no
+ * rmw required).
  */
-static int __rbio_is_full(struct btrfs_raid_bio *rbio)
-{
-	unsigned long size = rbio->bio_list_bytes;
-	int ret = 1;
-
-	if (size != rbio->nr_data * rbio->stripe_len)
-		ret = 0;
-
-	BUG_ON(size > rbio->nr_data * rbio->stripe_len);
-	return ret;
-}
-
 static int rbio_is_full(struct btrfs_raid_bio *rbio)
 {
 	unsigned long flags;
-	int ret;
+	unsigned long size = rbio->bio_list_bytes;
+	int ret = 1;

 	spin_lock_irqsave(&rbio->bio_list_lock, flags);
-	ret = __rbio_is_full(rbio);
+	if (size != rbio->nr_data * rbio->stripe_len)
+		ret = 0;
+	BUG_ON(size > rbio->nr_data * rbio->stripe_len);
 	spin_unlock_irqrestore(&rbio->bio_list_lock, flags);
+
 	return ret;
 }
@@ -812,16 +792,16 @@ static noinline void unlock_stripe(struct btrfs_raid_bio *rbio)
			spin_unlock_irqrestore(&h->lock, flags);

			if (next->operation == BTRFS_RBIO_READ_REBUILD)
-				async_read_rebuild(next);
+				start_async_work(next, read_rebuild_work);
			else if (next->operation == BTRFS_RBIO_REBUILD_MISSING) {
				steal_rbio(rbio, next);
-				async_read_rebuild(next);
+				start_async_work(next, read_rebuild_work);
			} else if (next->operation == BTRFS_RBIO_WRITE) {
				steal_rbio(rbio, next);
-				async_rmw_stripe(next);
+				start_async_work(next, rmw_work);
			} else if (next->operation == BTRFS_RBIO_PARITY_SCRUB) {
				steal_rbio(rbio, next);
-				async_scrub_parity(next);
+				start_async_work(next, scrub_parity_work);
			}

			goto done_nolock;
@@ -1275,7 +1255,7 @@ static noinline void finish_rmw(struct btrfs_raid_bio *rbio)
				    pointers);
 		} else {
			/* raid5 */
-			memcpy(pointers[nr_data], pointers[0], PAGE_SIZE);
+			copy_page(pointers[nr_data], pointers[0]);
			run_xor(pointers + 1, nr_data - 1, PAGE_SIZE);
 		}
@@ -1343,7 +1323,7 @@ static noinline void finish_rmw(struct btrfs_raid_bio *rbio)

 		bio->bi_private = rbio;
 		bio->bi_end_io = raid_write_end_io;
-		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
+		bio->bi_opf = REQ_OP_WRITE;

 		submit_bio(bio);
 	}
@@ -1508,20 +1488,6 @@ static void raid_rmw_end_io(struct bio *bio)
 	rbio_orig_end_io(rbio, BLK_STS_IOERR);
 }

-static void async_rmw_stripe(struct btrfs_raid_bio *rbio)
-{
-	btrfs_init_work(&rbio->work, btrfs_rmw_helper, rmw_work, NULL, NULL);
-	btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work);
-}
-
-static void async_read_rebuild(struct btrfs_raid_bio *rbio)
-{
-	btrfs_init_work(&rbio->work, btrfs_rmw_helper,
-			read_rebuild_work, NULL, NULL);
-
-	btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work);
-}
-
 /*
  * the stripe must be locked by the caller. It will
  * unlock after all the writes are done
@@ -1599,7 +1565,7 @@ static int raid56_rmw_stripe(struct btrfs_raid_bio *rbio)

 		bio->bi_private = rbio;
 		bio->bi_end_io = raid_rmw_end_io;
-		bio_set_op_attrs(bio, REQ_OP_READ, 0);
+		bio->bi_opf = REQ_OP_READ;

 		btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);
@@ -1652,7 +1618,7 @@ static int partial_stripe_write(struct btrfs_raid_bio *rbio)

 	ret = lock_stripe_add(rbio);
 	if (ret == 0)
-		async_rmw_stripe(rbio);
+		start_async_work(rbio, rmw_work);
 	return 0;
 }
@@ -1720,8 +1686,11 @@ static void run_plug(struct btrfs_plug_cb *plug)
 		list_del_init(&cur->plug_list);

 		if (rbio_is_full(cur)) {
+			int ret;
+
			/* we have a full stripe, send it down */
-			full_stripe_write(cur);
+			ret = full_stripe_write(cur);
+			BUG_ON(ret);
			continue;
 		}
 		if (last) {
@@ -1941,9 +1910,7 @@ static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
			BUG_ON(failb != -1);
 pstripe:
			/* Copy parity block into failed block to start with */
-			memcpy(pointers[faila],
-			       pointers[rbio->nr_data],
-			       PAGE_SIZE);
+			copy_page(pointers[faila], pointers[rbio->nr_data]);

			/* rearrange the pointer array */
			p = pointers[faila];
@@ -2145,7 +2112,7 @@ static int __raid56_parity_recover(struct btrfs_raid_bio *rbio)

 		bio->bi_private = rbio;
 		bio->bi_end_io = raid_recover_end_io;
-		bio_set_op_attrs(bio, REQ_OP_READ, 0);
+		bio->bi_opf = REQ_OP_READ;

 		btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);
@@ -2448,7 +2415,7 @@ static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
				    pointers);
 		} else {
			/* raid5 */
-			memcpy(pointers[nr_data], pointers[0], PAGE_SIZE);
+			copy_page(pointers[nr_data], pointers[0]);
			run_xor(pointers + 1, nr_data - 1, PAGE_SIZE);
 		}
@@ -2456,7 +2423,7 @@ static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
 		p = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
 		parity = kmap(p);
 		if (memcmp(parity, pointers[rbio->scrubp], PAGE_SIZE))
-			memcpy(parity, pointers[rbio->scrubp], PAGE_SIZE);
+			copy_page(parity, pointers[rbio->scrubp]);
 		else
			/* Parity is right, needn't writeback */
			bitmap_clear(rbio->dbitmap, pagenr, 1);
@@ -2517,7 +2484,7 @@ static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,

 		bio->bi_private = rbio;
 		bio->bi_end_io = raid_write_end_io;
-		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
+		bio->bi_opf = REQ_OP_WRITE;

 		submit_bio(bio);
 	}
@@ -2699,7 +2666,7 @@ static void raid56_parity_scrub_stripe(struct btrfs_raid_bio *rbio)

 		bio->bi_private = rbio;
 		bio->bi_end_io = raid56_parity_scrub_end_io;
-		bio_set_op_attrs(bio, REQ_OP_READ, 0);
+		bio->bi_opf = REQ_OP_READ;

 		btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);
@@ -2728,18 +2695,10 @@ static void scrub_parity_work(struct btrfs_work *work)
 	raid56_parity_scrub_stripe(rbio);
 }

-static void async_scrub_parity(struct btrfs_raid_bio *rbio)
-{
-	btrfs_init_work(&rbio->work, btrfs_rmw_helper,
-			scrub_parity_work, NULL, NULL);
-
-	btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work);
-}
-
 void raid56_parity_submit_scrub_rbio(struct btrfs_raid_bio *rbio)
 {
 	if (!lock_stripe_add(rbio))
-		async_scrub_parity(rbio);
+		start_async_work(rbio, scrub_parity_work);
 }

 /* The following code is used for dev replace of a missing RAID 5/6 device. */
@@ -2781,5 +2740,5 @@ raid56_alloc_missing_rbio(struct btrfs_fs_info *fs_info, struct bio *bio,
 void raid56_submit_missing_rbio(struct btrfs_raid_bio *rbio)
 {
 	if (!lock_stripe_add(rbio))
-		async_read_rebuild(rbio);
+		start_async_work(rbio, read_rebuild_work);
 }
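The raid56.c changes collapse three nearly identical async_* wrappers into the single start_async_work() helper introduced near the top of the file, parameterized by the work callback. The userspace sketch below reproduces the shape of that refactor with a plain function pointer; the kernel version queues onto a workqueue rather than calling inline, and all names here are stand-ins.

/* Userspace sketch of the start_async_work() consolidation: three
 * near-identical wrappers become one helper taking the work callback. */
#include <stdio.h>

struct rbio {
	const char *id;
};

typedef void (*work_fn)(struct rbio *);

static void rmw_work(struct rbio *r)          { printf("%s: rmw\n", r->id); }
static void read_rebuild_work(struct rbio *r) { printf("%s: rebuild\n", r->id); }
static void scrub_parity_work(struct rbio *r) { printf("%s: scrub\n", r->id); }

/* one queueing helper instead of async_rmw_stripe()/async_read_rebuild()/
 * async_scrub_parity(); a real implementation would queue, not call inline */
static void start_async_work(struct rbio *rbio, work_fn fn)
{
	fn(rbio);
}

int main(void)
{
	struct rbio r = { .id = "rbio0" };

	start_async_work(&r, rmw_work);
	start_async_work(&r, read_rebuild_work);
	start_async_work(&r, scrub_parity_work);
	return 0;
}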
@@ -7,7 +7,6 @@
 #include <linux/pagemap.h>
 #include <linux/writeback.h>
 #include <linux/blkdev.h>
-#include <linux/rbtree.h>
 #include <linux/slab.h>
 #include <linux/workqueue.h>
 #include "ctree.h"
@@ -355,7 +354,7 @@ static struct reada_extent *reada_find_extent(struct btrfs_fs_info *fs_info,
 		dev = bbio->stripes[nzones].dev;

 		/* cannot read ahead on missing device. */
-		 if (!dev->bdev)
+		if (!dev->bdev)
			continue;

 		zone = reada_find_zone(dev, logical, bbio);
@@ -586,29 +586,6 @@ static struct btrfs_root *read_fs_root(struct btrfs_fs_info *fs_info,
 	return btrfs_get_fs_root(fs_info, &key, false);
 }

-#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
-static noinline_for_stack
-struct btrfs_root *find_tree_root(struct reloc_control *rc,
-				  struct extent_buffer *leaf,
-				  struct btrfs_extent_ref_v0 *ref0)
-{
-	struct btrfs_root *root;
-	u64 root_objectid = btrfs_ref_root_v0(leaf, ref0);
-	u64 generation = btrfs_ref_generation_v0(leaf, ref0);
-
-	BUG_ON(root_objectid == BTRFS_TREE_RELOC_OBJECTID);
-
-	root = read_fs_root(rc->extent_root->fs_info, root_objectid);
-	BUG_ON(IS_ERR(root));
-
-	if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
-	    generation != btrfs_root_generation(&root->root_item))
-		return NULL;
-
-	return root;
-}
-#endif
-
 static noinline_for_stack
 int find_inline_backref(struct extent_buffer *leaf, int slot,
			unsigned long *ptr, unsigned long *end)
@@ -621,12 +598,11 @@ int find_inline_backref(struct extent_buffer *leaf, int slot,
 	btrfs_item_key_to_cpu(leaf, &key, slot);

 	item_size = btrfs_item_size_nr(leaf, slot);
-#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
 	if (item_size < sizeof(*ei)) {
-		WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
+		btrfs_print_v0_err(leaf->fs_info);
+		btrfs_handle_fs_error(leaf->fs_info, -EINVAL, NULL);
 		return 1;
 	}
-#endif
 	ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
 	WARN_ON(!(btrfs_extent_flags(leaf, ei) &
		  BTRFS_EXTENT_FLAG_TREE_BLOCK));
@@ -792,7 +768,7 @@ struct backref_node *build_backref_tree(struct reloc_control *rc,
			type = btrfs_get_extent_inline_ref_type(eb, iref,
							BTRFS_REF_TYPE_BLOCK);
			if (type == BTRFS_REF_TYPE_INVALID) {
-				err = -EINVAL;
+				err = -EUCLEAN;
				goto out;
			}
			key.type = type;
@@ -811,29 +787,7 @@ struct backref_node *build_backref_tree(struct reloc_control *rc,
			goto next;
 		}

-#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
-		if (key.type == BTRFS_SHARED_BLOCK_REF_KEY ||
-		    key.type == BTRFS_EXTENT_REF_V0_KEY) {
-			if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
-				struct btrfs_extent_ref_v0 *ref0;
-				ref0 = btrfs_item_ptr(eb, path1->slots[0],
-						struct btrfs_extent_ref_v0);
-				if (key.objectid == key.offset) {
-					root = find_tree_root(rc, eb, ref0);
-					if (root && !should_ignore_root(root))
-						cur->root = root;
-					else
-						list_add(&cur->list, &useless);
-					break;
-				}
-				if (is_cowonly_root(btrfs_ref_root_v0(eb,
-								      ref0)))
-					cur->cowonly = 1;
-			}
-#else
-		ASSERT(key.type != BTRFS_EXTENT_REF_V0_KEY);
 		if (key.type == BTRFS_SHARED_BLOCK_REF_KEY) {
-#endif
			if (key.objectid == key.offset) {
				/*
				 * only root blocks of reloc trees use
@@ -876,6 +830,12 @@ struct backref_node *build_backref_tree(struct reloc_control *rc,
			edge->node[UPPER] = upper;

			goto next;
+		} else if (unlikely(key.type == BTRFS_EXTENT_REF_V0_KEY)) {
+			err = -EINVAL;
+			btrfs_print_v0_err(rc->extent_root->fs_info);
+			btrfs_handle_fs_error(rc->extent_root->fs_info, err,
+					      NULL);
+			goto out;
 		} else if (key.type != BTRFS_TREE_BLOCK_REF_KEY) {
			goto next;
 		}
@@ -1321,18 +1281,19 @@ static void __del_reloc_root(struct btrfs_root *root)
 	struct mapping_node *node = NULL;
 	struct reloc_control *rc = fs_info->reloc_ctl;

-	spin_lock(&rc->reloc_root_tree.lock);
-	rb_node = tree_search(&rc->reloc_root_tree.rb_root,
-			      root->node->start);
-	if (rb_node) {
-		node = rb_entry(rb_node, struct mapping_node, rb_node);
-		rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
+	if (rc) {
+		spin_lock(&rc->reloc_root_tree.lock);
+		rb_node = tree_search(&rc->reloc_root_tree.rb_root,
+				      root->node->start);
+		if (rb_node) {
+			node = rb_entry(rb_node, struct mapping_node, rb_node);
+			rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
+		}
+		spin_unlock(&rc->reloc_root_tree.lock);
+		if (!node)
+			return;
+		BUG_ON((struct btrfs_root *)node->data != root);
 	}
-	spin_unlock(&rc->reloc_root_tree.lock);
-
-	if (!node)
-		return;
-	BUG_ON((struct btrfs_root *)node->data != root);

 	spin_lock(&fs_info->trans_lock);
 	list_del_init(&root->root_list);
@@ -1918,13 +1879,12 @@ int replace_path(struct btrfs_trans_handle *trans,
		 * and tree block numbers, if current trans doesn't free
		 * data reloc tree inode.
		 */
-		ret = btrfs_qgroup_trace_subtree(trans, src, parent,
+		ret = btrfs_qgroup_trace_subtree(trans, parent,
				btrfs_header_generation(parent),
				btrfs_header_level(parent));
 		if (ret < 0)
			break;
-		ret = btrfs_qgroup_trace_subtree(trans, dest,
-					path->nodes[level],
+		ret = btrfs_qgroup_trace_subtree(trans, path->nodes[level],
				btrfs_header_generation(path->nodes[level]),
				btrfs_header_level(path->nodes[level]));
 		if (ret < 0)
@@ -3333,48 +3293,6 @@ int relocate_data_extent(struct inode *inode, struct btrfs_key *extent_key,
 	return 0;
 }

-#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
-static int get_ref_objectid_v0(struct reloc_control *rc,
-			       struct btrfs_path *path,
-			       struct btrfs_key *extent_key,
-			       u64 *ref_objectid, int *path_change)
-{
-	struct btrfs_key key;
-	struct extent_buffer *leaf;
-	struct btrfs_extent_ref_v0 *ref0;
-	int ret;
-	int slot;
-
-	leaf = path->nodes[0];
-	slot = path->slots[0];
-	while (1) {
-		if (slot >= btrfs_header_nritems(leaf)) {
-			ret = btrfs_next_leaf(rc->extent_root, path);
-			if (ret < 0)
-				return ret;
-			BUG_ON(ret > 0);
-			leaf = path->nodes[0];
-			slot = path->slots[0];
-			if (path_change)
-				*path_change = 1;
-		}
-		btrfs_item_key_to_cpu(leaf, &key, slot);
-		if (key.objectid != extent_key->objectid)
-			return -ENOENT;
-
-		if (key.type != BTRFS_EXTENT_REF_V0_KEY) {
-			slot++;
-			continue;
-		}
-		ref0 = btrfs_item_ptr(leaf, slot,
-				struct btrfs_extent_ref_v0);
-		*ref_objectid = btrfs_ref_objectid_v0(leaf, ref0);
-		break;
-	}
-	return 0;
-}
-#endif
-
 /*
  * helper to add a tree block to the list.
  * the major work is getting the generation and level of the block

@@ -3407,23 +3325,12 @@ static int add_tree_block(struct reloc_control *rc,
 			level = (int)extent_key->offset;
 		}
 		generation = btrfs_extent_generation(eb, ei);
+	} else if (unlikely(item_size == sizeof(struct btrfs_extent_item_v0))) {
+		btrfs_print_v0_err(eb->fs_info);
+		btrfs_handle_fs_error(eb->fs_info, -EINVAL, NULL);
+		return -EINVAL;
 	} else {
-#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
-		u64 ref_owner;
-		int ret;
-
-		BUG_ON(item_size != sizeof(struct btrfs_extent_item_v0));
-		ret = get_ref_objectid_v0(rc, path, extent_key,
-					  &ref_owner, NULL);
-		if (ret < 0)
-			return ret;
-		BUG_ON(ref_owner >= BTRFS_MAX_LEVEL);
-		level = (int)ref_owner;
-		/* FIXME: get real generation */
-		generation = 0;
-#else
 		BUG();
-#endif
 	}

 	btrfs_release_path(path);

@@ -3563,11 +3470,8 @@ static int delete_block_group_cache(struct btrfs_fs_info *fs_info,
 	key.offset = 0;

 	inode = btrfs_iget(fs_info->sb, &key, root, NULL);
-	if (IS_ERR(inode) || is_bad_inode(inode)) {
-		if (!IS_ERR(inode))
-			iput(inode);
+	if (IS_ERR(inode))
 		return -ENOENT;
-	}

 truncate:
 	ret = btrfs_check_trunc_cache_free_space(fs_info,

@@ -3781,12 +3685,7 @@ int add_data_references(struct reloc_control *rc,
 	eb = path->nodes[0];
 	ptr = btrfs_item_ptr_offset(eb, path->slots[0]);
 	end = ptr + btrfs_item_size_nr(eb, path->slots[0]);
-#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
-	if (ptr + sizeof(struct btrfs_extent_item_v0) == end)
-		ptr = end;
-	else
-#endif
-		ptr += sizeof(struct btrfs_extent_item);
+	ptr += sizeof(struct btrfs_extent_item);

 	while (ptr < end) {
 		iref = (struct btrfs_extent_inline_ref *)ptr;

@@ -3801,7 +3700,7 @@ int add_data_references(struct reloc_control *rc,
 			ret = find_data_references(rc, extent_key,
 						   eb, dref, blocks);
 		} else {
-			ret = -EINVAL;
+			ret = -EUCLEAN;
 			btrfs_err(rc->extent_root->fs_info,
 		     "extent %llu slot %d has an invalid inline ref type",
 				  eb->start, path->slots[0]);

@@ -3832,13 +3731,7 @@ int add_data_references(struct reloc_control *rc,
 		if (key.objectid != extent_key->objectid)
 			break;

-#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
-		if (key.type == BTRFS_SHARED_DATA_REF_KEY ||
-		    key.type == BTRFS_EXTENT_REF_V0_KEY) {
-#else
-		BUG_ON(key.type == BTRFS_EXTENT_REF_V0_KEY);
 		if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
-#endif
 			ret = __add_tree_block(rc, key.offset, blocksize,
 					       blocks);
 		} else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {

@@ -3846,6 +3739,10 @@ int add_data_references(struct reloc_control *rc,
 					struct btrfs_extent_data_ref);
 			ret = find_data_references(rc, extent_key,
 						   eb, dref, blocks);
+		} else if (unlikely(key.type == BTRFS_EXTENT_REF_V0_KEY)) {
+			btrfs_print_v0_err(eb->fs_info);
+			btrfs_handle_fs_error(eb->fs_info, -EINVAL, NULL);
+			ret = -EINVAL;
 		} else {
 			ret = 0;
 		}

@@ -4084,41 +3981,13 @@ static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
 			flags = btrfs_extent_flags(path->nodes[0], ei);
 			ret = check_extent_flags(flags);
 			BUG_ON(ret);

+		} else if (unlikely(item_size == sizeof(struct btrfs_extent_item_v0))) {
+			err = -EINVAL;
+			btrfs_print_v0_err(trans->fs_info);
+			btrfs_abort_transaction(trans, err);
+			break;
 		} else {
-#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
-			u64 ref_owner;
-			int path_change = 0;
-
-			BUG_ON(item_size !=
-			       sizeof(struct btrfs_extent_item_v0));
-			ret = get_ref_objectid_v0(rc, path, &key, &ref_owner,
-						  &path_change);
-			if (ret < 0) {
-				err = ret;
-				break;
-			}
-			if (ref_owner < BTRFS_FIRST_FREE_OBJECTID)
-				flags = BTRFS_EXTENT_FLAG_TREE_BLOCK;
-			else
-				flags = BTRFS_EXTENT_FLAG_DATA;
-
-			if (path_change) {
-				btrfs_release_path(path);
-
-				path->search_commit_root = 1;
-				path->skip_locking = 1;
-				ret = btrfs_search_slot(NULL, rc->extent_root,
-							&key, path, 0, 0);
-				if (ret < 0) {
-					err = ret;
-					break;
-				}
-				BUG_ON(ret > 0);
-			}
-#else
 			BUG();
-#endif
 		}

 		if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {

@@ -4169,8 +4038,7 @@ static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
 		}
 	}
 	if (trans && progress && err == -ENOSPC) {
-		ret = btrfs_force_chunk_alloc(trans, fs_info,
-					      rc->block_group->flags);
+		ret = btrfs_force_chunk_alloc(trans, rc->block_group->flags);
 		if (ret == 1) {
 			err = 0;
 			progress = 0;

@@ -4284,7 +4152,7 @@ struct inode *create_reloc_inode(struct btrfs_fs_info *fs_info,
 	key.type = BTRFS_INODE_ITEM_KEY;
 	key.offset = 0;
 	inode = btrfs_iget(fs_info->sb, &key, root, NULL);
-	BUG_ON(IS_ERR(inode) || is_bad_inode(inode));
+	BUG_ON(IS_ERR(inode));
 	BTRFS_I(inode)->index_cnt = group->key.objectid;

 	err = btrfs_orphan_add(trans, BTRFS_I(inode));

@@ -4375,7 +4243,7 @@ int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start)
 	rc->block_group = btrfs_lookup_block_group(fs_info, group_start);
 	BUG_ON(!rc->block_group);

-	ret = btrfs_inc_block_group_ro(fs_info, rc->block_group);
+	ret = btrfs_inc_block_group_ro(rc->block_group);
 	if (ret) {
 		err = ret;
 		goto out;
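The relocation.c hunks above replace every BTRFS_COMPAT_EXTENT_TREE_V0 fallback with an explicit rejection keyed on the on-disk item size. A small self-contained sketch of that detection; the struct layouts are simplified stand-ins, not the real on-disk formats, and EUCLEAN availability in userspace errno.h is a Linux assumption:

	#include <stdio.h>
	#include <stddef.h>
	#include <errno.h>

	/* simplified stand-ins for the two extent item generations */
	struct extent_item_v0_sketch { unsigned int refs; };
	struct extent_item_sketch    { unsigned long long refs, generation, flags; };

	static int classify_extent_item(size_t item_size)
	{
		if (item_size >= sizeof(struct extent_item_sketch))
			return 0;		/* current format: parse it */
		if (item_size == sizeof(struct extent_item_v0_sketch))
			return -EINVAL;		/* pre-2.6.30 item: log a
						 * btrfs_print_v0_err()-style
						 * message and refuse */
		return -EUCLEAN;		/* any other size: corruption */
	}

	int main(void)
	{
		printf("%d %d %d\n",
		       classify_extent_item(sizeof(struct extent_item_sketch)),
		       classify_extent_item(sizeof(struct extent_item_v0_sketch)),
		       classify_extent_item(1));
		return 0;
	}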
@@ -320,9 +320,9 @@ int btrfs_find_orphan_roots(struct btrfs_fs_info *fs_info)

 /* drop the root item for 'key' from the tree root */
 int btrfs_del_root(struct btrfs_trans_handle *trans,
-		   struct btrfs_fs_info *fs_info, const struct btrfs_key *key)
+		   const struct btrfs_key *key)
 {
-	struct btrfs_root *root = fs_info->tree_root;
+	struct btrfs_root *root = trans->fs_info->tree_root;
 	struct btrfs_path *path;
 	int ret;

@@ -341,13 +341,12 @@ int btrfs_del_root(struct btrfs_trans_handle *trans,
 	return ret;
 }

-int btrfs_del_root_ref(struct btrfs_trans_handle *trans,
-		       struct btrfs_fs_info *fs_info,
-		       u64 root_id, u64 ref_id, u64 dirid, u64 *sequence,
-		       const char *name, int name_len)
+int btrfs_del_root_ref(struct btrfs_trans_handle *trans, u64 root_id,
+		       u64 ref_id, u64 dirid, u64 *sequence, const char *name,
+		       int name_len)

 {
-	struct btrfs_root *tree_root = fs_info->tree_root;
+	struct btrfs_root *tree_root = trans->fs_info->tree_root;
 	struct btrfs_path *path;
 	struct btrfs_root_ref *ref;
 	struct extent_buffer *leaf;

@@ -413,12 +412,11 @@ int btrfs_del_root_ref(struct btrfs_trans_handle *trans,
  *
  * Will return 0, -ENOMEM, or anything from the CoW path
  */
-int btrfs_add_root_ref(struct btrfs_trans_handle *trans,
-		       struct btrfs_fs_info *fs_info,
-		       u64 root_id, u64 ref_id, u64 dirid, u64 sequence,
-		       const char *name, int name_len)
+int btrfs_add_root_ref(struct btrfs_trans_handle *trans, u64 root_id,
+		       u64 ref_id, u64 dirid, u64 sequence, const char *name,
+		       int name_len)
 {
-	struct btrfs_root *tree_root = fs_info->tree_root;
+	struct btrfs_root *tree_root = trans->fs_info->tree_root;
 	struct btrfs_key key;
 	int ret;
 	struct btrfs_path *path;
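All three root-tree.c signature changes above follow one rule from this series: the transaction handle already records the fs_info it was started against, so passing it separately is redundant stack traffic. A sketch of the pattern with illustrative types (not the kernel structures):

	#include <stdio.h>

	struct fs_info_sketch { const char *tree_root; };
	struct trans_sketch { struct fs_info_sketch *fs_info; };

	/* old: del_root(trans, fs_info, key);  new: del_root(trans, key) */
	static const char *tree_root_of(struct trans_sketch *trans)
	{
		return trans->fs_info->tree_root; /* derived, no longer passed */
	}

	int main(void)
	{
		struct fs_info_sketch fsi = { "tree_root" };
		struct trans_sketch trans = { &fsi };

		printf("%s\n", tree_root_of(&trans));
		return 0;
	}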
fs/btrfs/scrub.c | 679
@@ -188,32 +188,6 @@ struct scrub_ctx {
 	refcount_t		refs;
 };

-struct scrub_fixup_nodatasum {
-	struct scrub_ctx	*sctx;
-	struct btrfs_device	*dev;
-	u64			logical;
-	struct btrfs_root	*root;
-	struct btrfs_work	work;
-	int			mirror_num;
-};
-
-struct scrub_nocow_inode {
-	u64			inum;
-	u64			offset;
-	u64			root;
-	struct list_head	list;
-};
-
-struct scrub_copy_nocow_ctx {
-	struct scrub_ctx	*sctx;
-	u64			logical;
-	u64			len;
-	int			mirror_num;
-	u64			physical_for_dev_replace;
-	struct list_head	inodes;
-	struct btrfs_work	work;
-};
-
 struct scrub_warning {
 	struct btrfs_path	*path;
 	u64			extent_item_size;

@@ -232,8 +206,6 @@ struct full_stripe_lock {

 static void scrub_pending_bio_inc(struct scrub_ctx *sctx);
 static void scrub_pending_bio_dec(struct scrub_ctx *sctx);
-static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx);
-static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx);
 static int scrub_handle_errored_block(struct scrub_block *sblock_to_check);
 static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
 				     struct scrub_block *sblocks_for_recheck);

@@ -277,13 +249,6 @@ static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
 static void scrub_wr_submit(struct scrub_ctx *sctx);
 static void scrub_wr_bio_end_io(struct bio *bio);
 static void scrub_wr_bio_end_io_worker(struct btrfs_work *work);
-static int write_page_nocow(struct scrub_ctx *sctx,
-			    u64 physical_for_dev_replace, struct page *page);
-static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root,
-				      struct scrub_copy_nocow_ctx *ctx);
-static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
-			    int mirror_num, u64 physical_for_dev_replace);
-static void copy_nocow_pages_worker(struct btrfs_work *work);
 static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
 static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
 static void scrub_put_ctx(struct scrub_ctx *sctx);

@@ -555,60 +520,6 @@ static int unlock_full_stripe(struct btrfs_fs_info *fs_info, u64 bytenr,
 	return ret;
 }

-/*
- * used for workers that require transaction commits (i.e., for the
- * NOCOW case)
- */
-static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx)
-{
-	struct btrfs_fs_info *fs_info = sctx->fs_info;
-
-	refcount_inc(&sctx->refs);
-	/*
-	 * increment scrubs_running to prevent cancel requests from
-	 * completing as long as a worker is running. we must also
-	 * increment scrubs_paused to prevent deadlocking on pause
-	 * requests used for transactions commits (as the worker uses a
-	 * transaction context). it is safe to regard the worker
-	 * as paused for all matters practical. effectively, we only
-	 * avoid cancellation requests from completing.
-	 */
-	mutex_lock(&fs_info->scrub_lock);
-	atomic_inc(&fs_info->scrubs_running);
-	atomic_inc(&fs_info->scrubs_paused);
-	mutex_unlock(&fs_info->scrub_lock);
-
-	/*
-	 * check if @scrubs_running=@scrubs_paused condition
-	 * inside wait_event() is not an atomic operation.
-	 * which means we may inc/dec @scrub_running/paused
-	 * at any time. Let's wake up @scrub_pause_wait as
-	 * much as we can to let commit transaction blocked less.
-	 */
-	wake_up(&fs_info->scrub_pause_wait);
-
-	atomic_inc(&sctx->workers_pending);
-}
-
-/* used for workers that require transaction commits */
-static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx)
-{
-	struct btrfs_fs_info *fs_info = sctx->fs_info;
-
-	/*
-	 * see scrub_pending_trans_workers_inc() why we're pretending
-	 * to be paused in the scrub counters
-	 */
-	mutex_lock(&fs_info->scrub_lock);
-	atomic_dec(&fs_info->scrubs_running);
-	atomic_dec(&fs_info->scrubs_paused);
-	mutex_unlock(&fs_info->scrub_lock);
-	atomic_dec(&sctx->workers_pending);
-	wake_up(&fs_info->scrub_pause_wait);
-	wake_up(&sctx->list_wait);
-	scrub_put_ctx(sctx);
-}
-
 static void scrub_free_csums(struct scrub_ctx *sctx)
 {
 	while (!list_empty(&sctx->csum_list)) {
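For context on the pair of helpers deleted above: a transaction commit waits until every running scrub counts as paused, so a worker that might itself join a transaction had to bump scrubs_running and scrubs_paused together, keeping the equality and avoiding a commit/worker deadlock. A toy model of just that wait condition (hedged; the counter names mirror the removed code, everything else is illustrative):

	#include <stdio.h>

	/* commit blocks until all running scrubs are (or count as) paused */
	static int commit_may_proceed(int scrubs_running, int scrubs_paused)
	{
		return scrubs_running == scrubs_paused;
	}

	int main(void)
	{
		/* an ordinary scrub worker: the commit must wait for it */
		printf("%d\n", commit_may_proceed(1, 0));	/* 0 */
		/* a transaction-joining worker bumped both counters at once,
		 * so it never blocked the commit it was about to join */
		printf("%d\n", commit_may_proceed(1, 1));	/* 1 */
		return 0;
	}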
@@ -882,194 +793,6 @@ static void scrub_print_warning(const char *errstr, struct scrub_block *sblock)
 	btrfs_free_path(path);
 }

-static int scrub_fixup_readpage(u64 inum, u64 offset, u64 root, void *fixup_ctx)
-{
-	struct page *page = NULL;
-	unsigned long index;
-	struct scrub_fixup_nodatasum *fixup = fixup_ctx;
-	int ret;
-	int corrected = 0;
-	struct btrfs_key key;
-	struct inode *inode = NULL;
-	struct btrfs_fs_info *fs_info;
-	u64 end = offset + PAGE_SIZE - 1;
-	struct btrfs_root *local_root;
-	int srcu_index;
-
-	key.objectid = root;
-	key.type = BTRFS_ROOT_ITEM_KEY;
-	key.offset = (u64)-1;
-
-	fs_info = fixup->root->fs_info;
-	srcu_index = srcu_read_lock(&fs_info->subvol_srcu);
-
-	local_root = btrfs_read_fs_root_no_name(fs_info, &key);
-	if (IS_ERR(local_root)) {
-		srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
-		return PTR_ERR(local_root);
-	}
-
-	key.type = BTRFS_INODE_ITEM_KEY;
-	key.objectid = inum;
-	key.offset = 0;
-	inode = btrfs_iget(fs_info->sb, &key, local_root, NULL);
-	srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
-	if (IS_ERR(inode))
-		return PTR_ERR(inode);
-
-	index = offset >> PAGE_SHIFT;
-
-	page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
-	if (!page) {
-		ret = -ENOMEM;
-		goto out;
-	}
-
-	if (PageUptodate(page)) {
-		if (PageDirty(page)) {
-			/*
-			 * we need to write the data to the defect sector. the
-			 * data that was in that sector is not in memory,
-			 * because the page was modified. we must not write the
-			 * modified page to that sector.
-			 *
-			 * TODO: what could be done here: wait for the delalloc
-			 *       runner to write out that page (might involve
-			 *       COW) and see whether the sector is still
-			 *       referenced afterwards.
-			 *
-			 * For the meantime, we'll treat this error
-			 * incorrectable, although there is a chance that a
-			 * later scrub will find the bad sector again and that
-			 * there's no dirty page in memory, then.
-			 */
-			ret = -EIO;
-			goto out;
-		}
-		ret = repair_io_failure(fs_info, inum, offset, PAGE_SIZE,
-					fixup->logical, page,
-					offset - page_offset(page),
-					fixup->mirror_num);
-		unlock_page(page);
-		corrected = !ret;
-	} else {
-		/*
-		 * we need to get good data first. the general readpage path
-		 * will call repair_io_failure for us, we just have to make
-		 * sure we read the bad mirror.
-		 */
-		ret = set_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
-				      EXTENT_DAMAGED);
-		if (ret) {
-			/* set_extent_bits should give proper error */
-			WARN_ON(ret > 0);
-			if (ret > 0)
-				ret = -EFAULT;
-			goto out;
-		}
-
-		ret = extent_read_full_page(&BTRFS_I(inode)->io_tree, page,
-						btrfs_get_extent,
-						fixup->mirror_num);
-		wait_on_page_locked(page);
-
-		corrected = !test_range_bit(&BTRFS_I(inode)->io_tree, offset,
-					    end, EXTENT_DAMAGED, 0, NULL);
-		if (!corrected)
-			clear_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
-					  EXTENT_DAMAGED);
-	}
-
-out:
-	if (page)
-		put_page(page);
-
-	iput(inode);
-
-	if (ret < 0)
-		return ret;
-
-	if (ret == 0 && corrected) {
-		/*
-		 * we only need to call readpage for one of the inodes belonging
-		 * to this extent. so make iterate_extent_inodes stop
-		 */
-		return 1;
-	}
-
-	return -EIO;
-}
-
-static void scrub_fixup_nodatasum(struct btrfs_work *work)
-{
-	struct btrfs_fs_info *fs_info;
-	int ret;
-	struct scrub_fixup_nodatasum *fixup;
-	struct scrub_ctx *sctx;
-	struct btrfs_trans_handle *trans = NULL;
-	struct btrfs_path *path;
-	int uncorrectable = 0;
-
-	fixup = container_of(work, struct scrub_fixup_nodatasum, work);
-	sctx = fixup->sctx;
-	fs_info = fixup->root->fs_info;
-
-	path = btrfs_alloc_path();
-	if (!path) {
-		spin_lock(&sctx->stat_lock);
-		++sctx->stat.malloc_errors;
-		spin_unlock(&sctx->stat_lock);
-		uncorrectable = 1;
-		goto out;
-	}
-
-	trans = btrfs_join_transaction(fixup->root);
-	if (IS_ERR(trans)) {
-		uncorrectable = 1;
-		goto out;
-	}
-
-	/*
-	 * the idea is to trigger a regular read through the standard path. we
-	 * read a page from the (failed) logical address by specifying the
-	 * corresponding copynum of the failed sector. thus, that readpage is
-	 * expected to fail.
-	 * that is the point where on-the-fly error correction will kick in
-	 * (once it's finished) and rewrite the failed sector if a good copy
-	 * can be found.
-	 */
-	ret = iterate_inodes_from_logical(fixup->logical, fs_info, path,
-					  scrub_fixup_readpage, fixup, false);
-	if (ret < 0) {
-		uncorrectable = 1;
-		goto out;
-	}
-	WARN_ON(ret != 1);
-
-	spin_lock(&sctx->stat_lock);
-	++sctx->stat.corrected_errors;
-	spin_unlock(&sctx->stat_lock);
-
-out:
-	if (trans && !IS_ERR(trans))
-		btrfs_end_transaction(trans);
-	if (uncorrectable) {
-		spin_lock(&sctx->stat_lock);
-		++sctx->stat.uncorrectable_errors;
-		spin_unlock(&sctx->stat_lock);
-		btrfs_dev_replace_stats_inc(
-			&fs_info->dev_replace.num_uncorrectable_read_errors);
-		btrfs_err_rl_in_rcu(fs_info,
-		    "unable to fixup (nodatasum) error at logical %llu on dev %s",
-			fixup->logical, rcu_str_deref(fixup->dev->name));
-	}
-
-	btrfs_free_path(path);
-	kfree(fixup);
-
-	scrub_pending_trans_workers_dec(sctx);
-}
-
 static inline void scrub_get_recover(struct scrub_recover *recover)
 {
 	refcount_inc(&recover->refs);
@@ -1263,42 +986,6 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
 		goto out;
 	}

-	/*
-	 * NOTE: Even for nodatasum case, it's still possible that it's a
-	 * compressed data extent, thus scrub_fixup_nodatasum(), which write
-	 * inode page cache onto disk, could cause serious data corruption.
-	 *
-	 * So here we could only read from disk, and hope our recovery could
-	 * reach disk before the newer write.
-	 */
-	if (0 && !is_metadata && !have_csum) {
-		struct scrub_fixup_nodatasum *fixup_nodatasum;
-
-		WARN_ON(sctx->is_dev_replace);
-
-		/*
-		 * !is_metadata and !have_csum, this means that the data
-		 * might not be COWed, that it might be modified
-		 * concurrently. The general strategy to work on the
-		 * commit root does not help in the case when COW is not
-		 * used.
-		 */
-		fixup_nodatasum = kzalloc(sizeof(*fixup_nodatasum), GFP_NOFS);
-		if (!fixup_nodatasum)
-			goto did_not_correct_error;
-		fixup_nodatasum->sctx = sctx;
-		fixup_nodatasum->dev = dev;
-		fixup_nodatasum->logical = logical;
-		fixup_nodatasum->root = fs_info->extent_root;
-		fixup_nodatasum->mirror_num = failed_mirror_index + 1;
-		scrub_pending_trans_workers_inc(sctx);
-		btrfs_init_work(&fixup_nodatasum->work, btrfs_scrub_helper,
-				scrub_fixup_nodatasum, NULL, NULL);
-		btrfs_queue_work(fs_info->scrub_workers,
-				 &fixup_nodatasum->work);
-		goto out;
-	}
-
 	/*
 	 * now build and submit the bios for the other mirrors, check
 	 * checksums.

@@ -1866,7 +1553,7 @@ static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
 		bio = btrfs_io_bio_alloc(1);
 		bio_set_dev(bio, page_bad->dev->bdev);
 		bio->bi_iter.bi_sector = page_bad->physical >> 9;
-		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
+		bio->bi_opf = REQ_OP_WRITE;

 		ret = bio_add_page(bio, page_good->page, PAGE_SIZE, 0);
 		if (PAGE_SIZE != ret) {

@@ -1961,7 +1648,7 @@ static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
 		bio->bi_end_io = scrub_wr_bio_end_io;
 		bio_set_dev(bio, sbio->dev->bdev);
 		bio->bi_iter.bi_sector = sbio->physical >> 9;
-		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
+		bio->bi_opf = REQ_OP_WRITE;
 		sbio->status = 0;
 	} else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
 		   spage->physical_for_dev_replace ||

@@ -2361,7 +2048,7 @@ static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
 		bio->bi_end_io = scrub_bio_end_io;
 		bio_set_dev(bio, sbio->dev->bdev);
 		bio->bi_iter.bi_sector = sbio->physical >> 9;
-		bio_set_op_attrs(bio, REQ_OP_READ, 0);
+		bio->bi_opf = REQ_OP_READ;
 		sbio->status = 0;
 	} else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
 		   spage->physical ||

@@ -2800,17 +2487,10 @@ static int scrub_extent(struct scrub_ctx *sctx, struct map_lookup *map,
 			have_csum = scrub_find_csum(sctx, logical, csum);
 			if (have_csum == 0)
 				++sctx->stat.no_csum;
-			if (0 && sctx->is_dev_replace && !have_csum) {
-				ret = copy_nocow_pages(sctx, logical, l,
-						       mirror_num,
-						      physical_for_dev_replace);
-				goto behind_scrub_pages;
-			}
 		}
 		ret = scrub_pages(sctx, logical, l, physical, dev, flags, gen,
 				  mirror_num, have_csum ? csum : NULL, 0,
 				  physical_for_dev_replace);
-behind_scrub_pages:
 		if (ret)
 			return ret;
 		len -= l;

@@ -3863,7 +3543,7 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
 		 * -> btrfs_scrub_pause()
 		 */
 		scrub_pause_on(fs_info);
-		ret = btrfs_inc_block_group_ro(fs_info, cache);
+		ret = btrfs_inc_block_group_ro(cache);
 		if (!ret && is_dev_replace) {
 			/*
 			 * If we are doing a device replace wait for any tasks

@@ -3982,14 +3662,7 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
 		if (!cache->removed && !cache->ro && cache->reserved == 0 &&
 		    btrfs_block_group_used(&cache->item) == 0) {
 			spin_unlock(&cache->lock);
-			spin_lock(&fs_info->unused_bgs_lock);
-			if (list_empty(&cache->bg_list)) {
-				btrfs_get_block_group(cache);
-				trace_btrfs_add_unused_block_group(cache);
-				list_add_tail(&cache->bg_list,
-					      &fs_info->unused_bgs);
-			}
-			spin_unlock(&fs_info->unused_bgs_lock);
+			btrfs_mark_bg_unused(cache);
 		} else {
 			spin_unlock(&cache->lock);
 		}

@@ -4072,10 +3745,6 @@ static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info,
 	if (!fs_info->scrub_wr_completion_workers)
 		goto fail_scrub_wr_completion_workers;

-	fs_info->scrub_nocow_workers =
-		btrfs_alloc_workqueue(fs_info, "scrubnc", flags, 1, 0);
-	if (!fs_info->scrub_nocow_workers)
-		goto fail_scrub_nocow_workers;
 	fs_info->scrub_parity_workers =
 		btrfs_alloc_workqueue(fs_info, "scrubparity", flags,
 				      max_active, 2);

@@ -4086,8 +3755,6 @@ static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info,
 	return 0;

 fail_scrub_parity_workers:
-	btrfs_destroy_workqueue(fs_info->scrub_nocow_workers);
-fail_scrub_nocow_workers:
 	btrfs_destroy_workqueue(fs_info->scrub_wr_completion_workers);
 fail_scrub_wr_completion_workers:
 	btrfs_destroy_workqueue(fs_info->scrub_workers);

@@ -4100,7 +3767,6 @@ static noinline_for_stack void scrub_workers_put(struct btrfs_fs_info *fs_info)
 	if (--fs_info->scrub_workers_refcnt == 0) {
 		btrfs_destroy_workqueue(fs_info->scrub_workers);
 		btrfs_destroy_workqueue(fs_info->scrub_wr_completion_workers);
-		btrfs_destroy_workqueue(fs_info->scrub_nocow_workers);
 		btrfs_destroy_workqueue(fs_info->scrub_parity_workers);
 	}
 	WARN_ON(fs_info->scrub_workers_refcnt < 0);

@@ -4113,7 +3779,6 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
 	struct scrub_ctx *sctx;
 	int ret;
 	struct btrfs_device *dev;
-	struct rcu_string *name;

 	if (btrfs_fs_closing(fs_info))
 		return -EINVAL;

@@ -4167,11 +3832,8 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
 	if (!is_dev_replace && !readonly &&
 	    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state)) {
 		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
-		rcu_read_lock();
-		name = rcu_dereference(dev->name);
-		btrfs_err(fs_info, "scrub: device %s is not writable",
-			  name->str);
-		rcu_read_unlock();
+		btrfs_err_in_rcu(fs_info, "scrub: device %s is not writable",
+				 rcu_str_deref(dev->name));
 		return -EROFS;
 	}
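Three of the hunks above replace bio_set_op_attrs(bio, REQ_OP_*, 0) with a plain assignment to bi_opf. With a zero flags argument the helper reduced to OR-ing the op into the field, so the direct store is equivalent and clearer. A userspace sketch of that equivalence (the REQ_OP value and types are illustrative stand-ins):

	#include <stdio.h>

	#define REQ_OP_WRITE_SKETCH 1u

	struct bio_sketch { unsigned int bi_opf; };

	/* what the old helper amounted to */
	static void set_op_attrs(struct bio_sketch *bio, unsigned int op,
				 unsigned int flags)
	{
		bio->bi_opf = op | flags;
	}

	int main(void)
	{
		struct bio_sketch a = { 0 }, b = { 0 };

		set_op_attrs(&a, REQ_OP_WRITE_SKETCH, 0);	/* old spelling */
		b.bi_opf = REQ_OP_WRITE_SKETCH;			/* new spelling */
		printf("%u %u\n", a.bi_opf, b.bi_opf);		/* identical */
		return 0;
	}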
@@ -4359,330 +4021,3 @@ static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
 	*extent_dev = bbio->stripes[0].dev;
 	btrfs_put_bbio(bbio);
 }
-
-static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
-			    int mirror_num, u64 physical_for_dev_replace)
-{
-	struct scrub_copy_nocow_ctx *nocow_ctx;
-	struct btrfs_fs_info *fs_info = sctx->fs_info;
-
-	nocow_ctx = kzalloc(sizeof(*nocow_ctx), GFP_NOFS);
-	if (!nocow_ctx) {
-		spin_lock(&sctx->stat_lock);
-		sctx->stat.malloc_errors++;
-		spin_unlock(&sctx->stat_lock);
-		return -ENOMEM;
-	}
-
-	scrub_pending_trans_workers_inc(sctx);
-
-	nocow_ctx->sctx = sctx;
-	nocow_ctx->logical = logical;
-	nocow_ctx->len = len;
-	nocow_ctx->mirror_num = mirror_num;
-	nocow_ctx->physical_for_dev_replace = physical_for_dev_replace;
-	btrfs_init_work(&nocow_ctx->work, btrfs_scrubnc_helper,
-			copy_nocow_pages_worker, NULL, NULL);
-	INIT_LIST_HEAD(&nocow_ctx->inodes);
-	btrfs_queue_work(fs_info->scrub_nocow_workers,
-			 &nocow_ctx->work);
-
-	return 0;
-}
-
-static int record_inode_for_nocow(u64 inum, u64 offset, u64 root, void *ctx)
-{
-	struct scrub_copy_nocow_ctx *nocow_ctx = ctx;
-	struct scrub_nocow_inode *nocow_inode;
-
-	nocow_inode = kzalloc(sizeof(*nocow_inode), GFP_NOFS);
-	if (!nocow_inode)
-		return -ENOMEM;
-	nocow_inode->inum = inum;
-	nocow_inode->offset = offset;
-	nocow_inode->root = root;
-	list_add_tail(&nocow_inode->list, &nocow_ctx->inodes);
-	return 0;
-}
-
-#define COPY_COMPLETE 1
-
-static void copy_nocow_pages_worker(struct btrfs_work *work)
-{
-	struct scrub_copy_nocow_ctx *nocow_ctx =
-		container_of(work, struct scrub_copy_nocow_ctx, work);
-	struct scrub_ctx *sctx = nocow_ctx->sctx;
-	struct btrfs_fs_info *fs_info = sctx->fs_info;
-	struct btrfs_root *root = fs_info->extent_root;
-	u64 logical = nocow_ctx->logical;
-	u64 len = nocow_ctx->len;
-	int mirror_num = nocow_ctx->mirror_num;
-	u64 physical_for_dev_replace = nocow_ctx->physical_for_dev_replace;
-	int ret;
-	struct btrfs_trans_handle *trans = NULL;
-	struct btrfs_path *path;
-	int not_written = 0;
-
-	path = btrfs_alloc_path();
-	if (!path) {
-		spin_lock(&sctx->stat_lock);
-		sctx->stat.malloc_errors++;
-		spin_unlock(&sctx->stat_lock);
-		not_written = 1;
-		goto out;
-	}
-
-	trans = btrfs_join_transaction(root);
-	if (IS_ERR(trans)) {
-		not_written = 1;
-		goto out;
-	}
-
-	ret = iterate_inodes_from_logical(logical, fs_info, path,
-			record_inode_for_nocow, nocow_ctx, false);
-	if (ret != 0 && ret != -ENOENT) {
-		btrfs_warn(fs_info,
-			   "iterate_inodes_from_logical() failed: log %llu, phys %llu, len %llu, mir %u, ret %d",
-			   logical, physical_for_dev_replace, len, mirror_num,
-			   ret);
-		not_written = 1;
-		goto out;
-	}
-
-	btrfs_end_transaction(trans);
-	trans = NULL;
-	while (!list_empty(&nocow_ctx->inodes)) {
-		struct scrub_nocow_inode *entry;
-		entry = list_first_entry(&nocow_ctx->inodes,
-					 struct scrub_nocow_inode,
-					 list);
-		list_del_init(&entry->list);
-		ret = copy_nocow_pages_for_inode(entry->inum, entry->offset,
-						 entry->root, nocow_ctx);
-		kfree(entry);
-		if (ret == COPY_COMPLETE) {
-			ret = 0;
-			break;
-		} else if (ret) {
-			break;
-		}
-	}
-out:
-	while (!list_empty(&nocow_ctx->inodes)) {
-		struct scrub_nocow_inode *entry;
-		entry = list_first_entry(&nocow_ctx->inodes,
-					 struct scrub_nocow_inode,
-					 list);
-		list_del_init(&entry->list);
-		kfree(entry);
-	}
-	if (trans && !IS_ERR(trans))
-		btrfs_end_transaction(trans);
-	if (not_written)
-		btrfs_dev_replace_stats_inc(&fs_info->dev_replace.
-					    num_uncorrectable_read_errors);
-
-	btrfs_free_path(path);
-	kfree(nocow_ctx);
-
-	scrub_pending_trans_workers_dec(sctx);
-}
-
-static int check_extent_to_block(struct btrfs_inode *inode, u64 start, u64 len,
-				 u64 logical)
-{
-	struct extent_state *cached_state = NULL;
-	struct btrfs_ordered_extent *ordered;
-	struct extent_io_tree *io_tree;
-	struct extent_map *em;
-	u64 lockstart = start, lockend = start + len - 1;
-	int ret = 0;
-
-	io_tree = &inode->io_tree;
-
-	lock_extent_bits(io_tree, lockstart, lockend, &cached_state);
-	ordered = btrfs_lookup_ordered_range(inode, lockstart, len);
-	if (ordered) {
-		btrfs_put_ordered_extent(ordered);
-		ret = 1;
-		goto out_unlock;
-	}
-
-	em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
-	if (IS_ERR(em)) {
-		ret = PTR_ERR(em);
-		goto out_unlock;
-	}
-
-	/*
-	 * This extent does not actually cover the logical extent anymore,
-	 * move on to the next inode.
-	 */
-	if (em->block_start > logical ||
-	    em->block_start + em->block_len < logical + len ||
-	    test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
-		free_extent_map(em);
-		ret = 1;
-		goto out_unlock;
-	}
-	free_extent_map(em);
-
-out_unlock:
-	unlock_extent_cached(io_tree, lockstart, lockend, &cached_state);
-	return ret;
-}
-
-static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root,
-				      struct scrub_copy_nocow_ctx *nocow_ctx)
-{
-	struct btrfs_fs_info *fs_info = nocow_ctx->sctx->fs_info;
-	struct btrfs_key key;
-	struct inode *inode;
-	struct page *page;
-	struct btrfs_root *local_root;
-	struct extent_io_tree *io_tree;
-	u64 physical_for_dev_replace;
-	u64 nocow_ctx_logical;
-	u64 len = nocow_ctx->len;
-	unsigned long index;
-	int srcu_index;
-	int ret = 0;
-	int err = 0;
-
-	key.objectid = root;
-	key.type = BTRFS_ROOT_ITEM_KEY;
-	key.offset = (u64)-1;
-
-	srcu_index = srcu_read_lock(&fs_info->subvol_srcu);
-
-	local_root = btrfs_read_fs_root_no_name(fs_info, &key);
-	if (IS_ERR(local_root)) {
-		srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
-		return PTR_ERR(local_root);
-	}
-
-	key.type = BTRFS_INODE_ITEM_KEY;
-	key.objectid = inum;
-	key.offset = 0;
-	inode = btrfs_iget(fs_info->sb, &key, local_root, NULL);
-	srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
-	if (IS_ERR(inode))
-		return PTR_ERR(inode);
-
-	/* Avoid truncate/dio/punch hole.. */
-	inode_lock(inode);
-	inode_dio_wait(inode);
-
-	physical_for_dev_replace = nocow_ctx->physical_for_dev_replace;
-	io_tree = &BTRFS_I(inode)->io_tree;
-	nocow_ctx_logical = nocow_ctx->logical;
-
-	ret = check_extent_to_block(BTRFS_I(inode), offset, len,
-			nocow_ctx_logical);
-	if (ret) {
-		ret = ret > 0 ? 0 : ret;
-		goto out;
-	}
-
-	while (len >= PAGE_SIZE) {
-		index = offset >> PAGE_SHIFT;
-again:
-		page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
-		if (!page) {
-			btrfs_err(fs_info, "find_or_create_page() failed");
-			ret = -ENOMEM;
-			goto out;
-		}
-
-		if (PageUptodate(page)) {
-			if (PageDirty(page))
-				goto next_page;
-		} else {
-			ClearPageError(page);
-			err = extent_read_full_page(io_tree, page,
-						    btrfs_get_extent,
-						    nocow_ctx->mirror_num);
-			if (err) {
-				ret = err;
-				goto next_page;
-			}
-
-			lock_page(page);
-			/*
-			 * If the page has been remove from the page cache,
-			 * the data on it is meaningless, because it may be
-			 * old one, the new data may be written into the new
-			 * page in the page cache.
-			 */
-			if (page->mapping != inode->i_mapping) {
-				unlock_page(page);
-				put_page(page);
-				goto again;
-			}
-			if (!PageUptodate(page)) {
-				ret = -EIO;
-				goto next_page;
-			}
-		}
-
-		ret = check_extent_to_block(BTRFS_I(inode), offset, len,
-					    nocow_ctx_logical);
-		if (ret) {
-			ret = ret > 0 ? 0 : ret;
-			goto next_page;
-		}
-
-		err = write_page_nocow(nocow_ctx->sctx,
-				       physical_for_dev_replace, page);
-		if (err)
-			ret = err;
-next_page:
-		unlock_page(page);
-		put_page(page);
-
-		if (ret)
-			break;
-
-		offset += PAGE_SIZE;
-		physical_for_dev_replace += PAGE_SIZE;
-		nocow_ctx_logical += PAGE_SIZE;
-		len -= PAGE_SIZE;
-	}
-	ret = COPY_COMPLETE;
-out:
-	inode_unlock(inode);
-	iput(inode);
-	return ret;
-}
-
-static int write_page_nocow(struct scrub_ctx *sctx,
-			    u64 physical_for_dev_replace, struct page *page)
-{
-	struct bio *bio;
-	struct btrfs_device *dev;
-
-	dev = sctx->wr_tgtdev;
-	if (!dev)
-		return -EIO;
-	if (!dev->bdev) {
-		btrfs_warn_rl(dev->fs_info,
-			"scrub write_page_nocow(bdev == NULL) is unexpected");
-		return -EIO;
-	}
-	bio = btrfs_io_bio_alloc(1);
-	bio->bi_iter.bi_size = 0;
-	bio->bi_iter.bi_sector = physical_for_dev_replace >> 9;
-	bio_set_dev(bio, dev->bdev);
-	bio->bi_opf = REQ_OP_WRITE | REQ_SYNC;
-	/* bio_add_page won't fail on a freshly allocated bio */
-	bio_add_page(bio, page, PAGE_SIZE, 0);
-
-	if (btrfsic_submit_bio_wait(bio)) {
-		bio_put(bio);
-		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
-		return -EIO;
-	}
-
-	bio_put(bio);
-	return 0;
-}
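One small change in the scrub_enumerate_chunks() hunk above is worth a closer look: the open-coded "lock, add the block group to the unused list once, take a reference" sequence becomes a single btrfs_mark_bg_unused() call. A hedged userspace sketch of the shape of that factored helper (illustrative types, locking elided, not the kernel implementation):

	#include <stdio.h>

	struct bg_sketch { int on_unused_list; int refs; };

	/* factored helper: add the group to the unused list only once,
	 * and pin it with a reference while it sits there */
	static void mark_bg_unused(struct bg_sketch *bg)
	{
		if (!bg->on_unused_list) {	/* stands in for list_empty() */
			bg->refs++;		/* stands in for btrfs_get_block_group() */
			bg->on_unused_list = 1;	/* stands in for list_add_tail() */
		}
	}

	int main(void)
	{
		struct bg_sketch bg = { 0, 1 };

		mark_bg_unused(&bg);
		mark_bg_unused(&bg);	/* idempotent: added and referenced once */
		printf("on_list=%d refs=%d\n", bg.on_unused_list, bg.refs);
		return 0;
	}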
fs/btrfs/send.c | 172

@@ -100,6 +100,7 @@ struct send_ctx {
 	u64 cur_inode_rdev;
 	u64 cur_inode_last_extent;
 	u64 cur_inode_next_write_offset;
+	bool ignore_cur_inode;

 	u64 send_progress;

@@ -1500,7 +1501,7 @@ static int read_symlink(struct btrfs_root *root,
 	BUG_ON(compression);

 	off = btrfs_file_extent_inline_start(ei);
-	len = btrfs_file_extent_inline_len(path->nodes[0], path->slots[0], ei);
+	len = btrfs_file_extent_ram_bytes(path->nodes[0], ei);

 	ret = fs_path_add_from_extent_buffer(dest, path->nodes[0], off, len);

@@ -5006,6 +5007,15 @@ static int send_hole(struct send_ctx *sctx, u64 end)
 	u64 len;
 	int ret = 0;

+	/*
+	 * A hole that starts at EOF or beyond it. Since we do not yet support
+	 * fallocate (for extent preallocation and hole punching), sending a
+	 * write of zeroes starting at EOF or beyond would later require issuing
+	 * a truncate operation which would undo the write and achieve nothing.
+	 */
+	if (offset >= sctx->cur_inode_size)
+		return 0;
+
 	if (sctx->flags & BTRFS_SEND_FLAG_NO_FILE_DATA)
 		return send_update_extent(sctx, offset, end - offset);

@@ -5160,7 +5170,7 @@ static int clone_range(struct send_ctx *sctx,
 		ei = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
 		type = btrfs_file_extent_type(leaf, ei);
 		if (type == BTRFS_FILE_EXTENT_INLINE) {
-			ext_len = btrfs_file_extent_inline_len(leaf, slot, ei);
+			ext_len = btrfs_file_extent_ram_bytes(leaf, ei);
 			ext_len = PAGE_ALIGN(ext_len);
 		} else {
 			ext_len = btrfs_file_extent_num_bytes(leaf, ei);

@@ -5236,8 +5246,7 @@ static int send_write_or_clone(struct send_ctx *sctx,
 			struct btrfs_file_extent_item);
 	type = btrfs_file_extent_type(path->nodes[0], ei);
 	if (type == BTRFS_FILE_EXTENT_INLINE) {
-		len = btrfs_file_extent_inline_len(path->nodes[0],
-						   path->slots[0], ei);
+		len = btrfs_file_extent_ram_bytes(path->nodes[0], ei);
 		/*
 		 * it is possible the inline item won't cover the whole page,
 		 * but there may be items after this page. Make

@@ -5375,7 +5384,7 @@ static int is_extent_unchanged(struct send_ctx *sctx,
 	}

 	if (right_type == BTRFS_FILE_EXTENT_INLINE) {
-		right_len = btrfs_file_extent_inline_len(eb, slot, ei);
+		right_len = btrfs_file_extent_ram_bytes(eb, ei);
 		right_len = PAGE_ALIGN(right_len);
 	} else {
 		right_len = btrfs_file_extent_num_bytes(eb, ei);

@@ -5496,8 +5505,7 @@ static int get_last_extent(struct send_ctx *sctx, u64 offset)
 			    struct btrfs_file_extent_item);
 	type = btrfs_file_extent_type(path->nodes[0], fi);
 	if (type == BTRFS_FILE_EXTENT_INLINE) {
-		u64 size = btrfs_file_extent_inline_len(path->nodes[0],
-							path->slots[0], fi);
+		u64 size = btrfs_file_extent_ram_bytes(path->nodes[0], fi);
 		extent_end = ALIGN(key.offset + size,
 				   sctx->send_root->fs_info->sectorsize);
 	} else {

@@ -5560,7 +5568,7 @@ static int range_is_hole_in_parent(struct send_ctx *sctx,
 		fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
 		if (btrfs_file_extent_type(leaf, fi) ==
 		    BTRFS_FILE_EXTENT_INLINE) {
-			u64 size = btrfs_file_extent_inline_len(leaf, slot, fi);
+			u64 size = btrfs_file_extent_ram_bytes(leaf, fi);

 			extent_end = ALIGN(key.offset + size,
 					   root->fs_info->sectorsize);

@@ -5606,8 +5614,7 @@ static int maybe_send_hole(struct send_ctx *sctx, struct btrfs_path *path,
 			    struct btrfs_file_extent_item);
 	type = btrfs_file_extent_type(path->nodes[0], fi);
 	if (type == BTRFS_FILE_EXTENT_INLINE) {
-		u64 size = btrfs_file_extent_inline_len(path->nodes[0],
-							path->slots[0], fi);
+		u64 size = btrfs_file_extent_ram_bytes(path->nodes[0], fi);
 		extent_end = ALIGN(key->offset + size,
 				   sctx->send_root->fs_info->sectorsize);
 	} else {

@@ -5799,6 +5806,9 @@ static int finish_inode_if_needed(struct send_ctx *sctx, int at_end)
 	int pending_move = 0;
 	int refs_processed = 0;

+	if (sctx->ignore_cur_inode)
+		return 0;
+
 	ret = process_recorded_refs_if_needed(sctx, at_end, &pending_move,
 					      &refs_processed);
 	if (ret < 0)
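The send_hole() hunk above adds an early return for holes that start at or past EOF: a stream of zero writes there would only have to be undone later by a truncate, achieving nothing. A one-function sketch of the cutoff (names are illustrative, not the kernel's):

	#include <stdio.h>

	/* holes that begin at or beyond the inode size need no write commands */
	static int hole_needs_sending(unsigned long long offset,
				      unsigned long long cur_inode_size)
	{
		return offset < cur_inode_size;
	}

	int main(void)
	{
		printf("%d\n", hole_needs_sending(4096, 8192));	/* 1: inside file */
		printf("%d\n", hole_needs_sending(8192, 8192));	/* 0: at EOF */
		return 0;
	}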
@@ -5917,6 +5927,93 @@ static int finish_inode_if_needed(struct send_ctx *sctx, int at_end)
 	return ret;
 }

+struct parent_paths_ctx {
+	struct list_head *refs;
+	struct send_ctx *sctx;
+};
+
+static int record_parent_ref(int num, u64 dir, int index, struct fs_path *name,
+			     void *ctx)
+{
+	struct parent_paths_ctx *ppctx = ctx;
+
+	return record_ref(ppctx->sctx->parent_root, dir, name, ppctx->sctx,
+			  ppctx->refs);
+}
+
+/*
+ * Issue unlink operations for all paths of the current inode found in the
+ * parent snapshot.
+ */
+static int btrfs_unlink_all_paths(struct send_ctx *sctx)
+{
+	LIST_HEAD(deleted_refs);
+	struct btrfs_path *path;
+	struct btrfs_key key;
+	struct parent_paths_ctx ctx;
+	int ret;
+
+	path = alloc_path_for_send();
+	if (!path)
+		return -ENOMEM;
+
+	key.objectid = sctx->cur_ino;
+	key.type = BTRFS_INODE_REF_KEY;
+	key.offset = 0;
+	ret = btrfs_search_slot(NULL, sctx->parent_root, &key, path, 0, 0);
+	if (ret < 0)
+		goto out;
+
+	ctx.refs = &deleted_refs;
+	ctx.sctx = sctx;
+
+	while (true) {
+		struct extent_buffer *eb = path->nodes[0];
+		int slot = path->slots[0];
+
+		if (slot >= btrfs_header_nritems(eb)) {
+			ret = btrfs_next_leaf(sctx->parent_root, path);
+			if (ret < 0)
+				goto out;
+			else if (ret > 0)
+				break;
+			continue;
+		}
+
+		btrfs_item_key_to_cpu(eb, &key, slot);
+		if (key.objectid != sctx->cur_ino)
+			break;
+		if (key.type != BTRFS_INODE_REF_KEY &&
+		    key.type != BTRFS_INODE_EXTREF_KEY)
+			break;
+
+		ret = iterate_inode_ref(sctx->parent_root, path, &key, 1,
+					record_parent_ref, &ctx);
+		if (ret < 0)
+			goto out;
+
+		path->slots[0]++;
+	}
+
+	while (!list_empty(&deleted_refs)) {
+		struct recorded_ref *ref;
+
+		ref = list_first_entry(&deleted_refs, struct recorded_ref, list);
+		ret = send_unlink(sctx, ref->full_path);
+		if (ret < 0)
+			goto out;
+		fs_path_free(ref->full_path);
+		list_del(&ref->list);
+		kfree(ref);
+	}
+	ret = 0;
+out:
+	btrfs_free_path(path);
+	if (ret)
+		__free_recorded_refs(&deleted_refs);
+	return ret;
+}
+
 static int changed_inode(struct send_ctx *sctx,
 			 enum btrfs_compare_tree_result result)
 {

@@ -5931,6 +6028,7 @@ static int changed_inode(struct send_ctx *sctx,
 	sctx->cur_inode_new_gen = 0;
 	sctx->cur_inode_last_extent = (u64)-1;
 	sctx->cur_inode_next_write_offset = 0;
+	sctx->ignore_cur_inode = false;

 	/*
 	 * Set send_progress to current inode. This will tell all get_cur_xxx

@@ -5971,6 +6069,33 @@ static int changed_inode(struct send_ctx *sctx,
 		sctx->cur_inode_new_gen = 1;
 	}

+	/*
+	 * Normally we do not find inodes with a link count of zero (orphans)
+	 * because the most common case is to create a snapshot and use it
+	 * for a send operation. However other less common use cases involve
+	 * using a subvolume and send it after turning it to RO mode just
+	 * after deleting all hard links of a file while holding an open
+	 * file descriptor against it or turning a RO snapshot into RW mode,
+	 * keep an open file descriptor against a file, delete it and then
+	 * turn the snapshot back to RO mode before using it for a send
+	 * operation. So if we find such cases, ignore the inode and all its
+	 * items completely if it's a new inode, or if it's a changed inode
+	 * make sure all its previous paths (from the parent snapshot) are all
+	 * unlinked and all other the inode items are ignored.
+	 */
+	if (result == BTRFS_COMPARE_TREE_NEW ||
+	    result == BTRFS_COMPARE_TREE_CHANGED) {
+		u32 nlinks;
+
+		nlinks = btrfs_inode_nlink(sctx->left_path->nodes[0], left_ii);
+		if (nlinks == 0) {
+			sctx->ignore_cur_inode = true;
+			if (result == BTRFS_COMPARE_TREE_CHANGED)
+				ret = btrfs_unlink_all_paths(sctx);
+			goto out;
+		}
+	}
+
 	if (result == BTRFS_COMPARE_TREE_NEW) {
 		sctx->cur_inode_gen = left_gen;
 		sctx->cur_inode_new = 1;

@@ -6309,15 +6434,17 @@ static int changed_cb(struct btrfs_path *left_path,
 	    key->objectid == BTRFS_FREE_SPACE_OBJECTID)
 		goto out;

-	if (key->type == BTRFS_INODE_ITEM_KEY)
+	if (key->type == BTRFS_INODE_ITEM_KEY) {
 		ret = changed_inode(sctx, result);
-	else if (key->type == BTRFS_INODE_REF_KEY ||
-		 key->type == BTRFS_INODE_EXTREF_KEY)
-		ret = changed_ref(sctx, result);
-	else if (key->type == BTRFS_XATTR_ITEM_KEY)
-		ret = changed_xattr(sctx, result);
-	else if (key->type == BTRFS_EXTENT_DATA_KEY)
-		ret = changed_extent(sctx, result);
+	} else if (!sctx->ignore_cur_inode) {
+		if (key->type == BTRFS_INODE_REF_KEY ||
+		    key->type == BTRFS_INODE_EXTREF_KEY)
+			ret = changed_ref(sctx, result);
+		else if (key->type == BTRFS_XATTR_ITEM_KEY)
+			ret = changed_xattr(sctx, result);
+		else if (key->type == BTRFS_EXTENT_DATA_KEY)
+			ret = changed_extent(sctx, result);
+	}

 out:
 	return ret;

@@ -6328,7 +6455,6 @@ static int full_send_tree(struct send_ctx *sctx)
 	int ret;
 	struct btrfs_root *send_root = sctx->send_root;
 	struct btrfs_key key;
-	struct btrfs_key found_key;
 	struct btrfs_path *path;
 	struct extent_buffer *eb;
 	int slot;

@@ -6350,17 +6476,13 @@ static int full_send_tree(struct send_ctx *sctx)
 	while (1) {
 		eb = path->nodes[0];
 		slot = path->slots[0];
-		btrfs_item_key_to_cpu(eb, &found_key, slot);
+		btrfs_item_key_to_cpu(eb, &key, slot);

-		ret = changed_cb(path, NULL, &found_key,
+		ret = changed_cb(path, NULL, &key,
 				 BTRFS_COMPARE_TREE_NEW, sctx);
 		if (ret < 0)
 			goto out;

-		key.objectid = found_key.objectid;
-		key.type = found_key.type;
-		key.offset = found_key.offset + 1;
-
 		ret = btrfs_next_item(send_root, path);
 		if (ret < 0)
 			goto out;
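The changed_inode()/changed_cb() hunks above add a policy for inodes whose link count dropped to zero while an open descriptor kept them alive: a brand new orphan is ignored outright, and a changed orphan first gets unlink commands for every path it had in the parent snapshot. A compact sketch of that decision, assuming illustrative names (the enum and callback are stand-ins):

	#include <stdio.h>

	enum cmp_result_sketch { CMP_NEW, CMP_CHANGED };

	static int unlink_all_paths_stub(void)
	{
		printf("unlink all parent-snapshot paths\n");
		return 0;
	}

	static int handle_inode(enum cmp_result_sketch result,
				unsigned int nlinks, int *ignore_cur_inode)
	{
		if (nlinks > 0)
			return 0;		/* normal inode: process as usual */

		*ignore_cur_inode = 1;		/* skip its refs/xattrs/extents */
		if (result == CMP_CHANGED)
			return unlink_all_paths_stub();
		return 0;			/* new orphan: emit nothing at all */
	}

	int main(void)
	{
		int ignore = 0;

		handle_inode(CMP_CHANGED, 0, &ignore);
		printf("ignore=%d\n", ignore);
		return 0;
	}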
@@ -3,7 +3,6 @@
  * Copyright (C) 2007 Oracle.  All rights reserved.
  */

-#include <linux/highmem.h>
 #include <asm/unaligned.h>

 #include "ctree.h"
115
fs/btrfs/super.c
115
fs/btrfs/super.c
|
@ -5,7 +5,6 @@
|
|||
|
||||
#include <linux/blkdev.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/buffer_head.h>
|
||||
#include <linux/fs.h>
|
||||
#include <linux/pagemap.h>
|
||||
#include <linux/highmem.h>
|
||||
|
@ -15,8 +14,6 @@
|
|||
#include <linux/string.h>
|
||||
#include <linux/backing-dev.h>
|
||||
#include <linux/mount.h>
|
||||
#include <linux/mpage.h>
|
||||
#include <linux/swap.h>
|
||||
#include <linux/writeback.h>
|
||||
#include <linux/statfs.h>
|
||||
#include <linux/compat.h>
|
||||
|
@ -468,9 +465,8 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
|
|||
case Opt_subvolrootid:
|
||||
case Opt_device:
|
||||
/*
|
||||
* These are parsed by btrfs_parse_subvol_options
|
||||
* and btrfs_parse_early_options
|
||||
* and can be happily ignored here.
|
||||
* These are parsed by btrfs_parse_subvol_options or
|
||||
* btrfs_parse_device_options and can be ignored here.
|
||||
*/
|
||||
break;
|
||||
case Opt_nodatasum:
|
||||
|
@ -760,6 +756,7 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
|
|||
case Opt_recovery:
|
||||
btrfs_warn(info,
|
||||
"'recovery' is deprecated, use 'usebackuproot' instead");
|
||||
/* fall through */
|
||||
case Opt_usebackuproot:
|
||||
btrfs_info(info,
|
||||
"trying to use backup root at mount time");
|
||||
|
@ -885,13 +882,16 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
|
|||
* All other options will be parsed on much later in the mount process and
|
||||
* only when we need to allocate a new super block.
|
||||
*/
|
||||
static int btrfs_parse_early_options(const char *options, fmode_t flags,
|
||||
void *holder, struct btrfs_fs_devices **fs_devices)
|
||||
static int btrfs_parse_device_options(const char *options, fmode_t flags,
|
||||
void *holder)
|
||||
{
|
||||
substring_t args[MAX_OPT_ARGS];
|
||||
char *device_name, *opts, *orig, *p;
|
||||
struct btrfs_device *device = NULL;
|
||||
int error = 0;
|
||||
|
||||
lockdep_assert_held(&uuid_mutex);
|
||||
|
||||
if (!options)
|
||||
return 0;
|
||||
|
||||
|
@ -917,11 +917,13 @@ static int btrfs_parse_early_options(const char *options, fmode_t flags,
|
|||
error = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
error = btrfs_scan_one_device(device_name,
|
||||
flags, holder, fs_devices);
|
||||
device = btrfs_scan_one_device(device_name, flags,
|
||||
holder);
|
||||
kfree(device_name);
|
||||
if (error)
|
||||
if (IS_ERR(device)) {
|
||||
error = PTR_ERR(device);
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -935,8 +937,8 @@ static int btrfs_parse_early_options(const char *options, fmode_t flags,
|
|||
*
|
||||
* The value is later passed to mount_subvol()
|
||||
*/
|
||||
static int btrfs_parse_subvol_options(const char *options, fmode_t flags,
|
||||
char **subvol_name, u64 *subvol_objectid)
|
||||
static int btrfs_parse_subvol_options(const char *options, char **subvol_name,
|
||||
u64 *subvol_objectid)
|
||||
{
|
||||
substring_t args[MAX_OPT_ARGS];
|
||||
char *opts, *orig, *p;
|
||||
|
@ -948,7 +950,7 @@ static int btrfs_parse_subvol_options(const char *options, fmode_t flags,
|
|||
|
||||
/*
|
||||
* strsep changes the string, duplicate it because
|
||||
* btrfs_parse_early_options gets called later
|
||||
* btrfs_parse_device_options gets called later
|
||||
*/
|
||||
opts = kstrdup(options, GFP_KERNEL);
|
||||
if (!opts)
|
||||
|
@ -1517,6 +1519,7 @@ static struct dentry *btrfs_mount_root(struct file_system_type *fs_type,
|
|||
{
|
||||
struct block_device *bdev = NULL;
|
||||
struct super_block *s;
|
||||
struct btrfs_device *device = NULL;
|
||||
struct btrfs_fs_devices *fs_devices = NULL;
|
||||
struct btrfs_fs_info *fs_info = NULL;
|
||||
struct security_mnt_opts new_sec_opts;
|
||||
|
@ -1526,12 +1529,6 @@ static struct dentry *btrfs_mount_root(struct file_system_type *fs_type,
|
|||
if (!(flags & SB_RDONLY))
|
||||
mode |= FMODE_WRITE;
|
||||
|
||||
error = btrfs_parse_early_options(data, mode, fs_type,
|
||||
&fs_devices);
|
||||
if (error) {
|
||||
return ERR_PTR(error);
|
||||
}
|
||||
|
||||
security_init_mnt_opts(&new_sec_opts);
|
||||
if (data) {
|
||||
error = parse_security_options(data, &new_sec_opts);
|
||||
|
@ -1539,10 +1536,6 @@ static struct dentry *btrfs_mount_root(struct file_system_type *fs_type,
|
|||
return ERR_PTR(error);
|
||||
}
|
||||
|
||||
error = btrfs_scan_one_device(device_name, mode, fs_type, &fs_devices);
|
||||
if (error)
|
||||
goto error_sec_opts;
|
||||
|
||||
/*
|
||||
* Setup a dummy root and fs_info for test/set super. This is because
|
||||
* we don't actually fill this stuff out until open_ctree, but we need
|
||||
|
@ -1555,8 +1548,6 @@ static struct dentry *btrfs_mount_root(struct file_system_type *fs_type,
|
|||
goto error_sec_opts;
|
||||
}
|
||||
|
||||
fs_info->fs_devices = fs_devices;
|
||||
|
||||
fs_info->super_copy = kzalloc(BTRFS_SUPER_INFO_SIZE, GFP_KERNEL);
|
||||
fs_info->super_for_commit = kzalloc(BTRFS_SUPER_INFO_SIZE, GFP_KERNEL);
|
||||
security_init_mnt_opts(&fs_info->security_opts);
|
||||
|
@ -1565,7 +1556,25 @@ static struct dentry *btrfs_mount_root(struct file_system_type *fs_type,
|
|||
goto error_fs_info;
|
||||
}
|
||||
|
||||
mutex_lock(&uuid_mutex);
|
||||
error = btrfs_parse_device_options(data, mode, fs_type);
|
||||
if (error) {
|
||||
mutex_unlock(&uuid_mutex);
|
||||
goto error_fs_info;
|
||||
}
|
||||
|
||||
device = btrfs_scan_one_device(device_name, mode, fs_type);
|
||||
if (IS_ERR(device)) {
|
||||
mutex_unlock(&uuid_mutex);
|
||||
error = PTR_ERR(device);
|
||||
goto error_fs_info;
|
||||
}
|
||||
|
||||
fs_devices = device->fs_devices;
|
||||
fs_info->fs_devices = fs_devices;
|
||||
|
||||
error = btrfs_open_devices(fs_devices, mode, fs_type);
|
||||
mutex_unlock(&uuid_mutex);
|
||||
if (error)
|
||||
goto error_fs_info;
|
||||
|
||||
|
@ -1650,8 +1659,8 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
|
|||
if (!(flags & SB_RDONLY))
|
||||
mode |= FMODE_WRITE;
|
||||
|
||||
error = btrfs_parse_subvol_options(data, mode,
|
||||
&subvol_name, &subvol_objectid);
|
||||
error = btrfs_parse_subvol_options(data, &subvol_name,
|
||||
&subvol_objectid);
|
||||
if (error) {
|
||||
kfree(subvol_name);
|
||||
return ERR_PTR(error);
|
||||
|
@ -2098,14 +2107,9 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
|
|||
btrfs_account_ro_block_groups_free_space(found);
|
||||
|
||||
for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
|
||||
if (!list_empty(&found->block_groups[i])) {
|
||||
switch (i) {
|
||||
case BTRFS_RAID_DUP:
|
||||
case BTRFS_RAID_RAID1:
|
||||
case BTRFS_RAID_RAID10:
|
||||
factor = 2;
|
||||
}
|
||||
}
|
||||
if (!list_empty(&found->block_groups[i]))
|
||||
factor = btrfs_bg_type_to_factor(
|
||||
btrfs_raid_array[i].bg_flag);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -2222,7 +2226,7 @@ static long btrfs_control_ioctl(struct file *file, unsigned int cmd,
|
|||
unsigned long arg)
|
||||
{
|
||||
struct btrfs_ioctl_vol_args *vol;
|
||||
struct btrfs_fs_devices *fs_devices;
|
||||
struct btrfs_device *device = NULL;
|
||||
int ret = -ENOTTY;
|
||||
|
||||
if (!capable(CAP_SYS_ADMIN))
|
||||
|
@ -2234,15 +2238,24 @@ static long btrfs_control_ioctl(struct file *file, unsigned int cmd,
|
|||
|
||||
switch (cmd) {
|
||||
case BTRFS_IOC_SCAN_DEV:
|
||||
ret = btrfs_scan_one_device(vol->name, FMODE_READ,
|
||||
&btrfs_root_fs_type, &fs_devices);
|
||||
mutex_lock(&uuid_mutex);
|
||||
device = btrfs_scan_one_device(vol->name, FMODE_READ,
|
||||
&btrfs_root_fs_type);
|
||||
ret = PTR_ERR_OR_ZERO(device);
|
||||
mutex_unlock(&uuid_mutex);
|
||||
break;
|
||||
case BTRFS_IOC_DEVICES_READY:
|
||||
ret = btrfs_scan_one_device(vol->name, FMODE_READ,
|
||||
&btrfs_root_fs_type, &fs_devices);
|
||||
if (ret)
|
||||
mutex_lock(&uuid_mutex);
|
||||
device = btrfs_scan_one_device(vol->name, FMODE_READ,
|
||||
&btrfs_root_fs_type);
|
||||
if (IS_ERR(device)) {
|
||||
mutex_unlock(&uuid_mutex);
|
||||
ret = PTR_ERR(device);
|
||||
break;
|
||||
ret = !(fs_devices->num_devices == fs_devices->total_devices);
|
||||
}
|
||||
ret = !(device->fs_devices->num_devices ==
|
||||
device->fs_devices->total_devices);
|
||||
mutex_unlock(&uuid_mutex);
|
||||
break;
|
||||
case BTRFS_IOC_GET_SUPPORTED_FEATURES:
|
||||
ret = btrfs_ioctl_get_supported_features((void __user*)arg);
|
||||
|
@ -2290,7 +2303,6 @@ static int btrfs_show_devname(struct seq_file *m, struct dentry *root)
|
|||
struct btrfs_fs_devices *cur_devices;
|
||||
struct btrfs_device *dev, *first_dev = NULL;
|
||||
struct list_head *head;
|
||||
struct rcu_string *name;
|
||||
|
||||
/*
|
||||
* Lightweight locking of the devices. We should not need
|
||||
|
@ -2314,12 +2326,10 @@ static int btrfs_show_devname(struct seq_file *m, struct dentry *root)
|
|||
cur_devices = cur_devices->seed;
|
||||
}
|
||||
|
||||
if (first_dev) {
|
||||
name = rcu_dereference(first_dev->name);
|
||||
seq_escape(m, name->str, " \t\n\\");
|
||||
} else {
|
||||
if (first_dev)
|
||||
seq_escape(m, rcu_str_deref(first_dev->name), " \t\n\\");
|
||||
else
|
||||
WARN_ON(1);
|
||||
}
|
||||
rcu_read_unlock();
|
||||
return 0;
|
||||
}
|
||||
|
@ -2331,7 +2341,6 @@ static const struct super_operations btrfs_super_ops = {
|
|||
.sync_fs = btrfs_sync_fs,
|
||||
.show_options = btrfs_show_options,
|
||||
.show_devname = btrfs_show_devname,
|
||||
.write_inode = btrfs_write_inode,
|
||||
.alloc_inode = btrfs_alloc_inode,
|
||||
.destroy_inode = btrfs_destroy_inode,
|
||||
.statfs = btrfs_statfs,
|
||||
|
@@ -2369,7 +2378,7 @@ static __cold void btrfs_interface_exit(void)
 
 static void __init btrfs_print_mod_info(void)
 {
-	pr_info("Btrfs loaded, crc32c=%s"
+	static const char options[] = ""
 #ifdef CONFIG_BTRFS_DEBUG
 			", debug=on"
 #endif
@@ -2382,8 +2391,8 @@ static void __init btrfs_print_mod_info(void)
 #ifdef CONFIG_BTRFS_FS_REF_VERIFY
 			", ref-verify=on"
 #endif
-			"\n",
-			crc32c_impl());
+			;
+	pr_info("Btrfs loaded, crc32c=%s%s\n", crc32c_impl(), options);
 }
 
 static int __init init_btrfs_fs(void)

diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
@@ -7,10 +7,8 @@
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 #include <linux/completion.h>
-#include <linux/buffer_head.h>
-#include <linux/kobject.h>
 #include <linux/bug.h>
 #include <linux/genhd.h>
 #include <linux/debugfs.h>
 
 #include "ctree.h"

diff --git a/fs/btrfs/tests/qgroup-tests.c b/fs/btrfs/tests/qgroup-tests.c
@@ -216,7 +216,7 @@ static int test_no_shared_qgroup(struct btrfs_root *root,
 	btrfs_init_dummy_trans(&trans, fs_info);
 
 	test_msg("qgroup basic add");
-	ret = btrfs_create_qgroup(NULL, fs_info, BTRFS_FS_TREE_OBJECTID);
+	ret = btrfs_create_qgroup(&trans, BTRFS_FS_TREE_OBJECTID);
 	if (ret) {
 		test_err("couldn't create a qgroup %d", ret);
 		return ret;
@@ -249,8 +249,8 @@ static int test_no_shared_qgroup(struct btrfs_root *root,
 		return ret;
 	}
 
-	ret = btrfs_qgroup_account_extent(&trans, fs_info, nodesize,
-					  nodesize, old_roots, new_roots);
+	ret = btrfs_qgroup_account_extent(&trans, nodesize, nodesize, old_roots,
+					  new_roots);
 	if (ret) {
 		test_err("couldn't account space for a qgroup %d", ret);
 		return ret;
@@ -285,8 +285,8 @@ static int test_no_shared_qgroup(struct btrfs_root *root,
 		return ret;
 	}
 
-	ret = btrfs_qgroup_account_extent(&trans, fs_info, nodesize,
-					  nodesize, old_roots, new_roots);
+	ret = btrfs_qgroup_account_extent(&trans, nodesize, nodesize, old_roots,
+					  new_roots);
 	if (ret) {
 		test_err("couldn't account space for a qgroup %d", ret);
 		return -EINVAL;
@@ -322,7 +322,7 @@ static int test_multiple_refs(struct btrfs_root *root,
 	 * We have BTRFS_FS_TREE_OBJECTID created already from the
 	 * previous test.
 	 */
-	ret = btrfs_create_qgroup(NULL, fs_info, BTRFS_FIRST_FREE_OBJECTID);
+	ret = btrfs_create_qgroup(&trans, BTRFS_FIRST_FREE_OBJECTID);
 	if (ret) {
 		test_err("couldn't create a qgroup %d", ret);
 		return ret;
@@ -350,8 +350,8 @@ static int test_multiple_refs(struct btrfs_root *root,
 		return ret;
 	}
 
-	ret = btrfs_qgroup_account_extent(&trans, fs_info, nodesize,
-					  nodesize, old_roots, new_roots);
+	ret = btrfs_qgroup_account_extent(&trans, nodesize, nodesize, old_roots,
+					  new_roots);
 	if (ret) {
 		test_err("couldn't account space for a qgroup %d", ret);
 		return ret;
@@ -385,8 +385,8 @@ static int test_multiple_refs(struct btrfs_root *root,
 		return ret;
 	}
 
-	ret = btrfs_qgroup_account_extent(&trans, fs_info, nodesize,
-					  nodesize, old_roots, new_roots);
+	ret = btrfs_qgroup_account_extent(&trans, nodesize, nodesize, old_roots,
+					  new_roots);
 	if (ret) {
 		test_err("couldn't account space for a qgroup %d", ret);
 		return ret;
@@ -426,8 +426,8 @@ static int test_multiple_refs(struct btrfs_root *root,
 		return ret;
 	}
 
-	ret = btrfs_qgroup_account_extent(&trans, fs_info, nodesize,
-					  nodesize, old_roots, new_roots);
+	ret = btrfs_qgroup_account_extent(&trans, nodesize, nodesize, old_roots,
+					  new_roots);
 	if (ret) {
 		test_err("couldn't account space for a qgroup %d", ret);
 		return ret;
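
Note: every signature change in this file follows the same cleanup pattern from this series: a btrfs_trans_handle already records the filesystem it operates on, so the separate fs_info argument is redundant. A sketch of the callee side, with example_callee() being a hypothetical name:

    static int example_callee(struct btrfs_trans_handle *trans, u64 bytenr)
    {
            /* derive fs_info from the handle instead of a second argument */
            struct btrfs_fs_info *fs_info = trans->fs_info;

            btrfs_info(fs_info, "processing extent at %llu", bytenr);
            return 0;
    }

This is part of the "remove redundant arguments in order to reduce stack space consumption" item in the changelog.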
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
@@ -241,7 +241,7 @@ static noinline int join_transaction(struct btrfs_fs_info *fs_info,
 	refcount_set(&cur_trans->use_count, 2);
 	atomic_set(&cur_trans->pending_ordered, 0);
 	cur_trans->flags = 0;
-	cur_trans->start_time = get_seconds();
+	cur_trans->start_time = ktime_get_seconds();
 
 	memset(&cur_trans->delayed_refs, 0, sizeof(cur_trans->delayed_refs));
@@ -680,7 +680,7 @@ btrfs_attach_transaction_barrier(struct btrfs_root *root)
 
 	trans = start_transaction(root, 0, TRANS_ATTACH,
 				  BTRFS_RESERVE_NO_FLUSH, true);
-	if (IS_ERR(trans) && PTR_ERR(trans) == -ENOENT)
+	if (trans == ERR_PTR(-ENOENT))
 		btrfs_wait_for_commit(root->fs_info, 0);
 
 	return trans;
@@ -1152,7 +1152,7 @@ static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans)
 	ret = btrfs_run_dev_replace(trans, fs_info);
 	if (ret)
 		return ret;
-	ret = btrfs_run_qgroups(trans, fs_info);
+	ret = btrfs_run_qgroups(trans);
 	if (ret)
 		return ret;
 
@@ -1355,8 +1355,7 @@ static int qgroup_account_snapshot(struct btrfs_trans_handle *trans,
 		goto out;
 
 	/* Now qgroup are all updated, we can inherit it to new qgroups */
-	ret = btrfs_qgroup_inherit(trans, fs_info,
-				   src->root_key.objectid, dst_objectid,
+	ret = btrfs_qgroup_inherit(trans, src->root_key.objectid, dst_objectid,
 				   inherit);
 	if (ret < 0)
 		goto out;
@@ -1574,7 +1573,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
 	/*
 	 * insert root back/forward references
 	 */
-	ret = btrfs_add_root_ref(trans, fs_info, objectid,
+	ret = btrfs_add_root_ref(trans, objectid,
 				 parent_root->root_key.objectid,
 				 btrfs_ino(BTRFS_I(parent_inode)), index,
 				 dentry->d_name.name, dentry->d_name.len);

diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
@@ -48,7 +48,7 @@ struct btrfs_transaction {
 	int aborted;
 	struct list_head list;
 	struct extent_io_tree dirty_pages;
-	unsigned long start_time;
+	time64_t start_time;
 	wait_queue_head_t writer_wait;
 	wait_queue_head_t commit_wait;
 	wait_queue_head_t pending_wait;
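
Note: this pairs with the ktime_get_seconds() switch in join_transaction() above. get_seconds() returned wall-clock seconds as unsigned long, which truncates on 32-bit (the 2038 problem) and jumps when the clock is set; ktime_get_seconds() returns monotonic seconds as time64_t. The measuring pattern, as a sketch:

    #include <linux/timekeeping.h>

    time64_t start, elapsed;

    start = ktime_get_seconds();            /* monotonic, 64-bit */
    /* ... the transaction runs ... */
    elapsed = ktime_get_seconds() - start;  /* immune to settimeofday() */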
diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c
@@ -19,6 +19,7 @@
 #include "tree-checker.h"
 #include "disk-io.h"
 #include "compression.h"
+#include "volumes.h"
 
 /*
  * Error message should follow the following format:
@@ -353,6 +354,102 @@ static int check_dir_item(struct btrfs_fs_info *fs_info,
 	return 0;
 }
 
+__printf(4, 5)
+__cold
+static void block_group_err(const struct btrfs_fs_info *fs_info,
+			    const struct extent_buffer *eb, int slot,
+			    const char *fmt, ...)
+{
+	struct btrfs_key key;
+	struct va_format vaf;
+	va_list args;
+
+	btrfs_item_key_to_cpu(eb, &key, slot);
+	va_start(args, fmt);
+
+	vaf.fmt = fmt;
+	vaf.va = &args;
+
+	btrfs_crit(fs_info,
+	"corrupt %s: root=%llu block=%llu slot=%d bg_start=%llu bg_len=%llu, %pV",
+		btrfs_header_level(eb) == 0 ? "leaf" : "node",
+		btrfs_header_owner(eb), btrfs_header_bytenr(eb), slot,
+		key.objectid, key.offset, &vaf);
+	va_end(args);
+}
+
+static int check_block_group_item(struct btrfs_fs_info *fs_info,
+				  struct extent_buffer *leaf,
+				  struct btrfs_key *key, int slot)
+{
+	struct btrfs_block_group_item bgi;
+	u32 item_size = btrfs_item_size_nr(leaf, slot);
+	u64 flags;
+	u64 type;
+
+	/*
+	 * Here we don't really care about alignment since extent allocator
+	 * can handle it. We care more about the size, as if one block group
+	 * is larger than maximum size, it must be some obvious corruption.
+	 */
+	if (key->offset > BTRFS_MAX_DATA_CHUNK_SIZE || key->offset == 0) {
+		block_group_err(fs_info, leaf, slot,
+			"invalid block group size, have %llu expect (0, %llu]",
+			key->offset, BTRFS_MAX_DATA_CHUNK_SIZE);
+		return -EUCLEAN;
+	}
+
+	if (item_size != sizeof(bgi)) {
+		block_group_err(fs_info, leaf, slot,
+			"invalid item size, have %u expect %zu",
+			item_size, sizeof(bgi));
+		return -EUCLEAN;
+	}
+
+	read_extent_buffer(leaf, &bgi, btrfs_item_ptr_offset(leaf, slot),
+			   sizeof(bgi));
+	if (btrfs_block_group_chunk_objectid(&bgi) !=
+	    BTRFS_FIRST_CHUNK_TREE_OBJECTID) {
+		block_group_err(fs_info, leaf, slot,
+		"invalid block group chunk objectid, have %llu expect %llu",
+			btrfs_block_group_chunk_objectid(&bgi),
+			BTRFS_FIRST_CHUNK_TREE_OBJECTID);
+		return -EUCLEAN;
+	}
+
+	if (btrfs_block_group_used(&bgi) > key->offset) {
+		block_group_err(fs_info, leaf, slot,
+			"invalid block group used, have %llu expect [0, %llu)",
+			btrfs_block_group_used(&bgi), key->offset);
+		return -EUCLEAN;
+	}
+
+	flags = btrfs_block_group_flags(&bgi);
+	if (hweight64(flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) > 1) {
+		block_group_err(fs_info, leaf, slot,
+"invalid profile flags, have 0x%llx (%lu bits set) expect no more than 1 bit set",
+			flags & BTRFS_BLOCK_GROUP_PROFILE_MASK,
+			hweight64(flags & BTRFS_BLOCK_GROUP_PROFILE_MASK));
+		return -EUCLEAN;
+	}
+
+	type = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
+	if (type != BTRFS_BLOCK_GROUP_DATA &&
+	    type != BTRFS_BLOCK_GROUP_METADATA &&
+	    type != BTRFS_BLOCK_GROUP_SYSTEM &&
+	    type != (BTRFS_BLOCK_GROUP_METADATA |
+		     BTRFS_BLOCK_GROUP_DATA)) {
+		block_group_err(fs_info, leaf, slot,
+"invalid type, have 0x%llx (%lu bits set) expect either 0x%llx, 0x%llx, 0x%llx or 0x%llx",
+			type, hweight64(type),
+			BTRFS_BLOCK_GROUP_DATA, BTRFS_BLOCK_GROUP_METADATA,
+			BTRFS_BLOCK_GROUP_SYSTEM,
+			BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA);
+		return -EUCLEAN;
+	}
+	return 0;
+}
+
 /*
  * Common point to switch the item-specific validation.
  */
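
A worked example of the profile check above: on-disk block group flags combine one type bit with at most one profile bit, so a healthy DATA block group in RAID1 looks like this (values per the uapi flag definitions):

    u64 flags = BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_RAID1; /* 0x11 */
    u64 profile = flags & BTRFS_BLOCK_GROUP_PROFILE_MASK;         /* 0x10 */

    /* hweight64() counts set bits: hweight64(0x10) == 1, so this passes.
     * A corrupted item with, say, RAID1|RAID10 both set would give 2 and
     * the checker returns -EUCLEAN (EFSCORRUPTED). */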
@@ -374,6 +471,9 @@ static int check_leaf_item(struct btrfs_fs_info *fs_info,
 	case BTRFS_XATTR_ITEM_KEY:
 		ret = check_dir_item(fs_info, leaf, key, slot);
 		break;
+	case BTRFS_BLOCK_GROUP_ITEM_KEY:
+		ret = check_block_group_item(fs_info, leaf, key, slot);
+		break;
 	}
 	return ret;
 }
@@ -396,9 +496,22 @@ static int check_leaf(struct btrfs_fs_info *fs_info, struct extent_buffer *leaf,
 	 * skip this check for relocation trees.
 	 */
 	if (nritems == 0 && !btrfs_header_flag(leaf, BTRFS_HEADER_FLAG_RELOC)) {
+		u64 owner = btrfs_header_owner(leaf);
 		struct btrfs_root *check_root;
 
-		key.objectid = btrfs_header_owner(leaf);
+		/* These trees must never be empty */
+		if (owner == BTRFS_ROOT_TREE_OBJECTID ||
+		    owner == BTRFS_CHUNK_TREE_OBJECTID ||
+		    owner == BTRFS_EXTENT_TREE_OBJECTID ||
+		    owner == BTRFS_DEV_TREE_OBJECTID ||
+		    owner == BTRFS_FS_TREE_OBJECTID ||
+		    owner == BTRFS_DATA_RELOC_TREE_OBJECTID) {
+			generic_err(fs_info, leaf, 0,
+				    "invalid root, root %llu must never be empty",
+				    owner);
+			return -EUCLEAN;
+		}
+		key.objectid = owner;
 		key.type = BTRFS_ROOT_ITEM_KEY;
 		key.offset = (u64)-1;
 

diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
@@ -545,12 +545,8 @@ static noinline struct inode *read_one_inode(struct btrfs_root *root,
 	key.type = BTRFS_INODE_ITEM_KEY;
 	key.offset = 0;
 	inode = btrfs_iget(root->fs_info->sb, &key, root, NULL);
-	if (IS_ERR(inode)) {
+	if (IS_ERR(inode))
 		inode = NULL;
-	} else if (is_bad_inode(inode)) {
-		iput(inode);
-		inode = NULL;
-	}
 	return inode;
 }
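
Note: with the bad-inode handling lifted into btrfs_iget() (see the "btrfs_iget never returns an is_bad_inode inode" commit in this pull), callers are left with a plain error-pointer check. The canonical caller pattern is now simply:

    inode = btrfs_iget(fs_info->sb, &key, root, NULL);
    if (IS_ERR(inode))
            return PTR_ERR(inode);  /* never a half-constructed inode */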
@@ -597,7 +593,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
 		if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
 			nbytes = 0;
 	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
-		size = btrfs_file_extent_inline_len(eb, slot, item);
+		size = btrfs_file_extent_ram_bytes(eb, item);
 		nbytes = btrfs_file_extent_ram_bytes(eb, item);
 		extent_end = ALIGN(start + size,
 				   fs_info->sectorsize);
@@ -685,7 +681,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
 		 * as the owner of the file extent changed from log tree
 		 * (doesn't affect qgroup) to fs/file tree(affects qgroup)
 		 */
-		ret = btrfs_qgroup_trace_extent(trans, fs_info,
+		ret = btrfs_qgroup_trace_extent(trans,
 				btrfs_file_extent_disk_bytenr(eb, item),
 				btrfs_file_extent_disk_num_bytes(eb, item),
 				GFP_NOFS);
@@ -715,7 +711,6 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
 			 * allocation tree
 			 */
 			ret = btrfs_alloc_logged_file_extent(trans,
-						fs_info,
 						root->root_key.objectid,
 						key->objectid, offset, &ins);
 			if (ret)
@@ -1291,6 +1286,46 @@ static int unlink_old_inode_refs(struct btrfs_trans_handle *trans,
 	return ret;
 }
 
+static int btrfs_inode_ref_exists(struct inode *inode, struct inode *dir,
+				  const u8 ref_type, const char *name,
+				  const int namelen)
+{
+	struct btrfs_key key;
+	struct btrfs_path *path;
+	const u64 parent_id = btrfs_ino(BTRFS_I(dir));
+	int ret;
+
+	path = btrfs_alloc_path();
+	if (!path)
+		return -ENOMEM;
+
+	key.objectid = btrfs_ino(BTRFS_I(inode));
+	key.type = ref_type;
+	if (key.type == BTRFS_INODE_REF_KEY)
+		key.offset = parent_id;
+	else
+		key.offset = btrfs_extref_hash(parent_id, name, namelen);
+
+	ret = btrfs_search_slot(NULL, BTRFS_I(inode)->root, &key, path, 0, 0);
+	if (ret < 0)
+		goto out;
+	if (ret > 0) {
+		ret = 0;
+		goto out;
+	}
+	if (key.type == BTRFS_INODE_EXTREF_KEY)
+		ret = btrfs_find_name_in_ext_backref(path->nodes[0],
+						     path->slots[0], parent_id,
+						     name, namelen, NULL);
+	else
+		ret = btrfs_find_name_in_backref(path->nodes[0], path->slots[0],
+						 name, namelen, NULL);
+out:
+	btrfs_free_path(path);
+	return ret;
+}
+
 /*
  * replay one inode back reference item found in the log tree.
  * eb, slot and key refer to the buffer and key found in the log tree.
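
Note on the key layout assumed by this lookup: a plain INODE_REF keys on the parent directory's inode number directly, while an INODE_EXTREF (used once a name no longer fits into the ref item) keys on a name hash. btrfs_extref_hash() is, roughly, crc32c seeded with the parent objectid, per the inline helper in fs/btrfs/ctree.h:

    static inline u64 btrfs_extref_hash(u64 parent_objectid, const char *name,
                                        int len)
    {
            return (u64)crc32c(parent_objectid, name, len);
    }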
@@ -1400,6 +1435,32 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
 			}
 		}
 
+		/*
+		 * If a reference item already exists for this inode
+		 * with the same parent and name, but different index,
+		 * drop it and the corresponding directory index entries
+		 * from the parent before adding the new reference item
+		 * and dir index entries, otherwise we would fail with
+		 * -EEXIST returned from btrfs_add_link() below.
+		 */
+		ret = btrfs_inode_ref_exists(inode, dir, key->type,
+					     name, namelen);
+		if (ret > 0) {
+			ret = btrfs_unlink_inode(trans, root,
+						 BTRFS_I(dir),
+						 BTRFS_I(inode),
+						 name, namelen);
+			/*
+			 * If we dropped the link count to 0, bump it so
+			 * that later the iput() on the inode will not
+			 * free it. We will fixup the link count later.
+			 */
+			if (!ret && inode->i_nlink == 0)
+				inc_nlink(inode);
+		}
+		if (ret < 0)
+			goto out;
+
 		/* insert our name */
 		ret = btrfs_add_link(trans, BTRFS_I(dir),
 				     BTRFS_I(inode),
@@ -2120,7 +2181,7 @@ static noinline int check_item_in_log(struct btrfs_trans_handle *trans,
 						     dir_key->offset,
 						     name, name_len, 0);
 		}
-		if (!log_di || (IS_ERR(log_di) && PTR_ERR(log_di) == -ENOENT)) {
+		if (!log_di || log_di == ERR_PTR(-ENOENT)) {
 			btrfs_dir_item_key_to_cpu(eb, di, &location);
 			btrfs_release_path(path);
 			btrfs_release_path(log_path);
@@ -2933,7 +2994,6 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
 	/* bail out if we need to do a full commit */
 	if (btrfs_need_log_full_commit(fs_info, trans)) {
 		ret = -EAGAIN;
-		btrfs_free_logged_extents(log, log_transid);
 		mutex_unlock(&root->log_mutex);
 		goto out;
 	}
@@ -2951,7 +3011,6 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
 	if (ret) {
 		blk_finish_plug(&plug);
 		btrfs_abort_transaction(trans, ret);
-		btrfs_free_logged_extents(log, log_transid);
 		btrfs_set_log_full_commit(fs_info, trans);
 		mutex_unlock(&root->log_mutex);
 		goto out;
@@ -3002,7 +3061,6 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
 			goto out;
 		}
 		btrfs_wait_tree_log_extents(log, mark);
-		btrfs_free_logged_extents(log, log_transid);
 		mutex_unlock(&log_root_tree->log_mutex);
 		ret = -EAGAIN;
 		goto out;
@@ -3020,7 +3078,6 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
 	if (atomic_read(&log_root_tree->log_commit[index2])) {
 		blk_finish_plug(&plug);
 		ret = btrfs_wait_tree_log_extents(log, mark);
-		btrfs_wait_logged_extents(trans, log, log_transid);
 		wait_log_commit(log_root_tree,
 				root_log_ctx.log_transid);
 		mutex_unlock(&log_root_tree->log_mutex);
@@ -3045,7 +3102,6 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
 	if (btrfs_need_log_full_commit(fs_info, trans)) {
 		blk_finish_plug(&plug);
 		btrfs_wait_tree_log_extents(log, mark);
-		btrfs_free_logged_extents(log, log_transid);
 		mutex_unlock(&log_root_tree->log_mutex);
 		ret = -EAGAIN;
 		goto out_wake_log_root;
@@ -3058,7 +3114,6 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
 	if (ret) {
 		btrfs_set_log_full_commit(fs_info, trans);
 		btrfs_abort_transaction(trans, ret);
-		btrfs_free_logged_extents(log, log_transid);
 		mutex_unlock(&log_root_tree->log_mutex);
 		goto out_wake_log_root;
 	}
@@ -3068,11 +3123,9 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
 					EXTENT_NEW | EXTENT_DIRTY);
 	if (ret) {
 		btrfs_set_log_full_commit(fs_info, trans);
-		btrfs_free_logged_extents(log, log_transid);
 		mutex_unlock(&log_root_tree->log_mutex);
 		goto out_wake_log_root;
 	}
-	btrfs_wait_logged_extents(trans, log, log_transid);
 
 	btrfs_set_super_log_root(fs_info->super_for_commit,
 				 log_root_tree->node->start);
@@ -3159,14 +3212,6 @@ static void free_log_tree(struct btrfs_trans_handle *trans,
 			  EXTENT_DIRTY | EXTENT_NEW | EXTENT_NEED_WAIT);
 	}
 
-	/*
-	 * We may have short-circuited the log tree with the full commit logic
-	 * and left ordered extents on our list, so clear these out to keep us
-	 * from leaking inodes and memory.
-	 */
-	btrfs_free_logged_extents(log, 0);
-	btrfs_free_logged_extents(log, 1);
-
 	free_extent_buffer(log->node);
 	kfree(log);
 }
@@ -3756,7 +3801,7 @@ static noinline int copy_items(struct btrfs_trans_handle *trans,
 			       int start_slot, int nr, int inode_only,
 			       u64 logged_isize)
 {
-	struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
+	struct btrfs_fs_info *fs_info = trans->fs_info;
 	unsigned long src_offset;
 	unsigned long dst_offset;
 	struct btrfs_root *log = inode->root->log_root;
@@ -3937,9 +3982,7 @@ static noinline int copy_items(struct btrfs_trans_handle *trans,
 					      struct btrfs_file_extent_item);
 			if (btrfs_file_extent_type(src, extent) ==
 			    BTRFS_FILE_EXTENT_INLINE) {
-				len = btrfs_file_extent_inline_len(src,
-						src_path->slots[0],
-						extent);
+				len = btrfs_file_extent_ram_bytes(src, extent);
 				*last_extent = ALIGN(key.offset + len,
 						     fs_info->sectorsize);
 			} else {
@@ -4004,7 +4047,7 @@ static noinline int copy_items(struct btrfs_trans_handle *trans,
 		extent = btrfs_item_ptr(src, i, struct btrfs_file_extent_item);
 		if (btrfs_file_extent_type(src, extent) ==
 		    BTRFS_FILE_EXTENT_INLINE) {
-			len = btrfs_file_extent_inline_len(src, i, extent);
+			len = btrfs_file_extent_ram_bytes(src, extent);
 			extent_end = ALIGN(key.offset + len,
 					   fs_info->sectorsize);
 		} else {
@@ -4078,131 +4121,32 @@ static int extent_cmp(void *priv, struct list_head *a, struct list_head *b)
 	return 0;
 }
 
-static int wait_ordered_extents(struct btrfs_trans_handle *trans,
-				struct inode *inode,
-				struct btrfs_root *root,
-				const struct extent_map *em,
-				const struct list_head *logged_list,
-				bool *ordered_io_error)
+static int log_extent_csums(struct btrfs_trans_handle *trans,
+			    struct btrfs_inode *inode,
+			    struct btrfs_root *log_root,
+			    const struct extent_map *em)
 {
-	struct btrfs_fs_info *fs_info = root->fs_info;
-	struct btrfs_ordered_extent *ordered;
-	struct btrfs_root *log = root->log_root;
-	u64 mod_start = em->mod_start;
-	u64 mod_len = em->mod_len;
-	const bool skip_csum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
 	u64 csum_offset;
 	u64 csum_len;
 	LIST_HEAD(ordered_sums);
 	int ret = 0;
 
-	*ordered_io_error = false;
-
-	if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) ||
+	if (inode->flags & BTRFS_INODE_NODATASUM ||
+	    test_bit(EXTENT_FLAG_PREALLOC, &em->flags) ||
 	    em->block_start == EXTENT_MAP_HOLE)
 		return 0;
 
-	/*
-	 * Wait far any ordered extent that covers our extent map. If it
-	 * finishes without an error, first check and see if our csums are on
-	 * our outstanding ordered extents.
-	 */
-	list_for_each_entry(ordered, logged_list, log_list) {
-		struct btrfs_ordered_sum *sum;
-
-		if (!mod_len)
-			break;
-
-		if (ordered->file_offset + ordered->len <= mod_start ||
-		    mod_start + mod_len <= ordered->file_offset)
-			continue;
-
-		if (!test_bit(BTRFS_ORDERED_IO_DONE, &ordered->flags) &&
-		    !test_bit(BTRFS_ORDERED_IOERR, &ordered->flags) &&
-		    !test_bit(BTRFS_ORDERED_DIRECT, &ordered->flags)) {
-			const u64 start = ordered->file_offset;
-			const u64 end = ordered->file_offset + ordered->len - 1;
-
-			WARN_ON(ordered->inode != inode);
-			filemap_fdatawrite_range(inode->i_mapping, start, end);
-		}
-
-		wait_event(ordered->wait,
-			   (test_bit(BTRFS_ORDERED_IO_DONE, &ordered->flags) ||
-			    test_bit(BTRFS_ORDERED_IOERR, &ordered->flags)));
-
-		if (test_bit(BTRFS_ORDERED_IOERR, &ordered->flags)) {
-			/*
-			 * Clear the AS_EIO/AS_ENOSPC flags from the inode's
-			 * i_mapping flags, so that the next fsync won't get
-			 * an outdated io error too.
-			 */
-			filemap_check_errors(inode->i_mapping);
-			*ordered_io_error = true;
-			break;
-		}
-		/*
-		 * We are going to copy all the csums on this ordered extent, so
-		 * go ahead and adjust mod_start and mod_len in case this
-		 * ordered extent has already been logged.
-		 */
-		if (ordered->file_offset > mod_start) {
-			if (ordered->file_offset + ordered->len >=
-			    mod_start + mod_len)
-				mod_len = ordered->file_offset - mod_start;
-			/*
-			 * If we have this case
-			 *
-			 * |--------- logged extent ---------|
-			 *       |----- ordered extent ----|
-			 *
-			 * Just don't mess with mod_start and mod_len, we'll
-			 * just end up logging more csums than we need and it
-			 * will be ok.
-			 */
-		} else {
-			if (ordered->file_offset + ordered->len <
-			    mod_start + mod_len) {
-				mod_len = (mod_start + mod_len) -
-					(ordered->file_offset + ordered->len);
-				mod_start = ordered->file_offset +
-					ordered->len;
-			} else {
-				mod_len = 0;
-			}
-		}
-
-		if (skip_csum)
-			continue;
-
-		/*
-		 * To keep us from looping for the above case of an ordered
-		 * extent that falls inside of the logged extent.
-		 */
-		if (test_and_set_bit(BTRFS_ORDERED_LOGGED_CSUM,
-				     &ordered->flags))
-			continue;
-
-		list_for_each_entry(sum, &ordered->list, list) {
-			ret = btrfs_csum_file_blocks(trans, log, sum);
-			if (ret)
-				break;
-		}
-	}
-
-	if (*ordered_io_error || !mod_len || ret || skip_csum)
-		return ret;
-
 	/* If we're compressed we have to save the entire range of csums. */
 	if (em->compress_type) {
 		csum_offset = 0;
 		csum_len = max(em->block_len, em->orig_block_len);
 	} else {
-		csum_offset = mod_start - em->start;
-		csum_len = mod_len;
+		csum_offset = em->mod_start - em->start;
+		csum_len = em->mod_len;
 	}
 
 	/* block start is already adjusted for the file extent offset. */
-	ret = btrfs_lookup_csums_range(fs_info->csum_root,
+	ret = btrfs_lookup_csums_range(trans->fs_info->csum_root,
 				       em->block_start + csum_offset,
 				       em->block_start + csum_offset +
 				       csum_len - 1, &ordered_sums, 0);
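
The csum-range selection that survives into log_extent_csums(), worked through with illustrative numbers: a compressed extent is checksummed over the whole on-disk extent, while an uncompressed one only needs the modified subrange:

    /* uncompressed branch, hypothetical values:
     * em->start = 0, em->mod_start = 8192, em->mod_len = 4096 */
    csum_offset = em->mod_start - em->start;        /* 8192 */
    csum_len    = em->mod_len;                      /* 4096 */
    /* the lookup then covers disk bytes
     * [block_start + 8192, block_start + 8192 + 4096 - 1] */

All the ordered-extent waiting that used to precede this computation is gone because fsync now waits for ordered extents before it starts logging, per the "simplify fsync regarding ordered extents logic" item in the changelog.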
@@ -4214,7 +4158,7 @@ static int wait_ordered_extents(struct btrfs_trans_handle *trans,
 						   struct btrfs_ordered_sum,
 						   list);
 		if (!ret)
-			ret = btrfs_csum_file_blocks(trans, log, sums);
+			ret = btrfs_csum_file_blocks(trans, log_root, sums);
 		list_del(&sums->list);
 		kfree(sums);
 	}
@@ -4226,7 +4170,6 @@ static int log_one_extent(struct btrfs_trans_handle *trans,
 			  struct btrfs_inode *inode, struct btrfs_root *root,
 			  const struct extent_map *em,
 			  struct btrfs_path *path,
-			  const struct list_head *logged_list,
 			  struct btrfs_log_ctx *ctx)
 {
 	struct btrfs_root *log = root->log_root;
@@ -4238,18 +4181,11 @@ static int log_one_extent(struct btrfs_trans_handle *trans,
 	u64 block_len;
 	int ret;
 	int extent_inserted = 0;
-	bool ordered_io_err = false;
 
-	ret = wait_ordered_extents(trans, &inode->vfs_inode, root, em,
-				   logged_list, &ordered_io_err);
+	ret = log_extent_csums(trans, inode, log, em);
 	if (ret)
 		return ret;
 
-	if (ordered_io_err) {
-		ctx->io_err = -EIO;
-		return ctx->io_err;
-	}
-
 	btrfs_init_map_token(&token);
 
 	ret = __btrfs_drop_extents(trans, log, &inode->vfs_inode, path, em->start,
@@ -4424,7 +4360,6 @@ static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans,
 				     struct btrfs_root *root,
 				     struct btrfs_inode *inode,
 				     struct btrfs_path *path,
-				     struct list_head *logged_list,
 				     struct btrfs_log_ctx *ctx,
 				     const u64 start,
 				     const u64 end)
@@ -4480,20 +4415,6 @@ static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans,
 	}
 
 	list_sort(NULL, &extents, extent_cmp);
-	btrfs_get_logged_extents(inode, logged_list, logged_start, logged_end);
-	/*
-	 * Some ordered extents started by fsync might have completed
-	 * before we could collect them into the list logged_list, which
-	 * means they're gone, not in our logged_list nor in the inode's
-	 * ordered tree. We want the application/user space to know an
-	 * error happened while attempting to persist file data so that
-	 * it can take proper action. If such error happened, we leave
-	 * without writing to the log tree and the fsync must report the
-	 * file data write error and not commit the current transaction.
-	 */
-	ret = filemap_check_errors(inode->vfs_inode.i_mapping);
-	if (ret)
-		ctx->io_err = ret;
 process:
 	while (!list_empty(&extents)) {
 		em = list_entry(extents.next, struct extent_map, list);
@@ -4512,8 +4433,7 @@ static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans,
 
 		write_unlock(&tree->lock);
 
-		ret = log_one_extent(trans, inode, root, em, path, logged_list,
-				     ctx);
+		ret = log_one_extent(trans, inode, root, em, path, ctx);
 		write_lock(&tree->lock);
 		clear_em_logging(tree, em);
 		free_extent_map(em);
@@ -4712,9 +4632,7 @@ static int btrfs_log_trailing_hole(struct btrfs_trans_handle *trans,
 
 		if (btrfs_file_extent_type(leaf, extent) ==
 		    BTRFS_FILE_EXTENT_INLINE) {
-			len = btrfs_file_extent_inline_len(leaf,
-							   path->slots[0],
-							   extent);
+			len = btrfs_file_extent_ram_bytes(leaf, extent);
 			ASSERT(len == i_size ||
 			       (len == fs_info->sectorsize &&
 				btrfs_file_extent_compression(leaf, extent) !=
@@ -4898,7 +4816,6 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
 	struct btrfs_key min_key;
 	struct btrfs_key max_key;
 	struct btrfs_root *log = root->log_root;
-	LIST_HEAD(logged_list);
 	u64 last_extent = 0;
 	int err = 0;
 	int ret;
@@ -5094,8 +5011,7 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
 			 * we don't need to do more work nor fallback to
 			 * a transaction commit.
 			 */
-			if (IS_ERR(other_inode) &&
-			    PTR_ERR(other_inode) == -ENOENT) {
+			if (other_inode == ERR_PTR(-ENOENT)) {
 				goto next_key;
 			} else if (IS_ERR(other_inode)) {
 				err = PTR_ERR(other_inode);
@@ -5235,7 +5151,7 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
 	}
 	if (fast_search) {
 		ret = btrfs_log_changed_extents(trans, root, inode, dst_path,
-						&logged_list, ctx, start, end);
+						ctx, start, end);
 		if (ret) {
 			err = ret;
 			goto out_unlock;
@@ -5286,10 +5202,6 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
 		inode->last_log_commit = inode->last_sub_trans;
 	spin_unlock(&inode->lock);
 out_unlock:
-	if (unlikely(err))
-		btrfs_put_logged_extents(&logged_list);
-	else
-		btrfs_submit_logged_extents(&logged_list, log);
 	mutex_unlock(&inode->log_mutex);
 
 	btrfs_free_path(path);
@@ -5585,7 +5497,7 @@ static int btrfs_log_all_parents(struct btrfs_trans_handle *trans,
 				 struct btrfs_inode *inode,
 				 struct btrfs_log_ctx *ctx)
 {
-	struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
+	struct btrfs_fs_info *fs_info = trans->fs_info;
 	int ret;
 	struct btrfs_path *path;
 	struct btrfs_key key;
@@ -6120,7 +6032,7 @@ int btrfs_log_new_name(struct btrfs_trans_handle *trans,
 			struct btrfs_inode *inode, struct btrfs_inode *old_dir,
 			struct dentry *parent)
 {
-	struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
+	struct btrfs_fs_info *fs_info = trans->fs_info;
 
 	/*
 	 * this will force the logging code to walk the dentry chain
(file diff suppressed because it is too large)
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
@@ -11,6 +11,8 @@
 #include <linux/btrfs.h>
 #include "async-thread.h"
 
+#define BTRFS_MAX_DATA_CHUNK_SIZE	(10ULL * SZ_1G)
+
 extern struct mutex uuid_mutex;
 
 #define BTRFS_STRIPE_LEN	SZ_64K
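
Note: this constant is the bound that check_block_group_item() in tree-checker.c validates against: no block group item may claim a zero length or one above the 10 GiB data chunk cap. The same predicate as a standalone sketch, with bg_length_valid() being a hypothetical name:

    #define BTRFS_MAX_DATA_CHUNK_SIZE       (10ULL * SZ_1G) /* 10 GiB */

    /* hypothetical helper, for illustration only */
    static inline bool bg_length_valid(u64 length)
    {
            return length > 0 && length <= BTRFS_MAX_DATA_CHUNK_SIZE;
    }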
@@ -343,6 +345,7 @@ struct map_lookup {
 	u64 stripe_len;
 	int num_stripes;
 	int sub_stripes;
+	int verified_stripes; /* For mount time dev extent verification */
 	struct btrfs_bio_stripe stripes[];
 };
@@ -382,8 +385,6 @@ static inline enum btrfs_map_op btrfs_op(struct bio *bio)
 	}
 }
 
-int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start,
-				   u64 end, u64 *length);
 void btrfs_get_bbio(struct btrfs_bio *bbio);
 void btrfs_put_bbio(struct btrfs_bio *bbio);
 int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
@@ -396,20 +397,19 @@ int btrfs_rmap_block(struct btrfs_fs_info *fs_info, u64 chunk_start,
 		     u64 physical, u64 **logical, int *naddrs, int *stripe_len);
 int btrfs_read_sys_array(struct btrfs_fs_info *fs_info);
 int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info);
-int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
-		      struct btrfs_fs_info *fs_info, u64 type);
+int btrfs_alloc_chunk(struct btrfs_trans_handle *trans, u64 type);
 void btrfs_mapping_init(struct btrfs_mapping_tree *tree);
 void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree);
 blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
 			   int mirror_num, int async_submit);
 int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
 		       fmode_t flags, void *holder);
-int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
-			  struct btrfs_fs_devices **fs_devices_ret);
+struct btrfs_device *btrfs_scan_one_device(const char *path,
+					   fmode_t flags, void *holder);
 int btrfs_close_devices(struct btrfs_fs_devices *fs_devices);
 void btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices, int step);
-void btrfs_assign_next_active_device(struct btrfs_fs_info *fs_info,
-		struct btrfs_device *device, struct btrfs_device *this_dev);
+void btrfs_assign_next_active_device(struct btrfs_device *device,
+				     struct btrfs_device *this_dev);
 int btrfs_find_device_missing_or_by_path(struct btrfs_fs_info *fs_info,
 					 const char *device_path,
 					 struct btrfs_device **device);
@@ -453,22 +453,18 @@ void btrfs_init_devices_late(struct btrfs_fs_info *fs_info);
 int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info);
 int btrfs_run_dev_stats(struct btrfs_trans_handle *trans,
 			struct btrfs_fs_info *fs_info);
-void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_fs_info *fs_info,
-					struct btrfs_device *srcdev);
+void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_device *srcdev);
 void btrfs_rm_dev_replace_free_srcdev(struct btrfs_fs_info *fs_info,
 				      struct btrfs_device *srcdev);
-void btrfs_destroy_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
-				      struct btrfs_device *tgtdev);
+void btrfs_destroy_dev_replace_tgtdev(struct btrfs_device *tgtdev);
 void btrfs_scratch_superblocks(struct block_device *bdev, const char *device_path);
 int btrfs_is_parity_mirror(struct btrfs_fs_info *fs_info,
 			   u64 logical, u64 len);
 unsigned long btrfs_full_stripe_len(struct btrfs_fs_info *fs_info,
 				    u64 logical);
 int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
-			     struct btrfs_fs_info *fs_info,
-			     u64 chunk_offset, u64 chunk_size);
-int btrfs_remove_chunk(struct btrfs_trans_handle *trans,
-		       struct btrfs_fs_info *fs_info, u64 chunk_offset);
+			     u64 chunk_offset, u64 chunk_size);
+int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset);
@@ -560,4 +556,7 @@ void btrfs_reset_fs_info_ptr(struct btrfs_fs_info *fs_info);
 bool btrfs_check_rw_degradable(struct btrfs_fs_info *fs_info,
 			       struct btrfs_device *failing_dev);
 
+int btrfs_bg_type_to_factor(u64 flags);
+int btrfs_verify_dev_extents(struct btrfs_fs_info *fs_info);
+
 #endif

diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
@@ -374,7 +374,7 @@ DECLARE_EVENT_CLASS(
 		__entry->extent_type	= btrfs_file_extent_type(l, fi);
 		__entry->compression	= btrfs_file_extent_compression(l, fi);
 		__entry->extent_start	= start;
-		__entry->extent_end	= (start + btrfs_file_extent_inline_len(l, slot, fi));
+		__entry->extent_end	= (start + btrfs_file_extent_ram_bytes(l, fi));
 	),
 
 	TP_printk_btrfs(
@@ -433,7 +433,6 @@ DEFINE_EVENT(
 		{ (1 << BTRFS_ORDERED_DIRECT),		"DIRECT"	}, \
 		{ (1 << BTRFS_ORDERED_IOERR),		"IOERR"		}, \
 		{ (1 << BTRFS_ORDERED_UPDATED_ISIZE),	"UPDATED_ISIZE"	}, \
-		{ (1 << BTRFS_ORDERED_LOGGED_CSUM),	"LOGGED_CSUM"	}, \
 		{ (1 << BTRFS_ORDERED_TRUNCATED),	"TRUNCATED"	})