Merge git://git.kernel.org/pub/scm/linux/kernel/git/steve/gfs2-2.6-nmw
* git://git.kernel.org/pub/scm/linux/kernel/git/steve/gfs2-2.6-nmw: (32 commits)
  GFS2: Move all locking inside the inode creation function
  GFS2: Clean up symlink creation
  GFS2: Clean up mkdir
  GFS2: Use UUID field in generic superblock
  GFS2: Rename ops_inode.c to inode.c
  GFS2: Inode.c is empty now, remove it
  GFS2: Move final part of inode.c into super.c
  GFS2: Move most of the remaining inode.c into ops_inode.c
  GFS2: Move gfs2_refresh_inode() and friends into glops.c
  GFS2: Remove gfs2_dinode_print() function
  GFS2: When adding a new dir entry, inc link count if it is a subdir
  GFS2: Make gfs2_dir_del update link count when required
  GFS2: Don't use gfs2_change_nlink in link syscall
  GFS2: Don't use a try lock when promoting to a higher mode
  GFS2: Double check link count under glock
  GFS2: Improve bug trap code in ->releasepage()
  GFS2: Fix ail list traversal
  GFS2: make sure fallocate bytes is a multiple of blksize
  GFS2: Add an AIL writeback tracepoint
  GFS2: Make writeback more responsive to system conditions
  ...
commit 6c1b8d94bc
25 changed files with 1910 additions and 2077 deletions
fs/gfs2/Makefile

@@ -1,9 +1,9 @@
 ccflags-y := -I$(src)
 obj-$(CONFIG_GFS2_FS) += gfs2.o
 gfs2-y := acl.o bmap.o dir.o xattr.o glock.o \
-	glops.o inode.o log.o lops.o main.o meta_io.o \
+	glops.o log.o lops.o main.o meta_io.o \
 	aops.o dentry.o export.o file.o \
-	ops_fstype.o ops_inode.o quota.o \
+	ops_fstype.o inode.o quota.o \
 	recovery.o rgrp.o super.o sys.o trans.o util.o
 
 gfs2-$(CONFIG_GFS2_FS_LOCKING_DLM) += lock_dlm.o
fs/gfs2/aops.c

@@ -1076,8 +1076,8 @@ int gfs2_releasepage(struct page *page, gfp_t gfp_mask)
 		bd = bh->b_private;
 		if (bd && bd->bd_ail)
 			goto cannot_release;
-		gfs2_assert_warn(sdp, !buffer_pinned(bh));
-		gfs2_assert_warn(sdp, !buffer_dirty(bh));
+		if (buffer_pinned(bh) || buffer_dirty(bh))
+			goto not_possible;
 		bh = bh->b_this_page;
 	} while(bh != head);
 	gfs2_log_unlock(sdp);

@@ -1107,6 +1107,10 @@ int gfs2_releasepage(struct page *page, gfp_t gfp_mask)
 	} while (bh != head);
 
 	return try_to_free_buffers(page);
+
+not_possible: /* Should never happen */
+	WARN_ON(buffer_dirty(bh));
+	WARN_ON(buffer_pinned(bh));
 cannot_release:
 	gfs2_log_unlock(sdp);
 	return 0;
fs/gfs2/dir.c | 329
@@ -82,12 +82,9 @@
 struct qstr gfs2_qdot __read_mostly;
 struct qstr gfs2_qdotdot __read_mostly;
 
-typedef int (*leaf_call_t) (struct gfs2_inode *dip, u32 index, u32 len,
-			    u64 leaf_no, void *data);
 typedef int (*gfs2_dscan_t)(const struct gfs2_dirent *dent,
 			    const struct qstr *name, void *opaque);
 
-
 int gfs2_dir_get_new_buffer(struct gfs2_inode *ip, u64 block,
 			    struct buffer_head **bhp)
 {
@@ -1600,7 +1597,7 @@ static int dir_new_leaf(struct inode *inode, const struct qstr *name)
  */
 
 int gfs2_dir_add(struct inode *inode, const struct qstr *name,
-		 const struct gfs2_inode *nip, unsigned type)
+		 const struct gfs2_inode *nip)
 {
 	struct gfs2_inode *ip = GFS2_I(inode);
 	struct buffer_head *bh;

@@ -1616,7 +1613,7 @@ int gfs2_dir_add(struct inode *inode, const struct qstr *name,
 			return PTR_ERR(dent);
 		dent = gfs2_init_dirent(inode, dent, name, bh);
 		gfs2_inum_out(nip, dent);
-		dent->de_type = cpu_to_be16(type);
+		dent->de_type = cpu_to_be16(IF2DT(nip->i_inode.i_mode));
 		if (ip->i_diskflags & GFS2_DIF_EXHASH) {
 			leaf = (struct gfs2_leaf *)bh->b_data;
 			be16_add_cpu(&leaf->lf_entries, 1);

@@ -1628,6 +1625,8 @@ int gfs2_dir_add(struct inode *inode, const struct qstr *name,
 		gfs2_trans_add_bh(ip->i_gl, bh, 1);
 		ip->i_entries++;
 		ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME;
+		if (S_ISDIR(nip->i_inode.i_mode))
+			inc_nlink(&ip->i_inode);
 		gfs2_dinode_out(ip, bh->b_data);
 		brelse(bh);
 		error = 0;

@@ -1672,8 +1671,9 @@ int gfs2_dir_add(struct inode *inode, const struct qstr *name,
  * Returns: 0 on success, error code on failure
  */
 
-int gfs2_dir_del(struct gfs2_inode *dip, const struct qstr *name)
+int gfs2_dir_del(struct gfs2_inode *dip, const struct dentry *dentry)
 {
+	const struct qstr *name = &dentry->d_name;
 	struct gfs2_dirent *dent, *prev = NULL;
 	struct buffer_head *bh;
 	int error;

@@ -1714,6 +1714,8 @@ int gfs2_dir_del(struct gfs2_inode *dip, const struct qstr *name)
 	gfs2_trans_add_bh(dip->i_gl, bh, 1);
 	dip->i_entries--;
 	dip->i_inode.i_mtime = dip->i_inode.i_ctime = CURRENT_TIME;
+	if (S_ISDIR(dentry->d_inode->i_mode))
+		drop_nlink(&dip->i_inode);
 	gfs2_dinode_out(dip, bh->b_data);
 	brelse(bh);
 	mark_inode_dirty(&dip->i_inode);
@ -1768,25 +1770,159 @@ int gfs2_dir_mvino(struct gfs2_inode *dip, const struct qstr *filename,
|
|||
}
|
||||
|
||||
/**
|
||||
* foreach_leaf - call a function for each leaf in a directory
|
||||
* leaf_dealloc - Deallocate a directory leaf
|
||||
* @dip: the directory
|
||||
* @lc: the function to call for each each
|
||||
* @data: private data to pass to it
|
||||
* @index: the hash table offset in the directory
|
||||
* @len: the number of pointers to this leaf
|
||||
* @leaf_no: the leaf number
|
||||
* @leaf_bh: buffer_head for the starting leaf
|
||||
* last_dealloc: 1 if this is the final dealloc for the leaf, else 0
|
||||
*
|
||||
* Returns: errno
|
||||
*/
|
||||
|
||||
static int foreach_leaf(struct gfs2_inode *dip, leaf_call_t lc, void *data)
|
||||
static int leaf_dealloc(struct gfs2_inode *dip, u32 index, u32 len,
|
||||
u64 leaf_no, struct buffer_head *leaf_bh,
|
||||
int last_dealloc)
|
||||
{
|
||||
struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
|
||||
struct gfs2_leaf *tmp_leaf;
|
||||
struct gfs2_rgrp_list rlist;
|
||||
struct buffer_head *bh, *dibh;
|
||||
u64 blk, nblk;
|
||||
unsigned int rg_blocks = 0, l_blocks = 0;
|
||||
char *ht;
|
||||
unsigned int x, size = len * sizeof(u64);
|
||||
int error;
|
||||
|
||||
memset(&rlist, 0, sizeof(struct gfs2_rgrp_list));
|
||||
|
||||
ht = kzalloc(size, GFP_NOFS);
|
||||
if (!ht)
|
||||
return -ENOMEM;
|
||||
|
||||
if (!gfs2_alloc_get(dip)) {
|
||||
error = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
|
||||
error = gfs2_quota_hold(dip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
|
||||
if (error)
|
||||
goto out_put;
|
||||
|
||||
error = gfs2_rindex_hold(sdp, &dip->i_alloc->al_ri_gh);
|
||||
if (error)
|
||||
goto out_qs;
|
||||
|
||||
/* Count the number of leaves */
|
||||
bh = leaf_bh;
|
||||
|
||||
for (blk = leaf_no; blk; blk = nblk) {
|
||||
if (blk != leaf_no) {
|
||||
error = get_leaf(dip, blk, &bh);
|
||||
if (error)
|
||||
goto out_rlist;
|
||||
}
|
||||
tmp_leaf = (struct gfs2_leaf *)bh->b_data;
|
||||
nblk = be64_to_cpu(tmp_leaf->lf_next);
|
||||
if (blk != leaf_no)
|
||||
brelse(bh);
|
||||
|
||||
gfs2_rlist_add(sdp, &rlist, blk);
|
||||
l_blocks++;
|
||||
}
|
||||
|
||||
gfs2_rlist_alloc(&rlist, LM_ST_EXCLUSIVE);
|
||||
|
||||
for (x = 0; x < rlist.rl_rgrps; x++) {
|
||||
struct gfs2_rgrpd *rgd;
|
||||
rgd = rlist.rl_ghs[x].gh_gl->gl_object;
|
||||
rg_blocks += rgd->rd_length;
|
||||
}
|
||||
|
||||
error = gfs2_glock_nq_m(rlist.rl_rgrps, rlist.rl_ghs);
|
||||
if (error)
|
||||
goto out_rlist;
|
||||
|
||||
error = gfs2_trans_begin(sdp,
|
||||
rg_blocks + (DIV_ROUND_UP(size, sdp->sd_jbsize) + 1) +
|
||||
RES_DINODE + RES_STATFS + RES_QUOTA, l_blocks);
|
||||
if (error)
|
||||
goto out_rg_gunlock;
|
||||
|
||||
bh = leaf_bh;
|
||||
|
||||
for (blk = leaf_no; blk; blk = nblk) {
|
||||
if (blk != leaf_no) {
|
||||
error = get_leaf(dip, blk, &bh);
|
||||
if (error)
|
||||
goto out_end_trans;
|
||||
}
|
||||
tmp_leaf = (struct gfs2_leaf *)bh->b_data;
|
||||
nblk = be64_to_cpu(tmp_leaf->lf_next);
|
||||
if (blk != leaf_no)
|
||||
brelse(bh);
|
||||
|
||||
gfs2_free_meta(dip, blk, 1);
|
||||
gfs2_add_inode_blocks(&dip->i_inode, -1);
|
||||
}
|
||||
|
||||
error = gfs2_dir_write_data(dip, ht, index * sizeof(u64), size);
|
||||
if (error != size) {
|
||||
if (error >= 0)
|
||||
error = -EIO;
|
||||
goto out_end_trans;
|
||||
}
|
||||
|
||||
error = gfs2_meta_inode_buffer(dip, &dibh);
|
||||
if (error)
|
||||
goto out_end_trans;
|
||||
|
||||
gfs2_trans_add_bh(dip->i_gl, dibh, 1);
|
||||
/* On the last dealloc, make this a regular file in case we crash.
|
||||
(We don't want to free these blocks a second time.) */
|
||||
if (last_dealloc)
|
||||
dip->i_inode.i_mode = S_IFREG;
|
||||
gfs2_dinode_out(dip, dibh->b_data);
|
||||
brelse(dibh);
|
||||
|
||||
out_end_trans:
|
||||
gfs2_trans_end(sdp);
|
||||
out_rg_gunlock:
|
||||
gfs2_glock_dq_m(rlist.rl_rgrps, rlist.rl_ghs);
|
||||
out_rlist:
|
||||
gfs2_rlist_free(&rlist);
|
||||
gfs2_glock_dq_uninit(&dip->i_alloc->al_ri_gh);
|
||||
out_qs:
|
||||
gfs2_quota_unhold(dip);
|
||||
out_put:
|
||||
gfs2_alloc_put(dip);
|
||||
out:
|
||||
kfree(ht);
|
||||
return error;
|
||||
}
|
||||
|
||||
/**
|
||||
* gfs2_dir_exhash_dealloc - free all the leaf blocks in a directory
|
||||
* @dip: the directory
|
||||
*
|
||||
* Dealloc all on-disk directory leaves to FREEMETA state
|
||||
* Change on-disk inode type to "regular file"
|
||||
*
|
||||
* Returns: errno
|
||||
*/
|
||||
|
||||
int gfs2_dir_exhash_dealloc(struct gfs2_inode *dip)
|
||||
{
|
||||
struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
|
||||
struct buffer_head *bh;
|
||||
struct gfs2_leaf *leaf;
|
||||
u32 hsize, len;
|
||||
u32 ht_offset, lp_offset, ht_offset_cur = -1;
|
||||
u32 index = 0;
|
||||
u32 index = 0, next_index;
|
||||
__be64 *lp;
|
||||
u64 leaf_no;
|
||||
int error = 0;
|
||||
int error = 0, last;
|
||||
|
||||
hsize = 1 << dip->i_depth;
|
||||
if (hsize * sizeof(u64) != i_size_read(&dip->i_inode)) {
|
||||
|
@ -1821,13 +1957,15 @@ static int foreach_leaf(struct gfs2_inode *dip, leaf_call_t lc, void *data)
|
|||
goto out;
|
||||
leaf = (struct gfs2_leaf *)bh->b_data;
|
||||
len = 1 << (dip->i_depth - be16_to_cpu(leaf->lf_depth));
|
||||
brelse(bh);
|
||||
|
||||
error = lc(dip, index, len, leaf_no, data);
|
||||
next_index = (index & ~(len - 1)) + len;
|
||||
last = ((next_index >= hsize) ? 1 : 0);
|
||||
error = leaf_dealloc(dip, index, len, leaf_no, bh,
|
||||
last);
|
||||
brelse(bh);
|
||||
if (error)
|
||||
goto out;
|
||||
|
||||
index = (index & ~(len - 1)) + len;
|
||||
index = next_index;
|
||||
} else
|
||||
index++;
|
||||
}
|
||||
|
@ -1843,165 +1981,6 @@ static int foreach_leaf(struct gfs2_inode *dip, leaf_call_t lc, void *data)
|
|||
return error;
|
||||
}
|
||||
|
||||
/**
|
||||
* leaf_dealloc - Deallocate a directory leaf
|
||||
* @dip: the directory
|
||||
* @index: the hash table offset in the directory
|
||||
* @len: the number of pointers to this leaf
|
||||
* @leaf_no: the leaf number
|
||||
* @data: not used
|
||||
*
|
||||
* Returns: errno
|
||||
*/
|
||||
|
||||
static int leaf_dealloc(struct gfs2_inode *dip, u32 index, u32 len,
|
||||
u64 leaf_no, void *data)
|
||||
{
|
||||
struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
|
||||
struct gfs2_leaf *tmp_leaf;
|
||||
struct gfs2_rgrp_list rlist;
|
||||
struct buffer_head *bh, *dibh;
|
||||
u64 blk, nblk;
|
||||
unsigned int rg_blocks = 0, l_blocks = 0;
|
||||
char *ht;
|
||||
unsigned int x, size = len * sizeof(u64);
|
||||
int error;
|
||||
|
||||
memset(&rlist, 0, sizeof(struct gfs2_rgrp_list));
|
||||
|
||||
ht = kzalloc(size, GFP_NOFS);
|
||||
if (!ht)
|
||||
return -ENOMEM;
|
||||
|
||||
if (!gfs2_alloc_get(dip)) {
|
||||
error = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
|
||||
error = gfs2_quota_hold(dip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
|
||||
if (error)
|
||||
goto out_put;
|
||||
|
||||
error = gfs2_rindex_hold(sdp, &dip->i_alloc->al_ri_gh);
|
||||
if (error)
|
||||
goto out_qs;
|
||||
|
||||
/* Count the number of leaves */
|
||||
|
||||
for (blk = leaf_no; blk; blk = nblk) {
|
||||
error = get_leaf(dip, blk, &bh);
|
||||
if (error)
|
||||
goto out_rlist;
|
||||
tmp_leaf = (struct gfs2_leaf *)bh->b_data;
|
||||
nblk = be64_to_cpu(tmp_leaf->lf_next);
|
||||
brelse(bh);
|
||||
|
||||
gfs2_rlist_add(sdp, &rlist, blk);
|
||||
l_blocks++;
|
||||
}
|
||||
|
||||
gfs2_rlist_alloc(&rlist, LM_ST_EXCLUSIVE);
|
||||
|
||||
for (x = 0; x < rlist.rl_rgrps; x++) {
|
||||
struct gfs2_rgrpd *rgd;
|
||||
rgd = rlist.rl_ghs[x].gh_gl->gl_object;
|
||||
rg_blocks += rgd->rd_length;
|
||||
}
|
||||
|
||||
error = gfs2_glock_nq_m(rlist.rl_rgrps, rlist.rl_ghs);
|
||||
if (error)
|
||||
goto out_rlist;
|
||||
|
||||
error = gfs2_trans_begin(sdp,
|
||||
rg_blocks + (DIV_ROUND_UP(size, sdp->sd_jbsize) + 1) +
|
||||
RES_DINODE + RES_STATFS + RES_QUOTA, l_blocks);
|
||||
if (error)
|
||||
goto out_rg_gunlock;
|
||||
|
||||
for (blk = leaf_no; blk; blk = nblk) {
|
||||
error = get_leaf(dip, blk, &bh);
|
||||
if (error)
|
||||
goto out_end_trans;
|
||||
tmp_leaf = (struct gfs2_leaf *)bh->b_data;
|
||||
nblk = be64_to_cpu(tmp_leaf->lf_next);
|
||||
brelse(bh);
|
||||
|
||||
gfs2_free_meta(dip, blk, 1);
|
||||
gfs2_add_inode_blocks(&dip->i_inode, -1);
|
||||
}
|
||||
|
||||
error = gfs2_dir_write_data(dip, ht, index * sizeof(u64), size);
|
||||
if (error != size) {
|
||||
if (error >= 0)
|
||||
error = -EIO;
|
||||
goto out_end_trans;
|
||||
}
|
||||
|
||||
error = gfs2_meta_inode_buffer(dip, &dibh);
|
||||
if (error)
|
||||
goto out_end_trans;
|
||||
|
||||
gfs2_trans_add_bh(dip->i_gl, dibh, 1);
|
||||
gfs2_dinode_out(dip, dibh->b_data);
|
||||
brelse(dibh);
|
||||
|
||||
out_end_trans:
|
||||
gfs2_trans_end(sdp);
|
||||
out_rg_gunlock:
|
||||
gfs2_glock_dq_m(rlist.rl_rgrps, rlist.rl_ghs);
|
||||
out_rlist:
|
||||
gfs2_rlist_free(&rlist);
|
||||
gfs2_glock_dq_uninit(&dip->i_alloc->al_ri_gh);
|
||||
out_qs:
|
||||
gfs2_quota_unhold(dip);
|
||||
out_put:
|
||||
gfs2_alloc_put(dip);
|
||||
out:
|
||||
kfree(ht);
|
||||
return error;
|
||||
}
|
||||
|
||||
/**
|
||||
* gfs2_dir_exhash_dealloc - free all the leaf blocks in a directory
|
||||
* @dip: the directory
|
||||
*
|
||||
* Dealloc all on-disk directory leaves to FREEMETA state
|
||||
* Change on-disk inode type to "regular file"
|
||||
*
|
||||
* Returns: errno
|
||||
*/
|
||||
|
||||
int gfs2_dir_exhash_dealloc(struct gfs2_inode *dip)
|
||||
{
|
||||
struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
|
||||
struct buffer_head *bh;
|
||||
int error;
|
||||
|
||||
/* Dealloc on-disk leaves to FREEMETA state */
|
||||
error = foreach_leaf(dip, leaf_dealloc, NULL);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
/* Make this a regular file in case we crash.
|
||||
(We don't want to free these blocks a second time.) */
|
||||
|
||||
error = gfs2_trans_begin(sdp, RES_DINODE, 0);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
error = gfs2_meta_inode_buffer(dip, &bh);
|
||||
if (!error) {
|
||||
gfs2_trans_add_bh(dip->i_gl, bh, 1);
|
||||
((struct gfs2_dinode *)bh->b_data)->di_mode =
|
||||
cpu_to_be32(S_IFREG);
|
||||
brelse(bh);
|
||||
}
|
||||
|
||||
gfs2_trans_end(sdp);
|
||||
|
||||
return error;
|
||||
}
|
||||
|
||||
/**
|
||||
* gfs2_diradd_alloc_required - find if adding entry will require an allocation
|
||||
* @ip: the file being written to
|
||||
|
|
|
fs/gfs2/dir.h

@@ -22,8 +22,8 @@ extern struct inode *gfs2_dir_search(struct inode *dir,
 extern int gfs2_dir_check(struct inode *dir, const struct qstr *filename,
 			  const struct gfs2_inode *ip);
 extern int gfs2_dir_add(struct inode *inode, const struct qstr *filename,
-			const struct gfs2_inode *ip, unsigned int type);
-extern int gfs2_dir_del(struct gfs2_inode *dip, const struct qstr *filename);
+			const struct gfs2_inode *ip);
+extern int gfs2_dir_del(struct gfs2_inode *dip, const struct dentry *dentry);
 extern int gfs2_dir_read(struct inode *inode, u64 *offset, void *opaque,
 			 filldir_t filldir);
 extern int gfs2_dir_mvino(struct gfs2_inode *dip, const struct qstr *filename,
fs/gfs2/export.c

@@ -139,7 +139,7 @@ static struct dentry *gfs2_get_dentry(struct super_block *sb,
 	struct gfs2_sbd *sdp = sb->s_fs_info;
 	struct inode *inode;
 
-	inode = gfs2_ilookup(sb, inum->no_addr);
+	inode = gfs2_ilookup(sb, inum->no_addr, 0);
 	if (inode) {
 		if (GFS2_I(inode)->i_no_formal_ino != inum->no_formal_ino) {
 			iput(inode);
fs/gfs2/file.c

@@ -545,18 +545,10 @@ static int gfs2_close(struct inode *inode, struct file *file)
 /**
  * gfs2_fsync - sync the dirty data for a file (across the cluster)
  * @file: the file that points to the dentry (we ignore this)
  * @dentry: the dentry that points to the inode to sync
  * @datasync: set if we can ignore timestamp changes
  *
- * The VFS will flush "normal" data for us. We only need to worry
- * about metadata here. For journaled data, we just do a log flush
- * as we can't avoid it. Otherwise we can just bale out if datasync
- * is set. For stuffed inodes we must flush the log in order to
- * ensure that all data is on disk.
- *
- * The call to write_inode_now() is there to write back metadata and
- * the inode itself. It does also try and write the data, but thats
- * (hopefully) a no-op due to the VFS having already called filemap_fdatawrite()
- * for us.
+ * The VFS will flush data for us. We only need to worry
+ * about metadata here.
  *
  * Returns: errno
  */

@@ -565,22 +557,20 @@ static int gfs2_fsync(struct file *file, int datasync)
 {
 	struct inode *inode = file->f_mapping->host;
 	int sync_state = inode->i_state & (I_DIRTY_SYNC|I_DIRTY_DATASYNC);
-	int ret = 0;
+	struct gfs2_inode *ip = GFS2_I(inode);
+	int ret;
 
-	if (gfs2_is_jdata(GFS2_I(inode))) {
-		gfs2_log_flush(GFS2_SB(inode), GFS2_I(inode)->i_gl);
-		return 0;
+	if (datasync)
+		sync_state &= ~I_DIRTY_SYNC;
+
+	if (sync_state) {
+		ret = sync_inode_metadata(inode, 1);
+		if (ret)
+			return ret;
+		gfs2_ail_flush(ip->i_gl);
 	}
 
-	if (sync_state != 0) {
-		if (!datasync)
-			ret = write_inode_now(inode, 0);
-
-		if (gfs2_is_stuffed(GFS2_I(inode)))
-			gfs2_log_flush(GFS2_SB(inode), GFS2_I(inode)->i_gl);
-	}
-
-	return ret;
+	return 0;
 }
 
 /**
@@ -826,6 +816,7 @@ static long gfs2_fallocate(struct file *file, int mode, loff_t offset,
 	loff_t bytes, max_bytes;
 	struct gfs2_alloc *al;
 	int error;
+	loff_t bsize_mask = ~((loff_t)sdp->sd_sb.sb_bsize - 1);
 	loff_t next = (offset + len - 1) >> sdp->sd_sb.sb_bsize_shift;
 	next = (next + 1) << sdp->sd_sb.sb_bsize_shift;
 

@@ -833,13 +824,15 @@ static long gfs2_fallocate(struct file *file, int mode, loff_t offset,
 	if (mode & ~FALLOC_FL_KEEP_SIZE)
 		return -EOPNOTSUPP;
 
-	offset = (offset >> sdp->sd_sb.sb_bsize_shift) <<
-		 sdp->sd_sb.sb_bsize_shift;
+	offset &= bsize_mask;
 
 	len = next - offset;
 	bytes = sdp->sd_max_rg_data * sdp->sd_sb.sb_bsize / 2;
 	if (!bytes)
 		bytes = UINT_MAX;
+	bytes &= bsize_mask;
+	if (bytes == 0)
+		bytes = sdp->sd_sb.sb_bsize;
 
 	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &ip->i_gh);
 	error = gfs2_glock_nq(&ip->i_gh);

@@ -870,6 +863,9 @@ static long gfs2_fallocate(struct file *file, int mode, loff_t offset,
 		if (error) {
 			if (error == -ENOSPC && bytes > sdp->sd_sb.sb_bsize) {
 				bytes >>= 1;
+				bytes &= bsize_mask;
+				if (bytes == 0)
+					bytes = sdp->sd_sb.sb_bsize;
 				goto retry;
 			}
 			goto out_qunlock;
|
|
@ -143,14 +143,9 @@ static int demote_ok(const struct gfs2_glock *gl)
|
|||
{
|
||||
const struct gfs2_glock_operations *glops = gl->gl_ops;
|
||||
|
||||
/* assert_spin_locked(&gl->gl_spin); */
|
||||
|
||||
if (gl->gl_state == LM_ST_UNLOCKED)
|
||||
return 0;
|
||||
if (test_bit(GLF_LFLUSH, &gl->gl_flags))
|
||||
return 0;
|
||||
if ((gl->gl_name.ln_type != LM_TYPE_INODE) &&
|
||||
!list_empty(&gl->gl_holders))
|
||||
if (!list_empty(&gl->gl_holders))
|
||||
return 0;
|
||||
if (glops->go_demote_ok)
|
||||
return glops->go_demote_ok(gl);
|
||||
|
@ -158,6 +153,31 @@ static int demote_ok(const struct gfs2_glock *gl)
|
|||
}
|
||||
|
||||
|
||||
void gfs2_glock_add_to_lru(struct gfs2_glock *gl)
|
||||
{
|
||||
spin_lock(&lru_lock);
|
||||
|
||||
if (!list_empty(&gl->gl_lru))
|
||||
list_del_init(&gl->gl_lru);
|
||||
else
|
||||
atomic_inc(&lru_count);
|
||||
|
||||
list_add_tail(&gl->gl_lru, &lru_list);
|
||||
set_bit(GLF_LRU, &gl->gl_flags);
|
||||
spin_unlock(&lru_lock);
|
||||
}
|
||||
|
||||
static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl)
|
||||
{
|
||||
spin_lock(&lru_lock);
|
||||
if (!list_empty(&gl->gl_lru)) {
|
||||
list_del_init(&gl->gl_lru);
|
||||
atomic_dec(&lru_count);
|
||||
clear_bit(GLF_LRU, &gl->gl_flags);
|
||||
}
|
||||
spin_unlock(&lru_lock);
|
||||
}
|
||||
|
||||
/**
|
||||
* __gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list
|
||||
* @gl: the glock
|
||||
|
@ -168,24 +188,8 @@ static int demote_ok(const struct gfs2_glock *gl)
|
|||
|
||||
static void __gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
|
||||
{
|
||||
if (demote_ok(gl)) {
|
||||
spin_lock(&lru_lock);
|
||||
|
||||
if (!list_empty(&gl->gl_lru))
|
||||
list_del_init(&gl->gl_lru);
|
||||
else
|
||||
atomic_inc(&lru_count);
|
||||
|
||||
list_add_tail(&gl->gl_lru, &lru_list);
|
||||
spin_unlock(&lru_lock);
|
||||
}
|
||||
}
|
||||
|
||||
void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
|
||||
{
|
||||
spin_lock(&gl->gl_spin);
|
||||
__gfs2_glock_schedule_for_reclaim(gl);
|
||||
spin_unlock(&gl->gl_spin);
|
||||
if (demote_ok(gl))
|
||||
gfs2_glock_add_to_lru(gl);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -217,12 +221,7 @@ void gfs2_glock_put(struct gfs2_glock *gl)
|
|||
spin_lock_bucket(gl->gl_hash);
|
||||
hlist_bl_del_rcu(&gl->gl_list);
|
||||
spin_unlock_bucket(gl->gl_hash);
|
||||
spin_lock(&lru_lock);
|
||||
if (!list_empty(&gl->gl_lru)) {
|
||||
list_del_init(&gl->gl_lru);
|
||||
atomic_dec(&lru_count);
|
||||
}
|
||||
spin_unlock(&lru_lock);
|
||||
gfs2_glock_remove_from_lru(gl);
|
||||
GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
|
||||
GLOCK_BUG_ON(gl, mapping && mapping->nrpages);
|
||||
trace_gfs2_glock_put(gl);
|
||||
|
@ -542,11 +541,6 @@ __acquires(&gl->gl_spin)
|
|||
clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
|
||||
|
||||
gfs2_glock_hold(gl);
|
||||
if (target != LM_ST_UNLOCKED && (gl->gl_state == LM_ST_SHARED ||
|
||||
gl->gl_state == LM_ST_DEFERRED) &&
|
||||
!(lck_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
|
||||
lck_flags |= LM_FLAG_TRY_1CB;
|
||||
|
||||
if (sdp->sd_lockstruct.ls_ops->lm_lock) {
|
||||
/* lock_dlm */
|
||||
ret = sdp->sd_lockstruct.ls_ops->lm_lock(gl, target, lck_flags);
|
||||
|
@ -648,7 +642,7 @@ static void delete_work_func(struct work_struct *work)
|
|||
/* Note: Unsafe to dereference ip as we don't hold right refs/locks */
|
||||
|
||||
if (ip)
|
||||
inode = gfs2_ilookup(sdp->sd_vfs, no_addr);
|
||||
inode = gfs2_ilookup(sdp->sd_vfs, no_addr, 1);
|
||||
else
|
||||
inode = gfs2_lookup_by_inum(sdp, no_addr, NULL, GFS2_BLKST_UNLINKED);
|
||||
if (inode && !IS_ERR(inode)) {
|
||||
|
@ -1025,6 +1019,9 @@ int gfs2_glock_nq(struct gfs2_holder *gh)
|
|||
if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
|
||||
return -EIO;
|
||||
|
||||
if (test_bit(GLF_LRU, &gl->gl_flags))
|
||||
gfs2_glock_remove_from_lru(gl);
|
||||
|
||||
spin_lock(&gl->gl_spin);
|
||||
add_to_queue(gh);
|
||||
if ((LM_FLAG_NOEXP & gh->gh_flags) &&
|
||||
|
@ -1082,7 +1079,8 @@ void gfs2_glock_dq(struct gfs2_holder *gh)
|
|||
!test_bit(GLF_DEMOTE, &gl->gl_flags))
|
||||
fast_path = 1;
|
||||
}
|
||||
__gfs2_glock_schedule_for_reclaim(gl);
|
||||
if (!test_bit(GLF_LFLUSH, &gl->gl_flags))
|
||||
__gfs2_glock_schedule_for_reclaim(gl);
|
||||
trace_gfs2_glock_queue(gh, 0);
|
||||
spin_unlock(&gl->gl_spin);
|
||||
if (likely(fast_path))
|
||||
|
@ -1365,6 +1363,7 @@ static int gfs2_shrink_glock_memory(struct shrinker *shrink, int nr, gfp_t gfp_m
|
|||
while(nr && !list_empty(&lru_list)) {
|
||||
gl = list_entry(lru_list.next, struct gfs2_glock, gl_lru);
|
||||
list_del_init(&gl->gl_lru);
|
||||
clear_bit(GLF_LRU, &gl->gl_flags);
|
||||
atomic_dec(&lru_count);
|
||||
|
||||
/* Test for being demotable */
|
||||
|
@ -1387,6 +1386,7 @@ static int gfs2_shrink_glock_memory(struct shrinker *shrink, int nr, gfp_t gfp_m
|
|||
}
|
||||
nr_skipped++;
|
||||
list_add(&gl->gl_lru, &skipped);
|
||||
set_bit(GLF_LRU, &gl->gl_flags);
|
||||
}
|
||||
list_splice(&skipped, &lru_list);
|
||||
atomic_add(nr_skipped, &lru_count);
|
||||
|
@ -1459,12 +1459,7 @@ static void thaw_glock(struct gfs2_glock *gl)
|
|||
|
||||
static void clear_glock(struct gfs2_glock *gl)
|
||||
{
|
||||
spin_lock(&lru_lock);
|
||||
if (!list_empty(&gl->gl_lru)) {
|
||||
list_del_init(&gl->gl_lru);
|
||||
atomic_dec(&lru_count);
|
||||
}
|
||||
spin_unlock(&lru_lock);
|
||||
gfs2_glock_remove_from_lru(gl);
|
||||
|
||||
spin_lock(&gl->gl_spin);
|
||||
if (gl->gl_state != LM_ST_UNLOCKED)
|
||||
|
@ -1599,9 +1594,11 @@ static int dump_holder(struct seq_file *seq, const struct gfs2_holder *gh)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static const char *gflags2str(char *buf, const unsigned long *gflags)
|
||||
static const char *gflags2str(char *buf, const struct gfs2_glock *gl)
|
||||
{
|
||||
const unsigned long *gflags = &gl->gl_flags;
|
||||
char *p = buf;
|
||||
|
||||
if (test_bit(GLF_LOCK, gflags))
|
||||
*p++ = 'l';
|
||||
if (test_bit(GLF_DEMOTE, gflags))
|
||||
|
@ -1624,6 +1621,10 @@ static const char *gflags2str(char *buf, const unsigned long *gflags)
|
|||
*p++ = 'F';
|
||||
if (test_bit(GLF_QUEUED, gflags))
|
||||
*p++ = 'q';
|
||||
if (test_bit(GLF_LRU, gflags))
|
||||
*p++ = 'L';
|
||||
if (gl->gl_object)
|
||||
*p++ = 'o';
|
||||
*p = 0;
|
||||
return buf;
|
||||
}
|
||||
|
@ -1658,14 +1659,15 @@ static int __dump_glock(struct seq_file *seq, const struct gfs2_glock *gl)
|
|||
dtime *= 1000000/HZ; /* demote time in uSec */
|
||||
if (!test_bit(GLF_DEMOTE, &gl->gl_flags))
|
||||
dtime = 0;
|
||||
gfs2_print_dbg(seq, "G: s:%s n:%u/%llx f:%s t:%s d:%s/%llu a:%d r:%d\n",
|
||||
gfs2_print_dbg(seq, "G: s:%s n:%u/%llx f:%s t:%s d:%s/%llu a:%d v:%d r:%d\n",
|
||||
state2str(gl->gl_state),
|
||||
gl->gl_name.ln_type,
|
||||
(unsigned long long)gl->gl_name.ln_number,
|
||||
gflags2str(gflags_buf, &gl->gl_flags),
|
||||
gflags2str(gflags_buf, gl),
|
||||
state2str(gl->gl_target),
|
||||
state2str(gl->gl_demote_state), dtime,
|
||||
atomic_read(&gl->gl_ail_count),
|
||||
atomic_read(&gl->gl_revokes),
|
||||
atomic_read(&gl->gl_ref));
|
||||
|
||||
list_for_each_entry(gh, &gl->gl_holders, gh_list) {
|
||||
|
|
|
fs/gfs2/glock.h

@@ -225,11 +225,10 @@ static inline int gfs2_glock_nq_init(struct gfs2_glock *gl,
 
 extern void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state);
 extern void gfs2_glock_complete(struct gfs2_glock *gl, int ret);
-extern void gfs2_reclaim_glock(struct gfs2_sbd *sdp);
 extern void gfs2_gl_hash_clear(struct gfs2_sbd *sdp);
 extern void gfs2_glock_finish_truncate(struct gfs2_inode *ip);
 extern void gfs2_glock_thaw(struct gfs2_sbd *sdp);
-extern void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl);
+extern void gfs2_glock_add_to_lru(struct gfs2_glock *gl);
 extern void gfs2_glock_free(struct gfs2_glock *gl);
 
 extern int __init gfs2_glock_init(void);
fs/gfs2/glops.c | 172
@ -28,33 +28,18 @@
|
|||
#include "trans.h"
|
||||
|
||||
/**
|
||||
* ail_empty_gl - remove all buffers for a given lock from the AIL
|
||||
* __gfs2_ail_flush - remove all buffers for a given lock from the AIL
|
||||
* @gl: the glock
|
||||
*
|
||||
* None of the buffers should be dirty, locked, or pinned.
|
||||
*/
|
||||
|
||||
static void gfs2_ail_empty_gl(struct gfs2_glock *gl)
|
||||
static void __gfs2_ail_flush(struct gfs2_glock *gl)
|
||||
{
|
||||
struct gfs2_sbd *sdp = gl->gl_sbd;
|
||||
struct list_head *head = &gl->gl_ail_list;
|
||||
struct gfs2_bufdata *bd;
|
||||
struct buffer_head *bh;
|
||||
struct gfs2_trans tr;
|
||||
|
||||
memset(&tr, 0, sizeof(tr));
|
||||
tr.tr_revokes = atomic_read(&gl->gl_ail_count);
|
||||
|
||||
if (!tr.tr_revokes)
|
||||
return;
|
||||
|
||||
/* A shortened, inline version of gfs2_trans_begin() */
|
||||
tr.tr_reserved = 1 + gfs2_struct2blk(sdp, tr.tr_revokes, sizeof(u64));
|
||||
tr.tr_ip = (unsigned long)__builtin_return_address(0);
|
||||
INIT_LIST_HEAD(&tr.tr_list_buf);
|
||||
gfs2_log_reserve(sdp, tr.tr_reserved);
|
||||
BUG_ON(current->journal_info);
|
||||
current->journal_info = &tr;
|
||||
|
||||
spin_lock(&sdp->sd_ail_lock);
|
||||
while (!list_empty(head)) {
|
||||
|
@ -76,7 +61,47 @@ static void gfs2_ail_empty_gl(struct gfs2_glock *gl)
|
|||
}
|
||||
gfs2_assert_withdraw(sdp, !atomic_read(&gl->gl_ail_count));
|
||||
spin_unlock(&sdp->sd_ail_lock);
|
||||
}
|
||||
|
||||
|
||||
static void gfs2_ail_empty_gl(struct gfs2_glock *gl)
|
||||
{
|
||||
struct gfs2_sbd *sdp = gl->gl_sbd;
|
||||
struct gfs2_trans tr;
|
||||
|
||||
memset(&tr, 0, sizeof(tr));
|
||||
tr.tr_revokes = atomic_read(&gl->gl_ail_count);
|
||||
|
||||
if (!tr.tr_revokes)
|
||||
return;
|
||||
|
||||
/* A shortened, inline version of gfs2_trans_begin() */
|
||||
tr.tr_reserved = 1 + gfs2_struct2blk(sdp, tr.tr_revokes, sizeof(u64));
|
||||
tr.tr_ip = (unsigned long)__builtin_return_address(0);
|
||||
INIT_LIST_HEAD(&tr.tr_list_buf);
|
||||
gfs2_log_reserve(sdp, tr.tr_reserved);
|
||||
BUG_ON(current->journal_info);
|
||||
current->journal_info = &tr;
|
||||
|
||||
__gfs2_ail_flush(gl);
|
||||
|
||||
gfs2_trans_end(sdp);
|
||||
gfs2_log_flush(sdp, NULL);
|
||||
}
|
||||
|
||||
void gfs2_ail_flush(struct gfs2_glock *gl)
|
||||
{
|
||||
struct gfs2_sbd *sdp = gl->gl_sbd;
|
||||
unsigned int revokes = atomic_read(&gl->gl_ail_count);
|
||||
int ret;
|
||||
|
||||
if (!revokes)
|
||||
return;
|
||||
|
||||
ret = gfs2_trans_begin(sdp, 0, revokes);
|
||||
if (ret)
|
||||
return;
|
||||
__gfs2_ail_flush(gl);
|
||||
gfs2_trans_end(sdp);
|
||||
gfs2_log_flush(sdp, NULL);
|
||||
}
|
||||
|
@ -226,6 +251,119 @@ static int inode_go_demote_ok(const struct gfs2_glock *gl)
|
|||
return 1;
|
||||
}
|
||||
|
||||
/**
|
||||
* gfs2_set_nlink - Set the inode's link count based on on-disk info
|
||||
* @inode: The inode in question
|
||||
* @nlink: The link count
|
||||
*
|
||||
* If the link count has hit zero, it must never be raised, whatever the
|
||||
* on-disk inode might say. When new struct inodes are created the link
|
||||
* count is set to 1, so that we can safely use this test even when reading
|
||||
* in on disk information for the first time.
|
||||
*/
|
||||
|
||||
static void gfs2_set_nlink(struct inode *inode, u32 nlink)
|
||||
{
|
||||
/*
|
||||
* We will need to review setting the nlink count here in the
|
||||
* light of the forthcoming ro bind mount work. This is a reminder
|
||||
* to do that.
|
||||
*/
|
||||
if ((inode->i_nlink != nlink) && (inode->i_nlink != 0)) {
|
||||
if (nlink == 0)
|
||||
clear_nlink(inode);
|
||||
else
|
||||
inode->i_nlink = nlink;
|
||||
}
|
||||
}
|
||||
|
||||
static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
|
||||
{
|
||||
const struct gfs2_dinode *str = buf;
|
||||
struct timespec atime;
|
||||
u16 height, depth;
|
||||
|
||||
if (unlikely(ip->i_no_addr != be64_to_cpu(str->di_num.no_addr)))
|
||||
goto corrupt;
|
||||
ip->i_no_formal_ino = be64_to_cpu(str->di_num.no_formal_ino);
|
||||
ip->i_inode.i_mode = be32_to_cpu(str->di_mode);
|
||||
ip->i_inode.i_rdev = 0;
|
||||
switch (ip->i_inode.i_mode & S_IFMT) {
|
||||
case S_IFBLK:
|
||||
case S_IFCHR:
|
||||
ip->i_inode.i_rdev = MKDEV(be32_to_cpu(str->di_major),
|
||||
be32_to_cpu(str->di_minor));
|
||||
break;
|
||||
};
|
||||
|
||||
ip->i_inode.i_uid = be32_to_cpu(str->di_uid);
|
||||
ip->i_inode.i_gid = be32_to_cpu(str->di_gid);
|
||||
gfs2_set_nlink(&ip->i_inode, be32_to_cpu(str->di_nlink));
|
||||
i_size_write(&ip->i_inode, be64_to_cpu(str->di_size));
|
||||
gfs2_set_inode_blocks(&ip->i_inode, be64_to_cpu(str->di_blocks));
|
||||
atime.tv_sec = be64_to_cpu(str->di_atime);
|
||||
atime.tv_nsec = be32_to_cpu(str->di_atime_nsec);
|
||||
if (timespec_compare(&ip->i_inode.i_atime, &atime) < 0)
|
||||
ip->i_inode.i_atime = atime;
|
||||
ip->i_inode.i_mtime.tv_sec = be64_to_cpu(str->di_mtime);
|
||||
ip->i_inode.i_mtime.tv_nsec = be32_to_cpu(str->di_mtime_nsec);
|
||||
ip->i_inode.i_ctime.tv_sec = be64_to_cpu(str->di_ctime);
|
||||
ip->i_inode.i_ctime.tv_nsec = be32_to_cpu(str->di_ctime_nsec);
|
||||
|
||||
ip->i_goal = be64_to_cpu(str->di_goal_meta);
|
||||
ip->i_generation = be64_to_cpu(str->di_generation);
|
||||
|
||||
ip->i_diskflags = be32_to_cpu(str->di_flags);
|
||||
gfs2_set_inode_flags(&ip->i_inode);
|
||||
height = be16_to_cpu(str->di_height);
|
||||
if (unlikely(height > GFS2_MAX_META_HEIGHT))
|
||||
goto corrupt;
|
||||
ip->i_height = (u8)height;
|
||||
|
||||
depth = be16_to_cpu(str->di_depth);
|
||||
if (unlikely(depth > GFS2_DIR_MAX_DEPTH))
|
||||
goto corrupt;
|
||||
ip->i_depth = (u8)depth;
|
||||
ip->i_entries = be32_to_cpu(str->di_entries);
|
||||
|
||||
ip->i_eattr = be64_to_cpu(str->di_eattr);
|
||||
if (S_ISREG(ip->i_inode.i_mode))
|
||||
gfs2_set_aops(&ip->i_inode);
|
||||
|
||||
return 0;
|
||||
corrupt:
|
||||
gfs2_consist_inode(ip);
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
/**
|
||||
* gfs2_inode_refresh - Refresh the incore copy of the dinode
|
||||
* @ip: The GFS2 inode
|
||||
*
|
||||
* Returns: errno
|
||||
*/
|
||||
|
||||
int gfs2_inode_refresh(struct gfs2_inode *ip)
|
||||
{
|
||||
struct buffer_head *dibh;
|
||||
int error;
|
||||
|
||||
error = gfs2_meta_inode_buffer(ip, &dibh);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
if (gfs2_metatype_check(GFS2_SB(&ip->i_inode), dibh, GFS2_METATYPE_DI)) {
|
||||
brelse(dibh);
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
error = gfs2_dinode_in(ip, dibh->b_data);
|
||||
brelse(dibh);
|
||||
clear_bit(GIF_INVALID, &ip->i_flags);
|
||||
|
||||
return error;
|
||||
}
|
||||
|
||||
/**
|
||||
* inode_go_lock - operation done after an inode lock is locked by a process
|
||||
* @gl: the glock
|
||||
|
|
|
fs/gfs2/glops.h

@@ -23,4 +23,6 @@ extern const struct gfs2_glock_operations gfs2_quota_glops;
 extern const struct gfs2_glock_operations gfs2_journal_glops;
 extern const struct gfs2_glock_operations *gfs2_glops_list[];
 
+extern void gfs2_ail_flush(struct gfs2_glock *gl);
+
 #endif /* __GLOPS_DOT_H__ */
fs/gfs2/incore.h

@@ -20,7 +20,6 @@
 
 #define DIO_WAIT	0x00000010
 #define DIO_METADATA	0x00000020
-#define DIO_ALL		0x00000100
 
 struct gfs2_log_operations;
 struct gfs2_log_element;

@@ -200,6 +199,8 @@ enum {
 	GLF_INITIAL = 10,
 	GLF_FROZEN = 11,
 	GLF_QUEUED = 12,
+	GLF_LRU = 13,
+	GLF_OBJECT = 14, /* Used only for tracing */
 };
 
 struct gfs2_glock {

@@ -234,6 +235,7 @@ struct gfs2_glock {
 
 	struct list_head gl_ail_list;
 	atomic_t gl_ail_count;
+	atomic_t gl_revokes;
 	struct delayed_work gl_work;
 	struct work_struct gl_delete;
 	struct rcu_head gl_rcu;

@@ -374,8 +376,6 @@ struct gfs2_ail {
 	unsigned int ai_first;
 	struct list_head ai_ail1_list;
 	struct list_head ai_ail2_list;
-
-	u64 ai_sync_gen;
 };
 
 struct gfs2_journal_extent {

@@ -488,7 +488,6 @@ struct gfs2_sb_host {
 
 	char sb_lockproto[GFS2_LOCKNAME_LEN];
 	char sb_locktable[GFS2_LOCKNAME_LEN];
-	u8 sb_uuid[16];
 };
 
 /*

@@ -654,7 +653,6 @@ struct gfs2_sbd {
 	spinlock_t sd_ail_lock;
 	struct list_head sd_ail1_list;
 	struct list_head sd_ail2_list;
-	u64 sd_ail_sync_gen;
 
 	/* Replay stuff */
 
fs/gfs2/inode.c | 1494 (file diff suppressed because it is too large)
fs/gfs2/inode.h

@@ -102,22 +102,16 @@ extern struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned type,
 extern struct inode *gfs2_lookup_by_inum(struct gfs2_sbd *sdp, u64 no_addr,
 					 u64 *no_formal_ino,
 					 unsigned int blktype);
-extern struct inode *gfs2_ilookup(struct super_block *sb, u64 no_addr);
+extern struct inode *gfs2_ilookup(struct super_block *sb, u64 no_addr, int nonblock);
 
 extern int gfs2_inode_refresh(struct gfs2_inode *ip);
 
-extern int gfs2_dinode_dealloc(struct gfs2_inode *inode);
-extern int gfs2_change_nlink(struct gfs2_inode *ip, int diff);
 extern struct inode *gfs2_lookupi(struct inode *dir, const struct qstr *name,
 				  int is_root);
-extern struct inode *gfs2_createi(struct gfs2_holder *ghs,
-				  const struct qstr *name,
-				  unsigned int mode, dev_t dev);
 extern int gfs2_permission(struct inode *inode, int mask, unsigned int flags);
 extern int gfs2_setattr_simple(struct gfs2_inode *ip, struct iattr *attr);
 extern struct inode *gfs2_lookup_simple(struct inode *dip, const char *name);
 extern void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf);
-extern void gfs2_dinode_print(const struct gfs2_inode *ip);
 
 extern const struct inode_operations gfs2_file_iops;
 extern const struct inode_operations gfs2_dir_iops;
|
fs/gfs2/log.c | 187
@ -18,6 +18,7 @@
|
|||
#include <linux/kthread.h>
|
||||
#include <linux/freezer.h>
|
||||
#include <linux/bio.h>
|
||||
#include <linux/writeback.h>
|
||||
|
||||
#include "gfs2.h"
|
||||
#include "incore.h"
|
||||
|
@ -83,55 +84,97 @@ void gfs2_remove_from_ail(struct gfs2_bufdata *bd)
|
|||
/**
|
||||
* gfs2_ail1_start_one - Start I/O on a part of the AIL
|
||||
* @sdp: the filesystem
|
||||
* @tr: the part of the AIL
|
||||
* @wbc: The writeback control structure
|
||||
* @ai: The ail structure
|
||||
*
|
||||
*/
|
||||
|
||||
static void gfs2_ail1_start_one(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
|
||||
static int gfs2_ail1_start_one(struct gfs2_sbd *sdp,
|
||||
struct writeback_control *wbc,
|
||||
struct gfs2_ail *ai)
|
||||
__releases(&sdp->sd_ail_lock)
|
||||
__acquires(&sdp->sd_ail_lock)
|
||||
{
|
||||
struct gfs2_glock *gl = NULL;
|
||||
struct address_space *mapping;
|
||||
struct gfs2_bufdata *bd, *s;
|
||||
struct buffer_head *bh;
|
||||
int retry;
|
||||
|
||||
do {
|
||||
retry = 0;
|
||||
list_for_each_entry_safe_reverse(bd, s, &ai->ai_ail1_list, bd_ail_st_list) {
|
||||
bh = bd->bd_bh;
|
||||
|
||||
list_for_each_entry_safe_reverse(bd, s, &ai->ai_ail1_list,
|
||||
bd_ail_st_list) {
|
||||
bh = bd->bd_bh;
|
||||
gfs2_assert(sdp, bd->bd_ail == ai);
|
||||
|
||||
gfs2_assert(sdp, bd->bd_ail == ai);
|
||||
|
||||
if (!buffer_busy(bh)) {
|
||||
if (!buffer_uptodate(bh))
|
||||
gfs2_io_error_bh(sdp, bh);
|
||||
list_move(&bd->bd_ail_st_list, &ai->ai_ail2_list);
|
||||
continue;
|
||||
}
|
||||
|
||||
if (!buffer_dirty(bh))
|
||||
continue;
|
||||
|
||||
list_move(&bd->bd_ail_st_list, &ai->ai_ail1_list);
|
||||
|
||||
get_bh(bh);
|
||||
spin_unlock(&sdp->sd_ail_lock);
|
||||
lock_buffer(bh);
|
||||
if (test_clear_buffer_dirty(bh)) {
|
||||
bh->b_end_io = end_buffer_write_sync;
|
||||
submit_bh(WRITE_SYNC, bh);
|
||||
} else {
|
||||
unlock_buffer(bh);
|
||||
brelse(bh);
|
||||
}
|
||||
spin_lock(&sdp->sd_ail_lock);
|
||||
|
||||
retry = 1;
|
||||
break;
|
||||
if (!buffer_busy(bh)) {
|
||||
if (!buffer_uptodate(bh))
|
||||
gfs2_io_error_bh(sdp, bh);
|
||||
list_move(&bd->bd_ail_st_list, &ai->ai_ail2_list);
|
||||
continue;
|
||||
}
|
||||
} while (retry);
|
||||
|
||||
if (!buffer_dirty(bh))
|
||||
continue;
|
||||
if (gl == bd->bd_gl)
|
||||
continue;
|
||||
gl = bd->bd_gl;
|
||||
list_move(&bd->bd_ail_st_list, &ai->ai_ail1_list);
|
||||
mapping = bh->b_page->mapping;
|
||||
if (!mapping)
|
||||
continue;
|
||||
spin_unlock(&sdp->sd_ail_lock);
|
||||
generic_writepages(mapping, wbc);
|
||||
spin_lock(&sdp->sd_ail_lock);
|
||||
if (wbc->nr_to_write <= 0)
|
||||
break;
|
||||
return 1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* gfs2_ail1_flush - start writeback of some ail1 entries
|
||||
* @sdp: The super block
|
||||
* @wbc: The writeback control structure
|
||||
*
|
||||
* Writes back some ail1 entries, according to the limits in the
|
||||
* writeback control structure
|
||||
*/
|
||||
|
||||
void gfs2_ail1_flush(struct gfs2_sbd *sdp, struct writeback_control *wbc)
|
||||
{
|
||||
struct list_head *head = &sdp->sd_ail1_list;
|
||||
struct gfs2_ail *ai;
|
||||
|
||||
trace_gfs2_ail_flush(sdp, wbc, 1);
|
||||
spin_lock(&sdp->sd_ail_lock);
|
||||
restart:
|
||||
list_for_each_entry_reverse(ai, head, ai_list) {
|
||||
if (wbc->nr_to_write <= 0)
|
||||
break;
|
||||
if (gfs2_ail1_start_one(sdp, wbc, ai))
|
||||
goto restart;
|
||||
}
|
||||
spin_unlock(&sdp->sd_ail_lock);
|
||||
trace_gfs2_ail_flush(sdp, wbc, 0);
|
||||
}
|
||||
|
||||
/**
|
||||
* gfs2_ail1_start - start writeback of all ail1 entries
|
||||
* @sdp: The superblock
|
||||
*/
|
||||
|
||||
static void gfs2_ail1_start(struct gfs2_sbd *sdp)
|
||||
{
|
||||
struct writeback_control wbc = {
|
||||
.sync_mode = WB_SYNC_NONE,
|
||||
.nr_to_write = LONG_MAX,
|
||||
.range_start = 0,
|
||||
.range_end = LLONG_MAX,
|
||||
};
|
||||
|
||||
return gfs2_ail1_flush(sdp, &wbc);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -141,7 +184,7 @@ __acquires(&sdp->sd_ail_lock)
|
|||
*
|
||||
*/
|
||||
|
||||
static int gfs2_ail1_empty_one(struct gfs2_sbd *sdp, struct gfs2_ail *ai, int flags)
|
||||
static void gfs2_ail1_empty_one(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
|
||||
{
|
||||
struct gfs2_bufdata *bd, *s;
|
||||
struct buffer_head *bh;
|
||||
|
@ -149,71 +192,37 @@ static int gfs2_ail1_empty_one(struct gfs2_sbd *sdp, struct gfs2_ail *ai, int fl
|
|||
list_for_each_entry_safe_reverse(bd, s, &ai->ai_ail1_list,
|
||||
bd_ail_st_list) {
|
||||
bh = bd->bd_bh;
|
||||
|
||||
gfs2_assert(sdp, bd->bd_ail == ai);
|
||||
|
||||
if (buffer_busy(bh)) {
|
||||
if (flags & DIO_ALL)
|
||||
continue;
|
||||
else
|
||||
break;
|
||||
}
|
||||
|
||||
if (buffer_busy(bh))
|
||||
continue;
|
||||
if (!buffer_uptodate(bh))
|
||||
gfs2_io_error_bh(sdp, bh);
|
||||
|
||||
list_move(&bd->bd_ail_st_list, &ai->ai_ail2_list);
|
||||
}
|
||||
|
||||
return list_empty(&ai->ai_ail1_list);
|
||||
}
|
||||
|
||||
static void gfs2_ail1_start(struct gfs2_sbd *sdp)
|
||||
{
|
||||
struct list_head *head;
|
||||
u64 sync_gen;
|
||||
struct gfs2_ail *ai;
|
||||
int done = 0;
|
||||
/**
|
||||
* gfs2_ail1_empty - Try to empty the ail1 lists
|
||||
* @sdp: The superblock
|
||||
*
|
||||
* Tries to empty the ail1 lists, starting with the oldest first
|
||||
*/
|
||||
|
||||
spin_lock(&sdp->sd_ail_lock);
|
||||
head = &sdp->sd_ail1_list;
|
||||
if (list_empty(head)) {
|
||||
spin_unlock(&sdp->sd_ail_lock);
|
||||
return;
|
||||
}
|
||||
sync_gen = sdp->sd_ail_sync_gen++;
|
||||
|
||||
while(!done) {
|
||||
done = 1;
|
||||
list_for_each_entry_reverse(ai, head, ai_list) {
|
||||
if (ai->ai_sync_gen >= sync_gen)
|
||||
continue;
|
||||
ai->ai_sync_gen = sync_gen;
|
||||
gfs2_ail1_start_one(sdp, ai); /* This may drop ail lock */
|
||||
done = 0;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
spin_unlock(&sdp->sd_ail_lock);
|
||||
}
|
||||
|
||||
static int gfs2_ail1_empty(struct gfs2_sbd *sdp, int flags)
|
||||
static int gfs2_ail1_empty(struct gfs2_sbd *sdp)
|
||||
{
|
||||
struct gfs2_ail *ai, *s;
|
||||
int ret;
|
||||
|
||||
spin_lock(&sdp->sd_ail_lock);
|
||||
|
||||
list_for_each_entry_safe_reverse(ai, s, &sdp->sd_ail1_list, ai_list) {
|
||||
if (gfs2_ail1_empty_one(sdp, ai, flags))
|
||||
gfs2_ail1_empty_one(sdp, ai);
|
||||
if (list_empty(&ai->ai_ail1_list))
|
||||
list_move(&ai->ai_list, &sdp->sd_ail2_list);
|
||||
else if (!(flags & DIO_ALL))
|
||||
else
|
||||
break;
|
||||
}
|
||||
|
||||
ret = list_empty(&sdp->sd_ail1_list);
|
||||
|
||||
spin_unlock(&sdp->sd_ail_lock);
|
||||
|
||||
return ret;
|
||||
|
@ -574,7 +583,7 @@ static void log_write_header(struct gfs2_sbd *sdp, u32 flags, int pull)
|
|||
set_buffer_uptodate(bh);
|
||||
clear_buffer_dirty(bh);
|
||||
|
||||
gfs2_ail1_empty(sdp, 0);
|
||||
gfs2_ail1_empty(sdp);
|
||||
tail = current_tail(sdp);
|
||||
|
||||
lh = (struct gfs2_log_header *)bh->b_data;
|
||||
|
@ -869,7 +878,7 @@ void gfs2_meta_syncfs(struct gfs2_sbd *sdp)
|
|||
gfs2_log_flush(sdp, NULL);
|
||||
for (;;) {
|
||||
gfs2_ail1_start(sdp);
|
||||
if (gfs2_ail1_empty(sdp, DIO_ALL))
|
||||
if (gfs2_ail1_empty(sdp))
|
||||
break;
|
||||
msleep(10);
|
||||
}
|
||||
|
@ -905,17 +914,15 @@ int gfs2_logd(void *data)
|
|||
|
||||
preflush = atomic_read(&sdp->sd_log_pinned);
|
||||
if (gfs2_jrnl_flush_reqd(sdp) || t == 0) {
|
||||
gfs2_ail1_empty(sdp, DIO_ALL);
|
||||
gfs2_ail1_empty(sdp);
|
||||
gfs2_log_flush(sdp, NULL);
|
||||
gfs2_ail1_empty(sdp, DIO_ALL);
|
||||
}
|
||||
|
||||
if (gfs2_ail_flush_reqd(sdp)) {
|
||||
gfs2_ail1_start(sdp);
|
||||
io_schedule();
|
||||
gfs2_ail1_empty(sdp, 0);
|
||||
gfs2_ail1_empty(sdp);
|
||||
gfs2_log_flush(sdp, NULL);
|
||||
gfs2_ail1_empty(sdp, DIO_ALL);
|
||||
}
|
||||
|
||||
wake_up(&sdp->sd_log_waitq);
|
||||
|
|
|
fs/gfs2/log.h

@@ -12,6 +12,7 @@
 
 #include <linux/list.h>
 #include <linux/spinlock.h>
+#include <linux/writeback.h>
 #include "incore.h"
 
 /**

@@ -59,6 +60,7 @@ extern struct buffer_head *gfs2_log_fake_buf(struct gfs2_sbd *sdp,
 extern void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl);
 extern void gfs2_log_commit(struct gfs2_sbd *sdp, struct gfs2_trans *trans);
 extern void gfs2_remove_from_ail(struct gfs2_bufdata *bd);
+extern void gfs2_ail1_flush(struct gfs2_sbd *sdp, struct writeback_control *wbc);
 
 extern void gfs2_log_shutdown(struct gfs2_sbd *sdp);
 extern void gfs2_meta_syncfs(struct gfs2_sbd *sdp);
|
|
@ -40,7 +40,7 @@ static void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh)
|
|||
{
|
||||
struct gfs2_bufdata *bd;
|
||||
|
||||
gfs2_assert_withdraw(sdp, test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags));
|
||||
BUG_ON(!current->journal_info);
|
||||
|
||||
clear_buffer_dirty(bh);
|
||||
if (test_set_buffer_pinned(bh))
|
||||
|
@ -65,6 +65,7 @@ static void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh)
|
|||
* @sdp: the filesystem the buffer belongs to
|
||||
* @bh: The buffer to unpin
|
||||
* @ai:
|
||||
* @flags: The inode dirty flags
|
||||
*
|
||||
*/
|
||||
|
||||
|
@ -73,10 +74,8 @@ static void gfs2_unpin(struct gfs2_sbd *sdp, struct buffer_head *bh,
|
|||
{
|
||||
struct gfs2_bufdata *bd = bh->b_private;
|
||||
|
||||
gfs2_assert_withdraw(sdp, buffer_uptodate(bh));
|
||||
|
||||
if (!buffer_pinned(bh))
|
||||
gfs2_assert_withdraw(sdp, 0);
|
||||
BUG_ON(!buffer_uptodate(bh));
|
||||
BUG_ON(!buffer_pinned(bh));
|
||||
|
||||
lock_buffer(bh);
|
||||
mark_buffer_dirty(bh);
|
||||
|
@ -95,8 +94,7 @@ static void gfs2_unpin(struct gfs2_sbd *sdp, struct buffer_head *bh,
|
|||
list_add(&bd->bd_ail_st_list, &ai->ai_ail1_list);
|
||||
spin_unlock(&sdp->sd_ail_lock);
|
||||
|
||||
if (test_and_clear_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags))
|
||||
gfs2_glock_schedule_for_reclaim(bd->bd_gl);
|
||||
clear_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags);
|
||||
trace_gfs2_pin(bd, 0);
|
||||
unlock_buffer(bh);
|
||||
atomic_dec(&sdp->sd_log_pinned);
|
||||
|
@ -322,12 +320,16 @@ static void buf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
|
|||
|
||||
static void revoke_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
|
||||
{
|
||||
struct gfs2_bufdata *bd = container_of(le, struct gfs2_bufdata, bd_le);
|
||||
struct gfs2_glock *gl = bd->bd_gl;
|
||||
struct gfs2_trans *tr;
|
||||
|
||||
tr = current->journal_info;
|
||||
tr->tr_touched = 1;
|
||||
tr->tr_num_revoke++;
|
||||
sdp->sd_log_num_revoke++;
|
||||
atomic_inc(&gl->gl_revokes);
|
||||
set_bit(GLF_LFLUSH, &gl->gl_flags);
|
||||
list_add(&le->le_list, &sdp->sd_log_le_revoke);
|
||||
}
|
||||
|
||||
|
@ -350,9 +352,7 @@ static void revoke_lo_before_commit(struct gfs2_sbd *sdp)
|
|||
ld->ld_data1 = cpu_to_be32(sdp->sd_log_num_revoke);
|
||||
offset = sizeof(struct gfs2_log_descriptor);
|
||||
|
||||
while (!list_empty(head)) {
|
||||
bd = list_entry(head->next, struct gfs2_bufdata, bd_le.le_list);
|
||||
list_del_init(&bd->bd_le.le_list);
|
||||
list_for_each_entry(bd, head, bd_le.le_list) {
|
||||
sdp->sd_log_num_revoke--;
|
||||
|
||||
if (offset + sizeof(u64) > sdp->sd_sb.sb_bsize) {
|
||||
|
@ -367,8 +367,6 @@ static void revoke_lo_before_commit(struct gfs2_sbd *sdp)
|
|||
}
|
||||
|
||||
*(__be64 *)(bh->b_data + offset) = cpu_to_be64(bd->bd_blkno);
|
||||
kmem_cache_free(gfs2_bufdata_cachep, bd);
|
||||
|
||||
offset += sizeof(u64);
|
||||
}
|
||||
gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);
|
||||
|
@ -376,6 +374,22 @@ static void revoke_lo_before_commit(struct gfs2_sbd *sdp)
|
|||
submit_bh(WRITE_SYNC, bh);
|
||||
}
|
||||
|
||||
static void revoke_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
|
||||
{
|
||||
struct list_head *head = &sdp->sd_log_le_revoke;
|
||||
struct gfs2_bufdata *bd;
|
||||
struct gfs2_glock *gl;
|
||||
|
||||
while (!list_empty(head)) {
|
||||
bd = list_entry(head->next, struct gfs2_bufdata, bd_le.le_list);
|
||||
list_del_init(&bd->bd_le.le_list);
|
||||
gl = bd->bd_gl;
|
||||
atomic_dec(&gl->gl_revokes);
|
||||
clear_bit(GLF_LFLUSH, &gl->gl_flags);
|
||||
kmem_cache_free(gfs2_bufdata_cachep, bd);
|
||||
}
|
||||
}
|
||||
|
||||
static void revoke_lo_before_scan(struct gfs2_jdesc *jd,
|
||||
struct gfs2_log_header_host *head, int pass)
|
||||
{
|
||||
|
@ -749,6 +763,7 @@ const struct gfs2_log_operations gfs2_buf_lops = {
|
|||
const struct gfs2_log_operations gfs2_revoke_lops = {
|
||||
.lo_add = revoke_lo_add,
|
||||
.lo_before_commit = revoke_lo_before_commit,
|
||||
.lo_after_commit = revoke_lo_after_commit,
|
||||
.lo_before_scan = revoke_lo_before_scan,
|
||||
.lo_scan_elements = revoke_lo_scan_elements,
|
||||
.lo_after_scan = revoke_lo_after_scan,
|
||||
|
|
|
fs/gfs2/main.c

@@ -53,6 +53,7 @@ static void gfs2_init_glock_once(void *foo)
 	INIT_LIST_HEAD(&gl->gl_lru);
 	INIT_LIST_HEAD(&gl->gl_ail_list);
 	atomic_set(&gl->gl_ail_count, 0);
+	atomic_set(&gl->gl_revokes, 0);
 }
 
 static void gfs2_init_gl_aspace_once(void *foo)
fs/gfs2/meta_io.c

@@ -31,6 +31,7 @@
 #include "rgrp.h"
 #include "trans.h"
 #include "util.h"
+#include "trace_gfs2.h"
 
 static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wbc)
 {

@@ -310,6 +311,7 @@ void gfs2_remove_from_journal(struct buffer_head *bh, struct gfs2_trans *tr, int
 	struct gfs2_bufdata *bd = bh->b_private;
 
 	if (test_clear_buffer_pinned(bh)) {
+		trace_gfs2_pin(bd, 0);
 		atomic_dec(&sdp->sd_log_pinned);
 		list_del_init(&bd->bd_le.le_list);
 		if (meta) {
fs/gfs2/meta_io.h

@@ -77,8 +77,6 @@ struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen);
 
 #define buffer_busy(bh) \
 ((bh)->b_state & ((1ul << BH_Dirty) | (1ul << BH_Lock) | (1ul << BH_Pinned)))
-#define buffer_in_io(bh) \
-((bh)->b_state & ((1ul << BH_Dirty) | (1ul << BH_Lock)))
 
 #endif /* __DIO_DOT_H__ */
 
|
|
|
@ -126,8 +126,10 @@ static struct gfs2_sbd *init_sbd(struct super_block *sb)
|
|||
* changed.
|
||||
*/
|
||||
|
||||
static int gfs2_check_sb(struct gfs2_sbd *sdp, struct gfs2_sb_host *sb, int silent)
|
||||
static int gfs2_check_sb(struct gfs2_sbd *sdp, int silent)
|
||||
{
|
||||
struct gfs2_sb_host *sb = &sdp->sd_sb;
|
||||
|
||||
if (sb->sb_magic != GFS2_MAGIC ||
|
||||
sb->sb_type != GFS2_METATYPE_SB) {
|
||||
if (!silent)
|
||||
|
@ -157,8 +159,10 @@ static void end_bio_io_page(struct bio *bio, int error)
|
|||
unlock_page(page);
|
||||
}
|
||||
|
||||
static void gfs2_sb_in(struct gfs2_sb_host *sb, const void *buf)
|
||||
static void gfs2_sb_in(struct gfs2_sbd *sdp, const void *buf)
|
||||
{
|
||||
struct gfs2_sb_host *sb = &sdp->sd_sb;
|
||||
struct super_block *s = sdp->sd_vfs;
|
||||
const struct gfs2_sb *str = buf;
|
||||
|
||||
sb->sb_magic = be32_to_cpu(str->sb_header.mh_magic);
|
||||
|
@ -175,7 +179,7 @@ static void gfs2_sb_in(struct gfs2_sb_host *sb, const void *buf)
|
|||
|
||||
memcpy(sb->sb_lockproto, str->sb_lockproto, GFS2_LOCKNAME_LEN);
|
||||
memcpy(sb->sb_locktable, str->sb_locktable, GFS2_LOCKNAME_LEN);
|
||||
memcpy(sb->sb_uuid, str->sb_uuid, 16);
|
||||
memcpy(s->s_uuid, str->sb_uuid, 16);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -197,7 +201,7 @@ static void gfs2_sb_in(struct gfs2_sb_host *sb, const void *buf)
|
|||
* Returns: 0 on success or error
|
||||
*/
|
||||
|
||||
static int gfs2_read_super(struct gfs2_sbd *sdp, sector_t sector)
|
||||
static int gfs2_read_super(struct gfs2_sbd *sdp, sector_t sector, int silent)
|
||||
{
|
||||
struct super_block *sb = sdp->sd_vfs;
|
||||
struct gfs2_sb *p;
|
||||
|
@ -227,10 +231,10 @@ static int gfs2_read_super(struct gfs2_sbd *sdp, sector_t sector)
|
|||
return -EIO;
|
||||
}
|
||||
p = kmap(page);
|
||||
gfs2_sb_in(&sdp->sd_sb, p);
|
||||
gfs2_sb_in(sdp, p);
|
||||
kunmap(page);
|
||||
__free_page(page);
|
||||
return 0;
|
||||
return gfs2_check_sb(sdp, silent);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -247,17 +251,13 @@ static int gfs2_read_sb(struct gfs2_sbd *sdp, int silent)
|
|||
unsigned int x;
|
||||
int error;
|
||||
|
||||
error = gfs2_read_super(sdp, GFS2_SB_ADDR >> sdp->sd_fsb2bb_shift);
|
||||
error = gfs2_read_super(sdp, GFS2_SB_ADDR >> sdp->sd_fsb2bb_shift, silent);
|
||||
if (error) {
|
||||
if (!silent)
|
||||
fs_err(sdp, "can't read superblock\n");
|
||||
return error;
|
||||
}
|
||||
|
||||
error = gfs2_check_sb(sdp, &sdp->sd_sb, silent);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
sdp->sd_fsb2bb_shift = sdp->sd_sb.sb_bsize_shift -
|
||||
GFS2_BASIC_BLOCK_SHIFT;
|
||||
sdp->sd_fsb2bb = 1 << sdp->sd_fsb2bb_shift;
|
||||
|
@ -340,14 +340,10 @@ static int init_names(struct gfs2_sbd *sdp, int silent)
|
|||
/* Try to autodetect */
|
||||
|
||||
if (!proto[0] || !table[0]) {
|
||||
error = gfs2_read_super(sdp, GFS2_SB_ADDR >> sdp->sd_fsb2bb_shift);
|
||||
error = gfs2_read_super(sdp, GFS2_SB_ADDR >> sdp->sd_fsb2bb_shift, silent);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
error = gfs2_check_sb(sdp, &sdp->sd_sb, silent);
|
||||
if (error)
|
||||
goto out;
|
||||
|
||||
if (!proto[0])
|
||||
proto = sdp->sd_sb.sb_lockproto;
|
||||
if (!table[0])
|
||||
|
@ -364,7 +360,6 @@ static int init_names(struct gfs2_sbd *sdp, int silent)
|
|||
while ((table = strchr(table, '/')))
|
||||
*table = '_';
|
||||
|
||||
out:
|
||||
return error;
|
||||
}
|
||||
|
||||
|
@ -1119,8 +1114,7 @@ static int fill_super(struct super_block *sb, struct gfs2_args *args, int silent
|
|||
if (sdp->sd_args.ar_statfs_quantum) {
|
||||
sdp->sd_tune.gt_statfs_slow = 0;
|
||||
sdp->sd_tune.gt_statfs_quantum = sdp->sd_args.ar_statfs_quantum;
|
||||
}
|
||||
else {
|
||||
} else {
|
||||
sdp->sd_tune.gt_statfs_slow = 1;
|
||||
sdp->sd_tune.gt_statfs_quantum = 30;
|
||||
}
|
||||
|
|
fs/gfs2/ops_inode.c | 1344 (file diff suppressed because it is too large)
|
@ -78,10 +78,11 @@ static u32 rgblk_search(struct gfs2_rgrpd *rgd, u32 goal,

static inline void gfs2_setbit(struct gfs2_rgrpd *rgd, unsigned char *buf1,
			       unsigned char *buf2, unsigned int offset,
			       unsigned int buflen, u32 block,
			       struct gfs2_bitmap *bi, u32 block,
			       unsigned char new_state)
{
	unsigned char *byte1, *byte2, *end, cur_state;
	unsigned int buflen = bi->bi_len;
	const unsigned int bit = (block % GFS2_NBBY) * GFS2_BIT_SIZE;

	byte1 = buf1 + offset + (block / GFS2_NBBY);
@ -92,6 +93,16 @@ static inline void gfs2_setbit(struct gfs2_rgrpd *rgd, unsigned char *buf1,
	cur_state = (*byte1 >> bit) & GFS2_BIT_MASK;

	if (unlikely(!valid_change[new_state * 4 + cur_state])) {
		printk(KERN_WARNING "GFS2: buf_blk = 0x%llx old_state=%d, "
		       "new_state=%d\n",
		       (unsigned long long)block, cur_state, new_state);
		printk(KERN_WARNING "GFS2: rgrp=0x%llx bi_start=0x%lx\n",
		       (unsigned long long)rgd->rd_addr,
		       (unsigned long)bi->bi_start);
		printk(KERN_WARNING "GFS2: bi_offset=0x%lx bi_len=0x%lx\n",
		       (unsigned long)bi->bi_offset,
		       (unsigned long)bi->bi_len);
		dump_stack();
		gfs2_consist_rgrpd(rgd);
		return;
	}
@ -381,6 +392,7 @@ static void clear_rgrpdi(struct gfs2_sbd *sdp)

		if (gl) {
			gl->gl_object = NULL;
			gfs2_glock_add_to_lru(gl);
			gfs2_glock_put(gl);
		}

@ -1365,7 +1377,7 @@ static u32 rgblk_search(struct gfs2_rgrpd *rgd, u32 goal,

	gfs2_trans_add_bh(rgd->rd_gl, bi->bi_bh, 1);
	gfs2_setbit(rgd, bi->bi_bh->b_data, bi->bi_clone, bi->bi_offset,
		    bi->bi_len, blk, new_state);
		    bi, blk, new_state);
	goal = blk;
	while (*n < elen) {
		goal++;
@ -1375,7 +1387,7 @@ static u32 rgblk_search(struct gfs2_rgrpd *rgd, u32 goal,
		    GFS2_BLKST_FREE)
			break;
		gfs2_setbit(rgd, bi->bi_bh->b_data, bi->bi_clone, bi->bi_offset,
			    bi->bi_len, goal, new_state);
			    bi, goal, new_state);
		(*n)++;
	}
out:
@ -1432,7 +1444,7 @@ static struct gfs2_rgrpd *rgblk_free(struct gfs2_sbd *sdp, u64 bstart,
		}
		gfs2_trans_add_bh(rgd->rd_gl, bi->bi_bh, 1);
		gfs2_setbit(rgd, bi->bi_bh->b_data, NULL, bi->bi_offset,
			    bi->bi_len, buf_blk, new_state);
			    bi, buf_blk, new_state);
	}

	return rgd;
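With struct gfs2_bitmap * now threaded through to gfs2_setbit(), callers hand over the bitmap descriptor itself, so the consistency-check path can report bi_start, bi_offset and bi_len before calling gfs2_consist_rgrpd(). A hedged sketch of the new call shape, mirroring the rgblk_search() caller above (example_mark_block is an invented wrapper, not code from the patch):

static void example_mark_block(struct gfs2_rgrpd *rgd, struct gfs2_bitmap *bi,
			       u32 blk, unsigned char new_state)
{
	/* Journal the bitmap buffer, then flip the two-bit block state;
	 * on an invalid transition gfs2_setbit() can now dump the whole
	 * bitmap descriptor instead of just the raw buffer length. */
	gfs2_trans_add_bh(rgd->rd_gl, bi->bi_bh, 1);
	gfs2_setbit(rgd, bi->bi_bh->b_data, bi->bi_clone, bi->bi_offset,
		    bi, blk, new_state);
}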
138 fs/gfs2/super.c
@ -23,6 +23,7 @@
#include <linux/time.h>
#include <linux/wait.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>

#include "gfs2.h"
#include "incore.h"
@ -700,11 +701,47 @@ void gfs2_unfreeze_fs(struct gfs2_sbd *sdp)
	mutex_unlock(&sdp->sd_freeze_lock);
}

void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf)
{
	struct gfs2_dinode *str = buf;

	str->di_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
	str->di_header.mh_type = cpu_to_be32(GFS2_METATYPE_DI);
	str->di_header.mh_format = cpu_to_be32(GFS2_FORMAT_DI);
	str->di_num.no_addr = cpu_to_be64(ip->i_no_addr);
	str->di_num.no_formal_ino = cpu_to_be64(ip->i_no_formal_ino);
	str->di_mode = cpu_to_be32(ip->i_inode.i_mode);
	str->di_uid = cpu_to_be32(ip->i_inode.i_uid);
	str->di_gid = cpu_to_be32(ip->i_inode.i_gid);
	str->di_nlink = cpu_to_be32(ip->i_inode.i_nlink);
	str->di_size = cpu_to_be64(i_size_read(&ip->i_inode));
	str->di_blocks = cpu_to_be64(gfs2_get_inode_blocks(&ip->i_inode));
	str->di_atime = cpu_to_be64(ip->i_inode.i_atime.tv_sec);
	str->di_mtime = cpu_to_be64(ip->i_inode.i_mtime.tv_sec);
	str->di_ctime = cpu_to_be64(ip->i_inode.i_ctime.tv_sec);

	str->di_goal_meta = cpu_to_be64(ip->i_goal);
	str->di_goal_data = cpu_to_be64(ip->i_goal);
	str->di_generation = cpu_to_be64(ip->i_generation);

	str->di_flags = cpu_to_be32(ip->i_diskflags);
	str->di_height = cpu_to_be16(ip->i_height);
	str->di_payload_format = cpu_to_be32(S_ISDIR(ip->i_inode.i_mode) &&
					     !(ip->i_diskflags & GFS2_DIF_EXHASH) ?
					     GFS2_FORMAT_DE : 0);
	str->di_depth = cpu_to_be16(ip->i_depth);
	str->di_entries = cpu_to_be32(ip->i_entries);

	str->di_eattr = cpu_to_be64(ip->i_eattr);
	str->di_atime_nsec = cpu_to_be32(ip->i_inode.i_atime.tv_nsec);
	str->di_mtime_nsec = cpu_to_be32(ip->i_inode.i_mtime.tv_nsec);
	str->di_ctime_nsec = cpu_to_be32(ip->i_inode.i_ctime.tv_nsec);
}

/**
 * gfs2_write_inode - Make sure the inode is stable on the disk
 * @inode: The inode
 * @sync: synchronous write flag
 * @wbc: The writeback control structure
 *
 * Returns: errno
 */
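gfs2_dinode_out(), moved here as part of folding the old inode.c into super.c, serializes the in-core inode back into its big-endian on-disk dinode layout. The usual pattern for flushing an inode's fields is to journal the dinode buffer and then overwrite its contents; the sketch below assumes gfs2_meta_inode_buffer() from the wider GFS2 code base, which is not part of this diff:

static int example_flush_dinode(struct gfs2_inode *ip)
{
	struct buffer_head *dibh;
	int error;

	/* Assumed helper: fetches the buffer holding ip's on-disk dinode */
	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	gfs2_trans_add_bh(ip->i_gl, dibh, 1);	/* journal the metadata buffer */
	gfs2_dinode_out(ip, dibh->b_data);	/* rewrite the dinode fields */
	brelse(dibh);
	return 0;
}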
@ -713,15 +750,17 @@ static int gfs2_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct address_space *metamapping = gfs2_glock2aspace(ip->i_gl);
	struct backing_dev_info *bdi = metamapping->backing_dev_info;
	struct gfs2_holder gh;
	struct buffer_head *bh;
	struct timespec atime;
	struct gfs2_dinode *di;
	int ret = 0;
	int ret = -EAGAIN;

	/* Check this is a "normal" inode, etc */
	/* Skip timestamp update, if this is from a memalloc */
	if (current->flags & PF_MEMALLOC)
		return 0;
		goto do_flush;
	ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
	if (ret)
		goto do_flush;
@ -745,6 +784,13 @@ static int gfs2_write_inode(struct inode *inode, struct writeback_control *wbc)
do_flush:
	if (wbc->sync_mode == WB_SYNC_ALL)
		gfs2_log_flush(GFS2_SB(inode), ip->i_gl);
	filemap_fdatawrite(metamapping);
	if (bdi->dirty_exceeded)
		gfs2_ail1_flush(sdp, wbc);
	if (!ret && (wbc->sync_mode == WB_SYNC_ALL))
		ret = filemap_fdatawait(metamapping);
	if (ret)
		mark_inode_dirty_sync(inode);
	return ret;
}

@ -874,8 +920,9 @@ static void gfs2_put_super(struct super_block *sb)

static int gfs2_sync_fs(struct super_block *sb, int wait)
{
	if (wait && sb->s_fs_info)
		gfs2_log_flush(sb->s_fs_info, NULL);
	struct gfs2_sbd *sdp = sb->s_fs_info;
	if (wait && sdp)
		gfs2_log_flush(sdp, NULL);
	return 0;
}

@ -1308,6 +1355,78 @@ static int gfs2_show_options(struct seq_file *s, struct vfsmount *mnt)
	return 0;
}

static void gfs2_final_release_pages(struct gfs2_inode *ip)
{
	struct inode *inode = &ip->i_inode;
	struct gfs2_glock *gl = ip->i_gl;

	truncate_inode_pages(gfs2_glock2aspace(ip->i_gl), 0);
	truncate_inode_pages(&inode->i_data, 0);

	if (atomic_read(&gl->gl_revokes) == 0) {
		clear_bit(GLF_LFLUSH, &gl->gl_flags);
		clear_bit(GLF_DIRTY, &gl->gl_flags);
	}
}

static int gfs2_dinode_dealloc(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_alloc *al;
	struct gfs2_rgrpd *rgd;
	int error;

	if (gfs2_get_inode_blocks(&ip->i_inode) != 1) {
		gfs2_consist_inode(ip);
		return -EIO;
	}

	al = gfs2_alloc_get(ip);
	if (!al)
		return -ENOMEM;

	error = gfs2_quota_hold(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
	if (error)
		goto out;

	error = gfs2_rindex_hold(sdp, &al->al_ri_gh);
	if (error)
		goto out_qs;

	rgd = gfs2_blk2rgrpd(sdp, ip->i_no_addr);
	if (!rgd) {
		gfs2_consist_inode(ip);
		error = -EIO;
		goto out_rindex_relse;
	}

	error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0,
				   &al->al_rgd_gh);
	if (error)
		goto out_rindex_relse;

	error = gfs2_trans_begin(sdp, RES_RG_BIT + RES_STATFS + RES_QUOTA,
				 sdp->sd_jdesc->jd_blocks);
	if (error)
		goto out_rg_gunlock;

	gfs2_free_di(rgd, ip);

	gfs2_final_release_pages(ip);

	gfs2_trans_end(sdp);

out_rg_gunlock:
	gfs2_glock_dq_uninit(&al->al_rgd_gh);
out_rindex_relse:
	gfs2_glock_dq_uninit(&al->al_ri_gh);
out_qs:
	gfs2_quota_unhold(ip);
out:
	gfs2_alloc_put(ip);
	return error;
}

/*
 * We have to (at the moment) hold the inodes main lock to cover
 * the gap between unlocking the shared lock on the iopen lock and
@ -1371,15 +1490,13 @@ static void gfs2_evict_inode(struct inode *inode)
	}

	error = gfs2_dinode_dealloc(ip);
	if (error)
		goto out_unlock;
	goto out_unlock;

out_truncate:
	error = gfs2_trans_begin(sdp, 0, sdp->sd_jdesc->jd_blocks);
	if (error)
		goto out_unlock;
	/* Needs to be done before glock release & also in a transaction */
	truncate_inode_pages(&inode->i_data, 0);
	gfs2_final_release_pages(ip);
	gfs2_trans_end(sdp);

out_unlock:
@ -1394,6 +1511,7 @@ static void gfs2_evict_inode(struct inode *inode)
	end_writeback(inode);

	ip->i_gl->gl_object = NULL;
	gfs2_glock_add_to_lru(ip->i_gl);
	gfs2_glock_put(ip->i_gl);
	ip->i_gl = NULL;
	if (ip->i_iopen_gh.gh_gl) {
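Taken together, the evict-path hunks pin down one teardown ordering: pages are released inside a block transaction via gfs2_final_release_pages(), and only afterwards is the inode glock detached and parked on the LRU. The hedged sketch below is a condensed restatement of the code above, not a separate helper added by the patch:

static void example_teardown_order(struct gfs2_inode *ip, struct gfs2_sbd *sdp)
{
	/* 1. Page cache and metadata pages go away inside a transaction */
	if (gfs2_trans_begin(sdp, 0, sdp->sd_jdesc->jd_blocks) == 0) {
		gfs2_final_release_pages(ip);
		gfs2_trans_end(sdp);
	}

	/* 2. Only then is the glock handed over to the LRU */
	ip->i_gl->gl_object = NULL;
	gfs2_glock_add_to_lru(ip->i_gl);
	gfs2_glock_put(ip->i_gl);
	ip->i_gl = NULL;
}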
@ -81,7 +81,8 @@ static int gfs2_uuid_valid(const u8 *uuid)

static ssize_t uuid_show(struct gfs2_sbd *sdp, char *buf)
{
	const u8 *uuid = sdp->sd_sb.sb_uuid;
	struct super_block *s = sdp->sd_vfs;
	const u8 *uuid = s->s_uuid;
	buf[0] = '\0';
	if (!gfs2_uuid_valid(uuid))
		return 0;
@ -616,7 +617,8 @@ static int gfs2_uevent(struct kset *kset, struct kobject *kobj,
		       struct kobj_uevent_env *env)
{
	struct gfs2_sbd *sdp = container_of(kobj, struct gfs2_sbd, sd_kobj);
	const u8 *uuid = sdp->sd_sb.sb_uuid;
	struct super_block *s = sdp->sd_vfs;
	const u8 *uuid = s->s_uuid;

	add_uevent_var(env, "LOCKTABLE=%s", sdp->sd_table_name);
	add_uevent_var(env, "LOCKPROTO=%s", sdp->sd_proto_name);
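These sysfs and uevent hunks read the UUID from the generic VFS super_block (s->s_uuid) rather than from GFS2's private superblock copy. For that to work, the mount path has to copy the on-disk UUID into the generic field when the superblock is read in; a hedged sketch of what that copy presumably looks like is given below. The field names str->sb_uuid and s->s_uuid come from the surrounding code, but the exact placement (inside the reworked gfs2_sb_in()) is an assumption, and example_sb_in_uuid is an invented name:

static void example_sb_in_uuid(struct gfs2_sbd *sdp, const struct gfs2_sb *str)
{
	struct super_block *s = sdp->sd_vfs;

	/* Assumed: propagate the on-disk UUID so uuid_show() and
	 * gfs2_uevent() can read it from the generic super_block. */
	memcpy(s->s_uuid, str->sb_uuid, 16);
}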
@ -10,6 +10,7 @@
#include <linux/buffer_head.h>
#include <linux/dlmconstants.h>
#include <linux/gfs2_ondisk.h>
#include <linux/writeback.h>
#include "incore.h"
#include "glock.h"

@ -40,7 +41,9 @@
	{(1UL << GLF_REPLY_PENDING),		"r" },		\
	{(1UL << GLF_INITIAL),			"I" },		\
	{(1UL << GLF_FROZEN),			"F" },		\
	{(1UL << GLF_QUEUED),			"q" })
	{(1UL << GLF_QUEUED),			"q" },		\
	{(1UL << GLF_LRU),			"L" },		\
	{(1UL << GLF_OBJECT),			"o" })

#ifndef NUMPTY
#define NUMPTY
@ -94,7 +97,7 @@ TRACE_EVENT(gfs2_glock_state_change,
		__entry->new_state	= glock_trace_state(new_state);
		__entry->tgt_state	= glock_trace_state(gl->gl_target);
		__entry->dmt_state	= glock_trace_state(gl->gl_demote_state);
		__entry->flags		= gl->gl_flags;
		__entry->flags		= gl->gl_flags | (gl->gl_object ? (1UL<<GLF_OBJECT) : 0);
	),

	TP_printk("%u,%u glock %d:%lld state %s to %s tgt:%s dmt:%s flags:%s",
@ -127,7 +130,7 @@ TRACE_EVENT(gfs2_glock_put,
		__entry->gltype		= gl->gl_name.ln_type;
		__entry->glnum		= gl->gl_name.ln_number;
		__entry->cur_state	= glock_trace_state(gl->gl_state);
		__entry->flags		= gl->gl_flags;
		__entry->flags		= gl->gl_flags | (gl->gl_object ? (1UL<<GLF_OBJECT) : 0);
	),

	TP_printk("%u,%u glock %d:%lld state %s => %s flags:%s",
@ -161,7 +164,7 @@ TRACE_EVENT(gfs2_demote_rq,
		__entry->glnum		= gl->gl_name.ln_number;
		__entry->cur_state	= glock_trace_state(gl->gl_state);
		__entry->dmt_state	= glock_trace_state(gl->gl_demote_state);
		__entry->flags		= gl->gl_flags;
		__entry->flags		= gl->gl_flags | (gl->gl_object ? (1UL<<GLF_OBJECT) : 0);
	),

	TP_printk("%u,%u glock %d:%lld demote %s to %s flags:%s",
@ -318,6 +321,33 @@ TRACE_EVENT(gfs2_log_blocks,
		  MINOR(__entry->dev), __entry->blocks)
);

/* Writing back the AIL */
TRACE_EVENT(gfs2_ail_flush,

	TP_PROTO(const struct gfs2_sbd *sdp, const struct writeback_control *wbc, int start),

	TP_ARGS(sdp, wbc, start),

	TP_STRUCT__entry(
		__field(	dev_t,	dev		)
		__field(	int,	start		)
		__field(	int,	sync_mode	)
		__field(	long,	nr_to_write	)
	),

	TP_fast_assign(
		__entry->dev		= sdp->sd_vfs->s_dev;
		__entry->start		= start;
		__entry->sync_mode	= wbc->sync_mode;
		__entry->nr_to_write	= wbc->nr_to_write;
	),

	TP_printk("%u,%u ail flush %s %s %ld", MAJOR(__entry->dev),
		  MINOR(__entry->dev), __entry->start ? "start" : "end",
		  __entry->sync_mode == WB_SYNC_ALL ? "all" : "none",
		  __entry->nr_to_write)
);

/* Section 3 - bmap
 *
 * Objectives:
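The TRACE_EVENT(gfs2_ail_flush, ...) definition generates a trace_gfs2_ail_flush() call that the AIL writeback code can emit on entry and exit, with the start argument selecting the "start"/"end" marker seen in TP_printk. A hedged usage sketch (the body of the real gfs2_ail1_flush() is elided; only the tracepoint calls are the point here):

void example_ail1_flush(struct gfs2_sbd *sdp, struct writeback_control *wbc)
{
	trace_gfs2_ail_flush(sdp, wbc, 1);	/* "start" marker */

	/* ... walk the AIL and write back buffers, honouring
	 * wbc->nr_to_write and wbc->sync_mode ... */

	trace_gfs2_ail_flush(sdp, wbc, 0);	/* "end" marker */
}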