xfs: fold dfops into the transaction
struct xfs_defer_ops has now been reduced to a single list_head. The external dfops mechanism is unused, and thus everywhere a (permanent) transaction is accessible the associated dfops structure is as well.

Remove the xfs_defer_ops structure and fold the list_head into the transaction. Also remove the last remnant of external dfops in xfs_trans_dup().

Signed-off-by: Brian Foster <bfoster@redhat.com>
Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
parent c03edc9e49
commit 9d9e623385
12 changed files with 46 additions and 96 deletions
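In effect, the commit collapses the one-field wrapper struct into the transaction itself. A minimal before/after sketch, condensed from the xfs_trans.h hunks below (unrelated fields elided):

/* Before: deferred ops live in a separate struct reached via a pointer. */
struct xfs_defer_ops {
	struct list_head	dop_intake;	/* unlogged pending work */
};

typedef struct xfs_trans {
	/* ... */
	struct xfs_defer_ops	*t_dfops;	/* dfops reference */
	/* ... */
	struct xfs_defer_ops	t_dfops_internal;
} xfs_trans_t;

/* After: the pending-work list is embedded directly in the transaction. */
typedef struct xfs_trans {
	/* ... */
	struct list_head	t_dfops;	/* deferred operations */
	/* ... */
} xfs_trans_t;

Accordingly, most of the changes below are mechanical: code that dereferenced tp->t_dfops->dop_intake now uses &tp->t_dfops directly, and the dfops tracepoints take the transaction instead of a dfops pointer.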
@@ -4286,7 +4286,6 @@ xfs_bmapi_write(
 	bma.ip = ip;
 	bma.total = total;
 	bma.datatype = 0;
-	ASSERT(!tp || tp->t_dfops);
 
 	while (bno < end && n < *nmap) {
 		bool			need_alloc = false, wasdelay = false;

@@ -7,7 +7,6 @@
 #define __XFS_BTREE_H__
 
 struct xfs_buf;
-struct xfs_defer_ops;
 struct xfs_inode;
 struct xfs_mount;
 struct xfs_trans;

@@ -7,7 +7,6 @@
 #ifndef __XFS_DA_BTREE_H__
 #define __XFS_DA_BTREE_H__
 
-struct xfs_defer_ops;
 struct xfs_inode;
 struct xfs_trans;
 struct zone;

@@ -183,11 +183,10 @@ STATIC void
 xfs_defer_create_intents(
 	struct xfs_trans		*tp)
 {
-	struct xfs_defer_ops		*dop = tp->t_dfops;
 	struct list_head		*li;
 	struct xfs_defer_pending	*dfp;
 
-	list_for_each_entry(dfp, &dop->dop_intake, dfp_list) {
+	list_for_each_entry(dfp, &tp->t_dfops, dfp_list) {
 		dfp->dfp_intent = dfp->dfp_type->create_intent(tp,
 				dfp->dfp_count);
 		trace_xfs_defer_create_intent(tp->t_mountp, dfp);

@@ -204,10 +203,9 @@ xfs_defer_trans_abort(
 	struct xfs_trans		*tp,
 	struct list_head		*dop_pending)
 {
-	struct xfs_defer_ops		*dop = tp->t_dfops;
 	struct xfs_defer_pending	*dfp;
 
-	trace_xfs_defer_trans_abort(tp->t_mountp, dop, _RET_IP_);
+	trace_xfs_defer_trans_abort(tp, _RET_IP_);
 
 	/* Abort intent items that don't have a done item. */
 	list_for_each_entry(dfp, dop_pending, dfp_list) {

@@ -266,14 +264,13 @@ xfs_defer_trans_roll(
 		}
 	}
 
-	trace_xfs_defer_trans_roll(tp->t_mountp, tp->t_dfops, _RET_IP_);
+	trace_xfs_defer_trans_roll(tp, _RET_IP_);
 
 	/* Roll the transaction. */
 	error = xfs_trans_roll(tpp);
 	tp = *tpp;
 	if (error) {
-		trace_xfs_defer_trans_roll_error(tp->t_mountp,
-						 tp->t_dfops, error);
+		trace_xfs_defer_trans_roll_error(tp, error);
 		return error;
 	}
 

@@ -297,7 +294,7 @@ static void
 xfs_defer_reset(
 	struct xfs_trans	*tp)
 {
-	ASSERT(list_empty(&tp->t_dfops->dop_intake));
+	ASSERT(list_empty(&tp->t_dfops));
 
 	/*
 	 * Low mode state transfers across transaction rolls to mirror dfops

@@ -358,15 +355,13 @@ xfs_defer_finish_noroll(
 
 	ASSERT((*tp)->t_flags & XFS_TRANS_PERM_LOG_RES);
 
-	trace_xfs_defer_finish((*tp)->t_mountp, (*tp)->t_dfops, _RET_IP_);
+	trace_xfs_defer_finish(*tp, _RET_IP_);
 
 	/* Until we run out of pending work to finish... */
-	while (!list_empty(&dop_pending) ||
-	       !list_empty(&(*tp)->t_dfops->dop_intake)) {
+	while (!list_empty(&dop_pending) || !list_empty(&(*tp)->t_dfops)) {
 		/* log intents and pull in intake items */
 		xfs_defer_create_intents(*tp);
-		list_splice_tail_init(&(*tp)->t_dfops->dop_intake,
-				      &dop_pending);
+		list_splice_tail_init(&(*tp)->t_dfops, &dop_pending);
 
 		/*
 		 * Roll the transaction.

@@ -438,14 +433,13 @@ xfs_defer_finish_noroll(
 	if (error) {
 		xfs_defer_trans_abort(*tp, &dop_pending);
 		xfs_force_shutdown((*tp)->t_mountp, SHUTDOWN_CORRUPT_INCORE);
-		trace_xfs_defer_finish_error((*tp)->t_mountp, (*tp)->t_dfops,
-					     error);
+		trace_xfs_defer_finish_error(*tp, error);
 		xfs_defer_cancel_list((*tp)->t_mountp, &dop_pending);
 		xfs_defer_cancel(*tp);
 		return error;
 	}
 
-	trace_xfs_defer_finish_done((*tp)->t_mountp, (*tp)->t_dfops, _RET_IP_);
+	trace_xfs_defer_finish_done(*tp, _RET_IP_);
 	return 0;
 }
 

@@ -480,8 +474,8 @@ xfs_defer_cancel(
 {
 	struct xfs_mount	*mp = tp->t_mountp;
 
-	trace_xfs_defer_cancel(mp, tp->t_dfops, _RET_IP_);
-	xfs_defer_cancel_list(mp, &tp->t_dfops->dop_intake);
+	trace_xfs_defer_cancel(tp, _RET_IP_);
+	xfs_defer_cancel_list(mp, &tp->t_dfops);
 }
 
 /* Add an item for later deferred processing. */

@@ -491,7 +485,6 @@ xfs_defer_add(
 	enum xfs_defer_ops_type	type,
 	struct list_head	*li)
 {
-	struct xfs_defer_ops	*dop = tp->t_dfops;
 	struct xfs_defer_pending *dfp = NULL;
 
 	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);

@@ -501,8 +494,8 @@ xfs_defer_add(
 	 * If the last pending item has the same type, reuse it. Else,
 	 * create a new pending item at the end of the intake list.
 	 */
-	if (!list_empty(&dop->dop_intake)) {
-		dfp = list_last_entry(&dop->dop_intake,
+	if (!list_empty(&tp->t_dfops)) {
+		dfp = list_last_entry(&tp->t_dfops,
 				struct xfs_defer_pending, dfp_list);
 		if (dfp->dfp_type->type != type ||
 		    (dfp->dfp_type->max_items &&

@@ -517,7 +510,7 @@ xfs_defer_add(
 		dfp->dfp_done = NULL;
 		dfp->dfp_count = 0;
 		INIT_LIST_HEAD(&dfp->dfp_work);
-		list_add_tail(&dfp->dfp_list, &dop->dop_intake);
+		list_add_tail(&dfp->dfp_list, &tp->t_dfops);
 	}
 
 	list_add_tail(li, &dfp->dfp_work);

@@ -532,39 +525,17 @@ xfs_defer_init_op_type(
 	defer_op_types[type->type] = type;
 }
 
-/* Initialize a deferred operation. */
-void
-xfs_defer_init(
-	struct xfs_trans		*tp,
-	struct xfs_defer_ops		*dop)
-{
-	struct xfs_mount		*mp = NULL;
-
-	memset(dop, 0, sizeof(struct xfs_defer_ops));
-	INIT_LIST_HEAD(&dop->dop_intake);
-	if (tp) {
-		ASSERT(tp->t_firstblock == NULLFSBLOCK);
-		tp->t_dfops = dop;
-		mp = tp->t_mountp;
-	}
-	trace_xfs_defer_init(mp, dop, _RET_IP_);
-}
-
 /*
- * Move state from one xfs_defer_ops to another and reset the source to initial
- * state. This is primarily used to carry state forward across transaction rolls
- * with internal dfops.
+ * Move deferred ops from one transaction to another and reset the source to
+ * initial state. This is primarily used to carry state forward across
+ * transaction rolls with pending dfops.
  */
 void
 xfs_defer_move(
 	struct xfs_trans	*dtp,
 	struct xfs_trans	*stp)
 {
-	struct xfs_defer_ops	*dst = dtp->t_dfops;
-	struct xfs_defer_ops	*src = stp->t_dfops;
-	ASSERT(dst != src);
-
-	list_splice_init(&src->dop_intake, &dst->dop_intake);
+	list_splice_init(&stp->t_dfops, &dtp->t_dfops);
 
 	/*
 	 * Low free space mode was historically controlled by a dfops field.

@@ -7,7 +7,6 @@
 #define __XFS_DEFER_H__
 
 struct xfs_defer_op_type;
-struct xfs_defer_ops;
 
 /*
  * Save a log intent item and a list of extents, so that we can replay

@@ -40,7 +39,6 @@ void xfs_defer_add(struct xfs_trans *tp, enum xfs_defer_ops_type type,
 int xfs_defer_finish_noroll(struct xfs_trans **tp);
 int xfs_defer_finish(struct xfs_trans **tp);
 void xfs_defer_cancel(struct xfs_trans *);
-void xfs_defer_init(struct xfs_trans *tp, struct xfs_defer_ops *dop);
 void xfs_defer_move(struct xfs_trans *dtp, struct xfs_trans *stp);
 
 /* Description of a deferred type. */

@@ -424,7 +424,6 @@ xfs_dir_removename(
 	int			v;		/* type-checking value */
 
 	ASSERT(S_ISDIR(VFS_I(dp)->i_mode));
-	ASSERT(tp->t_dfops);
 	XFS_STATS_INC(dp->i_mount, xs_dir_remove);
 
 	args = kmem_zalloc(sizeof(*args), KM_SLEEP | KM_NOFS);

@@ -483,7 +482,6 @@ xfs_dir_replace(
 	int			v;		/* type-checking value */
 
 	ASSERT(S_ISDIR(VFS_I(dp)->i_mode));
-	ASSERT(tp->t_dfops);
 
 	rval = xfs_dir_ino_validate(tp->t_mountp, inum);
 	if (rval)

@@ -9,7 +9,6 @@
 #include "xfs_da_format.h"
 #include "xfs_da_btree.h"
 
-struct xfs_defer_ops;
 struct xfs_da_args;
 struct xfs_inode;
 struct xfs_mount;

@@ -15,7 +15,6 @@
 struct xfs_dinode;
 struct xfs_inode;
 struct xfs_buf;
-struct xfs_defer_ops;
 struct xfs_bmbt_irec;
 struct xfs_inode_log_item;
 struct xfs_mount;

@@ -502,7 +502,6 @@ xfs_reflink_cancel_cow_blocks(
 			if (error)
 				break;
 		} else if (del.br_state == XFS_EXT_UNWRITTEN || cancel_real) {
-			ASSERT((*tpp)->t_dfops);
 			ASSERT((*tpp)->t_firstblock == NULLFSBLOCK);
 
 			/* Free the CoW orphan record. */

@@ -678,7 +677,7 @@ xfs_reflink_end_cow(
 			goto prev_extent;
 
 		/* Unmap the old blocks in the data fork. */
-		ASSERT(tp->t_dfops && tp->t_firstblock == NULLFSBLOCK);
+		ASSERT(tp->t_firstblock == NULLFSBLOCK);
 		rlen = del.br_blockcount;
 		error = __xfs_bunmapi(tp, ip, del.br_startoff, &rlen, 0, 1);
 		if (error)

@@ -1021,7 +1020,7 @@ xfs_reflink_remap_extent(
 	/* Unmap the old blocks in the data fork. */
 	rlen = unmap_len;
 	while (rlen) {
-		ASSERT(tp->t_dfops && tp->t_firstblock == NULLFSBLOCK);
+		ASSERT(tp->t_firstblock == NULLFSBLOCK);
 		error = __xfs_bunmapi(tp, ip, destoff, &rlen, 0, 1);
 		if (error)
 			goto out_cancel;

@@ -2213,57 +2213,54 @@ DEFINE_BTREE_CUR_EVENT(xfs_btree_overlapped_query_range);
 
 /* deferred ops */
 struct xfs_defer_pending;
-struct xfs_defer_ops;
 
 DECLARE_EVENT_CLASS(xfs_defer_class,
-	TP_PROTO(struct xfs_mount *mp, struct xfs_defer_ops *dop,
-		 unsigned long caller_ip),
-	TP_ARGS(mp, dop, caller_ip),
+	TP_PROTO(struct xfs_trans *tp, unsigned long caller_ip),
+	TP_ARGS(tp, caller_ip),
 	TP_STRUCT__entry(
 		__field(dev_t, dev)
-		__field(void *, dop)
+		__field(struct xfs_trans *, tp)
 		__field(char, committed)
 		__field(unsigned long, caller_ip)
 	),
 	TP_fast_assign(
-		__entry->dev = mp ? mp->m_super->s_dev : 0;
-		__entry->dop = dop;
+		__entry->dev = tp->t_mountp->m_super->s_dev;
+		__entry->tp = tp;
 		__entry->caller_ip = caller_ip;
 	),
-	TP_printk("dev %d:%d ops %p caller %pS",
+	TP_printk("dev %d:%d tp %p caller %pS",
 		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  __entry->dop,
+		  __entry->tp,
 		  (char *)__entry->caller_ip)
 )
 #define DEFINE_DEFER_EVENT(name) \
 DEFINE_EVENT(xfs_defer_class, name, \
-	TP_PROTO(struct xfs_mount *mp, struct xfs_defer_ops *dop, \
-		 unsigned long caller_ip), \
-	TP_ARGS(mp, dop, caller_ip))
+	TP_PROTO(struct xfs_trans *tp, unsigned long caller_ip), \
+	TP_ARGS(tp, caller_ip))
 
 DECLARE_EVENT_CLASS(xfs_defer_error_class,
-	TP_PROTO(struct xfs_mount *mp, struct xfs_defer_ops *dop, int error),
-	TP_ARGS(mp, dop, error),
+	TP_PROTO(struct xfs_trans *tp, int error),
+	TP_ARGS(tp, error),
 	TP_STRUCT__entry(
 		__field(dev_t, dev)
-		__field(void *, dop)
+		__field(struct xfs_trans *, tp)
 		__field(char, committed)
 		__field(int, error)
 	),
 	TP_fast_assign(
-		__entry->dev = mp ? mp->m_super->s_dev : 0;
-		__entry->dop = dop;
+		__entry->dev = tp->t_mountp->m_super->s_dev;
+		__entry->tp = tp;
 		__entry->error = error;
 	),
-	TP_printk("dev %d:%d ops %p err %d",
+	TP_printk("dev %d:%d tp %p err %d",
 		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  __entry->dop,
+		  __entry->tp,
 		  __entry->error)
 )
 #define DEFINE_DEFER_ERROR_EVENT(name) \
 DEFINE_EVENT(xfs_defer_error_class, name, \
-	TP_PROTO(struct xfs_mount *mp, struct xfs_defer_ops *dop, int error), \
-	TP_ARGS(mp, dop, error))
+	TP_PROTO(struct xfs_trans *tp, int error), \
+	TP_ARGS(tp, error))
 
 DECLARE_EVENT_CLASS(xfs_defer_pending_class,
	TP_PROTO(struct xfs_mount *mp, struct xfs_defer_pending *dfp),

@@ -2382,7 +2379,6 @@ DEFINE_EVENT(xfs_map_extent_deferred_class, name, \
 		  xfs_exntst_t state), \
 	TP_ARGS(mp, agno, op, agbno, ino, whichfork, offset, len, state))
 
-DEFINE_DEFER_EVENT(xfs_defer_init);
 DEFINE_DEFER_EVENT(xfs_defer_cancel);
 DEFINE_DEFER_EVENT(xfs_defer_trans_roll);
 DEFINE_DEFER_EVENT(xfs_defer_trans_abort);

@@ -100,6 +100,7 @@ xfs_trans_dup(
 	ntp->t_mountp = tp->t_mountp;
 	INIT_LIST_HEAD(&ntp->t_items);
 	INIT_LIST_HEAD(&ntp->t_busy);
+	INIT_LIST_HEAD(&ntp->t_dfops);
 	ntp->t_firstblock = NULLFSBLOCK;
 
 	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);

@@ -120,12 +121,8 @@ xfs_trans_dup(
 	tp->t_rtx_res = tp->t_rtx_res_used;
 	ntp->t_pflags = tp->t_pflags;
 
-	/* copy the dfops pointer if it's external, otherwise move it */
-	xfs_defer_init(ntp, &ntp->t_dfops_internal);
-	if (tp->t_dfops != &tp->t_dfops_internal)
-		ntp->t_dfops = tp->t_dfops;
-	else
-		xfs_defer_move(ntp, tp);
+	/* move deferred ops over to the new tp */
+	xfs_defer_move(ntp, tp);
 
 	xfs_trans_dup_dqinfo(tp, ntp);
 

@@ -280,8 +277,8 @@ xfs_trans_alloc(
 	tp->t_mountp = mp;
 	INIT_LIST_HEAD(&tp->t_items);
 	INIT_LIST_HEAD(&tp->t_busy);
+	INIT_LIST_HEAD(&tp->t_dfops);
 	tp->t_firstblock = NULLFSBLOCK;
-	xfs_defer_init(tp, &tp->t_dfops_internal);
 
 	error = xfs_trans_reserve(tp, resp, blocks, rtextents);
 	if (error) {

@@ -929,7 +926,7 @@ __xfs_trans_commit(
 	 * Finish deferred items on final commit. Only permanent transactions
 	 * should ever have deferred ops.
 	 */
-	WARN_ON_ONCE(!list_empty(&tp->t_dfops->dop_intake) &&
+	WARN_ON_ONCE(!list_empty(&tp->t_dfops) &&
 		     !(tp->t_flags & XFS_TRANS_PERM_LOG_RES));
 	if (!regrant && (tp->t_flags & XFS_TRANS_PERM_LOG_RES)) {
 		error = xfs_defer_finish_noroll(&tp);

@@ -90,13 +90,10 @@ void xfs_log_item_init(struct xfs_mount *mp, struct xfs_log_item *item,
 #define XFS_ITEM_FLUSHING	3
 
 /*
- * Deferred operations tracking structure.
+ * Deferred operation item relogging limits.
  */
 #define XFS_DEFER_OPS_NR_INODES	2	/* join up to two inodes */
 #define XFS_DEFER_OPS_NR_BUFS	2	/* join up to two buffers */
-struct xfs_defer_ops {
-	struct list_head	dop_intake;	/* unlogged pending work */
-};
 
 /*
  * This is the structure maintained for every active transaction.

@@ -114,7 +111,6 @@ typedef struct xfs_trans {
 	struct xlog_ticket	*t_ticket;	/* log mgr ticket */
 	struct xfs_mount	*t_mountp;	/* ptr to fs mount struct */
 	struct xfs_dquot_acct	*t_dqinfo;	/* acctg info for dquots */
-	struct xfs_defer_ops	*t_dfops;	/* dfops reference */
 	int64_t			t_icount_delta;	/* superblock icount change */
 	int64_t			t_ifree_delta;	/* superblock ifree change */
 	int64_t			t_fdblocks_delta; /* superblock fdblocks chg */

@@ -136,8 +132,8 @@ typedef struct xfs_trans {
 	int64_t			t_rextslog_delta;/* superblocks rextslog chg */
 	struct list_head	t_items;	/* log item descriptors */
 	struct list_head	t_busy;		/* list of busy extents */
+	struct list_head	t_dfops;	/* deferred operations */
 	unsigned long		t_pflags;	/* saved process flags state */
-	struct xfs_defer_ops	t_dfops_internal;
 } xfs_trans_t;
 
 /*