xfs: remove xfs_qm_sync
Now that we can't have any dirty dquots around that aren't in the AIL, we can get rid of the explicit dquot syncing from xfssyncd and xfs_fs_sync_fs and instead rely on AIL pushing to write out any quota updates.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Ben Myers <bpm@sgi.com>
This commit is contained in:
parent f2fba558d3
commit 34625c661b

5 changed files with 3 additions and 119 deletions
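The mechanism the message relies on can be sketched in plain C. XFS tracks dirty metadata objects as log items in the AIL (Active Item List), ordered by the LSN at which they were logged; pushing the AIL writes back everything logged at or before a target LSN. Once every dirty dquot is guaranteed to be on that list like any other log item, a single push covers quota updates too, and the dquot-only walk removed below becomes redundant. A minimal userspace model, with all names hypothetical rather than kernel API:

/*
 * Minimal userspace model of AIL-based writeback (hypothetical names,
 * not the kernel API). Dirty objects -- dquots included -- are tracked
 * as log items on one list ordered by LSN; a single push covers them all.
 */
#include <stdio.h>

struct log_item {
	unsigned long	lsn;			/* when the item was logged */
	const char	*what;			/* "inode", "dquot", ... */
	void		(*write_out)(struct log_item *);
	struct log_item	*next;
};

static void write_item(struct log_item *li)
{
	printf("flushing %s logged at lsn %lu\n", li->what, li->lsn);
}

/*
 * Push everything logged at or before @target: the one loop that now
 * covers quota updates too, replacing the dquot-only walk in xfs_qm_sync().
 */
static void ail_push(struct log_item *ail, unsigned long target)
{
	for (struct log_item *li = ail; li; li = li->next)
		if (li->lsn <= target)
			li->write_out(li);
}

int main(void)
{
	struct log_item dquot = { 2, "dquot", write_item, NULL };
	struct log_item inode = { 1, "inode", write_item, &dquot };

	ail_push(&inode, 2);	/* no quota-specific sync pass required */
	return 0;
}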
@@ -879,100 +879,6 @@ xfs_qm_dqdetach(
 	}
 }
 
-int
-xfs_qm_sync(
-	struct xfs_mount	*mp,
-	int			flags)
-{
-	struct xfs_quotainfo	*q = mp->m_quotainfo;
-	int			recl, restarts;
-	struct xfs_dquot	*dqp;
-	int			error;
-
-	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
-		return 0;
-
-	restarts = 0;
-
- again:
-	mutex_lock(&q->qi_dqlist_lock);
-	/*
-	 * dqpurge_all() also takes the mplist lock and iterate thru all dquots
-	 * in quotaoff. However, if the QUOTA_ACTIVE bits are not cleared
-	 * when we have the mplist lock, we know that dquots will be consistent
-	 * as long as we have it locked.
-	 */
-	if (!XFS_IS_QUOTA_ON(mp)) {
-		mutex_unlock(&q->qi_dqlist_lock);
-		return 0;
-	}
-	ASSERT(mutex_is_locked(&q->qi_dqlist_lock));
-	list_for_each_entry(dqp, &q->qi_dqlist, q_mplist) {
-		/*
-		 * If this is vfs_sync calling, then skip the dquots that
-		 * don't 'seem' to be dirty. ie. don't acquire dqlock.
-		 * This is very similar to what xfs_sync does with inodes.
-		 */
-		if (flags & SYNC_TRYLOCK) {
-			if (!XFS_DQ_IS_DIRTY(dqp))
-				continue;
-			if (!xfs_qm_dqlock_nowait(dqp))
-				continue;
-		} else {
-			xfs_dqlock(dqp);
-		}
-
-		/*
-		 * Now, find out for sure if this dquot is dirty or not.
-		 */
-		if (! XFS_DQ_IS_DIRTY(dqp)) {
-			xfs_dqunlock(dqp);
-			continue;
-		}
-
-		/* XXX a sentinel would be better */
-		recl = q->qi_dqreclaims;
-		if (!xfs_dqflock_nowait(dqp)) {
-			if (flags & SYNC_TRYLOCK) {
-				xfs_dqunlock(dqp);
-				continue;
-			}
-			/*
-			 * If we can't grab the flush lock then if the caller
-			 * really wanted us to give this our best shot, so
-			 * see if we can give a push to the buffer before we wait
-			 * on the flush lock. At this point, we know that
-			 * even though the dquot is being flushed,
-			 * it has (new) dirty data.
-			 */
-			xfs_qm_dqflock_pushbuf_wait(dqp);
-		}
-		/*
-		 * Let go of the mplist lock. We don't want to hold it
-		 * across a disk write
-		 */
-		mutex_unlock(&q->qi_dqlist_lock);
-		error = xfs_qm_dqflush(dqp, flags);
-		xfs_dqunlock(dqp);
-		if (error && XFS_FORCED_SHUTDOWN(mp))
-			return 0;	/* Need to prevent umount failure */
-		else if (error)
-			return error;
-
-		mutex_lock(&q->qi_dqlist_lock);
-		if (recl != q->qi_dqreclaims) {
-			if (++restarts >= XFS_QM_SYNC_MAX_RESTARTS)
-				break;
-
-			mutex_unlock(&q->qi_dqlist_lock);
-			goto again;
-		}
-	}
-
-	mutex_unlock(&q->qi_dqlist_lock);
-	return 0;
-}
-
 /*
  * The hash chains and the mplist use the same xfs_dqhash structure as
  * their list head, but we can take the mplist qh_lock and one of the
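A note on the removed loop: it had to drop qi_dqlist_lock around each xfs_qm_dqflush() call, so it sampled q->qi_dqreclaims beforehand and restarted the whole walk, capped at XFS_QM_SYNC_MAX_RESTARTS, whenever the counter had moved, meaning a reclaim had modified the list in the meantime. The same generation-counter pattern in a self-contained sketch (hypothetical names, not kernel code):

/*
 * Self-contained model of the generation-counter restart pattern used by
 * the removed xfs_qm_sync(). Hypothetical types; not kernel code.
 */
#include <stdbool.h>
#include <stdio.h>

#define MAX_RESTARTS	7	/* mirrors XFS_QM_SYNC_MAX_RESTARTS */

struct qlist {
	unsigned int	generation;	/* bumped whenever the list changes */
	int		nitems;
};

/*
 * Flushing one item requires dropping the list lock, so the list may
 * change underneath us; report whether the walk is still safe.
 */
static bool flush_one(struct qlist *q, int i)
{
	unsigned int gen = q->generation;	/* sample before unlocking */

	/* ... unlock, write the item to disk, relock ... */
	if (i == 1)
		q->generation++;		/* simulate a concurrent reclaim */

	return gen == q->generation;		/* still safe to continue? */
}

static void sync_all(struct qlist *q)
{
	int restarts = 0;

again:
	for (int i = 0; i < q->nitems; i++) {
		if (!flush_one(q, i)) {
			if (++restarts >= MAX_RESTARTS)
				break;		/* give up, as the kernel did */
			printf("list changed, restarting walk\n");
			goto again;
		}
	}
}

int main(void)
{
	struct qlist q = { .generation = 0, .nitems = 3 };

	sync_all(&q);
	return 0;
}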
@@ -32,12 +32,6 @@ extern struct xfs_qm *xfs_Gqm;
 extern kmem_zone_t	*qm_dqzone;
 extern kmem_zone_t	*qm_dqtrxzone;
 
-/*
- * Used in xfs_qm_sync called by xfs_sync to count the max times that it can
- * iterate over the mountpt's dquot list in one call.
- */
-#define XFS_QM_SYNC_MAX_RESTARTS	7
-
 /*
  * Ditto, for xfs_qm_dqreclaim_one.
  */
@@ -326,7 +326,6 @@ extern int xfs_qm_dqattach_locked(struct xfs_inode *, uint);
 extern void xfs_qm_dqdetach(struct xfs_inode *);
 extern void xfs_qm_dqrele(struct xfs_dquot *);
 extern void xfs_qm_statvfs(struct xfs_inode *, struct kstatfs *);
-extern int xfs_qm_sync(struct xfs_mount *, int);
 extern int xfs_qm_newmount(struct xfs_mount *, uint *, uint *);
 extern void xfs_qm_mount_quotas(struct xfs_mount *);
 extern void xfs_qm_unmount(struct xfs_mount *);
@@ -366,10 +365,6 @@ static inline int xfs_trans_reserve_quota_bydquots(struct xfs_trans *tp,
 #define xfs_qm_dqdetach(ip)
 #define xfs_qm_dqrele(d)
 #define xfs_qm_statvfs(ip, s)
-static inline int xfs_qm_sync(struct xfs_mount *mp, int flags)
-{
-	return 0;
-}
 #define xfs_qm_newmount(mp, a, b)	(0)
 #define xfs_qm_mount_quotas(mp)
 #define xfs_qm_unmount(mp)
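The hunk above shows the usual pattern for CONFIG_XFS_QUOTA=n builds: a function whose return value callers check gets a typed static inline stub returning success, while calls with no used result are compiled away as empty macros. A reduced illustration that compiles on its own; CONFIG_FOO and the foo_* names are hypothetical stand-ins:

/*
 * Reduced illustration of the config-stub pattern in the hunk above.
 * CONFIG_FOO, foo_sync and foo_release are hypothetical stand-ins.
 */
struct mount;				/* opaque, like struct xfs_mount here */

#ifdef CONFIG_FOO
int	foo_sync(struct mount *mp, int flags);
void	foo_release(struct mount *mp);
#else
/*
 * Callers check foo_sync()'s return value, so it needs a typed
 * static inline stub that still reports success; foo_release()
 * returns nothing, so an empty macro compiles it away entirely.
 */
static inline int foo_sync(struct mount *mp, int flags)
{
	return 0;
}
#define foo_release(mp)
#endif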
@@ -1025,17 +1025,10 @@ xfs_fs_sync_fs(
 	int			error;
 
 	/*
-	 * Not much we can do for the first async pass. Writing out the
-	 * superblock would be counter-productive as we are going to redirty
-	 * when writing out other data and metadata (and writing out a single
-	 * block is quite fast anyway).
-	 *
-	 * Try to asynchronously kick off quota syncing at least.
+	 * Doing anything during the async pass would be counterproductive.
 	 */
-	if (!wait) {
-		xfs_qm_sync(mp, SYNC_TRYLOCK);
+	if (!wait)
 		return 0;
-	}
 
 	error = xfs_quiesce_data(mp);
 	if (error)
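For context on the wait argument: the VFS calls ->sync_fs() twice on the sync(2) path, first with wait == 0 (kick off writeback without blocking) and then with wait == 1 (must not return before everything is stable). After this patch the async pass is a true no-op. A toy model of that contract, hypothetical names throughout:

/*
 * Userspace model of the two-pass VFS sync contract that the hunk above
 * simplifies: wait == 0 starts writeback, wait == 1 must not return until
 * data is durable. Hypothetical names, not kernel code.
 */
#include <stdio.h>

struct fs { int dirty; };

static int example_sync_fs(struct fs *fs, int wait)
{
	if (!wait)
		return 0;	/* async pass: a no-op after this patch */

	/* blocking pass: flush and wait (stands in for xfs_quiesce_data()) */
	fs->dirty = 0;
	printf("flushed and stable\n");
	return 0;
}

int main(void)
{
	struct fs fs = { .dirty = 1 };

	example_sync_fs(&fs, 0);	/* sync(2) first calls with wait == 0 */
	example_sync_fs(&fs, 1);	/* ...then with wait == 1 */
	return 0;
}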
@@ -359,10 +359,7 @@ xfs_quiesce_data(
 {
 	int			error, error2 = 0;
 
-	xfs_qm_sync(mp, SYNC_TRYLOCK);
-	xfs_qm_sync(mp, SYNC_WAIT);
-
-	/* force out the newly dirtied log buffers */
+	/* force out the log */
 	xfs_log_force(mp, XFS_LOG_SYNC);
 
 	/* write superblock and hoover up shutdown errors */
@@ -470,7 +467,6 @@ xfs_sync_worker(
 			error = xfs_fs_log_dummy(mp);
 		else
 			xfs_log_force(mp, 0);
-		error = xfs_qm_sync(mp, SYNC_TRYLOCK);
 
 		/* start pushing all the metadata that is currently dirty */
 		xfs_ail_push_all(mp->m_ail);
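With the quota walk gone, the periodic worker's job reduces to forcing the log and pushing the AIL, which is exactly what the surviving context lines do. A compact model under the same hypothetical naming as the sketch at the top:

/*
 * Compact model of the periodic sync worker after this patch:
 * force the log, then push the AIL -- no quota-specific step left.
 * Hypothetical names, not the kernel functions.
 */
#include <stdio.h>

static void log_force(void)
{
	printf("log forced\n");		/* make logged changes stable */
}

static void ail_push_all(void)
{
	printf("AIL pushed: inodes, buffers, dquots\n");
}

static void sync_worker(void)
{
	log_force();
	ail_push_all();			/* starts writeback of all dirty metadata */
}

int main(void)
{
	sync_worker();			/* xfssyncd would run this periodically */
	return 0;
}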