/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_log.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_trans_priv.h"
#include "xfs_bmap_btree.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_error.h"
#include "xfs_trace.h"


kmem_zone_t	*xfs_ili_zone;		/* inode log item zone */

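/* Convert from a generic log item back to the inode log item that embeds it. */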
static inline struct xfs_inode_log_item *INODE_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_inode_log_item, ili_item);
}


/*
 * This returns the number of iovecs needed to log the given inode item.
 *
 * We need one iovec for the inode log format structure, one for the
 * inode core, and possibly one for the inode data/extents/b-tree root
 * and one for the inode attribute data/extents/b-tree root.
 */
STATIC uint
xfs_inode_item_size(
	struct xfs_log_item	*lip)
{
	struct xfs_inode_log_item *iip = INODE_ITEM(lip);
	struct xfs_inode	*ip = iip->ili_inode;
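	/* the log format structure and the inode core are always logged */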
	uint			nvecs = 2;

	switch (ip->i_d.di_format) {
	case XFS_DINODE_FMT_EXTENTS:
		if ((iip->ili_fields & XFS_ILOG_DEXT) &&
		    ip->i_d.di_nextents > 0 &&
		    ip->i_df.if_bytes > 0)
			nvecs++;
		break;

	case XFS_DINODE_FMT_BTREE:
		if ((iip->ili_fields & XFS_ILOG_DBROOT) &&
		    ip->i_df.if_broot_bytes > 0)
			nvecs++;
		break;

	case XFS_DINODE_FMT_LOCAL:
		if ((iip->ili_fields & XFS_ILOG_DDATA) &&
		    ip->i_df.if_bytes > 0)
			nvecs++;
		break;

	case XFS_DINODE_FMT_DEV:
	case XFS_DINODE_FMT_UUID:
		break;

	default:
		ASSERT(0);
		break;
	}

	if (!XFS_IFORK_Q(ip))
		return nvecs;

	/*
	 * Log any necessary attribute data.
	 */
	switch (ip->i_d.di_aformat) {
	case XFS_DINODE_FMT_EXTENTS:
		if ((iip->ili_fields & XFS_ILOG_AEXT) &&
		    ip->i_d.di_anextents > 0 &&
		    ip->i_afp->if_bytes > 0)
			nvecs++;
		break;

	case XFS_DINODE_FMT_BTREE:
		if ((iip->ili_fields & XFS_ILOG_ABROOT) &&
		    ip->i_afp->if_broot_bytes > 0)
			nvecs++;
		break;

	case XFS_DINODE_FMT_LOCAL:
		if ((iip->ili_fields & XFS_ILOG_ADATA) &&
		    ip->i_afp->if_bytes > 0)
			nvecs++;
		break;

	default:
		ASSERT(0);
		break;
	}

	return nvecs;
}

/*
 * xfs_inode_item_format_extents - convert in-core extents to on-disk form
 *
 * For either the data or attr fork in extent format, we need to endian convert
 * the in-core extents as we place them into the on-disk inode. In this case,
 * we need to do this conversion before we write the extents into the log.
 * Because we don't have the disk inode to write into here, we allocate a
 * buffer and format the extents into it via xfs_iextents_copy(). We free the
 * buffer in the unlock routine after the copy for the log has been made.
 *
 * In the case of the data fork, the in-core and on-disk fork sizes can be
 * different due to delayed allocation extents. We only log on-disk extents
 * here, so always use the physical fork size to determine the size of the
 * buffer we need to allocate.
 */
STATIC void
xfs_inode_item_format_extents(
	struct xfs_inode	*ip,
	struct xfs_log_iovec	*vecp,
	int			whichfork,
	int			type)
{
	xfs_bmbt_rec_t		*ext_buffer;

	ext_buffer = kmem_alloc(XFS_IFORK_SIZE(ip, whichfork), KM_SLEEP);
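	/*
	 * Stash the buffer in the log item so that xfs_inode_item_unlock()
	 * can free it once the copy for the log has been made.
	 */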
	if (whichfork == XFS_DATA_FORK)
		ip->i_itemp->ili_extents_buf = ext_buffer;
	else
		ip->i_itemp->ili_aextents_buf = ext_buffer;

	vecp->i_addr = ext_buffer;
	vecp->i_len = xfs_iextents_copy(ip, ext_buffer, whichfork);
	vecp->i_type = type;
}

/*
 * This is called to fill in the vector of log iovecs for the
 * given inode log item.  It fills the first item with an inode
 * log format structure, the second with the on-disk inode structure,
 * and a possible third and/or fourth with the inode data/extents/b-tree
 * root and inode attribute data/extents/b-tree root.
 */
STATIC void
xfs_inode_item_format(
	struct xfs_log_item	*lip,
	struct xfs_log_iovec	*vecp)
{
	struct xfs_inode_log_item *iip = INODE_ITEM(lip);
	struct xfs_inode	*ip = iip->ili_inode;
	uint			nvecs;
	size_t			data_bytes;
	xfs_mount_t		*mp;

	vecp->i_addr = &iip->ili_format;
	vecp->i_len  = sizeof(xfs_inode_log_format_t);
	vecp->i_type = XLOG_REG_TYPE_IFORMAT;
	vecp++;
	nvecs	     = 1;

	vecp->i_addr = &ip->i_d;
	vecp->i_len  = sizeof(struct xfs_icdinode);
	vecp->i_type = XLOG_REG_TYPE_ICORE;
	vecp++;
	nvecs++;

	/*
	 * If this is really an old format inode, then we need to
	 * log it as such.  This means that we have to copy the link
	 * count from the new field to the old.  We don't have to worry
	 * about the new fields, because nothing trusts them as long as
	 * the old inode version number is there.  If the superblock already
	 * has a new version number, then we don't bother converting back.
	 */
	mp = ip->i_mount;
	ASSERT(ip->i_d.di_version == 1 || xfs_sb_version_hasnlink(&mp->m_sb));
	if (ip->i_d.di_version == 1) {
		if (!xfs_sb_version_hasnlink(&mp->m_sb)) {
			/*
			 * Convert it back.
			 */
			ASSERT(ip->i_d.di_nlink <= XFS_MAXLINK_1);
			ip->i_d.di_onlink = ip->i_d.di_nlink;
		} else {
			/*
			 * The superblock version has already been bumped,
			 * so just make the conversion to the new inode
			 * format permanent.
			 */
			ip->i_d.di_version = 2;
			ip->i_d.di_onlink = 0;
			memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad));
		}
	}

	switch (ip->i_d.di_format) {
	case XFS_DINODE_FMT_EXTENTS:
		iip->ili_fields &=
			~(XFS_ILOG_DDATA | XFS_ILOG_DBROOT |
			  XFS_ILOG_DEV | XFS_ILOG_UUID);

		if ((iip->ili_fields & XFS_ILOG_DEXT) &&
		    ip->i_d.di_nextents > 0 &&
		    ip->i_df.if_bytes > 0) {
			ASSERT(ip->i_df.if_u1.if_extents != NULL);
			ASSERT(ip->i_df.if_bytes / sizeof(xfs_bmbt_rec_t) > 0);
			ASSERT(iip->ili_extents_buf == NULL);
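
			/*
			 * On a big-endian (native) host the in-core and
			 * on-disk extent formats are identical, so when there
			 * are no delayed allocation extents we can log the
			 * extent array in place instead of copying it through
			 * a conversion buffer.
			 */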
#ifdef XFS_NATIVE_HOST
			if (ip->i_d.di_nextents == ip->i_df.if_bytes /
						   (uint)sizeof(xfs_bmbt_rec_t)) {
				/*
				 * There are no delayed allocation
				 * extents, so just point to the
				 * real extents array.
				 */
				vecp->i_addr = ip->i_df.if_u1.if_extents;
				vecp->i_len = ip->i_df.if_bytes;
				vecp->i_type = XLOG_REG_TYPE_IEXT;
			} else
#endif
			{
				xfs_inode_item_format_extents(ip, vecp,
					XFS_DATA_FORK, XLOG_REG_TYPE_IEXT);
			}
			ASSERT(vecp->i_len <= ip->i_df.if_bytes);
			iip->ili_format.ilf_dsize = vecp->i_len;
			vecp++;
			nvecs++;
		} else {
			iip->ili_fields &= ~XFS_ILOG_DEXT;
		}
		break;

	case XFS_DINODE_FMT_BTREE:
		iip->ili_fields &=
			~(XFS_ILOG_DDATA | XFS_ILOG_DEXT |
			  XFS_ILOG_DEV | XFS_ILOG_UUID);

		if ((iip->ili_fields & XFS_ILOG_DBROOT) &&
		    ip->i_df.if_broot_bytes > 0) {
			ASSERT(ip->i_df.if_broot != NULL);
			vecp->i_addr = ip->i_df.if_broot;
			vecp->i_len = ip->i_df.if_broot_bytes;
			vecp->i_type = XLOG_REG_TYPE_IBROOT;
			vecp++;
			nvecs++;
			iip->ili_format.ilf_dsize = ip->i_df.if_broot_bytes;
		} else {
			ASSERT(!(iip->ili_fields &
				 XFS_ILOG_DBROOT));
#ifdef XFS_TRANS_DEBUG
			if (iip->ili_root_size > 0) {
				ASSERT(iip->ili_root_size ==
				       ip->i_df.if_broot_bytes);
				ASSERT(memcmp(iip->ili_orig_root,
					    ip->i_df.if_broot,
					    iip->ili_root_size) == 0);
			} else {
				ASSERT(ip->i_df.if_broot_bytes == 0);
			}
#endif
			iip->ili_fields &= ~XFS_ILOG_DBROOT;
		}
		break;

	case XFS_DINODE_FMT_LOCAL:
		iip->ili_fields &=
			~(XFS_ILOG_DEXT | XFS_ILOG_DBROOT |
			  XFS_ILOG_DEV | XFS_ILOG_UUID);
		if ((iip->ili_fields & XFS_ILOG_DDATA) &&
		    ip->i_df.if_bytes > 0) {
			ASSERT(ip->i_df.if_u1.if_data != NULL);
			ASSERT(ip->i_d.di_size > 0);

			vecp->i_addr = ip->i_df.if_u1.if_data;
			/*
			 * Round i_bytes up to a word boundary.
			 * The underlying memory is guaranteed
			 * to be there by xfs_idata_realloc().
			 */
			data_bytes = roundup(ip->i_df.if_bytes, 4);
			ASSERT((ip->i_df.if_real_bytes == 0) ||
			       (ip->i_df.if_real_bytes == data_bytes));
			vecp->i_len = (int)data_bytes;
			vecp->i_type = XLOG_REG_TYPE_ILOCAL;
			vecp++;
			nvecs++;
			iip->ili_format.ilf_dsize = (unsigned)data_bytes;
		} else {
			iip->ili_fields &= ~XFS_ILOG_DDATA;
		}
		break;

	case XFS_DINODE_FMT_DEV:
		iip->ili_fields &=
			~(XFS_ILOG_DDATA | XFS_ILOG_DBROOT |
			  XFS_ILOG_DEXT | XFS_ILOG_UUID);
		if (iip->ili_fields & XFS_ILOG_DEV) {
			iip->ili_format.ilf_u.ilfu_rdev =
				ip->i_df.if_u2.if_rdev;
		}
		break;

	case XFS_DINODE_FMT_UUID:
		iip->ili_fields &=
			~(XFS_ILOG_DDATA | XFS_ILOG_DBROOT |
			  XFS_ILOG_DEXT | XFS_ILOG_DEV);
		if (iip->ili_fields & XFS_ILOG_UUID) {
			iip->ili_format.ilf_u.ilfu_uuid =
				ip->i_df.if_u2.if_uuid;
		}
		break;

	default:
		ASSERT(0);
		break;
	}

	/*
	 * If there are no attributes associated with the file, then we're done.
	 */
	if (!XFS_IFORK_Q(ip)) {
		iip->ili_fields &=
			~(XFS_ILOG_ADATA | XFS_ILOG_ABROOT | XFS_ILOG_AEXT);
		goto out;
	}

	switch (ip->i_d.di_aformat) {
	case XFS_DINODE_FMT_EXTENTS:
		iip->ili_fields &=
			~(XFS_ILOG_ADATA | XFS_ILOG_ABROOT);

		if ((iip->ili_fields & XFS_ILOG_AEXT) &&
		    ip->i_d.di_anextents > 0 &&
		    ip->i_afp->if_bytes > 0) {
			ASSERT(ip->i_afp->if_bytes / sizeof(xfs_bmbt_rec_t) ==
				ip->i_d.di_anextents);
			ASSERT(ip->i_afp->if_u1.if_extents != NULL);
#ifdef XFS_NATIVE_HOST
			/*
			 * There are no delayed allocation extents
			 * for attributes, so just point at the array.
			 */
			vecp->i_addr = ip->i_afp->if_u1.if_extents;
			vecp->i_len = ip->i_afp->if_bytes;
			vecp->i_type = XLOG_REG_TYPE_IATTR_EXT;
#else
			ASSERT(iip->ili_aextents_buf == NULL);
			xfs_inode_item_format_extents(ip, vecp,
				XFS_ATTR_FORK, XLOG_REG_TYPE_IATTR_EXT);
#endif
			iip->ili_format.ilf_asize = vecp->i_len;
			vecp++;
			nvecs++;
		} else {
			iip->ili_fields &= ~XFS_ILOG_AEXT;
		}
		break;

	case XFS_DINODE_FMT_BTREE:
		iip->ili_fields &=
			~(XFS_ILOG_ADATA | XFS_ILOG_AEXT);

		if ((iip->ili_fields & XFS_ILOG_ABROOT) &&
		    ip->i_afp->if_broot_bytes > 0) {
			ASSERT(ip->i_afp->if_broot != NULL);

			vecp->i_addr = ip->i_afp->if_broot;
			vecp->i_len = ip->i_afp->if_broot_bytes;
			vecp->i_type = XLOG_REG_TYPE_IATTR_BROOT;
			vecp++;
			nvecs++;
			iip->ili_format.ilf_asize = ip->i_afp->if_broot_bytes;
		} else {
			iip->ili_fields &= ~XFS_ILOG_ABROOT;
		}
		break;

	case XFS_DINODE_FMT_LOCAL:
		iip->ili_fields &=
			~(XFS_ILOG_AEXT | XFS_ILOG_ABROOT);

		if ((iip->ili_fields & XFS_ILOG_ADATA) &&
		    ip->i_afp->if_bytes > 0) {
			ASSERT(ip->i_afp->if_u1.if_data != NULL);

			vecp->i_addr = ip->i_afp->if_u1.if_data;
			/*
			 * Round i_bytes up to a word boundary.
			 * The underlying memory is guaranteed
			 * to be there by xfs_idata_realloc().
			 */
			data_bytes = roundup(ip->i_afp->if_bytes, 4);
			ASSERT((ip->i_afp->if_real_bytes == 0) ||
			       (ip->i_afp->if_real_bytes == data_bytes));
			vecp->i_len = (int)data_bytes;
			vecp->i_type = XLOG_REG_TYPE_IATTR_LOCAL;
			vecp++;
			nvecs++;
			iip->ili_format.ilf_asize = (unsigned)data_bytes;
		} else {
			iip->ili_fields &= ~XFS_ILOG_ADATA;
		}
		break;

	default:
		ASSERT(0);
		break;
	}

out:
	/*
	 * Now update the log format that goes out to disk from the in-core
	 * values.  We always write the inode core to make the arithmetic
	 * games in recovery easier, which isn't a big deal as just about any
	 * transaction would dirty it anyway.
	 */
	iip->ili_format.ilf_fields = XFS_ILOG_CORE |
		(iip->ili_fields & ~XFS_ILOG_TIMESTAMP);
	iip->ili_format.ilf_size = nvecs;
}

/*
 * This is called to pin the inode associated with the inode log
 * item in memory so it cannot be written out.
 */
STATIC void
xfs_inode_item_pin(
	struct xfs_log_item	*lip)
{
	struct xfs_inode	*ip = INODE_ITEM(lip)->ili_inode;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	trace_xfs_inode_pin(ip, _RET_IP_);
	atomic_inc(&ip->i_pincount);
}

/*
 * This is called to unpin the inode associated with the inode log
 * item which was previously pinned with a call to xfs_inode_item_pin().
 *
 * Also wake up anyone in xfs_iunpin_wait() if the count goes to 0.
 */
STATIC void
xfs_inode_item_unpin(
	struct xfs_log_item	*lip,
	int			remove)
{
	struct xfs_inode	*ip = INODE_ITEM(lip)->ili_inode;
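
	/* the @remove flag is not used for inode log items */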
|
2010-02-18 05:43:22 -07:00
|
|
|
|
2010-03-07 17:24:07 -07:00
|
|
|
trace_xfs_inode_unpin(ip, _RET_IP_);
|
2010-02-18 05:43:22 -07:00
|
|
|
ASSERT(atomic_read(&ip->i_pincount) > 0);
|
|
|
|
if (atomic_dec_and_test(&ip->i_pincount))
|
2011-12-18 13:00:10 -07:00
|
|
|
wake_up_bit(&ip->i_flags, __XFS_IPINNED_BIT);
|
2005-04-16 16:20:36 -06:00
|
|
|
}

STATIC uint
xfs_inode_item_push(
	struct xfs_log_item	*lip,
	struct list_head	*buffer_list)
{
	struct xfs_inode_log_item *iip = INODE_ITEM(lip);
	struct xfs_inode	*ip = iip->ili_inode;
	struct xfs_buf		*bp = NULL;
	uint			rval = XFS_ITEM_SUCCESS;
	int			error;

	if (xfs_ipincount(ip) > 0)
		return XFS_ITEM_PINNED;

	if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED))
		return XFS_ITEM_LOCKED;

	/*
	 * Re-check the pincount now that we stabilized the value by
	 * taking the ilock.
	 */
	if (xfs_ipincount(ip) > 0) {
		rval = XFS_ITEM_PINNED;
		goto out_unlock;
	}

	/*
	 * Someone else is already flushing the inode.  Nothing we can do
	 * here but wait for the flush to finish and remove the item from
	 * the AIL.
	 */
	if (!xfs_iflock_nowait(ip)) {
		rval = XFS_ITEM_FLUSHING;
		goto out_unlock;
	}

	/*
	 * Stale inode items should force out the iclog.
	 */
	if (ip->i_flags & XFS_ISTALE) {
		xfs_ifunlock(ip);
		xfs_iunlock(ip, XFS_ILOCK_SHARED);
		return XFS_ITEM_PINNED;
	}

	ASSERT(iip->ili_fields != 0 || XFS_FORCED_SHUTDOWN(ip->i_mount));
	ASSERT(iip->ili_logged == 0 || XFS_FORCED_SHUTDOWN(ip->i_mount));
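
	/*
	 * xfs_iflush() may block (e.g. reading in the inode cluster buffer),
	 * so we cannot hold the AIL spinlock across the flush.
	 */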
	spin_unlock(&lip->li_ailp->xa_lock);

	error = xfs_iflush(ip, &bp);
	if (!error) {
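		/*
		 * If the buffer is already queued on a delwri list,
		 * xfs_buf_delwri_queue() returns false and someone else is
		 * responsible for writing it, so treat the item as still
		 * flushing.
		 */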
		if (!xfs_buf_delwri_queue(bp, buffer_list))
			rval = XFS_ITEM_FLUSHING;
		xfs_buf_relse(bp);
	}

	spin_lock(&lip->li_ailp->xa_lock);
out_unlock:
	xfs_iunlock(ip, XFS_ILOCK_SHARED);
	return rval;
}

/*
 * Unlock the inode associated with the inode log item.
 * Clear the fields of the inode and inode log item that
 * are specific to the current transaction.  If the hold
 * flag is set, do not unlock the inode.
 */
STATIC void
xfs_inode_item_unlock(
	struct xfs_log_item	*lip)
{
	struct xfs_inode_log_item *iip = INODE_ITEM(lip);
	struct xfs_inode	*ip = iip->ili_inode;
	unsigned short		lock_flags;

	ASSERT(ip->i_itemp != NULL);
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	/*
	 * If the inode needed a separate buffer with which to log
	 * its extents, then free it now.
	 */
	if (iip->ili_extents_buf != NULL) {
		ASSERT(ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS);
		ASSERT(ip->i_d.di_nextents > 0);
		ASSERT(iip->ili_fields & XFS_ILOG_DEXT);
		ASSERT(ip->i_df.if_bytes > 0);
		kmem_free(iip->ili_extents_buf);
		iip->ili_extents_buf = NULL;
	}
	if (iip->ili_aextents_buf != NULL) {
		ASSERT(ip->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS);
		ASSERT(ip->i_d.di_anextents > 0);
		ASSERT(iip->ili_fields & XFS_ILOG_AEXT);
		ASSERT(ip->i_afp->if_bytes > 0);
		kmem_free(iip->ili_aextents_buf);
		iip->ili_aextents_buf = NULL;
	}

	lock_flags = iip->ili_lock_flags;
	iip->ili_lock_flags = 0;
	if (lock_flags)
		xfs_iunlock(ip, lock_flags);
}

/*
 * This is called to find out where the oldest active copy of the inode log
 * item in the on disk log resides now that the last log write of it completed
 * at the given lsn.  Since we always re-log all dirty data in an inode, the
 * latest copy in the on disk log is the only one that matters.  Therefore,
 * simply return the given lsn.
 *
 * If the inode has been marked stale because the cluster is being freed, we
 * don't want to (re-)insert this inode into the AIL. There is a race condition
 * where the cluster buffer may be unpinned before the inode is inserted into
 * the AIL during transaction committed processing. If the buffer is unpinned
 * before the inode item has been committed and inserted, then it is possible
 * for the buffer to be written and IO completes before the inode is inserted
 * into the AIL. In that case, we'd be inserting a clean, stale inode into the
 * AIL which will never get removed. It will, however, get reclaimed which
 * triggers an assert in xfs_inode_free() complaining about freeing an inode
 * still in the AIL.
 *
 * To avoid this, just unpin the inode directly and return a LSN of -1 so the
 * transaction committed code knows that it does not need to do any further
 * processing on the item.
 */
STATIC xfs_lsn_t
xfs_inode_item_committed(
	struct xfs_log_item	*lip,
	xfs_lsn_t		lsn)
{
	struct xfs_inode_log_item *iip = INODE_ITEM(lip);
	struct xfs_inode	*ip = iip->ili_inode;

	if (xfs_iflags_test(ip, XFS_ISTALE)) {
		xfs_inode_item_unpin(lip, 0);
		return -1;
	}
	return lsn;
}

/*
 * XXX rcc - this one really has to do something.  Probably needs
 * to stamp in a new field in the incore inode.
 */
STATIC void
xfs_inode_item_committing(
	struct xfs_log_item	*lip,
	xfs_lsn_t		lsn)
{
	INODE_ITEM(lip)->ili_last_lsn = lsn;
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* This is the ops vector shared by all buf log items.
|
|
|
|
*/
|
2011-10-28 03:54:24 -06:00
|
|
|
static const struct xfs_item_ops xfs_inode_item_ops = {
|
2010-06-23 02:11:15 -06:00
|
|
|
.iop_size = xfs_inode_item_size,
|
|
|
|
.iop_format = xfs_inode_item_format,
|
|
|
|
.iop_pin = xfs_inode_item_pin,
|
|
|
|
.iop_unpin = xfs_inode_item_unpin,
|
|
|
|
.iop_unlock = xfs_inode_item_unlock,
|
|
|
|
.iop_committed = xfs_inode_item_committed,
|
|
|
|
.iop_push = xfs_inode_item_push,
|
|
|
|
	.iop_committing	= xfs_inode_item_committing,
|
2005-04-16 16:20:36 -06:00
|
|
|
};
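/*
 * A minimal sketch of how this vector is dispatched, assuming the IOP_*
 * wrapper macros in xfs_trans.h (shape illustrative, not verbatim):
 *
 *	#define IOP_COMMITTED(ip, lsn) \
 *		(*(ip)->li_ops->iop_committed)(ip, lsn)
 *
 * The log and transaction code only ever calls through li_ops, so the
 * functions above stay private to this file.
 */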
|
|
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Initialize the inode log item for a newly allocated (in-core) inode.
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
xfs_inode_item_init(
|
2010-06-23 02:11:15 -06:00
|
|
|
struct xfs_inode *ip,
|
|
|
|
struct xfs_mount *mp)
|
2005-04-16 16:20:36 -06:00
|
|
|
{
|
2010-06-23 02:11:15 -06:00
|
|
|
struct xfs_inode_log_item *iip;
|
2005-04-16 16:20:36 -06:00
|
|
|
|
|
|
|
ASSERT(ip->i_itemp == NULL);
|
|
|
|
iip = ip->i_itemp = kmem_zone_zalloc(xfs_ili_zone, KM_SLEEP);
|
|
|
|
|
|
|
|
iip->ili_inode = ip;
|
2010-03-22 17:10:00 -06:00
|
|
|
xfs_log_item_init(mp, &iip->ili_item, XFS_LI_INODE,
|
|
|
|
&xfs_inode_item_ops);
|
2005-04-16 16:20:36 -06:00
|
|
|
iip->ili_format.ilf_type = XFS_LI_INODE;
|
|
|
|
iip->ili_format.ilf_ino = ip->i_ino;
|
2008-11-27 20:23:41 -07:00
|
|
|
iip->ili_format.ilf_blkno = ip->i_imap.im_blkno;
|
|
|
|
iip->ili_format.ilf_len = ip->i_imap.im_len;
|
|
|
|
iip->ili_format.ilf_boffset = ip->i_imap.im_boffset;
|
2005-04-16 16:20:36 -06:00
|
|
|
}
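/*
 * Hedged usage sketch: the log item is allocated lazily the first time
 * an inode joins a transaction, roughly as xfs_trans_ijoin() does
 * (shape assumed, not quoted verbatim):
 *
 *	if (ip->i_itemp == NULL)
 *		xfs_inode_item_init(ip, ip->i_mount);
 */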
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Free the inode log item and any memory hanging off of it.
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
xfs_inode_item_destroy(
|
|
|
|
xfs_inode_t *ip)
|
|
|
|
{
|
|
|
|
#ifdef XFS_TRANS_DEBUG
|
|
|
|
if (ip->i_itemp->ili_root_size != 0) {
|
2008-05-19 00:31:57 -06:00
|
|
|
kmem_free(ip->i_itemp->ili_orig_root);
|
2005-04-16 16:20:36 -06:00
|
|
|
}
|
|
|
|
#endif
|
|
|
|
kmem_zone_free(xfs_ili_zone, ip->i_itemp);
|
|
|
|
}
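/*
 * Hedged lifecycle note: this pairs with xfs_inode_item_init() above and
 * is typically reached when the in-core inode itself is being freed,
 * roughly (shape assumed, not quoted verbatim):
 *
 *	if (ip->i_itemp) {
 *		xfs_inode_item_destroy(ip);
 *		ip->i_itemp = NULL;
 *	}
 */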
|
|
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
* This is the inode flushing I/O completion routine. It is called
|
|
|
|
* from interrupt level when the buffer containing the inode is
|
|
|
|
* flushed to disk. It is responsible for removing the inode item
|
|
|
|
* from the AIL if it has not been re-logged, and unlocking the inode's
|
|
|
|
* flush lock.
|
2010-12-19 18:03:17 -07:00
|
|
|
*
|
|
|
|
* To reduce AIL lock traffic as much as possible, we scan the buffer log item
|
|
|
|
* list for other inodes that will run this function. We remove them from the
|
|
|
|
* buffer list so we can process all the inode IO completions in one AIL lock
|
|
|
|
* traversal.
|
2005-04-16 16:20:36 -06:00
|
|
|
*/
|
|
|
|
void
|
|
|
|
xfs_iflush_done(
|
2010-06-23 02:11:15 -06:00
|
|
|
struct xfs_buf *bp,
|
|
|
|
struct xfs_log_item *lip)
|
2005-04-16 16:20:36 -06:00
|
|
|
{
|
2010-12-19 18:03:17 -07:00
|
|
|
struct xfs_inode_log_item *iip;
|
|
|
|
struct xfs_log_item *blip;
|
|
|
|
struct xfs_log_item *next;
|
|
|
|
struct xfs_log_item *prev;
|
2010-06-23 02:11:15 -06:00
|
|
|
struct xfs_ail *ailp = lip->li_ailp;
|
2010-12-19 18:03:17 -07:00
|
|
|
int need_ail = 0;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Scan the buffer IO completions for other inodes being completed and
|
|
|
|
* attach them to the current inode log item.
|
|
|
|
*/
|
2011-07-13 05:43:49 -06:00
|
|
|
blip = bp->b_fspriv;
|
2010-12-19 18:03:17 -07:00
|
|
|
prev = NULL;
|
|
|
|
while (blip != NULL) {
|
|
|
|
if (blip->li_cb != xfs_iflush_done) {
|
|
|
|
prev = blip;
|
|
|
|
blip = blip->li_bio_list;
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* remove from list */
|
|
|
|
next = blip->li_bio_list;
|
|
|
|
if (!prev) {
|
2011-07-13 05:43:49 -06:00
|
|
|
bp->b_fspriv = next;
|
2010-12-19 18:03:17 -07:00
|
|
|
} else {
|
|
|
|
prev->li_bio_list = next;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* add to current list */
|
|
|
|
blip->li_bio_list = lip->li_bio_list;
|
|
|
|
lip->li_bio_list = blip;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* while we have the item, do the unlocked check for needing
|
|
|
|
* the AIL lock.
|
|
|
|
*/
|
|
|
|
iip = INODE_ITEM(blip);
|
|
|
|
if (iip->ili_logged && blip->li_lsn == iip->ili_flush_lsn)
|
|
|
|
need_ail++;
|
|
|
|
|
|
|
|
blip = next;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* make sure we capture the state of the initial inode. */
|
|
|
|
iip = INODE_ITEM(lip);
|
|
|
|
if (iip->ili_logged && lip->li_lsn == iip->ili_flush_lsn)
|
|
|
|
need_ail++;
|
2005-04-16 16:20:36 -06:00
|
|
|
|
|
|
|
/*
|
|
|
|
* We only want to pull the item from the AIL if it is
|
|
|
|
* actually there and its location in the log has not
|
|
|
|
* changed since we started the flush. Thus, we only bother
|
|
|
|
* if the ili_logged flag is set and the inode's lsn has not
|
|
|
|
* changed. First we check the lsn outside
|
|
|
|
* the lock since it's cheaper, and then we recheck while
|
|
|
|
* holding the lock before removing the inode from the AIL.
|
|
|
|
*/
|
2010-12-19 18:03:17 -07:00
|
|
|
if (need_ail) {
|
|
|
|
struct xfs_log_item *log_items[need_ail];
|
|
|
|
int i = 0;
|
2008-10-30 00:39:58 -06:00
|
|
|
spin_lock(&ailp->xa_lock);
|
2010-12-19 18:03:17 -07:00
|
|
|
for (blip = lip; blip; blip = blip->li_bio_list) {
|
|
|
|
iip = INODE_ITEM(blip);
|
|
|
|
if (iip->ili_logged &&
|
|
|
|
blip->li_lsn == iip->ili_flush_lsn) {
|
|
|
|
log_items[i++] = blip;
|
|
|
|
}
|
|
|
|
ASSERT(i <= need_ail);
|
2005-04-16 16:20:36 -06:00
|
|
|
}
|
2010-12-19 18:03:17 -07:00
|
|
|
/* xfs_trans_ail_delete_bulk() drops the AIL lock. */
|
2012-04-22 23:58:41 -06:00
|
|
|
xfs_trans_ail_delete_bulk(ailp, log_items, i,
|
|
|
|
SHUTDOWN_CORRUPT_INCORE);
|
2005-04-16 16:20:36 -06:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
|
2010-12-19 18:03:17 -07:00
|
|
|
 * clean up and unlock the flush lock now that we are done. We can clear the
|
|
|
|
* ili_last_fields bits now that we know that the data corresponding to
|
|
|
|
* them is safely on disk.
|
2005-04-16 16:20:36 -06:00
|
|
|
*/
|
2010-12-19 18:03:17 -07:00
|
|
|
for (blip = lip; blip; blip = next) {
|
|
|
|
next = blip->li_bio_list;
|
|
|
|
blip->li_bio_list = NULL;
|
|
|
|
|
|
|
|
iip = INODE_ITEM(blip);
|
|
|
|
iip->ili_logged = 0;
|
|
|
|
iip->ili_last_fields = 0;
|
|
|
|
xfs_ifunlock(iip->ili_inode);
|
|
|
|
}
|
2005-04-16 16:20:36 -06:00
|
|
|
}
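/*
 * Hedged sketch of how the li_bio_list chain walked above gets built:
 * when xfs_iflush() writes an inode into its backing buffer, it attaches
 * this routine as the buffer I/O completion callback, roughly via:
 *
 *	xfs_buf_attach_iodone(bp, xfs_iflush_done, &iip->ili_item);
 *
 * Every inode flushed into the same buffer chains its log item on, which
 * is why one completion can retire many inodes in a single AIL pass.
 */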
|
|
|
|
|
|
|
|
/*
|
2012-04-22 23:58:41 -06:00
|
|
|
* This is the inode flushing abort routine. It is called from xfs_iflush when
|
|
|
|
* the filesystem is shutting down to clean up the inode state. It is
|
|
|
|
* responsible for removing the inode item from the AIL if it has not been
|
|
|
|
* re-logged, and unlocking the inode's flush lock.
|
2005-04-16 16:20:36 -06:00
|
|
|
*/
|
|
|
|
void
|
|
|
|
xfs_iflush_abort(
|
2012-04-22 23:58:41 -06:00
|
|
|
xfs_inode_t *ip,
|
|
|
|
bool stale)
|
2005-04-16 16:20:36 -06:00
|
|
|
{
|
2008-10-30 00:39:58 -06:00
|
|
|
xfs_inode_log_item_t *iip = ip->i_itemp;
|
2005-04-16 16:20:36 -06:00
|
|
|
|
|
|
|
if (iip) {
|
2008-10-30 00:39:58 -06:00
|
|
|
struct xfs_ail *ailp = iip->ili_item.li_ailp;
|
2005-04-16 16:20:36 -06:00
|
|
|
if (iip->ili_item.li_flags & XFS_LI_IN_AIL) {
|
2008-10-30 00:39:58 -06:00
|
|
|
spin_lock(&ailp->xa_lock);
|
2005-04-16 16:20:36 -06:00
|
|
|
if (iip->ili_item.li_flags & XFS_LI_IN_AIL) {
|
2008-10-30 00:39:58 -06:00
|
|
|
/* xfs_trans_ail_delete() drops the AIL lock. */
|
2012-04-22 23:58:41 -06:00
|
|
|
xfs_trans_ail_delete(ailp, &iip->ili_item,
|
|
|
|
stale ?
|
|
|
|
SHUTDOWN_LOG_IO_ERROR :
|
|
|
|
SHUTDOWN_CORRUPT_INCORE);
|
2005-04-16 16:20:36 -06:00
|
|
|
} else {
|
2008-10-30 00:39:58 -06:00
|
|
|
spin_unlock(&ailp->xa_lock);
}
|
2005-04-16 16:20:36 -06:00
|
|
|
}
|
|
|
|
iip->ili_logged = 0;
|
|
|
|
/*
|
|
|
|
* Clear the ili_last_fields bits now that we know that the
|
|
|
|
* data corresponding to them is safely on disk.
|
|
|
|
*/
|
|
|
|
iip->ili_last_fields = 0;
|
|
|
|
/*
|
|
|
|
* Clear the inode logging fields so no more flushes are
|
|
|
|
* attempted.
|
|
|
|
*/
|
2012-02-29 02:53:54 -07:00
|
|
|
iip->ili_fields = 0;
|
2005-04-16 16:20:36 -06:00
|
|
|
}
|
|
|
|
/*
|
|
|
|
* Release the inode's flush lock since we're done with it.
|
|
|
|
*/
|
|
|
|
xfs_ifunlock(ip);
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
xfs_istale_done(
|
2010-06-23 02:11:15 -06:00
|
|
|
struct xfs_buf *bp,
|
|
|
|
struct xfs_log_item *lip)
|
2005-04-16 16:20:36 -06:00
|
|
|
{
|
2012-04-22 23:58:41 -06:00
|
|
|
xfs_iflush_abort(INODE_ITEM(lip)->ili_inode, true);
|
2005-04-16 16:20:36 -06:00
|
|
|
}
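/*
 * Hedged usage sketch: xfs_istale_done() is attached as the buffer I/O
 * completion callback for stale inodes, roughly as xfs_ifree_cluster()
 * does (shape assumed, not quoted verbatim):
 *
 *	xfs_buf_attach_iodone(bp, xfs_istale_done, &iip->ili_item);
 *
 * so a stale inode's flush state is torn down when the cluster buffer
 * completes, instead of the inode ever being written back.
 */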
|
2006-06-08 22:55:38 -06:00
|
|
|
|
|
|
|
/*
|
|
|
|
* convert an xfs_inode_log_format struct from either 32 or 64 bit versions
|
|
|
|
* (which can have different field alignments) to the native version
|
|
|
|
*/
|
|
|
|
int
|
|
|
|
xfs_inode_item_format_convert(
|
|
|
|
xfs_log_iovec_t *buf,
|
|
|
|
xfs_inode_log_format_t *in_f)
|
|
|
|
{
|
|
|
|
if (buf->i_len == sizeof(xfs_inode_log_format_32_t)) {
|
2010-06-23 02:11:15 -06:00
|
|
|
xfs_inode_log_format_32_t *in_f32 = buf->i_addr;
|
2006-06-08 22:55:38 -06:00
|
|
|
|
|
|
|
in_f->ilf_type = in_f32->ilf_type;
|
|
|
|
in_f->ilf_size = in_f32->ilf_size;
|
|
|
|
in_f->ilf_fields = in_f32->ilf_fields;
|
|
|
|
in_f->ilf_asize = in_f32->ilf_asize;
|
|
|
|
in_f->ilf_dsize = in_f32->ilf_dsize;
|
|
|
|
in_f->ilf_ino = in_f32->ilf_ino;
|
|
|
|
/* copy biggest field of ilf_u */
|
|
|
|
memcpy(in_f->ilf_u.ilfu_uuid.__u_bits,
|
|
|
|
in_f32->ilf_u.ilfu_uuid.__u_bits,
|
|
|
|
sizeof(uuid_t));
|
|
|
|
in_f->ilf_blkno = in_f32->ilf_blkno;
|
|
|
|
in_f->ilf_len = in_f32->ilf_len;
|
|
|
|
in_f->ilf_boffset = in_f32->ilf_boffset;
|
|
|
|
return 0;
|
|
|
|
} else if (buf->i_len == sizeof(xfs_inode_log_format_64_t)) {
|
2010-06-23 02:11:15 -06:00
|
|
|
xfs_inode_log_format_64_t *in_f64 = buf->i_addr;
|
2006-06-08 22:55:38 -06:00
|
|
|
|
|
|
|
in_f->ilf_type = in_f64->ilf_type;
|
|
|
|
in_f->ilf_size = in_f64->ilf_size;
|
|
|
|
in_f->ilf_fields = in_f64->ilf_fields;
|
|
|
|
in_f->ilf_asize = in_f64->ilf_asize;
|
|
|
|
in_f->ilf_dsize = in_f64->ilf_dsize;
|
|
|
|
in_f->ilf_ino = in_f64->ilf_ino;
|
|
|
|
/* copy biggest field of ilf_u */
|
|
|
|
memcpy(in_f->ilf_u.ilfu_uuid.__u_bits,
|
|
|
|
in_f64->ilf_u.ilfu_uuid.__u_bits,
|
|
|
|
sizeof(uuid_t));
|
|
|
|
in_f->ilf_blkno = in_f64->ilf_blkno;
|
|
|
|
in_f->ilf_len = in_f64->ilf_len;
|
|
|
|
in_f->ilf_boffset = in_f64->ilf_boffset;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
return EFSCORRUPTED;
|
|
|
|
}
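/*
 * Hedged usage sketch: log recovery calls this conversion only when the
 * logged region size does not match the native structure, roughly as
 * xlog_recover_inode_pass2() does (shape assumed, not quoted verbatim):
 *
 *	if (item->ri_buf[0].i_len == sizeof(xfs_inode_log_format_t))
 *		in_f = item->ri_buf[0].i_addr;
 *	else
 *		error = xfs_inode_item_format_convert(&item->ri_buf[0], in_f);
 */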
|