Merge branch 'for-linus' of git://oss.sgi.com/xfs/xfs
* 'for-linus' of git://oss.sgi.com/xfs/xfs: (61 commits)
  Revert "xfs: increase the maximum number of supported ACL entries"
  xfs: cleanup uuid handling
  xfs: remove m_attroffset
  xfs: fix various typos
  xfs: pagecache usage optimization
  xfs: remove m_litino
  xfs: kill ino64 mount option
  xfs: kill mutex_t typedef
  xfs: increase the maximum number of supported ACL entries
  xfs: factor out code to find the longest free extent in the AG
  xfs: kill VN_BAD
  xfs: kill vn_atime_* helpers.
  xfs: cleanup xlog_bread
  xfs: cleanup xlog_recover_do_trans
  xfs: remove another leftover of the old inode log item format
  xfs: cleanup log unmount handling
  Fix xfs debug build breakage by pushing xfs_error.h after
  xfs: include header files for prototypes
  xfs: make symbols static
  xfs: move declaration to header file
  ...
This commit is contained in:
commit ac7c1a776d
69 changed files with 1026 additions and 1594 deletions
@@ -4969,7 +4969,8 @@ S: Supported

XFS FILESYSTEM
P: Silicon Graphics Inc
P: Bill O'Donnell
P: Felix Blyakher
M: felixb@sgi.com
M: xfs-masters@oss.sgi.com
L: xfs@oss.sgi.com
W: http://oss.sgi.com/projects/xfs
@@ -33,6 +33,7 @@ xfs-$(CONFIG_XFS_QUOTA) += $(addprefix quota/, \
    xfs_qm_syscalls.o \
    xfs_qm_bhv.o \
    xfs_qm.o)
xfs-$(CONFIG_XFS_QUOTA) += linux-2.6/xfs_quotaops.o

ifeq ($(CONFIG_XFS_QUOTA),y)
xfs-$(CONFIG_PROC_FS) += quota/xfs_qm_stats.o
@@ -1,25 +0,0 @@
/*
 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#ifndef __XFS_SUPPORT_MUTEX_H__
#define __XFS_SUPPORT_MUTEX_H__

#include <linux/mutex.h>

typedef struct mutex mutex_t;

#endif /* __XFS_SUPPORT_MUTEX_H__ */
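The header deleted above is what provided XFS's private mutex_t alias; the "xfs: kill mutex_t typedef" commit in this series switches the remaining users over to the kernel's struct mutex and the <linux/mutex.h> calls directly. A minimal sketch of the before/after pattern (the container struct and function names below are made up for illustration; only the mutex API itself is real):

    #include <linux/mutex.h>

    /* before: a field declared with the XFS-private alias */
    /*     mutex_t      qh_lock; */
    /* after: the kernel type spelled out */
    struct example_hash {
        struct mutex qh_lock;       /* hypothetical container */
    };

    static void example_use(struct example_hash *h)
    {
        mutex_init(&h->qh_lock);    /* standard <linux/mutex.h> API */
        mutex_lock(&h->qh_lock);
        /* ... critical section ... */
        mutex_unlock(&h->qh_lock);
    }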
@@ -1623,4 +1623,5 @@ const struct address_space_operations xfs_address_space_operations = {
    .bmap = xfs_vm_bmap,
    .direct_IO = xfs_vm_direct_IO,
    .migratepage = buffer_migrate_page,
    .is_partially_uptodate = block_is_partially_uptodate,
};
@@ -34,6 +34,7 @@
#include "xfs_dir2_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_ioctl.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_rtalloc.h"
@ -78,92 +79,74 @@ xfs_find_handle(
|
|||
int hsize;
|
||||
xfs_handle_t handle;
|
||||
struct inode *inode;
|
||||
struct file *file = NULL;
|
||||
struct path path;
|
||||
int error;
|
||||
struct xfs_inode *ip;
|
||||
|
||||
memset((char *)&handle, 0, sizeof(handle));
|
||||
|
||||
switch (cmd) {
|
||||
case XFS_IOC_PATH_TO_FSHANDLE:
|
||||
case XFS_IOC_PATH_TO_HANDLE: {
|
||||
struct path path;
|
||||
int error = user_lpath((const char __user *)hreq->path, &path);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
ASSERT(path.dentry);
|
||||
ASSERT(path.dentry->d_inode);
|
||||
inode = igrab(path.dentry->d_inode);
|
||||
path_put(&path);
|
||||
break;
|
||||
}
|
||||
|
||||
case XFS_IOC_FD_TO_HANDLE: {
|
||||
struct file *file;
|
||||
|
||||
if (cmd == XFS_IOC_FD_TO_HANDLE) {
|
||||
file = fget(hreq->fd);
|
||||
if (!file)
|
||||
return -EBADF;
|
||||
|
||||
ASSERT(file->f_path.dentry);
|
||||
ASSERT(file->f_path.dentry->d_inode);
|
||||
inode = igrab(file->f_path.dentry->d_inode);
|
||||
fput(file);
|
||||
break;
|
||||
return -EBADF;
|
||||
inode = file->f_path.dentry->d_inode;
|
||||
} else {
|
||||
error = user_lpath((const char __user *)hreq->path, &path);
|
||||
if (error)
|
||||
return error;
|
||||
inode = path.dentry->d_inode;
|
||||
}
|
||||
ip = XFS_I(inode);
|
||||
|
||||
default:
|
||||
ASSERT(0);
|
||||
return -XFS_ERROR(EINVAL);
|
||||
}
|
||||
/*
|
||||
* We can only generate handles for inodes residing on a XFS filesystem,
|
||||
* and only for regular files, directories or symbolic links.
|
||||
*/
|
||||
error = -EINVAL;
|
||||
if (inode->i_sb->s_magic != XFS_SB_MAGIC)
|
||||
goto out_put;
|
||||
|
||||
if (inode->i_sb->s_magic != XFS_SB_MAGIC) {
|
||||
/* we're not in XFS anymore, Toto */
|
||||
iput(inode);
|
||||
return -XFS_ERROR(EINVAL);
|
||||
}
|
||||
error = -EBADF;
|
||||
if (!S_ISREG(inode->i_mode) &&
|
||||
!S_ISDIR(inode->i_mode) &&
|
||||
!S_ISLNK(inode->i_mode))
|
||||
goto out_put;
|
||||
|
||||
switch (inode->i_mode & S_IFMT) {
|
||||
case S_IFREG:
|
||||
case S_IFDIR:
|
||||
case S_IFLNK:
|
||||
break;
|
||||
default:
|
||||
iput(inode);
|
||||
return -XFS_ERROR(EBADF);
|
||||
}
|
||||
|
||||
/* now we can grab the fsid */
|
||||
memcpy(&handle.ha_fsid, XFS_I(inode)->i_mount->m_fixedfsid,
|
||||
sizeof(xfs_fsid_t));
|
||||
hsize = sizeof(xfs_fsid_t);
|
||||
memcpy(&handle.ha_fsid, ip->i_mount->m_fixedfsid, sizeof(xfs_fsid_t));
|
||||
|
||||
if (cmd != XFS_IOC_PATH_TO_FSHANDLE) {
|
||||
xfs_inode_t *ip = XFS_I(inode);
|
||||
if (cmd == XFS_IOC_PATH_TO_FSHANDLE) {
|
||||
/*
|
||||
* This handle only contains an fsid, zero the rest.
|
||||
*/
|
||||
memset(&handle.ha_fid, 0, sizeof(handle.ha_fid));
|
||||
hsize = sizeof(xfs_fsid_t);
|
||||
} else {
|
||||
int lock_mode;
|
||||
|
||||
/* need to get access to the xfs_inode to read the generation */
|
||||
lock_mode = xfs_ilock_map_shared(ip);
|
||||
|
||||
/* fill in fid section of handle from inode */
|
||||
handle.ha_fid.fid_len = sizeof(xfs_fid_t) -
|
||||
sizeof(handle.ha_fid.fid_len);
|
||||
handle.ha_fid.fid_pad = 0;
|
||||
handle.ha_fid.fid_gen = ip->i_d.di_gen;
|
||||
handle.ha_fid.fid_ino = ip->i_ino;
|
||||
|
||||
xfs_iunlock_map_shared(ip, lock_mode);
|
||||
|
||||
hsize = XFS_HSIZE(handle);
|
||||
}
|
||||
|
||||
/* now copy our handle into the user buffer & write out the size */
|
||||
error = -EFAULT;
|
||||
if (copy_to_user(hreq->ohandle, &handle, hsize) ||
|
||||
copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32))) {
|
||||
iput(inode);
|
||||
return -XFS_ERROR(EFAULT);
|
||||
}
|
||||
copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
|
||||
goto out_put;
|
||||
|
||||
iput(inode);
|
||||
return 0;
|
||||
error = 0;
|
||||
|
||||
out_put:
|
||||
if (cmd == XFS_IOC_FD_TO_HANDLE)
|
||||
fput(file);
|
||||
else
|
||||
path_put(&path);
|
||||
return error;
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
|
@@ -211,8 +211,13 @@ xfs_vn_mknod(
     * Irix uses Missed'em'V split, but doesn't want to see
     * the upper 5 bits of (14bit) major.
     */
    if (unlikely(!sysv_valid_dev(rdev) || MAJOR(rdev) & ~0x1ff))
        return -EINVAL;
    if (S_ISCHR(mode) || S_ISBLK(mode)) {
        if (unlikely(!sysv_valid_dev(rdev) || MAJOR(rdev) & ~0x1ff))
            return -EINVAL;
        rdev = sysv_encode_dev(rdev);
    } else {
        rdev = 0;
    }

    if (test_default_acl && test_default_acl(dir)) {
        if (!_ACL_ALLOC(default_acl)) {

@@ -224,28 +229,11 @@ xfs_vn_mknod(
        }
    }

    xfs_dentry_to_name(&name, dentry);

    if (IS_POSIXACL(dir) && !default_acl)
        mode &= ~current_umask();

    switch (mode & S_IFMT) {
    case S_IFCHR:
    case S_IFBLK:
    case S_IFIFO:
    case S_IFSOCK:
        rdev = sysv_encode_dev(rdev);
    case S_IFREG:
        error = xfs_create(XFS_I(dir), &name, mode, rdev, &ip, NULL);
        break;
    case S_IFDIR:
        error = xfs_mkdir(XFS_I(dir), &name, mode, &ip, NULL);
        break;
    default:
        error = EINVAL;
        break;
    }

    xfs_dentry_to_name(&name, dentry);
    error = xfs_create(XFS_I(dir), &name, mode, rdev, &ip, NULL);
    if (unlikely(error))
        goto out_free_acl;

@@ -553,9 +541,6 @@ xfs_vn_getattr(
    stat->uid = ip->i_d.di_uid;
    stat->gid = ip->i_d.di_gid;
    stat->ino = ip->i_ino;
#if XFS_BIG_INUMS
    stat->ino += mp->m_inoadd;
#endif
    stat->atime = inode->i_atime;
    stat->mtime.tv_sec = ip->i_d.di_mtime.t_sec;
    stat->mtime.tv_nsec = ip->i_d.di_mtime.t_nsec;
@@ -38,7 +38,6 @@
#include <kmem.h>
#include <mrlock.h>
#include <sv.h>
#include <mutex.h>
#include <time.h>

#include <support/ktrace.h>

@@ -51,6 +50,7 @@
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/file.h>
#include <linux/swap.h>
#include <linux/errno.h>

@@ -147,17 +147,6 @@
#define SYNCHRONIZE() barrier()
#define __return_address __builtin_return_address(0)

/*
 * IRIX (BSD) quotactl makes use of separate commands for user/group,
 * whereas on Linux the syscall encodes this information into the cmd
 * field (see the QCMD macro in quota.h). These macros help keep the
 * code portable - they are not visible from the syscall interface.
 */
#define Q_XSETGQLIM  XQM_CMD(8)   /* set groups disk limits */
#define Q_XGETGQUOTA XQM_CMD(9)   /* get groups disk limits */
#define Q_XSETPQLIM  XQM_CMD(10)  /* set projects disk limits */
#define Q_XGETPQUOTA XQM_CMD(11)  /* get projects disk limits */

#define dfltprid   0
#define MAXPATHLEN 1024
157 fs/xfs/linux-2.6/xfs_quotaops.c (new file)
@@ -0,0 +1,157 @@
/*
 * Copyright (c) 2008, Christoph Hellwig
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_dmapi.h"
#include "xfs_sb.h"
#include "xfs_inum.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_quota.h"
#include "xfs_log.h"
#include "xfs_trans.h"
#include "xfs_bmap_btree.h"
#include "xfs_inode.h"
#include "quota/xfs_qm.h"
#include <linux/quota.h>


STATIC int
xfs_quota_type(int type)
{
    switch (type) {
    case USRQUOTA:
        return XFS_DQ_USER;
    case GRPQUOTA:
        return XFS_DQ_GROUP;
    default:
        return XFS_DQ_PROJ;
    }
}

STATIC int
xfs_fs_quota_sync(
    struct super_block *sb,
    int type)
{
    struct xfs_mount *mp = XFS_M(sb);

    if (!XFS_IS_QUOTA_RUNNING(mp))
        return -ENOSYS;
    return -xfs_sync_inodes(mp, SYNC_DELWRI);
}

STATIC int
xfs_fs_get_xstate(
    struct super_block *sb,
    struct fs_quota_stat *fqs)
{
    struct xfs_mount *mp = XFS_M(sb);

    if (!XFS_IS_QUOTA_RUNNING(mp))
        return -ENOSYS;
    return -xfs_qm_scall_getqstat(mp, fqs);
}

STATIC int
xfs_fs_set_xstate(
    struct super_block *sb,
    unsigned int uflags,
    int op)
{
    struct xfs_mount *mp = XFS_M(sb);
    unsigned int flags = 0;

    if (sb->s_flags & MS_RDONLY)
        return -EROFS;
    if (!XFS_IS_QUOTA_RUNNING(mp))
        return -ENOSYS;
    if (!capable(CAP_SYS_ADMIN))
        return -EPERM;

    if (uflags & XFS_QUOTA_UDQ_ACCT)
        flags |= XFS_UQUOTA_ACCT;
    if (uflags & XFS_QUOTA_PDQ_ACCT)
        flags |= XFS_PQUOTA_ACCT;
    if (uflags & XFS_QUOTA_GDQ_ACCT)
        flags |= XFS_GQUOTA_ACCT;
    if (uflags & XFS_QUOTA_UDQ_ENFD)
        flags |= XFS_UQUOTA_ENFD;
    if (uflags & (XFS_QUOTA_PDQ_ENFD|XFS_QUOTA_GDQ_ENFD))
        flags |= XFS_OQUOTA_ENFD;

    switch (op) {
    case Q_XQUOTAON:
        return -xfs_qm_scall_quotaon(mp, flags);
    case Q_XQUOTAOFF:
        if (!XFS_IS_QUOTA_ON(mp))
            return -EINVAL;
        return -xfs_qm_scall_quotaoff(mp, flags);
    case Q_XQUOTARM:
        if (XFS_IS_QUOTA_ON(mp))
            return -EINVAL;
        return -xfs_qm_scall_trunc_qfiles(mp, flags);
    }

    return -EINVAL;
}

STATIC int
xfs_fs_get_xquota(
    struct super_block *sb,
    int type,
    qid_t id,
    struct fs_disk_quota *fdq)
{
    struct xfs_mount *mp = XFS_M(sb);

    if (!XFS_IS_QUOTA_RUNNING(mp))
        return -ENOSYS;
    if (!XFS_IS_QUOTA_ON(mp))
        return -ESRCH;

    return -xfs_qm_scall_getquota(mp, id, xfs_quota_type(type), fdq);
}

STATIC int
xfs_fs_set_xquota(
    struct super_block *sb,
    int type,
    qid_t id,
    struct fs_disk_quota *fdq)
{
    struct xfs_mount *mp = XFS_M(sb);

    if (sb->s_flags & MS_RDONLY)
        return -EROFS;
    if (!XFS_IS_QUOTA_RUNNING(mp))
        return -ENOSYS;
    if (!XFS_IS_QUOTA_ON(mp))
        return -ESRCH;
    if (!capable(CAP_SYS_ADMIN))
        return -EPERM;

    return -xfs_qm_scall_setqlim(mp, id, xfs_quota_type(type), fdq);
}

struct quotactl_ops xfs_quotactl_operations = {
    .quota_sync = xfs_fs_quota_sync,
    .get_xstate = xfs_fs_get_xstate,
    .set_xstate = xfs_fs_set_xstate,
    .get_xquota = xfs_fs_get_xquota,
    .set_xquota = xfs_fs_set_xquota,
};
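One detail worth noting in the new quotactl operations above: the internal xfs_qm_scall_*() helpers follow the old XFS convention of returning positive errno values, while the Linux VFS expects negative errnos from these entry points, which is why every call is returned negated. A tiny sketch of that convention, with a made-up helper standing in for the real xfs_qm_scall_*() functions:

    /* hypothetical stand-in for an XFS-internal helper that returns a
     * positive errno on failure and 0 on success */
    static int example_internal_call(void)
    {
        return 22;                       /* positive EINVAL, XFS-internal style */
    }

    static int example_vfs_entry(void)
    {
        return -example_internal_call(); /* -22 == -EINVAL for the VFS */
    }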
@ -68,7 +68,6 @@
|
|||
#include <linux/freezer.h>
|
||||
#include <linux/parser.h>
|
||||
|
||||
static struct quotactl_ops xfs_quotactl_operations;
|
||||
static struct super_operations xfs_super_operations;
|
||||
static kmem_zone_t *xfs_ioend_zone;
|
||||
mempool_t *xfs_ioend_pool;
|
||||
|
@ -79,7 +78,6 @@ mempool_t *xfs_ioend_pool;
|
|||
#define MNTOPT_RTDEV "rtdev" /* realtime I/O device */
|
||||
#define MNTOPT_BIOSIZE "biosize" /* log2 of preferred buffered io size */
|
||||
#define MNTOPT_WSYNC "wsync" /* safe-mode nfs compatible mount */
|
||||
#define MNTOPT_INO64 "ino64" /* force inodes into 64-bit range */
|
||||
#define MNTOPT_NOALIGN "noalign" /* turn off stripe alignment */
|
||||
#define MNTOPT_SWALLOC "swalloc" /* turn on stripe width allocation */
|
||||
#define MNTOPT_SUNIT "sunit" /* data volume stripe unit */
|
||||
|
@ -180,7 +178,7 @@ xfs_parseargs(
|
|||
int dswidth = 0;
|
||||
int iosize = 0;
|
||||
int dmapi_implies_ikeep = 1;
|
||||
uchar_t iosizelog = 0;
|
||||
__uint8_t iosizelog = 0;
|
||||
|
||||
/*
|
||||
* Copy binary VFS mount flags we are interested in.
|
||||
|
@ -291,16 +289,6 @@ xfs_parseargs(
|
|||
mp->m_flags |= XFS_MOUNT_OSYNCISOSYNC;
|
||||
} else if (!strcmp(this_char, MNTOPT_NORECOVERY)) {
|
||||
mp->m_flags |= XFS_MOUNT_NORECOVERY;
|
||||
} else if (!strcmp(this_char, MNTOPT_INO64)) {
|
||||
#if XFS_BIG_INUMS
|
||||
mp->m_flags |= XFS_MOUNT_INO64;
|
||||
mp->m_inoadd = XFS_INO64_OFFSET;
|
||||
#else
|
||||
cmn_err(CE_WARN,
|
||||
"XFS: %s option not allowed on this system",
|
||||
this_char);
|
||||
return EINVAL;
|
||||
#endif
|
||||
} else if (!strcmp(this_char, MNTOPT_NOALIGN)) {
|
||||
mp->m_flags |= XFS_MOUNT_NOALIGN;
|
||||
} else if (!strcmp(this_char, MNTOPT_SWALLOC)) {
|
||||
|
@ -529,7 +517,6 @@ xfs_showargs(
|
|||
/* the few simple ones we can get from the mount struct */
|
||||
{ XFS_MOUNT_IKEEP, "," MNTOPT_IKEEP },
|
||||
{ XFS_MOUNT_WSYNC, "," MNTOPT_WSYNC },
|
||||
{ XFS_MOUNT_INO64, "," MNTOPT_INO64 },
|
||||
{ XFS_MOUNT_NOALIGN, "," MNTOPT_NOALIGN },
|
||||
{ XFS_MOUNT_SWALLOC, "," MNTOPT_SWALLOC },
|
||||
{ XFS_MOUNT_NOUUID, "," MNTOPT_NOUUID },
|
||||
|
@ -634,7 +621,7 @@ xfs_max_file_offset(
|
|||
return (((__uint64_t)pagefactor) << bitshift) - 1;
|
||||
}
|
||||
|
||||
int
|
||||
STATIC int
|
||||
xfs_blkdev_get(
|
||||
xfs_mount_t *mp,
|
||||
const char *name,
|
||||
|
@ -651,7 +638,7 @@ xfs_blkdev_get(
|
|||
return -error;
|
||||
}
|
||||
|
||||
void
|
||||
STATIC void
|
||||
xfs_blkdev_put(
|
||||
struct block_device *bdev)
|
||||
{
|
||||
|
@ -872,7 +859,7 @@ xfsaild_wakeup(
|
|||
wake_up_process(ailp->xa_task);
|
||||
}
|
||||
|
||||
int
|
||||
STATIC int
|
||||
xfsaild(
|
||||
void *data)
|
||||
{
|
||||
|
@ -990,26 +977,57 @@ xfs_fs_write_inode(
|
|||
int sync)
|
||||
{
|
||||
struct xfs_inode *ip = XFS_I(inode);
|
||||
struct xfs_mount *mp = ip->i_mount;
|
||||
int error = 0;
|
||||
int flags = 0;
|
||||
|
||||
xfs_itrace_entry(ip);
|
||||
|
||||
if (XFS_FORCED_SHUTDOWN(mp))
|
||||
return XFS_ERROR(EIO);
|
||||
|
||||
if (sync) {
|
||||
error = xfs_wait_on_pages(ip, 0, -1);
|
||||
if (error)
|
||||
goto out_error;
|
||||
flags |= FLUSH_SYNC;
|
||||
goto out;
|
||||
}
|
||||
error = xfs_inode_flush(ip, flags);
|
||||
|
||||
out_error:
|
||||
/*
|
||||
* Bypass inodes which have already been cleaned by
|
||||
* the inode flush clustering code inside xfs_iflush
|
||||
*/
|
||||
if (xfs_inode_clean(ip))
|
||||
goto out;
|
||||
|
||||
/*
|
||||
* We make this non-blocking if the inode is contended, return
|
||||
* EAGAIN to indicate to the caller that they did not succeed.
|
||||
* This prevents the flush path from blocking on inodes inside
|
||||
* another operation right now, they get caught later by xfs_sync.
|
||||
*/
|
||||
if (sync) {
|
||||
xfs_ilock(ip, XFS_ILOCK_SHARED);
|
||||
xfs_iflock(ip);
|
||||
|
||||
error = xfs_iflush(ip, XFS_IFLUSH_SYNC);
|
||||
} else {
|
||||
error = EAGAIN;
|
||||
if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED))
|
||||
goto out;
|
||||
if (xfs_ipincount(ip) || !xfs_iflock_nowait(ip))
|
||||
goto out_unlock;
|
||||
|
||||
error = xfs_iflush(ip, XFS_IFLUSH_ASYNC_NOBLOCK);
|
||||
}
|
||||
|
||||
out_unlock:
|
||||
xfs_iunlock(ip, XFS_ILOCK_SHARED);
|
||||
out:
|
||||
/*
|
||||
* if we failed to write out the inode then mark
|
||||
* it dirty again so we'll try again later.
|
||||
*/
|
||||
if (error)
|
||||
xfs_mark_inode_dirty_sync(ip);
|
||||
|
||||
return -error;
|
||||
}
|
||||
|
||||
|
@ -1169,18 +1187,12 @@ xfs_fs_statfs(
|
|||
statp->f_bfree = statp->f_bavail =
|
||||
sbp->sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp);
|
||||
fakeinos = statp->f_bfree << sbp->sb_inopblog;
|
||||
#if XFS_BIG_INUMS
|
||||
fakeinos += mp->m_inoadd;
|
||||
#endif
|
||||
statp->f_files =
|
||||
MIN(sbp->sb_icount + fakeinos, (__uint64_t)XFS_MAXINUMBER);
|
||||
if (mp->m_maxicount)
|
||||
#if XFS_BIG_INUMS
|
||||
if (!mp->m_inoadd)
|
||||
#endif
|
||||
statp->f_files = min_t(typeof(statp->f_files),
|
||||
statp->f_files,
|
||||
mp->m_maxicount);
|
||||
statp->f_files = min_t(typeof(statp->f_files),
|
||||
statp->f_files,
|
||||
mp->m_maxicount);
|
||||
statp->f_ffree = statp->f_files - (sbp->sb_icount - sbp->sb_ifree);
|
||||
spin_unlock(&mp->m_sb_lock);
|
||||
|
||||
|
@ -1302,57 +1314,6 @@ xfs_fs_show_options(
|
|||
return -xfs_showargs(XFS_M(mnt->mnt_sb), m);
|
||||
}
|
||||
|
||||
STATIC int
|
||||
xfs_fs_quotasync(
|
||||
struct super_block *sb,
|
||||
int type)
|
||||
{
|
||||
return -XFS_QM_QUOTACTL(XFS_M(sb), Q_XQUOTASYNC, 0, NULL);
|
||||
}
|
||||
|
||||
STATIC int
|
||||
xfs_fs_getxstate(
|
||||
struct super_block *sb,
|
||||
struct fs_quota_stat *fqs)
|
||||
{
|
||||
return -XFS_QM_QUOTACTL(XFS_M(sb), Q_XGETQSTAT, 0, (caddr_t)fqs);
|
||||
}
|
||||
|
||||
STATIC int
|
||||
xfs_fs_setxstate(
|
||||
struct super_block *sb,
|
||||
unsigned int flags,
|
||||
int op)
|
||||
{
|
||||
return -XFS_QM_QUOTACTL(XFS_M(sb), op, 0, (caddr_t)&flags);
|
||||
}
|
||||
|
||||
STATIC int
|
||||
xfs_fs_getxquota(
|
||||
struct super_block *sb,
|
||||
int type,
|
||||
qid_t id,
|
||||
struct fs_disk_quota *fdq)
|
||||
{
|
||||
return -XFS_QM_QUOTACTL(XFS_M(sb),
|
||||
(type == USRQUOTA) ? Q_XGETQUOTA :
|
||||
((type == GRPQUOTA) ? Q_XGETGQUOTA :
|
||||
Q_XGETPQUOTA), id, (caddr_t)fdq);
|
||||
}
|
||||
|
||||
STATIC int
|
||||
xfs_fs_setxquota(
|
||||
struct super_block *sb,
|
||||
int type,
|
||||
qid_t id,
|
||||
struct fs_disk_quota *fdq)
|
||||
{
|
||||
return -XFS_QM_QUOTACTL(XFS_M(sb),
|
||||
(type == USRQUOTA) ? Q_XSETQLIM :
|
||||
((type == GRPQUOTA) ? Q_XSETGQLIM :
|
||||
Q_XSETPQLIM), id, (caddr_t)fdq);
|
||||
}
|
||||
|
||||
/*
|
||||
* This function fills in xfs_mount_t fields based on mount args.
|
||||
* Note: the superblock _has_ now been read in.
|
||||
|
@ -1435,7 +1396,9 @@ xfs_fs_fill_super(
|
|||
sb_min_blocksize(sb, BBSIZE);
|
||||
sb->s_xattr = xfs_xattr_handlers;
|
||||
sb->s_export_op = &xfs_export_operations;
|
||||
#ifdef CONFIG_XFS_QUOTA
|
||||
sb->s_qcop = &xfs_quotactl_operations;
|
||||
#endif
|
||||
sb->s_op = &xfs_super_operations;
|
||||
|
||||
error = xfs_dmops_get(mp);
|
||||
|
@ -1578,14 +1541,6 @@ static struct super_operations xfs_super_operations = {
|
|||
.show_options = xfs_fs_show_options,
|
||||
};
|
||||
|
||||
static struct quotactl_ops xfs_quotactl_operations = {
|
||||
.quota_sync = xfs_fs_quotasync,
|
||||
.get_xstate = xfs_fs_getxstate,
|
||||
.set_xstate = xfs_fs_setxstate,
|
||||
.get_xquota = xfs_fs_getxquota,
|
||||
.set_xquota = xfs_fs_setxquota,
|
||||
};
|
||||
|
||||
static struct file_system_type xfs_fs_type = {
|
||||
.owner = THIS_MODULE,
|
||||
.name = "xfs",
|
||||
|
|
|
@@ -93,6 +93,7 @@ extern void xfs_blkdev_issue_flush(struct xfs_buftarg *);

extern const struct export_operations xfs_export_operations;
extern struct xattr_handler *xfs_xattr_handlers[];
extern struct quotactl_ops xfs_quotactl_operations;

#define XFS_M(sb) ((struct xfs_mount *)((sb)->s_fs_info))
@@ -19,6 +19,7 @@
#define XFS_SYNC_H 1

struct xfs_mount;
struct xfs_perag;

typedef struct bhv_vfs_sync_work {
    struct list_head w_list;
@@ -40,11 +40,6 @@ struct attrlist_cursor_kern;
#define IO_ISDIRECT 0x00004 /* bypass page cache */
#define IO_INVIS    0x00020 /* don't update inode timestamps */

/*
 * Flags for xfs_inode_flush
 */
#define FLUSH_SYNC 1 /* wait for flush to complete */

/*
 * Flush/Invalidate options for vop_toss/flush/flushinval_pages.
 */

@@ -54,33 +49,6 @@ struct attrlist_cursor_kern;
                 Prevent VM access to the pages until
                 the operation completes. */

/*
 * Dealing with bad inodes
 */
static inline int VN_BAD(struct inode *vp)
{
    return is_bad_inode(vp);
}

/*
 * Extracting atime values in various formats
 */
static inline void vn_atime_to_bstime(struct inode *vp, xfs_bstime_t *bs_atime)
{
    bs_atime->tv_sec = vp->i_atime.tv_sec;
    bs_atime->tv_nsec = vp->i_atime.tv_nsec;
}

static inline void vn_atime_to_timespec(struct inode *vp, struct timespec *ts)
{
    *ts = vp->i_atime;
}

static inline void vn_atime_to_time_t(struct inode *vp, time_t *tt)
{
    *tt = vp->i_atime.tv_sec;
}

/*
 * Some useful predicates.
 */
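The inline helpers removed above (VN_BAD and the vn_atime_* wrappers, per the "xfs: kill VN_BAD" and "xfs: kill vn_atime_* helpers." commits in the series) were thin wrappers around generic VFS facilities, so callers can use those directly. A hedged sketch of what a converted caller looks like (the function itself is hypothetical; is_bad_inode() and inode->i_atime are the real replacements):

    #include <linux/fs.h>

    static void example_caller(struct inode *inode, struct timespec *ts)
    {
        if (is_bad_inode(inode))   /* was: VN_BAD(inode) */
            return;

        *ts = inode->i_atime;      /* was: vn_atime_to_timespec(inode, ts) */
    }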
@ -804,7 +804,7 @@ xfs_qm_dqlookup(
|
|||
uint flist_locked;
|
||||
xfs_dquot_t *d;
|
||||
|
||||
ASSERT(XFS_DQ_IS_HASH_LOCKED(qh));
|
||||
ASSERT(mutex_is_locked(&qh->qh_lock));
|
||||
|
||||
flist_locked = B_FALSE;
|
||||
|
||||
|
@ -877,7 +877,7 @@ xfs_qm_dqlookup(
|
|||
/*
|
||||
* move the dquot to the front of the hashchain
|
||||
*/
|
||||
ASSERT(XFS_DQ_IS_HASH_LOCKED(qh));
|
||||
ASSERT(mutex_is_locked(&qh->qh_lock));
|
||||
if (dqp->HL_PREVP != &qh->qh_next) {
|
||||
xfs_dqtrace_entry(dqp,
|
||||
"DQLOOKUP: HASH MOVETOFRONT");
|
||||
|
@ -892,13 +892,13 @@ xfs_qm_dqlookup(
|
|||
}
|
||||
xfs_dqtrace_entry(dqp, "LOOKUP END");
|
||||
*O_dqpp = dqp;
|
||||
ASSERT(XFS_DQ_IS_HASH_LOCKED(qh));
|
||||
ASSERT(mutex_is_locked(&qh->qh_lock));
|
||||
return (0);
|
||||
}
|
||||
}
|
||||
|
||||
*O_dqpp = NULL;
|
||||
ASSERT(XFS_DQ_IS_HASH_LOCKED(qh));
|
||||
ASSERT(mutex_is_locked(&qh->qh_lock));
|
||||
return (1);
|
||||
}
|
||||
|
||||
|
@ -956,7 +956,7 @@ xfs_qm_dqget(
|
|||
ASSERT(ip->i_gdquot == NULL);
|
||||
}
|
||||
#endif
|
||||
XFS_DQ_HASH_LOCK(h);
|
||||
mutex_lock(&h->qh_lock);
|
||||
|
||||
/*
|
||||
* Look in the cache (hashtable).
|
||||
|
@ -971,7 +971,7 @@ xfs_qm_dqget(
|
|||
*/
|
||||
ASSERT(*O_dqpp);
|
||||
ASSERT(XFS_DQ_IS_LOCKED(*O_dqpp));
|
||||
XFS_DQ_HASH_UNLOCK(h);
|
||||
mutex_unlock(&h->qh_lock);
|
||||
xfs_dqtrace_entry(*O_dqpp, "DQGET DONE (FROM CACHE)");
|
||||
return (0); /* success */
|
||||
}
|
||||
|
@ -991,7 +991,7 @@ xfs_qm_dqget(
|
|||
* we don't keep the lock across a disk read
|
||||
*/
|
||||
version = h->qh_version;
|
||||
XFS_DQ_HASH_UNLOCK(h);
|
||||
mutex_unlock(&h->qh_lock);
|
||||
|
||||
/*
|
||||
* Allocate the dquot on the kernel heap, and read the ondisk
|
||||
|
@ -1056,7 +1056,7 @@ xfs_qm_dqget(
|
|||
/*
|
||||
* Hashlock comes after ilock in lock order
|
||||
*/
|
||||
XFS_DQ_HASH_LOCK(h);
|
||||
mutex_lock(&h->qh_lock);
|
||||
if (version != h->qh_version) {
|
||||
xfs_dquot_t *tmpdqp;
|
||||
/*
|
||||
|
@ -1072,7 +1072,7 @@ xfs_qm_dqget(
|
|||
* and start over.
|
||||
*/
|
||||
xfs_qm_dqput(tmpdqp);
|
||||
XFS_DQ_HASH_UNLOCK(h);
|
||||
mutex_unlock(&h->qh_lock);
|
||||
xfs_qm_dqdestroy(dqp);
|
||||
XQM_STATS_INC(xqmstats.xs_qm_dquot_dups);
|
||||
goto again;
|
||||
|
@ -1083,7 +1083,7 @@ xfs_qm_dqget(
|
|||
* Put the dquot at the beginning of the hash-chain and mp's list
|
||||
* LOCK ORDER: hashlock, freelistlock, mplistlock, udqlock, gdqlock ..
|
||||
*/
|
||||
ASSERT(XFS_DQ_IS_HASH_LOCKED(h));
|
||||
ASSERT(mutex_is_locked(&h->qh_lock));
|
||||
dqp->q_hash = h;
|
||||
XQM_HASHLIST_INSERT(h, dqp);
|
||||
|
||||
|
@ -1102,7 +1102,7 @@ xfs_qm_dqget(
|
|||
XQM_MPLIST_INSERT(&(XFS_QI_MPL_LIST(mp)), dqp);
|
||||
|
||||
xfs_qm_mplist_unlock(mp);
|
||||
XFS_DQ_HASH_UNLOCK(h);
|
||||
mutex_unlock(&h->qh_lock);
|
||||
dqret:
|
||||
ASSERT((ip == NULL) || xfs_isilocked(ip, XFS_ILOCK_EXCL));
|
||||
xfs_dqtrace_entry(dqp, "DQGET DONE");
|
||||
|
@ -1440,7 +1440,7 @@ xfs_qm_dqpurge(
|
|||
xfs_mount_t *mp = dqp->q_mount;
|
||||
|
||||
ASSERT(XFS_QM_IS_MPLIST_LOCKED(mp));
|
||||
ASSERT(XFS_DQ_IS_HASH_LOCKED(dqp->q_hash));
|
||||
ASSERT(mutex_is_locked(&dqp->q_hash->qh_lock));
|
||||
|
||||
xfs_dqlock(dqp);
|
||||
/*
|
||||
|
@ -1453,7 +1453,7 @@ xfs_qm_dqpurge(
|
|||
*/
|
||||
if (dqp->q_nrefs != 0) {
|
||||
xfs_dqunlock(dqp);
|
||||
XFS_DQ_HASH_UNLOCK(dqp->q_hash);
|
||||
mutex_unlock(&dqp->q_hash->qh_lock);
|
||||
return (1);
|
||||
}
|
||||
|
||||
|
@ -1517,7 +1517,7 @@ xfs_qm_dqpurge(
|
|||
memset(&dqp->q_core, 0, sizeof(dqp->q_core));
|
||||
xfs_dqfunlock(dqp);
|
||||
xfs_dqunlock(dqp);
|
||||
XFS_DQ_HASH_UNLOCK(thishash);
|
||||
mutex_unlock(&thishash->qh_lock);
|
||||
return (0);
|
||||
}
|
||||
|
||||
|
|
|
@ -34,7 +34,7 @@
|
|||
*/
|
||||
typedef struct xfs_dqhash {
|
||||
struct xfs_dquot *qh_next;
|
||||
mutex_t qh_lock;
|
||||
struct mutex qh_lock;
|
||||
uint qh_version; /* ever increasing version */
|
||||
uint qh_nelems; /* number of dquots on the list */
|
||||
} xfs_dqhash_t;
|
||||
|
@ -81,7 +81,7 @@ typedef struct xfs_dquot {
|
|||
xfs_qcnt_t q_res_bcount; /* total regular nblks used+reserved */
|
||||
xfs_qcnt_t q_res_icount; /* total inos allocd+reserved */
|
||||
xfs_qcnt_t q_res_rtbcount;/* total realtime blks used+reserved */
|
||||
mutex_t q_qlock; /* quota lock */
|
||||
struct mutex q_qlock; /* quota lock */
|
||||
struct completion q_flush; /* flush completion queue */
|
||||
atomic_t q_pincount; /* dquot pin count */
|
||||
wait_queue_head_t q_pinwait; /* dquot pinning wait queue */
|
||||
|
@ -109,19 +109,6 @@ enum {
|
|||
|
||||
#define XFS_DQHOLD(dqp) ((dqp)->q_nrefs++)
|
||||
|
||||
#ifdef DEBUG
|
||||
static inline int
|
||||
XFS_DQ_IS_LOCKED(xfs_dquot_t *dqp)
|
||||
{
|
||||
if (mutex_trylock(&dqp->q_qlock)) {
|
||||
mutex_unlock(&dqp->q_qlock);
|
||||
return 0;
|
||||
}
|
||||
return 1;
|
||||
}
|
||||
#endif
|
||||
|
||||
|
||||
/*
|
||||
* Manage the q_flush completion queue embedded in the dquot. This completion
|
||||
* queue synchronizes processes attempting to flush the in-core dquot back to
|
||||
|
@ -142,6 +129,7 @@ static inline void xfs_dqfunlock(xfs_dquot_t *dqp)
|
|||
complete(&dqp->q_flush);
|
||||
}
|
||||
|
||||
#define XFS_DQ_IS_LOCKED(dqp) (mutex_is_locked(&((dqp)->q_qlock)))
|
||||
#define XFS_DQ_IS_ON_FREELIST(dqp) ((dqp)->dq_flnext != (dqp))
|
||||
#define XFS_DQ_IS_DIRTY(dqp) ((dqp)->dq_flags & XFS_DQ_DIRTY)
|
||||
#define XFS_QM_ISUDQ(dqp) ((dqp)->dq_flags & XFS_DQ_USER)
|
||||
|
|
|
@ -55,7 +55,7 @@
|
|||
* quota functionality, including maintaining the freelist and hash
|
||||
* tables of dquots.
|
||||
*/
|
||||
mutex_t xfs_Gqm_lock;
|
||||
struct mutex xfs_Gqm_lock;
|
||||
struct xfs_qm *xfs_Gqm;
|
||||
uint ndquot;
|
||||
|
||||
|
@ -69,8 +69,6 @@ STATIC void xfs_qm_list_destroy(xfs_dqlist_t *);
|
|||
|
||||
STATIC void xfs_qm_freelist_init(xfs_frlist_t *);
|
||||
STATIC void xfs_qm_freelist_destroy(xfs_frlist_t *);
|
||||
STATIC int xfs_qm_mplist_nowait(xfs_mount_t *);
|
||||
STATIC int xfs_qm_dqhashlock_nowait(xfs_dquot_t *);
|
||||
|
||||
STATIC int xfs_qm_init_quotainos(xfs_mount_t *);
|
||||
STATIC int xfs_qm_init_quotainfo(xfs_mount_t *);
|
||||
|
@ -82,7 +80,7 @@ static struct shrinker xfs_qm_shaker = {
|
|||
};
|
||||
|
||||
#ifdef DEBUG
|
||||
extern mutex_t qcheck_lock;
|
||||
extern struct mutex qcheck_lock;
|
||||
#endif
|
||||
|
||||
#ifdef QUOTADEBUG
|
||||
|
@ -219,7 +217,7 @@ xfs_qm_hold_quotafs_ref(
|
|||
* the structure could disappear between the entry to this routine and
|
||||
* a HOLD operation if not locked.
|
||||
*/
|
||||
XFS_QM_LOCK(xfs_Gqm);
|
||||
mutex_lock(&xfs_Gqm_lock);
|
||||
|
||||
if (xfs_Gqm == NULL)
|
||||
xfs_Gqm = xfs_Gqm_init();
|
||||
|
@ -228,8 +226,8 @@ xfs_qm_hold_quotafs_ref(
|
|||
* debugging and statistical purposes, but ...
|
||||
* Just take a reference and get out.
|
||||
*/
|
||||
XFS_QM_HOLD(xfs_Gqm);
|
||||
XFS_QM_UNLOCK(xfs_Gqm);
|
||||
xfs_Gqm->qm_nrefs++;
|
||||
mutex_unlock(&xfs_Gqm_lock);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -277,13 +275,12 @@ xfs_qm_rele_quotafs_ref(
|
|||
* Destroy the entire XQM. If somebody mounts with quotaon, this'll
|
||||
* be restarted.
|
||||
*/
|
||||
XFS_QM_LOCK(xfs_Gqm);
|
||||
XFS_QM_RELE(xfs_Gqm);
|
||||
if (xfs_Gqm->qm_nrefs == 0) {
|
||||
mutex_lock(&xfs_Gqm_lock);
|
||||
if (--xfs_Gqm->qm_nrefs == 0) {
|
||||
xfs_qm_destroy(xfs_Gqm);
|
||||
xfs_Gqm = NULL;
|
||||
}
|
||||
XFS_QM_UNLOCK(xfs_Gqm);
|
||||
mutex_unlock(&xfs_Gqm_lock);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -577,10 +574,10 @@ xfs_qm_dqpurge_int(
|
|||
continue;
|
||||
}
|
||||
|
||||
if (! xfs_qm_dqhashlock_nowait(dqp)) {
|
||||
if (!mutex_trylock(&dqp->q_hash->qh_lock)) {
|
||||
nrecl = XFS_QI_MPLRECLAIMS(mp);
|
||||
xfs_qm_mplist_unlock(mp);
|
||||
XFS_DQ_HASH_LOCK(dqp->q_hash);
|
||||
mutex_lock(&dqp->q_hash->qh_lock);
|
||||
xfs_qm_mplist_lock(mp);
|
||||
|
||||
/*
|
||||
|
@ -590,7 +587,7 @@ xfs_qm_dqpurge_int(
|
|||
* this point, but somebody might be taking things off.
|
||||
*/
|
||||
if (nrecl != XFS_QI_MPLRECLAIMS(mp)) {
|
||||
XFS_DQ_HASH_UNLOCK(dqp->q_hash);
|
||||
mutex_unlock(&dqp->q_hash->qh_lock);
|
||||
goto again;
|
||||
}
|
||||
}
|
||||
|
@ -632,7 +629,6 @@ xfs_qm_dqattach_one(
|
|||
xfs_dqid_t id,
|
||||
uint type,
|
||||
uint doalloc,
|
||||
uint dolock,
|
||||
xfs_dquot_t *udqhint, /* hint */
|
||||
xfs_dquot_t **IO_idqpp)
|
||||
{
|
||||
|
@ -641,16 +637,16 @@ xfs_qm_dqattach_one(
|
|||
|
||||
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
|
||||
error = 0;
|
||||
|
||||
/*
|
||||
* See if we already have it in the inode itself. IO_idqpp is
|
||||
* &i_udquot or &i_gdquot. This made the code look weird, but
|
||||
* made the logic a lot simpler.
|
||||
*/
|
||||
if ((dqp = *IO_idqpp)) {
|
||||
if (dolock)
|
||||
xfs_dqlock(dqp);
|
||||
dqp = *IO_idqpp;
|
||||
if (dqp) {
|
||||
xfs_dqtrace_entry(dqp, "DQATTACH: found in ip");
|
||||
goto done;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -659,38 +655,38 @@ xfs_qm_dqattach_one(
|
|||
* lookup by dqid (xfs_qm_dqget) by caching a group dquot inside
|
||||
* the user dquot.
|
||||
*/
|
||||
ASSERT(!udqhint || type == XFS_DQ_GROUP || type == XFS_DQ_PROJ);
|
||||
if (udqhint && !dolock)
|
||||
if (udqhint) {
|
||||
ASSERT(type == XFS_DQ_GROUP || type == XFS_DQ_PROJ);
|
||||
xfs_dqlock(udqhint);
|
||||
|
||||
/*
|
||||
* No need to take dqlock to look at the id.
|
||||
* The ID can't change until it gets reclaimed, and it won't
|
||||
* be reclaimed as long as we have a ref from inode and we hold
|
||||
* the ilock.
|
||||
*/
|
||||
if (udqhint &&
|
||||
(dqp = udqhint->q_gdquot) &&
|
||||
(be32_to_cpu(dqp->q_core.d_id) == id)) {
|
||||
ASSERT(XFS_DQ_IS_LOCKED(udqhint));
|
||||
xfs_dqlock(dqp);
|
||||
XFS_DQHOLD(dqp);
|
||||
ASSERT(*IO_idqpp == NULL);
|
||||
*IO_idqpp = dqp;
|
||||
if (!dolock) {
|
||||
/*
|
||||
* No need to take dqlock to look at the id.
|
||||
*
|
||||
* The ID can't change until it gets reclaimed, and it won't
|
||||
* be reclaimed as long as we have a ref from inode and we
|
||||
* hold the ilock.
|
||||
*/
|
||||
dqp = udqhint->q_gdquot;
|
||||
if (dqp && be32_to_cpu(dqp->q_core.d_id) == id) {
|
||||
xfs_dqlock(dqp);
|
||||
XFS_DQHOLD(dqp);
|
||||
ASSERT(*IO_idqpp == NULL);
|
||||
*IO_idqpp = dqp;
|
||||
|
||||
xfs_dqunlock(dqp);
|
||||
xfs_dqunlock(udqhint);
|
||||
return 0;
|
||||
}
|
||||
goto done;
|
||||
}
|
||||
/*
|
||||
* We can't hold a dquot lock when we call the dqget code.
|
||||
* We'll deadlock in no time, because of (not conforming to)
|
||||
* lock ordering - the inodelock comes before any dquot lock,
|
||||
* and we may drop and reacquire the ilock in xfs_qm_dqget().
|
||||
*/
|
||||
if (udqhint)
|
||||
|
||||
/*
|
||||
* We can't hold a dquot lock when we call the dqget code.
|
||||
* We'll deadlock in no time, because of (not conforming to)
|
||||
* lock ordering - the inodelock comes before any dquot lock,
|
||||
* and we may drop and reacquire the ilock in xfs_qm_dqget().
|
||||
*/
|
||||
xfs_dqunlock(udqhint);
|
||||
}
|
||||
|
||||
/*
|
||||
* Find the dquot from somewhere. This bumps the
|
||||
* reference count of dquot and returns it locked.
|
||||
|
@ -698,48 +694,19 @@ xfs_qm_dqattach_one(
|
|||
* disk and we didn't ask it to allocate;
|
||||
* ESRCH if quotas got turned off suddenly.
|
||||
*/
|
||||
if ((error = xfs_qm_dqget(ip->i_mount, ip, id, type,
|
||||
doalloc|XFS_QMOPT_DOWARN, &dqp))) {
|
||||
if (udqhint && dolock)
|
||||
xfs_dqlock(udqhint);
|
||||
goto done;
|
||||
}
|
||||
error = xfs_qm_dqget(ip->i_mount, ip, id, type, XFS_QMOPT_DOWARN, &dqp);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
xfs_dqtrace_entry(dqp, "DQATTACH: found by dqget");
|
||||
|
||||
/*
|
||||
* dqget may have dropped and re-acquired the ilock, but it guarantees
|
||||
* that the dquot returned is the one that should go in the inode.
|
||||
*/
|
||||
*IO_idqpp = dqp;
|
||||
ASSERT(dqp);
|
||||
ASSERT(XFS_DQ_IS_LOCKED(dqp));
|
||||
if (! dolock) {
|
||||
xfs_dqunlock(dqp);
|
||||
goto done;
|
||||
}
|
||||
if (! udqhint)
|
||||
goto done;
|
||||
|
||||
ASSERT(udqhint);
|
||||
ASSERT(dolock);
|
||||
ASSERT(XFS_DQ_IS_LOCKED(dqp));
|
||||
if (! xfs_qm_dqlock_nowait(udqhint)) {
|
||||
xfs_dqunlock(dqp);
|
||||
xfs_dqlock(udqhint);
|
||||
xfs_dqlock(dqp);
|
||||
}
|
||||
done:
|
||||
#ifdef QUOTADEBUG
|
||||
if (udqhint) {
|
||||
if (dolock)
|
||||
ASSERT(XFS_DQ_IS_LOCKED(udqhint));
|
||||
}
|
||||
if (! error) {
|
||||
if (dolock)
|
||||
ASSERT(XFS_DQ_IS_LOCKED(dqp));
|
||||
}
|
||||
#endif
|
||||
return error;
|
||||
xfs_dqunlock(dqp);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
|
@ -754,24 +721,15 @@ xfs_qm_dqattach_one(
|
|||
STATIC void
|
||||
xfs_qm_dqattach_grouphint(
|
||||
xfs_dquot_t *udq,
|
||||
xfs_dquot_t *gdq,
|
||||
uint locked)
|
||||
xfs_dquot_t *gdq)
|
||||
{
|
||||
xfs_dquot_t *tmp;
|
||||
|
||||
#ifdef QUOTADEBUG
|
||||
if (locked) {
|
||||
ASSERT(XFS_DQ_IS_LOCKED(udq));
|
||||
ASSERT(XFS_DQ_IS_LOCKED(gdq));
|
||||
}
|
||||
#endif
|
||||
if (! locked)
|
||||
xfs_dqlock(udq);
|
||||
xfs_dqlock(udq);
|
||||
|
||||
if ((tmp = udq->q_gdquot)) {
|
||||
if (tmp == gdq) {
|
||||
if (! locked)
|
||||
xfs_dqunlock(udq);
|
||||
xfs_dqunlock(udq);
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -781,8 +739,6 @@ xfs_qm_dqattach_grouphint(
|
|||
* because the freelist lock comes before dqlocks.
|
||||
*/
|
||||
xfs_dqunlock(udq);
|
||||
if (locked)
|
||||
xfs_dqunlock(gdq);
|
||||
/*
|
||||
* we took a hard reference once upon a time in dqget,
|
||||
* so give it back when the udquot no longer points at it
|
||||
|
@ -795,9 +751,7 @@ xfs_qm_dqattach_grouphint(
|
|||
|
||||
} else {
|
||||
ASSERT(XFS_DQ_IS_LOCKED(udq));
|
||||
if (! locked) {
|
||||
xfs_dqlock(gdq);
|
||||
}
|
||||
xfs_dqlock(gdq);
|
||||
}
|
||||
|
||||
ASSERT(XFS_DQ_IS_LOCKED(udq));
|
||||
|
@ -810,10 +764,9 @@ xfs_qm_dqattach_grouphint(
|
|||
XFS_DQHOLD(gdq);
|
||||
udq->q_gdquot = gdq;
|
||||
}
|
||||
if (! locked) {
|
||||
xfs_dqunlock(gdq);
|
||||
xfs_dqunlock(udq);
|
||||
}
|
||||
|
||||
xfs_dqunlock(gdq);
|
||||
xfs_dqunlock(udq);
|
||||
}
|
||||
|
||||
|
||||
|
@ -821,8 +774,6 @@ xfs_qm_dqattach_grouphint(
|
|||
* Given a locked inode, attach dquot(s) to it, taking U/G/P-QUOTAON
|
||||
* into account.
|
||||
* If XFS_QMOPT_DQALLOC, the dquot(s) will be allocated if needed.
|
||||
* If XFS_QMOPT_DQLOCK, the dquot(s) will be returned locked. This option pretty
|
||||
* much made this code a complete mess, but it has been pretty useful.
|
||||
* If XFS_QMOPT_ILOCKED, then inode sent is already locked EXCL.
|
||||
* Inode may get unlocked and relocked in here, and the caller must deal with
|
||||
* the consequences.
|
||||
|
@ -851,7 +802,6 @@ xfs_qm_dqattach(
|
|||
if (XFS_IS_UQUOTA_ON(mp)) {
|
||||
error = xfs_qm_dqattach_one(ip, ip->i_d.di_uid, XFS_DQ_USER,
|
||||
flags & XFS_QMOPT_DQALLOC,
|
||||
flags & XFS_QMOPT_DQLOCK,
|
||||
NULL, &ip->i_udquot);
|
||||
if (error)
|
||||
goto done;
|
||||
|
@ -863,11 +813,9 @@ xfs_qm_dqattach(
|
|||
error = XFS_IS_GQUOTA_ON(mp) ?
|
||||
xfs_qm_dqattach_one(ip, ip->i_d.di_gid, XFS_DQ_GROUP,
|
||||
flags & XFS_QMOPT_DQALLOC,
|
||||
flags & XFS_QMOPT_DQLOCK,
|
||||
ip->i_udquot, &ip->i_gdquot) :
|
||||
xfs_qm_dqattach_one(ip, ip->i_d.di_projid, XFS_DQ_PROJ,
|
||||
flags & XFS_QMOPT_DQALLOC,
|
||||
flags & XFS_QMOPT_DQLOCK,
|
||||
ip->i_udquot, &ip->i_gdquot);
|
||||
/*
|
||||
* Don't worry about the udquot that we may have
|
||||
|
@ -898,22 +846,13 @@ xfs_qm_dqattach(
|
|||
/*
|
||||
* Attach i_gdquot to the gdquot hint inside the i_udquot.
|
||||
*/
|
||||
xfs_qm_dqattach_grouphint(ip->i_udquot, ip->i_gdquot,
|
||||
flags & XFS_QMOPT_DQLOCK);
|
||||
xfs_qm_dqattach_grouphint(ip->i_udquot, ip->i_gdquot);
|
||||
}
|
||||
|
||||
done:
|
||||
|
||||
#ifdef QUOTADEBUG
|
||||
if (! error) {
|
||||
if (ip->i_udquot) {
|
||||
if (flags & XFS_QMOPT_DQLOCK)
|
||||
ASSERT(XFS_DQ_IS_LOCKED(ip->i_udquot));
|
||||
}
|
||||
if (ip->i_gdquot) {
|
||||
if (flags & XFS_QMOPT_DQLOCK)
|
||||
ASSERT(XFS_DQ_IS_LOCKED(ip->i_gdquot));
|
||||
}
|
||||
if (XFS_IS_UQUOTA_ON(mp))
|
||||
ASSERT(ip->i_udquot);
|
||||
if (XFS_IS_OQUOTA_ON(mp))
|
||||
|
@ -2086,7 +2025,7 @@ xfs_qm_shake_freelist(
|
|||
* a dqlookup process that holds the hashlock that is
|
||||
* waiting for the freelist lock.
|
||||
*/
|
||||
if (! xfs_qm_dqhashlock_nowait(dqp)) {
|
||||
if (!mutex_trylock(&dqp->q_hash->qh_lock)) {
|
||||
xfs_dqfunlock(dqp);
|
||||
xfs_dqunlock(dqp);
|
||||
dqp = dqp->dq_flnext;
|
||||
|
@ -2103,7 +2042,7 @@ xfs_qm_shake_freelist(
|
|||
/* XXX put a sentinel so that we can come back here */
|
||||
xfs_dqfunlock(dqp);
|
||||
xfs_dqunlock(dqp);
|
||||
XFS_DQ_HASH_UNLOCK(hash);
|
||||
mutex_unlock(&hash->qh_lock);
|
||||
xfs_qm_freelist_unlock(xfs_Gqm);
|
||||
if (++restarts >= XFS_QM_RECLAIM_MAX_RESTARTS)
|
||||
return nreclaimed;
|
||||
|
@ -2120,7 +2059,7 @@ xfs_qm_shake_freelist(
|
|||
XQM_HASHLIST_REMOVE(hash, dqp);
|
||||
xfs_dqfunlock(dqp);
|
||||
xfs_qm_mplist_unlock(dqp->q_mount);
|
||||
XFS_DQ_HASH_UNLOCK(hash);
|
||||
mutex_unlock(&hash->qh_lock);
|
||||
|
||||
off_freelist:
|
||||
XQM_FREELIST_REMOVE(dqp);
|
||||
|
@ -2262,7 +2201,7 @@ xfs_qm_dqreclaim_one(void)
|
|||
continue;
|
||||
}
|
||||
|
||||
if (! xfs_qm_dqhashlock_nowait(dqp))
|
||||
if (!mutex_trylock(&dqp->q_hash->qh_lock))
|
||||
goto mplistunlock;
|
||||
|
||||
ASSERT(dqp->q_nrefs == 0);
|
||||
|
@ -2271,7 +2210,7 @@ xfs_qm_dqreclaim_one(void)
|
|||
XQM_HASHLIST_REMOVE(dqp->q_hash, dqp);
|
||||
XQM_FREELIST_REMOVE(dqp);
|
||||
dqpout = dqp;
|
||||
XFS_DQ_HASH_UNLOCK(dqp->q_hash);
|
||||
mutex_unlock(&dqp->q_hash->qh_lock);
|
||||
mplistunlock:
|
||||
xfs_qm_mplist_unlock(dqp->q_mount);
|
||||
xfs_dqfunlock(dqp);
|
||||
|
@ -2774,34 +2713,3 @@ xfs_qm_freelist_append(xfs_frlist_t *ql, xfs_dquot_t *dq)
|
|||
{
|
||||
xfs_qm_freelist_insert((xfs_frlist_t *)ql->qh_prev, dq);
|
||||
}
|
||||
|
||||
STATIC int
|
||||
xfs_qm_dqhashlock_nowait(
|
||||
xfs_dquot_t *dqp)
|
||||
{
|
||||
int locked;
|
||||
|
||||
locked = mutex_trylock(&((dqp)->q_hash->qh_lock));
|
||||
return locked;
|
||||
}
|
||||
|
||||
int
|
||||
xfs_qm_freelist_lock_nowait(
|
||||
xfs_qm_t *xqm)
|
||||
{
|
||||
int locked;
|
||||
|
||||
locked = mutex_trylock(&(xqm->qm_dqfreelist.qh_lock));
|
||||
return locked;
|
||||
}
|
||||
|
||||
STATIC int
|
||||
xfs_qm_mplist_nowait(
|
||||
xfs_mount_t *mp)
|
||||
{
|
||||
int locked;
|
||||
|
||||
ASSERT(mp->m_quotainfo);
|
||||
locked = mutex_trylock(&(XFS_QI_MPLLOCK(mp)));
|
||||
return locked;
|
||||
}
|
||||
|
|
|
@ -27,7 +27,7 @@ struct xfs_qm;
|
|||
struct xfs_inode;
|
||||
|
||||
extern uint ndquot;
|
||||
extern mutex_t xfs_Gqm_lock;
|
||||
extern struct mutex xfs_Gqm_lock;
|
||||
extern struct xfs_qm *xfs_Gqm;
|
||||
extern kmem_zone_t *qm_dqzone;
|
||||
extern kmem_zone_t *qm_dqtrxzone;
|
||||
|
@ -79,7 +79,7 @@ typedef xfs_dqhash_t xfs_dqlist_t;
|
|||
typedef struct xfs_frlist {
|
||||
struct xfs_dquot *qh_next;
|
||||
struct xfs_dquot *qh_prev;
|
||||
mutex_t qh_lock;
|
||||
struct mutex qh_lock;
|
||||
uint qh_version;
|
||||
uint qh_nelems;
|
||||
} xfs_frlist_t;
|
||||
|
@ -115,7 +115,7 @@ typedef struct xfs_quotainfo {
|
|||
xfs_qwarncnt_t qi_bwarnlimit; /* limit for blks warnings */
|
||||
xfs_qwarncnt_t qi_iwarnlimit; /* limit for inodes warnings */
|
||||
xfs_qwarncnt_t qi_rtbwarnlimit;/* limit for rt blks warnings */
|
||||
mutex_t qi_quotaofflock;/* to serialize quotaoff */
|
||||
struct mutex qi_quotaofflock;/* to serialize quotaoff */
|
||||
xfs_filblks_t qi_dqchunklen; /* # BBs in a chunk of dqs */
|
||||
uint qi_dqperchunk; /* # ondisk dqs in above chunk */
|
||||
xfs_qcnt_t qi_bhardlimit; /* default data blk hard limit */
|
||||
|
@ -158,11 +158,6 @@ typedef struct xfs_dquot_acct {
|
|||
#define XFS_QM_IWARNLIMIT 5
|
||||
#define XFS_QM_RTBWARNLIMIT 5
|
||||
|
||||
#define XFS_QM_LOCK(xqm) (mutex_lock(&xqm##_lock))
|
||||
#define XFS_QM_UNLOCK(xqm) (mutex_unlock(&xqm##_lock))
|
||||
#define XFS_QM_HOLD(xqm) ((xqm)->qm_nrefs++)
|
||||
#define XFS_QM_RELE(xqm) ((xqm)->qm_nrefs--)
|
||||
|
||||
extern void xfs_qm_destroy_quotainfo(xfs_mount_t *);
|
||||
extern void xfs_qm_mount_quotas(xfs_mount_t *);
|
||||
extern int xfs_qm_quotacheck(xfs_mount_t *);
|
||||
|
@ -178,6 +173,16 @@ extern void xfs_qm_dqdetach(xfs_inode_t *);
|
|||
extern int xfs_qm_dqpurge_all(xfs_mount_t *, uint);
|
||||
extern void xfs_qm_dqrele_all_inodes(xfs_mount_t *, uint);
|
||||
|
||||
/* quota ops */
|
||||
extern int xfs_qm_scall_trunc_qfiles(xfs_mount_t *, uint);
|
||||
extern int xfs_qm_scall_getquota(xfs_mount_t *, xfs_dqid_t, uint,
|
||||
fs_disk_quota_t *);
|
||||
extern int xfs_qm_scall_setqlim(xfs_mount_t *, xfs_dqid_t, uint,
|
||||
fs_disk_quota_t *);
|
||||
extern int xfs_qm_scall_getqstat(xfs_mount_t *, fs_quota_stat_t *);
|
||||
extern int xfs_qm_scall_quotaon(xfs_mount_t *, uint);
|
||||
extern int xfs_qm_scall_quotaoff(xfs_mount_t *, uint);
|
||||
|
||||
/* vop stuff */
|
||||
extern int xfs_qm_vop_dqalloc(xfs_mount_t *, xfs_inode_t *,
|
||||
uid_t, gid_t, prid_t, uint,
|
||||
|
@ -194,11 +199,6 @@ extern int xfs_qm_vop_chown_reserve(xfs_trans_t *, xfs_inode_t *,
|
|||
/* list stuff */
|
||||
extern void xfs_qm_freelist_append(xfs_frlist_t *, xfs_dquot_t *);
|
||||
extern void xfs_qm_freelist_unlink(xfs_dquot_t *);
|
||||
extern int xfs_qm_freelist_lock_nowait(xfs_qm_t *);
|
||||
|
||||
/* system call interface */
|
||||
extern int xfs_qm_quotactl(struct xfs_mount *, int, int,
|
||||
xfs_caddr_t);
|
||||
|
||||
#ifdef DEBUG
|
||||
extern int xfs_qm_internalqcheck(xfs_mount_t *);
|
||||
|
|
|
@@ -235,7 +235,6 @@ struct xfs_qmops xfs_qmcore_xfs = {
    .xfs_dqvopchownresv = xfs_qm_vop_chown_reserve,
    .xfs_dqstatvfs      = xfs_qm_statvfs,
    .xfs_dqsync         = xfs_qm_sync,
    .xfs_quotactl       = xfs_qm_quotactl,
    .xfs_dqtrxops       = &xfs_trans_dquot_ops,
};
EXPORT_SYMBOL(xfs_qmcore_xfs);
@ -57,134 +57,15 @@
|
|||
# define qdprintk(s, args...) do { } while (0)
|
||||
#endif
|
||||
|
||||
STATIC int xfs_qm_scall_trunc_qfiles(xfs_mount_t *, uint);
|
||||
STATIC int xfs_qm_scall_getquota(xfs_mount_t *, xfs_dqid_t, uint,
|
||||
fs_disk_quota_t *);
|
||||
STATIC int xfs_qm_scall_getqstat(xfs_mount_t *, fs_quota_stat_t *);
|
||||
STATIC int xfs_qm_scall_setqlim(xfs_mount_t *, xfs_dqid_t, uint,
|
||||
fs_disk_quota_t *);
|
||||
STATIC int xfs_qm_scall_quotaon(xfs_mount_t *, uint);
|
||||
STATIC int xfs_qm_scall_quotaoff(xfs_mount_t *, uint, boolean_t);
|
||||
STATIC int xfs_qm_log_quotaoff(xfs_mount_t *, xfs_qoff_logitem_t **, uint);
|
||||
STATIC int xfs_qm_log_quotaoff_end(xfs_mount_t *, xfs_qoff_logitem_t *,
|
||||
uint);
|
||||
STATIC uint xfs_qm_import_flags(uint);
|
||||
STATIC uint xfs_qm_export_flags(uint);
|
||||
STATIC uint xfs_qm_import_qtype_flags(uint);
|
||||
STATIC uint xfs_qm_export_qtype_flags(uint);
|
||||
STATIC void xfs_qm_export_dquot(xfs_mount_t *, xfs_disk_dquot_t *,
|
||||
fs_disk_quota_t *);
|
||||
|
||||
|
||||
/*
|
||||
* The main distribution switch of all XFS quotactl system calls.
|
||||
*/
|
||||
int
|
||||
xfs_qm_quotactl(
|
||||
xfs_mount_t *mp,
|
||||
int cmd,
|
||||
int id,
|
||||
xfs_caddr_t addr)
|
||||
{
|
||||
int error;
|
||||
|
||||
ASSERT(addr != NULL || cmd == Q_XQUOTASYNC);
|
||||
|
||||
/*
|
||||
* The following commands are valid even when quotaoff.
|
||||
*/
|
||||
switch (cmd) {
|
||||
case Q_XQUOTARM:
|
||||
/*
|
||||
* Truncate quota files. quota must be off.
|
||||
*/
|
||||
if (XFS_IS_QUOTA_ON(mp))
|
||||
return XFS_ERROR(EINVAL);
|
||||
if (mp->m_flags & XFS_MOUNT_RDONLY)
|
||||
return XFS_ERROR(EROFS);
|
||||
return (xfs_qm_scall_trunc_qfiles(mp,
|
||||
xfs_qm_import_qtype_flags(*(uint *)addr)));
|
||||
|
||||
case Q_XGETQSTAT:
|
||||
/*
|
||||
* Get quota status information.
|
||||
*/
|
||||
return (xfs_qm_scall_getqstat(mp, (fs_quota_stat_t *)addr));
|
||||
|
||||
case Q_XQUOTAON:
|
||||
/*
|
||||
* QUOTAON - enabling quota enforcement.
|
||||
* Quota accounting must be turned on at mount time.
|
||||
*/
|
||||
if (mp->m_flags & XFS_MOUNT_RDONLY)
|
||||
return XFS_ERROR(EROFS);
|
||||
return (xfs_qm_scall_quotaon(mp,
|
||||
xfs_qm_import_flags(*(uint *)addr)));
|
||||
|
||||
case Q_XQUOTAOFF:
|
||||
if (mp->m_flags & XFS_MOUNT_RDONLY)
|
||||
return XFS_ERROR(EROFS);
|
||||
break;
|
||||
|
||||
case Q_XQUOTASYNC:
|
||||
return xfs_sync_inodes(mp, SYNC_DELWRI);
|
||||
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
if (! XFS_IS_QUOTA_ON(mp))
|
||||
return XFS_ERROR(ESRCH);
|
||||
|
||||
switch (cmd) {
|
||||
case Q_XQUOTAOFF:
|
||||
if (mp->m_flags & XFS_MOUNT_RDONLY)
|
||||
return XFS_ERROR(EROFS);
|
||||
error = xfs_qm_scall_quotaoff(mp,
|
||||
xfs_qm_import_flags(*(uint *)addr),
|
||||
B_FALSE);
|
||||
break;
|
||||
|
||||
case Q_XGETQUOTA:
|
||||
error = xfs_qm_scall_getquota(mp, (xfs_dqid_t)id, XFS_DQ_USER,
|
||||
(fs_disk_quota_t *)addr);
|
||||
break;
|
||||
case Q_XGETGQUOTA:
|
||||
error = xfs_qm_scall_getquota(mp, (xfs_dqid_t)id, XFS_DQ_GROUP,
|
||||
(fs_disk_quota_t *)addr);
|
||||
break;
|
||||
case Q_XGETPQUOTA:
|
||||
error = xfs_qm_scall_getquota(mp, (xfs_dqid_t)id, XFS_DQ_PROJ,
|
||||
(fs_disk_quota_t *)addr);
|
||||
break;
|
||||
|
||||
case Q_XSETQLIM:
|
||||
if (mp->m_flags & XFS_MOUNT_RDONLY)
|
||||
return XFS_ERROR(EROFS);
|
||||
error = xfs_qm_scall_setqlim(mp, (xfs_dqid_t)id, XFS_DQ_USER,
|
||||
(fs_disk_quota_t *)addr);
|
||||
break;
|
||||
case Q_XSETGQLIM:
|
||||
if (mp->m_flags & XFS_MOUNT_RDONLY)
|
||||
return XFS_ERROR(EROFS);
|
||||
error = xfs_qm_scall_setqlim(mp, (xfs_dqid_t)id, XFS_DQ_GROUP,
|
||||
(fs_disk_quota_t *)addr);
|
||||
break;
|
||||
case Q_XSETPQLIM:
|
||||
if (mp->m_flags & XFS_MOUNT_RDONLY)
|
||||
return XFS_ERROR(EROFS);
|
||||
error = xfs_qm_scall_setqlim(mp, (xfs_dqid_t)id, XFS_DQ_PROJ,
|
||||
(fs_disk_quota_t *)addr);
|
||||
break;
|
||||
|
||||
default:
|
||||
error = XFS_ERROR(EINVAL);
|
||||
break;
|
||||
}
|
||||
|
||||
return (error);
|
||||
}
|
||||
|
||||
/*
|
||||
* Turn off quota accounting and/or enforcement for all udquots and/or
|
||||
* gdquots. Called only at unmount time.
|
||||
|
@ -193,11 +74,10 @@ xfs_qm_quotactl(
|
|||
* incore, and modifies the ondisk dquot directly. Therefore, for example,
|
||||
* it is an error to call this twice, without purging the cache.
|
||||
*/
|
||||
STATIC int
|
||||
int
|
||||
xfs_qm_scall_quotaoff(
|
||||
xfs_mount_t *mp,
|
||||
uint flags,
|
||||
boolean_t force)
|
||||
uint flags)
|
||||
{
|
||||
uint dqtype;
|
||||
int error;
|
||||
|
@ -205,8 +85,6 @@ xfs_qm_scall_quotaoff(
|
|||
xfs_qoff_logitem_t *qoffstart;
|
||||
int nculprits;
|
||||
|
||||
if (!force && !capable(CAP_SYS_ADMIN))
|
||||
return XFS_ERROR(EPERM);
|
||||
/*
|
||||
* No file system can have quotas enabled on disk but not in core.
|
||||
* Note that quota utilities (like quotaoff) _expect_
|
||||
|
@ -375,7 +253,7 @@ xfs_qm_scall_quotaoff(
|
|||
return (error);
|
||||
}
|
||||
|
||||
STATIC int
|
||||
int
|
||||
xfs_qm_scall_trunc_qfiles(
|
||||
xfs_mount_t *mp,
|
||||
uint flags)
|
||||
|
@ -383,8 +261,6 @@ xfs_qm_scall_trunc_qfiles(
|
|||
int error = 0, error2 = 0;
|
||||
xfs_inode_t *qip;
|
||||
|
||||
if (!capable(CAP_SYS_ADMIN))
|
||||
return XFS_ERROR(EPERM);
|
||||
if (!xfs_sb_version_hasquota(&mp->m_sb) || flags == 0) {
|
||||
qdprintk("qtrunc flags=%x m_qflags=%x\n", flags, mp->m_qflags);
|
||||
return XFS_ERROR(EINVAL);
|
||||
|
@ -416,7 +292,7 @@ xfs_qm_scall_trunc_qfiles(
|
|||
* effect immediately.
|
||||
* (Switching on quota accounting must be done at mount time.)
|
||||
*/
|
||||
STATIC int
|
||||
int
|
||||
xfs_qm_scall_quotaon(
|
||||
xfs_mount_t *mp,
|
||||
uint flags)
|
||||
|
@ -426,9 +302,6 @@ xfs_qm_scall_quotaon(
|
|||
uint accflags;
|
||||
__int64_t sbflags;
|
||||
|
||||
if (!capable(CAP_SYS_ADMIN))
|
||||
return XFS_ERROR(EPERM);
|
||||
|
||||
flags &= (XFS_ALL_QUOTA_ACCT | XFS_ALL_QUOTA_ENFD);
|
||||
/*
|
||||
* Switching on quota accounting must be done at mount time.
|
||||
|
@ -517,7 +390,7 @@ xfs_qm_scall_quotaon(
|
|||
/*
|
||||
* Return quota status information, such as uquota-off, enforcements, etc.
|
||||
*/
|
||||
STATIC int
|
||||
int
|
||||
xfs_qm_scall_getqstat(
|
||||
xfs_mount_t *mp,
|
||||
fs_quota_stat_t *out)
|
||||
|
@ -582,7 +455,7 @@ xfs_qm_scall_getqstat(
|
|||
/*
|
||||
* Adjust quota limits, and start/stop timers accordingly.
|
||||
*/
|
||||
STATIC int
|
||||
int
|
||||
xfs_qm_scall_setqlim(
|
||||
xfs_mount_t *mp,
|
||||
xfs_dqid_t id,
|
||||
|
@ -595,9 +468,6 @@ xfs_qm_scall_setqlim(
|
|||
int error;
|
||||
xfs_qcnt_t hard, soft;
|
||||
|
||||
if (!capable(CAP_SYS_ADMIN))
|
||||
return XFS_ERROR(EPERM);
|
||||
|
||||
if ((newlim->d_fieldmask &
|
||||
(FS_DQ_LIMIT_MASK|FS_DQ_TIMER_MASK|FS_DQ_WARNS_MASK)) == 0)
|
||||
return (0);
|
||||
|
@ -742,7 +612,7 @@ xfs_qm_scall_setqlim(
|
|||
return error;
|
||||
}
|
||||
|
||||
STATIC int
|
||||
int
|
||||
xfs_qm_scall_getquota(
|
||||
xfs_mount_t *mp,
|
||||
xfs_dqid_t id,
|
||||
|
@ -934,30 +804,6 @@ xfs_qm_export_dquot(
|
|||
#endif
|
||||
}
|
||||
|
||||
STATIC uint
|
||||
xfs_qm_import_qtype_flags(
|
||||
uint uflags)
|
||||
{
|
||||
uint oflags = 0;
|
||||
|
||||
/*
|
||||
* Can't be more than one, or none.
|
||||
*/
|
||||
if (((uflags & (XFS_GROUP_QUOTA | XFS_USER_QUOTA)) ==
|
||||
(XFS_GROUP_QUOTA | XFS_USER_QUOTA)) ||
|
||||
((uflags & (XFS_GROUP_QUOTA | XFS_PROJ_QUOTA)) ==
|
||||
(XFS_GROUP_QUOTA | XFS_PROJ_QUOTA)) ||
|
||||
((uflags & (XFS_USER_QUOTA | XFS_PROJ_QUOTA)) ==
|
||||
(XFS_USER_QUOTA | XFS_PROJ_QUOTA)) ||
|
||||
((uflags & (XFS_GROUP_QUOTA|XFS_USER_QUOTA|XFS_PROJ_QUOTA)) == 0))
|
||||
return (0);
|
||||
|
||||
oflags |= (uflags & XFS_USER_QUOTA) ? XFS_DQ_USER : 0;
|
||||
oflags |= (uflags & XFS_PROJ_QUOTA) ? XFS_DQ_PROJ : 0;
|
||||
oflags |= (uflags & XFS_GROUP_QUOTA) ? XFS_DQ_GROUP: 0;
|
||||
return oflags;
|
||||
}
|
||||
|
||||
STATIC uint
|
||||
xfs_qm_export_qtype_flags(
|
||||
uint flags)
|
||||
|
@ -978,26 +824,6 @@ xfs_qm_export_qtype_flags(
|
|||
XFS_PROJ_QUOTA : XFS_GROUP_QUOTA;
|
||||
}
|
||||
|
||||
STATIC uint
|
||||
xfs_qm_import_flags(
|
||||
uint uflags)
|
||||
{
|
||||
uint flags = 0;
|
||||
|
||||
if (uflags & XFS_QUOTA_UDQ_ACCT)
|
||||
flags |= XFS_UQUOTA_ACCT;
|
||||
if (uflags & XFS_QUOTA_PDQ_ACCT)
|
||||
flags |= XFS_PQUOTA_ACCT;
|
||||
if (uflags & XFS_QUOTA_GDQ_ACCT)
|
||||
flags |= XFS_GQUOTA_ACCT;
|
||||
if (uflags & XFS_QUOTA_UDQ_ENFD)
|
||||
flags |= XFS_UQUOTA_ENFD;
|
||||
if (uflags & (XFS_QUOTA_PDQ_ENFD|XFS_QUOTA_GDQ_ENFD))
|
||||
flags |= XFS_OQUOTA_ENFD;
|
||||
return (flags);
|
||||
}
|
||||
|
||||
|
||||
STATIC uint
|
||||
xfs_qm_export_flags(
|
||||
uint flags)
|
||||
|
@ -1134,7 +960,7 @@ xfs_dqhash_t *qmtest_udqtab;
|
|||
xfs_dqhash_t *qmtest_gdqtab;
|
||||
int qmtest_hashmask;
|
||||
int qmtest_nfails;
|
||||
mutex_t qcheck_lock;
|
||||
struct mutex qcheck_lock;
|
||||
|
||||
#define DQTEST_HASHVAL(mp, id) (((__psunsigned_t)(mp) + \
|
||||
(__psunsigned_t)(id)) & \
|
||||
|
|
|
@ -42,34 +42,24 @@
|
|||
#define XFS_QI_QOFFLOCK(mp) ((mp)->m_quotainfo->qi_quotaofflock)
|
||||
|
||||
#define XFS_QI_MPL_LIST(mp) ((mp)->m_quotainfo->qi_dqlist)
|
||||
#define XFS_QI_MPLLOCK(mp) ((mp)->m_quotainfo->qi_dqlist.qh_lock)
|
||||
#define XFS_QI_MPLNEXT(mp) ((mp)->m_quotainfo->qi_dqlist.qh_next)
|
||||
#define XFS_QI_MPLNDQUOTS(mp) ((mp)->m_quotainfo->qi_dqlist.qh_nelems)
|
||||
|
||||
#define XQMLCK(h) (mutex_lock(&((h)->qh_lock)))
|
||||
#define XQMUNLCK(h) (mutex_unlock(&((h)->qh_lock)))
|
||||
#ifdef DEBUG
|
||||
struct xfs_dqhash;
|
||||
static inline int XQMISLCKD(struct xfs_dqhash *h)
|
||||
{
|
||||
if (mutex_trylock(&h->qh_lock)) {
|
||||
mutex_unlock(&h->qh_lock);
|
||||
return 0;
|
||||
}
|
||||
return 1;
|
||||
}
|
||||
#endif
|
||||
#define xfs_qm_mplist_lock(mp) \
|
||||
mutex_lock(&(XFS_QI_MPL_LIST(mp).qh_lock))
|
||||
#define xfs_qm_mplist_nowait(mp) \
|
||||
mutex_trylock(&(XFS_QI_MPL_LIST(mp).qh_lock))
|
||||
#define xfs_qm_mplist_unlock(mp) \
|
||||
mutex_unlock(&(XFS_QI_MPL_LIST(mp).qh_lock))
|
||||
#define XFS_QM_IS_MPLIST_LOCKED(mp) \
|
||||
mutex_is_locked(&(XFS_QI_MPL_LIST(mp).qh_lock))
|
||||
|
||||
#define XFS_DQ_HASH_LOCK(h) XQMLCK(h)
|
||||
#define XFS_DQ_HASH_UNLOCK(h) XQMUNLCK(h)
|
||||
#define XFS_DQ_IS_HASH_LOCKED(h) XQMISLCKD(h)
|
||||
|
||||
#define xfs_qm_mplist_lock(mp) XQMLCK(&(XFS_QI_MPL_LIST(mp)))
|
||||
#define xfs_qm_mplist_unlock(mp) XQMUNLCK(&(XFS_QI_MPL_LIST(mp)))
|
||||
#define XFS_QM_IS_MPLIST_LOCKED(mp) XQMISLCKD(&(XFS_QI_MPL_LIST(mp)))
|
||||
|
||||
#define xfs_qm_freelist_lock(qm) XQMLCK(&((qm)->qm_dqfreelist))
|
||||
#define xfs_qm_freelist_unlock(qm) XQMUNLCK(&((qm)->qm_dqfreelist))
|
||||
#define xfs_qm_freelist_lock(qm) \
|
||||
mutex_lock(&((qm)->qm_dqfreelist.qh_lock))
|
||||
#define xfs_qm_freelist_lock_nowait(qm) \
|
||||
mutex_trylock(&((qm)->qm_dqfreelist.qh_lock))
|
||||
#define xfs_qm_freelist_unlock(qm) \
|
||||
mutex_unlock(&((qm)->qm_dqfreelist.qh_lock))
|
||||
|
||||
/*
|
||||
* Hash into a bucket in the dquot hash table, based on <mp, id>.
@@ -624,10 +624,9 @@ xfs_trans_dqresv(
|
|||
xfs_qcnt_t *resbcountp;
|
||||
xfs_quotainfo_t *q = mp->m_quotainfo;
|
||||
|
||||
if (! (flags & XFS_QMOPT_DQLOCK)) {
|
||||
xfs_dqlock(dqp);
|
||||
}
|
||||
ASSERT(XFS_DQ_IS_LOCKED(dqp));
|
||||
|
||||
xfs_dqlock(dqp);
|
||||
|
||||
if (flags & XFS_TRANS_DQ_RES_BLKS) {
|
||||
hardlimit = be64_to_cpu(dqp->q_core.d_blk_hardlimit);
|
||||
if (!hardlimit)
|
||||
|
@ -740,10 +739,8 @@ xfs_trans_dqresv(
|
|||
ASSERT(dqp->q_res_icount >= be64_to_cpu(dqp->q_core.d_icount));
|
||||
|
||||
error_return:
|
||||
if (! (flags & XFS_QMOPT_DQLOCK)) {
|
||||
xfs_dqunlock(dqp);
|
||||
}
|
||||
return (error);
|
||||
xfs_dqunlock(dqp);
|
||||
return error;
|
||||
}
|
||||
|
||||
|
||||
|
@ -753,8 +750,7 @@ xfs_trans_dqresv(
|
|||
* grp/prj quotas is important, because this follows a both-or-nothing
|
||||
* approach.
|
||||
*
|
||||
* flags = XFS_QMOPT_DQLOCK indicate if dquot(s) need to be locked.
|
||||
* XFS_QMOPT_FORCE_RES evades limit enforcement. Used by chown.
|
||||
* flags = XFS_QMOPT_FORCE_RES evades limit enforcement. Used by chown.
|
||||
* XFS_QMOPT_ENOSPC returns ENOSPC not EDQUOT. Used by pquota.
|
||||
* XFS_TRANS_DQ_RES_BLKS reserves regular disk blocks
|
||||
* XFS_TRANS_DQ_RES_RTBLKS reserves realtime disk blocks
@@ -24,6 +24,7 @@
|
|||
#include "xfs_ag.h"
|
||||
#include "xfs_dmapi.h"
|
||||
#include "xfs_mount.h"
|
||||
#include "xfs_error.h"
|
||||
|
||||
static char message[1024]; /* keep it off the stack */
|
||||
static DEFINE_SPINLOCK(xfs_err_lock);
@@ -17,10 +17,6 @@
*/
#include <xfs.h>

static DEFINE_MUTEX(uuid_monitor);
static int uuid_table_size;
static uuid_t *uuid_table;

/* IRIX interpretation of an uuid_t */
typedef struct {
__be32 uu_timelow;
@@ -46,12 +42,6 @@ uuid_getnodeuniq(uuid_t *uuid, int fsid [2])
fsid[1] = be32_to_cpu(uup->uu_timelow);
}

void
uuid_create_nil(uuid_t *uuid)
{
memset(uuid, 0, sizeof(*uuid));
}

int
uuid_is_nil(uuid_t *uuid)
{
@@ -71,64 +61,3 @@ uuid_equal(uuid_t *uuid1, uuid_t *uuid2)
|
|||
{
|
||||
return memcmp(uuid1, uuid2, sizeof(uuid_t)) ? 0 : 1;
|
||||
}
|
||||
|
||||
/*
|
||||
* Given a 128-bit uuid, return a 64-bit value by adding the top and bottom
|
||||
* 64-bit words. NOTE: This function can not be changed EVER. Although
|
||||
* brain-dead, some applications depend on this 64-bit value remaining
|
||||
* persistent. Specifically, DMI vendors store the value as a persistent
|
||||
* filehandle.
|
||||
*/
|
||||
__uint64_t
|
||||
uuid_hash64(uuid_t *uuid)
|
||||
{
|
||||
__uint64_t *sp = (__uint64_t *)uuid;
|
||||
|
||||
return sp[0] + sp[1];
|
||||
}
|
||||
|
||||
int
|
||||
uuid_table_insert(uuid_t *uuid)
|
||||
{
|
||||
int i, hole;
|
||||
|
||||
mutex_lock(&uuid_monitor);
|
||||
for (i = 0, hole = -1; i < uuid_table_size; i++) {
|
||||
if (uuid_is_nil(&uuid_table[i])) {
|
||||
hole = i;
|
||||
continue;
|
||||
}
|
||||
if (uuid_equal(uuid, &uuid_table[i])) {
|
||||
mutex_unlock(&uuid_monitor);
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
if (hole < 0) {
|
||||
uuid_table = kmem_realloc(uuid_table,
|
||||
(uuid_table_size + 1) * sizeof(*uuid_table),
|
||||
uuid_table_size * sizeof(*uuid_table),
|
||||
KM_SLEEP);
|
||||
hole = uuid_table_size++;
|
||||
}
|
||||
uuid_table[hole] = *uuid;
|
||||
mutex_unlock(&uuid_monitor);
|
||||
return 1;
|
||||
}
|
||||
|
||||
void
|
||||
uuid_table_remove(uuid_t *uuid)
|
||||
{
|
||||
int i;
|
||||
|
||||
mutex_lock(&uuid_monitor);
|
||||
for (i = 0; i < uuid_table_size; i++) {
|
||||
if (uuid_is_nil(&uuid_table[i]))
|
||||
continue;
|
||||
if (!uuid_equal(uuid, &uuid_table[i]))
|
||||
continue;
|
||||
uuid_create_nil(&uuid_table[i]);
|
||||
break;
|
||||
}
|
||||
ASSERT(i < uuid_table_size);
|
||||
mutex_unlock(&uuid_monitor);
|
||||
}
|
||||
|
|
|
@ -22,12 +22,8 @@ typedef struct {
|
|||
unsigned char __u_bits[16];
|
||||
} uuid_t;
|
||||
|
||||
extern void uuid_create_nil(uuid_t *uuid);
|
||||
extern int uuid_is_nil(uuid_t *uuid);
|
||||
extern int uuid_equal(uuid_t *uuid1, uuid_t *uuid2);
|
||||
extern void uuid_getnodeuniq(uuid_t *uuid, int fsid [2]);
|
||||
extern __uint64_t uuid_hash64(uuid_t *uuid);
|
||||
extern int uuid_table_insert(uuid_t *uuid);
|
||||
extern void uuid_table_remove(uuid_t *uuid);
|
||||
|
||||
#endif /* __XFS_SUPPORT_UUID_H__ */
@@ -223,8 +223,8 @@ typedef struct xfs_perag
|
|||
be32_to_cpu((a)->agf_levels[XFS_BTNUM_CNTi]), mp))
|
||||
#define XFS_MIN_FREELIST_PAG(pag,mp) \
|
||||
(XFS_MIN_FREELIST_RAW( \
|
||||
(uint_t)(pag)->pagf_levels[XFS_BTNUM_BNOi], \
|
||||
(uint_t)(pag)->pagf_levels[XFS_BTNUM_CNTi], mp))
|
||||
(unsigned int)(pag)->pagf_levels[XFS_BTNUM_BNOi], \
|
||||
(unsigned int)(pag)->pagf_levels[XFS_BTNUM_CNTi], mp))
|
||||
|
||||
#define XFS_AGB_TO_FSB(mp,agno,agbno) \
|
||||
(((xfs_fsblock_t)(agno) << (mp)->m_sb.sb_agblklog) | (agbno))
@@ -1871,6 +1871,25 @@ xfs_alloc_compute_maxlevels(
mp->m_ag_maxlevels = level;
}

/*
* Find the length of the longest extent in an AG.
*/
xfs_extlen_t
xfs_alloc_longest_free_extent(
struct xfs_mount *mp,
struct xfs_perag *pag)
{
xfs_extlen_t need, delta = 0;

need = XFS_MIN_FREELIST_PAG(pag, mp);
if (need > pag->pagf_flcount)
delta = need - pag->pagf_flcount;

if (pag->pagf_longest > delta)
return pag->pagf_longest - delta;
return pag->pagf_flcount > 0 || pag->pagf_longest > 0;
}

/*
* Decide whether to use this allocation group for this allocation.
* If so, fix up the btree freelist's size.
@@ -1923,15 +1942,12 @@ xfs_alloc_fix_freelist(
}

if (!(flags & XFS_ALLOC_FLAG_FREEING)) {
need = XFS_MIN_FREELIST_PAG(pag, mp);
delta = need > pag->pagf_flcount ? need - pag->pagf_flcount : 0;
/*
* If it looks like there isn't a long enough extent, or enough
* total blocks, reject it.
*/
longest = (pag->pagf_longest > delta) ?
(pag->pagf_longest - delta) :
(pag->pagf_flcount > 0 || pag->pagf_longest > 0);
need = XFS_MIN_FREELIST_PAG(pag, mp);
longest = xfs_alloc_longest_free_extent(mp, pag);
if ((args->minlen + args->alignment + args->minalignslop - 1) >
longest ||
((int)(pag->pagf_freeblks + pag->pagf_flcount -

@@ -100,6 +100,12 @@ typedef struct xfs_alloc_arg {
#define XFS_ALLOC_USERDATA 1 /* allocation is for user data*/
#define XFS_ALLOC_INITIAL_USER_DATA 2 /* special case start of file */

/*
* Find the length of the longest extent in an AG.
*/
xfs_extlen_t
xfs_alloc_longest_free_extent(struct xfs_mount *mp,
struct xfs_perag *pag);

#ifdef __KERNEL__

@@ -155,7 +155,8 @@ xfs_attr_shortform_bytesfit(xfs_inode_t *dp, int bytes)
|
|||
* minimum offset only needs to be the space required for
|
||||
* the btree root.
|
||||
*/
|
||||
if (!dp->i_d.di_forkoff && dp->i_df.if_bytes > mp->m_attroffset)
|
||||
if (!dp->i_d.di_forkoff && dp->i_df.if_bytes >
|
||||
xfs_default_attroffset(dp))
|
||||
dsize = XFS_BMDR_SPACE_CALC(MINDBTPTRS);
|
||||
break;
@@ -297,6 +298,26 @@ xfs_attr_shortform_add(xfs_da_args_t *args, int forkoff)
xfs_sbversion_add_attr2(mp, args->trans);
}

/*
* After the last attribute is removed revert to original inode format,
* making all literal area available to the data fork once more.
*/
STATIC void
xfs_attr_fork_reset(
struct xfs_inode *ip,
struct xfs_trans *tp)
{
xfs_idestroy_fork(ip, XFS_ATTR_FORK);
ip->i_d.di_forkoff = 0;
ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;

ASSERT(ip->i_d.di_anextents == 0);
ASSERT(ip->i_afp == NULL);

ip->i_df.if_ext_max = XFS_IFORK_DSIZE(ip) / sizeof(xfs_bmbt_rec_t);
xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
}

/*
* Remove an attribute from the shortform attribute list structure.
*/
@@ -344,22 +365,10 @@ xfs_attr_shortform_remove(xfs_da_args_t *args)
|
|||
*/
|
||||
totsize -= size;
|
||||
if (totsize == sizeof(xfs_attr_sf_hdr_t) &&
|
||||
!(args->op_flags & XFS_DA_OP_ADDNAME) &&
|
||||
(mp->m_flags & XFS_MOUNT_ATTR2) &&
|
||||
(dp->i_d.di_format != XFS_DINODE_FMT_BTREE)) {
|
||||
/*
|
||||
* Last attribute now removed, revert to original
|
||||
* inode format making all literal area available
|
||||
* to the data fork once more.
|
||||
*/
|
||||
xfs_idestroy_fork(dp, XFS_ATTR_FORK);
|
||||
dp->i_d.di_forkoff = 0;
|
||||
dp->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
|
||||
ASSERT(dp->i_d.di_anextents == 0);
|
||||
ASSERT(dp->i_afp == NULL);
|
||||
dp->i_df.if_ext_max =
|
||||
XFS_IFORK_DSIZE(dp) / (uint)sizeof(xfs_bmbt_rec_t);
|
||||
xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE);
|
||||
(mp->m_flags & XFS_MOUNT_ATTR2) &&
|
||||
(dp->i_d.di_format != XFS_DINODE_FMT_BTREE) &&
|
||||
!(args->op_flags & XFS_DA_OP_ADDNAME)) {
|
||||
xfs_attr_fork_reset(dp, args->trans);
|
||||
} else {
|
||||
xfs_idata_realloc(dp, -size, XFS_ATTR_FORK);
|
||||
dp->i_d.di_forkoff = xfs_attr_shortform_bytesfit(dp, totsize);
|
||||
|
@ -786,20 +795,7 @@ xfs_attr_leaf_to_shortform(xfs_dabuf_t *bp, xfs_da_args_t *args, int forkoff)
|
|||
if (forkoff == -1) {
|
||||
ASSERT(dp->i_mount->m_flags & XFS_MOUNT_ATTR2);
|
||||
ASSERT(dp->i_d.di_format != XFS_DINODE_FMT_BTREE);
|
||||
|
||||
/*
|
||||
* Last attribute was removed, revert to original
|
||||
* inode format making all literal area available
|
||||
* to the data fork once more.
|
||||
*/
|
||||
xfs_idestroy_fork(dp, XFS_ATTR_FORK);
|
||||
dp->i_d.di_forkoff = 0;
|
||||
dp->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
|
||||
ASSERT(dp->i_d.di_anextents == 0);
|
||||
ASSERT(dp->i_afp == NULL);
|
||||
dp->i_df.if_ext_max =
|
||||
XFS_IFORK_DSIZE(dp) / (uint)sizeof(xfs_bmbt_rec_t);
|
||||
xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE);
|
||||
xfs_attr_fork_reset(dp, args->trans);
|
||||
goto out;
|
||||
}
|
||||
|
||||
@@ -2479,7 +2479,7 @@ xfs_bmap_adjacent(
|
|||
fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, ap->firstblock);
|
||||
/*
|
||||
* If allocating at eof, and there's a previous real block,
|
||||
* try to use it's last block as our starting point.
|
||||
* try to use its last block as our starting point.
|
||||
*/
|
||||
if (ap->eof && ap->prevp->br_startoff != NULLFILEOFF &&
|
||||
!isnullstartblock(ap->prevp->br_startblock) &&
|
||||
|
@ -2712,9 +2712,6 @@ xfs_bmap_btalloc(
|
|||
xfs_agnumber_t startag;
|
||||
xfs_alloc_arg_t args;
|
||||
xfs_extlen_t blen;
|
||||
xfs_extlen_t delta;
|
||||
xfs_extlen_t longest;
|
||||
xfs_extlen_t need;
|
||||
xfs_extlen_t nextminlen = 0;
|
||||
xfs_perag_t *pag;
|
||||
int nullfb; /* true if ap->firstblock isn't set */
|
||||
|
@ -2796,13 +2793,8 @@ xfs_bmap_btalloc(
|
|||
* See xfs_alloc_fix_freelist...
|
||||
*/
|
||||
if (pag->pagf_init) {
|
||||
need = XFS_MIN_FREELIST_PAG(pag, mp);
|
||||
delta = need > pag->pagf_flcount ?
|
||||
need - pag->pagf_flcount : 0;
|
||||
longest = (pag->pagf_longest > delta) ?
|
||||
(pag->pagf_longest - delta) :
|
||||
(pag->pagf_flcount > 0 ||
|
||||
pag->pagf_longest > 0);
|
||||
xfs_extlen_t longest;
|
||||
longest = xfs_alloc_longest_free_extent(mp, pag);
|
||||
if (blen < longest)
|
||||
blen = longest;
|
||||
} else
|
||||
|
@ -3576,6 +3568,27 @@ xfs_bmap_extents_to_btree(
|
|||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Calculate the default attribute fork offset for newly created inodes.
|
||||
*/
|
||||
uint
|
||||
xfs_default_attroffset(
|
||||
struct xfs_inode *ip)
|
||||
{
|
||||
struct xfs_mount *mp = ip->i_mount;
|
||||
uint offset;
|
||||
|
||||
if (mp->m_sb.sb_inodesize == 256) {
|
||||
offset = XFS_LITINO(mp) -
|
||||
XFS_BMDR_SPACE_CALC(MINABTPTRS);
|
||||
} else {
|
||||
offset = XFS_BMDR_SPACE_CALC(6 * MINABTPTRS);
|
||||
}
|
||||
|
||||
ASSERT(offset < XFS_LITINO(mp));
|
||||
return offset;
|
||||
}
|
||||
|
||||
/*
|
||||
* Helper routine to reset inode di_forkoff field when switching
|
||||
* attribute fork from local to extent format - we reset it where
|
||||
|
@ -3588,15 +3601,18 @@ xfs_bmap_forkoff_reset(
|
|||
int whichfork)
|
||||
{
|
||||
if (whichfork == XFS_ATTR_FORK &&
|
||||
(ip->i_d.di_format != XFS_DINODE_FMT_DEV) &&
|
||||
(ip->i_d.di_format != XFS_DINODE_FMT_UUID) &&
|
||||
(ip->i_d.di_format != XFS_DINODE_FMT_BTREE) &&
|
||||
((mp->m_attroffset >> 3) > ip->i_d.di_forkoff)) {
|
||||
ip->i_d.di_forkoff = mp->m_attroffset >> 3;
|
||||
ip->i_df.if_ext_max = XFS_IFORK_DSIZE(ip) /
|
||||
(uint)sizeof(xfs_bmbt_rec_t);
|
||||
ip->i_afp->if_ext_max = XFS_IFORK_ASIZE(ip) /
|
||||
(uint)sizeof(xfs_bmbt_rec_t);
|
||||
ip->i_d.di_format != XFS_DINODE_FMT_DEV &&
|
||||
ip->i_d.di_format != XFS_DINODE_FMT_UUID &&
|
||||
ip->i_d.di_format != XFS_DINODE_FMT_BTREE) {
|
||||
uint dfl_forkoff = xfs_default_attroffset(ip) >> 3;
|
||||
|
||||
if (dfl_forkoff > ip->i_d.di_forkoff) {
|
||||
ip->i_d.di_forkoff = dfl_forkoff;
|
||||
ip->i_df.if_ext_max =
|
||||
XFS_IFORK_DSIZE(ip) / sizeof(xfs_bmbt_rec_t);
|
||||
ip->i_afp->if_ext_max =
|
||||
XFS_IFORK_ASIZE(ip) / sizeof(xfs_bmbt_rec_t);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -4065,7 +4081,7 @@ xfs_bmap_add_attrfork(
|
|||
case XFS_DINODE_FMT_BTREE:
|
||||
ip->i_d.di_forkoff = xfs_attr_shortform_bytesfit(ip, size);
|
||||
if (!ip->i_d.di_forkoff)
|
||||
ip->i_d.di_forkoff = mp->m_attroffset >> 3;
|
||||
ip->i_d.di_forkoff = xfs_default_attroffset(ip) >> 3;
|
||||
else if (mp->m_flags & XFS_MOUNT_ATTR2)
|
||||
version = 2;
|
||||
break;
|
||||
|
@ -4212,12 +4228,12 @@ xfs_bmap_compute_maxlevels(
|
|||
* (a signed 16-bit number, xfs_aextnum_t).
|
||||
*
|
||||
* Note that we can no longer assume that if we are in ATTR1 that
|
||||
* the fork offset of all the inodes will be (m_attroffset >> 3)
|
||||
* because we could have mounted with ATTR2 and then mounted back
|
||||
* with ATTR1, keeping the di_forkoff's fixed but probably at
|
||||
* various positions. Therefore, for both ATTR1 and ATTR2
|
||||
* we have to assume the worst case scenario of a minimum size
|
||||
* available.
|
||||
* the fork offset of all the inodes will be
|
||||
* (xfs_default_attroffset(ip) >> 3) because we could have mounted
|
||||
* with ATTR2 and then mounted back with ATTR1, keeping the
|
||||
* di_forkoff's fixed but probably at various positions. Therefore,
|
||||
* for both ATTR1 and ATTR2 we have to assume the worst case scenario
|
||||
* of a minimum size available.
|
||||
*/
|
||||
if (whichfork == XFS_DATA_FORK) {
|
||||
maxleafents = MAXEXTNUM;
|
||||
|
@ -4804,7 +4820,7 @@ xfs_bmapi(
|
|||
xfs_extlen_t minlen; /* min allocation size */
|
||||
xfs_mount_t *mp; /* xfs mount structure */
|
||||
int n; /* current extent index */
|
||||
int nallocs; /* number of extents alloc\'d */
|
||||
int nallocs; /* number of extents alloc'd */
|
||||
xfs_extnum_t nextents; /* number of extents in file */
|
||||
xfs_fileoff_t obno; /* old block number (offset) */
|
||||
xfs_bmbt_irec_t prev; /* previous file extent record */
|
||||
|
@ -6204,7 +6220,7 @@ xfs_bmap_get_bp(
|
|||
return(bp);
|
||||
}
|
||||
|
||||
void
|
||||
STATIC void
|
||||
xfs_check_block(
|
||||
struct xfs_btree_block *block,
|
||||
xfs_mount_t *mp,
|
||||
|
@ -6494,7 +6510,7 @@ xfs_bmap_count_tree(
|
|||
block = XFS_BUF_TO_BLOCK(bp);
|
||||
|
||||
if (--level) {
|
||||
/* Not at node above leafs, count this level of nodes */
|
||||
/* Not at node above leaves, count this level of nodes */
|
||||
nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
|
||||
while (nextbno != NULLFSBLOCK) {
|
||||
if ((error = xfs_btree_read_bufl(mp, tp, nextbno,
|
||||
|
|
|
@ -125,7 +125,7 @@ typedef struct xfs_bmalloca {
|
|||
struct xfs_bmbt_irec *gotp; /* extent after, or delayed */
|
||||
xfs_extlen_t alen; /* i/o length asked/allocated */
|
||||
xfs_extlen_t total; /* total blocks needed for xaction */
|
||||
xfs_extlen_t minlen; /* mininum allocation size (blocks) */
|
||||
xfs_extlen_t minlen; /* minimum allocation size (blocks) */
|
||||
xfs_extlen_t minleft; /* amount must be left after alloc */
|
||||
char eof; /* set if allocating past last extent */
|
||||
char wasdel; /* replacing a delayed allocation */
|
||||
|
@ -338,6 +338,10 @@ xfs_check_nostate_extents(
|
|||
xfs_extnum_t idx,
|
||||
xfs_extnum_t num);
|
||||
|
||||
uint
|
||||
xfs_default_attroffset(
|
||||
struct xfs_inode *ip);
|
||||
|
||||
#ifdef __KERNEL__
|
||||
|
||||
/*
|
||||
|
|
|
@ -1883,7 +1883,7 @@ xfs_btree_lshift(
|
|||
|
||||
/*
|
||||
* We add one entry to the left side and remove one for the right side.
|
||||
* Accout for it here, the changes will be updated on disk and logged
|
||||
* Account for it here, the changes will be updated on disk and logged
|
||||
* later.
|
||||
*/
|
||||
lrecs++;
|
||||
|
@ -3535,7 +3535,7 @@ xfs_btree_delrec(
|
|||
XFS_BTREE_STATS_INC(cur, join);
|
||||
|
||||
/*
|
||||
* Fix up the the number of records and right block pointer in the
|
||||
* Fix up the number of records and right block pointer in the
|
||||
* surviving block, and log it.
|
||||
*/
|
||||
xfs_btree_set_numrecs(left, lrecs + rrecs);
|
||||
|
|
|
@ -41,7 +41,7 @@ extern kmem_zone_t *xfs_btree_cur_zone;
|
|||
/*
|
||||
* Generic btree header.
|
||||
*
|
||||
* This is a comination of the actual format used on disk for short and long
|
||||
* This is a combination of the actual format used on disk for short and long
|
||||
* format btrees. The first three fields are shared by both format, but
|
||||
* the pointers are different and should be used with care.
|
||||
*
|
||||
|
|
|
@ -1503,7 +1503,7 @@ xfs_da_path_shift(xfs_da_state_t *state, xfs_da_state_path_t *path,
|
|||
* This is implemented with some source-level loop unrolling.
|
||||
*/
|
||||
xfs_dahash_t
|
||||
xfs_da_hashname(const uchar_t *name, int namelen)
|
||||
xfs_da_hashname(const __uint8_t *name, int namelen)
|
||||
{
|
||||
xfs_dahash_t hash;
|
||||
|
||||
|
|
|
@ -91,9 +91,9 @@ enum xfs_dacmp {
|
|||
* Structure to ease passing around component names.
|
||||
*/
|
||||
typedef struct xfs_da_args {
|
||||
const uchar_t *name; /* string (maybe not NULL terminated) */
|
||||
const __uint8_t *name; /* string (maybe not NULL terminated) */
|
||||
int namelen; /* length of string (maybe no NULL) */
|
||||
uchar_t *value; /* set of bytes (maybe contain NULLs) */
|
||||
__uint8_t *value; /* set of bytes (maybe contain NULLs) */
|
||||
int valuelen; /* length of value */
|
||||
int flags; /* argument flags (eg: ATTR_NOCREATE) */
|
||||
xfs_dahash_t hashval; /* hash value of name */
|
||||
|
@ -185,7 +185,7 @@ typedef struct xfs_da_state {
|
|||
unsigned char inleaf; /* insert into 1->lf, 0->splf */
|
||||
unsigned char extravalid; /* T/F: extrablk is in use */
|
||||
unsigned char extraafter; /* T/F: extrablk is after new */
|
||||
xfs_da_state_blk_t extrablk; /* for double-splits on leafs */
|
||||
xfs_da_state_blk_t extrablk; /* for double-splits on leaves */
|
||||
/* for dirv2 extrablk is data */
|
||||
} xfs_da_state_t;
|
||||
|
||||
|
@ -251,7 +251,7 @@ xfs_daddr_t xfs_da_reada_buf(struct xfs_trans *trans, struct xfs_inode *dp,
|
|||
int xfs_da_shrink_inode(xfs_da_args_t *args, xfs_dablk_t dead_blkno,
|
||||
xfs_dabuf_t *dead_buf);
|
||||
|
||||
uint xfs_da_hashname(const uchar_t *name_string, int name_length);
|
||||
uint xfs_da_hashname(const __uint8_t *name_string, int name_length);
|
||||
enum xfs_dacmp xfs_da_compname(struct xfs_da_args *args,
|
||||
const char *name, int len);
|
||||
|
||||
|
@ -268,5 +268,6 @@ xfs_daddr_t xfs_da_blkno(xfs_dabuf_t *dabuf);
|
|||
|
||||
extern struct kmem_zone *xfs_da_state_zone;
|
||||
extern struct kmem_zone *xfs_dabuf_zone;
|
||||
extern const struct xfs_nameops xfs_default_nameops;
|
||||
|
||||
#endif /* __XFS_DA_BTREE_H__ */
@@ -79,6 +79,12 @@ xfs_swapext(
goto out_put_target_file;
}

if (IS_SWAPFILE(file->f_path.dentry->d_inode) ||
IS_SWAPFILE(target_file->f_path.dentry->d_inode)) {
error = XFS_ERROR(EINVAL);
goto out_put_target_file;
}

ip = XFS_I(file->f_path.dentry->d_inode);
tip = XFS_I(target_file->f_path.dentry->d_inode);

@@ -118,19 +124,17 @@ xfs_swap_extents(
|
|||
xfs_bstat_t *sbp = &sxp->sx_stat;
|
||||
xfs_ifork_t *tempifp, *ifp, *tifp;
|
||||
int ilf_fields, tilf_fields;
|
||||
static uint lock_flags = XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL;
|
||||
int error = 0;
|
||||
int aforkblks = 0;
|
||||
int taforkblks = 0;
|
||||
__uint64_t tmp;
|
||||
char locked = 0;
|
||||
|
||||
mp = ip->i_mount;
|
||||
|
||||
tempifp = kmem_alloc(sizeof(xfs_ifork_t), KM_MAYFAIL);
|
||||
if (!tempifp) {
|
||||
error = XFS_ERROR(ENOMEM);
|
||||
goto error0;
|
||||
goto out;
|
||||
}
|
||||
|
||||
sbp = &sxp->sx_stat;
|
||||
|
@ -143,25 +147,24 @@ xfs_swap_extents(
|
|||
*/
|
||||
xfs_lock_two_inodes(ip, tip, XFS_IOLOCK_EXCL);
|
||||
xfs_lock_two_inodes(ip, tip, XFS_ILOCK_EXCL);
|
||||
locked = 1;
|
||||
|
||||
/* Verify that both files have the same format */
|
||||
if ((ip->i_d.di_mode & S_IFMT) != (tip->i_d.di_mode & S_IFMT)) {
|
||||
error = XFS_ERROR(EINVAL);
|
||||
goto error0;
|
||||
goto out_unlock;
|
||||
}
|
||||
|
||||
/* Verify both files are either real-time or non-realtime */
|
||||
if (XFS_IS_REALTIME_INODE(ip) != XFS_IS_REALTIME_INODE(tip)) {
|
||||
error = XFS_ERROR(EINVAL);
|
||||
goto error0;
|
||||
goto out_unlock;
|
||||
}
|
||||
|
||||
/* Should never get a local format */
|
||||
if (ip->i_d.di_format == XFS_DINODE_FMT_LOCAL ||
|
||||
tip->i_d.di_format == XFS_DINODE_FMT_LOCAL) {
|
||||
error = XFS_ERROR(EINVAL);
|
||||
goto error0;
|
||||
goto out_unlock;
|
||||
}
|
||||
|
||||
if (VN_CACHED(VFS_I(tip)) != 0) {
|
||||
|
@ -169,13 +172,13 @@ xfs_swap_extents(
|
|||
error = xfs_flushinval_pages(tip, 0, -1,
|
||||
FI_REMAPF_LOCKED);
|
||||
if (error)
|
||||
goto error0;
|
||||
goto out_unlock;
|
||||
}
|
||||
|
||||
/* Verify O_DIRECT for ftmp */
|
||||
if (VN_CACHED(VFS_I(tip)) != 0) {
|
||||
error = XFS_ERROR(EINVAL);
|
||||
goto error0;
|
||||
goto out_unlock;
|
||||
}
|
||||
|
||||
/* Verify all data are being swapped */
|
||||
|
@ -183,7 +186,7 @@ xfs_swap_extents(
|
|||
sxp->sx_length != ip->i_d.di_size ||
|
||||
sxp->sx_length != tip->i_d.di_size) {
|
||||
error = XFS_ERROR(EFAULT);
|
||||
goto error0;
|
||||
goto out_unlock;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -193,7 +196,7 @@ xfs_swap_extents(
|
|||
*/
|
||||
if ( XFS_IFORK_Q(ip) != XFS_IFORK_Q(tip) ) {
|
||||
error = XFS_ERROR(EINVAL);
|
||||
goto error0;
|
||||
goto out_unlock;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -208,7 +211,7 @@ xfs_swap_extents(
|
|||
(sbp->bs_mtime.tv_sec != ip->i_d.di_mtime.t_sec) ||
|
||||
(sbp->bs_mtime.tv_nsec != ip->i_d.di_mtime.t_nsec)) {
|
||||
error = XFS_ERROR(EBUSY);
|
||||
goto error0;
|
||||
goto out_unlock;
|
||||
}
|
||||
|
||||
/* We need to fail if the file is memory mapped. Once we have tossed
|
||||
|
@ -219,7 +222,7 @@ xfs_swap_extents(
|
|||
*/
|
||||
if (VN_MAPPED(VFS_I(ip))) {
|
||||
error = XFS_ERROR(EBUSY);
|
||||
goto error0;
|
||||
goto out_unlock;
|
||||
}
|
||||
|
||||
xfs_iunlock(ip, XFS_ILOCK_EXCL);
|
||||
|
@ -242,8 +245,7 @@ xfs_swap_extents(
|
|||
xfs_iunlock(ip, XFS_IOLOCK_EXCL);
|
||||
xfs_iunlock(tip, XFS_IOLOCK_EXCL);
|
||||
xfs_trans_cancel(tp, 0);
|
||||
locked = 0;
|
||||
goto error0;
|
||||
goto out;
|
||||
}
|
||||
xfs_lock_two_inodes(ip, tip, XFS_ILOCK_EXCL);
|
||||
|
||||
|
@ -253,19 +255,15 @@ xfs_swap_extents(
|
|||
if ( ((XFS_IFORK_Q(ip) != 0) && (ip->i_d.di_anextents > 0)) &&
|
||||
(ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)) {
|
||||
error = xfs_bmap_count_blocks(tp, ip, XFS_ATTR_FORK, &aforkblks);
|
||||
if (error) {
|
||||
xfs_trans_cancel(tp, 0);
|
||||
goto error0;
|
||||
}
|
||||
if (error)
|
||||
goto out_trans_cancel;
|
||||
}
|
||||
if ( ((XFS_IFORK_Q(tip) != 0) && (tip->i_d.di_anextents > 0)) &&
|
||||
(tip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)) {
|
||||
error = xfs_bmap_count_blocks(tp, tip, XFS_ATTR_FORK,
|
||||
&taforkblks);
|
||||
if (error) {
|
||||
xfs_trans_cancel(tp, 0);
|
||||
goto error0;
|
||||
}
|
||||
if (error)
|
||||
goto out_trans_cancel;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -332,10 +330,10 @@ xfs_swap_extents(
|
|||
|
||||
|
||||
IHOLD(ip);
|
||||
xfs_trans_ijoin(tp, ip, lock_flags);
|
||||
xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
|
||||
|
||||
IHOLD(tip);
|
||||
xfs_trans_ijoin(tp, tip, lock_flags);
|
||||
xfs_trans_ijoin(tp, tip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
|
||||
|
||||
xfs_trans_log_inode(tp, ip, ilf_fields);
|
||||
xfs_trans_log_inode(tp, tip, tilf_fields);
|
||||
|
@ -344,19 +342,19 @@ xfs_swap_extents(
|
|||
* If this is a synchronous mount, make sure that the
|
||||
* transaction goes to disk before returning to the user.
|
||||
*/
|
||||
if (mp->m_flags & XFS_MOUNT_WSYNC) {
|
||||
if (mp->m_flags & XFS_MOUNT_WSYNC)
|
||||
xfs_trans_set_sync(tp);
|
||||
}
|
||||
|
||||
error = xfs_trans_commit(tp, XFS_TRANS_SWAPEXT);
|
||||
locked = 0;
|
||||
|
||||
error0:
|
||||
if (locked) {
|
||||
xfs_iunlock(ip, lock_flags);
|
||||
xfs_iunlock(tip, lock_flags);
|
||||
}
|
||||
if (tempifp != NULL)
|
||||
kmem_free(tempifp);
|
||||
out_unlock:
|
||||
xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
|
||||
xfs_iunlock(tip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
|
||||
out:
|
||||
kmem_free(tempifp);
|
||||
return error;
|
||||
|
||||
out_trans_cancel:
|
||||
xfs_trans_cancel(tp, 0);
|
||||
goto out_unlock;
|
||||
}
@@ -103,7 +103,9 @@ typedef enum xfs_dinode_fmt {
/*
* Inode size for given fs.
*/
#define XFS_LITINO(mp) ((mp)->m_litino)
#define XFS_LITINO(mp) \
((int)(((mp)->m_sb.sb_inodesize) - sizeof(struct xfs_dinode)))

#define XFS_BROOT_SIZE_ADJ \
(XFS_BTREE_LBLOCK_LEN - sizeof(xfs_bmdr_block_t))

@@ -46,8 +46,6 @@
|
|||
|
||||
struct xfs_name xfs_name_dotdot = {"..", 2};
|
||||
|
||||
extern const struct xfs_nameops xfs_default_nameops;
|
||||
|
||||
/*
|
||||
* ASCII case-insensitive (ie. A-Z) support for directories that was
|
||||
* used in IRIX.
|
||||
|
|
|
@ -448,7 +448,6 @@ xfs_dir2_block_getdents(
|
|||
xfs_mount_t *mp; /* filesystem mount point */
|
||||
char *ptr; /* current data entry */
|
||||
int wantoff; /* starting block offset */
|
||||
xfs_ino_t ino;
|
||||
xfs_off_t cook;
|
||||
|
||||
mp = dp->i_mount;
|
||||
|
@ -509,16 +508,12 @@ xfs_dir2_block_getdents(
|
|||
|
||||
cook = xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk,
|
||||
(char *)dep - (char *)block);
|
||||
ino = be64_to_cpu(dep->inumber);
|
||||
#if XFS_BIG_INUMS
|
||||
ino += mp->m_inoadd;
|
||||
#endif
|
||||
|
||||
/*
|
||||
* If it didn't fit, set the final offset to here & return.
|
||||
*/
|
||||
if (filldir(dirent, dep->name, dep->namelen, cook & 0x7fffffff,
|
||||
ino, DT_UNKNOWN)) {
|
||||
be64_to_cpu(dep->inumber), DT_UNKNOWN)) {
|
||||
*offset = cook & 0x7fffffff;
|
||||
xfs_da_brelse(NULL, bp);
|
||||
return 0;
|
||||
|
|
|
@ -38,7 +38,7 @@ struct xfs_trans;
|
|||
|
||||
/*
|
||||
* Directory address space divided into sections,
|
||||
* spaces separated by 32gb.
|
||||
* spaces separated by 32GB.
|
||||
*/
|
||||
#define XFS_DIR2_SPACE_SIZE (1ULL << (32 + XFS_DIR2_DATA_ALIGN_LOG))
|
||||
#define XFS_DIR2_DATA_SPACE 0
|
||||
|
|
|
@ -549,7 +549,7 @@ xfs_dir2_leaf_addname(
|
|||
* Check the internal consistency of a leaf1 block.
|
||||
* Pop an assert if something is wrong.
|
||||
*/
|
||||
void
|
||||
STATIC void
|
||||
xfs_dir2_leaf_check(
|
||||
xfs_inode_t *dp, /* incore directory inode */
|
||||
xfs_dabuf_t *bp) /* leaf's buffer */
|
||||
|
@ -780,7 +780,6 @@ xfs_dir2_leaf_getdents(
|
|||
int ra_index; /* *map index for read-ahead */
|
||||
int ra_offset; /* map entry offset for ra */
|
||||
int ra_want; /* readahead count wanted */
|
||||
xfs_ino_t ino;
|
||||
|
||||
/*
|
||||
* If the offset is at or past the largest allowed value,
|
||||
|
@ -1076,24 +1075,12 @@ xfs_dir2_leaf_getdents(
|
|||
continue;
|
||||
}
|
||||
|
||||
/*
|
||||
* Copy the entry into the putargs, and try formatting it.
|
||||
*/
|
||||
dep = (xfs_dir2_data_entry_t *)ptr;
|
||||
|
||||
length = xfs_dir2_data_entsize(dep->namelen);
|
||||
|
||||
ino = be64_to_cpu(dep->inumber);
|
||||
#if XFS_BIG_INUMS
|
||||
ino += mp->m_inoadd;
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Won't fit. Return to caller.
|
||||
*/
|
||||
if (filldir(dirent, dep->name, dep->namelen,
|
||||
xfs_dir2_byte_to_dataptr(mp, curoff) & 0x7fffffff,
|
||||
ino, DT_UNKNOWN))
|
||||
be64_to_cpu(dep->inumber), DT_UNKNOWN))
|
||||
break;
|
||||
|
||||
/*
|
||||
|
|
|
@ -1104,7 +1104,7 @@ xfs_dir2_leafn_remove(
|
|||
}
|
||||
xfs_dir2_leafn_check(dp, bp);
|
||||
/*
|
||||
* Return indication of whether this leaf block is emtpy enough
|
||||
* Return indication of whether this leaf block is empty enough
|
||||
* to justify trying to join it with a neighbor.
|
||||
*/
|
||||
*rval =
|
||||
|
|
|
@ -748,11 +748,7 @@ xfs_dir2_sf_getdents(
|
|||
* Put . entry unless we're starting past it.
|
||||
*/
|
||||
if (*offset <= dot_offset) {
|
||||
ino = dp->i_ino;
|
||||
#if XFS_BIG_INUMS
|
||||
ino += mp->m_inoadd;
|
||||
#endif
|
||||
if (filldir(dirent, ".", 1, dot_offset & 0x7fffffff, ino, DT_DIR)) {
|
||||
if (filldir(dirent, ".", 1, dot_offset & 0x7fffffff, dp->i_ino, DT_DIR)) {
|
||||
*offset = dot_offset & 0x7fffffff;
|
||||
return 0;
|
||||
}
|
||||
|
@ -763,9 +759,6 @@ xfs_dir2_sf_getdents(
|
|||
*/
|
||||
if (*offset <= dotdot_offset) {
|
||||
ino = xfs_dir2_sf_get_inumber(sfp, &sfp->hdr.parent);
|
||||
#if XFS_BIG_INUMS
|
||||
ino += mp->m_inoadd;
|
||||
#endif
|
||||
if (filldir(dirent, "..", 2, dotdot_offset & 0x7fffffff, ino, DT_DIR)) {
|
||||
*offset = dotdot_offset & 0x7fffffff;
|
||||
return 0;
|
||||
|
@ -786,10 +779,6 @@ xfs_dir2_sf_getdents(
|
|||
}
|
||||
|
||||
ino = xfs_dir2_sf_get_inumber(sfp, xfs_dir2_sf_inumberp(sfep));
|
||||
#if XFS_BIG_INUMS
|
||||
ino += mp->m_inoadd;
|
||||
#endif
|
||||
|
||||
if (filldir(dirent, sfep->name, sfep->namelen,
|
||||
off & 0x7fffffff, ino, DT_UNKNOWN)) {
|
||||
*offset = off & 0x7fffffff;
|
||||
|
|
|
@ -33,12 +33,10 @@ typedef struct xfs_extent {
|
|||
* conversion routine.
|
||||
*/
|
||||
|
||||
#ifndef HAVE_FORMAT32
|
||||
typedef struct xfs_extent_32 {
|
||||
__uint64_t ext_start;
|
||||
__uint32_t ext_len;
|
||||
} __attribute__((packed)) xfs_extent_32_t;
|
||||
#endif
|
||||
|
||||
typedef struct xfs_extent_64 {
|
||||
__uint64_t ext_start;
|
||||
|
@ -59,7 +57,6 @@ typedef struct xfs_efi_log_format {
|
|||
xfs_extent_t efi_extents[1]; /* array of extents to free */
|
||||
} xfs_efi_log_format_t;
|
||||
|
||||
#ifndef HAVE_FORMAT32
|
||||
typedef struct xfs_efi_log_format_32 {
|
||||
__uint16_t efi_type; /* efi log item type */
|
||||
__uint16_t efi_size; /* size of this item */
|
||||
|
@ -67,7 +64,6 @@ typedef struct xfs_efi_log_format_32 {
|
|||
__uint64_t efi_id; /* efi identifier */
|
||||
xfs_extent_32_t efi_extents[1]; /* array of extents to free */
|
||||
} __attribute__((packed)) xfs_efi_log_format_32_t;
|
||||
#endif
|
||||
|
||||
typedef struct xfs_efi_log_format_64 {
|
||||
__uint16_t efi_type; /* efi log item type */
|
||||
|
@ -90,7 +86,6 @@ typedef struct xfs_efd_log_format {
|
|||
xfs_extent_t efd_extents[1]; /* array of extents freed */
|
||||
} xfs_efd_log_format_t;
|
||||
|
||||
#ifndef HAVE_FORMAT32
|
||||
typedef struct xfs_efd_log_format_32 {
|
||||
__uint16_t efd_type; /* efd log item type */
|
||||
__uint16_t efd_size; /* size of this item */
|
||||
|
@ -98,7 +93,6 @@ typedef struct xfs_efd_log_format_32 {
|
|||
__uint64_t efd_efi_id; /* id of corresponding efi */
|
||||
xfs_extent_32_t efd_extents[1]; /* array of extents freed */
|
||||
} __attribute__((packed)) xfs_efd_log_format_32_t;
|
||||
#endif
|
||||
|
||||
typedef struct xfs_efd_log_format_64 {
|
||||
__uint16_t efd_type; /* efd log item type */
|
||||
|
|
|
@ -140,7 +140,7 @@ _xfs_filestream_pick_ag(
|
|||
xfs_extlen_t minlen)
|
||||
{
|
||||
int err, trylock, nscan;
|
||||
xfs_extlen_t delta, longest, need, free, minfree, maxfree = 0;
|
||||
xfs_extlen_t longest, free, minfree, maxfree = 0;
|
||||
xfs_agnumber_t ag, max_ag = NULLAGNUMBER;
|
||||
struct xfs_perag *pag;
|
||||
|
||||
|
@ -186,12 +186,7 @@ _xfs_filestream_pick_ag(
|
|||
goto next_ag;
|
||||
}
|
||||
|
||||
need = XFS_MIN_FREELIST_PAG(pag, mp);
|
||||
delta = need > pag->pagf_flcount ? need - pag->pagf_flcount : 0;
|
||||
longest = (pag->pagf_longest > delta) ?
|
||||
(pag->pagf_longest - delta) :
|
||||
(pag->pagf_flcount > 0 || pag->pagf_longest > 0);
|
||||
|
||||
longest = xfs_alloc_longest_free_extent(mp, pag);
|
||||
if (((minlen && longest >= minlen) ||
|
||||
(!minlen && pag->pagf_freeblks >= minfree)) &&
|
||||
(!pag->pagf_metadata || !(flags & XFS_PICK_USERDATA) ||
|
||||
|
|
|
@ -576,7 +576,7 @@ xfs_reserve_blocks(
|
|||
if (fdblks_delta) {
|
||||
/*
|
||||
* If we are putting blocks back here, m_resblks_avail is
|
||||
* already at it's max so this will put it in the free pool.
|
||||
* already at its max so this will put it in the free pool.
|
||||
*
|
||||
* If we need space, we'll either succeed in getting it
|
||||
* from the free block count or we'll get an enospc. If
|
||||
|
|
|
@ -230,7 +230,7 @@ xfs_ialloc_ag_alloc(
|
|||
args.minalignslop = xfs_ialloc_cluster_alignment(&args) - 1;
|
||||
|
||||
/* Allow space for the inode btree to split. */
|
||||
args.minleft = XFS_IN_MAXLEVELS(args.mp) - 1;
|
||||
args.minleft = args.mp->m_in_maxlevels - 1;
|
||||
if ((error = xfs_alloc_vextent(&args)))
|
||||
return error;
|
||||
} else
|
||||
|
@ -270,7 +270,7 @@ xfs_ialloc_ag_alloc(
|
|||
/*
|
||||
* Allow space for the inode btree to split.
|
||||
*/
|
||||
args.minleft = XFS_IN_MAXLEVELS(args.mp) - 1;
|
||||
args.minleft = args.mp->m_in_maxlevels - 1;
|
||||
if ((error = xfs_alloc_vextent(&args)))
|
||||
return error;
|
||||
}
|
||||
|
@ -349,7 +349,7 @@ xfs_ialloc_ag_alloc(
|
|||
* Initialize all inodes in this buffer and then log them.
|
||||
*
|
||||
* XXX: It would be much better if we had just one transaction to
|
||||
* log a whole cluster of inodes instead of all the indivdual
|
||||
* log a whole cluster of inodes instead of all the individual
|
||||
* transactions causing a lot of log traffic.
|
||||
*/
|
||||
xfs_biozero(fbuf, 0, ninodes << args.mp->m_sb.sb_inodelog);
|
||||
|
@ -943,7 +943,7 @@ xfs_dialloc(
|
|||
ASSERT((XFS_AGINO_TO_OFFSET(mp, rec.ir_startino) %
|
||||
XFS_INODES_PER_CHUNK) == 0);
|
||||
ino = XFS_AGINO_TO_INO(mp, agno, rec.ir_startino + offset);
|
||||
XFS_INOBT_CLR_FREE(&rec, offset);
|
||||
rec.ir_free &= ~XFS_INOBT_MASK(offset);
|
||||
rec.ir_freecount--;
|
||||
if ((error = xfs_inobt_update(cur, rec.ir_startino, rec.ir_freecount,
|
||||
rec.ir_free)))
|
||||
|
@ -1105,11 +1105,11 @@ xfs_difree(
|
|||
*/
|
||||
off = agino - rec.ir_startino;
|
||||
ASSERT(off >= 0 && off < XFS_INODES_PER_CHUNK);
|
||||
ASSERT(!XFS_INOBT_IS_FREE(&rec, off));
|
||||
ASSERT(!(rec.ir_free & XFS_INOBT_MASK(off)));
|
||||
/*
|
||||
* Mark the inode free & increment the count.
|
||||
*/
|
||||
XFS_INOBT_SET_FREE(&rec, off);
|
||||
rec.ir_free |= XFS_INOBT_MASK(off);
|
||||
rec.ir_freecount++;
|
||||
|
||||
/*
|
||||
|
|
|
@ -164,7 +164,7 @@ xfs_inobt_init_rec_from_cur(
|
|||
}
|
||||
|
||||
/*
|
||||
* intial value of ptr for lookup
|
||||
* initial value of ptr for lookup
|
||||
*/
|
||||
STATIC void
|
||||
xfs_inobt_init_ptr_from_cur(
|
||||
|
|
|
@ -32,14 +32,14 @@ struct xfs_mount;
|
|||
#define XFS_IBT_MAGIC 0x49414254 /* 'IABT' */
|
||||
|
||||
typedef __uint64_t xfs_inofree_t;
|
||||
#define XFS_INODES_PER_CHUNK (NBBY * sizeof(xfs_inofree_t))
|
||||
#define XFS_INODES_PER_CHUNK (NBBY * sizeof(xfs_inofree_t))
|
||||
#define XFS_INODES_PER_CHUNK_LOG (XFS_NBBYLOG + 3)
|
||||
#define XFS_INOBT_ALL_FREE ((xfs_inofree_t)-1)
|
||||
#define XFS_INOBT_ALL_FREE ((xfs_inofree_t)-1)
|
||||
#define XFS_INOBT_MASK(i) ((xfs_inofree_t)1 << (i))
|
||||
|
||||
static inline xfs_inofree_t xfs_inobt_maskn(int i, int n)
|
||||
{
|
||||
return (((n) >= XFS_INODES_PER_CHUNK ? \
|
||||
(xfs_inofree_t)0 : ((xfs_inofree_t)1 << (n))) - 1) << (i);
|
||||
return ((n >= XFS_INODES_PER_CHUNK ? 0 : XFS_INOBT_MASK(n)) - 1) << i;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -68,20 +68,6 @@ typedef struct xfs_inobt_key {
|
|||
/* btree pointer type */
|
||||
typedef __be32 xfs_inobt_ptr_t;
|
||||
|
||||
/*
|
||||
* Bit manipulations for ir_free.
|
||||
*/
|
||||
#define XFS_INOBT_MASK(i) ((xfs_inofree_t)1 << (i))
|
||||
#define XFS_INOBT_IS_FREE(rp,i) \
|
||||
(((rp)->ir_free & XFS_INOBT_MASK(i)) != 0)
|
||||
#define XFS_INOBT_SET_FREE(rp,i) ((rp)->ir_free |= XFS_INOBT_MASK(i))
|
||||
#define XFS_INOBT_CLR_FREE(rp,i) ((rp)->ir_free &= ~XFS_INOBT_MASK(i))
|
||||
|
||||
/*
|
||||
* Maximum number of inode btree levels.
|
||||
*/
|
||||
#define XFS_IN_MAXLEVELS(mp) ((mp)->m_in_maxlevels)
|
||||
|
||||
/*
|
||||
* block numbers in the AG.
|
||||
*/
|
||||
|
|
|
@ -122,7 +122,7 @@ typedef struct xfs_ictimestamp {
|
|||
|
||||
/*
|
||||
* NOTE: This structure must be kept identical to struct xfs_dinode
|
||||
* in xfs_dinode.h except for the endianess annotations.
|
||||
* in xfs_dinode.h except for the endianness annotations.
|
||||
*/
|
||||
typedef struct xfs_icdinode {
|
||||
__uint16_t di_magic; /* inode magic # = XFS_DINODE_MAGIC */
|
||||
|
|
|
@ -40,7 +40,6 @@ typedef struct xfs_inode_log_format {
|
|||
__int32_t ilf_boffset; /* off of inode in buffer */
|
||||
} xfs_inode_log_format_t;
|
||||
|
||||
#ifndef HAVE_FORMAT32
|
||||
typedef struct xfs_inode_log_format_32 {
|
||||
__uint16_t ilf_type; /* inode log item type */
|
||||
__uint16_t ilf_size; /* size of this item */
|
||||
|
@ -56,7 +55,6 @@ typedef struct xfs_inode_log_format_32 {
|
|||
__int32_t ilf_len; /* len of inode buffer */
|
||||
__int32_t ilf_boffset; /* off of inode in buffer */
|
||||
} __attribute__((packed)) xfs_inode_log_format_32_t;
|
||||
#endif
|
||||
|
||||
typedef struct xfs_inode_log_format_64 {
|
||||
__uint16_t ilf_type; /* inode log item type */
|
||||
|
|
|
@ -63,7 +63,7 @@ typedef enum {
|
|||
*/
|
||||
|
||||
typedef struct xfs_iomap {
|
||||
xfs_daddr_t iomap_bn; /* first 512b blk of mapping */
|
||||
xfs_daddr_t iomap_bn; /* first 512B blk of mapping */
|
||||
xfs_buftarg_t *iomap_target;
|
||||
xfs_off_t iomap_offset; /* offset of mapping, bytes */
|
||||
xfs_off_t iomap_bsize; /* size of mapping, bytes */
@@ -83,7 +83,12 @@ xfs_bulkstat_one_iget(
buf->bs_uid = dic->di_uid;
buf->bs_gid = dic->di_gid;
buf->bs_size = dic->di_size;
vn_atime_to_bstime(VFS_I(ip), &buf->bs_atime);
/*
* We are reading the atime from the Linux inode because the
* dinode might not be uptodate.
*/
buf->bs_atime.tv_sec = VFS_I(ip)->i_atime.tv_sec;
buf->bs_atime.tv_nsec = VFS_I(ip)->i_atime.tv_nsec;
buf->bs_mtime.tv_sec = dic->di_mtime.t_sec;
buf->bs_mtime.tv_nsec = dic->di_mtime.t_nsec;
buf->bs_ctime.tv_sec = dic->di_ctime.t_sec;
@@ -579,7 +584,7 @@ xfs_bulkstat(
|
|||
* first inode of the cluster.
|
||||
*
|
||||
* Careful with clustidx. There can be
|
||||
* multple clusters per chunk, a single
|
||||
* multiple clusters per chunk, a single
|
||||
* cluster per chunk or a cluster that has
|
||||
* inodes represented from several different
|
||||
* chunks (if blocksize is large).
|
||||
@@ -574,7 +574,7 @@ xfs_log_mount(
|
|||
error = xfs_trans_ail_init(mp);
|
||||
if (error) {
|
||||
cmn_err(CE_WARN, "XFS: AIL initialisation failed: error %d", error);
|
||||
goto error;
|
||||
goto out_free_log;
|
||||
}
|
||||
mp->m_log->l_ailp = mp->m_ail;
|
||||
|
||||
|
@ -594,20 +594,22 @@ xfs_log_mount(
|
|||
mp->m_flags |= XFS_MOUNT_RDONLY;
|
||||
if (error) {
|
||||
cmn_err(CE_WARN, "XFS: log mount/recovery failed: error %d", error);
|
||||
goto error;
|
||||
goto out_destroy_ail;
|
||||
}
|
||||
}
|
||||
|
||||
/* Normal transactions can now occur */
|
||||
mp->m_log->l_flags &= ~XLOG_ACTIVE_RECOVERY;
|
||||
|
||||
/* End mounting message in xfs_log_mount_finish */
|
||||
return 0;
|
||||
error:
|
||||
xfs_log_unmount_dealloc(mp);
|
||||
|
||||
out_destroy_ail:
|
||||
xfs_trans_ail_destroy(mp);
|
||||
out_free_log:
|
||||
xlog_dealloc_log(mp->m_log);
|
||||
out:
|
||||
return error;
|
||||
} /* xfs_log_mount */
|
||||
}
|
||||
|
||||
/*
|
||||
* Finish the recovery of the file system. This is separate from
|
||||
|
@ -632,19 +634,6 @@ xfs_log_mount_finish(xfs_mount_t *mp)
|
|||
return error;
|
||||
}
|
||||
|
||||
/*
|
||||
* Unmount processing for the log.
|
||||
*/
|
||||
int
|
||||
xfs_log_unmount(xfs_mount_t *mp)
|
||||
{
|
||||
int error;
|
||||
|
||||
error = xfs_log_unmount_write(mp);
|
||||
xfs_log_unmount_dealloc(mp);
|
||||
return error;
|
||||
}
|
||||
|
||||
/*
|
||||
* Final log writes as part of unmount.
|
||||
*
|
||||
|
@ -795,7 +784,7 @@ xfs_log_unmount_write(xfs_mount_t *mp)
|
|||
* and deallocate the log as the aild references the log.
|
||||
*/
|
||||
void
|
||||
xfs_log_unmount_dealloc(xfs_mount_t *mp)
|
||||
xfs_log_unmount(xfs_mount_t *mp)
|
||||
{
|
||||
xfs_trans_ail_destroy(mp);
|
||||
xlog_dealloc_log(mp->m_log);
|
||||
|
@ -1109,7 +1098,7 @@ xlog_bdstrat_cb(struct xfs_buf *bp)
|
|||
/*
|
||||
* Return size of each in-core log record buffer.
|
||||
*
|
||||
* All machines get 8 x 32KB buffers by default, unless tuned otherwise.
|
||||
* All machines get 8 x 32kB buffers by default, unless tuned otherwise.
|
||||
*
|
||||
* If the filesystem blocksize is too large, we may need to choose a
|
||||
* larger size since the directory code currently logs entire blocks.
|
||||
|
@ -1139,8 +1128,8 @@ xlog_get_iclog_buffer_size(xfs_mount_t *mp,
|
|||
}
|
||||
|
||||
if (xfs_sb_version_haslogv2(&mp->m_sb)) {
|
||||
/* # headers = size / 32K
|
||||
* one header holds cycles from 32K of data
|
||||
/* # headers = size / 32k
|
||||
* one header holds cycles from 32k of data
|
||||
*/
|
||||
|
||||
xhdrs = mp->m_logbsize / XLOG_HEADER_CYCLE_SIZE;
|
||||
|
@ -1156,7 +1145,7 @@ xlog_get_iclog_buffer_size(xfs_mount_t *mp,
|
|||
goto done;
|
||||
}
|
||||
|
||||
/* All machines use 32KB buffers by default. */
|
||||
/* All machines use 32kB buffers by default. */
|
||||
log->l_iclog_size = XLOG_BIG_RECORD_BSIZE;
|
||||
log->l_iclog_size_log = XLOG_BIG_RECORD_BSHIFT;
|
||||
|
||||
|
@ -1164,32 +1153,8 @@ xlog_get_iclog_buffer_size(xfs_mount_t *mp,
|
|||
log->l_iclog_hsize = BBSIZE;
|
||||
log->l_iclog_heads = 1;
|
||||
|
||||
/*
|
||||
* For 16KB, we use 3 32KB buffers. For 32KB block sizes, we use
|
||||
* 4 32KB buffers. For 64KB block sizes, we use 8 32KB buffers.
|
||||
*/
|
||||
if (mp->m_sb.sb_blocksize >= 16*1024) {
|
||||
log->l_iclog_size = XLOG_BIG_RECORD_BSIZE;
|
||||
log->l_iclog_size_log = XLOG_BIG_RECORD_BSHIFT;
|
||||
if (mp->m_logbufs <= 0) {
|
||||
switch (mp->m_sb.sb_blocksize) {
|
||||
case 16*1024: /* 16 KB */
|
||||
log->l_iclog_bufs = 3;
|
||||
break;
|
||||
case 32*1024: /* 32 KB */
|
||||
log->l_iclog_bufs = 4;
|
||||
break;
|
||||
case 64*1024: /* 64 KB */
|
||||
log->l_iclog_bufs = 8;
|
||||
break;
|
||||
default:
|
||||
xlog_panic("XFS: Invalid blocksize");
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
done: /* are we being asked to make the sizes selected above visible? */
|
||||
done:
|
||||
/* are we being asked to make the sizes selected above visible? */
|
||||
if (mp->m_logbufs == 0)
|
||||
mp->m_logbufs = log->l_iclog_bufs;
|
||||
if (mp->m_logbsize == 0)
|
||||
|
@ -3214,7 +3179,7 @@ xlog_state_want_sync(xlog_t *log, xlog_in_core_t *iclog)
|
|||
*/
|
||||
|
||||
/*
|
||||
* Free a used ticket when it's refcount falls to zero.
|
||||
* Free a used ticket when its refcount falls to zero.
|
||||
*/
|
||||
void
|
||||
xfs_log_ticket_put(
|
||||
|
|
|
@ -170,9 +170,8 @@ int xfs_log_write(struct xfs_mount *mp,
|
|||
int nentries,
|
||||
xfs_log_ticket_t ticket,
|
||||
xfs_lsn_t *start_lsn);
|
||||
int xfs_log_unmount(struct xfs_mount *mp);
|
||||
int xfs_log_unmount_write(struct xfs_mount *mp);
|
||||
void xfs_log_unmount_dealloc(struct xfs_mount *mp);
|
||||
void xfs_log_unmount(struct xfs_mount *mp);
|
||||
int xfs_log_force_umount(struct xfs_mount *mp, int logerror);
|
||||
int xfs_log_need_covered(struct xfs_mount *mp);
|
||||
|
||||
|
|
|
@ -359,7 +359,7 @@ typedef struct xlog_in_core {
|
|||
int ic_size;
|
||||
int ic_offset;
|
||||
int ic_bwritecnt;
|
||||
ushort_t ic_state;
|
||||
unsigned short ic_state;
|
||||
char *ic_datap; /* pointer to iclog data */
|
||||
#ifdef XFS_LOG_TRACE
|
||||
struct ktrace *ic_trace;
|
||||
|
@ -455,7 +455,6 @@ extern void xlog_recover_process_iunlinks(xlog_t *log);
|
|||
|
||||
extern struct xfs_buf *xlog_get_bp(xlog_t *, int);
|
||||
extern void xlog_put_bp(struct xfs_buf *);
|
||||
extern int xlog_bread(xlog_t *, xfs_daddr_t, int, struct xfs_buf *);
|
||||
|
||||
extern kmem_zone_t *xfs_log_ticket_zone;
@@ -94,12 +94,30 @@ xlog_put_bp(
xfs_buf_free(bp);
}

STATIC xfs_caddr_t
xlog_align(
xlog_t *log,
xfs_daddr_t blk_no,
int nbblks,
xfs_buf_t *bp)
{
xfs_caddr_t ptr;

if (!log->l_sectbb_log)
return XFS_BUF_PTR(bp);

ptr = XFS_BUF_PTR(bp) + BBTOB((int)blk_no & log->l_sectbb_mask);
ASSERT(XFS_BUF_SIZE(bp) >=
BBTOB(nbblks + (blk_no & log->l_sectbb_mask)));
return ptr;
}


/*
* nbblks should be uint, but oh well. Just want to catch that 32-bit length.
*/
int
xlog_bread(
STATIC int
xlog_bread_noalign(
xlog_t *log,
xfs_daddr_t blk_no,
int nbblks,
@@ -137,6 +155,24 @@ xlog_bread(
|
|||
return error;
|
||||
}
|
||||
|
||||
STATIC int
|
||||
xlog_bread(
|
||||
xlog_t *log,
|
||||
xfs_daddr_t blk_no,
|
||||
int nbblks,
|
||||
xfs_buf_t *bp,
|
||||
xfs_caddr_t *offset)
|
||||
{
|
||||
int error;
|
||||
|
||||
error = xlog_bread_noalign(log, blk_no, nbblks, bp);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
*offset = xlog_align(log, blk_no, nbblks, bp);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Write out the buffer at the given block for the given number of blocks.
|
||||
* The buffer is kept locked across the write and is returned locked.
|
||||
|
@ -180,24 +216,6 @@ xlog_bwrite(
|
|||
return error;
|
||||
}
|
||||
|
||||
STATIC xfs_caddr_t
|
||||
xlog_align(
|
||||
xlog_t *log,
|
||||
xfs_daddr_t blk_no,
|
||||
int nbblks,
|
||||
xfs_buf_t *bp)
|
||||
{
|
||||
xfs_caddr_t ptr;
|
||||
|
||||
if (!log->l_sectbb_log)
|
||||
return XFS_BUF_PTR(bp);
|
||||
|
||||
ptr = XFS_BUF_PTR(bp) + BBTOB((int)blk_no & log->l_sectbb_mask);
|
||||
ASSERT(XFS_BUF_SIZE(bp) >=
|
||||
BBTOB(nbblks + (blk_no & log->l_sectbb_mask)));
|
||||
return ptr;
|
||||
}
|
||||
|
||||
#ifdef DEBUG
|
||||
/*
|
||||
* dump debug superblock and log record information
|
||||
|
@ -211,11 +229,11 @@ xlog_header_check_dump(
|
|||
|
||||
cmn_err(CE_DEBUG, "%s: SB : uuid = ", __func__);
|
||||
for (b = 0; b < 16; b++)
|
||||
cmn_err(CE_DEBUG, "%02x", ((uchar_t *)&mp->m_sb.sb_uuid)[b]);
|
||||
cmn_err(CE_DEBUG, "%02x", ((__uint8_t *)&mp->m_sb.sb_uuid)[b]);
|
||||
cmn_err(CE_DEBUG, ", fmt = %d\n", XLOG_FMT);
|
||||
cmn_err(CE_DEBUG, " log : uuid = ");
|
||||
for (b = 0; b < 16; b++)
|
||||
cmn_err(CE_DEBUG, "%02x",((uchar_t *)&head->h_fs_uuid)[b]);
|
||||
cmn_err(CE_DEBUG, "%02x", ((__uint8_t *)&head->h_fs_uuid)[b]);
|
||||
cmn_err(CE_DEBUG, ", fmt = %d\n", be32_to_cpu(head->h_fmt));
|
||||
}
|
||||
#else
|
||||
|
@ -321,9 +339,9 @@ xlog_find_cycle_start(
|
|||
|
||||
mid_blk = BLK_AVG(first_blk, *last_blk);
|
||||
while (mid_blk != first_blk && mid_blk != *last_blk) {
|
||||
if ((error = xlog_bread(log, mid_blk, 1, bp)))
|
||||
error = xlog_bread(log, mid_blk, 1, bp, &offset);
|
||||
if (error)
|
||||
return error;
|
||||
offset = xlog_align(log, mid_blk, 1, bp);
|
||||
mid_cycle = xlog_get_cycle(offset);
|
||||
if (mid_cycle == cycle) {
|
||||
*last_blk = mid_blk;
|
||||
|
@ -379,10 +397,10 @@ xlog_find_verify_cycle(
|
|||
|
||||
bcount = min(bufblks, (start_blk + nbblks - i));
|
||||
|
||||
if ((error = xlog_bread(log, i, bcount, bp)))
|
||||
error = xlog_bread(log, i, bcount, bp, &buf);
|
||||
if (error)
|
||||
goto out;
|
||||
|
||||
buf = xlog_align(log, i, bcount, bp);
|
||||
for (j = 0; j < bcount; j++) {
|
||||
cycle = xlog_get_cycle(buf);
|
||||
if (cycle == stop_on_cycle_no) {
|
||||
|
@ -436,9 +454,9 @@ xlog_find_verify_log_record(
|
|||
return ENOMEM;
|
||||
smallmem = 1;
|
||||
} else {
|
||||
if ((error = xlog_bread(log, start_blk, num_blks, bp)))
|
||||
error = xlog_bread(log, start_blk, num_blks, bp, &offset);
|
||||
if (error)
|
||||
goto out;
|
||||
offset = xlog_align(log, start_blk, num_blks, bp);
|
||||
offset += ((num_blks - 1) << BBSHIFT);
|
||||
}
|
||||
|
||||
|
@ -453,9 +471,9 @@ xlog_find_verify_log_record(
|
|||
}
|
||||
|
||||
if (smallmem) {
|
||||
if ((error = xlog_bread(log, i, 1, bp)))
|
||||
error = xlog_bread(log, i, 1, bp, &offset);
|
||||
if (error)
|
||||
goto out;
|
||||
offset = xlog_align(log, i, 1, bp);
|
||||
}
|
||||
|
||||
head = (xlog_rec_header_t *)offset;
|
||||
|
@ -559,15 +577,18 @@ xlog_find_head(
|
|||
bp = xlog_get_bp(log, 1);
|
||||
if (!bp)
|
||||
return ENOMEM;
|
||||
if ((error = xlog_bread(log, 0, 1, bp)))
|
||||
|
||||
error = xlog_bread(log, 0, 1, bp, &offset);
|
||||
if (error)
|
||||
goto bp_err;
|
||||
offset = xlog_align(log, 0, 1, bp);
|
||||
|
||||
first_half_cycle = xlog_get_cycle(offset);
|
||||
|
||||
last_blk = head_blk = log_bbnum - 1; /* get cycle # of last block */
|
||||
if ((error = xlog_bread(log, last_blk, 1, bp)))
|
||||
error = xlog_bread(log, last_blk, 1, bp, &offset);
|
||||
if (error)
|
||||
goto bp_err;
|
||||
offset = xlog_align(log, last_blk, 1, bp);
|
||||
|
||||
last_half_cycle = xlog_get_cycle(offset);
|
||||
ASSERT(last_half_cycle != 0);
|
||||
|
||||
|
@ -817,9 +838,10 @@ xlog_find_tail(
|
|||
if (!bp)
|
||||
return ENOMEM;
|
||||
if (*head_blk == 0) { /* special case */
|
||||
if ((error = xlog_bread(log, 0, 1, bp)))
|
||||
error = xlog_bread(log, 0, 1, bp, &offset);
|
||||
if (error)
|
||||
goto bread_err;
|
||||
offset = xlog_align(log, 0, 1, bp);
|
||||
|
||||
if (xlog_get_cycle(offset) == 0) {
|
||||
*tail_blk = 0;
|
||||
/* leave all other log inited values alone */
|
||||
|
@ -832,9 +854,10 @@ xlog_find_tail(
|
|||
*/
|
||||
ASSERT(*head_blk < INT_MAX);
|
||||
for (i = (int)(*head_blk) - 1; i >= 0; i--) {
|
||||
if ((error = xlog_bread(log, i, 1, bp)))
|
||||
error = xlog_bread(log, i, 1, bp, &offset);
|
||||
if (error)
|
||||
goto bread_err;
|
||||
offset = xlog_align(log, i, 1, bp);
|
||||
|
||||
if (XLOG_HEADER_MAGIC_NUM == be32_to_cpu(*(__be32 *)offset)) {
|
||||
found = 1;
|
||||
break;
|
||||
|
@ -848,9 +871,10 @@ xlog_find_tail(
|
|||
*/
|
||||
if (!found) {
|
||||
for (i = log->l_logBBsize - 1; i >= (int)(*head_blk); i--) {
|
||||
if ((error = xlog_bread(log, i, 1, bp)))
|
||||
error = xlog_bread(log, i, 1, bp, &offset);
|
||||
if (error)
|
||||
goto bread_err;
|
||||
offset = xlog_align(log, i, 1, bp);
|
||||
|
||||
if (XLOG_HEADER_MAGIC_NUM ==
|
||||
be32_to_cpu(*(__be32 *)offset)) {
|
||||
found = 2;
|
||||
|
@ -922,10 +946,10 @@ xlog_find_tail(
|
|||
if (*head_blk == after_umount_blk &&
|
||||
be32_to_cpu(rhead->h_num_logops) == 1) {
|
||||
umount_data_blk = (i + hblks) % log->l_logBBsize;
|
||||
if ((error = xlog_bread(log, umount_data_blk, 1, bp))) {
|
||||
error = xlog_bread(log, umount_data_blk, 1, bp, &offset);
|
||||
if (error)
|
||||
goto bread_err;
|
||||
}
|
||||
offset = xlog_align(log, umount_data_blk, 1, bp);
|
||||
|
||||
op_head = (xlog_op_header_t *)offset;
|
||||
if (op_head->oh_flags & XLOG_UNMOUNT_TRANS) {
|
||||
/*
|
||||
|
@ -1017,9 +1041,10 @@ xlog_find_zeroed(
|
|||
bp = xlog_get_bp(log, 1);
|
||||
if (!bp)
|
||||
return ENOMEM;
|
||||
if ((error = xlog_bread(log, 0, 1, bp)))
|
||||
error = xlog_bread(log, 0, 1, bp, &offset);
|
||||
if (error)
|
||||
goto bp_err;
|
||||
offset = xlog_align(log, 0, 1, bp);
|
||||
|
||||
first_cycle = xlog_get_cycle(offset);
|
||||
if (first_cycle == 0) { /* completely zeroed log */
|
||||
*blk_no = 0;
|
||||
|
@ -1028,9 +1053,10 @@ xlog_find_zeroed(
|
|||
}
|
||||
|
||||
/* check partially zeroed log */
|
||||
if ((error = xlog_bread(log, log_bbnum-1, 1, bp)))
|
||||
error = xlog_bread(log, log_bbnum-1, 1, bp, &offset);
|
||||
if (error)
|
||||
goto bp_err;
|
||||
offset = xlog_align(log, log_bbnum-1, 1, bp);
|
||||
|
||||
last_cycle = xlog_get_cycle(offset);
|
||||
if (last_cycle != 0) { /* log completely written to */
|
||||
xlog_put_bp(bp);
|
||||
|
@ -1152,10 +1178,10 @@ xlog_write_log_records(
|
|||
*/
|
||||
balign = XLOG_SECTOR_ROUNDDOWN_BLKNO(log, start_block);
|
||||
if (balign != start_block) {
|
||||
if ((error = xlog_bread(log, start_block, 1, bp))) {
|
||||
xlog_put_bp(bp);
|
||||
return error;
|
||||
}
|
||||
error = xlog_bread_noalign(log, start_block, 1, bp);
|
||||
if (error)
|
||||
goto out_put_bp;
|
||||
|
||||
j = start_block - balign;
|
||||
}
|
||||
|
||||
|
@ -1175,10 +1201,14 @@ xlog_write_log_records(
|
|||
balign = BBTOB(ealign - start_block);
|
||||
error = XFS_BUF_SET_PTR(bp, offset + balign,
|
||||
BBTOB(sectbb));
|
||||
if (!error)
|
||||
error = xlog_bread(log, ealign, sectbb, bp);
|
||||
if (!error)
|
||||
error = XFS_BUF_SET_PTR(bp, offset, bufblks);
|
||||
if (error)
|
||||
break;
|
||||
|
||||
error = xlog_bread_noalign(log, ealign, sectbb, bp);
|
||||
if (error)
|
||||
break;
|
||||
|
||||
error = XFS_BUF_SET_PTR(bp, offset, bufblks);
|
||||
if (error)
|
||||
break;
|
||||
}
|
||||
|
@ -1195,6 +1225,8 @@ xlog_write_log_records(
|
|||
start_block += endcount;
|
||||
j = 0;
|
||||
}
|
||||
|
||||
out_put_bp:
|
||||
xlog_put_bp(bp);
|
||||
return error;
|
||||
}
|
||||
|
@ -2511,16 +2543,10 @@ xlog_recover_do_inode_trans(
|
|||
}
|
||||
|
||||
write_inode_buffer:
|
||||
if (ITEM_TYPE(item) == XFS_LI_INODE) {
|
||||
ASSERT(bp->b_mount == NULL || bp->b_mount == mp);
|
||||
bp->b_mount = mp;
|
||||
XFS_BUF_SET_IODONE_FUNC(bp, xlog_recover_iodone);
|
||||
xfs_bdwrite(mp, bp);
|
||||
} else {
|
||||
XFS_BUF_STALE(bp);
|
||||
error = xfs_bwrite(mp, bp);
|
||||
}
|
||||
|
||||
ASSERT(bp->b_mount == NULL || bp->b_mount == mp);
|
||||
bp->b_mount = mp;
|
||||
XFS_BUF_SET_IODONE_FUNC(bp, xlog_recover_iodone);
|
||||
xfs_bdwrite(mp, bp);
|
||||
error:
|
||||
if (need_free)
|
||||
kmem_free(in_f);
|
||||
|
@@ -2769,51 +2795,48 @@ xlog_recover_do_trans(
|
|||
int error = 0;
|
||||
xlog_recover_item_t *item, *first_item;
|
||||
|
||||
if ((error = xlog_recover_reorder_trans(trans)))
|
||||
error = xlog_recover_reorder_trans(trans);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
first_item = item = trans->r_itemq;
|
||||
do {
|
||||
/*
|
||||
* we don't need to worry about the block number being
|
||||
* truncated in > 1 TB buffers because in user-land,
|
||||
* we're now n32 or 64-bit so xfs_daddr_t is 64-bits so
|
||||
* the blknos will get through the user-mode buffer
|
||||
* cache properly. The only bad case is o32 kernels
|
||||
* where xfs_daddr_t is 32-bits but mount will warn us
|
||||
* off a > 1 TB filesystem before we get here.
|
||||
*/
|
||||
if ((ITEM_TYPE(item) == XFS_LI_BUF)) {
|
||||
if ((error = xlog_recover_do_buffer_trans(log, item,
|
||||
pass)))
|
||||
break;
|
||||
} else if ((ITEM_TYPE(item) == XFS_LI_INODE)) {
|
||||
if ((error = xlog_recover_do_inode_trans(log, item,
|
||||
pass)))
|
||||
break;
|
||||
} else if (ITEM_TYPE(item) == XFS_LI_EFI) {
|
||||
if ((error = xlog_recover_do_efi_trans(log, item, trans->r_lsn,
|
||||
pass)))
|
||||
break;
|
||||
} else if (ITEM_TYPE(item) == XFS_LI_EFD) {
|
||||
switch (ITEM_TYPE(item)) {
|
||||
case XFS_LI_BUF:
|
||||
error = xlog_recover_do_buffer_trans(log, item, pass);
|
||||
break;
|
||||
case XFS_LI_INODE:
|
||||
error = xlog_recover_do_inode_trans(log, item, pass);
|
||||
break;
|
||||
case XFS_LI_EFI:
|
||||
error = xlog_recover_do_efi_trans(log, item,
|
||||
trans->r_lsn, pass);
|
||||
break;
|
||||
case XFS_LI_EFD:
|
||||
xlog_recover_do_efd_trans(log, item, pass);
|
||||
} else if (ITEM_TYPE(item) == XFS_LI_DQUOT) {
|
||||
if ((error = xlog_recover_do_dquot_trans(log, item,
|
||||
pass)))
|
||||
break;
|
||||
} else if ((ITEM_TYPE(item) == XFS_LI_QUOTAOFF)) {
|
||||
if ((error = xlog_recover_do_quotaoff_trans(log, item,
|
||||
pass)))
|
||||
break;
|
||||
} else {
|
||||
xlog_warn("XFS: xlog_recover_do_trans");
|
||||
error = 0;
|
||||
break;
|
||||
case XFS_LI_DQUOT:
|
||||
error = xlog_recover_do_dquot_trans(log, item, pass);
|
||||
break;
|
||||
case XFS_LI_QUOTAOFF:
|
||||
error = xlog_recover_do_quotaoff_trans(log, item,
|
||||
pass);
|
||||
break;
|
||||
default:
|
||||
xlog_warn(
|
||||
"XFS: invalid item type (%d) xlog_recover_do_trans", ITEM_TYPE(item));
|
||||
ASSERT(0);
|
||||
error = XFS_ERROR(EIO);
|
||||
break;
|
||||
}
|
||||
|
||||
if (error)
|
||||
return error;
|
||||
item = item->ri_next;
|
||||
} while (first_item != item);
|
||||
|
||||
return error;
|
||||
return 0;
|
||||
}
|
||||
|
||||
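Editor's note: the hunk above replaces xlog_recover_do_trans()'s chained if/else with a switch on ITEM_TYPE(item). The items hang off trans->r_itemq as a circular singly linked list, so the walk is a do/while that stops once it wraps back to the first item, and any handler error aborts the whole pass. A self-contained sketch of that traversal pattern (illustrative names, not the kernel's):

#include <errno.h>

struct work_item {
	int			type;
	struct work_item	*ri_next;	/* circular: last->ri_next == first */
};

static int process_items(struct work_item *first)
{
	struct work_item *item = first;
	int error = 0;

	do {
		switch (item->type) {
		case 1:				/* e.g. a buffer item */
			/* handle it */
			break;
		case 2:				/* e.g. an inode item */
			/* handle it */
			break;
		default:
			error = EIO;		/* unknown type fails the whole pass */
			break;
		}
		if (error)
			return error;
		item = item->ri_next;
	} while (item != first);

	return 0;
}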
/*
|
||||
|
@ -3490,9 +3513,11 @@ xlog_do_recovery_pass(
|
|||
hbp = xlog_get_bp(log, 1);
|
||||
if (!hbp)
|
||||
return ENOMEM;
|
||||
if ((error = xlog_bread(log, tail_blk, 1, hbp)))
|
||||
|
||||
error = xlog_bread(log, tail_blk, 1, hbp, &offset);
|
||||
if (error)
|
||||
goto bread_err1;
|
||||
offset = xlog_align(log, tail_blk, 1, hbp);
|
||||
|
||||
rhead = (xlog_rec_header_t *)offset;
|
||||
error = xlog_valid_rec_header(log, rhead, tail_blk);
|
||||
if (error)
|
||||
|
@ -3526,9 +3551,10 @@ xlog_do_recovery_pass(
|
|||
memset(rhash, 0, sizeof(rhash));
|
||||
if (tail_blk <= head_blk) {
|
||||
for (blk_no = tail_blk; blk_no < head_blk; ) {
|
||||
if ((error = xlog_bread(log, blk_no, hblks, hbp)))
|
||||
error = xlog_bread(log, blk_no, hblks, hbp, &offset);
|
||||
if (error)
|
||||
goto bread_err2;
|
||||
offset = xlog_align(log, blk_no, hblks, hbp);
|
||||
|
||||
rhead = (xlog_rec_header_t *)offset;
|
||||
error = xlog_valid_rec_header(log, rhead, blk_no);
|
||||
if (error)
|
||||
|
@ -3536,10 +3562,11 @@ xlog_do_recovery_pass(
|
|||
|
||||
/* blocks in data section */
|
||||
bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
|
||||
error = xlog_bread(log, blk_no + hblks, bblks, dbp);
|
||||
error = xlog_bread(log, blk_no + hblks, bblks, dbp,
|
||||
&offset);
|
||||
if (error)
|
||||
goto bread_err2;
|
||||
offset = xlog_align(log, blk_no + hblks, bblks, dbp);
|
||||
|
||||
xlog_unpack_data(rhead, offset, log);
|
||||
if ((error = xlog_recover_process_data(log,
|
||||
rhash, rhead, offset, pass)))
|
||||
|
@ -3562,10 +3589,10 @@ xlog_do_recovery_pass(
|
|||
wrapped_hblks = 0;
|
||||
if (blk_no + hblks <= log->l_logBBsize) {
|
||||
/* Read header in one read */
|
||||
error = xlog_bread(log, blk_no, hblks, hbp);
|
||||
error = xlog_bread(log, blk_no, hblks, hbp,
|
||||
&offset);
|
||||
if (error)
|
||||
goto bread_err2;
|
||||
offset = xlog_align(log, blk_no, hblks, hbp);
|
||||
} else {
|
||||
/* This LR is split across physical log end */
|
||||
if (blk_no != log->l_logBBsize) {
|
||||
|
@ -3573,12 +3600,13 @@ xlog_do_recovery_pass(
|
|||
ASSERT(blk_no <= INT_MAX);
|
||||
split_hblks = log->l_logBBsize - (int)blk_no;
|
||||
ASSERT(split_hblks > 0);
|
||||
if ((error = xlog_bread(log, blk_no,
|
||||
split_hblks, hbp)))
|
||||
error = xlog_bread(log, blk_no,
|
||||
split_hblks, hbp,
|
||||
&offset);
|
||||
if (error)
|
||||
goto bread_err2;
|
||||
offset = xlog_align(log, blk_no,
|
||||
split_hblks, hbp);
|
||||
}
|
||||
|
||||
/*
|
||||
* Note: this black magic still works with
|
||||
* large sector sizes (non-512) only because:
|
||||
|
@ -3596,14 +3624,19 @@ xlog_do_recovery_pass(
|
|||
error = XFS_BUF_SET_PTR(hbp,
|
||||
bufaddr + BBTOB(split_hblks),
|
||||
BBTOB(hblks - split_hblks));
|
||||
if (!error)
|
||||
error = xlog_bread(log, 0,
|
||||
wrapped_hblks, hbp);
|
||||
if (!error)
|
||||
error = XFS_BUF_SET_PTR(hbp, bufaddr,
|
||||
if (error)
|
||||
goto bread_err2;
|
||||
|
||||
error = xlog_bread_noalign(log, 0,
|
||||
wrapped_hblks, hbp);
|
||||
if (error)
|
||||
goto bread_err2;
|
||||
|
||||
error = XFS_BUF_SET_PTR(hbp, bufaddr,
|
||||
BBTOB(hblks));
|
||||
if (error)
|
||||
goto bread_err2;
|
||||
|
||||
if (!offset)
|
||||
offset = xlog_align(log, 0,
|
||||
wrapped_hblks, hbp);
|
||||
|
@ -3619,10 +3652,10 @@ xlog_do_recovery_pass(
|
|||
|
||||
/* Read in data for log record */
|
||||
if (blk_no + bblks <= log->l_logBBsize) {
|
||||
error = xlog_bread(log, blk_no, bblks, dbp);
|
||||
error = xlog_bread(log, blk_no, bblks, dbp,
|
||||
&offset);
|
||||
if (error)
|
||||
goto bread_err2;
|
||||
offset = xlog_align(log, blk_no, bblks, dbp);
|
||||
} else {
|
||||
/* This log record is split across the
|
||||
* physical end of log */
|
||||
|
@ -3636,12 +3669,13 @@ xlog_do_recovery_pass(
|
|||
split_bblks =
|
||||
log->l_logBBsize - (int)blk_no;
|
||||
ASSERT(split_bblks > 0);
|
||||
if ((error = xlog_bread(log, blk_no,
|
||||
split_bblks, dbp)))
|
||||
error = xlog_bread(log, blk_no,
|
||||
split_bblks, dbp,
|
||||
&offset);
|
||||
if (error)
|
||||
goto bread_err2;
|
||||
offset = xlog_align(log, blk_no,
|
||||
split_bblks, dbp);
|
||||
}
|
||||
|
||||
/*
|
||||
* Note: this black magic still works with
|
||||
* large sector sizes (non-512) only because:
|
||||
|
@ -3658,15 +3692,19 @@ xlog_do_recovery_pass(
|
|||
error = XFS_BUF_SET_PTR(dbp,
|
||||
bufaddr + BBTOB(split_bblks),
|
||||
BBTOB(bblks - split_bblks));
|
||||
if (!error)
|
||||
error = xlog_bread(log, wrapped_hblks,
|
||||
bblks - split_bblks,
|
||||
dbp);
|
||||
if (!error)
|
||||
error = XFS_BUF_SET_PTR(dbp, bufaddr,
|
||||
h_size);
|
||||
if (error)
|
||||
goto bread_err2;
|
||||
|
||||
error = xlog_bread_noalign(log, wrapped_hblks,
|
||||
bblks - split_bblks,
|
||||
dbp);
|
||||
if (error)
|
||||
goto bread_err2;
|
||||
|
||||
error = XFS_BUF_SET_PTR(dbp, bufaddr, h_size);
|
||||
if (error)
|
||||
goto bread_err2;
|
||||
|
||||
if (!offset)
|
||||
offset = xlog_align(log, wrapped_hblks,
|
||||
bblks - split_bblks, dbp);
|
||||
|
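Editor's note: the "black magic" comments in this function describe how a log record that wraps past the physical end of the log is read into one contiguous buffer: the piece before the wrap is read normally, then the buffer pointer is bumped with XFS_BUF_SET_PTR so the piece starting at block 0 lands directly behind it, and the pointer is restored afterwards. Condensed from the surrounding hunks, with error handling and alignment details left out, so this is a sketch rather than the verbatim code:

/* bufaddr was saved from the buffer before the first SET_PTR call */
split_bblks = log->l_logBBsize - (int)blk_no;	/* blocks before the wrap */
error = xlog_bread(log, blk_no, split_bblks, dbp, &offset);

error = XFS_BUF_SET_PTR(dbp, bufaddr + BBTOB(split_bblks),
			BBTOB(bblks - split_bblks));
error = xlog_bread_noalign(log, wrapped_hblks,
			   bblks - split_bblks, dbp);	/* wrapped piece */
error = XFS_BUF_SET_PTR(dbp, bufaddr, h_size);		/* restore the buffer */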
@ -3683,17 +3721,21 @@ xlog_do_recovery_pass(
|
|||
|
||||
/* read first part of physical log */
|
||||
while (blk_no < head_blk) {
|
||||
if ((error = xlog_bread(log, blk_no, hblks, hbp)))
|
||||
error = xlog_bread(log, blk_no, hblks, hbp, &offset);
|
||||
if (error)
|
||||
goto bread_err2;
|
||||
offset = xlog_align(log, blk_no, hblks, hbp);
|
||||
|
||||
rhead = (xlog_rec_header_t *)offset;
|
||||
error = xlog_valid_rec_header(log, rhead, blk_no);
|
||||
if (error)
|
||||
goto bread_err2;
|
||||
|
||||
bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
|
||||
if ((error = xlog_bread(log, blk_no+hblks, bblks, dbp)))
|
||||
error = xlog_bread(log, blk_no+hblks, bblks, dbp,
|
||||
&offset);
|
||||
if (error)
|
||||
goto bread_err2;
|
||||
offset = xlog_align(log, blk_no+hblks, bblks, dbp);
|
||||
|
||||
xlog_unpack_data(rhead, offset, log);
|
||||
if ((error = xlog_recover_process_data(log, rhash,
|
||||
rhead, offset, pass)))
|
||||
|
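Editor's note: across this file the old two-step pattern of xlog_bread() followed by xlog_align() becomes a single xlog_bread() call that returns the sector-aligned data pointer through a new out parameter, while xlog_bread_noalign() is used where the caller has repositioned the buffer pointer itself. A plausible shape for that wrapper, inferred from the call sites in this diff rather than copied from the tree:

STATIC int
xlog_bread(
	xlog_t		*log,
	xfs_daddr_t	blk_no,
	int		nbblks,
	xfs_buf_t	*bp,
	xfs_caddr_t	*offset)
{
	int	error;

	error = xlog_bread_noalign(log, blk_no, nbblks, bp);
	if (error)
		return error;

	/* hand the caller the aligned data instead of making every
	 * call site pair the read with a separate xlog_align() */
	*offset = xlog_align(log, blk_no, nbblks, bp);
	return 0;
}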
|
|
@ -45,7 +45,6 @@
|
|||
#include "xfs_fsops.h"
|
||||
#include "xfs_utils.h"
|
||||
|
||||
STATIC int xfs_uuid_mount(xfs_mount_t *);
|
||||
STATIC void xfs_unmountfs_wait(xfs_mount_t *);
|
||||
|
||||
|
||||
|
@ -121,6 +120,84 @@ static const struct {
|
|||
{ sizeof(xfs_sb_t), 0 }
|
||||
};
|
||||
|
||||
static DEFINE_MUTEX(xfs_uuid_table_mutex);
|
||||
static int xfs_uuid_table_size;
|
||||
static uuid_t *xfs_uuid_table;
|
||||
|
||||
/*
|
||||
* See if the UUID is unique among mounted XFS filesystems.
|
||||
* Mount fails if UUID is nil or a FS with the same UUID is already mounted.
|
||||
*/
|
||||
STATIC int
|
||||
xfs_uuid_mount(
|
||||
struct xfs_mount *mp)
|
||||
{
|
||||
uuid_t *uuid = &mp->m_sb.sb_uuid;
|
||||
int hole, i;
|
||||
|
||||
if (mp->m_flags & XFS_MOUNT_NOUUID)
|
||||
return 0;
|
||||
|
||||
if (uuid_is_nil(uuid)) {
|
||||
cmn_err(CE_WARN,
|
||||
"XFS: Filesystem %s has nil UUID - can't mount",
|
||||
mp->m_fsname);
|
||||
return XFS_ERROR(EINVAL);
|
||||
}
|
||||
|
||||
mutex_lock(&xfs_uuid_table_mutex);
|
||||
for (i = 0, hole = -1; i < xfs_uuid_table_size; i++) {
|
||||
if (uuid_is_nil(&xfs_uuid_table[i])) {
|
||||
hole = i;
|
||||
continue;
|
||||
}
|
||||
if (uuid_equal(uuid, &xfs_uuid_table[i]))
|
||||
goto out_duplicate;
|
||||
}
|
||||
|
||||
if (hole < 0) {
|
||||
xfs_uuid_table = kmem_realloc(xfs_uuid_table,
|
||||
(xfs_uuid_table_size + 1) * sizeof(*xfs_uuid_table),
|
||||
xfs_uuid_table_size * sizeof(*xfs_uuid_table),
|
||||
KM_SLEEP);
|
||||
hole = xfs_uuid_table_size++;
|
||||
}
|
||||
xfs_uuid_table[hole] = *uuid;
|
||||
mutex_unlock(&xfs_uuid_table_mutex);
|
||||
|
||||
return 0;
|
||||
|
||||
out_duplicate:
|
||||
mutex_unlock(&xfs_uuid_table_mutex);
|
||||
cmn_err(CE_WARN, "XFS: Filesystem %s has duplicate UUID - can't mount",
|
||||
mp->m_fsname);
|
||||
return XFS_ERROR(EINVAL);
|
||||
}
|
||||
|
||||
STATIC void
|
||||
xfs_uuid_unmount(
|
||||
struct xfs_mount *mp)
|
||||
{
|
||||
uuid_t *uuid = &mp->m_sb.sb_uuid;
|
||||
int i;
|
||||
|
||||
if (mp->m_flags & XFS_MOUNT_NOUUID)
|
||||
return;
|
||||
|
||||
mutex_lock(&xfs_uuid_table_mutex);
|
||||
for (i = 0; i < xfs_uuid_table_size; i++) {
|
||||
if (uuid_is_nil(&xfs_uuid_table[i]))
|
||||
continue;
|
||||
if (!uuid_equal(uuid, &xfs_uuid_table[i]))
|
||||
continue;
|
||||
memset(&xfs_uuid_table[i], 0, sizeof(uuid_t));
|
||||
break;
|
||||
}
|
||||
ASSERT(i < xfs_uuid_table_size);
|
||||
mutex_unlock(&xfs_uuid_table_mutex);
|
||||
}
|
||||
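Editor's note: the table above replaces the old uuid_table_insert()/uuid_table_remove() helpers with a single mutex guarding a flat array that grows one slot at a time, reusing nil entries as holes. A rough userspace analogue of the same bookkeeping (not the kernel code; a failed realloc is ignored here the same way a KM_SLEEP allocation never fails):

#include <pthread.h>
#include <stdlib.h>
#include <string.h>

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static size_t table_size;
static unsigned char (*table)[16];		/* 16-byte UUIDs */

static int uuid_register(const unsigned char uuid[16])
{
	static const unsigned char nil[16];
	size_t i, hole = (size_t)-1;

	pthread_mutex_lock(&table_lock);
	for (i = 0; i < table_size; i++) {
		if (!memcmp(table[i], nil, 16)) {
			hole = i;		/* remember a reusable slot */
			continue;
		}
		if (!memcmp(table[i], uuid, 16)) {
			pthread_mutex_unlock(&table_lock);
			return -1;		/* duplicate UUID */
		}
	}
	if (hole == (size_t)-1) {
		table = realloc(table, (table_size + 1) * sizeof(*table));
		hole = table_size++;
	}
	memcpy(table[hole], uuid, 16);
	pthread_mutex_unlock(&table_lock);
	return 0;
}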
|
||||
|
||||
/*
|
||||
* Free up the resources associated with a mount structure. Assume that
|
||||
* the structure was initially zeroed, so we can tell which fields got
|
||||
|
@ -256,6 +333,22 @@ xfs_mount_validate_sb(
|
|||
return XFS_ERROR(ENOSYS);
|
||||
}
|
||||
|
||||
/*
|
||||
* Currently only very few inode sizes are supported.
|
||||
*/
|
||||
switch (sbp->sb_inodesize) {
|
||||
case 256:
|
||||
case 512:
|
||||
case 1024:
|
||||
case 2048:
|
||||
break;
|
||||
default:
|
||||
xfs_fs_mount_cmn_err(flags,
|
||||
"inode size of %d bytes not supported",
|
||||
sbp->sb_inodesize);
|
||||
return XFS_ERROR(ENOSYS);
|
||||
}
|
||||
|
||||
if (xfs_sb_validate_fsb_count(sbp, sbp->sb_dblocks) ||
|
||||
xfs_sb_validate_fsb_count(sbp, sbp->sb_rblocks)) {
|
||||
xfs_fs_mount_cmn_err(flags,
|
||||
|
@ -574,32 +667,10 @@ xfs_mount_common(xfs_mount_t *mp, xfs_sb_t *sbp)
|
|||
mp->m_sectbb_log = sbp->sb_sectlog - BBSHIFT;
|
||||
mp->m_agno_log = xfs_highbit32(sbp->sb_agcount - 1) + 1;
|
||||
mp->m_agino_log = sbp->sb_inopblog + sbp->sb_agblklog;
|
||||
mp->m_litino = sbp->sb_inodesize - sizeof(struct xfs_dinode);
|
||||
mp->m_blockmask = sbp->sb_blocksize - 1;
|
||||
mp->m_blockwsize = sbp->sb_blocksize >> XFS_WORDLOG;
|
||||
mp->m_blockwmask = mp->m_blockwsize - 1;
|
||||
|
||||
/*
|
||||
* Setup for attributes, in case they get created.
|
||||
* This value is for inodes getting attributes for the first time,
|
||||
* the per-inode value is for old attribute values.
|
||||
*/
|
||||
ASSERT(sbp->sb_inodesize >= 256 && sbp->sb_inodesize <= 2048);
|
||||
switch (sbp->sb_inodesize) {
|
||||
case 256:
|
||||
mp->m_attroffset = XFS_LITINO(mp) -
|
||||
XFS_BMDR_SPACE_CALC(MINABTPTRS);
|
||||
break;
|
||||
case 512:
|
||||
case 1024:
|
||||
case 2048:
|
||||
mp->m_attroffset = XFS_BMDR_SPACE_CALC(6 * MINABTPTRS);
|
||||
break;
|
||||
default:
|
||||
ASSERT(0);
|
||||
}
|
||||
ASSERT(mp->m_attroffset < XFS_LITINO(mp));
|
||||
|
||||
mp->m_alloc_mxr[0] = xfs_allocbt_maxrecs(mp, sbp->sb_blocksize, 1);
|
||||
mp->m_alloc_mxr[1] = xfs_allocbt_maxrecs(mp, sbp->sb_blocksize, 0);
|
||||
mp->m_alloc_mnr[0] = mp->m_alloc_mxr[0] / 2;
|
||||
|
@ -645,7 +716,7 @@ xfs_initialize_perag_data(xfs_mount_t *mp, xfs_agnumber_t agcount)
|
|||
for (index = 0; index < agcount; index++) {
|
||||
/*
|
||||
* read the agf, then the agi. This gets us
|
||||
* all the inforamtion we need and populates the
|
||||
* all the information we need and populates the
|
||||
* per-ag structures for us.
|
||||
*/
|
||||
error = xfs_alloc_pagf_init(mp, NULL, index, 0);
|
||||
|
@ -886,8 +957,6 @@ xfs_check_sizes(xfs_mount_t *mp)
|
|||
}
|
||||
|
||||
/*
|
||||
* xfs_mountfs
|
||||
*
|
||||
* This function does the following on an initial mount of a file system:
|
||||
* - reads the superblock from disk and init the mount struct
|
||||
* - if we're a 32-bit kernel, do a size check on the superblock
|
||||
|
@ -905,7 +974,6 @@ xfs_mountfs(
|
|||
xfs_inode_t *rip;
|
||||
__uint64_t resblks;
|
||||
uint quotamount, quotaflags;
|
||||
int uuid_mounted = 0;
|
||||
int error = 0;
|
||||
|
||||
xfs_mount_common(mp, sbp);
|
||||
|
@ -960,7 +1028,7 @@ xfs_mountfs(
|
|||
*/
|
||||
error = xfs_update_alignment(mp);
|
||||
if (error)
|
||||
goto error1;
|
||||
goto out;
|
||||
|
||||
xfs_alloc_compute_maxlevels(mp);
|
||||
xfs_bmap_compute_maxlevels(mp, XFS_DATA_FORK);
|
||||
|
@ -971,19 +1039,9 @@ xfs_mountfs(
|
|||
|
||||
mp->m_maxioffset = xfs_max_file_offset(sbp->sb_blocklog);
|
||||
|
||||
/*
|
||||
* XFS uses the uuid from the superblock as the unique
|
||||
* identifier for fsid. We can not use the uuid from the volume
|
||||
* since a single partition filesystem is identical to a single
|
||||
* partition volume/filesystem.
|
||||
*/
|
||||
if ((mp->m_flags & XFS_MOUNT_NOUUID) == 0) {
|
||||
if (xfs_uuid_mount(mp)) {
|
||||
error = XFS_ERROR(EINVAL);
|
||||
goto error1;
|
||||
}
|
||||
uuid_mounted=1;
|
||||
}
|
||||
error = xfs_uuid_mount(mp);
|
||||
if (error)
|
||||
goto out;
|
||||
|
||||
/*
|
||||
* Set the minimum read and write sizes
|
||||
|
@ -1007,7 +1065,7 @@ xfs_mountfs(
|
|||
*/
|
||||
error = xfs_check_sizes(mp);
|
||||
if (error)
|
||||
goto error1;
|
||||
goto out_remove_uuid;
|
||||
|
||||
/*
|
||||
* Initialize realtime fields in the mount structure
|
||||
|
@ -1015,7 +1073,7 @@ xfs_mountfs(
|
|||
error = xfs_rtmount_init(mp);
|
||||
if (error) {
|
||||
cmn_err(CE_WARN, "XFS: RT mount failed");
|
||||
goto error1;
|
||||
goto out_remove_uuid;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -1045,26 +1103,26 @@ xfs_mountfs(
|
|||
mp->m_perag = kmem_zalloc(sbp->sb_agcount * sizeof(xfs_perag_t),
|
||||
KM_MAYFAIL);
|
||||
if (!mp->m_perag)
|
||||
goto error1;
|
||||
goto out_remove_uuid;
|
||||
|
||||
mp->m_maxagi = xfs_initialize_perag(mp, sbp->sb_agcount);
|
||||
|
||||
if (!sbp->sb_logblocks) {
|
||||
cmn_err(CE_WARN, "XFS: no log defined");
|
||||
XFS_ERROR_REPORT("xfs_mountfs", XFS_ERRLEVEL_LOW, mp);
|
||||
error = XFS_ERROR(EFSCORRUPTED);
|
||||
goto out_free_perag;
|
||||
}
|
||||
|
||||
/*
|
||||
* log's mount-time initialization. Perform 1st part recovery if needed
|
||||
*/
|
||||
if (likely(sbp->sb_logblocks > 0)) { /* check for volume case */
|
||||
error = xfs_log_mount(mp, mp->m_logdev_targp,
|
||||
XFS_FSB_TO_DADDR(mp, sbp->sb_logstart),
|
||||
XFS_FSB_TO_BB(mp, sbp->sb_logblocks));
|
||||
if (error) {
|
||||
cmn_err(CE_WARN, "XFS: log mount failed");
|
||||
goto error2;
|
||||
}
|
||||
} else { /* No log has been defined */
|
||||
cmn_err(CE_WARN, "XFS: no log defined");
|
||||
XFS_ERROR_REPORT("xfs_mountfs_int(1)", XFS_ERRLEVEL_LOW, mp);
|
||||
error = XFS_ERROR(EFSCORRUPTED);
|
||||
goto error2;
|
||||
error = xfs_log_mount(mp, mp->m_logdev_targp,
|
||||
XFS_FSB_TO_DADDR(mp, sbp->sb_logstart),
|
||||
XFS_FSB_TO_BB(mp, sbp->sb_logblocks));
|
||||
if (error) {
|
||||
cmn_err(CE_WARN, "XFS: log mount failed");
|
||||
goto out_free_perag;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -1086,15 +1144,14 @@ xfs_mountfs(
|
|||
* If we are currently making the filesystem, the initialisation will
|
||||
* fail as the perag data is in an undefined state.
|
||||
*/
|
||||
|
||||
if (xfs_sb_version_haslazysbcount(&mp->m_sb) &&
|
||||
!XFS_LAST_UNMOUNT_WAS_CLEAN(mp) &&
|
||||
!mp->m_sb.sb_inprogress) {
|
||||
error = xfs_initialize_perag_data(mp, sbp->sb_agcount);
|
||||
if (error) {
|
||||
goto error2;
|
||||
}
|
||||
if (error)
|
||||
goto out_free_perag;
|
||||
}
|
||||
|
||||
/*
|
||||
* Get and sanity-check the root inode.
|
||||
* Save the pointer to it in the mount structure.
|
||||
|
@ -1102,7 +1159,7 @@ xfs_mountfs(
|
|||
error = xfs_iget(mp, NULL, sbp->sb_rootino, 0, XFS_ILOCK_EXCL, &rip, 0);
|
||||
if (error) {
|
||||
cmn_err(CE_WARN, "XFS: failed to read root inode");
|
||||
goto error3;
|
||||
goto out_log_dealloc;
|
||||
}
|
||||
|
||||
ASSERT(rip != NULL);
|
||||
|
@ -1116,7 +1173,7 @@ xfs_mountfs(
|
|||
XFS_ERROR_REPORT("xfs_mountfs_int(2)", XFS_ERRLEVEL_LOW,
|
||||
mp);
|
||||
error = XFS_ERROR(EFSCORRUPTED);
|
||||
goto error4;
|
||||
goto out_rele_rip;
|
||||
}
|
||||
mp->m_rootip = rip; /* save it */
|
||||
|
||||
|
@ -1131,7 +1188,7 @@ xfs_mountfs(
|
|||
* Free up the root inode.
|
||||
*/
|
||||
cmn_err(CE_WARN, "XFS: failed to read RT inodes");
|
||||
goto error4;
|
||||
goto out_rele_rip;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -1143,7 +1200,7 @@ xfs_mountfs(
|
|||
error = xfs_mount_log_sb(mp, mp->m_update_flags);
|
||||
if (error) {
|
||||
cmn_err(CE_WARN, "XFS: failed to write sb changes");
|
||||
goto error4;
|
||||
goto out_rtunmount;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1152,7 +1209,7 @@ xfs_mountfs(
|
|||
*/
|
||||
error = XFS_QM_INIT(mp, "amount, "aflags);
|
||||
if (error)
|
||||
goto error4;
|
||||
goto out_rtunmount;
|
||||
|
||||
/*
|
||||
* Finish recovering the file system. This part needed to be
|
||||
|
@ -1162,7 +1219,7 @@ xfs_mountfs(
|
|||
error = xfs_log_mount_finish(mp);
|
||||
if (error) {
|
||||
cmn_err(CE_WARN, "XFS: log mount finish failed");
|
||||
goto error4;
|
||||
goto out_rtunmount;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -1170,7 +1227,7 @@ xfs_mountfs(
|
|||
*/
|
||||
error = XFS_QM_MOUNT(mp, quotamount, quotaflags);
|
||||
if (error)
|
||||
goto error4;
|
||||
goto out_rtunmount;
|
||||
|
||||
/*
|
||||
* Now we are mounted, reserve a small amount of unused space for
|
||||
|
@ -1194,18 +1251,17 @@ xfs_mountfs(
|
|||
|
||||
return 0;
|
||||
|
||||
error4:
|
||||
/*
|
||||
* Free up the root inode.
|
||||
*/
|
||||
out_rtunmount:
|
||||
xfs_rtunmount_inodes(mp);
|
||||
out_rele_rip:
|
||||
IRELE(rip);
|
||||
error3:
|
||||
xfs_log_unmount_dealloc(mp);
|
||||
error2:
|
||||
out_log_dealloc:
|
||||
xfs_log_unmount(mp);
|
||||
out_free_perag:
|
||||
xfs_free_perag(mp);
|
||||
error1:
|
||||
if (uuid_mounted)
|
||||
uuid_table_remove(&mp->m_sb.sb_uuid);
|
||||
out_remove_uuid:
|
||||
xfs_uuid_unmount(mp);
|
||||
out:
|
||||
return error;
|
||||
}
|
||||
|
||||
|
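Editor's note: the error paths above trade the old numbered labels (error1..error4) for labels named after the teardown they perform, so each failure jumps to the point that unwinds exactly what has been set up so far and then falls through the remaining cleanup. The general shape of that idiom, with hypothetical helpers:

int setup_a(void), setup_b(void), setup_c(void);
void teardown_a(void), teardown_b(void);

int setup_all(void)
{
	int error;

	error = setup_a();
	if (error)
		goto out;
	error = setup_b();
	if (error)
		goto out_teardown_a;
	error = setup_c();
	if (error)
		goto out_teardown_b;
	return 0;

 out_teardown_b:
	teardown_b();
 out_teardown_a:
	teardown_a();
 out:
	return error;
}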
@ -1226,15 +1282,12 @@ xfs_unmountfs(
|
|||
*/
|
||||
XFS_QM_UNMOUNT(mp);
|
||||
|
||||
if (mp->m_rbmip)
|
||||
IRELE(mp->m_rbmip);
|
||||
if (mp->m_rsumip)
|
||||
IRELE(mp->m_rsumip);
|
||||
xfs_rtunmount_inodes(mp);
|
||||
IRELE(mp->m_rootip);
|
||||
|
||||
/*
|
||||
* We can potentially deadlock here if we have an inode cluster
|
||||
* that has been freed has it's buffer still pinned in memory because
|
||||
* that has been freed has its buffer still pinned in memory because
|
||||
* the transaction is still sitting in a iclog. The stale inodes
|
||||
* on that buffer will have their flush locks held until the
|
||||
* transaction hits the disk and the callbacks run. the inode
|
||||
|
@ -1266,7 +1319,7 @@ xfs_unmountfs(
|
|||
* Unreserve any blocks we have so that when we unmount we don't account
|
||||
* the reserved free space as used. This is really only necessary for
|
||||
* lazy superblock counting because it trusts the incore superblock
|
||||
* counters to be aboslutely correct on clean unmount.
|
||||
* counters to be absolutely correct on clean unmount.
|
||||
*
|
||||
* We don't bother correcting this elsewhere for lazy superblock
|
||||
* counting because on mount of an unclean filesystem we reconstruct the
|
||||
|
@ -1288,10 +1341,9 @@ xfs_unmountfs(
|
|||
"Freespace may not be correct on next mount.");
|
||||
xfs_unmountfs_writesb(mp);
|
||||
xfs_unmountfs_wait(mp); /* wait for async bufs */
|
||||
xfs_log_unmount(mp); /* Done! No more fs ops. */
|
||||
|
||||
if ((mp->m_flags & XFS_MOUNT_NOUUID) == 0)
|
||||
uuid_table_remove(&mp->m_sb.sb_uuid);
|
||||
xfs_log_unmount_write(mp);
|
||||
xfs_log_unmount(mp);
|
||||
xfs_uuid_unmount(mp);
|
||||
|
||||
#if defined(DEBUG)
|
||||
xfs_errortag_clearall(mp, 0);
|
||||
|
@ -1792,29 +1844,6 @@ xfs_freesb(
|
|||
mp->m_sb_bp = NULL;
|
||||
}
|
||||
|
||||
/*
|
||||
* See if the UUID is unique among mounted XFS filesystems.
|
||||
* Mount fails if UUID is nil or a FS with the same UUID is already mounted.
|
||||
*/
|
||||
STATIC int
|
||||
xfs_uuid_mount(
|
||||
xfs_mount_t *mp)
|
||||
{
|
||||
if (uuid_is_nil(&mp->m_sb.sb_uuid)) {
|
||||
cmn_err(CE_WARN,
|
||||
"XFS: Filesystem %s has nil UUID - can't mount",
|
||||
mp->m_fsname);
|
||||
return -1;
|
||||
}
|
||||
if (!uuid_table_insert(&mp->m_sb.sb_uuid)) {
|
||||
cmn_err(CE_WARN,
|
||||
"XFS: Filesystem %s has duplicate UUID - can't mount",
|
||||
mp->m_fsname);
|
||||
return -1;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Used to log changes to the superblock unit and width fields which could
|
||||
* be altered by the mount options, as well as any potential sb_features2
|
||||
|
@ -1868,7 +1897,7 @@ xfs_mount_log_sb(
|
|||
* we disable the per-cpu counter and go through the slow path.
|
||||
*
|
||||
* The slow path is the current xfs_mod_incore_sb() function. This means that
|
||||
* when we disable a per-cpu counter, we need to drain it's resources back to
|
||||
* when we disable a per-cpu counter, we need to drain its resources back to
|
||||
* the global superblock. We do this after disabling the counter to prevent
|
||||
* more threads from queueing up on the counter.
|
||||
*
|
||||
|
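Editor's note: the comment above describes the lazy per-cpu superblock counters: fast-path updates go to a per-cpu delta, and when a counter nears its limits it is disabled and its per-cpu deltas are folded back into the global value so the locked slow path takes over. A much-simplified illustration of that disable-and-drain step; locking and memory-ordering concerns are deliberately left out:

#define NR_CPUS	8			/* illustrative only */

struct pcpu_counter {
	int	disabled;
	long	global;			/* authoritative once disabled */
	long	local[NR_CPUS];		/* per-cpu deltas */
};

static void disable_and_drain(struct pcpu_counter *c)
{
	int cpu;

	c->disabled = 1;		/* new updates must take the slow path */
	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		c->global += c->local[cpu];
		c->local[cpu] = 0;
	}
}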
|
|
@ -136,7 +136,6 @@ typedef int (*xfs_dqvopchownresv_t)(struct xfs_trans *, struct xfs_inode *,
|
|||
struct xfs_dquot *, struct xfs_dquot *, uint);
|
||||
typedef void (*xfs_dqstatvfs_t)(struct xfs_inode *, struct kstatfs *);
|
||||
typedef int (*xfs_dqsync_t)(struct xfs_mount *, int flags);
|
||||
typedef int (*xfs_quotactl_t)(struct xfs_mount *, int, int, xfs_caddr_t);
|
||||
|
||||
typedef struct xfs_qmops {
|
||||
xfs_qminit_t xfs_qminit;
|
||||
|
@ -154,7 +153,6 @@ typedef struct xfs_qmops {
|
|||
xfs_dqvopchownresv_t xfs_dqvopchownresv;
|
||||
xfs_dqstatvfs_t xfs_dqstatvfs;
|
||||
xfs_dqsync_t xfs_dqsync;
|
||||
xfs_quotactl_t xfs_quotactl;
|
||||
struct xfs_dqtrxops *xfs_dqtrxops;
|
||||
} xfs_qmops_t;
|
||||
|
||||
|
@ -188,8 +186,6 @@ typedef struct xfs_qmops {
|
|||
(*(ip)->i_mount->m_qm_ops->xfs_dqstatvfs)(ip, statp)
|
||||
#define XFS_QM_DQSYNC(mp, flags) \
|
||||
(*(mp)->m_qm_ops->xfs_dqsync)(mp, flags)
|
||||
#define XFS_QM_QUOTACTL(mp, cmd, id, addr) \
|
||||
(*(mp)->m_qm_ops->xfs_quotactl)(mp, cmd, id, addr)
|
||||
|
||||
#ifdef HAVE_PERCPU_SB
|
||||
|
||||
|
@ -273,19 +269,17 @@ typedef struct xfs_mount {
|
|||
uint m_inobt_mnr[2]; /* min inobt btree records */
|
||||
uint m_ag_maxlevels; /* XFS_AG_MAXLEVELS */
|
||||
uint m_bm_maxlevels[2]; /* XFS_BM_MAXLEVELS */
|
||||
uint m_in_maxlevels; /* XFS_IN_MAXLEVELS */
|
||||
uint m_in_maxlevels; /* max inobt btree levels. */
|
||||
struct xfs_perag *m_perag; /* per-ag accounting info */
|
||||
struct rw_semaphore m_peraglock; /* lock for m_perag (pointer) */
|
||||
struct mutex m_growlock; /* growfs mutex */
|
||||
int m_fixedfsid[2]; /* unchanged for life of FS */
|
||||
uint m_dmevmask; /* DMI events for this FS */
|
||||
__uint64_t m_flags; /* global mount flags */
|
||||
uint m_attroffset; /* inode attribute offset */
|
||||
uint m_dir_node_ents; /* #entries in a dir danode */
|
||||
uint m_attr_node_ents; /* #entries in attr danode */
|
||||
int m_ialloc_inos; /* inodes in inode allocation */
|
||||
int m_ialloc_blks; /* blocks in inode allocation */
|
||||
int m_litino; /* size of inode union area */
|
||||
int m_inoalign_mask;/* mask sb_inoalignmt if used */
|
||||
uint m_qflags; /* quota status flags */
|
||||
xfs_trans_reservations_t m_reservations;/* precomputed res values */
|
||||
|
@ -293,9 +287,6 @@ typedef struct xfs_mount {
|
|||
__uint64_t m_maxioffset; /* maximum inode offset */
|
||||
__uint64_t m_resblks; /* total reserved blocks */
|
||||
__uint64_t m_resblks_avail;/* available reserved blocks */
|
||||
#if XFS_BIG_INUMS
|
||||
xfs_ino_t m_inoadd; /* add value for ino64_offset */
|
||||
#endif
|
||||
int m_dalign; /* stripe unit */
|
||||
int m_swidth; /* stripe width */
|
||||
int m_sinoalign; /* stripe unit inode alignment */
|
||||
|
@ -337,7 +328,6 @@ typedef struct xfs_mount {
|
|||
#define XFS_MOUNT_WSYNC (1ULL << 0) /* for nfs - all metadata ops
|
||||
must be synchronous except
|
||||
for space allocations */
|
||||
#define XFS_MOUNT_INO64 (1ULL << 1)
|
||||
#define XFS_MOUNT_DMAPI (1ULL << 2) /* dmapi is enabled */
|
||||
#define XFS_MOUNT_WAS_CLEAN (1ULL << 3)
|
||||
#define XFS_MOUNT_FS_SHUTDOWN (1ULL << 4) /* atomic stop of all filesystem
|
||||
|
@ -389,8 +379,8 @@ typedef struct xfs_mount {
|
|||
* Synchronous read and write sizes. This should be
|
||||
* better for NFSv2 wsync filesystems.
|
||||
*/
|
||||
#define XFS_WSYNC_READIO_LOG 15 /* 32K */
|
||||
#define XFS_WSYNC_WRITEIO_LOG 14 /* 16K */
|
||||
#define XFS_WSYNC_READIO_LOG 15 /* 32k */
|
||||
#define XFS_WSYNC_WRITEIO_LOG 14 /* 16k */
|
||||
|
||||
/*
|
||||
* Allow large block sizes to be reported to userspace programs if the
|
||||
|
@ -500,9 +490,6 @@ typedef struct xfs_mod_sb {
|
|||
int64_t msb_delta; /* Change to make to specified field */
|
||||
} xfs_mod_sb_t;
|
||||
|
||||
#define XFS_MOUNT_ILOCK(mp) mutex_lock(&((mp)->m_ilock))
|
||||
#define XFS_MOUNT_IUNLOCK(mp) mutex_unlock(&((mp)->m_ilock))
|
||||
|
||||
extern int xfs_log_sbcount(xfs_mount_t *, uint);
|
||||
extern int xfs_mountfs(xfs_mount_t *mp);
|
||||
extern void xfs_mountfs_check_barriers(xfs_mount_t *mp);
|
||||
|
|
|
@ -126,7 +126,6 @@ static struct xfs_qmops xfs_qmcore_stub = {
|
|||
.xfs_dqvopchownresv = (xfs_dqvopchownresv_t) fs_noerr,
|
||||
.xfs_dqstatvfs = (xfs_dqstatvfs_t) fs_noval,
|
||||
.xfs_dqsync = (xfs_dqsync_t) fs_noerr,
|
||||
.xfs_quotactl = (xfs_quotactl_t) fs_nosys,
|
||||
};
|
||||
|
||||
int
|
||||
|
|
|
@ -18,6 +18,8 @@
|
|||
#ifndef __XFS_QUOTA_H__
|
||||
#define __XFS_QUOTA_H__
|
||||
|
||||
struct xfs_trans;
|
||||
|
||||
/*
|
||||
* The ondisk form of a dquot structure.
|
||||
*/
|
||||
|
@ -185,7 +187,6 @@ typedef struct xfs_qoff_logformat {
|
|||
* to a single function. None of these XFS_QMOPT_* flags are meant to have
|
||||
* persistent values (ie. their values can and will change between versions)
|
||||
*/
|
||||
#define XFS_QMOPT_DQLOCK 0x0000001 /* dqlock */
|
||||
#define XFS_QMOPT_DQALLOC 0x0000002 /* alloc dquot ondisk if needed */
|
||||
#define XFS_QMOPT_UQUOTA 0x0000004 /* user dquot requested */
|
||||
#define XFS_QMOPT_PQUOTA 0x0000008 /* project dquot requested */
|
||||
|
|
|
@ -2288,6 +2288,16 @@ xfs_rtmount_inodes(
|
|||
return 0;
|
||||
}
|
||||
|
||||
void
|
||||
xfs_rtunmount_inodes(
|
||||
struct xfs_mount *mp)
|
||||
{
|
||||
if (mp->m_rbmip)
|
||||
IRELE(mp->m_rbmip);
|
||||
if (mp->m_rsumip)
|
||||
IRELE(mp->m_rsumip);
|
||||
}
|
||||
|
||||
/*
|
||||
* Pick an extent for allocation at the start of a new realtime file.
|
||||
* Use the sequence number stored in the atime field of the bitmap inode.
|
||||
|
|
|
@ -23,8 +23,8 @@ struct xfs_trans;
|
|||
|
||||
/* Min and max rt extent sizes, specified in bytes */
|
||||
#define XFS_MAX_RTEXTSIZE (1024 * 1024 * 1024) /* 1GB */
|
||||
#define XFS_DFL_RTEXTSIZE (64 * 1024) /* 64KB */
|
||||
#define XFS_MIN_RTEXTSIZE (4 * 1024) /* 4KB */
|
||||
#define XFS_DFL_RTEXTSIZE (64 * 1024) /* 64kB */
|
||||
#define XFS_MIN_RTEXTSIZE (4 * 1024) /* 4kB */
|
||||
|
||||
/*
|
||||
* Constants for bit manipulations.
|
||||
|
@ -108,6 +108,9 @@ xfs_rtfree_extent(
|
|||
int /* error */
|
||||
xfs_rtmount_init(
|
||||
struct xfs_mount *mp); /* file system mount structure */
|
||||
void
|
||||
xfs_rtunmount_inodes(
|
||||
struct xfs_mount *mp);
|
||||
|
||||
/*
|
||||
* Get the bitmap and summary inodes into the mount structure
|
||||
|
@ -146,6 +149,7 @@ xfs_growfs_rt(
|
|||
# define xfs_growfs_rt(mp,in) (ENOSYS)
|
||||
# define xfs_rtmount_init(m) (((mp)->m_sb.sb_rblocks == 0)? 0 : (ENOSYS))
|
||||
# define xfs_rtmount_inodes(m) (((mp)->m_sb.sb_rblocks == 0)? 0 : (ENOSYS))
|
||||
# define xfs_rtunmount_inodes(m)
|
||||
#endif /* CONFIG_XFS_RT */
|
||||
|
||||
#endif /* __KERNEL__ */
|
||||
|
|
|
@ -292,7 +292,7 @@ xfs_lic_desc_to_chunk(xfs_log_item_desc_t *dp)
|
|||
* In a write transaction we can allocate a maximum of 2
|
||||
* extents. This gives:
|
||||
* the inode getting the new extents: inode size
|
||||
* the inode\'s bmap btree: max depth * block size
|
||||
* the inode's bmap btree: max depth * block size
|
||||
* the agfs of the ags from which the extents are allocated: 2 * sector
|
||||
* the superblock free block counter: sector size
|
||||
* the allocation btrees: 2 exts * 2 trees * (2 * max depth - 1) * block size
|
||||
|
@ -321,7 +321,7 @@ xfs_lic_desc_to_chunk(xfs_log_item_desc_t *dp)
|
|||
/*
|
||||
* In truncating a file we free up to two extents at once. We can modify:
|
||||
* the inode being truncated: inode size
|
||||
* the inode\'s bmap btree: (max depth + 1) * block size
|
||||
* the inode's bmap btree: (max depth + 1) * block size
|
||||
* And the bmap_finish transaction can free the blocks and bmap blocks:
|
||||
* the agf for each of the ags: 4 * sector size
|
||||
* the agfl for each of the ags: 4 * sector size
|
||||
|
@ -343,7 +343,7 @@ xfs_lic_desc_to_chunk(xfs_log_item_desc_t *dp)
|
|||
(128 * (9 + XFS_ALLOCFREE_LOG_COUNT(mp, 4))) + \
|
||||
(128 * 5) + \
|
||||
XFS_ALLOCFREE_LOG_RES(mp, 1) + \
|
||||
(128 * (2 + XFS_IALLOC_BLOCKS(mp) + XFS_IN_MAXLEVELS(mp) + \
|
||||
(128 * (2 + XFS_IALLOC_BLOCKS(mp) + (mp)->m_in_maxlevels + \
|
||||
XFS_ALLOCFREE_LOG_COUNT(mp, 1))))))
|
||||
|
||||
#define XFS_ITRUNCATE_LOG_RES(mp) ((mp)->m_reservations.tr_itruncate)
|
||||
|
@ -431,8 +431,8 @@ xfs_lic_desc_to_chunk(xfs_log_item_desc_t *dp)
|
|||
* the new inode: inode size
|
||||
* the inode btree entry: 1 block
|
||||
* the directory btree: (max depth + v2) * dir block size
|
||||
* the directory inode\'s bmap btree: (max depth + v2) * block size
|
||||
* the blocks for the symlink: 1 KB
|
||||
* the directory inode's bmap btree: (max depth + v2) * block size
|
||||
* the blocks for the symlink: 1 kB
|
||||
* Or in the first xact we allocate some inodes giving:
|
||||
* the agi and agf of the ag getting the new inodes: 2 * sectorsize
|
||||
* the inode blocks allocated: XFS_IALLOC_BLOCKS * blocksize
|
||||
|
@ -449,9 +449,9 @@ xfs_lic_desc_to_chunk(xfs_log_item_desc_t *dp)
|
|||
(128 * (4 + XFS_DIROP_LOG_COUNT(mp)))), \
|
||||
(2 * (mp)->m_sb.sb_sectsize + \
|
||||
XFS_FSB_TO_B((mp), XFS_IALLOC_BLOCKS((mp))) + \
|
||||
XFS_FSB_TO_B((mp), XFS_IN_MAXLEVELS(mp)) + \
|
||||
XFS_FSB_TO_B((mp), (mp)->m_in_maxlevels) + \
|
||||
XFS_ALLOCFREE_LOG_RES(mp, 1) + \
|
||||
(128 * (2 + XFS_IALLOC_BLOCKS(mp) + XFS_IN_MAXLEVELS(mp) + \
|
||||
(128 * (2 + XFS_IALLOC_BLOCKS(mp) + (mp)->m_in_maxlevels + \
|
||||
XFS_ALLOCFREE_LOG_COUNT(mp, 1))))))
|
||||
|
||||
#define XFS_SYMLINK_LOG_RES(mp) ((mp)->m_reservations.tr_symlink)
|
||||
|
@ -463,7 +463,7 @@ xfs_lic_desc_to_chunk(xfs_log_item_desc_t *dp)
|
|||
* the inode btree entry: block size
|
||||
* the superblock for the nlink flag: sector size
|
||||
* the directory btree: (max depth + v2) * dir block size
|
||||
* the directory inode\'s bmap btree: (max depth + v2) * block size
|
||||
* the directory inode's bmap btree: (max depth + v2) * block size
|
||||
* Or in the first xact we allocate some inodes giving:
|
||||
* the agi and agf of the ag getting the new inodes: 2 * sectorsize
|
||||
* the superblock for the nlink flag: sector size
|
||||
|
@ -481,9 +481,9 @@ xfs_lic_desc_to_chunk(xfs_log_item_desc_t *dp)
|
|||
(128 * (3 + XFS_DIROP_LOG_COUNT(mp)))), \
|
||||
(3 * (mp)->m_sb.sb_sectsize + \
|
||||
XFS_FSB_TO_B((mp), XFS_IALLOC_BLOCKS((mp))) + \
|
||||
XFS_FSB_TO_B((mp), XFS_IN_MAXLEVELS(mp)) + \
|
||||
XFS_FSB_TO_B((mp), (mp)->m_in_maxlevels) + \
|
||||
XFS_ALLOCFREE_LOG_RES(mp, 1) + \
|
||||
(128 * (2 + XFS_IALLOC_BLOCKS(mp) + XFS_IN_MAXLEVELS(mp) + \
|
||||
(128 * (2 + XFS_IALLOC_BLOCKS(mp) + (mp)->m_in_maxlevels + \
|
||||
XFS_ALLOCFREE_LOG_COUNT(mp, 1))))))
|
||||
|
||||
#define XFS_CREATE_LOG_RES(mp) ((mp)->m_reservations.tr_create)
|
||||
|
@ -513,7 +513,7 @@ xfs_lic_desc_to_chunk(xfs_log_item_desc_t *dp)
|
|||
MAX((__uint16_t)XFS_FSB_TO_B((mp), 1), XFS_INODE_CLUSTER_SIZE(mp)) + \
|
||||
(128 * 5) + \
|
||||
XFS_ALLOCFREE_LOG_RES(mp, 1) + \
|
||||
(128 * (2 + XFS_IALLOC_BLOCKS(mp) + XFS_IN_MAXLEVELS(mp) + \
|
||||
(128 * (2 + XFS_IALLOC_BLOCKS(mp) + (mp)->m_in_maxlevels + \
|
||||
XFS_ALLOCFREE_LOG_COUNT(mp, 1))))
|
||||
|
||||
|
||||
|
@ -637,7 +637,7 @@ xfs_lic_desc_to_chunk(xfs_log_item_desc_t *dp)
|
|||
/*
|
||||
* Removing the attribute fork of a file
|
||||
* the inode being truncated: inode size
|
||||
* the inode\'s bmap btree: max depth * block size
|
||||
* the inode's bmap btree: max depth * block size
|
||||
* And the bmap_finish transaction can free the blocks and bmap blocks:
|
||||
* the agf for each of the ags: 4 * sector size
|
||||
* the agfl for each of the ags: 4 * sector size
|
||||
|
|
|
@ -79,7 +79,7 @@ xfs_trans_ail_tail(
* the push is run asynchronously in a separate thread, so we return the tail
* of the log right now instead of the tail after the push. This means we will
* either continue right away, or we will sleep waiting on the async thread to
* do it's work.
* do its work.
*
* We do this unlocked - we only need to know whether there is anything in the
* AIL at the time we are called. We don't need to access the contents of

@ -160,7 +160,7 @@ xfs_trans_ail_cursor_next(
/*
* Now that the traversal is complete, we need to remove the cursor
* from the list of traversing cursors. Avoid removing the embedded
* push cursor, but use the fact it is alway present to make the
* push cursor, but use the fact it is always present to make the
* list deletion simple.
*/
void
|
|
@ -22,7 +22,7 @@
|
|||
#include "xfs_inum.h"
|
||||
#include "xfs_trans.h"
|
||||
#include "xfs_trans_priv.h"
|
||||
/* XXX: from here down needed until struct xfs_trans has it's own ailp */
|
||||
/* XXX: from here down needed until struct xfs_trans has its own ailp */
|
||||
#include "xfs_bit.h"
|
||||
#include "xfs_buf_item.h"
|
||||
#include "xfs_sb.h"
|
||||
|
|
|
@ -47,7 +47,7 @@
|
|||
#define XFS_DIRREMOVE_SPACE_RES(mp) \
|
||||
XFS_DAREMOVE_SPACE_RES(mp, XFS_DATA_FORK)
|
||||
#define XFS_IALLOC_SPACE_RES(mp) \
|
||||
(XFS_IALLOC_BLOCKS(mp) + XFS_IN_MAXLEVELS(mp)-1)
|
||||
(XFS_IALLOC_BLOCKS(mp) + (mp)->m_in_maxlevels - 1)
|
||||
|
||||
/*
|
||||
* Space reservation values for various transactions.
|
||||
|
|
|
@ -20,14 +20,6 @@

#ifdef __KERNEL__

/*
* POSIX Extensions
*/
typedef unsigned char uchar_t;
typedef unsigned short ushort_t;
typedef unsigned int uint_t;
typedef unsigned long ulong_t;

/*
* Additional type declarations for XFS
*/
@ -374,7 +374,7 @@ xfs_truncate_file(
|
|||
|
||||
/*
|
||||
* Follow the normal truncate locking protocol. Since we
|
||||
* hold the inode in the transaction, we know that it's number
|
||||
* hold the inode in the transaction, we know that its number
|
||||
* of references will stay constant.
|
||||
*/
|
||||
xfs_ilock(ip, XFS_ILOCK_EXCL);
|
||||
|
|
|
@ -1136,7 +1136,7 @@ xfs_inactive(
|
|||
* If the inode is already free, then there can be nothing
|
||||
* to clean up here.
|
||||
*/
|
||||
if (ip->i_d.di_mode == 0 || VN_BAD(VFS_I(ip))) {
|
||||
if (ip->i_d.di_mode == 0 || is_bad_inode(VFS_I(ip))) {
|
||||
ASSERT(ip->i_df.if_real_bytes == 0);
|
||||
ASSERT(ip->i_df.if_broot_bytes == 0);
|
||||
return VN_INACTIVE_CACHE;
|
||||
|
@ -1387,23 +1387,28 @@ xfs_create(
|
|||
xfs_inode_t **ipp,
|
||||
cred_t *credp)
|
||||
{
|
||||
xfs_mount_t *mp = dp->i_mount;
|
||||
xfs_inode_t *ip;
|
||||
xfs_trans_t *tp;
|
||||
int is_dir = S_ISDIR(mode);
|
||||
struct xfs_mount *mp = dp->i_mount;
|
||||
struct xfs_inode *ip = NULL;
|
||||
struct xfs_trans *tp = NULL;
|
||||
int error;
|
||||
xfs_bmap_free_t free_list;
|
||||
xfs_fsblock_t first_block;
|
||||
boolean_t unlock_dp_on_error = B_FALSE;
|
||||
int dm_event_sent = 0;
|
||||
uint cancel_flags;
|
||||
int committed;
|
||||
xfs_prid_t prid;
|
||||
struct xfs_dquot *udqp, *gdqp;
|
||||
struct xfs_dquot *udqp = NULL;
|
||||
struct xfs_dquot *gdqp = NULL;
|
||||
uint resblks;
|
||||
uint log_res;
|
||||
uint log_count;
|
||||
|
||||
ASSERT(!*ipp);
|
||||
xfs_itrace_entry(dp);
|
||||
|
||||
if (XFS_FORCED_SHUTDOWN(mp))
|
||||
return XFS_ERROR(EIO);
|
||||
|
||||
if (DM_EVENT_ENABLED(dp, DM_EVENT_CREATE)) {
|
||||
error = XFS_SEND_NAMESP(mp, DM_EVENT_CREATE,
|
||||
dp, DM_RIGHT_NULL, NULL,
|
||||
|
@ -1412,84 +1417,97 @@ xfs_create(
|
|||
|
||||
if (error)
|
||||
return error;
|
||||
dm_event_sent = 1;
|
||||
}
|
||||
|
||||
if (XFS_FORCED_SHUTDOWN(mp))
|
||||
return XFS_ERROR(EIO);
|
||||
|
||||
/* Return through std_return after this point. */
|
||||
|
||||
udqp = gdqp = NULL;
|
||||
if (dp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT)
|
||||
prid = dp->i_d.di_projid;
|
||||
else
|
||||
prid = (xfs_prid_t)dfltprid;
|
||||
prid = dfltprid;
|
||||
|
||||
/*
|
||||
* Make sure that we have allocated dquot(s) on disk.
|
||||
*/
|
||||
error = XFS_QM_DQVOPALLOC(mp, dp,
|
||||
current_fsuid(), current_fsgid(), prid,
|
||||
XFS_QMOPT_QUOTALL|XFS_QMOPT_INHERIT, &udqp, &gdqp);
|
||||
XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT, &udqp, &gdqp);
|
||||
if (error)
|
||||
goto std_return;
|
||||
|
||||
ip = NULL;
|
||||
if (is_dir) {
|
||||
rdev = 0;
|
||||
resblks = XFS_MKDIR_SPACE_RES(mp, name->len);
|
||||
log_res = XFS_MKDIR_LOG_RES(mp);
|
||||
log_count = XFS_MKDIR_LOG_COUNT;
|
||||
tp = xfs_trans_alloc(mp, XFS_TRANS_MKDIR);
|
||||
} else {
|
||||
resblks = XFS_CREATE_SPACE_RES(mp, name->len);
|
||||
log_res = XFS_CREATE_LOG_RES(mp);
|
||||
log_count = XFS_CREATE_LOG_COUNT;
|
||||
tp = xfs_trans_alloc(mp, XFS_TRANS_CREATE);
|
||||
}
|
||||
|
||||
tp = xfs_trans_alloc(mp, XFS_TRANS_CREATE);
|
||||
cancel_flags = XFS_TRANS_RELEASE_LOG_RES;
|
||||
resblks = XFS_CREATE_SPACE_RES(mp, name->len);
|
||||
|
||||
/*
|
||||
* Initially assume that the file does not exist and
|
||||
* reserve the resources for that case. If that is not
|
||||
* the case we'll drop the one we have and get a more
|
||||
* appropriate transaction later.
|
||||
*/
|
||||
error = xfs_trans_reserve(tp, resblks, XFS_CREATE_LOG_RES(mp), 0,
|
||||
XFS_TRANS_PERM_LOG_RES, XFS_CREATE_LOG_COUNT);
|
||||
error = xfs_trans_reserve(tp, resblks, log_res, 0,
|
||||
XFS_TRANS_PERM_LOG_RES, log_count);
|
||||
if (error == ENOSPC) {
|
||||
resblks = 0;
|
||||
error = xfs_trans_reserve(tp, 0, XFS_CREATE_LOG_RES(mp), 0,
|
||||
XFS_TRANS_PERM_LOG_RES, XFS_CREATE_LOG_COUNT);
|
||||
error = xfs_trans_reserve(tp, 0, log_res, 0,
|
||||
XFS_TRANS_PERM_LOG_RES, log_count);
|
||||
}
|
||||
if (error) {
|
||||
cancel_flags = 0;
|
||||
goto error_return;
|
||||
goto out_trans_cancel;
|
||||
}
|
||||
|
||||
xfs_ilock(dp, XFS_ILOCK_EXCL | XFS_ILOCK_PARENT);
|
||||
unlock_dp_on_error = B_TRUE;
|
||||
|
||||
xfs_bmap_init(&free_list, &first_block);
|
||||
/*
|
||||
* Check for directory link count overflow.
|
||||
*/
|
||||
if (is_dir && dp->i_d.di_nlink >= XFS_MAXLINK) {
|
||||
error = XFS_ERROR(EMLINK);
|
||||
goto out_trans_cancel;
|
||||
}
|
||||
|
||||
ASSERT(ip == NULL);
|
||||
xfs_bmap_init(&free_list, &first_block);
|
||||
|
||||
/*
|
||||
* Reserve disk quota and the inode.
|
||||
*/
|
||||
error = XFS_TRANS_RESERVE_QUOTA(mp, tp, udqp, gdqp, resblks, 1, 0);
|
||||
if (error)
|
||||
goto error_return;
|
||||
goto out_trans_cancel;
|
||||
|
||||
error = xfs_dir_canenter(tp, dp, name, resblks);
|
||||
if (error)
|
||||
goto error_return;
|
||||
error = xfs_dir_ialloc(&tp, dp, mode, 1,
|
||||
rdev, credp, prid, resblks > 0,
|
||||
&ip, &committed);
|
||||
goto out_trans_cancel;
|
||||
|
||||
/*
|
||||
* A newly created regular or special file just has one directory
|
||||
* entry pointing to them, but a directory also the "." entry
|
||||
* pointing to itself.
|
||||
*/
|
||||
error = xfs_dir_ialloc(&tp, dp, mode, is_dir ? 2 : 1, rdev, credp,
|
||||
prid, resblks > 0, &ip, &committed);
|
||||
if (error) {
|
||||
if (error == ENOSPC)
|
||||
goto error_return;
|
||||
goto abort_return;
|
||||
goto out_trans_cancel;
|
||||
goto out_trans_abort;
|
||||
}
|
||||
xfs_itrace_ref(ip);
|
||||
|
||||
/*
|
||||
* At this point, we've gotten a newly allocated inode.
|
||||
* It is locked (and joined to the transaction).
|
||||
*/
|
||||
|
||||
xfs_itrace_ref(ip);
|
||||
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
|
||||
|
||||
/*
|
||||
|
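Editor's note: with xfs_mkdir() folded into xfs_create() (the separate function is deleted further down in this file), the single entry point now picks its space reservation, log reservation and transaction type from is_dir up front, and retries without a block reservation on ENOSPC. Condensed from the hunk above:

if (is_dir) {
	resblks   = XFS_MKDIR_SPACE_RES(mp, name->len);
	log_res   = XFS_MKDIR_LOG_RES(mp);
	log_count = XFS_MKDIR_LOG_COUNT;
	tp = xfs_trans_alloc(mp, XFS_TRANS_MKDIR);
} else {
	resblks   = XFS_CREATE_SPACE_RES(mp, name->len);
	log_res   = XFS_CREATE_LOG_RES(mp);
	log_count = XFS_CREATE_LOG_COUNT;
	tp = xfs_trans_alloc(mp, XFS_TRANS_CREATE);
}

error = xfs_trans_reserve(tp, resblks, log_res, 0,
			  XFS_TRANS_PERM_LOG_RES, log_count);
if (error == ENOSPC) {
	resblks = 0;		/* retry with no block reservation */
	error = xfs_trans_reserve(tp, 0, log_res, 0,
				  XFS_TRANS_PERM_LOG_RES, log_count);
}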
@ -1508,19 +1526,28 @@ xfs_create(
|
|||
resblks - XFS_IALLOC_SPACE_RES(mp) : 0);
|
||||
if (error) {
|
||||
ASSERT(error != ENOSPC);
|
||||
goto abort_return;
|
||||
goto out_trans_abort;
|
||||
}
|
||||
xfs_ichgtime(dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
|
||||
xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
|
||||
|
||||
if (is_dir) {
|
||||
error = xfs_dir_init(tp, ip, dp);
|
||||
if (error)
|
||||
goto out_bmap_cancel;
|
||||
|
||||
error = xfs_bumplink(tp, dp);
|
||||
if (error)
|
||||
goto out_bmap_cancel;
|
||||
}
|
||||
|
||||
/*
|
||||
* If this is a synchronous mount, make sure that the
|
||||
* create transaction goes to disk before returning to
|
||||
* the user.
|
||||
*/
|
||||
if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC)) {
|
||||
if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
|
||||
xfs_trans_set_sync(tp);
|
||||
}
|
||||
|
||||
/*
|
||||
* Attach the dquot(s) to the inodes and modify them incore.
|
||||
|
@ -1537,16 +1564,13 @@ xfs_create(
|
|||
IHOLD(ip);
|
||||
|
||||
error = xfs_bmap_finish(&tp, &free_list, &committed);
|
||||
if (error) {
|
||||
xfs_bmap_cancel(&free_list);
|
||||
goto abort_rele;
|
||||
}
|
||||
if (error)
|
||||
goto out_abort_rele;
|
||||
|
||||
error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
|
||||
if (error) {
|
||||
IRELE(ip);
|
||||
tp = NULL;
|
||||
goto error_return;
|
||||
goto out_dqrele;
|
||||
}
|
||||
|
||||
XFS_QM_DQRELE(mp, udqp);
|
||||
|
@ -1555,26 +1579,22 @@ xfs_create(
|
|||
*ipp = ip;
|
||||
|
||||
/* Fallthrough to std_return with error = 0 */
|
||||
|
||||
std_return:
|
||||
if ((*ipp || (error != 0 && dm_event_sent != 0)) &&
|
||||
DM_EVENT_ENABLED(dp, DM_EVENT_POSTCREATE)) {
|
||||
(void) XFS_SEND_NAMESP(mp, DM_EVENT_POSTCREATE,
|
||||
dp, DM_RIGHT_NULL,
|
||||
*ipp ? ip : NULL,
|
||||
DM_RIGHT_NULL, name->name, NULL,
|
||||
mode, error, 0);
|
||||
std_return:
|
||||
if (DM_EVENT_ENABLED(dp, DM_EVENT_POSTCREATE)) {
|
||||
XFS_SEND_NAMESP(mp, DM_EVENT_POSTCREATE, dp, DM_RIGHT_NULL,
|
||||
ip, DM_RIGHT_NULL, name->name, NULL, mode,
|
||||
error, 0);
|
||||
}
|
||||
|
||||
return error;
|
||||
|
||||
abort_return:
|
||||
out_bmap_cancel:
|
||||
xfs_bmap_cancel(&free_list);
|
||||
out_trans_abort:
|
||||
cancel_flags |= XFS_TRANS_ABORT;
|
||||
/* FALLTHROUGH */
|
||||
|
||||
error_return:
|
||||
if (tp != NULL)
|
||||
xfs_trans_cancel(tp, cancel_flags);
|
||||
|
||||
out_trans_cancel:
|
||||
xfs_trans_cancel(tp, cancel_flags);
|
||||
out_dqrele:
|
||||
XFS_QM_DQRELE(mp, udqp);
|
||||
XFS_QM_DQRELE(mp, gdqp);
|
||||
|
||||
|
@ -1583,20 +1603,18 @@ xfs_create(
|
|||
|
||||
goto std_return;
|
||||
|
||||
abort_rele:
|
||||
out_abort_rele:
|
||||
/*
|
||||
* Wait until after the current transaction is aborted to
|
||||
* release the inode. This prevents recursive transactions
|
||||
* and deadlocks from xfs_inactive.
|
||||
*/
|
||||
xfs_bmap_cancel(&free_list);
|
||||
cancel_flags |= XFS_TRANS_ABORT;
|
||||
xfs_trans_cancel(tp, cancel_flags);
|
||||
IRELE(ip);
|
||||
|
||||
XFS_QM_DQRELE(mp, udqp);
|
||||
XFS_QM_DQRELE(mp, gdqp);
|
||||
|
||||
goto std_return;
|
||||
unlock_dp_on_error = B_FALSE;
|
||||
goto out_dqrele;
|
||||
}
|
||||
|
||||
#ifdef DEBUG
|
||||
|
@ -2004,8 +2022,10 @@ xfs_link(
|
|||
/* Return through std_return after this point. */
|
||||
|
||||
error = XFS_QM_DQATTACH(mp, sip, 0);
|
||||
if (!error && sip != tdp)
|
||||
error = XFS_QM_DQATTACH(mp, tdp, 0);
|
||||
if (error)
|
||||
goto std_return;
|
||||
|
||||
error = XFS_QM_DQATTACH(mp, tdp, 0);
|
||||
if (error)
|
||||
goto std_return;
|
||||
|
||||
|
@ -2110,209 +2130,6 @@ xfs_link(
|
|||
goto std_return;
|
||||
}
|
||||
|
||||
|
||||
int
|
||||
xfs_mkdir(
|
||||
xfs_inode_t *dp,
|
||||
struct xfs_name *dir_name,
|
||||
mode_t mode,
|
||||
xfs_inode_t **ipp,
|
||||
cred_t *credp)
|
||||
{
|
||||
xfs_mount_t *mp = dp->i_mount;
|
||||
xfs_inode_t *cdp; /* inode of created dir */
|
||||
xfs_trans_t *tp;
|
||||
int cancel_flags;
|
||||
int error;
|
||||
int committed;
|
||||
xfs_bmap_free_t free_list;
|
||||
xfs_fsblock_t first_block;
|
||||
boolean_t unlock_dp_on_error = B_FALSE;
|
||||
boolean_t created = B_FALSE;
|
||||
int dm_event_sent = 0;
|
||||
xfs_prid_t prid;
|
||||
struct xfs_dquot *udqp, *gdqp;
|
||||
uint resblks;
|
||||
|
||||
if (XFS_FORCED_SHUTDOWN(mp))
|
||||
return XFS_ERROR(EIO);
|
||||
|
||||
tp = NULL;
|
||||
|
||||
if (DM_EVENT_ENABLED(dp, DM_EVENT_CREATE)) {
|
||||
error = XFS_SEND_NAMESP(mp, DM_EVENT_CREATE,
|
||||
dp, DM_RIGHT_NULL, NULL,
|
||||
DM_RIGHT_NULL, dir_name->name, NULL,
|
||||
mode, 0, 0);
|
||||
if (error)
|
||||
return error;
|
||||
dm_event_sent = 1;
|
||||
}
|
||||
|
||||
/* Return through std_return after this point. */
|
||||
|
||||
xfs_itrace_entry(dp);
|
||||
|
||||
mp = dp->i_mount;
|
||||
udqp = gdqp = NULL;
|
||||
if (dp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT)
|
||||
prid = dp->i_d.di_projid;
|
||||
else
|
||||
prid = (xfs_prid_t)dfltprid;
|
||||
|
||||
/*
|
||||
* Make sure that we have allocated dquot(s) on disk.
|
||||
*/
|
||||
error = XFS_QM_DQVOPALLOC(mp, dp,
|
||||
current_fsuid(), current_fsgid(), prid,
|
||||
XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT, &udqp, &gdqp);
|
||||
if (error)
|
||||
goto std_return;
|
||||
|
||||
tp = xfs_trans_alloc(mp, XFS_TRANS_MKDIR);
|
||||
cancel_flags = XFS_TRANS_RELEASE_LOG_RES;
|
||||
resblks = XFS_MKDIR_SPACE_RES(mp, dir_name->len);
|
||||
error = xfs_trans_reserve(tp, resblks, XFS_MKDIR_LOG_RES(mp), 0,
|
||||
XFS_TRANS_PERM_LOG_RES, XFS_MKDIR_LOG_COUNT);
|
||||
if (error == ENOSPC) {
|
||||
resblks = 0;
|
||||
error = xfs_trans_reserve(tp, 0, XFS_MKDIR_LOG_RES(mp), 0,
|
||||
XFS_TRANS_PERM_LOG_RES,
|
||||
XFS_MKDIR_LOG_COUNT);
|
||||
}
|
||||
if (error) {
|
||||
cancel_flags = 0;
|
||||
goto error_return;
|
||||
}
|
||||
|
||||
xfs_ilock(dp, XFS_ILOCK_EXCL | XFS_ILOCK_PARENT);
|
||||
unlock_dp_on_error = B_TRUE;
|
||||
|
||||
/*
|
||||
* Check for directory link count overflow.
|
||||
*/
|
||||
if (dp->i_d.di_nlink >= XFS_MAXLINK) {
|
||||
error = XFS_ERROR(EMLINK);
|
||||
goto error_return;
|
||||
}
|
||||
|
||||
/*
|
||||
* Reserve disk quota and the inode.
|
||||
*/
|
||||
error = XFS_TRANS_RESERVE_QUOTA(mp, tp, udqp, gdqp, resblks, 1, 0);
|
||||
if (error)
|
||||
goto error_return;
|
||||
|
||||
error = xfs_dir_canenter(tp, dp, dir_name, resblks);
|
||||
if (error)
|
||||
goto error_return;
|
||||
/*
|
||||
* create the directory inode.
|
||||
*/
|
||||
error = xfs_dir_ialloc(&tp, dp, mode, 2,
|
||||
0, credp, prid, resblks > 0,
|
||||
&cdp, NULL);
|
||||
if (error) {
|
||||
if (error == ENOSPC)
|
||||
goto error_return;
|
||||
goto abort_return;
|
||||
}
|
||||
xfs_itrace_ref(cdp);
|
||||
|
||||
/*
|
||||
* Now we add the directory inode to the transaction.
|
||||
* We waited until now since xfs_dir_ialloc might start
|
||||
* a new transaction. Had we joined the transaction
|
||||
* earlier, the locks might have gotten released. An error
|
||||
* from here on will result in the transaction cancel
|
||||
* unlocking dp so don't do it explicitly in the error path.
|
||||
*/
|
||||
IHOLD(dp);
|
||||
xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
|
||||
unlock_dp_on_error = B_FALSE;
|
||||
|
||||
xfs_bmap_init(&free_list, &first_block);
|
||||
|
||||
error = xfs_dir_createname(tp, dp, dir_name, cdp->i_ino,
|
||||
&first_block, &free_list, resblks ?
|
||||
resblks - XFS_IALLOC_SPACE_RES(mp) : 0);
|
||||
if (error) {
|
||||
ASSERT(error != ENOSPC);
|
||||
goto error1;
|
||||
}
|
||||
xfs_ichgtime(dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
|
||||
|
||||
error = xfs_dir_init(tp, cdp, dp);
|
||||
if (error)
|
||||
goto error2;
|
||||
|
||||
error = xfs_bumplink(tp, dp);
|
||||
if (error)
|
||||
goto error2;
|
||||
|
||||
created = B_TRUE;
|
||||
|
||||
*ipp = cdp;
|
||||
IHOLD(cdp);
|
||||
|
||||
/*
|
||||
* Attach the dquots to the new inode and modify the icount incore.
|
||||
*/
|
||||
XFS_QM_DQVOPCREATE(mp, tp, cdp, udqp, gdqp);
|
||||
|
||||
/*
|
||||
* If this is a synchronous mount, make sure that the
|
||||
* mkdir transaction goes to disk before returning to
|
||||
* the user.
|
||||
*/
|
||||
if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC)) {
|
||||
xfs_trans_set_sync(tp);
|
||||
}
|
||||
|
||||
error = xfs_bmap_finish(&tp, &free_list, &committed);
|
||||
if (error) {
|
||||
IRELE(cdp);
|
||||
goto error2;
|
||||
}
|
||||
|
||||
error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
|
||||
XFS_QM_DQRELE(mp, udqp);
|
||||
XFS_QM_DQRELE(mp, gdqp);
|
||||
if (error) {
|
||||
IRELE(cdp);
|
||||
}
|
||||
|
||||
/* Fall through to std_return with error = 0 or errno from
|
||||
* xfs_trans_commit. */
|
||||
|
||||
std_return:
|
||||
if ((created || (error != 0 && dm_event_sent != 0)) &&
|
||||
DM_EVENT_ENABLED(dp, DM_EVENT_POSTCREATE)) {
|
||||
(void) XFS_SEND_NAMESP(mp, DM_EVENT_POSTCREATE,
|
||||
dp, DM_RIGHT_NULL,
|
||||
created ? cdp : NULL,
|
||||
DM_RIGHT_NULL,
|
||||
dir_name->name, NULL,
|
||||
mode, error, 0);
|
||||
}
|
||||
return error;
|
||||
|
||||
error2:
|
||||
error1:
|
||||
xfs_bmap_cancel(&free_list);
|
||||
abort_return:
|
||||
cancel_flags |= XFS_TRANS_ABORT;
|
||||
error_return:
|
||||
xfs_trans_cancel(tp, cancel_flags);
|
||||
XFS_QM_DQRELE(mp, udqp);
|
||||
XFS_QM_DQRELE(mp, gdqp);
|
||||
|
||||
if (unlock_dp_on_error)
|
||||
xfs_iunlock(dp, XFS_ILOCK_EXCL);
|
||||
|
||||
goto std_return;
|
||||
}
|
||||
|
||||
int
|
||||
xfs_symlink(
|
||||
xfs_inode_t *dp,
|
||||
|
@ -2586,51 +2403,6 @@ xfs_symlink(
|
|||
goto std_return;
|
||||
}
|
||||
|
||||
int
|
||||
xfs_inode_flush(
|
||||
xfs_inode_t *ip,
|
||||
int flags)
|
||||
{
|
||||
xfs_mount_t *mp = ip->i_mount;
|
||||
int error = 0;
|
||||
|
||||
if (XFS_FORCED_SHUTDOWN(mp))
|
||||
return XFS_ERROR(EIO);
|
||||
|
||||
/*
|
||||
* Bypass inodes which have already been cleaned by
|
||||
* the inode flush clustering code inside xfs_iflush
|
||||
*/
|
||||
if (xfs_inode_clean(ip))
|
||||
return 0;
|
||||
|
||||
/*
|
||||
* We make this non-blocking if the inode is contended,
|
||||
* return EAGAIN to indicate to the caller that they
|
||||
* did not succeed. This prevents the flush path from
|
||||
* blocking on inodes inside another operation right
|
||||
* now, they get caught later by xfs_sync.
|
||||
*/
|
||||
if (flags & FLUSH_SYNC) {
|
||||
xfs_ilock(ip, XFS_ILOCK_SHARED);
|
||||
xfs_iflock(ip);
|
||||
} else if (xfs_ilock_nowait(ip, XFS_ILOCK_SHARED)) {
|
||||
if (xfs_ipincount(ip) || !xfs_iflock_nowait(ip)) {
|
||||
xfs_iunlock(ip, XFS_ILOCK_SHARED);
|
||||
return EAGAIN;
|
||||
}
|
||||
} else {
|
||||
return EAGAIN;
|
||||
}
|
||||
|
||||
error = xfs_iflush(ip, (flags & FLUSH_SYNC) ? XFS_IFLUSH_SYNC
|
||||
: XFS_IFLUSH_ASYNC_NOBLOCK);
|
||||
xfs_iunlock(ip, XFS_ILOCK_SHARED);
|
||||
|
||||
return error;
|
||||
}
|
||||
|
||||
|
||||
int
|
||||
xfs_set_dmattrs(
|
||||
xfs_inode_t *ip,
|
||||
|
@ -2676,7 +2448,7 @@ xfs_reclaim(
|
|||
ASSERT(!VN_MAPPED(VFS_I(ip)));
|
||||
|
||||
/* bad inode, get out here ASAP */
|
||||
if (VN_BAD(VFS_I(ip))) {
|
||||
if (is_bad_inode(VFS_I(ip))) {
|
||||
xfs_ireclaim(ip);
|
||||
return 0;
|
||||
}
|
||||
|
@ -3090,7 +2862,7 @@ xfs_free_file_space(
|
|||
|
||||
/*
|
||||
* Need to zero the stuff we're not freeing, on disk.
|
||||
* If its a realtime file & can't use unwritten extents then we
|
||||
* If it's a realtime file & can't use unwritten extents then we
|
||||
* actually need to zero the extent edges. Otherwise xfs_bunmapi
|
||||
* will take care of it for us.
|
||||
*/
|
||||
|
|
|
@ -31,14 +31,11 @@ int xfs_remove(struct xfs_inode *dp, struct xfs_name *name,
|
|||
struct xfs_inode *ip);
|
||||
int xfs_link(struct xfs_inode *tdp, struct xfs_inode *sip,
|
||||
struct xfs_name *target_name);
|
||||
int xfs_mkdir(struct xfs_inode *dp, struct xfs_name *dir_name,
|
||||
mode_t mode, struct xfs_inode **ipp, cred_t *credp);
|
||||
int xfs_readdir(struct xfs_inode *dp, void *dirent, size_t bufsize,
|
||||
xfs_off_t *offset, filldir_t filldir);
|
||||
int xfs_symlink(struct xfs_inode *dp, struct xfs_name *link_name,
|
||||
const char *target_path, mode_t mode, struct xfs_inode **ipp,
|
||||
cred_t *credp);
|
||||
int xfs_inode_flush(struct xfs_inode *ip, int flags);
|
||||
int xfs_set_dmattrs(struct xfs_inode *ip, u_int evmask, u_int16_t state);
|
||||
int xfs_reclaim(struct xfs_inode *ip);
|
||||
int xfs_change_file_space(struct xfs_inode *ip, int cmd,
|
||||
|
|