[PATCH] mutex subsystem, semaphore to mutex: VFS, ->i_sem
This patch converts the inode semaphore to a mutex. I have tested it on
XFS and compiled as much as one can consider on an ia64. Anyway your luck
with it might be different.

Modified-by: Ingo Molnar <mingo@elte.hu> (finished the conversion)

Signed-off-by: Jes Sorensen <jes@sgi.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 794ee1baee
commit 1b1dcc1b57

113 changed files with 563 additions and 573 deletions
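Every hunk below follows the same mechanical pattern: a down()/up() pair on
inode->i_sem becomes a mutex_lock()/mutex_unlock() pair on the new
inode->i_mutex field (which inode_init_once() now sets up with mutex_init()
instead of sema_init()), and comments that mention i_sem are reworded to say
i_mutex. The following is a minimal illustrative sketch of that pattern, not
a hunk from this patch; the function name example_llseek is made up:

	#include <linux/fs.h>
	#include <linux/mutex.h>

	/* Illustrative only: serialize f_pos updates on the per-inode lock. */
	static loff_t example_llseek(struct file *file, loff_t offset, int orig)
	{
		struct inode *inode = file->f_dentry->d_inode;
		loff_t ret;

		/* before this patch:  down(&inode->i_sem);  */
		mutex_lock(&inode->i_mutex);
		switch (orig) {
		case 0:
			file->f_pos = offset;
			ret = file->f_pos;
			break;
		default:
			ret = -EINVAL;
		}
		/* before this patch:  up(&inode->i_sem);  */
		mutex_unlock(&inode->i_mutex);
		return ret;
	}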
@@ -137,7 +137,7 @@ spufs_delete_inode(struct inode *inode)
 static void spufs_prune_dir(struct dentry *dir)
 {
 	struct dentry *dentry, *tmp;
-	down(&dir->d_inode->i_sem);
+	mutex_lock(&dir->d_inode->i_mutex);
 	list_for_each_entry_safe(dentry, tmp, &dir->d_subdirs, d_child) {
 		spin_lock(&dcache_lock);
 		spin_lock(&dentry->d_lock);
@@ -154,7 +154,7 @@ static void spufs_prune_dir(struct dentry *dir)
 		}
 	}
 	shrink_dcache_parent(dir);
-	up(&dir->d_inode->i_sem);
+	mutex_unlock(&dir->d_inode->i_mutex);
 }
 
 static int spufs_rmdir(struct inode *root, struct dentry *dir_dentry)
@@ -162,15 +162,15 @@ static int spufs_rmdir(struct inode *root, struct dentry *dir_dentry)
 	struct spu_context *ctx;
 
 	/* remove all entries */
-	down(&root->i_sem);
+	mutex_lock(&root->i_mutex);
 	spufs_prune_dir(dir_dentry);
-	up(&root->i_sem);
+	mutex_unlock(&root->i_mutex);
 
 	/* We have to give up the mm_struct */
 	ctx = SPUFS_I(dir_dentry->d_inode)->i_ctx;
 	spu_forget(ctx);
 
-	/* XXX Do we need to hold i_sem here ? */
+	/* XXX Do we need to hold i_mutex here ? */
 	return simple_rmdir(root, dir_dentry);
 }
 
@@ -330,7 +330,7 @@ long spufs_create_thread(struct nameidata *nd,
 out_dput:
 	dput(dentry);
 out_dir:
-	up(&nd->dentry->d_inode->i_sem);
+	mutex_unlock(&nd->dentry->d_inode->i_mutex);
 out:
 	return ret;
 }
@@ -215,7 +215,7 @@ static int do_lo_send_aops(struct loop_device *lo, struct bio_vec *bvec,
 	unsigned offset, bv_offs;
 	int len, ret;
 
-	down(&mapping->host->i_sem);
+	mutex_lock(&mapping->host->i_mutex);
 	index = pos >> PAGE_CACHE_SHIFT;
 	offset = pos & ((pgoff_t)PAGE_CACHE_SIZE - 1);
 	bv_offs = bvec->bv_offset;
@@ -278,7 +278,7 @@ static int do_lo_send_aops(struct loop_device *lo, struct bio_vec *bvec,
 	}
 	ret = 0;
 out:
-	up(&mapping->host->i_sem);
+	mutex_unlock(&mapping->host->i_mutex);
 	return ret;
 unlock:
 	unlock_page(page);
@@ -741,7 +741,7 @@ static loff_t memory_lseek(struct file * file, loff_t offset, int orig)
 {
 	loff_t ret;
 
-	down(&file->f_dentry->d_inode->i_sem);
+	mutex_lock(&file->f_dentry->d_inode->i_mutex);
 	switch (orig) {
 		case 0:
 			file->f_pos = offset;
@@ -756,7 +756,7 @@ static loff_t memory_lseek(struct file * file, loff_t offset, int orig)
 		default:
 			ret = -EINVAL;
 	}
-	up(&file->f_dentry->d_inode->i_sem);
+	mutex_unlock(&file->f_dentry->d_inode->i_mutex);
 	return ret;
 }
 
@@ -138,7 +138,7 @@ static struct dentry *get_node(int num)
 {
 	char s[10];
 	struct dentry *root = capifs_root;
-	down(&root->d_inode->i_sem);
+	mutex_lock(&root->d_inode->i_mutex);
 	return lookup_one_len(s, root, sprintf(s, "%d", num));
 }
 
@@ -159,7 +159,7 @@ void capifs_new_ncci(unsigned int number, dev_t device)
 	dentry = get_node(number);
 	if (!IS_ERR(dentry) && !dentry->d_inode)
 		d_instantiate(dentry, inode);
-	up(&capifs_root->d_inode->i_sem);
+	mutex_unlock(&capifs_root->d_inode->i_mutex);
 }
 
 void capifs_free_ncci(unsigned int number)
@@ -175,7 +175,7 @@ void capifs_free_ncci(unsigned int number)
 		}
 		dput(dentry);
 	}
-	up(&capifs_root->d_inode->i_sem);
+	mutex_unlock(&capifs_root->d_inode->i_mutex);
 }
 
 static int __init capifs_init(void)
@@ -837,9 +837,9 @@ static void __set_size(struct mapped_device *md, sector_t size)
 {
 	set_capacity(md->disk, size);
 
-	down(&md->suspended_bdev->bd_inode->i_sem);
+	mutex_lock(&md->suspended_bdev->bd_inode->i_mutex);
 	i_size_write(md->suspended_bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
-	up(&md->suspended_bdev->bd_inode->i_sem);
+	mutex_unlock(&md->suspended_bdev->bd_inode->i_mutex);
 }
 
 static int __bind(struct mapped_device *md, struct dm_table *t)
@@ -3460,9 +3460,9 @@ static int update_size(mddev_t *mddev, unsigned long size)
 
 		bdev = bdget_disk(mddev->gendisk, 0);
 		if (bdev) {
-			down(&bdev->bd_inode->i_sem);
+			mutex_lock(&bdev->bd_inode->i_mutex);
 			i_size_write(bdev->bd_inode, mddev->array_size << 10);
-			up(&bdev->bd_inode->i_sem);
+			mutex_unlock(&bdev->bd_inode->i_mutex);
 			bdput(bdev);
 		}
 	}
@@ -3486,9 +3486,9 @@ static int update_raid_disks(mddev_t *mddev, int raid_disks)
 
 		bdev = bdget_disk(mddev->gendisk, 0);
 		if (bdev) {
-			down(&bdev->bd_inode->i_sem);
+			mutex_lock(&bdev->bd_inode->i_mutex);
 			i_size_write(bdev->bd_inode, mddev->array_size << 10);
-			up(&bdev->bd_inode->i_sem);
+			mutex_unlock(&bdev->bd_inode->i_mutex);
 			bdput(bdev);
 		}
 	}
@@ -25,7 +25,7 @@ proc_bus_pci_lseek(struct file *file, loff_t off, int whence)
 	loff_t new = -1;
 	struct inode *inode = file->f_dentry->d_inode;
 
-	down(&inode->i_sem);
+	mutex_lock(&inode->i_mutex);
 	switch (whence) {
 	case 0:
 		new = off;
@@ -41,7 +41,7 @@ proc_bus_pci_lseek(struct file *file, loff_t off, int whence)
 		new = -EINVAL;
 	else
 		file->f_pos = new;
-	up(&inode->i_sem);
+	mutex_unlock(&inode->i_mutex);
 	return new;
 }
 
@@ -184,13 +184,13 @@ static void update_bus(struct dentry *bus)
 	bus->d_inode->i_gid = busgid;
 	bus->d_inode->i_mode = S_IFDIR | busmode;
 
-	down(&bus->d_inode->i_sem);
+	mutex_lock(&bus->d_inode->i_mutex);
 
 	list_for_each_entry(dev, &bus->d_subdirs, d_u.d_child)
 		if (dev->d_inode)
 			update_dev(dev);
 
-	up(&bus->d_inode->i_sem);
+	mutex_unlock(&bus->d_inode->i_mutex);
 }
 
 static void update_sb(struct super_block *sb)
@@ -201,7 +201,7 @@ static void update_sb(struct super_block *sb)
 	if (!root)
 		return;
 
-	down(&root->d_inode->i_sem);
+	mutex_lock(&root->d_inode->i_mutex);
 
 	list_for_each_entry(bus, &root->d_subdirs, d_u.d_child) {
 		if (bus->d_inode) {
@@ -219,7 +219,7 @@ static void update_sb(struct super_block *sb)
 		}
 	}
 
-	up(&root->d_inode->i_sem);
+	mutex_unlock(&root->d_inode->i_mutex);
 }
 
 static int remount(struct super_block *sb, int *flags, char *data)
@@ -333,10 +333,10 @@ static int usbfs_empty (struct dentry *dentry)
 static int usbfs_unlink (struct inode *dir, struct dentry *dentry)
 {
 	struct inode *inode = dentry->d_inode;
-	down(&inode->i_sem);
+	mutex_lock(&inode->i_mutex);
 	dentry->d_inode->i_nlink--;
 	dput(dentry);
-	up(&inode->i_sem);
+	mutex_unlock(&inode->i_mutex);
 	d_delete(dentry);
 	return 0;
 }
@@ -346,7 +346,7 @@ static int usbfs_rmdir(struct inode *dir, struct dentry *dentry)
 	int error = -ENOTEMPTY;
 	struct inode * inode = dentry->d_inode;
 
-	down(&inode->i_sem);
+	mutex_lock(&inode->i_mutex);
 	dentry_unhash(dentry);
 	if (usbfs_empty(dentry)) {
 		dentry->d_inode->i_nlink -= 2;
@@ -355,7 +355,7 @@ static int usbfs_rmdir(struct inode *dir, struct dentry *dentry)
 		dir->i_nlink--;
 		error = 0;
 	}
-	up(&inode->i_sem);
+	mutex_unlock(&inode->i_mutex);
 	if (!error)
 		d_delete(dentry);
 	dput(dentry);
@@ -380,7 +380,7 @@ static loff_t default_file_lseek (struct file *file, loff_t offset, int orig)
 {
 	loff_t retval = -EINVAL;
 
-	down(&file->f_dentry->d_inode->i_sem);
+	mutex_lock(&file->f_dentry->d_inode->i_mutex);
 	switch(orig) {
 	case 0:
 		if (offset > 0) {
@@ -397,7 +397,7 @@ static loff_t default_file_lseek (struct file *file, loff_t offset, int orig)
 	default:
 		break;
 	}
-	up(&file->f_dentry->d_inode->i_sem);
+	mutex_unlock(&file->f_dentry->d_inode->i_mutex);
 	return retval;
 }
 
@@ -480,7 +480,7 @@ static int fs_create_by_name (const char *name, mode_t mode,
 	}
 
 	*dentry = NULL;
-	down(&parent->d_inode->i_sem);
+	mutex_lock(&parent->d_inode->i_mutex);
 	*dentry = lookup_one_len(name, parent, strlen(name));
 	if (!IS_ERR(dentry)) {
 		if ((mode & S_IFMT) == S_IFDIR)
@@ -489,7 +489,7 @@ static int fs_create_by_name (const char *name, mode_t mode,
 			error = usbfs_create (parent->d_inode, *dentry, mode);
 	} else
 		error = PTR_ERR(dentry);
-	up(&parent->d_inode->i_sem);
+	mutex_unlock(&parent->d_inode->i_mutex);
 
 	return error;
 }
@@ -528,7 +528,7 @@ static void fs_remove_file (struct dentry *dentry)
 	if (!parent || !parent->d_inode)
 		return;
 
-	down(&parent->d_inode->i_sem);
+	mutex_lock(&parent->d_inode->i_mutex);
 	if (usbfs_positive(dentry)) {
 		if (dentry->d_inode) {
 			if (S_ISDIR(dentry->d_inode->i_mode))
@@ -538,7 +538,7 @@ static void fs_remove_file (struct dentry *dentry)
 			dput(dentry);
 		}
 	}
-	up(&parent->d_inode->i_sem);
+	mutex_unlock(&parent->d_inode->i_mutex);
 }
 
 /* --------------------------------------------------------------------- */
@@ -1891,7 +1891,7 @@ static int fsync_sub(struct lun *curlun)
 		return -EINVAL;
 
 	inode = filp->f_dentry->d_inode;
-	down(&inode->i_sem);
+	mutex_lock(&inode->i_mutex);
 	current->flags |= PF_SYNCWRITE;
 	rc = filemap_fdatawrite(inode->i_mapping);
 	err = filp->f_op->fsync(filp, filp->f_dentry, 1);
@@ -1901,7 +1901,7 @@ static int fsync_sub(struct lun *curlun)
 	if (!rc)
 		rc = err;
 	current->flags &= ~PF_SYNCWRITE;
-	up(&inode->i_sem);
+	mutex_unlock(&inode->i_mutex);
 	VLDBG(curlun, "fdatasync -> %d\n", rc);
 	return rc;
 }
@@ -1562,10 +1562,10 @@ static void destroy_ep_files (struct dev_data *dev)
 		spin_unlock_irq (&dev->lock);
 
 		/* break link to dcache */
-		down (&parent->i_sem);
+		mutex_lock (&parent->i_mutex);
 		d_delete (dentry);
 		dput (dentry);
-		up (&parent->i_sem);
+		mutex_unlock (&parent->i_mutex);
 
 		/* fds may still be open */
 		goto restart;
@@ -244,10 +244,10 @@ affs_put_inode(struct inode *inode)
 	pr_debug("AFFS: put_inode(ino=%lu, nlink=%u)\n", inode->i_ino, inode->i_nlink);
 	affs_free_prealloc(inode);
 	if (atomic_read(&inode->i_count) == 1) {
-		down(&inode->i_sem);
+		mutex_lock(&inode->i_mutex);
 		if (inode->i_size != AFFS_I(inode)->mmu_private)
 			affs_truncate(inode);
-		up(&inode->i_sem);
+		mutex_unlock(&inode->i_mutex);
 	}
 }
 
@@ -229,9 +229,9 @@ static struct dentry *autofs_root_lookup(struct inode *dir, struct dentry *dentr
 		dentry->d_flags |= DCACHE_AUTOFS_PENDING;
 		d_add(dentry, NULL);
 
-		up(&dir->i_sem);
+		mutex_unlock(&dir->i_mutex);
 		autofs_revalidate(dentry, nd);
-		down(&dir->i_sem);
+		mutex_lock(&dir->i_mutex);
 
 		/*
 		 * If we are still pending, check if we had to handle
@@ -489,9 +489,9 @@ static struct dentry *autofs4_lookup(struct inode *dir, struct dentry *dentry, s
 	d_add(dentry, NULL);
 
 	if (dentry->d_op && dentry->d_op->d_revalidate) {
-		up(&dir->i_sem);
+		mutex_unlock(&dir->i_mutex);
 		(dentry->d_op->d_revalidate)(dentry, nd);
-		down(&dir->i_sem);
+		mutex_lock(&dir->i_mutex);
 	}
 
 	/*
@@ -588,11 +588,11 @@ static ssize_t bm_entry_write(struct file *file, const char __user *buffer,
 		case 2: set_bit(Enabled, &e->flags);
 			break;
 		case 3: root = dget(file->f_vfsmnt->mnt_sb->s_root);
-			down(&root->d_inode->i_sem);
+			mutex_lock(&root->d_inode->i_mutex);
 
 			kill_node(e);
 
-			up(&root->d_inode->i_sem);
+			mutex_unlock(&root->d_inode->i_mutex);
 			dput(root);
 			break;
 		default: return res;
@@ -622,7 +622,7 @@ static ssize_t bm_register_write(struct file *file, const char __user *buffer,
 		return PTR_ERR(e);
 
 	root = dget(sb->s_root);
-	down(&root->d_inode->i_sem);
+	mutex_lock(&root->d_inode->i_mutex);
 	dentry = lookup_one_len(e->name, root, strlen(e->name));
 	err = PTR_ERR(dentry);
 	if (IS_ERR(dentry))
@@ -658,7 +658,7 @@ static ssize_t bm_register_write(struct file *file, const char __user *buffer,
 out2:
 	dput(dentry);
 out:
-	up(&root->d_inode->i_sem);
+	mutex_unlock(&root->d_inode->i_mutex);
 	dput(root);
 
 	if (err) {
@@ -703,12 +703,12 @@ static ssize_t bm_status_write(struct file * file, const char __user * buffer,
 		case 1: enabled = 0; break;
 		case 2: enabled = 1; break;
 		case 3: root = dget(file->f_vfsmnt->mnt_sb->s_root);
-			down(&root->d_inode->i_sem);
+			mutex_lock(&root->d_inode->i_mutex);
 
 			while (!list_empty(&entries))
 				kill_node(list_entry(entries.next, Node, list));
 
-			up(&root->d_inode->i_sem);
+			mutex_unlock(&root->d_inode->i_mutex);
 			dput(root);
 		default: return res;
 	}
@@ -202,7 +202,7 @@ static loff_t block_llseek(struct file *file, loff_t offset, int origin)
 	loff_t size;
 	loff_t retval;
 
-	down(&bd_inode->i_sem);
+	mutex_lock(&bd_inode->i_mutex);
 	size = i_size_read(bd_inode);
 
 	switch (origin) {
@@ -219,7 +219,7 @@ static loff_t block_llseek(struct file *file, loff_t offset, int origin)
 		}
 		retval = offset;
 	}
-	up(&bd_inode->i_sem);
+	mutex_unlock(&bd_inode->i_mutex);
 	return retval;
 }
 
@@ -352,11 +352,11 @@ static long do_fsync(unsigned int fd, int datasync)
 	 * We need to protect against concurrent writers,
 	 * which could cause livelocks in fsync_buffers_list
 	 */
-	down(&mapping->host->i_sem);
+	mutex_lock(&mapping->host->i_mutex);
 	err = file->f_op->fsync(file, file->f_dentry, datasync);
 	if (!ret)
 		ret = err;
-	up(&mapping->host->i_sem);
+	mutex_unlock(&mapping->host->i_mutex);
 	err = filemap_fdatawait(mapping);
 	if (!ret)
 		ret = err;
@@ -2338,7 +2338,7 @@ int generic_commit_write(struct file *file, struct page *page,
 	__block_commit_write(inode,page,from,to);
 	/*
 	 * No need to use i_size_read() here, the i_size
-	 * cannot change under us because we hold i_sem.
+	 * cannot change under us because we hold i_mutex.
 	 */
 	if (pos > inode->i_size) {
 		i_size_write(inode, pos);
@ -860,9 +860,9 @@ static int cifs_oplock_thread(void * dummyarg)
|
|||
DeleteOplockQEntry(oplock_item);
|
||||
/* can not grab inode sem here since it would
|
||||
deadlock when oplock received on delete
|
||||
since vfs_unlink holds the i_sem across
|
||||
since vfs_unlink holds the i_mutex across
|
||||
the call */
|
||||
/* down(&inode->i_sem);*/
|
||||
/* mutex_lock(&inode->i_mutex);*/
|
||||
if (S_ISREG(inode->i_mode)) {
|
||||
rc = filemap_fdatawrite(inode->i_mapping);
|
||||
if(CIFS_I(inode)->clientCanCacheRead == 0) {
|
||||
|
@ -871,7 +871,7 @@ static int cifs_oplock_thread(void * dummyarg)
|
|||
}
|
||||
} else
|
||||
rc = 0;
|
||||
/* up(&inode->i_sem);*/
|
||||
/* mutex_unlock(&inode->i_mutex);*/
|
||||
if (rc)
|
||||
CIFS_I(inode)->write_behind_rc = rc;
|
||||
cFYI(1,("Oplock flush inode %p rc %d",inode,rc));
|
||||
|
|
|
@ -1040,9 +1040,9 @@ int cifs_revalidate(struct dentry *direntry)
|
|||
}
|
||||
|
||||
/* can not grab this sem since kernel filesys locking documentation
|
||||
indicates i_sem may be taken by the kernel on lookup and rename
|
||||
which could deadlock if we grab the i_sem here as well */
|
||||
/* down(&direntry->d_inode->i_sem);*/
|
||||
indicates i_mutex may be taken by the kernel on lookup and rename
|
||||
which could deadlock if we grab the i_mutex here as well */
|
||||
/* mutex_lock(&direntry->d_inode->i_mutex);*/
|
||||
/* need to write out dirty pages here */
|
||||
if (direntry->d_inode->i_mapping) {
|
||||
/* do we need to lock inode until after invalidate completes
|
||||
|
@ -1066,7 +1066,7 @@ int cifs_revalidate(struct dentry *direntry)
|
|||
}
|
||||
}
|
||||
}
|
||||
/* up(&direntry->d_inode->i_sem); */
|
||||
/* mutex_unlock(&direntry->d_inode->i_mutex); */
|
||||
|
||||
kfree(full_path);
|
||||
FreeXid(xid);
|
||||
|
|
|
@ -453,7 +453,7 @@ int coda_readdir(struct file *coda_file, void *dirent, filldir_t filldir)
|
|||
coda_vfs_stat.readdir++;
|
||||
|
||||
host_inode = host_file->f_dentry->d_inode;
|
||||
down(&host_inode->i_sem);
|
||||
mutex_lock(&host_inode->i_mutex);
|
||||
host_file->f_pos = coda_file->f_pos;
|
||||
|
||||
if (!host_file->f_op->readdir) {
|
||||
|
@ -475,7 +475,7 @@ int coda_readdir(struct file *coda_file, void *dirent, filldir_t filldir)
|
|||
}
|
||||
out:
|
||||
coda_file->f_pos = host_file->f_pos;
|
||||
up(&host_inode->i_sem);
|
||||
mutex_unlock(&host_inode->i_mutex);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
|
|
@ -77,14 +77,14 @@ coda_file_write(struct file *coda_file, const char __user *buf, size_t count, lo
|
|||
return -EINVAL;
|
||||
|
||||
host_inode = host_file->f_dentry->d_inode;
|
||||
down(&coda_inode->i_sem);
|
||||
mutex_lock(&coda_inode->i_mutex);
|
||||
|
||||
ret = host_file->f_op->write(host_file, buf, count, ppos);
|
||||
|
||||
coda_inode->i_size = host_inode->i_size;
|
||||
coda_inode->i_blocks = (coda_inode->i_size + 511) >> 9;
|
||||
coda_inode->i_mtime = coda_inode->i_ctime = CURRENT_TIME_SEC;
|
||||
up(&coda_inode->i_sem);
|
||||
mutex_unlock(&coda_inode->i_mutex);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
@ -272,9 +272,9 @@ int coda_fsync(struct file *coda_file, struct dentry *coda_dentry, int datasync)
|
|||
if (host_file->f_op && host_file->f_op->fsync) {
|
||||
host_dentry = host_file->f_dentry;
|
||||
host_inode = host_dentry->d_inode;
|
||||
down(&host_inode->i_sem);
|
||||
mutex_lock(&host_inode->i_mutex);
|
||||
err = host_file->f_op->fsync(host_file, host_dentry, datasync);
|
||||
up(&host_inode->i_sem);
|
||||
mutex_unlock(&host_inode->i_mutex);
|
||||
}
|
||||
|
||||
if ( !err && !datasync ) {
|
||||
|
|
|
@ -288,10 +288,10 @@ static struct dentry * configfs_lookup(struct inode *dir,
|
|||
|
||||
/*
|
||||
* Only subdirectories count here. Files (CONFIGFS_NOT_PINNED) are
|
||||
* attributes and are removed by rmdir(). We recurse, taking i_sem
|
||||
* attributes and are removed by rmdir(). We recurse, taking i_mutex
|
||||
* on all children that are candidates for default detach. If the
|
||||
* result is clean, then configfs_detach_group() will handle dropping
|
||||
* i_sem. If there is an error, the caller will clean up the i_sem
|
||||
* i_mutex. If there is an error, the caller will clean up the i_mutex
|
||||
* holders via configfs_detach_rollback().
|
||||
*/
|
||||
static int configfs_detach_prep(struct dentry *dentry)
|
||||
|
@ -309,8 +309,8 @@ static int configfs_detach_prep(struct dentry *dentry)
|
|||
if (sd->s_type & CONFIGFS_NOT_PINNED)
|
||||
continue;
|
||||
if (sd->s_type & CONFIGFS_USET_DEFAULT) {
|
||||
down(&sd->s_dentry->d_inode->i_sem);
|
||||
/* Mark that we've taken i_sem */
|
||||
mutex_lock(&sd->s_dentry->d_inode->i_mutex);
|
||||
/* Mark that we've taken i_mutex */
|
||||
sd->s_type |= CONFIGFS_USET_DROPPING;
|
||||
|
||||
ret = configfs_detach_prep(sd->s_dentry);
|
||||
|
@ -327,7 +327,7 @@ static int configfs_detach_prep(struct dentry *dentry)
|
|||
}
|
||||
|
||||
/*
|
||||
* Walk the tree, dropping i_sem wherever CONFIGFS_USET_DROPPING is
|
||||
* Walk the tree, dropping i_mutex wherever CONFIGFS_USET_DROPPING is
|
||||
* set.
|
||||
*/
|
||||
static void configfs_detach_rollback(struct dentry *dentry)
|
||||
|
@ -341,7 +341,7 @@ static void configfs_detach_rollback(struct dentry *dentry)
|
|||
|
||||
if (sd->s_type & CONFIGFS_USET_DROPPING) {
|
||||
sd->s_type &= ~CONFIGFS_USET_DROPPING;
|
||||
up(&sd->s_dentry->d_inode->i_sem);
|
||||
mutex_unlock(&sd->s_dentry->d_inode->i_mutex);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -424,11 +424,11 @@ static void detach_groups(struct config_group *group)
|
|||
|
||||
/*
|
||||
* From rmdir/unregister, a configfs_detach_prep() pass
|
||||
* has taken our i_sem for us. Drop it.
|
||||
* has taken our i_mutex for us. Drop it.
|
||||
* From mkdir/register cleanup, there is no sem held.
|
||||
*/
|
||||
if (sd->s_type & CONFIGFS_USET_DROPPING)
|
||||
up(&child->d_inode->i_sem);
|
||||
mutex_unlock(&child->d_inode->i_mutex);
|
||||
|
||||
d_delete(child);
|
||||
dput(child);
|
||||
|
@ -493,11 +493,11 @@ static int populate_groups(struct config_group *group)
|
|||
/* FYI, we're faking mkdir here
|
||||
* I'm not sure we need this semaphore, as we're called
|
||||
* from our parent's mkdir. That holds our parent's
|
||||
* i_sem, so afaik lookup cannot continue through our
|
||||
* i_mutex, so afaik lookup cannot continue through our
|
||||
* parent to find us, let alone mess with our tree.
|
||||
* That said, taking our i_sem is closer to mkdir
|
||||
* That said, taking our i_mutex is closer to mkdir
|
||||
* emulation, and shouldn't hurt. */
|
||||
down(&dentry->d_inode->i_sem);
|
||||
mutex_lock(&dentry->d_inode->i_mutex);
|
||||
|
||||
for (i = 0; group->default_groups[i]; i++) {
|
||||
new_group = group->default_groups[i];
|
||||
|
@ -507,7 +507,7 @@ static int populate_groups(struct config_group *group)
|
|||
break;
|
||||
}
|
||||
|
||||
up(&dentry->d_inode->i_sem);
|
||||
mutex_unlock(&dentry->d_inode->i_mutex);
|
||||
}
|
||||
|
||||
if (ret)
|
||||
|
@ -856,7 +856,7 @@ int configfs_rename_dir(struct config_item * item, const char *new_name)
|
|||
down_write(&configfs_rename_sem);
|
||||
parent = item->parent->dentry;
|
||||
|
||||
down(&parent->d_inode->i_sem);
|
||||
mutex_lock(&parent->d_inode->i_mutex);
|
||||
|
||||
new_dentry = lookup_one_len(new_name, parent, strlen(new_name));
|
||||
if (!IS_ERR(new_dentry)) {
|
||||
|
@ -872,7 +872,7 @@ int configfs_rename_dir(struct config_item * item, const char *new_name)
|
|||
error = -EEXIST;
|
||||
dput(new_dentry);
|
||||
}
|
||||
up(&parent->d_inode->i_sem);
|
||||
mutex_unlock(&parent->d_inode->i_mutex);
|
||||
up_write(&configfs_rename_sem);
|
||||
|
||||
return error;
|
||||
|
@ -884,9 +884,9 @@ static int configfs_dir_open(struct inode *inode, struct file *file)
|
|||
struct dentry * dentry = file->f_dentry;
|
||||
struct configfs_dirent * parent_sd = dentry->d_fsdata;
|
||||
|
||||
down(&dentry->d_inode->i_sem);
|
||||
mutex_lock(&dentry->d_inode->i_mutex);
|
||||
file->private_data = configfs_new_dirent(parent_sd, NULL);
|
||||
up(&dentry->d_inode->i_sem);
|
||||
mutex_unlock(&dentry->d_inode->i_mutex);
|
||||
|
||||
return file->private_data ? 0 : -ENOMEM;
|
||||
|
||||
|
@ -897,9 +897,9 @@ static int configfs_dir_close(struct inode *inode, struct file *file)
|
|||
struct dentry * dentry = file->f_dentry;
|
||||
struct configfs_dirent * cursor = file->private_data;
|
||||
|
||||
down(&dentry->d_inode->i_sem);
|
||||
mutex_lock(&dentry->d_inode->i_mutex);
|
||||
list_del_init(&cursor->s_sibling);
|
||||
up(&dentry->d_inode->i_sem);
|
||||
mutex_unlock(&dentry->d_inode->i_mutex);
|
||||
|
||||
release_configfs_dirent(cursor);
|
||||
|
||||
|
@ -975,7 +975,7 @@ static loff_t configfs_dir_lseek(struct file * file, loff_t offset, int origin)
|
|||
{
|
||||
struct dentry * dentry = file->f_dentry;
|
||||
|
||||
down(&dentry->d_inode->i_sem);
|
||||
mutex_lock(&dentry->d_inode->i_mutex);
|
||||
switch (origin) {
|
||||
case 1:
|
||||
offset += file->f_pos;
|
||||
|
@ -983,7 +983,7 @@ static loff_t configfs_dir_lseek(struct file * file, loff_t offset, int origin)
|
|||
if (offset >= 0)
|
||||
break;
|
||||
default:
|
||||
up(&file->f_dentry->d_inode->i_sem);
|
||||
mutex_unlock(&file->f_dentry->d_inode->i_mutex);
|
||||
return -EINVAL;
|
||||
}
|
||||
if (offset != file->f_pos) {
|
||||
|
@ -1007,7 +1007,7 @@ static loff_t configfs_dir_lseek(struct file * file, loff_t offset, int origin)
|
|||
list_add_tail(&cursor->s_sibling, p);
|
||||
}
|
||||
}
|
||||
up(&dentry->d_inode->i_sem);
|
||||
mutex_unlock(&dentry->d_inode->i_mutex);
|
||||
return offset;
|
||||
}
|
||||
|
||||
|
@ -1037,7 +1037,7 @@ int configfs_register_subsystem(struct configfs_subsystem *subsys)
|
|||
sd = configfs_sb->s_root->d_fsdata;
|
||||
link_group(to_config_group(sd->s_element), group);
|
||||
|
||||
down(&configfs_sb->s_root->d_inode->i_sem);
|
||||
mutex_lock(&configfs_sb->s_root->d_inode->i_mutex);
|
||||
|
||||
name.name = group->cg_item.ci_name;
|
||||
name.len = strlen(name.name);
|
||||
|
@ -1057,7 +1057,7 @@ int configfs_register_subsystem(struct configfs_subsystem *subsys)
|
|||
else
|
||||
d_delete(dentry);
|
||||
|
||||
up(&configfs_sb->s_root->d_inode->i_sem);
|
||||
mutex_unlock(&configfs_sb->s_root->d_inode->i_mutex);
|
||||
|
||||
if (dentry) {
|
||||
dput(dentry);
|
||||
|
@ -1079,18 +1079,18 @@ void configfs_unregister_subsystem(struct configfs_subsystem *subsys)
|
|||
return;
|
||||
}
|
||||
|
||||
down(&configfs_sb->s_root->d_inode->i_sem);
|
||||
down(&dentry->d_inode->i_sem);
|
||||
mutex_lock(&configfs_sb->s_root->d_inode->i_mutex);
|
||||
mutex_lock(&dentry->d_inode->i_mutex);
|
||||
if (configfs_detach_prep(dentry)) {
|
||||
printk(KERN_ERR "configfs: Tried to unregister non-empty subsystem!\n");
|
||||
}
|
||||
configfs_detach_group(&group->cg_item);
|
||||
dentry->d_inode->i_flags |= S_DEAD;
|
||||
up(&dentry->d_inode->i_sem);
|
||||
mutex_unlock(&dentry->d_inode->i_mutex);
|
||||
|
||||
d_delete(dentry);
|
||||
|
||||
up(&configfs_sb->s_root->d_inode->i_sem);
|
||||
mutex_unlock(&configfs_sb->s_root->d_inode->i_mutex);
|
||||
|
||||
dput(dentry);
|
||||
|
||||
|
|
|
@ -336,9 +336,9 @@ int configfs_add_file(struct dentry * dir, const struct configfs_attribute * att
|
|||
umode_t mode = (attr->ca_mode & S_IALLUGO) | S_IFREG;
|
||||
int error = 0;
|
||||
|
||||
down(&dir->d_inode->i_sem);
|
||||
mutex_lock(&dir->d_inode->i_mutex);
|
||||
error = configfs_make_dirent(parent_sd, NULL, (void *) attr, mode, type);
|
||||
up(&dir->d_inode->i_sem);
|
||||
mutex_unlock(&dir->d_inode->i_mutex);
|
||||
|
||||
return error;
|
||||
}
|
||||
|
|
|
@ -122,7 +122,7 @@ const unsigned char * configfs_get_name(struct configfs_dirent *sd)
|
|||
|
||||
/*
|
||||
* Unhashes the dentry corresponding to given configfs_dirent
|
||||
* Called with parent inode's i_sem held.
|
||||
* Called with parent inode's i_mutex held.
|
||||
*/
|
||||
void configfs_drop_dentry(struct configfs_dirent * sd, struct dentry * parent)
|
||||
{
|
||||
|
@ -145,7 +145,7 @@ void configfs_hash_and_remove(struct dentry * dir, const char * name)
|
|||
struct configfs_dirent * sd;
|
||||
struct configfs_dirent * parent_sd = dir->d_fsdata;
|
||||
|
||||
down(&dir->d_inode->i_sem);
|
||||
mutex_lock(&dir->d_inode->i_mutex);
|
||||
list_for_each_entry(sd, &parent_sd->s_children, s_sibling) {
|
||||
if (!sd->s_element)
|
||||
continue;
|
||||
|
@ -156,7 +156,7 @@ void configfs_hash_and_remove(struct dentry * dir, const char * name)
|
|||
break;
|
||||
}
|
||||
}
|
||||
up(&dir->d_inode->i_sem);
|
||||
mutex_unlock(&dir->d_inode->i_mutex);
|
||||
}
|
||||
|
||||
|
||||
|
|
|
@ -146,7 +146,7 @@ static int debugfs_create_by_name(const char *name, mode_t mode,
|
|||
}
|
||||
|
||||
*dentry = NULL;
|
||||
down(&parent->d_inode->i_sem);
|
||||
mutex_lock(&parent->d_inode->i_mutex);
|
||||
*dentry = lookup_one_len(name, parent, strlen(name));
|
||||
if (!IS_ERR(dentry)) {
|
||||
if ((mode & S_IFMT) == S_IFDIR)
|
||||
|
@ -155,7 +155,7 @@ static int debugfs_create_by_name(const char *name, mode_t mode,
|
|||
error = debugfs_create(parent->d_inode, *dentry, mode);
|
||||
} else
|
||||
error = PTR_ERR(dentry);
|
||||
up(&parent->d_inode->i_sem);
|
||||
mutex_unlock(&parent->d_inode->i_mutex);
|
||||
|
||||
return error;
|
||||
}
|
||||
|
@ -273,7 +273,7 @@ void debugfs_remove(struct dentry *dentry)
|
|||
if (!parent || !parent->d_inode)
|
||||
return;
|
||||
|
||||
down(&parent->d_inode->i_sem);
|
||||
mutex_lock(&parent->d_inode->i_mutex);
|
||||
if (debugfs_positive(dentry)) {
|
||||
if (dentry->d_inode) {
|
||||
if (S_ISDIR(dentry->d_inode->i_mode))
|
||||
|
@ -283,7 +283,7 @@ void debugfs_remove(struct dentry *dentry)
|
|||
dput(dentry);
|
||||
}
|
||||
}
|
||||
up(&parent->d_inode->i_sem);
|
||||
mutex_unlock(&parent->d_inode->i_mutex);
|
||||
simple_release_fs(&debugfs_mount, &debugfs_mount_count);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(debugfs_remove);
|
||||
|
|
|
@ -2162,27 +2162,27 @@ static int devfs_d_revalidate_wait(struct dentry *dentry, struct nameidata *nd)
|
|||
*
|
||||
* make sure that
|
||||
* d_instantiate always runs under lock
|
||||
* we release i_sem lock before going to sleep
|
||||
* we release i_mutex lock before going to sleep
|
||||
*
|
||||
* unfortunately sometimes d_revalidate is called with
|
||||
* and sometimes without i_sem lock held. The following checks
|
||||
* and sometimes without i_mutex lock held. The following checks
|
||||
* attempt to deduce when we need to add (and drop resp.) lock
|
||||
* here. This relies on current (2.6.2) calling coventions:
|
||||
*
|
||||
* lookup_hash is always run under i_sem and is passing NULL
|
||||
* lookup_hash is always run under i_mutex and is passing NULL
|
||||
* as nd
|
||||
*
|
||||
* open(...,O_CREATE,...) calls _lookup_hash under i_sem
|
||||
* open(...,O_CREATE,...) calls _lookup_hash under i_mutex
|
||||
* and sets flags to LOOKUP_OPEN|LOOKUP_CREATE
|
||||
*
|
||||
* all other invocations of ->d_revalidate seem to happen
|
||||
* outside of i_sem
|
||||
* outside of i_mutex
|
||||
*/
|
||||
need_lock = nd &&
|
||||
(!(nd->flags & LOOKUP_CREATE) || (nd->flags & LOOKUP_PARENT));
|
||||
|
||||
if (need_lock)
|
||||
down(&dir->i_sem);
|
||||
mutex_lock(&dir->i_mutex);
|
||||
|
||||
if (is_devfsd_or_child(fs_info)) {
|
||||
devfs_handle_t de = lookup_info->de;
|
||||
|
@ -2221,9 +2221,9 @@ static int devfs_d_revalidate_wait(struct dentry *dentry, struct nameidata *nd)
|
|||
add_wait_queue(&lookup_info->wait_queue, &wait);
|
||||
read_unlock(&parent->u.dir.lock);
|
||||
/* at this point it is always (hopefully) locked */
|
||||
up(&dir->i_sem);
|
||||
mutex_unlock(&dir->i_mutex);
|
||||
schedule();
|
||||
down(&dir->i_sem);
|
||||
mutex_lock(&dir->i_mutex);
|
||||
/*
|
||||
* This does not need nor should remove wait from wait_queue.
|
||||
* Wait queue head is never reused - nothing is ever added to it
|
||||
|
@ -2238,7 +2238,7 @@ static int devfs_d_revalidate_wait(struct dentry *dentry, struct nameidata *nd)
|
|||
|
||||
out:
|
||||
if (need_lock)
|
||||
up(&dir->i_sem);
|
||||
mutex_unlock(&dir->i_mutex);
|
||||
return 1;
|
||||
} /* End Function devfs_d_revalidate_wait */
|
||||
|
||||
|
@ -2284,9 +2284,9 @@ static struct dentry *devfs_lookup(struct inode *dir, struct dentry *dentry,
|
|||
/* Unlock directory semaphore, which will release any waiters. They
|
||||
will get the hashed dentry, and may be forced to wait for
|
||||
revalidation */
|
||||
up(&dir->i_sem);
|
||||
mutex_unlock(&dir->i_mutex);
|
||||
wait_for_devfsd_finished(fs_info); /* If I'm not devfsd, must wait */
|
||||
down(&dir->i_sem); /* Grab it again because them's the rules */
|
||||
mutex_lock(&dir->i_mutex); /* Grab it again because them's the rules */
|
||||
de = lookup_info.de;
|
||||
/* If someone else has been so kind as to make the inode, we go home
|
||||
early */
|
||||
|
|
|
@ -130,7 +130,7 @@ static struct dentry *get_node(int num)
|
|||
{
|
||||
char s[12];
|
||||
struct dentry *root = devpts_root;
|
||||
down(&root->d_inode->i_sem);
|
||||
mutex_lock(&root->d_inode->i_mutex);
|
||||
return lookup_one_len(s, root, sprintf(s, "%d", num));
|
||||
}
|
||||
|
||||
|
@ -161,7 +161,7 @@ int devpts_pty_new(struct tty_struct *tty)
|
|||
if (!IS_ERR(dentry) && !dentry->d_inode)
|
||||
d_instantiate(dentry, inode);
|
||||
|
||||
up(&devpts_root->d_inode->i_sem);
|
||||
mutex_unlock(&devpts_root->d_inode->i_mutex);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -178,7 +178,7 @@ struct tty_struct *devpts_get_tty(int number)
|
|||
dput(dentry);
|
||||
}
|
||||
|
||||
up(&devpts_root->d_inode->i_sem);
|
||||
mutex_unlock(&devpts_root->d_inode->i_mutex);
|
||||
|
||||
return tty;
|
||||
}
|
||||
|
@ -196,7 +196,7 @@ void devpts_pty_kill(int number)
|
|||
}
|
||||
dput(dentry);
|
||||
}
|
||||
up(&devpts_root->d_inode->i_sem);
|
||||
mutex_unlock(&devpts_root->d_inode->i_mutex);
|
||||
}
|
||||
|
||||
static int __init init_devpts_fs(void)
|
||||
|
|
|
@ -56,7 +56,7 @@
|
|||
* lock_type is DIO_LOCKING for regular files on direct-IO-naive filesystems.
|
||||
* This determines whether we need to do the fancy locking which prevents
|
||||
* direct-IO from being able to read uninitialised disk blocks. If its zero
|
||||
* (blockdev) this locking is not done, and if it is DIO_OWN_LOCKING i_sem is
|
||||
* (blockdev) this locking is not done, and if it is DIO_OWN_LOCKING i_mutex is
|
||||
* not held for the entire direct write (taken briefly, initially, during a
|
||||
* direct read though, but its never held for the duration of a direct-IO).
|
||||
*/
|
||||
|
@ -930,7 +930,7 @@ static int do_direct_IO(struct dio *dio)
|
|||
}
|
||||
|
||||
/*
|
||||
* Releases both i_sem and i_alloc_sem
|
||||
* Releases both i_mutex and i_alloc_sem
|
||||
*/
|
||||
static ssize_t
|
||||
direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode,
|
||||
|
@ -1062,11 +1062,11 @@ direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode,
|
|||
|
||||
/*
|
||||
* All block lookups have been performed. For READ requests
|
||||
* we can let i_sem go now that its achieved its purpose
|
||||
* we can let i_mutex go now that its achieved its purpose
|
||||
* of protecting us from looking up uninitialized blocks.
|
||||
*/
|
||||
if ((rw == READ) && (dio->lock_type == DIO_LOCKING))
|
||||
up(&dio->inode->i_sem);
|
||||
mutex_unlock(&dio->inode->i_mutex);
|
||||
|
||||
/*
|
||||
* OK, all BIOs are submitted, so we can decrement bio_count to truly
|
||||
|
@ -1145,18 +1145,18 @@ direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode,
|
|||
* The locking rules are governed by the dio_lock_type parameter.
|
||||
*
|
||||
* DIO_NO_LOCKING (no locking, for raw block device access)
|
||||
* For writes, i_sem is not held on entry; it is never taken.
|
||||
* For writes, i_mutex is not held on entry; it is never taken.
|
||||
*
|
||||
* DIO_LOCKING (simple locking for regular files)
|
||||
* For writes we are called under i_sem and return with i_sem held, even though
|
||||
* For writes we are called under i_mutex and return with i_mutex held, even though
|
||||
* it is internally dropped.
|
||||
* For reads, i_sem is not held on entry, but it is taken and dropped before
|
||||
* For reads, i_mutex is not held on entry, but it is taken and dropped before
|
||||
* returning.
|
||||
*
|
||||
* DIO_OWN_LOCKING (filesystem provides synchronisation and handling of
|
||||
* uninitialised data, allowing parallel direct readers and writers)
|
||||
* For writes we are called without i_sem, return without it, never touch it.
|
||||
* For reads, i_sem is held on entry and will be released before returning.
|
||||
* For writes we are called without i_mutex, return without it, never touch it.
|
||||
* For reads, i_mutex is held on entry and will be released before returning.
|
||||
*
|
||||
* Additional i_alloc_sem locking requirements described inline below.
|
||||
*/
|
||||
|
@ -1214,11 +1214,11 @@ __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
|
|||
* For block device access DIO_NO_LOCKING is used,
|
||||
* neither readers nor writers do any locking at all
|
||||
* For regular files using DIO_LOCKING,
|
||||
* readers need to grab i_sem and i_alloc_sem
|
||||
* writers need to grab i_alloc_sem only (i_sem is already held)
|
||||
* readers need to grab i_mutex and i_alloc_sem
|
||||
* writers need to grab i_alloc_sem only (i_mutex is already held)
|
||||
* For regular files using DIO_OWN_LOCKING,
|
||||
* neither readers nor writers take any locks here
|
||||
* (i_sem is already held and release for writers here)
|
||||
* (i_mutex is already held and release for writers here)
|
||||
*/
|
||||
dio->lock_type = dio_lock_type;
|
||||
if (dio_lock_type != DIO_NO_LOCKING) {
|
||||
|
@ -1228,7 +1228,7 @@ __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
|
|||
|
||||
mapping = iocb->ki_filp->f_mapping;
|
||||
if (dio_lock_type != DIO_OWN_LOCKING) {
|
||||
down(&inode->i_sem);
|
||||
mutex_lock(&inode->i_mutex);
|
||||
reader_with_isem = 1;
|
||||
}
|
||||
|
||||
|
@ -1240,7 +1240,7 @@ __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
|
|||
}
|
||||
|
||||
if (dio_lock_type == DIO_OWN_LOCKING) {
|
||||
up(&inode->i_sem);
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
reader_with_isem = 0;
|
||||
}
|
||||
}
|
||||
|
@ -1266,7 +1266,7 @@ __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
|
|||
|
||||
out:
|
||||
if (reader_with_isem)
|
||||
up(&inode->i_sem);
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
if (rw & WRITE)
|
||||
current->flags &= ~PF_SYNCWRITE;
|
||||
return retval;
|
||||
|
|
fs/dquot.c | 16
@ -100,7 +100,7 @@
|
|||
* operation is just reading pointers from inode (or not using them at all) the
|
||||
* read lock is enough. If pointers are altered function must hold write lock
|
||||
* (these locking rules also apply for S_NOQUOTA flag in the inode - note that
|
||||
* for altering the flag i_sem is also needed). If operation is holding
|
||||
* for altering the flag i_mutex is also needed). If operation is holding
|
||||
* reference to dquot in other way (e.g. quotactl ops) it must be guarded by
|
||||
* dqonoff_sem.
|
||||
* This locking assures that:
|
||||
|
@ -117,9 +117,9 @@
|
|||
* spinlock to internal buffers before writing.
|
||||
*
|
||||
* Lock ordering (including related VFS locks) is the following:
|
||||
* i_sem > dqonoff_sem > iprune_sem > journal_lock > dqptr_sem >
|
||||
* i_mutex > dqonoff_sem > iprune_sem > journal_lock > dqptr_sem >
|
||||
* > dquot->dq_lock > dqio_sem
|
||||
* i_sem on quota files is special (it's below dqio_sem)
|
||||
* i_mutex on quota files is special (it's below dqio_sem)
|
||||
*/
|
||||
|
||||
static DEFINE_SPINLOCK(dq_list_lock);
|
||||
|
@ -1369,11 +1369,11 @@ int vfs_quota_off(struct super_block *sb, int type)
|
|||
/* If quota was reenabled in the meantime, we have
|
||||
* nothing to do */
|
||||
if (!sb_has_quota_enabled(sb, cnt)) {
|
||||
down(&toputinode[cnt]->i_sem);
|
||||
mutex_lock(&toputinode[cnt]->i_mutex);
|
||||
toputinode[cnt]->i_flags &= ~(S_IMMUTABLE |
|
||||
S_NOATIME | S_NOQUOTA);
|
||||
truncate_inode_pages(&toputinode[cnt]->i_data, 0);
|
||||
up(&toputinode[cnt]->i_sem);
|
||||
mutex_unlock(&toputinode[cnt]->i_mutex);
|
||||
mark_inode_dirty(toputinode[cnt]);
|
||||
iput(toputinode[cnt]);
|
||||
}
|
||||
|
@ -1417,7 +1417,7 @@ static int vfs_quota_on_inode(struct inode *inode, int type, int format_id)
|
|||
write_inode_now(inode, 1);
|
||||
/* And now flush the block cache so that kernel sees the changes */
|
||||
invalidate_bdev(sb->s_bdev, 0);
|
||||
down(&inode->i_sem);
|
||||
mutex_lock(&inode->i_mutex);
|
||||
down(&dqopt->dqonoff_sem);
|
||||
if (sb_has_quota_enabled(sb, type)) {
|
||||
error = -EBUSY;
|
||||
|
@ -1449,7 +1449,7 @@ static int vfs_quota_on_inode(struct inode *inode, int type, int format_id)
|
|||
goto out_file_init;
|
||||
}
|
||||
up(&dqopt->dqio_sem);
|
||||
up(&inode->i_sem);
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
set_enable_flags(dqopt, type);
|
||||
|
||||
add_dquot_ref(sb, type);
|
||||
|
@ -1470,7 +1470,7 @@ static int vfs_quota_on_inode(struct inode *inode, int type, int format_id)
|
|||
inode->i_flags |= oldflags;
|
||||
up_write(&dqopt->dqptr_sem);
|
||||
}
|
||||
up(&inode->i_sem);
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
out_fmt:
|
||||
put_quota_format(fmt);
|
||||
|
||||
|
|
|
@ -177,9 +177,9 @@ find_exported_dentry(struct super_block *sb, void *obj, void *parent,
|
|||
struct dentry *ppd;
|
||||
struct dentry *npd;
|
||||
|
||||
down(&pd->d_inode->i_sem);
|
||||
mutex_lock(&pd->d_inode->i_mutex);
|
||||
ppd = CALL(nops,get_parent)(pd);
|
||||
up(&pd->d_inode->i_sem);
|
||||
mutex_unlock(&pd->d_inode->i_mutex);
|
||||
|
||||
if (IS_ERR(ppd)) {
|
||||
err = PTR_ERR(ppd);
|
||||
|
@ -201,9 +201,9 @@ find_exported_dentry(struct super_block *sb, void *obj, void *parent,
|
|||
break;
|
||||
}
|
||||
dprintk("find_exported_dentry: found name: %s\n", nbuf);
|
||||
down(&ppd->d_inode->i_sem);
|
||||
mutex_lock(&ppd->d_inode->i_mutex);
|
||||
npd = lookup_one_len(nbuf, ppd, strlen(nbuf));
|
||||
up(&ppd->d_inode->i_sem);
|
||||
mutex_unlock(&ppd->d_inode->i_mutex);
|
||||
if (IS_ERR(npd)) {
|
||||
err = PTR_ERR(npd);
|
||||
dprintk("find_exported_dentry: lookup failed: %d\n", err);
|
||||
|
@ -242,9 +242,9 @@ find_exported_dentry(struct super_block *sb, void *obj, void *parent,
|
|||
struct dentry *nresult;
|
||||
err = CALL(nops,get_name)(target_dir, nbuf, result);
|
||||
if (!err) {
|
||||
down(&target_dir->d_inode->i_sem);
|
||||
mutex_lock(&target_dir->d_inode->i_mutex);
|
||||
nresult = lookup_one_len(nbuf, target_dir, strlen(nbuf));
|
||||
up(&target_dir->d_inode->i_sem);
|
||||
mutex_unlock(&target_dir->d_inode->i_mutex);
|
||||
if (!IS_ERR(nresult)) {
|
||||
if (nresult->d_inode) {
|
||||
dput(result);
|
||||
|
|
|
@ -149,7 +149,7 @@ ext2_iset_acl(struct inode *inode, struct posix_acl **i_acl,
|
|||
}
|
||||
|
||||
/*
|
||||
* inode->i_sem: don't care
|
||||
* inode->i_mutex: don't care
|
||||
*/
|
||||
static struct posix_acl *
|
||||
ext2_get_acl(struct inode *inode, int type)
|
||||
|
@ -211,7 +211,7 @@ ext2_get_acl(struct inode *inode, int type)
|
|||
}
|
||||
|
||||
/*
|
||||
* inode->i_sem: down
|
||||
* inode->i_mutex: down
|
||||
*/
|
||||
static int
|
||||
ext2_set_acl(struct inode *inode, int type, struct posix_acl *acl)
|
||||
|
@ -301,8 +301,8 @@ ext2_permission(struct inode *inode, int mask, struct nameidata *nd)
|
|||
/*
|
||||
* Initialize the ACLs of a new inode. Called from ext2_new_inode.
|
||||
*
|
||||
* dir->i_sem: down
|
||||
* inode->i_sem: up (access to inode is still exclusive)
|
||||
* dir->i_mutex: down
|
||||
* inode->i_mutex: up (access to inode is still exclusive)
|
||||
*/
|
||||
int
|
||||
ext2_init_acl(struct inode *inode, struct inode *dir)
|
||||
|
@ -361,7 +361,7 @@ ext2_init_acl(struct inode *inode, struct inode *dir)
|
|||
* for directories) are added. There are no more bits available in the
|
||||
* file mode.
|
||||
*
|
||||
* inode->i_sem: down
|
||||
* inode->i_mutex: down
|
||||
*/
|
||||
int
|
||||
ext2_acl_chmod(struct inode *inode)
|
||||
|
|
|
@ -53,7 +53,7 @@ struct ext2_inode_info {
|
|||
#ifdef CONFIG_EXT2_FS_XATTR
|
||||
/*
|
||||
* Extended attributes can be read independently of the main file
|
||||
* data. Taking i_sem even when reading would cause contention
|
||||
* data. Taking i_mutex even when reading would cause contention
|
||||
* between readers of EAs and writers of regular file data, so
|
||||
* instead we synchronize on xattr_sem when reading or changing
|
||||
* EAs.
|
||||
|
|
|
@ -1152,7 +1152,7 @@ static ssize_t ext2_quota_write(struct super_block *sb, int type,
|
|||
struct buffer_head tmp_bh;
|
||||
struct buffer_head *bh;
|
||||
|
||||
down(&inode->i_sem);
|
||||
mutex_lock(&inode->i_mutex);
|
||||
while (towrite > 0) {
|
||||
tocopy = sb->s_blocksize - offset < towrite ?
|
||||
sb->s_blocksize - offset : towrite;
|
||||
|
@ -1189,7 +1189,7 @@ static ssize_t ext2_quota_write(struct super_block *sb, int type,
|
|||
inode->i_version++;
|
||||
inode->i_mtime = inode->i_ctime = CURRENT_TIME;
|
||||
mark_inode_dirty(inode);
|
||||
up(&inode->i_sem);
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
return len - towrite;
|
||||
}
|
||||
|
||||
|
|
|
@ -325,7 +325,7 @@ bad_block: ext2_error(inode->i_sb, "ext2_xattr_list",
|
|||
/*
|
||||
* Inode operation listxattr()
|
||||
*
|
||||
* dentry->d_inode->i_sem: don't care
|
||||
* dentry->d_inode->i_mutex: don't care
|
||||
*/
|
||||
ssize_t
|
||||
ext2_listxattr(struct dentry *dentry, char *buffer, size_t size)
|
||||
|
|
|
@ -152,7 +152,7 @@ ext3_iset_acl(struct inode *inode, struct posix_acl **i_acl,
|
|||
/*
|
||||
* Inode operation get_posix_acl().
|
||||
*
|
||||
* inode->i_sem: don't care
|
||||
* inode->i_mutex: don't care
|
||||
*/
|
||||
static struct posix_acl *
|
||||
ext3_get_acl(struct inode *inode, int type)
|
||||
|
@ -216,7 +216,7 @@ ext3_get_acl(struct inode *inode, int type)
|
|||
/*
|
||||
* Set the access or default ACL of an inode.
|
||||
*
|
||||
* inode->i_sem: down unless called from ext3_new_inode
|
||||
* inode->i_mutex: down unless called from ext3_new_inode
|
||||
*/
|
||||
static int
|
||||
ext3_set_acl(handle_t *handle, struct inode *inode, int type,
|
||||
|
@ -306,8 +306,8 @@ ext3_permission(struct inode *inode, int mask, struct nameidata *nd)
|
|||
/*
|
||||
* Initialize the ACLs of a new inode. Called from ext3_new_inode.
|
||||
*
|
||||
* dir->i_sem: down
|
||||
* inode->i_sem: up (access to inode is still exclusive)
|
||||
* dir->i_mutex: down
|
||||
* inode->i_mutex: up (access to inode is still exclusive)
|
||||
*/
|
||||
int
|
||||
ext3_init_acl(handle_t *handle, struct inode *inode, struct inode *dir)
|
||||
|
@ -368,7 +368,7 @@ ext3_init_acl(handle_t *handle, struct inode *inode, struct inode *dir)
|
|||
* for directories) are added. There are no more bits available in the
|
||||
* file mode.
|
||||
*
|
||||
* inode->i_sem: down
|
||||
* inode->i_mutex: down
|
||||
*/
|
||||
int
|
||||
ext3_acl_chmod(struct inode *inode)
|
||||
|
|
|
@ -2601,7 +2601,7 @@ static ssize_t ext3_quota_write(struct super_block *sb, int type,
|
|||
struct buffer_head *bh;
|
||||
handle_t *handle = journal_current_handle();
|
||||
|
||||
down(&inode->i_sem);
|
||||
mutex_lock(&inode->i_mutex);
|
||||
while (towrite > 0) {
|
||||
tocopy = sb->s_blocksize - offset < towrite ?
|
||||
sb->s_blocksize - offset : towrite;
|
||||
|
@ -2644,7 +2644,7 @@ static ssize_t ext3_quota_write(struct super_block *sb, int type,
|
|||
inode->i_version++;
|
||||
inode->i_mtime = inode->i_ctime = CURRENT_TIME;
|
||||
ext3_mark_inode_dirty(handle, inode);
|
||||
up(&inode->i_sem);
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
return len - towrite;
|
||||
}
|
||||
|
||||
|
|
|
@ -140,7 +140,7 @@ ext3_xattr_handler(int name_index)
|
|||
/*
|
||||
* Inode operation listxattr()
|
||||
*
|
||||
* dentry->d_inode->i_sem: don't care
|
||||
* dentry->d_inode->i_mutex: don't care
|
||||
*/
|
||||
ssize_t
|
||||
ext3_listxattr(struct dentry *dentry, char *buffer, size_t size)
|
||||
|
|
|
@ -729,13 +729,13 @@ static int fat_dir_ioctl(struct inode * inode, struct file * filp,
|
|||
|
||||
buf.dirent = d1;
|
||||
buf.result = 0;
|
||||
down(&inode->i_sem);
|
||||
mutex_lock(&inode->i_mutex);
|
||||
ret = -ENOENT;
|
||||
if (!IS_DEADDIR(inode)) {
|
||||
ret = __fat_readdir(inode, filp, &buf, fat_ioctl_filldir,
|
||||
short_only, both);
|
||||
}
|
||||
up(&inode->i_sem);
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
if (ret >= 0)
|
||||
ret = buf.result;
|
||||
return ret;
|
||||
|
|
|
@ -41,7 +41,7 @@ int fat_generic_ioctl(struct inode *inode, struct file *filp,
|
|||
if (err)
|
||||
return err;
|
||||
|
||||
down(&inode->i_sem);
|
||||
mutex_lock(&inode->i_mutex);
|
||||
|
||||
if (IS_RDONLY(inode)) {
|
||||
err = -EROFS;
|
||||
|
@ -103,7 +103,7 @@ int fat_generic_ioctl(struct inode *inode, struct file *filp,
|
|||
MSDOS_I(inode)->i_attrs = attr & ATTR_UNUSED;
|
||||
mark_inode_dirty(inode);
|
||||
up:
|
||||
up(&inode->i_sem);
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
return err;
|
||||
}
|
||||
default:
|
||||
|
|
|
@ -35,7 +35,7 @@ static int fifo_open(struct inode *inode, struct file *filp)
|
|||
int ret;
|
||||
|
||||
ret = -ERESTARTSYS;
|
||||
if (down_interruptible(PIPE_SEM(*inode)))
|
||||
if (mutex_lock_interruptible(PIPE_MUTEX(*inode)))
|
||||
goto err_nolock_nocleanup;
|
||||
|
||||
if (!inode->i_pipe) {
|
||||
|
@ -119,7 +119,7 @@ static int fifo_open(struct inode *inode, struct file *filp)
|
|||
}
|
||||
|
||||
/* Ok! */
|
||||
up(PIPE_SEM(*inode));
|
||||
mutex_unlock(PIPE_MUTEX(*inode));
|
||||
return 0;
|
||||
|
||||
err_rd:
|
||||
|
@ -139,7 +139,7 @@ static int fifo_open(struct inode *inode, struct file *filp)
|
|||
free_pipe_info(inode);
|
||||
|
||||
err_nocleanup:
|
||||
up(PIPE_SEM(*inode));
|
||||
mutex_unlock(PIPE_MUTEX(*inode));
|
||||
|
||||
err_nolock_nocleanup:
|
||||
return ret;
|
||||
|
|
|
@ -560,9 +560,9 @@ static ssize_t fuse_direct_write(struct file *file, const char __user *buf,
|
|||
struct inode *inode = file->f_dentry->d_inode;
|
||||
ssize_t res;
|
||||
/* Don't allow parallel writes to the same file */
|
||||
down(&inode->i_sem);
|
||||
mutex_lock(&inode->i_mutex);
|
||||
res = fuse_direct_io(file, buf, count, ppos, 1);
|
||||
up(&inode->i_sem);
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
return res;
|
||||
}
|
||||
|
||||
|
|
|
@ -547,13 +547,13 @@ static int hfs_file_release(struct inode *inode, struct file *file)
|
|||
if (atomic_read(&file->f_count) != 0)
|
||||
return 0;
|
||||
if (atomic_dec_and_test(&HFS_I(inode)->opencnt)) {
|
||||
down(&inode->i_sem);
|
||||
mutex_lock(&inode->i_mutex);
|
||||
hfs_file_truncate(inode);
|
||||
//if (inode->i_flags & S_DEAD) {
|
||||
// hfs_delete_cat(inode->i_ino, HFSPLUS_SB(sb).hidden_dir, NULL);
|
||||
// hfs_delete_inode(inode);
|
||||
//}
|
||||
up(&inode->i_sem);
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -29,7 +29,7 @@ int hfsplus_block_allocate(struct super_block *sb, u32 size, u32 offset, u32 *ma
|
|||
return size;
|
||||
|
||||
dprint(DBG_BITMAP, "block_allocate: %u,%u,%u\n", size, offset, len);
|
||||
down(&HFSPLUS_SB(sb).alloc_file->i_sem);
|
||||
mutex_lock(&HFSPLUS_SB(sb).alloc_file->i_mutex);
|
||||
mapping = HFSPLUS_SB(sb).alloc_file->i_mapping;
|
||||
page = read_cache_page(mapping, offset / PAGE_CACHE_BITS,
|
||||
(filler_t *)mapping->a_ops->readpage, NULL);
|
||||
|
@ -143,7 +143,7 @@ int hfsplus_block_allocate(struct super_block *sb, u32 size, u32 offset, u32 *ma
|
|||
sb->s_dirt = 1;
|
||||
dprint(DBG_BITMAP, "-> %u,%u\n", start, *max);
|
||||
out:
|
||||
up(&HFSPLUS_SB(sb).alloc_file->i_sem);
|
||||
mutex_unlock(&HFSPLUS_SB(sb).alloc_file->i_mutex);
|
||||
return start;
|
||||
}
|
||||
|
||||
|
@ -164,7 +164,7 @@ int hfsplus_block_free(struct super_block *sb, u32 offset, u32 count)
|
|||
if ((offset + count) > HFSPLUS_SB(sb).total_blocks)
|
||||
return -2;
|
||||
|
||||
down(&HFSPLUS_SB(sb).alloc_file->i_sem);
|
||||
mutex_lock(&HFSPLUS_SB(sb).alloc_file->i_mutex);
|
||||
mapping = HFSPLUS_SB(sb).alloc_file->i_mapping;
|
||||
pnr = offset / PAGE_CACHE_BITS;
|
||||
page = read_cache_page(mapping, pnr, (filler_t *)mapping->a_ops->readpage, NULL);
|
||||
|
@ -215,7 +215,7 @@ int hfsplus_block_free(struct super_block *sb, u32 offset, u32 count)
|
|||
kunmap(page);
|
||||
HFSPLUS_SB(sb).free_blocks += len;
|
||||
sb->s_dirt = 1;
|
||||
up(&HFSPLUS_SB(sb).alloc_file->i_sem);
|
||||
mutex_unlock(&HFSPLUS_SB(sb).alloc_file->i_mutex);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -276,13 +276,13 @@ static int hfsplus_file_release(struct inode *inode, struct file *file)
|
|||
if (atomic_read(&file->f_count) != 0)
|
||||
return 0;
|
||||
if (atomic_dec_and_test(&HFSPLUS_I(inode).opencnt)) {
|
||||
down(&inode->i_sem);
|
||||
mutex_lock(&inode->i_mutex);
|
||||
hfsplus_file_truncate(inode);
|
||||
if (inode->i_flags & S_DEAD) {
|
||||
hfsplus_delete_cat(inode->i_ino, HFSPLUS_SB(sb).hidden_dir, NULL);
|
||||
hfsplus_delete_inode(inode);
|
||||
}
|
||||
up(&inode->i_sem);
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -32,19 +32,19 @@ static loff_t hpfs_dir_lseek(struct file *filp, loff_t off, int whence)
|
|||
|
||||
/*printk("dir lseek\n");*/
|
||||
if (new_off == 0 || new_off == 1 || new_off == 11 || new_off == 12 || new_off == 13) goto ok;
|
||||
down(&i->i_sem);
|
||||
mutex_lock(&i->i_mutex);
|
||||
pos = ((loff_t) hpfs_de_as_down_as_possible(s, hpfs_inode->i_dno) << 4) + 1;
|
||||
while (pos != new_off) {
|
||||
if (map_pos_dirent(i, &pos, &qbh)) hpfs_brelse4(&qbh);
|
||||
else goto fail;
|
||||
if (pos == 12) goto fail;
|
||||
}
|
||||
up(&i->i_sem);
|
||||
mutex_unlock(&i->i_mutex);
|
||||
ok:
|
||||
unlock_kernel();
|
||||
return filp->f_pos = new_off;
|
||||
fail:
|
||||
up(&i->i_sem);
|
||||
mutex_unlock(&i->i_mutex);
|
||||
/*printk("illegal lseek: %016llx\n", new_off);*/
|
||||
unlock_kernel();
|
||||
return -ESPIPE;
|
||||
|
|
|
@ -171,12 +171,12 @@ static struct dentry *hppfs_lookup(struct inode *ino, struct dentry *dentry,
|
|||
|
||||
err = -ENOMEM;
|
||||
parent = HPPFS_I(ino)->proc_dentry;
|
||||
down(&parent->d_inode->i_sem);
|
||||
mutex_lock(&parent->d_inode->i_mutex);
|
||||
proc_dentry = d_lookup(parent, &dentry->d_name);
|
||||
if(proc_dentry == NULL){
|
||||
proc_dentry = d_alloc(parent, &dentry->d_name);
|
||||
if(proc_dentry == NULL){
|
||||
up(&parent->d_inode->i_sem);
|
||||
mutex_unlock(&parent->d_inode->i_mutex);
|
||||
goto out;
|
||||
}
|
||||
new = (*parent->d_inode->i_op->lookup)(parent->d_inode,
|
||||
|
@ -186,7 +186,7 @@ static struct dentry *hppfs_lookup(struct inode *ino, struct dentry *dentry,
|
|||
proc_dentry = new;
|
||||
}
|
||||
}
|
||||
up(&parent->d_inode->i_sem);
|
||||
mutex_unlock(&parent->d_inode->i_mutex);
|
||||
|
||||
if(IS_ERR(proc_dentry))
|
||||
return(proc_dentry);
|
||||
|
|
|
@ -118,7 +118,7 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
|
|||
|
||||
vma_len = (loff_t)(vma->vm_end - vma->vm_start);
|
||||
|
||||
down(&inode->i_sem);
|
||||
mutex_lock(&inode->i_mutex);
|
||||
file_accessed(file);
|
||||
vma->vm_flags |= VM_HUGETLB | VM_RESERVED;
|
||||
vma->vm_ops = &hugetlb_vm_ops;
|
||||
|
@ -133,7 +133,7 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
|
|||
if (inode->i_size < len)
|
||||
inode->i_size = len;
|
||||
out:
|
||||
up(&inode->i_sem);
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
|
|
@@ -192,7 +192,7 @@ void inode_init_once(struct inode *inode)
 	INIT_HLIST_NODE(&inode->i_hash);
 	INIT_LIST_HEAD(&inode->i_dentry);
 	INIT_LIST_HEAD(&inode->i_devices);
-	sema_init(&inode->i_sem, 1);
+	mutex_init(&inode->i_mutex);
 	init_rwsem(&inode->i_alloc_sem);
 	INIT_RADIX_TREE(&inode->i_data.page_tree, GFP_ATOMIC);
 	rwlock_init(&inode->i_data.tree_lock);
@ -1415,7 +1415,7 @@ jffs_file_write(struct file *filp, const char *buf, size_t count,
|
|||
* This will never trigger with sane page sizes. leave it in
|
||||
* anyway, since I'm thinking about how to merge larger writes
|
||||
* (the current idea is to poke a thread that does the actual
|
||||
* I/O and starts by doing a down(&inode->i_sem). then we
|
||||
* I/O and starts by doing a mutex_lock(&inode->i_mutex). then we
|
||||
* would need to get the page cache pages and have a list of
|
||||
* I/O requests and do write-merging here.
|
||||
* -- prumpf
|
||||
|
|
|
@@ -58,7 +58,7 @@ struct jfs_inode_info {
/*
* rdwrlock serializes xtree between reads & writes and synchronizes
* changes to special inodes. It's use would be redundant on
* directories since the i_sem taken in the VFS is sufficient.
* directories since the i_mutex taken in the VFS is sufficient.
*/
struct rw_semaphore rdwrlock;
/*
@@ -68,7 +68,7 @@ struct jfs_inode_info {
* inode is blocked in txBegin or TxBeginAnon
*/
struct semaphore commit_sem;
/* xattr_sem allows us to access the xattrs without taking i_sem */
/* xattr_sem allows us to access the xattrs without taking i_mutex */
struct rw_semaphore xattr_sem;
lid_t xtlid; /* lid of xtree lock on directory */
#ifdef CONFIG_JFS_POSIX_ACL
@@ -74,7 +74,7 @@ int dcache_dir_close(struct inode *inode, struct file *file)
loff_t dcache_dir_lseek(struct file *file, loff_t offset, int origin)
{
down(&file->f_dentry->d_inode->i_sem);
mutex_lock(&file->f_dentry->d_inode->i_mutex);
switch (origin) {
case 1:
offset += file->f_pos;
@@ -82,7 +82,7 @@ loff_t dcache_dir_lseek(struct file *file, loff_t offset, int origin)
if (offset >= 0)
break;
default:
up(&file->f_dentry->d_inode->i_sem);
mutex_unlock(&file->f_dentry->d_inode->i_mutex);
return -EINVAL;
}
if (offset != file->f_pos) {
@@ -106,7 +106,7 @@ loff_t dcache_dir_lseek(struct file *file, loff_t offset, int origin)
spin_unlock(&dcache_lock);
}
}
up(&file->f_dentry->d_inode->i_sem);
mutex_unlock(&file->f_dentry->d_inode->i_mutex);
return offset;
}
@@ -356,7 +356,7 @@ int simple_commit_write(struct file *file, struct page *page,
/*
* No need to use i_size_read() here, the i_size
* cannot change under us because we hold the i_sem.
* cannot change under us because we hold the i_mutex.
*/
if (pos > inode->i_size)
i_size_write(inode, pos);
fs/namei.c
@@ -438,7 +438,7 @@ static struct dentry * real_lookup(struct dentry * parent, struct qstr * name, s
struct dentry * result;
struct inode *dir = parent->d_inode;
down(&dir->i_sem);
mutex_lock(&dir->i_mutex);
/*
* First re-do the cached lookup just in case it was created
* while we waited for the directory semaphore..
@@ -464,7 +464,7 @@ static struct dentry * real_lookup(struct dentry * parent, struct qstr * name, s
else
result = dentry;
}
up(&dir->i_sem);
mutex_unlock(&dir->i_mutex);
return result;
}
@@ -472,7 +472,7 @@ static struct dentry * real_lookup(struct dentry * parent, struct qstr * name, s
* Uhhuh! Nasty case: the cache was re-populated while
* we waited on the semaphore. Need to revalidate.
*/
up(&dir->i_sem);
mutex_unlock(&dir->i_mutex);
if (result->d_op && result->d_op->d_revalidate) {
if (!result->d_op->d_revalidate(result, nd) && !d_invalidate(result)) {
dput(result);
@@ -1366,7 +1366,7 @@ struct dentry *lock_rename(struct dentry *p1, struct dentry *p2)
struct dentry *p;
if (p1 == p2) {
down(&p1->d_inode->i_sem);
mutex_lock(&p1->d_inode->i_mutex);
return NULL;
}
@@ -1374,30 +1374,30 @@ struct dentry *lock_rename(struct dentry *p1, struct dentry *p2)
for (p = p1; p->d_parent != p; p = p->d_parent) {
if (p->d_parent == p2) {
down(&p2->d_inode->i_sem);
down(&p1->d_inode->i_sem);
mutex_lock(&p2->d_inode->i_mutex);
mutex_lock(&p1->d_inode->i_mutex);
return p;
}
}
for (p = p2; p->d_parent != p; p = p->d_parent) {
if (p->d_parent == p1) {
down(&p1->d_inode->i_sem);
down(&p2->d_inode->i_sem);
mutex_lock(&p1->d_inode->i_mutex);
mutex_lock(&p2->d_inode->i_mutex);
return p;
}
}
down(&p1->d_inode->i_sem);
down(&p2->d_inode->i_sem);
mutex_lock(&p1->d_inode->i_mutex);
mutex_lock(&p2->d_inode->i_mutex);
return NULL;
}
void unlock_rename(struct dentry *p1, struct dentry *p2)
{
up(&p1->d_inode->i_sem);
mutex_unlock(&p1->d_inode->i_mutex);
if (p1 != p2) {
up(&p2->d_inode->i_sem);
mutex_unlock(&p2->d_inode->i_mutex);
up(&p1->d_inode->i_sb->s_vfs_rename_sem);
}
}
@@ -1563,14 +1563,14 @@ int open_namei(const char * pathname, int flag, int mode, struct nameidata *nd)
dir = nd->dentry;
nd->flags &= ~LOOKUP_PARENT;
down(&dir->d_inode->i_sem);
mutex_lock(&dir->d_inode->i_mutex);
path.dentry = lookup_hash(nd);
path.mnt = nd->mnt;
do_last:
error = PTR_ERR(path.dentry);
if (IS_ERR(path.dentry)) {
up(&dir->d_inode->i_sem);
mutex_unlock(&dir->d_inode->i_mutex);
goto exit;
}
@@ -1579,7 +1579,7 @@ int open_namei(const char * pathname, int flag, int mode, struct nameidata *nd)
if (!IS_POSIXACL(dir->d_inode))
mode &= ~current->fs->umask;
error = vfs_create(dir->d_inode, path.dentry, mode, nd);
up(&dir->d_inode->i_sem);
mutex_unlock(&dir->d_inode->i_mutex);
dput(nd->dentry);
nd->dentry = path.dentry;
if (error)
@@ -1593,7 +1593,7 @@ int open_namei(const char * pathname, int flag, int mode, struct nameidata *nd)
/*
* It already exists.
*/
up(&dir->d_inode->i_sem);
mutex_unlock(&dir->d_inode->i_mutex);
error = -EEXIST;
if (flag & O_EXCL)
@@ -1665,7 +1665,7 @@ int open_namei(const char * pathname, int flag, int mode, struct nameidata *nd)
goto exit;
}
dir = nd->dentry;
down(&dir->d_inode->i_sem);
mutex_lock(&dir->d_inode->i_mutex);
path.dentry = lookup_hash(nd);
path.mnt = nd->mnt;
__putname(nd->last.name);
@@ -1680,13 +1680,13 @@ int open_namei(const char * pathname, int flag, int mode, struct nameidata *nd)
* Simple function to lookup and return a dentry and create it
* if it doesn't exist. Is SMP-safe.
*
* Returns with nd->dentry->d_inode->i_sem locked.
* Returns with nd->dentry->d_inode->i_mutex locked.
*/
struct dentry *lookup_create(struct nameidata *nd, int is_dir)
{
struct dentry *dentry = ERR_PTR(-EEXIST);
down(&nd->dentry->d_inode->i_sem);
mutex_lock(&nd->dentry->d_inode->i_mutex);
/*
* Yucky last component or no last component at all?
* (foo/., foo/.., /////)
@@ -1784,7 +1784,7 @@ asmlinkage long sys_mknod(const char __user * filename, int mode, unsigned dev)
}
dput(dentry);
}
up(&nd.dentry->d_inode->i_sem);
mutex_unlock(&nd.dentry->d_inode->i_mutex);
path_release(&nd);
out:
putname(tmp);
@@ -1836,7 +1836,7 @@ asmlinkage long sys_mkdir(const char __user * pathname, int mode)
error = vfs_mkdir(nd.dentry->d_inode, dentry, mode);
dput(dentry);
}
up(&nd.dentry->d_inode->i_sem);
mutex_unlock(&nd.dentry->d_inode->i_mutex);
path_release(&nd);
out:
putname(tmp);
@@ -1885,7 +1885,7 @@ int vfs_rmdir(struct inode *dir, struct dentry *dentry)
DQUOT_INIT(dir);
down(&dentry->d_inode->i_sem);
mutex_lock(&dentry->d_inode->i_mutex);
dentry_unhash(dentry);
if (d_mountpoint(dentry))
error = -EBUSY;
@@ -1897,7 +1897,7 @@ int vfs_rmdir(struct inode *dir, struct dentry *dentry)
dentry->d_inode->i_flags |= S_DEAD;
}
}
up(&dentry->d_inode->i_sem);
mutex_unlock(&dentry->d_inode->i_mutex);
if (!error) {
d_delete(dentry);
}
@@ -1932,14 +1932,14 @@ asmlinkage long sys_rmdir(const char __user * pathname)
error = -EBUSY;
goto exit1;
}
down(&nd.dentry->d_inode->i_sem);
mutex_lock(&nd.dentry->d_inode->i_mutex);
dentry = lookup_hash(&nd);
error = PTR_ERR(dentry);
if (!IS_ERR(dentry)) {
error = vfs_rmdir(nd.dentry->d_inode, dentry);
dput(dentry);
}
up(&nd.dentry->d_inode->i_sem);
mutex_unlock(&nd.dentry->d_inode->i_mutex);
exit1:
path_release(&nd);
exit:
@@ -1959,7 +1959,7 @@ int vfs_unlink(struct inode *dir, struct dentry *dentry)
DQUOT_INIT(dir);
down(&dentry->d_inode->i_sem);
mutex_lock(&dentry->d_inode->i_mutex);
if (d_mountpoint(dentry))
error = -EBUSY;
else {
@@ -1967,7 +1967,7 @@ int vfs_unlink(struct inode *dir, struct dentry *dentry)
if (!error)
error = dir->i_op->unlink(dir, dentry);
}
up(&dentry->d_inode->i_sem);
mutex_unlock(&dentry->d_inode->i_mutex);
/* We don't d_delete() NFS sillyrenamed files--they still exist. */
if (!error && !(dentry->d_flags & DCACHE_NFSFS_RENAMED)) {
@@ -1979,7 +1979,7 @@ int vfs_unlink(struct inode *dir, struct dentry *dentry)
/*
* Make sure that the actual truncation of the file will occur outside its
* directory's i_sem. Truncate can take a long time if there is a lot of
* directory's i_mutex. Truncate can take a long time if there is a lot of
* writeout happening, and we don't want to prevent access to the directory
* while waiting on the I/O.
*/
@@ -2001,7 +2001,7 @@ asmlinkage long sys_unlink(const char __user * pathname)
error = -EISDIR;
if (nd.last_type != LAST_NORM)
goto exit1;
down(&nd.dentry->d_inode->i_sem);
mutex_lock(&nd.dentry->d_inode->i_mutex);
dentry = lookup_hash(&nd);
error = PTR_ERR(dentry);
if (!IS_ERR(dentry)) {
@@ -2015,7 +2015,7 @@ asmlinkage long sys_unlink(const char __user * pathname)
exit2:
dput(dentry);
}
up(&nd.dentry->d_inode->i_sem);
mutex_unlock(&nd.dentry->d_inode->i_mutex);
if (inode)
iput(inode); /* truncate the inode here */
exit1:
@@ -2075,7 +2075,7 @@ asmlinkage long sys_symlink(const char __user * oldname, const char __user * new
error = vfs_symlink(nd.dentry->d_inode, dentry, from, S_IALLUGO);
dput(dentry);
}
up(&nd.dentry->d_inode->i_sem);
mutex_unlock(&nd.dentry->d_inode->i_mutex);
path_release(&nd);
out:
putname(to);
@@ -2113,10 +2113,10 @@ int vfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *new_de
if (error)
return error;
down(&old_dentry->d_inode->i_sem);
mutex_lock(&old_dentry->d_inode->i_mutex);
DQUOT_INIT(dir);
error = dir->i_op->link(old_dentry, dir, new_dentry);
up(&old_dentry->d_inode->i_sem);
mutex_unlock(&old_dentry->d_inode->i_mutex);
if (!error)
fsnotify_create(dir, new_dentry->d_name.name);
return error;
@@ -2157,7 +2157,7 @@ asmlinkage long sys_link(const char __user * oldname, const char __user * newnam
error = vfs_link(old_nd.dentry, nd.dentry->d_inode, new_dentry);
dput(new_dentry);
}
up(&nd.dentry->d_inode->i_sem);
mutex_unlock(&nd.dentry->d_inode->i_mutex);
out_release:
path_release(&nd);
out:
@@ -2178,7 +2178,7 @@ asmlinkage long sys_link(const char __user * oldname, const char __user * newnam
* sb->s_vfs_rename_sem. We might be more accurate, but that's another
* story.
* c) we have to lock _three_ objects - parents and victim (if it exists).
* And that - after we got ->i_sem on parents (until then we don't know
* And that - after we got ->i_mutex on parents (until then we don't know
* whether the target exists). Solution: try to be smart with locking
* order for inodes. We rely on the fact that tree topology may change
* only under ->s_vfs_rename_sem _and_ that parent of the object we
@@ -2195,9 +2195,9 @@ asmlinkage long sys_link(const char __user * oldname, const char __user * newnam
* stuff into VFS), but the former is not going away. Solution: the same
* trick as in rmdir().
* e) conversion from fhandle to dentry may come in the wrong moment - when
* we are removing the target. Solution: we will have to grab ->i_sem
* we are removing the target. Solution: we will have to grab ->i_mutex
* in the fhandle_to_dentry code. [FIXME - current nfsfh.c relies on
* ->i_sem on parents, which works but leads to some truely excessive
* ->i_mutex on parents, which works but leads to some truely excessive
* locking].
*/
static int vfs_rename_dir(struct inode *old_dir, struct dentry *old_dentry,
@@ -2222,7 +2222,7 @@ static int vfs_rename_dir(struct inode *old_dir, struct dentry *old_dentry,
target = new_dentry->d_inode;
if (target) {
down(&target->i_sem);
mutex_lock(&target->i_mutex);
dentry_unhash(new_dentry);
}
if (d_mountpoint(old_dentry)||d_mountpoint(new_dentry))
@@ -2232,7 +2232,7 @@ static int vfs_rename_dir(struct inode *old_dir, struct dentry *old_dentry,
if (target) {
if (!error)
target->i_flags |= S_DEAD;
up(&target->i_sem);
mutex_unlock(&target->i_mutex);
if (d_unhashed(new_dentry))
d_rehash(new_dentry);
dput(new_dentry);
@@ -2255,7 +2255,7 @@ static int vfs_rename_other(struct inode *old_dir, struct dentry *old_dentry,
dget(new_dentry);
target = new_dentry->d_inode;
if (target)
down(&target->i_sem);
mutex_lock(&target->i_mutex);
if (d_mountpoint(old_dentry)||d_mountpoint(new_dentry))
error = -EBUSY;
else
@@ -2266,7 +2266,7 @@ static int vfs_rename_other(struct inode *old_dir, struct dentry *old_dentry,
d_move(old_dentry, new_dentry);
}
if (target)
up(&target->i_sem);
mutex_unlock(&target->i_mutex);
dput(new_dentry);
return error;
}
@@ -814,7 +814,7 @@ static int graft_tree(struct vfsmount *mnt, struct nameidata *nd)
return -ENOTDIR;
err = -ENOENT;
down(&nd->dentry->d_inode->i_sem);
mutex_lock(&nd->dentry->d_inode->i_mutex);
if (IS_DEADDIR(nd->dentry->d_inode))
goto out_unlock;
@@ -826,7 +826,7 @@ static int graft_tree(struct vfsmount *mnt, struct nameidata *nd)
if (IS_ROOT(nd->dentry) || !d_unhashed(nd->dentry))
err = attach_recursive_mnt(mnt, nd, NULL);
out_unlock:
up(&nd->dentry->d_inode->i_sem);
mutex_unlock(&nd->dentry->d_inode->i_mutex);
if (!err)
security_sb_post_addmount(mnt, nd);
return err;
@@ -962,7 +962,7 @@ static int do_move_mount(struct nameidata *nd, char *old_name)
goto out;
err = -ENOENT;
down(&nd->dentry->d_inode->i_sem);
mutex_lock(&nd->dentry->d_inode->i_mutex);
if (IS_DEADDIR(nd->dentry->d_inode))
goto out1;
@@ -1004,7 +1004,7 @@ static int do_move_mount(struct nameidata *nd, char *old_name)
list_del_init(&old_nd.mnt->mnt_expire);
spin_unlock(&vfsmount_lock);
out1:
up(&nd->dentry->d_inode->i_sem);
mutex_unlock(&nd->dentry->d_inode->i_mutex);
out:
up_write(&namespace_sem);
if (!err)
@@ -1573,7 +1573,7 @@ asmlinkage long sys_pivot_root(const char __user * new_root,
user_nd.dentry = dget(current->fs->root);
read_unlock(&current->fs->lock);
down_write(&namespace_sem);
down(&old_nd.dentry->d_inode->i_sem);
mutex_lock(&old_nd.dentry->d_inode->i_mutex);
error = -EINVAL;
if (IS_MNT_SHARED(old_nd.mnt) ||
IS_MNT_SHARED(new_nd.mnt->mnt_parent) ||
@@ -1626,7 +1626,7 @@ asmlinkage long sys_pivot_root(const char __user * new_root,
path_release(&root_parent);
path_release(&parent_nd);
out2:
up(&old_nd.dentry->d_inode->i_sem);
mutex_unlock(&old_nd.dentry->d_inode->i_mutex);
up_write(&namespace_sem);
path_release(&user_nd);
path_release(&old_nd);
fs/nfs/dir.c
@@ -194,7 +194,7 @@ int nfs_readdir_filler(nfs_readdir_descriptor_t *desc, struct page *page)
spin_unlock(&inode->i_lock);
/* Ensure consistent page alignment of the data.
* Note: assumes we have exclusive access to this mapping either
* through inode->i_sem or some other mechanism.
* through inode->i_mutex or some other mechanism.
*/
if (page->index == 0)
invalidate_inode_pages2_range(inode->i_mapping, PAGE_CACHE_SIZE, -1);
@@ -573,7 +573,7 @@ static int nfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
loff_t nfs_llseek_dir(struct file *filp, loff_t offset, int origin)
{
down(&filp->f_dentry->d_inode->i_sem);
mutex_lock(&filp->f_dentry->d_inode->i_mutex);
switch (origin) {
case 1:
offset += filp->f_pos;
@@ -589,7 +589,7 @@ loff_t nfs_llseek_dir(struct file *filp, loff_t offset, int origin)
((struct nfs_open_context *)filp->private_data)->dir_cookie = 0;
}
out:
up(&filp->f_dentry->d_inode->i_sem);
mutex_unlock(&filp->f_dentry->d_inode->i_mutex);
return offset;
}
@@ -1001,7 +1001,7 @@ static int nfs_open_revalidate(struct dentry *dentry, struct nameidata *nd)
openflags &= ~(O_CREAT|O_TRUNC);
/*
* Note: we're not holding inode->i_sem and so may be racing with
* Note: we're not holding inode->i_mutex and so may be racing with
* operations that change the directory. We therefore save the
* change attribute *before* we do the RPC call.
*/
@@ -1051,7 +1051,7 @@ static struct dentry *nfs_readdir_lookup(nfs_readdir_descriptor_t *desc)
return dentry;
if (!desc->plus || !(entry->fattr->valid & NFS_ATTR_FATTR))
return NULL;
/* Note: caller is already holding the dir->i_sem! */
/* Note: caller is already holding the dir->i_mutex! */
dentry = d_alloc(parent, &name);
if (dentry == NULL)
return NULL;
@@ -121,9 +121,9 @@ nfs4_make_rec_clidname(char *dname, struct xdr_netobj *clname)
static void
nfsd4_sync_rec_dir(void)
{
down(&rec_dir.dentry->d_inode->i_sem);
mutex_lock(&rec_dir.dentry->d_inode->i_mutex);
nfsd_sync_dir(rec_dir.dentry);
up(&rec_dir.dentry->d_inode->i_sem);
mutex_unlock(&rec_dir.dentry->d_inode->i_mutex);
}
int
@@ -143,7 +143,7 @@ nfsd4_create_clid_dir(struct nfs4_client *clp)
nfs4_save_user(&uid, &gid);
/* lock the parent */
down(&rec_dir.dentry->d_inode->i_sem);
mutex_lock(&rec_dir.dentry->d_inode->i_mutex);
dentry = lookup_one_len(dname, rec_dir.dentry, HEXDIR_LEN-1);
if (IS_ERR(dentry)) {
@@ -159,7 +159,7 @@ nfsd4_create_clid_dir(struct nfs4_client *clp)
out_put:
dput(dentry);
out_unlock:
up(&rec_dir.dentry->d_inode->i_sem);
mutex_unlock(&rec_dir.dentry->d_inode->i_mutex);
if (status == 0) {
clp->cl_firststate = 1;
nfsd4_sync_rec_dir();
@@ -259,9 +259,9 @@ nfsd4_remove_clid_file(struct dentry *dir, struct dentry *dentry)
printk("nfsd4: non-file found in client recovery directory\n");
return -EINVAL;
}
down(&dir->d_inode->i_sem);
mutex_lock(&dir->d_inode->i_mutex);
status = vfs_unlink(dir->d_inode, dentry);
up(&dir->d_inode->i_sem);
mutex_unlock(&dir->d_inode->i_mutex);
return status;
}
@@ -274,9 +274,9 @@ nfsd4_clear_clid_dir(struct dentry *dir, struct dentry *dentry)
* any regular files anyway, just in case the directory was created by
* a kernel from the future.... */
nfsd4_list_rec_dir(dentry, nfsd4_remove_clid_file);
down(&dir->d_inode->i_sem);
mutex_lock(&dir->d_inode->i_mutex);
status = vfs_rmdir(dir->d_inode, dentry);
up(&dir->d_inode->i_sem);
mutex_unlock(&dir->d_inode->i_mutex);
return status;
}
@@ -288,9 +288,9 @@ nfsd4_unlink_clid_dir(char *name, int namlen)
dprintk("NFSD: nfsd4_unlink_clid_dir. name %.*s\n", namlen, name);
down(&rec_dir.dentry->d_inode->i_sem);
mutex_lock(&rec_dir.dentry->d_inode->i_mutex);
dentry = lookup_one_len(name, rec_dir.dentry, namlen);
up(&rec_dir.dentry->d_inode->i_sem);
mutex_unlock(&rec_dir.dentry->d_inode->i_mutex);
if (IS_ERR(dentry)) {
status = PTR_ERR(dentry);
return status;
@@ -390,12 +390,12 @@ set_nfsv4_acl_one(struct dentry *dentry, struct posix_acl *pacl, char *key)
error = -EOPNOTSUPP;
if (inode->i_op && inode->i_op->setxattr) {
down(&inode->i_sem);
mutex_lock(&inode->i_mutex);
security_inode_setxattr(dentry, key, buf, len, 0);
error = inode->i_op->setxattr(dentry, key, buf, len, 0);
if (!error)
security_inode_post_setxattr(dentry, key, buf, len, 0);
up(&inode->i_sem);
mutex_unlock(&inode->i_mutex);
}
out:
kfree(buf);
@@ -739,9 +739,9 @@ nfsd_sync(struct file *filp)
int err;
struct inode *inode = filp->f_dentry->d_inode;
dprintk("nfsd: sync file %s\n", filp->f_dentry->d_name.name);
down(&inode->i_sem);
mutex_lock(&inode->i_mutex);
err=nfsd_dosync(filp, filp->f_dentry, filp->f_op);
up(&inode->i_sem);
mutex_unlock(&inode->i_mutex);
return err;
}
@@ -885,9 +885,9 @@ static void kill_suid(struct dentry *dentry)
struct iattr ia;
ia.ia_valid = ATTR_KILL_SUID | ATTR_KILL_SGID;
down(&dentry->d_inode->i_sem);
mutex_lock(&dentry->d_inode->i_mutex);
notify_change(dentry, &ia);
up(&dentry->d_inode->i_sem);
mutex_unlock(&dentry->d_inode->i_mutex);
}
static inline int
@ -1532,7 +1532,7 @@ int ntfs_resident_attr_value_resize(MFT_RECORD *m, ATTR_RECORD *a,
|
|||
* NOTE to self: No changes in the attribute list are required to move from
|
||||
* a resident to a non-resident attribute.
|
||||
*
|
||||
* Locking: - The caller must hold i_sem on the inode.
|
||||
* Locking: - The caller must hold i_mutex on the inode.
|
||||
*/
|
||||
int ntfs_attr_make_non_resident(ntfs_inode *ni, const u32 data_size)
|
||||
{
|
||||
|
@ -1728,7 +1728,7 @@ int ntfs_attr_make_non_resident(ntfs_inode *ni, const u32 data_size)
|
|||
/*
|
||||
* This needs to be last since the address space operations ->readpage
|
||||
* and ->writepage can run concurrently with us as they are not
|
||||
* serialized on i_sem. Note, we are not allowed to fail once we flip
|
||||
* serialized on i_mutex. Note, we are not allowed to fail once we flip
|
||||
* this switch, which is another reason to do this last.
|
||||
*/
|
||||
NInoSetNonResident(ni);
|
||||
|
|
|
@ -69,7 +69,7 @@ ntfschar I30[5] = { const_cpu_to_le16('$'), const_cpu_to_le16('I'),
|
|||
* work but we don't care for how quickly one can access them. This also fixes
|
||||
* the dcache aliasing issues.
|
||||
*
|
||||
* Locking: - Caller must hold i_sem on the directory.
|
||||
* Locking: - Caller must hold i_mutex on the directory.
|
||||
* - Each page cache page in the index allocation mapping must be
|
||||
* locked whilst being accessed otherwise we may find a corrupt
|
||||
* page due to it being under ->writepage at the moment which
|
||||
|
@ -1085,11 +1085,11 @@ static inline int ntfs_filldir(ntfs_volume *vol, loff_t fpos,
|
|||
* While this will return the names in random order this doesn't matter for
|
||||
* ->readdir but OTOH results in a faster ->readdir.
|
||||
*
|
||||
* VFS calls ->readdir without BKL but with i_sem held. This protects the VFS
|
||||
* VFS calls ->readdir without BKL but with i_mutex held. This protects the VFS
|
||||
* parts (e.g. ->f_pos and ->i_size, and it also protects against directory
|
||||
* modifications).
|
||||
*
|
||||
* Locking: - Caller must hold i_sem on the directory.
|
||||
* Locking: - Caller must hold i_mutex on the directory.
|
||||
* - Each page cache page in the index allocation mapping must be
|
||||
* locked whilst being accessed otherwise we may find a corrupt
|
||||
* page due to it being under ->writepage at the moment which
|
||||
|
@ -1520,7 +1520,7 @@ static int ntfs_dir_open(struct inode *vi, struct file *filp)
|
|||
* Note: In the past @filp could be NULL so we ignore it as we don't need it
|
||||
* anyway.
|
||||
*
|
||||
* Locking: Caller must hold i_sem on the inode.
|
||||
* Locking: Caller must hold i_mutex on the inode.
|
||||
*
|
||||
* TODO: We should probably also write all attribute/index inodes associated
|
||||
* with this inode but since we have no simple way of getting to them we ignore
|
||||
|
|
|
@ -106,7 +106,7 @@ static int ntfs_file_open(struct inode *vi, struct file *filp)
|
|||
* this is the case, the necessary zeroing will also have happened and that all
|
||||
* metadata is self-consistent.
|
||||
*
|
||||
* Locking: i_sem on the vfs inode corrseponsind to the ntfs inode @ni must be
|
||||
* Locking: i_mutex on the vfs inode corrseponsind to the ntfs inode @ni must be
|
||||
* held by the caller.
|
||||
*/
|
||||
static int ntfs_attr_extend_initialized(ntfs_inode *ni, const s64 new_init_size,
|
||||
|
@ -473,7 +473,7 @@ static inline int ntfs_submit_bh_for_read(struct buffer_head *bh)
|
|||
* @bytes: number of bytes to be written
|
||||
*
|
||||
* This is called for non-resident attributes from ntfs_file_buffered_write()
|
||||
* with i_sem held on the inode (@pages[0]->mapping->host). There are
|
||||
* with i_mutex held on the inode (@pages[0]->mapping->host). There are
|
||||
* @nr_pages pages in @pages which are locked but not kmap()ped. The source
|
||||
* data has not yet been copied into the @pages.
|
||||
*
|
||||
|
@ -1637,7 +1637,7 @@ static inline int ntfs_commit_pages_after_non_resident_write(
|
|||
* @pos: byte position in file at which the write begins
|
||||
* @bytes: number of bytes to be written
|
||||
*
|
||||
* This is called from ntfs_file_buffered_write() with i_sem held on the inode
|
||||
* This is called from ntfs_file_buffered_write() with i_mutex held on the inode
|
||||
* (@pages[0]->mapping->host). There are @nr_pages pages in @pages which are
|
||||
* locked but not kmap()ped. The source data has already been copied into the
|
||||
* @page. ntfs_prepare_pages_for_non_resident_write() has been called before
|
||||
|
@ -1814,7 +1814,7 @@ static int ntfs_commit_pages_after_write(struct page **pages,
|
|||
/**
|
||||
* ntfs_file_buffered_write -
|
||||
*
|
||||
* Locking: The vfs is holding ->i_sem on the inode.
|
||||
* Locking: The vfs is holding ->i_mutex on the inode.
|
||||
*/
|
||||
static ssize_t ntfs_file_buffered_write(struct kiocb *iocb,
|
||||
const struct iovec *iov, unsigned long nr_segs,
|
||||
|
@ -2196,9 +2196,9 @@ static ssize_t ntfs_file_aio_write(struct kiocb *iocb, const char __user *buf,
|
|||
|
||||
BUG_ON(iocb->ki_pos != pos);
|
||||
|
||||
down(&inode->i_sem);
|
||||
mutex_lock(&inode->i_mutex);
|
||||
ret = ntfs_file_aio_write_nolock(iocb, &local_iov, 1, &iocb->ki_pos);
|
||||
up(&inode->i_sem);
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
if (ret > 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
|
||||
int err = sync_page_range(inode, mapping, pos, ret);
|
||||
if (err < 0)
|
||||
|
@ -2221,12 +2221,12 @@ static ssize_t ntfs_file_writev(struct file *file, const struct iovec *iov,
|
|||
struct kiocb kiocb;
|
||||
ssize_t ret;
|
||||
|
||||
down(&inode->i_sem);
|
||||
mutex_lock(&inode->i_mutex);
|
||||
init_sync_kiocb(&kiocb, file);
|
||||
ret = ntfs_file_aio_write_nolock(&kiocb, iov, nr_segs, ppos);
|
||||
if (ret == -EIOCBQUEUED)
|
||||
ret = wait_on_sync_kiocb(&kiocb);
|
||||
up(&inode->i_sem);
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
if (ret > 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
|
||||
int err = sync_page_range(inode, mapping, *ppos - ret, ret);
|
||||
if (err < 0)
|
||||
|
@ -2269,7 +2269,7 @@ static ssize_t ntfs_file_write(struct file *file, const char __user *buf,
|
|||
* Note: In the past @filp could be NULL so we ignore it as we don't need it
|
||||
* anyway.
|
||||
*
|
||||
* Locking: Caller must hold i_sem on the inode.
|
||||
* Locking: Caller must hold i_mutex on the inode.
|
||||
*
|
||||
* TODO: We should probably also write all attribute/index inodes associated
|
||||
* with this inode but since we have no simple way of getting to them we ignore
|
||||
|
|
|
@ -32,7 +32,7 @@
|
|||
* Allocate a new index context, initialize it with @idx_ni and return it.
|
||||
* Return NULL if allocation failed.
|
||||
*
|
||||
* Locking: Caller must hold i_sem on the index inode.
|
||||
* Locking: Caller must hold i_mutex on the index inode.
|
||||
*/
|
||||
ntfs_index_context *ntfs_index_ctx_get(ntfs_inode *idx_ni)
|
||||
{
|
||||
|
@ -50,7 +50,7 @@ ntfs_index_context *ntfs_index_ctx_get(ntfs_inode *idx_ni)
|
|||
*
|
||||
* Release the index context @ictx, releasing all associated resources.
|
||||
*
|
||||
* Locking: Caller must hold i_sem on the index inode.
|
||||
* Locking: Caller must hold i_mutex on the index inode.
|
||||
*/
|
||||
void ntfs_index_ctx_put(ntfs_index_context *ictx)
|
||||
{
|
||||
|
@ -106,7 +106,7 @@ void ntfs_index_ctx_put(ntfs_index_context *ictx)
|
|||
* or ntfs_index_entry_write() before the call to ntfs_index_ctx_put() to
|
||||
* ensure that the changes are written to disk.
|
||||
*
|
||||
* Locking: - Caller must hold i_sem on the index inode.
|
||||
* Locking: - Caller must hold i_mutex on the index inode.
|
||||
* - Each page cache page in the index allocation mapping must be
|
||||
* locked whilst being accessed otherwise we may find a corrupt
|
||||
* page due to it being under ->writepage at the moment which
|
||||
|
|
|
@ -2125,13 +2125,13 @@ void ntfs_put_inode(struct inode *vi)
|
|||
ntfs_inode *ni = NTFS_I(vi);
|
||||
if (NInoIndexAllocPresent(ni)) {
|
||||
struct inode *bvi = NULL;
|
||||
down(&vi->i_sem);
|
||||
mutex_lock(&vi->i_mutex);
|
||||
if (atomic_read(&vi->i_count) == 2) {
|
||||
bvi = ni->itype.index.bmp_ino;
|
||||
if (bvi)
|
||||
ni->itype.index.bmp_ino = NULL;
|
||||
}
|
||||
up(&vi->i_sem);
|
||||
mutex_unlock(&vi->i_mutex);
|
||||
if (bvi)
|
||||
iput(bvi);
|
||||
}
|
||||
|
@ -2311,7 +2311,7 @@ static const char *es = " Leaving inconsistent metadata. Unmount and run "
|
|||
*
|
||||
* Returns 0 on success or -errno on error.
|
||||
*
|
||||
* Called with ->i_sem held. In all but one case ->i_alloc_sem is held for
|
||||
* Called with ->i_mutex held. In all but one case ->i_alloc_sem is held for
|
||||
* writing. The only case in the kernel where ->i_alloc_sem is not held is
|
||||
* mm/filemap.c::generic_file_buffered_write() where vmtruncate() is called
|
||||
* with the current i_size as the offset. The analogous place in NTFS is in
|
||||
|
@ -2831,7 +2831,7 @@ void ntfs_truncate_vfs(struct inode *vi) {
|
|||
* We also abort all changes of user, group, and mode as we do not implement
|
||||
* the NTFS ACLs yet.
|
||||
*
|
||||
* Called with ->i_sem held. For the ATTR_SIZE (i.e. ->truncate) case, also
|
||||
* Called with ->i_mutex held. For the ATTR_SIZE (i.e. ->truncate) case, also
|
||||
* called with ->i_alloc_sem held for writing.
|
||||
*
|
||||
* Basically this is a copy of generic notify_change() and inode_setattr()
|
||||
|
|
|
@ -96,7 +96,7 @@
|
|||
* name. We then convert the name to the current NLS code page, and proceed
|
||||
* searching for a dentry with this name, etc, as in case 2), above.
|
||||
*
|
||||
* Locking: Caller must hold i_sem on the directory.
|
||||
* Locking: Caller must hold i_mutex on the directory.
|
||||
*/
|
||||
static struct dentry *ntfs_lookup(struct inode *dir_ino, struct dentry *dent,
|
||||
struct nameidata *nd)
|
||||
|
@ -254,7 +254,7 @@ static struct dentry *ntfs_lookup(struct inode *dir_ino, struct dentry *dent,
|
|||
nls_name.hash = full_name_hash(nls_name.name, nls_name.len);
|
||||
|
||||
/*
|
||||
* Note: No need for dent->d_lock lock as i_sem is held on the
|
||||
* Note: No need for dent->d_lock lock as i_mutex is held on the
|
||||
* parent inode.
|
||||
*/
|
||||
|
||||
|
@ -374,7 +374,7 @@ struct inode_operations ntfs_dir_inode_ops = {
|
|||
* The code is based on the ext3 ->get_parent() implementation found in
|
||||
* fs/ext3/namei.c::ext3_get_parent().
|
||||
*
|
||||
* Note: ntfs_get_parent() is called with @child_dent->d_inode->i_sem down.
|
||||
* Note: ntfs_get_parent() is called with @child_dent->d_inode->i_mutex down.
|
||||
*
|
||||
* Return the dentry of the parent directory on success or the error code on
|
||||
* error (IS_ERR() is true).
|
||||
|
|
|
@ -48,7 +48,7 @@ BOOL ntfs_mark_quotas_out_of_date(ntfs_volume *vol)
|
|||
ntfs_error(vol->sb, "Quota inodes are not open.");
|
||||
return FALSE;
|
||||
}
|
||||
down(&vol->quota_q_ino->i_sem);
|
||||
mutex_lock(&vol->quota_q_ino->i_mutex);
|
||||
ictx = ntfs_index_ctx_get(NTFS_I(vol->quota_q_ino));
|
||||
if (!ictx) {
|
||||
ntfs_error(vol->sb, "Failed to get index context.");
|
||||
|
@ -98,7 +98,7 @@ BOOL ntfs_mark_quotas_out_of_date(ntfs_volume *vol)
|
|||
ntfs_index_entry_mark_dirty(ictx);
|
||||
set_done:
|
||||
ntfs_index_ctx_put(ictx);
|
||||
up(&vol->quota_q_ino->i_sem);
|
||||
mutex_unlock(&vol->quota_q_ino->i_mutex);
|
||||
/*
|
||||
* We set the flag so we do not try to mark the quotas out of date
|
||||
* again on remount.
|
||||
|
@ -110,7 +110,7 @@ BOOL ntfs_mark_quotas_out_of_date(ntfs_volume *vol)
|
|||
err_out:
|
||||
if (ictx)
|
||||
ntfs_index_ctx_put(ictx);
|
||||
up(&vol->quota_q_ino->i_sem);
|
||||
mutex_unlock(&vol->quota_q_ino->i_mutex);
|
||||
return FALSE;
|
||||
}
|
||||
|
||||
|
|
|
@ -1213,10 +1213,10 @@ static int check_windows_hibernation_status(ntfs_volume *vol)
|
|||
* Find the inode number for the hibernation file by looking up the
|
||||
* filename hiberfil.sys in the root directory.
|
||||
*/
|
||||
down(&vol->root_ino->i_sem);
|
||||
mutex_lock(&vol->root_ino->i_mutex);
|
||||
mref = ntfs_lookup_inode_by_name(NTFS_I(vol->root_ino), hiberfil, 12,
|
||||
&name);
|
||||
up(&vol->root_ino->i_sem);
|
||||
mutex_unlock(&vol->root_ino->i_mutex);
|
||||
if (IS_ERR_MREF(mref)) {
|
||||
ret = MREF_ERR(mref);
|
||||
/* If the file does not exist, Windows is not hibernated. */
|
||||
|
@ -1307,10 +1307,10 @@ static BOOL load_and_init_quota(ntfs_volume *vol)
|
|||
* Find the inode number for the quota file by looking up the filename
|
||||
* $Quota in the extended system files directory $Extend.
|
||||
*/
|
||||
down(&vol->extend_ino->i_sem);
|
||||
mutex_lock(&vol->extend_ino->i_mutex);
|
||||
mref = ntfs_lookup_inode_by_name(NTFS_I(vol->extend_ino), Quota, 6,
|
||||
&name);
|
||||
up(&vol->extend_ino->i_sem);
|
||||
mutex_unlock(&vol->extend_ino->i_mutex);
|
||||
if (IS_ERR_MREF(mref)) {
|
||||
/*
|
||||
* If the file does not exist, quotas are disabled and have
|
||||
|
@ -1390,10 +1390,10 @@ static BOOL load_and_init_usnjrnl(ntfs_volume *vol)
|
|||
* Find the inode number for the transaction log file by looking up the
|
||||
* filename $UsnJrnl in the extended system files directory $Extend.
|
||||
*/
|
||||
down(&vol->extend_ino->i_sem);
|
||||
mutex_lock(&vol->extend_ino->i_mutex);
|
||||
mref = ntfs_lookup_inode_by_name(NTFS_I(vol->extend_ino), UsnJrnl, 8,
|
||||
&name);
|
||||
up(&vol->extend_ino->i_sem);
|
||||
mutex_unlock(&vol->extend_ino->i_mutex);
|
||||
if (IS_ERR_MREF(mref)) {
|
||||
/*
|
||||
* If the file does not exist, transaction logging is disabled,
|
||||
|
@ -2312,9 +2312,9 @@ static void ntfs_put_super(struct super_block *sb)
|
|||
if (!list_empty(&sb->s_dirty)) {
|
||||
const char *s1, *s2;
|
||||
|
||||
down(&vol->mft_ino->i_sem);
|
||||
mutex_lock(&vol->mft_ino->i_mutex);
|
||||
truncate_inode_pages(vol->mft_ino->i_mapping, 0);
|
||||
up(&vol->mft_ino->i_sem);
|
||||
mutex_unlock(&vol->mft_ino->i_mutex);
|
||||
write_inode_now(vol->mft_ino, 1);
|
||||
if (!list_empty(&sb->s_dirty)) {
|
||||
static const char *_s1 = "inodes";
|
||||
|
|
|
@ -966,7 +966,7 @@ static int ocfs2_truncate_log_append(struct ocfs2_super *osb,
|
|||
mlog_entry("start_blk = %"MLFu64", num_clusters = %u\n", start_blk,
|
||||
num_clusters);
|
||||
|
||||
BUG_ON(!down_trylock(&tl_inode->i_sem));
|
||||
BUG_ON(mutex_trylock(&tl_inode->i_mutex));
|
||||
|
||||
start_cluster = ocfs2_blocks_to_clusters(osb->sb, start_blk);
|
||||
|
||||
|
@ -1108,7 +1108,7 @@ static int ocfs2_replay_truncate_records(struct ocfs2_super *osb,
|
|||
return status;
|
||||
}
|
||||
|
||||
/* Expects you to already be holding tl_inode->i_sem */
|
||||
/* Expects you to already be holding tl_inode->i_mutex */
|
||||
static int __ocfs2_flush_truncate_log(struct ocfs2_super *osb)
|
||||
{
|
||||
int status;
|
||||
|
@ -1123,7 +1123,7 @@ static int __ocfs2_flush_truncate_log(struct ocfs2_super *osb)
|
|||
|
||||
mlog_entry_void();
|
||||
|
||||
BUG_ON(!down_trylock(&tl_inode->i_sem));
|
||||
BUG_ON(mutex_trylock(&tl_inode->i_mutex));
|
||||
|
||||
di = (struct ocfs2_dinode *) tl_bh->b_data;
|
||||
tl = &di->id2.i_dealloc;
|
||||
|
@ -1198,9 +1198,9 @@ int ocfs2_flush_truncate_log(struct ocfs2_super *osb)
|
|||
int status;
|
||||
struct inode *tl_inode = osb->osb_tl_inode;
|
||||
|
||||
down(&tl_inode->i_sem);
|
||||
mutex_lock(&tl_inode->i_mutex);
|
||||
status = __ocfs2_flush_truncate_log(osb);
|
||||
up(&tl_inode->i_sem);
|
||||
mutex_unlock(&tl_inode->i_mutex);
|
||||
|
||||
return status;
|
||||
}
|
||||
|
@ -1363,7 +1363,7 @@ int ocfs2_complete_truncate_log_recovery(struct ocfs2_super *osb,
|
|||
mlog(0, "cleanup %u records from %"MLFu64"\n", num_recs,
|
||||
tl_copy->i_blkno);
|
||||
|
||||
down(&tl_inode->i_sem);
|
||||
mutex_lock(&tl_inode->i_mutex);
|
||||
for(i = 0; i < num_recs; i++) {
|
||||
if (ocfs2_truncate_log_needs_flush(osb)) {
|
||||
status = __ocfs2_flush_truncate_log(osb);
|
||||
|
@ -1395,7 +1395,7 @@ int ocfs2_complete_truncate_log_recovery(struct ocfs2_super *osb,
|
|||
}
|
||||
|
||||
bail_up:
|
||||
up(&tl_inode->i_sem);
|
||||
mutex_unlock(&tl_inode->i_mutex);
|
||||
|
||||
mlog_exit(status);
|
||||
return status;
|
||||
|
@ -1840,7 +1840,7 @@ int ocfs2_commit_truncate(struct ocfs2_super *osb,
|
|||
|
||||
mlog(0, "clusters_to_del = %u in this pass\n", clusters_to_del);
|
||||
|
||||
down(&tl_inode->i_sem);
|
||||
mutex_lock(&tl_inode->i_mutex);
|
||||
tl_sem = 1;
|
||||
/* ocfs2_truncate_log_needs_flush guarantees us at least one
|
||||
* record is free for use. If there isn't any, we flush to get
|
||||
|
@ -1875,7 +1875,7 @@ int ocfs2_commit_truncate(struct ocfs2_super *osb,
|
|||
goto bail;
|
||||
}
|
||||
|
||||
up(&tl_inode->i_sem);
|
||||
mutex_unlock(&tl_inode->i_mutex);
|
||||
tl_sem = 0;
|
||||
|
||||
ocfs2_commit_trans(handle);
|
||||
|
@ -1890,7 +1890,7 @@ int ocfs2_commit_truncate(struct ocfs2_super *osb,
|
|||
ocfs2_schedule_truncate_log_flush(osb, 1);
|
||||
|
||||
if (tl_sem)
|
||||
up(&tl_inode->i_sem);
|
||||
mutex_unlock(&tl_inode->i_mutex);
|
||||
|
||||
if (handle)
|
||||
ocfs2_commit_trans(handle);
|
||||
|
@ -1994,7 +1994,7 @@ int ocfs2_prepare_truncate(struct ocfs2_super *osb,
|
|||
goto bail;
|
||||
}
|
||||
|
||||
down(&ext_alloc_inode->i_sem);
|
||||
mutex_lock(&ext_alloc_inode->i_mutex);
|
||||
(*tc)->tc_ext_alloc_inode = ext_alloc_inode;
|
||||
|
||||
status = ocfs2_meta_lock(ext_alloc_inode,
|
||||
|
@ -2026,7 +2026,7 @@ static void ocfs2_free_truncate_context(struct ocfs2_truncate_context *tc)
|
|||
if (tc->tc_ext_alloc_locked)
|
||||
ocfs2_meta_unlock(tc->tc_ext_alloc_inode, 1);
|
||||
|
||||
up(&tc->tc_ext_alloc_inode->i_sem);
|
||||
mutex_unlock(&tc->tc_ext_alloc_inode->i_mutex);
|
||||
iput(tc->tc_ext_alloc_inode);
|
||||
}
|
||||
|
||||
|
|
|
@ -653,7 +653,7 @@ static struct config_group *o2nm_cluster_group_make_group(struct config_group *g
|
|||
struct config_group *o2hb_group = NULL, *ret = NULL;
|
||||
void *defs = NULL;
|
||||
|
||||
/* this runs under the parent dir's i_sem; there can be only
|
||||
/* this runs under the parent dir's i_mutex; there can be only
|
||||
* one caller in here at a time */
|
||||
if (o2nm_single_cluster)
|
||||
goto out; /* ENOSPC */
|
||||
|
|
|
@ -202,7 +202,7 @@ int ocfs2_readdir(struct file * filp, void * dirent, filldir_t filldir)
|
|||
}
|
||||
|
||||
/*
|
||||
* NOTE: this should always be called with parent dir i_sem taken.
|
||||
* NOTE: this should always be called with parent dir i_mutex taken.
|
||||
*/
|
||||
int ocfs2_find_files_on_disk(const char *name,
|
||||
int namelen,
|
||||
|
@ -245,7 +245,7 @@ int ocfs2_find_files_on_disk(const char *name,
|
|||
* Return 0 if the name does not exist
|
||||
* Return -EEXIST if the directory contains the name
|
||||
*
|
||||
* Callers should have i_sem + a cluster lock on dir
|
||||
* Callers should have i_mutex + a cluster lock on dir
|
||||
*/
|
||||
int ocfs2_check_dir_for_entry(struct inode *dir,
|
||||
const char *name,
|
||||
|
|
|
@ -492,7 +492,7 @@ static int ocfs2_extend_allocation(struct inode *inode,
|
|||
}
|
||||
|
||||
/* blocks peope in read/write from reading our allocation
|
||||
* until we're done changing it. We depend on i_sem to block
|
||||
* until we're done changing it. We depend on i_mutex to block
|
||||
* other extend/truncate calls while we're here. Ordering wrt
|
||||
* start_trans is important here -- always do it before! */
|
||||
down_write(&OCFS2_I(inode)->ip_alloc_sem);
|
||||
|
@ -958,8 +958,8 @@ static ssize_t ocfs2_file_aio_write(struct kiocb *iocb,
|
|||
filp->f_flags &= ~O_DIRECT;
|
||||
#endif
|
||||
|
||||
down(&inode->i_sem);
|
||||
/* to match setattr's i_sem -> i_alloc_sem -> rw_lock ordering */
|
||||
mutex_lock(&inode->i_mutex);
|
||||
/* to match setattr's i_mutex -> i_alloc_sem -> rw_lock ordering */
|
||||
if (filp->f_flags & O_DIRECT) {
|
||||
have_alloc_sem = 1;
|
||||
down_read(&inode->i_alloc_sem);
|
||||
|
@ -1123,7 +1123,7 @@ static ssize_t ocfs2_file_aio_write(struct kiocb *iocb,
|
|||
up_read(&inode->i_alloc_sem);
|
||||
if (rw_level != -1)
|
||||
ocfs2_rw_unlock(inode, rw_level);
|
||||
up(&inode->i_sem);
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
|
||||
mlog_exit(ret);
|
||||
return ret;
|
||||
|
|
|
@ -485,10 +485,10 @@ static int ocfs2_remove_inode(struct inode *inode,
|
|||
goto bail;
|
||||
}
|
||||
|
||||
down(&inode_alloc_inode->i_sem);
|
||||
mutex_lock(&inode_alloc_inode->i_mutex);
|
||||
status = ocfs2_meta_lock(inode_alloc_inode, NULL, &inode_alloc_bh, 1);
|
||||
if (status < 0) {
|
||||
up(&inode_alloc_inode->i_sem);
|
||||
mutex_unlock(&inode_alloc_inode->i_mutex);
|
||||
|
||||
mlog_errno(status);
|
||||
goto bail;
|
||||
|
@ -536,7 +536,7 @@ static int ocfs2_remove_inode(struct inode *inode,
|
|||
ocfs2_commit_trans(handle);
|
||||
bail_unlock:
|
||||
ocfs2_meta_unlock(inode_alloc_inode, 1);
|
||||
up(&inode_alloc_inode->i_sem);
|
||||
mutex_unlock(&inode_alloc_inode->i_mutex);
|
||||
brelse(inode_alloc_bh);
|
||||
bail:
|
||||
iput(inode_alloc_inode);
|
||||
|
@ -567,10 +567,10 @@ static int ocfs2_wipe_inode(struct inode *inode,
|
|||
/* Lock the orphan dir. The lock will be held for the entire
|
||||
* delete_inode operation. We do this now to avoid races with
|
||||
* recovery completion on other nodes. */
|
||||
down(&orphan_dir_inode->i_sem);
|
||||
mutex_lock(&orphan_dir_inode->i_mutex);
|
||||
status = ocfs2_meta_lock(orphan_dir_inode, NULL, &orphan_dir_bh, 1);
|
||||
if (status < 0) {
|
||||
up(&orphan_dir_inode->i_sem);
|
||||
mutex_unlock(&orphan_dir_inode->i_mutex);
|
||||
|
||||
mlog_errno(status);
|
||||
goto bail;
|
||||
|
@ -593,7 +593,7 @@ static int ocfs2_wipe_inode(struct inode *inode,
|
|||
|
||||
bail_unlock_dir:
|
||||
ocfs2_meta_unlock(orphan_dir_inode, 1);
|
||||
up(&orphan_dir_inode->i_sem);
|
||||
mutex_unlock(&orphan_dir_inode->i_mutex);
|
||||
brelse(orphan_dir_bh);
|
||||
bail:
|
||||
iput(orphan_dir_inode);
|
||||
|
|
|
@ -216,7 +216,7 @@ void ocfs2_handle_add_inode(struct ocfs2_journal_handle *handle,
|
|||
atomic_inc(&inode->i_count);
|
||||
|
||||
/* we're obviously changing it... */
|
||||
down(&inode->i_sem);
|
||||
mutex_lock(&inode->i_mutex);
|
||||
|
||||
/* sanity check */
|
||||
BUG_ON(OCFS2_I(inode)->ip_handle);
|
||||
|
@ -241,7 +241,7 @@ static void ocfs2_handle_unlock_inodes(struct ocfs2_journal_handle *handle)
|
|||
OCFS2_I(inode)->ip_handle = NULL;
|
||||
list_del_init(&OCFS2_I(inode)->ip_handle_list);
|
||||
|
||||
up(&inode->i_sem);
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
iput(inode);
|
||||
}
|
||||
}
|
||||
|
@ -1433,10 +1433,10 @@ static int ocfs2_recover_orphans(struct ocfs2_super *osb,
|
|||
goto out;
|
||||
}
|
||||
|
||||
down(&orphan_dir_inode->i_sem);
|
||||
mutex_lock(&orphan_dir_inode->i_mutex);
|
||||
status = ocfs2_meta_lock(orphan_dir_inode, NULL, NULL, 0);
|
||||
if (status < 0) {
|
||||
up(&orphan_dir_inode->i_sem);
|
||||
mutex_unlock(&orphan_dir_inode->i_mutex);
|
||||
mlog_errno(status);
|
||||
goto out;
|
||||
}
|
||||
|
@ -1451,7 +1451,7 @@ static int ocfs2_recover_orphans(struct ocfs2_super *osb,
|
|||
if (!bh)
|
||||
status = -EINVAL;
|
||||
if (status < 0) {
|
||||
up(&orphan_dir_inode->i_sem);
|
||||
mutex_unlock(&orphan_dir_inode->i_mutex);
|
||||
if (bh)
|
||||
brelse(bh);
|
||||
mlog_errno(status);
|
||||
|
@ -1465,7 +1465,7 @@ static int ocfs2_recover_orphans(struct ocfs2_super *osb,
|
|||
|
||||
if (!ocfs2_check_dir_entry(orphan_dir_inode,
|
||||
de, bh, local)) {
|
||||
up(&orphan_dir_inode->i_sem);
|
||||
mutex_unlock(&orphan_dir_inode->i_mutex);
|
||||
status = -EINVAL;
|
||||
mlog_errno(status);
|
||||
brelse(bh);
|
||||
|
@ -1509,7 +1509,7 @@ static int ocfs2_recover_orphans(struct ocfs2_super *osb,
|
|||
}
|
||||
brelse(bh);
|
||||
}
|
||||
up(&orphan_dir_inode->i_sem);
|
||||
mutex_unlock(&orphan_dir_inode->i_mutex);
|
||||
|
||||
ocfs2_meta_unlock(orphan_dir_inode, 0);
|
||||
have_disk_lock = 0;
|
||||
|
|
|
@ -334,7 +334,7 @@ int ocfs2_begin_local_alloc_recovery(struct ocfs2_super *osb,
|
|||
goto bail;
|
||||
}
|
||||
|
||||
down(&inode->i_sem);
|
||||
mutex_lock(&inode->i_mutex);
|
||||
|
||||
status = ocfs2_read_block(osb, OCFS2_I(inode)->ip_blkno,
|
||||
&alloc_bh, 0, inode);
|
||||
|
@ -367,7 +367,7 @@ int ocfs2_begin_local_alloc_recovery(struct ocfs2_super *osb,
|
|||
brelse(alloc_bh);
|
||||
|
||||
if (inode) {
|
||||
up(&inode->i_sem);
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
iput(inode);
|
||||
}
|
||||
|
||||
|
@ -446,7 +446,7 @@ int ocfs2_complete_local_alloc_recovery(struct ocfs2_super *osb,
|
|||
|
||||
/*
|
||||
* make sure we've got at least bitswanted contiguous bits in the
|
||||
* local alloc. You lose them when you drop i_sem.
|
||||
* local alloc. You lose them when you drop i_mutex.
|
||||
*
|
||||
* We will add ourselves to the transaction passed in, but may start
|
||||
* our own in order to shift windows.
|
||||
|
|
fs/open.c
@ -211,9 +211,9 @@ int do_truncate(struct dentry *dentry, loff_t length, unsigned int time_attrs,
|
|||
newattrs.ia_valid |= ATTR_FILE;
|
||||
}
|
||||
|
||||
down(&dentry->d_inode->i_sem);
|
||||
mutex_lock(&dentry->d_inode->i_mutex);
|
||||
err = notify_change(dentry, &newattrs);
|
||||
up(&dentry->d_inode->i_sem);
|
||||
mutex_unlock(&dentry->d_inode->i_mutex);
|
||||
return err;
|
||||
}
|
||||
|
||||
|
@ -398,9 +398,9 @@ asmlinkage long sys_utime(char __user * filename, struct utimbuf __user * times)
|
|||
(error = vfs_permission(&nd, MAY_WRITE)) != 0)
|
||||
goto dput_and_out;
|
||||
}
|
||||
down(&inode->i_sem);
|
||||
mutex_lock(&inode->i_mutex);
|
||||
error = notify_change(nd.dentry, &newattrs);
|
||||
up(&inode->i_sem);
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
dput_and_out:
|
||||
path_release(&nd);
|
||||
out:
|
||||
|
@ -451,9 +451,9 @@ long do_utimes(char __user * filename, struct timeval * times)
|
|||
(error = vfs_permission(&nd, MAY_WRITE)) != 0)
|
||||
goto dput_and_out;
|
||||
}
|
||||
down(&inode->i_sem);
|
||||
mutex_lock(&inode->i_mutex);
|
||||
error = notify_change(nd.dentry, &newattrs);
|
||||
up(&inode->i_sem);
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
dput_and_out:
|
||||
path_release(&nd);
|
||||
out:
|
||||
|
@ -620,13 +620,13 @@ asmlinkage long sys_fchmod(unsigned int fd, mode_t mode)
|
|||
err = -EPERM;
|
||||
if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
|
||||
goto out_putf;
|
||||
down(&inode->i_sem);
|
||||
mutex_lock(&inode->i_mutex);
|
||||
if (mode == (mode_t) -1)
|
||||
mode = inode->i_mode;
|
||||
newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
|
||||
newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
|
||||
err = notify_change(dentry, &newattrs);
|
||||
up(&inode->i_sem);
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
|
||||
out_putf:
|
||||
fput(file);
|
||||
|
@ -654,13 +654,13 @@ asmlinkage long sys_chmod(const char __user * filename, mode_t mode)
|
|||
if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
|
||||
goto dput_and_out;
|
||||
|
||||
down(&inode->i_sem);
|
||||
mutex_lock(&inode->i_mutex);
|
||||
if (mode == (mode_t) -1)
|
||||
mode = inode->i_mode;
|
||||
newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
|
||||
newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
|
||||
error = notify_change(nd.dentry, &newattrs);
|
||||
up(&inode->i_sem);
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
|
||||
dput_and_out:
|
||||
path_release(&nd);
|
||||
|
@ -696,9 +696,9 @@ static int chown_common(struct dentry * dentry, uid_t user, gid_t group)
|
|||
}
|
||||
if (!S_ISDIR(inode->i_mode))
|
||||
newattrs.ia_valid |= ATTR_KILL_SUID|ATTR_KILL_SGID;
|
||||
down(&inode->i_sem);
|
||||
mutex_lock(&inode->i_mutex);
|
||||
error = notify_change(dentry, &newattrs);
|
||||
up(&inode->i_sem);
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
out:
|
||||
return error;
|
||||
}
|
||||
|
|
fs/pipe.c
@ -44,10 +44,10 @@ void pipe_wait(struct inode * inode)
|
|||
* is considered a noninteractive wait:
|
||||
*/
|
||||
prepare_to_wait(PIPE_WAIT(*inode), &wait, TASK_INTERRUPTIBLE|TASK_NONINTERACTIVE);
|
||||
up(PIPE_SEM(*inode));
|
||||
mutex_unlock(PIPE_MUTEX(*inode));
|
||||
schedule();
|
||||
finish_wait(PIPE_WAIT(*inode), &wait);
|
||||
down(PIPE_SEM(*inode));
|
||||
mutex_lock(PIPE_MUTEX(*inode));
|
||||
}
|
||||
|
||||
static inline int
|
||||
|
@ -136,7 +136,7 @@ pipe_readv(struct file *filp, const struct iovec *_iov,
|
|||
|
||||
do_wakeup = 0;
|
||||
ret = 0;
|
||||
down(PIPE_SEM(*inode));
|
||||
mutex_lock(PIPE_MUTEX(*inode));
|
||||
info = inode->i_pipe;
|
||||
for (;;) {
|
||||
int bufs = info->nrbufs;
|
||||
|
@ -200,7 +200,7 @@ pipe_readv(struct file *filp, const struct iovec *_iov,
|
|||
}
|
||||
pipe_wait(inode);
|
||||
}
|
||||
up(PIPE_SEM(*inode));
|
||||
mutex_unlock(PIPE_MUTEX(*inode));
|
||||
/* Signal writers asynchronously that there is more room. */
|
||||
if (do_wakeup) {
|
||||
wake_up_interruptible(PIPE_WAIT(*inode));
|
||||
|
@ -237,7 +237,7 @@ pipe_writev(struct file *filp, const struct iovec *_iov,
|
|||
|
||||
do_wakeup = 0;
|
||||
ret = 0;
|
||||
down(PIPE_SEM(*inode));
|
||||
mutex_lock(PIPE_MUTEX(*inode));
|
||||
info = inode->i_pipe;
|
||||
|
||||
if (!PIPE_READERS(*inode)) {
|
||||
|
@ -341,7 +341,7 @@ pipe_writev(struct file *filp, const struct iovec *_iov,
|
|||
PIPE_WAITING_WRITERS(*inode)--;
|
||||
}
|
||||
out:
|
||||
up(PIPE_SEM(*inode));
|
||||
mutex_unlock(PIPE_MUTEX(*inode));
|
||||
if (do_wakeup) {
|
||||
wake_up_interruptible(PIPE_WAIT(*inode));
|
||||
kill_fasync(PIPE_FASYNC_READERS(*inode), SIGIO, POLL_IN);
|
||||
|
@ -381,7 +381,7 @@ pipe_ioctl(struct inode *pino, struct file *filp,
|
|||
|
||||
switch (cmd) {
|
||||
case FIONREAD:
|
||||
down(PIPE_SEM(*inode));
|
||||
mutex_lock(PIPE_MUTEX(*inode));
|
||||
info = inode->i_pipe;
|
||||
count = 0;
|
||||
buf = info->curbuf;
|
||||
|
@ -390,7 +390,7 @@ pipe_ioctl(struct inode *pino, struct file *filp,
|
|||
count += info->bufs[buf].len;
|
||||
buf = (buf+1) & (PIPE_BUFFERS-1);
|
||||
}
|
||||
up(PIPE_SEM(*inode));
|
||||
mutex_unlock(PIPE_MUTEX(*inode));
|
||||
return put_user(count, (int __user *)arg);
|
||||
default:
|
||||
return -EINVAL;
|
||||
|
@ -433,7 +433,7 @@ pipe_poll(struct file *filp, poll_table *wait)
|
|||
static int
|
||||
pipe_release(struct inode *inode, int decr, int decw)
|
||||
{
|
||||
down(PIPE_SEM(*inode));
|
||||
mutex_lock(PIPE_MUTEX(*inode));
|
||||
PIPE_READERS(*inode) -= decr;
|
||||
PIPE_WRITERS(*inode) -= decw;
|
||||
if (!PIPE_READERS(*inode) && !PIPE_WRITERS(*inode)) {
|
||||
|
@ -443,7 +443,7 @@ pipe_release(struct inode *inode, int decr, int decw)
|
|||
kill_fasync(PIPE_FASYNC_READERS(*inode), SIGIO, POLL_IN);
|
||||
kill_fasync(PIPE_FASYNC_WRITERS(*inode), SIGIO, POLL_OUT);
|
||||
}
|
||||
up(PIPE_SEM(*inode));
|
||||
mutex_unlock(PIPE_MUTEX(*inode));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -454,9 +454,9 @@ pipe_read_fasync(int fd, struct file *filp, int on)
|
|||
struct inode *inode = filp->f_dentry->d_inode;
|
||||
int retval;
|
||||
|
||||
down(PIPE_SEM(*inode));
|
||||
mutex_lock(PIPE_MUTEX(*inode));
|
||||
retval = fasync_helper(fd, filp, on, PIPE_FASYNC_READERS(*inode));
|
||||
up(PIPE_SEM(*inode));
|
||||
mutex_unlock(PIPE_MUTEX(*inode));
|
||||
|
||||
if (retval < 0)
|
||||
return retval;
|
||||
|
@ -471,9 +471,9 @@ pipe_write_fasync(int fd, struct file *filp, int on)
|
|||
struct inode *inode = filp->f_dentry->d_inode;
|
||||
int retval;
|
||||
|
||||
down(PIPE_SEM(*inode));
|
||||
mutex_lock(PIPE_MUTEX(*inode));
|
||||
retval = fasync_helper(fd, filp, on, PIPE_FASYNC_WRITERS(*inode));
|
||||
up(PIPE_SEM(*inode));
|
||||
mutex_unlock(PIPE_MUTEX(*inode));
|
||||
|
||||
if (retval < 0)
|
||||
return retval;
|
||||
|
@ -488,14 +488,14 @@ pipe_rdwr_fasync(int fd, struct file *filp, int on)
|
|||
struct inode *inode = filp->f_dentry->d_inode;
|
||||
int retval;
|
||||
|
||||
down(PIPE_SEM(*inode));
|
||||
mutex_lock(PIPE_MUTEX(*inode));
|
||||
|
||||
retval = fasync_helper(fd, filp, on, PIPE_FASYNC_READERS(*inode));
|
||||
|
||||
if (retval >= 0)
|
||||
retval = fasync_helper(fd, filp, on, PIPE_FASYNC_WRITERS(*inode));
|
||||
|
||||
up(PIPE_SEM(*inode));
|
||||
mutex_unlock(PIPE_MUTEX(*inode));
|
||||
|
||||
if (retval < 0)
|
||||
return retval;
|
||||
|
@ -534,9 +534,9 @@ pipe_read_open(struct inode *inode, struct file *filp)
|
|||
{
|
||||
/* We could have perhaps used atomic_t, but this and friends
|
||||
below are the only places. So it doesn't seem worthwhile. */
|
||||
down(PIPE_SEM(*inode));
|
||||
mutex_lock(PIPE_MUTEX(*inode));
|
||||
PIPE_READERS(*inode)++;
|
||||
up(PIPE_SEM(*inode));
|
||||
mutex_unlock(PIPE_MUTEX(*inode));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -544,9 +544,9 @@ pipe_read_open(struct inode *inode, struct file *filp)
|
|||
static int
|
||||
pipe_write_open(struct inode *inode, struct file *filp)
|
||||
{
|
||||
down(PIPE_SEM(*inode));
|
||||
mutex_lock(PIPE_MUTEX(*inode));
|
||||
PIPE_WRITERS(*inode)++;
|
||||
up(PIPE_SEM(*inode));
|
||||
mutex_unlock(PIPE_MUTEX(*inode));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -554,12 +554,12 @@ pipe_write_open(struct inode *inode, struct file *filp)
|
|||
static int
|
||||
pipe_rdwr_open(struct inode *inode, struct file *filp)
|
||||
{
|
||||
down(PIPE_SEM(*inode));
|
||||
mutex_lock(PIPE_MUTEX(*inode));
|
||||
if (filp->f_mode & FMODE_READ)
|
||||
PIPE_READERS(*inode)++;
|
||||
if (filp->f_mode & FMODE_WRITE)
|
||||
PIPE_WRITERS(*inode)++;
|
||||
up(PIPE_SEM(*inode));
|
||||
mutex_unlock(PIPE_MUTEX(*inode));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -168,7 +168,7 @@ static void quota_sync_sb(struct super_block *sb, int type)
|
|||
sync_blockdev(sb->s_bdev);
|
||||
|
||||
/* Now when everything is written we can discard the pagecache so
|
||||
* that userspace sees the changes. We need i_sem and so we could
|
||||
* that userspace sees the changes. We need i_mutex and so we could
|
||||
* not do it inside dqonoff_sem. Moreover we need to be carefull
|
||||
* about races with quotaoff() (that is the reason why we have own
|
||||
* reference to inode). */
|
||||
|
@ -184,9 +184,9 @@ static void quota_sync_sb(struct super_block *sb, int type)
|
|||
up(&sb_dqopt(sb)->dqonoff_sem);
|
||||
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
|
||||
if (discard[cnt]) {
|
||||
down(&discard[cnt]->i_sem);
|
||||
mutex_lock(&discard[cnt]->i_mutex);
|
||||
truncate_inode_pages(&discard[cnt]->i_data, 0);
|
||||
up(&discard[cnt]->i_sem);
|
||||
mutex_unlock(&discard[cnt]->i_mutex);
|
||||
iput(discard[cnt]);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -33,7 +33,7 @@ loff_t generic_file_llseek(struct file *file, loff_t offset, int origin)
long long retval;
struct inode *inode = file->f_mapping->host;

down(&inode->i_sem);
mutex_lock(&inode->i_mutex);
switch (origin) {
case 2:
offset += inode->i_size;

@ -49,7 +49,7 @@ loff_t generic_file_llseek(struct file *file, loff_t offset, int origin)
}
retval = offset;
}
up(&inode->i_sem);
mutex_unlock(&inode->i_mutex);
return retval;
}

@ -30,13 +30,13 @@ int vfs_readdir(struct file *file, filldir_t filler, void *buf)
if (res)
goto out;

down(&inode->i_sem);
mutex_lock(&inode->i_mutex);
res = -ENOENT;
if (!IS_DEADDIR(inode)) {
res = file->f_op->readdir(file, buf, filler);
file_accessed(file);
}
up(&inode->i_sem);
mutex_unlock(&inode->i_mutex);
out:
return res;
}

@ -49,7 +49,7 @@ static int reiserfs_file_release(struct inode *inode, struct file *filp)
}

reiserfs_write_lock(inode->i_sb);
down(&inode->i_sem);
mutex_lock(&inode->i_mutex);
/* freeing preallocation only involves relogging blocks that
* are already in the current transaction. preallocation gets
* freed at the end of each transaction, so it is impossible for

@ -100,7 +100,7 @@ static int reiserfs_file_release(struct inode *inode, struct file *filp)
err = reiserfs_truncate_file(inode, 0);
}
out:
up(&inode->i_sem);
mutex_unlock(&inode->i_mutex);
reiserfs_write_unlock(inode->i_sb);
return err;
}

@ -1342,7 +1342,7 @@ static ssize_t reiserfs_file_write(struct file *file, /* the file we are going t
if (unlikely(!access_ok(VERIFY_READ, buf, count)))
return -EFAULT;

down(&inode->i_sem); // locks the entire file for just us
mutex_lock(&inode->i_mutex); // locks the entire file for just us

pos = *ppos;

@ -1532,12 +1532,12 @@ static ssize_t reiserfs_file_write(struct file *file, /* the file we are going t
generic_osync_inode(inode, file->f_mapping,
OSYNC_METADATA | OSYNC_DATA);

up(&inode->i_sem);
mutex_unlock(&inode->i_mutex);
reiserfs_async_progress_wait(inode->i_sb);
return (already_written != 0) ? already_written : res;

out:
up(&inode->i_sem); // unlock the file on exit.
mutex_unlock(&inode->i_mutex); // unlock the file on exit.
return res;
}

@ -40,12 +40,12 @@ void reiserfs_delete_inode(struct inode *inode)

/* The = 0 happens when we abort creating a new inode for some reason like lack of space.. */
if (!(inode->i_state & I_NEW) && INODE_PKEY(inode)->k_objectid != 0) { /* also handles bad_inode case */
down(&inode->i_sem);
mutex_lock(&inode->i_mutex);

reiserfs_delete_xattrs(inode);

if (journal_begin(&th, inode->i_sb, jbegin_count)) {
up(&inode->i_sem);
mutex_unlock(&inode->i_mutex);
goto out;
}
reiserfs_update_inode_transaction(inode);

@ -59,11 +59,11 @@ void reiserfs_delete_inode(struct inode *inode)
DQUOT_FREE_INODE(inode);

if (journal_end(&th, inode->i_sb, jbegin_count)) {
up(&inode->i_sem);
mutex_unlock(&inode->i_mutex);
goto out;
}

up(&inode->i_sem);
mutex_unlock(&inode->i_mutex);

/* check return value from reiserfs_delete_object after
* ending the transaction

@ -551,7 +551,7 @@ static int convert_tail_for_hole(struct inode *inode,

/* we don't have to make sure the conversion did not happen while
** we were locking the page because anyone that could convert
** must first take i_sem.
** must first take i_mutex.
**
** We must fix the tail page for writing because it might have buffers
** that are mapped, but have a block number of 0. This indicates tail

@ -586,7 +586,7 @@ static inline int _allocate_block(struct reiserfs_transaction_handle *th,
BUG_ON(!th->t_trans_id);

#ifdef REISERFS_PREALLOCATE
if (!(flags & GET_BLOCK_NO_ISEM)) {
if (!(flags & GET_BLOCK_NO_IMUX)) {
return reiserfs_new_unf_blocknrs2(th, inode, allocated_block_nr,
path, block);
}

@ -2318,7 +2318,7 @@ static int map_block_for_writepage(struct inode *inode,
/* this is where we fill in holes in the file. */
if (use_get_block) {
retval = reiserfs_get_block(inode, block, bh_result,
GET_BLOCK_CREATE | GET_BLOCK_NO_ISEM
GET_BLOCK_CREATE | GET_BLOCK_NO_IMUX
| GET_BLOCK_NO_DANGLE);
if (!retval) {
if (!buffer_mapped(bh_result)

@ -120,7 +120,7 @@ static int reiserfs_unpack(struct inode *inode, struct file *filp)
/* we need to make sure nobody is changing the file size beneath
** us
*/
down(&inode->i_sem);
mutex_lock(&inode->i_mutex);

write_from = inode->i_size & (blocksize - 1);
/* if we are on a block boundary, we are already unpacked. */

@ -156,7 +156,7 @@ static int reiserfs_unpack(struct inode *inode, struct file *filp)
page_cache_release(page);

out:
up(&inode->i_sem);
mutex_unlock(&inode->i_mutex);
reiserfs_write_unlock(inode->i_sb);
return retval;
}

@ -2211,7 +2211,7 @@ static ssize_t reiserfs_quota_write(struct super_block *sb, int type,
size_t towrite = len;
struct buffer_head tmp_bh, *bh;

down(&inode->i_sem);
mutex_lock(&inode->i_mutex);
while (towrite > 0) {
tocopy = sb->s_blocksize - offset < towrite ?
sb->s_blocksize - offset : towrite;

@ -2250,7 +2250,7 @@ static ssize_t reiserfs_quota_write(struct super_block *sb, int type,
inode->i_version++;
inode->i_mtime = inode->i_ctime = CURRENT_TIME;
mark_inode_dirty(inode);
up(&inode->i_sem);
mutex_unlock(&inode->i_mutex);
return len - towrite;
}

@ -205,7 +205,7 @@ int indirect2direct(struct reiserfs_transaction_handle *th, struct inode *p_s_in
1) * p_s_sb->s_blocksize;
pos1 = pos;

// we are protected by i_sem. The tail can not disapper, not
// we are protected by i_mutex. The tail can not disapper, not
// append can be done either
// we are in truncate or packing tail in file_release

@ -67,11 +67,11 @@ static struct dentry *create_xa_root(struct super_block *sb)
goto out;
} else if (!xaroot->d_inode) {
int err;
down(&privroot->d_inode->i_sem);
mutex_lock(&privroot->d_inode->i_mutex);
err =
privroot->d_inode->i_op->mkdir(privroot->d_inode, xaroot,
0700);
up(&privroot->d_inode->i_sem);
mutex_unlock(&privroot->d_inode->i_mutex);

if (err) {
dput(xaroot);

@ -219,7 +219,7 @@ static struct dentry *get_xa_file_dentry(const struct inode *inode,
} else if (flags & XATTR_REPLACE || flags & FL_READONLY) {
goto out;
} else {
/* inode->i_sem is down, so nothing else can try to create
/* inode->i_mutex is down, so nothing else can try to create
* the same xattr */
err = xadir->d_inode->i_op->create(xadir->d_inode, xafile,
0700 | S_IFREG, NULL);

@ -268,7 +268,7 @@ static struct file *open_xa_file(const struct inode *inode, const char *name,
* and don't mess with f->f_pos, but the idea is the same. Do some
* action on each and every entry in the directory.
*
* we're called with i_sem held, so there are no worries about the directory
* we're called with i_mutex held, so there are no worries about the directory
* changing underneath us.
*/
static int __xattr_readdir(struct file *filp, void *dirent, filldir_t filldir)

@ -426,7 +426,7 @@ int xattr_readdir(struct file *file, filldir_t filler, void *buf)
int res = -ENOTDIR;
if (!file->f_op || !file->f_op->readdir)
goto out;
down(&inode->i_sem);
mutex_lock(&inode->i_mutex);
// down(&inode->i_zombie);
res = -ENOENT;
if (!IS_DEADDIR(inode)) {

@ -435,7 +435,7 @@ int xattr_readdir(struct file *file, filldir_t filler, void *buf)
unlock_kernel();
}
// up(&inode->i_zombie);
up(&inode->i_sem);
mutex_unlock(&inode->i_mutex);
out:
return res;
}

@ -480,7 +480,7 @@ static inline __u32 xattr_hash(const char *msg, int len)
/* Generic extended attribute operations that can be used by xa plugins */

/*
* inode->i_sem: down
* inode->i_mutex: down
*/
int
reiserfs_xattr_set(struct inode *inode, const char *name, const void *buffer,

@ -535,7 +535,7 @@ reiserfs_xattr_set(struct inode *inode, const char *name, const void *buffer,
/* Resize it so we're ok to write there */
newattrs.ia_size = buffer_size;
newattrs.ia_valid = ATTR_SIZE | ATTR_CTIME;
down(&xinode->i_sem);
mutex_lock(&xinode->i_mutex);
err = notify_change(fp->f_dentry, &newattrs);
if (err)
goto out_filp;

@ -598,7 +598,7 @@ reiserfs_xattr_set(struct inode *inode, const char *name, const void *buffer,
}

out_filp:
up(&xinode->i_sem);
mutex_unlock(&xinode->i_mutex);
fput(fp);

out:

@ -606,7 +606,7 @@ reiserfs_xattr_set(struct inode *inode, const char *name, const void *buffer,
}

/*
* inode->i_sem: down
* inode->i_mutex: down
*/
int
reiserfs_xattr_get(const struct inode *inode, const char *name, void *buffer,

@ -793,7 +793,7 @@ reiserfs_delete_xattrs_filler(void *buf, const char *name, int namelen,

}

/* This is called w/ inode->i_sem downed */
/* This is called w/ inode->i_mutex downed */
int reiserfs_delete_xattrs(struct inode *inode)
{
struct file *fp;

@ -946,7 +946,7 @@ int reiserfs_chown_xattrs(struct inode *inode, struct iattr *attrs)

/*
* Inode operation getxattr()
* Preliminary locking: we down dentry->d_inode->i_sem
* Preliminary locking: we down dentry->d_inode->i_mutex
*/
ssize_t
reiserfs_getxattr(struct dentry * dentry, const char *name, void *buffer,

@ -970,7 +970,7 @@ reiserfs_getxattr(struct dentry * dentry, const char *name, void *buffer,
/*
* Inode operation setxattr()
*
* dentry->d_inode->i_sem down
* dentry->d_inode->i_mutex down
*/
int
reiserfs_setxattr(struct dentry *dentry, const char *name, const void *value,

@ -1008,7 +1008,7 @@ reiserfs_setxattr(struct dentry *dentry, const char *name, const void *value,
/*
* Inode operation removexattr()
*
* dentry->d_inode->i_sem down
* dentry->d_inode->i_mutex down
*/
int reiserfs_removexattr(struct dentry *dentry, const char *name)
{

@ -1091,7 +1091,7 @@ reiserfs_listxattr_filler(void *buf, const char *name, int namelen,
/*
* Inode operation listxattr()
*
* Preliminary locking: we down dentry->d_inode->i_sem
* Preliminary locking: we down dentry->d_inode->i_mutex
*/
ssize_t reiserfs_listxattr(struct dentry * dentry, char *buffer, size_t size)
{

@ -1289,9 +1289,9 @@ int reiserfs_xattr_init(struct super_block *s, int mount_flags)
if (!IS_ERR(dentry)) {
if (!(mount_flags & MS_RDONLY) && !dentry->d_inode) {
struct inode *inode = dentry->d_parent->d_inode;
down(&inode->i_sem);
mutex_lock(&inode->i_mutex);
err = inode->i_op->mkdir(inode, dentry, 0700);
up(&inode->i_sem);
mutex_unlock(&inode->i_mutex);
if (err) {
dput(dentry);
dentry = NULL;

@ -174,7 +174,7 @@ static void *posix_acl_to_disk(const struct posix_acl *acl, size_t * size)
/*
* Inode operation get_posix_acl().
*
* inode->i_sem: down
* inode->i_mutex: down
* BKL held [before 2.5.x]
*/
struct posix_acl *reiserfs_get_acl(struct inode *inode, int type)

@ -237,7 +237,7 @@ struct posix_acl *reiserfs_get_acl(struct inode *inode, int type)
/*
* Inode operation set_posix_acl().
*
* inode->i_sem: down
* inode->i_mutex: down
* BKL held [before 2.5.x]
*/
static int

@ -312,7 +312,7 @@ reiserfs_set_acl(struct inode *inode, int type, struct posix_acl *acl)
return error;
}

/* dir->i_sem: down,
/* dir->i_mutex: locked,
* inode is new and not released into the wild yet */
int
reiserfs_inherit_default_acl(struct inode *dir, struct dentry *dentry,

@ -109,7 +109,7 @@ static struct dentry *relayfs_create_entry(const char *name,
}

parent = dget(parent);
down(&parent->d_inode->i_sem);
mutex_lock(&parent->d_inode->i_mutex);
d = lookup_one_len(name, parent, strlen(name));
if (IS_ERR(d)) {
d = NULL;

@ -139,7 +139,7 @@ static struct dentry *relayfs_create_entry(const char *name,
simple_release_fs(&relayfs_mount, &relayfs_mount_count);

exit:
up(&parent->d_inode->i_sem);
mutex_unlock(&parent->d_inode->i_mutex);
dput(parent);
return d;
}

@ -204,7 +204,7 @@ int relayfs_remove(struct dentry *dentry)
return -EINVAL;

parent = dget(parent);
down(&parent->d_inode->i_sem);
mutex_lock(&parent->d_inode->i_mutex);
if (dentry->d_inode) {
if (S_ISDIR(dentry->d_inode->i_mode))
error = simple_rmdir(parent->d_inode, dentry);

@ -215,7 +215,7 @@ int relayfs_remove(struct dentry *dentry)
}
if (!error)
dput(dentry);
up(&parent->d_inode->i_sem);
mutex_unlock(&parent->d_inode->i_mutex);
dput(parent);

if (!error)

@ -476,7 +476,7 @@ static ssize_t relay_file_read(struct file *filp,
ssize_t ret = 0;
void *from;

down(&inode->i_sem);
mutex_lock(&inode->i_mutex);
if(!relay_file_read_avail(buf, *ppos))
goto out;

@ -494,7 +494,7 @@ static ssize_t relay_file_read(struct file *filp,
relay_file_read_consume(buf, read_start, count);
*ppos = relay_file_read_end_pos(buf, read_start, count);
out:
up(&inode->i_sem);
mutex_unlock(&inode->i_mutex);
return ret;
}

@ -99,7 +99,7 @@ static int create_dir(struct kobject * k, struct dentry * p,
int error;
umode_t mode = S_IFDIR| S_IRWXU | S_IRUGO | S_IXUGO;

down(&p->d_inode->i_sem);
mutex_lock(&p->d_inode->i_mutex);
*d = lookup_one_len(n, p, strlen(n));
if (!IS_ERR(*d)) {
error = sysfs_make_dirent(p->d_fsdata, *d, k, mode, SYSFS_DIR);

@ -122,7 +122,7 @@ static int create_dir(struct kobject * k, struct dentry * p,
dput(*d);
} else
error = PTR_ERR(*d);
up(&p->d_inode->i_sem);
mutex_unlock(&p->d_inode->i_mutex);
return error;
}

@ -246,7 +246,7 @@ static void remove_dir(struct dentry * d)
struct dentry * parent = dget(d->d_parent);
struct sysfs_dirent * sd;

down(&parent->d_inode->i_sem);
mutex_lock(&parent->d_inode->i_mutex);
d_delete(d);
sd = d->d_fsdata;
list_del_init(&sd->s_sibling);

@ -257,7 +257,7 @@ static void remove_dir(struct dentry * d)
pr_debug(" o %s removing done (%d)\n",d->d_name.name,
atomic_read(&d->d_count));

up(&parent->d_inode->i_sem);
mutex_unlock(&parent->d_inode->i_mutex);
dput(parent);
}

@ -286,7 +286,7 @@ void sysfs_remove_dir(struct kobject * kobj)
return;

pr_debug("sysfs %s: removing dir\n",dentry->d_name.name);
down(&dentry->d_inode->i_sem);
mutex_lock(&dentry->d_inode->i_mutex);
parent_sd = dentry->d_fsdata;
list_for_each_entry_safe(sd, tmp, &parent_sd->s_children, s_sibling) {
if (!sd->s_element || !(sd->s_type & SYSFS_NOT_PINNED))

@ -295,7 +295,7 @@ void sysfs_remove_dir(struct kobject * kobj)
sysfs_drop_dentry(sd, dentry);
sysfs_put(sd);
}
up(&dentry->d_inode->i_sem);
mutex_unlock(&dentry->d_inode->i_mutex);

remove_dir(dentry);
/**

@ -318,7 +318,7 @@ int sysfs_rename_dir(struct kobject * kobj, const char *new_name)
down_write(&sysfs_rename_sem);
parent = kobj->parent->dentry;

down(&parent->d_inode->i_sem);
mutex_lock(&parent->d_inode->i_mutex);

new_dentry = lookup_one_len(new_name, parent, strlen(new_name));
if (!IS_ERR(new_dentry)) {

@ -334,7 +334,7 @@ int sysfs_rename_dir(struct kobject * kobj, const char *new_name)
error = -EEXIST;
dput(new_dentry);
}
up(&parent->d_inode->i_sem);
mutex_unlock(&parent->d_inode->i_mutex);
up_write(&sysfs_rename_sem);

return error;

@ -345,9 +345,9 @@ static int sysfs_dir_open(struct inode *inode, struct file *file)
struct dentry * dentry = file->f_dentry;
struct sysfs_dirent * parent_sd = dentry->d_fsdata;

down(&dentry->d_inode->i_sem);
mutex_lock(&dentry->d_inode->i_mutex);
file->private_data = sysfs_new_dirent(parent_sd, NULL);
up(&dentry->d_inode->i_sem);
mutex_unlock(&dentry->d_inode->i_mutex);

return file->private_data ? 0 : -ENOMEM;

@ -358,9 +358,9 @@ static int sysfs_dir_close(struct inode *inode, struct file *file)
struct dentry * dentry = file->f_dentry;
struct sysfs_dirent * cursor = file->private_data;

down(&dentry->d_inode->i_sem);
mutex_lock(&dentry->d_inode->i_mutex);
list_del_init(&cursor->s_sibling);
up(&dentry->d_inode->i_sem);
mutex_unlock(&dentry->d_inode->i_mutex);

release_sysfs_dirent(cursor);

@ -436,7 +436,7 @@ static loff_t sysfs_dir_lseek(struct file * file, loff_t offset, int origin)
{
struct dentry * dentry = file->f_dentry;

down(&dentry->d_inode->i_sem);
mutex_lock(&dentry->d_inode->i_mutex);
switch (origin) {
case 1:
offset += file->f_pos;

@ -444,7 +444,7 @@ static loff_t sysfs_dir_lseek(struct file * file, loff_t offset, int origin)
if (offset >= 0)
break;
default:
up(&file->f_dentry->d_inode->i_sem);
mutex_unlock(&file->f_dentry->d_inode->i_mutex);
return -EINVAL;
}
if (offset != file->f_pos) {

@ -468,7 +468,7 @@ static loff_t sysfs_dir_lseek(struct file * file, loff_t offset, int origin)
list_add_tail(&cursor->s_sibling, p);
}
}
up(&dentry->d_inode->i_sem);
mutex_unlock(&dentry->d_inode->i_mutex);
return offset;
}

@ -483,4 +483,3 @@ struct file_operations sysfs_dir_operations = {
EXPORT_SYMBOL_GPL(sysfs_create_dir);
EXPORT_SYMBOL_GPL(sysfs_remove_dir);
EXPORT_SYMBOL_GPL(sysfs_rename_dir);

@ -364,9 +364,9 @@ int sysfs_add_file(struct dentry * dir, const struct attribute * attr, int type)
umode_t mode = (attr->mode & S_IALLUGO) | S_IFREG;
int error = 0;

down(&dir->d_inode->i_sem);
mutex_lock(&dir->d_inode->i_mutex);
error = sysfs_make_dirent(parent_sd, NULL, (void *) attr, mode, type);
up(&dir->d_inode->i_sem);
mutex_unlock(&dir->d_inode->i_mutex);

return error;
}

@ -398,7 +398,7 @@ int sysfs_update_file(struct kobject * kobj, const struct attribute * attr)
struct dentry * victim;
int res = -ENOENT;

down(&dir->d_inode->i_sem);
mutex_lock(&dir->d_inode->i_mutex);
victim = lookup_one_len(attr->name, dir, strlen(attr->name));
if (!IS_ERR(victim)) {
/* make sure dentry is really there */

@ -420,7 +420,7 @@ int sysfs_update_file(struct kobject * kobj, const struct attribute * attr)
*/
dput(victim);
}
up(&dir->d_inode->i_sem);
mutex_unlock(&dir->d_inode->i_mutex);

return res;
}

@ -441,22 +441,22 @@ int sysfs_chmod_file(struct kobject *kobj, struct attribute *attr, mode_t mode)
struct iattr newattrs;
int res = -ENOENT;

down(&dir->d_inode->i_sem);
mutex_lock(&dir->d_inode->i_mutex);
victim = lookup_one_len(attr->name, dir, strlen(attr->name));
if (!IS_ERR(victim)) {
if (victim->d_inode &&
(victim->d_parent->d_inode == dir->d_inode)) {
inode = victim->d_inode;
down(&inode->i_sem);
mutex_lock(&inode->i_mutex);
newattrs.ia_mode = (mode & S_IALLUGO) |
(inode->i_mode & ~S_IALLUGO);
newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
res = notify_change(victim, &newattrs);
up(&inode->i_sem);
mutex_unlock(&inode->i_mutex);
}
dput(victim);
}
up(&dir->d_inode->i_sem);
mutex_unlock(&dir->d_inode->i_mutex);

return res;
}

@ -480,4 +480,3 @@ void sysfs_remove_file(struct kobject * kobj, const struct attribute * attr)
EXPORT_SYMBOL_GPL(sysfs_create_file);
EXPORT_SYMBOL_GPL(sysfs_remove_file);
EXPORT_SYMBOL_GPL(sysfs_update_file);

@ -201,7 +201,7 @@ const unsigned char * sysfs_get_name(struct sysfs_dirent *sd)

/*
* Unhashes the dentry corresponding to given sysfs_dirent
* Called with parent inode's i_sem held.
* Called with parent inode's i_mutex held.
*/
void sysfs_drop_dentry(struct sysfs_dirent * sd, struct dentry * parent)
{

@ -232,7 +232,7 @@ void sysfs_hash_and_remove(struct dentry * dir, const char * name)
/* no inode means this hasn't been made visible yet */
return;

down(&dir->d_inode->i_sem);
mutex_lock(&dir->d_inode->i_mutex);
list_for_each_entry(sd, &parent_sd->s_children, s_sibling) {
if (!sd->s_element)
continue;

@ -243,7 +243,5 @@ void sysfs_hash_and_remove(struct dentry * dir, const char * name)
break;
}
}
up(&dir->d_inode->i_sem);
mutex_unlock(&dir->d_inode->i_mutex);
}

@ -86,9 +86,9 @@ int sysfs_create_link(struct kobject * kobj, struct kobject * target, const char

BUG_ON(!kobj || !kobj->dentry || !name);

down(&dentry->d_inode->i_sem);
mutex_lock(&dentry->d_inode->i_mutex);
error = sysfs_add_link(dentry, name, target);
up(&dentry->d_inode->i_sem);
mutex_unlock(&dentry->d_inode->i_mutex);
return error;
}

@ -177,4 +177,3 @@ struct inode_operations sysfs_symlink_inode_operations = {

EXPORT_SYMBOL_GPL(sysfs_create_link);
EXPORT_SYMBOL_GPL(sysfs_remove_link);

@ -1275,7 +1275,7 @@ static ssize_t ufs_quota_write(struct super_block *sb, int type,
size_t towrite = len;
struct buffer_head *bh;

down(&inode->i_sem);
mutex_lock(&inode->i_mutex);
while (towrite > 0) {
tocopy = sb->s_blocksize - offset < towrite ?
sb->s_blocksize - offset : towrite;

@ -1297,7 +1297,7 @@ static ssize_t ufs_quota_write(struct super_block *sb, int type,
}
out:
if (len == towrite) {
up(&inode->i_sem);
mutex_unlock(&inode->i_mutex);
return err;
}
if (inode->i_size < off+len-towrite)

@ -1305,7 +1305,7 @@ static ssize_t ufs_quota_write(struct super_block *sb, int type,
inode->i_version++;
inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC;
mark_inode_dirty(inode);
up(&inode->i_sem);
mutex_unlock(&inode->i_mutex);
return len - towrite;
}

@ -51,7 +51,7 @@ setxattr(struct dentry *d, char __user *name, void __user *value,
}
}

down(&d->d_inode->i_sem);
mutex_lock(&d->d_inode->i_mutex);
error = security_inode_setxattr(d, kname, kvalue, size, flags);
if (error)
goto out;

@ -73,7 +73,7 @@ setxattr(struct dentry *d, char __user *name, void __user *value,
fsnotify_xattr(d);
}
out:
up(&d->d_inode->i_sem);
mutex_unlock(&d->d_inode->i_mutex);
kfree(kvalue);
return error;
}

@ -323,9 +323,9 @@ removexattr(struct dentry *d, char __user *name)
error = security_inode_removexattr(d, kname);
if (error)
goto out;
down(&d->d_inode->i_sem);
mutex_lock(&d->d_inode->i_mutex);
error = d->d_inode->i_op->removexattr(d, kname);
up(&d->d_inode->i_sem);
mutex_unlock(&d->d_inode->i_mutex);
if (!error)
fsnotify_xattr(d);
}

@ -203,7 +203,7 @@ validate_fields(
ip->i_nlink = va.va_nlink;
ip->i_blocks = va.va_nblocks;

/* we're under i_sem so i_size can't change under us */
/* we're under i_mutex so i_size can't change under us */
if (i_size_read(ip) != va.va_size)
i_size_write(ip, va.va_size);
}

@ -254,7 +254,7 @@ xfs_read(
}

if (unlikely(ioflags & IO_ISDIRECT))
down(&inode->i_sem);
mutex_lock(&inode->i_mutex);
xfs_ilock(ip, XFS_IOLOCK_SHARED);

if (DM_EVENT_ENABLED(vp->v_vfsp, ip, DM_EVENT_READ) &&

@ -286,7 +286,7 @@ xfs_read(

unlock_isem:
if (unlikely(ioflags & IO_ISDIRECT))
up(&inode->i_sem);
mutex_unlock(&inode->i_mutex);
return ret;
}

@ -655,7 +655,7 @@ xfs_write(
iolock = XFS_IOLOCK_EXCL;
locktype = VRWLOCK_WRITE;

down(&inode->i_sem);
mutex_lock(&inode->i_mutex);
} else {
iolock = XFS_IOLOCK_SHARED;
locktype = VRWLOCK_WRITE_DIRECT;

@ -686,7 +686,7 @@ xfs_write(
int dmflags = FILP_DELAY_FLAG(file);

if (need_isem)
dmflags |= DM_FLAGS_ISEM;
dmflags |= DM_FLAGS_IMUX;

xfs_iunlock(xip, XFS_ILOCK_EXCL);
error = XFS_SEND_DATA(xip->i_mount, DM_EVENT_WRITE, vp,

@ -772,7 +772,7 @@ xfs_write(
if (need_isem) {
/* demote the lock now the cached pages are gone */
XFS_ILOCK_DEMOTE(mp, io, XFS_IOLOCK_EXCL);
up(&inode->i_sem);
mutex_unlock(&inode->i_mutex);

iolock = XFS_IOLOCK_SHARED;
locktype = VRWLOCK_WRITE_DIRECT;

@ -817,14 +817,14 @@ xfs_write(

xfs_rwunlock(bdp, locktype);
if (need_isem)
up(&inode->i_sem);
mutex_unlock(&inode->i_mutex);
error = XFS_SEND_NAMESP(xip->i_mount, DM_EVENT_NOSPACE, vp,
DM_RIGHT_NULL, vp, DM_RIGHT_NULL, NULL, NULL,
0, 0, 0); /* Delay flag intentionally unused */
if (error)
goto out_nounlocks;
if (need_isem)
down(&inode->i_sem);
mutex_lock(&inode->i_mutex);
xfs_rwlock(bdp, locktype);
pos = xip->i_d.di_size;
ret = 0;

@ -926,7 +926,7 @@ xfs_write(

xfs_rwunlock(bdp, locktype);
if (need_isem)
up(&inode->i_sem);
mutex_unlock(&inode->i_mutex);

error = sync_page_range(inode, mapping, pos, ret);
if (!error)

@ -938,7 +938,7 @@ xfs_write(
xfs_rwunlock(bdp, locktype);
out_unlock_isem:
if (need_isem)
up(&inode->i_sem);
mutex_unlock(&inode->i_mutex);
out_nounlocks:
return -error;
}

@ -152,7 +152,7 @@ typedef enum {

#define DM_FLAGS_NDELAY 0x001 /* return EAGAIN after dm_pending() */
#define DM_FLAGS_UNWANTED 0x002 /* event not in fsys dm_eventset_t */
#define DM_FLAGS_ISEM 0x004 /* thread holds i_sem */
#define DM_FLAGS_IMUX 0x004 /* thread holds i_mutex */
#define DM_FLAGS_IALLOCSEM_RD 0x010 /* thread holds i_alloc_sem rd */
#define DM_FLAGS_IALLOCSEM_WR 0x020 /* thread holds i_alloc_sem wr */

@ -161,21 +161,21 @@ typedef enum {
*/
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0)
#define DM_SEM_FLAG_RD(ioflags) (((ioflags) & IO_ISDIRECT) ? \
DM_FLAGS_ISEM : 0)
#define DM_SEM_FLAG_WR (DM_FLAGS_IALLOCSEM_WR | DM_FLAGS_ISEM)
DM_FLAGS_IMUX : 0)
#define DM_SEM_FLAG_WR (DM_FLAGS_IALLOCSEM_WR | DM_FLAGS_IMUX)
#endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)) && \
(LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,22))
#define DM_SEM_FLAG_RD(ioflags) (((ioflags) & IO_ISDIRECT) ? \
DM_FLAGS_IALLOCSEM_RD : DM_FLAGS_ISEM)
#define DM_SEM_FLAG_WR (DM_FLAGS_IALLOCSEM_WR | DM_FLAGS_ISEM)
DM_FLAGS_IALLOCSEM_RD : DM_FLAGS_IMUX)
#define DM_SEM_FLAG_WR (DM_FLAGS_IALLOCSEM_WR | DM_FLAGS_IMUX)
#endif

#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,4,21)
#define DM_SEM_FLAG_RD(ioflags) (((ioflags) & IO_ISDIRECT) ? \
0 : DM_FLAGS_ISEM)
#define DM_SEM_FLAG_WR (DM_FLAGS_ISEM)
0 : DM_FLAGS_IMUX)
#define DM_SEM_FLAG_WR (DM_FLAGS_IMUX)
#endif

@ -87,7 +87,7 @@ struct ext3_inode_info {
#ifdef CONFIG_EXT3_FS_XATTR
/*
* Extended attributes can be read independently of the main file
* data. Taking i_sem even when reading would cause contention
* data. Taking i_mutex even when reading would cause contention
* between readers of EAs and writers of regular file data, so
* instead we synchronize on xattr_sem when reading or changing
* EAs.

@ -219,6 +219,7 @@ extern int dir_notify_enable;
#include <linux/prio_tree.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/mutex.h>

#include <asm/atomic.h>
#include <asm/semaphore.h>

@ -484,7 +485,7 @@ struct inode {
unsigned long i_blocks;
unsigned short i_bytes;
spinlock_t i_lock; /* i_blocks, i_bytes, maybe i_size */
struct semaphore i_sem;
struct mutex i_mutex;
struct rw_semaphore i_alloc_sem;
struct inode_operations *i_op;
struct file_operations *i_fop; /* former ->i_op->default_file_ops */

@ -1191,7 +1192,7 @@ int sync_inode(struct inode *inode, struct writeback_control *wbc);
* directory. The name should be stored in the @name (with the
* understanding that it is already pointing to a a %NAME_MAX+1 sized
* buffer. get_name() should return %0 on success, a negative error code
* or error. @get_name will be called without @parent->i_sem held.
* or error. @get_name will be called without @parent->i_mutex held.
*
* get_parent:
* @get_parent should find the parent directory for the given @child which

@ -1213,7 +1214,7 @@ int sync_inode(struct inode *inode, struct writeback_control *wbc);
* nfsd_find_fh_dentry() in either the @obj or @parent parameters.
*
* Locking rules:
* get_parent is called with child->d_inode->i_sem down
* get_parent is called with child->d_inode->i_mutex down
* get_name is not (which is possibly inconsistent)
*/

@ -8,11 +8,11 @@
#include <asm/semaphore.h>

struct jffs2_inode_info {
/* We need an internal semaphore similar to inode->i_sem.
/* We need an internal mutex similar to inode->i_mutex.
Unfortunately, we can't used the existing one, because
either the GC would deadlock, or we'd have to release it
before letting GC proceed. Or we'd have to put ugliness
into the GC code so it didn't attempt to obtain the i_sem
into the GC code so it didn't attempt to obtain the i_mutex
for the inode(s) which are already locked */
struct semaphore sem;

@ -294,7 +294,7 @@ fill_post_wcc(struct svc_fh *fhp)
/*
* Lock a file handle/inode
* NOTE: both fh_lock and fh_unlock are done "by hand" in
* vfs.c:nfsd_rename as it needs to grab 2 i_sem's at once
* vfs.c:nfsd_rename as it needs to grab 2 i_mutex's at once
* so, any changes here should be reflected there.
*/
static inline void

@ -317,7 +317,7 @@ fh_lock(struct svc_fh *fhp)
}

inode = dentry->d_inode;
down(&inode->i_sem);
mutex_lock(&inode->i_mutex);
fill_pre_wcc(fhp);
fhp->fh_locked = 1;
}

@ -333,7 +333,7 @@ fh_unlock(struct svc_fh *fhp)

if (fhp->fh_locked) {
fill_post_wcc(fhp);
up(&fhp->fh_dentry->d_inode->i_sem);
mutex_unlock(&fhp->fh_dentry->d_inode->i_mutex);
fhp->fh_locked = 0;
}
}

@ -37,7 +37,7 @@ struct pipe_inode_info {
memory allocation, whereas PIPE_BUF makes atomicity guarantees. */
#define PIPE_SIZE PAGE_SIZE

#define PIPE_SEM(inode) (&(inode).i_sem)
#define PIPE_MUTEX(inode) (&(inode).i_mutex)
#define PIPE_WAIT(inode) (&(inode).i_pipe->wait)
#define PIPE_READERS(inode) ((inode).i_pipe->readers)
#define PIPE_WRITERS(inode) ((inode).i_pipe->writers)

@ -1857,7 +1857,7 @@ void padd_item(char *item, int total_length, int length);
#define GET_BLOCK_CREATE 1 /* add anything you need to find block */
#define GET_BLOCK_NO_HOLE 2 /* return -ENOENT for file holes */
#define GET_BLOCK_READ_DIRECT 4 /* read the tail if indirect item not found */
#define GET_BLOCK_NO_ISEM 8 /* i_sem is not held, don't preallocate */
#define GET_BLOCK_NO_IMUX 8 /* i_mutex is not held, don't preallocate */
#define GET_BLOCK_NO_DANGLE 16 /* don't leave any transactions running */

int restart_transaction(struct reiserfs_transaction_handle *th,

@ -660,7 +660,7 @@ asmlinkage long sys_mq_open(const char __user *u_name, int oflag, mode_t mode,
if (fd < 0)
goto out_putname;

down(&mqueue_mnt->mnt_root->d_inode->i_sem);
mutex_lock(&mqueue_mnt->mnt_root->d_inode->i_mutex);
dentry = lookup_one_len(name, mqueue_mnt->mnt_root, strlen(name));
if (IS_ERR(dentry)) {
error = PTR_ERR(dentry);

@ -697,7 +697,7 @@ asmlinkage long sys_mq_open(const char __user *u_name, int oflag, mode_t mode,
out_err:
fd = error;
out_upsem:
up(&mqueue_mnt->mnt_root->d_inode->i_sem);
mutex_unlock(&mqueue_mnt->mnt_root->d_inode->i_mutex);
out_putname:
putname(name);
return fd;

@ -714,7 +714,7 @@ asmlinkage long sys_mq_unlink(const char __user *u_name)
if (IS_ERR(name))
return PTR_ERR(name);

down(&mqueue_mnt->mnt_root->d_inode->i_sem);
mutex_lock(&mqueue_mnt->mnt_root->d_inode->i_mutex);
dentry = lookup_one_len(name, mqueue_mnt->mnt_root, strlen(name));
if (IS_ERR(dentry)) {
err = PTR_ERR(dentry);

@ -735,7 +735,7 @@ asmlinkage long sys_mq_unlink(const char __user *u_name)
dput(dentry);

out_unlock:
up(&mqueue_mnt->mnt_root->d_inode->i_sem);
mutex_unlock(&mqueue_mnt->mnt_root->d_inode->i_mutex);
putname(name);
if (inode)
iput(inode);

@ -1513,7 +1513,7 @@ static int cpuset_add_file(struct dentry *dir, const struct cftype *cft)
struct dentry *dentry;
int error;

down(&dir->d_inode->i_sem);
mutex_lock(&dir->d_inode->i_mutex);
dentry = cpuset_get_dentry(dir, cft->name);
if (!IS_ERR(dentry)) {
error = cpuset_create_file(dentry, 0644 | S_IFREG);

@ -1522,7 +1522,7 @@ static int cpuset_add_file(struct dentry *dir, const struct cftype *cft)
dput(dentry);
} else
error = PTR_ERR(dentry);
up(&dir->d_inode->i_sem);
mutex_unlock(&dir->d_inode->i_mutex);
return error;
}

@ -1793,7 +1793,7 @@ static long cpuset_create(struct cpuset *parent, const char *name, int mode)

/*
* Release manage_sem before cpuset_populate_dir() because it
* will down() this new directory's i_sem and if we race with
* will down() this new directory's i_mutex and if we race with
* another mkdir, we might deadlock.
*/
up(&manage_sem);

@ -1812,7 +1812,7 @@ static int cpuset_mkdir(struct inode *dir, struct dentry *dentry, int mode)
{
struct cpuset *c_parent = dentry->d_parent->d_fsdata;

/* the vfs holds inode->i_sem already */
/* the vfs holds inode->i_mutex already */
return cpuset_create(c_parent, dentry->d_name.name, mode | S_IFDIR);
}

@ -1823,7 +1823,7 @@ static int cpuset_rmdir(struct inode *unused_dir, struct dentry *dentry)
struct cpuset *parent;
char *pathbuf = NULL;

/* the vfs holds both inode->i_sem already */
/* the vfs holds both inode->i_mutex already */

down(&manage_sem);
cpuset_update_task_memory_state();
Some files were not shown because too many files have changed in this diff.