[PATCH] sem2mutex: iprune
Semaphore to mutex conversion.

The conversion was generated via scripts, and the result was validated automatically via a script as well.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
commit f24075bd0c
parent a11f3a0574

3 changed files with 12 additions and 12 deletions
fs/inode.c         | 16
fs/inotify.c       |  6
include/linux/fs.h |  2
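For context, the scripted sem2mutex conversion replaces semaphores that are only ever used as binary locks with real mutexes. Below is a minimal illustrative sketch of that pattern under the new mutex API; it is not part of the patch, and the demo_lock / demo_work names are hypothetical (only iprune_sem / iprune_mutex appear in the diff itself).

    #include <linux/mutex.h>	/* DEFINE_MUTEX, mutex_lock, mutex_unlock */

    /*
     * Before: DECLARE_MUTEX(demo_sem) defined a semaphore initialized to 1,
     * taken with down(&demo_sem) and released with up(&demo_sem).
     * After: a real mutex, which is what this patch does for iprune.
     */
    static DEFINE_MUTEX(demo_lock);

    static void demo_work(void)
    {
    	mutex_lock(&demo_lock);		/* was: down(&demo_sem); */
    	/* ... exclusive section, e.g. shrinking the inode cache ... */
    	mutex_unlock(&demo_lock);	/* was: up(&demo_sem); */
    }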
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -84,14 +84,14 @@ static struct hlist_head *inode_hashtable;
 DEFINE_SPINLOCK(inode_lock);
 
 /*
- * iprune_sem provides exclusion between the kswapd or try_to_free_pages
+ * iprune_mutex provides exclusion between the kswapd or try_to_free_pages
  * icache shrinking path, and the umount path. Without this exclusion,
  * by the time prune_icache calls iput for the inode whose pages it has
  * been invalidating, or by the time it calls clear_inode & destroy_inode
  * from its final dispose_list, the struct super_block they refer to
  * (for inode->i_sb->s_op) may already have been freed and reused.
  */
-DECLARE_MUTEX(iprune_sem);
+DEFINE_MUTEX(iprune_mutex);
 
 /*
  * Statistics gathering..
@@ -319,7 +319,7 @@ static int invalidate_list(struct list_head *head, struct list_head *dispose)
 		/*
 		 * We can reschedule here without worrying about the list's
 		 * consistency because the per-sb list of inodes must not
-		 * change during umount anymore, and because iprune_sem keeps
+		 * change during umount anymore, and because iprune_mutex keeps
 		 * shrink_icache_memory() away.
 		 */
 		cond_resched_lock(&inode_lock);
@@ -355,14 +355,14 @@ int invalidate_inodes(struct super_block * sb)
 	int busy;
 	LIST_HEAD(throw_away);
 
-	down(&iprune_sem);
+	mutex_lock(&iprune_mutex);
 	spin_lock(&inode_lock);
 	inotify_unmount_inodes(&sb->s_inodes);
 	busy = invalidate_list(&sb->s_inodes, &throw_away);
 	spin_unlock(&inode_lock);
 
 	dispose_list(&throw_away);
-	up(&iprune_sem);
+	mutex_unlock(&iprune_mutex);
 
 	return busy;
 }
@@ -377,7 +377,7 @@ int __invalidate_device(struct block_device *bdev)
 	if (sb) {
 		/*
 		 * no need to lock the super, get_super holds the
-		 * read semaphore so the filesystem cannot go away
+		 * read mutex so the filesystem cannot go away
 		 * under us (->put_super runs with the write lock
 		 * hold).
 		 */
@@ -423,7 +423,7 @@ static void prune_icache(int nr_to_scan)
 	int nr_scanned;
 	unsigned long reap = 0;
 
-	down(&iprune_sem);
+	mutex_lock(&iprune_mutex);
 	spin_lock(&inode_lock);
 	for (nr_scanned = 0; nr_scanned < nr_to_scan; nr_scanned++) {
 		struct inode *inode;
@@ -459,7 +459,7 @@ static void prune_icache(int nr_to_scan)
 	spin_unlock(&inode_lock);
 
 	dispose_list(&freeable);
-	up(&iprune_sem);
+	mutex_unlock(&iprune_mutex);
 
 	if (current_is_kswapd())
 		mod_page_state(kswapd_inodesteal, reap);
--- a/fs/inotify.c
+++ b/fs/inotify.c
@@ -54,7 +54,7 @@ int inotify_max_queued_events;
  * Lock ordering:
  *
  * dentry->d_lock (used to keep d_move() away from dentry->d_parent)
- * iprune_sem (synchronize shrink_icache_memory())
+ * iprune_mutex (synchronize shrink_icache_memory())
  * inode_lock (protects the super_block->s_inodes list)
  * inode->inotify_mutex (protects inode->inotify_watches and watches->i_list)
  * inotify_dev->mutex (protects inotify_device and watches->d_list)
@@ -569,7 +569,7 @@ EXPORT_SYMBOL_GPL(inotify_get_cookie);
  * @list: list of inodes being unmounted (sb->s_inodes)
  *
  * Called with inode_lock held, protecting the unmounting super block's list
- * of inodes, and with iprune_sem held, keeping shrink_icache_memory() at bay.
+ * of inodes, and with iprune_mutex held, keeping shrink_icache_memory() at bay.
  * We temporarily drop inode_lock, however, and CAN block.
  */
 void inotify_unmount_inodes(struct list_head *list)
@@ -618,7 +618,7 @@ void inotify_unmount_inodes(struct list_head *list)
 		 * We can safely drop inode_lock here because we hold
 		 * references on both inode and next_i. Also no new inodes
 		 * will be added since the umount has begun. Finally,
-		 * iprune_sem keeps shrink_icache_memory() away.
+		 * iprune_mutex keeps shrink_icache_memory() away.
 		 */
 		spin_unlock(&inode_lock);
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1534,7 +1534,7 @@ extern void destroy_inode(struct inode *);
 extern struct inode *new_inode(struct super_block *);
 extern int remove_suid(struct dentry *);
 extern void remove_dquot_ref(struct super_block *, int, struct list_head *);
-extern struct semaphore iprune_sem;
+extern struct mutex iprune_mutex;
 
 extern void __insert_inode_hash(struct inode *, unsigned long hashval);
 extern void remove_inode_hash(struct inode *);