[PATCH] sem2mutex: inotify
Semaphore to mutex conversion.

The conversion was generated via scripts, and the result was validated automatically via a script as well.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Cc: John McCutchan <ttb@tentacle.dhs.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Acked-by: Robert Love <rml@novell.com>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
This commit is contained in:
parent
d3be915fc5
commit
d4f9af9dac
3 changed files with 57 additions and 57 deletions
|
@@ -206,7 +206,7 @@ void inode_init_once(struct inode *inode)
|
|||
i_size_ordered_init(inode);
|
||||
#ifdef CONFIG_INOTIFY
|
||||
INIT_LIST_HEAD(&inode->inotify_watches);
|
||||
sema_init(&inode->inotify_sem, 1);
|
||||
mutex_init(&inode->inotify_mutex);
|
||||
#endif
|
||||
}
|
||||
|
||||
|
|
110
fs/inotify.c
110
fs/inotify.c
|
@@ -56,8 +56,8 @@ int inotify_max_queued_events;
|
|||
* dentry->d_lock (used to keep d_move() away from dentry->d_parent)
|
||||
* iprune_sem (synchronize shrink_icache_memory())
|
||||
* inode_lock (protects the super_block->s_inodes list)
|
||||
* inode->inotify_sem (protects inode->inotify_watches and watches->i_list)
|
||||
* inotify_dev->sem (protects inotify_device and watches->d_list)
|
||||
* inode->inotify_mutex (protects inode->inotify_watches and watches->i_list)
|
||||
* inotify_dev->mutex (protects inotify_device and watches->d_list)
|
||||
*/
|
||||
|
||||
/*
|
||||
|
@@ -79,12 +79,12 @@ int inotify_max_queued_events;
|
|||
/*
|
||||
* struct inotify_device - represents an inotify instance
|
||||
*
|
||||
* This structure is protected by the semaphore 'sem'.
|
||||
* This structure is protected by the mutex 'mutex'.
|
||||
*/
|
||||
struct inotify_device {
|
||||
wait_queue_head_t wq; /* wait queue for i/o */
|
||||
struct idr idr; /* idr mapping wd -> watch */
|
||||
struct semaphore sem; /* protects this bad boy */
|
||||
struct mutex mutex; /* protects this bad boy */
|
||||
struct list_head events; /* list of queued events */
|
||||
struct list_head watches; /* list of watches */
|
||||
atomic_t count; /* reference count */
|
||||
|
@@ -101,7 +101,7 @@ struct inotify_device {
|
|||
* device. In read(), this list is walked and all events that can fit in the
|
||||
* buffer are returned.
|
||||
*
|
||||
* Protected by dev->sem of the device in which we are queued.
|
||||
* Protected by dev->mutex of the device in which we are queued.
|
||||
*/
|
||||
struct inotify_kernel_event {
|
||||
struct inotify_event event; /* the user-space event */
|
||||
|
@@ -112,8 +112,8 @@ struct inotify_kernel_event {
|
|||
/*
|
||||
* struct inotify_watch - represents a watch request on a specific inode
|
||||
*
|
||||
* d_list is protected by dev->sem of the associated watch->dev.
|
||||
* i_list and mask are protected by inode->inotify_sem of the associated inode.
|
||||
* d_list is protected by dev->mutex of the associated watch->dev.
|
||||
* i_list and mask are protected by inode->inotify_mutex of the associated inode.
|
||||
* dev, inode, and wd are never written to once the watch is created.
|
||||
*/
|
||||
struct inotify_watch {
|
||||
|
@@ -261,7 +261,7 @@ static struct inotify_kernel_event * kernel_event(s32 wd, u32 mask, u32 cookie,
|
|||
/*
|
||||
* inotify_dev_get_event - return the next event in the given dev's queue
|
||||
*
|
||||
* Caller must hold dev->sem.
|
||||
* Caller must hold dev->mutex.
|
||||
*/
|
||||
static inline struct inotify_kernel_event *
|
||||
inotify_dev_get_event(struct inotify_device *dev)
|
||||
|
@@ -272,7 +272,7 @@ inotify_dev_get_event(struct inotify_device *dev)
|
|||
/*
|
||||
* inotify_dev_queue_event - add a new event to the given device
|
||||
*
|
||||
* Caller must hold dev->sem. Can sleep (calls kernel_event()).
|
||||
* Caller must hold dev->mutex. Can sleep (calls kernel_event()).
|
||||
*/
|
||||
static void inotify_dev_queue_event(struct inotify_device *dev,
|
||||
struct inotify_watch *watch, u32 mask,
|
||||
|
@@ -315,7 +315,7 @@ static void inotify_dev_queue_event(struct inotify_device *dev,
|
|||
/*
|
||||
* remove_kevent - cleans up and ultimately frees the given kevent
|
||||
*
|
||||
* Caller must hold dev->sem.
|
||||
* Caller must hold dev->mutex.
|
||||
*/
|
||||
static void remove_kevent(struct inotify_device *dev,
|
||||
struct inotify_kernel_event *kevent)
|
||||
|
@@ -332,7 +332,7 @@ static void remove_kevent(struct inotify_device *dev,
|
|||
/*
|
||||
* inotify_dev_event_dequeue - destroy an event on the given device
|
||||
*
|
||||
* Caller must hold dev->sem.
|
||||
* Caller must hold dev->mutex.
|
||||
*/
|
||||
static void inotify_dev_event_dequeue(struct inotify_device *dev)
|
||||
{
|
||||
|
@@ -346,7 +346,7 @@ static void inotify_dev_event_dequeue(struct inotify_device *dev)
|
|||
/*
|
||||
* inotify_dev_get_wd - returns the next WD for use by the given dev
|
||||
*
|
||||
* Callers must hold dev->sem. This function can sleep.
|
||||
* Callers must hold dev->mutex. This function can sleep.
|
||||
*/
|
||||
static int inotify_dev_get_wd(struct inotify_device *dev,
|
||||
struct inotify_watch *watch)
|
||||
|
@@ -383,7 +383,7 @@ static int find_inode(const char __user *dirname, struct nameidata *nd,
|
|||
/*
|
||||
* create_watch - creates a watch on the given device.
|
||||
*
|
||||
* Callers must hold dev->sem. Calls inotify_dev_get_wd() so may sleep.
|
||||
* Callers must hold dev->mutex. Calls inotify_dev_get_wd() so may sleep.
|
||||
* Both 'dev' and 'inode' (by way of nameidata) need to be pinned.
|
||||
*/
|
||||
static struct inotify_watch *create_watch(struct inotify_device *dev,
|
||||
|
@@ -434,7 +434,7 @@ static struct inotify_watch *create_watch(struct inotify_device *dev,
|
|||
/*
|
||||
* inotify_find_dev - find the watch associated with the given inode and dev
|
||||
*
|
||||
* Callers must hold inode->inotify_sem.
|
||||
* Callers must hold inode->inotify_mutex.
|
||||
*/
|
||||
static struct inotify_watch *inode_find_dev(struct inode *inode,
|
||||
struct inotify_device *dev)
|
||||
|
@@ -469,7 +469,7 @@ static void remove_watch_no_event(struct inotify_watch *watch,
|
|||
* the IN_IGNORED event to the given device signifying that the inode is no
|
||||
* longer watched.
|
||||
*
|
||||
* Callers must hold both inode->inotify_sem and dev->sem. We drop a
|
||||
* Callers must hold both inode->inotify_mutex and dev->mutex. We drop a
|
||||
* reference to the inode before returning.
|
||||
*
|
||||
* The inode is not iput() so as to remain atomic. If the inode needs to be
|
||||
|
@@ -507,21 +507,21 @@ void inotify_inode_queue_event(struct inode *inode, u32 mask, u32 cookie,
|
|||
if (!inotify_inode_watched(inode))
|
||||
return;
|
||||
|
||||
down(&inode->inotify_sem);
|
||||
mutex_lock(&inode->inotify_mutex);
|
||||
list_for_each_entry_safe(watch, next, &inode->inotify_watches, i_list) {
|
||||
u32 watch_mask = watch->mask;
|
||||
if (watch_mask & mask) {
|
||||
struct inotify_device *dev = watch->dev;
|
||||
get_inotify_watch(watch);
|
||||
down(&dev->sem);
|
||||
mutex_lock(&dev->mutex);
|
||||
inotify_dev_queue_event(dev, watch, mask, cookie, name);
|
||||
if (watch_mask & IN_ONESHOT)
|
||||
remove_watch_no_event(watch, dev);
|
||||
up(&dev->sem);
|
||||
mutex_unlock(&dev->mutex);
|
||||
put_inotify_watch(watch);
|
||||
}
|
||||
}
|
||||
up(&inode->inotify_sem);
|
||||
mutex_unlock(&inode->inotify_mutex);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(inotify_inode_queue_event);
|
||||
|
||||
|
@@ -626,16 +626,16 @@ void inotify_unmount_inodes(struct list_head *list)
|
|||
iput(need_iput_tmp);
|
||||
|
||||
/* for each watch, send IN_UNMOUNT and then remove it */
|
||||
down(&inode->inotify_sem);
|
||||
mutex_lock(&inode->inotify_mutex);
|
||||
watches = &inode->inotify_watches;
|
||||
list_for_each_entry_safe(watch, next_w, watches, i_list) {
|
||||
struct inotify_device *dev = watch->dev;
|
||||
down(&dev->sem);
|
||||
mutex_lock(&dev->mutex);
|
||||
inotify_dev_queue_event(dev, watch, IN_UNMOUNT,0,NULL);
|
||||
remove_watch(watch, dev);
|
||||
up(&dev->sem);
|
||||
mutex_unlock(&dev->mutex);
|
||||
}
|
||||
up(&inode->inotify_sem);
|
||||
mutex_unlock(&inode->inotify_mutex);
|
||||
iput(inode);
|
||||
|
||||
spin_lock(&inode_lock);
|
||||
|
@@ -651,14 +651,14 @@ void inotify_inode_is_dead(struct inode *inode)
|
|||
{
|
||||
struct inotify_watch *watch, *next;
|
||||
|
||||
down(&inode->inotify_sem);
|
||||
mutex_lock(&inode->inotify_mutex);
|
||||
list_for_each_entry_safe(watch, next, &inode->inotify_watches, i_list) {
|
||||
struct inotify_device *dev = watch->dev;
|
||||
down(&dev->sem);
|
||||
mutex_lock(&dev->mutex);
|
||||
remove_watch(watch, dev);
|
||||
up(&dev->sem);
|
||||
mutex_unlock(&dev->mutex);
|
||||
}
|
||||
up(&inode->inotify_sem);
|
||||
mutex_unlock(&inode->inotify_mutex);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(inotify_inode_is_dead);
|
||||
|
||||
|
@@ -670,10 +670,10 @@ static unsigned int inotify_poll(struct file *file, poll_table *wait)
|
|||
int ret = 0;
|
||||
|
||||
poll_wait(file, &dev->wq, wait);
|
||||
down(&dev->sem);
|
||||
mutex_lock(&dev->mutex);
|
||||
if (!list_empty(&dev->events))
|
||||
ret = POLLIN | POLLRDNORM;
|
||||
up(&dev->sem);
|
||||
mutex_unlock(&dev->mutex);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
@@ -695,9 +695,9 @@ static ssize_t inotify_read(struct file *file, char __user *buf,
|
|||
|
||||
prepare_to_wait(&dev->wq, &wait, TASK_INTERRUPTIBLE);
|
||||
|
||||
down(&dev->sem);
|
||||
mutex_lock(&dev->mutex);
|
||||
events = !list_empty(&dev->events);
|
||||
up(&dev->sem);
|
||||
mutex_unlock(&dev->mutex);
|
||||
if (events) {
|
||||
ret = 0;
|
||||
break;
|
||||
|
@@ -720,7 +720,7 @@ static ssize_t inotify_read(struct file *file, char __user *buf,
|
|||
if (ret)
|
||||
return ret;
|
||||
|
||||
down(&dev->sem);
|
||||
mutex_lock(&dev->mutex);
|
||||
while (1) {
|
||||
struct inotify_kernel_event *kevent;
|
||||
|
||||
|
@@ -750,7 +750,7 @@ static ssize_t inotify_read(struct file *file, char __user *buf,
|
|||
|
||||
remove_kevent(dev, kevent);
|
||||
}
|
||||
up(&dev->sem);
|
||||
mutex_unlock(&dev->mutex);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
@@ -763,37 +763,37 @@ static int inotify_release(struct inode *ignored, struct file *file)
|
|||
* Destroy all of the watches on this device. Unfortunately, not very
|
||||
* pretty. We cannot do a simple iteration over the list, because we
|
||||
* do not know the inode until we iterate to the watch. But we need to
|
||||
* hold inode->inotify_sem before dev->sem. The following works.
|
||||
* hold inode->inotify_mutex before dev->mutex. The following works.
|
||||
*/
|
||||
while (1) {
|
||||
struct inotify_watch *watch;
|
||||
struct list_head *watches;
|
||||
struct inode *inode;
|
||||
|
||||
down(&dev->sem);
|
||||
mutex_lock(&dev->mutex);
|
||||
watches = &dev->watches;
|
||||
if (list_empty(watches)) {
|
||||
up(&dev->sem);
|
||||
mutex_unlock(&dev->mutex);
|
||||
break;
|
||||
}
|
||||
watch = list_entry(watches->next, struct inotify_watch, d_list);
|
||||
get_inotify_watch(watch);
|
||||
up(&dev->sem);
|
||||
mutex_unlock(&dev->mutex);
|
||||
|
||||
inode = watch->inode;
|
||||
down(&inode->inotify_sem);
|
||||
down(&dev->sem);
|
||||
mutex_lock(&inode->inotify_mutex);
|
||||
mutex_lock(&dev->mutex);
|
||||
remove_watch_no_event(watch, dev);
|
||||
up(&dev->sem);
|
||||
up(&inode->inotify_sem);
|
||||
mutex_unlock(&dev->mutex);
|
||||
mutex_unlock(&inode->inotify_mutex);
|
||||
put_inotify_watch(watch);
|
||||
}
|
||||
|
||||
/* destroy all of the events on this device */
|
||||
down(&dev->sem);
|
||||
mutex_lock(&dev->mutex);
|
||||
while (!list_empty(&dev->events))
|
||||
inotify_dev_event_dequeue(dev);
|
||||
up(&dev->sem);
|
||||
mutex_unlock(&dev->mutex);
|
||||
|
||||
/* free this device: the put matching the get in inotify_init() */
|
||||
put_inotify_dev(dev);
|
||||
|
@@ -811,26 +811,26 @@ static int inotify_ignore(struct inotify_device *dev, s32 wd)
|
|||
struct inotify_watch *watch;
|
||||
struct inode *inode;
|
||||
|
||||
down(&dev->sem);
|
||||
mutex_lock(&dev->mutex);
|
||||
watch = idr_find(&dev->idr, wd);
|
||||
if (unlikely(!watch)) {
|
||||
up(&dev->sem);
|
||||
mutex_unlock(&dev->mutex);
|
||||
return -EINVAL;
|
||||
}
|
||||
get_inotify_watch(watch);
|
||||
inode = watch->inode;
|
||||
up(&dev->sem);
|
||||
mutex_unlock(&dev->mutex);
|
||||
|
||||
down(&inode->inotify_sem);
|
||||
down(&dev->sem);
|
||||
mutex_lock(&inode->inotify_mutex);
|
||||
mutex_lock(&dev->mutex);
|
||||
|
||||
/* make sure that we did not race */
|
||||
watch = idr_find(&dev->idr, wd);
|
||||
if (likely(watch))
|
||||
remove_watch(watch, dev);
|
||||
|
||||
up(&dev->sem);
|
||||
up(&inode->inotify_sem);
|
||||
mutex_unlock(&dev->mutex);
|
||||
mutex_unlock(&inode->inotify_mutex);
|
||||
put_inotify_watch(watch);
|
||||
|
||||
return 0;
|
||||
|
@@ -905,7 +905,7 @@ asmlinkage long sys_inotify_init(void)
|
|||
INIT_LIST_HEAD(&dev->events);
|
||||
INIT_LIST_HEAD(&dev->watches);
|
||||
init_waitqueue_head(&dev->wq);
|
||||
sema_init(&dev->sem, 1);
|
||||
mutex_init(&dev->mutex);
|
||||
dev->event_count = 0;
|
||||
dev->queue_size = 0;
|
||||
dev->max_events = inotify_max_queued_events;
|
||||
|
@@ -960,8 +960,8 @@ asmlinkage long sys_inotify_add_watch(int fd, const char __user *path, u32 mask)
|
|||
inode = nd.dentry->d_inode;
|
||||
dev = filp->private_data;
|
||||
|
||||
down(&inode->inotify_sem);
|
||||
down(&dev->sem);
|
||||
mutex_lock(&inode->inotify_mutex);
|
||||
mutex_lock(&dev->mutex);
|
||||
|
||||
if (mask & IN_MASK_ADD)
|
||||
mask_add = 1;
|
||||
|
@@ -998,8 +998,8 @@ asmlinkage long sys_inotify_add_watch(int fd, const char __user *path, u32 mask)
|
|||
list_add(&watch->i_list, &inode->inotify_watches);
|
||||
ret = watch->wd;
|
||||
out:
|
||||
up(&dev->sem);
|
||||
up(&inode->inotify_sem);
|
||||
mutex_unlock(&dev->mutex);
|
||||
mutex_unlock(&inode->inotify_mutex);
|
||||
path_release(&nd);
|
||||
fput_and_out:
|
||||
fput_light(filp, fput_needed);
|
||||
|
|
|
@@ -509,7 +509,7 @@ struct inode {
|
|||
|
||||
#ifdef CONFIG_INOTIFY
|
||||
struct list_head inotify_watches; /* watches on this inode */
|
||||
struct semaphore inotify_sem; /* protects the watches list */
|
||||
struct mutex inotify_mutex; /* protects the watches list */
|
||||
#endif
|
||||
|
||||
unsigned long i_state;
|
||||
|
|
Loading…
Reference in a new issue