Commit d4f9af9d authored by Ingo Molnar, committed by Linus Torvalds

[PATCH] sem2mutex: inotify

Semaphore to mutex conversion.

The conversion was generated via scripts, and the result was validated
automatically via a script as well.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Cc: John McCutchan <ttb@tentacle.dhs.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Acked-by: Robert Love <rml@novell.com>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent d3be915f
...@@ -206,7 +206,7 @@ void inode_init_once(struct inode *inode) ...@@ -206,7 +206,7 @@ void inode_init_once(struct inode *inode)
i_size_ordered_init(inode); i_size_ordered_init(inode);
#ifdef CONFIG_INOTIFY #ifdef CONFIG_INOTIFY
INIT_LIST_HEAD(&inode->inotify_watches); INIT_LIST_HEAD(&inode->inotify_watches);
sema_init(&inode->inotify_sem, 1); mutex_init(&inode->inotify_mutex);
#endif #endif
} }
......
...@@ -56,8 +56,8 @@ int inotify_max_queued_events; ...@@ -56,8 +56,8 @@ int inotify_max_queued_events;
* dentry->d_lock (used to keep d_move() away from dentry->d_parent) * dentry->d_lock (used to keep d_move() away from dentry->d_parent)
* iprune_sem (synchronize shrink_icache_memory()) * iprune_sem (synchronize shrink_icache_memory())
* inode_lock (protects the super_block->s_inodes list) * inode_lock (protects the super_block->s_inodes list)
* inode->inotify_sem (protects inode->inotify_watches and watches->i_list) * inode->inotify_mutex (protects inode->inotify_watches and watches->i_list)
* inotify_dev->sem (protects inotify_device and watches->d_list) * inotify_dev->mutex (protects inotify_device and watches->d_list)
*/ */
/* /*
...@@ -79,12 +79,12 @@ int inotify_max_queued_events; ...@@ -79,12 +79,12 @@ int inotify_max_queued_events;
/* /*
* struct inotify_device - represents an inotify instance * struct inotify_device - represents an inotify instance
* *
* This structure is protected by the semaphore 'sem'. * This structure is protected by the mutex 'mutex'.
*/ */
struct inotify_device { struct inotify_device {
wait_queue_head_t wq; /* wait queue for i/o */ wait_queue_head_t wq; /* wait queue for i/o */
struct idr idr; /* idr mapping wd -> watch */ struct idr idr; /* idr mapping wd -> watch */
struct semaphore sem; /* protects this bad boy */ struct mutex mutex; /* protects this bad boy */
struct list_head events; /* list of queued events */ struct list_head events; /* list of queued events */
struct list_head watches; /* list of watches */ struct list_head watches; /* list of watches */
atomic_t count; /* reference count */ atomic_t count; /* reference count */
...@@ -101,7 +101,7 @@ struct inotify_device { ...@@ -101,7 +101,7 @@ struct inotify_device {
* device. In read(), this list is walked and all events that can fit in the * device. In read(), this list is walked and all events that can fit in the
* buffer are returned. * buffer are returned.
* *
* Protected by dev->sem of the device in which we are queued. * Protected by dev->mutex of the device in which we are queued.
*/ */
struct inotify_kernel_event { struct inotify_kernel_event {
struct inotify_event event; /* the user-space event */ struct inotify_event event; /* the user-space event */
...@@ -112,8 +112,8 @@ struct inotify_kernel_event { ...@@ -112,8 +112,8 @@ struct inotify_kernel_event {
/* /*
* struct inotify_watch - represents a watch request on a specific inode * struct inotify_watch - represents a watch request on a specific inode
* *
* d_list is protected by dev->sem of the associated watch->dev. * d_list is protected by dev->mutex of the associated watch->dev.
* i_list and mask are protected by inode->inotify_sem of the associated inode. * i_list and mask are protected by inode->inotify_mutex of the associated inode.
* dev, inode, and wd are never written to once the watch is created. * dev, inode, and wd are never written to once the watch is created.
*/ */
struct inotify_watch { struct inotify_watch {
...@@ -261,7 +261,7 @@ static struct inotify_kernel_event * kernel_event(s32 wd, u32 mask, u32 cookie, ...@@ -261,7 +261,7 @@ static struct inotify_kernel_event * kernel_event(s32 wd, u32 mask, u32 cookie,
/* /*
* inotify_dev_get_event - return the next event in the given dev's queue * inotify_dev_get_event - return the next event in the given dev's queue
* *
* Caller must hold dev->sem. * Caller must hold dev->mutex.
*/ */
static inline struct inotify_kernel_event * static inline struct inotify_kernel_event *
inotify_dev_get_event(struct inotify_device *dev) inotify_dev_get_event(struct inotify_device *dev)
...@@ -272,7 +272,7 @@ inotify_dev_get_event(struct inotify_device *dev) ...@@ -272,7 +272,7 @@ inotify_dev_get_event(struct inotify_device *dev)
/* /*
* inotify_dev_queue_event - add a new event to the given device * inotify_dev_queue_event - add a new event to the given device
* *
* Caller must hold dev->sem. Can sleep (calls kernel_event()). * Caller must hold dev->mutex. Can sleep (calls kernel_event()).
*/ */
static void inotify_dev_queue_event(struct inotify_device *dev, static void inotify_dev_queue_event(struct inotify_device *dev,
struct inotify_watch *watch, u32 mask, struct inotify_watch *watch, u32 mask,
...@@ -315,7 +315,7 @@ static void inotify_dev_queue_event(struct inotify_device *dev, ...@@ -315,7 +315,7 @@ static void inotify_dev_queue_event(struct inotify_device *dev,
/* /*
* remove_kevent - cleans up and ultimately frees the given kevent * remove_kevent - cleans up and ultimately frees the given kevent
* *
* Caller must hold dev->sem. * Caller must hold dev->mutex.
*/ */
static void remove_kevent(struct inotify_device *dev, static void remove_kevent(struct inotify_device *dev,
struct inotify_kernel_event *kevent) struct inotify_kernel_event *kevent)
...@@ -332,7 +332,7 @@ static void remove_kevent(struct inotify_device *dev, ...@@ -332,7 +332,7 @@ static void remove_kevent(struct inotify_device *dev,
/* /*
* inotify_dev_event_dequeue - destroy an event on the given device * inotify_dev_event_dequeue - destroy an event on the given device
* *
* Caller must hold dev->sem. * Caller must hold dev->mutex.
*/ */
static void inotify_dev_event_dequeue(struct inotify_device *dev) static void inotify_dev_event_dequeue(struct inotify_device *dev)
{ {
...@@ -346,7 +346,7 @@ static void inotify_dev_event_dequeue(struct inotify_device *dev) ...@@ -346,7 +346,7 @@ static void inotify_dev_event_dequeue(struct inotify_device *dev)
/* /*
* inotify_dev_get_wd - returns the next WD for use by the given dev * inotify_dev_get_wd - returns the next WD for use by the given dev
* *
* Callers must hold dev->sem. This function can sleep. * Callers must hold dev->mutex. This function can sleep.
*/ */
static int inotify_dev_get_wd(struct inotify_device *dev, static int inotify_dev_get_wd(struct inotify_device *dev,
struct inotify_watch *watch) struct inotify_watch *watch)
...@@ -383,7 +383,7 @@ static int find_inode(const char __user *dirname, struct nameidata *nd, ...@@ -383,7 +383,7 @@ static int find_inode(const char __user *dirname, struct nameidata *nd,
/* /*
* create_watch - creates a watch on the given device. * create_watch - creates a watch on the given device.
* *
* Callers must hold dev->sem. Calls inotify_dev_get_wd() so may sleep. * Callers must hold dev->mutex. Calls inotify_dev_get_wd() so may sleep.
* Both 'dev' and 'inode' (by way of nameidata) need to be pinned. * Both 'dev' and 'inode' (by way of nameidata) need to be pinned.
*/ */
static struct inotify_watch *create_watch(struct inotify_device *dev, static struct inotify_watch *create_watch(struct inotify_device *dev,
...@@ -434,7 +434,7 @@ static struct inotify_watch *create_watch(struct inotify_device *dev, ...@@ -434,7 +434,7 @@ static struct inotify_watch *create_watch(struct inotify_device *dev,
/* /*
* inotify_find_dev - find the watch associated with the given inode and dev * inotify_find_dev - find the watch associated with the given inode and dev
* *
* Callers must hold inode->inotify_sem. * Callers must hold inode->inotify_mutex.
*/ */
static struct inotify_watch *inode_find_dev(struct inode *inode, static struct inotify_watch *inode_find_dev(struct inode *inode,
struct inotify_device *dev) struct inotify_device *dev)
...@@ -469,7 +469,7 @@ static void remove_watch_no_event(struct inotify_watch *watch, ...@@ -469,7 +469,7 @@ static void remove_watch_no_event(struct inotify_watch *watch,
* the IN_IGNORED event to the given device signifying that the inode is no * the IN_IGNORED event to the given device signifying that the inode is no
* longer watched. * longer watched.
* *
* Callers must hold both inode->inotify_sem and dev->sem. We drop a * Callers must hold both inode->inotify_mutex and dev->mutex. We drop a
* reference to the inode before returning. * reference to the inode before returning.
* *
* The inode is not iput() so as to remain atomic. If the inode needs to be * The inode is not iput() so as to remain atomic. If the inode needs to be
...@@ -507,21 +507,21 @@ void inotify_inode_queue_event(struct inode *inode, u32 mask, u32 cookie, ...@@ -507,21 +507,21 @@ void inotify_inode_queue_event(struct inode *inode, u32 mask, u32 cookie,
if (!inotify_inode_watched(inode)) if (!inotify_inode_watched(inode))
return; return;
down(&inode->inotify_sem); mutex_lock(&inode->inotify_mutex);
list_for_each_entry_safe(watch, next, &inode->inotify_watches, i_list) { list_for_each_entry_safe(watch, next, &inode->inotify_watches, i_list) {
u32 watch_mask = watch->mask; u32 watch_mask = watch->mask;
if (watch_mask & mask) { if (watch_mask & mask) {
struct inotify_device *dev = watch->dev; struct inotify_device *dev = watch->dev;
get_inotify_watch(watch); get_inotify_watch(watch);
down(&dev->sem); mutex_lock(&dev->mutex);
inotify_dev_queue_event(dev, watch, mask, cookie, name); inotify_dev_queue_event(dev, watch, mask, cookie, name);
if (watch_mask & IN_ONESHOT) if (watch_mask & IN_ONESHOT)
remove_watch_no_event(watch, dev); remove_watch_no_event(watch, dev);
up(&dev->sem); mutex_unlock(&dev->mutex);
put_inotify_watch(watch); put_inotify_watch(watch);
} }
} }
up(&inode->inotify_sem); mutex_unlock(&inode->inotify_mutex);
} }
EXPORT_SYMBOL_GPL(inotify_inode_queue_event); EXPORT_SYMBOL_GPL(inotify_inode_queue_event);
...@@ -626,16 +626,16 @@ void inotify_unmount_inodes(struct list_head *list) ...@@ -626,16 +626,16 @@ void inotify_unmount_inodes(struct list_head *list)
iput(need_iput_tmp); iput(need_iput_tmp);
/* for each watch, send IN_UNMOUNT and then remove it */ /* for each watch, send IN_UNMOUNT and then remove it */
down(&inode->inotify_sem); mutex_lock(&inode->inotify_mutex);
watches = &inode->inotify_watches; watches = &inode->inotify_watches;
list_for_each_entry_safe(watch, next_w, watches, i_list) { list_for_each_entry_safe(watch, next_w, watches, i_list) {
struct inotify_device *dev = watch->dev; struct inotify_device *dev = watch->dev;
down(&dev->sem); mutex_lock(&dev->mutex);
inotify_dev_queue_event(dev, watch, IN_UNMOUNT,0,NULL); inotify_dev_queue_event(dev, watch, IN_UNMOUNT,0,NULL);
remove_watch(watch, dev); remove_watch(watch, dev);
up(&dev->sem); mutex_unlock(&dev->mutex);
} }
up(&inode->inotify_sem); mutex_unlock(&inode->inotify_mutex);
iput(inode); iput(inode);
spin_lock(&inode_lock); spin_lock(&inode_lock);
...@@ -651,14 +651,14 @@ void inotify_inode_is_dead(struct inode *inode) ...@@ -651,14 +651,14 @@ void inotify_inode_is_dead(struct inode *inode)
{ {
struct inotify_watch *watch, *next; struct inotify_watch *watch, *next;
down(&inode->inotify_sem); mutex_lock(&inode->inotify_mutex);
list_for_each_entry_safe(watch, next, &inode->inotify_watches, i_list) { list_for_each_entry_safe(watch, next, &inode->inotify_watches, i_list) {
struct inotify_device *dev = watch->dev; struct inotify_device *dev = watch->dev;
down(&dev->sem); mutex_lock(&dev->mutex);
remove_watch(watch, dev); remove_watch(watch, dev);
up(&dev->sem); mutex_unlock(&dev->mutex);
} }
up(&inode->inotify_sem); mutex_unlock(&inode->inotify_mutex);
} }
EXPORT_SYMBOL_GPL(inotify_inode_is_dead); EXPORT_SYMBOL_GPL(inotify_inode_is_dead);
...@@ -670,10 +670,10 @@ static unsigned int inotify_poll(struct file *file, poll_table *wait) ...@@ -670,10 +670,10 @@ static unsigned int inotify_poll(struct file *file, poll_table *wait)
int ret = 0; int ret = 0;
poll_wait(file, &dev->wq, wait); poll_wait(file, &dev->wq, wait);
down(&dev->sem); mutex_lock(&dev->mutex);
if (!list_empty(&dev->events)) if (!list_empty(&dev->events))
ret = POLLIN | POLLRDNORM; ret = POLLIN | POLLRDNORM;
up(&dev->sem); mutex_unlock(&dev->mutex);
return ret; return ret;
} }
...@@ -695,9 +695,9 @@ static ssize_t inotify_read(struct file *file, char __user *buf, ...@@ -695,9 +695,9 @@ static ssize_t inotify_read(struct file *file, char __user *buf,
prepare_to_wait(&dev->wq, &wait, TASK_INTERRUPTIBLE); prepare_to_wait(&dev->wq, &wait, TASK_INTERRUPTIBLE);
down(&dev->sem); mutex_lock(&dev->mutex);
events = !list_empty(&dev->events); events = !list_empty(&dev->events);
up(&dev->sem); mutex_unlock(&dev->mutex);
if (events) { if (events) {
ret = 0; ret = 0;
break; break;
...@@ -720,7 +720,7 @@ static ssize_t inotify_read(struct file *file, char __user *buf, ...@@ -720,7 +720,7 @@ static ssize_t inotify_read(struct file *file, char __user *buf,
if (ret) if (ret)
return ret; return ret;
down(&dev->sem); mutex_lock(&dev->mutex);
while (1) { while (1) {
struct inotify_kernel_event *kevent; struct inotify_kernel_event *kevent;
...@@ -750,7 +750,7 @@ static ssize_t inotify_read(struct file *file, char __user *buf, ...@@ -750,7 +750,7 @@ static ssize_t inotify_read(struct file *file, char __user *buf,
remove_kevent(dev, kevent); remove_kevent(dev, kevent);
} }
up(&dev->sem); mutex_unlock(&dev->mutex);
return ret; return ret;
} }
...@@ -763,37 +763,37 @@ static int inotify_release(struct inode *ignored, struct file *file) ...@@ -763,37 +763,37 @@ static int inotify_release(struct inode *ignored, struct file *file)
* Destroy all of the watches on this device. Unfortunately, not very * Destroy all of the watches on this device. Unfortunately, not very
* pretty. We cannot do a simple iteration over the list, because we * pretty. We cannot do a simple iteration over the list, because we
* do not know the inode until we iterate to the watch. But we need to * do not know the inode until we iterate to the watch. But we need to
* hold inode->inotify_sem before dev->sem. The following works. * hold inode->inotify_mutex before dev->mutex. The following works.
*/ */
while (1) { while (1) {
struct inotify_watch *watch; struct inotify_watch *watch;
struct list_head *watches; struct list_head *watches;
struct inode *inode; struct inode *inode;
down(&dev->sem); mutex_lock(&dev->mutex);
watches = &dev->watches; watches = &dev->watches;
if (list_empty(watches)) { if (list_empty(watches)) {
up(&dev->sem); mutex_unlock(&dev->mutex);
break; break;
} }
watch = list_entry(watches->next, struct inotify_watch, d_list); watch = list_entry(watches->next, struct inotify_watch, d_list);
get_inotify_watch(watch); get_inotify_watch(watch);
up(&dev->sem); mutex_unlock(&dev->mutex);
inode = watch->inode; inode = watch->inode;
down(&inode->inotify_sem); mutex_lock(&inode->inotify_mutex);
down(&dev->sem); mutex_lock(&dev->mutex);
remove_watch_no_event(watch, dev); remove_watch_no_event(watch, dev);
up(&dev->sem); mutex_unlock(&dev->mutex);
up(&inode->inotify_sem); mutex_unlock(&inode->inotify_mutex);
put_inotify_watch(watch); put_inotify_watch(watch);
} }
/* destroy all of the events on this device */ /* destroy all of the events on this device */
down(&dev->sem); mutex_lock(&dev->mutex);
while (!list_empty(&dev->events)) while (!list_empty(&dev->events))
inotify_dev_event_dequeue(dev); inotify_dev_event_dequeue(dev);
up(&dev->sem); mutex_unlock(&dev->mutex);
/* free this device: the put matching the get in inotify_init() */ /* free this device: the put matching the get in inotify_init() */
put_inotify_dev(dev); put_inotify_dev(dev);
...@@ -811,26 +811,26 @@ static int inotify_ignore(struct inotify_device *dev, s32 wd) ...@@ -811,26 +811,26 @@ static int inotify_ignore(struct inotify_device *dev, s32 wd)
struct inotify_watch *watch; struct inotify_watch *watch;
struct inode *inode; struct inode *inode;
down(&dev->sem); mutex_lock(&dev->mutex);
watch = idr_find(&dev->idr, wd); watch = idr_find(&dev->idr, wd);
if (unlikely(!watch)) { if (unlikely(!watch)) {
up(&dev->sem); mutex_unlock(&dev->mutex);
return -EINVAL; return -EINVAL;
} }
get_inotify_watch(watch); get_inotify_watch(watch);
inode = watch->inode; inode = watch->inode;
up(&dev->sem); mutex_unlock(&dev->mutex);
down(&inode->inotify_sem); mutex_lock(&inode->inotify_mutex);
down(&dev->sem); mutex_lock(&dev->mutex);
/* make sure that we did not race */ /* make sure that we did not race */
watch = idr_find(&dev->idr, wd); watch = idr_find(&dev->idr, wd);
if (likely(watch)) if (likely(watch))
remove_watch(watch, dev); remove_watch(watch, dev);
up(&dev->sem); mutex_unlock(&dev->mutex);
up(&inode->inotify_sem); mutex_unlock(&inode->inotify_mutex);
put_inotify_watch(watch); put_inotify_watch(watch);
return 0; return 0;
...@@ -905,7 +905,7 @@ asmlinkage long sys_inotify_init(void) ...@@ -905,7 +905,7 @@ asmlinkage long sys_inotify_init(void)
INIT_LIST_HEAD(&dev->events); INIT_LIST_HEAD(&dev->events);
INIT_LIST_HEAD(&dev->watches); INIT_LIST_HEAD(&dev->watches);
init_waitqueue_head(&dev->wq); init_waitqueue_head(&dev->wq);
sema_init(&dev->sem, 1); mutex_init(&dev->mutex);
dev->event_count = 0; dev->event_count = 0;
dev->queue_size = 0; dev->queue_size = 0;
dev->max_events = inotify_max_queued_events; dev->max_events = inotify_max_queued_events;
...@@ -960,8 +960,8 @@ asmlinkage long sys_inotify_add_watch(int fd, const char __user *path, u32 mask) ...@@ -960,8 +960,8 @@ asmlinkage long sys_inotify_add_watch(int fd, const char __user *path, u32 mask)
inode = nd.dentry->d_inode; inode = nd.dentry->d_inode;
dev = filp->private_data; dev = filp->private_data;
down(&inode->inotify_sem); mutex_lock(&inode->inotify_mutex);
down(&dev->sem); mutex_lock(&dev->mutex);
if (mask & IN_MASK_ADD) if (mask & IN_MASK_ADD)
mask_add = 1; mask_add = 1;
...@@ -998,8 +998,8 @@ asmlinkage long sys_inotify_add_watch(int fd, const char __user *path, u32 mask) ...@@ -998,8 +998,8 @@ asmlinkage long sys_inotify_add_watch(int fd, const char __user *path, u32 mask)
list_add(&watch->i_list, &inode->inotify_watches); list_add(&watch->i_list, &inode->inotify_watches);
ret = watch->wd; ret = watch->wd;
out: out:
up(&dev->sem); mutex_unlock(&dev->mutex);
up(&inode->inotify_sem); mutex_unlock(&inode->inotify_mutex);
path_release(&nd); path_release(&nd);
fput_and_out: fput_and_out:
fput_light(filp, fput_needed); fput_light(filp, fput_needed);
......
...@@ -509,7 +509,7 @@ struct inode { ...@@ -509,7 +509,7 @@ struct inode {
#ifdef CONFIG_INOTIFY #ifdef CONFIG_INOTIFY
struct list_head inotify_watches; /* watches on this inode */ struct list_head inotify_watches; /* watches on this inode */
struct semaphore inotify_sem; /* protects the watches list */ struct mutex inotify_mutex; /* protects the watches list */
#endif #endif
unsigned long i_state; unsigned long i_state;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment