Commit 55b27bf6 authored by Stephen Rothwell

Merge commit 'fsnotify/for-next'

parents e5c86131 cf71f3cf
......@@ -426,6 +426,13 @@ When: 2.6.33
Why: Should be implemented in userspace, policy daemon.
Who: Johannes Berg <johannes@sipsolutions.net>
---------------------------
What: CONFIG_INOTIFY
When: 2.6.33
Why: No known users, fsnotify more generic and more easily maintained.
Who: Eric Paris <eparis@redhat.com>
----------------------------
What: CONFIG_X86_OLD_MCE
......
......@@ -1177,11 +1177,10 @@ out:
if (iov != iovstack)
kfree(iov);
if ((ret + (type == READ)) > 0) {
struct dentry *dentry = file->f_path.dentry;
if (type == READ)
fsnotify_access(dentry);
fsnotify_access(file);
else
fsnotify_modify(dentry);
fsnotify_modify(file);
}
return ret;
}
......
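For reference, the userspace-visible behaviour of these read/write hooks is unchanged by the dentry-to-file conversion: reads and writes on a watched file still surface as IN_ACCESS and IN_MODIFY. A small illustrative program (userspace C, not from this commit; the path /tmp/fsnotify-demo is arbitrary):

#include <sys/inotify.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	int ifd, fd;
	ssize_t len, i;

	fd = open("/tmp/fsnotify-demo", O_RDWR | O_CREAT, 0600);
	ifd = inotify_init();
	inotify_add_watch(ifd, "/tmp/fsnotify-demo", IN_ACCESS | IN_MODIFY);

	write(fd, "x", 1);	/* vfs_write() -> fsnotify_modify(file) */
	lseek(fd, 0, SEEK_SET);
	read(fd, buf, 1);	/* vfs_read() -> fsnotify_access(file) */

	len = read(ifd, buf, sizeof(buf));
	for (i = 0; i < len;
	     i += sizeof(struct inotify_event)
		  + ((struct inotify_event *)(buf + i))->len) {
		struct inotify_event *ev = (struct inotify_event *)(buf + i);
		printf("mask=0x%x\n", ev->mask);
	}
	return 0;
}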
......@@ -128,7 +128,7 @@ SYSCALL_DEFINE1(uselib, const char __user *, library)
if (file->f_path.mnt->mnt_flags & MNT_NOEXEC)
goto exit;
fsnotify_open(file->f_path.dentry);
fsnotify_open(file);
error = -ENOEXEC;
if(file->f_op) {
......@@ -663,7 +663,7 @@ struct file *open_exec(const char *name)
if (file->f_path.mnt->mnt_flags & MNT_NOEXEC)
goto exit;
fsnotify_open(file->f_path.dentry);
fsnotify_open(file);
err = deny_write_access(file);
if (err)
......
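fsnotify_open() is also reached from open_exec(), so executing a watched binary raises IN_OPEN. An illustrative check (userspace C, not from this commit; /bin/true is an arbitrary watched binary):

#include <sys/inotify.h>
#include <sys/wait.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	struct inotify_event *ev;
	int ifd = inotify_init();

	/* open_exec() calls fsnotify_open(), so execution shows up as IN_OPEN */
	inotify_add_watch(ifd, "/bin/true", IN_OPEN);

	if (fork() == 0) {
		execl("/bin/true", "true", (char *)NULL);
		_exit(1);
	}
	wait(NULL);

	if (read(ifd, buf, sizeof(buf)) >= (ssize_t)sizeof(*ev)) {
		ev = (struct inotify_event *)buf;
		printf("mask=0x%x IN_OPEN=%d\n", ev->mask,
		       !!(ev->mask & IN_OPEN));
	}
	return 0;
}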
......@@ -951,7 +951,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
nfsdstats.io_read += host_err;
*count = host_err;
err = 0;
fsnotify_access(file->f_path.dentry);
fsnotify_access(file);
} else
err = nfserrno(host_err);
out:
......@@ -1062,7 +1062,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
goto out_nfserr;
*cnt = host_err;
nfsdstats.io_write += host_err;
fsnotify_modify(file->f_path.dentry);
fsnotify_modify(file);
/* clear setuid/setgid flag after write */
if (inode->i_mode & (S_ISUID | S_ISGID))
......
......@@ -132,7 +132,8 @@ static int dnotify_handle_event(struct fsnotify_group *group,
* userspace notification for that pair.
*/
static bool dnotify_should_send_event(struct fsnotify_group *group,
struct inode *inode, __u32 mask)
struct inode *inode, __u32 mask,
void *data, int data_type)
{
struct fsnotify_mark_entry *entry;
bool send;
......@@ -361,7 +362,7 @@ int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg)
dnentry = container_of(entry, struct dnotify_mark_entry, fsn_entry);
spin_lock(&entry->lock);
} else {
fsnotify_add_mark(new_entry, dnotify_group, inode);
fsnotify_add_mark(new_entry, dnotify_group, inode, 0);
spin_lock(&new_entry->lock);
entry = new_entry;
dnentry = new_dnentry;
......@@ -431,8 +432,7 @@ static int __init dnotify_init(void)
dnotify_struct_cache = KMEM_CACHE(dnotify_struct, SLAB_PANIC);
dnotify_mark_entry_cache = KMEM_CACHE(dnotify_mark_entry, SLAB_PANIC);
dnotify_group = fsnotify_obtain_group(DNOTIFY_GROUP_NUM,
0, &dnotify_fsnotify_ops);
dnotify_group = fsnotify_obtain_group(0, &dnotify_fsnotify_ops);
if (IS_ERR(dnotify_group))
panic("unable to allocate fsnotify group for dnotify\n");
return 0;
......
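dnotify itself is still driven from userspace through fcntl(F_NOTIFY); the hunks above only adapt it to the new should_send_event() and fsnotify_add_mark() signatures. An illustrative exerciser (userspace C, not from this commit; toy code that ignores the usual signal/pause() race):

#define _GNU_SOURCE
#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static volatile sig_atomic_t got_event;

static void on_dnotify(int sig)
{
	(void)sig;
	got_event = 1;
}

int main(void)
{
	struct sigaction sa;
	int dfd = open(".", O_RDONLY);

	sa.sa_handler = on_dnotify;
	sa.sa_flags = 0;
	sigemptyset(&sa.sa_mask);
	sigaction(SIGRTMIN, &sa, NULL);

	/* deliver dnotify events on SIGRTMIN instead of SIGIO */
	fcntl(dfd, F_SETSIG, SIGRTMIN);
	/* watch for file creation in "." until the fd is closed */
	fcntl(dfd, F_NOTIFY, DN_CREATE | DN_MULTISHOT);

	if (fork() == 0) {
		close(open("dnotify-demo.tmp", O_CREAT | O_WRONLY, 0600));
		_exit(0);
	}
	pause();	/* woken by the signal handler */
	printf("dnotify fired: %d\n", (int)got_event);
	return 0;
}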
......@@ -77,13 +77,16 @@ void __fsnotify_update_child_dentry_flags(struct inode *inode)
}
/* Notify this dentry's parent about a child's events. */
void __fsnotify_parent(struct dentry *dentry, __u32 mask)
void __fsnotify_parent(struct file *file, struct dentry *dentry, __u32 mask)
{
struct dentry *parent;
struct inode *p_inode;
bool send = false;
bool should_update_children = false;
if (file)
dentry = file->f_path.dentry;
if (!(dentry->d_flags & DCACHE_FSNOTIFY_PARENT_WATCHED))
return;
......@@ -114,8 +117,12 @@ void __fsnotify_parent(struct dentry *dentry, __u32 mask)
* specifies these are events which came from a child. */
mask |= FS_EVENT_ON_CHILD;
fsnotify(p_inode, mask, dentry->d_inode, FSNOTIFY_EVENT_INODE,
dentry->d_name.name, 0);
if (file)
fsnotify(p_inode, mask, file, FSNOTIFY_EVENT_FILE,
dentry->d_name.name, 0);
else
fsnotify(p_inode, mask, dentry->d_inode, FSNOTIFY_EVENT_INODE,
dentry->d_name.name, 0);
dput(parent);
}
......@@ -156,7 +163,8 @@ void fsnotify(struct inode *to_tell, __u32 mask, void *data, int data_is, const
idx = srcu_read_lock(&fsnotify_grp_srcu);
list_for_each_entry_rcu(group, &fsnotify_groups, group_list) {
if (test_mask & group->mask) {
if (!group->ops->should_send_event(group, to_tell, mask))
if (!group->ops->should_send_event(group, to_tell, mask,
data, data_is))
continue;
if (!event) {
event = fsnotify_create_event(to_tell, mask, data,
......
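should_send_event() now receives the event payload (data, data_is), so a backend can filter on more than the inode and mask. A sketch of what a hypothetical backend callback could look like under the new signature (illustrative only, not code from this commit):

#include <linux/fsnotify_backend.h>

/* hypothetical backend: filter on the payload type the caller passed in */
static bool example_should_send_event(struct fsnotify_group *group,
				      struct inode *inode, __u32 mask,
				      void *data, int data_type)
{
	struct fsnotify_mark_entry *entry;
	bool send;

	/* the payload type is visible here now: ignore non-file events */
	if (data_type != FSNOTIFY_EVENT_FILE)
		return false;

	spin_lock(&inode->i_lock);
	entry = fsnotify_find_mark_entry(group, inode);
	spin_unlock(&inode->i_lock);
	if (!entry)
		return false;

	send = (mask & entry->mask);
	fsnotify_put_mark(entry);	/* match the reference find took */

	return send;
}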
......@@ -77,15 +77,6 @@ void fsnotify_recalc_group_mask(struct fsnotify_group *group)
fsnotify_recalc_global_mask();
}
/*
* Take a reference to a group so things found under the fsnotify_grp_mutex
* can't get freed under us
*/
static void fsnotify_get_group(struct fsnotify_group *group)
{
atomic_inc(&group->refcnt);
}
/*
* Final freeing of a group
*/
......@@ -170,51 +161,24 @@ void fsnotify_put_group(struct fsnotify_group *group)
fsnotify_destroy_group(group);
}
/*
* Simply run the fsnotify_groups list and find a group which matches
* the given parameters. If a group is found we take a reference to that
* group.
*/
static struct fsnotify_group *fsnotify_find_group(unsigned int group_num, __u32 mask,
const struct fsnotify_ops *ops)
{
struct fsnotify_group *group_iter;
struct fsnotify_group *group = NULL;
BUG_ON(!mutex_is_locked(&fsnotify_grp_mutex));
list_for_each_entry_rcu(group_iter, &fsnotify_groups, group_list) {
if (group_iter->group_num == group_num) {
if ((group_iter->mask == mask) &&
(group_iter->ops == ops)) {
fsnotify_get_group(group_iter);
group = group_iter;
} else
group = ERR_PTR(-EEXIST);
}
}
return group;
}
/*
* Either finds an existing group which matches the group_num, mask, and ops or
* creates a new group and adds it to the global group list. In either case we
* take a reference for the group returned.
*/
struct fsnotify_group *fsnotify_obtain_group(unsigned int group_num, __u32 mask,
struct fsnotify_group *fsnotify_obtain_group(__u32 mask,
const struct fsnotify_ops *ops)
{
struct fsnotify_group *group, *tgroup;
struct fsnotify_group *group;
/* very low use, simpler locking if we just always alloc */
group = kmalloc(sizeof(struct fsnotify_group), GFP_KERNEL);
group = kzalloc(sizeof(struct fsnotify_group), GFP_KERNEL);
if (!group)
return ERR_PTR(-ENOMEM);
atomic_set(&group->refcnt, 1);
group->on_group_list = 0;
group->group_num = group_num;
group->mask = mask;
mutex_init(&group->notification_mutex);
......@@ -230,14 +194,6 @@ struct fsnotify_group *fsnotify_obtain_group(unsigned int group_num, __u32 mask,
group->ops = ops;
mutex_lock(&fsnotify_grp_mutex);
tgroup = fsnotify_find_group(group_num, mask, ops);
if (tgroup) {
/* group already exists */
mutex_unlock(&fsnotify_grp_mutex);
/* destroy the new one we made */
fsnotify_put_group(group);
return tgroup;
}
/* group not found, add a new one */
list_add_rcu(&group->group_list, &fsnotify_groups);
......
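With group_num gone, fsnotify_obtain_group() no longer deduplicates: every call allocates a fresh group with kzalloc(). A sketch of the resulting lifecycle for a hypothetical backend (illustrative; example_ops assumed defined elsewhere):

static struct fsnotify_group *example_group;

static int __init example_init(void)
{
	/* mask 0: the group mask grows as marks are added */
	example_group = fsnotify_obtain_group(0, &example_ops);
	if (IS_ERR(example_group))
		return PTR_ERR(example_group);
	return 0;
}

static void __exit example_exit(void)
{
	fsnotify_put_group(example_group);	/* drop the obtain reference */
}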
......@@ -283,12 +283,20 @@ struct fsnotify_mark_entry *fsnotify_find_mark_entry(struct fsnotify_group *grou
return NULL;
}
void fsnotify_duplicate_mark(struct fsnotify_mark_entry *new, struct fsnotify_mark_entry *old)
{
assert_spin_locked(&old->lock);
new->inode = old->inode;
new->group = old->group;
new->mask = old->mask;
new->free_mark = old->free_mark;
}
/*
* Nothing fancy, just initialize lists and locks and counters.
*/
void fsnotify_init_mark(struct fsnotify_mark_entry *entry,
void (*free_mark)(struct fsnotify_mark_entry *entry))
{
spin_lock_init(&entry->lock);
atomic_set(&entry->refcnt, 1);
......@@ -305,9 +313,10 @@ void fsnotify_init_mark(struct fsnotify_mark_entry *entry,
* event types should be delivered to which group and for which inodes.
*/
int fsnotify_add_mark(struct fsnotify_mark_entry *entry,
struct fsnotify_group *group, struct inode *inode)
struct fsnotify_group *group, struct inode *inode,
int allow_dups)
{
struct fsnotify_mark_entry *lentry;
struct fsnotify_mark_entry *lentry = NULL;
int ret = 0;
inode = igrab(inode);
......@@ -324,11 +333,12 @@ int fsnotify_add_mark(struct fsnotify_mark_entry *entry,
spin_lock(&group->mark_lock);
spin_lock(&inode->i_lock);
entry->group = group;
entry->inode = inode;
lentry = fsnotify_find_mark_entry(group, inode);
if (!allow_dups)
lentry = fsnotify_find_mark_entry(group, inode);
if (!lentry) {
entry->group = group;
entry->inode = inode;
hlist_add_head(&entry->i_list, &inode->i_fsnotify_mark_entries);
list_add(&entry->g_list, &group->mark_entries);
......
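fsnotify_duplicate_mark() and the new allow_dups argument exist so a caller can attach a second mark to the same (group, inode) pair and then retire the old one; untag_chunk()/tag_chunk() in audit_tree below use exactly this sequence. A condensed sketch (illustrative; the locking that fsnotify_duplicate_mark() asserts on 'old' is elided):

/* sketch: swap 'old' for 'new' on the same group/inode */
static int example_replace_mark(struct fsnotify_mark_entry *old,
				struct fsnotify_mark_entry *new)
{
	fsnotify_duplicate_mark(new, old);	/* copies group, inode, mask */

	/* allow_dups=1: both marks may coexist on the inode briefly */
	if (fsnotify_add_mark(new, new->group, new->inode, 1))
		return -ENOSPC;

	fsnotify_destroy_mark_by_entry(old);	/* retire the original */
	fsnotify_put_mark(old);			/* drop the caller's reference */
	return 0;
}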
......@@ -31,6 +31,60 @@
#include "inotify.h"
/*
* Check if 2 events contain the same information. We do not compare private data
* but at this moment that isn't a problem for any known fsnotify listeners.
*/
static bool event_compare(struct fsnotify_event *old, struct fsnotify_event *new)
{
if ((old->mask == new->mask) &&
(old->to_tell == new->to_tell) &&
(old->data_type == new->data_type) &&
(old->name_len == new->name_len)) {
switch (old->data_type) {
case (FSNOTIFY_EVENT_INODE):
/* remember, after old was put on the wait_q we aren't
* allowed to look at the inode any more, only thing
* left to check was if the file_name is the same */
if (old->name_len &&
!strcmp(old->file_name, new->file_name))
return true;
break;
case (FSNOTIFY_EVENT_PATH):
if ((old->path.mnt == new->path.mnt) &&
(old->path.dentry == new->path.dentry))
return true;
break;
case (FSNOTIFY_EVENT_NONE):
if (old->mask & FS_Q_OVERFLOW)
return true;
else if (old->mask & FS_IN_IGNORED)
return false;
return true;
};
}
return false;
}
static int inotify_merge(struct list_head *list, struct fsnotify_event *event)
{
struct fsnotify_event_holder *last_holder;
struct fsnotify_event *last_event;
int ret = 0;
/* and the list better be locked by something too */
spin_lock(&event->lock);
last_holder = list_entry(list->prev, struct fsnotify_event_holder, event_list);
last_event = last_holder->event;
if (event_compare(last_event, event))
ret = -EEXIST;
spin_unlock(&event->lock);
return ret;
}
static int inotify_handle_event(struct fsnotify_group *group, struct fsnotify_event *event)
{
struct fsnotify_mark_entry *entry;
......@@ -61,7 +115,7 @@ static int inotify_handle_event(struct fsnotify_group *group, struct fsnotify_ev
fsn_event_priv->group = group;
event_priv->wd = wd;
ret = fsnotify_add_notify_event(group, event, fsn_event_priv);
ret = fsnotify_add_notify_event(group, event, fsn_event_priv, inotify_merge);
if (ret) {
inotify_free_event_priv(fsn_event_priv);
/* EEXIST says we tail matched, EOVERFLOW isn't something
......@@ -85,7 +139,8 @@ static void inotify_freeing_mark(struct fsnotify_mark_entry *entry, struct fsnot
inotify_ignored_and_remove_idr(entry, group);
}
static bool inotify_should_send_event(struct fsnotify_group *group, struct inode *inode, __u32 mask)
static bool inotify_should_send_event(struct fsnotify_group *group, struct inode *inode,
__u32 mask, void *data, int data_type)
{
struct fsnotify_mark_entry *entry;
bool send;
......
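inotify_merge() only compares the new event against the tail of the notification list, preserving the classic coalescing of identical back-to-back events. An illustrative demonstration (userspace C, not from this commit; expect a single IN_MODIFY if nothing lands between the two writes):

#include <sys/inotify.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	int ifd = inotify_init();
	int fd = open("/tmp/merge-demo", O_WRONLY | O_CREAT, 0600);
	ssize_t len, i;
	int count = 0;

	inotify_add_watch(ifd, "/tmp/merge-demo", IN_MODIFY);

	write(fd, "a", 1);	/* queues IN_MODIFY */
	write(fd, "b", 1);	/* identical event: merged with the tail */

	len = read(ifd, buf, sizeof(buf));
	for (i = 0; i < len;
	     i += sizeof(struct inotify_event)
		  + ((struct inotify_event *)(buf + i))->len)
		count++;

	printf("events read: %d (expect 1 if the tail merge hit)\n", count);
	return 0;
}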
......@@ -55,12 +55,6 @@ int inotify_max_user_watches __read_mostly;
static struct kmem_cache *inotify_inode_mark_cachep __read_mostly;
struct kmem_cache *event_priv_cachep __read_mostly;
/*
* When inotify registers a new group it increments this and uses that
* value as an offset to set the fsnotify group "name" and priority.
*/
static atomic_t inotify_grp_num;
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
......@@ -367,6 +361,77 @@ static int inotify_find_inode(const char __user *dirname, struct path *path, uns
return error;
}
static int inotify_add_to_idr(struct idr *idr, spinlock_t *idr_lock,
int last_wd,
struct inotify_inode_mark_entry *ientry)
{
int ret;
do {
if (unlikely(!idr_pre_get(idr, GFP_KERNEL)))
return -ENOMEM;
spin_lock(idr_lock);
ret = idr_get_new_above(idr, ientry, last_wd + 1,
&ientry->wd);
/* we added the mark to the idr, take a reference */
if (!ret)
fsnotify_get_mark(&ientry->fsn_entry);
spin_unlock(idr_lock);
} while (ret == -EAGAIN);
return ret;
}
static struct inotify_inode_mark_entry *inotify_idr_find_locked(struct fsnotify_group *group,
int wd)
{
struct idr *idr = &group->inotify_data.idr;
spinlock_t *idr_lock = &group->inotify_data.idr_lock;
struct inotify_inode_mark_entry *ientry;
assert_spin_locked(idr_lock);
ientry = idr_find(idr, wd);
if (ientry) {
struct fsnotify_mark_entry *fsn_entry = &ientry->fsn_entry;
fsnotify_get_mark(fsn_entry);
/* One ref for being in the idr, one ref we just took */
BUG_ON(atomic_read(&fsn_entry->refcnt) < 2);
}
return ientry;
}
static struct inotify_inode_mark_entry *inotify_idr_find(struct fsnotify_group *group,
int wd)
{
struct inotify_inode_mark_entry *ientry;
spinlock_t *idr_lock = &group->inotify_data.idr_lock;
spin_lock(idr_lock);
ientry = inotify_idr_find_locked(group, wd);
spin_unlock(idr_lock);
return ientry;
}
static void do_inotify_remove_from_idr(struct fsnotify_group *group,
struct inotify_inode_mark_entry *ientry)
{
struct idr *idr = &group->inotify_data.idr;
spinlock_t *idr_lock = &group->inotify_data.idr_lock;
int wd = ientry->wd;
assert_spin_locked(idr_lock);
idr_remove(idr, wd);
/* removed from the idr, drop that ref */
fsnotify_put_mark(&ientry->fsn_entry);
}
/*
* Remove the mark from the idr (if present) and drop the reference
* on the mark because it was in the idr.
......@@ -374,42 +439,72 @@ static int inotify_find_inode(const char __user *dirname, struct path *path, uns
static void inotify_remove_from_idr(struct fsnotify_group *group,
struct inotify_inode_mark_entry *ientry)
{
struct idr *idr;
struct fsnotify_mark_entry *entry;
struct inotify_inode_mark_entry *found_ientry;
spinlock_t *idr_lock = &group->inotify_data.idr_lock;
struct inotify_inode_mark_entry *found_ientry = NULL;
int wd;
spin_lock(&group->inotify_data.idr_lock);
idr = &group->inotify_data.idr;
spin_lock(idr_lock);
wd = ientry->wd;
if (wd == -1)
/*
* does this ientry think it is in the idr? we shouldn't get called
* if it wasn't....
*/
if (wd == -1) {
printk(KERN_WARNING "%s: ientry=%p ientry->wd=%d ientry->group=%p"
" ientry->inode=%p\n", __func__, ientry, ientry->wd,
ientry->fsn_entry.group, ientry->fsn_entry.inode);
WARN_ON(1);
goto out;
}
entry = idr_find(&group->inotify_data.idr, wd);
if (unlikely(!entry))
/* Lets look in the idr to see if we find it */
found_ientry = inotify_idr_find_locked(group, wd);
if (unlikely(!found_ientry)) {
printk(KERN_WARNING "%s: ientry=%p ientry->wd=%d ientry->group=%p"
" ientry->inode=%p\n", __func__, ientry, ientry->wd,
ientry->fsn_entry.group, ientry->fsn_entry.inode);
WARN_ON(1);
goto out;
}
found_ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry);
/*
* We found an entry in the idr at the right wd, but it's
* not the entry we were told to remove. eparis seriously
* fucked up somewhere.
*/
if (unlikely(found_ientry != ientry)) {
/* We found an entry in the idr with the right wd, but it's
* not the entry we were told to remove. eparis seriously
* fucked up somewhere. */
WARN_ON(1);
ientry->wd = -1;
printk(KERN_WARNING "%s: ientry=%p ientry->wd=%d ientry->group=%p "
"entry->inode=%p found_ientry=%p found_ientry->wd=%d "
"found_ientry->group=%p found_ientry->inode=%p\n",
__func__, ientry, ientry->wd, ientry->fsn_entry.group,
ientry->fsn_entry.inode, found_ientry, found_ientry->wd,
found_ientry->fsn_entry.group,
found_ientry->fsn_entry.inode);
goto out;
}
/* One ref for being in the idr, one ref held by the caller */
BUG_ON(atomic_read(&entry->refcnt) < 2);
idr_remove(idr, wd);
ientry->wd = -1;
/*
* One ref for being in the idr
* one ref held by the caller trying to kill us
* one ref grabbed by inotify_idr_find
*/
if (unlikely(atomic_read(&ientry->fsn_entry.refcnt) < 3)) {
printk(KERN_WARNING "%s: ientry=%p ientry->wd=%d ientry->group=%p"
" ientry->inode=%p\n", __func__, ientry, ientry->wd,
ientry->fsn_entry.group, ientry->fsn_entry.inode);
/* we can't really recover with bad ref cnting.. */
BUG();
}
/* removed from the idr, drop that ref */
fsnotify_put_mark(entry);
do_inotify_remove_from_idr(group, ientry);
out:
spin_unlock(&group->inotify_data.idr_lock);
/* match the ref taken by inotify_idr_find_locked() */
if (found_ientry)
fsnotify_put_mark(&found_ientry->fsn_entry);
ientry->wd = -1;
spin_unlock(idr_lock);
}
/*
......@@ -441,7 +536,7 @@ void inotify_ignored_and_remove_idr(struct fsnotify_mark_entry *entry,
fsn_event_priv->group = group;
event_priv->wd = ientry->wd;
ret = fsnotify_add_notify_event(group, ignored_event, fsn_event_priv);
ret = fsnotify_add_notify_event(group, ignored_event, fsn_event_priv, NULL);
if (ret)
inotify_free_event_priv(fsn_event_priv);
......@@ -459,7 +554,9 @@ skip_send_ignore:
/* ding dong the mark is dead */
static void inotify_free_mark(struct fsnotify_mark_entry *entry)
{
struct inotify_inode_mark_entry *ientry = (struct inotify_inode_mark_entry *)entry;
struct inotify_inode_mark_entry *ientry;
ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry);
kmem_cache_free(inotify_inode_mark_cachep, ientry);
}
......@@ -534,6 +631,8 @@ static int inotify_new_watch(struct fsnotify_group *group,
struct inotify_inode_mark_entry *tmp_ientry;
__u32 mask;
int ret;
struct idr *idr = &group->inotify_data.idr;
spinlock_t *idr_lock = &group->inotify_data.idr_lock;
/* don't allow invalid bits: we don't want flags set */
mask = inotify_arg_to_mask(arg);
......@@ -551,28 +650,14 @@ static int inotify_new_watch(struct fsnotify_group *group,
ret = -ENOSPC;
if (atomic_read(&group->inotify_data.user->inotify_watches) >= inotify_max_user_watches)
goto out_err;
retry:
ret = -ENOMEM;
if (unlikely(!idr_pre_get(&group->inotify_data.idr, GFP_KERNEL)))
goto out_err;
spin_lock(&group->inotify_data.idr_lock);
ret = idr_get_new_above(&group->inotify_data.idr, &tmp_ientry->fsn_entry,
group->inotify_data.last_wd,
&tmp_ientry->wd);
spin_unlock(&group->inotify_data.idr_lock);
if (ret) {
/* idr was out of memory allocate and try again */
if (ret == -EAGAIN)
goto retry;
ret = inotify_add_to_idr(idr, idr_lock, group->inotify_data.last_wd,
tmp_ientry);
if (ret)
goto out_err;
}
/* we put the mark on the idr, take a reference */
fsnotify_get_mark(&tmp_ientry->fsn_entry);
/* we are on the idr, now get on the inode */
ret = fsnotify_add_mark(&tmp_ientry->fsn_entry, group, inode);
ret = fsnotify_add_mark(&tmp_ientry->fsn_entry, group, inode, 0);
if (ret) {
/* we failed to get on the inode, get off the idr */
inotify_remove_from_idr(group, tmp_ientry);
......@@ -588,16 +673,13 @@ retry:
/* return the watch descriptor for this new entry */
ret = tmp_ientry->wd;
/* match the ref from fsnotify_init_markentry() */
fsnotify_put_mark(&tmp_ientry->fsn_entry);
/* if this mark added a new event update the group mask */
if (mask & ~group->mask)
fsnotify_recalc_group_mask(group);
out_err:
if (ret < 0)
kmem_cache_free(inotify_inode_mark_cachep, tmp_ientry);
/* match the ref from fsnotify_init_markentry() */
fsnotify_put_mark(&tmp_ientry->fsn_entry);
return ret;
}
......@@ -626,11 +708,8 @@ retry:
static struct fsnotify_group *inotify_new_group(struct user_struct *user, unsigned int max_events)
{
struct fsnotify_group *group;
unsigned int grp_num;
/* fsnotify_obtain_group took a reference to group, we put this when we kill the file in the end */
grp_num = (INOTIFY_GROUP_NUM - atomic_inc_return(&inotify_grp_num));
group = fsnotify_obtain_group(grp_num, 0, &inotify_fsnotify_ops);
group = fsnotify_obtain_group(0, &inotify_fsnotify_ops);
if (IS_ERR(group))
return group;
......@@ -638,7 +717,7 @@ static struct fsnotify_group *inotify_new_group(struct user_struct *user, unsign
spin_lock_init(&group->inotify_data.idr_lock);
idr_init(&group->inotify_data.idr);
group->inotify_data.last_wd = 1;
group->inotify_data.last_wd = 0;
group->inotify_data.user = user;
group->inotify_data.fa = NULL;
......@@ -760,7 +839,7 @@ fput_and_out:
SYSCALL_DEFINE2(inotify_rm_watch, int, fd, __s32, wd)
{
struct fsnotify_group *group;
struct fsnotify_mark_entry *entry;
struct inotify_inode_mark_entry *ientry;
struct file *filp;
int ret = 0, fput_needed;
......@@ -769,25 +848,23 @@ SYSCALL_DEFINE2(inotify_rm_watch, int, fd, __s32, wd)
return -EBADF;
/* verify that this is indeed an inotify instance */
if (unlikely(filp->f_op != &inotify_fops)) {
ret = -EINVAL;
ret = -EINVAL;
if (unlikely(filp->f_op != &inotify_fops))
goto out;
}
group = filp->private_data;
spin_lock(&group->inotify_data.idr_lock);
entry = idr_find(&group->inotify_data.idr, wd);
if (unlikely(!entry)) {
spin_unlock(&group->inotify_data.idr_lock);
ret = -EINVAL;
ret = -EINVAL;
ientry = inotify_idr_find(group, wd);
if (unlikely(!ientry))
goto out;
}
fsnotify_get_mark(entry);
spin_unlock(&group->inotify_data.idr_lock);
fsnotify_destroy_mark_by_entry(entry);
fsnotify_put_mark(entry);
ret = 0;
fsnotify_destroy_mark_by_entry(&ientry->fsn_entry);
/* match ref taken by inotify_idr_find */
fsnotify_put_mark(&ientry->fsn_entry);
out:
fput_light(filp, fput_needed);
......
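inotify_rm_watch() now resolves the wd through inotify_idr_find(), which takes its own mark reference, and watch removal still queues IN_IGNORED to the reader. An illustrative run of that lifecycle (userspace C, not from this commit):

#include <sys/inotify.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	struct inotify_event *ev;
	int ifd = inotify_init();
	int wd = inotify_add_watch(ifd, "/tmp", IN_CREATE);

	/* tears down the mark; the kernel queues IN_IGNORED for this wd */
	inotify_rm_watch(ifd, wd);

	if (read(ifd, buf, sizeof(buf)) >= (ssize_t)sizeof(*ev)) {
		ev = (struct inotify_event *)buf;
		printf("wd=%d ignored=%d\n", ev->wd,
		       !!(ev->mask & IN_IGNORED));
	}
	return 0;
}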
......@@ -56,7 +56,7 @@ static struct kmem_cache *fsnotify_event_holder_cachep;
* it is needed. Its refcnt is set to 1 at kernel init time and will never
* get set to 0 so it will never get 'freed'
*/
static struct fsnotify_event q_overflow_event;
static struct fsnotify_event *q_overflow_event;
static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
/**
......@@ -104,7 +104,8 @@ struct fsnotify_event_holder *fsnotify_alloc_event_holder(void)
void fsnotify_destroy_event_holder(struct fsnotify_event_holder *holder)
{
kmem_cache_free(fsnotify_event_holder_cachep, holder);
if (holder)
kmem_cache_free(fsnotify_event_holder_cachep, holder);
}
/*
......@@ -128,54 +129,18 @@ struct fsnotify_event_private_data *fsnotify_remove_priv_from_event(struct fsnot
return priv;
}
/*
* Check if 2 events contain the same information. We do not compare private data
* but at this moment that isn't a problem for any known fsnotify listeners.
*/
static bool event_compare(struct fsnotify_event *old, struct fsnotify_event *new)
{
if ((old->mask == new->mask) &&
(old->to_tell == new->to_tell) &&
(old->data_type == new->data_type) &&
(old->name_len == new->name_len)) {
switch (old->data_type) {
case (FSNOTIFY_EVENT_INODE):
/* remember, after old was put on the wait_q we aren't
* allowed to look at the inode any more, only thing
* left to check was if the file_name is the same */
if (old->name_len &&
!strcmp(old->file_name, new->file_name))
return true;
break;
case (FSNOTIFY_EVENT_PATH):
if ((old->path.mnt == new->path.mnt) &&
(old->path.dentry == new->path.dentry))
return true;
break;
case (FSNOTIFY_EVENT_NONE):
if (old->mask & FS_Q_OVERFLOW)
return true;
else if (old->mask & FS_IN_IGNORED)
return false;
return false;
};
}
return false;
}
/*
* Add an event to the group notification queue. The group can later pull this
* event off the queue to deal with. If the event is successfully added to the
* group's notification queue, a reference is taken on event.
*/
int fsnotify_add_notify_event(struct fsnotify_group *group, struct fsnotify_event *event,
struct fsnotify_event_private_data *priv)
struct fsnotify_event_private_data *priv,
int (*merge)(struct list_head *, struct fsnotify_event *))
{
struct fsnotify_event_holder *holder = NULL;
struct list_head *list = &group->notification_list;
struct fsnotify_event_holder *last_holder;
struct fsnotify_event *last_event;
int ret = 0;
int rc = 0;
/*
* There is one fsnotify_event_holder embedded inside each fsnotify_event.
......@@ -195,12 +160,24 @@ alloc_holder:
mutex_lock(&group->notification_mutex);
if (group->q_len >= group->max_events) {
event = &q_overflow_event;
ret = -EOVERFLOW;
event = q_overflow_event;
rc = -EOVERFLOW;
/* sorry, no private data on the overflow event */
priv = NULL;
}
if (!list_empty(list) && merge) {
int ret;
ret = merge(list, event);
if (ret) {
mutex_unlock(&group->notification_mutex);
if (holder != &event->holder)
fsnotify_destroy_event_holder(holder);
return ret;
}
}
spin_lock(&event->lock);
if (list_empty(&event->holder.event_list)) {
......@@ -215,18 +192,6 @@ alloc_holder:
goto alloc_holder;
}
if (!list_empty(list)) {
last_holder = list_entry(list->prev, struct fsnotify_event_holder, event_list);
last_event = last_holder->event;
if (event_compare(last_event, event)) {
spin_unlock(&event->lock);
mutex_unlock(&group->notification_mutex);
if (holder != &event->holder)
fsnotify_destroy_event_holder(holder);
return -EEXIST;
}
}
group->q_len++;
holder->event = event;
......@@ -238,7 +203,7 @@ alloc_holder:
mutex_unlock(&group->notification_mutex);
wake_up(&group->notification_waitq);
return ret;
return rc;
}
/*
......@@ -314,25 +279,77 @@ void fsnotify_flush_notify(struct fsnotify_group *group)
static void initialize_event(struct fsnotify_event *event)
{
event->holder.event = NULL;
INIT_LIST_HEAD(&event->holder.event_list);
atomic_set(&event->refcnt, 1);
spin_lock_init(&event->lock);
event->path.dentry = NULL;
event->path.mnt = NULL;
event->inode = NULL;
event->data_type = FSNOTIFY_EVENT_NONE;
INIT_LIST_HEAD(&event->private_data_list);
event->to_tell = NULL;
event->file_name = NULL;
event->name_len = 0;
event->sync_cookie = 0;
}
/*
* Caller damn well better be holding whatever mutex is protecting the
* old_holder->event_list and the new_event must be a clean event which
* cannot be found anywhere else in the kernel.
*/
int fsnotify_replace_event(struct fsnotify_event_holder *old_holder,
struct fsnotify_event *new_event)
{
struct fsnotify_event *old_event = old_holder->event;
struct fsnotify_event_holder *new_holder = &new_event->holder;
enum event_spinlock_class {
SPINLOCK_OLD,
SPINLOCK_NEW,
};
/*
* if the new_event's embedded holder is in use someone
* screwed up and didn't give us a clean new event.
*/
BUG_ON(!list_empty(&new_holder->event_list));
spin_lock_nested(&old_event->lock, SPINLOCK_OLD);
spin_lock_nested(&new_event->lock, SPINLOCK_NEW);
new_holder->event = new_event;
list_replace_init(&old_holder->event_list, &new_holder->event_list);
spin_unlock(&new_event->lock);
spin_unlock(&old_event->lock);
/* event == holder means we are referenced through the in event holder */
if (old_holder != &old_event->holder)
fsnotify_destroy_event_holder(old_holder);
fsnotify_get_event(new_event); /* on the list take reference */
fsnotify_put_event(old_event); /* off the list, drop reference */
return 0;
}
struct fsnotify_event *fsnotify_clone_event(struct fsnotify_event *old_event)
{
struct fsnotify_event *event;
event = kmem_cache_alloc(fsnotify_event_cachep, GFP_KERNEL);
if (!event)
return NULL;
memcpy(event, old_event, sizeof(*event));
initialize_event(event);
if (event->name_len) {
event->file_name = kstrdup(old_event->file_name, GFP_KERNEL);
if (!event->file_name) {
kmem_cache_free(fsnotify_event_cachep, event);
return NULL;
}
}
if (event->data_type == FSNOTIFY_EVENT_PATH)
path_get(&event->path);
return event;
}
/*
......@@ -353,7 +370,7 @@ struct fsnotify_event *fsnotify_create_event(struct inode *to_tell, __u32 mask,
{
struct fsnotify_event *event;
event = kmem_cache_alloc(fsnotify_event_cachep, gfp);
event = kmem_cache_zalloc(fsnotify_event_cachep, gfp);
if (!event)
return NULL;
......@@ -370,6 +387,7 @@ struct fsnotify_event *fsnotify_create_event(struct inode *to_tell, __u32 mask,
event->sync_cookie = cookie;
event->to_tell = to_tell;
event->data_type = data_type;
switch (data_type) {
case FSNOTIFY_EVENT_FILE: {
......@@ -386,12 +404,10 @@ struct fsnotify_event *fsnotify_create_event(struct inode *to_tell, __u32 mask,
event->path.dentry = path->dentry;
event->path.mnt = path->mnt;
path_get(&event->path);
event->data_type = FSNOTIFY_EVENT_PATH;
break;
}
case FSNOTIFY_EVENT_INODE:
event->inode = data;
event->data_type = FSNOTIFY_EVENT_INODE;
break;
case FSNOTIFY_EVENT_NONE:
event->inode = NULL;
......@@ -412,8 +428,11 @@ __init int fsnotify_notification_init(void)
fsnotify_event_cachep = KMEM_CACHE(fsnotify_event, SLAB_PANIC);
fsnotify_event_holder_cachep = KMEM_CACHE(fsnotify_event_holder, SLAB_PANIC);
initialize_event(&q_overflow_event);
q_overflow_event.mask = FS_Q_OVERFLOW;
q_overflow_event = fsnotify_create_event(NULL, FS_Q_OVERFLOW, NULL,
FSNOTIFY_EVENT_NONE, NULL, 0,
GFP_KERNEL);
if (!q_overflow_event)
panic("unable to allocate fsnotify q_overflow_event\n");
return 0;
}
......
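q_overflow_event is now allocated through fsnotify_create_event() like any other event; when the queue limit is hit, the merge/holder plumbing above still delivers a single IN_Q_OVERFLOW to userspace. An illustrative way to provoke it (userspace C, not from this commit; assumes the default max_queued_events of 16384 and uses /tmp as a convenient target):

#include <sys/inotify.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096], path[64];
	int ifd = inotify_init1(IN_NONBLOCK);
	ssize_t len, off;
	int i, overflowed = 0;

	inotify_add_watch(ifd, "/tmp", IN_CREATE | IN_DELETE);

	/* two distinct events per iteration, never read: overrun the queue */
	for (i = 0; i < 20000; i++) {
		snprintf(path, sizeof(path), "/tmp/qdemo-%d", i & 1);
		close(open(path, O_CREAT | O_WRONLY, 0600));
		unlink(path);
	}

	while ((len = read(ifd, buf, sizeof(buf))) > 0)
		for (off = 0; off < len;
		     off += sizeof(struct inotify_event)
			    + ((struct inotify_event *)(buf + off))->len)
			if (((struct inotify_event *)(buf + off))->mask
			    & IN_Q_OVERFLOW)
				overflowed = 1;

	printf("saw IN_Q_OVERFLOW: %d\n", overflowed);
	return 0;
}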
......@@ -1042,7 +1042,7 @@ long do_sys_open(int dfd, const char __user *filename, int flags, int mode)
put_unused_fd(fd);
fd = PTR_ERR(f);
} else {
fsnotify_open(f->f_path.dentry);
fsnotify_open(f);
fd_install(fd, f);
}
}
......
......@@ -293,7 +293,7 @@ ssize_t vfs_read(struct file *file, char __user *buf, size_t count, loff_t *pos)
else
ret = do_sync_read(file, buf, count, pos);
if (ret > 0) {
fsnotify_access(file->f_path.dentry);
fsnotify_access(file);
add_rchar(current, ret);
}
inc_syscr(current);
......@@ -348,7 +348,7 @@ ssize_t vfs_write(struct file *file, const char __user *buf, size_t count, loff_
else
ret = do_sync_write(file, buf, count, pos);
if (ret > 0) {
fsnotify_modify(file->f_path.dentry);
fsnotify_modify(file);
add_wchar(current, ret);
}
inc_syscw(current);
......@@ -656,9 +656,9 @@ out:
kfree(iov);
if ((ret + (type == READ)) > 0) {
if (type == READ)
fsnotify_access(file->f_path.dentry);
fsnotify_access(file);
else
fsnotify_modify(file->f_path.dentry);
fsnotify_modify(file);
}
return ret;
}
......
......@@ -29,9 +29,14 @@ static inline void fsnotify_d_instantiate(struct dentry *entry,
}
/* Notify this dentry's parent about a child's events. */
static inline void fsnotify_parent(struct dentry *dentry, __u32 mask)
static inline void fsnotify_parent(struct file *file, struct dentry *dentry, __u32 mask)
{
__fsnotify_parent(dentry, mask);
BUG_ON(file && dentry);
if (file)
dentry = file->f_path.dentry;
__fsnotify_parent(file, dentry, mask);
inotify_dentry_parent_queue_event(dentry, mask, 0, dentry->d_name.name);
}
......@@ -124,7 +129,7 @@ static inline void fsnotify_nameremove(struct dentry *dentry, int isdir)
if (isdir)
mask |= FS_IN_ISDIR;
fsnotify_parent(dentry, mask);
fsnotify_parent(NULL, dentry, mask);
}
/*
......@@ -183,9 +188,9 @@ static inline void fsnotify_mkdir(struct inode *inode, struct dentry *dentry)
/*
* fsnotify_access - file was read
*/
static inline void fsnotify_access(struct dentry *dentry)
static inline void fsnotify_access(struct file *file)
{
struct inode *inode = dentry->d_inode;
struct inode *inode = file->f_path.dentry->d_inode;
__u32 mask = FS_ACCESS;
if (S_ISDIR(inode->i_mode))
......@@ -193,16 +198,16 @@ static inline void fsnotify_access(struct dentry *dentry)
inotify_inode_queue_event(inode, mask, 0, NULL, NULL);
fsnotify_parent(dentry, mask);
fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0);
fsnotify_parent(file, NULL, mask);
fsnotify(inode, mask, file, FSNOTIFY_EVENT_FILE, NULL, 0);
}
/*
* fsnotify_modify - file was modified
*/
static inline void fsnotify_modify(struct dentry *dentry)
static inline void fsnotify_modify(struct file *file)
{
struct inode *inode = dentry->d_inode;
struct inode *inode = file->f_path.dentry->d_inode;
__u32 mask = FS_MODIFY;
if (S_ISDIR(inode->i_mode))
......@@ -210,16 +215,16 @@ static inline void fsnotify_modify(struct dentry *dentry)
inotify_inode_queue_event(inode, mask, 0, NULL, NULL);
fsnotify_parent(dentry, mask);
fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0);
fsnotify_parent(file, NULL, mask);
fsnotify(inode, mask, file, FSNOTIFY_EVENT_FILE, NULL, 0);
}
/*
* fsnotify_open - file was opened
*/
static inline void fsnotify_open(struct dentry *dentry)
static inline void fsnotify_open(struct file *file)
{
struct inode *inode = dentry->d_inode;
struct inode *inode = file->f_path.dentry->d_inode;
__u32 mask = FS_OPEN;
if (S_ISDIR(inode->i_mode))
......@@ -227,8 +232,8 @@ static inline void fsnotify_open(struct dentry *dentry)
inotify_inode_queue_event(inode, mask, 0, NULL, NULL);
fsnotify_parent(dentry, mask);
fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0);
fsnotify_parent(file, NULL, mask);
fsnotify(inode, mask, file, FSNOTIFY_EVENT_FILE, NULL, 0);
}
/*
......@@ -236,8 +241,7 @@ static inline void fsnotify_open(struct dentry *dentry)
*/
static inline void fsnotify_close(struct file *file)
{
struct dentry *dentry = file->f_path.dentry;
struct inode *inode = dentry->d_inode;
struct inode *inode = file->f_path.dentry->d_inode;
fmode_t mode = file->f_mode;
__u32 mask = (mode & FMODE_WRITE) ? FS_CLOSE_WRITE : FS_CLOSE_NOWRITE;
......@@ -246,7 +250,7 @@ static inline void fsnotify_close(struct file *file)
inotify_inode_queue_event(inode, mask, 0, NULL, NULL);
fsnotify_parent(dentry, mask);
fsnotify_parent(file, NULL, mask);
fsnotify(inode, mask, file, FSNOTIFY_EVENT_FILE, NULL, 0);
}
......@@ -263,7 +267,7 @@ static inline void fsnotify_xattr(struct dentry *dentry)
inotify_inode_queue_event(inode, mask, 0, NULL, NULL);
fsnotify_parent(dentry, mask);
fsnotify_parent(NULL, dentry, mask);
fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0);
}
......@@ -299,7 +303,7 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
mask |= FS_IN_ISDIR;
inotify_inode_queue_event(inode, mask, 0, NULL, NULL);
fsnotify_parent(dentry, mask);
fsnotify_parent(NULL, dentry, mask);
fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0);
}
}
......
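Taken together, the fsnotify.h hunks settle one convention: hooks that hold a struct file pass it (and NULL for the dentry) so the event carries FSNOTIFY_EVENT_FILE, while dentry-only hooks pass a NULL file and keep FSNOTIFY_EVENT_INODE. Compressed to its two shapes (illustrative fragment, names as in the hunks above):

/* file-backed hook: the dentry/inode are derived from the file */
fsnotify_parent(file, NULL, mask);
fsnotify(inode, mask, file, FSNOTIFY_EVENT_FILE, NULL, 0);

/* dentry-only hook: no struct file available */
fsnotify_parent(NULL, dentry, mask);
fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0);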
......@@ -58,9 +58,7 @@
FS_MOVED_FROM | FS_MOVED_TO | FS_CREATE |\
FS_DELETE)
/* listeners that hard code group numbers near the top */
#define DNOTIFY_GROUP_NUM UINT_MAX
#define INOTIFY_GROUP_NUM (DNOTIFY_GROUP_NUM-1)
#define FS_MOVE (FS_MOVED_FROM | FS_MOVED_TO)
struct fsnotify_group;
struct fsnotify_event;
......@@ -80,7 +78,8 @@ struct fsnotify_event_private_data;
* valid group and inode to use to clean up.
*/
struct fsnotify_ops {
bool (*should_send_event)(struct fsnotify_group *group, struct inode *inode, __u32 mask);
bool (*should_send_event)(struct fsnotify_group *group, struct inode *inode,
__u32 mask, void *data, int data_type);
int (*handle_event)(struct fsnotify_group *group, struct fsnotify_event *event);
void (*free_group_priv)(struct fsnotify_group *group);
void (*freeing_mark)(struct fsnotify_mark_entry *entry, struct fsnotify_group *group);
......@@ -119,7 +118,6 @@ struct fsnotify_group {
* closed.
*/
atomic_t refcnt; /* things with interest in this group */
unsigned int group_num; /* simply prevents accidental group collision */
const struct fsnotify_ops *ops; /* how this group handles things */
......@@ -254,7 +252,7 @@ struct fsnotify_mark_entry {
/* main fsnotify call to send events */
extern void fsnotify(struct inode *to_tell, __u32 mask, void *data, int data_is,
const char *name, u32 cookie);
extern void __fsnotify_parent(struct dentry *dentry, __u32 mask);
extern void __fsnotify_parent(struct file *file, struct dentry *dentry, __u32 mask);
extern void __fsnotify_inode_delete(struct inode *inode);
extern u32 fsnotify_get_cookie(void);
......@@ -307,8 +305,7 @@ static inline void __fsnotify_d_instantiate(struct dentry *dentry, struct inode
/* must call when a group changes its ->mask */
extern void fsnotify_recalc_global_mask(void);
/* get a reference to an existing or create a new group */
extern struct fsnotify_group *fsnotify_obtain_group(unsigned int group_num,
__u32 mask,
extern struct fsnotify_group *fsnotify_obtain_group(__u32 mask,
const struct fsnotify_ops *ops);
/* run all marks associated with this group and update group->mask */
extern void fsnotify_recalc_group_mask(struct fsnotify_group *group);
......@@ -323,8 +320,10 @@ extern struct fsnotify_event_private_data *fsnotify_remove_priv_from_event(struc
struct fsnotify_event *event);
/* attach the event to the group notification queue */
extern int fsnotify_add_notify_event(struct fsnotify_group *group, struct fsnotify_event *event,
struct fsnotify_event_private_data *priv);
extern int fsnotify_add_notify_event(struct fsnotify_group *group,
struct fsnotify_event *event,
struct fsnotify_event_private_data *priv,
int (*merge)(struct list_head *, struct fsnotify_event *));
/* true if the group notification queue is empty */
extern bool fsnotify_notify_queue_is_empty(struct fsnotify_group *group);
/* return, but do not dequeue the first event on the notification queue */
......@@ -339,8 +338,10 @@ extern void fsnotify_recalc_inode_mask(struct inode *inode);
extern void fsnotify_init_mark(struct fsnotify_mark_entry *entry, void (*free_mark)(struct fsnotify_mark_entry *entry));
/* find (and take a reference) to a mark associated with group and inode */
extern struct fsnotify_mark_entry *fsnotify_find_mark_entry(struct fsnotify_group *group, struct inode *inode);
/* copy the values from old into new */
extern void fsnotify_duplicate_mark(struct fsnotify_mark_entry *new, struct fsnotify_mark_entry *old);
/* attach the mark to both the group and the inode */
extern int fsnotify_add_mark(struct fsnotify_mark_entry *entry, struct fsnotify_group *group, struct inode *inode);
extern int fsnotify_add_mark(struct fsnotify_mark_entry *entry, struct fsnotify_group *group, struct inode *inode, int allow_dups);
/* given a mark, flag it to be freed when all references are dropped */
extern void fsnotify_destroy_mark_by_entry(struct fsnotify_mark_entry *entry);
/* run all the marks in a group, and flag them to be freed */
......@@ -354,13 +355,18 @@ extern struct fsnotify_event *fsnotify_create_event(struct inode *to_tell, __u32
void *data, int data_is, const char *name,
u32 cookie, gfp_t gfp);
/* fanotify likes to change events after they are on lists... */
extern struct fsnotify_event *fsnotify_clone_event(struct fsnotify_event *old_event);
extern int fsnotify_replace_event(struct fsnotify_event_holder *old_holder,
struct fsnotify_event *new_event);
#else
static inline void fsnotify(struct inode *to_tell, __u32 mask, void *data, int data_is,
const char *name, u32 cookie)
{}
static inline void __fsnotify_parent(struct dentry *dentry, __u32 mask)
static inline void __fsnotify_parent(struct file *file, struct dentry *dentry, __u32 mask)
{}
static inline void __fsnotify_inode_delete(struct inode *inode)
......
......@@ -302,13 +302,17 @@ config AUDITSYSCALL
help
Enable low-overhead system-call auditing infrastructure that
can be used independently or with another kernel subsystem,
such as SELinux. To use audit's filesystem watch feature, please
ensure that INOTIFY is configured.
such as SELinux.
config AUDIT_WATCH
def_bool y
depends on AUDITSYSCALL
select FSNOTIFY
config AUDIT_TREE
def_bool y
depends on AUDITSYSCALL
select INOTIFY
select FSNOTIFY
menu "RCU Subsystem"
......
......@@ -69,10 +69,11 @@ obj-$(CONFIG_IKCONFIG) += configs.o
obj-$(CONFIG_RESOURCE_COUNTERS) += res_counter.o
obj-$(CONFIG_STOP_MACHINE) += stop_machine.o
obj-$(CONFIG_KPROBES_SANITY_TEST) += test_kprobes.o
obj-$(CONFIG_AUDIT) += audit.o auditfilter.o audit_watch.o
obj-$(CONFIG_AUDIT) += audit.o auditfilter.o
obj-$(CONFIG_AUDITSYSCALL) += auditsc.o
obj-$(CONFIG_GCOV_KERNEL) += gcov/
obj-$(CONFIG_AUDIT_WATCH) += audit_watch.o
obj-$(CONFIG_AUDIT_TREE) += audit_tree.o
obj-$(CONFIG_GCOV_KERNEL) += gcov/
obj-$(CONFIG_KPROBES) += kprobes.o
obj-$(CONFIG_KGDB) += kgdb.o
obj-$(CONFIG_DETECT_SOFTLOCKUP) += softlockup.o
......
......@@ -55,7 +55,6 @@
#include <net/netlink.h>
#include <linux/skbuff.h>
#include <linux/netlink.h>
#include <linux/inotify.h>
#include <linux/freezer.h>
#include <linux/tty.h>
......
......@@ -103,21 +103,27 @@ extern struct mutex audit_filter_mutex;
extern void audit_free_rule_rcu(struct rcu_head *);
extern struct list_head audit_filter_list[];
extern struct audit_entry *audit_dupe_rule(struct audit_krule *old);
/* audit watch functions */
extern unsigned long audit_watch_inode(struct audit_watch *watch);
extern dev_t audit_watch_dev(struct audit_watch *watch);
#ifdef CONFIG_AUDIT_WATCH
extern void audit_put_watch(struct audit_watch *watch);
extern void audit_get_watch(struct audit_watch *watch);
extern int audit_to_watch(struct audit_krule *krule, char *path, int len, u32 op);
extern int audit_add_watch(struct audit_krule *krule);
extern void audit_remove_watch(struct audit_watch *watch);
extern void audit_remove_watch_rule(struct audit_krule *krule, struct list_head *list);
extern void audit_inotify_unregister(struct list_head *in_list);
extern int audit_add_watch(struct audit_krule *krule, struct list_head **list);
extern void audit_remove_watch_rule(struct audit_krule *krule);
extern char *audit_watch_path(struct audit_watch *watch);
extern struct list_head *audit_watch_rules(struct audit_watch *watch);
extern struct audit_entry *audit_dupe_rule(struct audit_krule *old,
struct audit_watch *watch);
extern int audit_watch_compare(struct audit_watch *watch, unsigned long ino, dev_t dev);
#else
#define audit_put_watch(w) {}
#define audit_get_watch(w) {}
#define audit_to_watch(k, p, l, o) (-EINVAL)
#define audit_add_watch(k, l) (-EINVAL)
#define audit_remove_watch_rule(k) BUG()
#define audit_watch_path(w) ""
#define audit_watch_compare(w, i, d) 0
#endif /* CONFIG_AUDIT_WATCH */
#ifdef CONFIG_AUDIT_TREE
extern struct audit_chunk *audit_tree_lookup(const struct inode *);
......
#include "audit.h"
#include <linux/inotify.h>
#include <linux/fsnotify_backend.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/kthread.h>
......@@ -21,7 +21,7 @@ struct audit_tree {
struct audit_chunk {
struct list_head hash;
struct inotify_watch watch;
struct fsnotify_mark_entry mark;
struct list_head trees; /* with root here */
int dead;
int count;
......@@ -67,7 +67,7 @@ static LIST_HEAD(prune_list);
* that makes a difference. Some.
*/
static struct inotify_handle *rtree_ih;
static struct fsnotify_group *audit_tree_group;
static struct audit_tree *alloc_tree(const char *s)
{
......@@ -110,29 +110,6 @@ const char *audit_tree_path(struct audit_tree *tree)
return tree->pathname;
}
static struct audit_chunk *alloc_chunk(int count)
{
struct audit_chunk *chunk;
size_t size;
int i;
size = offsetof(struct audit_chunk, owners) + count * sizeof(struct node);
chunk = kzalloc(size, GFP_KERNEL);
if (!chunk)
return NULL;
INIT_LIST_HEAD(&chunk->hash);
INIT_LIST_HEAD(&chunk->trees);
chunk->count = count;
atomic_long_set(&chunk->refs, 1);
for (i = 0; i < count; i++) {
INIT_LIST_HEAD(&chunk->owners[i].list);
chunk->owners[i].index = i;
}
inotify_init_watch(&chunk->watch);
return chunk;
}
static void free_chunk(struct audit_chunk *chunk)
{
int i;
......@@ -156,6 +133,35 @@ static void __put_chunk(struct rcu_head *rcu)
audit_put_chunk(chunk);
}
static void audit_tree_destroy_watch(struct fsnotify_mark_entry *entry)
{
struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark);
call_rcu(&chunk->head, __put_chunk);
}
static struct audit_chunk *alloc_chunk(int count)
{
struct audit_chunk *chunk;
size_t size;
int i;
size = offsetof(struct audit_chunk, owners) + count * sizeof(struct node);
chunk = kzalloc(size, GFP_KERNEL);
if (!chunk)
return NULL;
INIT_LIST_HEAD(&chunk->hash);
INIT_LIST_HEAD(&chunk->trees);
chunk->count = count;
atomic_long_set(&chunk->refs, 1);
for (i = 0; i < count; i++) {
INIT_LIST_HEAD(&chunk->owners[i].list);
chunk->owners[i].index = i;
}
fsnotify_init_mark(&chunk->mark, audit_tree_destroy_watch);
return chunk;
}
enum {HASH_SIZE = 128};
static struct list_head chunk_hash_heads[HASH_SIZE];
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(hash_lock);
......@@ -166,10 +172,15 @@ static inline struct list_head *chunk_hash(const struct inode *inode)
return chunk_hash_heads + n % HASH_SIZE;
}
/* hash_lock is held by caller */
/* hash_lock & entry->lock is held by caller */
static void insert_hash(struct audit_chunk *chunk)
{
struct list_head *list = chunk_hash(chunk->watch.inode);
struct fsnotify_mark_entry *entry = &chunk->mark;
struct list_head *list;
if (!entry->inode)
return;
list = chunk_hash(entry->inode);
list_add_rcu(&chunk->hash, list);
}
......@@ -180,7 +191,8 @@ struct audit_chunk *audit_tree_lookup(const struct inode *inode)
struct audit_chunk *p;
list_for_each_entry_rcu(p, list, hash) {
if (p->watch.inode == inode) {
/* mark.inode may have gone NULL, but who cares? */
if (p->mark.inode == inode) {
atomic_long_inc(&p->refs);
return p;
}
......@@ -209,38 +221,19 @@ static struct audit_chunk *find_chunk(struct node *p)
static void untag_chunk(struct node *p)
{
struct audit_chunk *chunk = find_chunk(p);
struct fsnotify_mark_entry *entry = &chunk->mark;
struct audit_chunk *new;
struct audit_tree *owner;
int size = chunk->count - 1;
int i, j;
if (!pin_inotify_watch(&chunk->watch)) {
/*
* Filesystem is shutting down; all watches are getting
* evicted, just take it off the node list for this
* tree and let the eviction logics take care of the
* rest.
*/
owner = p->owner;
if (owner->root == chunk) {
list_del_init(&owner->same_root);
owner->root = NULL;
}
list_del_init(&p->list);
p->owner = NULL;
put_tree(owner);
return;
}
fsnotify_get_mark(entry);
spin_unlock(&hash_lock);
/*
* pin_inotify_watch() succeeded, so the watch won't go away
* from under us.
*/
mutex_lock(&chunk->watch.inode->inotify_mutex);
if (chunk->dead) {
mutex_unlock(&chunk->watch.inode->inotify_mutex);
spin_lock(&entry->lock);
if (chunk->dead || !entry->inode) {
spin_unlock(&entry->lock);
goto out;
}
......@@ -255,16 +248,17 @@ static void untag_chunk(struct node *p)
list_del_init(&p->list);
list_del_rcu(&chunk->hash);
spin_unlock(&hash_lock);
inotify_evict_watch(&chunk->watch);
mutex_unlock(&chunk->watch.inode->inotify_mutex);
put_inotify_watch(&chunk->watch);
spin_unlock(&entry->lock);
fsnotify_destroy_mark_by_entry(entry);
fsnotify_put_mark(entry);
goto out;
}
new = alloc_chunk(size);
if (!new)
goto Fallback;
if (inotify_clone_watch(&chunk->watch, &new->watch) < 0) {
fsnotify_duplicate_mark(&new->mark, entry);
if (fsnotify_add_mark(&new->mark, new->mark.group, new->mark.inode, 1)) {
free_chunk(new);
goto Fallback;
}
......@@ -297,9 +291,9 @@ static void untag_chunk(struct node *p)
list_for_each_entry(owner, &new->trees, same_root)
owner->root = new;
spin_unlock(&hash_lock);
inotify_evict_watch(&chunk->watch);
mutex_unlock(&chunk->watch.inode->inotify_mutex);
put_inotify_watch(&chunk->watch);
spin_unlock(&entry->lock);
fsnotify_destroy_mark_by_entry(entry);
fsnotify_put_mark(entry);
goto out;
Fallback:
......@@ -313,31 +307,33 @@ Fallback:
p->owner = NULL;
put_tree(owner);
spin_unlock(&hash_lock);
mutex_unlock(&chunk->watch.inode->inotify_mutex);
spin_unlock(&entry->lock);
out:
unpin_inotify_watch(&chunk->watch);
fsnotify_put_mark(entry);
spin_lock(&hash_lock);
}
static int create_chunk(struct inode *inode, struct audit_tree *tree)
{
struct fsnotify_mark_entry *entry;
struct audit_chunk *chunk = alloc_chunk(1);
if (!chunk)
return -ENOMEM;
if (inotify_add_watch(rtree_ih, &chunk->watch, inode, IN_IGNORED | IN_DELETE_SELF) < 0) {
entry = &chunk->mark;
if (fsnotify_add_mark(entry, audit_tree_group, inode, 0)) {
free_chunk(chunk);
return -ENOSPC;
}
mutex_lock(&inode->inotify_mutex);
spin_lock(&entry->lock);
spin_lock(&hash_lock);
if (tree->goner) {
spin_unlock(&hash_lock);
chunk->dead = 1;
inotify_evict_watch(&chunk->watch);
mutex_unlock(&inode->inotify_mutex);
put_inotify_watch(&chunk->watch);
spin_unlock(&entry->lock);
fsnotify_destroy_mark_by_entry(entry);
fsnotify_put_mark(entry);
return 0;
}
chunk->owners[0].index = (1U << 31);
......@@ -350,30 +346,33 @@ static int create_chunk(struct inode *inode, struct audit_tree *tree)
}
insert_hash(chunk);
spin_unlock(&hash_lock);
mutex_unlock(&inode->inotify_mutex);
spin_unlock(&entry->lock);
return 0;
}
/* the first tagged inode becomes root of tree */
static int tag_chunk(struct inode *inode, struct audit_tree *tree)
{
struct inotify_watch *watch;
struct fsnotify_mark_entry *old_entry, *chunk_entry;
struct audit_tree *owner;
struct audit_chunk *chunk, *old;
struct node *p;
int n;
if (inotify_find_watch(rtree_ih, inode, &watch) < 0)
spin_lock(&inode->i_lock);
old_entry = fsnotify_find_mark_entry(audit_tree_group, inode);
spin_unlock(&inode->i_lock);
if (!old_entry)
return create_chunk(inode, tree);
old = container_of(watch, struct audit_chunk, watch);
old = container_of(old_entry, struct audit_chunk, mark);
/* are we already there? */
spin_lock(&hash_lock);
for (n = 0; n < old->count; n++) {
if (old->owners[n].owner == tree) {
spin_unlock(&hash_lock);
put_inotify_watch(watch);
fsnotify_put_mark(old_entry);
return 0;
}
}
......@@ -382,22 +381,40 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)
chunk = alloc_chunk(old->count + 1);
if (!chunk)
return -ENOMEM;
chunk_entry = &chunk->mark;
mutex_lock(&inode->inotify_mutex);
if (inotify_clone_watch(&old->watch, &chunk->watch) < 0) {
mutex_unlock(&inode->inotify_mutex);
put_inotify_watch(&old->watch);
spin_lock(&old_entry->lock);
if (!old_entry->inode) {
/* old_entry is being shot, lets just lie */
spin_unlock(&old_entry->lock);
fsnotify_put_mark(old_entry);
free_chunk(chunk);
return -ENOENT;
}
fsnotify_duplicate_mark(chunk_entry, old_entry);
if (fsnotify_add_mark(chunk_entry, chunk_entry->group, chunk_entry->inode, 1)) {
spin_unlock(&old_entry->lock);
free_chunk(chunk);
fsnotify_put_mark(old_entry);
return -ENOSPC;
}
/* even though we hold old_entry->lock, this is safe since chunk_entry->lock could NEVER have been grabbed before */
spin_lock(&chunk_entry->lock);
spin_lock(&hash_lock);
/* we now hold old_entry->lock, chunk_entry->lock, and hash_lock */
if (tree->goner) {
spin_unlock(&hash_lock);
chunk->dead = 1;
inotify_evict_watch(&chunk->watch);
mutex_unlock(&inode->inotify_mutex);
put_inotify_watch(&old->watch);
put_inotify_watch(&chunk->watch);
spin_unlock(&chunk_entry->lock);
spin_unlock(&old_entry->lock);
fsnotify_destroy_mark_by_entry(chunk_entry);
fsnotify_put_mark(chunk_entry);
fsnotify_put_mark(old_entry);
return 0;
}
list_replace_init(&old->trees, &chunk->trees);
......@@ -423,9 +440,10 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)
list_add(&tree->same_root, &chunk->trees);
}
spin_unlock(&hash_lock);
inotify_evict_watch(&old->watch);
mutex_unlock(&inode->inotify_mutex);
put_inotify_watch(&old->watch);
spin_unlock(&chunk_entry->lock);
spin_unlock(&old_entry->lock);
fsnotify_destroy_mark_by_entry(old_entry);
fsnotify_put_mark(old_entry);
return 0;
}
......@@ -578,7 +596,8 @@ void audit_trim_trees(void)
spin_lock(&hash_lock);
list_for_each_entry(node, &tree->chunks, list) {
struct audit_chunk *chunk = find_chunk(node);
struct inode *inode = chunk->watch.inode;
/* this could be NULL if the watch is dying elsewhere... */
struct inode *inode = chunk->mark.inode;
struct vfsmount *mnt;
node->index |= 1U<<31;
list_for_each_entry(mnt, &list, mnt_list) {
......@@ -925,34 +944,40 @@ static void evict_chunk(struct audit_chunk *chunk)
mutex_unlock(&audit_filter_mutex);
}
static void handle_event(struct inotify_watch *watch, u32 wd, u32 mask,
u32 cookie, const char *dname, struct inode *inode)
{
struct audit_chunk *chunk = container_of(watch, struct audit_chunk, watch);
if (mask & IN_IGNORED) {
evict_chunk(chunk);
put_inotify_watch(watch);
}
}
static int audit_tree_handle_event(struct fsnotify_group *group, struct fsnotify_event *event)
{
BUG();
return -EOPNOTSUPP;
}
static void audit_tree_freeing_mark(struct fsnotify_mark_entry *entry, struct fsnotify_group *group)
{
struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark);
evict_chunk(chunk);
fsnotify_put_mark(entry);
}
static void destroy_watch(struct inotify_watch *watch)
{
struct audit_chunk *chunk = container_of(watch, struct audit_chunk, watch);
call_rcu(&chunk->head, __put_chunk);
}
static bool audit_tree_send_event(struct fsnotify_group *group, struct inode *inode,
__u32 mask, void *data, int data_type)
{
return 0;
}
static const struct inotify_operations rtree_inotify_ops = {
.handle_event = handle_event,
.destroy_watch = destroy_watch,
static const struct fsnotify_ops audit_tree_ops = {
.handle_event = audit_tree_handle_event,
.should_send_event = audit_tree_send_event,
.free_group_priv = NULL,
.free_event_priv = NULL,
.freeing_mark = audit_tree_freeing_mark,
};
static int __init audit_tree_init(void)
{
int i;
rtree_ih = inotify_init(&rtree_inotify_ops);
if (IS_ERR(rtree_ih))
audit_tree_group = fsnotify_obtain_group(0, &audit_tree_ops);
if (IS_ERR(audit_tree_group))
audit_panic("cannot initialize inotify handle for rectree watches");
for (i = 0; i < HASH_SIZE; i++)
......
......@@ -24,17 +24,17 @@
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/fs.h>
#include <linux/fsnotify_backend.h>
#include <linux/namei.h>
#include <linux/netlink.h>
#include <linux/sched.h>
#include <linux/inotify.h>
#include <linux/security.h>
#include "audit.h"
/*
* Reference counting:
*
* audit_parent: lifetime is from audit_init_parent() to receipt of an IN_IGNORED
* audit_parent: lifetime is from audit_init_parent() to receipt of an FS_IGNORED
* event. Each audit_watch holds a reference to its associated parent.
*
* audit_watch: if added to lists, lifetime is from audit_init_watch() to
......@@ -45,45 +45,69 @@
struct audit_watch {
atomic_t count; /* reference count */
char *path; /* insertion path */
dev_t dev; /* associated superblock device */
char *path; /* insertion path */
unsigned long ino; /* associated inode number */
struct audit_parent *parent; /* associated parent */
struct list_head wlist; /* entry in parent->watches list */
struct list_head rules; /* associated rules */
struct list_head rules; /* anchor for krule->rlist */
};
struct audit_parent {
struct list_head ilist; /* entry in inotify registration list */
struct list_head watches; /* associated watches */
struct inotify_watch wdata; /* inotify watch data */
unsigned flags; /* status flags */
struct list_head watches; /* anchor for audit_watch->wlist */
struct fsnotify_mark_entry mark; /* fsnotify mark on the inode */
};
/* Inotify handle. */
struct inotify_handle *audit_ih;
/* fsnotify handle. */
struct fsnotify_group *audit_watch_group;
/*
* audit_parent status flags:
*
* AUDIT_PARENT_INVALID - set anytime rules/watches are auto-removed due to
* a filesystem event to ensure we're adding audit watches to a valid parent.
* Technically not needed for IN_DELETE_SELF or IN_UNMOUNT events, as we cannot
* receive them while we have nameidata, but must be used for IN_MOVE_SELF which
* we can receive while holding nameidata.
*/
#define AUDIT_PARENT_INVALID 0x001
/* fsnotify events we care about. */
#define AUDIT_FS_WATCH (FS_MOVE | FS_CREATE | FS_DELETE | FS_DELETE_SELF |\
FS_MOVE_SELF | FS_EVENT_ON_CHILD)
/* Inotify events we care about. */
#define AUDIT_IN_WATCH IN_MOVE|IN_CREATE|IN_DELETE|IN_DELETE_SELF|IN_MOVE_SELF
static void audit_free_parent(struct audit_parent *parent)
{
WARN_ON(!list_empty(&parent->watches));
kfree(parent);
}
static void audit_free_parent(struct inotify_watch *i_watch)
static void audit_watch_free_mark(struct fsnotify_mark_entry *entry)
{
struct audit_parent *parent;
parent = container_of(i_watch, struct audit_parent, wdata);
WARN_ON(!list_empty(&parent->watches));
kfree(parent);
parent = container_of(entry, struct audit_parent, mark);
audit_free_parent(parent);
}
static void audit_get_parent(struct audit_parent *parent)
{
if (likely(parent))
fsnotify_get_mark(&parent->mark);
}
static void audit_put_parent(struct audit_parent *parent)
{
if (likely(parent))
fsnotify_put_mark(&parent->mark);
}
/*
* Find and return the audit_parent on the given inode. If found a reference
* is taken on this parent.
*/
static inline struct audit_parent *audit_find_parent(struct inode *inode)
{
struct audit_parent *parent = NULL;
struct fsnotify_mark_entry *entry;
spin_lock(&inode->i_lock);
entry = fsnotify_find_mark_entry(audit_watch_group, inode);
spin_unlock(&inode->i_lock);
if (entry)
parent = container_of(entry, struct audit_parent, mark);
return parent;
}
void audit_get_watch(struct audit_watch *watch)
......@@ -104,7 +128,7 @@ void audit_put_watch(struct audit_watch *watch)
void audit_remove_watch(struct audit_watch *watch)
{
list_del(&watch->wlist);
put_inotify_watch(&watch->parent->wdata);
audit_put_parent(watch->parent);
watch->parent = NULL;
audit_put_watch(watch); /* match initial get */
}
......@@ -114,42 +138,32 @@ char *audit_watch_path(struct audit_watch *watch)
return watch->path;
}
struct list_head *audit_watch_rules(struct audit_watch *watch)
{
return &watch->rules;
}
unsigned long audit_watch_inode(struct audit_watch *watch)
{
return watch->ino;
}
dev_t audit_watch_dev(struct audit_watch *watch)
int audit_watch_compare(struct audit_watch *watch, unsigned long ino, dev_t dev)
{
return watch->dev;
return (watch->ino != (unsigned long)-1) &&
(watch->ino == ino) &&
(watch->dev == dev);
}
/* Initialize a parent watch entry. */
static struct audit_parent *audit_init_parent(struct nameidata *ndp)
{
struct inode *inode = ndp->path.dentry->d_inode;
struct audit_parent *parent;
s32 wd;
int ret;
parent = kzalloc(sizeof(*parent), GFP_KERNEL);
if (unlikely(!parent))
return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(&parent->watches);
parent->flags = 0;
inotify_init_watch(&parent->wdata);
/* grab a ref so inotify watch hangs around until we take audit_filter_mutex */
get_inotify_watch(&parent->wdata);
wd = inotify_add_watch(audit_ih, &parent->wdata,
ndp->path.dentry->d_inode, AUDIT_IN_WATCH);
if (wd < 0) {
audit_free_parent(&parent->wdata);
return ERR_PTR(wd);
fsnotify_init_mark(&parent->mark, audit_watch_free_mark);
parent->mark.mask = AUDIT_FS_WATCH;
ret = fsnotify_add_mark(&parent->mark, audit_watch_group, inode, 0);
if (ret < 0) {
audit_free_parent(parent);
return ERR_PTR(ret);
}
return parent;
......@@ -178,7 +192,7 @@ int audit_to_watch(struct audit_krule *krule, char *path, int len, u32 op)
{
struct audit_watch *watch;
if (!audit_ih)
if (!audit_watch_group)
return -EOPNOTSUPP;
if (path[0] != '/' || path[len-1] == '/' ||
......@@ -216,7 +230,7 @@ static struct audit_watch *audit_dupe_watch(struct audit_watch *old)
new->dev = old->dev;
new->ino = old->ino;
get_inotify_watch(&old->parent->wdata);
audit_get_parent(old->parent);
new->parent = old->parent;
out:
......@@ -250,15 +264,19 @@ static void audit_update_watch(struct audit_parent *parent,
struct audit_entry *oentry, *nentry;
mutex_lock(&audit_filter_mutex);
/* Run all of the watches on this parent looking for the one that
* matches the given dname */
list_for_each_entry_safe(owatch, nextw, &parent->watches, wlist) {
if (audit_compare_dname_path(dname, owatch->path, NULL))
continue;
/* If the update involves invalidating rules, do the inode-based
* filtering now, so we don't omit records. */
if (invalidating && current->audit_context)
if (invalidating && !audit_dummy_context())
audit_filter_inodes(current, current->audit_context);
/* updating ino will likely change which audit_hash_list we
* are on so we need a new watch for the new list */
nwatch = audit_dupe_watch(owatch);
if (IS_ERR(nwatch)) {
mutex_unlock(&audit_filter_mutex);
......@@ -274,12 +292,21 @@ static void audit_update_watch(struct audit_parent *parent,
list_del(&oentry->rule.rlist);
list_del_rcu(&oentry->list);
nentry = audit_dupe_rule(&oentry->rule, nwatch);
nentry = audit_dupe_rule(&oentry->rule);
if (IS_ERR(nentry)) {
list_del(&oentry->rule.list);
audit_panic("error updating watch, removing");
} else {
int h = audit_hash_ino((u32)ino);
/*
* nentry->rule.watch == oentry->rule.watch so
* we must drop that reference and set it to our
* new watch.
*/
audit_put_watch(nentry->rule.watch);
audit_get_watch(nwatch);
nentry->rule.watch = nwatch;
list_add(&nentry->rule.rlist, &nwatch->rules);
list_add_rcu(&nentry->list, &audit_inode_hash[h]);
list_replace(&oentry->rule.list,
......@@ -311,7 +338,6 @@ static void audit_remove_parent_watches(struct audit_parent *parent)
struct audit_entry *e;
mutex_lock(&audit_filter_mutex);
parent->flags |= AUDIT_PARENT_INVALID;
list_for_each_entry_safe(w, nextw, &parent->watches, wlist) {
list_for_each_entry_safe(r, nextr, &w->rules, rlist) {
e = container_of(r, struct audit_entry, rule);
......@@ -324,20 +350,8 @@ static void audit_remove_parent_watches(struct audit_parent *parent)
audit_remove_watch(w);
}
mutex_unlock(&audit_filter_mutex);
}
/* Unregister inotify watches for parents on in_list.
* Generates an IN_IGNORED event. */
void audit_inotify_unregister(struct list_head *in_list)
{
struct audit_parent *p, *n;
list_for_each_entry_safe(p, n, in_list, ilist) {
list_del(&p->ilist);
inotify_rm_watch(audit_ih, &p->wdata);
/* the unpin matching the pin in audit_do_del_rule() */
unpin_inotify_watch(&p->wdata);
}
fsnotify_destroy_mark_by_entry(&parent->mark);
}
/* Get path information necessary for adding watches. */
......@@ -388,7 +402,7 @@ static void audit_put_nd(struct nameidata *ndp, struct nameidata *ndw)
}
}
/* Associate the given rule with an existing parent inotify_watch.
/* Associate the given rule with an existing parent.
* Caller must hold audit_filter_mutex. */
static void audit_add_to_parent(struct audit_krule *krule,
struct audit_parent *parent)
......@@ -396,6 +410,8 @@ static void audit_add_to_parent(struct audit_krule *krule,
struct audit_watch *w, *watch = krule->watch;
int watch_found = 0;
BUG_ON(!mutex_is_locked(&audit_filter_mutex));
list_for_each_entry(w, &parent->watches, wlist) {
if (strcmp(watch->path, w->path))
continue;
......@@ -412,7 +428,7 @@ static void audit_add_to_parent(struct audit_krule *krule,
}
if (!watch_found) {
get_inotify_watch(&parent->wdata);
audit_get_parent(parent);
watch->parent = parent;
list_add(&watch->wlist, &parent->watches);
......@@ -422,13 +438,12 @@ static void audit_add_to_parent(struct audit_krule *krule,
/* Find a matching watch entry, or add this one.
* Caller must hold audit_filter_mutex. */
int audit_add_watch(struct audit_krule *krule)
int audit_add_watch(struct audit_krule *krule, struct list_head **list)
{
struct audit_watch *watch = krule->watch;
struct inotify_watch *i_watch;
struct audit_parent *parent;
struct nameidata *ndp = NULL, *ndw = NULL;
int ret = 0;
int h, ret = 0;
mutex_unlock(&audit_filter_mutex);
......@@ -440,47 +455,38 @@ int audit_add_watch(struct audit_krule *krule)
goto error;
}
mutex_lock(&audit_filter_mutex);
/* update watch filter fields */
if (ndw) {
watch->dev = ndw->path.dentry->d_inode->i_sb->s_dev;
watch->ino = ndw->path.dentry->d_inode->i_ino;
}
/* The audit_filter_mutex must not be held during inotify calls because
* we hold it during inotify event callback processing. If an existing
* inotify watch is found, inotify_find_watch() grabs a reference before
* returning.
*/
if (inotify_find_watch(audit_ih, ndp->path.dentry->d_inode,
&i_watch) < 0) {
/* either find an old parent or attach a new one */
parent = audit_find_parent(ndp->path.dentry->d_inode);
if (!parent) {
parent = audit_init_parent(ndp);
if (IS_ERR(parent)) {
/* caller expects mutex locked */
mutex_lock(&audit_filter_mutex);
ret = PTR_ERR(parent);
goto error;
}
} else
parent = container_of(i_watch, struct audit_parent, wdata);
mutex_lock(&audit_filter_mutex);
}
/* parent was moved before we took audit_filter_mutex */
if (parent->flags & AUDIT_PARENT_INVALID)
ret = -ENOENT;
else
audit_add_to_parent(krule, parent);
audit_add_to_parent(krule, parent);
/* match get in audit_init_parent or inotify_find_watch */
put_inotify_watch(&parent->wdata);
/* match get in audit_find_parent or audit_init_parent */
audit_put_parent(parent);
h = audit_hash_ino((u32)watch->ino);
*list = &audit_inode_hash[h];
error:
audit_put_nd(ndp, ndw); /* NULL args OK */
return ret;
}
void audit_remove_watch_rule(struct audit_krule *krule, struct list_head *list)
void audit_remove_watch_rule(struct audit_krule *krule)
{
struct audit_watch *watch = krule->watch;
struct audit_parent *parent = watch->parent;
......@@ -491,53 +497,91 @@ void audit_remove_watch_rule(struct audit_krule *krule, struct list_head *list)
audit_remove_watch(watch);
if (list_empty(&parent->watches)) {
/* Put parent on the inotify un-registration
* list. Grab a reference before releasing
* audit_filter_mutex, to be released in
* audit_inotify_unregister().
* If filesystem is going away, just leave
* the sucker alone, eviction will take
* care of it. */
if (pin_inotify_watch(&parent->wdata))
list_add(&parent->ilist, list);
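/* pin the parent so that destroying the mark cannot free it
 * while it is still being referenced here */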
audit_get_parent(parent);
fsnotify_destroy_mark_by_entry(&parent->mark);
audit_put_parent(parent);
}
}
}
/* Update watch data in audit rules based on inotify events. */
static void audit_handle_ievent(struct inotify_watch *i_watch, u32 wd, u32 mask,
u32 cookie, const char *dname, struct inode *inode)
static bool audit_watch_should_send_event(struct fsnotify_group *group, struct inode *inode,
__u32 mask, void *data, int data_type)
{
struct fsnotify_mark_entry *entry;
bool send;
spin_lock(&inode->i_lock);
entry = fsnotify_find_mark_entry(group, inode);
spin_unlock(&inode->i_lock);
if (!entry)
return false;
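/* marks on a directory see children's events with FS_EVENT_ON_CHILD set;
 * strip it so only the plain event bits are tested against the mask */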
mask = (mask & ~FS_EVENT_ON_CHILD);
send = (entry->mask & mask);
/* find took a reference */
fsnotify_put_mark(entry);
return send;
}
/* Update watch data in audit rules based on fsnotify events. */
static int audit_watch_handle_event(struct fsnotify_group *group, struct fsnotify_event *event)
{
struct inode *inode;
__u32 mask = event->mask;
const char *dname = event->file_name;
struct audit_parent *parent;
parent = container_of(i_watch, struct audit_parent, wdata);
BUG_ON(group != audit_watch_group);
if (mask & (IN_CREATE|IN_MOVED_TO) && inode)
audit_update_watch(parent, dname, inode->i_sb->s_dev,
inode->i_ino, 0);
else if (mask & (IN_DELETE|IN_MOVED_FROM))
parent = audit_find_parent(event->to_tell);
if (unlikely(!parent))
return 0;
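/* the event payload carries either a path or a bare inode; either way
 * only the inode is needed to refresh the watch's dev and ino below */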
switch (event->data_type) {
case (FSNOTIFY_EVENT_PATH):
inode = event->path.dentry->d_inode;
break;
case (FSNOTIFY_EVENT_INODE):
inode = event->inode;
break;
default:
BUG();
inode = NULL;
break;
}
if (mask & (FS_CREATE|FS_MOVED_TO) && inode)
audit_update_watch(parent, dname, inode->i_sb->s_dev, inode->i_ino, 0);
else if (mask & (FS_DELETE|FS_MOVED_FROM))
audit_update_watch(parent, dname, (dev_t)-1, (unsigned long)-1, 1);
/* inotify automatically removes the watch and sends IN_IGNORED */
else if (mask & (IN_DELETE_SELF|IN_UNMOUNT))
else if (mask & (FS_DELETE_SELF|FS_UNMOUNT|FS_MOVE_SELF))
audit_remove_parent_watches(parent);
/* inotify does not remove the watch, so remove it manually */
else if(mask & IN_MOVE_SELF) {
audit_remove_parent_watches(parent);
inotify_remove_watch_locked(audit_ih, i_watch);
} else if (mask & IN_IGNORED)
put_inotify_watch(i_watch);
/* moved put_inotify_watch to freeing mark */
/* matched the ref taken by audit_find_parent */
audit_put_parent(parent);
return 0;
}
static const struct inotify_operations audit_inotify_ops = {
.handle_event = audit_handle_ievent,
.destroy_watch = audit_free_parent,
static const struct fsnotify_ops audit_watch_fsnotify_ops = {
.should_send_event = audit_watch_should_send_event,
.handle_event = audit_watch_handle_event,
.free_group_priv = NULL,
.freeing_mark = NULL,
.free_event_priv = NULL,
};
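/* audit keeps no group-private or per-event private state, so the
 * remaining fsnotify hooks stay NULL */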
static int __init audit_watch_init(void)
{
audit_ih = inotify_init(&audit_inotify_ops);
if (IS_ERR(audit_ih))
audit_panic("cannot initialize inotify handle");
audit_watch_group = fsnotify_obtain_group(AUDIT_FS_WATCH,
&audit_watch_fsnotify_ops);
if (IS_ERR(audit_watch_group)) {
audit_watch_group = NULL;
audit_panic("cannot create audit fsnotify group");
}
return 0;
}
subsys_initcall(audit_watch_init);
device_initcall(audit_watch_init);
......@@ -70,6 +70,7 @@ static inline void audit_free_rule(struct audit_entry *e)
{
int i;
struct audit_krule *erule = &e->rule;
/* some rules don't have associated watches */
if (erule->watch)
audit_put_watch(erule->watch);
......@@ -745,8 +746,7 @@ static inline int audit_dupe_lsm_field(struct audit_field *df,
* rule with the new rule in the filterlist, then free the old rule.
* The rlist element is undefined; list manipulations are handled apart from
* the initial copy. */
struct audit_entry *audit_dupe_rule(struct audit_krule *old,
struct audit_watch *watch)
struct audit_entry *audit_dupe_rule(struct audit_krule *old)
{
u32 fcount = old->field_count;
struct audit_entry *entry;
......@@ -768,8 +768,8 @@ struct audit_entry *audit_dupe_rule(struct audit_krule *old,
new->prio = old->prio;
new->buflen = old->buflen;
new->inode_f = old->inode_f;
new->watch = NULL;
new->field_count = old->field_count;
/*
* note that we are OK with not refcounting here; audit_match_tree()
* never dereferences tree and we can't get false positives there
......@@ -810,9 +810,9 @@ struct audit_entry *audit_dupe_rule(struct audit_krule *old,
}
}
if (watch) {
audit_get_watch(watch);
new->watch = watch;
if (old->watch) {
audit_get_watch(old->watch);
new->watch = old->watch;
}
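/* callers that are moving a rule to a new watch (audit_update_watch())
 * drop this reference and substitute their own; see the comment there */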
return entry;
......@@ -865,7 +865,7 @@ static inline int audit_add_rule(struct audit_entry *entry)
struct audit_watch *watch = entry->rule.watch;
struct audit_tree *tree = entry->rule.tree;
struct list_head *list;
int h, err;
int err;
#ifdef CONFIG_AUDITSYSCALL
int dont_count = 0;
......@@ -888,15 +888,11 @@ static inline int audit_add_rule(struct audit_entry *entry)
if (watch) {
/* audit_filter_mutex is dropped and re-taken during this call */
err = audit_add_watch(&entry->rule);
err = audit_add_watch(&entry->rule, &list);
if (err) {
mutex_unlock(&audit_filter_mutex);
goto error;
}
/* entry->rule.watch may have changed during audit_add_watch() */
watch = entry->rule.watch;
h = audit_hash_ino((u32)audit_watch_inode(watch));
list = &audit_inode_hash[h];
}
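/* audit_add_watch() filled in *list with the audit_inode_hash[] bucket
 * for the watch's inode, computed while holding audit_filter_mutex */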
if (tree) {
err = audit_add_tree_rule(&entry->rule);
......@@ -948,7 +944,6 @@ static inline int audit_del_rule(struct audit_entry *entry)
struct audit_watch *watch = entry->rule.watch;
struct audit_tree *tree = entry->rule.tree;
struct list_head *list;
LIST_HEAD(inotify_list);
int ret = 0;
#ifdef CONFIG_AUDITSYSCALL
int dont_count = 0;
......@@ -968,7 +963,7 @@ static inline int audit_del_rule(struct audit_entry *entry)
}
if (e->rule.watch)
audit_remove_watch_rule(&e->rule, &inotify_list);
audit_remove_watch_rule(&e->rule);
if (e->rule.tree)
audit_remove_tree_rule(&e->rule);
......@@ -986,9 +981,6 @@ static inline int audit_del_rule(struct audit_entry *entry)
#endif
mutex_unlock(&audit_filter_mutex);
if (!list_empty(&inotify_list))
audit_inotify_unregister(&inotify_list);
out:
if (watch)
audit_put_watch(watch); /* match initial get */
......@@ -1322,30 +1314,23 @@ static int update_lsm_rule(struct audit_krule *r)
{
struct audit_entry *entry = container_of(r, struct audit_entry, rule);
struct audit_entry *nentry;
struct audit_watch *watch;
struct audit_tree *tree;
int err = 0;
if (!security_audit_rule_known(r))
return 0;
watch = r->watch;
tree = r->tree;
nentry = audit_dupe_rule(r, watch);
nentry = audit_dupe_rule(r);
if (IS_ERR(nentry)) {
/* save the first error encountered for the
* return value */
err = PTR_ERR(nentry);
audit_panic("error updating LSM filters");
if (watch)
if (r->watch)
list_del(&r->rlist);
list_del_rcu(&entry->list);
list_del(&r->list);
} else {
if (watch) {
list_add(&nentry->rule.rlist, audit_watch_rules(watch));
list_del(&r->rlist);
} else if (tree)
if (r->watch || r->tree)
list_replace_init(&r->rlist, &nentry->rule.rlist);
list_replace_rcu(&entry->list, &nentry->list);
list_replace(&r->list, &nentry->rule.list);
......
......@@ -168,12 +168,12 @@ struct audit_context {
int in_syscall; /* 1 if task is in a syscall */
enum audit_state state, current_state;
unsigned int serial; /* serial number for record */
struct timespec ctime; /* time of syscall entry */
int major; /* syscall number */
struct timespec ctime; /* time of syscall entry */
unsigned long argv[4]; /* syscall arguments */
int return_valid; /* return code is valid */
long return_code;/* syscall return code */
u64 prio;
int return_valid; /* return code is valid */
int name_count;
struct audit_names names[AUDIT_NAMES];
char * filterkey; /* key for rule that triggered record */
......@@ -198,8 +198,8 @@ struct audit_context {
char target_comm[TASK_COMM_LEN];
struct audit_tree_refs *trees, *first_trees;
int tree_count;
struct list_head killed_trees;
int tree_count;
int type;
union {
......@@ -549,9 +549,8 @@ static int audit_filter_rules(struct task_struct *tsk,
}
break;
case AUDIT_WATCH:
if (name && audit_watch_inode(rule->watch) != (unsigned long)-1)
result = (name->dev == audit_watch_dev(rule->watch) &&
name->ino == audit_watch_inode(rule->watch));
if (name)
result = audit_watch_compare(rule->watch, name->ino, name->dev);
break;
case AUDIT_DIR:
if (ctx)
......@@ -1726,7 +1725,7 @@ static inline void handle_one(const struct inode *inode)
struct audit_tree_refs *p;
struct audit_chunk *chunk;
int count;
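/* fast path: an inode with no fsnotify marks cannot belong to any
 * audit tree, so the chunk lookup can be skipped entirely */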
if (likely(list_empty(&inode->inotify_watches)))
if (likely(hlist_empty(&inode->i_fsnotify_mark_entries)))
return;
context = current->audit_context;
p = context->trees;
......@@ -1769,7 +1768,7 @@ retry:
seq = read_seqbegin(&rename_lock);
for(;;) {
struct inode *inode = d->d_inode;
if (inode && unlikely(!list_empty(&inode->inotify_watches))) {
if (inode && unlikely(!hlist_empty(&inode->i_fsnotify_mark_entries))) {
struct audit_chunk *chunk;
chunk = audit_tree_lookup(inode);
if (chunk) {
......