Merge branch 'stable-4.10' of git://git.infradead.org/users/pcmoore/audit
Pull audit fixes from Paul Moore:
 "Two small fixes relating to audit's use of fsnotify.

  The first patch plugs a leak and the second fixes some lock shenanigans.
  The patches are small and I banged on this for an afternoon with our
  testsuite and didn't see anything odd"

* 'stable-4.10' of git://git.infradead.org/users/pcmoore/audit:
  audit: Fix sleep in atomic
  fsnotify: Remove fsnotify_duplicate_mark()
commit 6989606a72
3 changed files with 14 additions and 18 deletions
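On the "fixes some lock shenanigans" side: fsnotify_add_mark() acquires the group's mark_mutex, a sleeping lock, and audit_tree was calling it while holding a spinlock. The fix takes mark_mutex before the spinlock and switches to fsnotify_add_mark_locked(), which expects the mutex to already be held. A minimal sketch of the resulting ordering in untag_chunk(), condensed from the hunks below (the err variable and the elided error/hash handling are illustrative, not the literal kernel code):

/*
 * Sketch only: condensed from the untag_chunk() changes below.
 */
mutex_lock(&entry->group->mark_mutex);	/* sleeping lock, taken first */
spin_lock(&entry->lock);		/* spinlock nests under the mutex */

/*
 * The _locked variant does not take mark_mutex itself -- the
 * mutex_lock() inside plain fsnotify_add_mark() was the sleep in
 * atomic -- so it may be called with entry->lock held.
 */
err = fsnotify_add_mark_locked(&new->mark, entry->group, entry->inode,
			       NULL, 1);

/* ... hash-list update and error handling elided ... */

spin_unlock(&entry->lock);
mutex_unlock(&entry->group->mark_mutex);
fsnotify_destroy_mark(entry, audit_tree_group);	/* after both locks are dropped */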
fs/notify/mark.c
@@ -510,18 +510,6 @@ void fsnotify_detach_group_marks(struct fsnotify_group *group)
 	}
 }
 
-void fsnotify_duplicate_mark(struct fsnotify_mark *new, struct fsnotify_mark *old)
-{
-	assert_spin_locked(&old->lock);
-	new->inode = old->inode;
-	new->mnt = old->mnt;
-	if (old->group)
-		fsnotify_get_group(old->group);
-	new->group = old->group;
-	new->mask = old->mask;
-	new->free_mark = old->free_mark;
-}
-
 /*
  * Nothing fancy, just initialize lists and locks and counters.
  */
include/linux/fsnotify_backend.h
@@ -323,8 +323,6 @@ extern void fsnotify_init_mark(struct fsnotify_mark *mark, void (*free_mark)(str
 extern struct fsnotify_mark *fsnotify_find_inode_mark(struct fsnotify_group *group, struct inode *inode);
 /* find (and take a reference) to a mark associated with group and vfsmount */
 extern struct fsnotify_mark *fsnotify_find_vfsmount_mark(struct fsnotify_group *group, struct vfsmount *mnt);
-/* copy the values from old into new */
-extern void fsnotify_duplicate_mark(struct fsnotify_mark *new, struct fsnotify_mark *old);
 /* set the ignored_mask of a mark */
 extern void fsnotify_set_mark_ignored_mask_locked(struct fsnotify_mark *mark, __u32 mask);
 /* set the mask of a mark (might pin the object into memory */
kernel/audit_tree.c
@@ -231,9 +231,11 @@ static void untag_chunk(struct node *p)
 	if (size)
 		new = alloc_chunk(size);
 
+	mutex_lock(&entry->group->mark_mutex);
 	spin_lock(&entry->lock);
 	if (chunk->dead || !entry->inode) {
 		spin_unlock(&entry->lock);
+		mutex_unlock(&entry->group->mark_mutex);
 		if (new)
 			free_chunk(new);
 		goto out;
@@ -251,6 +253,7 @@ static void untag_chunk(struct node *p)
 		list_del_rcu(&chunk->hash);
 		spin_unlock(&hash_lock);
 		spin_unlock(&entry->lock);
+		mutex_unlock(&entry->group->mark_mutex);
 		fsnotify_destroy_mark(entry, audit_tree_group);
 		goto out;
 	}
@@ -258,8 +261,8 @@ static void untag_chunk(struct node *p)
 	if (!new)
 		goto Fallback;
 
-	fsnotify_duplicate_mark(&new->mark, entry);
-	if (fsnotify_add_mark(&new->mark, new->mark.group, new->mark.inode, NULL, 1)) {
+	if (fsnotify_add_mark_locked(&new->mark, entry->group, entry->inode,
+				     NULL, 1)) {
 		fsnotify_put_mark(&new->mark);
 		goto Fallback;
 	}
@@ -293,6 +296,7 @@ static void untag_chunk(struct node *p)
 		owner->root = new;
 	spin_unlock(&hash_lock);
 	spin_unlock(&entry->lock);
+	mutex_unlock(&entry->group->mark_mutex);
 	fsnotify_destroy_mark(entry, audit_tree_group);
 	fsnotify_put_mark(&new->mark);	/* drop initial reference */
 	goto out;
@@ -309,6 +313,7 @@ static void untag_chunk(struct node *p)
 		put_tree(owner);
 	spin_unlock(&hash_lock);
 	spin_unlock(&entry->lock);
+	mutex_unlock(&entry->group->mark_mutex);
 out:
 	fsnotify_put_mark(entry);
 	spin_lock(&hash_lock);
@@ -386,18 +391,21 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)
 
 	chunk_entry = &chunk->mark;
 
+	mutex_lock(&old_entry->group->mark_mutex);
 	spin_lock(&old_entry->lock);
 	if (!old_entry->inode) {
 		/* old_entry is being shot, lets just lie */
 		spin_unlock(&old_entry->lock);
+		mutex_unlock(&old_entry->group->mark_mutex);
 		fsnotify_put_mark(old_entry);
 		free_chunk(chunk);
 		return -ENOENT;
 	}
 
-	fsnotify_duplicate_mark(chunk_entry, old_entry);
-	if (fsnotify_add_mark(chunk_entry, chunk_entry->group, chunk_entry->inode, NULL, 1)) {
+	if (fsnotify_add_mark_locked(chunk_entry, old_entry->group,
+				     old_entry->inode, NULL, 1)) {
 		spin_unlock(&old_entry->lock);
+		mutex_unlock(&old_entry->group->mark_mutex);
 		fsnotify_put_mark(chunk_entry);
 		fsnotify_put_mark(old_entry);
 		return -ENOSPC;
@@ -413,6 +421,7 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)
 		chunk->dead = 1;
 		spin_unlock(&chunk_entry->lock);
 		spin_unlock(&old_entry->lock);
+		mutex_unlock(&old_entry->group->mark_mutex);
 
 		fsnotify_destroy_mark(chunk_entry, audit_tree_group);
 
@@ -445,6 +454,7 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)
 	spin_unlock(&hash_lock);
 	spin_unlock(&chunk_entry->lock);
 	spin_unlock(&old_entry->lock);
+	mutex_unlock(&old_entry->group->mark_mutex);
 	fsnotify_destroy_mark(old_entry, audit_tree_group);
 	fsnotify_put_mark(chunk_entry);	/* drop initial reference */
 	fsnotify_put_mark(old_entry); /* pair to fsnotify_find mark_entry */