vfs: spread struct mount - remaining argument of next_mnt()
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
commit 909b0a88ef
parent c63181e6b6
1 changed file with 18 additions and 17 deletions

fs/namespace.c
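The patch below switches the remaining argument of next_mnt() from struct vfsmount * to struct mount *, so the walk terminates by comparing mount structures directly and callers no longer pass &mnt->mnt. A minimal sketch of the resulting traversal pattern, using only names that appear in this diff (the walk_tree() wrapper is hypothetical, for illustration only):

	/* new prototype: cursor and subtree root are both struct mount */
	static struct mount *next_mnt(struct mount *p, struct mount *root);

	/* hypothetical caller: visits every mount in the subtree rooted at mnt */
	static void walk_tree(struct mount *mnt)
	{
		struct mount *p;

		for (p = mnt; p; p = next_mnt(p, mnt))
			;	/* visit p here */
	}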
@@ -631,12 +631,12 @@ static void commit_tree(struct mount *mnt)
 	touch_mnt_namespace(n);
 }
 
-static struct mount *next_mnt(struct mount *p, struct vfsmount *root)
+static struct mount *next_mnt(struct mount *p, struct mount *root)
 {
 	struct list_head *next = p->mnt_mounts.next;
 	if (next == &p->mnt_mounts) {
 		while (1) {
-			if (&p->mnt == root)
+			if (p == root)
 				return NULL;
 			next = p->mnt_child.next;
 			if (next != &p->mnt_parent->mnt_mounts)
@@ -1145,16 +1145,17 @@ const struct seq_operations mountstats_op = {
  * open files, pwds, chroots or sub mounts that are
  * busy.
  */
-int may_umount_tree(struct vfsmount *mnt)
+int may_umount_tree(struct vfsmount *m)
 {
+	struct mount *mnt = real_mount(m);
 	int actual_refs = 0;
 	int minimum_refs = 0;
 	struct mount *p;
-	BUG_ON(!mnt);
+	BUG_ON(!m);
 
 	/* write lock needed for mnt_get_count */
 	br_write_lock(vfsmount_lock);
-	for (p = real_mount(mnt); p; p = next_mnt(p, mnt)) {
+	for (p = mnt; p; p = next_mnt(p, mnt)) {
 		actual_refs += mnt_get_count(p);
 		minimum_refs += 2;
 	}
@@ -1228,7 +1229,7 @@ void umount_tree(struct mount *mnt, int propagate, struct list_head *kill)
 	LIST_HEAD(tmp_list);
 	struct mount *p;
 
-	for (p = mnt; p; p = next_mnt(p, &mnt->mnt))
+	for (p = mnt; p; p = next_mnt(p, mnt))
 		list_move(&p->mnt_hash, &tmp_list);
 
 	if (propagate)
@@ -1436,7 +1437,7 @@ struct mount *copy_tree(struct mount *mnt, struct dentry *dentry,
 		if (!is_subdir(r->mnt_mountpoint, dentry))
 			continue;
 
-		for (s = r; s; s = next_mnt(s, &r->mnt)) {
+		for (s = r; s; s = next_mnt(s, r)) {
 			if (!(flag & CL_COPY_ALL) && IS_MNT_UNBINDABLE(s)) {
 				s = skip_mnt_tree(s);
 				continue;
@@ -1509,7 +1510,7 @@ static void cleanup_group_ids(struct mount *mnt, struct mount *end)
 {
 	struct mount *p;
 
-	for (p = mnt; p != end; p = next_mnt(p, &mnt->mnt)) {
+	for (p = mnt; p != end; p = next_mnt(p, mnt)) {
 		if (p->mnt_group_id && !IS_MNT_SHARED(p))
 			mnt_release_group_id(p);
 	}
@@ -1519,7 +1520,7 @@ static int invent_group_ids(struct mount *mnt, bool recurse)
 {
 	struct mount *p;
 
-	for (p = mnt; p; p = recurse ? next_mnt(p, &mnt->mnt) : NULL) {
+	for (p = mnt; p; p = recurse ? next_mnt(p, mnt) : NULL) {
 		if (!p->mnt_group_id && !IS_MNT_SHARED(p)) {
 			int err = mnt_alloc_group_id(p);
 			if (err) {
@@ -1616,7 +1617,7 @@ static int attach_recursive_mnt(struct mount *source_mnt,
 	br_write_lock(vfsmount_lock);
 
 	if (IS_MNT_SHARED(dest_mnt)) {
-		for (p = source_mnt; p; p = next_mnt(p, &source_mnt->mnt))
+		for (p = source_mnt; p; p = next_mnt(p, source_mnt))
 			set_mnt_shared(p);
 	}
 	if (parent_path) {
@@ -1731,7 +1732,7 @@ static int do_change_type(struct path *path, int flag)
 	}
 
 	br_write_lock(vfsmount_lock);
-	for (m = mnt; m; m = (recurse ? next_mnt(m, &mnt->mnt) : NULL))
+	for (m = mnt; m; m = (recurse ? next_mnt(m, mnt) : NULL))
 		change_mnt_propagation(m, type);
 	br_write_unlock(vfsmount_lock);
 
@@ -1859,7 +1860,7 @@ static int do_remount(struct path *path, int flags, int mnt_flags,
 static inline int tree_contains_unbindable(struct mount *mnt)
 {
 	struct mount *p;
-	for (p = mnt; p; p = next_mnt(p, &mnt->mnt)) {
+	for (p = mnt; p; p = next_mnt(p, mnt)) {
 		if (IS_MNT_UNBINDABLE(p))
 			return 1;
 	}
@@ -2399,6 +2400,7 @@ static struct mnt_namespace *dup_mnt_ns(struct mnt_namespace *mnt_ns,
 	struct mnt_namespace *new_ns;
 	struct vfsmount *rootmnt = NULL, *pwdmnt = NULL;
 	struct mount *p, *q;
+	struct mount *old = real_mount(mnt_ns->root);
 	struct mount *new;
 
 	new_ns = alloc_mnt_ns();
@@ -2407,8 +2409,7 @@ static struct mnt_namespace *dup_mnt_ns(struct mnt_namespace *mnt_ns,
 
 	down_write(&namespace_sem);
 	/* First pass: copy the tree topology */
-	new = copy_tree(real_mount(mnt_ns->root), mnt_ns->root->mnt_root,
-					CL_COPY_ALL | CL_EXPIRE);
+	new = copy_tree(old, old->mnt.mnt_root, CL_COPY_ALL | CL_EXPIRE);
 	if (!new) {
 		up_write(&namespace_sem);
 		kfree(new_ns);
@@ -2424,7 +2425,7 @@ static struct mnt_namespace *dup_mnt_ns(struct mnt_namespace *mnt_ns,
 	 * as belonging to new namespace. We have already acquired a private
 	 * fs_struct, so tsk->fs->lock is not needed.
 	 */
-	p = real_mount(mnt_ns->root);
+	p = old;
 	q = new;
 	while (p) {
 		q->mnt_ns = new_ns;
@@ -2443,8 +2444,8 @@ static struct mnt_namespace *dup_mnt_ns(struct mnt_namespace *mnt_ns,
 				pwdmnt = &p->mnt;
 			}
 		}
-		p = next_mnt(p, mnt_ns->root);
-		q = next_mnt(q, new_ns->root);
+		p = next_mnt(p, old);
+		q = next_mnt(q, new);
 	}
 	up_write(&namespace_sem);
 