/*
 *  linux/fs/pnode.c
 *
 * (C) Copyright IBM Corporation 2005.
 *	Released under GPL v2.
 *	Author : Ram Pai (linuxram@us.ibm.com)
 *
 */
#include <linux/mnt_namespace.h>
#include <linux/mount.h>
#include <linux/fs.h>
#include <linux/nsproxy.h>
#include "internal.h"
#include "pnode.h"

/* return the next shared peer mount of @p */
static inline struct mount *next_peer(struct mount *p)
{
	return list_entry(p->mnt_share.next, struct mount, mnt_share);
}

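/* return the first mount on @p's list of slave mounts */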
static inline struct mount *first_slave(struct mount *p)
{
	return list_entry(p->mnt_slave_list.next, struct mount, mnt_slave);
}

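/* return the last mount on @p's list of slave mounts */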
static inline struct mount *last_slave(struct mount *p)
{
	return list_entry(p->mnt_slave_list.prev, struct mount, mnt_slave);
}

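/* return the mount that follows @p on its master's slave list */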
static inline struct mount *next_slave(struct mount *p)
{
	return list_entry(p->mnt_slave.next, struct mount, mnt_slave);
}

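/*
 * Walk @mnt's peer group and return the first peer that belongs to
 * namespace @ns and is reachable from @root, or NULL if there is none.
 */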
static struct mount *get_peer_under_root(struct mount *mnt,
					 struct mnt_namespace *ns,
					 const struct path *root)
{
	struct mount *m = mnt;

	do {
		/* Check the namespace first for optimization */
		if (m->mnt_ns == ns && is_path_reachable(m, m->mnt.mnt_root, root))
			return m;

		m = next_peer(m);
	} while (m != mnt);

	return NULL;
}

/*
 * Get ID of closest dominating peer group having a representative
 * under the given root.
 *
 * Caller must hold namespace_sem
 */
int get_dominating_id(struct mount *mnt, const struct path *root)
{
	struct mount *m;

	for (m = mnt->mnt_master; m != NULL; m = m->mnt_master) {
		struct mount *d = get_peer_under_root(m, mnt->mnt_ns, root);
		if (d)
			return d->mnt_group_id;
	}

	return 0;
}

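/*
 * Turn @mnt into a slave: pick a new master from among its old peers,
 * preferring one with the same root dentry, and hand every mount that
 * was slaved to @mnt over to that master. If @mnt had neither peers
 * nor a master, its former slaves are simply released.
 */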
static int do_make_slave(struct mount *mnt)
{
	struct mount *master, *slave_mnt;

	if (list_empty(&mnt->mnt_share)) {
		if (IS_MNT_SHARED(mnt)) {
			mnt_release_group_id(mnt);
			CLEAR_MNT_SHARED(mnt);
		}
		master = mnt->mnt_master;
		if (!master) {
			struct list_head *p = &mnt->mnt_slave_list;
			while (!list_empty(p)) {
				slave_mnt = list_first_entry(p,
						struct mount, mnt_slave);
				list_del_init(&slave_mnt->mnt_slave);
				slave_mnt->mnt_master = NULL;
			}
			return 0;
		}
	} else {
		struct mount *m;
		/*
		 * slave 'mnt' to a peer mount that has the
		 * same root dentry. If none is available then
		 * slave it to anything that is available.
		 */
		for (m = master = next_peer(mnt); m != mnt; m = next_peer(m)) {
			if (m->mnt.mnt_root == mnt->mnt.mnt_root) {
				master = m;
				break;
			}
		}
		list_del_init(&mnt->mnt_share);
		mnt->mnt_group_id = 0;
		CLEAR_MNT_SHARED(mnt);
	}
	list_for_each_entry(slave_mnt, &mnt->mnt_slave_list, mnt_slave)
		slave_mnt->mnt_master = master;
	list_move(&mnt->mnt_slave, &master->mnt_slave_list);
	list_splice(&mnt->mnt_slave_list, master->mnt_slave_list.prev);
	INIT_LIST_HEAD(&mnt->mnt_slave_list);
	mnt->mnt_master = master;
	return 0;
}

/*
 * vfsmount lock must be held for write
 */
void change_mnt_propagation(struct mount *mnt, int type)
{
	if (type == MS_SHARED) {
		set_mnt_shared(mnt);
		return;
	}
	do_make_slave(mnt);
	if (type != MS_SLAVE) {
		list_del_init(&mnt->mnt_slave);
		mnt->mnt_master = NULL;
		if (type == MS_UNBINDABLE)
			mnt->mnt.mnt_flags |= MNT_UNBINDABLE;
		else
			mnt->mnt.mnt_flags &= ~MNT_UNBINDABLE;
	}
}

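/*
 * For reference, userspace reaches change_mnt_propagation() through
 * mount(2) with one of the propagation flags; mount(8)'s
 * --make-shared, --make-slave, --make-private and --make-unbindable
 * options map to MS_SHARED, MS_SLAVE, MS_PRIVATE and MS_UNBINDABLE
 * respectively, e.g. (illustrative):
 *
 *	mount(NULL, "/mnt", NULL, MS_SLAVE, NULL);
 */
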
/*
 * get the next mount in the propagation tree.
 * @m: the mount seen last
 * @origin: the original mount from where the tree walk initiated
 *
 * Note that peer groups form contiguous segments of slave lists.
 * We rely on that in propagate_one() to be able to find out if
 * a vfsmount found while iterating with propagation_next() is
 * a peer of one we'd found earlier.
 */
static struct mount *propagation_next(struct mount *m,
					 struct mount *origin)
{
	/* are there any slaves of this mount? */
	if (!IS_MNT_NEW(m) && !list_empty(&m->mnt_slave_list))
		return first_slave(m);

	while (1) {
		struct mount *master = m->mnt_master;

		if (master == origin->mnt_master) {
			struct mount *next = next_peer(m);
			return (next == origin) ? NULL : next;
		} else if (m->mnt_slave.next != &master->mnt_slave_list)
			return next_slave(m);

		/* back at master */
		m = master;
	}
}

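/*
 * Illustrative walk order for propagation_next(): with a peer group
 * { A, A1 } where A also has two mounts S and S1 directly slaved to
 * it, iterating from propagation_next(A, A) visits
 *
 *	S -> S1 -> A1 -> NULL
 *
 * i.e. a mount's slaves are walked before moving on to its next peer.
 */
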
static struct mount *skip_propagation_subtree(struct mount *m,
						struct mount *origin)
{
	/*
	 * Advance m such that propagation_next will not return
	 * the slaves of m.
	 */
	if (!IS_MNT_NEW(m) && !list_empty(&m->mnt_slave_list))
		m = last_slave(m);

	return m;
}

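/*
 * Like propagation_next(), but advance one whole peer group at a time:
 * return the first mount of the next peer group in the propagation
 * tree, or NULL once the walk arrives back at @origin's group.
 */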
static struct mount *next_group(struct mount *m, struct mount *origin)
{
	while (1) {
		while (1) {
			struct mount *next;
			if (!IS_MNT_NEW(m) && !list_empty(&m->mnt_slave_list))
				return first_slave(m);
			next = next_peer(m);
			if (m->mnt_group_id == origin->mnt_group_id) {
				if (next == origin)
					return NULL;
			} else if (m->mnt_slave.next != &next->mnt_slave)
				break;
			m = next;
		}
		/* m is the last peer */
		while (1) {
			struct mount *master = m->mnt_master;
			if (m->mnt_slave.next != &master->mnt_slave_list)
				return next_slave(m);
			m = next_peer(master);
			if (master->mnt_group_id == origin->mnt_group_id)
				break;
			if (master->mnt_slave.next == &m->mnt_slave)
				break;
			m = master;
		}
		if (m == origin)
			return NULL;
	}
}

/* all accesses are serialized by namespace_sem */
static struct user_namespace *user_ns;
static struct mount *last_dest, *first_source, *last_source, *dest_master;
static struct mountpoint *mp;
static struct hlist_head *list;

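/* true if @m1 and @m2 belong to the same (non-trivial) peer group */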
static inline bool peers(struct mount *m1, struct mount *m2)
{
	return m1->mnt_group_id == m2->mnt_group_id && m1->mnt_group_id;
}

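/*
 * Create one propagated copy of last_source under mount @m; called by
 * propagate_mnt() for each mount the original event propagates to.
 */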
static int propagate_one(struct mount *m)
{
	struct mount *child;
	int type;
	/* skip ones added by this propagate_mnt() */
	if (IS_MNT_NEW(m))
		return 0;
	/* skip if mountpoint isn't covered by it */
	if (!is_subdir(mp->m_dentry, m->mnt.mnt_root))
		return 0;
	if (peers(m, last_dest)) {
		type = CL_MAKE_SHARED;
	} else {
		struct mount *n, *p;
		bool done;
		for (n = m; ; n = p) {
			p = n->mnt_master;
			if (p == dest_master || IS_MNT_MARKED(p))
				break;
		}
		do {
			struct mount *parent = last_source->mnt_parent;
			if (peers(last_source, first_source))
				break;
			done = parent->mnt_master == p;
			if (done && peers(n, parent))
				break;
			last_source = last_source->mnt_master;
		} while (!done);

		type = CL_SLAVE;
		/* beginning of peer group among the slaves? */
		if (IS_MNT_SHARED(m))
			type |= CL_MAKE_SHARED;
	}

	/* Notice when we are propagating across user namespaces */
	if (m->mnt_ns->user_ns != user_ns)
		type |= CL_UNPRIVILEGED;
	child = copy_tree(last_source, last_source->mnt.mnt_root, type);
	if (IS_ERR(child))
		return PTR_ERR(child);
	child->mnt.mnt_flags &= ~MNT_LOCKED;
	read_seqlock_excl(&mount_lock);
	mnt_set_mountpoint(m, mp, child);
	if (m->mnt_master != dest_master)
		SET_MNT_MARK(m->mnt_master);
	read_sequnlock_excl(&mount_lock);
	last_dest = m;
	last_source = child;
	hlist_add_head(&child->mnt_hash, list);
	return count_mounts(m->mnt_ns, child);
}

/*
 * mount 'source_mnt' under the destination 'dest_mnt' at the
 * mountpoint 'dest_mp'. And propagate that mount to
 * all the peer and slave mounts of 'dest_mnt'.
 * Link all the new mounts into a propagation tree headed at
 * source_mnt. Also link all the new mounts using ->mnt_list
 * headed at source_mnt's ->mnt_list
 *
 * @dest_mnt: destination mount.
 * @dest_mp: destination mountpoint.
 * @source_mnt: source mount.
 * @tree_list : list of heads of trees to be attached.
 */
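/*
 * Example (illustrative): if dest_mnt is shared with a peer P and has
 * a slave S, attaching source_mnt to dest_mnt also creates one copy of
 * source_mnt's tree on P (as a peer of source_mnt) and one on S
 * (slaved to source_mnt's peer group).
 */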
int propagate_mnt(struct mount *dest_mnt, struct mountpoint *dest_mp,
		    struct mount *source_mnt, struct hlist_head *tree_list)
{
	struct mount *m, *n;
	int ret = 0;

	/*
	 * we don't want to bother passing tons of arguments to
	 * propagate_one(); everything is serialized by namespace_sem,
	 * so globals will do just fine.
	 */
	user_ns = current->nsproxy->mnt_ns->user_ns;
	last_dest = dest_mnt;
	first_source = source_mnt;
	last_source = source_mnt;
	mp = dest_mp;
	list = tree_list;
	dest_master = dest_mnt->mnt_master;

	/* all peers of dest_mnt, except dest_mnt itself */
	for (n = next_peer(dest_mnt); n != dest_mnt; n = next_peer(n)) {
		ret = propagate_one(n);
		if (ret)
			goto out;
	}

	/* all slave groups */
	for (m = next_group(dest_mnt, dest_mnt); m;
			m = next_group(m, dest_mnt)) {
		/* everything in that slave group */
		n = m;
		do {
			ret = propagate_one(n);
			if (ret)
				goto out;
			n = next_peer(n);
		} while (n != m);
	}
out:
	read_seqlock_excl(&mount_lock);
	hlist_for_each_entry(n, tree_list, mnt_hash) {
		m = n->mnt_parent;
		if (m->mnt_master != dest_mnt->mnt_master)
			CLEAR_MNT_MARK(m->mnt_master);
	}
	read_sequnlock_excl(&mount_lock);
	return ret;
}

static struct mount *find_topper(struct mount *mnt)
{
	/* If there is exactly one mount covering mnt completely return it. */
	struct mount *child;

	if (!list_is_singular(&mnt->mnt_mounts))
		return NULL;

	child = list_first_entry(&mnt->mnt_mounts, struct mount, mnt_child);
	if (child->mnt_mountpoint != mnt->mnt.mnt_root)
		return NULL;

	return child;
}

/*
 * return true if the refcount is greater than count
 */
static inline int do_refcount_check(struct mount *mnt, int count)
{
	return mnt_get_count(mnt) > count;
}

/*
 * check if the mount 'mnt' can be unmounted successfully.
 * @mnt: the mount to be checked for unmount
 * NOTE: unmounting 'mnt' would naturally propagate to all
 * other mounts its parent propagates to.
 * Check if any of these mounts that **do not have submounts**
 * have more references than 'refcnt'. If so return busy.
 *
 * vfsmount lock must be held for write
 */
int propagate_mount_busy(struct mount *mnt, int refcnt)
{
	struct mount *m, *child, *topper;
	struct mount *parent = mnt->mnt_parent;

	if (mnt == parent)
		return do_refcount_check(mnt, refcnt);

	/*
	 * quickly check if the current mount can be unmounted.
	 * If not, we don't have to go checking for all other
	 * mounts
	 */
	if (!list_empty(&mnt->mnt_mounts) || do_refcount_check(mnt, refcnt))
		return 1;

	for (m = propagation_next(parent, parent); m;
	     m = propagation_next(m, parent)) {
		int count = 1;
		child = __lookup_mnt(&m->mnt, mnt->mnt_mountpoint);
		if (!child)
			continue;

		/* Is there exactly one mount on the child that covers
		 * it completely whose reference should be ignored?
		 */
		topper = find_topper(child);
		if (topper)
			count += 1;
		else if (!list_empty(&child->mnt_mounts))
			continue;

		if (do_refcount_check(child, count))
			return 1;
	}
	return 0;
}

/*
 * Clear MNT_LOCKED when it can be shown to be safe.
 *
 * mount_lock lock must be held for write
 */
void propagate_mount_unlock(struct mount *mnt)
{
	struct mount *parent = mnt->mnt_parent;
	struct mount *m, *child;

	BUG_ON(parent == mnt);

	for (m = propagation_next(parent, parent); m;
			m = propagation_next(m, parent)) {
		child = __lookup_mnt(&m->mnt, mnt->mnt_mountpoint);
		if (child)
			child->mnt.mnt_flags &= ~MNT_LOCKED;
	}
}

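/*
 * Mark @mnt for unmount, take it off its parent's child list and
 * queue it on @to_umount.
 */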
static void umount_one(struct mount *mnt, struct list_head *to_umount)
{
	CLEAR_MNT_MARK(mnt);
	mnt->mnt.mnt_flags |= MNT_UMOUNT;
	list_del_init(&mnt->mnt_child);
	list_del_init(&mnt->mnt_umounting);
	list_move_tail(&mnt->mnt_list, to_umount);
}

/*
 * NOTE: unmounting 'mnt' naturally propagates to all other mounts its
 * parent propagates to.
 */
static bool __propagate_umount(struct mount *mnt,
			       struct list_head *to_umount,
			       struct list_head *to_restore)
{
	bool progress = false;
	struct mount *child;

	/*
	 * The state of the parent won't change if this mount is
	 * already unmounted or marked as without children.
	 */
	if (mnt->mnt.mnt_flags & (MNT_UMOUNT | MNT_MARKED))
		goto out;

	/* Verify topper is the only grandchild that has not been
	 * speculatively unmounted.
	 */
	list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
		if (child->mnt_mountpoint == mnt->mnt.mnt_root)
			continue;
		if (!list_empty(&child->mnt_umounting) && IS_MNT_MARKED(child))
			continue;
		/* Found a mounted child */
		goto children;
	}

	/* Mark mounts that can be unmounted if not locked */
	SET_MNT_MARK(mnt);
	progress = true;

	/* If a mount is without children and not locked umount it. */
	if (!IS_MNT_LOCKED(mnt)) {
		umount_one(mnt, to_umount);
	} else {
children:
		list_move_tail(&mnt->mnt_umounting, to_restore);
	}
out:
	return progress;
}

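/*
 * Unmount the children of every mount queued on @to_umount as well,
 * except for toppers, which go on @to_restore for reparenting.
 */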
static void umount_list(struct list_head *to_umount,
			struct list_head *to_restore)
{
	struct mount *mnt, *child, *tmp;
	list_for_each_entry(mnt, to_umount, mnt_list) {
		list_for_each_entry_safe(child, tmp, &mnt->mnt_mounts, mnt_child) {
			/* topper? */
			if (child->mnt_mountpoint == mnt->mnt.mnt_root)
				list_move_tail(&child->mnt_umounting, to_restore);
			else
				umount_one(child, to_umount);
		}
	}
}

static void restore_mounts(struct list_head *to_restore)
{
	/* Restore mounts to a clean working state */
	while (!list_empty(to_restore)) {
		struct mount *mnt, *parent;
		struct mountpoint *mp;

		mnt = list_first_entry(to_restore, struct mount, mnt_umounting);
		CLEAR_MNT_MARK(mnt);
		list_del_init(&mnt->mnt_umounting);

		/* Should this mount be reparented? */
		mp = mnt->mnt_mp;
		parent = mnt->mnt_parent;
		while (parent->mnt.mnt_flags & MNT_UMOUNT) {
			mp = parent->mnt_mp;
			parent = parent->mnt_parent;
		}
		if (parent != mnt->mnt_parent)
			mnt_change_mountpoint(parent, mp, mnt);
	}
}

static void cleanup_umount_visitations(struct list_head *visited)
{
	while (!list_empty(visited)) {
		struct mount *mnt =
			list_first_entry(visited, struct mount, mnt_umounting);
		list_del_init(&mnt->mnt_umounting);
	}
}

/*
 * collect all mounts that receive propagation from the mount in @list,
 * and return these additional mounts in the same list.
 * @list: the list of mounts to be unmounted.
 *
 * vfsmount lock must be held for write
 */
int propagate_umount(struct list_head *list)
{
	struct mount *mnt;
	LIST_HEAD(to_restore);
	LIST_HEAD(to_umount);
	LIST_HEAD(visited);

	/* Find candidates for unmounting */
	list_for_each_entry_reverse(mnt, list, mnt_list) {
		struct mount *parent = mnt->mnt_parent;
		struct mount *m;

		/*
		 * If this mount has already been visited it is known that its
		 * entire peer group and all of their slaves in the propagation
		 * tree for the mountpoint have already been visited and there
		 * is no need to visit them again.
		 */
		if (!list_empty(&mnt->mnt_umounting))
			continue;

		list_add_tail(&mnt->mnt_umounting, &visited);
		for (m = propagation_next(parent, parent); m;
		     m = propagation_next(m, parent)) {
			struct mount *child = __lookup_mnt(&m->mnt,
							   mnt->mnt_mountpoint);
			if (!child)
				continue;

			if (!list_empty(&child->mnt_umounting)) {
				/*
				 * If the child has already been visited it is
				 * known that its entire peer group and all of
				 * their slaves in the propagation tree for the
				 * mountpoint have already been visited and
				 * there is no need to visit this subtree again.
				 */
				m = skip_propagation_subtree(m, parent);
				continue;
			} else if (child->mnt.mnt_flags & MNT_UMOUNT) {
				/*
				 * We have come across a partially unmounted
				 * mount in @list that has not been visited
				 * yet. Remember it has been visited and
				 * continue about our merry way.
				 */
				list_add_tail(&child->mnt_umounting, &visited);
				continue;
			}

			/* Check the child and parents while progress is made */
			while (__propagate_umount(child,
						  &to_umount, &to_restore)) {
				/* Is the parent a umount candidate? */
				child = child->mnt_parent;
				if (list_empty(&child->mnt_umounting))
					break;
			}
		}
	}

	umount_list(&to_umount, &to_restore);
	restore_mounts(&to_restore);
	cleanup_umount_visitations(&visited);
	list_splice_tail(&to_umount, list);

	return 0;
}