/*
 * linux/fs/pnode.c
 *
 * (C) Copyright IBM Corporation 2005.
 *	Released under GPL v2.
 *	Author : Ram Pai (linuxram@us.ibm.com)
 *
 */
#include <linux/mnt_namespace.h>
#include <linux/mount.h>
#include <linux/fs.h>
#include "internal.h"
#include "pnode.h"

/* return the next shared peer mount of @p */
static inline struct mount *next_peer(struct mount *p)
{
	return list_entry(p->mnt_share.next, struct mount, mnt_share);
}

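/* return the first mount hanging off @p's ->mnt_slave_list */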
static inline struct mount *first_slave(struct mount *p)
{
	return list_entry(p->mnt_slave_list.next, struct mount, mnt_slave);
}

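/* return the next mount on the same ->mnt_slave_list as @p */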
static inline struct mount *next_slave(struct mount *p)
{
	return list_entry(p->mnt_slave.next, struct mount, mnt_slave);
}

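/*
 * Walk the peer group of @mnt and return the first peer that belongs to
 * namespace @ns and whose root is reachable from @root, or NULL if no
 * such peer exists.
 */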
static struct mount *get_peer_under_root(struct mount *mnt,
					 struct mnt_namespace *ns,
					 const struct path *root)
{
	struct mount *m = mnt;

	do {
		/* Check the namespace first for optimization */
		if (m->mnt_ns == ns && is_path_reachable(m, m->mnt.mnt_root, root))
			return m;

		m = next_peer(m);
	} while (m != mnt);

	return NULL;
}

/*
 * Get ID of closest dominating peer group having a representative
 * under the given root.
 *
 * Caller must hold namespace_sem
 */
int get_dominating_id(struct mount *mnt, const struct path *root)
{
	struct mount *m;

	for (m = mnt->mnt_master; m != NULL; m = m->mnt_master) {
		struct mount *d = get_peer_under_root(m, mnt->mnt_ns, root);
		if (d)
			return d->mnt_group_id;
	}

	return 0;
}

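/*
 * Take @mnt out of its peer group and turn it into a slave.
 *
 * Illustrative example: if A, B and C are peers and do_make_slave(A) is
 * called, A ends up on the slave list of one of the remaining peers
 * (preferring one that shares A's root dentry), and any mounts that were
 * slaves of A are handed over to that new master.
 */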
static int do_make_slave(struct mount *mnt)
{
	struct mount *peer_mnt = mnt, *master = mnt->mnt_master;
	struct mount *slave_mnt;

	/*
	 * slave 'mnt' to a peer mount that has the
	 * same root dentry. If none is available then
	 * slave it to anything that is available.
	 */
	while ((peer_mnt = next_peer(peer_mnt)) != mnt &&
	       peer_mnt->mnt.mnt_root != mnt->mnt.mnt_root) ;

	if (peer_mnt == mnt) {
		peer_mnt = next_peer(mnt);
		if (peer_mnt == mnt)
			peer_mnt = NULL;
	}
	if (IS_MNT_SHARED(mnt) && list_empty(&mnt->mnt_share))
		mnt_release_group_id(mnt);

	list_del_init(&mnt->mnt_share);
	mnt->mnt_group_id = 0;

	if (peer_mnt)
		master = peer_mnt;

	if (master) {
		list_for_each_entry(slave_mnt, &mnt->mnt_slave_list, mnt_slave)
			slave_mnt->mnt_master = master;
		list_move(&mnt->mnt_slave, &master->mnt_slave_list);
		list_splice(&mnt->mnt_slave_list, master->mnt_slave_list.prev);
		INIT_LIST_HEAD(&mnt->mnt_slave_list);
	} else {
		struct list_head *p = &mnt->mnt_slave_list;
		while (!list_empty(p)) {
			slave_mnt = list_first_entry(p,
					struct mount, mnt_slave);
			list_del_init(&slave_mnt->mnt_slave);
			slave_mnt->mnt_master = NULL;
		}
	}
	mnt->mnt_master = master;
	CLEAR_MNT_SHARED(mnt);
	return 0;
}

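/*
 * This is what do_change_type() in fs/namespace.c ends up calling when
 * userspace asks for MS_SHARED, MS_SLAVE, MS_PRIVATE or MS_UNBINDABLE,
 * i.e. "mount --make-shared" and friends.
 */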
/*
 * vfsmount lock must be held for write
 */
void change_mnt_propagation(struct mount *mnt, int type)
{
	if (type == MS_SHARED) {
		set_mnt_shared(mnt);
		return;
	}
	do_make_slave(mnt);
	if (type != MS_SLAVE) {
		list_del_init(&mnt->mnt_slave);
		mnt->mnt_master = NULL;
		if (type == MS_UNBINDABLE)
			mnt->mnt.mnt_flags |= MNT_UNBINDABLE;
		else
			mnt->mnt.mnt_flags &= ~MNT_UNBINDABLE;
	}
}

/*
 * get the next mount in the propagation tree.
 * @m: the mount seen last
 * @origin: the original mount from where the tree walk initiated
 *
 * Note that peer groups form contiguous segments of slave lists.
 * We rely on that in get_source() to be able to find out if
 * a vfsmount found while iterating with propagation_next() is
 * a peer of one we'd found earlier.
 */
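/*
 * The walk is depth-first: the slaves of a mount are visited before its
 * next peer, and once a slave list is exhausted the walk climbs back up
 * towards the peer group of @origin.
 */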
static struct mount *propagation_next(struct mount *m,
					 struct mount *origin)
{
	/* are there any slaves of this mount? */
	if (!IS_MNT_NEW(m) && !list_empty(&m->mnt_slave_list))
		return first_slave(m);

	while (1) {
		struct mount *master = m->mnt_master;

		if (master == origin->mnt_master) {
			struct mount *next = next_peer(m);
			return (next == origin) ? NULL : next;
		} else if (m->mnt_slave.next != &master->mnt_slave_list)
			return next_slave(m);

		/* back at master */
		m = master;
	}
}

/*
 * return the source mount to be used for cloning
 *
 * @dest	the current destination mount
 * @last_dest	the last seen destination mount
 * @last_src	the last seen source mount
 * @type	return CL_SLAVE if the new mount has to be
 *		cloned as a slave.
 */
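/*
 * In short: if @dest turns out to be a peer of a destination that has
 * already been populated, clone from the corresponding source and put
 * the copy into the same peer group (CL_MAKE_SHARED); otherwise clone a
 * slave (CL_SLAVE), additionally marking it shared if @dest starts a new
 * peer group among the slaves.
 */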
static struct mount *get_source(struct mount *dest,
				struct mount *last_dest,
				struct mount *last_src,
				int *type)
{
	struct mount *p_last_src = NULL;
	struct mount *p_last_dest = NULL;

	while (last_dest != dest->mnt_master) {
		p_last_dest = last_dest;
		p_last_src = last_src;
		last_dest = last_dest->mnt_master;
		last_src = last_src->mnt_master;
	}

	if (p_last_dest) {
		do {
			p_last_dest = next_peer(p_last_dest);
		} while (IS_MNT_NEW(p_last_dest));
		/* is that a peer of the earlier? */
		if (dest == p_last_dest) {
			*type = CL_MAKE_SHARED;
			return p_last_src;
		}
	}
	/* slave of the earlier, then */
	*type = CL_SLAVE;
	/* beginning of peer group among the slaves? */
	if (IS_MNT_SHARED(dest))
		*type |= CL_MAKE_SHARED;
	return last_src;
}

/*
 * mount 'source_mnt' under the destination 'dest_mnt' at the
 * mountpoint 'dest_mp'. And propagate that mount to
 * all the peer and slave mounts of 'dest_mnt'.
 * Link all the new mounts into a propagation tree headed at
 * source_mnt. Also link all the new mounts using ->mnt_list
 * headed at source_mnt's ->mnt_list
 *
 * @dest_mnt: destination mount.
 * @dest_mp: destination mountpoint.
 * @source_mnt: source mount.
 * @tree_list : list of heads of trees to be attached.
 */
int propagate_mnt(struct mount *dest_mnt, struct mountpoint *dest_mp,
		    struct mount *source_mnt, struct list_head *tree_list)
{
	struct mount *m, *child;
	int ret = 0;
	struct mount *prev_dest_mnt = dest_mnt;
	struct mount *prev_src_mnt = source_mnt;
	LIST_HEAD(tmp_list);

	for (m = propagation_next(dest_mnt, dest_mnt); m;
			m = propagation_next(m, dest_mnt)) {
		int type;
		struct mount *source;

		if (IS_MNT_NEW(m))
			continue;

		source = get_source(m, prev_dest_mnt, prev_src_mnt, &type);

		child = copy_tree(source, source->mnt.mnt_root, type);
		if (IS_ERR(child)) {
			ret = PTR_ERR(child);
			list_splice(tree_list, tmp_list.prev);
			goto out;
		}

		if (is_subdir(dest_mp->m_dentry, m->mnt.mnt_root)) {
			mnt_set_mountpoint(m, dest_mp, child);
			list_add_tail(&child->mnt_hash, tree_list);
		} else {
			/*
			 * This can happen if the parent mount was bind mounted
			 * on some subdirectory of a shared/slave mount.
			 */
			list_add_tail(&child->mnt_hash, &tmp_list);
		}
		prev_dest_mnt = m;
		prev_src_mnt = child;
	}
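	/*
	 * Any copies that could not be attached (and, on failure, everything
	 * already queued on @tree_list) have been collected on tmp_list;
	 * take them down again under the vfsmount lock.
	 */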
out:
	br_write_lock(&vfsmount_lock);
	while (!list_empty(&tmp_list)) {
		child = list_first_entry(&tmp_list, struct mount, mnt_hash);
		umount_tree(child, 0);
	}
	br_write_unlock(&vfsmount_lock);
	return ret;
}

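/*
 * Note that references held via children that have already been unmounted
 * but not yet released (mnt_ghosts, see umount_tree() in fs/namespace.c)
 * are not counted against the mount.
 */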
/*
 * return true if the refcount is greater than count
 */
static inline int do_refcount_check(struct mount *mnt, int count)
{
	int mycount = mnt_get_count(mnt) - mnt->mnt_ghosts;
	return (mycount > count);
}

/*
 * check if the mount 'mnt' can be unmounted successfully.
 * @mnt: the mount to be checked for unmount
 * NOTE: unmounting 'mnt' would naturally propagate to all
 * other mounts its parent propagates to.
 * Check if any of these mounts that **do not have submounts**
 * have more references than 'refcnt'. If so return busy.
 *
 * vfsmount lock must be held for write
 */
int propagate_mount_busy(struct mount *mnt, int refcnt)
{
	struct mount *m, *child;
	struct mount *parent = mnt->mnt_parent;
	int ret = 0;

	if (mnt == parent)
		return do_refcount_check(mnt, refcnt);

	/*
	 * quickly check if the current mount can be unmounted.
	 * If not, we don't have to go checking for all other
	 * mounts
	 */
	if (!list_empty(&mnt->mnt_mounts) || do_refcount_check(mnt, refcnt))
		return 1;

	for (m = propagation_next(parent, parent); m;
			m = propagation_next(m, parent)) {
		child = __lookup_mnt(&m->mnt, mnt->mnt_mountpoint, 0);
		if (child && list_empty(&child->mnt_mounts) &&
		    (ret = do_refcount_check(child, 1)))
			break;
	}
	return ret;
}

/*
 * NOTE: unmounting 'mnt' naturally propagates to all other mounts its
 * parent propagates to.
 */
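/*
 * Children that can be unmounted are collected by moving them onto
 * @mnt's ->mnt_hash; the caller (umount_tree() in fs/namespace.c) then
 * takes the whole list down in one go.
 */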
static void __propagate_umount(struct mount *mnt)
{
	struct mount *parent = mnt->mnt_parent;
	struct mount *m;

	BUG_ON(parent == mnt);

	for (m = propagation_next(parent, parent); m;
			m = propagation_next(m, parent)) {

		struct mount *child = __lookup_mnt(&m->mnt,
						mnt->mnt_mountpoint, 0);
		/*
		 * umount the child only if the child has no
		 * other children
		 */
		if (child && list_empty(&child->mnt_mounts))
			list_move_tail(&child->mnt_hash, &mnt->mnt_hash);
	}
}

/*
 * collect all mounts that receive propagation from the mount in @list,
 * and return these additional mounts in the same list.
 * @list: the list of mounts to be unmounted.
 *
 * vfsmount lock must be held for write
 */
int propagate_umount(struct list_head *list)
{
	struct mount *mnt;

	list_for_each_entry(mnt, list, mnt_hash)
		__propagate_umount(mnt);
	return 0;
}