tmpfs: optimize clearing when writing
mm/shmem.c
/*
 * Resizable virtual memory filesystem for Linux.
 *
 * Copyright (C) 2000 Linus Torvalds.
 *		 2000 Transmeta Corp.
 *		 2000-2001 Christoph Rohland
 *		 2000-2001 SAP AG
 *		 2002 Red Hat Inc.
 * Copyright (C) 2002-2011 Hugh Dickins.
 * Copyright (C) 2011 Google Inc.
 * Copyright (C) 2002-2005 VERITAS Software Corporation.
 * Copyright (C) 2004 Andi Kleen, SuSE Labs
 *
 * Extended attribute support for tmpfs:
 * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
 *
 * tiny-shmem:
 * Copyright (c) 2004, 2008 Matt Mackall <mpm@selenic.com>
 *
 * This file is released under the GPL.
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/vfs.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/swap.h>

static struct vfsmount *shm_mnt;

#ifdef CONFIG_SHMEM
/*
 * This virtual memory filesystem is heavily based on the ramfs. It
 * extends ramfs by the ability to use swap and honor resource limits
 * which makes it a completely usable filesystem.
 */

#include <linux/xattr.h>
#include <linux/exportfs.h>
#include <linux/posix_acl.h>
#include <linux/generic_acl.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/shmem_fs.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/percpu_counter.h>
#include <linux/splice.h>
#include <linux/security.h>
#include <linux/swapops.h>
#include <linux/mempolicy.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/migrate.h>
#include <linux/highmem.h>
#include <linux/seq_file.h>
#include <linux/magic.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>

#define BLOCKS_PER_PAGE  (PAGE_CACHE_SIZE/512)
#define VM_ACCT(size)    (PAGE_CACHE_ALIGN(size) >> PAGE_SHIFT)

/* Pretend that each entry is of this size in directory's i_size */
#define BOGO_DIRENT_SIZE 20

/* Symlink up to this size is kmalloc'ed instead of using a swappable page */
#define SHORT_SYMLINK_LEN 128

struct shmem_xattr {
	struct list_head list;	/* anchored by shmem_inode_info->xattr_list */
	char *name;		/* xattr name */
	size_t size;
	char value[0];
};

/* Flag allocation requirements to shmem_getpage */
enum sgp_type {
	SGP_READ,	/* don't exceed i_size, don't allocate page */
	SGP_CACHE,	/* don't exceed i_size, may allocate page */
	SGP_DIRTY,	/* like SGP_CACHE, but set new page dirty */
	SGP_WRITE,	/* may exceed i_size, may allocate page */
};

#ifdef CONFIG_TMPFS
static unsigned long shmem_default_max_blocks(void)
{
	return totalram_pages / 2;
}

static unsigned long shmem_default_max_inodes(void)
{
	return min(totalram_pages - totalhigh_pages, totalram_pages / 2);
}
#endif

static bool shmem_should_replace_page(struct page *page, gfp_t gfp);
static int shmem_replace_page(struct page **pagep, gfp_t gfp,
				struct shmem_inode_info *info, pgoff_t index);
static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
	struct page **pagep, enum sgp_type sgp, gfp_t gfp, int *fault_type);

static inline int shmem_getpage(struct inode *inode, pgoff_t index,
	struct page **pagep, enum sgp_type sgp, int *fault_type)
{
	return shmem_getpage_gfp(inode, index, pagep, sgp,
			mapping_gfp_mask(inode->i_mapping), fault_type);
}

static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

/*
 * shmem_file_setup pre-accounts the whole fixed size of a VM object,
 * for shared memory and for shared anonymous (/dev/zero) mappings
 * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
 * consistent with the pre-accounting of private mappings ...
 */
static inline int shmem_acct_size(unsigned long flags, loff_t size)
{
	return (flags & VM_NORESERVE) ?
		0 : security_vm_enough_memory_mm(current->mm, VM_ACCT(size));
}

static inline void shmem_unacct_size(unsigned long flags, loff_t size)
{
	if (!(flags & VM_NORESERVE))
		vm_unacct_memory(VM_ACCT(size));
}

/*
 * ... whereas tmpfs objects are accounted incrementally as
 * pages are allocated, in order to allow huge sparse files.
 * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
 */
static inline int shmem_acct_block(unsigned long flags)
{
	return (flags & VM_NORESERVE) ?
		security_vm_enough_memory_mm(current->mm, VM_ACCT(PAGE_CACHE_SIZE)) : 0;
}

static inline void shmem_unacct_blocks(unsigned long flags, long pages)
{
	if (flags & VM_NORESERVE)
		vm_unacct_memory(pages * VM_ACCT(PAGE_CACHE_SIZE));
}

static const struct super_operations shmem_ops;
static const struct address_space_operations shmem_aops;
static const struct file_operations shmem_file_operations;
static const struct inode_operations shmem_inode_operations;
static const struct inode_operations shmem_dir_inode_operations;
static const struct inode_operations shmem_special_inode_operations;
static const struct vm_operations_struct shmem_vm_ops;

static struct backing_dev_info shmem_backing_dev_info __read_mostly = {
	.ra_pages	= 0,	/* No readahead */
	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED,
};

static LIST_HEAD(shmem_swaplist);
static DEFINE_MUTEX(shmem_swaplist_mutex);

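/*
 * Inode accounting: when the mount sets max_inodes, a fixed pool of
 * free_inodes is handed out under stat_lock; shmem_free_inode() returns
 * one to the pool.
 */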
static int shmem_reserve_inode(struct super_block *sb)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		if (!sbinfo->free_inodes) {
			spin_unlock(&sbinfo->stat_lock);
			return -ENOSPC;
		}
		sbinfo->free_inodes--;
		spin_unlock(&sbinfo->stat_lock);
	}
	return 0;
}

static void shmem_free_inode(struct super_block *sb)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		spin_unlock(&sbinfo->stat_lock);
	}
}

/**
 * shmem_recalc_inode - recalculate the block usage of an inode
 * @inode: inode to recalc
 *
 * We have to calculate the free blocks since the mm can drop
 * undirtied hole pages behind our back.
 *
 * But normally   info->alloced == inode->i_mapping->nrpages + info->swapped
 * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
 *
 * It has to be called with the spinlock held.
 */
static void shmem_recalc_inode(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	long freed;

	freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
	if (freed > 0) {
		struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
		if (sbinfo->max_blocks)
			percpu_counter_add(&sbinfo->used_blocks, -freed);
		info->alloced -= freed;
		inode->i_blocks -= freed * BLOCKS_PER_PAGE;
		shmem_unacct_blocks(info->flags, freed);
	}
}

/*
 * Replace item expected in radix tree by a new item, while holding tree lock.
 */
static int shmem_radix_tree_replace(struct address_space *mapping,
			pgoff_t index, void *expected, void *replacement)
{
	void **pslot;
	void *item = NULL;

	VM_BUG_ON(!expected);
	pslot = radix_tree_lookup_slot(&mapping->page_tree, index);
	if (pslot)
		item = radix_tree_deref_slot_protected(pslot,
							&mapping->tree_lock);
	if (item != expected)
		return -ENOENT;
	if (replacement)
		radix_tree_replace_slot(pslot, replacement);
	else
		radix_tree_delete(&mapping->page_tree, index);
	return 0;
}

/*
 * Like add_to_page_cache_locked, but error if expected item has gone.
 */
static int shmem_add_to_page_cache(struct page *page,
				   struct address_space *mapping,
				   pgoff_t index, gfp_t gfp, void *expected)
{
	int error = 0;

	VM_BUG_ON(!PageLocked(page));
	VM_BUG_ON(!PageSwapBacked(page));

	if (!expected)
		error = radix_tree_preload(gfp & GFP_RECLAIM_MASK);
	if (!error) {
		page_cache_get(page);
		page->mapping = mapping;
		page->index = index;

		spin_lock_irq(&mapping->tree_lock);
		if (!expected)
			error = radix_tree_insert(&mapping->page_tree,
							index, page);
		else
			error = shmem_radix_tree_replace(mapping, index,
							expected, page);
		if (!error) {
			mapping->nrpages++;
			__inc_zone_page_state(page, NR_FILE_PAGES);
			__inc_zone_page_state(page, NR_SHMEM);
			spin_unlock_irq(&mapping->tree_lock);
		} else {
			page->mapping = NULL;
			spin_unlock_irq(&mapping->tree_lock);
			page_cache_release(page);
		}
		if (!expected)
			radix_tree_preload_end();
	}
	if (error)
		mem_cgroup_uncharge_cache_page(page);
	return error;
}

/*
 * Like delete_from_page_cache, but substitutes swap for page.
 */
static void shmem_delete_from_page_cache(struct page *page, void *radswap)
{
	struct address_space *mapping = page->mapping;
	int error;

	spin_lock_irq(&mapping->tree_lock);
	error = shmem_radix_tree_replace(mapping, page->index, page, radswap);
	page->mapping = NULL;
	mapping->nrpages--;
	__dec_zone_page_state(page, NR_FILE_PAGES);
	__dec_zone_page_state(page, NR_SHMEM);
	spin_unlock_irq(&mapping->tree_lock);
	page_cache_release(page);
	BUG_ON(error);
}

/*
 * Like find_get_pages, but collecting swap entries as well as pages.
 */
static unsigned shmem_find_get_pages_and_swap(struct address_space *mapping,
					pgoff_t start, unsigned int nr_pages,
					struct page **pages, pgoff_t *indices)
{
	unsigned int i;
	unsigned int ret;
	unsigned int nr_found;

	rcu_read_lock();
restart:
	nr_found = radix_tree_gang_lookup_slot(&mapping->page_tree,
				(void ***)pages, indices, start, nr_pages);
	ret = 0;
	for (i = 0; i < nr_found; i++) {
		struct page *page;
repeat:
		page = radix_tree_deref_slot((void **)pages[i]);
		if (unlikely(!page))
			continue;
		if (radix_tree_exception(page)) {
			if (radix_tree_deref_retry(page))
				goto restart;
			/*
			 * Otherwise, we must be storing a swap entry
			 * here as an exceptional entry: so return it
			 * without attempting to raise page count.
			 */
			goto export;
		}
		if (!page_cache_get_speculative(page))
			goto repeat;

		/* Has the page moved? */
		if (unlikely(page != *((void **)pages[i]))) {
			page_cache_release(page);
			goto repeat;
		}
export:
		indices[ret] = indices[i];
		pages[ret] = page;
		ret++;
	}
	if (unlikely(!ret && nr_found))
		goto restart;
	rcu_read_unlock();
	return ret;
}

/*
 * Remove swap entry from radix tree, free the swap and its page cache.
 */
static int shmem_free_swap(struct address_space *mapping,
			   pgoff_t index, void *radswap)
{
	int error;

	spin_lock_irq(&mapping->tree_lock);
	error = shmem_radix_tree_replace(mapping, index, radswap, NULL);
	spin_unlock_irq(&mapping->tree_lock);
	if (!error)
		free_swap_and_cache(radix_to_swp_entry(radswap));
	return error;
}

/*
 * Pagevec may contain swap entries, so shuffle up pages before releasing.
 */
static void shmem_deswap_pagevec(struct pagevec *pvec)
{
	int i, j;

	for (i = 0, j = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];
		if (!radix_tree_exceptional_entry(page))
			pvec->pages[j++] = page;
	}
	pvec->nr = j;
}

/*
 * SysV IPC SHM_UNLOCK restore Unevictable pages to their evictable lists.
 */
void shmem_unlock_mapping(struct address_space *mapping)
{
	struct pagevec pvec;
	pgoff_t indices[PAGEVEC_SIZE];
	pgoff_t index = 0;

	pagevec_init(&pvec, 0);
	/*
	 * Minor point, but we might as well stop if someone else SHM_LOCKs it.
	 */
	while (!mapping_unevictable(mapping)) {
		/*
		 * Avoid pagevec_lookup(): find_get_pages() returns 0 as if it
		 * has finished, if it hits a row of PAGEVEC_SIZE swap entries.
		 */
		pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
					PAGEVEC_SIZE, pvec.pages, indices);
		if (!pvec.nr)
			break;
		index = indices[pvec.nr - 1] + 1;
		shmem_deswap_pagevec(&pvec);
		check_move_unevictable_pages(pvec.pages, pvec.nr);
		pagevec_release(&pvec);
		cond_resched();
	}
}

/*
 * Remove range of pages and swap entries from radix tree, and free them.
 */
void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
{
	struct address_space *mapping = inode->i_mapping;
	struct shmem_inode_info *info = SHMEM_I(inode);
	pgoff_t start = (lstart + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	unsigned partial = lstart & (PAGE_CACHE_SIZE - 1);
	pgoff_t end = (lend >> PAGE_CACHE_SHIFT);
	struct pagevec pvec;
	pgoff_t indices[PAGEVEC_SIZE];
	long nr_swaps_freed = 0;
	pgoff_t index;
	int i;

	BUG_ON((lend & (PAGE_CACHE_SIZE - 1)) != (PAGE_CACHE_SIZE - 1));

	pagevec_init(&pvec, 0);
	index = start;
	while (index <= end) {
		pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
							pvec.pages, indices);
		if (!pvec.nr)
			break;
		mem_cgroup_uncharge_start();
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			index = indices[i];
			if (index > end)
				break;

			if (radix_tree_exceptional_entry(page)) {
				nr_swaps_freed += !shmem_free_swap(mapping,
								index, page);
				continue;
			}

			if (!trylock_page(page))
				continue;
			if (page->mapping == mapping) {
				VM_BUG_ON(PageWriteback(page));
				truncate_inode_page(mapping, page);
			}
			unlock_page(page);
		}
		shmem_deswap_pagevec(&pvec);
		pagevec_release(&pvec);
		mem_cgroup_uncharge_end();
		cond_resched();
		index++;
	}

	if (partial) {
		struct page *page = NULL;
		shmem_getpage(inode, start - 1, &page, SGP_READ, NULL);
		if (page) {
			zero_user_segment(page, partial, PAGE_CACHE_SIZE);
			set_page_dirty(page);
			unlock_page(page);
			page_cache_release(page);
		}
	}

	index = start;
	for ( ; ; ) {
		cond_resched();
		pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
							pvec.pages, indices);
		if (!pvec.nr) {
			if (index == start)
				break;
			index = start;
			continue;
		}
		if (index == start && indices[0] > end) {
			shmem_deswap_pagevec(&pvec);
			pagevec_release(&pvec);
			break;
		}
		mem_cgroup_uncharge_start();
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			index = indices[i];
			if (index > end)
				break;

			if (radix_tree_exceptional_entry(page)) {
				nr_swaps_freed += !shmem_free_swap(mapping,
								index, page);
				continue;
			}

			lock_page(page);
			if (page->mapping == mapping) {
				VM_BUG_ON(PageWriteback(page));
				truncate_inode_page(mapping, page);
			}
			unlock_page(page);
		}
		shmem_deswap_pagevec(&pvec);
		pagevec_release(&pvec);
		mem_cgroup_uncharge_end();
		index++;
	}

	spin_lock(&info->lock);
	info->swapped -= nr_swaps_freed;
	shmem_recalc_inode(inode);
	spin_unlock(&info->lock);

	inode->i_ctime = inode->i_mtime = CURRENT_TIME;
}
EXPORT_SYMBOL_GPL(shmem_truncate_range);

static int shmem_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	int error;

	error = inode_change_ok(inode, attr);
	if (error)
		return error;

	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
		loff_t oldsize = inode->i_size;
		loff_t newsize = attr->ia_size;

		if (newsize != oldsize) {
			i_size_write(inode, newsize);
			inode->i_ctime = inode->i_mtime = CURRENT_TIME;
		}
		if (newsize < oldsize) {
			loff_t holebegin = round_up(newsize, PAGE_SIZE);
			unmap_mapping_range(inode->i_mapping, holebegin, 0, 1);
			shmem_truncate_range(inode, newsize, (loff_t)-1);
			/* unmap again to remove racily COWed private pages */
			unmap_mapping_range(inode->i_mapping, holebegin, 0, 1);
		}
	}

	setattr_copy(inode, attr);
#ifdef CONFIG_TMPFS_POSIX_ACL
	if (attr->ia_valid & ATTR_MODE)
		error = generic_acl_chmod(inode);
#endif
	return error;
}

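/*
 * Final inode teardown: undo the size pre-accounting, truncate away all
 * pages and swap, unhook the inode from the swaplist if it is there, and
 * free any short symlink and xattr copies.
 */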
static void shmem_evict_inode(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_xattr *xattr, *nxattr;

	if (inode->i_mapping->a_ops == &shmem_aops) {
		shmem_unacct_size(info->flags, inode->i_size);
		inode->i_size = 0;
		shmem_truncate_range(inode, 0, (loff_t)-1);
		if (!list_empty(&info->swaplist)) {
			mutex_lock(&shmem_swaplist_mutex);
			list_del_init(&info->swaplist);
			mutex_unlock(&shmem_swaplist_mutex);
		}
	} else
		kfree(info->symlink);

	list_for_each_entry_safe(xattr, nxattr, &info->xattr_list, list) {
		kfree(xattr->name);
		kfree(xattr);
	}
	BUG_ON(inode->i_blocks);
	shmem_free_inode(inode->i_sb);
	clear_inode(inode);
}

/*
 * If swap found in inode, free it and move page from swapcache to filecache.
 */
static int shmem_unuse_inode(struct shmem_inode_info *info,
			     swp_entry_t swap, struct page **pagep)
{
	struct address_space *mapping = info->vfs_inode.i_mapping;
	void *radswap;
	pgoff_t index;
	gfp_t gfp;
	int error = 0;

	radswap = swp_to_radix_entry(swap);
	index = radix_tree_locate_item(&mapping->page_tree, radswap);
	if (index == -1)
		return 0;

	/*
	 * Move _head_ to start search for next from here.
	 * But be careful: shmem_evict_inode checks list_empty without taking
	 * mutex, and there's an instant in list_move_tail when info->swaplist
	 * would appear empty, if it were the only one on shmem_swaplist.
	 */
	if (shmem_swaplist.next != &info->swaplist)
		list_move_tail(&shmem_swaplist, &info->swaplist);

	gfp = mapping_gfp_mask(mapping);
	if (shmem_should_replace_page(*pagep, gfp)) {
		mutex_unlock(&shmem_swaplist_mutex);
		error = shmem_replace_page(pagep, gfp, info, index);
		mutex_lock(&shmem_swaplist_mutex);
		/*
		 * We needed to drop mutex to make that restrictive page
		 * allocation; but the inode might already be freed by now,
		 * and we cannot refer to inode or mapping or info to check.
		 * However, we do hold page lock on the PageSwapCache page,
		 * so can check if that still has our reference remaining.
		 */
		if (!page_swapcount(*pagep))
			error = -ENOENT;
	}

	/*
	 * We rely on shmem_swaplist_mutex, not only to protect the swaplist,
	 * but also to hold up shmem_evict_inode(): so inode cannot be freed
	 * beneath us (pagelock doesn't help until the page is in pagecache).
	 */
	if (!error)
		error = shmem_add_to_page_cache(*pagep, mapping, index,
						GFP_NOWAIT, radswap);
	if (error != -ENOMEM) {
		/*
		 * Truncation and eviction use free_swap_and_cache(), which
		 * only does trylock page: if we raced, best clean up here.
		 */
		delete_from_swap_cache(*pagep);
		set_page_dirty(*pagep);
		if (!error) {
			spin_lock(&info->lock);
			info->swapped--;
			spin_unlock(&info->lock);
			swap_free(swap);
		}
		error = 1;	/* not an error, but entry was found */
	}
	return error;
}

/*
 * Search through swapped inodes to find and replace swap by page.
 */
int shmem_unuse(swp_entry_t swap, struct page *page)
{
	struct list_head *this, *next;
	struct shmem_inode_info *info;
	int found = 0;
	int error = 0;

	/*
	 * There's a faint possibility that swap page was replaced before
	 * caller locked it: it will come back later with the right page.
	 */
	if (unlikely(!PageSwapCache(page)))
		goto out;

	/*
	 * Charge page using GFP_KERNEL while we can wait, before taking
	 * the shmem_swaplist_mutex which might hold up shmem_writepage().
	 * Charged back to the user (not to caller) when swap account is used.
	 */
	error = mem_cgroup_cache_charge(page, current->mm, GFP_KERNEL);
	if (error)
		goto out;
	/* No radix_tree_preload: swap entry keeps a place for page in tree */

	mutex_lock(&shmem_swaplist_mutex);
	list_for_each_safe(this, next, &shmem_swaplist) {
		info = list_entry(this, struct shmem_inode_info, swaplist);
		if (info->swapped)
			found = shmem_unuse_inode(info, swap, &page);
		else
			list_del_init(&info->swaplist);
		cond_resched();
		if (found)
			break;
	}
	mutex_unlock(&shmem_swaplist_mutex);

	if (found < 0)
		error = found;
out:
	unlock_page(page);
	page_cache_release(page);
	return error;
}

/*
 * Move the page from the page cache to the swap cache.
 */
static int shmem_writepage(struct page *page, struct writeback_control *wbc)
{
	struct shmem_inode_info *info;
	struct address_space *mapping;
	struct inode *inode;
	swp_entry_t swap;
	pgoff_t index;

	BUG_ON(!PageLocked(page));
	mapping = page->mapping;
	index = page->index;
	inode = mapping->host;
	info = SHMEM_I(inode);
	if (info->flags & VM_LOCKED)
		goto redirty;
	if (!total_swap_pages)
		goto redirty;

	/*
	 * shmem_backing_dev_info's capabilities prevent regular writeback or
	 * sync from ever calling shmem_writepage; but a stacking filesystem
	 * might use ->writepage of its underlying filesystem, in which case
	 * tmpfs should write out to swap only in response to memory pressure,
	 * and not for the writeback threads or sync.
	 */
	if (!wbc->for_reclaim) {
		WARN_ON_ONCE(1);	/* Still happens? Tell us about it! */
		goto redirty;
	}
	swap = get_swap_page();
	if (!swap.val)
		goto redirty;

	/*
	 * Add inode to shmem_unuse()'s list of swapped-out inodes,
	 * if it's not already there.  Do it now before the page is
	 * moved to swap cache, when its pagelock no longer protects
	 * the inode from eviction.  But don't unlock the mutex until
	 * we've incremented swapped, because shmem_unuse_inode() will
	 * prune a !swapped inode from the swaplist under this mutex.
	 */
	mutex_lock(&shmem_swaplist_mutex);
	if (list_empty(&info->swaplist))
		list_add_tail(&info->swaplist, &shmem_swaplist);

	if (add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) {
		swap_shmem_alloc(swap);
		shmem_delete_from_page_cache(page, swp_to_radix_entry(swap));

		spin_lock(&info->lock);
		info->swapped++;
		shmem_recalc_inode(inode);
		spin_unlock(&info->lock);

		mutex_unlock(&shmem_swaplist_mutex);
		BUG_ON(page_mapped(page));
		swap_writepage(page, wbc);
		return 0;
	}

	mutex_unlock(&shmem_swaplist_mutex);
	swapcache_free(swap, NULL);
redirty:
	set_page_dirty(page);
	if (wbc->for_reclaim)
		return AOP_WRITEPAGE_ACTIVATE;	/* Return with page locked */
	unlock_page(page);
	return 0;
}

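/*
 * Mempolicy support: under CONFIG_NUMA, swapin and page allocation honour
 * the inode's shared policy via a pseudo-vma; otherwise plain allocation.
 */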
#ifdef CONFIG_NUMA
#ifdef CONFIG_TMPFS
static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
{
	char buffer[64];

	if (!mpol || mpol->mode == MPOL_DEFAULT)
		return;		/* show nothing */

	mpol_to_str(buffer, sizeof(buffer), mpol, 1);

	seq_printf(seq, ",mpol=%s", buffer);
}

static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
{
	struct mempolicy *mpol = NULL;
	if (sbinfo->mpol) {
		spin_lock(&sbinfo->stat_lock);	/* prevent replace/use races */
		mpol = sbinfo->mpol;
		mpol_get(mpol);
		spin_unlock(&sbinfo->stat_lock);
	}
	return mpol;
}
#endif /* CONFIG_TMPFS */

static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
			struct shmem_inode_info *info, pgoff_t index)
{
	struct mempolicy mpol, *spol;
	struct vm_area_struct pvma;

	spol = mpol_cond_copy(&mpol,
			mpol_shared_policy_lookup(&info->policy, index));

	/* Create a pseudo vma that just contains the policy */
	pvma.vm_start = 0;
	pvma.vm_pgoff = index;
	pvma.vm_ops = NULL;
	pvma.vm_policy = spol;
	return swapin_readahead(swap, gfp, &pvma, 0);
}

static struct page *shmem_alloc_page(gfp_t gfp,
			struct shmem_inode_info *info, pgoff_t index)
{
	struct vm_area_struct pvma;

	/* Create a pseudo vma that just contains the policy */
	pvma.vm_start = 0;
	pvma.vm_pgoff = index;
	pvma.vm_ops = NULL;
	pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, index);

	/*
	 * alloc_page_vma() will drop the shared policy reference
	 */
	return alloc_page_vma(gfp, &pvma, 0);
}
#else /* !CONFIG_NUMA */
#ifdef CONFIG_TMPFS
static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
{
}
#endif /* CONFIG_TMPFS */

static inline struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
			struct shmem_inode_info *info, pgoff_t index)
{
	return swapin_readahead(swap, gfp, NULL, 0);
}

static inline struct page *shmem_alloc_page(gfp_t gfp,
			struct shmem_inode_info *info, pgoff_t index)
{
	return alloc_page(gfp);
}
#endif /* CONFIG_NUMA */

#if !defined(CONFIG_NUMA) || !defined(CONFIG_TMPFS)
static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
{
	return NULL;
}
#endif

/*
 * When a page is moved from swapcache to shmem filecache (either by the
 * usual swapin of shmem_getpage_gfp(), or by the less common swapoff of
 * shmem_unuse_inode()), it may have been read in earlier from swap, in
 * ignorance of the mapping it belongs to.  If that mapping has special
 * constraints (like the gma500 GEM driver, which requires RAM below 4GB),
 * we may need to copy to a suitable page before moving to filecache.
 *
 * In a future release, this may well be extended to respect cpuset and
 * NUMA mempolicy, and applied also to anonymous pages in do_swap_page();
 * but for now it is a simple matter of zone.
 */
static bool shmem_should_replace_page(struct page *page, gfp_t gfp)
{
	return page_zonenum(page) > gfp_zone(gfp);
}

static int shmem_replace_page(struct page **pagep, gfp_t gfp,
				struct shmem_inode_info *info, pgoff_t index)
{
	struct page *oldpage, *newpage;
	struct address_space *swap_mapping;
	pgoff_t swap_index;
	int error;

	oldpage = *pagep;
	swap_index = page_private(oldpage);
	swap_mapping = page_mapping(oldpage);

	/*
	 * We have arrived here because our zones are constrained, so don't
	 * limit chance of success by further cpuset and node constraints.
	 */
	gfp &= ~GFP_CONSTRAINT_MASK;
	newpage = shmem_alloc_page(gfp, info, index);
	if (!newpage)
		return -ENOMEM;
	VM_BUG_ON(shmem_should_replace_page(newpage, gfp));

	*pagep = newpage;
	page_cache_get(newpage);
	copy_highpage(newpage, oldpage);

	VM_BUG_ON(!PageLocked(oldpage));
	__set_page_locked(newpage);
	VM_BUG_ON(!PageUptodate(oldpage));
	SetPageUptodate(newpage);
	VM_BUG_ON(!PageSwapBacked(oldpage));
	SetPageSwapBacked(newpage);
	VM_BUG_ON(!swap_index);
	set_page_private(newpage, swap_index);
	VM_BUG_ON(!PageSwapCache(oldpage));
	SetPageSwapCache(newpage);

	/*
	 * Our caller will very soon move newpage out of swapcache, but it's
	 * a nice clean interface for us to replace oldpage by newpage there.
	 */
	spin_lock_irq(&swap_mapping->tree_lock);
	error = shmem_radix_tree_replace(swap_mapping, swap_index, oldpage,
								newpage);
	__inc_zone_page_state(newpage, NR_FILE_PAGES);
	__dec_zone_page_state(oldpage, NR_FILE_PAGES);
	spin_unlock_irq(&swap_mapping->tree_lock);
	BUG_ON(error);

	mem_cgroup_replace_page_cache(oldpage, newpage);
	lru_cache_add_anon(newpage);

	ClearPageSwapCache(oldpage);
	set_page_private(oldpage, 0);

	unlock_page(oldpage);
	page_cache_release(oldpage);
	page_cache_release(oldpage);
	return 0;
}

/*
 * shmem_getpage_gfp - find page in cache, or get from swap, or allocate
 *
 * If we allocate a new one we do not mark it dirty. That's up to the
 * vm. If we swap it in we mark it dirty since we also free the swap
 * entry since a page cannot live in both the swap and page cache
 */
static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
	struct page **pagep, enum sgp_type sgp, gfp_t gfp, int *fault_type)
{
	struct address_space *mapping = inode->i_mapping;
	struct shmem_inode_info *info;
	struct shmem_sb_info *sbinfo;
	struct page *page;
	swp_entry_t swap;
	int error;
	int once = 0;

	if (index > (MAX_LFS_FILESIZE >> PAGE_CACHE_SHIFT))
		return -EFBIG;
repeat:
	swap.val = 0;
	page = find_lock_page(mapping, index);
	if (radix_tree_exceptional_entry(page)) {
		swap = radix_to_swp_entry(page);
		page = NULL;
	}

	if (sgp != SGP_WRITE &&
	    ((loff_t)index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
		error = -EINVAL;
		goto failed;
	}

	if (page || (sgp == SGP_READ && !swap.val)) {
		/*
		 * Once we can get the page lock, it must be uptodate:
		 * if there were an error in reading back from swap,
		 * the page would not be inserted into the filecache.
		 */
		BUG_ON(page && !PageUptodate(page));
		*pagep = page;
		return 0;
	}

	/*
	 * Fast cache lookup did not find it:
	 * bring it back from swap or allocate.
	 */
	info = SHMEM_I(inode);
	sbinfo = SHMEM_SB(inode->i_sb);

	if (swap.val) {
		/* Look it up and read it in.. */
		page = lookup_swap_cache(swap);
		if (!page) {
			/* here we actually do the io */
			if (fault_type)
				*fault_type |= VM_FAULT_MAJOR;
			page = shmem_swapin(swap, gfp, info, index);
			if (!page) {
				error = -ENOMEM;
				goto failed;
			}
		}

		/* We have to do this with page locked to prevent races */
		lock_page(page);
		if (!PageSwapCache(page) || page->mapping) {
			error = -EEXIST;	/* try again */
			goto failed;
		}
		if (!PageUptodate(page)) {
			error = -EIO;
			goto failed;
		}
		wait_on_page_writeback(page);

		if (shmem_should_replace_page(page, gfp)) {
			error = shmem_replace_page(&page, gfp, info, index);
			if (error)
				goto failed;
		}

		error = mem_cgroup_cache_charge(page, current->mm,
						gfp & GFP_RECLAIM_MASK);
		if (!error)
			error = shmem_add_to_page_cache(page, mapping, index,
						gfp, swp_to_radix_entry(swap));
		if (error)
			goto failed;

		spin_lock(&info->lock);
		info->swapped--;
		shmem_recalc_inode(inode);
		spin_unlock(&info->lock);

		delete_from_swap_cache(page);
		set_page_dirty(page);
		swap_free(swap);

	} else {
		if (shmem_acct_block(info->flags)) {
			error = -ENOSPC;
			goto failed;
		}
		if (sbinfo->max_blocks) {
			if (percpu_counter_compare(&sbinfo->used_blocks,
						sbinfo->max_blocks) >= 0) {
				error = -ENOSPC;
				goto unacct;
			}
			percpu_counter_inc(&sbinfo->used_blocks);
		}

		page = shmem_alloc_page(gfp, info, index);
		if (!page) {
			error = -ENOMEM;
			goto decused;
		}

		SetPageSwapBacked(page);
		__set_page_locked(page);
		error = mem_cgroup_cache_charge(page, current->mm,
						gfp & GFP_RECLAIM_MASK);
		if (!error)
			error = shmem_add_to_page_cache(page, mapping, index,
						gfp, NULL);
		if (error)
			goto decused;
		lru_cache_add_anon(page);

		spin_lock(&info->lock);
		info->alloced++;
		inode->i_blocks += BLOCKS_PER_PAGE;
		shmem_recalc_inode(inode);
		spin_unlock(&info->lock);

		/*
		 * Let SGP_WRITE caller clear ends if write does not fill page
		 */
		if (sgp != SGP_WRITE) {
			clear_highpage(page);
			flush_dcache_page(page);
			SetPageUptodate(page);
		}
		if (sgp == SGP_DIRTY)
			set_page_dirty(page);
	}

	/* Perhaps the file has been truncated since we checked */
	if (sgp != SGP_WRITE &&
	    ((loff_t)index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
		error = -EINVAL;
		goto trunc;
	}
	*pagep = page;
	return 0;

	/*
	 * Error recovery.
	 */
trunc:
	ClearPageDirty(page);
	delete_from_page_cache(page);
	spin_lock(&info->lock);
	info->alloced--;
	inode->i_blocks -= BLOCKS_PER_PAGE;
	spin_unlock(&info->lock);
decused:
	if (sbinfo->max_blocks)
		percpu_counter_add(&sbinfo->used_blocks, -1);
unacct:
	shmem_unacct_blocks(info->flags, 1);
failed:
	if (swap.val && error != -EINVAL) {
		struct page *test = find_get_page(mapping, index);
		if (test && !radix_tree_exceptional_entry(test))
			page_cache_release(test);
		/* Have another try if the entry has changed */
		if (test != swp_to_radix_entry(swap))
			error = -EEXIST;
	}
	if (page) {
		unlock_page(page);
		page_cache_release(page);
	}
	if (error == -ENOSPC && !once++) {
		info = SHMEM_I(inode);
		spin_lock(&info->lock);
		shmem_recalc_inode(inode);
		spin_unlock(&info->lock);
		goto repeat;
	}
	if (error == -EEXIST)
		goto repeat;
	return error;
}

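/*
 * Fault in a page via shmem_getpage(), reporting a major fault when the
 * page had to be brought back from swap.
 */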
static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
	int error;
	int ret = VM_FAULT_LOCKED;

	error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret);
	if (error)
		return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS);

	if (ret & VM_FAULT_MAJOR) {
		count_vm_event(PGMAJFAULT);
		mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
	}
	return ret;
}

#ifdef CONFIG_NUMA
static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol)
{
	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
	return mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, mpol);
}

static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
					  unsigned long addr)
{
	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
	pgoff_t index;

	index = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
	return mpol_shared_policy_lookup(&SHMEM_I(inode)->policy, index);
}
#endif

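/*
 * SysV SHM_LOCK/SHM_UNLOCK: account the locked size against the user's
 * shm-lock limit, and mark the mapping unevictable so reclaim leaves its
 * pages alone.
 */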
int shmem_lock(struct file *file, int lock, struct user_struct *user)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct shmem_inode_info *info = SHMEM_I(inode);
	int retval = -ENOMEM;

	spin_lock(&info->lock);
	if (lock && !(info->flags & VM_LOCKED)) {
		if (!user_shm_lock(inode->i_size, user))
			goto out_nomem;
		info->flags |= VM_LOCKED;
		mapping_set_unevictable(file->f_mapping);
	}
	if (!lock && (info->flags & VM_LOCKED) && user) {
		user_shm_unlock(inode->i_size, user);
		info->flags &= ~VM_LOCKED;
		mapping_clear_unevictable(file->f_mapping);
	}
	retval = 0;

out_nomem:
	spin_unlock(&info->lock);
	return retval;
}

static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
{
	file_accessed(file);
	vma->vm_ops = &shmem_vm_ops;
	vma->vm_flags |= VM_CAN_NONLINEAR;
	return 0;
}

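/*
 * Allocate a new inode, charging it against the per-superblock inode pool,
 * and set up the operations appropriate to its file type.
 */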
static struct inode *shmem_get_inode(struct super_block *sb, const struct inode *dir,
				     umode_t mode, dev_t dev, unsigned long flags)
{
	struct inode *inode;
	struct shmem_inode_info *info;
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);

	if (shmem_reserve_inode(sb))
		return NULL;

	inode = new_inode(sb);
	if (inode) {
		inode->i_ino = get_next_ino();
		inode_init_owner(inode, dir, mode);
		inode->i_blocks = 0;
		inode->i_mapping->backing_dev_info = &shmem_backing_dev_info;
		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
		inode->i_generation = get_seconds();
		info = SHMEM_I(inode);
		memset(info, 0, (char *)inode - (char *)info);
		spin_lock_init(&info->lock);
		info->flags = flags & VM_NORESERVE;
		INIT_LIST_HEAD(&info->swaplist);
		INIT_LIST_HEAD(&info->xattr_list);
		cache_no_acl(inode);

		switch (mode & S_IFMT) {
		default:
			inode->i_op = &shmem_special_inode_operations;
			init_special_inode(inode, mode, dev);
			break;
		case S_IFREG:
			inode->i_mapping->a_ops = &shmem_aops;
			inode->i_op = &shmem_inode_operations;
			inode->i_fop = &shmem_file_operations;
			mpol_shared_policy_init(&info->policy,
						 shmem_get_sbmpol(sbinfo));
			break;
		case S_IFDIR:
			inc_nlink(inode);
			/* Some things misbehave if size == 0 on a directory */
			inode->i_size = 2 * BOGO_DIRENT_SIZE;
			inode->i_op = &shmem_dir_inode_operations;
			inode->i_fop = &simple_dir_operations;
			break;
		case S_IFLNK:
			/*
			 * Must not load anything in the rbtree,
			 * mpol_free_shared_policy will not be called.
			 */
			mpol_shared_policy_init(&info->policy, NULL);
			break;
		}
	} else
		shmem_free_inode(sb);
	return inode;
}

#ifdef CONFIG_TMPFS
static const struct inode_operations shmem_symlink_inode_operations;
static const struct inode_operations shmem_short_symlink_operations;

#ifdef CONFIG_TMPFS_XATTR
static int shmem_initxattrs(struct inode *, const struct xattr *, void *);
#else
#define shmem_initxattrs NULL
#endif

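/*
 * With SGP_WRITE, a freshly allocated page is left !Uptodate and unzeroed;
 * shmem_write_end() then zeroes only the parts the copy did not fill --
 * the clearing optimization named in the commit title.
 */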
static int
shmem_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	return shmem_getpage(inode, index, pagep, SGP_WRITE, NULL);
}

static int
shmem_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;

	if (pos + copied > inode->i_size)
		i_size_write(inode, pos + copied);

	if (!PageUptodate(page)) {
		if (copied < PAGE_CACHE_SIZE) {
			unsigned from = pos & (PAGE_CACHE_SIZE - 1);
			zero_user_segments(page, 0, from,
					from + copied, PAGE_CACHE_SIZE);
		}
		SetPageUptodate(page);
	}
	set_page_dirty(page);
	unlock_page(page);
	page_cache_release(page);

	return copied;
}

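/*
 * Read loop: copy out page by page via the read actor.  Holes are read as
 * zeroes from ZERO_PAGE, unless the read is for a stacking filesystem
 * (KERNEL_DS), which forces allocation with SGP_DIRTY.
 */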
static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_t *desc, read_actor_t actor)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct address_space *mapping = inode->i_mapping;
	pgoff_t index;
	unsigned long offset;
	enum sgp_type sgp = SGP_READ;

	/*
	 * Might this read be for a stacking filesystem?  Then when reading
	 * holes of a sparse file, we actually need to allocate those pages,
	 * and even mark them dirty, so it cannot exceed the max_blocks limit.
	 */
	if (segment_eq(get_fs(), KERNEL_DS))
		sgp = SGP_DIRTY;

	index = *ppos >> PAGE_CACHE_SHIFT;
	offset = *ppos & ~PAGE_CACHE_MASK;

	for (;;) {
		struct page *page = NULL;
		pgoff_t end_index;
		unsigned long nr, ret;
		loff_t i_size = i_size_read(inode);

		end_index = i_size >> PAGE_CACHE_SHIFT;
		if (index > end_index)
			break;
		if (index == end_index) {
			nr = i_size & ~PAGE_CACHE_MASK;
			if (nr <= offset)
				break;
		}

		desc->error = shmem_getpage(inode, index, &page, sgp, NULL);
		if (desc->error) {
			if (desc->error == -EINVAL)
				desc->error = 0;
			break;
		}
		if (page)
			unlock_page(page);

		/*
		 * We must evaluate after, since reads (unlike writes)
		 * are called without i_mutex protection against truncate
		 */
		nr = PAGE_CACHE_SIZE;
		i_size = i_size_read(inode);
		end_index = i_size >> PAGE_CACHE_SHIFT;
		if (index == end_index) {
			nr = i_size & ~PAGE_CACHE_MASK;
			if (nr <= offset) {
				if (page)
					page_cache_release(page);
				break;
			}
		}
		nr -= offset;

		if (page) {
			/*
			 * If users can be writing to this page using arbitrary
			 * virtual addresses, take care about potential aliasing
			 * before reading the page on the kernel side.
			 */
			if (mapping_writably_mapped(mapping))
				flush_dcache_page(page);
			/*
			 * Mark the page accessed if we read the beginning.
			 */
			if (!offset)
				mark_page_accessed(page);
		} else {
			page = ZERO_PAGE(0);
			page_cache_get(page);
		}

		/*
		 * Ok, we have the page, and it's up-to-date, so
		 * now we can copy it to user space...
		 *
		 * The actor routine returns how many bytes were actually used..
		 * NOTE! This may not be the same as how much of a user buffer
		 * we filled up (we may be padding etc), so we can only update
		 * "pos" here (the actor routine has to update the user buffer
		 * pointers and the remaining count).
		 */
		ret = actor(desc, page, offset, nr);
		offset += ret;
		index += offset >> PAGE_CACHE_SHIFT;
		offset &= ~PAGE_CACHE_MASK;

		page_cache_release(page);
		if (ret != nr || !desc->count)
			break;

		cond_resched();
	}

	*ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
	file_accessed(filp);
}

static ssize_t shmem_file_aio_read(struct kiocb *iocb,
		const struct iovec *iov, unsigned long nr_segs, loff_t pos)
{
	struct file *filp = iocb->ki_filp;
	ssize_t retval;
	unsigned long seg;
	size_t count;
	loff_t *ppos = &iocb->ki_pos;

	retval = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
	if (retval)
		return retval;

	for (seg = 0; seg < nr_segs; seg++) {
		read_descriptor_t desc;

		desc.written = 0;
		desc.arg.buf = iov[seg].iov_base;
		desc.count = iov[seg].iov_len;
		if (desc.count == 0)
			continue;
		desc.error = 0;
		do_shmem_file_read(filp, ppos, &desc, file_read_actor);
		retval += desc.written;
		if (desc.error) {
			retval = retval ?: desc.error;
			break;
		}
		if (desc.count > 0)
			break;
	}
	return retval;
}

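/*
 * splice() support: gather up to pipe->buffers pages from the page cache,
 * allocating any that are missing via shmem_getpage(), then hand them to
 * splice_to_pipe().
 */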
static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
				struct pipe_inode_info *pipe, size_t len,
				unsigned int flags)
{
	struct address_space *mapping = in->f_mapping;
	struct inode *inode = mapping->host;
	unsigned int loff, nr_pages, req_pages;
	struct page *pages[PIPE_DEF_BUFFERS];
	struct partial_page partial[PIPE_DEF_BUFFERS];
	struct page *page;
	pgoff_t index, end_index;
	loff_t isize, left;
	int error, page_nr;
	struct splice_pipe_desc spd = {
		.pages = pages,
		.partial = partial,
		.flags = flags,
		.ops = &page_cache_pipe_buf_ops,
		.spd_release = spd_release_page,
	};

	isize = i_size_read(inode);
	if (unlikely(*ppos >= isize))
		return 0;

	left = isize - *ppos;
	if (unlikely(left < len))
		len = left;

	if (splice_grow_spd(pipe, &spd))
		return -ENOMEM;

	index = *ppos >> PAGE_CACHE_SHIFT;
	loff = *ppos & ~PAGE_CACHE_MASK;
	req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	nr_pages = min(req_pages, pipe->buffers);

	spd.nr_pages = find_get_pages_contig(mapping, index,
						nr_pages, spd.pages);
	index += spd.nr_pages;
	error = 0;

	while (spd.nr_pages < nr_pages) {
		error = shmem_getpage(inode, index, &page, SGP_CACHE, NULL);
		if (error)
			break;
		unlock_page(page);
		spd.pages[spd.nr_pages++] = page;
		index++;
	}

	index = *ppos >> PAGE_CACHE_SHIFT;
	nr_pages = spd.nr_pages;
	spd.nr_pages = 0;

	for (page_nr = 0; page_nr < nr_pages; page_nr++) {
		unsigned int this_len;

		if (!len)
			break;

		this_len = min_t(unsigned long, len, PAGE_CACHE_SIZE - loff);
		page = spd.pages[page_nr];

		if (!PageUptodate(page) || page->mapping != mapping) {
			error = shmem_getpage(inode, index, &page,
							SGP_CACHE, NULL);
			if (error)
				break;
			unlock_page(page);
			page_cache_release(spd.pages[page_nr]);
			spd.pages[page_nr] = page;
		}

		isize = i_size_read(inode);
		end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
		if (unlikely(!isize || index > end_index))
			break;

		if (end_index == index) {
			unsigned int plen;

			plen = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
			if (plen <= loff)
				break;

			this_len = min(this_len, plen - loff);
			len = this_len;
		}

		spd.partial[page_nr].offset = loff;
		spd.partial[page_nr].len = this_len;
		len -= this_len;
		loff = 0;
		spd.nr_pages++;
		index++;
	}

	while (page_nr < nr_pages)
		page_cache_release(spd.pages[page_nr++]);

	if (spd.nr_pages)
		error = splice_to_pipe(pipe, &spd);

	splice_shrink_spd(pipe, &spd);

	if (error > 0) {
		*ppos += error;
		file_accessed(in);
	}
	return error;
}

static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);

	buf->f_type = TMPFS_MAGIC;
	buf->f_bsize = PAGE_CACHE_SIZE;
	buf->f_namelen = NAME_MAX;
	if (sbinfo->max_blocks) {
		buf->f_blocks = sbinfo->max_blocks;
		buf->f_bavail =
		buf->f_bfree  = sbinfo->max_blocks -
				percpu_counter_sum(&sbinfo->used_blocks);
	}
	if (sbinfo->max_inodes) {
		buf->f_files = sbinfo->max_inodes;
		buf->f_ffree = sbinfo->free_inodes;
	}
	/* else leave those fields 0 like simple_statfs */
	return 0;
}

1601
1602/*
1603 * File creation. Allocate an inode, and we're done..
1604 */
1605static int
1a67aafb 1606shmem_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
1da177e4 1607{
0b0a0806 1608 struct inode *inode;
1da177e4
LT
1609 int error = -ENOSPC;
1610
454abafe 1611 inode = shmem_get_inode(dir->i_sb, dir, mode, dev, VM_NORESERVE);
1da177e4 1612 if (inode) {
2a7dba39 1613 error = security_inode_init_security(inode, dir,
9d8f13ba 1614 &dentry->d_name,
6d9d88d0 1615 shmem_initxattrs, NULL);
570bc1c2
SS
1616 if (error) {
1617 if (error != -EOPNOTSUPP) {
1618 iput(inode);
1619 return error;
1620 }
39f0247d 1621 }
1c7c474c
CH
1622#ifdef CONFIG_TMPFS_POSIX_ACL
1623 error = generic_acl_init(inode, dir);
39f0247d
AG
1624 if (error) {
1625 iput(inode);
1626 return error;
570bc1c2 1627 }
718deb6b
AV
1628#else
1629 error = 0;
1c7c474c 1630#endif
1da177e4
LT
1631 dir->i_size += BOGO_DIRENT_SIZE;
1632 dir->i_ctime = dir->i_mtime = CURRENT_TIME;
1633 d_instantiate(dentry, inode);
1634 dget(dentry); /* Extra count - pin the dentry in core */
1da177e4
LT
1635 }
1636 return error;
1637}
1638
18bb1db3 1639static int shmem_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
1da177e4
LT
1640{
1641 int error;
1642
1643 if ((error = shmem_mknod(dir, dentry, mode | S_IFDIR, 0)))
1644 return error;
d8c76e6f 1645 inc_nlink(dir);
1da177e4
LT
1646 return 0;
1647}
1648
4acdaf27 1649static int shmem_create(struct inode *dir, struct dentry *dentry, umode_t mode,
1da177e4
LT
1650 struct nameidata *nd)
1651{
1652 return shmem_mknod(dir, dentry, mode | S_IFREG, 0);
1653}
1654
/*
 * Link a file..
 */
static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = old_dentry->d_inode;
	int ret;

	/*
	 * No ordinary (disk based) filesystem counts links as inodes;
	 * but each new link needs a new dentry, pinning lowmem, and
	 * tmpfs dentries cannot be pruned until they are unlinked.
	 */
	ret = shmem_reserve_inode(inode->i_sb);
	if (ret)
		goto out;

	dir->i_size += BOGO_DIRENT_SIZE;
	inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
	inc_nlink(inode);
	ihold(inode);	/* New dentry reference */
	dget(dentry);		/* Extra pinning count for the created dentry */
	d_instantiate(dentry, inode);
out:
	return ret;
}

static int shmem_unlink(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = dentry->d_inode;

	if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode))
		shmem_free_inode(inode->i_sb);

	dir->i_size -= BOGO_DIRENT_SIZE;
	inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
	drop_nlink(inode);
	dput(dentry);	/* Undo the count from "create" - this does all the work */
	return 0;
}

static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
{
	if (!simple_empty(dentry))
		return -ENOTEMPTY;

	drop_nlink(dentry->d_inode);
	drop_nlink(dir);
	return shmem_unlink(dir, dentry);
}

/*
 * The VFS layer already does all the dentry stuff for rename,
 * we just have to decrement the usage count for the target if
 * it exists so that the VFS layer correctly free's it when it
 * gets overwritten.
 */
static int shmem_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry)
{
	struct inode *inode = old_dentry->d_inode;
	int they_are_dirs = S_ISDIR(inode->i_mode);

	if (!simple_empty(new_dentry))
		return -ENOTEMPTY;

	if (new_dentry->d_inode) {
		(void) shmem_unlink(new_dir, new_dentry);
		if (they_are_dirs)
			drop_nlink(old_dir);
	} else if (they_are_dirs) {
		drop_nlink(old_dir);
		inc_nlink(new_dir);
	}

	old_dir->i_size -= BOGO_DIRENT_SIZE;
	new_dir->i_size += BOGO_DIRENT_SIZE;
	old_dir->i_ctime = old_dir->i_mtime =
	new_dir->i_ctime = new_dir->i_mtime =
	inode->i_ctime = CURRENT_TIME;
	return 0;
}

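/*
 * Symlinks up to SHORT_SYMLINK_LEN are kept in a kmalloc'ed copy; longer
 * targets are stored in page 0 of the inode's mapping.
 */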
static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
{
	int error;
	int len;
	struct inode *inode;
	struct page *page;
	char *kaddr;
	struct shmem_inode_info *info;

	len = strlen(symname) + 1;
	if (len > PAGE_CACHE_SIZE)
		return -ENAMETOOLONG;

	inode = shmem_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0, VM_NORESERVE);
	if (!inode)
		return -ENOSPC;

	error = security_inode_init_security(inode, dir, &dentry->d_name,
					     shmem_initxattrs, NULL);
	if (error) {
		if (error != -EOPNOTSUPP) {
			iput(inode);
			return error;
		}
		error = 0;
	}

	info = SHMEM_I(inode);
	inode->i_size = len-1;
	if (len <= SHORT_SYMLINK_LEN) {
		info->symlink = kmemdup(symname, len, GFP_KERNEL);
		if (!info->symlink) {
			iput(inode);
			return -ENOMEM;
		}
		inode->i_op = &shmem_short_symlink_operations;
	} else {
		error = shmem_getpage(inode, 0, &page, SGP_WRITE, NULL);
		if (error) {
			iput(inode);
			return error;
		}
		inode->i_mapping->a_ops = &shmem_aops;
		inode->i_op = &shmem_symlink_inode_operations;
		kaddr = kmap_atomic(page);
		memcpy(kaddr, symname, len);
		kunmap_atomic(kaddr);
		SetPageUptodate(page);
		set_page_dirty(page);
		unlock_page(page);
		page_cache_release(page);
	}
	dir->i_size += BOGO_DIRENT_SIZE;
	dir->i_ctime = dir->i_mtime = CURRENT_TIME;
	d_instantiate(dentry, inode);
	dget(dentry);
	return 0;
}

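/*
 * Example (illustrative values only): with SHORT_SYMLINK_LEN of 128, a
 * target such as "/usr/bin/python" (16 bytes including its trailing
 * NUL) is kmemdup'ed and served by shmem_short_symlink_operations,
 * while a 200-byte target goes into a swappable page and is served by
 * shmem_symlink_inode_operations instead.
 */
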
static void *shmem_follow_short_symlink(struct dentry *dentry, struct nameidata *nd)
{
	nd_set_link(nd, SHMEM_I(dentry->d_inode)->symlink);
	return NULL;
}

static void *shmem_follow_link(struct dentry *dentry, struct nameidata *nd)
{
	struct page *page = NULL;
	int error = shmem_getpage(dentry->d_inode, 0, &page, SGP_READ, NULL);
	nd_set_link(nd, error ? ERR_PTR(error) : kmap(page));
	if (page)
		unlock_page(page);
	return page;
}

static void shmem_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
{
	if (!IS_ERR(nd_get_link(nd))) {
		struct page *page = cookie;
		kunmap(page);
		mark_page_accessed(page);
		page_cache_release(page);
	}
}

#ifdef CONFIG_TMPFS_XATTR
/*
 * Superblocks without xattr inode operations may get some security.* xattr
 * support from the LSM "for free". As soon as we have any other xattrs
 * like ACLs, we also need to implement the security.* handlers at
 * filesystem level, though.
 */

/*
 * Allocate new xattr and copy in the value; but leave the name to callers.
 */
static struct shmem_xattr *shmem_xattr_alloc(const void *value, size_t size)
{
	struct shmem_xattr *new_xattr;
	size_t len;

	/* wrap around? */
	len = sizeof(*new_xattr) + size;
	if (len <= sizeof(*new_xattr))
		return NULL;

	new_xattr = kmalloc(len, GFP_KERNEL);
	if (!new_xattr)
		return NULL;

	new_xattr->size = size;
	memcpy(new_xattr->value, value, size);
	return new_xattr;
}

/*
 * Callback for security_inode_init_security() for acquiring xattrs.
 */
static int shmem_initxattrs(struct inode *inode,
			    const struct xattr *xattr_array,
			    void *fs_info)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	const struct xattr *xattr;
	struct shmem_xattr *new_xattr;
	size_t len;

	for (xattr = xattr_array; xattr->name != NULL; xattr++) {
		new_xattr = shmem_xattr_alloc(xattr->value, xattr->value_len);
		if (!new_xattr)
			return -ENOMEM;

		len = strlen(xattr->name) + 1;
		new_xattr->name = kmalloc(XATTR_SECURITY_PREFIX_LEN + len,
					  GFP_KERNEL);
		if (!new_xattr->name) {
			kfree(new_xattr);
			return -ENOMEM;
		}

		memcpy(new_xattr->name, XATTR_SECURITY_PREFIX,
		       XATTR_SECURITY_PREFIX_LEN);
		memcpy(new_xattr->name + XATTR_SECURITY_PREFIX_LEN,
		       xattr->name, len);

		spin_lock(&info->lock);
		list_add(&new_xattr->list, &info->xattr_list);
		spin_unlock(&info->lock);
	}

	return 0;
}

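/*
 * Example of what the loop above stores (names illustrative): for an
 * LSM-supplied xattr with the suffix "selinux", the entry is filed
 * under the full name "security.selinux", since
 * security_inode_init_security() passes names without their
 * XATTR_SECURITY_PREFIX.
 */
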
static int shmem_xattr_get(struct dentry *dentry, const char *name,
			   void *buffer, size_t size)
{
	struct shmem_inode_info *info;
	struct shmem_xattr *xattr;
	int ret = -ENODATA;

	info = SHMEM_I(dentry->d_inode);

	spin_lock(&info->lock);
	list_for_each_entry(xattr, &info->xattr_list, list) {
		if (strcmp(name, xattr->name))
			continue;

		ret = xattr->size;
		if (buffer) {
			if (size < xattr->size)
				ret = -ERANGE;
			else
				memcpy(buffer, xattr->value, xattr->size);
		}
		break;
	}
	spin_unlock(&info->lock);
	return ret;
}

static int shmem_xattr_set(struct inode *inode, const char *name,
			   const void *value, size_t size, int flags)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_xattr *xattr;
	struct shmem_xattr *new_xattr = NULL;
	int err = 0;

	/* value == NULL means remove */
	if (value) {
		new_xattr = shmem_xattr_alloc(value, size);
		if (!new_xattr)
			return -ENOMEM;

		new_xattr->name = kstrdup(name, GFP_KERNEL);
		if (!new_xattr->name) {
			kfree(new_xattr);
			return -ENOMEM;
		}
	}

	spin_lock(&info->lock);
	list_for_each_entry(xattr, &info->xattr_list, list) {
		if (!strcmp(name, xattr->name)) {
			if (flags & XATTR_CREATE) {
				xattr = new_xattr;
				err = -EEXIST;
			} else if (new_xattr) {
				list_replace(&xattr->list, &new_xattr->list);
			} else {
				list_del(&xattr->list);
			}
			goto out;
		}
	}
	if (flags & XATTR_REPLACE) {
		xattr = new_xattr;
		err = -ENODATA;
	} else {
		list_add(&new_xattr->list, &info->xattr_list);
		xattr = NULL;
	}
out:
	spin_unlock(&info->lock);
	if (xattr)
		kfree(xattr->name);
	kfree(xattr);
	return err;
}

static const struct xattr_handler *shmem_xattr_handlers[] = {
#ifdef CONFIG_TMPFS_POSIX_ACL
	&generic_acl_access_handler,
	&generic_acl_default_handler,
#endif
	NULL
};

static int shmem_xattr_validate(const char *name)
{
	struct { const char *prefix; size_t len; } arr[] = {
		{ XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN },
		{ XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN }
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(arr); i++) {
		size_t preflen = arr[i].len;
		if (strncmp(name, arr[i].prefix, preflen) == 0) {
			if (!name[preflen])
				return -EINVAL;
			return 0;
		}
	}
	return -EOPNOTSUPP;
}

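/*
 * Examples for shmem_xattr_validate() (names illustrative):
 * "security.selinux" and "trusted.foo" pass; a bare prefix such as
 * "security." is -EINVAL (empty suffix); anything else, e.g.
 * "user.foo", is -EOPNOTSUPP since tmpfs keeps no such xattrs here.
 */
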
static ssize_t shmem_getxattr(struct dentry *dentry, const char *name,
			      void *buffer, size_t size)
{
	int err;

	/*
	 * If this is a request for a synthetic attribute in the system.*
	 * namespace use the generic infrastructure to resolve a handler
	 * for it via sb->s_xattr.
	 */
	if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
		return generic_getxattr(dentry, name, buffer, size);

	err = shmem_xattr_validate(name);
	if (err)
		return err;

	return shmem_xattr_get(dentry, name, buffer, size);
}

static int shmem_setxattr(struct dentry *dentry, const char *name,
			  const void *value, size_t size, int flags)
{
	int err;

	/*
	 * If this is a request for a synthetic attribute in the system.*
	 * namespace use the generic infrastructure to resolve a handler
	 * for it via sb->s_xattr.
	 */
	if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
		return generic_setxattr(dentry, name, value, size, flags);

	err = shmem_xattr_validate(name);
	if (err)
		return err;

	if (size == 0)
		value = "";	/* empty EA, do not remove */

	return shmem_xattr_set(dentry->d_inode, name, value, size, flags);
}

static int shmem_removexattr(struct dentry *dentry, const char *name)
{
	int err;

	/*
	 * If this is a request for a synthetic attribute in the system.*
	 * namespace use the generic infrastructure to resolve a handler
	 * for it via sb->s_xattr.
	 */
	if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
		return generic_removexattr(dentry, name);

	err = shmem_xattr_validate(name);
	if (err)
		return err;

	return shmem_xattr_set(dentry->d_inode, name, NULL, 0, XATTR_REPLACE);
}

static bool xattr_is_trusted(const char *name)
{
	return !strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN);
}

static ssize_t shmem_listxattr(struct dentry *dentry, char *buffer, size_t size)
{
	bool trusted = capable(CAP_SYS_ADMIN);
	struct shmem_xattr *xattr;
	struct shmem_inode_info *info;
	size_t used = 0;

	info = SHMEM_I(dentry->d_inode);

	spin_lock(&info->lock);
	list_for_each_entry(xattr, &info->xattr_list, list) {
		size_t len;

		/* skip "trusted." attributes for unprivileged callers */
		if (!trusted && xattr_is_trusted(xattr->name))
			continue;

		len = strlen(xattr->name) + 1;
		used += len;
		if (buffer) {
			if (size < used) {
				used = -ERANGE;
				break;
			}
			memcpy(buffer, xattr->name, len);
			buffer += len;
		}
	}
	spin_unlock(&info->lock);

	return used;
}
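
/*
 * The buffer filled above follows the usual listxattr(2) convention:
 * concatenated NUL-terminated names, e.g. (hypothetical listing)
 * "security.selinux\0trusted.md5sum\0", with the total byte count
 * returned.
 */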
#endif /* CONFIG_TMPFS_XATTR */

static const struct inode_operations shmem_short_symlink_operations = {
	.readlink	= generic_readlink,
	.follow_link	= shmem_follow_short_symlink,
#ifdef CONFIG_TMPFS_XATTR
	.setxattr	= shmem_setxattr,
	.getxattr	= shmem_getxattr,
	.listxattr	= shmem_listxattr,
	.removexattr	= shmem_removexattr,
#endif
};

static const struct inode_operations shmem_symlink_inode_operations = {
	.readlink	= generic_readlink,
	.follow_link	= shmem_follow_link,
	.put_link	= shmem_put_link,
#ifdef CONFIG_TMPFS_XATTR
	.setxattr	= shmem_setxattr,
	.getxattr	= shmem_getxattr,
	.listxattr	= shmem_listxattr,
	.removexattr	= shmem_removexattr,
#endif
};

static struct dentry *shmem_get_parent(struct dentry *child)
{
	return ERR_PTR(-ESTALE);
}

static int shmem_match(struct inode *ino, void *vfh)
{
	__u32 *fh = vfh;
	__u64 inum = fh[2];
	inum = (inum << 32) | fh[1];
	return ino->i_ino == inum && fh[0] == ino->i_generation;
}

static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
		struct fid *fid, int fh_len, int fh_type)
{
	struct inode *inode;
	struct dentry *dentry = NULL;
	u64 inum;

	/* check the handle length before reading fid->raw[1..2] */
	if (fh_len < 3)
		return NULL;

	inum = fid->raw[2];
	inum = (inum << 32) | fid->raw[1];

	inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]),
			shmem_match, fid->raw);
	if (inode) {
		dentry = d_find_alias(inode);
		iput(inode);
	}

	return dentry;
}

static int shmem_encode_fh(struct dentry *dentry, __u32 *fh, int *len,
				int connectable)
{
	struct inode *inode = dentry->d_inode;

	if (*len < 3) {
		*len = 3;
		return 255;
	}

	if (inode_unhashed(inode)) {
		/* Unfortunately insert_inode_hash is not idempotent,
		 * so as we hash inodes here rather than at creation
		 * time, we need a lock to ensure we only try
		 * to do it once
		 */
		static DEFINE_SPINLOCK(lock);
		spin_lock(&lock);
		if (inode_unhashed(inode))
			__insert_inode_hash(inode,
					    inode->i_ino + inode->i_generation);
		spin_unlock(&lock);
	}

	fh[0] = inode->i_generation;
	fh[1] = inode->i_ino;
	fh[2] = ((__u64)inode->i_ino) >> 32;

	*len = 3;
	return 1;
}

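/*
 * Sketch, not part of the build: how the three 32-bit words written by
 * shmem_encode_fh() above map back to the fields that shmem_match()
 * compares. The helper name is hypothetical.
 */
static inline u64 shmem_example_fh_inum(const __u32 *fh)
{
	/* fh[0] holds i_generation; fh[1] and fh[2] split the 64-bit i_ino */
	return ((u64)fh[2] << 32) | fh[1];
}
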
static const struct export_operations shmem_export_ops = {
	.get_parent	= shmem_get_parent,
	.encode_fh	= shmem_encode_fh,
	.fh_to_dentry	= shmem_fh_to_dentry,
};

static int shmem_parse_options(char *options, struct shmem_sb_info *sbinfo,
			       bool remount)
{
	char *this_char, *value, *rest;
	uid_t uid;
	gid_t gid;

	while (options != NULL) {
		this_char = options;
		for (;;) {
			/*
			 * NUL-terminate this option: unfortunately,
			 * mount options form a comma-separated list,
			 * but mpol's nodelist may also contain commas.
			 */
			options = strchr(options, ',');
			if (options == NULL)
				break;
			options++;
			if (!isdigit(*options)) {
				options[-1] = '\0';
				break;
			}
		}
		if (!*this_char)
			continue;
		if ((value = strchr(this_char,'=')) != NULL) {
			*value++ = 0;
		} else {
			printk(KERN_ERR
			    "tmpfs: No value for mount option '%s'\n",
			    this_char);
			return 1;
		}

		if (!strcmp(this_char,"size")) {
			unsigned long long size;
			size = memparse(value,&rest);
			if (*rest == '%') {
				size <<= PAGE_SHIFT;
				size *= totalram_pages;
				do_div(size, 100);
				rest++;
			}
			if (*rest)
				goto bad_val;
			sbinfo->max_blocks =
				DIV_ROUND_UP(size, PAGE_CACHE_SIZE);
		} else if (!strcmp(this_char,"nr_blocks")) {
			sbinfo->max_blocks = memparse(value, &rest);
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char,"nr_inodes")) {
			sbinfo->max_inodes = memparse(value, &rest);
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char,"mode")) {
			if (remount)
				continue;
			sbinfo->mode = simple_strtoul(value, &rest, 8) & 07777;
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char,"uid")) {
			if (remount)
				continue;
			uid = simple_strtoul(value, &rest, 0);
			if (*rest)
				goto bad_val;
			sbinfo->uid = make_kuid(current_user_ns(), uid);
			if (!uid_valid(sbinfo->uid))
				goto bad_val;
		} else if (!strcmp(this_char,"gid")) {
			if (remount)
				continue;
			gid = simple_strtoul(value, &rest, 0);
			if (*rest)
				goto bad_val;
			sbinfo->gid = make_kgid(current_user_ns(), gid);
			if (!gid_valid(sbinfo->gid))
				goto bad_val;
		} else if (!strcmp(this_char,"mpol")) {
			if (mpol_parse_str(value, &sbinfo->mpol, 1))
				goto bad_val;
		} else {
			printk(KERN_ERR "tmpfs: Bad mount option %s\n",
			       this_char);
			return 1;
		}
	}
	return 0;

bad_val:
	printk(KERN_ERR "tmpfs: Bad value '%s' for mount option '%s'\n",
	       value, this_char);
	return 1;
}

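/*
 * Example mount data accepted by shmem_parse_options() (values
 * illustrative):
 *
 *	size=50%,nr_inodes=1000,mode=1777,uid=1000,gid=1000
 *
 * "size" takes bytes with an optional k/m/g suffix via memparse(), or
 * a percentage of totalram_pages as handled above; "nr_blocks" and
 * "nr_inodes" likewise go through memparse().
 */
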
static int shmem_remount_fs(struct super_block *sb, int *flags, char *data)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	struct shmem_sb_info config = *sbinfo;
	unsigned long inodes;
	int error = -EINVAL;

	if (shmem_parse_options(data, &config, true))
		return error;

	spin_lock(&sbinfo->stat_lock);
	inodes = sbinfo->max_inodes - sbinfo->free_inodes;
	if (percpu_counter_compare(&sbinfo->used_blocks, config.max_blocks) > 0)
		goto out;
	if (config.max_inodes < inodes)
		goto out;
	/*
	 * Those tests disallow limited->unlimited while any are in use;
	 * but we must separately disallow unlimited->limited, because
	 * in that case we have no record of how much is already in use.
	 */
	if (config.max_blocks && !sbinfo->max_blocks)
		goto out;
	if (config.max_inodes && !sbinfo->max_inodes)
		goto out;

	error = 0;
	sbinfo->max_blocks  = config.max_blocks;
	sbinfo->max_inodes  = config.max_inodes;
	sbinfo->free_inodes = config.max_inodes - inodes;

	mpol_put(sbinfo->mpol);
	sbinfo->mpol = config.mpol;	/* transfers initial ref */
out:
	spin_unlock(&sbinfo->stat_lock);
	return error;
}

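/*
 * Example (illustrative): "mount -o remount,size=2g /tmp" succeeds
 * while current usage fits the new limits, but remounting an unlimited
 * instance (size=0) to a limited one is rejected above, because used
 * blocks were never counted for it.
 */
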
static int shmem_show_options(struct seq_file *seq, struct dentry *root)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(root->d_sb);

	if (sbinfo->max_blocks != shmem_default_max_blocks())
		seq_printf(seq, ",size=%luk",
			sbinfo->max_blocks << (PAGE_CACHE_SHIFT - 10));
	if (sbinfo->max_inodes != shmem_default_max_inodes())
		seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes);
	if (sbinfo->mode != (S_IRWXUGO | S_ISVTX))
		seq_printf(seq, ",mode=%03ho", sbinfo->mode);
	if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID))
		seq_printf(seq, ",uid=%u",
				from_kuid_munged(&init_user_ns, sbinfo->uid));
	if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID))
		seq_printf(seq, ",gid=%u",
				from_kgid_munged(&init_user_ns, sbinfo->gid));
	shmem_show_mpol(seq, sbinfo->mpol);
	return 0;
}
#endif /* CONFIG_TMPFS */

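/*
 * Example /proc/mounts line assembled with the handlers above (values
 * illustrative); only options differing from the defaults appear:
 *
 *	tmpfs /tmp tmpfs rw,size=1048576k,nr_inodes=204800,mode=700 0 0
 */
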
static void shmem_put_super(struct super_block *sb)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);

	percpu_counter_destroy(&sbinfo->used_blocks);
	kfree(sbinfo);
	sb->s_fs_info = NULL;
}

int shmem_fill_super(struct super_block *sb, void *data, int silent)
{
	struct inode *inode;
	struct shmem_sb_info *sbinfo;
	int err = -ENOMEM;

	/* Round up to L1_CACHE_BYTES to resist false sharing */
	sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
				L1_CACHE_BYTES), GFP_KERNEL);
	if (!sbinfo)
		return -ENOMEM;

	sbinfo->mode = S_IRWXUGO | S_ISVTX;
	sbinfo->uid = current_fsuid();
	sbinfo->gid = current_fsgid();
	sb->s_fs_info = sbinfo;

#ifdef CONFIG_TMPFS
	/*
	 * Per default we only allow half of the physical ram per
	 * tmpfs instance, limiting inodes to one per page of lowmem;
	 * but the internal instance is left unlimited.
	 */
	if (!(sb->s_flags & MS_NOUSER)) {
		sbinfo->max_blocks = shmem_default_max_blocks();
		sbinfo->max_inodes = shmem_default_max_inodes();
		if (shmem_parse_options(data, sbinfo, false)) {
			err = -EINVAL;
			goto failed;
		}
	}
	sb->s_export_op = &shmem_export_ops;
	sb->s_flags |= MS_NOSEC;
#else
	sb->s_flags |= MS_NOUSER;
#endif

	spin_lock_init(&sbinfo->stat_lock);
	if (percpu_counter_init(&sbinfo->used_blocks, 0))
		goto failed;
	sbinfo->free_inodes = sbinfo->max_inodes;

	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_blocksize = PAGE_CACHE_SIZE;
	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
	sb->s_magic = TMPFS_MAGIC;
	sb->s_op = &shmem_ops;
	sb->s_time_gran = 1;
#ifdef CONFIG_TMPFS_XATTR
	sb->s_xattr = shmem_xattr_handlers;
#endif
#ifdef CONFIG_TMPFS_POSIX_ACL
	sb->s_flags |= MS_POSIXACL;
#endif

	inode = shmem_get_inode(sb, NULL, S_IFDIR | sbinfo->mode, 0, VM_NORESERVE);
	if (!inode)
		goto failed;
	inode->i_uid = sbinfo->uid;
	inode->i_gid = sbinfo->gid;
	sb->s_root = d_make_root(inode);
	if (!sb->s_root)
		goto failed;
	return 0;

failed:
	shmem_put_super(sb);
	return err;
}

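/*
 * A user-visible instance built by shmem_fill_super() comes from, e.g.
 * (illustrative): "mount -t tmpfs -o size=512m,nr_inodes=10k tmpfs
 * /mnt/tmp". The internal MS_NOUSER instance mounted in shmem_init()
 * below skips the limits entirely.
 */
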
static struct kmem_cache *shmem_inode_cachep;

static struct inode *shmem_alloc_inode(struct super_block *sb)
{
	struct shmem_inode_info *info;
	info = kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL);
	if (!info)
		return NULL;
	return &info->vfs_inode;
}

static void shmem_destroy_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
}

static void shmem_destroy_inode(struct inode *inode)
{
	if (S_ISREG(inode->i_mode))
		mpol_free_shared_policy(&SHMEM_I(inode)->policy);
	call_rcu(&inode->i_rcu, shmem_destroy_callback);
}

static void shmem_init_inode(void *foo)
{
	struct shmem_inode_info *info = foo;
	inode_init_once(&info->vfs_inode);
}

static int shmem_init_inodecache(void)
{
	shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
				sizeof(struct shmem_inode_info),
				0, SLAB_PANIC, shmem_init_inode);
	return 0;
}

static void shmem_destroy_inodecache(void)
{
	kmem_cache_destroy(shmem_inode_cachep);
}

static const struct address_space_operations shmem_aops = {
	.writepage	= shmem_writepage,
	.set_page_dirty	= __set_page_dirty_no_writeback,
#ifdef CONFIG_TMPFS
	.write_begin	= shmem_write_begin,
	.write_end	= shmem_write_end,
#endif
	.migratepage	= migrate_page,
	.error_remove_page = generic_error_remove_page,
};

static const struct file_operations shmem_file_operations = {
	.mmap		= shmem_mmap,
#ifdef CONFIG_TMPFS
	.llseek		= generic_file_llseek,
	.read		= do_sync_read,
	.write		= do_sync_write,
	.aio_read	= shmem_file_aio_read,
	.aio_write	= generic_file_aio_write,
	.fsync		= noop_fsync,
	.splice_read	= shmem_file_splice_read,
	.splice_write	= generic_file_splice_write,
#endif
};

static const struct inode_operations shmem_inode_operations = {
	.setattr	= shmem_setattr,
	.truncate_range	= shmem_truncate_range,
#ifdef CONFIG_TMPFS_XATTR
	.setxattr	= shmem_setxattr,
	.getxattr	= shmem_getxattr,
	.listxattr	= shmem_listxattr,
	.removexattr	= shmem_removexattr,
#endif
};

static const struct inode_operations shmem_dir_inode_operations = {
#ifdef CONFIG_TMPFS
	.create		= shmem_create,
	.lookup		= simple_lookup,
	.link		= shmem_link,
	.unlink		= shmem_unlink,
	.symlink	= shmem_symlink,
	.mkdir		= shmem_mkdir,
	.rmdir		= shmem_rmdir,
	.mknod		= shmem_mknod,
	.rename		= shmem_rename,
#endif
#ifdef CONFIG_TMPFS_XATTR
	.setxattr	= shmem_setxattr,
	.getxattr	= shmem_getxattr,
	.listxattr	= shmem_listxattr,
	.removexattr	= shmem_removexattr,
#endif
#ifdef CONFIG_TMPFS_POSIX_ACL
	.setattr	= shmem_setattr,
#endif
};

static const struct inode_operations shmem_special_inode_operations = {
#ifdef CONFIG_TMPFS_XATTR
	.setxattr	= shmem_setxattr,
	.getxattr	= shmem_getxattr,
	.listxattr	= shmem_listxattr,
	.removexattr	= shmem_removexattr,
#endif
#ifdef CONFIG_TMPFS_POSIX_ACL
	.setattr	= shmem_setattr,
#endif
};

static const struct super_operations shmem_ops = {
	.alloc_inode	= shmem_alloc_inode,
	.destroy_inode	= shmem_destroy_inode,
#ifdef CONFIG_TMPFS
	.statfs		= shmem_statfs,
	.remount_fs	= shmem_remount_fs,
	.show_options	= shmem_show_options,
#endif
	.evict_inode	= shmem_evict_inode,
	.drop_inode	= generic_delete_inode,
	.put_super	= shmem_put_super,
};

static const struct vm_operations_struct shmem_vm_ops = {
	.fault		= shmem_fault,
#ifdef CONFIG_NUMA
	.set_policy	= shmem_set_policy,
	.get_policy	= shmem_get_policy,
#endif
};

static struct dentry *shmem_mount(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data)
{
	return mount_nodev(fs_type, flags, data, shmem_fill_super);
}

static struct file_system_type shmem_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "tmpfs",
	.mount		= shmem_mount,
	.kill_sb	= kill_litter_super,
};

int __init shmem_init(void)
{
	int error;

	error = bdi_init(&shmem_backing_dev_info);
	if (error)
		goto out4;

	error = shmem_init_inodecache();
	if (error)
		goto out3;

	error = register_filesystem(&shmem_fs_type);
	if (error) {
		printk(KERN_ERR "Could not register tmpfs\n");
		goto out2;
	}

	shm_mnt = vfs_kern_mount(&shmem_fs_type, MS_NOUSER,
				 shmem_fs_type.name, NULL);
	if (IS_ERR(shm_mnt)) {
		error = PTR_ERR(shm_mnt);
		printk(KERN_ERR "Could not kern_mount tmpfs\n");
		goto out1;
	}
	return 0;

out1:
	unregister_filesystem(&shmem_fs_type);
out2:
	shmem_destroy_inodecache();
out3:
	bdi_destroy(&shmem_backing_dev_info);
out4:
	shm_mnt = ERR_PTR(error);
	return error;
}

#else /* !CONFIG_SHMEM */

/*
 * tiny-shmem: simple shmemfs and tmpfs using ramfs code
 *
 * This is intended for small systems where the benefits of the full
 * shmem code (swap-backed and resource-limited) are outweighed by
 * their complexity. On systems without swap this code should be
 * effectively equivalent, but much lighter weight.
 */

#include <linux/ramfs.h>

static struct file_system_type shmem_fs_type = {
	.name		= "tmpfs",
	.mount		= ramfs_mount,
	.kill_sb	= kill_litter_super,
};

int __init shmem_init(void)
{
	BUG_ON(register_filesystem(&shmem_fs_type) != 0);

	shm_mnt = kern_mount(&shmem_fs_type);
	BUG_ON(IS_ERR(shm_mnt));

	return 0;
}

int shmem_unuse(swp_entry_t swap, struct page *page)
{
	return 0;
}

int shmem_lock(struct file *file, int lock, struct user_struct *user)
{
	return 0;
}

void shmem_unlock_mapping(struct address_space *mapping)
{
}

void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
{
	truncate_inode_pages_range(inode->i_mapping, lstart, lend);
}
EXPORT_SYMBOL_GPL(shmem_truncate_range);

#define shmem_vm_ops				generic_file_vm_ops
#define shmem_file_operations			ramfs_file_operations
#define shmem_get_inode(sb, dir, mode, dev, flags)	ramfs_get_inode(sb, dir, mode, dev)
#define shmem_acct_size(flags, size)		0
#define shmem_unacct_size(flags, size)		do {} while (0)

#endif /* CONFIG_SHMEM */

/* common code */

/**
 * shmem_file_setup - get an unlinked file living in tmpfs
 * @name: name for dentry (to be seen in /proc/<pid>/maps)
 * @size: size to be set for the file
 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
 */
struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
{
	int error;
	struct file *file;
	struct inode *inode;
	struct path path;
	struct dentry *root;
	struct qstr this;

	if (IS_ERR(shm_mnt))
		return (void *)shm_mnt;

	if (size < 0 || size > MAX_LFS_FILESIZE)
		return ERR_PTR(-EINVAL);

	if (shmem_acct_size(flags, size))
		return ERR_PTR(-ENOMEM);

	error = -ENOMEM;
	this.name = name;
	this.len = strlen(name);
	this.hash = 0; /* will go */
	root = shm_mnt->mnt_root;
	path.dentry = d_alloc(root, &this);
	if (!path.dentry)
		goto put_memory;
	path.mnt = mntget(shm_mnt);

	error = -ENOSPC;
	inode = shmem_get_inode(root->d_sb, NULL, S_IFREG | S_IRWXUGO, 0, flags);
	if (!inode)
		goto put_dentry;

	d_instantiate(path.dentry, inode);
	inode->i_size = size;
	clear_nlink(inode);	/* It is unlinked */
#ifndef CONFIG_MMU
	error = ramfs_nommu_expand_for_mapping(inode, size);
	if (error)
		goto put_dentry;
#endif

	error = -ENFILE;
	file = alloc_file(&path, FMODE_WRITE | FMODE_READ,
		  &shmem_file_operations);
	if (!file)
		goto put_dentry;

	return file;

put_dentry:
	path_put(&path);
put_memory:
	shmem_unacct_size(flags, size);
	return ERR_PTR(error);
}
EXPORT_SYMBOL_GPL(shmem_file_setup);

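/*
 * Usage sketch (hypothetical caller, not part of this file): obtain an
 * unlinked tmpfs file and release it again.
 */
static inline int shmem_file_setup_example(void)
{
	struct file *file;

	file = shmem_file_setup("example", PAGE_SIZE, VM_NORESERVE);
	if (IS_ERR(file))
		return PTR_ERR(file);
	/* ... use file->f_mapping like ordinary page cache ... */
	fput(file);
	return 0;
}
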
/**
 * shmem_zero_setup - setup a shared anonymous mapping
 * @vma: the vma to be mmapped is prepared by do_mmap_pgoff
 */
int shmem_zero_setup(struct vm_area_struct *vma)
{
	struct file *file;
	loff_t size = vma->vm_end - vma->vm_start;

	file = shmem_file_setup("dev/zero", size, vma->vm_flags);
	if (IS_ERR(file))
		return PTR_ERR(file);

	if (vma->vm_file)
		fput(vma->vm_file);
	vma->vm_file = file;
	vma->vm_ops = &shmem_vm_ops;
	vma->vm_flags |= VM_CAN_NONLINEAR;
	return 0;
}

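/*
 * Reached, for example, when userspace asks for a shared anonymous
 * mapping: mmap(NULL, len, PROT_READ|PROT_WRITE,
 * MAP_SHARED|MAP_ANONYMOUS, -1, 0) leads do_mmap_pgoff() to call
 * shmem_zero_setup() on the prepared vma.
 */
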
/**
 * shmem_read_mapping_page_gfp - read into page cache, using specified page allocation flags.
 * @mapping: the page's address_space
 * @index: the page index
 * @gfp: the page allocator flags to use if allocating
 *
 * This behaves as a tmpfs "read_cache_page_gfp(mapping, index, gfp)",
 * with any new page allocations done using the specified allocation flags.
 * But read_cache_page_gfp() uses the ->readpage() method: which does not
 * suit tmpfs, since it may have pages in swapcache, and needs to find those
 * for itself; although drivers/gpu/drm i915 and ttm rely upon this support.
 *
 * i915_gem_object_get_pages_gtt() mixes __GFP_NORETRY | __GFP_NOWARN in
 * with the mapping_gfp_mask(), to avoid OOMing the machine unnecessarily.
 */
struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
					 pgoff_t index, gfp_t gfp)
{
#ifdef CONFIG_SHMEM
	struct inode *inode = mapping->host;
	struct page *page;
	int error;

	BUG_ON(mapping->a_ops != &shmem_aops);
	error = shmem_getpage_gfp(inode, index, &page, SGP_CACHE, gfp, NULL);
	if (error)
		page = ERR_PTR(error);
	else
		unlock_page(page);
	return page;
#else
	/*
	 * The tiny !SHMEM case uses ramfs without swap
	 */
	return read_cache_page_gfp(mapping, index, gfp);
#endif
}
EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp);
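
/*
 * Hypothetical caller sketch, in the spirit of the i915 usage noted
 * above: read page @index of a tmpfs-backed object, letting the
 * allocation fail quietly instead of OOMing.
 */
static inline struct page *shmem_example_get_page(struct address_space *mapping,
						  pgoff_t index)
{
	gfp_t gfp = mapping_gfp_mask(mapping) | __GFP_NORETRY | __GFP_NOWARN;

	return shmem_read_mapping_page_gfp(mapping, index, gfp);
}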