shmem: replace page if mapping excludes its zone
mm/shmem.c (exynos8895/android_kernel_samsung_universal8895)
/*
 * Resizable virtual memory filesystem for Linux.
 *
 * Copyright (C) 2000 Linus Torvalds.
 *		 2000 Transmeta Corp.
 *		 2000-2001 Christoph Rohland
 *		 2000-2001 SAP AG
 *		 2002 Red Hat Inc.
 * Copyright (C) 2002-2011 Hugh Dickins.
 * Copyright (C) 2011 Google Inc.
 * Copyright (C) 2002-2005 VERITAS Software Corporation.
 * Copyright (C) 2004 Andi Kleen, SuSE Labs
 *
 * Extended attribute support for tmpfs:
 * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
 *
 * tiny-shmem:
 * Copyright (c) 2004, 2008 Matt Mackall <mpm@selenic.com>
 *
 * This file is released under the GPL.
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/vfs.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/swap.h>

static struct vfsmount *shm_mnt;

#ifdef CONFIG_SHMEM
/*
 * This virtual memory filesystem is heavily based on the ramfs. It
 * extends ramfs by the ability to use swap and honor resource limits
 * which makes it a completely usable filesystem.
 */

#include <linux/xattr.h>
#include <linux/exportfs.h>
#include <linux/posix_acl.h>
#include <linux/generic_acl.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/shmem_fs.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/percpu_counter.h>
#include <linux/splice.h>
#include <linux/security.h>
#include <linux/swapops.h>
#include <linux/mempolicy.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/migrate.h>
#include <linux/highmem.h>
#include <linux/seq_file.h>
#include <linux/magic.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>

#define BLOCKS_PER_PAGE  (PAGE_CACHE_SIZE/512)
#define VM_ACCT(size)    (PAGE_CACHE_ALIGN(size) >> PAGE_SHIFT)

/* Pretend that each entry is of this size in directory's i_size */
#define BOGO_DIRENT_SIZE 20

/* Symlink up to this size is kmalloc'ed instead of using a swappable page */
#define SHORT_SYMLINK_LEN 128

struct shmem_xattr {
	struct list_head list;	/* anchored by shmem_inode_info->xattr_list */
	char *name;		/* xattr name */
	size_t size;
	char value[0];
};

/* Flag allocation requirements to shmem_getpage */
enum sgp_type {
	SGP_READ,	/* don't exceed i_size, don't allocate page */
	SGP_CACHE,	/* don't exceed i_size, may allocate page */
	SGP_DIRTY,	/* like SGP_CACHE, but set new page dirty */
	SGP_WRITE,	/* may exceed i_size, may allocate page */
};

#ifdef CONFIG_TMPFS
static unsigned long shmem_default_max_blocks(void)
{
	return totalram_pages / 2;
}

static unsigned long shmem_default_max_inodes(void)
{
	return min(totalram_pages - totalhigh_pages, totalram_pages / 2);
}
#endif

static bool shmem_should_replace_page(struct page *page, gfp_t gfp);
static int shmem_replace_page(struct page **pagep, gfp_t gfp,
				struct shmem_inode_info *info, pgoff_t index);
static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
	struct page **pagep, enum sgp_type sgp, gfp_t gfp, int *fault_type);

static inline int shmem_getpage(struct inode *inode, pgoff_t index,
	struct page **pagep, enum sgp_type sgp, int *fault_type)
{
	return shmem_getpage_gfp(inode, index, pagep, sgp,
			mapping_gfp_mask(inode->i_mapping), fault_type);
}

static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

/*
 * shmem_file_setup pre-accounts the whole fixed size of a VM object,
 * for shared memory and for shared anonymous (/dev/zero) mappings
 * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
 * consistent with the pre-accounting of private mappings ...
 */
static inline int shmem_acct_size(unsigned long flags, loff_t size)
{
	return (flags & VM_NORESERVE) ?
		0 : security_vm_enough_memory_mm(current->mm, VM_ACCT(size));
}

static inline void shmem_unacct_size(unsigned long flags, loff_t size)
{
	if (!(flags & VM_NORESERVE))
		vm_unacct_memory(VM_ACCT(size));
}

/*
 * ... whereas tmpfs objects are accounted incrementally as
 * pages are allocated, in order to allow huge sparse files.
 * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
 */
static inline int shmem_acct_block(unsigned long flags)
{
	return (flags & VM_NORESERVE) ?
		security_vm_enough_memory_mm(current->mm, VM_ACCT(PAGE_CACHE_SIZE)) : 0;
}

static inline void shmem_unacct_blocks(unsigned long flags, long pages)
{
	if (flags & VM_NORESERVE)
		vm_unacct_memory(pages * VM_ACCT(PAGE_CACHE_SIZE));
}

static const struct super_operations shmem_ops;
static const struct address_space_operations shmem_aops;
static const struct file_operations shmem_file_operations;
static const struct inode_operations shmem_inode_operations;
static const struct inode_operations shmem_dir_inode_operations;
static const struct inode_operations shmem_special_inode_operations;
static const struct vm_operations_struct shmem_vm_ops;

static struct backing_dev_info shmem_backing_dev_info __read_mostly = {
	.ra_pages	= 0,	/* No readahead */
	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED,
};

static LIST_HEAD(shmem_swaplist);
static DEFINE_MUTEX(shmem_swaplist_mutex);
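/*
 * tmpfs has no on-disk inode table: the only limit on inode numbers is
 * the optional max_inodes count set at mount time, so reserving and
 * freeing an inode is just a matter of adjusting sbinfo->free_inodes.
 */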
static int shmem_reserve_inode(struct super_block *sb)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		if (!sbinfo->free_inodes) {
			spin_unlock(&sbinfo->stat_lock);
			return -ENOSPC;
		}
		sbinfo->free_inodes--;
		spin_unlock(&sbinfo->stat_lock);
	}
	return 0;
}

static void shmem_free_inode(struct super_block *sb)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		spin_unlock(&sbinfo->stat_lock);
	}
}

/**
 * shmem_recalc_inode - recalculate the block usage of an inode
 * @inode: inode to recalc
 *
 * We have to calculate the free blocks since the mm can drop
 * undirtied hole pages behind our back.
 *
 * But normally   info->alloced == inode->i_mapping->nrpages + info->swapped
 * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
 *
 * It has to be called with the spinlock held.
 */
static void shmem_recalc_inode(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	long freed;

	freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
	if (freed > 0) {
		struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
		if (sbinfo->max_blocks)
			percpu_counter_add(&sbinfo->used_blocks, -freed);
		info->alloced -= freed;
		inode->i_blocks -= freed * BLOCKS_PER_PAGE;
		shmem_unacct_blocks(info->flags, freed);
	}
}

/*
 * Replace item expected in radix tree by a new item, while holding tree lock.
 */
static int shmem_radix_tree_replace(struct address_space *mapping,
			pgoff_t index, void *expected, void *replacement)
{
	void **pslot;
	void *item = NULL;

	VM_BUG_ON(!expected);
	pslot = radix_tree_lookup_slot(&mapping->page_tree, index);
	if (pslot)
		item = radix_tree_deref_slot_protected(pslot,
							&mapping->tree_lock);
	if (item != expected)
		return -ENOENT;
	if (replacement)
		radix_tree_replace_slot(pslot, replacement);
	else
		radix_tree_delete(&mapping->page_tree, index);
	return 0;
}

/*
 * Like add_to_page_cache_locked, but error if expected item has gone.
 */
static int shmem_add_to_page_cache(struct page *page,
				   struct address_space *mapping,
				   pgoff_t index, gfp_t gfp, void *expected)
{
	int error = 0;

	VM_BUG_ON(!PageLocked(page));
	VM_BUG_ON(!PageSwapBacked(page));

	if (!expected)
		error = radix_tree_preload(gfp & GFP_RECLAIM_MASK);
	if (!error) {
		page_cache_get(page);
		page->mapping = mapping;
		page->index = index;

		spin_lock_irq(&mapping->tree_lock);
		if (!expected)
			error = radix_tree_insert(&mapping->page_tree,
							index, page);
		else
			error = shmem_radix_tree_replace(mapping, index,
							expected, page);
		if (!error) {
			mapping->nrpages++;
			__inc_zone_page_state(page, NR_FILE_PAGES);
			__inc_zone_page_state(page, NR_SHMEM);
			spin_unlock_irq(&mapping->tree_lock);
		} else {
			page->mapping = NULL;
			spin_unlock_irq(&mapping->tree_lock);
			page_cache_release(page);
		}
		if (!expected)
			radix_tree_preload_end();
	}
	if (error)
		mem_cgroup_uncharge_cache_page(page);
	return error;
}

/*
 * Like delete_from_page_cache, but substitutes swap for page.
 */
static void shmem_delete_from_page_cache(struct page *page, void *radswap)
{
	struct address_space *mapping = page->mapping;
	int error;

	spin_lock_irq(&mapping->tree_lock);
	error = shmem_radix_tree_replace(mapping, page->index, page, radswap);
	page->mapping = NULL;
	mapping->nrpages--;
	__dec_zone_page_state(page, NR_FILE_PAGES);
	__dec_zone_page_state(page, NR_SHMEM);
	spin_unlock_irq(&mapping->tree_lock);
	page_cache_release(page);
	BUG_ON(error);
}

/*
 * Like find_get_pages, but collecting swap entries as well as pages.
 */
static unsigned shmem_find_get_pages_and_swap(struct address_space *mapping,
					pgoff_t start, unsigned int nr_pages,
					struct page **pages, pgoff_t *indices)
{
	unsigned int i;
	unsigned int ret;
	unsigned int nr_found;

	rcu_read_lock();
restart:
	nr_found = radix_tree_gang_lookup_slot(&mapping->page_tree,
				(void ***)pages, indices, start, nr_pages);
	ret = 0;
	for (i = 0; i < nr_found; i++) {
		struct page *page;
repeat:
		page = radix_tree_deref_slot((void **)pages[i]);
		if (unlikely(!page))
			continue;
		if (radix_tree_exception(page)) {
			if (radix_tree_deref_retry(page))
				goto restart;
			/*
			 * Otherwise, we must be storing a swap entry
			 * here as an exceptional entry: so return it
			 * without attempting to raise page count.
			 */
			goto export;
		}
		if (!page_cache_get_speculative(page))
			goto repeat;

		/* Has the page moved? */
		if (unlikely(page != *((void **)pages[i]))) {
			page_cache_release(page);
			goto repeat;
		}
export:
		indices[ret] = indices[i];
		pages[ret] = page;
		ret++;
	}
	if (unlikely(!ret && nr_found))
		goto restart;
	rcu_read_unlock();
	return ret;
}

/*
 * Remove swap entry from radix tree, free the swap and its page cache.
 */
static int shmem_free_swap(struct address_space *mapping,
			   pgoff_t index, void *radswap)
{
	int error;

	spin_lock_irq(&mapping->tree_lock);
	error = shmem_radix_tree_replace(mapping, index, radswap, NULL);
	spin_unlock_irq(&mapping->tree_lock);
	if (!error)
		free_swap_and_cache(radix_to_swp_entry(radswap));
	return error;
}

/*
 * Pagevec may contain swap entries, so shuffle up pages before releasing.
 */
static void shmem_deswap_pagevec(struct pagevec *pvec)
{
	int i, j;

	for (i = 0, j = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];
		if (!radix_tree_exceptional_entry(page))
			pvec->pages[j++] = page;
	}
	pvec->nr = j;
}

/*
 * SysV IPC SHM_UNLOCK restore Unevictable pages to their evictable lists.
 */
void shmem_unlock_mapping(struct address_space *mapping)
{
	struct pagevec pvec;
	pgoff_t indices[PAGEVEC_SIZE];
	pgoff_t index = 0;

	pagevec_init(&pvec, 0);
	/*
	 * Minor point, but we might as well stop if someone else SHM_LOCKs it.
	 */
	while (!mapping_unevictable(mapping)) {
		/*
		 * Avoid pagevec_lookup(): find_get_pages() returns 0 as if it
		 * has finished, if it hits a row of PAGEVEC_SIZE swap entries.
		 */
		pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
					PAGEVEC_SIZE, pvec.pages, indices);
		if (!pvec.nr)
			break;
		index = indices[pvec.nr - 1] + 1;
		shmem_deswap_pagevec(&pvec);
		check_move_unevictable_pages(pvec.pages, pvec.nr);
		pagevec_release(&pvec);
		cond_resched();
	}
}

/*
 * Remove range of pages and swap entries from radix tree, and free them.
 */
void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
{
	struct address_space *mapping = inode->i_mapping;
	struct shmem_inode_info *info = SHMEM_I(inode);
	pgoff_t start = (lstart + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	unsigned partial = lstart & (PAGE_CACHE_SIZE - 1);
	pgoff_t end = (lend >> PAGE_CACHE_SHIFT);
	struct pagevec pvec;
	pgoff_t indices[PAGEVEC_SIZE];
	long nr_swaps_freed = 0;
	pgoff_t index;
	int i;

	BUG_ON((lend & (PAGE_CACHE_SIZE - 1)) != (PAGE_CACHE_SIZE - 1));

	pagevec_init(&pvec, 0);
	index = start;
	while (index <= end) {
		pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
							pvec.pages, indices);
		if (!pvec.nr)
			break;
		mem_cgroup_uncharge_start();
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			index = indices[i];
			if (index > end)
				break;

			if (radix_tree_exceptional_entry(page)) {
				nr_swaps_freed += !shmem_free_swap(mapping,
								index, page);
				continue;
			}

			if (!trylock_page(page))
				continue;
			if (page->mapping == mapping) {
				VM_BUG_ON(PageWriteback(page));
				truncate_inode_page(mapping, page);
			}
			unlock_page(page);
		}
		shmem_deswap_pagevec(&pvec);
		pagevec_release(&pvec);
		mem_cgroup_uncharge_end();
		cond_resched();
		index++;
	}
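	/*
	 * If the truncation point falls inside a page, that partial page
	 * survives: read it back (it may be out in swap) and zero its tail,
	 * so a later extension of the file cannot expose stale data.
	 */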
	if (partial) {
		struct page *page = NULL;
		shmem_getpage(inode, start - 1, &page, SGP_READ, NULL);
		if (page) {
			zero_user_segment(page, partial, PAGE_CACHE_SIZE);
			set_page_dirty(page);
			unlock_page(page);
			page_cache_release(page);
		}
	}

	index = start;
	for ( ; ; ) {
		cond_resched();
		pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
							pvec.pages, indices);
		if (!pvec.nr) {
			if (index == start)
				break;
			index = start;
			continue;
		}
		if (index == start && indices[0] > end) {
			shmem_deswap_pagevec(&pvec);
			pagevec_release(&pvec);
			break;
		}
		mem_cgroup_uncharge_start();
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			index = indices[i];
			if (index > end)
				break;

			if (radix_tree_exceptional_entry(page)) {
				nr_swaps_freed += !shmem_free_swap(mapping,
								index, page);
				continue;
			}

			lock_page(page);
			if (page->mapping == mapping) {
				VM_BUG_ON(PageWriteback(page));
				truncate_inode_page(mapping, page);
			}
			unlock_page(page);
		}
		shmem_deswap_pagevec(&pvec);
		pagevec_release(&pvec);
		mem_cgroup_uncharge_end();
		index++;
	}

	spin_lock(&info->lock);
	info->swapped -= nr_swaps_freed;
	shmem_recalc_inode(inode);
	spin_unlock(&info->lock);

	inode->i_ctime = inode->i_mtime = CURRENT_TIME;
}
EXPORT_SYMBOL_GPL(shmem_truncate_range);

static int shmem_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	int error;

	error = inode_change_ok(inode, attr);
	if (error)
		return error;

	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
		loff_t oldsize = inode->i_size;
		loff_t newsize = attr->ia_size;

		if (newsize != oldsize) {
			i_size_write(inode, newsize);
			inode->i_ctime = inode->i_mtime = CURRENT_TIME;
		}
		if (newsize < oldsize) {
			loff_t holebegin = round_up(newsize, PAGE_SIZE);
			unmap_mapping_range(inode->i_mapping, holebegin, 0, 1);
			shmem_truncate_range(inode, newsize, (loff_t)-1);
			/* unmap again to remove racily COWed private pages */
			unmap_mapping_range(inode->i_mapping, holebegin, 0, 1);
		}
	}

	setattr_copy(inode, attr);
#ifdef CONFIG_TMPFS_POSIX_ACL
	if (attr->ia_valid & ATTR_MODE)
		error = generic_acl_chmod(inode);
#endif
	return error;
}

static void shmem_evict_inode(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_xattr *xattr, *nxattr;
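	/*
	 * Only regular shmem inodes use shmem_aops; an inode that doesn't
	 * is a short symlink, whose target was kmalloc'ed rather than
	 * stored in a (possibly swapped-out) page.
	 */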
	if (inode->i_mapping->a_ops == &shmem_aops) {
		shmem_unacct_size(info->flags, inode->i_size);
		inode->i_size = 0;
		shmem_truncate_range(inode, 0, (loff_t)-1);
		if (!list_empty(&info->swaplist)) {
			mutex_lock(&shmem_swaplist_mutex);
			list_del_init(&info->swaplist);
			mutex_unlock(&shmem_swaplist_mutex);
		}
	} else
		kfree(info->symlink);

	list_for_each_entry_safe(xattr, nxattr, &info->xattr_list, list) {
		kfree(xattr->name);
		kfree(xattr);
	}
	BUG_ON(inode->i_blocks);
	shmem_free_inode(inode->i_sb);
	clear_inode(inode);
}

/*
 * If swap found in inode, free it and move page from swapcache to filecache.
 */
static int shmem_unuse_inode(struct shmem_inode_info *info,
			     swp_entry_t swap, struct page **pagep)
{
	struct address_space *mapping = info->vfs_inode.i_mapping;
	void *radswap;
	pgoff_t index;
	gfp_t gfp;
	int error = 0;

	radswap = swp_to_radix_entry(swap);
	index = radix_tree_locate_item(&mapping->page_tree, radswap);
	if (index == -1)
		return 0;

	/*
	 * Move _head_ to start search for next from here.
	 * But be careful: shmem_evict_inode checks list_empty without taking
	 * mutex, and there's an instant in list_move_tail when info->swaplist
	 * would appear empty, if it were the only one on shmem_swaplist.
	 */
	if (shmem_swaplist.next != &info->swaplist)
		list_move_tail(&shmem_swaplist, &info->swaplist);

	gfp = mapping_gfp_mask(mapping);
	if (shmem_should_replace_page(*pagep, gfp)) {
		mutex_unlock(&shmem_swaplist_mutex);
		error = shmem_replace_page(pagep, gfp, info, index);
		mutex_lock(&shmem_swaplist_mutex);
		/*
		 * We needed to drop mutex to make that restrictive page
		 * allocation; but the inode might already be freed by now,
		 * and we cannot refer to inode or mapping or info to check.
		 * However, we do hold page lock on the PageSwapCache page,
		 * so can check if that still has our reference remaining.
		 */
		if (!page_swapcount(*pagep))
			error = -ENOENT;
	}

	/*
	 * We rely on shmem_swaplist_mutex, not only to protect the swaplist,
	 * but also to hold up shmem_evict_inode(): so inode cannot be freed
	 * beneath us (pagelock doesn't help until the page is in pagecache).
	 */
	if (!error)
		error = shmem_add_to_page_cache(*pagep, mapping, index,
						GFP_NOWAIT, radswap);
	if (error != -ENOMEM) {
		/*
		 * Truncation and eviction use free_swap_and_cache(), which
		 * only does trylock page: if we raced, best clean up here.
		 */
		delete_from_swap_cache(*pagep);
		set_page_dirty(*pagep);
		if (!error) {
			spin_lock(&info->lock);
			info->swapped--;
			spin_unlock(&info->lock);
			swap_free(swap);
		}
		error = 1;	/* not an error, but entry was found */
	}
	return error;
}

/*
 * Search through swapped inodes to find and replace swap by page.
 */
int shmem_unuse(swp_entry_t swap, struct page *page)
{
	struct list_head *this, *next;
	struct shmem_inode_info *info;
	int found = 0;
	int error = 0;

	/*
	 * There's a faint possibility that swap page was replaced before
	 * caller locked it: it will come back later with the right page.
	 */
	if (unlikely(!PageSwapCache(page)))
		goto out;

	/*
	 * Charge page using GFP_KERNEL while we can wait, before taking
	 * the shmem_swaplist_mutex which might hold up shmem_writepage().
	 * Charged back to the user (not to caller) when swap account is used.
	 */
	error = mem_cgroup_cache_charge(page, current->mm, GFP_KERNEL);
	if (error)
		goto out;
	/* No radix_tree_preload: swap entry keeps a place for page in tree */

	mutex_lock(&shmem_swaplist_mutex);
	list_for_each_safe(this, next, &shmem_swaplist) {
		info = list_entry(this, struct shmem_inode_info, swaplist);
		if (info->swapped)
			found = shmem_unuse_inode(info, swap, &page);
		else
			list_del_init(&info->swaplist);
		cond_resched();
		if (found)
			break;
	}
	mutex_unlock(&shmem_swaplist_mutex);

	if (found < 0)
		error = found;
out:
	unlock_page(page);
	page_cache_release(page);
	return error;
}

/*
 * Move the page from the page cache to the swap cache.
 */
static int shmem_writepage(struct page *page, struct writeback_control *wbc)
{
	struct shmem_inode_info *info;
	struct address_space *mapping;
	struct inode *inode;
	swp_entry_t swap;
	pgoff_t index;

	BUG_ON(!PageLocked(page));
	mapping = page->mapping;
	index = page->index;
	inode = mapping->host;
	info = SHMEM_I(inode);
	if (info->flags & VM_LOCKED)
		goto redirty;
	if (!total_swap_pages)
		goto redirty;

	/*
	 * shmem_backing_dev_info's capabilities prevent regular writeback or
	 * sync from ever calling shmem_writepage; but a stacking filesystem
	 * might use ->writepage of its underlying filesystem, in which case
	 * tmpfs should write out to swap only in response to memory pressure,
	 * and not for the writeback threads or sync.
	 */
	if (!wbc->for_reclaim) {
		WARN_ON_ONCE(1);	/* Still happens? Tell us about it! */
		goto redirty;
	}
	swap = get_swap_page();
	if (!swap.val)
		goto redirty;

	/*
	 * Add inode to shmem_unuse()'s list of swapped-out inodes,
	 * if it's not already there.  Do it now before the page is
	 * moved to swap cache, when its pagelock no longer protects
	 * the inode from eviction.  But don't unlock the mutex until
	 * we've incremented swapped, because shmem_unuse_inode() will
	 * prune a !swapped inode from the swaplist under this mutex.
	 */
	mutex_lock(&shmem_swaplist_mutex);
	if (list_empty(&info->swaplist))
		list_add_tail(&info->swaplist, &shmem_swaplist);

	if (add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) {
		swap_shmem_alloc(swap);
		shmem_delete_from_page_cache(page, swp_to_radix_entry(swap));

		spin_lock(&info->lock);
		info->swapped++;
		shmem_recalc_inode(inode);
		spin_unlock(&info->lock);

		mutex_unlock(&shmem_swaplist_mutex);
		BUG_ON(page_mapped(page));
		swap_writepage(page, wbc);
		return 0;
	}

	mutex_unlock(&shmem_swaplist_mutex);
	swapcache_free(swap, NULL);
redirty:
	set_page_dirty(page);
	if (wbc->for_reclaim)
		return AOP_WRITEPAGE_ACTIVATE;	/* Return with page locked */
	unlock_page(page);
	return 0;
}

#ifdef CONFIG_NUMA
#ifdef CONFIG_TMPFS
static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
{
	char buffer[64];

	if (!mpol || mpol->mode == MPOL_DEFAULT)
		return;		/* show nothing */

	mpol_to_str(buffer, sizeof(buffer), mpol, 1);

	seq_printf(seq, ",mpol=%s", buffer);
}

static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
{
	struct mempolicy *mpol = NULL;
	if (sbinfo->mpol) {
		spin_lock(&sbinfo->stat_lock);	/* prevent replace/use races */
		mpol = sbinfo->mpol;
		mpol_get(mpol);
		spin_unlock(&sbinfo->stat_lock);
	}
	return mpol;
}
#endif /* CONFIG_TMPFS */
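/*
 * shmem inodes hold a shared mempolicy instead of a VMA, but the page
 * allocators below expect a vm_area_struct: build a minimal pseudo vma
 * on the stack, carrying just the policy and the faulting pgoff.
 */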
static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
			struct shmem_inode_info *info, pgoff_t index)
{
	struct mempolicy mpol, *spol;
	struct vm_area_struct pvma;

	spol = mpol_cond_copy(&mpol,
			mpol_shared_policy_lookup(&info->policy, index));

	/* Create a pseudo vma that just contains the policy */
	pvma.vm_start = 0;
	pvma.vm_pgoff = index;
	pvma.vm_ops = NULL;
	pvma.vm_policy = spol;
	return swapin_readahead(swap, gfp, &pvma, 0);
}

static struct page *shmem_alloc_page(gfp_t gfp,
			struct shmem_inode_info *info, pgoff_t index)
{
	struct vm_area_struct pvma;

	/* Create a pseudo vma that just contains the policy */
	pvma.vm_start = 0;
	pvma.vm_pgoff = index;
	pvma.vm_ops = NULL;
	pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, index);

	/*
	 * alloc_page_vma() will drop the shared policy reference
	 */
	return alloc_page_vma(gfp, &pvma, 0);
}
#else /* !CONFIG_NUMA */
#ifdef CONFIG_TMPFS
static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
{
}
#endif /* CONFIG_TMPFS */

static inline struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
			struct shmem_inode_info *info, pgoff_t index)
{
	return swapin_readahead(swap, gfp, NULL, 0);
}

static inline struct page *shmem_alloc_page(gfp_t gfp,
			struct shmem_inode_info *info, pgoff_t index)
{
	return alloc_page(gfp);
}
#endif /* CONFIG_NUMA */

#if !defined(CONFIG_NUMA) || !defined(CONFIG_TMPFS)
static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
{
	return NULL;
}
#endif

/*
 * When a page is moved from swapcache to shmem filecache (either by the
 * usual swapin of shmem_getpage_gfp(), or by the less common swapoff of
 * shmem_unuse_inode()), it may have been read in earlier from swap, in
 * ignorance of the mapping it belongs to.  If that mapping has special
 * constraints (like the gma500 GEM driver, which requires RAM below 4GB),
 * we may need to copy to a suitable page before moving to filecache.
 *
 * In a future release, this may well be extended to respect cpuset and
 * NUMA mempolicy, and applied also to anonymous pages in do_swap_page();
 * but for now it is a simple matter of zone.
 */
static bool shmem_should_replace_page(struct page *page, gfp_t gfp)
{
	return page_zonenum(page) > gfp_zone(gfp);
}

static int shmem_replace_page(struct page **pagep, gfp_t gfp,
				struct shmem_inode_info *info, pgoff_t index)
{
	struct page *oldpage, *newpage;
	struct address_space *swap_mapping;
	pgoff_t swap_index;
	int error;

	oldpage = *pagep;
	swap_index = page_private(oldpage);
	swap_mapping = page_mapping(oldpage);

	/*
	 * We have arrived here because our zones are constrained, so don't
	 * limit chance of success by further cpuset and node constraints.
	 */
	gfp &= ~GFP_CONSTRAINT_MASK;
	newpage = shmem_alloc_page(gfp, info, index);
	if (!newpage)
		return -ENOMEM;
	VM_BUG_ON(shmem_should_replace_page(newpage, gfp));

	*pagep = newpage;
	page_cache_get(newpage);
	copy_highpage(newpage, oldpage);
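	/*
	 * Carry over the swapcache state: newpage must appear locked,
	 * uptodate, swap-backed and at the same swap offset as oldpage
	 * before it can take oldpage's place in the swapcache radix tree.
	 */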
	VM_BUG_ON(!PageLocked(oldpage));
	__set_page_locked(newpage);
	VM_BUG_ON(!PageUptodate(oldpage));
	SetPageUptodate(newpage);
	VM_BUG_ON(!PageSwapBacked(oldpage));
	SetPageSwapBacked(newpage);
	VM_BUG_ON(!swap_index);
	set_page_private(newpage, swap_index);
	VM_BUG_ON(!PageSwapCache(oldpage));
	SetPageSwapCache(newpage);

	/*
	 * Our caller will very soon move newpage out of swapcache, but it's
	 * a nice clean interface for us to replace oldpage by newpage there.
	 */
	spin_lock_irq(&swap_mapping->tree_lock);
	error = shmem_radix_tree_replace(swap_mapping, swap_index, oldpage,
								newpage);
	__inc_zone_page_state(newpage, NR_FILE_PAGES);
	__dec_zone_page_state(oldpage, NR_FILE_PAGES);
	spin_unlock_irq(&swap_mapping->tree_lock);
	BUG_ON(error);

	mem_cgroup_replace_page_cache(oldpage, newpage);
	lru_cache_add_anon(newpage);

	ClearPageSwapCache(oldpage);
	set_page_private(oldpage, 0);
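	/*
	 * Drop oldpage twice: once for the swapcache reference just
	 * replaced in the radix tree, and once for the caller's reference,
	 * which now points at newpage via *pagep.
	 */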
	unlock_page(oldpage);
	page_cache_release(oldpage);
	page_cache_release(oldpage);
	return 0;
}

/*
 * shmem_getpage_gfp - find page in cache, or get from swap, or allocate
 *
 * If we allocate a new one we do not mark it dirty. That's up to the
 * vm. If we swap it in we mark it dirty since we also free the swap
 * entry since a page cannot live in both the swap and page cache
 */
static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
	struct page **pagep, enum sgp_type sgp, gfp_t gfp, int *fault_type)
{
	struct address_space *mapping = inode->i_mapping;
	struct shmem_inode_info *info;
	struct shmem_sb_info *sbinfo;
	struct page *page;
	swp_entry_t swap;
	int error;
	int once = 0;

	if (index > (MAX_LFS_FILESIZE >> PAGE_CACHE_SHIFT))
		return -EFBIG;
repeat:
	swap.val = 0;
	page = find_lock_page(mapping, index);
	if (radix_tree_exceptional_entry(page)) {
		swap = radix_to_swp_entry(page);
		page = NULL;
	}

	if (sgp != SGP_WRITE &&
	    ((loff_t)index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
		error = -EINVAL;
		goto failed;
	}

	if (page || (sgp == SGP_READ && !swap.val)) {
		/*
		 * Once we can get the page lock, it must be uptodate:
		 * if there were an error in reading back from swap,
		 * the page would not be inserted into the filecache.
		 */
		BUG_ON(page && !PageUptodate(page));
		*pagep = page;
		return 0;
	}

	/*
	 * Fast cache lookup did not find it:
	 * bring it back from swap or allocate.
	 */
	info = SHMEM_I(inode);
	sbinfo = SHMEM_SB(inode->i_sb);

	if (swap.val) {
		/* Look it up and read it in.. */
		page = lookup_swap_cache(swap);
		if (!page) {
			/* here we actually do the io */
			if (fault_type)
				*fault_type |= VM_FAULT_MAJOR;
			page = shmem_swapin(swap, gfp, info, index);
			if (!page) {
				error = -ENOMEM;
				goto failed;
			}
		}

		/* We have to do this with page locked to prevent races */
		lock_page(page);
		if (!PageSwapCache(page) || page->mapping) {
			error = -EEXIST;	/* try again */
			goto failed;
		}
		if (!PageUptodate(page)) {
			error = -EIO;
			goto failed;
		}
		wait_on_page_writeback(page);

		if (shmem_should_replace_page(page, gfp)) {
			error = shmem_replace_page(&page, gfp, info, index);
			if (error)
				goto failed;
		}

		error = mem_cgroup_cache_charge(page, current->mm,
						gfp & GFP_RECLAIM_MASK);
		if (!error)
			error = shmem_add_to_page_cache(page, mapping, index,
						gfp, swp_to_radix_entry(swap));
		if (error)
			goto failed;

		spin_lock(&info->lock);
		info->swapped--;
		shmem_recalc_inode(inode);
		spin_unlock(&info->lock);

		delete_from_swap_cache(page);
		set_page_dirty(page);
		swap_free(swap);

	} else {
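		/*
		 * No swap entry: filling a hole or extending the file.
		 * Account the block first, so an over-quota allocation
		 * fails with -ENOSPC rather than looking like OOM.
		 */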
		if (shmem_acct_block(info->flags)) {
			error = -ENOSPC;
			goto failed;
		}
		if (sbinfo->max_blocks) {
			if (percpu_counter_compare(&sbinfo->used_blocks,
						sbinfo->max_blocks) >= 0) {
				error = -ENOSPC;
				goto unacct;
			}
			percpu_counter_inc(&sbinfo->used_blocks);
		}

		page = shmem_alloc_page(gfp, info, index);
		if (!page) {
			error = -ENOMEM;
			goto decused;
		}

		SetPageSwapBacked(page);
		__set_page_locked(page);
		error = mem_cgroup_cache_charge(page, current->mm,
						gfp & GFP_RECLAIM_MASK);
		if (!error)
			error = shmem_add_to_page_cache(page, mapping, index,
						gfp, NULL);
		if (error)
			goto decused;
		lru_cache_add_anon(page);

		spin_lock(&info->lock);
		info->alloced++;
		inode->i_blocks += BLOCKS_PER_PAGE;
		shmem_recalc_inode(inode);
		spin_unlock(&info->lock);

		clear_highpage(page);
		flush_dcache_page(page);
		SetPageUptodate(page);
		if (sgp == SGP_DIRTY)
			set_page_dirty(page);
	}

	/* Perhaps the file has been truncated since we checked */
	if (sgp != SGP_WRITE &&
	    ((loff_t)index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
		error = -EINVAL;
		goto trunc;
	}
	*pagep = page;
	return 0;

	/*
	 * Error recovery.
	 */
trunc:
	ClearPageDirty(page);
	delete_from_page_cache(page);
	spin_lock(&info->lock);
	info->alloced--;
	inode->i_blocks -= BLOCKS_PER_PAGE;
	spin_unlock(&info->lock);
decused:
	if (sbinfo->max_blocks)
		percpu_counter_add(&sbinfo->used_blocks, -1);
unacct:
	shmem_unacct_blocks(info->flags, 1);
failed:
	if (swap.val && error != -EINVAL) {
		struct page *test = find_get_page(mapping, index);
		if (test && !radix_tree_exceptional_entry(test))
			page_cache_release(test);
		/* Have another try if the entry has changed */
		if (test != swp_to_radix_entry(swap))
			error = -EEXIST;
	}
	if (page) {
		unlock_page(page);
		page_cache_release(page);
	}
	if (error == -ENOSPC && !once++) {
		info = SHMEM_I(inode);
		spin_lock(&info->lock);
		shmem_recalc_inode(inode);
		spin_unlock(&info->lock);
		goto repeat;
	}
	if (error == -EEXIST)
		goto repeat;
	return error;
}

static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
	int error;
	int ret = VM_FAULT_LOCKED;

	error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret);
	if (error)
		return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS);

	if (ret & VM_FAULT_MAJOR) {
		count_vm_event(PGMAJFAULT);
		mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
	}
	return ret;
}

#ifdef CONFIG_NUMA
static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol)
{
	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
	return mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, mpol);
}

static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
					  unsigned long addr)
{
	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
	pgoff_t index;

	index = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
	return mpol_shared_policy_lookup(&SHMEM_I(inode)->policy, index);
}
#endif

int shmem_lock(struct file *file, int lock, struct user_struct *user)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct shmem_inode_info *info = SHMEM_I(inode);
	int retval = -ENOMEM;

	spin_lock(&info->lock);
	if (lock && !(info->flags & VM_LOCKED)) {
		if (!user_shm_lock(inode->i_size, user))
			goto out_nomem;
		info->flags |= VM_LOCKED;
		mapping_set_unevictable(file->f_mapping);
	}
	if (!lock && (info->flags & VM_LOCKED) && user) {
		user_shm_unlock(inode->i_size, user);
		info->flags &= ~VM_LOCKED;
		mapping_clear_unevictable(file->f_mapping);
	}
	retval = 0;

out_nomem:
	spin_unlock(&info->lock);
	return retval;
}

static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
{
	file_accessed(file);
	vma->vm_ops = &shmem_vm_ops;
	vma->vm_flags |= VM_CAN_NONLINEAR;
	return 0;
}

static struct inode *shmem_get_inode(struct super_block *sb, const struct inode *dir,
				     umode_t mode, dev_t dev, unsigned long flags)
{
	struct inode *inode;
	struct shmem_inode_info *info;
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);

	if (shmem_reserve_inode(sb))
		return NULL;

	inode = new_inode(sb);
	if (inode) {
		inode->i_ino = get_next_ino();
		inode_init_owner(inode, dir, mode);
		inode->i_blocks = 0;
		inode->i_mapping->backing_dev_info = &shmem_backing_dev_info;
		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
		inode->i_generation = get_seconds();
		info = SHMEM_I(inode);
		memset(info, 0, (char *)inode - (char *)info);
		spin_lock_init(&info->lock);
		info->flags = flags & VM_NORESERVE;
		INIT_LIST_HEAD(&info->swaplist);
		INIT_LIST_HEAD(&info->xattr_list);
		cache_no_acl(inode);

		switch (mode & S_IFMT) {
		default:
			inode->i_op = &shmem_special_inode_operations;
			init_special_inode(inode, mode, dev);
			break;
		case S_IFREG:
			inode->i_mapping->a_ops = &shmem_aops;
			inode->i_op = &shmem_inode_operations;
			inode->i_fop = &shmem_file_operations;
			mpol_shared_policy_init(&info->policy,
						 shmem_get_sbmpol(sbinfo));
			break;
		case S_IFDIR:
			inc_nlink(inode);
			/* Some things misbehave if size == 0 on a directory */
			inode->i_size = 2 * BOGO_DIRENT_SIZE;
			inode->i_op = &shmem_dir_inode_operations;
			inode->i_fop = &simple_dir_operations;
			break;
		case S_IFLNK:
			/*
			 * Must not load anything in the rbtree,
			 * mpol_free_shared_policy will not be called.
			 */
			mpol_shared_policy_init(&info->policy, NULL);
			break;
		}
	} else
		shmem_free_inode(sb);
	return inode;
}

#ifdef CONFIG_TMPFS
static const struct inode_operations shmem_symlink_inode_operations;
static const struct inode_operations shmem_short_symlink_operations;

#ifdef CONFIG_TMPFS_XATTR
static int shmem_initxattrs(struct inode *, const struct xattr *, void *);
#else
#define shmem_initxattrs NULL
#endif

static int
shmem_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	return shmem_getpage(inode, index, pagep, SGP_WRITE, NULL);
}

static int
shmem_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;

	if (pos + copied > inode->i_size)
		i_size_write(inode, pos + copied);

	set_page_dirty(page);
	unlock_page(page);
	page_cache_release(page);

	return copied;
}

static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_t *desc, read_actor_t actor)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct address_space *mapping = inode->i_mapping;
	pgoff_t index;
	unsigned long offset;
	enum sgp_type sgp = SGP_READ;

	/*
	 * Might this read be for a stacking filesystem?  Then when reading
	 * holes of a sparse file, we actually need to allocate those pages,
	 * and even mark them dirty, so it cannot exceed the max_blocks limit.
	 */
	if (segment_eq(get_fs(), KERNEL_DS))
		sgp = SGP_DIRTY;

	index = *ppos >> PAGE_CACHE_SHIFT;
	offset = *ppos & ~PAGE_CACHE_MASK;

	for (;;) {
		struct page *page = NULL;
		pgoff_t end_index;
		unsigned long nr, ret;
		loff_t i_size = i_size_read(inode);

		end_index = i_size >> PAGE_CACHE_SHIFT;
		if (index > end_index)
			break;
		if (index == end_index) {
			nr = i_size & ~PAGE_CACHE_MASK;
			if (nr <= offset)
				break;
		}

		desc->error = shmem_getpage(inode, index, &page, sgp, NULL);
		if (desc->error) {
			if (desc->error == -EINVAL)
				desc->error = 0;
			break;
		}
		if (page)
			unlock_page(page);

		/*
		 * We must evaluate after, since reads (unlike writes)
		 * are called without i_mutex protection against truncate
		 */
		nr = PAGE_CACHE_SIZE;
		i_size = i_size_read(inode);
		end_index = i_size >> PAGE_CACHE_SHIFT;
		if (index == end_index) {
			nr = i_size & ~PAGE_CACHE_MASK;
			if (nr <= offset) {
				if (page)
					page_cache_release(page);
				break;
			}
		}
		nr -= offset;

		if (page) {
			/*
			 * If users can be writing to this page using arbitrary
			 * virtual addresses, take care about potential aliasing
			 * before reading the page on the kernel side.
			 */
			if (mapping_writably_mapped(mapping))
				flush_dcache_page(page);
			/*
			 * Mark the page accessed if we read the beginning.
			 */
			if (!offset)
				mark_page_accessed(page);
		} else {
			page = ZERO_PAGE(0);
			page_cache_get(page);
		}

		/*
		 * Ok, we have the page, and it's up-to-date, so
		 * now we can copy it to user space...
		 *
		 * The actor routine returns how many bytes were actually used..
		 * NOTE! This may not be the same as how much of a user buffer
		 * we filled up (we may be padding etc), so we can only update
		 * "pos" here (the actor routine has to update the user buffer
		 * pointers and the remaining count).
		 */
		ret = actor(desc, page, offset, nr);
		offset += ret;
		index += offset >> PAGE_CACHE_SHIFT;
		offset &= ~PAGE_CACHE_MASK;

		page_cache_release(page);
		if (ret != nr || !desc->count)
			break;

		cond_resched();
	}

	*ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
	file_accessed(filp);
}

static ssize_t shmem_file_aio_read(struct kiocb *iocb,
		const struct iovec *iov, unsigned long nr_segs, loff_t pos)
{
	struct file *filp = iocb->ki_filp;
	ssize_t retval;
	unsigned long seg;
	size_t count;
	loff_t *ppos = &iocb->ki_pos;

	retval = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
	if (retval)
		return retval;
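	/* Feed each iovec segment through do_shmem_file_read() in turn */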
	for (seg = 0; seg < nr_segs; seg++) {
		read_descriptor_t desc;

		desc.written = 0;
		desc.arg.buf = iov[seg].iov_base;
		desc.count = iov[seg].iov_len;
		if (desc.count == 0)
			continue;
		desc.error = 0;
		do_shmem_file_read(filp, ppos, &desc, file_read_actor);
		retval += desc.written;
		if (desc.error) {
			retval = retval ?: desc.error;
			break;
		}
		if (desc.count > 0)
			break;
	}
	return retval;
}

static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
				struct pipe_inode_info *pipe, size_t len,
				unsigned int flags)
{
	struct address_space *mapping = in->f_mapping;
	struct inode *inode = mapping->host;
	unsigned int loff, nr_pages, req_pages;
	struct page *pages[PIPE_DEF_BUFFERS];
	struct partial_page partial[PIPE_DEF_BUFFERS];
	struct page *page;
	pgoff_t index, end_index;
	loff_t isize, left;
	int error, page_nr;
	struct splice_pipe_desc spd = {
		.pages = pages,
		.partial = partial,
		.flags = flags,
		.ops = &page_cache_pipe_buf_ops,
		.spd_release = spd_release_page,
	};

	isize = i_size_read(inode);
	if (unlikely(*ppos >= isize))
		return 0;

	left = isize - *ppos;
	if (unlikely(left < len))
		len = left;

	if (splice_grow_spd(pipe, &spd))
		return -ENOMEM;

	index = *ppos >> PAGE_CACHE_SHIFT;
	loff = *ppos & ~PAGE_CACHE_MASK;
	req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	nr_pages = min(req_pages, pipe->buffers);

	spd.nr_pages = find_get_pages_contig(mapping, index,
						nr_pages, spd.pages);
	index += spd.nr_pages;
	error = 0;

	while (spd.nr_pages < nr_pages) {
		error = shmem_getpage(inode, index, &page, SGP_CACHE, NULL);
		if (error)
			break;
		unlock_page(page);
		spd.pages[spd.nr_pages++] = page;
		index++;
	}

	index = *ppos >> PAGE_CACHE_SHIFT;
	nr_pages = spd.nr_pages;
	spd.nr_pages = 0;

	for (page_nr = 0; page_nr < nr_pages; page_nr++) {
		unsigned int this_len;

		if (!len)
			break;

		this_len = min_t(unsigned long, len, PAGE_CACHE_SIZE - loff);
		page = spd.pages[page_nr];
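		/*
		 * The page may have been truncated or reclaimed while we
		 * held only a reference: recheck under page lock via
		 * shmem_getpage(), which reads it back in if need be.
		 */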
		if (!PageUptodate(page) || page->mapping != mapping) {
			error = shmem_getpage(inode, index, &page,
							SGP_CACHE, NULL);
			if (error)
				break;
			unlock_page(page);
			page_cache_release(spd.pages[page_nr]);
			spd.pages[page_nr] = page;
		}

		isize = i_size_read(inode);
		end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
		if (unlikely(!isize || index > end_index))
			break;

		if (end_index == index) {
			unsigned int plen;

			plen = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
			if (plen <= loff)
				break;

			this_len = min(this_len, plen - loff);
			len = this_len;
		}

		spd.partial[page_nr].offset = loff;
		spd.partial[page_nr].len = this_len;
		len -= this_len;
		loff = 0;
		spd.nr_pages++;
		index++;
	}

	while (page_nr < nr_pages)
		page_cache_release(spd.pages[page_nr++]);

	if (spd.nr_pages)
		error = splice_to_pipe(pipe, &spd);

	splice_shrink_spd(pipe, &spd);

	if (error > 0) {
		*ppos += error;
		file_accessed(in);
	}
	return error;
}

static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);

	buf->f_type = TMPFS_MAGIC;
	buf->f_bsize = PAGE_CACHE_SIZE;
	buf->f_namelen = NAME_MAX;
	if (sbinfo->max_blocks) {
		buf->f_blocks = sbinfo->max_blocks;
		buf->f_bavail =
		buf->f_bfree  = sbinfo->max_blocks -
				percpu_counter_sum(&sbinfo->used_blocks);
	}
	if (sbinfo->max_inodes) {
		buf->f_files = sbinfo->max_inodes;
		buf->f_ffree = sbinfo->free_inodes;
	}
	/* else leave those fields 0 like simple_statfs */
	return 0;
}

/*
 * File creation. Allocate an inode, and we're done..
 */
static int
shmem_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
{
	struct inode *inode;
	int error = -ENOSPC;

	inode = shmem_get_inode(dir->i_sb, dir, mode, dev, VM_NORESERVE);
	if (inode) {
		error = security_inode_init_security(inode, dir,
						     &dentry->d_name,
						     shmem_initxattrs, NULL);
		if (error) {
			if (error != -EOPNOTSUPP) {
				iput(inode);
				return error;
			}
		}
#ifdef CONFIG_TMPFS_POSIX_ACL
		error = generic_acl_init(inode, dir);
		if (error) {
			iput(inode);
			return error;
		}
#else
		error = 0;
#endif
		dir->i_size += BOGO_DIRENT_SIZE;
		dir->i_ctime = dir->i_mtime = CURRENT_TIME;
		d_instantiate(dentry, inode);
		dget(dentry); /* Extra count - pin the dentry in core */
	}
	return error;
}

static int shmem_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	int error;

	if ((error = shmem_mknod(dir, dentry, mode | S_IFDIR, 0)))
		return error;
	inc_nlink(dir);
	return 0;
}

static int shmem_create(struct inode *dir, struct dentry *dentry, umode_t mode,
		struct nameidata *nd)
{
	return shmem_mknod(dir, dentry, mode | S_IFREG, 0);
}

/*
 * Link a file..
 */
static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = old_dentry->d_inode;
	int ret;

	/*
	 * No ordinary (disk based) filesystem counts links as inodes;
	 * but each new link needs a new dentry, pinning lowmem, and
	 * tmpfs dentries cannot be pruned until they are unlinked.
	 */
	ret = shmem_reserve_inode(inode->i_sb);
	if (ret)
		goto out;

	dir->i_size += BOGO_DIRENT_SIZE;
	inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
	inc_nlink(inode);
	ihold(inode);	/* New dentry reference */
	dget(dentry);	/* Extra pinning count for the created dentry */
	d_instantiate(dentry, inode);
out:
	return ret;
}

static int shmem_unlink(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = dentry->d_inode;

	if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode))
		shmem_free_inode(inode->i_sb);

	dir->i_size -= BOGO_DIRENT_SIZE;
	inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
	drop_nlink(inode);
	dput(dentry);	/* Undo the count from "create" - this does all the work */
	return 0;
}

static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
{
	if (!simple_empty(dentry))
		return -ENOTEMPTY;

	drop_nlink(dentry->d_inode);
	drop_nlink(dir);
	return shmem_unlink(dir, dentry);
}

/*
 * The VFS layer already does all the dentry stuff for rename,
 * we just have to decrement the usage count for the target if
 * it exists so that the VFS layer correctly free's it when it
 * gets overwritten.
 */
static int shmem_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry)
{
	struct inode *inode = old_dentry->d_inode;
	int they_are_dirs = S_ISDIR(inode->i_mode);

	if (!simple_empty(new_dentry))
		return -ENOTEMPTY;

	if (new_dentry->d_inode) {
		(void) shmem_unlink(new_dir, new_dentry);
		if (they_are_dirs)
			drop_nlink(old_dir);
	} else if (they_are_dirs) {
		drop_nlink(old_dir);
		inc_nlink(new_dir);
	}

	old_dir->i_size -= BOGO_DIRENT_SIZE;
	new_dir->i_size += BOGO_DIRENT_SIZE;
	old_dir->i_ctime = old_dir->i_mtime =
	new_dir->i_ctime = new_dir->i_mtime =
	inode->i_ctime = CURRENT_TIME;
	return 0;
}

static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
{
	int error;
	int len;
	struct inode *inode;
	struct page *page;
	char *kaddr;
	struct shmem_inode_info *info;

	len = strlen(symname) + 1;
	if (len > PAGE_CACHE_SIZE)
		return -ENAMETOOLONG;

	inode = shmem_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0, VM_NORESERVE);
	if (!inode)
		return -ENOSPC;

	error = security_inode_init_security(inode, dir, &dentry->d_name,
					     shmem_initxattrs, NULL);
	if (error) {
		if (error != -EOPNOTSUPP) {
			iput(inode);
			return error;
		}
		error = 0;
	}

	info = SHMEM_I(inode);
	inode->i_size = len-1;
69f07ec9
HD
1753 if (len <= SHORT_SYMLINK_LEN) {
1754 info->symlink = kmemdup(symname, len, GFP_KERNEL);
1755 if (!info->symlink) {
1756 iput(inode);
1757 return -ENOMEM;
1758 }
1759 inode->i_op = &shmem_short_symlink_operations;
1da177e4
LT
1760 } else {
1761 error = shmem_getpage(inode, 0, &page, SGP_WRITE, NULL);
1762 if (error) {
1763 iput(inode);
1764 return error;
1765 }
14fcc23f 1766 inode->i_mapping->a_ops = &shmem_aops;
1da177e4 1767 inode->i_op = &shmem_symlink_inode_operations;
9b04c5fe 1768 kaddr = kmap_atomic(page);
1da177e4 1769 memcpy(kaddr, symname, len);
9b04c5fe 1770 kunmap_atomic(kaddr);
1da177e4 1771 set_page_dirty(page);
6746aff7 1772 unlock_page(page);
1da177e4
LT
1773 page_cache_release(page);
1774 }
1da177e4
LT
1775 dir->i_size += BOGO_DIRENT_SIZE;
1776 dir->i_ctime = dir->i_mtime = CURRENT_TIME;
1777 d_instantiate(dentry, inode);
1778 dget(dentry);
1779 return 0;
1780}
1781
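/*
 * Following a symlink depends on which representation shmem_symlink()
 * chose above: a target of at most SHORT_SYMLINK_LEN bytes lives in the
 * kmalloc'ed info->symlink, so following it is a simple pointer handoff
 * with no cookie to drop; a longer target occupies page 0 of the inode's
 * mapping, so shmem_follow_link() must kmap that page and return it as a
 * cookie for shmem_put_link() to kunmap and release afterwards.
 */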
static void *shmem_follow_short_symlink(struct dentry *dentry, struct nameidata *nd)
{
	nd_set_link(nd, SHMEM_I(dentry->d_inode)->symlink);
	return NULL;
}

static void *shmem_follow_link(struct dentry *dentry, struct nameidata *nd)
{
	struct page *page = NULL;
	int error = shmem_getpage(dentry->d_inode, 0, &page, SGP_READ, NULL);
	nd_set_link(nd, error ? ERR_PTR(error) : kmap(page));
	if (page)
		unlock_page(page);
	return page;
}

static void shmem_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
{
	if (!IS_ERR(nd_get_link(nd))) {
		struct page *page = cookie;
		kunmap(page);
		mark_page_accessed(page);
		page_cache_release(page);
	}
}

#ifdef CONFIG_TMPFS_XATTR
/*
 * Superblocks without xattr inode operations may get some security.* xattr
 * support from the LSM "for free". As soon as we have any other xattrs
 * like ACLs, we also need to implement the security.* handlers at
 * filesystem level, though.
 */

/*
 * Allocate new xattr and copy in the value; but leave the name to callers.
 */
static struct shmem_xattr *shmem_xattr_alloc(const void *value, size_t size)
{
	struct shmem_xattr *new_xattr;
	size_t len;

	/* wrap around? */
	len = sizeof(*new_xattr) + size;
	if (len <= sizeof(*new_xattr))
		return NULL;

	new_xattr = kmalloc(len, GFP_KERNEL);
	if (!new_xattr)
		return NULL;

	new_xattr->size = size;
	memcpy(new_xattr->value, value, size);
	return new_xattr;
}
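
/*
 * The "wrap around?" test above is an overflow check: with a 64-bit
 * size_t, for instance, a caller-supplied size of SIZE_MAX - 8 would
 * make sizeof(*new_xattr) + size wrap to a small value, so kmalloc()
 * would succeed but the memcpy() would run far past the allocation.
 * Any wrapped sum is necessarily <= sizeof(*new_xattr), hence the test.
 */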

/*
 * Callback for security_inode_init_security() for acquiring xattrs.
 */
static int shmem_initxattrs(struct inode *inode,
			    const struct xattr *xattr_array,
			    void *fs_info)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	const struct xattr *xattr;
	struct shmem_xattr *new_xattr;
	size_t len;

	for (xattr = xattr_array; xattr->name != NULL; xattr++) {
		new_xattr = shmem_xattr_alloc(xattr->value, xattr->value_len);
		if (!new_xattr)
			return -ENOMEM;

		len = strlen(xattr->name) + 1;
		new_xattr->name = kmalloc(XATTR_SECURITY_PREFIX_LEN + len,
					  GFP_KERNEL);
		if (!new_xattr->name) {
			kfree(new_xattr);
			return -ENOMEM;
		}

		memcpy(new_xattr->name, XATTR_SECURITY_PREFIX,
		       XATTR_SECURITY_PREFIX_LEN);
		memcpy(new_xattr->name + XATTR_SECURITY_PREFIX_LEN,
		       xattr->name, len);

		spin_lock(&info->lock);
		list_add(&new_xattr->list, &info->xattr_list);
		spin_unlock(&info->lock);
	}

	return 0;
}
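
/*
 * The names handed in by the LSM here arrive without their namespace
 * (presumably e.g. "selinux" rather than "security.selinux"), which is
 * why the full name is assembled by prepending XATTR_SECURITY_PREFIX;
 * the per-inode xattr list is only ever touched under info->lock.
 */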

static int shmem_xattr_get(struct dentry *dentry, const char *name,
			   void *buffer, size_t size)
{
	struct shmem_inode_info *info;
	struct shmem_xattr *xattr;
	int ret = -ENODATA;

	info = SHMEM_I(dentry->d_inode);

	spin_lock(&info->lock);
	list_for_each_entry(xattr, &info->xattr_list, list) {
		if (strcmp(name, xattr->name))
			continue;

		ret = xattr->size;
		if (buffer) {
			if (size < xattr->size)
				ret = -ERANGE;
			else
				memcpy(buffer, xattr->value, xattr->size);
		}
		break;
	}
	spin_unlock(&info->lock);
	return ret;
}

static int shmem_xattr_set(struct inode *inode, const char *name,
			   const void *value, size_t size, int flags)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_xattr *xattr;
	struct shmem_xattr *new_xattr = NULL;
	int err = 0;

	/* value == NULL means remove */
	if (value) {
		new_xattr = shmem_xattr_alloc(value, size);
		if (!new_xattr)
			return -ENOMEM;

		new_xattr->name = kstrdup(name, GFP_KERNEL);
		if (!new_xattr->name) {
			kfree(new_xattr);
			return -ENOMEM;
		}
	}

	spin_lock(&info->lock);
	list_for_each_entry(xattr, &info->xattr_list, list) {
		if (!strcmp(name, xattr->name)) {
			if (flags & XATTR_CREATE) {
				xattr = new_xattr;
				err = -EEXIST;
			} else if (new_xattr) {
				list_replace(&xattr->list, &new_xattr->list);
			} else {
				list_del(&xattr->list);
			}
			goto out;
		}
	}
	if (flags & XATTR_REPLACE) {
		xattr = new_xattr;
		err = -ENODATA;
	} else {
		list_add(&new_xattr->list, &info->xattr_list);
		xattr = NULL;
	}
out:
	spin_unlock(&info->lock);
	if (xattr)
		kfree(xattr->name);
	kfree(xattr);
	return err;
}
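
/*
 * In shmem_xattr_set() above, the local "xattr" doubles as the node to
 * free on the way out: XATTR_CREATE on an existing name (-EEXIST) and
 * XATTR_REPLACE on a missing one (-ENODATA) both point it at the unused
 * new_xattr, while a successful list_replace() leaves it aimed at the
 * displaced old node. Only a plain successful add sets it to NULL, so
 * exactly one allocation (name plus node) is freed on every path.
 */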

static const struct xattr_handler *shmem_xattr_handlers[] = {
#ifdef CONFIG_TMPFS_POSIX_ACL
	&generic_acl_access_handler,
	&generic_acl_default_handler,
#endif
	NULL
};

static int shmem_xattr_validate(const char *name)
{
	struct { const char *prefix; size_t len; } arr[] = {
		{ XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN },
		{ XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN }
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(arr); i++) {
		size_t preflen = arr[i].len;
		if (strncmp(name, arr[i].prefix, preflen) == 0) {
			if (!name[preflen])
				return -EINVAL;
			return 0;
		}
	}
	return -EOPNOTSUPP;
}
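
/*
 * So, for example, "security.evm" and "trusted.md5sum" validate, a bare
 * "security." or "trusted." with an empty suffix is -EINVAL, and
 * anything else - "user.foo", say - is -EOPNOTSUPP, since only the
 * security.* and trusted.* namespaces are handled by this list.
 */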

static ssize_t shmem_getxattr(struct dentry *dentry, const char *name,
			      void *buffer, size_t size)
{
	int err;

	/*
	 * If this is a request for a synthetic attribute in the system.*
	 * namespace use the generic infrastructure to resolve a handler
	 * for it via sb->s_xattr.
	 */
	if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
		return generic_getxattr(dentry, name, buffer, size);

	err = shmem_xattr_validate(name);
	if (err)
		return err;

	return shmem_xattr_get(dentry, name, buffer, size);
}

static int shmem_setxattr(struct dentry *dentry, const char *name,
			  const void *value, size_t size, int flags)
{
	int err;

	/*
	 * If this is a request for a synthetic attribute in the system.*
	 * namespace use the generic infrastructure to resolve a handler
	 * for it via sb->s_xattr.
	 */
	if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
		return generic_setxattr(dentry, name, value, size, flags);

	err = shmem_xattr_validate(name);
	if (err)
		return err;

	if (size == 0)
		value = ""; /* empty EA, do not remove */

	return shmem_xattr_set(dentry->d_inode, name, value, size, flags);
}

static int shmem_removexattr(struct dentry *dentry, const char *name)
{
	int err;

	/*
	 * If this is a request for a synthetic attribute in the system.*
	 * namespace use the generic infrastructure to resolve a handler
	 * for it via sb->s_xattr.
	 */
	if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
		return generic_removexattr(dentry, name);

	err = shmem_xattr_validate(name);
	if (err)
		return err;

	return shmem_xattr_set(dentry->d_inode, name, NULL, 0, XATTR_REPLACE);
}

static bool xattr_is_trusted(const char *name)
{
	return !strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN);
}

static ssize_t shmem_listxattr(struct dentry *dentry, char *buffer, size_t size)
{
	bool trusted = capable(CAP_SYS_ADMIN);
	struct shmem_xattr *xattr;
	struct shmem_inode_info *info;
	size_t used = 0;

	info = SHMEM_I(dentry->d_inode);

	spin_lock(&info->lock);
	list_for_each_entry(xattr, &info->xattr_list, list) {
		size_t len;

		/* skip "trusted." attributes for unprivileged callers */
		if (!trusted && xattr_is_trusted(xattr->name))
			continue;

		len = strlen(xattr->name) + 1;
		used += len;
		if (buffer) {
			if (size < used) {
				used = -ERANGE;
				break;
			}
			memcpy(buffer, xattr->name, len);
			buffer += len;
		}
	}
	spin_unlock(&info->lock);

	return used;
}
#endif /* CONFIG_TMPFS_XATTR */

static const struct inode_operations shmem_short_symlink_operations = {
	.readlink	= generic_readlink,
	.follow_link	= shmem_follow_short_symlink,
#ifdef CONFIG_TMPFS_XATTR
	.setxattr	= shmem_setxattr,
	.getxattr	= shmem_getxattr,
	.listxattr	= shmem_listxattr,
	.removexattr	= shmem_removexattr,
#endif
};

static const struct inode_operations shmem_symlink_inode_operations = {
	.readlink	= generic_readlink,
	.follow_link	= shmem_follow_link,
	.put_link	= shmem_put_link,
#ifdef CONFIG_TMPFS_XATTR
	.setxattr	= shmem_setxattr,
	.getxattr	= shmem_getxattr,
	.listxattr	= shmem_listxattr,
	.removexattr	= shmem_removexattr,
#endif
};

static struct dentry *shmem_get_parent(struct dentry *child)
{
	return ERR_PTR(-ESTALE);
}

static int shmem_match(struct inode *ino, void *vfh)
{
	__u32 *fh = vfh;
	__u64 inum = fh[2];
	inum = (inum << 32) | fh[1];
	return ino->i_ino == inum && fh[0] == ino->i_generation;
}

static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
		struct fid *fid, int fh_len, int fh_type)
{
	struct inode *inode;
	struct dentry *dentry = NULL;
	u64 inum;

	if (fh_len < 3)
		return NULL;

	/* only read fid->raw[] once the length has been validated */
	inum = fid->raw[2];
	inum = (inum << 32) | fid->raw[1];

	inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]),
			shmem_match, fid->raw);
	if (inode) {
		dentry = d_find_alias(inode);
		iput(inode);
	}

	return dentry;
}

static int shmem_encode_fh(struct dentry *dentry, __u32 *fh, int *len,
				int connectable)
{
	struct inode *inode = dentry->d_inode;

	if (*len < 3) {
		*len = 3;
		return 255;
	}

	if (inode_unhashed(inode)) {
		/* Unfortunately insert_inode_hash is not idempotent,
		 * so as we hash inodes here rather than at creation
		 * time, we need a lock to ensure we only try
		 * to do it once
		 */
		static DEFINE_SPINLOCK(lock);
		spin_lock(&lock);
		if (inode_unhashed(inode))
			__insert_inode_hash(inode,
					    inode->i_ino + inode->i_generation);
		spin_unlock(&lock);
	}

	fh[0] = inode->i_generation;
	fh[1] = inode->i_ino;
	fh[2] = ((__u64)inode->i_ino) >> 32;

	*len = 3;
	return 1;
}
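
/*
 * The NFS file handle produced above is three 32-bit words: fh[0] is the
 * inode's generation, fh[1] the low and fh[2] the high half of i_ino.
 * shmem_fh_to_dentry() reverses this, reassembling the 64-bit inum and
 * probing the inode hash with the same i_ino + i_generation key under
 * which shmem_encode_fh() hashed the inode, with shmem_match() checking
 * the full triple to reject false positives.
 */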

static const struct export_operations shmem_export_ops = {
	.get_parent	= shmem_get_parent,
	.encode_fh	= shmem_encode_fh,
	.fh_to_dentry	= shmem_fh_to_dentry,
};

static int shmem_parse_options(char *options, struct shmem_sb_info *sbinfo,
			       bool remount)
{
	char *this_char, *value, *rest;
	uid_t uid;
	gid_t gid;

	while (options != NULL) {
		this_char = options;
		for (;;) {
			/*
			 * NUL-terminate this option: unfortunately,
			 * mount options form a comma-separated list,
			 * but mpol's nodelist may also contain commas.
			 */
			options = strchr(options, ',');
			if (options == NULL)
				break;
			options++;
			if (!isdigit(*options)) {
				options[-1] = '\0';
				break;
			}
		}
		if (!*this_char)
			continue;
		if ((value = strchr(this_char,'=')) != NULL) {
			*value++ = 0;
		} else {
			printk(KERN_ERR
			    "tmpfs: No value for mount option '%s'\n",
			    this_char);
			return 1;
		}

		if (!strcmp(this_char,"size")) {
			unsigned long long size;
			size = memparse(value,&rest);
			if (*rest == '%') {
				size <<= PAGE_SHIFT;
				size *= totalram_pages;
				do_div(size, 100);
				rest++;
			}
			if (*rest)
				goto bad_val;
			sbinfo->max_blocks =
				DIV_ROUND_UP(size, PAGE_CACHE_SIZE);
		} else if (!strcmp(this_char,"nr_blocks")) {
			sbinfo->max_blocks = memparse(value, &rest);
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char,"nr_inodes")) {
			sbinfo->max_inodes = memparse(value, &rest);
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char,"mode")) {
			if (remount)
				continue;
			sbinfo->mode = simple_strtoul(value, &rest, 8) & 07777;
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char,"uid")) {
			if (remount)
				continue;
			uid = simple_strtoul(value, &rest, 0);
			if (*rest)
				goto bad_val;
			sbinfo->uid = make_kuid(current_user_ns(), uid);
			if (!uid_valid(sbinfo->uid))
				goto bad_val;
		} else if (!strcmp(this_char,"gid")) {
			if (remount)
				continue;
			gid = simple_strtoul(value, &rest, 0);
			if (*rest)
				goto bad_val;
			sbinfo->gid = make_kgid(current_user_ns(), gid);
			if (!gid_valid(sbinfo->gid))
				goto bad_val;
		} else if (!strcmp(this_char,"mpol")) {
			if (mpol_parse_str(value, &sbinfo->mpol, 1))
				goto bad_val;
		} else {
			printk(KERN_ERR "tmpfs: Bad mount option %s\n",
			       this_char);
			return 1;
		}
	}
	return 0;

bad_val:
	printk(KERN_ERR "tmpfs: Bad value '%s' for mount option '%s'\n",
	       value, this_char);
	return 1;
}
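
/*
 * The isdigit() peek above is what lets an mpol nodelist ride inside a
 * single option: in, say, "mpol=interleave:0,2,size=50%", the comma
 * before '2' is followed by a digit and so stays part of the mpol value,
 * while the comma before "size" starts the next option. A trailing '%'
 * scales against totalram_pages, so "size=50%" on a 4GiB machine works
 * out to a 2GiB block limit, computed from the page count at parse time.
 */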

static int shmem_remount_fs(struct super_block *sb, int *flags, char *data)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	struct shmem_sb_info config = *sbinfo;
	unsigned long inodes;
	int error = -EINVAL;

	if (shmem_parse_options(data, &config, true))
		return error;

	spin_lock(&sbinfo->stat_lock);
	inodes = sbinfo->max_inodes - sbinfo->free_inodes;
	if (percpu_counter_compare(&sbinfo->used_blocks, config.max_blocks) > 0)
		goto out;
	if (config.max_inodes < inodes)
		goto out;
	/*
	 * Those tests disallow limited->unlimited while any are in use;
	 * but we must separately disallow unlimited->limited, because
	 * in that case we have no record of how much is already in use.
	 */
	if (config.max_blocks && !sbinfo->max_blocks)
		goto out;
	if (config.max_inodes && !sbinfo->max_inodes)
		goto out;

	error = 0;
	sbinfo->max_blocks  = config.max_blocks;
	sbinfo->max_inodes  = config.max_inodes;
	sbinfo->free_inodes = config.max_inodes - inodes;

	mpol_put(sbinfo->mpol);
	sbinfo->mpol = config.mpol;	/* transfers initial ref */
out:
	spin_unlock(&sbinfo->stat_lock);
	return error;
}

static int shmem_show_options(struct seq_file *seq, struct dentry *root)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(root->d_sb);

	if (sbinfo->max_blocks != shmem_default_max_blocks())
		seq_printf(seq, ",size=%luk",
			sbinfo->max_blocks << (PAGE_CACHE_SHIFT - 10));
	if (sbinfo->max_inodes != shmem_default_max_inodes())
		seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes);
	if (sbinfo->mode != (S_IRWXUGO | S_ISVTX))
		seq_printf(seq, ",mode=%03ho", sbinfo->mode);
	if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID))
		seq_printf(seq, ",uid=%u",
				from_kuid_munged(&init_user_ns, sbinfo->uid));
	if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID))
		seq_printf(seq, ",gid=%u",
				from_kgid_munged(&init_user_ns, sbinfo->gid));
	shmem_show_mpol(seq, sbinfo->mpol);
	return 0;
}
#endif /* CONFIG_TMPFS */

static void shmem_put_super(struct super_block *sb)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);

	percpu_counter_destroy(&sbinfo->used_blocks);
	kfree(sbinfo);
	sb->s_fs_info = NULL;
}

int shmem_fill_super(struct super_block *sb, void *data, int silent)
{
	struct inode *inode;
	struct shmem_sb_info *sbinfo;
	int err = -ENOMEM;

	/* Round up to L1_CACHE_BYTES to resist false sharing */
	sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
				L1_CACHE_BYTES), GFP_KERNEL);
	if (!sbinfo)
		return -ENOMEM;

	sbinfo->mode = S_IRWXUGO | S_ISVTX;
	sbinfo->uid = current_fsuid();
	sbinfo->gid = current_fsgid();
	sb->s_fs_info = sbinfo;

#ifdef CONFIG_TMPFS
	/*
	 * Per default we only allow half of the physical ram per
	 * tmpfs instance, limiting inodes to one per page of lowmem;
	 * but the internal instance is left unlimited.
	 */
	if (!(sb->s_flags & MS_NOUSER)) {
		sbinfo->max_blocks = shmem_default_max_blocks();
		sbinfo->max_inodes = shmem_default_max_inodes();
		if (shmem_parse_options(data, sbinfo, false)) {
			err = -EINVAL;
			goto failed;
		}
	}
	sb->s_export_op = &shmem_export_ops;
#else
	sb->s_flags |= MS_NOUSER;
#endif

	spin_lock_init(&sbinfo->stat_lock);
	if (percpu_counter_init(&sbinfo->used_blocks, 0))
		goto failed;
	sbinfo->free_inodes = sbinfo->max_inodes;

	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_blocksize = PAGE_CACHE_SIZE;
	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
	sb->s_magic = TMPFS_MAGIC;
	sb->s_op = &shmem_ops;
	sb->s_time_gran = 1;
#ifdef CONFIG_TMPFS_XATTR
	sb->s_xattr = shmem_xattr_handlers;
#endif
#ifdef CONFIG_TMPFS_POSIX_ACL
	sb->s_flags |= MS_POSIXACL;
#endif

	inode = shmem_get_inode(sb, NULL, S_IFDIR | sbinfo->mode, 0, VM_NORESERVE);
	if (!inode)
		goto failed;
	inode->i_uid = sbinfo->uid;
	inode->i_gid = sbinfo->gid;
	sb->s_root = d_make_root(inode);
	if (!sb->s_root)
		goto failed;
	return 0;

failed:
	shmem_put_super(sb);
	return err;
}

static struct kmem_cache *shmem_inode_cachep;

static struct inode *shmem_alloc_inode(struct super_block *sb)
{
	struct shmem_inode_info *info;
	info = kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL);
	if (!info)
		return NULL;
	return &info->vfs_inode;
}

static void shmem_destroy_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
}

static void shmem_destroy_inode(struct inode *inode)
{
	if (S_ISREG(inode->i_mode))
		mpol_free_shared_policy(&SHMEM_I(inode)->policy);
	call_rcu(&inode->i_rcu, shmem_destroy_callback);
}

static void shmem_init_inode(void *foo)
{
	struct shmem_inode_info *info = foo;
	inode_init_once(&info->vfs_inode);
}

static int shmem_init_inodecache(void)
{
	shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
				sizeof(struct shmem_inode_info),
				0, SLAB_PANIC, shmem_init_inode);
	return 0;
}
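
/*
 * Returning 0 unconditionally is safe here: SLAB_PANIC makes
 * kmem_cache_create() panic rather than return NULL on failure, so by
 * the time shmem_init_inodecache() returns, the cache is known to exist.
 */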

static void shmem_destroy_inodecache(void)
{
	kmem_cache_destroy(shmem_inode_cachep);
}

static const struct address_space_operations shmem_aops = {
	.writepage	= shmem_writepage,
	.set_page_dirty	= __set_page_dirty_no_writeback,
#ifdef CONFIG_TMPFS
	.write_begin	= shmem_write_begin,
	.write_end	= shmem_write_end,
#endif
	.migratepage	= migrate_page,
	.error_remove_page = generic_error_remove_page,
};

static const struct file_operations shmem_file_operations = {
	.mmap		= shmem_mmap,
#ifdef CONFIG_TMPFS
	.llseek		= generic_file_llseek,
	.read		= do_sync_read,
	.write		= do_sync_write,
	.aio_read	= shmem_file_aio_read,
	.aio_write	= generic_file_aio_write,
	.fsync		= noop_fsync,
	.splice_read	= shmem_file_splice_read,
	.splice_write	= generic_file_splice_write,
#endif
};

static const struct inode_operations shmem_inode_operations = {
	.setattr	= shmem_setattr,
	.truncate_range	= shmem_truncate_range,
#ifdef CONFIG_TMPFS_XATTR
	.setxattr	= shmem_setxattr,
	.getxattr	= shmem_getxattr,
	.listxattr	= shmem_listxattr,
	.removexattr	= shmem_removexattr,
#endif
};

static const struct inode_operations shmem_dir_inode_operations = {
#ifdef CONFIG_TMPFS
	.create		= shmem_create,
	.lookup		= simple_lookup,
	.link		= shmem_link,
	.unlink		= shmem_unlink,
	.symlink	= shmem_symlink,
	.mkdir		= shmem_mkdir,
	.rmdir		= shmem_rmdir,
	.mknod		= shmem_mknod,
	.rename		= shmem_rename,
#endif
#ifdef CONFIG_TMPFS_XATTR
	.setxattr	= shmem_setxattr,
	.getxattr	= shmem_getxattr,
	.listxattr	= shmem_listxattr,
	.removexattr	= shmem_removexattr,
#endif
#ifdef CONFIG_TMPFS_POSIX_ACL
	.setattr	= shmem_setattr,
#endif
};

static const struct inode_operations shmem_special_inode_operations = {
#ifdef CONFIG_TMPFS_XATTR
	.setxattr	= shmem_setxattr,
	.getxattr	= shmem_getxattr,
	.listxattr	= shmem_listxattr,
	.removexattr	= shmem_removexattr,
#endif
#ifdef CONFIG_TMPFS_POSIX_ACL
	.setattr	= shmem_setattr,
#endif
};

static const struct super_operations shmem_ops = {
	.alloc_inode	= shmem_alloc_inode,
	.destroy_inode	= shmem_destroy_inode,
#ifdef CONFIG_TMPFS
	.statfs		= shmem_statfs,
	.remount_fs	= shmem_remount_fs,
	.show_options	= shmem_show_options,
#endif
	.evict_inode	= shmem_evict_inode,
	.drop_inode	= generic_delete_inode,
	.put_super	= shmem_put_super,
};

static const struct vm_operations_struct shmem_vm_ops = {
	.fault		= shmem_fault,
#ifdef CONFIG_NUMA
	.set_policy	= shmem_set_policy,
	.get_policy	= shmem_get_policy,
#endif
};

static struct dentry *shmem_mount(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data)
{
	return mount_nodev(fs_type, flags, data, shmem_fill_super);
}

static struct file_system_type shmem_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "tmpfs",
	.mount		= shmem_mount,
	.kill_sb	= kill_litter_super,
};

int __init shmem_init(void)
{
	int error;

	error = bdi_init(&shmem_backing_dev_info);
	if (error)
		goto out4;

	error = shmem_init_inodecache();
	if (error)
		goto out3;

	error = register_filesystem(&shmem_fs_type);
	if (error) {
		printk(KERN_ERR "Could not register tmpfs\n");
		goto out2;
	}

	shm_mnt = vfs_kern_mount(&shmem_fs_type, MS_NOUSER,
				 shmem_fs_type.name, NULL);
	if (IS_ERR(shm_mnt)) {
		error = PTR_ERR(shm_mnt);
		printk(KERN_ERR "Could not kern_mount tmpfs\n");
		goto out1;
	}
	return 0;

out1:
	unregister_filesystem(&shmem_fs_type);
out2:
	shmem_destroy_inodecache();
out3:
	bdi_destroy(&shmem_backing_dev_info);
out4:
	shm_mnt = ERR_PTR(error);
	return error;
}

#else /* !CONFIG_SHMEM */

/*
 * tiny-shmem: simple shmemfs and tmpfs using ramfs code
 *
 * This is intended for small systems where the benefits of the full
 * shmem code (swap-backed and resource-limited) are outweighed by
 * their complexity. On systems without swap this code should be
 * effectively equivalent, but much lighter weight.
 */

#include <linux/ramfs.h>

static struct file_system_type shmem_fs_type = {
	.name		= "tmpfs",
	.mount		= ramfs_mount,
	.kill_sb	= kill_litter_super,
};

int __init shmem_init(void)
{
	BUG_ON(register_filesystem(&shmem_fs_type) != 0);

	shm_mnt = kern_mount(&shmem_fs_type);
	BUG_ON(IS_ERR(shm_mnt));

	return 0;
}

int shmem_unuse(swp_entry_t swap, struct page *page)
{
	return 0;
}

int shmem_lock(struct file *file, int lock, struct user_struct *user)
{
	return 0;
}

void shmem_unlock_mapping(struct address_space *mapping)
{
}

void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
{
	truncate_inode_pages_range(inode->i_mapping, lstart, lend);
}
EXPORT_SYMBOL_GPL(shmem_truncate_range);

#define shmem_vm_ops				generic_file_vm_ops
#define shmem_file_operations			ramfs_file_operations
#define shmem_get_inode(sb, dir, mode, dev, flags)	ramfs_get_inode(sb, dir, mode, dev)
#define shmem_acct_size(flags, size)		0
#define shmem_unacct_size(flags, size)		do {} while (0)

#endif /* CONFIG_SHMEM */

/* common code */

/**
 * shmem_file_setup - get an unlinked file living in tmpfs
 * @name: name for dentry (to be seen in /proc/<pid>/maps)
 * @size: size to be set for the file
 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
 */
struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
{
	int error;
	struct file *file;
	struct inode *inode;
	struct path path;
	struct dentry *root;
	struct qstr this;

	if (IS_ERR(shm_mnt))
		return (void *)shm_mnt;

	if (size < 0 || size > MAX_LFS_FILESIZE)
		return ERR_PTR(-EINVAL);

	if (shmem_acct_size(flags, size))
		return ERR_PTR(-ENOMEM);

	error = -ENOMEM;
	this.name = name;
	this.len = strlen(name);
	this.hash = 0; /* will go */
	root = shm_mnt->mnt_root;
	path.dentry = d_alloc(root, &this);
	if (!path.dentry)
		goto put_memory;
	path.mnt = mntget(shm_mnt);

	error = -ENOSPC;
	inode = shmem_get_inode(root->d_sb, NULL, S_IFREG | S_IRWXUGO, 0, flags);
	if (!inode)
		goto put_dentry;

	d_instantiate(path.dentry, inode);
	inode->i_size = size;
	clear_nlink(inode);	/* It is unlinked */
#ifndef CONFIG_MMU
	error = ramfs_nommu_expand_for_mapping(inode, size);
	if (error)
		goto put_dentry;
#endif

	error = -ENFILE;
	file = alloc_file(&path, FMODE_WRITE | FMODE_READ,
		  &shmem_file_operations);
	if (!file)
		goto put_dentry;

	return file;

put_dentry:
	path_put(&path);
put_memory:
	shmem_unacct_size(flags, size);
	return ERR_PTR(error);
}
EXPORT_SYMBOL_GPL(shmem_file_setup);
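
/*
 * A minimal usage sketch (illustrative, not from this file): the file
 * returned is already unlinked, so the last fput() releases everything.
 *
 *	struct file *filp = shmem_file_setup("my-buffer", 1 << 20, 0);
 *	if (IS_ERR(filp))
 *		return PTR_ERR(filp);
 *	...
 *	fput(filp);
 */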

/**
 * shmem_zero_setup - setup a shared anonymous mapping
 * @vma: the vma to be mmapped is prepared by do_mmap_pgoff
 */
int shmem_zero_setup(struct vm_area_struct *vma)
{
	struct file *file;
	loff_t size = vma->vm_end - vma->vm_start;

	file = shmem_file_setup("dev/zero", size, vma->vm_flags);
	if (IS_ERR(file))
		return PTR_ERR(file);

	if (vma->vm_file)
		fput(vma->vm_file);
	vma->vm_file = file;
	vma->vm_ops = &shmem_vm_ops;
	vma->vm_flags |= VM_CAN_NONLINEAR;
	return 0;
}
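
/*
 * This is what gives shared anonymous mappings their backing object:
 * presumably reached from the mmap path (the @vma kernel-doc above
 * points at do_mmap_pgoff), it swaps any placeholder vm_file for an
 * unlinked "dev/zero" tmpfs file sized to the vma, after which faults
 * are served by shmem_fault() via shmem_vm_ops.
 */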

/**
 * shmem_read_mapping_page_gfp - read into page cache, using specified page allocation flags.
 * @mapping: the page's address_space
 * @index: the page index
 * @gfp: the page allocator flags to use if allocating
 *
 * This behaves as a tmpfs "read_cache_page_gfp(mapping, index, gfp)",
 * with any new page allocations done using the specified allocation flags.
 * But read_cache_page_gfp() uses the ->readpage() method: which does not
 * suit tmpfs, since it may have pages in swapcache, and needs to find those
 * for itself; although drivers/gpu/drm i915 and ttm rely upon this support.
 *
 * i915_gem_object_get_pages_gtt() mixes __GFP_NORETRY | __GFP_NOWARN in
 * with the mapping_gfp_mask(), to avoid OOMing the machine unnecessarily.
 */
struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
					 pgoff_t index, gfp_t gfp)
{
#ifdef CONFIG_SHMEM
	struct inode *inode = mapping->host;
	struct page *page;
	int error;

	BUG_ON(mapping->a_ops != &shmem_aops);
	error = shmem_getpage_gfp(inode, index, &page, SGP_CACHE, gfp, NULL);
	if (error)
		page = ERR_PTR(error);
	else
		unlock_page(page);
	return page;
#else
	/*
	 * The tiny !SHMEM case uses ramfs without swap
	 */
	return read_cache_page_gfp(mapping, index, gfp);
#endif
}
EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp);
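
/*
 * A sketch of the i915-style call pattern described in the comment
 * above (names here are illustrative, not from this file):
 *
 *	gfp_t gfp = mapping_gfp_mask(mapping) | __GFP_NORETRY | __GFP_NOWARN;
 *	struct page *page = shmem_read_mapping_page_gfp(mapping, index, gfp);
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);
 *
 * On success the page is returned unlocked with a reference held, which
 * the caller drops with page_cache_release() when finished.
 */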