#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H

/*
 * Copyright 1995 Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/compiler.h>
#include <asm/uaccess.h>
#include <linux/gfp.h>

/*
 * Bits in mapping->flags. The lower __GFP_BITS_SHIFT bits are the page
 * allocation mode flags.
 */
#define AS_EIO          (__GFP_BITS_SHIFT + 0)  /* IO error on async write */
#define AS_ENOSPC       (__GFP_BITS_SHIFT + 1)  /* ENOSPC on async write */

static inline gfp_t mapping_gfp_mask(struct address_space * mapping)
{
        return (__force gfp_t)mapping->flags & __GFP_BITS_MASK;
}

/*
 * This is non-atomic. Only to be used before the mapping is activated.
 * Probably needs a barrier...
 */
static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
{
        m->flags = (m->flags & ~(__force unsigned long)__GFP_BITS_MASK) |
                                (__force unsigned long)mask;
}
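A minimal usage sketch, assuming a filesystem that wants page cache allocations for one of its mappings not to recurse back into the filesystem; the helper name below is hypothetical and not part of this header:

/* Illustrative only: drop __GFP_FS from this mapping's allocation mask. */
static void example_restrict_mapping_gfp(struct address_space *mapping)
{
        gfp_t gfp = mapping_gfp_mask(mapping);  /* current allocation mask */

        mapping_set_gfp_mask(mapping, gfp & ~__GFP_FS);
}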

/*
 * The page cache can be done in larger chunks than
 * one page, because it allows for more efficient
 * throughput (it can then be mapped into user
 * space in smaller chunks for the same flexibility).
 *
 * Or rather, it _will_ be done in larger chunks.
 */
#define PAGE_CACHE_SHIFT        PAGE_SHIFT
#define PAGE_CACHE_SIZE         PAGE_SIZE
#define PAGE_CACHE_MASK         PAGE_MASK
#define PAGE_CACHE_ALIGN(addr)  (((addr)+PAGE_CACHE_SIZE-1)&PAGE_CACHE_MASK)

#define page_cache_get(page)            get_page(page)
#define page_cache_release(page)        put_page(page)
void release_pages(struct page **pages, int nr, int cold);
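PAGE_CACHE_ALIGN() rounds a byte count up to a page cache boundary, and page_cache_get()/page_cache_release() are the reference-counting pair used on page cache pages. A small sketch with a hypothetical helper name, counting how many page cache pages cover the first "len" bytes of an object:

/*
 * Illustrative only: number of page cache pages needed to cover "len" bytes.
 * With 4 KiB pages, len == 5000 aligns to 8192 and yields 2 pages.
 */
static inline unsigned long example_bytes_to_pages(unsigned long len)
{
        return PAGE_CACHE_ALIGN(len) >> PAGE_CACHE_SHIFT;
}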

#ifdef CONFIG_NUMA
extern struct page *__page_cache_alloc(gfp_t gfp);
#else
static inline struct page *__page_cache_alloc(gfp_t gfp)
{
        return alloc_pages(gfp, 0);
}
#endif

static inline struct page *page_cache_alloc(struct address_space *x)
{
        return __page_cache_alloc(mapping_gfp_mask(x));
}

static inline struct page *page_cache_alloc_cold(struct address_space *x)
{
        return __page_cache_alloc(mapping_gfp_mask(x)|__GFP_COLD);
}
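A readahead-style sketch of how these allocators are typically paired with page cache insertion. The helper name is hypothetical, error handling is minimal, and add_to_page_cache_lru() is declared further down in this header:

/*
 * Illustrative only: allocate a cache-cold page for "mapping" and try to
 * insert it at "index".  On failure the page's reference is dropped and
 * NULL is returned.
 */
static struct page *example_alloc_and_add(struct address_space *mapping,
                                          pgoff_t index)
{
        struct page *page = page_cache_alloc_cold(mapping);

        if (!page)
                return NULL;

        if (add_to_page_cache_lru(page, mapping, index,
                                  mapping_gfp_mask(mapping))) {
                page_cache_release(page);
                return NULL;
        }
        return page;
}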

typedef int filler_t(void *, struct page *);

extern struct page * find_get_page(struct address_space *mapping,
                                unsigned long index);
extern struct page * find_lock_page(struct address_space *mapping,
                                unsigned long index);
extern struct page * find_or_create_page(struct address_space *mapping,
                                unsigned long index, gfp_t gfp_mask);
unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
                        unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
                               unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
                        int tag, unsigned int nr_pages, struct page **pages);
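Each of these lookups returns its pages with an elevated reference count that the caller must drop. A gang-lookup sketch (the helper name and batch size are illustrative), walking everything currently cached for a mapping:

/*
 * Illustrative only: visit every page currently cached for "mapping", in
 * batches of 16.  find_get_pages() takes a reference on each page it hands
 * back; release_pages() drops those references again.
 */
static void example_walk_mapping(struct address_space *mapping)
{
        struct page *pages[16];
        pgoff_t start = 0;
        unsigned nr;

        while ((nr = find_get_pages(mapping, start, 16, pages)) != 0) {
                start = pages[nr - 1]->index + 1;  /* resume after the last page */
                release_pages(pages, nr, 0);
        }
}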

/*
 * Returns locked page at given index in given cache, creating it if needed.
 */
static inline struct page *grab_cache_page(struct address_space *mapping, unsigned long index)
{
        return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}
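Because the page comes back locked, the usual pattern is to modify it, mark it dirty, then drop both the lock and the reference. A sketch under that assumption; the helper name is hypothetical, unlock_page() is declared further down in this header, and set_page_dirty() comes from <linux/mm.h>:

/* Illustrative only: create-or-find the page at "index" and dirty it. */
static int example_touch_page(struct address_space *mapping, unsigned long index)
{
        struct page *page = grab_cache_page(mapping, index);

        if (!page)
                return -ENOMEM;

        /* ... fill or modify the page contents here ... */
        set_page_dirty(page);

        unlock_page(page);
        page_cache_release(page);
        return 0;
}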

extern struct page * grab_cache_page_nowait(struct address_space *mapping,
                                unsigned long index);
extern struct page * read_cache_page_async(struct address_space *mapping,
                                unsigned long index, filler_t *filler,
                                void *data);
extern struct page * read_cache_page(struct address_space *mapping,
                                unsigned long index, filler_t *filler,
                                void *data);
extern int read_cache_pages(struct address_space *mapping,
                struct list_head *pages, filler_t *filler, void *data);

static inline struct page *read_mapping_page_async(
                                                struct address_space *mapping,
                                                unsigned long index, void *data)
{
        filler_t *filler = (filler_t *)mapping->a_ops->readpage;
        return read_cache_page_async(mapping, index, filler, data);
}

static inline struct page *read_mapping_page(struct address_space *mapping,
                                             unsigned long index, void *data)
{
        filler_t *filler = (filler_t *)mapping->a_ops->readpage;
        return read_cache_page(mapping, index, filler, data);
}
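The synchronous read_mapping_page() reads the page through the mapping's ->readpage method and returns an ERR_PTR() on failure, so callers check the result with IS_ERR(). A sketch copying out the start of a page; the helper name is hypothetical, "len" is assumed to be at most PAGE_CACHE_SIZE, and IS_ERR()/PTR_ERR() and memcpy() come from <linux/err.h> and <linux/string.h>, which this header does not include itself:

/* Illustrative only: read one page of "mapping" and copy "len" bytes out. */
static int example_read_bytes(struct address_space *mapping, unsigned long index,
                              char *buf, size_t len)
{
        struct page *page = read_mapping_page(mapping, index, NULL);
        void *kaddr;

        if (IS_ERR(page))
                return PTR_ERR(page);

        kaddr = kmap(page);     /* kmap()/kunmap() come from <linux/highmem.h> */
        memcpy(buf, kaddr, len);
        kunmap(page);

        page_cache_release(page);
        return 0;
}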

int add_to_page_cache(struct page *page, struct address_space *mapping,
                                unsigned long index, gfp_t gfp_mask);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
                                unsigned long index, gfp_t gfp_mask);
extern void remove_from_page_cache(struct page *page);
extern void __remove_from_page_cache(struct page *page);

/*
 * Return byte-offset into filesystem object for page.
 */
static inline loff_t page_offset(struct page *page)
{
        return ((loff_t)page->index) << PAGE_CACHE_SHIFT;
}

static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
                                        unsigned long address)
{
        pgoff_t pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
        pgoff += vma->vm_pgoff;
        return pgoff >> (PAGE_CACHE_SHIFT - PAGE_SHIFT);
}
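As a worked example (assuming 4 KiB pages, i.e. PAGE_CACHE_SHIFT == 12): the page at index 3 covers file bytes 12288..16383, so page_offset() returns 12288. In the other direction, linear_page_index() maps a user virtual address back to a page cache index: for a VMA that maps the file from offset 0 (vm_pgoff == 0), an address 0x5234 bytes past vm_start gives (0x5234 >> 12) == 5, i.e. page cache index 5.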

extern void FASTCALL(__lock_page(struct page *page));
extern void FASTCALL(__lock_page_nosync(struct page *page));
extern void FASTCALL(unlock_page(struct page *page));

/*
 * lock_page may only be called if we have the page's inode pinned.
 */
static inline void lock_page(struct page *page)
{
        might_sleep();
        if (TestSetPageLocked(page))
                __lock_page(page);
}

/*
 * lock_page_nosync should only be used if we can't pin the page's inode.
 * Doesn't play quite so well with block device plugging.
 */
static inline void lock_page_nosync(struct page *page)
{
        might_sleep();
        if (TestSetPageLocked(page))
                __lock_page_nosync(page);
}
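A sketch of the usual lock-and-recheck pattern built on these helpers: look the page up, take the page lock, then verify the page was not truncated or invalidated while we slept in lock_page(). This is roughly what find_lock_page() does internally; the helper name below is hypothetical:

/*
 * Illustrative only: return the locked page at "index", or NULL if it is
 * not (or no longer) in the page cache.  The caller must unlock_page() and
 * page_cache_release() the result.
 */
static struct page *example_find_and_lock(struct address_space *mapping,
                                          unsigned long index)
{
        struct page *page = find_get_page(mapping, index);

        if (!page)
                return NULL;

        lock_page(page);
        if (page->mapping != mapping || page->index != index) {
                /* Raced with truncation/invalidation: give the page back. */
                unlock_page(page);
                page_cache_release(page);
                return NULL;
        }
        return page;
}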

/*
 * This is exported only for wait_on_page_locked/wait_on_page_writeback.
 * Never use this directly!
 */
extern void FASTCALL(wait_on_page_bit(struct page *page, int bit_nr));

/*
 * Wait for a page to be unlocked.
 *
 * This must be called with the caller "holding" the page,
 * i.e. with an increased "page->count" so that the page won't
 * go away during the wait.
 */
static inline void wait_on_page_locked(struct page *page)
{
        if (PageLocked(page))
                wait_on_page_bit(page, PG_locked);
}

/*
 * Wait for a page to complete writeback.
 */
static inline void wait_on_page_writeback(struct page *page)
{
        if (PageWriteback(page))
                wait_on_page_bit(page, PG_writeback);
}
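Both waiters assume the caller already holds a reference on the page, as the comment above spells out. A sketch of how wait_on_page_locked() is used after I/O has been started on a page (read completion drops PG_locked); the helper name is hypothetical, and PageUptodate() comes from the page flag helpers pulled in via <linux/mm.h>:

/*
 * Illustrative only: wait for an in-flight read (which holds PG_locked) to
 * finish, then report whether it succeeded.  wait_on_page_writeback() is
 * used the same way when waiting for write-out instead of read-in.
 */
static int example_wait_for_read(struct page *page)
{
        wait_on_page_locked(page);
        return PageUptodate(page) ? 0 : -EIO;
}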

extern void end_page_writeback(struct page *page);

/*
 * Fault a userspace page into pagetables. Return non-zero on a fault.
 *
 * This assumes that two userspace pages are always sufficient. That's
 * not true if PAGE_CACHE_SIZE > PAGE_SIZE.
 */
static inline int fault_in_pages_writeable(char __user *uaddr, int size)
{
        int ret;

        /*
         * Writing zeroes into userspace here is OK, because we know that if
         * the zero gets there, we'll be overwriting it.
         */
        ret = __put_user(0, uaddr);
        if (ret == 0) {
                char __user *end = uaddr + size - 1;

                /*
                 * If the page was already mapped, this will get a cache miss
                 * for sure, so try to avoid doing it.
                 */
                if (((unsigned long)uaddr & PAGE_MASK) !=
                                ((unsigned long)end & PAGE_MASK))
                        ret = __put_user(0, end);
        }
        return ret;
}

static inline void fault_in_pages_readable(const char __user *uaddr, int size)
{
        volatile char c;
        int ret;

        ret = __get_user(c, uaddr);
        if (ret == 0) {
                const char __user *end = uaddr + size - 1;

                if (((unsigned long)uaddr & PAGE_MASK) !=
                                ((unsigned long)end & PAGE_MASK))
                        __get_user(c, end);
        }
}
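These pre-fault helpers exist mainly for the buffered I/O paths: the write path, for instance, touches the source user buffer with fault_in_pages_readable() before locking the destination page cache page, so that the later copy is unlikely to fault (and block, or deadlock on that same page) while the page lock is held. A minimal read-side sketch with a hypothetical helper name, checking that a destination user buffer can be faulted in before any copy is attempted:

/*
 * Illustrative only: make sure the first and last byte of the user buffer
 * can be written before we start copying into it.
 */
static inline int example_check_user_buffer(char __user *buf, int size)
{
        if (fault_in_pages_writeable(buf, size))
                return -EFAULT;
        /* ... proceed to copy up to "size" bytes into "buf" ... */
        return 0;
}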

#endif /* _LINUX_PAGEMAP_H */