/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H

/*
 * Copyright 1995 Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/compiler.h>
#include <linux/uaccess.h>
#include <linux/gfp.h>
#include <linux/bitops.h>
#include <linux/hardirq.h> /* for in_interrupt() */
#include <linux/hugetlb_inline.h>

/*
 * Bits in mapping->flags.
 */
enum mapping_flags {
	AS_EIO		= 0,	/* IO error on async write */
	AS_ENOSPC	= 1,	/* ENOSPC on async write */
	AS_MM_ALL_LOCKS	= 2,	/* under mm_take_all_locks() */
	AS_UNEVICTABLE	= 3,	/* e.g., ramdisk, SHM_LOCK */
	AS_EXITING	= 4,	/* final truncate in progress */
	/* writeback related tags are not used */
	AS_NO_WRITEBACK_TAGS = 5,
};

/**
 * mapping_set_error - record a writeback error in the address_space
 * @mapping: the mapping in which an error should be set
 * @error: the error to set in the mapping
 *
 * When writeback fails in some way, we must record that error so that
 * userspace can be informed when fsync and the like are called.  We endeavor
 * to report errors on any file that was open at the time of the error.  Some
 * internal callers also need to know when writeback errors have occurred.
 *
 * When a writeback error occurs, most filesystems will want to call
 * mapping_set_error to record the error in the mapping so that it can be
 * reported when the application calls fsync(2).
 */
static inline void mapping_set_error(struct address_space *mapping, int error)
{
	if (likely(!error))
		return;

	/* Record in wb_err for checkers using errseq_t based tracking */
	filemap_set_wb_err(mapping, error);

	/* Record it in flags for now, for legacy callers */
	if (error == -ENOSPC)
		set_bit(AS_ENOSPC, &mapping->flags);
	else
		set_bit(AS_EIO, &mapping->flags);
}
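
/*
 * Illustrative sketch, not part of the original header: a filesystem's
 * asynchronous write completion path would typically record a failure
 * like this so that a later fsync(2) observes it.  The function name and
 * calling convention are hypothetical.
 */
static inline void example_finish_async_write(struct address_space *mapping,
					      int error)
{
	/* Record the failed write in both errseq_t and the legacy flags. */
	if (error)
		mapping_set_error(mapping, error);
	/* ...a real completion handler would also end writeback here... */
}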

static inline void mapping_set_unevictable(struct address_space *mapping)
{
	set_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline void mapping_clear_unevictable(struct address_space *mapping)
{
	clear_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline int mapping_unevictable(struct address_space *mapping)
{
	if (mapping)
		return test_bit(AS_UNEVICTABLE, &mapping->flags);
	return !!mapping;
}

static inline void mapping_set_exiting(struct address_space *mapping)
{
	set_bit(AS_EXITING, &mapping->flags);
}

static inline int mapping_exiting(struct address_space *mapping)
{
	return test_bit(AS_EXITING, &mapping->flags);
}

static inline void mapping_set_no_writeback_tags(struct address_space *mapping)
{
	set_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}

static inline int mapping_use_writeback_tags(struct address_space *mapping)
{
	return !test_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}

static inline gfp_t mapping_gfp_mask(struct address_space *mapping)
{
	return mapping->gfp_mask;
}

/* Restricts the given gfp_mask to what the mapping allows. */
static inline gfp_t mapping_gfp_constraint(struct address_space *mapping,
					   gfp_t gfp_mask)
{
	return mapping_gfp_mask(mapping) & gfp_mask;
}

/*
 * This is non-atomic.  Only to be used before the mapping is activated.
 * Probably needs a barrier...
 */
static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
{
	m->gfp_mask = mask;
}

void release_pages(struct page **pages, int nr, bool cold);

/*
 * Speculatively take a reference to a page.
 * If the page is free (_refcount == 0), then _refcount is untouched, and 0
 * is returned.  Otherwise, _refcount is incremented by 1 and 1 is returned.
 *
 * This function must be called inside the same rcu_read_lock() section as has
 * been used to lookup the page in the pagecache radix-tree (or page table):
 * this allows allocators to use a synchronize_rcu() to stabilize _refcount.
 *
 * Unless an RCU grace period has passed, the count of all pages coming out
 * of the allocator must be considered unstable.  page_count may return higher
 * than expected, and put_page must be able to do the right thing when the
 * page has been finished with, no matter what it is subsequently allocated
 * for (because put_page is what is used here to drop an invalid speculative
 * reference).
 *
 * This is the interesting part of the lockless pagecache (and lockless
 * get_user_pages) locking protocol, where the lookup-side (eg. find_get_page)
 * has the following pattern:
 * 1. find page in radix tree
 * 2. conditionally increment refcount
 * 3. check the page is still in pagecache (if not, goto 1)
 *
 * Remove-side that cares about stability of _refcount (eg. reclaim) has the
 * following (with tree_lock held for write):
 * A. atomically check refcount is correct and set it to 0 (atomic_cmpxchg)
 * B. remove page from pagecache
 * C. free the page
 *
 * There are 2 critical interleavings that matter:
 * - 2 runs before A: in this case, A sees elevated refcount and bails out
 * - A runs before 2: in this case, 2 sees zero refcount and retries;
 *   subsequently, B will complete and 1 will find no page, causing the
 *   lookup to return NULL.
 *
 * It is possible that between 1 and 2, the page is removed and then the exact
 * same page is inserted into the same position in pagecache.  That's OK: the
 * old find_get_page using tree_lock could equally have run before or after
 * such a re-insertion, depending on the order in which locks are granted.
 *
 * Lookups racing against pagecache insertion aren't a big problem: either 1
 * will find the page or it will not.  Likewise, the old find_get_page could
 * run either before the insertion or afterwards, depending on timing.
 */
static inline int page_cache_get_speculative(struct page *page)
{
#ifdef CONFIG_TINY_RCU
# ifdef CONFIG_PREEMPT_COUNT
	VM_BUG_ON(!in_atomic() && !irqs_disabled());
# endif
	/*
	 * Preempt must be disabled here - we rely on rcu_read_lock doing
	 * this for us.
	 *
	 * Pagecache won't be truncated from interrupt context, so if we have
	 * found a page in the radix tree here, we have pinned its refcount by
	 * disabling preempt, and hence no need for the "speculative get" that
	 * SMP requires.
	 */
	VM_BUG_ON_PAGE(page_count(page) == 0, page);
	page_ref_inc(page);

#else
	if (unlikely(!get_page_unless_zero(page))) {
		/*
		 * Either the page has been freed, or will be freed.
		 * In either case, retry here and the caller should
		 * do the right thing (see comments above).
		 */
		return 0;
	}
#endif
	VM_BUG_ON_PAGE(PageTail(page), page);

	return 1;
}
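
/*
 * Illustrative sketch, not part of the original header: the lookup-side
 * pattern described above (1. find, 2. speculatively ref, 3. recheck),
 * roughly what find_get_page() does internally with this era's
 * mapping->page_tree layout.  Shadow/exceptional entries and the
 * radix_tree_deref_slot() subtleties of the real code are omitted.
 */
static inline struct page *example_lockless_lookup(struct address_space *mapping,
						   pgoff_t offset)
{
	struct page *page;

	rcu_read_lock();
repeat:
	page = radix_tree_lookup(&mapping->page_tree, offset);	/* step 1 */
	if (page) {
		if (!page_cache_get_speculative(page))		/* step 2 */
			goto repeat;
		/* step 3: check the page is still at this slot */
		if (unlikely(page != radix_tree_lookup(&mapping->page_tree,
						       offset))) {
			put_page(page);
			goto repeat;
		}
	}
	rcu_read_unlock();
	return page;
}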

/*
 * Same as above, but add instead of inc (could just be merged)
 */
static inline int page_cache_add_speculative(struct page *page, int count)
{
	VM_BUG_ON(in_interrupt());

#if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU)
# ifdef CONFIG_PREEMPT_COUNT
	VM_BUG_ON(!in_atomic() && !irqs_disabled());
# endif
	VM_BUG_ON_PAGE(page_count(page) == 0, page);
	page_ref_add(page, count);

#else
	if (unlikely(!page_ref_add_unless(page, count, 0)))
		return 0;
#endif
	VM_BUG_ON_PAGE(PageCompound(page) && page != compound_head(page), page);

	return 1;
}

#ifdef CONFIG_NUMA
extern struct page *__page_cache_alloc(gfp_t gfp);
#else
static inline struct page *__page_cache_alloc(gfp_t gfp)
{
	return alloc_pages(gfp, 0);
}
#endif

static inline struct page *page_cache_alloc(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x));
}

static inline struct page *page_cache_alloc_cold(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x)|__GFP_COLD);
}

static inline gfp_t readahead_gfp_mask(struct address_space *x)
{
	return mapping_gfp_mask(x) |
				__GFP_COLD | __GFP_NORETRY | __GFP_NOWARN;
}

typedef int filler_t(struct file *, struct page *);

pgoff_t page_cache_next_hole(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan);
pgoff_t page_cache_prev_hole(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan);

#define FGP_ACCESSED		0x00000001
#define FGP_LOCK		0x00000002
#define FGP_CREAT		0x00000004
#define FGP_WRITE		0x00000008
#define FGP_NOFS		0x00000010
#define FGP_NOWAIT		0x00000020

struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
		int fgp_flags, gfp_t cache_gfp_mask);

/**
 * find_get_page - find and get a page reference
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned with an increased refcount.
 *
 * Otherwise, %NULL is returned.
 */
static inline struct page *find_get_page(struct address_space *mapping,
					 pgoff_t offset)
{
	return pagecache_get_page(mapping, offset, 0, 0);
}

static inline struct page *find_get_page_flags(struct address_space *mapping,
					       pgoff_t offset, int fgp_flags)
{
	return pagecache_get_page(mapping, offset, fgp_flags, 0);
}

/**
 * find_lock_page - locate, pin and lock a pagecache page
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * Otherwise, %NULL is returned.
 *
 * find_lock_page() may sleep.
 */
static inline struct page *find_lock_page(struct address_space *mapping,
					  pgoff_t offset)
{
	return pagecache_get_page(mapping, offset, FGP_LOCK, 0);
}

/**
 * find_or_create_page - locate or add a pagecache page
 * @mapping: the page's address_space
 * @offset: the page's index into the mapping
 * @gfp_mask: page allocation mode
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * If the page is not present, a new page is allocated using @gfp_mask
 * and added to the page cache and the VM's LRU list.  The page is
 * returned locked and with an increased refcount.
 *
 * On memory exhaustion, %NULL is returned.
 *
 * find_or_create_page() may sleep, even if @gfp_mask specifies an
 * atomic allocation!
 */
static inline struct page *find_or_create_page(struct address_space *mapping,
					       pgoff_t offset, gfp_t gfp_mask)
{
	return pagecache_get_page(mapping, offset,
				  FGP_LOCK|FGP_ACCESSED|FGP_CREAT,
				  gfp_mask);
}
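
/*
 * Illustrative sketch, not part of the original header: pinning a locked
 * pagecache page for a buffered-write style path.  This mirrors what the
 * grab_cache_page() helper further down does; the function name is
 * hypothetical.
 */
static inline struct page *example_get_locked_page(struct address_space *mapping,
						   pgoff_t index)
{
	struct page *page;

	page = find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
	if (!page)
		return NULL;	/* memory exhaustion */

	/*
	 * The page is locked with an elevated refcount; the caller must
	 * unlock_page() and put_page() when done with it.
	 */
	return page;
}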

/**
 * grab_cache_page_nowait - returns locked page at given index in given cache
 * @mapping: target address_space
 * @index: the page index
 *
 * Same as grab_cache_page(), but do not wait if the page is unavailable.
 * This is intended for speculative data generators, where the data can
 * be regenerated if the page couldn't be grabbed.  This routine should
 * be safe to call while holding the lock for another page.
 *
 * Clear __GFP_FS when allocating the page to avoid recursion into the fs
 * and deadlock against the caller's locked page.
 */
static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
						  pgoff_t index)
{
	return pagecache_get_page(mapping, index,
				  FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
				  mapping_gfp_mask(mapping));
}

struct page *find_get_entry(struct address_space *mapping, pgoff_t offset);
struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset);
unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
			  unsigned int nr_entries, struct page **entries,
			  pgoff_t *indices);
unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start,
			      pgoff_t end, unsigned int nr_pages,
			      struct page **pages);
static inline unsigned find_get_pages(struct address_space *mapping,
				      pgoff_t *start, unsigned int nr_pages,
				      struct page **pages)
{
	return find_get_pages_range(mapping, start, (pgoff_t)-1, nr_pages,
				    pages);
}
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
			       unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
			    int tag, unsigned int nr_pages, struct page **pages);
unsigned find_get_entries_tag(struct address_space *mapping, pgoff_t start,
			      int tag, unsigned int nr_entries,
			      struct page **entries, pgoff_t *indices);
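
/*
 * Illustrative sketch, not part of the original header: walking a mapping's
 * cached pages in batches, the way writeback and truncate loops do (they
 * normally go through the pagevec wrappers).  The function name and batch
 * size are hypothetical.
 */
static inline void example_walk_cached_pages(struct address_space *mapping)
{
	struct page *pages[16];
	pgoff_t index = 0;
	unsigned int i, nr;

	while ((nr = find_get_pages(mapping, &index, 16, pages)) != 0) {
		for (i = 0; i < nr; i++) {
			/* ...inspect pages[i]... */
			put_page(pages[i]);	/* drop the lookup reference */
		}
	}
}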

struct page *grab_cache_page_write_begin(struct address_space *mapping,
					 pgoff_t index, unsigned flags);

/*
 * Returns locked page at given index in given cache, creating it if needed.
 */
static inline struct page *grab_cache_page(struct address_space *mapping,
					   pgoff_t index)
{
	return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}

extern struct page *read_cache_page(struct address_space *mapping,
				    pgoff_t index, filler_t *filler, void *data);
extern struct page *read_cache_page_gfp(struct address_space *mapping,
					pgoff_t index, gfp_t gfp_mask);
extern int read_cache_pages(struct address_space *mapping,
			    struct list_head *pages, filler_t *filler, void *data);

static inline struct page *read_mapping_page(struct address_space *mapping,
					     pgoff_t index, void *data)
{
	filler_t *filler = mapping->a_ops->readpage;
	return read_cache_page(mapping, index, filler, data);
}

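/*
 * Illustrative sketch, not part of the original header: reading one page of
 * an inode's data through the mapping's ->readpage, as simple filesystems
 * do for directory contents.  The function name is hypothetical.
 */
static inline struct page *example_read_inode_page(struct inode *inode,
						   pgoff_t n)
{
	/*
	 * On success the page is returned uptodate with a reference held;
	 * on failure an ERR_PTR() is returned instead of NULL.
	 */
	return read_mapping_page(inode->i_mapping, n, NULL);
}
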
/*
 * Get the index of the page within the radix tree
 * (TODO: remove once hugetlb pages will have ->index in PAGE_SIZE)
 */
static inline pgoff_t page_to_index(struct page *page)
{
	pgoff_t pgoff;

	if (likely(!PageTransTail(page)))
		return page->index;

	/*
	 * We don't initialize ->index for tail pages: calculate based on
	 * head page
	 */
	pgoff = compound_head(page)->index;
	pgoff += page - compound_head(page);
	return pgoff;
}

/*
 * Get the offset in PAGE_SIZE.
 * (TODO: hugepage should have ->index in PAGE_SIZE)
 */
static inline pgoff_t page_to_pgoff(struct page *page)
{
	if (unlikely(PageHeadHuge(page)))
		return page->index << compound_order(page);

	return page_to_index(page);
}

/*
 * Return byte-offset into filesystem object for page.
 */
static inline loff_t page_offset(struct page *page)
{
	return ((loff_t)page->index) << PAGE_SHIFT;
}

static inline loff_t page_file_offset(struct page *page)
{
	return ((loff_t)page_index(page)) << PAGE_SHIFT;
}

extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
				     unsigned long address);

static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
					unsigned long address)
{
	pgoff_t pgoff;
	if (unlikely(is_vm_hugetlb_page(vma)))
		return linear_hugepage_index(vma, address);
	pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
	pgoff += vma->vm_pgoff;
	return pgoff;
}

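/*
 * Illustrative note, not part of the original header: for an ordinary
 * (non-hugetlb) VMA the computation above reduces to
 *
 *	pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff
 *
 * so the first page of the mapping has index vm_pgoff, and page_offset()
 * of the page at that index is (pgoff << PAGE_SHIFT) bytes into the file.
 */
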
extern void __lock_page(struct page *page);
extern int __lock_page_killable(struct page *page);
extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
				unsigned int flags);
extern void unlock_page(struct page *page);

static inline int trylock_page(struct page *page)
{
	page = compound_head(page);
	return (likely(!test_and_set_bit_lock(PG_locked, &page->flags)));
}

/*
 * lock_page may only be called if we have the page's inode pinned.
 */
static inline void lock_page(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		__lock_page(page);
}

/*
 * lock_page_killable is like lock_page but can be interrupted by fatal
 * signals.  It returns 0 if it locked the page and -EINTR if it was
 * killed while waiting.
 */
static inline int lock_page_killable(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		return __lock_page_killable(page);
	return 0;
}
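
/*
 * Illustrative sketch, not part of the original header: a read path that
 * must take the page lock but stay killable, in the style of the generic
 * file read code.  The function name is hypothetical.
 */
static inline int example_lock_page_for_read(struct page *page)
{
	int err = lock_page_killable(page);

	if (err)
		return err;	/* -EINTR: fatal signal arrived while waiting */

	/* ...examine or copy the page contents... */
	unlock_page(page);
	return 0;
}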

/*
 * lock_page_or_retry - Lock the page, unless this would block and the
 * caller indicated that it can handle a retry.
 *
 * Return value and mmap_sem implications depend on flags; see
 * __lock_page_or_retry().
 */
static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm,
				     unsigned int flags)
{
	might_sleep();
	return trylock_page(page) || __lock_page_or_retry(page, mm, flags);
}

/*
 * This is exported only for wait_on_page_locked/wait_on_page_writeback, etc.,
 * and should not be used directly.
 */
extern void wait_on_page_bit(struct page *page, int bit_nr);
extern int wait_on_page_bit_killable(struct page *page, int bit_nr);

/*
 * Wait for a page to be unlocked.
 *
 * This must be called with the caller "holding" the page,
 * i.e. with an increased "page->count" so that the page won't
 * go away during the wait.
 */
static inline void wait_on_page_locked(struct page *page)
{
	if (PageLocked(page))
		wait_on_page_bit(compound_head(page), PG_locked);
}

static inline int wait_on_page_locked_killable(struct page *page)
{
	if (!PageLocked(page))
		return 0;
	return wait_on_page_bit_killable(compound_head(page), PG_locked);
}

/*
 * Wait for a page to complete writeback
 */
static inline void wait_on_page_writeback(struct page *page)
{
	if (PageWriteback(page))
		wait_on_page_bit(page, PG_writeback);
}

extern void end_page_writeback(struct page *page);
void wait_for_stable_page(struct page *page);

void page_endio(struct page *page, bool is_write, int err);

/*
 * Add an arbitrary waiter to a page's wait queue
 */
extern void add_page_wait_queue(struct page *page, wait_queue_entry_t *waiter);

/*
 * Fault in everything in the given userspace address range.
 */
static inline int fault_in_pages_writeable(char __user *uaddr, int size)
{
	char __user *end = uaddr + size - 1;

	if (unlikely(size == 0))
		return 0;

	if (unlikely(uaddr > end))
		return -EFAULT;
	/*
	 * Writing zeroes into userspace here is OK, because we know that if
	 * the zero gets there, we'll be overwriting it.
	 */
	do {
		if (unlikely(__put_user(0, uaddr) != 0))
			return -EFAULT;
		uaddr += PAGE_SIZE;
	} while (uaddr <= end);

	/* Check whether the range spilled into the next page. */
	if (((unsigned long)uaddr & PAGE_MASK) ==
			((unsigned long)end & PAGE_MASK))
		return __put_user(0, end);

	return 0;
}

static inline int fault_in_pages_readable(const char __user *uaddr, int size)
{
	volatile char c;
	const char __user *end = uaddr + size - 1;

	if (unlikely(size == 0))
		return 0;

	if (unlikely(uaddr > end))
		return -EFAULT;

	do {
		if (unlikely(__get_user(c, uaddr) != 0))
			return -EFAULT;
		uaddr += PAGE_SIZE;
	} while (uaddr <= end);

	/* Check whether the range spilled into the next page. */
	if (((unsigned long)uaddr & PAGE_MASK) ==
			((unsigned long)end & PAGE_MASK)) {
		return __get_user(c, end);
	}

	(void)c;
	return 0;
}

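/*
 * Illustrative sketch, not part of the original header: the usual pattern
 * around these helpers.  A writer that must not fault while holding a page
 * lock prefaults the user buffer first, then performs the copy with
 * pagefaults disabled and retries on a short copy.  Everything except the
 * fault-in helper is hypothetical here.
 */
static inline int example_prefault_then_copy(const char __user *buf, int len)
{
	if (fault_in_pages_readable(buf, len))
		return -EFAULT;

	/*
	 * ...lock the pagecache page, then copy with pagefaults disabled
	 * (e.g. iov_iter_copy_from_user_atomic()); if fewer than len bytes
	 * were copied, unlock and repeat the fault-in/copy cycle...
	 */
	return 0;
}
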
int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
			     pgoff_t index, gfp_t gfp_mask);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
			  pgoff_t index, gfp_t gfp_mask);
extern void delete_from_page_cache(struct page *page);
extern void __delete_from_page_cache(struct page *page, void *shadow);
int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask);

/*
 * Like add_to_page_cache_locked, but used to add newly allocated pages:
 * the page is new, so we can just run __SetPageLocked() against it.
 */
static inline int add_to_page_cache(struct page *page,
		struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
{
	int error;

	__SetPageLocked(page);
	error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
	if (unlikely(error))
		__ClearPageLocked(page);
	return error;
}

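/*
 * Illustrative sketch, not part of the original header: populating the
 * pagecache with a freshly allocated page, roughly what the readahead and
 * read paths do via add_to_page_cache_lru().  The function name is
 * hypothetical and error handling is minimal.
 */
static inline struct page *example_add_new_page(struct address_space *mapping,
						pgoff_t index)
{
	struct page *page = __page_cache_alloc(mapping_gfp_mask(mapping));

	if (!page)
		return NULL;

	if (add_to_page_cache_lru(page, mapping, index,
				  mapping_gfp_constraint(mapping, GFP_KERNEL))) {
		/* Another task may have installed a page here meanwhile. */
		put_page(page);
		return NULL;
	}
	/* The page is now in the cache, on the LRU, and locked. */
	return page;
}
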
static inline unsigned long dir_pages(struct inode *inode)
{
	return (unsigned long)(inode->i_size + PAGE_SIZE - 1) >>
			       PAGE_SHIFT;
}

#endif /* _LINUX_PAGEMAP_H */