/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/swap.h>
#include "ttm/ttm_module.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"

static int ttm_tt_swapin(struct ttm_tt *ttm);

#if defined(CONFIG_X86)
static void ttm_tt_clflush_page(struct page *page)
{
	uint8_t *page_virtual;
	unsigned int i;

	if (unlikely(page == NULL))
		return;

	page_virtual = kmap_atomic(page, KM_USER0);

	for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
		clflush(page_virtual + i);

	kunmap_atomic(page_virtual, KM_USER0);
}

static void ttm_tt_cache_flush_clflush(struct page *pages[],
				       unsigned long num_pages)
{
	unsigned long i;

	mb();
	for (i = 0; i < num_pages; ++i)
		ttm_tt_clflush_page(*pages++);
	mb();
}
#elif !defined(__powerpc__)
static void ttm_tt_ipi_handler(void *null)
{
	;
}
#endif

void ttm_tt_cache_flush(struct page *pages[], unsigned long num_pages)
{

#if defined(CONFIG_X86)
	if (cpu_has_clflush) {
		ttm_tt_cache_flush_clflush(pages, num_pages);
		return;
	}
#elif defined(__powerpc__)
	unsigned long i;

	for (i = 0; i < num_pages; ++i) {
		struct page *page = pages[i];
		void *page_virtual;

		if (unlikely(page == NULL))
			continue;

		page_virtual = kmap_atomic(page, KM_USER0);
		flush_dcache_range((unsigned long) page_virtual,
				   (unsigned long) page_virtual + PAGE_SIZE);
		kunmap_atomic(page_virtual, KM_USER0);
	}
#else
	if (on_each_cpu(ttm_tt_ipi_handler, NULL, 1) != 0)
		printk(KERN_ERR TTM_PFX
		       "Timed out waiting for drm cache flush.\n");
#endif
}

/**
 * Allocates storage for pointers to the pages that back the ttm.
 *
 * Uses kmalloc if possible. Otherwise falls back to vmalloc.
 */
static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
{
	unsigned long size = ttm->num_pages * sizeof(*ttm->pages);
	ttm->pages = NULL;

	if (size <= PAGE_SIZE)
		ttm->pages = kzalloc(size, GFP_KERNEL);

	if (!ttm->pages) {
		ttm->pages = vmalloc_user(size);
		if (ttm->pages)
			ttm->page_flags |= TTM_PAGE_FLAG_VMALLOC;
	}
}

static void ttm_tt_free_page_directory(struct ttm_tt *ttm)
{
	if (ttm->page_flags & TTM_PAGE_FLAG_VMALLOC) {
		vfree(ttm->pages);
		ttm->page_flags &= ~TTM_PAGE_FLAG_VMALLOC;
	} else {
		kfree(ttm->pages);
	}
	ttm->pages = NULL;
}

static struct page *ttm_tt_alloc_page(unsigned page_flags)
{
	gfp_t gfp_flags = GFP_USER;

	if (page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
		gfp_flags |= __GFP_ZERO;

	if (page_flags & TTM_PAGE_FLAG_DMA32)
		gfp_flags |= __GFP_DMA32;
	else
		gfp_flags |= __GFP_HIGHMEM;

	return alloc_page(gfp_flags);
}

static void ttm_tt_free_user_pages(struct ttm_tt *ttm)
{
	int write;
	int dirty;
	struct page *page;
	int i;
	struct ttm_backend *be = ttm->be;

	BUG_ON(!(ttm->page_flags & TTM_PAGE_FLAG_USER));
	write = ((ttm->page_flags & TTM_PAGE_FLAG_WRITE) != 0);
	dirty = ((ttm->page_flags & TTM_PAGE_FLAG_USER_DIRTY) != 0);

	if (be)
		be->func->clear(be);

	for (i = 0; i < ttm->num_pages; ++i) {
		page = ttm->pages[i];
		if (page == NULL)
			continue;

		if (page == ttm->dummy_read_page) {
			BUG_ON(write);
			continue;
		}

		if (write && dirty && !PageReserved(page))
			set_page_dirty_lock(page);

		ttm->pages[i] = NULL;
		ttm_mem_global_free(ttm->glob->mem_glob, PAGE_SIZE);
		put_page(page);
	}
	ttm->state = tt_unpopulated;
	ttm->first_himem_page = ttm->num_pages;
	ttm->last_lomem_page = -1;
}

static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index)
{
	struct page *p;
	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
	int ret;

	while (NULL == (p = ttm->pages[index])) {
		p = ttm_tt_alloc_page(ttm->page_flags);

		if (!p)
			return NULL;

		ret = ttm_mem_global_alloc_page(mem_glob, p, false, false);
		if (unlikely(ret != 0))
			goto out_err;

		if (PageHighMem(p))
			ttm->pages[--ttm->first_himem_page] = p;
		else
			ttm->pages[++ttm->last_lomem_page] = p;
	}
	return p;
out_err:
	put_page(p);
	return NULL;
}

struct page *ttm_tt_get_page(struct ttm_tt *ttm, int index)
{
	int ret;

	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
		ret = ttm_tt_swapin(ttm);
		if (unlikely(ret != 0))
			return NULL;
	}
	return __ttm_tt_get_page(ttm, index);
}

int ttm_tt_populate(struct ttm_tt *ttm)
{
	struct page *page;
	unsigned long i;
	struct ttm_backend *be;
	int ret;

	if (ttm->state != tt_unpopulated)
		return 0;

	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
		ret = ttm_tt_swapin(ttm);
		if (unlikely(ret != 0))
			return ret;
	}

	be = ttm->be;

	for (i = 0; i < ttm->num_pages; ++i) {
		page = __ttm_tt_get_page(ttm, i);
		if (!page)
			return -ENOMEM;
	}

	be->func->populate(be, ttm->num_pages, ttm->pages,
			   ttm->dummy_read_page);
	ttm->state = tt_unbound;
	return 0;
}

#ifdef CONFIG_X86
static inline int ttm_tt_set_page_caching(struct page *p,
					   enum ttm_caching_state c_state)
{
	if (PageHighMem(p))
		return 0;

	switch (c_state) {
	case tt_cached:
		return set_pages_wb(p, 1);
	case tt_wc:
		return set_memory_wc((unsigned long) page_address(p), 1);
	default:
		return set_pages_uc(p, 1);
	}
}
#else /* CONFIG_X86 */
static inline int ttm_tt_set_page_caching(struct page *p,
					   enum ttm_caching_state c_state)
{
	return 0;
}
#endif /* CONFIG_X86 */

/*
 * Change caching policy for the linear kernel map
 * for range of pages in a ttm.
 */

static int ttm_tt_set_caching(struct ttm_tt *ttm,
			      enum ttm_caching_state c_state)
{
	int i, j;
	struct page *cur_page;
	int ret;

	if (ttm->caching_state == c_state)
		return 0;

	if (c_state != tt_cached) {
		ret = ttm_tt_populate(ttm);
		if (unlikely(ret != 0))
			return ret;
	}

	if (ttm->caching_state == tt_cached)
		ttm_tt_cache_flush(ttm->pages, ttm->num_pages);

	for (i = 0; i < ttm->num_pages; ++i) {
		cur_page = ttm->pages[i];
		if (likely(cur_page != NULL)) {
			ret = ttm_tt_set_page_caching(cur_page, c_state);
			if (unlikely(ret != 0))
				goto out_err;
		}
	}

	ttm->caching_state = c_state;

	return 0;

out_err:
	for (j = 0; j < i; ++j) {
		cur_page = ttm->pages[j];
		if (likely(cur_page != NULL)) {
			(void)ttm_tt_set_page_caching(cur_page,
						      ttm->caching_state);
		}
	}

	return ret;
}

int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
{
	enum ttm_caching_state state;

	if (placement & TTM_PL_FLAG_WC)
		state = tt_wc;
	else if (placement & TTM_PL_FLAG_UNCACHED)
		state = tt_uncached;
	else
		state = tt_cached;

	return ttm_tt_set_caching(ttm, state);
}

static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm)
{
	int i;
	struct page *cur_page;
	struct ttm_backend *be = ttm->be;

	if (be)
		be->func->clear(be);
	(void)ttm_tt_set_caching(ttm, tt_cached);
	for (i = 0; i < ttm->num_pages; ++i) {
		cur_page = ttm->pages[i];
		ttm->pages[i] = NULL;
		if (cur_page) {
			if (page_count(cur_page) != 1)
				printk(KERN_ERR TTM_PFX
				       "Erroneous page count. "
				       "Leaking pages.\n");
			ttm_mem_global_free_page(ttm->glob->mem_glob,
						 cur_page);
			__free_page(cur_page);
		}
	}
	ttm->state = tt_unpopulated;
	ttm->first_himem_page = ttm->num_pages;
	ttm->last_lomem_page = -1;
}

void ttm_tt_destroy(struct ttm_tt *ttm)
{
	struct ttm_backend *be;

	if (unlikely(ttm == NULL))
		return;

	be = ttm->be;
	if (likely(be != NULL)) {
		be->func->destroy(be);
		ttm->be = NULL;
	}

	if (likely(ttm->pages != NULL)) {
		if (ttm->page_flags & TTM_PAGE_FLAG_USER)
			ttm_tt_free_user_pages(ttm);
		else
			ttm_tt_free_alloced_pages(ttm);

		ttm_tt_free_page_directory(ttm);
	}

	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTANT_SWAP) &&
	    ttm->swap_storage)
		fput(ttm->swap_storage);

	kfree(ttm);
}

int ttm_tt_set_user(struct ttm_tt *ttm,
		    struct task_struct *tsk,
		    unsigned long start, unsigned long num_pages)
{
	struct mm_struct *mm = tsk->mm;
	int ret;
	int write = (ttm->page_flags & TTM_PAGE_FLAG_WRITE) != 0;
	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;

	BUG_ON(num_pages != ttm->num_pages);
	BUG_ON((ttm->page_flags & TTM_PAGE_FLAG_USER) == 0);

	/**
	 * Account user pages as lowmem pages for now.
	 */

	ret = ttm_mem_global_alloc(mem_glob, num_pages * PAGE_SIZE,
				   false, false);
	if (unlikely(ret != 0))
		return ret;

	down_read(&mm->mmap_sem);
	ret = get_user_pages(tsk, mm, start, num_pages,
			     write, 0, ttm->pages, NULL);
	up_read(&mm->mmap_sem);

	if (ret != num_pages && write) {
		ttm_tt_free_user_pages(ttm);
		ttm_mem_global_free(mem_glob, num_pages * PAGE_SIZE);
		return -ENOMEM;
	}

	ttm->tsk = tsk;
	ttm->start = start;
	ttm->state = tt_unbound;

	return 0;
}

struct ttm_tt *ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
			     uint32_t page_flags, struct page *dummy_read_page)
{
	struct ttm_bo_driver *bo_driver = bdev->driver;
	struct ttm_tt *ttm;

	if (!bo_driver)
		return NULL;

	ttm = kzalloc(sizeof(*ttm), GFP_KERNEL);
	if (!ttm)
		return NULL;

	ttm->glob = bdev->glob;
	ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	ttm->first_himem_page = ttm->num_pages;
	ttm->last_lomem_page = -1;
	ttm->caching_state = tt_cached;
	ttm->page_flags = page_flags;

	ttm->dummy_read_page = dummy_read_page;

	ttm_tt_alloc_page_directory(ttm);
	if (!ttm->pages) {
		ttm_tt_destroy(ttm);
		printk(KERN_ERR TTM_PFX "Failed allocating page table\n");
		return NULL;
	}
	ttm->be = bo_driver->create_ttm_backend_entry(bdev);
	if (!ttm->be) {
		ttm_tt_destroy(ttm);
		printk(KERN_ERR TTM_PFX "Failed creating ttm backend entry\n");
		return NULL;
	}
	ttm->state = tt_unpopulated;
	return ttm;
}

void ttm_tt_unbind(struct ttm_tt *ttm)
{
	int ret;
	struct ttm_backend *be = ttm->be;

	if (ttm->state == tt_bound) {
		ret = be->func->unbind(be);
		BUG_ON(ret);
		ttm->state = tt_unbound;
	}
}

int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
	int ret = 0;
	struct ttm_backend *be;

	if (!ttm)
		return -EINVAL;

	if (ttm->state == tt_bound)
		return 0;

	be = ttm->be;

	ret = ttm_tt_populate(ttm);
	if (ret)
		return ret;

	ret = be->func->bind(be, bo_mem);
	if (ret) {
		printk(KERN_ERR TTM_PFX "Couldn't bind backend.\n");
		return ret;
	}

	ttm->state = tt_bound;

	if (ttm->page_flags & TTM_PAGE_FLAG_USER)
		ttm->page_flags |= TTM_PAGE_FLAG_USER_DIRTY;
	return 0;
}
EXPORT_SYMBOL(ttm_tt_bind);

static int ttm_tt_swapin(struct ttm_tt *ttm)
{
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	void *from_virtual;
	void *to_virtual;
	int i;
	int ret;

	if (ttm->page_flags & TTM_PAGE_FLAG_USER) {
		ret = ttm_tt_set_user(ttm, ttm->tsk, ttm->start,
				      ttm->num_pages);
		if (unlikely(ret != 0))
			return ret;

		ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;
		return 0;
	}

	swap_storage = ttm->swap_storage;
	BUG_ON(swap_storage == NULL);

	swap_space = swap_storage->f_path.dentry->d_inode->i_mapping;

	for (i = 0; i < ttm->num_pages; ++i) {
		from_page = read_mapping_page(swap_space, i, NULL);
		if (IS_ERR(from_page))
			goto out_err;
		to_page = __ttm_tt_get_page(ttm, i);
		if (unlikely(to_page == NULL))
			goto out_err;

		preempt_disable();
		from_virtual = kmap_atomic(from_page, KM_USER0);
		to_virtual = kmap_atomic(to_page, KM_USER1);
		memcpy(to_virtual, from_virtual, PAGE_SIZE);
		kunmap_atomic(to_virtual, KM_USER1);
		kunmap_atomic(from_virtual, KM_USER0);
		preempt_enable();
		page_cache_release(from_page);
	}

	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTANT_SWAP))
		fput(swap_storage);
	ttm->swap_storage = NULL;
	ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;

	return 0;
out_err:
	ttm_tt_free_alloced_pages(ttm);
	return -ENOMEM;
}

int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistant_swap_storage)
{
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	void *from_virtual;
	void *to_virtual;
	int i;

	BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated);
	BUG_ON(ttm->caching_state != tt_cached);

	/*
	 * For user buffers, just unpin the pages, as there should be
	 * vma references.
	 */

	if (ttm->page_flags & TTM_PAGE_FLAG_USER) {
		ttm_tt_free_user_pages(ttm);
		ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
		ttm->swap_storage = NULL;
		return 0;
	}

	if (!persistant_swap_storage) {
		swap_storage = shmem_file_setup("ttm swap",
						ttm->num_pages << PAGE_SHIFT,
						0);
		if (unlikely(IS_ERR(swap_storage))) {
			printk(KERN_ERR "Failed allocating swap storage.\n");
			return -ENOMEM;
		}
	} else
		swap_storage = persistant_swap_storage;

	swap_space = swap_storage->f_path.dentry->d_inode->i_mapping;

	for (i = 0; i < ttm->num_pages; ++i) {
		from_page = ttm->pages[i];
		if (unlikely(from_page == NULL))
			continue;
		to_page = read_mapping_page(swap_space, i, NULL);
		/* read_mapping_page() reports failure with ERR_PTR(), not NULL */
		if (IS_ERR(to_page))
			goto out_err;

		preempt_disable();
		from_virtual = kmap_atomic(from_page, KM_USER0);
		to_virtual = kmap_atomic(to_page, KM_USER1);
		memcpy(to_virtual, from_virtual, PAGE_SIZE);
		kunmap_atomic(to_virtual, KM_USER1);
		kunmap_atomic(from_virtual, KM_USER0);
		preempt_enable();
		set_page_dirty(to_page);
		mark_page_accessed(to_page);
		page_cache_release(to_page);
	}

	ttm_tt_free_alloced_pages(ttm);
	ttm->swap_storage = swap_storage;
	ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
	if (persistant_swap_storage)
		ttm->page_flags |= TTM_PAGE_FLAG_PERSISTANT_SWAP;

	return 0;
out_err:
	if (!persistant_swap_storage)
		fput(swap_storage);

	return -ENOMEM;
}