/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */
#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/swap.h>
#include "ttm/ttm_module.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"

static int ttm_tt_swapin(struct ttm_tt *ttm);
#if defined(CONFIG_X86)
static void ttm_tt_clflush_page(struct page *page)
{
	uint8_t *page_virtual;
	unsigned int i;

	if (unlikely(page == NULL))
		return;

	page_virtual = kmap_atomic(page, KM_USER0);

	for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
		clflush(page_virtual + i);

	kunmap_atomic(page_virtual, KM_USER0);
}
static void ttm_tt_cache_flush_clflush(struct page *pages[],
				       unsigned long num_pages)
{
	unsigned long i;

	mb();
	for (i = 0; i < num_pages; ++i)
		ttm_tt_clflush_page(*pages++);
	mb();
}
#elif !defined(__powerpc__)
static void ttm_tt_ipi_handler(void *null)
{
	;
}
#endif
void ttm_tt_cache_flush(struct page *pages[], unsigned long num_pages)
{

#if defined(CONFIG_X86)
	if (cpu_has_clflush) {
		ttm_tt_cache_flush_clflush(pages, num_pages);
		return;
	}
#elif defined(__powerpc__)
	unsigned long i;

	for (i = 0; i < num_pages; ++i) {
		struct page *page = pages[i];
		void *page_virtual;

		if (unlikely(page == NULL))
			continue;

		page_virtual = kmap_atomic(page, KM_USER0);
		flush_dcache_range((unsigned long) page_virtual,
				   (unsigned long) page_virtual + PAGE_SIZE);
		kunmap_atomic(page_virtual, KM_USER0);
	}
#else
	if (on_each_cpu(ttm_tt_ipi_handler, NULL, 1) != 0)
		printk(KERN_ERR TTM_PFX
		       "Timed out waiting for drm cache flush.\n");
#endif
}
/**
 * Allocates storage for pointers to the pages that back the ttm.
 *
 * Uses kmalloc if possible. Otherwise falls back to vmalloc.
 */
static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
{
	unsigned long size = ttm->num_pages * sizeof(*ttm->pages);
	ttm->pages = NULL;

	if (size <= PAGE_SIZE)
		ttm->pages = kzalloc(size, GFP_KERNEL);

	if (!ttm->pages) {
		ttm->pages = vmalloc_user(size);
		if (ttm->pages)
			ttm->page_flags |= TTM_PAGE_FLAG_VMALLOC;
	}
}
static void ttm_tt_free_page_directory(struct ttm_tt *ttm)
{
	if (ttm->page_flags & TTM_PAGE_FLAG_VMALLOC) {
		vfree(ttm->pages);
		ttm->page_flags &= ~TTM_PAGE_FLAG_VMALLOC;
	} else {
		kfree(ttm->pages);
	}
	ttm->pages = NULL;
}
static struct page *ttm_tt_alloc_page(unsigned page_flags)
{
	gfp_t gfp_flags = GFP_HIGHUSER;

	if (page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
		gfp_flags |= __GFP_ZERO;

	if (page_flags & TTM_PAGE_FLAG_DMA32)
		gfp_flags |= __GFP_DMA32;

	return alloc_page(gfp_flags);
}
static void ttm_tt_free_user_pages(struct ttm_tt *ttm)
{
	int write;
	int dirty;
	struct page *page;
	int i;
	struct ttm_backend *be = ttm->be;

	BUG_ON(!(ttm->page_flags & TTM_PAGE_FLAG_USER));
	write = ((ttm->page_flags & TTM_PAGE_FLAG_WRITE) != 0);
	dirty = ((ttm->page_flags & TTM_PAGE_FLAG_USER_DIRTY) != 0);

	if (be)
		be->func->clear(be);

	for (i = 0; i < ttm->num_pages; ++i) {
		page = ttm->pages[i];
		if (page == NULL)
			continue;

		if (page == ttm->dummy_read_page) {
			BUG_ON(write);
			continue;
		}

		if (write && dirty && !PageReserved(page))
			set_page_dirty_lock(page);

		ttm->pages[i] = NULL;
		ttm_mem_global_free(ttm->bdev->mem_glob, PAGE_SIZE, false);
		put_page(page);
	}
	ttm->state = tt_unpopulated;
	ttm->first_himem_page = ttm->num_pages;
	ttm->last_lomem_page = -1;
}
static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index)
{
	struct page *p;
	struct ttm_bo_device *bdev = ttm->bdev;
	struct ttm_mem_global *mem_glob = bdev->mem_glob;
	int ret;

	while (NULL == (p = ttm->pages[index])) {
		p = ttm_tt_alloc_page(ttm->page_flags);

		if (!p)
			return NULL;

		if (PageHighMem(p)) {
			ret =
			    ttm_mem_global_alloc(mem_glob, PAGE_SIZE,
						 false, false, true);
			if (unlikely(ret != 0))
				goto out_err;
			ttm->pages[--ttm->first_himem_page] = p;
		} else {
			ret =
			    ttm_mem_global_alloc(mem_glob, PAGE_SIZE,
						 false, false, false);
			if (unlikely(ret != 0))
				goto out_err;
			ttm->pages[++ttm->last_lomem_page] = p;
		}
	}
	return p;
out_err:
	put_page(p);
	return NULL;
}
struct page *ttm_tt_get_page(struct ttm_tt *ttm, int index)
{
	int ret;

	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
		ret = ttm_tt_swapin(ttm);
		if (unlikely(ret != 0))
			return NULL;
	}
	return __ttm_tt_get_page(ttm, index);
}
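/*
 * Illustrative sketch (hypothetical fault-handler usage, not from this
 * file): ttm_tt_get_page() is the safe per-page lookup, since it swaps
 * the ttm back in first when TTM_PAGE_FLAG_SWAPPED is set. Assuming
 * "page_offset" has been computed from the faulting address:
 *
 *	struct page *page = ttm_tt_get_page(ttm, page_offset);
 *	if (unlikely(page == NULL))
 *		return VM_FAULT_OOM;
 */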
int ttm_tt_populate(struct ttm_tt *ttm)
{
	struct page *page;
	unsigned long i;
	struct ttm_backend *be;
	int ret;

	if (ttm->state != tt_unpopulated)
		return 0;

	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
		ret = ttm_tt_swapin(ttm);
		if (unlikely(ret != 0))
			return ret;
	}

	be = ttm->be;

	for (i = 0; i < ttm->num_pages; ++i) {
		page = __ttm_tt_get_page(ttm, i);
		if (!page)
			return -ENOMEM;
	}

	be->func->populate(be, ttm->num_pages, ttm->pages,
			   ttm->dummy_read_page);
	ttm->state = tt_unbound;
	return 0;
}
#ifdef CONFIG_X86
static inline int ttm_tt_set_page_caching(struct page *p,
					  enum ttm_caching_state c_state)
{
	if (PageHighMem(p))
		return 0;

	switch (c_state) {
	case tt_cached:
		return set_pages_wb(p, 1);
	case tt_wc:
		return set_memory_wc((unsigned long) page_address(p), 1);
	default:
		return set_pages_uc(p, 1);
	}
}
#else /* CONFIG_X86 */
static inline int ttm_tt_set_page_caching(struct page *p,
					  enum ttm_caching_state c_state)
{
	return 0;
}
#endif /* CONFIG_X86 */
/*
 * Change caching policy for the linear kernel map
 * for range of pages in a ttm.
 */
static int ttm_tt_set_caching(struct ttm_tt *ttm,
			      enum ttm_caching_state c_state)
{
	int i, j;
	struct page *cur_page;
	int ret;

	if (ttm->caching_state == c_state)
		return 0;

	if (c_state != tt_cached) {
		ret = ttm_tt_populate(ttm);
		if (unlikely(ret != 0))
			return ret;
	}

	if (ttm->caching_state == tt_cached)
		ttm_tt_cache_flush(ttm->pages, ttm->num_pages);

	for (i = 0; i < ttm->num_pages; ++i) {
		cur_page = ttm->pages[i];
		if (likely(cur_page != NULL)) {
			ret = ttm_tt_set_page_caching(cur_page, c_state);
			if (unlikely(ret != 0))
				goto out_err;
		}
	}

	ttm->caching_state = c_state;

	return 0;

out_err:
	for (j = 0; j < i; ++j) {
		cur_page = ttm->pages[j];
		if (likely(cur_page != NULL)) {
			(void)ttm_tt_set_page_caching(cur_page,
						      ttm->caching_state);
		}
	}

	return ret;
}
int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
{
	enum ttm_caching_state state;

	if (placement & TTM_PL_FLAG_WC)
		state = tt_wc;
	else if (placement & TTM_PL_FLAG_UNCACHED)
		state = tt_uncached;
	else
		state = tt_cached;

	return ttm_tt_set_caching(ttm, state);
}
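/*
 * Illustrative sketch (hypothetical caller, not from this file): after
 * validating a buffer into a write-combined placement, a driver would
 * sync the kernel map caching in one call:
 *
 *	ret = ttm_tt_set_placement_caching(ttm, mem->placement);
 *	if (unlikely(ret != 0))
 *		return ret;
 *
 * A placement with neither TTM_PL_FLAG_WC nor TTM_PL_FLAG_UNCACHED set
 * maps to tt_cached.
 */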
static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm)
{
	int i;
	struct page *cur_page;
	struct ttm_backend *be = ttm->be;

	if (be)
		be->func->clear(be);
	(void)ttm_tt_set_caching(ttm, tt_cached);
	for (i = 0; i < ttm->num_pages; ++i) {
		cur_page = ttm->pages[i];
		ttm->pages[i] = NULL;
		if (cur_page) {
			if (page_count(cur_page) != 1)
				printk(KERN_ERR TTM_PFX
				       "Erroneous page count. "
				       "Leaking pages.\n");
			ttm_mem_global_free(ttm->bdev->mem_glob, PAGE_SIZE,
					    PageHighMem(cur_page));
			__free_page(cur_page);
		}
	}
	ttm->state = tt_unpopulated;
	ttm->first_himem_page = ttm->num_pages;
	ttm->last_lomem_page = -1;
}
void ttm_tt_destroy(struct ttm_tt *ttm)
{
	struct ttm_backend *be;

	if (unlikely(ttm == NULL))
		return;

	be = ttm->be;
	if (likely(be != NULL)) {
		be->func->destroy(be);
		ttm->be = NULL;
	}

	if (likely(ttm->pages != NULL)) {
		if (ttm->page_flags & TTM_PAGE_FLAG_USER)
			ttm_tt_free_user_pages(ttm);
		else
			ttm_tt_free_alloced_pages(ttm);

		ttm_tt_free_page_directory(ttm);
	}

	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTANT_SWAP) &&
	    ttm->swap_storage)
		fput(ttm->swap_storage);

	kfree(ttm);
}
int ttm_tt_set_user(struct ttm_tt *ttm,
		    struct task_struct *tsk,
		    unsigned long start, unsigned long num_pages)
{
	struct mm_struct *mm = tsk->mm;
	int ret;
	int write = (ttm->page_flags & TTM_PAGE_FLAG_WRITE) != 0;
	struct ttm_mem_global *mem_glob = ttm->bdev->mem_glob;

	BUG_ON(num_pages != ttm->num_pages);
	BUG_ON((ttm->page_flags & TTM_PAGE_FLAG_USER) == 0);

	/**
	 * Account user pages as lowmem pages for now.
	 */

	ret = ttm_mem_global_alloc(mem_glob, num_pages * PAGE_SIZE,
				   false, false, false);
	if (unlikely(ret != 0))
		return ret;

	down_read(&mm->mmap_sem);
	ret = get_user_pages(tsk, mm, start, num_pages,
			     write, 0, ttm->pages, NULL);
	up_read(&mm->mmap_sem);

	if (ret != num_pages && write) {
		ttm_tt_free_user_pages(ttm);
		ttm_mem_global_free(mem_glob, num_pages * PAGE_SIZE, false);
		return -ENOMEM;
	}

	ttm->tsk = tsk;
	ttm->start = start;
	ttm->state = tt_unbound;

	return 0;
}
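/*
 * Illustrative sketch (hypothetical caller, not from this file): a
 * driver wrapping an existing user-space buffer creates the ttm with
 * TTM_PAGE_FLAG_USER (plus TTM_PAGE_FLAG_WRITE for writable mappings)
 * and then pins the pages. "user_addr" is an assumed, page-aligned
 * user virtual address:
 *
 *	ret = ttm_tt_set_user(ttm, current, user_addr, ttm->num_pages);
 *	if (unlikely(ret != 0))
 *		return ret;
 */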
struct ttm_tt *ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
			     uint32_t page_flags, struct page *dummy_read_page)
{
	struct ttm_bo_driver *bo_driver = bdev->driver;
	struct ttm_tt *ttm;

	if (!bo_driver)
		return NULL;

	ttm = kzalloc(sizeof(*ttm), GFP_KERNEL);
	if (!ttm)
		return NULL;

	ttm->bdev = bdev;

	ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	ttm->first_himem_page = ttm->num_pages;
	ttm->last_lomem_page = -1;
	ttm->caching_state = tt_cached;
	ttm->page_flags = page_flags;

	ttm->dummy_read_page = dummy_read_page;

	ttm_tt_alloc_page_directory(ttm);
	if (ttm->pages == NULL) {
		ttm_tt_destroy(ttm);
		printk(KERN_ERR TTM_PFX "Failed allocating page table\n");
		return NULL;
	}
	ttm->be = bo_driver->create_ttm_backend_entry(bdev);
	if (ttm->be == NULL) {
		ttm_tt_destroy(ttm);
		printk(KERN_ERR TTM_PFX "Failed creating ttm backend entry\n");
		return NULL;
	}
	ttm->state = tt_unpopulated;
	return ttm;
}
void ttm_tt_unbind(struct ttm_tt *ttm)
{
	int ret;
	struct ttm_backend *be = ttm->be;

	if (ttm->state == tt_bound) {
		ret = be->func->unbind(be);
		BUG_ON(ret);
		ttm->state = tt_unbound;
	}
}
int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
	int ret = 0;
	struct ttm_backend *be;

	if (!ttm)
		return -EINVAL;

	if (ttm->state == tt_bound)
		return 0;

	be = ttm->be;

	ret = ttm_tt_populate(ttm);
	if (ret)
		return ret;

	ret = be->func->bind(be, bo_mem);
	if (ret) {
		printk(KERN_ERR TTM_PFX "Couldn't bind backend.\n");
		return ret;
	}

	ttm->state = tt_bound;

	if (ttm->page_flags & TTM_PAGE_FLAG_USER)
		ttm->page_flags |= TTM_PAGE_FLAG_USER_DIRTY;
	return 0;
}
EXPORT_SYMBOL(ttm_tt_bind);
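/*
 * Illustrative sketch (hypothetical driver code, not from this file):
 * the usual lifecycle pairs ttm_tt_create() with ttm_tt_bind() once a
 * placement is known, and ttm_tt_unbind()/ttm_tt_destroy() on teardown.
 * "bdev", "num_pages", "bo_mem" and "dummy_read_page" are assumed to be
 * at hand:
 *
 *	struct ttm_tt *ttm;
 *	int ret;
 *
 *	ttm = ttm_tt_create(bdev, num_pages << PAGE_SHIFT,
 *			    0, dummy_read_page);
 *	if (unlikely(ttm == NULL))
 *		return -ENOMEM;
 *
 *	ret = ttm_tt_bind(ttm, bo_mem);
 *	if (unlikely(ret != 0)) {
 *		ttm_tt_destroy(ttm);
 *		return ret;
 *	}
 *	...
 *	ttm_tt_unbind(ttm);
 *	ttm_tt_destroy(ttm);
 */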
static int ttm_tt_swapin(struct ttm_tt *ttm)
{
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	void *from_virtual;
	void *to_virtual;
	int i;
	int ret;

	if (ttm->page_flags & TTM_PAGE_FLAG_USER) {
		ret = ttm_tt_set_user(ttm, ttm->tsk, ttm->start,
				      ttm->num_pages);
		if (unlikely(ret != 0))
			return ret;

		ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;
		return 0;
	}

	swap_storage = ttm->swap_storage;
	BUG_ON(swap_storage == NULL);

	swap_space = swap_storage->f_path.dentry->d_inode->i_mapping;

	for (i = 0; i < ttm->num_pages; ++i) {
		from_page = read_mapping_page(swap_space, i, NULL);
		if (IS_ERR(from_page))
			goto out_err;
		to_page = __ttm_tt_get_page(ttm, i);
		if (unlikely(to_page == NULL))
			goto out_err;

		preempt_disable();
		from_virtual = kmap_atomic(from_page, KM_USER0);
		to_virtual = kmap_atomic(to_page, KM_USER1);
		memcpy(to_virtual, from_virtual, PAGE_SIZE);
		kunmap_atomic(to_virtual, KM_USER1);
		kunmap_atomic(from_virtual, KM_USER0);
		preempt_enable();
		page_cache_release(from_page);
	}

	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTANT_SWAP))
		fput(swap_storage);
	ttm->swap_storage = NULL;
	ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;

	return 0;
out_err:
	ttm_tt_free_alloced_pages(ttm);
	return -ENOMEM;
}
int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistant_swap_storage)
{
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	void *from_virtual;
	void *to_virtual;
	int i;

	BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated);
	BUG_ON(ttm->caching_state != tt_cached);

	/*
	 * For user buffers, just unpin the pages, as there should be
	 * vma references.
	 */

	if (ttm->page_flags & TTM_PAGE_FLAG_USER) {
		ttm_tt_free_user_pages(ttm);
		ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
		ttm->swap_storage = NULL;
		return 0;
	}

	if (!persistant_swap_storage) {
		swap_storage = shmem_file_setup("ttm swap",
						ttm->num_pages << PAGE_SHIFT,
						0);
		if (unlikely(IS_ERR(swap_storage))) {
			printk(KERN_ERR "Failed allocating swap storage.\n");
			return -ENOMEM;
		}
	} else
		swap_storage = persistant_swap_storage;

	swap_space = swap_storage->f_path.dentry->d_inode->i_mapping;

	for (i = 0; i < ttm->num_pages; ++i) {
		from_page = ttm->pages[i];
		if (unlikely(from_page == NULL))
			continue;
		to_page = read_mapping_page(swap_space, i, NULL);
		if (unlikely(IS_ERR(to_page)))
			goto out_err;

		preempt_disable();
		from_virtual = kmap_atomic(from_page, KM_USER0);
		to_virtual = kmap_atomic(to_page, KM_USER1);
		memcpy(to_virtual, from_virtual, PAGE_SIZE);
		kunmap_atomic(to_virtual, KM_USER1);
		kunmap_atomic(from_virtual, KM_USER0);
		preempt_enable();
		set_page_dirty(to_page);
		mark_page_accessed(to_page);
		page_cache_release(to_page);
	}

	ttm_tt_free_alloced_pages(ttm);
	ttm->swap_storage = swap_storage;
	ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
	if (persistant_swap_storage)
		ttm->page_flags |= TTM_PAGE_FLAG_PERSISTANT_SWAP;

	return 0;
out_err:
	if (!persistant_swap_storage)
		fput(swap_storage);

	return -ENOMEM;
}