/*
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 */
#include <ttm/ttm_bo_api.h>
#include <ttm/ttm_bo_driver.h>
#include <ttm/ttm_placement.h>
#include <ttm/ttm_module.h>
#include <ttm/ttm_page_alloc.h>
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/swiotlb.h>
#include "radeon_reg.h"
#include "radeon.h"
#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)

static int radeon_ttm_debugfs_init(struct radeon_device *rdev);
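/*
 * Upcast from the embedded TTM device back to the radeon_device that
 * owns it: bdev lives inside rdev->mman, which lives inside rdev.
 */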
static struct radeon_device *radeon_get_rdev(struct ttm_bo_device *bdev)
{
	struct radeon_mman *mman;
	struct radeon_device *rdev;

	mman = container_of(bdev, struct radeon_mman, bdev);
	rdev = container_of(mman, struct radeon_device, mman);
	return rdev;
}
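/*
 * Global memory accounting. TTM keeps one memory-accounting object and
 * one BO global object per system; drivers take references on them
 * through the DRM global item API so they stay alive while any device
 * uses them.
 */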
static int radeon_ttm_mem_global_init(struct drm_global_reference *ref)
{
	return ttm_mem_global_init(ref->object);
}
static void radeon_ttm_mem_global_release(struct drm_global_reference *ref)
{
	ttm_mem_global_release(ref->object);
}
static int radeon_ttm_global_init(struct radeon_device *rdev)
{
	struct drm_global_reference *global_ref;
	int r;

	rdev->mman.mem_global_referenced = false;
	global_ref = &rdev->mman.mem_global_ref;
	global_ref->global_type = DRM_GLOBAL_TTM_MEM;
	global_ref->size = sizeof(struct ttm_mem_global);
	global_ref->init = &radeon_ttm_mem_global_init;
	global_ref->release = &radeon_ttm_mem_global_release;
	r = drm_global_item_ref(global_ref);
	if (r != 0) {
		DRM_ERROR("Failed setting up TTM memory accounting "
			  "subsystem.\n");
		return r;
	}

	rdev->mman.bo_global_ref.mem_glob =
		rdev->mman.mem_global_ref.object;
	global_ref = &rdev->mman.bo_global_ref.ref;
	global_ref->global_type = DRM_GLOBAL_TTM_BO;
	global_ref->size = sizeof(struct ttm_bo_global);
	global_ref->init = &ttm_bo_global_init;
	global_ref->release = &ttm_bo_global_release;
	r = drm_global_item_ref(global_ref);
	if (r != 0) {
		DRM_ERROR("Failed setting up TTM BO subsystem.\n");
		drm_global_item_unref(&rdev->mman.mem_global_ref);
		return r;
	}

	rdev->mman.mem_global_referenced = true;
	return 0;
}
static void radeon_ttm_global_fini(struct radeon_device *rdev)
{
	if (rdev->mman.mem_global_referenced) {
		drm_global_item_unref(&rdev->mman.bo_global_ref.ref);
		drm_global_item_unref(&rdev->mman.mem_global_ref);
		rdev->mman.mem_global_referenced = false;
	}
}
static int radeon_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	return 0;
}
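/*
 * Describe each TTM memory pool to the core: system RAM, GTT (routed
 * through the AGP aperture with WC caching when AGP is active) and
 * on-board VRAM.
 */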
static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
				struct ttm_mem_type_manager *man)
{
	struct radeon_device *rdev;

	rdev = radeon_get_rdev(bdev);
	switch (type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_TT:
		man->func = &ttm_bo_manager_func;
		man->gpu_offset = rdev->mc.gtt_start;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
#if __OS_HAS_AGP
		if (rdev->flags & RADEON_IS_AGP) {
			if (!(drm_core_has_AGP(rdev->ddev) && rdev->ddev->agp)) {
				DRM_ERROR("AGP is not enabled for memory type %u\n",
					  (unsigned)type);
				return -EINVAL;
			}
			if (!rdev->ddev->agp->cant_use_aperture)
				man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
			man->available_caching = TTM_PL_FLAG_UNCACHED |
						 TTM_PL_FLAG_WC;
			man->default_caching = TTM_PL_FLAG_WC;
		}
#endif
		break;
	case TTM_PL_VRAM:
		/* "On-card" video ram */
		man->func = &ttm_bo_manager_func;
		man->gpu_offset = rdev->mc.vram_start;
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;
		break;
	default:
		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}
	return 0;
}
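/*
 * Choose eviction placements: VRAM buffers go to GTT when the copy ring
 * is up (so the blit path can migrate them), otherwise straight to
 * system RAM. Anything that is not a radeon BO gets a plain
 * system-memory placement.
 */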
static void radeon_evict_flags(struct ttm_buffer_object *bo,
				struct ttm_placement *placement)
{
	struct radeon_bo *rbo;
	static u32 placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;

	if (!radeon_ttm_bo_is_radeon_bo(bo)) {
		placement->fpfn = 0;
		placement->lpfn = 0;
		placement->placement = &placements;
		placement->busy_placement = &placements;
		placement->num_placement = 1;
		placement->num_busy_placement = 1;
		return;
	}
	rbo = container_of(bo, struct radeon_bo, tbo);
	switch (bo->mem.mem_type) {
	case TTM_PL_VRAM:
		if (rbo->rdev->ring[radeon_copy_ring_index(rbo->rdev)].ready == false)
			radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
		else
			radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
		break;
	case TTM_PL_TT:
	default:
		radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
	}
	*placement = rbo->placement;
}
static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	return 0;
}
static void radeon_move_null(struct ttm_buffer_object *bo,
			     struct ttm_mem_reg *new_mem)
{
	struct ttm_mem_reg *old_mem = &bo->mem;

	BUG_ON(old_mem->mm_node != NULL);
	*old_mem = *new_mem;
	new_mem->mm_node = NULL;
}
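/*
 * Migrate a buffer with the GPU copy ring. TTM page offsets are
 * converted into GPU addresses inside the VRAM/GTT apertures before the
 * blit is queued and fenced.
 */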
static int radeon_move_blit(struct ttm_buffer_object *bo,
			bool evict, bool no_wait_gpu,
			struct ttm_mem_reg *new_mem,
			struct ttm_mem_reg *old_mem)
{
	struct radeon_device *rdev;
	uint64_t old_start, new_start;
	struct radeon_fence *fence;
	int r, ridx;

	rdev = radeon_get_rdev(bo->bdev);
	ridx = radeon_copy_ring_index(rdev);
	old_start = (u64)old_mem->start << PAGE_SHIFT;
	new_start = (u64)new_mem->start << PAGE_SHIFT;

	switch (old_mem->mem_type) {
	case TTM_PL_VRAM:
		old_start += rdev->mc.vram_start;
		break;
	case TTM_PL_TT:
		old_start += rdev->mc.gtt_start;
		break;
	default:
		DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
		return -EINVAL;
	}
	switch (new_mem->mem_type) {
	case TTM_PL_VRAM:
		new_start += rdev->mc.vram_start;
		break;
	case TTM_PL_TT:
		new_start += rdev->mc.gtt_start;
		break;
	default:
		DRM_ERROR("Unknown placement %d\n", new_mem->mem_type);
		return -EINVAL;
	}
	if (!rdev->ring[ridx].ready) {
		DRM_ERROR("Trying to move memory with ring turned off.\n");
		return -EINVAL;
	}

	BUILD_BUG_ON((PAGE_SIZE % RADEON_GPU_PAGE_SIZE) != 0);

	/* sync other rings */
	fence = bo->sync_obj;
	r = radeon_copy(rdev, old_start, new_start,
			new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE), /* GPU pages */
			&fence);
	/* FIXME: handle copy error */
	r = ttm_bo_move_accel_cleanup(bo, (void *)fence,
				      evict, no_wait_gpu, new_mem);
	radeon_fence_unref(&fence);
	return r;
}
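/*
 * VRAM -> system moves are done in two hops: blit into a temporary GTT
 * placement first, then let TTM unbind it into system pages.
 */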
static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
				bool evict, bool interruptible,
				bool no_wait_gpu,
				struct ttm_mem_reg *new_mem)
{
	struct radeon_device *rdev;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg tmp_mem;
	u32 placements;
	struct ttm_placement placement;
	int r;

	rdev = radeon_get_rdev(bo->bdev);
	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	placement.fpfn = 0;
	placement.lpfn = 0;
	placement.num_placement = 1;
	placement.placement = &placements;
	placement.num_busy_placement = 1;
	placement.busy_placement = &placements;
	placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
	r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
			     interruptible, no_wait_gpu);
	if (unlikely(r)) {
		return r;
	}

	r = ttm_tt_set_placement_caching(bo->ttm, tmp_mem.placement);
	if (unlikely(r)) {
		goto out_cleanup;
	}

	r = ttm_tt_bind(bo->ttm, &tmp_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
	r = radeon_move_blit(bo, true, no_wait_gpu, &tmp_mem, old_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
	r = ttm_bo_move_ttm(bo, true, no_wait_gpu, new_mem);
out_cleanup:
	ttm_bo_mem_put(bo, &tmp_mem);
	return r;
}
static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
				bool evict, bool interruptible,
				bool no_wait_gpu,
				struct ttm_mem_reg *new_mem)
{
	struct radeon_device *rdev;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg tmp_mem;
	struct ttm_placement placement;
	u32 placements;
	int r;

	rdev = radeon_get_rdev(bo->bdev);
	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	placement.fpfn = 0;
	placement.lpfn = 0;
	placement.num_placement = 1;
	placement.placement = &placements;
	placement.num_busy_placement = 1;
	placement.busy_placement = &placements;
	placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
	r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
			     interruptible, no_wait_gpu);
	if (unlikely(r)) {
		return r;
	}
	r = ttm_bo_move_ttm(bo, true, no_wait_gpu, &tmp_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
	r = radeon_move_blit(bo, true, no_wait_gpu, new_mem, old_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
out_cleanup:
	ttm_bo_mem_put(bo, &tmp_mem);
	return r;
}
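/*
 * Top-level move hook: null moves for transitions that only need a
 * (re)bind, the blit path when a copy ring is available, and a CPU
 * memcpy fallback otherwise.
 */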
static int radeon_bo_move(struct ttm_buffer_object *bo,
			bool evict, bool interruptible,
			bool no_wait_gpu,
			struct ttm_mem_reg *new_mem)
{
	struct radeon_device *rdev;
	struct ttm_mem_reg *old_mem = &bo->mem;
	int r;

	rdev = radeon_get_rdev(bo->bdev);
	if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
		radeon_move_null(bo, new_mem);
		return 0;
	}
	if ((old_mem->mem_type == TTM_PL_TT &&
	     new_mem->mem_type == TTM_PL_SYSTEM) ||
	    (old_mem->mem_type == TTM_PL_SYSTEM &&
	     new_mem->mem_type == TTM_PL_TT)) {
		/* bind is enough */
		radeon_move_null(bo, new_mem);
		return 0;
	}
	if (!rdev->ring[radeon_copy_ring_index(rdev)].ready ||
	    rdev->asic->copy.copy == NULL) {
		/* use memcpy */
		goto memcpy;
	}

	if (old_mem->mem_type == TTM_PL_VRAM &&
	    new_mem->mem_type == TTM_PL_SYSTEM) {
		r = radeon_move_vram_ram(bo, evict, interruptible,
					no_wait_gpu, new_mem);
	} else if (old_mem->mem_type == TTM_PL_SYSTEM &&
		   new_mem->mem_type == TTM_PL_VRAM) {
		r = radeon_move_ram_vram(bo, evict, interruptible,
					no_wait_gpu, new_mem);
	} else {
		r = radeon_move_blit(bo, evict, no_wait_gpu, new_mem, old_mem);
	}

	if (r) {
memcpy:
		r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
	}
	return r;
}
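/*
 * Fill in the bus placement so TTM can CPU-map the buffer: nothing for
 * system RAM, the AGP aperture for GTT on AGP systems, and the PCI
 * aperture (visible VRAM only) for VRAM.
 */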
static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct radeon_device *rdev = radeon_get_rdev(bdev);

	mem->bus.addr = NULL;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	mem->bus.is_iomem = false;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		/* system memory */
		return 0;
	case TTM_PL_TT:
#if __OS_HAS_AGP
		if (rdev->flags & RADEON_IS_AGP) {
			/* RADEON_IS_AGP is set only if AGP is active */
			mem->bus.offset = mem->start << PAGE_SHIFT;
			mem->bus.base = rdev->mc.agp_base;
			mem->bus.is_iomem = !rdev->ddev->agp->cant_use_aperture;
		}
#endif
		break;
	case TTM_PL_VRAM:
		mem->bus.offset = mem->start << PAGE_SHIFT;
		/* check if it's visible */
		if ((mem->bus.offset + mem->bus.size) > rdev->mc.visible_vram_size)
			return -EINVAL;
		mem->bus.base = rdev->mc.aper_base;
		mem->bus.is_iomem = true;
#ifdef __alpha__
		/*
		 * Alpha: use bus.addr to hold the ioremap() return,
		 * so we can modify bus.base below.
		 */
		if (mem->placement & TTM_PL_FLAG_WC)
			mem->bus.addr =
				ioremap_wc(mem->bus.base + mem->bus.offset,
					   mem->bus.size);
		else
			mem->bus.addr =
				ioremap_nocache(mem->bus.base + mem->bus.offset,
						mem->bus.size);

		/*
		 * Alpha: Use just the bus offset plus
		 * the hose/domain memory base for bus.base.
		 * It then can be used to build PTEs for VRAM
		 * access, as done in ttm_bo_vm_fault().
		 */
		mem->bus.base = (mem->bus.base & 0x0ffffffffUL) +
			rdev->ddev->hose->dense_mem_base;
#endif
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
static void radeon_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
}
static int radeon_sync_obj_wait(void *sync_obj, bool lazy, bool interruptible)
{
	return radeon_fence_wait((struct radeon_fence *)sync_obj, interruptible);
}

static int radeon_sync_obj_flush(void *sync_obj)
{
	return 0;
}

static void radeon_sync_obj_unref(void **sync_obj)
{
	radeon_fence_unref((struct radeon_fence **)sync_obj);
}

static void *radeon_sync_obj_ref(void *sync_obj)
{
	return radeon_fence_ref((struct radeon_fence *)sync_obj);
}

static bool radeon_sync_obj_signaled(void *sync_obj)
{
	return radeon_fence_signaled((struct radeon_fence *)sync_obj);
}
/*
 * TTM backend functions.
 */
struct radeon_ttm_tt {
	struct ttm_dma_tt		ttm;
	struct radeon_device		*rdev;
	u64				offset;
};
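/*
 * GTT binding backend: wires TTM's page and DMA-address arrays into the
 * radeon GART at the buffer's GTT offset.
 */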
static int radeon_ttm_backend_bind(struct ttm_tt *ttm,
				   struct ttm_mem_reg *bo_mem)
{
	struct radeon_ttm_tt *gtt = (void *)ttm;
	int r;

	gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
	if (!ttm->num_pages) {
		WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
		     ttm->num_pages, bo_mem, ttm);
	}
	r = radeon_gart_bind(gtt->rdev, gtt->offset,
			     ttm->num_pages, ttm->pages, gtt->ttm.dma_address);
	if (r) {
		DRM_ERROR("failed to bind %lu pages at 0x%08X\n",
			  ttm->num_pages, (unsigned)gtt->offset);
		return r;
	}
	return 0;
}
static int radeon_ttm_backend_unbind(struct ttm_tt *ttm)
{
	struct radeon_ttm_tt *gtt = (void *)ttm;

	radeon_gart_unbind(gtt->rdev, gtt->offset, ttm->num_pages);
	return 0;
}
static void radeon_ttm_backend_destroy(struct ttm_tt *ttm)
{
	struct radeon_ttm_tt *gtt = (void *)ttm;

	ttm_dma_tt_fini(&gtt->ttm);
	kfree(gtt);
}
static struct ttm_backend_func radeon_backend_func = {
	.bind = &radeon_ttm_backend_bind,
	.unbind = &radeon_ttm_backend_unbind,
	.destroy = &radeon_ttm_backend_destroy,
};
static struct ttm_tt *radeon_ttm_tt_create(struct ttm_bo_device *bdev,
				    unsigned long size, uint32_t page_flags,
				    struct page *dummy_read_page)
{
	struct radeon_device *rdev;
	struct radeon_ttm_tt *gtt;

	rdev = radeon_get_rdev(bdev);
#if __OS_HAS_AGP
	if (rdev->flags & RADEON_IS_AGP) {
		return ttm_agp_tt_create(bdev, rdev->ddev->agp->bridge,
					 size, page_flags, dummy_read_page);
	}
#endif

	gtt = kzalloc(sizeof(struct radeon_ttm_tt), GFP_KERNEL);
	if (gtt == NULL) {
		return NULL;
	}
	gtt->ttm.ttm.func = &radeon_backend_func;
	gtt->rdev = rdev;
	if (ttm_dma_tt_init(&gtt->ttm, bdev, size, page_flags, dummy_read_page)) {
		kfree(gtt);
		return NULL;
	}
	return &gtt->ttm.ttm;
}
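/*
 * Populate backs the ttm_tt with pages and DMA mappings. Imported
 * (SG/prime) buffers arrive with their pages already provided; AGP and
 * swiotlb systems use their dedicated pools, everyone else maps page by
 * page through the PCI DMA API.
 */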
static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
{
	struct radeon_device *rdev;
	struct radeon_ttm_tt *gtt = (void *)ttm;
	unsigned i;
	int r;
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (ttm->state != tt_unpopulated)
		return 0;

	if (slave && ttm->sg) {
		drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
						 gtt->ttm.dma_address, ttm->num_pages);
		ttm->state = tt_unbound;
		return 0;
	}

	rdev = radeon_get_rdev(ttm->bdev);
#if __OS_HAS_AGP
	if (rdev->flags & RADEON_IS_AGP) {
		return ttm_agp_tt_populate(ttm);
	}
#endif

#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl()) {
		return ttm_dma_populate(&gtt->ttm, rdev->dev);
	}
#endif

	r = ttm_pool_populate(ttm);
	if (r) {
		return r;
	}

	for (i = 0; i < ttm->num_pages; i++) {
		gtt->ttm.dma_address[i] = pci_map_page(rdev->pdev, ttm->pages[i],
						       0, PAGE_SIZE,
						       PCI_DMA_BIDIRECTIONAL);
		if (pci_dma_mapping_error(rdev->pdev, gtt->ttm.dma_address[i])) {
			while (--i) {
				pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i],
					       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
				gtt->ttm.dma_address[i] = 0;
			}
			ttm_pool_unpopulate(ttm);
			return -EFAULT;
		}
	}
	return 0;
}
static void radeon_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
	struct radeon_device *rdev;
	struct radeon_ttm_tt *gtt = (void *)ttm;
	unsigned i;
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (slave)
		return;

	rdev = radeon_get_rdev(ttm->bdev);
#if __OS_HAS_AGP
	if (rdev->flags & RADEON_IS_AGP) {
		ttm_agp_tt_unpopulate(ttm);
		return;
	}
#endif

#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl()) {
		ttm_dma_unpopulate(&gtt->ttm, rdev->dev);
		return;
	}
#endif

	for (i = 0; i < ttm->num_pages; i++) {
		if (gtt->ttm.dma_address[i]) {
			pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i],
				       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
		}
	}

	ttm_pool_unpopulate(ttm);
}
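/* Driver vtable handed to ttm_bo_device_init() below. */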
static struct ttm_bo_driver radeon_bo_driver = {
	.ttm_tt_create = &radeon_ttm_tt_create,
	.ttm_tt_populate = &radeon_ttm_tt_populate,
	.ttm_tt_unpopulate = &radeon_ttm_tt_unpopulate,
	.invalidate_caches = &radeon_invalidate_caches,
	.init_mem_type = &radeon_init_mem_type,
	.evict_flags = &radeon_evict_flags,
	.move = &radeon_bo_move,
	.verify_access = &radeon_verify_access,
	.sync_obj_signaled = &radeon_sync_obj_signaled,
	.sync_obj_wait = &radeon_sync_obj_wait,
	.sync_obj_flush = &radeon_sync_obj_flush,
	.sync_obj_unref = &radeon_sync_obj_unref,
	.sync_obj_ref = &radeon_sync_obj_ref,
	.move_notify = &radeon_bo_move_notify,
	.fault_reserve_notify = &radeon_bo_fault_reserve_notify,
	.io_mem_reserve = &radeon_ttm_io_mem_reserve,
	.io_mem_free = &radeon_ttm_io_mem_free,
};
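/*
 * Bring up TTM for this device: the global objects, the BO device, the
 * VRAM and GTT heaps, and a small pinned VRAM buffer covering the
 * stolen VGA memory.
 */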
int radeon_ttm_init(struct radeon_device *rdev)
{
	int r;

	r = radeon_ttm_global_init(rdev);
	if (r) {
		return r;
	}
	/* No other user of the address space, so set it to 0 */
	r = ttm_bo_device_init(&rdev->mman.bdev,
			       rdev->mman.bo_global_ref.ref.object,
			       &radeon_bo_driver, DRM_FILE_PAGE_OFFSET,
			       rdev->need_dma32);
	if (r) {
		DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
		return r;
	}
	rdev->mman.initialized = true;
	r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_VRAM,
				rdev->mc.real_vram_size >> PAGE_SHIFT);
	if (r) {
		DRM_ERROR("Failed initializing VRAM heap.\n");
		return r;
	}
	r = radeon_bo_create(rdev, 256 * 1024, PAGE_SIZE, true,
			     RADEON_GEM_DOMAIN_VRAM,
			     NULL, &rdev->stollen_vga_memory);
	if (r) {
		return r;
	}
	r = radeon_bo_reserve(rdev->stollen_vga_memory, false);
	if (r)
		return r;
	r = radeon_bo_pin(rdev->stollen_vga_memory, RADEON_GEM_DOMAIN_VRAM, NULL);
	radeon_bo_unreserve(rdev->stollen_vga_memory);
	if (r) {
		radeon_bo_unref(&rdev->stollen_vga_memory);
		return r;
	}
	DRM_INFO("radeon: %uM of VRAM memory ready\n",
		 (unsigned)(rdev->mc.real_vram_size / (1024 * 1024)));
	r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_TT,
				rdev->mc.gtt_size >> PAGE_SHIFT);
	if (r) {
		DRM_ERROR("Failed initializing GTT heap.\n");
		return r;
	}
	DRM_INFO("radeon: %uM of GTT memory ready.\n",
		 (unsigned)(rdev->mc.gtt_size / (1024 * 1024)));
	rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping;

	r = radeon_ttm_debugfs_init(rdev);
	if (r) {
		DRM_ERROR("Failed to init debugfs\n");
		return r;
	}
	return 0;
}
void radeon_ttm_fini(struct radeon_device *rdev)
{
	int r;

	if (!rdev->mman.initialized)
		return;
	if (rdev->stollen_vga_memory) {
		r = radeon_bo_reserve(rdev->stollen_vga_memory, false);
		if (r == 0) {
			radeon_bo_unpin(rdev->stollen_vga_memory);
			radeon_bo_unreserve(rdev->stollen_vga_memory);
		}
		radeon_bo_unref(&rdev->stollen_vga_memory);
	}
	ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_VRAM);
	ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_TT);
	ttm_bo_device_release(&rdev->mman.bdev);
	radeon_gart_fini(rdev);
	radeon_ttm_global_fini(rdev);
	rdev->mman.initialized = false;
	DRM_INFO("radeon: ttm finalized\n");
}
/* this should only be called at bootup or when userspace
 * isn't running */
void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
{
	struct ttm_mem_type_manager *man;

	if (!rdev->mman.initialized)
		return;

	man = &rdev->mman.bdev.man[TTM_PL_VRAM];
	/* this just adjusts TTM's idea of the size, which sets lpfn to the correct value */
	man->size = size >> PAGE_SHIFT;
}
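/*
 * Fault handling: wrap TTM's fault handler so each fault runs with
 * pm.mclk_lock held for read, serializing buffer faults against
 * memory-clock reclocking.
 */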
static struct vm_operations_struct radeon_ttm_vm_ops;
static const struct vm_operations_struct *ttm_vm_ops = NULL;

static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct ttm_buffer_object *bo;
	struct radeon_device *rdev;
	int r;

	bo = (struct ttm_buffer_object *)vma->vm_private_data;
	if (bo == NULL) {
		return VM_FAULT_NOPAGE;
	}
	rdev = radeon_get_rdev(bo->bdev);
	down_read(&rdev->pm.mclk_lock);
	r = ttm_vm_ops->fault(vma, vmf);
	up_read(&rdev->pm.mclk_lock);
	return r;
}
int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *file_priv;
	struct radeon_device *rdev;
	int r;

	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) {
		return drm_mmap(filp, vma);
	}

	file_priv = filp->private_data;
	rdev = file_priv->minor->dev->dev_private;
	if (rdev == NULL) {
		return -EINVAL;
	}

	r = ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
	if (unlikely(r != 0)) {
		return r;
	}

	if (unlikely(ttm_vm_ops == NULL)) {
		ttm_vm_ops = vma->vm_ops;
		radeon_ttm_vm_ops = *ttm_vm_ops;
		radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
	}
	vma->vm_ops = &radeon_ttm_vm_ops;
	return 0;
}
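/* Debugfs entries for the VRAM and GTT managers plus the TTM page pools. */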
#define RADEON_DEBUGFS_MEM_TYPES 2

#if defined(CONFIG_DEBUG_FS)
static int radeon_mm_dump_table(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_mm *mm = (struct drm_mm *)node->info_ent->data;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	int ret;
	struct ttm_bo_global *glob = rdev->mman.bdev.glob;

	spin_lock(&glob->lru_lock);
	ret = drm_mm_dump_table(m, mm);
	spin_unlock(&glob->lru_lock);
	return ret;
}
#endif
static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES+2];
	static char radeon_mem_types_names[RADEON_DEBUGFS_MEM_TYPES+2][32];
	unsigned i;

	for (i = 0; i < RADEON_DEBUGFS_MEM_TYPES; i++) {
		if (i == 0)
			sprintf(radeon_mem_types_names[i], "radeon_vram_mm");
		else
			sprintf(radeon_mem_types_names[i], "radeon_gtt_mm");
		radeon_mem_types_list[i].name = radeon_mem_types_names[i];
		radeon_mem_types_list[i].show = &radeon_mm_dump_table;
		radeon_mem_types_list[i].driver_features = 0;
		if (i == 0)
			radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_VRAM].priv;
		else
			radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_TT].priv;
	}
	/* Add ttm page pool to debugfs */
	sprintf(radeon_mem_types_names[i], "ttm_page_pool");
	radeon_mem_types_list[i].name = radeon_mem_types_names[i];
	radeon_mem_types_list[i].show = &ttm_page_alloc_debugfs;
	radeon_mem_types_list[i].driver_features = 0;
	radeon_mem_types_list[i++].data = NULL;
#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl()) {
		sprintf(radeon_mem_types_names[i], "ttm_dma_page_pool");
		radeon_mem_types_list[i].name = radeon_mem_types_names[i];
		radeon_mem_types_list[i].show = &ttm_dma_page_alloc_debugfs;
		radeon_mem_types_list[i].driver_features = 0;
		radeon_mem_types_list[i++].data = NULL;
	}
#endif
	return radeon_debugfs_add_files(rdev, radeon_mem_types_list, i);

#endif
	return 0;
}