/*
 * Copyright 2007 Dave Airlied
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
/*
 * Authors: Dave Airlied <airlied@linux.ie>
 *	    Ben Skeggs   <darktama@iinet.net.au>
 *	    Jeremy Kolb  <jkolb@brandeis.edu>
 */
#include <core/engine.h>

#include <subdev/fb.h>
#include <subdev/vm.h>
#include <subdev/bar.h>

#include "nouveau_drm.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"

#include "nouveau_bo.h"
#include "nouveau_ttm.h"
#include "nouveau_gem.h"
/*
 * NV10-NV40 tiling helpers
 */
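/* Program one tile region into hardware: drop any fence guarding the
 * previous state, tear down the old pitch, program the new one, and
 * notify every engine that caches tiling state (GR, MPEG).
 */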
static void
nv10_bo_update_tile_region(struct drm_device *dev, struct nouveau_drm_tile *reg,
			   u32 addr, u32 size, u32 pitch, u32 flags)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	int i = reg - drm->tile.reg;
	struct nouveau_fb *pfb = nouveau_fb(drm->device);
	struct nouveau_fb_tile *tile = &pfb->tile.region[i];
	struct nouveau_engine *engine;

	nouveau_fence_unref(&reg->fence);

	if (tile->pitch)
		pfb->tile.fini(pfb, i, tile);

	if (pitch)
		pfb->tile.init(pfb, i, addr, size, pitch, flags, tile);

	pfb->tile.prog(pfb, i, tile);

	if ((engine = nouveau_engine(pfb, NVDEV_ENGINE_GR)))
		engine->tile_prog(engine, i);
	if ((engine = nouveau_engine(pfb, NVDEV_ENGINE_MPEG)))
		engine->tile_prog(engine, i);
}
static struct nouveau_drm_tile *
nv10_bo_get_tile_region(struct drm_device *dev, int i)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_drm_tile *tile = &drm->tile.reg[i];

	spin_lock(&drm->tile.lock);

	if (!tile->used &&
	    (!tile->fence || nouveau_fence_done(tile->fence)))
		tile->used = true;
	else
		tile = NULL;

	spin_unlock(&drm->tile.lock);
	return tile;
}
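
/* Release a region obtained from nv10_bo_get_tile_region().  If a
 * fence is supplied the region stays unavailable until that fence
 * signals; passing NULL releases it immediately.
 */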
static void
nv10_bo_put_tile_region(struct drm_device *dev, struct nouveau_drm_tile *tile,
			struct nouveau_fence *fence)
{
	struct nouveau_drm *drm = nouveau_drm(dev);

	if (tile) {
		spin_lock(&drm->tile.lock);
		if (fence) {
			/* Mark it as pending. */
			tile->fence = fence;
			nouveau_fence_ref(fence);
		}

		tile->used = false;
		spin_unlock(&drm->tile.lock);
	}
}
static struct nouveau_drm_tile *
nv10_bo_set_tiling(struct drm_device *dev, u32 addr,
		   u32 size, u32 pitch, u32 flags)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_fb *pfb = nouveau_fb(drm->device);
	struct nouveau_drm_tile *tile, *found = NULL;
	int i;

	for (i = 0; i < pfb->tile.regions; i++) {
		tile = nv10_bo_get_tile_region(dev, i);

		if (pitch && !found) {
			found = tile;
			continue;

		} else if (tile && pfb->tile.region[i].pitch) {
			/* Kill an unused tile region. */
			nv10_bo_update_tile_region(dev, tile, 0, 0, 0, 0);
		}

		nv10_bo_put_tile_region(dev, tile, NULL);
	}

	if (found)
		nv10_bo_update_tile_region(dev, found, addr, size,
					   pitch, flags);
	return found;
}
static void
nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct drm_device *dev = drm->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	if (unlikely(nvbo->gem))
		DRM_ERROR("bo %p still attached to GEM object\n", bo);
	nv10_bo_put_tile_region(dev, nvbo->tile, NULL);
	kfree(nvbo);
}
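
/* Pre-NV50 chipsets address tiled surfaces in fixed-size units, so
 * both the size and alignment of a tiled buffer must be rounded to
 * chipset-specific boundaries; NV50+ only needs rounding to the page
 * size chosen for the buffer.
 */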
static void
nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
		       int *align, int *size)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct nouveau_device *device = nv_device(drm->device);

	if (device->card_type < NV_50) {
		if (nvbo->tile_mode) {
			if (device->chipset >= 0x40) {
				*align = 65536;
				*size = roundup(*size, 64 * nvbo->tile_mode);

			} else if (device->chipset >= 0x30) {
				*align = 32768;
				*size = roundup(*size, 64 * nvbo->tile_mode);

			} else if (device->chipset >= 0x20) {
				*align = 16384;
				*size = roundup(*size, 64 * nvbo->tile_mode);

			} else if (device->chipset >= 0x10) {
				*align = 16384;
				*size = roundup(*size, 32 * nvbo->tile_mode);
			}
		}
	} else {
		*size = roundup(*size, (1 << nvbo->page_shift));
		*align = max((1 << nvbo->page_shift), *align);
	}

	*size = roundup(*size, PAGE_SIZE);
}
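
/* Allocate and initialise a nouveau_bo, fixing up size/alignment for
 * tiling before handing the object to TTM.  On failure ttm_bo_init()
 * calls nouveau_bo_del_ttm() itself, so no extra cleanup is needed
 * here.
 */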
int
nouveau_bo_new(struct drm_device *dev, int size, int align,
	       uint32_t flags, uint32_t tile_mode, uint32_t tile_flags,
	       struct sg_table *sg,
	       struct nouveau_bo **pnvbo)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_bo *nvbo;
	size_t acc_size;
	int ret;
	int type = ttm_bo_type_device;

	if (sg)
		type = ttm_bo_type_sg;

	nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
	if (!nvbo)
		return -ENOMEM;
	INIT_LIST_HEAD(&nvbo->head);
	INIT_LIST_HEAD(&nvbo->entry);
	INIT_LIST_HEAD(&nvbo->vma_list);
	nvbo->tile_mode = tile_mode;
	nvbo->tile_flags = tile_flags;
	nvbo->bo.bdev = &drm->ttm.bdev;

	nvbo->page_shift = 12;
	if (drm->client.base.vm) {
		if (!(flags & TTM_PL_FLAG_TT) && size > 256 * 1024)
			nvbo->page_shift = drm->client.base.vm->vmm->lpg_shift;
	}

	nouveau_bo_fixup_align(nvbo, flags, &align, &size);
	nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
	nouveau_bo_placement_set(nvbo, flags, 0);

	acc_size = ttm_bo_dma_acc_size(&drm->ttm.bdev, size,
				       sizeof(struct nouveau_bo));

	ret = ttm_bo_init(&drm->ttm.bdev, &nvbo->bo, size,
			  type, &nvbo->placement,
			  align >> PAGE_SHIFT, 0, false, NULL, acc_size, sg,
			  nouveau_bo_del_ttm);
	if (ret) {
		/* ttm will call nouveau_bo_del_ttm if it fails.. */
		return ret;
	}

	*pnvbo = nvbo;
	return 0;
}
static void
set_placement_list(uint32_t *pl, unsigned *n, uint32_t type, uint32_t flags)
{
	*n = 0;

	if (type & TTM_PL_FLAG_VRAM)
		pl[(*n)++] = TTM_PL_FLAG_VRAM | flags;
	if (type & TTM_PL_FLAG_TT)
		pl[(*n)++] = TTM_PL_FLAG_TT | flags;
	if (type & TTM_PL_FLAG_SYSTEM)
		pl[(*n)++] = TTM_PL_FLAG_SYSTEM | flags;
}
static void
set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct nouveau_fb *pfb = nouveau_fb(drm->device);
	u32 vram_pages = pfb->ram.size >> PAGE_SHIFT;

	if (nv_device(drm->device)->card_type == NV_10 &&
	    nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) &&
	    nvbo->bo.mem.num_pages < vram_pages / 4) {
		/*
		 * Make sure that the color and depth buffers are handled
		 * by independent memory controller units. Up to a 9x
		 * speed up when alpha-blending and depth-test are enabled
		 * at the same time.
		 */
		if (nvbo->tile_flags & NOUVEAU_GEM_TILE_ZETA) {
			nvbo->placement.fpfn = vram_pages / 2;
			nvbo->placement.lpfn = ~0;
		} else {
			nvbo->placement.fpfn = 0;
			nvbo->placement.lpfn = vram_pages / 2;
		}
	}
}
void
nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
{
	struct ttm_placement *pl = &nvbo->placement;
	uint32_t flags = TTM_PL_MASK_CACHING |
			 (nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0);

	pl->placement = nvbo->placements;
	set_placement_list(nvbo->placements, &pl->num_placement,
			   type, flags);

	pl->busy_placement = nvbo->busy_placements;
	set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,
			   type | busy, flags);

	set_placement_range(nvbo, type);
}
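
/* Pinning is reference counted: only the first pin actually validates
 * the buffer into the requested memory type and charges it against
 * the available VRAM/GART accounting.
 */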
int
nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	int ret;

	if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) {
		NV_ERROR(drm, "bo %p pinned elsewhere: 0x%08x vs 0x%08x\n", bo,
			 1 << bo->mem.mem_type, memtype);
		return -EINVAL;
	}

	if (nvbo->pin_refcnt++)
		return 0;

	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (ret)
		goto out;

	nouveau_bo_placement_set(nvbo, memtype, 0);

	ret = nouveau_bo_validate(nvbo, false, false, false);
	if (ret == 0) {
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			drm->gem.vram_available -= bo->mem.size;
			break;
		case TTM_PL_TT:
			drm->gem.gart_available -= bo->mem.size;
			break;
		default:
			break;
		}
	}
	ttm_bo_unreserve(bo);
out:
	if (ret)
		nvbo->pin_refcnt--;
	return ret;
}
int
nouveau_bo_unpin(struct nouveau_bo *nvbo)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	int ret;

	if (--nvbo->pin_refcnt)
		return 0;

	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (ret)
		return ret;

	nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);

	ret = nouveau_bo_validate(nvbo, false, false, false);
	if (ret == 0) {
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			drm->gem.vram_available += bo->mem.size;
			break;
		case TTM_PL_TT:
			drm->gem.gart_available += bo->mem.size;
			break;
		default:
			break;
		}
	}

	ttm_bo_unreserve(bo);
	return ret;
}
int
nouveau_bo_map(struct nouveau_bo *nvbo)
{
	int ret;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
	if (ret)
		return ret;

	ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);
	ttm_bo_unreserve(&nvbo->bo);
	return ret;
}
void
nouveau_bo_unmap(struct nouveau_bo *nvbo)
{
	if (nvbo)
		ttm_bo_kunmap(&nvbo->kmap);
}
int
nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
		    bool no_wait_reserve, bool no_wait_gpu)
{
	int ret;

	ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, interruptible,
			      no_wait_reserve, no_wait_gpu);
	if (ret)
		return ret;

	return 0;
}
u16
nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index)
{
	bool is_iomem;
	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		return ioread16_native((void __force __iomem *)mem);
	else
		return *mem;
}

void
nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
{
	bool is_iomem;
	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		iowrite16_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}

u32
nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		return ioread32_native((void __force __iomem *)mem);
	else
		return *mem;
}

void
nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		iowrite32_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}
static struct ttm_tt *
nouveau_ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
		      uint32_t page_flags, struct page *dummy_read)
{
	struct nouveau_drm *drm = nouveau_bdev(bdev);
	struct drm_device *dev = drm->dev;

	if (drm->agp.stat == ENABLED) {
		return ttm_agp_tt_create(bdev, dev->agp->bridge, size,
					 page_flags, dummy_read);
	}

	return nouveau_sgdma_create_ttm(bdev, size, page_flags, dummy_read);
}
static int
nouveau_bo_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	/* We'll do this from user space. */
	return 0;
}
static int
nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
			 struct ttm_mem_type_manager *man)
{
	struct nouveau_drm *drm = nouveau_bdev(bdev);

	switch (type) {
	case TTM_PL_SYSTEM:
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_VRAM:
		if (nv_device(drm->device)->card_type >= NV_50) {
			man->func = &nouveau_vram_manager;
			man->io_reserve_fastpath = false;
			man->use_io_reserve_lru = true;
		} else {
			man->func = &ttm_bo_manager_func;
		}
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED |
					 TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;
		break;
	case TTM_PL_TT:
		if (nv_device(drm->device)->card_type >= NV_50)
			man->func = &nouveau_gart_manager;
		else
		if (drm->agp.stat != ENABLED)
			man->func = &nv04_gart_manager;
		else
			man->func = &ttm_bo_manager_func;

		if (drm->agp.stat == ENABLED) {
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
			man->available_caching = TTM_PL_FLAG_UNCACHED |
						 TTM_PL_FLAG_WC;
			man->default_caching = TTM_PL_FLAG_WC;
		} else {
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
				     TTM_MEMTYPE_FLAG_CMA;
			man->available_caching = TTM_PL_MASK_CACHING;
			man->default_caching = TTM_PL_FLAG_CACHED;
		}

		break;
	default:
		return -EINVAL;
	}
	return 0;
}
static void
nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	switch (bo->mem.mem_type) {
	case TTM_PL_VRAM:
		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT,
					 TTM_PL_FLAG_SYSTEM);
		break;
	default:
		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM, 0);
		break;
	}

	*pl = nvbo->placement;
}
/* GPU-assisted copy using NV_MEMORY_TO_MEMORY_FORMAT, can access
 * TTM_PL_{VRAM,TT} directly.
 */
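
/* Emit a fence after the copy and hand it to TTM, so the old backing
 * store is not released before the GPU has finished with it.
 */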
static int
nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
			      struct nouveau_bo *nvbo, bool evict,
			      bool no_wait_reserve, bool no_wait_gpu,
			      struct ttm_mem_reg *new_mem)
{
	struct nouveau_fence *fence = NULL;
	int ret;

	ret = nouveau_fence_new(chan, &fence);
	if (ret)
		return ret;

	ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL, evict,
					no_wait_reserve, no_wait_gpu, new_mem);
	nouveau_fence_unref(&fence);
	return ret;
}
static int
nve0_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
	int ret = RING_SPACE(chan, 2);
	if (ret == 0) {
		BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1);
		OUT_RING  (chan, handle);
	}
	return ret;
}
static int
nve0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	int ret = RING_SPACE(chan, 10);
	if (ret == 0) {
		BEGIN_NVC0(chan, NvSubCopy, 0x0400, 8);
		OUT_RING  (chan, upper_32_bits(node->vma[0].offset));
		OUT_RING  (chan, lower_32_bits(node->vma[0].offset));
		OUT_RING  (chan, upper_32_bits(node->vma[1].offset));
		OUT_RING  (chan, lower_32_bits(node->vma[1].offset));
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, new_mem->num_pages);
		BEGIN_IMC0(chan, NvSubCopy, 0x0300, 0x0386);
	}
	return ret;
}
static int
nvc0_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
	int ret = RING_SPACE(chan, 2);
	if (ret == 0) {
		BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1);
		OUT_RING  (chan, handle);
	}
	return ret;
}
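
/* The per-generation copy methods below share one pattern: a linear
 * copy is split into chunks of at most 8191 (2047 for M2MF) lines of
 * PAGE_SIZE bytes each, with one submission per chunk and the source
 * and destination offsets advanced in between.
 */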
static int
nvc0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	u64 src_offset = node->vma[0].offset;
	u64 dst_offset = node->vma[1].offset;
	u32 page_count = new_mem->num_pages;
	int ret;

	page_count = new_mem->num_pages;
	while (page_count) {
		int line_count = (page_count > 8191) ? 8191 : page_count;

		ret = RING_SPACE(chan, 11);
		if (ret)
			return ret;

		BEGIN_NVC0(chan, NvSubCopy, 0x030c, 8);
		OUT_RING  (chan, upper_32_bits(src_offset));
		OUT_RING  (chan, lower_32_bits(src_offset));
		OUT_RING  (chan, upper_32_bits(dst_offset));
		OUT_RING  (chan, lower_32_bits(dst_offset));
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, line_count);
		BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
		OUT_RING  (chan, 0x00000110);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}
static int
nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	u64 src_offset = node->vma[0].offset;
	u64 dst_offset = node->vma[1].offset;
	u32 page_count = new_mem->num_pages;
	int ret;

	page_count = new_mem->num_pages;
	while (page_count) {
		int line_count = (page_count > 2047) ? 2047 : page_count;

		ret = RING_SPACE(chan, 12);
		if (ret)
			return ret;

		BEGIN_NVC0(chan, NvSubCopy, 0x0238, 2);
		OUT_RING  (chan, upper_32_bits(dst_offset));
		OUT_RING  (chan, lower_32_bits(dst_offset));
		BEGIN_NVC0(chan, NvSubCopy, 0x030c, 6);
		OUT_RING  (chan, upper_32_bits(src_offset));
		OUT_RING  (chan, lower_32_bits(src_offset));
		OUT_RING  (chan, PAGE_SIZE); /* src_pitch */
		OUT_RING  (chan, PAGE_SIZE); /* dst_pitch */
		OUT_RING  (chan, PAGE_SIZE); /* line_length */
		OUT_RING  (chan, line_count);
		BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
		OUT_RING  (chan, 0x00100110);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}
static int
nva3_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	u64 src_offset = node->vma[0].offset;
	u64 dst_offset = node->vma[1].offset;
	u32 page_count = new_mem->num_pages;
	int ret;

	page_count = new_mem->num_pages;
	while (page_count) {
		int line_count = (page_count > 8191) ? 8191 : page_count;

		ret = RING_SPACE(chan, 11);
		if (ret)
			return ret;

		BEGIN_NV04(chan, NvSubCopy, 0x030c, 8);
		OUT_RING  (chan, upper_32_bits(src_offset));
		OUT_RING  (chan, lower_32_bits(src_offset));
		OUT_RING  (chan, upper_32_bits(dst_offset));
		OUT_RING  (chan, lower_32_bits(dst_offset));
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, line_count);
		BEGIN_NV04(chan, NvSubCopy, 0x0300, 1);
		OUT_RING  (chan, 0x00000110);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}
static int
nv98_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	int ret = RING_SPACE(chan, 7);
	if (ret == 0) {
		BEGIN_NV04(chan, NvSubCopy, 0x0320, 6);
		OUT_RING  (chan, upper_32_bits(node->vma[0].offset));
		OUT_RING  (chan, lower_32_bits(node->vma[0].offset));
		OUT_RING  (chan, upper_32_bits(node->vma[1].offset));
		OUT_RING  (chan, lower_32_bits(node->vma[1].offset));
		OUT_RING  (chan, 0x00000000 /* COPY */);
		OUT_RING  (chan, new_mem->num_pages << PAGE_SHIFT);
	}
	return ret;
}
static int
nv84_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	int ret = RING_SPACE(chan, 7);
	if (ret == 0) {
		BEGIN_NV04(chan, NvSubCopy, 0x0304, 6);
		OUT_RING  (chan, new_mem->num_pages << PAGE_SHIFT);
		OUT_RING  (chan, upper_32_bits(node->vma[0].offset));
		OUT_RING  (chan, lower_32_bits(node->vma[0].offset));
		OUT_RING  (chan, upper_32_bits(node->vma[1].offset));
		OUT_RING  (chan, lower_32_bits(node->vma[1].offset));
		OUT_RING  (chan, 0x00000000 /* MODE_COPY, QUERY_NONE */);
	}
	return ret;
}
static int
nv50_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
	int ret = RING_SPACE(chan, 6);
	if (ret == 0) {
		BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
		OUT_RING  (chan, handle);
		BEGIN_NV04(chan, NvSubCopy, 0x0180, 3);
		OUT_RING  (chan, NvNotify0);
		OUT_RING  (chan, NvDmaFB);
		OUT_RING  (chan, NvDmaFB);
	}

	return ret;
}
static int
nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	u64 length = (new_mem->num_pages << PAGE_SHIFT);
	u64 src_offset = node->vma[0].offset;
	u64 dst_offset = node->vma[1].offset;
	int ret;

	while (length) {
		u32 amount, stride, height;

		amount  = min(length, (u64)(4 * 1024 * 1024));
		stride  = 16 * 4;
		height  = amount / stride;

		if (new_mem->mem_type == TTM_PL_VRAM &&
		    nouveau_bo_tile_layout(nvbo)) {
			ret = RING_SPACE(chan, 8);
			if (ret)
				return ret;

			BEGIN_NV04(chan, NvSubCopy, 0x0200, 7);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, stride);
			OUT_RING  (chan, height);
			OUT_RING  (chan, 1);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, 0);
		} else {
			ret = RING_SPACE(chan, 2);
			if (ret)
				return ret;

			BEGIN_NV04(chan, NvSubCopy, 0x0200, 1);
			OUT_RING  (chan, 1);
		}
		if (old_mem->mem_type == TTM_PL_VRAM &&
		    nouveau_bo_tile_layout(nvbo)) {
			ret = RING_SPACE(chan, 8);
			if (ret)
				return ret;

			BEGIN_NV04(chan, NvSubCopy, 0x021c, 7);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, stride);
			OUT_RING  (chan, height);
			OUT_RING  (chan, 1);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, 0);
		} else {
			ret = RING_SPACE(chan, 2);
			if (ret)
				return ret;

			BEGIN_NV04(chan, NvSubCopy, 0x021c, 1);
			OUT_RING  (chan, 1);
		}

		ret = RING_SPACE(chan, 14);
		if (ret)
			return ret;

		BEGIN_NV04(chan, NvSubCopy, 0x0238, 2);
		OUT_RING  (chan, upper_32_bits(src_offset));
		OUT_RING  (chan, upper_32_bits(dst_offset));
		BEGIN_NV04(chan, NvSubCopy, 0x030c, 8);
		OUT_RING  (chan, lower_32_bits(src_offset));
		OUT_RING  (chan, lower_32_bits(dst_offset));
		OUT_RING  (chan, stride);
		OUT_RING  (chan, stride);
		OUT_RING  (chan, stride);
		OUT_RING  (chan, height);
		OUT_RING  (chan, 0x00000101);
		OUT_RING  (chan, 0x00000000);
		BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
		OUT_RING  (chan, 0);

		length -= amount;
		src_offset += amount;
		dst_offset += amount;
	}

	return 0;
}
static int
nv04_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
	int ret = RING_SPACE(chan, 4);
	if (ret == 0) {
		BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
		OUT_RING  (chan, handle);
		BEGIN_NV04(chan, NvSubCopy, 0x0180, 1);
		OUT_RING  (chan, NvNotify0);
	}

	return ret;
}
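
/* Select the DMA context object through which the copy engine can
 * address the given memory region on pre-NV50 hardware.
 */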
static inline uint32_t
nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
		      struct nouveau_channel *chan, struct ttm_mem_reg *mem)
{
	if (mem->mem_type == TTM_PL_TT)
		return NvDmaTT;
	return NvDmaFB;
}
static int
nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	u32 src_offset = old_mem->start << PAGE_SHIFT;
	u32 dst_offset = new_mem->start << PAGE_SHIFT;
	u32 page_count = new_mem->num_pages;
	int ret;

	ret = RING_SPACE(chan, 3);
	if (ret)
		return ret;

	BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
	OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem));
	OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem));

	page_count = new_mem->num_pages;
	while (page_count) {
		int line_count = (page_count > 2047) ? 2047 : page_count;

		ret = RING_SPACE(chan, 11);
		if (ret)
			return ret;

		BEGIN_NV04(chan, NvSubCopy,
			   NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
		OUT_RING  (chan, src_offset);
		OUT_RING  (chan, dst_offset);
		OUT_RING  (chan, PAGE_SIZE); /* src_pitch */
		OUT_RING  (chan, PAGE_SIZE); /* dst_pitch */
		OUT_RING  (chan, PAGE_SIZE); /* line_length */
		OUT_RING  (chan, line_count);
		OUT_RING  (chan, 0x00000101);
		OUT_RING  (chan, 0x00000000);
		BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
		OUT_RING  (chan, 0);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}
static int
nouveau_vma_getmap(struct nouveau_channel *chan, struct nouveau_bo *nvbo,
		   struct ttm_mem_reg *mem, struct nouveau_vma *vma)
{
	struct nouveau_mem *node = mem->mm_node;
	int ret;

	ret = nouveau_vm_get(nv_client(chan->cli)->vm, mem->num_pages <<
			     PAGE_SHIFT, node->page_shift,
			     NV_MEM_ACCESS_RW, vma);
	if (ret)
		return ret;

	if (mem->mem_type == TTM_PL_VRAM)
		nouveau_vm_map(vma, node);
	else
		nouveau_vm_map_sg(vma, 0, mem->num_pages << PAGE_SHIFT, node);

	return 0;
}
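
/* Generic accelerated move: map source and destination into the GPU
 * address space where needed (NV50+), then invoke the per-generation
 * copy method selected by nouveau_bo_move_init().
 */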
static int
nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
		     bool no_wait_reserve, bool no_wait_gpu,
		     struct ttm_mem_reg *new_mem)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_channel *chan = drm->channel;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;

	mutex_lock(&chan->cli->mutex);

	/* create temporary vmas for the transfer and attach them to the
	 * old nouveau_mem node, these will get cleaned up after ttm has
	 * destroyed the ttm_mem_reg
	 */
	if (nv_device(drm->device)->card_type >= NV_50) {
		struct nouveau_mem *node = old_mem->mm_node;

		ret = nouveau_vma_getmap(chan, nvbo, old_mem, &node->vma[0]);
		if (ret)
			goto out;

		ret = nouveau_vma_getmap(chan, nvbo, new_mem, &node->vma[1]);
		if (ret)
			goto out;
	}

	ret = drm->ttm.move(chan, bo, &bo->mem, new_mem);
	if (ret == 0) {
		ret = nouveau_bo_move_accel_cleanup(chan, nvbo, evict,
						    no_wait_reserve,
						    no_wait_gpu, new_mem);
	}

out:
	mutex_unlock(&chan->cli->mutex);
	return ret;
}
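
/* Probe for the best copy method the hardware offers; the method
 * table is ordered most- to least-preferred.  If nothing initialises
 * successfully we fall back to CPU copies.
 */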
void
nouveau_bo_move_init(struct nouveau_drm *drm)
{
	static const struct {
		const char *name;
		int engine;
		u32 oclass;
		int (*exec)(struct nouveau_channel *,
			    struct ttm_buffer_object *,
			    struct ttm_mem_reg *, struct ttm_mem_reg *);
		int (*init)(struct nouveau_channel *, u32 handle);
	} _methods[] = {
		{  "COPY", 0, 0xa0b5, nve0_bo_move_copy, nve0_bo_move_init },
		{  "GRCE", 0, 0xa0b5, nve0_bo_move_copy, nvc0_bo_move_init },
		{ "COPY1", 5, 0x90b8, nvc0_bo_move_copy, nvc0_bo_move_init },
		{ "COPY0", 4, 0x90b5, nvc0_bo_move_copy, nvc0_bo_move_init },
		{  "COPY", 0, 0x85b5, nva3_bo_move_copy, nv50_bo_move_init },
		{ "CRYPT", 0, 0x74c1, nv84_bo_move_exec, nv50_bo_move_init },
		{  "M2MF", 0, 0x9039, nvc0_bo_move_m2mf, nvc0_bo_move_init },
		{  "M2MF", 0, 0x5039, nv50_bo_move_m2mf, nv50_bo_move_init },
		{  "M2MF", 0, 0x0039, nv04_bo_move_m2mf, nv04_bo_move_init },
		{},
		{ "CRYPT", 0, 0x88b4, nv98_bo_move_exec, nv50_bo_move_init },
	}, *mthd = _methods;
	const char *name = "CPU";
	int ret;

	do {
		struct nouveau_object *object;
		struct nouveau_channel *chan;
		u32 handle = (mthd->engine << 16) | mthd->oclass;

		if (mthd->init == nve0_bo_move_init)
			chan = drm->cechan;
		else
			chan = drm->channel;
		if (chan == NULL)
			continue;

		ret = nouveau_object_new(nv_object(drm), chan->handle, handle,
					 mthd->oclass, NULL, 0, &object);
		if (ret == 0) {
			ret = mthd->init(chan, handle);
			if (ret) {
				nouveau_object_del(nv_object(drm),
						   chan->handle, handle);
				continue;
			}

			drm->ttm.move = mthd->exec;
			name = mthd->name;
			break;
		}
	} while ((++mthd)->exec);

	NV_INFO(drm, "MM: using %s for buffer copies\n", name);
}
static int
nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait_reserve, bool no_wait_gpu,
		      struct ttm_mem_reg *new_mem)
{
	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
	struct ttm_placement placement;
	struct ttm_mem_reg tmp_mem;
	int ret;

	placement.fpfn = placement.lpfn = 0;
	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
	if (ret)
		return ret;

	ret = ttm_tt_bind(bo->ttm, &tmp_mem);
	if (ret)
		goto out;

	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, &tmp_mem);
	if (ret)
		goto out;

	ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem);
out:
	ttm_bo_mem_put(bo, &tmp_mem);
	return ret;
}
static int
nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait_reserve, bool no_wait_gpu,
		      struct ttm_mem_reg *new_mem)
{
	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
	struct ttm_placement placement;
	struct ttm_mem_reg tmp_mem;
	int ret;

	placement.fpfn = placement.lpfn = 0;
	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
	if (ret)
		return ret;

	ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem);
	if (ret)
		goto out;

	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, new_mem);
	if (ret)
		goto out;

out:
	ttm_bo_mem_put(bo, &tmp_mem);
	return ret;
}
static void
nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nouveau_vma *vma;

	/* ttm can now (stupidly) pass the driver bos it didn't create... */
	if (bo->destroy != nouveau_bo_del_ttm)
		return;

	list_for_each_entry(vma, &nvbo->vma_list, head) {
		if (new_mem && new_mem->mem_type == TTM_PL_VRAM) {
			nouveau_vm_map(vma, new_mem->mm_node);
		} else
		if (new_mem && new_mem->mem_type == TTM_PL_TT &&
		    nvbo->page_shift == vma->vm->vmm->spg_shift) {
			if (((struct nouveau_mem *)new_mem->mm_node)->sg)
				nouveau_vm_map_sg_table(vma, 0, new_mem->
							num_pages << PAGE_SHIFT,
							new_mem->mm_node);
			else
				nouveau_vm_map_sg(vma, 0, new_mem->
						  num_pages << PAGE_SHIFT,
						  new_mem->mm_node);
		} else {
			nouveau_vm_unmap(vma);
		}
	}
}
static int
nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
		   struct nouveau_drm_tile **new_tile)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct drm_device *dev = drm->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	u64 offset = new_mem->start << PAGE_SHIFT;

	*new_tile = NULL;
	if (new_mem->mem_type != TTM_PL_VRAM)
		return 0;

	if (nv_device(drm->device)->card_type >= NV_10) {
		*new_tile = nv10_bo_set_tiling(dev, offset, new_mem->size,
					       nvbo->tile_mode,
					       nvbo->tile_flags);
	}

	return 0;
}
static void
nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
		      struct nouveau_drm_tile *new_tile,
		      struct nouveau_drm_tile **old_tile)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct drm_device *dev = drm->dev;

	nv10_bo_put_tile_region(dev, *old_tile, bo->sync_obj);
	*old_tile = new_tile;
}
static int
nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
		bool no_wait_reserve, bool no_wait_gpu,
		struct ttm_mem_reg *new_mem)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct nouveau_drm_tile *new_tile = NULL;
	int ret = 0;

	if (nv_device(drm->device)->card_type < NV_50) {
		ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
		if (ret)
			return ret;
	}

	/* Fake bo copy. */
	if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
		BUG_ON(bo->mem.mm_node != NULL);
		bo->mem = *new_mem;
		new_mem->mm_node = NULL;
		goto out;
	}

	/* CPU copy if we have no accelerated method available */
	if (!drm->ttm.move) {
		ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
		goto out;
	}

	/* Hardware assisted copy. */
	if (new_mem->mem_type == TTM_PL_SYSTEM)
		ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
	else if (old_mem->mem_type == TTM_PL_SYSTEM)
		ret = nouveau_bo_move_flips(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
	else
		ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);

	if (!ret)
		goto out;

	/* Fallback to software copy. */
	ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);

out:
	if (nv_device(drm->device)->card_type < NV_50) {
		if (ret)
			nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
		else
			nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);
	}

	return ret;
}
static int
nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	return 0;
}
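
/* Tell TTM how a memory region may be mapped for CPU access: AGP and
 * VRAM apertures are I/O memory (VRAM goes through BAR1, which on
 * NV50+ requires an explicit BAR mapping), system memory is not.
 */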
static int
nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct nouveau_drm *drm = nouveau_bdev(bdev);
	struct drm_device *dev = drm->dev;
	int ret;

	mem->bus.addr = NULL;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	mem->bus.is_iomem = false;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		return 0;
	case TTM_PL_TT:
#if __OS_HAS_AGP
		if (drm->agp.stat == ENABLED) {
			mem->bus.offset = mem->start << PAGE_SHIFT;
			mem->bus.base = drm->agp.base;
			mem->bus.is_iomem = true;
		}
#endif
		break;
	case TTM_PL_VRAM:
		mem->bus.offset = mem->start << PAGE_SHIFT;
		mem->bus.base = pci_resource_start(dev->pdev, 1);
		mem->bus.is_iomem = true;
		if (nv_device(drm->device)->card_type >= NV_50) {
			struct nouveau_bar *bar = nouveau_bar(drm->device);
			struct nouveau_mem *node = mem->mm_node;

			ret = bar->umap(bar, node, NV_MEM_ACCESS_RW,
					&node->bar_vma);
			if (ret)
				return ret;

			mem->bus.offset = node->bar_vma.offset;
		}
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
static void
nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct nouveau_drm *drm = nouveau_bdev(bdev);
	struct nouveau_bar *bar = nouveau_bar(drm->device);
	struct nouveau_mem *node = mem->mm_node;

	if (!node->bar_vma.node)
		return;

	bar->unmap(bar, &node->bar_vma);
}
static int
nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nouveau_device *device = nv_device(drm->device);
	u32 mappable = pci_resource_len(device->pdev, 1) >> PAGE_SHIFT;

	/* as long as the bo isn't in vram, and isn't tiled, we've got
	 * nothing to do here.
	 */
	if (bo->mem.mem_type != TTM_PL_VRAM) {
		if (nv_device(drm->device)->card_type < NV_50 ||
		    !nouveau_bo_tile_layout(nvbo))
			return 0;
	}

	/* make sure bo is in mappable vram */
	if (bo->mem.start + bo->mem.num_pages < mappable)
		return 0;

	nvbo->placement.fpfn = 0;
	nvbo->placement.lpfn = mappable;
	nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_VRAM, 0);
	return nouveau_bo_validate(nvbo, false, true, false);
}
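
/* Back a ttm_tt with pages and DMA mappings.  Shared (SG) buffers
 * arrive already populated by their exporter; otherwise the AGP,
 * swiotlb or plain page-pool path is used, depending on what the
 * kernel was built with.
 */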
static int
nouveau_ttm_tt_populate(struct ttm_tt *ttm)
{
	struct ttm_dma_tt *ttm_dma = (void *)ttm;
	struct nouveau_drm *drm;
	struct drm_device *dev;
	unsigned i;
	int r;
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (ttm->state != tt_unpopulated)
		return 0;

	if (slave && ttm->sg) {
		/* make userspace faulting work */
		drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
						 ttm_dma->dma_address, ttm->num_pages);
		ttm->state = tt_unbound;
		return 0;
	}

	drm = nouveau_bdev(ttm->bdev);
	dev = drm->dev;

#if __OS_HAS_AGP
	if (drm->agp.stat == ENABLED) {
		return ttm_agp_tt_populate(ttm);
	}
#endif

#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl()) {
		return ttm_dma_populate((void *)ttm, dev->dev);
	}
#endif

	r = ttm_pool_populate(ttm);
	if (r) {
		return r;
	}

	for (i = 0; i < ttm->num_pages; i++) {
		ttm_dma->dma_address[i] = pci_map_page(dev->pdev, ttm->pages[i],
						       0, PAGE_SIZE,
						       PCI_DMA_BIDIRECTIONAL);
		if (pci_dma_mapping_error(dev->pdev, ttm_dma->dma_address[i])) {
			while (--i) {
				pci_unmap_page(dev->pdev, ttm_dma->dma_address[i],
					       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
				ttm_dma->dma_address[i] = 0;
			}
			ttm_pool_unpopulate(ttm);
			return -EFAULT;
		}
	}
	return 0;
}
static void
nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
	struct ttm_dma_tt *ttm_dma = (void *)ttm;
	struct nouveau_drm *drm;
	struct drm_device *dev;
	unsigned i;
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (slave)
		return;

	drm = nouveau_bdev(ttm->bdev);
	dev = drm->dev;

#if __OS_HAS_AGP
	if (drm->agp.stat == ENABLED) {
		ttm_agp_tt_unpopulate(ttm);
		return;
	}
#endif

#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl()) {
		ttm_dma_unpopulate((void *)ttm, dev->dev);
		return;
	}
#endif

	for (i = 0; i < ttm->num_pages; i++) {
		if (ttm_dma->dma_address[i]) {
			pci_unmap_page(dev->pdev, ttm_dma->dma_address[i],
				       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
		}
	}

	ttm_pool_unpopulate(ttm);
}
void
nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence)
{
	struct nouveau_fence *old_fence = NULL;

	if (fence)
		nouveau_fence_ref(fence);

	spin_lock(&nvbo->bo.bdev->fence_lock);
	old_fence = nvbo->bo.sync_obj;
	nvbo->bo.sync_obj = fence;
	spin_unlock(&nvbo->bo.bdev->fence_lock);

	nouveau_fence_unref(&old_fence);
}
static void
nouveau_bo_fence_unref(void **sync_obj)
{
	nouveau_fence_unref((struct nouveau_fence **)sync_obj);
}

static void *
nouveau_bo_fence_ref(void *sync_obj)
{
	return nouveau_fence_ref(sync_obj);
}

static bool
nouveau_bo_fence_signalled(void *sync_obj, void *sync_arg)
{
	return nouveau_fence_done(sync_obj);
}

static int
nouveau_bo_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr)
{
	return nouveau_fence_wait(sync_obj, lazy, intr);
}

static int
nouveau_bo_fence_flush(void *sync_obj, void *sync_arg)
{
	return 0;
}
struct ttm_bo_driver nouveau_bo_driver = {
	.ttm_tt_create = &nouveau_ttm_tt_create,
	.ttm_tt_populate = &nouveau_ttm_tt_populate,
	.ttm_tt_unpopulate = &nouveau_ttm_tt_unpopulate,
	.invalidate_caches = nouveau_bo_invalidate_caches,
	.init_mem_type = nouveau_bo_init_mem_type,
	.evict_flags = nouveau_bo_evict_flags,
	.move_notify = nouveau_bo_move_ntfy,
	.move = nouveau_bo_move,
	.verify_access = nouveau_bo_verify_access,
	.sync_obj_signaled = nouveau_bo_fence_signalled,
	.sync_obj_wait = nouveau_bo_fence_wait,
	.sync_obj_flush = nouveau_bo_fence_flush,
	.sync_obj_unref = nouveau_bo_fence_unref,
	.sync_obj_ref = nouveau_bo_fence_ref,
	.fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
	.io_mem_reserve = &nouveau_ttm_io_mem_reserve,
	.io_mem_free = &nouveau_ttm_io_mem_free,
};
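
/* Each nouveau_bo keeps a list of its nouveau_vma mappings, one per
 * address space the buffer is mapped into; the helpers below manage
 * that list.
 */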
struct nouveau_vma *
nouveau_bo_vma_find(struct nouveau_bo *nvbo, struct nouveau_vm *vm)
{
	struct nouveau_vma *vma;
	list_for_each_entry(vma, &nvbo->vma_list, head) {
		if (vma->vm == vm)
			return vma;
	}

	return NULL;
}
int
nouveau_bo_vma_add(struct nouveau_bo *nvbo, struct nouveau_vm *vm,
		   struct nouveau_vma *vma)
{
	const u32 size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
	struct nouveau_mem *node = nvbo->bo.mem.mm_node;
	int ret;

	ret = nouveau_vm_get(vm, size, nvbo->page_shift,
			     NV_MEM_ACCESS_RW, vma);
	if (ret)
		return ret;

	if (nvbo->bo.mem.mem_type == TTM_PL_VRAM)
		nouveau_vm_map(vma, nvbo->bo.mem.mm_node);
	else if (nvbo->bo.mem.mem_type == TTM_PL_TT) {
		if (node->sg)
			nouveau_vm_map_sg_table(vma, 0, size, node);
		else
			nouveau_vm_map_sg(vma, 0, size, node);
	}

	list_add_tail(&vma->head, &nvbo->vma_list);
	return 0;
}
void
nouveau_bo_vma_del(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
{
	if (vma->node) {
		if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM) {
			spin_lock(&nvbo->bo.bdev->fence_lock);
			ttm_bo_wait(&nvbo->bo, false, false, false);
			spin_unlock(&nvbo->bo.bdev->fence_lock);
			nouveau_vm_unmap(vma);
		}

		nouveau_vm_put(vma);
		list_del(&vma->head);
	}
}