/*
 * Copyright 2007 Dave Airlied
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
/*
 * Authors: Dave Airlied <airlied@linux.ie>
 *	    Ben Skeggs   <darktama@iinet.net.au>
 *	    Jeremy Kolb  <jkolb@brandeis.edu>
 */

#include <core/engine.h>

#include <subdev/fb.h>
#include <subdev/vm.h>
#include <subdev/bar.h>

#include "nouveau_drm.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"

#include "nouveau_bo.h"
#include "nouveau_ttm.h"
#include "nouveau_gem.h"

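/* TTM buffer-object backend for nouveau: BO creation, placement policy,
 * pinning, CPU mapping, per-chipset GPU-accelerated copies for migration,
 * and the fence/sync-object glue TTM needs.
 */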
/*
 * NV10-NV40 tiling helpers
 */

static void
nv10_bo_update_tile_region(struct drm_device *dev, struct nouveau_drm_tile *reg,
			   u32 addr, u32 size, u32 pitch, u32 flags)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	int i = reg - drm->tile.reg;
	struct nouveau_fb *pfb = nouveau_fb(drm->device);
	struct nouveau_fb_tile *tile = &pfb->tile.region[i];
	struct nouveau_engine *engine;

	nouveau_fence_unref(&reg->fence);

	if (tile->pitch)
		pfb->tile.fini(pfb, i, tile);

	if (pitch)
		pfb->tile.init(pfb, i, addr, size, pitch, flags, tile);

	pfb->tile.prog(pfb, i, tile);

	if ((engine = nouveau_engine(pfb, NVDEV_ENGINE_GR)))
		engine->tile_prog(engine, i);
	if ((engine = nouveau_engine(pfb, NVDEV_ENGINE_MPEG)))
		engine->tile_prog(engine, i);
}

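/* A tile region is only handed out when it is idle: either no fence is
 * attached, or the attached fence has already signalled.  Callers return
 * regions through nv10_bo_put_tile_region(), optionally with a new fence
 * marking the last GPU use.
 */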
static struct nouveau_drm_tile *
nv10_bo_get_tile_region(struct drm_device *dev, int i)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_drm_tile *tile = &drm->tile.reg[i];

	spin_lock(&drm->tile.lock);

	if (!tile->used &&
	    (!tile->fence || nouveau_fence_done(tile->fence)))
		tile->used = true;
	else
		tile = NULL;

	spin_unlock(&drm->tile.lock);
	return tile;
}

static void
nv10_bo_put_tile_region(struct drm_device *dev, struct nouveau_drm_tile *tile,
			struct nouveau_fence *fence)
{
	struct nouveau_drm *drm = nouveau_drm(dev);

	if (tile) {
		spin_lock(&drm->tile.lock);
		if (fence) {
			/* Mark it as pending. */
			tile->fence = fence;
			nouveau_fence_ref(fence);
		}

		tile->used = false;
		spin_unlock(&drm->tile.lock);
	}
}

static struct nouveau_drm_tile *
nv10_bo_set_tiling(struct drm_device *dev, u32 addr,
		   u32 size, u32 pitch, u32 flags)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_fb *pfb = nouveau_fb(drm->device);
	struct nouveau_drm_tile *tile, *found = NULL;
	int i;

	for (i = 0; i < pfb->tile.regions; i++) {
		tile = nv10_bo_get_tile_region(dev, i);

		if (pitch && !found) {
			found = tile;
			continue;

		} else if (tile && pfb->tile.region[i].pitch) {
			/* Kill an unused tile region. */
			nv10_bo_update_tile_region(dev, tile, 0, 0, 0, 0);
		}

		nv10_bo_put_tile_region(dev, tile, NULL);
	}

	if (found)
		nv10_bo_update_tile_region(dev, found, addr, size,
					   pitch, flags);
	return found;
}

static void
nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct drm_device *dev = drm->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	if (unlikely(nvbo->gem))
		DRM_ERROR("bo %p still attached to GEM object\n", bo);
	nv10_bo_put_tile_region(dev, nvbo->tile, NULL);
	kfree(nvbo);
}

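/* Pre-NV50 chips need tiled buffers aligned and sized to chipset-specific
 * multiples of the tiling pitch; NV50 and later instead round everything
 * to the BO's page size (small or large pages).
 */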
static void
nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
		       int *align, int *size)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct nouveau_device *device = nv_device(drm->device);

	if (device->card_type < NV_50) {
		if (nvbo->tile_mode) {
			if (device->chipset >= 0x40) {
				*align = 65536;
				*size = roundup(*size, 64 * nvbo->tile_mode);

			} else if (device->chipset >= 0x30) {
				*align = 32768;
				*size = roundup(*size, 64 * nvbo->tile_mode);

			} else if (device->chipset >= 0x20) {
				*align = 16384;
				*size = roundup(*size, 64 * nvbo->tile_mode);

			} else if (device->chipset >= 0x10) {
				*align = 16384;
				*size = roundup(*size, 32 * nvbo->tile_mode);
			}
		}
	} else {
		*size = roundup(*size, (1 << nvbo->page_shift));
		*align = max((1 << nvbo->page_shift), *align);
	}

	*size = roundup(*size, PAGE_SIZE);
}

int
nouveau_bo_new(struct drm_device *dev, int size, int align,
	       uint32_t flags, uint32_t tile_mode, uint32_t tile_flags,
	       struct sg_table *sg,
	       struct nouveau_bo **pnvbo)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_bo *nvbo;
	size_t acc_size;
	int ret;
	int type = ttm_bo_type_device;

	if (sg)
		type = ttm_bo_type_sg;

	nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
	if (!nvbo)
		return -ENOMEM;
	INIT_LIST_HEAD(&nvbo->head);
	INIT_LIST_HEAD(&nvbo->entry);
	INIT_LIST_HEAD(&nvbo->vma_list);
	nvbo->tile_mode = tile_mode;
	nvbo->tile_flags = tile_flags;
	nvbo->bo.bdev = &drm->ttm.bdev;

	nvbo->page_shift = 12;
	if (drm->client.base.vm) {
		if (!(flags & TTM_PL_FLAG_TT) && size > 256 * 1024)
			nvbo->page_shift = drm->client.base.vm->vmm->lpg_shift;
	}

	nouveau_bo_fixup_align(nvbo, flags, &align, &size);
	nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
	nouveau_bo_placement_set(nvbo, flags, 0);

	acc_size = ttm_bo_dma_acc_size(&drm->ttm.bdev, size,
				       sizeof(struct nouveau_bo));

	ret = ttm_bo_init(&drm->ttm.bdev, &nvbo->bo, size,
			  type, &nvbo->placement,
			  align >> PAGE_SHIFT, 0, false, NULL, acc_size, sg,
			  nouveau_bo_del_ttm);
	if (ret) {
		/* ttm will call nouveau_bo_del_ttm if it fails.. */
		return ret;
	}

	*pnvbo = nvbo;
	return 0;
}

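/* Build the flat TTM placement array from a mask of TTM_PL_FLAG_* domains,
 * OR'ing the shared caching/no-evict flags into every entry.
 */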
static void
set_placement_list(uint32_t *pl, unsigned *n, uint32_t type, uint32_t flags)
{
	*n = 0;

	if (type & TTM_PL_FLAG_VRAM)
		pl[(*n)++] = TTM_PL_FLAG_VRAM | flags;
	if (type & TTM_PL_FLAG_TT)
		pl[(*n)++] = TTM_PL_FLAG_TT | flags;
	if (type & TTM_PL_FLAG_SYSTEM)
		pl[(*n)++] = TTM_PL_FLAG_SYSTEM | flags;
}

static void
set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct nouveau_fb *pfb = nouveau_fb(drm->device);
	u32 vram_pages = pfb->ram.size >> PAGE_SHIFT;

	if (nv_device(drm->device)->card_type == NV_10 &&
	    nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) &&
	    nvbo->bo.mem.num_pages < vram_pages / 4) {
		/*
		 * Make sure that the color and depth buffers are handled
		 * by independent memory controller units. Up to a 9x
		 * speed up when alpha-blending and depth-test are enabled
		 * at the same time.
		 */
		if (nvbo->tile_flags & NOUVEAU_GEM_TILE_ZETA) {
			nvbo->placement.fpfn = vram_pages / 2;
			nvbo->placement.lpfn = ~0;
		} else {
			nvbo->placement.fpfn = 0;
			nvbo->placement.lpfn = vram_pages / 2;
		}
	}
}

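/* "type" is where the BO should ideally live; "busy" adds fallback domains
 * that TTM may use only when the preferred placement cannot be satisfied.
 */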
void
nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
{
	struct ttm_placement *pl = &nvbo->placement;
	uint32_t flags = TTM_PL_MASK_CACHING |
		(nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0);

	pl->placement = nvbo->placements;
	set_placement_list(nvbo->placements, &pl->num_placement,
			   type, flags);

	pl->busy_placement = nvbo->busy_placements;
	set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,
			   type | busy, flags);

	set_placement_range(nvbo, type);
}

int
nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	int ret;

	if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) {
		NV_ERROR(drm, "bo %p pinned elsewhere: 0x%08x vs 0x%08x\n", bo,
			 1 << bo->mem.mem_type, memtype);
		return -EINVAL;
	}

	if (nvbo->pin_refcnt++)
		return 0;

	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (ret)
		goto out;

	nouveau_bo_placement_set(nvbo, memtype, 0);

	ret = nouveau_bo_validate(nvbo, false, false, false);
	if (ret == 0) {
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			drm->gem.vram_available -= bo->mem.size;
			break;
		case TTM_PL_TT:
			drm->gem.gart_available -= bo->mem.size;
			break;
		default:
			break;
		}
	}
	ttm_bo_unreserve(bo);
out:
	if (unlikely(ret))
		nvbo->pin_refcnt--;
	return ret;
}

int
nouveau_bo_unpin(struct nouveau_bo *nvbo)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	int ret;

	if (--nvbo->pin_refcnt)
		return 0;

	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (ret)
		return ret;

	nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);

	ret = nouveau_bo_validate(nvbo, false, false, false);
	if (ret == 0) {
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			drm->gem.vram_available += bo->mem.size;
			break;
		case TTM_PL_TT:
			drm->gem.gart_available += bo->mem.size;
			break;
		default:
			break;
		}
	}

	ttm_bo_unreserve(bo);
	return ret;
}

int
nouveau_bo_map(struct nouveau_bo *nvbo)
{
	int ret;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
	if (ret)
		return ret;

	ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);
	ttm_bo_unreserve(&nvbo->bo);
	return ret;
}

void
nouveau_bo_unmap(struct nouveau_bo *nvbo)
{
	if (nvbo)
		ttm_bo_kunmap(&nvbo->kmap);
}

int
nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
		    bool no_wait_reserve, bool no_wait_gpu)
{
	int ret;

	ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, interruptible,
			      no_wait_reserve, no_wait_gpu);
	if (ret)
		return ret;

	return 0;
}

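/* A kmapped BO may be backed by VRAM (iomem) or by system pages, so these
 * accessors check ttm_kmap_obj_virtual()'s is_iomem flag and use the
 * io*_native helpers where a plain pointer dereference would be invalid.
 */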
u16
nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index)
{
	bool is_iomem;
	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		return ioread16_native((void __force __iomem *)mem);
	else
		return *mem;
}

void
nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
{
	bool is_iomem;
	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		iowrite16_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}

u32
nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		return ioread32_native((void __force __iomem *)mem);
	else
		return *mem;
}

void
nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		iowrite32_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}

static struct ttm_tt *
nouveau_ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
		      uint32_t page_flags, struct page *dummy_read)
{
	struct nouveau_drm *drm = nouveau_bdev(bdev);
	struct drm_device *dev = drm->dev;

	if (drm->agp.stat == ENABLED) {
		return ttm_agp_tt_create(bdev, dev->agp->bridge, size,
					 page_flags, dummy_read);
	}

	return nouveau_sgdma_create_ttm(bdev, size, page_flags, dummy_read);
}

static int
nouveau_bo_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	/* We'll do this from user space. */
	return 0;
}

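/* Describe each memory domain to TTM: NV50+ uses the VM-aware VRAM/GART
 * managers, older chips fall back to TTM's range manager, and the AGP
 * path requires write-combined mappings.
 */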
static int
nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
			 struct ttm_mem_type_manager *man)
{
	struct nouveau_drm *drm = nouveau_bdev(bdev);

	switch (type) {
	case TTM_PL_SYSTEM:
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_VRAM:
		if (nv_device(drm->device)->card_type >= NV_50) {
			man->func = &nouveau_vram_manager;
			man->io_reserve_fastpath = false;
			man->use_io_reserve_lru = true;
		} else {
			man->func = &ttm_bo_manager_func;
		}
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED |
					 TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;
		break;
	case TTM_PL_TT:
		if (nv_device(drm->device)->card_type >= NV_50)
			man->func = &nouveau_gart_manager;
		else
		if (drm->agp.stat != ENABLED)
			man->func = &nv04_gart_manager;
		else
			man->func = &ttm_bo_manager_func;

		if (drm->agp.stat == ENABLED) {
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
			man->available_caching = TTM_PL_FLAG_UNCACHED |
				TTM_PL_FLAG_WC;
			man->default_caching = TTM_PL_FLAG_WC;
		} else {
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
				     TTM_MEMTYPE_FLAG_CMA;
			man->available_caching = TTM_PL_MASK_CACHING;
			man->default_caching = TTM_PL_FLAG_CACHED;
		}

		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void
nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	switch (bo->mem.mem_type) {
	case TTM_PL_VRAM:
		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT,
					 TTM_PL_FLAG_SYSTEM);
		break;
	default:
		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM, 0);
		break;
	}

	*pl = nvbo->placement;
}


/* GPU-assisted copy using NV_MEMORY_TO_MEMORY_FORMAT, can access
 * TTM_PL_{VRAM,TT} directly.
 */

static int
nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
			      struct nouveau_bo *nvbo, bool evict,
			      bool no_wait_reserve, bool no_wait_gpu,
			      struct ttm_mem_reg *new_mem)
{
	struct nouveau_fence *fence = NULL;
	int ret;

	ret = nouveau_fence_new(chan, &fence);
	if (ret)
		return ret;

	ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL, evict,
					no_wait_reserve, no_wait_gpu, new_mem);
	nouveau_fence_unref(&fence);
	return ret;
}

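/* The *_bo_move_init()/*_bo_move_copy()/*_bo_move_m2mf()/*_bo_move_exec()
 * pairs below implement the GPU copy for one hardware generation each; the
 * matching entry in nouveau_bo_move_init()'s method table wires them up.
 */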
static int
nve0_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
	int ret = RING_SPACE(chan, 2);
	if (ret == 0) {
		BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1);
		OUT_RING  (chan, handle);
		FIRE_RING (chan);
	}
	return ret;
}

static int
nve0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	int ret = RING_SPACE(chan, 10);
	if (ret == 0) {
		BEGIN_NVC0(chan, NvSubCopy, 0x0400, 8);
		OUT_RING (chan, upper_32_bits(node->vma[0].offset));
		OUT_RING (chan, lower_32_bits(node->vma[0].offset));
		OUT_RING (chan, upper_32_bits(node->vma[1].offset));
		OUT_RING (chan, lower_32_bits(node->vma[1].offset));
		OUT_RING (chan, PAGE_SIZE);
		OUT_RING (chan, PAGE_SIZE);
		OUT_RING (chan, PAGE_SIZE);
		OUT_RING (chan, new_mem->num_pages);
		BEGIN_IMC0(chan, NvSubCopy, 0x0300, 0x0386);
	}
	return ret;
}

static int
nvc0_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
	int ret = RING_SPACE(chan, 2);
	if (ret == 0) {
		BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1);
		OUT_RING  (chan, handle);
	}
	return ret;
}

static int
nvc0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	u64 src_offset = node->vma[0].offset;
	u64 dst_offset = node->vma[1].offset;
	u32 page_count = new_mem->num_pages;
	int ret;

	page_count = new_mem->num_pages;
	while (page_count) {
		int line_count = (page_count > 8191) ? 8191 : page_count;

		ret = RING_SPACE(chan, 11);
		if (ret)
			return ret;

		BEGIN_NVC0(chan, NvSubCopy, 0x030c, 8);
		OUT_RING (chan, upper_32_bits(src_offset));
		OUT_RING (chan, lower_32_bits(src_offset));
		OUT_RING (chan, upper_32_bits(dst_offset));
		OUT_RING (chan, lower_32_bits(dst_offset));
		OUT_RING (chan, PAGE_SIZE);
		OUT_RING (chan, PAGE_SIZE);
		OUT_RING (chan, PAGE_SIZE);
		OUT_RING (chan, line_count);
		BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
		OUT_RING (chan, 0x00000110);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}

static int
nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	u64 src_offset = node->vma[0].offset;
	u64 dst_offset = node->vma[1].offset;
	u32 page_count = new_mem->num_pages;
	int ret;

	page_count = new_mem->num_pages;
	while (page_count) {
		int line_count = (page_count > 2047) ? 2047 : page_count;

		ret = RING_SPACE(chan, 12);
		if (ret)
			return ret;

		BEGIN_NVC0(chan, NvSubCopy, 0x0238, 2);
		OUT_RING (chan, upper_32_bits(dst_offset));
		OUT_RING (chan, lower_32_bits(dst_offset));
		BEGIN_NVC0(chan, NvSubCopy, 0x030c, 6);
		OUT_RING (chan, upper_32_bits(src_offset));
		OUT_RING (chan, lower_32_bits(src_offset));
		OUT_RING (chan, PAGE_SIZE); /* src_pitch */
		OUT_RING (chan, PAGE_SIZE); /* dst_pitch */
		OUT_RING (chan, PAGE_SIZE); /* line_length */
		OUT_RING (chan, line_count);
		BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
		OUT_RING (chan, 0x00100110);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}

static int
nva3_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	u64 src_offset = node->vma[0].offset;
	u64 dst_offset = node->vma[1].offset;
	u32 page_count = new_mem->num_pages;
	int ret;

	page_count = new_mem->num_pages;
	while (page_count) {
		int line_count = (page_count > 8191) ? 8191 : page_count;

		ret = RING_SPACE(chan, 11);
		if (ret)
			return ret;

		BEGIN_NV04(chan, NvSubCopy, 0x030c, 8);
		OUT_RING (chan, upper_32_bits(src_offset));
		OUT_RING (chan, lower_32_bits(src_offset));
		OUT_RING (chan, upper_32_bits(dst_offset));
		OUT_RING (chan, lower_32_bits(dst_offset));
		OUT_RING (chan, PAGE_SIZE);
		OUT_RING (chan, PAGE_SIZE);
		OUT_RING (chan, PAGE_SIZE);
		OUT_RING (chan, line_count);
		BEGIN_NV04(chan, NvSubCopy, 0x0300, 1);
		OUT_RING (chan, 0x00000110);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}

static int
nv98_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	int ret = RING_SPACE(chan, 7);
	if (ret == 0) {
		BEGIN_NV04(chan, NvSubCopy, 0x0320, 6);
		OUT_RING (chan, upper_32_bits(node->vma[0].offset));
		OUT_RING (chan, lower_32_bits(node->vma[0].offset));
		OUT_RING (chan, upper_32_bits(node->vma[1].offset));
		OUT_RING (chan, lower_32_bits(node->vma[1].offset));
		OUT_RING (chan, 0x00000000 /* COPY */);
		OUT_RING (chan, new_mem->num_pages << PAGE_SHIFT);
	}
	return ret;
}

static int
nv84_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	int ret = RING_SPACE(chan, 7);
	if (ret == 0) {
		BEGIN_NV04(chan, NvSubCopy, 0x0304, 6);
		OUT_RING (chan, new_mem->num_pages << PAGE_SHIFT);
		OUT_RING (chan, upper_32_bits(node->vma[0].offset));
		OUT_RING (chan, lower_32_bits(node->vma[0].offset));
		OUT_RING (chan, upper_32_bits(node->vma[1].offset));
		OUT_RING (chan, lower_32_bits(node->vma[1].offset));
		OUT_RING (chan, 0x00000000 /* MODE_COPY, QUERY_NONE */);
	}
	return ret;
}

static int
nv50_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
	int ret = RING_SPACE(chan, 6);
	if (ret == 0) {
		BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
		OUT_RING  (chan, handle);
		BEGIN_NV04(chan, NvSubCopy, 0x0180, 3);
		OUT_RING  (chan, NvNotify0);
		OUT_RING  (chan, NvDmaFB);
		OUT_RING  (chan, NvDmaFB);
	}

	return ret;
}

static int
nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	u64 length = (new_mem->num_pages << PAGE_SHIFT);
	u64 src_offset = node->vma[0].offset;
	u64 dst_offset = node->vma[1].offset;
	int ret;

	while (length) {
		u32 amount, stride, height;

		amount  = min(length, (u64)(4 * 1024 * 1024));
		stride  = 16 * 4;
		height  = amount / stride;

		if (new_mem->mem_type == TTM_PL_VRAM &&
		    nouveau_bo_tile_layout(nvbo)) {
			ret = RING_SPACE(chan, 8);
			if (ret)
				return ret;

			BEGIN_NV04(chan, NvSubCopy, 0x0200, 7);
			OUT_RING (chan, 0);
			OUT_RING (chan, 0);
			OUT_RING (chan, stride);
			OUT_RING (chan, height);
			OUT_RING (chan, 1);
			OUT_RING (chan, 0);
			OUT_RING (chan, 0);
		} else {
			ret = RING_SPACE(chan, 2);
			if (ret)
				return ret;

			BEGIN_NV04(chan, NvSubCopy, 0x0200, 1);
			OUT_RING (chan, 1);
		}
		if (old_mem->mem_type == TTM_PL_VRAM &&
		    nouveau_bo_tile_layout(nvbo)) {
			ret = RING_SPACE(chan, 8);
			if (ret)
				return ret;

			BEGIN_NV04(chan, NvSubCopy, 0x021c, 7);
			OUT_RING (chan, 0);
			OUT_RING (chan, 0);
			OUT_RING (chan, stride);
			OUT_RING (chan, height);
			OUT_RING (chan, 1);
			OUT_RING (chan, 0);
			OUT_RING (chan, 0);
		} else {
			ret = RING_SPACE(chan, 2);
			if (ret)
				return ret;

			BEGIN_NV04(chan, NvSubCopy, 0x021c, 1);
			OUT_RING (chan, 1);
		}

		ret = RING_SPACE(chan, 14);
		if (ret)
			return ret;

		BEGIN_NV04(chan, NvSubCopy, 0x0238, 2);
		OUT_RING (chan, upper_32_bits(src_offset));
		OUT_RING (chan, upper_32_bits(dst_offset));
		BEGIN_NV04(chan, NvSubCopy, 0x030c, 8);
		OUT_RING (chan, lower_32_bits(src_offset));
		OUT_RING (chan, lower_32_bits(dst_offset));
		OUT_RING (chan, stride);
		OUT_RING (chan, stride);
		OUT_RING (chan, stride);
		OUT_RING (chan, height);
		OUT_RING (chan, 0x00000101);
		OUT_RING (chan, 0x00000000);
		BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
		OUT_RING (chan, 0);

		length -= amount;
		src_offset += amount;
		dst_offset += amount;
	}

	return 0;
}

static int
nv04_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
	int ret = RING_SPACE(chan, 4);
	if (ret == 0) {
		BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
		OUT_RING  (chan, handle);
		BEGIN_NV04(chan, NvSubCopy, 0x0180, 1);
		OUT_RING  (chan, NvNotify0);
	}

	return ret;
}

static inline uint32_t
nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
		      struct nouveau_channel *chan, struct ttm_mem_reg *mem)
{
	if (mem->mem_type == TTM_PL_TT)
		return NvDmaTT;
	return NvDmaFB;
}

static int
nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	u32 src_offset = old_mem->start << PAGE_SHIFT;
	u32 dst_offset = new_mem->start << PAGE_SHIFT;
	u32 page_count = new_mem->num_pages;
	int ret;

	ret = RING_SPACE(chan, 3);
	if (ret)
		return ret;

	BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
	OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem));
	OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem));

	page_count = new_mem->num_pages;
	while (page_count) {
		int line_count = (page_count > 2047) ? 2047 : page_count;

		ret = RING_SPACE(chan, 11);
		if (ret)
			return ret;

		BEGIN_NV04(chan, NvSubCopy,
			   NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
		OUT_RING (chan, src_offset);
		OUT_RING (chan, dst_offset);
		OUT_RING (chan, PAGE_SIZE); /* src_pitch */
		OUT_RING (chan, PAGE_SIZE); /* dst_pitch */
		OUT_RING (chan, PAGE_SIZE); /* line_length */
		OUT_RING (chan, line_count);
		OUT_RING (chan, 0x00000101);
		OUT_RING (chan, 0x00000000);
		BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
		OUT_RING (chan, 0);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}

static int
nouveau_vma_getmap(struct nouveau_channel *chan, struct nouveau_bo *nvbo,
		   struct ttm_mem_reg *mem, struct nouveau_vma *vma)
{
	struct nouveau_mem *node = mem->mm_node;
	int ret;

	ret = nouveau_vm_get(nv_client(chan->cli)->vm, mem->num_pages <<
			     PAGE_SHIFT, node->page_shift,
			     NV_MEM_ACCESS_RW, vma);
	if (ret)
		return ret;

	if (mem->mem_type == TTM_PL_VRAM)
		nouveau_vm_map(vma, node);
	else
		nouveau_vm_map_sg(vma, 0, mem->num_pages << PAGE_SHIFT, node);

	return 0;
}

static int
nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
		     bool no_wait_reserve, bool no_wait_gpu,
		     struct ttm_mem_reg *new_mem)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_channel *chan = drm->channel;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;

	mutex_lock(&chan->cli->mutex);

	/* create temporary vmas for the transfer and attach them to the
	 * old nouveau_mem node, these will get cleaned up after ttm has
	 * destroyed the ttm_mem_reg
	 */
	if (nv_device(drm->device)->card_type >= NV_50) {
		struct nouveau_mem *node = old_mem->mm_node;

		ret = nouveau_vma_getmap(chan, nvbo, old_mem, &node->vma[0]);
		if (ret)
			goto out;

		ret = nouveau_vma_getmap(chan, nvbo, new_mem, &node->vma[1]);
		if (ret)
			goto out;
	}

	ret = drm->ttm.move(chan, bo, &bo->mem, new_mem);
	if (ret == 0) {
		ret = nouveau_bo_move_accel_cleanup(chan, nvbo, evict,
						    no_wait_reserve,
						    no_wait_gpu, new_mem);
	}

out:
	mutex_unlock(&chan->cli->mutex);
	return ret;
}

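/* The method table below is ordered newest hardware first; the first copy
 * class we can both allocate on a channel and initialize wins.  The empty
 * entry terminates the probe (NULL ->exec), leaving the CPU fallback for
 * anything past it, including the deliberately disabled 0x88b4 entry.
 */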
void
nouveau_bo_move_init(struct nouveau_drm *drm)
{
	static const struct {
		const char *name;
		int engine;
		u32 oclass;
		int (*exec)(struct nouveau_channel *,
			    struct ttm_buffer_object *,
			    struct ttm_mem_reg *, struct ttm_mem_reg *);
		int (*init)(struct nouveau_channel *, u32 handle);
	} _methods[] = {
		{  "COPY", 0, 0xa0b5, nve0_bo_move_copy, nve0_bo_move_init },
		{  "GRCE", 0, 0xa0b5, nve0_bo_move_copy, nvc0_bo_move_init },
		{ "COPY1", 5, 0x90b8, nvc0_bo_move_copy, nvc0_bo_move_init },
		{ "COPY0", 4, 0x90b5, nvc0_bo_move_copy, nvc0_bo_move_init },
		{  "COPY", 0, 0x85b5, nva3_bo_move_copy, nv50_bo_move_init },
		{ "CRYPT", 0, 0x74c1, nv84_bo_move_exec, nv50_bo_move_init },
		{  "M2MF", 0, 0x9039, nvc0_bo_move_m2mf, nvc0_bo_move_init },
		{  "M2MF", 0, 0x5039, nv50_bo_move_m2mf, nv50_bo_move_init },
		{  "M2MF", 0, 0x0039, nv04_bo_move_m2mf, nv04_bo_move_init },
		{},
		{ "CRYPT", 0, 0x88b4, nv98_bo_move_exec, nv50_bo_move_init },
	}, *mthd = _methods;
	const char *name = "CPU";
	int ret;

	do {
		struct nouveau_object *object;
		struct nouveau_channel *chan;
		u32 handle = (mthd->engine << 16) | mthd->oclass;

		if (mthd->init == nve0_bo_move_init)
			chan = drm->cechan;
		else
			chan = drm->channel;
		if (chan == NULL)
			continue;

		ret = nouveau_object_new(nv_object(drm), chan->handle, handle,
					 mthd->oclass, NULL, 0, &object);
		if (ret == 0) {
			ret = mthd->init(chan, handle);
			if (ret) {
				nouveau_object_del(nv_object(drm),
						   chan->handle, handle);
				continue;
			}

			drm->ttm.move = mthd->exec;
			name = mthd->name;
			break;
		}
	} while ((++mthd)->exec);

	NV_INFO(drm, "MM: using %s for buffer copies\n", name);
}

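/* VRAM<->SYSTEM moves bounce through a GART-backed TT placement so the copy
 * engine can reach the system pages: "flipd" copies out of VRAM into the
 * temporary TT range first, "flips" binds the TT range before copying the
 * data into VRAM.
 */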
static int
nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait_reserve, bool no_wait_gpu,
		      struct ttm_mem_reg *new_mem)
{
	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
	struct ttm_placement placement;
	struct ttm_mem_reg tmp_mem;
	int ret;

	placement.fpfn = placement.lpfn = 0;
	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
	if (ret)
		return ret;

	ret = ttm_tt_bind(bo->ttm, &tmp_mem);
	if (ret)
		goto out;

	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, &tmp_mem);
	if (ret)
		goto out;

	ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem);
out:
	ttm_bo_mem_put(bo, &tmp_mem);
	return ret;
}

static int
nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait_reserve, bool no_wait_gpu,
		      struct ttm_mem_reg *new_mem)
{
	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
	struct ttm_placement placement;
	struct ttm_mem_reg tmp_mem;
	int ret;

	placement.fpfn = placement.lpfn = 0;
	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
	if (ret)
		return ret;

	ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem);
	if (ret)
		goto out;

	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, new_mem);
	if (ret)
		goto out;

out:
	ttm_bo_mem_put(bo, &tmp_mem);
	return ret;
}

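/* Keep any VM mappings of the BO coherent with its new backing store:
 * remap on a move into VRAM or into a TT placement with a matching page
 * size, otherwise drop the mapping.
 */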
static void
nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nouveau_vma *vma;

	/* ttm can now (stupidly) pass the driver bos it didn't create... */
	if (bo->destroy != nouveau_bo_del_ttm)
		return;

	list_for_each_entry(vma, &nvbo->vma_list, head) {
		if (new_mem && new_mem->mem_type == TTM_PL_VRAM) {
			nouveau_vm_map(vma, new_mem->mm_node);
		} else
		if (new_mem && new_mem->mem_type == TTM_PL_TT &&
		    nvbo->page_shift == vma->vm->vmm->spg_shift) {
			if (((struct nouveau_mem *)new_mem->mm_node)->sg)
				nouveau_vm_map_sg_table(vma, 0,
							new_mem->num_pages << PAGE_SHIFT,
							new_mem->mm_node);
			else
				nouveau_vm_map_sg(vma, 0,
						  new_mem->num_pages << PAGE_SHIFT,
						  new_mem->mm_node);
		} else {
			nouveau_vm_unmap(vma);
		}
	}
}

static int
nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
		   struct nouveau_drm_tile **new_tile)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct drm_device *dev = drm->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	u64 offset = new_mem->start << PAGE_SHIFT;

	*new_tile = NULL;
	if (new_mem->mem_type != TTM_PL_VRAM)
		return 0;

	if (nv_device(drm->device)->card_type >= NV_10) {
		*new_tile = nv10_bo_set_tiling(dev, offset, new_mem->size,
						nvbo->tile_mode,
						nvbo->tile_flags);
	}

	return 0;
}

static void
nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
		      struct nouveau_drm_tile *new_tile,
		      struct nouveau_drm_tile **old_tile)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct drm_device *dev = drm->dev;

	nv10_bo_put_tile_region(dev, *old_tile, bo->sync_obj);
	*old_tile = new_tile;
}

static int
nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
		bool no_wait_reserve, bool no_wait_gpu,
		struct ttm_mem_reg *new_mem)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct nouveau_drm_tile *new_tile = NULL;
	int ret = 0;

	if (nv_device(drm->device)->card_type < NV_50) {
		ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
		if (ret)
			return ret;
	}

	/* Fake bo copy. */
	if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
		BUG_ON(bo->mem.mm_node != NULL);
		bo->mem = *new_mem;
		new_mem->mm_node = NULL;
		goto out;
	}

	/* CPU copy if we have no accelerated method available */
	if (!drm->ttm.move) {
		ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
		goto out;
	}

	/* Hardware assisted copy. */
	if (new_mem->mem_type == TTM_PL_SYSTEM)
		ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
	else if (old_mem->mem_type == TTM_PL_SYSTEM)
		ret = nouveau_bo_move_flips(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
	else
		ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);

	if (!ret)
		goto out;

	/* Fallback to software copy. */
	ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);

out:
	if (nv_device(drm->device)->card_type < NV_50) {
		if (ret)
			nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
		else
			nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);
	}

	return ret;
}

static int
nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	return 0;
}

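/* Translate a TTM memory region into bus addresses for CPU access: TT via
 * the AGP aperture when enabled, VRAM through PCI BAR1 (with a BAR VM
 * mapping on NV50+ that io_mem_free tears down again).
 */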
static int
nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct nouveau_drm *drm = nouveau_bdev(bdev);
	struct drm_device *dev = drm->dev;
	int ret;

	mem->bus.addr = NULL;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	mem->bus.is_iomem = false;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		return 0;
	case TTM_PL_TT:
#if __OS_HAS_AGP
		if (drm->agp.stat == ENABLED) {
			mem->bus.offset = mem->start << PAGE_SHIFT;
			mem->bus.base = drm->agp.base;
			mem->bus.is_iomem = true;
		}
#endif
		break;
	case TTM_PL_VRAM:
		mem->bus.offset = mem->start << PAGE_SHIFT;
		mem->bus.base = pci_resource_start(dev->pdev, 1);
		mem->bus.is_iomem = true;
		if (nv_device(drm->device)->card_type >= NV_50) {
			struct nouveau_bar *bar = nouveau_bar(drm->device);
			struct nouveau_mem *node = mem->mm_node;

			ret = bar->umap(bar, node, NV_MEM_ACCESS_RW,
					&node->bar_vma);
			if (ret)
				return ret;

			mem->bus.offset = node->bar_vma.offset;
		}
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void
nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct nouveau_drm *drm = nouveau_bdev(bdev);
	struct nouveau_bar *bar = nouveau_bar(drm->device);
	struct nouveau_mem *node = mem->mm_node;

	if (!node->bar_vma.node)
		return;

	bar->unmap(bar, &node->bar_vma);
}

static int
nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nouveau_device *device = nv_device(drm->device);
	u32 mappable = pci_resource_len(device->pdev, 1) >> PAGE_SHIFT;

	/* as long as the bo isn't in vram, and isn't tiled, we've got
	 * nothing to do here.
	 */
	if (bo->mem.mem_type != TTM_PL_VRAM) {
		if (nv_device(drm->device)->card_type < NV_50 ||
		    !nouveau_bo_tile_layout(nvbo))
			return 0;
	}

	/* make sure bo is in mappable vram */
	if (bo->mem.start + bo->mem.num_pages < mappable)
		return 0;

	nvbo->placement.fpfn = 0;
	nvbo->placement.lpfn = mappable;
	nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_VRAM, 0);
	return nouveau_bo_validate(nvbo, false, true, false);
}

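/* Populate/unpopulate allocate and DMA-map a ttm_tt's backing pages.
 * dma-buf imported (SG) ttms take their addresses from the sg table, AGP
 * and swiotlb configurations use their own pools, and the generic fallback
 * maps each pool page individually with pci_map_page().
 */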
static int
nouveau_ttm_tt_populate(struct ttm_tt *ttm)
{
	struct ttm_dma_tt *ttm_dma = (void *)ttm;
	struct nouveau_drm *drm;
	struct drm_device *dev;
	unsigned i;
	int r;
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (ttm->state != tt_unpopulated)
		return 0;

	if (slave && ttm->sg) {
		/* make userspace faulting work */
		drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
						 ttm_dma->dma_address, ttm->num_pages);
		ttm->state = tt_unbound;
		return 0;
	}

	drm = nouveau_bdev(ttm->bdev);
	dev = drm->dev;

#if __OS_HAS_AGP
	if (drm->agp.stat == ENABLED) {
		return ttm_agp_tt_populate(ttm);
	}
#endif

#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl()) {
		return ttm_dma_populate((void *)ttm, dev->dev);
	}
#endif

	r = ttm_pool_populate(ttm);
	if (r) {
		return r;
	}

	for (i = 0; i < ttm->num_pages; i++) {
		ttm_dma->dma_address[i] = pci_map_page(dev->pdev, ttm->pages[i],
						       0, PAGE_SIZE,
						       PCI_DMA_BIDIRECTIONAL);
		if (pci_dma_mapping_error(dev->pdev, ttm_dma->dma_address[i])) {
			/* unwind every mapping made so far, including page 0 */
			while (i--) {
				pci_unmap_page(dev->pdev, ttm_dma->dma_address[i],
					       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
				ttm_dma->dma_address[i] = 0;
			}
			ttm_pool_unpopulate(ttm);
			return -EFAULT;
		}
	}
	return 0;
}

static void
nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
	struct ttm_dma_tt *ttm_dma = (void *)ttm;
	struct nouveau_drm *drm;
	struct drm_device *dev;
	unsigned i;
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (slave)
		return;

	drm = nouveau_bdev(ttm->bdev);
	dev = drm->dev;

#if __OS_HAS_AGP
	if (drm->agp.stat == ENABLED) {
		ttm_agp_tt_unpopulate(ttm);
		return;
	}
#endif

#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl()) {
		ttm_dma_unpopulate((void *)ttm, dev->dev);
		return;
	}
#endif

	for (i = 0; i < ttm->num_pages; i++) {
		if (ttm_dma->dma_address[i]) {
			pci_unmap_page(dev->pdev, ttm_dma->dma_address[i],
				       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
		}
	}

	ttm_pool_unpopulate(ttm);
}

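/* Swap the fence attached to a BO under bdev->fence_lock, taking a
 * reference on the new fence before dropping the old one.  The
 * nouveau_bo_fence_* wrappers below adapt nouveau fences to TTM's
 * void *sync_obj hooks.
 */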
void
nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence)
{
	struct nouveau_fence *old_fence = NULL;

	if (likely(fence))
		nouveau_fence_ref(fence);

	spin_lock(&nvbo->bo.bdev->fence_lock);
	old_fence = nvbo->bo.sync_obj;
	nvbo->bo.sync_obj = fence;
	spin_unlock(&nvbo->bo.bdev->fence_lock);

	nouveau_fence_unref(&old_fence);
}

static void
nouveau_bo_fence_unref(void **sync_obj)
{
	nouveau_fence_unref((struct nouveau_fence **)sync_obj);
}

static void *
nouveau_bo_fence_ref(void *sync_obj)
{
	return nouveau_fence_ref(sync_obj);
}

static bool
nouveau_bo_fence_signalled(void *sync_obj, void *sync_arg)
{
	return nouveau_fence_done(sync_obj);
}

static int
nouveau_bo_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr)
{
	return nouveau_fence_wait(sync_obj, lazy, intr);
}

static int
nouveau_bo_fence_flush(void *sync_obj, void *sync_arg)
{
	return 0;
}

struct ttm_bo_driver nouveau_bo_driver = {
	.ttm_tt_create = &nouveau_ttm_tt_create,
	.ttm_tt_populate = &nouveau_ttm_tt_populate,
	.ttm_tt_unpopulate = &nouveau_ttm_tt_unpopulate,
	.invalidate_caches = nouveau_bo_invalidate_caches,
	.init_mem_type = nouveau_bo_init_mem_type,
	.evict_flags = nouveau_bo_evict_flags,
	.move_notify = nouveau_bo_move_ntfy,
	.move = nouveau_bo_move,
	.verify_access = nouveau_bo_verify_access,
	.sync_obj_signaled = nouveau_bo_fence_signalled,
	.sync_obj_wait = nouveau_bo_fence_wait,
	.sync_obj_flush = nouveau_bo_fence_flush,
	.sync_obj_unref = nouveau_bo_fence_unref,
	.sync_obj_ref = nouveau_bo_fence_ref,
	.fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
	.io_mem_reserve = &nouveau_ttm_io_mem_reserve,
	.io_mem_free = &nouveau_ttm_io_mem_free,
};

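/* A BO can be mapped into several nouveau_vm address spaces; the vma list
 * tracks one nouveau_vma per VM.  vma_add allocates VM space and maps the
 * current backing store, vma_del waits for the BO to idle before unmapping.
 */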
struct nouveau_vma *
nouveau_bo_vma_find(struct nouveau_bo *nvbo, struct nouveau_vm *vm)
{
	struct nouveau_vma *vma;
	list_for_each_entry(vma, &nvbo->vma_list, head) {
		if (vma->vm == vm)
			return vma;
	}

	return NULL;
}

int
nouveau_bo_vma_add(struct nouveau_bo *nvbo, struct nouveau_vm *vm,
		   struct nouveau_vma *vma)
{
	const u32 size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
	struct nouveau_mem *node = nvbo->bo.mem.mm_node;
	int ret;

	ret = nouveau_vm_get(vm, size, nvbo->page_shift,
			     NV_MEM_ACCESS_RW, vma);
	if (ret)
		return ret;

	if (nvbo->bo.mem.mem_type == TTM_PL_VRAM)
		nouveau_vm_map(vma, nvbo->bo.mem.mm_node);
	else if (nvbo->bo.mem.mem_type == TTM_PL_TT) {
		if (node->sg)
			nouveau_vm_map_sg_table(vma, 0, size, node);
		else
			nouveau_vm_map_sg(vma, 0, size, node);
	}

	list_add_tail(&vma->head, &nvbo->vma_list);
	vma->refcount = 1;
	return 0;
}

void
nouveau_bo_vma_del(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
{
	if (vma->node) {
		if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM) {
			spin_lock(&nvbo->bo.bdev->fence_lock);
			ttm_bo_wait(&nvbo->bo, false, false, false);
			spin_unlock(&nvbo->bo.bdev->fence_lock);
			nouveau_vm_unmap(vma);
		}

		nouveau_vm_put(vma);
		list_del(&vma->head);
	}
}