Commit | Line | Data |
---|---|---|
771fe6b9 JG |
1 | /* |
2 | * Copyright 2009 Jerome Glisse. | |
3 | * All Rights Reserved. | |
4 | * | |
5 | * Permission is hereby granted, free of charge, to any person obtaining a | |
6 | * copy of this software and associated documentation files (the | |
7 | * "Software"), to deal in the Software without restriction, including | |
8 | * without limitation the rights to use, copy, modify, merge, publish, | |
9 | * distribute, sub license, and/or sell copies of the Software, and to | |
10 | * permit persons to whom the Software is furnished to do so, subject to | |
11 | * the following conditions: | |
12 | * | |
13 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
14 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
15 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | |
16 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, | |
17 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR | |
18 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE | |
19 | * USE OR OTHER DEALINGS IN THE SOFTWARE. | |
20 | * | |
21 | * The above copyright notice and this permission notice (including the | |
22 | * next paragraph) shall be included in all copies or substantial portions | |
23 | * of the Software. | |
24 | * | |
25 | */ | |
26 | /* | |
27 | * Authors: | |
28 | * Jerome Glisse <glisse@freedesktop.org> | |
29 | * Thomas Hellstrom <thomas-at-tungstengraphics-dot-com> | |
30 | * Dave Airlie | |
31 | */ | |
32 | #include <ttm/ttm_bo_api.h> | |
33 | #include <ttm/ttm_bo_driver.h> | |
34 | #include <ttm/ttm_placement.h> | |
35 | #include <ttm/ttm_module.h> | |
8d7cddcd | 36 | #include <ttm/ttm_page_alloc.h> |
771fe6b9 JG |
37 | #include <drm/drmP.h> |
38 | #include <drm/radeon_drm.h> | |
fa8a1238 | 39 | #include <linux/seq_file.h> |
5a0e3ad6 | 40 | #include <linux/slab.h> |
4cfe7629 | 41 | #include <linux/swiotlb.h> |
771fe6b9 JG |
42 | #include "radeon_reg.h" |
43 | #include "radeon.h" | |
44 | ||
45 | #define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT) | |
46 | ||
fa8a1238 DA |
47 | static int radeon_ttm_debugfs_init(struct radeon_device *rdev); |
48 | ||
771fe6b9 JG |
49 | static struct radeon_device *radeon_get_rdev(struct ttm_bo_device *bdev) |
50 | { | |
51 | struct radeon_mman *mman; | |
52 | struct radeon_device *rdev; | |
53 | ||
54 | mman = container_of(bdev, struct radeon_mman, bdev); | |
55 | rdev = container_of(mman, struct radeon_device, mman); | |
56 | return rdev; | |
57 | } | |
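An illustrative sketch (not driver code) of the container_of() pattern used above: the ttm_bo_device that TTM hands back is embedded in radeon_mman, which is itself embedded in radeon_device, so two container_of() steps walk back out to the owning device. A minimal user-space analogue with hypothetical stand-in types:

#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct inner  { int dummy; };            /* plays the role of ttm_bo_device */
struct middle { struct inner  in; };     /* plays the role of radeon_mman   */
struct outer  { struct middle mid; };    /* plays the role of radeon_device */

static struct outer *outer_from_inner(struct inner *in)
{
	/* step back out through each embedding, innermost first */
	struct middle *mid = container_of(in, struct middle, in);

	return container_of(mid, struct outer, mid);
}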
58 | ||
59 | ||
60 | /* | |
61 | * Global memory. | |
62 | */ | |
ba4420c2 | 63 | static int radeon_ttm_mem_global_init(struct drm_global_reference *ref) |
771fe6b9 JG |
64 | { |
65 | return ttm_mem_global_init(ref->object); | |
66 | } | |
67 | ||
ba4420c2 | 68 | static void radeon_ttm_mem_global_release(struct drm_global_reference *ref) |
771fe6b9 JG |
69 | { |
70 | ttm_mem_global_release(ref->object); | |
71 | } | |
72 | ||
73 | static int radeon_ttm_global_init(struct radeon_device *rdev) | |
74 | { | |
ba4420c2 | 75 | struct drm_global_reference *global_ref; |
771fe6b9 JG |
76 | int r; |
77 | ||
78 | rdev->mman.mem_global_referenced = false; | |
79 | global_ref = &rdev->mman.mem_global_ref; | |
ba4420c2 | 80 | global_ref->global_type = DRM_GLOBAL_TTM_MEM; |
771fe6b9 JG |
81 | global_ref->size = sizeof(struct ttm_mem_global); |
82 | global_ref->init = &radeon_ttm_mem_global_init; | |
83 | global_ref->release = &radeon_ttm_mem_global_release; | |
ba4420c2 | 84 | r = drm_global_item_ref(global_ref); |
771fe6b9 | 85 | if (r != 0) { |
a987fcaa TH |
86 | DRM_ERROR("Failed setting up TTM memory accounting " |
87 | "subsystem.\n"); | |
771fe6b9 JG |
88 | return r; |
89 | } | |
a987fcaa TH |
90 | |
91 | rdev->mman.bo_global_ref.mem_glob = | |
92 | rdev->mman.mem_global_ref.object; | |
93 | global_ref = &rdev->mman.bo_global_ref.ref; | |
ba4420c2 | 94 | global_ref->global_type = DRM_GLOBAL_TTM_BO; |
7f5f4db2 | 95 | global_ref->size = sizeof(struct ttm_bo_global); |
a987fcaa TH |
96 | global_ref->init = &ttm_bo_global_init; |
97 | global_ref->release = &ttm_bo_global_release; | |
ba4420c2 | 98 | r = drm_global_item_ref(global_ref); |
a987fcaa TH |
99 | if (r != 0) { |
100 | DRM_ERROR("Failed setting up TTM BO subsystem.\n"); | |
ba4420c2 | 101 | drm_global_item_unref(&rdev->mman.mem_global_ref); |
a987fcaa TH |
102 | return r; |
103 | } | |
104 | ||
771fe6b9 JG |
105 | rdev->mman.mem_global_referenced = true; |
106 | return 0; | |
107 | } | |
108 | ||
109 | static void radeon_ttm_global_fini(struct radeon_device *rdev) | |
110 | { | |
111 | if (rdev->mman.mem_global_referenced) { | |
ba4420c2 DA |
112 | drm_global_item_unref(&rdev->mman.bo_global_ref.ref); |
113 | drm_global_item_unref(&rdev->mman.mem_global_ref); | |
771fe6b9 JG |
114 | rdev->mman.mem_global_referenced = false; |
115 | } | |
116 | } | |
117 | ||
771fe6b9 JG |
118 | static int radeon_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags) |
119 | { | |
120 | return 0; | |
121 | } | |
122 | ||
123 | static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, | |
124 | struct ttm_mem_type_manager *man) | |
125 | { | |
126 | struct radeon_device *rdev; | |
127 | ||
128 | rdev = radeon_get_rdev(bdev); | |
129 | ||
130 | switch (type) { | |
131 | case TTM_PL_SYSTEM: | |
132 | /* System memory */ | |
133 | man->flags = TTM_MEMTYPE_FLAG_MAPPABLE; | |
134 | man->available_caching = TTM_PL_MASK_CACHING; | |
135 | man->default_caching = TTM_PL_FLAG_CACHED; | |
136 | break; | |
137 | case TTM_PL_TT: | |
d961db75 | 138 | man->func = &ttm_bo_manager_func; |
d594e46a | 139 | man->gpu_offset = rdev->mc.gtt_start; |
771fe6b9 JG |
140 | man->available_caching = TTM_PL_MASK_CACHING; |
141 | man->default_caching = TTM_PL_FLAG_CACHED; | |
55c93278 | 142 | man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA; |
771fe6b9 JG |
143 | #if __OS_HAS_AGP |
144 | if (rdev->flags & RADEON_IS_AGP) { | |
145 | if (!(drm_core_has_AGP(rdev->ddev) && rdev->ddev->agp)) { | |
146 | DRM_ERROR("AGP is not enabled for memory type %u\n", | |
147 | (unsigned)type); | |
148 | return -EINVAL; | |
149 | } | |
55c93278 | 150 | if (!rdev->ddev->agp->cant_use_aperture) |
0a2d50e3 | 151 | man->flags = TTM_MEMTYPE_FLAG_MAPPABLE; |
771fe6b9 JG |
152 | man->available_caching = TTM_PL_FLAG_UNCACHED | |
153 | TTM_PL_FLAG_WC; | |
154 | man->default_caching = TTM_PL_FLAG_WC; | |
771fe6b9 | 155 | } |
0c321c79 | 156 | #endif |
771fe6b9 JG |
157 | break; |
158 | case TTM_PL_VRAM: | |
159 | /* "On-card" video ram */ | |
d961db75 | 160 | man->func = &ttm_bo_manager_func; |
d594e46a | 161 | man->gpu_offset = rdev->mc.vram_start; |
771fe6b9 | 162 | man->flags = TTM_MEMTYPE_FLAG_FIXED | |
771fe6b9 JG |
163 | TTM_MEMTYPE_FLAG_MAPPABLE; |
164 | man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC; | |
165 | man->default_caching = TTM_PL_FLAG_WC; | |
771fe6b9 JG |
166 | break; |
167 | default: | |
168 | DRM_ERROR("Unsupported memory type %u\n", (unsigned)type); | |
169 | return -EINVAL; | |
170 | } | |
171 | return 0; | |
172 | } | |
173 | ||
312ea8da JG |
174 | static void radeon_evict_flags(struct ttm_buffer_object *bo, |
175 | struct ttm_placement *placement) | |
771fe6b9 | 176 | { |
d03d8589 JG |
177 | struct radeon_bo *rbo; |
178 | static u32 placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; | |
179 | ||
180 | if (!radeon_ttm_bo_is_radeon_bo(bo)) { | |
181 | placement->fpfn = 0; | |
182 | placement->lpfn = 0; | |
183 | placement->placement = &placements; | |
184 | placement->busy_placement = &placements; | |
185 | placement->num_placement = 1; | |
186 | placement->num_busy_placement = 1; | |
187 | return; | |
188 | } | |
189 | rbo = container_of(bo, struct radeon_bo, tbo); | |
771fe6b9 | 190 | switch (bo->mem.mem_type) { |
312ea8da | 191 | case TTM_PL_VRAM: |
13742208 | 192 | if (rbo->rdev->ring[radeon_copy_ring_index(rbo->rdev)].ready == false) |
9270eb1b DA |
193 | radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU); |
194 | else | |
195 | radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT); | |
312ea8da JG |
196 | break; |
197 | case TTM_PL_TT: | |
771fe6b9 | 198 | default: |
312ea8da | 199 | radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU); |
771fe6b9 | 200 | } |
eaa5fd1a | 201 | *placement = rbo->placement; |
771fe6b9 JG |
202 | } |
203 | ||
204 | static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp) | |
205 | { | |
206 | return 0; | |
207 | } | |
208 | ||
209 | static void radeon_move_null(struct ttm_buffer_object *bo, | |
210 | struct ttm_mem_reg *new_mem) | |
211 | { | |
212 | struct ttm_mem_reg *old_mem = &bo->mem; | |
213 | ||
214 | BUG_ON(old_mem->mm_node != NULL); | |
215 | *old_mem = *new_mem; | |
216 | new_mem->mm_node = NULL; | |
217 | } | |
218 | ||
219 | static int radeon_move_blit(struct ttm_buffer_object *bo, | |
97a875cb | 220 | bool evict, bool no_wait_gpu, |
9d87fa21 JG |
221 | struct ttm_mem_reg *new_mem, |
222 | struct ttm_mem_reg *old_mem) | |
771fe6b9 JG |
223 | { |
224 | struct radeon_device *rdev; | |
225 | uint64_t old_start, new_start; | |
876dc9f3 | 226 | struct radeon_fence *fence; |
876dc9f3 | 227 | int r, ridx; |
771fe6b9 JG |
228 | |
229 | rdev = radeon_get_rdev(bo->bdev); | |
876dc9f3 | 230 | ridx = radeon_copy_ring_index(rdev); |
85f44a42 CK |
231 | old_start = (u64)old_mem->start << PAGE_SHIFT; |
232 | new_start = (u64)new_mem->start << PAGE_SHIFT; | |
771fe6b9 JG |
233 | |
234 | switch (old_mem->mem_type) { | |
235 | case TTM_PL_VRAM: | |
d594e46a | 236 | old_start += rdev->mc.vram_start; |
771fe6b9 JG |
237 | break; |
238 | case TTM_PL_TT: | |
d594e46a | 239 | old_start += rdev->mc.gtt_start; |
771fe6b9 JG |
240 | break; |
241 | default: | |
242 | DRM_ERROR("Unknown placement %d\n", old_mem->mem_type); | |
243 | return -EINVAL; | |
244 | } | |
245 | switch (new_mem->mem_type) { | |
246 | case TTM_PL_VRAM: | |
d594e46a | 247 | new_start += rdev->mc.vram_start; |
771fe6b9 JG |
248 | break; |
249 | case TTM_PL_TT: | |
d594e46a | 250 | new_start += rdev->mc.gtt_start; |
771fe6b9 JG |
251 | break; |
252 | default: | |
253 | DRM_ERROR("Unknown placement %d\n", new_mem->mem_type); | |
254 | return -EINVAL; | |
255 | } | |
876dc9f3 | 256 | if (!rdev->ring[ridx].ready) { |
3000bf39 | 257 | DRM_ERROR("Trying to move memory with ring turned off.\n"); |
771fe6b9 JG |
258 | return -EINVAL; |
259 | } | |
003cefe0 AD |
260 | |
261 | BUILD_BUG_ON((PAGE_SIZE % RADEON_GPU_PAGE_SIZE) != 0); | |
262 | ||
3000bf39 | 263 | /* sync other rings */ |
876dc9f3 | 264 | fence = bo->sync_obj; |
003cefe0 AD |
265 | r = radeon_copy(rdev, old_start, new_start, |
266 | new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE), /* GPU pages */ | |
876dc9f3 | 267 | &fence); |
771fe6b9 | 268 | /* FIXME: handle copy error */ |
b03640b1 | 269 | r = ttm_bo_move_accel_cleanup(bo, (void *)fence, |
97a875cb | 270 | evict, no_wait_gpu, new_mem); |
771fe6b9 JG |
271 | radeon_fence_unref(&fence); |
272 | return r; | |
273 | } | |
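A quick arithmetic sketch of the length handed to radeon_copy() above: the copy length is expressed in GPU pages, so the buffer's CPU page count is scaled by PAGE_SIZE / RADEON_GPU_PAGE_SIZE, and the BUILD_BUG_ON guarantees that ratio is exact. The constants below are assumptions for illustration only (4 KiB pages on both sides), not values pulled from the driver headers:

#include <stdio.h>

#define EXAMPLE_PAGE_SIZE            4096UL  /* assumed CPU page size */
#define EXAMPLE_RADEON_GPU_PAGE_SIZE 4096UL  /* assumed GPU page size */

int main(void)
{
	unsigned long num_pages = 32;  /* CPU pages backing the buffer object */
	unsigned long gpu_pages =
		num_pages * (EXAMPLE_PAGE_SIZE / EXAMPLE_RADEON_GPU_PAGE_SIZE);

	/* factor is 1 here; with 64 KiB CPU pages it would be 16 */
	printf("%lu CPU pages -> %lu GPU pages\n", num_pages, gpu_pages);
	return 0;
}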
274 | ||
275 | static int radeon_move_vram_ram(struct ttm_buffer_object *bo, | |
9d87fa21 | 276 | bool evict, bool interruptible, |
97a875cb | 277 | bool no_wait_gpu, |
771fe6b9 JG |
278 | struct ttm_mem_reg *new_mem) |
279 | { | |
280 | struct radeon_device *rdev; | |
281 | struct ttm_mem_reg *old_mem = &bo->mem; | |
282 | struct ttm_mem_reg tmp_mem; | |
312ea8da JG |
283 | u32 placements; |
284 | struct ttm_placement placement; | |
771fe6b9 JG |
285 | int r; |
286 | ||
287 | rdev = radeon_get_rdev(bo->bdev); | |
288 | tmp_mem = *new_mem; | |
289 | tmp_mem.mm_node = NULL; | |
312ea8da JG |
290 | placement.fpfn = 0; |
291 | placement.lpfn = 0; | |
292 | placement.num_placement = 1; | |
293 | placement.placement = &placements; | |
294 | placement.num_busy_placement = 1; | |
295 | placement.busy_placement = &placements; | |
296 | placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT; | |
297 | r = ttm_bo_mem_space(bo, &placement, &tmp_mem, | |
97a875cb | 298 | interruptible, no_wait_gpu); |
771fe6b9 JG |
299 | if (unlikely(r)) { |
300 | return r; | |
301 | } | |
df67bed9 DA |
302 | |
303 | r = ttm_tt_set_placement_caching(bo->ttm, tmp_mem.placement); | |
304 | if (unlikely(r)) { | |
305 | goto out_cleanup; | |
306 | } | |
307 | ||
771fe6b9 JG |
308 | r = ttm_tt_bind(bo->ttm, &tmp_mem); |
309 | if (unlikely(r)) { | |
310 | goto out_cleanup; | |
311 | } | |
97a875cb | 312 | r = radeon_move_blit(bo, true, no_wait_gpu, &tmp_mem, old_mem); |
771fe6b9 JG |
313 | if (unlikely(r)) { |
314 | goto out_cleanup; | |
315 | } | |
97a875cb | 316 | r = ttm_bo_move_ttm(bo, true, no_wait_gpu, new_mem); |
771fe6b9 | 317 | out_cleanup: |
42311ff9 | 318 | ttm_bo_mem_put(bo, &tmp_mem); |
771fe6b9 JG |
319 | return r; |
320 | } | |
321 | ||
322 | static int radeon_move_ram_vram(struct ttm_buffer_object *bo, | |
9d87fa21 | 323 | bool evict, bool interruptible, |
97a875cb | 324 | bool no_wait_gpu, |
771fe6b9 JG |
325 | struct ttm_mem_reg *new_mem) |
326 | { | |
327 | struct radeon_device *rdev; | |
328 | struct ttm_mem_reg *old_mem = &bo->mem; | |
329 | struct ttm_mem_reg tmp_mem; | |
312ea8da JG |
330 | struct ttm_placement placement; |
331 | u32 placements; | |
771fe6b9 JG |
332 | int r; |
333 | ||
334 | rdev = radeon_get_rdev(bo->bdev); | |
335 | tmp_mem = *new_mem; | |
336 | tmp_mem.mm_node = NULL; | |
312ea8da JG |
337 | placement.fpfn = 0; |
338 | placement.lpfn = 0; | |
339 | placement.num_placement = 1; | |
340 | placement.placement = &placements; | |
341 | placement.num_busy_placement = 1; | |
342 | placement.busy_placement = &placements; | |
343 | placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT; | |
97a875cb ML |
344 | r = ttm_bo_mem_space(bo, &placement, &tmp_mem, |
345 | interruptible, no_wait_gpu); | |
771fe6b9 JG |
346 | if (unlikely(r)) { |
347 | return r; | |
348 | } | |
97a875cb | 349 | r = ttm_bo_move_ttm(bo, true, no_wait_gpu, &tmp_mem); |
771fe6b9 JG |
350 | if (unlikely(r)) { |
351 | goto out_cleanup; | |
352 | } | |
97a875cb | 353 | r = radeon_move_blit(bo, true, no_wait_gpu, new_mem, old_mem); |
771fe6b9 JG |
354 | if (unlikely(r)) { |
355 | goto out_cleanup; | |
356 | } | |
357 | out_cleanup: | |
42311ff9 | 358 | ttm_bo_mem_put(bo, &tmp_mem); |
771fe6b9 JG |
359 | return r; |
360 | } | |
361 | ||
362 | static int radeon_bo_move(struct ttm_buffer_object *bo, | |
9d87fa21 | 363 | bool evict, bool interruptible, |
97a875cb | 364 | bool no_wait_gpu, |
9d87fa21 | 365 | struct ttm_mem_reg *new_mem) |
771fe6b9 JG |
366 | { |
367 | struct radeon_device *rdev; | |
368 | struct ttm_mem_reg *old_mem = &bo->mem; | |
369 | int r; | |
370 | ||
371 | rdev = radeon_get_rdev(bo->bdev); | |
372 | if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) { | |
373 | radeon_move_null(bo, new_mem); | |
374 | return 0; | |
375 | } | |
376 | if ((old_mem->mem_type == TTM_PL_TT && | |
377 | new_mem->mem_type == TTM_PL_SYSTEM) || | |
378 | (old_mem->mem_type == TTM_PL_SYSTEM && | |
379 | new_mem->mem_type == TTM_PL_TT)) { | |
af901ca1 | 380 | /* bind is enough */ |
771fe6b9 JG |
381 | radeon_move_null(bo, new_mem); |
382 | return 0; | |
383 | } | |
27cd7769 AD |
384 | if (!rdev->ring[radeon_copy_ring_index(rdev)].ready || |
385 | rdev->asic->copy.copy == NULL) { | |
771fe6b9 | 386 | /* use memcpy */ |
1ab2e105 | 387 | goto memcpy; |
771fe6b9 JG |
388 | } |
389 | ||
390 | if (old_mem->mem_type == TTM_PL_VRAM && | |
391 | new_mem->mem_type == TTM_PL_SYSTEM) { | |
1ab2e105 | 392 | r = radeon_move_vram_ram(bo, evict, interruptible, |
97a875cb | 393 | no_wait_gpu, new_mem); |
771fe6b9 JG |
394 | } else if (old_mem->mem_type == TTM_PL_SYSTEM && |
395 | new_mem->mem_type == TTM_PL_VRAM) { | |
1ab2e105 | 396 | r = radeon_move_ram_vram(bo, evict, interruptible, |
97a875cb | 397 | no_wait_gpu, new_mem); |
771fe6b9 | 398 | } else { |
97a875cb | 399 | r = radeon_move_blit(bo, evict, no_wait_gpu, new_mem, old_mem); |
771fe6b9 | 400 | } |
1ab2e105 MD |
401 | |
402 | if (r) { | |
403 | memcpy: | |
97a875cb | 404 | r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem); |
1ab2e105 | 405 | } |
771fe6b9 JG |
406 | return r; |
407 | } | |
408 | ||
0a2d50e3 JG |
409 | static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) |
410 | { | |
411 | struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; | |
412 | struct radeon_device *rdev = radeon_get_rdev(bdev); | |
413 | ||
414 | mem->bus.addr = NULL; | |
415 | mem->bus.offset = 0; | |
416 | mem->bus.size = mem->num_pages << PAGE_SHIFT; | |
417 | mem->bus.base = 0; | |
418 | mem->bus.is_iomem = false; | |
419 | if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE)) | |
420 | return -EINVAL; | |
421 | switch (mem->mem_type) { | |
422 | case TTM_PL_SYSTEM: | |
423 | /* system memory */ | |
424 | return 0; | |
425 | case TTM_PL_TT: | |
426 | #if __OS_HAS_AGP | |
427 | if (rdev->flags & RADEON_IS_AGP) { | |
428 | /* RADEON_IS_AGP is set only if AGP is active */ | |
d961db75 | 429 | mem->bus.offset = mem->start << PAGE_SHIFT; |
0a2d50e3 | 430 | mem->bus.base = rdev->mc.agp_base; |
365048ff | 431 | mem->bus.is_iomem = !rdev->ddev->agp->cant_use_aperture; |
0a2d50e3 JG |
432 | } |
433 | #endif | |
434 | break; | |
435 | case TTM_PL_VRAM: | |
d961db75 | 436 | mem->bus.offset = mem->start << PAGE_SHIFT; |
0a2d50e3 JG |
437 | /* check if it's visible */ |
438 | if ((mem->bus.offset + mem->bus.size) > rdev->mc.visible_vram_size) | |
439 | return -EINVAL; | |
440 | mem->bus.base = rdev->mc.aper_base; | |
441 | mem->bus.is_iomem = true; | |
ffb57c4b JE |
442 | #ifdef __alpha__ |
443 | /* | |
444 | * Alpha: use bus.addr to hold the ioremap() return, | |
445 | * so we can modify bus.base below. | |
446 | */ | |
447 | if (mem->placement & TTM_PL_FLAG_WC) | |
448 | mem->bus.addr = | |
449 | ioremap_wc(mem->bus.base + mem->bus.offset, | |
450 | mem->bus.size); | |
451 | else | |
452 | mem->bus.addr = | |
453 | ioremap_nocache(mem->bus.base + mem->bus.offset, | |
454 | mem->bus.size); | |
455 | ||
456 | /* | |
457 | * Alpha: Use just the bus offset plus | |
458 | * the hose/domain memory base for bus.base. | |
459 | * It then can be used to build PTEs for VRAM | |
460 | * access, as done in ttm_bo_vm_fault(). | |
461 | */ | |
462 | mem->bus.base = (mem->bus.base & 0x0ffffffffUL) + | |
463 | rdev->ddev->hose->dense_mem_base; | |
464 | #endif | |
0a2d50e3 JG |
465 | break; |
466 | default: | |
467 | return -EINVAL; | |
468 | } | |
469 | return 0; | |
470 | } | |
471 | ||
472 | static void radeon_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) | |
473 | { | |
474 | } | |
475 | ||
dedfdffd | 476 | static int radeon_sync_obj_wait(void *sync_obj, bool lazy, bool interruptible) |
771fe6b9 JG |
477 | { |
478 | return radeon_fence_wait((struct radeon_fence *)sync_obj, interruptible); | |
479 | } | |
480 | ||
dedfdffd | 481 | static int radeon_sync_obj_flush(void *sync_obj) |
771fe6b9 JG |
482 | { |
483 | return 0; | |
484 | } | |
485 | ||
486 | static void radeon_sync_obj_unref(void **sync_obj) | |
487 | { | |
488 | radeon_fence_unref((struct radeon_fence **)sync_obj); | |
489 | } | |
490 | ||
491 | static void *radeon_sync_obj_ref(void *sync_obj) | |
492 | { | |
493 | return radeon_fence_ref((struct radeon_fence *)sync_obj); | |
494 | } | |
495 | ||
dedfdffd | 496 | static bool radeon_sync_obj_signaled(void *sync_obj) |
771fe6b9 JG |
497 | { |
498 | return radeon_fence_signaled((struct radeon_fence *)sync_obj); | |
499 | } | |
500 | ||
649bf3ca JG |
501 | /* |
502 | * TTM backend functions. | |
503 | */ | |
504 | struct radeon_ttm_tt { | |
8e7e7052 | 505 | struct ttm_dma_tt ttm; |
649bf3ca JG |
506 | struct radeon_device *rdev; |
507 | u64 offset; | |
508 | }; | |
509 | ||
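The (void *)ttm casts in the backend callbacks below rely on the embedded struct ttm_dma_tt (whose own first member is the struct ttm_tt that TTM passes around) being the first member of radeon_ttm_tt, so the same address can be reinterpreted as either type. A hedged stand-alone sketch with hypothetical stand-in types:

struct base   { int id; };                      /* plays the role of ttm_tt     */
struct dma    { struct base base; };            /* plays the role of ttm_dma_tt */
struct driver { struct dma dma; unsigned long long offset; };

static struct driver *driver_from_base(struct base *b)
{
	/* valid only because 'dma' (and its 'base') sit at offset zero */
	return (struct driver *)(void *)b;
}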
510 | static int radeon_ttm_backend_bind(struct ttm_tt *ttm, | |
511 | struct ttm_mem_reg *bo_mem) | |
512 | { | |
8e7e7052 | 513 | struct radeon_ttm_tt *gtt = (void*)ttm; |
649bf3ca JG |
514 | int r; |
515 | ||
649bf3ca JG |
516 | gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT); |
517 | if (!ttm->num_pages) { | |
518 | WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n", | |
519 | ttm->num_pages, bo_mem, ttm); | |
520 | } | |
521 | r = radeon_gart_bind(gtt->rdev, gtt->offset, | |
8e7e7052 | 522 | ttm->num_pages, ttm->pages, gtt->ttm.dma_address); |
649bf3ca JG |
523 | if (r) { |
524 | DRM_ERROR("failed to bind %lu pages at 0x%08X\n", | |
525 | ttm->num_pages, (unsigned)gtt->offset); | |
526 | return r; | |
527 | } | |
528 | return 0; | |
529 | } | |
530 | ||
531 | static int radeon_ttm_backend_unbind(struct ttm_tt *ttm) | |
532 | { | |
8e7e7052 | 533 | struct radeon_ttm_tt *gtt = (void *)ttm; |
649bf3ca | 534 | |
649bf3ca JG |
535 | radeon_gart_unbind(gtt->rdev, gtt->offset, ttm->num_pages); |
536 | return 0; | |
537 | } | |
538 | ||
539 | static void radeon_ttm_backend_destroy(struct ttm_tt *ttm) | |
540 | { | |
8e7e7052 | 541 | struct radeon_ttm_tt *gtt = (void *)ttm; |
649bf3ca | 542 | |
8e7e7052 | 543 | ttm_dma_tt_fini(&gtt->ttm); |
649bf3ca JG |
544 | kfree(gtt); |
545 | } | |
546 | ||
547 | static struct ttm_backend_func radeon_backend_func = { | |
548 | .bind = &radeon_ttm_backend_bind, | |
549 | .unbind = &radeon_ttm_backend_unbind, | |
550 | .destroy = &radeon_ttm_backend_destroy, | |
551 | }; | |
552 | ||
1109ca09 | 553 | static struct ttm_tt *radeon_ttm_tt_create(struct ttm_bo_device *bdev, |
649bf3ca JG |
554 | unsigned long size, uint32_t page_flags, |
555 | struct page *dummy_read_page) | |
556 | { | |
557 | struct radeon_device *rdev; | |
558 | struct radeon_ttm_tt *gtt; | |
559 | ||
560 | rdev = radeon_get_rdev(bdev); | |
561 | #if __OS_HAS_AGP | |
562 | if (rdev->flags & RADEON_IS_AGP) { | |
563 | return ttm_agp_tt_create(bdev, rdev->ddev->agp->bridge, | |
564 | size, page_flags, dummy_read_page); | |
565 | } | |
566 | #endif | |
567 | ||
568 | gtt = kzalloc(sizeof(struct radeon_ttm_tt), GFP_KERNEL); | |
569 | if (gtt == NULL) { | |
570 | return NULL; | |
571 | } | |
8e7e7052 | 572 | gtt->ttm.ttm.func = &radeon_backend_func; |
649bf3ca | 573 | gtt->rdev = rdev; |
8e7e7052 JG |
574 | if (ttm_dma_tt_init(&gtt->ttm, bdev, size, page_flags, dummy_read_page)) { |
575 | kfree(gtt); | |
649bf3ca JG |
576 | return NULL; |
577 | } | |
8e7e7052 | 578 | return &gtt->ttm.ttm; |
649bf3ca JG |
579 | } |
580 | ||
c52494f6 KRW |
581 | static int radeon_ttm_tt_populate(struct ttm_tt *ttm) |
582 | { | |
583 | struct radeon_device *rdev; | |
8e7e7052 | 584 | struct radeon_ttm_tt *gtt = (void *)ttm; |
c52494f6 KRW |
585 | unsigned i; |
586 | int r; | |
40f5cf99 | 587 | bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG); |
c52494f6 KRW |
588 | |
589 | if (ttm->state != tt_unpopulated) | |
590 | return 0; | |
591 | ||
40f5cf99 AD |
592 | if (slave && ttm->sg) { |
593 | drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages, | |
594 | gtt->ttm.dma_address, ttm->num_pages); | |
595 | ttm->state = tt_unbound; | |
596 | return 0; | |
597 | } | |
598 | ||
c52494f6 | 599 | rdev = radeon_get_rdev(ttm->bdev); |
dea7e0ac JG |
600 | #if __OS_HAS_AGP |
601 | if (rdev->flags & RADEON_IS_AGP) { | |
602 | return ttm_agp_tt_populate(ttm); | |
603 | } | |
604 | #endif | |
c52494f6 KRW |
605 | |
606 | #ifdef CONFIG_SWIOTLB | |
607 | if (swiotlb_nr_tbl()) { | |
8e7e7052 | 608 | return ttm_dma_populate(&gtt->ttm, rdev->dev); |
c52494f6 KRW |
609 | } |
610 | #endif | |
611 | ||
612 | r = ttm_pool_populate(ttm); | |
613 | if (r) { | |
614 | return r; | |
615 | } | |
616 | ||
617 | for (i = 0; i < ttm->num_pages; i++) { | |
8e7e7052 JG |
618 | gtt->ttm.dma_address[i] = pci_map_page(rdev->pdev, ttm->pages[i], |
619 | 0, PAGE_SIZE, | |
620 | PCI_DMA_BIDIRECTIONAL); | |
621 | if (pci_dma_mapping_error(rdev->pdev, gtt->ttm.dma_address[i])) { | |
a81bfc00 | 622 | while (i--) { |
8e7e7052 | 623 | pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i], |
c52494f6 | 624 | PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); |
8e7e7052 | 625 | gtt->ttm.dma_address[i] = 0; |
c52494f6 KRW |
626 | } |
627 | ttm_pool_unpopulate(ttm); | |
628 | return -EFAULT; | |
629 | } | |
630 | } | |
631 | return 0; | |
632 | } | |
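A self-contained sketch (user-space analogue, hypothetical names) of the rollback pattern used in radeon_ttm_tt_populate() above: when mapping page i fails, the while (i--) loop unwinds every mapping already made before unpopulating and returning the error:

#include <stdbool.h>
#include <stddef.h>

static bool map_one(size_t i)   { return i < 1000; }  /* stand-in for pci_map_page()   */
static void unmap_one(size_t i) { (void)i; }          /* stand-in for pci_unmap_page() */

static int map_all(size_t n)
{
	size_t i;

	for (i = 0; i < n; i++) {
		if (!map_one(i)) {
			while (i--)          /* roll back mappings 0 .. i-1 */
				unmap_one(i);
			return -1;
		}
	}
	return 0;
}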
633 | ||
634 | static void radeon_ttm_tt_unpopulate(struct ttm_tt *ttm) | |
635 | { | |
636 | struct radeon_device *rdev; | |
8e7e7052 | 637 | struct radeon_ttm_tt *gtt = (void *)ttm; |
c52494f6 | 638 | unsigned i; |
40f5cf99 AD |
639 | bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG); |
640 | ||
641 | if (slave) | |
642 | return; | |
c52494f6 KRW |
643 | |
644 | rdev = radeon_get_rdev(ttm->bdev); | |
dea7e0ac JG |
645 | #if __OS_HAS_AGP |
646 | if (rdev->flags & RADEON_IS_AGP) { | |
647 | ttm_agp_tt_unpopulate(ttm); | |
648 | return; | |
649 | } | |
650 | #endif | |
c52494f6 KRW |
651 | |
652 | #ifdef CONFIG_SWIOTLB | |
653 | if (swiotlb_nr_tbl()) { | |
8e7e7052 | 654 | ttm_dma_unpopulate(&gtt->ttm, rdev->dev); |
c52494f6 KRW |
655 | return; |
656 | } | |
657 | #endif | |
658 | ||
659 | for (i = 0; i < ttm->num_pages; i++) { | |
8e7e7052 JG |
660 | if (gtt->ttm.dma_address[i]) { |
661 | pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i], | |
c52494f6 KRW |
662 | PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); |
663 | } | |
664 | } | |
665 | ||
666 | ttm_pool_unpopulate(ttm); | |
667 | } | |
649bf3ca | 668 | |
771fe6b9 | 669 | static struct ttm_bo_driver radeon_bo_driver = { |
649bf3ca | 670 | .ttm_tt_create = &radeon_ttm_tt_create, |
c52494f6 KRW |
671 | .ttm_tt_populate = &radeon_ttm_tt_populate, |
672 | .ttm_tt_unpopulate = &radeon_ttm_tt_unpopulate, | |
771fe6b9 JG |
673 | .invalidate_caches = &radeon_invalidate_caches, |
674 | .init_mem_type = &radeon_init_mem_type, | |
675 | .evict_flags = &radeon_evict_flags, | |
676 | .move = &radeon_bo_move, | |
677 | .verify_access = &radeon_verify_access, | |
678 | .sync_obj_signaled = &radeon_sync_obj_signaled, | |
679 | .sync_obj_wait = &radeon_sync_obj_wait, | |
680 | .sync_obj_flush = &radeon_sync_obj_flush, | |
681 | .sync_obj_unref = &radeon_sync_obj_unref, | |
682 | .sync_obj_ref = &radeon_sync_obj_ref, | |
e024e110 DA |
683 | .move_notify = &radeon_bo_move_notify, |
684 | .fault_reserve_notify = &radeon_bo_fault_reserve_notify, | |
0a2d50e3 JG |
685 | .io_mem_reserve = &radeon_ttm_io_mem_reserve, |
686 | .io_mem_free = &radeon_ttm_io_mem_free, | |
771fe6b9 JG |
687 | }; |
688 | ||
689 | int radeon_ttm_init(struct radeon_device *rdev) | |
690 | { | |
691 | int r; | |
692 | ||
693 | r = radeon_ttm_global_init(rdev); | |
694 | if (r) { | |
695 | return r; | |
696 | } | |
697 | /* No other users of the address space, so set it to 0 */ | |
698 | r = ttm_bo_device_init(&rdev->mman.bdev, | |
a987fcaa | 699 | rdev->mman.bo_global_ref.ref.object, |
ad49f501 DA |
700 | &radeon_bo_driver, DRM_FILE_PAGE_OFFSET, |
701 | rdev->need_dma32); | |
771fe6b9 JG |
702 | if (r) { |
703 | DRM_ERROR("failed initializing buffer object driver(%d).\n", r); | |
704 | return r; | |
705 | } | |
0a0c7596 | 706 | rdev->mman.initialized = true; |
4c788679 | 707 | r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_VRAM, |
312ea8da | 708 | rdev->mc.real_vram_size >> PAGE_SHIFT); |
771fe6b9 JG |
709 | if (r) { |
710 | DRM_ERROR("Failed initializing VRAM heap.\n"); | |
711 | return r; | |
712 | } | |
441921d5 | 713 | r = radeon_bo_create(rdev, 256 * 1024, PAGE_SIZE, true, |
40f5cf99 AD |
714 | RADEON_GEM_DOMAIN_VRAM, |
715 | NULL, &rdev->stollen_vga_memory); | |
771fe6b9 JG |
716 | if (r) { |
717 | return r; | |
718 | } | |
4c788679 JG |
719 | r = radeon_bo_reserve(rdev->stollen_vga_memory, false); |
720 | if (r) | |
721 | return r; | |
722 | r = radeon_bo_pin(rdev->stollen_vga_memory, RADEON_GEM_DOMAIN_VRAM, NULL); | |
723 | radeon_bo_unreserve(rdev->stollen_vga_memory); | |
771fe6b9 | 724 | if (r) { |
4c788679 | 725 | radeon_bo_unref(&rdev->stollen_vga_memory); |
771fe6b9 JG |
726 | return r; |
727 | } | |
728 | DRM_INFO("radeon: %uM of VRAM memory ready\n", | |
fc986034 | 729 | (unsigned) (rdev->mc.real_vram_size / (1024 * 1024))); |
4c788679 | 730 | r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_TT, |
312ea8da | 731 | rdev->mc.gtt_size >> PAGE_SHIFT); |
771fe6b9 JG |
732 | if (r) { |
733 | DRM_ERROR("Failed initializing GTT heap.\n"); | |
734 | return r; | |
735 | } | |
736 | DRM_INFO("radeon: %uM of GTT memory ready.\n", | |
3ce0a23d | 737 | (unsigned)(rdev->mc.gtt_size / (1024 * 1024))); |
949c4a34 | 738 | rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping; |
fa8a1238 DA |
739 | |
740 | r = radeon_ttm_debugfs_init(rdev); | |
741 | if (r) { | |
742 | DRM_ERROR("Failed to init debugfs\n"); | |
743 | return r; | |
744 | } | |
771fe6b9 JG |
745 | return 0; |
746 | } | |
747 | ||
748 | void radeon_ttm_fini(struct radeon_device *rdev) | |
749 | { | |
4c788679 JG |
750 | int r; |
751 | ||
0a0c7596 JG |
752 | if (!rdev->mman.initialized) |
753 | return; | |
771fe6b9 | 754 | if (rdev->stollen_vga_memory) { |
4c788679 JG |
755 | r = radeon_bo_reserve(rdev->stollen_vga_memory, false); |
756 | if (r == 0) { | |
757 | radeon_bo_unpin(rdev->stollen_vga_memory); | |
758 | radeon_bo_unreserve(rdev->stollen_vga_memory); | |
759 | } | |
760 | radeon_bo_unref(&rdev->stollen_vga_memory); | |
771fe6b9 JG |
761 | } |
762 | ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_VRAM); | |
763 | ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_TT); | |
764 | ttm_bo_device_release(&rdev->mman.bdev); | |
765 | radeon_gart_fini(rdev); | |
766 | radeon_ttm_global_fini(rdev); | |
0a0c7596 | 767 | rdev->mman.initialized = false; |
771fe6b9 JG |
768 | DRM_INFO("radeon: ttm finalized\n"); |
769 | } | |
770 | ||
53595338 DA |
771 | /* this should only be called at bootup or when userspace |
772 | * isn't running */ | |
773 | void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size) | |
774 | { | |
775 | struct ttm_mem_type_manager *man; | |
776 | ||
777 | if (!rdev->mman.initialized) | |
778 | return; | |
779 | ||
780 | man = &rdev->mman.bdev.man[TTM_PL_VRAM]; | |
781 | /* this just adjusts TTM's idea of the size, which sets lpfn to the correct value */ | |
782 | man->size = size >> PAGE_SHIFT; | |
783 | } | |
784 | ||
771fe6b9 | 785 | static struct vm_operations_struct radeon_ttm_vm_ops; |
f0f37e2f | 786 | static const struct vm_operations_struct *ttm_vm_ops = NULL; |
771fe6b9 JG |
787 | |
788 | static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | |
789 | { | |
790 | struct ttm_buffer_object *bo; | |
5876dd24 | 791 | struct radeon_device *rdev; |
771fe6b9 JG |
792 | int r; |
793 | ||
5876dd24 | 794 | bo = (struct ttm_buffer_object *)vma->vm_private_data; |
771fe6b9 JG |
795 | if (bo == NULL) { |
796 | return VM_FAULT_NOPAGE; | |
797 | } | |
5876dd24 | 798 | rdev = radeon_get_rdev(bo->bdev); |
db7fce39 | 799 | down_read(&rdev->pm.mclk_lock); |
771fe6b9 | 800 | r = ttm_vm_ops->fault(vma, vmf); |
db7fce39 | 801 | up_read(&rdev->pm.mclk_lock); |
771fe6b9 JG |
802 | return r; |
803 | } | |
804 | ||
805 | int radeon_mmap(struct file *filp, struct vm_area_struct *vma) | |
806 | { | |
807 | struct drm_file *file_priv; | |
808 | struct radeon_device *rdev; | |
809 | int r; | |
810 | ||
811 | if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) { | |
812 | return drm_mmap(filp, vma); | |
813 | } | |
814 | ||
40b3be3f | 815 | file_priv = filp->private_data; |
771fe6b9 JG |
816 | rdev = file_priv->minor->dev->dev_private; |
817 | if (rdev == NULL) { | |
818 | return -EINVAL; | |
819 | } | |
820 | r = ttm_bo_mmap(filp, vma, &rdev->mman.bdev); | |
821 | if (unlikely(r != 0)) { | |
822 | return r; | |
823 | } | |
824 | if (unlikely(ttm_vm_ops == NULL)) { | |
825 | ttm_vm_ops = vma->vm_ops; | |
826 | radeon_ttm_vm_ops = *ttm_vm_ops; | |
827 | radeon_ttm_vm_ops.fault = &radeon_ttm_fault; | |
828 | } | |
829 | vma->vm_ops = &radeon_ttm_vm_ops; | |
830 | return 0; | |
831 | } | |
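A minimal sketch (hypothetical user-space types, not the vm_operations_struct API) of the wrapping done in radeon_mmap()/radeon_ttm_fault() above: on first use the original ops table is copied, the fault callback is replaced, and a saved pointer to the original table lets the wrapper delegate after taking the driver's lock:

struct ops { int (*fault)(int arg); };

static struct ops wrapped_ops;
static const struct ops *orig_ops;   /* set once, on first mmap */

static int wrapped_fault(int arg)
{
	/* a real wrapper would take a driver lock here (e.g. pm.mclk_lock) */
	return orig_ops->fault(arg);
}

static void install_wrapper(const struct ops **ops)
{
	if (orig_ops == NULL) {
		orig_ops = *ops;                    /* remember the original table */
		wrapped_ops = **ops;                /* copy it ...                 */
		wrapped_ops.fault = wrapped_fault;  /* ... and override fault      */
	}
	*ops = &wrapped_ops;
}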
832 | ||
833 | ||
fa8a1238 DA |
834 | #define RADEON_DEBUGFS_MEM_TYPES 2 |
835 | ||
fa8a1238 DA |
836 | #if defined(CONFIG_DEBUG_FS) |
837 | static int radeon_mm_dump_table(struct seq_file *m, void *data) | |
838 | { | |
839 | struct drm_info_node *node = (struct drm_info_node *)m->private; | |
840 | struct drm_mm *mm = (struct drm_mm *)node->info_ent->data; | |
841 | struct drm_device *dev = node->minor->dev; | |
842 | struct radeon_device *rdev = dev->dev_private; | |
843 | int ret; | |
844 | struct ttm_bo_global *glob = rdev->mman.bdev.glob; | |
845 | ||
846 | spin_lock(&glob->lru_lock); | |
847 | ret = drm_mm_dump_table(m, mm); | |
848 | spin_unlock(&glob->lru_lock); | |
849 | return ret; | |
850 | } | |
851 | #endif | |
852 | ||
853 | static int radeon_ttm_debugfs_init(struct radeon_device *rdev) | |
854 | { | |
f4e45d02 | 855 | #if defined(CONFIG_DEBUG_FS) |
c52494f6 KRW |
856 | static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES+2]; |
857 | static char radeon_mem_types_names[RADEON_DEBUGFS_MEM_TYPES+2][32]; | |
fa8a1238 DA |
858 | unsigned i; |
859 | ||
fa8a1238 DA |
860 | for (i = 0; i < RADEON_DEBUGFS_MEM_TYPES; i++) { |
861 | if (i == 0) | |
862 | sprintf(radeon_mem_types_names[i], "radeon_vram_mm"); | |
863 | else | |
864 | sprintf(radeon_mem_types_names[i], "radeon_gtt_mm"); | |
865 | radeon_mem_types_list[i].name = radeon_mem_types_names[i]; | |
866 | radeon_mem_types_list[i].show = &radeon_mm_dump_table; | |
867 | radeon_mem_types_list[i].driver_features = 0; | |
868 | if (i == 0) | |
16f9fdcb | 869 | radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_VRAM].priv; |
fa8a1238 | 870 | else |
16f9fdcb | 871 | radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_TT].priv; |
fa8a1238 DA |
872 | |
873 | } | |
8d7cddcd PN |
874 | /* Add ttm page pool to debugfs */ |
875 | sprintf(radeon_mem_types_names[i], "ttm_page_pool"); | |
876 | radeon_mem_types_list[i].name = radeon_mem_types_names[i]; | |
877 | radeon_mem_types_list[i].show = &ttm_page_alloc_debugfs; | |
878 | radeon_mem_types_list[i].driver_features = 0; | |
c52494f6 KRW |
879 | radeon_mem_types_list[i++].data = NULL; |
880 | #ifdef CONFIG_SWIOTLB | |
881 | if (swiotlb_nr_tbl()) { | |
882 | sprintf(radeon_mem_types_names[i], "ttm_dma_page_pool"); | |
883 | radeon_mem_types_list[i].name = radeon_mem_types_names[i]; | |
884 | radeon_mem_types_list[i].show = &ttm_dma_page_alloc_debugfs; | |
885 | radeon_mem_types_list[i].driver_features = 0; | |
886 | radeon_mem_types_list[i++].data = NULL; | |
887 | } | |
888 | #endif | |
889 | return radeon_debugfs_add_files(rdev, radeon_mem_types_list, i); | |
fa8a1238 DA |
890 | |
891 | #endif | |
892 | return 0; | |
893 | } |