	case RADEON_INFO_VCE_FB_VERSION:
		*value = rdev->vce.fb_version;
		break;
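+	/* the next three queries return 64-bit counters: point 'value' at
+	 * value64 and widen value_size to sizeof(uint64_t) so the full
+	 * 64-bit result is returned to userspace
+	 */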
+	case RADEON_INFO_NUM_BYTES_MOVED:
+		value = (uint32_t*)&value64;
+		value_size = sizeof(uint64_t);
+		value64 = atomic64_read(&rdev->num_bytes_moved);
+		break;
+	case RADEON_INFO_VRAM_USAGE:
+		value = (uint32_t*)&value64;
+		value_size = sizeof(uint64_t);
+		value64 = atomic64_read(&rdev->vram_usage);
+		break;
+	case RADEON_INFO_GTT_USAGE:
+		value = (uint32_t*)&value64;
+		value_size = sizeof(uint64_t);
+		value64 = atomic64_read(&rdev->gtt_usage);
+		break;
	default:
		DRM_DEBUG_KMS("Invalid request %d\n", info->request);
		return -EINVAL;
	}
}
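+/* Bookkeeping for the new usage counters: add the BO's size to the GTT or
+ * VRAM counter when it enters a domain (sign > 0) and subtract it otherwise.
+ */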
+static void radeon_update_memory_usage(struct radeon_bo *bo,
+				       unsigned mem_type, int sign)
+{
+	struct radeon_device *rdev = bo->rdev;
+	u64 size = (u64)bo->tbo.num_pages << PAGE_SHIFT;
+
+	switch (mem_type) {
+	case TTM_PL_TT:
+		if (sign > 0)
+			atomic64_add(size, &rdev->gtt_usage);
+		else
+			atomic64_sub(size, &rdev->gtt_usage);
+		break;
+	case TTM_PL_VRAM:
+		if (sign > 0)
+			atomic64_add(size, &rdev->vram_usage);
+		else
+			atomic64_sub(size, &rdev->vram_usage);
+		break;
+	}
+}
+
static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct radeon_bo *bo;
	bo = container_of(tbo, struct radeon_bo, tbo);
+
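+	/* the BO is being destroyed, drop its size from the usage counters */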
+	radeon_update_memory_usage(bo, bo->tbo.mem.mem_type, -1);
+
	mutex_lock(&bo->rdev->gem.mutex);
	list_del_init(&bo->list);
	mutex_unlock(&bo->rdev->gem.mutex);
}
void radeon_bo_move_notify(struct ttm_buffer_object *bo,
-			   struct ttm_mem_reg *mem)
+			   struct ttm_mem_reg *new_mem)
{
	struct radeon_bo *rbo;
+
	if (!radeon_ttm_bo_is_radeon_bo(bo))
		return;
+
	rbo = container_of(bo, struct radeon_bo, tbo);
	radeon_bo_check_tiling(rbo, 0, 1);
	radeon_vm_bo_invalidate(rbo->rdev, rbo);
+
+	/* update statistics */
+	if (!new_mem)
+		return;
+
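+	/* on a move, shift the BO's size from the old domain to the new one */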
+	radeon_update_memory_usage(rbo, bo->mem.mem_type, -1);
+	radeon_update_memory_usage(rbo, new_mem->mem_type, 1);
}
int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
extern int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
				  bool force_drop);
extern void radeon_bo_move_notify(struct ttm_buffer_object *bo,
-				  struct ttm_mem_reg *mem);
+				  struct ttm_mem_reg *new_mem);
extern int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
extern int radeon_bo_get_surface_reg(struct radeon_bo *bo);
	if (r) {
memcpy:
		r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
+		if (r) {
+			return r;
+		}
	}
-	return r;
+
+	/* update statistics */
+	atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &rdev->num_bytes_moved);
+	return 0;
}
static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
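
For reference (not part of the patch), a minimal sketch of how userspace might read the new counters through the existing DRM_RADEON_INFO ioctl. It assumes the matching RADEON_INFO_* defines land in the uapi header, which this excerpt does not show, and the helper name query_radeon_counter() is made up for illustration:

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <radeon_drm.h>

/* read one 64-bit counter from the radeon kernel driver */
static int query_radeon_counter(int fd, uint32_t request, uint64_t *out)
{
	struct drm_radeon_info info;

	memset(&info, 0, sizeof(info));
	info.request = request;
	info.value = (uintptr_t)out;	/* the kernel copies the result here */

	return drmCommandWriteRead(fd, DRM_RADEON_INFO, &info, sizeof(info));
}

/* e.g.: uint64_t moved; query_radeon_counter(fd, RADEON_INFO_NUM_BYTES_MOVED, &moved); */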