From: Christian König
Date: Tue, 1 Mar 2016 15:46:18 +0000 (+0100)
Subject: drm/amdgpu: add a fence after the VM flush
X-Git-Url: https://git.stricted.de/?a=commitdiff_plain;h=41d9eb2c5a2a21c9120e906d077e77562883510e;p=GitHub%2FLineageOS%2Fandroid_kernel_motorola_exynos9610.git

drm/amdgpu: add a fence after the VM flush

This way we can track when the flush is done.

Signed-off-by: Christian König
Acked-by: Alex Deucher
Reviewed-by: Chunming Zhou
Signed-off-by: Alex Deucher
---

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 148e2c61463c..66e51f9e593b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -880,6 +880,7 @@ struct amdgpu_vm_id {
 	struct list_head	list;
 	struct fence		*first;
 	struct amdgpu_sync	active;
+	struct fence		*last_flush;
 	atomic_long_t		owner;
 
 	uint64_t		pd_gpu_addr;
@@ -926,11 +927,11 @@ void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
 int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 		      struct amdgpu_sync *sync, struct fence *fence,
 		      unsigned *vm_id, uint64_t *vm_pd_addr);
-void amdgpu_vm_flush(struct amdgpu_ring *ring,
-		     unsigned vm_id, uint64_t pd_addr,
-		     uint32_t gds_base, uint32_t gds_size,
-		     uint32_t gws_base, uint32_t gws_size,
-		     uint32_t oa_base, uint32_t oa_size);
+int amdgpu_vm_flush(struct amdgpu_ring *ring,
+		    unsigned vm_id, uint64_t pd_addr,
+		    uint32_t gds_base, uint32_t gds_size,
+		    uint32_t gws_base, uint32_t gws_size,
+		    uint32_t oa_base, uint32_t oa_size);
 void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id);
 uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr);
 int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 644336d76aca..83973d051080 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -155,10 +155,14 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 
 	if (vm) {
 		/* do context switch */
-		amdgpu_vm_flush(ring, ib->vm_id, ib->vm_pd_addr,
-				ib->gds_base, ib->gds_size,
-				ib->gws_base, ib->gws_size,
-				ib->oa_base, ib->oa_size);
+		r = amdgpu_vm_flush(ring, ib->vm_id, ib->vm_pd_addr,
+				    ib->gds_base, ib->gds_size,
+				    ib->gws_base, ib->gws_size,
+				    ib->oa_base, ib->oa_size);
+		if (r) {
+			amdgpu_ring_undo(ring);
+			return r;
+		}
 
 		if (ring->funcs->emit_hdp_flush)
 			amdgpu_ring_emit_hdp_flush(ring);
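Not part of the patch, but context for the new error handling in
amdgpu_ib_schedule() above: once amdgpu_vm_flush() can fail, any commands it
has already written to the ring must be rolled back before the error is
returned. A minimal sketch of that reserve/undo/commit pattern, assuming the
4.5-era amdgpu ring helpers (amdgpu_ring_alloc(), amdgpu_ring_undo(),
amdgpu_ring_commit()); the surrounding amdgpu_ib_schedule() code is abridged:

	/* Reserve ring space up front; everything emitted afterwards
	 * stays private until amdgpu_ring_commit() publishes it. */
	r = amdgpu_ring_alloc(ring, 256 * num_ibs);
	if (r)
		return r;

	r = amdgpu_vm_flush(ring, ib->vm_id, ib->vm_pd_addr,
			    ib->gds_base, ib->gds_size,
			    ib->gws_base, ib->gws_size,
			    ib->oa_base, ib->oa_size);
	if (r) {
		/* Rewind the ring write pointer, discarding the
		 * partially emitted flush commands. */
		amdgpu_ring_undo(ring);
		return r;
	}

	/* ... emit the HDP flush, the IBs and the submission fence ... */

	/* Make the queued commands visible to the GPU. */
	amdgpu_ring_commit(ring);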
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index d0cce7c3129a..252445f578f6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -236,6 +236,9 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 		fence_put(id->first);
 		id->first = fence_get(fence);
 
+		fence_put(id->last_flush);
+		id->last_flush = NULL;
+
 		fence_put(id->flushed_updates);
 		id->flushed_updates = fence_get(updates);
 
@@ -263,11 +266,11 @@ error:
  *
  * Emit a VM flush when it is necessary.
  */
-void amdgpu_vm_flush(struct amdgpu_ring *ring,
-		     unsigned vm_id, uint64_t pd_addr,
-		     uint32_t gds_base, uint32_t gds_size,
-		     uint32_t gws_base, uint32_t gws_size,
-		     uint32_t oa_base, uint32_t oa_size)
+int amdgpu_vm_flush(struct amdgpu_ring *ring,
+		    unsigned vm_id, uint64_t pd_addr,
+		    uint32_t gds_base, uint32_t gds_size,
+		    uint32_t gws_base, uint32_t gws_size,
+		    uint32_t oa_base, uint32_t oa_size)
 {
 	struct amdgpu_device *adev = ring->adev;
 	struct amdgpu_vm_id *id = &adev->vm_manager.ids[vm_id];
@@ -278,14 +281,25 @@ void amdgpu_vm_flush(struct amdgpu_ring *ring,
 		id->gws_size != gws_size ||
 		id->oa_base != oa_base ||
 		id->oa_size != oa_size);
+	int r;
 
 	if (ring->funcs->emit_pipeline_sync && (
 	    pd_addr != AMDGPU_VM_NO_FLUSH || gds_switch_needed))
 		amdgpu_ring_emit_pipeline_sync(ring);
 
 	if (pd_addr != AMDGPU_VM_NO_FLUSH) {
+		struct fence *fence;
+
 		trace_amdgpu_vm_flush(pd_addr, ring->idx, vm_id);
 		amdgpu_ring_emit_vm_flush(ring, vm_id, pd_addr);
+		r = amdgpu_fence_emit(ring, &fence);
+		if (r)
+			return r;
+
+		mutex_lock(&adev->vm_manager.lock);
+		fence_put(id->last_flush);
+		id->last_flush = fence;
+		mutex_unlock(&adev->vm_manager.lock);
 	}
 
 	if (gds_switch_needed) {
@@ -300,6 +314,8 @@ void amdgpu_vm_flush(struct amdgpu_ring *ring,
 					    gws_base, gws_size,
 					    oa_base, oa_size);
 	}
+
+	return 0;
 }
 
 /**
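Not part of the patch, but an illustration of what the new fence enables. The
commit message's "track when the flush is done" means a consumer can now wait
on, or poll, the fence emitted right after the VM flush, for example before
deciding that a VMID's page-table state is safe to rely on or reuse. A minimal
sketch, assuming the 4.5-era generic fence API (fence_is_signaled() and
fence_wait(), later renamed dma_fence); locking around id->last_flush is
elided:

	struct fence *flush = id->last_flush;
	int r;

	/* A NULL fence means no flush has been emitted for this ID yet. */
	if (flush && !fence_is_signaled(flush)) {
		/* Interruptible wait until the flush commands have
		 * actually executed on the ring. */
		r = fence_wait(flush, true);
		if (r)
			return r;
	}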