From: Christian König
Date: Tue, 1 Mar 2016 14:09:25 +0000 (+0100)
Subject: drm/amdgpu: switch the GDS only on demand v2
X-Git-Url: https://git.stricted.de/?a=commitdiff_plain;h=971fe9a9414b2ccabc11ff6a5ff6be0d6f2dabda;p=GitHub%2FLineageOS%2Fandroid_kernel_motorola_exynos9610.git

drm/amdgpu: switch the GDS only on demand v2

Switching the GDS space too often seems to be problematic. This patch,
together with the following one, can avoid VM faults on context switch.

v2: extend commit message a bit

Signed-off-by: Christian König
Reviewed-by: Alex Deucher (v1)
Reviewed-by: Chunming Zhou (v1)
---

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index b6fae4b301d7..866b5fa298e7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -922,6 +922,13 @@ struct amdgpu_vm_manager_id {
 	struct list_head	list;
 	struct fence		*active;
 	atomic_long_t		owner;
+
+	uint32_t		gds_base;
+	uint32_t		gds_size;
+	uint32_t		gws_base;
+	uint32_t		gws_size;
+	uint32_t		oa_base;
+	uint32_t		oa_size;
 };
 
 struct amdgpu_vm_manager {
@@ -961,6 +968,7 @@ void amdgpu_vm_flush(struct amdgpu_ring *ring,
 		     uint32_t gds_base, uint32_t gds_size,
 		     uint32_t gws_base, uint32_t gws_size,
 		     uint32_t oa_base, uint32_t oa_size);
+void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id);
 uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr);
 int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
 				    struct amdgpu_vm *vm);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index e63e57e51db7..798d46626820 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -165,6 +165,8 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 
 		if (ib->ctx != ctx || ib->vm != vm) {
 			ring->current_ctx = old_ctx;
+			if (ib->vm_id)
+				amdgpu_vm_reset_id(adev, ib->vm_id);
 			amdgpu_ring_undo(ring);
 			return -EINVAL;
 		}
@@ -181,6 +183,8 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 	if (r) {
 		dev_err(adev->dev, "failed to emit fence (%d)\n", r);
 		ring->current_ctx = old_ctx;
+		if (ib->vm_id)
+			amdgpu_vm_reset_id(adev, ib->vm_id);
 		amdgpu_ring_undo(ring);
 		return r;
 	}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 5fab5b25b935..8642a1ccd6c3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -252,16 +252,53 @@ void amdgpu_vm_flush(struct amdgpu_ring *ring,
 		     uint32_t gws_base, uint32_t gws_size,
 		     uint32_t oa_base, uint32_t oa_size)
 {
+	struct amdgpu_device *adev = ring->adev;
+	struct amdgpu_vm_manager_id *mgr_id = &adev->vm_manager.ids[vm_id];
+
 	if (pd_addr != AMDGPU_VM_NO_FLUSH) {
 		trace_amdgpu_vm_flush(pd_addr, ring->idx, vm_id);
 		amdgpu_ring_emit_vm_flush(ring, vm_id, pd_addr);
 	}
 
-	if (ring->funcs->emit_gds_switch)
+	if (ring->funcs->emit_gds_switch && (
+	    mgr_id->gds_base != gds_base ||
+	    mgr_id->gds_size != gds_size ||
+	    mgr_id->gws_base != gws_base ||
+	    mgr_id->gws_size != gws_size ||
+	    mgr_id->oa_base != oa_base ||
+	    mgr_id->oa_size != oa_size)) {
+
+		mgr_id->gds_base = gds_base;
+		mgr_id->gds_size = gds_size;
+		mgr_id->gws_base = gws_base;
+		mgr_id->gws_size = gws_size;
+		mgr_id->oa_base = oa_base;
+		mgr_id->oa_size = oa_size;
 		amdgpu_ring_emit_gds_switch(ring, vm_id,
 					    gds_base, gds_size,
 					    gws_base, gws_size,
 					    oa_base, oa_size);
+	}
+}
+
+/**
+ * amdgpu_vm_reset_id - reset VMID to zero
+ *
+ * @adev: amdgpu device structure
+ * @vm_id: vmid number to use
+ *
+ * Reset saved GDS, GWS and OA to force switch on next flush.
+ */
+void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id)
+{
+	struct amdgpu_vm_manager_id *mgr_id = &adev->vm_manager.ids[vm_id];
+
+	mgr_id->gds_base = 0;
+	mgr_id->gds_size = 0;
+	mgr_id->gws_base = 0;
+	mgr_id->gws_size = 0;
+	mgr_id->oa_base = 0;
+	mgr_id->oa_size = 0;
 }
 
 /**
@@ -1425,9 +1462,11 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
 	INIT_LIST_HEAD(&adev->vm_manager.ids_lru);
 
 	/* skip over VMID 0, since it is the system VM */
-	for (i = 1; i < adev->vm_manager.num_ids; ++i)
+	for (i = 1; i < adev->vm_manager.num_ids; ++i) {
+		amdgpu_vm_reset_id(adev, i);
 		list_add_tail(&adev->vm_manager.ids[i].list,
 			      &adev->vm_manager.ids_lru);
+	}
 
 	atomic_set(&adev->vm_manager.vm_pte_next_ring, 0);
 }
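
For readers unfamiliar with the pattern: the patch caches the last
GDS/GWS/OA configuration programmed for each VMID and skips the
(apparently problematic) GDS switch when an identical configuration is
requested again. Below is a minimal user-space C sketch of the same
idea; every name in it is a hypothetical stand-in (vmid_state mirrors
the fields added to struct amdgpu_vm_manager_id, flush() mirrors the
comparison in amdgpu_vm_flush(), reset_id() mirrors
amdgpu_vm_reset_id()), not driver code.

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	#define NUM_IDS 16

	/* Cached per-VMID GDS/GWS/OA configuration; all zeros means
	 * "unknown", so the next flush is forced to program it. */
	struct vmid_state {
		uint32_t gds_base, gds_size;
		uint32_t gws_base, gws_size;
		uint32_t oa_base, oa_size;
	};

	static struct vmid_state ids[NUM_IDS];

	/* Stand-in for amdgpu_ring_emit_gds_switch(): the expensive
	 * operation we want to avoid repeating needlessly. */
	static void emit_gds_switch(unsigned vm_id,
				    const struct vmid_state *cfg)
	{
		printf("vmid %u: GDS switch emitted (gds %u+%u)\n", vm_id,
		       (unsigned)cfg->gds_base, (unsigned)cfg->gds_size);
	}

	/* Emit the switch only when the requested configuration
	 * differs from what was last programmed for this VMID. */
	static void flush(unsigned vm_id, const struct vmid_state *cfg)
	{
		if (memcmp(&ids[vm_id], cfg, sizeof(*cfg)) != 0) {
			ids[vm_id] = *cfg;
			emit_gds_switch(vm_id, cfg);
		}
	}

	/* Forget the cached state so the next flush() re-emits the
	 * switch, e.g. after the commands programming it were undone. */
	static void reset_id(unsigned vm_id)
	{
		memset(&ids[vm_id], 0, sizeof(ids[vm_id]));
	}

	int main(void)
	{
		struct vmid_state cfg = { .gds_base = 16, .gds_size = 64 };

		flush(4, &cfg);	/* differs from zeroed cache: emitted */
		flush(4, &cfg);	/* unchanged: skipped */
		reset_id(4);	/* e.g. the submission was rolled back */
		flush(4, &cfg);	/* forced to emit again */
		return 0;
	}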
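
The reset on the error paths in amdgpu_ib_schedule() matters because
amdgpu_ring_undo() discards the ring contents that would have
programmed the hardware, while the cached values would still claim it
is up to date; zeroing them forces the next flush to emit the switch
again. Resetting to zero instead of keeping a separate dirty flag keeps
the structure small, at the cost of treating an all-zero configuration
as already programmed after a reset.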