drm/amdgpu: save the PD addr before scheduling the job
author Christian König <christian.koenig@amd.com>
Wed, 15 Jun 2016 11:44:04 +0000 (13:44 +0200)
committer Alex Deucher <alexander.deucher@amd.com>
Thu, 7 Jul 2016 18:54:41 +0000 (14:54 -0400)
When we pipeline evictions the page directory could already be
moving somewhere else when grab_id is called.

Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c

index 523da20e6ea0c9c1feb93290f11c12efa0538018..de171ccf2b9c37e1a0d1654e3eb4743aaaee64f8 100644 (file)
@@ -660,6 +660,8 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
                }
        }
 
+       p->job->vm_pd_addr = amdgpu_bo_gpu_offset(vm->page_directory);
+
        r = amdgpu_bo_vm_update_pte(p, vm);
        if (!r)
                amdgpu_cs_sync_rings(p);
index 18daa2d64d57340a5a0dfb4bd457a2764f652469..adb87789cc3418eb28705aab69a56c3ee90b2373 100644 (file)
@@ -177,7 +177,6 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
                      struct amdgpu_sync *sync, struct fence *fence,
                      unsigned *vm_id, uint64_t *vm_pd_addr)
 {
-       uint64_t pd_addr = amdgpu_bo_gpu_offset(vm->page_directory);
        struct amdgpu_device *adev = ring->adev;
        struct fence *updates = sync->last_vm_update;
        struct amdgpu_vm_id *id, *idle;
@@ -250,7 +249,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
                if (atomic64_read(&id->owner) != vm->client_id)
                        continue;
 
-               if (pd_addr != id->pd_gpu_addr)
+               if (*vm_pd_addr != id->pd_gpu_addr)
                        continue;
 
                if (!same_ring &&
@@ -298,14 +297,13 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
        fence_put(id->flushed_updates);
        id->flushed_updates = fence_get(updates);
 
-       id->pd_gpu_addr = pd_addr;
+       id->pd_gpu_addr = *vm_pd_addr;
 
        list_move_tail(&id->list, &adev->vm_manager.ids_lru);
        atomic64_set(&id->owner, vm->client_id);
        vm->ids[ring->idx] = id;
 
        *vm_id = id - adev->vm_manager.ids;
-       *vm_pd_addr = pd_addr;
        trace_amdgpu_vm_grab_id(vm, ring->idx, *vm_id, *vm_pd_addr);
 
 error: