drm/amdgpu: reuse VMIDs already assigned to a process
author	Christian König <christian.koenig@amd.com>
Wed, 9 Mar 2016 21:11:53 +0000 (22:11 +0100)
committer	Alex Deucher <alexander.deucher@amd.com>
Thu, 5 May 2016 00:19:30 +0000 (20:19 -0400)
If we don't need to flush, we can easily use another VMID
already assigned to the process.

Signed-off-by: Christian König <christian.koenig@amd.com>
Acked-by: Alex Deucher <alexander.deucher@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c

index 252445f578f6c2ce3aa41e7728a91d0ca8a4cf12..1425aab312332d85a45007fd6e118f28df319b5a 100644
@@ -166,48 +166,63 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 {
        uint64_t pd_addr = amdgpu_bo_gpu_offset(vm->page_directory);
        struct amdgpu_device *adev = ring->adev;
-       struct amdgpu_vm_id *id = vm->ids[ring->idx];
        struct fence *updates = sync->last_vm_update;
+       struct amdgpu_vm_id *id;
+       unsigned i = ring->idx;
        int r;
 
        mutex_lock(&adev->vm_manager.lock);
 
-       /* check if the id is still valid */
-       if (id) {
-               struct fence *flushed = id->flushed_updates;
-               long owner = atomic_long_read(&id->owner);
-               bool usable = pd_addr == id->pd_gpu_addr;
-
-               if (owner != (long)&vm->ids[ring->idx])
-                       usable = false;
-               else if (!flushed)
-                       usable = false;
-               else if (!updates)
-                       usable = true;
-               else
-                       usable = !fence_is_later(updates, flushed);
+       /* Check if we can use a VMID already assigned to this VM */
+       do {
+               struct fence *flushed;
 
-               if (usable) {
+               id = vm->ids[i++];
+               if (i == AMDGPU_MAX_RINGS)
+                       i = 0;
 
-                       r = amdgpu_sync_fence(ring->adev, sync, id->first);
-                       if (r)
-                               goto error;
+               /* Check all the prerequisites to using this VMID */
+               if (!id)
+                       continue;
+
+               if (atomic_long_read(&id->owner) != (long)vm)
+                       continue;
+
+               if (pd_addr != id->pd_gpu_addr)
+                       continue;
+
+               if (id != vm->ids[ring->idx] &&
+                   (!id->last_flush || !fence_is_signaled(id->last_flush)))
+                       continue;
+
+               flushed  = id->flushed_updates;
+               if (updates && (!flushed || fence_is_later(updates, flushed)))
+                       continue;
 
-                       r = amdgpu_sync_fence(ring->adev, &id->active, fence);
+               /* Good we can use this VMID */
+               if (id == vm->ids[ring->idx]) {
+                       r = amdgpu_sync_fence(ring->adev, sync,
+                                             id->first);
                        if (r)
                                goto error;
+               }
+
+               /* And remember this submission as user of the VMID */
+               r = amdgpu_sync_fence(ring->adev, &id->active, fence);
+               if (r)
+                       goto error;
 
-                       list_move_tail(&id->list, &adev->vm_manager.ids_lru);
+               list_move_tail(&id->list, &adev->vm_manager.ids_lru);
+               vm->ids[ring->idx] = id;
 
-                       *vm_id = id - adev->vm_manager.ids;
-                       *vm_pd_addr = AMDGPU_VM_NO_FLUSH;
-                       trace_amdgpu_vm_grab_id(vm, ring->idx, *vm_id,
-                                               *vm_pd_addr);
+               *vm_id = id - adev->vm_manager.ids;
+               *vm_pd_addr = AMDGPU_VM_NO_FLUSH;
+               trace_amdgpu_vm_grab_id(vm, ring->idx, *vm_id, *vm_pd_addr);
 
-                       mutex_unlock(&adev->vm_manager.lock);
-                       return 0;
-               }
-       }
+               mutex_unlock(&adev->vm_manager.lock);
+               return 0;
+
+       } while (i != ring->idx);
 
        id = list_first_entry(&adev->vm_manager.ids_lru,
                              struct amdgpu_vm_id,
@@ -245,7 +260,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
        id->pd_gpu_addr = pd_addr;
 
        list_move_tail(&id->list, &adev->vm_manager.ids_lru);
-       atomic_long_set(&id->owner, (long)id);
+       atomic_long_set(&id->owner, (long)vm);
        vm->ids[ring->idx] = id;
 
        *vm_id = id - adev->vm_manager.ids;
@@ -1464,7 +1479,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
                if (!id)
                        continue;
 
-               atomic_long_cmpxchg(&id->owner, (long)&vm->ids[i], 0);
+               atomic_long_cmpxchg(&id->owner, (long)vm, 0);
        }
 }
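
For reference, below is a minimal standalone sketch of the reuse check that the new do/while loop in amdgpu_vm_grab_id() performs: an id already owned by this VM can be taken over without a flush when it was flushed with the same page directory, its flush already covers all pending page table updates, and, if it is borrowed from another ring's slot, its last flush has signaled. The struct layouts, fence helpers, and the id_reusable()/find_reusable_id() names are simplified stand-ins for illustration, not the real amdgpu structures.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_RINGS 4

/* Stand-in for struct fence: a sequence number plus a signaled flag. */
struct fence {
	uint64_t seq;
	bool signaled;
};

/* Stand-in for struct amdgpu_vm_id, reduced to the fields the check reads. */
struct vm_id {
	void *owner;			/* VM that last used this id */
	uint64_t pd_gpu_addr;		/* page directory the id was flushed with */
	struct fence *last_flush;	/* fence of the last VM flush using this id */
	struct fence *flushed_updates;	/* page table updates covered by that flush */
};

/* Stand-in for struct amdgpu_vm: the last id used on each ring. */
struct vm {
	struct vm_id *ids[MAX_RINGS];
};

static bool fence_is_later(const struct fence *a, const struct fence *b)
{
	return a->seq > b->seq;
}

/* Mirror of the prerequisites tested in the new do/while loop. */
static bool id_reusable(struct vm *vm, unsigned int ring_idx,
			struct vm_id *id, uint64_t pd_addr,
			struct fence *updates)
{
	struct fence *flushed;

	if (!id)
		return false;			/* slot never assigned */
	if (id->owner != vm)
		return false;			/* id belongs to another process */
	if (id->pd_gpu_addr != pd_addr)
		return false;			/* flushed with a different page directory */
	/* borrowing from another ring is only safe once its flush completed */
	if (id != vm->ids[ring_idx] &&
	    (!id->last_flush || !id->last_flush->signaled))
		return false;
	/* all pending page table updates must be covered by the flush */
	flushed = id->flushed_updates;
	if (updates && (!flushed || fence_is_later(updates, flushed)))
		return false;
	return true;
}

/* Scan all per-ring slots starting at ring_idx, like the do/while loop. */
static struct vm_id *find_reusable_id(struct vm *vm, unsigned int ring_idx,
				      uint64_t pd_addr, struct fence *updates)
{
	unsigned int i = ring_idx;

	do {
		struct vm_id *id = vm->ids[i++];

		if (i == MAX_RINGS)
			i = 0;
		if (id_reusable(vm, ring_idx, id, pd_addr, updates))
			return id;
	} while (i != ring_idx);

	return NULL;
}

int main(void)
{
	struct fence flush = { .seq = 1, .signaled = true };
	struct fence upd   = { .seq = 1, .signaled = true };
	struct vm_id id = {
		.pd_gpu_addr = 0x1000,
		.last_flush = &flush,
		.flushed_updates = &upd,
	};
	struct vm vm = { .ids = { [2] = &id } };

	id.owner = &vm;

	/* submission on ring 0, same page directory, no newer updates:
	 * the id assigned to ring 2 can be reused without a flush */
	printf("reusable: %d\n",
	       find_reusable_id(&vm, 0, 0x1000, &upd) != NULL);
	return 0;
}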