drm/amdgpu: use kernel fence directly in amdgpu_bo_fence
authorChunming Zhou <david1.zhou@amd.com>
Mon, 3 Aug 2015 03:38:09 +0000 (11:38 +0800)
committerAlex Deucher <alexander.deucher@amd.com>
Mon, 17 Aug 2015 20:50:50 +0000 (16:50 -0400)
Signed-off-by: Chunming Zhou <david1.zhou@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Jammy Zhou <jammy.zhou@amd.com>
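
With this change, amdgpu_bo_fence() takes the base struct fence directly instead of a struct amdgpu_fence, so callers that hold an amdgpu_fence (e.g. ib->fence) pass its embedded base member, as the hunks below show. A minimal sketch of the new call convention, assuming an amdgpu_ib whose fence is an amdgpu_fence embedding struct fence as "base"; the helper name attach_ib_fence is illustrative and not part of this patch:

/* Illustrative sketch only: amdgpu_fence embeds a struct fence as "base",
 * so callers now hand that embedded kernel fence to amdgpu_bo_fence(). */
static void attach_ib_fence(struct amdgpu_bo *bo, struct amdgpu_ib *ib,
			    bool shared)
{
	if (ib->fence)
		amdgpu_bo_fence(bo, &ib->fence->base, shared);
}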
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c

index 6c63a2c6395cb7425015f199bd8ce8e0136ad191..87d67f8c85c96cfce97169ef9401eb4c42ddb6f9 100644 (file)
@@ -658,13 +658,13 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
  * @shared: true if fence should be added shared
  *
  */
-void amdgpu_bo_fence(struct amdgpu_bo *bo, struct amdgpu_fence *fence,
+void amdgpu_bo_fence(struct amdgpu_bo *bo, struct fence *fence,
                     bool shared)
 {
        struct reservation_object *resv = bo->tbo.resv;
 
        if (shared)
-               reservation_object_add_shared_fence(resv, &fence->base);
+               reservation_object_add_shared_fence(resv, fence);
        else
-               reservation_object_add_excl_fence(resv, &fence->base);
+               reservation_object_add_excl_fence(resv, fence);
 }
index 675bdc30e41d93cbe69f2e66f29c571d557c1860..238465a9ac5564ebeaa1e142a07871557e96ed39 100644 (file)
@@ -161,7 +161,7 @@ int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
 void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
                                  struct ttm_mem_reg *new_mem);
 int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
-void amdgpu_bo_fence(struct amdgpu_bo *bo, struct amdgpu_fence *fence,
+void amdgpu_bo_fence(struct amdgpu_bo *bo, struct fence *fence,
                     bool shared);
 
 /*
index f2166320a5e5afe7248211128ebbbdb1d8057c23..8745d4cc7ae0d93729aac5437db95006f05ad8da 100644 (file)
@@ -320,7 +320,7 @@ static int amdgpu_vm_run_job(
        struct amdgpu_cs_parser *sched_job)
 {
        amdgpu_bo_fence(sched_job->job_param.vm.bo,
-                       sched_job->ibs[sched_job->num_ibs -1].fence, true);
+                       &sched_job->ibs[sched_job->num_ibs -1].fence->base, true);
        return 0;
 }
 
@@ -397,7 +397,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
                r = amdgpu_ib_schedule(adev, 1, ib, AMDGPU_FENCE_OWNER_VM);
                if (r)
                        goto error_free;
-               amdgpu_bo_fence(bo, ib->fence, true);
+               amdgpu_bo_fence(bo, &ib->fence->base, true);
        }
 
 error_free:
@@ -547,7 +547,7 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
                                amdgpu_ib_free(adev, ib);
                                return r;
                        }
-                       amdgpu_bo_fence(pd, ib->fence, true);
+                       amdgpu_bo_fence(pd, &ib->fence->base, true);
                }
        }
 
@@ -745,7 +745,7 @@ static void amdgpu_vm_fence_pts(struct amdgpu_vm *vm,
        end >>= amdgpu_vm_block_size;
 
        for (i = start; i <= end; ++i)
-               amdgpu_bo_fence(vm->page_tables[i].bo, fence, true);
+               amdgpu_bo_fence(vm->page_tables[i].bo, &fence->base, true);
 }
 
 static int amdgpu_vm_bo_update_mapping_run_job(