 * @shared: true if the fence should be added as shared
 */
-void amdgpu_bo_fence(struct amdgpu_bo *bo, struct amdgpu_fence *fence,
+void amdgpu_bo_fence(struct amdgpu_bo *bo, struct fence *fence,
		     bool shared)
{
	struct reservation_object *resv = bo->tbo.resv;

	if (shared)
-		reservation_object_add_shared_fence(resv, &fence->base);
+		reservation_object_add_shared_fence(resv, fence);
	else
-		reservation_object_add_excl_fence(resv, &fence->base);
+		reservation_object_add_excl_fence(resv, fence);
}
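The definition now takes the generic struct fence directly instead of unwrapping the driver-specific type itself. This works because the amdgpu driver embeds the generic fence as a member named base in its own fence structure, which is also why the converted call sites below pass &...->base. A minimal sketch of that relationship (the base member name matches the driver; the helper is hypothetical):

struct amdgpu_fence {
	struct fence base;	/* embedded generic fence, passed to amdgpu_bo_fence() */
	/* driver-private state follows */
};

/* hypothetical caller: "upcast" by taking the address of the embedded base */
static void attach_driver_fence(struct amdgpu_bo *bo, struct amdgpu_fence *af)
{
	amdgpu_bo_fence(bo, &af->base, true);
}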
void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
			   struct ttm_mem_reg *new_mem);
int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
-void amdgpu_bo_fence(struct amdgpu_bo *bo, struct amdgpu_fence *fence,
+void amdgpu_bo_fence(struct amdgpu_bo *bo, struct fence *fence,
		     bool shared);
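Since the prototype now names the generic struct fence, the header needs that type in scope. A minimal sketch, assuming it is not already pulled in transitively:

#include <linux/fence.h>	/* struct fence (renamed to dma_fence in later kernels) */

Because the prototype only uses a pointer, a bare forward declaration (struct fence;) would also suffice and keeps header dependencies lighter.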
		struct amdgpu_cs_parser *sched_job)
{
	amdgpu_bo_fence(sched_job->job_param.vm.bo,
-			sched_job->ibs[sched_job->num_ibs -1].fence, true);
+			&sched_job->ibs[sched_job->num_ibs - 1].fence->base, true);
	return 0;
}
	r = amdgpu_ib_schedule(adev, 1, ib, AMDGPU_FENCE_OWNER_VM);
	if (r)
		goto error_free;

-	amdgpu_bo_fence(bo, ib->fence, true);
+	amdgpu_bo_fence(bo, &ib->fence->base, true);
}

error_free:
	amdgpu_ib_free(adev, ib);
	return r;
}
-		amdgpu_bo_fence(pd, ib->fence, true);
+		amdgpu_bo_fence(pd, &ib->fence->base, true);
	}
}
	end >>= amdgpu_vm_block_size;

	for (i = start; i <= end; ++i)
-		amdgpu_bo_fence(vm->page_tables[i].bo, fence, true);
+		amdgpu_bo_fence(vm->page_tables[i].bo, &fence->base, true);
}
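Taken together, the call sites show the whole conversion pattern: any caller still holding an amdgpu_fence (or an ib whose fence member is one) passes the embedded base rather than the wrapper. A hypothetical helper illustrating both paths through the new signature (all names except amdgpu_bo_fence() are assumptions):

/* Readers typically attach shared fences to the reservation object,
 * while a writer attaches the single exclusive fence. */
static void example_sync_bo(struct amdgpu_bo *bo, struct amdgpu_fence *af,
			    bool is_write)
{
	amdgpu_bo_fence(bo, &af->base, !is_write);
}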
static int amdgpu_vm_bo_update_mapping_run_job(