struct amdgpu_sync *sync);
int amdgpu_vm_bo_update(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va,
- struct ttm_mem_reg *mem);
+ bool clear);
void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
struct amdgpu_bo *bo);
struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
*
* @adev: amdgpu_device pointer
* @bo_va: requested BO and VM object
- * @mem: ttm mem
+ * @clear: if true, clear the page table entries
*
* Fill in the page table entries for @bo_va.
* Returns 0 for success, -EINVAL for failure.
- *
- * Object have to be reserved and mutex must be locked!
*/
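With the new signature the placement is no longer passed in; a caller only says whether the mappings should be cleared, and the function reads the placement from the BO itself. A minimal usage sketch under that assumption (the surrounding caller is illustrative, not part of this patch):

	/* Map the BO at its current placement, e.g. after validation;
	 * the placement is read from bo_va->bo->tbo.mem internally. */
	r = amdgpu_vm_bo_update(adev, bo_va, false);
	if (r)
		return r;

	/* Drop the mappings instead, e.g. after the BO was evicted. */
	r = amdgpu_vm_bo_update(adev, bo_va, true);
	if (r)
		return r;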
int amdgpu_vm_bo_update(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va,
- struct ttm_mem_reg *mem)
+ bool clear)
{
struct amdgpu_vm *vm = bo_va->vm;
struct amdgpu_bo_va_mapping *mapping;
dma_addr_t *pages_addr = NULL;
uint32_t gtt_flags, flags;
+ struct ttm_mem_reg *mem;
struct fence *exclusive;
uint64_t addr;
int r;
- if (mem) {
+ if (clear) {
+ mem = NULL;
+ addr = 0;
+ exclusive = NULL;
+ } else {
struct ttm_dma_tt *ttm;
+ mem = &bo_va->bo->tbo.mem;
addr = (u64)mem->start << PAGE_SHIFT;
switch (mem->mem_type) {
case TTM_PL_TT:
	ttm = container_of(bo_va->bo->tbo.ttm, struct ttm_dma_tt, ttm);
	pages_addr = ttm->dma_address;
	break;
case TTM_PL_VRAM:
	addr += adev->vm_manager.vram_base_offset;
	break;
default:
	break;
}
exclusive = reservation_object_get_excl(bo_va->bo->tbo.resv);
- } else {
- addr = 0;
- exclusive = NULL;
}
flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem);
spin_lock(&vm->status_lock);
list_splice_init(&bo_va->invalids, &bo_va->valids);
list_del_init(&bo_va->vm_status);
- if (!mem)
+ if (clear)
list_add(&bo_va->vm_status, &vm->cleared);
spin_unlock(&vm->status_lock);
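For context on the valids/invalids bookkeeping above, a hedged sketch of the counterpart path: when a BO moves, its per-VM mappings are put back on the VM's invalidated list so a later amdgpu_vm_bo_update() call picks them up. This mirrors the amdgpu_vm_bo_invalidate() prototype quoted at the top of the diff; the body here is a sketch, not code from this patch:

	void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
				     struct amdgpu_bo *bo)
	{
		struct amdgpu_bo_va *bo_va;

		/* Flag every mapping of this BO for a page table update. */
		list_for_each_entry(bo_va, &bo->va, bo_list) {
			spin_lock(&bo_va->vm->status_lock);
			if (list_empty(&bo_va->vm_status))
				list_add(&bo_va->vm_status,
					 &bo_va->vm->invalidated);
			spin_unlock(&bo_va->vm->status_lock);
		}
	}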
bo_va = list_first_entry(&vm->invalidated, struct amdgpu_bo_va, vm_status);
spin_unlock(&vm->status_lock);
- r = amdgpu_vm_bo_update(adev, bo_va, NULL);
+ r = amdgpu_vm_bo_update(adev, bo_va, true);
if (r)
return r;
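Taken together with the fragment above, the caller's loop after the change looks roughly like this (a sketch of the shape only; locking follows the context lines in the hunk):

	spin_lock(&vm->status_lock);
	while (!list_empty(&vm->invalidated)) {
		struct amdgpu_bo_va *bo_va;

		bo_va = list_first_entry(&vm->invalidated,
					 struct amdgpu_bo_va, vm_status);
		spin_unlock(&vm->status_lock);

		/* Evicted BOs get their entries cleared rather than mapped. */
		r = amdgpu_vm_bo_update(adev, bo_va, true);
		if (r)
			return r;

		spin_lock(&vm->status_lock);
	}
	spin_unlock(&vm->status_lock);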