 	if (r)
 		return r;
 
+	if (amdgpu_sriov_vf(adev)) {
+		struct dma_fence *f;
+		bo_va = vm->csa_bo_va;
+		BUG_ON(!bo_va);
+		r = amdgpu_vm_bo_update(adev, bo_va, false);
+		if (r)
+			return r;
+
+		f = bo_va->last_pt_update;
+		r = amdgpu_sync_fence(adev, &p->job->sync, f);
+		if (r)
+			return r;
+	}
+
 	if (p->bo_list) {
 		for (i = 0; i < p->bo_list->num_entries; i++) {
 			struct dma_fence *f;
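The hunk above is in the command-submission path (amdgpu_bo_vm_update_pte() in amdgpu_cs.c): on an SR-IOV VF the static CSA is mapped into every process VM, so before a job runs, the CSA mapping's page tables must be current and the job must wait for that update to land. As a minimal sketch, the added block could be factored into a helper like the following (amdgpu_cs_sync_bo_va is a hypothetical name, not part of the patch):

static int amdgpu_cs_sync_bo_va(struct amdgpu_device *adev,
				struct amdgpu_cs_parser *p,
				struct amdgpu_bo_va *bo_va)
{
	int r;

	/* write out updated PTEs for this mapping */
	r = amdgpu_vm_bo_update(adev, bo_va, false);
	if (r)
		return r;

	/* block the job until the new PTEs are visible to the GPU */
	return amdgpu_sync_fence(adev, &p->job->sync, bo_va->last_pt_update);
}

The next hunk, in amdgpu_init() (amdgpu_device.c), allocates the CSA right after GMC hardware init, the earliest point at which GPU memory can be allocated: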
 				return r;
 			}
 			adev->ip_blocks[i].status.hw = true;
+
+			/* right after GMC hw init, we create CSA */
+			if (amdgpu_sriov_vf(adev)) {
+				r = amdgpu_allocate_static_csa(adev);
+				if (r) {
+					DRM_ERROR("allocate CSA failed %d\n", r);
+					return r;
+				}
+			}
 		}
 	}
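amdgpu_allocate_static_csa() itself is added elsewhere in the patch (amdgpu_virt.c). A minimal sketch of what it can look like, assuming a page-aligned AMDGPU_CSA_SIZE constant; amdgpu_bo_create_kernel() creates a pinned kernel BO and returns both its VMID0 GPU address and a CPU mapping:

int amdgpu_allocate_static_csa(struct amdgpu_device *adev)
{
	void *ptr;
	int r;

	r = amdgpu_bo_create_kernel(adev, AMDGPU_CSA_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM, &adev->virt.csa_obj,
				    &adev->virt.csa_vmid0_addr, &ptr);
	if (r)
		return r;

	/* start from a clean save area */
	memset(ptr, 0, AMDGPU_CSA_SIZE);
	return 0;
}

The matching teardown is the next hunk, in amdgpu_fini(): amdgpu_bo_free_kernel() unpins and frees the BO and clears csa_obj and csa_vmid0_addr: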
 		adev->ip_blocks[i].status.late_initialized = false;
 	}
 
+	if (amdgpu_sriov_vf(adev))
+		amdgpu_bo_free_kernel(&adev->virt.csa_obj, &adev->virt.csa_vmid0_addr, NULL);
+
 	return 0;
 }
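On the process side, amdgpu_driver_open_kms() (amdgpu_kms.c) maps the CSA into each new file's VM right after amdgpu_vm_init() succeeds: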
 		goto out_suspend;
 	}
 
+	if (amdgpu_sriov_vf(adev)) {
+		r = amdgpu_map_static_csa(adev, &fpriv->vm);
+		if (r)
+			goto out_suspend;
+	}
+
 	mutex_init(&fpriv->bo_list_lock);
 	idr_init(&fpriv->bo_list_handles);
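amdgpu_map_static_csa() is the per-VM counterpart, also added elsewhere in the patch. A sketch under the same assumptions: it reserves the CSA BO together with the VM's page-directory BO via the TTM execbuf utilities, adds a bo_va and maps it at a fixed per-VM virtual address (AMDGPU_CSA_VADDR, assumed to be a reserved constant), then records the bo_va so the CS path and postclose can find it:

int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
	struct amdgpu_bo_list_entry pd;
	struct ttm_validate_buffer csa_tv;
	struct ww_acquire_ctx ticket;
	struct list_head list;
	struct amdgpu_bo_va *bo_va;
	int r;

	INIT_LIST_HEAD(&list);
	INIT_LIST_HEAD(&csa_tv.head);
	csa_tv.bo = &adev->virt.csa_obj->tbo;
	csa_tv.shared = true;

	/* reserve the CSA BO and the page directory together */
	list_add(&csa_tv.head, &list);
	amdgpu_vm_get_pd_bo(vm, &list, &pd);

	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
	if (r)
		return r;

	bo_va = amdgpu_vm_bo_add(adev, vm, adev->virt.csa_obj);
	if (!bo_va) {
		r = -ENOMEM;
		goto out;
	}

	/* map the CSA at the same virtual address in every VM */
	r = amdgpu_vm_bo_map(adev, bo_va, AMDGPU_CSA_VADDR, 0, AMDGPU_CSA_SIZE,
			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
			     AMDGPU_PTE_EXECUTABLE);
	if (r) {
		amdgpu_vm_bo_rmv(adev, bo_va);
		goto out;
	}

	vm->csa_bo_va = bo_va;
out:
	ttm_eu_backoff_reservation(&ticket, &list);
	return r;
}

The final hunk, in amdgpu_driver_postclose_kms(), undoes this mapping when the file is closed: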
 	amdgpu_uvd_free_handles(adev, file_priv);
 	amdgpu_vce_free_handles(adev, file_priv);
 
+	if (amdgpu_sriov_vf(adev)) {
+		/* TODO: how to handle reserve failure */
+		BUG_ON(amdgpu_bo_reserve(adev->virt.csa_obj, false));
+		amdgpu_vm_bo_rmv(adev, fpriv->vm.csa_bo_va);
+		fpriv->vm.csa_bo_va = NULL;
+		amdgpu_bo_unreserve(adev->virt.csa_obj);
+	}
+
 	amdgpu_vm_fini(adev, &fpriv->vm);
 
 	idr_for_each_entry(&fpriv->bo_list_handles, list, handle)
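The TODO above is worth taking seriously: amdgpu_bo_reserve() with the interruptible variant can fail (e.g. with -ERESTARTSYS on a signal), and BUG_ON() turns that into a kernel crash. A hypothetical, gentler variant, not part of the patch, would warn and skip the explicit unmap, letting amdgpu_vm_fini() below tear down whatever mappings remain:

	if (amdgpu_sriov_vf(adev)) {
		/* on reserve failure, warn and fall through to vm_fini */
		if (!WARN_ON(amdgpu_bo_reserve(adev->virt.csa_obj, false))) {
			amdgpu_vm_bo_rmv(adev, fpriv->vm.csa_bo_va);
			fpriv->vm.csa_bo_va = NULL;
			amdgpu_bo_unreserve(adev->virt.csa_obj);
		}
	}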