drm/amdgpu: invoke CSA functions (v2)
author    Monk Liu <Monk.Liu@amd.com>
          Mon, 9 Jan 2017 07:54:32 +0000 (15:54 +0800)
committer Alex Deucher <alexander.deucher@amd.com>
          Fri, 27 Jan 2017 16:13:20 +0000 (11:13 -0500)
Make sure the static CSA (Context Save Area) is actually wired up under SR-IOV:
allocate it right after GMC hw init, map it into each process VM on open, keep its
mapping up to date at command submission time, and tear it down on file close and
driver fini.

v2: agd: rebase.

Signed-off-by: Monk Liu <Monk.Liu@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 29d6d84d1c28b1e847715c08b6fac8f9d2a9aac3..8d019ca7d9c72b17077624a88ce00434746b9da1 100644
@@ -771,6 +771,20 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,
        if (r)
                return r;
 
+       if (amdgpu_sriov_vf(adev)) {
+               struct dma_fence *f;
+               bo_va = vm->csa_bo_va;
+               BUG_ON(!bo_va);
+               r = amdgpu_vm_bo_update(adev, bo_va, false);
+               if (r)
+                       return r;
+
+               f = bo_va->last_pt_update;
+               r = amdgpu_sync_fence(adev, &p->job->sync, f);
+               if (r)
+                       return r;
+       }
+
        if (p->bo_list) {
                for (i = 0; i < p->bo_list->num_entries; i++) {
                        struct dma_fence *f;
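
The hunk above depends on a csa_bo_va pointer carried in struct amdgpu_vm, and uses
amdgpu_sync_fence() to add the mapping's last_pt_update fence to the job, so the
submission cannot run before the CSA page-table entries are actually written. A
minimal sketch of the assumed field, following the companion "implement CSA
functions" patch:

	struct amdgpu_vm {
		...
		/* SR-IOV only: bo_va of this VM's static CSA mapping; set by
		 * amdgpu_map_static_csa() and cleared again on file close */
		struct amdgpu_bo_va *csa_bo_va;
	};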
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 85b36edbdd61536ebff6cf731841abd3758e2e39..121b8440cf40aa2933fde49f76fbadc73abb96f7 100644
@@ -1395,6 +1395,15 @@ static int amdgpu_init(struct amdgpu_device *adev)
                                return r;
                        }
                        adev->ip_blocks[i].status.hw = true;
+
+                       /* right after GMC hw init, we create CSA */
+                       if (amdgpu_sriov_vf(adev)) {
+                               r = amdgpu_allocate_static_csa(adev);
+                               if (r) {
+                                       DRM_ERROR("allocate CSA failed %d\n", r);
+                                       return r;
+                               }
+                       }
                }
        }
 
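amdgpu_allocate_static_csa() itself is defined in the companion patch; a hedged
sketch of the allocation side, assuming the amdgpu_bo_create_kernel() helper of
that era and the AMDGPU_CSA_SIZE constant from that patch:

	int amdgpu_allocate_static_csa(struct amdgpu_device *adev)
	{
		void *ptr;
		int r;

		/* one pinned, CPU-mapped VRAM BO; its VMID0 GPU address is
		 * stored in adev->virt.csa_vmid0_addr */
		r = amdgpu_bo_create_kernel(adev, AMDGPU_CSA_SIZE, PAGE_SIZE,
					    AMDGPU_GEM_DOMAIN_VRAM,
					    &adev->virt.csa_obj,
					    &adev->virt.csa_vmid0_addr, &ptr);
		if (r)
			return r;

		/* start from a clean save area */
		memset(ptr, 0, AMDGPU_CSA_SIZE);
		return 0;
	}
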
@@ -1528,6 +1537,9 @@ static int amdgpu_fini(struct amdgpu_device *adev)
                adev->ip_blocks[i].status.late_initialized = false;
        }
 
+       if (amdgpu_sriov_vf(adev))
+               amdgpu_bo_free_kernel(&adev->virt.csa_obj, &adev->virt.csa_vmid0_addr, NULL);
+
        return 0;
 }
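
Note that the teardown passes NULL as the cpu_addr argument to amdgpu_bo_free_kernel(),
which unpins and frees the BO and clears the saved pointers; no kernel CPU pointer was
kept past allocation, so there is nothing extra to release here.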
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 43169ab61a7862991db6ca5da9946e60e3b634c4..8f852cb152f5445a16ffb0a5016efdbdccbdceec 100644
@@ -649,6 +649,12 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
                goto out_suspend;
        }
 
+       if (amdgpu_sriov_vf(adev)) {
+               r = amdgpu_map_static_csa(adev, &fpriv->vm);
+               if (r)
+                       goto out_suspend;
+       }
+
        mutex_init(&fpriv->bo_list_lock);
        idr_init(&fpriv->bo_list_handles);
 
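amdgpu_map_static_csa() is likewise defined in the companion patch; a sketch, under
the same assumptions (AMDGPU_CSA_VADDR as the fixed per-VM virtual address, plus the
csa_bo_va field noted earlier), of how the static CSA gets mapped into the new file's VM:

	int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm)
	{
		struct amdgpu_bo_list_entry pd;
		struct ttm_validate_buffer csa_tv;
		struct ww_acquire_ctx ticket;
		struct amdgpu_bo_va *bo_va;
		struct list_head list;
		int r;

		INIT_LIST_HEAD(&list);
		INIT_LIST_HEAD(&csa_tv.head);
		csa_tv.bo = &adev->virt.csa_obj->tbo;
		csa_tv.shared = true;
		list_add(&csa_tv.head, &list);

		/* reserve the CSA BO together with the page directory */
		amdgpu_vm_get_pd_bo(vm, &list, &pd);
		r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
		if (r)
			return r;

		bo_va = amdgpu_vm_bo_add(adev, vm, adev->virt.csa_obj);
		if (!bo_va) {
			r = -ENOMEM;
			goto backoff;
		}

		/* same GPU virtual address in every process VM */
		r = amdgpu_vm_bo_map(adev, bo_va, AMDGPU_CSA_VADDR, 0,
				     AMDGPU_CSA_SIZE,
				     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
				     AMDGPU_PTE_EXECUTABLE);
		if (r) {
			amdgpu_vm_bo_rmv(adev, bo_va);
			goto backoff;
		}

		vm->csa_bo_va = bo_va;
	backoff:
		ttm_eu_backoff_reservation(&ticket, &list);
		return r;
	}
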
@@ -687,6 +693,14 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
        amdgpu_uvd_free_handles(adev, file_priv);
        amdgpu_vce_free_handles(adev, file_priv);
 
+       if (amdgpu_sriov_vf(adev)) {
+               /* TODO: how to handle reserve failure */
+               BUG_ON(amdgpu_bo_reserve(adev->virt.csa_obj, false));
+               amdgpu_vm_bo_rmv(adev, fpriv->vm.csa_bo_va);
+               fpriv->vm.csa_bo_va = NULL;
+               amdgpu_bo_unreserve(adev->virt.csa_obj);
+       }
+
        amdgpu_vm_fini(adev, &fpriv->vm);
 
        idr_for_each_entry(&fpriv->bo_list_handles, list, handle)
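
A note on the postclose hunk: amdgpu_vm_bo_rmv() must be called with the BO reserved,
hence the amdgpu_bo_reserve() around it, and amdgpu_bo_reserve() returns 0 on success,
so the BUG_ON() only trips when the reserve fails; the in-line TODO acknowledges this
as a stopgap rather than real error handling.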