drm/amdgpu/virt: add high level interfaces for virt
author		Xiangliang Yu <Xiangliang.Yu@amd.com>
		Thu, 12 Jan 2017 06:53:08 +0000 (14:53 +0800)
committer	Alex Deucher <alexander.deucher@amd.com>
		Fri, 27 Jan 2017 16:13:23 +0000 (11:13 -0500)
Add high level interfaces that are not tied to a specific ASIC, so
ASIC files just need to implement these interfaces to support
virtualization.

Signed-off-by: Xiangliang Yu <Xiangliang.Yu@amd.com>
Signed-off-by: Monk Liu <Monk.Liu@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Monk Liu <Monk.Liu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h

index 00583baab1329dea480efbc41ae080cc18c0d127..6e63e7f3025caccc480fa82417308252ae456ce0 100644 (file)
@@ -149,3 +149,70 @@ void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
                DRM_ERROR("wait for kiq fence error: %ld.\n", r);
        dma_fence_put(f);
 }
+
+/**
+ * amdgpu_virt_request_full_gpu() - request full gpu access
+ * @adev:      amdgpu device.
+ * @init:      true if called at driver init time.
+ * When starting driver init/fini, full GPU access must be requested first.
+ * Return: Zero on success, error code otherwise.
+ */
+int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init)
+{
+       struct amdgpu_virt *virt = &adev->virt;
+       int r;
+
+       if (virt->ops && virt->ops->req_full_gpu) {
+               r = virt->ops->req_full_gpu(adev, init);
+               if (r)
+                       return r;
+
+               adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
+       }
+
+       return 0;
+}
+
+/**
+ * amdgpu_virt_release_full_gpu() - release full gpu access
+ * @adev:      amdgpu device.
+ * @init:      true if called at driver init time.
+ * When finishing driver init/fini, full GPU access must be released.
+ * Return: Zero on success, error code otherwise.
+ */
+int amdgpu_virt_release_full_gpu(struct amdgpu_device *adev, bool init)
+{
+       struct amdgpu_virt *virt = &adev->virt;
+       int r;
+
+       if (virt->ops && virt->ops->rel_full_gpu) {
+               r = virt->ops->rel_full_gpu(adev, init);
+               if (r)
+                       return r;
+
+               adev->virt.caps |= AMDGPU_SRIOV_CAPS_RUNTIME;
+       }
+       return 0;
+}
+
+/**
+ * amdgpu_virt_reset_gpu() - reset gpu
+ * @adev:      amdgpu device.
+ * Send a reset command to the GPU hypervisor to reset the GPU that the VM is using.
+ * Return: Zero on success, error code otherwise.
+ */
+int amdgpu_virt_reset_gpu(struct amdgpu_device *adev)
+{
+       struct amdgpu_virt *virt = &adev->virt;
+       int r;
+
+       if (virt->ops && virt->ops->reset_gpu) {
+               r = virt->ops->reset_gpu(adev);
+               if (r)
+                       return r;
+
+               adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
+       }
+
+       return 0;
+}
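As a usage sketch (not part of the patch): an SR-IOV-aware init path could bracket its hardware programming with these helpers. amdgpu_sriov_vf() is an existing driver macro; the function below and its hardware-init step are hypothetical, purely to illustrate the intended call order.

/*
 * Hypothetical init-time flow. Assumes "amdgpu.h" context; only the
 * amdgpu_virt_request_full_gpu()/amdgpu_virt_release_full_gpu() calls
 * come from this patch, the rest is illustrative.
 */
static int example_sriov_hw_init(struct amdgpu_device *adev)
{
	int r = 0;

	if (amdgpu_sriov_vf(adev)) {
		/* take exclusive access before touching the hardware */
		r = amdgpu_virt_request_full_gpu(adev, true);
		if (r)
			return r;
	}

	/* ... program the hardware while full access is held ... */

	if (amdgpu_sriov_vf(adev))
		/* hand the GPU back so the hypervisor can schedule other VFs */
		r = amdgpu_virt_release_full_gpu(adev, true);

	return r;
}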
index 433f559bc4a1d27061ef28409d22cbe1a35ab36b..d4605442026e0b2b8fd8370c485b4ab53b256870 100644 (file)
 #define AMDGPU_PASSTHROUGH_MODE        (1 << 3) /* the whole GPU is passed through to the VM */
 #define AMDGPU_SRIOV_CAPS_RUNTIME      (1 << 4) /* is out of full access mode */
 
+/**
+ * struct amdgpu_virt_ops - amdgpu device virt operations
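+ * @req_full_gpu: request full GPU access before exclusive (init/fini) use
+ * @rel_full_gpu: release full GPU access after exclusive use
+ * @reset_gpu: ask the GPU hypervisor to reset the GPU used by this VM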
+ */
+struct amdgpu_virt_ops {
+       int (*req_full_gpu)(struct amdgpu_device *adev, bool init);
+       int (*rel_full_gpu)(struct amdgpu_device *adev, bool init);
+       int (*reset_gpu)(struct amdgpu_device *adev);
+};
+
 /* GPU virtualization */
 struct amdgpu_virt {
-       uint32_t caps;
-       struct amdgpu_bo *csa_obj;
-       uint64_t csa_vmid0_addr;
-       uint32_t                reg_val_offs;
-       struct mutex            lock;
+       uint32_t                        caps;
+       struct amdgpu_bo                *csa_obj;
+       uint64_t                        csa_vmid0_addr;
+       uint32_t                        reg_val_offs;
+       struct mutex                    lock;
+       const struct amdgpu_virt_ops    *ops;
 };
 
 #define AMDGPU_CSA_SIZE    (8 * 1024)
@@ -72,5 +82,8 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm);
 void amdgpu_virt_init_setting(struct amdgpu_device *adev);
 uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg);
 void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v);
+int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init);
+int amdgpu_virt_release_full_gpu(struct amdgpu_device *adev, bool init);
+int amdgpu_virt_reset_gpu(struct amdgpu_device *adev);
 
 #endif
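For completeness, a sketch of the ASIC-side contract this patch enables. Everything named xgpu_example_* below is hypothetical; only struct amdgpu_virt_ops and the adev->virt.ops hook come from this patch.

/* Hypothetical ASIC backend; all xgpu_example_* names are illustrative. */
static int xgpu_example_request_full_gpu(struct amdgpu_device *adev, bool init)
{
	/* e.g. send a "request full access" message to the host and wait for ack */
	return 0;
}

static int xgpu_example_release_full_gpu(struct amdgpu_device *adev, bool init)
{
	/* e.g. notify the host that exclusive access is no longer needed */
	return 0;
}

static int xgpu_example_reset_gpu(struct amdgpu_device *adev)
{
	/* e.g. ask the host to reset the GPU assigned to this VM */
	return 0;
}

static const struct amdgpu_virt_ops xgpu_example_virt_ops = {
	.req_full_gpu	= xgpu_example_request_full_gpu,
	.rel_full_gpu	= xgpu_example_release_full_gpu,
	.reset_gpu	= xgpu_example_reset_gpu,
};

/* During early init the ASIC code would then set:
 *	adev->virt.ops = &xgpu_example_virt_ops;
 * after which the generic helpers above dispatch to this backend.
 */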