drm/amdgpu/virt: use kiq to access registers (v2)
author     Xiangliang Yu <Xiangliang.Yu@amd.com>
           Thu, 12 Jan 2017 06:29:34 +0000 (14:29 +0800)
committer  Alex Deucher <alexander.deucher@amd.com>
           Fri, 27 Jan 2017 16:13:23 +0000 (11:13 -0500)
For virtualization, the driver must use the KIQ to access registers
when it is out of GPU full access mode.

v2: agd: rebase

Signed-off-by: Xiangliang Yu <Xiangliang.Yu@amd.com>
Signed-off-by: Monk Liu <Monk.Liu@amd.com>
Reviewed-by: Monk Liu <Monk.Liu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
drivers/gpu/drm/amd/amdgpu/vi.c
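
For orientation, the accessors patched below sit behind the driver's MMIO
macros; a rough sketch of the call chain after this change (macro
definitions paraphrased from amdgpu.h of this era, shown for illustration
only):

/*
 * Sketch only -- not part of the diff. MMIO register accesses in
 * amdgpu funnel through these macros:
 */
#define RREG32(reg)	amdgpu_mm_rreg(adev, (reg), false)
#define WREG32(reg, v)	amdgpu_mm_wreg(adev, (reg), (v), false)

/*
 * When running as an SR-IOV VF outside exclusive (full access) mode,
 * both paths now detour through the KIQ ring instead of touching the
 * BAR directly:
 *
 *   RREG32(reg)    -> amdgpu_mm_rreg()  -> amdgpu_virt_kiq_rreg()
 *   WREG32(reg, v) -> amdgpu_mm_wreg()  -> amdgpu_virt_kiq_wreg()
 */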

index 121b8440cf40aa2933fde49f76fbadc73abb96f7..6eabeaaa3c3040fbeb175f47f81589bc03128b32 100644 (file)
@@ -94,6 +94,11 @@ uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
 {
        uint32_t ret;
 
+       if (amdgpu_sriov_runtime(adev)) {
+               BUG_ON(in_interrupt());
+               return amdgpu_virt_kiq_rreg(adev, reg);
+       }
+
        if ((reg * 4) < adev->rmmio_size && !always_indirect)
                ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
        else {
@@ -113,6 +118,11 @@ void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
 {
        trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);
 
+       if (amdgpu_sriov_runtime(adev)) {
+               BUG_ON(in_interrupt());
+               return amdgpu_virt_kiq_wreg(adev, reg, v);
+       }
+
        if ((reg * 4) < adev->rmmio_size && !always_indirect)
                writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
        else {
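
The BUG_ON(in_interrupt()) guards above exist because the KIQ path
sleeps: it takes a mutex and then blocks on a dma_fence. A hedged sketch
of the caller constraint this imposes (handler and register names below
are hypothetical):

/*
 * Hypothetical example: register access that is no longer legal in
 * interrupt context on an SR-IOV VF. amdgpu_virt_kiq_rreg() takes
 * adev->virt.lock and waits on a fence, both of which may sleep.
 */
static irqreturn_t example_irq_handler(int irq, void *data)
{
	struct amdgpu_device *adev = data;

	/*
	 * Under amdgpu_sriov_runtime() this RREG32() routes to the KIQ
	 * and would trip BUG_ON(in_interrupt()); defer the access to a
	 * work item or kernel thread instead.
	 */
	u32 status = RREG32(mmEXAMPLE_STATUS);	/* hypothetical register */

	return status ? IRQ_HANDLED : IRQ_NONE;
}
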
index cfc47214161afafd3a06f8e6bb29d17454fdac7d..00583baab1329dea480efbc41ae080cc18c0d127 100644 (file)
@@ -91,3 +91,61 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm)
        ttm_eu_backoff_reservation(&ticket, &list);
        return 0;
 }
+
+void amdgpu_virt_init_setting(struct amdgpu_device *adev)
+{
+       mutex_init(&adev->virt.lock);
+}
+
+uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
+{
+       signed long r;
+       uint32_t val;
+       struct dma_fence *f;
+       struct amdgpu_kiq *kiq = &adev->gfx.kiq;
+       struct amdgpu_ring *ring = &kiq->ring;
+
+       BUG_ON(!ring->funcs->emit_rreg);
+
+       mutex_lock(&adev->virt.lock);
+       amdgpu_ring_alloc(ring, 32);
+       amdgpu_ring_emit_hdp_flush(ring);
+       amdgpu_ring_emit_rreg(ring, reg);
+       amdgpu_ring_emit_hdp_invalidate(ring);
+       amdgpu_fence_emit(ring, &f);
+       amdgpu_ring_commit(ring);
+       mutex_unlock(&adev->virt.lock);
+
+       r = dma_fence_wait(f, false);
+       if (r)
+               DRM_ERROR("wait for kiq fence error: %ld.\n", r);
+       dma_fence_put(f);
+
+       val = adev->wb.wb[adev->virt.reg_val_offs];
+
+       return val;
+}
+
+void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
+{
+       signed long r;
+       struct dma_fence *f;
+       struct amdgpu_kiq *kiq = &adev->gfx.kiq;
+       struct amdgpu_ring *ring = &kiq->ring;
+
+       BUG_ON(!ring->funcs->emit_wreg);
+
+       mutex_lock(&adev->virt.lock);
+       amdgpu_ring_alloc(ring, 32);
+       amdgpu_ring_emit_hdp_flush(ring);
+       amdgpu_ring_emit_wreg(ring, reg, v);
+       amdgpu_ring_emit_hdp_invalidate(ring);
+       amdgpu_fence_emit(ring, &f);
+       amdgpu_ring_commit(ring);
+       mutex_unlock(&adev->virt.lock);
+
+       r = dma_fence_wait(f, false);
+       if (r)
+               DRM_ERROR("wait for kiq fence error: %ld.\n", r);
+       dma_fence_put(f);
+}
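
Taken together, the two helpers give SR-IOV code a drop-in replacement
for direct MMIO. A minimal usage sketch (register and value chosen for
illustration only):

/*
 * Illustrative round-trip through the new helpers. Reads are
 * fulfilled from the writeback page at adev->virt.reg_val_offs
 * once the KIQ fence signals.
 */
void example_scratch_roundtrip(struct amdgpu_device *adev)
{
	uint32_t val;

	amdgpu_virt_kiq_wreg(adev, mmSCRATCH_REG0, 0xDEADBEEF);
	val = amdgpu_virt_kiq_rreg(adev, mmSCRATCH_REG0);

	if (val != 0xDEADBEEF)
		DRM_ERROR("KIQ scratch round-trip mismatch: 0x%08x\n", val);
}

Note the locking shape: adev->virt.lock serializes only ring allocation
and commit, while the fence wait happens outside the lock, so concurrent
callers queue their packets in order but wait in parallel.
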
index 8bb9e5d307cb464f5d0f3602084bfbadfc8225ce..433f559bc4a1d27061ef28409d22cbe1a35ab36b 100644 (file)
@@ -36,6 +36,7 @@ struct amdgpu_virt {
        struct amdgpu_bo *csa_obj;
        uint64_t csa_vmid0_addr;
        uint32_t                reg_val_offs;
+       struct mutex            lock;
 };
 
 #define AMDGPU_CSA_SIZE    (8 * 1024)
@@ -68,5 +69,8 @@ static inline bool is_virtual_machine(void)
 struct amdgpu_vm;
 int amdgpu_allocate_static_csa(struct amdgpu_device *adev);
 int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm);
+void amdgpu_virt_init_setting(struct amdgpu_device *adev);
+uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg);
+void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v);
 
 #endif
index 95029e2e2650305797d8107a9d297b0555c00dfb..abdb80b812a6d0b4b8a305b2fbcc83e110d42fec 100644 (file)
@@ -921,6 +921,9 @@ static int vi_common_early_init(void *handle)
                (amdgpu_ip_block_mask & (1 << AMD_IP_BLOCK_TYPE_SMC)))
                smc_enabled = true;
 
+       if (amdgpu_sriov_vf(adev))
+               amdgpu_virt_init_setting(adev);
+
        adev->rev_id = vi_get_rev_id(adev);
        adev->external_rev_id = 0xFF;
        switch (adev->asic_type) {
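
Finally, the hook runs at early_init so that adev->virt.lock exists
before the first register access can take the SR-IOV branch. A sketch of
the same pattern as another ASIC's early-init might add it (function
name hypothetical):

/*
 * Sketch of the hook placement (hypothetical ASIC file). The mutex
 * must be initialized before any amdgpu_mm_rreg/wreg call can reach
 * amdgpu_virt_kiq_rreg/wreg.
 */
static int example_common_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_init_setting(adev);	/* sets up adev->virt.lock */

	return 0;
}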