drm/amdgpu: implement UVD VM mode for Stoney v2
author: Christian König <christian.koenig@amd.com>
Thu, 23 Jun 2016 10:11:46 +0000 (12:11 +0200)
committer: Alex Deucher <alexander.deucher@amd.com>
Fri, 29 Jul 2016 18:36:57 +0000 (14:36 -0400)
Starting with Stoney we support running UVD in VM mode as well.

v2: rebased, only enable on Polaris for now.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_6_0_d.h

index 07e9a987fbee413f6d21d9323dbd8045b7ac4d97..358608ea4cfd3c4335943babb93c1f1ed1dbd96f 100644 (file)
@@ -34,6 +34,7 @@
 #include "smu/smu_7_1_3_d.h"
 #include "smu/smu_7_1_3_sh_mask.h"
 #include "bif/bif_5_1_d.h"
+#include "gmc/gmc_8_1_d.h"
 #include "vi.h"
 
 static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev);
@@ -672,6 +673,9 @@ static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
                                  struct amdgpu_ib *ib,
                                  unsigned vm_id, bool ctx_switch)
 {
+       amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_VMID, 0));
+       amdgpu_ring_write(ring, vm_id);
+
        amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0));
        amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
        amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH, 0));
@@ -715,6 +719,57 @@ error:
        return r;
 }
 
+/**
+ * uvd_v6_0_ring_emit_vm_flush - flush the GPU VM TLB from the UVD ring
+ *
+ * @ring: amdgpu ring pointer
+ * @vm_id: VMID whose page table is being (re)programmed
+ * @pd_addr: physical address of the page directory for @vm_id
+ *
+ * UVD has no direct register-write packet, so every MMIO access is
+ * funneled through the GPCOM_VCPU mailbox: DATA0 takes the register
+ * byte offset (dword index << 2), DATA1 the value, and CMD the
+ * operation (0x8 presumably a register write, 0xC a masked wait on a
+ * register, with the mask taken from GP_SCRATCH8 — TODO confirm
+ * against the UVD firmware command spec).
+ *
+ * Sequence: program the page-directory base for @vm_id, request a TLB
+ * invalidate for that VMID, then wait for the invalidate-request bit
+ * to clear again.
+ */
+static void uvd_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
+                                        unsigned vm_id, uint64_t pd_addr)
+{
+       uint32_t reg;
+
+       /* VMIDs 0-7 map onto VM_CONTEXT0..7_PAGE_TABLE_BASE_ADDR,
+        * VMIDs 8-15 onto the second register bank starting at CONTEXT8.
+        */
+       if (vm_id < 8)
+               reg = mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id;
+       else
+               reg = mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8;
+
+       /* write the page-directory base (registers hold it in units of
+        * 4K pages, hence pd_addr >> 12)
+        */
+       amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
+       amdgpu_ring_write(ring, reg << 2);
+       amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
+       amdgpu_ring_write(ring, pd_addr >> 12);
+       amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
+       amdgpu_ring_write(ring, 0x8);
+
+       /* kick off the TLB invalidate for this VMID */
+       amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
+       amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
+       amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
+       amdgpu_ring_write(ring, 1 << vm_id);
+       amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
+       amdgpu_ring_write(ring, 0x8);
+
+       /* wait until our request bit reads back as 0 (invalidate done);
+        * GP_SCRATCH8 supplies the mask for the compare
+        */
+       amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
+       amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
+       amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
+       amdgpu_ring_write(ring, 0);
+       amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH8, 0));
+       amdgpu_ring_write(ring, 1 << vm_id); /* mask */
+       amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
+       amdgpu_ring_write(ring, 0xC);
+}
+
+/**
+ * uvd_v6_0_ring_emit_pipeline_sync - wait for pending work on this ring
+ *
+ * @ring: amdgpu ring pointer
+ *
+ * Emits a wait on the ring's fence memory location: DATA0/DATA1 carry
+ * the 64-bit fence address, GP_SCRATCH8 the compare mask (all bits)
+ * and GP_SCRATCH9 the expected sequence number; command 0xE
+ * presumably stalls the engine until the fence value matches —
+ * TODO confirm against the UVD firmware command spec.  Used before a
+ * VM flush so earlier submissions finish under the old page tables.
+ */
+static void uvd_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
+{
+       uint32_t seq = ring->fence_drv.sync_seq;
+       uint64_t addr = ring->fence_drv.gpu_addr;
+
+       amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
+       amdgpu_ring_write(ring, lower_32_bits(addr));
+       amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
+       amdgpu_ring_write(ring, upper_32_bits(addr));
+       amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH8, 0));
+       amdgpu_ring_write(ring, 0xffffffff); /* mask */
+       amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH9, 0));
+       amdgpu_ring_write(ring, seq);
+       amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
+       amdgpu_ring_write(ring, 0xE);
+}
+
 static bool uvd_v6_0_is_idle(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -951,7 +1006,7 @@ const struct amd_ip_funcs uvd_v6_0_ip_funcs = {
        .set_powergating_state = uvd_v6_0_set_powergating_state,
 };
 
-static const struct amdgpu_ring_funcs uvd_v6_0_ring_funcs = {
+static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = {
        .get_rptr = uvd_v6_0_ring_get_rptr,
        .get_wptr = uvd_v6_0_ring_get_wptr,
        .set_wptr = uvd_v6_0_ring_set_wptr,
@@ -966,9 +1021,32 @@ static const struct amdgpu_ring_funcs uvd_v6_0_ring_funcs = {
        .pad_ib = amdgpu_ring_generic_pad_ib,
 };
 
+/* Ring callbacks for UVD running in VM mode: IB addresses are GPU
+ * virtual, so no command-stream patching is needed (.parse_cs = NULL)
+ * and the ring provides its own vm_flush / pipeline_sync emitters.
+ */
+static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = {
+       .get_rptr = uvd_v6_0_ring_get_rptr,
+       .get_wptr = uvd_v6_0_ring_get_wptr,
+       .set_wptr = uvd_v6_0_ring_set_wptr,
+       .parse_cs = NULL,
+       .emit_ib = uvd_v6_0_ring_emit_ib,
+       .emit_fence = uvd_v6_0_ring_emit_fence,
+       .emit_vm_flush = uvd_v6_0_ring_emit_vm_flush,
+       .emit_pipeline_sync = uvd_v6_0_ring_emit_pipeline_sync,
+       .emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush,
+       .emit_hdp_invalidate = uvd_v6_0_ring_emit_hdp_invalidate,
+       .test_ring = uvd_v6_0_ring_test_ring,
+       .test_ib = uvd_v6_0_ring_test_ib,
+       .insert_nop = amdgpu_ring_insert_nop,
+       .pad_ib = amdgpu_ring_generic_pad_ib,
+};
+
 static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev)
 {
-       adev->uvd.ring.funcs = &uvd_v6_0_ring_funcs;
+       /* Stoney and newer ASICs get the VM-mode ring functions; older
+        * parts keep the physical-mode path with the CS parser.
+        * NOTE(review): commit text says "only enable on Polaris for
+        * now" but >= CHIP_STONEY also covers Stoney — verify the
+        * intended ASIC cutoff.
+        */
+       if (adev->asic_type >= CHIP_STONEY) {
+               adev->uvd.ring.funcs = &uvd_v6_0_ring_vm_funcs;
+               DRM_INFO("UVD is enabled in VM mode\n");
+       } else {
+               adev->uvd.ring.funcs = &uvd_v6_0_ring_phys_funcs;
+               DRM_INFO("UVD is enabled in physical mode\n");
+       }
 }
 
 static const struct amdgpu_irq_src_funcs uvd_v6_0_irq_funcs = {
index 6f6fb34742d299a46d2b8d50764a43f732d9de83..ec69869c55ff530da2b00a1dae3833896ce07b5b 100644 (file)
 #define mmUVD_MIF_RECON1_ADDR_CONFIG                                            0x39c5
 #define ixUVD_MIF_SCLR_ADDR_CONFIG                                              0x4
 #define mmUVD_JPEG_ADDR_CONFIG                                                  0x3a1f
+/* General-purpose scratch registers: this patch uses SCRATCH8 as the
+ * wait mask and SCRATCH9 as the compare value for ring-emitted waits.
+ */
+#define mmUVD_GP_SCRATCH8                                                       0x3c0a
+#define mmUVD_GP_SCRATCH9                                                       0x3c0b
 #define mmUVD_GP_SCRATCH4                                                       0x3d38
 
 #endif /* UVD_6_0_D_H */