drm/amdgpu: Add initial VI support
author      Alex Deucher <alexander.deucher@amd.com>
            Mon, 20 Apr 2015 21:31:14 +0000 (17:31 -0400)
committer   Alex Deucher <alexander.deucher@amd.com>
            Thu, 4 Jun 2015 01:03:17 +0000 (21:03 -0400)
This adds initial support for VI asics: Iceland, Tonga,
and Carrizo.  Our initial focus has been Carrizo, so there
are still gaps in support for Tonga and Iceland, notably
power management.

Acked-by: Christian König <christian.koenig@amd.com>
Acked-by: Jammy Zhou <Jammy.Zhou@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
49 files changed:
drivers/gpu/drm/amd/amdgpu/Makefile
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/clearstate_vi.h [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/cz_dpm.c [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/cz_dpm.h [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/cz_ih.c [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/cz_ih.h [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/cz_ppsmc.h [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/cz_smc.c [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/cz_smumgr.h [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/dce_v10_0.c [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/dce_v10_0.h [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/dce_v11_0.c [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/dce_v11_0.h [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/gmc_v8_0.h [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/iceland_dpm.c [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/iceland_ih.c [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/iceland_ih.h [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/iceland_sdma_pkt_open.h [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/iceland_smc.c [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/iceland_smumgr.h [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/sdma_v2_4.h [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/sdma_v3_0.h [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/smu8.h [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/smu8_fusion.h [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/smu_ucode_xfer_cz.h [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/smu_ucode_xfer_vi.h [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/tonga_dpm.c [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/tonga_ih.c [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/tonga_ih.h [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/tonga_ppsmc.h [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/tonga_sdma_pkt_open.h [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/tonga_smc.c [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/tonga_smumgr.h [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/uvd_v5_0.h [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/uvd_v6_0.h [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/vce_v3_0.c [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/vce_v3_0.h [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/vi.c [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/vi.h [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/vi_dpm.h [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/vid.h [new file with mode: 0644]

index aec28866945fe943f9776dd5d9a1d21bb2408033..9a573e87cdd3b6317a377d655e2a09b3804938f2 100644 (file)
@@ -18,29 +18,57 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
        amdgpu_prime.o amdgpu_vm.o amdgpu_ib.o amdgpu_pll.o \
        amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o
 
+# add asic specific block
 amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o gmc_v7_0.o cik_ih.o kv_smc.o kv_dpm.o \
        ci_smc.o ci_dpm.o dce_v8_0.o gfx_v7_0.o cik_sdma.o uvd_v4_2.o vce_v2_0.o
 
+amdgpu-y += \
+       vi.o
+
+# add GMC block
+amdgpu-y += \
+       gmc_v8_0.o
+
 # add IH block
 amdgpu-y += \
        amdgpu_irq.o \
-       amdgpu_ih.o
+       amdgpu_ih.o \
+       iceland_ih.o \
+       tonga_ih.o \
+       cz_ih.o
 
 # add SMC block
 amdgpu-y += \
-       amdgpu_dpm.o
+       amdgpu_dpm.o \
+       cz_smc.o cz_dpm.o \
+       tonga_smc.o tonga_dpm.o \
+       iceland_smc.o iceland_dpm.o
+
+# add DCE block
+amdgpu-y += \
+       dce_v10_0.o \
+       dce_v11_0.o
 
 # add GFX block
 amdgpu-y += \
-       amdgpu_gfx.o
+       amdgpu_gfx.o \
+       gfx_v8_0.o
+
+# add async DMA block
+amdgpu-y += \
+       sdma_v2_4.o \
+       sdma_v3_0.o
 
 # add UVD block
 amdgpu-y += \
-       amdgpu_uvd.o
+       amdgpu_uvd.o \
+       uvd_v5_0.o \
+       uvd_v6_0.o
 
 # add VCE block
 amdgpu-y += \
-       amdgpu_vce.o
+       amdgpu_vce.o \
+       vce_v3_0.o
 
 amdgpu-$(CONFIG_COMPAT) += amdgpu_ioc32.o
 amdgpu-$(CONFIG_VGA_SWITCHEROO) += amdgpu_atpx_handler.o
index 548e0843d95ad67c1dcb8bcc1f1065179d41e8fd..61cf5ad788579ce1d16d090cec5338aec2aa6a32 100644 (file)
@@ -41,6 +41,7 @@
 #ifdef CONFIG_DRM_AMDGPU_CIK
 #include "cik.h"
 #endif
+#include "vi.h"
 #include "bif/bif_4_1_d.h"
 
 static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev);
@@ -1154,9 +1155,21 @@ int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev,
 
 static int amdgpu_early_init(struct amdgpu_device *adev)
 {
-       int i, r = -EINVAL;
+       int i, r;
 
        switch (adev->asic_type) {
+       case CHIP_TOPAZ:
+       case CHIP_TONGA:
+       case CHIP_CARRIZO:
+               if (adev->asic_type == CHIP_CARRIZO)
+                       adev->family = AMDGPU_FAMILY_CZ;
+               else
+                       adev->family = AMDGPU_FAMILY_VI;
+
+               r = vi_set_ip_blocks(adev);
+               if (r)
+                       return r;
+               break;
 #ifdef CONFIG_DRM_AMDGPU_CIK
        case CHIP_BONAIRE:
        case CHIP_HAWAII:
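
(For context -- not part of the patch.)  The hunk above is the only change to
shared code: each new chip is mapped to a family and then handed to
vi_set_ip_blocks(), which registers the per-engine IP blocks (GMC, IH, GFX,
SDMA, UVD, VCE, DCE, SMC) that the rest of this patch adds.  Below is a
minimal, self-contained C sketch of that pattern; every name in it is
hypothetical and none of it is the driver's real API.

	/* Illustrative sketch only; these names do not exist in amdgpu. */
	#include <stddef.h>

	enum ip_block_type { IP_COMMON, IP_GMC, IP_IH, IP_GFX, IP_SDMA };

	struct ip_block {
		enum ip_block_type type;
		int major, minor;
		int (*hw_init)(void *handle);	/* per-block hook */
	};

	struct device {
		const struct ip_block *ip_blocks;
		size_t num_ip_blocks;
	};

	/* A per-family "set_ip_blocks" helper boils down to picking a
	 * static table of engine blocks for the detected chip. */
	static const struct ip_block example_vi_blocks[] = {
		{ IP_COMMON, 2, 0, NULL },
		{ IP_GMC,    8, 0, NULL },
		{ IP_IH,     3, 0, NULL },
		{ IP_GFX,    8, 0, NULL },
		{ IP_SDMA,   3, 0, NULL },
	};

	static int example_set_ip_blocks(struct device *dev)
	{
		dev->ip_blocks = example_vi_blocks;
		dev->num_ip_blocks = sizeof(example_vi_blocks) /
				     sizeof(example_vi_blocks[0]);
		return 0;
	}

Later init stages then walk the registered table and invoke each block's
hooks in order, which is how the new VI blocks get initialized.
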
diff --git a/drivers/gpu/drm/amd/amdgpu/clearstate_vi.h b/drivers/gpu/drm/amd/amdgpu/clearstate_vi.h
new file mode 100644 (file)
index 0000000..1aab9be
--- /dev/null
@@ -0,0 +1,944 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+static const unsigned int vi_SECT_CONTEXT_def_1[] =
+{
+    0x00000000, // DB_RENDER_CONTROL
+    0x00000000, // DB_COUNT_CONTROL
+    0x00000000, // DB_DEPTH_VIEW
+    0x00000000, // DB_RENDER_OVERRIDE
+    0x00000000, // DB_RENDER_OVERRIDE2
+    0x00000000, // DB_HTILE_DATA_BASE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // DB_DEPTH_BOUNDS_MIN
+    0x00000000, // DB_DEPTH_BOUNDS_MAX
+    0x00000000, // DB_STENCIL_CLEAR
+    0x00000000, // DB_DEPTH_CLEAR
+    0x00000000, // PA_SC_SCREEN_SCISSOR_TL
+    0x40004000, // PA_SC_SCREEN_SCISSOR_BR
+    0, // HOLE
+    0x00000000, // DB_DEPTH_INFO
+    0x00000000, // DB_Z_INFO
+    0x00000000, // DB_STENCIL_INFO
+    0x00000000, // DB_Z_READ_BASE
+    0x00000000, // DB_STENCIL_READ_BASE
+    0x00000000, // DB_Z_WRITE_BASE
+    0x00000000, // DB_STENCIL_WRITE_BASE
+    0x00000000, // DB_DEPTH_SIZE
+    0x00000000, // DB_DEPTH_SLICE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // TA_BC_BASE_ADDR
+    0x00000000, // TA_BC_BASE_ADDR_HI
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // COHER_DEST_BASE_HI_0
+    0x00000000, // COHER_DEST_BASE_HI_1
+    0x00000000, // COHER_DEST_BASE_HI_2
+    0x00000000, // COHER_DEST_BASE_HI_3
+    0x00000000, // COHER_DEST_BASE_2
+    0x00000000, // COHER_DEST_BASE_3
+    0x00000000, // PA_SC_WINDOW_OFFSET
+    0x80000000, // PA_SC_WINDOW_SCISSOR_TL
+    0x40004000, // PA_SC_WINDOW_SCISSOR_BR
+    0x0000ffff, // PA_SC_CLIPRECT_RULE
+    0x00000000, // PA_SC_CLIPRECT_0_TL
+    0x40004000, // PA_SC_CLIPRECT_0_BR
+    0x00000000, // PA_SC_CLIPRECT_1_TL
+    0x40004000, // PA_SC_CLIPRECT_1_BR
+    0x00000000, // PA_SC_CLIPRECT_2_TL
+    0x40004000, // PA_SC_CLIPRECT_2_BR
+    0x00000000, // PA_SC_CLIPRECT_3_TL
+    0x40004000, // PA_SC_CLIPRECT_3_BR
+    0xaa99aaaa, // PA_SC_EDGERULE
+    0x00000000, // PA_SU_HARDWARE_SCREEN_OFFSET
+    0xffffffff, // CB_TARGET_MASK
+    0xffffffff, // CB_SHADER_MASK
+    0x80000000, // PA_SC_GENERIC_SCISSOR_TL
+    0x40004000, // PA_SC_GENERIC_SCISSOR_BR
+    0x00000000, // COHER_DEST_BASE_0
+    0x00000000, // COHER_DEST_BASE_1
+    0x80000000, // PA_SC_VPORT_SCISSOR_0_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_0_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_1_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_1_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_2_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_2_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_3_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_3_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_4_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_4_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_5_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_5_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_6_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_6_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_7_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_7_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_8_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_8_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_9_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_9_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_10_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_10_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_11_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_11_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_12_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_12_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_13_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_13_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_14_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_14_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_15_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_15_BR
+    0x00000000, // PA_SC_VPORT_ZMIN_0
+    0x3f800000, // PA_SC_VPORT_ZMAX_0
+    0x00000000, // PA_SC_VPORT_ZMIN_1
+    0x3f800000, // PA_SC_VPORT_ZMAX_1
+    0x00000000, // PA_SC_VPORT_ZMIN_2
+    0x3f800000, // PA_SC_VPORT_ZMAX_2
+    0x00000000, // PA_SC_VPORT_ZMIN_3
+    0x3f800000, // PA_SC_VPORT_ZMAX_3
+    0x00000000, // PA_SC_VPORT_ZMIN_4
+    0x3f800000, // PA_SC_VPORT_ZMAX_4
+    0x00000000, // PA_SC_VPORT_ZMIN_5
+    0x3f800000, // PA_SC_VPORT_ZMAX_5
+    0x00000000, // PA_SC_VPORT_ZMIN_6
+    0x3f800000, // PA_SC_VPORT_ZMAX_6
+    0x00000000, // PA_SC_VPORT_ZMIN_7
+    0x3f800000, // PA_SC_VPORT_ZMAX_7
+    0x00000000, // PA_SC_VPORT_ZMIN_8
+    0x3f800000, // PA_SC_VPORT_ZMAX_8
+    0x00000000, // PA_SC_VPORT_ZMIN_9
+    0x3f800000, // PA_SC_VPORT_ZMAX_9
+    0x00000000, // PA_SC_VPORT_ZMIN_10
+    0x3f800000, // PA_SC_VPORT_ZMAX_10
+    0x00000000, // PA_SC_VPORT_ZMIN_11
+    0x3f800000, // PA_SC_VPORT_ZMAX_11
+    0x00000000, // PA_SC_VPORT_ZMIN_12
+    0x3f800000, // PA_SC_VPORT_ZMAX_12
+    0x00000000, // PA_SC_VPORT_ZMIN_13
+    0x3f800000, // PA_SC_VPORT_ZMAX_13
+    0x00000000, // PA_SC_VPORT_ZMIN_14
+    0x3f800000, // PA_SC_VPORT_ZMAX_14
+    0x00000000, // PA_SC_VPORT_ZMIN_15
+    0x3f800000, // PA_SC_VPORT_ZMAX_15
+};
+static const unsigned int vi_SECT_CONTEXT_def_2[] =
+{
+    0x00000000, // PA_SC_SCREEN_EXTENT_CONTROL
+    0, // HOLE
+    0x00000000, // CP_PERFMON_CNTX_CNTL
+    0x00000000, // CP_RINGID
+    0x00000000, // CP_VMID
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0xffffffff, // VGT_MAX_VTX_INDX
+    0x00000000, // VGT_MIN_VTX_INDX
+    0x00000000, // VGT_INDX_OFFSET
+    0x00000000, // VGT_MULTI_PRIM_IB_RESET_INDX
+    0, // HOLE
+    0x00000000, // CB_BLEND_RED
+    0x00000000, // CB_BLEND_GREEN
+    0x00000000, // CB_BLEND_BLUE
+    0x00000000, // CB_BLEND_ALPHA
+    0x00000000, // CB_DCC_CONTROL
+    0, // HOLE
+    0x00000000, // DB_STENCIL_CONTROL
+    0x00000000, // DB_STENCILREFMASK
+    0x00000000, // DB_STENCILREFMASK_BF
+    0, // HOLE
+    0x00000000, // PA_CL_VPORT_XSCALE
+    0x00000000, // PA_CL_VPORT_XOFFSET
+    0x00000000, // PA_CL_VPORT_YSCALE
+    0x00000000, // PA_CL_VPORT_YOFFSET
+    0x00000000, // PA_CL_VPORT_ZSCALE
+    0x00000000, // PA_CL_VPORT_ZOFFSET
+    0x00000000, // PA_CL_VPORT_XSCALE_1
+    0x00000000, // PA_CL_VPORT_XOFFSET_1
+    0x00000000, // PA_CL_VPORT_YSCALE_1
+    0x00000000, // PA_CL_VPORT_YOFFSET_1
+    0x00000000, // PA_CL_VPORT_ZSCALE_1
+    0x00000000, // PA_CL_VPORT_ZOFFSET_1
+    0x00000000, // PA_CL_VPORT_XSCALE_2
+    0x00000000, // PA_CL_VPORT_XOFFSET_2
+    0x00000000, // PA_CL_VPORT_YSCALE_2
+    0x00000000, // PA_CL_VPORT_YOFFSET_2
+    0x00000000, // PA_CL_VPORT_ZSCALE_2
+    0x00000000, // PA_CL_VPORT_ZOFFSET_2
+    0x00000000, // PA_CL_VPORT_XSCALE_3
+    0x00000000, // PA_CL_VPORT_XOFFSET_3
+    0x00000000, // PA_CL_VPORT_YSCALE_3
+    0x00000000, // PA_CL_VPORT_YOFFSET_3
+    0x00000000, // PA_CL_VPORT_ZSCALE_3
+    0x00000000, // PA_CL_VPORT_ZOFFSET_3
+    0x00000000, // PA_CL_VPORT_XSCALE_4
+    0x00000000, // PA_CL_VPORT_XOFFSET_4
+    0x00000000, // PA_CL_VPORT_YSCALE_4
+    0x00000000, // PA_CL_VPORT_YOFFSET_4
+    0x00000000, // PA_CL_VPORT_ZSCALE_4
+    0x00000000, // PA_CL_VPORT_ZOFFSET_4
+    0x00000000, // PA_CL_VPORT_XSCALE_5
+    0x00000000, // PA_CL_VPORT_XOFFSET_5
+    0x00000000, // PA_CL_VPORT_YSCALE_5
+    0x00000000, // PA_CL_VPORT_YOFFSET_5
+    0x00000000, // PA_CL_VPORT_ZSCALE_5
+    0x00000000, // PA_CL_VPORT_ZOFFSET_5
+    0x00000000, // PA_CL_VPORT_XSCALE_6
+    0x00000000, // PA_CL_VPORT_XOFFSET_6
+    0x00000000, // PA_CL_VPORT_YSCALE_6
+    0x00000000, // PA_CL_VPORT_YOFFSET_6
+    0x00000000, // PA_CL_VPORT_ZSCALE_6
+    0x00000000, // PA_CL_VPORT_ZOFFSET_6
+    0x00000000, // PA_CL_VPORT_XSCALE_7
+    0x00000000, // PA_CL_VPORT_XOFFSET_7
+    0x00000000, // PA_CL_VPORT_YSCALE_7
+    0x00000000, // PA_CL_VPORT_YOFFSET_7
+    0x00000000, // PA_CL_VPORT_ZSCALE_7
+    0x00000000, // PA_CL_VPORT_ZOFFSET_7
+    0x00000000, // PA_CL_VPORT_XSCALE_8
+    0x00000000, // PA_CL_VPORT_XOFFSET_8
+    0x00000000, // PA_CL_VPORT_YSCALE_8
+    0x00000000, // PA_CL_VPORT_YOFFSET_8
+    0x00000000, // PA_CL_VPORT_ZSCALE_8
+    0x00000000, // PA_CL_VPORT_ZOFFSET_8
+    0x00000000, // PA_CL_VPORT_XSCALE_9
+    0x00000000, // PA_CL_VPORT_XOFFSET_9
+    0x00000000, // PA_CL_VPORT_YSCALE_9
+    0x00000000, // PA_CL_VPORT_YOFFSET_9
+    0x00000000, // PA_CL_VPORT_ZSCALE_9
+    0x00000000, // PA_CL_VPORT_ZOFFSET_9
+    0x00000000, // PA_CL_VPORT_XSCALE_10
+    0x00000000, // PA_CL_VPORT_XOFFSET_10
+    0x00000000, // PA_CL_VPORT_YSCALE_10
+    0x00000000, // PA_CL_VPORT_YOFFSET_10
+    0x00000000, // PA_CL_VPORT_ZSCALE_10
+    0x00000000, // PA_CL_VPORT_ZOFFSET_10
+    0x00000000, // PA_CL_VPORT_XSCALE_11
+    0x00000000, // PA_CL_VPORT_XOFFSET_11
+    0x00000000, // PA_CL_VPORT_YSCALE_11
+    0x00000000, // PA_CL_VPORT_YOFFSET_11
+    0x00000000, // PA_CL_VPORT_ZSCALE_11
+    0x00000000, // PA_CL_VPORT_ZOFFSET_11
+    0x00000000, // PA_CL_VPORT_XSCALE_12
+    0x00000000, // PA_CL_VPORT_XOFFSET_12
+    0x00000000, // PA_CL_VPORT_YSCALE_12
+    0x00000000, // PA_CL_VPORT_YOFFSET_12
+    0x00000000, // PA_CL_VPORT_ZSCALE_12
+    0x00000000, // PA_CL_VPORT_ZOFFSET_12
+    0x00000000, // PA_CL_VPORT_XSCALE_13
+    0x00000000, // PA_CL_VPORT_XOFFSET_13
+    0x00000000, // PA_CL_VPORT_YSCALE_13
+    0x00000000, // PA_CL_VPORT_YOFFSET_13
+    0x00000000, // PA_CL_VPORT_ZSCALE_13
+    0x00000000, // PA_CL_VPORT_ZOFFSET_13
+    0x00000000, // PA_CL_VPORT_XSCALE_14
+    0x00000000, // PA_CL_VPORT_XOFFSET_14
+    0x00000000, // PA_CL_VPORT_YSCALE_14
+    0x00000000, // PA_CL_VPORT_YOFFSET_14
+    0x00000000, // PA_CL_VPORT_ZSCALE_14
+    0x00000000, // PA_CL_VPORT_ZOFFSET_14
+    0x00000000, // PA_CL_VPORT_XSCALE_15
+    0x00000000, // PA_CL_VPORT_XOFFSET_15
+    0x00000000, // PA_CL_VPORT_YSCALE_15
+    0x00000000, // PA_CL_VPORT_YOFFSET_15
+    0x00000000, // PA_CL_VPORT_ZSCALE_15
+    0x00000000, // PA_CL_VPORT_ZOFFSET_15
+    0x00000000, // PA_CL_UCP_0_X
+    0x00000000, // PA_CL_UCP_0_Y
+    0x00000000, // PA_CL_UCP_0_Z
+    0x00000000, // PA_CL_UCP_0_W
+    0x00000000, // PA_CL_UCP_1_X
+    0x00000000, // PA_CL_UCP_1_Y
+    0x00000000, // PA_CL_UCP_1_Z
+    0x00000000, // PA_CL_UCP_1_W
+    0x00000000, // PA_CL_UCP_2_X
+    0x00000000, // PA_CL_UCP_2_Y
+    0x00000000, // PA_CL_UCP_2_Z
+    0x00000000, // PA_CL_UCP_2_W
+    0x00000000, // PA_CL_UCP_3_X
+    0x00000000, // PA_CL_UCP_3_Y
+    0x00000000, // PA_CL_UCP_3_Z
+    0x00000000, // PA_CL_UCP_3_W
+    0x00000000, // PA_CL_UCP_4_X
+    0x00000000, // PA_CL_UCP_4_Y
+    0x00000000, // PA_CL_UCP_4_Z
+    0x00000000, // PA_CL_UCP_4_W
+    0x00000000, // PA_CL_UCP_5_X
+    0x00000000, // PA_CL_UCP_5_Y
+    0x00000000, // PA_CL_UCP_5_Z
+    0x00000000, // PA_CL_UCP_5_W
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // SPI_PS_INPUT_CNTL_0
+    0x00000000, // SPI_PS_INPUT_CNTL_1
+    0x00000000, // SPI_PS_INPUT_CNTL_2
+    0x00000000, // SPI_PS_INPUT_CNTL_3
+    0x00000000, // SPI_PS_INPUT_CNTL_4
+    0x00000000, // SPI_PS_INPUT_CNTL_5
+    0x00000000, // SPI_PS_INPUT_CNTL_6
+    0x00000000, // SPI_PS_INPUT_CNTL_7
+    0x00000000, // SPI_PS_INPUT_CNTL_8
+    0x00000000, // SPI_PS_INPUT_CNTL_9
+    0x00000000, // SPI_PS_INPUT_CNTL_10
+    0x00000000, // SPI_PS_INPUT_CNTL_11
+    0x00000000, // SPI_PS_INPUT_CNTL_12
+    0x00000000, // SPI_PS_INPUT_CNTL_13
+    0x00000000, // SPI_PS_INPUT_CNTL_14
+    0x00000000, // SPI_PS_INPUT_CNTL_15
+    0x00000000, // SPI_PS_INPUT_CNTL_16
+    0x00000000, // SPI_PS_INPUT_CNTL_17
+    0x00000000, // SPI_PS_INPUT_CNTL_18
+    0x00000000, // SPI_PS_INPUT_CNTL_19
+    0x00000000, // SPI_PS_INPUT_CNTL_20
+    0x00000000, // SPI_PS_INPUT_CNTL_21
+    0x00000000, // SPI_PS_INPUT_CNTL_22
+    0x00000000, // SPI_PS_INPUT_CNTL_23
+    0x00000000, // SPI_PS_INPUT_CNTL_24
+    0x00000000, // SPI_PS_INPUT_CNTL_25
+    0x00000000, // SPI_PS_INPUT_CNTL_26
+    0x00000000, // SPI_PS_INPUT_CNTL_27
+    0x00000000, // SPI_PS_INPUT_CNTL_28
+    0x00000000, // SPI_PS_INPUT_CNTL_29
+    0x00000000, // SPI_PS_INPUT_CNTL_30
+    0x00000000, // SPI_PS_INPUT_CNTL_31
+    0x00000000, // SPI_VS_OUT_CONFIG
+    0, // HOLE
+    0x00000000, // SPI_PS_INPUT_ENA
+    0x00000000, // SPI_PS_INPUT_ADDR
+    0x00000000, // SPI_INTERP_CONTROL_0
+    0x00000002, // SPI_PS_IN_CONTROL
+    0, // HOLE
+    0x00000000, // SPI_BARYC_CNTL
+    0, // HOLE
+    0x00000000, // SPI_TMPRING_SIZE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // SPI_SHADER_POS_FORMAT
+    0x00000000, // SPI_SHADER_Z_FORMAT
+    0x00000000, // SPI_SHADER_COL_FORMAT
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // CB_BLEND0_CONTROL
+    0x00000000, // CB_BLEND1_CONTROL
+    0x00000000, // CB_BLEND2_CONTROL
+    0x00000000, // CB_BLEND3_CONTROL
+    0x00000000, // CB_BLEND4_CONTROL
+    0x00000000, // CB_BLEND5_CONTROL
+    0x00000000, // CB_BLEND6_CONTROL
+    0x00000000, // CB_BLEND7_CONTROL
+};
+static const unsigned int vi_SECT_CONTEXT_def_3[] =
+{
+    0x00000000, // PA_CL_POINT_X_RAD
+    0x00000000, // PA_CL_POINT_Y_RAD
+    0x00000000, // PA_CL_POINT_SIZE
+    0x00000000, // PA_CL_POINT_CULL_RAD
+    0x00000000, // VGT_DMA_BASE_HI
+    0x00000000, // VGT_DMA_BASE
+};
+static const unsigned int vi_SECT_CONTEXT_def_4[] =
+{
+    0x00000000, // DB_DEPTH_CONTROL
+    0x00000000, // DB_EQAA
+    0x00000000, // CB_COLOR_CONTROL
+    0x00000000, // DB_SHADER_CONTROL
+    0x00090000, // PA_CL_CLIP_CNTL
+    0x00000004, // PA_SU_SC_MODE_CNTL
+    0x00000000, // PA_CL_VTE_CNTL
+    0x00000000, // PA_CL_VS_OUT_CNTL
+    0x00000000, // PA_CL_NANINF_CNTL
+    0x00000000, // PA_SU_LINE_STIPPLE_CNTL
+    0x00000000, // PA_SU_LINE_STIPPLE_SCALE
+    0x00000000, // PA_SU_PRIM_FILTER_CNTL
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // PA_SU_POINT_SIZE
+    0x00000000, // PA_SU_POINT_MINMAX
+    0x00000000, // PA_SU_LINE_CNTL
+    0x00000000, // PA_SC_LINE_STIPPLE
+    0x00000000, // VGT_OUTPUT_PATH_CNTL
+    0x00000000, // VGT_HOS_CNTL
+    0x00000000, // VGT_HOS_MAX_TESS_LEVEL
+    0x00000000, // VGT_HOS_MIN_TESS_LEVEL
+    0x00000000, // VGT_HOS_REUSE_DEPTH
+    0x00000000, // VGT_GROUP_PRIM_TYPE
+    0x00000000, // VGT_GROUP_FIRST_DECR
+    0x00000000, // VGT_GROUP_DECR
+    0x00000000, // VGT_GROUP_VECT_0_CNTL
+    0x00000000, // VGT_GROUP_VECT_1_CNTL
+    0x00000000, // VGT_GROUP_VECT_0_FMT_CNTL
+    0x00000000, // VGT_GROUP_VECT_1_FMT_CNTL
+    0x00000000, // VGT_GS_MODE
+    0x00000000, // VGT_GS_ONCHIP_CNTL
+    0x00000000, // PA_SC_MODE_CNTL_0
+    0x00000000, // PA_SC_MODE_CNTL_1
+    0x00000000, // VGT_ENHANCE
+    0x00000100, // VGT_GS_PER_ES
+    0x00000080, // VGT_ES_PER_GS
+    0x00000002, // VGT_GS_PER_VS
+    0x00000000, // VGT_GSVS_RING_OFFSET_1
+    0x00000000, // VGT_GSVS_RING_OFFSET_2
+    0x00000000, // VGT_GSVS_RING_OFFSET_3
+    0x00000000, // VGT_GS_OUT_PRIM_TYPE
+    0x00000000, // IA_ENHANCE
+};
+static const unsigned int vi_SECT_CONTEXT_def_5[] =
+{
+    0x00000000, // WD_ENHANCE
+    0x00000000, // VGT_PRIMITIVEID_EN
+};
+static const unsigned int vi_SECT_CONTEXT_def_6[] =
+{
+    0x00000000, // VGT_PRIMITIVEID_RESET
+};
+static const unsigned int vi_SECT_CONTEXT_def_7[] =
+{
+    0x00000000, // VGT_MULTI_PRIM_IB_RESET_EN
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // VGT_INSTANCE_STEP_RATE_0
+    0x00000000, // VGT_INSTANCE_STEP_RATE_1
+    0x000000ff, // IA_MULTI_VGT_PARAM
+    0x00000000, // VGT_ESGS_RING_ITEMSIZE
+    0x00000000, // VGT_GSVS_RING_ITEMSIZE
+    0x00000000, // VGT_REUSE_OFF
+    0x00000000, // VGT_VTX_CNT_EN
+    0x00000000, // DB_HTILE_SURFACE
+    0x00000000, // DB_SRESULTS_COMPARE_STATE0
+    0x00000000, // DB_SRESULTS_COMPARE_STATE1
+    0x00000000, // DB_PRELOAD_CONTROL
+    0, // HOLE
+    0x00000000, // VGT_STRMOUT_BUFFER_SIZE_0
+    0x00000000, // VGT_STRMOUT_VTX_STRIDE_0
+    0, // HOLE
+    0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_0
+    0x00000000, // VGT_STRMOUT_BUFFER_SIZE_1
+    0x00000000, // VGT_STRMOUT_VTX_STRIDE_1
+    0, // HOLE
+    0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_1
+    0x00000000, // VGT_STRMOUT_BUFFER_SIZE_2
+    0x00000000, // VGT_STRMOUT_VTX_STRIDE_2
+    0, // HOLE
+    0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_2
+    0x00000000, // VGT_STRMOUT_BUFFER_SIZE_3
+    0x00000000, // VGT_STRMOUT_VTX_STRIDE_3
+    0, // HOLE
+    0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_3
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // VGT_STRMOUT_DRAW_OPAQUE_OFFSET
+    0x00000000, // VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE
+    0x00000000, // VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE
+    0, // HOLE
+    0x00000000, // VGT_GS_MAX_VERT_OUT
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // VGT_TESS_DISTRIBUTION
+    0x00000000, // VGT_SHADER_STAGES_EN
+    0x00000000, // VGT_LS_HS_CONFIG
+    0x00000000, // VGT_GS_VERT_ITEMSIZE
+    0x00000000, // VGT_GS_VERT_ITEMSIZE_1
+    0x00000000, // VGT_GS_VERT_ITEMSIZE_2
+    0x00000000, // VGT_GS_VERT_ITEMSIZE_3
+    0x00000000, // VGT_TF_PARAM
+    0x00000000, // DB_ALPHA_TO_MASK
+    0x00000000, // VGT_DISPATCH_DRAW_INDEX
+    0x00000000, // PA_SU_POLY_OFFSET_DB_FMT_CNTL
+    0x00000000, // PA_SU_POLY_OFFSET_CLAMP
+    0x00000000, // PA_SU_POLY_OFFSET_FRONT_SCALE
+    0x00000000, // PA_SU_POLY_OFFSET_FRONT_OFFSET
+    0x00000000, // PA_SU_POLY_OFFSET_BACK_SCALE
+    0x00000000, // PA_SU_POLY_OFFSET_BACK_OFFSET
+    0x00000000, // VGT_GS_INSTANCE_CNT
+    0x00000000, // VGT_STRMOUT_CONFIG
+    0x00000000, // VGT_STRMOUT_BUFFER_CONFIG
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // PA_SC_CENTROID_PRIORITY_0
+    0x00000000, // PA_SC_CENTROID_PRIORITY_1
+    0x00001000, // PA_SC_LINE_CNTL
+    0x00000000, // PA_SC_AA_CONFIG
+    0x00000005, // PA_SU_VTX_CNTL
+    0x3f800000, // PA_CL_GB_VERT_CLIP_ADJ
+    0x3f800000, // PA_CL_GB_VERT_DISC_ADJ
+    0x3f800000, // PA_CL_GB_HORZ_CLIP_ADJ
+    0x3f800000, // PA_CL_GB_HORZ_DISC_ADJ
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3
+    0xffffffff, // PA_SC_AA_MASK_X0Y0_X1Y0
+    0xffffffff, // PA_SC_AA_MASK_X0Y1_X1Y1
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x0000001e, // VGT_VERTEX_REUSE_BLOCK_CNTL
+    0x00000020, // VGT_OUT_DEALLOC_CNTL
+    0x00000000, // CB_COLOR0_BASE
+    0x00000000, // CB_COLOR0_PITCH
+    0x00000000, // CB_COLOR0_SLICE
+    0x00000000, // CB_COLOR0_VIEW
+    0x00000000, // CB_COLOR0_INFO
+    0x00000000, // CB_COLOR0_ATTRIB
+    0x00000000, // CB_COLOR0_DCC_CONTROL
+    0x00000000, // CB_COLOR0_CMASK
+    0x00000000, // CB_COLOR0_CMASK_SLICE
+    0x00000000, // CB_COLOR0_FMASK
+    0x00000000, // CB_COLOR0_FMASK_SLICE
+    0x00000000, // CB_COLOR0_CLEAR_WORD0
+    0x00000000, // CB_COLOR0_CLEAR_WORD1
+    0x00000000, // CB_COLOR0_DCC_BASE
+    0, // HOLE
+    0x00000000, // CB_COLOR1_BASE
+    0x00000000, // CB_COLOR1_PITCH
+    0x00000000, // CB_COLOR1_SLICE
+    0x00000000, // CB_COLOR1_VIEW
+    0x00000000, // CB_COLOR1_INFO
+    0x00000000, // CB_COLOR1_ATTRIB
+    0x00000000, // CB_COLOR1_DCC_CONTROL
+    0x00000000, // CB_COLOR1_CMASK
+    0x00000000, // CB_COLOR1_CMASK_SLICE
+    0x00000000, // CB_COLOR1_FMASK
+    0x00000000, // CB_COLOR1_FMASK_SLICE
+    0x00000000, // CB_COLOR1_CLEAR_WORD0
+    0x00000000, // CB_COLOR1_CLEAR_WORD1
+    0x00000000, // CB_COLOR1_DCC_BASE
+    0, // HOLE
+    0x00000000, // CB_COLOR2_BASE
+    0x00000000, // CB_COLOR2_PITCH
+    0x00000000, // CB_COLOR2_SLICE
+    0x00000000, // CB_COLOR2_VIEW
+    0x00000000, // CB_COLOR2_INFO
+    0x00000000, // CB_COLOR2_ATTRIB
+    0x00000000, // CB_COLOR2_DCC_CONTROL
+    0x00000000, // CB_COLOR2_CMASK
+    0x00000000, // CB_COLOR2_CMASK_SLICE
+    0x00000000, // CB_COLOR2_FMASK
+    0x00000000, // CB_COLOR2_FMASK_SLICE
+    0x00000000, // CB_COLOR2_CLEAR_WORD0
+    0x00000000, // CB_COLOR2_CLEAR_WORD1
+    0x00000000, // CB_COLOR2_DCC_BASE
+    0, // HOLE
+    0x00000000, // CB_COLOR3_BASE
+    0x00000000, // CB_COLOR3_PITCH
+    0x00000000, // CB_COLOR3_SLICE
+    0x00000000, // CB_COLOR3_VIEW
+    0x00000000, // CB_COLOR3_INFO
+    0x00000000, // CB_COLOR3_ATTRIB
+    0x00000000, // CB_COLOR3_DCC_CONTROL
+    0x00000000, // CB_COLOR3_CMASK
+    0x00000000, // CB_COLOR3_CMASK_SLICE
+    0x00000000, // CB_COLOR3_FMASK
+    0x00000000, // CB_COLOR3_FMASK_SLICE
+    0x00000000, // CB_COLOR3_CLEAR_WORD0
+    0x00000000, // CB_COLOR3_CLEAR_WORD1
+    0x00000000, // CB_COLOR3_DCC_BASE
+    0, // HOLE
+    0x00000000, // CB_COLOR4_BASE
+    0x00000000, // CB_COLOR4_PITCH
+    0x00000000, // CB_COLOR4_SLICE
+    0x00000000, // CB_COLOR4_VIEW
+    0x00000000, // CB_COLOR4_INFO
+    0x00000000, // CB_COLOR4_ATTRIB
+    0x00000000, // CB_COLOR4_DCC_CONTROL
+    0x00000000, // CB_COLOR4_CMASK
+    0x00000000, // CB_COLOR4_CMASK_SLICE
+    0x00000000, // CB_COLOR4_FMASK
+    0x00000000, // CB_COLOR4_FMASK_SLICE
+    0x00000000, // CB_COLOR4_CLEAR_WORD0
+    0x00000000, // CB_COLOR4_CLEAR_WORD1
+    0x00000000, // CB_COLOR4_DCC_BASE
+    0, // HOLE
+    0x00000000, // CB_COLOR5_BASE
+    0x00000000, // CB_COLOR5_PITCH
+    0x00000000, // CB_COLOR5_SLICE
+    0x00000000, // CB_COLOR5_VIEW
+    0x00000000, // CB_COLOR5_INFO
+    0x00000000, // CB_COLOR5_ATTRIB
+    0x00000000, // CB_COLOR5_DCC_CONTROL
+    0x00000000, // CB_COLOR5_CMASK
+    0x00000000, // CB_COLOR5_CMASK_SLICE
+    0x00000000, // CB_COLOR5_FMASK
+    0x00000000, // CB_COLOR5_FMASK_SLICE
+    0x00000000, // CB_COLOR5_CLEAR_WORD0
+    0x00000000, // CB_COLOR5_CLEAR_WORD1
+    0x00000000, // CB_COLOR5_DCC_BASE
+    0, // HOLE
+    0x00000000, // CB_COLOR6_BASE
+    0x00000000, // CB_COLOR6_PITCH
+    0x00000000, // CB_COLOR6_SLICE
+    0x00000000, // CB_COLOR6_VIEW
+    0x00000000, // CB_COLOR6_INFO
+    0x00000000, // CB_COLOR6_ATTRIB
+    0x00000000, // CB_COLOR6_DCC_CONTROL
+    0x00000000, // CB_COLOR6_CMASK
+    0x00000000, // CB_COLOR6_CMASK_SLICE
+    0x00000000, // CB_COLOR6_FMASK
+    0x00000000, // CB_COLOR6_FMASK_SLICE
+    0x00000000, // CB_COLOR6_CLEAR_WORD0
+    0x00000000, // CB_COLOR6_CLEAR_WORD1
+    0x00000000, // CB_COLOR6_DCC_BASE
+    0, // HOLE
+    0x00000000, // CB_COLOR7_BASE
+    0x00000000, // CB_COLOR7_PITCH
+    0x00000000, // CB_COLOR7_SLICE
+    0x00000000, // CB_COLOR7_VIEW
+    0x00000000, // CB_COLOR7_INFO
+    0x00000000, // CB_COLOR7_ATTRIB
+    0x00000000, // CB_COLOR7_DCC_CONTROL
+    0x00000000, // CB_COLOR7_CMASK
+    0x00000000, // CB_COLOR7_CMASK_SLICE
+    0x00000000, // CB_COLOR7_FMASK
+    0x00000000, // CB_COLOR7_FMASK_SLICE
+    0x00000000, // CB_COLOR7_CLEAR_WORD0
+    0x00000000, // CB_COLOR7_CLEAR_WORD1
+};
+static const struct cs_extent_def vi_SECT_CONTEXT_defs[] =
+{
+    {vi_SECT_CONTEXT_def_1, 0x0000a000, 212 },
+    {vi_SECT_CONTEXT_def_2, 0x0000a0d6, 274 },
+    {vi_SECT_CONTEXT_def_3, 0x0000a1f5, 6 },
+    {vi_SECT_CONTEXT_def_4, 0x0000a200, 157 },
+    {vi_SECT_CONTEXT_def_5, 0x0000a2a0, 2 },
+    {vi_SECT_CONTEXT_def_6, 0x0000a2a3, 1 },
+    {vi_SECT_CONTEXT_def_7, 0x0000a2a5, 233 },
+    { 0, 0, 0 }
+};
+static const struct cs_section_def vi_cs_data[] = {
+    { vi_SECT_CONTEXT_defs, SECT_CONTEXT },
+    { 0, SECT_NONE }
+};
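
(For context -- not part of the patch.)  The extent tables above pair an
array of register default values with a starting register offset and a dword
count, and the section table groups them.  Below is a rough sketch of how
such tables are typically consumed, e.g. to size a clear-state buffer; the
struct layouts are inferred from the initializers in this header, and the
2-dword packet-header overhead is only an assumption, not the driver's exact
accounting.

	/* Illustrative sketch; layouts inferred from the tables above. */
	struct cs_extent_def {
		const unsigned int *extent;	/* register default values   */
		unsigned int reg_index;		/* first register offset     */
		unsigned int reg_count;		/* number of dwords in table */
	};

	struct cs_section_def {
		const struct cs_extent_def *section;
		unsigned int id;		/* e.g. SECT_CONTEXT         */
	};

	static unsigned int cs_total_dwords(const struct cs_section_def *cs)
	{
		unsigned int count = 0;

		for (; cs->section; cs++) {
			const struct cs_extent_def *ext;

			for (ext = cs->section; ext->extent; ext++)
				count += 2 + ext->reg_count; /* hdr + payload */
		}
		return count;
	}
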
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
new file mode 100644 (file)
index 0000000..b5c8485
--- /dev/null
@@ -0,0 +1,1712 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/firmware.h>
+#include <linux/seq_file.h>
+#include "drmP.h"
+#include "amdgpu.h"
+#include "amdgpu_pm.h"
+#include "amdgpu_atombios.h"
+#include "vid.h"
+#include "vi_dpm.h"
+#include "amdgpu_dpm.h"
+#include "cz_dpm.h"
+#include "cz_ppsmc.h"
+#include "atom.h"
+
+#include "smu/smu_8_0_d.h"
+#include "smu/smu_8_0_sh_mask.h"
+#include "gca/gfx_8_0_d.h"
+#include "gca/gfx_8_0_sh_mask.h"
+#include "gmc/gmc_8_1_d.h"
+#include "bif/bif_5_1_d.h"
+#include "gfx_v8_0.h"
+
+static struct cz_ps *cz_get_ps(struct amdgpu_ps *rps)
+{
+       struct cz_ps *ps = rps->ps_priv;
+
+       return ps;
+}
+
+static struct cz_power_info *cz_get_pi(struct amdgpu_device *adev)
+{
+       struct cz_power_info *pi = adev->pm.dpm.priv;
+
+       return pi;
+}
+
+static uint16_t cz_convert_8bit_index_to_voltage(struct amdgpu_device *adev,
+                                                       uint16_t voltage)
+{
+       uint16_t tmp = 6200 - voltage * 25;
+
+       return tmp;
+}
+
+static void cz_construct_max_power_limits_table(struct amdgpu_device *adev,
+                               struct amdgpu_clock_and_voltage_limits *table)
+{
+       struct cz_power_info *pi = cz_get_pi(adev);
+       struct amdgpu_clock_voltage_dependency_table *dep_table =
+               &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
+
+       if (dep_table->count > 0) {
+               table->sclk = dep_table->entries[dep_table->count - 1].clk;
+               table->vddc = cz_convert_8bit_index_to_voltage(adev,
+                               dep_table->entries[dep_table->count - 1].v);
+       }
+
+       table->mclk = pi->sys_info.nbp_memory_clock[0];
+
+}
+
+union igp_info {
+       struct _ATOM_INTEGRATED_SYSTEM_INFO info;
+       struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7;
+       struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8 info_8;
+       struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_9 info_9;
+};
+
+static int cz_parse_sys_info_table(struct amdgpu_device *adev)
+{
+       struct cz_power_info *pi = cz_get_pi(adev);
+       struct amdgpu_mode_info *mode_info = &adev->mode_info;
+       int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
+       union igp_info *igp_info;
+       u8 frev, crev;
+       u16 data_offset;
+       int i = 0;
+
+       if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
+                                  &frev, &crev, &data_offset)) {
+               igp_info = (union igp_info *)(mode_info->atom_context->bios +
+                                             data_offset);
+
+               if (crev != 9) {
+                       DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev);
+                       return -EINVAL;
+               }
+               pi->sys_info.bootup_sclk =
+                       le32_to_cpu(igp_info->info_9.ulBootUpEngineClock);
+               pi->sys_info.bootup_uma_clk =
+                       le32_to_cpu(igp_info->info_9.ulBootUpUMAClock);
+               pi->sys_info.dentist_vco_freq =
+                       le32_to_cpu(igp_info->info_9.ulDentistVCOFreq);
+               pi->sys_info.bootup_nb_voltage_index =
+                       le16_to_cpu(igp_info->info_9.usBootUpNBVoltage);
+
+               if (igp_info->info_9.ucHtcTmpLmt == 0)
+                       pi->sys_info.htc_tmp_lmt = 203;
+               else
+                       pi->sys_info.htc_tmp_lmt = igp_info->info_9.ucHtcTmpLmt;
+
+               if (igp_info->info_9.ucHtcHystLmt == 0)
+                       pi->sys_info.htc_hyst_lmt = 5;
+               else
+                       pi->sys_info.htc_hyst_lmt = igp_info->info_9.ucHtcHystLmt;
+
+               if (pi->sys_info.htc_tmp_lmt <= pi->sys_info.htc_hyst_lmt) {
+                       DRM_ERROR("The htcTmpLmt should be larger than htcHystLmt.\n");
+                       return -EINVAL;
+               }
+
+               if (le32_to_cpu(igp_info->info_9.ulSystemConfig) & (1 << 3) &&
+                               pi->enable_nb_ps_policy)
+                       pi->sys_info.nb_dpm_enable = true;
+               else
+                       pi->sys_info.nb_dpm_enable = false;
+
+               for (i = 0; i < CZ_NUM_NBPSTATES; i++) {
+                       if (i < CZ_NUM_NBPMEMORY_CLOCK)
+                               pi->sys_info.nbp_memory_clock[i] =
+                               le32_to_cpu(igp_info->info_9.ulNbpStateMemclkFreq[i]);
+                       pi->sys_info.nbp_n_clock[i] =
+                       le32_to_cpu(igp_info->info_9.ulNbpStateNClkFreq[i]);
+               }
+
+               for (i = 0; i < CZ_MAX_DISPLAY_CLOCK_LEVEL; i++)
+                       pi->sys_info.display_clock[i] =
+                       le32_to_cpu(igp_info->info_9.sDispClkVoltageMapping[i].ulMaximumSupportedCLK);
+
+               for (i = 0; i < CZ_NUM_NBPSTATES; i++)
+                       pi->sys_info.nbp_voltage_index[i] =
+                               le32_to_cpu(igp_info->info_9.usNBPStateVoltage[i]);
+
+               if (le32_to_cpu(igp_info->info_9.ulGPUCapInfo) &
+                       SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS)
+                       pi->caps_enable_dfs_bypass = true;
+
+               pi->sys_info.uma_channel_number =
+                       igp_info->info_9.ucUMAChannelNumber;
+
+               cz_construct_max_power_limits_table(adev,
+                       &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac);
+       }
+
+       return 0;
+}
+
+static void cz_patch_voltage_values(struct amdgpu_device *adev)
+{
+       int i;
+       struct amdgpu_uvd_clock_voltage_dependency_table *uvd_table =
+               &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
+       struct amdgpu_vce_clock_voltage_dependency_table *vce_table =
+               &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
+       struct amdgpu_clock_voltage_dependency_table *acp_table =
+               &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
+
+       if (uvd_table->count) {
+               for (i = 0; i < uvd_table->count; i++)
+                       uvd_table->entries[i].v =
+                               cz_convert_8bit_index_to_voltage(adev,
+                                               uvd_table->entries[i].v);
+       }
+
+       if (vce_table->count) {
+               for (i = 0; i < vce_table->count; i++)
+                       vce_table->entries[i].v =
+                               cz_convert_8bit_index_to_voltage(adev,
+                                               vce_table->entries[i].v);
+       }
+
+       if (acp_table->count) {
+               for (i = 0; i < acp_table->count; i++)
+                       acp_table->entries[i].v =
+                               cz_convert_8bit_index_to_voltage(adev,
+                                               acp_table->entries[i].v);
+       }
+
+}
+
+static void cz_construct_boot_state(struct amdgpu_device *adev)
+{
+       struct cz_power_info *pi = cz_get_pi(adev);
+
+       pi->boot_pl.sclk = pi->sys_info.bootup_sclk;
+       pi->boot_pl.vddc_index = pi->sys_info.bootup_nb_voltage_index;
+       pi->boot_pl.ds_divider_index = 0;
+       pi->boot_pl.ss_divider_index = 0;
+       pi->boot_pl.allow_gnb_slow = 1;
+       pi->boot_pl.force_nbp_state = 0;
+       pi->boot_pl.display_wm = 0;
+       pi->boot_pl.vce_wm = 0;
+
+}
+
+static void cz_patch_boot_state(struct amdgpu_device *adev,
+                               struct cz_ps *ps)
+{
+       struct cz_power_info *pi = cz_get_pi(adev);
+
+       ps->num_levels = 1;
+       ps->levels[0] = pi->boot_pl;
+}
+
+union pplib_clock_info {
+       struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
+       struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
+       struct _ATOM_PPLIB_CZ_CLOCK_INFO carrizo;
+};
+
+static void cz_parse_pplib_clock_info(struct amdgpu_device *adev,
+                                       struct amdgpu_ps *rps, int index,
+                                       union pplib_clock_info *clock_info)
+{
+       struct cz_power_info *pi = cz_get_pi(adev);
+       struct cz_ps *ps = cz_get_ps(rps);
+       struct cz_pl *pl = &ps->levels[index];
+       struct amdgpu_clock_voltage_dependency_table *table =
+                       &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
+
+       pl->sclk = table->entries[clock_info->carrizo.index].clk;
+       pl->vddc_index = table->entries[clock_info->carrizo.index].v;
+
+       ps->num_levels = index + 1;
+
+       if (pi->caps_sclk_ds) {
+               pl->ds_divider_index = 5;
+               pl->ss_divider_index = 5;
+       }
+
+}
+
+static void cz_parse_pplib_non_clock_info(struct amdgpu_device *adev,
+                       struct amdgpu_ps *rps,
+                       struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
+                       u8 table_rev)
+{
+       struct cz_ps *ps = cz_get_ps(rps);
+
+       rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
+       rps->class = le16_to_cpu(non_clock_info->usClassification);
+       rps->class2 = le16_to_cpu(non_clock_info->usClassification2);
+
+       if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
+               rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
+               rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
+       } else {
+               rps->vclk = 0;
+               rps->dclk = 0;
+       }
+
+       if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
+               adev->pm.dpm.boot_ps = rps;
+               cz_patch_boot_state(adev, ps);
+       }
+       if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
+               adev->pm.dpm.uvd_ps = rps;
+
+}
+
+union power_info {
+       struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
+       struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
+       struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
+       struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4;
+       struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5;
+};
+
+union pplib_power_state {
+       struct _ATOM_PPLIB_STATE v1;
+       struct _ATOM_PPLIB_STATE_V2 v2;
+};
+
+static int cz_parse_power_table(struct amdgpu_device *adev)
+{
+       struct amdgpu_mode_info *mode_info = &adev->mode_info;
+       struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
+       union pplib_power_state *power_state;
+       int i, j, k, non_clock_array_index, clock_array_index;
+       union pplib_clock_info *clock_info;
+       struct _StateArray *state_array;
+       struct _ClockInfoArray *clock_info_array;
+       struct _NonClockInfoArray *non_clock_info_array;
+       union power_info *power_info;
+       int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
+       u16 data_offset;
+       u8 frev, crev;
+       u8 *power_state_offset;
+       struct cz_ps *ps;
+
+       if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
+                                   &frev, &crev, &data_offset))
+               return -EINVAL;
+       power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
+
+       state_array = (struct _StateArray *)
+               (mode_info->atom_context->bios + data_offset +
+               le16_to_cpu(power_info->pplib.usStateArrayOffset));
+       clock_info_array = (struct _ClockInfoArray *)
+               (mode_info->atom_context->bios + data_offset +
+               le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
+       non_clock_info_array = (struct _NonClockInfoArray *)
+               (mode_info->atom_context->bios + data_offset +
+               le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
+
+       adev->pm.dpm.ps = kzalloc(sizeof(struct amdgpu_ps) *
+                                       state_array->ucNumEntries, GFP_KERNEL);
+
+       if (!adev->pm.dpm.ps)
+               return -ENOMEM;
+
+       power_state_offset = (u8 *)state_array->states;
+       adev->pm.dpm.platform_caps =
+                       le32_to_cpu(power_info->pplib.ulPlatformCaps);
+       adev->pm.dpm.backbias_response_time =
+                       le16_to_cpu(power_info->pplib.usBackbiasTime);
+       adev->pm.dpm.voltage_response_time =
+                       le16_to_cpu(power_info->pplib.usVoltageTime);
+
+       for (i = 0; i < state_array->ucNumEntries; i++) {
+               power_state = (union pplib_power_state *)power_state_offset;
+               non_clock_array_index = power_state->v2.nonClockInfoIndex;
+               non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
+                       &non_clock_info_array->nonClockInfo[non_clock_array_index];
+
+               ps = kzalloc(sizeof(struct cz_ps), GFP_KERNEL);
+               if (ps == NULL) {
+                       kfree(adev->pm.dpm.ps);
+                       return -ENOMEM;
+               }
+
+               adev->pm.dpm.ps[i].ps_priv = ps;
+               k = 0;
+               for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
+                       clock_array_index = power_state->v2.clockInfoIndex[j];
+                       if (clock_array_index >= clock_info_array->ucNumEntries)
+                               continue;
+                       if (k >= CZ_MAX_HARDWARE_POWERLEVELS)
+                               break;
+                       clock_info = (union pplib_clock_info *)
+                               &clock_info_array->clockInfo[clock_array_index *
+                               clock_info_array->ucEntrySize];
+                       cz_parse_pplib_clock_info(adev, &adev->pm.dpm.ps[i],
+                               k, clock_info);
+                       k++;
+               }
+               cz_parse_pplib_non_clock_info(adev, &adev->pm.dpm.ps[i],
+                                       non_clock_info,
+                                       non_clock_info_array->ucEntrySize);
+               power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
+       }
+       adev->pm.dpm.num_ps = state_array->ucNumEntries;
+
+       return 0;
+}
+
+static int cz_process_firmware_header(struct amdgpu_device *adev)
+{
+       struct cz_power_info *pi = cz_get_pi(adev);
+       u32 tmp;
+       int ret;
+
+       ret = cz_read_smc_sram_dword(adev, SMU8_FIRMWARE_HEADER_LOCATION +
+                                    offsetof(struct SMU8_Firmware_Header,
+                                    DpmTable),
+                                    &tmp, pi->sram_end);
+
+       if (ret == 0)
+               pi->dpm_table_start = tmp;
+
+       return ret;
+}
+
+static int cz_dpm_init(struct amdgpu_device *adev)
+{
+       struct cz_power_info *pi;
+       int ret, i;
+
+       pi = kzalloc(sizeof(struct cz_power_info), GFP_KERNEL);
+       if (NULL == pi)
+               return -ENOMEM;
+
+       adev->pm.dpm.priv = pi;
+
+       ret = amdgpu_get_platform_caps(adev);
+       if (ret)
+               return ret;
+
+       ret = amdgpu_parse_extended_power_table(adev);
+       if (ret)
+               return ret;
+
+       pi->sram_end = SMC_RAM_END;
+
+       /* set up DPM defaults */
+       for (i = 0; i < CZ_MAX_HARDWARE_POWERLEVELS; i++)
+               pi->active_target[i] = CZ_AT_DFLT;
+
+       pi->mgcg_cgtt_local0 = 0x0;
+       pi->mgcg_cgtt_local1 = 0x0;
+       pi->clock_slow_down_step = 25000;
+       pi->skip_clock_slow_down = 1;
+       pi->enable_nb_ps_policy = 1;
+       pi->caps_power_containment = true;
+       pi->caps_cac = true;
+       pi->didt_enabled = false;
+       if (pi->didt_enabled) {
+               pi->caps_sq_ramping = true;
+               pi->caps_db_ramping = true;
+               pi->caps_td_ramping = true;
+               pi->caps_tcp_ramping = true;
+       }
+       pi->caps_sclk_ds = true;
+       pi->voting_clients = 0x00c00033;
+       pi->auto_thermal_throttling_enabled = true;
+       pi->bapm_enabled = false;
+       pi->disable_nb_ps3_in_battery = false;
+       pi->voltage_drop_threshold = 0;
+       pi->caps_sclk_throttle_low_notification = false;
+       pi->gfx_pg_threshold = 500;
+       pi->caps_fps = true;
+       /* uvd */
+       pi->caps_uvd_pg = (adev->pg_flags & AMDGPU_PG_SUPPORT_UVD) ? true : false;
+       pi->caps_uvd_dpm = true;
+       /* vce */
+       pi->caps_vce_pg = (adev->pg_flags & AMDGPU_PG_SUPPORT_VCE) ? true : false;
+       pi->caps_vce_dpm = true;
+       /* acp */
+       pi->caps_acp_pg = (adev->pg_flags & AMDGPU_PG_SUPPORT_ACP) ? true : false;
+       pi->caps_acp_dpm = true;
+
+       pi->caps_stable_power_state = false;
+       pi->nb_dpm_enabled_by_driver = true;
+       pi->nb_dpm_enabled = false;
+       pi->caps_voltage_island = false;
+       /* flags which indicate need to upload pptable */
+       pi->need_pptable_upload = true;
+
+       ret = cz_parse_sys_info_table(adev);
+       if (ret)
+               return ret;
+
+       cz_patch_voltage_values(adev);
+       cz_construct_boot_state(adev);
+
+       ret = cz_parse_power_table(adev);
+       if (ret)
+               return ret;
+
+       ret = cz_process_firmware_header(adev);
+       if (ret)
+               return ret;
+
+       pi->dpm_enabled = true;
+
+       return 0;
+}
+
+static void cz_dpm_fini(struct amdgpu_device *adev)
+{
+       int i;
+
+       for (i = 0; i < adev->pm.dpm.num_ps; i++)
+               kfree(adev->pm.dpm.ps[i].ps_priv);
+
+       kfree(adev->pm.dpm.ps);
+       kfree(adev->pm.dpm.priv);
+       amdgpu_free_extended_power_table(adev);
+}
+
+static void
+cz_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
+                                              struct seq_file *m)
+{
+       struct amdgpu_clock_voltage_dependency_table *table =
+               &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
+       u32 current_index =
+               (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
+               TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >>
+               TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT;
+       u32 sclk, tmp;
+       u16 vddc;
+
+       if (current_index >= NUM_SCLK_LEVELS) {
+               seq_printf(m, "invalid dpm profile %d\n", current_index);
+       } else {
+               sclk = table->entries[current_index].clk;
+               tmp = (RREG32_SMC(ixSMU_VOLTAGE_STATUS) &
+                       SMU_VOLTAGE_STATUS__SMU_VOLTAGE_CURRENT_LEVEL_MASK) >>
+                       SMU_VOLTAGE_STATUS__SMU_VOLTAGE_CURRENT_LEVEL__SHIFT;
+               vddc = cz_convert_8bit_index_to_voltage(adev, (u16)tmp);
+               seq_printf(m, "power level %d    sclk: %u vddc: %u\n",
+                          current_index, sclk, vddc);
+       }
+}
+
+static void cz_dpm_print_power_state(struct amdgpu_device *adev,
+                                       struct amdgpu_ps *rps)
+{
+       int i;
+       struct cz_ps *ps = cz_get_ps(rps);
+
+       amdgpu_dpm_print_class_info(rps->class, rps->class2);
+       amdgpu_dpm_print_cap_info(rps->caps);
+
+       DRM_INFO("\tuvd    vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
+       for (i = 0; i < ps->num_levels; i++) {
+               struct cz_pl *pl = &ps->levels[i];
+
+               DRM_INFO("\t\tpower level %d    sclk: %u vddc: %u\n",
+                      i, pl->sclk,
+                      cz_convert_8bit_index_to_voltage(adev, pl->vddc_index));
+       }
+
+       amdgpu_dpm_print_ps_status(adev, rps);
+}
+
+static void cz_dpm_set_funcs(struct amdgpu_device *adev);
+
+static int cz_dpm_early_init(struct amdgpu_device *adev)
+{
+       cz_dpm_set_funcs(adev);
+
+       return 0;
+}
+
+static int cz_dpm_sw_init(struct amdgpu_device *adev)
+{
+       int ret = 0;
+       /* fix me to add thermal support TODO */
+
+       /* default to balanced state */
+       adev->pm.dpm.state = POWER_STATE_TYPE_BALANCED;
+       adev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
+       adev->pm.dpm.forced_level = AMDGPU_DPM_FORCED_LEVEL_AUTO;
+       adev->pm.default_sclk = adev->clock.default_sclk;
+       adev->pm.default_mclk = adev->clock.default_mclk;
+       adev->pm.current_sclk = adev->clock.default_sclk;
+       adev->pm.current_mclk = adev->clock.default_mclk;
+       adev->pm.int_thermal_type = THERMAL_TYPE_NONE;
+
+       if (amdgpu_dpm == 0)
+               return 0;
+
+       mutex_lock(&adev->pm.mutex);
+       ret = cz_dpm_init(adev);
+       if (ret)
+               goto dpm_init_failed;
+
+       adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
+       if (amdgpu_dpm == 1)
+               amdgpu_pm_print_power_states(adev);
+
+       ret = amdgpu_pm_sysfs_init(adev);
+       if (ret)
+               goto dpm_init_failed;
+
+       mutex_unlock(&adev->pm.mutex);
+       DRM_INFO("amdgpu: dpm initialized\n");
+
+       return 0;
+
+dpm_init_failed:
+       cz_dpm_fini(adev);
+       mutex_unlock(&adev->pm.mutex);
+       DRM_ERROR("amdgpu: dpm initialization failed\n");
+
+       return ret;
+}
+
+static int cz_dpm_sw_fini(struct amdgpu_device *adev)
+{
+       mutex_lock(&adev->pm.mutex);
+       amdgpu_pm_sysfs_fini(adev);
+       cz_dpm_fini(adev);
+       mutex_unlock(&adev->pm.mutex);
+
+       return 0;
+}
+
+static void cz_reset_ap_mask(struct amdgpu_device *adev)
+{
+       struct cz_power_info *pi = cz_get_pi(adev);
+
+       pi->active_process_mask = 0;
+
+}
+
+static int cz_dpm_download_pptable_from_smu(struct amdgpu_device *adev,
+                                                       void **table)
+{
+       int ret = 0;
+
+       ret = cz_smu_download_pptable(adev, table);
+
+       return ret;
+}
+
+static int cz_dpm_upload_pptable_to_smu(struct amdgpu_device *adev)
+{
+       struct cz_power_info *pi = cz_get_pi(adev);
+       struct SMU8_Fusion_ClkTable *clock_table;
+       struct atom_clock_dividers dividers;
+       void *table = NULL;
+       uint8_t i = 0;
+       int ret = 0;
+
+       struct amdgpu_clock_voltage_dependency_table *vddc_table =
+               &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
+       struct amdgpu_clock_voltage_dependency_table *vddgfx_table =
+               &adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk;
+       struct amdgpu_uvd_clock_voltage_dependency_table *uvd_table =
+               &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
+       struct amdgpu_vce_clock_voltage_dependency_table *vce_table =
+               &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
+       struct amdgpu_clock_voltage_dependency_table *acp_table =
+               &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
+
+       if (!pi->need_pptable_upload)
+               return 0;
+
+       ret = cz_dpm_download_pptable_from_smu(adev, &table);
+       if (ret) {
+               DRM_ERROR("amdgpu: Failed to get power play table from SMU!\n");
+               return -EINVAL;
+       }
+
+       clock_table = (struct SMU8_Fusion_ClkTable *)table;
+       /* patch clock table */
+       if (vddc_table->count > CZ_MAX_HARDWARE_POWERLEVELS ||
+                       vddgfx_table->count > CZ_MAX_HARDWARE_POWERLEVELS ||
+                       uvd_table->count > CZ_MAX_HARDWARE_POWERLEVELS ||
+                       vce_table->count > CZ_MAX_HARDWARE_POWERLEVELS ||
+                       acp_table->count > CZ_MAX_HARDWARE_POWERLEVELS) {
+               DRM_ERROR("amdgpu: Invalid Clock Voltage Dependency Table!\n");
+               return -EINVAL;
+       }
+
+       for (i = 0; i < CZ_MAX_HARDWARE_POWERLEVELS; i++) {
+
+               /* vddc sclk */
+               clock_table->SclkBreakdownTable.ClkLevel[i].GnbVid =
+                       (i < vddc_table->count) ? (uint8_t)vddc_table->entries[i].v : 0;
+               clock_table->SclkBreakdownTable.ClkLevel[i].Frequency =
+                       (i < vddc_table->count) ? vddc_table->entries[i].clk : 0;
+               ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
+                               clock_table->SclkBreakdownTable.ClkLevel[i].Frequency,
+                               false, &dividers);
+               if (ret)
+                       return ret;
+               clock_table->SclkBreakdownTable.ClkLevel[i].DfsDid =
+                                               (uint8_t)dividers.post_divider;
+
+               /* vddgfx sclk */
+               clock_table->SclkBreakdownTable.ClkLevel[i].GfxVid =
+                       (i < vddgfx_table->count) ? (uint8_t)vddgfx_table->entries[i].v : 0;
+
+               /* acp breakdown */
+               clock_table->AclkBreakdownTable.ClkLevel[i].GfxVid =
+                       (i < acp_table->count) ? (uint8_t)acp_table->entries[i].v : 0;
+               clock_table->AclkBreakdownTable.ClkLevel[i].Frequency =
+                       (i < acp_table->count) ? acp_table->entries[i].clk : 0;
+               ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
+                               clock_table->AclkBreakdownTable.ClkLevel[i].Frequency,
+                               false, &dividers);
+               if (ret)
+                       return ret;
+               clock_table->AclkBreakdownTable.ClkLevel[i].DfsDid =
+                                               (uint8_t)dividers.post_divider;
+
+               /* uvd breakdown */
+               clock_table->VclkBreakdownTable.ClkLevel[i].GfxVid =
+                       (i < uvd_table->count) ? (uint8_t)uvd_table->entries[i].v : 0;
+               clock_table->VclkBreakdownTable.ClkLevel[i].Frequency =
+                       (i < uvd_table->count) ? uvd_table->entries[i].vclk : 0;
+               ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
+                               clock_table->VclkBreakdownTable.ClkLevel[i].Frequency,
+                               false, &dividers);
+               if (ret)
+                       return ret;
+               clock_table->VclkBreakdownTable.ClkLevel[i].DfsDid =
+                                               (uint8_t)dividers.post_divider;
+
+               clock_table->DclkBreakdownTable.ClkLevel[i].GfxVid =
+                       (i < uvd_table->count) ? (uint8_t)uvd_table->entries[i].v : 0;
+               clock_table->DclkBreakdownTable.ClkLevel[i].Frequency =
+                       (i < uvd_table->count) ? uvd_table->entries[i].dclk : 0;
+               ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
+                               clock_table->DclkBreakdownTable.ClkLevel[i].Frequency,
+                               false, &dividers);
+               if (ret)
+                       return ret;
+               clock_table->DclkBreakdownTable.ClkLevel[i].DfsDid =
+                                               (uint8_t)dividers.post_divider;
+
+               /* vce breakdown */
+               clock_table->EclkBreakdownTable.ClkLevel[i].GfxVid =
+                       (i < vce_table->count) ? (uint8_t)vce_table->entries[i].v : 0;
+               clock_table->EclkBreakdownTable.ClkLevel[i].Frequency =
+                       (i < vce_table->count) ? vce_table->entries[i].ecclk : 0;
+               ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
+                               clock_table->EclkBreakdownTable.ClkLevel[i].Frequency,
+                               false, &dividers);
+               if (ret)
+                       return ret;
+               clock_table->EclkBreakdownTable.ClkLevel[i].DfsDid =
+                                               (uint8_t)dividers.post_divider;
+       }
+
+       /* now upload the patched table to the SMU */
+       ret = cz_smu_upload_pptable(adev);
+       if (ret) {
+               DRM_ERROR("amdgpu: Failed to put power play table to SMU!\n");
+               return ret;
+       }
+
+       return 0;
+}
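+
+/*
+ * The patching loop above applies the same pattern to every breakdown table
+ * (illustrative sketch only):
+ *
+ *      lvl->GnbVid/GfxVid = (uint8_t)dep_table->entries[i].v;    voltage index
+ *      lvl->Frequency     = dep_table->entries[i].clk;           clock from the table
+ *      lvl->DfsDid        = (uint8_t)dividers.post_divider;      divider from atombios
+ *
+ * Entries beyond dep_table->count are written as zero.
+ */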
+
+static void cz_init_sclk_limit(struct amdgpu_device *adev)
+{
+       struct cz_power_info *pi = cz_get_pi(adev);
+       struct amdgpu_clock_voltage_dependency_table *table =
+               &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
+       uint32_t clock = 0, level;
+
+       if (!table || !table->count) {
+               DRM_ERROR("Invalid Voltage Dependency table.\n");
+               return;
+       }
+
+       pi->sclk_dpm.soft_min_clk = 0;
+       pi->sclk_dpm.hard_min_clk = 0;
+       cz_send_msg_to_smc(adev, PPSMC_MSG_GetMaxSclkLevel);
+       level = cz_get_argument(adev);
+       if (level < table->count)
+               clock = table->entries[level].clk;
+       else {
+               DRM_ERROR("Invalid SCLK Voltage Dependency table entry.\n");
+               clock = table->entries[table->count - 1].clk;
+       }
+
+       pi->sclk_dpm.soft_max_clk = clock;
+       pi->sclk_dpm.hard_max_clk = clock;
+
+}
+
+static void cz_init_uvd_limit(struct amdgpu_device *adev)
+{
+       struct cz_power_info *pi = cz_get_pi(adev);
+       struct amdgpu_uvd_clock_voltage_dependency_table *table =
+               &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
+       uint32_t clock = 0, level;
+
+       if (!table || !table->count) {
+               DRM_ERROR("Invalid Voltage Dependency table.\n");
+               return;
+       }
+
+       pi->uvd_dpm.soft_min_clk = 0;
+       pi->uvd_dpm.hard_min_clk = 0;
+       cz_send_msg_to_smc(adev, PPSMC_MSG_GetMaxUvdLevel);
+       level = cz_get_argument(adev);
+       if (level < table->count)
+               clock = table->entries[level].vclk;
+       else {
+               DRM_ERROR("Invalid UVD Voltage Dependency table entry.\n");
+               clock = table->entries[table->count - 1].vclk;
+       }
+
+       pi->uvd_dpm.soft_max_clk = clock;
+       pi->uvd_dpm.hard_max_clk = clock;
+
+}
+
+static void cz_init_vce_limit(struct amdgpu_device *adev)
+{
+       struct cz_power_info *pi = cz_get_pi(adev);
+       struct amdgpu_vce_clock_voltage_dependency_table *table =
+               &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
+       uint32_t clock = 0, level;
+
+       if (!table || !table->count) {
+               DRM_ERROR("Invalid Voltage Dependency table.\n");
+               return;
+       }
+
+       pi->vce_dpm.soft_min_clk = 0;
+       pi->vce_dpm.hard_min_clk = 0;
+       cz_send_msg_to_smc(adev, PPSMC_MSG_GetMaxEclkLevel);
+       level = cz_get_argument(adev);
+       if (level < table->count)
+               clock = table->entries[level].evclk;
+       else {
+               /* a future BIOS should fix this error */
+               DRM_ERROR("Invalid VCE Voltage Dependency table entry.\n");
+               clock = table->entries[table->count - 1].evclk;
+       }
+
+       pi->vce_dpm.soft_max_clk = clock;
+       pi->vce_dpm.hard_max_clk = clock;
+
+}
+
+static void cz_init_acp_limit(struct amdgpu_device *adev)
+{
+       struct cz_power_info *pi = cz_get_pi(adev);
+       struct amdgpu_clock_voltage_dependency_table *table =
+               &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
+       uint32_t clock = 0, level;
+
+       if (!table || !table->count) {
+               DRM_ERROR("Invalid Voltage Dependency table.\n");
+               return;
+       }
+
+       pi->acp_dpm.soft_min_clk = 0;
+       pi->acp_dpm.hard_min_clk = 0;
+       cz_send_msg_to_smc(adev, PPSMC_MSG_GetMaxAclkLevel);
+       level = cz_get_argument(adev);
+       if (level < table->count)
+               clock = table->entries[level].clk;
+       else {
+               DRM_ERROR("Invalid ACP Voltage Dependency table entry.\n");
+               clock = table->entries[table->count - 1].clk;
+       }
+
+       pi->acp_dpm.soft_max_clk = clock;
+       pi->acp_dpm.hard_max_clk = clock;
+
+}
+
+static void cz_init_pg_state(struct amdgpu_device *adev)
+{
+       struct cz_power_info *pi = cz_get_pi(adev);
+
+       pi->uvd_power_gated = false;
+       pi->vce_power_gated = false;
+       pi->acp_power_gated = false;
+
+}
+
+static void cz_init_sclk_threshold(struct amdgpu_device *adev)
+{
+       struct cz_power_info *pi = cz_get_pi(adev);
+
+       pi->low_sclk_interrupt_threshold = 0;
+
+}
+
+static void cz_dpm_setup_asic(struct amdgpu_device *adev)
+{
+       cz_reset_ap_mask(adev);
+       cz_dpm_upload_pptable_to_smu(adev);
+       cz_init_sclk_limit(adev);
+       cz_init_uvd_limit(adev);
+       cz_init_vce_limit(adev);
+       cz_init_acp_limit(adev);
+       cz_init_pg_state(adev);
+       cz_init_sclk_threshold(adev);
+
+}
+
+static bool cz_check_smu_feature(struct amdgpu_device *adev,
+                               uint32_t feature)
+{
+       uint32_t smu_feature = 0;
+       int ret;
+
+       ret = cz_send_msg_to_smc_with_parameter(adev,
+                               PPSMC_MSG_GetFeatureStatus, 0);
+       if (ret) {
+               DRM_ERROR("Failed to get SMU features from SMC.\n");
+               return false;
+       } else {
+               smu_feature = cz_get_argument(adev);
+               if (feature & smu_feature)
+                       return true;
+       }
+
+       return false;
+}
+
+static bool cz_check_for_dpm_enabled(struct amdgpu_device *adev)
+{
+       if (cz_check_smu_feature(adev,
+                               SMU_EnabledFeatureScoreboard_SclkDpmOn))
+               return true;
+
+       return false;
+}
+
+static void cz_program_voting_clients(struct amdgpu_device *adev)
+{
+       WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, PPCZ_VOTINGRIGHTSCLIENTS_DFLT0);
+}
+
+static void cz_clear_voting_clients(struct amdgpu_device *adev)
+{
+       WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, 0);
+}
+
+static int cz_start_dpm(struct amdgpu_device *adev)
+{
+       int ret = 0;
+
+       if (amdgpu_dpm) {
+               ret = cz_send_msg_to_smc_with_parameter(adev,
+                               PPSMC_MSG_EnableAllSmuFeatures, SCLK_DPM_MASK);
+               if (ret) {
+                       DRM_ERROR("SMU feature: SCLK_DPM enable failed\n");
+                       return -EINVAL;
+               }
+       }
+
+       return 0;
+}
+
+static int cz_stop_dpm(struct amdgpu_device *adev)
+{
+       int ret = 0;
+
+       if (amdgpu_dpm && adev->pm.dpm_enabled) {
+               ret = cz_send_msg_to_smc_with_parameter(adev,
+                               PPSMC_MSG_DisableAllSmuFeatures, SCLK_DPM_MASK);
+               if (ret) {
+                       DRM_ERROR("SMU feature: SCLK_DPM disable failed\n");
+                       return -EINVAL;
+               }
+       }
+
+       return 0;
+}
+
+static uint32_t cz_get_sclk_level(struct amdgpu_device *adev,
+                               uint32_t clock, uint16_t msg)
+{
+       int i = 0;
+       struct amdgpu_clock_voltage_dependency_table *table =
+               &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
+
+       switch (msg) {
+       case PPSMC_MSG_SetSclkSoftMin:
+       case PPSMC_MSG_SetSclkHardMin:
+               for (i = 0; i < table->count; i++)
+                       if (clock <= table->entries[i].clk)
+                               break;
+               if (i == table->count)
+                       i = table->count - 1;
+               break;
+       case PPSMC_MSG_SetSclkSoftMax:
+       case PPSMC_MSG_SetSclkHardMax:
+               for (i = table->count - 1; i >= 0; i--)
+                       if (clock >= table->entries[i].clk)
+                               break;
+               if (i < 0)
+                       i = 0;
+               break;
+       default:
+               break;
+       }
+
+       return i;
+}
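+
+/*
+ * Worked example for cz_get_sclk_level() (illustrative only): with a
+ * vddc_dependency_on_sclk table of { 30000, 60000, 80000 } and clock = 70000,
+ * a SetSclkSoftMin/HardMin request returns level 2 (first entry >= clock),
+ * while a SetSclkSoftMax/HardMax request returns level 1 (last entry <= clock).
+ */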
+
+static int cz_program_bootup_state(struct amdgpu_device *adev)
+{
+       struct cz_power_info *pi = cz_get_pi(adev);
+       uint32_t soft_min_clk = 0;
+       uint32_t soft_max_clk = 0;
+       int ret = 0;
+
+       pi->sclk_dpm.soft_min_clk = pi->sys_info.bootup_sclk;
+       pi->sclk_dpm.soft_max_clk = pi->sys_info.bootup_sclk;
+
+       soft_min_clk = cz_get_sclk_level(adev,
+                               pi->sclk_dpm.soft_min_clk,
+                               PPSMC_MSG_SetSclkSoftMin);
+       soft_max_clk = cz_get_sclk_level(adev,
+                               pi->sclk_dpm.soft_max_clk,
+                               PPSMC_MSG_SetSclkSoftMax);
+
+       ret = cz_send_msg_to_smc_with_parameter(adev,
+                               PPSMC_MSG_SetSclkSoftMin, soft_min_clk);
+       if (ret)
+               return -EINVAL;
+
+       ret = cz_send_msg_to_smc_with_parameter(adev,
+                               PPSMC_MSG_SetSclkSoftMax, soft_max_clk);
+       if (ret)
+               return -EINVAL;
+
+       return 0;
+}
+
+/* TODO */
+static int cz_disable_cgpg(struct amdgpu_device *adev)
+{
+       return 0;
+}
+
+/* TODO */
+static int cz_enable_cgpg(struct amdgpu_device *adev)
+{
+       return 0;
+}
+
+/* TODO */
+static int cz_program_pt_config_registers(struct amdgpu_device *adev)
+{
+       return 0;
+}
+
+static void cz_do_enable_didt(struct amdgpu_device *adev, bool enable)
+{
+       struct cz_power_info *pi = cz_get_pi(adev);
+       uint32_t reg = 0;
+
+       if (pi->caps_sq_ramping) {
+               reg = RREG32_DIDT(ixDIDT_SQ_CTRL0);
+               if (enable)
+                       reg = REG_SET_FIELD(reg, DIDT_SQ_CTRL0, DIDT_CTRL_EN, 1);
+               else
+                       reg = REG_SET_FIELD(reg, DIDT_SQ_CTRL0, DIDT_CTRL_EN, 0);
+               WREG32_DIDT(ixDIDT_SQ_CTRL0, reg);
+       }
+       if (pi->caps_db_ramping) {
+               reg = RREG32_DIDT(ixDIDT_DB_CTRL0);
+               if (enable)
+                       reg = REG_SET_FIELD(reg, DIDT_DB_CTRL0, DIDT_CTRL_EN, 1);
+               else
+                       reg = REG_SET_FIELD(reg, DIDT_DB_CTRL0, DIDT_CTRL_EN, 0);
+               WREG32_DIDT(ixDIDT_DB_CTRL0, reg);
+       }
+       if (pi->caps_td_ramping) {
+               reg = RREG32_DIDT(ixDIDT_TD_CTRL0);
+               if (enable)
+                       reg = REG_SET_FIELD(reg, DIDT_TD_CTRL0, DIDT_CTRL_EN, 1);
+               else
+                       reg = REG_SET_FIELD(reg, DIDT_TD_CTRL0, DIDT_CTRL_EN, 0);
+               WREG32_DIDT(ixDIDT_TD_CTRL0, reg);
+       }
+       if (pi->caps_tcp_ramping) {
+               reg = RREG32_DIDT(ixDIDT_TCP_CTRL0);
+               if (enable)
+                       reg = REG_SET_FIELD(reg, DIDT_TCP_CTRL0, DIDT_CTRL_EN, 1);
+               else
+                       reg = REG_SET_FIELD(reg, DIDT_TCP_CTRL0, DIDT_CTRL_EN, 0);
+               WREG32_DIDT(ixDIDT_TCP_CTRL0, reg);
+       }
+
+}
+
+static int cz_enable_didt(struct amdgpu_device *adev, bool enable)
+{
+       struct cz_power_info *pi = cz_get_pi(adev);
+       int ret;
+
+       if (pi->caps_sq_ramping || pi->caps_db_ramping ||
+                       pi->caps_td_ramping || pi->caps_tcp_ramping) {
+               if (adev->gfx.gfx_current_status != AMDGPU_GFX_SAFE_MODE) {
+                       ret = cz_disable_cgpg(adev);
+                       if (ret) {
+                               DRM_ERROR("Pre Di/Dt disable cg/pg failed\n");
+                               return -EINVAL;
+                       }
+                       adev->gfx.gfx_current_status = AMDGPU_GFX_SAFE_MODE;
+               }
+
+               ret = cz_program_pt_config_registers(adev);
+               if (ret) {
+                       DRM_ERROR("Di/Dt config failed\n");
+                       return -EINVAL;
+               }
+               cz_do_enable_didt(adev, enable);
+
+               if (adev->gfx.gfx_current_status == AMDGPU_GFX_SAFE_MODE) {
+                       ret = cz_enable_cgpg(adev);
+                       if (ret) {
+                               DRM_ERROR("Post Di/Dt enable cg/pg failed\n");
+                               return -EINVAL;
+                       }
+                       adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;
+               }
+       }
+
+       return 0;
+}
+
+/* TODO */
+static void cz_reset_acp_boot_level(struct amdgpu_device *adev)
+{
+}
+
+static void cz_update_current_ps(struct amdgpu_device *adev,
+                                       struct amdgpu_ps *rps)
+{
+       struct cz_power_info *pi = cz_get_pi(adev);
+       struct cz_ps *ps = cz_get_ps(rps);
+
+       pi->current_ps = *ps;
+       pi->current_rps = *rps;
+       pi->current_rps.ps_priv = ps;
+
+}
+
+static void cz_update_requested_ps(struct amdgpu_device *adev,
+                                       struct amdgpu_ps *rps)
+{
+       struct cz_power_info *pi = cz_get_pi(adev);
+       struct cz_ps *ps = cz_get_ps(rps);
+
+       pi->requested_ps = *ps;
+       pi->requested_rps = *rps;
+       pi->requested_rps.ps_priv = ps;
+
+}
+
+/* PP arbiter support needed TODO */
+static void cz_apply_state_adjust_rules(struct amdgpu_device *adev,
+                                       struct amdgpu_ps *new_rps,
+                                       struct amdgpu_ps *old_rps)
+{
+       struct cz_ps *ps = cz_get_ps(new_rps);
+       struct cz_power_info *pi = cz_get_pi(adev);
+       struct amdgpu_clock_and_voltage_limits *limits =
+               &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
+       /* 10kHz memory clock */
+       uint32_t mclk = 0;
+
+       ps->force_high = false;
+       ps->need_dfs_bypass = true;
+       pi->video_start = new_rps->dclk || new_rps->vclk ||
+                               new_rps->evclk || new_rps->ecclk;
+
+       if ((new_rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) ==
+                       ATOM_PPLIB_CLASSIFICATION_UI_BATTERY)
+               pi->battery_state = true;
+       else
+               pi->battery_state = false;
+
+       if (pi->caps_stable_power_state)
+               mclk = limits->mclk;
+
+       if (mclk > pi->sys_info.nbp_memory_clock[CZ_NUM_NBPMEMORY_CLOCK - 1])
+               ps->force_high = true;
+
+}
+
+static int cz_dpm_enable(struct amdgpu_device *adev)
+{
+       int ret = 0;
+
+       /* re-enabling will hang the SMU, so check first */
+       if (cz_check_for_dpm_enabled(adev))
+               return -EINVAL;
+
+       cz_program_voting_clients(adev);
+
+       ret = cz_start_dpm(adev);
+       if (ret) {
+               DRM_ERROR("Carrizo DPM enable failed\n");
+               return -EINVAL;
+       }
+
+       ret = cz_program_bootup_state(adev);
+       if (ret) {
+               DRM_ERROR("Carrizo bootup state program failed\n");
+               return -EINVAL;
+       }
+
+       ret = cz_enable_didt(adev, true);
+       if (ret) {
+               DRM_ERROR("Carrizo enable di/dt failed\n");
+               return -EINVAL;
+       }
+
+       cz_reset_acp_boot_level(adev);
+
+       cz_update_current_ps(adev, adev->pm.dpm.boot_ps);
+
+       return 0;
+}
+
+static int cz_dpm_hw_init(struct amdgpu_device *adev)
+{
+       int ret;
+
+       if (!amdgpu_dpm)
+               return 0;
+
+       mutex_lock(&adev->pm.mutex);
+
+       /* init smc in dpm hw init */
+       ret = cz_smu_init(adev);
+       if (ret) {
+               DRM_ERROR("amdgpu: smc initialization failed\n");
+               mutex_unlock(&adev->pm.mutex);
+               return ret;
+       }
+
+       /* do the actual fw loading */
+       ret = cz_smu_start(adev);
+       if (ret) {
+               DRM_ERROR("amdgpu: smc start failed\n");
+               mutex_unlock(&adev->pm.mutex);
+               return ret;
+       }
+
+       /* cz dpm setup asic */
+       cz_dpm_setup_asic(adev);
+
+       /* cz dpm enable */
+       ret = cz_dpm_enable(adev);
+       if (ret)
+               adev->pm.dpm_enabled = false;
+       else
+               adev->pm.dpm_enabled = true;
+
+       mutex_unlock(&adev->pm.mutex);
+
+       return 0;
+}
+
+static int cz_dpm_disable(struct amdgpu_device *adev)
+{
+       int ret = 0;
+
+       if (!cz_check_for_dpm_enabled(adev))
+               return -EINVAL;
+
+       ret = cz_enable_didt(adev, false);
+       if (ret) {
+               DRM_ERROR("Carrizo disable di/dt failed\n");
+               return -EINVAL;
+       }
+
+       cz_clear_voting_clients(adev);
+       cz_stop_dpm(adev);
+       cz_update_current_ps(adev, adev->pm.dpm.boot_ps);
+
+       return 0;
+}
+
+static int cz_dpm_hw_fini(struct amdgpu_device *adev)
+{
+       int ret = 0;
+
+       mutex_lock(&adev->pm.mutex);
+
+       cz_smu_fini(adev);
+
+       if (adev->pm.dpm_enabled) {
+               /* do not return early here, pm.mutex is still held */
+               ret = cz_dpm_disable(adev);
+
+               adev->pm.dpm.current_ps =
+                       adev->pm.dpm.requested_ps =
+                       adev->pm.dpm.boot_ps;
+       }
+
+       adev->pm.dpm_enabled = false;
+
+       mutex_unlock(&adev->pm.mutex);
+
+       return ret;
+}
+
+static int cz_dpm_suspend(struct amdgpu_device *adev)
+{
+       int ret = 0;
+
+       if (adev->pm.dpm_enabled) {
+               mutex_lock(&adev->pm.mutex);
+
+               /* do not return early here, pm.mutex is still held */
+               ret = cz_dpm_disable(adev);
+
+               adev->pm.dpm.current_ps =
+                       adev->pm.dpm.requested_ps =
+                       adev->pm.dpm.boot_ps;
+
+               mutex_unlock(&adev->pm.mutex);
+       }
+
+       return ret;
+}
+
+static int cz_dpm_resume(struct amdgpu_device *adev)
+{
+       int ret = 0;
+
+       mutex_lock(&adev->pm.mutex);
+       ret = cz_smu_init(adev);
+       if (ret) {
+               DRM_ERROR("amdgpu: smc resume failed\n");
+               mutex_unlock(&adev->pm.mutex);
+               return ret;
+       }
+
+       /* do the actual fw loading */
+       ret = cz_smu_start(adev);
+       if (ret) {
+               DRM_ERROR("amdgpu: smc start failed\n");
+               mutex_unlock(&adev->pm.mutex);
+               return ret;
+       }
+
+       /* cz dpm setup asic */
+       cz_dpm_setup_asic(adev);
+
+       /* cz dpm enable */
+       ret = cz_dpm_enable(adev);
+       if (ret)
+               adev->pm.dpm_enabled = false;
+       else
+               adev->pm.dpm_enabled = true;
+
+       mutex_unlock(&adev->pm.mutex);
+       /* upon resume, re-compute the clocks */
+       if (adev->pm.dpm_enabled)
+               amdgpu_pm_compute_clocks(adev);
+
+       return 0;
+}
+
+static int cz_dpm_set_clockgating_state(struct amdgpu_device *adev,
+                                       enum amdgpu_clockgating_state state)
+{
+       return 0;
+}
+
+static int cz_dpm_set_powergating_state(struct amdgpu_device *adev,
+                                       enum amdgpu_powergating_state state)
+{
+       return 0;
+}
+
+/* borrowed from KV, needs to be unified in the future */
+static int cz_dpm_get_temperature(struct amdgpu_device *adev)
+{
+       int actual_temp = 0;
+       uint32_t temp = RREG32_SMC(0xC0300E0C);
+
+       if (temp)
+               actual_temp = 1000 * ((temp / 8) - 49);
+
+       return actual_temp;
+}
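+
+/*
+ * Worked example for cz_dpm_get_temperature() (illustrative only): a raw
+ * register value of 872 gives 1000 * ((872 / 8) - 49) = 60000, i.e. 60 C
+ * expressed in millidegrees.
+ */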
+
+static int cz_dpm_pre_set_power_state(struct amdgpu_device *adev)
+{
+       struct cz_power_info *pi = cz_get_pi(adev);
+       struct amdgpu_ps requested_ps = *adev->pm.dpm.requested_ps;
+       struct amdgpu_ps *new_ps = &requested_ps;
+
+       cz_update_requested_ps(adev, new_ps);
+       cz_apply_state_adjust_rules(adev, &pi->requested_rps,
+                                       &pi->current_rps);
+
+       return 0;
+}
+
+static int cz_dpm_update_sclk_limit(struct amdgpu_device *adev)
+{
+       struct cz_power_info *pi = cz_get_pi(adev);
+       struct amdgpu_clock_and_voltage_limits *limits =
+               &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
+       uint32_t clock, stable_ps_clock = 0;
+
+       clock = pi->sclk_dpm.soft_min_clk;
+
+       if (pi->caps_stable_power_state) {
+               stable_ps_clock = limits->sclk * 75 / 100;
+               if (clock < stable_ps_clock)
+                       clock = stable_ps_clock;
+       }
+
+       if (clock != pi->sclk_dpm.soft_min_clk) {
+               pi->sclk_dpm.soft_min_clk = clock;
+               cz_send_msg_to_smc_with_parameter(adev,
+                               PPSMC_MSG_SetSclkSoftMin,
+                               cz_get_sclk_level(adev, clock,
+                                       PPSMC_MSG_SetSclkSoftMin));
+       }
+
+       if (pi->caps_stable_power_state &&
+                       pi->sclk_dpm.soft_max_clk != clock) {
+               pi->sclk_dpm.soft_max_clk = clock;
+               cz_send_msg_to_smc_with_parameter(adev,
+                               PPSMC_MSG_SetSclkSoftMax,
+                               cz_get_sclk_level(adev, clock,
+                                       PPSMC_MSG_SetSclkSoftMax));
+       } else {
+               cz_send_msg_to_smc_with_parameter(adev,
+                               PPSMC_MSG_SetSclkSoftMax,
+                               cz_get_sclk_level(adev,
+                                       pi->sclk_dpm.soft_max_clk,
+                                       PPSMC_MSG_SetSclkSoftMax));
+       }
+
+       return 0;
+}
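+
+/*
+ * Illustrative only: with caps_stable_power_state set and, for example,
+ * limits->sclk = 80000, the soft minimum is raised to at least
+ * 80000 * 75 / 100 = 60000 and the soft maximum is pinned to the same
+ * value, so the SMU holds a single stable sclk level.
+ */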
+
+static int cz_dpm_set_deep_sleep_sclk_threshold(struct amdgpu_device *adev)
+{
+       int ret = 0;
+       struct cz_power_info *pi = cz_get_pi(adev);
+
+       if (pi->caps_sclk_ds) {
+               cz_send_msg_to_smc_with_parameter(adev,
+                               PPSMC_MSG_SetMinDeepSleepSclk,
+                               CZ_MIN_DEEP_SLEEP_SCLK);
+       }
+
+       return ret;
+}
+
+/* XXX: without DAL support, is this still needed in the set_power_state sequence? */
+static int cz_dpm_set_watermark_threshold(struct amdgpu_device *adev)
+{
+       int ret = 0;
+       struct cz_power_info *pi = cz_get_pi(adev);
+
+       cz_send_msg_to_smc_with_parameter(adev,
+                       PPSMC_MSG_SetWatermarkFrequency,
+                       pi->sclk_dpm.soft_max_clk);
+
+       return ret;
+}
+
+static int cz_dpm_enable_nbdpm(struct amdgpu_device *adev)
+{
+       int ret = 0;
+       struct cz_power_info *pi = cz_get_pi(adev);
+
+       /* also depends on DAL's NBPStateDisableRequired */
+       if (pi->nb_dpm_enabled_by_driver && !pi->nb_dpm_enabled) {
+               ret = cz_send_msg_to_smc_with_parameter(adev,
+                               PPSMC_MSG_EnableAllSmuFeatures,
+                               NB_DPM_MASK);
+               if (ret) {
+                       DRM_ERROR("amdgpu: nb dpm enable failed\n");
+                       return ret;
+               }
+               pi->nb_dpm_enabled = true;
+       }
+
+       return ret;
+}
+
+static void cz_dpm_nbdpm_lm_pstate_enable(struct amdgpu_device *adev,
+                                                       bool enable)
+{
+       if (enable)
+               cz_send_msg_to_smc(adev, PPSMC_MSG_EnableLowMemoryPstate);
+       else
+               cz_send_msg_to_smc(adev, PPSMC_MSG_DisableLowMemoryPstate);
+
+}
+
+static int cz_dpm_update_low_memory_pstate(struct amdgpu_device *adev)
+{
+       int ret = 0;
+       struct cz_power_info *pi = cz_get_pi(adev);
+       struct cz_ps *ps = &pi->requested_ps;
+
+       if (pi->sys_info.nb_dpm_enable) {
+               if (ps->force_high)
+                       cz_dpm_nbdpm_lm_pstate_enable(adev, true);
+               else
+                       cz_dpm_nbdpm_lm_pstate_enable(adev, false);
+       }
+
+       return ret;
+}
+
+/* with dpm enabled */
+static int cz_dpm_set_power_state(struct amdgpu_device *adev)
+{
+       int ret = 0;
+
+       cz_dpm_update_sclk_limit(adev);
+       cz_dpm_set_deep_sleep_sclk_threshold(adev);
+       cz_dpm_set_watermark_threshold(adev);
+       cz_dpm_enable_nbdpm(adev);
+       cz_dpm_update_low_memory_pstate(adev);
+
+       return ret;
+}
+
+static void cz_dpm_post_set_power_state(struct amdgpu_device *adev)
+{
+       struct cz_power_info *pi = cz_get_pi(adev);
+       struct amdgpu_ps *ps = &pi->requested_rps;
+
+       cz_update_current_ps(adev, ps);
+
+}
+
+static int cz_dpm_force_highest(struct amdgpu_device *adev)
+{
+       struct cz_power_info *pi = cz_get_pi(adev);
+       int ret = 0;
+
+       if (pi->sclk_dpm.soft_min_clk != pi->sclk_dpm.soft_max_clk) {
+               pi->sclk_dpm.soft_min_clk =
+                       pi->sclk_dpm.soft_max_clk;
+               ret = cz_send_msg_to_smc_with_parameter(adev,
+                               PPSMC_MSG_SetSclkSoftMin,
+                               cz_get_sclk_level(adev,
+                                       pi->sclk_dpm.soft_min_clk,
+                                       PPSMC_MSG_SetSclkSoftMin));
+               if (ret)
+                       return ret;
+       }
+
+       return ret;
+}
+
+static int cz_dpm_force_lowest(struct amdgpu_device *adev)
+{
+       struct cz_power_info *pi = cz_get_pi(adev);
+       int ret = 0;
+
+       if (pi->sclk_dpm.soft_max_clk != pi->sclk_dpm.soft_min_clk) {
+               pi->sclk_dpm.soft_max_clk = pi->sclk_dpm.soft_min_clk;
+               ret = cz_send_msg_to_smc_with_parameter(adev,
+                               PPSMC_MSG_SetSclkSoftMax,
+                               cz_get_sclk_level(adev,
+                                       pi->sclk_dpm.soft_max_clk,
+                                       PPSMC_MSG_SetSclkSoftMax));
+               if (ret)
+                       return ret;
+       }
+
+       return ret;
+}
+
+static uint32_t cz_dpm_get_max_sclk_level(struct amdgpu_device *adev)
+{
+       struct cz_power_info *pi = cz_get_pi(adev);
+
+       if (!pi->max_sclk_level) {
+               cz_send_msg_to_smc(adev, PPSMC_MSG_GetMaxSclkLevel);
+               pi->max_sclk_level = cz_get_argument(adev) + 1;
+       }
+
+       if (pi->max_sclk_level > CZ_MAX_HARDWARE_POWERLEVELS) {
+               DRM_ERROR("Invalid max sclk level!\n");
+               return -EINVAL;
+       }
+
+       return pi->max_sclk_level;
+}
+
+static int cz_dpm_unforce_dpm_levels(struct amdgpu_device *adev)
+{
+       struct cz_power_info *pi = cz_get_pi(adev);
+       struct amdgpu_clock_voltage_dependency_table *dep_table =
+               &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
+       uint32_t level = 0;
+       int ret = 0;
+
+       pi->sclk_dpm.soft_min_clk = dep_table->entries[0].clk;
+       level = cz_dpm_get_max_sclk_level(adev) - 1;
+       if (level < dep_table->count)
+               pi->sclk_dpm.soft_max_clk = dep_table->entries[level].clk;
+       else
+               pi->sclk_dpm.soft_max_clk =
+                       dep_table->entries[dep_table->count - 1].clk;
+
+       /* convert the min/max sclk soft values to levels
+        * and notify the SMU to apply them */
+       ret = cz_send_msg_to_smc_with_parameter(adev,
+                               PPSMC_MSG_SetSclkSoftMin,
+                               cz_get_sclk_level(adev,
+                                       pi->sclk_dpm.soft_min_clk,
+                                       PPSMC_MSG_SetSclkSoftMin));
+       if (ret)
+               return ret;
+
+       ret = cz_send_msg_to_smc_with_parameter(adev,
+                               PPSMC_MSG_SetSclkSoftMax,
+                               cz_get_sclk_level(adev,
+                                       pi->sclk_dpm.soft_max_clk,
+                                       PPSMC_MSG_SetSclkSoftMax));
+       if (ret)
+               return ret;
+
+       DRM_INFO("DPM unforce state min=%d, max=%d.\n",
+                       pi->sclk_dpm.soft_min_clk,
+                       pi->sclk_dpm.soft_max_clk);
+
+       return 0;
+}
+
+static int cz_dpm_force_dpm_level(struct amdgpu_device *adev,
+                               enum amdgpu_dpm_forced_level level)
+{
+       int ret = 0;
+
+       switch (level) {
+       case AMDGPU_DPM_FORCED_LEVEL_HIGH:
+               ret = cz_dpm_force_highest(adev);
+               if (ret)
+                       return ret;
+               break;
+       case AMDGPU_DPM_FORCED_LEVEL_LOW:
+               ret = cz_dpm_force_lowest(adev);
+               if (ret)
+                       return ret;
+               break;
+       case AMDGPU_DPM_FORCED_LEVEL_AUTO:
+               ret = cz_dpm_unforce_dpm_levels(adev);
+               if (ret)
+                       return ret;
+               break;
+       default:
+               break;
+       }
+
+       return ret;
+}
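+
+/*
+ * Illustrative only (assumption, not part of this patch): this handler is
+ * normally driven from the power_dpm_force_performance_level sysfs file,
+ * e.g.:
+ *
+ *      # echo high > /sys/class/drm/card0/device/power_dpm_force_performance_level
+ *      # echo auto > /sys/class/drm/card0/device/power_dpm_force_performance_level
+ */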
+
+/* FIXME: handle display configuration change lists here,
+ * mostly DAL related */
+static void cz_dpm_display_configuration_changed(struct amdgpu_device *adev)
+{
+}
+
+static uint32_t cz_dpm_get_sclk(struct amdgpu_device *adev, bool low)
+{
+       struct cz_power_info *pi = cz_get_pi(adev);
+       struct cz_ps *requested_state = cz_get_ps(&pi->requested_rps);
+
+       if (low)
+               return requested_state->levels[0].sclk;
+       else
+               return requested_state->levels[requested_state->num_levels - 1].sclk;
+
+}
+
+static uint32_t cz_dpm_get_mclk(struct amdgpu_device *adev, bool low)
+{
+       struct cz_power_info *pi = cz_get_pi(adev);
+
+       return pi->sys_info.bootup_uma_clk;
+}
+
+const struct amdgpu_ip_funcs cz_dpm_ip_funcs = {
+       .early_init = cz_dpm_early_init,
+       .late_init = NULL,
+       .sw_init = cz_dpm_sw_init,
+       .sw_fini = cz_dpm_sw_fini,
+       .hw_init = cz_dpm_hw_init,
+       .hw_fini = cz_dpm_hw_fini,
+       .suspend = cz_dpm_suspend,
+       .resume = cz_dpm_resume,
+       .is_idle = NULL,
+       .wait_for_idle = NULL,
+       .soft_reset = NULL,
+       .print_status = NULL,
+       .set_clockgating_state = cz_dpm_set_clockgating_state,
+       .set_powergating_state = cz_dpm_set_powergating_state,
+};
+
+static const struct amdgpu_dpm_funcs cz_dpm_funcs = {
+       .get_temperature = cz_dpm_get_temperature,
+       .pre_set_power_state = cz_dpm_pre_set_power_state,
+       .set_power_state = cz_dpm_set_power_state,
+       .post_set_power_state = cz_dpm_post_set_power_state,
+       .display_configuration_changed = cz_dpm_display_configuration_changed,
+       .get_sclk = cz_dpm_get_sclk,
+       .get_mclk = cz_dpm_get_mclk,
+       .print_power_state = cz_dpm_print_power_state,
+       .debugfs_print_current_performance_level =
+                               cz_dpm_debugfs_print_current_performance_level,
+       .force_performance_level = cz_dpm_force_dpm_level,
+       .vblank_too_short = NULL,
+       .powergate_uvd = NULL,
+};
+
+static void cz_dpm_set_funcs(struct amdgpu_device *adev)
+{
+       if (adev->pm.funcs == NULL)
+               adev->pm.funcs = &cz_dpm_funcs;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_dpm.h b/drivers/gpu/drm/amd/amdgpu/cz_dpm.h
new file mode 100644 (file)
index 0000000..ed6449d
--- /dev/null
@@ -0,0 +1,235 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __CZ_DPM_H__
+#define __CZ_DPM_H__
+
+#include "smu8_fusion.h"
+
+#define CZ_AT_DFLT                                     30
+#define CZ_NUM_NBPSTATES                               4
+#define CZ_NUM_NBPMEMORY_CLOCK                         2
+#define CZ_MAX_HARDWARE_POWERLEVELS                    8
+#define CZ_MAX_DISPLAY_CLOCK_LEVEL                     8
+#define CZ_MAX_DISPLAYPHY_IDS                          10
+
+#define PPCZ_VOTINGRIGHTSCLIENTS_DFLT0                 0x3FFFC102
+
+#define SMC_RAM_END                                    0x40000
+
+#define DPMFlags_SCLK_Enabled                          0x00000001
+#define DPMFlags_UVD_Enabled                           0x00000002
+#define DPMFlags_VCE_Enabled                           0x00000004
+#define DPMFlags_ACP_Enabled                           0x00000008
+#define DPMFlags_ForceHighestValid                     0x40000000
+#define DPMFlags_Debug                                 0x80000000
+
+/* Do not change the following, it is also defined in SMU8.h */
+#define SMU_EnabledFeatureScoreboard_AcpDpmOn          0x00000001
+#define SMU_EnabledFeatureScoreboard_SclkDpmOn         0x00100000
+#define SMU_EnabledFeatureScoreboard_UvdDpmOn          0x00800000
+#define SMU_EnabledFeatureScoreboard_VceDpmOn          0x01000000
+
+/* temporary solution for SetMinDeepSleepSclk;
+ * should be indicated by the display adapter,
+ * in 10 kHz units */
+#define CZ_MIN_DEEP_SLEEP_SCLK                         800
+
+enum cz_pt_config_reg_type {
+       CZ_CONFIGREG_MMR = 0,
+       CZ_CONFIGREG_SMC_IND,
+       CZ_CONFIGREG_DIDT_IND,
+       CZ_CONFIGREG_CACHE,
+       CZ_CONFIGREG_MAX
+};
+
+struct cz_pt_config_reg {
+       uint32_t offset;
+       uint32_t mask;
+       uint32_t shift;
+       uint32_t value;
+       enum cz_pt_config_reg_type type;
+};
+
+struct cz_dpm_entry {
+       uint32_t        soft_min_clk;
+       uint32_t        hard_min_clk;
+       uint32_t        soft_max_clk;
+       uint32_t        hard_max_clk;
+};
+
+struct cz_pl {
+       uint32_t sclk;
+       uint8_t vddc_index;
+       uint8_t ds_divider_index;
+       uint8_t ss_divider_index;
+       uint8_t allow_gnb_slow;
+       uint8_t force_nbp_state;
+       uint8_t display_wm;
+       uint8_t vce_wm;
+};
+
+struct cz_ps {
+       struct cz_pl levels[CZ_MAX_HARDWARE_POWERLEVELS];
+       uint32_t num_levels;
+       bool need_dfs_bypass;
+       uint8_t dpm0_pg_nb_ps_lo;
+       uint8_t dpm0_pg_nb_ps_hi;
+       uint8_t dpmx_nb_ps_lo;
+       uint8_t dpmx_nb_ps_hi;
+       bool force_high;
+};
+
+struct cz_displayphy_entry {
+       uint8_t phy_present;
+       uint8_t active_lane_mapping;
+       uint8_t display_conf_type;
+       uint8_t num_active_lanes;
+};
+
+struct cz_displayphy_info {
+       bool phy_access_initialized;
+       struct cz_displayphy_entry entries[CZ_MAX_DISPLAYPHY_IDS];
+};
+
+struct cz_sys_info {
+       uint32_t bootup_uma_clk;
+       uint32_t bootup_sclk;
+       uint32_t dentist_vco_freq;
+       uint32_t nb_dpm_enable;
+       uint32_t nbp_memory_clock[CZ_NUM_NBPMEMORY_CLOCK];
+       uint32_t nbp_n_clock[CZ_NUM_NBPSTATES];
+       uint8_t nbp_voltage_index[CZ_NUM_NBPSTATES];
+       uint32_t display_clock[CZ_MAX_DISPLAY_CLOCK_LEVEL];
+       uint16_t bootup_nb_voltage_index;
+       uint8_t htc_tmp_lmt;
+       uint8_t htc_hyst_lmt;
+       uint32_t uma_channel_number;
+};
+
+struct cz_power_info {
+       uint32_t active_target[CZ_MAX_HARDWARE_POWERLEVELS];
+       struct cz_sys_info sys_info;
+       struct cz_pl boot_pl;
+       bool disable_nb_ps3_in_battery;
+       bool battery_state;
+       uint32_t lowest_valid;
+       uint32_t highest_valid;
+       uint16_t high_voltage_threshold;
+       /* smc offsets */
+       uint32_t sram_end;
+       uint32_t dpm_table_start;
+       uint32_t soft_regs_start;
+       /* dpm SMU tables */
+       uint8_t uvd_level_count;
+       uint8_t vce_level_count;
+       uint8_t acp_level_count;
+       uint32_t fps_high_threshold;
+       uint32_t fps_low_threshold;
+       /* dpm table */
+       uint32_t dpm_flags;
+       struct cz_dpm_entry sclk_dpm;
+       struct cz_dpm_entry uvd_dpm;
+       struct cz_dpm_entry vce_dpm;
+       struct cz_dpm_entry acp_dpm;
+
+       uint8_t uvd_boot_level;
+       uint8_t uvd_interval;
+       uint8_t vce_boot_level;
+       uint8_t vce_interval;
+       uint8_t acp_boot_level;
+       uint8_t acp_interval;
+
+       uint8_t graphics_boot_level;
+       uint8_t graphics_interval;
+       uint8_t graphics_therm_throttle_enable;
+       uint8_t graphics_voltage_change_enable;
+       uint8_t graphics_clk_slow_enable;
+       uint8_t graphics_clk_slow_divider;
+
+       uint32_t low_sclk_interrupt_threshold;
+       bool uvd_power_gated;
+       bool vce_power_gated;
+       bool acp_power_gated;
+
+       uint32_t active_process_mask;
+
+       uint32_t mgcg_cgtt_local0;
+       uint32_t mgcg_cgtt_local1;
+       uint32_t clock_slow_down_step;
+       uint32_t skip_clock_slow_down;
+       bool enable_nb_ps_policy;
+       uint32_t voting_clients;
+       uint32_t voltage_drop_threshold;
+       uint32_t gfx_pg_threshold;
+       uint32_t max_sclk_level;
+       /* flags */
+       bool didt_enabled;
+       bool video_start;
+       bool cac_enabled;
+       bool bapm_enabled;
+       bool nb_dpm_enabled_by_driver;
+       bool nb_dpm_enabled;
+       bool auto_thermal_throttling_enabled;
+       bool dpm_enabled;
+       bool need_pptable_upload;
+       /* caps */
+       bool caps_cac;
+       bool caps_power_containment;
+       bool caps_sq_ramping;
+       bool caps_db_ramping;
+       bool caps_td_ramping;
+       bool caps_tcp_ramping;
+       bool caps_sclk_throttle_low_notification;
+       bool caps_fps;
+       bool caps_uvd_dpm;
+       bool caps_uvd_pg;
+       bool caps_vce_dpm;
+       bool caps_vce_pg;
+       bool caps_acp_dpm;
+       bool caps_acp_pg;
+       bool caps_stable_power_state;
+       bool caps_enable_dfs_bypass;
+       bool caps_sclk_ds;
+       bool caps_voltage_island;
+       /* power state */
+       struct amdgpu_ps current_rps;
+       struct cz_ps current_ps;
+       struct amdgpu_ps requested_rps;
+       struct cz_ps requested_ps;
+
+       bool uvd_power_down;
+       bool vce_power_down;
+       bool acp_power_down;
+};
+
+/* cz_smc.c */
+uint32_t cz_get_argument(struct amdgpu_device *adev);
+int cz_send_msg_to_smc(struct amdgpu_device *adev, uint16_t msg);
+int cz_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
+                       uint16_t msg, uint32_t parameter);
+int cz_read_smc_sram_dword(struct amdgpu_device *adev,
+                       uint32_t smc_address, uint32_t *value, uint32_t limit);
+int cz_smu_upload_pptable(struct amdgpu_device *adev);
+int cz_smu_download_pptable(struct amdgpu_device *adev, void **table);
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_ih.c b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
new file mode 100644 (file)
index 0000000..80d508e
--- /dev/null
@@ -0,0 +1,435 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "drmP.h"
+#include "amdgpu.h"
+#include "amdgpu_ih.h"
+#include "vid.h"
+
+#include "oss/oss_3_0_1_d.h"
+#include "oss/oss_3_0_1_sh_mask.h"
+
+#include "bif/bif_5_1_d.h"
+#include "bif/bif_5_1_sh_mask.h"
+
+/*
+ * Interrupts
+ * Starting with r6xx, interrupts are handled via a ring buffer.
+ * Ring buffers are areas of GPU accessible memory that the GPU
+ * writes interrupt vectors into and the host reads vectors out of.
+ * There is a rptr (read pointer) that determines where the
+ * host is currently reading, and a wptr (write pointer)
+ * which determines where the GPU has written.  When the
+ * pointers are equal, the ring is idle.  When the GPU
+ * writes vectors to the ring buffer, it increments the
+ * wptr.  When there is an interrupt, the host then starts
+ * fetching commands and processing them until the pointers are
+ * equal again at which point it updates the rptr.
+ */
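+
+/*
+ * Roughly how the shared amdgpu interrupt code uses the helpers below
+ * (illustrative sketch only):
+ *
+ *      struct amdgpu_iv_entry entry;
+ *      u32 wptr = cz_ih_get_wptr(adev);
+ *
+ *      while (adev->irq.ih.rptr != wptr) {
+ *              cz_ih_decode_iv(adev, &entry);
+ *              ... dispatch based on entry.src_id ...
+ *      }
+ *      cz_ih_set_rptr(adev);
+ */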
+
+static void cz_ih_set_interrupt_funcs(struct amdgpu_device *adev);
+
+/**
+ * cz_ih_enable_interrupts - Enable the interrupt ring buffer
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Enable the interrupt ring buffer (VI).
+ */
+static void cz_ih_enable_interrupts(struct amdgpu_device *adev)
+{
+       u32 ih_cntl = RREG32(mmIH_CNTL);
+       u32 ih_rb_cntl = RREG32(mmIH_RB_CNTL);
+
+       ih_cntl = REG_SET_FIELD(ih_cntl, IH_CNTL, ENABLE_INTR, 1);
+       ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 1);
+       WREG32(mmIH_CNTL, ih_cntl);
+       WREG32(mmIH_RB_CNTL, ih_rb_cntl);
+       adev->irq.ih.enabled = true;
+}
+
+/**
+ * cz_ih_disable_interrupts - Disable the interrupt ring buffer
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Disable the interrupt ring buffer (VI).
+ */
+static void cz_ih_disable_interrupts(struct amdgpu_device *adev)
+{
+       u32 ih_rb_cntl = RREG32(mmIH_RB_CNTL);
+       u32 ih_cntl = RREG32(mmIH_CNTL);
+
+       ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 0);
+       ih_cntl = REG_SET_FIELD(ih_cntl, IH_CNTL, ENABLE_INTR, 0);
+       WREG32(mmIH_RB_CNTL, ih_rb_cntl);
+       WREG32(mmIH_CNTL, ih_cntl);
+       /* set rptr, wptr to 0 */
+       WREG32(mmIH_RB_RPTR, 0);
+       WREG32(mmIH_RB_WPTR, 0);
+       adev->irq.ih.enabled = false;
+       adev->irq.ih.rptr = 0;
+}
+
+/**
+ * cz_ih_irq_init - init and enable the interrupt ring
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Allocate a ring buffer for the interrupt controller,
+ * enable the RLC, disable interrupts, enable the IH
+ * ring buffer and enable it (VI).
+ * Called at device load and resume.
+ * Returns 0 for success, errors for failure.
+ */
+static int cz_ih_irq_init(struct amdgpu_device *adev)
+{
+       int ret = 0;
+       int rb_bufsz;
+       u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
+       u64 wptr_off;
+
+       /* disable irqs */
+       cz_ih_disable_interrupts(adev);
+
+       /* setup interrupt control */
+       WREG32(mmINTERRUPT_CNTL2, adev->dummy_page.addr >> 8);
+       interrupt_cntl = RREG32(mmINTERRUPT_CNTL);
+       /* INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=0 - dummy read disabled with msi, enabled without msi
+        * INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=1 - dummy read controlled by IH_DUMMY_RD_EN
+        */
+       interrupt_cntl = REG_SET_FIELD(interrupt_cntl, INTERRUPT_CNTL, IH_DUMMY_RD_OVERRIDE, 0);
+       /* INTERRUPT_CNTL__IH_REQ_NONSNOOP_EN_MASK=1 if ring is in non-cacheable memory, e.g., vram */
+       interrupt_cntl = REG_SET_FIELD(interrupt_cntl, INTERRUPT_CNTL, IH_REQ_NONSNOOP_EN, 0);
+       WREG32(mmINTERRUPT_CNTL, interrupt_cntl);
+
+       /* Ring Buffer base. [39:8] of 40-bit address of the beginning of the ring buffer*/
+       WREG32(mmIH_RB_BASE, adev->irq.ih.gpu_addr >> 8);
+
+       rb_bufsz = order_base_2(adev->irq.ih.ring_size / 4);
+       ih_rb_cntl = REG_SET_FIELD(0, IH_RB_CNTL, WPTR_OVERFLOW_ENABLE, 1);
+       ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
+       ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_SIZE, rb_bufsz);
+
+       /* Ring Buffer write pointer writeback. If enabled, IH_RB_WPTR register value is written to memory */
+       ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, WPTR_WRITEBACK_ENABLE, 1);
+
+       /* set the writeback address whether it's enabled or not */
+       wptr_off = adev->wb.gpu_addr + (adev->irq.ih.wptr_offs * 4);
+       WREG32(mmIH_RB_WPTR_ADDR_LO, lower_32_bits(wptr_off));
+       WREG32(mmIH_RB_WPTR_ADDR_HI, upper_32_bits(wptr_off) & 0xFF);
+
+       WREG32(mmIH_RB_CNTL, ih_rb_cntl);
+
+       /* set rptr, wptr to 0 */
+       WREG32(mmIH_RB_RPTR, 0);
+       WREG32(mmIH_RB_WPTR, 0);
+
+       /* Default settings for IH_CNTL (disabled at first) */
+       ih_cntl = RREG32(mmIH_CNTL);
+       ih_cntl = REG_SET_FIELD(ih_cntl, IH_CNTL, MC_VMID, 0);
+
+       if (adev->irq.msi_enabled)
+               ih_cntl = REG_SET_FIELD(ih_cntl, IH_CNTL, RPTR_REARM, 1);
+       WREG32(mmIH_CNTL, ih_cntl);
+
+       pci_set_master(adev->pdev);
+
+       /* enable interrupts */
+       cz_ih_enable_interrupts(adev);
+
+       return ret;
+}
+
+/**
+ * cz_ih_irq_disable - disable interrupts
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Disable interrupts on the hw (VI).
+ */
+static void cz_ih_irq_disable(struct amdgpu_device *adev)
+{
+       cz_ih_disable_interrupts(adev);
+
+       /* Wait and acknowledge irq */
+       mdelay(1);
+}
+
+/**
+ * cz_ih_get_wptr - get the IH ring buffer wptr
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Get the IH ring buffer wptr from either the register
+ * or the writeback memory buffer (VI).  Also check for
+ * ring buffer overflow and deal with it.
+ * Used by the interrupt handler (VI).
+ * Returns the value of the wptr.
+ */
+static u32 cz_ih_get_wptr(struct amdgpu_device *adev)
+{
+       u32 wptr, tmp;
+
+       wptr = le32_to_cpu(adev->wb.wb[adev->irq.ih.wptr_offs]);
+
+       if (REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) {
+               wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);
+               /* When a ring buffer overflow happens, start parsing interrupts
+                * from the last not-overwritten vector (wptr + 16). Hopefully
+                * this allows us to catch up.
+                */
+               dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
+                       wptr, adev->irq.ih.rptr, (wptr + 16) & adev->irq.ih.ptr_mask);
+               adev->irq.ih.rptr = (wptr + 16) & adev->irq.ih.ptr_mask;
+               tmp = RREG32(mmIH_RB_CNTL);
+               tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
+               WREG32(mmIH_RB_CNTL, tmp);
+       }
+       return (wptr & adev->irq.ih.ptr_mask);
+}
+
+/**
+ * cz_ih_decode_iv - decode an interrupt vector
+ *
+ * @adev: amdgpu_device pointer
+ * @entry: IV entry to fill with the decoded data
+ *
+ * Decodes the interrupt vector at the current rptr
+ * position and also advances the position.
+ */
+static void cz_ih_decode_iv(struct amdgpu_device *adev,
+                                struct amdgpu_iv_entry *entry)
+{
+       /* wptr/rptr are in bytes! */
+       u32 ring_index = adev->irq.ih.rptr >> 2;
+       uint32_t dw[4];
+
+       dw[0] = le32_to_cpu(adev->irq.ih.ring[ring_index + 0]);
+       dw[1] = le32_to_cpu(adev->irq.ih.ring[ring_index + 1]);
+       dw[2] = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]);
+       dw[3] = le32_to_cpu(adev->irq.ih.ring[ring_index + 3]);
+
+       entry->src_id = dw[0] & 0xff;
+       entry->src_data = dw[1] & 0xfffffff;
+       entry->ring_id = dw[2] & 0xff;
+       entry->vm_id = (dw[2] >> 8) & 0xff;
+       entry->pas_id = (dw[2] >> 16) & 0xffff;
+
+       /* wptr/rptr are in bytes! */
+       adev->irq.ih.rptr += 16;
+}
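+
+/*
+ * For reference, each ring entry consumed above is four dwords (16 bytes):
+ *
+ *      dw0[7:0]     src_id
+ *      dw1[27:0]    src_data
+ *      dw2[7:0]     ring_id
+ *      dw2[15:8]    vm_id
+ *      dw2[31:16]   pas_id
+ *      dw3          not used by this function
+ */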
+
+/**
+ * cz_ih_set_rptr - set the IH ring buffer rptr
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Set the IH ring buffer rptr.
+ */
+static void cz_ih_set_rptr(struct amdgpu_device *adev)
+{
+       WREG32(mmIH_RB_RPTR, adev->irq.ih.rptr);
+}
+
+static int cz_ih_early_init(struct amdgpu_device *adev)
+{
+       cz_ih_set_interrupt_funcs(adev);
+       return 0;
+}
+
+static int cz_ih_sw_init(struct amdgpu_device *adev)
+{
+       int r;
+
+       r = amdgpu_ih_ring_init(adev, 64 * 1024, false);
+       if (r)
+               return r;
+
+       r = amdgpu_irq_init(adev);
+
+       return r;
+}
+
+static int cz_ih_sw_fini(struct amdgpu_device *adev)
+{
+       amdgpu_irq_fini(adev);
+       amdgpu_ih_ring_fini(adev);
+
+       return 0;
+}
+
+static int cz_ih_hw_init(struct amdgpu_device *adev)
+{
+       int r;
+
+       r = cz_ih_irq_init(adev);
+       if (r)
+               return r;
+
+       return 0;
+}
+
+static int cz_ih_hw_fini(struct amdgpu_device *adev)
+{
+       cz_ih_irq_disable(adev);
+
+       return 0;
+}
+
+static int cz_ih_suspend(struct amdgpu_device *adev)
+{
+       return cz_ih_hw_fini(adev);
+}
+
+static int cz_ih_resume(struct amdgpu_device *adev)
+{
+       return cz_ih_hw_init(adev);
+}
+
+static bool cz_ih_is_idle(struct amdgpu_device *adev)
+{
+       u32 tmp = RREG32(mmSRBM_STATUS);
+
+       if (REG_GET_FIELD(tmp, SRBM_STATUS, IH_BUSY))
+               return false;
+
+       return true;
+}
+
+static int cz_ih_wait_for_idle(struct amdgpu_device *adev)
+{
+       unsigned i;
+       u32 tmp;
+
+       for (i = 0; i < adev->usec_timeout; i++) {
+               /* read SRBM_STATUS */
+               tmp = RREG32(mmSRBM_STATUS);
+               if (!REG_GET_FIELD(tmp, SRBM_STATUS, IH_BUSY))
+                       return 0;
+               udelay(1);
+       }
+       return -ETIMEDOUT;
+}
+
+static void cz_ih_print_status(struct amdgpu_device *adev)
+{
+       dev_info(adev->dev, "CZ IH registers\n");
+       dev_info(adev->dev, "  SRBM_STATUS=0x%08X\n",
+               RREG32(mmSRBM_STATUS));
+       dev_info(adev->dev, "  SRBM_STATUS2=0x%08X\n",
+               RREG32(mmSRBM_STATUS2));
+       dev_info(adev->dev, "  INTERRUPT_CNTL=0x%08X\n",
+                RREG32(mmINTERRUPT_CNTL));
+       dev_info(adev->dev, "  INTERRUPT_CNTL2=0x%08X\n",
+                RREG32(mmINTERRUPT_CNTL2));
+       dev_info(adev->dev, "  IH_CNTL=0x%08X\n",
+                RREG32(mmIH_CNTL));
+       dev_info(adev->dev, "  IH_RB_CNTL=0x%08X\n",
+                RREG32(mmIH_RB_CNTL));
+       dev_info(adev->dev, "  IH_RB_BASE=0x%08X\n",
+                RREG32(mmIH_RB_BASE));
+       dev_info(adev->dev, "  IH_RB_WPTR_ADDR_LO=0x%08X\n",
+                RREG32(mmIH_RB_WPTR_ADDR_LO));
+       dev_info(adev->dev, "  IH_RB_WPTR_ADDR_HI=0x%08X\n",
+                RREG32(mmIH_RB_WPTR_ADDR_HI));
+       dev_info(adev->dev, "  IH_RB_RPTR=0x%08X\n",
+                RREG32(mmIH_RB_RPTR));
+       dev_info(adev->dev, "  IH_RB_WPTR=0x%08X\n",
+                RREG32(mmIH_RB_WPTR));
+}
+
+static int cz_ih_soft_reset(struct amdgpu_device *adev)
+{
+       u32 srbm_soft_reset = 0;
+       u32 tmp = RREG32(mmSRBM_STATUS);
+
+       if (tmp & SRBM_STATUS__IH_BUSY_MASK)
+               srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET,
+                                               SOFT_RESET_IH, 1);
+
+       if (srbm_soft_reset) {
+               cz_ih_print_status(adev);
+
+               tmp = RREG32(mmSRBM_SOFT_RESET);
+               tmp |= srbm_soft_reset;
+               dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
+               WREG32(mmSRBM_SOFT_RESET, tmp);
+               tmp = RREG32(mmSRBM_SOFT_RESET);
+
+               udelay(50);
+
+               tmp &= ~srbm_soft_reset;
+               WREG32(mmSRBM_SOFT_RESET, tmp);
+               tmp = RREG32(mmSRBM_SOFT_RESET);
+
+               /* Wait a little for things to settle down */
+               udelay(50);
+
+               cz_ih_print_status(adev);
+       }
+
+       return 0;
+}
+
+static int cz_ih_set_clockgating_state(struct amdgpu_device *adev,
+                                         enum amdgpu_clockgating_state state)
+{
+       /* TODO */
+       return 0;
+}
+
+static int cz_ih_set_powergating_state(struct amdgpu_device *adev,
+                                         enum amdgpu_powergating_state state)
+{
+       /* TODO */
+       return 0;
+}
+
+const struct amdgpu_ip_funcs cz_ih_ip_funcs = {
+       .early_init = cz_ih_early_init,
+       .late_init = NULL,
+       .sw_init = cz_ih_sw_init,
+       .sw_fini = cz_ih_sw_fini,
+       .hw_init = cz_ih_hw_init,
+       .hw_fini = cz_ih_hw_fini,
+       .suspend = cz_ih_suspend,
+       .resume = cz_ih_resume,
+       .is_idle = cz_ih_is_idle,
+       .wait_for_idle = cz_ih_wait_for_idle,
+       .soft_reset = cz_ih_soft_reset,
+       .print_status = cz_ih_print_status,
+       .set_clockgating_state = cz_ih_set_clockgating_state,
+       .set_powergating_state = cz_ih_set_powergating_state,
+};
+
+static const struct amdgpu_ih_funcs cz_ih_funcs = {
+       .get_wptr = cz_ih_get_wptr,
+       .decode_iv = cz_ih_decode_iv,
+       .set_rptr = cz_ih_set_rptr
+};
+
+static void cz_ih_set_interrupt_funcs(struct amdgpu_device *adev)
+{
+       if (adev->irq.ih_funcs == NULL)
+               adev->irq.ih_funcs = &cz_ih_funcs;
+}
+
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_ih.h b/drivers/gpu/drm/amd/amdgpu/cz_ih.h
new file mode 100644 (file)
index 0000000..1bce136
--- /dev/null
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __CZ_IH_H__
+#define __CZ_IH_H__
+
+extern const struct amdgpu_ip_funcs cz_ih_ip_funcs;
+
+#endif /* __CZ_IH_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_ppsmc.h b/drivers/gpu/drm/amd/amdgpu/cz_ppsmc.h
new file mode 100644 (file)
index 0000000..273616a
--- /dev/null
@@ -0,0 +1,185 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef CZ_PP_SMC_H
+#define CZ_PP_SMC_H
+
+#pragma pack(push, 1)
+
+/* Fan control algorithm: */
+#define FDO_MODE_HARDWARE 0
+#define FDO_MODE_PIECE_WISE_LINEAR 1
+
+enum FAN_CONTROL {
+    FAN_CONTROL_FUZZY,
+    FAN_CONTROL_TABLE
+};
+
+enum DPM_ARRAY {
+    DPM_ARRAY_HARD_MAX,
+    DPM_ARRAY_HARD_MIN,
+    DPM_ARRAY_SOFT_MAX,
+    DPM_ARRAY_SOFT_MIN
+};
+
+/*
+ * Return codes for driver-to-SMC communication.
+ * Leave these as #defines; enums might not be exactly 8 bits on the microcontroller.
+ */
+#define PPSMC_Result_OK             ((uint16_t)0x01)
+#define PPSMC_Result_NoMore         ((uint16_t)0x02)
+#define PPSMC_Result_NotNow         ((uint16_t)0x03)
+#define PPSMC_Result_Failed         ((uint16_t)0xFF)
+#define PPSMC_Result_UnknownCmd     ((uint16_t)0xFE)
+#define PPSMC_Result_UnknownVT      ((uint16_t)0xFD)
+
+#define PPSMC_isERROR(x)            ((uint16_t)0x80 & (x))
+
+/*
+ * Supported driver messages
+ */
+#define PPSMC_MSG_Test                        ((uint16_t) 0x1)
+#define PPSMC_MSG_GetFeatureStatus            ((uint16_t) 0x2)
+#define PPSMC_MSG_EnableAllSmuFeatures        ((uint16_t) 0x3)
+#define PPSMC_MSG_DisableAllSmuFeatures       ((uint16_t) 0x4)
+#define PPSMC_MSG_OptimizeBattery             ((uint16_t) 0x5)
+#define PPSMC_MSG_MaximizePerf                ((uint16_t) 0x6)
+#define PPSMC_MSG_UVDPowerOFF                 ((uint16_t) 0x7)
+#define PPSMC_MSG_UVDPowerON                  ((uint16_t) 0x8)
+#define PPSMC_MSG_VCEPowerOFF                 ((uint16_t) 0x9)
+#define PPSMC_MSG_VCEPowerON                  ((uint16_t) 0xA)
+#define PPSMC_MSG_ACPPowerOFF                 ((uint16_t) 0xB)
+#define PPSMC_MSG_ACPPowerON                  ((uint16_t) 0xC)
+#define PPSMC_MSG_SDMAPowerOFF                ((uint16_t) 0xD)
+#define PPSMC_MSG_SDMAPowerON                 ((uint16_t) 0xE)
+#define PPSMC_MSG_XDMAPowerOFF                ((uint16_t) 0xF)
+#define PPSMC_MSG_XDMAPowerON                 ((uint16_t) 0x10)
+#define PPSMC_MSG_SetMinDeepSleepSclk         ((uint16_t) 0x11)
+#define PPSMC_MSG_SetSclkSoftMin              ((uint16_t) 0x12)
+#define PPSMC_MSG_SetSclkSoftMax              ((uint16_t) 0x13)
+#define PPSMC_MSG_SetSclkHardMin              ((uint16_t) 0x14)
+#define PPSMC_MSG_SetSclkHardMax              ((uint16_t) 0x15)
+#define PPSMC_MSG_SetLclkSoftMin              ((uint16_t) 0x16)
+#define PPSMC_MSG_SetLclkSoftMax              ((uint16_t) 0x17)
+#define PPSMC_MSG_SetLclkHardMin              ((uint16_t) 0x18)
+#define PPSMC_MSG_SetLclkHardMax              ((uint16_t) 0x19)
+#define PPSMC_MSG_SetUvdSoftMin               ((uint16_t) 0x1A)
+#define PPSMC_MSG_SetUvdSoftMax               ((uint16_t) 0x1B)
+#define PPSMC_MSG_SetUvdHardMin               ((uint16_t) 0x1C)
+#define PPSMC_MSG_SetUvdHardMax               ((uint16_t) 0x1D)
+#define PPSMC_MSG_SetEclkSoftMin              ((uint16_t) 0x1E)
+#define PPSMC_MSG_SetEclkSoftMax              ((uint16_t) 0x1F)
+#define PPSMC_MSG_SetEclkHardMin              ((uint16_t) 0x20)
+#define PPSMC_MSG_SetEclkHardMax              ((uint16_t) 0x21)
+#define PPSMC_MSG_SetAclkSoftMin              ((uint16_t) 0x22)
+#define PPSMC_MSG_SetAclkSoftMax              ((uint16_t) 0x23)
+#define PPSMC_MSG_SetAclkHardMin              ((uint16_t) 0x24)
+#define PPSMC_MSG_SetAclkHardMax              ((uint16_t) 0x25)
+#define PPSMC_MSG_SetNclkSoftMin              ((uint16_t) 0x26)
+#define PPSMC_MSG_SetNclkSoftMax              ((uint16_t) 0x27)
+#define PPSMC_MSG_SetNclkHardMin              ((uint16_t) 0x28)
+#define PPSMC_MSG_SetNclkHardMax              ((uint16_t) 0x29)
+#define PPSMC_MSG_SetPstateSoftMin            ((uint16_t) 0x2A)
+#define PPSMC_MSG_SetPstateSoftMax            ((uint16_t) 0x2B)
+#define PPSMC_MSG_SetPstateHardMin            ((uint16_t) 0x2C)
+#define PPSMC_MSG_SetPstateHardMax            ((uint16_t) 0x2D)
+#define PPSMC_MSG_DisableLowMemoryPstate      ((uint16_t) 0x2E)
+#define PPSMC_MSG_EnableLowMemoryPstate       ((uint16_t) 0x2F)
+#define PPSMC_MSG_UcodeAddressLow             ((uint16_t) 0x30)
+#define PPSMC_MSG_UcodeAddressHigh            ((uint16_t) 0x31)
+#define PPSMC_MSG_UcodeLoadStatus             ((uint16_t) 0x32)
+#define PPSMC_MSG_DriverDramAddrHi            ((uint16_t) 0x33)
+#define PPSMC_MSG_DriverDramAddrLo            ((uint16_t) 0x34)
+#define PPSMC_MSG_CondExecDramAddrHi          ((uint16_t) 0x35)
+#define PPSMC_MSG_CondExecDramAddrLo          ((uint16_t) 0x36)
+#define PPSMC_MSG_LoadUcodes                  ((uint16_t) 0x37)
+#define PPSMC_MSG_DriverResetMode             ((uint16_t) 0x38)
+#define PPSMC_MSG_PowerStateNotify            ((uint16_t) 0x39)
+#define PPSMC_MSG_SetDisplayPhyConfig         ((uint16_t) 0x3A)
+#define PPSMC_MSG_GetMaxSclkLevel             ((uint16_t) 0x3B)
+#define PPSMC_MSG_GetMaxLclkLevel             ((uint16_t) 0x3C)
+#define PPSMC_MSG_GetMaxUvdLevel              ((uint16_t) 0x3D)
+#define PPSMC_MSG_GetMaxEclkLevel             ((uint16_t) 0x3E)
+#define PPSMC_MSG_GetMaxAclkLevel             ((uint16_t) 0x3F)
+#define PPSMC_MSG_GetMaxNclkLevel             ((uint16_t) 0x40)
+#define PPSMC_MSG_GetMaxPstate                ((uint16_t) 0x41)
+#define PPSMC_MSG_DramAddrHiVirtual           ((uint16_t) 0x42)
+#define PPSMC_MSG_DramAddrLoVirtual           ((uint16_t) 0x43)
+#define PPSMC_MSG_DramAddrHiPhysical          ((uint16_t) 0x44)
+#define PPSMC_MSG_DramAddrLoPhysical          ((uint16_t) 0x45)
+#define PPSMC_MSG_DramBufferSize              ((uint16_t) 0x46)
+#define PPSMC_MSG_SetMmPwrLogDramAddrHi       ((uint16_t) 0x47)
+#define PPSMC_MSG_SetMmPwrLogDramAddrLo       ((uint16_t) 0x48)
+#define PPSMC_MSG_SetClkTableAddrHi           ((uint16_t) 0x49)
+#define PPSMC_MSG_SetClkTableAddrLo           ((uint16_t) 0x4A)
+#define PPSMC_MSG_GetConservativePowerLimit   ((uint16_t) 0x4B)
+
+#define PPSMC_MSG_InitJobs                    ((uint16_t) 0x252)
+#define PPSMC_MSG_ExecuteJob                  ((uint16_t) 0x254)
+
+#define PPSMC_MSG_NBDPM_Enable                ((uint16_t) 0x140)
+#define PPSMC_MSG_NBDPM_Disable               ((uint16_t) 0x141)
+
+#define PPSMC_MSG_DPM_FPS_Mode                ((uint16_t) 0x15d)
+#define PPSMC_MSG_DPM_Activity_Mode           ((uint16_t) 0x15e)
+
+#define PPSMC_MSG_PmStatusLogStart            ((uint16_t) 0x170)
+#define PPSMC_MSG_PmStatusLogSample           ((uint16_t) 0x171)
+
+#define PPSMC_MSG_AllowLowSclkInterrupt       ((uint16_t) 0x184)
+#define PPSMC_MSG_MmPowerMonitorStart         ((uint16_t) 0x18F)
+#define PPSMC_MSG_MmPowerMonitorStop          ((uint16_t) 0x190)
+#define PPSMC_MSG_MmPowerMonitorRestart       ((uint16_t) 0x191)
+
+#define PPSMC_MSG_SetClockGateMask            ((uint16_t) 0x260)
+#define PPSMC_MSG_SetFpsThresholdLo           ((uint16_t) 0x264)
+#define PPSMC_MSG_SetFpsThresholdHi           ((uint16_t) 0x265)
+#define PPSMC_MSG_SetLowSclkIntrThreshold     ((uint16_t) 0x266)
+
+#define PPSMC_MSG_ClkTableXferToDram          ((uint16_t) 0x267)
+#define PPSMC_MSG_ClkTableXferToSmu           ((uint16_t) 0x268)
+#define PPSMC_MSG_GetAverageGraphicsActivity  ((uint16_t) 0x269)
+#define PPSMC_MSG_GetAverageGioActivity       ((uint16_t) 0x26A)
+#define PPSMC_MSG_SetLoggerBufferSize         ((uint16_t) 0x26B)
+#define PPSMC_MSG_SetLoggerAddressHigh        ((uint16_t) 0x26C)
+#define PPSMC_MSG_SetLoggerAddressLow         ((uint16_t) 0x26D)
+#define PPSMC_MSG_SetWatermarkFrequency       ((uint16_t) 0x26E)
+
+/* REMOVE LATER */
+#define PPSMC_MSG_DPM_ForceState              ((uint16_t) 0x104)
+
+/* Feature Enable Masks */
+#define NB_DPM_MASK             0x00000800
+#define VDDGFX_MASK             0x00800000
+#define VCE_DPM_MASK            0x00400000
+#define ACP_DPM_MASK            0x00040000
+#define UVD_DPM_MASK            0x00010000
+#define GFX_CU_PG_MASK          0x00004000
+#define SCLK_DPM_MASK           0x00080000
+
+#if !defined(SMC_MICROCODE)
+#pragma pack(pop)
+
+#endif
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_smc.c b/drivers/gpu/drm/amd/amdgpu/cz_smc.c
new file mode 100644 (file)
index 0000000..a72ffc7
--- /dev/null
@@ -0,0 +1,962 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include <linux/firmware.h>
+#include "drmP.h"
+#include "amdgpu.h"
+#include "smu8.h"
+#include "smu8_fusion.h"
+#include "cz_ppsmc.h"
+#include "cz_smumgr.h"
+#include "smu_ucode_xfer_cz.h"
+#include "amdgpu_ucode.h"
+
+#include "smu/smu_8_0_d.h"
+#include "smu/smu_8_0_sh_mask.h"
+#include "gca/gfx_8_0_d.h"
+#include "gca/gfx_8_0_sh_mask.h"
+
+uint32_t cz_get_argument(struct amdgpu_device *adev)
+{
+       return RREG32(mmSMU_MP1_SRBM2P_ARG_0);
+}
+
+static struct cz_smu_private_data *cz_smu_get_priv(struct amdgpu_device *adev)
+{
+       struct cz_smu_private_data *priv =
+                       (struct cz_smu_private_data *)(adev->smu.priv);
+
+       return priv;
+}
+
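+/*
+ * Post a message to the SMC without waiting for it to be processed:
+ * wait for the response register to show that the previous message has
+ * completed, then clear it and write the new message.
+ */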
+int cz_send_msg_to_smc_async(struct amdgpu_device *adev, u16 msg)
+{
+       int i;
+       u32 content = 0, tmp;
+
+       for (i = 0; i < adev->usec_timeout; i++) {
+               tmp = REG_GET_FIELD(RREG32(mmSMU_MP1_SRBM2P_RESP_0),
+                               SMU_MP1_SRBM2P_RESP_0, CONTENT);
+               if (content != tmp)
+                       break;
+               udelay(1);
+       }
+
+       /* timeout means the SMC did not become ready for a new message */
+       if (i == adev->usec_timeout)
+               return -EINVAL;
+
+       WREG32(mmSMU_MP1_SRBM2P_RESP_0, 0);
+       WREG32(mmSMU_MP1_SRBM2P_MSG_0, msg);
+
+       return 0;
+}
+
+int cz_send_msg_to_smc(struct amdgpu_device *adev, u16 msg)
+{
+       int i;
+       u32 content = 0, tmp = 0;
+
+       if (cz_send_msg_to_smc_async(adev, msg))
+               return -EINVAL;
+
+       for (i = 0; i < adev->usec_timeout; i++) {
+               tmp = REG_GET_FIELD(RREG32(mmSMU_MP1_SRBM2P_RESP_0),
+                               SMU_MP1_SRBM2P_RESP_0, CONTENT);
+               if (content != tmp)
+                       break;
+               udelay(1);
+       }
+
+       /* timeout means the SMC never responded */
+       if (i == adev->usec_timeout)
+               return -EINVAL;
+
+       if (tmp != PPSMC_Result_OK) {
+               dev_err(adev->dev, "SMC failed to process the message.\n");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+int cz_send_msg_to_smc_with_parameter_async(struct amdgpu_device *adev,
+                                               u16 msg, u32 parameter)
+{
+       WREG32(mmSMU_MP1_SRBM2P_ARG_0, parameter);
+       return cz_send_msg_to_smc_async(adev, msg);
+}
+
+int cz_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
+                                               u16 msg, u32 parameter)
+{
+       WREG32(mmSMU_MP1_SRBM2P_ARG_0, parameter);
+       return cz_send_msg_to_smc(adev, msg);
+}
+
+static int cz_set_smc_sram_address(struct amdgpu_device *adev,
+                                               u32 smc_address, u32 limit)
+{
+       if (smc_address & 3)
+               return -EINVAL;
+       if ((smc_address + 3) > limit)
+               return -EINVAL;
+
+       WREG32(mmMP0PUB_IND_INDEX_0, SMN_MP1_SRAM_START_ADDR + smc_address);
+
+       return 0;
+}
+
+int cz_read_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address,
+                                               u32 *value, u32 limit)
+{
+       int ret;
+
+       ret = cz_set_smc_sram_address(adev, smc_address, limit);
+       if (ret)
+               return ret;
+
+       *value = RREG32(mmMP0PUB_IND_DATA_0);
+
+       return 0;
+}
+
+int cz_write_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address,
+                                               u32 value, u32 limit)
+{
+       int ret;
+
+       ret = cz_set_smc_sram_address(adev, smc_address, limit);
+       if (ret)
+               return ret;
+
+       WREG32(mmMP0PUB_IND_DATA_0, value);
+
+       return 0;
+}
+
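+/*
+ * Clear the UcodeLoadStatus word in SMU SRAM, hand the SMU the TOC
+ * buffer address and then execute the ARAM save, power profiling and
+ * initialize jobs built at init time.
+ */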
+static int cz_smu_request_load_fw(struct amdgpu_device *adev)
+{
+       struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
+
+       uint32_t smc_addr = SMU8_FIRMWARE_HEADER_LOCATION +
+                       offsetof(struct SMU8_Firmware_Header, UcodeLoadStatus);
+
+       cz_write_smc_sram_dword(adev, smc_addr, 0, smc_addr + 4);
+
+       /* prepare toc buffers */
+       cz_send_msg_to_smc_with_parameter(adev,
+                               PPSMC_MSG_DriverDramAddrHi,
+                               priv->toc_buffer.mc_addr_high);
+       cz_send_msg_to_smc_with_parameter(adev,
+                               PPSMC_MSG_DriverDramAddrLo,
+                               priv->toc_buffer.mc_addr_low);
+       cz_send_msg_to_smc(adev, PPSMC_MSG_InitJobs);
+
+       /* execute jobs */
+       cz_send_msg_to_smc_with_parameter(adev,
+                               PPSMC_MSG_ExecuteJob,
+                               priv->toc_entry_aram);
+
+       cz_send_msg_to_smc_with_parameter(adev,
+                               PPSMC_MSG_ExecuteJob,
+                               priv->toc_entry_power_profiling_index);
+
+       cz_send_msg_to_smc_with_parameter(adev,
+                               PPSMC_MSG_ExecuteJob,
+                               priv->toc_entry_initialize_index);
+
+       return 0;
+}
+
+/*
+ * Check if the FW has been loaded; the SMU will not return if loading
+ * has not finished.
+ */
+static int cz_smu_check_fw_load_finish(struct amdgpu_device *adev,
+                                               uint32_t fw_mask)
+{
+       int i;
+       uint32_t index = SMN_MP1_SRAM_START_ADDR +
+                       SMU8_FIRMWARE_HEADER_LOCATION +
+                       offsetof(struct SMU8_Firmware_Header, UcodeLoadStatus);
+
+       WREG32(mmMP0PUB_IND_INDEX, index);
+
+       for (i = 0; i < adev->usec_timeout; i++) {
+               if (fw_mask == (RREG32(mmMP0PUB_IND_DATA) & fw_mask))
+                       break;
+               udelay(1);
+       }
+
+       if (i >= adev->usec_timeout) {
+               dev_err(adev->dev,
+                       "SMU check loaded firmware failed, expecting 0x%x, getting 0x%x\n",
+                       fw_mask, RREG32(mmMP0PUB_IND_DATA));
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+/*
+ * Interface for the different IP blocks to check firmware loading status.
+ * Returns 0 if the firmware has been loaded, 1 otherwise.
+ */
+static int cz_smu_check_finished(struct amdgpu_device *adev,
+                                                       enum AMDGPU_UCODE_ID id)
+{
+       switch (id) {
+       case AMDGPU_UCODE_ID_SDMA0:
+               if (adev->smu.fw_flags & AMDGPU_SDMA0_UCODE_LOADED)
+                       return 0;
+               break;
+       case AMDGPU_UCODE_ID_SDMA1:
+               if (adev->smu.fw_flags & AMDGPU_SDMA1_UCODE_LOADED)
+                       return 0;
+               break;
+       case AMDGPU_UCODE_ID_CP_CE:
+               if (adev->smu.fw_flags & AMDGPU_CPCE_UCODE_LOADED)
+                       return 0;
+               break;
+       case AMDGPU_UCODE_ID_CP_PFP:
+               if (adev->smu.fw_flags & AMDGPU_CPPFP_UCODE_LOADED)
+                       return 0;
+               break;
+       case AMDGPU_UCODE_ID_CP_ME:
+               if (adev->smu.fw_flags & AMDGPU_CPME_UCODE_LOADED)
+                       return 0;
+               break;
+       case AMDGPU_UCODE_ID_CP_MEC1:
+               if (adev->smu.fw_flags & AMDGPU_CPMEC1_UCODE_LOADED)
+                       return 0;
+               break;
+       case AMDGPU_UCODE_ID_CP_MEC2:
+               if (adev->smu.fw_flags & AMDGPU_CPMEC2_UCODE_LOADED)
+                       return 0;
+               break;
+       case AMDGPU_UCODE_ID_RLC_G:
+               if (adev->smu.fw_flags & AMDGPU_CPRLC_UCODE_LOADED)
+                       return 0;
+               break;
+       case AMDGPU_UCODE_ID_MAXIMUM:
+       default:
+               break;
+       }
+
+       return 1;
+}
+
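+/*
+ * Halt both MEC micro engines and point the CPC instruction cache at
+ * the MEC ucode image.
+ */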
+static int cz_load_mec_firmware(struct amdgpu_device *adev)
+{
+       struct amdgpu_firmware_info *ucode =
+                               &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1];
+       uint32_t reg_data;
+       uint32_t tmp;
+
+       if (ucode->fw == NULL)
+               return -EINVAL;
+
+       /* Disable MEC parsing/prefetching */
+       tmp = RREG32(mmCP_MEC_CNTL);
+       tmp = REG_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME1_HALT, 1);
+       tmp = REG_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME2_HALT, 1);
+       WREG32(mmCP_MEC_CNTL, tmp);
+
+       tmp = RREG32(mmCP_CPC_IC_BASE_CNTL);
+       tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
+       tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, ATC, 0);
+       tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
+       tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, MTYPE, 1);
+       WREG32(mmCP_CPC_IC_BASE_CNTL, tmp);
+
+       reg_data = lower_32_bits(ucode->mc_addr) &
+                       REG_FIELD_MASK(CP_CPC_IC_BASE_LO, IC_BASE_LO);
+       WREG32(mmCP_CPC_IC_BASE_LO, reg_data);
+
+       reg_data = upper_32_bits(ucode->mc_addr) &
+                       REG_FIELD_MASK(CP_CPC_IC_BASE_HI, IC_BASE_HI);
+       WREG32(mmCP_CPC_IC_BASE_HI, reg_data);
+
+       return 0;
+}
+
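+/*
+ * Ask the SMU to load all firmware images, wait until the load status
+ * mask reports them present, load the MEC ucode manually on Carrizo
+ * and record which ucodes are resident in adev->smu.fw_flags.
+ */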
+int cz_smu_start(struct amdgpu_device *adev)
+{
+       int ret = 0;
+
+       uint32_t fw_to_check = UCODE_ID_RLC_G_MASK |
+                               UCODE_ID_SDMA0_MASK |
+                               UCODE_ID_SDMA1_MASK |
+                               UCODE_ID_CP_CE_MASK |
+                               UCODE_ID_CP_ME_MASK |
+                               UCODE_ID_CP_PFP_MASK |
+                               UCODE_ID_CP_MEC_JT1_MASK |
+                               UCODE_ID_CP_MEC_JT2_MASK;
+
+       cz_smu_request_load_fw(adev);
+       ret = cz_smu_check_fw_load_finish(adev, fw_to_check);
+       if (ret)
+               return ret;
+
+       /* manually load MEC firmware for CZ */
+       if (adev->asic_type == CHIP_CARRIZO) {
+               ret = cz_load_mec_firmware(adev);
+               if (ret) {
+                       dev_err(adev->dev, "(%d) MEC firmware load failed\n", ret);
+                       return ret;
+               }
+       }
+
+       /* setup fw load flag */
+       adev->smu.fw_flags = AMDGPU_SDMA0_UCODE_LOADED |
+                               AMDGPU_SDMA1_UCODE_LOADED |
+                               AMDGPU_CPCE_UCODE_LOADED |
+                               AMDGPU_CPPFP_UCODE_LOADED |
+                               AMDGPU_CPME_UCODE_LOADED |
+                               AMDGPU_CPMEC1_UCODE_LOADED |
+                               AMDGPU_CPMEC2_UCODE_LOADED |
+                               AMDGPU_CPRLC_UCODE_LOADED;
+
+       return ret;
+}
+
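+/* Map an SMU ucode ID onto the corresponding driver AMDGPU_UCODE_ID. */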
+static uint32_t cz_convert_fw_type(uint32_t fw_type)
+{
+       enum AMDGPU_UCODE_ID result = AMDGPU_UCODE_ID_MAXIMUM;
+
+       switch (fw_type) {
+       case UCODE_ID_SDMA0:
+               result = AMDGPU_UCODE_ID_SDMA0;
+               break;
+       case UCODE_ID_SDMA1:
+               result = AMDGPU_UCODE_ID_SDMA1;
+               break;
+       case UCODE_ID_CP_CE:
+               result = AMDGPU_UCODE_ID_CP_CE;
+               break;
+       case UCODE_ID_CP_PFP:
+               result = AMDGPU_UCODE_ID_CP_PFP;
+               break;
+       case UCODE_ID_CP_ME:
+               result = AMDGPU_UCODE_ID_CP_ME;
+               break;
+       case UCODE_ID_CP_MEC_JT1:
+       case UCODE_ID_CP_MEC_JT2:
+               result = AMDGPU_UCODE_ID_CP_MEC1;
+               break;
+       case UCODE_ID_RLC_G:
+               result = AMDGPU_UCODE_ID_RLC_G;
+               break;
+       default:
+               DRM_ERROR("UCode type is out of range!");
+       }
+
+       return result;
+}
+
+static uint8_t cz_smu_translate_firmware_enum_to_arg(
+                       enum cz_scratch_entry firmware_enum)
+{
+       uint8_t ret = 0;
+
+       switch (firmware_enum) {
+       case CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0:
+               ret = UCODE_ID_SDMA0;
+               break;
+       case CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1:
+               ret = UCODE_ID_SDMA1;
+               break;
+       case CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE:
+               ret = UCODE_ID_CP_CE;
+               break;
+       case CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP:
+               ret = UCODE_ID_CP_PFP;
+               break;
+       case CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME:
+               ret = UCODE_ID_CP_ME;
+               break;
+       case CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1:
+               ret = UCODE_ID_CP_MEC_JT1;
+               break;
+       case CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2:
+               ret = UCODE_ID_CP_MEC_JT2;
+               break;
+       case CZ_SCRATCH_ENTRY_UCODE_ID_GMCON_RENG:
+               ret = UCODE_ID_GMCON_RENG;
+               break;
+       case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G:
+               ret = UCODE_ID_RLC_G;
+               break;
+       case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH:
+               ret = UCODE_ID_RLC_SCRATCH;
+               break;
+       case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM:
+               ret = UCODE_ID_RLC_SRM_ARAM;
+               break;
+       case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM:
+               ret = UCODE_ID_RLC_SRM_DRAM;
+               break;
+       case CZ_SCRATCH_ENTRY_UCODE_ID_DMCU_ERAM:
+               ret = UCODE_ID_DMCU_ERAM;
+               break;
+       case CZ_SCRATCH_ENTRY_UCODE_ID_DMCU_IRAM:
+               ret = UCODE_ID_DMCU_IRAM;
+               break;
+       case CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING:
+               ret = TASK_ARG_INIT_MM_PWR_LOG;
+               break;
+       case CZ_SCRATCH_ENTRY_DATA_ID_SDMA_HALT:
+       case CZ_SCRATCH_ENTRY_DATA_ID_SYS_CLOCKGATING:
+       case CZ_SCRATCH_ENTRY_DATA_ID_SDMA_RING_REGS:
+       case CZ_SCRATCH_ENTRY_DATA_ID_NONGFX_REINIT:
+       case CZ_SCRATCH_ENTRY_DATA_ID_SDMA_START:
+       case CZ_SCRATCH_ENTRY_DATA_ID_IH_REGISTERS:
+               ret = TASK_ARG_REG_MMIO;
+               break;
+       case CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE:
+               ret = TASK_ARG_INIT_CLK_TABLE;
+               break;
+       }
+
+       return ret;
+}
+
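+/*
+ * Record the GPU address and size of one ucode image in a driver
+ * buffer entry; the MEC jump table entries only cover the JT portion
+ * of the MEC image.
+ */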
+static int cz_smu_populate_single_firmware_entry(struct amdgpu_device *adev,
+                                       enum cz_scratch_entry firmware_enum,
+                                       struct cz_buffer_entry *entry)
+{
+       uint64_t gpu_addr;
+       uint32_t data_size;
+       uint8_t ucode_id = cz_smu_translate_firmware_enum_to_arg(firmware_enum);
+       enum AMDGPU_UCODE_ID id = cz_convert_fw_type(ucode_id);
+       struct amdgpu_firmware_info *ucode = &adev->firmware.ucode[id];
+       const struct gfx_firmware_header_v1_0 *header;
+
+       if (ucode->fw == NULL)
+               return -EINVAL;
+
+       gpu_addr = ucode->mc_addr;
+       header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
+       data_size = le32_to_cpu(header->header.ucode_size_bytes);
+
+       if ((firmware_enum == CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1) ||
+           (firmware_enum == CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2)) {
+               gpu_addr += le32_to_cpu(header->jt_offset) << 2;
+               data_size = le32_to_cpu(header->jt_size) << 2;
+       }
+
+       entry->mc_addr_low = lower_32_bits(gpu_addr);
+       entry->mc_addr_high = upper_32_bits(gpu_addr);
+       entry->data_size = data_size;
+       entry->firmware_ID = firmware_enum;
+
+       return 0;
+}
+
+static int cz_smu_populate_single_scratch_entry(struct amdgpu_device *adev,
+                                       enum cz_scratch_entry scratch_type,
+                                       uint32_t size_in_byte,
+                                       struct cz_buffer_entry *entry)
+{
+       struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
+       uint64_t mc_addr = (((uint64_t) priv->smu_buffer.mc_addr_high) << 32) |
+                                               priv->smu_buffer.mc_addr_low;
+       mc_addr += size_in_byte;
+
+       priv->smu_buffer_used_bytes += size_in_byte;
+       entry->data_size = size_in_byte;
+       entry->kaddr = priv->smu_buffer.kaddr + priv->smu_buffer_used_bytes;
+       entry->mc_addr_low = lower_32_bits(mc_addr);
+       entry->mc_addr_high = upper_32_bits(mc_addr);
+       entry->firmware_ID = scratch_type;
+
+       return 0;
+}
+
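+/*
+ * Append a UCODE_LOAD task for the given firmware to the TOC, chaining
+ * it to the following entry unless it is the last task of the job.
+ */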
+static int cz_smu_populate_single_ucode_load_task(struct amdgpu_device *adev,
+                                               enum cz_scratch_entry firmware_enum,
+                                               bool is_last)
+{
+       uint8_t i;
+       struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
+       struct TOC *toc = (struct TOC *)priv->toc_buffer.kaddr;
+       struct SMU_Task *task = &toc->tasks[priv->toc_entry_used_count++];
+
+       task->type = TASK_TYPE_UCODE_LOAD;
+       task->arg = cz_smu_translate_firmware_enum_to_arg(firmware_enum);
+       task->next = is_last ? END_OF_TASK_LIST : priv->toc_entry_used_count;
+
+       for (i = 0; i < priv->driver_buffer_length; i++)
+               if (priv->driver_buffer[i].firmware_ID == firmware_enum)
+                       break;
+
+       if (i >= priv->driver_buffer_length) {
+               dev_err(adev->dev, "Invalid Firmware Type\n");
+               return -EINVAL;
+       }
+
+       task->addr.low = priv->driver_buffer[i].mc_addr_low;
+       task->addr.high = priv->driver_buffer[i].mc_addr_high;
+       task->size_bytes = priv->driver_buffer[i].data_size;
+
+       return 0;
+}
+
+static int cz_smu_populate_single_scratch_task(struct amdgpu_device *adev,
+                                               enum cz_scratch_entry firmware_enum,
+                                               uint8_t type, bool is_last)
+{
+       uint8_t i;
+       struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
+       struct TOC *toc = (struct TOC *)priv->toc_buffer.kaddr;
+       struct SMU_Task *task = &toc->tasks[priv->toc_entry_used_count++];
+
+       task->type = type;
+       task->arg = cz_smu_translate_firmware_enum_to_arg(firmware_enum);
+       task->next = is_last ? END_OF_TASK_LIST : priv->toc_entry_used_count;
+
+       for (i = 0; i < priv->scratch_buffer_length; i++)
+               if (priv->scratch_buffer[i].firmware_ID == firmware_enum)
+                       break;
+
+       if (i >= priv->scratch_buffer_length) {
+               dev_err(adev->dev, "Invalid Firmware Type\n");
+               return -EINVAL;
+       }
+
+       task->addr.low = priv->scratch_buffer[i].mc_addr_low;
+       task->addr.high = priv->scratch_buffer[i].mc_addr_high;
+       task->size_bytes = priv->scratch_buffer[i].data_size;
+
+       if (CZ_SCRATCH_ENTRY_DATA_ID_IH_REGISTERS == firmware_enum) {
+               struct cz_ih_meta_data *pIHReg_restore =
+                       (struct cz_ih_meta_data *)priv->scratch_buffer[i].kaddr;
+               pIHReg_restore->command =
+                       METADATA_CMD_MODE0 | METADATA_PERFORM_ON_LOAD;
+       }
+
+       return 0;
+}
+
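+/*
+ * The cz_smu_construct_toc_for_*() helpers below each record the
+ * current TOC index for their job and append the tasks the SMU runs
+ * when that job is executed.
+ */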
+static int cz_smu_construct_toc_for_rlc_aram_save(struct amdgpu_device *adev)
+{
+       struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
+       priv->toc_entry_aram = priv->toc_entry_used_count;
+       cz_smu_populate_single_scratch_task(adev,
+                       CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
+                       TASK_TYPE_UCODE_SAVE, true);
+
+       return 0;
+}
+
+static int cz_smu_construct_toc_for_vddgfx_enter(struct amdgpu_device *adev)
+{
+       struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
+       struct TOC *toc = (struct TOC *)priv->toc_buffer.kaddr;
+
+       toc->JobList[JOB_GFX_SAVE] = (uint8_t)priv->toc_entry_used_count;
+       cz_smu_populate_single_scratch_task(adev,
+                               CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
+                               TASK_TYPE_UCODE_SAVE, false);
+       cz_smu_populate_single_scratch_task(adev,
+                               CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
+                               TASK_TYPE_UCODE_SAVE, true);
+
+       return 0;
+}
+
+static int cz_smu_construct_toc_for_vddgfx_exit(struct amdgpu_device *adev)
+{
+       struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
+       struct TOC *toc = (struct TOC *)priv->toc_buffer.kaddr;
+
+       toc->JobList[JOB_GFX_RESTORE] = (uint8_t)priv->toc_entry_used_count;
+
+       /* populate ucode */
+       if (adev->firmware.smu_load) {
+               cz_smu_populate_single_ucode_load_task(adev,
+                               CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE, false);
+               cz_smu_populate_single_ucode_load_task(adev,
+                               CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP, false);
+               cz_smu_populate_single_ucode_load_task(adev,
+                               CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME, false);
+               cz_smu_populate_single_ucode_load_task(adev,
+                               CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
+               cz_smu_populate_single_ucode_load_task(adev,
+                               CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false);
+               cz_smu_populate_single_ucode_load_task(adev,
+                               CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G, false);
+       }
+
+       /* populate scratch */
+       cz_smu_populate_single_scratch_task(adev,
+                               CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
+                               TASK_TYPE_UCODE_LOAD, false);
+       cz_smu_populate_single_scratch_task(adev,
+                               CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
+                               TASK_TYPE_UCODE_LOAD, false);
+       cz_smu_populate_single_scratch_task(adev,
+                               CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
+                               TASK_TYPE_UCODE_LOAD, true);
+
+       return 0;
+}
+
+static int cz_smu_construct_toc_for_power_profiling(struct amdgpu_device *adev)
+{
+       struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
+
+       priv->toc_entry_power_profiling_index = priv->toc_entry_used_count;
+
+       cz_smu_populate_single_scratch_task(adev,
+                               CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING,
+                               TASK_TYPE_INITIALIZE, true);
+       return 0;
+}
+
+static int cz_smu_construct_toc_for_bootup(struct amdgpu_device *adev)
+{
+       struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
+
+       priv->toc_entry_initialize_index = priv->toc_entry_used_count;
+
+       if (adev->firmware.smu_load) {
+               cz_smu_populate_single_ucode_load_task(adev,
+                               CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0, false);
+               cz_smu_populate_single_ucode_load_task(adev,
+                               CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1, false);
+               cz_smu_populate_single_ucode_load_task(adev,
+                               CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE, false);
+               cz_smu_populate_single_ucode_load_task(adev,
+                               CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP, false);
+               cz_smu_populate_single_ucode_load_task(adev,
+                               CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME, false);
+               cz_smu_populate_single_ucode_load_task(adev,
+                               CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
+               cz_smu_populate_single_ucode_load_task(adev,
+                               CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false);
+               cz_smu_populate_single_ucode_load_task(adev,
+                               CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G, true);
+       }
+
+       return 0;
+}
+
+static int cz_smu_construct_toc_for_clock_table(struct amdgpu_device *adev)
+{
+       struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
+
+       priv->toc_entry_clock_table = priv->toc_entry_used_count;
+
+       cz_smu_populate_single_scratch_task(adev,
+                               CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE,
+                               TASK_TYPE_INITIALIZE, true);
+
+       return 0;
+}
+
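+/* Mark every JobList slot as unused before the jobs are constructed. */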
+static int cz_smu_initialize_toc_empty_job_list(struct amdgpu_device *adev)
+{
+       int i;
+       struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
+       struct TOC *toc = (struct TOC *)priv->toc_buffer.kaddr;
+
+       for (i = 0; i < NUM_JOBLIST_ENTRIES; i++)
+               toc->JobList[i] = (uint8_t)IGNORE_JOB;
+
+       return 0;
+}
+
+/*
+ * cz smu uninitialization
+ */
+int cz_smu_fini(struct amdgpu_device *adev)
+{
+       amdgpu_bo_unref(&adev->smu.toc_buf);
+       amdgpu_bo_unref(&adev->smu.smu_buf);
+       kfree(adev->smu.priv);
+       adev->smu.priv = NULL;
+       if (adev->firmware.smu_load)
+               amdgpu_ucode_fini_bo(adev);
+
+       return 0;
+}
+
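+/*
+ * Point the SMU at the clock table scratch buffer, run the clock table
+ * job and ask the SMU to copy the table into DRAM so the driver can
+ * read it.
+ */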
+int cz_smu_download_pptable(struct amdgpu_device *adev, void **table)
+{
+       uint8_t i;
+       struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
+
+       for (i = 0; i < priv->scratch_buffer_length; i++)
+               if (priv->scratch_buffer[i].firmware_ID ==
+                               CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE)
+                       break;
+
+       if (i >= priv->scratch_buffer_length) {
+               dev_err(adev->dev, "Invalid Scratch Type\n");
+               return -EINVAL;
+       }
+
+       *table = (struct SMU8_Fusion_ClkTable *)priv->scratch_buffer[i].kaddr;
+
+       /* prepare buffer for pptable */
+       cz_send_msg_to_smc_with_parameter(adev,
+                               PPSMC_MSG_SetClkTableAddrHi,
+                               priv->scratch_buffer[i].mc_addr_high);
+       cz_send_msg_to_smc_with_parameter(adev,
+                               PPSMC_MSG_SetClkTableAddrLo,
+                               priv->scratch_buffer[i].mc_addr_low);
+       cz_send_msg_to_smc_with_parameter(adev,
+                               PPSMC_MSG_ExecuteJob,
+                               priv->toc_entry_clock_table);
+
+       /* actual downloading */
+       cz_send_msg_to_smc(adev, PPSMC_MSG_ClkTableXferToDram);
+
+       return 0;
+}
+
+int cz_smu_upload_pptable(struct amdgpu_device *adev)
+{
+       uint8_t i;
+       struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
+
+       for (i = 0; i < priv->scratch_buffer_length; i++)
+               if (priv->scratch_buffer[i].firmware_ID ==
+                               CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE)
+                       break;
+
+       if (i >= priv->scratch_buffer_length) {
+               dev_err(adev->dev, "Invalid Scratch Type\n");
+               return -EINVAL;
+       }
+
+       /* prepare SMU */
+       cz_send_msg_to_smc_with_parameter(adev,
+                               PPSMC_MSG_SetClkTableAddrHi,
+                               priv->scratch_buffer[i].mc_addr_high);
+       cz_send_msg_to_smc_with_parameter(adev,
+                               PPSMC_MSG_SetClkTableAddrLo,
+                               priv->scratch_buffer[i].mc_addr_low);
+       cz_send_msg_to_smc_with_parameter(adev,
+                               PPSMC_MSG_ExecuteJob,
+                               priv->toc_entry_clock_table);
+
+       /* actual uploading */
+       cz_send_msg_to_smc(adev, PPSMC_MSG_ClkTableXferToSmu);
+
+       return 0;
+}
+
+/*
+ * cz smumgr functions initialization
+ */
+static const struct amdgpu_smumgr_funcs cz_smumgr_funcs = {
+       .check_fw_load_finish = cz_smu_check_finished,
+       .request_smu_load_fw = NULL,
+       .request_smu_specific_fw = NULL,
+};
+
+/*
+ * cz smu initialization
+ */
+int cz_smu_init(struct amdgpu_device *adev)
+{
+       int ret = -EINVAL;
+       uint64_t mc_addr = 0;
+       struct amdgpu_bo **toc_buf = &adev->smu.toc_buf;
+       struct amdgpu_bo **smu_buf = &adev->smu.smu_buf;
+       void *toc_buf_ptr = NULL;
+       void *smu_buf_ptr = NULL;
+
+       struct cz_smu_private_data *priv =
+               kzalloc(sizeof(struct cz_smu_private_data), GFP_KERNEL);
+       if (priv == NULL)
+               return -ENOMEM;
+
+       /* allocate firmware buffers */
+       if (adev->firmware.smu_load)
+               amdgpu_ucode_init_bo(adev);
+
+       adev->smu.priv = priv;
+       adev->smu.fw_flags = 0;
+       priv->toc_buffer.data_size = 4096;
+
+       priv->smu_buffer.data_size =
+                               ALIGN(UCODE_ID_RLC_SCRATCH_SIZE_BYTE, 32) +
+                               ALIGN(UCODE_ID_RLC_SRM_ARAM_SIZE_BYTE, 32) +
+                               ALIGN(UCODE_ID_RLC_SRM_DRAM_SIZE_BYTE, 32) +
+                               ALIGN(sizeof(struct SMU8_MultimediaPowerLogData), 32) +
+                               ALIGN(sizeof(struct SMU8_Fusion_ClkTable), 32);
+
+       /* prepare toc buffer and smu buffer:
+        * 1. create amdgpu_bo for toc buffer and smu buffer
+        * 2. pin mc address
+        * 3. map kernel virtual address
+        */
+       ret = amdgpu_bo_create(adev, priv->toc_buffer.data_size, PAGE_SIZE,
+                               true, AMDGPU_GEM_DOMAIN_GTT, 0, NULL, toc_buf);
+
+       if (ret) {
+               dev_err(adev->dev, "(%d) SMC TOC buffer allocation failed\n", ret);
+               return ret;
+       }
+
+       ret = amdgpu_bo_create(adev, priv->smu_buffer.data_size, PAGE_SIZE,
+                               true, AMDGPU_GEM_DOMAIN_GTT, 0, NULL, smu_buf);
+
+       if (ret) {
+               dev_err(adev->dev, "(%d) SMC Internal buffer allocation failed\n", ret);
+               return ret;
+       }
+
+       /* toc buffer reserve/pin/map */
+       ret = amdgpu_bo_reserve(adev->smu.toc_buf, false);
+       if (ret) {
+               amdgpu_bo_unref(&adev->smu.toc_buf);
+               dev_err(adev->dev, "(%d) SMC TOC buffer reserve failed\n", ret);
+               return ret;
+       }
+
+       ret = amdgpu_bo_pin(adev->smu.toc_buf, AMDGPU_GEM_DOMAIN_GTT, &mc_addr);
+       if (ret) {
+               amdgpu_bo_unreserve(adev->smu.toc_buf);
+               amdgpu_bo_unref(&adev->smu.toc_buf);
+               dev_err(adev->dev, "(%d) SMC TOC buffer pin failed\n", ret);
+               return ret;
+       }
+
+       ret = amdgpu_bo_kmap(*toc_buf, &toc_buf_ptr);
+       if (ret)
+               goto smu_init_failed;
+
+       amdgpu_bo_unreserve(adev->smu.toc_buf);
+
+       priv->toc_buffer.mc_addr_low = lower_32_bits(mc_addr);
+       priv->toc_buffer.mc_addr_high = upper_32_bits(mc_addr);
+       priv->toc_buffer.kaddr = toc_buf_ptr;
+
+       /* smu buffer reserve/pin/map */
+       ret = amdgpu_bo_reserve(adev->smu.smu_buf, false);
+       if (ret) {
+               amdgpu_bo_unref(&adev->smu.smu_buf);
+               dev_err(adev->dev, "(%d) SMC Internal buffer reserve failed\n", ret);
+               return ret;
+       }
+
+       ret = amdgpu_bo_pin(adev->smu.smu_buf, AMDGPU_GEM_DOMAIN_GTT, &mc_addr);
+       if (ret) {
+               amdgpu_bo_unreserve(adev->smu.smu_buf);
+               amdgpu_bo_unref(&adev->smu.smu_buf);
+               dev_err(adev->dev, "(%d) SMC Internal buffer pin failed\n", ret);
+               return ret;
+       }
+
+       ret = amdgpu_bo_kmap(*smu_buf, &smu_buf_ptr);
+       if (ret)
+               goto smu_init_failed;
+
+       amdgpu_bo_unreserve(adev->smu.smu_buf);
+
+       priv->smu_buffer.mc_addr_low = lower_32_bits(mc_addr);
+       priv->smu_buffer.mc_addr_high = upper_32_bits(mc_addr);
+       priv->smu_buffer.kaddr = smu_buf_ptr;
+
+       if (adev->firmware.smu_load) {
+               if (cz_smu_populate_single_firmware_entry(adev,
+                               CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0,
+                               &priv->driver_buffer[priv->driver_buffer_length++]))
+                       goto smu_init_failed;
+               if (cz_smu_populate_single_firmware_entry(adev,
+                               CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1,
+                               &priv->driver_buffer[priv->driver_buffer_length++]))
+                       goto smu_init_failed;
+               if (cz_smu_populate_single_firmware_entry(adev,
+                               CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE,
+                               &priv->driver_buffer[priv->driver_buffer_length++]))
+                       goto smu_init_failed;
+               if (cz_smu_populate_single_firmware_entry(adev,
+                               CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP,
+                               &priv->driver_buffer[priv->driver_buffer_length++]))
+                       goto smu_init_failed;
+               if (cz_smu_populate_single_firmware_entry(adev,
+                               CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME,
+                               &priv->driver_buffer[priv->driver_buffer_length++]))
+                       goto smu_init_failed;
+               if (cz_smu_populate_single_firmware_entry(adev,
+                               CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1,
+                               &priv->driver_buffer[priv->driver_buffer_length++]))
+                       goto smu_init_failed;
+               if (cz_smu_populate_single_firmware_entry(adev,
+                               CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2,
+                               &priv->driver_buffer[priv->driver_buffer_length++]))
+                       goto smu_init_failed;
+               if (cz_smu_populate_single_firmware_entry(adev,
+                               CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G,
+                               &priv->driver_buffer[priv->driver_buffer_length++]))
+                       goto smu_init_failed;
+       }
+
+       if (cz_smu_populate_single_scratch_entry(adev,
+                               CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
+                               UCODE_ID_RLC_SCRATCH_SIZE_BYTE,
+                               &priv->scratch_buffer[priv->scratch_buffer_length++]))
+               goto smu_init_failed;
+       if (cz_smu_populate_single_scratch_entry(adev,
+                               CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
+                               UCODE_ID_RLC_SRM_ARAM_SIZE_BYTE,
+                               &priv->scratch_buffer[priv->scratch_buffer_length++]))
+               goto smu_init_failed;
+       if (cz_smu_populate_single_scratch_entry(adev,
+                               CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
+                               UCODE_ID_RLC_SRM_DRAM_SIZE_BYTE,
+                               &priv->scratch_buffer[priv->scratch_buffer_length++]))
+               goto smu_init_failed;
+       if (cz_smu_populate_single_scratch_entry(adev,
+                               CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING,
+                               sizeof(struct SMU8_MultimediaPowerLogData),
+                               &priv->scratch_buffer[priv->scratch_buffer_length++]))
+               goto smu_init_failed;
+       if (cz_smu_populate_single_scratch_entry(adev,
+                               CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE,
+                               sizeof(struct SMU8_Fusion_ClkTable),
+                               &priv->scratch_buffer[priv->scratch_buffer_length++]))
+               goto smu_init_failed;
+
+       cz_smu_initialize_toc_empty_job_list(adev);
+       cz_smu_construct_toc_for_rlc_aram_save(adev);
+       cz_smu_construct_toc_for_vddgfx_enter(adev);
+       cz_smu_construct_toc_for_vddgfx_exit(adev);
+       cz_smu_construct_toc_for_power_profiling(adev);
+       cz_smu_construct_toc_for_bootup(adev);
+       cz_smu_construct_toc_for_clock_table(adev);
+       /* init the smumgr functions */
+       adev->smu.smumgr_funcs = &cz_smumgr_funcs;
+
+       return 0;
+
+smu_init_failed:
+       amdgpu_bo_unref(toc_buf);
+       amdgpu_bo_unref(smu_buf);
+
+       return ret;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_smumgr.h b/drivers/gpu/drm/amd/amdgpu/cz_smumgr.h
new file mode 100644 (file)
index 0000000..924d355
--- /dev/null
@@ -0,0 +1,94 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __CZ_SMC_H__
+#define __CZ_SMC_H__
+
+#define MAX_NUM_FIRMWARE                        8
+#define MAX_NUM_SCRATCH                         11
+#define CZ_SCRATCH_SIZE_NONGFX_CLOCKGATING      1024
+#define CZ_SCRATCH_SIZE_NONGFX_GOLDENSETTING    2048
+#define CZ_SCRATCH_SIZE_SDMA_METADATA           1024
+#define CZ_SCRATCH_SIZE_IH                      ((2*256+1)*4)
+
+enum cz_scratch_entry {
+       CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0 = 0,
+       CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1,
+       CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE,
+       CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP,
+       CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME,
+       CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1,
+       CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2,
+       CZ_SCRATCH_ENTRY_UCODE_ID_GMCON_RENG,
+       CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G,
+       CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
+       CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
+       CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
+       CZ_SCRATCH_ENTRY_UCODE_ID_DMCU_ERAM,
+       CZ_SCRATCH_ENTRY_UCODE_ID_DMCU_IRAM,
+       CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING,
+       CZ_SCRATCH_ENTRY_DATA_ID_SDMA_HALT,
+       CZ_SCRATCH_ENTRY_DATA_ID_SYS_CLOCKGATING,
+       CZ_SCRATCH_ENTRY_DATA_ID_SDMA_RING_REGS,
+       CZ_SCRATCH_ENTRY_DATA_ID_NONGFX_REINIT,
+       CZ_SCRATCH_ENTRY_DATA_ID_SDMA_START,
+       CZ_SCRATCH_ENTRY_DATA_ID_IH_REGISTERS,
+       CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE
+};
+
+struct cz_buffer_entry {
+       uint32_t        data_size;
+       uint32_t        mc_addr_low;
+       uint32_t        mc_addr_high;
+       void            *kaddr;
+       enum cz_scratch_entry firmware_ID;
+};
+
+struct cz_register_index_data_pair {
+       uint32_t        offset;
+       uint32_t        value;
+};
+
+struct cz_ih_meta_data {
+       uint32_t        command;
+       struct cz_register_index_data_pair register_index_value_pair[1];
+};
+
+struct cz_smu_private_data {
+       uint8_t         driver_buffer_length;
+       uint8_t         scratch_buffer_length;
+       uint16_t        toc_entry_used_count;
+       uint16_t        toc_entry_initialize_index;
+       uint16_t        toc_entry_power_profiling_index;
+       uint16_t        toc_entry_aram;
+       uint16_t        toc_entry_ih_register_restore_task_index;
+       uint16_t        toc_entry_clock_table;
+       uint16_t        ih_register_restore_task_size;
+       uint16_t        smu_buffer_used_bytes;
+
+       struct cz_buffer_entry  toc_buffer;
+       struct cz_buffer_entry  smu_buffer;
+       struct cz_buffer_entry  driver_buffer[MAX_NUM_FIRMWARE];
+       struct cz_buffer_entry  scratch_buffer[MAX_NUM_SCRATCH];
+};
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
new file mode 100644 (file)
index 0000000..d412291
--- /dev/null
@@ -0,0 +1,3871 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "drmP.h"
+#include "amdgpu.h"
+#include "amdgpu_pm.h"
+#include "amdgpu_i2c.h"
+#include "vid.h"
+#include "atom.h"
+#include "amdgpu_atombios.h"
+#include "atombios_crtc.h"
+#include "atombios_encoders.h"
+#include "amdgpu_pll.h"
+#include "amdgpu_connectors.h"
+
+#include "dce/dce_10_0_d.h"
+#include "dce/dce_10_0_sh_mask.h"
+#include "dce/dce_10_0_enum.h"
+#include "oss/oss_3_0_d.h"
+#include "oss/oss_3_0_sh_mask.h"
+#include "gmc/gmc_8_1_d.h"
+#include "gmc/gmc_8_1_sh_mask.h"
+
+static void dce_v10_0_set_display_funcs(struct amdgpu_device *adev);
+static void dce_v10_0_set_irq_funcs(struct amdgpu_device *adev);
+
+static const u32 crtc_offsets[] =
+{
+       CRTC0_REGISTER_OFFSET,
+       CRTC1_REGISTER_OFFSET,
+       CRTC2_REGISTER_OFFSET,
+       CRTC3_REGISTER_OFFSET,
+       CRTC4_REGISTER_OFFSET,
+       CRTC5_REGISTER_OFFSET,
+       CRTC6_REGISTER_OFFSET
+};
+
+static const u32 hpd_offsets[] =
+{
+       HPD0_REGISTER_OFFSET,
+       HPD1_REGISTER_OFFSET,
+       HPD2_REGISTER_OFFSET,
+       HPD3_REGISTER_OFFSET,
+       HPD4_REGISTER_OFFSET,
+       HPD5_REGISTER_OFFSET
+};
+
+static const uint32_t dig_offsets[] = {
+       DIG0_REGISTER_OFFSET,
+       DIG1_REGISTER_OFFSET,
+       DIG2_REGISTER_OFFSET,
+       DIG3_REGISTER_OFFSET,
+       DIG4_REGISTER_OFFSET,
+       DIG5_REGISTER_OFFSET,
+       DIG6_REGISTER_OFFSET
+};
+
+static const struct {
+       uint32_t        reg;
+       uint32_t        vblank;
+       uint32_t        vline;
+       uint32_t        hpd;
+
+} interrupt_status_offsets[] = { {
+       .reg = mmDISP_INTERRUPT_STATUS,
+       .vblank = DISP_INTERRUPT_STATUS__LB_D1_VBLANK_INTERRUPT_MASK,
+       .vline = DISP_INTERRUPT_STATUS__LB_D1_VLINE_INTERRUPT_MASK,
+       .hpd = DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT_MASK
+}, {
+       .reg = mmDISP_INTERRUPT_STATUS_CONTINUE,
+       .vblank = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VBLANK_INTERRUPT_MASK,
+       .vline = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VLINE_INTERRUPT_MASK,
+       .hpd = DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_INTERRUPT_MASK
+}, {
+       .reg = mmDISP_INTERRUPT_STATUS_CONTINUE2,
+       .vblank = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VBLANK_INTERRUPT_MASK,
+       .vline = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VLINE_INTERRUPT_MASK,
+       .hpd = DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_INTERRUPT_MASK
+}, {
+       .reg = mmDISP_INTERRUPT_STATUS_CONTINUE3,
+       .vblank = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VBLANK_INTERRUPT_MASK,
+       .vline = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VLINE_INTERRUPT_MASK,
+       .hpd = DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_INTERRUPT_MASK
+}, {
+       .reg = mmDISP_INTERRUPT_STATUS_CONTINUE4,
+       .vblank = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VBLANK_INTERRUPT_MASK,
+       .vline = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VLINE_INTERRUPT_MASK,
+       .hpd = DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_INTERRUPT_MASK
+}, {
+       .reg = mmDISP_INTERRUPT_STATUS_CONTINUE5,
+       .vblank = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VBLANK_INTERRUPT_MASK,
+       .vline = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VLINE_INTERRUPT_MASK,
+       .hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK
+} };
+
+static const u32 golden_settings_tonga_a11[] =
+{
+       mmDCI_CLK_CNTL, 0x00000080, 0x00000000,
+       mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070,
+       mmFBC_MISC, 0x1f311fff, 0x12300000,
+       mmHDMI_CONTROL, 0x31000111, 0x00000011,
+};
+
+static void dce_v10_0_init_golden_registers(struct amdgpu_device *adev)
+{
+       switch (adev->asic_type) {
+       case CHIP_TONGA:
+               amdgpu_program_register_sequence(adev,
+                                                golden_settings_tonga_a11,
+                                                (const u32)ARRAY_SIZE(golden_settings_tonga_a11));
+               break;
+       default:
+               break;
+       }
+}
+
+static u32 dce_v10_0_audio_endpt_rreg(struct amdgpu_device *adev,
+                                    u32 block_offset, u32 reg)
+{
+       unsigned long flags;
+       u32 r;
+
+       spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
+       WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
+       r = RREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset);
+       spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);
+
+       return r;
+}
+
+static void dce_v10_0_audio_endpt_wreg(struct amdgpu_device *adev,
+                                     u32 block_offset, u32 reg, u32 v)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
+       WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
+       WREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset, v);
+       spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);
+}
+
+static bool dce_v10_0_is_in_vblank(struct amdgpu_device *adev, int crtc)
+{
+       if (RREG32(mmCRTC_STATUS + crtc_offsets[crtc]) &
+                       CRTC_V_BLANK_START_END__CRTC_V_BLANK_START_MASK)
+               return true;
+       else
+               return false;
+}
+
+static bool dce_v10_0_is_counter_moving(struct amdgpu_device *adev, int crtc)
+{
+       u32 pos1, pos2;
+
+       pos1 = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
+       pos2 = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
+
+       if (pos1 != pos2)
+               return true;
+       else
+               return false;
+}
+
+/**
+ * dce_v10_0_vblank_wait - vblank wait asic callback.
+ *
+ * @adev: amdgpu_device pointer
+ * @crtc: crtc to wait for vblank on
+ *
+ * Wait for vblank on the requested crtc (evergreen+).
+ */
+static void dce_v10_0_vblank_wait(struct amdgpu_device *adev, int crtc)
+{
+       unsigned i = 0;
+
+       if (crtc >= adev->mode_info.num_crtc)
+               return;
+
+       if (!(RREG32(mmCRTC_CONTROL + crtc_offsets[crtc]) & CRTC_CONTROL__CRTC_MASTER_EN_MASK))
+               return;
+
+       /* depending on when we hit vblank, we may be close to active; if so,
+        * wait for another frame.
+        */
+       while (dce_v10_0_is_in_vblank(adev, crtc)) {
+               if (i++ % 100 == 0) {
+                       if (!dce_v10_0_is_counter_moving(adev, crtc))
+                               break;
+               }
+       }
+
+       while (!dce_v10_0_is_in_vblank(adev, crtc)) {
+               if (i++ % 100 == 0) {
+                       if (!dce_v10_0_is_counter_moving(adev, crtc))
+                               break;
+               }
+       }
+}
+
+static u32 dce_v10_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
+{
+       if (crtc >= adev->mode_info.num_crtc)
+               return 0;
+       else
+               return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
+}
+
+/**
+ * dce_v10_0_page_flip - pageflip callback.
+ *
+ * @adev: amdgpu_device pointer
+ * @crtc_id: crtc to cleanup pageflip on
+ * @crtc_base: new address of the crtc (GPU MC address)
+ *
+ * Does the actual pageflip (evergreen+).
+ * During the flip we take the graphics update lock, program the new scanout
+ * addresses, wait for the update pending bit to go high, and then release
+ * the lock so the double buffered update can take place during vblank.
+ */
+static void dce_v10_0_page_flip(struct amdgpu_device *adev,
+                             int crtc_id, u64 crtc_base)
+{
+       struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
+       u32 tmp = RREG32(mmGRPH_UPDATE + amdgpu_crtc->crtc_offset);
+       int i;
+
+       /* Lock the graphics update lock */
+       tmp = REG_SET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK, 1);
+       WREG32(mmGRPH_UPDATE + amdgpu_crtc->crtc_offset, tmp);
+
+       /* update the scanout addresses */
+       WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
+              upper_32_bits(crtc_base));
+       WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
+              lower_32_bits(crtc_base));
+
+       WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
+              upper_32_bits(crtc_base));
+       WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
+              lower_32_bits(crtc_base));
+
+       /* Wait for update_pending to go high. */
+       for (i = 0; i < adev->usec_timeout; i++) {
+               if (RREG32(mmGRPH_UPDATE + amdgpu_crtc->crtc_offset) &
+                               GRPH_UPDATE__GRPH_SURFACE_UPDATE_PENDING_MASK)
+                       break;
+               udelay(1);
+       }
+       DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
+
+       /* Unlock the lock, so double-buffering can take place inside vblank */
+       tmp = REG_SET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK, 0);
+       WREG32(mmGRPH_UPDATE + amdgpu_crtc->crtc_offset, tmp);
+}
+
+static int dce_v10_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
+                                       u32 *vbl, u32 *position)
+{
+       if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
+               return -EINVAL;
+
+       *vbl = RREG32(mmCRTC_V_BLANK_START_END + crtc_offsets[crtc]);
+       *position = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
+
+       return 0;
+}
+
+/**
+ * dce_v10_0_hpd_sense - hpd sense callback.
+ *
+ * @adev: amdgpu_device pointer
+ * @hpd: hpd (hotplug detect) pin
+ *
+ * Checks if a digital monitor is connected (evergreen+).
+ * Returns true if connected, false if not connected.
+ */
+static bool dce_v10_0_hpd_sense(struct amdgpu_device *adev,
+                              enum amdgpu_hpd_id hpd)
+{
+       int idx;
+       bool connected = false;
+
+       switch (hpd) {
+       case AMDGPU_HPD_1:
+               idx = 0;
+               break;
+       case AMDGPU_HPD_2:
+               idx = 1;
+               break;
+       case AMDGPU_HPD_3:
+               idx = 2;
+               break;
+       case AMDGPU_HPD_4:
+               idx = 3;
+               break;
+       case AMDGPU_HPD_5:
+               idx = 4;
+               break;
+       case AMDGPU_HPD_6:
+               idx = 5;
+               break;
+       default:
+               return connected;
+       }
+
+       if (RREG32(mmDC_HPD_INT_STATUS + hpd_offsets[idx]) &
+           DC_HPD_INT_STATUS__DC_HPD_SENSE_MASK)
+               connected = true;
+
+       return connected;
+}
+
+/**
+ * dce_v10_0_hpd_set_polarity - hpd set polarity callback.
+ *
+ * @adev: amdgpu_device pointer
+ * @hpd: hpd (hotplug detect) pin
+ *
+ * Set the polarity of the hpd pin (evergreen+).
+ */
+static void dce_v10_0_hpd_set_polarity(struct amdgpu_device *adev,
+                                     enum amdgpu_hpd_id hpd)
+{
+       u32 tmp;
+       bool connected = dce_v10_0_hpd_sense(adev, hpd);
+       int idx;
+
+       switch (hpd) {
+       case AMDGPU_HPD_1:
+               idx = 0;
+               break;
+       case AMDGPU_HPD_2:
+               idx = 1;
+               break;
+       case AMDGPU_HPD_3:
+               idx = 2;
+               break;
+       case AMDGPU_HPD_4:
+               idx = 3;
+               break;
+       case AMDGPU_HPD_5:
+               idx = 4;
+               break;
+       case AMDGPU_HPD_6:
+               idx = 5;
+               break;
+       default:
+               return;
+       }
+
+       tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx]);
+       if (connected)
+               tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY, 0);
+       else
+               tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY, 1);
+       WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx], tmp);
+}
+
+/**
+ * dce_v10_0_hpd_init - hpd setup callback.
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Setup the hpd pins used by the card (evergreen+).
+ * Enable the pin, set the polarity, and enable the hpd interrupts.
+ */
+static void dce_v10_0_hpd_init(struct amdgpu_device *adev)
+{
+       struct drm_device *dev = adev->ddev;
+       struct drm_connector *connector;
+       u32 tmp;
+       int idx;
+
+       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+               struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
+
+               if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
+                   connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
+                       /* don't try to enable hpd on eDP or LVDS to avoid breaking the
+                        * aux dp channel on imac and to help (but not completely fix)
+                        * https://bugzilla.redhat.com/show_bug.cgi?id=726143
+                        * also avoid interrupt storms during dpms.
+                        */
+                       continue;
+               }
+
+               switch (amdgpu_connector->hpd.hpd) {
+               case AMDGPU_HPD_1:
+                       idx = 0;
+                       break;
+               case AMDGPU_HPD_2:
+                       idx = 1;
+                       break;
+               case AMDGPU_HPD_3:
+                       idx = 2;
+                       break;
+               case AMDGPU_HPD_4:
+                       idx = 3;
+                       break;
+               case AMDGPU_HPD_5:
+                       idx = 4;
+                       break;
+               case AMDGPU_HPD_6:
+                       idx = 5;
+                       break;
+               default:
+                       continue;
+               }
+
+               tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[idx]);
+               tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 1);
+               WREG32(mmDC_HPD_CONTROL + hpd_offsets[idx], tmp);
+
+               tmp = RREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[idx]);
+               tmp = REG_SET_FIELD(tmp, DC_HPD_TOGGLE_FILT_CNTL,
+                                   DC_HPD_CONNECT_INT_DELAY,
+                                   AMDGPU_HPD_CONNECT_INT_DELAY_IN_MS);
+               tmp = REG_SET_FIELD(tmp, DC_HPD_TOGGLE_FILT_CNTL,
+                                   DC_HPD_DISCONNECT_INT_DELAY,
+                                   AMDGPU_HPD_DISCONNECT_INT_DELAY_IN_MS);
+               WREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[idx], tmp);
+
+               dce_v10_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
+               amdgpu_irq_get(adev, &adev->hpd_irq,
+                              amdgpu_connector->hpd.hpd);
+       }
+}
+
+/**
+ * dce_v10_0_hpd_fini - hpd tear down callback.
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Tear down the hpd pins used by the card (evergreen+).
+ * Disable the hpd interrupts.
+ */
+static void dce_v10_0_hpd_fini(struct amdgpu_device *adev)
+{
+       struct drm_device *dev = adev->ddev;
+       struct drm_connector *connector;
+       u32 tmp;
+       int idx;
+
+       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+               struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
+
+               switch (amdgpu_connector->hpd.hpd) {
+               case AMDGPU_HPD_1:
+                       idx = 0;
+                       break;
+               case AMDGPU_HPD_2:
+                       idx = 1;
+                       break;
+               case AMDGPU_HPD_3:
+                       idx = 2;
+                       break;
+               case AMDGPU_HPD_4:
+                       idx = 3;
+                       break;
+               case AMDGPU_HPD_5:
+                       idx = 4;
+                       break;
+               case AMDGPU_HPD_6:
+                       idx = 5;
+                       break;
+               default:
+                       continue;
+               }
+
+               tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[idx]);
+               tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 0);
+               WREG32(mmDC_HPD_CONTROL + hpd_offsets[idx], tmp);
+
+               amdgpu_irq_put(adev, &adev->hpd_irq,
+                              amdgpu_connector->hpd.hpd);
+       }
+}
+
+static u32 dce_v10_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
+{
+       return mmDC_GPIO_HPD_A;
+}
+
+static bool dce_v10_0_is_display_hung(struct amdgpu_device *adev)
+{
+       u32 crtc_hung = 0;
+       u32 crtc_status[6];
+       u32 i, j, tmp;
+
+       for (i = 0; i < adev->mode_info.num_crtc; i++) {
+               tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
+               if (REG_GET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN)) {
+                       crtc_status[i] = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]);
+                       crtc_hung |= (1 << i);
+               }
+       }
+
+       for (j = 0; j < 10; j++) {
+               for (i = 0; i < adev->mode_info.num_crtc; i++) {
+                       if (crtc_hung & (1 << i)) {
+                               tmp = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]);
+                               if (tmp != crtc_status[i])
+                                       crtc_hung &= ~(1 << i);
+                       }
+               }
+               if (crtc_hung == 0)
+                       return false;
+               udelay(100);
+       }
+
+       return true;
+}
+
+static void dce_v10_0_stop_mc_access(struct amdgpu_device *adev,
+                                    struct amdgpu_mode_mc_save *save)
+{
+       u32 crtc_enabled, tmp;
+       int i;
+
+       save->vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
+       save->vga_hdp_control = RREG32(mmVGA_HDP_CONTROL);
+
+       /* disable VGA render */
+       tmp = RREG32(mmVGA_RENDER_CONTROL);
+       tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
+       WREG32(mmVGA_RENDER_CONTROL, tmp);
+
+       /* blank the display controllers */
+       for (i = 0; i < adev->mode_info.num_crtc; i++) {
+               crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]),
+                                            CRTC_CONTROL, CRTC_MASTER_EN);
+               if (crtc_enabled) {
+#if 0
+                       u32 frame_count;
+                       int j;
+
+                       save->crtc_enabled[i] = true;
+                       tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]);
+                       if (REG_GET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN) == 0) {
+                               amdgpu_display_vblank_wait(adev, i);
+                               WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+                               tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 1);
+                               WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
+                               WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
+                       }
+                       /* wait for the next frame */
+                       frame_count = amdgpu_display_vblank_get_counter(adev, i);
+                       for (j = 0; j < adev->usec_timeout; j++) {
+                               if (amdgpu_display_vblank_get_counter(adev, i) != frame_count)
+                                       break;
+                               udelay(1);
+                       }
+                       tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
+                       if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK) == 0) {
+                               tmp = REG_SET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK, 1);
+                               WREG32(mmGRPH_UPDATE + crtc_offsets[i], tmp);
+                       }
+                       tmp = RREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i]);
+                       if (REG_GET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK) == 0) {
+                               tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK, 1);
+                               WREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
+                       }
+#else
+                       /* XXX this is a hack to avoid strange behavior with EFI on certain systems */
+                       WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+                       tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
+                       tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN, 0);
+                       WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
+                       WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
+                       save->crtc_enabled[i] = false;
+                       /* ***** */
+#endif
+               } else {
+                       save->crtc_enabled[i] = false;
+               }
+       }
+}
+
+static void dce_v10_0_resume_mc_access(struct amdgpu_device *adev,
+                                      struct amdgpu_mode_mc_save *save)
+{
+       u32 tmp, frame_count;
+       int i, j;
+
+       /* update crtc base addresses */
+       for (i = 0; i < adev->mode_info.num_crtc; i++) {
+               WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
+                      upper_32_bits(adev->mc.vram_start));
+               WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
+                      upper_32_bits(adev->mc.vram_start));
+               WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
+                      (u32)adev->mc.vram_start);
+               WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i],
+                      (u32)adev->mc.vram_start);
+
+               if (save->crtc_enabled[i]) {
+                       tmp = RREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i]);
+                       if (REG_GET_FIELD(tmp, MASTER_UPDATE_MODE, MASTER_UPDATE_MODE) != 3) {
+                               tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_MODE, MASTER_UPDATE_MODE, 3);
+                               WREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i], tmp);
+                       }
+                       tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
+                       if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK)) {
+                               tmp = REG_SET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK, 0);
+                               WREG32(mmGRPH_UPDATE + crtc_offsets[i], tmp);
+                       }
+                       tmp = RREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i]);
+                       if (REG_GET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK)) {
+                               tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK, 0);
+                               WREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
+                       }
+                       for (j = 0; j < adev->usec_timeout; j++) {
+                               tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
+                               if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_SURFACE_UPDATE_PENDING) == 0)
+                                       break;
+                               udelay(1);
+                       }
+                       tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]);
+                       tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 0);
+                       WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+                       WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
+                       WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
+                       /* wait for the next frame */
+                       frame_count = amdgpu_display_vblank_get_counter(adev, i);
+                       for (j = 0; j < adev->usec_timeout; j++) {
+                               if (amdgpu_display_vblank_get_counter(adev, i) != frame_count)
+                                       break;
+                               udelay(1);
+                       }
+               }
+       }
+
+       WREG32(mmVGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(adev->mc.vram_start));
+       WREG32(mmVGA_MEMORY_BASE_ADDRESS, lower_32_bits(adev->mc.vram_start));
+
+       /* Unlock vga access */
+       WREG32(mmVGA_HDP_CONTROL, save->vga_hdp_control);
+       mdelay(1);
+       WREG32(mmVGA_RENDER_CONTROL, save->vga_render_control);
+}
+
+static void dce_v10_0_set_vga_render_state(struct amdgpu_device *adev,
+                                          bool render)
+{
+       u32 tmp;
+
+       /* Lockout access through VGA aperture */
+       tmp = RREG32(mmVGA_HDP_CONTROL);
+       if (render)
+               tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 0);
+       else
+               tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
+       WREG32(mmVGA_HDP_CONTROL, tmp);
+
+       /* disable VGA render */
+       tmp = RREG32(mmVGA_RENDER_CONTROL);
+       if (render)
+               tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 1);
+       else
+               tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
+       WREG32(mmVGA_RENDER_CONTROL, tmp);
+}
+
+static void dce_v10_0_program_fmt(struct drm_encoder *encoder)
+{
+       struct drm_device *dev = encoder->dev;
+       struct amdgpu_device *adev = dev->dev_private;
+       struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+       struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
+       struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
+       int bpc = 0;
+       u32 tmp = 0;
+       enum amdgpu_connector_dither dither = AMDGPU_FMT_DITHER_DISABLE;
+
+       if (connector) {
+               struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
+               bpc = amdgpu_connector_get_monitor_bpc(connector);
+               dither = amdgpu_connector->dither;
+       }
+
+       /* LVDS/eDP FMT is set up by atom */
+       if (amdgpu_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
+               return;
+
+       /* not needed for analog */
+       if ((amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1) ||
+           (amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2))
+               return;
+
+       if (bpc == 0)
+               return;
+
+       switch (bpc) {
+       case 6:
+               if (dither == AMDGPU_FMT_DITHER_ENABLE) {
+                       /* XXX sort out optimal dither settings */
+                       tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_FRAME_RANDOM_ENABLE, 1);
+                       tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_HIGHPASS_RANDOM_ENABLE, 1);
+                       tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_EN, 1);
+                       tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_DEPTH, 0);
+               } else {
+                       tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, 1);
+                       tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_DEPTH, 0);
+               }
+               break;
+       case 8:
+               if (dither == AMDGPU_FMT_DITHER_ENABLE) {
+                       /* XXX sort out optimal dither settings */
+                       tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_FRAME_RANDOM_ENABLE, 1);
+                       tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_HIGHPASS_RANDOM_ENABLE, 1);
+                       tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_RGB_RANDOM_ENABLE, 1);
+                       tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_EN, 1);
+                       tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_DEPTH, 1);
+               } else {
+                       tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, 1);
+                       tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_DEPTH, 1);
+               }
+               break;
+       case 10:
+               if (dither == AMDGPU_FMT_DITHER_ENABLE) {
+                       /* XXX sort out optimal dither settings */
+                       tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_FRAME_RANDOM_ENABLE, 1);
+                       tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_HIGHPASS_RANDOM_ENABLE, 1);
+                       tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_RGB_RANDOM_ENABLE, 1);
+                       tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_EN, 1);
+                       tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_DEPTH, 2);
+               } else {
+                       tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, 1);
+                       tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_DEPTH, 2);
+               }
+               break;
+       default:
+               /* not needed */
+               break;
+       }
+
+       WREG32(mmFMT_BIT_DEPTH_CONTROL + amdgpu_crtc->crtc_offset, tmp);
+}
+
+
+/* display watermark setup */
+/**
+ * dce_v10_0_line_buffer_adjust - Set up the line buffer
+ *
+ * @adev: amdgpu_device pointer
+ * @amdgpu_crtc: the selected display controller
+ * @mode: the current display mode on the selected display
+ * controller
+ *
+ * Set up the line buffer allocation for
+ * the selected display controller (CIK).
+ * Returns the line buffer size in pixels.
+ */
+static u32 dce_v10_0_line_buffer_adjust(struct amdgpu_device *adev,
+                                      struct amdgpu_crtc *amdgpu_crtc,
+                                      struct drm_display_mode *mode)
+{
+       u32 tmp, buffer_alloc, i, mem_cfg;
+       u32 pipe_offset = amdgpu_crtc->crtc_id;
+       /*
+        * Line Buffer Setup
+        * There are 6 line buffers, one for each display controller.
+        * There are 3 partitions per LB. Select the number of partitions
+        * to enable based on the display width.  For display widths larger
+        * than 4096, you need to use 2 display controllers and combine
+        * them using the stereo blender.
+        */
+       if (amdgpu_crtc->base.enabled && mode) {
+               if (mode->crtc_hdisplay < 1920) {
+                       mem_cfg = 1;
+                       buffer_alloc = 2;
+               } else if (mode->crtc_hdisplay < 2560) {
+                       mem_cfg = 2;
+                       buffer_alloc = 2;
+               } else if (mode->crtc_hdisplay < 4096) {
+                       mem_cfg = 0;
+                       buffer_alloc = (adev->flags & AMDGPU_IS_APU) ? 2 : 4;
+               } else {
+                       DRM_DEBUG_KMS("Mode too big for LB!\n");
+                       mem_cfg = 0;
+                       buffer_alloc = (adev->flags & AMDGPU_IS_APU) ? 2 : 4;
+               }
+       } else {
+               mem_cfg = 1;
+               buffer_alloc = 0;
+       }
+
+       tmp = RREG32(mmLB_MEMORY_CTRL + amdgpu_crtc->crtc_offset);
+       tmp = REG_SET_FIELD(tmp, LB_MEMORY_CTRL, LB_MEMORY_CONFIG, mem_cfg);
+       WREG32(mmLB_MEMORY_CTRL + amdgpu_crtc->crtc_offset, tmp);
+
+       tmp = RREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset);
+       tmp = REG_SET_FIELD(tmp, PIPE0_DMIF_BUFFER_CONTROL, DMIF_BUFFERS_ALLOCATED, buffer_alloc);
+       WREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset, tmp);
+
+       for (i = 0; i < adev->usec_timeout; i++) {
+               tmp = RREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset);
+               if (REG_GET_FIELD(tmp, PIPE0_DMIF_BUFFER_CONTROL, DMIF_BUFFERS_ALLOCATION_COMPLETED))
+                       break;
+               udelay(1);
+       }
+
+       if (amdgpu_crtc->base.enabled && mode) {
+               switch (mem_cfg) {
+               case 0:
+               default:
+                       return 4096 * 2;
+               case 1:
+                       return 1920 * 2;
+               case 2:
+                       return 2560 * 2;
+               }
+       }
+
+       /* controller not enabled, so no lb used */
+       return 0;
+}
+
+/**
+ * cik_get_number_of_dram_channels - get the number of dram channels
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Look up the number of video ram channels (CIK).
+ * Used for display watermark bandwidth calculations
+ * Returns the number of dram channels
+ */
+static u32 cik_get_number_of_dram_channels(struct amdgpu_device *adev)
+{
+       u32 tmp = RREG32(mmMC_SHARED_CHMAP);
+
+       switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) {
+       case 0:
+       default:
+               return 1;
+       case 1:
+               return 2;
+       case 2:
+               return 4;
+       case 3:
+               return 8;
+       case 4:
+               return 3;
+       case 5:
+               return 6;
+       case 6:
+               return 10;
+       case 7:
+               return 12;
+       case 8:
+               return 16;
+       }
+}
+
+struct dce10_wm_params {
+       u32 dram_channels; /* number of dram channels */
+       u32 yclk;          /* bandwidth per dram data pin in kHz */
+       u32 sclk;          /* engine clock in kHz */
+       u32 disp_clk;      /* display clock in kHz */
+       u32 src_width;     /* viewport width */
+       u32 active_time;   /* active display time in ns */
+       u32 blank_time;    /* blank time in ns */
+       bool interlaced;    /* mode is interlaced */
+       fixed20_12 vsc;    /* vertical scale ratio */
+       u32 num_heads;     /* number of active crtcs */
+       u32 bytes_per_pixel; /* bytes per pixel display + overlay */
+       u32 lb_size;       /* line buffer allocated to pipe */
+       u32 vtaps;         /* vertical scaler taps */
+};
+
+/**
+ * dce_v10_0_dram_bandwidth - get the dram bandwidth
+ *
+ * @wm: watermark calculation data
+ *
+ * Calculate the raw dram bandwidth (CIK).
+ * Used for display watermark bandwidth calculations
+ * Returns the dram bandwidth in MBytes/s
+ */
+static u32 dce_v10_0_dram_bandwidth(struct dce10_wm_params *wm)
+{
+       /* Calculate raw DRAM Bandwidth */
+       fixed20_12 dram_efficiency; /* 0.7 */
+       fixed20_12 yclk, dram_channels, bandwidth;
+       fixed20_12 a;
+
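+       /* raw bandwidth (MB/s) = yclk (MHz) * dram channels * 4 bytes per channel * 0.7 efficiency */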
+       a.full = dfixed_const(1000);
+       yclk.full = dfixed_const(wm->yclk);
+       yclk.full = dfixed_div(yclk, a);
+       dram_channels.full = dfixed_const(wm->dram_channels * 4);
+       a.full = dfixed_const(10);
+       dram_efficiency.full = dfixed_const(7);
+       dram_efficiency.full = dfixed_div(dram_efficiency, a);
+       bandwidth.full = dfixed_mul(dram_channels, yclk);
+       bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);
+
+       return dfixed_trunc(bandwidth);
+}
+
+/**
+ * dce_v10_0_dram_bandwidth_for_display - get the dram bandwidth for display
+ *
+ * @wm: watermark calculation data
+ *
+ * Calculate the dram bandwidth used for display (CIK).
+ * Used for display watermark bandwidth calculations
+ * Returns the dram bandwidth for display in MBytes/s
+ */
+static u32 dce_v10_0_dram_bandwidth_for_display(struct dce10_wm_params *wm)
+{
+       /* Calculate DRAM Bandwidth and the part allocated to display. */
+       fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
+       fixed20_12 yclk, dram_channels, bandwidth;
+       fixed20_12 a;
+
+       a.full = dfixed_const(1000);
+       yclk.full = dfixed_const(wm->yclk);
+       yclk.full = dfixed_div(yclk, a);
+       dram_channels.full = dfixed_const(wm->dram_channels * 4);
+       a.full = dfixed_const(10);
+       disp_dram_allocation.full = dfixed_const(3); /* XXX worst case value 0.3 */
+       disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
+       bandwidth.full = dfixed_mul(dram_channels, yclk);
+       bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);
+
+       return dfixed_trunc(bandwidth);
+}
+
+/**
+ * dce_v10_0_data_return_bandwidth - get the data return bandwidth
+ *
+ * @wm: watermark calculation data
+ *
+ * Calculate the data return bandwidth used for display (CIK).
+ * Used for display watermark bandwidth calculations
+ * Returns the data return bandwidth in MBytes/s
+ */
+static u32 dce_v10_0_data_return_bandwidth(struct dce10_wm_params *wm)
+{
+       /* Calculate the display Data return Bandwidth */
+       fixed20_12 return_efficiency; /* 0.8 */
+       fixed20_12 sclk, bandwidth;
+       fixed20_12 a;
+
+       a.full = dfixed_const(1000);
+       sclk.full = dfixed_const(wm->sclk);
+       sclk.full = dfixed_div(sclk, a);
+       a.full = dfixed_const(10);
+       return_efficiency.full = dfixed_const(8);
+       return_efficiency.full = dfixed_div(return_efficiency, a);
+       a.full = dfixed_const(32);
+       bandwidth.full = dfixed_mul(a, sclk);
+       bandwidth.full = dfixed_mul(bandwidth, return_efficiency);
+
+       return dfixed_trunc(bandwidth);
+}
+
+/**
+ * dce_v10_0_dmif_request_bandwidth - get the dmif bandwidth
+ *
+ * @wm: watermark calculation data
+ *
+ * Calculate the dmif bandwidth used for display (CIK).
+ * Used for display watermark bandwidth calculations
+ * Returns the dmif bandwidth in MBytes/s
+ */
+static u32 dce_v10_0_dmif_request_bandwidth(struct dce10_wm_params *wm)
+{
+       /* Calculate the DMIF Request Bandwidth */
+       fixed20_12 disp_clk_request_efficiency; /* 0.8 */
+       fixed20_12 disp_clk, bandwidth;
+       fixed20_12 a, b;
+
+       a.full = dfixed_const(1000);
+       disp_clk.full = dfixed_const(wm->disp_clk);
+       disp_clk.full = dfixed_div(disp_clk, a);
+       a.full = dfixed_const(32);
+       b.full = dfixed_mul(a, disp_clk);
+
+       a.full = dfixed_const(10);
+       disp_clk_request_efficiency.full = dfixed_const(8);
+       disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);
+
+       bandwidth.full = dfixed_mul(b, disp_clk_request_efficiency);
+
+       return dfixed_trunc(bandwidth);
+}
+
+/**
+ * dce_v10_0_available_bandwidth - get the min available bandwidth
+ *
+ * @wm: watermark calculation data
+ *
+ * Calculate the min available bandwidth used for display (CIK).
+ * Used for display watermark bandwidth calculations
+ * Returns the min available bandwidth in MBytes/s
+ */
+static u32 dce_v10_0_available_bandwidth(struct dce10_wm_params *wm)
+{
+       /* Calculate the Available bandwidth. Display can use this temporarily but not on average. */
+       u32 dram_bandwidth = dce_v10_0_dram_bandwidth(wm);
+       u32 data_return_bandwidth = dce_v10_0_data_return_bandwidth(wm);
+       u32 dmif_req_bandwidth = dce_v10_0_dmif_request_bandwidth(wm);
+
+       return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
+}
+
+/**
+ * dce_v10_0_average_bandwidth - get the average available bandwidth
+ *
+ * @wm: watermark calculation data
+ *
+ * Calculate the average available bandwidth used for display (CIK).
+ * Used for display watermark bandwidth calculations
+ * Returns the average available bandwidth in MBytes/s
+ */
+static u32 dce_v10_0_average_bandwidth(struct dce10_wm_params *wm)
+{
+       /* Calculate the display mode Average Bandwidth.
+        * DisplayMode should contain the source and destination dimensions,
+        * timing, etc.
+        */
+       fixed20_12 bpp;
+       fixed20_12 line_time;
+       fixed20_12 src_width;
+       fixed20_12 bandwidth;
+       fixed20_12 a;
+
+       a.full = dfixed_const(1000);
+       line_time.full = dfixed_const(wm->active_time + wm->blank_time);
+       line_time.full = dfixed_div(line_time, a);
+       bpp.full = dfixed_const(wm->bytes_per_pixel);
+       src_width.full = dfixed_const(wm->src_width);
+       bandwidth.full = dfixed_mul(src_width, bpp);
+       bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
+       bandwidth.full = dfixed_div(bandwidth, line_time);
+
+       return dfixed_trunc(bandwidth);
+}
+
+/**
+ * dce_v10_0_latency_watermark - get the latency watermark
+ *
+ * @wm: watermark calculation data
+ *
+ * Calculate the latency watermark (CIK).
+ * Used for display watermark bandwidth calculations
+ * Returns the latency watermark in ns
+ */
+static u32 dce_v10_0_latency_watermark(struct dce10_wm_params *wm)
+{
+       /* First calculate the latency in ns */
+       u32 mc_latency = 2000; /* 2000 ns. */
+       u32 available_bandwidth = dce_v10_0_available_bandwidth(wm);
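+       /* time (ns) to return a 4KB chunk or a 512 byte cursor line pair at the available bandwidth */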
+       u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
+       u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
+       u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
+       u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
+               (wm->num_heads * cursor_line_pair_return_time);
+       u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
+       u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
+       u32 tmp, dmif_size = 12288;
+       fixed20_12 a, b, c;
+
+       if (wm->num_heads == 0)
+               return 0;
+
+       a.full = dfixed_const(2);
+       b.full = dfixed_const(1);
+       if ((wm->vsc.full > a.full) ||
+           ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
+           (wm->vtaps >= 5) ||
+           ((wm->vsc.full >= a.full) && wm->interlaced))
+               max_src_lines_per_dst_line = 4;
+       else
+               max_src_lines_per_dst_line = 2;
+
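+       /* the line buffer fill rate is limited by the per-head share of the available
+        * bandwidth and by how much data the dmif buffer can supply over the memory
+        * latency window
+        */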
+       a.full = dfixed_const(available_bandwidth);
+       b.full = dfixed_const(wm->num_heads);
+       a.full = dfixed_div(a, b);
+
+       b.full = dfixed_const(mc_latency + 512);
+       c.full = dfixed_const(wm->disp_clk);
+       b.full = dfixed_div(b, c);
+
+       c.full = dfixed_const(dmif_size);
+       b.full = dfixed_div(c, b);
+
+       tmp = min(dfixed_trunc(a), dfixed_trunc(b));
+
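+       /* the line buffer can also be filled at most one pixel per display clock */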
+       b.full = dfixed_const(1000);
+       c.full = dfixed_const(wm->disp_clk);
+       b.full = dfixed_div(c, b);
+       c.full = dfixed_const(wm->bytes_per_pixel);
+       b.full = dfixed_mul(b, c);
+
+       lb_fill_bw = min(tmp, dfixed_trunc(b));
+
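+       /* time (ns) to fetch the source pixels needed for one destination line at the lb fill rate */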
+       a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
+       b.full = dfixed_const(1000);
+       c.full = dfixed_const(lb_fill_bw);
+       b.full = dfixed_div(c, b);
+       a.full = dfixed_div(a, b);
+       line_fill_time = dfixed_trunc(a);
+
+       if (line_fill_time < wm->active_time)
+               return latency;
+       else
+               return latency + (line_fill_time - wm->active_time);
+
+}
+
+/**
+ * dce_v10_0_average_bandwidth_vs_dram_bandwidth_for_display - check
+ * average and available dram bandwidth
+ *
+ * @wm: watermark calculation data
+ *
+ * Check if the display average bandwidth fits in the display
+ * dram bandwidth (CIK).
+ * Used for display watermark bandwidth calculations
+ * Returns true if the display fits, false if not.
+ */
+static bool dce_v10_0_average_bandwidth_vs_dram_bandwidth_for_display(struct dce10_wm_params *wm)
+{
+       if (dce_v10_0_average_bandwidth(wm) <=
+           (dce_v10_0_dram_bandwidth_for_display(wm) / wm->num_heads))
+               return true;
+       else
+               return false;
+}
+
+/**
+ * dce_v10_0_average_bandwidth_vs_available_bandwidth - check
+ * average and available bandwidth
+ *
+ * @wm: watermark calculation data
+ *
+ * Check if the display average bandwidth fits in the display
+ * available bandwidth (CIK).
+ * Used for display watermark bandwidth calculations
+ * Returns true if the display fits, false if not.
+ */
+static bool dce_v10_0_average_bandwidth_vs_available_bandwidth(struct dce10_wm_params *wm)
+{
+       if (dce_v10_0_average_bandwidth(wm) <=
+           (dce_v10_0_available_bandwidth(wm) / wm->num_heads))
+               return true;
+       else
+               return false;
+}
+
+/**
+ * dce_v10_0_check_latency_hiding - check latency hiding
+ *
+ * @wm: watermark calculation data
+ *
+ * Check latency hiding (CIK).
+ * Used for display watermark bandwidth calculations
+ * Returns true if the display fits, false if not.
+ */
+static bool dce_v10_0_check_latency_hiding(struct dce10_wm_params *wm)
+{
+       u32 lb_partitions = wm->lb_size / wm->src_width;
+       u32 line_time = wm->active_time + wm->blank_time;
+       u32 latency_tolerant_lines;
+       u32 latency_hiding;
+       fixed20_12 a;
+
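+       /* at most one buffered line can hide latency when downscaling vertically or
+        * when the line buffer holds too few partitions; otherwise two lines are available
+        */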
+       a.full = dfixed_const(1);
+       if (wm->vsc.full > a.full)
+               latency_tolerant_lines = 1;
+       else {
+               if (lb_partitions <= (wm->vtaps + 1))
+                       latency_tolerant_lines = 1;
+               else
+                       latency_tolerant_lines = 2;
+       }
+
+       latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);
+
+       if (dce_v10_0_latency_watermark(wm) <= latency_hiding)
+               return true;
+       else
+               return false;
+}
+
+/**
+ * dce_v10_0_program_watermarks - program display watermarks
+ *
+ * @adev: amdgpu_device pointer
+ * @amdgpu_crtc: the selected display controller
+ * @lb_size: line buffer size
+ * @num_heads: number of display controllers in use
+ *
+ * Calculate and program the display watermarks for the
+ * selected display controller (CIK).
+ */
+static void dce_v10_0_program_watermarks(struct amdgpu_device *adev,
+                                       struct amdgpu_crtc *amdgpu_crtc,
+                                       u32 lb_size, u32 num_heads)
+{
+       struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
+       struct dce10_wm_params wm_low, wm_high;
+       u32 pixel_period;
+       u32 line_time = 0;
+       u32 latency_watermark_a = 0, latency_watermark_b = 0;
+       u32 tmp, wm_mask;
+
+       if (amdgpu_crtc->base.enabled && num_heads && mode) {
+               pixel_period = 1000000 / (u32)mode->clock;
+               line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
+
+               /* watermark for high clocks */
+               if (adev->pm.dpm_enabled) {
+                       wm_high.yclk =
+                               amdgpu_dpm_get_mclk(adev, false) * 10;
+                       wm_high.sclk =
+                               amdgpu_dpm_get_sclk(adev, false) * 10;
+               } else {
+                       wm_high.yclk = adev->pm.current_mclk * 10;
+                       wm_high.sclk = adev->pm.current_sclk * 10;
+               }
+
+               wm_high.disp_clk = mode->clock;
+               wm_high.src_width = mode->crtc_hdisplay;
+               wm_high.active_time = mode->crtc_hdisplay * pixel_period;
+               wm_high.blank_time = line_time - wm_high.active_time;
+               wm_high.interlaced = false;
+               if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+                       wm_high.interlaced = true;
+               wm_high.vsc = amdgpu_crtc->vsc;
+               wm_high.vtaps = 1;
+               if (amdgpu_crtc->rmx_type != RMX_OFF)
+                       wm_high.vtaps = 2;
+               wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */
+               wm_high.lb_size = lb_size;
+               wm_high.dram_channels = cik_get_number_of_dram_channels(adev);
+               wm_high.num_heads = num_heads;
+
+               /* set for high clocks */
+               latency_watermark_a = min(dce_v10_0_latency_watermark(&wm_high), (u32)65535);
+
+               /* possibly force display priority to high */
+               /* should really do this at mode validation time... */
+               if (!dce_v10_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) ||
+                   !dce_v10_0_average_bandwidth_vs_available_bandwidth(&wm_high) ||
+                   !dce_v10_0_check_latency_hiding(&wm_high) ||
+                   (adev->mode_info.disp_priority == 2)) {
+                       DRM_DEBUG_KMS("force priority to high\n");
+               }
+
+               /* watermark for low clocks */
+               if (adev->pm.dpm_enabled) {
+                       wm_low.yclk =
+                               amdgpu_dpm_get_mclk(adev, true) * 10;
+                       wm_low.sclk =
+                               amdgpu_dpm_get_sclk(adev, true) * 10;
+               } else {
+                       wm_low.yclk = adev->pm.current_mclk * 10;
+                       wm_low.sclk = adev->pm.current_sclk * 10;
+               }
+
+               wm_low.disp_clk = mode->clock;
+               wm_low.src_width = mode->crtc_hdisplay;
+               wm_low.active_time = mode->crtc_hdisplay * pixel_period;
+               wm_low.blank_time = line_time - wm_low.active_time;
+               wm_low.interlaced = false;
+               if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+                       wm_low.interlaced = true;
+               wm_low.vsc = amdgpu_crtc->vsc;
+               wm_low.vtaps = 1;
+               if (amdgpu_crtc->rmx_type != RMX_OFF)
+                       wm_low.vtaps = 2;
+               wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */
+               wm_low.lb_size = lb_size;
+               wm_low.dram_channels = cik_get_number_of_dram_channels(adev);
+               wm_low.num_heads = num_heads;
+
+               /* set for low clocks */
+               latency_watermark_b = min(dce_v10_0_latency_watermark(&wm_low), (u32)65535);
+
+               /* possibly force display priority to high */
+               /* should really do this at mode validation time... */
+               if (!dce_v10_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) ||
+                   !dce_v10_0_average_bandwidth_vs_available_bandwidth(&wm_low) ||
+                   !dce_v10_0_check_latency_hiding(&wm_low) ||
+                   (adev->mode_info.disp_priority == 2)) {
+                       DRM_DEBUG_KMS("force priority to high\n");
+               }
+       }
+
+       /* select wm A */
+       wm_mask = RREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset);
+       tmp = REG_SET_FIELD(wm_mask, DPG_WATERMARK_MASK_CONTROL, URGENCY_WATERMARK_MASK, 1);
+       WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp);
+       tmp = RREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset);
+       tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_LOW_WATERMARK, latency_watermark_a);
+       tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_HIGH_WATERMARK, line_time);
+       WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset, tmp);
+       /* select wm B */
+       tmp = REG_SET_FIELD(wm_mask, DPG_WATERMARK_MASK_CONTROL, URGENCY_WATERMARK_MASK, 2);
+       WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp);
+       tmp = RREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset);
+       tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_LOW_WATERMARK, latency_watermark_b);
+       tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_HIGH_WATERMARK, line_time);
+       WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset, tmp);
+       /* restore original selection */
+       WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, wm_mask);
+
+       /* save values for DPM */
+       amdgpu_crtc->line_time = line_time;
+       amdgpu_crtc->wm_high = latency_watermark_a;
+       amdgpu_crtc->wm_low = latency_watermark_b;
+}
+
+/**
+ * dce_v10_0_bandwidth_update - program display watermarks
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Calculate and program the display watermarks and line
+ * buffer allocation (CIK).
+ */
+static void dce_v10_0_bandwidth_update(struct amdgpu_device *adev)
+{
+       struct drm_display_mode *mode = NULL;
+       u32 num_heads = 0, lb_size;
+       int i;
+
+       amdgpu_update_display_priority(adev);
+
+       for (i = 0; i < adev->mode_info.num_crtc; i++) {
+               if (adev->mode_info.crtcs[i]->base.enabled)
+                       num_heads++;
+       }
+       for (i = 0; i < adev->mode_info.num_crtc; i++) {
+               mode = &adev->mode_info.crtcs[i]->base.mode;
+               lb_size = dce_v10_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i], mode);
+               dce_v10_0_program_watermarks(adev, adev->mode_info.crtcs[i],
+                                           lb_size, num_heads);
+       }
+}
+
+static void dce_v10_0_audio_get_connected_pins(struct amdgpu_device *adev)
+{
+       int i;
+       u32 offset, tmp;
+
+       for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
+               offset = adev->mode_info.audio.pin[i].offset;
+               tmp = RREG32_AUDIO_ENDPT(offset,
+                                        ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT);
+               if (((tmp &
+               AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK) >>
+               AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT) == 1)
+                       adev->mode_info.audio.pin[i].connected = false;
+               else
+                       adev->mode_info.audio.pin[i].connected = true;
+       }
+}
+
+static struct amdgpu_audio_pin *dce_v10_0_audio_get_pin(struct amdgpu_device *adev)
+{
+       int i;
+
+       dce_v10_0_audio_get_connected_pins(adev);
+
+       for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
+               if (adev->mode_info.audio.pin[i].connected)
+                       return &adev->mode_info.audio.pin[i];
+       }
+       DRM_ERROR("No connected audio pins found!\n");
+       return NULL;
+}
+
+static void dce_v10_0_afmt_audio_select_pin(struct drm_encoder *encoder)
+{
+       struct amdgpu_device *adev = encoder->dev->dev_private;
+       struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+       struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
+       u32 tmp;
+
+       if (!dig || !dig->afmt || !dig->afmt->pin)
+               return;
+
+       tmp = RREG32(mmAFMT_AUDIO_SRC_CONTROL + dig->afmt->offset);
+       tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_SRC_CONTROL, AFMT_AUDIO_SRC_SELECT, dig->afmt->pin->id);
+       WREG32(mmAFMT_AUDIO_SRC_CONTROL + dig->afmt->offset, tmp);
+}
+
+static void dce_v10_0_audio_write_latency_fields(struct drm_encoder *encoder,
+                                               struct drm_display_mode *mode)
+{
+       struct amdgpu_device *adev = encoder->dev->dev_private;
+       struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+       struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
+       struct drm_connector *connector;
+       struct amdgpu_connector *amdgpu_connector = NULL;
+       u32 tmp;
+       int interlace = 0;
+
+       if (!dig || !dig->afmt || !dig->afmt->pin)
+               return;
+
+       list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+               if (connector->encoder == encoder) {
+                       amdgpu_connector = to_amdgpu_connector(connector);
+                       break;
+               }
+       }
+
+       if (!amdgpu_connector) {
+               DRM_ERROR("Couldn't find encoder's connector\n");
+               return;
+       }
+
+       if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+               interlace = 1;
+       if (connector->latency_present[interlace]) {
+               tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
+                                   VIDEO_LIPSYNC, connector->video_latency[interlace]);
+               tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
+                                   AUDIO_LIPSYNC, connector->audio_latency[interlace]);
+       } else {
+               tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
+                                   VIDEO_LIPSYNC, 0);
+               tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
+                                   AUDIO_LIPSYNC, 0);
+       }
+       WREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
+                          ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp);
+}
+
+static void dce_v10_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
+{
+       struct amdgpu_device *adev = encoder->dev->dev_private;
+       struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+       struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
+       struct drm_connector *connector;
+       struct amdgpu_connector *amdgpu_connector = NULL;
+       u32 tmp;
+       u8 *sadb = NULL;
+       int sad_count;
+
+       if (!dig || !dig->afmt || !dig->afmt->pin)
+               return;
+
+       list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+               if (connector->encoder == encoder) {
+                       amdgpu_connector = to_amdgpu_connector(connector);
+                       break;
+               }
+       }
+
+       if (!amdgpu_connector) {
+               DRM_ERROR("Couldn't find encoder's connector\n");
+               return;
+       }
+
+       sad_count = drm_edid_to_speaker_allocation(amdgpu_connector_edid(connector), &sadb);
+       if (sad_count < 0) {
+               DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
+               sad_count = 0;
+       }
+
+       /* program the speaker allocation */
+       tmp = RREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
+                                ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
+       tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
+                           DP_CONNECTION, 0);
+       /* set HDMI mode */
+       tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
+                           HDMI_CONNECTION, 1);
+       if (sad_count)
+               tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
+                                   SPEAKER_ALLOCATION, sadb[0]);
+       else
+               tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
+                                   SPEAKER_ALLOCATION, 5); /* stereo */
+       WREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
+                          ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);
+
+       kfree(sadb);
+}
+
+static void dce_v10_0_audio_write_sad_regs(struct drm_encoder *encoder)
+{
+       struct amdgpu_device *adev = encoder->dev->dev_private;
+       struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+       struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
+       struct drm_connector *connector;
+       struct amdgpu_connector *amdgpu_connector = NULL;
+       struct cea_sad *sads;
+       int i, sad_count;
+
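+       /* Each entry below pairs an Azalia pin audio-descriptor register
+        * with the CEA-861 short-audio-descriptor format code it reports;
+        * the loop further down fills in the maximum channel count and
+        * supported sample rates for each format from the monitor's EDID.
+        */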
+       static const u16 eld_reg_to_type[][2] = {
+               { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, HDMI_AUDIO_CODING_TYPE_PCM },
+               { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1, HDMI_AUDIO_CODING_TYPE_AC3 },
+               { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2, HDMI_AUDIO_CODING_TYPE_MPEG1 },
+               { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3, HDMI_AUDIO_CODING_TYPE_MP3 },
+               { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4, HDMI_AUDIO_CODING_TYPE_MPEG2 },
+               { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5, HDMI_AUDIO_CODING_TYPE_AAC_LC },
+               { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6, HDMI_AUDIO_CODING_TYPE_DTS },
+               { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7, HDMI_AUDIO_CODING_TYPE_ATRAC },
+               { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9, HDMI_AUDIO_CODING_TYPE_EAC3 },
+               { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10, HDMI_AUDIO_CODING_TYPE_DTS_HD },
+               { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11, HDMI_AUDIO_CODING_TYPE_MLP },
+               { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
+       };
+
+       if (!dig || !dig->afmt || !dig->afmt->pin)
+               return;
+
+       list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+               if (connector->encoder == encoder) {
+                       amdgpu_connector = to_amdgpu_connector(connector);
+                       break;
+               }
+       }
+
+       if (!amdgpu_connector) {
+               DRM_ERROR("Couldn't find encoder's connector\n");
+               return;
+       }
+
+       sad_count = drm_edid_to_sad(amdgpu_connector_edid(connector), &sads);
+       if (sad_count <= 0) {
+               DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
+               return;
+       }
+       BUG_ON(!sads);
+
+       for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
+               u32 tmp = 0;
+               u8 stereo_freqs = 0;
+               int max_channels = -1;
+               int j;
+
+               for (j = 0; j < sad_count; j++) {
+                       struct cea_sad *sad = &sads[j];
+
+                       if (sad->format == eld_reg_to_type[i][1]) {
+                               if (sad->channels > max_channels) {
+                                       tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
+                                                           MAX_CHANNELS, sad->channels);
+                                       tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
+                                                           DESCRIPTOR_BYTE_2, sad->byte2);
+                                       tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
+                                                           SUPPORTED_FREQUENCIES, sad->freq);
+                                       max_channels = sad->channels;
+                               }
+
+                               if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM)
+                                       stereo_freqs |= sad->freq;
+                               else
+                                       break;
+                       }
+               }
+
+               tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
+                                   SUPPORTED_FREQUENCIES_STEREO, stereo_freqs);
+               WREG32_AUDIO_ENDPT(dig->afmt->pin->offset, eld_reg_to_type[i][0], tmp);
+       }
+
+       kfree(sads);
+}
+
+static void dce_v10_0_audio_enable(struct amdgpu_device *adev,
+                                 struct amdgpu_audio_pin *pin,
+                                 bool enable)
+{
+       if (!pin)
+               return;
+
+       WREG32_AUDIO_ENDPT(pin->offset, ixAZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
+                          enable ? AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK : 0);
+}
+
+static const u32 pin_offsets[] =
+{
+       AUD0_REGISTER_OFFSET,
+       AUD1_REGISTER_OFFSET,
+       AUD2_REGISTER_OFFSET,
+       AUD3_REGISTER_OFFSET,
+       AUD4_REGISTER_OFFSET,
+       AUD5_REGISTER_OFFSET,
+       AUD6_REGISTER_OFFSET,
+};
+
+static int dce_v10_0_audio_init(struct amdgpu_device *adev)
+{
+       int i;
+
+       if (!amdgpu_audio)
+               return 0;
+
+       adev->mode_info.audio.enabled = true;
+
+       adev->mode_info.audio.num_pins = 7;
+
+       for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
+               adev->mode_info.audio.pin[i].channels = -1;
+               adev->mode_info.audio.pin[i].rate = -1;
+               adev->mode_info.audio.pin[i].bits_per_sample = -1;
+               adev->mode_info.audio.pin[i].status_bits = 0;
+               adev->mode_info.audio.pin[i].category_code = 0;
+               adev->mode_info.audio.pin[i].connected = false;
+               adev->mode_info.audio.pin[i].offset = pin_offsets[i];
+               adev->mode_info.audio.pin[i].id = i;
+               /* disable audio.  it will be set up later */
+               /* XXX remove once we switch to ip funcs */
+               dce_v10_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
+       }
+
+       return 0;
+}
+
+static void dce_v10_0_audio_fini(struct amdgpu_device *adev)
+{
+       int i;
+
+       if (!adev->mode_info.audio.enabled)
+               return;
+
+       for (i = 0; i < adev->mode_info.audio.num_pins; i++)
+               dce_v10_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
+
+       adev->mode_info.audio.enabled = false;
+}
+
+/*
+ * update the N and CTS parameters for a given pixel clock rate
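+ *
+ * For example, for 48 kHz audio the HDMI spec recommends N = 6144 and
+ * CTS = f_pixel * N / (128 * f_audio), so a 74.25 MHz pixel clock gives
+ * CTS = 74250; amdgpu_afmt_acr() computes the values for the 32 kHz,
+ * 44.1 kHz and 48 kHz families programmed below.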
+ */
+static void dce_v10_0_afmt_update_ACR(struct drm_encoder *encoder, uint32_t clock)
+{
+       struct drm_device *dev = encoder->dev;
+       struct amdgpu_device *adev = dev->dev_private;
+       struct amdgpu_afmt_acr acr = amdgpu_afmt_acr(clock);
+       struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+       struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
+       u32 tmp;
+
+       tmp = RREG32(mmHDMI_ACR_32_0 + dig->afmt->offset);
+       tmp = REG_SET_FIELD(tmp, HDMI_ACR_32_0, HDMI_ACR_CTS_32, acr.cts_32khz);
+       WREG32(mmHDMI_ACR_32_0 + dig->afmt->offset, tmp);
+       tmp = RREG32(mmHDMI_ACR_32_1 + dig->afmt->offset);
+       tmp = REG_SET_FIELD(tmp, HDMI_ACR_32_1, HDMI_ACR_N_32, acr.n_32khz);
+       WREG32(mmHDMI_ACR_32_1 + dig->afmt->offset, tmp);
+
+       tmp = RREG32(mmHDMI_ACR_44_0 + dig->afmt->offset);
+       tmp = REG_SET_FIELD(tmp, HDMI_ACR_44_0, HDMI_ACR_CTS_44, acr.cts_44_1khz);
+       WREG32(mmHDMI_ACR_44_0 + dig->afmt->offset, tmp);
+       tmp = RREG32(mmHDMI_ACR_44_1 + dig->afmt->offset);
+       tmp = REG_SET_FIELD(tmp, HDMI_ACR_44_1, HDMI_ACR_N_44, acr.n_44_1khz);
+       WREG32(mmHDMI_ACR_44_1 + dig->afmt->offset, tmp);
+
+       tmp = RREG32(mmHDMI_ACR_48_0 + dig->afmt->offset);
+       tmp = REG_SET_FIELD(tmp, HDMI_ACR_48_0, HDMI_ACR_CTS_48, acr.cts_48khz);
+       WREG32(mmHDMI_ACR_48_0 + dig->afmt->offset, tmp);
+       tmp = RREG32(mmHDMI_ACR_48_1 + dig->afmt->offset);
+       tmp = REG_SET_FIELD(tmp, HDMI_ACR_48_1, HDMI_ACR_N_48, acr.n_48khz);
+       WREG32(mmHDMI_ACR_48_1 + dig->afmt->offset, tmp);
+}
+
+/*
+ * build an HDMI AVI InfoFrame
+ */
+static void dce_v10_0_afmt_update_avi_infoframe(struct drm_encoder *encoder,
+                                              void *buffer, size_t size)
+{
+       struct drm_device *dev = encoder->dev;
+       struct amdgpu_device *adev = dev->dev_private;
+       struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+       struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
+       uint8_t *frame = buffer + 3;
+       uint8_t *header = buffer;
+
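+       /* hdmi_avi_infoframe_pack() emits a 4-byte header (type, version,
+        * length, checksum) followed by the payload; frame points at the
+        * checksum byte, so AFMT_AVI_INFO0..3 carry the checksum plus
+        * payload, with the version byte (header[1]) going into the top
+        * byte of AFMT_AVI_INFO3.
+        */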
+       WREG32(mmAFMT_AVI_INFO0 + dig->afmt->offset,
+               frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
+       WREG32(mmAFMT_AVI_INFO1 + dig->afmt->offset,
+               frame[0x4] | (frame[0x5] << 8) | (frame[0x6] << 16) | (frame[0x7] << 24));
+       WREG32(mmAFMT_AVI_INFO2 + dig->afmt->offset,
+               frame[0x8] | (frame[0x9] << 8) | (frame[0xA] << 16) | (frame[0xB] << 24));
+       WREG32(mmAFMT_AVI_INFO3 + dig->afmt->offset,
+               frame[0xC] | (frame[0xD] << 8) | (header[1] << 24));
+}
+
+static void dce_v10_0_audio_set_dto(struct drm_encoder *encoder, u32 clock)
+{
+       struct drm_device *dev = encoder->dev;
+       struct amdgpu_device *adev = dev->dev_private;
+       struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+       struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
+       struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
+       u32 dto_phase = 24 * 1000;
+       u32 dto_modulo = clock;
+       u32 tmp;
+
+       if (!dig || !dig->afmt)
+               return;
+
+       /* XXX two dtos; generally use dto0 for hdmi */
+       /* Express [24MHz / target pixel clock] as an exact rational
+        * number (a ratio of two integers): DCCG_AUDIO_DTOx_PHASE is the
+        * numerator and DCCG_AUDIO_DTOx_MODULE is the denominator.
+        */
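+       /* e.g. a 148.5 MHz pixel clock yields PHASE = 24000 and
+        * MODULE = 148500, i.e. 24 MHz / 148.5 MHz.
+        */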
+       tmp = RREG32(mmDCCG_AUDIO_DTO_SOURCE);
+       tmp = REG_SET_FIELD(tmp, DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO0_SOURCE_SEL,
+                           amdgpu_crtc->crtc_id);
+       WREG32(mmDCCG_AUDIO_DTO_SOURCE, tmp);
+       WREG32(mmDCCG_AUDIO_DTO0_PHASE, dto_phase);
+       WREG32(mmDCCG_AUDIO_DTO0_MODULE, dto_modulo);
+}
+
+/*
+ * update the info frames with the data from the current display mode
+ */
+static void dce_v10_0_afmt_setmode(struct drm_encoder *encoder,
+                                 struct drm_display_mode *mode)
+{
+       struct drm_device *dev = encoder->dev;
+       struct amdgpu_device *adev = dev->dev_private;
+       struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+       struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
+       struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
+       u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
+       struct hdmi_avi_infoframe frame;
+       ssize_t err;
+       u32 tmp;
+       int bpc = 8;
+
+       if (!dig || !dig->afmt)
+               return;
+
+       /* Silently return if the AFMT block has not been enabled yet */
+       if (!dig->afmt->enabled)
+               return;
+
+       /* hdmi deep color mode general control packets setup, if bpc > 8 */
+       if (encoder->crtc) {
+               struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
+               bpc = amdgpu_crtc->bpc;
+       }
+
+       /* disable audio prior to setting up hw */
+       dig->afmt->pin = dce_v10_0_audio_get_pin(adev);
+       dce_v10_0_audio_enable(adev, dig->afmt->pin, false);
+
+       dce_v10_0_audio_set_dto(encoder, mode->clock);
+
+       tmp = RREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset);
+       tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_NULL_SEND, 1);
+       WREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset, tmp); /* send null packets when required */
+
+       WREG32(mmAFMT_AUDIO_CRC_CONTROL + dig->afmt->offset, 0x1000);
+
+       tmp = RREG32(mmHDMI_CONTROL + dig->afmt->offset);
+       switch (bpc) {
+       case 0:
+       case 6:
+       case 8:
+       case 16:
+       default:
+               tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_ENABLE, 0);
+               tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 0);
+               DRM_DEBUG("%s: Disabling hdmi deep color for %d bpc.\n",
+                         connector->name, bpc);
+               break;
+       case 10:
+               tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_ENABLE, 1);
+               tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 1);
+               DRM_DEBUG("%s: Enabling hdmi deep color 30 for 10 bpc.\n",
+                         connector->name);
+               break;
+       case 12:
+               tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_ENABLE, 1);
+               tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 2);
+               DRM_DEBUG("%s: Enabling hdmi deep color 36 for 12 bpc.\n",
+                         connector->name);
+               break;
+       }
+       WREG32(mmHDMI_CONTROL + dig->afmt->offset, tmp);
+
+       tmp = RREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset);
+       tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_NULL_SEND, 1); /* send null packets when required */
+       tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_GC_SEND, 1); /* send general control packets */
+       tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_GC_CONT, 1); /* send general control packets every frame */
+       WREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset, tmp);
+
+       tmp = RREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset);
+       /* enable audio info frames (frames won't be set until audio is enabled) */
+       tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_SEND, 1);
+       /* required for audio info values to be updated */
+       tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_CONT, 1);
+       WREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);
+
+       tmp = RREG32(mmAFMT_INFOFRAME_CONTROL0 + dig->afmt->offset);
+       /* required for audio info values to be updated */
+       tmp = REG_SET_FIELD(tmp, AFMT_INFOFRAME_CONTROL0, AFMT_AUDIO_INFO_UPDATE, 1);
+       WREG32(mmAFMT_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);
+
+       tmp = RREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset);
+       /* anything other than 0 */
+       tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL1, HDMI_AUDIO_INFO_LINE, 2);
+       WREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset, tmp);
+
+       WREG32(mmHDMI_GC + dig->afmt->offset, 0); /* unset HDMI_GC_AVMUTE */
+
+       tmp = RREG32(mmHDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset);
+       /* set the default audio delay */
+       tmp = REG_SET_FIELD(tmp, HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_DELAY_EN, 1);
+       /* should be sufficient for all audio modes and small enough for all hblanks */
+       tmp = REG_SET_FIELD(tmp, HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_PACKETS_PER_LINE, 3);
+       WREG32(mmHDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
+
+       tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset);
+       /* allow 60958 channel status fields to be updated */
+       tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_60958_CS_UPDATE, 1);
+       WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
+
+       tmp = RREG32(mmHDMI_ACR_PACKET_CONTROL + dig->afmt->offset);
+       if (bpc > 8)
+               /* clear SW CTS value */
+               tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_SOURCE, 0);
+       else
+               /* select SW CTS value */
+               tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_SOURCE, 1);
+       /* allow hw to send ACR packets when required */
+       tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_AUTO_SEND, 1);
+       WREG32(mmHDMI_ACR_PACKET_CONTROL + dig->afmt->offset, tmp);
+
+       dce_v10_0_afmt_update_ACR(encoder, mode->clock);
+
+       tmp = RREG32(mmAFMT_60958_0 + dig->afmt->offset);
+       tmp = REG_SET_FIELD(tmp, AFMT_60958_0, AFMT_60958_CS_CHANNEL_NUMBER_L, 1);
+       WREG32(mmAFMT_60958_0 + dig->afmt->offset, tmp);
+
+       tmp = RREG32(mmAFMT_60958_1 + dig->afmt->offset);
+       tmp = REG_SET_FIELD(tmp, AFMT_60958_1, AFMT_60958_CS_CHANNEL_NUMBER_R, 2);
+       WREG32(mmAFMT_60958_1 + dig->afmt->offset, tmp);
+
+       tmp = RREG32(mmAFMT_60958_2 + dig->afmt->offset);
+       tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_2, 3);
+       tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_3, 4);
+       tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_4, 5);
+       tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_5, 6);
+       tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_6, 7);
+       tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_7, 8);
+       WREG32(mmAFMT_60958_2 + dig->afmt->offset, tmp);
+
+       dce_v10_0_audio_write_speaker_allocation(encoder);
+
+       WREG32(mmAFMT_AUDIO_PACKET_CONTROL2 + dig->afmt->offset,
+              (0xff << AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE__SHIFT));
+
+       dce_v10_0_afmt_audio_select_pin(encoder);
+       dce_v10_0_audio_write_sad_regs(encoder);
+       dce_v10_0_audio_write_latency_fields(encoder, mode);
+
+       err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
+       if (err < 0) {
+               DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
+               return;
+       }
+
+       err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
+       if (err < 0) {
+               DRM_ERROR("failed to pack AVI infoframe: %zd\n", err);
+               return;
+       }
+
+       dce_v10_0_afmt_update_avi_infoframe(encoder, buffer, sizeof(buffer));
+
+       tmp = RREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset);
+       /* enable AVI info frames */
+       tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_SEND, 1);
+       /* required for audio info values to be updated */
+       tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_CONT, 1);
+       WREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);
+
+       tmp = RREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset);
+       tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL1, HDMI_AVI_INFO_LINE, 2);
+       WREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset, tmp);
+
+       tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset);
+       /* send audio packets */
+       tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, 1);
+       WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
+
+       WREG32(mmAFMT_RAMP_CONTROL0 + dig->afmt->offset, 0x00FFFFFF);
+       WREG32(mmAFMT_RAMP_CONTROL1 + dig->afmt->offset, 0x007FFFFF);
+       WREG32(mmAFMT_RAMP_CONTROL2 + dig->afmt->offset, 0x00000001);
+       WREG32(mmAFMT_RAMP_CONTROL3 + dig->afmt->offset, 0x00000001);
+
+       /* enable audio after setting up hw */
+       dce_v10_0_audio_enable(adev, dig->afmt->pin, true);
+}
+
+static void dce_v10_0_afmt_enable(struct drm_encoder *encoder, bool enable)
+{
+       struct drm_device *dev = encoder->dev;
+       struct amdgpu_device *adev = dev->dev_private;
+       struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+       struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
+
+       if (!dig || !dig->afmt)
+               return;
+
+       /* Silently skip if the AFMT block is already in the requested state */
+       if (enable && dig->afmt->enabled)
+               return;
+       if (!enable && !dig->afmt->enabled)
+               return;
+
+       if (!enable && dig->afmt->pin) {
+               dce_v10_0_audio_enable(adev, dig->afmt->pin, false);
+               dig->afmt->pin = NULL;
+       }
+
+       dig->afmt->enabled = enable;
+
+       DRM_DEBUG("%sabling AFMT interface @ 0x%04X for encoder 0x%x\n",
+                 enable ? "En" : "Dis", dig->afmt->offset, amdgpu_encoder->encoder_id);
+}
+
+static void dce_v10_0_afmt_init(struct amdgpu_device *adev)
+{
+       int i;
+
+       for (i = 0; i < adev->mode_info.num_dig; i++)
+               adev->mode_info.afmt[i] = NULL;
+
+       /* DCE10 has audio blocks tied to DIG encoders */
+       for (i = 0; i < adev->mode_info.num_dig; i++) {
+               adev->mode_info.afmt[i] = kzalloc(sizeof(struct amdgpu_afmt), GFP_KERNEL);
+               if (adev->mode_info.afmt[i]) {
+                       adev->mode_info.afmt[i]->offset = dig_offsets[i];
+                       adev->mode_info.afmt[i]->id = i;
+               }
+       }
+}
+
+static void dce_v10_0_afmt_fini(struct amdgpu_device *adev)
+{
+       int i;
+
+       for (i = 0; i < adev->mode_info.num_dig; i++) {
+               kfree(adev->mode_info.afmt[i]);
+               adev->mode_info.afmt[i] = NULL;
+       }
+}
+
+static const u32 vga_control_regs[6] =
+{
+       mmD1VGA_CONTROL,
+       mmD2VGA_CONTROL,
+       mmD3VGA_CONTROL,
+       mmD4VGA_CONTROL,
+       mmD5VGA_CONTROL,
+       mmD6VGA_CONTROL,
+};
+
+static void dce_v10_0_vga_enable(struct drm_crtc *crtc, bool enable)
+{
+       struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+       struct drm_device *dev = crtc->dev;
+       struct amdgpu_device *adev = dev->dev_private;
+       u32 vga_control;
+
+       vga_control = RREG32(vga_control_regs[amdgpu_crtc->crtc_id]) & ~1;
+       if (enable)
+               WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control | 1);
+       else
+               WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control);
+}
+
+static void dce_v10_0_grph_enable(struct drm_crtc *crtc, bool enable)
+{
+       struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+       struct drm_device *dev = crtc->dev;
+       struct amdgpu_device *adev = dev->dev_private;
+
+       if (enable)
+               WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 1);
+       else
+               WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 0);
+}
+
+static void dce_v10_0_tiling_fields(uint64_t tiling_flags, unsigned *bankw,
+                                  unsigned *bankh, unsigned *mtaspect,
+                                  unsigned *tile_split)
+{
+       *bankw = (tiling_flags >> AMDGPU_TILING_EG_BANKW_SHIFT) & AMDGPU_TILING_EG_BANKW_MASK;
+       *bankh = (tiling_flags >> AMDGPU_TILING_EG_BANKH_SHIFT) & AMDGPU_TILING_EG_BANKH_MASK;
+       *mtaspect = (tiling_flags >> AMDGPU_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & AMDGPU_TILING_EG_MACRO_TILE_ASPECT_MASK;
+       *tile_split = (tiling_flags >> AMDGPU_TILING_EG_TILE_SPLIT_SHIFT) & AMDGPU_TILING_EG_TILE_SPLIT_MASK;
+       switch (*bankw) {
+       default:
+       case 1:
+               *bankw = ADDR_SURF_BANK_WIDTH_1;
+               break;
+       case 2:
+               *bankw = ADDR_SURF_BANK_WIDTH_2;
+               break;
+       case 4:
+               *bankw = ADDR_SURF_BANK_WIDTH_4;
+               break;
+       case 8:
+               *bankw = ADDR_SURF_BANK_WIDTH_8;
+               break;
+       }
+       switch (*bankh) {
+       default:
+       case 1:
+               *bankh = ADDR_SURF_BANK_HEIGHT_1;
+               break;
+       case 2:
+               *bankh = ADDR_SURF_BANK_HEIGHT_2;
+               break;
+       case 4:
+               *bankh = ADDR_SURF_BANK_HEIGHT_4;
+               break;
+       case 8:
+               *bankh = ADDR_SURF_BANK_HEIGHT_8;
+               break;
+       }
+       switch (*mtaspect) {
+       default:
+       case 1:
+               *mtaspect = ADDR_SURF_MACRO_ASPECT_1;
+               break;
+       case 2:
+               *mtaspect = ADDR_SURF_MACRO_ASPECT_2;
+               break;
+       case 4:
+               *mtaspect = ADDR_SURF_MACRO_ASPECT_4;
+               break;
+       case 8:
+               *mtaspect = ADDR_SURF_MACRO_ASPECT_8;
+               break;
+       }
+}
+
+static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc,
+                                    struct drm_framebuffer *fb,
+                                    int x, int y, int atomic)
+{
+       struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+       struct drm_device *dev = crtc->dev;
+       struct amdgpu_device *adev = dev->dev_private;
+       struct amdgpu_framebuffer *amdgpu_fb;
+       struct drm_framebuffer *target_fb;
+       struct drm_gem_object *obj;
+       struct amdgpu_bo *rbo;
+       uint64_t fb_location, tiling_flags;
+       uint32_t fb_format, fb_pitch_pixels;
+       unsigned bankw, bankh, mtaspect, tile_split;
+       u32 fb_swap = REG_SET_FIELD(0, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, ENDIAN_NONE);
+       /* XXX change to VI */
+       u32 pipe_config = (adev->gfx.config.tile_mode_array[10] >> 6) & 0x1f;
+       u32 tmp, viewport_w, viewport_h;
+       int r;
+       bool bypass_lut = false;
+
+       /* no fb bound */
+       if (!atomic && !crtc->primary->fb) {
+               DRM_DEBUG_KMS("No FB bound\n");
+               return 0;
+       }
+
+       if (atomic) {
+               amdgpu_fb = to_amdgpu_framebuffer(fb);
+               target_fb = fb;
+       } else {
+               amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
+               target_fb = crtc->primary->fb;
+       }
+
+       /* If atomic, assume fb object is pinned & idle & fenced and
+        * just update base pointers
+        */
+       obj = amdgpu_fb->obj;
+       rbo = gem_to_amdgpu_bo(obj);
+       r = amdgpu_bo_reserve(rbo, false);
+       if (unlikely(r != 0))
+               return r;
+
+       if (atomic) {
+               fb_location = amdgpu_bo_gpu_offset(rbo);
+       } else {
+               r = amdgpu_bo_pin(rbo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
+               if (unlikely(r != 0)) {
+                       amdgpu_bo_unreserve(rbo);
+                       return -EINVAL;
+               }
+       }
+
+       amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);
+       amdgpu_bo_unreserve(rbo);
+
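+       /* GRPH_DEPTH selects the bytes-per-pixel class (0 = 8 bpp,
+        * 1 = 16 bpp, 2 = 32 bpp) and GRPH_FORMAT the channel layout
+        * within that depth, e.g. ARGB8888 below is depth 2, format 0.
+        */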
+       switch (target_fb->pixel_format) {
+       case DRM_FORMAT_C8:
+               fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 0);
+               fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0);
+               break;
+       case DRM_FORMAT_XRGB4444:
+       case DRM_FORMAT_ARGB4444:
+               fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1);
+               fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 2);
+#ifdef __BIG_ENDIAN
+               fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
+                                       ENDIAN_8IN16);
+#endif
+               break;
+       case DRM_FORMAT_XRGB1555:
+       case DRM_FORMAT_ARGB1555:
+               fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1);
+               fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0);
+#ifdef __BIG_ENDIAN
+               fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
+                                       ENDIAN_8IN16);
+#endif
+               break;
+       case DRM_FORMAT_BGRX5551:
+       case DRM_FORMAT_BGRA5551:
+               fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1);
+               fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 5);
+#ifdef __BIG_ENDIAN
+               fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
+                                       ENDIAN_8IN16);
+#endif
+               break;
+       case DRM_FORMAT_RGB565:
+               fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1);
+               fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 1);
+#ifdef __BIG_ENDIAN
+               fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
+                                       ENDIAN_8IN16);
+#endif
+               break;
+       case DRM_FORMAT_XRGB8888:
+       case DRM_FORMAT_ARGB8888:
+               fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2);
+               fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0);
+#ifdef __BIG_ENDIAN
+               fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
+                                       ENDIAN_8IN32);
+#endif
+               break;
+       case DRM_FORMAT_XRGB2101010:
+       case DRM_FORMAT_ARGB2101010:
+               fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2);
+               fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 1);
+#ifdef __BIG_ENDIAN
+               fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
+                                       ENDIAN_8IN32);
+#endif
+               /* Greater 8 bpc fb needs to bypass hw-lut to retain precision */
+               bypass_lut = true;
+               break;
+       case DRM_FORMAT_BGRX1010102:
+       case DRM_FORMAT_BGRA1010102:
+               fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2);
+               fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 4);
+#ifdef __BIG_ENDIAN
+               fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
+                                       ENDIAN_8IN32);
+#endif
+               /* Greater 8 bpc fb needs to bypass hw-lut to retain precision */
+               bypass_lut = true;
+               break;
+       default:
+               DRM_ERROR("Unsupported screen format %s\n",
+                       drm_get_format_name(target_fb->pixel_format));
+               return -EINVAL;
+       }
+
+       if (tiling_flags & AMDGPU_TILING_MACRO) {
+               unsigned tileb, index, num_banks, tile_split_bytes;
+
+               dce_v10_0_tiling_fields(tiling_flags, &bankw, &bankh, &mtaspect, &tile_split);
+               /* Set NUM_BANKS. */
+               /* Calculate the macrotile mode index. */
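+               /* e.g. a 32 bpp fb has an 8x8 tile of 256 bytes; with a
+                * tile split of at least 256 bytes that halves to 64 in
+                * two steps, giving index 2.
+                */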
+               tile_split_bytes = 64 << tile_split;
+               tileb = 8 * 8 * target_fb->bits_per_pixel / 8;
+               tileb = min(tile_split_bytes, tileb);
+
+               for (index = 0; tileb > 64; index++) {
+                       tileb >>= 1;
+               }
+
+               if (index >= 16) {
+                       DRM_ERROR("Wrong screen bpp (%u) or tile split (%u)\n",
+                                 target_fb->bits_per_pixel, tile_split);
+                       return -EINVAL;
+               }
+
+               num_banks = (adev->gfx.config.macrotile_mode_array[index] >> 6) & 0x3;
+               fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_NUM_BANKS, num_banks);
+               fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_ARRAY_MODE,
+                                         ARRAY_2D_TILED_THIN1);
+               fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_TILE_SPLIT,
+                                         tile_split);
+               fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_BANK_WIDTH, bankw);
+               fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_BANK_HEIGHT, bankh);
+               fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_MACRO_TILE_ASPECT,
+                                         mtaspect);
+               fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_MICRO_TILE_MODE,
+                                         ADDR_SURF_MICRO_TILING_DISPLAY);
+       } else if (tiling_flags & AMDGPU_TILING_MICRO) {
+               fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_ARRAY_MODE,
+                                         ARRAY_1D_TILED_THIN1);
+       }
+
+       /* Read the pipe config from the 2D TILED SCANOUT mode.
+        * It should be the same for the other modes too, but not all
+        * modes set the pipe config field. */
+       fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_PIPE_CONFIG,
+                                 pipe_config);
+
+       dce_v10_0_vga_enable(crtc, false);
+
+       WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
+              upper_32_bits(fb_location));
+       WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
+              upper_32_bits(fb_location));
+       WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
+              (u32)fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK);
+       WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
+              (u32) fb_location & GRPH_SECONDARY_SURFACE_ADDRESS__GRPH_SECONDARY_SURFACE_ADDRESS_MASK);
+       WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format);
+       WREG32(mmGRPH_SWAP_CNTL + amdgpu_crtc->crtc_offset, fb_swap);
+
+       /*
+        * The LUT only has 256 slots for indexing by a 8 bpc fb. Bypass the LUT
+        * for > 8 bpc scanout to avoid truncation of fb indices to 8 msb's, to
+        * retain the full precision throughout the pipeline.
+        */
+       tmp = RREG32(mmGRPH_LUT_10BIT_BYPASS + amdgpu_crtc->crtc_offset);
+       if (bypass_lut)
+               tmp = REG_SET_FIELD(tmp, GRPH_LUT_10BIT_BYPASS, GRPH_LUT_10BIT_BYPASS_EN, 1);
+       else
+               tmp = REG_SET_FIELD(tmp, GRPH_LUT_10BIT_BYPASS, GRPH_LUT_10BIT_BYPASS_EN, 0);
+       WREG32(mmGRPH_LUT_10BIT_BYPASS + amdgpu_crtc->crtc_offset, tmp);
+
+       if (bypass_lut)
+               DRM_DEBUG_KMS("Bypassing hardware LUT due to 10 bit fb scanout.\n");
+
+       WREG32(mmGRPH_SURFACE_OFFSET_X + amdgpu_crtc->crtc_offset, 0);
+       WREG32(mmGRPH_SURFACE_OFFSET_Y + amdgpu_crtc->crtc_offset, 0);
+       WREG32(mmGRPH_X_START + amdgpu_crtc->crtc_offset, 0);
+       WREG32(mmGRPH_Y_START + amdgpu_crtc->crtc_offset, 0);
+       WREG32(mmGRPH_X_END + amdgpu_crtc->crtc_offset, target_fb->width);
+       WREG32(mmGRPH_Y_END + amdgpu_crtc->crtc_offset, target_fb->height);
+
+       fb_pitch_pixels = target_fb->pitches[0] / (target_fb->bits_per_pixel / 8);
+       WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset, fb_pitch_pixels);
+
+       dce_v10_0_grph_enable(crtc, true);
+
+       WREG32(mmLB_DESKTOP_HEIGHT + amdgpu_crtc->crtc_offset,
+              target_fb->height);
+
+       x &= ~3;
+       y &= ~1;
+       WREG32(mmVIEWPORT_START + amdgpu_crtc->crtc_offset,
+              (x << 16) | y);
+       viewport_w = crtc->mode.hdisplay;
+       viewport_h = (crtc->mode.vdisplay + 1) & ~1;
+       WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset,
+              (viewport_w << 16) | viewport_h);
+
+       /* pageflip setup */
+       /* make sure the flip happens during vblank rather than hblank */
+       tmp = RREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset);
+       tmp = REG_SET_FIELD(tmp, GRPH_FLIP_CONTROL,
+                           GRPH_SURFACE_UPDATE_H_RETRACE_EN, 0);
+       WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, tmp);
+
+       /* set pageflip to happen only at start of vblank interval (front porch) */
+       WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 3);
+
+       if (!atomic && fb && fb != crtc->primary->fb) {
+               amdgpu_fb = to_amdgpu_framebuffer(fb);
+               rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
+               r = amdgpu_bo_reserve(rbo, false);
+               if (unlikely(r != 0))
+                       return r;
+               amdgpu_bo_unpin(rbo);
+               amdgpu_bo_unreserve(rbo);
+       }
+
+       /* Bytes per pixel may have changed */
+       dce_v10_0_bandwidth_update(adev);
+
+       return 0;
+}
+
+static void dce_v10_0_set_interleave(struct drm_crtc *crtc,
+                                    struct drm_display_mode *mode)
+{
+       struct drm_device *dev = crtc->dev;
+       struct amdgpu_device *adev = dev->dev_private;
+       struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+       u32 tmp;
+
+       tmp = RREG32(mmLB_DATA_FORMAT + amdgpu_crtc->crtc_offset);
+       if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+               tmp = REG_SET_FIELD(tmp, LB_DATA_FORMAT, INTERLEAVE_EN, 1);
+       else
+               tmp = REG_SET_FIELD(tmp, LB_DATA_FORMAT, INTERLEAVE_EN, 0);
+       WREG32(mmLB_DATA_FORMAT + amdgpu_crtc->crtc_offset, tmp);
+}
+
+static void dce_v10_0_crtc_load_lut(struct drm_crtc *crtc)
+{
+       struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+       struct drm_device *dev = crtc->dev;
+       struct amdgpu_device *adev = dev->dev_private;
+       int i;
+       u32 tmp;
+
+       DRM_DEBUG_KMS("%d\n", amdgpu_crtc->crtc_id);
+
+       tmp = RREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset);
+       tmp = REG_SET_FIELD(tmp, INPUT_CSC_CONTROL, INPUT_CSC_GRPH_MODE, 0);
+       tmp = REG_SET_FIELD(tmp, INPUT_CSC_CONTROL, INPUT_CSC_OVL_MODE, 0);
+       WREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset, tmp);
+
+       tmp = RREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset);
+       tmp = REG_SET_FIELD(tmp, PRESCALE_GRPH_CONTROL, GRPH_PRESCALE_BYPASS, 1);
+       WREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset, tmp);
+
+       tmp = RREG32(mmPRESCALE_OVL_CONTROL + amdgpu_crtc->crtc_offset);
+       tmp = REG_SET_FIELD(tmp, PRESCALE_OVL_CONTROL, OVL_PRESCALE_BYPASS, 1);
+       WREG32(mmPRESCALE_OVL_CONTROL + amdgpu_crtc->crtc_offset, tmp);
+
+       tmp = RREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset);
+       tmp = REG_SET_FIELD(tmp, INPUT_GAMMA_CONTROL, GRPH_INPUT_GAMMA_MODE, 0);
+       tmp = REG_SET_FIELD(tmp, INPUT_GAMMA_CONTROL, OVL_INPUT_GAMMA_MODE, 0);
+       WREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset, tmp);
+
+       WREG32(mmDC_LUT_CONTROL + amdgpu_crtc->crtc_offset, 0);
+
+       WREG32(mmDC_LUT_BLACK_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0);
+       WREG32(mmDC_LUT_BLACK_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0);
+       WREG32(mmDC_LUT_BLACK_OFFSET_RED + amdgpu_crtc->crtc_offset, 0);
+
+       WREG32(mmDC_LUT_WHITE_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0xffff);
+       WREG32(mmDC_LUT_WHITE_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0xffff);
+       WREG32(mmDC_LUT_WHITE_OFFSET_RED + amdgpu_crtc->crtc_offset, 0xffff);
+
+       WREG32(mmDC_LUT_RW_MODE + amdgpu_crtc->crtc_offset, 0);
+       WREG32(mmDC_LUT_WRITE_EN_MASK + amdgpu_crtc->crtc_offset, 0x00000007);
+
+       WREG32(mmDC_LUT_RW_INDEX + amdgpu_crtc->crtc_offset, 0);
+       for (i = 0; i < 256; i++) {
+               WREG32(mmDC_LUT_30_COLOR + amdgpu_crtc->crtc_offset,
+                      (amdgpu_crtc->lut_r[i] << 20) |
+                      (amdgpu_crtc->lut_g[i] << 10) |
+                      (amdgpu_crtc->lut_b[i] << 0));
+       }
+
+       tmp = RREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset);
+       tmp = REG_SET_FIELD(tmp, DEGAMMA_CONTROL, GRPH_DEGAMMA_MODE, 0);
+       tmp = REG_SET_FIELD(tmp, DEGAMMA_CONTROL, OVL_DEGAMMA_MODE, 0);
+       tmp = REG_SET_FIELD(tmp, DEGAMMA_CONTROL, CURSOR_DEGAMMA_MODE, 0);
+       WREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset, tmp);
+
+       tmp = RREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset);
+       tmp = REG_SET_FIELD(tmp, GAMUT_REMAP_CONTROL, GRPH_GAMUT_REMAP_MODE, 0);
+       tmp = REG_SET_FIELD(tmp, GAMUT_REMAP_CONTROL, OVL_GAMUT_REMAP_MODE, 0);
+       WREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset, tmp);
+
+       tmp = RREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset);
+       tmp = REG_SET_FIELD(tmp, REGAMMA_CONTROL, GRPH_REGAMMA_MODE, 0);
+       tmp = REG_SET_FIELD(tmp, REGAMMA_CONTROL, OVL_REGAMMA_MODE, 0);
+       WREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset, tmp);
+
+       tmp = RREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset);
+       tmp = REG_SET_FIELD(tmp, OUTPUT_CSC_CONTROL, OUTPUT_CSC_GRPH_MODE, 0);
+       tmp = REG_SET_FIELD(tmp, OUTPUT_CSC_CONTROL, OUTPUT_CSC_OVL_MODE, 0);
+       WREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset, tmp);
+
+       /* XXX match this to the depth of the crtc fmt block, move to modeset? */
+       WREG32(mmDENORM_CONTROL + amdgpu_crtc->crtc_offset, 0);
+       /* XXX this only needs to be programmed once per crtc at startup,
+        * not sure where the best place for it is
+        */
+       tmp = RREG32(mmALPHA_CONTROL + amdgpu_crtc->crtc_offset);
+       tmp = REG_SET_FIELD(tmp, ALPHA_CONTROL, CURSOR_ALPHA_BLND_ENA, 1);
+       WREG32(mmALPHA_CONTROL + amdgpu_crtc->crtc_offset, tmp);
+}
+
+static int dce_v10_0_pick_dig_encoder(struct drm_encoder *encoder)
+{
+       struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+       struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
+
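+       /* Each UNIPHY block drives two DIG encoders: link A maps to the
+        * even-numbered DIG and link B to the odd one, e.g. UNIPHY1
+        * link B uses DIG3.
+        */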
+       switch (amdgpu_encoder->encoder_id) {
+       case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+               if (dig->linkb)
+                       return 1;
+               else
+                       return 0;
+               break;
+       case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+               if (dig->linkb)
+                       return 3;
+               else
+                       return 2;
+               break;
+       case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+               if (dig->linkb)
+                       return 5;
+               else
+                       return 4;
+               break;
+       case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
+               return 6;
+               break;
+       default:
+               DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id);
+               return 0;
+       }
+}
+
+/**
+ * dce_v10_0_pick_pll - Allocate a PPLL for use by the crtc.
+ *
+ * @crtc: drm crtc
+ *
+ * Returns the PPLL (Pixel PLL) to be used by the crtc.  For DP monitors
+ * a single PPLL can be used for all DP crtcs/encoders.  For non-DP
+ * monitors a dedicated PPLL must be used.  If a particular board has
+ * an external DP PLL, return ATOM_PPLL_INVALID to skip PLL programming
+ * as there is no need to program the PLL itself.  If we are not able to
+ * allocate a PLL, return ATOM_PPLL_INVALID to skip PLL programming to
+ * avoid messing up an existing monitor.
+ *
+ * Asic specific PLL information
+ *
+ * DCE 10.x
+ * Tonga
+ * - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP)
+ * CI
+ * - PPLL0, PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC
+ *
+ */
+static u32 dce_v10_0_pick_pll(struct drm_crtc *crtc)
+{
+       struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+       struct drm_device *dev = crtc->dev;
+       struct amdgpu_device *adev = dev->dev_private;
+       u32 pll_in_use;
+       int pll;
+
+       if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder))) {
+               if (adev->clock.dp_extclk)
+                       /* skip PPLL programming if using ext clock */
+                       return ATOM_PPLL_INVALID;
+               else {
+                       /* use the same PPLL for all DP monitors */
+                       pll = amdgpu_pll_get_shared_dp_ppll(crtc);
+                       if (pll != ATOM_PPLL_INVALID)
+                               return pll;
+               }
+       } else {
+               /* use the same PPLL for all monitors with the same clock */
+               pll = amdgpu_pll_get_shared_nondp_ppll(crtc);
+               if (pll != ATOM_PPLL_INVALID)
+                       return pll;
+       }
+
+       /* DCE10 has PPLL0, PPLL1, and PPLL2 */
+       pll_in_use = amdgpu_pll_get_use_mask(crtc);
+       if (!(pll_in_use & (1 << ATOM_PPLL2)))
+               return ATOM_PPLL2;
+       if (!(pll_in_use & (1 << ATOM_PPLL1)))
+               return ATOM_PPLL1;
+       if (!(pll_in_use & (1 << ATOM_PPLL0)))
+               return ATOM_PPLL0;
+       DRM_ERROR("unable to allocate a PPLL\n");
+       return ATOM_PPLL_INVALID;
+}
+
+static void dce_v10_0_lock_cursor(struct drm_crtc *crtc, bool lock)
+{
+       struct amdgpu_device *adev = crtc->dev->dev_private;
+       struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+       uint32_t cur_lock;
+
+       cur_lock = RREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset);
+       if (lock)
+               cur_lock = REG_SET_FIELD(cur_lock, CUR_UPDATE, CURSOR_UPDATE_LOCK, 1);
+       else
+               cur_lock = REG_SET_FIELD(cur_lock, CUR_UPDATE, CURSOR_UPDATE_LOCK, 0);
+       WREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset, cur_lock);
+}
+
+static void dce_v10_0_hide_cursor(struct drm_crtc *crtc)
+{
+       struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+       struct amdgpu_device *adev = crtc->dev->dev_private;
+       u32 tmp;
+
+       tmp = RREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset);
+       tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_EN, 0);
+       WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp);
+}
+
+static void dce_v10_0_show_cursor(struct drm_crtc *crtc)
+{
+       struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+       struct amdgpu_device *adev = crtc->dev->dev_private;
+       u32 tmp;
+
+       tmp = RREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset);
+       tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_EN, 1);
+       tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_MODE, 2);
+       WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp);
+}
+
+static void dce_v10_0_set_cursor(struct drm_crtc *crtc, struct drm_gem_object *obj,
+                             uint64_t gpu_addr)
+{
+       struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+       struct amdgpu_device *adev = crtc->dev->dev_private;
+
+       WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
+              upper_32_bits(gpu_addr));
+       WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
+              lower_32_bits(gpu_addr));
+}
+
+static int dce_v10_0_crtc_cursor_move(struct drm_crtc *crtc,
+                                    int x, int y)
+{
+       struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+       struct amdgpu_device *adev = crtc->dev->dev_private;
+       int xorigin = 0, yorigin = 0;
+
+       /* avivo cursors are offset into the total surface */
+       x += crtc->x;
+       y += crtc->y;
+       DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
+
+       if (x < 0) {
+               xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
+               x = 0;
+       }
+       if (y < 0) {
+               yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
+               y = 0;
+       }
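+       /* negative x/y are clamped to zero and the overhang is carried in
+        * the cursor hot spot, so the image can still extend past the
+        * top/left edge of the screen
+        */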
+
+       dce_v10_0_lock_cursor(crtc, true);
+       WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
+       WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
+       WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
+              ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
+       dce_v10_0_lock_cursor(crtc, false);
+
+       return 0;
+}
+
+static int dce_v10_0_crtc_cursor_set(struct drm_crtc *crtc,
+                                   struct drm_file *file_priv,
+                                   uint32_t handle,
+                                   uint32_t width,
+                                   uint32_t height)
+{
+       struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+       struct drm_gem_object *obj;
+       struct amdgpu_bo *robj;
+       uint64_t gpu_addr;
+       int ret;
+
+       if (!handle) {
+               /* turn off cursor */
+               dce_v10_0_hide_cursor(crtc);
+               obj = NULL;
+               goto unpin;
+       }
+
+       if ((width > amdgpu_crtc->max_cursor_width) ||
+           (height > amdgpu_crtc->max_cursor_height)) {
+               DRM_ERROR("bad cursor width or height %d x %d\n", width, height);
+               return -EINVAL;
+       }
+
+       obj = drm_gem_object_lookup(crtc->dev, file_priv, handle);
+       if (!obj) {
+               DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, amdgpu_crtc->crtc_id);
+               return -ENOENT;
+       }
+
+       robj = gem_to_amdgpu_bo(obj);
+       ret = amdgpu_bo_reserve(robj, false);
+       if (unlikely(ret != 0))
+               goto fail;
+       ret = amdgpu_bo_pin_restricted(robj, AMDGPU_GEM_DOMAIN_VRAM,
+                                      0, &gpu_addr);
+       amdgpu_bo_unreserve(robj);
+       if (ret)
+               goto fail;
+
+       amdgpu_crtc->cursor_width = width;
+       amdgpu_crtc->cursor_height = height;
+
+       dce_v10_0_lock_cursor(crtc, true);
+       dce_v10_0_set_cursor(crtc, obj, gpu_addr);
+       dce_v10_0_show_cursor(crtc);
+       dce_v10_0_lock_cursor(crtc, false);
+
+unpin:
+       if (amdgpu_crtc->cursor_bo) {
+               robj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
+               ret = amdgpu_bo_reserve(robj, false);
+               if (likely(ret == 0)) {
+                       amdgpu_bo_unpin(robj);
+                       amdgpu_bo_unreserve(robj);
+               }
+               drm_gem_object_unreference_unlocked(amdgpu_crtc->cursor_bo);
+       }
+
+       amdgpu_crtc->cursor_bo = obj;
+       return 0;
+fail:
+       drm_gem_object_unreference_unlocked(obj);
+
+       return ret;
+}
+
+static void dce_v10_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
+                                   u16 *blue, uint32_t start, uint32_t size)
+{
+       struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+       int end = (start + size > 256) ? 256 : start + size, i;
+
+       /* userspace palettes are always correct as is */
+       for (i = start; i < end; i++) {
+               amdgpu_crtc->lut_r[i] = red[i] >> 6;
+               amdgpu_crtc->lut_g[i] = green[i] >> 6;
+               amdgpu_crtc->lut_b[i] = blue[i] >> 6;
+       }
+       dce_v10_0_crtc_load_lut(crtc);
+}
+
+static void dce_v10_0_crtc_destroy(struct drm_crtc *crtc)
+{
+       struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+
+       drm_crtc_cleanup(crtc);
+       destroy_workqueue(amdgpu_crtc->pflip_queue);
+       kfree(amdgpu_crtc);
+}
+
+static const struct drm_crtc_funcs dce_v10_0_crtc_funcs = {
+       .cursor_set = dce_v10_0_crtc_cursor_set,
+       .cursor_move = dce_v10_0_crtc_cursor_move,
+       .gamma_set = dce_v10_0_crtc_gamma_set,
+       .set_config = amdgpu_crtc_set_config,
+       .destroy = dce_v10_0_crtc_destroy,
+       .page_flip = amdgpu_crtc_page_flip,
+};
+
+static void dce_v10_0_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+       struct drm_device *dev = crtc->dev;
+       struct amdgpu_device *adev = dev->dev_private;
+       struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+
+       switch (mode) {
+       case DRM_MODE_DPMS_ON:
+               amdgpu_crtc->enabled = true;
+               amdgpu_atombios_crtc_enable(crtc, ATOM_ENABLE);
+               dce_v10_0_vga_enable(crtc, true);
+               amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
+               dce_v10_0_vga_enable(crtc, false);
+               drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id);
+               dce_v10_0_crtc_load_lut(crtc);
+               break;
+       case DRM_MODE_DPMS_STANDBY:
+       case DRM_MODE_DPMS_SUSPEND:
+       case DRM_MODE_DPMS_OFF:
+               drm_vblank_pre_modeset(dev, amdgpu_crtc->crtc_id);
+               if (amdgpu_crtc->enabled) {
+                       dce_v10_0_vga_enable(crtc, true);
+                       amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE);
+                       dce_v10_0_vga_enable(crtc, false);
+               }
+               amdgpu_atombios_crtc_enable(crtc, ATOM_DISABLE);
+               amdgpu_crtc->enabled = false;
+               break;
+       }
+       /* adjust pm to dpms */
+       amdgpu_pm_compute_clocks(adev);
+}
+
+static void dce_v10_0_crtc_prepare(struct drm_crtc *crtc)
+{
+       /* disable crtc pair power gating before programming */
+       amdgpu_atombios_crtc_powergate(crtc, ATOM_DISABLE);
+       amdgpu_atombios_crtc_lock(crtc, ATOM_ENABLE);
+       dce_v10_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+}
+
+static void dce_v10_0_crtc_commit(struct drm_crtc *crtc)
+{
+       dce_v10_0_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
+       amdgpu_atombios_crtc_lock(crtc, ATOM_DISABLE);
+}
+
+static void dce_v10_0_crtc_disable(struct drm_crtc *crtc)
+{
+       struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+       struct drm_device *dev = crtc->dev;
+       struct amdgpu_device *adev = dev->dev_private;
+       struct amdgpu_atom_ss ss;
+       int i;
+
+       dce_v10_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+       if (crtc->primary->fb) {
+               int r;
+               struct amdgpu_framebuffer *amdgpu_fb;
+               struct amdgpu_bo *rbo;
+
+               amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
+               rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
+               r = amdgpu_bo_reserve(rbo, false);
+               if (unlikely(r)) {
+                       DRM_ERROR("failed to reserve rbo before unpin\n");
+               } else {
+                       amdgpu_bo_unpin(rbo);
+                       amdgpu_bo_unreserve(rbo);
+               }
+       }
+       /* disable the GRPH */
+       dce_v10_0_grph_enable(crtc, false);
+
+       amdgpu_atombios_crtc_powergate(crtc, ATOM_ENABLE);
+
+       for (i = 0; i < adev->mode_info.num_crtc; i++) {
+               if (adev->mode_info.crtcs[i] &&
+                   adev->mode_info.crtcs[i]->enabled &&
+                   i != amdgpu_crtc->crtc_id &&
+                   amdgpu_crtc->pll_id == adev->mode_info.crtcs[i]->pll_id) {
+                       /* another crtc is using this pll; don't turn
+                        * it off
+                        */
+                       goto done;
+               }
+       }
+
+       switch (amdgpu_crtc->pll_id) {
+       case ATOM_PPLL0:
+       case ATOM_PPLL1:
+       case ATOM_PPLL2:
+               /* disable the ppll */
+               amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id,
+                                         0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
+               break;
+       default:
+               break;
+       }
+done:
+       amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
+       amdgpu_crtc->adjusted_clock = 0;
+       amdgpu_crtc->encoder = NULL;
+       amdgpu_crtc->connector = NULL;
+}
+
+static int dce_v10_0_crtc_mode_set(struct drm_crtc *crtc,
+                                 struct drm_display_mode *mode,
+                                 struct drm_display_mode *adjusted_mode,
+                                 int x, int y, struct drm_framebuffer *old_fb)
+{
+       struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+
+       if (!amdgpu_crtc->adjusted_clock)
+               return -EINVAL;
+
+       amdgpu_atombios_crtc_set_pll(crtc, adjusted_mode);
+       amdgpu_atombios_crtc_set_dtd_timing(crtc, adjusted_mode);
+       dce_v10_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
+       amdgpu_atombios_crtc_overscan_setup(crtc, mode, adjusted_mode);
+       amdgpu_atombios_crtc_scaler_setup(crtc);
+       /* update the hw version for dpm */
+       amdgpu_crtc->hw_mode = *adjusted_mode;
+
+       return 0;
+}
+
+static bool dce_v10_0_crtc_mode_fixup(struct drm_crtc *crtc,
+                                    const struct drm_display_mode *mode,
+                                    struct drm_display_mode *adjusted_mode)
+{
+       struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+       struct drm_device *dev = crtc->dev;
+       struct drm_encoder *encoder;
+
+       /* assign the encoder to the amdgpu crtc to avoid repeated lookups later */
+       list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+               if (encoder->crtc == crtc) {
+                       amdgpu_crtc->encoder = encoder;
+                       amdgpu_crtc->connector = amdgpu_get_connector_for_encoder(encoder);
+                       break;
+               }
+       }
+       if ((amdgpu_crtc->encoder == NULL) || (amdgpu_crtc->connector == NULL)) {
+               amdgpu_crtc->encoder = NULL;
+               amdgpu_crtc->connector = NULL;
+               return false;
+       }
+       if (!amdgpu_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
+               return false;
+       if (amdgpu_atombios_crtc_prepare_pll(crtc, adjusted_mode))
+               return false;
+       /* pick pll */
+       amdgpu_crtc->pll_id = dce_v10_0_pick_pll(crtc);
+       /* if we can't get a PPLL for a non-DP encoder, fail */
+       if ((amdgpu_crtc->pll_id == ATOM_PPLL_INVALID) &&
+           !ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder)))
+               return false;
+
+       return true;
+}
+
+static int dce_v10_0_crtc_set_base(struct drm_crtc *crtc, int x, int y,
+                                 struct drm_framebuffer *old_fb)
+{
+       return dce_v10_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
+}
+
+static int dce_v10_0_crtc_set_base_atomic(struct drm_crtc *crtc,
+                                        struct drm_framebuffer *fb,
+                                        int x, int y, enum mode_set_atomic state)
+{
+       return dce_v10_0_crtc_do_set_base(crtc, fb, x, y, 1);
+}
+
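+/* crtc hooks used by the drm crtc helper library for modeset, dpms and fb updates */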
+static const struct drm_crtc_helper_funcs dce_v10_0_crtc_helper_funcs = {
+       .dpms = dce_v10_0_crtc_dpms,
+       .mode_fixup = dce_v10_0_crtc_mode_fixup,
+       .mode_set = dce_v10_0_crtc_mode_set,
+       .mode_set_base = dce_v10_0_crtc_set_base,
+       .mode_set_base_atomic = dce_v10_0_crtc_set_base_atomic,
+       .prepare = dce_v10_0_crtc_prepare,
+       .commit = dce_v10_0_crtc_commit,
+       .load_lut = dce_v10_0_crtc_load_lut,
+       .disable = dce_v10_0_crtc_disable,
+};
+
+static int dce_v10_0_crtc_init(struct amdgpu_device *adev, int index)
+{
+       struct amdgpu_crtc *amdgpu_crtc;
+       int i;
+
+       amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) +
+                             (AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
+       if (amdgpu_crtc == NULL)
+               return -ENOMEM;
+
+       drm_crtc_init(adev->ddev, &amdgpu_crtc->base, &dce_v10_0_crtc_funcs);
+
+       drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
+       amdgpu_crtc->crtc_id = index;
+       amdgpu_crtc->pflip_queue = create_singlethread_workqueue("amdgpu-pageflip-queue");
+       adev->mode_info.crtcs[index] = amdgpu_crtc;
+
+       amdgpu_crtc->max_cursor_width = 128;
+       amdgpu_crtc->max_cursor_height = 128;
+       adev->ddev->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
+       adev->ddev->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;
+
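+       /* seed a linear gamma LUT; the 8-bit index is shifted up to the
+        * wider range used by the hardware LUT
+        */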
+       for (i = 0; i < 256; i++) {
+               amdgpu_crtc->lut_r[i] = i << 2;
+               amdgpu_crtc->lut_g[i] = i << 2;
+               amdgpu_crtc->lut_b[i] = i << 2;
+       }
+
+       switch (amdgpu_crtc->crtc_id) {
+       case 0:
+       default:
+               amdgpu_crtc->crtc_offset = CRTC0_REGISTER_OFFSET;
+               break;
+       case 1:
+               amdgpu_crtc->crtc_offset = CRTC1_REGISTER_OFFSET;
+               break;
+       case 2:
+               amdgpu_crtc->crtc_offset = CRTC2_REGISTER_OFFSET;
+               break;
+       case 3:
+               amdgpu_crtc->crtc_offset = CRTC3_REGISTER_OFFSET;
+               break;
+       case 4:
+               amdgpu_crtc->crtc_offset = CRTC4_REGISTER_OFFSET;
+               break;
+       case 5:
+               amdgpu_crtc->crtc_offset = CRTC5_REGISTER_OFFSET;
+               break;
+       }
+
+       amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
+       amdgpu_crtc->adjusted_clock = 0;
+       amdgpu_crtc->encoder = NULL;
+       amdgpu_crtc->connector = NULL;
+       drm_crtc_helper_add(&amdgpu_crtc->base, &dce_v10_0_crtc_helper_funcs);
+
+       return 0;
+}
+
+static int dce_v10_0_early_init(struct amdgpu_device *adev)
+{
+       adev->audio_endpt_rreg = &dce_v10_0_audio_endpt_rreg;
+       adev->audio_endpt_wreg = &dce_v10_0_audio_endpt_wreg;
+
+       dce_v10_0_set_display_funcs(adev);
+       dce_v10_0_set_irq_funcs(adev);
+
+       switch (adev->asic_type) {
+       case CHIP_TONGA:
+               adev->mode_info.num_crtc = 6; /* XXX 7??? */
+               adev->mode_info.num_hpd = 6;
+               adev->mode_info.num_dig = 7;
+               break;
+       default:
+               /* FIXME: not supported yet */
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int dce_v10_0_sw_init(struct amdgpu_device *adev)
+{
+       int r, i;
+
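+       /* crtc vblank/vline interrupts use IV src ids 1..num_crtc */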
+       for (i = 0; i < adev->mode_info.num_crtc; i++) {
+               r = amdgpu_irq_add_id(adev, i + 1, &adev->crtc_irq);
+               if (r)
+                       return r;
+       }
+
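+       /* pageflip interrupts use IV src ids 8, 10, ..., 18, one per crtc */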
+       for (i = 8; i < 20; i += 2) {
+               r = amdgpu_irq_add_id(adev, i, &adev->pageflip_irq);
+               if (r)
+                       return r;
+       }
+
+       /* HPD hotplug */
+       r = amdgpu_irq_add_id(adev, 42, &adev->hpd_irq);
+       if (r)
+               return r;
+
+       adev->mode_info.mode_config_initialized = true;
+
+       adev->ddev->mode_config.funcs = &amdgpu_mode_funcs;
+
+       adev->ddev->mode_config.max_width = 16384;
+       adev->ddev->mode_config.max_height = 16384;
+
+       adev->ddev->mode_config.preferred_depth = 24;
+       adev->ddev->mode_config.prefer_shadow = 1;
+
+       adev->ddev->mode_config.fb_base = adev->mc.aper_base;
+
+       r = amdgpu_modeset_create_props(adev);
+       if (r)
+               return r;
+
+       adev->ddev->mode_config.max_width = 16384;
+       adev->ddev->mode_config.max_height = 16384;
+
+       /* allocate crtcs */
+       for (i = 0; i < adev->mode_info.num_crtc; i++) {
+               r = dce_v10_0_crtc_init(adev, i);
+               if (r)
+                       return r;
+       }
+
+       if (amdgpu_atombios_get_connector_info_from_object_table(adev))
+               amdgpu_print_display_setup(adev->ddev);
+       else
+               return -EINVAL;
+
+       /* setup afmt */
+       dce_v10_0_afmt_init(adev);
+
+       r = dce_v10_0_audio_init(adev);
+       if (r)
+               return r;
+
+       drm_kms_helper_poll_init(adev->ddev);
+
+       return r;
+}
+
+static int dce_v10_0_sw_fini(struct amdgpu_device *adev)
+{
+       kfree(adev->mode_info.bios_hardcoded_edid);
+
+       drm_kms_helper_poll_fini(adev->ddev);
+
+       dce_v10_0_audio_fini(adev);
+
+       dce_v10_0_afmt_fini(adev);
+
+       drm_mode_config_cleanup(adev->ddev);
+       adev->mode_info.mode_config_initialized = false;
+
+       return 0;
+}
+
+static int dce_v10_0_hw_init(struct amdgpu_device *adev)
+{
+       int i;
+
+       dce_v10_0_init_golden_registers(adev);
+
+       /* init dig PHYs, disp eng pll */
+       amdgpu_atombios_encoder_init_dig(adev);
+       amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk);
+
+       /* initialize hpd */
+       dce_v10_0_hpd_init(adev);
+
+       for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
+               dce_v10_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
+       }
+
+       return 0;
+}
+
+static int dce_v10_0_hw_fini(struct amdgpu_device *adev)
+{
+       int i;
+
+       dce_v10_0_hpd_fini(adev);
+
+       for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
+               dce_v10_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
+       }
+
+       return 0;
+}
+
+static int dce_v10_0_suspend(struct amdgpu_device *adev)
+{
+       struct drm_connector *connector;
+
+       drm_kms_helper_poll_disable(adev->ddev);
+
+       /* turn off display hw */
+       list_for_each_entry(connector, &adev->ddev->mode_config.connector_list, head) {
+               drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+       }
+
+       amdgpu_atombios_scratch_regs_save(adev);
+
+       dce_v10_0_hpd_fini(adev);
+
+       return 0;
+}
+
+static int dce_v10_0_resume(struct amdgpu_device *adev)
+{
+       struct drm_connector *connector;
+
+       dce_v10_0_init_golden_registers(adev);
+
+       amdgpu_atombios_scratch_regs_restore(adev);
+
+       /* init dig PHYs, disp eng pll */
+       amdgpu_atombios_encoder_init_dig(adev);
+       amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk);
+       /* turn on the BL */
+       if (adev->mode_info.bl_encoder) {
+               u8 bl_level = amdgpu_display_backlight_get_level(adev,
+                                                                 adev->mode_info.bl_encoder);
+               amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder,
+                                                   bl_level);
+       }
+
+       /* initialize hpd */
+       dce_v10_0_hpd_init(adev);
+
+       /* blat the mode back in */
+       drm_helper_resume_force_mode(adev->ddev);
+       /* turn on display hw */
+       list_for_each_entry(connector, &adev->ddev->mode_config.connector_list, head) {
+               drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
+       }
+
+       drm_kms_helper_poll_enable(adev->ddev);
+
+       return 0;
+}
+
+static bool dce_v10_0_is_idle(struct amdgpu_device *adev)
+{
+       /* XXX todo */
+       return true;
+}
+
+static int dce_v10_0_wait_for_idle(struct amdgpu_device *adev)
+{
+       /* XXX todo */
+       return 0;
+}
+
+static void dce_v10_0_print_status(struct amdgpu_device *adev)
+{
+       dev_info(adev->dev, "DCE 10.x registers\n");
+       /* XXX todo */
+}
+
+static int dce_v10_0_soft_reset(struct amdgpu_device *adev)
+{
+       u32 srbm_soft_reset = 0, tmp;
+
+       if (dce_v10_0_is_display_hung(adev))
+               srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK;
+
+       if (srbm_soft_reset) {
+               dce_v10_0_print_status(adev);
+
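+               /* assert the DC soft reset, read back to post the write,
+                * wait for the reset to take effect, then deassert it
+                */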
+               tmp = RREG32(mmSRBM_SOFT_RESET);
+               tmp |= srbm_soft_reset;
+               dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
+               WREG32(mmSRBM_SOFT_RESET, tmp);
+               tmp = RREG32(mmSRBM_SOFT_RESET);
+
+               udelay(50);
+
+               tmp &= ~srbm_soft_reset;
+               WREG32(mmSRBM_SOFT_RESET, tmp);
+               tmp = RREG32(mmSRBM_SOFT_RESET);
+
+               /* Wait a little for things to settle down */
+               udelay(50);
+               dce_v10_0_print_status(adev);
+       }
+       return 0;
+}
+
+static void dce_v10_0_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
+                                                    int crtc,
+                                                    enum amdgpu_interrupt_state state)
+{
+       u32 lb_interrupt_mask;
+
+       if (crtc >= adev->mode_info.num_crtc) {
+               DRM_DEBUG("invalid crtc %d\n", crtc);
+               return;
+       }
+
+       switch (state) {
+       case AMDGPU_IRQ_STATE_DISABLE:
+               lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]);
+               lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK,
+                                                 VBLANK_INTERRUPT_MASK, 0);
+               WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask);
+               break;
+       case AMDGPU_IRQ_STATE_ENABLE:
+               lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]);
+               lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK,
+                                                 VBLANK_INTERRUPT_MASK, 1);
+               WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask);
+               break;
+       default:
+               break;
+       }
+}
+
+static void dce_v10_0_set_crtc_vline_interrupt_state(struct amdgpu_device *adev,
+                                                   int crtc,
+                                                   enum amdgpu_interrupt_state state)
+{
+       u32 lb_interrupt_mask;
+
+       if (crtc >= adev->mode_info.num_crtc) {
+               DRM_DEBUG("invalid crtc %d\n", crtc);
+               return;
+       }
+
+       switch (state) {
+       case AMDGPU_IRQ_STATE_DISABLE:
+               lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]);
+               lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK,
+                                                 VLINE_INTERRUPT_MASK, 0);
+               WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask);
+               break;
+       case AMDGPU_IRQ_STATE_ENABLE:
+               lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]);
+               lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK,
+                                                 VLINE_INTERRUPT_MASK, 1);
+               WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask);
+               break;
+       default:
+               break;
+       }
+}
+
+static int dce_v10_0_set_hpd_irq_state(struct amdgpu_device *adev,
+                                      struct amdgpu_irq_src *source,
+                                      unsigned hpd,
+                                      enum amdgpu_interrupt_state state)
+{
+       u32 tmp;
+
+       if (hpd >= adev->mode_info.num_hpd) {
+               DRM_DEBUG("invalid hpd %d\n", hpd);
+               return 0;
+       }
+
+       switch (state) {
+       case AMDGPU_IRQ_STATE_DISABLE:
+               tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]);
+               tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 0);
+               WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp);
+               break;
+       case AMDGPU_IRQ_STATE_ENABLE:
+               tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]);
+               tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 1);
+               WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp);
+               break;
+       default:
+               break;
+       }
+
+       return 0;
+}
+
+static int dce_v10_0_set_crtc_irq_state(struct amdgpu_device *adev,
+                                       struct amdgpu_irq_src *source,
+                                       unsigned type,
+                                       enum amdgpu_interrupt_state state)
+{
+       switch (type) {
+       case AMDGPU_CRTC_IRQ_VBLANK1:
+               dce_v10_0_set_crtc_vblank_interrupt_state(adev, 0, state);
+               break;
+       case AMDGPU_CRTC_IRQ_VBLANK2:
+               dce_v10_0_set_crtc_vblank_interrupt_state(adev, 1, state);
+               break;
+       case AMDGPU_CRTC_IRQ_VBLANK3:
+               dce_v10_0_set_crtc_vblank_interrupt_state(adev, 2, state);
+               break;
+       case AMDGPU_CRTC_IRQ_VBLANK4:
+               dce_v10_0_set_crtc_vblank_interrupt_state(adev, 3, state);
+               break;
+       case AMDGPU_CRTC_IRQ_VBLANK5:
+               dce_v10_0_set_crtc_vblank_interrupt_state(adev, 4, state);
+               break;
+       case AMDGPU_CRTC_IRQ_VBLANK6:
+               dce_v10_0_set_crtc_vblank_interrupt_state(adev, 5, state);
+               break;
+       case AMDGPU_CRTC_IRQ_VLINE1:
+               dce_v10_0_set_crtc_vline_interrupt_state(adev, 0, state);
+               break;
+       case AMDGPU_CRTC_IRQ_VLINE2:
+               dce_v10_0_set_crtc_vline_interrupt_state(adev, 1, state);
+               break;
+       case AMDGPU_CRTC_IRQ_VLINE3:
+               dce_v10_0_set_crtc_vline_interrupt_state(adev, 2, state);
+               break;
+       case AMDGPU_CRTC_IRQ_VLINE4:
+               dce_v10_0_set_crtc_vline_interrupt_state(adev, 3, state);
+               break;
+       case AMDGPU_CRTC_IRQ_VLINE5:
+               dce_v10_0_set_crtc_vline_interrupt_state(adev, 4, state);
+               break;
+       case AMDGPU_CRTC_IRQ_VLINE6:
+               dce_v10_0_set_crtc_vline_interrupt_state(adev, 5, state);
+               break;
+       default:
+               break;
+       }
+       return 0;
+}
+
+static int dce_v10_0_set_pageflip_irq_state(struct amdgpu_device *adev,
+                                           struct amdgpu_irq_src *src,
+                                           unsigned type,
+                                           enum amdgpu_interrupt_state state)
+{
+       u32 reg, reg_block;
+       /* now deal with page flip IRQ */
+       switch (type) {
+       case AMDGPU_PAGEFLIP_IRQ_D1:
+               reg_block = CRTC0_REGISTER_OFFSET;
+               break;
+       case AMDGPU_PAGEFLIP_IRQ_D2:
+               reg_block = CRTC1_REGISTER_OFFSET;
+               break;
+       case AMDGPU_PAGEFLIP_IRQ_D3:
+               reg_block = CRTC2_REGISTER_OFFSET;
+               break;
+       case AMDGPU_PAGEFLIP_IRQ_D4:
+               reg_block = CRTC3_REGISTER_OFFSET;
+               break;
+       case AMDGPU_PAGEFLIP_IRQ_D5:
+               reg_block = CRTC4_REGISTER_OFFSET;
+               break;
+       case AMDGPU_PAGEFLIP_IRQ_D6:
+               reg_block = CRTC5_REGISTER_OFFSET;
+               break;
+       default:
+               DRM_ERROR("invalid pageflip crtc %d\n", type);
+               return -EINVAL;
+       }
+
+       reg = RREG32(mmGRPH_INTERRUPT_CONTROL + reg_block);
+       if (state == AMDGPU_IRQ_STATE_DISABLE)
+               WREG32(mmGRPH_INTERRUPT_CONTROL + reg_block, reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
+       else
+               WREG32(mmGRPH_INTERRUPT_CONTROL + reg_block, reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
+
+       return 0;
+}
+
+static int dce_v10_0_pageflip_irq(struct amdgpu_device *adev,
+                                 struct amdgpu_irq_src *source,
+                                 struct amdgpu_iv_entry *entry)
+{
+       int reg_block;
+       unsigned long flags;
+       unsigned crtc_id;
+       struct amdgpu_crtc *amdgpu_crtc;
+       struct amdgpu_flip_work *works;
+
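+       /* pageflip IV src ids start at 8 and step by two per crtc */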
+       crtc_id = (entry->src_id - 8) >> 1;
+       amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
+
+       /* ack the interrupt */
+       switch (crtc_id) {
+       case AMDGPU_PAGEFLIP_IRQ_D1:
+               reg_block = CRTC0_REGISTER_OFFSET;
+               break;
+       case AMDGPU_PAGEFLIP_IRQ_D2:
+               reg_block = CRTC1_REGISTER_OFFSET;
+               break;
+       case AMDGPU_PAGEFLIP_IRQ_D3:
+               reg_block = CRTC2_REGISTER_OFFSET;
+               break;
+       case AMDGPU_PAGEFLIP_IRQ_D4:
+               reg_block = CRTC3_REGISTER_OFFSET;
+               break;
+       case AMDGPU_PAGEFLIP_IRQ_D5:
+               reg_block = CRTC4_REGISTER_OFFSET;
+               break;
+       case AMDGPU_PAGEFLIP_IRQ_D6:
+               reg_block = CRTC5_REGISTER_OFFSET;
+               break;
+       default:
+               DRM_ERROR("invalid pageflip crtc %d\n", crtc_id);
+               return -EINVAL;
+       }
+
+       if (RREG32(mmGRPH_INTERRUPT_STATUS + reg_block) & GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK)
+               WREG32(mmGRPH_INTERRUPT_STATUS + reg_block, GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK);
+
+       /* the IRQ can fire before the crtc has been fully initialized */
+       if (amdgpu_crtc == NULL)
+               return 0;
+
+       spin_lock_irqsave(&adev->ddev->event_lock, flags);
+       works = amdgpu_crtc->pflip_works;
+       if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
+               DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != "
+                                                "AMDGPU_FLIP_SUBMITTED(%d)\n",
+                                                amdgpu_crtc->pflip_status,
+                                                AMDGPU_FLIP_SUBMITTED);
+               spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+               return 0;
+       }
+
+       /* page flip completed. clean up */
+       amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
+       amdgpu_crtc->pflip_works = NULL;
+
+       /* wake up userspace */
+       if (works->event)
+               drm_send_vblank_event(adev->ddev, crtc_id, works->event);
+
+       spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+
+       drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id);
+       amdgpu_irq_put(adev, &adev->pageflip_irq, crtc_id);
+       queue_work(amdgpu_crtc->pflip_queue, &works->unpin_work);
+
+       return 0;
+}
+
+static void dce_v10_0_hpd_int_ack(struct amdgpu_device *adev,
+                                 int hpd)
+{
+       u32 tmp;
+
+       if (hpd >= adev->mode_info.num_hpd) {
+               DRM_DEBUG("invalid hpd %d\n", hpd);
+               return;
+       }
+
+       tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]);
+       tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_ACK, 1);
+       WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp);
+}
+
+static void dce_v10_0_crtc_vblank_int_ack(struct amdgpu_device *adev,
+                                         int crtc)
+{
+       u32 tmp;
+
+       if (crtc >= adev->mode_info.num_crtc) {
+               DRM_DEBUG("invalid crtc %d\n", crtc);
+               return;
+       }
+
+       tmp = RREG32(mmLB_VBLANK_STATUS + crtc_offsets[crtc]);
+       tmp = REG_SET_FIELD(tmp, LB_VBLANK_STATUS, VBLANK_ACK, 1);
+       WREG32(mmLB_VBLANK_STATUS + crtc_offsets[crtc], tmp);
+}
+
+static void dce_v10_0_crtc_vline_int_ack(struct amdgpu_device *adev,
+                                        int crtc)
+{
+       u32 tmp;
+
+       if (crtc >= adev->mode_info.num_crtc) {
+               DRM_DEBUG("invalid crtc %d\n", crtc);
+               return;
+       }
+
+       tmp = RREG32(mmLB_VLINE_STATUS + crtc_offsets[crtc]);
+       tmp = REG_SET_FIELD(tmp, LB_VLINE_STATUS, VLINE_ACK, 1);
+       WREG32(mmLB_VLINE_STATUS + crtc_offsets[crtc], tmp);
+}
+
+static int dce_v10_0_crtc_irq(struct amdgpu_device *adev,
+                             struct amdgpu_irq_src *source,
+                             struct amdgpu_iv_entry *entry)
+{
+       unsigned crtc = entry->src_id - 1;
+       uint32_t disp_int = RREG32(interrupt_status_offsets[crtc].reg);
+       unsigned irq_type = amdgpu_crtc_idx_to_irq_type(adev, crtc);
+
+       switch (entry->src_data) {
+       case 0: /* vblank */
+               if (disp_int & interrupt_status_offsets[crtc].vblank) {
+                       dce_v10_0_crtc_vblank_int_ack(adev, crtc);
+                       if (amdgpu_irq_enabled(adev, source, irq_type)) {
+                               drm_handle_vblank(adev->ddev, crtc);
+                       }
+                       DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
+               }
+               break;
+       case 1: /* vline */
+               if (disp_int & interrupt_status_offsets[crtc].vline) {
+                       dce_v10_0_crtc_vline_int_ack(adev, crtc);
+                       DRM_DEBUG("IH: D%d vline\n", crtc + 1);
+               }
+               break;
+       default:
+               DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data);
+               break;
+       }
+
+       return 0;
+}
+
+static int dce_v10_0_hpd_irq(struct amdgpu_device *adev,
+                            struct amdgpu_irq_src *source,
+                            struct amdgpu_iv_entry *entry)
+{
+       uint32_t disp_int, mask;
+       unsigned hpd;
+
+       if (entry->src_data >= adev->mode_info.num_hpd) {
+               DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data);
+               return 0;
+       }
+
+       hpd = entry->src_data;
+       disp_int = RREG32(interrupt_status_offsets[hpd].reg);
+       mask = interrupt_status_offsets[hpd].hpd;
+
+       if (disp_int & mask) {
+               dce_v10_0_hpd_int_ack(adev, hpd);
+               schedule_work(&adev->hotplug_work);
+               DRM_DEBUG("IH: HPD%d\n", hpd + 1);
+       }
+
+       return 0;
+}
+
+static int dce_v10_0_set_clockgating_state(struct amdgpu_device *adev,
+                                         enum amdgpu_clockgating_state state)
+{
+       return 0;
+}
+
+static int dce_v10_0_set_powergating_state(struct amdgpu_device *adev,
+                                         enum amdgpu_powergating_state state)
+{
+       return 0;
+}
+
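+/* IP block callbacks used by the amdgpu core to drive DCE through init,
+ * suspend/resume and reset
+ */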
+const struct amdgpu_ip_funcs dce_v10_0_ip_funcs = {
+       .early_init = dce_v10_0_early_init,
+       .late_init = NULL,
+       .sw_init = dce_v10_0_sw_init,
+       .sw_fini = dce_v10_0_sw_fini,
+       .hw_init = dce_v10_0_hw_init,
+       .hw_fini = dce_v10_0_hw_fini,
+       .suspend = dce_v10_0_suspend,
+       .resume = dce_v10_0_resume,
+       .is_idle = dce_v10_0_is_idle,
+       .wait_for_idle = dce_v10_0_wait_for_idle,
+       .soft_reset = dce_v10_0_soft_reset,
+       .print_status = dce_v10_0_print_status,
+       .set_clockgating_state = dce_v10_0_set_clockgating_state,
+       .set_powergating_state = dce_v10_0_set_powergating_state,
+};
+
+static void
+dce_v10_0_encoder_mode_set(struct drm_encoder *encoder,
+                         struct drm_display_mode *mode,
+                         struct drm_display_mode *adjusted_mode)
+{
+       struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+
+       amdgpu_encoder->pixel_clock = adjusted_mode->clock;
+
+       /* need to call this here rather than in prepare() since we need some crtc info */
+       amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
+
+       /* setting up the scaler clears this on some chips */
+       dce_v10_0_set_interleave(encoder->crtc, mode);
+
+       if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
+               dce_v10_0_afmt_enable(encoder, true);
+               dce_v10_0_afmt_setmode(encoder, adjusted_mode);
+       }
+}
+
+static void dce_v10_0_encoder_prepare(struct drm_encoder *encoder)
+{
+       struct amdgpu_device *adev = encoder->dev->dev_private;
+       struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+       struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
+
+       if ((amdgpu_encoder->active_device &
+            (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
+           (amdgpu_encoder_get_dp_bridge_encoder_id(encoder) !=
+            ENCODER_OBJECT_ID_NONE)) {
+               struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
+               if (dig) {
+                       dig->dig_encoder = dce_v10_0_pick_dig_encoder(encoder);
+                       if (amdgpu_encoder->active_device & ATOM_DEVICE_DFP_SUPPORT)
+                               dig->afmt = adev->mode_info.afmt[dig->dig_encoder];
+               }
+       }
+
+       amdgpu_atombios_scratch_regs_lock(adev, true);
+
+       if (connector) {
+               struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
+
+               /* select the clock/data port if it uses a router */
+               if (amdgpu_connector->router.cd_valid)
+                       amdgpu_i2c_router_select_cd_port(amdgpu_connector);
+
+               /* turn eDP panel on for mode set */
+               if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
+                       amdgpu_atombios_encoder_set_edp_panel_power(connector,
+                                                            ATOM_TRANSMITTER_ACTION_POWER_ON);
+       }
+
+       /* this is needed for the pll/ss setup to work correctly in some cases */
+       amdgpu_atombios_encoder_set_crtc_source(encoder);
+       /* set up the FMT blocks */
+       dce_v10_0_program_fmt(encoder);
+}
+
+static void dce_v10_0_encoder_commit(struct drm_encoder *encoder)
+{
+       struct drm_device *dev = encoder->dev;
+       struct amdgpu_device *adev = dev->dev_private;
+
+       /* need to call this here as we need the crtc set up */
+       amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
+       amdgpu_atombios_scratch_regs_lock(adev, false);
+}
+
+static void dce_v10_0_encoder_disable(struct drm_encoder *encoder)
+{
+       struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+       struct amdgpu_encoder_atom_dig *dig;
+
+       amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
+
+       if (amdgpu_atombios_encoder_is_digital(encoder)) {
+               if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
+                       dce_v10_0_afmt_enable(encoder, false);
+               dig = amdgpu_encoder->enc_priv;
+               dig->dig_encoder = -1;
+       }
+       amdgpu_encoder->active_device = 0;
+}
+
+/* these are handled by the primary encoders */
+static void dce_v10_0_ext_prepare(struct drm_encoder *encoder)
+{
+
+}
+
+static void dce_v10_0_ext_commit(struct drm_encoder *encoder)
+{
+
+}
+
+static void
+dce_v10_0_ext_mode_set(struct drm_encoder *encoder,
+                     struct drm_display_mode *mode,
+                     struct drm_display_mode *adjusted_mode)
+{
+
+}
+
+static void dce_v10_0_ext_disable(struct drm_encoder *encoder)
+{
+
+}
+
+static void
+dce_v10_0_ext_dpms(struct drm_encoder *encoder, int mode)
+{
+
+}
+
+static bool dce_v10_0_ext_mode_fixup(struct drm_encoder *encoder,
+                                   const struct drm_display_mode *mode,
+                                   struct drm_display_mode *adjusted_mode)
+{
+       return true;
+}
+
+static const struct drm_encoder_helper_funcs dce_v10_0_ext_helper_funcs = {
+       .dpms = dce_v10_0_ext_dpms,
+       .mode_fixup = dce_v10_0_ext_mode_fixup,
+       .prepare = dce_v10_0_ext_prepare,
+       .mode_set = dce_v10_0_ext_mode_set,
+       .commit = dce_v10_0_ext_commit,
+       .disable = dce_v10_0_ext_disable,
+       /* no detect for TMDS/LVDS yet */
+};
+
+static const struct drm_encoder_helper_funcs dce_v10_0_dig_helper_funcs = {
+       .dpms = amdgpu_atombios_encoder_dpms,
+       .mode_fixup = amdgpu_atombios_encoder_mode_fixup,
+       .prepare = dce_v10_0_encoder_prepare,
+       .mode_set = dce_v10_0_encoder_mode_set,
+       .commit = dce_v10_0_encoder_commit,
+       .disable = dce_v10_0_encoder_disable,
+       .detect = amdgpu_atombios_encoder_dig_detect,
+};
+
+static const struct drm_encoder_helper_funcs dce_v10_0_dac_helper_funcs = {
+       .dpms = amdgpu_atombios_encoder_dpms,
+       .mode_fixup = amdgpu_atombios_encoder_mode_fixup,
+       .prepare = dce_v10_0_encoder_prepare,
+       .mode_set = dce_v10_0_encoder_mode_set,
+       .commit = dce_v10_0_encoder_commit,
+       .detect = amdgpu_atombios_encoder_dac_detect,
+};
+
+static void dce_v10_0_encoder_destroy(struct drm_encoder *encoder)
+{
+       struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+       if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
+               amdgpu_atombios_encoder_fini_backlight(amdgpu_encoder);
+       kfree(amdgpu_encoder->enc_priv);
+       drm_encoder_cleanup(encoder);
+       kfree(amdgpu_encoder);
+}
+
+static const struct drm_encoder_funcs dce_v10_0_encoder_funcs = {
+       .destroy = dce_v10_0_encoder_destroy,
+};
+
+static void dce_v10_0_encoder_add(struct amdgpu_device *adev,
+                                uint32_t encoder_enum,
+                                uint32_t supported_device,
+                                u16 caps)
+{
+       struct drm_device *dev = adev->ddev;
+       struct drm_encoder *encoder;
+       struct amdgpu_encoder *amdgpu_encoder;
+
+       /* see if we already added it */
+       list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+               amdgpu_encoder = to_amdgpu_encoder(encoder);
+               if (amdgpu_encoder->encoder_enum == encoder_enum) {
+                       amdgpu_encoder->devices |= supported_device;
+                       return;
+               }
+
+       }
+
+       /* add a new one */
+       amdgpu_encoder = kzalloc(sizeof(struct amdgpu_encoder), GFP_KERNEL);
+       if (!amdgpu_encoder)
+               return;
+
+       encoder = &amdgpu_encoder->base;
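+       /* possible_crtcs is a bitmask of the crtcs this encoder can drive */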
+       switch (adev->mode_info.num_crtc) {
+       case 1:
+               encoder->possible_crtcs = 0x1;
+               break;
+       case 2:
+       default:
+               encoder->possible_crtcs = 0x3;
+               break;
+       case 4:
+               encoder->possible_crtcs = 0xf;
+               break;
+       case 6:
+               encoder->possible_crtcs = 0x3f;
+               break;
+       }
+
+       amdgpu_encoder->enc_priv = NULL;
+
+       amdgpu_encoder->encoder_enum = encoder_enum;
+       amdgpu_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
+       amdgpu_encoder->devices = supported_device;
+       amdgpu_encoder->rmx_type = RMX_OFF;
+       amdgpu_encoder->underscan_type = UNDERSCAN_OFF;
+       amdgpu_encoder->is_ext_encoder = false;
+       amdgpu_encoder->caps = caps;
+
+       switch (amdgpu_encoder->encoder_id) {
+       case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
+       case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
+               drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs,
+                                DRM_MODE_ENCODER_DAC);
+               drm_encoder_helper_add(encoder, &dce_v10_0_dac_helper_funcs);
+               break;
+       case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+       case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+       case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+       case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+       case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
+               if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+                       amdgpu_encoder->rmx_type = RMX_FULL;
+                       drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs,
+                                        DRM_MODE_ENCODER_LVDS);
+                       amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_lcd_info(amdgpu_encoder);
+               } else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) {
+                       drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs,
+                                        DRM_MODE_ENCODER_DAC);
+                       amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
+               } else {
+                       drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs,
+                                        DRM_MODE_ENCODER_TMDS);
+                       amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
+               }
+               drm_encoder_helper_add(encoder, &dce_v10_0_dig_helper_funcs);
+               break;
+       case ENCODER_OBJECT_ID_SI170B:
+       case ENCODER_OBJECT_ID_CH7303:
+       case ENCODER_OBJECT_ID_EXTERNAL_SDVOA:
+       case ENCODER_OBJECT_ID_EXTERNAL_SDVOB:
+       case ENCODER_OBJECT_ID_TITFP513:
+       case ENCODER_OBJECT_ID_VT1623:
+       case ENCODER_OBJECT_ID_HDMI_SI1930:
+       case ENCODER_OBJECT_ID_TRAVIS:
+       case ENCODER_OBJECT_ID_NUTMEG:
+               /* these are handled by the primary encoders */
+               amdgpu_encoder->is_ext_encoder = true;
+               if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
+                       drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs,
+                                        DRM_MODE_ENCODER_LVDS);
+               else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT))
+                       drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs,
+                                        DRM_MODE_ENCODER_DAC);
+               else
+                       drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs,
+                                        DRM_MODE_ENCODER_TMDS);
+               drm_encoder_helper_add(encoder, &dce_v10_0_ext_helper_funcs);
+               break;
+       }
+}
+
+static const struct amdgpu_display_funcs dce_v10_0_display_funcs = {
+       .set_vga_render_state = &dce_v10_0_set_vga_render_state,
+       .bandwidth_update = &dce_v10_0_bandwidth_update,
+       .vblank_get_counter = &dce_v10_0_vblank_get_counter,
+       .vblank_wait = &dce_v10_0_vblank_wait,
+       .is_display_hung = &dce_v10_0_is_display_hung,
+       .backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level,
+       .backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level,
+       .hpd_sense = &dce_v10_0_hpd_sense,
+       .hpd_set_polarity = &dce_v10_0_hpd_set_polarity,
+       .hpd_get_gpio_reg = &dce_v10_0_hpd_get_gpio_reg,
+       .page_flip = &dce_v10_0_page_flip,
+       .page_flip_get_scanoutpos = &dce_v10_0_crtc_get_scanoutpos,
+       .add_encoder = &dce_v10_0_encoder_add,
+       .add_connector = &amdgpu_connector_add,
+       .stop_mc_access = &dce_v10_0_stop_mc_access,
+       .resume_mc_access = &dce_v10_0_resume_mc_access,
+};
+
+static void dce_v10_0_set_display_funcs(struct amdgpu_device *adev)
+{
+       if (adev->mode_info.funcs == NULL)
+               adev->mode_info.funcs = &dce_v10_0_display_funcs;
+}
+
+static const struct amdgpu_irq_src_funcs dce_v10_0_crtc_irq_funcs = {
+       .set = dce_v10_0_set_crtc_irq_state,
+       .process = dce_v10_0_crtc_irq,
+};
+
+static const struct amdgpu_irq_src_funcs dce_v10_0_pageflip_irq_funcs = {
+       .set = dce_v10_0_set_pageflip_irq_state,
+       .process = dce_v10_0_pageflip_irq,
+};
+
+static const struct amdgpu_irq_src_funcs dce_v10_0_hpd_irq_funcs = {
+       .set = dce_v10_0_set_hpd_irq_state,
+       .process = dce_v10_0_hpd_irq,
+};
+
+static void dce_v10_0_set_irq_funcs(struct amdgpu_device *adev)
+{
+       adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_LAST;
+       adev->crtc_irq.funcs = &dce_v10_0_crtc_irq_funcs;
+
+       adev->pageflip_irq.num_types = AMDGPU_PAGEFLIP_IRQ_LAST;
+       adev->pageflip_irq.funcs = &dce_v10_0_pageflip_irq_funcs;
+
+       adev->hpd_irq.num_types = AMDGPU_HPD_LAST;
+       adev->hpd_irq.funcs = &dce_v10_0_hpd_irq_funcs;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.h b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.h
new file mode 100644 (file)
index 0000000..72ca20d
--- /dev/null
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __DCE_V10_0_H__
+#define __DCE_V10_0_H__
+
+extern const struct amdgpu_ip_funcs dce_v10_0_ip_funcs;
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
new file mode 100644 (file)
index 0000000..55fef15
--- /dev/null
@@ -0,0 +1,3871 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "drmP.h"
+#include "amdgpu.h"
+#include "amdgpu_pm.h"
+#include "amdgpu_i2c.h"
+#include "vid.h"
+#include "atom.h"
+#include "amdgpu_atombios.h"
+#include "atombios_crtc.h"
+#include "atombios_encoders.h"
+#include "amdgpu_pll.h"
+#include "amdgpu_connectors.h"
+
+#include "dce/dce_11_0_d.h"
+#include "dce/dce_11_0_sh_mask.h"
+#include "dce/dce_11_0_enum.h"
+#include "oss/oss_3_0_d.h"
+#include "oss/oss_3_0_sh_mask.h"
+#include "gmc/gmc_8_1_d.h"
+#include "gmc/gmc_8_1_sh_mask.h"
+
+static void dce_v11_0_set_display_funcs(struct amdgpu_device *adev);
+static void dce_v11_0_set_irq_funcs(struct amdgpu_device *adev);
+
+static const u32 crtc_offsets[] =
+{
+       CRTC0_REGISTER_OFFSET,
+       CRTC1_REGISTER_OFFSET,
+       CRTC2_REGISTER_OFFSET,
+       CRTC3_REGISTER_OFFSET,
+       CRTC4_REGISTER_OFFSET,
+       CRTC5_REGISTER_OFFSET,
+       CRTC6_REGISTER_OFFSET
+};
+
+static const u32 hpd_offsets[] =
+{
+       HPD0_REGISTER_OFFSET,
+       HPD1_REGISTER_OFFSET,
+       HPD2_REGISTER_OFFSET,
+       HPD3_REGISTER_OFFSET,
+       HPD4_REGISTER_OFFSET,
+       HPD5_REGISTER_OFFSET
+};
+
+static const uint32_t dig_offsets[] = {
+       DIG0_REGISTER_OFFSET,
+       DIG1_REGISTER_OFFSET,
+       DIG2_REGISTER_OFFSET,
+       DIG3_REGISTER_OFFSET,
+       DIG4_REGISTER_OFFSET,
+       DIG5_REGISTER_OFFSET,
+       DIG6_REGISTER_OFFSET,
+       DIG7_REGISTER_OFFSET,
+       DIG8_REGISTER_OFFSET
+};
+
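+/* per-crtc display interrupt status register and the vblank/vline/hpd bits within it */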
+static const struct {
+       uint32_t        reg;
+       uint32_t        vblank;
+       uint32_t        vline;
+       uint32_t        hpd;
+
+} interrupt_status_offsets[] = { {
+       .reg = mmDISP_INTERRUPT_STATUS,
+       .vblank = DISP_INTERRUPT_STATUS__LB_D1_VBLANK_INTERRUPT_MASK,
+       .vline = DISP_INTERRUPT_STATUS__LB_D1_VLINE_INTERRUPT_MASK,
+       .hpd = DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT_MASK
+}, {
+       .reg = mmDISP_INTERRUPT_STATUS_CONTINUE,
+       .vblank = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VBLANK_INTERRUPT_MASK,
+       .vline = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VLINE_INTERRUPT_MASK,
+       .hpd = DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_INTERRUPT_MASK
+}, {
+       .reg = mmDISP_INTERRUPT_STATUS_CONTINUE2,
+       .vblank = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VBLANK_INTERRUPT_MASK,
+       .vline = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VLINE_INTERRUPT_MASK,
+       .hpd = DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_INTERRUPT_MASK
+}, {
+       .reg = mmDISP_INTERRUPT_STATUS_CONTINUE3,
+       .vblank = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VBLANK_INTERRUPT_MASK,
+       .vline = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VLINE_INTERRUPT_MASK,
+       .hpd = DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_INTERRUPT_MASK
+}, {
+       .reg = mmDISP_INTERRUPT_STATUS_CONTINUE4,
+       .vblank = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VBLANK_INTERRUPT_MASK,
+       .vline = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VLINE_INTERRUPT_MASK,
+       .hpd = DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_INTERRUPT_MASK
+}, {
+       .reg = mmDISP_INTERRUPT_STATUS_CONTINUE5,
+       .vblank = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VBLANK_INTERRUPT_MASK,
+       .vline = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VLINE_INTERRUPT_MASK,
+       .hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK
+} };
+
+static const u32 cz_golden_settings_a11[] =
+{
+       mmCRTC_DOUBLE_BUFFER_CONTROL, 0x00010101, 0x00010000,
+       mmFBC_MISC, 0x1f311fff, 0x14300000,
+};
+
+static void dce_v11_0_init_golden_registers(struct amdgpu_device *adev)
+{
+       switch (adev->asic_type) {
+       case CHIP_CARRIZO:
+               amdgpu_program_register_sequence(adev,
+                                                cz_golden_settings_a11,
+                                                (const u32)ARRAY_SIZE(cz_golden_settings_a11));
+               break;
+       default:
+               break;
+       }
+}
+
+static u32 dce_v11_0_audio_endpt_rreg(struct amdgpu_device *adev,
+                                    u32 block_offset, u32 reg)
+{
+       unsigned long flags;
+       u32 r;
+
+       spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
+       WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
+       r = RREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset);
+       spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);
+
+       return r;
+}
+
+static void dce_v11_0_audio_endpt_wreg(struct amdgpu_device *adev,
+                                     u32 block_offset, u32 reg, u32 v)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
+       WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
+       WREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset, v);
+       spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);
+}
+
+static bool dce_v11_0_is_in_vblank(struct amdgpu_device *adev, int crtc)
+{
+       if (RREG32(mmCRTC_STATUS + crtc_offsets[crtc]) &
+                       CRTC_V_BLANK_START_END__CRTC_V_BLANK_START_MASK)
+               return true;
+       else
+               return false;
+}
+
+static bool dce_v11_0_is_counter_moving(struct amdgpu_device *adev, int crtc)
+{
+       u32 pos1, pos2;
+
+       pos1 = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
+       pos2 = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
+
+       if (pos1 != pos2)
+               return true;
+       else
+               return false;
+}
+
+/**
+ * dce_v11_0_vblank_wait - vblank wait asic callback.
+ *
+ * @adev: amdgpu_device pointer
+ * @crtc: crtc to wait for vblank on
+ *
+ * Wait for vblank on the requested crtc (evergreen+).
+ */
+static void dce_v11_0_vblank_wait(struct amdgpu_device *adev, int crtc)
+{
+       unsigned i = 0;
+
+       if (crtc >= adev->mode_info.num_crtc)
+               return;
+
+       if (!(RREG32(mmCRTC_CONTROL + crtc_offsets[crtc]) & CRTC_CONTROL__CRTC_MASTER_EN_MASK))
+               return;
+
+       /* depending on when we hit vblank, we may be close to active; if so,
+        * wait for another frame.
+        */
+       while (dce_v11_0_is_in_vblank(adev, crtc)) {
+               if (i++ % 100 == 0) {
+                       if (!dce_v11_0_is_counter_moving(adev, crtc))
+                               break;
+               }
+       }
+
+       while (!dce_v11_0_is_in_vblank(adev, crtc)) {
+               if (i++ % 100 == 0) {
+                       if (!dce_v11_0_is_counter_moving(adev, crtc))
+                               break;
+               }
+       }
+}
+
+static u32 dce_v11_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
+{
+       if (crtc >= adev->mode_info.num_crtc)
+               return 0;
+       else
+               return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
+}
+
+/**
+ * dce_v11_0_page_flip - pageflip callback.
+ *
+ * @adev: amdgpu_device pointer
+ * @crtc_id: crtc to pageflip on
+ * @crtc_base: new address of the crtc (GPU MC address)
+ *
+ * Does the actual pageflip (evergreen+).
+ * Takes the graphics update lock, programs the new scanout addresses,
+ * waits for the update_pending bit to go high and then releases the
+ * lock so the double buffered update can take place during vblank.
+ */
+static void dce_v11_0_page_flip(struct amdgpu_device *adev,
+                             int crtc_id, u64 crtc_base)
+{
+       struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
+       u32 tmp = RREG32(mmGRPH_UPDATE + amdgpu_crtc->crtc_offset);
+       int i;
+
+       /* Lock the graphics update lock */
+       tmp = REG_SET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK, 1);
+       WREG32(mmGRPH_UPDATE + amdgpu_crtc->crtc_offset, tmp);
+
+       /* update the scanout addresses */
+       WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
+              upper_32_bits(crtc_base));
+       WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
+              lower_32_bits(crtc_base));
+
+       WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
+              upper_32_bits(crtc_base));
+       WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
+              lower_32_bits(crtc_base));
+
+       /* Wait for update_pending to go high. */
+       for (i = 0; i < adev->usec_timeout; i++) {
+               if (RREG32(mmGRPH_UPDATE + amdgpu_crtc->crtc_offset) &
+                               GRPH_UPDATE__GRPH_SURFACE_UPDATE_PENDING_MASK)
+                       break;
+               udelay(1);
+       }
+       DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
+
+       /* Unlock the lock, so double-buffering can take place inside vblank */
+       tmp = REG_SET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK, 0);
+       WREG32(mmGRPH_UPDATE + amdgpu_crtc->crtc_offset, tmp);
+}
+
+static int dce_v11_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
+                                       u32 *vbl, u32 *position)
+{
+       if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
+               return -EINVAL;
+
+       *vbl = RREG32(mmCRTC_V_BLANK_START_END + crtc_offsets[crtc]);
+       *position = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
+
+       return 0;
+}
+
+/**
+ * dce_v11_0_hpd_sense - hpd sense callback.
+ *
+ * @adev: amdgpu_device pointer
+ * @hpd: hpd (hotplug detect) pin
+ *
+ * Checks if a digital monitor is connected (evergreen+).
+ * Returns true if connected, false if not connected.
+ */
+static bool dce_v11_0_hpd_sense(struct amdgpu_device *adev,
+                              enum amdgpu_hpd_id hpd)
+{
+       int idx;
+       bool connected = false;
+
+       switch (hpd) {
+       case AMDGPU_HPD_1:
+               idx = 0;
+               break;
+       case AMDGPU_HPD_2:
+               idx = 1;
+               break;
+       case AMDGPU_HPD_3:
+               idx = 2;
+               break;
+       case AMDGPU_HPD_4:
+               idx = 3;
+               break;
+       case AMDGPU_HPD_5:
+               idx = 4;
+               break;
+       case AMDGPU_HPD_6:
+               idx = 5;
+               break;
+       default:
+               return connected;
+       }
+
+       if (RREG32(mmDC_HPD_INT_STATUS + hpd_offsets[idx]) &
+           DC_HPD_INT_STATUS__DC_HPD_SENSE_MASK)
+               connected = true;
+
+       return connected;
+}
+
+/**
+ * dce_v11_0_hpd_set_polarity - hpd set polarity callback.
+ *
+ * @adev: amdgpu_device pointer
+ * @hpd: hpd (hotplug detect) pin
+ *
+ * Set the polarity of the hpd pin (evergreen+).
+ */
+static void dce_v11_0_hpd_set_polarity(struct amdgpu_device *adev,
+                                     enum amdgpu_hpd_id hpd)
+{
+       u32 tmp;
+       bool connected = dce_v11_0_hpd_sense(adev, hpd);
+       int idx;
+
+       switch (hpd) {
+       case AMDGPU_HPD_1:
+               idx = 0;
+               break;
+       case AMDGPU_HPD_2:
+               idx = 1;
+               break;
+       case AMDGPU_HPD_3:
+               idx = 2;
+               break;
+       case AMDGPU_HPD_4:
+               idx = 3;
+               break;
+       case AMDGPU_HPD_5:
+               idx = 4;
+               break;
+       case AMDGPU_HPD_6:
+               idx = 5;
+               break;
+       default:
+               return;
+       }
+
+       tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx]);
+       if (connected)
+               tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY, 0);
+       else
+               tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY, 1);
+       WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx], tmp);
+}
+
+/**
+ * dce_v11_0_hpd_init - hpd setup callback.
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Setup the hpd pins used by the card (evergreen+).
+ * Enable the pin, set the polarity, and enable the hpd interrupts.
+ */
+static void dce_v11_0_hpd_init(struct amdgpu_device *adev)
+{
+       struct drm_device *dev = adev->ddev;
+       struct drm_connector *connector;
+       u32 tmp;
+       int idx;
+
+       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+               struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
+
+               if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
+                   connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
+                       /* don't try to enable hpd on eDP or LVDS: it avoids
+                        * breaking the aux dp channel on imac and helps (but
+                        * does not completely fix)
+                        * https://bugzilla.redhat.com/show_bug.cgi?id=726143
+                        * It also avoids interrupt storms during dpms.
+                        */
+                       continue;
+               }
+
+               switch (amdgpu_connector->hpd.hpd) {
+               case AMDGPU_HPD_1:
+                       idx = 0;
+                       break;
+               case AMDGPU_HPD_2:
+                       idx = 1;
+                       break;
+               case AMDGPU_HPD_3:
+                       idx = 2;
+                       break;
+               case AMDGPU_HPD_4:
+                       idx = 3;
+                       break;
+               case AMDGPU_HPD_5:
+                       idx = 4;
+                       break;
+               case AMDGPU_HPD_6:
+                       idx = 5;
+                       break;
+               default:
+                       continue;
+               }
+
+               tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[idx]);
+               tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 1);
+               WREG32(mmDC_HPD_CONTROL + hpd_offsets[idx], tmp);
+
+               tmp = RREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[idx]);
+               tmp = REG_SET_FIELD(tmp, DC_HPD_TOGGLE_FILT_CNTL,
+                                   DC_HPD_CONNECT_INT_DELAY,
+                                   AMDGPU_HPD_CONNECT_INT_DELAY_IN_MS);
+               tmp = REG_SET_FIELD(tmp, DC_HPD_TOGGLE_FILT_CNTL,
+                                   DC_HPD_DISCONNECT_INT_DELAY,
+                                   AMDGPU_HPD_DISCONNECT_INT_DELAY_IN_MS);
+               WREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[idx], tmp);
+
+               dce_v11_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
+               amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
+       }
+}
+
+/**
+ * dce_v11_0_hpd_fini - hpd tear down callback.
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Tear down the hpd pins used by the card (evergreen+).
+ * Disable the hpd interrupts.
+ */
+static void dce_v11_0_hpd_fini(struct amdgpu_device *adev)
+{
+       struct drm_device *dev = adev->ddev;
+       struct drm_connector *connector;
+       u32 tmp;
+       int idx;
+
+       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+               struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
+
+               switch (amdgpu_connector->hpd.hpd) {
+               case AMDGPU_HPD_1:
+                       idx = 0;
+                       break;
+               case AMDGPU_HPD_2:
+                       idx = 1;
+                       break;
+               case AMDGPU_HPD_3:
+                       idx = 2;
+                       break;
+               case AMDGPU_HPD_4:
+                       idx = 3;
+                       break;
+               case AMDGPU_HPD_5:
+                       idx = 4;
+                       break;
+               case AMDGPU_HPD_6:
+                       idx = 5;
+                       break;
+               default:
+                       continue;
+               }
+
+               tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[idx]);
+               tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 0);
+               WREG32(mmDC_HPD_CONTROL + hpd_offsets[idx], tmp);
+
+               amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
+       }
+}
+
+static u32 dce_v11_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
+{
+       return mmDC_GPIO_HPD_A;
+}
+
+static bool dce_v11_0_is_display_hung(struct amdgpu_device *adev)
+{
+       u32 crtc_hung = 0;
+       u32 crtc_status[6];
+       u32 i, j, tmp;
+
+       for (i = 0; i < adev->mode_info.num_crtc; i++) {
+               tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
+               if (REG_GET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN)) {
+                       crtc_status[i] = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]);
+                       crtc_hung |= (1 << i);
+               }
+       }
+
+       for (j = 0; j < 10; j++) {
+               for (i = 0; i < adev->mode_info.num_crtc; i++) {
+                       if (crtc_hung & (1 << i)) {
+                               tmp = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]);
+                               if (tmp != crtc_status[i])
+                                       crtc_hung &= ~(1 << i);
+                       }
+               }
+               if (crtc_hung == 0)
+                       return false;
+               udelay(100);
+       }
+
+       return true;
+}
+
+static void dce_v11_0_stop_mc_access(struct amdgpu_device *adev,
+                                    struct amdgpu_mode_mc_save *save)
+{
+       u32 crtc_enabled, tmp;
+       int i;
+
+       save->vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
+       save->vga_hdp_control = RREG32(mmVGA_HDP_CONTROL);
+
+       /* disable VGA render */
+       tmp = RREG32(mmVGA_RENDER_CONTROL);
+       tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
+       WREG32(mmVGA_RENDER_CONTROL, tmp);
+
+       /* blank the display controllers */
+       for (i = 0; i < adev->mode_info.num_crtc; i++) {
+               crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]),
+                                            CRTC_CONTROL, CRTC_MASTER_EN);
+               if (crtc_enabled) {
+#if 0
+                       u32 frame_count;
+                       int j;
+
+                       save->crtc_enabled[i] = true;
+                       tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]);
+                       if (REG_GET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN) == 0) {
+                               amdgpu_display_vblank_wait(adev, i);
+                               WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+                               tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 1);
+                               WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
+                               WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
+                       }
+                       /* wait for the next frame */
+                       frame_count = amdgpu_display_vblank_get_counter(adev, i);
+                       for (j = 0; j < adev->usec_timeout; j++) {
+                               if (amdgpu_display_vblank_get_counter(adev, i) != frame_count)
+                                       break;
+                               udelay(1);
+                       }
+                       tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
+                       if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK) == 0) {
+                               tmp = REG_SET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK, 1);
+                               WREG32(mmGRPH_UPDATE + crtc_offsets[i], tmp);
+                       }
+                       tmp = RREG32(mmCRTC_MASTER_UPDATE_LOCK + crtc_offsets[i]);
+                       if (REG_GET_FIELD(tmp, CRTC_MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK) == 0) {
+                               tmp = REG_SET_FIELD(tmp, CRTC_MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK, 1);
+                               WREG32(mmCRTC_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
+                       }
+#else
+                       /* XXX this is a hack to avoid strange behavior with EFI on certain systems */
+                       WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+                       tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
+                       tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN, 0);
+                       WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
+                       WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
+                       save->crtc_enabled[i] = false;
+                       /* ***** */
+#endif
+               } else {
+                       save->crtc_enabled[i] = false;
+               }
+       }
+}
+
+static void dce_v11_0_resume_mc_access(struct amdgpu_device *adev,
+                                      struct amdgpu_mode_mc_save *save)
+{
+       u32 tmp, frame_count;
+       int i, j;
+
+       /* update crtc base addresses */
+       for (i = 0; i < adev->mode_info.num_crtc; i++) {
+               WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
+                      upper_32_bits(adev->mc.vram_start));
+               WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
+                      upper_32_bits(adev->mc.vram_start));
+               WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
+                      (u32)adev->mc.vram_start);
+               WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i],
+                      (u32)adev->mc.vram_start);
+
+               if (save->crtc_enabled[i]) {
+                       tmp = RREG32(mmCRTC_MASTER_UPDATE_MODE + crtc_offsets[i]);
+                       if (REG_GET_FIELD(tmp, CRTC_MASTER_UPDATE_MODE, MASTER_UPDATE_MODE) != 3) {
+                               tmp = REG_SET_FIELD(tmp, CRTC_MASTER_UPDATE_MODE, MASTER_UPDATE_MODE, 3);
+                               WREG32(mmCRTC_MASTER_UPDATE_MODE + crtc_offsets[i], tmp);
+                       }
+                       tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
+                       if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK)) {
+                               tmp = REG_SET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK, 0);
+                               WREG32(mmGRPH_UPDATE + crtc_offsets[i], tmp);
+                       }
+                       tmp = RREG32(mmCRTC_MASTER_UPDATE_LOCK + crtc_offsets[i]);
+                       if (REG_GET_FIELD(tmp, CRTC_MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK)) {
+                               tmp = REG_SET_FIELD(tmp, CRTC_MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK, 0);
+                               WREG32(mmCRTC_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
+                       }
+                       for (j = 0; j < adev->usec_timeout; j++) {
+                               tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
+                               if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_SURFACE_UPDATE_PENDING) == 0)
+                                       break;
+                               udelay(1);
+                       }
+                       tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]);
+                       tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 0);
+                       WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+                       WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
+                       WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
+                       /* wait for the next frame */
+                       frame_count = amdgpu_display_vblank_get_counter(adev, i);
+                       for (j = 0; j < adev->usec_timeout; j++) {
+                               if (amdgpu_display_vblank_get_counter(adev, i) != frame_count)
+                                       break;
+                               udelay(1);
+                       }
+               }
+       }
+
+       WREG32(mmVGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(adev->mc.vram_start));
+       WREG32(mmVGA_MEMORY_BASE_ADDRESS, lower_32_bits(adev->mc.vram_start));
+
+       /* Unlock vga access */
+       WREG32(mmVGA_HDP_CONTROL, save->vga_hdp_control);
+       mdelay(1);
+       WREG32(mmVGA_RENDER_CONTROL, save->vga_render_control);
+}
+
+static void dce_v11_0_set_vga_render_state(struct amdgpu_device *adev,
+                                          bool render)
+{
+       u32 tmp;
+
+       /* Lockout access through VGA aperture*/
+       tmp = RREG32(mmVGA_HDP_CONTROL);
+       if (render)
+               tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 0);
+       else
+               tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
+       WREG32(mmVGA_HDP_CONTROL, tmp);
+
+       /* disable VGA render */
+       tmp = RREG32(mmVGA_RENDER_CONTROL);
+       if (render)
+               tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 1);
+       else
+               tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
+       WREG32(mmVGA_RENDER_CONTROL, tmp);
+}
+
+static void dce_v11_0_program_fmt(struct drm_encoder *encoder)
+{
+       struct drm_device *dev = encoder->dev;
+       struct amdgpu_device *adev = dev->dev_private;
+       struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+       struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
+       struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
+       int bpc = 0;
+       u32 tmp = 0;
+       enum amdgpu_connector_dither dither = AMDGPU_FMT_DITHER_DISABLE;
+
+       if (connector) {
+               struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
+               bpc = amdgpu_connector_get_monitor_bpc(connector);
+               dither = amdgpu_connector->dither;
+       }
+
+       /* LVDS/eDP FMT is set up by atom */
+       if (amdgpu_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
+               return;
+
+       /* not needed for analog */
+       if ((amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1) ||
+           (amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2))
+               return;
+
+       if (bpc == 0)
+               return;
+
+       switch (bpc) {
+       case 6:
+               if (dither == AMDGPU_FMT_DITHER_ENABLE) {
+                       /* XXX sort out optimal dither settings */
+                       tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_FRAME_RANDOM_ENABLE, 1);
+                       tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_HIGHPASS_RANDOM_ENABLE, 1);
+                       tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_EN, 1);
+                       tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_DEPTH, 0);
+               } else {
+                       tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, 1);
+                       tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_DEPTH, 0);
+               }
+               break;
+       case 8:
+               if (dither == AMDGPU_FMT_DITHER_ENABLE) {
+                       /* XXX sort out optimal dither settings */
+                       tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_FRAME_RANDOM_ENABLE, 1);
+                       tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_HIGHPASS_RANDOM_ENABLE, 1);
+                       tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_RGB_RANDOM_ENABLE, 1);
+                       tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_EN, 1);
+                       tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_DEPTH, 1);
+               } else {
+                       tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, 1);
+                       tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_DEPTH, 1);
+               }
+               break;
+       case 10:
+               if (dither == AMDGPU_FMT_DITHER_ENABLE) {
+                       /* XXX sort out optimal dither settings */
+                       tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_FRAME_RANDOM_ENABLE, 1);
+                       tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_HIGHPASS_RANDOM_ENABLE, 1);
+                       tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_RGB_RANDOM_ENABLE, 1);
+                       tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_EN, 1);
+                       tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_DEPTH, 2);
+               } else {
+                       tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, 1);
+                       tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_DEPTH, 2);
+               }
+               break;
+       default:
+               /* not needed */
+               break;
+       }
+
+       WREG32(mmFMT_BIT_DEPTH_CONTROL + amdgpu_crtc->crtc_offset, tmp);
+}
+
+
+/* display watermark setup */
+/**
+ * dce_v11_0_line_buffer_adjust - Set up the line buffer
+ *
+ * @adev: amdgpu_device pointer
+ * @amdgpu_crtc: the selected display controller
+ * @mode: the current display mode on the selected display
+ * controller
+ *
+ * Set up the line buffer allocation for
+ * the selected display controller (CIK).
+ * Returns the line buffer size in pixels.
+ */
+static u32 dce_v11_0_line_buffer_adjust(struct amdgpu_device *adev,
+                                      struct amdgpu_crtc *amdgpu_crtc,
+                                      struct drm_display_mode *mode)
+{
+       u32 tmp, buffer_alloc, i, mem_cfg;
+       u32 pipe_offset = amdgpu_crtc->crtc_id;
+       /*
+        * Line Buffer Setup
+        * There are 6 line buffers, one for each display controller.
+        * There are 3 partitions per LB. Select the number of partitions
+        * to enable based on the display width.  For display widths larger
+        * than 4096, you need to use 2 display controllers and combine
+        * them using the stereo blender.
+        */
+       if (amdgpu_crtc->base.enabled && mode) {
+               if (mode->crtc_hdisplay < 1920) {
+                       mem_cfg = 1;
+                       buffer_alloc = 2;
+               } else if (mode->crtc_hdisplay < 2560) {
+                       mem_cfg = 2;
+                       buffer_alloc = 2;
+               } else if (mode->crtc_hdisplay < 4096) {
+                       mem_cfg = 0;
+                       buffer_alloc = (adev->flags & AMDGPU_IS_APU) ? 2 : 4;
+               } else {
+                       DRM_DEBUG_KMS("Mode too big for LB!\n");
+                       mem_cfg = 0;
+                       buffer_alloc = (adev->flags & AMDGPU_IS_APU) ? 2 : 4;
+               }
+       } else {
+               mem_cfg = 1;
+               buffer_alloc = 0;
+       }
+
+       tmp = RREG32(mmLB_MEMORY_CTRL + amdgpu_crtc->crtc_offset);
+       tmp = REG_SET_FIELD(tmp, LB_MEMORY_CTRL, LB_MEMORY_CONFIG, mem_cfg);
+       WREG32(mmLB_MEMORY_CTRL + amdgpu_crtc->crtc_offset, tmp);
+
+       tmp = RREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset);
+       tmp = REG_SET_FIELD(tmp, PIPE0_DMIF_BUFFER_CONTROL, DMIF_BUFFERS_ALLOCATED, buffer_alloc);
+       WREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset, tmp);
+
+       for (i = 0; i < adev->usec_timeout; i++) {
+               tmp = RREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset);
+               if (REG_GET_FIELD(tmp, PIPE0_DMIF_BUFFER_CONTROL, DMIF_BUFFERS_ALLOCATION_COMPLETED))
+                       break;
+               udelay(1);
+       }
+
+       if (amdgpu_crtc->base.enabled && mode) {
+               switch (mem_cfg) {
+               case 0:
+               default:
+                       return 4096 * 2;
+               case 1:
+                       return 1920 * 2;
+               case 2:
+                       return 2560 * 2;
+               }
+       }
+
+       /* controller not enabled, so no lb used */
+       return 0;
+}
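+
+/* For example, with the buckets in dce_v11_0_line_buffer_adjust() above, a
+ * 1280-wide mode selects mem_cfg 1 (1920 * 2 pixels of line buffer), a
+ * 1920-wide mode selects mem_cfg 2 (2560 * 2 pixels), and modes from 2560
+ * up to the 4096-pixel limit select mem_cfg 0 (4096 * 2 pixels).
+ */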
+
+/**
+ * cik_get_number_of_dram_channels - get the number of dram channels
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Look up the number of video ram channels (CIK).
+ * Used for display watermark bandwidth calculations
+ * Returns the number of dram channels
+ */
+static u32 cik_get_number_of_dram_channels(struct amdgpu_device *adev)
+{
+       u32 tmp = RREG32(mmMC_SHARED_CHMAP);
+
+       switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) {
+       case 0:
+       default:
+               return 1;
+       case 1:
+               return 2;
+       case 2:
+               return 4;
+       case 3:
+               return 8;
+       case 4:
+               return 3;
+       case 5:
+               return 6;
+       case 6:
+               return 10;
+       case 7:
+               return 12;
+       case 8:
+               return 16;
+       }
+}
+
+struct dce10_wm_params {
+       u32 dram_channels; /* number of dram channels */
+       u32 yclk;          /* bandwidth per dram data pin in kHz */
+       u32 sclk;          /* engine clock in kHz */
+       u32 disp_clk;      /* display clock in kHz */
+       u32 src_width;     /* viewport width */
+       u32 active_time;   /* active display time in ns */
+       u32 blank_time;    /* blank time in ns */
+       bool interlaced;    /* mode is interlaced */
+       fixed20_12 vsc;    /* vertical scale ratio */
+       u32 num_heads;     /* number of active crtcs */
+       u32 bytes_per_pixel; /* bytes per pixel display + overlay */
+       u32 lb_size;       /* line buffer allocated to pipe */
+       u32 vtaps;         /* vertical scaler taps */
+};
+
+/**
+ * dce_v11_0_dram_bandwidth - get the dram bandwidth
+ *
+ * @wm: watermark calculation data
+ *
+ * Calculate the raw dram bandwidth (CIK).
+ * Used for display watermark bandwidth calculations
+ * Returns the dram bandwidth in MBytes/s
+ */
+static u32 dce_v11_0_dram_bandwidth(struct dce10_wm_params *wm)
+{
+       /* Calculate raw DRAM Bandwidth */
+       fixed20_12 dram_efficiency; /* 0.7 */
+       fixed20_12 yclk, dram_channels, bandwidth;
+       fixed20_12 a;
+
+       a.full = dfixed_const(1000);
+       yclk.full = dfixed_const(wm->yclk);
+       yclk.full = dfixed_div(yclk, a);
+       dram_channels.full = dfixed_const(wm->dram_channels * 4);
+       a.full = dfixed_const(10);
+       dram_efficiency.full = dfixed_const(7);
+       dram_efficiency.full = dfixed_div(dram_efficiency, a);
+       bandwidth.full = dfixed_mul(dram_channels, yclk);
+       bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);
+
+       return dfixed_trunc(bandwidth);
+}
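+
+/* Equivalent closed form of the fixed-point math in dce_v11_0_dram_bandwidth():
+ * bandwidth [MB/s] = (yclk [kHz] / 1000) * (dram_channels * 4 bytes) * 0.7.
+ * E.g. an 800000 kHz memory clock over 2 channels gives 800 * 8 * 0.7 = 4480 MB/s.
+ */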
+
+/**
+ * dce_v11_0_dram_bandwidth_for_display - get the dram bandwidth for display
+ *
+ * @wm: watermark calculation data
+ *
+ * Calculate the dram bandwidth used for display (CIK).
+ * Used for display watermark bandwidth calculations
+ * Returns the dram bandwidth for display in MBytes/s
+ */
+static u32 dce_v11_0_dram_bandwidth_for_display(struct dce10_wm_params *wm)
+{
+       /* Calculate DRAM Bandwidth and the part allocated to display. */
+       fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
+       fixed20_12 yclk, dram_channels, bandwidth;
+       fixed20_12 a;
+
+       a.full = dfixed_const(1000);
+       yclk.full = dfixed_const(wm->yclk);
+       yclk.full = dfixed_div(yclk, a);
+       dram_channels.full = dfixed_const(wm->dram_channels * 4);
+       a.full = dfixed_const(10);
+       disp_dram_allocation.full = dfixed_const(3); /* XXX worst case value 0.3 */
+       disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
+       bandwidth.full = dfixed_mul(dram_channels, yclk);
+       bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);
+
+       return dfixed_trunc(bandwidth);
+}
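+
+/* Same closed form as dce_v11_0_dram_bandwidth() above, but using the
+ * worst-case 0.3 display allocation instead of the 0.7 DRAM efficiency:
+ * bandwidth [MB/s] = (yclk [kHz] / 1000) * (dram_channels * 4 bytes) * 0.3.
+ */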
+
+/**
+ * dce_v11_0_data_return_bandwidth - get the data return bandwidth
+ *
+ * @wm: watermark calculation data
+ *
+ * Calculate the data return bandwidth used for display (CIK).
+ * Used for display watermark bandwidth calculations
+ * Returns the data return bandwidth in MBytes/s
+ */
+static u32 dce_v11_0_data_return_bandwidth(struct dce10_wm_params *wm)
+{
+       /* Calculate the display Data return Bandwidth */
+       fixed20_12 return_efficiency; /* 0.8 */
+       fixed20_12 sclk, bandwidth;
+       fixed20_12 a;
+
+       a.full = dfixed_const(1000);
+       sclk.full = dfixed_const(wm->sclk);
+       sclk.full = dfixed_div(sclk, a);
+       a.full = dfixed_const(10);
+       return_efficiency.full = dfixed_const(8);
+       return_efficiency.full = dfixed_div(return_efficiency, a);
+       a.full = dfixed_const(32);
+       bandwidth.full = dfixed_mul(a, sclk);
+       bandwidth.full = dfixed_mul(bandwidth, return_efficiency);
+
+       return dfixed_trunc(bandwidth);
+}
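+
+/* Closed form: bandwidth [MB/s] = (sclk [kHz] / 1000) * 32 bytes * 0.8,
+ * i.e. 32 bytes returned per engine clock cycle at 80% efficiency.
+ */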
+
+/**
+ * dce_v11_0_dmif_request_bandwidth - get the dmif bandwidth
+ *
+ * @wm: watermark calculation data
+ *
+ * Calculate the dmif bandwidth used for display (CIK).
+ * Used for display watermark bandwidth calculations
+ * Returns the dmif bandwidth in MBytes/s
+ */
+static u32 dce_v11_0_dmif_request_bandwidth(struct dce10_wm_params *wm)
+{
+       /* Calculate the DMIF Request Bandwidth */
+       fixed20_12 disp_clk_request_efficiency; /* 0.8 */
+       fixed20_12 disp_clk, bandwidth;
+       fixed20_12 a, b;
+
+       a.full = dfixed_const(1000);
+       disp_clk.full = dfixed_const(wm->disp_clk);
+       disp_clk.full = dfixed_div(disp_clk, a);
+       a.full = dfixed_const(32);
+       b.full = dfixed_mul(a, disp_clk);
+
+       a.full = dfixed_const(10);
+       disp_clk_request_efficiency.full = dfixed_const(8);
+       disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);
+
+       bandwidth.full = dfixed_mul(b, disp_clk_request_efficiency);
+
+       return dfixed_trunc(bandwidth);
+}
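+
+/* Closed form: bandwidth [MB/s] = (disp_clk [kHz] / 1000) * 32 bytes * 0.8,
+ * i.e. one 32-byte DMIF request per display clock cycle at 80% efficiency.
+ */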
+
+/**
+ * dce_v11_0_available_bandwidth - get the min available bandwidth
+ *
+ * @wm: watermark calculation data
+ *
+ * Calculate the min available bandwidth used for display (CIK).
+ * Used for display watermark bandwidth calculations
+ * Returns the min available bandwidth in MBytes/s
+ */
+static u32 dce_v11_0_available_bandwidth(struct dce10_wm_params *wm)
+{
+       /* Calculate the Available bandwidth. Display can use this temporarily but not on average. */
+       u32 dram_bandwidth = dce_v11_0_dram_bandwidth(wm);
+       u32 data_return_bandwidth = dce_v11_0_data_return_bandwidth(wm);
+       u32 dmif_req_bandwidth = dce_v11_0_dmif_request_bandwidth(wm);
+
+       return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
+}
+
+/**
+ * dce_v11_0_average_bandwidth - get the average available bandwidth
+ *
+ * @wm: watermark calculation data
+ *
+ * Calculate the average available bandwidth used for display (CIK).
+ * Used for display watermark bandwidth calculations
+ * Returns the average available bandwidth in MBytes/s
+ */
+static u32 dce_v11_0_average_bandwidth(struct dce10_wm_params *wm)
+{
+       /* Calculate the display mode Average Bandwidth
+        * DisplayMode should contain the source and destination dimensions,
+        * timing, etc.
+        */
+       fixed20_12 bpp;
+       fixed20_12 line_time;
+       fixed20_12 src_width;
+       fixed20_12 bandwidth;
+       fixed20_12 a;
+
+       a.full = dfixed_const(1000);
+       line_time.full = dfixed_const(wm->active_time + wm->blank_time);
+       line_time.full = dfixed_div(line_time, a);
+       bpp.full = dfixed_const(wm->bytes_per_pixel);
+       src_width.full = dfixed_const(wm->src_width);
+       bandwidth.full = dfixed_mul(src_width, bpp);
+       bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
+       bandwidth.full = dfixed_div(bandwidth, line_time);
+
+       return dfixed_trunc(bandwidth);
+}
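+
+/* Closed form: bandwidth [MB/s] =
+ * (src_width * bytes_per_pixel * vsc) / ((active_time + blank_time) [ns] / 1000),
+ * i.e. the bytes fetched for one scaled source line divided by the line period.
+ */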
+
+/**
+ * dce_v11_0_latency_watermark - get the latency watermark
+ *
+ * @wm: watermark calculation data
+ *
+ * Calculate the latency watermark (CIK).
+ * Used for display watermark bandwidth calculations
+ * Returns the latency watermark in ns
+ */
+static u32 dce_v11_0_latency_watermark(struct dce10_wm_params *wm)
+{
+       /* First calculate the latency in ns */
+       u32 mc_latency = 2000; /* 2000 ns. */
+       u32 available_bandwidth = dce_v11_0_available_bandwidth(wm);
+       u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
+       u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
+       u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
+       u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
+               (wm->num_heads * cursor_line_pair_return_time);
+       u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
+       u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
+       u32 tmp, dmif_size = 12288;
+       fixed20_12 a, b, c;
+
+       if (wm->num_heads == 0)
+               return 0;
+
+       a.full = dfixed_const(2);
+       b.full = dfixed_const(1);
+       if ((wm->vsc.full > a.full) ||
+           ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
+           (wm->vtaps >= 5) ||
+           ((wm->vsc.full >= a.full) && wm->interlaced))
+               max_src_lines_per_dst_line = 4;
+       else
+               max_src_lines_per_dst_line = 2;
+
+       a.full = dfixed_const(available_bandwidth);
+       b.full = dfixed_const(wm->num_heads);
+       a.full = dfixed_div(a, b);
+
+       b.full = dfixed_const(mc_latency + 512);
+       c.full = dfixed_const(wm->disp_clk);
+       b.full = dfixed_div(b, c);
+
+       c.full = dfixed_const(dmif_size);
+       b.full = dfixed_div(c, b);
+
+       tmp = min(dfixed_trunc(a), dfixed_trunc(b));
+
+       b.full = dfixed_const(1000);
+       c.full = dfixed_const(wm->disp_clk);
+       b.full = dfixed_div(c, b);
+       c.full = dfixed_const(wm->bytes_per_pixel);
+       b.full = dfixed_mul(b, c);
+
+       lb_fill_bw = min(tmp, dfixed_trunc(b));
+
+       a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
+       b.full = dfixed_const(1000);
+       c.full = dfixed_const(lb_fill_bw);
+       b.full = dfixed_div(c, b);
+       a.full = dfixed_div(a, b);
+       line_fill_time = dfixed_trunc(a);
+
+       if (line_fill_time < wm->active_time)
+               return latency;
+       else
+               return latency + (line_fill_time - wm->active_time);
+
+}
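+
+/* The watermark returned by dce_v11_0_latency_watermark() is the sum of the
+ * memory controller latency, the dc pipe latency and the time the other
+ * heads may occupy the data return path, extended by the extra line fill
+ * time whenever the line buffer cannot be refilled within one active period.
+ */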
+
+/**
+ * dce_v11_0_average_bandwidth_vs_dram_bandwidth_for_display - check
+ * average and available dram bandwidth
+ *
+ * @wm: watermark calculation data
+ *
+ * Check if the display average bandwidth fits in the display
+ * dram bandwidth (CIK).
+ * Used for display watermark bandwidth calculations
+ * Returns true if the display fits, false if not.
+ */
+static bool dce_v11_0_average_bandwidth_vs_dram_bandwidth_for_display(struct dce10_wm_params *wm)
+{
+       if (dce_v11_0_average_bandwidth(wm) <=
+           (dce_v11_0_dram_bandwidth_for_display(wm) / wm->num_heads))
+               return true;
+       else
+               return false;
+}
+
+/**
+ * dce_v11_0_average_bandwidth_vs_available_bandwidth - check
+ * average and available bandwidth
+ *
+ * @wm: watermark calculation data
+ *
+ * Check if the display average bandwidth fits in the display
+ * available bandwidth (CIK).
+ * Used for display watermark bandwidth calculations
+ * Returns true if the display fits, false if not.
+ */
+static bool dce_v11_0_average_bandwidth_vs_available_bandwidth(struct dce10_wm_params *wm)
+{
+       if (dce_v11_0_average_bandwidth(wm) <=
+           (dce_v11_0_available_bandwidth(wm) / wm->num_heads))
+               return true;
+       else
+               return false;
+}
+
+/**
+ * dce_v11_0_check_latency_hiding - check latency hiding
+ *
+ * @wm: watermark calculation data
+ *
+ * Check latency hiding (CIK).
+ * Used for display watermark bandwidth calculations
+ * Returns true if the display fits, false if not.
+ */
+static bool dce_v11_0_check_latency_hiding(struct dce10_wm_params *wm)
+{
+       u32 lb_partitions = wm->lb_size / wm->src_width;
+       u32 line_time = wm->active_time + wm->blank_time;
+       u32 latency_tolerant_lines;
+       u32 latency_hiding;
+       fixed20_12 a;
+
+       a.full = dfixed_const(1);
+       if (wm->vsc.full > a.full)
+               latency_tolerant_lines = 1;
+       else {
+               if (lb_partitions <= (wm->vtaps + 1))
+                       latency_tolerant_lines = 1;
+               else
+                       latency_tolerant_lines = 2;
+       }
+
+       latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);
+
+       if (dce_v11_0_latency_watermark(wm) <= latency_hiding)
+               return true;
+       else
+               return false;
+}
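+
+/* latency_hiding above is the number of ns the line buffer contents can keep
+ * feeding the display while new data is outstanding; the mode fits when the
+ * computed latency watermark does not exceed it.
+ */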
+
+/**
+ * dce_v11_0_program_watermarks - program display watermarks
+ *
+ * @adev: amdgpu_device pointer
+ * @amdgpu_crtc: the selected display controller
+ * @lb_size: line buffer size
+ * @num_heads: number of display controllers in use
+ *
+ * Calculate and program the display watermarks for the
+ * selected display controller (CIK).
+ */
+static void dce_v11_0_program_watermarks(struct amdgpu_device *adev,
+                                       struct amdgpu_crtc *amdgpu_crtc,
+                                       u32 lb_size, u32 num_heads)
+{
+       struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
+       struct dce10_wm_params wm_low, wm_high;
+       u32 pixel_period;
+       u32 line_time = 0;
+       u32 latency_watermark_a = 0, latency_watermark_b = 0;
+       u32 tmp, wm_mask;
+
+       if (amdgpu_crtc->base.enabled && num_heads && mode) {
+               pixel_period = 1000000 / (u32)mode->clock;
+               line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
+
+               /* watermark for high clocks */
+               if (adev->pm.dpm_enabled) {
+                       wm_high.yclk =
+                               amdgpu_dpm_get_mclk(adev, false) * 10;
+                       wm_high.sclk =
+                               amdgpu_dpm_get_sclk(adev, false) * 10;
+               } else {
+                       wm_high.yclk = adev->pm.current_mclk * 10;
+                       wm_high.sclk = adev->pm.current_sclk * 10;
+               }
+
+               wm_high.disp_clk = mode->clock;
+               wm_high.src_width = mode->crtc_hdisplay;
+               wm_high.active_time = mode->crtc_hdisplay * pixel_period;
+               wm_high.blank_time = line_time - wm_high.active_time;
+               wm_high.interlaced = false;
+               if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+                       wm_high.interlaced = true;
+               wm_high.vsc = amdgpu_crtc->vsc;
+               wm_high.vtaps = 1;
+               if (amdgpu_crtc->rmx_type != RMX_OFF)
+                       wm_high.vtaps = 2;
+               wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */
+               wm_high.lb_size = lb_size;
+               wm_high.dram_channels = cik_get_number_of_dram_channels(adev);
+               wm_high.num_heads = num_heads;
+
+               /* set for high clocks */
+               latency_watermark_a = min(dce_v11_0_latency_watermark(&wm_high), (u32)65535);
+
+               /* possibly force display priority to high */
+               /* should really do this at mode validation time... */
+               if (!dce_v11_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) ||
+                   !dce_v11_0_average_bandwidth_vs_available_bandwidth(&wm_high) ||
+                   !dce_v11_0_check_latency_hiding(&wm_high) ||
+                   (adev->mode_info.disp_priority == 2)) {
+                       DRM_DEBUG_KMS("force priority to high\n");
+               }
+
+               /* watermark for low clocks */
+               if (adev->pm.dpm_enabled) {
+                       wm_low.yclk =
+                               amdgpu_dpm_get_mclk(adev, true) * 10;
+                       wm_low.sclk =
+                               amdgpu_dpm_get_sclk(adev, true) * 10;
+               } else {
+                       wm_low.yclk = adev->pm.current_mclk * 10;
+                       wm_low.sclk = adev->pm.current_sclk * 10;
+               }
+
+               wm_low.disp_clk = mode->clock;
+               wm_low.src_width = mode->crtc_hdisplay;
+               wm_low.active_time = mode->crtc_hdisplay * pixel_period;
+               wm_low.blank_time = line_time - wm_low.active_time;
+               wm_low.interlaced = false;
+               if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+                       wm_low.interlaced = true;
+               wm_low.vsc = amdgpu_crtc->vsc;
+               wm_low.vtaps = 1;
+               if (amdgpu_crtc->rmx_type != RMX_OFF)
+                       wm_low.vtaps = 2;
+               wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */
+               wm_low.lb_size = lb_size;
+               wm_low.dram_channels = cik_get_number_of_dram_channels(adev);
+               wm_low.num_heads = num_heads;
+
+               /* set for low clocks */
+               latency_watermark_b = min(dce_v11_0_latency_watermark(&wm_low), (u32)65535);
+
+               /* possibly force display priority to high */
+               /* should really do this at mode validation time... */
+               if (!dce_v11_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) ||
+                   !dce_v11_0_average_bandwidth_vs_available_bandwidth(&wm_low) ||
+                   !dce_v11_0_check_latency_hiding(&wm_low) ||
+                   (adev->mode_info.disp_priority == 2)) {
+                       DRM_DEBUG_KMS("force priority to high\n");
+               }
+       }
+
+       /* select wm A */
+       wm_mask = RREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset);
+       tmp = REG_SET_FIELD(wm_mask, DPG_WATERMARK_MASK_CONTROL, URGENCY_WATERMARK_MASK, 1);
+       WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp);
+       tmp = RREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset);
+       tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_LOW_WATERMARK, latency_watermark_a);
+       tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_HIGH_WATERMARK, line_time);
+       WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset, tmp);
+       /* select wm B */
+       tmp = REG_SET_FIELD(wm_mask, DPG_WATERMARK_MASK_CONTROL, URGENCY_WATERMARK_MASK, 2);
+       WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp);
+       tmp = RREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset);
+       tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_LOW_WATERMARK, latency_watermark_b);
+       tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_HIGH_WATERMARK, line_time);
+       WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset, tmp);
+       /* restore original selection */
+       WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, wm_mask);
+
+       /* save values for DPM */
+       amdgpu_crtc->line_time = line_time;
+       amdgpu_crtc->wm_high = latency_watermark_a;
+       amdgpu_crtc->wm_low = latency_watermark_b;
+}
+
+/**
+ * dce_v11_0_bandwidth_update - program display watermarks
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Calculate and program the display watermarks and line
+ * buffer allocation (CIK).
+ */
+static void dce_v11_0_bandwidth_update(struct amdgpu_device *adev)
+{
+       struct drm_display_mode *mode = NULL;
+       u32 num_heads = 0, lb_size;
+       int i;
+
+       amdgpu_update_display_priority(adev);
+
+       for (i = 0; i < adev->mode_info.num_crtc; i++) {
+               if (adev->mode_info.crtcs[i]->base.enabled)
+                       num_heads++;
+       }
+       for (i = 0; i < adev->mode_info.num_crtc; i++) {
+               mode = &adev->mode_info.crtcs[i]->base.mode;
+               lb_size = dce_v11_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i], mode);
+               dce_v11_0_program_watermarks(adev, adev->mode_info.crtcs[i],
+                                           lb_size, num_heads);
+       }
+}
+
+static void dce_v11_0_audio_get_connected_pins(struct amdgpu_device *adev)
+{
+       int i;
+       u32 offset, tmp;
+
+       for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
+               offset = adev->mode_info.audio.pin[i].offset;
+               tmp = RREG32_AUDIO_ENDPT(offset,
+                                        ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT);
+               if (((tmp &
+               AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK) >>
+               AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT) == 1)
+                       adev->mode_info.audio.pin[i].connected = false;
+               else
+                       adev->mode_info.audio.pin[i].connected = true;
+       }
+}
+
+static struct amdgpu_audio_pin *dce_v11_0_audio_get_pin(struct amdgpu_device *adev)
+{
+       int i;
+
+       dce_v11_0_audio_get_connected_pins(adev);
+
+       for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
+               if (adev->mode_info.audio.pin[i].connected)
+                       return &adev->mode_info.audio.pin[i];
+       }
+       DRM_ERROR("No connected audio pins found!\n");
+       return NULL;
+}
+
+static void dce_v11_0_afmt_audio_select_pin(struct drm_encoder *encoder)
+{
+       struct amdgpu_device *adev = encoder->dev->dev_private;
+       struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+       struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
+       u32 tmp;
+
+       if (!dig || !dig->afmt || !dig->afmt->pin)
+               return;
+
+       tmp = RREG32(mmAFMT_AUDIO_SRC_CONTROL + dig->afmt->offset);
+       tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_SRC_CONTROL, AFMT_AUDIO_SRC_SELECT, dig->afmt->pin->id);
+       WREG32(mmAFMT_AUDIO_SRC_CONTROL + dig->afmt->offset, tmp);
+}
+
+static void dce_v11_0_audio_write_latency_fields(struct drm_encoder *encoder,
+                                               struct drm_display_mode *mode)
+{
+       struct amdgpu_device *adev = encoder->dev->dev_private;
+       struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+       struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
+       struct drm_connector *connector;
+       struct amdgpu_connector *amdgpu_connector = NULL;
+       u32 tmp;
+       int interlace = 0;
+
+       if (!dig || !dig->afmt || !dig->afmt->pin)
+               return;
+
+       list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+               if (connector->encoder == encoder) {
+                       amdgpu_connector = to_amdgpu_connector(connector);
+                       break;
+               }
+       }
+
+       if (!amdgpu_connector) {
+               DRM_ERROR("Couldn't find encoder's connector\n");
+               return;
+       }
+
+       if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+               interlace = 1;
+       if (connector->latency_present[interlace]) {
+               tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
+                                   VIDEO_LIPSYNC, connector->video_latency[interlace]);
+               tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
+                                   AUDIO_LIPSYNC, connector->audio_latency[interlace]);
+       } else {
+               tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
+                                   VIDEO_LIPSYNC, 0);
+               tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
+                                   AUDIO_LIPSYNC, 0);
+       }
+       WREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
+                          ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp);
+}
+
+static void dce_v11_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
+{
+       struct amdgpu_device *adev = encoder->dev->dev_private;
+       struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+       struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
+       struct drm_connector *connector;
+       struct amdgpu_connector *amdgpu_connector = NULL;
+       u32 tmp;
+       u8 *sadb = NULL;
+       int sad_count;
+
+       if (!dig || !dig->afmt || !dig->afmt->pin)
+               return;
+
+       list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+               if (connector->encoder == encoder) {
+                       amdgpu_connector = to_amdgpu_connector(connector);
+                       break;
+               }
+       }
+
+       if (!amdgpu_connector) {
+               DRM_ERROR("Couldn't find encoder's connector\n");
+               return;
+       }
+
+       sad_count = drm_edid_to_speaker_allocation(amdgpu_connector_edid(connector), &sadb);
+       if (sad_count < 0) {
+               DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
+               sad_count = 0;
+       }
+
+       /* program the speaker allocation */
+       tmp = RREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
+                                ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
+       tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
+                           DP_CONNECTION, 0);
+       /* set HDMI mode */
+       tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
+                           HDMI_CONNECTION, 1);
+       if (sad_count)
+               tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
+                                   SPEAKER_ALLOCATION, sadb[0]);
+       else
+               tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
+                                   SPEAKER_ALLOCATION, 5); /* stereo */
+       WREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
+                          ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);
+
+       kfree(sadb);
+}
+
+static void dce_v11_0_audio_write_sad_regs(struct drm_encoder *encoder)
+{
+       struct amdgpu_device *adev = encoder->dev->dev_private;
+       struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+       struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
+       struct drm_connector *connector;
+       struct amdgpu_connector *amdgpu_connector = NULL;
+       struct cea_sad *sads;
+       int i, sad_count;
+
+       static const u16 eld_reg_to_type[][2] = {
+               { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, HDMI_AUDIO_CODING_TYPE_PCM },
+               { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1, HDMI_AUDIO_CODING_TYPE_AC3 },
+               { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2, HDMI_AUDIO_CODING_TYPE_MPEG1 },
+               { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3, HDMI_AUDIO_CODING_TYPE_MP3 },
+               { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4, HDMI_AUDIO_CODING_TYPE_MPEG2 },
+               { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5, HDMI_AUDIO_CODING_TYPE_AAC_LC },
+               { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6, HDMI_AUDIO_CODING_TYPE_DTS },
+               { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7, HDMI_AUDIO_CODING_TYPE_ATRAC },
+               { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9, HDMI_AUDIO_CODING_TYPE_EAC3 },
+               { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10, HDMI_AUDIO_CODING_TYPE_DTS_HD },
+               { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11, HDMI_AUDIO_CODING_TYPE_MLP },
+               { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
+       };
+
+       if (!dig || !dig->afmt || !dig->afmt->pin)
+               return;
+
+       list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+               if (connector->encoder == encoder) {
+                       amdgpu_connector = to_amdgpu_connector(connector);
+                       break;
+               }
+       }
+
+       if (!amdgpu_connector) {
+               DRM_ERROR("Couldn't find encoder's connector\n");
+               return;
+       }
+
+       sad_count = drm_edid_to_sad(amdgpu_connector_edid(connector), &sads);
+       if (sad_count <= 0) {
+               DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
+               return;
+       }
+       BUG_ON(!sads);
+
+       for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
+               u32 tmp = 0;
+               u8 stereo_freqs = 0;
+               int max_channels = -1;
+               int j;
+
+               for (j = 0; j < sad_count; j++) {
+                       struct cea_sad *sad = &sads[j];
+
+                       if (sad->format == eld_reg_to_type[i][1]) {
+                               if (sad->channels > max_channels) {
+                                       tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
+                                                           MAX_CHANNELS, sad->channels);
+                                       tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
+                                                           DESCRIPTOR_BYTE_2, sad->byte2);
+                                       tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
+                                                           SUPPORTED_FREQUENCIES, sad->freq);
+                                       max_channels = sad->channels;
+                               }
+
+                               if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM)
+                                       stereo_freqs |= sad->freq;
+                               else
+                                       break;
+                       }
+               }
+
+               tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
+                                   SUPPORTED_FREQUENCIES_STEREO, stereo_freqs);
+               WREG32_AUDIO_ENDPT(dig->afmt->pin->offset, eld_reg_to_type[i][0], tmp);
+       }
+
+       kfree(sads);
+}
+
+static void dce_v11_0_audio_enable(struct amdgpu_device *adev,
+                                 struct amdgpu_audio_pin *pin,
+                                 bool enable)
+{
+       if (!pin)
+               return;
+
+       WREG32_AUDIO_ENDPT(pin->offset, ixAZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
+                          enable ? AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK : 0);
+}
+
+static const u32 pin_offsets[] =
+{
+       AUD0_REGISTER_OFFSET,
+       AUD1_REGISTER_OFFSET,
+       AUD2_REGISTER_OFFSET,
+       AUD3_REGISTER_OFFSET,
+       AUD4_REGISTER_OFFSET,
+       AUD5_REGISTER_OFFSET,
+       AUD6_REGISTER_OFFSET,
+};
+
+static int dce_v11_0_audio_init(struct amdgpu_device *adev)
+{
+       int i;
+
+       if (!amdgpu_audio)
+               return 0;
+
+       adev->mode_info.audio.enabled = true;
+
+       adev->mode_info.audio.num_pins = 7;
+
+       for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
+               adev->mode_info.audio.pin[i].channels = -1;
+               adev->mode_info.audio.pin[i].rate = -1;
+               adev->mode_info.audio.pin[i].bits_per_sample = -1;
+               adev->mode_info.audio.pin[i].status_bits = 0;
+               adev->mode_info.audio.pin[i].category_code = 0;
+               adev->mode_info.audio.pin[i].connected = false;
+               adev->mode_info.audio.pin[i].offset = pin_offsets[i];
+               adev->mode_info.audio.pin[i].id = i;
+               /* disable audio.  it will be set up later */
+               /* XXX remove once we switch to ip funcs */
+               dce_v11_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
+       }
+
+       return 0;
+}
+
+static void dce_v11_0_audio_fini(struct amdgpu_device *adev)
+{
+       int i;
+
+       if (!adev->mode_info.audio.enabled)
+               return;
+
+       for (i = 0; i < adev->mode_info.audio.num_pins; i++)
+               dce_v11_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
+
+       adev->mode_info.audio.enabled = false;
+}
+
+/*
+ * update the N and CTS parameters for a given pixel clock rate
+ */
+static void dce_v11_0_afmt_update_ACR(struct drm_encoder *encoder, uint32_t clock)
+{
+       struct drm_device *dev = encoder->dev;
+       struct amdgpu_device *adev = dev->dev_private;
+       struct amdgpu_afmt_acr acr = amdgpu_afmt_acr(clock);
+       struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+       struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
+       u32 tmp;
+
+       tmp = RREG32(mmHDMI_ACR_32_0 + dig->afmt->offset);
+       tmp = REG_SET_FIELD(tmp, HDMI_ACR_32_0, HDMI_ACR_CTS_32, acr.cts_32khz);
+       WREG32(mmHDMI_ACR_32_0 + dig->afmt->offset, tmp);
+       tmp = RREG32(mmHDMI_ACR_32_1 + dig->afmt->offset);
+       tmp = REG_SET_FIELD(tmp, HDMI_ACR_32_1, HDMI_ACR_N_32, acr.n_32khz);
+       WREG32(mmHDMI_ACR_32_1 + dig->afmt->offset, tmp);
+
+       tmp = RREG32(mmHDMI_ACR_44_0 + dig->afmt->offset);
+       tmp = REG_SET_FIELD(tmp, HDMI_ACR_44_0, HDMI_ACR_CTS_44, acr.cts_44_1khz);
+       WREG32(mmHDMI_ACR_44_0 + dig->afmt->offset, tmp);
+       tmp = RREG32(mmHDMI_ACR_44_1 + dig->afmt->offset);
+       tmp = REG_SET_FIELD(tmp, HDMI_ACR_44_1, HDMI_ACR_N_44, acr.n_44_1khz);
+       WREG32(mmHDMI_ACR_44_1 + dig->afmt->offset, tmp);
+
+       tmp = RREG32(mmHDMI_ACR_48_0 + dig->afmt->offset);
+       tmp = REG_SET_FIELD(tmp, HDMI_ACR_48_0, HDMI_ACR_CTS_48, acr.cts_48khz);
+       WREG32(mmHDMI_ACR_48_0 + dig->afmt->offset, tmp);
+       tmp = RREG32(mmHDMI_ACR_48_1 + dig->afmt->offset);
+       tmp = REG_SET_FIELD(tmp, HDMI_ACR_48_1, HDMI_ACR_N_48, acr.n_48khz);
+       WREG32(mmHDMI_ACR_48_1 + dig->afmt->offset, tmp);
+
+}
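+
+/* Per the HDMI spec the sink regenerates the audio clock as
+ * 128 * fs = f_TMDS * N / CTS, so a matching N/CTS pair is programmed for
+ * each of the 32, 44.1 and 48 kHz base rates; amdgpu_afmt_acr() provides
+ * those pairs for the given pixel clock.
+ */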
+
+/*
+ * build a HDMI Video Info Frame
+ */
+static void dce_v11_0_afmt_update_avi_infoframe(struct drm_encoder *encoder,
+                                              void *buffer, size_t size)
+{
+       struct drm_device *dev = encoder->dev;
+       struct amdgpu_device *adev = dev->dev_private;
+       struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+       struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
+       uint8_t *frame = buffer + 3;
+       uint8_t *header = buffer;
+
+       WREG32(mmAFMT_AVI_INFO0 + dig->afmt->offset,
+               frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
+       WREG32(mmAFMT_AVI_INFO1 + dig->afmt->offset,
+               frame[0x4] | (frame[0x5] << 8) | (frame[0x6] << 16) | (frame[0x7] << 24));
+       WREG32(mmAFMT_AVI_INFO2 + dig->afmt->offset,
+               frame[0x8] | (frame[0x9] << 8) | (frame[0xA] << 16) | (frame[0xB] << 24));
+       WREG32(mmAFMT_AVI_INFO3 + dig->afmt->offset,
+               frame[0xC] | (frame[0xD] << 8) | (header[1] << 24));
+}
+
+static void dce_v11_0_audio_set_dto(struct drm_encoder *encoder, u32 clock)
+{
+       struct drm_device *dev = encoder->dev;
+       struct amdgpu_device *adev = dev->dev_private;
+       struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+       struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
+       struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
+       u32 dto_phase = 24 * 1000;
+       u32 dto_modulo = clock;
+       u32 tmp;
+
+       if (!dig || !dig->afmt)
+               return;
+
+       /* XXX two dtos; generally use dto0 for hdmi */
+       /* Express [24MHz / target pixel clock] as an exact rational
+        * number (ratio of two integers): DCCG_AUDIO_DTOx_PHASE
+        * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator.
+        */
+       tmp = RREG32(mmDCCG_AUDIO_DTO_SOURCE);
+       tmp = REG_SET_FIELD(tmp, DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO0_SOURCE_SEL,
+                           amdgpu_crtc->crtc_id);
+       WREG32(mmDCCG_AUDIO_DTO_SOURCE, tmp);
+       WREG32(mmDCCG_AUDIO_DTO0_PHASE, dto_phase);
+       WREG32(mmDCCG_AUDIO_DTO0_MODULE, dto_modulo);
+}
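+
+/* Example: for a 148500 kHz pixel clock the DTO is programmed with
+ * PHASE = 24000 and MODULE = 148500, i.e. the 24 MHz / 148.5 MHz ratio
+ * described in the comment above.
+ */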
+
+/*
+ * update the info frames with the data from the current display mode
+ */
+static void dce_v11_0_afmt_setmode(struct drm_encoder *encoder,
+                                 struct drm_display_mode *mode)
+{
+       struct drm_device *dev = encoder->dev;
+       struct amdgpu_device *adev = dev->dev_private;
+       struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+       struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
+       struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
+       u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
+       struct hdmi_avi_infoframe frame;
+       ssize_t err;
+       u32 tmp;
+       int bpc = 8;
+
+       if (!dig || !dig->afmt)
+               return;
+
+       /* Silent, r600_hdmi_enable will raise WARN for us */
+       if (!dig->afmt->enabled)
+               return;
+
+       /* hdmi deep color mode general control packets setup, if bpc > 8 */
+       if (encoder->crtc) {
+               struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
+               bpc = amdgpu_crtc->bpc;
+       }
+
+       /* disable audio prior to setting up hw */
+       dig->afmt->pin = dce_v11_0_audio_get_pin(adev);
+       dce_v11_0_audio_enable(adev, dig->afmt->pin, false);
+
+       dce_v11_0_audio_set_dto(encoder, mode->clock);
+
+       tmp = RREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset);
+       tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_NULL_SEND, 1);
+       WREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset, tmp); /* send null packets when required */
+
+       WREG32(mmAFMT_AUDIO_CRC_CONTROL + dig->afmt->offset, 0x1000);
+
+       tmp = RREG32(mmHDMI_CONTROL + dig->afmt->offset);
+       switch (bpc) {
+       case 0:
+       case 6:
+       case 8:
+       case 16:
+       default:
+               tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_ENABLE, 0);
+               tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 0);
+               DRM_DEBUG("%s: Disabling hdmi deep color for %d bpc.\n",
+                         connector->name, bpc);
+               break;
+       case 10:
+               tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_ENABLE, 1);
+               tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 1);
+               DRM_DEBUG("%s: Enabling hdmi deep color 30 for 10 bpc.\n",
+                         connector->name);
+               break;
+       case 12:
+               tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_ENABLE, 1);
+               tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 2);
+               DRM_DEBUG("%s: Enabling hdmi deep color 36 for 12 bpc.\n",
+                         connector->name);
+               break;
+       }
+       WREG32(mmHDMI_CONTROL + dig->afmt->offset, tmp);
+
+       tmp = RREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset);
+       tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_NULL_SEND, 1); /* send null packets when required */
+       tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_GC_SEND, 1); /* send general control packets */
+       tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_GC_CONT, 1); /* send general control packets every frame */
+       WREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset, tmp);
+
+       tmp = RREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset);
+       /* enable audio info frames (frames won't be set until audio is enabled) */
+       tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_SEND, 1);
+       /* required for audio info values to be updated */
+       tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_CONT, 1);
+       WREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);
+
+       tmp = RREG32(mmAFMT_INFOFRAME_CONTROL0 + dig->afmt->offset);
+       /* required for audio info values to be updated */
+       tmp = REG_SET_FIELD(tmp, AFMT_INFOFRAME_CONTROL0, AFMT_AUDIO_INFO_UPDATE, 1);
+       WREG32(mmAFMT_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);
+
+       tmp = RREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset);
+       /* anything other than 0 */
+       tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL1, HDMI_AUDIO_INFO_LINE, 2);
+       WREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset, tmp);
+
+       WREG32(mmHDMI_GC + dig->afmt->offset, 0); /* unset HDMI_GC_AVMUTE */
+
+       tmp = RREG32(mmHDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset);
+       /* set the default audio delay */
+       tmp = REG_SET_FIELD(tmp, HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_DELAY_EN, 1);
+       /* should be sufficient for all audio modes and small enough for all hblanks */
+       tmp = REG_SET_FIELD(tmp, HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_PACKETS_PER_LINE, 3);
+       WREG32(mmHDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
+
+       tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset);
+       /* allow 60958 channel status fields to be updated */
+       tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_60958_CS_UPDATE, 1);
+       WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
+
+       tmp = RREG32(mmHDMI_ACR_PACKET_CONTROL + dig->afmt->offset);
+       if (bpc > 8)
+               /* clear SW CTS value */
+               tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_SOURCE, 0);
+       else
+               /* select SW CTS value */
+               tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_SOURCE, 1);
+       /* allow hw to send ACR packets when required */
+       tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_AUTO_SEND, 1);
+       WREG32(mmHDMI_ACR_PACKET_CONTROL + dig->afmt->offset, tmp);
+
+       dce_v11_0_afmt_update_ACR(encoder, mode->clock);
+
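+       /* program the IEC 60958 channel status channel numbers (left, right, then channels 2-7) */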
+       tmp = RREG32(mmAFMT_60958_0 + dig->afmt->offset);
+       tmp = REG_SET_FIELD(tmp, AFMT_60958_0, AFMT_60958_CS_CHANNEL_NUMBER_L, 1);
+       WREG32(mmAFMT_60958_0 + dig->afmt->offset, tmp);
+
+       tmp = RREG32(mmAFMT_60958_1 + dig->afmt->offset);
+       tmp = REG_SET_FIELD(tmp, AFMT_60958_1, AFMT_60958_CS_CHANNEL_NUMBER_R, 2);
+       WREG32(mmAFMT_60958_1 + dig->afmt->offset, tmp);
+
+       tmp = RREG32(mmAFMT_60958_2 + dig->afmt->offset);
+       tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_2, 3);
+       tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_3, 4);
+       tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_4, 5);
+       tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_5, 6);
+       tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_6, 7);
+       tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_7, 8);
+       WREG32(mmAFMT_60958_2 + dig->afmt->offset, tmp);
+
+       dce_v11_0_audio_write_speaker_allocation(encoder);
+
+       WREG32(mmAFMT_AUDIO_PACKET_CONTROL2 + dig->afmt->offset,
+              (0xff << AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE__SHIFT));
+
+       dce_v11_0_afmt_audio_select_pin(encoder);
+       dce_v11_0_audio_write_sad_regs(encoder);
+       dce_v11_0_audio_write_latency_fields(encoder, mode);
+
+       err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
+       if (err < 0) {
+               DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
+               return;
+       }
+
+       err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
+       if (err < 0) {
+               DRM_ERROR("failed to pack AVI infoframe: %zd\n", err);
+               return;
+       }
+
+       dce_v11_0_afmt_update_avi_infoframe(encoder, buffer, sizeof(buffer));
+
+       tmp = RREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset);
+       /* enable AVI info frames */
+       tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_SEND, 1);
+       /* required for audio info values to be updated */
+       tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_CONT, 1);
+       WREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);
+
+       tmp = RREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset);
+       tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL1, HDMI_AVI_INFO_LINE, 2);
+       WREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset, tmp);
+
+       tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset);
+       /* send audio packets */
+       tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, 1);
+       WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
+
+       WREG32(mmAFMT_RAMP_CONTROL0 + dig->afmt->offset, 0x00FFFFFF);
+       WREG32(mmAFMT_RAMP_CONTROL1 + dig->afmt->offset, 0x007FFFFF);
+       WREG32(mmAFMT_RAMP_CONTROL2 + dig->afmt->offset, 0x00000001);
+       WREG32(mmAFMT_RAMP_CONTROL3 + dig->afmt->offset, 0x00000001);
+
+       /* enable audio after setting up hw */
+       dce_v11_0_audio_enable(adev, dig->afmt->pin, true);
+}
+
+static void dce_v11_0_afmt_enable(struct drm_encoder *encoder, bool enable)
+{
+       struct drm_device *dev = encoder->dev;
+       struct amdgpu_device *adev = dev->dev_private;
+       struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+       struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
+
+       if (!dig || !dig->afmt)
+               return;
+
+       /* Silent, r600_hdmi_enable will raise WARN for us */
+       if (enable && dig->afmt->enabled)
+               return;
+       if (!enable && !dig->afmt->enabled)
+               return;
+
+       if (!enable && dig->afmt->pin) {
+               dce_v11_0_audio_enable(adev, dig->afmt->pin, false);
+               dig->afmt->pin = NULL;
+       }
+
+       dig->afmt->enabled = enable;
+
+       DRM_DEBUG("%sabling AFMT interface @ 0x%04X for encoder 0x%x\n",
+                 enable ? "En" : "Dis", dig->afmt->offset, amdgpu_encoder->encoder_id);
+}
+
+static void dce_v11_0_afmt_init(struct amdgpu_device *adev)
+{
+       int i;
+
+       for (i = 0; i < adev->mode_info.num_dig; i++)
+               adev->mode_info.afmt[i] = NULL;
+
+       /* DCE11 has audio blocks tied to DIG encoders */
+       for (i = 0; i < adev->mode_info.num_dig; i++) {
+               adev->mode_info.afmt[i] = kzalloc(sizeof(struct amdgpu_afmt), GFP_KERNEL);
+               if (adev->mode_info.afmt[i]) {
+                       adev->mode_info.afmt[i]->offset = dig_offsets[i];
+                       adev->mode_info.afmt[i]->id = i;
+               }
+       }
+}
+
+static void dce_v11_0_afmt_fini(struct amdgpu_device *adev)
+{
+       int i;
+
+       for (i = 0; i < adev->mode_info.num_dig; i++) {
+               kfree(adev->mode_info.afmt[i]);
+               adev->mode_info.afmt[i] = NULL;
+       }
+}
+
+static const u32 vga_control_regs[6] =
+{
+       mmD1VGA_CONTROL,
+       mmD2VGA_CONTROL,
+       mmD3VGA_CONTROL,
+       mmD4VGA_CONTROL,
+       mmD5VGA_CONTROL,
+       mmD6VGA_CONTROL,
+};
+
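+/* Enable or disable VGA mode for the crtc by toggling bit 0 of its DxVGA_CONTROL register. */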
+static void dce_v11_0_vga_enable(struct drm_crtc *crtc, bool enable)
+{
+       struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+       struct drm_device *dev = crtc->dev;
+       struct amdgpu_device *adev = dev->dev_private;
+       u32 vga_control;
+
+       vga_control = RREG32(vga_control_regs[amdgpu_crtc->crtc_id]) & ~1;
+       if (enable)
+               WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control | 1);
+       else
+               WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control);
+}
+
+static void dce_v11_0_grph_enable(struct drm_crtc *crtc, bool enable)
+{
+       struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+       struct drm_device *dev = crtc->dev;
+       struct amdgpu_device *adev = dev->dev_private;
+
+       if (enable)
+               WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 1);
+       else
+               WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 0);
+}
+
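+/* Decode bank width/height, macro tile aspect and tile split from the bo tiling
+ * flags and convert them to the ADDR_SURF_* encodings used in GRPH_CONTROL.
+ */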
+static void dce_v11_0_tiling_fields(uint64_t tiling_flags, unsigned *bankw,
+                                  unsigned *bankh, unsigned *mtaspect,
+                                  unsigned *tile_split)
+{
+       *bankw = (tiling_flags >> AMDGPU_TILING_EG_BANKW_SHIFT) & AMDGPU_TILING_EG_BANKW_MASK;
+       *bankh = (tiling_flags >> AMDGPU_TILING_EG_BANKH_SHIFT) & AMDGPU_TILING_EG_BANKH_MASK;
+       *mtaspect = (tiling_flags >> AMDGPU_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & AMDGPU_TILING_EG_MACRO_TILE_ASPECT_MASK;
+       *tile_split = (tiling_flags >> AMDGPU_TILING_EG_TILE_SPLIT_SHIFT) & AMDGPU_TILING_EG_TILE_SPLIT_MASK;
+       switch (*bankw) {
+       default:
+       case 1:
+               *bankw = ADDR_SURF_BANK_WIDTH_1;
+               break;
+       case 2:
+               *bankw = ADDR_SURF_BANK_WIDTH_2;
+               break;
+       case 4:
+               *bankw = ADDR_SURF_BANK_WIDTH_4;
+               break;
+       case 8:
+               *bankw = ADDR_SURF_BANK_WIDTH_8;
+               break;
+       }
+       switch (*bankh) {
+       default:
+       case 1:
+               *bankh = ADDR_SURF_BANK_HEIGHT_1;
+               break;
+       case 2:
+               *bankh = ADDR_SURF_BANK_HEIGHT_2;
+               break;
+       case 4:
+               *bankh = ADDR_SURF_BANK_HEIGHT_4;
+               break;
+       case 8:
+               *bankh = ADDR_SURF_BANK_HEIGHT_8;
+               break;
+       }
+       switch (*mtaspect) {
+       default:
+       case 1:
+               *mtaspect = ADDR_SURF_MACRO_ASPECT_1;
+               break;
+       case 2:
+               *mtaspect = ADDR_SURF_MACRO_ASPECT_2;
+               break;
+       case 4:
+               *mtaspect = ADDR_SURF_MACRO_ASPECT_4;
+               break;
+       case 8:
+               *mtaspect = ADDR_SURF_MACRO_ASPECT_8;
+               break;
+       }
+}
+
+static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc,
+                                    struct drm_framebuffer *fb,
+                                    int x, int y, int atomic)
+{
+       struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+       struct drm_device *dev = crtc->dev;
+       struct amdgpu_device *adev = dev->dev_private;
+       struct amdgpu_framebuffer *amdgpu_fb;
+       struct drm_framebuffer *target_fb;
+       struct drm_gem_object *obj;
+       struct amdgpu_bo *rbo;
+       uint64_t fb_location, tiling_flags;
+       uint32_t fb_format, fb_pitch_pixels;
+       unsigned bankw, bankh, mtaspect, tile_split;
+       u32 fb_swap = REG_SET_FIELD(0, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, ENDIAN_NONE);
+       /* XXX change to VI */
+       u32 pipe_config = (adev->gfx.config.tile_mode_array[10] >> 6) & 0x1f;
+       u32 tmp, viewport_w, viewport_h;
+       int r;
+       bool bypass_lut = false;
+
+       /* no fb bound */
+       if (!atomic && !crtc->primary->fb) {
+               DRM_DEBUG_KMS("No FB bound\n");
+               return 0;
+       }
+
+       if (atomic) {
+               amdgpu_fb = to_amdgpu_framebuffer(fb);
+               target_fb = fb;
+       } else {
+               amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
+               target_fb = crtc->primary->fb;
+       }
+
+       /* If atomic, assume fb object is pinned & idle & fenced and
+        * just update base pointers
+        */
+       obj = amdgpu_fb->obj;
+       rbo = gem_to_amdgpu_bo(obj);
+       r = amdgpu_bo_reserve(rbo, false);
+       if (unlikely(r != 0))
+               return r;
+
+       if (atomic) {
+               fb_location = amdgpu_bo_gpu_offset(rbo);
+       } else {
+               r = amdgpu_bo_pin(rbo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
+               if (unlikely(r != 0)) {
+                       amdgpu_bo_unreserve(rbo);
+                       return -EINVAL;
+               }
+       }
+
+       amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);
+       amdgpu_bo_unreserve(rbo);
+
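+       /* translate the DRM pixel format into GRPH_CONTROL depth/format (and endian swap on big-endian hosts) */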
+       switch (target_fb->pixel_format) {
+       case DRM_FORMAT_C8:
+               fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 0);
+               fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0);
+               break;
+       case DRM_FORMAT_XRGB4444:
+       case DRM_FORMAT_ARGB4444:
+               fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1);
+               fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 2);
+#ifdef __BIG_ENDIAN
+               fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
+                                       ENDIAN_8IN16);
+#endif
+               break;
+       case DRM_FORMAT_XRGB1555:
+       case DRM_FORMAT_ARGB1555:
+               fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1);
+               fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0);
+#ifdef __BIG_ENDIAN
+               fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
+                                       ENDIAN_8IN16);
+#endif
+               break;
+       case DRM_FORMAT_BGRX5551:
+       case DRM_FORMAT_BGRA5551:
+               fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1);
+               fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 5);
+#ifdef __BIG_ENDIAN
+               fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
+                                       ENDIAN_8IN16);
+#endif
+               break;
+       case DRM_FORMAT_RGB565:
+               fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1);
+               fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 1);
+#ifdef __BIG_ENDIAN
+               fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
+                                       ENDIAN_8IN16);
+#endif
+               break;
+       case DRM_FORMAT_XRGB8888:
+       case DRM_FORMAT_ARGB8888:
+               fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2);
+               fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0);
+#ifdef __BIG_ENDIAN
+               fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
+                                       ENDIAN_8IN32);
+#endif
+               break;
+       case DRM_FORMAT_XRGB2101010:
+       case DRM_FORMAT_ARGB2101010:
+               fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2);
+               fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 1);
+#ifdef __BIG_ENDIAN
+               fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
+                                       ENDIAN_8IN32);
+#endif
+               /* Greater 8 bpc fb needs to bypass hw-lut to retain precision */
+               bypass_lut = true;
+               break;
+       case DRM_FORMAT_BGRX1010102:
+       case DRM_FORMAT_BGRA1010102:
+               fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2);
+               fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 4);
+#ifdef __BIG_ENDIAN
+               fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
+                                       ENDIAN_8IN32);
+#endif
+               /* Greater 8 bpc fb needs to bypass hw-lut to retain precision */
+               bypass_lut = true;
+               break;
+       default:
+               DRM_ERROR("Unsupported screen format %s\n",
+                       drm_get_format_name(target_fb->pixel_format));
+               return -EINVAL;
+       }
+
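+       /* program the 2D/1D tiling parameters from the bo tiling flags */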
+       if (tiling_flags & AMDGPU_TILING_MACRO) {
+               unsigned tileb, index, num_banks, tile_split_bytes;
+
+               dce_v11_0_tiling_fields(tiling_flags, &bankw, &bankh, &mtaspect, &tile_split);
+               /* Set NUM_BANKS. */
+               /* Calculate the macrotile mode index. */
+               tile_split_bytes = 64 << tile_split;
+               tileb = 8 * 8 * target_fb->bits_per_pixel / 8;
+               tileb = min(tile_split_bytes, tileb);
+
+               for (index = 0; tileb > 64; index++) {
+                       tileb >>= 1;
+               }
+
+               if (index >= 16) {
+                       DRM_ERROR("Wrong screen bpp (%u) or tile split (%u)\n",
+                                 target_fb->bits_per_pixel, tile_split);
+                       return -EINVAL;
+               }
+
+               /* XXX fix me for VI */
+               num_banks = (adev->gfx.config.macrotile_mode_array[index] >> 6) & 0x3;
+               fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_NUM_BANKS, num_banks);
+               fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_ARRAY_MODE,
+                                         ARRAY_2D_TILED_THIN1);
+               fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_TILE_SPLIT,
+                                         tile_split);
+               fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_BANK_WIDTH, bankw);
+               fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_BANK_HEIGHT, bankh);
+               fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_MACRO_TILE_ASPECT,
+                                         mtaspect);
+               fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_MICRO_TILE_MODE,
+                                         ADDR_SURF_MICRO_TILING_DISPLAY);
+       } else if (tiling_flags & AMDGPU_TILING_MICRO) {
+               fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_ARRAY_MODE,
+                                         ARRAY_1D_TILED_THIN1);
+       }
+
+       /* Read the pipe config from the 2D TILED SCANOUT mode.
+        * It should be the same for the other modes too, but not all
+        * modes set the pipe config field. */
+       fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_PIPE_CONFIG,
+                                 pipe_config);
+
+       dce_v11_0_vga_enable(crtc, false);
+
+       WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
+              upper_32_bits(fb_location));
+       WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
+              upper_32_bits(fb_location));
+       WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
+              (u32)fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK);
+       WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
+              (u32) fb_location & GRPH_SECONDARY_SURFACE_ADDRESS__GRPH_SECONDARY_SURFACE_ADDRESS_MASK);
+       WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format);
+       WREG32(mmGRPH_SWAP_CNTL + amdgpu_crtc->crtc_offset, fb_swap);
+
+       /*
+        * The LUT only has 256 slots for indexing by a 8 bpc fb. Bypass the LUT
+        * for > 8 bpc scanout to avoid truncation of fb indices to 8 msb's, to
+        * retain the full precision throughout the pipeline.
+        */
+       tmp = RREG32(mmGRPH_LUT_10BIT_BYPASS + amdgpu_crtc->crtc_offset);
+       if (bypass_lut)
+               tmp = REG_SET_FIELD(tmp, GRPH_LUT_10BIT_BYPASS, GRPH_LUT_10BIT_BYPASS_EN, 1);
+       else
+               tmp = REG_SET_FIELD(tmp, GRPH_LUT_10BIT_BYPASS, GRPH_LUT_10BIT_BYPASS_EN, 0);
+       WREG32(mmGRPH_LUT_10BIT_BYPASS + amdgpu_crtc->crtc_offset, tmp);
+
+       if (bypass_lut)
+               DRM_DEBUG_KMS("Bypassing hardware LUT due to 10 bit fb scanout.\n");
+
+       WREG32(mmGRPH_SURFACE_OFFSET_X + amdgpu_crtc->crtc_offset, 0);
+       WREG32(mmGRPH_SURFACE_OFFSET_Y + amdgpu_crtc->crtc_offset, 0);
+       WREG32(mmGRPH_X_START + amdgpu_crtc->crtc_offset, 0);
+       WREG32(mmGRPH_Y_START + amdgpu_crtc->crtc_offset, 0);
+       WREG32(mmGRPH_X_END + amdgpu_crtc->crtc_offset, target_fb->width);
+       WREG32(mmGRPH_Y_END + amdgpu_crtc->crtc_offset, target_fb->height);
+
+       fb_pitch_pixels = target_fb->pitches[0] / (target_fb->bits_per_pixel / 8);
+       WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset, fb_pitch_pixels);
+
+       dce_v11_0_grph_enable(crtc, true);
+
+       WREG32(mmLB_DESKTOP_HEIGHT + amdgpu_crtc->crtc_offset,
+              target_fb->height);
+
+       x &= ~3;
+       y &= ~1;
+       WREG32(mmVIEWPORT_START + amdgpu_crtc->crtc_offset,
+              (x << 16) | y);
+       viewport_w = crtc->mode.hdisplay;
+       viewport_h = (crtc->mode.vdisplay + 1) & ~1;
+       WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset,
+              (viewport_w << 16) | viewport_h);
+
+       /* pageflip setup */
+       /* make sure flip is at vb rather than hb */
+       tmp = RREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset);
+       tmp = REG_SET_FIELD(tmp, GRPH_FLIP_CONTROL,
+                           GRPH_SURFACE_UPDATE_H_RETRACE_EN, 0);
+       WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, tmp);
+
+       /* set pageflip to happen only at start of vblank interval (front porch) */
+       WREG32(mmCRTC_MASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 3);
+
+       if (!atomic && fb && fb != crtc->primary->fb) {
+               amdgpu_fb = to_amdgpu_framebuffer(fb);
+               rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
+               r = amdgpu_bo_reserve(rbo, false);
+               if (unlikely(r != 0))
+                       return r;
+               amdgpu_bo_unpin(rbo);
+               amdgpu_bo_unreserve(rbo);
+       }
+
+       /* Bytes per pixel may have changed */
+       dce_v11_0_bandwidth_update(adev);
+
+       return 0;
+}
+
+static void dce_v11_0_set_interleave(struct drm_crtc *crtc,
+                                    struct drm_display_mode *mode)
+{
+       struct drm_device *dev = crtc->dev;
+       struct amdgpu_device *adev = dev->dev_private;
+       struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+       u32 tmp;
+
+       tmp = RREG32(mmLB_DATA_FORMAT + amdgpu_crtc->crtc_offset);
+       if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+               tmp = REG_SET_FIELD(tmp, LB_DATA_FORMAT, INTERLEAVE_EN, 1);
+       else
+               tmp = REG_SET_FIELD(tmp, LB_DATA_FORMAT, INTERLEAVE_EN, 0);
+       WREG32(mmLB_DATA_FORMAT + amdgpu_crtc->crtc_offset, tmp);
+}
+
+static void dce_v11_0_crtc_load_lut(struct drm_crtc *crtc)
+{
+       struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+       struct drm_device *dev = crtc->dev;
+       struct amdgpu_device *adev = dev->dev_private;
+       int i;
+       u32 tmp;
+
+       DRM_DEBUG_KMS("%d\n", amdgpu_crtc->crtc_id);
+
+       tmp = RREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset);
+       tmp = REG_SET_FIELD(tmp, INPUT_CSC_CONTROL, INPUT_CSC_GRPH_MODE, 0);
+       WREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset, tmp);
+
+       tmp = RREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset);
+       tmp = REG_SET_FIELD(tmp, PRESCALE_GRPH_CONTROL, GRPH_PRESCALE_BYPASS, 1);
+       WREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset, tmp);
+
+       tmp = RREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset);
+       tmp = REG_SET_FIELD(tmp, INPUT_GAMMA_CONTROL, GRPH_INPUT_GAMMA_MODE, 0);
+       WREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset, tmp);
+
+       WREG32(mmDC_LUT_CONTROL + amdgpu_crtc->crtc_offset, 0);
+
+       WREG32(mmDC_LUT_BLACK_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0);
+       WREG32(mmDC_LUT_BLACK_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0);
+       WREG32(mmDC_LUT_BLACK_OFFSET_RED + amdgpu_crtc->crtc_offset, 0);
+
+       WREG32(mmDC_LUT_WHITE_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0xffff);
+       WREG32(mmDC_LUT_WHITE_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0xffff);
+       WREG32(mmDC_LUT_WHITE_OFFSET_RED + amdgpu_crtc->crtc_offset, 0xffff);
+
+       WREG32(mmDC_LUT_RW_MODE + amdgpu_crtc->crtc_offset, 0);
+       WREG32(mmDC_LUT_WRITE_EN_MASK + amdgpu_crtc->crtc_offset, 0x00000007);
+
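+       /* upload the 256 entry gamma LUT, 10 bits per color component */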
+       WREG32(mmDC_LUT_RW_INDEX + amdgpu_crtc->crtc_offset, 0);
+       for (i = 0; i < 256; i++) {
+               WREG32(mmDC_LUT_30_COLOR + amdgpu_crtc->crtc_offset,
+                      (amdgpu_crtc->lut_r[i] << 20) |
+                      (amdgpu_crtc->lut_g[i] << 10) |
+                      (amdgpu_crtc->lut_b[i] << 0));
+       }
+
+       tmp = RREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset);
+       tmp = REG_SET_FIELD(tmp, DEGAMMA_CONTROL, GRPH_DEGAMMA_MODE, 0);
+       tmp = REG_SET_FIELD(tmp, DEGAMMA_CONTROL, CURSOR_DEGAMMA_MODE, 0);
+       tmp = REG_SET_FIELD(tmp, DEGAMMA_CONTROL, CURSOR2_DEGAMMA_MODE, 0);
+       WREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset, tmp);
+
+       tmp = RREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset);
+       tmp = REG_SET_FIELD(tmp, GAMUT_REMAP_CONTROL, GRPH_GAMUT_REMAP_MODE, 0);
+       WREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset, tmp);
+
+       tmp = RREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset);
+       tmp = REG_SET_FIELD(tmp, REGAMMA_CONTROL, GRPH_REGAMMA_MODE, 0);
+       WREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset, tmp);
+
+       tmp = RREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset);
+       tmp = REG_SET_FIELD(tmp, OUTPUT_CSC_CONTROL, OUTPUT_CSC_GRPH_MODE, 0);
+       WREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset, tmp);
+
+       /* XXX match this to the depth of the crtc fmt block, move to modeset? */
+       WREG32(mmDENORM_CONTROL + amdgpu_crtc->crtc_offset, 0);
+       /* XXX this only needs to be programmed once per crtc at startup,
+        * not sure where the best place for it is
+        */
+       tmp = RREG32(mmALPHA_CONTROL + amdgpu_crtc->crtc_offset);
+       tmp = REG_SET_FIELD(tmp, ALPHA_CONTROL, CURSOR_ALPHA_BLND_ENA, 1);
+       WREG32(mmALPHA_CONTROL + amdgpu_crtc->crtc_offset, tmp);
+}
+
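+/* Map the UNIPHY encoder and link (A/B) to a DIG encoder index (0-6). */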
+static int dce_v11_0_pick_dig_encoder(struct drm_encoder *encoder)
+{
+       struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+       struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
+
+       switch (amdgpu_encoder->encoder_id) {
+       case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+               if (dig->linkb)
+                       return 1;
+               else
+                       return 0;
+               break;
+       case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+               if (dig->linkb)
+                       return 3;
+               else
+                       return 2;
+               break;
+       case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+               if (dig->linkb)
+                       return 5;
+               else
+                       return 4;
+               break;
+       case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
+               return 6;
+               break;
+       default:
+               DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id);
+               return 0;
+       }
+}
+
+/**
+ * dce_v11_0_pick_pll - Allocate a PPLL for use by the crtc.
+ *
+ * @crtc: drm crtc
+ *
+ * Returns the PPLL (Pixel PLL) to be used by the crtc.  For DP monitors
+ * a single PPLL can be used for all DP crtcs/encoders.  For non-DP
+ * monitors a dedicated PPLL must be used.  If a particular board has
+ * an external DP PLL, return ATOM_PPLL_INVALID to skip PLL programming
+ * as there is no need to program the PLL itself.  If we are not able to
+ * allocate a PLL, return ATOM_PPLL_INVALID to skip PLL programming to
+ * avoid messing up an existing monitor.
+ *
+ * Asic specific PLL information
+ *
+ * DCE 11.x
+ * Carrizo
+ * - PPLL0, PPLL1 are used for all UNIPHY (both DP and non-DP)
+ *
+ */
+static u32 dce_v11_0_pick_pll(struct drm_crtc *crtc)
+{
+       struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+       struct drm_device *dev = crtc->dev;
+       struct amdgpu_device *adev = dev->dev_private;
+       u32 pll_in_use;
+       int pll;
+
+       if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder))) {
+               if (adev->clock.dp_extclk)
+                       /* skip PPLL programming if using ext clock */
+                       return ATOM_PPLL_INVALID;
+               else {
+                       /* use the same PPLL for all DP monitors */
+                       pll = amdgpu_pll_get_shared_dp_ppll(crtc);
+                       if (pll != ATOM_PPLL_INVALID)
+                               return pll;
+               }
+       } else {
+               /* use the same PPLL for all monitors with the same clock */
+               pll = amdgpu_pll_get_shared_nondp_ppll(crtc);
+               if (pll != ATOM_PPLL_INVALID)
+                       return pll;
+       }
+
+       /* XXX need to determine what plls are available on each DCE11 part */
+       pll_in_use = amdgpu_pll_get_use_mask(crtc);
+       if (adev->asic_type == CHIP_CARRIZO) {
+               if (!(pll_in_use & (1 << ATOM_PPLL1)))
+                       return ATOM_PPLL1;
+               if (!(pll_in_use & (1 << ATOM_PPLL0)))
+                       return ATOM_PPLL0;
+               DRM_ERROR("unable to allocate a PPLL\n");
+               return ATOM_PPLL_INVALID;
+       } else {
+               if (!(pll_in_use & (1 << ATOM_PPLL2)))
+                       return ATOM_PPLL2;
+               if (!(pll_in_use & (1 << ATOM_PPLL1)))
+                       return ATOM_PPLL1;
+               if (!(pll_in_use & (1 << ATOM_PPLL0)))
+                       return ATOM_PPLL0;
+               DRM_ERROR("unable to allocate a PPLL\n");
+               return ATOM_PPLL_INVALID;
+       }
+       return ATOM_PPLL_INVALID;
+}
+
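+/* Lock/unlock cursor register updates while the cursor position, hot spot and
+ * size are reprogrammed.
+ */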
+static void dce_v11_0_lock_cursor(struct drm_crtc *crtc, bool lock)
+{
+       struct amdgpu_device *adev = crtc->dev->dev_private;
+       struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+       uint32_t cur_lock;
+
+       cur_lock = RREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset);
+       if (lock)
+               cur_lock = REG_SET_FIELD(cur_lock, CUR_UPDATE, CURSOR_UPDATE_LOCK, 1);
+       else
+               cur_lock = REG_SET_FIELD(cur_lock, CUR_UPDATE, CURSOR_UPDATE_LOCK, 0);
+       WREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset, cur_lock);
+}
+
+static void dce_v11_0_hide_cursor(struct drm_crtc *crtc)
+{
+       struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+       struct amdgpu_device *adev = crtc->dev->dev_private;
+       u32 tmp;
+
+       tmp = RREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset);
+       tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_EN, 0);
+       WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp);
+}
+
+static void dce_v11_0_show_cursor(struct drm_crtc *crtc)
+{
+       struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+       struct amdgpu_device *adev = crtc->dev->dev_private;
+       u32 tmp;
+
+       tmp = RREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset);
+       tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_EN, 1);
+       tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_MODE, 2);
+       WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp);
+}
+
+static void dce_v11_0_set_cursor(struct drm_crtc *crtc, struct drm_gem_object *obj,
+                             uint64_t gpu_addr)
+{
+       struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+       struct amdgpu_device *adev = crtc->dev->dev_private;
+
+       WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
+              upper_32_bits(gpu_addr));
+       WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
+              lower_32_bits(gpu_addr));
+}
+
+static int dce_v11_0_crtc_cursor_move(struct drm_crtc *crtc,
+                                    int x, int y)
+{
+       struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+       struct amdgpu_device *adev = crtc->dev->dev_private;
+       int xorigin = 0, yorigin = 0;
+
+       /* avivo cursors are offset into the total surface */
+       x += crtc->x;
+       y += crtc->y;
+       DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
+
+       if (x < 0) {
+               xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
+               x = 0;
+       }
+       if (y < 0) {
+               yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
+               y = 0;
+       }
+
+       dce_v11_0_lock_cursor(crtc, true);
+       WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
+       WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
+       WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
+              ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
+       dce_v11_0_lock_cursor(crtc, false);
+
+       return 0;
+}
+
+static int dce_v11_0_crtc_cursor_set(struct drm_crtc *crtc,
+                                   struct drm_file *file_priv,
+                                   uint32_t handle,
+                                   uint32_t width,
+                                   uint32_t height)
+{
+       struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+       struct drm_gem_object *obj;
+       struct amdgpu_bo *robj;
+       uint64_t gpu_addr;
+       int ret;
+
+       if (!handle) {
+               /* turn off cursor */
+               dce_v11_0_hide_cursor(crtc);
+               obj = NULL;
+               goto unpin;
+       }
+
+       if ((width > amdgpu_crtc->max_cursor_width) ||
+           (height > amdgpu_crtc->max_cursor_height)) {
+               DRM_ERROR("bad cursor width or height %d x %d\n", width, height);
+               return -EINVAL;
+       }
+
+       obj = drm_gem_object_lookup(crtc->dev, file_priv, handle);
+       if (!obj) {
+               DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, amdgpu_crtc->crtc_id);
+               return -ENOENT;
+       }
+
+       robj = gem_to_amdgpu_bo(obj);
+       ret = amdgpu_bo_reserve(robj, false);
+       if (unlikely(ret != 0))
+               goto fail;
+       ret = amdgpu_bo_pin_restricted(robj, AMDGPU_GEM_DOMAIN_VRAM,
+                                      0, &gpu_addr);
+       amdgpu_bo_unreserve(robj);
+       if (ret)
+               goto fail;
+
+       amdgpu_crtc->cursor_width = width;
+       amdgpu_crtc->cursor_height = height;
+
+       dce_v11_0_lock_cursor(crtc, true);
+       dce_v11_0_set_cursor(crtc, obj, gpu_addr);
+       dce_v11_0_show_cursor(crtc);
+       dce_v11_0_lock_cursor(crtc, false);
+
+unpin:
+       if (amdgpu_crtc->cursor_bo) {
+               robj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
+               ret = amdgpu_bo_reserve(robj, false);
+               if (likely(ret == 0)) {
+                       amdgpu_bo_unpin(robj);
+                       amdgpu_bo_unreserve(robj);
+               }
+               drm_gem_object_unreference_unlocked(amdgpu_crtc->cursor_bo);
+       }
+
+       amdgpu_crtc->cursor_bo = obj;
+       return 0;
+fail:
+       drm_gem_object_unreference_unlocked(obj);
+
+       return ret;
+}
+
+static void dce_v11_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
+                                   u16 *blue, uint32_t start, uint32_t size)
+{
+       struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+       int end = (start + size > 256) ? 256 : start + size, i;
+
+       /* userspace palettes are always correct as is */
+       for (i = start; i < end; i++) {
+               amdgpu_crtc->lut_r[i] = red[i] >> 6;
+               amdgpu_crtc->lut_g[i] = green[i] >> 6;
+               amdgpu_crtc->lut_b[i] = blue[i] >> 6;
+       }
+       dce_v11_0_crtc_load_lut(crtc);
+}
+
+static void dce_v11_0_crtc_destroy(struct drm_crtc *crtc)
+{
+       struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+
+       drm_crtc_cleanup(crtc);
+       destroy_workqueue(amdgpu_crtc->pflip_queue);
+       kfree(amdgpu_crtc);
+}
+
+static const struct drm_crtc_funcs dce_v11_0_crtc_funcs = {
+       .cursor_set = dce_v11_0_crtc_cursor_set,
+       .cursor_move = dce_v11_0_crtc_cursor_move,
+       .gamma_set = dce_v11_0_crtc_gamma_set,
+       .set_config = amdgpu_crtc_set_config,
+       .destroy = dce_v11_0_crtc_destroy,
+       .page_flip = amdgpu_crtc_page_flip,
+};
+
+static void dce_v11_0_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+       struct drm_device *dev = crtc->dev;
+       struct amdgpu_device *adev = dev->dev_private;
+       struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+
+       switch (mode) {
+       case DRM_MODE_DPMS_ON:
+               amdgpu_crtc->enabled = true;
+               amdgpu_atombios_crtc_enable(crtc, ATOM_ENABLE);
+               dce_v11_0_vga_enable(crtc, true);
+               amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
+               dce_v11_0_vga_enable(crtc, false);
+               drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id);
+               dce_v11_0_crtc_load_lut(crtc);
+               break;
+       case DRM_MODE_DPMS_STANDBY:
+       case DRM_MODE_DPMS_SUSPEND:
+       case DRM_MODE_DPMS_OFF:
+               drm_vblank_pre_modeset(dev, amdgpu_crtc->crtc_id);
+               if (amdgpu_crtc->enabled) {
+                       dce_v11_0_vga_enable(crtc, true);
+                       amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE);
+                       dce_v11_0_vga_enable(crtc, false);
+               }
+               amdgpu_atombios_crtc_enable(crtc, ATOM_DISABLE);
+               amdgpu_crtc->enabled = false;
+               break;
+       }
+       /* adjust pm to dpms */
+       amdgpu_pm_compute_clocks(adev);
+}
+
+static void dce_v11_0_crtc_prepare(struct drm_crtc *crtc)
+{
+       /* disable crtc pair power gating before programming */
+       amdgpu_atombios_crtc_powergate(crtc, ATOM_DISABLE);
+       amdgpu_atombios_crtc_lock(crtc, ATOM_ENABLE);
+       dce_v11_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+}
+
+static void dce_v11_0_crtc_commit(struct drm_crtc *crtc)
+{
+       dce_v11_0_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
+       amdgpu_atombios_crtc_lock(crtc, ATOM_DISABLE);
+}
+
+static void dce_v11_0_crtc_disable(struct drm_crtc *crtc)
+{
+       struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+       struct drm_device *dev = crtc->dev;
+       struct amdgpu_device *adev = dev->dev_private;
+       struct amdgpu_atom_ss ss;
+       int i;
+
+       dce_v11_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+       if (crtc->primary->fb) {
+               int r;
+               struct amdgpu_framebuffer *amdgpu_fb;
+               struct amdgpu_bo *rbo;
+
+               amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
+               rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
+               r = amdgpu_bo_reserve(rbo, false);
+               if (unlikely(r)) {
+                       DRM_ERROR("failed to reserve rbo before unpin\n");
+               } else {
+                       amdgpu_bo_unpin(rbo);
+                       amdgpu_bo_unreserve(rbo);
+               }
+       }
+       /* disable the GRPH */
+       dce_v11_0_grph_enable(crtc, false);
+
+       amdgpu_atombios_crtc_powergate(crtc, ATOM_ENABLE);
+
+       for (i = 0; i < adev->mode_info.num_crtc; i++) {
+               if (adev->mode_info.crtcs[i] &&
+                   adev->mode_info.crtcs[i]->enabled &&
+                   i != amdgpu_crtc->crtc_id &&
+                   amdgpu_crtc->pll_id == adev->mode_info.crtcs[i]->pll_id) {
+                       /* one other crtc is using this pll, don't turn
+                        * off the pll
+                        */
+                       goto done;
+               }
+       }
+
+       switch (amdgpu_crtc->pll_id) {
+       case ATOM_PPLL0:
+       case ATOM_PPLL1:
+       case ATOM_PPLL2:
+               /* disable the ppll */
+               amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id,
+                                         0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
+               break;
+       default:
+               break;
+       }
+done:
+       amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
+       amdgpu_crtc->adjusted_clock = 0;
+       amdgpu_crtc->encoder = NULL;
+       amdgpu_crtc->connector = NULL;
+}
+
+static int dce_v11_0_crtc_mode_set(struct drm_crtc *crtc,
+                                 struct drm_display_mode *mode,
+                                 struct drm_display_mode *adjusted_mode,
+                                 int x, int y, struct drm_framebuffer *old_fb)
+{
+       struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+
+       if (!amdgpu_crtc->adjusted_clock)
+               return -EINVAL;
+
+       amdgpu_atombios_crtc_set_pll(crtc, adjusted_mode);
+       amdgpu_atombios_crtc_set_dtd_timing(crtc, adjusted_mode);
+       dce_v11_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
+       amdgpu_atombios_crtc_overscan_setup(crtc, mode, adjusted_mode);
+       amdgpu_atombios_crtc_scaler_setup(crtc);
+       /* update the hw version for dpm */
+       amdgpu_crtc->hw_mode = *adjusted_mode;
+
+       return 0;
+}
+
+static bool dce_v11_0_crtc_mode_fixup(struct drm_crtc *crtc,
+                                    const struct drm_display_mode *mode,
+                                    struct drm_display_mode *adjusted_mode)
+{
+       struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+       struct drm_device *dev = crtc->dev;
+       struct drm_encoder *encoder;
+
+       /* assign the encoder to the amdgpu crtc to avoid repeated lookups later */
+       list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+               if (encoder->crtc == crtc) {
+                       amdgpu_crtc->encoder = encoder;
+                       amdgpu_crtc->connector = amdgpu_get_connector_for_encoder(encoder);
+                       break;
+               }
+       }
+       if ((amdgpu_crtc->encoder == NULL) || (amdgpu_crtc->connector == NULL)) {
+               amdgpu_crtc->encoder = NULL;
+               amdgpu_crtc->connector = NULL;
+               return false;
+       }
+       if (!amdgpu_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
+               return false;
+       if (amdgpu_atombios_crtc_prepare_pll(crtc, adjusted_mode))
+               return false;
+       /* pick pll */
+       amdgpu_crtc->pll_id = dce_v11_0_pick_pll(crtc);
+       /* if we can't get a PPLL for a non-DP encoder, fail */
+       if ((amdgpu_crtc->pll_id == ATOM_PPLL_INVALID) &&
+           !ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder)))
+               return false;
+
+       return true;
+}
+
+static int dce_v11_0_crtc_set_base(struct drm_crtc *crtc, int x, int y,
+                                 struct drm_framebuffer *old_fb)
+{
+       return dce_v11_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
+}
+
+static int dce_v11_0_crtc_set_base_atomic(struct drm_crtc *crtc,
+                                        struct drm_framebuffer *fb,
+                                        int x, int y, enum mode_set_atomic state)
+{
+       return dce_v11_0_crtc_do_set_base(crtc, fb, x, y, 1);
+}
+
+static const struct drm_crtc_helper_funcs dce_v11_0_crtc_helper_funcs = {
+       .dpms = dce_v11_0_crtc_dpms,
+       .mode_fixup = dce_v11_0_crtc_mode_fixup,
+       .mode_set = dce_v11_0_crtc_mode_set,
+       .mode_set_base = dce_v11_0_crtc_set_base,
+       .mode_set_base_atomic = dce_v11_0_crtc_set_base_atomic,
+       .prepare = dce_v11_0_crtc_prepare,
+       .commit = dce_v11_0_crtc_commit,
+       .load_lut = dce_v11_0_crtc_load_lut,
+       .disable = dce_v11_0_crtc_disable,
+};
+
+static int dce_v11_0_crtc_init(struct amdgpu_device *adev, int index)
+{
+       struct amdgpu_crtc *amdgpu_crtc;
+       int i;
+
+       amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) +
+                             (AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
+       if (amdgpu_crtc == NULL)
+               return -ENOMEM;
+
+       drm_crtc_init(adev->ddev, &amdgpu_crtc->base, &dce_v11_0_crtc_funcs);
+
+       drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
+       amdgpu_crtc->crtc_id = index;
+       amdgpu_crtc->pflip_queue = create_singlethread_workqueue("amdgpu-pageflip-queue");
+       adev->mode_info.crtcs[index] = amdgpu_crtc;
+
+       amdgpu_crtc->max_cursor_width = 128;
+       amdgpu_crtc->max_cursor_height = 128;
+       adev->ddev->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
+       adev->ddev->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;
+
+       for (i = 0; i < 256; i++) {
+               amdgpu_crtc->lut_r[i] = i << 2;
+               amdgpu_crtc->lut_g[i] = i << 2;
+               amdgpu_crtc->lut_b[i] = i << 2;
+       }
+
+       switch (amdgpu_crtc->crtc_id) {
+       case 0:
+       default:
+               amdgpu_crtc->crtc_offset = CRTC0_REGISTER_OFFSET;
+               break;
+       case 1:
+               amdgpu_crtc->crtc_offset = CRTC1_REGISTER_OFFSET;
+               break;
+       case 2:
+               amdgpu_crtc->crtc_offset = CRTC2_REGISTER_OFFSET;
+               break;
+       case 3:
+               amdgpu_crtc->crtc_offset = CRTC3_REGISTER_OFFSET;
+               break;
+       case 4:
+               amdgpu_crtc->crtc_offset = CRTC4_REGISTER_OFFSET;
+               break;
+       case 5:
+               amdgpu_crtc->crtc_offset = CRTC5_REGISTER_OFFSET;
+               break;
+       }
+
+       amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
+       amdgpu_crtc->adjusted_clock = 0;
+       amdgpu_crtc->encoder = NULL;
+       amdgpu_crtc->connector = NULL;
+       drm_crtc_helper_add(&amdgpu_crtc->base, &dce_v11_0_crtc_helper_funcs);
+
+       return 0;
+}
+
+static int dce_v11_0_early_init(struct amdgpu_device *adev)
+{
+       adev->audio_endpt_rreg = &dce_v11_0_audio_endpt_rreg;
+       adev->audio_endpt_wreg = &dce_v11_0_audio_endpt_wreg;
+
+       dce_v11_0_set_display_funcs(adev);
+       dce_v11_0_set_irq_funcs(adev);
+
+       switch (adev->asic_type) {
+       case CHIP_CARRIZO:
+               adev->mode_info.num_crtc = 4;
+               adev->mode_info.num_hpd = 6;
+               adev->mode_info.num_dig = 9;
+               break;
+       default:
+               /* FIXME: not supported yet */
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int dce_v11_0_sw_init(struct amdgpu_device *adev)
+{
+       int r, i;
+
+       for (i = 0; i < adev->mode_info.num_crtc; i++) {
+               r = amdgpu_irq_add_id(adev, i + 1, &adev->crtc_irq);
+               if (r)
+                       return r;
+       }
+
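+       /* pageflip interrupt sources: src_ids 8, 10, ..., 18 map to crtc 0-5 */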
+       for (i = 8; i < 20; i += 2) {
+               r = amdgpu_irq_add_id(adev, i, &adev->pageflip_irq);
+               if (r)
+                       return r;
+       }
+
+       /* HPD hotplug */
+       r = amdgpu_irq_add_id(adev, 42, &adev->hpd_irq);
+       if (r)
+               return r;
+
+       adev->mode_info.mode_config_initialized = true;
+
+       adev->ddev->mode_config.funcs = &amdgpu_mode_funcs;
+
+       adev->ddev->mode_config.max_width = 16384;
+       adev->ddev->mode_config.max_height = 16384;
+
+       adev->ddev->mode_config.preferred_depth = 24;
+       adev->ddev->mode_config.prefer_shadow = 1;
+
+       adev->ddev->mode_config.fb_base = adev->mc.aper_base;
+
+       r = amdgpu_modeset_create_props(adev);
+       if (r)
+               return r;
+
+       adev->ddev->mode_config.max_width = 16384;
+       adev->ddev->mode_config.max_height = 16384;
+
+       /* allocate crtcs */
+       for (i = 0; i < adev->mode_info.num_crtc; i++) {
+               r = dce_v11_0_crtc_init(adev, i);
+               if (r)
+                       return r;
+       }
+
+       if (amdgpu_atombios_get_connector_info_from_object_table(adev))
+               amdgpu_print_display_setup(adev->ddev);
+       else
+               return -EINVAL;
+
+       /* setup afmt */
+       dce_v11_0_afmt_init(adev);
+
+       r = dce_v11_0_audio_init(adev);
+       if (r)
+               return r;
+
+       drm_kms_helper_poll_init(adev->ddev);
+
+       return r;
+}
+
+static int dce_v11_0_sw_fini(struct amdgpu_device *adev)
+{
+       kfree(adev->mode_info.bios_hardcoded_edid);
+
+       drm_kms_helper_poll_fini(adev->ddev);
+
+       dce_v11_0_audio_fini(adev);
+
+       dce_v11_0_afmt_fini(adev);
+
+       adev->mode_info.mode_config_initialized = false;
+
+       return 0;
+}
+
+static int dce_v11_0_hw_init(struct amdgpu_device *adev)
+{
+       int i;
+
+       dce_v11_0_init_golden_registers(adev);
+
+       /* init dig PHYs, disp eng pll */
+       amdgpu_atombios_encoder_init_dig(adev);
+       amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk);
+
+       /* initialize hpd */
+       dce_v11_0_hpd_init(adev);
+
+       for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
+               dce_v11_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
+       }
+
+       return 0;
+}
+
+static int dce_v11_0_hw_fini(struct amdgpu_device *adev)
+{
+       int i;
+
+       dce_v11_0_hpd_fini(adev);
+
+       for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
+               dce_v11_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
+       }
+
+       return 0;
+}
+
+static int dce_v11_0_suspend(struct amdgpu_device *adev)
+{
+       struct drm_connector *connector;
+
+       drm_kms_helper_poll_disable(adev->ddev);
+
+       /* turn off display hw */
+       list_for_each_entry(connector, &adev->ddev->mode_config.connector_list, head) {
+               drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+       }
+
+       amdgpu_atombios_scratch_regs_save(adev);
+
+       dce_v11_0_hpd_fini(adev);
+
+       return 0;
+}
+
+static int dce_v11_0_resume(struct amdgpu_device *adev)
+{
+       struct drm_connector *connector;
+
+       dce_v11_0_init_golden_registers(adev);
+
+       amdgpu_atombios_scratch_regs_restore(adev);
+
+       /* init dig PHYs, disp eng pll */
+       amdgpu_atombios_crtc_powergate_init(adev);
+       amdgpu_atombios_encoder_init_dig(adev);
+       amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk);
+       /* turn on the BL */
+       if (adev->mode_info.bl_encoder) {
+               u8 bl_level = amdgpu_display_backlight_get_level(adev,
+                                                                 adev->mode_info.bl_encoder);
+               amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder,
+                                                   bl_level);
+       }
+
+       /* initialize hpd */
+       dce_v11_0_hpd_init(adev);
+
+       /* blat the mode back in */
+       drm_helper_resume_force_mode(adev->ddev);
+       /* turn on display hw */
+       list_for_each_entry(connector, &adev->ddev->mode_config.connector_list, head) {
+               drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
+       }
+
+       drm_kms_helper_poll_enable(adev->ddev);
+
+       return 0;
+}
+
+static bool dce_v11_0_is_idle(struct amdgpu_device *adev)
+{
+       /* XXX todo */
+       return true;
+}
+
+static int dce_v11_0_wait_for_idle(struct amdgpu_device *adev)
+{
+       /* XXX todo */
+       return 0;
+}
+
+static void dce_v11_0_print_status(struct amdgpu_device *adev)
+{
+       dev_info(adev->dev, "DCE 11.x registers\n");
+       /* XXX todo */
+}
+
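+/* Soft reset the display controller through SRBM if the display block is hung. */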
+static int dce_v11_0_soft_reset(struct amdgpu_device *adev)
+{
+       u32 srbm_soft_reset = 0, tmp;
+
+       if (dce_v11_0_is_display_hung(adev))
+               srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK;
+
+       if (srbm_soft_reset) {
+               dce_v11_0_print_status(adev);
+
+               tmp = RREG32(mmSRBM_SOFT_RESET);
+               tmp |= srbm_soft_reset;
+               dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
+               WREG32(mmSRBM_SOFT_RESET, tmp);
+               tmp = RREG32(mmSRBM_SOFT_RESET);
+
+               udelay(50);
+
+               tmp &= ~srbm_soft_reset;
+               WREG32(mmSRBM_SOFT_RESET, tmp);
+               tmp = RREG32(mmSRBM_SOFT_RESET);
+
+               /* Wait a little for things to settle down */
+               udelay(50);
+               dce_v11_0_print_status(adev);
+       }
+       return 0;
+}
+
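+/* Enable or disable the per-crtc vblank interrupt in the line buffer (LB) block. */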
+static void dce_v11_0_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
+                                                    int crtc,
+                                                    enum amdgpu_interrupt_state state)
+{
+       u32 lb_interrupt_mask;
+
+       if (crtc >= adev->mode_info.num_crtc) {
+               DRM_DEBUG("invalid crtc %d\n", crtc);
+               return;
+       }
+
+       switch (state) {
+       case AMDGPU_IRQ_STATE_DISABLE:
+               lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]);
+               lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK,
+                                                 VBLANK_INTERRUPT_MASK, 0);
+               WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask);
+               break;
+       case AMDGPU_IRQ_STATE_ENABLE:
+               lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]);
+               lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK,
+                                                 VBLANK_INTERRUPT_MASK, 1);
+               WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask);
+               break;
+       default:
+               break;
+       }
+}
+
+static void dce_v11_0_set_crtc_vline_interrupt_state(struct amdgpu_device *adev,
+                                                   int crtc,
+                                                   enum amdgpu_interrupt_state state)
+{
+       u32 lb_interrupt_mask;
+
+       if (crtc >= adev->mode_info.num_crtc) {
+               DRM_DEBUG("invalid crtc %d\n", crtc);
+               return;
+       }
+
+       switch (state) {
+       case AMDGPU_IRQ_STATE_DISABLE:
+               lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]);
+               lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK,
+                                                 VLINE_INTERRUPT_MASK, 0);
+               WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask);
+               break;
+       case AMDGPU_IRQ_STATE_ENABLE:
+               lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]);
+               lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK,
+                                                 VLINE_INTERRUPT_MASK, 1);
+               WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask);
+               break;
+       default:
+               break;
+       }
+}
+
+static int dce_v11_0_set_hpd_irq_state(struct amdgpu_device *adev,
+                                       struct amdgpu_irq_src *source,
+                                       unsigned hpd,
+                                       enum amdgpu_interrupt_state state)
+{
+       u32 tmp;
+
+       if (hpd >= adev->mode_info.num_hpd) {
+               DRM_DEBUG("invalid hpd %d\n", hpd);
+               return 0;
+       }
+
+       switch (state) {
+       case AMDGPU_IRQ_STATE_DISABLE:
+               tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]);
+               tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 0);
+               WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp);
+               break;
+       case AMDGPU_IRQ_STATE_ENABLE:
+               tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]);
+               tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 1);
+               WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp);
+               break;
+       default:
+               break;
+       }
+
+       return 0;
+}
+
+static int dce_v11_0_set_crtc_irq_state(struct amdgpu_device *adev,
+                                       struct amdgpu_irq_src *source,
+                                       unsigned type,
+                                       enum amdgpu_interrupt_state state)
+{
+       switch (type) {
+       case AMDGPU_CRTC_IRQ_VBLANK1:
+               dce_v11_0_set_crtc_vblank_interrupt_state(adev, 0, state);
+               break;
+       case AMDGPU_CRTC_IRQ_VBLANK2:
+               dce_v11_0_set_crtc_vblank_interrupt_state(adev, 1, state);
+               break;
+       case AMDGPU_CRTC_IRQ_VBLANK3:
+               dce_v11_0_set_crtc_vblank_interrupt_state(adev, 2, state);
+               break;
+       case AMDGPU_CRTC_IRQ_VBLANK4:
+               dce_v11_0_set_crtc_vblank_interrupt_state(adev, 3, state);
+               break;
+       case AMDGPU_CRTC_IRQ_VBLANK5:
+               dce_v11_0_set_crtc_vblank_interrupt_state(adev, 4, state);
+               break;
+       case AMDGPU_CRTC_IRQ_VBLANK6:
+               dce_v11_0_set_crtc_vblank_interrupt_state(adev, 5, state);
+               break;
+       case AMDGPU_CRTC_IRQ_VLINE1:
+               dce_v11_0_set_crtc_vline_interrupt_state(adev, 0, state);
+               break;
+       case AMDGPU_CRTC_IRQ_VLINE2:
+               dce_v11_0_set_crtc_vline_interrupt_state(adev, 1, state);
+               break;
+       case AMDGPU_CRTC_IRQ_VLINE3:
+               dce_v11_0_set_crtc_vline_interrupt_state(adev, 2, state);
+               break;
+       case AMDGPU_CRTC_IRQ_VLINE4:
+               dce_v11_0_set_crtc_vline_interrupt_state(adev, 3, state);
+               break;
+       case AMDGPU_CRTC_IRQ_VLINE5:
+               dce_v11_0_set_crtc_vline_interrupt_state(adev, 4, state);
+               break;
+       case AMDGPU_CRTC_IRQ_VLINE6:
+               dce_v11_0_set_crtc_vline_interrupt_state(adev, 5, state);
+               break;
+       default:
+               break;
+       }
+       return 0;
+}
+
+static int dce_v11_0_set_pageflip_irq_state(struct amdgpu_device *adev,
+                                           struct amdgpu_irq_src *src,
+                                           unsigned type,
+                                           enum amdgpu_interrupt_state state)
+{
+       u32 reg, reg_block;
+       /* now deal with page flip IRQ */
+       switch (type) {
+       case AMDGPU_PAGEFLIP_IRQ_D1:
+               reg_block = CRTC0_REGISTER_OFFSET;
+               break;
+       case AMDGPU_PAGEFLIP_IRQ_D2:
+               reg_block = CRTC1_REGISTER_OFFSET;
+               break;
+       case AMDGPU_PAGEFLIP_IRQ_D3:
+               reg_block = CRTC2_REGISTER_OFFSET;
+               break;
+       case AMDGPU_PAGEFLIP_IRQ_D4:
+               reg_block = CRTC3_REGISTER_OFFSET;
+               break;
+       case AMDGPU_PAGEFLIP_IRQ_D5:
+               reg_block = CRTC4_REGISTER_OFFSET;
+               break;
+       case AMDGPU_PAGEFLIP_IRQ_D6:
+               reg_block = CRTC5_REGISTER_OFFSET;
+               break;
+       default:
+               DRM_ERROR("invalid pageflip crtc %d\n", type);
+               return -EINVAL;
+       }
+
+       reg = RREG32(mmGRPH_INTERRUPT_CONTROL + reg_block);
+       if (state == AMDGPU_IRQ_STATE_DISABLE)
+               WREG32(mmGRPH_INTERRUPT_CONTROL + reg_block, reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
+       else
+               WREG32(mmGRPH_INTERRUPT_CONTROL + reg_block, reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
+
+       return 0;
+}
+
+static int dce_v11_0_pageflip_irq(struct amdgpu_device *adev,
+                                 struct amdgpu_irq_src *source,
+                                 struct amdgpu_iv_entry *entry)
+{
+       int reg_block;
+       unsigned long flags;
+       unsigned crtc_id;
+       struct amdgpu_crtc *amdgpu_crtc;
+       struct amdgpu_flip_work *works;
+
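+       /* pageflip IV source ids start at 8 and come in pairs per display
+        * controller, so recover the crtc index from the source id
+        */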
+       crtc_id = (entry->src_id - 8) >> 1;
+       amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
+
+       /* ack the interrupt */
+       switch (crtc_id) {
+       case AMDGPU_PAGEFLIP_IRQ_D1:
+               reg_block = CRTC0_REGISTER_OFFSET;
+               break;
+       case AMDGPU_PAGEFLIP_IRQ_D2:
+               reg_block = CRTC1_REGISTER_OFFSET;
+               break;
+       case AMDGPU_PAGEFLIP_IRQ_D3:
+               reg_block = CRTC2_REGISTER_OFFSET;
+               break;
+       case AMDGPU_PAGEFLIP_IRQ_D4:
+               reg_block = CRTC3_REGISTER_OFFSET;
+               break;
+       case AMDGPU_PAGEFLIP_IRQ_D5:
+               reg_block = CRTC4_REGISTER_OFFSET;
+               break;
+       case AMDGPU_PAGEFLIP_IRQ_D6:
+               reg_block = CRTC5_REGISTER_OFFSET;
+               break;
+       default:
+               DRM_ERROR("invalid pageflip crtc %d\n", crtc_id);
+               return -EINVAL;
+       }
+
+       if (RREG32(mmGRPH_INTERRUPT_STATUS + reg_block) & GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK)
+               WREG32(mmGRPH_INTERRUPT_STATUS + reg_block, GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK);
+
+       /* the IRQ can fire before the crtc is fully set up */
+       if (amdgpu_crtc == NULL)
+               return 0;
+
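+       /* finish the flip under the event lock so the flip state and any
+        * pending event stay consistent
+        */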
+       spin_lock_irqsave(&adev->ddev->event_lock, flags);
+       works = amdgpu_crtc->pflip_works;
+       if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
+               DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != "
+                                                "AMDGPU_FLIP_SUBMITTED(%d)\n",
+                                                amdgpu_crtc->pflip_status,
+                                                AMDGPU_FLIP_SUBMITTED);
+               spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+               return 0;
+       }
+
+       /* page flip completed. clean up */
+       amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
+       amdgpu_crtc->pflip_works = NULL;
+
+       /* wake up userspace */
+       if (works->event)
+               drm_send_vblank_event(adev->ddev, crtc_id, works->event);
+
+       spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+
+       drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id);
+       amdgpu_irq_put(adev, &adev->pageflip_irq, crtc_id);
+       queue_work(amdgpu_crtc->pflip_queue, &works->unpin_work);
+
+       return 0;
+}
+
+static void dce_v11_0_hpd_int_ack(struct amdgpu_device *adev,
+                                 int hpd)
+{
+       u32 tmp;
+
+       if (hpd >= adev->mode_info.num_hpd) {
+               DRM_DEBUG("invalid hpd %d\n", hpd);
+               return;
+       }
+
+       tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]);
+       tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_ACK, 1);
+       WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp);
+}
+
+static void dce_v11_0_crtc_vblank_int_ack(struct amdgpu_device *adev,
+                                         int crtc)
+{
+       u32 tmp;
+
+       if (crtc >= adev->mode_info.num_crtc) {
+               DRM_DEBUG("invalid crtc %d\n", crtc);
+               return;
+       }
+
+       tmp = RREG32(mmLB_VBLANK_STATUS + crtc_offsets[crtc]);
+       tmp = REG_SET_FIELD(tmp, LB_VBLANK_STATUS, VBLANK_ACK, 1);
+       WREG32(mmLB_VBLANK_STATUS + crtc_offsets[crtc], tmp);
+}
+
+static void dce_v11_0_crtc_vline_int_ack(struct amdgpu_device *adev,
+                                        int crtc)
+{
+       u32 tmp;
+
+       if (crtc >= adev->mode_info.num_crtc) {
+               DRM_DEBUG("invalid crtc %d\n", crtc);
+               return;
+       }
+
+       tmp = RREG32(mmLB_VLINE_STATUS + crtc_offsets[crtc]);
+       tmp = REG_SET_FIELD(tmp, LB_VLINE_STATUS, VLINE_ACK, 1);
+       WREG32(mmLB_VLINE_STATUS + crtc_offsets[crtc], tmp);
+}
+
+static int dce_v11_0_crtc_irq(struct amdgpu_device *adev,
+                               struct amdgpu_irq_src *source,
+                               struct amdgpu_iv_entry *entry)
+{
+       unsigned crtc = entry->src_id - 1;
+       uint32_t disp_int = RREG32(interrupt_status_offsets[crtc].reg);
+       unsigned irq_type = amdgpu_crtc_idx_to_irq_type(adev, crtc);
+
+       switch (entry->src_data) {
+       case 0: /* vblank */
+               if (disp_int & interrupt_status_offsets[crtc].vblank) {
+                       dce_v11_0_crtc_vblank_int_ack(adev, crtc);
+                       if (amdgpu_irq_enabled(adev, source, irq_type)) {
+                               drm_handle_vblank(adev->ddev, crtc);
+                       }
+                       DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
+               }
+               break;
+       case 1: /* vline */
+               if (disp_int & interrupt_status_offsets[crtc].vline) {
+                       dce_v11_0_crtc_vline_int_ack(adev, crtc);
+                       DRM_DEBUG("IH: D%d vline\n", crtc + 1);
+               }
+               break;
+       default:
+               DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data);
+               break;
+       }
+
+       return 0;
+}
+
+static int dce_v11_0_hpd_irq(struct amdgpu_device *adev,
+                            struct amdgpu_irq_src *source,
+                            struct amdgpu_iv_entry *entry)
+{
+       uint32_t disp_int, mask;
+       unsigned hpd;
+
+       if (entry->src_data >= adev->mode_info.num_hpd) {
+               DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data);
+               return 0;
+       }
+
+       hpd = entry->src_data;
+       disp_int = RREG32(interrupt_status_offsets[hpd].reg);
+       mask = interrupt_status_offsets[hpd].hpd;
+
+       if (disp_int & mask) {
+               dce_v11_0_hpd_int_ack(adev, hpd);
+               schedule_work(&adev->hotplug_work);
+               DRM_DEBUG("IH: HPD%d\n", hpd + 1);
+       }
+
+       return 0;
+}
+
+static int dce_v11_0_set_clockgating_state(struct amdgpu_device *adev,
+                                         enum amdgpu_clockgating_state state)
+{
+       return 0;
+}
+
+static int dce_v11_0_set_powergating_state(struct amdgpu_device *adev,
+                                         enum amdgpu_powergating_state state)
+{
+       return 0;
+}
+
+const struct amdgpu_ip_funcs dce_v11_0_ip_funcs = {
+       .early_init = dce_v11_0_early_init,
+       .late_init = NULL,
+       .sw_init = dce_v11_0_sw_init,
+       .sw_fini = dce_v11_0_sw_fini,
+       .hw_init = dce_v11_0_hw_init,
+       .hw_fini = dce_v11_0_hw_fini,
+       .suspend = dce_v11_0_suspend,
+       .resume = dce_v11_0_resume,
+       .is_idle = dce_v11_0_is_idle,
+       .wait_for_idle = dce_v11_0_wait_for_idle,
+       .soft_reset = dce_v11_0_soft_reset,
+       .print_status = dce_v11_0_print_status,
+       .set_clockgating_state = dce_v11_0_set_clockgating_state,
+       .set_powergating_state = dce_v11_0_set_powergating_state,
+};
+
+static void
+dce_v11_0_encoder_mode_set(struct drm_encoder *encoder,
+                         struct drm_display_mode *mode,
+                         struct drm_display_mode *adjusted_mode)
+{
+       struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+
+       amdgpu_encoder->pixel_clock = adjusted_mode->clock;
+
+       /* need to call this here rather than in prepare() since we need some crtc info */
+       amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
+
+       /* the scaler setup clears this on some chips, so set it here */
+       dce_v11_0_set_interleave(encoder->crtc, mode);
+
+       if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
+               dce_v11_0_afmt_enable(encoder, true);
+               dce_v11_0_afmt_setmode(encoder, adjusted_mode);
+       }
+}
+
+static void dce_v11_0_encoder_prepare(struct drm_encoder *encoder)
+{
+       struct amdgpu_device *adev = encoder->dev->dev_private;
+       struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+       struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
+
+       if ((amdgpu_encoder->active_device &
+            (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
+           (amdgpu_encoder_get_dp_bridge_encoder_id(encoder) !=
+            ENCODER_OBJECT_ID_NONE)) {
+               struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
+               if (dig) {
+                       dig->dig_encoder = dce_v11_0_pick_dig_encoder(encoder);
+                       if (amdgpu_encoder->active_device & ATOM_DEVICE_DFP_SUPPORT)
+                               dig->afmt = adev->mode_info.afmt[dig->dig_encoder];
+               }
+       }
+
+       amdgpu_atombios_scratch_regs_lock(adev, true);
+
+       if (connector) {
+               struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
+
+               /* select the clock/data port if it uses a router */
+               if (amdgpu_connector->router.cd_valid)
+                       amdgpu_i2c_router_select_cd_port(amdgpu_connector);
+
+               /* turn eDP panel on for mode set */
+               if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
+                       amdgpu_atombios_encoder_set_edp_panel_power(connector,
+                                                            ATOM_TRANSMITTER_ACTION_POWER_ON);
+       }
+
+       /* this is needed for the pll/ss setup to work correctly in some cases */
+       amdgpu_atombios_encoder_set_crtc_source(encoder);
+       /* set up the FMT blocks */
+       dce_v11_0_program_fmt(encoder);
+}
+
+static void dce_v11_0_encoder_commit(struct drm_encoder *encoder)
+{
+       struct drm_device *dev = encoder->dev;
+       struct amdgpu_device *adev = dev->dev_private;
+
+       /* need to call this here as we need the crtc set up */
+       amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
+       amdgpu_atombios_scratch_regs_lock(adev, false);
+}
+
+static void dce_v11_0_encoder_disable(struct drm_encoder *encoder)
+{
+       struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+       struct amdgpu_encoder_atom_dig *dig;
+
+       amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
+
+       if (amdgpu_atombios_encoder_is_digital(encoder)) {
+               if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
+                       dce_v11_0_afmt_enable(encoder, false);
+               dig = amdgpu_encoder->enc_priv;
+               dig->dig_encoder = -1;
+       }
+       amdgpu_encoder->active_device = 0;
+}
+
+/* these are handled by the primary encoders */
+static void dce_v11_0_ext_prepare(struct drm_encoder *encoder)
+{
+
+}
+
+static void dce_v11_0_ext_commit(struct drm_encoder *encoder)
+{
+
+}
+
+static void
+dce_v11_0_ext_mode_set(struct drm_encoder *encoder,
+                     struct drm_display_mode *mode,
+                     struct drm_display_mode *adjusted_mode)
+{
+
+}
+
+static void dce_v11_0_ext_disable(struct drm_encoder *encoder)
+{
+
+}
+
+static void
+dce_v11_0_ext_dpms(struct drm_encoder *encoder, int mode)
+{
+
+}
+
+static bool dce_v11_0_ext_mode_fixup(struct drm_encoder *encoder,
+                                   const struct drm_display_mode *mode,
+                                   struct drm_display_mode *adjusted_mode)
+{
+       return true;
+}
+
+static const struct drm_encoder_helper_funcs dce_v11_0_ext_helper_funcs = {
+       .dpms = dce_v11_0_ext_dpms,
+       .mode_fixup = dce_v11_0_ext_mode_fixup,
+       .prepare = dce_v11_0_ext_prepare,
+       .mode_set = dce_v11_0_ext_mode_set,
+       .commit = dce_v11_0_ext_commit,
+       .disable = dce_v11_0_ext_disable,
+       /* no detect for TMDS/LVDS yet */
+};
+
+static const struct drm_encoder_helper_funcs dce_v11_0_dig_helper_funcs = {
+       .dpms = amdgpu_atombios_encoder_dpms,
+       .mode_fixup = amdgpu_atombios_encoder_mode_fixup,
+       .prepare = dce_v11_0_encoder_prepare,
+       .mode_set = dce_v11_0_encoder_mode_set,
+       .commit = dce_v11_0_encoder_commit,
+       .disable = dce_v11_0_encoder_disable,
+       .detect = amdgpu_atombios_encoder_dig_detect,
+};
+
+static const struct drm_encoder_helper_funcs dce_v11_0_dac_helper_funcs = {
+       .dpms = amdgpu_atombios_encoder_dpms,
+       .mode_fixup = amdgpu_atombios_encoder_mode_fixup,
+       .prepare = dce_v11_0_encoder_prepare,
+       .mode_set = dce_v11_0_encoder_mode_set,
+       .commit = dce_v11_0_encoder_commit,
+       .detect = amdgpu_atombios_encoder_dac_detect,
+};
+
+static void dce_v11_0_encoder_destroy(struct drm_encoder *encoder)
+{
+       struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+       if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
+               amdgpu_atombios_encoder_fini_backlight(amdgpu_encoder);
+       kfree(amdgpu_encoder->enc_priv);
+       drm_encoder_cleanup(encoder);
+       kfree(amdgpu_encoder);
+}
+
+static const struct drm_encoder_funcs dce_v11_0_encoder_funcs = {
+       .destroy = dce_v11_0_encoder_destroy,
+};
+
+static void dce_v11_0_encoder_add(struct amdgpu_device *adev,
+                                uint32_t encoder_enum,
+                                uint32_t supported_device,
+                                u16 caps)
+{
+       struct drm_device *dev = adev->ddev;
+       struct drm_encoder *encoder;
+       struct amdgpu_encoder *amdgpu_encoder;
+
+       /* see if we already added it */
+       list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+               amdgpu_encoder = to_amdgpu_encoder(encoder);
+               if (amdgpu_encoder->encoder_enum == encoder_enum) {
+                       amdgpu_encoder->devices |= supported_device;
+                       return;
+               }
+       }
+
+       /* add a new one */
+       amdgpu_encoder = kzalloc(sizeof(struct amdgpu_encoder), GFP_KERNEL);
+       if (!amdgpu_encoder)
+               return;
+
+       encoder = &amdgpu_encoder->base;
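+       /* possible_crtcs is a bitmask with one bit per crtc this encoder can drive */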
+       switch (adev->mode_info.num_crtc) {
+       case 1:
+               encoder->possible_crtcs = 0x1;
+               break;
+       case 2:
+       default:
+               encoder->possible_crtcs = 0x3;
+               break;
+       case 4:
+               encoder->possible_crtcs = 0xf;
+               break;
+       case 6:
+               encoder->possible_crtcs = 0x3f;
+               break;
+       }
+
+       amdgpu_encoder->enc_priv = NULL;
+
+       amdgpu_encoder->encoder_enum = encoder_enum;
+       amdgpu_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
+       amdgpu_encoder->devices = supported_device;
+       amdgpu_encoder->rmx_type = RMX_OFF;
+       amdgpu_encoder->underscan_type = UNDERSCAN_OFF;
+       amdgpu_encoder->is_ext_encoder = false;
+       amdgpu_encoder->caps = caps;
+
+       switch (amdgpu_encoder->encoder_id) {
+       case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
+       case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
+               drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs,
+                                DRM_MODE_ENCODER_DAC);
+               drm_encoder_helper_add(encoder, &dce_v11_0_dac_helper_funcs);
+               break;
+       case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+       case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+       case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+       case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+       case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
+               if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+                       amdgpu_encoder->rmx_type = RMX_FULL;
+                       drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs,
+                                        DRM_MODE_ENCODER_LVDS);
+                       amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_lcd_info(amdgpu_encoder);
+               } else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) {
+                       drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs,
+                                        DRM_MODE_ENCODER_DAC);
+                       amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
+               } else {
+                       drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs,
+                                        DRM_MODE_ENCODER_TMDS);
+                       amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
+               }
+               drm_encoder_helper_add(encoder, &dce_v11_0_dig_helper_funcs);
+               break;
+       case ENCODER_OBJECT_ID_SI170B:
+       case ENCODER_OBJECT_ID_CH7303:
+       case ENCODER_OBJECT_ID_EXTERNAL_SDVOA:
+       case ENCODER_OBJECT_ID_EXTERNAL_SDVOB:
+       case ENCODER_OBJECT_ID_TITFP513:
+       case ENCODER_OBJECT_ID_VT1623:
+       case ENCODER_OBJECT_ID_HDMI_SI1930:
+       case ENCODER_OBJECT_ID_TRAVIS:
+       case ENCODER_OBJECT_ID_NUTMEG:
+               /* these are handled by the primary encoders */
+               amdgpu_encoder->is_ext_encoder = true;
+               if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
+                       drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs,
+                                        DRM_MODE_ENCODER_LVDS);
+               else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT))
+                       drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs,
+                                        DRM_MODE_ENCODER_DAC);
+               else
+                       drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs,
+                                        DRM_MODE_ENCODER_TMDS);
+               drm_encoder_helper_add(encoder, &dce_v11_0_ext_helper_funcs);
+               break;
+       }
+}
+
+static const struct amdgpu_display_funcs dce_v11_0_display_funcs = {
+       .set_vga_render_state = &dce_v11_0_set_vga_render_state,
+       .bandwidth_update = &dce_v11_0_bandwidth_update,
+       .vblank_get_counter = &dce_v11_0_vblank_get_counter,
+       .vblank_wait = &dce_v11_0_vblank_wait,
+       .is_display_hung = &dce_v11_0_is_display_hung,
+       .backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level,
+       .backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level,
+       .hpd_sense = &dce_v11_0_hpd_sense,
+       .hpd_set_polarity = &dce_v11_0_hpd_set_polarity,
+       .hpd_get_gpio_reg = &dce_v11_0_hpd_get_gpio_reg,
+       .page_flip = &dce_v11_0_page_flip,
+       .page_flip_get_scanoutpos = &dce_v11_0_crtc_get_scanoutpos,
+       .add_encoder = &dce_v11_0_encoder_add,
+       .add_connector = &amdgpu_connector_add,
+       .stop_mc_access = &dce_v11_0_stop_mc_access,
+       .resume_mc_access = &dce_v11_0_resume_mc_access,
+};
+
+static void dce_v11_0_set_display_funcs(struct amdgpu_device *adev)
+{
+       if (adev->mode_info.funcs == NULL)
+               adev->mode_info.funcs = &dce_v11_0_display_funcs;
+}
+
+static const struct amdgpu_irq_src_funcs dce_v11_0_crtc_irq_funcs = {
+       .set = dce_v11_0_set_crtc_irq_state,
+       .process = dce_v11_0_crtc_irq,
+};
+
+static const struct amdgpu_irq_src_funcs dce_v11_0_pageflip_irq_funcs = {
+       .set = dce_v11_0_set_pageflip_irq_state,
+       .process = dce_v11_0_pageflip_irq,
+};
+
+static const struct amdgpu_irq_src_funcs dce_v11_0_hpd_irq_funcs = {
+       .set = dce_v11_0_set_hpd_irq_state,
+       .process = dce_v11_0_hpd_irq,
+};
+
+static void dce_v11_0_set_irq_funcs(struct amdgpu_device *adev)
+{
+       adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_LAST;
+       adev->crtc_irq.funcs = &dce_v11_0_crtc_irq_funcs;
+
+       adev->pageflip_irq.num_types = AMDGPU_PAGEFLIP_IRQ_LAST;
+       adev->pageflip_irq.funcs = &dce_v11_0_pageflip_irq_funcs;
+
+       adev->hpd_irq.num_types = AMDGPU_HPD_LAST;
+       adev->hpd_irq.funcs = &dce_v11_0_hpd_irq_funcs;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.h b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.h
new file mode 100644 (file)
index 0000000..eeb9a56
--- /dev/null
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __DCE_V11_0_H__
+#define __DCE_V11_0_H__
+
+extern const struct amdgpu_ip_funcs dce_v11_0_ip_funcs;
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
new file mode 100644 (file)
index 0000000..a8397dd
--- /dev/null
@@ -0,0 +1,4286 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include <linux/firmware.h>
+#include "drmP.h"
+#include "amdgpu.h"
+#include "amdgpu_gfx.h"
+#include "vi.h"
+#include "vid.h"
+#include "amdgpu_ucode.h"
+#include "clearstate_vi.h"
+
+#include "gmc/gmc_8_2_d.h"
+#include "gmc/gmc_8_2_sh_mask.h"
+
+#include "oss/oss_3_0_d.h"
+#include "oss/oss_3_0_sh_mask.h"
+
+#include "bif/bif_5_0_d.h"
+#include "bif/bif_5_0_sh_mask.h"
+
+#include "gca/gfx_8_0_d.h"
+#include "gca/gfx_8_0_enum.h"
+#include "gca/gfx_8_0_sh_mask.h"
+
+#include "uvd/uvd_5_0_d.h"
+#include "uvd/uvd_5_0_sh_mask.h"
+
+#include "dce/dce_10_0_d.h"
+#include "dce/dce_10_0_sh_mask.h"
+
+#define GFX8_NUM_GFX_RINGS     1
+#define GFX8_NUM_COMPUTE_RINGS 8
+
+#define TOPAZ_GB_ADDR_CONFIG_GOLDEN 0x22010001
+#define CARRIZO_GB_ADDR_CONFIG_GOLDEN 0x22010001
+#define TONGA_GB_ADDR_CONFIG_GOLDEN 0x22011003
+
+#define ARRAY_MODE(x)                                  ((x) << GB_TILE_MODE0__ARRAY_MODE__SHIFT)
+#define PIPE_CONFIG(x)                                 ((x) << GB_TILE_MODE0__PIPE_CONFIG__SHIFT)
+#define TILE_SPLIT(x)                                  ((x) << GB_TILE_MODE0__TILE_SPLIT__SHIFT)
+#define MICRO_TILE_MODE_NEW(x)                         ((x) << GB_TILE_MODE0__MICRO_TILE_MODE_NEW__SHIFT)
+#define SAMPLE_SPLIT(x)                                        ((x) << GB_TILE_MODE0__SAMPLE_SPLIT__SHIFT)
+#define BANK_WIDTH(x)                                  ((x) << GB_MACROTILE_MODE0__BANK_WIDTH__SHIFT)
+#define BANK_HEIGHT(x)                                 ((x) << GB_MACROTILE_MODE0__BANK_HEIGHT__SHIFT)
+#define MACRO_TILE_ASPECT(x)                           ((x) << GB_MACROTILE_MODE0__MACRO_TILE_ASPECT__SHIFT)
+#define NUM_BANKS(x)                                   ((x) << GB_MACROTILE_MODE0__NUM_BANKS__SHIFT)
+
+MODULE_FIRMWARE("radeon/carrizo_ce.bin");
+MODULE_FIRMWARE("radeon/carrizo_pfp.bin");
+MODULE_FIRMWARE("radeon/carrizo_me.bin");
+MODULE_FIRMWARE("radeon/carrizo_mec.bin");
+MODULE_FIRMWARE("radeon/carrizo_mec2.bin");
+MODULE_FIRMWARE("radeon/carrizo_rlc.bin");
+
+MODULE_FIRMWARE("radeon/tonga_ce.bin");
+MODULE_FIRMWARE("radeon/tonga_pfp.bin");
+MODULE_FIRMWARE("radeon/tonga_me.bin");
+MODULE_FIRMWARE("radeon/tonga_mec.bin");
+MODULE_FIRMWARE("radeon/tonga_mec2.bin");
+MODULE_FIRMWARE("radeon/tonga_rlc.bin");
+
+MODULE_FIRMWARE("radeon/topaz_ce.bin");
+MODULE_FIRMWARE("radeon/topaz_pfp.bin");
+MODULE_FIRMWARE("radeon/topaz_me.bin");
+MODULE_FIRMWARE("radeon/topaz_mec.bin");
+MODULE_FIRMWARE("radeon/topaz_mec2.bin");
+MODULE_FIRMWARE("radeon/topaz_rlc.bin");
+
+static const struct amdgpu_gds_reg_offset amdgpu_gds_reg_offset[] =
+{
+       {mmGDS_VMID0_BASE, mmGDS_VMID0_SIZE, mmGDS_GWS_VMID0, mmGDS_OA_VMID0},
+       {mmGDS_VMID1_BASE, mmGDS_VMID1_SIZE, mmGDS_GWS_VMID1, mmGDS_OA_VMID1},
+       {mmGDS_VMID2_BASE, mmGDS_VMID2_SIZE, mmGDS_GWS_VMID2, mmGDS_OA_VMID2},
+       {mmGDS_VMID3_BASE, mmGDS_VMID3_SIZE, mmGDS_GWS_VMID3, mmGDS_OA_VMID3},
+       {mmGDS_VMID4_BASE, mmGDS_VMID4_SIZE, mmGDS_GWS_VMID4, mmGDS_OA_VMID4},
+       {mmGDS_VMID5_BASE, mmGDS_VMID5_SIZE, mmGDS_GWS_VMID5, mmGDS_OA_VMID5},
+       {mmGDS_VMID6_BASE, mmGDS_VMID6_SIZE, mmGDS_GWS_VMID6, mmGDS_OA_VMID6},
+       {mmGDS_VMID7_BASE, mmGDS_VMID7_SIZE, mmGDS_GWS_VMID7, mmGDS_OA_VMID7},
+       {mmGDS_VMID8_BASE, mmGDS_VMID8_SIZE, mmGDS_GWS_VMID8, mmGDS_OA_VMID8},
+       {mmGDS_VMID9_BASE, mmGDS_VMID9_SIZE, mmGDS_GWS_VMID9, mmGDS_OA_VMID9},
+       {mmGDS_VMID10_BASE, mmGDS_VMID10_SIZE, mmGDS_GWS_VMID10, mmGDS_OA_VMID10},
+       {mmGDS_VMID11_BASE, mmGDS_VMID11_SIZE, mmGDS_GWS_VMID11, mmGDS_OA_VMID11},
+       {mmGDS_VMID12_BASE, mmGDS_VMID12_SIZE, mmGDS_GWS_VMID12, mmGDS_OA_VMID12},
+       {mmGDS_VMID13_BASE, mmGDS_VMID13_SIZE, mmGDS_GWS_VMID13, mmGDS_OA_VMID13},
+       {mmGDS_VMID14_BASE, mmGDS_VMID14_SIZE, mmGDS_GWS_VMID14, mmGDS_OA_VMID14},
+       {mmGDS_VMID15_BASE, mmGDS_VMID15_SIZE, mmGDS_GWS_VMID15, mmGDS_OA_VMID15}
+};
+
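+/* golden register tables below are {register, mask, value} triples applied
+ * via amdgpu_program_register_sequence()
+ */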
+static const u32 golden_settings_tonga_a11[] =
+{
+       mmCB_HW_CONTROL, 0xfffdf3cf, 0x00007208,
+       mmCB_HW_CONTROL_3, 0x00000040, 0x00000040,
+       mmDB_DEBUG2, 0xf00fffff, 0x00000400,
+       mmGB_GPU_ID, 0x0000000f, 0x00000000,
+       mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
+       mmPA_SC_FIFO_DEPTH_CNTL, 0x000003ff, 0x000000fc,
+       mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
+       mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
+       mmTCC_CTRL, 0x00100000, 0xf31fff7f,
+       mmTCP_ADDR_CONFIG, 0x000003ff, 0x000002fb,
+       mmTCP_CHAN_STEER_HI, 0xffffffff, 0x0000543b,
+       mmTCP_CHAN_STEER_LO, 0xffffffff, 0xa9210876,
+};
+
+static const u32 tonga_golden_common_all[] =
+{
+       mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
+       mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x16000012,
+       mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x0000002A,
+       mmGB_ADDR_CONFIG, 0xffffffff, 0x22011003,
+       mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
+       mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
+       mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00007FBF,
+       mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00007FAF
+};
+
+static const u32 tonga_mgcg_cgcg_init[] =
+{
+       mmRLC_CGTT_MGCG_OVERRIDE, 0xffffffff, 0xffffffff,
+       mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
+       mmCB_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
+       mmCGTT_BCI_CLK_CTRL, 0xffffffff, 0x00000100,
+       mmCGTT_CP_CLK_CTRL, 0xffffffff, 0x00000100,
+       mmCGTT_CPC_CLK_CTRL, 0xffffffff, 0x00000100,
+       mmCGTT_CPF_CLK_CTRL, 0xffffffff, 0x40000100,
+       mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100,
+       mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100,
+       mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100,
+       mmCGTT_WD_CLK_CTRL, 0xffffffff, 0x06000100,
+       mmCGTT_PC_CLK_CTRL, 0xffffffff, 0x00000100,
+       mmCGTT_RLC_CLK_CTRL, 0xffffffff, 0x00000100,
+       mmCGTT_SC_CLK_CTRL, 0xffffffff, 0x00000100,
+       mmCGTT_SPI_CLK_CTRL, 0xffffffff, 0x00000100,
+       mmCGTT_SQ_CLK_CTRL, 0xffffffff, 0x00000100,
+       mmCGTT_SQG_CLK_CTRL, 0xffffffff, 0x00000100,
+       mmCGTT_SX_CLK_CTRL0, 0xffffffff, 0x00000100,
+       mmCGTT_SX_CLK_CTRL1, 0xffffffff, 0x00000100,
+       mmCGTT_SX_CLK_CTRL2, 0xffffffff, 0x00000100,
+       mmCGTT_SX_CLK_CTRL3, 0xffffffff, 0x00000100,
+       mmCGTT_SX_CLK_CTRL4, 0xffffffff, 0x00000100,
+       mmCGTT_TCI_CLK_CTRL, 0xffffffff, 0x00000100,
+       mmCGTT_TCP_CLK_CTRL, 0xffffffff, 0x00000100,
+       mmCGTT_VGT_CLK_CTRL, 0xffffffff, 0x06000100,
+       mmDB_CGTT_CLK_CTRL_0, 0xffffffff, 0x00000100,
+       mmTA_CGTT_CTRL, 0xffffffff, 0x00000100,
+       mmTCA_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
+       mmTCC_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
+       mmTD_CGTT_CTRL, 0xffffffff, 0x00000100,
+       mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
+       mmCGTS_CU0_SP0_CTRL_REG, 0xffffffff, 0x00010000,
+       mmCGTS_CU0_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
+       mmCGTS_CU0_TA_SQC_CTRL_REG, 0xffffffff, 0x00040007,
+       mmCGTS_CU0_SP1_CTRL_REG, 0xffffffff, 0x00060005,
+       mmCGTS_CU0_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
+       mmCGTS_CU1_SP0_CTRL_REG, 0xffffffff, 0x00010000,
+       mmCGTS_CU1_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
+       mmCGTS_CU1_TA_CTRL_REG, 0xffffffff, 0x00040007,
+       mmCGTS_CU1_SP1_CTRL_REG, 0xffffffff, 0x00060005,
+       mmCGTS_CU1_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
+       mmCGTS_CU2_SP0_CTRL_REG, 0xffffffff, 0x00010000,
+       mmCGTS_CU2_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
+       mmCGTS_CU2_TA_CTRL_REG, 0xffffffff, 0x00040007,
+       mmCGTS_CU2_SP1_CTRL_REG, 0xffffffff, 0x00060005,
+       mmCGTS_CU2_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
+       mmCGTS_CU3_SP0_CTRL_REG, 0xffffffff, 0x00010000,
+       mmCGTS_CU3_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
+       mmCGTS_CU3_TA_CTRL_REG, 0xffffffff, 0x00040007,
+       mmCGTS_CU3_SP1_CTRL_REG, 0xffffffff, 0x00060005,
+       mmCGTS_CU3_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
+       mmCGTS_CU4_SP0_CTRL_REG, 0xffffffff, 0x00010000,
+       mmCGTS_CU4_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
+       mmCGTS_CU4_TA_SQC_CTRL_REG, 0xffffffff, 0x00040007,
+       mmCGTS_CU4_SP1_CTRL_REG, 0xffffffff, 0x00060005,
+       mmCGTS_CU4_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
+       mmCGTS_CU5_SP0_CTRL_REG, 0xffffffff, 0x00010000,
+       mmCGTS_CU5_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
+       mmCGTS_CU5_TA_CTRL_REG, 0xffffffff, 0x00040007,
+       mmCGTS_CU5_SP1_CTRL_REG, 0xffffffff, 0x00060005,
+       mmCGTS_CU5_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
+       mmCGTS_CU6_SP0_CTRL_REG, 0xffffffff, 0x00010000,
+       mmCGTS_CU6_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
+       mmCGTS_CU6_TA_CTRL_REG, 0xffffffff, 0x00040007,
+       mmCGTS_CU6_SP1_CTRL_REG, 0xffffffff, 0x00060005,
+       mmCGTS_CU6_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
+       mmCGTS_CU7_SP0_CTRL_REG, 0xffffffff, 0x00010000,
+       mmCGTS_CU7_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
+       mmCGTS_CU7_TA_CTRL_REG, 0xffffffff, 0x00040007,
+       mmCGTS_CU7_SP1_CTRL_REG, 0xffffffff, 0x00060005,
+       mmCGTS_CU7_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
+       mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96e00200,
+       mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100,
+       mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003c,
+       mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001,
+};
+
+static const u32 golden_settings_iceland_a11[] =
+{
+       mmCB_HW_CONTROL_3, 0x00000040, 0x00000040,
+       mmDB_DEBUG2, 0xf00fffff, 0x00000400,
+       mmDB_DEBUG3, 0xc0000000, 0xc0000000,
+       mmGB_GPU_ID, 0x0000000f, 0x00000000,
+       mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
+       mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
+       mmPA_SC_RASTER_CONFIG, 0x3f3fffff, 0x00000002,
+       mmPA_SC_RASTER_CONFIG_1, 0x0000003f, 0x00000000,
+       mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
+       mmTCC_CTRL, 0x00100000, 0xf31fff7f,
+       mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000f1,
+       mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000,
+       mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00000010,
+};
+
+static const u32 iceland_golden_common_all[] =
+{
+       mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
+       mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x00000002,
+       mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x00000000,
+       mmGB_ADDR_CONFIG, 0xffffffff, 0x22010001,
+       mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
+       mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
+       mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00007FBF,
+       mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00007FAF
+};
+
+static const u32 iceland_mgcg_cgcg_init[] =
+{
+       mmRLC_CGTT_MGCG_OVERRIDE, 0xffffffff, 0xffffffff,
+       mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
+       mmCB_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
+       mmCGTT_BCI_CLK_CTRL, 0xffffffff, 0x00000100,
+       mmCGTT_CP_CLK_CTRL, 0xffffffff, 0xc0000100,
+       mmCGTT_CPC_CLK_CTRL, 0xffffffff, 0xc0000100,
+       mmCGTT_CPF_CLK_CTRL, 0xffffffff, 0xc0000100,
+       mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100,
+       mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100,
+       mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100,
+       mmCGTT_WD_CLK_CTRL, 0xffffffff, 0x06000100,
+       mmCGTT_PC_CLK_CTRL, 0xffffffff, 0x00000100,
+       mmCGTT_RLC_CLK_CTRL, 0xffffffff, 0x00000100,
+       mmCGTT_SC_CLK_CTRL, 0xffffffff, 0x00000100,
+       mmCGTT_SPI_CLK_CTRL, 0xffffffff, 0x00000100,
+       mmCGTT_SQ_CLK_CTRL, 0xffffffff, 0x00000100,
+       mmCGTT_SQG_CLK_CTRL, 0xffffffff, 0x00000100,
+       mmCGTT_SX_CLK_CTRL0, 0xffffffff, 0x00000100,
+       mmCGTT_SX_CLK_CTRL1, 0xffffffff, 0x00000100,
+       mmCGTT_SX_CLK_CTRL2, 0xffffffff, 0x00000100,
+       mmCGTT_SX_CLK_CTRL3, 0xffffffff, 0x00000100,
+       mmCGTT_SX_CLK_CTRL4, 0xffffffff, 0x00000100,
+       mmCGTT_TCI_CLK_CTRL, 0xffffffff, 0xff000100,
+       mmCGTT_TCP_CLK_CTRL, 0xffffffff, 0x00000100,
+       mmCGTT_VGT_CLK_CTRL, 0xffffffff, 0x06000100,
+       mmDB_CGTT_CLK_CTRL_0, 0xffffffff, 0x00000100,
+       mmTA_CGTT_CTRL, 0xffffffff, 0x00000100,
+       mmTCA_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
+       mmTCC_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
+       mmTD_CGTT_CTRL, 0xffffffff, 0x00000100,
+       mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
+       mmCGTS_CU0_SP0_CTRL_REG, 0xffffffff, 0x00010000,
+       mmCGTS_CU0_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
+       mmCGTS_CU0_TA_SQC_CTRL_REG, 0xffffffff, 0x0f840f87,
+       mmCGTS_CU0_SP1_CTRL_REG, 0xffffffff, 0x00060005,
+       mmCGTS_CU0_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
+       mmCGTS_CU1_SP0_CTRL_REG, 0xffffffff, 0x00010000,
+       mmCGTS_CU1_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
+       mmCGTS_CU1_TA_CTRL_REG, 0xffffffff, 0x00040007,
+       mmCGTS_CU1_SP1_CTRL_REG, 0xffffffff, 0x00060005,
+       mmCGTS_CU1_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
+       mmCGTS_CU2_SP0_CTRL_REG, 0xffffffff, 0x00010000,
+       mmCGTS_CU2_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
+       mmCGTS_CU2_TA_CTRL_REG, 0xffffffff, 0x00040007,
+       mmCGTS_CU2_SP1_CTRL_REG, 0xffffffff, 0x00060005,
+       mmCGTS_CU2_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
+       mmCGTS_CU3_SP0_CTRL_REG, 0xffffffff, 0x00010000,
+       mmCGTS_CU3_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
+       mmCGTS_CU3_TA_CTRL_REG, 0xffffffff, 0x00040007,
+       mmCGTS_CU3_SP1_CTRL_REG, 0xffffffff, 0x00060005,
+       mmCGTS_CU3_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
+       mmCGTS_CU4_SP0_CTRL_REG, 0xffffffff, 0x00010000,
+       mmCGTS_CU4_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
+       mmCGTS_CU4_TA_SQC_CTRL_REG, 0xffffffff, 0x0f840f87,
+       mmCGTS_CU4_SP1_CTRL_REG, 0xffffffff, 0x00060005,
+       mmCGTS_CU4_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
+       mmCGTS_CU5_SP0_CTRL_REG, 0xffffffff, 0x00010000,
+       mmCGTS_CU5_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
+       mmCGTS_CU5_TA_CTRL_REG, 0xffffffff, 0x00040007,
+       mmCGTS_CU5_SP1_CTRL_REG, 0xffffffff, 0x00060005,
+       mmCGTS_CU5_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
+       mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96e00200,
+       mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100,
+       mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003c,
+};
+
+static const u32 cz_golden_settings_a11[] =
+{
+       mmCB_HW_CONTROL_3, 0x00000040, 0x00000040,
+       mmDB_DEBUG2, 0xf00fffff, 0x00000400,
+       mmGB_GPU_ID, 0x0000000f, 0x00000000,
+       mmPA_SC_ENHANCE, 0xffffffff, 0x00000001,
+       mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
+       mmTA_CNTL_AUX, 0x000f000f, 0x00010000,
+       mmTCP_ADDR_CONFIG, 0x0000000f, 0x000000f3,
+       mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00001302
+};
+
+static const u32 cz_golden_common_all[] =
+{
+       mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
+       mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x00000002,
+       mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x00000000,
+       mmGB_ADDR_CONFIG, 0xffffffff, 0x22010001,
+       mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
+       mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
+       mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00007FBF,
+       mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00007FAF
+};
+
+static const u32 cz_mgcg_cgcg_init[] =
+{
+       mmRLC_CGTT_MGCG_OVERRIDE, 0xffffffff, 0xffffffff,
+       mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
+       mmCB_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
+       mmCGTT_BCI_CLK_CTRL, 0xffffffff, 0x00000100,
+       mmCGTT_CP_CLK_CTRL, 0xffffffff, 0x00000100,
+       mmCGTT_CPC_CLK_CTRL, 0xffffffff, 0x00000100,
+       mmCGTT_CPF_CLK_CTRL, 0xffffffff, 0x00000100,
+       mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100,
+       mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100,
+       mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100,
+       mmCGTT_WD_CLK_CTRL, 0xffffffff, 0x06000100,
+       mmCGTT_PC_CLK_CTRL, 0xffffffff, 0x00000100,
+       mmCGTT_RLC_CLK_CTRL, 0xffffffff, 0x00000100,
+       mmCGTT_SC_CLK_CTRL, 0xffffffff, 0x00000100,
+       mmCGTT_SPI_CLK_CTRL, 0xffffffff, 0x00000100,
+       mmCGTT_SQ_CLK_CTRL, 0xffffffff, 0x00000100,
+       mmCGTT_SQG_CLK_CTRL, 0xffffffff, 0x00000100,
+       mmCGTT_SX_CLK_CTRL0, 0xffffffff, 0x00000100,
+       mmCGTT_SX_CLK_CTRL1, 0xffffffff, 0x00000100,
+       mmCGTT_SX_CLK_CTRL2, 0xffffffff, 0x00000100,
+       mmCGTT_SX_CLK_CTRL3, 0xffffffff, 0x00000100,
+       mmCGTT_SX_CLK_CTRL4, 0xffffffff, 0x00000100,
+       mmCGTT_TCI_CLK_CTRL, 0xffffffff, 0x00000100,
+       mmCGTT_TCP_CLK_CTRL, 0xffffffff, 0x00000100,
+       mmCGTT_VGT_CLK_CTRL, 0xffffffff, 0x06000100,
+       mmDB_CGTT_CLK_CTRL_0, 0xffffffff, 0x00000100,
+       mmTA_CGTT_CTRL, 0xffffffff, 0x00000100,
+       mmTCA_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
+       mmTCC_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
+       mmTD_CGTT_CTRL, 0xffffffff, 0x00000100,
+       mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
+       mmCGTS_CU0_SP0_CTRL_REG, 0xffffffff, 0x00010000,
+       mmCGTS_CU0_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
+       mmCGTS_CU0_TA_SQC_CTRL_REG, 0xffffffff, 0x00040007,
+       mmCGTS_CU0_SP1_CTRL_REG, 0xffffffff, 0x00060005,
+       mmCGTS_CU0_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
+       mmCGTS_CU1_SP0_CTRL_REG, 0xffffffff, 0x00010000,
+       mmCGTS_CU1_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
+       mmCGTS_CU1_TA_CTRL_REG, 0xffffffff, 0x00040007,
+       mmCGTS_CU1_SP1_CTRL_REG, 0xffffffff, 0x00060005,
+       mmCGTS_CU1_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
+       mmCGTS_CU2_SP0_CTRL_REG, 0xffffffff, 0x00010000,
+       mmCGTS_CU2_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
+       mmCGTS_CU2_TA_CTRL_REG, 0xffffffff, 0x00040007,
+       mmCGTS_CU2_SP1_CTRL_REG, 0xffffffff, 0x00060005,
+       mmCGTS_CU2_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
+       mmCGTS_CU3_SP0_CTRL_REG, 0xffffffff, 0x00010000,
+       mmCGTS_CU3_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
+       mmCGTS_CU3_TA_CTRL_REG, 0xffffffff, 0x00040007,
+       mmCGTS_CU3_SP1_CTRL_REG, 0xffffffff, 0x00060005,
+       mmCGTS_CU3_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
+       mmCGTS_CU4_SP0_CTRL_REG, 0xffffffff, 0x00010000,
+       mmCGTS_CU4_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
+       mmCGTS_CU4_TA_SQC_CTRL_REG, 0xffffffff, 0x00040007,
+       mmCGTS_CU4_SP1_CTRL_REG, 0xffffffff, 0x00060005,
+       mmCGTS_CU4_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
+       mmCGTS_CU5_SP0_CTRL_REG, 0xffffffff, 0x00010000,
+       mmCGTS_CU5_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
+       mmCGTS_CU5_TA_CTRL_REG, 0xffffffff, 0x00040007,
+       mmCGTS_CU5_SP1_CTRL_REG, 0xffffffff, 0x00060005,
+       mmCGTS_CU5_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
+       mmCGTS_CU6_SP0_CTRL_REG, 0xffffffff, 0x00010000,
+       mmCGTS_CU6_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
+       mmCGTS_CU6_TA_CTRL_REG, 0xffffffff, 0x00040007,
+       mmCGTS_CU6_SP1_CTRL_REG, 0xffffffff, 0x00060005,
+       mmCGTS_CU6_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
+       mmCGTS_CU7_SP0_CTRL_REG, 0xffffffff, 0x00010000,
+       mmCGTS_CU7_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
+       mmCGTS_CU7_TA_CTRL_REG, 0xffffffff, 0x00040007,
+       mmCGTS_CU7_SP1_CTRL_REG, 0xffffffff, 0x00060005,
+       mmCGTS_CU7_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
+       mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96e00200,
+       mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100,
+       mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003f,
+       mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001,
+};
+
+static void gfx_v8_0_set_ring_funcs(struct amdgpu_device *adev);
+static void gfx_v8_0_set_irq_funcs(struct amdgpu_device *adev);
+static void gfx_v8_0_set_gds_init(struct amdgpu_device *adev);
+
+static void gfx_v8_0_init_golden_registers(struct amdgpu_device *adev)
+{
+       switch (adev->asic_type) {
+       case CHIP_TOPAZ:
+               amdgpu_program_register_sequence(adev,
+                                                iceland_mgcg_cgcg_init,
+                                                (const u32)ARRAY_SIZE(iceland_mgcg_cgcg_init));
+               amdgpu_program_register_sequence(adev,
+                                                golden_settings_iceland_a11,
+                                                (const u32)ARRAY_SIZE(golden_settings_iceland_a11));
+               amdgpu_program_register_sequence(adev,
+                                                iceland_golden_common_all,
+                                                (const u32)ARRAY_SIZE(iceland_golden_common_all));
+               break;
+       case CHIP_TONGA:
+               amdgpu_program_register_sequence(adev,
+                                                tonga_mgcg_cgcg_init,
+                                                (const u32)ARRAY_SIZE(tonga_mgcg_cgcg_init));
+               amdgpu_program_register_sequence(adev,
+                                                golden_settings_tonga_a11,
+                                                (const u32)ARRAY_SIZE(golden_settings_tonga_a11));
+               amdgpu_program_register_sequence(adev,
+                                                tonga_golden_common_all,
+                                                (const u32)ARRAY_SIZE(tonga_golden_common_all));
+               break;
+       case CHIP_CARRIZO:
+               amdgpu_program_register_sequence(adev,
+                                                cz_mgcg_cgcg_init,
+                                                (const u32)ARRAY_SIZE(cz_mgcg_cgcg_init));
+               amdgpu_program_register_sequence(adev,
+                                                cz_golden_settings_a11,
+                                                (const u32)ARRAY_SIZE(cz_golden_settings_a11));
+               amdgpu_program_register_sequence(adev,
+                                                cz_golden_common_all,
+                                                (const u32)ARRAY_SIZE(cz_golden_common_all));
+               break;
+       default:
+               break;
+       }
+}
+
+static void gfx_v8_0_scratch_init(struct amdgpu_device *adev)
+{
+       int i;
+
+       adev->gfx.scratch.num_reg = 7;
+       adev->gfx.scratch.reg_base = mmSCRATCH_REG0;
+       for (i = 0; i < adev->gfx.scratch.num_reg; i++) {
+               adev->gfx.scratch.free[i] = true;
+               adev->gfx.scratch.reg[i] = adev->gfx.scratch.reg_base + i;
+       }
+}
+
+static int gfx_v8_0_ring_test_ring(struct amdgpu_ring *ring)
+{
+       struct amdgpu_device *adev = ring->adev;
+       uint32_t scratch;
+       uint32_t tmp = 0;
+       unsigned i;
+       int r;
+
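+       /* write a token to a scratch register, have the CP overwrite it via a
+        * SET_UCONFIG_REG packet, then poll until the new value shows up
+        */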
+       r = amdgpu_gfx_scratch_get(adev, &scratch);
+       if (r) {
+               DRM_ERROR("amdgpu: cp failed to get scratch reg (%d).\n", r);
+               return r;
+       }
+       WREG32(scratch, 0xCAFEDEAD);
+       r = amdgpu_ring_lock(ring, 3);
+       if (r) {
+               DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
+                         ring->idx, r);
+               amdgpu_gfx_scratch_free(adev, scratch);
+               return r;
+       }
+       amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
+       amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
+       amdgpu_ring_write(ring, 0xDEADBEEF);
+       amdgpu_ring_unlock_commit(ring);
+
+       for (i = 0; i < adev->usec_timeout; i++) {
+               tmp = RREG32(scratch);
+               if (tmp == 0xDEADBEEF)
+                       break;
+               DRM_UDELAY(1);
+       }
+       if (i < adev->usec_timeout) {
+               DRM_INFO("ring test on %d succeeded in %d usecs\n",
+                        ring->idx, i);
+       } else {
+               DRM_ERROR("amdgpu: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
+                         ring->idx, scratch, tmp);
+               r = -EINVAL;
+       }
+       amdgpu_gfx_scratch_free(adev, scratch);
+       return r;
+}
+
+static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring)
+{
+       struct amdgpu_device *adev = ring->adev;
+       struct amdgpu_ib ib;
+       uint32_t scratch;
+       uint32_t tmp = 0;
+       unsigned i;
+       int r;
+
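+       /* same scratch register handshake as the ring test, but the write is
+        * carried in an indirect buffer and we wait on its fence first
+        */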
+       r = amdgpu_gfx_scratch_get(adev, &scratch);
+       if (r) {
+               DRM_ERROR("amdgpu: failed to get scratch reg (%d).\n", r);
+               return r;
+       }
+       WREG32(scratch, 0xCAFEDEAD);
+       r = amdgpu_ib_get(ring, NULL, 256, &ib);
+       if (r) {
+               DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
+               amdgpu_gfx_scratch_free(adev, scratch);
+               return r;
+       }
+       ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1);
+       ib.ptr[1] = ((scratch - PACKET3_SET_UCONFIG_REG_START));
+       ib.ptr[2] = 0xDEADBEEF;
+       ib.length_dw = 3;
+       r = amdgpu_ib_schedule(adev, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED);
+       if (r) {
+               amdgpu_gfx_scratch_free(adev, scratch);
+               amdgpu_ib_free(adev, &ib);
+               DRM_ERROR("amdgpu: failed to schedule ib (%d).\n", r);
+               return r;
+       }
+       r = amdgpu_fence_wait(ib.fence, false);
+       if (r) {
+               DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
+               amdgpu_gfx_scratch_free(adev, scratch);
+               amdgpu_ib_free(adev, &ib);
+               return r;
+       }
+       for (i = 0; i < adev->usec_timeout; i++) {
+               tmp = RREG32(scratch);
+               if (tmp == 0xDEADBEEF)
+                       break;
+               DRM_UDELAY(1);
+       }
+       if (i < adev->usec_timeout) {
+               DRM_INFO("ib test on ring %d succeeded in %u usecs\n",
+                        ib.fence->ring->idx, i);
+       } else {
+               DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n",
+                         scratch, tmp);
+               r = -EINVAL;
+       }
+       amdgpu_gfx_scratch_free(adev, scratch);
+       amdgpu_ib_free(adev, &ib);
+       return r;
+}
+
+static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
+{
+       const char *chip_name;
+       char fw_name[30];
+       int err;
+       struct amdgpu_firmware_info *info = NULL;
+       const struct common_firmware_header *header = NULL;
+
+       DRM_DEBUG("\n");
+
+       switch (adev->asic_type) {
+       case CHIP_TOPAZ:
+               chip_name = "topaz";
+               break;
+       case CHIP_TONGA:
+               chip_name = "tonga";
+               break;
+       case CHIP_CARRIZO:
+               chip_name = "carrizo";
+               break;
+       default:
+               BUG();
+       }
+
+       snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
+       err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
+       if (err)
+               goto out;
+       err = amdgpu_ucode_validate(adev->gfx.pfp_fw);
+       if (err)
+               goto out;
+
+       snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
+       err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
+       if (err)
+               goto out;
+       err = amdgpu_ucode_validate(adev->gfx.me_fw);
+       if (err)
+               goto out;
+
+       snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", chip_name);
+       err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
+       if (err)
+               goto out;
+       err = amdgpu_ucode_validate(adev->gfx.ce_fw);
+       if (err)
+               goto out;
+
+       snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", chip_name);
+       err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
+       if (err)
+               goto out;
+       err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
+       err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
+       if (err)
+               goto out;
+       snprintf(fw_name, sizeof(fw_name), "radeon/%s_mec.bin", chip_name);
+       err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
+       if (err)
+               goto out;
+       err = amdgpu_ucode_validate(adev->gfx.mec_fw);
+       if (err)
+               goto out;
+
+       snprintf(fw_name, sizeof(fw_name), "radeon/%s_mec2.bin", chip_name);
+       err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
+       if (!err) {
+               err = amdgpu_ucode_validate(adev->gfx.mec2_fw);
+               if (err)
+                       goto out;
+       } else {
+               err = 0;
+               adev->gfx.mec2_fw = NULL;
+       }
+
+       if (adev->firmware.smu_load) {
+               info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_PFP];
+               info->ucode_id = AMDGPU_UCODE_ID_CP_PFP;
+               info->fw = adev->gfx.pfp_fw;
+               header = (const struct common_firmware_header *)info->fw->data;
+               adev->firmware.fw_size +=
+                       ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
+
+               info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_ME];
+               info->ucode_id = AMDGPU_UCODE_ID_CP_ME;
+               info->fw = adev->gfx.me_fw;
+               header = (const struct common_firmware_header *)info->fw->data;
+               adev->firmware.fw_size +=
+                       ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
+
+               info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_CE];
+               info->ucode_id = AMDGPU_UCODE_ID_CP_CE;
+               info->fw = adev->gfx.ce_fw;
+               header = (const struct common_firmware_header *)info->fw->data;
+               adev->firmware.fw_size +=
+                       ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
+
+               info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_G];
+               info->ucode_id = AMDGPU_UCODE_ID_RLC_G;
+               info->fw = adev->gfx.rlc_fw;
+               header = (const struct common_firmware_header *)info->fw->data;
+               adev->firmware.fw_size +=
+                       ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
+
+               info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1];
+               info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1;
+               info->fw = adev->gfx.mec_fw;
+               header = (const struct common_firmware_header *)info->fw->data;
+               adev->firmware.fw_size +=
+                       ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
+
+               if (adev->gfx.mec2_fw) {
+                       info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2];
+                       info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
+                       info->fw = adev->gfx.mec2_fw;
+                       header = (const struct common_firmware_header *)info->fw->data;
+                       adev->firmware.fw_size +=
+                               ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
+               }
+
+       }
+
+out:
+       if (err) {
+               dev_err(adev->dev,
+                       "gfx8: Failed to load firmware \"%s\"\n",
+                       fw_name);
+               release_firmware(adev->gfx.pfp_fw);
+               adev->gfx.pfp_fw = NULL;
+               release_firmware(adev->gfx.me_fw);
+               adev->gfx.me_fw = NULL;
+               release_firmware(adev->gfx.ce_fw);
+               adev->gfx.ce_fw = NULL;
+               release_firmware(adev->gfx.rlc_fw);
+               adev->gfx.rlc_fw = NULL;
+               release_firmware(adev->gfx.mec_fw);
+               adev->gfx.mec_fw = NULL;
+               release_firmware(adev->gfx.mec2_fw);
+               adev->gfx.mec2_fw = NULL;
+       }
+       return err;
+}
+
+static void gfx_v8_0_mec_fini(struct amdgpu_device *adev)
+{
+       int r;
+
+       if (adev->gfx.mec.hpd_eop_obj) {
+               r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, false);
+               if (unlikely(r != 0))
+                       dev_warn(adev->dev, "(%d) reserve HPD EOP bo failed\n", r);
+               amdgpu_bo_unpin(adev->gfx.mec.hpd_eop_obj);
+               amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
+
+               amdgpu_bo_unref(&adev->gfx.mec.hpd_eop_obj);
+               adev->gfx.mec.hpd_eop_obj = NULL;
+       }
+}
+
+#define MEC_HPD_SIZE 2048
+
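+/**
+ * gfx_v8_0_mec_init - set up the compute HPD EOP buffer
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Select the number of MECs, pipes and queues used for compute, then
+ * allocate, pin and zero a GTT buffer object holding the HPD EOP data
+ * for those queues.
+ */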
+static int gfx_v8_0_mec_init(struct amdgpu_device *adev)
+{
+       int r;
+       u32 *hpd;
+
+       /*
+        * we assign only 1 pipe because all other pipes will
+        * be handled by KFD
+        */
+       adev->gfx.mec.num_mec = 1;
+       adev->gfx.mec.num_pipe = 1;
+       adev->gfx.mec.num_queue = adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe * 8;
+
+       if (adev->gfx.mec.hpd_eop_obj == NULL) {
+               r = amdgpu_bo_create(adev,
+                                    adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe * MEC_HPD_SIZE * 2,
+                                    PAGE_SIZE, true,
+                                    AMDGPU_GEM_DOMAIN_GTT, 0, NULL,
+                                    &adev->gfx.mec.hpd_eop_obj);
+               if (r) {
+                       dev_warn(adev->dev, "(%d) create HPD EOP bo failed\n", r);
+                       return r;
+               }
+       }
+
+       r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, false);
+       if (unlikely(r != 0)) {
+               gfx_v8_0_mec_fini(adev);
+               return r;
+       }
+       r = amdgpu_bo_pin(adev->gfx.mec.hpd_eop_obj, AMDGPU_GEM_DOMAIN_GTT,
+                         &adev->gfx.mec.hpd_eop_gpu_addr);
+       if (r) {
+               dev_warn(adev->dev, "(%d) pin HPD EOP bo failed\n", r);
+               gfx_v8_0_mec_fini(adev);
+               return r;
+       }
+       r = amdgpu_bo_kmap(adev->gfx.mec.hpd_eop_obj, (void **)&hpd);
+       if (r) {
+               dev_warn(adev->dev, "(%d) map HPD EOP bo failed\n", r);
+               gfx_v8_0_mec_fini(adev);
+               return r;
+       }
+
+       memset(hpd, 0, adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe * MEC_HPD_SIZE * 2);
+
+       amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
+       amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
+
+       return 0;
+}
+
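+/**
+ * gfx_v8_0_sw_init - gfx software init (VI)
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Register the EOP and privileged reg/inst interrupt sources, load the
+ * gfx microcode, allocate the MEC HPD EOP buffer, set up the gfx and
+ * compute rings, and reserve the GDS, GWS and OA buffer objects.
+ */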
+static int gfx_v8_0_sw_init(struct amdgpu_device *adev)
+{
+       int i, r;
+       struct amdgpu_ring *ring;
+
+       /* EOP Event */
+       r = amdgpu_irq_add_id(adev, 181, &adev->gfx.eop_irq);
+       if (r)
+               return r;
+
+       /* Privileged reg */
+       r = amdgpu_irq_add_id(adev, 184, &adev->gfx.priv_reg_irq);
+       if (r)
+               return r;
+
+       /* Privileged inst */
+       r = amdgpu_irq_add_id(adev, 185, &adev->gfx.priv_inst_irq);
+       if (r)
+               return r;
+
+       adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;
+
+       gfx_v8_0_scratch_init(adev);
+
+       r = gfx_v8_0_init_microcode(adev);
+       if (r) {
+               DRM_ERROR("Failed to load gfx firmware!\n");
+               return r;
+       }
+
+       r = gfx_v8_0_mec_init(adev);
+       if (r) {
+               DRM_ERROR("Failed to init MEC BOs!\n");
+               return r;
+       }
+
+       r = amdgpu_wb_get(adev, &adev->gfx.ce_sync_offs);
+       if (r) {
+               DRM_ERROR("(%d) gfx.ce_sync_offs wb alloc failed\n", r);
+               return r;
+       }
+
+       /* set up the gfx ring */
+       for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
+               ring = &adev->gfx.gfx_ring[i];
+               ring->ring_obj = NULL;
+               sprintf(ring->name, "gfx");
+               /* no gfx doorbells on iceland */
+               if (adev->asic_type != CHIP_TOPAZ) {
+                       ring->use_doorbell = true;
+                       ring->doorbell_index = AMDGPU_DOORBELL_GFX_RING0;
+               }
+
+               r = amdgpu_ring_init(adev, ring, 1024 * 1024,
+                                    PACKET3(PACKET3_NOP, 0x3FFF), 0xf,
+                                    &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP,
+                                    AMDGPU_RING_TYPE_GFX);
+               if (r)
+                       return r;
+       }
+
+       /* set up the compute queues */
+       for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+               unsigned irq_type;
+
+               /* max 32 queues per MEC */
+               if ((i >= 32) || (i >= AMDGPU_MAX_COMPUTE_RINGS)) {
+                       DRM_ERROR("Too many (%d) compute rings!\n", i);
+                       break;
+               }
+               ring = &adev->gfx.compute_ring[i];
+               ring->ring_obj = NULL;
+               ring->use_doorbell = true;
+               ring->doorbell_index = AMDGPU_DOORBELL_MEC_RING0 + i;
+               ring->me = 1; /* first MEC */
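+               /* 8 queues per pipe: ring i maps to pipe i / 8, queue i % 8 */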
+               ring->pipe = i / 8;
+               ring->queue = i % 8;
+               sprintf(ring->name, "comp %d.%d.%d", ring->me, ring->pipe, ring->queue);
+               irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP + ring->pipe;
+               /* type-2 packets are deprecated on MEC, use type-3 instead */
+               r = amdgpu_ring_init(adev, ring, 1024 * 1024,
+                                    PACKET3(PACKET3_NOP, 0x3FFF), 0xf,
+                                    &adev->gfx.eop_irq, irq_type,
+                                    AMDGPU_RING_TYPE_COMPUTE);
+               if (r)
+                       return r;
+       }
+
+       /* reserve GDS, GWS and OA resource for gfx */
+       r = amdgpu_bo_create(adev, adev->gds.mem.gfx_partition_size,
+                       PAGE_SIZE, true,
+                       AMDGPU_GEM_DOMAIN_GDS, 0,
+                       NULL, &adev->gds.gds_gfx_bo);
+       if (r)
+               return r;
+
+       r = amdgpu_bo_create(adev, adev->gds.gws.gfx_partition_size,
+               PAGE_SIZE, true,
+               AMDGPU_GEM_DOMAIN_GWS, 0,
+               NULL, &adev->gds.gws_gfx_bo);
+       if (r)
+               return r;
+
+       r = amdgpu_bo_create(adev, adev->gds.oa.gfx_partition_size,
+                       PAGE_SIZE, true,
+                       AMDGPU_GEM_DOMAIN_OA, 0,
+                       NULL, &adev->gds.oa_gfx_bo);
+       if (r)
+               return r;
+
+       return 0;
+}
+
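+/**
+ * gfx_v8_0_sw_fini - gfx software teardown
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Undo gfx_v8_0_sw_init(): free the GDS/GWS/OA buffer objects, tear
+ * down the gfx and compute rings, release the CE sync writeback slot
+ * and free the MEC HPD EOP buffer.
+ */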
+static int gfx_v8_0_sw_fini(struct amdgpu_device *adev)
+{
+       int i;
+
+       amdgpu_bo_unref(&adev->gds.oa_gfx_bo);
+       amdgpu_bo_unref(&adev->gds.gws_gfx_bo);
+       amdgpu_bo_unref(&adev->gds.gds_gfx_bo);
+
+       for (i = 0; i < adev->gfx.num_gfx_rings; i++)
+               amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
+       for (i = 0; i < adev->gfx.num_compute_rings; i++)
+               amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
+
+       amdgpu_wb_free(adev, adev->gfx.ce_sync_offs);
+
+       gfx_v8_0_mec_fini(adev);
+
+       return 0;
+}
+
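+/**
+ * gfx_v8_0_tiling_mode_table_init - program the tiling mode tables
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Fill the per-asic GB_TILE_MODE and GB_MACROTILE_MODE register tables
+ * and cache the values in adev->gfx.config for later use.
+ */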
+static void gfx_v8_0_tiling_mode_table_init(struct amdgpu_device *adev)
+{
+       const u32 num_tile_mode_states = 32;
+       const u32 num_secondary_tile_mode_states = 16;
+       u32 reg_offset, gb_tile_moden, split_equal_to_row_size;
+
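+       /* pick the tile split that matches the memory row size */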
+       switch (adev->gfx.config.mem_row_size_in_kb) {
+       case 1:
+               split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_1KB;
+               break;
+       case 2:
+       default:
+               split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_2KB;
+               break;
+       case 4:
+               split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_4KB;
+               break;
+       }
+
+       switch (adev->asic_type) {
+       case CHIP_TOPAZ:
+               for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
+                       switch (reg_offset) {
+                       case 0:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                               PIPE_CONFIG(ADDR_SURF_P2) |
+                                               TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+                                               MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+                               break;
+                       case 1:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                               PIPE_CONFIG(ADDR_SURF_P2) |
+                                               TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
+                                               MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+                               break;
+                       case 2:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                               PIPE_CONFIG(ADDR_SURF_P2) |
+                                               TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+                                               MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+                               break;
+                       case 3:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                               PIPE_CONFIG(ADDR_SURF_P2) |
+                                               TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+                                               MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+                               break;
+                       case 4:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                               PIPE_CONFIG(ADDR_SURF_P2) |
+                                               TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
+                                               MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+                               break;
+                       case 5:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+                                               PIPE_CONFIG(ADDR_SURF_P2) |
+                                               TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
+                                               MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+                               break;
+                       case 6:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+                                               PIPE_CONFIG(ADDR_SURF_P2) |
+                                               TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
+                                               MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+                               break;
+                       case 8:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
+                                               PIPE_CONFIG(ADDR_SURF_P2));
+                               break;
+                       case 9:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+                                               PIPE_CONFIG(ADDR_SURF_P2) |
+                                               MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+                                               SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+                               break;
+                       case 10:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                               PIPE_CONFIG(ADDR_SURF_P2) |
+                                               MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+                                               SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+                               break;
+                       case 11:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+                                               PIPE_CONFIG(ADDR_SURF_P2) |
+                                               MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+                                               SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
+                               break;
+                       case 13:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+                                               PIPE_CONFIG(ADDR_SURF_P2) |
+                                               MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+                                               SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+                               break;
+                       case 14:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                               PIPE_CONFIG(ADDR_SURF_P2) |
+                                               MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+                                               SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+                               break;
+                       case 15:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
+                                               PIPE_CONFIG(ADDR_SURF_P2) |
+                                               MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+                                               SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+                               break;
+                       case 16:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+                                               PIPE_CONFIG(ADDR_SURF_P2) |
+                                               MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+                                               SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
+                               break;
+                       case 18:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
+                                               PIPE_CONFIG(ADDR_SURF_P2) |
+                                               MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+                                               SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+                               break;
+                       case 19:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
+                                               PIPE_CONFIG(ADDR_SURF_P2) |
+                                               MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
+                                               SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+                               break;
+                       case 20:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
+                                               PIPE_CONFIG(ADDR_SURF_P2) |
+                                               MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
+                                               SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+                               break;
+                       case 21:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
+                                               PIPE_CONFIG(ADDR_SURF_P2) |
+                                               MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
+                                               SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+                               break;
+                       case 22:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
+                                               PIPE_CONFIG(ADDR_SURF_P2) |
+                                               MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
+                                               SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+                               break;
+                       case 24:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
+                                               PIPE_CONFIG(ADDR_SURF_P2) |
+                                               MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+                                               SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+                               break;
+                       case 25:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
+                                               PIPE_CONFIG(ADDR_SURF_P2) |
+                                               MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
+                                               SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+                               break;
+                       case 26:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
+                                               PIPE_CONFIG(ADDR_SURF_P2) |
+                                               MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
+                                               SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+                               break;
+                       case 27:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+                                               PIPE_CONFIG(ADDR_SURF_P2) |
+                                               MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+                                               SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+                               break;
+                       case 28:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                               PIPE_CONFIG(ADDR_SURF_P2) |
+                                               MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+                                               SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+                               break;
+                       case 29:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+                                               PIPE_CONFIG(ADDR_SURF_P2) |
+                                               MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+                                               SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
+                               break;
+                       case 7:
+                       case 12:
+                       case 17:
+                       case 23:
+                               /* unused idx */
+                               continue;
+                       default:
+                               gb_tile_moden = 0;
+                               break;
+                       }
+                       adev->gfx.config.tile_mode_array[reg_offset] = gb_tile_moden;
+                       WREG32(mmGB_TILE_MODE0 + reg_offset, gb_tile_moden);
+               }
+               for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) {
+                       switch (reg_offset) {
+                       case 0:
+                               gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
+                                               BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+                                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+                                               NUM_BANKS(ADDR_SURF_8_BANK));
+                               break;
+                       case 1:
+                               gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
+                                               BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+                                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+                                               NUM_BANKS(ADDR_SURF_8_BANK));
+                               break;
+                       case 2:
+                               gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
+                                               BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+                                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+                                               NUM_BANKS(ADDR_SURF_8_BANK));
+                               break;
+                       case 3:
+                               gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                               BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+                                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+                                               NUM_BANKS(ADDR_SURF_8_BANK));
+                               break;
+                       case 4:
+                               gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                               BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+                                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+                                               NUM_BANKS(ADDR_SURF_8_BANK));
+                               break;
+                       case 5:
+                               gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                               BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+                                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+                                               NUM_BANKS(ADDR_SURF_8_BANK));
+                               break;
+                       case 6:
+                               gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                               BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+                                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+                                               NUM_BANKS(ADDR_SURF_8_BANK));
+                               break;
+                       case 8:
+                               gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
+                                               BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
+                                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+                                               NUM_BANKS(ADDR_SURF_16_BANK));
+                               break;
+                       case 9:
+                               gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
+                                               BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+                                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+                                               NUM_BANKS(ADDR_SURF_16_BANK));
+                               break;
+                       case 10:
+                               gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
+                                               BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+                                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+                                               NUM_BANKS(ADDR_SURF_16_BANK));
+                               break;
+                       case 11:
+                               gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
+                                               BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+                                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+                                               NUM_BANKS(ADDR_SURF_16_BANK));
+                               break;
+                       case 12:
+                               gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                               BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+                                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+                                               NUM_BANKS(ADDR_SURF_16_BANK));
+                               break;
+                       case 13:
+                               gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                               BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+                                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+                                               NUM_BANKS(ADDR_SURF_16_BANK));
+                               break;
+                       case 14:
+                               gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                               BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+                                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+                                               NUM_BANKS(ADDR_SURF_8_BANK));
+                               break;
+                       case 7:
+                               /* unused idx */
+                               continue;
+                       default:
+                               gb_tile_moden = 0;
+                               break;
+                       }
+                       adev->gfx.config.macrotile_mode_array[reg_offset] = gb_tile_moden;
+                       WREG32(mmGB_MACROTILE_MODE0 + reg_offset, gb_tile_moden);
+               }
+               break;
+       case CHIP_TONGA:
+               for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
+                       switch (reg_offset) {
+                       case 0:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                               PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+                                               TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+                                               MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+                               break;
+                       case 1:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                               PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+                                               TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
+                                               MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+                               break;
+                       case 2:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                               PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+                                               TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+                                               MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+                               break;
+                       case 3:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                               PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+                                               TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+                                               MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+                               break;
+                       case 4:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                               PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+                                               TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
+                                               MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+                               break;
+                       case 5:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+                                               PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+                                               TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
+                                               MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+                               break;
+                       case 6:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+                                               PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+                                               TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
+                                               MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+                               break;
+                       case 7:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+                                               PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+                                               TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
+                                               MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+                               break;
+                       case 8:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
+                                               PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16));
+                               break;
+                       case 9:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+                                               PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+                                               MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+                                               SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+                               break;
+                       case 10:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                               PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+                                               MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+                                               SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+                               break;
+                       case 11:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+                                               PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+                                               MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+                                               SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
+                               break;
+                       case 12:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+                                               PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+                                               MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+                                               SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
+                               break;
+                       case 13:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+                                               PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+                                               MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+                                               SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+                               break;
+                       case 14:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                               PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+                                               MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+                                               SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+                               break;
+                       case 15:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
+                                               PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+                                               MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+                                               SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+                               break;
+                       case 16:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+                                               PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+                                               MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+                                               SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
+                               break;
+                       case 17:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+                                               PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+                                               MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+                                               SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
+                               break;
+                       case 18:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
+                                               PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+                                               MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+                                               SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+                               break;
+                       case 19:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
+                                               PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+                                               MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
+                                               SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+                               break;
+                       case 20:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
+                                               PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+                                               MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
+                                               SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+                               break;
+                       case 21:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
+                                               PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+                                               MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
+                                               SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+                               break;
+                       case 22:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
+                                               PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+                                               MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
+                                               SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+                               break;
+                       case 23:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
+                                               PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+                                               MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
+                                               SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+                               break;
+                       case 24:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
+                                               PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+                                               MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+                                               SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+                               break;
+                       case 25:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
+                                               PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+                                               MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
+                                               SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+                               break;
+                       case 26:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
+                                               PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+                                               MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
+                                               SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+                               break;
+                       case 27:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+                                               PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+                                               MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+                                               SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+                               break;
+                       case 28:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                               PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+                                               MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+                                               SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+                               break;
+                       case 29:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+                                               PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+                                               MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+                                               SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
+                               break;
+                       case 30:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+                                               PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+                                               MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+                                               SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
+                               break;
+                       default:
+                               gb_tile_moden = 0;
+                               break;
+                       }
+                       adev->gfx.config.tile_mode_array[reg_offset] = gb_tile_moden;
+                       WREG32(mmGB_TILE_MODE0 + reg_offset, gb_tile_moden);
+               }
+               for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) {
+                       switch (reg_offset) {
+                       case 0:
+                               gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                               BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+                                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+                                               NUM_BANKS(ADDR_SURF_16_BANK));
+                               break;
+                       case 1:
+                               gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                               BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+                                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+                                               NUM_BANKS(ADDR_SURF_16_BANK));
+                               break;
+                       case 2:
+                               gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                               BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+                                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+                                               NUM_BANKS(ADDR_SURF_16_BANK));
+                               break;
+                       case 3:
+                               gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                               BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+                                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+                                               NUM_BANKS(ADDR_SURF_16_BANK));
+                               break;
+                       case 4:
+                               gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                               BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+                                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+                                               NUM_BANKS(ADDR_SURF_16_BANK));
+                               break;
+                       case 5:
+                               gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                               BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+                                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+                                               NUM_BANKS(ADDR_SURF_16_BANK));
+                               break;
+                       case 6:
+                               gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                               BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+                                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+                                               NUM_BANKS(ADDR_SURF_16_BANK));
+                               break;
+                       case 8:
+                               gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                               BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
+                                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+                                               NUM_BANKS(ADDR_SURF_16_BANK));
+                               break;
+                       case 9:
+                               gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                               BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+                                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+                                               NUM_BANKS(ADDR_SURF_16_BANK));
+                               break;
+                       case 10:
+                               gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                               BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+                                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+                                               NUM_BANKS(ADDR_SURF_16_BANK));
+                               break;
+                       case 11:
+                               gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                               BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+                                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+                                               NUM_BANKS(ADDR_SURF_16_BANK));
+                               break;
+                       case 12:
+                               gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                               BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+                                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+                                               NUM_BANKS(ADDR_SURF_8_BANK));
+                               break;
+                       case 13:
+                               gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                               BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+                                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+                                               NUM_BANKS(ADDR_SURF_4_BANK));
+                               break;
+                       case 14:
+                               gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                               BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+                                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+                                               NUM_BANKS(ADDR_SURF_4_BANK));
+                               break;
+                       case 7:
+                               /* unused idx */
+                               continue;
+                       default:
+                               gb_tile_moden = 0;
+                               break;
+                       }
+                       adev->gfx.config.macrotile_mode_array[reg_offset] = gb_tile_moden;
+                       WREG32(mmGB_MACROTILE_MODE0 + reg_offset, gb_tile_moden);
+               }
+               break;
+       case CHIP_CARRIZO:
+       default:
+               for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
+                       switch (reg_offset) {
+                       case 0:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                               PIPE_CONFIG(ADDR_SURF_P2) |
+                                               TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+                                               MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+                               break;
+                       case 1:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                               PIPE_CONFIG(ADDR_SURF_P2) |
+                                               TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
+                                               MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+                               break;
+                       case 2:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                               PIPE_CONFIG(ADDR_SURF_P2) |
+                                               TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+                                               MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+                               break;
+                       case 3:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                               PIPE_CONFIG(ADDR_SURF_P2) |
+                                               TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+                                               MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+                               break;
+                       case 4:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                               PIPE_CONFIG(ADDR_SURF_P2) |
+                                               TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
+                                               MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+                               break;
+                       case 5:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+                                               PIPE_CONFIG(ADDR_SURF_P2) |
+                                               TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
+                                               MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+                               break;
+                       case 6:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+                                               PIPE_CONFIG(ADDR_SURF_P2) |
+                                               TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
+                                               MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+                               break;
+                       case 8:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
+                                               PIPE_CONFIG(ADDR_SURF_P2));
+                               break;
+                       case 9:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+                                               PIPE_CONFIG(ADDR_SURF_P2) |
+                                               MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+                                               SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+                               break;
+                       case 10:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                               PIPE_CONFIG(ADDR_SURF_P2) |
+                                               MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+                                               SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+                               break;
+                       case 11:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+                                               PIPE_CONFIG(ADDR_SURF_P2) |
+                                               MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+                                               SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
+                               break;
+                       case 13:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+                                               PIPE_CONFIG(ADDR_SURF_P2) |
+                                               MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+                                               SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+                               break;
+                       case 14:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                               PIPE_CONFIG(ADDR_SURF_P2) |
+                                               MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+                                               SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+                               break;
+                       case 15:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
+                                               PIPE_CONFIG(ADDR_SURF_P2) |
+                                               MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+                                               SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+                               break;
+                       case 16:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+                                               PIPE_CONFIG(ADDR_SURF_P2) |
+                                               MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+                                               SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
+                               break;
+                       case 18:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
+                                               PIPE_CONFIG(ADDR_SURF_P2) |
+                                               MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+                                               SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+                               break;
+                       case 19:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
+                                               PIPE_CONFIG(ADDR_SURF_P2) |
+                                               MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
+                                               SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+                               break;
+                       case 20:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
+                                               PIPE_CONFIG(ADDR_SURF_P2) |
+                                               MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
+                                               SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+                               break;
+                       case 21:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
+                                               PIPE_CONFIG(ADDR_SURF_P2) |
+                                               MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
+                                               SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+                               break;
+                       case 22:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
+                                               PIPE_CONFIG(ADDR_SURF_P2) |
+                                               MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
+                                               SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+                               break;
+                       case 24:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
+                                               PIPE_CONFIG(ADDR_SURF_P2) |
+                                               MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+                                               SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+                               break;
+                       case 25:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
+                                               PIPE_CONFIG(ADDR_SURF_P2) |
+                                               MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
+                                               SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+                               break;
+                       case 26:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
+                                               PIPE_CONFIG(ADDR_SURF_P2) |
+                                               MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
+                                               SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+                               break;
+                       case 27:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+                                               PIPE_CONFIG(ADDR_SURF_P2) |
+                                               MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+                                               SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+                               break;
+                       case 28:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                               PIPE_CONFIG(ADDR_SURF_P2) |
+                                               MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+                                               SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+                               break;
+                       case 29:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+                                               PIPE_CONFIG(ADDR_SURF_P2) |
+                                               MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+                                               SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
+                               break;
+                       case 7:
+                       case 12:
+                       case 17:
+                       case 23:
+                               /* unused idx */
+                               continue;
+                       default:
+                               gb_tile_moden = 0;
+                               break;
+                       }
+                       adev->gfx.config.tile_mode_array[reg_offset] = gb_tile_moden;
+                       WREG32(mmGB_TILE_MODE0 + reg_offset, gb_tile_moden);
+               }
+               for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) {
+                       switch (reg_offset) {
+                       case 0:
+                               gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                               BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+                                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+                                               NUM_BANKS(ADDR_SURF_8_BANK));
+                               break;
+                       case 1:
+                               gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                               BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+                                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+                                               NUM_BANKS(ADDR_SURF_8_BANK));
+                               break;
+                       case 2:
+                               gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                               BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+                                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+                                               NUM_BANKS(ADDR_SURF_8_BANK));
+                               break;
+                       case 3:
+                               gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                               BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+                                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+                                               NUM_BANKS(ADDR_SURF_8_BANK));
+                               break;
+                       case 4:
+                               gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                               BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+                                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+                                               NUM_BANKS(ADDR_SURF_8_BANK));
+                               break;
+                       case 5:
+                               gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                               BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+                                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+                                               NUM_BANKS(ADDR_SURF_8_BANK));
+                               break;
+                       case 6:
+                               gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                               BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+                                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+                                               NUM_BANKS(ADDR_SURF_8_BANK));
+                               break;
+                       case 8:
+                               gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
+                                               BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
+                                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+                                               NUM_BANKS(ADDR_SURF_16_BANK));
+                               break;
+                       case 9:
+                               gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
+                                               BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+                                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+                                               NUM_BANKS(ADDR_SURF_16_BANK));
+                               break;
+                       case 10:
+                               gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
+                                               BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+                                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+                                               NUM_BANKS(ADDR_SURF_16_BANK));
+                               break;
+                       case 11:
+                               gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
+                                               BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+                                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+                                               NUM_BANKS(ADDR_SURF_16_BANK));
+                               break;
+                       case 12:
+                               gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                               BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+                                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+                                               NUM_BANKS(ADDR_SURF_16_BANK));
+                               break;
+                       case 13:
+                               gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                               BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+                                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+                                               NUM_BANKS(ADDR_SURF_16_BANK));
+                               break;
+                       case 14:
+                               gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                               BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+                                               MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+                                               NUM_BANKS(ADDR_SURF_8_BANK));
+                               break;
+                       case 7:
+                               /* unused idx */
+                               continue;
+                       default:
+                               gb_tile_moden = 0;
+                               break;
+                       }
+                       adev->gfx.config.macrotile_mode_array[reg_offset] = gb_tile_moden;
+                       WREG32(mmGB_MACROTILE_MODE0 + reg_offset, gb_tile_moden);
+               }
+       }
+}
+
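+/* Create a bitmask with the lowest bit_width bits set. */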
+static u32 gfx_v8_0_create_bitmask(u32 bit_width)
+{
+       u32 i, mask = 0;
+
+       for (i = 0; i < bit_width; i++) {
+               mask <<= 1;
+               mask |= 1;
+       }
+       return mask;
+}
+
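+/*
+ * Select which shader engine (SE) and shader array (SH) subsequent GRBM
+ * register accesses target; a value of 0xffffffff broadcasts to all.
+ */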
+void gfx_v8_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num)
+{
+       u32 data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1);
+
+       if ((se_num == 0xffffffff) && (sh_num == 0xffffffff)) {
+               data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1);
+               data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1);
+       } else if (se_num == 0xffffffff) {
+               data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num);
+               data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1);
+       } else if (sh_num == 0xffffffff) {
+               data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1);
+               data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);
+       } else {
+               data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num);
+               data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);
+       }
+       WREG32(mmGRBM_GFX_INDEX, data);
+}
+
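+/*
+ * Return a bitmask of the render backends disabled (by harvesting or by
+ * the user) on the currently selected SE/SH.
+ */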
+static u32 gfx_v8_0_get_rb_disabled(struct amdgpu_device *adev,
+                                   u32 max_rb_num_per_se,
+                                   u32 sh_per_se)
+{
+       u32 data, mask;
+
+       data = RREG32(mmCC_RB_BACKEND_DISABLE);
+       if (data & 1)
+               data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK;
+       else
+               data = 0;
+
+       data |= RREG32(mmGC_USER_RB_BACKEND_DISABLE);
+
+       data >>= GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT;
+
+       mask = gfx_v8_0_create_bitmask(max_rb_num_per_se / sh_per_se);
+
+       return data & mask;
+}
+
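+/*
+ * Compute the enabled render backend mask from the per-SE/SH disable
+ * state and program PA_SC_RASTER_CONFIG for each shader engine.
+ */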
+static void gfx_v8_0_setup_rb(struct amdgpu_device *adev,
+                             u32 se_num, u32 sh_per_se,
+                             u32 max_rb_num_per_se)
+{
+       int i, j;
+       u32 data, mask;
+       u32 disabled_rbs = 0;
+       u32 enabled_rbs = 0;
+
+       mutex_lock(&adev->grbm_idx_mutex);
+       for (i = 0; i < se_num; i++) {
+               for (j = 0; j < sh_per_se; j++) {
+                       gfx_v8_0_select_se_sh(adev, i, j);
+                       data = gfx_v8_0_get_rb_disabled(adev,
+                                             max_rb_num_per_se, sh_per_se);
+                       disabled_rbs |= data << ((i * sh_per_se + j) *
+                                                RB_BITMAP_WIDTH_PER_SH);
+               }
+       }
+       gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
+       mutex_unlock(&adev->grbm_idx_mutex);
+
+       mask = 1;
+       for (i = 0; i < max_rb_num_per_se * se_num; i++) {
+               if (!(disabled_rbs & mask))
+                       enabled_rbs |= mask;
+               mask <<= 1;
+       }
+
+       adev->gfx.config.backend_enable_mask = enabled_rbs;
+
+       mutex_lock(&adev->grbm_idx_mutex);
+       for (i = 0; i < se_num; i++) {
+               gfx_v8_0_select_se_sh(adev, i, 0xffffffff);
+               data = 0;
+               for (j = 0; j < sh_per_se; j++) {
+                       switch (enabled_rbs & 3) {
+                       case 0:
+                               if (j == 0)
+                                       data |= (RASTER_CONFIG_RB_MAP_3 <<
+                                                PA_SC_RASTER_CONFIG__PKR_MAP__SHIFT);
+                               else
+                                       data |= (RASTER_CONFIG_RB_MAP_0 <<
+                                                PA_SC_RASTER_CONFIG__PKR_MAP__SHIFT);
+                               break;
+                       case 1:
+                               data |= (RASTER_CONFIG_RB_MAP_0 <<
+                                        (i * sh_per_se + j) * 2);
+                               break;
+                       case 2:
+                               data |= (RASTER_CONFIG_RB_MAP_3 <<
+                                        (i * sh_per_se + j) * 2);
+                               break;
+                       case 3:
+                       default:
+                               data |= (RASTER_CONFIG_RB_MAP_2 <<
+                                        (i * sh_per_se + j) * 2);
+                               break;
+                       }
+                       enabled_rbs >>= 2;
+               }
+               WREG32(mmPA_SC_RASTER_CONFIG, data);
+       }
+       gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
+       mutex_unlock(&adev->grbm_idx_mutex);
+}
+
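+/*
+ * One-time gfx setup: per-ASIC limits, GB_ADDR_CONFIG, the tiling mode
+ * tables, render backend mapping and the SH_MEM apertures.
+ */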
+static void gfx_v8_0_gpu_init(struct amdgpu_device *adev)
+{
+       u32 gb_addr_config;
+       u32 mc_shared_chmap, mc_arb_ramcfg;
+       u32 dimm00_addr_map, dimm01_addr_map, dimm10_addr_map, dimm11_addr_map;
+       u32 tmp;
+       int i;
+
+       switch (adev->asic_type) {
+       case CHIP_TOPAZ:
+               adev->gfx.config.max_shader_engines = 1;
+               adev->gfx.config.max_tile_pipes = 2;
+               adev->gfx.config.max_cu_per_sh = 6;
+               adev->gfx.config.max_sh_per_se = 1;
+               adev->gfx.config.max_backends_per_se = 2;
+               adev->gfx.config.max_texture_channel_caches = 2;
+               adev->gfx.config.max_gprs = 256;
+               adev->gfx.config.max_gs_threads = 32;
+               adev->gfx.config.max_hw_contexts = 8;
+
+               adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
+               adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
+               adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
+               adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
+               gb_addr_config = TOPAZ_GB_ADDR_CONFIG_GOLDEN;
+               break;
+       case CHIP_TONGA:
+               adev->gfx.config.max_shader_engines = 4;
+               adev->gfx.config.max_tile_pipes = 8;
+               adev->gfx.config.max_cu_per_sh = 8;
+               adev->gfx.config.max_sh_per_se = 1;
+               adev->gfx.config.max_backends_per_se = 2;
+               adev->gfx.config.max_texture_channel_caches = 8;
+               adev->gfx.config.max_gprs = 256;
+               adev->gfx.config.max_gs_threads = 32;
+               adev->gfx.config.max_hw_contexts = 8;
+
+               adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
+               adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
+               adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
+               adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
+               gb_addr_config = TONGA_GB_ADDR_CONFIG_GOLDEN;
+               break;
+       case CHIP_CARRIZO:
+               adev->gfx.config.max_shader_engines = 1;
+               adev->gfx.config.max_tile_pipes = 2;
+               adev->gfx.config.max_cu_per_sh = 8;
+               adev->gfx.config.max_sh_per_se = 1;
+               adev->gfx.config.max_backends_per_se = 2;
+               adev->gfx.config.max_texture_channel_caches = 2;
+               adev->gfx.config.max_gprs = 256;
+               adev->gfx.config.max_gs_threads = 32;
+               adev->gfx.config.max_hw_contexts = 8;
+
+               adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
+               adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
+               adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
+               adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
+               gb_addr_config = CARRIZO_GB_ADDR_CONFIG_GOLDEN;
+               break;
+       default:
+               adev->gfx.config.max_shader_engines = 2;
+               adev->gfx.config.max_tile_pipes = 4;
+               adev->gfx.config.max_cu_per_sh = 2;
+               adev->gfx.config.max_sh_per_se = 1;
+               adev->gfx.config.max_backends_per_se = 2;
+               adev->gfx.config.max_texture_channel_caches = 4;
+               adev->gfx.config.max_gprs = 256;
+               adev->gfx.config.max_gs_threads = 32;
+               adev->gfx.config.max_hw_contexts = 8;
+
+               adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
+               adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
+               adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
+               adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
+               gb_addr_config = TONGA_GB_ADDR_CONFIG_GOLDEN;
+               break;
+       }
+
+       tmp = RREG32(mmGRBM_CNTL);
+       tmp = REG_SET_FIELD(tmp, GRBM_CNTL, READ_TIMEOUT, 0xff);
+       WREG32(mmGRBM_CNTL, tmp);
+
+       mc_shared_chmap = RREG32(mmMC_SHARED_CHMAP);
+       adev->gfx.config.mc_arb_ramcfg = RREG32(mmMC_ARB_RAMCFG);
+       mc_arb_ramcfg = adev->gfx.config.mc_arb_ramcfg;
+
+       adev->gfx.config.num_tile_pipes = adev->gfx.config.max_tile_pipes;
+       adev->gfx.config.mem_max_burst_length_bytes = 256;
+       if (adev->flags & AMDGPU_IS_APU) {
+               /* Get memory bank mapping mode. */
+               tmp = RREG32(mmMC_FUS_DRAM0_BANK_ADDR_MAPPING);
+               dimm00_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM0ADDRMAP);
+               dimm01_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM1ADDRMAP);
+
+               tmp = RREG32(mmMC_FUS_DRAM1_BANK_ADDR_MAPPING);
+               dimm10_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM1_BANK_ADDR_MAPPING, DIMM0ADDRMAP);
+               dimm11_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM1_BANK_ADDR_MAPPING, DIMM1ADDRMAP);
+
+               /* Validate settings in case only one DIMM is installed. */
+               if ((dimm00_addr_map == 0) || (dimm00_addr_map == 3) || (dimm00_addr_map == 4) || (dimm00_addr_map > 12))
+                       dimm00_addr_map = 0;
+               if ((dimm01_addr_map == 0) || (dimm01_addr_map == 3) || (dimm01_addr_map == 4) || (dimm01_addr_map > 12))
+                       dimm01_addr_map = 0;
+               if ((dimm10_addr_map == 0) || (dimm10_addr_map == 3) || (dimm10_addr_map == 4) || (dimm10_addr_map > 12))
+                       dimm10_addr_map = 0;
+               if ((dimm11_addr_map == 0) || (dimm11_addr_map == 3) || (dimm11_addr_map == 4) || (dimm11_addr_map > 12))
+                       dimm11_addr_map = 0;
+
+               /* If the DIMM Addr map is 8GB, the ROW size should be 2KB. Otherwise 1KB. */
+               /* If ROW size(DIMM1) != ROW size(DIMM0), the ROW size should be the larger one. */
+               if ((dimm00_addr_map == 11) || (dimm01_addr_map == 11) || (dimm10_addr_map == 11) || (dimm11_addr_map == 11))
+                       adev->gfx.config.mem_row_size_in_kb = 2;
+               else
+                       adev->gfx.config.mem_row_size_in_kb = 1;
+       } else {
+               tmp = REG_GET_FIELD(mc_arb_ramcfg, MC_ARB_RAMCFG, NOOFCOLS);
+               adev->gfx.config.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
+               if (adev->gfx.config.mem_row_size_in_kb > 4)
+                       adev->gfx.config.mem_row_size_in_kb = 4;
+       }
+
+       adev->gfx.config.shader_engine_tile_size = 32;
+       adev->gfx.config.num_gpus = 1;
+       adev->gfx.config.multi_gpu_tile_size = 64;
+
+       /* fix up row size */
+       switch (adev->gfx.config.mem_row_size_in_kb) {
+       case 1:
+       default:
+               gb_addr_config = REG_SET_FIELD(gb_addr_config, GB_ADDR_CONFIG, ROW_SIZE, 0);
+               break;
+       case 2:
+               gb_addr_config = REG_SET_FIELD(gb_addr_config, GB_ADDR_CONFIG, ROW_SIZE, 1);
+               break;
+       case 4:
+               gb_addr_config = REG_SET_FIELD(gb_addr_config, GB_ADDR_CONFIG, ROW_SIZE, 2);
+               break;
+       }
+       adev->gfx.config.gb_addr_config = gb_addr_config;
+
+       WREG32(mmGB_ADDR_CONFIG, gb_addr_config);
+       WREG32(mmHDP_ADDR_CONFIG, gb_addr_config);
+       WREG32(mmDMIF_ADDR_CALC, gb_addr_config);
+       WREG32(mmSDMA0_TILING_CONFIG + SDMA0_REGISTER_OFFSET,
+              gb_addr_config & 0x70);
+       WREG32(mmSDMA0_TILING_CONFIG + SDMA1_REGISTER_OFFSET,
+              gb_addr_config & 0x70);
+       WREG32(mmUVD_UDEC_ADDR_CONFIG, gb_addr_config);
+       WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, gb_addr_config);
+       WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, gb_addr_config);
+
+       gfx_v8_0_tiling_mode_table_init(adev);
+
+       gfx_v8_0_setup_rb(adev, adev->gfx.config.max_shader_engines,
+                                adev->gfx.config.max_sh_per_se,
+                                adev->gfx.config.max_backends_per_se);
+
+       /* XXX SH_MEM regs */
+       /* where to put LDS, scratch, GPUVM in FSA64 space */
+       mutex_lock(&adev->srbm_mutex);
+       for (i = 0; i < 16; i++) {
+               vi_srbm_select(adev, 0, 0, 0, i);
+               /* CP and shaders */
+               if (i == 0) {
+                       tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, DEFAULT_MTYPE, MTYPE_UC);
+                       tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, APE1_MTYPE, MTYPE_UC);
+                       WREG32(mmSH_MEM_CONFIG, tmp);
+               } else {
+                       tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, DEFAULT_MTYPE, MTYPE_NC);
+                       tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, APE1_MTYPE, MTYPE_NC);
+                       WREG32(mmSH_MEM_CONFIG, tmp);
+               }
+
+               WREG32(mmSH_MEM_APE1_BASE, 1);
+               WREG32(mmSH_MEM_APE1_LIMIT, 0);
+               WREG32(mmSH_MEM_BASES, 0);
+       }
+       vi_srbm_select(adev, 0, 0, 0, 0);
+       mutex_unlock(&adev->srbm_mutex);
+
+       mutex_lock(&adev->grbm_idx_mutex);
+       /*
+        * make sure the following register writes are broadcast
+        * to all the shaders
+        */
+       gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
+
+       WREG32(mmPA_SC_FIFO_SIZE,
+                  (adev->gfx.config.sc_prim_fifo_size_frontend <<
+                       PA_SC_FIFO_SIZE__SC_FRONTEND_PRIM_FIFO_SIZE__SHIFT) |
+                  (adev->gfx.config.sc_prim_fifo_size_backend <<
+                       PA_SC_FIFO_SIZE__SC_BACKEND_PRIM_FIFO_SIZE__SHIFT) |
+                  (adev->gfx.config.sc_hiz_tile_fifo_size <<
+                       PA_SC_FIFO_SIZE__SC_HIZ_TILE_FIFO_SIZE__SHIFT) |
+                  (adev->gfx.config.sc_earlyz_tile_fifo_size <<
+                       PA_SC_FIFO_SIZE__SC_EARLYZ_TILE_FIFO_SIZE__SHIFT));
+       mutex_unlock(&adev->grbm_idx_mutex);
+}
+
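+/* Wait for the RLC serdes masters to go idle on all SEs/SHs. */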
+static void gfx_v8_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
+{
+       u32 i, j, k;
+       u32 mask;
+
+       mutex_lock(&adev->grbm_idx_mutex);
+       for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
+               for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
+                       gfx_v8_0_select_se_sh(adev, i, j);
+                       for (k = 0; k < adev->usec_timeout; k++) {
+                               if (RREG32(mmRLC_SERDES_CU_MASTER_BUSY) == 0)
+                                       break;
+                               udelay(1);
+                       }
+               }
+       }
+       gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
+       mutex_unlock(&adev->grbm_idx_mutex);
+
+       mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK |
+               RLC_SERDES_NONCU_MASTER_BUSY__GC_MASTER_BUSY_MASK |
+               RLC_SERDES_NONCU_MASTER_BUSY__TC0_MASTER_BUSY_MASK |
+               RLC_SERDES_NONCU_MASTER_BUSY__TC1_MASTER_BUSY_MASK;
+       for (k = 0; k < adev->usec_timeout; k++) {
+               if ((RREG32(mmRLC_SERDES_NONCU_MASTER_BUSY) & mask) == 0)
+                       break;
+               udelay(1);
+       }
+}
+
+static void gfx_v8_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
+                                              bool enable)
+{
+       u32 tmp = RREG32(mmCP_INT_CNTL_RING0);
+
+       if (enable) {
+               tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE, 1);
+               tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE, 1);
+               tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE, 1);
+               tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE, 1);
+       } else {
+               tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE, 0);
+               tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE, 0);
+               tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE, 0);
+               tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE, 0);
+       }
+       WREG32(mmCP_INT_CNTL_RING0, tmp);
+}
+
+void gfx_v8_0_rlc_stop(struct amdgpu_device *adev)
+{
+       u32 tmp = RREG32(mmRLC_CNTL);
+
+       tmp = REG_SET_FIELD(tmp, RLC_CNTL, RLC_ENABLE_F32, 0);
+       WREG32(mmRLC_CNTL, tmp);
+
+       gfx_v8_0_enable_gui_idle_interrupt(adev, false);
+
+       gfx_v8_0_wait_for_rlc_serdes(adev);
+}
+
+static void gfx_v8_0_rlc_reset(struct amdgpu_device *adev)
+{
+       u32 tmp = RREG32(mmGRBM_SOFT_RESET);
+
+       tmp = REG_SET_FIELD(tmp, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
+       WREG32(mmGRBM_SOFT_RESET, tmp);
+       udelay(50);
+       tmp = REG_SET_FIELD(tmp, GRBM_SOFT_RESET, SOFT_RESET_RLC, 0);
+       WREG32(mmGRBM_SOFT_RESET, tmp);
+       udelay(50);
+}
+
+static void gfx_v8_0_rlc_start(struct amdgpu_device *adev)
+{
+       u32 tmp = RREG32(mmRLC_CNTL);
+
+       tmp = REG_SET_FIELD(tmp, RLC_CNTL, RLC_ENABLE_F32, 1);
+       WREG32(mmRLC_CNTL, tmp);
+
+       /* Carrizo enables the CP interrupt only after the CP has been initialized */
+       if (adev->asic_type != CHIP_CARRIZO)
+               gfx_v8_0_enable_gui_idle_interrupt(adev, true);
+
+       udelay(50);
+}
+
+static int gfx_v8_0_rlc_load_microcode(struct amdgpu_device *adev)
+{
+       const struct rlc_firmware_header_v2_0 *hdr;
+       const __le32 *fw_data;
+       unsigned i, fw_size;
+
+       if (!adev->gfx.rlc_fw)
+               return -EINVAL;
+
+       hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
+       amdgpu_ucode_print_rlc_hdr(&hdr->header);
+       adev->gfx.rlc_fw_version = le32_to_cpu(hdr->header.ucode_version);
+
+       fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
+                          le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+       fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
+
+       WREG32(mmRLC_GPM_UCODE_ADDR, 0);
+       for (i = 0; i < fw_size; i++)
+               WREG32(mmRLC_GPM_UCODE_DATA, le32_to_cpup(fw_data++));
+       WREG32(mmRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version);
+
+       return 0;
+}
+
+static int gfx_v8_0_rlc_resume(struct amdgpu_device *adev)
+{
+       int r;
+
+       gfx_v8_0_rlc_stop(adev);
+
+       /* disable CG */
+       WREG32(mmRLC_CGCG_CGLS_CTRL, 0);
+
+       /* disable PG */
+       WREG32(mmRLC_PG_CNTL, 0);
+
+       gfx_v8_0_rlc_reset(adev);
+
+       if (!adev->firmware.smu_load) {
+               /* legacy rlc firmware loading */
+               r = gfx_v8_0_rlc_load_microcode(adev);
+               if (r)
+                       return r;
+       } else {
+               r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
+                                               AMDGPU_UCODE_ID_RLC_G);
+               if (r)
+                       return -EINVAL;
+       }
+
+       gfx_v8_0_rlc_start(adev);
+
+       return 0;
+}
+
+static void gfx_v8_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
+{
+       int i;
+       u32 tmp = RREG32(mmCP_ME_CNTL);
+
+       if (enable) {
+               tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, 0);
+               tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, 0);
+               tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, 0);
+       } else {
+               tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, 1);
+               tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, 1);
+               tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, 1);
+               for (i = 0; i < adev->gfx.num_gfx_rings; i++)
+                       adev->gfx.gfx_ring[i].ready = false;
+       }
+       WREG32(mmCP_ME_CNTL, tmp);
+       udelay(50);
+}
+
+static int gfx_v8_0_cp_gfx_load_microcode(struct amdgpu_device *adev)
+{
+       const struct gfx_firmware_header_v1_0 *pfp_hdr;
+       const struct gfx_firmware_header_v1_0 *ce_hdr;
+       const struct gfx_firmware_header_v1_0 *me_hdr;
+       const __le32 *fw_data;
+       unsigned i, fw_size;
+
+       if (!adev->gfx.me_fw || !adev->gfx.pfp_fw || !adev->gfx.ce_fw)
+               return -EINVAL;
+
+       pfp_hdr = (const struct gfx_firmware_header_v1_0 *)
+               adev->gfx.pfp_fw->data;
+       ce_hdr = (const struct gfx_firmware_header_v1_0 *)
+               adev->gfx.ce_fw->data;
+       me_hdr = (const struct gfx_firmware_header_v1_0 *)
+               adev->gfx.me_fw->data;
+
+       amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);
+       amdgpu_ucode_print_gfx_hdr(&ce_hdr->header);
+       amdgpu_ucode_print_gfx_hdr(&me_hdr->header);
+       adev->gfx.pfp_fw_version = le32_to_cpu(pfp_hdr->header.ucode_version);
+       adev->gfx.ce_fw_version = le32_to_cpu(ce_hdr->header.ucode_version);
+       adev->gfx.me_fw_version = le32_to_cpu(me_hdr->header.ucode_version);
+
+       gfx_v8_0_cp_gfx_enable(adev, false);
+
+       /* PFP */
+       fw_data = (const __le32 *)
+               (adev->gfx.pfp_fw->data +
+                le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes));
+       fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes) / 4;
+       WREG32(mmCP_PFP_UCODE_ADDR, 0);
+       for (i = 0; i < fw_size; i++)
+               WREG32(mmCP_PFP_UCODE_DATA, le32_to_cpup(fw_data++));
+       WREG32(mmCP_PFP_UCODE_ADDR, adev->gfx.pfp_fw_version);
+
+       /* CE */
+       fw_data = (const __le32 *)
+               (adev->gfx.ce_fw->data +
+                le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes));
+       fw_size = le32_to_cpu(ce_hdr->header.ucode_size_bytes) / 4;
+       WREG32(mmCP_CE_UCODE_ADDR, 0);
+       for (i = 0; i < fw_size; i++)
+               WREG32(mmCP_CE_UCODE_DATA, le32_to_cpup(fw_data++));
+       WREG32(mmCP_CE_UCODE_ADDR, adev->gfx.ce_fw_version);
+
+       /* ME */
+       fw_data = (const __le32 *)
+               (adev->gfx.me_fw->data +
+                le32_to_cpu(me_hdr->header.ucode_array_offset_bytes));
+       fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes) / 4;
+       WREG32(mmCP_ME_RAM_WADDR, 0);
+       for (i = 0; i < fw_size; i++)
+               WREG32(mmCP_ME_RAM_DATA, le32_to_cpup(fw_data++));
+       WREG32(mmCP_ME_RAM_WADDR, adev->gfx.me_fw_version);
+
+       return 0;
+}
+
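+/* Return the size, in dwords, of the clear state indirect buffer. */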
+static u32 gfx_v8_0_get_csb_size(struct amdgpu_device *adev)
+{
+       u32 count = 0;
+       const struct cs_section_def *sect = NULL;
+       const struct cs_extent_def *ext = NULL;
+
+       /* begin clear state */
+       count += 2;
+       /* context control state */
+       count += 3;
+
+       for (sect = vi_cs_data; sect->section != NULL; ++sect) {
+               for (ext = sect->section; ext->extent != NULL; ++ext) {
+                       if (sect->id == SECT_CONTEXT)
+                               count += 2 + ext->reg_count;
+                       else
+                               return 0;
+               }
+       }
+       /* pa_sc_raster_config/pa_sc_raster_config1 */
+       count += 4;
+       /* end clear state */
+       count += 2;
+       /* clear state */
+       count += 2;
+
+       return count;
+}
+
+static int gfx_v8_0_cp_gfx_start(struct amdgpu_device *adev)
+{
+       struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0];
+       const struct cs_section_def *sect = NULL;
+       const struct cs_extent_def *ext = NULL;
+       int r, i;
+
+       /* init the CP */
+       WREG32(mmCP_MAX_CONTEXT, adev->gfx.config.max_hw_contexts - 1);
+       WREG32(mmCP_ENDIAN_SWAP, 0);
+       WREG32(mmCP_DEVICE_ID, 1);
+
+       gfx_v8_0_cp_gfx_enable(adev, true);
+
+       r = amdgpu_ring_lock(ring, gfx_v8_0_get_csb_size(adev) + 4);
+       if (r) {
+               DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
+               return r;
+       }
+
+       /* clear state buffer */
+       amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+       amdgpu_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
+
+       amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
+       amdgpu_ring_write(ring, 0x80000000);
+       amdgpu_ring_write(ring, 0x80000000);
+
+       for (sect = vi_cs_data; sect->section != NULL; ++sect) {
+               for (ext = sect->section; ext->extent != NULL; ++ext) {
+                       if (sect->id == SECT_CONTEXT) {
+                               amdgpu_ring_write(ring,
+                                      PACKET3(PACKET3_SET_CONTEXT_REG,
+                                              ext->reg_count));
+                               amdgpu_ring_write(ring,
+                                      ext->reg_index - PACKET3_SET_CONTEXT_REG_START);
+                               for (i = 0; i < ext->reg_count; i++)
+                                       amdgpu_ring_write(ring, ext->extent[i]);
+                       }
+               }
+       }
+
+       amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
+       amdgpu_ring_write(ring, mmPA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
+       switch (adev->asic_type) {
+       case CHIP_TONGA:
+               amdgpu_ring_write(ring, 0x16000012);
+               amdgpu_ring_write(ring, 0x0000002A);
+               break;
+       case CHIP_TOPAZ:
+       case CHIP_CARRIZO:
+               amdgpu_ring_write(ring, 0x00000002);
+               amdgpu_ring_write(ring, 0x00000000);
+               break;
+       default:
+               BUG();
+       }
+
+       amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+       amdgpu_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
+
+       amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
+       amdgpu_ring_write(ring, 0);
+
+       /* init the CE partitions */
+       amdgpu_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
+       amdgpu_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
+       amdgpu_ring_write(ring, 0x8000);
+       amdgpu_ring_write(ring, 0x8000);
+
+       amdgpu_ring_unlock_commit(ring);
+
+       return 0;
+}
+
+static int gfx_v8_0_cp_gfx_resume(struct amdgpu_device *adev)
+{
+       struct amdgpu_ring *ring;
+       u32 tmp;
+       u32 rb_bufsz;
+       u64 rb_addr, rptr_addr;
+       int r;
+
+       /* Set the write pointer delay */
+       WREG32(mmCP_RB_WPTR_DELAY, 0);
+
+       /* set the RB to use vmid 0 */
+       WREG32(mmCP_RB_VMID, 0);
+
+       /* Set ring buffer size */
+       ring = &adev->gfx.gfx_ring[0];
+       rb_bufsz = order_base_2(ring->ring_size / 8);
+       tmp = REG_SET_FIELD(0, CP_RB0_CNTL, RB_BUFSZ, rb_bufsz);
+       tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, RB_BLKSZ, rb_bufsz - 2);
+       tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, MTYPE, 3);
+       tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, MIN_IB_AVAILSZ, 1);
+#ifdef __BIG_ENDIAN
+       tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, BUF_SWAP, 1);
+#endif
+       WREG32(mmCP_RB0_CNTL, tmp);
+
+       /* Initialize the ring buffer's read and write pointers */
+       WREG32(mmCP_RB0_CNTL, tmp | CP_RB0_CNTL__RB_RPTR_WR_ENA_MASK);
+       ring->wptr = 0;
+       WREG32(mmCP_RB0_WPTR, ring->wptr);
+
+       /* set the wb address whether it's enabled or not */
+       rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
+       WREG32(mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
+       WREG32(mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & 0xFF);
+
+       mdelay(1);
+       WREG32(mmCP_RB0_CNTL, tmp);
+
+       rb_addr = ring->gpu_addr >> 8;
+       WREG32(mmCP_RB0_BASE, rb_addr);
+       WREG32(mmCP_RB0_BASE_HI, upper_32_bits(rb_addr));
+
+       /* no gfx doorbells on iceland */
+       if (adev->asic_type != CHIP_TOPAZ) {
+               tmp = RREG32(mmCP_RB_DOORBELL_CONTROL);
+               if (ring->use_doorbell) {
+                       tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
+                                           DOORBELL_OFFSET, ring->doorbell_index);
+                       tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
+                                           DOORBELL_EN, 1);
+               } else {
+                       tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
+                                           DOORBELL_EN, 0);
+               }
+               WREG32(mmCP_RB_DOORBELL_CONTROL, tmp);
+
+               if (adev->asic_type == CHIP_TONGA) {
+                       tmp = REG_SET_FIELD(0, CP_RB_DOORBELL_RANGE_LOWER,
+                                           DOORBELL_RANGE_LOWER,
+                                           AMDGPU_DOORBELL_GFX_RING0);
+                       WREG32(mmCP_RB_DOORBELL_RANGE_LOWER, tmp);
+
+                       WREG32(mmCP_RB_DOORBELL_RANGE_UPPER,
+                              CP_RB_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER_MASK);
+               }
+
+       }
+
+       /* start the ring */
+       gfx_v8_0_cp_gfx_start(adev);
+       ring->ready = true;
+       r = amdgpu_ring_test_ring(ring);
+       if (r) {
+               ring->ready = false;
+               return r;
+       }
+
+       return 0;
+}
+
+static void gfx_v8_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
+{
+       int i;
+
+       if (enable) {
+               WREG32(mmCP_MEC_CNTL, 0);
+       } else {
+               WREG32(mmCP_MEC_CNTL, (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
+               for (i = 0; i < adev->gfx.num_compute_rings; i++)
+                       adev->gfx.compute_ring[i].ready = false;
+       }
+       udelay(50);
+}
+
+static int gfx_v8_0_cp_compute_start(struct amdgpu_device *adev)
+{
+       gfx_v8_0_cp_compute_enable(adev, true);
+
+       return 0;
+}
+
+static int gfx_v8_0_cp_compute_load_microcode(struct amdgpu_device *adev)
+{
+       const struct gfx_firmware_header_v1_0 *mec_hdr;
+       const __le32 *fw_data;
+       unsigned i, fw_size;
+
+       if (!adev->gfx.mec_fw)
+               return -EINVAL;
+
+       gfx_v8_0_cp_compute_enable(adev, false);
+
+       mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
+       amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
+       adev->gfx.mec_fw_version = le32_to_cpu(mec_hdr->header.ucode_version);
+
+       fw_data = (const __le32 *)
+               (adev->gfx.mec_fw->data +
+                le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
+       fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes) / 4;
+
+       /* MEC1 */
+       WREG32(mmCP_MEC_ME1_UCODE_ADDR, 0);
+       for (i = 0; i < fw_size; i++)
+               WREG32(mmCP_MEC_ME1_UCODE_DATA, le32_to_cpup(fw_data+i));
+       WREG32(mmCP_MEC_ME1_UCODE_ADDR, adev->gfx.mec_fw_version);
+
+       /* Loading MEC2 firmware is only necessary if MEC2 should run different microcode than MEC1. */
+       if (adev->gfx.mec2_fw) {
+               const struct gfx_firmware_header_v1_0 *mec2_hdr;
+
+               mec2_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
+               amdgpu_ucode_print_gfx_hdr(&mec2_hdr->header);
+               adev->gfx.mec2_fw_version = le32_to_cpu(mec2_hdr->header.ucode_version);
+
+               fw_data = (const __le32 *)
+                       (adev->gfx.mec2_fw->data +
+                        le32_to_cpu(mec2_hdr->header.ucode_array_offset_bytes));
+               fw_size = le32_to_cpu(mec2_hdr->header.ucode_size_bytes) / 4;
+
+               WREG32(mmCP_MEC_ME2_UCODE_ADDR, 0);
+               for (i = 0; i < fw_size; i++)
+                       WREG32(mmCP_MEC_ME2_UCODE_DATA, le32_to_cpup(fw_data+i));
+               WREG32(mmCP_MEC_ME2_UCODE_ADDR, adev->gfx.mec2_fw_version);
+       }
+
+       return 0;
+}
+
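+/*
+ * VI memory queue descriptor (MQD).  One MQD per compute queue; it holds
+ * the HQD register state the CP uses to program the queue.
+ */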
+struct vi_mqd {
+       uint32_t header;  /* ordinal0 */
+       uint32_t compute_dispatch_initiator;  /* ordinal1 */
+       uint32_t compute_dim_x;  /* ordinal2 */
+       uint32_t compute_dim_y;  /* ordinal3 */
+       uint32_t compute_dim_z;  /* ordinal4 */
+       uint32_t compute_start_x;  /* ordinal5 */
+       uint32_t compute_start_y;  /* ordinal6 */
+       uint32_t compute_start_z;  /* ordinal7 */
+       uint32_t compute_num_thread_x;  /* ordinal8 */
+       uint32_t compute_num_thread_y;  /* ordinal9 */
+       uint32_t compute_num_thread_z;  /* ordinal10 */
+       uint32_t compute_pipelinestat_enable;  /* ordinal11 */
+       uint32_t compute_perfcount_enable;  /* ordinal12 */
+       uint32_t compute_pgm_lo;  /* ordinal13 */
+       uint32_t compute_pgm_hi;  /* ordinal14 */
+       uint32_t compute_tba_lo;  /* ordinal15 */
+       uint32_t compute_tba_hi;  /* ordinal16 */
+       uint32_t compute_tma_lo;  /* ordinal17 */
+       uint32_t compute_tma_hi;  /* ordinal18 */
+       uint32_t compute_pgm_rsrc1;  /* ordinal19 */
+       uint32_t compute_pgm_rsrc2;  /* ordinal20 */
+       uint32_t compute_vmid;  /* ordinal21 */
+       uint32_t compute_resource_limits;  /* ordinal22 */
+       uint32_t compute_static_thread_mgmt_se0;  /* ordinal23 */
+       uint32_t compute_static_thread_mgmt_se1;  /* ordinal24 */
+       uint32_t compute_tmpring_size;  /* ordinal25 */
+       uint32_t compute_static_thread_mgmt_se2;  /* ordinal26 */
+       uint32_t compute_static_thread_mgmt_se3;  /* ordinal27 */
+       uint32_t compute_restart_x;  /* ordinal28 */
+       uint32_t compute_restart_y;  /* ordinal29 */
+       uint32_t compute_restart_z;  /* ordinal30 */
+       uint32_t compute_thread_trace_enable;  /* ordinal31 */
+       uint32_t compute_misc_reserved;  /* ordinal32 */
+       uint32_t compute_dispatch_id;  /* ordinal33 */
+       uint32_t compute_threadgroup_id;  /* ordinal34 */
+       uint32_t compute_relaunch;  /* ordinal35 */
+       uint32_t compute_wave_restore_addr_lo;  /* ordinal36 */
+       uint32_t compute_wave_restore_addr_hi;  /* ordinal37 */
+       uint32_t compute_wave_restore_control;  /* ordinal38 */
+       uint32_t reserved9;  /* ordinal39 */
+       uint32_t reserved10;  /* ordinal40 */
+       uint32_t reserved11;  /* ordinal41 */
+       uint32_t reserved12;  /* ordinal42 */
+       uint32_t reserved13;  /* ordinal43 */
+       uint32_t reserved14;  /* ordinal44 */
+       uint32_t reserved15;  /* ordinal45 */
+       uint32_t reserved16;  /* ordinal46 */
+       uint32_t reserved17;  /* ordinal47 */
+       uint32_t reserved18;  /* ordinal48 */
+       uint32_t reserved19;  /* ordinal49 */
+       uint32_t reserved20;  /* ordinal50 */
+       uint32_t reserved21;  /* ordinal51 */
+       uint32_t reserved22;  /* ordinal52 */
+       uint32_t reserved23;  /* ordinal53 */
+       uint32_t reserved24;  /* ordinal54 */
+       uint32_t reserved25;  /* ordinal55 */
+       uint32_t reserved26;  /* ordinal56 */
+       uint32_t reserved27;  /* ordinal57 */
+       uint32_t reserved28;  /* ordinal58 */
+       uint32_t reserved29;  /* ordinal59 */
+       uint32_t reserved30;  /* ordinal60 */
+       uint32_t reserved31;  /* ordinal61 */
+       uint32_t reserved32;  /* ordinal62 */
+       uint32_t reserved33;  /* ordinal63 */
+       uint32_t reserved34;  /* ordinal64 */
+       uint32_t compute_user_data_0;  /* ordinal65 */
+       uint32_t compute_user_data_1;  /* ordinal66 */
+       uint32_t compute_user_data_2;  /* ordinal67 */
+       uint32_t compute_user_data_3;  /* ordinal68 */
+       uint32_t compute_user_data_4;  /* ordinal69 */
+       uint32_t compute_user_data_5;  /* ordinal70 */
+       uint32_t compute_user_data_6;  /* ordinal71 */
+       uint32_t compute_user_data_7;  /* ordinal72 */
+       uint32_t compute_user_data_8;  /* ordinal73 */
+       uint32_t compute_user_data_9;  /* ordinal74 */
+       uint32_t compute_user_data_10;  /* ordinal75 */
+       uint32_t compute_user_data_11;  /* ordinal76 */
+       uint32_t compute_user_data_12;  /* ordinal77 */
+       uint32_t compute_user_data_13;  /* ordinal78 */
+       uint32_t compute_user_data_14;  /* ordinal79 */
+       uint32_t compute_user_data_15;  /* ordinal80 */
+       uint32_t cp_compute_csinvoc_count_lo;  /* ordinal81 */
+       uint32_t cp_compute_csinvoc_count_hi;  /* ordinal82 */
+       uint32_t reserved35;  /* ordinal83 */
+       uint32_t reserved36;  /* ordinal84 */
+       uint32_t reserved37;  /* ordinal85 */
+       uint32_t cp_mqd_query_time_lo;  /* ordinal86 */
+       uint32_t cp_mqd_query_time_hi;  /* ordinal87 */
+       uint32_t cp_mqd_connect_start_time_lo;  /* ordinal88 */
+       uint32_t cp_mqd_connect_start_time_hi;  /* ordinal89 */
+       uint32_t cp_mqd_connect_end_time_lo;  /* ordinal90 */
+       uint32_t cp_mqd_connect_end_time_hi;  /* ordinal91 */
+       uint32_t cp_mqd_connect_end_wf_count;  /* ordinal92 */
+       uint32_t cp_mqd_connect_end_pq_rptr;  /* ordinal93 */
+       uint32_t cp_mqd_connect_end_pq_wptr;  /* ordinal94 */
+       uint32_t cp_mqd_connect_end_ib_rptr;  /* ordinal95 */
+       uint32_t reserved38;  /* ordinal96 */
+       uint32_t reserved39;  /* ordinal97 */
+       uint32_t cp_mqd_save_start_time_lo;  /* ordinal98 */
+       uint32_t cp_mqd_save_start_time_hi;  /* ordinal99 */
+       uint32_t cp_mqd_save_end_time_lo;  /* ordinal100 */
+       uint32_t cp_mqd_save_end_time_hi;  /* ordinal101 */
+       uint32_t cp_mqd_restore_start_time_lo;  /* ordinal102 */
+       uint32_t cp_mqd_restore_start_time_hi;  /* ordinal103 */
+       uint32_t cp_mqd_restore_end_time_lo;  /* ordinal104 */
+       uint32_t cp_mqd_restore_end_time_hi;  /* ordinal105 */
+       uint32_t reserved40;  /* ordinal106 */
+       uint32_t reserved41;  /* ordinal107 */
+       uint32_t gds_cs_ctxsw_cnt0;  /* ordinal108 */
+       uint32_t gds_cs_ctxsw_cnt1;  /* ordinal109 */
+       uint32_t gds_cs_ctxsw_cnt2;  /* ordinal110 */
+       uint32_t gds_cs_ctxsw_cnt3;  /* ordinal111 */
+       uint32_t reserved42;  /* ordinal112 */
+       uint32_t reserved43;  /* ordinal113 */
+       uint32_t cp_pq_exe_status_lo;  /* ordinal114 */
+       uint32_t cp_pq_exe_status_hi;  /* ordinal115 */
+       uint32_t cp_packet_id_lo;  /* ordinal116 */
+       uint32_t cp_packet_id_hi;  /* ordinal117 */
+       uint32_t cp_packet_exe_status_lo;  /* ordinal118 */
+       uint32_t cp_packet_exe_status_hi;  /* ordinal119 */
+       uint32_t gds_save_base_addr_lo;  /* ordinal120 */
+       uint32_t gds_save_base_addr_hi;  /* ordinal121 */
+       uint32_t gds_save_mask_lo;  /* ordinal122 */
+       uint32_t gds_save_mask_hi;  /* ordinal123 */
+       uint32_t ctx_save_base_addr_lo;  /* ordinal124 */
+       uint32_t ctx_save_base_addr_hi;  /* ordinal125 */
+       uint32_t reserved44;  /* ordinal126 */
+       uint32_t reserved45;  /* ordinal127 */
+       uint32_t cp_mqd_base_addr_lo;  /* ordinal128 */
+       uint32_t cp_mqd_base_addr_hi;  /* ordinal129 */
+       uint32_t cp_hqd_active;  /* ordinal130 */
+       uint32_t cp_hqd_vmid;  /* ordinal131 */
+       uint32_t cp_hqd_persistent_state;  /* ordinal132 */
+       uint32_t cp_hqd_pipe_priority;  /* ordinal133 */
+       uint32_t cp_hqd_queue_priority;  /* ordinal134 */
+       uint32_t cp_hqd_quantum;  /* ordinal135 */
+       uint32_t cp_hqd_pq_base_lo;  /* ordinal136 */
+       uint32_t cp_hqd_pq_base_hi;  /* ordinal137 */
+       uint32_t cp_hqd_pq_rptr;  /* ordinal138 */
+       uint32_t cp_hqd_pq_rptr_report_addr_lo;  /* ordinal139 */
+       uint32_t cp_hqd_pq_rptr_report_addr_hi;  /* ordinal140 */
+       uint32_t cp_hqd_pq_wptr_poll_addr;  /* ordinal141 */
+       uint32_t cp_hqd_pq_wptr_poll_addr_hi;  /* ordinal142 */
+       uint32_t cp_hqd_pq_doorbell_control;  /* ordinal143 */
+       uint32_t cp_hqd_pq_wptr;  /* ordinal144 */
+       uint32_t cp_hqd_pq_control;  /* ordinal145 */
+       uint32_t cp_hqd_ib_base_addr_lo;  /* ordinal146 */
+       uint32_t cp_hqd_ib_base_addr_hi;  /* ordinal147 */
+       uint32_t cp_hqd_ib_rptr;  /* ordinal148 */
+       uint32_t cp_hqd_ib_control;  /* ordinal149 */
+       uint32_t cp_hqd_iq_timer;  /* ordinal150 */
+       uint32_t cp_hqd_iq_rptr;  /* ordinal151 */
+       uint32_t cp_hqd_dequeue_request;  /* ordinal152 */
+       uint32_t cp_hqd_dma_offload;  /* ordinal153 */
+       uint32_t cp_hqd_sema_cmd;  /* ordinal154 */
+       uint32_t cp_hqd_msg_type;  /* ordinal155 */
+       uint32_t cp_hqd_atomic0_preop_lo;  /* ordinal156 */
+       uint32_t cp_hqd_atomic0_preop_hi;  /* ordinal157 */
+       uint32_t cp_hqd_atomic1_preop_lo;  /* ordinal158 */
+       uint32_t cp_hqd_atomic1_preop_hi;  /* ordinal159 */
+       uint32_t cp_hqd_hq_status0;  /* ordinal160 */
+       uint32_t cp_hqd_hq_control0;  /* ordinal161 */
+       uint32_t cp_mqd_control;  /* ordinal162 */
+       uint32_t cp_hqd_hq_status1;  /* ordinal163 */
+       uint32_t cp_hqd_hq_control1;  /* ordinal164 */
+       uint32_t cp_hqd_eop_base_addr_lo;  /* ordinal165 */
+       uint32_t cp_hqd_eop_base_addr_hi;  /* ordinal166 */
+       uint32_t cp_hqd_eop_control;  /* ordinal167 */
+       uint32_t cp_hqd_eop_rptr;  /* ordinal168 */
+       uint32_t cp_hqd_eop_wptr;  /* ordinal169 */
+       uint32_t cp_hqd_eop_done_events;  /* ordinal170 */
+       uint32_t cp_hqd_ctx_save_base_addr_lo;  /* ordinal171 */
+       uint32_t cp_hqd_ctx_save_base_addr_hi;  /* ordinal172 */
+       uint32_t cp_hqd_ctx_save_control;  /* ordinal173 */
+       uint32_t cp_hqd_cntl_stack_offset;  /* ordinal174 */
+       uint32_t cp_hqd_cntl_stack_size;  /* ordinal175 */
+       uint32_t cp_hqd_wg_state_offset;  /* ordinal176 */
+       uint32_t cp_hqd_ctx_save_size;  /* ordinal177 */
+       uint32_t cp_hqd_gds_resource_state;  /* ordinal178 */
+       uint32_t cp_hqd_error;  /* ordinal179 */
+       uint32_t cp_hqd_eop_wptr_mem;  /* ordinal180 */
+       uint32_t cp_hqd_eop_dones;  /* ordinal181 */
+       uint32_t reserved46;  /* ordinal182 */
+       uint32_t reserved47;  /* ordinal183 */
+       uint32_t reserved48;  /* ordinal184 */
+       uint32_t reserved49;  /* ordinal185 */
+       uint32_t reserved50;  /* ordinal186 */
+       uint32_t reserved51;  /* ordinal187 */
+       uint32_t reserved52;  /* ordinal188 */
+       uint32_t reserved53;  /* ordinal189 */
+       uint32_t reserved54;  /* ordinal190 */
+       uint32_t reserved55;  /* ordinal191 */
+       uint32_t iqtimer_pkt_header;  /* ordinal192 */
+       uint32_t iqtimer_pkt_dw0;  /* ordinal193 */
+       uint32_t iqtimer_pkt_dw1;  /* ordinal194 */
+       uint32_t iqtimer_pkt_dw2;  /* ordinal195 */
+       uint32_t iqtimer_pkt_dw3;  /* ordinal196 */
+       uint32_t iqtimer_pkt_dw4;  /* ordinal197 */
+       uint32_t iqtimer_pkt_dw5;  /* ordinal198 */
+       uint32_t iqtimer_pkt_dw6;  /* ordinal199 */
+       uint32_t iqtimer_pkt_dw7;  /* ordinal200 */
+       uint32_t iqtimer_pkt_dw8;  /* ordinal201 */
+       uint32_t iqtimer_pkt_dw9;  /* ordinal202 */
+       uint32_t iqtimer_pkt_dw10;  /* ordinal203 */
+       uint32_t iqtimer_pkt_dw11;  /* ordinal204 */
+       uint32_t iqtimer_pkt_dw12;  /* ordinal205 */
+       uint32_t iqtimer_pkt_dw13;  /* ordinal206 */
+       uint32_t iqtimer_pkt_dw14;  /* ordinal207 */
+       uint32_t iqtimer_pkt_dw15;  /* ordinal208 */
+       uint32_t iqtimer_pkt_dw16;  /* ordinal209 */
+       uint32_t iqtimer_pkt_dw17;  /* ordinal210 */
+       uint32_t iqtimer_pkt_dw18;  /* ordinal211 */
+       uint32_t iqtimer_pkt_dw19;  /* ordinal212 */
+       uint32_t iqtimer_pkt_dw20;  /* ordinal213 */
+       uint32_t iqtimer_pkt_dw21;  /* ordinal214 */
+       uint32_t iqtimer_pkt_dw22;  /* ordinal215 */
+       uint32_t iqtimer_pkt_dw23;  /* ordinal216 */
+       uint32_t iqtimer_pkt_dw24;  /* ordinal217 */
+       uint32_t iqtimer_pkt_dw25;  /* ordinal218 */
+       uint32_t iqtimer_pkt_dw26;  /* ordinal219 */
+       uint32_t iqtimer_pkt_dw27;  /* ordinal220 */
+       uint32_t iqtimer_pkt_dw28;  /* ordinal221 */
+       uint32_t iqtimer_pkt_dw29;  /* ordinal222 */
+       uint32_t iqtimer_pkt_dw30;  /* ordinal223 */
+       uint32_t iqtimer_pkt_dw31;  /* ordinal224 */
+       uint32_t reserved56;  /* ordinal225 */
+       uint32_t reserved57;  /* ordinal226 */
+       uint32_t reserved58;  /* ordinal227 */
+       uint32_t set_resources_header;  /* ordinal228 */
+       uint32_t set_resources_dw1;  /* ordinal229 */
+       uint32_t set_resources_dw2;  /* ordinal230 */
+       uint32_t set_resources_dw3;  /* ordinal231 */
+       uint32_t set_resources_dw4;  /* ordinal232 */
+       uint32_t set_resources_dw5;  /* ordinal233 */
+       uint32_t set_resources_dw6;  /* ordinal234 */
+       uint32_t set_resources_dw7;  /* ordinal235 */
+       uint32_t reserved59;  /* ordinal236 */
+       uint32_t reserved60;  /* ordinal237 */
+       uint32_t reserved61;  /* ordinal238 */
+       uint32_t reserved62;  /* ordinal239 */
+       uint32_t reserved63;  /* ordinal240 */
+       uint32_t reserved64;  /* ordinal241 */
+       uint32_t reserved65;  /* ordinal242 */
+       uint32_t reserved66;  /* ordinal243 */
+       uint32_t reserved67;  /* ordinal244 */
+       uint32_t reserved68;  /* ordinal245 */
+       uint32_t reserved69;  /* ordinal246 */
+       uint32_t reserved70;  /* ordinal247 */
+       uint32_t reserved71;  /* ordinal248 */
+       uint32_t reserved72;  /* ordinal249 */
+       uint32_t reserved73;  /* ordinal250 */
+       uint32_t reserved74;  /* ordinal251 */
+       uint32_t reserved75;  /* ordinal252 */
+       uint32_t reserved76;  /* ordinal253 */
+       uint32_t reserved77;  /* ordinal254 */
+       uint32_t reserved78;  /* ordinal255 */
+
+       uint32_t reserved_t[256]; /* Reserve 256 dword buffer used by ucode */
+};
+
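+/* Unpin and free the MQD buffer objects of all compute rings. */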
+static void gfx_v8_0_cp_compute_fini(struct amdgpu_device *adev)
+{
+       int i, r;
+
+       for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+               struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
+
+               if (ring->mqd_obj) {
+                       r = amdgpu_bo_reserve(ring->mqd_obj, false);
+                       if (unlikely(r != 0))
+                               dev_warn(adev->dev, "(%d) reserve MQD bo failed\n", r);
+
+                       amdgpu_bo_unpin(ring->mqd_obj);
+                       amdgpu_bo_unreserve(ring->mqd_obj);
+
+                       amdgpu_bo_unref(&ring->mqd_obj);
+                       ring->mqd_obj = NULL;
+               }
+       }
+}
+
+static int gfx_v8_0_cp_compute_resume(struct amdgpu_device *adev)
+{
+       int r, i, j;
+       u32 tmp;
+       bool use_doorbell = true;
+       u64 hqd_gpu_addr;
+       u64 mqd_gpu_addr;
+       u64 eop_gpu_addr;
+       u64 wb_gpu_addr;
+       u32 *buf;
+       struct vi_mqd *mqd;
+
+       /* init the pipes */
+       mutex_lock(&adev->srbm_mutex);
+       for (i = 0; i < (adev->gfx.mec.num_pipe * adev->gfx.mec.num_mec); i++) {
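+               /* pipes 0-3 belong to MEC1 (ME 1), the rest to MEC2 (ME 2) */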
+               int me = (i < 4) ? 1 : 2;
+               int pipe = (i < 4) ? i : (i - 4);
+
+               eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr + (i * MEC_HPD_SIZE);
+               eop_gpu_addr >>= 8;
+
+               vi_srbm_select(adev, me, pipe, 0, 0);
+
+               /* write the EOP addr */
+               WREG32(mmCP_HQD_EOP_BASE_ADDR, eop_gpu_addr);
+               WREG32(mmCP_HQD_EOP_BASE_ADDR_HI, upper_32_bits(eop_gpu_addr));
+
+               /* set the VMID assigned */
+               WREG32(mmCP_HQD_VMID, 0);
+
+               /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
+               tmp = RREG32(mmCP_HQD_EOP_CONTROL);
+               tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
+                                   (order_base_2(MEC_HPD_SIZE / 4) - 1));
+               WREG32(mmCP_HQD_EOP_CONTROL, tmp);
+       }
+       vi_srbm_select(adev, 0, 0, 0, 0);
+       mutex_unlock(&adev->srbm_mutex);
+
+       /* init the queues.  Just two for now. */
+       for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+               struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
+
+               if (ring->mqd_obj == NULL) {
+                       r = amdgpu_bo_create(adev,
+                                            sizeof(struct vi_mqd),
+                                            PAGE_SIZE, true,
+                                            AMDGPU_GEM_DOMAIN_GTT, 0, NULL,
+                                            &ring->mqd_obj);
+                       if (r) {
+                               dev_warn(adev->dev, "(%d) create MQD bo failed\n", r);
+                               return r;
+                       }
+               }
+
+               r = amdgpu_bo_reserve(ring->mqd_obj, false);
+               if (unlikely(r != 0)) {
+                       gfx_v8_0_cp_compute_fini(adev);
+                       return r;
+               }
+               r = amdgpu_bo_pin(ring->mqd_obj, AMDGPU_GEM_DOMAIN_GTT,
+                                 &mqd_gpu_addr);
+               if (r) {
+                       dev_warn(adev->dev, "(%d) pin MQD bo failed\n", r);
+                       gfx_v8_0_cp_compute_fini(adev);
+                       return r;
+               }
+               r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&buf);
+               if (r) {
+                       dev_warn(adev->dev, "(%d) map MQD bo failed\n", r);
+                       gfx_v8_0_cp_compute_fini(adev);
+                       return r;
+               }
+
+               /* init the mqd struct */
+               memset(buf, 0, sizeof(struct vi_mqd));
+
+               mqd = (struct vi_mqd *)buf;
+               mqd->header = 0xC0310800;
+               mqd->compute_pipelinestat_enable = 0x00000001;
+               mqd->compute_static_thread_mgmt_se0 = 0xffffffff;
+               mqd->compute_static_thread_mgmt_se1 = 0xffffffff;
+               mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
+               mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
+               mqd->compute_misc_reserved = 0x00000003;
+
+               mutex_lock(&adev->srbm_mutex);
+               vi_srbm_select(adev, ring->me,
+                              ring->pipe,
+                              ring->queue, 0);
+
+               /* disable wptr polling */
+               tmp = RREG32(mmCP_PQ_WPTR_POLL_CNTL);
+               tmp = REG_SET_FIELD(tmp, CP_PQ_WPTR_POLL_CNTL, EN, 0);
+               WREG32(mmCP_PQ_WPTR_POLL_CNTL, tmp);
+
+               mqd->cp_hqd_eop_base_addr_lo =
+                       RREG32(mmCP_HQD_EOP_BASE_ADDR);
+               mqd->cp_hqd_eop_base_addr_hi =
+                       RREG32(mmCP_HQD_EOP_BASE_ADDR_HI);
+
+               /* enable doorbell? */
+               tmp = RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL);
+               if (use_doorbell) {
+                       tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
+               } else {
+                       tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 0);
+               }
+               WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, tmp);
+               mqd->cp_hqd_pq_doorbell_control = tmp;
+
+               /* disable the queue if it's active */
+               mqd->cp_hqd_dequeue_request = 0;
+               mqd->cp_hqd_pq_rptr = 0;
+               mqd->cp_hqd_pq_wptr = 0;
+               if (RREG32(mmCP_HQD_ACTIVE) & 1) {
+                       WREG32(mmCP_HQD_DEQUEUE_REQUEST, 1);
+                       for (j = 0; j < adev->usec_timeout; j++) {
+                               if (!(RREG32(mmCP_HQD_ACTIVE) & 1))
+                                       break;
+                               udelay(1);
+                       }
+                       WREG32(mmCP_HQD_DEQUEUE_REQUEST, mqd->cp_hqd_dequeue_request);
+                       WREG32(mmCP_HQD_PQ_RPTR, mqd->cp_hqd_pq_rptr);
+                       WREG32(mmCP_HQD_PQ_WPTR, mqd->cp_hqd_pq_wptr);
+               }
+
+               /* set the pointer to the MQD */
+               mqd->cp_mqd_base_addr_lo = mqd_gpu_addr & 0xfffffffc;
+               mqd->cp_mqd_base_addr_hi = upper_32_bits(mqd_gpu_addr);
+               WREG32(mmCP_MQD_BASE_ADDR, mqd->cp_mqd_base_addr_lo);
+               WREG32(mmCP_MQD_BASE_ADDR_HI, mqd->cp_mqd_base_addr_hi);
+
+               /* set MQD vmid to 0 */
+               tmp = RREG32(mmCP_MQD_CONTROL);
+               tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0);
+               WREG32(mmCP_MQD_CONTROL, tmp);
+               mqd->cp_mqd_control = tmp;
+
+               /* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
+               hqd_gpu_addr = ring->gpu_addr >> 8;
+               mqd->cp_hqd_pq_base_lo = hqd_gpu_addr;
+               mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
+               WREG32(mmCP_HQD_PQ_BASE, mqd->cp_hqd_pq_base_lo);
+               WREG32(mmCP_HQD_PQ_BASE_HI, mqd->cp_hqd_pq_base_hi);
+
+               /* set up the HQD, this is similar to CP_RB0_CNTL */
+               tmp = RREG32(mmCP_HQD_PQ_CONTROL);
+               tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
+                                   (order_base_2(ring->ring_size / 4) - 1));
+               tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
+                              ((order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1) << 8));
+#ifdef __BIG_ENDIAN
+               tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ENDIAN_SWAP, 1);
+#endif
+               tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0);
+               tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ROQ_PQ_IB_FLIP, 0);
+               tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
+               tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
+               WREG32(mmCP_HQD_PQ_CONTROL, tmp);
+               mqd->cp_hqd_pq_control = tmp;
+
+               /* set the wb address whether it's enabled or not */
+               wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
+               mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
+               mqd->cp_hqd_pq_rptr_report_addr_hi =
+                       upper_32_bits(wb_gpu_addr) & 0xffff;
+               WREG32(mmCP_HQD_PQ_RPTR_REPORT_ADDR,
+                      mqd->cp_hqd_pq_rptr_report_addr_lo);
+               WREG32(mmCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
+                      mqd->cp_hqd_pq_rptr_report_addr_hi);
+
+               /* only used if the EN bit of CP_PQ_WPTR_POLL_CNTL is set */
+               wb_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
+               mqd->cp_hqd_pq_wptr_poll_addr = wb_gpu_addr & 0xfffffffc;
+               mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
+               WREG32(mmCP_HQD_PQ_WPTR_POLL_ADDR, mqd->cp_hqd_pq_wptr_poll_addr);
+               WREG32(mmCP_HQD_PQ_WPTR_POLL_ADDR_HI,
+                      mqd->cp_hqd_pq_wptr_poll_addr_hi);
+
+               /* enable the doorbell if requested */
+               if (use_doorbell) {
+                       if (adev->asic_type == CHIP_CARRIZO) {
+                               WREG32(mmCP_MEC_DOORBELL_RANGE_LOWER,
+                                      AMDGPU_DOORBELL_KIQ << 2);
+                               WREG32(mmCP_MEC_DOORBELL_RANGE_UPPER,
+                                      AMDGPU_DOORBELL_MEC_RING7 << 2);
+                       }
+                       tmp = RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL);
+                       tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
+                                           DOORBELL_OFFSET, ring->doorbell_index);
+                       tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
+                       tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_SOURCE, 0);
+                       tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_HIT, 0);
+                       mqd->cp_hqd_pq_doorbell_control = tmp;
+
+               } else {
+                       mqd->cp_hqd_pq_doorbell_control = 0;
+               }
+               WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL,
+                      mqd->cp_hqd_pq_doorbell_control);
+
+               /* set the vmid for the queue */
+               mqd->cp_hqd_vmid = 0;
+               WREG32(mmCP_HQD_VMID, mqd->cp_hqd_vmid);
+
+               tmp = RREG32(mmCP_HQD_PERSISTENT_STATE);
+               tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x53);
+               WREG32(mmCP_HQD_PERSISTENT_STATE, tmp);
+               mqd->cp_hqd_persistent_state = tmp;
+
+               /* activate the queue */
+               mqd->cp_hqd_active = 1;
+               WREG32(mmCP_HQD_ACTIVE, mqd->cp_hqd_active);
+
+               vi_srbm_select(adev, 0, 0, 0, 0);
+               mutex_unlock(&adev->srbm_mutex);
+
+               amdgpu_bo_kunmap(ring->mqd_obj);
+               amdgpu_bo_unreserve(ring->mqd_obj);
+       }
+
+       if (use_doorbell) {
+               tmp = RREG32(mmCP_PQ_STATUS);
+               tmp = REG_SET_FIELD(tmp, CP_PQ_STATUS, DOORBELL_ENABLE, 1);
+               WREG32(mmCP_PQ_STATUS, tmp);
+       }
+
+       r = gfx_v8_0_cp_compute_start(adev);
+       if (r)
+               return r;
+
+       for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+               struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
+
+               ring->ready = true;
+               r = amdgpu_ring_test_ring(ring);
+               if (r)
+                       ring->ready = false;
+       }
+
+       return 0;
+}
+
+static int gfx_v8_0_cp_resume(struct amdgpu_device *adev)
+{
+       int r;
+
+       if (adev->asic_type != CHIP_CARRIZO)
+               gfx_v8_0_enable_gui_idle_interrupt(adev, false);
+
+       if (!adev->firmware.smu_load) {
+               /* legacy firmware loading */
+               r = gfx_v8_0_cp_gfx_load_microcode(adev);
+               if (r)
+                       return r;
+
+               r = gfx_v8_0_cp_compute_load_microcode(adev);
+               if (r)
+                       return r;
+       } else {
+               r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
+                                               AMDGPU_UCODE_ID_CP_CE);
+               if (r)
+                       return -EINVAL;
+
+               r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
+                                               AMDGPU_UCODE_ID_CP_PFP);
+               if (r)
+                       return -EINVAL;
+
+               r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
+                                               AMDGPU_UCODE_ID_CP_ME);
+               if (r)
+                       return -EINVAL;
+
+               r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
+                                               AMDGPU_UCODE_ID_CP_MEC1);
+               if (r)
+                       return -EINVAL;
+       }
+
+       r = gfx_v8_0_cp_gfx_resume(adev);
+       if (r)
+               return r;
+
+       r = gfx_v8_0_cp_compute_resume(adev);
+       if (r)
+               return r;
+
+       gfx_v8_0_enable_gui_idle_interrupt(adev, true);
+
+       return 0;
+}
+
+static void gfx_v8_0_cp_enable(struct amdgpu_device *adev, bool enable)
+{
+       gfx_v8_0_cp_gfx_enable(adev, enable);
+       gfx_v8_0_cp_compute_enable(adev, enable);
+}
+
+static int gfx_v8_0_hw_init(struct amdgpu_device *adev)
+{
+       int r;
+
+       gfx_v8_0_init_golden_registers(adev);
+
+       gfx_v8_0_gpu_init(adev);
+
+       r = gfx_v8_0_rlc_resume(adev);
+       if (r)
+               return r;
+
+       r = gfx_v8_0_cp_resume(adev);
+       if (r)
+               return r;
+
+       return r;
+}
+
+static int gfx_v8_0_hw_fini(struct amdgpu_device *adev)
+{
+       gfx_v8_0_cp_enable(adev, false);
+       gfx_v8_0_rlc_stop(adev);
+       gfx_v8_0_cp_compute_fini(adev);
+
+       return 0;
+}
+
+static int gfx_v8_0_suspend(struct amdgpu_device *adev)
+{
+       return gfx_v8_0_hw_fini(adev);
+}
+
+static int gfx_v8_0_resume(struct amdgpu_device *adev)
+{
+       return gfx_v8_0_hw_init(adev);
+}
+
+static bool gfx_v8_0_is_idle(struct amdgpu_device *adev)
+{
+       if (REG_GET_FIELD(RREG32(mmGRBM_STATUS), GRBM_STATUS, GUI_ACTIVE))
+               return false;
+       else
+               return true;
+}
+
+static int gfx_v8_0_wait_for_idle(struct amdgpu_device *adev)
+{
+       unsigned i;
+       u32 tmp;
+
+       for (i = 0; i < adev->usec_timeout; i++) {
+               /* read GRBM_STATUS */
+               tmp = RREG32(mmGRBM_STATUS) & GRBM_STATUS__GUI_ACTIVE_MASK;
+
+               if (!REG_GET_FIELD(tmp, GRBM_STATUS, GUI_ACTIVE))
+                       return 0;
+               udelay(1);
+       }
+       return -ETIMEDOUT;
+}
+
+static void gfx_v8_0_print_status(struct amdgpu_device *adev)
+{
+       int i;
+
+       dev_info(adev->dev, "GFX 8.x registers\n");
+       dev_info(adev->dev, "  GRBM_STATUS=0x%08X\n",
+                RREG32(mmGRBM_STATUS));
+       dev_info(adev->dev, "  GRBM_STATUS2=0x%08X\n",
+                RREG32(mmGRBM_STATUS2));
+       dev_info(adev->dev, "  GRBM_STATUS_SE0=0x%08X\n",
+                RREG32(mmGRBM_STATUS_SE0));
+       dev_info(adev->dev, "  GRBM_STATUS_SE1=0x%08X\n",
+                RREG32(mmGRBM_STATUS_SE1));
+       dev_info(adev->dev, "  GRBM_STATUS_SE2=0x%08X\n",
+                RREG32(mmGRBM_STATUS_SE2));
+       dev_info(adev->dev, "  GRBM_STATUS_SE3=0x%08X\n",
+                RREG32(mmGRBM_STATUS_SE3));
+       dev_info(adev->dev, "  CP_STAT = 0x%08x\n", RREG32(mmCP_STAT));
+       dev_info(adev->dev, "  CP_STALLED_STAT1 = 0x%08x\n",
+                RREG32(mmCP_STALLED_STAT1));
+       dev_info(adev->dev, "  CP_STALLED_STAT2 = 0x%08x\n",
+                RREG32(mmCP_STALLED_STAT2));
+       dev_info(adev->dev, "  CP_STALLED_STAT3 = 0x%08x\n",
+                RREG32(mmCP_STALLED_STAT3));
+       dev_info(adev->dev, "  CP_CPF_BUSY_STAT = 0x%08x\n",
+                RREG32(mmCP_CPF_BUSY_STAT));
+       dev_info(adev->dev, "  CP_CPF_STALLED_STAT1 = 0x%08x\n",
+                RREG32(mmCP_CPF_STALLED_STAT1));
+       dev_info(adev->dev, "  CP_CPF_STATUS = 0x%08x\n", RREG32(mmCP_CPF_STATUS));
+       dev_info(adev->dev, "  CP_CPC_BUSY_STAT = 0x%08x\n", RREG32(mmCP_CPC_BUSY_STAT));
+       dev_info(adev->dev, "  CP_CPC_STALLED_STAT1 = 0x%08x\n",
+                RREG32(mmCP_CPC_STALLED_STAT1));
+       dev_info(adev->dev, "  CP_CPC_STATUS = 0x%08x\n", RREG32(mmCP_CPC_STATUS));
+
+       for (i = 0; i < 32; i++) {
+               dev_info(adev->dev, "  GB_TILE_MODE%d=0x%08X\n",
+                        i, RREG32(mmGB_TILE_MODE0 + (i * 4)));
+       }
+       for (i = 0; i < 16; i++) {
+               dev_info(adev->dev, "  GB_MACROTILE_MODE%d=0x%08X\n",
+                        i, RREG32(mmGB_MACROTILE_MODE0 + (i * 4)));
+       }
+       for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
+               dev_info(adev->dev, "  se: %d\n", i);
+               gfx_v8_0_select_se_sh(adev, i, 0xffffffff);
+               dev_info(adev->dev, "  PA_SC_RASTER_CONFIG=0x%08X\n",
+                        RREG32(mmPA_SC_RASTER_CONFIG));
+               dev_info(adev->dev, "  PA_SC_RASTER_CONFIG_1=0x%08X\n",
+                        RREG32(mmPA_SC_RASTER_CONFIG_1));
+       }
+       gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
+
+       dev_info(adev->dev, "  GB_ADDR_CONFIG=0x%08X\n",
+                RREG32(mmGB_ADDR_CONFIG));
+       dev_info(adev->dev, "  HDP_ADDR_CONFIG=0x%08X\n",
+                RREG32(mmHDP_ADDR_CONFIG));
+       dev_info(adev->dev, "  DMIF_ADDR_CALC=0x%08X\n",
+                RREG32(mmDMIF_ADDR_CALC));
+       dev_info(adev->dev, "  SDMA0_TILING_CONFIG=0x%08X\n",
+                RREG32(mmSDMA0_TILING_CONFIG + SDMA0_REGISTER_OFFSET));
+       dev_info(adev->dev, "  SDMA1_TILING_CONFIG=0x%08X\n",
+                RREG32(mmSDMA0_TILING_CONFIG + SDMA1_REGISTER_OFFSET));
+       dev_info(adev->dev, "  UVD_UDEC_ADDR_CONFIG=0x%08X\n",
+                RREG32(mmUVD_UDEC_ADDR_CONFIG));
+       dev_info(adev->dev, "  UVD_UDEC_DB_ADDR_CONFIG=0x%08X\n",
+                RREG32(mmUVD_UDEC_DB_ADDR_CONFIG));
+       dev_info(adev->dev, "  UVD_UDEC_DBW_ADDR_CONFIG=0x%08X\n",
+                RREG32(mmUVD_UDEC_DBW_ADDR_CONFIG));
+
+       dev_info(adev->dev, "  CP_MEQ_THRESHOLDS=0x%08X\n",
+                RREG32(mmCP_MEQ_THRESHOLDS));
+       dev_info(adev->dev, "  SX_DEBUG_1=0x%08X\n",
+                RREG32(mmSX_DEBUG_1));
+       dev_info(adev->dev, "  TA_CNTL_AUX=0x%08X\n",
+                RREG32(mmTA_CNTL_AUX));
+       dev_info(adev->dev, "  SPI_CONFIG_CNTL=0x%08X\n",
+                RREG32(mmSPI_CONFIG_CNTL));
+       dev_info(adev->dev, "  SQ_CONFIG=0x%08X\n",
+                RREG32(mmSQ_CONFIG));
+       dev_info(adev->dev, "  DB_DEBUG=0x%08X\n",
+                RREG32(mmDB_DEBUG));
+       dev_info(adev->dev, "  DB_DEBUG2=0x%08X\n",
+                RREG32(mmDB_DEBUG2));
+       dev_info(adev->dev, "  DB_DEBUG3=0x%08X\n",
+                RREG32(mmDB_DEBUG3));
+       dev_info(adev->dev, "  CB_HW_CONTROL=0x%08X\n",
+                RREG32(mmCB_HW_CONTROL));
+       dev_info(adev->dev, "  SPI_CONFIG_CNTL_1=0x%08X\n",
+                RREG32(mmSPI_CONFIG_CNTL_1));
+       dev_info(adev->dev, "  PA_SC_FIFO_SIZE=0x%08X\n",
+                RREG32(mmPA_SC_FIFO_SIZE));
+       dev_info(adev->dev, "  VGT_NUM_INSTANCES=0x%08X\n",
+                RREG32(mmVGT_NUM_INSTANCES));
+       dev_info(adev->dev, "  CP_PERFMON_CNTL=0x%08X\n",
+                RREG32(mmCP_PERFMON_CNTL));
+       dev_info(adev->dev, "  PA_SC_FORCE_EOV_MAX_CNTS=0x%08X\n",
+                RREG32(mmPA_SC_FORCE_EOV_MAX_CNTS));
+       dev_info(adev->dev, "  VGT_CACHE_INVALIDATION=0x%08X\n",
+                RREG32(mmVGT_CACHE_INVALIDATION));
+       dev_info(adev->dev, "  VGT_GS_VERTEX_REUSE=0x%08X\n",
+                RREG32(mmVGT_GS_VERTEX_REUSE));
+       dev_info(adev->dev, "  PA_SC_LINE_STIPPLE_STATE=0x%08X\n",
+                RREG32(mmPA_SC_LINE_STIPPLE_STATE));
+       dev_info(adev->dev, "  PA_CL_ENHANCE=0x%08X\n",
+                RREG32(mmPA_CL_ENHANCE));
+       dev_info(adev->dev, "  PA_SC_ENHANCE=0x%08X\n",
+                RREG32(mmPA_SC_ENHANCE));
+
+       dev_info(adev->dev, "  CP_ME_CNTL=0x%08X\n",
+                RREG32(mmCP_ME_CNTL));
+       dev_info(adev->dev, "  CP_MAX_CONTEXT=0x%08X\n",
+                RREG32(mmCP_MAX_CONTEXT));
+       dev_info(adev->dev, "  CP_ENDIAN_SWAP=0x%08X\n",
+                RREG32(mmCP_ENDIAN_SWAP));
+       dev_info(adev->dev, "  CP_DEVICE_ID=0x%08X\n",
+                RREG32(mmCP_DEVICE_ID));
+
+       dev_info(adev->dev, "  CP_SEM_WAIT_TIMER=0x%08X\n",
+                RREG32(mmCP_SEM_WAIT_TIMER));
+
+       dev_info(adev->dev, "  CP_RB_WPTR_DELAY=0x%08X\n",
+                RREG32(mmCP_RB_WPTR_DELAY));
+       dev_info(adev->dev, "  CP_RB_VMID=0x%08X\n",
+                RREG32(mmCP_RB_VMID));
+       dev_info(adev->dev, "  CP_RB0_CNTL=0x%08X\n",
+                RREG32(mmCP_RB0_CNTL));
+       dev_info(adev->dev, "  CP_RB0_WPTR=0x%08X\n",
+                RREG32(mmCP_RB0_WPTR));
+       dev_info(adev->dev, "  CP_RB0_RPTR_ADDR=0x%08X\n",
+                RREG32(mmCP_RB0_RPTR_ADDR));
+       dev_info(adev->dev, "  CP_RB0_RPTR_ADDR_HI=0x%08X\n",
+                RREG32(mmCP_RB0_RPTR_ADDR_HI));
+       dev_info(adev->dev, "  CP_RB0_CNTL=0x%08X\n",
+                RREG32(mmCP_RB0_CNTL));
+       dev_info(adev->dev, "  CP_RB0_BASE=0x%08X\n",
+                RREG32(mmCP_RB0_BASE));
+       dev_info(adev->dev, "  CP_RB0_BASE_HI=0x%08X\n",
+                RREG32(mmCP_RB0_BASE_HI));
+       dev_info(adev->dev, "  CP_MEC_CNTL=0x%08X\n",
+                RREG32(mmCP_MEC_CNTL));
+       dev_info(adev->dev, "  CP_CPF_DEBUG=0x%08X\n",
+                RREG32(mmCP_CPF_DEBUG));
+
+       dev_info(adev->dev, "  SCRATCH_ADDR=0x%08X\n",
+                RREG32(mmSCRATCH_ADDR));
+       dev_info(adev->dev, "  SCRATCH_UMSK=0x%08X\n",
+                RREG32(mmSCRATCH_UMSK));
+
+       dev_info(adev->dev, "  CP_INT_CNTL_RING0=0x%08X\n",
+                RREG32(mmCP_INT_CNTL_RING0));
+       dev_info(adev->dev, "  RLC_LB_CNTL=0x%08X\n",
+                RREG32(mmRLC_LB_CNTL));
+       dev_info(adev->dev, "  RLC_CNTL=0x%08X\n",
+                RREG32(mmRLC_CNTL));
+       dev_info(adev->dev, "  RLC_CGCG_CGLS_CTRL=0x%08X\n",
+                RREG32(mmRLC_CGCG_CGLS_CTRL));
+       dev_info(adev->dev, "  RLC_LB_CNTR_INIT=0x%08X\n",
+                RREG32(mmRLC_LB_CNTR_INIT));
+       dev_info(adev->dev, "  RLC_LB_CNTR_MAX=0x%08X\n",
+                RREG32(mmRLC_LB_CNTR_MAX));
+       dev_info(adev->dev, "  RLC_LB_INIT_CU_MASK=0x%08X\n",
+                RREG32(mmRLC_LB_INIT_CU_MASK));
+       dev_info(adev->dev, "  RLC_LB_PARAMS=0x%08X\n",
+                RREG32(mmRLC_LB_PARAMS));
+       dev_info(adev->dev, "  RLC_LB_CNTL=0x%08X\n",
+                RREG32(mmRLC_LB_CNTL));
+       dev_info(adev->dev, "  RLC_MC_CNTL=0x%08X\n",
+                RREG32(mmRLC_MC_CNTL));
+       dev_info(adev->dev, "  RLC_UCODE_CNTL=0x%08X\n",
+                RREG32(mmRLC_UCODE_CNTL));
+
+       mutex_lock(&adev->srbm_mutex);
+       for (i = 0; i < 16; i++) {
+               vi_srbm_select(adev, 0, 0, 0, i);
+               dev_info(adev->dev, "  VM %d:\n", i);
+               dev_info(adev->dev, "  SH_MEM_CONFIG=0x%08X\n",
+                        RREG32(mmSH_MEM_CONFIG));
+               dev_info(adev->dev, "  SH_MEM_APE1_BASE=0x%08X\n",
+                        RREG32(mmSH_MEM_APE1_BASE));
+               dev_info(adev->dev, "  SH_MEM_APE1_LIMIT=0x%08X\n",
+                        RREG32(mmSH_MEM_APE1_LIMIT));
+               dev_info(adev->dev, "  SH_MEM_BASES=0x%08X\n",
+                        RREG32(mmSH_MEM_BASES));
+       }
+       vi_srbm_select(adev, 0, 0, 0, 0);
+       mutex_unlock(&adev->srbm_mutex);
+}
+
+static int gfx_v8_0_soft_reset(struct amdgpu_device *adev)
+{
+       u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
+       u32 tmp;
+
+       /* GRBM_STATUS */
+       tmp = RREG32(mmGRBM_STATUS);
+       if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK |
+                  GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK |
+                  GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK |
+                  GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK |
+                  GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK |
+                  GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK)) {
+               grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
+                                               GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
+               grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
+                                               GRBM_SOFT_RESET, SOFT_RESET_GFX, 1);
+       }
+
+       if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) {
+               grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
+                                               GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
+               srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
+                                               SRBM_SOFT_RESET, SOFT_RESET_GRBM, 1);
+       }
+
+       /* GRBM_STATUS2 */
+       tmp = RREG32(mmGRBM_STATUS2);
+       if (REG_GET_FIELD(tmp, GRBM_STATUS2, RLC_BUSY))
+               grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
+                                               GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
+
+       /* SRBM_STATUS */
+       tmp = RREG32(mmSRBM_STATUS);
+       if (REG_GET_FIELD(tmp, SRBM_STATUS, GRBM_RQ_PENDING))
+               srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
+                                               SRBM_SOFT_RESET, SOFT_RESET_GRBM, 1);
+
+       if (grbm_soft_reset || srbm_soft_reset) {
+               gfx_v8_0_print_status(adev);
+               /* stop the rlc */
+               gfx_v8_0_rlc_stop(adev);
+
+               /* Disable GFX parsing/prefetching */
+               gfx_v8_0_cp_gfx_enable(adev, false);
+
+               /* Disable MEC parsing/prefetching */
+               /* XXX todo */
+
+               if (grbm_soft_reset) {
+                       tmp = RREG32(mmGRBM_SOFT_RESET);
+                       tmp |= grbm_soft_reset;
+                       dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
+                       WREG32(mmGRBM_SOFT_RESET, tmp);
+                       tmp = RREG32(mmGRBM_SOFT_RESET);
+
+                       udelay(50);
+
+                       tmp &= ~grbm_soft_reset;
+                       WREG32(mmGRBM_SOFT_RESET, tmp);
+                       tmp = RREG32(mmGRBM_SOFT_RESET);
+               }
+
+               if (srbm_soft_reset) {
+                       tmp = RREG32(mmSRBM_SOFT_RESET);
+                       tmp |= srbm_soft_reset;
+                       dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
+                       WREG32(mmSRBM_SOFT_RESET, tmp);
+                       tmp = RREG32(mmSRBM_SOFT_RESET);
+
+                       udelay(50);
+
+                       tmp &= ~srbm_soft_reset;
+                       WREG32(mmSRBM_SOFT_RESET, tmp);
+                       tmp = RREG32(mmSRBM_SOFT_RESET);
+               }
+               /* Wait a little for things to settle down */
+               udelay(50);
+               gfx_v8_0_print_status(adev);
+       }
+       return 0;
+}
+
+/**
+ * gfx_v8_0_get_gpu_clock_counter - return GPU clock counter snapshot
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Fetches a GPU clock counter snapshot.
+ * Returns the 64 bit clock counter snapshot.
+ */
+uint64_t gfx_v8_0_get_gpu_clock_counter(struct amdgpu_device *adev)
+{
+       uint64_t clock;
+
+       mutex_lock(&adev->gfx.gpu_clock_mutex);
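+       /* writing CAPTURE latches the counter so the two 32-bit reads below
+        * form a consistent 64-bit value
+        */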
+       WREG32(mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
+       clock = (uint64_t)RREG32(mmRLC_GPU_CLOCK_COUNT_LSB) |
+               ((uint64_t)RREG32(mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
+       mutex_unlock(&adev->gfx.gpu_clock_mutex);
+       return clock;
+}
+
+static void gfx_v8_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
+                                         uint32_t vmid,
+                                         uint32_t gds_base, uint32_t gds_size,
+                                         uint32_t gws_base, uint32_t gws_size,
+                                         uint32_t oa_base, uint32_t oa_size)
+{
+       gds_base = gds_base >> AMDGPU_GDS_SHIFT;
+       gds_size = gds_size >> AMDGPU_GDS_SHIFT;
+
+       gws_base = gws_base >> AMDGPU_GWS_SHIFT;
+       gws_size = gws_size >> AMDGPU_GWS_SHIFT;
+
+       oa_base = oa_base >> AMDGPU_OA_SHIFT;
+       oa_size = oa_size >> AMDGPU_OA_SHIFT;
+
+       /* GDS Base */
+       amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+       amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
+                               WRITE_DATA_DST_SEL(0)));
+       amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].mem_base);
+       amdgpu_ring_write(ring, 0);
+       amdgpu_ring_write(ring, gds_base);
+
+       /* GDS Size */
+       amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+       amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
+                               WRITE_DATA_DST_SEL(0)));
+       amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].mem_size);
+       amdgpu_ring_write(ring, 0);
+       amdgpu_ring_write(ring, gds_size);
+
+       /* GWS */
+       amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+       amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
+                               WRITE_DATA_DST_SEL(0)));
+       amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].gws);
+       amdgpu_ring_write(ring, 0);
+       amdgpu_ring_write(ring, gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base);
+
+       /* OA */
+       amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+       amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
+                               WRITE_DATA_DST_SEL(0)));
+       amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].oa);
+       amdgpu_ring_write(ring, 0);
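+       /* contiguous mask of oa_size bits starting at bit oa_base */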
+       amdgpu_ring_write(ring, (1 << (oa_size + oa_base)) - (1 << oa_base));
+}
+
+static int gfx_v8_0_early_init(struct amdgpu_device *adev)
+{
+       adev->gfx.num_gfx_rings = GFX8_NUM_GFX_RINGS;
+       adev->gfx.num_compute_rings = GFX8_NUM_COMPUTE_RINGS;
+       gfx_v8_0_set_ring_funcs(adev);
+       gfx_v8_0_set_irq_funcs(adev);
+       gfx_v8_0_set_gds_init(adev);
+
+       return 0;
+}
+
+static int gfx_v8_0_set_powergating_state(struct amdgpu_device *adev,
+                                         enum amdgpu_powergating_state state)
+{
+       return 0;
+}
+
+static int gfx_v8_0_set_clockgating_state(struct amdgpu_device *adev,
+                                         enum amdgpu_clockgating_state state)
+{
+       return 0;
+}
+
+static u32 gfx_v8_0_ring_get_rptr_gfx(struct amdgpu_ring *ring)
+{
+       u32 rptr;
+
+       rptr = ring->adev->wb.wb[ring->rptr_offs];
+
+       return rptr;
+}
+
+static u32 gfx_v8_0_ring_get_wptr_gfx(struct amdgpu_ring *ring)
+{
+       struct amdgpu_device *adev = ring->adev;
+       u32 wptr;
+
+       if (ring->use_doorbell)
+               /* XXX check if swapping is necessary on BE */
+               wptr = ring->adev->wb.wb[ring->wptr_offs];
+       else
+               wptr = RREG32(mmCP_RB0_WPTR);
+
+       return wptr;
+}
+
+static void gfx_v8_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
+{
+       struct amdgpu_device *adev = ring->adev;
+
+       if (ring->use_doorbell) {
+               /* XXX check if swapping is necessary on BE */
+               adev->wb.wb[ring->wptr_offs] = ring->wptr;
+               WDOORBELL32(ring->doorbell_index, ring->wptr);
+       } else {
+               WREG32(mmCP_RB0_WPTR, ring->wptr);
+               (void)RREG32(mmCP_RB0_WPTR);
+       }
+}
+
+static void gfx_v8_0_hdp_flush_cp_ring_emit(struct amdgpu_ring *ring)
+{
+       u32 ref_and_mask, reg_mem_engine;
+
+       if (ring->type == AMDGPU_RING_TYPE_COMPUTE) {
+               switch (ring->me) {
+               case 1:
+                       ref_and_mask = GPU_HDP_FLUSH_DONE__CP2_MASK << ring->pipe;
+                       break;
+               case 2:
+                       ref_and_mask = GPU_HDP_FLUSH_DONE__CP6_MASK << ring->pipe;
+                       break;
+               default:
+                       return;
+               }
+               reg_mem_engine = 0;
+       } else {
+               ref_and_mask = GPU_HDP_FLUSH_DONE__CP0_MASK;
+               reg_mem_engine = WAIT_REG_MEM_ENGINE(1); /* pfp */
+       }
+
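+       /* request an HDP flush by writing ref_and_mask to GPU_HDP_FLUSH_REQ,
+        * then poll GPU_HDP_FLUSH_DONE for the same bit
+        */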
+       amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
+       amdgpu_ring_write(ring, (WAIT_REG_MEM_OPERATION(1) | /* write, wait, write */
+                                WAIT_REG_MEM_FUNCTION(3) |  /* == */
+                                reg_mem_engine));
+       amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_REQ);
+       amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_DONE);
+       amdgpu_ring_write(ring, ref_and_mask);
+       amdgpu_ring_write(ring, ref_and_mask);
+       amdgpu_ring_write(ring, 0x20); /* poll interval */
+}
+
+static void gfx_v8_0_ring_emit_ib(struct amdgpu_ring *ring,
+                                 struct amdgpu_ib *ib)
+{
+       u32 header, control = 0;
+       u32 next_rptr = ring->wptr + 5;
+       if (ring->type == AMDGPU_RING_TYPE_COMPUTE)
+               control |= INDIRECT_BUFFER_VALID;
+
+       if (ib->flush_hdp_writefifo)
+               next_rptr += 7;
+
+       if (ring->need_ctx_switch && ring->type == AMDGPU_RING_TYPE_GFX)
+               next_rptr += 2;
+
+       next_rptr += 4;
+       amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+       amdgpu_ring_write(ring, WRITE_DATA_DST_SEL(5) | WR_CONFIRM);
+       amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
+       amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
+       amdgpu_ring_write(ring, next_rptr);
+
+       if (ib->flush_hdp_writefifo)
+               gfx_v8_0_hdp_flush_cp_ring_emit(ring);
+
+       /* insert SWITCH_BUFFER packet before first IB in the ring frame */
+       if (ring->need_ctx_switch && ring->type == AMDGPU_RING_TYPE_GFX) {
+               amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
+               amdgpu_ring_write(ring, 0);
+               ring->need_ctx_switch = false;
+       }
+
+       if (ib->is_const_ib)
+               header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2);
+       else
+               header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
+
+       control |= ib->length_dw |
+               (ib->vm ? (ib->vm->ids[ring->idx].id << 24) : 0);
+
+       amdgpu_ring_write(ring, header);
+       amdgpu_ring_write(ring,
+#ifdef __BIG_ENDIAN
+                         (2 << 0) |
+#endif
+                         (ib->gpu_addr & 0xFFFFFFFC));
+       amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
+       amdgpu_ring_write(ring, control);
+}
+
+static void gfx_v8_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr,
+                                        u64 seq, bool write64bit)
+{
+       /* EVENT_WRITE_EOP - flush caches, send int */
+       amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
+       amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
+                                EOP_TC_ACTION_EN |
+                                EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
+                                EVENT_INDEX(5)));
+       amdgpu_ring_write(ring, addr & 0xfffffffc);
+       amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) |
+                         DATA_SEL(write64bit ? 2 : 1) | INT_SEL(2));
+       amdgpu_ring_write(ring, lower_32_bits(seq));
+       amdgpu_ring_write(ring, upper_32_bits(seq));
+}
+
+/**
+ * gfx_v8_0_ring_emit_semaphore - emit a semaphore on the CP ring
+ *
+ * @ring: amdgpu ring buffer object
+ * @semaphore: amdgpu semaphore object
+ * @emit_wait: Is this a semaphore wait?
+ *
+ * Emits a semaphore signal/wait packet to the CP ring and prevents the PFP
+ * from running ahead of semaphore waits.
+ */
+static bool gfx_v8_0_ring_emit_semaphore(struct amdgpu_ring *ring,
+                                        struct amdgpu_semaphore *semaphore,
+                                        bool emit_wait)
+{
+       uint64_t addr = semaphore->gpu_addr;
+       unsigned sel = emit_wait ? PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL;
+
+       if (ring->adev->asic_type == CHIP_TOPAZ ||
+           ring->adev->asic_type == CHIP_TONGA) {
+               amdgpu_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1));
+               amdgpu_ring_write(ring, lower_32_bits(addr));
+               amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) | sel);
+       } else {
+               amdgpu_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 2));
+               amdgpu_ring_write(ring, lower_32_bits(addr));
+               amdgpu_ring_write(ring, upper_32_bits(addr));
+               amdgpu_ring_write(ring, sel);
+       }
+
+       if (emit_wait && (ring->type == AMDGPU_RING_TYPE_GFX)) {
+               /* Prevent the PFP from running ahead of the semaphore wait */
+               amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
+               amdgpu_ring_write(ring, 0x0);
+       }
+
+       return true;
+}
+
+static void gfx_v8_0_ce_sync_me(struct amdgpu_ring *ring)
+{
+       struct amdgpu_device *adev = ring->adev;
+       u64 gpu_addr = adev->wb.gpu_addr + adev->gfx.ce_sync_offs * 4;
+
+       /* instruct DE to set a magic number */
+       amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+       amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
+                                WRITE_DATA_DST_SEL(5)));
+       amdgpu_ring_write(ring, gpu_addr & 0xfffffffc);
+       amdgpu_ring_write(ring, upper_32_bits(gpu_addr) & 0xffffffff);
+       amdgpu_ring_write(ring, 1);
+
+       /* let CE wait till condition satisfied */
+       amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
+       amdgpu_ring_write(ring, (WAIT_REG_MEM_OPERATION(0) | /* wait */
+                                WAIT_REG_MEM_MEM_SPACE(1) | /* memory */
+                                WAIT_REG_MEM_FUNCTION(3) |  /* == */
+                                WAIT_REG_MEM_ENGINE(2)));   /* ce */
+       amdgpu_ring_write(ring, gpu_addr & 0xfffffffc);
+       amdgpu_ring_write(ring, upper_32_bits(gpu_addr) & 0xffffffff);
+       amdgpu_ring_write(ring, 1);
+       amdgpu_ring_write(ring, 0xffffffff);
+       amdgpu_ring_write(ring, 4); /* poll interval */
+
+       /* instruct CE to reset wb of ce_sync to zero */
+       amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+       amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(2) |
+                                WRITE_DATA_DST_SEL(5) |
+                                WR_CONFIRM));
+       amdgpu_ring_write(ring, gpu_addr & 0xfffffffc);
+       amdgpu_ring_write(ring, upper_32_bits(gpu_addr) & 0xffffffff);
+       amdgpu_ring_write(ring, 0);
+}
+
+static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
+                                       unsigned vm_id, uint64_t pd_addr)
+{
+       int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
+       u32 srbm_gfx_cntl = 0;
+
+       amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+       amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
+                                WRITE_DATA_DST_SEL(0)));
+       if (vm_id < 8) {
+               amdgpu_ring_write(ring,
+                                 (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id));
+       } else {
+               amdgpu_ring_write(ring,
+                                 (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8));
+       }
+       amdgpu_ring_write(ring, 0);
+       amdgpu_ring_write(ring, pd_addr >> 12);
+
+       /* update SH_MEM_* regs */
+       srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, VMID, vm_id);
+       amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+       amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
+                                WRITE_DATA_DST_SEL(0)));
+       amdgpu_ring_write(ring, mmSRBM_GFX_CNTL);
+       amdgpu_ring_write(ring, 0);
+       amdgpu_ring_write(ring, srbm_gfx_cntl);
+
+       amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 6));
+       amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
+                                WRITE_DATA_DST_SEL(0)));
+       amdgpu_ring_write(ring, mmSH_MEM_BASES);
+       amdgpu_ring_write(ring, 0);
+
+       amdgpu_ring_write(ring, 0); /* SH_MEM_BASES */
+       amdgpu_ring_write(ring, 0); /* SH_MEM_CONFIG */
+       amdgpu_ring_write(ring, 1); /* SH_MEM_APE1_BASE */
+       amdgpu_ring_write(ring, 0); /* SH_MEM_APE1_LIMIT */
+
+       srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, VMID, 0);
+       amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+       amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
+                                WRITE_DATA_DST_SEL(0)));
+       amdgpu_ring_write(ring, mmSRBM_GFX_CNTL);
+       amdgpu_ring_write(ring, 0);
+       amdgpu_ring_write(ring, srbm_gfx_cntl);
+
+
+       /* bits 0-15 are the VM contexts0-15 */
+       /* invalidate the cache */
+       amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+       amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
+                                WRITE_DATA_DST_SEL(0)));
+       amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
+       amdgpu_ring_write(ring, 0);
+       amdgpu_ring_write(ring, 1 << vm_id);
+
+       /* wait for the invalidate to complete */
+       amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
+       amdgpu_ring_write(ring, (WAIT_REG_MEM_OPERATION(0) | /* wait */
+                                WAIT_REG_MEM_FUNCTION(0) |  /* always */
+                                WAIT_REG_MEM_ENGINE(0))); /* me */
+       amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
+       amdgpu_ring_write(ring, 0);
+       amdgpu_ring_write(ring, 0); /* ref */
+       amdgpu_ring_write(ring, 0); /* mask */
+       amdgpu_ring_write(ring, 0x20); /* poll interval */
+
+       /* compute doesn't have PFP */
+       if (usepfp) {
+               /* sync PFP to ME, otherwise we might get invalid PFP reads */
+               amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
+               amdgpu_ring_write(ring, 0x0);
+
+               /* sync CE with ME to prevent the CE from fetching the CEIB
+                * before the context switch completes
+                */
+               gfx_v8_0_ce_sync_me(ring);
+       }
+}
+
+static bool gfx_v8_0_ring_is_lockup(struct amdgpu_ring *ring)
+{
+       if (gfx_v8_0_is_idle(ring->adev)) {
+               amdgpu_ring_lockup_update(ring);
+               return false;
+       }
+       return amdgpu_ring_test_lockup(ring);
+}
+
+static u32 gfx_v8_0_ring_get_rptr_compute(struct amdgpu_ring *ring)
+{
+       return ring->adev->wb.wb[ring->rptr_offs];
+}
+
+static u32 gfx_v8_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
+{
+       return ring->adev->wb.wb[ring->wptr_offs];
+}
+
+static void gfx_v8_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
+{
+       struct amdgpu_device *adev = ring->adev;
+
+       /* XXX check if swapping is necessary on BE */
+       adev->wb.wb[ring->wptr_offs] = ring->wptr;
+       WDOORBELL32(ring->doorbell_index, ring->wptr);
+}
+
+static void gfx_v8_0_ring_emit_fence_compute(struct amdgpu_ring *ring,
+                                            u64 addr, u64 seq,
+                                            bool write64bits)
+{
+       /* RELEASE_MEM - flush caches, send int */
+       amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 5));
+       amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
+                                EOP_TC_ACTION_EN |
+                                EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
+                                EVENT_INDEX(5)));
+       amdgpu_ring_write(ring, DATA_SEL(write64bits ? 2 : 1) | INT_SEL(2));
+       amdgpu_ring_write(ring, addr & 0xfffffffc);
+       amdgpu_ring_write(ring, upper_32_bits(addr));
+       amdgpu_ring_write(ring, lower_32_bits(seq));
+       amdgpu_ring_write(ring, upper_32_bits(seq));
+}
+
+static void gfx_v8_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
+                                                enum amdgpu_interrupt_state state)
+{
+       u32 cp_int_cntl;
+
+       switch (state) {
+       case AMDGPU_IRQ_STATE_DISABLE:
+               cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
+               cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
+                                           TIME_STAMP_INT_ENABLE, 0);
+               WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
+               break;
+       case AMDGPU_IRQ_STATE_ENABLE:
+               cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
+               cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
+                                           TIME_STAMP_INT_ENABLE, 1);
+               WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
+               break;
+       default:
+               break;
+       }
+}
+
+static void gfx_v8_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
+                                                    int me, int pipe,
+                                                    enum amdgpu_interrupt_state state)
+{
+       u32 mec_int_cntl, mec_int_cntl_reg;
+
+       /*
+        * amdgpu controls only pipe 0 of MEC1. That's why this function only
+        * handles the setting of interrupts for this specific pipe. All other
+        * pipes' interrupts are set by amdkfd.
+        */
+
+       if (me == 1) {
+               switch (pipe) {
+               case 0:
+                       mec_int_cntl_reg = mmCP_ME1_PIPE0_INT_CNTL;
+                       break;
+               default:
+                       DRM_DEBUG("invalid pipe %d\n", pipe);
+                       return;
+               }
+       } else {
+               DRM_DEBUG("invalid me %d\n", me);
+               return;
+       }
+
+       switch (state) {
+       case AMDGPU_IRQ_STATE_DISABLE:
+               mec_int_cntl = RREG32(mec_int_cntl_reg);
+               mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
+                                            TIME_STAMP_INT_ENABLE, 0);
+               WREG32(mec_int_cntl_reg, mec_int_cntl);
+               break;
+       case AMDGPU_IRQ_STATE_ENABLE:
+               mec_int_cntl = RREG32(mec_int_cntl_reg);
+               mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
+                                            TIME_STAMP_INT_ENABLE, 1);
+               WREG32(mec_int_cntl_reg, mec_int_cntl);
+               break;
+       default:
+               break;
+       }
+}
+
+static int gfx_v8_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
+                                            struct amdgpu_irq_src *source,
+                                            unsigned type,
+                                            enum amdgpu_interrupt_state state)
+{
+       u32 cp_int_cntl;
+
+       switch (state) {
+       case AMDGPU_IRQ_STATE_DISABLE:
+               cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
+               cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
+                                           PRIV_REG_INT_ENABLE, 0);
+               WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
+               break;
+       case AMDGPU_IRQ_STATE_ENABLE:
+               cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
+               cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
+                                           PRIV_REG_INT_ENABLE, 1);
+               WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
+               break;
+       default:
+               break;
+       }
+
+       return 0;
+}
+
+static int gfx_v8_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
+                                             struct amdgpu_irq_src *source,
+                                             unsigned type,
+                                             enum amdgpu_interrupt_state state)
+{
+       u32 cp_int_cntl;
+
+       switch (state) {
+       case AMDGPU_IRQ_STATE_DISABLE:
+               cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
+               cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
+                                           PRIV_INSTR_INT_ENABLE, 0);
+               WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
+               break;
+       case AMDGPU_IRQ_STATE_ENABLE:
+               cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
+               cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
+                                           PRIV_INSTR_INT_ENABLE, 1);
+               WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
+               break;
+       default:
+               break;
+       }
+
+       return 0;
+}
+
+static int gfx_v8_0_set_eop_interrupt_state(struct amdgpu_device *adev,
+                                           struct amdgpu_irq_src *src,
+                                           unsigned type,
+                                           enum amdgpu_interrupt_state state)
+{
+       switch (type) {
+       case AMDGPU_CP_IRQ_GFX_EOP:
+               gfx_v8_0_set_gfx_eop_interrupt_state(adev, state);
+               break;
+       case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
+               gfx_v8_0_set_compute_eop_interrupt_state(adev, 1, 0, state);
+               break;
+       case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP:
+               gfx_v8_0_set_compute_eop_interrupt_state(adev, 1, 1, state);
+               break;
+       case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP:
+               gfx_v8_0_set_compute_eop_interrupt_state(adev, 1, 2, state);
+               break;
+       case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP:
+               gfx_v8_0_set_compute_eop_interrupt_state(adev, 1, 3, state);
+               break;
+       case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP:
+               gfx_v8_0_set_compute_eop_interrupt_state(adev, 2, 0, state);
+               break;
+       case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP:
+               gfx_v8_0_set_compute_eop_interrupt_state(adev, 2, 1, state);
+               break;
+       case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP:
+               gfx_v8_0_set_compute_eop_interrupt_state(adev, 2, 2, state);
+               break;
+       case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP:
+               gfx_v8_0_set_compute_eop_interrupt_state(adev, 2, 3, state);
+               break;
+       default:
+               break;
+       }
+       return 0;
+}
+
+static int gfx_v8_0_eop_irq(struct amdgpu_device *adev,
+                           struct amdgpu_irq_src *source,
+                           struct amdgpu_iv_entry *entry)
+{
+       int i;
+       u8 me_id, pipe_id, queue_id;
+       struct amdgpu_ring *ring;
+
+       DRM_DEBUG("IH: CP EOP\n");
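+       /* ring_id encodes me in bits [3:2], pipe in bits [1:0] and queue in bits [6:4] */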
+       me_id = (entry->ring_id & 0x0c) >> 2;
+       pipe_id = (entry->ring_id & 0x03) >> 0;
+       queue_id = (entry->ring_id & 0x70) >> 4;
+
+       switch (me_id) {
+       case 0:
+               amdgpu_fence_process(&adev->gfx.gfx_ring[0]);
+               break;
+       case 1:
+       case 2:
+               for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+                       ring = &adev->gfx.compute_ring[i];
+                       /* Per-queue interrupt is supported for MEC starting from VI.
+                        * The interrupt can only be enabled/disabled per pipe instead
+                        * of per queue.
+                        */
+                       if ((ring->me == me_id) && (ring->pipe == pipe_id) && (ring->queue == queue_id))
+                               amdgpu_fence_process(ring);
+               }
+               break;
+       }
+       return 0;
+}
+
+static int gfx_v8_0_priv_reg_irq(struct amdgpu_device *adev,
+                                struct amdgpu_irq_src *source,
+                                struct amdgpu_iv_entry *entry)
+{
+       DRM_ERROR("Illegal register access in command stream\n");
+       schedule_work(&adev->reset_work);
+       return 0;
+}
+
+static int gfx_v8_0_priv_inst_irq(struct amdgpu_device *adev,
+                                 struct amdgpu_irq_src *source,
+                                 struct amdgpu_iv_entry *entry)
+{
+       DRM_ERROR("Illegal instruction in command stream\n");
+       schedule_work(&adev->reset_work);
+       return 0;
+}
+
+const struct amdgpu_ip_funcs gfx_v8_0_ip_funcs = {
+       .early_init = gfx_v8_0_early_init,
+       .late_init = NULL,
+       .sw_init = gfx_v8_0_sw_init,
+       .sw_fini = gfx_v8_0_sw_fini,
+       .hw_init = gfx_v8_0_hw_init,
+       .hw_fini = gfx_v8_0_hw_fini,
+       .suspend = gfx_v8_0_suspend,
+       .resume = gfx_v8_0_resume,
+       .is_idle = gfx_v8_0_is_idle,
+       .wait_for_idle = gfx_v8_0_wait_for_idle,
+       .soft_reset = gfx_v8_0_soft_reset,
+       .print_status = gfx_v8_0_print_status,
+       .set_clockgating_state = gfx_v8_0_set_clockgating_state,
+       .set_powergating_state = gfx_v8_0_set_powergating_state,
+};
+
+static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
+       .get_rptr = gfx_v8_0_ring_get_rptr_gfx,
+       .get_wptr = gfx_v8_0_ring_get_wptr_gfx,
+       .set_wptr = gfx_v8_0_ring_set_wptr_gfx,
+       .parse_cs = NULL,
+       .emit_ib = gfx_v8_0_ring_emit_ib,
+       .emit_fence = gfx_v8_0_ring_emit_fence_gfx,
+       .emit_semaphore = gfx_v8_0_ring_emit_semaphore,
+       .emit_vm_flush = gfx_v8_0_ring_emit_vm_flush,
+       .emit_gds_switch = gfx_v8_0_ring_emit_gds_switch,
+       .test_ring = gfx_v8_0_ring_test_ring,
+       .test_ib = gfx_v8_0_ring_test_ib,
+       .is_lockup = gfx_v8_0_ring_is_lockup,
+};
+
+static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
+       .get_rptr = gfx_v8_0_ring_get_rptr_compute,
+       .get_wptr = gfx_v8_0_ring_get_wptr_compute,
+       .set_wptr = gfx_v8_0_ring_set_wptr_compute,
+       .parse_cs = NULL,
+       .emit_ib = gfx_v8_0_ring_emit_ib,
+       .emit_fence = gfx_v8_0_ring_emit_fence_compute,
+       .emit_semaphore = gfx_v8_0_ring_emit_semaphore,
+       .emit_vm_flush = gfx_v8_0_ring_emit_vm_flush,
+       .emit_gds_switch = gfx_v8_0_ring_emit_gds_switch,
+       .test_ring = gfx_v8_0_ring_test_ring,
+       .test_ib = gfx_v8_0_ring_test_ib,
+       .is_lockup = gfx_v8_0_ring_is_lockup,
+};
+
+static void gfx_v8_0_set_ring_funcs(struct amdgpu_device *adev)
+{
+       int i;
+
+       for (i = 0; i < adev->gfx.num_gfx_rings; i++)
+               adev->gfx.gfx_ring[i].funcs = &gfx_v8_0_ring_funcs_gfx;
+
+       for (i = 0; i < adev->gfx.num_compute_rings; i++)
+               adev->gfx.compute_ring[i].funcs = &gfx_v8_0_ring_funcs_compute;
+}
+
+static const struct amdgpu_irq_src_funcs gfx_v8_0_eop_irq_funcs = {
+       .set = gfx_v8_0_set_eop_interrupt_state,
+       .process = gfx_v8_0_eop_irq,
+};
+
+static const struct amdgpu_irq_src_funcs gfx_v8_0_priv_reg_irq_funcs = {
+       .set = gfx_v8_0_set_priv_reg_fault_state,
+       .process = gfx_v8_0_priv_reg_irq,
+};
+
+static const struct amdgpu_irq_src_funcs gfx_v8_0_priv_inst_irq_funcs = {
+       .set = gfx_v8_0_set_priv_inst_fault_state,
+       .process = gfx_v8_0_priv_inst_irq,
+};
+
+static void gfx_v8_0_set_irq_funcs(struct amdgpu_device *adev)
+{
+       adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
+       adev->gfx.eop_irq.funcs = &gfx_v8_0_eop_irq_funcs;
+
+       adev->gfx.priv_reg_irq.num_types = 1;
+       adev->gfx.priv_reg_irq.funcs = &gfx_v8_0_priv_reg_irq_funcs;
+
+       adev->gfx.priv_inst_irq.num_types = 1;
+       adev->gfx.priv_inst_irq.funcs = &gfx_v8_0_priv_inst_irq_funcs;
+}
+
+static void gfx_v8_0_set_gds_init(struct amdgpu_device *adev)
+{
+       /* init asic gds info */
+       adev->gds.mem.total_size = RREG32(mmGDS_VMID0_SIZE);
+       adev->gds.gws.total_size = 64;
+       adev->gds.oa.total_size = 16;
+
+       if (adev->gds.mem.total_size == 64 * 1024) {
+               adev->gds.mem.gfx_partition_size = 4096;
+               adev->gds.mem.cs_partition_size = 4096;
+
+               adev->gds.gws.gfx_partition_size = 4;
+               adev->gds.gws.cs_partition_size = 4;
+
+               adev->gds.oa.gfx_partition_size = 4;
+               adev->gds.oa.cs_partition_size = 1;
+       } else {
+               adev->gds.mem.gfx_partition_size = 1024;
+               adev->gds.mem.cs_partition_size = 1024;
+
+               adev->gds.gws.gfx_partition_size = 16;
+               adev->gds.gws.cs_partition_size = 16;
+
+               adev->gds.oa.gfx_partition_size = 4;
+               adev->gds.oa.cs_partition_size = 4;
+       }
+}
+
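+/* Return the bitmap of active CUs for the given SE/SH: the inactive-CU bits
+ * from CC_GC_SHADER_ARRAY_CONFIG and GC_USER_SHADER_ARRAY_CONFIG are OR'ed
+ * together and the complement is returned, limited to max_cu_per_sh bits.
+ */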
+static u32 gfx_v8_0_get_cu_active_bitmap(struct amdgpu_device *adev,
+               u32 se, u32 sh)
+{
+       u32 mask = 0, tmp, tmp1;
+       int i;
+
+       gfx_v8_0_select_se_sh(adev, se, sh);
+       tmp = RREG32(mmCC_GC_SHADER_ARRAY_CONFIG);
+       tmp1 = RREG32(mmGC_USER_SHADER_ARRAY_CONFIG);
+       gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
+
+       tmp &= 0xffff0000;
+
+       tmp |= tmp1;
+       tmp >>= 16;
+
+       for (i = 0; i < adev->gfx.config.max_cu_per_sh; i++) {
+               mask <<= 1;
+               mask |= 1;
+       }
+
+       return (~tmp) & mask;
+}
+
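+/* Fill @cu_info with the per-SE/SH active CU bitmaps, the total number of
+ * active CUs and a mask of up to two always-on (ao) CUs per SH.
+ */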
+int gfx_v8_0_get_cu_info(struct amdgpu_device *adev,
+                                                struct amdgpu_cu_info *cu_info)
+{
+       int i, j, k, counter, active_cu_number = 0;
+       u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
+
+       if (!adev || !cu_info)
+               return -EINVAL;
+
+       mutex_lock(&adev->grbm_idx_mutex);
+       for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
+               for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
+                       mask = 1;
+                       ao_bitmap = 0;
+                       counter = 0;
+                       bitmap = gfx_v8_0_get_cu_active_bitmap(adev, i, j);
+                       cu_info->bitmap[i][j] = bitmap;
+
+                       for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
+                               if (bitmap & mask) {
+                                       if (counter < 2)
+                                               ao_bitmap |= mask;
+                                               counter++;
+                               }
+                               mask <<= 1;
+                       }
+                       active_cu_number += counter;
+                       ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
+               }
+       }
+
+       cu_info->number = active_cu_number;
+       cu_info->ao_cu_mask = ao_cu_mask;
+       mutex_unlock(&adev->grbm_idx_mutex);
+       return 0;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h
new file mode 100644 (file)
index 0000000..be8a5f8
--- /dev/null
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __GFX_V8_0_H__
+#define __GFX_V8_0_H__
+
+extern const struct amdgpu_ip_funcs gfx_v8_0_ip_funcs;
+
+uint64_t gfx_v8_0_get_gpu_clock_counter(struct amdgpu_device *adev);
+void gfx_v8_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num);
+int gfx_v8_0_get_cu_info(struct amdgpu_device *adev, struct amdgpu_cu_info *cu_info);
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
new file mode 100644 (file)
index 0000000..ac8cff8
--- /dev/null
@@ -0,0 +1,1271 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include <linux/firmware.h>
+#include "drmP.h"
+#include "amdgpu.h"
+#include "gmc_v8_0.h"
+#include "amdgpu_ucode.h"
+
+#include "gmc/gmc_8_1_d.h"
+#include "gmc/gmc_8_1_sh_mask.h"
+
+#include "bif/bif_5_0_d.h"
+#include "bif/bif_5_0_sh_mask.h"
+
+#include "oss/oss_3_0_d.h"
+#include "oss/oss_3_0_sh_mask.h"
+
+#include "vid.h"
+#include "vi.h"
+
+static void gmc_v8_0_set_gart_funcs(struct amdgpu_device *adev);
+static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev);
+
+MODULE_FIRMWARE("radeon/topaz_mc.bin");
+MODULE_FIRMWARE("radeon/tonga_mc.bin");
+
+static const u32 golden_settings_tonga_a11[] =
+{
+       mmMC_ARB_WTM_GRPWT_RD, 0x00000003, 0x00000000,
+       mmMC_HUB_RDREQ_DMIF_LIMIT, 0x0000007f, 0x00000028,
+       mmMC_HUB_WDP_UMC, 0x00007fb6, 0x00000991,
+       mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
+       mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
+       mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
+       mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff,
+};
+
+static const u32 tonga_mgcg_cgcg_init[] =
+{
+       mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
+};
+
+static const u32 golden_settings_iceland_a11[] =
+{
+       mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
+       mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
+       mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
+       mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff
+};
+
+static const u32 iceland_mgcg_cgcg_init[] =
+{
+       mmMC_MEM_POWER_LS, 0xffffffff,        0x00000104
+};
+
+static const u32 cz_mgcg_cgcg_init[] =
+{
+       mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
+};
+
+static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev)
+{
+       switch (adev->asic_type) {
+       case CHIP_TOPAZ:
+               amdgpu_program_register_sequence(adev,
+                                                iceland_mgcg_cgcg_init,
+                                                (const u32)ARRAY_SIZE(iceland_mgcg_cgcg_init));
+               amdgpu_program_register_sequence(adev,
+                                                golden_settings_iceland_a11,
+                                                (const u32)ARRAY_SIZE(golden_settings_iceland_a11));
+               break;
+       case CHIP_TONGA:
+               amdgpu_program_register_sequence(adev,
+                                                tonga_mgcg_cgcg_init,
+                                                (const u32)ARRAY_SIZE(tonga_mgcg_cgcg_init));
+               amdgpu_program_register_sequence(adev,
+                                                golden_settings_tonga_a11,
+                                                (const u32)ARRAY_SIZE(golden_settings_tonga_a11));
+               break;
+       case CHIP_CARRIZO:
+               amdgpu_program_register_sequence(adev,
+                                                cz_mgcg_cgcg_init,
+                                                (const u32)ARRAY_SIZE(cz_mgcg_cgcg_init));
+               break;
+       default:
+               break;
+       }
+}
+
+/**
+ * gmc_v8_0_mc_wait_for_idle - wait for MC idle callback.
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Wait for the MC (memory controller) to be idle (VI).
+ * Returns 0 if the MC is idle, -1 if not.
+ */
+int gmc_v8_0_mc_wait_for_idle(struct amdgpu_device *adev)
+{
+       unsigned i;
+       u32 tmp;
+
+       for (i = 0; i < adev->usec_timeout; i++) {
+               /* read MC_STATUS */
+               tmp = RREG32(mmSRBM_STATUS) & (SRBM_STATUS__VMC_BUSY_MASK |
+                                              SRBM_STATUS__MCB_BUSY_MASK |
+                                              SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
+                                              SRBM_STATUS__MCC_BUSY_MASK |
+                                              SRBM_STATUS__MCD_BUSY_MASK |
+                                              SRBM_STATUS__VMC1_BUSY_MASK);
+               if (!tmp)
+                       return 0;
+               udelay(1);
+       }
+       return -1;
+}
+
+void gmc_v8_0_mc_stop(struct amdgpu_device *adev,
+                     struct amdgpu_mode_mc_save *save)
+{
+       u32 blackout;
+
+       if (adev->mode_info.num_crtc)
+               amdgpu_display_stop_mc_access(adev, save);
+
+       amdgpu_asic_wait_for_mc_idle(adev);
+
+       blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
+       if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) {
+               /* Block CPU access */
+               WREG32(mmBIF_FB_EN, 0);
+               /* blackout the MC */
+               blackout = REG_SET_FIELD(blackout,
+                                        MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 1);
+               WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout);
+       }
+       /* wait for the MC to settle */
+       udelay(100);
+}
+
+void gmc_v8_0_mc_resume(struct amdgpu_device *adev,
+                       struct amdgpu_mode_mc_save *save)
+{
+       u32 tmp;
+
+       /* unblackout the MC */
+       tmp = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
+       tmp = REG_SET_FIELD(tmp, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
+       WREG32(mmMC_SHARED_BLACKOUT_CNTL, tmp);
+       /* allow CPU access */
+       tmp = REG_SET_FIELD(0, BIF_FB_EN, FB_READ_EN, 1);
+       tmp = REG_SET_FIELD(tmp, BIF_FB_EN, FB_WRITE_EN, 1);
+       WREG32(mmBIF_FB_EN, tmp);
+
+       if (adev->mode_info.num_crtc)
+               amdgpu_display_resume_mc_access(adev, save);
+}
+
+/**
+ * gmc_v8_0_init_microcode - load ucode images from disk
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Use the firmware interface to load the ucode images into
+ * the driver (not loaded into hw).
+ * Returns 0 on success, error on failure.
+ */
+static int gmc_v8_0_init_microcode(struct amdgpu_device *adev)
+{
+       const char *chip_name;
+       char fw_name[30];
+       int err;
+
+       DRM_DEBUG("\n");
+
+       switch (adev->asic_type) {
+       case CHIP_TOPAZ:
+               chip_name = "topaz";
+               break;
+       case CHIP_TONGA:
+               chip_name = "tonga";
+               break;
+       case CHIP_CARRIZO:
+               return 0;
+       default:
+               BUG();
+       }
+
+       snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
+       err = request_firmware(&adev->mc.fw, fw_name, adev->dev);
+       if (err)
+               goto out;
+       err = amdgpu_ucode_validate(adev->mc.fw);
+
+out:
+       if (err) {
+               printk(KERN_ERR
+                      "mc: Failed to load firmware \"%s\"\n",
+                      fw_name);
+               release_firmware(adev->mc.fw);
+               adev->mc.fw = NULL;
+       }
+       return err;
+}
+
+/**
+ * gmc_v8_0_mc_load_microcode - load MC ucode into the hw
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Load the GDDR MC ucode into the hw (VI).
+ * Returns 0 on success, error on failure.
+ */
+static int gmc_v8_0_mc_load_microcode(struct amdgpu_device *adev)
+{
+       const struct mc_firmware_header_v1_0 *hdr;
+       const __le32 *fw_data = NULL;
+       const __le32 *io_mc_regs = NULL;
+       u32 running, blackout = 0;
+       int i, ucode_size, regs_size;
+
+       if (!adev->mc.fw)
+               return -EINVAL;
+
+       hdr = (const struct mc_firmware_header_v1_0 *)adev->mc.fw->data;
+       amdgpu_ucode_print_mc_hdr(&hdr->header);
+
+       adev->mc.fw_version = le32_to_cpu(hdr->header.ucode_version);
+       regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
+       io_mc_regs = (const __le32 *)
+               (adev->mc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
+       ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
+       fw_data = (const __le32 *)
+               (adev->mc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+
+       running = REG_GET_FIELD(RREG32(mmMC_SEQ_SUP_CNTL), MC_SEQ_SUP_CNTL, RUN);
+
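+       /* only load the MC firmware if the MC sequencer is not already running */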
+       if (running == 0) {
+               if (running) {
+                       blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
+                       WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout | 1);
+               }
+
+               /* reset the engine and set to writable */
+               WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
+               WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010);
+
+               /* load mc io regs */
+               for (i = 0; i < regs_size; i++) {
+                       WREG32(mmMC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(io_mc_regs++));
+                       WREG32(mmMC_SEQ_IO_DEBUG_DATA, le32_to_cpup(io_mc_regs++));
+               }
+               /* load the MC ucode */
+               for (i = 0; i < ucode_size; i++)
+                       WREG32(mmMC_SEQ_SUP_PGM, le32_to_cpup(fw_data++));
+
+               /* put the engine back into the active state */
+               WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
+               WREG32(mmMC_SEQ_SUP_CNTL, 0x00000004);
+               WREG32(mmMC_SEQ_SUP_CNTL, 0x00000001);
+
+               /* wait for training to complete */
+               for (i = 0; i < adev->usec_timeout; i++) {
+                       if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL),
+                                         MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D0))
+                               break;
+                       udelay(1);
+               }
+               for (i = 0; i < adev->usec_timeout; i++) {
+                       if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL),
+                                         MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D1))
+                               break;
+                       udelay(1);
+               }
+
+               if (running)
+                       WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout);
+       }
+
+       return 0;
+}
+
+static void gmc_v8_0_vram_gtt_location(struct amdgpu_device *adev,
+                                      struct amdgpu_mc *mc)
+{
+       if (mc->mc_vram_size > 0xFFC0000000ULL) {
+               /* leave room for at least 1024M GTT */
+               dev_warn(adev->dev, "limiting VRAM\n");
+               mc->real_vram_size = 0xFFC0000000ULL;
+               mc->mc_vram_size = 0xFFC0000000ULL;
+       }
+       amdgpu_vram_location(adev, &adev->mc, 0);
+       adev->mc.gtt_base_align = 0;
+       amdgpu_gtt_location(adev, mc);
+}
+
+/**
+ * gmc_v8_0_mc_program - program the GPU memory controller
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Set the location of vram, gart, and AGP in the GPU's
+ * physical address space (VI).
+ */
+static void gmc_v8_0_mc_program(struct amdgpu_device *adev)
+{
+       struct amdgpu_mode_mc_save save;
+       u32 tmp;
+       int i, j;
+
+       /* Initialize HDP */
+       for (i = 0, j = 0; i < 32; i++, j += 0x6) {
+               WREG32((0xb05 + j), 0x00000000);
+               WREG32((0xb06 + j), 0x00000000);
+               WREG32((0xb07 + j), 0x00000000);
+               WREG32((0xb08 + j), 0x00000000);
+               WREG32((0xb09 + j), 0x00000000);
+       }
+       WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);
+
+       if (adev->mode_info.num_crtc)
+               amdgpu_display_set_vga_render_state(adev, false);
+
+       gmc_v8_0_mc_stop(adev, &save);
+       if (amdgpu_asic_wait_for_mc_idle(adev)) {
+               dev_warn(adev->dev, "Wait for MC idle timed out!\n");
+       }
+       /* Update configuration */
+       WREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
+              adev->mc.vram_start >> 12);
+       WREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+              adev->mc.vram_end >> 12);
+       WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
+              adev->vram_scratch.gpu_addr >> 12);
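+       /* MC_VM_FB_LOCATION packs the VRAM range: bits 31:16 = end >> 24, bits 15:0 = start >> 24 */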
+       tmp = ((adev->mc.vram_end >> 24) & 0xFFFF) << 16;
+       tmp |= ((adev->mc.vram_start >> 24) & 0xFFFF);
+       WREG32(mmMC_VM_FB_LOCATION, tmp);
+       /* XXX double check these! */
+       WREG32(mmHDP_NONSURFACE_BASE, (adev->mc.vram_start >> 8));
+       WREG32(mmHDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
+       WREG32(mmHDP_NONSURFACE_SIZE, 0x3FFFFFFF);
+       WREG32(mmMC_VM_AGP_BASE, 0);
+       WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF);
+       WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF);
+       if (amdgpu_asic_wait_for_mc_idle(adev)) {
+               dev_warn(adev->dev, "Wait for MC idle timed out!\n");
+       }
+       gmc_v8_0_mc_resume(adev, &save);
+
+       WREG32(mmBIF_FB_EN, BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK);
+
+       tmp = RREG32(mmHDP_MISC_CNTL);
+       tmp = REG_SET_FIELD(tmp, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 1);
+       WREG32(mmHDP_MISC_CNTL, tmp);
+
+       tmp = RREG32(mmHDP_HOST_PATH_CNTL);
+       WREG32(mmHDP_HOST_PATH_CNTL, tmp);
+}
+
+/**
+ * gmc_v8_0_mc_init - initialize the memory controller driver params
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Look up the amount of vram, vram width, and decide how to place
+ * vram and gart within the GPU's physical address space (VI).
+ * Returns 0 for success.
+ */
+static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
+{
+       u32 tmp;
+       int chansize, numchan;
+
+       /* Get VRAM information */
+       tmp = RREG32(mmMC_ARB_RAMCFG);
+       if (REG_GET_FIELD(tmp, MC_ARB_RAMCFG, CHANSIZE)) {
+               chansize = 64;
+       } else {
+               chansize = 32;
+       }
+       tmp = RREG32(mmMC_SHARED_CHMAP);
+       switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) {
+       case 0:
+       default:
+               numchan = 1;
+               break;
+       case 1:
+               numchan = 2;
+               break;
+       case 2:
+               numchan = 4;
+               break;
+       case 3:
+               numchan = 8;
+               break;
+       case 4:
+               numchan = 3;
+               break;
+       case 5:
+               numchan = 6;
+               break;
+       case 6:
+               numchan = 10;
+               break;
+       case 7:
+               numchan = 12;
+               break;
+       case 8:
+               numchan = 16;
+               break;
+       }
+       adev->mc.vram_width = numchan * chansize;
+       /* Could aper size report 0? */
+       adev->mc.aper_base = pci_resource_start(adev->pdev, 0);
+       adev->mc.aper_size = pci_resource_len(adev->pdev, 0);
+       /* CONFIG_MEMSIZE is in MB */
+       adev->mc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
+       adev->mc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
+       adev->mc.visible_vram_size = adev->mc.aper_size;
+
+       /* unless the user has overridden it, set the gart
+        * size equal to 1024MB or the vram size, whichever is larger.
+        */
+       if (amdgpu_gart_size == -1)
+               adev->mc.gtt_size = max((1024ULL << 20), adev->mc.mc_vram_size);
+       else
+               adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20;
+
+       gmc_v8_0_vram_gtt_location(adev, &adev->mc);
+
+       return 0;
+}
+
+/*
+ * GART
+ * VMID 0 is the physical GPU addresses as used by the kernel.
+ * VMIDs 1-15 are used for userspace clients and are handled
+ * by the amdgpu vm/hsa code.
+ */
+
+/**
+ * gmc_v8_0_gart_flush_gpu_tlb - gart tlb flush callback
+ *
+ * @adev: amdgpu_device pointer
+ * @vmid: vm instance to flush
+ *
+ * Flush the TLB for the requested page table (VI).
+ */
+static void gmc_v8_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
+                                       uint32_t vmid)
+{
+       /* flush hdp cache */
+       WREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0);
+
+       /* bits 0-15 are the VM contexts0-15 */
+       WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
+}
+
+/**
+ * gmc_v8_0_gart_set_pte_pde - update the page tables using MMIO
+ *
+ * @adev: amdgpu_device pointer
+ * @cpu_pt_addr: cpu address of the page table
+ * @gpu_page_idx: entry in the page table to update
+ * @addr: dst addr to write into pte/pde
+ * @flags: access flags
+ *
+ * Update the page tables using the CPU.
+ */
+static int gmc_v8_0_gart_set_pte_pde(struct amdgpu_device *adev,
+                                    void *cpu_pt_addr,
+                                    uint32_t gpu_page_idx,
+                                    uint64_t addr,
+                                    uint32_t flags)
+{
+       void __iomem *ptr = (void *)cpu_pt_addr;
+       uint64_t value;
+
+       /*
+        * PTE format on VI:
+        * 63:40 reserved
+        * 39:12 4k physical page base address
+        * 11:7 fragment
+        * 6 write
+        * 5 read
+        * 4 exe
+        * 3 reserved
+        * 2 snooped
+        * 1 system
+        * 0 valid
+        *
+        * PDE format on VI:
+        * 63:59 block fragment size
+        * 58:40 reserved
+        * 39:1 physical base address of PTE
+        * bits 5:1 must be 0.
+        * 0 valid
+        */
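+       /* e.g. a valid, readable, writable system page at 0x12345000 would be
+        * encoded as 0x12345063 (flags = write | read | system | valid)
+        */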
+       value = addr & 0x000000FFFFFFF000ULL;
+       value |= flags;
+       writeq(value, ptr + (gpu_page_idx * 8));
+
+       return 0;
+}
+
+/**
+ * gmc_v8_0_gart_enable - gart enable
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * This sets up the TLBs, programs the page tables for VMID0,
+ * sets up the hw for VMIDs 1-15 which are allocated on
+ * demand, and sets up the global locations for the LDS, GDS,
+ * and GPUVM for FSA64 clients (VI).
+ * Returns 0 for success, errors for failure.
+ */
+static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
+{
+       int r, i;
+       u32 tmp;
+
+       if (adev->gart.robj == NULL) {
+               dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
+               return -EINVAL;
+       }
+       r = amdgpu_gart_table_vram_pin(adev);
+       if (r)
+               return r;
+       /* Setup TLB control */
+       tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL);
+       tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1);
+       tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 1);
+       tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3);
+       tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 1);
+       tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
+       WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp);
+       /* Setup L2 cache */
+       tmp = RREG32(mmVM_L2_CNTL);
+       tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1);
+       tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 1);
+       tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE, 1);
+       tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE, 1);
+       tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, EFFECTIVE_L2_QUEUE_SIZE, 7);
+       tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
+       WREG32(mmVM_L2_CNTL, tmp);
+       tmp = RREG32(mmVM_L2_CNTL2);
+       tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
+       tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
+       WREG32(mmVM_L2_CNTL2, tmp);
+       tmp = RREG32(mmVM_L2_CNTL3);
+       tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY, 1);
+       tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 4);
+       tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, 4);
+       WREG32(mmVM_L2_CNTL3, tmp);
+       /* XXX: set to enable PTE/PDE in system memory */
+       tmp = RREG32(mmVM_L2_CNTL4);
+       tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PDE_REQUEST_PHYSICAL, 0);
+       tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PDE_REQUEST_SHARED, 0);
+       tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PDE_REQUEST_SNOOP, 0);
+       tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PTE_REQUEST_PHYSICAL, 0);
+       tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PTE_REQUEST_SHARED, 0);
+       tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PTE_REQUEST_SNOOP, 0);
+       tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PDE_REQUEST_PHYSICAL, 0);
+       tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PDE_REQUEST_SHARED, 0);
+       tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PDE_REQUEST_SNOOP, 0);
+       tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_PHYSICAL, 0);
+       tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_SHARED, 0);
+       tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_SNOOP, 0);
+       WREG32(mmVM_L2_CNTL4, tmp);
+       /* setup context0 */
+       WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12);
+       WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gtt_end >> 12);
+       WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
+       WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
+                       (u32)(adev->dummy_page.addr >> 12));
+       WREG32(mmVM_CONTEXT0_CNTL2, 0);
+       tmp = RREG32(mmVM_CONTEXT0_CNTL);
+       tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
+       tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
+       tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+       WREG32(mmVM_CONTEXT0_CNTL, tmp);
+
+       WREG32(mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR, 0);
+       WREG32(mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR, 0);
+       WREG32(mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET, 0);
+
+       /* empty context1-15 */
+       /* FIXME start with 4G, once using 2 level pt switch to full
+        * vm size space
+        */
+       /* set vm size, must be a multiple of 4 */
+       WREG32(mmVM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
+       WREG32(mmVM_CONTEXT1_PAGE_TABLE_END_ADDR, adev->vm_manager.max_pfn);
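+       /* context 1-7 page table base registers follow the context0 register,
+        * contexts 8-15 start at the VM_CONTEXT8 register block
+        */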
+       for (i = 1; i < 16; i++) {
+               if (i < 8)
+                       WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i,
+                              adev->gart.table_addr >> 12);
+               else
+                       WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8,
+                              adev->gart.table_addr >> 12);
+       }
+
+       /* enable context1-15 */
+       WREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
+              (u32)(adev->dummy_page.addr >> 12));
+       WREG32(mmVM_CONTEXT1_CNTL2, 4);
+       tmp = RREG32(mmVM_CONTEXT1_CNTL);
+       tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
+       tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH, 1);
+       tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT, 1);
+       tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+       tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT, 1);
+       tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+       tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT, 1);
+       tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+       tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, VALID_PROTECTION_FAULT_ENABLE_INTERRUPT, 1);
+       tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+       tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, READ_PROTECTION_FAULT_ENABLE_INTERRUPT, 1);
+       tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+       tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT, 1);
+       tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+       tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT, 1);
+       tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
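+       /* amdgpu_vm_block_size is the page table block size in bits;
+        * the hardware field expects it relative to the 9 bit (512 entry) minimum
+        */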
+       tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_BLOCK_SIZE,
+                           amdgpu_vm_block_size - 9);
+       WREG32(mmVM_CONTEXT1_CNTL, tmp);
+
+       gmc_v8_0_gart_flush_gpu_tlb(adev, 0);
+       DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
+                (unsigned)(adev->mc.gtt_size >> 20),
+                (unsigned long long)adev->gart.table_addr);
+       adev->gart.ready = true;
+       return 0;
+}
+
+static int gmc_v8_0_gart_init(struct amdgpu_device *adev)
+{
+       int r;
+
+       if (adev->gart.robj) {
+               WARN(1, "VI PCIE GART already initialized\n");
+               return 0;
+       }
+       /* Initialize common gart structure */
+       r = amdgpu_gart_init(adev);
+       if (r)
+               return r;
+       adev->gart.table_size = adev->gart.num_gpu_pages * 8;
+       return amdgpu_gart_table_vram_alloc(adev);
+}
+
+/**
+ * gmc_v8_0_gart_disable - gart disable
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * This disables all VM page tables (VI).
+ */
+static void gmc_v8_0_gart_disable(struct amdgpu_device *adev)
+{
+       u32 tmp;
+
+       /* Disable all tables */
+       WREG32(mmVM_CONTEXT0_CNTL, 0);
+       WREG32(mmVM_CONTEXT1_CNTL, 0);
+       /* Setup TLB control */
+       tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL);
+       tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
+       tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 0);
+       tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 0);
+       WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp);
+       /* Setup L2 cache */
+       tmp = RREG32(mmVM_L2_CNTL);
+       tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
+       WREG32(mmVM_L2_CNTL, tmp);
+       WREG32(mmVM_L2_CNTL2, 0);
+       amdgpu_gart_table_vram_unpin(adev);
+}
+
+/**
+ * gmc_v8_0_gart_fini - vm fini callback
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Tears down the driver GART/VM setup (VI).
+ */
+static void gmc_v8_0_gart_fini(struct amdgpu_device *adev)
+{
+       amdgpu_gart_table_vram_free(adev);
+       amdgpu_gart_fini(adev);
+}
+
+/*
+ * vm
+ * VMID 0 is the physical GPU addresses as used by the kernel.
+ * VMIDs 1-15 are used for userspace clients and are handled
+ * by the amdgpu vm/hsa code.
+ */
+/**
+ * gmc_v8_0_vm_init - vi vm init callback
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Inits VI specific vm parameters (number of VMs, base of vram for
+ * VMIDs 1-15).
+ * Returns 0 for success.
+ */
+static int gmc_v8_0_vm_init(struct amdgpu_device *adev)
+{
+       /*
+        * number of VMs
+        * VMID 0 is reserved for System
+        * amdgpu graphics/compute will use VMIDs 1-7
+        * amdkfd will use VMIDs 8-15
+        */
+       adev->vm_manager.nvm = AMDGPU_NUM_OF_VMIDS;
+
+       /* base offset of vram pages */
+       if (adev->flags & AMDGPU_IS_APU) {
+               u64 tmp = RREG32(mmMC_VM_FB_OFFSET);
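+               /* MC_VM_FB_OFFSET is in units of 4MB, hence the shift by 22 bits */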
+               tmp <<= 22;
+               adev->vm_manager.vram_base_offset = tmp;
+       } else {
+               adev->vm_manager.vram_base_offset = 0;
+       }
+
+       return 0;
+}
+
+/**
+ * gmc_v8_0_vm_fini - vi vm fini callback
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Tear down any asic specific VM setup (VI).
+ */
+static void gmc_v8_0_vm_fini(struct amdgpu_device *adev)
+{
+}
+
+/**
+ * gmc_v8_0_vm_decode_fault - print human readable fault info
+ *
+ * @adev: amdgpu_device pointer
+ * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value
+ * @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value
+ *
+ * Print human readable fault information (VI).
+ */
+static void gmc_v8_0_vm_decode_fault(struct amdgpu_device *adev,
+                                    u32 status, u32 addr, u32 mc_client)
+{
+       u32 mc_id;
+       u32 vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
+       u32 protections = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
+                                       PROTECTIONS);
+       char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
+               (mc_client >> 8) & 0xff, mc_client & 0xff, 0 };
+
+       mc_id = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
+                             MEMORY_CLIENT_ID);
+
+       printk("VM fault (0x%02x, vmid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
+              protections, vmid, addr,
+              REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
+                            MEMORY_CLIENT_RW) ?
+              "write" : "read", block, mc_client, mc_id);
+}
+
+static int gmc_v8_0_early_init(struct amdgpu_device *adev)
+{
+       gmc_v8_0_set_gart_funcs(adev);
+       gmc_v8_0_set_irq_funcs(adev);
+
+       if (adev->flags & AMDGPU_IS_APU) {
+               adev->mc.is_gddr5 = false;
+       } else {
+               u32 tmp = RREG32(mmMC_SEQ_MISC0);
+
+               if (((tmp & MC_SEQ_MISC0__GDDR5_MASK) >>
+                    MC_SEQ_MISC0__GDDR5__SHIFT) == MC_SEQ_MISC0__GDDR5_VALUE)
+                       adev->mc.is_gddr5 = true;
+               else
+                       adev->mc.is_gddr5 = false;
+       }
+
+       return 0;
+}
+
+static int gmc_v8_0_sw_init(struct amdgpu_device *adev)
+{
+       int r;
+       int dma_bits;
+
+       r = amdgpu_gem_init(adev);
+       if (r)
+               return r;
+
+       r = amdgpu_irq_add_id(adev, 146, &adev->mc.vm_fault);
+       if (r)
+               return r;
+
+       r = amdgpu_irq_add_id(adev, 147, &adev->mc.vm_fault);
+       if (r)
+               return r;
+
+       /* Adjust VM size here.
+        * Max GPUVM size for VI is 40 bits.
+        */
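+       /* amdgpu_vm_size is in GB; shifting by 18 gives the number of 4KB pages (1GB = 2^18 pages) */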
+       adev->vm_manager.max_pfn = amdgpu_vm_size << 18;
+
+       /* Set the internal MC address mask
+        * This is the max address of the GPU's
+        * internal address space.
+        */
+       adev->mc.mc_mask = 0xffffffffffULL; /* 40 bit MC */
+
+       /* set DMA mask + need_dma32 flags.
+        * PCIE - can handle 40-bits.
+        * IGP - can handle 40-bits
+        * PCI - dma32 for legacy pci gart, 40 bits on newer asics
+        */
+       adev->need_dma32 = false;
+       dma_bits = adev->need_dma32 ? 32 : 40;
+       r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
+       if (r) {
+               adev->need_dma32 = true;
+               dma_bits = 32;
+               printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
+       }
+       r = pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
+       if (r) {
+               pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
+               printk(KERN_WARNING "amdgpu: No coherent DMA available.\n");
+       }
+
+       r = gmc_v8_0_init_microcode(adev);
+       if (r) {
+               DRM_ERROR("Failed to load mc firmware!\n");
+               return r;
+       }
+
+       r = gmc_v8_0_mc_init(adev);
+       if (r)
+               return r;
+
+       /* Memory manager */
+       r = amdgpu_bo_init(adev);
+       if (r)
+               return r;
+
+       r = gmc_v8_0_gart_init(adev);
+       if (r)
+               return r;
+
+       if (!adev->vm_manager.enabled) {
+               r = gmc_v8_0_vm_init(adev);
+               if (r) {
+                       dev_err(adev->dev, "vm manager initialization failed (%d).\n", r);
+                       return r;
+               }
+               adev->vm_manager.enabled = true;
+       }
+
+       return r;
+}
+
+static int gmc_v8_0_sw_fini(struct amdgpu_device *adev)
+{
+       int i;
+
+       if (adev->vm_manager.enabled) {
+               for (i = 0; i < AMDGPU_NUM_VM; ++i)
+                       amdgpu_fence_unref(&adev->vm_manager.active[i]);
+               gmc_v8_0_vm_fini(adev);
+               adev->vm_manager.enabled = false;
+       }
+       gmc_v8_0_gart_fini(adev);
+       amdgpu_gem_fini(adev);
+       amdgpu_bo_fini(adev);
+
+       return 0;
+}
+
+static int gmc_v8_0_hw_init(struct amdgpu_device *adev)
+{
+       int r;
+
+       gmc_v8_0_init_golden_registers(adev);
+
+       gmc_v8_0_mc_program(adev);
+
+       if (!(adev->flags & AMDGPU_IS_APU)) {
+               r = gmc_v8_0_mc_load_microcode(adev);
+               if (r) {
+                       DRM_ERROR("Failed to load MC firmware!\n");
+                       return r;
+               }
+       }
+
+       r = gmc_v8_0_gart_enable(adev);
+       if (r)
+               return r;
+
+       return r;
+}
+
+static int gmc_v8_0_hw_fini(struct amdgpu_device *adev)
+{
+       gmc_v8_0_gart_disable(adev);
+
+       return 0;
+}
+
+static int gmc_v8_0_suspend(struct amdgpu_device *adev)
+{
+       int i;
+
+       if (adev->vm_manager.enabled) {
+               for (i = 0; i < AMDGPU_NUM_VM; ++i)
+                       amdgpu_fence_unref(&adev->vm_manager.active[i]);
+               gmc_v8_0_vm_fini(adev);
+               adev->vm_manager.enabled = false;
+       }
+       gmc_v8_0_hw_fini(adev);
+
+       return 0;
+}
+
+static int gmc_v8_0_resume(struct amdgpu_device *adev)
+{
+       int r;
+
+       r = gmc_v8_0_hw_init(adev);
+       if (r)
+               return r;
+
+       if (!adev->vm_manager.enabled) {
+               r = gmc_v8_0_vm_init(adev);
+               if (r) {
+                       dev_err(adev->dev, "vm manager initialization failed (%d).\n", r);
+                       return r;
+               }
+               adev->vm_manager.enabled = true;
+       }
+
+       return r;
+}
+
+static bool gmc_v8_0_is_idle(struct amdgpu_device *adev)
+{
+       u32 tmp = RREG32(mmSRBM_STATUS);
+
+       if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
+                  SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK | SRBM_STATUS__VMC_BUSY_MASK))
+               return false;
+
+       return true;
+}
+
+static int gmc_v8_0_wait_for_idle(struct amdgpu_device *adev)
+{
+       unsigned i;
+       u32 tmp;
+
+       for (i = 0; i < adev->usec_timeout; i++) {
+               /* read MC_STATUS */
+               tmp = RREG32(mmSRBM_STATUS) & (SRBM_STATUS__MCB_BUSY_MASK |
+                                              SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
+                                              SRBM_STATUS__MCC_BUSY_MASK |
+                                              SRBM_STATUS__MCD_BUSY_MASK |
+                                              SRBM_STATUS__VMC_BUSY_MASK |
+                                              SRBM_STATUS__VMC1_BUSY_MASK);
+               if (!tmp)
+                       return 0;
+               udelay(1);
+       }
+       return -ETIMEDOUT;
+}
+
+static void gmc_v8_0_print_status(struct amdgpu_device *adev)
+{
+       int i, j;
+
+       dev_info(adev->dev, "GMC 8.x registers\n");
+       dev_info(adev->dev, "  SRBM_STATUS=0x%08X\n",
+               RREG32(mmSRBM_STATUS));
+       dev_info(adev->dev, "  SRBM_STATUS2=0x%08X\n",
+               RREG32(mmSRBM_STATUS2));
+
+       dev_info(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
+                RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR));
+       dev_info(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
+                RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS));
+       dev_info(adev->dev, "  MC_VM_MX_L1_TLB_CNTL=0x%08X\n",
+                RREG32(mmMC_VM_MX_L1_TLB_CNTL));
+       dev_info(adev->dev, "  VM_L2_CNTL=0x%08X\n",
+                RREG32(mmVM_L2_CNTL));
+       dev_info(adev->dev, "  VM_L2_CNTL2=0x%08X\n",
+                RREG32(mmVM_L2_CNTL2));
+       dev_info(adev->dev, "  VM_L2_CNTL3=0x%08X\n",
+                RREG32(mmVM_L2_CNTL3));
+       dev_info(adev->dev, "  VM_L2_CNTL4=0x%08X\n",
+                RREG32(mmVM_L2_CNTL4));
+       dev_info(adev->dev, "  VM_CONTEXT0_PAGE_TABLE_START_ADDR=0x%08X\n",
+                RREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR));
+       dev_info(adev->dev, "  VM_CONTEXT0_PAGE_TABLE_END_ADDR=0x%08X\n",
+                RREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR));
+       dev_info(adev->dev, "  VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR=0x%08X\n",
+                RREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR));
+       dev_info(adev->dev, "  VM_CONTEXT0_CNTL2=0x%08X\n",
+                RREG32(mmVM_CONTEXT0_CNTL2));
+       dev_info(adev->dev, "  VM_CONTEXT0_CNTL=0x%08X\n",
+                RREG32(mmVM_CONTEXT0_CNTL));
+       dev_info(adev->dev, "  VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR=0x%08X\n",
+                RREG32(mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR));
+       dev_info(adev->dev, "  VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR=0x%08X\n",
+                RREG32(mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR));
+       dev_info(adev->dev, "  mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET=0x%08X\n",
+                RREG32(mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET));
+       dev_info(adev->dev, "  VM_CONTEXT1_PAGE_TABLE_START_ADDR=0x%08X\n",
+                RREG32(mmVM_CONTEXT1_PAGE_TABLE_START_ADDR));
+       dev_info(adev->dev, "  VM_CONTEXT1_PAGE_TABLE_END_ADDR=0x%08X\n",
+                RREG32(mmVM_CONTEXT1_PAGE_TABLE_END_ADDR));
+       dev_info(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR=0x%08X\n",
+                RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR));
+       dev_info(adev->dev, "  VM_CONTEXT1_CNTL2=0x%08X\n",
+                RREG32(mmVM_CONTEXT1_CNTL2));
+       dev_info(adev->dev, "  VM_CONTEXT1_CNTL=0x%08X\n",
+                RREG32(mmVM_CONTEXT1_CNTL));
+       for (i = 0; i < 16; i++) {
+               if (i < 8)
+                       dev_info(adev->dev, "  VM_CONTEXT%d_PAGE_TABLE_BASE_ADDR=0x%08X\n",
+                                i, RREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i));
+               else
+                       dev_info(adev->dev, "  VM_CONTEXT%d_PAGE_TABLE_BASE_ADDR=0x%08X\n",
+                                i, RREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8));
+       }
+       dev_info(adev->dev, "  MC_VM_SYSTEM_APERTURE_LOW_ADDR=0x%08X\n",
+                RREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR));
+       dev_info(adev->dev, "  MC_VM_SYSTEM_APERTURE_HIGH_ADDR=0x%08X\n",
+                RREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR));
+       dev_info(adev->dev, "  MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR=0x%08X\n",
+                RREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR));
+       dev_info(adev->dev, "  MC_VM_FB_LOCATION=0x%08X\n",
+                RREG32(mmMC_VM_FB_LOCATION));
+       dev_info(adev->dev, "  MC_VM_AGP_BASE=0x%08X\n",
+                RREG32(mmMC_VM_AGP_BASE));
+       dev_info(adev->dev, "  MC_VM_AGP_TOP=0x%08X\n",
+                RREG32(mmMC_VM_AGP_TOP));
+       dev_info(adev->dev, "  MC_VM_AGP_BOT=0x%08X\n",
+                RREG32(mmMC_VM_AGP_BOT));
+
+       dev_info(adev->dev, "  HDP_REG_COHERENCY_FLUSH_CNTL=0x%08X\n",
+                RREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL));
+       dev_info(adev->dev, "  HDP_NONSURFACE_BASE=0x%08X\n",
+                RREG32(mmHDP_NONSURFACE_BASE));
+       dev_info(adev->dev, "  HDP_NONSURFACE_INFO=0x%08X\n",
+                RREG32(mmHDP_NONSURFACE_INFO));
+       dev_info(adev->dev, "  HDP_NONSURFACE_SIZE=0x%08X\n",
+                RREG32(mmHDP_NONSURFACE_SIZE));
+       dev_info(adev->dev, "  HDP_MISC_CNTL=0x%08X\n",
+                RREG32(mmHDP_MISC_CNTL));
+       dev_info(adev->dev, "  HDP_HOST_PATH_CNTL=0x%08X\n",
+                RREG32(mmHDP_HOST_PATH_CNTL));
+
+       for (i = 0, j = 0; i < 32; i++, j += 0x6) {
+               dev_info(adev->dev, "  %d:\n", i);
+               dev_info(adev->dev, "  0x%04X=0x%08X\n",
+                        0xb05 + j, RREG32(0xb05 + j));
+               dev_info(adev->dev, "  0x%04X=0x%08X\n",
+                        0xb06 + j, RREG32(0xb06 + j));
+               dev_info(adev->dev, "  0x%04X=0x%08X\n",
+                        0xb07 + j, RREG32(0xb07 + j));
+               dev_info(adev->dev, "  0x%04X=0x%08X\n",
+                        0xb08 + j, RREG32(0xb08 + j));
+               dev_info(adev->dev, "  0x%04X=0x%08X\n",
+                        0xb09 + j, RREG32(0xb09 + j));
+       }
+
+       dev_info(adev->dev, "  BIF_FB_EN=0x%08X\n",
+                RREG32(mmBIF_FB_EN));
+}
+
+static int gmc_v8_0_soft_reset(struct amdgpu_device *adev)
+{
+       struct amdgpu_mode_mc_save save;
+       u32 srbm_soft_reset = 0;
+       u32 tmp = RREG32(mmSRBM_STATUS);
+
+       if (tmp & SRBM_STATUS__VMC_BUSY_MASK)
+               srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
+                                               SRBM_SOFT_RESET, SOFT_RESET_VMC, 1);
+
+       if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
+                  SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK)) {
+               if (!(adev->flags & AMDGPU_IS_APU))
+                       srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
+                                                       SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
+       }
+
+       if (srbm_soft_reset) {
+               gmc_v8_0_print_status(adev);
+
+               gmc_v8_0_mc_stop(adev, &save);
+               if (gmc_v8_0_wait_for_idle(adev)) {
+                       dev_warn(adev->dev, "Wait for GMC idle timed out!\n");
+               }
+
+               tmp = RREG32(mmSRBM_SOFT_RESET);
+               tmp |= srbm_soft_reset;
+               dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
+               WREG32(mmSRBM_SOFT_RESET, tmp);
+               tmp = RREG32(mmSRBM_SOFT_RESET);
+
+               udelay(50);
+
+               tmp &= ~srbm_soft_reset;
+               WREG32(mmSRBM_SOFT_RESET, tmp);
+               tmp = RREG32(mmSRBM_SOFT_RESET);
+
+               /* Wait a little for things to settle down */
+               udelay(50);
+
+               gmc_v8_0_mc_resume(adev, &save);
+               udelay(50);
+
+               gmc_v8_0_print_status(adev);
+       }
+
+       return 0;
+}
+
+static int gmc_v8_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
+                                            struct amdgpu_irq_src *src,
+                                            unsigned type,
+                                            enum amdgpu_interrupt_state state)
+{
+       u32 tmp;
+       u32 bits = (VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+                   VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+                   VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+                   VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+                   VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+                   VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+                   VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK);
+
+       switch (state) {
+       case AMDGPU_IRQ_STATE_DISABLE:
+               /* system context */
+               tmp = RREG32(mmVM_CONTEXT0_CNTL);
+               tmp &= ~bits;
+               WREG32(mmVM_CONTEXT0_CNTL, tmp);
+               /* VMs */
+               tmp = RREG32(mmVM_CONTEXT1_CNTL);
+               tmp &= ~bits;
+               WREG32(mmVM_CONTEXT1_CNTL, tmp);
+               break;
+       case AMDGPU_IRQ_STATE_ENABLE:
+               /* system context */
+               tmp = RREG32(mmVM_CONTEXT0_CNTL);
+               tmp |= bits;
+               WREG32(mmVM_CONTEXT0_CNTL, tmp);
+               /* VMs */
+               tmp = RREG32(mmVM_CONTEXT1_CNTL);
+               tmp |= bits;
+               WREG32(mmVM_CONTEXT1_CNTL, tmp);
+               break;
+       default:
+               break;
+       }
+
+       return 0;
+}
+
+static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
+                                     struct amdgpu_irq_src *source,
+                                     struct amdgpu_iv_entry *entry)
+{
+       u32 addr, status, mc_client;
+
+       addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
+       status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
+       mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT);
+       dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
+               entry->src_id, entry->src_data);
+       dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
+               addr);
+       dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
+               status);
+       gmc_v8_0_vm_decode_fault(adev, status, addr, mc_client);
+       /* reset addr and status */
+       WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);
+
+       return 0;
+}
+
+static int gmc_v8_0_set_clockgating_state(struct amdgpu_device *adev,
+                                         enum amdgpu_clockgating_state state)
+{
+       /* XXX handled via the smc on VI */
+
+       return 0;
+}
+
+static int gmc_v8_0_set_powergating_state(struct amdgpu_device *adev,
+                                         enum amdgpu_powergating_state state)
+{
+       return 0;
+}
+
+const struct amdgpu_ip_funcs gmc_v8_0_ip_funcs = {
+       .early_init = gmc_v8_0_early_init,
+       .late_init = NULL,
+       .sw_init = gmc_v8_0_sw_init,
+       .sw_fini = gmc_v8_0_sw_fini,
+       .hw_init = gmc_v8_0_hw_init,
+       .hw_fini = gmc_v8_0_hw_fini,
+       .suspend = gmc_v8_0_suspend,
+       .resume = gmc_v8_0_resume,
+       .is_idle = gmc_v8_0_is_idle,
+       .wait_for_idle = gmc_v8_0_wait_for_idle,
+       .soft_reset = gmc_v8_0_soft_reset,
+       .print_status = gmc_v8_0_print_status,
+       .set_clockgating_state = gmc_v8_0_set_clockgating_state,
+       .set_powergating_state = gmc_v8_0_set_powergating_state,
+};
+
+static const struct amdgpu_gart_funcs gmc_v8_0_gart_funcs = {
+       .flush_gpu_tlb = gmc_v8_0_gart_flush_gpu_tlb,
+       .set_pte_pde = gmc_v8_0_gart_set_pte_pde,
+};
+
+static const struct amdgpu_irq_src_funcs gmc_v8_0_irq_funcs = {
+       .set = gmc_v8_0_vm_fault_interrupt_state,
+       .process = gmc_v8_0_process_interrupt,
+};
+
+static void gmc_v8_0_set_gart_funcs(struct amdgpu_device *adev)
+{
+       if (adev->gart.gart_funcs == NULL)
+               adev->gart.gart_funcs = &gmc_v8_0_gart_funcs;
+}
+
+static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev)
+{
+       adev->mc.vm_fault.num_types = 1;
+       adev->mc.vm_fault.funcs = &gmc_v8_0_irq_funcs;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.h b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.h
new file mode 100644 (file)
index 0000000..2dd7f80
--- /dev/null
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __GMC_V8_0_H__
+#define __GMC_V8_0_H__
+
+extern const struct amdgpu_ip_funcs gmc_v8_0_ip_funcs;
+
+/* XXX these shouldn't be exported */
+void gmc_v8_0_mc_stop(struct amdgpu_device *adev,
+                     struct amdgpu_mode_mc_save *save);
+void gmc_v8_0_mc_resume(struct amdgpu_device *adev,
+                       struct amdgpu_mode_mc_save *save);
+int gmc_v8_0_mc_wait_for_idle(struct amdgpu_device *adev);
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c b/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c
new file mode 100644 (file)
index 0000000..8f5c54b
--- /dev/null
@@ -0,0 +1,172 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/firmware.h>
+#include "drmP.h"
+#include "amdgpu.h"
+#include "iceland_smumgr.h"
+
+MODULE_FIRMWARE("radeon/topaz_smc.bin");
+
+static void iceland_dpm_set_funcs(struct amdgpu_device *adev);
+
+static int iceland_dpm_early_init(struct amdgpu_device *adev)
+{
+       iceland_dpm_set_funcs(adev);
+
+       return 0;
+}
+
+static int iceland_dpm_init_microcode(struct amdgpu_device *adev)
+{
+       char fw_name[30] = "radeon/topaz_smc.bin";
+       int err;
+
+       err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
+       if (err)
+               goto out;
+       err = amdgpu_ucode_validate(adev->pm.fw);
+
+out:
+       if (err) {
+               DRM_ERROR("Failed to load firmware \"%s\"", fw_name);
+               release_firmware(adev->pm.fw);
+               adev->pm.fw = NULL;
+       }
+       return err;
+}
+
+static int iceland_dpm_sw_init(struct amdgpu_device *adev)
+{
+       int ret;
+
+       ret = iceland_dpm_init_microcode(adev);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
+static int iceland_dpm_sw_fini(struct amdgpu_device *adev)
+{
+       return 0;
+}
+
+static int iceland_dpm_hw_init(struct amdgpu_device *adev)
+{
+       int ret;
+
+       mutex_lock(&adev->pm.mutex);
+
+       ret = iceland_smu_init(adev);
+       if (ret) {
+               DRM_ERROR("SMU initialization failed\n");
+               goto fail;
+       }
+
+       ret = iceland_smu_start(adev);
+       if (ret) {
+               DRM_ERROR("SMU start failed\n");
+               goto fail;
+       }
+
+       mutex_unlock(&adev->pm.mutex);
+       return 0;
+
+fail:
+       adev->firmware.smu_load = false;
+       mutex_unlock(&adev->pm.mutex);
+       return -EINVAL;
+}
+
+static int iceland_dpm_hw_fini(struct amdgpu_device *adev)
+{
+       mutex_lock(&adev->pm.mutex);
+       iceland_smu_fini(adev);
+       mutex_unlock(&adev->pm.mutex);
+       return 0;
+}
+
+static int iceland_dpm_suspend(struct amdgpu_device *adev)
+{
+       iceland_dpm_hw_fini(adev);
+
+       return 0;
+}
+
+static int iceland_dpm_resume(struct amdgpu_device *adev)
+{
+       iceland_dpm_hw_init(adev);
+
+       return 0;
+}
+
+static int iceland_dpm_set_clockgating_state(struct amdgpu_device *adev,
+                       enum amdgpu_clockgating_state state)
+{
+       return 0;
+}
+
+static int iceland_dpm_set_powergating_state(struct amdgpu_device *adev,
+                       enum amdgpu_powergating_state state)
+{
+       return 0;
+}
+
+const struct amdgpu_ip_funcs iceland_dpm_ip_funcs = {
+       .early_init = iceland_dpm_early_init,
+       .late_init = NULL,
+       .sw_init = iceland_dpm_sw_init,
+       .sw_fini = iceland_dpm_sw_fini,
+       .hw_init = iceland_dpm_hw_init,
+       .hw_fini = iceland_dpm_hw_fini,
+       .suspend = iceland_dpm_suspend,
+       .resume = iceland_dpm_resume,
+       .is_idle = NULL,
+       .wait_for_idle = NULL,
+       .soft_reset = NULL,
+       .print_status = NULL,
+       .set_clockgating_state = iceland_dpm_set_clockgating_state,
+       .set_powergating_state = iceland_dpm_set_powergating_state,
+};
+
+static const struct amdgpu_dpm_funcs iceland_dpm_funcs = {
+       .get_temperature = NULL,
+       .pre_set_power_state = NULL,
+       .set_power_state = NULL,
+       .post_set_power_state = NULL,
+       .display_configuration_changed = NULL,
+       .get_sclk = NULL,
+       .get_mclk = NULL,
+       .print_power_state = NULL,
+       .debugfs_print_current_performance_level = NULL,
+       .force_performance_level = NULL,
+       .vblank_too_short = NULL,
+       .powergate_uvd = NULL,
+};
+
+static void iceland_dpm_set_funcs(struct amdgpu_device *adev)
+{
+       if (adev->pm.funcs == NULL)
+               adev->pm.funcs = &iceland_dpm_funcs;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
new file mode 100644 (file)
index 0000000..2de8adf
--- /dev/null
@@ -0,0 +1,435 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "drmP.h"
+#include "amdgpu.h"
+#include "amdgpu_ih.h"
+#include "vid.h"
+
+#include "oss/oss_2_4_d.h"
+#include "oss/oss_2_4_sh_mask.h"
+
+#include "bif/bif_5_1_d.h"
+#include "bif/bif_5_1_sh_mask.h"
+
+/*
+ * Interrupts
+ * Starting with r6xx, interrupts are handled via a ring buffer.
+ * Ring buffers are areas of GPU accessible memory that the GPU
+ * writes interrupt vectors into and the host reads vectors out of.
+ * There is a rptr (read pointer) that determines where the
+ * host is currently reading, and a wptr (write pointer)
+ * which determines where the GPU has written.  When the
+ * pointers are equal, the ring is idle.  When the GPU
+ * writes vectors to the ring buffer, it increments the
+ * wptr.  When there is an interrupt, the host then starts
+ * fetching commands and processing them until the pointers are
+ * equal again at which point it updates the rptr.
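+ *
+ * A rough sketch of the host side processing (function names illustrative):
+ *     wptr = get_wptr();
+ *     while (rptr != wptr) {
+ *             decode the 16 byte IV entry at rptr;
+ *             rptr += 16;
+ *     }
+ *     set_rptr();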
+ */
+
+static void iceland_ih_set_interrupt_funcs(struct amdgpu_device *adev);
+
+/**
+ * iceland_ih_enable_interrupts - Enable the interrupt ring buffer
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Enable the interrupt ring buffer (VI).
+ */
+static void iceland_ih_enable_interrupts(struct amdgpu_device *adev)
+{
+       u32 ih_cntl = RREG32(mmIH_CNTL);
+       u32 ih_rb_cntl = RREG32(mmIH_RB_CNTL);
+
+       ih_cntl = REG_SET_FIELD(ih_cntl, IH_CNTL, ENABLE_INTR, 1);
+       ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 1);
+       WREG32(mmIH_CNTL, ih_cntl);
+       WREG32(mmIH_RB_CNTL, ih_rb_cntl);
+       adev->irq.ih.enabled = true;
+}
+
+/**
+ * iceland_ih_disable_interrupts - Disable the interrupt ring buffer
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Disable the interrupt ring buffer (VI).
+ */
+static void iceland_ih_disable_interrupts(struct amdgpu_device *adev)
+{
+       u32 ih_rb_cntl = RREG32(mmIH_RB_CNTL);
+       u32 ih_cntl = RREG32(mmIH_CNTL);
+
+       ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 0);
+       ih_cntl = REG_SET_FIELD(ih_cntl, IH_CNTL, ENABLE_INTR, 0);
+       WREG32(mmIH_RB_CNTL, ih_rb_cntl);
+       WREG32(mmIH_CNTL, ih_cntl);
+       /* set rptr, wptr to 0 */
+       WREG32(mmIH_RB_RPTR, 0);
+       WREG32(mmIH_RB_WPTR, 0);
+       adev->irq.ih.enabled = false;
+       adev->irq.ih.rptr = 0;
+}
+
+/**
+ * iceland_ih_irq_init - init and enable the interrupt ring
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Disable interrupts, set up interrupt control, program
+ * the IH ring buffer registers and enable it (VI).
+ * Called at device load and resume.
+ * Returns 0 for success, errors for failure.
+ */
+static int iceland_ih_irq_init(struct amdgpu_device *adev)
+{
+       int ret = 0;
+       int rb_bufsz;
+       u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
+       u64 wptr_off;
+
+       /* disable irqs */
+       iceland_ih_disable_interrupts(adev);
+
+       /* setup interrupt control */
+       WREG32(mmINTERRUPT_CNTL2, adev->dummy_page.addr >> 8);
+       interrupt_cntl = RREG32(mmINTERRUPT_CNTL);
+       /* INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=0 - dummy read disabled with msi, enabled without msi
+        * INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=1 - dummy read controlled by IH_DUMMY_RD_EN
+        */
+       interrupt_cntl = REG_SET_FIELD(interrupt_cntl, INTERRUPT_CNTL, IH_DUMMY_RD_OVERRIDE, 0);
+       /* INTERRUPT_CNTL__IH_REQ_NONSNOOP_EN_MASK=1 if ring is in non-cacheable memory, e.g., vram */
+       interrupt_cntl = REG_SET_FIELD(interrupt_cntl, INTERRUPT_CNTL, IH_REQ_NONSNOOP_EN, 0);
+       WREG32(mmINTERRUPT_CNTL, interrupt_cntl);
+
+       /* Ring Buffer base. [39:8] of 40-bit address of the beginning of the ring buffer */
+       WREG32(mmIH_RB_BASE, adev->irq.ih.gpu_addr >> 8);
+
+       rb_bufsz = order_base_2(adev->irq.ih.ring_size / 4);
+       ih_rb_cntl = REG_SET_FIELD(0, IH_RB_CNTL, WPTR_OVERFLOW_ENABLE, 1);
+       ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
+       ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_SIZE, rb_bufsz);
+
+       /* Ring Buffer write pointer writeback. If enabled, IH_RB_WPTR register value is written to memory */
+       ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, WPTR_WRITEBACK_ENABLE, 1);
+
+       /* set the writeback address whether it's enabled or not */
+       wptr_off = adev->wb.gpu_addr + (adev->irq.ih.wptr_offs * 4);
+       WREG32(mmIH_RB_WPTR_ADDR_LO, lower_32_bits(wptr_off));
+       WREG32(mmIH_RB_WPTR_ADDR_HI, upper_32_bits(wptr_off) & 0xFF);
+
+       WREG32(mmIH_RB_CNTL, ih_rb_cntl);
+
+       /* set rptr, wptr to 0 */
+       WREG32(mmIH_RB_RPTR, 0);
+       WREG32(mmIH_RB_WPTR, 0);
+
+       /* Default settings for IH_CNTL (disabled at first) */
+       ih_cntl = RREG32(mmIH_CNTL);
+       ih_cntl = REG_SET_FIELD(ih_cntl, IH_CNTL, MC_VMID, 0);
+
+       if (adev->irq.msi_enabled)
+               ih_cntl = REG_SET_FIELD(ih_cntl, IH_CNTL, RPTR_REARM, 1);
+       WREG32(mmIH_CNTL, ih_cntl);
+
+       pci_set_master(adev->pdev);
+
+       /* enable interrupts */
+       iceland_ih_enable_interrupts(adev);
+
+       return ret;
+}
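A worked example of the RB_SIZE calculation above, using the 64 KB ring allocated in iceland_ih_sw_init() below (standalone sketch):

    #include <stdio.h>

    int main(void)
    {
            unsigned int ring_size = 64 * 1024;   /* from iceland_ih_sw_init() below */
            unsigned int dwords = ring_size / 4;  /* 16384 dwords */
            unsigned int rb_bufsz = 0;

            while ((1u << rb_bufsz) < dwords)     /* equivalent to order_base_2(dwords) */
                    rb_bufsz++;

            printf("RB_SIZE = %u\n", rb_bufsz);   /* prints 14: the ring holds 2^14 dwords */
            return 0;
    }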
+
+/**
+ * iceland_ih_irq_disable - disable interrupts
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Disable interrupts on the hw (VI).
+ */
+static void iceland_ih_irq_disable(struct amdgpu_device *adev)
+{
+       iceland_ih_disable_interrupts(adev);
+
+       /* Wait and acknowledge irq */
+       mdelay(1);
+}
+
+/**
+ * iceland_ih_get_wptr - get the IH ring buffer wptr
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Get the IH ring buffer wptr from either the register
+ * or the writeback memory buffer (VI).  Also check for
+ * ring buffer overflow and deal with it.
+ * Used by the amdgpu IH ring processing code (VI).
+ * Returns the value of the wptr.
+ */
+static u32 iceland_ih_get_wptr(struct amdgpu_device *adev)
+{
+       u32 wptr, tmp;
+
+       wptr = le32_to_cpu(adev->wb.wb[adev->irq.ih.wptr_offs]);
+
+       if (REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) {
+               wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);
+               /* When a ring buffer overflow happens, start parsing interrupts
+                * from the last non-overwritten vector (wptr + 16). Hopefully
+                * this should allow us to catch up.
+                */
+               dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
+                       wptr, adev->irq.ih.rptr, (wptr + 16) & adev->irq.ih.ptr_mask);
+               adev->irq.ih.rptr = (wptr + 16) & adev->irq.ih.ptr_mask;
+               tmp = RREG32(mmIH_RB_CNTL);
+               tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
+               WREG32(mmIH_RB_CNTL, tmp);
+       }
+       return (wptr & adev->irq.ih.ptr_mask);
+}
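A quick arithmetic check of the overflow recovery above: the read pointer is moved one 16-byte slot past the overflowed write pointer, skipping the entry that is about to be overwritten (standalone sketch, sample values only):

    #include <stdio.h>

    int main(void)
    {
            unsigned int ptr_mask = 64 * 1024 - 1;   /* 64 KB ring, byte pointers */
            unsigned int wptr = 0x0020;              /* example overflowed write pointer */
            unsigned int rptr = (wptr + 16) & ptr_mask;

            /* rptr lands one 16-byte slot past wptr: the oldest surviving vector */
            printf("resume reading at 0x%04X\n", rptr);   /* 0x0030 */
            return 0;
    }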
+
+/**
+ * iceland_ih_decode_iv - decode an interrupt vector
+ *
+ * @adev: amdgpu_device pointer
+ * @entry: IV entry to fill with the decoded data
+ *
+ * Decodes the interrupt vector at the current rptr
+ * position and also advances the position.
+ */
+static void iceland_ih_decode_iv(struct amdgpu_device *adev,
+                                struct amdgpu_iv_entry *entry)
+{
+       /* wptr/rptr are in bytes! */
+       u32 ring_index = adev->irq.ih.rptr >> 2;
+       uint32_t dw[4];
+
+       dw[0] = le32_to_cpu(adev->irq.ih.ring[ring_index + 0]);
+       dw[1] = le32_to_cpu(adev->irq.ih.ring[ring_index + 1]);
+       dw[2] = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]);
+       dw[3] = le32_to_cpu(adev->irq.ih.ring[ring_index + 3]);
+
+       entry->src_id = dw[0] & 0xff;
+       entry->src_data = dw[1] & 0xfffffff;
+       entry->ring_id = dw[2] & 0xff;
+       entry->vm_id = (dw[2] >> 8) & 0xff;
+       entry->pas_id = (dw[2] >> 16) & 0xffff;
+
+       /* wptr/rptr are in bytes! */
+       adev->irq.ih.rptr += 16;
+}
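The four dwords fetched above carry the IV fields at fixed bit positions; a standalone decode of a sample vector (values chosen purely for illustration) shows the layout:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* one 16-byte IH vector: dw0..dw3, sample contents */
            uint32_t dw[4] = { 0x000000D5, 0x00000007, 0x00020103, 0x00000000 };

            uint32_t src_id   = dw[0] & 0xff;           /* 0xD5 = 213 */
            uint32_t src_data = dw[1] & 0xfffffff;      /* 7 */
            uint32_t ring_id  = dw[2] & 0xff;           /* 3 */
            uint32_t vm_id    = (dw[2] >> 8) & 0xff;    /* 1 */
            uint32_t pas_id   = (dw[2] >> 16) & 0xffff; /* 2 */

            printf("src_id=%u src_data=%u ring=%u vm=%u pasid=%u\n",
                   src_id, src_data, ring_id, vm_id, pas_id);
            return 0;
    }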
+
+/**
+ * iceland_ih_set_rptr - set the IH ring buffer rptr
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Set the IH ring buffer rptr.
+ */
+static void iceland_ih_set_rptr(struct amdgpu_device *adev)
+{
+       WREG32(mmIH_RB_RPTR, adev->irq.ih.rptr);
+}
+
+static int iceland_ih_early_init(struct amdgpu_device *adev)
+{
+       iceland_ih_set_interrupt_funcs(adev);
+       return 0;
+}
+
+static int iceland_ih_sw_init(struct amdgpu_device *adev)
+{
+       int r;
+
+       r = amdgpu_ih_ring_init(adev, 64 * 1024, false);
+       if (r)
+               return r;
+
+       r = amdgpu_irq_init(adev);
+
+       return r;
+}
+
+static int iceland_ih_sw_fini(struct amdgpu_device *adev)
+{
+       amdgpu_irq_fini(adev);
+       amdgpu_ih_ring_fini(adev);
+
+       return 0;
+}
+
+static int iceland_ih_hw_init(struct amdgpu_device *adev)
+{
+       int r;
+
+       r = iceland_ih_irq_init(adev);
+       if (r)
+               return r;
+
+       return 0;
+}
+
+static int iceland_ih_hw_fini(struct amdgpu_device *adev)
+{
+       iceland_ih_irq_disable(adev);
+
+       return 0;
+}
+
+static int iceland_ih_suspend(struct amdgpu_device *adev)
+{
+       return iceland_ih_hw_fini(adev);
+}
+
+static int iceland_ih_resume(struct amdgpu_device *adev)
+{
+       return iceland_ih_hw_init(adev);
+}
+
+static bool iceland_ih_is_idle(struct amdgpu_device *adev)
+{
+       u32 tmp = RREG32(mmSRBM_STATUS);
+
+       if (REG_GET_FIELD(tmp, SRBM_STATUS, IH_BUSY))
+               return false;
+
+       return true;
+}
+
+static int iceland_ih_wait_for_idle(struct amdgpu_device *adev)
+{
+       unsigned i;
+       u32 tmp;
+
+       for (i = 0; i < adev->usec_timeout; i++) {
+               /* read SRBM_STATUS */
+               tmp = RREG32(mmSRBM_STATUS);
+               if (!REG_GET_FIELD(tmp, SRBM_STATUS, IH_BUSY))
+                       return 0;
+               udelay(1);
+       }
+       return -ETIMEDOUT;
+}
+
+static void iceland_ih_print_status(struct amdgpu_device *adev)
+{
+       dev_info(adev->dev, "ICELAND IH registers\n");
+       dev_info(adev->dev, "  SRBM_STATUS=0x%08X\n",
+               RREG32(mmSRBM_STATUS));
+       dev_info(adev->dev, "  SRBM_STATUS2=0x%08X\n",
+               RREG32(mmSRBM_STATUS2));
+       dev_info(adev->dev, "  INTERRUPT_CNTL=0x%08X\n",
+                RREG32(mmINTERRUPT_CNTL));
+       dev_info(adev->dev, "  INTERRUPT_CNTL2=0x%08X\n",
+                RREG32(mmINTERRUPT_CNTL2));
+       dev_info(adev->dev, "  IH_CNTL=0x%08X\n",
+                RREG32(mmIH_CNTL));
+       dev_info(adev->dev, "  IH_RB_CNTL=0x%08X\n",
+                RREG32(mmIH_RB_CNTL));
+       dev_info(adev->dev, "  IH_RB_BASE=0x%08X\n",
+                RREG32(mmIH_RB_BASE));
+       dev_info(adev->dev, "  IH_RB_WPTR_ADDR_LO=0x%08X\n",
+                RREG32(mmIH_RB_WPTR_ADDR_LO));
+       dev_info(adev->dev, "  IH_RB_WPTR_ADDR_HI=0x%08X\n",
+                RREG32(mmIH_RB_WPTR_ADDR_HI));
+       dev_info(adev->dev, "  IH_RB_RPTR=0x%08X\n",
+                RREG32(mmIH_RB_RPTR));
+       dev_info(adev->dev, "  IH_RB_WPTR=0x%08X\n",
+                RREG32(mmIH_RB_WPTR));
+}
+
+static int iceland_ih_soft_reset(struct amdgpu_device *adev)
+{
+       u32 srbm_soft_reset = 0;
+       u32 tmp = RREG32(mmSRBM_STATUS);
+
+       if (tmp & SRBM_STATUS__IH_BUSY_MASK)
+               srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET,
+                                               SOFT_RESET_IH, 1);
+
+       if (srbm_soft_reset) {
+               iceland_ih_print_status(adev);
+
+               tmp = RREG32(mmSRBM_SOFT_RESET);
+               tmp |= srbm_soft_reset;
+               dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
+               WREG32(mmSRBM_SOFT_RESET, tmp);
+               tmp = RREG32(mmSRBM_SOFT_RESET);
+
+               udelay(50);
+
+               tmp &= ~srbm_soft_reset;
+               WREG32(mmSRBM_SOFT_RESET, tmp);
+               tmp = RREG32(mmSRBM_SOFT_RESET);
+
+               /* Wait a little for things to settle down */
+               udelay(50);
+
+               iceland_ih_print_status(adev);
+       }
+
+       return 0;
+}
+
+static int iceland_ih_set_clockgating_state(struct amdgpu_device *adev,
+                                         enum amdgpu_clockgating_state state)
+{
+       /* TODO */
+       return 0;
+}
+
+static int iceland_ih_set_powergating_state(struct amdgpu_device *adev,
+                                         enum amdgpu_powergating_state state)
+{
+       /* TODO */
+       return 0;
+}
+
+const struct amdgpu_ip_funcs iceland_ih_ip_funcs = {
+       .early_init = iceland_ih_early_init,
+       .late_init = NULL,
+       .sw_init = iceland_ih_sw_init,
+       .sw_fini = iceland_ih_sw_fini,
+       .hw_init = iceland_ih_hw_init,
+       .hw_fini = iceland_ih_hw_fini,
+       .suspend = iceland_ih_suspend,
+       .resume = iceland_ih_resume,
+       .is_idle = iceland_ih_is_idle,
+       .wait_for_idle = iceland_ih_wait_for_idle,
+       .soft_reset = iceland_ih_soft_reset,
+       .print_status = iceland_ih_print_status,
+       .set_clockgating_state = iceland_ih_set_clockgating_state,
+       .set_powergating_state = iceland_ih_set_powergating_state,
+};
+
+static const struct amdgpu_ih_funcs iceland_ih_funcs = {
+       .get_wptr = iceland_ih_get_wptr,
+       .decode_iv = iceland_ih_decode_iv,
+       .set_rptr = iceland_ih_set_rptr
+};
+
+static void iceland_ih_set_interrupt_funcs(struct amdgpu_device *adev)
+{
+       if (adev->irq.ih_funcs == NULL)
+               adev->irq.ih_funcs = &iceland_ih_funcs;
+}
+
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_ih.h b/drivers/gpu/drm/amd/amdgpu/iceland_ih.h
new file mode 100644 (file)
index 0000000..d001895
--- /dev/null
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __ICELAND_IH_H__
+#define __ICELAND_IH_H__
+
+extern const struct amdgpu_ip_funcs iceland_ih_ip_funcs;
+
+#endif /* __ICELAND_IH_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_sdma_pkt_open.h b/drivers/gpu/drm/amd/amdgpu/iceland_sdma_pkt_open.h
new file mode 100644 (file)
index 0000000..c723602
--- /dev/null
@@ -0,0 +1,2167 @@
+/*
+ * Copyright (C) 2014  Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __ICELAND_SDMA_PKT_OPEN_H_
+#define __ICELAND_SDMA_PKT_OPEN_H_
+
+#define SDMA_OP_NOP  0
+#define SDMA_OP_COPY  1
+#define SDMA_OP_WRITE  2
+#define SDMA_OP_INDIRECT  4
+#define SDMA_OP_FENCE  5
+#define SDMA_OP_TRAP  6
+#define SDMA_OP_SEM  7
+#define SDMA_OP_POLL_REGMEM  8
+#define SDMA_OP_COND_EXE  9
+#define SDMA_OP_ATOMIC  10
+#define SDMA_OP_CONST_FILL  11
+#define SDMA_OP_GEN_PTEPDE  12
+#define SDMA_OP_TIMESTAMP  13
+#define SDMA_OP_SRBM_WRITE  14
+#define SDMA_OP_PRE_EXE  15
+#define SDMA_SUBOP_TIMESTAMP_SET  0
+#define SDMA_SUBOP_TIMESTAMP_GET  1
+#define SDMA_SUBOP_TIMESTAMP_GET_GLOBAL  2
+#define SDMA_SUBOP_COPY_LINEAR  0
+#define SDMA_SUBOP_COPY_LINEAR_SUB_WIND  4
+#define SDMA_SUBOP_COPY_TILED  1
+#define SDMA_SUBOP_COPY_TILED_SUB_WIND  5
+#define SDMA_SUBOP_COPY_T2T_SUB_WIND  6
+#define SDMA_SUBOP_COPY_SOA  3
+#define SDMA_SUBOP_WRITE_LINEAR  0
+#define SDMA_SUBOP_WRITE_TILED  1
+
+/*define for op field*/
+#define SDMA_PKT_HEADER_op_offset 0
+#define SDMA_PKT_HEADER_op_mask   0x000000FF
+#define SDMA_PKT_HEADER_op_shift  0
+#define SDMA_PKT_HEADER_OP(x) (((x) & SDMA_PKT_HEADER_op_mask) << SDMA_PKT_HEADER_op_shift)
+
+/*define for sub_op field*/
+#define SDMA_PKT_HEADER_sub_op_offset 0
+#define SDMA_PKT_HEADER_sub_op_mask   0x000000FF
+#define SDMA_PKT_HEADER_sub_op_shift  8
+#define SDMA_PKT_HEADER_SUB_OP(x) (((x) & SDMA_PKT_HEADER_sub_op_mask) << SDMA_PKT_HEADER_sub_op_shift)
+
+/*
+** Definitions for SDMA_PKT_COPY_LINEAR packet
+*/
+
+/*define for HEADER word*/
+/*define for op field*/
+#define SDMA_PKT_COPY_LINEAR_HEADER_op_offset 0
+#define SDMA_PKT_COPY_LINEAR_HEADER_op_mask   0x000000FF
+#define SDMA_PKT_COPY_LINEAR_HEADER_op_shift  0
+#define SDMA_PKT_COPY_LINEAR_HEADER_OP(x) (((x) & SDMA_PKT_COPY_LINEAR_HEADER_op_mask) << SDMA_PKT_COPY_LINEAR_HEADER_op_shift)
+
+/*define for sub_op field*/
+#define SDMA_PKT_COPY_LINEAR_HEADER_sub_op_offset 0
+#define SDMA_PKT_COPY_LINEAR_HEADER_sub_op_mask   0x000000FF
+#define SDMA_PKT_COPY_LINEAR_HEADER_sub_op_shift  8
+#define SDMA_PKT_COPY_LINEAR_HEADER_SUB_OP(x) (((x) & SDMA_PKT_COPY_LINEAR_HEADER_sub_op_mask) << SDMA_PKT_COPY_LINEAR_HEADER_sub_op_shift)
+
+/*define for broadcast field*/
+#define SDMA_PKT_COPY_LINEAR_HEADER_broadcast_offset 0
+#define SDMA_PKT_COPY_LINEAR_HEADER_broadcast_mask   0x00000001
+#define SDMA_PKT_COPY_LINEAR_HEADER_broadcast_shift  27
+#define SDMA_PKT_COPY_LINEAR_HEADER_BROADCAST(x) (((x) & SDMA_PKT_COPY_LINEAR_HEADER_broadcast_mask) << SDMA_PKT_COPY_LINEAR_HEADER_broadcast_shift)
+
+/*define for COUNT word*/
+/*define for count field*/
+#define SDMA_PKT_COPY_LINEAR_COUNT_count_offset 1
+#define SDMA_PKT_COPY_LINEAR_COUNT_count_mask   0x003FFFFF
+#define SDMA_PKT_COPY_LINEAR_COUNT_count_shift  0
+#define SDMA_PKT_COPY_LINEAR_COUNT_COUNT(x) (((x) & SDMA_PKT_COPY_LINEAR_COUNT_count_mask) << SDMA_PKT_COPY_LINEAR_COUNT_count_shift)
+
+/*define for PARAMETER word*/
+/*define for dst_sw field*/
+#define SDMA_PKT_COPY_LINEAR_PARAMETER_dst_sw_offset 2
+#define SDMA_PKT_COPY_LINEAR_PARAMETER_dst_sw_mask   0x00000003
+#define SDMA_PKT_COPY_LINEAR_PARAMETER_dst_sw_shift  16
+#define SDMA_PKT_COPY_LINEAR_PARAMETER_DST_SW(x) (((x) & SDMA_PKT_COPY_LINEAR_PARAMETER_dst_sw_mask) << SDMA_PKT_COPY_LINEAR_PARAMETER_dst_sw_shift)
+
+/*define for dst_ha field*/
+#define SDMA_PKT_COPY_LINEAR_PARAMETER_dst_ha_offset 2
+#define SDMA_PKT_COPY_LINEAR_PARAMETER_dst_ha_mask   0x00000001
+#define SDMA_PKT_COPY_LINEAR_PARAMETER_dst_ha_shift  22
+#define SDMA_PKT_COPY_LINEAR_PARAMETER_DST_HA(x) (((x) & SDMA_PKT_COPY_LINEAR_PARAMETER_dst_ha_mask) << SDMA_PKT_COPY_LINEAR_PARAMETER_dst_ha_shift)
+
+/*define for src_sw field*/
+#define SDMA_PKT_COPY_LINEAR_PARAMETER_src_sw_offset 2
+#define SDMA_PKT_COPY_LINEAR_PARAMETER_src_sw_mask   0x00000003
+#define SDMA_PKT_COPY_LINEAR_PARAMETER_src_sw_shift  24
+#define SDMA_PKT_COPY_LINEAR_PARAMETER_SRC_SW(x) (((x) & SDMA_PKT_COPY_LINEAR_PARAMETER_src_sw_mask) << SDMA_PKT_COPY_LINEAR_PARAMETER_src_sw_shift)
+
+/*define for src_ha field*/
+#define SDMA_PKT_COPY_LINEAR_PARAMETER_src_ha_offset 2
+#define SDMA_PKT_COPY_LINEAR_PARAMETER_src_ha_mask   0x00000001
+#define SDMA_PKT_COPY_LINEAR_PARAMETER_src_ha_shift  30
+#define SDMA_PKT_COPY_LINEAR_PARAMETER_SRC_HA(x) (((x) & SDMA_PKT_COPY_LINEAR_PARAMETER_src_ha_mask) << SDMA_PKT_COPY_LINEAR_PARAMETER_src_ha_shift)
+
+/*define for SRC_ADDR_LO word*/
+/*define for src_addr_31_0 field*/
+#define SDMA_PKT_COPY_LINEAR_SRC_ADDR_LO_src_addr_31_0_offset 3
+#define SDMA_PKT_COPY_LINEAR_SRC_ADDR_LO_src_addr_31_0_mask   0xFFFFFFFF
+#define SDMA_PKT_COPY_LINEAR_SRC_ADDR_LO_src_addr_31_0_shift  0
+#define SDMA_PKT_COPY_LINEAR_SRC_ADDR_LO_SRC_ADDR_31_0(x) (((x) & SDMA_PKT_COPY_LINEAR_SRC_ADDR_LO_src_addr_31_0_mask) << SDMA_PKT_COPY_LINEAR_SRC_ADDR_LO_src_addr_31_0_shift)
+
+/*define for SRC_ADDR_HI word*/
+/*define for src_addr_63_32 field*/
+#define SDMA_PKT_COPY_LINEAR_SRC_ADDR_HI_src_addr_63_32_offset 4
+#define SDMA_PKT_COPY_LINEAR_SRC_ADDR_HI_src_addr_63_32_mask   0xFFFFFFFF
+#define SDMA_PKT_COPY_LINEAR_SRC_ADDR_HI_src_addr_63_32_shift  0
+#define SDMA_PKT_COPY_LINEAR_SRC_ADDR_HI_SRC_ADDR_63_32(x) (((x) & SDMA_PKT_COPY_LINEAR_SRC_ADDR_HI_src_addr_63_32_mask) << SDMA_PKT_COPY_LINEAR_SRC_ADDR_HI_src_addr_63_32_shift)
+
+/*define for DST_ADDR_LO word*/
+/*define for dst_addr_31_0 field*/
+#define SDMA_PKT_COPY_LINEAR_DST_ADDR_LO_dst_addr_31_0_offset 5
+#define SDMA_PKT_COPY_LINEAR_DST_ADDR_LO_dst_addr_31_0_mask   0xFFFFFFFF
+#define SDMA_PKT_COPY_LINEAR_DST_ADDR_LO_dst_addr_31_0_shift  0
+#define SDMA_PKT_COPY_LINEAR_DST_ADDR_LO_DST_ADDR_31_0(x) (((x) & SDMA_PKT_COPY_LINEAR_DST_ADDR_LO_dst_addr_31_0_mask) << SDMA_PKT_COPY_LINEAR_DST_ADDR_LO_dst_addr_31_0_shift)
+
+/*define for DST_ADDR_HI word*/
+/*define for dst_addr_63_32 field*/
+#define SDMA_PKT_COPY_LINEAR_DST_ADDR_HI_dst_addr_63_32_offset 6
+#define SDMA_PKT_COPY_LINEAR_DST_ADDR_HI_dst_addr_63_32_mask   0xFFFFFFFF
+#define SDMA_PKT_COPY_LINEAR_DST_ADDR_HI_dst_addr_63_32_shift  0
+#define SDMA_PKT_COPY_LINEAR_DST_ADDR_HI_DST_ADDR_63_32(x) (((x) & SDMA_PKT_COPY_LINEAR_DST_ADDR_HI_dst_addr_63_32_mask) << SDMA_PKT_COPY_LINEAR_DST_ADDR_HI_dst_addr_63_32_shift)
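Putting the COPY_LINEAR defines together: per the _offset values, the packet is 7 dwords (header, count, parameter, then source and destination addresses split low/high). A standalone sketch of assembling one such packet follows; the exact count semantics (byte count vs. count minus one) vary by ASIC generation and are assumed here, and the open-coded packing mirrors the macros above:

    #include <stdint.h>
    #include <stdio.h>

    #define SDMA_OP_COPY            1
    #define SDMA_SUBOP_COPY_LINEAR  0

    int main(void)
    {
            uint64_t src = 0x0000000112340000ULL;   /* sample GPU addresses */
            uint64_t dst = 0x0000000156780000ULL;
            uint32_t bytes = 4096;                  /* count semantics assumed: plain byte count */
            uint32_t pkt[7];

            /* dword 0: SDMA_PKT_COPY_LINEAR_HEADER_OP() | ..._HEADER_SUB_OP() */
            pkt[0] = (SDMA_OP_COPY & 0xFF) | ((SDMA_SUBOP_COPY_LINEAR & 0xFF) << 8);
            /* dword 1: SDMA_PKT_COPY_LINEAR_COUNT_COUNT() */
            pkt[1] = bytes & 0x003FFFFF;
            /* dword 2: PARAMETER word (dst_sw/dst_ha/src_sw/src_ha left at 0) */
            pkt[2] = 0;
            /* dwords 3-4: source address, low then high, per the _offset values above */
            pkt[3] = (uint32_t)src;
            pkt[4] = (uint32_t)(src >> 32);
            /* dwords 5-6: destination address, low then high */
            pkt[5] = (uint32_t)dst;
            pkt[6] = (uint32_t)(dst >> 32);

            for (int i = 0; i < 7; i++)
                    printf("dw%d = 0x%08X\n", i, pkt[i]);
            return 0;
    }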
+
+
+/*
+** Definitions for SDMA_PKT_COPY_BROADCAST_LINEAR packet
+*/
+
+/*define for HEADER word*/
+/*define for op field*/
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_HEADER_op_offset 0
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_HEADER_op_mask   0x000000FF
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_HEADER_op_shift  0
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_HEADER_OP(x) (((x) & SDMA_PKT_COPY_BROADCAST_LINEAR_HEADER_op_mask) << SDMA_PKT_COPY_BROADCAST_LINEAR_HEADER_op_shift)
+
+/*define for sub_op field*/
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_HEADER_sub_op_offset 0
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_HEADER_sub_op_mask   0x000000FF
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_HEADER_sub_op_shift  8
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_HEADER_SUB_OP(x) (((x) & SDMA_PKT_COPY_BROADCAST_LINEAR_HEADER_sub_op_mask) << SDMA_PKT_COPY_BROADCAST_LINEAR_HEADER_sub_op_shift)
+
+/*define for broadcast field*/
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_HEADER_broadcast_offset 0
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_HEADER_broadcast_mask   0x00000001
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_HEADER_broadcast_shift  27
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_HEADER_BROADCAST(x) (((x) & SDMA_PKT_COPY_BROADCAST_LINEAR_HEADER_broadcast_mask) << SDMA_PKT_COPY_BROADCAST_LINEAR_HEADER_broadcast_shift)
+
+/*define for COUNT word*/
+/*define for count field*/
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_COUNT_count_offset 1
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_COUNT_count_mask   0x003FFFFF
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_COUNT_count_shift  0
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_COUNT_COUNT(x) (((x) & SDMA_PKT_COPY_BROADCAST_LINEAR_COUNT_count_mask) << SDMA_PKT_COPY_BROADCAST_LINEAR_COUNT_count_shift)
+
+/*define for PARAMETER word*/
+/*define for dst2_sw field*/
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst2_sw_offset 2
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst2_sw_mask   0x00000003
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst2_sw_shift  8
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_DST2_SW(x) (((x) & SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst2_sw_mask) << SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst2_sw_shift)
+
+/*define for dst2_ha field*/
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst2_ha_offset 2
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst2_ha_mask   0x00000001
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst2_ha_shift  14
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_DST2_HA(x) (((x) & SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst2_ha_mask) << SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst2_ha_shift)
+
+/*define for dst1_sw field*/
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst1_sw_offset 2
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst1_sw_mask   0x00000003
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst1_sw_shift  16
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_DST1_SW(x) (((x) & SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst1_sw_mask) << SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst1_sw_shift)
+
+/*define for dst1_ha field*/
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst1_ha_offset 2
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst1_ha_mask   0x00000001
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst1_ha_shift  22
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_DST1_HA(x) (((x) & SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst1_ha_mask) << SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst1_ha_shift)
+
+/*define for src_sw field*/
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_src_sw_offset 2
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_src_sw_mask   0x00000003
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_src_sw_shift  24
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_SRC_SW(x) (((x) & SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_src_sw_mask) << SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_src_sw_shift)
+
+/*define for src_ha field*/
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_src_ha_offset 2
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_src_ha_mask   0x00000001
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_src_ha_shift  30
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_SRC_HA(x) (((x) & SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_src_ha_mask) << SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_src_ha_shift)
+
+/*define for SRC_ADDR_LO word*/
+/*define for src_addr_31_0 field*/
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_SRC_ADDR_LO_src_addr_31_0_offset 3
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_SRC_ADDR_LO_src_addr_31_0_mask   0xFFFFFFFF
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_SRC_ADDR_LO_src_addr_31_0_shift  0
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_SRC_ADDR_LO_SRC_ADDR_31_0(x) (((x) & SDMA_PKT_COPY_BROADCAST_LINEAR_SRC_ADDR_LO_src_addr_31_0_mask) << SDMA_PKT_COPY_BROADCAST_LINEAR_SRC_ADDR_LO_src_addr_31_0_shift)
+
+/*define for SRC_ADDR_HI word*/
+/*define for src_addr_63_32 field*/
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_SRC_ADDR_HI_src_addr_63_32_offset 4
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_SRC_ADDR_HI_src_addr_63_32_mask   0xFFFFFFFF
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_SRC_ADDR_HI_src_addr_63_32_shift  0
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_SRC_ADDR_HI_SRC_ADDR_63_32(x) (((x) & SDMA_PKT_COPY_BROADCAST_LINEAR_SRC_ADDR_HI_src_addr_63_32_mask) << SDMA_PKT_COPY_BROADCAST_LINEAR_SRC_ADDR_HI_src_addr_63_32_shift)
+
+/*define for DST1_ADDR_LO word*/
+/*define for dst1_addr_31_0 field*/
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_DST1_ADDR_LO_dst1_addr_31_0_offset 5
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_DST1_ADDR_LO_dst1_addr_31_0_mask   0xFFFFFFFF
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_DST1_ADDR_LO_dst1_addr_31_0_shift  0
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_DST1_ADDR_LO_DST1_ADDR_31_0(x) (((x) & SDMA_PKT_COPY_BROADCAST_LINEAR_DST1_ADDR_LO_dst1_addr_31_0_mask) << SDMA_PKT_COPY_BROADCAST_LINEAR_DST1_ADDR_LO_dst1_addr_31_0_shift)
+
+/*define for DST1_ADDR_HI word*/
+/*define for dst1_addr_63_32 field*/
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_DST1_ADDR_HI_dst1_addr_63_32_offset 6
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_DST1_ADDR_HI_dst1_addr_63_32_mask   0xFFFFFFFF
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_DST1_ADDR_HI_dst1_addr_63_32_shift  0
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_DST1_ADDR_HI_DST1_ADDR_63_32(x) (((x) & SDMA_PKT_COPY_BROADCAST_LINEAR_DST1_ADDR_HI_dst1_addr_63_32_mask) << SDMA_PKT_COPY_BROADCAST_LINEAR_DST1_ADDR_HI_dst1_addr_63_32_shift)
+
+/*define for DST2_ADDR_LO word*/
+/*define for dst2_addr_31_0 field*/
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_DST2_ADDR_LO_dst2_addr_31_0_offset 7
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_DST2_ADDR_LO_dst2_addr_31_0_mask   0xFFFFFFFF
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_DST2_ADDR_LO_dst2_addr_31_0_shift  0
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_DST2_ADDR_LO_DST2_ADDR_31_0(x) (((x) & SDMA_PKT_COPY_BROADCAST_LINEAR_DST2_ADDR_LO_dst2_addr_31_0_mask) << SDMA_PKT_COPY_BROADCAST_LINEAR_DST2_ADDR_LO_dst2_addr_31_0_shift)
+
+/*define for DST2_ADDR_HI word*/
+/*define for dst2_addr_63_32 field*/
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_DST2_ADDR_HI_dst2_addr_63_32_offset 8
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_DST2_ADDR_HI_dst2_addr_63_32_mask   0xFFFFFFFF
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_DST2_ADDR_HI_dst2_addr_63_32_shift  0
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_DST2_ADDR_HI_DST2_ADDR_63_32(x) (((x) & SDMA_PKT_COPY_BROADCAST_LINEAR_DST2_ADDR_HI_dst2_addr_63_32_mask) << SDMA_PKT_COPY_BROADCAST_LINEAR_DST2_ADDR_HI_dst2_addr_63_32_shift)
+
+
+/*
+** Definitions for SDMA_PKT_COPY_LINEAR_SUBWIN packet
+*/
+
+/*define for HEADER word*/
+/*define for op field*/
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_HEADER_op_offset 0
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_HEADER_op_mask   0x000000FF
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_HEADER_op_shift  0
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_HEADER_OP(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_HEADER_op_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_HEADER_op_shift)
+
+/*define for sub_op field*/
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_HEADER_sub_op_offset 0
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_HEADER_sub_op_mask   0x000000FF
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_HEADER_sub_op_shift  8
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_HEADER_SUB_OP(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_HEADER_sub_op_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_HEADER_sub_op_shift)
+
+/*define for elementsize field*/
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_HEADER_elementsize_offset 0
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_HEADER_elementsize_mask   0x00000007
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_HEADER_elementsize_shift  29
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_HEADER_ELEMENTSIZE(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_HEADER_elementsize_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_HEADER_elementsize_shift)
+
+/*define for SRC_ADDR_LO word*/
+/*define for src_addr_31_0 field*/
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_SRC_ADDR_LO_src_addr_31_0_offset 1
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_SRC_ADDR_LO_src_addr_31_0_mask   0xFFFFFFFF
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_SRC_ADDR_LO_src_addr_31_0_shift  0
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_SRC_ADDR_LO_SRC_ADDR_31_0(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_SRC_ADDR_LO_src_addr_31_0_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_SRC_ADDR_LO_src_addr_31_0_shift)
+
+/*define for SRC_ADDR_HI word*/
+/*define for src_addr_63_32 field*/
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_SRC_ADDR_HI_src_addr_63_32_offset 2
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_SRC_ADDR_HI_src_addr_63_32_mask   0xFFFFFFFF
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_SRC_ADDR_HI_src_addr_63_32_shift  0
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_SRC_ADDR_HI_SRC_ADDR_63_32(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_SRC_ADDR_HI_src_addr_63_32_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_SRC_ADDR_HI_src_addr_63_32_shift)
+
+/*define for DW_3 word*/
+/*define for src_x field*/
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_3_src_x_offset 3
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_3_src_x_mask   0x00003FFF
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_3_src_x_shift  0
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_3_SRC_X(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_DW_3_src_x_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_DW_3_src_x_shift)
+
+/*define for src_y field*/
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_3_src_y_offset 3
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_3_src_y_mask   0x00003FFF
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_3_src_y_shift  16
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_3_SRC_Y(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_DW_3_src_y_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_DW_3_src_y_shift)
+
+/*define for DW_4 word*/
+/*define for src_z field*/
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_4_src_z_offset 4
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_4_src_z_mask   0x000007FF
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_4_src_z_shift  0
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_4_SRC_Z(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_DW_4_src_z_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_DW_4_src_z_shift)
+
+/*define for src_pitch field*/
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_4_src_pitch_offset 4
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_4_src_pitch_mask   0x00003FFF
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_4_src_pitch_shift  16
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_4_SRC_PITCH(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_DW_4_src_pitch_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_DW_4_src_pitch_shift)
+
+/*define for DW_5 word*/
+/*define for src_slice_pitch field*/
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_5_src_slice_pitch_offset 5
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_5_src_slice_pitch_mask   0x0FFFFFFF
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_5_src_slice_pitch_shift  0
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_5_SRC_SLICE_PITCH(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_DW_5_src_slice_pitch_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_DW_5_src_slice_pitch_shift)
+
+/*define for DST_ADDR_LO word*/
+/*define for dst_addr_31_0 field*/
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DST_ADDR_LO_dst_addr_31_0_offset 6
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DST_ADDR_LO_dst_addr_31_0_mask   0xFFFFFFFF
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DST_ADDR_LO_dst_addr_31_0_shift  0
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DST_ADDR_LO_DST_ADDR_31_0(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_DST_ADDR_LO_dst_addr_31_0_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_DST_ADDR_LO_dst_addr_31_0_shift)
+
+/*define for DST_ADDR_HI word*/
+/*define for dst_addr_63_32 field*/
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DST_ADDR_HI_dst_addr_63_32_offset 7
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DST_ADDR_HI_dst_addr_63_32_mask   0xFFFFFFFF
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DST_ADDR_HI_dst_addr_63_32_shift  0
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DST_ADDR_HI_DST_ADDR_63_32(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_DST_ADDR_HI_dst_addr_63_32_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_DST_ADDR_HI_dst_addr_63_32_shift)
+
+/*define for DW_8 word*/
+/*define for dst_x field*/
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_8_dst_x_offset 8
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_8_dst_x_mask   0x00003FFF
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_8_dst_x_shift  0
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_8_DST_X(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_DW_8_dst_x_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_DW_8_dst_x_shift)
+
+/*define for dst_y field*/
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_8_dst_y_offset 8
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_8_dst_y_mask   0x00003FFF
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_8_dst_y_shift  16
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_8_DST_Y(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_DW_8_dst_y_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_DW_8_dst_y_shift)
+
+/*define for DW_9 word*/
+/*define for dst_z field*/
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_9_dst_z_offset 9
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_9_dst_z_mask   0x000007FF
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_9_dst_z_shift  0
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_9_DST_Z(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_DW_9_dst_z_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_DW_9_dst_z_shift)
+
+/*define for dst_pitch field*/
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_9_dst_pitch_offset 9
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_9_dst_pitch_mask   0x00003FFF
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_9_dst_pitch_shift  16
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_9_DST_PITCH(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_DW_9_dst_pitch_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_DW_9_dst_pitch_shift)
+
+/*define for DW_10 word*/
+/*define for dst_slice_pitch field*/
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_10_dst_slice_pitch_offset 10
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_10_dst_slice_pitch_mask   0x0FFFFFFF
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_10_dst_slice_pitch_shift  0
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_10_DST_SLICE_PITCH(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_DW_10_dst_slice_pitch_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_DW_10_dst_slice_pitch_shift)
+
+/*define for DW_11 word*/
+/*define for rect_x field*/
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_11_rect_x_offset 11
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_11_rect_x_mask   0x00003FFF
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_11_rect_x_shift  0
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_11_RECT_X(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_DW_11_rect_x_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_DW_11_rect_x_shift)
+
+/*define for rect_y field*/
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_11_rect_y_offset 11
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_11_rect_y_mask   0x00003FFF
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_11_rect_y_shift  16
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_11_RECT_Y(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_DW_11_rect_y_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_DW_11_rect_y_shift)
+
+/*define for DW_12 word*/
+/*define for rect_z field*/
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_rect_z_offset 12
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_rect_z_mask   0x000007FF
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_rect_z_shift  0
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_RECT_Z(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_rect_z_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_rect_z_shift)
+
+/*define for dst_sw field*/
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_dst_sw_offset 12
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_dst_sw_mask   0x00000003
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_dst_sw_shift  16
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_DST_SW(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_dst_sw_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_dst_sw_shift)
+
+/*define for dst_ha field*/
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_dst_ha_offset 12
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_dst_ha_mask   0x00000001
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_dst_ha_shift  22
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_DST_HA(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_dst_ha_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_dst_ha_shift)
+
+/*define for src_sw field*/
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_src_sw_offset 12
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_src_sw_mask   0x00000003
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_src_sw_shift  24
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_SRC_SW(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_src_sw_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_src_sw_shift)
+
+/*define for src_ha field*/
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_src_ha_offset 12
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_src_ha_mask   0x00000001
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_src_ha_shift  30
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_SRC_HA(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_src_ha_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_src_ha_shift)
+
+
+/*
+** Definitions for SDMA_PKT_COPY_TILED packet
+*/
+
+/*define for HEADER word*/
+/*define for op field*/
+#define SDMA_PKT_COPY_TILED_HEADER_op_offset 0
+#define SDMA_PKT_COPY_TILED_HEADER_op_mask   0x000000FF
+#define SDMA_PKT_COPY_TILED_HEADER_op_shift  0
+#define SDMA_PKT_COPY_TILED_HEADER_OP(x) (((x) & SDMA_PKT_COPY_TILED_HEADER_op_mask) << SDMA_PKT_COPY_TILED_HEADER_op_shift)
+
+/*define for sub_op field*/
+#define SDMA_PKT_COPY_TILED_HEADER_sub_op_offset 0
+#define SDMA_PKT_COPY_TILED_HEADER_sub_op_mask   0x000000FF
+#define SDMA_PKT_COPY_TILED_HEADER_sub_op_shift  8
+#define SDMA_PKT_COPY_TILED_HEADER_SUB_OP(x) (((x) & SDMA_PKT_COPY_TILED_HEADER_sub_op_mask) << SDMA_PKT_COPY_TILED_HEADER_sub_op_shift)
+
+/*define for detile field*/
+#define SDMA_PKT_COPY_TILED_HEADER_detile_offset 0
+#define SDMA_PKT_COPY_TILED_HEADER_detile_mask   0x00000001
+#define SDMA_PKT_COPY_TILED_HEADER_detile_shift  31
+#define SDMA_PKT_COPY_TILED_HEADER_DETILE(x) (((x) & SDMA_PKT_COPY_TILED_HEADER_detile_mask) << SDMA_PKT_COPY_TILED_HEADER_detile_shift)
+
+/*define for TILED_ADDR_LO word*/
+/*define for tiled_addr_31_0 field*/
+#define SDMA_PKT_COPY_TILED_TILED_ADDR_LO_tiled_addr_31_0_offset 1
+#define SDMA_PKT_COPY_TILED_TILED_ADDR_LO_tiled_addr_31_0_mask   0xFFFFFFFF
+#define SDMA_PKT_COPY_TILED_TILED_ADDR_LO_tiled_addr_31_0_shift  0
+#define SDMA_PKT_COPY_TILED_TILED_ADDR_LO_TILED_ADDR_31_0(x) (((x) & SDMA_PKT_COPY_TILED_TILED_ADDR_LO_tiled_addr_31_0_mask) << SDMA_PKT_COPY_TILED_TILED_ADDR_LO_tiled_addr_31_0_shift)
+
+/*define for TILED_ADDR_HI word*/
+/*define for tiled_addr_63_32 field*/
+#define SDMA_PKT_COPY_TILED_TILED_ADDR_HI_tiled_addr_63_32_offset 2
+#define SDMA_PKT_COPY_TILED_TILED_ADDR_HI_tiled_addr_63_32_mask   0xFFFFFFFF
+#define SDMA_PKT_COPY_TILED_TILED_ADDR_HI_tiled_addr_63_32_shift  0
+#define SDMA_PKT_COPY_TILED_TILED_ADDR_HI_TILED_ADDR_63_32(x) (((x) & SDMA_PKT_COPY_TILED_TILED_ADDR_HI_tiled_addr_63_32_mask) << SDMA_PKT_COPY_TILED_TILED_ADDR_HI_tiled_addr_63_32_shift)
+
+/*define for DW_3 word*/
+/*define for pitch_in_tile field*/
+#define SDMA_PKT_COPY_TILED_DW_3_pitch_in_tile_offset 3
+#define SDMA_PKT_COPY_TILED_DW_3_pitch_in_tile_mask   0x000007FF
+#define SDMA_PKT_COPY_TILED_DW_3_pitch_in_tile_shift  0
+#define SDMA_PKT_COPY_TILED_DW_3_PITCH_IN_TILE(x) (((x) & SDMA_PKT_COPY_TILED_DW_3_pitch_in_tile_mask) << SDMA_PKT_COPY_TILED_DW_3_pitch_in_tile_shift)
+
+/*define for height field*/
+#define SDMA_PKT_COPY_TILED_DW_3_height_offset 3
+#define SDMA_PKT_COPY_TILED_DW_3_height_mask   0x00003FFF
+#define SDMA_PKT_COPY_TILED_DW_3_height_shift  16
+#define SDMA_PKT_COPY_TILED_DW_3_HEIGHT(x) (((x) & SDMA_PKT_COPY_TILED_DW_3_height_mask) << SDMA_PKT_COPY_TILED_DW_3_height_shift)
+
+/*define for DW_4 word*/
+/*define for slice_pitch field*/
+#define SDMA_PKT_COPY_TILED_DW_4_slice_pitch_offset 4
+#define SDMA_PKT_COPY_TILED_DW_4_slice_pitch_mask   0x003FFFFF
+#define SDMA_PKT_COPY_TILED_DW_4_slice_pitch_shift  0
+#define SDMA_PKT_COPY_TILED_DW_4_SLICE_PITCH(x) (((x) & SDMA_PKT_COPY_TILED_DW_4_slice_pitch_mask) << SDMA_PKT_COPY_TILED_DW_4_slice_pitch_shift)
+
+/*define for DW_5 word*/
+/*define for element_size field*/
+#define SDMA_PKT_COPY_TILED_DW_5_element_size_offset 5
+#define SDMA_PKT_COPY_TILED_DW_5_element_size_mask   0x00000007
+#define SDMA_PKT_COPY_TILED_DW_5_element_size_shift  0
+#define SDMA_PKT_COPY_TILED_DW_5_ELEMENT_SIZE(x) (((x) & SDMA_PKT_COPY_TILED_DW_5_element_size_mask) << SDMA_PKT_COPY_TILED_DW_5_element_size_shift)
+
+/*define for array_mode field*/
+#define SDMA_PKT_COPY_TILED_DW_5_array_mode_offset 5
+#define SDMA_PKT_COPY_TILED_DW_5_array_mode_mask   0x0000000F
+#define SDMA_PKT_COPY_TILED_DW_5_array_mode_shift  3
+#define SDMA_PKT_COPY_TILED_DW_5_ARRAY_MODE(x) (((x) & SDMA_PKT_COPY_TILED_DW_5_array_mode_mask) << SDMA_PKT_COPY_TILED_DW_5_array_mode_shift)
+
+/*define for mit_mode field*/
+#define SDMA_PKT_COPY_TILED_DW_5_mit_mode_offset 5
+#define SDMA_PKT_COPY_TILED_DW_5_mit_mode_mask   0x00000007
+#define SDMA_PKT_COPY_TILED_DW_5_mit_mode_shift  8
+#define SDMA_PKT_COPY_TILED_DW_5_MIT_MODE(x) (((x) & SDMA_PKT_COPY_TILED_DW_5_mit_mode_mask) << SDMA_PKT_COPY_TILED_DW_5_mit_mode_shift)
+
+/*define for tilesplit_size field*/
+#define SDMA_PKT_COPY_TILED_DW_5_tilesplit_size_offset 5
+#define SDMA_PKT_COPY_TILED_DW_5_tilesplit_size_mask   0x00000007
+#define SDMA_PKT_COPY_TILED_DW_5_tilesplit_size_shift  11
+#define SDMA_PKT_COPY_TILED_DW_5_TILESPLIT_SIZE(x) (((x) & SDMA_PKT_COPY_TILED_DW_5_tilesplit_size_mask) << SDMA_PKT_COPY_TILED_DW_5_tilesplit_size_shift)
+
+/*define for bank_w field*/
+#define SDMA_PKT_COPY_TILED_DW_5_bank_w_offset 5
+#define SDMA_PKT_COPY_TILED_DW_5_bank_w_mask   0x00000003
+#define SDMA_PKT_COPY_TILED_DW_5_bank_w_shift  15
+#define SDMA_PKT_COPY_TILED_DW_5_BANK_W(x) (((x) & SDMA_PKT_COPY_TILED_DW_5_bank_w_mask) << SDMA_PKT_COPY_TILED_DW_5_bank_w_shift)
+
+/*define for bank_h field*/
+#define SDMA_PKT_COPY_TILED_DW_5_bank_h_offset 5
+#define SDMA_PKT_COPY_TILED_DW_5_bank_h_mask   0x00000003
+#define SDMA_PKT_COPY_TILED_DW_5_bank_h_shift  18
+#define SDMA_PKT_COPY_TILED_DW_5_BANK_H(x) (((x) & SDMA_PKT_COPY_TILED_DW_5_bank_h_mask) << SDMA_PKT_COPY_TILED_DW_5_bank_h_shift)
+
+/*define for num_bank field*/
+#define SDMA_PKT_COPY_TILED_DW_5_num_bank_offset 5
+#define SDMA_PKT_COPY_TILED_DW_5_num_bank_mask   0x00000003
+#define SDMA_PKT_COPY_TILED_DW_5_num_bank_shift  21
+#define SDMA_PKT_COPY_TILED_DW_5_NUM_BANK(x) (((x) & SDMA_PKT_COPY_TILED_DW_5_num_bank_mask) << SDMA_PKT_COPY_TILED_DW_5_num_bank_shift)
+
+/*define for mat_aspt field*/
+#define SDMA_PKT_COPY_TILED_DW_5_mat_aspt_offset 5
+#define SDMA_PKT_COPY_TILED_DW_5_mat_aspt_mask   0x00000003
+#define SDMA_PKT_COPY_TILED_DW_5_mat_aspt_shift  24
+#define SDMA_PKT_COPY_TILED_DW_5_MAT_ASPT(x) (((x) & SDMA_PKT_COPY_TILED_DW_5_mat_aspt_mask) << SDMA_PKT_COPY_TILED_DW_5_mat_aspt_shift)
+
+/*define for pipe_config field*/
+#define SDMA_PKT_COPY_TILED_DW_5_pipe_config_offset 5
+#define SDMA_PKT_COPY_TILED_DW_5_pipe_config_mask   0x0000001F
+#define SDMA_PKT_COPY_TILED_DW_5_pipe_config_shift  26
+#define SDMA_PKT_COPY_TILED_DW_5_PIPE_CONFIG(x) (((x) & SDMA_PKT_COPY_TILED_DW_5_pipe_config_mask) << SDMA_PKT_COPY_TILED_DW_5_pipe_config_shift)
+
+/*define for DW_6 word*/
+/*define for x field*/
+#define SDMA_PKT_COPY_TILED_DW_6_x_offset 6
+#define SDMA_PKT_COPY_TILED_DW_6_x_mask   0x00003FFF
+#define SDMA_PKT_COPY_TILED_DW_6_x_shift  0
+#define SDMA_PKT_COPY_TILED_DW_6_X(x) (((x) & SDMA_PKT_COPY_TILED_DW_6_x_mask) << SDMA_PKT_COPY_TILED_DW_6_x_shift)
+
+/*define for y field*/
+#define SDMA_PKT_COPY_TILED_DW_6_y_offset 6
+#define SDMA_PKT_COPY_TILED_DW_6_y_mask   0x00003FFF
+#define SDMA_PKT_COPY_TILED_DW_6_y_shift  16
+#define SDMA_PKT_COPY_TILED_DW_6_Y(x) (((x) & SDMA_PKT_COPY_TILED_DW_6_y_mask) << SDMA_PKT_COPY_TILED_DW_6_y_shift)
+
+/*define for DW_7 word*/
+/*define for z field*/
+#define SDMA_PKT_COPY_TILED_DW_7_z_offset 7
+#define SDMA_PKT_COPY_TILED_DW_7_z_mask   0x00000FFF
+#define SDMA_PKT_COPY_TILED_DW_7_z_shift  0
+#define SDMA_PKT_COPY_TILED_DW_7_Z(x) (((x) & SDMA_PKT_COPY_TILED_DW_7_z_mask) << SDMA_PKT_COPY_TILED_DW_7_z_shift)
+
+/*define for linear_sw field*/
+#define SDMA_PKT_COPY_TILED_DW_7_linear_sw_offset 7
+#define SDMA_PKT_COPY_TILED_DW_7_linear_sw_mask   0x00000003
+#define SDMA_PKT_COPY_TILED_DW_7_linear_sw_shift  16
+#define SDMA_PKT_COPY_TILED_DW_7_LINEAR_SW(x) (((x) & SDMA_PKT_COPY_TILED_DW_7_linear_sw_mask) << SDMA_PKT_COPY_TILED_DW_7_linear_sw_shift)
+
+/*define for tile_sw field*/
+#define SDMA_PKT_COPY_TILED_DW_7_tile_sw_offset 7
+#define SDMA_PKT_COPY_TILED_DW_7_tile_sw_mask   0x00000003
+#define SDMA_PKT_COPY_TILED_DW_7_tile_sw_shift  24
+#define SDMA_PKT_COPY_TILED_DW_7_TILE_SW(x) (((x) & SDMA_PKT_COPY_TILED_DW_7_tile_sw_mask) << SDMA_PKT_COPY_TILED_DW_7_tile_sw_shift)
+
+/*define for LINEAR_ADDR_LO word*/
+/*define for linear_addr_31_0 field*/
+#define SDMA_PKT_COPY_TILED_LINEAR_ADDR_LO_linear_addr_31_0_offset 8
+#define SDMA_PKT_COPY_TILED_LINEAR_ADDR_LO_linear_addr_31_0_mask   0xFFFFFFFF
+#define SDMA_PKT_COPY_TILED_LINEAR_ADDR_LO_linear_addr_31_0_shift  0
+#define SDMA_PKT_COPY_TILED_LINEAR_ADDR_LO_LINEAR_ADDR_31_0(x) (((x) & SDMA_PKT_COPY_TILED_LINEAR_ADDR_LO_linear_addr_31_0_mask) << SDMA_PKT_COPY_TILED_LINEAR_ADDR_LO_linear_addr_31_0_shift)
+
+/*define for LINEAR_ADDR_HI word*/
+/*define for linear_addr_63_32 field*/
+#define SDMA_PKT_COPY_TILED_LINEAR_ADDR_HI_linear_addr_63_32_offset 9
+#define SDMA_PKT_COPY_TILED_LINEAR_ADDR_HI_linear_addr_63_32_mask   0xFFFFFFFF
+#define SDMA_PKT_COPY_TILED_LINEAR_ADDR_HI_linear_addr_63_32_shift  0
+#define SDMA_PKT_COPY_TILED_LINEAR_ADDR_HI_LINEAR_ADDR_63_32(x) (((x) & SDMA_PKT_COPY_TILED_LINEAR_ADDR_HI_linear_addr_63_32_mask) << SDMA_PKT_COPY_TILED_LINEAR_ADDR_HI_linear_addr_63_32_shift)
+
+/*define for LINEAR_PITCH word*/
+/*define for linear_pitch field*/
+#define SDMA_PKT_COPY_TILED_LINEAR_PITCH_linear_pitch_offset 10
+#define SDMA_PKT_COPY_TILED_LINEAR_PITCH_linear_pitch_mask   0x0007FFFF
+#define SDMA_PKT_COPY_TILED_LINEAR_PITCH_linear_pitch_shift  0
+#define SDMA_PKT_COPY_TILED_LINEAR_PITCH_LINEAR_PITCH(x) (((x) & SDMA_PKT_COPY_TILED_LINEAR_PITCH_linear_pitch_mask) << SDMA_PKT_COPY_TILED_LINEAR_PITCH_linear_pitch_shift)
+
+/*define for COUNT word*/
+/*define for count field*/
+#define SDMA_PKT_COPY_TILED_COUNT_count_offset 11
+#define SDMA_PKT_COPY_TILED_COUNT_count_mask   0x000FFFFF
+#define SDMA_PKT_COPY_TILED_COUNT_count_shift  0
+#define SDMA_PKT_COPY_TILED_COUNT_COUNT(x) (((x) & SDMA_PKT_COPY_TILED_COUNT_count_mask) << SDMA_PKT_COPY_TILED_COUNT_count_shift)
+
+
+/*
+** Definitions for SDMA_PKT_COPY_L2T_BROADCAST packet
+*/
+
+/*define for HEADER word*/
+/*define for op field*/
+#define SDMA_PKT_COPY_L2T_BROADCAST_HEADER_op_offset 0
+#define SDMA_PKT_COPY_L2T_BROADCAST_HEADER_op_mask   0x000000FF
+#define SDMA_PKT_COPY_L2T_BROADCAST_HEADER_op_shift  0
+#define SDMA_PKT_COPY_L2T_BROADCAST_HEADER_OP(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_HEADER_op_mask) << SDMA_PKT_COPY_L2T_BROADCAST_HEADER_op_shift)
+
+/*define for sub_op field*/
+#define SDMA_PKT_COPY_L2T_BROADCAST_HEADER_sub_op_offset 0
+#define SDMA_PKT_COPY_L2T_BROADCAST_HEADER_sub_op_mask   0x000000FF
+#define SDMA_PKT_COPY_L2T_BROADCAST_HEADER_sub_op_shift  8
+#define SDMA_PKT_COPY_L2T_BROADCAST_HEADER_SUB_OP(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_HEADER_sub_op_mask) << SDMA_PKT_COPY_L2T_BROADCAST_HEADER_sub_op_shift)
+
+/*define for videocopy field*/
+#define SDMA_PKT_COPY_L2T_BROADCAST_HEADER_videocopy_offset 0
+#define SDMA_PKT_COPY_L2T_BROADCAST_HEADER_videocopy_mask   0x00000001
+#define SDMA_PKT_COPY_L2T_BROADCAST_HEADER_videocopy_shift  26
+#define SDMA_PKT_COPY_L2T_BROADCAST_HEADER_VIDEOCOPY(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_HEADER_videocopy_mask) << SDMA_PKT_COPY_L2T_BROADCAST_HEADER_videocopy_shift)
+
+/*define for broadcast field*/
+#define SDMA_PKT_COPY_L2T_BROADCAST_HEADER_broadcast_offset 0
+#define SDMA_PKT_COPY_L2T_BROADCAST_HEADER_broadcast_mask   0x00000001
+#define SDMA_PKT_COPY_L2T_BROADCAST_HEADER_broadcast_shift  27
+#define SDMA_PKT_COPY_L2T_BROADCAST_HEADER_BROADCAST(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_HEADER_broadcast_mask) << SDMA_PKT_COPY_L2T_BROADCAST_HEADER_broadcast_shift)
+
+/*define for TILED_ADDR_LO_0 word*/
+/*define for tiled_addr0_31_0 field*/
+#define SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_LO_0_tiled_addr0_31_0_offset 1
+#define SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_LO_0_tiled_addr0_31_0_mask   0xFFFFFFFF
+#define SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_LO_0_tiled_addr0_31_0_shift  0
+#define SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_LO_0_TILED_ADDR0_31_0(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_LO_0_tiled_addr0_31_0_mask) << SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_LO_0_tiled_addr0_31_0_shift)
+
+/*define for TILED_ADDR_HI_0 word*/
+/*define for tiled_addr0_63_32 field*/
+#define SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_HI_0_tiled_addr0_63_32_offset 2
+#define SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_HI_0_tiled_addr0_63_32_mask   0xFFFFFFFF
+#define SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_HI_0_tiled_addr0_63_32_shift  0
+#define SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_HI_0_TILED_ADDR0_63_32(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_HI_0_tiled_addr0_63_32_mask) << SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_HI_0_tiled_addr0_63_32_shift)
+
+/*define for TILED_ADDR_LO_1 word*/
+/*define for tiled_addr1_31_0 field*/
+#define SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_LO_1_tiled_addr1_31_0_offset 3
+#define SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_LO_1_tiled_addr1_31_0_mask   0xFFFFFFFF
+#define SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_LO_1_tiled_addr1_31_0_shift  0
+#define SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_LO_1_TILED_ADDR1_31_0(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_LO_1_tiled_addr1_31_0_mask) << SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_LO_1_tiled_addr1_31_0_shift)
+
+/*define for TILED_ADDR_HI_1 word*/
+/*define for tiled_addr1_63_32 field*/
+#define SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_HI_1_tiled_addr1_63_32_offset 4
+#define SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_HI_1_tiled_addr1_63_32_mask   0xFFFFFFFF
+#define SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_HI_1_tiled_addr1_63_32_shift  0
+#define SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_HI_1_TILED_ADDR1_63_32(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_HI_1_tiled_addr1_63_32_mask) << SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_HI_1_tiled_addr1_63_32_shift)
+
+/*define for DW_5 word*/
+/*define for pitch_in_tile field*/
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_5_pitch_in_tile_offset 5
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_5_pitch_in_tile_mask   0x000007FF
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_5_pitch_in_tile_shift  0
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_5_PITCH_IN_TILE(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_DW_5_pitch_in_tile_mask) << SDMA_PKT_COPY_L2T_BROADCAST_DW_5_pitch_in_tile_shift)
+
+/*define for height field*/
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_5_height_offset 5
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_5_height_mask   0x00003FFF
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_5_height_shift  16
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_5_HEIGHT(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_DW_5_height_mask) << SDMA_PKT_COPY_L2T_BROADCAST_DW_5_height_shift)
+
+/*define for DW_6 word*/
+/*define for slice_pitch field*/
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_6_slice_pitch_offset 6
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_6_slice_pitch_mask   0x003FFFFF
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_6_slice_pitch_shift  0
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_6_SLICE_PITCH(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_DW_6_slice_pitch_mask) << SDMA_PKT_COPY_L2T_BROADCAST_DW_6_slice_pitch_shift)
+
+/*define for DW_7 word*/
+/*define for element_size field*/
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_element_size_offset 7
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_element_size_mask   0x00000007
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_element_size_shift  0
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_ELEMENT_SIZE(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_DW_7_element_size_mask) << SDMA_PKT_COPY_L2T_BROADCAST_DW_7_element_size_shift)
+
+/*define for array_mode field*/
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_array_mode_offset 7
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_array_mode_mask   0x0000000F
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_array_mode_shift  3
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_ARRAY_MODE(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_DW_7_array_mode_mask) << SDMA_PKT_COPY_L2T_BROADCAST_DW_7_array_mode_shift)
+
+/*define for mit_mode field*/
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_mit_mode_offset 7
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_mit_mode_mask   0x00000007
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_mit_mode_shift  8
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_MIT_MODE(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_DW_7_mit_mode_mask) << SDMA_PKT_COPY_L2T_BROADCAST_DW_7_mit_mode_shift)
+
+/*define for tilesplit_size field*/
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_tilesplit_size_offset 7
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_tilesplit_size_mask   0x00000007
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_tilesplit_size_shift  11
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_TILESPLIT_SIZE(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_DW_7_tilesplit_size_mask) << SDMA_PKT_COPY_L2T_BROADCAST_DW_7_tilesplit_size_shift)
+
+/*define for bank_w field*/
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_bank_w_offset 7
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_bank_w_mask   0x00000003
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_bank_w_shift  15
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_BANK_W(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_DW_7_bank_w_mask) << SDMA_PKT_COPY_L2T_BROADCAST_DW_7_bank_w_shift)
+
+/*define for bank_h field*/
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_bank_h_offset 7
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_bank_h_mask   0x00000003
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_bank_h_shift  18
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_BANK_H(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_DW_7_bank_h_mask) << SDMA_PKT_COPY_L2T_BROADCAST_DW_7_bank_h_shift)
+
+/*define for num_bank field*/
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_num_bank_offset 7
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_num_bank_mask   0x00000003
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_num_bank_shift  21
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_NUM_BANK(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_DW_7_num_bank_mask) << SDMA_PKT_COPY_L2T_BROADCAST_DW_7_num_bank_shift)
+
+/*define for mat_aspt field*/
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_mat_aspt_offset 7
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_mat_aspt_mask   0x00000003
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_mat_aspt_shift  24
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_MAT_ASPT(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_DW_7_mat_aspt_mask) << SDMA_PKT_COPY_L2T_BROADCAST_DW_7_mat_aspt_shift)
+
+/*define for pipe_config field*/
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_pipe_config_offset 7
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_pipe_config_mask   0x0000001F
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_pipe_config_shift  26
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_PIPE_CONFIG(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_DW_7_pipe_config_mask) << SDMA_PKT_COPY_L2T_BROADCAST_DW_7_pipe_config_shift)
+
+/*define for DW_8 word*/
+/*define for x field*/
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_8_x_offset 8
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_8_x_mask   0x00003FFF
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_8_x_shift  0
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_8_X(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_DW_8_x_mask) << SDMA_PKT_COPY_L2T_BROADCAST_DW_8_x_shift)
+
+/*define for y field*/
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_8_y_offset 8
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_8_y_mask   0x00003FFF
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_8_y_shift  16
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_8_Y(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_DW_8_y_mask) << SDMA_PKT_COPY_L2T_BROADCAST_DW_8_y_shift)
+
+/*define for DW_9 word*/
+/*define for z field*/
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_9_z_offset 9
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_9_z_mask   0x00000FFF
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_9_z_shift  0
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_9_Z(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_DW_9_z_mask) << SDMA_PKT_COPY_L2T_BROADCAST_DW_9_z_shift)
+
+/*define for DW_10 word*/
+/*define for dst2_sw field*/
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_10_dst2_sw_offset 10
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_10_dst2_sw_mask   0x00000003
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_10_dst2_sw_shift  8
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_10_DST2_SW(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_DW_10_dst2_sw_mask) << SDMA_PKT_COPY_L2T_BROADCAST_DW_10_dst2_sw_shift)
+
+/*define for dst2_ha field*/
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_10_dst2_ha_offset 10
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_10_dst2_ha_mask   0x00000001
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_10_dst2_ha_shift  14
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_10_DST2_HA(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_DW_10_dst2_ha_mask) << SDMA_PKT_COPY_L2T_BROADCAST_DW_10_dst2_ha_shift)
+
+/*define for linear_sw field*/
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_10_linear_sw_offset 10
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_10_linear_sw_mask   0x00000003
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_10_linear_sw_shift  16
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_10_LINEAR_SW(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_DW_10_linear_sw_mask) << SDMA_PKT_COPY_L2T_BROADCAST_DW_10_linear_sw_shift)
+
+/*define for tile_sw field*/
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_10_tile_sw_offset 10
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_10_tile_sw_mask   0x00000003
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_10_tile_sw_shift  24
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_10_TILE_SW(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_DW_10_tile_sw_mask) << SDMA_PKT_COPY_L2T_BROADCAST_DW_10_tile_sw_shift)
+
+/*define for LINEAR_ADDR_LO word*/
+/*define for linear_addr_31_0 field*/
+#define SDMA_PKT_COPY_L2T_BROADCAST_LINEAR_ADDR_LO_linear_addr_31_0_offset 11
+#define SDMA_PKT_COPY_L2T_BROADCAST_LINEAR_ADDR_LO_linear_addr_31_0_mask   0xFFFFFFFF
+#define SDMA_PKT_COPY_L2T_BROADCAST_LINEAR_ADDR_LO_linear_addr_31_0_shift  0
+#define SDMA_PKT_COPY_L2T_BROADCAST_LINEAR_ADDR_LO_LINEAR_ADDR_31_0(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_LINEAR_ADDR_LO_linear_addr_31_0_mask) << SDMA_PKT_COPY_L2T_BROADCAST_LINEAR_ADDR_LO_linear_addr_31_0_shift)
+
+/*define for LINEAR_ADDR_HI word*/
+/*define for linear_addr_63_32 field*/
+#define SDMA_PKT_COPY_L2T_BROADCAST_LINEAR_ADDR_HI_linear_addr_63_32_offset 12
+#define SDMA_PKT_COPY_L2T_BROADCAST_LINEAR_ADDR_HI_linear_addr_63_32_mask   0xFFFFFFFF
+#define SDMA_PKT_COPY_L2T_BROADCAST_LINEAR_ADDR_HI_linear_addr_63_32_shift  0
+#define SDMA_PKT_COPY_L2T_BROADCAST_LINEAR_ADDR_HI_LINEAR_ADDR_63_32(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_LINEAR_ADDR_HI_linear_addr_63_32_mask) << SDMA_PKT_COPY_L2T_BROADCAST_LINEAR_ADDR_HI_linear_addr_63_32_shift)
+
+/*define for LINEAR_PITCH word*/
+/*define for linear_pitch field*/
+#define SDMA_PKT_COPY_L2T_BROADCAST_LINEAR_PITCH_linear_pitch_offset 13
+#define SDMA_PKT_COPY_L2T_BROADCAST_LINEAR_PITCH_linear_pitch_mask   0x0007FFFF
+#define SDMA_PKT_COPY_L2T_BROADCAST_LINEAR_PITCH_linear_pitch_shift  0
+#define SDMA_PKT_COPY_L2T_BROADCAST_LINEAR_PITCH_LINEAR_PITCH(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_LINEAR_PITCH_linear_pitch_mask) << SDMA_PKT_COPY_L2T_BROADCAST_LINEAR_PITCH_linear_pitch_shift)
+
+/*define for COUNT word*/
+/*define for count field*/
+#define SDMA_PKT_COPY_L2T_BROADCAST_COUNT_count_offset 14
+#define SDMA_PKT_COPY_L2T_BROADCAST_COUNT_count_mask   0x000FFFFF
+#define SDMA_PKT_COPY_L2T_BROADCAST_COUNT_count_shift  0
+#define SDMA_PKT_COPY_L2T_BROADCAST_COUNT_COUNT(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_COUNT_count_mask) << SDMA_PKT_COPY_L2T_BROADCAST_COUNT_count_shift)
+
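+/*
+ * Illustrative sketch (not part of the generated tables above): all of the
+ * DW_7 tiling parameters live in one dword, so a caller would OR the field
+ * helpers together.  The variable names below are hypothetical stand-ins
+ * for whatever describes the tiled destination:
+ *
+ *   dw7 = SDMA_PKT_COPY_L2T_BROADCAST_DW_7_MIT_MODE(mit_mode) |
+ *         SDMA_PKT_COPY_L2T_BROADCAST_DW_7_TILESPLIT_SIZE(tile_split) |
+ *         SDMA_PKT_COPY_L2T_BROADCAST_DW_7_BANK_W(bank_w) |
+ *         SDMA_PKT_COPY_L2T_BROADCAST_DW_7_BANK_H(bank_h) |
+ *         SDMA_PKT_COPY_L2T_BROADCAST_DW_7_NUM_BANK(num_bank) |
+ *         SDMA_PKT_COPY_L2T_BROADCAST_DW_7_MAT_ASPT(mat_aspect) |
+ *         SDMA_PKT_COPY_L2T_BROADCAST_DW_7_PIPE_CONFIG(pipe_config);
+ *
+ * Each helper masks its argument before shifting, so an out-of-range value
+ * is truncated rather than spilling into a neighbouring field.
+ */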
+
+/*
+** Definitions for SDMA_PKT_COPY_T2T packet
+*/
+
+/*define for HEADER word*/
+/*define for op field*/
+#define SDMA_PKT_COPY_T2T_HEADER_op_offset 0
+#define SDMA_PKT_COPY_T2T_HEADER_op_mask   0x000000FF
+#define SDMA_PKT_COPY_T2T_HEADER_op_shift  0
+#define SDMA_PKT_COPY_T2T_HEADER_OP(x) (((x) & SDMA_PKT_COPY_T2T_HEADER_op_mask) << SDMA_PKT_COPY_T2T_HEADER_op_shift)
+
+/*define for sub_op field*/
+#define SDMA_PKT_COPY_T2T_HEADER_sub_op_offset 0
+#define SDMA_PKT_COPY_T2T_HEADER_sub_op_mask   0x000000FF
+#define SDMA_PKT_COPY_T2T_HEADER_sub_op_shift  8
+#define SDMA_PKT_COPY_T2T_HEADER_SUB_OP(x) (((x) & SDMA_PKT_COPY_T2T_HEADER_sub_op_mask) << SDMA_PKT_COPY_T2T_HEADER_sub_op_shift)
+
+/*define for SRC_ADDR_LO word*/
+/*define for src_addr_31_0 field*/
+#define SDMA_PKT_COPY_T2T_SRC_ADDR_LO_src_addr_31_0_offset 1
+#define SDMA_PKT_COPY_T2T_SRC_ADDR_LO_src_addr_31_0_mask   0xFFFFFFFF
+#define SDMA_PKT_COPY_T2T_SRC_ADDR_LO_src_addr_31_0_shift  0
+#define SDMA_PKT_COPY_T2T_SRC_ADDR_LO_SRC_ADDR_31_0(x) (((x) & SDMA_PKT_COPY_T2T_SRC_ADDR_LO_src_addr_31_0_mask) << SDMA_PKT_COPY_T2T_SRC_ADDR_LO_src_addr_31_0_shift)
+
+/*define for SRC_ADDR_HI word*/
+/*define for src_addr_63_32 field*/
+#define SDMA_PKT_COPY_T2T_SRC_ADDR_HI_src_addr_63_32_offset 2
+#define SDMA_PKT_COPY_T2T_SRC_ADDR_HI_src_addr_63_32_mask   0xFFFFFFFF
+#define SDMA_PKT_COPY_T2T_SRC_ADDR_HI_src_addr_63_32_shift  0
+#define SDMA_PKT_COPY_T2T_SRC_ADDR_HI_SRC_ADDR_63_32(x) (((x) & SDMA_PKT_COPY_T2T_SRC_ADDR_HI_src_addr_63_32_mask) << SDMA_PKT_COPY_T2T_SRC_ADDR_HI_src_addr_63_32_shift)
+
+/*define for DW_3 word*/
+/*define for src_x field*/
+#define SDMA_PKT_COPY_T2T_DW_3_src_x_offset 3
+#define SDMA_PKT_COPY_T2T_DW_3_src_x_mask   0x00003FFF
+#define SDMA_PKT_COPY_T2T_DW_3_src_x_shift  0
+#define SDMA_PKT_COPY_T2T_DW_3_SRC_X(x) (((x) & SDMA_PKT_COPY_T2T_DW_3_src_x_mask) << SDMA_PKT_COPY_T2T_DW_3_src_x_shift)
+
+/*define for src_y field*/
+#define SDMA_PKT_COPY_T2T_DW_3_src_y_offset 3
+#define SDMA_PKT_COPY_T2T_DW_3_src_y_mask   0x00003FFF
+#define SDMA_PKT_COPY_T2T_DW_3_src_y_shift  16
+#define SDMA_PKT_COPY_T2T_DW_3_SRC_Y(x) (((x) & SDMA_PKT_COPY_T2T_DW_3_src_y_mask) << SDMA_PKT_COPY_T2T_DW_3_src_y_shift)
+
+/*define for DW_4 word*/
+/*define for src_z field*/
+#define SDMA_PKT_COPY_T2T_DW_4_src_z_offset 4
+#define SDMA_PKT_COPY_T2T_DW_4_src_z_mask   0x000007FF
+#define SDMA_PKT_COPY_T2T_DW_4_src_z_shift  0
+#define SDMA_PKT_COPY_T2T_DW_4_SRC_Z(x) (((x) & SDMA_PKT_COPY_T2T_DW_4_src_z_mask) << SDMA_PKT_COPY_T2T_DW_4_src_z_shift)
+
+/*define for src_pitch_in_tile field*/
+#define SDMA_PKT_COPY_T2T_DW_4_src_pitch_in_tile_offset 4
+#define SDMA_PKT_COPY_T2T_DW_4_src_pitch_in_tile_mask   0x00000FFF
+#define SDMA_PKT_COPY_T2T_DW_4_src_pitch_in_tile_shift  16
+#define SDMA_PKT_COPY_T2T_DW_4_SRC_PITCH_IN_TILE(x) (((x) & SDMA_PKT_COPY_T2T_DW_4_src_pitch_in_tile_mask) << SDMA_PKT_COPY_T2T_DW_4_src_pitch_in_tile_shift)
+
+/*define for DW_5 word*/
+/*define for src_slice_pitch field*/
+#define SDMA_PKT_COPY_T2T_DW_5_src_slice_pitch_offset 5
+#define SDMA_PKT_COPY_T2T_DW_5_src_slice_pitch_mask   0x003FFFFF
+#define SDMA_PKT_COPY_T2T_DW_5_src_slice_pitch_shift  0
+#define SDMA_PKT_COPY_T2T_DW_5_SRC_SLICE_PITCH(x) (((x) & SDMA_PKT_COPY_T2T_DW_5_src_slice_pitch_mask) << SDMA_PKT_COPY_T2T_DW_5_src_slice_pitch_shift)
+
+/*define for DW_6 word*/
+/*define for src_element_size field*/
+#define SDMA_PKT_COPY_T2T_DW_6_src_element_size_offset 6
+#define SDMA_PKT_COPY_T2T_DW_6_src_element_size_mask   0x00000007
+#define SDMA_PKT_COPY_T2T_DW_6_src_element_size_shift  0
+#define SDMA_PKT_COPY_T2T_DW_6_SRC_ELEMENT_SIZE(x) (((x) & SDMA_PKT_COPY_T2T_DW_6_src_element_size_mask) << SDMA_PKT_COPY_T2T_DW_6_src_element_size_shift)
+
+/*define for src_array_mode field*/
+#define SDMA_PKT_COPY_T2T_DW_6_src_array_mode_offset 6
+#define SDMA_PKT_COPY_T2T_DW_6_src_array_mode_mask   0x0000000F
+#define SDMA_PKT_COPY_T2T_DW_6_src_array_mode_shift  3
+#define SDMA_PKT_COPY_T2T_DW_6_SRC_ARRAY_MODE(x) (((x) & SDMA_PKT_COPY_T2T_DW_6_src_array_mode_mask) << SDMA_PKT_COPY_T2T_DW_6_src_array_mode_shift)
+
+/*define for src_mit_mode field*/
+#define SDMA_PKT_COPY_T2T_DW_6_src_mit_mode_offset 6
+#define SDMA_PKT_COPY_T2T_DW_6_src_mit_mode_mask   0x00000007
+#define SDMA_PKT_COPY_T2T_DW_6_src_mit_mode_shift  8
+#define SDMA_PKT_COPY_T2T_DW_6_SRC_MIT_MODE(x) (((x) & SDMA_PKT_COPY_T2T_DW_6_src_mit_mode_mask) << SDMA_PKT_COPY_T2T_DW_6_src_mit_mode_shift)
+
+/*define for src_tilesplit_size field*/
+#define SDMA_PKT_COPY_T2T_DW_6_src_tilesplit_size_offset 6
+#define SDMA_PKT_COPY_T2T_DW_6_src_tilesplit_size_mask   0x00000007
+#define SDMA_PKT_COPY_T2T_DW_6_src_tilesplit_size_shift  11
+#define SDMA_PKT_COPY_T2T_DW_6_SRC_TILESPLIT_SIZE(x) (((x) & SDMA_PKT_COPY_T2T_DW_6_src_tilesplit_size_mask) << SDMA_PKT_COPY_T2T_DW_6_src_tilesplit_size_shift)
+
+/*define for src_bank_w field*/
+#define SDMA_PKT_COPY_T2T_DW_6_src_bank_w_offset 6
+#define SDMA_PKT_COPY_T2T_DW_6_src_bank_w_mask   0x00000003
+#define SDMA_PKT_COPY_T2T_DW_6_src_bank_w_shift  15
+#define SDMA_PKT_COPY_T2T_DW_6_SRC_BANK_W(x) (((x) & SDMA_PKT_COPY_T2T_DW_6_src_bank_w_mask) << SDMA_PKT_COPY_T2T_DW_6_src_bank_w_shift)
+
+/*define for src_bank_h field*/
+#define SDMA_PKT_COPY_T2T_DW_6_src_bank_h_offset 6
+#define SDMA_PKT_COPY_T2T_DW_6_src_bank_h_mask   0x00000003
+#define SDMA_PKT_COPY_T2T_DW_6_src_bank_h_shift  18
+#define SDMA_PKT_COPY_T2T_DW_6_SRC_BANK_H(x) (((x) & SDMA_PKT_COPY_T2T_DW_6_src_bank_h_mask) << SDMA_PKT_COPY_T2T_DW_6_src_bank_h_shift)
+
+/*define for src_num_bank field*/
+#define SDMA_PKT_COPY_T2T_DW_6_src_num_bank_offset 6
+#define SDMA_PKT_COPY_T2T_DW_6_src_num_bank_mask   0x00000003
+#define SDMA_PKT_COPY_T2T_DW_6_src_num_bank_shift  21
+#define SDMA_PKT_COPY_T2T_DW_6_SRC_NUM_BANK(x) (((x) & SDMA_PKT_COPY_T2T_DW_6_src_num_bank_mask) << SDMA_PKT_COPY_T2T_DW_6_src_num_bank_shift)
+
+/*define for src_mat_aspt field*/
+#define SDMA_PKT_COPY_T2T_DW_6_src_mat_aspt_offset 6
+#define SDMA_PKT_COPY_T2T_DW_6_src_mat_aspt_mask   0x00000003
+#define SDMA_PKT_COPY_T2T_DW_6_src_mat_aspt_shift  24
+#define SDMA_PKT_COPY_T2T_DW_6_SRC_MAT_ASPT(x) (((x) & SDMA_PKT_COPY_T2T_DW_6_src_mat_aspt_mask) << SDMA_PKT_COPY_T2T_DW_6_src_mat_aspt_shift)
+
+/*define for src_pipe_config field*/
+#define SDMA_PKT_COPY_T2T_DW_6_src_pipe_config_offset 6
+#define SDMA_PKT_COPY_T2T_DW_6_src_pipe_config_mask   0x0000001F
+#define SDMA_PKT_COPY_T2T_DW_6_src_pipe_config_shift  26
+#define SDMA_PKT_COPY_T2T_DW_6_SRC_PIPE_CONFIG(x) (((x) & SDMA_PKT_COPY_T2T_DW_6_src_pipe_config_mask) << SDMA_PKT_COPY_T2T_DW_6_src_pipe_config_shift)
+
+/*define for DST_ADDR_LO word*/
+/*define for dst_addr_31_0 field*/
+#define SDMA_PKT_COPY_T2T_DST_ADDR_LO_dst_addr_31_0_offset 7
+#define SDMA_PKT_COPY_T2T_DST_ADDR_LO_dst_addr_31_0_mask   0xFFFFFFFF
+#define SDMA_PKT_COPY_T2T_DST_ADDR_LO_dst_addr_31_0_shift  0
+#define SDMA_PKT_COPY_T2T_DST_ADDR_LO_DST_ADDR_31_0(x) (((x) & SDMA_PKT_COPY_T2T_DST_ADDR_LO_dst_addr_31_0_mask) << SDMA_PKT_COPY_T2T_DST_ADDR_LO_dst_addr_31_0_shift)
+
+/*define for DST_ADDR_HI word*/
+/*define for dst_addr_63_32 field*/
+#define SDMA_PKT_COPY_T2T_DST_ADDR_HI_dst_addr_63_32_offset 8
+#define SDMA_PKT_COPY_T2T_DST_ADDR_HI_dst_addr_63_32_mask   0xFFFFFFFF
+#define SDMA_PKT_COPY_T2T_DST_ADDR_HI_dst_addr_63_32_shift  0
+#define SDMA_PKT_COPY_T2T_DST_ADDR_HI_DST_ADDR_63_32(x) (((x) & SDMA_PKT_COPY_T2T_DST_ADDR_HI_dst_addr_63_32_mask) << SDMA_PKT_COPY_T2T_DST_ADDR_HI_dst_addr_63_32_shift)
+
+/*define for DW_9 word*/
+/*define for dst_x field*/
+#define SDMA_PKT_COPY_T2T_DW_9_dst_x_offset 9
+#define SDMA_PKT_COPY_T2T_DW_9_dst_x_mask   0x00003FFF
+#define SDMA_PKT_COPY_T2T_DW_9_dst_x_shift  0
+#define SDMA_PKT_COPY_T2T_DW_9_DST_X(x) (((x) & SDMA_PKT_COPY_T2T_DW_9_dst_x_mask) << SDMA_PKT_COPY_T2T_DW_9_dst_x_shift)
+
+/*define for dst_y field*/
+#define SDMA_PKT_COPY_T2T_DW_9_dst_y_offset 9
+#define SDMA_PKT_COPY_T2T_DW_9_dst_y_mask   0x00003FFF
+#define SDMA_PKT_COPY_T2T_DW_9_dst_y_shift  16
+#define SDMA_PKT_COPY_T2T_DW_9_DST_Y(x) (((x) & SDMA_PKT_COPY_T2T_DW_9_dst_y_mask) << SDMA_PKT_COPY_T2T_DW_9_dst_y_shift)
+
+/*define for DW_10 word*/
+/*define for dst_z field*/
+#define SDMA_PKT_COPY_T2T_DW_10_dst_z_offset 10
+#define SDMA_PKT_COPY_T2T_DW_10_dst_z_mask   0x000007FF
+#define SDMA_PKT_COPY_T2T_DW_10_dst_z_shift  0
+#define SDMA_PKT_COPY_T2T_DW_10_DST_Z(x) (((x) & SDMA_PKT_COPY_T2T_DW_10_dst_z_mask) << SDMA_PKT_COPY_T2T_DW_10_dst_z_shift)
+
+/*define for dst_pitch_in_tile field*/
+#define SDMA_PKT_COPY_T2T_DW_10_dst_pitch_in_tile_offset 10
+#define SDMA_PKT_COPY_T2T_DW_10_dst_pitch_in_tile_mask   0x00000FFF
+#define SDMA_PKT_COPY_T2T_DW_10_dst_pitch_in_tile_shift  16
+#define SDMA_PKT_COPY_T2T_DW_10_DST_PITCH_IN_TILE(x) (((x) & SDMA_PKT_COPY_T2T_DW_10_dst_pitch_in_tile_mask) << SDMA_PKT_COPY_T2T_DW_10_dst_pitch_in_tile_shift)
+
+/*define for DW_11 word*/
+/*define for dst_slice_pitch field*/
+#define SDMA_PKT_COPY_T2T_DW_11_dst_slice_pitch_offset 11
+#define SDMA_PKT_COPY_T2T_DW_11_dst_slice_pitch_mask   0x003FFFFF
+#define SDMA_PKT_COPY_T2T_DW_11_dst_slice_pitch_shift  0
+#define SDMA_PKT_COPY_T2T_DW_11_DST_SLICE_PITCH(x) (((x) & SDMA_PKT_COPY_T2T_DW_11_dst_slice_pitch_mask) << SDMA_PKT_COPY_T2T_DW_11_dst_slice_pitch_shift)
+
+/*define for DW_12 word*/
+/*define for dst_array_mode field*/
+#define SDMA_PKT_COPY_T2T_DW_12_dst_array_mode_offset 12
+#define SDMA_PKT_COPY_T2T_DW_12_dst_array_mode_mask   0x0000000F
+#define SDMA_PKT_COPY_T2T_DW_12_dst_array_mode_shift  3
+#define SDMA_PKT_COPY_T2T_DW_12_DST_ARRAY_MODE(x) (((x) & SDMA_PKT_COPY_T2T_DW_12_dst_array_mode_mask) << SDMA_PKT_COPY_T2T_DW_12_dst_array_mode_shift)
+
+/*define for dst_mit_mode field*/
+#define SDMA_PKT_COPY_T2T_DW_12_dst_mit_mode_offset 12
+#define SDMA_PKT_COPY_T2T_DW_12_dst_mit_mode_mask   0x00000007
+#define SDMA_PKT_COPY_T2T_DW_12_dst_mit_mode_shift  8
+#define SDMA_PKT_COPY_T2T_DW_12_DST_MIT_MODE(x) (((x) & SDMA_PKT_COPY_T2T_DW_12_dst_mit_mode_mask) << SDMA_PKT_COPY_T2T_DW_12_dst_mit_mode_shift)
+
+/*define for dst_tilesplit_size field*/
+#define SDMA_PKT_COPY_T2T_DW_12_dst_tilesplit_size_offset 12
+#define SDMA_PKT_COPY_T2T_DW_12_dst_tilesplit_size_mask   0x00000007
+#define SDMA_PKT_COPY_T2T_DW_12_dst_tilesplit_size_shift  11
+#define SDMA_PKT_COPY_T2T_DW_12_DST_TILESPLIT_SIZE(x) (((x) & SDMA_PKT_COPY_T2T_DW_12_dst_tilesplit_size_mask) << SDMA_PKT_COPY_T2T_DW_12_dst_tilesplit_size_shift)
+
+/*define for dst_bank_w field*/
+#define SDMA_PKT_COPY_T2T_DW_12_dst_bank_w_offset 12
+#define SDMA_PKT_COPY_T2T_DW_12_dst_bank_w_mask   0x00000003
+#define SDMA_PKT_COPY_T2T_DW_12_dst_bank_w_shift  15
+#define SDMA_PKT_COPY_T2T_DW_12_DST_BANK_W(x) (((x) & SDMA_PKT_COPY_T2T_DW_12_dst_bank_w_mask) << SDMA_PKT_COPY_T2T_DW_12_dst_bank_w_shift)
+
+/*define for dst_bank_h field*/
+#define SDMA_PKT_COPY_T2T_DW_12_dst_bank_h_offset 12
+#define SDMA_PKT_COPY_T2T_DW_12_dst_bank_h_mask   0x00000003
+#define SDMA_PKT_COPY_T2T_DW_12_dst_bank_h_shift  18
+#define SDMA_PKT_COPY_T2T_DW_12_DST_BANK_H(x) (((x) & SDMA_PKT_COPY_T2T_DW_12_dst_bank_h_mask) << SDMA_PKT_COPY_T2T_DW_12_dst_bank_h_shift)
+
+/*define for dst_num_bank field*/
+#define SDMA_PKT_COPY_T2T_DW_12_dst_num_bank_offset 12
+#define SDMA_PKT_COPY_T2T_DW_12_dst_num_bank_mask   0x00000003
+#define SDMA_PKT_COPY_T2T_DW_12_dst_num_bank_shift  21
+#define SDMA_PKT_COPY_T2T_DW_12_DST_NUM_BANK(x) (((x) & SDMA_PKT_COPY_T2T_DW_12_dst_num_bank_mask) << SDMA_PKT_COPY_T2T_DW_12_dst_num_bank_shift)
+
+/*define for dst_mat_aspt field*/
+#define SDMA_PKT_COPY_T2T_DW_12_dst_mat_aspt_offset 12
+#define SDMA_PKT_COPY_T2T_DW_12_dst_mat_aspt_mask   0x00000003
+#define SDMA_PKT_COPY_T2T_DW_12_dst_mat_aspt_shift  24
+#define SDMA_PKT_COPY_T2T_DW_12_DST_MAT_ASPT(x) (((x) & SDMA_PKT_COPY_T2T_DW_12_dst_mat_aspt_mask) << SDMA_PKT_COPY_T2T_DW_12_dst_mat_aspt_shift)
+
+/*define for dst_pipe_config field*/
+#define SDMA_PKT_COPY_T2T_DW_12_dst_pipe_config_offset 12
+#define SDMA_PKT_COPY_T2T_DW_12_dst_pipe_config_mask   0x0000001F
+#define SDMA_PKT_COPY_T2T_DW_12_dst_pipe_config_shift  26
+#define SDMA_PKT_COPY_T2T_DW_12_DST_PIPE_CONFIG(x) (((x) & SDMA_PKT_COPY_T2T_DW_12_dst_pipe_config_mask) << SDMA_PKT_COPY_T2T_DW_12_dst_pipe_config_shift)
+
+/*define for DW_13 word*/
+/*define for rect_x field*/
+#define SDMA_PKT_COPY_T2T_DW_13_rect_x_offset 13
+#define SDMA_PKT_COPY_T2T_DW_13_rect_x_mask   0x00003FFF
+#define SDMA_PKT_COPY_T2T_DW_13_rect_x_shift  0
+#define SDMA_PKT_COPY_T2T_DW_13_RECT_X(x) (((x) & SDMA_PKT_COPY_T2T_DW_13_rect_x_mask) << SDMA_PKT_COPY_T2T_DW_13_rect_x_shift)
+
+/*define for rect_y field*/
+#define SDMA_PKT_COPY_T2T_DW_13_rect_y_offset 13
+#define SDMA_PKT_COPY_T2T_DW_13_rect_y_mask   0x00003FFF
+#define SDMA_PKT_COPY_T2T_DW_13_rect_y_shift  16
+#define SDMA_PKT_COPY_T2T_DW_13_RECT_Y(x) (((x) & SDMA_PKT_COPY_T2T_DW_13_rect_y_mask) << SDMA_PKT_COPY_T2T_DW_13_rect_y_shift)
+
+/*define for DW_14 word*/
+/*define for rect_z field*/
+#define SDMA_PKT_COPY_T2T_DW_14_rect_z_offset 14
+#define SDMA_PKT_COPY_T2T_DW_14_rect_z_mask   0x000007FF
+#define SDMA_PKT_COPY_T2T_DW_14_rect_z_shift  0
+#define SDMA_PKT_COPY_T2T_DW_14_RECT_Z(x) (((x) & SDMA_PKT_COPY_T2T_DW_14_rect_z_mask) << SDMA_PKT_COPY_T2T_DW_14_rect_z_shift)
+
+/*define for dst_sw field*/
+#define SDMA_PKT_COPY_T2T_DW_14_dst_sw_offset 14
+#define SDMA_PKT_COPY_T2T_DW_14_dst_sw_mask   0x00000003
+#define SDMA_PKT_COPY_T2T_DW_14_dst_sw_shift  16
+#define SDMA_PKT_COPY_T2T_DW_14_DST_SW(x) (((x) & SDMA_PKT_COPY_T2T_DW_14_dst_sw_mask) << SDMA_PKT_COPY_T2T_DW_14_dst_sw_shift)
+
+/*define for src_sw field*/
+#define SDMA_PKT_COPY_T2T_DW_14_src_sw_offset 14
+#define SDMA_PKT_COPY_T2T_DW_14_src_sw_mask   0x00000003
+#define SDMA_PKT_COPY_T2T_DW_14_src_sw_shift  24
+#define SDMA_PKT_COPY_T2T_DW_14_SRC_SW(x) (((x) & SDMA_PKT_COPY_T2T_DW_14_src_sw_mask) << SDMA_PKT_COPY_T2T_DW_14_src_sw_shift)
+
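+/*
+ * Illustrative sketch (an assumption about intent, not taken from the
+ * tables): rect_x/rect_y/rect_z appear to describe the extent of the copied
+ * sub-rectangle, and DW_14 also carries the destination and source swap
+ * fields, so the last two dwords of the packet could be packed as
+ *
+ *   pkt[13] = SDMA_PKT_COPY_T2T_DW_13_RECT_X(rect_x) |
+ *             SDMA_PKT_COPY_T2T_DW_13_RECT_Y(rect_y);
+ *   pkt[14] = SDMA_PKT_COPY_T2T_DW_14_RECT_Z(rect_z) |
+ *             SDMA_PKT_COPY_T2T_DW_14_DST_SW(dst_sw) |
+ *             SDMA_PKT_COPY_T2T_DW_14_SRC_SW(src_sw);
+ *
+ * The rect_* variables are hypothetical, and their exact encoding follows
+ * the SDMA hardware documentation.
+ */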
+
+/*
+** Definitions for SDMA_PKT_COPY_TILED_SUBWIN packet
+*/
+
+/*define for HEADER word*/
+/*define for op field*/
+#define SDMA_PKT_COPY_TILED_SUBWIN_HEADER_op_offset 0
+#define SDMA_PKT_COPY_TILED_SUBWIN_HEADER_op_mask   0x000000FF
+#define SDMA_PKT_COPY_TILED_SUBWIN_HEADER_op_shift  0
+#define SDMA_PKT_COPY_TILED_SUBWIN_HEADER_OP(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_HEADER_op_mask) << SDMA_PKT_COPY_TILED_SUBWIN_HEADER_op_shift)
+
+/*define for sub_op field*/
+#define SDMA_PKT_COPY_TILED_SUBWIN_HEADER_sub_op_offset 0
+#define SDMA_PKT_COPY_TILED_SUBWIN_HEADER_sub_op_mask   0x000000FF
+#define SDMA_PKT_COPY_TILED_SUBWIN_HEADER_sub_op_shift  8
+#define SDMA_PKT_COPY_TILED_SUBWIN_HEADER_SUB_OP(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_HEADER_sub_op_mask) << SDMA_PKT_COPY_TILED_SUBWIN_HEADER_sub_op_shift)
+
+/*define for detile field*/
+#define SDMA_PKT_COPY_TILED_SUBWIN_HEADER_detile_offset 0
+#define SDMA_PKT_COPY_TILED_SUBWIN_HEADER_detile_mask   0x00000001
+#define SDMA_PKT_COPY_TILED_SUBWIN_HEADER_detile_shift  31
+#define SDMA_PKT_COPY_TILED_SUBWIN_HEADER_DETILE(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_HEADER_detile_mask) << SDMA_PKT_COPY_TILED_SUBWIN_HEADER_detile_shift)
+
+/*define for TILED_ADDR_LO word*/
+/*define for tiled_addr_31_0 field*/
+#define SDMA_PKT_COPY_TILED_SUBWIN_TILED_ADDR_LO_tiled_addr_31_0_offset 1
+#define SDMA_PKT_COPY_TILED_SUBWIN_TILED_ADDR_LO_tiled_addr_31_0_mask   0xFFFFFFFF
+#define SDMA_PKT_COPY_TILED_SUBWIN_TILED_ADDR_LO_tiled_addr_31_0_shift  0
+#define SDMA_PKT_COPY_TILED_SUBWIN_TILED_ADDR_LO_TILED_ADDR_31_0(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_TILED_ADDR_LO_tiled_addr_31_0_mask) << SDMA_PKT_COPY_TILED_SUBWIN_TILED_ADDR_LO_tiled_addr_31_0_shift)
+
+/*define for TILED_ADDR_HI word*/
+/*define for tiled_addr_63_32 field*/
+#define SDMA_PKT_COPY_TILED_SUBWIN_TILED_ADDR_HI_tiled_addr_63_32_offset 2
+#define SDMA_PKT_COPY_TILED_SUBWIN_TILED_ADDR_HI_tiled_addr_63_32_mask   0xFFFFFFFF
+#define SDMA_PKT_COPY_TILED_SUBWIN_TILED_ADDR_HI_tiled_addr_63_32_shift  0
+#define SDMA_PKT_COPY_TILED_SUBWIN_TILED_ADDR_HI_TILED_ADDR_63_32(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_TILED_ADDR_HI_tiled_addr_63_32_mask) << SDMA_PKT_COPY_TILED_SUBWIN_TILED_ADDR_HI_tiled_addr_63_32_shift)
+
+/*define for DW_3 word*/
+/*define for tiled_x field*/
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_3_tiled_x_offset 3
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_3_tiled_x_mask   0x00003FFF
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_3_tiled_x_shift  0
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_3_TILED_X(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_3_tiled_x_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_3_tiled_x_shift)
+
+/*define for tiled_y field*/
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_3_tiled_y_offset 3
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_3_tiled_y_mask   0x00003FFF
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_3_tiled_y_shift  16
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_3_TILED_Y(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_3_tiled_y_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_3_tiled_y_shift)
+
+/*define for DW_4 word*/
+/*define for tiled_z field*/
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_4_tiled_z_offset 4
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_4_tiled_z_mask   0x000007FF
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_4_tiled_z_shift  0
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_4_TILED_Z(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_4_tiled_z_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_4_tiled_z_shift)
+
+/*define for pitch_in_tile field*/
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_4_pitch_in_tile_offset 4
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_4_pitch_in_tile_mask   0x00000FFF
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_4_pitch_in_tile_shift  16
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_4_PITCH_IN_TILE(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_4_pitch_in_tile_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_4_pitch_in_tile_shift)
+
+/*define for DW_5 word*/
+/*define for slice_pitch field*/
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_5_slice_pitch_offset 5
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_5_slice_pitch_mask   0x003FFFFF
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_5_slice_pitch_shift  0
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_5_SLICE_PITCH(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_5_slice_pitch_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_5_slice_pitch_shift)
+
+/*define for DW_6 word*/
+/*define for element_size field*/
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_element_size_offset 6
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_element_size_mask   0x00000007
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_element_size_shift  0
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_ELEMENT_SIZE(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_6_element_size_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_6_element_size_shift)
+
+/*define for array_mode field*/
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_array_mode_offset 6
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_array_mode_mask   0x0000000F
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_array_mode_shift  3
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_ARRAY_MODE(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_6_array_mode_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_6_array_mode_shift)
+
+/*define for mit_mode field*/
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_mit_mode_offset 6
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_mit_mode_mask   0x00000007
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_mit_mode_shift  8
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_MIT_MODE(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_6_mit_mode_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_6_mit_mode_shift)
+
+/*define for tilesplit_size field*/
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_tilesplit_size_offset 6
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_tilesplit_size_mask   0x00000007
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_tilesplit_size_shift  11
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_TILESPLIT_SIZE(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_6_tilesplit_size_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_6_tilesplit_size_shift)
+
+/*define for bank_w field*/
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_bank_w_offset 6
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_bank_w_mask   0x00000003
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_bank_w_shift  15
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_BANK_W(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_6_bank_w_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_6_bank_w_shift)
+
+/*define for bank_h field*/
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_bank_h_offset 6
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_bank_h_mask   0x00000003
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_bank_h_shift  18
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_BANK_H(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_6_bank_h_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_6_bank_h_shift)
+
+/*define for num_bank field*/
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_num_bank_offset 6
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_num_bank_mask   0x00000003
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_num_bank_shift  21
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_NUM_BANK(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_6_num_bank_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_6_num_bank_shift)
+
+/*define for mat_aspt field*/
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_mat_aspt_offset 6
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_mat_aspt_mask   0x00000003
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_mat_aspt_shift  24
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_MAT_ASPT(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_6_mat_aspt_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_6_mat_aspt_shift)
+
+/*define for pipe_config field*/
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_pipe_config_offset 6
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_pipe_config_mask   0x0000001F
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_pipe_config_shift  26
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_PIPE_CONFIG(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_6_pipe_config_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_6_pipe_config_shift)
+
+/*define for LINEAR_ADDR_LO word*/
+/*define for linear_addr_31_0 field*/
+#define SDMA_PKT_COPY_TILED_SUBWIN_LINEAR_ADDR_LO_linear_addr_31_0_offset 7
+#define SDMA_PKT_COPY_TILED_SUBWIN_LINEAR_ADDR_LO_linear_addr_31_0_mask   0xFFFFFFFF
+#define SDMA_PKT_COPY_TILED_SUBWIN_LINEAR_ADDR_LO_linear_addr_31_0_shift  0
+#define SDMA_PKT_COPY_TILED_SUBWIN_LINEAR_ADDR_LO_LINEAR_ADDR_31_0(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_LINEAR_ADDR_LO_linear_addr_31_0_mask) << SDMA_PKT_COPY_TILED_SUBWIN_LINEAR_ADDR_LO_linear_addr_31_0_shift)
+
+/*define for LINEAR_ADDR_HI word*/
+/*define for linear_addr_63_32 field*/
+#define SDMA_PKT_COPY_TILED_SUBWIN_LINEAR_ADDR_HI_linear_addr_63_32_offset 8
+#define SDMA_PKT_COPY_TILED_SUBWIN_LINEAR_ADDR_HI_linear_addr_63_32_mask   0xFFFFFFFF
+#define SDMA_PKT_COPY_TILED_SUBWIN_LINEAR_ADDR_HI_linear_addr_63_32_shift  0
+#define SDMA_PKT_COPY_TILED_SUBWIN_LINEAR_ADDR_HI_LINEAR_ADDR_63_32(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_LINEAR_ADDR_HI_linear_addr_63_32_mask) << SDMA_PKT_COPY_TILED_SUBWIN_LINEAR_ADDR_HI_linear_addr_63_32_shift)
+
+/*define for DW_9 word*/
+/*define for linear_x field*/
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_9_linear_x_offset 9
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_9_linear_x_mask   0x00003FFF
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_9_linear_x_shift  0
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_9_LINEAR_X(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_9_linear_x_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_9_linear_x_shift)
+
+/*define for linear_y field*/
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_9_linear_y_offset 9
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_9_linear_y_mask   0x00003FFF
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_9_linear_y_shift  16
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_9_LINEAR_Y(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_9_linear_y_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_9_linear_y_shift)
+
+/*define for DW_10 word*/
+/*define for linear_z field*/
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_10_linear_z_offset 10
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_10_linear_z_mask   0x000007FF
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_10_linear_z_shift  0
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_10_LINEAR_Z(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_10_linear_z_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_10_linear_z_shift)
+
+/*define for linear_pitch field*/
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_10_linear_pitch_offset 10
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_10_linear_pitch_mask   0x00003FFF
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_10_linear_pitch_shift  16
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_10_LINEAR_PITCH(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_10_linear_pitch_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_10_linear_pitch_shift)
+
+/*define for DW_11 word*/
+/*define for linear_slice_pitch field*/
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_11_linear_slice_pitch_offset 11
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_11_linear_slice_pitch_mask   0x0FFFFFFF
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_11_linear_slice_pitch_shift  0
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_11_LINEAR_SLICE_PITCH(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_11_linear_slice_pitch_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_11_linear_slice_pitch_shift)
+
+/*define for DW_12 word*/
+/*define for rect_x field*/
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_12_rect_x_offset 12
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_12_rect_x_mask   0x00003FFF
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_12_rect_x_shift  0
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_12_RECT_X(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_12_rect_x_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_12_rect_x_shift)
+
+/*define for rect_y field*/
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_12_rect_y_offset 12
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_12_rect_y_mask   0x00003FFF
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_12_rect_y_shift  16
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_12_RECT_Y(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_12_rect_y_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_12_rect_y_shift)
+
+/*define for DW_13 word*/
+/*define for rect_z field*/
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_13_rect_z_offset 13
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_13_rect_z_mask   0x000007FF
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_13_rect_z_shift  0
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_13_RECT_Z(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_13_rect_z_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_13_rect_z_shift)
+
+/*define for linear_sw field*/
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_13_linear_sw_offset 13
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_13_linear_sw_mask   0x00000003
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_13_linear_sw_shift  16
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_13_LINEAR_SW(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_13_linear_sw_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_13_linear_sw_shift)
+
+/*define for tile_sw field*/
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_13_tile_sw_offset 13
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_13_tile_sw_mask   0x00000003
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_13_tile_sw_shift  24
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_13_TILE_SW(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_13_tile_sw_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_13_tile_sw_shift)
+
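+/*
+ * Illustrative sketch (the meaning of detile is an assumption, not stated
+ * in the tables): the header carries the opcode, sub-opcode and the detile
+ * bit, which presumably selects the copy direction (tiled-to-linear when
+ * set).  With op and sub_op assumed to come from the opcode defines earlier
+ * in this file, and tiled_to_linear a hypothetical caller-side flag:
+ *
+ *   hdr = SDMA_PKT_COPY_TILED_SUBWIN_HEADER_OP(op) |
+ *         SDMA_PKT_COPY_TILED_SUBWIN_HEADER_SUB_OP(sub_op) |
+ *         SDMA_PKT_COPY_TILED_SUBWIN_HEADER_DETILE(tiled_to_linear ? 1 : 0);
+ */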
+
+/*
+** Definitions for SDMA_PKT_COPY_STRUCT packet
+*/
+
+/*define for HEADER word*/
+/*define for op field*/
+#define SDMA_PKT_COPY_STRUCT_HEADER_op_offset 0
+#define SDMA_PKT_COPY_STRUCT_HEADER_op_mask   0x000000FF
+#define SDMA_PKT_COPY_STRUCT_HEADER_op_shift  0
+#define SDMA_PKT_COPY_STRUCT_HEADER_OP(x) (((x) & SDMA_PKT_COPY_STRUCT_HEADER_op_mask) << SDMA_PKT_COPY_STRUCT_HEADER_op_shift)
+
+/*define for sub_op field*/
+#define SDMA_PKT_COPY_STRUCT_HEADER_sub_op_offset 0
+#define SDMA_PKT_COPY_STRUCT_HEADER_sub_op_mask   0x000000FF
+#define SDMA_PKT_COPY_STRUCT_HEADER_sub_op_shift  8
+#define SDMA_PKT_COPY_STRUCT_HEADER_SUB_OP(x) (((x) & SDMA_PKT_COPY_STRUCT_HEADER_sub_op_mask) << SDMA_PKT_COPY_STRUCT_HEADER_sub_op_shift)
+
+/*define for detile field*/
+#define SDMA_PKT_COPY_STRUCT_HEADER_detile_offset 0
+#define SDMA_PKT_COPY_STRUCT_HEADER_detile_mask   0x00000001
+#define SDMA_PKT_COPY_STRUCT_HEADER_detile_shift  31
+#define SDMA_PKT_COPY_STRUCT_HEADER_DETILE(x) (((x) & SDMA_PKT_COPY_STRUCT_HEADER_detile_mask) << SDMA_PKT_COPY_STRUCT_HEADER_detile_shift)
+
+/*define for SB_ADDR_LO word*/
+/*define for sb_addr_31_0 field*/
+#define SDMA_PKT_COPY_STRUCT_SB_ADDR_LO_sb_addr_31_0_offset 1
+#define SDMA_PKT_COPY_STRUCT_SB_ADDR_LO_sb_addr_31_0_mask   0xFFFFFFFF
+#define SDMA_PKT_COPY_STRUCT_SB_ADDR_LO_sb_addr_31_0_shift  0
+#define SDMA_PKT_COPY_STRUCT_SB_ADDR_LO_SB_ADDR_31_0(x) (((x) & SDMA_PKT_COPY_STRUCT_SB_ADDR_LO_sb_addr_31_0_mask) << SDMA_PKT_COPY_STRUCT_SB_ADDR_LO_sb_addr_31_0_shift)
+
+/*define for SB_ADDR_HI word*/
+/*define for sb_addr_63_32 field*/
+#define SDMA_PKT_COPY_STRUCT_SB_ADDR_HI_sb_addr_63_32_offset 2
+#define SDMA_PKT_COPY_STRUCT_SB_ADDR_HI_sb_addr_63_32_mask   0xFFFFFFFF
+#define SDMA_PKT_COPY_STRUCT_SB_ADDR_HI_sb_addr_63_32_shift  0
+#define SDMA_PKT_COPY_STRUCT_SB_ADDR_HI_SB_ADDR_63_32(x) (((x) & SDMA_PKT_COPY_STRUCT_SB_ADDR_HI_sb_addr_63_32_mask) << SDMA_PKT_COPY_STRUCT_SB_ADDR_HI_sb_addr_63_32_shift)
+
+/*define for START_INDEX word*/
+/*define for start_index field*/
+#define SDMA_PKT_COPY_STRUCT_START_INDEX_start_index_offset 3
+#define SDMA_PKT_COPY_STRUCT_START_INDEX_start_index_mask   0xFFFFFFFF
+#define SDMA_PKT_COPY_STRUCT_START_INDEX_start_index_shift  0
+#define SDMA_PKT_COPY_STRUCT_START_INDEX_START_INDEX(x) (((x) & SDMA_PKT_COPY_STRUCT_START_INDEX_start_index_mask) << SDMA_PKT_COPY_STRUCT_START_INDEX_start_index_shift)
+
+/*define for COUNT word*/
+/*define for count field*/
+#define SDMA_PKT_COPY_STRUCT_COUNT_count_offset 4
+#define SDMA_PKT_COPY_STRUCT_COUNT_count_mask   0xFFFFFFFF
+#define SDMA_PKT_COPY_STRUCT_COUNT_count_shift  0
+#define SDMA_PKT_COPY_STRUCT_COUNT_COUNT(x) (((x) & SDMA_PKT_COPY_STRUCT_COUNT_count_mask) << SDMA_PKT_COPY_STRUCT_COUNT_count_shift)
+
+/*define for DW_5 word*/
+/*define for stride field*/
+#define SDMA_PKT_COPY_STRUCT_DW_5_stride_offset 5
+#define SDMA_PKT_COPY_STRUCT_DW_5_stride_mask   0x000007FF
+#define SDMA_PKT_COPY_STRUCT_DW_5_stride_shift  0
+#define SDMA_PKT_COPY_STRUCT_DW_5_STRIDE(x) (((x) & SDMA_PKT_COPY_STRUCT_DW_5_stride_mask) << SDMA_PKT_COPY_STRUCT_DW_5_stride_shift)
+
+/*define for struct_sw field*/
+#define SDMA_PKT_COPY_STRUCT_DW_5_struct_sw_offset 5
+#define SDMA_PKT_COPY_STRUCT_DW_5_struct_sw_mask   0x00000003
+#define SDMA_PKT_COPY_STRUCT_DW_5_struct_sw_shift  16
+#define SDMA_PKT_COPY_STRUCT_DW_5_STRUCT_SW(x) (((x) & SDMA_PKT_COPY_STRUCT_DW_5_struct_sw_mask) << SDMA_PKT_COPY_STRUCT_DW_5_struct_sw_shift)
+
+/*define for struct_ha field*/
+#define SDMA_PKT_COPY_STRUCT_DW_5_struct_ha_offset 5
+#define SDMA_PKT_COPY_STRUCT_DW_5_struct_ha_mask   0x00000001
+#define SDMA_PKT_COPY_STRUCT_DW_5_struct_ha_shift  22
+#define SDMA_PKT_COPY_STRUCT_DW_5_STRUCT_HA(x) (((x) & SDMA_PKT_COPY_STRUCT_DW_5_struct_ha_mask) << SDMA_PKT_COPY_STRUCT_DW_5_struct_ha_shift)
+
+/*define for linear_sw field*/
+#define SDMA_PKT_COPY_STRUCT_DW_5_linear_sw_offset 5
+#define SDMA_PKT_COPY_STRUCT_DW_5_linear_sw_mask   0x00000003
+#define SDMA_PKT_COPY_STRUCT_DW_5_linear_sw_shift  24
+#define SDMA_PKT_COPY_STRUCT_DW_5_LINEAR_SW(x) (((x) & SDMA_PKT_COPY_STRUCT_DW_5_linear_sw_mask) << SDMA_PKT_COPY_STRUCT_DW_5_linear_sw_shift)
+
+/*define for linear_ha field*/
+#define SDMA_PKT_COPY_STRUCT_DW_5_linear_ha_offset 5
+#define SDMA_PKT_COPY_STRUCT_DW_5_linear_ha_mask   0x00000001
+#define SDMA_PKT_COPY_STRUCT_DW_5_linear_ha_shift  30
+#define SDMA_PKT_COPY_STRUCT_DW_5_LINEAR_HA(x) (((x) & SDMA_PKT_COPY_STRUCT_DW_5_linear_ha_mask) << SDMA_PKT_COPY_STRUCT_DW_5_linear_ha_shift)
+
+/*define for LINEAR_ADDR_LO word*/
+/*define for linear_addr_31_0 field*/
+#define SDMA_PKT_COPY_STRUCT_LINEAR_ADDR_LO_linear_addr_31_0_offset 6
+#define SDMA_PKT_COPY_STRUCT_LINEAR_ADDR_LO_linear_addr_31_0_mask   0xFFFFFFFF
+#define SDMA_PKT_COPY_STRUCT_LINEAR_ADDR_LO_linear_addr_31_0_shift  0
+#define SDMA_PKT_COPY_STRUCT_LINEAR_ADDR_LO_LINEAR_ADDR_31_0(x) (((x) & SDMA_PKT_COPY_STRUCT_LINEAR_ADDR_LO_linear_addr_31_0_mask) << SDMA_PKT_COPY_STRUCT_LINEAR_ADDR_LO_linear_addr_31_0_shift)
+
+/*define for LINEAR_ADDR_HI word*/
+/*define for linear_addr_63_32 field*/
+#define SDMA_PKT_COPY_STRUCT_LINEAR_ADDR_HI_linear_addr_63_32_offset 7
+#define SDMA_PKT_COPY_STRUCT_LINEAR_ADDR_HI_linear_addr_63_32_mask   0xFFFFFFFF
+#define SDMA_PKT_COPY_STRUCT_LINEAR_ADDR_HI_linear_addr_63_32_shift  0
+#define SDMA_PKT_COPY_STRUCT_LINEAR_ADDR_HI_LINEAR_ADDR_63_32(x) (((x) & SDMA_PKT_COPY_STRUCT_LINEAR_ADDR_HI_linear_addr_63_32_mask) << SDMA_PKT_COPY_STRUCT_LINEAR_ADDR_HI_linear_addr_63_32_shift)
+
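+/*
+ * Illustrative sketch: DW_5 of the structured-buffer copy carries the
+ * element stride together with the _sw/_ha fields for both the structured
+ * and the linear side, so it would be assembled as a single OR of the
+ * helpers above.  The variable names are hypothetical:
+ *
+ *   pkt[5] = SDMA_PKT_COPY_STRUCT_DW_5_STRIDE(stride) |
+ *            SDMA_PKT_COPY_STRUCT_DW_5_STRUCT_SW(struct_sw) |
+ *            SDMA_PKT_COPY_STRUCT_DW_5_STRUCT_HA(struct_ha) |
+ *            SDMA_PKT_COPY_STRUCT_DW_5_LINEAR_SW(linear_sw) |
+ *            SDMA_PKT_COPY_STRUCT_DW_5_LINEAR_HA(linear_ha);
+ */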
+
+/*
+** Definitions for SDMA_PKT_WRITE_UNTILED packet
+*/
+
+/*define for HEADER word*/
+/*define for op field*/
+#define SDMA_PKT_WRITE_UNTILED_HEADER_op_offset 0
+#define SDMA_PKT_WRITE_UNTILED_HEADER_op_mask   0x000000FF
+#define SDMA_PKT_WRITE_UNTILED_HEADER_op_shift  0
+#define SDMA_PKT_WRITE_UNTILED_HEADER_OP(x) (((x) & SDMA_PKT_WRITE_UNTILED_HEADER_op_mask) << SDMA_PKT_WRITE_UNTILED_HEADER_op_shift)
+
+/*define for sub_op field*/
+#define SDMA_PKT_WRITE_UNTILED_HEADER_sub_op_offset 0
+#define SDMA_PKT_WRITE_UNTILED_HEADER_sub_op_mask   0x000000FF
+#define SDMA_PKT_WRITE_UNTILED_HEADER_sub_op_shift  8
+#define SDMA_PKT_WRITE_UNTILED_HEADER_SUB_OP(x) (((x) & SDMA_PKT_WRITE_UNTILED_HEADER_sub_op_mask) << SDMA_PKT_WRITE_UNTILED_HEADER_sub_op_shift)
+
+/*define for DST_ADDR_LO word*/
+/*define for dst_addr_31_0 field*/
+#define SDMA_PKT_WRITE_UNTILED_DST_ADDR_LO_dst_addr_31_0_offset 1
+#define SDMA_PKT_WRITE_UNTILED_DST_ADDR_LO_dst_addr_31_0_mask   0xFFFFFFFF
+#define SDMA_PKT_WRITE_UNTILED_DST_ADDR_LO_dst_addr_31_0_shift  0
+#define SDMA_PKT_WRITE_UNTILED_DST_ADDR_LO_DST_ADDR_31_0(x) (((x) & SDMA_PKT_WRITE_UNTILED_DST_ADDR_LO_dst_addr_31_0_mask) << SDMA_PKT_WRITE_UNTILED_DST_ADDR_LO_dst_addr_31_0_shift)
+
+/*define for DST_ADDR_HI word*/
+/*define for dst_addr_63_32 field*/
+#define SDMA_PKT_WRITE_UNTILED_DST_ADDR_HI_dst_addr_63_32_offset 2
+#define SDMA_PKT_WRITE_UNTILED_DST_ADDR_HI_dst_addr_63_32_mask   0xFFFFFFFF
+#define SDMA_PKT_WRITE_UNTILED_DST_ADDR_HI_dst_addr_63_32_shift  0
+#define SDMA_PKT_WRITE_UNTILED_DST_ADDR_HI_DST_ADDR_63_32(x) (((x) & SDMA_PKT_WRITE_UNTILED_DST_ADDR_HI_dst_addr_63_32_mask) << SDMA_PKT_WRITE_UNTILED_DST_ADDR_HI_dst_addr_63_32_shift)
+
+/*define for DW_3 word*/
+/*define for count field*/
+#define SDMA_PKT_WRITE_UNTILED_DW_3_count_offset 3
+#define SDMA_PKT_WRITE_UNTILED_DW_3_count_mask   0x003FFFFF
+#define SDMA_PKT_WRITE_UNTILED_DW_3_count_shift  0
+#define SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(x) (((x) & SDMA_PKT_WRITE_UNTILED_DW_3_count_mask) << SDMA_PKT_WRITE_UNTILED_DW_3_count_shift)
+
+/*define for sw field*/
+#define SDMA_PKT_WRITE_UNTILED_DW_3_sw_offset 3
+#define SDMA_PKT_WRITE_UNTILED_DW_3_sw_mask   0x00000003
+#define SDMA_PKT_WRITE_UNTILED_DW_3_sw_shift  24
+#define SDMA_PKT_WRITE_UNTILED_DW_3_SW(x) (((x) & SDMA_PKT_WRITE_UNTILED_DW_3_sw_mask) << SDMA_PKT_WRITE_UNTILED_DW_3_sw_shift)
+
+/*define for DATA0 word*/
+/*define for data0 field*/
+#define SDMA_PKT_WRITE_UNTILED_DATA0_data0_offset 4
+#define SDMA_PKT_WRITE_UNTILED_DATA0_data0_mask   0xFFFFFFFF
+#define SDMA_PKT_WRITE_UNTILED_DATA0_data0_shift  0
+#define SDMA_PKT_WRITE_UNTILED_DATA0_DATA0(x) (((x) & SDMA_PKT_WRITE_UNTILED_DATA0_data0_mask) << SDMA_PKT_WRITE_UNTILED_DATA0_data0_shift)
+
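+/*
+ * Illustrative sketch: a linear (untiled) write is header, destination
+ * address, a count dword and then the payload; DATA0 above is simply the
+ * first payload dword.  The opcode values and dst_addr/count/data are
+ * hypothetical, and the exact count encoding follows the SDMA spec:
+ *
+ *   pkt[0] = SDMA_PKT_WRITE_UNTILED_HEADER_OP(op) |
+ *            SDMA_PKT_WRITE_UNTILED_HEADER_SUB_OP(sub_op);
+ *   pkt[1] = SDMA_PKT_WRITE_UNTILED_DST_ADDR_LO_DST_ADDR_31_0(lower_32_bits(dst_addr));
+ *   pkt[2] = SDMA_PKT_WRITE_UNTILED_DST_ADDR_HI_DST_ADDR_63_32(upper_32_bits(dst_addr));
+ *   pkt[3] = SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(count);
+ *   pkt[4] = SDMA_PKT_WRITE_UNTILED_DATA0_DATA0(data[0]);
+ */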
+
+/*
+** Definitions for SDMA_PKT_WRITE_TILED packet
+*/
+
+/*define for HEADER word*/
+/*define for op field*/
+#define SDMA_PKT_WRITE_TILED_HEADER_op_offset 0
+#define SDMA_PKT_WRITE_TILED_HEADER_op_mask   0x000000FF
+#define SDMA_PKT_WRITE_TILED_HEADER_op_shift  0
+#define SDMA_PKT_WRITE_TILED_HEADER_OP(x) (((x) & SDMA_PKT_WRITE_TILED_HEADER_op_mask) << SDMA_PKT_WRITE_TILED_HEADER_op_shift)
+
+/*define for sub_op field*/
+#define SDMA_PKT_WRITE_TILED_HEADER_sub_op_offset 0
+#define SDMA_PKT_WRITE_TILED_HEADER_sub_op_mask   0x000000FF
+#define SDMA_PKT_WRITE_TILED_HEADER_sub_op_shift  8
+#define SDMA_PKT_WRITE_TILED_HEADER_SUB_OP(x) (((x) & SDMA_PKT_WRITE_TILED_HEADER_sub_op_mask) << SDMA_PKT_WRITE_TILED_HEADER_sub_op_shift)
+
+/*define for DST_ADDR_LO word*/
+/*define for dst_addr_31_0 field*/
+#define SDMA_PKT_WRITE_TILED_DST_ADDR_LO_dst_addr_31_0_offset 1
+#define SDMA_PKT_WRITE_TILED_DST_ADDR_LO_dst_addr_31_0_mask   0xFFFFFFFF
+#define SDMA_PKT_WRITE_TILED_DST_ADDR_LO_dst_addr_31_0_shift  0
+#define SDMA_PKT_WRITE_TILED_DST_ADDR_LO_DST_ADDR_31_0(x) (((x) & SDMA_PKT_WRITE_TILED_DST_ADDR_LO_dst_addr_31_0_mask) << SDMA_PKT_WRITE_TILED_DST_ADDR_LO_dst_addr_31_0_shift)
+
+/*define for DST_ADDR_HI word*/
+/*define for dst_addr_63_32 field*/
+#define SDMA_PKT_WRITE_TILED_DST_ADDR_HI_dst_addr_63_32_offset 2
+#define SDMA_PKT_WRITE_TILED_DST_ADDR_HI_dst_addr_63_32_mask   0xFFFFFFFF
+#define SDMA_PKT_WRITE_TILED_DST_ADDR_HI_dst_addr_63_32_shift  0
+#define SDMA_PKT_WRITE_TILED_DST_ADDR_HI_DST_ADDR_63_32(x) (((x) & SDMA_PKT_WRITE_TILED_DST_ADDR_HI_dst_addr_63_32_mask) << SDMA_PKT_WRITE_TILED_DST_ADDR_HI_dst_addr_63_32_shift)
+
+/*define for DW_3 word*/
+/*define for pitch_in_tile field*/
+#define SDMA_PKT_WRITE_TILED_DW_3_pitch_in_tile_offset 3
+#define SDMA_PKT_WRITE_TILED_DW_3_pitch_in_tile_mask   0x000007FF
+#define SDMA_PKT_WRITE_TILED_DW_3_pitch_in_tile_shift  0
+#define SDMA_PKT_WRITE_TILED_DW_3_PITCH_IN_TILE(x) (((x) & SDMA_PKT_WRITE_TILED_DW_3_pitch_in_tile_mask) << SDMA_PKT_WRITE_TILED_DW_3_pitch_in_tile_shift)
+
+/*define for height field*/
+#define SDMA_PKT_WRITE_TILED_DW_3_height_offset 3
+#define SDMA_PKT_WRITE_TILED_DW_3_height_mask   0x00003FFF
+#define SDMA_PKT_WRITE_TILED_DW_3_height_shift  16
+#define SDMA_PKT_WRITE_TILED_DW_3_HEIGHT(x) (((x) & SDMA_PKT_WRITE_TILED_DW_3_height_mask) << SDMA_PKT_WRITE_TILED_DW_3_height_shift)
+
+/*define for DW_4 word*/
+/*define for slice_pitch field*/
+#define SDMA_PKT_WRITE_TILED_DW_4_slice_pitch_offset 4
+#define SDMA_PKT_WRITE_TILED_DW_4_slice_pitch_mask   0x003FFFFF
+#define SDMA_PKT_WRITE_TILED_DW_4_slice_pitch_shift  0
+#define SDMA_PKT_WRITE_TILED_DW_4_SLICE_PITCH(x) (((x) & SDMA_PKT_WRITE_TILED_DW_4_slice_pitch_mask) << SDMA_PKT_WRITE_TILED_DW_4_slice_pitch_shift)
+
+/*define for DW_5 word*/
+/*define for element_size field*/
+#define SDMA_PKT_WRITE_TILED_DW_5_element_size_offset 5
+#define SDMA_PKT_WRITE_TILED_DW_5_element_size_mask   0x00000007
+#define SDMA_PKT_WRITE_TILED_DW_5_element_size_shift  0
+#define SDMA_PKT_WRITE_TILED_DW_5_ELEMENT_SIZE(x) (((x) & SDMA_PKT_WRITE_TILED_DW_5_element_size_mask) << SDMA_PKT_WRITE_TILED_DW_5_element_size_shift)
+
+/*define for array_mode field*/
+#define SDMA_PKT_WRITE_TILED_DW_5_array_mode_offset 5
+#define SDMA_PKT_WRITE_TILED_DW_5_array_mode_mask   0x0000000F
+#define SDMA_PKT_WRITE_TILED_DW_5_array_mode_shift  3
+#define SDMA_PKT_WRITE_TILED_DW_5_ARRAY_MODE(x) (((x) & SDMA_PKT_WRITE_TILED_DW_5_array_mode_mask) << SDMA_PKT_WRITE_TILED_DW_5_array_mode_shift)
+
+/*define for mit_mode field*/
+#define SDMA_PKT_WRITE_TILED_DW_5_mit_mode_offset 5
+#define SDMA_PKT_WRITE_TILED_DW_5_mit_mode_mask   0x00000007
+#define SDMA_PKT_WRITE_TILED_DW_5_mit_mode_shift  8
+#define SDMA_PKT_WRITE_TILED_DW_5_MIT_MODE(x) (((x) & SDMA_PKT_WRITE_TILED_DW_5_mit_mode_mask) << SDMA_PKT_WRITE_TILED_DW_5_mit_mode_shift)
+
+/*define for tilesplit_size field*/
+#define SDMA_PKT_WRITE_TILED_DW_5_tilesplit_size_offset 5
+#define SDMA_PKT_WRITE_TILED_DW_5_tilesplit_size_mask   0x00000007
+#define SDMA_PKT_WRITE_TILED_DW_5_tilesplit_size_shift  11
+#define SDMA_PKT_WRITE_TILED_DW_5_TILESPLIT_SIZE(x) (((x) & SDMA_PKT_WRITE_TILED_DW_5_tilesplit_size_mask) << SDMA_PKT_WRITE_TILED_DW_5_tilesplit_size_shift)
+
+/*define for bank_w field*/
+#define SDMA_PKT_WRITE_TILED_DW_5_bank_w_offset 5
+#define SDMA_PKT_WRITE_TILED_DW_5_bank_w_mask   0x00000003
+#define SDMA_PKT_WRITE_TILED_DW_5_bank_w_shift  15
+#define SDMA_PKT_WRITE_TILED_DW_5_BANK_W(x) (((x) & SDMA_PKT_WRITE_TILED_DW_5_bank_w_mask) << SDMA_PKT_WRITE_TILED_DW_5_bank_w_shift)
+
+/*define for bank_h field*/
+#define SDMA_PKT_WRITE_TILED_DW_5_bank_h_offset 5
+#define SDMA_PKT_WRITE_TILED_DW_5_bank_h_mask   0x00000003
+#define SDMA_PKT_WRITE_TILED_DW_5_bank_h_shift  18
+#define SDMA_PKT_WRITE_TILED_DW_5_BANK_H(x) (((x) & SDMA_PKT_WRITE_TILED_DW_5_bank_h_mask) << SDMA_PKT_WRITE_TILED_DW_5_bank_h_shift)
+
+/*define for num_bank field*/
+#define SDMA_PKT_WRITE_TILED_DW_5_num_bank_offset 5
+#define SDMA_PKT_WRITE_TILED_DW_5_num_bank_mask   0x00000003
+#define SDMA_PKT_WRITE_TILED_DW_5_num_bank_shift  21
+#define SDMA_PKT_WRITE_TILED_DW_5_NUM_BANK(x) (((x) & SDMA_PKT_WRITE_TILED_DW_5_num_bank_mask) << SDMA_PKT_WRITE_TILED_DW_5_num_bank_shift)
+
+/*define for mat_aspt field*/
+#define SDMA_PKT_WRITE_TILED_DW_5_mat_aspt_offset 5
+#define SDMA_PKT_WRITE_TILED_DW_5_mat_aspt_mask   0x00000003
+#define SDMA_PKT_WRITE_TILED_DW_5_mat_aspt_shift  24
+#define SDMA_PKT_WRITE_TILED_DW_5_MAT_ASPT(x) (((x) & SDMA_PKT_WRITE_TILED_DW_5_mat_aspt_mask) << SDMA_PKT_WRITE_TILED_DW_5_mat_aspt_shift)
+
+/*define for pipe_config field*/
+#define SDMA_PKT_WRITE_TILED_DW_5_pipe_config_offset 5
+#define SDMA_PKT_WRITE_TILED_DW_5_pipe_config_mask   0x0000001F
+#define SDMA_PKT_WRITE_TILED_DW_5_pipe_config_shift  26
+#define SDMA_PKT_WRITE_TILED_DW_5_PIPE_CONFIG(x) (((x) & SDMA_PKT_WRITE_TILED_DW_5_pipe_config_mask) << SDMA_PKT_WRITE_TILED_DW_5_pipe_config_shift)
+
+/*define for DW_6 word*/
+/*define for x field*/
+#define SDMA_PKT_WRITE_TILED_DW_6_x_offset 6
+#define SDMA_PKT_WRITE_TILED_DW_6_x_mask   0x00003FFF
+#define SDMA_PKT_WRITE_TILED_DW_6_x_shift  0
+#define SDMA_PKT_WRITE_TILED_DW_6_X(x) (((x) & SDMA_PKT_WRITE_TILED_DW_6_x_mask) << SDMA_PKT_WRITE_TILED_DW_6_x_shift)
+
+/*define for y field*/
+#define SDMA_PKT_WRITE_TILED_DW_6_y_offset 6
+#define SDMA_PKT_WRITE_TILED_DW_6_y_mask   0x00003FFF
+#define SDMA_PKT_WRITE_TILED_DW_6_y_shift  16
+#define SDMA_PKT_WRITE_TILED_DW_6_Y(x) (((x) & SDMA_PKT_WRITE_TILED_DW_6_y_mask) << SDMA_PKT_WRITE_TILED_DW_6_y_shift)
+
+/*define for DW_7 word*/
+/*define for z field*/
+#define SDMA_PKT_WRITE_TILED_DW_7_z_offset 7
+#define SDMA_PKT_WRITE_TILED_DW_7_z_mask   0x00000FFF
+#define SDMA_PKT_WRITE_TILED_DW_7_z_shift  0
+#define SDMA_PKT_WRITE_TILED_DW_7_Z(x) (((x) & SDMA_PKT_WRITE_TILED_DW_7_z_mask) << SDMA_PKT_WRITE_TILED_DW_7_z_shift)
+
+/*define for sw field*/
+#define SDMA_PKT_WRITE_TILED_DW_7_sw_offset 7
+#define SDMA_PKT_WRITE_TILED_DW_7_sw_mask   0x00000003
+#define SDMA_PKT_WRITE_TILED_DW_7_sw_shift  24
+#define SDMA_PKT_WRITE_TILED_DW_7_SW(x) (((x) & SDMA_PKT_WRITE_TILED_DW_7_sw_mask) << SDMA_PKT_WRITE_TILED_DW_7_sw_shift)
+
+/*define for COUNT word*/
+/*define for count field*/
+#define SDMA_PKT_WRITE_TILED_COUNT_count_offset 8
+#define SDMA_PKT_WRITE_TILED_COUNT_count_mask   0x003FFFFF
+#define SDMA_PKT_WRITE_TILED_COUNT_count_shift  0
+#define SDMA_PKT_WRITE_TILED_COUNT_COUNT(x) (((x) & SDMA_PKT_WRITE_TILED_COUNT_count_mask) << SDMA_PKT_WRITE_TILED_COUNT_count_shift)
+
+/*define for DATA0 word*/
+/*define for data0 field*/
+#define SDMA_PKT_WRITE_TILED_DATA0_data0_offset 9
+#define SDMA_PKT_WRITE_TILED_DATA0_data0_mask   0xFFFFFFFF
+#define SDMA_PKT_WRITE_TILED_DATA0_data0_shift  0
+#define SDMA_PKT_WRITE_TILED_DATA0_DATA0(x) (((x) & SDMA_PKT_WRITE_TILED_DATA0_data0_mask) << SDMA_PKT_WRITE_TILED_DATA0_data0_shift)
+
+
+/*
+** Definitions for SDMA_PKT_WRITE_INCR packet
+*/
+
+/*define for HEADER word*/
+/*define for op field*/
+#define SDMA_PKT_WRITE_INCR_HEADER_op_offset 0
+#define SDMA_PKT_WRITE_INCR_HEADER_op_mask   0x000000FF
+#define SDMA_PKT_WRITE_INCR_HEADER_op_shift  0
+#define SDMA_PKT_WRITE_INCR_HEADER_OP(x) (((x) & SDMA_PKT_WRITE_INCR_HEADER_op_mask) << SDMA_PKT_WRITE_INCR_HEADER_op_shift)
+
+/*define for sub_op field*/
+#define SDMA_PKT_WRITE_INCR_HEADER_sub_op_offset 0
+#define SDMA_PKT_WRITE_INCR_HEADER_sub_op_mask   0x000000FF
+#define SDMA_PKT_WRITE_INCR_HEADER_sub_op_shift  8
+#define SDMA_PKT_WRITE_INCR_HEADER_SUB_OP(x) (((x) & SDMA_PKT_WRITE_INCR_HEADER_sub_op_mask) << SDMA_PKT_WRITE_INCR_HEADER_sub_op_shift)
+
+/*define for DST_ADDR_LO word*/
+/*define for dst_addr_31_0 field*/
+#define SDMA_PKT_WRITE_INCR_DST_ADDR_LO_dst_addr_31_0_offset 1
+#define SDMA_PKT_WRITE_INCR_DST_ADDR_LO_dst_addr_31_0_mask   0xFFFFFFFF
+#define SDMA_PKT_WRITE_INCR_DST_ADDR_LO_dst_addr_31_0_shift  0
+#define SDMA_PKT_WRITE_INCR_DST_ADDR_LO_DST_ADDR_31_0(x) (((x) & SDMA_PKT_WRITE_INCR_DST_ADDR_LO_dst_addr_31_0_mask) << SDMA_PKT_WRITE_INCR_DST_ADDR_LO_dst_addr_31_0_shift)
+
+/*define for DST_ADDR_HI word*/
+/*define for dst_addr_63_32 field*/
+#define SDMA_PKT_WRITE_INCR_DST_ADDR_HI_dst_addr_63_32_offset 2
+#define SDMA_PKT_WRITE_INCR_DST_ADDR_HI_dst_addr_63_32_mask   0xFFFFFFFF
+#define SDMA_PKT_WRITE_INCR_DST_ADDR_HI_dst_addr_63_32_shift  0
+#define SDMA_PKT_WRITE_INCR_DST_ADDR_HI_DST_ADDR_63_32(x) (((x) & SDMA_PKT_WRITE_INCR_DST_ADDR_HI_dst_addr_63_32_mask) << SDMA_PKT_WRITE_INCR_DST_ADDR_HI_dst_addr_63_32_shift)
+
+/*define for MASK_DW0 word*/
+/*define for mask_dw0 field*/
+#define SDMA_PKT_WRITE_INCR_MASK_DW0_mask_dw0_offset 3
+#define SDMA_PKT_WRITE_INCR_MASK_DW0_mask_dw0_mask   0xFFFFFFFF
+#define SDMA_PKT_WRITE_INCR_MASK_DW0_mask_dw0_shift  0
+#define SDMA_PKT_WRITE_INCR_MASK_DW0_MASK_DW0(x) (((x) & SDMA_PKT_WRITE_INCR_MASK_DW0_mask_dw0_mask) << SDMA_PKT_WRITE_INCR_MASK_DW0_mask_dw0_shift)
+
+/*define for MASK_DW1 word*/
+/*define for mask_dw1 field*/
+#define SDMA_PKT_WRITE_INCR_MASK_DW1_mask_dw1_offset 4
+#define SDMA_PKT_WRITE_INCR_MASK_DW1_mask_dw1_mask   0xFFFFFFFF
+#define SDMA_PKT_WRITE_INCR_MASK_DW1_mask_dw1_shift  0
+#define SDMA_PKT_WRITE_INCR_MASK_DW1_MASK_DW1(x) (((x) & SDMA_PKT_WRITE_INCR_MASK_DW1_mask_dw1_mask) << SDMA_PKT_WRITE_INCR_MASK_DW1_mask_dw1_shift)
+
+/*define for INIT_DW0 word*/
+/*define for init_dw0 field*/
+#define SDMA_PKT_WRITE_INCR_INIT_DW0_init_dw0_offset 5
+#define SDMA_PKT_WRITE_INCR_INIT_DW0_init_dw0_mask   0xFFFFFFFF
+#define SDMA_PKT_WRITE_INCR_INIT_DW0_init_dw0_shift  0
+#define SDMA_PKT_WRITE_INCR_INIT_DW0_INIT_DW0(x) (((x) & SDMA_PKT_WRITE_INCR_INIT_DW0_init_dw0_mask) << SDMA_PKT_WRITE_INCR_INIT_DW0_init_dw0_shift)
+
+/*define for INIT_DW1 word*/
+/*define for init_dw1 field*/
+#define SDMA_PKT_WRITE_INCR_INIT_DW1_init_dw1_offset 6
+#define SDMA_PKT_WRITE_INCR_INIT_DW1_init_dw1_mask   0xFFFFFFFF
+#define SDMA_PKT_WRITE_INCR_INIT_DW1_init_dw1_shift  0
+#define SDMA_PKT_WRITE_INCR_INIT_DW1_INIT_DW1(x) (((x) & SDMA_PKT_WRITE_INCR_INIT_DW1_init_dw1_mask) << SDMA_PKT_WRITE_INCR_INIT_DW1_init_dw1_shift)
+
+/*define for INCR_DW0 word*/
+/*define for incr_dw0 field*/
+#define SDMA_PKT_WRITE_INCR_INCR_DW0_incr_dw0_offset 7
+#define SDMA_PKT_WRITE_INCR_INCR_DW0_incr_dw0_mask   0xFFFFFFFF
+#define SDMA_PKT_WRITE_INCR_INCR_DW0_incr_dw0_shift  0
+#define SDMA_PKT_WRITE_INCR_INCR_DW0_INCR_DW0(x) (((x) & SDMA_PKT_WRITE_INCR_INCR_DW0_incr_dw0_mask) << SDMA_PKT_WRITE_INCR_INCR_DW0_incr_dw0_shift)
+
+/*define for INCR_DW1 word*/
+/*define for incr_dw1 field*/
+#define SDMA_PKT_WRITE_INCR_INCR_DW1_incr_dw1_offset 8
+#define SDMA_PKT_WRITE_INCR_INCR_DW1_incr_dw1_mask   0xFFFFFFFF
+#define SDMA_PKT_WRITE_INCR_INCR_DW1_incr_dw1_shift  0
+#define SDMA_PKT_WRITE_INCR_INCR_DW1_INCR_DW1(x) (((x) & SDMA_PKT_WRITE_INCR_INCR_DW1_incr_dw1_mask) << SDMA_PKT_WRITE_INCR_INCR_DW1_incr_dw1_shift)
+
+/*define for COUNT word*/
+/*define for count field*/
+#define SDMA_PKT_WRITE_INCR_COUNT_count_offset 9
+#define SDMA_PKT_WRITE_INCR_COUNT_count_mask   0x0007FFFF
+#define SDMA_PKT_WRITE_INCR_COUNT_count_shift  0
+#define SDMA_PKT_WRITE_INCR_COUNT_COUNT(x) (((x) & SDMA_PKT_WRITE_INCR_COUNT_count_mask) << SDMA_PKT_WRITE_INCR_COUNT_count_shift)
+
+
+/*
+** Definitions for SDMA_PKT_INDIRECT packet
+*/
+
+/*define for HEADER word*/
+/*define for op field*/
+#define SDMA_PKT_INDIRECT_HEADER_op_offset 0
+#define SDMA_PKT_INDIRECT_HEADER_op_mask   0x000000FF
+#define SDMA_PKT_INDIRECT_HEADER_op_shift  0
+#define SDMA_PKT_INDIRECT_HEADER_OP(x) (((x) & SDMA_PKT_INDIRECT_HEADER_op_mask) << SDMA_PKT_INDIRECT_HEADER_op_shift)
+
+/*define for sub_op field*/
+#define SDMA_PKT_INDIRECT_HEADER_sub_op_offset 0
+#define SDMA_PKT_INDIRECT_HEADER_sub_op_mask   0x000000FF
+#define SDMA_PKT_INDIRECT_HEADER_sub_op_shift  8
+#define SDMA_PKT_INDIRECT_HEADER_SUB_OP(x) (((x) & SDMA_PKT_INDIRECT_HEADER_sub_op_mask) << SDMA_PKT_INDIRECT_HEADER_sub_op_shift)
+
+/*define for vmid field*/
+#define SDMA_PKT_INDIRECT_HEADER_vmid_offset 0
+#define SDMA_PKT_INDIRECT_HEADER_vmid_mask   0x0000000F
+#define SDMA_PKT_INDIRECT_HEADER_vmid_shift  16
+#define SDMA_PKT_INDIRECT_HEADER_VMID(x) (((x) & SDMA_PKT_INDIRECT_HEADER_vmid_mask) << SDMA_PKT_INDIRECT_HEADER_vmid_shift)
+
+/*define for BASE_LO word*/
+/*define for ib_base_31_0 field*/
+#define SDMA_PKT_INDIRECT_BASE_LO_ib_base_31_0_offset 1
+#define SDMA_PKT_INDIRECT_BASE_LO_ib_base_31_0_mask   0xFFFFFFFF
+#define SDMA_PKT_INDIRECT_BASE_LO_ib_base_31_0_shift  0
+#define SDMA_PKT_INDIRECT_BASE_LO_IB_BASE_31_0(x) (((x) & SDMA_PKT_INDIRECT_BASE_LO_ib_base_31_0_mask) << SDMA_PKT_INDIRECT_BASE_LO_ib_base_31_0_shift)
+
+/*define for BASE_HI word*/
+/*define for ib_base_63_32 field*/
+#define SDMA_PKT_INDIRECT_BASE_HI_ib_base_63_32_offset 2
+#define SDMA_PKT_INDIRECT_BASE_HI_ib_base_63_32_mask   0xFFFFFFFF
+#define SDMA_PKT_INDIRECT_BASE_HI_ib_base_63_32_shift  0
+#define SDMA_PKT_INDIRECT_BASE_HI_IB_BASE_63_32(x) (((x) & SDMA_PKT_INDIRECT_BASE_HI_ib_base_63_32_mask) << SDMA_PKT_INDIRECT_BASE_HI_ib_base_63_32_shift)
+
+/*define for IB_SIZE word*/
+/*define for ib_size field*/
+#define SDMA_PKT_INDIRECT_IB_SIZE_ib_size_offset 3
+#define SDMA_PKT_INDIRECT_IB_SIZE_ib_size_mask   0x000FFFFF
+#define SDMA_PKT_INDIRECT_IB_SIZE_ib_size_shift  0
+#define SDMA_PKT_INDIRECT_IB_SIZE_IB_SIZE(x) (((x) & SDMA_PKT_INDIRECT_IB_SIZE_ib_size_mask) << SDMA_PKT_INDIRECT_IB_SIZE_ib_size_shift)
+
+/*define for CSA_ADDR_LO word*/
+/*define for csa_addr_31_0 field*/
+#define SDMA_PKT_INDIRECT_CSA_ADDR_LO_csa_addr_31_0_offset 4
+#define SDMA_PKT_INDIRECT_CSA_ADDR_LO_csa_addr_31_0_mask   0xFFFFFFFF
+#define SDMA_PKT_INDIRECT_CSA_ADDR_LO_csa_addr_31_0_shift  0
+#define SDMA_PKT_INDIRECT_CSA_ADDR_LO_CSA_ADDR_31_0(x) (((x) & SDMA_PKT_INDIRECT_CSA_ADDR_LO_csa_addr_31_0_mask) << SDMA_PKT_INDIRECT_CSA_ADDR_LO_csa_addr_31_0_shift)
+
+/*define for CSA_ADDR_HI word*/
+/*define for csa_addr_63_32 field*/
+#define SDMA_PKT_INDIRECT_CSA_ADDR_HI_csa_addr_63_32_offset 5
+#define SDMA_PKT_INDIRECT_CSA_ADDR_HI_csa_addr_63_32_mask   0xFFFFFFFF
+#define SDMA_PKT_INDIRECT_CSA_ADDR_HI_csa_addr_63_32_shift  0
+#define SDMA_PKT_INDIRECT_CSA_ADDR_HI_CSA_ADDR_63_32(x) (((x) & SDMA_PKT_INDIRECT_CSA_ADDR_HI_csa_addr_63_32_mask) << SDMA_PKT_INDIRECT_CSA_ADDR_HI_csa_addr_63_32_shift)
+
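+/*
+ * Illustrative sketch: the indirect-buffer packet points the engine at a
+ * separately allocated command buffer.  ib_gpu_addr, ib_len_dw, vmid and
+ * csa_addr are hypothetical caller-side values; ib_size is assumed to be
+ * the IB length in dwords:
+ *
+ *   pkt[0] = SDMA_PKT_INDIRECT_HEADER_OP(op) |
+ *            SDMA_PKT_INDIRECT_HEADER_SUB_OP(sub_op) |
+ *            SDMA_PKT_INDIRECT_HEADER_VMID(vmid);
+ *   pkt[1] = SDMA_PKT_INDIRECT_BASE_LO_IB_BASE_31_0(lower_32_bits(ib_gpu_addr));
+ *   pkt[2] = SDMA_PKT_INDIRECT_BASE_HI_IB_BASE_63_32(upper_32_bits(ib_gpu_addr));
+ *   pkt[3] = SDMA_PKT_INDIRECT_IB_SIZE_IB_SIZE(ib_len_dw);
+ *   pkt[4] = SDMA_PKT_INDIRECT_CSA_ADDR_LO_CSA_ADDR_31_0(lower_32_bits(csa_addr));
+ *   pkt[5] = SDMA_PKT_INDIRECT_CSA_ADDR_HI_CSA_ADDR_63_32(upper_32_bits(csa_addr));
+ */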
+
+/*
+** Definitions for SDMA_PKT_SEMAPHORE packet
+*/
+
+/*define for HEADER word*/
+/*define for op field*/
+#define SDMA_PKT_SEMAPHORE_HEADER_op_offset 0
+#define SDMA_PKT_SEMAPHORE_HEADER_op_mask   0x000000FF
+#define SDMA_PKT_SEMAPHORE_HEADER_op_shift  0
+#define SDMA_PKT_SEMAPHORE_HEADER_OP(x) (((x) & SDMA_PKT_SEMAPHORE_HEADER_op_mask) << SDMA_PKT_SEMAPHORE_HEADER_op_shift)
+
+/*define for sub_op field*/
+#define SDMA_PKT_SEMAPHORE_HEADER_sub_op_offset 0
+#define SDMA_PKT_SEMAPHORE_HEADER_sub_op_mask   0x000000FF
+#define SDMA_PKT_SEMAPHORE_HEADER_sub_op_shift  8
+#define SDMA_PKT_SEMAPHORE_HEADER_SUB_OP(x) (((x) & SDMA_PKT_SEMAPHORE_HEADER_sub_op_mask) << SDMA_PKT_SEMAPHORE_HEADER_sub_op_shift)
+
+/*define for write_one field*/
+#define SDMA_PKT_SEMAPHORE_HEADER_write_one_offset 0
+#define SDMA_PKT_SEMAPHORE_HEADER_write_one_mask   0x00000001
+#define SDMA_PKT_SEMAPHORE_HEADER_write_one_shift  29
+#define SDMA_PKT_SEMAPHORE_HEADER_WRITE_ONE(x) (((x) & SDMA_PKT_SEMAPHORE_HEADER_write_one_mask) << SDMA_PKT_SEMAPHORE_HEADER_write_one_shift)
+
+/*define for signal field*/
+#define SDMA_PKT_SEMAPHORE_HEADER_signal_offset 0
+#define SDMA_PKT_SEMAPHORE_HEADER_signal_mask   0x00000001
+#define SDMA_PKT_SEMAPHORE_HEADER_signal_shift  30
+#define SDMA_PKT_SEMAPHORE_HEADER_SIGNAL(x) (((x) & SDMA_PKT_SEMAPHORE_HEADER_signal_mask) << SDMA_PKT_SEMAPHORE_HEADER_signal_shift)
+
+/*define for mailbox field*/
+#define SDMA_PKT_SEMAPHORE_HEADER_mailbox_offset 0
+#define SDMA_PKT_SEMAPHORE_HEADER_mailbox_mask   0x00000001
+#define SDMA_PKT_SEMAPHORE_HEADER_mailbox_shift  31
+#define SDMA_PKT_SEMAPHORE_HEADER_MAILBOX(x) (((x) & SDMA_PKT_SEMAPHORE_HEADER_mailbox_mask) << SDMA_PKT_SEMAPHORE_HEADER_mailbox_shift)
+
+/*define for ADDR_LO word*/
+/*define for addr_31_0 field*/
+#define SDMA_PKT_SEMAPHORE_ADDR_LO_addr_31_0_offset 1
+#define SDMA_PKT_SEMAPHORE_ADDR_LO_addr_31_0_mask   0xFFFFFFFF
+#define SDMA_PKT_SEMAPHORE_ADDR_LO_addr_31_0_shift  0
+#define SDMA_PKT_SEMAPHORE_ADDR_LO_ADDR_31_0(x) (((x) & SDMA_PKT_SEMAPHORE_ADDR_LO_addr_31_0_mask) << SDMA_PKT_SEMAPHORE_ADDR_LO_addr_31_0_shift)
+
+/*define for ADDR_HI word*/
+/*define for addr_63_32 field*/
+#define SDMA_PKT_SEMAPHORE_ADDR_HI_addr_63_32_offset 2
+#define SDMA_PKT_SEMAPHORE_ADDR_HI_addr_63_32_mask   0xFFFFFFFF
+#define SDMA_PKT_SEMAPHORE_ADDR_HI_addr_63_32_shift  0
+#define SDMA_PKT_SEMAPHORE_ADDR_HI_ADDR_63_32(x) (((x) & SDMA_PKT_SEMAPHORE_ADDR_HI_addr_63_32_mask) << SDMA_PKT_SEMAPHORE_ADDR_HI_addr_63_32_shift)
+
+
+/*
+** Definitions for SDMA_PKT_FENCE packet
+*/
+
+/*define for HEADER word*/
+/*define for op field*/
+#define SDMA_PKT_FENCE_HEADER_op_offset 0
+#define SDMA_PKT_FENCE_HEADER_op_mask   0x000000FF
+#define SDMA_PKT_FENCE_HEADER_op_shift  0
+#define SDMA_PKT_FENCE_HEADER_OP(x) (((x) & SDMA_PKT_FENCE_HEADER_op_mask) << SDMA_PKT_FENCE_HEADER_op_shift)
+
+/*define for sub_op field*/
+#define SDMA_PKT_FENCE_HEADER_sub_op_offset 0
+#define SDMA_PKT_FENCE_HEADER_sub_op_mask   0x000000FF
+#define SDMA_PKT_FENCE_HEADER_sub_op_shift  8
+#define SDMA_PKT_FENCE_HEADER_SUB_OP(x) (((x) & SDMA_PKT_FENCE_HEADER_sub_op_mask) << SDMA_PKT_FENCE_HEADER_sub_op_shift)
+
+/*define for ADDR_LO word*/
+/*define for addr_31_0 field*/
+#define SDMA_PKT_FENCE_ADDR_LO_addr_31_0_offset 1
+#define SDMA_PKT_FENCE_ADDR_LO_addr_31_0_mask   0xFFFFFFFF
+#define SDMA_PKT_FENCE_ADDR_LO_addr_31_0_shift  0
+#define SDMA_PKT_FENCE_ADDR_LO_ADDR_31_0(x) (((x) & SDMA_PKT_FENCE_ADDR_LO_addr_31_0_mask) << SDMA_PKT_FENCE_ADDR_LO_addr_31_0_shift)
+
+/*define for ADDR_HI word*/
+/*define for addr_63_32 field*/
+#define SDMA_PKT_FENCE_ADDR_HI_addr_63_32_offset 2
+#define SDMA_PKT_FENCE_ADDR_HI_addr_63_32_mask   0xFFFFFFFF
+#define SDMA_PKT_FENCE_ADDR_HI_addr_63_32_shift  0
+#define SDMA_PKT_FENCE_ADDR_HI_ADDR_63_32(x) (((x) & SDMA_PKT_FENCE_ADDR_HI_addr_63_32_mask) << SDMA_PKT_FENCE_ADDR_HI_addr_63_32_shift)
+
+/*define for DATA word*/
+/*define for data field*/
+#define SDMA_PKT_FENCE_DATA_data_offset 3
+#define SDMA_PKT_FENCE_DATA_data_mask   0xFFFFFFFF
+#define SDMA_PKT_FENCE_DATA_data_shift  0
+#define SDMA_PKT_FENCE_DATA_DATA(x) (((x) & SDMA_PKT_FENCE_DATA_data_mask) << SDMA_PKT_FENCE_DATA_data_shift)
+
+
+/*
+** Definitions for SDMA_PKT_SRBM_WRITE packet
+*/
+
+/*define for HEADER word*/
+/*define for op field*/
+#define SDMA_PKT_SRBM_WRITE_HEADER_op_offset 0
+#define SDMA_PKT_SRBM_WRITE_HEADER_op_mask   0x000000FF
+#define SDMA_PKT_SRBM_WRITE_HEADER_op_shift  0
+#define SDMA_PKT_SRBM_WRITE_HEADER_OP(x) (((x) & SDMA_PKT_SRBM_WRITE_HEADER_op_mask) << SDMA_PKT_SRBM_WRITE_HEADER_op_shift)
+
+/*define for sub_op field*/
+#define SDMA_PKT_SRBM_WRITE_HEADER_sub_op_offset 0
+#define SDMA_PKT_SRBM_WRITE_HEADER_sub_op_mask   0x000000FF
+#define SDMA_PKT_SRBM_WRITE_HEADER_sub_op_shift  8
+#define SDMA_PKT_SRBM_WRITE_HEADER_SUB_OP(x) (((x) & SDMA_PKT_SRBM_WRITE_HEADER_sub_op_mask) << SDMA_PKT_SRBM_WRITE_HEADER_sub_op_shift)
+
+/*define for byte_en field*/
+#define SDMA_PKT_SRBM_WRITE_HEADER_byte_en_offset 0
+#define SDMA_PKT_SRBM_WRITE_HEADER_byte_en_mask   0x0000000F
+#define SDMA_PKT_SRBM_WRITE_HEADER_byte_en_shift  28
+#define SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(x) (((x) & SDMA_PKT_SRBM_WRITE_HEADER_byte_en_mask) << SDMA_PKT_SRBM_WRITE_HEADER_byte_en_shift)
+
+/*define for ADDR word*/
+/*define for addr field*/
+#define SDMA_PKT_SRBM_WRITE_ADDR_addr_offset 1
+#define SDMA_PKT_SRBM_WRITE_ADDR_addr_mask   0x0000FFFF
+#define SDMA_PKT_SRBM_WRITE_ADDR_addr_shift  0
+#define SDMA_PKT_SRBM_WRITE_ADDR_ADDR(x) (((x) & SDMA_PKT_SRBM_WRITE_ADDR_addr_mask) << SDMA_PKT_SRBM_WRITE_ADDR_addr_shift)
+
+/*define for DATA word*/
+/*define for data field*/
+#define SDMA_PKT_SRBM_WRITE_DATA_data_offset 2
+#define SDMA_PKT_SRBM_WRITE_DATA_data_mask   0xFFFFFFFF
+#define SDMA_PKT_SRBM_WRITE_DATA_data_shift  0
+#define SDMA_PKT_SRBM_WRITE_DATA_DATA(x) (((x) & SDMA_PKT_SRBM_WRITE_DATA_data_mask) << SDMA_PKT_SRBM_WRITE_DATA_data_shift)
+
+
+/*
+** Definitions for SDMA_PKT_PRE_EXE packet
+*/
+
+/*define for HEADER word*/
+/*define for op field*/
+#define SDMA_PKT_PRE_EXE_HEADER_op_offset 0
+#define SDMA_PKT_PRE_EXE_HEADER_op_mask   0x000000FF
+#define SDMA_PKT_PRE_EXE_HEADER_op_shift  0
+#define SDMA_PKT_PRE_EXE_HEADER_OP(x) (((x) & SDMA_PKT_PRE_EXE_HEADER_op_mask) << SDMA_PKT_PRE_EXE_HEADER_op_shift)
+
+/*define for sub_op field*/
+#define SDMA_PKT_PRE_EXE_HEADER_sub_op_offset 0
+#define SDMA_PKT_PRE_EXE_HEADER_sub_op_mask   0x000000FF
+#define SDMA_PKT_PRE_EXE_HEADER_sub_op_shift  8
+#define SDMA_PKT_PRE_EXE_HEADER_SUB_OP(x) (((x) & SDMA_PKT_PRE_EXE_HEADER_sub_op_mask) << SDMA_PKT_PRE_EXE_HEADER_sub_op_shift)
+
+/*define for dev_sel field*/
+#define SDMA_PKT_PRE_EXE_HEADER_dev_sel_offset 0
+#define SDMA_PKT_PRE_EXE_HEADER_dev_sel_mask   0x000000FF
+#define SDMA_PKT_PRE_EXE_HEADER_dev_sel_shift  16
+#define SDMA_PKT_PRE_EXE_HEADER_DEV_SEL(x) (((x) & SDMA_PKT_PRE_EXE_HEADER_dev_sel_mask) << SDMA_PKT_PRE_EXE_HEADER_dev_sel_shift)
+
+/*define for EXEC_COUNT word*/
+/*define for exec_count field*/
+#define SDMA_PKT_PRE_EXE_EXEC_COUNT_exec_count_offset 1
+#define SDMA_PKT_PRE_EXE_EXEC_COUNT_exec_count_mask   0x00003FFF
+#define SDMA_PKT_PRE_EXE_EXEC_COUNT_exec_count_shift  0
+#define SDMA_PKT_PRE_EXE_EXEC_COUNT_EXEC_COUNT(x) (((x) & SDMA_PKT_PRE_EXE_EXEC_COUNT_exec_count_mask) << SDMA_PKT_PRE_EXE_EXEC_COUNT_exec_count_shift)
+
+
+/*
+** Definitions for SDMA_PKT_COND_EXE packet
+*/
+
+/*define for HEADER word*/
+/*define for op field*/
+#define SDMA_PKT_COND_EXE_HEADER_op_offset 0
+#define SDMA_PKT_COND_EXE_HEADER_op_mask   0x000000FF
+#define SDMA_PKT_COND_EXE_HEADER_op_shift  0
+#define SDMA_PKT_COND_EXE_HEADER_OP(x) (((x) & SDMA_PKT_COND_EXE_HEADER_op_mask) << SDMA_PKT_COND_EXE_HEADER_op_shift)
+
+/*define for sub_op field*/
+#define SDMA_PKT_COND_EXE_HEADER_sub_op_offset 0
+#define SDMA_PKT_COND_EXE_HEADER_sub_op_mask   0x000000FF
+#define SDMA_PKT_COND_EXE_HEADER_sub_op_shift  8
+#define SDMA_PKT_COND_EXE_HEADER_SUB_OP(x) (((x) & SDMA_PKT_COND_EXE_HEADER_sub_op_mask) << SDMA_PKT_COND_EXE_HEADER_sub_op_shift)
+
+/*define for ADDR_LO word*/
+/*define for addr_31_0 field*/
+#define SDMA_PKT_COND_EXE_ADDR_LO_addr_31_0_offset 1
+#define SDMA_PKT_COND_EXE_ADDR_LO_addr_31_0_mask   0xFFFFFFFF
+#define SDMA_PKT_COND_EXE_ADDR_LO_addr_31_0_shift  0
+#define SDMA_PKT_COND_EXE_ADDR_LO_ADDR_31_0(x) (((x) & SDMA_PKT_COND_EXE_ADDR_LO_addr_31_0_mask) << SDMA_PKT_COND_EXE_ADDR_LO_addr_31_0_shift)
+
+/*define for ADDR_HI word*/
+/*define for addr_63_32 field*/
+#define SDMA_PKT_COND_EXE_ADDR_HI_addr_63_32_offset 2
+#define SDMA_PKT_COND_EXE_ADDR_HI_addr_63_32_mask   0xFFFFFFFF
+#define SDMA_PKT_COND_EXE_ADDR_HI_addr_63_32_shift  0
+#define SDMA_PKT_COND_EXE_ADDR_HI_ADDR_63_32(x) (((x) & SDMA_PKT_COND_EXE_ADDR_HI_addr_63_32_mask) << SDMA_PKT_COND_EXE_ADDR_HI_addr_63_32_shift)
+
+/*define for REFERENCE word*/
+/*define for reference field*/
+#define SDMA_PKT_COND_EXE_REFERENCE_reference_offset 3
+#define SDMA_PKT_COND_EXE_REFERENCE_reference_mask   0xFFFFFFFF
+#define SDMA_PKT_COND_EXE_REFERENCE_reference_shift  0
+#define SDMA_PKT_COND_EXE_REFERENCE_REFERENCE(x) (((x) & SDMA_PKT_COND_EXE_REFERENCE_reference_mask) << SDMA_PKT_COND_EXE_REFERENCE_reference_shift)
+
+/*define for EXEC_COUNT word*/
+/*define for exec_count field*/
+#define SDMA_PKT_COND_EXE_EXEC_COUNT_exec_count_offset 4
+#define SDMA_PKT_COND_EXE_EXEC_COUNT_exec_count_mask   0x00003FFF
+#define SDMA_PKT_COND_EXE_EXEC_COUNT_exec_count_shift  0
+#define SDMA_PKT_COND_EXE_EXEC_COUNT_EXEC_COUNT(x) (((x) & SDMA_PKT_COND_EXE_EXEC_COUNT_exec_count_mask) << SDMA_PKT_COND_EXE_EXEC_COUNT_exec_count_shift)
+
+
+/*
+** Definitions for SDMA_PKT_CONSTANT_FILL packet
+*/
+
+/*define for HEADER word*/
+/*define for op field*/
+#define SDMA_PKT_CONSTANT_FILL_HEADER_op_offset 0
+#define SDMA_PKT_CONSTANT_FILL_HEADER_op_mask   0x000000FF
+#define SDMA_PKT_CONSTANT_FILL_HEADER_op_shift  0
+#define SDMA_PKT_CONSTANT_FILL_HEADER_OP(x) (((x) & SDMA_PKT_CONSTANT_FILL_HEADER_op_mask) << SDMA_PKT_CONSTANT_FILL_HEADER_op_shift)
+
+/*define for sub_op field*/
+#define SDMA_PKT_CONSTANT_FILL_HEADER_sub_op_offset 0
+#define SDMA_PKT_CONSTANT_FILL_HEADER_sub_op_mask   0x000000FF
+#define SDMA_PKT_CONSTANT_FILL_HEADER_sub_op_shift  8
+#define SDMA_PKT_CONSTANT_FILL_HEADER_SUB_OP(x) (((x) & SDMA_PKT_CONSTANT_FILL_HEADER_sub_op_mask) << SDMA_PKT_CONSTANT_FILL_HEADER_sub_op_shift)
+
+/*define for sw field*/
+#define SDMA_PKT_CONSTANT_FILL_HEADER_sw_offset 0
+#define SDMA_PKT_CONSTANT_FILL_HEADER_sw_mask   0x00000003
+#define SDMA_PKT_CONSTANT_FILL_HEADER_sw_shift  16
+#define SDMA_PKT_CONSTANT_FILL_HEADER_SW(x) (((x) & SDMA_PKT_CONSTANT_FILL_HEADER_sw_mask) << SDMA_PKT_CONSTANT_FILL_HEADER_sw_shift)
+
+/*define for fillsize field*/
+#define SDMA_PKT_CONSTANT_FILL_HEADER_fillsize_offset 0
+#define SDMA_PKT_CONSTANT_FILL_HEADER_fillsize_mask   0x00000003
+#define SDMA_PKT_CONSTANT_FILL_HEADER_fillsize_shift  30
+#define SDMA_PKT_CONSTANT_FILL_HEADER_FILLSIZE(x) (((x) & SDMA_PKT_CONSTANT_FILL_HEADER_fillsize_mask) << SDMA_PKT_CONSTANT_FILL_HEADER_fillsize_shift)
+
+/*define for DST_ADDR_LO word*/
+/*define for dst_addr_31_0 field*/
+#define SDMA_PKT_CONSTANT_FILL_DST_ADDR_LO_dst_addr_31_0_offset 1
+#define SDMA_PKT_CONSTANT_FILL_DST_ADDR_LO_dst_addr_31_0_mask   0xFFFFFFFF
+#define SDMA_PKT_CONSTANT_FILL_DST_ADDR_LO_dst_addr_31_0_shift  0
+#define SDMA_PKT_CONSTANT_FILL_DST_ADDR_LO_DST_ADDR_31_0(x) (((x) & SDMA_PKT_CONSTANT_FILL_DST_ADDR_LO_dst_addr_31_0_mask) << SDMA_PKT_CONSTANT_FILL_DST_ADDR_LO_dst_addr_31_0_shift)
+
+/*define for DST_ADDR_HI word*/
+/*define for dst_addr_63_32 field*/
+#define SDMA_PKT_CONSTANT_FILL_DST_ADDR_HI_dst_addr_63_32_offset 2
+#define SDMA_PKT_CONSTANT_FILL_DST_ADDR_HI_dst_addr_63_32_mask   0xFFFFFFFF
+#define SDMA_PKT_CONSTANT_FILL_DST_ADDR_HI_dst_addr_63_32_shift  0
+#define SDMA_PKT_CONSTANT_FILL_DST_ADDR_HI_DST_ADDR_63_32(x) (((x) & SDMA_PKT_CONSTANT_FILL_DST_ADDR_HI_dst_addr_63_32_mask) << SDMA_PKT_CONSTANT_FILL_DST_ADDR_HI_dst_addr_63_32_shift)
+
+/*define for DATA word*/
+/*define for src_data_31_0 field*/
+#define SDMA_PKT_CONSTANT_FILL_DATA_src_data_31_0_offset 3
+#define SDMA_PKT_CONSTANT_FILL_DATA_src_data_31_0_mask   0xFFFFFFFF
+#define SDMA_PKT_CONSTANT_FILL_DATA_src_data_31_0_shift  0
+#define SDMA_PKT_CONSTANT_FILL_DATA_SRC_DATA_31_0(x) (((x) & SDMA_PKT_CONSTANT_FILL_DATA_src_data_31_0_mask) << SDMA_PKT_CONSTANT_FILL_DATA_src_data_31_0_shift)
+
+/*define for COUNT word*/
+/*define for count field*/
+#define SDMA_PKT_CONSTANT_FILL_COUNT_count_offset 4
+#define SDMA_PKT_CONSTANT_FILL_COUNT_count_mask   0x003FFFFF
+#define SDMA_PKT_CONSTANT_FILL_COUNT_count_shift  0
+#define SDMA_PKT_CONSTANT_FILL_COUNT_COUNT(x) (((x) & SDMA_PKT_CONSTANT_FILL_COUNT_count_mask) << SDMA_PKT_CONSTANT_FILL_COUNT_count_shift)
+
+
+/*
+** Definitions for SDMA_PKT_POLL_REGMEM packet
+*/
+
+/*define for HEADER word*/
+/*define for op field*/
+#define SDMA_PKT_POLL_REGMEM_HEADER_op_offset 0
+#define SDMA_PKT_POLL_REGMEM_HEADER_op_mask   0x000000FF
+#define SDMA_PKT_POLL_REGMEM_HEADER_op_shift  0
+#define SDMA_PKT_POLL_REGMEM_HEADER_OP(x) (((x) & SDMA_PKT_POLL_REGMEM_HEADER_op_mask) << SDMA_PKT_POLL_REGMEM_HEADER_op_shift)
+
+/*define for sub_op field*/
+#define SDMA_PKT_POLL_REGMEM_HEADER_sub_op_offset 0
+#define SDMA_PKT_POLL_REGMEM_HEADER_sub_op_mask   0x000000FF
+#define SDMA_PKT_POLL_REGMEM_HEADER_sub_op_shift  8
+#define SDMA_PKT_POLL_REGMEM_HEADER_SUB_OP(x) (((x) & SDMA_PKT_POLL_REGMEM_HEADER_sub_op_mask) << SDMA_PKT_POLL_REGMEM_HEADER_sub_op_shift)
+
+/*define for hdp_flush field*/
+#define SDMA_PKT_POLL_REGMEM_HEADER_hdp_flush_offset 0
+#define SDMA_PKT_POLL_REGMEM_HEADER_hdp_flush_mask   0x00000001
+#define SDMA_PKT_POLL_REGMEM_HEADER_hdp_flush_shift  26
+#define SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(x) (((x) & SDMA_PKT_POLL_REGMEM_HEADER_hdp_flush_mask) << SDMA_PKT_POLL_REGMEM_HEADER_hdp_flush_shift)
+
+/*define for func field*/
+#define SDMA_PKT_POLL_REGMEM_HEADER_func_offset 0
+#define SDMA_PKT_POLL_REGMEM_HEADER_func_mask   0x00000007
+#define SDMA_PKT_POLL_REGMEM_HEADER_func_shift  28
+#define SDMA_PKT_POLL_REGMEM_HEADER_FUNC(x) (((x) & SDMA_PKT_POLL_REGMEM_HEADER_func_mask) << SDMA_PKT_POLL_REGMEM_HEADER_func_shift)
+
+/*define for mem_poll field*/
+#define SDMA_PKT_POLL_REGMEM_HEADER_mem_poll_offset 0
+#define SDMA_PKT_POLL_REGMEM_HEADER_mem_poll_mask   0x00000001
+#define SDMA_PKT_POLL_REGMEM_HEADER_mem_poll_shift  31
+#define SDMA_PKT_POLL_REGMEM_HEADER_MEM_POLL(x) (((x) & SDMA_PKT_POLL_REGMEM_HEADER_mem_poll_mask) << SDMA_PKT_POLL_REGMEM_HEADER_mem_poll_shift)
+
+/*define for ADDR_LO word*/
+/*define for addr_31_0 field*/
+#define SDMA_PKT_POLL_REGMEM_ADDR_LO_addr_31_0_offset 1
+#define SDMA_PKT_POLL_REGMEM_ADDR_LO_addr_31_0_mask   0xFFFFFFFF
+#define SDMA_PKT_POLL_REGMEM_ADDR_LO_addr_31_0_shift  0
+#define SDMA_PKT_POLL_REGMEM_ADDR_LO_ADDR_31_0(x) (((x) & SDMA_PKT_POLL_REGMEM_ADDR_LO_addr_31_0_mask) << SDMA_PKT_POLL_REGMEM_ADDR_LO_addr_31_0_shift)
+
+/*define for ADDR_HI word*/
+/*define for addr_63_32 field*/
+#define SDMA_PKT_POLL_REGMEM_ADDR_HI_addr_63_32_offset 2
+#define SDMA_PKT_POLL_REGMEM_ADDR_HI_addr_63_32_mask   0xFFFFFFFF
+#define SDMA_PKT_POLL_REGMEM_ADDR_HI_addr_63_32_shift  0
+#define SDMA_PKT_POLL_REGMEM_ADDR_HI_ADDR_63_32(x) (((x) & SDMA_PKT_POLL_REGMEM_ADDR_HI_addr_63_32_mask) << SDMA_PKT_POLL_REGMEM_ADDR_HI_addr_63_32_shift)
+
+/*define for VALUE word*/
+/*define for value field*/
+#define SDMA_PKT_POLL_REGMEM_VALUE_value_offset 3
+#define SDMA_PKT_POLL_REGMEM_VALUE_value_mask   0xFFFFFFFF
+#define SDMA_PKT_POLL_REGMEM_VALUE_value_shift  0
+#define SDMA_PKT_POLL_REGMEM_VALUE_VALUE(x) (((x) & SDMA_PKT_POLL_REGMEM_VALUE_value_mask) << SDMA_PKT_POLL_REGMEM_VALUE_value_shift)
+
+/*define for MASK word*/
+/*define for mask field*/
+#define SDMA_PKT_POLL_REGMEM_MASK_mask_offset 4
+#define SDMA_PKT_POLL_REGMEM_MASK_mask_mask   0xFFFFFFFF
+#define SDMA_PKT_POLL_REGMEM_MASK_mask_shift  0
+#define SDMA_PKT_POLL_REGMEM_MASK_MASK(x) (((x) & SDMA_PKT_POLL_REGMEM_MASK_mask_mask) << SDMA_PKT_POLL_REGMEM_MASK_mask_shift)
+
+/*define for DW5 word*/
+/*define for interval field*/
+#define SDMA_PKT_POLL_REGMEM_DW5_interval_offset 5
+#define SDMA_PKT_POLL_REGMEM_DW5_interval_mask   0x0000FFFF
+#define SDMA_PKT_POLL_REGMEM_DW5_interval_shift  0
+#define SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(x) (((x) & SDMA_PKT_POLL_REGMEM_DW5_interval_mask) << SDMA_PKT_POLL_REGMEM_DW5_interval_shift)
+
+/*define for retry_count field*/
+#define SDMA_PKT_POLL_REGMEM_DW5_retry_count_offset 5
+#define SDMA_PKT_POLL_REGMEM_DW5_retry_count_mask   0x00000FFF
+#define SDMA_PKT_POLL_REGMEM_DW5_retry_count_shift  16
+#define SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(x) (((x) & SDMA_PKT_POLL_REGMEM_DW5_retry_count_mask) << SDMA_PKT_POLL_REGMEM_DW5_retry_count_shift)
+
+
+/*
+** Definitions for SDMA_PKT_TIMESTAMP_SET packet
+*/
+
+/*define for HEADER word*/
+/*define for op field*/
+#define SDMA_PKT_TIMESTAMP_SET_HEADER_op_offset 0
+#define SDMA_PKT_TIMESTAMP_SET_HEADER_op_mask   0x000000FF
+#define SDMA_PKT_TIMESTAMP_SET_HEADER_op_shift  0
+#define SDMA_PKT_TIMESTAMP_SET_HEADER_OP(x) (((x) & SDMA_PKT_TIMESTAMP_SET_HEADER_op_mask) << SDMA_PKT_TIMESTAMP_SET_HEADER_op_shift)
+
+/*define for sub_op field*/
+#define SDMA_PKT_TIMESTAMP_SET_HEADER_sub_op_offset 0
+#define SDMA_PKT_TIMESTAMP_SET_HEADER_sub_op_mask   0x000000FF
+#define SDMA_PKT_TIMESTAMP_SET_HEADER_sub_op_shift  8
+#define SDMA_PKT_TIMESTAMP_SET_HEADER_SUB_OP(x) (((x) & SDMA_PKT_TIMESTAMP_SET_HEADER_sub_op_mask) << SDMA_PKT_TIMESTAMP_SET_HEADER_sub_op_shift)
+
+/*define for INIT_DATA_LO word*/
+/*define for init_data_31_0 field*/
+#define SDMA_PKT_TIMESTAMP_SET_INIT_DATA_LO_init_data_31_0_offset 1
+#define SDMA_PKT_TIMESTAMP_SET_INIT_DATA_LO_init_data_31_0_mask   0xFFFFFFFF
+#define SDMA_PKT_TIMESTAMP_SET_INIT_DATA_LO_init_data_31_0_shift  0
+#define SDMA_PKT_TIMESTAMP_SET_INIT_DATA_LO_INIT_DATA_31_0(x) (((x) & SDMA_PKT_TIMESTAMP_SET_INIT_DATA_LO_init_data_31_0_mask) << SDMA_PKT_TIMESTAMP_SET_INIT_DATA_LO_init_data_31_0_shift)
+
+/*define for INIT_DATA_HI word*/
+/*define for init_data_63_32 field*/
+#define SDMA_PKT_TIMESTAMP_SET_INIT_DATA_HI_init_data_63_32_offset 2
+#define SDMA_PKT_TIMESTAMP_SET_INIT_DATA_HI_init_data_63_32_mask   0xFFFFFFFF
+#define SDMA_PKT_TIMESTAMP_SET_INIT_DATA_HI_init_data_63_32_shift  0
+#define SDMA_PKT_TIMESTAMP_SET_INIT_DATA_HI_INIT_DATA_63_32(x) (((x) & SDMA_PKT_TIMESTAMP_SET_INIT_DATA_HI_init_data_63_32_mask) << SDMA_PKT_TIMESTAMP_SET_INIT_DATA_HI_init_data_63_32_shift)
+
+
+/*
+** Definitions for SDMA_PKT_TIMESTAMP_GET packet
+*/
+
+/*define for HEADER word*/
+/*define for op field*/
+#define SDMA_PKT_TIMESTAMP_GET_HEADER_op_offset 0
+#define SDMA_PKT_TIMESTAMP_GET_HEADER_op_mask   0x000000FF
+#define SDMA_PKT_TIMESTAMP_GET_HEADER_op_shift  0
+#define SDMA_PKT_TIMESTAMP_GET_HEADER_OP(x) (((x) & SDMA_PKT_TIMESTAMP_GET_HEADER_op_mask) << SDMA_PKT_TIMESTAMP_GET_HEADER_op_shift)
+
+/*define for sub_op field*/
+#define SDMA_PKT_TIMESTAMP_GET_HEADER_sub_op_offset 0
+#define SDMA_PKT_TIMESTAMP_GET_HEADER_sub_op_mask   0x000000FF
+#define SDMA_PKT_TIMESTAMP_GET_HEADER_sub_op_shift  8
+#define SDMA_PKT_TIMESTAMP_GET_HEADER_SUB_OP(x) (((x) & SDMA_PKT_TIMESTAMP_GET_HEADER_sub_op_mask) << SDMA_PKT_TIMESTAMP_GET_HEADER_sub_op_shift)
+
+/*define for WRITE_ADDR_LO word*/
+/*define for write_addr_31_3 field*/
+#define SDMA_PKT_TIMESTAMP_GET_WRITE_ADDR_LO_write_addr_31_3_offset 1
+#define SDMA_PKT_TIMESTAMP_GET_WRITE_ADDR_LO_write_addr_31_3_mask   0x1FFFFFFF
+#define SDMA_PKT_TIMESTAMP_GET_WRITE_ADDR_LO_write_addr_31_3_shift  3
+#define SDMA_PKT_TIMESTAMP_GET_WRITE_ADDR_LO_WRITE_ADDR_31_3(x) (((x) & SDMA_PKT_TIMESTAMP_GET_WRITE_ADDR_LO_write_addr_31_3_mask) << SDMA_PKT_TIMESTAMP_GET_WRITE_ADDR_LO_write_addr_31_3_shift)
+
+/*define for WRITE_ADDR_HI word*/
+/*define for write_addr_63_32 field*/
+#define SDMA_PKT_TIMESTAMP_GET_WRITE_ADDR_HI_write_addr_63_32_offset 2
+#define SDMA_PKT_TIMESTAMP_GET_WRITE_ADDR_HI_write_addr_63_32_mask   0xFFFFFFFF
+#define SDMA_PKT_TIMESTAMP_GET_WRITE_ADDR_HI_write_addr_63_32_shift  0
+#define SDMA_PKT_TIMESTAMP_GET_WRITE_ADDR_HI_WRITE_ADDR_63_32(x) (((x) & SDMA_PKT_TIMESTAMP_GET_WRITE_ADDR_HI_write_addr_63_32_mask) << SDMA_PKT_TIMESTAMP_GET_WRITE_ADDR_HI_write_addr_63_32_shift)
+
+
+/*
+** Definitions for SDMA_PKT_TIMESTAMP_GET_GLOBAL packet
+*/
+
+/*define for HEADER word*/
+/*define for op field*/
+#define SDMA_PKT_TIMESTAMP_GET_GLOBAL_HEADER_op_offset 0
+#define SDMA_PKT_TIMESTAMP_GET_GLOBAL_HEADER_op_mask   0x000000FF
+#define SDMA_PKT_TIMESTAMP_GET_GLOBAL_HEADER_op_shift  0
+#define SDMA_PKT_TIMESTAMP_GET_GLOBAL_HEADER_OP(x) (((x) & SDMA_PKT_TIMESTAMP_GET_GLOBAL_HEADER_op_mask) << SDMA_PKT_TIMESTAMP_GET_GLOBAL_HEADER_op_shift)
+
+/*define for sub_op field*/
+#define SDMA_PKT_TIMESTAMP_GET_GLOBAL_HEADER_sub_op_offset 0
+#define SDMA_PKT_TIMESTAMP_GET_GLOBAL_HEADER_sub_op_mask   0x000000FF
+#define SDMA_PKT_TIMESTAMP_GET_GLOBAL_HEADER_sub_op_shift  8
+#define SDMA_PKT_TIMESTAMP_GET_GLOBAL_HEADER_SUB_OP(x) (((x) & SDMA_PKT_TIMESTAMP_GET_GLOBAL_HEADER_sub_op_mask) << SDMA_PKT_TIMESTAMP_GET_GLOBAL_HEADER_sub_op_shift)
+
+/*define for WRITE_ADDR_LO word*/
+/*define for write_addr_31_3 field*/
+#define SDMA_PKT_TIMESTAMP_GET_GLOBAL_WRITE_ADDR_LO_write_addr_31_3_offset 1
+#define SDMA_PKT_TIMESTAMP_GET_GLOBAL_WRITE_ADDR_LO_write_addr_31_3_mask   0x1FFFFFFF
+#define SDMA_PKT_TIMESTAMP_GET_GLOBAL_WRITE_ADDR_LO_write_addr_31_3_shift  3
+#define SDMA_PKT_TIMESTAMP_GET_GLOBAL_WRITE_ADDR_LO_WRITE_ADDR_31_3(x) (((x) & SDMA_PKT_TIMESTAMP_GET_GLOBAL_WRITE_ADDR_LO_write_addr_31_3_mask) << SDMA_PKT_TIMESTAMP_GET_GLOBAL_WRITE_ADDR_LO_write_addr_31_3_shift)
+
+/*define for WRITE_ADDR_HI word*/
+/*define for write_addr_63_32 field*/
+#define SDMA_PKT_TIMESTAMP_GET_GLOBAL_WRITE_ADDR_HI_write_addr_63_32_offset 2
+#define SDMA_PKT_TIMESTAMP_GET_GLOBAL_WRITE_ADDR_HI_write_addr_63_32_mask   0xFFFFFFFF
+#define SDMA_PKT_TIMESTAMP_GET_GLOBAL_WRITE_ADDR_HI_write_addr_63_32_shift  0
+#define SDMA_PKT_TIMESTAMP_GET_GLOBAL_WRITE_ADDR_HI_WRITE_ADDR_63_32(x) (((x) & SDMA_PKT_TIMESTAMP_GET_GLOBAL_WRITE_ADDR_HI_write_addr_63_32_mask) << SDMA_PKT_TIMESTAMP_GET_GLOBAL_WRITE_ADDR_HI_write_addr_63_32_shift)
+
+
+/*
+** Definitions for SDMA_PKT_TRAP packet
+*/
+
+/*define for HEADER word*/
+/*define for op field*/
+#define SDMA_PKT_TRAP_HEADER_op_offset 0
+#define SDMA_PKT_TRAP_HEADER_op_mask   0x000000FF
+#define SDMA_PKT_TRAP_HEADER_op_shift  0
+#define SDMA_PKT_TRAP_HEADER_OP(x) (((x) & SDMA_PKT_TRAP_HEADER_op_mask) << SDMA_PKT_TRAP_HEADER_op_shift)
+
+/*define for sub_op field*/
+#define SDMA_PKT_TRAP_HEADER_sub_op_offset 0
+#define SDMA_PKT_TRAP_HEADER_sub_op_mask   0x000000FF
+#define SDMA_PKT_TRAP_HEADER_sub_op_shift  8
+#define SDMA_PKT_TRAP_HEADER_SUB_OP(x) (((x) & SDMA_PKT_TRAP_HEADER_sub_op_mask) << SDMA_PKT_TRAP_HEADER_sub_op_shift)
+
+/*define for INT_CONTEXT word*/
+/*define for int_context field*/
+#define SDMA_PKT_TRAP_INT_CONTEXT_int_context_offset 1
+#define SDMA_PKT_TRAP_INT_CONTEXT_int_context_mask   0x0FFFFFFF
+#define SDMA_PKT_TRAP_INT_CONTEXT_int_context_shift  0
+#define SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(x) (((x) & SDMA_PKT_TRAP_INT_CONTEXT_int_context_mask) << SDMA_PKT_TRAP_INT_CONTEXT_int_context_shift)
+
+
+/*
+** Definitions for SDMA_PKT_NOP packet
+*/
+
+/*define for HEADER word*/
+/*define for op field*/
+#define SDMA_PKT_NOP_HEADER_op_offset 0
+#define SDMA_PKT_NOP_HEADER_op_mask   0x000000FF
+#define SDMA_PKT_NOP_HEADER_op_shift  0
+#define SDMA_PKT_NOP_HEADER_OP(x) (((x) & SDMA_PKT_NOP_HEADER_op_mask) << SDMA_PKT_NOP_HEADER_op_shift)
+
+/*define for sub_op field*/
+#define SDMA_PKT_NOP_HEADER_sub_op_offset 0
+#define SDMA_PKT_NOP_HEADER_sub_op_mask   0x000000FF
+#define SDMA_PKT_NOP_HEADER_sub_op_shift  8
+#define SDMA_PKT_NOP_HEADER_SUB_OP(x) (((x) & SDMA_PKT_NOP_HEADER_sub_op_mask) << SDMA_PKT_NOP_HEADER_sub_op_shift)
+
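+/*
+ * Usage note: each field above is described by an _offset (its dword index
+ * within the packet), a _mask and a _shift; the matching upper-case macro
+ * packs a caller supplied value into that dword, e.g. the HEADER dword of a
+ * NOP packet can be built as SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP).  This is
+ * illustrative only; see sdma_v2_4.c for the real users of these helpers.
+ */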
+
+#endif /* __ICELAND_SDMA_PKT_OPEN_H_ */
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_smc.c b/drivers/gpu/drm/amd/amdgpu/iceland_smc.c
new file mode 100644 (file)
index 0000000..c6f1e2f
--- /dev/null
@@ -0,0 +1,675 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/firmware.h>
+#include "drmP.h"
+#include "amdgpu.h"
+#include "ppsmc.h"
+#include "iceland_smumgr.h"
+#include "smu_ucode_xfer_vi.h"
+#include "amdgpu_ucode.h"
+
+#include "smu/smu_7_1_1_d.h"
+#include "smu/smu_7_1_1_sh_mask.h"
+
+#define ICELAND_SMC_SIZE 0x20000
+
+static int iceland_set_smc_sram_address(struct amdgpu_device *adev,
+                                       uint32_t smc_address, uint32_t limit)
+{
+       uint32_t val;
+
+       if (smc_address & 3)
+               return -EINVAL;
+
+       if ((smc_address + 3) > limit)
+               return -EINVAL;
+
+       WREG32(mmSMC_IND_INDEX_0, smc_address);
+
+       val = RREG32(mmSMC_IND_ACCESS_CNTL);
+       val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
+       WREG32(mmSMC_IND_ACCESS_CNTL, val);
+
+       return 0;
+}
+
+static int iceland_copy_bytes_to_smc(struct amdgpu_device *adev,
+                                    uint32_t smc_start_address,
+                                    const uint8_t *src,
+                                    uint32_t byte_count, uint32_t limit)
+{
+       uint32_t addr;
+       uint32_t data, orig_data;
+       int result = 0;
+       uint32_t extra_shift;
+       unsigned long flags;
+
+       if (smc_start_address & 3)
+               return -EINVAL;
+
+       if ((smc_start_address + byte_count) > limit)
+               return -EINVAL;
+
+       addr = smc_start_address;
+
+       spin_lock_irqsave(&adev->smc_idx_lock, flags);
+       while (byte_count >= 4) {
+               /* Bytes are written into the SMC address space with the MSB first */
+               data = (src[0] << 24) + (src[1] << 16) + (src[2] << 8) + src[3];
+
+               result = iceland_set_smc_sram_address(adev, addr, limit);
+
+               if (result)
+                       goto out;
+
+               WREG32(mmSMC_IND_DATA_0, data);
+
+               src += 4;
+               byte_count -= 4;
+               addr += 4;
+       }
+
+       if (0 != byte_count) {
+               /* Now write odd bytes left, do a read modify write cycle */
+               data = 0;
+
+               result = iceland_set_smc_sram_address(adev, addr, limit);
+               if (result)
+                       goto out;
+
+               orig_data = RREG32(mmSMC_IND_DATA_0);
+               extra_shift = 8 * (4 - byte_count);
+
+               while (byte_count > 0) {
+                       data = (data << 8) + *src++;
+                       byte_count--;
+               }
+
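+               /* pack the leftover bytes MSB-first into the upper part of the
+                * dword and keep the untouched low-order bytes from orig_data */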
+               data <<= extra_shift;
+               data |= (orig_data & ~((~0UL) << extra_shift));
+
+               result = iceland_set_smc_sram_address(adev, addr, limit);
+               if (result)
+                       goto out;
+
+               WREG32(mmSMC_IND_DATA_0, data);
+       }
+
+out:
+       spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
+       return result;
+}
+
+void iceland_start_smc(struct amdgpu_device *adev)
+{
+       uint32_t val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
+
+       val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 0);
+       WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
+}
+
+void iceland_reset_smc(struct amdgpu_device *adev)
+{
+       uint32_t val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
+
+       val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 1);
+       WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
+}
+
+static int iceland_program_jump_on_start(struct amdgpu_device *adev)
+{
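+       /* a small jump stub is written at SMC address 0 so that the SMC core
+        * branches to the freshly loaded firmware when released from reset */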
+       static unsigned char data[] = {0xE0, 0x00, 0x80, 0x40};
+       iceland_copy_bytes_to_smc(adev, 0x0, data, 4, sizeof(data)+1);
+
+       return 0;
+}
+
+void iceland_stop_smc_clock(struct amdgpu_device *adev)
+{
+       uint32_t val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
+
+       val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 1);
+       WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val);
+}
+
+void iceland_start_smc_clock(struct amdgpu_device *adev)
+{
+       uint32_t val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
+
+       val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);
+       WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val);
+}
+
+static bool iceland_is_smc_ram_running(struct amdgpu_device *adev)
+{
+       uint32_t val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
+       val = REG_GET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable);
+
+       return ((0 == val) && (0x20100 <= RREG32_SMC(ixSMC_PC_C)));
+}
+
+static int wait_smu_response(struct amdgpu_device *adev)
+{
+       int i;
+       uint32_t val;
+
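+       /* poll until the SMC posts a non-zero response in SMC_RESP_0 for the
+        * last message written to SMC_MESSAGE_0 */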
+       for (i = 0; i < adev->usec_timeout; i++) {
+               val = RREG32(mmSMC_RESP_0);
+               if (REG_GET_FIELD(val, SMC_RESP_0, SMC_RESP))
+                       break;
+               udelay(1);
+       }
+
+       if (i == adev->usec_timeout)
+               return -EINVAL;
+
+       return 0;
+}
+
+static int iceland_send_msg_to_smc(struct amdgpu_device *adev, PPSMC_Msg msg)
+{
+       if (!iceland_is_smc_ram_running(adev))
+               return -EINVAL;
+
+       if (wait_smu_response(adev)) {
+               DRM_ERROR("Failed to send previous message\n");
+               return -EINVAL;
+       }
+
+       WREG32(mmSMC_MESSAGE_0, msg);
+
+       if (wait_smu_response(adev)) {
+               DRM_ERROR("Failed to send message\n");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int iceland_send_msg_to_smc_without_waiting(struct amdgpu_device *adev,
+                                                  PPSMC_Msg msg)
+{
+       if (!iceland_is_smc_ram_running(adev))
+               return -EINVAL;
+
+       if (wait_smu_response(adev)) {
+               DRM_ERROR("Failed to send previous message\n");
+               return -EINVAL;
+       }
+
+       WREG32(mmSMC_MESSAGE_0, msg);
+
+       return 0;
+}
+
+static int iceland_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
+                                                 PPSMC_Msg msg,
+                                                 uint32_t parameter)
+{
+       WREG32(mmSMC_MSG_ARG_0, parameter);
+
+       return iceland_send_msg_to_smc(adev, msg);
+}
+
+static int iceland_send_msg_to_smc_with_parameter_without_waiting(
+                                       struct amdgpu_device *adev,
+                                       PPSMC_Msg msg, uint32_t parameter)
+{
+       WREG32(mmSMC_MSG_ARG_0, parameter);
+
+       return iceland_send_msg_to_smc_without_waiting(adev, msg);
+}
+
+#if 0 /* not used yet */
+static int iceland_wait_for_smc_inactive(struct amdgpu_device *adev)
+{
+       int i;
+       uint32_t val;
+
+       if (!iceland_is_smc_ram_running(adev))
+               return -EINVAL;
+
+       for (i = 0; i < adev->usec_timeout; i++) {
+               val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
+               if (REG_GET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, cken) == 0)
+                       break;
+               udelay(1);
+       }
+
+       if (i == adev->usec_timeout)
+               return -EINVAL;
+
+       return 0;
+}
+#endif
+
+static int iceland_smu_upload_firmware_image(struct amdgpu_device *adev)
+{
+       const struct smc_firmware_header_v1_0 *hdr;
+       uint32_t ucode_size;
+       uint32_t ucode_start_address;
+       const uint8_t *src;
+       uint32_t val;
+       uint32_t byte_count;
+       uint32_t data;
+       unsigned long flags;
+       int i;
+
+       if (!adev->pm.fw)
+               return -EINVAL;
+
+       hdr = (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data;
+       amdgpu_ucode_print_smc_hdr(&hdr->header);
+
+       adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);
+       ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
+       ucode_start_address = le32_to_cpu(hdr->ucode_start_addr);
+       src = (const uint8_t *)
+               (adev->pm.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+
+       if (ucode_size & 3) {
+               DRM_ERROR("SMC ucode is not 4 bytes aligned\n");
+               return -EINVAL;
+       }
+
+       if (ucode_size > ICELAND_SMC_SIZE) {
+               DRM_ERROR("SMC ucode is larger than the SMC RAM area\n");
+               return -EINVAL;
+       }
+
+       for (i = 0; i < adev->usec_timeout; i++) {
+               val = RREG32_SMC(ixRCU_UC_EVENTS);
+               if (REG_GET_FIELD(val, RCU_UC_EVENTS, boot_seq_done) == 0)
+                       break;
+               udelay(1);
+       }
+       val = RREG32_SMC(ixSMC_SYSCON_MISC_CNTL);
+       WREG32_SMC(ixSMC_SYSCON_MISC_CNTL, val | 1);
+
+       iceland_stop_smc_clock(adev);
+       iceland_reset_smc(adev);
+
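+       /* stream the image into SMC RAM: program the start address once and
+        * let the indirect access auto-increment advance it for each dword */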
+       spin_lock_irqsave(&adev->smc_idx_lock, flags);
+       WREG32(mmSMC_IND_INDEX_0, ucode_start_address);
+
+       val = RREG32(mmSMC_IND_ACCESS_CNTL);
+       val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 1);
+       WREG32(mmSMC_IND_ACCESS_CNTL, val);
+
+       byte_count = ucode_size;
+       while (byte_count >= 4) {
+               data = (src[0] << 24) + (src[1] << 16) + (src[2] << 8) + src[3];
+               WREG32(mmSMC_IND_DATA_0, data);
+               src += 4;
+               byte_count -= 4;
+       }
+       val = RREG32(mmSMC_IND_ACCESS_CNTL);
+       val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
+       WREG32(mmSMC_IND_ACCESS_CNTL, val);
+       spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
+
+       return 0;
+}
+
+#if 0 /* not used yet */
+static int iceland_read_smc_sram_dword(struct amdgpu_device *adev,
+                                      uint32_t smc_address,
+                                      uint32_t *value,
+                                      uint32_t limit)
+{
+       int result;
+       unsigned long flags;
+
+       spin_lock_irqsave(&adev->smc_idx_lock, flags);
+       result = iceland_set_smc_sram_address(adev, smc_address, limit);
+       if (result == 0)
+               *value = RREG32(mmSMC_IND_DATA_0);
+       spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
+       return result;
+}
+
+static int iceland_write_smc_sram_dword(struct amdgpu_device *adev,
+                                       uint32_t smc_address,
+                                       uint32_t value,
+                                       uint32_t limit)
+{
+       int result;
+       unsigned long flags;
+
+       spin_lock_irqsave(&adev->smc_idx_lock, flags);
+       result = iceland_set_smc_sram_address(adev, smc_address, limit);
+       if (result == 0)
+               WREG32(mmSMC_IND_DATA_0, value);
+       spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
+       return result;
+}
+
+static int iceland_smu_stop_smc(struct amdgpu_device *adev)
+{
+       iceland_reset_smc(adev);
+       iceland_stop_smc_clock(adev);
+
+       return 0;
+}
+#endif
+
+static int iceland_smu_start_smc(struct amdgpu_device *adev)
+{
+       int i;
+       uint32_t val;
+
+       iceland_program_jump_on_start(adev);
+       iceland_start_smc_clock(adev);
+       iceland_start_smc(adev);
+
+       for (i = 0; i < adev->usec_timeout; i++) {
+               val = RREG32_SMC(ixFIRMWARE_FLAGS);
+               if (REG_GET_FIELD(val, FIRMWARE_FLAGS, INTERRUPTS_ENABLED) == 1)
+                       break;
+               udelay(1);
+       }
+       return 0;
+}
+
+static enum AMDGPU_UCODE_ID iceland_convert_fw_type(uint32_t fw_type)
+{
+       switch (fw_type) {
+               case UCODE_ID_SDMA0:
+                       return AMDGPU_UCODE_ID_SDMA0;
+               case UCODE_ID_SDMA1:
+                       return AMDGPU_UCODE_ID_SDMA1;
+               case UCODE_ID_CP_CE:
+                       return AMDGPU_UCODE_ID_CP_CE;
+               case UCODE_ID_CP_PFP:
+                       return AMDGPU_UCODE_ID_CP_PFP;
+               case UCODE_ID_CP_ME:
+                       return AMDGPU_UCODE_ID_CP_ME;
+               case UCODE_ID_CP_MEC:
+               case UCODE_ID_CP_MEC_JT1:
+                       return AMDGPU_UCODE_ID_CP_MEC1;
+               case UCODE_ID_CP_MEC_JT2:
+                       return AMDGPU_UCODE_ID_CP_MEC2;
+               case UCODE_ID_RLC_G:
+                       return AMDGPU_UCODE_ID_RLC_G;
+               default:
+                       DRM_ERROR("ucode type is out of range!\n");
+                       return AMDGPU_UCODE_ID_MAXIMUM;
+       }
+}
+
+static uint32_t iceland_smu_get_mask_for_fw_type(uint32_t fw_type)
+{
+       switch (fw_type) {
+               case AMDGPU_UCODE_ID_SDMA0:
+                       return UCODE_ID_SDMA0_MASK;
+               case AMDGPU_UCODE_ID_SDMA1:
+                       return UCODE_ID_SDMA1_MASK;
+               case AMDGPU_UCODE_ID_CP_CE:
+                       return UCODE_ID_CP_CE_MASK;
+               case AMDGPU_UCODE_ID_CP_PFP:
+                       return UCODE_ID_CP_PFP_MASK;
+               case AMDGPU_UCODE_ID_CP_ME:
+                       return UCODE_ID_CP_ME_MASK;
+               case AMDGPU_UCODE_ID_CP_MEC1:
+                       return UCODE_ID_CP_MEC_MASK | UCODE_ID_CP_MEC_JT1_MASK | UCODE_ID_CP_MEC_JT2_MASK;
+               case AMDGPU_UCODE_ID_CP_MEC2:
+                       return UCODE_ID_CP_MEC_MASK;
+               case AMDGPU_UCODE_ID_RLC_G:
+                       return UCODE_ID_RLC_G_MASK;
+               default:
+                       DRM_ERROR("ucode type is out of range!\n");
+                       return 0;
+       }
+}
+
+static int iceland_smu_populate_single_firmware_entry(struct amdgpu_device *adev,
+                                                     uint32_t fw_type,
+                                                     struct SMU_Entry *entry)
+{
+       enum AMDGPU_UCODE_ID id = iceland_convert_fw_type(fw_type);
+       struct amdgpu_firmware_info *ucode = &adev->firmware.ucode[id];
+       const struct gfx_firmware_header_v1_0 *header = NULL;
+       uint64_t gpu_addr;
+       uint32_t data_size;
+
+       if (ucode->fw == NULL)
+               return -EINVAL;
+
+       gpu_addr  = ucode->mc_addr;
+       header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
+       data_size = le32_to_cpu(header->header.ucode_size_bytes);
+
+       entry->version = (uint16_t)le32_to_cpu(header->header.ucode_version);
+       entry->id = (uint16_t)fw_type;
+       entry->image_addr_high = upper_32_bits(gpu_addr);
+       entry->image_addr_low = lower_32_bits(gpu_addr);
+       entry->meta_data_addr_high = 0;
+       entry->meta_data_addr_low = 0;
+       entry->data_size_byte = data_size;
+       entry->num_register_entries = 0;
+       entry->flags = 0;
+
+       return 0;
+}
+
+static int iceland_smu_request_load_fw(struct amdgpu_device *adev)
+{
+       struct iceland_smu_private_data *private = (struct iceland_smu_private_data *)adev->smu.priv;
+       struct SMU_DRAMData_TOC *toc;
+       uint32_t fw_to_load;
+
+       toc = (struct SMU_DRAMData_TOC *)private->header;
+       toc->num_entries = 0;
+       toc->structure_version = 1;
+
+       if (!adev->firmware.smu_load)
+               return 0;
+
+       if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_RLC_G,
+                       &toc->entry[toc->num_entries++])) {
+               DRM_ERROR("Failed to get firmware entry for RLC\n");
+               return -EINVAL;
+       }
+
+       if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_CE,
+                       &toc->entry[toc->num_entries++])) {
+               DRM_ERROR("Failed to get firmware entry for CE\n");
+               return -EINVAL;
+       }
+
+       if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_PFP,
+                       &toc->entry[toc->num_entries++])) {
+               DRM_ERROR("Failed to get firmware entry for PFP\n");
+               return -EINVAL;
+       }
+
+       if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_ME,
+                       &toc->entry[toc->num_entries++])) {
+               DRM_ERROR("Failed to get firmware entry for ME\n");
+               return -EINVAL;
+       }
+
+       if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC,
+                       &toc->entry[toc->num_entries++])) {
+               DRM_ERROR("Failed to get firmware entry for MEC\n");
+               return -EINVAL;
+       }
+
+       if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC_JT1,
+                       &toc->entry[toc->num_entries++])) {
+               DRM_ERROR("Failed to get firmware entry for MEC_JT1\n");
+               return -EINVAL;
+       }
+
+       if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC_JT2,
+                       &toc->entry[toc->num_entries++])) {
+               DRM_ERROR("Failed to get firmware entry for MEC_JT2\n");
+               return -EINVAL;
+       }
+
+       if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_SDMA0,
+                       &toc->entry[toc->num_entries++])) {
+               DRM_ERROR("Failed to get firmware entry for SDMA0\n");
+               return -EINVAL;
+       }
+
+       if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_SDMA1,
+                       &toc->entry[toc->num_entries++])) {
+               DRM_ERROR("Failed to get firmware entry for SDMA1\n");
+               return -EINVAL;
+       }
+
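+       /* hand the SMU the GPU address of the TOC before requesting the load */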
+       iceland_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DRV_DRAM_ADDR_HI, private->header_addr_high);
+       iceland_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DRV_DRAM_ADDR_LO, private->header_addr_low);
+
+       fw_to_load = UCODE_ID_RLC_G_MASK |
+                       UCODE_ID_SDMA0_MASK |
+                       UCODE_ID_SDMA1_MASK |
+                       UCODE_ID_CP_CE_MASK |
+                       UCODE_ID_CP_ME_MASK |
+                       UCODE_ID_CP_PFP_MASK |
+                       UCODE_ID_CP_MEC_MASK |
+                       UCODE_ID_CP_MEC_JT1_MASK |
+                       UCODE_ID_CP_MEC_JT2_MASK;
+
+       if (iceland_send_msg_to_smc_with_parameter_without_waiting(adev, PPSMC_MSG_LoadUcodes, fw_to_load)) {
+               DRM_ERROR("Failed to request SMU load ucode\n");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int iceland_smu_check_fw_load_finish(struct amdgpu_device *adev,
+                                           uint32_t fw_type)
+{
+       uint32_t fw_mask = iceland_smu_get_mask_for_fw_type(fw_type);
+       int i;
+
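+       /* the SMU sets the corresponding UCODE_ID_*_MASK bits in
+        * SOFT_REGISTERS_TABLE_27 once a ucode image has been loaded */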
+       for (i = 0; i < adev->usec_timeout; i++) {
+               if (fw_mask == (RREG32_SMC(ixSOFT_REGISTERS_TABLE_27) & fw_mask))
+                       break;
+               udelay(1);
+       }
+
+       if (i == adev->usec_timeout) {
+               DRM_ERROR("timed out waiting for SMC firmware loading to finish\n");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+int iceland_smu_start(struct amdgpu_device *adev)
+{
+       int result;
+
+       result = iceland_smu_upload_firmware_image(adev);
+       if (result)
+               return result;
+       result = iceland_smu_start_smc(adev);
+       if (result)
+               return result;
+
+       return iceland_smu_request_load_fw(adev);
+}
+
+static const struct amdgpu_smumgr_funcs iceland_smumgr_funcs = {
+       .check_fw_load_finish = iceland_smu_check_fw_load_finish,
+       .request_smu_load_fw = NULL,
+       .request_smu_specific_fw = NULL,
+};
+
+int iceland_smu_init(struct amdgpu_device *adev)
+{
+       struct iceland_smu_private_data *private;
+       uint32_t image_size = ((sizeof(struct SMU_DRAMData_TOC) / 4096) + 1) * 4096;
+       struct amdgpu_bo **toc_buf = &adev->smu.toc_buf;
+       uint64_t mc_addr;
+       void *toc_buf_ptr;
+       int ret;
+
+       private = kzalloc(sizeof(struct iceland_smu_private_data), GFP_KERNEL);
+       if (NULL == private)
+               return -ENOMEM;
+
+       /* allocate firmware buffers */
+       if (adev->firmware.smu_load)
+               amdgpu_ucode_init_bo(adev);
+
+       adev->smu.priv = private;
+       adev->smu.fw_flags = 0;
+
+       /* Allocate FW image data structure and header buffer */
+       ret = amdgpu_bo_create(adev, image_size, PAGE_SIZE,
+                              true, AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, toc_buf);
+       if (ret) {
+               DRM_ERROR("Failed to allocate memory for TOC buffer\n");
+               return -ENOMEM;
+       }
+
+       /* Retrieve GPU address for header buffer and internal buffer */
+       ret = amdgpu_bo_reserve(adev->smu.toc_buf, false);
+       if (ret) {
+               amdgpu_bo_unref(&adev->smu.toc_buf);
+               DRM_ERROR("Failed to reserve the TOC buffer\n");
+               return -EINVAL;
+       }
+
+       ret = amdgpu_bo_pin(adev->smu.toc_buf, AMDGPU_GEM_DOMAIN_VRAM, &mc_addr);
+       if (ret) {
+               amdgpu_bo_unreserve(adev->smu.toc_buf);
+               amdgpu_bo_unref(&adev->smu.toc_buf);
+               DRM_ERROR("Failed to pin the TOC buffer\n");
+               return -EINVAL;
+       }
+
+       ret = amdgpu_bo_kmap(*toc_buf, &toc_buf_ptr);
+       if (ret) {
+               amdgpu_bo_unreserve(adev->smu.toc_buf);
+               amdgpu_bo_unref(&adev->smu.toc_buf);
+               DRM_ERROR("Failed to map the TOC buffer\n");
+               return -EINVAL;
+       }
+
+       amdgpu_bo_unreserve(adev->smu.toc_buf);
+       private->header_addr_low = lower_32_bits(mc_addr);
+       private->header_addr_high = upper_32_bits(mc_addr);
+       private->header = toc_buf_ptr;
+
+       adev->smu.smumgr_funcs = &iceland_smumgr_funcs;
+
+       return 0;
+}
+
+int iceland_smu_fini(struct amdgpu_device *adev)
+{
+       amdgpu_bo_unref(&adev->smu.toc_buf);
+       kfree(adev->smu.priv);
+       adev->smu.priv = NULL;
+       if (adev->firmware.fw_buf)
+               amdgpu_ucode_fini_bo(adev);
+
+       return 0;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_smumgr.h b/drivers/gpu/drm/amd/amdgpu/iceland_smumgr.h
new file mode 100644 (file)
index 0000000..1e0769e
--- /dev/null
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef ICELAND_SMUMGR_H
+#define ICELAND_SMUMGR_H
+
+#include "ppsmc.h"
+
+extern int iceland_smu_init(struct amdgpu_device *adev);
+extern int iceland_smu_fini(struct amdgpu_device *adev);
+extern int iceland_smu_start(struct amdgpu_device *adev);
+
+struct iceland_smu_private_data
+{
+       uint8_t *header;
+       uint8_t *mec_image;
+       uint32_t header_addr_high;
+       uint32_t header_addr_low;
+};
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
new file mode 100644 (file)
index 0000000..a83029d
--- /dev/null
@@ -0,0 +1,1447 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+#include <linux/firmware.h>
+#include <drm/drmP.h>
+#include "amdgpu.h"
+#include "amdgpu_ucode.h"
+#include "amdgpu_trace.h"
+#include "vi.h"
+#include "vid.h"
+
+#include "oss/oss_2_4_d.h"
+#include "oss/oss_2_4_sh_mask.h"
+
+#include "gmc/gmc_8_1_d.h"
+#include "gmc/gmc_8_1_sh_mask.h"
+
+#include "gca/gfx_8_0_d.h"
+#include "gca/gfx_8_0_sh_mask.h"
+
+#include "bif/bif_5_0_d.h"
+#include "bif/bif_5_0_sh_mask.h"
+
+#include "iceland_sdma_pkt_open.h"
+
+static void sdma_v2_4_set_ring_funcs(struct amdgpu_device *adev);
+static void sdma_v2_4_set_buffer_funcs(struct amdgpu_device *adev);
+static void sdma_v2_4_set_vm_pte_funcs(struct amdgpu_device *adev);
+static void sdma_v2_4_set_irq_funcs(struct amdgpu_device *adev);
+
+MODULE_FIRMWARE("radeon/topaz_sdma.bin");
+MODULE_FIRMWARE("radeon/topaz_sdma1.bin");
+
+static const u32 sdma_offsets[SDMA_MAX_INSTANCE] =
+{
+       SDMA0_REGISTER_OFFSET,
+       SDMA1_REGISTER_OFFSET
+};
+
+static const u32 golden_settings_iceland_a11[] =
+{
+       mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007,
+       mmSDMA0_CLK_CTRL, 0xff000fff, 0x00000000,
+       mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007,
+       mmSDMA1_CLK_CTRL, 0xff000fff, 0x00000000,
+};
+
+static const u32 iceland_mgcg_cgcg_init[] =
+{
+       mmSDMA0_CLK_CTRL, 0xff000ff0, 0x00000100,
+       mmSDMA1_CLK_CTRL, 0xff000ff0, 0x00000100
+};
+
+/*
+ * sDMA - System DMA
+ * Starting with CIK, the GPU has new asynchronous
+ * DMA engines.  These engines are used for compute
+ * and gfx.  There are two DMA engines (SDMA0, SDMA1)
+ * and each one supports 1 ring buffer used for gfx
+ * and 2 queues used for compute.
+ *
+ * The programming model is very similar to the CP
+ * (ring buffer, IBs, etc.), but sDMA has its own
+ * packet format that is different from the PM4 format
+ * used by the CP. sDMA supports copying data, writing
+ * embedded data, solid fills, and a number of other
+ * things.  It also has support for tiling/detiling of
+ * buffers.
+ */
+
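+/*
+ * Packet construction sketch (illustrative only, see e.g.
+ * sdma_v2_4_ring_emit_fence() below for a real emitter): each packet is
+ * assembled dword by dword from the macros in iceland_sdma_pkt_open.h:
+ *
+ *   amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE));
+ *   amdgpu_ring_write(ring, lower_32_bits(addr));
+ *   amdgpu_ring_write(ring, upper_32_bits(addr));
+ *   amdgpu_ring_write(ring, lower_32_bits(seq));
+ */
+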
+static void sdma_v2_4_init_golden_registers(struct amdgpu_device *adev)
+{
+       switch (adev->asic_type) {
+       case CHIP_TOPAZ:
+               amdgpu_program_register_sequence(adev,
+                                                iceland_mgcg_cgcg_init,
+                                                (const u32)ARRAY_SIZE(iceland_mgcg_cgcg_init));
+               amdgpu_program_register_sequence(adev,
+                                                golden_settings_iceland_a11,
+                                                (const u32)ARRAY_SIZE(golden_settings_iceland_a11));
+               break;
+       default:
+               break;
+       }
+}
+
+/**
+ * sdma_v2_4_init_microcode - load ucode images from disk
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Use the firmware interface to load the ucode images into
+ * the driver (not loaded into hw).
+ * Returns 0 on success, error on failure.
+ */
+static int sdma_v2_4_init_microcode(struct amdgpu_device *adev)
+{
+       const char *chip_name;
+       char fw_name[30];
+       int err, i;
+       struct amdgpu_firmware_info *info = NULL;
+       const struct common_firmware_header *header = NULL;
+
+       DRM_DEBUG("\n");
+
+       switch (adev->asic_type) {
+       case CHIP_TOPAZ:
+               chip_name = "topaz";
+               break;
+       default: BUG();
+       }
+
+       for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
+               if (i == 0)
+                       snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma.bin", chip_name);
+               else
+                       snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma1.bin", chip_name);
+               err = request_firmware(&adev->sdma[i].fw, fw_name, adev->dev);
+               if (err)
+                       goto out;
+               err = amdgpu_ucode_validate(adev->sdma[i].fw);
+               if (err)
+                       goto out;
+
+               if (adev->firmware.smu_load) {
+                       info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
+                       info->ucode_id = AMDGPU_UCODE_ID_SDMA0 + i;
+                       info->fw = adev->sdma[i].fw;
+                       header = (const struct common_firmware_header *)info->fw->data;
+                       adev->firmware.fw_size +=
+                               ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
+               }
+       }
+
+out:
+       if (err) {
+               printk(KERN_ERR
+                      "sdma_v2_4: Failed to load firmware \"%s\"\n",
+                      fw_name);
+               for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
+                       release_firmware(adev->sdma[i].fw);
+                       adev->sdma[i].fw = NULL;
+               }
+       }
+       return err;
+}
+
+/**
+ * sdma_v2_4_ring_get_rptr - get the current read pointer
+ *
+ * @ring: amdgpu ring pointer
+ *
+ * Get the current rptr from the hardware (VI+).
+ */
+static uint32_t sdma_v2_4_ring_get_rptr(struct amdgpu_ring *ring)
+{
+       u32 rptr;
+
+       /* XXX check if swapping is necessary on BE */
+       rptr = ring->adev->wb.wb[ring->rptr_offs] >> 2;
+
+       return rptr;
+}
+
+/**
+ * sdma_v2_4_ring_get_wptr - get the current write pointer
+ *
+ * @ring: amdgpu ring pointer
+ *
+ * Get the current wptr from the hardware (VI+).
+ */
+static uint32_t sdma_v2_4_ring_get_wptr(struct amdgpu_ring *ring)
+{
+       struct amdgpu_device *adev = ring->adev;
+       int me = (ring == &ring->adev->sdma[0].ring) ? 0 : 1;
+       u32 wptr = RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me]) >> 2;
+
+       return wptr;
+}
+
+/**
+ * sdma_v2_4_ring_set_wptr - commit the write pointer
+ *
+ * @ring: amdgpu ring pointer
+ *
+ * Write the wptr back to the hardware (VI+).
+ */
+static void sdma_v2_4_ring_set_wptr(struct amdgpu_ring *ring)
+{
+       struct amdgpu_device *adev = ring->adev;
+       int me = (ring == &ring->adev->sdma[0].ring) ? 0 : 1;
+
+       WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me], ring->wptr << 2);
+}
+
+static void sdma_v2_4_hdp_flush_ring_emit(struct amdgpu_ring *);
+
+/**
+ * sdma_v2_4_ring_emit_ib - Schedule an IB on the DMA engine
+ *
+ * @ring: amdgpu ring pointer
+ * @ib: IB object to schedule
+ *
+ * Schedule an IB in the DMA ring (VI).
+ */
+static void sdma_v2_4_ring_emit_ib(struct amdgpu_ring *ring,
+                                  struct amdgpu_ib *ib)
+{
+       u32 vmid = (ib->vm ? ib->vm->ids[ring->idx].id : 0) & 0xf;
+       u32 next_rptr = ring->wptr + 5;
+
+       if (ib->flush_hdp_writefifo)
+               next_rptr += 6;
+
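+       /* mirror the padding emitted below: the 6 dword INDIRECT packet must
+        * end on an 8 dword boundary, so advance next_rptr the same way */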
+       while ((next_rptr & 7) != 2)
+               next_rptr++;
+
+       next_rptr += 6;
+
+       amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
+                         SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR));
+       amdgpu_ring_write(ring, lower_32_bits(ring->next_rptr_gpu_addr) & 0xfffffffc);
+       amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr));
+       amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1));
+       amdgpu_ring_write(ring, next_rptr);
+
+       if (ib->flush_hdp_writefifo) {
+               /* flush HDP */
+               sdma_v2_4_hdp_flush_ring_emit(ring);
+       }
+
+       /* IB packet must end on an 8 DW boundary */
+       while ((ring->wptr & 7) != 2)
+               amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_NOP));
+       amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
+                         SDMA_PKT_INDIRECT_HEADER_VMID(vmid));
+       /* base must be 32 byte aligned */
+       amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr) & 0xffffffe0);
+       amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
+       amdgpu_ring_write(ring, ib->length_dw);
+       amdgpu_ring_write(ring, 0);
+       amdgpu_ring_write(ring, 0);
+
+}
+
+/**
+ * sdma_v2_4_hdp_flush_ring_emit - emit an hdp flush on the DMA ring
+ *
+ * @ring: amdgpu ring pointer
+ *
+ * Emit an hdp flush packet on the requested DMA ring.
+ */
+static void sdma_v2_4_hdp_flush_ring_emit(struct amdgpu_ring *ring)
+{
+       u32 ref_and_mask = 0;
+
+       if (ring == &ring->adev->sdma[0].ring)
+               ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA0, 1);
+       else
+               ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA1, 1);
+
+       amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
+                         SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) |
+                         SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
+       amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_DONE << 2);
+       amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_REQ << 2);
+       amdgpu_ring_write(ring, ref_and_mask); /* reference */
+       amdgpu_ring_write(ring, ref_and_mask); /* mask */
+       amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
+                         SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
+}
+
+/**
+ * sdma_v2_4_ring_emit_fence - emit a fence on the DMA ring
+ *
+ * @ring: amdgpu ring pointer
+ * @addr: GPU address to write the fence value to
+ * @seq: sequence number to write
+ * @write64bits: also write the upper 32 bits of the sequence number
+ *
+ * Add a DMA fence packet to the ring to write
+ * the fence seq number and a DMA trap packet to generate
+ * an interrupt if needed (VI).
+ */
+static void sdma_v2_4_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
+                                     bool write64bits)
+{
+       /* write the fence */
+       amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE));
+       amdgpu_ring_write(ring, lower_32_bits(addr));
+       amdgpu_ring_write(ring, upper_32_bits(addr));
+       amdgpu_ring_write(ring, lower_32_bits(seq));
+
+       /* optionally write high bits as well */
+       if (write64bits) {
+               addr += 4;
+               amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE));
+               amdgpu_ring_write(ring, lower_32_bits(addr));
+               amdgpu_ring_write(ring, upper_32_bits(addr));
+               amdgpu_ring_write(ring, upper_32_bits(seq));
+       }
+
+       /* generate an interrupt */
+       amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_TRAP));
+       amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(0));
+}
+
+/**
+ * sdma_v2_4_ring_emit_semaphore - emit a semaphore on the dma ring
+ *
+ * @ring: amdgpu_ring structure holding ring information
+ * @semaphore: amdgpu semaphore object
+ * @emit_wait: wait or signal semaphore
+ *
+ * Add a DMA semaphore packet to the ring to wait on or signal
+ * other rings (VI).
+ */
+static bool sdma_v2_4_ring_emit_semaphore(struct amdgpu_ring *ring,
+                                         struct amdgpu_semaphore *semaphore,
+                                         bool emit_wait)
+{
+       u64 addr = semaphore->gpu_addr;
+       u32 sig = emit_wait ? 0 : 1;
+
+       amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SEM) |
+                         SDMA_PKT_SEMAPHORE_HEADER_SIGNAL(sig));
+       amdgpu_ring_write(ring, lower_32_bits(addr) & 0xfffffff8);
+       amdgpu_ring_write(ring, upper_32_bits(addr));
+
+       return true;
+}
+
+/**
+ * sdma_v2_4_gfx_stop - stop the gfx async dma engines
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Stop the gfx async dma ring buffers (VI).
+ */
+static void sdma_v2_4_gfx_stop(struct amdgpu_device *adev)
+{
+       struct amdgpu_ring *sdma0 = &adev->sdma[0].ring;
+       struct amdgpu_ring *sdma1 = &adev->sdma[1].ring;
+       u32 rb_cntl, ib_cntl;
+       int i;
+
+       if ((adev->mman.buffer_funcs_ring == sdma0) ||
+           (adev->mman.buffer_funcs_ring == sdma1))
+               amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size);
+
+       for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
+               rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
+               rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
+               WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
+               ib_cntl = RREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i]);
+               ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
+               WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
+       }
+       sdma0->ready = false;
+       sdma1->ready = false;
+}
+
+/**
+ * sdma_v2_4_rlc_stop - stop the compute async dma engines
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Stop the compute async dma queues (VI).
+ */
+static void sdma_v2_4_rlc_stop(struct amdgpu_device *adev)
+{
+       /* XXX todo */
+}
+
+/**
+ * sdma_v2_4_enable - halt or unhalt the async dma engines
+ *
+ * @adev: amdgpu_device pointer
+ * @enable: enable/disable the DMA MEs.
+ *
+ * Halt or unhalt the async dma engines (VI).
+ */
+static void sdma_v2_4_enable(struct amdgpu_device *adev, bool enable)
+{
+       u32 f32_cntl;
+       int i;
+
+       if (enable == false) {
+               sdma_v2_4_gfx_stop(adev);
+               sdma_v2_4_rlc_stop(adev);
+       }
+
+       for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
+               f32_cntl = RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i]);
+               if (enable)
+                       f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, 0);
+               else
+                       f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, 1);
+               WREG32(mmSDMA0_F32_CNTL + sdma_offsets[i], f32_cntl);
+       }
+}
+
+/**
+ * sdma_v2_4_gfx_resume - setup and start the async dma engines
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Set up the gfx DMA ring buffers and enable them (VI).
+ * Returns 0 for success, error for failure.
+ */
+static int sdma_v2_4_gfx_resume(struct amdgpu_device *adev)
+{
+       struct amdgpu_ring *ring;
+       u32 rb_cntl, ib_cntl;
+       u32 rb_bufsz;
+       u32 wb_offset;
+       int i, j, r;
+
+       for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
+               ring = &adev->sdma[i].ring;
+               wb_offset = (ring->rptr_offs * 4);
+
+               mutex_lock(&adev->srbm_mutex);
+               for (j = 0; j < 16; j++) {
+                       vi_srbm_select(adev, 0, 0, 0, j);
+                       /* SDMA GFX */
+                       WREG32(mmSDMA0_GFX_VIRTUAL_ADDR + sdma_offsets[i], 0);
+                       WREG32(mmSDMA0_GFX_APE1_CNTL + sdma_offsets[i], 0);
+               }
+               vi_srbm_select(adev, 0, 0, 0, 0);
+               mutex_unlock(&adev->srbm_mutex);
+
+               WREG32(mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i], 0);
+
+               /* Set ring buffer size in dwords */
+               rb_bufsz = order_base_2(ring->ring_size / 4);
+               rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
+               rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SIZE, rb_bufsz);
+#ifdef __BIG_ENDIAN
+               rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SWAP_ENABLE, 1);
+               rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL,
+                                       RPTR_WRITEBACK_SWAP_ENABLE, 1);
+#endif
+               WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
+
+               /* Initialize the ring buffer's read and write pointers */
+               WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0);
+               WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0);
+
+               /* set the wb address whether it's enabled or not */
+               WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i],
+                      upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
+               WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_LO + sdma_offsets[i],
+                      lower_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC);
+
+               rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1);
+
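+               /* ring buffer base address is programmed in units of 256 bytes */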
+               WREG32(mmSDMA0_GFX_RB_BASE + sdma_offsets[i], ring->gpu_addr >> 8);
+               WREG32(mmSDMA0_GFX_RB_BASE_HI + sdma_offsets[i], ring->gpu_addr >> 40);
+
+               ring->wptr = 0;
+               WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], ring->wptr << 2);
+
+               /* enable DMA RB */
+               rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1);
+               WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
+
+               ib_cntl = RREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i]);
+               ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 1);
+#ifdef __BIG_ENDIAN
+               ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_SWAP_ENABLE, 1);
+#endif
+               /* enable DMA IBs */
+               WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
+
+               ring->ready = true;
+
+               r = amdgpu_ring_test_ring(ring);
+               if (r) {
+                       ring->ready = false;
+                       return r;
+               }
+
+               if (adev->mman.buffer_funcs_ring == ring)
+                       amdgpu_ttm_set_active_vram_size(adev, adev->mc.real_vram_size);
+       }
+
+       return 0;
+}
+
+/**
+ * sdma_v2_4_rlc_resume - setup and start the async dma engines
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Set up the compute DMA queues and enable them (VI).
+ * Returns 0 for success, error for failure.
+ */
+static int sdma_v2_4_rlc_resume(struct amdgpu_device *adev)
+{
+       /* XXX todo */
+       return 0;
+}
+
+/**
+ * sdma_v2_4_load_microcode - load the sDMA ME ucode
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Loads the sDMA0/1 ucode.
+ * Returns 0 for success, -EINVAL if the ucode is not available.
+ */
+static int sdma_v2_4_load_microcode(struct amdgpu_device *adev)
+{
+       const struct sdma_firmware_header_v1_0 *hdr;
+       const __le32 *fw_data;
+       u32 fw_size;
+       int i, j;
+       bool smc_loads_fw = false; /* XXX fix me */
+
+       if (!adev->sdma[0].fw || !adev->sdma[1].fw)
+               return -EINVAL;
+
+       /* halt the MEs */
+       sdma_v2_4_enable(adev, false);
+
+       if (smc_loads_fw) {
+               /* XXX query SMC for fw load complete */
+       } else {
+               for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
+                       hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data;
+                       amdgpu_ucode_print_sdma_hdr(&hdr->header);
+                       fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
+                       adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
+
+                       fw_data = (const __le32 *)
+                               (adev->sdma[i].fw->data +
+                                le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+                       WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], 0);
+                       for (j = 0; j < fw_size; j++)
+                               WREG32(mmSDMA0_UCODE_DATA + sdma_offsets[i], le32_to_cpup(fw_data++));
+                       WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], adev->sdma[i].fw_version);
+               }
+       }
+
+       return 0;
+}
+
+/**
+ * sdma_v2_4_start - setup and start the async dma engines
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Set up the DMA engines and enable them (VI).
+ * Returns 0 for success, error for failure.
+ */
+static int sdma_v2_4_start(struct amdgpu_device *adev)
+{
+       int r;
+
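+       /* if the SMU doesn't handle ucode loading, load the sDMA ucode
+        * directly; otherwise just check that the SMU finished loading it
+        */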
+       if (!adev->firmware.smu_load) {
+               r = sdma_v2_4_load_microcode(adev);
+               if (r)
+                       return r;
+       } else {
+               r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
+                                               AMDGPU_UCODE_ID_SDMA0);
+               if (r)
+                       return -EINVAL;
+               r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
+                                               AMDGPU_UCODE_ID_SDMA1);
+               if (r)
+                       return -EINVAL;
+       }
+
+       /* unhalt the MEs */
+       sdma_v2_4_enable(adev, true);
+
+       /* start the gfx rings and rlc compute queues */
+       r = sdma_v2_4_gfx_resume(adev);
+       if (r)
+               return r;
+       r = sdma_v2_4_rlc_resume(adev);
+       if (r)
+               return r;
+
+       return 0;
+}
+
+/**
+ * sdma_v2_4_ring_test_ring - simple async dma engine test
+ *
+ * @ring: amdgpu_ring structure holding ring information
+ *
+ * Test the DMA engine by using it to write a
+ * value to memory (VI).
+ * Returns 0 for success, error for failure.
+ */
+static int sdma_v2_4_ring_test_ring(struct amdgpu_ring *ring)
+{
+       struct amdgpu_device *adev = ring->adev;
+       unsigned i;
+       unsigned index;
+       int r;
+       u32 tmp;
+       u64 gpu_addr;
+
+       r = amdgpu_wb_get(adev, &index);
+       if (r) {
+               dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
+               return r;
+       }
+
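+       /* seed a writeback slot with a sentinel and have the ring
+        * overwrite it with 0xDEADBEEF
+        */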
+       gpu_addr = adev->wb.gpu_addr + (index * 4);
+       tmp = 0xCAFEDEAD;
+       adev->wb.wb[index] = cpu_to_le32(tmp);
+
+       r = amdgpu_ring_lock(ring, 5);
+       if (r) {
+               DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
+               amdgpu_wb_free(adev, index);
+               return r;
+       }
+
+       amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
+                         SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR));
+       amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
+       amdgpu_ring_write(ring, upper_32_bits(gpu_addr));
+       amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1));
+       amdgpu_ring_write(ring, 0xDEADBEEF);
+       amdgpu_ring_unlock_commit(ring);
+
+       for (i = 0; i < adev->usec_timeout; i++) {
+               tmp = le32_to_cpu(adev->wb.wb[index]);
+               if (tmp == 0xDEADBEEF)
+                       break;
+               DRM_UDELAY(1);
+       }
+
+       if (i < adev->usec_timeout) {
+               DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
+       } else {
+               DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
+                         ring->idx, tmp);
+               r = -EINVAL;
+       }
+       amdgpu_wb_free(adev, index);
+
+       return r;
+}
+
+/**
+ * sdma_v2_4_ring_test_ib - test an IB on the DMA engine
+ *
+ * @ring: amdgpu_ring structure holding ring information
+ *
+ * Test a simple IB in the DMA ring (VI).
+ * Returns 0 on success, error on failure.
+ */
+static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring)
+{
+       struct amdgpu_device *adev = ring->adev;
+       struct amdgpu_ib ib;
+       unsigned i;
+       unsigned index;
+       int r;
+       u32 tmp = 0;
+       u64 gpu_addr;
+
+       r = amdgpu_wb_get(adev, &index);
+       if (r) {
+               dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
+               return r;
+       }
+
+       gpu_addr = adev->wb.gpu_addr + (index * 4);
+       tmp = 0xCAFEDEAD;
+       adev->wb.wb[index] = cpu_to_le32(tmp);
+
+       r = amdgpu_ib_get(ring, NULL, 256, &ib);
+       if (r) {
+               amdgpu_wb_free(adev, index);
+               DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
+               return r;
+       }
+
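+       /* a single WRITE_LINEAR packet targeting the wb slot, padded
+        * with NOPs to 8 dwords
+        */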
+       ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
+               SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
+       ib.ptr[1] = lower_32_bits(gpu_addr);
+       ib.ptr[2] = upper_32_bits(gpu_addr);
+       ib.ptr[3] = SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1);
+       ib.ptr[4] = 0xDEADBEEF;
+       ib.ptr[5] = SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
+       ib.ptr[6] = SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
+       ib.ptr[7] = SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
+       ib.length_dw = 8;
+
+       r = amdgpu_ib_schedule(adev, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED);
+       if (r) {
+               amdgpu_ib_free(adev, &ib);
+               amdgpu_wb_free(adev, index);
+               DRM_ERROR("amdgpu: failed to schedule ib (%d).\n", r);
+               return r;
+       }
+       r = amdgpu_fence_wait(ib.fence, false);
+       if (r) {
+               amdgpu_ib_free(adev, &ib);
+               amdgpu_wb_free(adev, index);
+               DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
+               return r;
+       }
+       for (i = 0; i < adev->usec_timeout; i++) {
+               tmp = le32_to_cpu(adev->wb.wb[index]);
+               if (tmp == 0xDEADBEEF)
+                       break;
+               DRM_UDELAY(1);
+       }
+       if (i < adev->usec_timeout) {
+               DRM_INFO("ib test on ring %d succeeded in %u usecs\n",
+                        ib.fence->ring->idx, i);
+       } else {
+               DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
+               r = -EINVAL;
+       }
+       amdgpu_ib_free(adev, &ib);
+       amdgpu_wb_free(adev, index);
+       return r;
+}
+
+/**
+ * sdma_v2_4_vm_copy_pte - update PTEs by copying them from the GART
+ *
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @src: src addr to copy from
+ * @count: number of page entries to update
+ *
+ * Update PTEs by copying them from the GART using sDMA (VI).
+ */
+static void sdma_v2_4_vm_copy_pte(struct amdgpu_ib *ib,
+                                 uint64_t pe, uint64_t src,
+                                 unsigned count)
+{
+       while (count) {
+               unsigned bytes = count * 8;
+               if (bytes > 0x1FFFF8)
+                       bytes = 0x1FFFF8;
+
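+               /* one COPY_LINEAR packet moves at most 0x1FFFF8 bytes,
+                * i.e. 0x3FFFF eight byte PTEs per iteration
+                */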
+               ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
+                       SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
+               ib->ptr[ib->length_dw++] = bytes;
+               ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
+               ib->ptr[ib->length_dw++] = lower_32_bits(src);
+               ib->ptr[ib->length_dw++] = upper_32_bits(src);
+               ib->ptr[ib->length_dw++] = lower_32_bits(pe);
+               ib->ptr[ib->length_dw++] = upper_32_bits(pe);
+
+               pe += bytes;
+               src += bytes;
+               count -= bytes / 8;
+       }
+}
+
+/**
+ * sdma_v2_4_vm_write_pte - update PTEs by writing them manually
+ *
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @addr: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ * @flags: access flags
+ *
+ * Update PTEs by writing them manually using sDMA (VI).
+ */
+static void sdma_v2_4_vm_write_pte(struct amdgpu_ib *ib,
+                                  uint64_t pe,
+                                  uint64_t addr, unsigned count,
+                                  uint32_t incr, uint32_t flags)
+{
+       uint64_t value;
+       unsigned ndw;
+
+       while (count) {
+               ndw = count * 2;
+               if (ndw > 0xFFFFE)
+                       ndw = 0xFFFFE;
+
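+               /* a WRITE packet carries at most 0xFFFFE data dwords;
+                * each PTE takes two dwords
+                */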
+               /* for non-physically contiguous pages (system) */
+               ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
+                       SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
+               ib->ptr[ib->length_dw++] = pe;
+               ib->ptr[ib->length_dw++] = upper_32_bits(pe);
+               ib->ptr[ib->length_dw++] = ndw;
+               for (; ndw > 0; ndw -= 2, --count, pe += 8) {
+                       if (flags & AMDGPU_PTE_SYSTEM) {
+                               value = amdgpu_vm_map_gart(ib->ring->adev, addr);
+                               value &= 0xFFFFFFFFFFFFF000ULL;
+                       } else if (flags & AMDGPU_PTE_VALID) {
+                               value = addr;
+                       } else {
+                               value = 0;
+                       }
+                       addr += incr;
+                       value |= flags;
+                       ib->ptr[ib->length_dw++] = value;
+                       ib->ptr[ib->length_dw++] = upper_32_bits(value);
+               }
+       }
+}
+
+/**
+ * sdma_v2_4_vm_set_pte_pde - update the page tables using sDMA
+ *
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @addr: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ * @flags: access flags
+ *
+ * Update the page tables using sDMA (VI).
+ */
+static void sdma_v2_4_vm_set_pte_pde(struct amdgpu_ib *ib,
+                                    uint64_t pe,
+                                    uint64_t addr, unsigned count,
+                                    uint32_t incr, uint32_t flags)
+{
+       uint64_t value;
+       unsigned ndw;
+
+       while (count) {
+               ndw = count;
+               if (ndw > 0x7FFFF)
+                       ndw = 0x7FFFF;
+
+               if (flags & AMDGPU_PTE_VALID)
+                       value = addr;
+               else
+                       value = 0;
+
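+               /* one GEN_PTEPDE packet generates up to 0x7FFFF
+                * consecutive entries
+                */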
+               /* for physically contiguous pages (vram) */
+               ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_GEN_PTEPDE);
+               ib->ptr[ib->length_dw++] = pe; /* dst addr */
+               ib->ptr[ib->length_dw++] = upper_32_bits(pe);
+               ib->ptr[ib->length_dw++] = flags; /* mask */
+               ib->ptr[ib->length_dw++] = 0;
+               ib->ptr[ib->length_dw++] = value; /* value */
+               ib->ptr[ib->length_dw++] = upper_32_bits(value);
+               ib->ptr[ib->length_dw++] = incr; /* increment size */
+               ib->ptr[ib->length_dw++] = 0;
+               ib->ptr[ib->length_dw++] = ndw; /* number of entries */
+
+               pe += ndw * 8;
+               addr += ndw * incr;
+               count -= ndw;
+       }
+}
+
+/**
+ * sdma_v2_4_vm_pad_ib - pad the IB to the required number of dw
+ *
+ * @ib: indirect buffer to fill with padding
+ *
+ * Pad the IB with NOPs so its length is a multiple of 8 dwords (VI).
+ */
+static void sdma_v2_4_vm_pad_ib(struct amdgpu_ib *ib)
+{
+       while (ib->length_dw & 0x7)
+               ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
+}
+
+/**
+ * sdma_v2_4_ring_emit_vm_flush - vm flush using sDMA (VI)
+ *
+ * @ring: amdgpu_ring pointer
+ * @vm: amdgpu_vm pointer
+ *
+ * Update the page table base and flush the VM TLB
+ * using sDMA (VI).
+ */
+static void sdma_v2_4_ring_emit_vm_flush(struct amdgpu_ring *ring,
+                                        unsigned vm_id, uint64_t pd_addr)
+{
+       u32 srbm_gfx_cntl = 0;
+
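+       /* write the page directory base for this VMID via SRBM */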
+       amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
+                         SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
+       if (vm_id < 8) {
+               amdgpu_ring_write(ring, (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id));
+       } else {
+               amdgpu_ring_write(ring, (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8));
+       }
+       amdgpu_ring_write(ring, pd_addr >> 12);
+
+       /* update SH_MEM_* regs */
+       srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, VMID, vm_id);
+       amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
+                         SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
+       amdgpu_ring_write(ring, mmSRBM_GFX_CNTL);
+       amdgpu_ring_write(ring, srbm_gfx_cntl);
+
+       amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
+                         SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
+       amdgpu_ring_write(ring, mmSH_MEM_BASES);
+       amdgpu_ring_write(ring, 0);
+
+       amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
+                         SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
+       amdgpu_ring_write(ring, mmSH_MEM_CONFIG);
+       amdgpu_ring_write(ring, 0);
+
+       amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
+                         SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
+       amdgpu_ring_write(ring, mmSH_MEM_APE1_BASE);
+       amdgpu_ring_write(ring, 1);
+
+       amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
+                         SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
+       amdgpu_ring_write(ring, mmSH_MEM_APE1_LIMIT);
+       amdgpu_ring_write(ring, 0);
+
+       srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, VMID, 0);
+       amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
+                         SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
+       amdgpu_ring_write(ring, mmSRBM_GFX_CNTL);
+       amdgpu_ring_write(ring, srbm_gfx_cntl);
+
+
+       /* flush TLB */
+       amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
+                         SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
+       amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
+       amdgpu_ring_write(ring, 1 << vm_id);
+
+       /* wait for flush */
+       amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
+                         SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
+                         SDMA_PKT_POLL_REGMEM_HEADER_FUNC(0)); /* always */
+       amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
+       amdgpu_ring_write(ring, 0);
+       amdgpu_ring_write(ring, 0); /* reference */
+       amdgpu_ring_write(ring, 0); /* mask */
+       amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
+                         SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
+}
+
+static int sdma_v2_4_early_init(struct amdgpu_device *adev)
+{
+       sdma_v2_4_set_ring_funcs(adev);
+       sdma_v2_4_set_buffer_funcs(adev);
+       sdma_v2_4_set_vm_pte_funcs(adev);
+       sdma_v2_4_set_irq_funcs(adev);
+
+       return 0;
+}
+
+static int sdma_v2_4_sw_init(struct amdgpu_device *adev)
+{
+       struct amdgpu_ring *ring;
+       int r;
+
+       /* SDMA trap event */
+       r = amdgpu_irq_add_id(adev, 224, &adev->sdma_trap_irq);
+       if (r)
+               return r;
+
+       /* SDMA Privileged inst */
+       r = amdgpu_irq_add_id(adev, 241, &adev->sdma_illegal_inst_irq);
+       if (r)
+               return r;
+
+       /* SDMA Privileged inst */
+       r = amdgpu_irq_add_id(adev, 247, &adev->sdma_illegal_inst_irq);
+       if (r)
+               return r;
+
+       r = sdma_v2_4_init_microcode(adev);
+       if (r) {
+               DRM_ERROR("Failed to load sdma firmware!\n");
+               return r;
+       }
+
+       ring = &adev->sdma[0].ring;
+       ring->ring_obj = NULL;
+       ring->use_doorbell = false;
+
+       ring = &adev->sdma[1].ring;
+       ring->ring_obj = NULL;
+       ring->use_doorbell = false;
+
+       ring = &adev->sdma[0].ring;
+       sprintf(ring->name, "sdma0");
+       r = amdgpu_ring_init(adev, ring, 256 * 1024,
+                            SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), 0xf,
+                            &adev->sdma_trap_irq, AMDGPU_SDMA_IRQ_TRAP0,
+                            AMDGPU_RING_TYPE_SDMA);
+       if (r)
+               return r;
+
+       ring = &adev->sdma[1].ring;
+       sprintf(ring->name, "sdma1");
+       r = amdgpu_ring_init(adev, ring, 256 * 1024,
+                            SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), 0xf,
+                            &adev->sdma_trap_irq, AMDGPU_SDMA_IRQ_TRAP1,
+                            AMDGPU_RING_TYPE_SDMA);
+       if (r)
+               return r;
+
+       return r;
+}
+
+static int sdma_v2_4_sw_fini(struct amdgpu_device *adev)
+{
+       amdgpu_ring_fini(&adev->sdma[0].ring);
+       amdgpu_ring_fini(&adev->sdma[1].ring);
+
+       return 0;
+}
+
+static int sdma_v2_4_hw_init(struct amdgpu_device *adev)
+{
+       int r;
+
+       sdma_v2_4_init_golden_registers(adev);
+
+       r = sdma_v2_4_start(adev);
+       if (r)
+               return r;
+
+       return r;
+}
+
+static int sdma_v2_4_hw_fini(struct amdgpu_device *adev)
+{
+       sdma_v2_4_enable(adev, false);
+
+       return 0;
+}
+
+static int sdma_v2_4_suspend(struct amdgpu_device *adev)
+{
+
+       return sdma_v2_4_hw_fini(adev);
+}
+
+static int sdma_v2_4_resume(struct amdgpu_device *adev)
+{
+
+       return sdma_v2_4_hw_init(adev);
+}
+
+static bool sdma_v2_4_is_idle(struct amdgpu_device *adev)
+{
+       u32 tmp = RREG32(mmSRBM_STATUS2);
+
+       if (tmp & (SRBM_STATUS2__SDMA_BUSY_MASK |
+                  SRBM_STATUS2__SDMA1_BUSY_MASK))
+           return false;
+
+       return true;
+}
+
+static int sdma_v2_4_wait_for_idle(struct amdgpu_device *adev)
+{
+       unsigned i;
+       u32 tmp;
+
+       for (i = 0; i < adev->usec_timeout; i++) {
+               tmp = RREG32(mmSRBM_STATUS2) & (SRBM_STATUS2__SDMA_BUSY_MASK |
+                               SRBM_STATUS2__SDMA1_BUSY_MASK);
+
+               if (!tmp)
+                       return 0;
+               udelay(1);
+       }
+       return -ETIMEDOUT;
+}
+
+static void sdma_v2_4_print_status(struct amdgpu_device *adev)
+{
+       int i, j;
+
+       dev_info(adev->dev, "VI SDMA registers\n");
+       dev_info(adev->dev, "  SRBM_STATUS2=0x%08X\n",
+                RREG32(mmSRBM_STATUS2));
+       for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
+               dev_info(adev->dev, "  SDMA%d_STATUS_REG=0x%08X\n",
+                        i, RREG32(mmSDMA0_STATUS_REG + sdma_offsets[i]));
+               dev_info(adev->dev, "  SDMA%d_F32_CNTL=0x%08X\n",
+                        i, RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i]));
+               dev_info(adev->dev, "  SDMA%d_CNTL=0x%08X\n",
+                        i, RREG32(mmSDMA0_CNTL + sdma_offsets[i]));
+               dev_info(adev->dev, "  SDMA%d_SEM_WAIT_FAIL_TIMER_CNTL=0x%08X\n",
+                        i, RREG32(mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i]));
+               dev_info(adev->dev, "  SDMA%d_GFX_IB_CNTL=0x%08X\n",
+                        i, RREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i]));
+               dev_info(adev->dev, "  SDMA%d_GFX_RB_CNTL=0x%08X\n",
+                        i, RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]));
+               dev_info(adev->dev, "  SDMA%d_GFX_RB_RPTR=0x%08X\n",
+                        i, RREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i]));
+               dev_info(adev->dev, "  SDMA%d_GFX_RB_WPTR=0x%08X\n",
+                        i, RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i]));
+               dev_info(adev->dev, "  SDMA%d_GFX_RB_RPTR_ADDR_HI=0x%08X\n",
+                        i, RREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i]));
+               dev_info(adev->dev, "  SDMA%d_GFX_RB_RPTR_ADDR_LO=0x%08X\n",
+                        i, RREG32(mmSDMA0_GFX_RB_RPTR_ADDR_LO + sdma_offsets[i]));
+               dev_info(adev->dev, "  SDMA%d_GFX_RB_BASE=0x%08X\n",
+                        i, RREG32(mmSDMA0_GFX_RB_BASE + sdma_offsets[i]));
+               dev_info(adev->dev, "  SDMA%d_GFX_RB_BASE_HI=0x%08X\n",
+                        i, RREG32(mmSDMA0_GFX_RB_BASE_HI + sdma_offsets[i]));
+               mutex_lock(&adev->srbm_mutex);
+               for (j = 0; j < 16; j++) {
+                       vi_srbm_select(adev, 0, 0, 0, j);
+                       dev_info(adev->dev, "  VM %d:\n", j);
+                       dev_info(adev->dev, "  SDMA%d_GFX_VIRTUAL_ADDR=0x%08X\n",
+                                i, RREG32(mmSDMA0_GFX_VIRTUAL_ADDR + sdma_offsets[i]));
+                       dev_info(adev->dev, "  SDMA%d_GFX_APE1_CNTL=0x%08X\n",
+                                i, RREG32(mmSDMA0_GFX_APE1_CNTL + sdma_offsets[i]));
+               }
+               vi_srbm_select(adev, 0, 0, 0, 0);
+               mutex_unlock(&adev->srbm_mutex);
+       }
+}
+
+static int sdma_v2_4_soft_reset(struct amdgpu_device *adev)
+{
+       u32 srbm_soft_reset = 0;
+       u32 tmp = RREG32(mmSRBM_STATUS2);
+
+       if (tmp & SRBM_STATUS2__SDMA_BUSY_MASK) {
+               /* sdma0 */
+               tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET);
+               tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 0);
+               WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp);
+               srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA_MASK;
+       }
+       if (tmp & SRBM_STATUS2__SDMA1_BUSY_MASK) {
+               /* sdma1 */
+               tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET);
+               tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 0);
+               WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp);
+               srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA1_MASK;
+       }
+
+       if (srbm_soft_reset) {
+               sdma_v2_4_print_status(adev);
+
+               tmp = RREG32(mmSRBM_SOFT_RESET);
+               tmp |= srbm_soft_reset;
+               dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
+               WREG32(mmSRBM_SOFT_RESET, tmp);
+               tmp = RREG32(mmSRBM_SOFT_RESET);
+
+               udelay(50);
+
+               tmp &= ~srbm_soft_reset;
+               WREG32(mmSRBM_SOFT_RESET, tmp);
+               tmp = RREG32(mmSRBM_SOFT_RESET);
+
+               /* Wait a little for things to settle down */
+               udelay(50);
+
+               sdma_v2_4_print_status(adev);
+       }
+
+       return 0;
+}
+
+static int sdma_v2_4_set_trap_irq_state(struct amdgpu_device *adev,
+                                       struct amdgpu_irq_src *src,
+                                       unsigned type,
+                                       enum amdgpu_interrupt_state state)
+{
+       u32 sdma_cntl;
+
+       switch (type) {
+       case AMDGPU_SDMA_IRQ_TRAP0:
+               switch (state) {
+               case AMDGPU_IRQ_STATE_DISABLE:
+                       sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET);
+                       sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 0);
+                       WREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET, sdma_cntl);
+                       break;
+               case AMDGPU_IRQ_STATE_ENABLE:
+                       sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET);
+                       sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 1);
+                       WREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET, sdma_cntl);
+                       break;
+               default:
+                       break;
+               }
+               break;
+       case AMDGPU_SDMA_IRQ_TRAP1:
+               switch (state) {
+               case AMDGPU_IRQ_STATE_DISABLE:
+                       sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET);
+                       sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 0);
+                       WREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET, sdma_cntl);
+                       break;
+               case AMDGPU_IRQ_STATE_ENABLE:
+                       sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET);
+                       sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 1);
+                       WREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET, sdma_cntl);
+                       break;
+               default:
+                       break;
+               }
+               break;
+       default:
+               break;
+       }
+       return 0;
+}
+
+static int sdma_v2_4_process_trap_irq(struct amdgpu_device *adev,
+                                     struct amdgpu_irq_src *source,
+                                     struct amdgpu_iv_entry *entry)
+{
+       u8 instance_id, queue_id;
+
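+       /* ring_id bits 1:0 select the SDMA instance, bits 3:2 the queue */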
+       instance_id = (entry->ring_id & 0x3) >> 0;
+       queue_id = (entry->ring_id & 0xc) >> 2;
+       DRM_DEBUG("IH: SDMA trap\n");
+       switch (instance_id) {
+       case 0:
+               switch (queue_id) {
+               case 0:
+                       amdgpu_fence_process(&adev->sdma[0].ring);
+                       break;
+               case 1:
+                       /* XXX compute */
+                       break;
+               case 2:
+                       /* XXX compute */
+                       break;
+               }
+               break;
+       case 1:
+               switch (queue_id) {
+               case 0:
+                       amdgpu_fence_process(&adev->sdma[1].ring);
+                       break;
+               case 1:
+                       /* XXX compute */
+                       break;
+               case 2:
+                       /* XXX compute */
+                       break;
+               }
+               break;
+       }
+       return 0;
+}
+
+static int sdma_v2_4_process_illegal_inst_irq(struct amdgpu_device *adev,
+                                             struct amdgpu_irq_src *source,
+                                             struct amdgpu_iv_entry *entry)
+{
+       DRM_ERROR("Illegal instruction in SDMA command stream\n");
+       schedule_work(&adev->reset_work);
+       return 0;
+}
+
+static int sdma_v2_4_set_clockgating_state(struct amdgpu_device *adev,
+                                         enum amdgpu_clockgating_state state)
+{
+       /* XXX handled via the smc on VI */
+
+       return 0;
+}
+
+static int sdma_v2_4_set_powergating_state(struct amdgpu_device *adev,
+                                         enum amdgpu_powergating_state state)
+{
+       return 0;
+}
+
+const struct amdgpu_ip_funcs sdma_v2_4_ip_funcs = {
+       .early_init = sdma_v2_4_early_init,
+       .late_init = NULL,
+       .sw_init = sdma_v2_4_sw_init,
+       .sw_fini = sdma_v2_4_sw_fini,
+       .hw_init = sdma_v2_4_hw_init,
+       .hw_fini = sdma_v2_4_hw_fini,
+       .suspend = sdma_v2_4_suspend,
+       .resume = sdma_v2_4_resume,
+       .is_idle = sdma_v2_4_is_idle,
+       .wait_for_idle = sdma_v2_4_wait_for_idle,
+       .soft_reset = sdma_v2_4_soft_reset,
+       .print_status = sdma_v2_4_print_status,
+       .set_clockgating_state = sdma_v2_4_set_clockgating_state,
+       .set_powergating_state = sdma_v2_4_set_powergating_state,
+};
+
+/**
+ * sdma_v2_4_ring_is_lockup - Check if the DMA engine is locked up
+ *
+ * @ring: amdgpu_ring structure holding ring information
+ *
+ * Check if the async DMA engine is locked up (VI).
+ * Returns true if the engine appears to be locked up, false if not.
+ */
+static bool sdma_v2_4_ring_is_lockup(struct amdgpu_ring *ring)
+{
+
+       if (sdma_v2_4_is_idle(ring->adev)) {
+               amdgpu_ring_lockup_update(ring);
+               return false;
+       }
+       return amdgpu_ring_test_lockup(ring);
+}
+
+static const struct amdgpu_ring_funcs sdma_v2_4_ring_funcs = {
+       .get_rptr = sdma_v2_4_ring_get_rptr,
+       .get_wptr = sdma_v2_4_ring_get_wptr,
+       .set_wptr = sdma_v2_4_ring_set_wptr,
+       .parse_cs = NULL,
+       .emit_ib = sdma_v2_4_ring_emit_ib,
+       .emit_fence = sdma_v2_4_ring_emit_fence,
+       .emit_semaphore = sdma_v2_4_ring_emit_semaphore,
+       .emit_vm_flush = sdma_v2_4_ring_emit_vm_flush,
+       .test_ring = sdma_v2_4_ring_test_ring,
+       .test_ib = sdma_v2_4_ring_test_ib,
+       .is_lockup = sdma_v2_4_ring_is_lockup,
+};
+
+static void sdma_v2_4_set_ring_funcs(struct amdgpu_device *adev)
+{
+       adev->sdma[0].ring.funcs = &sdma_v2_4_ring_funcs;
+       adev->sdma[1].ring.funcs = &sdma_v2_4_ring_funcs;
+}
+
+static const struct amdgpu_irq_src_funcs sdma_v2_4_trap_irq_funcs = {
+       .set = sdma_v2_4_set_trap_irq_state,
+       .process = sdma_v2_4_process_trap_irq,
+};
+
+static const struct amdgpu_irq_src_funcs sdma_v2_4_illegal_inst_irq_funcs = {
+       .process = sdma_v2_4_process_illegal_inst_irq,
+};
+
+static void sdma_v2_4_set_irq_funcs(struct amdgpu_device *adev)
+{
+       adev->sdma_trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
+       adev->sdma_trap_irq.funcs = &sdma_v2_4_trap_irq_funcs;
+       adev->sdma_illegal_inst_irq.funcs = &sdma_v2_4_illegal_inst_irq_funcs;
+}
+
+/**
+ * sdma_v2_4_emit_copy_buffer - copy buffer using the sDMA engine
+ *
+ * @ring: amdgpu_ring structure holding ring information
+ * @src_offset: src GPU address
+ * @dst_offset: dst GPU address
+ * @byte_count: number of bytes to xfer
+ *
+ * Copy GPU buffers using the DMA engine (VI).
+ * Used by the amdgpu ttm implementation to move pages if
+ * registered as the asic copy callback.
+ */
+static void sdma_v2_4_emit_copy_buffer(struct amdgpu_ring *ring,
+                                      uint64_t src_offset,
+                                      uint64_t dst_offset,
+                                      uint32_t byte_count)
+{
+       amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
+                         SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR));
+       amdgpu_ring_write(ring, byte_count);
+       amdgpu_ring_write(ring, 0); /* src/dst endian swap */
+       amdgpu_ring_write(ring, lower_32_bits(src_offset));
+       amdgpu_ring_write(ring, upper_32_bits(src_offset));
+       amdgpu_ring_write(ring, lower_32_bits(dst_offset));
+       amdgpu_ring_write(ring, upper_32_bits(dst_offset));
+}
+
+/**
+ * sdma_v2_4_emit_fill_buffer - fill buffer using the sDMA engine
+ *
+ * @ring: amdgpu_ring structure holding ring information
+ * @src_data: value to write to buffer
+ * @dst_offset: dst GPU address
+ * @byte_count: number of bytes to xfer
+ *
+ * Fill GPU buffers using the DMA engine (VI).
+ */
+static void sdma_v2_4_emit_fill_buffer(struct amdgpu_ring *ring,
+                                      uint32_t src_data,
+                                      uint64_t dst_offset,
+                                      uint32_t byte_count)
+{
+       amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_CONST_FILL));
+       amdgpu_ring_write(ring, lower_32_bits(dst_offset));
+       amdgpu_ring_write(ring, upper_32_bits(dst_offset));
+       amdgpu_ring_write(ring, src_data);
+       amdgpu_ring_write(ring, byte_count);
+}
+
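+/* worst case packet sizes and per packet transfer limits used when
+ * reserving ring space for buffer moves
+ */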
+static const struct amdgpu_buffer_funcs sdma_v2_4_buffer_funcs = {
+       .copy_max_bytes = 0x1fffff,
+       .copy_num_dw = 7,
+       .emit_copy_buffer = sdma_v2_4_emit_copy_buffer,
+
+       .fill_max_bytes = 0x1fffff,
+       .fill_num_dw = 7,
+       .emit_fill_buffer = sdma_v2_4_emit_fill_buffer,
+};
+
+static void sdma_v2_4_set_buffer_funcs(struct amdgpu_device *adev)
+{
+       if (adev->mman.buffer_funcs == NULL) {
+               adev->mman.buffer_funcs = &sdma_v2_4_buffer_funcs;
+               adev->mman.buffer_funcs_ring = &adev->sdma[0].ring;
+       }
+}
+
+static const struct amdgpu_vm_pte_funcs sdma_v2_4_vm_pte_funcs = {
+       .copy_pte = sdma_v2_4_vm_copy_pte,
+       .write_pte = sdma_v2_4_vm_write_pte,
+       .set_pte_pde = sdma_v2_4_vm_set_pte_pde,
+       .pad_ib = sdma_v2_4_vm_pad_ib,
+};
+
+static void sdma_v2_4_set_vm_pte_funcs(struct amdgpu_device *adev)
+{
+       if (adev->vm_manager.vm_pte_funcs == NULL) {
+               adev->vm_manager.vm_pte_funcs = &sdma_v2_4_vm_pte_funcs;
+               adev->vm_manager.vm_pte_funcs_ring = &adev->sdma[0].ring;
+       }
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.h b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.h
new file mode 100644 (file)
index 0000000..6cdf894
--- /dev/null
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __SDMA_V2_4_H__
+#define __SDMA_V2_4_H__
+
+extern const struct amdgpu_ip_funcs sdma_v2_4_ip_funcs;
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
new file mode 100644 (file)
index 0000000..dd547c7
--- /dev/null
@@ -0,0 +1,1514 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+#include <linux/firmware.h>
+#include <drm/drmP.h>
+#include "amdgpu.h"
+#include "amdgpu_ucode.h"
+#include "amdgpu_trace.h"
+#include "vi.h"
+#include "vid.h"
+
+#include "oss/oss_3_0_d.h"
+#include "oss/oss_3_0_sh_mask.h"
+
+#include "gmc/gmc_8_1_d.h"
+#include "gmc/gmc_8_1_sh_mask.h"
+
+#include "gca/gfx_8_0_d.h"
+#include "gca/gfx_8_0_sh_mask.h"
+
+#include "bif/bif_5_0_d.h"
+#include "bif/bif_5_0_sh_mask.h"
+
+#include "tonga_sdma_pkt_open.h"
+
+static void sdma_v3_0_set_ring_funcs(struct amdgpu_device *adev);
+static void sdma_v3_0_set_buffer_funcs(struct amdgpu_device *adev);
+static void sdma_v3_0_set_vm_pte_funcs(struct amdgpu_device *adev);
+static void sdma_v3_0_set_irq_funcs(struct amdgpu_device *adev);
+
+MODULE_FIRMWARE("radeon/tonga_sdma.bin");
+MODULE_FIRMWARE("radeon/tonga_sdma1.bin");
+MODULE_FIRMWARE("radeon/carrizo_sdma.bin");
+MODULE_FIRMWARE("radeon/carrizo_sdma1.bin");
+
+static const u32 sdma_offsets[SDMA_MAX_INSTANCE] =
+{
+       SDMA0_REGISTER_OFFSET,
+       SDMA1_REGISTER_OFFSET
+};
+
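+/* golden register triplets: { register, and_mask, or_value } consumed by
+ * amdgpu_program_register_sequence()
+ */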
+static const u32 golden_settings_tonga_a11[] =
+{
+       mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007,
+       mmSDMA0_CLK_CTRL, 0xff000fff, 0x00000000,
+       mmSDMA0_GFX_IB_CNTL, 0x800f0111, 0x00000100,
+       mmSDMA0_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
+       mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
+       mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007,
+       mmSDMA1_CLK_CTRL, 0xff000fff, 0x00000000,
+       mmSDMA1_GFX_IB_CNTL, 0x800f0111, 0x00000100,
+       mmSDMA1_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
+       mmSDMA1_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
+};
+
+static const u32 tonga_mgcg_cgcg_init[] =
+{
+       mmSDMA0_CLK_CTRL, 0xff000ff0, 0x00000100,
+       mmSDMA1_CLK_CTRL, 0xff000ff0, 0x00000100
+};
+
+static const u32 cz_golden_settings_a11[] =
+{
+       mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007,
+       mmSDMA0_CLK_CTRL, 0xff000fff, 0x00000000,
+       mmSDMA0_GFX_IB_CNTL, 0x00000100, 0x00000100,
+       mmSDMA0_POWER_CNTL, 0x00000800, 0x0003c800,
+       mmSDMA0_RLC0_IB_CNTL, 0x00000100, 0x00000100,
+       mmSDMA0_RLC1_IB_CNTL, 0x00000100, 0x00000100,
+       mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007,
+       mmSDMA1_CLK_CTRL, 0xff000fff, 0x00000000,
+       mmSDMA1_GFX_IB_CNTL, 0x00000100, 0x00000100,
+       mmSDMA1_POWER_CNTL, 0x00000800, 0x0003c800,
+       mmSDMA1_RLC0_IB_CNTL, 0x00000100, 0x00000100,
+       mmSDMA1_RLC1_IB_CNTL, 0x00000100, 0x00000100,
+};
+
+static const u32 cz_mgcg_cgcg_init[] =
+{
+       mmSDMA0_CLK_CTRL, 0xff000ff0, 0x00000100,
+       mmSDMA1_CLK_CTRL, 0xff000ff0, 0x00000100
+};
+
+/*
+ * sDMA - System DMA
+ * Starting with CIK, the GPU has new asynchronous
+ * DMA engines.  These engines are used for compute
+ * and gfx.  There are two DMA engines (SDMA0, SDMA1)
+ * and each one supports 1 ring buffer used for gfx
+ * and 2 queues used for compute.
+ *
+ * The programming model is very similar to the CP
+ * (ring buffer, IBs, etc.), but sDMA has its own
+ * packet format that is different from the PM4 format
+ * used by the CP. sDMA supports copying data, writing
+ * embedded data, solid fills, and a number of other
+ * things.  It also has support for tiling/detiling of
+ * buffers.
+ */
+
+static void sdma_v3_0_init_golden_registers(struct amdgpu_device *adev)
+{
+       switch (adev->asic_type) {
+       case CHIP_TONGA:
+               amdgpu_program_register_sequence(adev,
+                                                tonga_mgcg_cgcg_init,
+                                                (const u32)ARRAY_SIZE(tonga_mgcg_cgcg_init));
+               amdgpu_program_register_sequence(adev,
+                                                golden_settings_tonga_a11,
+                                                (const u32)ARRAY_SIZE(golden_settings_tonga_a11));
+               break;
+       case CHIP_CARRIZO:
+               amdgpu_program_register_sequence(adev,
+                                                cz_mgcg_cgcg_init,
+                                                (const u32)ARRAY_SIZE(cz_mgcg_cgcg_init));
+               amdgpu_program_register_sequence(adev,
+                                                cz_golden_settings_a11,
+                                                (const u32)ARRAY_SIZE(cz_golden_settings_a11));
+               break;
+       default:
+               break;
+       }
+}
+
+/**
+ * sdma_v3_0_init_microcode - load ucode images from disk
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Use the firmware interface to load the ucode images into
+ * the driver (not loaded into hw).
+ * Returns 0 on success, error on failure.
+ */
+static int sdma_v3_0_init_microcode(struct amdgpu_device *adev)
+{
+       const char *chip_name;
+       char fw_name[30];
+       int err, i;
+       struct amdgpu_firmware_info *info = NULL;
+       const struct common_firmware_header *header = NULL;
+
+       DRM_DEBUG("\n");
+
+       switch (adev->asic_type) {
+       case CHIP_TONGA:
+               chip_name = "tonga";
+               break;
+       case CHIP_CARRIZO:
+               chip_name = "carrizo";
+               break;
+       default: BUG();
+       }
+
+       for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
+               if (i == 0)
+                       snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma.bin", chip_name);
+               else
+                       snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma1.bin", chip_name);
+               err = request_firmware(&adev->sdma[i].fw, fw_name, adev->dev);
+               if (err)
+                       goto out;
+               err = amdgpu_ucode_validate(adev->sdma[i].fw);
+               if (err)
+                       goto out;
+
+               if (adev->firmware.smu_load) {
+                       info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
+                       info->ucode_id = AMDGPU_UCODE_ID_SDMA0 + i;
+                       info->fw = adev->sdma[i].fw;
+                       header = (const struct common_firmware_header *)info->fw->data;
+                       adev->firmware.fw_size +=
+                               ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
+               }
+       }
+out:
+       if (err) {
+               printk(KERN_ERR
+                      "sdma_v3_0: Failed to load firmware \"%s\"\n",
+                      fw_name);
+               for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
+                       release_firmware(adev->sdma[i].fw);
+                       adev->sdma[i].fw = NULL;
+               }
+       }
+       return err;
+}
+
+/**
+ * sdma_v3_0_ring_get_rptr - get the current read pointer
+ *
+ * @ring: amdgpu ring pointer
+ *
+ * Get the current rptr from the hardware (VI+).
+ */
+static uint32_t sdma_v3_0_ring_get_rptr(struct amdgpu_ring *ring)
+{
+       u32 rptr;
+
+       /* XXX check if swapping is necessary on BE */
+       rptr = ring->adev->wb.wb[ring->rptr_offs] >> 2;
+
+       return rptr;
+}
+
+/**
+ * sdma_v3_0_ring_get_wptr - get the current write pointer
+ *
+ * @ring: amdgpu ring pointer
+ *
+ * Get the current wptr from the hardware (VI+).
+ */
+static uint32_t sdma_v3_0_ring_get_wptr(struct amdgpu_ring *ring)
+{
+       struct amdgpu_device *adev = ring->adev;
+       u32 wptr;
+
+       if (ring->use_doorbell) {
+               /* XXX check if swapping is necessary on BE */
+               wptr = ring->adev->wb.wb[ring->wptr_offs] >> 2;
+       } else {
+               int me = (ring == &ring->adev->sdma[0].ring) ? 0 : 1;
+
+               wptr = RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me]) >> 2;
+       }
+
+       return wptr;
+}
+
+/**
+ * sdma_v3_0_ring_set_wptr - commit the write pointer
+ *
+ * @ring: amdgpu ring pointer
+ *
+ * Write the wptr back to the hardware (VI+).
+ */
+static void sdma_v3_0_ring_set_wptr(struct amdgpu_ring *ring)
+{
+       struct amdgpu_device *adev = ring->adev;
+
+       if (ring->use_doorbell) {
+               /* XXX check if swapping is necessary on BE */
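+               /* wptr is tracked in dwords; the wb slot and doorbell
+                * take a byte offset, hence the << 2
+                */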
+               adev->wb.wb[ring->wptr_offs] = ring->wptr << 2;
+               WDOORBELL32(ring->doorbell_index, ring->wptr << 2);
+       } else {
+               int me = (ring == &ring->adev->sdma[0].ring) ? 0 : 1;
+
+               WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me], ring->wptr << 2);
+       }
+}
+
+static void sdma_v3_0_hdp_flush_ring_emit(struct amdgpu_ring *);
+
+/**
+ * sdma_v3_0_ring_emit_ib - Schedule an IB on the DMA engine
+ *
+ * @ring: amdgpu ring pointer
+ * @ib: IB object to schedule
+ *
+ * Schedule an IB in the DMA ring (VI).
+ */
+static void sdma_v3_0_ring_emit_ib(struct amdgpu_ring *ring,
+                                  struct amdgpu_ib *ib)
+{
+       u32 vmid = (ib->vm ? ib->vm->ids[ring->idx].id : 0) & 0xf;
+       u32 next_rptr = ring->wptr + 5;
+
+       if (ib->flush_hdp_writefifo)
+               next_rptr += 6;
+
+       while ((next_rptr & 7) != 2)
+               next_rptr++;
+       next_rptr += 6;
+
+       amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
+                         SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR));
+       amdgpu_ring_write(ring, lower_32_bits(ring->next_rptr_gpu_addr) & 0xfffffffc);
+       amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr));
+       amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1));
+       amdgpu_ring_write(ring, next_rptr);
+
+       /* flush HDP */
+       if (ib->flush_hdp_writefifo) {
+               sdma_v3_0_hdp_flush_ring_emit(ring);
+       }
+
+       /* IB packet must end on an 8 DW boundary */
+       while ((ring->wptr & 7) != 2)
+               amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_NOP));
+
+       amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
+                         SDMA_PKT_INDIRECT_HEADER_VMID(vmid));
+       /* base must be 32 byte aligned */
+       amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr) & 0xffffffe0);
+       amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
+       amdgpu_ring_write(ring, ib->length_dw);
+       amdgpu_ring_write(ring, 0);
+       amdgpu_ring_write(ring, 0);
+
+}
+
+/**
+ * sdma_v3_0_hdp_flush_ring_emit - emit an hdp flush on the DMA ring
+ *
+ * @ring: amdgpu ring pointer
+ *
+ * Emit an hdp flush packet on the requested DMA ring.
+ */
+static void sdma_v3_0_hdp_flush_ring_emit(struct amdgpu_ring *ring)
+{
+       u32 ref_and_mask = 0;
+
+       if (ring == &ring->adev->sdma[0].ring)
+               ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA0, 1);
+       else
+               ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA1, 1);
+
+       amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
+                         SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) |
+                         SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
+       amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_DONE << 2);
+       amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_REQ << 2);
+       amdgpu_ring_write(ring, ref_and_mask); /* reference */
+       amdgpu_ring_write(ring, ref_and_mask); /* mask */
+       amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
+                         SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
+}
+
+/**
+ * sdma_v3_0_ring_emit_fence - emit a fence on the DMA ring
+ *
+ * @ring: amdgpu ring pointer
+ * @fence: amdgpu fence object
+ *
+ * Add a DMA fence packet to the ring to write
+ * the fence seq number and a DMA trap packet to generate
+ * an interrupt if needed (VI).
+ */
+static void sdma_v3_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
+                                     bool write64bits)
+{
+       /* write the fence */
+       amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE));
+       amdgpu_ring_write(ring, lower_32_bits(addr));
+       amdgpu_ring_write(ring, upper_32_bits(addr));
+       amdgpu_ring_write(ring, lower_32_bits(seq));
+
+       /* optionally write high bits as well */
+       if (write64bits) {
+               addr += 4;
+               amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE));
+               amdgpu_ring_write(ring, lower_32_bits(addr));
+               amdgpu_ring_write(ring, upper_32_bits(addr));
+               amdgpu_ring_write(ring, upper_32_bits(seq));
+       }
+
+       /* generate an interrupt */
+       amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_TRAP));
+       amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(0));
+}
+
+
+/**
+ * sdma_v3_0_ring_emit_semaphore - emit a semaphore on the dma ring
+ *
+ * @ring: amdgpu_ring structure holding ring information
+ * @semaphore: amdgpu semaphore object
+ * @emit_wait: wait or signal semaphore
+ *
+ * Add a DMA semaphore packet to the ring to wait on or signal
+ * other rings (VI).
+ */
+static bool sdma_v3_0_ring_emit_semaphore(struct amdgpu_ring *ring,
+                                         struct amdgpu_semaphore *semaphore,
+                                         bool emit_wait)
+{
+       u64 addr = semaphore->gpu_addr;
+       u32 sig = emit_wait ? 0 : 1;
+
+       amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SEM) |
+                         SDMA_PKT_SEMAPHORE_HEADER_SIGNAL(sig));
+       amdgpu_ring_write(ring, lower_32_bits(addr) & 0xfffffff8);
+       amdgpu_ring_write(ring, upper_32_bits(addr));
+
+       return true;
+}
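+
+/*
+ * Note: the SIGNAL bit in the semaphore header is the inverse of emit_wait
+ * (0 = wait, 1 = signal), and the low three address bits are masked off
+ * since the semaphore address is treated as 8-byte aligned.
+ */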
+
+/**
+ * sdma_v3_0_gfx_stop - stop the gfx async dma engines
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Stop the gfx async dma ring buffers (VI).
+ */
+static void sdma_v3_0_gfx_stop(struct amdgpu_device *adev)
+{
+       struct amdgpu_ring *sdma0 = &adev->sdma[0].ring;
+       struct amdgpu_ring *sdma1 = &adev->sdma[1].ring;
+       u32 rb_cntl, ib_cntl;
+       int i;
+
+       if ((adev->mman.buffer_funcs_ring == sdma0) ||
+           (adev->mman.buffer_funcs_ring == sdma1))
+               amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size);
+
+       for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
+               rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
+               rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
+               WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
+               ib_cntl = RREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i]);
+               ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
+               WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
+       }
+       sdma0->ready = false;
+       sdma1->ready = false;
+}
+
+/**
+ * sdma_v3_0_rlc_stop - stop the compute async dma engines
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Stop the compute async dma queues (VI).
+ */
+static void sdma_v3_0_rlc_stop(struct amdgpu_device *adev)
+{
+       /* XXX todo */
+}
+
+/**
+ * sdma_v3_0_enable - stop the async dma engines
+ *
+ * @adev: amdgpu_device pointer
+ * @enable: enable/disable the DMA MEs.
+ *
+ * Halt or unhalt the async dma engines (VI).
+ */
+static void sdma_v3_0_enable(struct amdgpu_device *adev, bool enable)
+{
+       u32 f32_cntl;
+       int i;
+
+       if (!enable) {
+               sdma_v3_0_gfx_stop(adev);
+               sdma_v3_0_rlc_stop(adev);
+       }
+
+       for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
+               f32_cntl = RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i]);
+               if (enable)
+                       f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, 0);
+               else
+                       f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, 1);
+               WREG32(mmSDMA0_F32_CNTL + sdma_offsets[i], f32_cntl);
+       }
+}
+
+/**
+ * sdma_v3_0_gfx_resume - setup and start the async dma engines
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Set up the gfx DMA ring buffers and enable them (VI).
+ * Returns 0 for success, error for failure.
+ */
+static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev)
+{
+       struct amdgpu_ring *ring;
+       u32 rb_cntl, ib_cntl;
+       u32 rb_bufsz;
+       u32 wb_offset;
+       u32 doorbell;
+       int i, j, r;
+
+       for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
+               ring = &adev->sdma[i].ring;
+               wb_offset = (ring->rptr_offs * 4);
+
+               mutex_lock(&adev->srbm_mutex);
+               for (j = 0; j < 16; j++) {
+                       vi_srbm_select(adev, 0, 0, 0, j);
+                       /* SDMA GFX */
+                       WREG32(mmSDMA0_GFX_VIRTUAL_ADDR + sdma_offsets[i], 0);
+                       WREG32(mmSDMA0_GFX_APE1_CNTL + sdma_offsets[i], 0);
+               }
+               vi_srbm_select(adev, 0, 0, 0, 0);
+               mutex_unlock(&adev->srbm_mutex);
+
+               WREG32(mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i], 0);
+
+               /* Set ring buffer size in dwords */
+               rb_bufsz = order_base_2(ring->ring_size / 4);
+               rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
+               rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SIZE, rb_bufsz);
+#ifdef __BIG_ENDIAN
+               rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SWAP_ENABLE, 1);
+               rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL,
+                                       RPTR_WRITEBACK_SWAP_ENABLE, 1);
+#endif
+               WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
+
+               /* Initialize the ring buffer's read and write pointers */
+               WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0);
+               WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0);
+
+               /* set the wb address whether it's enabled or not */
+               WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i],
+                      upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
+               WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_LO + sdma_offsets[i],
+                      lower_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC);
+
+               rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1);
+
+               WREG32(mmSDMA0_GFX_RB_BASE + sdma_offsets[i], ring->gpu_addr >> 8);
+               WREG32(mmSDMA0_GFX_RB_BASE_HI + sdma_offsets[i], ring->gpu_addr >> 40);
+
+               ring->wptr = 0;
+               WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], ring->wptr << 2);
+
+               doorbell = RREG32(mmSDMA0_GFX_DOORBELL + sdma_offsets[i]);
+
+               if (ring->use_doorbell) {
+                       doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL,
+                                                OFFSET, ring->doorbell_index);
+                       doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 1);
+               } else {
+                       doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 0);
+               }
+               WREG32(mmSDMA0_GFX_DOORBELL + sdma_offsets[i], doorbell);
+
+               /* enable DMA RB */
+               rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1);
+               WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
+
+               ib_cntl = RREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i]);
+               ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 1);
+#ifdef __BIG_ENDIAN
+               ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_SWAP_ENABLE, 1);
+#endif
+               /* enable DMA IBs */
+               WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
+
+               ring->ready = true;
+
+               r = amdgpu_ring_test_ring(ring);
+               if (r) {
+                       ring->ready = false;
+                       return r;
+               }
+
+               if (adev->mman.buffer_funcs_ring == ring)
+                       amdgpu_ttm_set_active_vram_size(adev, adev->mc.real_vram_size);
+       }
+
+       return 0;
+}
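+
+/*
+ * A note on units in the resume sequence above: RB_SIZE is programmed as
+ * log2 of the ring size in dwords (order_base_2(ring_size / 4)), the ring
+ * base registers take the GPU address shifted right by 8 (256-byte
+ * granularity), and the write pointer register is apparently in byte
+ * units, hence wptr << 2 when it is programmed.
+ */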
+
+/**
+ * sdma_v3_0_rlc_resume - setup and start the async dma engines
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Set up the compute DMA queues and enable them (VI).
+ * Returns 0 for success, error for failure.
+ */
+static int sdma_v3_0_rlc_resume(struct amdgpu_device *adev)
+{
+       /* XXX todo */
+       return 0;
+}
+
+/**
+ * sdma_v3_0_load_microcode - load the sDMA ME ucode
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Loads the sDMA0/1 ucode.
+ * Returns 0 for success, -EINVAL if the ucode is not available.
+ */
+static int sdma_v3_0_load_microcode(struct amdgpu_device *adev)
+{
+       const struct sdma_firmware_header_v1_0 *hdr;
+       const __le32 *fw_data;
+       u32 fw_size;
+       int i, j;
+
+       if (!adev->sdma[0].fw || !adev->sdma[1].fw)
+               return -EINVAL;
+
+       /* halt the MEs */
+       sdma_v3_0_enable(adev, false);
+
+       for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
+               hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data;
+               amdgpu_ucode_print_sdma_hdr(&hdr->header);
+               fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
+               adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
+
+               fw_data = (const __le32 *)
+                       (adev->sdma[i].fw->data +
+                               le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+               WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], 0);
+               for (j = 0; j < fw_size; j++)
+                       WREG32(mmSDMA0_UCODE_DATA + sdma_offsets[i], le32_to_cpup(fw_data++));
+               WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], adev->sdma[i].fw_version);
+       }
+
+       return 0;
+}
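+
+/*
+ * The direct-load path above streams the microcode one dword at a time
+ * through SDMA0_UCODE_DATA after resetting SDMA0_UCODE_ADDR, then leaves
+ * the firmware version in the address register.  When
+ * adev->firmware.smu_load is set, the SMU loads this ucode instead and
+ * sdma_v3_0_start() only checks for completion.
+ */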
+
+/**
+ * sdma_v3_0_start - setup and start the async dma engines
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Set up the DMA engines and enable them (VI).
+ * Returns 0 for success, error for failure.
+ */
+static int sdma_v3_0_start(struct amdgpu_device *adev)
+{
+       int r;
+
+       if (!adev->firmware.smu_load) {
+               r = sdma_v3_0_load_microcode(adev);
+               if (r)
+                       return r;
+       } else {
+               r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
+                                               AMDGPU_UCODE_ID_SDMA0);
+               if (r)
+                       return -EINVAL;
+               r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
+                                               AMDGPU_UCODE_ID_SDMA1);
+               if (r)
+                       return -EINVAL;
+       }
+
+       /* unhalt the MEs */
+       sdma_v3_0_enable(adev, true);
+
+       /* start the gfx rings and rlc compute queues */
+       r = sdma_v3_0_gfx_resume(adev);
+       if (r)
+               return r;
+       r = sdma_v3_0_rlc_resume(adev);
+       if (r)
+               return r;
+
+       return 0;
+}
+
+/**
+ * sdma_v3_0_ring_test_ring - simple async dma engine test
+ *
+ * @ring: amdgpu_ring structure holding ring information
+ *
+ * Test the DMA engine by using it to write a value to
+ * memory (VI).
+ * Returns 0 for success, error for failure.
+ */
+static int sdma_v3_0_ring_test_ring(struct amdgpu_ring *ring)
+{
+       struct amdgpu_device *adev = ring->adev;
+       unsigned i;
+       unsigned index;
+       int r;
+       u32 tmp;
+       u64 gpu_addr;
+
+       r = amdgpu_wb_get(adev, &index);
+       if (r) {
+               dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
+               return r;
+       }
+
+       gpu_addr = adev->wb.gpu_addr + (index * 4);
+       tmp = 0xCAFEDEAD;
+       adev->wb.wb[index] = cpu_to_le32(tmp);
+
+       r = amdgpu_ring_lock(ring, 5);
+       if (r) {
+               DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
+               amdgpu_wb_free(adev, index);
+               return r;
+       }
+
+       amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
+                         SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR));
+       amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
+       amdgpu_ring_write(ring, upper_32_bits(gpu_addr));
+       amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1));
+       amdgpu_ring_write(ring, 0xDEADBEEF);
+       amdgpu_ring_unlock_commit(ring);
+
+       for (i = 0; i < adev->usec_timeout; i++) {
+               tmp = le32_to_cpu(adev->wb.wb[index]);
+               if (tmp == 0xDEADBEEF)
+                       break;
+               DRM_UDELAY(1);
+       }
+
+       if (i < adev->usec_timeout) {
+               DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
+       } else {
+               DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
+                         ring->idx, tmp);
+               r = -EINVAL;
+       }
+       amdgpu_wb_free(adev, index);
+
+       return r;
+}
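+
+/*
+ * The test above emits a minimal 5-dword linear-write packet (header,
+ * destination address low/high, count field, payload 0xDEADBEEF) and then
+ * polls the writeback slot until the payload appears or the timeout
+ * expires.
+ */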
+
+/**
+ * sdma_v3_0_ring_test_ib - test an IB on the DMA engine
+ *
+ * @ring: amdgpu_ring structure holding ring information
+ *
+ * Test a simple IB in the DMA ring (VI).
+ * Returns 0 on success, error on failure.
+ */
+static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring)
+{
+       struct amdgpu_device *adev = ring->adev;
+       struct amdgpu_ib ib;
+       unsigned i;
+       unsigned index;
+       int r;
+       u32 tmp = 0;
+       u64 gpu_addr;
+
+       r = amdgpu_wb_get(adev, &index);
+       if (r) {
+               dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
+               return r;
+       }
+
+       gpu_addr = adev->wb.gpu_addr + (index * 4);
+       tmp = 0xCAFEDEAD;
+       adev->wb.wb[index] = cpu_to_le32(tmp);
+
+       r = amdgpu_ib_get(ring, NULL, 256, &ib);
+       if (r) {
+               amdgpu_wb_free(adev, index);
+               DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
+               return r;
+       }
+
+       ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
+               SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
+       ib.ptr[1] = lower_32_bits(gpu_addr);
+       ib.ptr[2] = upper_32_bits(gpu_addr);
+       ib.ptr[3] = SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1);
+       ib.ptr[4] = 0xDEADBEEF;
+       ib.ptr[5] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
+       ib.ptr[6] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
+       ib.ptr[7] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
+       ib.length_dw = 8;
+
+       r = amdgpu_ib_schedule(adev, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED);
+       if (r) {
+               amdgpu_ib_free(adev, &ib);
+               amdgpu_wb_free(adev, index);
+               DRM_ERROR("amdgpu: failed to schedule ib (%d).\n", r);
+               return r;
+       }
+       r = amdgpu_fence_wait(ib.fence, false);
+       if (r) {
+               amdgpu_ib_free(adev, &ib);
+               amdgpu_wb_free(adev, index);
+               DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
+               return r;
+       }
+       for (i = 0; i < adev->usec_timeout; i++) {
+               tmp = le32_to_cpu(adev->wb.wb[index]);
+               if (tmp == 0xDEADBEEF)
+                       break;
+               DRM_UDELAY(1);
+       }
+       if (i < adev->usec_timeout) {
+               DRM_INFO("ib test on ring %d succeeded in %u usecs\n",
+                        ib.fence->ring->idx, i);
+       } else {
+               DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
+               r = -EINVAL;
+       }
+       amdgpu_ib_free(adev, &ib);
+       amdgpu_wb_free(adev, index);
+       return r;
+}
+
+/**
+ * sdma_v3_0_vm_copy_pte - update PTEs by copying them from the GART
+ *
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @src: src addr to copy from
+ * @count: number of page entries to update
+ *
+ * Update PTEs by copying them from the GART using sDMA (VI).
+ */
+static void sdma_v3_0_vm_copy_pte(struct amdgpu_ib *ib,
+                                 uint64_t pe, uint64_t src,
+                                 unsigned count)
+{
+       while (count) {
+               unsigned bytes = count * 8;
+               if (bytes > 0x1FFFF8)
+                       bytes = 0x1FFFF8;
+
+               ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
+                       SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
+               ib->ptr[ib->length_dw++] = bytes;
+               ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
+               ib->ptr[ib->length_dw++] = lower_32_bits(src);
+               ib->ptr[ib->length_dw++] = upper_32_bits(src);
+               ib->ptr[ib->length_dw++] = lower_32_bits(pe);
+               ib->ptr[ib->length_dw++] = upper_32_bits(pe);
+
+               pe += bytes;
+               src += bytes;
+               count -= bytes / 8;
+       }
+}
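+
+/*
+ * Each COPY_LINEAR packet above is capped at 0x1FFFF8 bytes, a multiple of
+ * 8 so that only whole 64-bit PTEs are copied per packet, presumably the
+ * limit of the packet's byte-count field.
+ */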
+
+/**
+ * sdma_v3_0_vm_write_pte - update PTEs by writing them manually
+ *
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @addr: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ * @flags: access flags
+ *
+ * Update PTEs by writing them manually using sDMA (VI).
+ */
+static void sdma_v3_0_vm_write_pte(struct amdgpu_ib *ib,
+                                  uint64_t pe,
+                                  uint64_t addr, unsigned count,
+                                  uint32_t incr, uint32_t flags)
+{
+       uint64_t value;
+       unsigned ndw;
+
+       while (count) {
+               ndw = count * 2;
+               if (ndw > 0xFFFFE)
+                       ndw = 0xFFFFE;
+
+               /* for non-physically contiguous pages (system) */
+               ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
+                       SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
+               ib->ptr[ib->length_dw++] = lower_32_bits(pe);
+               ib->ptr[ib->length_dw++] = upper_32_bits(pe);
+               ib->ptr[ib->length_dw++] = ndw;
+               for (; ndw > 0; ndw -= 2, --count, pe += 8) {
+                       if (flags & AMDGPU_PTE_SYSTEM) {
+                               value = amdgpu_vm_map_gart(ib->ring->adev, addr);
+                               value &= 0xFFFFFFFFFFFFF000ULL;
+                       } else if (flags & AMDGPU_PTE_VALID) {
+                               value = addr;
+                       } else {
+                               value = 0;
+                       }
+                       addr += incr;
+                       value |= flags;
+                       ib->ptr[ib->length_dw++] = value;
+                       ib->ptr[ib->length_dw++] = upper_32_bits(value);
+               }
+       }
+}
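+
+/*
+ * Unlike the GENERATE_PTE_PDE path below, this variant embeds each PTE
+ * value directly in the packet, resolving system pages through
+ * amdgpu_vm_map_gart() first, which is why it is used for non-contiguous
+ * (system) mappings.
+ */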
+
+/**
+ * sdma_v3_0_vm_set_pte_pde - update the page tables using sDMA
+ *
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @addr: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ * @flags: access flags
+ *
+ * Update the page tables using sDMA (VI).
+ */
+static void sdma_v3_0_vm_set_pte_pde(struct amdgpu_ib *ib,
+                                    uint64_t pe,
+                                    uint64_t addr, unsigned count,
+                                    uint32_t incr, uint32_t flags)
+{
+       uint64_t value;
+       unsigned ndw;
+
+       while (count) {
+               ndw = count;
+               if (ndw > 0x7FFFF)
+                       ndw = 0x7FFFF;
+
+               if (flags & AMDGPU_PTE_VALID)
+                       value = addr;
+               else
+                       value = 0;
+
+               /* for physically contiguous pages (vram) */
+               ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_GEN_PTEPDE);
+               ib->ptr[ib->length_dw++] = lower_32_bits(pe); /* dst addr */
+               ib->ptr[ib->length_dw++] = upper_32_bits(pe);
+               ib->ptr[ib->length_dw++] = flags; /* mask */
+               ib->ptr[ib->length_dw++] = 0;
+               ib->ptr[ib->length_dw++] = lower_32_bits(value); /* value */
+               ib->ptr[ib->length_dw++] = upper_32_bits(value);
+               ib->ptr[ib->length_dw++] = incr; /* increment size */
+               ib->ptr[ib->length_dw++] = 0;
+               ib->ptr[ib->length_dw++] = ndw; /* number of entries */
+
+               pe += ndw * 8;
+               addr += ndw * incr;
+               count -= ndw;
+       }
+}
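+
+/*
+ * By the look of the fields, GENERATE_PTE_PDE describes up to 0x7FFFF
+ * entries with a single 10-dword packet: destination, flags (as the mask
+ * dword), an initial value, a per-entry increment and an entry count, so
+ * the engine can expand the run itself.  That is why this path is
+ * preferred for physically contiguous (vram) ranges.
+ */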
+
+/**
+ * sdma_v3_0_vm_pad_ib - pad the IB to the required number of dw
+ *
+ * @ib: indirect buffer to fill with padding
+ *
+ * Pad the IB with NOPs so its length is a multiple of 8 dwords (VI).
+ */
+static void sdma_v3_0_vm_pad_ib(struct amdgpu_ib *ib)
+{
+       while (ib->length_dw & 0x7)
+               ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
+}
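+
+/*
+ * This matches the boundary noted in sdma_v3_0_ring_emit_ib(): SDMA
+ * submissions are padded with NOPs to a multiple of 8 dwords.
+ */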
+
+/**
+ * sdma_v3_0_ring_emit_vm_flush - vm flush using sDMA
+ *
+ * @ring: amdgpu_ring pointer
+ * @vm_id: VMID to flush
+ * @pd_addr: address of the page directory
+ *
+ * Update the page table base and flush the VM TLB
+ * using sDMA (VI).
+ */
+static void sdma_v3_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
+                                        unsigned vm_id, uint64_t pd_addr)
+{
+       u32 srbm_gfx_cntl = 0;
+
+       amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
+                         SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
+       if (vm_id < 8) {
+               amdgpu_ring_write(ring, (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id));
+       } else {
+               amdgpu_ring_write(ring, (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8));
+       }
+       amdgpu_ring_write(ring, pd_addr >> 12);
+
+       /* update SH_MEM_* regs */
+       srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, VMID, vm_id);
+       amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
+                         SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
+       amdgpu_ring_write(ring, mmSRBM_GFX_CNTL);
+       amdgpu_ring_write(ring, srbm_gfx_cntl);
+
+       amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
+                         SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
+       amdgpu_ring_write(ring, mmSH_MEM_BASES);
+       amdgpu_ring_write(ring, 0);
+
+       amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
+                         SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
+       amdgpu_ring_write(ring, mmSH_MEM_CONFIG);
+       amdgpu_ring_write(ring, 0);
+
+       amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
+                         SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
+       amdgpu_ring_write(ring, mmSH_MEM_APE1_BASE);
+       amdgpu_ring_write(ring, 1);
+
+       amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
+                         SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
+       amdgpu_ring_write(ring, mmSH_MEM_APE1_LIMIT);
+       amdgpu_ring_write(ring, 0);
+
+       srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, VMID, 0);
+       amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
+                         SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
+       amdgpu_ring_write(ring, mmSRBM_GFX_CNTL);
+       amdgpu_ring_write(ring, srbm_gfx_cntl);
+
+       /* flush TLB */
+       amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
+                         SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
+       amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
+       amdgpu_ring_write(ring, 1 << vm_id);
+
+       /* wait for flush */
+       amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
+                         SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
+                         SDMA_PKT_POLL_REGMEM_HEADER_FUNC(0)); /* always */
+       amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
+       amdgpu_ring_write(ring, 0);
+       amdgpu_ring_write(ring, 0); /* reference */
+       amdgpu_ring_write(ring, 0); /* mask */
+       amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
+                         SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
+}
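+
+/*
+ * Summary of the sequence above: program the page table base for the VMID
+ * via SRBM writes, briefly select the VMID to reset its SH_MEM_* registers,
+ * restore VMID 0, request a TLB invalidation for the VMID, and finally
+ * poll VM_INVALIDATE_REQUEST (with the "always" compare function and a
+ * bounded retry count) to give the invalidation time to complete.
+ */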
+
+static int sdma_v3_0_early_init(struct amdgpu_device *adev)
+{
+       sdma_v3_0_set_ring_funcs(adev);
+       sdma_v3_0_set_buffer_funcs(adev);
+       sdma_v3_0_set_vm_pte_funcs(adev);
+       sdma_v3_0_set_irq_funcs(adev);
+
+       return 0;
+}
+
+static int sdma_v3_0_sw_init(struct amdgpu_device *adev)
+{
+       struct amdgpu_ring *ring;
+       int r;
+
+       /* SDMA trap event */
+       r = amdgpu_irq_add_id(adev, 224, &adev->sdma_trap_irq);
+       if (r)
+               return r;
+
+       /* SDMA Privileged inst */
+       r = amdgpu_irq_add_id(adev, 241, &adev->sdma_illegal_inst_irq);
+       if (r)
+               return r;
+
+       /* SDMA Privileged inst */
+       r = amdgpu_irq_add_id(adev, 247, &adev->sdma_illegal_inst_irq);
+       if (r)
+               return r;
+
+       r = sdma_v3_0_init_microcode(adev);
+       if (r) {
+               DRM_ERROR("Failed to load sdma firmware!\n");
+               return r;
+       }
+
+       ring = &adev->sdma[0].ring;
+       ring->ring_obj = NULL;
+       ring->use_doorbell = true;
+       ring->doorbell_index = AMDGPU_DOORBELL_sDMA_ENGINE0;
+
+       ring = &adev->sdma[1].ring;
+       ring->ring_obj = NULL;
+       ring->use_doorbell = true;
+       ring->doorbell_index = AMDGPU_DOORBELL_sDMA_ENGINE1;
+
+       ring = &adev->sdma[0].ring;
+       sprintf(ring->name, "sdma0");
+       r = amdgpu_ring_init(adev, ring, 256 * 1024,
+                            SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), 0xf,
+                            &adev->sdma_trap_irq, AMDGPU_SDMA_IRQ_TRAP0,
+                            AMDGPU_RING_TYPE_SDMA);
+       if (r)
+               return r;
+
+       ring = &adev->sdma[1].ring;
+       sprintf(ring->name, "sdma1");
+       r = amdgpu_ring_init(adev, ring, 256 * 1024,
+                            SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), 0xf,
+                            &adev->sdma_trap_irq, AMDGPU_SDMA_IRQ_TRAP1,
+                            AMDGPU_RING_TYPE_SDMA);
+       if (r)
+               return r;
+
+       return r;
+}
+
+static int sdma_v3_0_sw_fini(struct amdgpu_device *adev)
+{
+       amdgpu_ring_fini(&adev->sdma[0].ring);
+       amdgpu_ring_fini(&adev->sdma[1].ring);
+
+       return 0;
+}
+
+static int sdma_v3_0_hw_init(struct amdgpu_device *adev)
+{
+       int r;
+
+       sdma_v3_0_init_golden_registers(adev);
+
+       r = sdma_v3_0_start(adev);
+       if (r)
+               return r;
+
+       return r;
+}
+
+static int sdma_v3_0_hw_fini(struct amdgpu_device *adev)
+{
+       sdma_v3_0_enable(adev, false);
+
+       return 0;
+}
+
+static int sdma_v3_0_suspend(struct amdgpu_device *adev)
+{
+       return sdma_v3_0_hw_fini(adev);
+}
+
+static int sdma_v3_0_resume(struct amdgpu_device *adev)
+{
+       return sdma_v3_0_hw_init(adev);
+}
+
+static bool sdma_v3_0_is_idle(struct amdgpu_device *adev)
+{
+       u32 tmp = RREG32(mmSRBM_STATUS2);
+
+       if (tmp & (SRBM_STATUS2__SDMA_BUSY_MASK |
+                  SRBM_STATUS2__SDMA1_BUSY_MASK))
+           return false;
+
+       return true;
+}
+
+static int sdma_v3_0_wait_for_idle(struct amdgpu_device *adev)
+{
+       unsigned i;
+       u32 tmp;
+
+       for (i = 0; i < adev->usec_timeout; i++) {
+               tmp = RREG32(mmSRBM_STATUS2) & (SRBM_STATUS2__SDMA_BUSY_MASK |
+                               SRBM_STATUS2__SDMA1_BUSY_MASK);
+
+               if (!tmp)
+                       return 0;
+               udelay(1);
+       }
+       return -ETIMEDOUT;
+}
+
+static void sdma_v3_0_print_status(struct amdgpu_device *adev)
+{
+       int i, j;
+
+       dev_info(adev->dev, "VI SDMA registers\n");
+       dev_info(adev->dev, "  SRBM_STATUS2=0x%08X\n",
+                RREG32(mmSRBM_STATUS2));
+       for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
+               dev_info(adev->dev, "  SDMA%d_STATUS_REG=0x%08X\n",
+                        i, RREG32(mmSDMA0_STATUS_REG + sdma_offsets[i]));
+               dev_info(adev->dev, "  SDMA%d_F32_CNTL=0x%08X\n",
+                        i, RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i]));
+               dev_info(adev->dev, "  SDMA%d_CNTL=0x%08X\n",
+                        i, RREG32(mmSDMA0_CNTL + sdma_offsets[i]));
+               dev_info(adev->dev, "  SDMA%d_SEM_WAIT_FAIL_TIMER_CNTL=0x%08X\n",
+                        i, RREG32(mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i]));
+               dev_info(adev->dev, "  SDMA%d_GFX_IB_CNTL=0x%08X\n",
+                        i, RREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i]));
+               dev_info(adev->dev, "  SDMA%d_GFX_RB_CNTL=0x%08X\n",
+                        i, RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]));
+               dev_info(adev->dev, "  SDMA%d_GFX_RB_RPTR=0x%08X\n",
+                        i, RREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i]));
+               dev_info(adev->dev, "  SDMA%d_GFX_RB_WPTR=0x%08X\n",
+                        i, RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i]));
+               dev_info(adev->dev, "  SDMA%d_GFX_RB_RPTR_ADDR_HI=0x%08X\n",
+                        i, RREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i]));
+               dev_info(adev->dev, "  SDMA%d_GFX_RB_RPTR_ADDR_LO=0x%08X\n",
+                        i, RREG32(mmSDMA0_GFX_RB_RPTR_ADDR_LO + sdma_offsets[i]));
+               dev_info(adev->dev, "  SDMA%d_GFX_RB_BASE=0x%08X\n",
+                        i, RREG32(mmSDMA0_GFX_RB_BASE + sdma_offsets[i]));
+               dev_info(adev->dev, "  SDMA%d_GFX_RB_BASE_HI=0x%08X\n",
+                        i, RREG32(mmSDMA0_GFX_RB_BASE_HI + sdma_offsets[i]));
+               dev_info(adev->dev, "  SDMA%d_GFX_DOORBELL=0x%08X\n",
+                        i, RREG32(mmSDMA0_GFX_DOORBELL + sdma_offsets[i]));
+               mutex_lock(&adev->srbm_mutex);
+               for (j = 0; j < 16; j++) {
+                       vi_srbm_select(adev, 0, 0, 0, j);
+                       dev_info(adev->dev, "  VM %d:\n", j);
+                       dev_info(adev->dev, "  SDMA%d_GFX_VIRTUAL_ADDR=0x%08X\n",
+                                i, RREG32(mmSDMA0_GFX_VIRTUAL_ADDR + sdma_offsets[i]));
+                       dev_info(adev->dev, "  SDMA%d_GFX_APE1_CNTL=0x%08X\n",
+                                i, RREG32(mmSDMA0_GFX_APE1_CNTL + sdma_offsets[i]));
+               }
+               vi_srbm_select(adev, 0, 0, 0, 0);
+               mutex_unlock(&adev->srbm_mutex);
+       }
+}
+
+static int sdma_v3_0_soft_reset(struct amdgpu_device *adev)
+{
+       u32 srbm_soft_reset = 0;
+       u32 tmp = RREG32(mmSRBM_STATUS2);
+
+       if (tmp & SRBM_STATUS2__SDMA_BUSY_MASK) {
+               /* sdma0 */
+               tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET);
+               tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 0);
+               WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp);
+               srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA_MASK;
+       }
+       if (tmp & SRBM_STATUS2__SDMA1_BUSY_MASK) {
+               /* sdma1 */
+               tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET);
+               tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 0);
+               WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp);
+               srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA1_MASK;
+       }
+
+       if (srbm_soft_reset) {
+               sdma_v3_0_print_status(adev);
+
+               tmp = RREG32(mmSRBM_SOFT_RESET);
+               tmp |= srbm_soft_reset;
+               dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
+               WREG32(mmSRBM_SOFT_RESET, tmp);
+               tmp = RREG32(mmSRBM_SOFT_RESET);
+
+               udelay(50);
+
+               tmp &= ~srbm_soft_reset;
+               WREG32(mmSRBM_SOFT_RESET, tmp);
+               tmp = RREG32(mmSRBM_SOFT_RESET);
+
+               /* Wait a little for things to settle down */
+               udelay(50);
+
+               sdma_v3_0_print_status(adev);
+       }
+
+       return 0;
+}
+
+static int sdma_v3_0_set_trap_irq_state(struct amdgpu_device *adev,
+                                       struct amdgpu_irq_src *source,
+                                       unsigned type,
+                                       enum amdgpu_interrupt_state state)
+{
+       u32 sdma_cntl;
+
+       switch (type) {
+       case AMDGPU_SDMA_IRQ_TRAP0:
+               switch (state) {
+               case AMDGPU_IRQ_STATE_DISABLE:
+                       sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET);
+                       sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 0);
+                       WREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET, sdma_cntl);
+                       break;
+               case AMDGPU_IRQ_STATE_ENABLE:
+                       sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET);
+                       sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 1);
+                       WREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET, sdma_cntl);
+                       break;
+               default:
+                       break;
+               }
+               break;
+       case AMDGPU_SDMA_IRQ_TRAP1:
+               switch (state) {
+               case AMDGPU_IRQ_STATE_DISABLE:
+                       sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET);
+                       sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 0);
+                       WREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET, sdma_cntl);
+                       break;
+               case AMDGPU_IRQ_STATE_ENABLE:
+                       sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET);
+                       sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 1);
+                       WREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET, sdma_cntl);
+                       break;
+               default:
+                       break;
+               }
+               break;
+       default:
+               break;
+       }
+       return 0;
+}
+
+static int sdma_v3_0_process_trap_irq(struct amdgpu_device *adev,
+                                     struct amdgpu_irq_src *source,
+                                     struct amdgpu_iv_entry *entry)
+{
+       u8 instance_id, queue_id;
+
+       instance_id = (entry->ring_id & 0x3) >> 0;
+       queue_id = (entry->ring_id & 0xc) >> 2;
+       DRM_DEBUG("IH: SDMA trap\n");
+       switch (instance_id) {
+       case 0:
+               switch (queue_id) {
+               case 0:
+                       amdgpu_fence_process(&adev->sdma[0].ring);
+                       break;
+               case 1:
+                       /* XXX compute */
+                       break;
+               case 2:
+                       /* XXX compute */
+                       break;
+               }
+               break;
+       case 1:
+               switch (queue_id) {
+               case 0:
+                       amdgpu_fence_process(&adev->sdma[1].ring);
+                       break;
+               case 1:
+                       /* XXX compute */
+                       break;
+               case 2:
+                       /* XXX compute */
+                       break;
+               }
+               break;
+       }
+       return 0;
+}
+
+static int sdma_v3_0_process_illegal_inst_irq(struct amdgpu_device *adev,
+                                             struct amdgpu_irq_src *source,
+                                             struct amdgpu_iv_entry *entry)
+{
+       DRM_ERROR("Illegal instruction in SDMA command stream\n");
+       schedule_work(&adev->reset_work);
+       return 0;
+}
+
+static int sdma_v3_0_set_clockgating_state(struct amdgpu_device *adev,
+                                         enum amdgpu_clockgating_state state)
+{
+       /* XXX handled via the smc on VI */
+
+       return 0;
+}
+
+static int sdma_v3_0_set_powergating_state(struct amdgpu_device *adev,
+                                         enum amdgpu_powergating_state state)
+{
+       return 0;
+}
+
+const struct amdgpu_ip_funcs sdma_v3_0_ip_funcs = {
+       .early_init = sdma_v3_0_early_init,
+       .late_init = NULL,
+       .sw_init = sdma_v3_0_sw_init,
+       .sw_fini = sdma_v3_0_sw_fini,
+       .hw_init = sdma_v3_0_hw_init,
+       .hw_fini = sdma_v3_0_hw_fini,
+       .suspend = sdma_v3_0_suspend,
+       .resume = sdma_v3_0_resume,
+       .is_idle = sdma_v3_0_is_idle,
+       .wait_for_idle = sdma_v3_0_wait_for_idle,
+       .soft_reset = sdma_v3_0_soft_reset,
+       .print_status = sdma_v3_0_print_status,
+       .set_clockgating_state = sdma_v3_0_set_clockgating_state,
+       .set_powergating_state = sdma_v3_0_set_powergating_state,
+};
+
+/**
+ * sdma_v3_0_ring_is_lockup - Check if the DMA engine is locked up
+ *
+ * @ring: amdgpu_ring structure holding ring information
+ *
+ * Check if the async DMA engine is locked up (VI).
+ * Returns true if the engine appears to be locked up, false if not.
+ */
+static bool sdma_v3_0_ring_is_lockup(struct amdgpu_ring *ring)
+{
+
+       if (sdma_v3_0_is_idle(ring->adev)) {
+               amdgpu_ring_lockup_update(ring);
+               return false;
+       }
+       return amdgpu_ring_test_lockup(ring);
+}
+
+static const struct amdgpu_ring_funcs sdma_v3_0_ring_funcs = {
+       .get_rptr = sdma_v3_0_ring_get_rptr,
+       .get_wptr = sdma_v3_0_ring_get_wptr,
+       .set_wptr = sdma_v3_0_ring_set_wptr,
+       .parse_cs = NULL,
+       .emit_ib = sdma_v3_0_ring_emit_ib,
+       .emit_fence = sdma_v3_0_ring_emit_fence,
+       .emit_semaphore = sdma_v3_0_ring_emit_semaphore,
+       .emit_vm_flush = sdma_v3_0_ring_emit_vm_flush,
+       .test_ring = sdma_v3_0_ring_test_ring,
+       .test_ib = sdma_v3_0_ring_test_ib,
+       .is_lockup = sdma_v3_0_ring_is_lockup,
+};
+
+static void sdma_v3_0_set_ring_funcs(struct amdgpu_device *adev)
+{
+       adev->sdma[0].ring.funcs = &sdma_v3_0_ring_funcs;
+       adev->sdma[1].ring.funcs = &sdma_v3_0_ring_funcs;
+}
+
+static const struct amdgpu_irq_src_funcs sdma_v3_0_trap_irq_funcs = {
+       .set = sdma_v3_0_set_trap_irq_state,
+       .process = sdma_v3_0_process_trap_irq,
+};
+
+static const struct amdgpu_irq_src_funcs sdma_v3_0_illegal_inst_irq_funcs = {
+       .process = sdma_v3_0_process_illegal_inst_irq,
+};
+
+static void sdma_v3_0_set_irq_funcs(struct amdgpu_device *adev)
+{
+       adev->sdma_trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
+       adev->sdma_trap_irq.funcs = &sdma_v3_0_trap_irq_funcs;
+       adev->sdma_illegal_inst_irq.funcs = &sdma_v3_0_illegal_inst_irq_funcs;
+}
+
+/**
+ * sdma_v3_0_emit_copy_buffer - copy buffer using the sDMA engine
+ *
+ * @ring: amdgpu_ring structure holding ring information
+ * @src_offset: src GPU address
+ * @dst_offset: dst GPU address
+ * @byte_count: number of bytes to xfer
+ *
+ * Copy GPU buffers using the DMA engine (VI).
+ * Used by the amdgpu ttm implementation to move pages if
+ * registered as the asic copy callback.
+ */
+static void sdma_v3_0_emit_copy_buffer(struct amdgpu_ring *ring,
+                                      uint64_t src_offset,
+                                      uint64_t dst_offset,
+                                      uint32_t byte_count)
+{
+       amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
+                         SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR));
+       amdgpu_ring_write(ring, byte_count);
+       amdgpu_ring_write(ring, 0); /* src/dst endian swap */
+       amdgpu_ring_write(ring, lower_32_bits(src_offset));
+       amdgpu_ring_write(ring, upper_32_bits(src_offset));
+       amdgpu_ring_write(ring, lower_32_bits(dst_offset));
+       amdgpu_ring_write(ring, upper_32_bits(dst_offset));
+}
+
+/**
+ * sdma_v3_0_emit_fill_buffer - fill buffer using the sDMA engine
+ *
+ * @ring: amdgpu_ring structure holding ring information
+ * @src_data: value to write to buffer
+ * @dst_offset: dst GPU address
+ * @byte_count: number of bytes to xfer
+ *
+ * Fill GPU buffers using the DMA engine (VI).
+ */
+static void sdma_v3_0_emit_fill_buffer(struct amdgpu_ring *ring,
+                                      uint32_t src_data,
+                                      uint64_t dst_offset,
+                                      uint32_t byte_count)
+{
+       amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_CONST_FILL));
+       amdgpu_ring_write(ring, lower_32_bits(dst_offset));
+       amdgpu_ring_write(ring, upper_32_bits(dst_offset));
+       amdgpu_ring_write(ring, src_data);
+       amdgpu_ring_write(ring, byte_count);
+}
+
+static const struct amdgpu_buffer_funcs sdma_v3_0_buffer_funcs = {
+       .copy_max_bytes = 0x1fffff,
+       .copy_num_dw = 7,
+       .emit_copy_buffer = sdma_v3_0_emit_copy_buffer,
+
+       .fill_max_bytes = 0x1fffff,
+       .fill_num_dw = 5,
+       .emit_fill_buffer = sdma_v3_0_emit_fill_buffer,
+};
+
+static void sdma_v3_0_set_buffer_funcs(struct amdgpu_device *adev)
+{
+       if (adev->mman.buffer_funcs == NULL) {
+               adev->mman.buffer_funcs = &sdma_v3_0_buffer_funcs;
+               adev->mman.buffer_funcs_ring = &adev->sdma[0].ring;
+       }
+}
+
+static const struct amdgpu_vm_pte_funcs sdma_v3_0_vm_pte_funcs = {
+       .copy_pte = sdma_v3_0_vm_copy_pte,
+       .write_pte = sdma_v3_0_vm_write_pte,
+       .set_pte_pde = sdma_v3_0_vm_set_pte_pde,
+       .pad_ib = sdma_v3_0_vm_pad_ib,
+};
+
+static void sdma_v3_0_set_vm_pte_funcs(struct amdgpu_device *adev)
+{
+       if (adev->vm_manager.vm_pte_funcs == NULL) {
+               adev->vm_manager.vm_pte_funcs = &sdma_v3_0_vm_pte_funcs;
+               adev->vm_manager.vm_pte_funcs_ring = &adev->sdma[0].ring;
+       }
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.h b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.h
new file mode 100644 (file)
index 0000000..85bf2ac
--- /dev/null
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __SDMA_V3_0_H__
+#define __SDMA_V3_0_H__
+
+extern const struct amdgpu_ip_funcs sdma_v3_0_ip_funcs;
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/smu8.h b/drivers/gpu/drm/amd/amdgpu/smu8.h
new file mode 100644 (file)
index 0000000..d758d07
--- /dev/null
@@ -0,0 +1,72 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef SMU8_H
+#define SMU8_H
+
+#pragma pack(push, 1)
+
+#define ENABLE_DEBUG_FEATURES
+
+struct SMU8_Firmware_Header {
+       uint32_t Version;
+       uint32_t ImageSize;
+       uint32_t CodeSize;
+       uint32_t HeaderSize;
+       uint32_t EntryPoint;
+       uint32_t Rtos;
+       uint32_t UcodeLoadStatus;
+       uint32_t DpmTable;
+       uint32_t FanTable;
+       uint32_t PmFuseTable;
+       uint32_t Globals;
+       uint32_t Reserved[20];
+       uint32_t Signature;
+};
+
+struct SMU8_MultimediaPowerLogData {
+       uint32_t avgTotalPower;
+       uint32_t avgGpuPower;
+       uint32_t avgUvdPower;
+       uint32_t avgVcePower;
+
+       uint32_t avgSclk;
+       uint32_t avgDclk;
+       uint32_t avgVclk;
+       uint32_t avgEclk;
+
+       uint32_t startTimeHi;
+       uint32_t startTimeLo;
+
+       uint32_t endTimeHi;
+       uint32_t endTimeLo;
+};
+
+#define SMU8_FIRMWARE_HEADER_LOCATION 0x1FF80
+#define SMU8_UNBCSR_START_ADDR 0xC0100000
+
+#define SMN_MP1_SRAM_START_ADDR 0x10000000
+
+#pragma pack(pop)
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/smu8_fusion.h b/drivers/gpu/drm/amd/amdgpu/smu8_fusion.h
new file mode 100644 (file)
index 0000000..5c9cc3c
--- /dev/null
@@ -0,0 +1,127 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef SMU8_FUSION_H
+#define SMU8_FUSION_H
+
+#include "smu8.h"
+
+#pragma pack(push, 1)
+
+#define SMU8_MAX_CUS 2
+#define SMU8_PSMS_PER_CU 4
+#define SMU8_CACS_PER_CU 4
+
+struct SMU8_GfxCuPgScoreboard {
+    uint8_t Enabled;
+    uint8_t spare[3];
+};
+
+struct SMU8_Port80MonitorTable {
+       uint32_t MmioAddress;
+       uint32_t MemoryBaseHi;
+       uint32_t MemoryBaseLo;
+       uint16_t MemoryBufferSize;
+       uint16_t MemoryPosition;
+       uint16_t PollingInterval;
+       uint8_t  EnableCsrShadow;
+       uint8_t  EnableDramShadow;
+};
+
+/* Clock Table Definitions */
+#define NUM_SCLK_LEVELS     8
+#define NUM_LCLK_LEVELS     8
+#define NUM_UVD_LEVELS      8
+#define NUM_ECLK_LEVELS     8
+#define NUM_ACLK_LEVELS     8
+
+struct SMU8_Fusion_ClkLevel {
+       uint8_t         GnbVid;
+       uint8_t         GfxVid;
+       uint8_t         DfsDid;
+       uint8_t         DeepSleepDid;
+       uint32_t        DfsBypass;
+       uint32_t        Frequency;
+};
+
+struct SMU8_Fusion_SclkBreakdownTable {
+       struct SMU8_Fusion_ClkLevel ClkLevel[NUM_SCLK_LEVELS];
+       struct SMU8_Fusion_ClkLevel DpmOffLevel;
+       /* SMU8_Fusion_ClkLevel PwrOffLevel; */
+       uint32_t    SclkValidMask;
+       uint32_t    MaxSclkIndex;
+};
+
+struct SMU8_Fusion_LclkBreakdownTable {
+       struct SMU8_Fusion_ClkLevel ClkLevel[NUM_LCLK_LEVELS];
+       struct SMU8_Fusion_ClkLevel DpmOffLevel;
+    /* SMU8_Fusion_ClkLevel PwrOffLevel; */
+       uint32_t    LclkValidMask;
+       uint32_t    MaxLclkIndex;
+};
+
+struct SMU8_Fusion_EclkBreakdownTable {
+       struct SMU8_Fusion_ClkLevel ClkLevel[NUM_ECLK_LEVELS];
+       struct SMU8_Fusion_ClkLevel DpmOffLevel;
+       struct SMU8_Fusion_ClkLevel PwrOffLevel;
+       uint32_t    EclkValidMask;
+       uint32_t    MaxEclkIndex;
+};
+
+struct SMU8_Fusion_VclkBreakdownTable {
+       struct SMU8_Fusion_ClkLevel ClkLevel[NUM_UVD_LEVELS];
+       struct SMU8_Fusion_ClkLevel DpmOffLevel;
+       struct SMU8_Fusion_ClkLevel PwrOffLevel;
+       uint32_t    VclkValidMask;
+       uint32_t    MaxVclkIndex;
+};
+
+struct SMU8_Fusion_DclkBreakdownTable {
+       struct SMU8_Fusion_ClkLevel ClkLevel[NUM_UVD_LEVELS];
+       struct SMU8_Fusion_ClkLevel DpmOffLevel;
+       struct SMU8_Fusion_ClkLevel PwrOffLevel;
+       uint32_t    DclkValidMask;
+       uint32_t    MaxDclkIndex;
+};
+
+struct SMU8_Fusion_AclkBreakdownTable {
+       struct SMU8_Fusion_ClkLevel ClkLevel[NUM_ACLK_LEVELS];
+       struct SMU8_Fusion_ClkLevel DpmOffLevel;
+       struct SMU8_Fusion_ClkLevel PwrOffLevel;
+       uint32_t    AclkValidMask;
+       uint32_t    MaxAclkIndex;
+};
+
+
+struct SMU8_Fusion_ClkTable {
+       struct SMU8_Fusion_SclkBreakdownTable SclkBreakdownTable;
+       struct SMU8_Fusion_LclkBreakdownTable LclkBreakdownTable;
+       struct SMU8_Fusion_EclkBreakdownTable EclkBreakdownTable;
+       struct SMU8_Fusion_VclkBreakdownTable VclkBreakdownTable;
+       struct SMU8_Fusion_DclkBreakdownTable DclkBreakdownTable;
+       struct SMU8_Fusion_AclkBreakdownTable AclkBreakdownTable;
+};
+
+#pragma pack(pop)
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/smu_ucode_xfer_cz.h b/drivers/gpu/drm/amd/amdgpu/smu_ucode_xfer_cz.h
new file mode 100644 (file)
index 0000000..f8ba071
--- /dev/null
@@ -0,0 +1,147 @@
+// CZ Ucode Loading Definitions
+#ifndef SMU_UCODE_XFER_CZ_H
+#define SMU_UCODE_XFER_CZ_H
+
+#define NUM_JOBLIST_ENTRIES      32
+
+#define TASK_TYPE_NO_ACTION      0
+#define TASK_TYPE_UCODE_LOAD     1
+#define TASK_TYPE_UCODE_SAVE     2
+#define TASK_TYPE_REG_LOAD       3
+#define TASK_TYPE_REG_SAVE       4
+#define TASK_TYPE_INITIALIZE     5
+
+#define TASK_ARG_REG_SMCIND      0
+#define TASK_ARG_REG_MMIO        1
+#define TASK_ARG_REG_FCH         2
+#define TASK_ARG_REG_UNB         3
+
+#define TASK_ARG_INIT_MM_PWR_LOG 0
+#define TASK_ARG_INIT_CLK_TABLE  1
+
+#define JOB_GFX_SAVE             0
+#define JOB_GFX_RESTORE          1
+#define JOB_FCH_SAVE             2
+#define JOB_FCH_RESTORE          3
+#define JOB_UNB_SAVE             4
+#define JOB_UNB_RESTORE          5
+#define JOB_GMC_SAVE             6
+#define JOB_GMC_RESTORE          7
+#define JOB_GNB_SAVE             8
+#define JOB_GNB_RESTORE          9
+
+#define IGNORE_JOB               0xff
+#define END_OF_TASK_LIST     (uint16_t)0xffff
+
+// Size of DRAM regions (in bytes) requested by SMU:
+#define SMU_DRAM_REQ_MM_PWR_LOG 48 
+
+#define UCODE_ID_SDMA0           0
+#define UCODE_ID_SDMA1           1
+#define UCODE_ID_CP_CE           2
+#define UCODE_ID_CP_PFP          3
+#define UCODE_ID_CP_ME           4
+#define UCODE_ID_CP_MEC_JT1      5
+#define UCODE_ID_CP_MEC_JT2      6
+#define UCODE_ID_GMCON_RENG      7
+#define UCODE_ID_RLC_G           8
+#define UCODE_ID_RLC_SCRATCH     9
+#define UCODE_ID_RLC_SRM_ARAM    10
+#define UCODE_ID_RLC_SRM_DRAM    11
+#define UCODE_ID_DMCU_ERAM       12
+#define UCODE_ID_DMCU_IRAM       13
+
+#define UCODE_ID_SDMA0_MASK           0x00000001       
+#define UCODE_ID_SDMA1_MASK           0x00000002        
+#define UCODE_ID_CP_CE_MASK           0x00000004      
+#define UCODE_ID_CP_PFP_MASK          0x00000008         
+#define UCODE_ID_CP_ME_MASK           0x00000010          
+#define UCODE_ID_CP_MEC_JT1_MASK      0x00000020             
+#define UCODE_ID_CP_MEC_JT2_MASK      0x00000040          
+#define UCODE_ID_GMCON_RENG_MASK      0x00000080            
+#define UCODE_ID_RLC_G_MASK           0x00000100           
+#define UCODE_ID_RLC_SCRATCH_MASK     0x00000200         
+#define UCODE_ID_RLC_SRM_ARAM_MASK    0x00000400                
+#define UCODE_ID_RLC_SRM_DRAM_MASK    0x00000800                 
+#define UCODE_ID_DMCU_ERAM_MASK       0x00001000             
+#define UCODE_ID_DMCU_IRAM_MASK       0x00002000              
+
+#define UCODE_ID_SDMA0_SIZE_BYTE           10368        
+#define UCODE_ID_SDMA1_SIZE_BYTE           10368          
+#define UCODE_ID_CP_CE_SIZE_BYTE           8576        
+#define UCODE_ID_CP_PFP_SIZE_BYTE          16768           
+#define UCODE_ID_CP_ME_SIZE_BYTE           16768            
+#define UCODE_ID_CP_MEC_JT1_SIZE_BYTE      384               
+#define UCODE_ID_CP_MEC_JT2_SIZE_BYTE      384            
+#define UCODE_ID_GMCON_RENG_SIZE_BYTE      4096              
+#define UCODE_ID_RLC_G_SIZE_BYTE           2048             
+#define UCODE_ID_RLC_SCRATCH_SIZE_BYTE     132           
+#define UCODE_ID_RLC_SRM_ARAM_SIZE_BYTE    8192                  
+#define UCODE_ID_RLC_SRM_DRAM_SIZE_BYTE    4096                   
+#define UCODE_ID_DMCU_ERAM_SIZE_BYTE       24576               
+#define UCODE_ID_DMCU_IRAM_SIZE_BYTE       1024                 
+
+#define NUM_UCODES               14
+
+typedef struct {
+       uint32_t high;
+       uint32_t low;
+} data_64_t;
+
+struct SMU_Task {
+    uint8_t type;
+    uint8_t arg;
+    uint16_t next;
+    data_64_t addr;
+    uint32_t size_bytes;
+};
+typedef struct SMU_Task SMU_Task;
+
+struct TOC {
+    uint8_t JobList[NUM_JOBLIST_ENTRIES];
+    SMU_Task tasks[1];
+};
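+
+// Note: tasks[] is declared with a single element but is presumably used
+// as a variable-length tail, with the SMU_Task "next" indices chaining
+// entries and END_OF_TASK_LIST marking the end of a chain.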
+
+// META DATA COMMAND Definitions
+#define METADATA_CMD_MODE0         0x00000103 
+#define METADATA_CMD_MODE1         0x00000113 
+#define METADATA_CMD_MODE2         0x00000123 
+#define METADATA_CMD_MODE3         0x00000133
+#define METADATA_CMD_DELAY         0x00000203
+#define METADATA_CMD_CHNG_REGSPACE 0x00000303
+#define METADATA_PERFORM_ON_SAVE   0x00001000
+#define METADATA_PERFORM_ON_LOAD   0x00002000
+#define METADATA_CMD_ARG_MASK      0xFFFF0000
+#define METADATA_CMD_ARG_SHIFT     16
+
+// Simple register addr/data fields
+struct SMU_MetaData_Mode0 {
+    uint32_t register_address;
+    uint32_t register_data;
+};
+typedef struct SMU_MetaData_Mode0 SMU_MetaData_Mode0;
+
+// Register addr/data with mask
+struct SMU_MetaData_Mode1 {
+    uint32_t register_address;
+    uint32_t register_mask;
+    uint32_t register_data;
+};
+typedef struct SMU_MetaData_Mode1 SMU_MetaData_Mode1;
+
+struct SMU_MetaData_Mode2 {
+    uint32_t register_address;
+    uint32_t register_mask;
+    uint32_t target_value;
+};
+typedef struct SMU_MetaData_Mode2 SMU_MetaData_Mode2;
+
+// Always write data (even on a save operation)
+struct SMU_MetaData_Mode3 {
+    uint32_t register_address;
+    uint32_t register_mask;
+    uint32_t register_data;
+};
+typedef struct SMU_MetaData_Mode3 SMU_MetaData_Mode3;
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/smu_ucode_xfer_vi.h b/drivers/gpu/drm/amd/amdgpu/smu_ucode_xfer_vi.h
new file mode 100644 (file)
index 0000000..c24a81e
--- /dev/null
@@ -0,0 +1,100 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef SMU_UCODE_XFER_VI_H
+#define SMU_UCODE_XFER_VI_H
+
+#define SMU_DRAMData_TOC_VERSION  1
+#define MAX_IH_REGISTER_COUNT     65535
+#define SMU_DIGEST_SIZE_BYTES     20
+#define SMU_FB_SIZE_BYTES         1048576
+#define SMU_MAX_ENTRIES           12
+
+#define UCODE_ID_SMU              0
+#define UCODE_ID_SDMA0            1
+#define UCODE_ID_SDMA1            2
+#define UCODE_ID_CP_CE            3
+#define UCODE_ID_CP_PFP           4
+#define UCODE_ID_CP_ME            5
+#define UCODE_ID_CP_MEC           6
+#define UCODE_ID_CP_MEC_JT1       7
+#define UCODE_ID_CP_MEC_JT2       8
+#define UCODE_ID_GMCON_RENG       9
+#define UCODE_ID_RLC_G            10
+#define UCODE_ID_IH_REG_RESTORE   11
+#define UCODE_ID_VBIOS            12
+#define UCODE_ID_MISC_METADATA    13
+#define UCODE_ID_RLC_SCRATCH      32
+#define UCODE_ID_RLC_SRM_ARAM     33
+#define UCODE_ID_RLC_SRM_DRAM     34
+#define UCODE_ID_MEC_STORAGE      35
+#define UCODE_ID_VBIOS_PARAMETERS 36
+#define UCODE_META_DATA           0xFF
+
+#define UCODE_ID_SMU_MASK             0x00000001
+#define UCODE_ID_SDMA0_MASK           0x00000002
+#define UCODE_ID_SDMA1_MASK           0x00000004
+#define UCODE_ID_CP_CE_MASK           0x00000008
+#define UCODE_ID_CP_PFP_MASK          0x00000010
+#define UCODE_ID_CP_ME_MASK           0x00000020
+#define UCODE_ID_CP_MEC_MASK          0x00000040
+#define UCODE_ID_CP_MEC_JT1_MASK      0x00000080
+#define UCODE_ID_CP_MEC_JT2_MASK      0x00000100
+#define UCODE_ID_GMCON_RENG_MASK      0x00000200
+#define UCODE_ID_RLC_G_MASK           0x00000400
+#define UCODE_ID_IH_REG_RESTORE_MASK  0x00000800
+#define UCODE_ID_VBIOS_MASK           0x00001000
+
+#define UCODE_FLAG_UNHALT_MASK   0x1
+
+struct SMU_Entry {
+#ifndef __BIG_ENDIAN
+       uint16_t id;
+       uint16_t version;
+       uint32_t image_addr_high;
+       uint32_t image_addr_low;
+       uint32_t meta_data_addr_high;
+       uint32_t meta_data_addr_low;
+       uint32_t data_size_byte;
+       uint16_t flags;
+       uint16_t num_register_entries;
+#else
+       uint16_t version;
+       uint16_t id;
+       uint32_t image_addr_high;
+       uint32_t image_addr_low;
+       uint32_t meta_data_addr_high;
+       uint32_t meta_data_addr_low;
+       uint32_t data_size_byte;
+       uint16_t num_register_entries;
+       uint16_t flags;
+#endif
+};
+
+struct SMU_DRAMData_TOC {
+       uint32_t structure_version;
+       uint32_t num_entries;
+       struct SMU_Entry entry[SMU_MAX_ENTRIES];
+};
+
+#endif
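
As an aside (not part of the patch): the TOC above is what the driver builds in system memory so the SMU knows where each microcode image lives before transferring it. A minimal sketch of filling one entry, using only names from this header; the helper itself and the address handling are illustrative, not driver code.

    /* Illustrative only: describe one ucode image in the SMU DRAM TOC. */
    static void example_fill_toc_entry(struct SMU_DRAMData_TOC *toc, uint16_t id,
                                       uint64_t mc_addr, uint32_t size_bytes)
    {
            struct SMU_Entry *entry = &toc->entry[toc->num_entries++];

            entry->id = id;                         /* e.g. UCODE_ID_SDMA0 */
            entry->version = 0;                     /* would come from the ucode header */
            entry->image_addr_high = upper_32_bits(mc_addr);
            entry->image_addr_low = lower_32_bits(mc_addr);
            entry->meta_data_addr_high = 0;
            entry->meta_data_addr_low = 0;
            entry->data_size_byte = size_bytes;
            entry->flags = 0;                       /* e.g. UCODE_FLAG_UNHALT_MASK where applicable */
            entry->num_register_entries = 0;
    }

The caller would first set structure_version to SMU_DRAMData_TOC_VERSION and num_entries to 0, and is limited to SMU_MAX_ENTRIES entries.
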
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c b/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c
new file mode 100644 (file)
index 0000000..98bd707
--- /dev/null
@@ -0,0 +1,172 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/firmware.h>
+#include "drmP.h"
+#include "amdgpu.h"
+#include "tonga_smumgr.h"
+
+MODULE_FIRMWARE("radeon/tonga_smc.bin");
+
+static void tonga_dpm_set_funcs(struct amdgpu_device *adev);
+
+static int tonga_dpm_early_init(struct amdgpu_device *adev)
+{
+       tonga_dpm_set_funcs(adev);
+
+       return 0;
+}
+
+static int tonga_dpm_init_microcode(struct amdgpu_device *adev)
+{
+       char fw_name[30] = "radeon/tonga_smc.bin";
+       int err;
+
+       err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
+       if (err)
+               goto out;
+       err = amdgpu_ucode_validate(adev->pm.fw);
+
+out:
+       if (err) {
+               DRM_ERROR("Failed to load firmware \"%s\"", fw_name);
+               release_firmware(adev->pm.fw);
+               adev->pm.fw = NULL;
+       }
+       return err;
+}
+
+static int tonga_dpm_sw_init(struct amdgpu_device *adev)
+{
+       int ret;
+
+       ret = tonga_dpm_init_microcode(adev);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
+static int tonga_dpm_sw_fini(struct amdgpu_device *adev)
+{
+       return 0;
+}
+
+static int tonga_dpm_hw_init(struct amdgpu_device *adev)
+{
+       int ret;
+
+       mutex_lock(&adev->pm.mutex);
+
+       ret = tonga_smu_init(adev);
+       if (ret) {
+               DRM_ERROR("SMU initialization failed\n");
+               goto fail;
+       }
+
+       ret = tonga_smu_start(adev);
+       if (ret) {
+               DRM_ERROR("SMU start failed\n");
+               goto fail;
+       }
+
+       mutex_unlock(&adev->pm.mutex);
+       return 0;
+
+fail:
+       adev->firmware.smu_load = false;
+       mutex_unlock(&adev->pm.mutex);
+       return -EINVAL;
+}
+
+static int tonga_dpm_hw_fini(struct amdgpu_device *adev)
+{
+       mutex_lock(&adev->pm.mutex);
+       tonga_smu_fini(adev);
+       mutex_unlock(&adev->pm.mutex);
+       return 0;
+}
+
+static int tonga_dpm_suspend(struct amdgpu_device *adev)
+{
+       tonga_dpm_hw_fini(adev);
+
+       return 0;
+}
+
+static int tonga_dpm_resume(struct amdgpu_device *adev)
+{
+       tonga_dpm_hw_init(adev);
+
+       return 0;
+}
+
+static int tonga_dpm_set_clockgating_state(struct amdgpu_device *adev,
+                       enum amdgpu_clockgating_state state)
+{
+       return 0;
+}
+
+static int tonga_dpm_set_powergating_state(struct amdgpu_device *adev,
+                       enum amdgpu_powergating_state state)
+{
+       return 0;
+}
+
+const struct amdgpu_ip_funcs tonga_dpm_ip_funcs = {
+       .early_init = tonga_dpm_early_init,
+       .late_init = NULL,
+       .sw_init = tonga_dpm_sw_init,
+       .sw_fini = tonga_dpm_sw_fini,
+       .hw_init = tonga_dpm_hw_init,
+       .hw_fini = tonga_dpm_hw_fini,
+       .suspend = tonga_dpm_suspend,
+       .resume = tonga_dpm_resume,
+       .is_idle = NULL,
+       .wait_for_idle = NULL,
+       .soft_reset = NULL,
+       .print_status = NULL,
+       .set_clockgating_state = tonga_dpm_set_clockgating_state,
+       .set_powergating_state = tonga_dpm_set_powergating_state,
+};
+
+static const struct amdgpu_dpm_funcs tonga_dpm_funcs = {
+       .get_temperature = NULL,
+       .pre_set_power_state = NULL,
+       .set_power_state = NULL,
+       .post_set_power_state = NULL,
+       .display_configuration_changed = NULL,
+       .get_sclk = NULL,
+       .get_mclk = NULL,
+       .print_power_state = NULL,
+       .debugfs_print_current_performance_level = NULL,
+       .force_performance_level = NULL,
+       .vblank_too_short = NULL,
+       .powergate_uvd = NULL,
+};
+
+static void tonga_dpm_set_funcs(struct amdgpu_device *adev)
+{
+       if (NULL == adev->pm.funcs)
+               adev->pm.funcs = &tonga_dpm_funcs;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
new file mode 100644 (file)
index 0000000..cff1b8b
--- /dev/null
@@ -0,0 +1,458 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "drmP.h"
+#include "amdgpu.h"
+#include "amdgpu_ih.h"
+#include "vid.h"
+
+#include "oss/oss_3_0_d.h"
+#include "oss/oss_3_0_sh_mask.h"
+
+#include "bif/bif_5_1_d.h"
+#include "bif/bif_5_1_sh_mask.h"
+
+/*
+ * Interrupts
+ * Starting with r6xx, interrupts are handled via a ring buffer.
+ * Ring buffers are areas of GPU accessible memory that the GPU
+ * writes interrupt vectors into and the host reads vectors out of.
+ * There is a rptr (read pointer) that determines where the
+ * host is currently reading, and a wptr (write pointer)
+ * which determines where the GPU has written.  When the
+ * pointers are equal, the ring is idle.  When the GPU
+ * writes vectors to the ring buffer, it increments the
+ * wptr.  When there is an interrupt, the host then starts
+ * fetching vectors and processing them until the pointers are
+ * equal again at which point it updates the rptr.
+ */
+
+static void tonga_ih_set_interrupt_funcs(struct amdgpu_device *adev);
+
+/**
+ * tonga_ih_enable_interrupts - Enable the interrupt ring buffer
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Enable the interrupt ring buffer (VI).
+ */
+static void tonga_ih_enable_interrupts(struct amdgpu_device *adev)
+{
+       u32 ih_rb_cntl = RREG32(mmIH_RB_CNTL);
+
+       ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 1);
+       ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, ENABLE_INTR, 1);
+       WREG32(mmIH_RB_CNTL, ih_rb_cntl);
+       adev->irq.ih.enabled = true;
+}
+
+/**
+ * tonga_ih_disable_interrupts - Disable the interrupt ring buffer
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Disable the interrupt ring buffer (VI).
+ */
+static void tonga_ih_disable_interrupts(struct amdgpu_device *adev)
+{
+       u32 ih_rb_cntl = RREG32(mmIH_RB_CNTL);
+
+       ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 0);
+       ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, ENABLE_INTR, 0);
+       WREG32(mmIH_RB_CNTL, ih_rb_cntl);
+       /* set rptr, wptr to 0 */
+       WREG32(mmIH_RB_RPTR, 0);
+       WREG32(mmIH_RB_WPTR, 0);
+       adev->irq.ih.enabled = false;
+       adev->irq.ih.rptr = 0;
+}
+
+/**
+ * tonga_ih_irq_init - init and enable the interrupt ring
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Allocate a ring buffer for the interrupt controller,
+ * enable the RLC, disable interrupts, enable the IH
+ * ring buffer and enable it (VI).
+ * Called at device load and resume.
+ * Returns 0 for success, errors for failure.
+ */
+static int tonga_ih_irq_init(struct amdgpu_device *adev)
+{
+       int ret = 0;
+       int rb_bufsz;
+       u32 interrupt_cntl, ih_rb_cntl, ih_doorbell_rtpr;
+       u64 wptr_off;
+
+       /* disable irqs */
+       tonga_ih_disable_interrupts(adev);
+
+       /* setup interrupt control */
+       WREG32(mmINTERRUPT_CNTL2, adev->dummy_page.addr >> 8);
+       interrupt_cntl = RREG32(mmINTERRUPT_CNTL);
+       /* INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=0 - dummy read disabled with msi, enabled without msi
+        * INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=1 - dummy read controlled by IH_DUMMY_RD_EN
+        */
+       interrupt_cntl = REG_SET_FIELD(interrupt_cntl, INTERRUPT_CNTL, IH_DUMMY_RD_OVERRIDE, 0);
+       /* INTERRUPT_CNTL__IH_REQ_NONSNOOP_EN_MASK=1 if ring is in non-cacheable memory, e.g., vram */
+       interrupt_cntl = REG_SET_FIELD(interrupt_cntl, INTERRUPT_CNTL, IH_REQ_NONSNOOP_EN, 0);
+       WREG32(mmINTERRUPT_CNTL, interrupt_cntl);
+
+       /* Ring Buffer base. [39:8] of 40-bit address of the beginning of the ring buffer*/
+       if (adev->irq.ih.use_bus_addr)
+               WREG32(mmIH_RB_BASE, adev->irq.ih.rb_dma_addr >> 8);
+       else
+               WREG32(mmIH_RB_BASE, adev->irq.ih.gpu_addr >> 8);
+
+       rb_bufsz = order_base_2(adev->irq.ih.ring_size / 4);
+       ih_rb_cntl = REG_SET_FIELD(0, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
+       ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_SIZE, rb_bufsz);
+       /* Ring Buffer write pointer writeback. If enabled, IH_RB_WPTR register value is written to memory */
+       ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, WPTR_WRITEBACK_ENABLE, 1);
+       ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_VMID, 0);
+
+       if (adev->irq.msi_enabled)
+               ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RPTR_REARM, 1);
+
+       WREG32(mmIH_RB_CNTL, ih_rb_cntl);
+
+       /* set the writeback address whether it's enabled or not */
+       if (adev->irq.ih.use_bus_addr)
+               wptr_off = adev->irq.ih.rb_dma_addr + (adev->irq.ih.wptr_offs * 4);
+       else
+               wptr_off = adev->wb.gpu_addr + (adev->irq.ih.wptr_offs * 4);
+       WREG32(mmIH_RB_WPTR_ADDR_LO, lower_32_bits(wptr_off));
+       WREG32(mmIH_RB_WPTR_ADDR_HI, upper_32_bits(wptr_off) & 0xFF);
+
+       /* set rptr, wptr to 0 */
+       WREG32(mmIH_RB_RPTR, 0);
+       WREG32(mmIH_RB_WPTR, 0);
+
+       ih_doorbell_rtpr = RREG32(mmIH_DOORBELL_RPTR);
+       if (adev->irq.ih.use_doorbell) {
+               ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr, IH_DOORBELL_RPTR,
+                                                OFFSET, adev->irq.ih.doorbell_index);
+               ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr, IH_DOORBELL_RPTR,
+                                                ENABLE, 1);
+       } else {
+               ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr, IH_DOORBELL_RPTR,
+                                                ENABLE, 0);
+       }
+       WREG32(mmIH_DOORBELL_RPTR, ih_doorbell_rtpr);
+
+       pci_set_master(adev->pdev);
+
+       /* enable interrupts */
+       tonga_ih_enable_interrupts(adev);
+
+       return ret;
+}
+
+/**
+ * tonga_ih_irq_disable - disable interrupts
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Disable interrupts on the hw (VI).
+ */
+static void tonga_ih_irq_disable(struct amdgpu_device *adev)
+{
+       tonga_ih_disable_interrupts(adev);
+
+       /* Wait and acknowledge irq */
+       mdelay(1);
+}
+
+/**
+ * tonga_ih_get_wptr - get the IH ring buffer wptr
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Get the IH ring buffer wptr from either the register
+ * or the writeback memory buffer (VI).  Also check for
+ * ring buffer overflow and deal with it.
+ * Used by the amdgpu interrupt handling code (VI).
+ * Returns the value of the wptr.
+ */
+static u32 tonga_ih_get_wptr(struct amdgpu_device *adev)
+{
+       u32 wptr, tmp;
+
+       if (adev->irq.ih.use_bus_addr)
+               wptr = le32_to_cpu(adev->irq.ih.ring[adev->irq.ih.wptr_offs]);
+       else
+               wptr = le32_to_cpu(adev->wb.wb[adev->irq.ih.wptr_offs]);
+
+       if (REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) {
+               wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);
+               /* When a ring buffer overflow happens, start parsing interrupts
+                * from the last vector that was not overwritten (wptr + 16).
+                * Hopefully this should allow us to catch up.
+                */
+               dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
+                       wptr, adev->irq.ih.rptr, (wptr + 16) & adev->irq.ih.ptr_mask);
+               adev->irq.ih.rptr = (wptr + 16) & adev->irq.ih.ptr_mask;
+               tmp = RREG32(mmIH_RB_CNTL);
+               tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
+               WREG32(mmIH_RB_CNTL, tmp);
+       }
+       return (wptr & adev->irq.ih.ptr_mask);
+}
+
+/**
+ * tonga_ih_decode_iv - decode an interrupt vector
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Decodes the interrupt vector at the current rptr
+ * position and also advances the position.
+ */
+static void tonga_ih_decode_iv(struct amdgpu_device *adev,
+                                struct amdgpu_iv_entry *entry)
+{
+       /* wptr/rptr are in bytes! */
+       u32 ring_index = adev->irq.ih.rptr >> 2;
+       uint32_t dw[4];
+
+       dw[0] = le32_to_cpu(adev->irq.ih.ring[ring_index + 0]);
+       dw[1] = le32_to_cpu(adev->irq.ih.ring[ring_index + 1]);
+       dw[2] = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]);
+       dw[3] = le32_to_cpu(adev->irq.ih.ring[ring_index + 3]);
+
+       entry->src_id = dw[0] & 0xff;
+       entry->src_data = dw[1] & 0xfffffff;
+       entry->ring_id = dw[2] & 0xff;
+       entry->vm_id = (dw[2] >> 8) & 0xff;
+       entry->pas_id = (dw[2] >> 16) & 0xffff;
+
+       /* wptr/rptr are in bytes! */
+       adev->irq.ih.rptr += 16;
+}
+
+/**
+ * tonga_ih_set_rptr - set the IH ring buffer rptr
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Set the IH ring buffer rptr.
+ */
+static void tonga_ih_set_rptr(struct amdgpu_device *adev)
+{
+       if (adev->irq.ih.use_doorbell) {
+               /* XXX check if swapping is necessary on BE */
+               if (adev->irq.ih.use_bus_addr)
+                       adev->irq.ih.ring[adev->irq.ih.rptr_offs] = adev->irq.ih.rptr;
+               else
+                       adev->wb.wb[adev->irq.ih.rptr_offs] = adev->irq.ih.rptr;
+               WDOORBELL32(adev->irq.ih.doorbell_index, adev->irq.ih.rptr);
+       } else {
+               WREG32(mmIH_RB_RPTR, adev->irq.ih.rptr);
+       }
+}
+
+static int tonga_ih_early_init(struct amdgpu_device *adev)
+{
+       tonga_ih_set_interrupt_funcs(adev);
+       return 0;
+}
+
+static int tonga_ih_sw_init(struct amdgpu_device *adev)
+{
+       int r;
+
+       r = amdgpu_ih_ring_init(adev, 4 * 1024, true);
+       if (r)
+               return r;
+
+       adev->irq.ih.use_doorbell = true;
+       adev->irq.ih.doorbell_index = AMDGPU_DOORBELL_IH;
+
+       r = amdgpu_irq_init(adev);
+
+       return r;
+}
+
+static int tonga_ih_sw_fini(struct amdgpu_device *adev)
+{
+       amdgpu_irq_fini(adev);
+       amdgpu_ih_ring_fini(adev);
+
+       return 0;
+}
+
+static int tonga_ih_hw_init(struct amdgpu_device *adev)
+{
+       int r;
+
+       r = tonga_ih_irq_init(adev);
+       if (r)
+               return r;
+
+       return 0;
+}
+
+static int tonga_ih_hw_fini(struct amdgpu_device *adev)
+{
+       tonga_ih_irq_disable(adev);
+
+       return 0;
+}
+
+static int tonga_ih_suspend(struct amdgpu_device *adev)
+{
+       return tonga_ih_hw_fini(adev);
+}
+
+static int tonga_ih_resume(struct amdgpu_device *adev)
+{
+       return tonga_ih_hw_init(adev);
+}
+
+static bool tonga_ih_is_idle(struct amdgpu_device *adev)
+{
+       u32 tmp = RREG32(mmSRBM_STATUS);
+
+       if (REG_GET_FIELD(tmp, SRBM_STATUS, IH_BUSY))
+               return false;
+
+       return true;
+}
+
+static int tonga_ih_wait_for_idle(struct amdgpu_device *adev)
+{
+       unsigned i;
+       u32 tmp;
+
+       for (i = 0; i < adev->usec_timeout; i++) {
+               /* read SRBM_STATUS */
+               tmp = RREG32(mmSRBM_STATUS);
+               if (!REG_GET_FIELD(tmp, SRBM_STATUS, IH_BUSY))
+                       return 0;
+               udelay(1);
+       }
+       return -ETIMEDOUT;
+}
+
+static void tonga_ih_print_status(struct amdgpu_device *adev)
+{
+       dev_info(adev->dev, "TONGA IH registers\n");
+       dev_info(adev->dev, "  SRBM_STATUS=0x%08X\n",
+               RREG32(mmSRBM_STATUS));
+       dev_info(adev->dev, "  SRBM_STATUS2=0x%08X\n",
+               RREG32(mmSRBM_STATUS2));
+       dev_info(adev->dev, "  INTERRUPT_CNTL=0x%08X\n",
+                RREG32(mmINTERRUPT_CNTL));
+       dev_info(adev->dev, "  INTERRUPT_CNTL2=0x%08X\n",
+                RREG32(mmINTERRUPT_CNTL2));
+       dev_info(adev->dev, "  IH_CNTL=0x%08X\n",
+                RREG32(mmIH_CNTL));
+       dev_info(adev->dev, "  IH_RB_CNTL=0x%08X\n",
+                RREG32(mmIH_RB_CNTL));
+       dev_info(adev->dev, "  IH_RB_BASE=0x%08X\n",
+                RREG32(mmIH_RB_BASE));
+       dev_info(adev->dev, "  IH_RB_WPTR_ADDR_LO=0x%08X\n",
+                RREG32(mmIH_RB_WPTR_ADDR_LO));
+       dev_info(adev->dev, "  IH_RB_WPTR_ADDR_HI=0x%08X\n",
+                RREG32(mmIH_RB_WPTR_ADDR_HI));
+       dev_info(adev->dev, "  IH_RB_RPTR=0x%08X\n",
+                RREG32(mmIH_RB_RPTR));
+       dev_info(adev->dev, "  IH_RB_WPTR=0x%08X\n",
+                RREG32(mmIH_RB_WPTR));
+}
+
+static int tonga_ih_soft_reset(struct amdgpu_device *adev)
+{
+       u32 srbm_soft_reset = 0;
+       u32 tmp = RREG32(mmSRBM_STATUS);
+
+       if (tmp & SRBM_STATUS__IH_BUSY_MASK)
+               srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET,
+                                               SOFT_RESET_IH, 1);
+
+       if (srbm_soft_reset) {
+               tonga_ih_print_status(adev);
+
+               tmp = RREG32(mmSRBM_SOFT_RESET);
+               tmp |= srbm_soft_reset;
+               dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
+               WREG32(mmSRBM_SOFT_RESET, tmp);
+               tmp = RREG32(mmSRBM_SOFT_RESET);
+
+               udelay(50);
+
+               tmp &= ~srbm_soft_reset;
+               WREG32(mmSRBM_SOFT_RESET, tmp);
+               tmp = RREG32(mmSRBM_SOFT_RESET);
+
+               /* Wait a little for things to settle down */
+               udelay(50);
+
+               tonga_ih_print_status(adev);
+       }
+
+       return 0;
+}
+
+static int tonga_ih_set_clockgating_state(struct amdgpu_device *adev,
+                                         enum amdgpu_clockgating_state state)
+{
+       // TODO
+       return 0;
+}
+
+static int tonga_ih_set_powergating_state(struct amdgpu_device *adev,
+                                         enum amdgpu_powergating_state state)
+{
+       // TODO
+       return 0;
+}
+
+const struct amdgpu_ip_funcs tonga_ih_ip_funcs = {
+       .early_init = tonga_ih_early_init,
+       .late_init = NULL,
+       .sw_init = tonga_ih_sw_init,
+       .sw_fini = tonga_ih_sw_fini,
+       .hw_init = tonga_ih_hw_init,
+       .hw_fini = tonga_ih_hw_fini,
+       .suspend = tonga_ih_suspend,
+       .resume = tonga_ih_resume,
+       .is_idle = tonga_ih_is_idle,
+       .wait_for_idle = tonga_ih_wait_for_idle,
+       .soft_reset = tonga_ih_soft_reset,
+       .print_status = tonga_ih_print_status,
+       .set_clockgating_state = tonga_ih_set_clockgating_state,
+       .set_powergating_state = tonga_ih_set_powergating_state,
+};
+
+static const struct amdgpu_ih_funcs tonga_ih_funcs = {
+       .get_wptr = tonga_ih_get_wptr,
+       .decode_iv = tonga_ih_decode_iv,
+       .set_rptr = tonga_ih_set_rptr
+};
+
+static void tonga_ih_set_interrupt_funcs(struct amdgpu_device *adev)
+{
+       if (adev->irq.ih_funcs == NULL)
+               adev->irq.ih_funcs = &tonga_ih_funcs;
+}
+
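
As an aside (not part of the patch): the rptr/wptr scheme described in the comment at the top of tonga_ih.c boils down to a consumer loop over 16-byte vectors. A hedged sketch of how the hooks above fit together; the loop itself is illustrative and not the actual amdgpu dispatch path.

    /* Illustrative only: drain the IH ring using the helpers defined above. */
    static void example_process_ih_ring(struct amdgpu_device *adev)
    {
            u32 wptr = tonga_ih_get_wptr(adev);     /* also clears RB_OVERFLOW */

            while (adev->irq.ih.rptr != wptr) {
                    struct amdgpu_iv_entry entry;

                    /* each vector is 4 dwords; decode_iv advances rptr by 16 bytes */
                    tonga_ih_decode_iv(adev, &entry);
                    /* ... route on entry.src_id / entry.src_data here ... */
            }

            tonga_ih_set_rptr(adev);                /* publish how far the host has read */
    }
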
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.h b/drivers/gpu/drm/amd/amdgpu/tonga_ih.h
new file mode 100644 (file)
index 0000000..7c9bae8
--- /dev/null
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __TONGA_IH_H__
+#define __TONGA_IH_H__
+
+extern const struct amdgpu_ip_funcs tonga_ih_ip_funcs;
+
+#endif /* __TONGA_IH_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ppsmc.h b/drivers/gpu/drm/amd/amdgpu/tonga_ppsmc.h
new file mode 100644 (file)
index 0000000..811781f
--- /dev/null
@@ -0,0 +1,198 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef TONGA_PP_SMC_H
+#define TONGA_PP_SMC_H
+
+#pragma pack(push, 1)
+
+#define PPSMC_SWSTATE_FLAG_DC                           0x01
+#define PPSMC_SWSTATE_FLAG_UVD                          0x02
+#define PPSMC_SWSTATE_FLAG_VCE                          0x04
+#define PPSMC_SWSTATE_FLAG_PCIE_X1                      0x08
+
+#define PPSMC_THERMAL_PROTECT_TYPE_INTERNAL             0x00
+#define PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL             0x01
+#define PPSMC_THERMAL_PROTECT_TYPE_NONE                 0xff 
+
+#define PPSMC_SYSTEMFLAG_GPIO_DC                        0x01 
+#define PPSMC_SYSTEMFLAG_STEPVDDC                       0x02 
+#define PPSMC_SYSTEMFLAG_GDDR5                          0x04 
+
+#define PPSMC_SYSTEMFLAG_DISABLE_BABYSTEP               0x08 
+
+#define PPSMC_SYSTEMFLAG_REGULATOR_HOT                  0x10 
+#define PPSMC_SYSTEMFLAG_REGULATOR_HOT_ANALOG           0x20 
+#define PPSMC_SYSTEMFLAG_12CHANNEL                      0x40
+
+#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_MASK              0x07
+#define PPSMC_EXTRAFLAGS_AC2DC_DONT_WAIT_FOR_VBLANK     0x08
+
+#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_GOTODPMLOWSTATE   0x00
+#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_GOTOINITIALSTATE  0x01
+
+#define PPSMC_EXTRAFLAGS_AC2DC_GPIO5_POLARITY_HIGH      0x10
+#define PPSMC_EXTRAFLAGS_DRIVER_TO_GPIO17               0x20
+#define PPSMC_EXTRAFLAGS_PCC_TO_GPIO17                  0x40
+
+#define PPSMC_DPM2FLAGS_TDPCLMP                         0x01 
+#define PPSMC_DPM2FLAGS_PWRSHFT                         0x02 
+#define PPSMC_DPM2FLAGS_OCP                             0x04 
+
+#define PPSMC_DISPLAY_WATERMARK_LOW                     0
+#define PPSMC_DISPLAY_WATERMARK_HIGH                    1
+
+#define PPSMC_STATEFLAG_AUTO_PULSE_SKIP                0x01
+#define PPSMC_STATEFLAG_POWERBOOST                     0x02
+#define PPSMC_STATEFLAG_PSKIP_ON_TDP_FAULT             0x04
+#define PPSMC_STATEFLAG_POWERSHIFT                     0x08
+#define PPSMC_STATEFLAG_SLOW_READ_MARGIN               0x10
+#define PPSMC_STATEFLAG_DEEPSLEEP_THROTTLE             0x20
+#define PPSMC_STATEFLAG_DEEPSLEEP_BYPASS               0x40
+
+#define FDO_MODE_HARDWARE 0
+#define FDO_MODE_PIECE_WISE_LINEAR 1
+
+enum FAN_CONTROL {
+       FAN_CONTROL_FUZZY,
+       FAN_CONTROL_TABLE
+};
+
+#define PPSMC_Result_OK                                ((uint16_t)0x01)
+#define PPSMC_Result_NoMore                            ((uint16_t)0x02)
+#define PPSMC_Result_NotNow                            ((uint16_t)0x03)
+#define PPSMC_Result_Failed                            ((uint16_t)0xFF) 
+#define PPSMC_Result_UnknownCmd                        ((uint16_t)0xFE) 
+#define PPSMC_Result_UnknownVT                         ((uint16_t)0xFD) 
+
+typedef uint16_t PPSMC_Result;
+
+#define PPSMC_isERROR(x) ((uint16_t)0x80 & (x))
+
+#define PPSMC_MSG_Halt                                 ((uint16_t)0x10)
+#define PPSMC_MSG_Resume                               ((uint16_t)0x11)
+#define PPSMC_MSG_EnableDPMLevel                       ((uint16_t)0x12)
+#define PPSMC_MSG_ZeroLevelsDisabled                   ((uint16_t)0x13)
+#define PPSMC_MSG_OneLevelsDisabled                    ((uint16_t)0x14)
+#define PPSMC_MSG_TwoLevelsDisabled                    ((uint16_t)0x15)
+#define PPSMC_MSG_EnableThermalInterrupt               ((uint16_t)0x16)
+#define PPSMC_MSG_RunningOnAC                          ((uint16_t)0x17)
+#define PPSMC_MSG_LevelUp                              ((uint16_t)0x18)
+#define PPSMC_MSG_LevelDown                            ((uint16_t)0x19)
+#define PPSMC_MSG_ResetDPMCounters                     ((uint16_t)0x1a)
+#define PPSMC_MSG_SwitchToSwState                      ((uint16_t)0x20)
+#define PPSMC_MSG_SwitchToSwStateLast                  ((uint16_t)0x3f)
+#define PPSMC_MSG_SwitchToInitialState                 ((uint16_t)0x40)
+#define PPSMC_MSG_NoForcedLevel                        ((uint16_t)0x41)
+#define PPSMC_MSG_ForceHigh                            ((uint16_t)0x42)
+#define PPSMC_MSG_ForceMediumOrHigh                    ((uint16_t)0x43)
+#define PPSMC_MSG_SwitchToMinimumPower                 ((uint16_t)0x51)
+#define PPSMC_MSG_ResumeFromMinimumPower               ((uint16_t)0x52)
+#define PPSMC_MSG_EnableCac                            ((uint16_t)0x53)
+#define PPSMC_MSG_DisableCac                           ((uint16_t)0x54)
+#define PPSMC_DPMStateHistoryStart                     ((uint16_t)0x55)
+#define PPSMC_DPMStateHistoryStop                      ((uint16_t)0x56)
+#define PPSMC_CACHistoryStart                          ((uint16_t)0x57)
+#define PPSMC_CACHistoryStop                           ((uint16_t)0x58)
+#define PPSMC_TDPClampingActive                        ((uint16_t)0x59)
+#define PPSMC_TDPClampingInactive                      ((uint16_t)0x5A)
+#define PPSMC_StartFanControl                          ((uint16_t)0x5B)
+#define PPSMC_StopFanControl                           ((uint16_t)0x5C)
+#define PPSMC_NoDisplay                                ((uint16_t)0x5D)
+#define PPSMC_HasDisplay                               ((uint16_t)0x5E)
+#define PPSMC_MSG_UVDPowerOFF                          ((uint16_t)0x60)
+#define PPSMC_MSG_UVDPowerON                           ((uint16_t)0x61)
+#define PPSMC_MSG_EnableULV                            ((uint16_t)0x62)
+#define PPSMC_MSG_DisableULV                           ((uint16_t)0x63)
+#define PPSMC_MSG_EnterULV                             ((uint16_t)0x64)
+#define PPSMC_MSG_ExitULV                              ((uint16_t)0x65)
+#define PPSMC_PowerShiftActive                         ((uint16_t)0x6A)
+#define PPSMC_PowerShiftInactive                       ((uint16_t)0x6B)
+#define PPSMC_OCPActive                                ((uint16_t)0x6C)
+#define PPSMC_OCPInactive                              ((uint16_t)0x6D)
+#define PPSMC_CACLongTermAvgEnable                     ((uint16_t)0x6E)
+#define PPSMC_CACLongTermAvgDisable                    ((uint16_t)0x6F)
+#define PPSMC_MSG_InferredStateSweep_Start             ((uint16_t)0x70)
+#define PPSMC_MSG_InferredStateSweep_Stop              ((uint16_t)0x71)
+#define PPSMC_MSG_SwitchToLowestInfState               ((uint16_t)0x72)
+#define PPSMC_MSG_SwitchToNonInfState                  ((uint16_t)0x73)
+#define PPSMC_MSG_AllStateSweep_Start                  ((uint16_t)0x74)
+#define PPSMC_MSG_AllStateSweep_Stop                   ((uint16_t)0x75)
+#define PPSMC_MSG_SwitchNextLowerInfState              ((uint16_t)0x76)
+#define PPSMC_MSG_SwitchNextHigherInfState             ((uint16_t)0x77)
+#define PPSMC_MSG_MclkRetrainingTest                   ((uint16_t)0x78)
+#define PPSMC_MSG_ForceTDPClamping                     ((uint16_t)0x79)
+#define PPSMC_MSG_CollectCAC_PowerCorreln              ((uint16_t)0x7A)
+#define PPSMC_MSG_CollectCAC_WeightCalib               ((uint16_t)0x7B)
+#define PPSMC_MSG_CollectCAC_SQonly                    ((uint16_t)0x7C)
+#define PPSMC_MSG_CollectCAC_TemperaturePwr            ((uint16_t)0x7D)
+#define PPSMC_MSG_ExtremitiesTest_Start                ((uint16_t)0x7E)
+#define PPSMC_MSG_ExtremitiesTest_Stop                 ((uint16_t)0x7F)
+#define PPSMC_FlushDataCache                           ((uint16_t)0x80)
+#define PPSMC_FlushInstrCache                          ((uint16_t)0x81)
+#define PPSMC_MSG_SetEnabledLevels                     ((uint16_t)0x82)
+#define PPSMC_MSG_SetForcedLevels                      ((uint16_t)0x83)
+#define PPSMC_MSG_ResetToDefaults                      ((uint16_t)0x84)
+#define PPSMC_MSG_SetForcedLevelsAndJump               ((uint16_t)0x85)
+#define PPSMC_MSG_SetCACHistoryMode                    ((uint16_t)0x86)
+#define PPSMC_MSG_EnableDTE                            ((uint16_t)0x87)
+#define PPSMC_MSG_DisableDTE                           ((uint16_t)0x88)
+#define PPSMC_MSG_SmcSpaceSetAddress                   ((uint16_t)0x89)
+#define PPSMC_MSG_SmcSpaceWriteDWordInc                ((uint16_t)0x8A)
+#define PPSMC_MSG_SmcSpaceWriteWordInc                 ((uint16_t)0x8B)
+#define PPSMC_MSG_SmcSpaceWriteByteInc                 ((uint16_t)0x8C)
+#define PPSMC_MSG_ChangeNearTDPLimit                   ((uint16_t)0x90)
+#define PPSMC_MSG_ChangeSafePowerLimit                 ((uint16_t)0x91)
+#define PPSMC_MSG_DPMStateSweepStart                   ((uint16_t)0x92)
+#define PPSMC_MSG_DPMStateSweepStop                    ((uint16_t)0x93)
+#define PPSMC_MSG_OVRDDisableSCLKDS                    ((uint16_t)0x94)
+#define PPSMC_MSG_CancelDisableOVRDSCLKDS              ((uint16_t)0x95)
+#define PPSMC_MSG_ThrottleOVRDSCLKDS                   ((uint16_t)0x96)
+#define PPSMC_MSG_CancelThrottleOVRDSCLKDS             ((uint16_t)0x97)
+#define PPSMC_MSG_GPIO17                               ((uint16_t)0x98)
+#define PPSMC_MSG_API_SetSvi2Volt_Vddc                 ((uint16_t)0x99)
+#define PPSMC_MSG_API_SetSvi2Volt_Vddci                ((uint16_t)0x9A)
+#define PPSMC_MSG_API_SetSvi2Volt_Mvdd                 ((uint16_t)0x9B)
+#define PPSMC_MSG_API_GetSvi2Volt_Vddc                 ((uint16_t)0x9C)
+#define PPSMC_MSG_API_GetSvi2Volt_Vddci                ((uint16_t)0x9D)
+#define PPSMC_MSG_API_GetSvi2Volt_Mvdd                 ((uint16_t)0x9E)
+
+#define PPSMC_MSG_BREAK                                ((uint16_t)0xF8)
+
+#define PPSMC_MSG_Test                                 ((uint16_t)0x100)
+#define PPSMC_MSG_DRV_DRAM_ADDR_HI                     ((uint16_t)0x250)
+#define PPSMC_MSG_DRV_DRAM_ADDR_LO                     ((uint16_t)0x251)
+#define PPSMC_MSG_SMU_DRAM_ADDR_HI                     ((uint16_t)0x252)
+#define PPSMC_MSG_SMU_DRAM_ADDR_LO                     ((uint16_t)0x253)
+#define PPSMC_MSG_LoadUcodes                           ((uint16_t)0x254)
+
+typedef uint16_t PPSMC_Msg;
+
+#define PPSMC_EVENT_STATUS_THERMAL                     0x00000001
+#define PPSMC_EVENT_STATUS_REGULATORHOT                0x00000002
+#define PPSMC_EVENT_STATUS_DC                          0x00000004
+#define PPSMC_EVENT_STATUS_GPIO17                      0x00000008
+
+#pragma pack(pop)
+
+#endif
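
As an aside (not part of the patch): PPSMC_Result values with bit 0x80 set (0xFD-0xFF above) are failures, which is exactly what PPSMC_isERROR() tests. A hedged sketch of the intended check; smc_send_msg() is a hypothetical stand-in for whatever writes a PPSMC_Msg to the SMC message register and reads back the response.

    /* Illustrative only: smc_send_msg() is hypothetical, not a real helper. */
    static int example_check_smc_result(struct amdgpu_device *adev)
    {
            PPSMC_Result resp = smc_send_msg(adev, PPSMC_MSG_Test);

            if (PPSMC_isERROR(resp)) {      /* Failed, UnknownCmd, UnknownVT, ... */
                    DRM_ERROR("PPSMC message failed, result 0x%x\n", resp);
                    return -EINVAL;
            }
            return 0;
    }
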
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_sdma_pkt_open.h b/drivers/gpu/drm/amd/amdgpu/tonga_sdma_pkt_open.h
new file mode 100644 (file)
index 0000000..099b7b5
--- /dev/null
@@ -0,0 +1,2240 @@
+/*
+ * Copyright (C) 2014  Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __TONGA_SDMA_PKT_OPEN_H_
+#define __TONGA_SDMA_PKT_OPEN_H_
+
+#define SDMA_OP_NOP  0
+#define SDMA_OP_COPY  1
+#define SDMA_OP_WRITE  2
+#define SDMA_OP_INDIRECT  4
+#define SDMA_OP_FENCE  5
+#define SDMA_OP_TRAP  6
+#define SDMA_OP_SEM  7
+#define SDMA_OP_POLL_REGMEM  8
+#define SDMA_OP_COND_EXE  9
+#define SDMA_OP_ATOMIC  10
+#define SDMA_OP_CONST_FILL  11
+#define SDMA_OP_GEN_PTEPDE  12
+#define SDMA_OP_TIMESTAMP  13
+#define SDMA_OP_SRBM_WRITE  14
+#define SDMA_OP_PRE_EXE  15
+#define SDMA_SUBOP_TIMESTAMP_SET  0
+#define SDMA_SUBOP_TIMESTAMP_GET  1
+#define SDMA_SUBOP_TIMESTAMP_GET_GLOBAL  2
+#define SDMA_SUBOP_COPY_LINEAR  0
+#define SDMA_SUBOP_COPY_LINEAR_SUB_WIND  4
+#define SDMA_SUBOP_COPY_TILED  1
+#define SDMA_SUBOP_COPY_TILED_SUB_WIND  5
+#define SDMA_SUBOP_COPY_T2T_SUB_WIND  6
+#define SDMA_SUBOP_COPY_SOA  3
+#define SDMA_SUBOP_WRITE_LINEAR  0
+#define SDMA_SUBOP_WRITE_TILED  1
+
+/*define for op field*/
+#define SDMA_PKT_HEADER_op_offset 0
+#define SDMA_PKT_HEADER_op_mask   0x000000FF
+#define SDMA_PKT_HEADER_op_shift  0
+#define SDMA_PKT_HEADER_OP(x) (((x) & SDMA_PKT_HEADER_op_mask) << SDMA_PKT_HEADER_op_shift)
+
+/*define for sub_op field*/
+#define SDMA_PKT_HEADER_sub_op_offset 0
+#define SDMA_PKT_HEADER_sub_op_mask   0x000000FF
+#define SDMA_PKT_HEADER_sub_op_shift  8
+#define SDMA_PKT_HEADER_SUB_OP(x) (((x) & SDMA_PKT_HEADER_sub_op_mask) << SDMA_PKT_HEADER_sub_op_shift)
+
+/*
+** Definitions for SDMA_PKT_COPY_LINEAR packet
+*/
+
+/*define for HEADER word*/
+/*define for op field*/
+#define SDMA_PKT_COPY_LINEAR_HEADER_op_offset 0
+#define SDMA_PKT_COPY_LINEAR_HEADER_op_mask   0x000000FF
+#define SDMA_PKT_COPY_LINEAR_HEADER_op_shift  0
+#define SDMA_PKT_COPY_LINEAR_HEADER_OP(x) (((x) & SDMA_PKT_COPY_LINEAR_HEADER_op_mask) << SDMA_PKT_COPY_LINEAR_HEADER_op_shift)
+
+/*define for sub_op field*/
+#define SDMA_PKT_COPY_LINEAR_HEADER_sub_op_offset 0
+#define SDMA_PKT_COPY_LINEAR_HEADER_sub_op_mask   0x000000FF
+#define SDMA_PKT_COPY_LINEAR_HEADER_sub_op_shift  8
+#define SDMA_PKT_COPY_LINEAR_HEADER_SUB_OP(x) (((x) & SDMA_PKT_COPY_LINEAR_HEADER_sub_op_mask) << SDMA_PKT_COPY_LINEAR_HEADER_sub_op_shift)
+
+/*define for broadcast field*/
+#define SDMA_PKT_COPY_LINEAR_HEADER_broadcast_offset 0
+#define SDMA_PKT_COPY_LINEAR_HEADER_broadcast_mask   0x00000001
+#define SDMA_PKT_COPY_LINEAR_HEADER_broadcast_shift  27
+#define SDMA_PKT_COPY_LINEAR_HEADER_BROADCAST(x) (((x) & SDMA_PKT_COPY_LINEAR_HEADER_broadcast_mask) << SDMA_PKT_COPY_LINEAR_HEADER_broadcast_shift)
+
+/*define for COUNT word*/
+/*define for count field*/
+#define SDMA_PKT_COPY_LINEAR_COUNT_count_offset 1
+#define SDMA_PKT_COPY_LINEAR_COUNT_count_mask   0x003FFFFF
+#define SDMA_PKT_COPY_LINEAR_COUNT_count_shift  0
+#define SDMA_PKT_COPY_LINEAR_COUNT_COUNT(x) (((x) & SDMA_PKT_COPY_LINEAR_COUNT_count_mask) << SDMA_PKT_COPY_LINEAR_COUNT_count_shift)
+
+/*define for PARAMETER word*/
+/*define for dst_sw field*/
+#define SDMA_PKT_COPY_LINEAR_PARAMETER_dst_sw_offset 2
+#define SDMA_PKT_COPY_LINEAR_PARAMETER_dst_sw_mask   0x00000003
+#define SDMA_PKT_COPY_LINEAR_PARAMETER_dst_sw_shift  16
+#define SDMA_PKT_COPY_LINEAR_PARAMETER_DST_SW(x) (((x) & SDMA_PKT_COPY_LINEAR_PARAMETER_dst_sw_mask) << SDMA_PKT_COPY_LINEAR_PARAMETER_dst_sw_shift)
+
+/*define for dst_ha field*/
+#define SDMA_PKT_COPY_LINEAR_PARAMETER_dst_ha_offset 2
+#define SDMA_PKT_COPY_LINEAR_PARAMETER_dst_ha_mask   0x00000001
+#define SDMA_PKT_COPY_LINEAR_PARAMETER_dst_ha_shift  22
+#define SDMA_PKT_COPY_LINEAR_PARAMETER_DST_HA(x) (((x) & SDMA_PKT_COPY_LINEAR_PARAMETER_dst_ha_mask) << SDMA_PKT_COPY_LINEAR_PARAMETER_dst_ha_shift)
+
+/*define for src_sw field*/
+#define SDMA_PKT_COPY_LINEAR_PARAMETER_src_sw_offset 2
+#define SDMA_PKT_COPY_LINEAR_PARAMETER_src_sw_mask   0x00000003
+#define SDMA_PKT_COPY_LINEAR_PARAMETER_src_sw_shift  24
+#define SDMA_PKT_COPY_LINEAR_PARAMETER_SRC_SW(x) (((x) & SDMA_PKT_COPY_LINEAR_PARAMETER_src_sw_mask) << SDMA_PKT_COPY_LINEAR_PARAMETER_src_sw_shift)
+
+/*define for src_ha field*/
+#define SDMA_PKT_COPY_LINEAR_PARAMETER_src_ha_offset 2
+#define SDMA_PKT_COPY_LINEAR_PARAMETER_src_ha_mask   0x00000001
+#define SDMA_PKT_COPY_LINEAR_PARAMETER_src_ha_shift  30
+#define SDMA_PKT_COPY_LINEAR_PARAMETER_SRC_HA(x) (((x) & SDMA_PKT_COPY_LINEAR_PARAMETER_src_ha_mask) << SDMA_PKT_COPY_LINEAR_PARAMETER_src_ha_shift)
+
+/*define for SRC_ADDR_LO word*/
+/*define for src_addr_31_0 field*/
+#define SDMA_PKT_COPY_LINEAR_SRC_ADDR_LO_src_addr_31_0_offset 3
+#define SDMA_PKT_COPY_LINEAR_SRC_ADDR_LO_src_addr_31_0_mask   0xFFFFFFFF
+#define SDMA_PKT_COPY_LINEAR_SRC_ADDR_LO_src_addr_31_0_shift  0
+#define SDMA_PKT_COPY_LINEAR_SRC_ADDR_LO_SRC_ADDR_31_0(x) (((x) & SDMA_PKT_COPY_LINEAR_SRC_ADDR_LO_src_addr_31_0_mask) << SDMA_PKT_COPY_LINEAR_SRC_ADDR_LO_src_addr_31_0_shift)
+
+/*define for SRC_ADDR_HI word*/
+/*define for src_addr_63_32 field*/
+#define SDMA_PKT_COPY_LINEAR_SRC_ADDR_HI_src_addr_63_32_offset 4
+#define SDMA_PKT_COPY_LINEAR_SRC_ADDR_HI_src_addr_63_32_mask   0xFFFFFFFF
+#define SDMA_PKT_COPY_LINEAR_SRC_ADDR_HI_src_addr_63_32_shift  0
+#define SDMA_PKT_COPY_LINEAR_SRC_ADDR_HI_SRC_ADDR_63_32(x) (((x) & SDMA_PKT_COPY_LINEAR_SRC_ADDR_HI_src_addr_63_32_mask) << SDMA_PKT_COPY_LINEAR_SRC_ADDR_HI_src_addr_63_32_shift)
+
+/*define for DST_ADDR_LO word*/
+/*define for dst_addr_31_0 field*/
+#define SDMA_PKT_COPY_LINEAR_DST_ADDR_LO_dst_addr_31_0_offset 5
+#define SDMA_PKT_COPY_LINEAR_DST_ADDR_LO_dst_addr_31_0_mask   0xFFFFFFFF
+#define SDMA_PKT_COPY_LINEAR_DST_ADDR_LO_dst_addr_31_0_shift  0
+#define SDMA_PKT_COPY_LINEAR_DST_ADDR_LO_DST_ADDR_31_0(x) (((x) & SDMA_PKT_COPY_LINEAR_DST_ADDR_LO_dst_addr_31_0_mask) << SDMA_PKT_COPY_LINEAR_DST_ADDR_LO_dst_addr_31_0_shift)
+
+/*define for DST_ADDR_HI word*/
+/*define for dst_addr_63_32 field*/
+#define SDMA_PKT_COPY_LINEAR_DST_ADDR_HI_dst_addr_63_32_offset 6
+#define SDMA_PKT_COPY_LINEAR_DST_ADDR_HI_dst_addr_63_32_mask   0xFFFFFFFF
+#define SDMA_PKT_COPY_LINEAR_DST_ADDR_HI_dst_addr_63_32_shift  0
+#define SDMA_PKT_COPY_LINEAR_DST_ADDR_HI_DST_ADDR_63_32(x) (((x) & SDMA_PKT_COPY_LINEAR_DST_ADDR_HI_dst_addr_63_32_mask) << SDMA_PKT_COPY_LINEAR_DST_ADDR_HI_dst_addr_63_32_shift)
+
+
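As an aside (not part of the patch): the *_offset values above lay the COPY_LINEAR packet out as seven dwords: header (op/sub_op), count, a parameter word, then the 64-bit source and destination addresses. A hedged sketch of assembling one packet with these macros; the caller-provided ib buffer and byte count handling are illustrative only.

    /* Illustrative only: build a 7-dword SDMA linear copy into an indirect buffer. */
    static void example_emit_copy_linear(uint32_t *ib, uint64_t src, uint64_t dst,
                                         uint32_t byte_count)
    {
            ib[0] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
                    SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
            ib[1] = SDMA_PKT_COPY_LINEAR_COUNT_COUNT(byte_count);  /* 22-bit field */
            ib[2] = 0;                      /* PARAMETER: default sw/ha settings */
            ib[3] = lower_32_bits(src);     /* SRC_ADDR_LO, dword 3 */
            ib[4] = upper_32_bits(src);     /* SRC_ADDR_HI, dword 4 */
            ib[5] = lower_32_bits(dst);     /* DST_ADDR_LO, dword 5 */
            ib[6] = upper_32_bits(dst);     /* DST_ADDR_HI, dword 6 */
    }
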
+/*
+** Definitions for SDMA_PKT_COPY_BROADCAST_LINEAR packet
+*/
+
+/*define for HEADER word*/
+/*define for op field*/
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_HEADER_op_offset 0
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_HEADER_op_mask   0x000000FF
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_HEADER_op_shift  0
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_HEADER_OP(x) (((x) & SDMA_PKT_COPY_BROADCAST_LINEAR_HEADER_op_mask) << SDMA_PKT_COPY_BROADCAST_LINEAR_HEADER_op_shift)
+
+/*define for sub_op field*/
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_HEADER_sub_op_offset 0
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_HEADER_sub_op_mask   0x000000FF
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_HEADER_sub_op_shift  8
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_HEADER_SUB_OP(x) (((x) & SDMA_PKT_COPY_BROADCAST_LINEAR_HEADER_sub_op_mask) << SDMA_PKT_COPY_BROADCAST_LINEAR_HEADER_sub_op_shift)
+
+/*define for broadcast field*/
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_HEADER_broadcast_offset 0
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_HEADER_broadcast_mask   0x00000001
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_HEADER_broadcast_shift  27
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_HEADER_BROADCAST(x) (((x) & SDMA_PKT_COPY_BROADCAST_LINEAR_HEADER_broadcast_mask) << SDMA_PKT_COPY_BROADCAST_LINEAR_HEADER_broadcast_shift)
+
+/*define for COUNT word*/
+/*define for count field*/
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_COUNT_count_offset 1
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_COUNT_count_mask   0x003FFFFF
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_COUNT_count_shift  0
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_COUNT_COUNT(x) (((x) & SDMA_PKT_COPY_BROADCAST_LINEAR_COUNT_count_mask) << SDMA_PKT_COPY_BROADCAST_LINEAR_COUNT_count_shift)
+
+/*define for PARAMETER word*/
+/*define for dst2_sw field*/
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst2_sw_offset 2
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst2_sw_mask   0x00000003
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst2_sw_shift  8
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_DST2_SW(x) (((x) & SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst2_sw_mask) << SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst2_sw_shift)
+
+/*define for dst2_ha field*/
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst2_ha_offset 2
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst2_ha_mask   0x00000001
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst2_ha_shift  14
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_DST2_HA(x) (((x) & SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst2_ha_mask) << SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst2_ha_shift)
+
+/*define for dst1_sw field*/
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst1_sw_offset 2
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst1_sw_mask   0x00000003
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst1_sw_shift  16
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_DST1_SW(x) (((x) & SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst1_sw_mask) << SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst1_sw_shift)
+
+/*define for dst1_ha field*/
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst1_ha_offset 2
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst1_ha_mask   0x00000001
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst1_ha_shift  22
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_DST1_HA(x) (((x) & SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst1_ha_mask) << SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst1_ha_shift)
+
+/*define for src_sw field*/
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_src_sw_offset 2
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_src_sw_mask   0x00000003
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_src_sw_shift  24
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_SRC_SW(x) (((x) & SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_src_sw_mask) << SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_src_sw_shift)
+
+/*define for src_ha field*/
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_src_ha_offset 2
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_src_ha_mask   0x00000001
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_src_ha_shift  30
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_SRC_HA(x) (((x) & SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_src_ha_mask) << SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_src_ha_shift)
+
+/*define for SRC_ADDR_LO word*/
+/*define for src_addr_31_0 field*/
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_SRC_ADDR_LO_src_addr_31_0_offset 3
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_SRC_ADDR_LO_src_addr_31_0_mask   0xFFFFFFFF
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_SRC_ADDR_LO_src_addr_31_0_shift  0
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_SRC_ADDR_LO_SRC_ADDR_31_0(x) (((x) & SDMA_PKT_COPY_BROADCAST_LINEAR_SRC_ADDR_LO_src_addr_31_0_mask) << SDMA_PKT_COPY_BROADCAST_LINEAR_SRC_ADDR_LO_src_addr_31_0_shift)
+
+/*define for SRC_ADDR_HI word*/
+/*define for src_addr_63_32 field*/
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_SRC_ADDR_HI_src_addr_63_32_offset 4
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_SRC_ADDR_HI_src_addr_63_32_mask   0xFFFFFFFF
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_SRC_ADDR_HI_src_addr_63_32_shift  0
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_SRC_ADDR_HI_SRC_ADDR_63_32(x) (((x) & SDMA_PKT_COPY_BROADCAST_LINEAR_SRC_ADDR_HI_src_addr_63_32_mask) << SDMA_PKT_COPY_BROADCAST_LINEAR_SRC_ADDR_HI_src_addr_63_32_shift)
+
+/*define for DST1_ADDR_LO word*/
+/*define for dst1_addr_31_0 field*/
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_DST1_ADDR_LO_dst1_addr_31_0_offset 5
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_DST1_ADDR_LO_dst1_addr_31_0_mask   0xFFFFFFFF
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_DST1_ADDR_LO_dst1_addr_31_0_shift  0
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_DST1_ADDR_LO_DST1_ADDR_31_0(x) (((x) & SDMA_PKT_COPY_BROADCAST_LINEAR_DST1_ADDR_LO_dst1_addr_31_0_mask) << SDMA_PKT_COPY_BROADCAST_LINEAR_DST1_ADDR_LO_dst1_addr_31_0_shift)
+
+/*define for DST1_ADDR_HI word*/
+/*define for dst1_addr_63_32 field*/
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_DST1_ADDR_HI_dst1_addr_63_32_offset 6
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_DST1_ADDR_HI_dst1_addr_63_32_mask   0xFFFFFFFF
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_DST1_ADDR_HI_dst1_addr_63_32_shift  0
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_DST1_ADDR_HI_DST1_ADDR_63_32(x) (((x) & SDMA_PKT_COPY_BROADCAST_LINEAR_DST1_ADDR_HI_dst1_addr_63_32_mask) << SDMA_PKT_COPY_BROADCAST_LINEAR_DST1_ADDR_HI_dst1_addr_63_32_shift)
+
+/*define for DST2_ADDR_LO word*/
+/*define for dst2_addr_31_0 field*/
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_DST2_ADDR_LO_dst2_addr_31_0_offset 7
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_DST2_ADDR_LO_dst2_addr_31_0_mask   0xFFFFFFFF
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_DST2_ADDR_LO_dst2_addr_31_0_shift  0
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_DST2_ADDR_LO_DST2_ADDR_31_0(x) (((x) & SDMA_PKT_COPY_BROADCAST_LINEAR_DST2_ADDR_LO_dst2_addr_31_0_mask) << SDMA_PKT_COPY_BROADCAST_LINEAR_DST2_ADDR_LO_dst2_addr_31_0_shift)
+
+/*define for DST2_ADDR_HI word*/
+/*define for dst2_addr_63_32 field*/
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_DST2_ADDR_HI_dst2_addr_63_32_offset 8
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_DST2_ADDR_HI_dst2_addr_63_32_mask   0xFFFFFFFF
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_DST2_ADDR_HI_dst2_addr_63_32_shift  0
+#define SDMA_PKT_COPY_BROADCAST_LINEAR_DST2_ADDR_HI_DST2_ADDR_63_32(x) (((x) & SDMA_PKT_COPY_BROADCAST_LINEAR_DST2_ADDR_HI_dst2_addr_63_32_mask) << SDMA_PKT_COPY_BROADCAST_LINEAR_DST2_ADDR_HI_dst2_addr_63_32_shift)
+
+
+/*
+** Definitions for SDMA_PKT_COPY_LINEAR_SUBWIN packet
+*/
+
+/*define for HEADER word*/
+/*define for op field*/
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_HEADER_op_offset 0
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_HEADER_op_mask   0x000000FF
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_HEADER_op_shift  0
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_HEADER_OP(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_HEADER_op_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_HEADER_op_shift)
+
+/*define for sub_op field*/
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_HEADER_sub_op_offset 0
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_HEADER_sub_op_mask   0x000000FF
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_HEADER_sub_op_shift  8
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_HEADER_SUB_OP(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_HEADER_sub_op_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_HEADER_sub_op_shift)
+
+/*define for elementsize field*/
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_HEADER_elementsize_offset 0
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_HEADER_elementsize_mask   0x00000007
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_HEADER_elementsize_shift  29
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_HEADER_ELEMENTSIZE(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_HEADER_elementsize_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_HEADER_elementsize_shift)
+
+/*define for SRC_ADDR_LO word*/
+/*define for src_addr_31_0 field*/
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_SRC_ADDR_LO_src_addr_31_0_offset 1
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_SRC_ADDR_LO_src_addr_31_0_mask   0xFFFFFFFF
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_SRC_ADDR_LO_src_addr_31_0_shift  0
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_SRC_ADDR_LO_SRC_ADDR_31_0(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_SRC_ADDR_LO_src_addr_31_0_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_SRC_ADDR_LO_src_addr_31_0_shift)
+
+/*define for SRC_ADDR_HI word*/
+/*define for src_addr_63_32 field*/
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_SRC_ADDR_HI_src_addr_63_32_offset 2
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_SRC_ADDR_HI_src_addr_63_32_mask   0xFFFFFFFF
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_SRC_ADDR_HI_src_addr_63_32_shift  0
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_SRC_ADDR_HI_SRC_ADDR_63_32(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_SRC_ADDR_HI_src_addr_63_32_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_SRC_ADDR_HI_src_addr_63_32_shift)
+
+/*define for DW_3 word*/
+/*define for src_x field*/
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_3_src_x_offset 3
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_3_src_x_mask   0x00003FFF
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_3_src_x_shift  0
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_3_SRC_X(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_DW_3_src_x_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_DW_3_src_x_shift)
+
+/*define for src_y field*/
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_3_src_y_offset 3
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_3_src_y_mask   0x00003FFF
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_3_src_y_shift  16
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_3_SRC_Y(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_DW_3_src_y_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_DW_3_src_y_shift)
+
+/*define for DW_4 word*/
+/*define for src_z field*/
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_4_src_z_offset 4
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_4_src_z_mask   0x000007FF
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_4_src_z_shift  0
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_4_SRC_Z(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_DW_4_src_z_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_DW_4_src_z_shift)
+
+/*define for src_pitch field*/
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_4_src_pitch_offset 4
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_4_src_pitch_mask   0x00003FFF
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_4_src_pitch_shift  16
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_4_SRC_PITCH(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_DW_4_src_pitch_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_DW_4_src_pitch_shift)
+
+/*define for DW_5 word*/
+/*define for src_slice_pitch field*/
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_5_src_slice_pitch_offset 5
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_5_src_slice_pitch_mask   0x0FFFFFFF
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_5_src_slice_pitch_shift  0
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_5_SRC_SLICE_PITCH(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_DW_5_src_slice_pitch_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_DW_5_src_slice_pitch_shift)
+
+/*define for DST_ADDR_LO word*/
+/*define for dst_addr_31_0 field*/
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DST_ADDR_LO_dst_addr_31_0_offset 6
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DST_ADDR_LO_dst_addr_31_0_mask   0xFFFFFFFF
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DST_ADDR_LO_dst_addr_31_0_shift  0
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DST_ADDR_LO_DST_ADDR_31_0(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_DST_ADDR_LO_dst_addr_31_0_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_DST_ADDR_LO_dst_addr_31_0_shift)
+
+/*define for DST_ADDR_HI word*/
+/*define for dst_addr_63_32 field*/
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DST_ADDR_HI_dst_addr_63_32_offset 7
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DST_ADDR_HI_dst_addr_63_32_mask   0xFFFFFFFF
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DST_ADDR_HI_dst_addr_63_32_shift  0
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DST_ADDR_HI_DST_ADDR_63_32(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_DST_ADDR_HI_dst_addr_63_32_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_DST_ADDR_HI_dst_addr_63_32_shift)
+
+/*define for DW_8 word*/
+/*define for dst_x field*/
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_8_dst_x_offset 8
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_8_dst_x_mask   0x00003FFF
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_8_dst_x_shift  0
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_8_DST_X(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_DW_8_dst_x_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_DW_8_dst_x_shift)
+
+/*define for dst_y field*/
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_8_dst_y_offset 8
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_8_dst_y_mask   0x00003FFF
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_8_dst_y_shift  16
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_8_DST_Y(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_DW_8_dst_y_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_DW_8_dst_y_shift)
+
+/*define for DW_9 word*/
+/*define for dst_z field*/
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_9_dst_z_offset 9
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_9_dst_z_mask   0x000007FF
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_9_dst_z_shift  0
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_9_DST_Z(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_DW_9_dst_z_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_DW_9_dst_z_shift)
+
+/*define for dst_pitch field*/
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_9_dst_pitch_offset 9
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_9_dst_pitch_mask   0x00003FFF
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_9_dst_pitch_shift  16
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_9_DST_PITCH(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_DW_9_dst_pitch_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_DW_9_dst_pitch_shift)
+
+/*define for DW_10 word*/
+/*define for dst_slice_pitch field*/
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_10_dst_slice_pitch_offset 10
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_10_dst_slice_pitch_mask   0x0FFFFFFF
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_10_dst_slice_pitch_shift  0
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_10_DST_SLICE_PITCH(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_DW_10_dst_slice_pitch_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_DW_10_dst_slice_pitch_shift)
+
+/*define for DW_11 word*/
+/*define for rect_x field*/
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_11_rect_x_offset 11
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_11_rect_x_mask   0x00003FFF
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_11_rect_x_shift  0
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_11_RECT_X(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_DW_11_rect_x_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_DW_11_rect_x_shift)
+
+/*define for rect_y field*/
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_11_rect_y_offset 11
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_11_rect_y_mask   0x00003FFF
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_11_rect_y_shift  16
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_11_RECT_Y(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_DW_11_rect_y_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_DW_11_rect_y_shift)
+
+/*define for DW_12 word*/
+/*define for rect_z field*/
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_rect_z_offset 12
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_rect_z_mask   0x000007FF
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_rect_z_shift  0
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_RECT_Z(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_rect_z_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_rect_z_shift)
+
+/*define for dst_sw field*/
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_dst_sw_offset 12
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_dst_sw_mask   0x00000003
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_dst_sw_shift  16
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_DST_SW(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_dst_sw_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_dst_sw_shift)
+
+/*define for dst_ha field*/
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_dst_ha_offset 12
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_dst_ha_mask   0x00000001
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_dst_ha_shift  22
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_DST_HA(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_dst_ha_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_dst_ha_shift)
+
+/*define for src_sw field*/
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_src_sw_offset 12
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_src_sw_mask   0x00000003
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_src_sw_shift  24
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_SRC_SW(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_src_sw_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_src_sw_shift)
+
+/*define for src_ha field*/
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_src_ha_offset 12
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_src_ha_mask   0x00000001
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_src_ha_shift  30
+#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_SRC_HA(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_src_ha_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_src_ha_shift)
+
+
+/*
+** Definitions for SDMA_PKT_COPY_TILED packet
+*/
+
+/*define for HEADER word*/
+/*define for op field*/
+#define SDMA_PKT_COPY_TILED_HEADER_op_offset 0
+#define SDMA_PKT_COPY_TILED_HEADER_op_mask   0x000000FF
+#define SDMA_PKT_COPY_TILED_HEADER_op_shift  0
+#define SDMA_PKT_COPY_TILED_HEADER_OP(x) (((x) & SDMA_PKT_COPY_TILED_HEADER_op_mask) << SDMA_PKT_COPY_TILED_HEADER_op_shift)
+
+/*define for sub_op field*/
+#define SDMA_PKT_COPY_TILED_HEADER_sub_op_offset 0
+#define SDMA_PKT_COPY_TILED_HEADER_sub_op_mask   0x000000FF
+#define SDMA_PKT_COPY_TILED_HEADER_sub_op_shift  8
+#define SDMA_PKT_COPY_TILED_HEADER_SUB_OP(x) (((x) & SDMA_PKT_COPY_TILED_HEADER_sub_op_mask) << SDMA_PKT_COPY_TILED_HEADER_sub_op_shift)
+
+/*define for detile field*/
+#define SDMA_PKT_COPY_TILED_HEADER_detile_offset 0
+#define SDMA_PKT_COPY_TILED_HEADER_detile_mask   0x00000001
+#define SDMA_PKT_COPY_TILED_HEADER_detile_shift  31
+#define SDMA_PKT_COPY_TILED_HEADER_DETILE(x) (((x) & SDMA_PKT_COPY_TILED_HEADER_detile_mask) << SDMA_PKT_COPY_TILED_HEADER_detile_shift)
+
+/*define for TILED_ADDR_LO word*/
+/*define for tiled_addr_31_0 field*/
+#define SDMA_PKT_COPY_TILED_TILED_ADDR_LO_tiled_addr_31_0_offset 1
+#define SDMA_PKT_COPY_TILED_TILED_ADDR_LO_tiled_addr_31_0_mask   0xFFFFFFFF
+#define SDMA_PKT_COPY_TILED_TILED_ADDR_LO_tiled_addr_31_0_shift  0
+#define SDMA_PKT_COPY_TILED_TILED_ADDR_LO_TILED_ADDR_31_0(x) (((x) & SDMA_PKT_COPY_TILED_TILED_ADDR_LO_tiled_addr_31_0_mask) << SDMA_PKT_COPY_TILED_TILED_ADDR_LO_tiled_addr_31_0_shift)
+
+/*define for TILED_ADDR_HI word*/
+/*define for tiled_addr_63_32 field*/
+#define SDMA_PKT_COPY_TILED_TILED_ADDR_HI_tiled_addr_63_32_offset 2
+#define SDMA_PKT_COPY_TILED_TILED_ADDR_HI_tiled_addr_63_32_mask   0xFFFFFFFF
+#define SDMA_PKT_COPY_TILED_TILED_ADDR_HI_tiled_addr_63_32_shift  0
+#define SDMA_PKT_COPY_TILED_TILED_ADDR_HI_TILED_ADDR_63_32(x) (((x) & SDMA_PKT_COPY_TILED_TILED_ADDR_HI_tiled_addr_63_32_mask) << SDMA_PKT_COPY_TILED_TILED_ADDR_HI_tiled_addr_63_32_shift)
+
+/*define for DW_3 word*/
+/*define for pitch_in_tile field*/
+#define SDMA_PKT_COPY_TILED_DW_3_pitch_in_tile_offset 3
+#define SDMA_PKT_COPY_TILED_DW_3_pitch_in_tile_mask   0x000007FF
+#define SDMA_PKT_COPY_TILED_DW_3_pitch_in_tile_shift  0
+#define SDMA_PKT_COPY_TILED_DW_3_PITCH_IN_TILE(x) (((x) & SDMA_PKT_COPY_TILED_DW_3_pitch_in_tile_mask) << SDMA_PKT_COPY_TILED_DW_3_pitch_in_tile_shift)
+
+/*define for height field*/
+#define SDMA_PKT_COPY_TILED_DW_3_height_offset 3
+#define SDMA_PKT_COPY_TILED_DW_3_height_mask   0x00003FFF
+#define SDMA_PKT_COPY_TILED_DW_3_height_shift  16
+#define SDMA_PKT_COPY_TILED_DW_3_HEIGHT(x) (((x) & SDMA_PKT_COPY_TILED_DW_3_height_mask) << SDMA_PKT_COPY_TILED_DW_3_height_shift)
+
+/*define for DW_4 word*/
+/*define for slice_pitch field*/
+#define SDMA_PKT_COPY_TILED_DW_4_slice_pitch_offset 4
+#define SDMA_PKT_COPY_TILED_DW_4_slice_pitch_mask   0x003FFFFF
+#define SDMA_PKT_COPY_TILED_DW_4_slice_pitch_shift  0
+#define SDMA_PKT_COPY_TILED_DW_4_SLICE_PITCH(x) (((x) & SDMA_PKT_COPY_TILED_DW_4_slice_pitch_mask) << SDMA_PKT_COPY_TILED_DW_4_slice_pitch_shift)
+
+/*define for DW_5 word*/
+/*define for element_size field*/
+#define SDMA_PKT_COPY_TILED_DW_5_element_size_offset 5
+#define SDMA_PKT_COPY_TILED_DW_5_element_size_mask   0x00000007
+#define SDMA_PKT_COPY_TILED_DW_5_element_size_shift  0
+#define SDMA_PKT_COPY_TILED_DW_5_ELEMENT_SIZE(x) (((x) & SDMA_PKT_COPY_TILED_DW_5_element_size_mask) << SDMA_PKT_COPY_TILED_DW_5_element_size_shift)
+
+/*define for array_mode field*/
+#define SDMA_PKT_COPY_TILED_DW_5_array_mode_offset 5
+#define SDMA_PKT_COPY_TILED_DW_5_array_mode_mask   0x0000000F
+#define SDMA_PKT_COPY_TILED_DW_5_array_mode_shift  3
+#define SDMA_PKT_COPY_TILED_DW_5_ARRAY_MODE(x) (((x) & SDMA_PKT_COPY_TILED_DW_5_array_mode_mask) << SDMA_PKT_COPY_TILED_DW_5_array_mode_shift)
+
+/*define for mit_mode field*/
+#define SDMA_PKT_COPY_TILED_DW_5_mit_mode_offset 5
+#define SDMA_PKT_COPY_TILED_DW_5_mit_mode_mask   0x00000007
+#define SDMA_PKT_COPY_TILED_DW_5_mit_mode_shift  8
+#define SDMA_PKT_COPY_TILED_DW_5_MIT_MODE(x) (((x) & SDMA_PKT_COPY_TILED_DW_5_mit_mode_mask) << SDMA_PKT_COPY_TILED_DW_5_mit_mode_shift)
+
+/*define for tilesplit_size field*/
+#define SDMA_PKT_COPY_TILED_DW_5_tilesplit_size_offset 5
+#define SDMA_PKT_COPY_TILED_DW_5_tilesplit_size_mask   0x00000007
+#define SDMA_PKT_COPY_TILED_DW_5_tilesplit_size_shift  11
+#define SDMA_PKT_COPY_TILED_DW_5_TILESPLIT_SIZE(x) (((x) & SDMA_PKT_COPY_TILED_DW_5_tilesplit_size_mask) << SDMA_PKT_COPY_TILED_DW_5_tilesplit_size_shift)
+
+/*define for bank_w field*/
+#define SDMA_PKT_COPY_TILED_DW_5_bank_w_offset 5
+#define SDMA_PKT_COPY_TILED_DW_5_bank_w_mask   0x00000003
+#define SDMA_PKT_COPY_TILED_DW_5_bank_w_shift  15
+#define SDMA_PKT_COPY_TILED_DW_5_BANK_W(x) (((x) & SDMA_PKT_COPY_TILED_DW_5_bank_w_mask) << SDMA_PKT_COPY_TILED_DW_5_bank_w_shift)
+
+/*define for bank_h field*/
+#define SDMA_PKT_COPY_TILED_DW_5_bank_h_offset 5
+#define SDMA_PKT_COPY_TILED_DW_5_bank_h_mask   0x00000003
+#define SDMA_PKT_COPY_TILED_DW_5_bank_h_shift  18
+#define SDMA_PKT_COPY_TILED_DW_5_BANK_H(x) (((x) & SDMA_PKT_COPY_TILED_DW_5_bank_h_mask) << SDMA_PKT_COPY_TILED_DW_5_bank_h_shift)
+
+/*define for num_bank field*/
+#define SDMA_PKT_COPY_TILED_DW_5_num_bank_offset 5
+#define SDMA_PKT_COPY_TILED_DW_5_num_bank_mask   0x00000003
+#define SDMA_PKT_COPY_TILED_DW_5_num_bank_shift  21
+#define SDMA_PKT_COPY_TILED_DW_5_NUM_BANK(x) (((x) & SDMA_PKT_COPY_TILED_DW_5_num_bank_mask) << SDMA_PKT_COPY_TILED_DW_5_num_bank_shift)
+
+/*define for mat_aspt field*/
+#define SDMA_PKT_COPY_TILED_DW_5_mat_aspt_offset 5
+#define SDMA_PKT_COPY_TILED_DW_5_mat_aspt_mask   0x00000003
+#define SDMA_PKT_COPY_TILED_DW_5_mat_aspt_shift  24
+#define SDMA_PKT_COPY_TILED_DW_5_MAT_ASPT(x) (((x) & SDMA_PKT_COPY_TILED_DW_5_mat_aspt_mask) << SDMA_PKT_COPY_TILED_DW_5_mat_aspt_shift)
+
+/*define for pipe_config field*/
+#define SDMA_PKT_COPY_TILED_DW_5_pipe_config_offset 5
+#define SDMA_PKT_COPY_TILED_DW_5_pipe_config_mask   0x0000001F
+#define SDMA_PKT_COPY_TILED_DW_5_pipe_config_shift  26
+#define SDMA_PKT_COPY_TILED_DW_5_PIPE_CONFIG(x) (((x) & SDMA_PKT_COPY_TILED_DW_5_pipe_config_mask) << SDMA_PKT_COPY_TILED_DW_5_pipe_config_shift)
+
+/*define for DW_6 word*/
+/*define for x field*/
+#define SDMA_PKT_COPY_TILED_DW_6_x_offset 6
+#define SDMA_PKT_COPY_TILED_DW_6_x_mask   0x00003FFF
+#define SDMA_PKT_COPY_TILED_DW_6_x_shift  0
+#define SDMA_PKT_COPY_TILED_DW_6_X(x) (((x) & SDMA_PKT_COPY_TILED_DW_6_x_mask) << SDMA_PKT_COPY_TILED_DW_6_x_shift)
+
+/*define for y field*/
+#define SDMA_PKT_COPY_TILED_DW_6_y_offset 6
+#define SDMA_PKT_COPY_TILED_DW_6_y_mask   0x00003FFF
+#define SDMA_PKT_COPY_TILED_DW_6_y_shift  16
+#define SDMA_PKT_COPY_TILED_DW_6_Y(x) (((x) & SDMA_PKT_COPY_TILED_DW_6_y_mask) << SDMA_PKT_COPY_TILED_DW_6_y_shift)
+
+/*define for DW_7 word*/
+/*define for z field*/
+#define SDMA_PKT_COPY_TILED_DW_7_z_offset 7
+#define SDMA_PKT_COPY_TILED_DW_7_z_mask   0x00000FFF
+#define SDMA_PKT_COPY_TILED_DW_7_z_shift  0
+#define SDMA_PKT_COPY_TILED_DW_7_Z(x) (((x) & SDMA_PKT_COPY_TILED_DW_7_z_mask) << SDMA_PKT_COPY_TILED_DW_7_z_shift)
+
+/*define for linear_sw field*/
+#define SDMA_PKT_COPY_TILED_DW_7_linear_sw_offset 7
+#define SDMA_PKT_COPY_TILED_DW_7_linear_sw_mask   0x00000003
+#define SDMA_PKT_COPY_TILED_DW_7_linear_sw_shift  16
+#define SDMA_PKT_COPY_TILED_DW_7_LINEAR_SW(x) (((x) & SDMA_PKT_COPY_TILED_DW_7_linear_sw_mask) << SDMA_PKT_COPY_TILED_DW_7_linear_sw_shift)
+
+/*define for tile_sw field*/
+#define SDMA_PKT_COPY_TILED_DW_7_tile_sw_offset 7
+#define SDMA_PKT_COPY_TILED_DW_7_tile_sw_mask   0x00000003
+#define SDMA_PKT_COPY_TILED_DW_7_tile_sw_shift  24
+#define SDMA_PKT_COPY_TILED_DW_7_TILE_SW(x) (((x) & SDMA_PKT_COPY_TILED_DW_7_tile_sw_mask) << SDMA_PKT_COPY_TILED_DW_7_tile_sw_shift)
+
+/*define for LINEAR_ADDR_LO word*/
+/*define for linear_addr_31_0 field*/
+#define SDMA_PKT_COPY_TILED_LINEAR_ADDR_LO_linear_addr_31_0_offset 8
+#define SDMA_PKT_COPY_TILED_LINEAR_ADDR_LO_linear_addr_31_0_mask   0xFFFFFFFF
+#define SDMA_PKT_COPY_TILED_LINEAR_ADDR_LO_linear_addr_31_0_shift  0
+#define SDMA_PKT_COPY_TILED_LINEAR_ADDR_LO_LINEAR_ADDR_31_0(x) (((x) & SDMA_PKT_COPY_TILED_LINEAR_ADDR_LO_linear_addr_31_0_mask) << SDMA_PKT_COPY_TILED_LINEAR_ADDR_LO_linear_addr_31_0_shift)
+
+/*define for LINEAR_ADDR_HI word*/
+/*define for linear_addr_63_32 field*/
+#define SDMA_PKT_COPY_TILED_LINEAR_ADDR_HI_linear_addr_63_32_offset 9
+#define SDMA_PKT_COPY_TILED_LINEAR_ADDR_HI_linear_addr_63_32_mask   0xFFFFFFFF
+#define SDMA_PKT_COPY_TILED_LINEAR_ADDR_HI_linear_addr_63_32_shift  0
+#define SDMA_PKT_COPY_TILED_LINEAR_ADDR_HI_LINEAR_ADDR_63_32(x) (((x) & SDMA_PKT_COPY_TILED_LINEAR_ADDR_HI_linear_addr_63_32_mask) << SDMA_PKT_COPY_TILED_LINEAR_ADDR_HI_linear_addr_63_32_shift)
+
+/*define for LINEAR_PITCH word*/
+/*define for linear_pitch field*/
+#define SDMA_PKT_COPY_TILED_LINEAR_PITCH_linear_pitch_offset 10
+#define SDMA_PKT_COPY_TILED_LINEAR_PITCH_linear_pitch_mask   0x0007FFFF
+#define SDMA_PKT_COPY_TILED_LINEAR_PITCH_linear_pitch_shift  0
+#define SDMA_PKT_COPY_TILED_LINEAR_PITCH_LINEAR_PITCH(x) (((x) & SDMA_PKT_COPY_TILED_LINEAR_PITCH_linear_pitch_mask) << SDMA_PKT_COPY_TILED_LINEAR_PITCH_linear_pitch_shift)
+
+/*define for COUNT word*/
+/*define for count field*/
+#define SDMA_PKT_COPY_TILED_COUNT_count_offset 11
+#define SDMA_PKT_COPY_TILED_COUNT_count_mask   0x000FFFFF
+#define SDMA_PKT_COPY_TILED_COUNT_count_shift  0
+#define SDMA_PKT_COPY_TILED_COUNT_COUNT(x) (((x) & SDMA_PKT_COPY_TILED_COUNT_count_mask) << SDMA_PKT_COPY_TILED_COUNT_count_shift)
+
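The *_offset constants name the dword index of each field within its packet, so the COPY_TILED command above spans dwords 0 through 11 (HEADER through COUNT). A hedged sketch of filling the header and count dwords of such a packet; the opcode and sub-opcode values are assumed placeholders defined only for this example, not by this header:

#include <stdint.h>

/* Assumed placeholder opcodes, for illustration only; the real values live
 * elsewhere in the driver, not in this header. */
#define EXAMPLE_OP_COPY		1
#define EXAMPLE_SUBOP_TILED	1

/* Hypothetical helper: fill the fixed dwords of a 12-dword COPY_TILED packet.
 * Every field is placed at its *_offset index and encoded via its FIELD(x)
 * mask/shift macro. */
static void example_fill_copy_tiled(uint32_t pkt[12], uint32_t count)
{
	pkt[SDMA_PKT_COPY_TILED_HEADER_op_offset] =
		SDMA_PKT_COPY_TILED_HEADER_OP(EXAMPLE_OP_COPY) |
		SDMA_PKT_COPY_TILED_HEADER_SUB_OP(EXAMPLE_SUBOP_TILED) |
		SDMA_PKT_COPY_TILED_HEADER_DETILE(0);
	pkt[SDMA_PKT_COPY_TILED_COUNT_count_offset] =
		SDMA_PKT_COPY_TILED_COUNT_COUNT(count);
	/* the remaining dwords (tiled/linear addresses, pitch, tiling
	 * parameters) are filled the same way via their own macros */
}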
+
+/*
+** Definitions for SDMA_PKT_COPY_L2T_BROADCAST packet
+*/
+
+/*define for HEADER word*/
+/*define for op field*/
+#define SDMA_PKT_COPY_L2T_BROADCAST_HEADER_op_offset 0
+#define SDMA_PKT_COPY_L2T_BROADCAST_HEADER_op_mask   0x000000FF
+#define SDMA_PKT_COPY_L2T_BROADCAST_HEADER_op_shift  0
+#define SDMA_PKT_COPY_L2T_BROADCAST_HEADER_OP(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_HEADER_op_mask) << SDMA_PKT_COPY_L2T_BROADCAST_HEADER_op_shift)
+
+/*define for sub_op field*/
+#define SDMA_PKT_COPY_L2T_BROADCAST_HEADER_sub_op_offset 0
+#define SDMA_PKT_COPY_L2T_BROADCAST_HEADER_sub_op_mask   0x000000FF
+#define SDMA_PKT_COPY_L2T_BROADCAST_HEADER_sub_op_shift  8
+#define SDMA_PKT_COPY_L2T_BROADCAST_HEADER_SUB_OP(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_HEADER_sub_op_mask) << SDMA_PKT_COPY_L2T_BROADCAST_HEADER_sub_op_shift)
+
+/*define for videocopy field*/
+#define SDMA_PKT_COPY_L2T_BROADCAST_HEADER_videocopy_offset 0
+#define SDMA_PKT_COPY_L2T_BROADCAST_HEADER_videocopy_mask   0x00000001
+#define SDMA_PKT_COPY_L2T_BROADCAST_HEADER_videocopy_shift  26
+#define SDMA_PKT_COPY_L2T_BROADCAST_HEADER_VIDEOCOPY(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_HEADER_videocopy_mask) << SDMA_PKT_COPY_L2T_BROADCAST_HEADER_videocopy_shift)
+
+/*define for broadcast field*/
+#define SDMA_PKT_COPY_L2T_BROADCAST_HEADER_broadcast_offset 0
+#define SDMA_PKT_COPY_L2T_BROADCAST_HEADER_broadcast_mask   0x00000001
+#define SDMA_PKT_COPY_L2T_BROADCAST_HEADER_broadcast_shift  27
+#define SDMA_PKT_COPY_L2T_BROADCAST_HEADER_BROADCAST(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_HEADER_broadcast_mask) << SDMA_PKT_COPY_L2T_BROADCAST_HEADER_broadcast_shift)
+
+/*define for TILED_ADDR_LO_0 word*/
+/*define for tiled_addr0_31_0 field*/
+#define SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_LO_0_tiled_addr0_31_0_offset 1
+#define SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_LO_0_tiled_addr0_31_0_mask   0xFFFFFFFF
+#define SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_LO_0_tiled_addr0_31_0_shift  0
+#define SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_LO_0_TILED_ADDR0_31_0(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_LO_0_tiled_addr0_31_0_mask) << SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_LO_0_tiled_addr0_31_0_shift)
+
+/*define for TILED_ADDR_HI_0 word*/
+/*define for tiled_addr0_63_32 field*/
+#define SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_HI_0_tiled_addr0_63_32_offset 2
+#define SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_HI_0_tiled_addr0_63_32_mask   0xFFFFFFFF
+#define SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_HI_0_tiled_addr0_63_32_shift  0
+#define SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_HI_0_TILED_ADDR0_63_32(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_HI_0_tiled_addr0_63_32_mask) << SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_HI_0_tiled_addr0_63_32_shift)
+
+/*define for TILED_ADDR_LO_1 word*/
+/*define for tiled_addr1_31_0 field*/
+#define SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_LO_1_tiled_addr1_31_0_offset 3
+#define SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_LO_1_tiled_addr1_31_0_mask   0xFFFFFFFF
+#define SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_LO_1_tiled_addr1_31_0_shift  0
+#define SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_LO_1_TILED_ADDR1_31_0(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_LO_1_tiled_addr1_31_0_mask) << SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_LO_1_tiled_addr1_31_0_shift)
+
+/*define for TILED_ADDR_HI_1 word*/
+/*define for tiled_addr1_63_32 field*/
+#define SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_HI_1_tiled_addr1_63_32_offset 4
+#define SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_HI_1_tiled_addr1_63_32_mask   0xFFFFFFFF
+#define SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_HI_1_tiled_addr1_63_32_shift  0
+#define SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_HI_1_TILED_ADDR1_63_32(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_HI_1_tiled_addr1_63_32_mask) << SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_HI_1_tiled_addr1_63_32_shift)
+
+/*define for DW_5 word*/
+/*define for pitch_in_tile field*/
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_5_pitch_in_tile_offset 5
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_5_pitch_in_tile_mask   0x000007FF
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_5_pitch_in_tile_shift  0
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_5_PITCH_IN_TILE(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_DW_5_pitch_in_tile_mask) << SDMA_PKT_COPY_L2T_BROADCAST_DW_5_pitch_in_tile_shift)
+
+/*define for height field*/
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_5_height_offset 5
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_5_height_mask   0x00003FFF
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_5_height_shift  16
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_5_HEIGHT(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_DW_5_height_mask) << SDMA_PKT_COPY_L2T_BROADCAST_DW_5_height_shift)
+
+/*define for DW_6 word*/
+/*define for slice_pitch field*/
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_6_slice_pitch_offset 6
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_6_slice_pitch_mask   0x003FFFFF
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_6_slice_pitch_shift  0
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_6_SLICE_PITCH(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_DW_6_slice_pitch_mask) << SDMA_PKT_COPY_L2T_BROADCAST_DW_6_slice_pitch_shift)
+
+/*define for DW_7 word*/
+/*define for element_size field*/
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_element_size_offset 7
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_element_size_mask   0x00000007
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_element_size_shift  0
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_ELEMENT_SIZE(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_DW_7_element_size_mask) << SDMA_PKT_COPY_L2T_BROADCAST_DW_7_element_size_shift)
+
+/*define for array_mode field*/
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_array_mode_offset 7
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_array_mode_mask   0x0000000F
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_array_mode_shift  3
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_ARRAY_MODE(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_DW_7_array_mode_mask) << SDMA_PKT_COPY_L2T_BROADCAST_DW_7_array_mode_shift)
+
+/*define for mit_mode field*/
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_mit_mode_offset 7
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_mit_mode_mask   0x00000007
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_mit_mode_shift  8
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_MIT_MODE(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_DW_7_mit_mode_mask) << SDMA_PKT_COPY_L2T_BROADCAST_DW_7_mit_mode_shift)
+
+/*define for tilesplit_size field*/
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_tilesplit_size_offset 7
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_tilesplit_size_mask   0x00000007
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_tilesplit_size_shift  11
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_TILESPLIT_SIZE(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_DW_7_tilesplit_size_mask) << SDMA_PKT_COPY_L2T_BROADCAST_DW_7_tilesplit_size_shift)
+
+/*define for bank_w field*/
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_bank_w_offset 7
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_bank_w_mask   0x00000003
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_bank_w_shift  15
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_BANK_W(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_DW_7_bank_w_mask) << SDMA_PKT_COPY_L2T_BROADCAST_DW_7_bank_w_shift)
+
+/*define for bank_h field*/
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_bank_h_offset 7
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_bank_h_mask   0x00000003
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_bank_h_shift  18
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_BANK_H(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_DW_7_bank_h_mask) << SDMA_PKT_COPY_L2T_BROADCAST_DW_7_bank_h_shift)
+
+/*define for num_bank field*/
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_num_bank_offset 7
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_num_bank_mask   0x00000003
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_num_bank_shift  21
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_NUM_BANK(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_DW_7_num_bank_mask) << SDMA_PKT_COPY_L2T_BROADCAST_DW_7_num_bank_shift)
+
+/*define for mat_aspt field*/
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_mat_aspt_offset 7
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_mat_aspt_mask   0x00000003
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_mat_aspt_shift  24
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_MAT_ASPT(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_DW_7_mat_aspt_mask) << SDMA_PKT_COPY_L2T_BROADCAST_DW_7_mat_aspt_shift)
+
+/*define for pipe_config field*/
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_pipe_config_offset 7
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_pipe_config_mask   0x0000001F
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_pipe_config_shift  26
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_PIPE_CONFIG(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_DW_7_pipe_config_mask) << SDMA_PKT_COPY_L2T_BROADCAST_DW_7_pipe_config_shift)
+
+/*define for DW_8 word*/
+/*define for x field*/
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_8_x_offset 8
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_8_x_mask   0x00003FFF
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_8_x_shift  0
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_8_X(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_DW_8_x_mask) << SDMA_PKT_COPY_L2T_BROADCAST_DW_8_x_shift)
+
+/*define for y field*/
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_8_y_offset 8
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_8_y_mask   0x00003FFF
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_8_y_shift  16
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_8_Y(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_DW_8_y_mask) << SDMA_PKT_COPY_L2T_BROADCAST_DW_8_y_shift)
+
+/*define for DW_9 word*/
+/*define for z field*/
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_9_z_offset 9
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_9_z_mask   0x00000FFF
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_9_z_shift  0
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_9_Z(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_DW_9_z_mask) << SDMA_PKT_COPY_L2T_BROADCAST_DW_9_z_shift)
+
+/*define for DW_10 word*/
+/*define for dst2_sw field*/
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_10_dst2_sw_offset 10
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_10_dst2_sw_mask   0x00000003
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_10_dst2_sw_shift  8
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_10_DST2_SW(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_DW_10_dst2_sw_mask) << SDMA_PKT_COPY_L2T_BROADCAST_DW_10_dst2_sw_shift)
+
+/*define for dst2_ha field*/
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_10_dst2_ha_offset 10
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_10_dst2_ha_mask   0x00000001
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_10_dst2_ha_shift  14
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_10_DST2_HA(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_DW_10_dst2_ha_mask) << SDMA_PKT_COPY_L2T_BROADCAST_DW_10_dst2_ha_shift)
+
+/*define for linear_sw field*/
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_10_linear_sw_offset 10
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_10_linear_sw_mask   0x00000003
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_10_linear_sw_shift  16
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_10_LINEAR_SW(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_DW_10_linear_sw_mask) << SDMA_PKT_COPY_L2T_BROADCAST_DW_10_linear_sw_shift)
+
+/*define for tile_sw field*/
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_10_tile_sw_offset 10
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_10_tile_sw_mask   0x00000003
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_10_tile_sw_shift  24
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_10_TILE_SW(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_DW_10_tile_sw_mask) << SDMA_PKT_COPY_L2T_BROADCAST_DW_10_tile_sw_shift)
+
+/*define for LINEAR_ADDR_LO word*/
+/*define for linear_addr_31_0 field*/
+#define SDMA_PKT_COPY_L2T_BROADCAST_LINEAR_ADDR_LO_linear_addr_31_0_offset 11
+#define SDMA_PKT_COPY_L2T_BROADCAST_LINEAR_ADDR_LO_linear_addr_31_0_mask   0xFFFFFFFF
+#define SDMA_PKT_COPY_L2T_BROADCAST_LINEAR_ADDR_LO_linear_addr_31_0_shift  0
+#define SDMA_PKT_COPY_L2T_BROADCAST_LINEAR_ADDR_LO_LINEAR_ADDR_31_0(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_LINEAR_ADDR_LO_linear_addr_31_0_mask) << SDMA_PKT_COPY_L2T_BROADCAST_LINEAR_ADDR_LO_linear_addr_31_0_shift)
+
+/*define for LINEAR_ADDR_HI word*/
+/*define for linear_addr_63_32 field*/
+#define SDMA_PKT_COPY_L2T_BROADCAST_LINEAR_ADDR_HI_linear_addr_63_32_offset 12
+#define SDMA_PKT_COPY_L2T_BROADCAST_LINEAR_ADDR_HI_linear_addr_63_32_mask   0xFFFFFFFF
+#define SDMA_PKT_COPY_L2T_BROADCAST_LINEAR_ADDR_HI_linear_addr_63_32_shift  0
+#define SDMA_PKT_COPY_L2T_BROADCAST_LINEAR_ADDR_HI_LINEAR_ADDR_63_32(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_LINEAR_ADDR_HI_linear_addr_63_32_mask) << SDMA_PKT_COPY_L2T_BROADCAST_LINEAR_ADDR_HI_linear_addr_63_32_shift)
+
+/*define for LINEAR_PITCH word*/
+/*define for linear_pitch field*/
+#define SDMA_PKT_COPY_L2T_BROADCAST_LINEAR_PITCH_linear_pitch_offset 13
+#define SDMA_PKT_COPY_L2T_BROADCAST_LINEAR_PITCH_linear_pitch_mask   0x0007FFFF
+#define SDMA_PKT_COPY_L2T_BROADCAST_LINEAR_PITCH_linear_pitch_shift  0
+#define SDMA_PKT_COPY_L2T_BROADCAST_LINEAR_PITCH_LINEAR_PITCH(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_LINEAR_PITCH_linear_pitch_mask) << SDMA_PKT_COPY_L2T_BROADCAST_LINEAR_PITCH_linear_pitch_shift)
+
+/*define for COUNT word*/
+/*define for count field*/
+#define SDMA_PKT_COPY_L2T_BROADCAST_COUNT_count_offset 14
+#define SDMA_PKT_COPY_L2T_BROADCAST_COUNT_count_mask   0x000FFFFF
+#define SDMA_PKT_COPY_L2T_BROADCAST_COUNT_count_shift  0
+#define SDMA_PKT_COPY_L2T_BROADCAST_COUNT_COUNT(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_COUNT_count_mask) << SDMA_PKT_COPY_L2T_BROADCAST_COUNT_count_shift)
+
+
+/*
+** Definitions for SDMA_PKT_COPY_T2T packet
+*/
+
+/*define for HEADER word*/
+/*define for op field*/
+#define SDMA_PKT_COPY_T2T_HEADER_op_offset 0
+#define SDMA_PKT_COPY_T2T_HEADER_op_mask   0x000000FF
+#define SDMA_PKT_COPY_T2T_HEADER_op_shift  0
+#define SDMA_PKT_COPY_T2T_HEADER_OP(x) (((x) & SDMA_PKT_COPY_T2T_HEADER_op_mask) << SDMA_PKT_COPY_T2T_HEADER_op_shift)
+
+/*define for sub_op field*/
+#define SDMA_PKT_COPY_T2T_HEADER_sub_op_offset 0
+#define SDMA_PKT_COPY_T2T_HEADER_sub_op_mask   0x000000FF
+#define SDMA_PKT_COPY_T2T_HEADER_sub_op_shift  8
+#define SDMA_PKT_COPY_T2T_HEADER_SUB_OP(x) (((x) & SDMA_PKT_COPY_T2T_HEADER_sub_op_mask) << SDMA_PKT_COPY_T2T_HEADER_sub_op_shift)
+
+/*define for SRC_ADDR_LO word*/
+/*define for src_addr_31_0 field*/
+#define SDMA_PKT_COPY_T2T_SRC_ADDR_LO_src_addr_31_0_offset 1
+#define SDMA_PKT_COPY_T2T_SRC_ADDR_LO_src_addr_31_0_mask   0xFFFFFFFF
+#define SDMA_PKT_COPY_T2T_SRC_ADDR_LO_src_addr_31_0_shift  0
+#define SDMA_PKT_COPY_T2T_SRC_ADDR_LO_SRC_ADDR_31_0(x) (((x) & SDMA_PKT_COPY_T2T_SRC_ADDR_LO_src_addr_31_0_mask) << SDMA_PKT_COPY_T2T_SRC_ADDR_LO_src_addr_31_0_shift)
+
+/*define for SRC_ADDR_HI word*/
+/*define for src_addr_63_32 field*/
+#define SDMA_PKT_COPY_T2T_SRC_ADDR_HI_src_addr_63_32_offset 2
+#define SDMA_PKT_COPY_T2T_SRC_ADDR_HI_src_addr_63_32_mask   0xFFFFFFFF
+#define SDMA_PKT_COPY_T2T_SRC_ADDR_HI_src_addr_63_32_shift  0
+#define SDMA_PKT_COPY_T2T_SRC_ADDR_HI_SRC_ADDR_63_32(x) (((x) & SDMA_PKT_COPY_T2T_SRC_ADDR_HI_src_addr_63_32_mask) << SDMA_PKT_COPY_T2T_SRC_ADDR_HI_src_addr_63_32_shift)
+
+/*define for DW_3 word*/
+/*define for src_x field*/
+#define SDMA_PKT_COPY_T2T_DW_3_src_x_offset 3
+#define SDMA_PKT_COPY_T2T_DW_3_src_x_mask   0x00003FFF
+#define SDMA_PKT_COPY_T2T_DW_3_src_x_shift  0
+#define SDMA_PKT_COPY_T2T_DW_3_SRC_X(x) (((x) & SDMA_PKT_COPY_T2T_DW_3_src_x_mask) << SDMA_PKT_COPY_T2T_DW_3_src_x_shift)
+
+/*define for src_y field*/
+#define SDMA_PKT_COPY_T2T_DW_3_src_y_offset 3
+#define SDMA_PKT_COPY_T2T_DW_3_src_y_mask   0x00003FFF
+#define SDMA_PKT_COPY_T2T_DW_3_src_y_shift  16
+#define SDMA_PKT_COPY_T2T_DW_3_SRC_Y(x) (((x) & SDMA_PKT_COPY_T2T_DW_3_src_y_mask) << SDMA_PKT_COPY_T2T_DW_3_src_y_shift)
+
+/*define for DW_4 word*/
+/*define for src_z field*/
+#define SDMA_PKT_COPY_T2T_DW_4_src_z_offset 4
+#define SDMA_PKT_COPY_T2T_DW_4_src_z_mask   0x000007FF
+#define SDMA_PKT_COPY_T2T_DW_4_src_z_shift  0
+#define SDMA_PKT_COPY_T2T_DW_4_SRC_Z(x) (((x) & SDMA_PKT_COPY_T2T_DW_4_src_z_mask) << SDMA_PKT_COPY_T2T_DW_4_src_z_shift)
+
+/*define for src_pitch_in_tile field*/
+#define SDMA_PKT_COPY_T2T_DW_4_src_pitch_in_tile_offset 4
+#define SDMA_PKT_COPY_T2T_DW_4_src_pitch_in_tile_mask   0x00000FFF
+#define SDMA_PKT_COPY_T2T_DW_4_src_pitch_in_tile_shift  16
+#define SDMA_PKT_COPY_T2T_DW_4_SRC_PITCH_IN_TILE(x) (((x) & SDMA_PKT_COPY_T2T_DW_4_src_pitch_in_tile_mask) << SDMA_PKT_COPY_T2T_DW_4_src_pitch_in_tile_shift)
+
+/*define for DW_5 word*/
+/*define for src_slice_pitch field*/
+#define SDMA_PKT_COPY_T2T_DW_5_src_slice_pitch_offset 5
+#define SDMA_PKT_COPY_T2T_DW_5_src_slice_pitch_mask   0x003FFFFF
+#define SDMA_PKT_COPY_T2T_DW_5_src_slice_pitch_shift  0
+#define SDMA_PKT_COPY_T2T_DW_5_SRC_SLICE_PITCH(x) (((x) & SDMA_PKT_COPY_T2T_DW_5_src_slice_pitch_mask) << SDMA_PKT_COPY_T2T_DW_5_src_slice_pitch_shift)
+
+/*define for DW_6 word*/
+/*define for src_element_size field*/
+#define SDMA_PKT_COPY_T2T_DW_6_src_element_size_offset 6
+#define SDMA_PKT_COPY_T2T_DW_6_src_element_size_mask   0x00000007
+#define SDMA_PKT_COPY_T2T_DW_6_src_element_size_shift  0
+#define SDMA_PKT_COPY_T2T_DW_6_SRC_ELEMENT_SIZE(x) (((x) & SDMA_PKT_COPY_T2T_DW_6_src_element_size_mask) << SDMA_PKT_COPY_T2T_DW_6_src_element_size_shift)
+
+/*define for src_array_mode field*/
+#define SDMA_PKT_COPY_T2T_DW_6_src_array_mode_offset 6
+#define SDMA_PKT_COPY_T2T_DW_6_src_array_mode_mask   0x0000000F
+#define SDMA_PKT_COPY_T2T_DW_6_src_array_mode_shift  3
+#define SDMA_PKT_COPY_T2T_DW_6_SRC_ARRAY_MODE(x) (((x) & SDMA_PKT_COPY_T2T_DW_6_src_array_mode_mask) << SDMA_PKT_COPY_T2T_DW_6_src_array_mode_shift)
+
+/*define for src_mit_mode field*/
+#define SDMA_PKT_COPY_T2T_DW_6_src_mit_mode_offset 6
+#define SDMA_PKT_COPY_T2T_DW_6_src_mit_mode_mask   0x00000007
+#define SDMA_PKT_COPY_T2T_DW_6_src_mit_mode_shift  8
+#define SDMA_PKT_COPY_T2T_DW_6_SRC_MIT_MODE(x) (((x) & SDMA_PKT_COPY_T2T_DW_6_src_mit_mode_mask) << SDMA_PKT_COPY_T2T_DW_6_src_mit_mode_shift)
+
+/*define for src_tilesplit_size field*/
+#define SDMA_PKT_COPY_T2T_DW_6_src_tilesplit_size_offset 6
+#define SDMA_PKT_COPY_T2T_DW_6_src_tilesplit_size_mask   0x00000007
+#define SDMA_PKT_COPY_T2T_DW_6_src_tilesplit_size_shift  11
+#define SDMA_PKT_COPY_T2T_DW_6_SRC_TILESPLIT_SIZE(x) (((x) & SDMA_PKT_COPY_T2T_DW_6_src_tilesplit_size_mask) << SDMA_PKT_COPY_T2T_DW_6_src_tilesplit_size_shift)
+
+/*define for src_bank_w field*/
+#define SDMA_PKT_COPY_T2T_DW_6_src_bank_w_offset 6
+#define SDMA_PKT_COPY_T2T_DW_6_src_bank_w_mask   0x00000003
+#define SDMA_PKT_COPY_T2T_DW_6_src_bank_w_shift  15
+#define SDMA_PKT_COPY_T2T_DW_6_SRC_BANK_W(x) (((x) & SDMA_PKT_COPY_T2T_DW_6_src_bank_w_mask) << SDMA_PKT_COPY_T2T_DW_6_src_bank_w_shift)
+
+/*define for src_bank_h field*/
+#define SDMA_PKT_COPY_T2T_DW_6_src_bank_h_offset 6
+#define SDMA_PKT_COPY_T2T_DW_6_src_bank_h_mask   0x00000003
+#define SDMA_PKT_COPY_T2T_DW_6_src_bank_h_shift  18
+#define SDMA_PKT_COPY_T2T_DW_6_SRC_BANK_H(x) (((x) & SDMA_PKT_COPY_T2T_DW_6_src_bank_h_mask) << SDMA_PKT_COPY_T2T_DW_6_src_bank_h_shift)
+
+/*define for src_num_bank field*/
+#define SDMA_PKT_COPY_T2T_DW_6_src_num_bank_offset 6
+#define SDMA_PKT_COPY_T2T_DW_6_src_num_bank_mask   0x00000003
+#define SDMA_PKT_COPY_T2T_DW_6_src_num_bank_shift  21
+#define SDMA_PKT_COPY_T2T_DW_6_SRC_NUM_BANK(x) (((x) & SDMA_PKT_COPY_T2T_DW_6_src_num_bank_mask) << SDMA_PKT_COPY_T2T_DW_6_src_num_bank_shift)
+
+/*define for src_mat_aspt field*/
+#define SDMA_PKT_COPY_T2T_DW_6_src_mat_aspt_offset 6
+#define SDMA_PKT_COPY_T2T_DW_6_src_mat_aspt_mask   0x00000003
+#define SDMA_PKT_COPY_T2T_DW_6_src_mat_aspt_shift  24
+#define SDMA_PKT_COPY_T2T_DW_6_SRC_MAT_ASPT(x) (((x) & SDMA_PKT_COPY_T2T_DW_6_src_mat_aspt_mask) << SDMA_PKT_COPY_T2T_DW_6_src_mat_aspt_shift)
+
+/*define for src_pipe_config field*/
+#define SDMA_PKT_COPY_T2T_DW_6_src_pipe_config_offset 6
+#define SDMA_PKT_COPY_T2T_DW_6_src_pipe_config_mask   0x0000001F
+#define SDMA_PKT_COPY_T2T_DW_6_src_pipe_config_shift  26
+#define SDMA_PKT_COPY_T2T_DW_6_SRC_PIPE_CONFIG(x) (((x) & SDMA_PKT_COPY_T2T_DW_6_src_pipe_config_mask) << SDMA_PKT_COPY_T2T_DW_6_src_pipe_config_shift)
+
+/*define for DST_ADDR_LO word*/
+/*define for dst_addr_31_0 field*/
+#define SDMA_PKT_COPY_T2T_DST_ADDR_LO_dst_addr_31_0_offset 7
+#define SDMA_PKT_COPY_T2T_DST_ADDR_LO_dst_addr_31_0_mask   0xFFFFFFFF
+#define SDMA_PKT_COPY_T2T_DST_ADDR_LO_dst_addr_31_0_shift  0
+#define SDMA_PKT_COPY_T2T_DST_ADDR_LO_DST_ADDR_31_0(x) (((x) & SDMA_PKT_COPY_T2T_DST_ADDR_LO_dst_addr_31_0_mask) << SDMA_PKT_COPY_T2T_DST_ADDR_LO_dst_addr_31_0_shift)
+
+/*define for DST_ADDR_HI word*/
+/*define for dst_addr_63_32 field*/
+#define SDMA_PKT_COPY_T2T_DST_ADDR_HI_dst_addr_63_32_offset 8
+#define SDMA_PKT_COPY_T2T_DST_ADDR_HI_dst_addr_63_32_mask   0xFFFFFFFF
+#define SDMA_PKT_COPY_T2T_DST_ADDR_HI_dst_addr_63_32_shift  0
+#define SDMA_PKT_COPY_T2T_DST_ADDR_HI_DST_ADDR_63_32(x) (((x) & SDMA_PKT_COPY_T2T_DST_ADDR_HI_dst_addr_63_32_mask) << SDMA_PKT_COPY_T2T_DST_ADDR_HI_dst_addr_63_32_shift)
+
+/*define for DW_9 word*/
+/*define for dst_x field*/
+#define SDMA_PKT_COPY_T2T_DW_9_dst_x_offset 9
+#define SDMA_PKT_COPY_T2T_DW_9_dst_x_mask   0x00003FFF
+#define SDMA_PKT_COPY_T2T_DW_9_dst_x_shift  0
+#define SDMA_PKT_COPY_T2T_DW_9_DST_X(x) (((x) & SDMA_PKT_COPY_T2T_DW_9_dst_x_mask) << SDMA_PKT_COPY_T2T_DW_9_dst_x_shift)
+
+/*define for dst_y field*/
+#define SDMA_PKT_COPY_T2T_DW_9_dst_y_offset 9
+#define SDMA_PKT_COPY_T2T_DW_9_dst_y_mask   0x00003FFF
+#define SDMA_PKT_COPY_T2T_DW_9_dst_y_shift  16
+#define SDMA_PKT_COPY_T2T_DW_9_DST_Y(x) (((x) & SDMA_PKT_COPY_T2T_DW_9_dst_y_mask) << SDMA_PKT_COPY_T2T_DW_9_dst_y_shift)
+
+/*define for DW_10 word*/
+/*define for dst_z field*/
+#define SDMA_PKT_COPY_T2T_DW_10_dst_z_offset 10
+#define SDMA_PKT_COPY_T2T_DW_10_dst_z_mask   0x000007FF
+#define SDMA_PKT_COPY_T2T_DW_10_dst_z_shift  0
+#define SDMA_PKT_COPY_T2T_DW_10_DST_Z(x) (((x) & SDMA_PKT_COPY_T2T_DW_10_dst_z_mask) << SDMA_PKT_COPY_T2T_DW_10_dst_z_shift)
+
+/*define for dst_pitch_in_tile field*/
+#define SDMA_PKT_COPY_T2T_DW_10_dst_pitch_in_tile_offset 10
+#define SDMA_PKT_COPY_T2T_DW_10_dst_pitch_in_tile_mask   0x00000FFF
+#define SDMA_PKT_COPY_T2T_DW_10_dst_pitch_in_tile_shift  16
+#define SDMA_PKT_COPY_T2T_DW_10_DST_PITCH_IN_TILE(x) (((x) & SDMA_PKT_COPY_T2T_DW_10_dst_pitch_in_tile_mask) << SDMA_PKT_COPY_T2T_DW_10_dst_pitch_in_tile_shift)
+
+/*define for DW_11 word*/
+/*define for dst_slice_pitch field*/
+#define SDMA_PKT_COPY_T2T_DW_11_dst_slice_pitch_offset 11
+#define SDMA_PKT_COPY_T2T_DW_11_dst_slice_pitch_mask   0x003FFFFF
+#define SDMA_PKT_COPY_T2T_DW_11_dst_slice_pitch_shift  0
+#define SDMA_PKT_COPY_T2T_DW_11_DST_SLICE_PITCH(x) (((x) & SDMA_PKT_COPY_T2T_DW_11_dst_slice_pitch_mask) << SDMA_PKT_COPY_T2T_DW_11_dst_slice_pitch_shift)
+
+/*define for DW_12 word*/
+/*define for dst_array_mode field*/
+#define SDMA_PKT_COPY_T2T_DW_12_dst_array_mode_offset 12
+#define SDMA_PKT_COPY_T2T_DW_12_dst_array_mode_mask   0x0000000F
+#define SDMA_PKT_COPY_T2T_DW_12_dst_array_mode_shift  3
+#define SDMA_PKT_COPY_T2T_DW_12_DST_ARRAY_MODE(x) (((x) & SDMA_PKT_COPY_T2T_DW_12_dst_array_mode_mask) << SDMA_PKT_COPY_T2T_DW_12_dst_array_mode_shift)
+
+/*define for dst_mit_mode field*/
+#define SDMA_PKT_COPY_T2T_DW_12_dst_mit_mode_offset 12
+#define SDMA_PKT_COPY_T2T_DW_12_dst_mit_mode_mask   0x00000007
+#define SDMA_PKT_COPY_T2T_DW_12_dst_mit_mode_shift  8
+#define SDMA_PKT_COPY_T2T_DW_12_DST_MIT_MODE(x) (((x) & SDMA_PKT_COPY_T2T_DW_12_dst_mit_mode_mask) << SDMA_PKT_COPY_T2T_DW_12_dst_mit_mode_shift)
+
+/*define for dst_tilesplit_size field*/
+#define SDMA_PKT_COPY_T2T_DW_12_dst_tilesplit_size_offset 12
+#define SDMA_PKT_COPY_T2T_DW_12_dst_tilesplit_size_mask   0x00000007
+#define SDMA_PKT_COPY_T2T_DW_12_dst_tilesplit_size_shift  11
+#define SDMA_PKT_COPY_T2T_DW_12_DST_TILESPLIT_SIZE(x) (((x) & SDMA_PKT_COPY_T2T_DW_12_dst_tilesplit_size_mask) << SDMA_PKT_COPY_T2T_DW_12_dst_tilesplit_size_shift)
+
+/*define for dst_bank_w field*/
+#define SDMA_PKT_COPY_T2T_DW_12_dst_bank_w_offset 12
+#define SDMA_PKT_COPY_T2T_DW_12_dst_bank_w_mask   0x00000003
+#define SDMA_PKT_COPY_T2T_DW_12_dst_bank_w_shift  15
+#define SDMA_PKT_COPY_T2T_DW_12_DST_BANK_W(x) (((x) & SDMA_PKT_COPY_T2T_DW_12_dst_bank_w_mask) << SDMA_PKT_COPY_T2T_DW_12_dst_bank_w_shift)
+
+/*define for dst_bank_h field*/
+#define SDMA_PKT_COPY_T2T_DW_12_dst_bank_h_offset 12
+#define SDMA_PKT_COPY_T2T_DW_12_dst_bank_h_mask   0x00000003
+#define SDMA_PKT_COPY_T2T_DW_12_dst_bank_h_shift  18
+#define SDMA_PKT_COPY_T2T_DW_12_DST_BANK_H(x) (((x) & SDMA_PKT_COPY_T2T_DW_12_dst_bank_h_mask) << SDMA_PKT_COPY_T2T_DW_12_dst_bank_h_shift)
+
+/*define for dst_num_bank field*/
+#define SDMA_PKT_COPY_T2T_DW_12_dst_num_bank_offset 12
+#define SDMA_PKT_COPY_T2T_DW_12_dst_num_bank_mask   0x00000003
+#define SDMA_PKT_COPY_T2T_DW_12_dst_num_bank_shift  21
+#define SDMA_PKT_COPY_T2T_DW_12_DST_NUM_BANK(x) (((x) & SDMA_PKT_COPY_T2T_DW_12_dst_num_bank_mask) << SDMA_PKT_COPY_T2T_DW_12_dst_num_bank_shift)
+
+/*define for dst_mat_aspt field*/
+#define SDMA_PKT_COPY_T2T_DW_12_dst_mat_aspt_offset 12
+#define SDMA_PKT_COPY_T2T_DW_12_dst_mat_aspt_mask   0x00000003
+#define SDMA_PKT_COPY_T2T_DW_12_dst_mat_aspt_shift  24
+#define SDMA_PKT_COPY_T2T_DW_12_DST_MAT_ASPT(x) (((x) & SDMA_PKT_COPY_T2T_DW_12_dst_mat_aspt_mask) << SDMA_PKT_COPY_T2T_DW_12_dst_mat_aspt_shift)
+
+/*define for dst_pipe_config field*/
+#define SDMA_PKT_COPY_T2T_DW_12_dst_pipe_config_offset 12
+#define SDMA_PKT_COPY_T2T_DW_12_dst_pipe_config_mask   0x0000001F
+#define SDMA_PKT_COPY_T2T_DW_12_dst_pipe_config_shift  26
+#define SDMA_PKT_COPY_T2T_DW_12_DST_PIPE_CONFIG(x) (((x) & SDMA_PKT_COPY_T2T_DW_12_dst_pipe_config_mask) << SDMA_PKT_COPY_T2T_DW_12_dst_pipe_config_shift)
+
+/*define for DW_13 word*/
+/*define for rect_x field*/
+#define SDMA_PKT_COPY_T2T_DW_13_rect_x_offset 13
+#define SDMA_PKT_COPY_T2T_DW_13_rect_x_mask   0x00003FFF
+#define SDMA_PKT_COPY_T2T_DW_13_rect_x_shift  0
+#define SDMA_PKT_COPY_T2T_DW_13_RECT_X(x) (((x) & SDMA_PKT_COPY_T2T_DW_13_rect_x_mask) << SDMA_PKT_COPY_T2T_DW_13_rect_x_shift)
+
+/*define for rect_y field*/
+#define SDMA_PKT_COPY_T2T_DW_13_rect_y_offset 13
+#define SDMA_PKT_COPY_T2T_DW_13_rect_y_mask   0x00003FFF
+#define SDMA_PKT_COPY_T2T_DW_13_rect_y_shift  16
+#define SDMA_PKT_COPY_T2T_DW_13_RECT_Y(x) (((x) & SDMA_PKT_COPY_T2T_DW_13_rect_y_mask) << SDMA_PKT_COPY_T2T_DW_13_rect_y_shift)
+
+/*define for DW_14 word*/
+/*define for rect_z field*/
+#define SDMA_PKT_COPY_T2T_DW_14_rect_z_offset 14
+#define SDMA_PKT_COPY_T2T_DW_14_rect_z_mask   0x000007FF
+#define SDMA_PKT_COPY_T2T_DW_14_rect_z_shift  0
+#define SDMA_PKT_COPY_T2T_DW_14_RECT_Z(x) (((x) & SDMA_PKT_COPY_T2T_DW_14_rect_z_mask) << SDMA_PKT_COPY_T2T_DW_14_rect_z_shift)
+
+/*define for dst_sw field*/
+#define SDMA_PKT_COPY_T2T_DW_14_dst_sw_offset 14
+#define SDMA_PKT_COPY_T2T_DW_14_dst_sw_mask   0x00000003
+#define SDMA_PKT_COPY_T2T_DW_14_dst_sw_shift  16
+#define SDMA_PKT_COPY_T2T_DW_14_DST_SW(x) (((x) & SDMA_PKT_COPY_T2T_DW_14_dst_sw_mask) << SDMA_PKT_COPY_T2T_DW_14_dst_sw_shift)
+
+/*define for src_sw field*/
+#define SDMA_PKT_COPY_T2T_DW_14_src_sw_offset 14
+#define SDMA_PKT_COPY_T2T_DW_14_src_sw_mask   0x00000003
+#define SDMA_PKT_COPY_T2T_DW_14_src_sw_shift  24
+#define SDMA_PKT_COPY_T2T_DW_14_SRC_SW(x) (((x) & SDMA_PKT_COPY_T2T_DW_14_src_sw_mask) << SDMA_PKT_COPY_T2T_DW_14_src_sw_shift)
+
+
+/*
+** Definitions for SDMA_PKT_COPY_TILED_SUBWIN packet
+*/
+
+/*define for HEADER word*/
+/*define for op field*/
+#define SDMA_PKT_COPY_TILED_SUBWIN_HEADER_op_offset 0
+#define SDMA_PKT_COPY_TILED_SUBWIN_HEADER_op_mask   0x000000FF
+#define SDMA_PKT_COPY_TILED_SUBWIN_HEADER_op_shift  0
+#define SDMA_PKT_COPY_TILED_SUBWIN_HEADER_OP(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_HEADER_op_mask) << SDMA_PKT_COPY_TILED_SUBWIN_HEADER_op_shift)
+
+/*define for sub_op field*/
+#define SDMA_PKT_COPY_TILED_SUBWIN_HEADER_sub_op_offset 0
+#define SDMA_PKT_COPY_TILED_SUBWIN_HEADER_sub_op_mask   0x000000FF
+#define SDMA_PKT_COPY_TILED_SUBWIN_HEADER_sub_op_shift  8
+#define SDMA_PKT_COPY_TILED_SUBWIN_HEADER_SUB_OP(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_HEADER_sub_op_mask) << SDMA_PKT_COPY_TILED_SUBWIN_HEADER_sub_op_shift)
+
+/*define for detile field*/
+#define SDMA_PKT_COPY_TILED_SUBWIN_HEADER_detile_offset 0
+#define SDMA_PKT_COPY_TILED_SUBWIN_HEADER_detile_mask   0x00000001
+#define SDMA_PKT_COPY_TILED_SUBWIN_HEADER_detile_shift  31
+#define SDMA_PKT_COPY_TILED_SUBWIN_HEADER_DETILE(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_HEADER_detile_mask) << SDMA_PKT_COPY_TILED_SUBWIN_HEADER_detile_shift)
+
+/*define for TILED_ADDR_LO word*/
+/*define for tiled_addr_31_0 field*/
+#define SDMA_PKT_COPY_TILED_SUBWIN_TILED_ADDR_LO_tiled_addr_31_0_offset 1
+#define SDMA_PKT_COPY_TILED_SUBWIN_TILED_ADDR_LO_tiled_addr_31_0_mask   0xFFFFFFFF
+#define SDMA_PKT_COPY_TILED_SUBWIN_TILED_ADDR_LO_tiled_addr_31_0_shift  0
+#define SDMA_PKT_COPY_TILED_SUBWIN_TILED_ADDR_LO_TILED_ADDR_31_0(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_TILED_ADDR_LO_tiled_addr_31_0_mask) << SDMA_PKT_COPY_TILED_SUBWIN_TILED_ADDR_LO_tiled_addr_31_0_shift)
+
+/*define for TILED_ADDR_HI word*/
+/*define for tiled_addr_63_32 field*/
+#define SDMA_PKT_COPY_TILED_SUBWIN_TILED_ADDR_HI_tiled_addr_63_32_offset 2
+#define SDMA_PKT_COPY_TILED_SUBWIN_TILED_ADDR_HI_tiled_addr_63_32_mask   0xFFFFFFFF
+#define SDMA_PKT_COPY_TILED_SUBWIN_TILED_ADDR_HI_tiled_addr_63_32_shift  0
+#define SDMA_PKT_COPY_TILED_SUBWIN_TILED_ADDR_HI_TILED_ADDR_63_32(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_TILED_ADDR_HI_tiled_addr_63_32_mask) << SDMA_PKT_COPY_TILED_SUBWIN_TILED_ADDR_HI_tiled_addr_63_32_shift)
+
+/*define for DW_3 word*/
+/*define for tiled_x field*/
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_3_tiled_x_offset 3
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_3_tiled_x_mask   0x00003FFF
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_3_tiled_x_shift  0
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_3_TILED_X(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_3_tiled_x_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_3_tiled_x_shift)
+
+/*define for tiled_y field*/
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_3_tiled_y_offset 3
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_3_tiled_y_mask   0x00003FFF
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_3_tiled_y_shift  16
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_3_TILED_Y(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_3_tiled_y_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_3_tiled_y_shift)
+
+/*define for DW_4 word*/
+/*define for tiled_z field*/
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_4_tiled_z_offset 4
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_4_tiled_z_mask   0x000007FF
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_4_tiled_z_shift  0
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_4_TILED_Z(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_4_tiled_z_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_4_tiled_z_shift)
+
+/*define for pitch_in_tile field*/
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_4_pitch_in_tile_offset 4
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_4_pitch_in_tile_mask   0x00000FFF
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_4_pitch_in_tile_shift  16
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_4_PITCH_IN_TILE(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_4_pitch_in_tile_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_4_pitch_in_tile_shift)
+
+/*define for DW_5 word*/
+/*define for slice_pitch field*/
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_5_slice_pitch_offset 5
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_5_slice_pitch_mask   0x003FFFFF
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_5_slice_pitch_shift  0
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_5_SLICE_PITCH(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_5_slice_pitch_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_5_slice_pitch_shift)
+
+/*define for DW_6 word*/
+/*define for element_size field*/
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_element_size_offset 6
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_element_size_mask   0x00000007
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_element_size_shift  0
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_ELEMENT_SIZE(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_6_element_size_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_6_element_size_shift)
+
+/*define for array_mode field*/
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_array_mode_offset 6
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_array_mode_mask   0x0000000F
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_array_mode_shift  3
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_ARRAY_MODE(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_6_array_mode_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_6_array_mode_shift)
+
+/*define for mit_mode field*/
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_mit_mode_offset 6
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_mit_mode_mask   0x00000007
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_mit_mode_shift  8
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_MIT_MODE(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_6_mit_mode_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_6_mit_mode_shift)
+
+/*define for tilesplit_size field*/
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_tilesplit_size_offset 6
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_tilesplit_size_mask   0x00000007
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_tilesplit_size_shift  11
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_TILESPLIT_SIZE(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_6_tilesplit_size_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_6_tilesplit_size_shift)
+
+/*define for bank_w field*/
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_bank_w_offset 6
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_bank_w_mask   0x00000003
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_bank_w_shift  15
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_BANK_W(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_6_bank_w_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_6_bank_w_shift)
+
+/*define for bank_h field*/
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_bank_h_offset 6
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_bank_h_mask   0x00000003
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_bank_h_shift  18
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_BANK_H(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_6_bank_h_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_6_bank_h_shift)
+
+/*define for num_bank field*/
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_num_bank_offset 6
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_num_bank_mask   0x00000003
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_num_bank_shift  21
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_NUM_BANK(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_6_num_bank_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_6_num_bank_shift)
+
+/*define for mat_aspt field*/
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_mat_aspt_offset 6
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_mat_aspt_mask   0x00000003
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_mat_aspt_shift  24
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_MAT_ASPT(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_6_mat_aspt_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_6_mat_aspt_shift)
+
+/*define for pipe_config field*/
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_pipe_config_offset 6
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_pipe_config_mask   0x0000001F
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_pipe_config_shift  26
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_PIPE_CONFIG(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_6_pipe_config_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_6_pipe_config_shift)
+
+/*define for LINEAR_ADDR_LO word*/
+/*define for linear_addr_31_0 field*/
+#define SDMA_PKT_COPY_TILED_SUBWIN_LINEAR_ADDR_LO_linear_addr_31_0_offset 7
+#define SDMA_PKT_COPY_TILED_SUBWIN_LINEAR_ADDR_LO_linear_addr_31_0_mask   0xFFFFFFFF
+#define SDMA_PKT_COPY_TILED_SUBWIN_LINEAR_ADDR_LO_linear_addr_31_0_shift  0
+#define SDMA_PKT_COPY_TILED_SUBWIN_LINEAR_ADDR_LO_LINEAR_ADDR_31_0(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_LINEAR_ADDR_LO_linear_addr_31_0_mask) << SDMA_PKT_COPY_TILED_SUBWIN_LINEAR_ADDR_LO_linear_addr_31_0_shift)
+
+/*define for LINEAR_ADDR_HI word*/
+/*define for linear_addr_63_32 field*/
+#define SDMA_PKT_COPY_TILED_SUBWIN_LINEAR_ADDR_HI_linear_addr_63_32_offset 8
+#define SDMA_PKT_COPY_TILED_SUBWIN_LINEAR_ADDR_HI_linear_addr_63_32_mask   0xFFFFFFFF
+#define SDMA_PKT_COPY_TILED_SUBWIN_LINEAR_ADDR_HI_linear_addr_63_32_shift  0
+#define SDMA_PKT_COPY_TILED_SUBWIN_LINEAR_ADDR_HI_LINEAR_ADDR_63_32(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_LINEAR_ADDR_HI_linear_addr_63_32_mask) << SDMA_PKT_COPY_TILED_SUBWIN_LINEAR_ADDR_HI_linear_addr_63_32_shift)
+
+/*define for DW_9 word*/
+/*define for linear_x field*/
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_9_linear_x_offset 9
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_9_linear_x_mask   0x00003FFF
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_9_linear_x_shift  0
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_9_LINEAR_X(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_9_linear_x_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_9_linear_x_shift)
+
+/*define for linear_y field*/
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_9_linear_y_offset 9
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_9_linear_y_mask   0x00003FFF
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_9_linear_y_shift  16
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_9_LINEAR_Y(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_9_linear_y_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_9_linear_y_shift)
+
+/*define for DW_10 word*/
+/*define for linear_z field*/
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_10_linear_z_offset 10
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_10_linear_z_mask   0x000007FF
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_10_linear_z_shift  0
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_10_LINEAR_Z(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_10_linear_z_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_10_linear_z_shift)
+
+/*define for linear_pitch field*/
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_10_linear_pitch_offset 10
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_10_linear_pitch_mask   0x00003FFF
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_10_linear_pitch_shift  16
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_10_LINEAR_PITCH(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_10_linear_pitch_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_10_linear_pitch_shift)
+
+/*define for DW_11 word*/
+/*define for linear_slice_pitch field*/
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_11_linear_slice_pitch_offset 11
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_11_linear_slice_pitch_mask   0x0FFFFFFF
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_11_linear_slice_pitch_shift  0
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_11_LINEAR_SLICE_PITCH(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_11_linear_slice_pitch_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_11_linear_slice_pitch_shift)
+
+/*define for DW_12 word*/
+/*define for rect_x field*/
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_12_rect_x_offset 12
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_12_rect_x_mask   0x00003FFF
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_12_rect_x_shift  0
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_12_RECT_X(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_12_rect_x_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_12_rect_x_shift)
+
+/*define for rect_y field*/
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_12_rect_y_offset 12
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_12_rect_y_mask   0x00003FFF
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_12_rect_y_shift  16
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_12_RECT_Y(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_12_rect_y_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_12_rect_y_shift)
+
+/*define for DW_13 word*/
+/*define for rect_z field*/
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_13_rect_z_offset 13
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_13_rect_z_mask   0x000007FF
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_13_rect_z_shift  0
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_13_RECT_Z(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_13_rect_z_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_13_rect_z_shift)
+
+/*define for linear_sw field*/
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_13_linear_sw_offset 13
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_13_linear_sw_mask   0x00000003
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_13_linear_sw_shift  16
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_13_LINEAR_SW(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_13_linear_sw_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_13_linear_sw_shift)
+
+/*define for tile_sw field*/
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_13_tile_sw_offset 13
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_13_tile_sw_mask   0x00000003
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_13_tile_sw_shift  24
+#define SDMA_PKT_COPY_TILED_SUBWIN_DW_13_TILE_SW(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_13_tile_sw_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_13_tile_sw_shift)
+
+
+/*
+** Definitions for SDMA_PKT_COPY_STRUCT packet
+*/
+
+/*define for HEADER word*/
+/*define for op field*/
+#define SDMA_PKT_COPY_STRUCT_HEADER_op_offset 0
+#define SDMA_PKT_COPY_STRUCT_HEADER_op_mask   0x000000FF
+#define SDMA_PKT_COPY_STRUCT_HEADER_op_shift  0
+#define SDMA_PKT_COPY_STRUCT_HEADER_OP(x) (((x) & SDMA_PKT_COPY_STRUCT_HEADER_op_mask) << SDMA_PKT_COPY_STRUCT_HEADER_op_shift)
+
+/*define for sub_op field*/
+#define SDMA_PKT_COPY_STRUCT_HEADER_sub_op_offset 0
+#define SDMA_PKT_COPY_STRUCT_HEADER_sub_op_mask   0x000000FF
+#define SDMA_PKT_COPY_STRUCT_HEADER_sub_op_shift  8
+#define SDMA_PKT_COPY_STRUCT_HEADER_SUB_OP(x) (((x) & SDMA_PKT_COPY_STRUCT_HEADER_sub_op_mask) << SDMA_PKT_COPY_STRUCT_HEADER_sub_op_shift)
+
+/*define for detile field*/
+#define SDMA_PKT_COPY_STRUCT_HEADER_detile_offset 0
+#define SDMA_PKT_COPY_STRUCT_HEADER_detile_mask   0x00000001
+#define SDMA_PKT_COPY_STRUCT_HEADER_detile_shift  31
+#define SDMA_PKT_COPY_STRUCT_HEADER_DETILE(x) (((x) & SDMA_PKT_COPY_STRUCT_HEADER_detile_mask) << SDMA_PKT_COPY_STRUCT_HEADER_detile_shift)
+
+/*define for SB_ADDR_LO word*/
+/*define for sb_addr_31_0 field*/
+#define SDMA_PKT_COPY_STRUCT_SB_ADDR_LO_sb_addr_31_0_offset 1
+#define SDMA_PKT_COPY_STRUCT_SB_ADDR_LO_sb_addr_31_0_mask   0xFFFFFFFF
+#define SDMA_PKT_COPY_STRUCT_SB_ADDR_LO_sb_addr_31_0_shift  0
+#define SDMA_PKT_COPY_STRUCT_SB_ADDR_LO_SB_ADDR_31_0(x) (((x) & SDMA_PKT_COPY_STRUCT_SB_ADDR_LO_sb_addr_31_0_mask) << SDMA_PKT_COPY_STRUCT_SB_ADDR_LO_sb_addr_31_0_shift)
+
+/*define for SB_ADDR_HI word*/
+/*define for sb_addr_63_32 field*/
+#define SDMA_PKT_COPY_STRUCT_SB_ADDR_HI_sb_addr_63_32_offset 2
+#define SDMA_PKT_COPY_STRUCT_SB_ADDR_HI_sb_addr_63_32_mask   0xFFFFFFFF
+#define SDMA_PKT_COPY_STRUCT_SB_ADDR_HI_sb_addr_63_32_shift  0
+#define SDMA_PKT_COPY_STRUCT_SB_ADDR_HI_SB_ADDR_63_32(x) (((x) & SDMA_PKT_COPY_STRUCT_SB_ADDR_HI_sb_addr_63_32_mask) << SDMA_PKT_COPY_STRUCT_SB_ADDR_HI_sb_addr_63_32_shift)
+
+/*define for START_INDEX word*/
+/*define for start_index field*/
+#define SDMA_PKT_COPY_STRUCT_START_INDEX_start_index_offset 3
+#define SDMA_PKT_COPY_STRUCT_START_INDEX_start_index_mask   0xFFFFFFFF
+#define SDMA_PKT_COPY_STRUCT_START_INDEX_start_index_shift  0
+#define SDMA_PKT_COPY_STRUCT_START_INDEX_START_INDEX(x) (((x) & SDMA_PKT_COPY_STRUCT_START_INDEX_start_index_mask) << SDMA_PKT_COPY_STRUCT_START_INDEX_start_index_shift)
+
+/*define for COUNT word*/
+/*define for count field*/
+#define SDMA_PKT_COPY_STRUCT_COUNT_count_offset 4
+#define SDMA_PKT_COPY_STRUCT_COUNT_count_mask   0xFFFFFFFF
+#define SDMA_PKT_COPY_STRUCT_COUNT_count_shift  0
+#define SDMA_PKT_COPY_STRUCT_COUNT_COUNT(x) (((x) & SDMA_PKT_COPY_STRUCT_COUNT_count_mask) << SDMA_PKT_COPY_STRUCT_COUNT_count_shift)
+
+/*define for DW_5 word*/
+/*define for stride field*/
+#define SDMA_PKT_COPY_STRUCT_DW_5_stride_offset 5
+#define SDMA_PKT_COPY_STRUCT_DW_5_stride_mask   0x000007FF
+#define SDMA_PKT_COPY_STRUCT_DW_5_stride_shift  0
+#define SDMA_PKT_COPY_STRUCT_DW_5_STRIDE(x) (((x) & SDMA_PKT_COPY_STRUCT_DW_5_stride_mask) << SDMA_PKT_COPY_STRUCT_DW_5_stride_shift)
+
+/*define for struct_sw field*/
+#define SDMA_PKT_COPY_STRUCT_DW_5_struct_sw_offset 5
+#define SDMA_PKT_COPY_STRUCT_DW_5_struct_sw_mask   0x00000003
+#define SDMA_PKT_COPY_STRUCT_DW_5_struct_sw_shift  16
+#define SDMA_PKT_COPY_STRUCT_DW_5_STRUCT_SW(x) (((x) & SDMA_PKT_COPY_STRUCT_DW_5_struct_sw_mask) << SDMA_PKT_COPY_STRUCT_DW_5_struct_sw_shift)
+
+/*define for struct_ha field*/
+#define SDMA_PKT_COPY_STRUCT_DW_5_struct_ha_offset 5
+#define SDMA_PKT_COPY_STRUCT_DW_5_struct_ha_mask   0x00000001
+#define SDMA_PKT_COPY_STRUCT_DW_5_struct_ha_shift  22
+#define SDMA_PKT_COPY_STRUCT_DW_5_STRUCT_HA(x) (((x) & SDMA_PKT_COPY_STRUCT_DW_5_struct_ha_mask) << SDMA_PKT_COPY_STRUCT_DW_5_struct_ha_shift)
+
+/*define for linear_sw field*/
+#define SDMA_PKT_COPY_STRUCT_DW_5_linear_sw_offset 5
+#define SDMA_PKT_COPY_STRUCT_DW_5_linear_sw_mask   0x00000003
+#define SDMA_PKT_COPY_STRUCT_DW_5_linear_sw_shift  24
+#define SDMA_PKT_COPY_STRUCT_DW_5_LINEAR_SW(x) (((x) & SDMA_PKT_COPY_STRUCT_DW_5_linear_sw_mask) << SDMA_PKT_COPY_STRUCT_DW_5_linear_sw_shift)
+
+/*define for linear_ha field*/
+#define SDMA_PKT_COPY_STRUCT_DW_5_linear_ha_offset 5
+#define SDMA_PKT_COPY_STRUCT_DW_5_linear_ha_mask   0x00000001
+#define SDMA_PKT_COPY_STRUCT_DW_5_linear_ha_shift  30
+#define SDMA_PKT_COPY_STRUCT_DW_5_LINEAR_HA(x) (((x) & SDMA_PKT_COPY_STRUCT_DW_5_linear_ha_mask) << SDMA_PKT_COPY_STRUCT_DW_5_linear_ha_shift)
+
+/*define for LINEAR_ADDR_LO word*/
+/*define for linear_addr_31_0 field*/
+#define SDMA_PKT_COPY_STRUCT_LINEAR_ADDR_LO_linear_addr_31_0_offset 6
+#define SDMA_PKT_COPY_STRUCT_LINEAR_ADDR_LO_linear_addr_31_0_mask   0xFFFFFFFF
+#define SDMA_PKT_COPY_STRUCT_LINEAR_ADDR_LO_linear_addr_31_0_shift  0
+#define SDMA_PKT_COPY_STRUCT_LINEAR_ADDR_LO_LINEAR_ADDR_31_0(x) (((x) & SDMA_PKT_COPY_STRUCT_LINEAR_ADDR_LO_linear_addr_31_0_mask) << SDMA_PKT_COPY_STRUCT_LINEAR_ADDR_LO_linear_addr_31_0_shift)
+
+/*define for LINEAR_ADDR_HI word*/
+/*define for linear_addr_63_32 field*/
+#define SDMA_PKT_COPY_STRUCT_LINEAR_ADDR_HI_linear_addr_63_32_offset 7
+#define SDMA_PKT_COPY_STRUCT_LINEAR_ADDR_HI_linear_addr_63_32_mask   0xFFFFFFFF
+#define SDMA_PKT_COPY_STRUCT_LINEAR_ADDR_HI_linear_addr_63_32_shift  0
+#define SDMA_PKT_COPY_STRUCT_LINEAR_ADDR_HI_LINEAR_ADDR_63_32(x) (((x) & SDMA_PKT_COPY_STRUCT_LINEAR_ADDR_HI_linear_addr_63_32_mask) << SDMA_PKT_COPY_STRUCT_LINEAR_ADDR_HI_linear_addr_63_32_shift)
+
+
+/*
+** Definitions for SDMA_PKT_WRITE_UNTILED packet
+*/
+
+/*define for HEADER word*/
+/*define for op field*/
+#define SDMA_PKT_WRITE_UNTILED_HEADER_op_offset 0
+#define SDMA_PKT_WRITE_UNTILED_HEADER_op_mask   0x000000FF
+#define SDMA_PKT_WRITE_UNTILED_HEADER_op_shift  0
+#define SDMA_PKT_WRITE_UNTILED_HEADER_OP(x) (((x) & SDMA_PKT_WRITE_UNTILED_HEADER_op_mask) << SDMA_PKT_WRITE_UNTILED_HEADER_op_shift)
+
+/*define for sub_op field*/
+#define SDMA_PKT_WRITE_UNTILED_HEADER_sub_op_offset 0
+#define SDMA_PKT_WRITE_UNTILED_HEADER_sub_op_mask   0x000000FF
+#define SDMA_PKT_WRITE_UNTILED_HEADER_sub_op_shift  8
+#define SDMA_PKT_WRITE_UNTILED_HEADER_SUB_OP(x) (((x) & SDMA_PKT_WRITE_UNTILED_HEADER_sub_op_mask) << SDMA_PKT_WRITE_UNTILED_HEADER_sub_op_shift)
+
+/*define for DST_ADDR_LO word*/
+/*define for dst_addr_31_0 field*/
+#define SDMA_PKT_WRITE_UNTILED_DST_ADDR_LO_dst_addr_31_0_offset 1
+#define SDMA_PKT_WRITE_UNTILED_DST_ADDR_LO_dst_addr_31_0_mask   0xFFFFFFFF
+#define SDMA_PKT_WRITE_UNTILED_DST_ADDR_LO_dst_addr_31_0_shift  0
+#define SDMA_PKT_WRITE_UNTILED_DST_ADDR_LO_DST_ADDR_31_0(x) (((x) & SDMA_PKT_WRITE_UNTILED_DST_ADDR_LO_dst_addr_31_0_mask) << SDMA_PKT_WRITE_UNTILED_DST_ADDR_LO_dst_addr_31_0_shift)
+
+/*define for DST_ADDR_HI word*/
+/*define for dst_addr_63_32 field*/
+#define SDMA_PKT_WRITE_UNTILED_DST_ADDR_HI_dst_addr_63_32_offset 2
+#define SDMA_PKT_WRITE_UNTILED_DST_ADDR_HI_dst_addr_63_32_mask   0xFFFFFFFF
+#define SDMA_PKT_WRITE_UNTILED_DST_ADDR_HI_dst_addr_63_32_shift  0
+#define SDMA_PKT_WRITE_UNTILED_DST_ADDR_HI_DST_ADDR_63_32(x) (((x) & SDMA_PKT_WRITE_UNTILED_DST_ADDR_HI_dst_addr_63_32_mask) << SDMA_PKT_WRITE_UNTILED_DST_ADDR_HI_dst_addr_63_32_shift)
+
+/*define for DW_3 word*/
+/*define for count field*/
+#define SDMA_PKT_WRITE_UNTILED_DW_3_count_offset 3
+#define SDMA_PKT_WRITE_UNTILED_DW_3_count_mask   0x003FFFFF
+#define SDMA_PKT_WRITE_UNTILED_DW_3_count_shift  0
+#define SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(x) (((x) & SDMA_PKT_WRITE_UNTILED_DW_3_count_mask) << SDMA_PKT_WRITE_UNTILED_DW_3_count_shift)
+
+/*define for sw field*/
+#define SDMA_PKT_WRITE_UNTILED_DW_3_sw_offset 3
+#define SDMA_PKT_WRITE_UNTILED_DW_3_sw_mask   0x00000003
+#define SDMA_PKT_WRITE_UNTILED_DW_3_sw_shift  24
+#define SDMA_PKT_WRITE_UNTILED_DW_3_SW(x) (((x) & SDMA_PKT_WRITE_UNTILED_DW_3_sw_mask) << SDMA_PKT_WRITE_UNTILED_DW_3_sw_shift)
+
+/*define for DATA0 word*/
+/*define for data0 field*/
+#define SDMA_PKT_WRITE_UNTILED_DATA0_data0_offset 4
+#define SDMA_PKT_WRITE_UNTILED_DATA0_data0_mask   0xFFFFFFFF
+#define SDMA_PKT_WRITE_UNTILED_DATA0_data0_shift  0
+#define SDMA_PKT_WRITE_UNTILED_DATA0_DATA0(x) (((x) & SDMA_PKT_WRITE_UNTILED_DATA0_data0_mask) << SDMA_PKT_WRITE_UNTILED_DATA0_data0_shift)
+
+
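The SDMA_PKT_WRITE_UNTILED macros above describe an inline write of a small number of dwords to a linear destination. As a minimal sketch (not taken from this patch), a packet could be packed as follows; SDMA_OP_WRITE and SDMA_SUBOP_WRITE_LINEAR are assumed to be the opcode constants defined near the top of this header, lower_32_bits()/upper_32_bits() are the usual kernel helpers, and whether COUNT holds the dword count or the count minus one should be checked against the hardware documentation.

static void example_write_untiled(uint32_t *dw, uint64_t dst,
                                  const uint32_t *src, uint32_t ndw)
{
        uint32_t i;

        /* header selects the linear (untiled) write sub-opcode */
        dw[0] = SDMA_PKT_WRITE_UNTILED_HEADER_OP(SDMA_OP_WRITE) |
                SDMA_PKT_WRITE_UNTILED_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
        dw[1] = SDMA_PKT_WRITE_UNTILED_DST_ADDR_LO_DST_ADDR_31_0(lower_32_bits(dst));
        dw[2] = SDMA_PKT_WRITE_UNTILED_DST_ADDR_HI_DST_ADDR_63_32(upper_32_bits(dst));
        dw[3] = SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(ndw);
        for (i = 0; i < ndw; i++)       /* payload dwords follow the header */
                dw[4 + i] = src[i];
}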
+/*
+** Definitions for SDMA_PKT_WRITE_TILED packet
+*/
+
+/*define for HEADER word*/
+/*define for op field*/
+#define SDMA_PKT_WRITE_TILED_HEADER_op_offset 0
+#define SDMA_PKT_WRITE_TILED_HEADER_op_mask   0x000000FF
+#define SDMA_PKT_WRITE_TILED_HEADER_op_shift  0
+#define SDMA_PKT_WRITE_TILED_HEADER_OP(x) (((x) & SDMA_PKT_WRITE_TILED_HEADER_op_mask) << SDMA_PKT_WRITE_TILED_HEADER_op_shift)
+
+/*define for sub_op field*/
+#define SDMA_PKT_WRITE_TILED_HEADER_sub_op_offset 0
+#define SDMA_PKT_WRITE_TILED_HEADER_sub_op_mask   0x000000FF
+#define SDMA_PKT_WRITE_TILED_HEADER_sub_op_shift  8
+#define SDMA_PKT_WRITE_TILED_HEADER_SUB_OP(x) (((x) & SDMA_PKT_WRITE_TILED_HEADER_sub_op_mask) << SDMA_PKT_WRITE_TILED_HEADER_sub_op_shift)
+
+/*define for DST_ADDR_LO word*/
+/*define for dst_addr_31_0 field*/
+#define SDMA_PKT_WRITE_TILED_DST_ADDR_LO_dst_addr_31_0_offset 1
+#define SDMA_PKT_WRITE_TILED_DST_ADDR_LO_dst_addr_31_0_mask   0xFFFFFFFF
+#define SDMA_PKT_WRITE_TILED_DST_ADDR_LO_dst_addr_31_0_shift  0
+#define SDMA_PKT_WRITE_TILED_DST_ADDR_LO_DST_ADDR_31_0(x) (((x) & SDMA_PKT_WRITE_TILED_DST_ADDR_LO_dst_addr_31_0_mask) << SDMA_PKT_WRITE_TILED_DST_ADDR_LO_dst_addr_31_0_shift)
+
+/*define for DST_ADDR_HI word*/
+/*define for dst_addr_63_32 field*/
+#define SDMA_PKT_WRITE_TILED_DST_ADDR_HI_dst_addr_63_32_offset 2
+#define SDMA_PKT_WRITE_TILED_DST_ADDR_HI_dst_addr_63_32_mask   0xFFFFFFFF
+#define SDMA_PKT_WRITE_TILED_DST_ADDR_HI_dst_addr_63_32_shift  0
+#define SDMA_PKT_WRITE_TILED_DST_ADDR_HI_DST_ADDR_63_32(x) (((x) & SDMA_PKT_WRITE_TILED_DST_ADDR_HI_dst_addr_63_32_mask) << SDMA_PKT_WRITE_TILED_DST_ADDR_HI_dst_addr_63_32_shift)
+
+/*define for DW_3 word*/
+/*define for pitch_in_tile field*/
+#define SDMA_PKT_WRITE_TILED_DW_3_pitch_in_tile_offset 3
+#define SDMA_PKT_WRITE_TILED_DW_3_pitch_in_tile_mask   0x000007FF
+#define SDMA_PKT_WRITE_TILED_DW_3_pitch_in_tile_shift  0
+#define SDMA_PKT_WRITE_TILED_DW_3_PITCH_IN_TILE(x) (((x) & SDMA_PKT_WRITE_TILED_DW_3_pitch_in_tile_mask) << SDMA_PKT_WRITE_TILED_DW_3_pitch_in_tile_shift)
+
+/*define for height field*/
+#define SDMA_PKT_WRITE_TILED_DW_3_height_offset 3
+#define SDMA_PKT_WRITE_TILED_DW_3_height_mask   0x00003FFF
+#define SDMA_PKT_WRITE_TILED_DW_3_height_shift  16
+#define SDMA_PKT_WRITE_TILED_DW_3_HEIGHT(x) (((x) & SDMA_PKT_WRITE_TILED_DW_3_height_mask) << SDMA_PKT_WRITE_TILED_DW_3_height_shift)
+
+/*define for DW_4 word*/
+/*define for slice_pitch field*/
+#define SDMA_PKT_WRITE_TILED_DW_4_slice_pitch_offset 4
+#define SDMA_PKT_WRITE_TILED_DW_4_slice_pitch_mask   0x003FFFFF
+#define SDMA_PKT_WRITE_TILED_DW_4_slice_pitch_shift  0
+#define SDMA_PKT_WRITE_TILED_DW_4_SLICE_PITCH(x) (((x) & SDMA_PKT_WRITE_TILED_DW_4_slice_pitch_mask) << SDMA_PKT_WRITE_TILED_DW_4_slice_pitch_shift)
+
+/*define for DW_5 word*/
+/*define for element_size field*/
+#define SDMA_PKT_WRITE_TILED_DW_5_element_size_offset 5
+#define SDMA_PKT_WRITE_TILED_DW_5_element_size_mask   0x00000007
+#define SDMA_PKT_WRITE_TILED_DW_5_element_size_shift  0
+#define SDMA_PKT_WRITE_TILED_DW_5_ELEMENT_SIZE(x) (((x) & SDMA_PKT_WRITE_TILED_DW_5_element_size_mask) << SDMA_PKT_WRITE_TILED_DW_5_element_size_shift)
+
+/*define for array_mode field*/
+#define SDMA_PKT_WRITE_TILED_DW_5_array_mode_offset 5
+#define SDMA_PKT_WRITE_TILED_DW_5_array_mode_mask   0x0000000F
+#define SDMA_PKT_WRITE_TILED_DW_5_array_mode_shift  3
+#define SDMA_PKT_WRITE_TILED_DW_5_ARRAY_MODE(x) (((x) & SDMA_PKT_WRITE_TILED_DW_5_array_mode_mask) << SDMA_PKT_WRITE_TILED_DW_5_array_mode_shift)
+
+/*define for mit_mode field*/
+#define SDMA_PKT_WRITE_TILED_DW_5_mit_mode_offset 5
+#define SDMA_PKT_WRITE_TILED_DW_5_mit_mode_mask   0x00000007
+#define SDMA_PKT_WRITE_TILED_DW_5_mit_mode_shift  8
+#define SDMA_PKT_WRITE_TILED_DW_5_MIT_MODE(x) (((x) & SDMA_PKT_WRITE_TILED_DW_5_mit_mode_mask) << SDMA_PKT_WRITE_TILED_DW_5_mit_mode_shift)
+
+/*define for tilesplit_size field*/
+#define SDMA_PKT_WRITE_TILED_DW_5_tilesplit_size_offset 5
+#define SDMA_PKT_WRITE_TILED_DW_5_tilesplit_size_mask   0x00000007
+#define SDMA_PKT_WRITE_TILED_DW_5_tilesplit_size_shift  11
+#define SDMA_PKT_WRITE_TILED_DW_5_TILESPLIT_SIZE(x) (((x) & SDMA_PKT_WRITE_TILED_DW_5_tilesplit_size_mask) << SDMA_PKT_WRITE_TILED_DW_5_tilesplit_size_shift)
+
+/*define for bank_w field*/
+#define SDMA_PKT_WRITE_TILED_DW_5_bank_w_offset 5
+#define SDMA_PKT_WRITE_TILED_DW_5_bank_w_mask   0x00000003
+#define SDMA_PKT_WRITE_TILED_DW_5_bank_w_shift  15
+#define SDMA_PKT_WRITE_TILED_DW_5_BANK_W(x) (((x) & SDMA_PKT_WRITE_TILED_DW_5_bank_w_mask) << SDMA_PKT_WRITE_TILED_DW_5_bank_w_shift)
+
+/*define for bank_h field*/
+#define SDMA_PKT_WRITE_TILED_DW_5_bank_h_offset 5
+#define SDMA_PKT_WRITE_TILED_DW_5_bank_h_mask   0x00000003
+#define SDMA_PKT_WRITE_TILED_DW_5_bank_h_shift  18
+#define SDMA_PKT_WRITE_TILED_DW_5_BANK_H(x) (((x) & SDMA_PKT_WRITE_TILED_DW_5_bank_h_mask) << SDMA_PKT_WRITE_TILED_DW_5_bank_h_shift)
+
+/*define for num_bank field*/
+#define SDMA_PKT_WRITE_TILED_DW_5_num_bank_offset 5
+#define SDMA_PKT_WRITE_TILED_DW_5_num_bank_mask   0x00000003
+#define SDMA_PKT_WRITE_TILED_DW_5_num_bank_shift  21
+#define SDMA_PKT_WRITE_TILED_DW_5_NUM_BANK(x) (((x) & SDMA_PKT_WRITE_TILED_DW_5_num_bank_mask) << SDMA_PKT_WRITE_TILED_DW_5_num_bank_shift)
+
+/*define for mat_aspt field*/
+#define SDMA_PKT_WRITE_TILED_DW_5_mat_aspt_offset 5
+#define SDMA_PKT_WRITE_TILED_DW_5_mat_aspt_mask   0x00000003
+#define SDMA_PKT_WRITE_TILED_DW_5_mat_aspt_shift  24
+#define SDMA_PKT_WRITE_TILED_DW_5_MAT_ASPT(x) (((x) & SDMA_PKT_WRITE_TILED_DW_5_mat_aspt_mask) << SDMA_PKT_WRITE_TILED_DW_5_mat_aspt_shift)
+
+/*define for pipe_config field*/
+#define SDMA_PKT_WRITE_TILED_DW_5_pipe_config_offset 5
+#define SDMA_PKT_WRITE_TILED_DW_5_pipe_config_mask   0x0000001F
+#define SDMA_PKT_WRITE_TILED_DW_5_pipe_config_shift  26
+#define SDMA_PKT_WRITE_TILED_DW_5_PIPE_CONFIG(x) (((x) & SDMA_PKT_WRITE_TILED_DW_5_pipe_config_mask) << SDMA_PKT_WRITE_TILED_DW_5_pipe_config_shift)
+
+/*define for DW_6 word*/
+/*define for x field*/
+#define SDMA_PKT_WRITE_TILED_DW_6_x_offset 6
+#define SDMA_PKT_WRITE_TILED_DW_6_x_mask   0x00003FFF
+#define SDMA_PKT_WRITE_TILED_DW_6_x_shift  0
+#define SDMA_PKT_WRITE_TILED_DW_6_X(x) (((x) & SDMA_PKT_WRITE_TILED_DW_6_x_mask) << SDMA_PKT_WRITE_TILED_DW_6_x_shift)
+
+/*define for y field*/
+#define SDMA_PKT_WRITE_TILED_DW_6_y_offset 6
+#define SDMA_PKT_WRITE_TILED_DW_6_y_mask   0x00003FFF
+#define SDMA_PKT_WRITE_TILED_DW_6_y_shift  16
+#define SDMA_PKT_WRITE_TILED_DW_6_Y(x) (((x) & SDMA_PKT_WRITE_TILED_DW_6_y_mask) << SDMA_PKT_WRITE_TILED_DW_6_y_shift)
+
+/*define for DW_7 word*/
+/*define for z field*/
+#define SDMA_PKT_WRITE_TILED_DW_7_z_offset 7
+#define SDMA_PKT_WRITE_TILED_DW_7_z_mask   0x00000FFF
+#define SDMA_PKT_WRITE_TILED_DW_7_z_shift  0
+#define SDMA_PKT_WRITE_TILED_DW_7_Z(x) (((x) & SDMA_PKT_WRITE_TILED_DW_7_z_mask) << SDMA_PKT_WRITE_TILED_DW_7_z_shift)
+
+/*define for sw field*/
+#define SDMA_PKT_WRITE_TILED_DW_7_sw_offset 7
+#define SDMA_PKT_WRITE_TILED_DW_7_sw_mask   0x00000003
+#define SDMA_PKT_WRITE_TILED_DW_7_sw_shift  24
+#define SDMA_PKT_WRITE_TILED_DW_7_SW(x) (((x) & SDMA_PKT_WRITE_TILED_DW_7_sw_mask) << SDMA_PKT_WRITE_TILED_DW_7_sw_shift)
+
+/*define for COUNT word*/
+/*define for count field*/
+#define SDMA_PKT_WRITE_TILED_COUNT_count_offset 8
+#define SDMA_PKT_WRITE_TILED_COUNT_count_mask   0x003FFFFF
+#define SDMA_PKT_WRITE_TILED_COUNT_count_shift  0
+#define SDMA_PKT_WRITE_TILED_COUNT_COUNT(x) (((x) & SDMA_PKT_WRITE_TILED_COUNT_count_mask) << SDMA_PKT_WRITE_TILED_COUNT_count_shift)
+
+/*define for DATA0 word*/
+/*define for data0 field*/
+#define SDMA_PKT_WRITE_TILED_DATA0_data0_offset 9
+#define SDMA_PKT_WRITE_TILED_DATA0_data0_mask   0xFFFFFFFF
+#define SDMA_PKT_WRITE_TILED_DATA0_data0_shift  0
+#define SDMA_PKT_WRITE_TILED_DATA0_DATA0(x) (((x) & SDMA_PKT_WRITE_TILED_DATA0_data0_mask) << SDMA_PKT_WRITE_TILED_DATA0_data0_shift)
+
+
+/*
+** Definitions for SDMA_PKT_WRITE_INCR packet
+*/
+
+/*define for HEADER word*/
+/*define for op field*/
+#define SDMA_PKT_WRITE_INCR_HEADER_op_offset 0
+#define SDMA_PKT_WRITE_INCR_HEADER_op_mask   0x000000FF
+#define SDMA_PKT_WRITE_INCR_HEADER_op_shift  0
+#define SDMA_PKT_WRITE_INCR_HEADER_OP(x) (((x) & SDMA_PKT_WRITE_INCR_HEADER_op_mask) << SDMA_PKT_WRITE_INCR_HEADER_op_shift)
+
+/*define for sub_op field*/
+#define SDMA_PKT_WRITE_INCR_HEADER_sub_op_offset 0
+#define SDMA_PKT_WRITE_INCR_HEADER_sub_op_mask   0x000000FF
+#define SDMA_PKT_WRITE_INCR_HEADER_sub_op_shift  8
+#define SDMA_PKT_WRITE_INCR_HEADER_SUB_OP(x) (((x) & SDMA_PKT_WRITE_INCR_HEADER_sub_op_mask) << SDMA_PKT_WRITE_INCR_HEADER_sub_op_shift)
+
+/*define for DST_ADDR_LO word*/
+/*define for dst_addr_31_0 field*/
+#define SDMA_PKT_WRITE_INCR_DST_ADDR_LO_dst_addr_31_0_offset 1
+#define SDMA_PKT_WRITE_INCR_DST_ADDR_LO_dst_addr_31_0_mask   0xFFFFFFFF
+#define SDMA_PKT_WRITE_INCR_DST_ADDR_LO_dst_addr_31_0_shift  0
+#define SDMA_PKT_WRITE_INCR_DST_ADDR_LO_DST_ADDR_31_0(x) (((x) & SDMA_PKT_WRITE_INCR_DST_ADDR_LO_dst_addr_31_0_mask) << SDMA_PKT_WRITE_INCR_DST_ADDR_LO_dst_addr_31_0_shift)
+
+/*define for DST_ADDR_HI word*/
+/*define for dst_addr_63_32 field*/
+#define SDMA_PKT_WRITE_INCR_DST_ADDR_HI_dst_addr_63_32_offset 2
+#define SDMA_PKT_WRITE_INCR_DST_ADDR_HI_dst_addr_63_32_mask   0xFFFFFFFF
+#define SDMA_PKT_WRITE_INCR_DST_ADDR_HI_dst_addr_63_32_shift  0
+#define SDMA_PKT_WRITE_INCR_DST_ADDR_HI_DST_ADDR_63_32(x) (((x) & SDMA_PKT_WRITE_INCR_DST_ADDR_HI_dst_addr_63_32_mask) << SDMA_PKT_WRITE_INCR_DST_ADDR_HI_dst_addr_63_32_shift)
+
+/*define for MASK_DW0 word*/
+/*define for mask_dw0 field*/
+#define SDMA_PKT_WRITE_INCR_MASK_DW0_mask_dw0_offset 3
+#define SDMA_PKT_WRITE_INCR_MASK_DW0_mask_dw0_mask   0xFFFFFFFF
+#define SDMA_PKT_WRITE_INCR_MASK_DW0_mask_dw0_shift  0
+#define SDMA_PKT_WRITE_INCR_MASK_DW0_MASK_DW0(x) (((x) & SDMA_PKT_WRITE_INCR_MASK_DW0_mask_dw0_mask) << SDMA_PKT_WRITE_INCR_MASK_DW0_mask_dw0_shift)
+
+/*define for MASK_DW1 word*/
+/*define for mask_dw1 field*/
+#define SDMA_PKT_WRITE_INCR_MASK_DW1_mask_dw1_offset 4
+#define SDMA_PKT_WRITE_INCR_MASK_DW1_mask_dw1_mask   0xFFFFFFFF
+#define SDMA_PKT_WRITE_INCR_MASK_DW1_mask_dw1_shift  0
+#define SDMA_PKT_WRITE_INCR_MASK_DW1_MASK_DW1(x) (((x) & SDMA_PKT_WRITE_INCR_MASK_DW1_mask_dw1_mask) << SDMA_PKT_WRITE_INCR_MASK_DW1_mask_dw1_shift)
+
+/*define for INIT_DW0 word*/
+/*define for init_dw0 field*/
+#define SDMA_PKT_WRITE_INCR_INIT_DW0_init_dw0_offset 5
+#define SDMA_PKT_WRITE_INCR_INIT_DW0_init_dw0_mask   0xFFFFFFFF
+#define SDMA_PKT_WRITE_INCR_INIT_DW0_init_dw0_shift  0
+#define SDMA_PKT_WRITE_INCR_INIT_DW0_INIT_DW0(x) (((x) & SDMA_PKT_WRITE_INCR_INIT_DW0_init_dw0_mask) << SDMA_PKT_WRITE_INCR_INIT_DW0_init_dw0_shift)
+
+/*define for INIT_DW1 word*/
+/*define for init_dw1 field*/
+#define SDMA_PKT_WRITE_INCR_INIT_DW1_init_dw1_offset 6
+#define SDMA_PKT_WRITE_INCR_INIT_DW1_init_dw1_mask   0xFFFFFFFF
+#define SDMA_PKT_WRITE_INCR_INIT_DW1_init_dw1_shift  0
+#define SDMA_PKT_WRITE_INCR_INIT_DW1_INIT_DW1(x) (((x) & SDMA_PKT_WRITE_INCR_INIT_DW1_init_dw1_mask) << SDMA_PKT_WRITE_INCR_INIT_DW1_init_dw1_shift)
+
+/*define for INCR_DW0 word*/
+/*define for incr_dw0 field*/
+#define SDMA_PKT_WRITE_INCR_INCR_DW0_incr_dw0_offset 7
+#define SDMA_PKT_WRITE_INCR_INCR_DW0_incr_dw0_mask   0xFFFFFFFF
+#define SDMA_PKT_WRITE_INCR_INCR_DW0_incr_dw0_shift  0
+#define SDMA_PKT_WRITE_INCR_INCR_DW0_INCR_DW0(x) (((x) & SDMA_PKT_WRITE_INCR_INCR_DW0_incr_dw0_mask) << SDMA_PKT_WRITE_INCR_INCR_DW0_incr_dw0_shift)
+
+/*define for INCR_DW1 word*/
+/*define for incr_dw1 field*/
+#define SDMA_PKT_WRITE_INCR_INCR_DW1_incr_dw1_offset 8
+#define SDMA_PKT_WRITE_INCR_INCR_DW1_incr_dw1_mask   0xFFFFFFFF
+#define SDMA_PKT_WRITE_INCR_INCR_DW1_incr_dw1_shift  0
+#define SDMA_PKT_WRITE_INCR_INCR_DW1_INCR_DW1(x) (((x) & SDMA_PKT_WRITE_INCR_INCR_DW1_incr_dw1_mask) << SDMA_PKT_WRITE_INCR_INCR_DW1_incr_dw1_shift)
+
+/*define for COUNT word*/
+/*define for count field*/
+#define SDMA_PKT_WRITE_INCR_COUNT_count_offset 9
+#define SDMA_PKT_WRITE_INCR_COUNT_count_mask   0x0007FFFF
+#define SDMA_PKT_WRITE_INCR_COUNT_count_shift  0
+#define SDMA_PKT_WRITE_INCR_COUNT_COUNT(x) (((x) & SDMA_PKT_WRITE_INCR_COUNT_count_mask) << SDMA_PKT_WRITE_INCR_COUNT_count_shift)
+
+
+/*
+** Definitions for SDMA_PKT_INDIRECT packet
+*/
+
+/*define for HEADER word*/
+/*define for op field*/
+#define SDMA_PKT_INDIRECT_HEADER_op_offset 0
+#define SDMA_PKT_INDIRECT_HEADER_op_mask   0x000000FF
+#define SDMA_PKT_INDIRECT_HEADER_op_shift  0
+#define SDMA_PKT_INDIRECT_HEADER_OP(x) (((x) & SDMA_PKT_INDIRECT_HEADER_op_mask) << SDMA_PKT_INDIRECT_HEADER_op_shift)
+
+/*define for sub_op field*/
+#define SDMA_PKT_INDIRECT_HEADER_sub_op_offset 0
+#define SDMA_PKT_INDIRECT_HEADER_sub_op_mask   0x000000FF
+#define SDMA_PKT_INDIRECT_HEADER_sub_op_shift  8
+#define SDMA_PKT_INDIRECT_HEADER_SUB_OP(x) (((x) & SDMA_PKT_INDIRECT_HEADER_sub_op_mask) << SDMA_PKT_INDIRECT_HEADER_sub_op_shift)
+
+/*define for vmid field*/
+#define SDMA_PKT_INDIRECT_HEADER_vmid_offset 0
+#define SDMA_PKT_INDIRECT_HEADER_vmid_mask   0x0000000F
+#define SDMA_PKT_INDIRECT_HEADER_vmid_shift  16
+#define SDMA_PKT_INDIRECT_HEADER_VMID(x) (((x) & SDMA_PKT_INDIRECT_HEADER_vmid_mask) << SDMA_PKT_INDIRECT_HEADER_vmid_shift)
+
+/*define for BASE_LO word*/
+/*define for ib_base_31_0 field*/
+#define SDMA_PKT_INDIRECT_BASE_LO_ib_base_31_0_offset 1
+#define SDMA_PKT_INDIRECT_BASE_LO_ib_base_31_0_mask   0xFFFFFFFF
+#define SDMA_PKT_INDIRECT_BASE_LO_ib_base_31_0_shift  0
+#define SDMA_PKT_INDIRECT_BASE_LO_IB_BASE_31_0(x) (((x) & SDMA_PKT_INDIRECT_BASE_LO_ib_base_31_0_mask) << SDMA_PKT_INDIRECT_BASE_LO_ib_base_31_0_shift)
+
+/*define for BASE_HI word*/
+/*define for ib_base_63_32 field*/
+#define SDMA_PKT_INDIRECT_BASE_HI_ib_base_63_32_offset 2
+#define SDMA_PKT_INDIRECT_BASE_HI_ib_base_63_32_mask   0xFFFFFFFF
+#define SDMA_PKT_INDIRECT_BASE_HI_ib_base_63_32_shift  0
+#define SDMA_PKT_INDIRECT_BASE_HI_IB_BASE_63_32(x) (((x) & SDMA_PKT_INDIRECT_BASE_HI_ib_base_63_32_mask) << SDMA_PKT_INDIRECT_BASE_HI_ib_base_63_32_shift)
+
+/*define for IB_SIZE word*/
+/*define for ib_size field*/
+#define SDMA_PKT_INDIRECT_IB_SIZE_ib_size_offset 3
+#define SDMA_PKT_INDIRECT_IB_SIZE_ib_size_mask   0x000FFFFF
+#define SDMA_PKT_INDIRECT_IB_SIZE_ib_size_shift  0
+#define SDMA_PKT_INDIRECT_IB_SIZE_IB_SIZE(x) (((x) & SDMA_PKT_INDIRECT_IB_SIZE_ib_size_mask) << SDMA_PKT_INDIRECT_IB_SIZE_ib_size_shift)
+
+/*define for CSA_ADDR_LO word*/
+/*define for csa_addr_31_0 field*/
+#define SDMA_PKT_INDIRECT_CSA_ADDR_LO_csa_addr_31_0_offset 4
+#define SDMA_PKT_INDIRECT_CSA_ADDR_LO_csa_addr_31_0_mask   0xFFFFFFFF
+#define SDMA_PKT_INDIRECT_CSA_ADDR_LO_csa_addr_31_0_shift  0
+#define SDMA_PKT_INDIRECT_CSA_ADDR_LO_CSA_ADDR_31_0(x) (((x) & SDMA_PKT_INDIRECT_CSA_ADDR_LO_csa_addr_31_0_mask) << SDMA_PKT_INDIRECT_CSA_ADDR_LO_csa_addr_31_0_shift)
+
+/*define for CSA_ADDR_HI word*/
+/*define for csa_addr_63_32 field*/
+#define SDMA_PKT_INDIRECT_CSA_ADDR_HI_csa_addr_63_32_offset 5
+#define SDMA_PKT_INDIRECT_CSA_ADDR_HI_csa_addr_63_32_mask   0xFFFFFFFF
+#define SDMA_PKT_INDIRECT_CSA_ADDR_HI_csa_addr_63_32_shift  0
+#define SDMA_PKT_INDIRECT_CSA_ADDR_HI_CSA_ADDR_63_32(x) (((x) & SDMA_PKT_INDIRECT_CSA_ADDR_HI_csa_addr_63_32_mask) << SDMA_PKT_INDIRECT_CSA_ADDR_HI_csa_addr_63_32_shift)
+
+
+/*
+** Definitions for SDMA_PKT_SEMAPHORE packet
+*/
+
+/*define for HEADER word*/
+/*define for op field*/
+#define SDMA_PKT_SEMAPHORE_HEADER_op_offset 0
+#define SDMA_PKT_SEMAPHORE_HEADER_op_mask   0x000000FF
+#define SDMA_PKT_SEMAPHORE_HEADER_op_shift  0
+#define SDMA_PKT_SEMAPHORE_HEADER_OP(x) (((x) & SDMA_PKT_SEMAPHORE_HEADER_op_mask) << SDMA_PKT_SEMAPHORE_HEADER_op_shift)
+
+/*define for sub_op field*/
+#define SDMA_PKT_SEMAPHORE_HEADER_sub_op_offset 0
+#define SDMA_PKT_SEMAPHORE_HEADER_sub_op_mask   0x000000FF
+#define SDMA_PKT_SEMAPHORE_HEADER_sub_op_shift  8
+#define SDMA_PKT_SEMAPHORE_HEADER_SUB_OP(x) (((x) & SDMA_PKT_SEMAPHORE_HEADER_sub_op_mask) << SDMA_PKT_SEMAPHORE_HEADER_sub_op_shift)
+
+/*define for write_one field*/
+#define SDMA_PKT_SEMAPHORE_HEADER_write_one_offset 0
+#define SDMA_PKT_SEMAPHORE_HEADER_write_one_mask   0x00000001
+#define SDMA_PKT_SEMAPHORE_HEADER_write_one_shift  29
+#define SDMA_PKT_SEMAPHORE_HEADER_WRITE_ONE(x) (((x) & SDMA_PKT_SEMAPHORE_HEADER_write_one_mask) << SDMA_PKT_SEMAPHORE_HEADER_write_one_shift)
+
+/*define for signal field*/
+#define SDMA_PKT_SEMAPHORE_HEADER_signal_offset 0
+#define SDMA_PKT_SEMAPHORE_HEADER_signal_mask   0x00000001
+#define SDMA_PKT_SEMAPHORE_HEADER_signal_shift  30
+#define SDMA_PKT_SEMAPHORE_HEADER_SIGNAL(x) (((x) & SDMA_PKT_SEMAPHORE_HEADER_signal_mask) << SDMA_PKT_SEMAPHORE_HEADER_signal_shift)
+
+/*define for mailbox field*/
+#define SDMA_PKT_SEMAPHORE_HEADER_mailbox_offset 0
+#define SDMA_PKT_SEMAPHORE_HEADER_mailbox_mask   0x00000001
+#define SDMA_PKT_SEMAPHORE_HEADER_mailbox_shift  31
+#define SDMA_PKT_SEMAPHORE_HEADER_MAILBOX(x) (((x) & SDMA_PKT_SEMAPHORE_HEADER_mailbox_mask) << SDMA_PKT_SEMAPHORE_HEADER_mailbox_shift)
+
+/*define for ADDR_LO word*/
+/*define for addr_31_0 field*/
+#define SDMA_PKT_SEMAPHORE_ADDR_LO_addr_31_0_offset 1
+#define SDMA_PKT_SEMAPHORE_ADDR_LO_addr_31_0_mask   0xFFFFFFFF
+#define SDMA_PKT_SEMAPHORE_ADDR_LO_addr_31_0_shift  0
+#define SDMA_PKT_SEMAPHORE_ADDR_LO_ADDR_31_0(x) (((x) & SDMA_PKT_SEMAPHORE_ADDR_LO_addr_31_0_mask) << SDMA_PKT_SEMAPHORE_ADDR_LO_addr_31_0_shift)
+
+/*define for ADDR_HI word*/
+/*define for addr_63_32 field*/
+#define SDMA_PKT_SEMAPHORE_ADDR_HI_addr_63_32_offset 2
+#define SDMA_PKT_SEMAPHORE_ADDR_HI_addr_63_32_mask   0xFFFFFFFF
+#define SDMA_PKT_SEMAPHORE_ADDR_HI_addr_63_32_shift  0
+#define SDMA_PKT_SEMAPHORE_ADDR_HI_ADDR_63_32(x) (((x) & SDMA_PKT_SEMAPHORE_ADDR_HI_addr_63_32_mask) << SDMA_PKT_SEMAPHORE_ADDR_HI_addr_63_32_shift)
+
+
+/*
+** Definitions for SDMA_PKT_FENCE packet
+*/
+
+/*define for HEADER word*/
+/*define for op field*/
+#define SDMA_PKT_FENCE_HEADER_op_offset 0
+#define SDMA_PKT_FENCE_HEADER_op_mask   0x000000FF
+#define SDMA_PKT_FENCE_HEADER_op_shift  0
+#define SDMA_PKT_FENCE_HEADER_OP(x) (((x) & SDMA_PKT_FENCE_HEADER_op_mask) << SDMA_PKT_FENCE_HEADER_op_shift)
+
+/*define for sub_op field*/
+#define SDMA_PKT_FENCE_HEADER_sub_op_offset 0
+#define SDMA_PKT_FENCE_HEADER_sub_op_mask   0x000000FF
+#define SDMA_PKT_FENCE_HEADER_sub_op_shift  8
+#define SDMA_PKT_FENCE_HEADER_SUB_OP(x) (((x) & SDMA_PKT_FENCE_HEADER_sub_op_mask) << SDMA_PKT_FENCE_HEADER_sub_op_shift)
+
+/*define for ADDR_LO word*/
+/*define for addr_31_0 field*/
+#define SDMA_PKT_FENCE_ADDR_LO_addr_31_0_offset 1
+#define SDMA_PKT_FENCE_ADDR_LO_addr_31_0_mask   0xFFFFFFFF
+#define SDMA_PKT_FENCE_ADDR_LO_addr_31_0_shift  0
+#define SDMA_PKT_FENCE_ADDR_LO_ADDR_31_0(x) (((x) & SDMA_PKT_FENCE_ADDR_LO_addr_31_0_mask) << SDMA_PKT_FENCE_ADDR_LO_addr_31_0_shift)
+
+/*define for ADDR_HI word*/
+/*define for addr_63_32 field*/
+#define SDMA_PKT_FENCE_ADDR_HI_addr_63_32_offset 2
+#define SDMA_PKT_FENCE_ADDR_HI_addr_63_32_mask   0xFFFFFFFF
+#define SDMA_PKT_FENCE_ADDR_HI_addr_63_32_shift  0
+#define SDMA_PKT_FENCE_ADDR_HI_ADDR_63_32(x) (((x) & SDMA_PKT_FENCE_ADDR_HI_addr_63_32_mask) << SDMA_PKT_FENCE_ADDR_HI_addr_63_32_shift)
+
+/*define for DATA word*/
+/*define for data field*/
+#define SDMA_PKT_FENCE_DATA_data_offset 3
+#define SDMA_PKT_FENCE_DATA_data_mask   0xFFFFFFFF
+#define SDMA_PKT_FENCE_DATA_data_shift  0
+#define SDMA_PKT_FENCE_DATA_DATA(x) (((x) & SDMA_PKT_FENCE_DATA_data_mask) << SDMA_PKT_FENCE_DATA_data_shift)
+
+
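As an illustration of how the fence macros above combine (this sketch is not part of the patch), a four-dword fence packet that writes a sequence number to a fence address might be assembled like this; SDMA_OP_FENCE is assumed to be the opcode constant defined earlier in this header.

static void example_fence(uint32_t *dw, uint64_t addr, uint32_t seq)
{
        dw[0] = SDMA_PKT_FENCE_HEADER_OP(SDMA_OP_FENCE);
        dw[1] = SDMA_PKT_FENCE_ADDR_LO_ADDR_31_0(lower_32_bits(addr));
        dw[2] = SDMA_PKT_FENCE_ADDR_HI_ADDR_63_32(upper_32_bits(addr));
        dw[3] = SDMA_PKT_FENCE_DATA_DATA(seq);  /* value stored when the fence completes */
}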
+/*
+** Definitions for SDMA_PKT_SRBM_WRITE packet
+*/
+
+/*define for HEADER word*/
+/*define for op field*/
+#define SDMA_PKT_SRBM_WRITE_HEADER_op_offset 0
+#define SDMA_PKT_SRBM_WRITE_HEADER_op_mask   0x000000FF
+#define SDMA_PKT_SRBM_WRITE_HEADER_op_shift  0
+#define SDMA_PKT_SRBM_WRITE_HEADER_OP(x) (((x) & SDMA_PKT_SRBM_WRITE_HEADER_op_mask) << SDMA_PKT_SRBM_WRITE_HEADER_op_shift)
+
+/*define for sub_op field*/
+#define SDMA_PKT_SRBM_WRITE_HEADER_sub_op_offset 0
+#define SDMA_PKT_SRBM_WRITE_HEADER_sub_op_mask   0x000000FF
+#define SDMA_PKT_SRBM_WRITE_HEADER_sub_op_shift  8
+#define SDMA_PKT_SRBM_WRITE_HEADER_SUB_OP(x) (((x) & SDMA_PKT_SRBM_WRITE_HEADER_sub_op_mask) << SDMA_PKT_SRBM_WRITE_HEADER_sub_op_shift)
+
+/*define for byte_en field*/
+#define SDMA_PKT_SRBM_WRITE_HEADER_byte_en_offset 0
+#define SDMA_PKT_SRBM_WRITE_HEADER_byte_en_mask   0x0000000F
+#define SDMA_PKT_SRBM_WRITE_HEADER_byte_en_shift  28
+#define SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(x) (((x) & SDMA_PKT_SRBM_WRITE_HEADER_byte_en_mask) << SDMA_PKT_SRBM_WRITE_HEADER_byte_en_shift)
+
+/*define for ADDR word*/
+/*define for addr field*/
+#define SDMA_PKT_SRBM_WRITE_ADDR_addr_offset 1
+#define SDMA_PKT_SRBM_WRITE_ADDR_addr_mask   0x0000FFFF
+#define SDMA_PKT_SRBM_WRITE_ADDR_addr_shift  0
+#define SDMA_PKT_SRBM_WRITE_ADDR_ADDR(x) (((x) & SDMA_PKT_SRBM_WRITE_ADDR_addr_mask) << SDMA_PKT_SRBM_WRITE_ADDR_addr_shift)
+
+/*define for DATA word*/
+/*define for data field*/
+#define SDMA_PKT_SRBM_WRITE_DATA_data_offset 2
+#define SDMA_PKT_SRBM_WRITE_DATA_data_mask   0xFFFFFFFF
+#define SDMA_PKT_SRBM_WRITE_DATA_data_shift  0
+#define SDMA_PKT_SRBM_WRITE_DATA_DATA(x) (((x) & SDMA_PKT_SRBM_WRITE_DATA_data_mask) << SDMA_PKT_SRBM_WRITE_DATA_data_shift)
+
+
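For reference, a hypothetical SRBM_WRITE packet built from the macros above could look like the sketch below; SDMA_OP_SRBM_WRITE is assumed to be the opcode constant defined earlier, and byte_en = 0xf is assumed to enable all four byte lanes of the register write.

static void example_srbm_write(uint32_t *dw, uint32_t reg, uint32_t val)
{
        dw[0] = SDMA_PKT_SRBM_WRITE_HEADER_OP(SDMA_OP_SRBM_WRITE) |
                SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf);
        dw[1] = SDMA_PKT_SRBM_WRITE_ADDR_ADDR(reg);     /* register offset, in dwords */
        dw[2] = SDMA_PKT_SRBM_WRITE_DATA_DATA(val);
}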
+/*
+** Definitions for SDMA_PKT_PRE_EXE packet
+*/
+
+/*define for HEADER word*/
+/*define for op field*/
+#define SDMA_PKT_PRE_EXE_HEADER_op_offset 0
+#define SDMA_PKT_PRE_EXE_HEADER_op_mask   0x000000FF
+#define SDMA_PKT_PRE_EXE_HEADER_op_shift  0
+#define SDMA_PKT_PRE_EXE_HEADER_OP(x) (((x) & SDMA_PKT_PRE_EXE_HEADER_op_mask) << SDMA_PKT_PRE_EXE_HEADER_op_shift)
+
+/*define for sub_op field*/
+#define SDMA_PKT_PRE_EXE_HEADER_sub_op_offset 0
+#define SDMA_PKT_PRE_EXE_HEADER_sub_op_mask   0x000000FF
+#define SDMA_PKT_PRE_EXE_HEADER_sub_op_shift  8
+#define SDMA_PKT_PRE_EXE_HEADER_SUB_OP(x) (((x) & SDMA_PKT_PRE_EXE_HEADER_sub_op_mask) << SDMA_PKT_PRE_EXE_HEADER_sub_op_shift)
+
+/*define for dev_sel field*/
+#define SDMA_PKT_PRE_EXE_HEADER_dev_sel_offset 0
+#define SDMA_PKT_PRE_EXE_HEADER_dev_sel_mask   0x000000FF
+#define SDMA_PKT_PRE_EXE_HEADER_dev_sel_shift  16
+#define SDMA_PKT_PRE_EXE_HEADER_DEV_SEL(x) (((x) & SDMA_PKT_PRE_EXE_HEADER_dev_sel_mask) << SDMA_PKT_PRE_EXE_HEADER_dev_sel_shift)
+
+/*define for EXEC_COUNT word*/
+/*define for exec_count field*/
+#define SDMA_PKT_PRE_EXE_EXEC_COUNT_exec_count_offset 1
+#define SDMA_PKT_PRE_EXE_EXEC_COUNT_exec_count_mask   0x00003FFF
+#define SDMA_PKT_PRE_EXE_EXEC_COUNT_exec_count_shift  0
+#define SDMA_PKT_PRE_EXE_EXEC_COUNT_EXEC_COUNT(x) (((x) & SDMA_PKT_PRE_EXE_EXEC_COUNT_exec_count_mask) << SDMA_PKT_PRE_EXE_EXEC_COUNT_exec_count_shift)
+
+
+/*
+** Definitions for SDMA_PKT_COND_EXE packet
+*/
+
+/*define for HEADER word*/
+/*define for op field*/
+#define SDMA_PKT_COND_EXE_HEADER_op_offset 0
+#define SDMA_PKT_COND_EXE_HEADER_op_mask   0x000000FF
+#define SDMA_PKT_COND_EXE_HEADER_op_shift  0
+#define SDMA_PKT_COND_EXE_HEADER_OP(x) (((x) & SDMA_PKT_COND_EXE_HEADER_op_mask) << SDMA_PKT_COND_EXE_HEADER_op_shift)
+
+/*define for sub_op field*/
+#define SDMA_PKT_COND_EXE_HEADER_sub_op_offset 0
+#define SDMA_PKT_COND_EXE_HEADER_sub_op_mask   0x000000FF
+#define SDMA_PKT_COND_EXE_HEADER_sub_op_shift  8
+#define SDMA_PKT_COND_EXE_HEADER_SUB_OP(x) (((x) & SDMA_PKT_COND_EXE_HEADER_sub_op_mask) << SDMA_PKT_COND_EXE_HEADER_sub_op_shift)
+
+/*define for ADDR_LO word*/
+/*define for addr_31_0 field*/
+#define SDMA_PKT_COND_EXE_ADDR_LO_addr_31_0_offset 1
+#define SDMA_PKT_COND_EXE_ADDR_LO_addr_31_0_mask   0xFFFFFFFF
+#define SDMA_PKT_COND_EXE_ADDR_LO_addr_31_0_shift  0
+#define SDMA_PKT_COND_EXE_ADDR_LO_ADDR_31_0(x) (((x) & SDMA_PKT_COND_EXE_ADDR_LO_addr_31_0_mask) << SDMA_PKT_COND_EXE_ADDR_LO_addr_31_0_shift)
+
+/*define for ADDR_HI word*/
+/*define for addr_63_32 field*/
+#define SDMA_PKT_COND_EXE_ADDR_HI_addr_63_32_offset 2
+#define SDMA_PKT_COND_EXE_ADDR_HI_addr_63_32_mask   0xFFFFFFFF
+#define SDMA_PKT_COND_EXE_ADDR_HI_addr_63_32_shift  0
+#define SDMA_PKT_COND_EXE_ADDR_HI_ADDR_63_32(x) (((x) & SDMA_PKT_COND_EXE_ADDR_HI_addr_63_32_mask) << SDMA_PKT_COND_EXE_ADDR_HI_addr_63_32_shift)
+
+/*define for REFERENCE word*/
+/*define for reference field*/
+#define SDMA_PKT_COND_EXE_REFERENCE_reference_offset 3
+#define SDMA_PKT_COND_EXE_REFERENCE_reference_mask   0xFFFFFFFF
+#define SDMA_PKT_COND_EXE_REFERENCE_reference_shift  0
+#define SDMA_PKT_COND_EXE_REFERENCE_REFERENCE(x) (((x) & SDMA_PKT_COND_EXE_REFERENCE_reference_mask) << SDMA_PKT_COND_EXE_REFERENCE_reference_shift)
+
+/*define for EXEC_COUNT word*/
+/*define for exec_count field*/
+#define SDMA_PKT_COND_EXE_EXEC_COUNT_exec_count_offset 4
+#define SDMA_PKT_COND_EXE_EXEC_COUNT_exec_count_mask   0x00003FFF
+#define SDMA_PKT_COND_EXE_EXEC_COUNT_exec_count_shift  0
+#define SDMA_PKT_COND_EXE_EXEC_COUNT_EXEC_COUNT(x) (((x) & SDMA_PKT_COND_EXE_EXEC_COUNT_exec_count_mask) << SDMA_PKT_COND_EXE_EXEC_COUNT_exec_count_shift)
+
+
+/*
+** Definitions for SDMA_PKT_CONSTANT_FILL packet
+*/
+
+/*define for HEADER word*/
+/*define for op field*/
+#define SDMA_PKT_CONSTANT_FILL_HEADER_op_offset 0
+#define SDMA_PKT_CONSTANT_FILL_HEADER_op_mask   0x000000FF
+#define SDMA_PKT_CONSTANT_FILL_HEADER_op_shift  0
+#define SDMA_PKT_CONSTANT_FILL_HEADER_OP(x) (((x) & SDMA_PKT_CONSTANT_FILL_HEADER_op_mask) << SDMA_PKT_CONSTANT_FILL_HEADER_op_shift)
+
+/*define for sub_op field*/
+#define SDMA_PKT_CONSTANT_FILL_HEADER_sub_op_offset 0
+#define SDMA_PKT_CONSTANT_FILL_HEADER_sub_op_mask   0x000000FF
+#define SDMA_PKT_CONSTANT_FILL_HEADER_sub_op_shift  8
+#define SDMA_PKT_CONSTANT_FILL_HEADER_SUB_OP(x) (((x) & SDMA_PKT_CONSTANT_FILL_HEADER_sub_op_mask) << SDMA_PKT_CONSTANT_FILL_HEADER_sub_op_shift)
+
+/*define for sw field*/
+#define SDMA_PKT_CONSTANT_FILL_HEADER_sw_offset 0
+#define SDMA_PKT_CONSTANT_FILL_HEADER_sw_mask   0x00000003
+#define SDMA_PKT_CONSTANT_FILL_HEADER_sw_shift  16
+#define SDMA_PKT_CONSTANT_FILL_HEADER_SW(x) (((x) & SDMA_PKT_CONSTANT_FILL_HEADER_sw_mask) << SDMA_PKT_CONSTANT_FILL_HEADER_sw_shift)
+
+/*define for fillsize field*/
+#define SDMA_PKT_CONSTANT_FILL_HEADER_fillsize_offset 0
+#define SDMA_PKT_CONSTANT_FILL_HEADER_fillsize_mask   0x00000003
+#define SDMA_PKT_CONSTANT_FILL_HEADER_fillsize_shift  30
+#define SDMA_PKT_CONSTANT_FILL_HEADER_FILLSIZE(x) (((x) & SDMA_PKT_CONSTANT_FILL_HEADER_fillsize_mask) << SDMA_PKT_CONSTANT_FILL_HEADER_fillsize_shift)
+
+/*define for DST_ADDR_LO word*/
+/*define for dst_addr_31_0 field*/
+#define SDMA_PKT_CONSTANT_FILL_DST_ADDR_LO_dst_addr_31_0_offset 1
+#define SDMA_PKT_CONSTANT_FILL_DST_ADDR_LO_dst_addr_31_0_mask   0xFFFFFFFF
+#define SDMA_PKT_CONSTANT_FILL_DST_ADDR_LO_dst_addr_31_0_shift  0
+#define SDMA_PKT_CONSTANT_FILL_DST_ADDR_LO_DST_ADDR_31_0(x) (((x) & SDMA_PKT_CONSTANT_FILL_DST_ADDR_LO_dst_addr_31_0_mask) << SDMA_PKT_CONSTANT_FILL_DST_ADDR_LO_dst_addr_31_0_shift)
+
+/*define for DST_ADDR_HI word*/
+/*define for dst_addr_63_32 field*/
+#define SDMA_PKT_CONSTANT_FILL_DST_ADDR_HI_dst_addr_63_32_offset 2
+#define SDMA_PKT_CONSTANT_FILL_DST_ADDR_HI_dst_addr_63_32_mask   0xFFFFFFFF
+#define SDMA_PKT_CONSTANT_FILL_DST_ADDR_HI_dst_addr_63_32_shift  0
+#define SDMA_PKT_CONSTANT_FILL_DST_ADDR_HI_DST_ADDR_63_32(x) (((x) & SDMA_PKT_CONSTANT_FILL_DST_ADDR_HI_dst_addr_63_32_mask) << SDMA_PKT_CONSTANT_FILL_DST_ADDR_HI_dst_addr_63_32_shift)
+
+/*define for DATA word*/
+/*define for src_data_31_0 field*/
+#define SDMA_PKT_CONSTANT_FILL_DATA_src_data_31_0_offset 3
+#define SDMA_PKT_CONSTANT_FILL_DATA_src_data_31_0_mask   0xFFFFFFFF
+#define SDMA_PKT_CONSTANT_FILL_DATA_src_data_31_0_shift  0
+#define SDMA_PKT_CONSTANT_FILL_DATA_SRC_DATA_31_0(x) (((x) & SDMA_PKT_CONSTANT_FILL_DATA_src_data_31_0_mask) << SDMA_PKT_CONSTANT_FILL_DATA_src_data_31_0_shift)
+
+/*define for COUNT word*/
+/*define for count field*/
+#define SDMA_PKT_CONSTANT_FILL_COUNT_count_offset 4
+#define SDMA_PKT_CONSTANT_FILL_COUNT_count_mask   0x003FFFFF
+#define SDMA_PKT_CONSTANT_FILL_COUNT_count_shift  0
+#define SDMA_PKT_CONSTANT_FILL_COUNT_COUNT(x) (((x) & SDMA_PKT_CONSTANT_FILL_COUNT_count_mask) << SDMA_PKT_CONSTANT_FILL_COUNT_count_shift)
+
+
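A constant-fill packet can be sketched from the macros above as shown below (illustrative only, not from this patch); SDMA_OP_CONST_FILL is assumed to be the opcode constant defined earlier, fillsize = 2 is assumed to select 32-bit fill units, and the exact encoding of COUNT should be confirmed against the hardware documentation.

static void example_const_fill(uint32_t *dw, uint64_t dst,
                               uint32_t pattern, uint32_t byte_count)
{
        dw[0] = SDMA_PKT_CONSTANT_FILL_HEADER_OP(SDMA_OP_CONST_FILL) |
                SDMA_PKT_CONSTANT_FILL_HEADER_FILLSIZE(2);
        dw[1] = SDMA_PKT_CONSTANT_FILL_DST_ADDR_LO_DST_ADDR_31_0(lower_32_bits(dst));
        dw[2] = SDMA_PKT_CONSTANT_FILL_DST_ADDR_HI_DST_ADDR_63_32(upper_32_bits(dst));
        dw[3] = SDMA_PKT_CONSTANT_FILL_DATA_SRC_DATA_31_0(pattern);
        dw[4] = SDMA_PKT_CONSTANT_FILL_COUNT_COUNT(byte_count);
}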
+/*
+** Definitions for SDMA_PKT_POLL_REGMEM packet
+*/
+
+/*define for HEADER word*/
+/*define for op field*/
+#define SDMA_PKT_POLL_REGMEM_HEADER_op_offset 0
+#define SDMA_PKT_POLL_REGMEM_HEADER_op_mask   0x000000FF
+#define SDMA_PKT_POLL_REGMEM_HEADER_op_shift  0
+#define SDMA_PKT_POLL_REGMEM_HEADER_OP(x) (((x) & SDMA_PKT_POLL_REGMEM_HEADER_op_mask) << SDMA_PKT_POLL_REGMEM_HEADER_op_shift)
+
+/*define for sub_op field*/
+#define SDMA_PKT_POLL_REGMEM_HEADER_sub_op_offset 0
+#define SDMA_PKT_POLL_REGMEM_HEADER_sub_op_mask   0x000000FF
+#define SDMA_PKT_POLL_REGMEM_HEADER_sub_op_shift  8
+#define SDMA_PKT_POLL_REGMEM_HEADER_SUB_OP(x) (((x) & SDMA_PKT_POLL_REGMEM_HEADER_sub_op_mask) << SDMA_PKT_POLL_REGMEM_HEADER_sub_op_shift)
+
+/*define for hdp_flush field*/
+#define SDMA_PKT_POLL_REGMEM_HEADER_hdp_flush_offset 0
+#define SDMA_PKT_POLL_REGMEM_HEADER_hdp_flush_mask   0x00000001
+#define SDMA_PKT_POLL_REGMEM_HEADER_hdp_flush_shift  26
+#define SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(x) (((x) & SDMA_PKT_POLL_REGMEM_HEADER_hdp_flush_mask) << SDMA_PKT_POLL_REGMEM_HEADER_hdp_flush_shift)
+
+/*define for func field*/
+#define SDMA_PKT_POLL_REGMEM_HEADER_func_offset 0
+#define SDMA_PKT_POLL_REGMEM_HEADER_func_mask   0x00000007
+#define SDMA_PKT_POLL_REGMEM_HEADER_func_shift  28
+#define SDMA_PKT_POLL_REGMEM_HEADER_FUNC(x) (((x) & SDMA_PKT_POLL_REGMEM_HEADER_func_mask) << SDMA_PKT_POLL_REGMEM_HEADER_func_shift)
+
+/*define for mem_poll field*/
+#define SDMA_PKT_POLL_REGMEM_HEADER_mem_poll_offset 0
+#define SDMA_PKT_POLL_REGMEM_HEADER_mem_poll_mask   0x00000001
+#define SDMA_PKT_POLL_REGMEM_HEADER_mem_poll_shift  31
+#define SDMA_PKT_POLL_REGMEM_HEADER_MEM_POLL(x) (((x) & SDMA_PKT_POLL_REGMEM_HEADER_mem_poll_mask) << SDMA_PKT_POLL_REGMEM_HEADER_mem_poll_shift)
+
+/*define for ADDR_LO word*/
+/*define for addr_31_0 field*/
+#define SDMA_PKT_POLL_REGMEM_ADDR_LO_addr_31_0_offset 1
+#define SDMA_PKT_POLL_REGMEM_ADDR_LO_addr_31_0_mask   0xFFFFFFFF
+#define SDMA_PKT_POLL_REGMEM_ADDR_LO_addr_31_0_shift  0
+#define SDMA_PKT_POLL_REGMEM_ADDR_LO_ADDR_31_0(x) (((x) & SDMA_PKT_POLL_REGMEM_ADDR_LO_addr_31_0_mask) << SDMA_PKT_POLL_REGMEM_ADDR_LO_addr_31_0_shift)
+
+/*define for ADDR_HI word*/
+/*define for addr_63_32 field*/
+#define SDMA_PKT_POLL_REGMEM_ADDR_HI_addr_63_32_offset 2
+#define SDMA_PKT_POLL_REGMEM_ADDR_HI_addr_63_32_mask   0xFFFFFFFF
+#define SDMA_PKT_POLL_REGMEM_ADDR_HI_addr_63_32_shift  0
+#define SDMA_PKT_POLL_REGMEM_ADDR_HI_ADDR_63_32(x) (((x) & SDMA_PKT_POLL_REGMEM_ADDR_HI_addr_63_32_mask) << SDMA_PKT_POLL_REGMEM_ADDR_HI_addr_63_32_shift)
+
+/*define for VALUE word*/
+/*define for value field*/
+#define SDMA_PKT_POLL_REGMEM_VALUE_value_offset 3
+#define SDMA_PKT_POLL_REGMEM_VALUE_value_mask   0xFFFFFFFF
+#define SDMA_PKT_POLL_REGMEM_VALUE_value_shift  0
+#define SDMA_PKT_POLL_REGMEM_VALUE_VALUE(x) (((x) & SDMA_PKT_POLL_REGMEM_VALUE_value_mask) << SDMA_PKT_POLL_REGMEM_VALUE_value_shift)
+
+/*define for MASK word*/
+/*define for mask field*/
+#define SDMA_PKT_POLL_REGMEM_MASK_mask_offset 4
+#define SDMA_PKT_POLL_REGMEM_MASK_mask_mask   0xFFFFFFFF
+#define SDMA_PKT_POLL_REGMEM_MASK_mask_shift  0
+#define SDMA_PKT_POLL_REGMEM_MASK_MASK(x) (((x) & SDMA_PKT_POLL_REGMEM_MASK_mask_mask) << SDMA_PKT_POLL_REGMEM_MASK_mask_shift)
+
+/*define for DW5 word*/
+/*define for interval field*/
+#define SDMA_PKT_POLL_REGMEM_DW5_interval_offset 5
+#define SDMA_PKT_POLL_REGMEM_DW5_interval_mask   0x0000FFFF
+#define SDMA_PKT_POLL_REGMEM_DW5_interval_shift  0
+#define SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(x) (((x) & SDMA_PKT_POLL_REGMEM_DW5_interval_mask) << SDMA_PKT_POLL_REGMEM_DW5_interval_shift)
+
+/*define for retry_count field*/
+#define SDMA_PKT_POLL_REGMEM_DW5_retry_count_offset 5
+#define SDMA_PKT_POLL_REGMEM_DW5_retry_count_mask   0x00000FFF
+#define SDMA_PKT_POLL_REGMEM_DW5_retry_count_shift  16
+#define SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(x) (((x) & SDMA_PKT_POLL_REGMEM_DW5_retry_count_mask) << SDMA_PKT_POLL_REGMEM_DW5_retry_count_shift)
+
+
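To show how the POLL_REGMEM fields above fit together, here is a hypothetical packet that polls a memory location until a masked compare succeeds; SDMA_OP_POLL_REGMEM is assumed to be the opcode constant defined earlier, func = 3 is assumed to select an "equal" comparison, and the interval/retry values are arbitrary.

static void example_poll_mem(uint32_t *dw, uint64_t addr,
                             uint32_t ref, uint32_t mask)
{
        dw[0] = SDMA_PKT_POLL_REGMEM_HEADER_OP(SDMA_OP_POLL_REGMEM) |
                SDMA_PKT_POLL_REGMEM_HEADER_MEM_POLL(1) |  /* poll memory, not a register */
                SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3);
        dw[1] = SDMA_PKT_POLL_REGMEM_ADDR_LO_ADDR_31_0(lower_32_bits(addr));
        dw[2] = SDMA_PKT_POLL_REGMEM_ADDR_HI_ADDR_63_32(upper_32_bits(addr));
        dw[3] = SDMA_PKT_POLL_REGMEM_VALUE_VALUE(ref);
        dw[4] = SDMA_PKT_POLL_REGMEM_MASK_MASK(mask);
        dw[5] = SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(0xfff) |
                SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(10);
}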
+/*
+** Definitions for SDMA_PKT_ATOMIC packet
+*/
+
+/*define for HEADER word*/
+/*define for op field*/
+#define SDMA_PKT_ATOMIC_HEADER_op_offset 0
+#define SDMA_PKT_ATOMIC_HEADER_op_mask   0x000000FF
+#define SDMA_PKT_ATOMIC_HEADER_op_shift  0
+#define SDMA_PKT_ATOMIC_HEADER_OP(x) (((x) & SDMA_PKT_ATOMIC_HEADER_op_mask) << SDMA_PKT_ATOMIC_HEADER_op_shift)
+
+/*define for loop field*/
+#define SDMA_PKT_ATOMIC_HEADER_loop_offset 0
+#define SDMA_PKT_ATOMIC_HEADER_loop_mask   0x00000001
+#define SDMA_PKT_ATOMIC_HEADER_loop_shift  16
+#define SDMA_PKT_ATOMIC_HEADER_LOOP(x) (((x) & SDMA_PKT_ATOMIC_HEADER_loop_mask) << SDMA_PKT_ATOMIC_HEADER_loop_shift)
+
+/*define for atomic_op field*/
+#define SDMA_PKT_ATOMIC_HEADER_atomic_op_offset 0
+#define SDMA_PKT_ATOMIC_HEADER_atomic_op_mask   0x0000007F
+#define SDMA_PKT_ATOMIC_HEADER_atomic_op_shift  25
+#define SDMA_PKT_ATOMIC_HEADER_ATOMIC_OP(x) (((x) & SDMA_PKT_ATOMIC_HEADER_atomic_op_mask) << SDMA_PKT_ATOMIC_HEADER_atomic_op_shift)
+
+/*define for ADDR_LO word*/
+/*define for addr_31_0 field*/
+#define SDMA_PKT_ATOMIC_ADDR_LO_addr_31_0_offset 1
+#define SDMA_PKT_ATOMIC_ADDR_LO_addr_31_0_mask   0xFFFFFFFF
+#define SDMA_PKT_ATOMIC_ADDR_LO_addr_31_0_shift  0
+#define SDMA_PKT_ATOMIC_ADDR_LO_ADDR_31_0(x) (((x) & SDMA_PKT_ATOMIC_ADDR_LO_addr_31_0_mask) << SDMA_PKT_ATOMIC_ADDR_LO_addr_31_0_shift)
+
+/*define for ADDR_HI word*/
+/*define for addr_63_32 field*/
+#define SDMA_PKT_ATOMIC_ADDR_HI_addr_63_32_offset 2
+#define SDMA_PKT_ATOMIC_ADDR_HI_addr_63_32_mask   0xFFFFFFFF
+#define SDMA_PKT_ATOMIC_ADDR_HI_addr_63_32_shift  0
+#define SDMA_PKT_ATOMIC_ADDR_HI_ADDR_63_32(x) (((x) & SDMA_PKT_ATOMIC_ADDR_HI_addr_63_32_mask) << SDMA_PKT_ATOMIC_ADDR_HI_addr_63_32_shift)
+
+/*define for SRC_DATA_LO word*/
+/*define for src_data_31_0 field*/
+#define SDMA_PKT_ATOMIC_SRC_DATA_LO_src_data_31_0_offset 3
+#define SDMA_PKT_ATOMIC_SRC_DATA_LO_src_data_31_0_mask   0xFFFFFFFF
+#define SDMA_PKT_ATOMIC_SRC_DATA_LO_src_data_31_0_shift  0
+#define SDMA_PKT_ATOMIC_SRC_DATA_LO_SRC_DATA_31_0(x) (((x) & SDMA_PKT_ATOMIC_SRC_DATA_LO_src_data_31_0_mask) << SDMA_PKT_ATOMIC_SRC_DATA_LO_src_data_31_0_shift)
+
+/*define for SRC_DATA_HI word*/
+/*define for src_data_63_32 field*/
+#define SDMA_PKT_ATOMIC_SRC_DATA_HI_src_data_63_32_offset 4
+#define SDMA_PKT_ATOMIC_SRC_DATA_HI_src_data_63_32_mask   0xFFFFFFFF
+#define SDMA_PKT_ATOMIC_SRC_DATA_HI_src_data_63_32_shift  0
+#define SDMA_PKT_ATOMIC_SRC_DATA_HI_SRC_DATA_63_32(x) (((x) & SDMA_PKT_ATOMIC_SRC_DATA_HI_src_data_63_32_mask) << SDMA_PKT_ATOMIC_SRC_DATA_HI_src_data_63_32_shift)
+
+/*define for CMP_DATA_LO word*/
+/*define for cmp_data_31_0 field*/
+#define SDMA_PKT_ATOMIC_CMP_DATA_LO_cmp_data_31_0_offset 5
+#define SDMA_PKT_ATOMIC_CMP_DATA_LO_cmp_data_31_0_mask   0xFFFFFFFF
+#define SDMA_PKT_ATOMIC_CMP_DATA_LO_cmp_data_31_0_shift  0
+#define SDMA_PKT_ATOMIC_CMP_DATA_LO_CMP_DATA_31_0(x) (((x) & SDMA_PKT_ATOMIC_CMP_DATA_LO_cmp_data_31_0_mask) << SDMA_PKT_ATOMIC_CMP_DATA_LO_cmp_data_31_0_shift)
+
+/*define for CMP_DATA_HI word*/
+/*define for cmp_data_63_32 field*/
+#define SDMA_PKT_ATOMIC_CMP_DATA_HI_cmp_data_63_32_offset 6
+#define SDMA_PKT_ATOMIC_CMP_DATA_HI_cmp_data_63_32_mask   0xFFFFFFFF
+#define SDMA_PKT_ATOMIC_CMP_DATA_HI_cmp_data_63_32_shift  0
+#define SDMA_PKT_ATOMIC_CMP_DATA_HI_CMP_DATA_63_32(x) (((x) & SDMA_PKT_ATOMIC_CMP_DATA_HI_cmp_data_63_32_mask) << SDMA_PKT_ATOMIC_CMP_DATA_HI_cmp_data_63_32_shift)
+
+/*define for LOOP_INTERVAL word*/
+/*define for loop_interval field*/
+#define SDMA_PKT_ATOMIC_LOOP_INTERVAL_loop_interval_offset 7
+#define SDMA_PKT_ATOMIC_LOOP_INTERVAL_loop_interval_mask   0x00001FFF
+#define SDMA_PKT_ATOMIC_LOOP_INTERVAL_loop_interval_shift  0
+#define SDMA_PKT_ATOMIC_LOOP_INTERVAL_LOOP_INTERVAL(x) (((x) & SDMA_PKT_ATOMIC_LOOP_INTERVAL_loop_interval_mask) << SDMA_PKT_ATOMIC_LOOP_INTERVAL_loop_interval_shift)
+
+
+/*
+** Definitions for SDMA_PKT_TIMESTAMP_SET packet
+*/
+
+/*define for HEADER word*/
+/*define for op field*/
+#define SDMA_PKT_TIMESTAMP_SET_HEADER_op_offset 0
+#define SDMA_PKT_TIMESTAMP_SET_HEADER_op_mask   0x000000FF
+#define SDMA_PKT_TIMESTAMP_SET_HEADER_op_shift  0
+#define SDMA_PKT_TIMESTAMP_SET_HEADER_OP(x) (((x) & SDMA_PKT_TIMESTAMP_SET_HEADER_op_mask) << SDMA_PKT_TIMESTAMP_SET_HEADER_op_shift)
+
+/*define for sub_op field*/
+#define SDMA_PKT_TIMESTAMP_SET_HEADER_sub_op_offset 0
+#define SDMA_PKT_TIMESTAMP_SET_HEADER_sub_op_mask   0x000000FF
+#define SDMA_PKT_TIMESTAMP_SET_HEADER_sub_op_shift  8
+#define SDMA_PKT_TIMESTAMP_SET_HEADER_SUB_OP(x) (((x) & SDMA_PKT_TIMESTAMP_SET_HEADER_sub_op_mask) << SDMA_PKT_TIMESTAMP_SET_HEADER_sub_op_shift)
+
+/*define for INIT_DATA_LO word*/
+/*define for init_data_31_0 field*/
+#define SDMA_PKT_TIMESTAMP_SET_INIT_DATA_LO_init_data_31_0_offset 1
+#define SDMA_PKT_TIMESTAMP_SET_INIT_DATA_LO_init_data_31_0_mask   0xFFFFFFFF
+#define SDMA_PKT_TIMESTAMP_SET_INIT_DATA_LO_init_data_31_0_shift  0
+#define SDMA_PKT_TIMESTAMP_SET_INIT_DATA_LO_INIT_DATA_31_0(x) (((x) & SDMA_PKT_TIMESTAMP_SET_INIT_DATA_LO_init_data_31_0_mask) << SDMA_PKT_TIMESTAMP_SET_INIT_DATA_LO_init_data_31_0_shift)
+
+/*define for INIT_DATA_HI word*/
+/*define for init_data_63_32 field*/
+#define SDMA_PKT_TIMESTAMP_SET_INIT_DATA_HI_init_data_63_32_offset 2
+#define SDMA_PKT_TIMESTAMP_SET_INIT_DATA_HI_init_data_63_32_mask   0xFFFFFFFF
+#define SDMA_PKT_TIMESTAMP_SET_INIT_DATA_HI_init_data_63_32_shift  0
+#define SDMA_PKT_TIMESTAMP_SET_INIT_DATA_HI_INIT_DATA_63_32(x) (((x) & SDMA_PKT_TIMESTAMP_SET_INIT_DATA_HI_init_data_63_32_mask) << SDMA_PKT_TIMESTAMP_SET_INIT_DATA_HI_init_data_63_32_shift)
+
+
+/*
+** Definitions for SDMA_PKT_TIMESTAMP_GET packet
+*/
+
+/*define for HEADER word*/
+/*define for op field*/
+#define SDMA_PKT_TIMESTAMP_GET_HEADER_op_offset 0
+#define SDMA_PKT_TIMESTAMP_GET_HEADER_op_mask   0x000000FF
+#define SDMA_PKT_TIMESTAMP_GET_HEADER_op_shift  0
+#define SDMA_PKT_TIMESTAMP_GET_HEADER_OP(x) (((x) & SDMA_PKT_TIMESTAMP_GET_HEADER_op_mask) << SDMA_PKT_TIMESTAMP_GET_HEADER_op_shift)
+
+/*define for sub_op field*/
+#define SDMA_PKT_TIMESTAMP_GET_HEADER_sub_op_offset 0
+#define SDMA_PKT_TIMESTAMP_GET_HEADER_sub_op_mask   0x000000FF
+#define SDMA_PKT_TIMESTAMP_GET_HEADER_sub_op_shift  8
+#define SDMA_PKT_TIMESTAMP_GET_HEADER_SUB_OP(x) (((x) & SDMA_PKT_TIMESTAMP_GET_HEADER_sub_op_mask) << SDMA_PKT_TIMESTAMP_GET_HEADER_sub_op_shift)
+
+/*define for WRITE_ADDR_LO word*/
+/*define for write_addr_31_3 field*/
+#define SDMA_PKT_TIMESTAMP_GET_WRITE_ADDR_LO_write_addr_31_3_offset 1
+#define SDMA_PKT_TIMESTAMP_GET_WRITE_ADDR_LO_write_addr_31_3_mask   0x1FFFFFFF
+#define SDMA_PKT_TIMESTAMP_GET_WRITE_ADDR_LO_write_addr_31_3_shift  3
+#define SDMA_PKT_TIMESTAMP_GET_WRITE_ADDR_LO_WRITE_ADDR_31_3(x) (((x) & SDMA_PKT_TIMESTAMP_GET_WRITE_ADDR_LO_write_addr_31_3_mask) << SDMA_PKT_TIMESTAMP_GET_WRITE_ADDR_LO_write_addr_31_3_shift)
+
+/*define for WRITE_ADDR_HI word*/
+/*define for write_addr_63_32 field*/
+#define SDMA_PKT_TIMESTAMP_GET_WRITE_ADDR_HI_write_addr_63_32_offset 2
+#define SDMA_PKT_TIMESTAMP_GET_WRITE_ADDR_HI_write_addr_63_32_mask   0xFFFFFFFF
+#define SDMA_PKT_TIMESTAMP_GET_WRITE_ADDR_HI_write_addr_63_32_shift  0
+#define SDMA_PKT_TIMESTAMP_GET_WRITE_ADDR_HI_WRITE_ADDR_63_32(x) (((x) & SDMA_PKT_TIMESTAMP_GET_WRITE_ADDR_HI_write_addr_63_32_mask) << SDMA_PKT_TIMESTAMP_GET_WRITE_ADDR_HI_write_addr_63_32_shift)
+
+
+/*
+** Definitions for SDMA_PKT_TIMESTAMP_GET_GLOBAL packet
+*/
+
+/*define for HEADER word*/
+/*define for op field*/
+#define SDMA_PKT_TIMESTAMP_GET_GLOBAL_HEADER_op_offset 0
+#define SDMA_PKT_TIMESTAMP_GET_GLOBAL_HEADER_op_mask   0x000000FF
+#define SDMA_PKT_TIMESTAMP_GET_GLOBAL_HEADER_op_shift  0
+#define SDMA_PKT_TIMESTAMP_GET_GLOBAL_HEADER_OP(x) (((x) & SDMA_PKT_TIMESTAMP_GET_GLOBAL_HEADER_op_mask) << SDMA_PKT_TIMESTAMP_GET_GLOBAL_HEADER_op_shift)
+
+/*define for sub_op field*/
+#define SDMA_PKT_TIMESTAMP_GET_GLOBAL_HEADER_sub_op_offset 0
+#define SDMA_PKT_TIMESTAMP_GET_GLOBAL_HEADER_sub_op_mask   0x000000FF
+#define SDMA_PKT_TIMESTAMP_GET_GLOBAL_HEADER_sub_op_shift  8
+#define SDMA_PKT_TIMESTAMP_GET_GLOBAL_HEADER_SUB_OP(x) (((x) & SDMA_PKT_TIMESTAMP_GET_GLOBAL_HEADER_sub_op_mask) << SDMA_PKT_TIMESTAMP_GET_GLOBAL_HEADER_sub_op_shift)
+
+/*define for WRITE_ADDR_LO word*/
+/*define for write_addr_31_3 field*/
+#define SDMA_PKT_TIMESTAMP_GET_GLOBAL_WRITE_ADDR_LO_write_addr_31_3_offset 1
+#define SDMA_PKT_TIMESTAMP_GET_GLOBAL_WRITE_ADDR_LO_write_addr_31_3_mask   0x1FFFFFFF
+#define SDMA_PKT_TIMESTAMP_GET_GLOBAL_WRITE_ADDR_LO_write_addr_31_3_shift  3
+#define SDMA_PKT_TIMESTAMP_GET_GLOBAL_WRITE_ADDR_LO_WRITE_ADDR_31_3(x) (((x) & SDMA_PKT_TIMESTAMP_GET_GLOBAL_WRITE_ADDR_LO_write_addr_31_3_mask) << SDMA_PKT_TIMESTAMP_GET_GLOBAL_WRITE_ADDR_LO_write_addr_31_3_shift)
+
+/*define for WRITE_ADDR_HI word*/
+/*define for write_addr_63_32 field*/
+#define SDMA_PKT_TIMESTAMP_GET_GLOBAL_WRITE_ADDR_HI_write_addr_63_32_offset 2
+#define SDMA_PKT_TIMESTAMP_GET_GLOBAL_WRITE_ADDR_HI_write_addr_63_32_mask   0xFFFFFFFF
+#define SDMA_PKT_TIMESTAMP_GET_GLOBAL_WRITE_ADDR_HI_write_addr_63_32_shift  0
+#define SDMA_PKT_TIMESTAMP_GET_GLOBAL_WRITE_ADDR_HI_WRITE_ADDR_63_32(x) (((x) & SDMA_PKT_TIMESTAMP_GET_GLOBAL_WRITE_ADDR_HI_write_addr_63_32_mask) << SDMA_PKT_TIMESTAMP_GET_GLOBAL_WRITE_ADDR_HI_write_addr_63_32_shift)
+
+
+/*
+** Definitions for SDMA_PKT_TRAP packet
+*/
+
+/*define for HEADER word*/
+/*define for op field*/
+#define SDMA_PKT_TRAP_HEADER_op_offset 0
+#define SDMA_PKT_TRAP_HEADER_op_mask   0x000000FF
+#define SDMA_PKT_TRAP_HEADER_op_shift  0
+#define SDMA_PKT_TRAP_HEADER_OP(x) (((x) & SDMA_PKT_TRAP_HEADER_op_mask) << SDMA_PKT_TRAP_HEADER_op_shift)
+
+/*define for sub_op field*/
+#define SDMA_PKT_TRAP_HEADER_sub_op_offset 0
+#define SDMA_PKT_TRAP_HEADER_sub_op_mask   0x000000FF
+#define SDMA_PKT_TRAP_HEADER_sub_op_shift  8
+#define SDMA_PKT_TRAP_HEADER_SUB_OP(x) (((x) & SDMA_PKT_TRAP_HEADER_sub_op_mask) << SDMA_PKT_TRAP_HEADER_sub_op_shift)
+
+/*define for INT_CONTEXT word*/
+/*define for int_context field*/
+#define SDMA_PKT_TRAP_INT_CONTEXT_int_context_offset 1
+#define SDMA_PKT_TRAP_INT_CONTEXT_int_context_mask   0x0FFFFFFF
+#define SDMA_PKT_TRAP_INT_CONTEXT_int_context_shift  0
+#define SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(x) (((x) & SDMA_PKT_TRAP_INT_CONTEXT_int_context_mask) << SDMA_PKT_TRAP_INT_CONTEXT_int_context_shift)
+
+
+/*
+** Definitions for SDMA_PKT_NOP packet
+*/
+
+/*define for HEADER word*/
+/*define for op field*/
+#define SDMA_PKT_NOP_HEADER_op_offset 0
+#define SDMA_PKT_NOP_HEADER_op_mask   0x000000FF
+#define SDMA_PKT_NOP_HEADER_op_shift  0
+#define SDMA_PKT_NOP_HEADER_OP(x) (((x) & SDMA_PKT_NOP_HEADER_op_mask) << SDMA_PKT_NOP_HEADER_op_shift)
+
+/*define for sub_op field*/
+#define SDMA_PKT_NOP_HEADER_sub_op_offset 0
+#define SDMA_PKT_NOP_HEADER_sub_op_mask   0x000000FF
+#define SDMA_PKT_NOP_HEADER_sub_op_shift  8
+#define SDMA_PKT_NOP_HEADER_SUB_OP(x) (((x) & SDMA_PKT_NOP_HEADER_sub_op_mask) << SDMA_PKT_NOP_HEADER_sub_op_shift)
+
+
+#endif /* __TONGA_SDMA_PKT_OPEN_H_ */
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_smc.c b/drivers/gpu/drm/amd/amdgpu/tonga_smc.c
new file mode 100644 (file)
index 0000000..5fc53a4
--- /dev/null
@@ -0,0 +1,852 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/firmware.h>
+#include "drmP.h"
+#include "amdgpu.h"
+#include "tonga_ppsmc.h"
+#include "tonga_smumgr.h"
+#include "smu_ucode_xfer_vi.h"
+#include "amdgpu_ucode.h"
+
+#include "smu/smu_7_1_2_d.h"
+#include "smu/smu_7_1_2_sh_mask.h"
+
+#define TONGA_SMC_SIZE 0x20000
+
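+/*
+ * Point the SMC indirect index register at the given SRAM address (which
+ * must be dword aligned and below the limit) and disable auto-increment,
+ * so a single dword can then be accessed through SMC_IND_DATA_0.
+ * Callers are expected to hold adev->smc_idx_lock.
+ */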
+static int tonga_set_smc_sram_address(struct amdgpu_device *adev, uint32_t smc_address, uint32_t limit)
+{
+       uint32_t val;
+
+       if (smc_address & 3)
+               return -EINVAL;
+
+       if ((smc_address + 3) > limit)
+               return -EINVAL;
+
+       WREG32(mmSMC_IND_INDEX_0, smc_address);
+
+       val = RREG32(mmSMC_IND_ACCESS_CNTL);
+       val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
+       WREG32(mmSMC_IND_ACCESS_CNTL, val);
+
+       return 0;
+}
+
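+/*
+ * Copy a byte buffer into SMC SRAM through the indirect data port.
+ * Whole dwords are packed MSB first; a trailing partial dword is merged
+ * with the existing SRAM contents using a read-modify-write cycle.
+ */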
+static int tonga_copy_bytes_to_smc(struct amdgpu_device *adev, uint32_t smc_start_address, const uint8_t *src, uint32_t byte_count, uint32_t limit)
+{
+       uint32_t addr;
+       uint32_t data, orig_data;
+       int result = 0;
+       uint32_t extra_shift;
+       unsigned long flags;
+
+       if (smc_start_address & 3)
+               return -EINVAL;
+
+       if ((smc_start_address + byte_count) > limit)
+               return -EINVAL;
+
+       addr = smc_start_address;
+
+       spin_lock_irqsave(&adev->smc_idx_lock, flags);
+       while (byte_count >= 4) {
+               /* Bytes are written into the SMC address space with the MSB first */
+               data = (src[0] << 24) + (src[1] << 16) + (src[2] << 8) + src[3];
+
+               result = tonga_set_smc_sram_address(adev, addr, limit);
+
+               if (result)
+                       goto out;
+
+               WREG32(mmSMC_IND_DATA_0, data);
+
+               src += 4;
+               byte_count -= 4;
+               addr += 4;
+       }
+
+       if (0 != byte_count) {
+               /* Now write odd bytes left, do a read modify write cycle */
+               data = 0;
+
+               result = tonga_set_smc_sram_address(adev, addr, limit);
+               if (result)
+                       goto out;
+
+               orig_data = RREG32(mmSMC_IND_DATA_0);
+               extra_shift = 8 * (4 - byte_count);
+
+               while (byte_count > 0) {
+                       data = (data << 8) + *src++;
+                       byte_count--;
+               }
+
+               data <<= extra_shift;
+               data |= (orig_data & ~((~0UL) << extra_shift));
+
+               result = tonga_set_smc_sram_address(adev, addr, limit);
+               if (result)
+                       goto out;
+
+               WREG32(mmSMC_IND_DATA_0, data);
+       }
+
+out:
+       spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
+       return result;
+}
+
+static int tonga_program_jump_on_start(struct amdgpu_device *adev)
+{
+       static const unsigned char data[] = {0xE0, 0x00, 0x80, 0x40};
+
+       tonga_copy_bytes_to_smc(adev, 0x0, data, 4, sizeof(data)+1);
+
+       return 0;
+}
+
+static bool tonga_is_smc_ram_running(struct amdgpu_device *adev)
+{
+       uint32_t val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
+       val = REG_GET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable);
+
+       return ((0 == val) && (0x20100 <= RREG32_SMC(ixSMC_PC_C)));
+}
+
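+/*
+ * Poll SMC_RESP_0 until the SMC acknowledges the last message or the
+ * usec timeout expires.
+ */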
+static int wait_smu_response(struct amdgpu_device *adev)
+{
+       int i;
+       uint32_t val;
+
+       for (i = 0; i < adev->usec_timeout; i++) {
+               val = RREG32(mmSMC_RESP_0);
+               if (REG_GET_FIELD(val, SMC_RESP_0, SMC_RESP))
+                       break;
+               udelay(1);
+       }
+
+       if (i == adev->usec_timeout)
+               return -EINVAL;
+
+       return 0;
+}
+
+static int tonga_send_msg_to_smc_offset(struct amdgpu_device *adev)
+{
+       if (wait_smu_response(adev)) {
+               DRM_ERROR("Failed to send previous message\n");
+               return -EINVAL;
+       }
+
+       WREG32(mmSMC_MSG_ARG_0, 0x20000);
+       WREG32(mmSMC_MESSAGE_0, PPSMC_MSG_Test);
+
+       if (wait_smu_response(adev)) {
+               DRM_ERROR("Failed to send message\n");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int tonga_send_msg_to_smc(struct amdgpu_device *adev, PPSMC_Msg msg)
+{
+       if (!tonga_is_smc_ram_running(adev))
+               return -EINVAL;
+
+       if (wait_smu_response(adev)) {
+               DRM_ERROR("Failed to send previous message\n");
+               return -EINVAL;
+       }
+
+       WREG32(mmSMC_MESSAGE_0, msg);
+
+       if (wait_smu_response(adev)) {
+               DRM_ERROR("Failed to send message\n");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int tonga_send_msg_to_smc_without_waiting(struct amdgpu_device *adev,
+                                               PPSMC_Msg msg)
+{
+       if (wait_smu_response(adev)) {
+               DRM_ERROR("Failed to send previous message\n");
+               return -EINVAL;
+       }
+
+       WREG32(mmSMC_MESSAGE_0, msg);
+
+       return 0;
+}
+
+static int tonga_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
+                                               PPSMC_Msg msg,
+                                               uint32_t parameter)
+{
+       if (!tonga_is_smc_ram_running(adev))
+               return -EINVAL;
+
+       if (wait_smu_response(adev)) {
+               DRM_ERROR("Failed to send previous message\n");
+               return -EINVAL;
+       }
+
+       WREG32(mmSMC_MSG_ARG_0, parameter);
+
+       return tonga_send_msg_to_smc(adev, msg);
+}
+
+static int tonga_send_msg_to_smc_with_parameter_without_waiting(
+                                       struct amdgpu_device *adev,
+                                       PPSMC_Msg msg, uint32_t parameter)
+{
+       if (wait_smu_response(adev)) {
+               DRM_ERROR("Failed to send previous message\n");
+               return -EINVAL;
+       }
+
+       WREG32(mmSMC_MSG_ARG_0, parameter);
+
+       return tonga_send_msg_to_smc_without_waiting(adev, msg);
+}
+
+#if 0 /* not used yet */
+static int tonga_wait_for_smc_inactive(struct amdgpu_device *adev)
+{
+       int i;
+       uint32_t val;
+
+       if (!tonga_is_smc_ram_running(adev))
+               return -EINVAL;
+
+       for (i = 0; i < adev->usec_timeout; i++) {
+               val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
+               if (REG_GET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, cken) == 0)
+                       break;
+               udelay(1);
+       }
+
+       if (i == adev->usec_timeout)
+               return -EINVAL;
+
+       return 0;
+}
+#endif
+
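+/*
+ * Validate the SMC firmware image referenced by adev->pm.fw and stream it
+ * into SMC RAM through the auto-increment indirect register window.
+ */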
+static int tonga_smu_upload_firmware_image(struct amdgpu_device *adev)
+{
+       const struct smc_firmware_header_v1_0 *hdr;
+       uint32_t ucode_size;
+       uint32_t ucode_start_address;
+       const uint8_t *src;
+       uint32_t val;
+       uint32_t byte_count;
+       const uint32_t *data;
+       unsigned long flags;
+
+       if (!adev->pm.fw)
+               return -EINVAL;
+
+       hdr = (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data;
+       amdgpu_ucode_print_smc_hdr(&hdr->header);
+
+       adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);
+       ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
+       ucode_start_address = le32_to_cpu(hdr->ucode_start_addr);
+       src = (const uint8_t *)
+               (adev->pm.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+
+       if (ucode_size & 3) {
+               DRM_ERROR("SMC ucode is not 4 bytes aligned\n");
+               return -EINVAL;
+       }
+
+       if (ucode_size > TONGA_SMC_SIZE) {
+               DRM_ERROR("SMC address is beyond the SMC RAM area\n");
+               return -EINVAL;
+       }
+
+       spin_lock_irqsave(&adev->smc_idx_lock, flags);
+       WREG32(mmSMC_IND_INDEX_0, ucode_start_address);
+
+       val = RREG32(mmSMC_IND_ACCESS_CNTL);
+       val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 1);
+       WREG32(mmSMC_IND_ACCESS_CNTL, val);
+
+       byte_count = ucode_size;
+       data = (const uint32_t *)src;
+       for (; byte_count >= 4; data++, byte_count -= 4)
+               WREG32(mmSMC_IND_DATA_0, data[0]);
+
+       val = RREG32(mmSMC_IND_ACCESS_CNTL);
+       val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
+       WREG32(mmSMC_IND_ACCESS_CNTL, val);
+       spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
+
+       return 0;
+}
+
+#if 0 /* not used yet */
+static int tonga_read_smc_sram_dword(struct amdgpu_device *adev,
+                               uint32_t smc_address,
+                               uint32_t *value,
+                               uint32_t limit)
+{
+       int result;
+       unsigned long flags;
+
+       spin_lock_irqsave(&adev->smc_idx_lock, flags);
+       result = tonga_set_smc_sram_address(adev, smc_address, limit);
+       if (result == 0)
+               *value = RREG32(mmSMC_IND_DATA_0);
+       spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
+       return result;
+}
+
+static int tonga_write_smc_sram_dword(struct amdgpu_device *adev,
+                               uint32_t smc_address,
+                               uint32_t value,
+                               uint32_t limit)
+{
+       int result;
+       unsigned long flags;
+
+       spin_lock_irqsave(&adev->smc_idx_lock, flags);
+       result = tonga_set_smc_sram_address(adev, smc_address, limit);
+       if (result == 0)
+               WREG32(mmSMC_IND_DATA_0, value);
+       spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
+       return result;
+}
+
+static int tonga_smu_stop_smc(struct amdgpu_device *adev)
+{
+       uint32_t val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
+       val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 1);
+       WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
+
+       val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
+       val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 1);
+       WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val);
+
+       return 0;
+}
+#endif
+
+static enum AMDGPU_UCODE_ID tonga_convert_fw_type(uint32_t fw_type)
+{
+       switch (fw_type) {
+               case UCODE_ID_SDMA0:
+                       return AMDGPU_UCODE_ID_SDMA0;
+               case UCODE_ID_SDMA1:
+                       return AMDGPU_UCODE_ID_SDMA1;
+               case UCODE_ID_CP_CE:
+                       return AMDGPU_UCODE_ID_CP_CE;
+               case UCODE_ID_CP_PFP:
+                       return AMDGPU_UCODE_ID_CP_PFP;
+               case UCODE_ID_CP_ME:
+                       return AMDGPU_UCODE_ID_CP_ME;
+               case UCODE_ID_CP_MEC:
+               case UCODE_ID_CP_MEC_JT1:
+                       return AMDGPU_UCODE_ID_CP_MEC1;
+               case UCODE_ID_CP_MEC_JT2:
+                       return AMDGPU_UCODE_ID_CP_MEC2;
+               case UCODE_ID_RLC_G:
+                       return AMDGPU_UCODE_ID_RLC_G;
+               default:
+                       DRM_ERROR("ucode type is out of range!\n");
+                       return AMDGPU_UCODE_ID_MAXIMUM;
+       }
+}
+
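+/* Fill one SMU TOC entry with the GPU address and size of a ucode image.
+ * The MEC jump table entries point into the MEC image at jt_offset.
+ */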
+static int tonga_smu_populate_single_firmware_entry(struct amdgpu_device *adev,
+                                               uint32_t fw_type,
+                                               struct SMU_Entry *entry)
+{
+       enum AMDGPU_UCODE_ID id = tonga_convert_fw_type(fw_type);
+       struct amdgpu_firmware_info *ucode = &adev->firmware.ucode[id];
+       const struct gfx_firmware_header_v1_0 *header = NULL;
+       uint64_t gpu_addr;
+       uint32_t data_size;
+
+       if (ucode->fw == NULL)
+               return -EINVAL;
+
+       gpu_addr  = ucode->mc_addr;
+       header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
+       data_size = le32_to_cpu(header->header.ucode_size_bytes);
+
+       if ((fw_type == UCODE_ID_CP_MEC_JT1) ||
+               (fw_type == UCODE_ID_CP_MEC_JT2)) {
+               gpu_addr += le32_to_cpu(header->jt_offset) << 2;
+               data_size = le32_to_cpu(header->jt_size) << 2;
+       }
+
+       entry->version = (uint16_t)le32_to_cpu(header->header.ucode_version);
+       entry->id = (uint16_t)fw_type;
+       entry->image_addr_high = upper_32_bits(gpu_addr);
+       entry->image_addr_low = lower_32_bits(gpu_addr);
+       entry->meta_data_addr_high = 0;
+       entry->meta_data_addr_low = 0;
+       entry->data_size_byte = data_size;
+       entry->num_register_entries = 0;
+
+       if (fw_type == UCODE_ID_RLC_G)
+               entry->flags = 1;
+       else
+               entry->flags = 0;
+
+       return 0;
+}
+
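+/* Build the firmware TOC in the driver's header buffer and ask the SMU to
+ * fetch and load the listed ucode images.
+ */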
+static int tonga_smu_request_load_fw(struct amdgpu_device *adev)
+{
+       struct tonga_smu_private_data *private = (struct tonga_smu_private_data *)adev->smu.priv;
+       struct SMU_DRAMData_TOC *toc;
+       uint32_t fw_to_load;
+
+       WREG32_SMC(ixSOFT_REGISTERS_TABLE_28, 0);
+
+       tonga_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SMU_DRAM_ADDR_HI, private->smu_buffer_addr_high);
+       tonga_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SMU_DRAM_ADDR_LO, private->smu_buffer_addr_low);
+
+       toc = (struct SMU_DRAMData_TOC *)private->header;
+       toc->num_entries = 0;
+       toc->structure_version = 1;
+
+       if (!adev->firmware.smu_load)
+               return 0;
+
+       if (tonga_smu_populate_single_firmware_entry(adev, UCODE_ID_RLC_G,
+                       &toc->entry[toc->num_entries++])) {
+               DRM_ERROR("Failed to get firmware entry for RLC\n");
+               return -EINVAL;
+       }
+
+       if (tonga_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_CE,
+                       &toc->entry[toc->num_entries++])) {
+               DRM_ERROR("Failed to get firmware entry for CE\n");
+               return -EINVAL;
+       }
+
+       if (tonga_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_PFP,
+                       &toc->entry[toc->num_entries++])) {
+               DRM_ERROR("Failed to get firmware entry for PFP\n");
+               return -EINVAL;
+       }
+
+       if (tonga_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_ME,
+                       &toc->entry[toc->num_entries++])) {
+               DRM_ERROR("Failed to get firmware entry for ME\n");
+               return -EINVAL;
+       }
+
+       if (tonga_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC,
+                       &toc->entry[toc->num_entries++])) {
+               DRM_ERROR("Failed to get firmware entry for MEC\n");
+               return -EINVAL;
+       }
+
+       if (tonga_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC_JT1,
+                       &toc->entry[toc->num_entries++])) {
+               DRM_ERROR("Failed to get firmware entry for MEC_JT1\n");
+               return -EINVAL;
+       }
+
+       if (tonga_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC_JT2,
+                       &toc->entry[toc->num_entries++])) {
+               DRM_ERROR("Failed to get firmware entry for MEC_JT2\n");
+               return -EINVAL;
+       }
+
+       if (tonga_smu_populate_single_firmware_entry(adev, UCODE_ID_SDMA0,
+                       &toc->entry[toc->num_entries++])) {
+               DRM_ERROR("Failed to get firmware entry for SDMA0\n");
+               return -EINVAL;
+       }
+
+       if (tonga_smu_populate_single_firmware_entry(adev, UCODE_ID_SDMA1,
+                       &toc->entry[toc->num_entries++])) {
+               DRM_ERROR("Failed to get firmware entry for SDMA1\n");
+               return -EINVAL;
+       }
+
+       tonga_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DRV_DRAM_ADDR_HI, private->header_addr_high);
+       tonga_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DRV_DRAM_ADDR_LO, private->header_addr_low);
+
+       fw_to_load = UCODE_ID_RLC_G_MASK |
+                       UCODE_ID_SDMA0_MASK |
+                       UCODE_ID_SDMA1_MASK |
+                       UCODE_ID_CP_CE_MASK |
+                       UCODE_ID_CP_ME_MASK |
+                       UCODE_ID_CP_PFP_MASK |
+                       UCODE_ID_CP_MEC_MASK;
+
+       if (tonga_send_msg_to_smc_with_parameter_without_waiting(adev, PPSMC_MSG_LoadUcodes, fw_to_load)) {
+               DRM_ERROR("Failed to request SMU to load ucode\n");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static uint32_t tonga_smu_get_mask_for_fw_type(uint32_t fw_type)
+{
+       switch (fw_type) {
+               case AMDGPU_UCODE_ID_SDMA0:
+                       return UCODE_ID_SDMA0_MASK;
+               case AMDGPU_UCODE_ID_SDMA1:
+                       return UCODE_ID_SDMA1_MASK;
+               case AMDGPU_UCODE_ID_CP_CE:
+                       return UCODE_ID_CP_CE_MASK;
+               case AMDGPU_UCODE_ID_CP_PFP:
+                       return UCODE_ID_CP_PFP_MASK;
+               case AMDGPU_UCODE_ID_CP_ME:
+                       return UCODE_ID_CP_ME_MASK;
+               case AMDGPU_UCODE_ID_CP_MEC1:
+                       return UCODE_ID_CP_MEC_MASK;
+               case AMDGPU_UCODE_ID_CP_MEC2:
+                       return UCODE_ID_CP_MEC_MASK;
+               case AMDGPU_UCODE_ID_RLC_G:
+                       return UCODE_ID_RLC_G_MASK;
+               default:
+                       DRM_ERROR("ucode type is out of range!\n");
+                       return 0;
+       }
+}
+
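+/* Poll SOFT_REGISTERS_TABLE_28 until the SMU sets the mask bit for the
+ * requested ucode, indicating the load has finished.
+ */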
+static int tonga_smu_check_fw_load_finish(struct amdgpu_device *adev,
+                                       uint32_t fw_type)
+{
+       uint32_t fw_mask = tonga_smu_get_mask_for_fw_type(fw_type);
+       int i;
+
+       for (i = 0; i < adev->usec_timeout; i++) {
+               if (fw_mask == (RREG32_SMC(ixSOFT_REGISTERS_TABLE_28) & fw_mask))
+                       break;
+               udelay(1);
+       }
+
+       if (i == adev->usec_timeout) {
+               DRM_ERROR("firmware load check failed\n");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
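+/* Upload the SMC firmware and start it in protection mode: auto-start is
+ * enabled and SMU_STATUS is polled for the pass/fail result.
+ */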
+static int tonga_smu_start_in_protection_mode(struct amdgpu_device *adev)
+{
+       int result;
+       uint32_t val;
+       int i;
+
+       /* Assert reset */
+       val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
+       val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 1);
+       WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
+
+       result = tonga_smu_upload_firmware_image(adev);
+       if (result)
+               return result;
+
+       /* Clear status */
+       WREG32_SMC(ixSMU_STATUS, 0);
+
+       /* Enable clock */
+       val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
+       val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);
+       WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val);
+
+       /* De-assert reset */
+       val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
+       val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 0);
+       WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
+
+       /* Set SMU Auto Start */
+       val = RREG32_SMC(ixSMU_INPUT_DATA);
+       val = REG_SET_FIELD(val, SMU_INPUT_DATA, AUTO_START, 1);
+       WREG32_SMC(ixSMU_INPUT_DATA, val);
+
+       /* Clear firmware interrupt enable flag */
+       WREG32_SMC(ixFIRMWARE_FLAGS, 0);
+
+       for (i = 0; i < adev->usec_timeout; i++) {
+               val = RREG32_SMC(ixRCU_UC_EVENTS);
+               if (REG_GET_FIELD(val, RCU_UC_EVENTS, INTERRUPTS_ENABLED))
+                       break;
+               udelay(1);
+       }
+
+       if (i == adev->usec_timeout) {
+               DRM_ERROR("Interrupt is not enabled by firmware\n");
+               return -EINVAL;
+       }
+
+       /* Send the test SMU message with a 0x20000 offset
+        * to trigger the SMU start
+        */
+       tonga_send_msg_to_smc_offset(adev);
+
+       /* Wait for done bit to be set */
+       for (i = 0; i < adev->usec_timeout; i++) {
+               val = RREG32_SMC(ixSMU_STATUS);
+               if (REG_GET_FIELD(val, SMU_STATUS, SMU_DONE))
+                       break;
+               udelay(1);
+       }
+
+       if (i == adev->usec_timeout) {
+               DRM_ERROR("Timeout for SMU start\n");
+               return -EINVAL;
+       }
+
+       /* Check pass/failed indicator */
+       val = RREG32_SMC(ixSMU_STATUS);
+       if (!REG_GET_FIELD(val, SMU_STATUS, SMU_PASS)) {
+               DRM_ERROR("SMU Firmware start failed\n");
+               return -EINVAL;
+       }
+
+       /* Wait for firmware to initialize */
+       for (i = 0; i < adev->usec_timeout; i++) {
+               val = RREG32_SMC(ixFIRMWARE_FLAGS);
+               if (REG_GET_FIELD(val, FIRMWARE_FLAGS, INTERRUPTS_ENABLED))
+                       break;
+               udelay(1);
+       }
+
+       if (i == adev->usec_timeout) {
+               DRM_ERROR("SMU firmware initialization failed\n");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
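+/* Upload the SMC firmware and start it directly (non-protection mode):
+ * execution is redirected to address 0 and the clock is re-enabled.
+ */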
+static int tonga_smu_start_in_non_protection_mode(struct amdgpu_device *adev)
+{
+       int i, result;
+       uint32_t val;
+
+       /* wait for smc boot up */
+       for (i = 0; i < adev->usec_timeout; i++) {
+               val = RREG32_SMC(ixRCU_UC_EVENTS);
+               val = REG_GET_FIELD(val, RCU_UC_EVENTS, boot_seq_done);
+               if (val)
+                       break;
+               udelay(1);
+       }
+
+       if (i == adev->usec_timeout) {
+               DRM_ERROR("SMC boot sequence did not complete\n");
+               return -EINVAL;
+       }
+
+       /* Clear firmware interrupt enable flag */
+       WREG32_SMC(ixFIRMWARE_FLAGS, 0);
+
+       /* Assert reset */
+       val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
+       val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 1);
+       WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
+
+       result = tonga_smu_upload_firmware_image(adev);
+       if (result)
+               return result;
+
+       /* Set the SMC instruction start point to 0x0 */
+       tonga_program_jump_on_start(adev);
+
+       /* Enable clock */
+       val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
+       val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);
+       WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val);
+
+       /* De-assert reset */
+       val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
+       val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 0);
+       WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
+
+       /* Wait for firmware to initialize */
+       for (i = 0; i < adev->usec_timeout; i++) {
+               val = RREG32_SMC(ixFIRMWARE_FLAGS);
+               if (REG_GET_FIELD(val, FIRMWARE_FLAGS, INTERRUPTS_ENABLED))
+                       break;
+               udelay(1);
+       }
+
+       if (i == adev->usec_timeout) {
+               DRM_ERROR("Timeout for SMC firmware initialization\n");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
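+/* Bring up the SMC if it is not already running, picking the start path
+ * based on the SMU_MODE bit, then request the ucode load.
+ */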
+int tonga_smu_start(struct amdgpu_device *adev)
+{
+       int result;
+       uint32_t val;
+
+       if (!tonga_is_smc_ram_running(adev)) {
+               val = RREG32_SMC(ixSMU_FIRMWARE);
+               if (!REG_GET_FIELD(val, SMU_FIRMWARE, SMU_MODE)) {
+                       result = tonga_smu_start_in_non_protection_mode(adev);
+                       if (result)
+                               return result;
+               } else {
+                       result = tonga_smu_start_in_protection_mode(adev);
+                       if (result)
+                               return result;
+               }
+       }
+
+       return tonga_smu_request_load_fw(adev);
+}
+
+static const struct amdgpu_smumgr_funcs tonga_smumgr_funcs = {
+       .check_fw_load_finish = tonga_smu_check_fw_load_finish,
+       .request_smu_load_fw = NULL,
+       .request_smu_specific_fw = NULL,
+};
+
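+/* Allocate, pin and map the TOC header buffer and the SMU internal buffer
+ * in VRAM and record their GPU addresses for later SMU messages.
+ */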
+int tonga_smu_init(struct amdgpu_device *adev)
+{
+       struct tonga_smu_private_data *private;
+       uint32_t image_size = ((sizeof(struct SMU_DRAMData_TOC) / 4096) + 1) * 4096;
+       uint32_t smu_internal_buffer_size = 200 * 4096;
+       struct amdgpu_bo **toc_buf = &adev->smu.toc_buf;
+       struct amdgpu_bo **smu_buf = &adev->smu.smu_buf;
+       uint64_t mc_addr;
+       void *toc_buf_ptr;
+       void *smu_buf_ptr;
+       int ret;
+
+       private = kzalloc(sizeof(struct tonga_smu_private_data), GFP_KERNEL);
+       if (!private)
+               return -ENOMEM;
+
+       /* allocate firmware buffers */
+       if (adev->firmware.smu_load)
+               amdgpu_ucode_init_bo(adev);
+
+       adev->smu.priv = private;
+       adev->smu.fw_flags = 0;
+
+       /* Allocate FW image data structure and header buffer */
+       ret = amdgpu_bo_create(adev, image_size, PAGE_SIZE,
+                               true, AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, toc_buf);
+       if (ret) {
+               DRM_ERROR("Failed to allocate memory for TOC buffer\n");
+               return -ENOMEM;
+       }
+
+       /* Allocate buffer for SMU internal buffer */
+       ret = amdgpu_bo_create(adev, smu_internal_buffer_size, PAGE_SIZE,
+                               true, AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, smu_buf);
+       if (ret) {
+               DRM_ERROR("Failed to allocate memory for SMU internal buffer\n");
+               return -ENOMEM;
+       }
+
+       /* Retrieve GPU address for header buffer and internal buffer */
+       ret = amdgpu_bo_reserve(adev->smu.toc_buf, false);
+       if (ret) {
+               amdgpu_bo_unref(&adev->smu.toc_buf);
+               DRM_ERROR("Failed to reserve the TOC buffer\n");
+               return -EINVAL;
+       }
+
+       ret = amdgpu_bo_pin(adev->smu.toc_buf, AMDGPU_GEM_DOMAIN_VRAM, &mc_addr);
+       if (ret) {
+               amdgpu_bo_unreserve(adev->smu.toc_buf);
+               amdgpu_bo_unref(&adev->smu.toc_buf);
+               DRM_ERROR("Failed to pin the TOC buffer\n");
+               return -EINVAL;
+       }
+
+       ret = amdgpu_bo_kmap(*toc_buf, &toc_buf_ptr);
+       if (ret) {
+               amdgpu_bo_unreserve(adev->smu.toc_buf);
+               amdgpu_bo_unref(&adev->smu.toc_buf);
+               DRM_ERROR("Failed to map the TOC buffer\n");
+               return -EINVAL;
+       }
+
+       amdgpu_bo_unreserve(adev->smu.toc_buf);
+       private->header_addr_low = lower_32_bits(mc_addr);
+       private->header_addr_high = upper_32_bits(mc_addr);
+       private->header = toc_buf_ptr;
+
+       ret = amdgpu_bo_reserve(adev->smu.smu_buf, false);
+       if (ret) {
+               amdgpu_bo_unref(&adev->smu.smu_buf);
+               amdgpu_bo_unref(&adev->smu.toc_buf);
+               DRM_ERROR("Failed to reserve the SMU internal buffer\n");
+               return -EINVAL;
+       }
+
+       ret = amdgpu_bo_pin(adev->smu.smu_buf, AMDGPU_GEM_DOMAIN_VRAM, &mc_addr);
+       if (ret) {
+               amdgpu_bo_unreserve(adev->smu.smu_buf);
+               amdgpu_bo_unref(&adev->smu.smu_buf);
+               amdgpu_bo_unref(&adev->smu.toc_buf);
+               DRM_ERROR("Failed to pin the SMU internal buffer\n");
+               return -EINVAL;
+       }
+
+       ret = amdgpu_bo_kmap(*smu_buf, &smu_buf_ptr);
+       if (ret) {
+               amdgpu_bo_unreserve(adev->smu.smu_buf);
+               amdgpu_bo_unref(&adev->smu.smu_buf);
+               amdgpu_bo_unref(&adev->smu.toc_buf);
+               DRM_ERROR("Failed to map the SMU internal buffer\n");
+               return -EINVAL;
+       }
+
+       amdgpu_bo_unreserve(adev->smu.smu_buf);
+       private->smu_buffer_addr_low = lower_32_bits(mc_addr);
+       private->smu_buffer_addr_high = upper_32_bits(mc_addr);
+
+       adev->smu.smumgr_funcs = &tonga_smumgr_funcs;
+
+       return 0;
+}
+
+int tonga_smu_fini(struct amdgpu_device *adev)
+{
+       amdgpu_bo_unref(&adev->smu.toc_buf);
+       amdgpu_bo_unref(&adev->smu.smu_buf);
+       kfree(adev->smu.priv);
+       adev->smu.priv = NULL;
+       if (adev->firmware.fw_buf)
+               amdgpu_ucode_fini_bo(adev);
+
+       return 0;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_smumgr.h b/drivers/gpu/drm/amd/amdgpu/tonga_smumgr.h
new file mode 100644 (file)
index 0000000..c031ff9
--- /dev/null
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef TONGA_SMUMGR_H
+#define TONGA_SMUMGR_H
+
+#include "tonga_ppsmc.h"
+
+int tonga_smu_init(struct amdgpu_device *adev);
+int tonga_smu_fini(struct amdgpu_device *adev);
+int tonga_smu_start(struct amdgpu_device *adev);
+
+struct tonga_smu_private_data
+{
+       uint8_t *header;
+       uint32_t smu_buffer_addr_high;
+       uint32_t smu_buffer_addr_low;
+       uint32_t header_addr_high;
+       uint32_t header_addr_low;
+};
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
new file mode 100644 (file)
index 0000000..f3b3026
--- /dev/null
@@ -0,0 +1,830 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König <christian.koenig@amd.com>
+ */
+
+#include <linux/firmware.h>
+#include <drm/drmP.h>
+#include "amdgpu.h"
+#include "amdgpu_uvd.h"
+#include "vid.h"
+#include "uvd/uvd_5_0_d.h"
+#include "uvd/uvd_5_0_sh_mask.h"
+#include "oss/oss_2_0_d.h"
+#include "oss/oss_2_0_sh_mask.h"
+
+static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev);
+static void uvd_v5_0_set_irq_funcs(struct amdgpu_device *adev);
+static int uvd_v5_0_start(struct amdgpu_device *adev);
+static void uvd_v5_0_stop(struct amdgpu_device *adev);
+
+/**
+ * uvd_v5_0_ring_get_rptr - get read pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Returns the current hardware read pointer
+ */
+static uint32_t uvd_v5_0_ring_get_rptr(struct amdgpu_ring *ring)
+{
+       struct amdgpu_device *adev = ring->adev;
+
+       return RREG32(mmUVD_RBC_RB_RPTR);
+}
+
+/**
+ * uvd_v5_0_ring_get_wptr - get write pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Returns the current hardware write pointer
+ */
+static uint32_t uvd_v5_0_ring_get_wptr(struct amdgpu_ring *ring)
+{
+       struct amdgpu_device *adev = ring->adev;
+
+       return RREG32(mmUVD_RBC_RB_WPTR);
+}
+
+/**
+ * uvd_v5_0_ring_set_wptr - set write pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Commits the write pointer to the hardware
+ */
+static void uvd_v5_0_ring_set_wptr(struct amdgpu_ring *ring)
+{
+       struct amdgpu_device *adev = ring->adev;
+
+       WREG32(mmUVD_RBC_RB_WPTR, ring->wptr);
+}
+
+static int uvd_v5_0_early_init(struct amdgpu_device *adev)
+{
+       uvd_v5_0_set_ring_funcs(adev);
+       uvd_v5_0_set_irq_funcs(adev);
+
+       return 0;
+}
+
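+/**
+ * uvd_v5_0_sw_init - sw init for UVD
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Register the UVD trap interrupt, set up the common UVD code and
+ * initialize the UVD ring.
+ */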
+static int uvd_v5_0_sw_init(struct amdgpu_device *adev)
+{
+       struct amdgpu_ring *ring;
+       int r;
+
+       /* UVD TRAP */
+       r = amdgpu_irq_add_id(adev, 124, &adev->uvd.irq);
+       if (r)
+               return r;
+
+       r = amdgpu_uvd_sw_init(adev);
+       if (r)
+               return r;
+
+       r = amdgpu_uvd_resume(adev);
+       if (r)
+               return r;
+
+       ring = &adev->uvd.ring;
+       sprintf(ring->name, "uvd");
+       r = amdgpu_ring_init(adev, ring, 4096, CP_PACKET2, 0xf,
+                            &adev->uvd.irq, 0, AMDGPU_RING_TYPE_UVD);
+
+       return r;
+}
+
+static int uvd_v5_0_sw_fini(struct amdgpu_device *adev)
+{
+       int r;
+
+       r = amdgpu_uvd_suspend(adev);
+       if (r)
+               return r;
+
+       r = amdgpu_uvd_sw_fini(adev);
+       if (r)
+               return r;
+
+       return r;
+}
+
+/**
+ * uvd_v5_0_hw_init - start and test UVD block
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Initialize the hardware, boot up the VCPU and do some testing
+ */
+static int uvd_v5_0_hw_init(struct amdgpu_device *adev)
+{
+       struct amdgpu_ring *ring = &adev->uvd.ring;
+       uint32_t tmp;
+       int r;
+
+       /* raise clocks while booting up the VCPU */
+       amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);
+
+       r = uvd_v5_0_start(adev);
+       if (r)
+               goto done;
+
+       ring->ready = true;
+       r = amdgpu_ring_test_ring(ring);
+       if (r) {
+               ring->ready = false;
+               goto done;
+       }
+
+       r = amdgpu_ring_lock(ring, 10);
+       if (r) {
+               DRM_ERROR("amdgpu: failed to lock UVD ring (%d).\n", r);
+               goto done;
+       }
+
+       tmp = PACKET0(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0);
+       amdgpu_ring_write(ring, tmp);
+       amdgpu_ring_write(ring, 0xFFFFF);
+
+       tmp = PACKET0(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0);
+       amdgpu_ring_write(ring, tmp);
+       amdgpu_ring_write(ring, 0xFFFFF);
+
+       tmp = PACKET0(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0);
+       amdgpu_ring_write(ring, tmp);
+       amdgpu_ring_write(ring, 0xFFFFF);
+
+       /* Clear timeout status bits */
+       amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_TIMEOUT_STATUS, 0));
+       amdgpu_ring_write(ring, 0x8);
+
+       amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0));
+       amdgpu_ring_write(ring, 3);
+
+       amdgpu_ring_unlock_commit(ring);
+
+done:
+       /* lower clocks again */
+       amdgpu_asic_set_uvd_clocks(adev, 0, 0);
+
+       if (!r)
+               DRM_INFO("UVD initialized successfully.\n");
+
+       return r;
+}
+
+/**
+ * uvd_v5_0_hw_fini - stop the hardware block
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Stop the UVD block and mark the ring as no longer ready
+ */
+static int uvd_v5_0_hw_fini(struct amdgpu_device *adev)
+{
+       struct amdgpu_ring *ring = &adev->uvd.ring;
+
+       uvd_v5_0_stop(adev);
+       ring->ready = false;
+
+       return 0;
+}
+
+static int uvd_v5_0_suspend(struct amdgpu_device *adev)
+{
+       int r;
+
+       r = uvd_v5_0_hw_fini(adev);
+       if (r)
+               return r;
+
+       r = amdgpu_uvd_suspend(adev);
+       if (r)
+               return r;
+
+       return r;
+}
+
+static int uvd_v5_0_resume(struct amdgpu_device *adev)
+{
+       int r;
+
+       r = amdgpu_uvd_resume(adev);
+       if (r)
+               return r;
+
+       r = uvd_v5_0_hw_init(adev);
+       if (r)
+               return r;
+
+       return r;
+}
+
+/**
+ * uvd_v5_0_mc_resume - memory controller programming
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Let the UVD memory controller know its offsets
+ */
+static void uvd_v5_0_mc_resume(struct amdgpu_device *adev)
+{
+       uint64_t offset;
+       uint32_t size;
+
+       /* program memory controller bits 0-27 */
+       WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
+                       lower_32_bits(adev->uvd.gpu_addr));
+       WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
+                       upper_32_bits(adev->uvd.gpu_addr));
+
+       offset = AMDGPU_UVD_FIRMWARE_OFFSET;
+       size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4);
+       WREG32(mmUVD_VCPU_CACHE_OFFSET0, offset >> 3);
+       WREG32(mmUVD_VCPU_CACHE_SIZE0, size);
+
+       offset += size;
+       size = AMDGPU_UVD_STACK_SIZE;
+       WREG32(mmUVD_VCPU_CACHE_OFFSET1, offset >> 3);
+       WREG32(mmUVD_VCPU_CACHE_SIZE1, size);
+
+       offset += size;
+       size = AMDGPU_UVD_HEAP_SIZE;
+       WREG32(mmUVD_VCPU_CACHE_OFFSET2, offset >> 3);
+       WREG32(mmUVD_VCPU_CACHE_SIZE2, size);
+}
+
+/**
+ * uvd_v5_0_start - start UVD block
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Setup and start the UVD block
+ */
+static int uvd_v5_0_start(struct amdgpu_device *adev)
+{
+       struct amdgpu_ring *ring = &adev->uvd.ring;
+       uint32_t rb_bufsz, tmp;
+       uint32_t lmi_swap_cntl;
+       uint32_t mp_swap_cntl;
+       int i, j, r;
+
+       /* disable DPG */
+       WREG32_P(mmUVD_POWER_STATUS, 0, ~(1 << 2));
+
+       /* disable byte swapping */
+       lmi_swap_cntl = 0;
+       mp_swap_cntl = 0;
+
+       uvd_v5_0_mc_resume(adev);
+
+       /* disable clock gating */
+       WREG32(mmUVD_CGC_GATE, 0);
+
+       /* disable interrupt */
+       WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1));
+
+       /* stall UMC and register bus before resetting VCPU */
+       WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
+       mdelay(1);
+
+       /* put LMI, VCPU, RBC etc... into reset */
+       WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
+               UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK | UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
+               UVD_SOFT_RESET__RBC_SOFT_RESET_MASK | UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
+               UVD_SOFT_RESET__CXW_SOFT_RESET_MASK | UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
+               UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
+       mdelay(5);
+
+       /* take UVD block out of reset */
+       WREG32_P(mmSRBM_SOFT_RESET, 0, ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
+       mdelay(5);
+
+       /* initialize UVD memory controller */
+       WREG32(mmUVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) |
+                            (1 << 21) | (1 << 9) | (1 << 20));
+
+#ifdef __BIG_ENDIAN
+       /* swap (8 in 32) RB and IB */
+       lmi_swap_cntl = 0xa;
+       mp_swap_cntl = 0;
+#endif
+       WREG32(mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
+       WREG32(mmUVD_MP_SWAP_CNTL, mp_swap_cntl);
+
+       WREG32(mmUVD_MPC_SET_MUXA0, 0x40c2040);
+       WREG32(mmUVD_MPC_SET_MUXA1, 0x0);
+       WREG32(mmUVD_MPC_SET_MUXB0, 0x40c2040);
+       WREG32(mmUVD_MPC_SET_MUXB1, 0x0);
+       WREG32(mmUVD_MPC_SET_ALU, 0);
+       WREG32(mmUVD_MPC_SET_MUX, 0x88);
+
+       /* take all subblocks out of reset, except VCPU */
+       WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
+       mdelay(5);
+
+       /* enable VCPU clock */
+       WREG32(mmUVD_VCPU_CNTL,  1 << 9);
+
+       /* enable UMC */
+       WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));
+
+       /* boot up the VCPU */
+       WREG32(mmUVD_SOFT_RESET, 0);
+       mdelay(10);
+
+       for (i = 0; i < 10; ++i) {
+               uint32_t status;
+               for (j = 0; j < 100; ++j) {
+                       status = RREG32(mmUVD_STATUS);
+                       if (status & 2)
+                               break;
+                       mdelay(10);
+               }
+               r = 0;
+               if (status & 2)
+                       break;
+
+               DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
+               WREG32_P(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
+                               ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
+               mdelay(10);
+               WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
+               mdelay(10);
+               r = -1;
+       }
+
+       if (r) {
+               DRM_ERROR("UVD not responding, giving up!!!\n");
+               return r;
+       }
+       /* enable master interrupt */
+       WREG32_P(mmUVD_MASTINT_EN, 3 << 1, ~(3 << 1));
+
+       /* clear the bit 4 of UVD_STATUS */
+       /* clear bit 4 of UVD_STATUS */
+
+       rb_bufsz = order_base_2(ring->ring_size);
+       tmp = 0;
+       tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
+       tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
+       tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
+       tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
+       tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
+       tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
+       /* force RBC into idle state */
+       WREG32(mmUVD_RBC_RB_CNTL, tmp);
+
+       /* set the write pointer delay */
+       WREG32(mmUVD_RBC_RB_WPTR_CNTL, 0);
+
+       /* set the wb address */
+       WREG32(mmUVD_RBC_RB_RPTR_ADDR, (upper_32_bits(ring->gpu_addr) >> 2));
+
+       /* program the RB_BASE for the ring buffer */
+       WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
+                       lower_32_bits(ring->gpu_addr));
+       WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
+                       upper_32_bits(ring->gpu_addr));
+
+       /* Initialize the ring buffer's read and write pointers */
+       WREG32(mmUVD_RBC_RB_RPTR, 0);
+
+       ring->wptr = RREG32(mmUVD_RBC_RB_RPTR);
+       WREG32(mmUVD_RBC_RB_WPTR, ring->wptr);
+
+       WREG32_P(mmUVD_RBC_RB_CNTL, 0, ~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);
+
+       return 0;
+}
+
+/**
+ * uvd_v5_0_stop - stop UVD block
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * stop the UVD block
+ */
+static void uvd_v5_0_stop(struct amdgpu_device *adev)
+{
+       /* force RBC into idle state */
+       WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);
+
+       /* Stall UMC and register bus before resetting VCPU */
+       WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
+       mdelay(1);
+
+       /* put VCPU into reset */
+       WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
+       mdelay(5);
+
+       /* disable VCPU clock */
+       WREG32(mmUVD_VCPU_CNTL, 0x0);
+
+       /* Unstall UMC and register bus */
+       WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));
+}
+
+/**
+ * uvd_v5_0_ring_emit_fence - emit a fence & trap command
+ *
+ * @ring: amdgpu_ring pointer
+ * @fence: fence to emit
+ *
+ * Write a fence and a trap command to the ring.
+ */
+static void uvd_v5_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
+                                    bool write64bit)
+{
+       WARN_ON(write64bit);
+
+       amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
+       amdgpu_ring_write(ring, seq);
+       amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
+       amdgpu_ring_write(ring, addr & 0xffffffff);
+       amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
+       amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
+       amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
+       amdgpu_ring_write(ring, 0);
+
+       amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
+       amdgpu_ring_write(ring, 0);
+       amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
+       amdgpu_ring_write(ring, 0);
+       amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
+       amdgpu_ring_write(ring, 2);
+}
+
+/**
+ * uvd_v5_0_ring_emit_semaphore - emit semaphore command
+ *
+ * @ring: amdgpu_ring pointer
+ * @semaphore: semaphore to emit commands for
+ * @emit_wait: true if we should emit a wait command
+ *
+ * Emit a semaphore command (either wait or signal) to the UVD ring.
+ */
+static bool uvd_v5_0_ring_emit_semaphore(struct amdgpu_ring *ring,
+                                        struct amdgpu_semaphore *semaphore,
+                                        bool emit_wait)
+{
+       uint64_t addr = semaphore->gpu_addr;
+
+       amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_ADDR_LOW, 0));
+       amdgpu_ring_write(ring, (addr >> 3) & 0x000FFFFF);
+
+       amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_ADDR_HIGH, 0));
+       amdgpu_ring_write(ring, (addr >> 23) & 0x000FFFFF);
+
+       amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CMD, 0));
+       amdgpu_ring_write(ring, 0x80 | (emit_wait ? 1 : 0));
+
+       return true;
+}
+
+/**
+ * uvd_v5_0_ring_test_ring - register write test
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Test if we can successfully write to the context register
+ */
+static int uvd_v5_0_ring_test_ring(struct amdgpu_ring *ring)
+{
+       struct amdgpu_device *adev = ring->adev;
+       uint32_t tmp = 0;
+       unsigned i;
+       int r;
+
+       WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD);
+       r = amdgpu_ring_lock(ring, 3);
+       if (r) {
+               DRM_ERROR("amdgpu: failed to lock UVD ring %d (%d).\n",
+                         ring->idx, r);
+               return r;
+       }
+       amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
+       amdgpu_ring_write(ring, 0xDEADBEEF);
+       amdgpu_ring_unlock_commit(ring);
+       for (i = 0; i < adev->usec_timeout; i++) {
+               tmp = RREG32(mmUVD_CONTEXT_ID);
+               if (tmp == 0xDEADBEEF)
+                       break;
+               DRM_UDELAY(1);
+       }
+
+       if (i < adev->usec_timeout) {
+               DRM_INFO("ring test on %d succeeded in %d usecs\n",
+                        ring->idx, i);
+       } else {
+               DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
+                         ring->idx, tmp);
+               r = -EINVAL;
+       }
+       return r;
+}
+
+/**
+ * uvd_v5_0_ring_emit_ib - execute indirect buffer
+ *
+ * @ring: amdgpu_ring pointer
+ * @ib: indirect buffer to execute
+ *
+ * Write ring commands to execute the indirect buffer
+ */
+static void uvd_v5_0_ring_emit_ib(struct amdgpu_ring *ring,
+                                 struct amdgpu_ib *ib)
+{
+       amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0));
+       amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
+       amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH, 0));
+       amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
+       amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_SIZE, 0));
+       amdgpu_ring_write(ring, ib->length_dw);
+}
+
+/**
+ * uvd_v5_0_ring_test_ib - test ib execution
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Test if we can successfully execute an IB
+ */
+static int uvd_v5_0_ring_test_ib(struct amdgpu_ring *ring)
+{
+       struct amdgpu_device *adev = ring->adev;
+       struct amdgpu_fence *fence = NULL;
+       int r;
+
+       r = amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);
+       if (r) {
+               DRM_ERROR("amdgpu: failed to raise UVD clocks (%d).\n", r);
+               return r;
+       }
+
+       r = amdgpu_uvd_get_create_msg(ring, 1, NULL);
+       if (r) {
+               DRM_ERROR("amdgpu: failed to get create msg (%d).\n", r);
+               goto error;
+       }
+
+       r = amdgpu_uvd_get_destroy_msg(ring, 1, &fence);
+       if (r) {
+               DRM_ERROR("amdgpu: failed to get destroy ib (%d).\n", r);
+               goto error;
+       }
+
+       r = amdgpu_fence_wait(fence, false);
+       if (r) {
+               DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
+               goto error;
+       }
+       DRM_INFO("ib test on ring %d succeeded\n",  ring->idx);
+error:
+       amdgpu_fence_unref(&fence);
+       amdgpu_asic_set_uvd_clocks(adev, 0, 0);
+       return r;
+}
+
+static bool uvd_v5_0_is_idle(struct amdgpu_device *adev)
+{
+       return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
+}
+
+static int uvd_v5_0_wait_for_idle(struct amdgpu_device *adev)
+{
+       unsigned i;
+
+       for (i = 0; i < adev->usec_timeout; i++) {
+               if (!(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK))
+                       return 0;
+       }
+       return -ETIMEDOUT;
+}
+
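+/**
+ * uvd_v5_0_soft_reset - soft reset the UVD block
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Stop the block, toggle the SRBM UVD soft reset and start it again.
+ */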
+static int uvd_v5_0_soft_reset(struct amdgpu_device *adev)
+{
+       uvd_v5_0_stop(adev);
+
+       WREG32_P(mmSRBM_SOFT_RESET, SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK,
+                       ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
+       mdelay(5);
+
+       return uvd_v5_0_start(adev);
+}
+
+static void uvd_v5_0_print_status(struct amdgpu_device *adev)
+{
+       dev_info(adev->dev, "UVD 5.0 registers\n");
+       dev_info(adev->dev, "  UVD_SEMA_ADDR_LOW=0x%08X\n",
+                RREG32(mmUVD_SEMA_ADDR_LOW));
+       dev_info(adev->dev, "  UVD_SEMA_ADDR_HIGH=0x%08X\n",
+                RREG32(mmUVD_SEMA_ADDR_HIGH));
+       dev_info(adev->dev, "  UVD_SEMA_CMD=0x%08X\n",
+                RREG32(mmUVD_SEMA_CMD));
+       dev_info(adev->dev, "  UVD_GPCOM_VCPU_CMD=0x%08X\n",
+                RREG32(mmUVD_GPCOM_VCPU_CMD));
+       dev_info(adev->dev, "  UVD_GPCOM_VCPU_DATA0=0x%08X\n",
+                RREG32(mmUVD_GPCOM_VCPU_DATA0));
+       dev_info(adev->dev, "  UVD_GPCOM_VCPU_DATA1=0x%08X\n",
+                RREG32(mmUVD_GPCOM_VCPU_DATA1));
+       dev_info(adev->dev, "  UVD_ENGINE_CNTL=0x%08X\n",
+                RREG32(mmUVD_ENGINE_CNTL));
+       dev_info(adev->dev, "  UVD_UDEC_ADDR_CONFIG=0x%08X\n",
+                RREG32(mmUVD_UDEC_ADDR_CONFIG));
+       dev_info(adev->dev, "  UVD_UDEC_DB_ADDR_CONFIG=0x%08X\n",
+                RREG32(mmUVD_UDEC_DB_ADDR_CONFIG));
+       dev_info(adev->dev, "  UVD_UDEC_DBW_ADDR_CONFIG=0x%08X\n",
+                RREG32(mmUVD_UDEC_DBW_ADDR_CONFIG));
+       dev_info(adev->dev, "  UVD_SEMA_CNTL=0x%08X\n",
+                RREG32(mmUVD_SEMA_CNTL));
+       dev_info(adev->dev, "  UVD_LMI_EXT40_ADDR=0x%08X\n",
+                RREG32(mmUVD_LMI_EXT40_ADDR));
+       dev_info(adev->dev, "  UVD_CTX_INDEX=0x%08X\n",
+                RREG32(mmUVD_CTX_INDEX));
+       dev_info(adev->dev, "  UVD_CTX_DATA=0x%08X\n",
+                RREG32(mmUVD_CTX_DATA));
+       dev_info(adev->dev, "  UVD_CGC_GATE=0x%08X\n",
+                RREG32(mmUVD_CGC_GATE));
+       dev_info(adev->dev, "  UVD_CGC_CTRL=0x%08X\n",
+                RREG32(mmUVD_CGC_CTRL));
+       dev_info(adev->dev, "  UVD_LMI_CTRL2=0x%08X\n",
+                RREG32(mmUVD_LMI_CTRL2));
+       dev_info(adev->dev, "  UVD_MASTINT_EN=0x%08X\n",
+                RREG32(mmUVD_MASTINT_EN));
+       dev_info(adev->dev, "  UVD_LMI_ADDR_EXT=0x%08X\n",
+                RREG32(mmUVD_LMI_ADDR_EXT));
+       dev_info(adev->dev, "  UVD_LMI_CTRL=0x%08X\n",
+                RREG32(mmUVD_LMI_CTRL));
+       dev_info(adev->dev, "  UVD_LMI_SWAP_CNTL=0x%08X\n",
+                RREG32(mmUVD_LMI_SWAP_CNTL));
+       dev_info(adev->dev, "  UVD_MP_SWAP_CNTL=0x%08X\n",
+                RREG32(mmUVD_MP_SWAP_CNTL));
+       dev_info(adev->dev, "  UVD_MPC_SET_MUXA0=0x%08X\n",
+                RREG32(mmUVD_MPC_SET_MUXA0));
+       dev_info(adev->dev, "  UVD_MPC_SET_MUXA1=0x%08X\n",
+                RREG32(mmUVD_MPC_SET_MUXA1));
+       dev_info(adev->dev, "  UVD_MPC_SET_MUXB0=0x%08X\n",
+                RREG32(mmUVD_MPC_SET_MUXB0));
+       dev_info(adev->dev, "  UVD_MPC_SET_MUXB1=0x%08X\n",
+                RREG32(mmUVD_MPC_SET_MUXB1));
+       dev_info(adev->dev, "  UVD_MPC_SET_MUX=0x%08X\n",
+                RREG32(mmUVD_MPC_SET_MUX));
+       dev_info(adev->dev, "  UVD_MPC_SET_ALU=0x%08X\n",
+                RREG32(mmUVD_MPC_SET_ALU));
+       dev_info(adev->dev, "  UVD_VCPU_CACHE_OFFSET0=0x%08X\n",
+                RREG32(mmUVD_VCPU_CACHE_OFFSET0));
+       dev_info(adev->dev, "  UVD_VCPU_CACHE_SIZE0=0x%08X\n",
+                RREG32(mmUVD_VCPU_CACHE_SIZE0));
+       dev_info(adev->dev, "  UVD_VCPU_CACHE_OFFSET1=0x%08X\n",
+                RREG32(mmUVD_VCPU_CACHE_OFFSET1));
+       dev_info(adev->dev, "  UVD_VCPU_CACHE_SIZE1=0x%08X\n",
+                RREG32(mmUVD_VCPU_CACHE_SIZE1));
+       dev_info(adev->dev, "  UVD_VCPU_CACHE_OFFSET2=0x%08X\n",
+                RREG32(mmUVD_VCPU_CACHE_OFFSET2));
+       dev_info(adev->dev, "  UVD_VCPU_CACHE_SIZE2=0x%08X\n",
+                RREG32(mmUVD_VCPU_CACHE_SIZE2));
+       dev_info(adev->dev, "  UVD_VCPU_CNTL=0x%08X\n",
+                RREG32(mmUVD_VCPU_CNTL));
+       dev_info(adev->dev, "  UVD_SOFT_RESET=0x%08X\n",
+                RREG32(mmUVD_SOFT_RESET));
+       dev_info(adev->dev, "  UVD_LMI_RBC_IB_64BIT_BAR_LOW=0x%08X\n",
+                RREG32(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW));
+       dev_info(adev->dev, "  UVD_LMI_RBC_IB_64BIT_BAR_HIGH=0x%08X\n",
+                RREG32(mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH));
+       dev_info(adev->dev, "  UVD_RBC_IB_SIZE=0x%08X\n",
+                RREG32(mmUVD_RBC_IB_SIZE));
+       dev_info(adev->dev, "  UVD_LMI_RBC_RB_64BIT_BAR_LOW=0x%08X\n",
+                RREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_LOW));
+       dev_info(adev->dev, "  UVD_LMI_RBC_RB_64BIT_BAR_HIGH=0x%08X\n",
+                RREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH));
+       dev_info(adev->dev, "  UVD_RBC_RB_RPTR=0x%08X\n",
+                RREG32(mmUVD_RBC_RB_RPTR));
+       dev_info(adev->dev, "  UVD_RBC_RB_WPTR=0x%08X\n",
+                RREG32(mmUVD_RBC_RB_WPTR));
+       dev_info(adev->dev, "  UVD_RBC_RB_WPTR_CNTL=0x%08X\n",
+                RREG32(mmUVD_RBC_RB_WPTR_CNTL));
+       dev_info(adev->dev, "  UVD_RBC_RB_CNTL=0x%08X\n",
+                RREG32(mmUVD_RBC_RB_CNTL));
+       dev_info(adev->dev, "  UVD_STATUS=0x%08X\n",
+                RREG32(mmUVD_STATUS));
+       dev_info(adev->dev, "  UVD_SEMA_TIMEOUT_STATUS=0x%08X\n",
+                RREG32(mmUVD_SEMA_TIMEOUT_STATUS));
+       dev_info(adev->dev, "  UVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL=0x%08X\n",
+                RREG32(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL));
+       dev_info(adev->dev, "  UVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL=0x%08X\n",
+                RREG32(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL));
+       dev_info(adev->dev, "  UVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL=0x%08X\n",
+                RREG32(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL));
+       dev_info(adev->dev, "  UVD_CONTEXT_ID=0x%08X\n",
+                RREG32(mmUVD_CONTEXT_ID));
+}
+
+static int uvd_v5_0_set_interrupt_state(struct amdgpu_device *adev,
+                                       struct amdgpu_irq_src *source,
+                                       unsigned type,
+                                       enum amdgpu_interrupt_state state)
+{
+       /* TODO */
+       return 0;
+}
+
+static int uvd_v5_0_process_interrupt(struct amdgpu_device *adev,
+                                     struct amdgpu_irq_src *source,
+                                     struct amdgpu_iv_entry *entry)
+{
+       DRM_DEBUG("IH: UVD TRAP\n");
+       amdgpu_fence_process(&adev->uvd.ring);
+       return 0;
+}
+
+static int uvd_v5_0_set_clockgating_state(struct amdgpu_device *adev,
+                                         enum amdgpu_clockgating_state state)
+{
+       /* TODO */
+
+       return 0;
+}
+
+static int uvd_v5_0_set_powergating_state(struct amdgpu_device *adev,
+                                         enum amdgpu_powergating_state state)
+{
+       /* This doesn't actually powergate the UVD block.
+        * That's done in the dpm code via the SMC.  This
+        * just re-inits the block as necessary.  The actual
+        * gating still happens in the dpm code.  We should
+        * revisit this when there is a cleaner line between
+        * the smc and the hw blocks
+        */
+       if (state == AMDGPU_PG_STATE_GATE) {
+               uvd_v5_0_stop(adev);
+               return 0;
+       } else {
+               return uvd_v5_0_start(adev);
+       }
+}
+
+const struct amdgpu_ip_funcs uvd_v5_0_ip_funcs = {
+       .early_init = uvd_v5_0_early_init,
+       .late_init = NULL,
+       .sw_init = uvd_v5_0_sw_init,
+       .sw_fini = uvd_v5_0_sw_fini,
+       .hw_init = uvd_v5_0_hw_init,
+       .hw_fini = uvd_v5_0_hw_fini,
+       .suspend = uvd_v5_0_suspend,
+       .resume = uvd_v5_0_resume,
+       .is_idle = uvd_v5_0_is_idle,
+       .wait_for_idle = uvd_v5_0_wait_for_idle,
+       .soft_reset = uvd_v5_0_soft_reset,
+       .print_status = uvd_v5_0_print_status,
+       .set_clockgating_state = uvd_v5_0_set_clockgating_state,
+       .set_powergating_state = uvd_v5_0_set_powergating_state,
+};
+
+static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = {
+       .get_rptr = uvd_v5_0_ring_get_rptr,
+       .get_wptr = uvd_v5_0_ring_get_wptr,
+       .set_wptr = uvd_v5_0_ring_set_wptr,
+       .parse_cs = amdgpu_uvd_ring_parse_cs,
+       .emit_ib = uvd_v5_0_ring_emit_ib,
+       .emit_fence = uvd_v5_0_ring_emit_fence,
+       .emit_semaphore = uvd_v5_0_ring_emit_semaphore,
+       .test_ring = uvd_v5_0_ring_test_ring,
+       .test_ib = uvd_v5_0_ring_test_ib,
+       .is_lockup = amdgpu_ring_test_lockup,
+};
+
+static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev)
+{
+       adev->uvd.ring.funcs = &uvd_v5_0_ring_funcs;
+}
+
+static const struct amdgpu_irq_src_funcs uvd_v5_0_irq_funcs = {
+       .set = uvd_v5_0_set_interrupt_state,
+       .process = uvd_v5_0_process_interrupt,
+};
+
+static void uvd_v5_0_set_irq_funcs(struct amdgpu_device *adev)
+{
+       adev->uvd.irq.num_types = 1;
+       adev->uvd.irq.funcs = &uvd_v5_0_irq_funcs;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.h b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.h
new file mode 100644 (file)
index 0000000..7d7a152
--- /dev/null
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __UVD_V5_0_H__
+#define __UVD_V5_0_H__
+
+extern const struct amdgpu_ip_funcs uvd_v5_0_ip_funcs;
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
new file mode 100644 (file)
index 0000000..f59942d
--- /dev/null
@@ -0,0 +1,810 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König <christian.koenig@amd.com>
+ */
+
+#include <linux/firmware.h>
+#include <drm/drmP.h>
+#include "amdgpu.h"
+#include "amdgpu_uvd.h"
+#include "vid.h"
+#include "uvd/uvd_6_0_d.h"
+#include "uvd/uvd_6_0_sh_mask.h"
+#include "oss/oss_2_0_d.h"
+#include "oss/oss_2_0_sh_mask.h"
+
+static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev);
+static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev);
+static int uvd_v6_0_start(struct amdgpu_device *adev);
+static void uvd_v6_0_stop(struct amdgpu_device *adev);
+
+/**
+ * uvd_v6_0_ring_get_rptr - get read pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Returns the current hardware read pointer
+ */
+static uint32_t uvd_v6_0_ring_get_rptr(struct amdgpu_ring *ring)
+{
+       struct amdgpu_device *adev = ring->adev;
+
+       return RREG32(mmUVD_RBC_RB_RPTR);
+}
+
+/**
+ * uvd_v6_0_ring_get_wptr - get write pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Returns the current hardware write pointer
+ */
+static uint32_t uvd_v6_0_ring_get_wptr(struct amdgpu_ring *ring)
+{
+       struct amdgpu_device *adev = ring->adev;
+
+       return RREG32(mmUVD_RBC_RB_WPTR);
+}
+
+/**
+ * uvd_v6_0_ring_set_wptr - set write pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Commits the write pointer to the hardware
+ */
+static void uvd_v6_0_ring_set_wptr(struct amdgpu_ring *ring)
+{
+       struct amdgpu_device *adev = ring->adev;
+
+       WREG32(mmUVD_RBC_RB_WPTR, ring->wptr);
+}
+
+static int uvd_v6_0_early_init(struct amdgpu_device *adev)
+{
+       uvd_v6_0_set_ring_funcs(adev);
+       uvd_v6_0_set_irq_funcs(adev);
+
+       return 0;
+}
+
+static int uvd_v6_0_sw_init(struct amdgpu_device *adev)
+{
+       struct amdgpu_ring *ring;
+       int r;
+
+       /* UVD TRAP */
+       r = amdgpu_irq_add_id(adev, 124, &adev->uvd.irq);
+       if (r)
+               return r;
+
+       r = amdgpu_uvd_sw_init(adev);
+       if (r)
+               return r;
+
+       r = amdgpu_uvd_resume(adev);
+       if (r)
+               return r;
+
+       ring = &adev->uvd.ring;
+       sprintf(ring->name, "uvd");
+       r = amdgpu_ring_init(adev, ring, 4096, CP_PACKET2, 0xf,
+                            &adev->uvd.irq, 0, AMDGPU_RING_TYPE_UVD);
+
+       return r;
+}
+
+static int uvd_v6_0_sw_fini(struct amdgpu_device *adev)
+{
+       int r;
+
+       r = amdgpu_uvd_suspend(adev);
+       if (r)
+               return r;
+
+       r = amdgpu_uvd_sw_fini(adev);
+       if (r)
+               return r;
+
+       return r;
+}
+
+/**
+ * uvd_v6_0_hw_init - start and test UVD block
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Initialize the hardware, boot up the VCPU and do some testing
+ */
+static int uvd_v6_0_hw_init(struct amdgpu_device *adev)
+{
+       struct amdgpu_ring *ring = &adev->uvd.ring;
+       uint32_t tmp;
+       int r;
+
+       r = uvd_v6_0_start(adev);
+       if (r)
+               goto done;
+
+       ring->ready = true;
+       r = amdgpu_ring_test_ring(ring);
+       if (r) {
+               ring->ready = false;
+               goto done;
+       }
+
+       r = amdgpu_ring_lock(ring, 10);
+       if (r) {
+               DRM_ERROR("amdgpu: failed to lock UVD ring (%d).\n", r);
+               goto done;
+       }
+
+       tmp = PACKET0(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0);
+       amdgpu_ring_write(ring, tmp);
+       amdgpu_ring_write(ring, 0xFFFFF);
+
+       tmp = PACKET0(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0);
+       amdgpu_ring_write(ring, tmp);
+       amdgpu_ring_write(ring, 0xFFFFF);
+
+       tmp = PACKET0(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0);
+       amdgpu_ring_write(ring, tmp);
+       amdgpu_ring_write(ring, 0xFFFFF);
+
+       /* Clear timeout status bits */
+       amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_TIMEOUT_STATUS, 0));
+       amdgpu_ring_write(ring, 0x8);
+
+       amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0));
+       amdgpu_ring_write(ring, 3);
+
+       amdgpu_ring_unlock_commit(ring);
+
+done:
+       if (!r)
+               DRM_INFO("UVD initialized successfully.\n");
+
+       return r;
+}
+
+/**
+ * uvd_v6_0_hw_fini - stop the hardware block
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Stop the UVD block and mark the ring as no longer ready
+ */
+static int uvd_v6_0_hw_fini(struct amdgpu_device *adev)
+{
+       struct amdgpu_ring *ring = &adev->uvd.ring;
+
+       uvd_v6_0_stop(adev);
+       ring->ready = false;
+
+       return 0;
+}
+
+static int uvd_v6_0_suspend(struct amdgpu_device *adev)
+{
+       int r;
+
+       r = uvd_v6_0_hw_fini(adev);
+       if (r)
+               return r;
+
+       r = amdgpu_uvd_suspend(adev);
+       if (r)
+               return r;
+
+       return r;
+}
+
+static int uvd_v6_0_resume(struct amdgpu_device *adev)
+{
+       int r;
+
+       r = amdgpu_uvd_resume(adev);
+       if (r)
+               return r;
+
+       r = uvd_v6_0_hw_init(adev);
+       if (r)
+               return r;
+
+       return r;
+}
+
+/**
+ * uvd_v6_0_mc_resume - memory controller programming
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Let the UVD memory controller know its offsets
+ */
+static void uvd_v6_0_mc_resume(struct amdgpu_device *adev)
+{
+       uint64_t offset;
+       uint32_t size;
+
+       /* program memory controller bits 0-27 */
+       WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
+                       lower_32_bits(adev->uvd.gpu_addr));
+       WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
+                       upper_32_bits(adev->uvd.gpu_addr));
+
+       offset = AMDGPU_UVD_FIRMWARE_OFFSET;
+       size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4);
+       WREG32(mmUVD_VCPU_CACHE_OFFSET0, offset >> 3);
+       WREG32(mmUVD_VCPU_CACHE_SIZE0, size);
+
+       offset += size;
+       size = AMDGPU_UVD_STACK_SIZE;
+       WREG32(mmUVD_VCPU_CACHE_OFFSET1, offset >> 3);
+       WREG32(mmUVD_VCPU_CACHE_SIZE1, size);
+
+       offset += size;
+       size = AMDGPU_UVD_HEAP_SIZE;
+       WREG32(mmUVD_VCPU_CACHE_OFFSET2, offset >> 3);
+       WREG32(mmUVD_VCPU_CACHE_SIZE2, size);
+}
+
+/**
+ * uvd_v6_0_start - start UVD block
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Setup and start the UVD block
+ */
+static int uvd_v6_0_start(struct amdgpu_device *adev)
+{
+       struct amdgpu_ring *ring = &adev->uvd.ring;
+       uint32_t rb_bufsz, tmp;
+       uint32_t lmi_swap_cntl;
+       uint32_t mp_swap_cntl;
+       int i, j, r;
+
+       /* disable DPG */
+       WREG32_P(mmUVD_POWER_STATUS, 0, ~(1 << 2));
+
+       /* disable byte swapping */
+       lmi_swap_cntl = 0;
+       mp_swap_cntl = 0;
+
+       uvd_v6_0_mc_resume(adev);
+
+       /* disable clock gating */
+       WREG32(mmUVD_CGC_GATE, 0);
+
+       /* disable interrupt */
+       WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1));
+
+       /* stall UMC and register bus before resetting VCPU */
+       WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
+       mdelay(1);
+
+       /* put LMI, VCPU, RBC etc... into reset */
+       WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
+               UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK | UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
+               UVD_SOFT_RESET__RBC_SOFT_RESET_MASK | UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
+               UVD_SOFT_RESET__CXW_SOFT_RESET_MASK | UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
+               UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
+       mdelay(5);
+
+       /* take UVD block out of reset */
+       WREG32_P(mmSRBM_SOFT_RESET, 0, ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
+       mdelay(5);
+
+       /* initialize UVD memory controller */
+       WREG32(mmUVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) |
+                            (1 << 21) | (1 << 9) | (1 << 20));
+
+#ifdef __BIG_ENDIAN
+       /* swap (8 in 32) RB and IB */
+       lmi_swap_cntl = 0xa;
+       mp_swap_cntl = 0;
+#endif
+       WREG32(mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
+       WREG32(mmUVD_MP_SWAP_CNTL, mp_swap_cntl);
+
+       WREG32(mmUVD_MPC_SET_MUXA0, 0x40c2040);
+       WREG32(mmUVD_MPC_SET_MUXA1, 0x0);
+       WREG32(mmUVD_MPC_SET_MUXB0, 0x40c2040);
+       WREG32(mmUVD_MPC_SET_MUXB1, 0x0);
+       WREG32(mmUVD_MPC_SET_ALU, 0);
+       WREG32(mmUVD_MPC_SET_MUX, 0x88);
+
+       /* take all subblocks out of reset, except VCPU */
+       WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
+       mdelay(5);
+
+       /* enable VCPU clock */
+       WREG32(mmUVD_VCPU_CNTL,  1 << 9);
+
+       /* enable UMC */
+       WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));
+
+       /* boot up the VCPU */
+       WREG32(mmUVD_SOFT_RESET, 0);
+       mdelay(10);
+
+       for (i = 0; i < 10; ++i) {
+               uint32_t status;
+
+               for (j = 0; j < 100; ++j) {
+                       status = RREG32(mmUVD_STATUS);
+                       if (status & 2)
+                               break;
+                       mdelay(10);
+               }
+               r = 0;
+               if (status & 2)
+                       break;
+
+               DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
+               WREG32_P(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
+                               ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
+               mdelay(10);
+               WREG32_P(mmUVD_SOFT_RESET, 0,
+                        ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
+               mdelay(10);
+               r = -1;
+       }
+
+       if (r) {
+               DRM_ERROR("UVD not responding, giving up!!!\n");
+               return r;
+       }
+       /* enable master interrupt */
+       WREG32_P(mmUVD_MASTINT_EN, 3 << 1, ~(3 << 1));
+
+       /* clear the bit 4 of UVD_STATUS */
+       WREG32_P(mmUVD_STATUS, 0, ~(2 << 1));
+
+       rb_bufsz = order_base_2(ring->ring_size);
+       tmp = 0;
+       tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
+       tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
+       tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
+       tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
+       tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
+       tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
+       /* force RBC into idle state */
+       WREG32(mmUVD_RBC_RB_CNTL, tmp);
+
+       /* set the write pointer delay */
+       WREG32(mmUVD_RBC_RB_WPTR_CNTL, 0);
+
+       /* set the wb address */
+       WREG32(mmUVD_RBC_RB_RPTR_ADDR, (upper_32_bits(ring->gpu_addr) >> 2));
+
+       /* program the RB_BASE for ring buffer */
+       WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
+                       lower_32_bits(ring->gpu_addr));
+       WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
+                       upper_32_bits(ring->gpu_addr));
+
+       /* Initialize the ring buffer's read and write pointers */
+       WREG32(mmUVD_RBC_RB_RPTR, 0);
+
+       ring->wptr = RREG32(mmUVD_RBC_RB_RPTR);
+       WREG32(mmUVD_RBC_RB_WPTR, ring->wptr);
+
+       WREG32_P(mmUVD_RBC_RB_CNTL, 0, ~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);
+
+       return 0;
+}
+
+/**
+ * uvd_v6_0_stop - stop UVD block
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * stop the UVD block
+ */
+static void uvd_v6_0_stop(struct amdgpu_device *adev)
+{
+       /* force RBC into idle state */
+       WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);
+
+       /* Stall UMC and register bus before resetting VCPU */
+       WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
+       mdelay(1);
+
+       /* put VCPU into reset */
+       WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
+       mdelay(5);
+
+       /* disable VCPU clock */
+       WREG32(mmUVD_VCPU_CNTL, 0x0);
+
+       /* Unstall UMC and register bus */
+       WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));
+}
+
+/**
+ * uvd_v6_0_ring_emit_fence - emit a fence & trap command
+ *
+ * @ring: amdgpu_ring pointer
+ * @addr: GPU address the fence value is written to
+ * @seq: sequence number to emit
+ * @write64bit: true for a 64-bit fence (not supported)
+ *
+ * Write a fence and a trap command to the ring.
+ */
+static void uvd_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
+                                    bool write64bit)
+{
+       WARN_ON(write64bit);
+
+       amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
+       amdgpu_ring_write(ring, seq);
+       amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
+       amdgpu_ring_write(ring, addr & 0xffffffff);
+       amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
+       amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
+       amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
+       amdgpu_ring_write(ring, 0);
+
+       amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
+       amdgpu_ring_write(ring, 0);
+       amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
+       amdgpu_ring_write(ring, 0);
+       amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
+       amdgpu_ring_write(ring, 2);
+}
+
+/**
+ * uvd_v6_0_ring_emit_semaphore - emit semaphore command
+ *
+ * @ring: amdgpu_ring pointer
+ * @semaphore: semaphore to emit commands for
+ * @emit_wait: true if we should emit a wait command
+ *
+ * Emit a semaphore command (either wait or signal) to the UVD ring.
+ */
+static bool uvd_v6_0_ring_emit_semaphore(struct amdgpu_ring *ring,
+                                        struct amdgpu_semaphore *semaphore,
+                                        bool emit_wait)
+{
+       uint64_t addr = semaphore->gpu_addr;
+
+       amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_ADDR_LOW, 0));
+       amdgpu_ring_write(ring, (addr >> 3) & 0x000FFFFF);
+
+       amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_ADDR_HIGH, 0));
+       amdgpu_ring_write(ring, (addr >> 23) & 0x000FFFFF);
+
+       amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CMD, 0));
+       amdgpu_ring_write(ring, 0x80 | (emit_wait ? 1 : 0));
+
+       return true;
+}
+
+/**
+ * uvd_v6_0_ring_test_ring - register write test
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Test if we can successfully write to the context register
+ */
+static int uvd_v6_0_ring_test_ring(struct amdgpu_ring *ring)
+{
+       struct amdgpu_device *adev = ring->adev;
+       uint32_t tmp = 0;
+       unsigned i;
+       int r;
+
+       WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD);
+       r = amdgpu_ring_lock(ring, 3);
+       if (r) {
+               DRM_ERROR("amdgpu: failed to lock UVD ring %d (%d).\n",
+                         ring->idx, r);
+               return r;
+       }
+       amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
+       amdgpu_ring_write(ring, 0xDEADBEEF);
+       amdgpu_ring_unlock_commit(ring);
+       for (i = 0; i < adev->usec_timeout; i++) {
+               tmp = RREG32(mmUVD_CONTEXT_ID);
+               if (tmp == 0xDEADBEEF)
+                       break;
+               DRM_UDELAY(1);
+       }
+
+       if (i < adev->usec_timeout) {
+               DRM_INFO("ring test on %d succeeded in %d usecs\n",
+                        ring->idx, i);
+       } else {
+               DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
+                         ring->idx, tmp);
+               r = -EINVAL;
+       }
+       return r;
+}
+
+/**
+ * uvd_v6_0_ring_emit_ib - execute indirect buffer
+ *
+ * @ring: amdgpu_ring pointer
+ * @ib: indirect buffer to execute
+ *
+ * Write ring commands to execute the indirect buffer
+ */
+static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
+                                 struct amdgpu_ib *ib)
+{
+       amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0));
+       amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
+       amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH, 0));
+       amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
+       amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_SIZE, 0));
+       amdgpu_ring_write(ring, ib->length_dw);
+}
+
+/**
+ * uvd_v6_0_ring_test_ib - test ib execution
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Test if we can successfully execute an IB
+ */
+static int uvd_v6_0_ring_test_ib(struct amdgpu_ring *ring)
+{
+       struct amdgpu_fence *fence = NULL;
+       int r;
+
+       r = amdgpu_uvd_get_create_msg(ring, 1, NULL);
+       if (r) {
+               DRM_ERROR("amdgpu: failed to get create msg (%d).\n", r);
+               goto error;
+       }
+
+       r = amdgpu_uvd_get_destroy_msg(ring, 1, &fence);
+       if (r) {
+               DRM_ERROR("amdgpu: failed to get destroy ib (%d).\n", r);
+               goto error;
+       }
+
+       r = amdgpu_fence_wait(fence, false);
+       if (r) {
+               DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
+               goto error;
+       }
+       DRM_INFO("ib test on ring %d succeeded\n",  ring->idx);
+error:
+       amdgpu_fence_unref(&fence);
+       return r;
+}
+
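+/**
+ * uvd_v6_0_is_idle - check if the UVD block is idle
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Returns true when the UVD busy bit in SRBM_STATUS is clear.
+ */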
+static bool uvd_v6_0_is_idle(struct amdgpu_device *adev)
+{
+       return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
+}
+
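+/**
+ * uvd_v6_0_wait_for_idle - wait for the UVD block to go idle
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Poll SRBM_STATUS until the UVD busy bit clears.
+ * Returns 0 on success, -ETIMEDOUT otherwise.
+ */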
+static int uvd_v6_0_wait_for_idle(struct amdgpu_device *adev)
+{
+       unsigned i;
+
+       for (i = 0; i < adev->usec_timeout; i++) {
+               if (!(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK))
+                       return 0;
+       }
+       return -ETIMEDOUT;
+}
+
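+/**
+ * uvd_v6_0_soft_reset - soft reset the UVD block
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Stop UVD, toggle the UVD bit in SRBM_SOFT_RESET and start the block again.
+ */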
+static int uvd_v6_0_soft_reset(struct amdgpu_device *adev)
+{
+       uvd_v6_0_stop(adev);
+
+       WREG32_P(mmSRBM_SOFT_RESET, SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK,
+                       ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
+       mdelay(5);
+
+       return uvd_v6_0_start(adev);
+}
+
+static void uvd_v6_0_print_status(struct amdgpu_device *adev)
+{
+       dev_info(adev->dev, "UVD 6.0 registers\n");
+       dev_info(adev->dev, "  UVD_SEMA_ADDR_LOW=0x%08X\n",
+                RREG32(mmUVD_SEMA_ADDR_LOW));
+       dev_info(adev->dev, "  UVD_SEMA_ADDR_HIGH=0x%08X\n",
+                RREG32(mmUVD_SEMA_ADDR_HIGH));
+       dev_info(adev->dev, "  UVD_SEMA_CMD=0x%08X\n",
+                RREG32(mmUVD_SEMA_CMD));
+       dev_info(adev->dev, "  UVD_GPCOM_VCPU_CMD=0x%08X\n",
+                RREG32(mmUVD_GPCOM_VCPU_CMD));
+       dev_info(adev->dev, "  UVD_GPCOM_VCPU_DATA0=0x%08X\n",
+                RREG32(mmUVD_GPCOM_VCPU_DATA0));
+       dev_info(adev->dev, "  UVD_GPCOM_VCPU_DATA1=0x%08X\n",
+                RREG32(mmUVD_GPCOM_VCPU_DATA1));
+       dev_info(adev->dev, "  UVD_ENGINE_CNTL=0x%08X\n",
+                RREG32(mmUVD_ENGINE_CNTL));
+       dev_info(adev->dev, "  UVD_UDEC_ADDR_CONFIG=0x%08X\n",
+                RREG32(mmUVD_UDEC_ADDR_CONFIG));
+       dev_info(adev->dev, "  UVD_UDEC_DB_ADDR_CONFIG=0x%08X\n",
+                RREG32(mmUVD_UDEC_DB_ADDR_CONFIG));
+       dev_info(adev->dev, "  UVD_UDEC_DBW_ADDR_CONFIG=0x%08X\n",
+                RREG32(mmUVD_UDEC_DBW_ADDR_CONFIG));
+       dev_info(adev->dev, "  UVD_SEMA_CNTL=0x%08X\n",
+                RREG32(mmUVD_SEMA_CNTL));
+       dev_info(adev->dev, "  UVD_LMI_EXT40_ADDR=0x%08X\n",
+                RREG32(mmUVD_LMI_EXT40_ADDR));
+       dev_info(adev->dev, "  UVD_CTX_INDEX=0x%08X\n",
+                RREG32(mmUVD_CTX_INDEX));
+       dev_info(adev->dev, "  UVD_CTX_DATA=0x%08X\n",
+                RREG32(mmUVD_CTX_DATA));
+       dev_info(adev->dev, "  UVD_CGC_GATE=0x%08X\n",
+                RREG32(mmUVD_CGC_GATE));
+       dev_info(adev->dev, "  UVD_CGC_CTRL=0x%08X\n",
+                RREG32(mmUVD_CGC_CTRL));
+       dev_info(adev->dev, "  UVD_LMI_CTRL2=0x%08X\n",
+                RREG32(mmUVD_LMI_CTRL2));
+       dev_info(adev->dev, "  UVD_MASTINT_EN=0x%08X\n",
+                RREG32(mmUVD_MASTINT_EN));
+       dev_info(adev->dev, "  UVD_LMI_ADDR_EXT=0x%08X\n",
+                RREG32(mmUVD_LMI_ADDR_EXT));
+       dev_info(adev->dev, "  UVD_LMI_CTRL=0x%08X\n",
+                RREG32(mmUVD_LMI_CTRL));
+       dev_info(adev->dev, "  UVD_LMI_SWAP_CNTL=0x%08X\n",
+                RREG32(mmUVD_LMI_SWAP_CNTL));
+       dev_info(adev->dev, "  UVD_MP_SWAP_CNTL=0x%08X\n",
+                RREG32(mmUVD_MP_SWAP_CNTL));
+       dev_info(adev->dev, "  UVD_MPC_SET_MUXA0=0x%08X\n",
+                RREG32(mmUVD_MPC_SET_MUXA0));
+       dev_info(adev->dev, "  UVD_MPC_SET_MUXA1=0x%08X\n",
+                RREG32(mmUVD_MPC_SET_MUXA1));
+       dev_info(adev->dev, "  UVD_MPC_SET_MUXB0=0x%08X\n",
+                RREG32(mmUVD_MPC_SET_MUXB0));
+       dev_info(adev->dev, "  UVD_MPC_SET_MUXB1=0x%08X\n",
+                RREG32(mmUVD_MPC_SET_MUXB1));
+       dev_info(adev->dev, "  UVD_MPC_SET_MUX=0x%08X\n",
+                RREG32(mmUVD_MPC_SET_MUX));
+       dev_info(adev->dev, "  UVD_MPC_SET_ALU=0x%08X\n",
+                RREG32(mmUVD_MPC_SET_ALU));
+       dev_info(adev->dev, "  UVD_VCPU_CACHE_OFFSET0=0x%08X\n",
+                RREG32(mmUVD_VCPU_CACHE_OFFSET0));
+       dev_info(adev->dev, "  UVD_VCPU_CACHE_SIZE0=0x%08X\n",
+                RREG32(mmUVD_VCPU_CACHE_SIZE0));
+       dev_info(adev->dev, "  UVD_VCPU_CACHE_OFFSET1=0x%08X\n",
+                RREG32(mmUVD_VCPU_CACHE_OFFSET1));
+       dev_info(adev->dev, "  UVD_VCPU_CACHE_SIZE1=0x%08X\n",
+                RREG32(mmUVD_VCPU_CACHE_SIZE1));
+       dev_info(adev->dev, "  UVD_VCPU_CACHE_OFFSET2=0x%08X\n",
+                RREG32(mmUVD_VCPU_CACHE_OFFSET2));
+       dev_info(adev->dev, "  UVD_VCPU_CACHE_SIZE2=0x%08X\n",
+                RREG32(mmUVD_VCPU_CACHE_SIZE2));
+       dev_info(adev->dev, "  UVD_VCPU_CNTL=0x%08X\n",
+                RREG32(mmUVD_VCPU_CNTL));
+       dev_info(adev->dev, "  UVD_SOFT_RESET=0x%08X\n",
+                RREG32(mmUVD_SOFT_RESET));
+       dev_info(adev->dev, "  UVD_RBC_IB_SIZE=0x%08X\n",
+                RREG32(mmUVD_RBC_IB_SIZE));
+       dev_info(adev->dev, "  UVD_RBC_RB_RPTR=0x%08X\n",
+                RREG32(mmUVD_RBC_RB_RPTR));
+       dev_info(adev->dev, "  UVD_RBC_RB_WPTR=0x%08X\n",
+                RREG32(mmUVD_RBC_RB_WPTR));
+       dev_info(adev->dev, "  UVD_RBC_RB_WPTR_CNTL=0x%08X\n",
+                RREG32(mmUVD_RBC_RB_WPTR_CNTL));
+       dev_info(adev->dev, "  UVD_RBC_RB_CNTL=0x%08X\n",
+                RREG32(mmUVD_RBC_RB_CNTL));
+       dev_info(adev->dev, "  UVD_STATUS=0x%08X\n",
+                RREG32(mmUVD_STATUS));
+       dev_info(adev->dev, "  UVD_SEMA_TIMEOUT_STATUS=0x%08X\n",
+                RREG32(mmUVD_SEMA_TIMEOUT_STATUS));
+       dev_info(adev->dev, "  UVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL=0x%08X\n",
+                RREG32(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL));
+       dev_info(adev->dev, "  UVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL=0x%08X\n",
+                RREG32(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL));
+       dev_info(adev->dev, "  UVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL=0x%08X\n",
+                RREG32(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL));
+       dev_info(adev->dev, "  UVD_CONTEXT_ID=0x%08X\n",
+                RREG32(mmUVD_CONTEXT_ID));
+}
+
+static int uvd_v6_0_set_interrupt_state(struct amdgpu_device *adev,
+                                       struct amdgpu_irq_src *source,
+                                       unsigned type,
+                                       enum amdgpu_interrupt_state state)
+{
+       /* TODO */
+       return 0;
+}
+
+static int uvd_v6_0_process_interrupt(struct amdgpu_device *adev,
+                                     struct amdgpu_irq_src *source,
+                                     struct amdgpu_iv_entry *entry)
+{
+       DRM_DEBUG("IH: UVD TRAP\n");
+       amdgpu_fence_process(&adev->uvd.ring);
+       return 0;
+}
+
+static int uvd_v6_0_set_clockgating_state(struct amdgpu_device *adev,
+                                         enum amdgpu_clockgating_state state)
+{
+       /* TODO */
+
+       return 0;
+}
+
+static int uvd_v6_0_set_powergating_state(struct amdgpu_device *adev,
+                                         enum amdgpu_powergating_state state)
+{
+       /* This doesn't actually powergate the UVD block.
+        * That's done in the dpm code via the SMC.  This
+        * just re-inits the block as necessary.  The actual
+        * gating still happens in the dpm code.  We should
+        * revisit this when there is a cleaner line between
+        * the smc and the hw blocks
+        */
+       if (state == AMDGPU_PG_STATE_GATE) {
+               uvd_v6_0_stop(adev);
+               return 0;
+       } else {
+               return uvd_v6_0_start(adev);
+       }
+}
+
+const struct amdgpu_ip_funcs uvd_v6_0_ip_funcs = {
+       .early_init = uvd_v6_0_early_init,
+       .late_init = NULL,
+       .sw_init = uvd_v6_0_sw_init,
+       .sw_fini = uvd_v6_0_sw_fini,
+       .hw_init = uvd_v6_0_hw_init,
+       .hw_fini = uvd_v6_0_hw_fini,
+       .suspend = uvd_v6_0_suspend,
+       .resume = uvd_v6_0_resume,
+       .is_idle = uvd_v6_0_is_idle,
+       .wait_for_idle = uvd_v6_0_wait_for_idle,
+       .soft_reset = uvd_v6_0_soft_reset,
+       .print_status = uvd_v6_0_print_status,
+       .set_clockgating_state = uvd_v6_0_set_clockgating_state,
+       .set_powergating_state = uvd_v6_0_set_powergating_state,
+};
+
+static const struct amdgpu_ring_funcs uvd_v6_0_ring_funcs = {
+       .get_rptr = uvd_v6_0_ring_get_rptr,
+       .get_wptr = uvd_v6_0_ring_get_wptr,
+       .set_wptr = uvd_v6_0_ring_set_wptr,
+       .parse_cs = amdgpu_uvd_ring_parse_cs,
+       .emit_ib = uvd_v6_0_ring_emit_ib,
+       .emit_fence = uvd_v6_0_ring_emit_fence,
+       .emit_semaphore = uvd_v6_0_ring_emit_semaphore,
+       .test_ring = uvd_v6_0_ring_test_ring,
+       .test_ib = uvd_v6_0_ring_test_ib,
+       .is_lockup = amdgpu_ring_test_lockup,
+};
+
+static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev)
+{
+       adev->uvd.ring.funcs = &uvd_v6_0_ring_funcs;
+}
+
+static const struct amdgpu_irq_src_funcs uvd_v6_0_irq_funcs = {
+       .set = uvd_v6_0_set_interrupt_state,
+       .process = uvd_v6_0_process_interrupt,
+};
+
+static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev)
+{
+       adev->uvd.irq.num_types = 1;
+       adev->uvd.irq.funcs = &uvd_v6_0_irq_funcs;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.h b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.h
new file mode 100644 (file)
index 0000000..bc21afc
--- /dev/null
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __UVD_V6_0_H__
+#define __UVD_V6_0_H__
+
+extern const struct amdgpu_ip_funcs uvd_v6_0_ip_funcs;
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
new file mode 100644 (file)
index 0000000..384c45e
--- /dev/null
@@ -0,0 +1,521 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * Authors: Christian König <christian.koenig@amd.com>
+ */
+
+#include <linux/firmware.h>
+#include <drm/drmP.h>
+#include "amdgpu.h"
+#include "amdgpu_vce.h"
+#include "vid.h"
+#include "vce/vce_3_0_d.h"
+#include "vce/vce_3_0_sh_mask.h"
+#include "oss/oss_2_0_d.h"
+#include "oss/oss_2_0_sh_mask.h"
+
+static void vce_v3_0_mc_resume(struct amdgpu_device *adev);
+static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev);
+static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev);
+
+/**
+ * vce_v3_0_ring_get_rptr - get read pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Returns the current hardware read pointer
+ */
+static uint32_t vce_v3_0_ring_get_rptr(struct amdgpu_ring *ring)
+{
+       struct amdgpu_device *adev = ring->adev;
+
+       if (ring == &adev->vce.ring[0])
+               return RREG32(mmVCE_RB_RPTR);
+       else
+               return RREG32(mmVCE_RB_RPTR2);
+}
+
+/**
+ * vce_v3_0_ring_get_wptr - get write pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Returns the current hardware write pointer
+ */
+static uint32_t vce_v3_0_ring_get_wptr(struct amdgpu_ring *ring)
+{
+       struct amdgpu_device *adev = ring->adev;
+
+       if (ring == &adev->vce.ring[0])
+               return RREG32(mmVCE_RB_WPTR);
+       else
+               return RREG32(mmVCE_RB_WPTR2);
+}
+
+/**
+ * vce_v3_0_ring_set_wptr - set write pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Commits the write pointer to the hardware
+ */
+static void vce_v3_0_ring_set_wptr(struct amdgpu_ring *ring)
+{
+       struct amdgpu_device *adev = ring->adev;
+
+       if (ring == &adev->vce.ring[0])
+               WREG32(mmVCE_RB_WPTR, ring->wptr);
+       else
+               WREG32(mmVCE_RB_WPTR2, ring->wptr);
+}
+
+/**
+ * vce_v3_0_start - start VCE block
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Setup and start the VCE block
+ */
+static int vce_v3_0_start(struct amdgpu_device *adev)
+{
+       struct amdgpu_ring *ring;
+       int i, j, r;
+
+       vce_v3_0_mc_resume(adev);
+
+       /* set BUSY flag */
+       WREG32_P(mmVCE_STATUS, 1, ~1);
+
+       ring = &adev->vce.ring[0];
+       WREG32(mmVCE_RB_RPTR, ring->wptr);
+       WREG32(mmVCE_RB_WPTR, ring->wptr);
+       WREG32(mmVCE_RB_BASE_LO, ring->gpu_addr);
+       WREG32(mmVCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
+       WREG32(mmVCE_RB_SIZE, ring->ring_size / 4);
+
+       ring = &adev->vce.ring[1];
+       WREG32(mmVCE_RB_RPTR2, ring->wptr);
+       WREG32(mmVCE_RB_WPTR2, ring->wptr);
+       WREG32(mmVCE_RB_BASE_LO2, ring->gpu_addr);
+       WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
+       WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4);
+
+       WREG32_P(mmVCE_VCPU_CNTL, VCE_VCPU_CNTL__CLK_EN_MASK, ~VCE_VCPU_CNTL__CLK_EN_MASK);
+
+       WREG32_P(mmVCE_SOFT_RESET,
+                VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
+                ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
+
+       mdelay(100);
+
+       WREG32_P(mmVCE_SOFT_RESET, 0, ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
+
+       for (i = 0; i < 10; ++i) {
+               uint32_t status;
+               for (j = 0; j < 100; ++j) {
+                       status = RREG32(mmVCE_STATUS);
+                       if (status & 2)
+                               break;
+                       mdelay(10);
+               }
+               r = 0;
+               if (status & 2)
+                       break;
+
+               DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n");
+               WREG32_P(mmVCE_SOFT_RESET, VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
+                               ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
+               mdelay(10);
+               WREG32_P(mmVCE_SOFT_RESET, 0, ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
+               mdelay(10);
+               r = -1;
+       }
+
+       /* clear BUSY flag */
+       WREG32_P(mmVCE_STATUS, 0, ~1);
+
+       if (r) {
+               DRM_ERROR("VCE not responding, giving up!!!\n");
+               return r;
+       }
+
+       return 0;
+}
+
+static int vce_v3_0_early_init(struct amdgpu_device *adev)
+{
+       vce_v3_0_set_ring_funcs(adev);
+       vce_v3_0_set_irq_funcs(adev);
+
+       return 0;
+}
+
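+/**
+ * vce_v3_0_sw_init - VCE software init
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Register the VCE interrupt source, set up the common VCE state and
+ * initialize both VCE rings.
+ */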
+static int vce_v3_0_sw_init(struct amdgpu_device *adev)
+{
+       struct amdgpu_ring *ring;
+       int r;
+
+       /* VCE */
+       r = amdgpu_irq_add_id(adev, 167, &adev->vce.irq);
+       if (r)
+               return r;
+
+       r = amdgpu_vce_sw_init(adev);
+       if (r)
+               return r;
+
+       r = amdgpu_vce_resume(adev);
+       if (r)
+               return r;
+
+       ring = &adev->vce.ring[0];
+       sprintf(ring->name, "vce0");
+       r = amdgpu_ring_init(adev, ring, 4096, VCE_CMD_NO_OP, 0xf,
+                            &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE);
+       if (r)
+               return r;
+
+       ring = &adev->vce.ring[1];
+       sprintf(ring->name, "vce1");
+       r = amdgpu_ring_init(adev, ring, 4096, VCE_CMD_NO_OP, 0xf,
+                            &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE);
+       if (r)
+               return r;
+
+       return r;
+}
+
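+/**
+ * vce_v3_0_sw_fini - VCE software fini
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Suspend the VCE block and tear down the common VCE driver state.
+ */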
+static int vce_v3_0_sw_fini(struct amdgpu_device *adev)
+{
+       int r;
+
+       r = amdgpu_vce_suspend(adev);
+       if (r)
+               return r;
+
+       r = amdgpu_vce_sw_fini(adev);
+       if (r)
+               return r;
+
+       return r;
+}
+
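+/**
+ * vce_v3_0_hw_init - start and test the VCE block
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Start the VCE block and run a ring test on both rings.
+ */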
+static int vce_v3_0_hw_init(struct amdgpu_device *adev)
+{
+       struct amdgpu_ring *ring;
+       int r;
+
+       r = vce_v3_0_start(adev);
+       if (r)
+               return r;
+
+       ring = &adev->vce.ring[0];
+       ring->ready = true;
+       r = amdgpu_ring_test_ring(ring);
+       if (r) {
+               ring->ready = false;
+               return r;
+       }
+
+       ring = &adev->vce.ring[1];
+       ring->ready = true;
+       r = amdgpu_ring_test_ring(ring);
+       if (r) {
+               ring->ready = false;
+               return r;
+       }
+
+       DRM_INFO("VCE initialized successfully.\n");
+
+       return 0;
+}
+
+static int vce_v3_0_hw_fini(struct amdgpu_device *adev)
+{
+       /* TODO */
+       return 0;
+}
+
+static int vce_v3_0_suspend(struct amdgpu_device *adev)
+{
+       int r;
+
+       r = vce_v3_0_hw_fini(adev);
+       if (r)
+               return r;
+
+       r = amdgpu_vce_suspend(adev);
+       if (r)
+               return r;
+
+       return r;
+}
+
+static int vce_v3_0_resume(struct amdgpu_device *adev)
+{
+       int r;
+
+       r = amdgpu_vce_resume(adev);
+       if (r)
+               return r;
+
+       r = vce_v3_0_hw_init(adev);
+       if (r)
+               return r;
+
+       return r;
+}
+
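+/**
+ * vce_v3_0_mc_resume - memory controller programming
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Let the VCE memory controller know the firmware, stack and heap offsets.
+ */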
+static void vce_v3_0_mc_resume(struct amdgpu_device *adev)
+{
+       uint32_t offset, size;
+
+       WREG32_P(mmVCE_CLOCK_GATING_A, 0, ~(1 << 16));
+       WREG32_P(mmVCE_UENC_CLOCK_GATING, 0x1FF000, ~0xFF9FF000);
+       WREG32_P(mmVCE_UENC_REG_CLOCK_GATING, 0x3F, ~0x3F);
+       WREG32(mmVCE_CLOCK_GATING_B, 0xf7);
+
+       WREG32(mmVCE_LMI_CTRL, 0x00398000);
+       WREG32_P(mmVCE_LMI_CACHE_CTRL, 0x0, ~0x1);
+       WREG32(mmVCE_LMI_SWAP_CNTL, 0);
+       WREG32(mmVCE_LMI_SWAP_CNTL1, 0);
+       WREG32(mmVCE_LMI_VM_CTRL, 0);
+
+       WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR, (adev->vce.gpu_addr >> 8));
+       offset = AMDGPU_VCE_FIRMWARE_OFFSET;
+       size = AMDGPU_GPU_PAGE_ALIGN(adev->vce.fw->size);
+       WREG32(mmVCE_VCPU_CACHE_OFFSET0, offset & 0x7fffffff);
+       WREG32(mmVCE_VCPU_CACHE_SIZE0, size);
+
+       offset += size;
+       size = AMDGPU_VCE_STACK_SIZE;
+       WREG32(mmVCE_VCPU_CACHE_OFFSET1, offset & 0x7fffffff);
+       WREG32(mmVCE_VCPU_CACHE_SIZE1, size);
+
+       offset += size;
+       size = AMDGPU_VCE_HEAP_SIZE;
+       WREG32(mmVCE_VCPU_CACHE_OFFSET2, offset & 0x7fffffff);
+       WREG32(mmVCE_VCPU_CACHE_SIZE2, size);
+
+       WREG32_P(mmVCE_LMI_CTRL2, 0x0, ~0x100);
+
+       WREG32_P(mmVCE_SYS_INT_EN, VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK,
+                ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
+}
+
+static bool vce_v3_0_is_idle(struct amdgpu_device *adev)
+{
+       return !(RREG32(mmSRBM_STATUS2) & SRBM_STATUS2__VCE_BUSY_MASK);
+}
+
+static int vce_v3_0_wait_for_idle(struct amdgpu_device *adev)
+{
+       unsigned i;
+
+       for (i = 0; i < adev->usec_timeout; i++) {
+               if (!(RREG32(mmSRBM_STATUS2) & SRBM_STATUS2__VCE_BUSY_MASK))
+                       return 0;
+       }
+       return -ETIMEDOUT;
+}
+
+static int vce_v3_0_soft_reset(struct amdgpu_device *adev)
+{
+       WREG32_P(mmSRBM_SOFT_RESET, SRBM_SOFT_RESET__SOFT_RESET_VCE_MASK,
+                       ~SRBM_SOFT_RESET__SOFT_RESET_VCE_MASK);
+       mdelay(5);
+
+       return vce_v3_0_start(adev);
+}
+
+static void vce_v3_0_print_status(struct amdgpu_device *adev)
+{
+       dev_info(adev->dev, "VCE 3.0 registers\n");
+       dev_info(adev->dev, "  VCE_STATUS=0x%08X\n",
+                RREG32(mmVCE_STATUS));
+       dev_info(adev->dev, "  VCE_VCPU_CNTL=0x%08X\n",
+                RREG32(mmVCE_VCPU_CNTL));
+       dev_info(adev->dev, "  VCE_VCPU_CACHE_OFFSET0=0x%08X\n",
+                RREG32(mmVCE_VCPU_CACHE_OFFSET0));
+       dev_info(adev->dev, "  VCE_VCPU_CACHE_SIZE0=0x%08X\n",
+                RREG32(mmVCE_VCPU_CACHE_SIZE0));
+       dev_info(adev->dev, "  VCE_VCPU_CACHE_OFFSET1=0x%08X\n",
+                RREG32(mmVCE_VCPU_CACHE_OFFSET1));
+       dev_info(adev->dev, "  VCE_VCPU_CACHE_SIZE1=0x%08X\n",
+                RREG32(mmVCE_VCPU_CACHE_SIZE1));
+       dev_info(adev->dev, "  VCE_VCPU_CACHE_OFFSET2=0x%08X\n",
+                RREG32(mmVCE_VCPU_CACHE_OFFSET2));
+       dev_info(adev->dev, "  VCE_VCPU_CACHE_SIZE2=0x%08X\n",
+                RREG32(mmVCE_VCPU_CACHE_SIZE2));
+       dev_info(adev->dev, "  VCE_SOFT_RESET=0x%08X\n",
+                RREG32(mmVCE_SOFT_RESET));
+       dev_info(adev->dev, "  VCE_RB_BASE_LO2=0x%08X\n",
+                RREG32(mmVCE_RB_BASE_LO2));
+       dev_info(adev->dev, "  VCE_RB_BASE_HI2=0x%08X\n",
+                RREG32(mmVCE_RB_BASE_HI2));
+       dev_info(adev->dev, "  VCE_RB_SIZE2=0x%08X\n",
+                RREG32(mmVCE_RB_SIZE2));
+       dev_info(adev->dev, "  VCE_RB_RPTR2=0x%08X\n",
+                RREG32(mmVCE_RB_RPTR2));
+       dev_info(adev->dev, "  VCE_RB_WPTR2=0x%08X\n",
+                RREG32(mmVCE_RB_WPTR2));
+       dev_info(adev->dev, "  VCE_RB_BASE_LO=0x%08X\n",
+                RREG32(mmVCE_RB_BASE_LO));
+       dev_info(adev->dev, "  VCE_RB_BASE_HI=0x%08X\n",
+                RREG32(mmVCE_RB_BASE_HI));
+       dev_info(adev->dev, "  VCE_RB_SIZE=0x%08X\n",
+                RREG32(mmVCE_RB_SIZE));
+       dev_info(adev->dev, "  VCE_RB_RPTR=0x%08X\n",
+                RREG32(mmVCE_RB_RPTR));
+       dev_info(adev->dev, "  VCE_RB_WPTR=0x%08X\n",
+                RREG32(mmVCE_RB_WPTR));
+       dev_info(adev->dev, "  VCE_CLOCK_GATING_A=0x%08X\n",
+                RREG32(mmVCE_CLOCK_GATING_A));
+       dev_info(adev->dev, "  VCE_CLOCK_GATING_B=0x%08X\n",
+                RREG32(mmVCE_CLOCK_GATING_B));
+       dev_info(adev->dev, "  VCE_UENC_CLOCK_GATING=0x%08X\n",
+                RREG32(mmVCE_UENC_CLOCK_GATING));
+       dev_info(adev->dev, "  VCE_UENC_REG_CLOCK_GATING=0x%08X\n",
+                RREG32(mmVCE_UENC_REG_CLOCK_GATING));
+       dev_info(adev->dev, "  VCE_SYS_INT_EN=0x%08X\n",
+                RREG32(mmVCE_SYS_INT_EN));
+       dev_info(adev->dev, "  VCE_LMI_CTRL2=0x%08X\n",
+                RREG32(mmVCE_LMI_CTRL2));
+       dev_info(adev->dev, "  VCE_LMI_CTRL=0x%08X\n",
+                RREG32(mmVCE_LMI_CTRL));
+       dev_info(adev->dev, "  VCE_LMI_VM_CTRL=0x%08X\n",
+                RREG32(mmVCE_LMI_VM_CTRL));
+       dev_info(adev->dev, "  VCE_LMI_SWAP_CNTL=0x%08X\n",
+                RREG32(mmVCE_LMI_SWAP_CNTL));
+       dev_info(adev->dev, "  VCE_LMI_SWAP_CNTL1=0x%08X\n",
+                RREG32(mmVCE_LMI_SWAP_CNTL1));
+       dev_info(adev->dev, "  VCE_LMI_CACHE_CTRL=0x%08X\n",
+                RREG32(mmVCE_LMI_CACHE_CTRL));
+}
+
+static int vce_v3_0_set_interrupt_state(struct amdgpu_device *adev,
+                                       struct amdgpu_irq_src *source,
+                                       unsigned type,
+                                       enum amdgpu_interrupt_state state)
+{
+       uint32_t val = 0;
+
+       if (state == AMDGPU_IRQ_STATE_ENABLE)
+               val |= VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK;
+
+       WREG32_P(mmVCE_SYS_INT_EN, val, ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
+       return 0;
+}
+
+static int vce_v3_0_process_interrupt(struct amdgpu_device *adev,
+                                     struct amdgpu_irq_src *source,
+                                     struct amdgpu_iv_entry *entry)
+{
+       DRM_DEBUG("IH: VCE\n");
+       switch (entry->src_data) {
+       case 0:
+               amdgpu_fence_process(&adev->vce.ring[0]);
+               break;
+       case 1:
+               amdgpu_fence_process(&adev->vce.ring[1]);
+               break;
+       default:
+               DRM_ERROR("Unhandled interrupt: %d %d\n",
+                         entry->src_id, entry->src_data);
+               break;
+       }
+
+       return 0;
+}
+
+static int vce_v3_0_set_clockgating_state(struct amdgpu_device *adev,
+                                         enum amdgpu_clockgating_state state)
+{
+       /* TODO */
+       return 0;
+}
+
+static int vce_v3_0_set_powergating_state(struct amdgpu_device *adev,
+                                         enum amdgpu_powergating_state state)
+{
+       /* This doesn't actually powergate the VCE block.
+        * That's done in the dpm code via the SMC.  This
+        * just re-inits the block as necessary.  The actual
+        * gating still happens in the dpm code.  We should
+        * revisit this when there is a cleaner line between
+        * the smc and the hw blocks
+        */
+       if (state == AMDGPU_PG_STATE_GATE)
+               /* XXX do we need a vce_v3_0_stop()? */
+               return 0;
+       else
+               return vce_v3_0_start(adev);
+}
+
+const struct amdgpu_ip_funcs vce_v3_0_ip_funcs = {
+       .early_init = vce_v3_0_early_init,
+       .late_init = NULL,
+       .sw_init = vce_v3_0_sw_init,
+       .sw_fini = vce_v3_0_sw_fini,
+       .hw_init = vce_v3_0_hw_init,
+       .hw_fini = vce_v3_0_hw_fini,
+       .suspend = vce_v3_0_suspend,
+       .resume = vce_v3_0_resume,
+       .is_idle = vce_v3_0_is_idle,
+       .wait_for_idle = vce_v3_0_wait_for_idle,
+       .soft_reset = vce_v3_0_soft_reset,
+       .print_status = vce_v3_0_print_status,
+       .set_clockgating_state = vce_v3_0_set_clockgating_state,
+       .set_powergating_state = vce_v3_0_set_powergating_state,
+};
+
+static const struct amdgpu_ring_funcs vce_v3_0_ring_funcs = {
+       .get_rptr = vce_v3_0_ring_get_rptr,
+       .get_wptr = vce_v3_0_ring_get_wptr,
+       .set_wptr = vce_v3_0_ring_set_wptr,
+       .parse_cs = amdgpu_vce_ring_parse_cs,
+       .emit_ib = amdgpu_vce_ring_emit_ib,
+       .emit_fence = amdgpu_vce_ring_emit_fence,
+       .emit_semaphore = amdgpu_vce_ring_emit_semaphore,
+       .test_ring = amdgpu_vce_ring_test_ring,
+       .test_ib = amdgpu_vce_ring_test_ib,
+       .is_lockup = amdgpu_ring_test_lockup,
+};
+
+static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev)
+{
+       adev->vce.ring[0].funcs = &vce_v3_0_ring_funcs;
+       adev->vce.ring[1].funcs = &vce_v3_0_ring_funcs;
+}
+
+static const struct amdgpu_irq_src_funcs vce_v3_0_irq_funcs = {
+       .set = vce_v3_0_set_interrupt_state,
+       .process = vce_v3_0_process_interrupt,
+};
+
+static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev)
+{
+       adev->vce.irq.num_types = 1;
+       adev->vce.irq.funcs = &vce_v3_0_irq_funcs;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.h b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.h
new file mode 100644 (file)
index 0000000..f3c2ba9
--- /dev/null
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __VCE_V3_0_H__
+#define __VCE_V3_0_H__
+
+extern const struct amdgpu_ip_funcs vce_v3_0_ip_funcs;
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
new file mode 100644 (file)
index 0000000..20a1598
--- /dev/null
@@ -0,0 +1,1373 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include <linux/firmware.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include "drmP.h"
+#include "amdgpu.h"
+#include "amdgpu_atombios.h"
+#include "amdgpu_ih.h"
+#include "amdgpu_uvd.h"
+#include "amdgpu_vce.h"
+#include "amdgpu_ucode.h"
+#include "atom.h"
+
+#include "gmc/gmc_8_1_d.h"
+#include "gmc/gmc_8_1_sh_mask.h"
+
+#include "oss/oss_3_0_d.h"
+#include "oss/oss_3_0_sh_mask.h"
+
+#include "bif/bif_5_0_d.h"
+#include "bif/bif_5_0_sh_mask.h"
+
+#include "gca/gfx_8_0_d.h"
+#include "gca/gfx_8_0_sh_mask.h"
+
+#include "smu/smu_7_1_1_d.h"
+#include "smu/smu_7_1_1_sh_mask.h"
+
+#include "uvd/uvd_5_0_d.h"
+#include "uvd/uvd_5_0_sh_mask.h"
+
+#include "vce/vce_3_0_d.h"
+#include "vce/vce_3_0_sh_mask.h"
+
+#include "dce/dce_10_0_d.h"
+#include "dce/dce_10_0_sh_mask.h"
+
+#include "vid.h"
+#include "vi.h"
+#include "vi_dpm.h"
+#include "gmc_v8_0.h"
+#include "gfx_v8_0.h"
+#include "sdma_v2_4.h"
+#include "sdma_v3_0.h"
+#include "dce_v10_0.h"
+#include "dce_v11_0.h"
+#include "iceland_ih.h"
+#include "tonga_ih.h"
+#include "cz_ih.h"
+#include "uvd_v5_0.h"
+#include "uvd_v6_0.h"
+#include "vce_v3_0.h"
+
+/*
+ * Indirect registers accessor
+ */
+static u32 vi_pcie_rreg(struct amdgpu_device *adev, u32 reg)
+{
+       unsigned long flags;
+       u32 r;
+
+       spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+       WREG32(mmPCIE_INDEX, reg);
+       (void)RREG32(mmPCIE_INDEX);
+       r = RREG32(mmPCIE_DATA);
+       spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+       return r;
+}
+
+static void vi_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+       WREG32(mmPCIE_INDEX, reg);
+       (void)RREG32(mmPCIE_INDEX);
+       WREG32(mmPCIE_DATA, v);
+       (void)RREG32(mmPCIE_DATA);
+       spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+}
+
+static u32 vi_smc_rreg(struct amdgpu_device *adev, u32 reg)
+{
+       unsigned long flags;
+       u32 r;
+
+       spin_lock_irqsave(&adev->smc_idx_lock, flags);
+       WREG32(mmSMC_IND_INDEX_0, (reg));
+       r = RREG32(mmSMC_IND_DATA_0);
+       spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
+       return r;
+}
+
+static void vi_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&adev->smc_idx_lock, flags);
+       WREG32(mmSMC_IND_INDEX_0, (reg));
+       WREG32(mmSMC_IND_DATA_0, (v));
+       spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
+}
+
+static u32 vi_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
+{
+       unsigned long flags;
+       u32 r;
+
+       spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
+       WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
+       r = RREG32(mmUVD_CTX_DATA);
+       spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
+       return r;
+}
+
+static void vi_uvd_ctx_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
+       WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
+       WREG32(mmUVD_CTX_DATA, (v));
+       spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
+}
+
+static u32 vi_didt_rreg(struct amdgpu_device *adev, u32 reg)
+{
+       unsigned long flags;
+       u32 r;
+
+       spin_lock_irqsave(&adev->didt_idx_lock, flags);
+       WREG32(mmDIDT_IND_INDEX, (reg));
+       r = RREG32(mmDIDT_IND_DATA);
+       spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
+       return r;
+}
+
+static void vi_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&adev->didt_idx_lock, flags);
+       WREG32(mmDIDT_IND_INDEX, (reg));
+       WREG32(mmDIDT_IND_DATA, (v));
+       spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
+}
+
+static const u32 tonga_mgcg_cgcg_init[] =
+{
+       mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
+       mmPCIE_INDEX, 0xffffffff, 0x0140001c,
+       mmPCIE_DATA, 0x000f0000, 0x00000000,
+       mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
+       mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
+       mmXDMA_CLOCK_GATING_CNTL, 0xffffffff, 0x00000100,
+       mmXDMA_MEM_POWER_CNTL, 0x00000101, 0x00000000,
+       mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
+       mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
+};
+
+static const u32 iceland_mgcg_cgcg_init[] =
+{
+       mmPCIE_INDEX, 0xffffffff, ixPCIE_CNTL2,
+       mmPCIE_DATA, 0x000f0000, 0x00000000,
+       mmSMC_IND_INDEX_4, 0xffffffff, ixCGTT_ROM_CLK_CTRL0,
+       mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
+       mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
+};
+
+static const u32 cz_mgcg_cgcg_init[] =
+{
+       mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
+       mmPCIE_INDEX, 0xffffffff, 0x0140001c,
+       mmPCIE_DATA, 0x000f0000, 0x00000000,
+       mmXDMA_CLOCK_GATING_CNTL, 0xffffffff, 0x00000100,
+       mmXDMA_MEM_POWER_CNTL, 0x00000101, 0x00000000,
+       mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
+       mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
+};
+
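+/**
+ * vi_init_golden_registers - program the golden register settings
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Apply the per-ASIC mgcg/cgcg init sequences defined above.
+ */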
+static void vi_init_golden_registers(struct amdgpu_device *adev)
+{
+       /* Some of the registers might be dependent on GRBM_GFX_INDEX */
+       mutex_lock(&adev->grbm_idx_mutex);
+
+       switch (adev->asic_type) {
+       case CHIP_TOPAZ:
+               amdgpu_program_register_sequence(adev,
+                                                iceland_mgcg_cgcg_init,
+                                                (const u32)ARRAY_SIZE(iceland_mgcg_cgcg_init));
+               break;
+       case CHIP_TONGA:
+               amdgpu_program_register_sequence(adev,
+                                                tonga_mgcg_cgcg_init,
+                                                (const u32)ARRAY_SIZE(tonga_mgcg_cgcg_init));
+               break;
+       case CHIP_CARRIZO:
+               amdgpu_program_register_sequence(adev,
+                                                cz_mgcg_cgcg_init,
+                                                (const u32)ARRAY_SIZE(cz_mgcg_cgcg_init));
+               break;
+       default:
+               break;
+       }
+       mutex_unlock(&adev->grbm_idx_mutex);
+}
+
+/**
+ * vi_get_xclk - get the xclk
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Returns the reference clock used by the gfx engine
+ * (VI).
+ */
+static u32 vi_get_xclk(struct amdgpu_device *adev)
+{
+       u32 reference_clock = adev->clock.spll.reference_freq;
+       u32 tmp;
+
+       if (adev->flags & AMDGPU_IS_APU)
+               return reference_clock;
+
+       tmp = RREG32_SMC(ixCG_CLKPIN_CNTL_2);
+       if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL_2, MUX_TCLK_TO_XCLK))
+               return 1000;
+
+       tmp = RREG32_SMC(ixCG_CLKPIN_CNTL);
+       if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL, XTALIN_DIVIDE))
+               return reference_clock / 4;
+
+       return reference_clock;
+}
+
+/**
+ * vi_srbm_select - select specific register instances
+ *
+ * @adev: amdgpu_device pointer
+ * @me: selected ME (micro engine)
+ * @pipe: pipe
+ * @queue: queue
+ * @vmid: VMID
+ *
+ * Switches the currently active register instances.  Some
+ * registers are instanced per VMID, others are instanced per
+ * me/pipe/queue combination.
+ */
+void vi_srbm_select(struct amdgpu_device *adev,
+                    u32 me, u32 pipe, u32 queue, u32 vmid)
+{
+       u32 srbm_gfx_cntl = 0;
+
+       srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, PIPEID, pipe);
+       srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, MEID, me);
+       srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, VMID, vmid);
+       srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, QUEUEID, queue);
+       WREG32(mmSRBM_GFX_CNTL, srbm_gfx_cntl);
+}
+
+static void vi_vga_set_state(struct amdgpu_device *adev, bool state)
+{
+       /* todo */
+}
+
+static bool vi_read_disabled_bios(struct amdgpu_device *adev)
+{
+       u32 bus_cntl;
+       u32 d1vga_control = 0;
+       u32 d2vga_control = 0;
+       u32 vga_render_control = 0;
+       u32 rom_cntl;
+       bool r;
+
+       bus_cntl = RREG32(mmBUS_CNTL);
+       if (adev->mode_info.num_crtc) {
+               d1vga_control = RREG32(mmD1VGA_CONTROL);
+               d2vga_control = RREG32(mmD2VGA_CONTROL);
+               vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
+       }
+       rom_cntl = RREG32_SMC(ixROM_CNTL);
+
+       /* enable the rom */
+       WREG32(mmBUS_CNTL, (bus_cntl & ~BUS_CNTL__BIOS_ROM_DIS_MASK));
+       if (adev->mode_info.num_crtc) {
+               /* Disable VGA mode */
+               WREG32(mmD1VGA_CONTROL,
+                      (d1vga_control & ~(D1VGA_CONTROL__D1VGA_MODE_ENABLE_MASK |
+                                         D1VGA_CONTROL__D1VGA_TIMING_SELECT_MASK)));
+               WREG32(mmD2VGA_CONTROL,
+                      (d2vga_control & ~(D2VGA_CONTROL__D2VGA_MODE_ENABLE_MASK |
+                                         D2VGA_CONTROL__D2VGA_TIMING_SELECT_MASK)));
+               WREG32(mmVGA_RENDER_CONTROL,
+                      (vga_render_control & ~VGA_RENDER_CONTROL__VGA_VSTATUS_CNTL_MASK));
+       }
+       WREG32_SMC(ixROM_CNTL, rom_cntl | ROM_CNTL__SCK_OVERWRITE_MASK);
+
+       r = amdgpu_read_bios(adev);
+
+       /* restore regs */
+       WREG32(mmBUS_CNTL, bus_cntl);
+       if (adev->mode_info.num_crtc) {
+               WREG32(mmD1VGA_CONTROL, d1vga_control);
+               WREG32(mmD2VGA_CONTROL, d2vga_control);
+               WREG32(mmVGA_RENDER_CONTROL, vga_render_control);
+       }
+       WREG32_SMC(ixROM_CNTL, rom_cntl);
+       return r;
+}
+
+static struct amdgpu_allowed_register_entry tonga_allowed_read_registers[] = {
+       {mmGB_MACROTILE_MODE7, true},
+};
+
+static struct amdgpu_allowed_register_entry cz_allowed_read_registers[] = {
+       {mmGB_TILE_MODE7, true},
+       {mmGB_TILE_MODE12, true},
+       {mmGB_TILE_MODE17, true},
+       {mmGB_TILE_MODE23, true},
+       {mmGB_MACROTILE_MODE7, true},
+};
+
+static struct amdgpu_allowed_register_entry vi_allowed_read_registers[] = {
+       {mmGRBM_STATUS, false},
+       {mmGB_ADDR_CONFIG, false},
+       {mmMC_ARB_RAMCFG, false},
+       {mmGB_TILE_MODE0, false},
+       {mmGB_TILE_MODE1, false},
+       {mmGB_TILE_MODE2, false},
+       {mmGB_TILE_MODE3, false},
+       {mmGB_TILE_MODE4, false},
+       {mmGB_TILE_MODE5, false},
+       {mmGB_TILE_MODE6, false},
+       {mmGB_TILE_MODE7, false},
+       {mmGB_TILE_MODE8, false},
+       {mmGB_TILE_MODE9, false},
+       {mmGB_TILE_MODE10, false},
+       {mmGB_TILE_MODE11, false},
+       {mmGB_TILE_MODE12, false},
+       {mmGB_TILE_MODE13, false},
+       {mmGB_TILE_MODE14, false},
+       {mmGB_TILE_MODE15, false},
+       {mmGB_TILE_MODE16, false},
+       {mmGB_TILE_MODE17, false},
+       {mmGB_TILE_MODE18, false},
+       {mmGB_TILE_MODE19, false},
+       {mmGB_TILE_MODE20, false},
+       {mmGB_TILE_MODE21, false},
+       {mmGB_TILE_MODE22, false},
+       {mmGB_TILE_MODE23, false},
+       {mmGB_TILE_MODE24, false},
+       {mmGB_TILE_MODE25, false},
+       {mmGB_TILE_MODE26, false},
+       {mmGB_TILE_MODE27, false},
+       {mmGB_TILE_MODE28, false},
+       {mmGB_TILE_MODE29, false},
+       {mmGB_TILE_MODE30, false},
+       {mmGB_TILE_MODE31, false},
+       {mmGB_MACROTILE_MODE0, false},
+       {mmGB_MACROTILE_MODE1, false},
+       {mmGB_MACROTILE_MODE2, false},
+       {mmGB_MACROTILE_MODE3, false},
+       {mmGB_MACROTILE_MODE4, false},
+       {mmGB_MACROTILE_MODE5, false},
+       {mmGB_MACROTILE_MODE6, false},
+       {mmGB_MACROTILE_MODE7, false},
+       {mmGB_MACROTILE_MODE8, false},
+       {mmGB_MACROTILE_MODE9, false},
+       {mmGB_MACROTILE_MODE10, false},
+       {mmGB_MACROTILE_MODE11, false},
+       {mmGB_MACROTILE_MODE12, false},
+       {mmGB_MACROTILE_MODE13, false},
+       {mmGB_MACROTILE_MODE14, false},
+       {mmGB_MACROTILE_MODE15, false},
+       {mmCC_RB_BACKEND_DISABLE, false, true},
+       {mmGC_USER_RB_BACKEND_DISABLE, false, true},
+       {mmGB_BACKEND_MAP, false, false},
+       {mmPA_SC_RASTER_CONFIG, false, true},
+       {mmPA_SC_RASTER_CONFIG_1, false, true},
+};
+
+static uint32_t vi_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
+                                        u32 sh_num, u32 reg_offset)
+{
+       uint32_t val;
+
+       mutex_lock(&adev->grbm_idx_mutex);
+       if (se_num != 0xffffffff || sh_num != 0xffffffff)
+               gfx_v8_0_select_se_sh(adev, se_num, sh_num);
+
+       val = RREG32(reg_offset);
+
+       if (se_num != 0xffffffff || sh_num != 0xffffffff)
+               gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
+       mutex_unlock(&adev->grbm_idx_mutex);
+       return val;
+}
+
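+/**
+ * vi_read_register - read an allowed register
+ *
+ * @adev: amdgpu_device pointer
+ * @se_num: shader engine to address for GRBM indexed registers
+ * @sh_num: shader array to address for GRBM indexed registers
+ * @reg_offset: register offset to read
+ * @value: filled in with the register value
+ *
+ * Look up @reg_offset in the per-ASIC and common allow lists and read it,
+ * using GRBM indexing where required.  Returns -EINVAL for registers that
+ * are not on either list.
+ */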
+static int vi_read_register(struct amdgpu_device *adev, u32 se_num,
+                           u32 sh_num, u32 reg_offset, u32 *value)
+{
+       struct amdgpu_allowed_register_entry *asic_register_table = NULL;
+       struct amdgpu_allowed_register_entry *asic_register_entry;
+       uint32_t size, i;
+
+       *value = 0;
+       switch (adev->asic_type) {
+       case CHIP_TOPAZ:
+               asic_register_table = tonga_allowed_read_registers;
+               size = ARRAY_SIZE(tonga_allowed_read_registers);
+               break;
+       case CHIP_TONGA:
+       case CHIP_CARRIZO:
+               asic_register_table = cz_allowed_read_registers;
+               size = ARRAY_SIZE(cz_allowed_read_registers);
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       if (asic_register_table) {
+               for (i = 0; i < size; i++) {
+                       asic_register_entry = asic_register_table + i;
+                       if (reg_offset != asic_register_entry->reg_offset)
+                               continue;
+                       if (!asic_register_entry->untouched)
+                               *value = asic_register_entry->grbm_indexed ?
+                                       vi_read_indexed_register(adev, se_num,
+                                                                sh_num, reg_offset) :
+                                       RREG32(reg_offset);
+                       return 0;
+               }
+       }
+
+       for (i = 0; i < ARRAY_SIZE(vi_allowed_read_registers); i++) {
+               if (reg_offset != vi_allowed_read_registers[i].reg_offset)
+                       continue;
+
+               if (!vi_allowed_read_registers[i].untouched)
+                       *value = vi_allowed_read_registers[i].grbm_indexed ?
+                               vi_read_indexed_register(adev, se_num,
+                                                        sh_num, reg_offset) :
+                               RREG32(reg_offset);
+               return 0;
+       }
+       return -EINVAL;
+}
+
+static void vi_print_gpu_status_regs(struct amdgpu_device *adev)
+{
+       dev_info(adev->dev, "  GRBM_STATUS=0x%08X\n",
+               RREG32(mmGRBM_STATUS));
+       dev_info(adev->dev, "  GRBM_STATUS2=0x%08X\n",
+               RREG32(mmGRBM_STATUS2));
+       dev_info(adev->dev, "  GRBM_STATUS_SE0=0x%08X\n",
+               RREG32(mmGRBM_STATUS_SE0));
+       dev_info(adev->dev, "  GRBM_STATUS_SE1=0x%08X\n",
+               RREG32(mmGRBM_STATUS_SE1));
+       dev_info(adev->dev, "  GRBM_STATUS_SE2=0x%08X\n",
+               RREG32(mmGRBM_STATUS_SE2));
+       dev_info(adev->dev, "  GRBM_STATUS_SE3=0x%08X\n",
+               RREG32(mmGRBM_STATUS_SE3));
+       dev_info(adev->dev, "  SRBM_STATUS=0x%08X\n",
+               RREG32(mmSRBM_STATUS));
+       dev_info(adev->dev, "  SRBM_STATUS2=0x%08X\n",
+               RREG32(mmSRBM_STATUS2));
+       dev_info(adev->dev, "  SDMA0_STATUS_REG   = 0x%08X\n",
+               RREG32(mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET));
+       dev_info(adev->dev, "  SDMA1_STATUS_REG   = 0x%08X\n",
+                RREG32(mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET));
+       dev_info(adev->dev, "  CP_STAT = 0x%08x\n", RREG32(mmCP_STAT));
+       dev_info(adev->dev, "  CP_STALLED_STAT1 = 0x%08x\n",
+                RREG32(mmCP_STALLED_STAT1));
+       dev_info(adev->dev, "  CP_STALLED_STAT2 = 0x%08x\n",
+                RREG32(mmCP_STALLED_STAT2));
+       dev_info(adev->dev, "  CP_STALLED_STAT3 = 0x%08x\n",
+                RREG32(mmCP_STALLED_STAT3));
+       dev_info(adev->dev, "  CP_CPF_BUSY_STAT = 0x%08x\n",
+                RREG32(mmCP_CPF_BUSY_STAT));
+       dev_info(adev->dev, "  CP_CPF_STALLED_STAT1 = 0x%08x\n",
+                RREG32(mmCP_CPF_STALLED_STAT1));
+       dev_info(adev->dev, "  CP_CPF_STATUS = 0x%08x\n", RREG32(mmCP_CPF_STATUS));
+       dev_info(adev->dev, "  CP_CPC_BUSY_STAT = 0x%08x\n", RREG32(mmCP_CPC_BUSY_STAT));
+       dev_info(adev->dev, "  CP_CPC_STALLED_STAT1 = 0x%08x\n",
+                RREG32(mmCP_CPC_STALLED_STAT1));
+       dev_info(adev->dev, "  CP_CPC_STATUS = 0x%08x\n", RREG32(mmCP_CPC_STATUS));
+}
+
+/**
+ * vi_gpu_check_soft_reset - check which blocks are busy
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Check which blocks are busy and return the relevant reset
+ * mask to be used by vi_gpu_soft_reset().
+ * Returns a mask of the blocks to be reset.
+ */
+static u32 vi_gpu_check_soft_reset(struct amdgpu_device *adev)
+{
+       u32 reset_mask = 0;
+       u32 tmp;
+
+       /* GRBM_STATUS */
+       tmp = RREG32(mmGRBM_STATUS);
+       if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK |
+                  GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK |
+                  GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK |
+                  GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK |
+                  GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK |
+                  GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK))
+               reset_mask |= AMDGPU_RESET_GFX;
+
+       if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK))
+               reset_mask |= AMDGPU_RESET_CP;
+
+       /* GRBM_STATUS2 */
+       tmp = RREG32(mmGRBM_STATUS2);
+       if (tmp & GRBM_STATUS2__RLC_BUSY_MASK)
+               reset_mask |= AMDGPU_RESET_RLC;
+
+       if (tmp & (GRBM_STATUS2__CPF_BUSY_MASK |
+                  GRBM_STATUS2__CPC_BUSY_MASK |
+                  GRBM_STATUS2__CPG_BUSY_MASK))
+               reset_mask |= AMDGPU_RESET_CP;
+
+       /* SRBM_STATUS2 */
+       tmp = RREG32(mmSRBM_STATUS2);
+       if (tmp & SRBM_STATUS2__SDMA_BUSY_MASK)
+               reset_mask |= AMDGPU_RESET_DMA;
+
+       if (tmp & SRBM_STATUS2__SDMA1_BUSY_MASK)
+               reset_mask |= AMDGPU_RESET_DMA1;
+
+       /* SRBM_STATUS */
+       tmp = RREG32(mmSRBM_STATUS);
+
+       if (tmp & SRBM_STATUS__IH_BUSY_MASK)
+               reset_mask |= AMDGPU_RESET_IH;
+
+       if (tmp & SRBM_STATUS__SEM_BUSY_MASK)
+               reset_mask |= AMDGPU_RESET_SEM;
+
+       if (tmp & SRBM_STATUS__GRBM_RQ_PENDING_MASK)
+               reset_mask |= AMDGPU_RESET_GRBM;
+
+       if (adev->asic_type != CHIP_TOPAZ) {
+               if (tmp & (SRBM_STATUS__UVD_RQ_PENDING_MASK |
+                          SRBM_STATUS__UVD_BUSY_MASK))
+                       reset_mask |= AMDGPU_RESET_UVD;
+       }
+
+       if (tmp & SRBM_STATUS__VMC_BUSY_MASK)
+               reset_mask |= AMDGPU_RESET_VMC;
+
+       if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
+                  SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK))
+               reset_mask |= AMDGPU_RESET_MC;
+
+       /* SDMA0_STATUS_REG */
+       tmp = RREG32(mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET);
+       if (!(tmp & SDMA0_STATUS_REG__IDLE_MASK))
+               reset_mask |= AMDGPU_RESET_DMA;
+
+       /* SDMA1_STATUS_REG */
+       tmp = RREG32(mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET);
+       if (!(tmp & SDMA0_STATUS_REG__IDLE_MASK))
+               reset_mask |= AMDGPU_RESET_DMA1;
+#if 0
+       /* VCE_STATUS */
+       if (adev->asic_type != CHIP_TOPAZ) {
+               tmp = RREG32(mmVCE_STATUS);
+               if (tmp & VCE_STATUS__VCPU_REPORT_RB0_BUSY_MASK)
+                       reset_mask |= AMDGPU_RESET_VCE;
+               if (tmp & VCE_STATUS__VCPU_REPORT_RB1_BUSY_MASK)
+                       reset_mask |= AMDGPU_RESET_VCE1;
+
+       }
+
+       if (adev->asic_type != CHIP_TOPAZ) {
+               if (amdgpu_display_is_display_hung(adev))
+                       reset_mask |= AMDGPU_RESET_DISPLAY;
+       }
+#endif
+
+       /* Skip MC reset as it's most likely not hung, just busy */
+       if (reset_mask & AMDGPU_RESET_MC) {
+               DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
+               reset_mask &= ~AMDGPU_RESET_MC;
+       }
+
+       return reset_mask;
+}
+
+/**
+ * vi_gpu_soft_reset - soft reset GPU
+ *
+ * @adev: amdgpu_device pointer
+ * @reset_mask: mask of which blocks to reset
+ *
+ * Soft reset the blocks specified in @reset_mask.
+ */
+static void vi_gpu_soft_reset(struct amdgpu_device *adev, u32 reset_mask)
+{
+       struct amdgpu_mode_mc_save save;
+       u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
+       u32 tmp;
+
+       if (reset_mask == 0)
+               return;
+
+       dev_info(adev->dev, "GPU softreset: 0x%08X\n", reset_mask);
+
+       vi_print_gpu_status_regs(adev);
+       dev_info(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
+                RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR));
+       dev_info(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
+                RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS));
+
+       /* disable CG/PG */
+
+       /* stop the rlc */
+       //XXX
+       //gfx_v8_0_rlc_stop(adev);
+
+       /* Disable GFX parsing/prefetching */
+       tmp = RREG32(mmCP_ME_CNTL);
+       tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, 1);
+       tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, 1);
+       tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, 1);
+       WREG32(mmCP_ME_CNTL, tmp);
+
+       /* Disable MEC parsing/prefetching */
+       tmp = RREG32(mmCP_MEC_CNTL);
+       tmp = REG_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME1_HALT, 1);
+       tmp = REG_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME2_HALT, 1);
+       WREG32(mmCP_MEC_CNTL, tmp);
+
+       if (reset_mask & AMDGPU_RESET_DMA) {
+               /* sdma0 */
+               tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET);
+               tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 1);
+               WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp);
+       }
+       if (reset_mask & AMDGPU_RESET_DMA1) {
+               /* sdma1 */
+               tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET);
+               tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 1);
+               WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp);
+       }
+
+       gmc_v8_0_mc_stop(adev, &save);
+       if (amdgpu_asic_wait_for_mc_idle(adev))
+               dev_warn(adev->dev, "Wait for MC idle timed out!\n");
+
+       if (reset_mask & (AMDGPU_RESET_GFX | AMDGPU_RESET_COMPUTE | AMDGPU_RESET_CP)) {
+               grbm_soft_reset =
+                       REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
+               grbm_soft_reset =
+                       REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_GFX, 1);
+       }
+
+       if (reset_mask & AMDGPU_RESET_CP) {
+               grbm_soft_reset =
+                       REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
+               srbm_soft_reset =
+                       REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_GRBM, 1);
+       }
+
+       if (reset_mask & AMDGPU_RESET_DMA)
+               srbm_soft_reset =
+                       REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SDMA, 1);
+
+       if (reset_mask & AMDGPU_RESET_DMA1)
+               srbm_soft_reset =
+                       REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SDMA1, 1);
+
+       if (reset_mask & AMDGPU_RESET_DISPLAY)
+               srbm_soft_reset =
+                       REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_DC, 1);
+
+       if (reset_mask & AMDGPU_RESET_RLC)
+               grbm_soft_reset =
+                       REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
+
+       if (reset_mask & AMDGPU_RESET_SEM)
+               srbm_soft_reset =
+                       REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SEM, 1);
+
+       if (reset_mask & AMDGPU_RESET_IH)
+               srbm_soft_reset =
+                       REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_IH, 1);
+
+       if (reset_mask & AMDGPU_RESET_GRBM)
+               srbm_soft_reset =
+                       REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_GRBM, 1);
+
+       if (reset_mask & AMDGPU_RESET_VMC)
+               srbm_soft_reset =
+                       REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VMC, 1);
+
+       if (reset_mask & AMDGPU_RESET_UVD)
+               srbm_soft_reset =
+                       REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_UVD, 1);
+
+       if (reset_mask & AMDGPU_RESET_VCE)
+               srbm_soft_reset =
+                       REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);
+
+       if (reset_mask & AMDGPU_RESET_VCE1)
+               srbm_soft_reset =
+                       REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
+
+       if (!(adev->flags & AMDGPU_IS_APU)) {
+               if (reset_mask & AMDGPU_RESET_MC)
+                       srbm_soft_reset =
+                               REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
+       }
+
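+       /* pulse the soft reset bits: assert, read back to post the write,
+        * wait 50us, then deassert
+        */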
+       if (grbm_soft_reset) {
+               tmp = RREG32(mmGRBM_SOFT_RESET);
+               tmp |= grbm_soft_reset;
+               dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
+               WREG32(mmGRBM_SOFT_RESET, tmp);
+               tmp = RREG32(mmGRBM_SOFT_RESET);
+
+               udelay(50);
+
+               tmp &= ~grbm_soft_reset;
+               WREG32(mmGRBM_SOFT_RESET, tmp);
+               tmp = RREG32(mmGRBM_SOFT_RESET);
+       }
+
+       if (srbm_soft_reset) {
+               tmp = RREG32(mmSRBM_SOFT_RESET);
+               tmp |= srbm_soft_reset;
+               dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
+               WREG32(mmSRBM_SOFT_RESET, tmp);
+               tmp = RREG32(mmSRBM_SOFT_RESET);
+
+               udelay(50);
+
+               tmp &= ~srbm_soft_reset;
+               WREG32(mmSRBM_SOFT_RESET, tmp);
+               tmp = RREG32(mmSRBM_SOFT_RESET);
+       }
+
+       /* Wait a little for things to settle down */
+       udelay(50);
+
+       gmc_v8_0_mc_resume(adev, &save);
+       udelay(50);
+
+       vi_print_gpu_status_regs(adev);
+}
+
+static void vi_gpu_pci_config_reset(struct amdgpu_device *adev)
+{
+       struct amdgpu_mode_mc_save save;
+       u32 tmp, i;
+
+       dev_info(adev->dev, "GPU pci config reset\n");
+
+       /* disable dpm? */
+
+       /* disable cg/pg */
+
+       /* Disable GFX parsing/prefetching */
+       tmp = RREG32(mmCP_ME_CNTL);
+       tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, 1);
+       tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, 1);
+       tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, 1);
+       WREG32(mmCP_ME_CNTL, tmp);
+
+       /* Disable MEC parsing/prefetching */
+       tmp = RREG32(mmCP_MEC_CNTL);
+       tmp = REG_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME1_HALT, 1);
+       tmp = REG_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME2_HALT, 1);
+       WREG32(mmCP_MEC_CNTL, tmp);
+
+       /* sdma0 */
+       tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET);
+       tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 1);
+       WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp);
+
+       /* sdma1 */
+       tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET);
+       tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 1);
+       WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp);
+
+       /* XXX other engines? */
+
+       /* halt the rlc, disable cp internal ints */
+       //XXX
+       //gfx_v8_0_rlc_stop(adev);
+
+       udelay(50);
+
+       /* disable mem access */
+       gmc_v8_0_mc_stop(adev, &save);
+       if (amdgpu_asic_wait_for_mc_idle(adev))
+               dev_warn(adev->dev, "Wait for MC idle timed out!\n");
+
+       /* disable BM */
+       pci_clear_master(adev->pdev);
+       /* reset */
+       amdgpu_pci_config_reset(adev);
+
+       udelay(100);
+
+       /* wait for asic to come out of reset */
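+       /* CONFIG_MEMSIZE reads back as all ones until the ASIC responds again */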
+       for (i = 0; i < adev->usec_timeout; i++) {
+               if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff)
+                       break;
+               udelay(1);
+       }
+
+}
+
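+/* set or clear the ATOM_S3_ASIC_GUI_ENGINE_HUNG flag in BIOS scratch
+ * register 3; vi_asic_reset() sets it before a reset attempt and clears
+ * it again once no blocks report busy
+ */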
+static void vi_set_bios_scratch_engine_hung(struct amdgpu_device *adev, bool hung)
+{
+       u32 tmp = RREG32(mmBIOS_SCRATCH_3);
+
+       if (hung)
+               tmp |= ATOM_S3_ASIC_GUI_ENGINE_HUNG;
+       else
+               tmp &= ~ATOM_S3_ASIC_GUI_ENGINE_HUNG;
+
+       WREG32(mmBIOS_SCRATCH_3, tmp);
+}
+
+/**
+ * vi_asic_reset - soft reset GPU
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Look up which blocks are hung and attempt a soft reset of them;
+ * if blocks remain busy afterwards and amdgpu_hard_reset is set,
+ * fall back to a PCI config reset.
+ * Returns 0 for success.
+ */
+static int vi_asic_reset(struct amdgpu_device *adev)
+{
+       u32 reset_mask;
+
+       reset_mask = vi_gpu_check_soft_reset(adev);
+
+       if (reset_mask)
+               vi_set_bios_scratch_engine_hung(adev, true);
+
+       /* try soft reset */
+       vi_gpu_soft_reset(adev, reset_mask);
+
+       reset_mask = vi_gpu_check_soft_reset(adev);
+
+       /* try pci config reset */
+       if (reset_mask && amdgpu_hard_reset)
+               vi_gpu_pci_config_reset(adev);
+
+       reset_mask = vi_gpu_check_soft_reset(adev);
+
+       if (!reset_mask)
+               vi_set_bios_scratch_engine_hung(adev, false);
+
+       return 0;
+}
+
+static int vi_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
+                       u32 cntl_reg, u32 status_reg)
+{
+       int r, i;
+       struct atom_clock_dividers dividers;
+       uint32_t tmp;
+
+       r = amdgpu_atombios_get_clock_dividers(adev,
+                                              COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
+                                              clock, false, &dividers);
+       if (r)
+               return r;
+
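+       /* program the post divider; the VCLK and DCLK control registers share
+        * the same field layout, so the CG_DCLK_CNTL masks are used for both
+        */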
+       tmp = RREG32_SMC(cntl_reg);
+       tmp &= ~(CG_DCLK_CNTL__DCLK_DIR_CNTL_EN_MASK |
+               CG_DCLK_CNTL__DCLK_DIVIDER_MASK);
+       tmp |= dividers.post_divider;
+       WREG32_SMC(cntl_reg, tmp);
+
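+       /* poll the status register for up to ~1s (100 * 10ms) */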
+       for (i = 0; i < 100; i++) {
+               if (RREG32_SMC(status_reg) & CG_DCLK_STATUS__DCLK_STATUS_MASK)
+                       break;
+               mdelay(10);
+       }
+       if (i == 100)
+               return -ETIMEDOUT;
+
+       return 0;
+}
+
+static int vi_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
+{
+       int r;
+
+       r = vi_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS);
+       if (r)
+               return r;
+
+       r = vi_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS);
+
+       return r;
+}
+
+static int vi_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
+{
+       /* todo */
+
+       return 0;
+}
+
+static void vi_pcie_gen3_enable(struct amdgpu_device *adev)
+{
+       u32 mask;
+       int ret;
+
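+       /* the pcie_gen2 module parameter gates all link speed changes,
+        * including gen3
+        */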
+       if (amdgpu_pcie_gen2 == 0)
+               return;
+
+       if (adev->flags & AMDGPU_IS_APU)
+               return;
+
+       ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
+       if (ret != 0)
+               return;
+
+       if (!(mask & (DRM_PCIE_SPEED_50 | DRM_PCIE_SPEED_80)))
+               return;
+
+       /* todo */
+}
+
+static void vi_program_aspm(struct amdgpu_device *adev)
+{
+       if (amdgpu_aspm == 0)
+               return;
+
+       /* todo */
+}
+
+static void vi_enable_doorbell_aperture(struct amdgpu_device *adev,
+                                       bool enable)
+{
+       u32 tmp;
+
+       /* not necessary on CZ */
+       if (adev->flags & AMDGPU_IS_APU)
+               return;
+
+       tmp = RREG32(mmBIF_DOORBELL_APER_EN);
+       if (enable)
+               tmp = REG_SET_FIELD(tmp, BIF_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 1);
+       else
+               tmp = REG_SET_FIELD(tmp, BIF_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 0);
+
+       WREG32(mmBIF_DOORBELL_APER_EN, tmp);
+}
+
+/* topaz has no DCE, UVD, VCE */
+static const struct amdgpu_ip_block_version topaz_ip_blocks[] =
+{
+       /* ORDER MATTERS! */
+       {
+               .type = AMDGPU_IP_BLOCK_TYPE_COMMON,
+               .major = 2,
+               .minor = 0,
+               .rev = 0,
+               .funcs = &vi_common_ip_funcs,
+       },
+       {
+               .type = AMDGPU_IP_BLOCK_TYPE_GMC,
+               .major = 8,
+               .minor = 0,
+               .rev = 0,
+               .funcs = &gmc_v8_0_ip_funcs,
+       },
+       {
+               .type = AMDGPU_IP_BLOCK_TYPE_IH,
+               .major = 2,
+               .minor = 4,
+               .rev = 0,
+               .funcs = &iceland_ih_ip_funcs,
+       },
+       {
+               .type = AMDGPU_IP_BLOCK_TYPE_SMC,
+               .major = 7,
+               .minor = 1,
+               .rev = 0,
+               .funcs = &iceland_dpm_ip_funcs,
+       },
+       {
+               .type = AMDGPU_IP_BLOCK_TYPE_GFX,
+               .major = 8,
+               .minor = 0,
+               .rev = 0,
+               .funcs = &gfx_v8_0_ip_funcs,
+       },
+       {
+               .type = AMDGPU_IP_BLOCK_TYPE_SDMA,
+               .major = 2,
+               .minor = 4,
+               .rev = 0,
+               .funcs = &sdma_v2_4_ip_funcs,
+       },
+};
+
+static const struct amdgpu_ip_block_version tonga_ip_blocks[] =
+{
+       /* ORDER MATTERS! */
+       {
+               .type = AMDGPU_IP_BLOCK_TYPE_COMMON,
+               .major = 2,
+               .minor = 0,
+               .rev = 0,
+               .funcs = &vi_common_ip_funcs,
+       },
+       {
+               .type = AMDGPU_IP_BLOCK_TYPE_GMC,
+               .major = 8,
+               .minor = 0,
+               .rev = 0,
+               .funcs = &gmc_v8_0_ip_funcs,
+       },
+       {
+               .type = AMDGPU_IP_BLOCK_TYPE_IH,
+               .major = 3,
+               .minor = 0,
+               .rev = 0,
+               .funcs = &tonga_ih_ip_funcs,
+       },
+       {
+               .type = AMDGPU_IP_BLOCK_TYPE_SMC,
+               .major = 7,
+               .minor = 1,
+               .rev = 0,
+               .funcs = &tonga_dpm_ip_funcs,
+       },
+       {
+               .type = AMDGPU_IP_BLOCK_TYPE_DCE,
+               .major = 10,
+               .minor = 0,
+               .rev = 0,
+               .funcs = &dce_v10_0_ip_funcs,
+       },
+       {
+               .type = AMDGPU_IP_BLOCK_TYPE_GFX,
+               .major = 8,
+               .minor = 0,
+               .rev = 0,
+               .funcs = &gfx_v8_0_ip_funcs,
+       },
+       {
+               .type = AMDGPU_IP_BLOCK_TYPE_SDMA,
+               .major = 3,
+               .minor = 0,
+               .rev = 0,
+               .funcs = &sdma_v3_0_ip_funcs,
+       },
+       {
+               .type = AMDGPU_IP_BLOCK_TYPE_UVD,
+               .major = 5,
+               .minor = 0,
+               .rev = 0,
+               .funcs = &uvd_v5_0_ip_funcs,
+       },
+       {
+               .type = AMDGPU_IP_BLOCK_TYPE_VCE,
+               .major = 3,
+               .minor = 0,
+               .rev = 0,
+               .funcs = &vce_v3_0_ip_funcs,
+       },
+};
+
+static const struct amdgpu_ip_block_version cz_ip_blocks[] =
+{
+       /* ORDER MATTERS! */
+       {
+               .type = AMDGPU_IP_BLOCK_TYPE_COMMON,
+               .major = 2,
+               .minor = 0,
+               .rev = 0,
+               .funcs = &vi_common_ip_funcs,
+       },
+       {
+               .type = AMDGPU_IP_BLOCK_TYPE_GMC,
+               .major = 8,
+               .minor = 0,
+               .rev = 0,
+               .funcs = &gmc_v8_0_ip_funcs,
+       },
+       {
+               .type = AMDGPU_IP_BLOCK_TYPE_IH,
+               .major = 3,
+               .minor = 0,
+               .rev = 0,
+               .funcs = &cz_ih_ip_funcs,
+       },
+       {
+               .type = AMDGPU_IP_BLOCK_TYPE_SMC,
+               .major = 8,
+               .minor = 0,
+               .rev = 0,
+               .funcs = &cz_dpm_ip_funcs,
+       },
+       {
+               .type = AMDGPU_IP_BLOCK_TYPE_DCE,
+               .major = 11,
+               .minor = 0,
+               .rev = 0,
+               .funcs = &dce_v11_0_ip_funcs,
+       },
+       {
+               .type = AMDGPU_IP_BLOCK_TYPE_GFX,
+               .major = 8,
+               .minor = 0,
+               .rev = 0,
+               .funcs = &gfx_v8_0_ip_funcs,
+       },
+       {
+               .type = AMDGPU_IP_BLOCK_TYPE_SDMA,
+               .major = 3,
+               .minor = 0,
+               .rev = 0,
+               .funcs = &sdma_v3_0_ip_funcs,
+       },
+       {
+               .type = AMDGPU_IP_BLOCK_TYPE_UVD,
+               .major = 6,
+               .minor = 0,
+               .rev = 0,
+               .funcs = &uvd_v6_0_ip_funcs,
+       },
+       {
+               .type = AMDGPU_IP_BLOCK_TYPE_VCE,
+               .major = 3,
+               .minor = 0,
+               .rev = 0,
+               .funcs = &vce_v3_0_ip_funcs,
+       },
+};
+
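+/* pick the per-ASIC IP block list; the blocks are initialized in array order */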
+int vi_set_ip_blocks(struct amdgpu_device *adev)
+{
+       switch (adev->asic_type) {
+       case CHIP_TOPAZ:
+               adev->ip_blocks = topaz_ip_blocks;
+               adev->num_ip_blocks = ARRAY_SIZE(topaz_ip_blocks);
+               break;
+       case CHIP_TONGA:
+               adev->ip_blocks = tonga_ip_blocks;
+               adev->num_ip_blocks = ARRAY_SIZE(tonga_ip_blocks);
+               break;
+       case CHIP_CARRIZO:
+               adev->ip_blocks = cz_ip_blocks;
+               adev->num_ip_blocks = ARRAY_SIZE(cz_ip_blocks);
+               break;
+       default:
+               /* FIXME: not supported yet */
+               return -EINVAL;
+       }
+
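+       /* one enabled flag per IP block */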
+       adev->ip_block_enabled = kcalloc(adev->num_ip_blocks, sizeof(bool), GFP_KERNEL);
+       if (adev->ip_block_enabled == NULL)
+               return -ENOMEM;
+
+       return 0;
+}
+
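+/* Topaz exposes the ATI rev id via a PCIE efuse strap; the other VI parts
+ * report it in CC_DRM_ID_STRAPS
+ */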
+static uint32_t vi_get_rev_id(struct amdgpu_device *adev)
+{
+       if (adev->asic_type == CHIP_TOPAZ)
+               return (RREG32(mmPCIE_EFUSE4) & PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID_MASK)
+                       >> PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID__SHIFT;
+       else
+               return (RREG32(mmCC_DRM_ID_STRAPS) & CC_DRM_ID_STRAPS__ATI_REV_ID_MASK)
+                       >> CC_DRM_ID_STRAPS__ATI_REV_ID__SHIFT;
+}
+
+static const struct amdgpu_asic_funcs vi_asic_funcs =
+{
+       .read_disabled_bios = &vi_read_disabled_bios,
+       .read_register = &vi_read_register,
+       .reset = &vi_asic_reset,
+       .set_vga_state = &vi_vga_set_state,
+       .get_xclk = &vi_get_xclk,
+       .set_uvd_clocks = &vi_set_uvd_clocks,
+       .set_vce_clocks = &vi_set_vce_clocks,
+       .get_cu_info = &gfx_v8_0_get_cu_info,
+       /* these should be moved to their own ip modules */
+       .get_gpu_clock_counter = &gfx_v8_0_get_gpu_clock_counter,
+       .wait_for_mc_idle = &gmc_v8_0_mc_wait_for_idle,
+};
+
+static int vi_common_early_init(struct amdgpu_device *adev)
+{
+       bool smc_enabled = false;
+
+       adev->smc_rreg = &vi_smc_rreg;
+       adev->smc_wreg = &vi_smc_wreg;
+       adev->pcie_rreg = &vi_pcie_rreg;
+       adev->pcie_wreg = &vi_pcie_wreg;
+       adev->uvd_ctx_rreg = &vi_uvd_ctx_rreg;
+       adev->uvd_ctx_wreg = &vi_uvd_ctx_wreg;
+       adev->didt_rreg = &vi_didt_rreg;
+       adev->didt_wreg = &vi_didt_wreg;
+
+       adev->asic_funcs = &vi_asic_funcs;
+
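+       /* only ask the SMU to load microcode if an SMC block exists and has
+        * not been masked off with the ip_block_mask parameter
+        */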
+       if (amdgpu_get_ip_block(adev, AMDGPU_IP_BLOCK_TYPE_SMC) &&
+               (amdgpu_ip_block_mask & (1 << AMDGPU_IP_BLOCK_TYPE_SMC)))
+               smc_enabled = true;
+
+       adev->rev_id = vi_get_rev_id(adev);
+       adev->external_rev_id = 0xFF;
+       switch (adev->asic_type) {
+       case CHIP_TOPAZ:
+               adev->has_uvd = false;
+               adev->cg_flags = 0;
+               adev->pg_flags = 0;
+               adev->external_rev_id = 0x1;
+               if (amdgpu_smc_load_fw && smc_enabled)
+                       adev->firmware.smu_load = true;
+               break;
+       case CHIP_TONGA:
+               adev->has_uvd = true;
+               adev->cg_flags = 0;
+               adev->pg_flags = 0;
+               adev->external_rev_id = adev->rev_id + 0x14;
+               if (amdgpu_smc_load_fw && smc_enabled)
+                       adev->firmware.smu_load = true;
+               break;
+       case CHIP_CARRIZO:
+               adev->has_uvd = true;
+               adev->cg_flags = 0;
+               adev->pg_flags = 0;
+               adev->external_rev_id = adev->rev_id + 0x1;
+               if (amdgpu_smc_load_fw && smc_enabled)
+                       adev->firmware.smu_load = true;
+               break;
+       default:
+               /* FIXME: not supported yet */
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int vi_common_sw_init(struct amdgpu_device *adev)
+{
+       return 0;
+}
+
+static int vi_common_sw_fini(struct amdgpu_device *adev)
+{
+       return 0;
+}
+
+static int vi_common_hw_init(struct amdgpu_device *adev)
+{
+       /* move the golden regs per IP block */
+       vi_init_golden_registers(adev);
+       /* enable pcie gen2/3 link */
+       vi_pcie_gen3_enable(adev);
+       /* enable aspm */
+       vi_program_aspm(adev);
+       /* enable the doorbell aperture */
+       vi_enable_doorbell_aperture(adev, true);
+
+       return 0;
+}
+
+static int vi_common_hw_fini(struct amdgpu_device *adev)
+{
+       /* enable the doorbell aperture */
+       vi_enable_doorbell_aperture(adev, false);
+
+       return 0;
+}
+
+static int vi_common_suspend(struct amdgpu_device *adev)
+{
+       return vi_common_hw_fini(adev);
+}
+
+static int vi_common_resume(struct amdgpu_device *adev)
+{
+       return vi_common_hw_init(adev);
+}
+
+static bool vi_common_is_idle(struct amdgpu_device *adev)
+{
+       return true;
+}
+
+static int vi_common_wait_for_idle(struct amdgpu_device *adev)
+{
+       return 0;
+}
+
+static void vi_common_print_status(struct amdgpu_device *adev)
+{
+
+}
+
+static int vi_common_soft_reset(struct amdgpu_device *adev)
+{
+       /* XXX hard reset?? */
+       return 0;
+}
+
+static int vi_common_set_clockgating_state(struct amdgpu_device *adev,
+                                           enum amdgpu_clockgating_state state)
+{
+       return 0;
+}
+
+static int vi_common_set_powergating_state(struct amdgpu_device *adev,
+                                           enum amdgpu_powergating_state state)
+{
+       return 0;
+}
+
+const struct amdgpu_ip_funcs vi_common_ip_funcs = {
+       .early_init = vi_common_early_init,
+       .late_init = NULL,
+       .sw_init = vi_common_sw_init,
+       .sw_fini = vi_common_sw_fini,
+       .hw_init = vi_common_hw_init,
+       .hw_fini = vi_common_hw_fini,
+       .suspend = vi_common_suspend,
+       .resume = vi_common_resume,
+       .is_idle = vi_common_is_idle,
+       .wait_for_idle = vi_common_wait_for_idle,
+       .soft_reset = vi_common_soft_reset,
+       .print_status = vi_common_print_status,
+       .set_clockgating_state = vi_common_set_clockgating_state,
+       .set_powergating_state = vi_common_set_powergating_state,
+};
+
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.h b/drivers/gpu/drm/amd/amdgpu/vi.h
new file mode 100644 (file)
index 0000000..d16a5f7
--- /dev/null
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __VI_H__
+#define __VI_H__
+
+extern const struct amdgpu_ip_funcs vi_common_ip_funcs;
+
+void vi_srbm_select(struct amdgpu_device *adev,
+                   u32 me, u32 pipe, u32 queue, u32 vmid);
+int vi_set_ip_blocks(struct amdgpu_device *adev);
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/vi_dpm.h b/drivers/gpu/drm/amd/amdgpu/vi_dpm.h
new file mode 100644 (file)
index 0000000..11cb1f7
--- /dev/null
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __VI_DPM_H__
+#define __VI_DPM_H__
+
+extern const struct amdgpu_ip_funcs cz_dpm_ip_funcs;
+int cz_smu_init(struct amdgpu_device *adev);
+int cz_smu_start(struct amdgpu_device *adev);
+int cz_smu_fini(struct amdgpu_device *adev);
+
+extern const struct amdgpu_ip_funcs tonga_dpm_ip_funcs;
+
+extern const struct amdgpu_ip_funcs iceland_dpm_ip_funcs;
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/vid.h b/drivers/gpu/drm/amd/amdgpu/vid.h
new file mode 100644 (file)
index 0000000..385267c
--- /dev/null
@@ -0,0 +1,363 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef VI_H
+#define VI_H
+
+#define SDMA0_REGISTER_OFFSET                             0x0 /* not a register */
+#define SDMA1_REGISTER_OFFSET                             0x200 /* not a register */
+#define SDMA_MAX_INSTANCE 2
+
+/* crtc instance offsets */
+#define CRTC0_REGISTER_OFFSET                 (0x1b9c - 0x1b9c)
+#define CRTC1_REGISTER_OFFSET                 (0x1d9c - 0x1b9c)
+#define CRTC2_REGISTER_OFFSET                 (0x1f9c - 0x1b9c)
+#define CRTC3_REGISTER_OFFSET                 (0x419c - 0x1b9c)
+#define CRTC4_REGISTER_OFFSET                 (0x439c - 0x1b9c)
+#define CRTC5_REGISTER_OFFSET                 (0x459c - 0x1b9c)
+#define CRTC6_REGISTER_OFFSET                 (0x479c - 0x1b9c)
+
+/* dig instance offsets */
+#define DIG0_REGISTER_OFFSET                 (0x4a00 - 0x4a00)
+#define DIG1_REGISTER_OFFSET                 (0x4b00 - 0x4a00)
+#define DIG2_REGISTER_OFFSET                 (0x4c00 - 0x4a00)
+#define DIG3_REGISTER_OFFSET                 (0x4d00 - 0x4a00)
+#define DIG4_REGISTER_OFFSET                 (0x4e00 - 0x4a00)
+#define DIG5_REGISTER_OFFSET                 (0x4f00 - 0x4a00)
+#define DIG6_REGISTER_OFFSET                 (0x5400 - 0x4a00)
+#define DIG7_REGISTER_OFFSET                 (0x5600 - 0x4a00)
+#define DIG8_REGISTER_OFFSET                 (0x5700 - 0x4a00)
+
+/* audio endpt instance offsets */
+#define AUD0_REGISTER_OFFSET                 (0x17a8 - 0x17a8)
+#define AUD1_REGISTER_OFFSET                 (0x17ac - 0x17a8)
+#define AUD2_REGISTER_OFFSET                 (0x17b0 - 0x17a8)
+#define AUD3_REGISTER_OFFSET                 (0x17b4 - 0x17a8)
+#define AUD4_REGISTER_OFFSET                 (0x17b8 - 0x17a8)
+#define AUD5_REGISTER_OFFSET                 (0x17bc - 0x17a8)
+#define AUD6_REGISTER_OFFSET                 (0x17c4 - 0x17a8)
+
+/* hpd instance offsets */
+#define HPD0_REGISTER_OFFSET                 (0x1898 - 0x1898)
+#define HPD1_REGISTER_OFFSET                 (0x18a0 - 0x1898)
+#define HPD2_REGISTER_OFFSET                 (0x18a8 - 0x1898)
+#define HPD3_REGISTER_OFFSET                 (0x18b0 - 0x1898)
+#define HPD4_REGISTER_OFFSET                 (0x18b8 - 0x1898)
+#define HPD5_REGISTER_OFFSET                 (0x18c0 - 0x1898)
+
+#define AMDGPU_NUM_OF_VMIDS                    8
+
+#define RB_BITMAP_WIDTH_PER_SH     2
+
+#define MC_SEQ_MISC0__GDDR5__SHIFT     0x1c
+#define MC_SEQ_MISC0__GDDR5_MASK       0xf0000000
+#define MC_SEQ_MISC0__GDDR5_VALUE      5
+
+/*
+ * PM4
+ */
+#define        PACKET_TYPE0    0
+#define        PACKET_TYPE1    1
+#define        PACKET_TYPE2    2
+#define        PACKET_TYPE3    3
+
+#define CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3)
+#define CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF)
+#define CP_PACKET0_GET_REG(h) ((h) & 0xFFFF)
+#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
+#define PACKET0(reg, n)        ((PACKET_TYPE0 << 30) |                         \
+                        ((reg) & 0xFFFF) |                     \
+                        ((n) & 0x3FFF) << 16)
+#define CP_PACKET2                     0x80000000
+#define                PACKET2_PAD_SHIFT               0
+#define                PACKET2_PAD_MASK                (0x3fffffff << 0)
+
+#define PACKET2(v)     (CP_PACKET2 | REG_SET(PACKET2_PAD, (v)))
+
+#define PACKET3(op, n) ((PACKET_TYPE3 << 30) |                         \
+                        (((op) & 0xFF) << 8) |                         \
+                        ((n) & 0x3FFF) << 16)
+
+#define PACKET3_COMPUTE(op, n) (PACKET3(op, n) | 1 << 1)
+
+/* Packet 3 types */
+#define        PACKET3_NOP                                     0x10
+#define        PACKET3_SET_BASE                                0x11
+#define                PACKET3_BASE_INDEX(x)                  ((x) << 0)
+#define                        CE_PARTITION_BASE               3
+#define        PACKET3_CLEAR_STATE                             0x12
+#define        PACKET3_INDEX_BUFFER_SIZE                       0x13
+#define        PACKET3_DISPATCH_DIRECT                         0x15
+#define        PACKET3_DISPATCH_INDIRECT                       0x16
+#define        PACKET3_ATOMIC_GDS                              0x1D
+#define        PACKET3_ATOMIC_MEM                              0x1E
+#define        PACKET3_OCCLUSION_QUERY                         0x1F
+#define        PACKET3_SET_PREDICATION                         0x20
+#define        PACKET3_REG_RMW                                 0x21
+#define        PACKET3_COND_EXEC                               0x22
+#define        PACKET3_PRED_EXEC                               0x23
+#define        PACKET3_DRAW_INDIRECT                           0x24
+#define        PACKET3_DRAW_INDEX_INDIRECT                     0x25
+#define        PACKET3_INDEX_BASE                              0x26
+#define        PACKET3_DRAW_INDEX_2                            0x27
+#define        PACKET3_CONTEXT_CONTROL                         0x28
+#define        PACKET3_INDEX_TYPE                              0x2A
+#define        PACKET3_DRAW_INDIRECT_MULTI                     0x2C
+#define        PACKET3_DRAW_INDEX_AUTO                         0x2D
+#define        PACKET3_NUM_INSTANCES                           0x2F
+#define        PACKET3_DRAW_INDEX_MULTI_AUTO                   0x30
+#define        PACKET3_INDIRECT_BUFFER_CONST                   0x33
+#define        PACKET3_STRMOUT_BUFFER_UPDATE                   0x34
+#define        PACKET3_DRAW_INDEX_OFFSET_2                     0x35
+#define        PACKET3_DRAW_PREAMBLE                           0x36
+#define        PACKET3_WRITE_DATA                              0x37
+#define                WRITE_DATA_DST_SEL(x)                   ((x) << 8)
+               /* 0 - register
+                * 1 - memory (sync - via GRBM)
+                * 2 - gl2
+                * 3 - gds
+                * 4 - reserved
+                * 5 - memory (async - direct)
+                */
+#define                WR_ONE_ADDR                             (1 << 16)
+#define                WR_CONFIRM                              (1 << 20)
+#define                WRITE_DATA_CACHE_POLICY(x)              ((x) << 25)
+               /* 0 - LRU
+                * 1 - Stream
+                */
+#define                WRITE_DATA_ENGINE_SEL(x)                ((x) << 30)
+               /* 0 - me
+                * 1 - pfp
+                * 2 - ce
+                */
+#define        PACKET3_DRAW_INDEX_INDIRECT_MULTI               0x38
+#define        PACKET3_MEM_SEMAPHORE                           0x39
+#              define PACKET3_SEM_USE_MAILBOX       (0x1 << 16)
+#              define PACKET3_SEM_SEL_SIGNAL_TYPE   (0x1 << 20) /* 0 = increment, 1 = write 1 */
+#              define PACKET3_SEM_CLIENT_CODE(x)   ((x) << 24) /* 0 = CP, 1 = CB, 2 = DB */
+#              define PACKET3_SEM_SEL_SIGNAL       (0x6 << 29)
+#              define PACKET3_SEM_SEL_WAIT         (0x7 << 29)
+#define        PACKET3_WAIT_REG_MEM                            0x3C
+#define                WAIT_REG_MEM_FUNCTION(x)                ((x) << 0)
+               /* 0 - always
+                * 1 - <
+                * 2 - <=
+                * 3 - ==
+                * 4 - !=
+                * 5 - >=
+                * 6 - >
+                */
+#define                WAIT_REG_MEM_MEM_SPACE(x)               ((x) << 4)
+               /* 0 - reg
+                * 1 - mem
+                */
+#define                WAIT_REG_MEM_OPERATION(x)               ((x) << 6)
+               /* 0 - wait_reg_mem
+                * 1 - wr_wait_wr_reg
+                */
+#define                WAIT_REG_MEM_ENGINE(x)                  ((x) << 8)
+               /* 0 - me
+                * 1 - pfp
+                */
+#define        PACKET3_INDIRECT_BUFFER                         0x3F
+#define                INDIRECT_BUFFER_TCL2_VOLATILE           (1 << 22)
+#define                INDIRECT_BUFFER_VALID                   (1 << 23)
+#define                INDIRECT_BUFFER_CACHE_POLICY(x)         ((x) << 28)
+               /* 0 - LRU
+                * 1 - Stream
+                * 2 - Bypass
+                */
+#define        PACKET3_COPY_DATA                               0x40
+#define        PACKET3_PFP_SYNC_ME                             0x42
+#define        PACKET3_SURFACE_SYNC                            0x43
+#              define PACKET3_DEST_BASE_0_ENA      (1 << 0)
+#              define PACKET3_DEST_BASE_1_ENA      (1 << 1)
+#              define PACKET3_CB0_DEST_BASE_ENA    (1 << 6)
+#              define PACKET3_CB1_DEST_BASE_ENA    (1 << 7)
+#              define PACKET3_CB2_DEST_BASE_ENA    (1 << 8)
+#              define PACKET3_CB3_DEST_BASE_ENA    (1 << 9)
+#              define PACKET3_CB4_DEST_BASE_ENA    (1 << 10)
+#              define PACKET3_CB5_DEST_BASE_ENA    (1 << 11)
+#              define PACKET3_CB6_DEST_BASE_ENA    (1 << 12)
+#              define PACKET3_CB7_DEST_BASE_ENA    (1 << 13)
+#              define PACKET3_DB_DEST_BASE_ENA     (1 << 14)
+#              define PACKET3_TCL1_VOL_ACTION_ENA  (1 << 15)
+#              define PACKET3_TC_VOL_ACTION_ENA    (1 << 16) /* L2 */
+#              define PACKET3_TC_WB_ACTION_ENA     (1 << 18) /* L2 */
+#              define PACKET3_DEST_BASE_2_ENA      (1 << 19)
+#              define PACKET3_DEST_BASE_3_ENA      (1 << 21)
+#              define PACKET3_TCL1_ACTION_ENA      (1 << 22)
+#              define PACKET3_TC_ACTION_ENA        (1 << 23) /* L2 */
+#              define PACKET3_CB_ACTION_ENA        (1 << 25)
+#              define PACKET3_DB_ACTION_ENA        (1 << 26)
+#              define PACKET3_SH_KCACHE_ACTION_ENA (1 << 27)
+#              define PACKET3_SH_KCACHE_VOL_ACTION_ENA (1 << 28)
+#              define PACKET3_SH_ICACHE_ACTION_ENA (1 << 29)
+#define        PACKET3_COND_WRITE                              0x45
+#define        PACKET3_EVENT_WRITE                             0x46
+#define                EVENT_TYPE(x)                           ((x) << 0)
+#define                EVENT_INDEX(x)                          ((x) << 8)
+               /* 0 - any non-TS event
+                * 1 - ZPASS_DONE, PIXEL_PIPE_STAT_*
+                * 2 - SAMPLE_PIPELINESTAT
+                * 3 - SAMPLE_STREAMOUTSTAT*
+                * 4 - *S_PARTIAL_FLUSH
+                * 5 - EOP events
+                * 6 - EOS events
+                */
+#define        PACKET3_EVENT_WRITE_EOP                         0x47
+#define                EOP_TCL1_VOL_ACTION_EN                  (1 << 12)
+#define                EOP_TC_VOL_ACTION_EN                    (1 << 13) /* L2 */
+#define                EOP_TC_WB_ACTION_EN                     (1 << 15) /* L2 */
+#define                EOP_TCL1_ACTION_EN                      (1 << 16)
+#define                EOP_TC_ACTION_EN                        (1 << 17) /* L2 */
+#define                EOP_TCL2_VOLATILE                       (1 << 24)
+#define                EOP_CACHE_POLICY(x)                     ((x) << 25)
+               /* 0 - LRU
+                * 1 - Stream
+                * 2 - Bypass
+                */
+#define                DATA_SEL(x)                             ((x) << 29)
+               /* 0 - discard
+                * 1 - send low 32bit data
+                * 2 - send 64bit data
+                * 3 - send 64bit GPU counter value
+                * 4 - send 64bit sys counter value
+                */
+#define                INT_SEL(x)                              ((x) << 24)
+               /* 0 - none
+                * 1 - interrupt only (DATA_SEL = 0)
+                * 2 - interrupt when data write is confirmed
+                */
+#define                DST_SEL(x)                              ((x) << 16)
+               /* 0 - MC
+                * 1 - TC/L2
+                */
+#define        PACKET3_EVENT_WRITE_EOS                         0x48
+#define        PACKET3_RELEASE_MEM                             0x49
+#define        PACKET3_PREAMBLE_CNTL                           0x4A
+#              define PACKET3_PREAMBLE_BEGIN_CLEAR_STATE     (2 << 28)
+#              define PACKET3_PREAMBLE_END_CLEAR_STATE       (3 << 28)
+#define        PACKET3_DMA_DATA                                0x50
+/* 1. header
+ * 2. CONTROL
+ * 3. SRC_ADDR_LO or DATA [31:0]
+ * 4. SRC_ADDR_HI [31:0]
+ * 5. DST_ADDR_LO [31:0]
+ * 6. DST_ADDR_HI [7:0]
+ * 7. COMMAND [30:21] | BYTE_COUNT [20:0]
+ */
+/* CONTROL */
+#              define PACKET3_DMA_DATA_ENGINE(x)     ((x) << 0)
+               /* 0 - ME
+                * 1 - PFP
+                */
+#              define PACKET3_DMA_DATA_SRC_CACHE_POLICY(x) ((x) << 13)
+               /* 0 - LRU
+                * 1 - Stream
+                * 2 - Bypass
+                */
+#              define PACKET3_DMA_DATA_SRC_VOLATILE (1 << 15)
+#              define PACKET3_DMA_DATA_DST_SEL(x)  ((x) << 20)
+               /* 0 - DST_ADDR using DAS
+                * 1 - GDS
+                * 3 - DST_ADDR using L2
+                */
+#              define PACKET3_DMA_DATA_DST_CACHE_POLICY(x) ((x) << 25)
+               /* 0 - LRU
+                * 1 - Stream
+                * 2 - Bypass
+                */
+#              define PACKET3_DMA_DATA_DST_VOLATILE (1 << 27)
+#              define PACKET3_DMA_DATA_SRC_SEL(x)  ((x) << 29)
+               /* 0 - SRC_ADDR using SAS
+                * 1 - GDS
+                * 2 - DATA
+                * 3 - SRC_ADDR using L2
+                */
+#              define PACKET3_DMA_DATA_CP_SYNC     (1 << 31)
+/* COMMAND */
+#              define PACKET3_DMA_DATA_DIS_WC      (1 << 21)
+#              define PACKET3_DMA_DATA_CMD_SRC_SWAP(x) ((x) << 22)
+               /* 0 - none
+                * 1 - 8 in 16
+                * 2 - 8 in 32
+                * 3 - 8 in 64
+                */
+#              define PACKET3_DMA_DATA_CMD_DST_SWAP(x) ((x) << 24)
+               /* 0 - none
+                * 1 - 8 in 16
+                * 2 - 8 in 32
+                * 3 - 8 in 64
+                */
+#              define PACKET3_DMA_DATA_CMD_SAS     (1 << 26)
+               /* 0 - memory
+                * 1 - register
+                */
+#              define PACKET3_DMA_DATA_CMD_DAS     (1 << 27)
+               /* 0 - memory
+                * 1 - register
+                */
+#              define PACKET3_DMA_DATA_CMD_SAIC    (1 << 28)
+#              define PACKET3_DMA_DATA_CMD_DAIC    (1 << 29)
+#              define PACKET3_DMA_DATA_CMD_RAW_WAIT  (1 << 30)
+#define        PACKET3_AQUIRE_MEM                              0x58
+#define        PACKET3_REWIND                                  0x59
+#define        PACKET3_LOAD_UCONFIG_REG                        0x5E
+#define        PACKET3_LOAD_SH_REG                             0x5F
+#define        PACKET3_LOAD_CONFIG_REG                         0x60
+#define        PACKET3_LOAD_CONTEXT_REG                        0x61
+#define        PACKET3_SET_CONFIG_REG                          0x68
+#define                PACKET3_SET_CONFIG_REG_START                    0x00002000
+#define                PACKET3_SET_CONFIG_REG_END                      0x00002c00
+#define        PACKET3_SET_CONTEXT_REG                         0x69
+#define                PACKET3_SET_CONTEXT_REG_START                   0x0000a000
+#define                PACKET3_SET_CONTEXT_REG_END                     0x0000a400
+#define        PACKET3_SET_CONTEXT_REG_INDIRECT                0x73
+#define        PACKET3_SET_SH_REG                              0x76
+#define                PACKET3_SET_SH_REG_START                        0x00002c00
+#define                PACKET3_SET_SH_REG_END                          0x00003000
+#define        PACKET3_SET_SH_REG_OFFSET                       0x77
+#define        PACKET3_SET_QUEUE_REG                           0x78
+#define        PACKET3_SET_UCONFIG_REG                         0x79
+#define                PACKET3_SET_UCONFIG_REG_START                   0x0000c000
+#define                PACKET3_SET_UCONFIG_REG_END                     0x0000c400
+#define        PACKET3_SCRATCH_RAM_WRITE                       0x7D
+#define        PACKET3_SCRATCH_RAM_READ                        0x7E
+#define        PACKET3_LOAD_CONST_RAM                          0x80
+#define        PACKET3_WRITE_CONST_RAM                         0x81
+#define        PACKET3_DUMP_CONST_RAM                          0x83
+#define        PACKET3_INCREMENT_CE_COUNTER                    0x84
+#define        PACKET3_INCREMENT_DE_COUNTER                    0x85
+#define        PACKET3_WAIT_ON_CE_COUNTER                      0x86
+#define        PACKET3_WAIT_ON_DE_COUNTER_DIFF                 0x88
+#define        PACKET3_SWITCH_BUFFER                           0x8B
+
+#define VCE_CMD_NO_OP          0x00000000
+#define VCE_CMD_END            0x00000001
+#define VCE_CMD_IB             0x00000002
+#define VCE_CMD_FENCE          0x00000003
+#define VCE_CMD_TRAP           0x00000004
+#define VCE_CMD_IB_AUTO        0x00000005
+#define VCE_CMD_SEMAPHORE      0x00000006
+
+#endif