drm/amdgpu: fix vm size and block size for VMPT (v5)
author Zhang, Jerry <Jerry.Zhang@amd.com>
Wed, 29 Mar 2017 08:08:32 +0000 (16:08 +0800)
committer Alex Deucher <alexander.deucher@amd.com>
Thu, 6 Apr 2017 17:27:17 +0000 (13:27 -0400)
Set reasonable defaults per family.

v2: set both of them in gmc
v3: move vm size and block size into vm manager
v4: squash in warning fix from Alex Xie
v5: squash in min() warning fix

Signed-off-by: Junwei Zhang <Jerry.Zhang@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c

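For context on the arithmetic this patch moves into adev->vm_manager: vm_size is expressed in GB, so max_pfn = vm_size << 18 (a GB holds 2^18 4 KB pages), and each page table holds 1 << block_size entries, which is what the new AMDGPU_VM_PTE_COUNT(adev) macro evaluates to. The stand-alone sketch below mirrors amdgpu_vm_num_entries() from the amdgpu_vm.c hunk; the num_level value is an assumption chosen for the example, not something set in this patch.

/*
 * Stand-alone illustration (not kernel code) of the relationships used
 * in the hunks below: max_pfn = vm_size_in_GB << 18, and a page table
 * holds 1 << block_size entries (AMDGPU_VM_PTE_COUNT(adev)).
 */
#include <stdint.h>
#include <stdio.h>

struct vm_manager {
	uint64_t vm_size;    /* in GB, like adev->vm_manager.vm_size     */
	uint32_t block_size; /* bits resolved per page-table level       */
	uint32_t num_level;  /* directory levels above the page tables   */
	uint64_t max_pfn;
};

/* Entries needed at a given level, mirroring amdgpu_vm_num_entries(). */
static unsigned int num_entries(const struct vm_manager *vm, unsigned int level)
{
	if (level == 0)				/* root page directory      */
		return vm->max_pfn >> (vm->block_size * vm->num_level);
	else if (level == vm->num_level)	/* leaf page tables         */
		return 1u << vm->block_size;	/* AMDGPU_VM_PTE_COUNT      */
	else					/* intermediate directories */
		return 1u << vm->block_size;
}

int main(void)
{
	/* Vega10 defaults from the gmc_v9_0.c hunk: 256 TB, 512 PTEs per
	 * page table; num_level = 3 is assumed here purely for the example.
	 */
	struct vm_manager vm = { .vm_size = 1ULL << 18, .block_size = 9,
				 .num_level = 3 };
	unsigned int level;

	vm.max_pfn = vm.vm_size << 18;
	for (level = 0; level <= vm.num_level; level++)
		printf("level %u: %u entries\n", level, num_entries(&vm, level));
	return 0;
}
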
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index d78c523c93fe23458e3004dea6596769f1708734..831c2bfd2072ba3dcf7a3d83b5cfb6225278109f 100644 (file)
@@ -1042,14 +1042,6 @@ static bool amdgpu_check_pot_argument(int arg)
 
 static void amdgpu_get_block_size(struct amdgpu_device *adev)
 {
-       /* from AI, asic starts to support multiple level VMPT */
-       if (adev->asic_type >= CHIP_VEGA10) {
-               if (amdgpu_vm_block_size != 9)
-                       dev_warn(adev->dev,
-                                "Multi-VMPT limits block size to one page!\n");
-               amdgpu_vm_block_size = 9;
-               return;
-       }
        /* defines number of bits in page table versus page directory,
         * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
         * page table and the remaining bits are in the page directory */
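
As a concrete illustration of the split described in the comment above (the 64 GB figure is an assumed example, not something this patch sets):

/* Sketch: with a 64 GB VM space (assumed example) and 4 KB pages there
 * are 36 address bits; 12 are the page offset, block_size go to the
 * page table and the remainder to the page directory.
 */
#include <stdio.h>

int main(void)
{
	unsigned int total_bits = 36;	/* log2(64 GB), assumed example */
	unsigned int page_bits = 12;	/* 4 KB page offset             */
	unsigned int block_size = 9;	/* minimum page-table bits      */
	unsigned int pd_bits = total_bits - page_bits - block_size;

	printf("offset %u + PT %u + PD %u = %u bits\n",
	       page_bits, block_size, pd_bits, total_bits);
	return 0;
}
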
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 14f0889f7563056ba266f993eb6d5669ad88c793..2895d9d86f29b8ee64b239614bb077e3bdd2e471 100644 (file)
@@ -100,13 +100,14 @@ static unsigned amdgpu_vm_num_entries(struct amdgpu_device *adev,
        if (level == 0)
                /* For the root directory */
                return adev->vm_manager.max_pfn >>
-                       (amdgpu_vm_block_size * adev->vm_manager.num_level);
+                       (adev->vm_manager.block_size *
+                        adev->vm_manager.num_level);
        else if (level == adev->vm_manager.num_level)
                /* For the page tables on the leaves */
-               return AMDGPU_VM_PTE_COUNT;
+               return AMDGPU_VM_PTE_COUNT(adev);
        else
                /* Everything in between */
-               return 1 << amdgpu_vm_block_size;
+               return 1 << adev->vm_manager.block_size;
 }
 
 /**
@@ -271,7 +272,7 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
                                  unsigned level)
 {
        unsigned shift = (adev->vm_manager.num_level - level) *
-               amdgpu_vm_block_size;
+               adev->vm_manager.block_size;
        unsigned pt_idx, from, to;
        int r;
 
@@ -976,7 +977,7 @@ static struct amdgpu_bo *amdgpu_vm_get_pt(struct amdgpu_pte_update_params *p,
        unsigned idx, level = p->adev->vm_manager.num_level;
 
        while (entry->entries) {
-               idx = addr >> (amdgpu_vm_block_size * level--);
+               idx = addr >> (p->adev->vm_manager.block_size * level--);
                idx %= amdgpu_bo_size(entry->bo) / 8;
                entry = &entry->entries[idx];
        }
@@ -1003,7 +1004,8 @@ static void amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
                                  uint64_t start, uint64_t end,
                                  uint64_t dst, uint64_t flags)
 {
-       const uint64_t mask = AMDGPU_VM_PTE_COUNT - 1;
+       struct amdgpu_device *adev = params->adev;
+       const uint64_t mask = AMDGPU_VM_PTE_COUNT(adev) - 1;
 
        uint64_t cur_pe_start, cur_nptes, cur_dst;
        uint64_t addr; /* next GPU address to be updated */
@@ -1027,7 +1029,7 @@ static void amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
        if ((addr & ~mask) == (end & ~mask))
                nptes = end - addr;
        else
-               nptes = AMDGPU_VM_PTE_COUNT - (addr & mask);
+               nptes = AMDGPU_VM_PTE_COUNT(adev) - (addr & mask);
 
        cur_pe_start = amdgpu_bo_gpu_offset(pt);
        cur_pe_start += (addr & mask) * 8;
@@ -1055,7 +1057,7 @@ static void amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
                if ((addr & ~mask) == (end & ~mask))
                        nptes = end - addr;
                else
-                       nptes = AMDGPU_VM_PTE_COUNT - (addr & mask);
+                       nptes = AMDGPU_VM_PTE_COUNT(adev) - (addr & mask);
 
                next_pe_start = amdgpu_bo_gpu_offset(pt);
                next_pe_start += (addr & mask) * 8;
@@ -1202,7 +1204,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
         * reserve space for one command every (1 << BLOCK_SIZE)
         *  entries or 2k dwords (whatever is smaller)
         */
-       ncmds = (nptes >> min(amdgpu_vm_block_size, 11)) + 1;
+       ncmds = (nptes >> min(adev->vm_manager.block_size, 11u)) + 1;
 
        /* padding, etc. */
        ndw = 64;
@@ -2073,7 +2075,7 @@ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
 int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 {
        const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE,
-               AMDGPU_VM_PTE_COUNT * 8);
+               AMDGPU_VM_PTE_COUNT(adev) * 8);
        unsigned ring_instance;
        struct amdgpu_ring *ring;
        struct amd_sched_rq *rq;
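
The "(1 << BLOCK_SIZE) entries or 2k dwords" reservation comment above translates into the small estimate sketched here; the mapping sizes are illustrative only.

/* Sketch of the command-count estimate from amdgpu_vm_bo_update_mapping():
 * one command is reserved per (1 << min(block_size, 11)) PTEs.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t ncmds_for(uint64_t nptes, uint32_t block_size)
{
	uint32_t shift = block_size < 11u ? block_size : 11u;	/* min(..., 11u) */

	return (nptes >> shift) + 1;
}

int main(void)
{
	/* 1 MB mapping -> 256 PTEs -> 1 command with block_size = 9 */
	printf("%llu\n", (unsigned long long)ncmds_for(256, 9));
	/* 4 GB mapping -> 1 << 20 PTEs -> 2049 commands             */
	printf("%llu\n", (unsigned long long)ncmds_for(1ULL << 20, 9));
	return 0;
}
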
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index ed2467881b7472b23576a3c7f98c7eda72947046..02b0dd3b135f2b802f92ba27f44dd56b976c4271 100644 (file)
@@ -45,7 +45,7 @@ struct amdgpu_bo_list_entry;
 #define AMDGPU_VM_MAX_UPDATE_SIZE      0x3FFFF
 
 /* number of entries in page table */
-#define AMDGPU_VM_PTE_COUNT (1 << amdgpu_vm_block_size)
+#define AMDGPU_VM_PTE_COUNT(adev) (1 << (adev)->vm_manager.block_size)
 
 /* PTBs (Page Table Blocks) need to be aligned to 32K */
 #define AMDGPU_VM_PTB_ALIGN_SIZE   32768
@@ -162,6 +162,8 @@ struct amdgpu_vm_manager {
 
        uint64_t                                max_pfn;
        uint32_t                                num_level;
+       uint64_t                                vm_size;
+       uint32_t                                block_size;
        /* vram base address for page table entry  */
        u64                                     vram_base_offset;
        /* is vm enabled? */
drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
index b808d4ce86d65e083734aed037e45f252e9c7b7e..70c21f9b904b370be8c1d62d4d6ce3e00b076912 100644 (file)
@@ -222,7 +222,7 @@ int gfxhub_v1_0_gart_enable(struct amdgpu_device *adev)
                                EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
                tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
                                PAGE_TABLE_BLOCK_SIZE,
-                                   amdgpu_vm_block_size - 9);
+                               adev->vm_manager.block_size - 9);
                WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT1_CNTL) + i, tmp);
                WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32) + i*2, 0);
                WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32) + i*2, 0);
drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index d9586601a437e8c68ce03308d2b692245331cf8c..8f18d14f8edab35209f3f50d67e139ce0e250175 100644 (file)
@@ -543,7 +543,8 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
        WREG32(mmVM_CONTEXT1_CNTL,
               VM_CONTEXT1_CNTL__ENABLE_CONTEXT_MASK |
               (1UL << VM_CONTEXT1_CNTL__PAGE_TABLE_DEPTH__SHIFT) |
-              ((amdgpu_vm_block_size - 9) << VM_CONTEXT1_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT));
+              ((adev->vm_manager.block_size - 9)
+              << VM_CONTEXT1_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT));
        if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
                gmc_v6_0_set_fault_enable_default(adev, false);
        else
@@ -848,7 +849,12 @@ static int gmc_v6_0_sw_init(void *handle)
        if (r)
                return r;
 
-       adev->vm_manager.max_pfn = amdgpu_vm_size << 18;
+       adev->vm_manager.vm_size = amdgpu_vm_size;
+       adev->vm_manager.block_size = amdgpu_vm_block_size;
+       adev->vm_manager.max_pfn = adev->vm_manager.vm_size << 18;
+
+       DRM_INFO("vm size is %llu GB, block size is %d-bit\n",
+               adev->vm_manager.vm_size, adev->vm_manager.block_size);
 
        adev->mc.mc_mask = 0xffffffffffULL;
 
drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 78643a1baa5c44c35f69db1dcf817223bdc0cf1c..b86b454197f8c101f73daf3107f2f74e44b6c37a 100644 (file)
@@ -644,7 +644,7 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
        tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
        tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH, 1);
        tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_BLOCK_SIZE,
-                           amdgpu_vm_block_size - 9);
+                           adev->vm_manager.block_size - 9);
        WREG32(mmVM_CONTEXT1_CNTL, tmp);
        if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
                gmc_v7_0_set_fault_enable_default(adev, false);
@@ -1003,7 +1003,12 @@ static int gmc_v7_0_sw_init(void *handle)
         * Currently set to 4GB ((1 << 20) 4k pages).
         * Max GPUVM size for cayman and SI is 40 bits.
         */
-       adev->vm_manager.max_pfn = amdgpu_vm_size << 18;
+       adev->vm_manager.vm_size = amdgpu_vm_size;
+       adev->vm_manager.block_size = amdgpu_vm_block_size;
+       adev->vm_manager.max_pfn = adev->vm_manager.vm_size << 18;
+
+       DRM_INFO("vm size is %llu GB, block size is %d-bit\n",
+               adev->vm_manager.vm_size, adev->vm_manager.block_size);
 
        /* Set the internal MC address mask
         * This is the max address of the GPU's
drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 42b2f357a7994a5a6d5839d3f421031a1108b67b..108a20e832cf465b4523152e7ed16f581628efee 100644 (file)
@@ -853,7 +853,7 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
        tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
        tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
        tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_BLOCK_SIZE,
-                           amdgpu_vm_block_size - 9);
+                           adev->vm_manager.block_size - 9);
        WREG32(mmVM_CONTEXT1_CNTL, tmp);
        if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
                gmc_v8_0_set_fault_enable_default(adev, false);
@@ -1087,7 +1087,12 @@ static int gmc_v8_0_sw_init(void *handle)
         * Currently set to 4GB ((1 << 20) 4k pages).
         * Max GPUVM size for cayman and SI is 40 bits.
         */
-       adev->vm_manager.max_pfn = amdgpu_vm_size << 18;
+       adev->vm_manager.vm_size = amdgpu_vm_size;
+       adev->vm_manager.block_size = amdgpu_vm_block_size;
+       adev->vm_manager.max_pfn = adev->vm_manager.vm_size << 18;
+
+       DRM_INFO("vm size is %llu GB, block size is %d-bit\n",
+               adev->vm_manager.vm_size, adev->vm_manager.block_size);
 
        /* Set the internal MC address mask
         * This is the max address of the GPU's
drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index d81372357ae687e7dde7c847383997147c54d316..7632f4ad7aa9edc0e9a94cf4ce6d4742cf1aae7e 100644 (file)
@@ -532,11 +532,23 @@ static int gmc_v9_0_sw_init(void *handle)
 
        if (adev->flags & AMD_IS_APU) {
                adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
+               adev->vm_manager.vm_size = amdgpu_vm_size;
+               adev->vm_manager.block_size = amdgpu_vm_block_size;
        } else {
                /* XXX Don't know how to get VRAM type yet. */
                adev->mc.vram_type = AMDGPU_VRAM_TYPE_HBM;
+               /*
+                * To fulfill 4-level page support,
+                * vm size is 256TB (48bit), maximum size of Vega10,
+                * block size 512 (9bit)
+                */
+               adev->vm_manager.vm_size = 1U << 18;
+               adev->vm_manager.block_size = 9;
        }
 
+       DRM_INFO("vm size is %llu GB, block size is %d-bit\n",
+               adev->vm_manager.vm_size, adev->vm_manager.block_size);
+
        /* This interrupt is VMC page fault.*/
        r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_VMC, 0,
                                &adev->mc.vm_fault);
@@ -546,14 +558,7 @@ static int gmc_v9_0_sw_init(void *handle)
        if (r)
                return r;
 
-       /* Because of four level VMPTs, vm size is at least 512GB.
-        * The maximum size is 256TB (48bit).
-        */
-       if (amdgpu_vm_size < 512) {
-               DRM_WARN("VM size is at least 512GB!\n");
-               amdgpu_vm_size = 512;
-       }
-       adev->vm_manager.max_pfn = (uint64_t)amdgpu_vm_size << 18;
+       adev->vm_manager.max_pfn = adev->vm_manager.vm_size << 18;
 
        /* Set the internal MC address mask
         * This is the max address of the GPU's
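
The numbers chosen in the gmc_v9_0.c hunk above work out as follows; the sketch just checks the arithmetic (the "4-level" figure counts the leaf page tables plus three directory levels, as the replaced comment mentions):

/* Sketch verifying the Vega10 defaults above: vm_size = 1 << 18 GB is
 * 256 TB (48 bits), max_pfn = vm_size << 18 = 2^36 4 KB pages, and
 * 12 offset bits + 4 levels * 9 bits = 48 bits of virtual address.
 */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t vm_size_gb = 1ULL << 18;	/* 256 TB expressed in GB */
	uint64_t max_pfn = vm_size_gb << 18;	/* 4 KB pages             */
	unsigned int block_size = 9, levels = 4, page_bits = 12;

	assert(max_pfn == 1ULL << 36);
	assert(page_bits + levels * block_size == 48);
	return 0;
}
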
drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
index a065b4394ea78b8cd34f427b20d88c83f64c370e..3c9e27effc6a875df797d22b5c00344068c7eb4a 100644 (file)
@@ -242,7 +242,7 @@ int mmhub_v1_0_gart_enable(struct amdgpu_device *adev)
                                EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
                tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
                                PAGE_TABLE_BLOCK_SIZE,
-                               amdgpu_vm_block_size - 9);
+                               adev->vm_manager.block_size - 9);
                WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_CONTEXT1_CNTL) + i, tmp);
                WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32) + i*2, 0);
                WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32) + i*2, 0);