drm/amdgpu: update pd shadow while updating pd V2
author	Chunming Zhou <David1.Zhou@amd.com>
	Mon, 15 Aug 2016 03:36:54 +0000 (11:36 +0800)
committer	Alex Deucher <alexander.deucher@amd.com>
	Mon, 22 Aug 2016 17:47:19 +0000 (13:47 -0400)
V2:
Check whether the shadow BO is valid before using it.

Signed-off-by: Chunming Zhou <David1.Zhou@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c

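Context for the change: an amdgpu buffer object can carry an optional shadow copy (bo->shadow), kept in system-domain memory so page-table contents can be recovered if VRAM is lost. This patch makes every page-directory update write both copies. Below is a condensed, self-contained sketch of the new control flow, with stub types standing in for the real amdgpu structures; the actual PDE writing and command submission are elided (see the full diff below).

#include <stdbool.h>

/* Stub types for illustration only -- not the real amdgpu structures. */
struct amdgpu_bo { struct amdgpu_bo *shadow; };
struct amdgpu_vm { struct amdgpu_bo *page_directory; };
struct amdgpu_device;

static int amdgpu_vm_update_pd_or_shadow(struct amdgpu_device *adev,
					 struct amdgpu_vm *vm,
					 bool shadow)
{
	struct amdgpu_bo *pd = shadow ? vm->page_directory->shadow
				      : vm->page_directory;

	if (!pd)	/* no shadow allocated for this VM: nothing to do */
		return 0;

	/* ... walk the page tables and rewrite any changed PDEs in @pd ... */
	return 0;
}

int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
				    struct amdgpu_vm *vm)
{
	/* Update the shadow first, then the real page directory. */
	int r = amdgpu_vm_update_pd_or_shadow(adev, vm, true);

	if (r)
		return r;
	return amdgpu_vm_update_pd_or_shadow(adev, vm, false);
}
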
index 1d6f077d605bd021b0791398c86361db5028b2b8..60030f951152cfcbbdd138fba82ee78f4aae325e 100644
@@ -816,6 +816,7 @@ struct amdgpu_ring {
 struct amdgpu_vm_pt {
        struct amdgpu_bo_list_entry     entry;
        uint64_t                        addr;
+       uint64_t                        shadow_addr;
 };
 
 struct amdgpu_vm {
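
Why a separate shadow_addr field: addr is a dirty-tracking cache that records the page-table address last written into each PDE, so unchanged entries can be skipped on the next update. The shadow page directory is a distinct buffer with its own write history, and therefore needs its own cache. A small self-contained toy (the names here are illustrative, not from the driver) demonstrating the per-copy tracking:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct pt_entry {
	uint64_t addr;		/* last address written to the real PDE */
	uint64_t shadow_addr;	/* last address written to the shadow PDE */
};

/* Returns true when the PDE actually needs rewriting. */
static bool pde_needs_update(struct pt_entry *e, uint64_t pt, bool shadow)
{
	uint64_t *cache = shadow ? &e->shadow_addr : &e->addr;

	if (*cache == pt)
		return false;	/* unchanged: skip the write */
	*cache = pt;
	return true;
}

int main(void)
{
	struct pt_entry e = { 0, 0 };

	printf("%d\n", pde_needs_update(&e, 0x1000, false)); /* 1: real PD dirty */
	printf("%d\n", pde_needs_update(&e, 0x1000, true));  /* 1: shadow tracked separately */
	printf("%d\n", pde_needs_update(&e, 0x1000, false)); /* 0: real PD cached */
	return 0;
}
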
index 7ca2e8fa5906cb06c30bf1ef0222f6100f2d5938..1eae307cdfd4783c66b084e929541db7e7ab331c 100644
@@ -604,24 +604,14 @@ static uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
        return result;
 }
 
-/**
- * amdgpu_vm_update_pdes - make sure that page directory is valid
- *
- * @adev: amdgpu_device pointer
- * @vm: requested vm
- * @start: start of GPU address range
- * @end: end of GPU address range
- *
- * Allocates new page tables if necessary
- * and updates the page directory.
- * Returns 0 for success, error for failure.
- */
-int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
-                                   struct amdgpu_vm *vm)
+static int amdgpu_vm_update_pd_or_shadow(struct amdgpu_device *adev,
+                                        struct amdgpu_vm *vm,
+                                        bool shadow)
 {
        struct amdgpu_ring *ring;
-       struct amdgpu_bo *pd = vm->page_directory;
-       uint64_t pd_addr = amdgpu_bo_gpu_offset(pd);
+       struct amdgpu_bo *pd = shadow ? vm->page_directory->shadow :
+               vm->page_directory;
+       uint64_t pd_addr;
        uint32_t incr = AMDGPU_VM_PTE_COUNT * 8;
        uint64_t last_pde = ~0, last_pt = ~0;
        unsigned count = 0, pt_idx, ndw;
@@ -631,6 +621,9 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
 
        int r;
 
+       if (!pd)
+               return 0;
+       pd_addr = amdgpu_bo_gpu_offset(pd);
        ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
 
        /* padding, etc. */
@@ -656,9 +649,15 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
                        continue;
 
                pt = amdgpu_bo_gpu_offset(bo);
-               if (vm->page_tables[pt_idx].addr == pt)
-                       continue;
-               vm->page_tables[pt_idx].addr = pt;
+               if (!shadow) {
+                       if (vm->page_tables[pt_idx].addr == pt)
+                               continue;
+                       vm->page_tables[pt_idx].addr = pt;
+               } else {
+                       if (vm->page_tables[pt_idx].shadow_addr == pt)
+                               continue;
+                       vm->page_tables[pt_idx].shadow_addr = pt;
+               }
 
                pde = pd_addr + pt_idx * 8;
                if (((last_pde + 8 * count) != pde) ||
@@ -709,6 +708,29 @@ error_free:
        return r;
 }
 
+/**
+ * amdgpu_vm_update_page_directory - make sure that page directory is valid
+ *
+ * @adev: amdgpu_device pointer
+ * @vm: requested vm
+ *
+ * Allocates new page tables if necessary
+ * and updates the page directory and,
+ * if present, its shadow copy.
+ *
+ * Returns 0 for success, error for failure.
+ */
+int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
+                                   struct amdgpu_vm *vm)
+{
+       int r;
+
+       r = amdgpu_vm_update_pd_or_shadow(adev, vm, true);
+       if (r)
+               return r;
+       return amdgpu_vm_update_pd_or_shadow(adev, vm, false);
+}
+
 /**
  * amdgpu_vm_update_ptes - make sure that page tables are valid
  *