drm/amdgpu: cleanup the write_pte implementations
author	Christian König <christian.koenig@amd.com>
	Fri, 12 Aug 2016 09:33:30 +0000 (11:33 +0200)
committer	Alex Deucher <alexander.deucher@amd.com>
	Tue, 16 Aug 2016 14:44:10 +0000 (10:44 -0400)
We don't need the gart mapping handling here any more.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Reviewed-by: Edward O'Callaghan <funfunctor@folklore1984.net>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
drivers/gpu/drm/amd/amdgpu/cik_sdma.c
drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c

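For reference (not part of the patch): a minimal standalone sketch of the simplified write_pte shape this cleanup introduces. The caller now pre-ORs the access flags into the address, so the per-ASIC hook only streams 64-bit values that advance by incr bytes per entry, with no GART lookup. All types and names below (fake_ib, fake_write_pte) are illustrative stand-ins, not kernel code, and the SDMA opcode header dword is omitted.

/* Standalone illustration of the simplified write_pte loop. */
#include <stdint.h>
#include <stdio.h>

#define lower_32_bits(v) ((uint32_t)((v) & 0xFFFFFFFFu))
#define upper_32_bits(v) ((uint32_t)((uint64_t)(v) >> 32))

struct fake_ib {
	uint32_t ptr[64];   /* command stream dwords */
	unsigned length_dw; /* dwords written so far */
};

/* Same shape as the new hook: one pre-combined value, no addr mapping. */
static void fake_write_pte(struct fake_ib *ib, uint64_t pe,
			   uint64_t value, unsigned count, uint32_t incr)
{
	unsigned ndw = count * 2;

	/* destination page-table address and payload size */
	ib->ptr[ib->length_dw++] = lower_32_bits(pe);
	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
	ib->ptr[ib->length_dw++] = ndw;
	/* stream the PTE values, advancing by incr bytes per entry */
	for (; ndw > 0; ndw -= 2) {
		ib->ptr[ib->length_dw++] = lower_32_bits(value);
		ib->ptr[ib->length_dw++] = upper_32_bits(value);
		value += incr;
	}
}

int main(void)
{
	struct fake_ib ib = { .length_dw = 0 };
	uint64_t addr = 0x100000, flags = 0x1; /* hypothetical values */

	/* the caller pre-combines address and flags, as
	 * amdgpu_vm_update_pages now does with "addr | flags" */
	fake_write_pte(&ib, 0x2000, addr | flags, 2, 4096);

	for (unsigned i = 0; i < ib.length_dw; i++)
		printf("dw[%u] = 0x%08x\n", i, (unsigned)ib.ptr[i]);
	return 0;
}
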
index d18ae01bd339eacd0021a8c963e3b173293a7042..387b4979f45aa89a509c76a632ee65a87b470882 100644 (file)
@@ -250,10 +250,9 @@ struct amdgpu_vm_pte_funcs {
                         uint64_t pe, uint64_t src,
                         unsigned count);
        /* write pte one entry at a time with addr mapping */
-       void (*write_pte)(struct amdgpu_ib *ib,
-                         const dma_addr_t *pages_addr, uint64_t pe,
-                         uint64_t addr, unsigned count,
-                         uint32_t incr, uint32_t flags);
+       void (*write_pte)(struct amdgpu_ib *ib, uint64_t pe,
+                         uint64_t value, unsigned count,
+                         uint32_t incr);
        /* for linear pte/pde updates without addr mapping */
        void (*set_pte_pde)(struct amdgpu_ib *ib,
                            uint64_t pe,
@@ -965,7 +964,6 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
                      struct amdgpu_job *job);
 int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job);
 void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id);
-uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr);
 int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
                                    struct amdgpu_vm *vm);
 int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
@@ -2271,7 +2269,7 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
 #define amdgpu_gart_flush_gpu_tlb(adev, vmid) (adev)->gart.gart_funcs->flush_gpu_tlb((adev), (vmid))
 #define amdgpu_gart_set_pte_pde(adev, pt, idx, addr, flags) (adev)->gart.gart_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags))
 #define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count)))
-#define amdgpu_vm_write_pte(adev, ib, pa, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pa), (pe), (addr), (count), (incr), (flags)))
+#define amdgpu_vm_write_pte(adev, ib, pe, value, count, incr) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pe), (value), (count), (incr)))
 #define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags)))
 #define amdgpu_ring_parse_cs(r, p, ib) ((r)->funcs->parse_cs((p), (ib)))
 #define amdgpu_ring_test_ring(r) (r)->funcs->test_ring((r))
index 51db44abdfa5ecec3fb146897f0669e19b5efd18..e0e40aca0f786752c215e667683cb24ef0851090 100644 (file)
@@ -491,8 +491,8 @@ static void amdgpu_vm_update_pages(struct amdgpu_pte_update_params *params,
                        pe, (params->src + (addr >> 12) * 8), count);
 
        } else if (count < 3) {
-               amdgpu_vm_write_pte(params->adev, params->ib, NULL, pe, addr,
-                                   count, incr, flags);
+               amdgpu_vm_write_pte(params->adev, params->ib, pe,
+                                   addr | flags, count, incr);
 
        } else {
                amdgpu_vm_set_pte_pde(params->adev, params->ib, pe, addr,
@@ -569,21 +569,15 @@ error:
  * Look up the physical address of the page that the pte resolves
  * to and return the pointer for the page table entry.
  */
-uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
+static uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
 {
        uint64_t result;
 
-       if (pages_addr) {
-               /* page table offset */
-               result = pages_addr[addr >> PAGE_SHIFT];
-
-               /* in case cpu page size != gpu page size*/
-               result |= addr & (~PAGE_MASK);
+       /* page table offset */
+       result = pages_addr[addr >> PAGE_SHIFT];
 
-       } else {
-               /* No mapping required */
-               result = addr;
-       }
+       /* in case cpu page size != gpu page size*/
+       result |= addr & (~PAGE_MASK);
 
        result &= 0xFFFFFFFFFFFFF000ULL;
 
index ee6466912497b7cfc608db429e144d3a2dc3387e..e5e44f42e20e77993754b1604acb1e51137d10a5 100644 (file)
@@ -719,39 +719,27 @@ static void cik_sdma_vm_copy_pte(struct amdgpu_ib *ib,
  *
  * @ib: indirect buffer to fill with commands
  * @pe: addr of the page entry
- * @addr: dst addr to write into pe
+ * @value: dst addr to write into pe
  * @count: number of page entries to update
  * @incr: increase next addr by incr bytes
- * @flags: access flags
  *
  * Update PTEs by writing them manually using sDMA (CIK).
  */
-static void cik_sdma_vm_write_pte(struct amdgpu_ib *ib,
-                                 const dma_addr_t *pages_addr, uint64_t pe,
-                                 uint64_t addr, unsigned count,
-                                 uint32_t incr, uint32_t flags)
+static void cik_sdma_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
+                                 uint64_t value, unsigned count,
+                                 uint32_t incr)
 {
-       uint64_t value;
-       unsigned ndw;
-
-       while (count) {
-               ndw = count * 2;
-               if (ndw > 0xFFFFE)
-                       ndw = 0xFFFFE;
-
-               /* for non-physically contiguous pages (system) */
-               ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_WRITE,
-                       SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
-               ib->ptr[ib->length_dw++] = pe;
-               ib->ptr[ib->length_dw++] = upper_32_bits(pe);
-               ib->ptr[ib->length_dw++] = ndw;
-               for (; ndw > 0; ndw -= 2, --count, pe += 8) {
-                       value = amdgpu_vm_map_gart(pages_addr, addr);
-                       addr += incr;
-                       value |= flags;
-                       ib->ptr[ib->length_dw++] = value;
-                       ib->ptr[ib->length_dw++] = upper_32_bits(value);
-               }
+       unsigned ndw = count * 2;
+
+       ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_WRITE,
+               SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
+       ib->ptr[ib->length_dw++] = lower_32_bits(pe);
+       ib->ptr[ib->length_dw++] = upper_32_bits(pe);
+       ib->ptr[ib->length_dw++] = ndw;
+       for (; ndw > 0; ndw -= 2) {
+               ib->ptr[ib->length_dw++] = lower_32_bits(value);
+               ib->ptr[ib->length_dw++] = upper_32_bits(value);
+               value += incr;
        }
 }
 
index 1351c7e834a21653a358ad3578dad91f60de9da9..af0f0d283472477242226eb2c31467bd452d6526 100644 (file)
@@ -774,39 +774,27 @@ static void sdma_v2_4_vm_copy_pte(struct amdgpu_ib *ib,
  *
  * @ib: indirect buffer to fill with commands
  * @pe: addr of the page entry
- * @addr: dst addr to write into pe
+ * @value: dst addr to write into pe
  * @count: number of page entries to update
  * @incr: increase next addr by incr bytes
- * @flags: access flags
  *
  * Update PTEs by writing them manually using sDMA (CIK).
  */
-static void sdma_v2_4_vm_write_pte(struct amdgpu_ib *ib,
-                                  const dma_addr_t *pages_addr, uint64_t pe,
-                                  uint64_t addr, unsigned count,
-                                  uint32_t incr, uint32_t flags)
+static void sdma_v2_4_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
+                                  uint64_t value, unsigned count,
+                                  uint32_t incr)
 {
-       uint64_t value;
-       unsigned ndw;
-
-       while (count) {
-               ndw = count * 2;
-               if (ndw > 0xFFFFE)
-                       ndw = 0xFFFFE;
+       unsigned ndw = count * 2;
 
-               /* for non-physically contiguous pages (system) */
-               ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
-                       SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
-               ib->ptr[ib->length_dw++] = pe;
-               ib->ptr[ib->length_dw++] = upper_32_bits(pe);
-               ib->ptr[ib->length_dw++] = ndw;
-               for (; ndw > 0; ndw -= 2, --count, pe += 8) {
-                       value = amdgpu_vm_map_gart(pages_addr, addr);
-                       addr += incr;
-                       value |= flags;
-                       ib->ptr[ib->length_dw++] = value;
-                       ib->ptr[ib->length_dw++] = upper_32_bits(value);
-               }
+       ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
+               SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
+       ib->ptr[ib->length_dw++] = lower_32_bits(pe);
+       ib->ptr[ib->length_dw++] = upper_32_bits(pe);
+       ib->ptr[ib->length_dw++] = ndw;
+       for (; ndw > 0; ndw -= 2) {
+               ib->ptr[ib->length_dw++] = lower_32_bits(value);
+               ib->ptr[ib->length_dw++] = upper_32_bits(value);
+               value += incr;
        }
 }
 
index 3a63a4fcf8074d1e84b029cdc5ca3106caf792dd..88faaee37258704d3ff2712659dad8540f247f4a 100644 (file)
@@ -1001,39 +1001,27 @@ static void sdma_v3_0_vm_copy_pte(struct amdgpu_ib *ib,
  *
  * @ib: indirect buffer to fill with commands
  * @pe: addr of the page entry
- * @addr: dst addr to write into pe
+ * @value: dst addr to write into pe
  * @count: number of page entries to update
  * @incr: increase next addr by incr bytes
- * @flags: access flags
  *
  * Update PTEs by writing them manually using sDMA (CIK).
  */
-static void sdma_v3_0_vm_write_pte(struct amdgpu_ib *ib,
-                                  const dma_addr_t *pages_addr, uint64_t pe,
-                                  uint64_t addr, unsigned count,
-                                  uint32_t incr, uint32_t flags)
+static void sdma_v3_0_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
+                                  uint64_t value, unsigned count,
+                                  uint32_t incr)
 {
-       uint64_t value;
-       unsigned ndw;
-
-       while (count) {
-               ndw = count * 2;
-               if (ndw > 0xFFFFE)
-                       ndw = 0xFFFFE;
+       unsigned ndw = count * 2;
 
-               /* for non-physically contiguous pages (system) */
-               ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
-                       SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
-               ib->ptr[ib->length_dw++] = pe;
-               ib->ptr[ib->length_dw++] = upper_32_bits(pe);
-               ib->ptr[ib->length_dw++] = ndw;
-               for (; ndw > 0; ndw -= 2, --count, pe += 8) {
-                       value = amdgpu_vm_map_gart(pages_addr, addr);
-                       addr += incr;
-                       value |= flags;
-                       ib->ptr[ib->length_dw++] = value;
-                       ib->ptr[ib->length_dw++] = upper_32_bits(value);
-               }
+       ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
+               SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
+       ib->ptr[ib->length_dw++] = lower_32_bits(pe);
+       ib->ptr[ib->length_dw++] = upper_32_bits(pe);
+       ib->ptr[ib->length_dw++] = ndw;
+       for (; ndw > 0; ndw -= 2) {
+               ib->ptr[ib->length_dw++] = lower_32_bits(value);
+               ib->ptr[ib->length_dw++] = upper_32_bits(value);
+               value += incr;
        }
 }