drm/radeon: split r6xx and r7xx copy_dma functions
author Alex Deucher <alexander.deucher@amd.com>
Fri, 4 Jan 2013 14:24:18 +0000 (09:24 -0500)
committer Alex Deucher <alexander.deucher@amd.com>
Fri, 4 Jan 2013 14:29:54 +0000 (09:29 -0500)
- r6xx actually uses a slightly different packet format,
  although both formats seem to work OK.
- r7xx doesn't have the count-multiple-of-2 limitation.

Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
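
To make the sizing change concrete, here is a minimal userspace sketch of the ring-space math the patch touches. This is illustrative, not driver code; it assumes 4 KiB GPU pages (RADEON_GPU_PAGE_SHIFT == 12, as in radeon.h) and an arbitrary example transfer size.

#include <stdio.h>

#define RADEON_GPU_PAGE_SHIFT 12
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        unsigned num_gpu_pages = 1024;  /* example: a 4 MiB move */
        unsigned size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;

        /* r6xx after this patch: 4-dword packets, even counts only */
        unsigned r6_loops = DIV_ROUND_UP(size_in_dw, 0xFFFEu);
        /* r7xx (rv770_copy_dma): 5-dword packets, full 0xFFFF count */
        unsigned r7_loops = DIV_ROUND_UP(size_in_dw, 0xFFFFu);

        printf("r6xx: %u loops, %u ring dwords\n", r6_loops, r6_loops * 4 + 8);
        printf("r7xx: %u loops, %u ring dwords\n", r7_loops, r7_loops * 5 + 8);
        return 0;
}

Each r6xx copy packet now costs 4 ring dwords because both 8-bit high-address bytes share a single dword (see the r600.c hunk below), and the per-packet count is capped at 0xFFFE to keep it a multiple of 2; rv770_copy_dma keeps the original 5-dword layout and 0xFFFF cap. The extra 8 dwords in each radeon_ring_lock() call leave room for the fence/semaphore emission around the loop.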
drivers/gpu/drm/radeon/r600.c
drivers/gpu/drm/radeon/radeon_asic.c
drivers/gpu/drm/radeon/radeon_asic.h
drivers/gpu/drm/radeon/rv770.c

diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 923f9364704235cc478903c21d444072608dcf91..537e259b383720d97bbd0b452e74a1c2688eec49 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -2646,7 +2646,7 @@ int r600_copy_blit(struct radeon_device *rdev,
  * @num_gpu_pages: number of GPU pages to xfer
  * @fence: radeon fence object
  *
- * Copy GPU paging using the DMA engine (r6xx-r7xx).
+ * Copy GPU paging using the DMA engine (r6xx).
  * Used by the radeon ttm implementation to move pages if
  * registered as the asic copy callback.
  */
@@ -2669,8 +2669,8 @@ int r600_copy_dma(struct radeon_device *rdev,
        }
 
        size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
-       num_loops = DIV_ROUND_UP(size_in_dw, 0xffff);
-       r = radeon_ring_lock(rdev, ring, num_loops * 5 + 8);
+       num_loops = DIV_ROUND_UP(size_in_dw, 0xFFFE);
+       r = radeon_ring_lock(rdev, ring, num_loops * 4 + 8);
        if (r) {
                DRM_ERROR("radeon: moving bo (%d).\n", r);
                radeon_semaphore_free(rdev, &sem, NULL);
@@ -2693,8 +2693,8 @@ int r600_copy_dma(struct radeon_device *rdev,
                radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw));
                radeon_ring_write(ring, dst_offset & 0xfffffffc);
                radeon_ring_write(ring, src_offset & 0xfffffffc);
-               radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
-               radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
+               radeon_ring_write(ring, (((upper_32_bits(dst_offset) & 0xff) << 16) |
+                                        (upper_32_bits(src_offset) & 0xff)));
                src_offset += cur_size_in_dw * 4;
                dst_offset += cur_size_in_dw * 4;
        }
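
The combined high-address dword written by the '+' lines above can be read as a standalone helper. This is an illustrative rewrite, not code from the patch, and the helper name is invented:

#include <stdint.h>

/* dst bits 39:32 land in dword bits 23:16, src bits 39:32 in bits 7:0,
 * mirroring the upper_32_bits() & 0xff masking in the hunk above */
static inline uint32_t r6xx_copy_hi_dword(uint64_t dst_offset,
                                          uint64_t src_offset)
{
        return (((uint32_t)(dst_offset >> 32) & 0xff) << 16) |
               ((uint32_t)(src_offset >> 32) & 0xff);
}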
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index 596bcbe80ed06d5a8e50c271ded50d0a9003afaf..9056fafb00ea35eda70d2a576726abf677808750 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -1140,9 +1140,9 @@ static struct radeon_asic rv770_asic = {
        .copy = {
                .blit = &r600_copy_blit,
                .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
-               .dma = &r600_copy_dma,
+               .dma = &rv770_copy_dma,
                .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
-               .copy = &r600_copy_dma,
+               .copy = &rv770_copy_dma,
                .copy_ring_index = R600_RING_TYPE_DMA_INDEX,
        },
        .surface = {
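
For context: nothing calls rv770_copy_dma by name outside this table; the ttm move path reaches it through the asic dispatch macros, roughly along these lines (a paraphrase of the radeon.h macros of this era, not a verbatim quote):

/* rdev->asic->copy.dma / .copy are what the hunk above repoints */
#define radeon_copy_dma(rdev, s, d, np, f) \
        (rdev)->asic->copy.dma((rdev), (s), (d), (np), (f))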
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 5f4882cc2152eda52e3bf25bf3bd9573bf4e610a..15d70e613076d48e89aff920becbd7e18419a7d8 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -403,6 +403,10 @@ u32 rv770_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
 void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
 void r700_cp_stop(struct radeon_device *rdev);
 void r700_cp_fini(struct radeon_device *rdev);
+int rv770_copy_dma(struct radeon_device *rdev,
+                 uint64_t src_offset, uint64_t dst_offset,
+                 unsigned num_gpu_pages,
+                 struct radeon_fence **fence);
 
 /*
  * evergreen
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 87c979c4f721c7e3c5a6ab757a29ef07bb6e9788..1b2444f4d8f4ffa7327fd24093704a594389b151 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -887,6 +887,80 @@ static int rv770_mc_init(struct radeon_device *rdev)
        return 0;
 }
 
+/**
+ * rv770_copy_dma - copy pages using the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @src_offset: src GPU address
+ * @dst_offset: dst GPU address
+ * @num_gpu_pages: number of GPU pages to xfer
+ * @fence: radeon fence object
+ *
+ * Copy GPU paging using the DMA engine (r7xx).
+ * Used by the radeon ttm implementation to move pages if
+ * registered as the asic copy callback.
+ */
+int rv770_copy_dma(struct radeon_device *rdev,
+                 uint64_t src_offset, uint64_t dst_offset,
+                 unsigned num_gpu_pages,
+                 struct radeon_fence **fence)
+{
+       struct radeon_semaphore *sem = NULL;
+       int ring_index = rdev->asic->copy.dma_ring_index;
+       struct radeon_ring *ring = &rdev->ring[ring_index];
+       u32 size_in_dw, cur_size_in_dw;
+       int i, num_loops;
+       int r = 0;
+
+       r = radeon_semaphore_create(rdev, &sem);
+       if (r) {
+               DRM_ERROR("radeon: moving bo (%d).\n", r);
+               return r;
+       }
+
+       size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
+       num_loops = DIV_ROUND_UP(size_in_dw, 0xFFFF);
+       r = radeon_ring_lock(rdev, ring, num_loops * 5 + 8);
+       if (r) {
+               DRM_ERROR("radeon: moving bo (%d).\n", r);
+               radeon_semaphore_free(rdev, &sem, NULL);
+               return r;
+       }
+
+       if (radeon_fence_need_sync(*fence, ring->idx)) {
+               radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
+                                           ring->idx);
+               radeon_fence_note_sync(*fence, ring->idx);
+       } else {
+               radeon_semaphore_free(rdev, &sem, NULL);
+       }
+
+       for (i = 0; i < num_loops; i++) {
+               cur_size_in_dw = size_in_dw;
+               if (cur_size_in_dw > 0xFFFF)
+                       cur_size_in_dw = 0xFFFF;
+               size_in_dw -= cur_size_in_dw;
+               radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw));
+               radeon_ring_write(ring, dst_offset & 0xfffffffc);
+               radeon_ring_write(ring, src_offset & 0xfffffffc);
+               radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
+               radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
+               src_offset += cur_size_in_dw * 4;
+               dst_offset += cur_size_in_dw * 4;
+       }
+
+       r = radeon_fence_emit(rdev, fence, ring->idx);
+       if (r) {
+               radeon_ring_unlock_undo(rdev, ring);
+               return r;
+       }
+
+       radeon_ring_unlock_commit(rdev, ring);
+       radeon_semaphore_free(rdev, &sem, *fence);
+
+       return r;
+}
+
 static int rv770_startup(struct radeon_device *rdev)
 {
        struct radeon_ring *ring;
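
One last piece of context on the 0xFFFF/0xFFFE caps seen in both functions: the copy packet's dword count lives in the low 16 bits of the header dword built by DMA_PACKET. The following is a paraphrase of that macro from r600d.h; the exact field positions are quoted from memory and should be treated as an assumption:

/* count occupies bits 15:0, hence the 0xFFFF ceiling; r6xx further
 * requires an even count, hence its 0xFFFE cap */
#define DMA_PACKET(cmd, t, s, n) ((((cmd) & 0xF) << 28) |       \
                                  (((t) & 0x1) << 23) |         \
                                  (((s) & 0x1) << 22) |         \
                                  ((n) & 0xFFFF))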