drm/radeon: implement bo copy callback using CP DMA (v2)
author	Alex Deucher <alexander.deucher@amd.com>	Thu, 11 Jul 2013 18:48:05 +0000 (14:48 -0400)
committer	Alex Deucher <alexander.deucher@amd.com>	Sun, 14 Jul 2013 14:11:26 +0000 (10:11 -0400)
Lighter weight than using the 3D engine.

v2: fix ring count

Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/radeon/r600.c
drivers/gpu/drm/radeon/r600d.h
drivers/gpu/drm/radeon/radeon_asic.h

index 2d3655f7f41e5a72bf898618e836cb7f816fd633..f7d494f264a57baff93eeefd1846bf4666f4321d 100644 (file)
@@ -3144,6 +3144,87 @@ int r600_copy_blit(struct radeon_device *rdev,
        return 0;
 }
 
+/**
+ * r600_copy_cpdma - copy pages using the CP DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @src_offset: src GPU address
+ * @dst_offset: dst GPU address
+ * @num_gpu_pages: number of GPU pages to xfer
+ * @fence: radeon fence object
+ *
+ * Copy GPU pages using the CP DMA engine (r6xx+).
+ * Used by the radeon ttm implementation to move pages if
+ * registered as the asic copy callback.
+ */
+int r600_copy_cpdma(struct radeon_device *rdev,
+                   uint64_t src_offset, uint64_t dst_offset,
+                   unsigned num_gpu_pages,
+                   struct radeon_fence **fence)
+{
+       struct radeon_semaphore *sem = NULL;
+       int ring_index = rdev->asic->copy.blit_ring_index;
+       struct radeon_ring *ring = &rdev->ring[ring_index];
+       u32 size_in_bytes, cur_size_in_bytes, tmp;
+       int i, num_loops;
+       int r = 0;
+
+       r = radeon_semaphore_create(rdev, &sem);
+       if (r) {
+               DRM_ERROR("radeon: moving bo (%d).\n", r);
+               return r;
+       }
+
+       size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
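+       /* the CP DMA byte count field is 21 bits, so each CP_DMA packet
+        * can copy at most 0x1fffff bytes
+        */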
+       num_loops = DIV_ROUND_UP(size_in_bytes, 0x1fffff);
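+       /* 6 dwords per CP_DMA packet, plus a fixed budget for the semaphore
+        * sync, the WAIT_UNTIL idle poll and the fence emit (the ring count
+        * fixed in v2)
+        */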
+       r = radeon_ring_lock(rdev, ring, num_loops * 6 + 21);
+       if (r) {
+               DRM_ERROR("radeon: moving bo (%d).\n", r);
+               radeon_semaphore_free(rdev, &sem, NULL);
+               return r;
+       }
+
+       if (radeon_fence_need_sync(*fence, ring->idx)) {
+               radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
+                                           ring->idx);
+               radeon_fence_note_sync(*fence, ring->idx);
+       } else {
+               radeon_semaphore_free(rdev, &sem, NULL);
+       }
+
+       for (i = 0; i < num_loops; i++) {
+               cur_size_in_bytes = size_in_bytes;
+               if (cur_size_in_bytes > 0x1fffff)
+                       cur_size_in_bytes = 0x1fffff;
+               size_in_bytes -= cur_size_in_bytes;
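+               /* set CP_SYNC on the final packet so the CP waits for the
+                * copy to finish before running the packets that follow
+                */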
+               tmp = upper_32_bits(src_offset) & 0xff;
+               if (size_in_bytes == 0)
+                       tmp |= PACKET3_CP_DMA_CP_SYNC;
+               radeon_ring_write(ring, PACKET3(PACKET3_CP_DMA, 4));
+               radeon_ring_write(ring, src_offset & 0xffffffff);
+               radeon_ring_write(ring, tmp);
+               radeon_ring_write(ring, dst_offset & 0xffffffff);
+               radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
+               radeon_ring_write(ring, cur_size_in_bytes);
+               src_offset += cur_size_in_bytes;
+               dst_offset += cur_size_in_bytes;
+       }
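+       /* have the CP poll WAIT_UNTIL until the CP DMA engine goes idle
+        * before the fence below is emitted
+        */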
+       radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+       radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
+       radeon_ring_write(ring, WAIT_CP_DMA_IDLE_bit);
+
+       r = radeon_fence_emit(rdev, fence, ring->idx);
+       if (r) {
+               radeon_ring_unlock_undo(rdev, ring);
+               return r;
+       }
+
+       radeon_ring_unlock_commit(rdev, ring);
+       radeon_semaphore_free(rdev, &sem, *fence);
+
+       return r;
+}
+
 /**
  * r600_copy_dma - copy pages using the DMA engine
  *
index f1b3084d8f5162d56515992facc21842a9e15fea..8e3fe815edab3224492e126bf7e47d5193be448f 100644 (file)
 #define                L2_BUSY                                         (1 << 0)
 
 #define        WAIT_UNTIL                                      0x8040
+#define         WAIT_CP_DMA_IDLE_bit                            (1 << 8)
 #define         WAIT_2D_IDLE_bit                                (1 << 14)
 #define         WAIT_3D_IDLE_bit                                (1 << 15)
 #define         WAIT_2D_IDLECLEAN_bit                           (1 << 16)
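For reference, the payload of the SET_CONFIG_REG packet emitted above is a dword index relative to the start of the config register space. Assuming PACKET3_SET_CONFIG_REG_OFFSET is 0x8000 (the r6xx config range base; an assumption, as it is not shown in this hunk), the index works out as:

	/* (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2
	 *   = (0x8040 - 0x8000) >> 2
	 *   = 0x10, the dword index of WAIT_UNTIL that the CP writes
	 */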
index 45d0693cddd5cc2493abc7247d78a1e1651f4d10..b04b5789f4a87bde7f83859134070d3441a7b27d 100644 (file)
@@ -340,6 +340,9 @@ int r600_uvd_ring_test(struct radeon_device *rdev, struct radeon_ring *ring);
 int r600_copy_blit(struct radeon_device *rdev,
                   uint64_t src_offset, uint64_t dst_offset,
                   unsigned num_gpu_pages, struct radeon_fence **fence);
+int r600_copy_cpdma(struct radeon_device *rdev,
+                   uint64_t src_offset, uint64_t dst_offset,
+                   unsigned num_gpu_pages, struct radeon_fence **fence);
 int r600_copy_dma(struct radeon_device *rdev,
                  uint64_t src_offset, uint64_t dst_offset,
                  unsigned num_gpu_pages, struct radeon_fence **fence);