drm/amdgpu: bind BOs with GTT space allocated directly v2
Author:     Christian König <christian.koenig@amd.com>
AuthorDate: Fri, 30 Jun 2017 08:41:07 +0000 (10:41 +0200)
Commit:     Alex Deucher <alexander.deucher@amd.com>
CommitDate: Fri, 14 Jul 2017 15:05:59 +0000 (11:05 -0400)
This avoids binding them later on.

v2: fix typo in function name

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Acked-by: Felix Kuehling <Felix.Kuehling@amd.com>
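
For context, the sketch below is not part of the patch; all names and values are simplified stand-ins for the real TTM/amdgpu structures. It models the decision the backend bind callback makes after this change: when the GTT manager has already assigned an address at placement time, the pages are bound to the GART right away, otherwise binding is still deferred until amdgpu_ttm_bind() runs.

/* Illustrative only: simplified stand-ins, not kernel code. */
#include <stdbool.h>
#include <stdio.h>

#define FAKE_INVALID_OFFSET (~0ULL)	/* stand-in for AMDGPU_BO_INVALID_OFFSET */
#define FAKE_PAGE_SHIFT 12

struct fake_mem_reg {
	unsigned long long start;	/* page offset picked by the manager, or FAKE_INVALID_OFFSET */
};

/* Mirrors the idea behind amdgpu_gtt_mgr_is_allocated(): was space assigned yet? */
static bool fake_is_allocated(const struct fake_mem_reg *mem)
{
	return mem->start != FAKE_INVALID_OFFSET;
}

static void fake_backend_bind(const struct fake_mem_reg *mem)
{
	if (fake_is_allocated(mem))
		printf("bind now at GTT offset 0x%llx\n", mem->start << FAKE_PAGE_SHIFT);
	else
		printf("defer: bind later once address space is allocated\n");
}

int main(void)
{
	struct fake_mem_reg direct = { .start = 0x100 };
	struct fake_mem_reg deferred = { .start = FAKE_INVALID_OFFSET };

	fake_backend_bind(&direct);	/* space allocated directly -> bound right away */
	fake_backend_bind(&deferred);	/* no space yet -> bound later */
	return 0;
}
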
drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
index f7d22c44034d43cce77ecd096c953e18aadc955d..1ef625550442d237b088f66495aa6cabcd833be6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
@@ -80,6 +80,20 @@ static int amdgpu_gtt_mgr_fini(struct ttm_mem_type_manager *man)
        return 0;
 }
 
+/**
+ * amdgpu_gtt_mgr_is_allocated - Check if mem has address space
+ *
+ * @mem: the mem object to check
+ *
+ * Check if a mem object already has address space allocated.
+ */
+bool amdgpu_gtt_mgr_is_allocated(struct ttm_mem_reg *mem)
+{
+       struct drm_mm_node *node = mem->mm_node;
+
+       return (node->start != AMDGPU_BO_INVALID_OFFSET);
+}
+
 /**
  * amdgpu_gtt_mgr_alloc - allocate new ranges
  *
@@ -101,7 +115,7 @@ int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man,
        unsigned long fpfn, lpfn;
        int r;
 
-       if (node->start != AMDGPU_BO_INVALID_OFFSET)
+       if (amdgpu_gtt_mgr_is_allocated(mem))
                return 0;
 
        if (place)
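
The new helper relies on the manager's sentinel: a node whose start is still AMDGPU_BO_INVALID_OFFSET has not been given GTT space yet, which also makes the allocation path above idempotent. A generic sketch of that sentinel pattern follows (illustrative only, simplified types; not taken from the driver).

#include <assert.h>
#include <stdbool.h>

#define SENTINEL_UNPLACED (~0UL)	/* analogous to AMDGPU_BO_INVALID_OFFSET */

struct fake_node {
	unsigned long start;	/* SENTINEL_UNPLACED until the manager places the node */
	unsigned long size;
};

static bool fake_node_is_placed(const struct fake_node *node)
{
	return node->start != SENTINEL_UNPLACED;
}

/* Placement is idempotent: an already placed node is left alone,
 * mirroring the early return in amdgpu_gtt_mgr_alloc(). */
static void fake_place(struct fake_node *node, unsigned long start)
{
	if (fake_node_is_placed(node))
		return;
	node->start = start;
}

int main(void)
{
	struct fake_node node = { .start = SENTINEL_UNPLACED, .size = 16 };

	fake_place(&node, 0x40);
	fake_place(&node, 0x80);	/* ignored: node already has space */
	assert(node.start == 0x40);
	return 0;
}
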
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index e97dfe888d55db112ff0d282111e73f03daa20f1..7064d31f0be5547847bf2fbde2e1cd440e973b56 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -681,6 +681,31 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
        sg_free_table(ttm->sg);
 }
 
+static int amdgpu_ttm_do_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
+{
+       struct amdgpu_ttm_tt *gtt = (void *)ttm;
+       uint64_t flags;
+       int r;
+
+       spin_lock(&gtt->adev->gtt_list_lock);
+       flags = amdgpu_ttm_tt_pte_flags(gtt->adev, ttm, mem);
+       gtt->offset = (u64)mem->start << PAGE_SHIFT;
+       r = amdgpu_gart_bind(gtt->adev, gtt->offset, ttm->num_pages,
+               ttm->pages, gtt->ttm.dma_address, flags);
+
+       if (r) {
+               DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
+                         ttm->num_pages, gtt->offset);
+               goto error_gart_bind;
+       }
+
+       list_add_tail(&gtt->list, &gtt->adev->gtt_list);
+error_gart_bind:
+       spin_unlock(&gtt->adev->gtt_list_lock);
+       return r;
+
+}
+
 static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
                                   struct ttm_mem_reg *bo_mem)
 {
@@ -704,7 +729,10 @@ static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
            bo_mem->mem_type == AMDGPU_PL_OA)
                return -EINVAL;
 
-       return 0;
+       if (amdgpu_gtt_mgr_is_allocated(bo_mem))
+               r = amdgpu_ttm_do_bind(ttm, bo_mem);
+
+       return r;
 }
 
 bool amdgpu_ttm_is_bound(struct ttm_tt *ttm)
@@ -717,8 +745,6 @@ bool amdgpu_ttm_is_bound(struct ttm_tt *ttm)
 int amdgpu_ttm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *bo_mem)
 {
        struct ttm_tt *ttm = bo->ttm;
-       struct amdgpu_ttm_tt *gtt = (void *)bo->ttm;
-       uint64_t flags;
        int r;
 
        if (!ttm || amdgpu_ttm_is_bound(ttm))
@@ -731,22 +757,7 @@ int amdgpu_ttm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *bo_mem)
                return r;
        }
 
-       spin_lock(&gtt->adev->gtt_list_lock);
-       flags = amdgpu_ttm_tt_pte_flags(gtt->adev, ttm, bo_mem);
-       gtt->offset = (u64)bo_mem->start << PAGE_SHIFT;
-       r = amdgpu_gart_bind(gtt->adev, gtt->offset, ttm->num_pages,
-               ttm->pages, gtt->ttm.dma_address, flags);
-
-       if (r) {
-               DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
-                         ttm->num_pages, gtt->offset);
-               goto error_gart_bind;
-       }
-
-       list_add_tail(&gtt->list, &gtt->adev->gtt_list);
-error_gart_bind:
-       spin_unlock(&gtt->adev->gtt_list_lock);
-       return r;
+       return amdgpu_ttm_do_bind(ttm, bo_mem);
 }
 
 int amdgpu_ttm_recover_gart(struct amdgpu_device *adev)
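
The code moved out of amdgpu_ttm_bind() into amdgpu_ttm_do_bind() keeps the usual take-lock, attempt, single-unlock-label error pattern, so both callers release the lock on every path. A minimal generic sketch of that pattern (illustrative only; it uses a pthread mutex rather than a kernel spinlock):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for the hardware bind step; returns 0 on success, negative on error. */
static int fake_gart_bind(int fail)
{
	return fail ? -22 /* -EINVAL */ : 0;
}

/* Same shape as amdgpu_ttm_do_bind(): take the lock, try the bind,
 * and funnel every exit through one unlock point. */
static int fake_do_bind(int fail)
{
	int r;

	pthread_mutex_lock(&list_lock);
	r = fake_gart_bind(fail);
	if (r) {
		fprintf(stderr, "bind failed: %d\n", r);
		goto out_unlock;
	}
	/* success path: e.g. add the object to a tracking list here */
out_unlock:
	pthread_mutex_unlock(&list_lock);
	return r;
}

int main(void)
{
	printf("ok path: %d\n", fake_do_bind(0));
	printf("error path: %d\n", fake_do_bind(1));
	return 0;
}
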
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index cd5bbfa2773fc5272af71a292f688a1ed56abd5e..776a20ae40c4e61bafb2102f73049fd434ee2350 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -56,6 +56,7 @@ struct amdgpu_mman {
 extern const struct ttm_mem_type_manager_func amdgpu_gtt_mgr_func;
 extern const struct ttm_mem_type_manager_func amdgpu_vram_mgr_func;
 
+bool amdgpu_gtt_mgr_is_allocated(struct ttm_mem_reg *mem);
 int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man,
                         struct ttm_buffer_object *tbo,
                         const struct ttm_place *place,