drm/amdgpu: use per VM entity for page table updates (v2)
author    Christian König <christian.koenig@amd.com>
Mon, 1 Feb 2016 11:53:58 +0000 (12:53 +0100)
committer Alex Deucher <alexander.deucher@amd.com>
Fri, 12 Feb 2016 20:35:16 +0000 (15:35 -0500)
Updates from different VMs can be processed independently.

v2: agd: rebase on upstream

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c

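For orientation before the diff: a minimal sketch of the new calling convention, using the names that appear in the hunks below. The wrapper function and its argument list are made up for illustration, and the kernel context around it is elided, so treat this as a sketch rather than buildable code.

/* Illustration only: simplified from the hunks below.
 *
 * Each VM now carries its own scheduler entity (struct amd_sched_entity
 * entity in struct amdgpu_vm).  amdgpu_job_submit() gains an entity
 * parameter: passing NULL keeps the old behaviour and falls back to the
 * per-ring kernel context entity, while the VM code passes &vm->entity so
 * page table updates from different VMs sit on separate entities and can
 * be scheduled independently.
 */
int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
		      struct amd_sched_entity *entity, void *owner,
		      struct fence **f);

/* example_callers() is a hypothetical helper, only here to contrast the
 * two kinds of submission seen in the diff.
 */
static int example_callers(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
			   struct amdgpu_job *job, struct fence **fence)
{
	int r;

	/* Generic submission (buffer copies, UVD/VCE messages): pass NULL
	 * and let amdgpu_job_submit() pick the kernel context entity.
	 */
	r = amdgpu_job_submit(job, ring, NULL,
			      AMDGPU_FENCE_OWNER_UNDEFINED, fence);
	if (r)
		return r;

	/* Page table update: submit through the VM's own entity. */
	return amdgpu_job_submit(job, ring, &vm->entity,
				 AMDGPU_FENCE_OWNER_VM, fence);
}

As the last hunks show, amdgpu_vm_init() creates that entity against the kernel-priority run queue of the page table ring and amdgpu_vm_fini() (and the new error path) tears it down again with amd_sched_entity_fini().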
index 99e660fec190b5c77682ef2d2dfc5abb6e79c0af..5947a95ac8530bc36cd1d5e0e4187824914cd02e 100644 (file)
@@ -800,7 +800,8 @@ int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
                             struct amdgpu_job **job);
 void amdgpu_job_free(struct amdgpu_job *job);
 int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
-                     void *owner, struct fence **f);
+                     struct amd_sched_entity *entity, void *owner,
+                     struct fence **f);
 
 struct amdgpu_ring {
        struct amdgpu_device            *adev;
@@ -917,6 +918,9 @@ struct amdgpu_vm {
 
        /* protecting freed */
        spinlock_t              freed_lock;
+
+       /* Scheduler entity for page table updates */
+       struct amd_sched_entity entity;
 };
 
 struct amdgpu_vm_manager_id {
index 0f6719e0ace0599d6ac9ae939a426d6e560a5dd8..97db6beeca13253937c309e9d1691b0cc7139ddf 100644 (file)
@@ -80,13 +80,17 @@ void amdgpu_job_free(struct amdgpu_job *job)
 }
 
 int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
-                     void *owner, struct fence **f)
+                     struct amd_sched_entity *entity, void *owner,
+                     struct fence **f)
 {
        struct amdgpu_device *adev = job->adev;
 
+       if (!entity)
+               entity = &adev->kernel_ctx.rings[ring->idx].entity;
+
        job->ring = ring;
        job->base.sched = &ring->sched;
-       job->base.s_entity = &adev->kernel_ctx.rings[ring->idx].entity;
+       job->base.s_entity = entity;
        job->base.s_fence = amd_sched_fence_create(job->base.s_entity, owner);
        if (!job->base.s_fence)
                return -ENOMEM;
index e47d5188c886bfed9ee7a4554f9435bc343077de..3deb7d3b218af4078feedf0da60b2f0747ff9697 100644 (file)
@@ -1053,7 +1053,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring,
 
        amdgpu_ring_pad_ib(ring, &job->ibs[0]);
        WARN_ON(job->ibs[0].length_dw > num_dw);
-       r = amdgpu_job_submit(job, ring, AMDGPU_FENCE_OWNER_UNDEFINED, fence);
+       r = amdgpu_job_submit(job, ring, NULL, AMDGPU_FENCE_OWNER_UNDEFINED, fence);
        if (r)
                goto error_free;
 
index c536630580f8106fe3c50a03c8111c312c50b276..f4283432bf4e432136fb800251354e57c54d2b8d 100644 (file)
@@ -880,7 +880,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
 
                amdgpu_job_free(job);
        } else {
-               r = amdgpu_job_submit(job, ring,
+               r = amdgpu_job_submit(job, ring, NULL,
                                      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
                if (r)
                        goto err_free;
index fb2ce3ed9aab188c6c2b19cb59ae47260e2cce6e..8a3119379cd8dc23557da14d6c23ad0d37d5e073 100644 (file)
@@ -481,7 +481,7 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
 
                amdgpu_job_free(job);
        } else {
-               r = amdgpu_job_submit(job, ring,
+               r = amdgpu_job_submit(job, ring, NULL,
                                      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
                if (r)
                        goto err;
index b291b1a4611a89211c5288a488a0c31e83e3b5ba..5e38b344d56b566313e6a827e5b9a17d7c88026f 100644 (file)
@@ -322,6 +322,7 @@ static void amdgpu_vm_update_pages(struct amdgpu_device *adev,
  * need to reserve bo first before calling it.
  */
 static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
+                             struct amdgpu_vm *vm,
                              struct amdgpu_bo *bo)
 {
        struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring;
@@ -351,7 +352,8 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
        amdgpu_ring_pad_ib(ring, &job->ibs[0]);
 
        WARN_ON(job->ibs[0].length_dw > 64);
-       r = amdgpu_job_submit(job, ring, AMDGPU_FENCE_OWNER_VM, &fence);
+       r = amdgpu_job_submit(job, ring, &vm->entity,
+                             AMDGPU_FENCE_OWNER_VM, &fence);
        if (r)
                goto error_free;
 
@@ -476,7 +478,8 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
                amdgpu_sync_resv(adev, &job->sync, pd->tbo.resv,
                                 AMDGPU_FENCE_OWNER_VM);
                WARN_ON(ib->length_dw > ndw);
-               r = amdgpu_job_submit(job, ring, AMDGPU_FENCE_OWNER_VM, &fence);
+               r = amdgpu_job_submit(job, ring, &vm->entity,
+                                     AMDGPU_FENCE_OWNER_VM, &fence);
                if (r)
                        goto error_free;
 
@@ -729,7 +732,8 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 
        amdgpu_ring_pad_ib(ring, ib);
        WARN_ON(ib->length_dw > ndw);
-       r = amdgpu_job_submit(job, ring, AMDGPU_FENCE_OWNER_VM, &f);
+       r = amdgpu_job_submit(job, ring, &vm->entity,
+                             AMDGPU_FENCE_OWNER_VM, &f);
        if (r)
                goto error_free;
 
@@ -1104,7 +1108,7 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
                 */
                pt->parent = amdgpu_bo_ref(vm->page_directory);
 
-               r = amdgpu_vm_clear_bo(adev, pt);
+               r = amdgpu_vm_clear_bo(adev, vm, pt);
                if (r) {
                        amdgpu_bo_unref(&pt);
                        goto error_free;
@@ -1265,9 +1269,11 @@ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
  */
 int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 {
+       struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring;
        const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE,
                AMDGPU_VM_PTE_COUNT * 8);
        unsigned pd_size, pd_entries;
+       struct amd_sched_rq *rq;
        int i, r;
 
        for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
@@ -1291,6 +1297,13 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
                return -ENOMEM;
        }
 
+       /* create scheduler entity for page table updates */
+       rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_KERNEL];
+       r = amd_sched_entity_init(&ring->sched, &vm->entity,
+                                 rq, amdgpu_sched_jobs);
+       if (r)
+               return r;
+
        vm->page_directory_fence = NULL;
 
        r = amdgpu_bo_create(adev, pd_size, align, true,
@@ -1298,22 +1311,27 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
                             AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
                             NULL, NULL, &vm->page_directory);
        if (r)
-               return r;
+               goto error_free_sched_entity;
+
        r = amdgpu_bo_reserve(vm->page_directory, false);
-       if (r) {
-               amdgpu_bo_unref(&vm->page_directory);
-               vm->page_directory = NULL;
-               return r;
-       }
-       r = amdgpu_vm_clear_bo(adev, vm->page_directory);
+       if (r)
+               goto error_free_page_directory;
+
+       r = amdgpu_vm_clear_bo(adev, vm, vm->page_directory);
        amdgpu_bo_unreserve(vm->page_directory);
-       if (r) {
-               amdgpu_bo_unref(&vm->page_directory);
-               vm->page_directory = NULL;
-               return r;
-       }
+       if (r)
+               goto error_free_page_directory;
 
        return 0;
+
+error_free_page_directory:
+       amdgpu_bo_unref(&vm->page_directory);
+       vm->page_directory = NULL;
+
+error_free_sched_entity:
+       amd_sched_entity_fini(&ring->sched, &vm->entity);
+
+       return r;
 }
 
 /**
@@ -1327,9 +1345,12 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
  */
 void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 {
+       struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring;
        struct amdgpu_bo_va_mapping *mapping, *tmp;
        int i;
 
+       amd_sched_entity_fini(&ring->sched, &vm->entity);
+
        if (!RB_EMPTY_ROOT(&vm->va)) {
                dev_err(adev->dev, "still active bo inside vm\n");
        }