drm/amdgpu: add context entity init
authorChunming Zhou <david1.zhou@amd.com>
Tue, 21 Jul 2015 05:17:19 +0000 (13:17 +0800)
committerAlex Deucher <alexander.deucher@amd.com>
Mon, 17 Aug 2015 20:50:32 +0000 (16:50 -0400)
Signed-off-by: Chunming Zhou <david1.zhou@amd.com>
Acked-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Jammy Zhou <Jammy.Zhou@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c

index 815d40f5e6e126b539cc83031845448d854f8b9f..776339c2a95e7e68d48bde9578a59bf0e2e4717e 100644 (file)
@@ -994,10 +994,12 @@ struct amdgpu_vm_manager {
 struct amdgpu_ctx_ring {
        uint64_t        sequence;
        struct fence    *fences[AMDGPU_CTX_MAX_CS_PENDING];
+       struct amd_context_entity c_entity; /* scheduler entity, set up per ring in amdgpu_ctx_alloc() */
 };
 
 struct amdgpu_ctx {
        struct kref             refcount;
+       struct amdgpu_device    *adev; /* back-pointer to device; read in amdgpu_ctx_do_release() */
        unsigned                reset_counter;
        spinlock_t              ring_lock;
        struct amdgpu_ctx_ring  rings[AMDGPU_MAX_RINGS];
index 144edc97c6fedf6e6eecb318ffc53f90f7139304..557fb60f416b40bcd4d70735576f42bb6f7fac9f 100644 (file)
 static void amdgpu_ctx_do_release(struct kref *ref)
 {
        struct amdgpu_ctx *ctx;
+       struct amdgpu_device *adev;
        unsigned i, j;
 
        ctx = container_of(ref, struct amdgpu_ctx, refcount);
+       adev = ctx->adev;
+
 
        for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
                for (j = 0; j < AMDGPU_CTX_MAX_CS_PENDING; ++j)
                        fence_put(ctx->rings[i].fences[j]);
+
+       if (amdgpu_enable_scheduler) {
+               for (i = 0; i < adev->num_rings; i++)
+                       amd_context_entity_fini(adev->rings[i]->scheduler,
+                                               &ctx->rings[i].c_entity);
+       }
+
        kfree(ctx);
 }
 
@@ -43,7 +53,7 @@ int amdgpu_ctx_alloc(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv,
 {
        struct amdgpu_ctx *ctx;
        struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
-       int i, r;
+       int i, j, r; /* j: unwind index for partially initialized entities */
 
        ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
@@ -59,11 +69,35 @@ int amdgpu_ctx_alloc(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv,
        *id = (uint32_t)r;
 
        memset(ctx, 0, sizeof(*ctx));
+       ctx->adev = adev;
        kref_init(&ctx->refcount);
        spin_lock_init(&ctx->ring_lock);
        for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
                ctx->rings[i].sequence = 1;
        mutex_unlock(&mgr->lock);
+       if (amdgpu_enable_scheduler) {
+               /* create context entity for each ring */
+               for (i = 0; i < adev->num_rings; i++) {
+                       struct amd_run_queue *rq;
+                       if (fpriv)
+                               rq = &adev->rings[i]->scheduler->sched_rq;
+                       else
+                               rq = &adev->rings[i]->scheduler->kernel_rq;
+                       r = amd_context_entity_init(adev->rings[i]->scheduler,
+                                                   &ctx->rings[i].c_entity,
+                                                   NULL, rq, *id);
+                       if (r)
+                               break;
+               }
+
+               if (i < adev->num_rings) {
+                       /* unwind the entities that did initialize */
+                       for (j = 0; j < i; j++)
+                               amd_context_entity_fini(adev->rings[j]->scheduler,
+                                                       &ctx->rings[j].c_entity);
+                       kfree(ctx); /* NOTE(review): the ctx id allocated above is not released here — verify */
+                       return r; /* propagate the real amd_context_entity_init() error */
+               }
+       }
 
        return 0;
 }