drm/amdgpu: abstract amdgpu_job for scheduler
authorChunming Zhou <david1.zhou@amd.com>
Tue, 18 Aug 2015 07:16:40 +0000 (15:16 +0800)
committerAlex Deucher <alexander.deucher@amd.com>
Thu, 20 Aug 2015 21:00:35 +0000 (17:00 -0400)
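With the scheduler enabled, the submission state that used to ride along
in struct amdgpu_cs_parser (IBs, context, user fence, job lock, free_job
callback) moves into a dedicated struct amdgpu_job that embeds struct
amd_sched_job. amd_sched_push_job() now takes the embedded scheduler job
directly instead of an opaque data pointer, the prepare_job backend
callback becomes optional, and job cleanup runs synchronously from
process_job instead of a deferred work item. The job holds its own
context reference via the new amdgpu_ctx_get_ref() and releases its IBs
and user-fence BO from its free_job callback; the parser itself is freed
at the end of the ioctl.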
Signed-off-by: Chunming Zhou <david1.zhou@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
drivers/gpu/drm/amd/scheduler/gpu_scheduler.h

index 2fc58e6589866b215125931303f53364ddc360cc..95d4969369a6fd618ebb31dca21abbc8b28d1a5d 100644 (file)
@@ -183,6 +183,7 @@ struct amdgpu_vm;
 struct amdgpu_ring;
 struct amdgpu_semaphore;
 struct amdgpu_cs_parser;
+struct amdgpu_job;
 struct amdgpu_irq_src;
 struct amdgpu_fpriv;
 
@@ -871,7 +872,7 @@ int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
                                         struct amdgpu_ring *ring,
                                         struct amdgpu_ib *ibs,
                                         unsigned num_ibs,
-                                        int (*free_job)(struct amdgpu_cs_parser *),
+                                        int (*free_job)(struct amdgpu_job *),
                                         void *owner,
                                         struct fence **fence);
 
@@ -1040,6 +1041,7 @@ void amdgpu_ctx_fini(struct amdgpu_ctx *ctx);
 
 struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id);
 int amdgpu_ctx_put(struct amdgpu_ctx *ctx);
+struct amdgpu_ctx *amdgpu_ctx_get_ref(struct amdgpu_ctx *ctx);
 
 uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
                              struct fence *fence, uint64_t queued_seq);
@@ -1265,6 +1267,18 @@ struct amdgpu_cs_parser {
        struct amd_sched_fence *s_fence;
 };
 
+struct amdgpu_job {
+       struct amd_sched_job    base;
+       struct amdgpu_device    *adev;
+       struct amdgpu_ctx       *ctx;
+       struct drm_file         *owner;
+       struct amdgpu_ib        *ibs;
+       uint32_t                num_ibs;
+       struct mutex            job_lock;
+       struct amdgpu_user_fence uf;
+       int (*free_job)(struct amdgpu_job *sched_job);
+};
+
 static inline u32 amdgpu_get_ib_value(struct amdgpu_cs_parser *p, uint32_t ib_idx, int idx)
 {
        return p->ibs[ib_idx].ptr[idx];
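An aside on the layout above: struct amdgpu_job places the amd_sched_job base as its first member, which is what lets the backend and the scheduler core pass the same allocation back and forth with plain casts later in this patch. A stand-alone, user-space model of that pattern — every name here is illustrative, not part of the patch:

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct sched_job { int id; };                  /* stands in for amd_sched_job */

struct driver_job {
	struct sched_job base;                 /* must stay the first member */
	int (*free_job)(struct driver_job *);  /* models amdgpu_job::free_job */
};

static int free_driver_job(struct driver_job *job)
{
	free(job);
	return 0;
}

int main(void)
{
	struct driver_job *job = calloc(1, sizeof(*job));
	struct sched_job *handle;
	struct driver_job *back;

	if (!job)
		return 1;
	job->free_job = free_driver_job;
	handle = &job->base;            /* what the scheduler core sees */

	/* completion side: recover the backend job and release it */
	back = container_of(handle, struct driver_job, base);
	printf("recovered %p == %p\n", (void *)back, (void *)job);
	return back->free_job(back);
}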
index e4424b4db5d35512ac2962587aa98243b79171a0..c8de4b6194e8328674261f5d6519551136288733 100644 (file)
@@ -126,19 +126,6 @@ int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
        return 0;
 }
 
-static void amdgpu_job_work_func(struct work_struct *work)
-{
-       struct amdgpu_cs_parser *sched_job =
-               container_of(work, struct amdgpu_cs_parser,
-                            job_work);
-       mutex_lock(&sched_job->job_lock);
-       if (sched_job->free_job)
-               sched_job->free_job(sched_job);
-       mutex_unlock(&sched_job->job_lock);
-       /* after processing job, free memory */
-       fence_put(&sched_job->s_fence->base);
-       kfree(sched_job);
-}
 struct amdgpu_cs_parser *amdgpu_cs_parser_create(struct amdgpu_device *adev,
                                                struct drm_file *filp,
                                                struct amdgpu_ctx *ctx,
@@ -157,10 +144,6 @@ struct amdgpu_cs_parser *amdgpu_cs_parser_create(struct amdgpu_device *adev,
        parser->ctx = ctx;
        parser->ibs = ibs;
        parser->num_ibs = num_ibs;
-       if (amdgpu_enable_scheduler) {
-               mutex_init(&parser->job_lock);
-               INIT_WORK(&parser->job_work, amdgpu_job_work_func);
-       }
        for (i = 0; i < num_ibs; i++)
                ibs[i].ctx = ctx;
 
@@ -508,15 +491,17 @@ static void amdgpu_cs_parser_fini_late(struct amdgpu_cs_parser *parser)
        for (i = 0; i < parser->nchunks; i++)
                drm_free_large(parser->chunks[i].kdata);
        kfree(parser->chunks);
-       if (parser->ibs)
-               for (i = 0; i < parser->num_ibs; i++)
-                       amdgpu_ib_free(parser->adev, &parser->ibs[i]);
-       kfree(parser->ibs);
-       if (parser->uf.bo)
-               drm_gem_object_unreference_unlocked(&parser->uf.bo->gem_base);
-
        if (!amdgpu_enable_scheduler)
-               kfree(parser);
+       {
+               if (parser->ibs)
+                       for (i = 0; i < parser->num_ibs; i++)
+                               amdgpu_ib_free(parser->adev, &parser->ibs[i]);
+               kfree(parser->ibs);
+               if (parser->uf.bo)
+                       drm_gem_object_unreference_unlocked(&parser->uf.bo->gem_base);
+       }
+
+       kfree(parser);
 }
 
 /**
@@ -533,12 +518,6 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bo
        amdgpu_cs_parser_fini_late(parser);
 }
 
-static int amdgpu_cs_parser_free_job(struct amdgpu_cs_parser *sched_job)
-{
-       amdgpu_cs_parser_fini_late(sched_job);
-       return 0;
-}
-
 static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,
                                   struct amdgpu_vm *vm)
 {
@@ -874,6 +853,19 @@ static struct amdgpu_ring *amdgpu_cs_parser_get_ring(
        return ring;
 }
 
+static int amdgpu_cs_free_job(struct amdgpu_job *sched_job)
+{
+       int i;
+       amdgpu_ctx_put(sched_job->ctx);
+       if (sched_job->ibs)
+               for (i = 0; i < sched_job->num_ibs; i++)
+                       amdgpu_ib_free(sched_job->adev, &sched_job->ibs[i]);
+       kfree(sched_job->ibs);
+       if (sched_job->uf.bo)
+               drm_gem_object_unreference_unlocked(&sched_job->uf.bo->gem_base);
+       return 0;
+}
+
 int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 {
        struct amdgpu_device *adev = dev->dev_private;
@@ -900,33 +892,50 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
        }
 
        if (amdgpu_enable_scheduler && parser->num_ibs) {
+               struct amdgpu_job *job;
                struct amdgpu_ring * ring =
                        amdgpu_cs_parser_get_ring(adev, parser);
                r = amdgpu_cs_parser_prepare_job(parser);
                if (r)
                        goto out;
-               parser->ring = ring;
-               parser->free_job = amdgpu_cs_parser_free_job;
-               mutex_lock(&parser->job_lock);
-               r = amd_sched_push_job(ring->scheduler,
-                                      &parser->ctx->rings[ring->idx].entity,
-                                      parser,
-                                      &parser->s_fence);
+               job = kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL);
+               if (!job) {
+                       r = -ENOMEM;
+                       goto out;
+               }
+               job->base.sched = ring->scheduler;
+               job->base.s_entity = &parser->ctx->rings[ring->idx].entity;
+               job->adev = parser->adev;
+               job->ibs = parser->ibs;
+               job->num_ibs = parser->num_ibs;
+               job->owner = parser->filp;
+               job->ctx = amdgpu_ctx_get_ref(parser->ctx);
+               mutex_init(&job->job_lock);
+               if (job->ibs[job->num_ibs - 1].user) {
+                       memcpy(&job->uf, &parser->uf,
+                              sizeof(struct amdgpu_user_fence));
+                       job->ibs[job->num_ibs - 1].user = &job->uf;
+               }
+
+               job->free_job = amdgpu_cs_free_job;
+               mutex_lock(&job->job_lock);
+               r = amd_sched_push_job((struct amd_sched_job *)job);
                if (r) {
-                       mutex_unlock(&parser->job_lock);
+                       mutex_unlock(&job->job_lock);
+                       amdgpu_cs_free_job(job);
+                       kfree(job);
                        goto out;
                }
-               parser->ibs[parser->num_ibs - 1].sequence =
-                       amdgpu_ctx_add_fence(parser->ctx, ring,
-                                            &parser->s_fence->base,
-                                            parser->s_fence->v_seq);
-               cs->out.handle = parser->s_fence->v_seq;
+               job->ibs[job->num_ibs - 1].sequence =
+                       amdgpu_ctx_add_fence(job->ctx, ring,
+                                            &job->base.s_fence->base,
+                                            job->base.s_fence->v_seq);
+               cs->out.handle = job->base.s_fence->v_seq;
                list_sort(NULL, &parser->validated, cmp_size_smaller_first);
                ttm_eu_fence_buffer_objects(&parser->ticket,
                                &parser->validated,
-                               &parser->s_fence->base);
+                               &job->base.s_fence->base);
 
-               mutex_unlock(&parser->job_lock);
+               mutex_unlock(&job->job_lock);
+               amdgpu_cs_parser_fini_late(parser);
                up_read(&adev->exclusive_lock);
                return 0;
        }
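For orientation, the resulting ownership flow in the scheduler path (a reading aid derived from the hunks above, not code from the patch):

/*
 * amdgpu_cs_ioctl()            allocates the job, takes a ctx reference,
 *                              copies the user fence into the job
 * amd_sched_push_job()         creates job->base.s_fence, queues the job
 * amdgpu_cs_parser_fini_late() now frees only parser bookkeeping
 * amdgpu_sched_run_job()       submits job->ibs to the ring
 * amdgpu_sched_process_job()   calls job->free_job (amdgpu_cs_free_job),
 *                              drops s_fence, kfree(job)
 */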
index 08bc7722ddb8d38b8d87f03ac5cd126b7ef46bc6..8660c0854a1eab597c6d3478b916ce1ed76570b9 100644 (file)
@@ -219,6 +219,13 @@ struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id)
        return ctx;
 }
 
+struct amdgpu_ctx *amdgpu_ctx_get_ref(struct amdgpu_ctx *ctx)
+{
+       if (ctx)
+               kref_get(&ctx->refcount);
+       return ctx;
+}
+
 int amdgpu_ctx_put(struct amdgpu_ctx *ctx)
 {
        if (ctx == NULL)
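The helper only bumps the kref of a context the caller already holds, so the job can safely outlive the ioctl. The reference taken at submission is dropped again on the completion side; the intended pairing, using names from this patch:

	job->ctx = amdgpu_ctx_get_ref(parser->ctx);	/* amdgpu_cs_ioctl() */
	...
	amdgpu_ctx_put(sched_job->ctx);			/* amdgpu_cs_free_job() */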
index a86e38158afaead697bfbc8b9b9cddb1ea9b77cf..5b1ae18f5e8d953424978eca2d949f1653f9f4fc 100644 (file)
 #include <drm/drmP.h>
 #include "amdgpu.h"
 
-static int amdgpu_sched_prepare_job(struct amd_gpu_scheduler *sched,
-                                   struct amd_sched_entity *entity,
-                                   struct amd_sched_job *job)
-{
-       int r = 0;
-       struct amdgpu_cs_parser *sched_job;
-       if (!job || !job->data) {
-               DRM_ERROR("job is null\n");
-               return -EINVAL;
-       }
-
-       sched_job = (struct amdgpu_cs_parser *)job->data;
-       if (sched_job->prepare_job) {
-               r = sched_job->prepare_job(sched_job);
-               if (r) {
-                       DRM_ERROR("Prepare job error\n");
-                       schedule_work(&sched_job->job_work);
-               }
-       }
-       return r;
-}
-
 static struct fence *amdgpu_sched_run_job(struct amd_gpu_scheduler *sched,
                                          struct amd_sched_entity *entity,
                                          struct amd_sched_job *job)
 {
        int r = 0;
-       struct amdgpu_cs_parser *sched_job;
+       struct amdgpu_job *sched_job;
        struct amdgpu_fence *fence;
 
-       if (!job || !job->data) {
+       if (!job) {
                DRM_ERROR("job is null\n");
                return NULL;
        }
-       sched_job = (struct amdgpu_cs_parser *)job->data;
+       sched_job = (struct amdgpu_job *)job;
        mutex_lock(&sched_job->job_lock);
        r = amdgpu_ib_schedule(sched_job->adev,
                               sched_job->num_ibs,
                               sched_job->ibs,
-                              sched_job->filp);
+                              sched_job->owner);
        if (r)
                goto err;
        fence = amdgpu_fence_ref(sched_job->ibs[sched_job->num_ibs - 1].fence);
 
-       if (sched_job->run_job) {
-               r = sched_job->run_job(sched_job);
-               if (r)
-                       goto err;
-       }
-
        mutex_unlock(&sched_job->job_lock);
        return &fence->base;
 
 err:
        DRM_ERROR("Run job error\n");
        mutex_unlock(&sched_job->job_lock);
-       schedule_work(&sched_job->job_work);
+       sched->ops->process_job(sched, (struct amd_sched_job *)sched_job);
        return NULL;
 }
 
 static void amdgpu_sched_process_job(struct amd_gpu_scheduler *sched,
                                     struct amd_sched_job *job)
 {
-       struct amdgpu_cs_parser *sched_job;
+       struct amdgpu_job *sched_job;
 
-       if (!job || !job->data) {
+       if (!job) {
                DRM_ERROR("job is null\n");
                return;
        }
-       sched_job = (struct amdgpu_cs_parser *)job->data;
-       schedule_work(&sched_job->job_work);
+       sched_job = (struct amdgpu_job *)job;
+       mutex_lock(&sched_job->job_lock);
+       if (sched_job->free_job)
+               sched_job->free_job(sched_job);
+       mutex_unlock(&sched_job->job_lock);
+       /* after processing job, free memory */
+       fence_put(&sched_job->base.s_fence->base);
+       kfree(sched_job);
 }
 
 struct amd_sched_backend_ops amdgpu_sched_ops = {
-       .prepare_job = amdgpu_sched_prepare_job,
        .run_job = amdgpu_sched_run_job,
        .process_job = amdgpu_sched_process_job
 };
@@ -110,31 +87,34 @@ int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
                                         struct amdgpu_ring *ring,
                                         struct amdgpu_ib *ibs,
                                         unsigned num_ibs,
-                                        int (*free_job)(struct amdgpu_cs_parser *),
+                                        int (*free_job)(struct amdgpu_job *),
                                         void *owner,
                                         struct fence **f)
 {
        int r = 0;
        if (amdgpu_enable_scheduler) {
-               struct amdgpu_cs_parser *sched_job =
-                       amdgpu_cs_parser_create(adev, owner, &adev->kernel_ctx,
-                                               ibs, num_ibs);
-               if(!sched_job) {
+               struct amdgpu_job *job =
+                       kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL);
+               if (!job)
                        return -ENOMEM;
-               }
-               sched_job->free_job = free_job;
-               mutex_lock(&sched_job->job_lock);
-               r = amd_sched_push_job(ring->scheduler,
-                                      &adev->kernel_ctx.rings[ring->idx].entity,
-                                      sched_job, &sched_job->s_fence);
+               job->base.sched = ring->scheduler;
+               job->base.s_entity = &adev->kernel_ctx.rings[ring->idx].entity;
+               job->adev = adev;
+               job->ibs = ibs;
+               job->num_ibs = num_ibs;
+               job->owner = owner;
+               mutex_init(&job->job_lock);
+               job->free_job = free_job;
+               mutex_lock(&job->job_lock);
+               r = amd_sched_push_job((struct amd_sched_job *)job);
                if (r) {
-                       mutex_unlock(&sched_job->job_lock);
-                       kfree(sched_job);
+                       mutex_unlock(&job->job_lock);
+                       kfree(job);
                        return r;
                }
-               ibs[num_ibs - 1].sequence = sched_job->s_fence->v_seq;
-               *f = fence_get(&sched_job->s_fence->base);
-               mutex_unlock(&sched_job->job_lock);
+               ibs[num_ibs - 1].sequence = job->base.s_fence->v_seq;
+               *f = fence_get(&job->base.s_fence->base);
+               mutex_unlock(&job->job_lock);
        } else {
                r = amdgpu_ib_schedule(adev, num_ibs, ibs, owner);
                if (r)
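The free_job signature change ripples into every IP block below. A backend callback now receives the job itself and releases exactly what the job owns; a minimal sketch in the shape the UVD/VCE callbacks take (my_free_job is a hypothetical name):

static int my_free_job(struct amdgpu_job *sched_job)
{
	/* release resources the job owns; mirrors amdgpu_uvd_free_job below */
	amdgpu_ib_free(sched_job->adev, sched_job->ibs);
	kfree(sched_job->ibs);
	return 0;
}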
index 68369cf1e3185b022df0174fbc4e9398fa93e82f..b87355ccfb1d071282d183ceab7d6bc22c86fffe 100644 (file)
@@ -807,7 +807,7 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
 }
 
 static int amdgpu_uvd_free_job(
-       struct amdgpu_cs_parser *sched_job)
+       struct amdgpu_job *sched_job)
 {
        amdgpu_ib_free(sched_job->adev, sched_job->ibs);
        kfree(sched_job->ibs);
index 33ee6ae28f37b07cecaf8c25797c89c39099c745..1a984c934b1f274b7d099f9acf4c8352d3d6b59f 100644 (file)
@@ -340,7 +340,7 @@ void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
 }
 
 static int amdgpu_vce_free_job(
-       struct amdgpu_cs_parser *sched_job)
+       struct amdgpu_job *sched_job)
 {
        amdgpu_ib_free(sched_job->adev, sched_job->ibs);
        kfree(sched_job->ibs);
index a78a206e176ecdc8a2c96262c1d823cc3417b8f3..5b99214d0ba6f30451f1d2933727de856b3cd9ad 100644 (file)
@@ -307,7 +307,7 @@ static void amdgpu_vm_update_pages(struct amdgpu_device *adev,
 }
 
 static int amdgpu_vm_free_job(
-       struct amdgpu_cs_parser *sched_job)
+       struct amdgpu_job *sched_job)
 {
        int i;
        for (i = 0; i < sched_job->num_ibs; i++)
index 265d3e2f63cc97903d1436089a80475aba5011e5..462c1617d56ede1d5feeaad54b12c3009ba3867a 100644 (file)
@@ -282,30 +282,18 @@ int amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
  *          scheduler consumes some queued commands.
  *       -1 on other failure.
 */
-int amd_sched_push_job(struct amd_gpu_scheduler *sched,
-                      struct amd_sched_entity *c_entity,
-                      void *data,
-                      struct amd_sched_fence **fence)
+int amd_sched_push_job(struct amd_sched_job *sched_job)
 {
-       struct amd_sched_job *job;
-
+       struct amd_sched_fence *fence =
+               amd_sched_fence_create(sched_job->s_entity);
        if (!fence)
                return -EINVAL;
-       job = kzalloc(sizeof(struct amd_sched_job), GFP_KERNEL);
-       if (!job)
-               return -ENOMEM;
-       job->sched = sched;
-       job->s_entity = c_entity;
-       job->data = data;
-       *fence = amd_sched_fence_create(c_entity);
-       if ((*fence) == NULL) {
-               kfree(job);
-               return -EINVAL;
-       }
-       fence_get(&(*fence)->base);
-       job->s_fence = *fence;
-       while (kfifo_in_spinlocked(&c_entity->job_queue, &job, sizeof(void *),
-                                  &c_entity->queue_lock) != sizeof(void *)) {
+       fence_get(&fence->base);
+       sched_job->s_fence = fence;
+       while (kfifo_in_spinlocked(&sched_job->s_entity->job_queue,
+                                  &sched_job, sizeof(void *),
+                                  &sched_job->s_entity->queue_lock) !=
+              sizeof(void *)) {
                /**
                 * Current context used up all its IB slots
                 * wait here, or need to check whether GPU is hung
@@ -313,8 +301,8 @@ int amd_sched_push_job(struct amd_gpu_scheduler *sched,
                schedule();
        }
        /* first job wake up scheduler */
-       if ((kfifo_len(&c_entity->job_queue) / sizeof(void *)) == 1)
-               wake_up_interruptible(&sched->wait_queue);
+       if ((kfifo_len(&sched_job->s_entity->job_queue) / sizeof(void *)) == 1)
+               wake_up_interruptible(&sched_job->sched->wait_queue);
        return 0;
 }
 
@@ -333,10 +321,8 @@ static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
        list_del(&sched_job->list);
        atomic64_dec(&sched->hw_rq_count);
        spin_unlock_irqrestore(&sched->queue_lock, flags);
-
-       sched->ops->process_job(sched, sched_job);
        fence_put(&sched_job->s_fence->base);
-       kfree(sched_job);
+       sched->ops->process_job(sched, sched_job);
        wake_up_interruptible(&sched->wait_queue);
 }
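The ordering here matters: the core drops only the extra fence reference it took in amd_sched_push_job() and then hands the job to ops->process_job, which (see amdgpu_sched_process_job above) runs free_job, drops the remaining fence reference, and kfrees the job. The core no longer frees jobs itself, since it no longer allocates them.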
 
@@ -359,7 +345,9 @@ static int amd_sched_main(void *param)
                r = kfifo_out(&c_entity->job_queue, &job, sizeof(void *));
                if (r != sizeof(void *))
                        continue;
-               r = sched->ops->prepare_job(sched, c_entity, job);
+               r = 0;
+               if (sched->ops->prepare_job)
+                       r = sched->ops->prepare_job(sched, c_entity, job);
                if (!r) {
                        unsigned long flags;
                        spin_lock_irqsave(&sched->queue_lock, flags);
index ceb5918bfbeb4b5fd4a31622ebbbd13d5702e742..25e38d0301572066661e2b2160c39c37b49c1c9d 100644 (file)
@@ -81,7 +81,6 @@ struct amd_sched_job {
        struct fence_cb                 cb;
        struct amd_gpu_scheduler        *sched;
        struct amd_sched_entity         *s_entity;
-       void                            *data;
        struct amd_sched_fence          *s_fence;
 };
 
@@ -140,10 +139,7 @@ struct amd_gpu_scheduler *amd_sched_create(void *device,
                                uint32_t hw_submission);
 int amd_sched_destroy(struct amd_gpu_scheduler *sched);
 
-int amd_sched_push_job(struct amd_gpu_scheduler *sched,
-                      struct amd_sched_entity *c_entity,
-                      void *data,
-                      struct amd_sched_fence **fence);
+int amd_sched_push_job(struct amd_sched_job *sched_job);
 
 int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
                          struct amd_sched_entity *entity,
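Putting the pieces together, a backend now allocates its own job, fills in the embedded base, and pushes that. A condensed caller sketch following amdgpu_sched_ib_submit_kernel_helper from this patch (&job->base is equivalent to the cast the patch uses; error paths trimmed to the essentials):

	struct amdgpu_job *job = kzalloc(sizeof(*job), GFP_KERNEL);

	if (!job)
		return -ENOMEM;
	job->base.sched = ring->scheduler;
	job->base.s_entity = &adev->kernel_ctx.rings[ring->idx].entity;
	job->adev = adev;
	job->ibs = ibs;
	job->num_ibs = num_ibs;
	job->owner = owner;
	job->free_job = free_job;
	mutex_init(&job->job_lock);

	mutex_lock(&job->job_lock);
	r = amd_sched_push_job(&job->base);	/* creates job->base.s_fence */
	if (r) {
		mutex_unlock(&job->job_lock);
		kfree(job);
		return r;
	}
	ibs[num_ibs - 1].sequence = job->base.s_fence->v_seq;
	*f = fence_get(&job->base.s_fence->base);
	mutex_unlock(&job->job_lock);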