drm/amdgpu: remove amd_sched_wait_emit v2
author: Christian König <christian.koenig@amd.com>
Mon, 10 Aug 2015 12:04:12 +0000 (14:04 +0200)
committer: Alex Deucher <alexander.deucher@amd.com>
Mon, 17 Aug 2015 20:51:15 +0000 (16:51 -0400)
Not used any more.

v2: remove amd_sched_emit as well.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
drivers/gpu/drm/amd/scheduler/gpu_scheduler.h

index 6a7e83edcaa7b6c28cc08f127c13c17e9238d367..d2e5f3b90a3c76b05cae70b2fd3fde46c4251ce4 100644 (file)
@@ -77,8 +77,6 @@ static struct fence *amdgpu_sched_run_job(struct amd_gpu_scheduler *sched,
                        goto err;
        }
 
-       amd_sched_emit(entity, sched_job->ibs[sched_job->num_ibs - 1].sequence);
-
        mutex_unlock(&sched_job->job_lock);
        return &fence->base;
 
index 402086d96889f02fe1aec31114bbd364934f60e6..90abefed86cc3057eef41956176210b4767e953b 100644 (file)
@@ -202,7 +202,6 @@ int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
                return -EINVAL;
 
        spin_lock_init(&entity->queue_lock);
-       atomic64_set(&entity->last_emitted_v_seq, seq_ring);
        atomic64_set(&entity->last_queued_v_seq, seq_ring);
        atomic64_set(&entity->last_signaled_v_seq, seq_ring);
 
@@ -329,53 +328,6 @@ int amd_sched_push_job(struct amd_gpu_scheduler *sched,
        return 0;
 }
 
-/**
- * Wait for a virtual sequence number to be emitted.
- *
- * @c_entity   The pointer to a valid context entity
- * @seq         The virtual sequence number to wait
- * @intr       Interruptible or not
- * @timeout    Timeout in ms, wait infinitely if <0
- * @emit        wait for emit or signal
- *
- * return =0 signaled ,  <0 failed
-*/
-int amd_sched_wait_emit(struct amd_sched_entity *c_entity,
-                       uint64_t seq,
-                       bool intr,
-                       long timeout)
-{
-       atomic64_t *v_seq = &c_entity->last_emitted_v_seq;
-       wait_queue_head_t *wait_queue = &c_entity->wait_emit;
-
-       if (intr && (timeout < 0)) {
-               wait_event_interruptible(
-                       *wait_queue,
-                       seq <= atomic64_read(v_seq));
-               return 0;
-       } else if (intr && (timeout >= 0)) {
-               wait_event_interruptible_timeout(
-                       *wait_queue,
-                       seq <= atomic64_read(v_seq),
-                       msecs_to_jiffies(timeout));
-               return (seq <= atomic64_read(v_seq)) ?
-                       0 : -1;
-       } else if (!intr && (timeout < 0)) {
-               wait_event(
-                       *wait_queue,
-                       seq <= atomic64_read(v_seq));
-               return 0;
-       } else if (!intr && (timeout >= 0)) {
-               wait_event_timeout(
-                       *wait_queue,
-                       seq <= atomic64_read(v_seq),
-                       msecs_to_jiffies(timeout));
-               return (seq <= atomic64_read(v_seq)) ?
-                       0 : -1;
-       }
-       return 0;
-}
-
 static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
 {
        struct amd_sched_job *sched_job =
@@ -510,19 +462,6 @@ int amd_sched_destroy(struct amd_gpu_scheduler *sched)
        return  0;
 }
 
-/**
- * Update emitted sequence and wake up the waiters, called by run_job
- * in driver side
- *
- * @entity The context entity
- * @seq The sequence number for the latest emitted job
-*/
-void amd_sched_emit(struct amd_sched_entity *c_entity, uint64_t seq)
-{
-       atomic64_set(&c_entity->last_emitted_v_seq, seq);
-       wake_up_all(&c_entity->wait_emit);
-}
-
 /**
  * Get next queued sequence number
  *
index 300132f14d7416d317d04aea2b275d2bee502a2d..aa942033d4b3a123ed1702b1c0fb223b398d78b5 100644 (file)
@@ -44,7 +44,6 @@ struct amd_sched_entity {
        spinlock_t                      lock;
        /* the virtual_seq is unique per context per ring */
        atomic64_t                      last_queued_v_seq;
-       atomic64_t                      last_emitted_v_seq;
        atomic64_t                      last_signaled_v_seq;
        /* the job_queue maintains the jobs submitted by clients */
        struct kfifo                    job_queue;
@@ -154,13 +153,6 @@ int amd_sched_push_job(struct amd_gpu_scheduler *sched,
                       void *data,
                       struct amd_sched_fence **fence);
 
-int amd_sched_wait_emit(struct amd_sched_entity *c_entity,
-                       uint64_t seq,
-                       bool intr,
-                       long timeout);
-
-uint64_t amd_sched_get_handled_seq(struct amd_gpu_scheduler *sched);
-
 int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
                          struct amd_sched_entity *entity,
                          struct amd_run_queue *rq,
@@ -168,8 +160,6 @@ int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
 int amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
                          struct amd_sched_entity *entity);
 
-void amd_sched_emit(struct amd_sched_entity *c_entity, uint64_t seq);
-
 uint64_t amd_sched_next_queued_seq(struct amd_sched_entity *c_entity);
 
 struct amd_sched_fence *amd_sched_fence_create(