void (*enqueue_task) (struct rq *rq, struct task_struct *p, int wakeup);
void (*dequeue_task) (struct rq *rq, struct task_struct *p, int sleep);
- void (*yield_task) (struct rq *rq, struct task_struct *p);
+ void (*yield_task) (struct rq *rq);
void (*check_preempt_curr) (struct rq *rq, struct task_struct *p);
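The hunk above drops the redundant task_struct argument from the yield_task hook: the caller is always the currently running task, so implementations can read it from the runqueue instead of taking it as a parameter. A minimal, kernel-independent sketch of the resulting calling convention; toy_rq, toy_task and toy_sched_class are made-up stand-ins, not kernel types:

#include <stdio.h>

struct toy_task {
	const char *name;
};

struct toy_rq {
	struct toy_task *curr;	/* the task currently running on this queue */
};

struct toy_sched_class {
	/* before: void (*yield_task)(struct toy_rq *rq, struct toy_task *p); */
	void (*yield_task)(struct toy_rq *rq);
};

static void toy_yield_task(struct toy_rq *rq)
{
	/* the yielding task is always rq->curr, so no second argument */
	printf("%s yields\n", rq->curr->name);
}

static const struct toy_sched_class toy_fair_class = {
	.yield_task = toy_yield_task,
};

int main(void)
{
	struct toy_task t = { "current" };
	struct toy_rq rq = { &t };

	/* mirrors current->sched_class->yield_task(rq) in sys_sched_yield() */
	toy_fair_class.yield_task(&rq);
	return 0;
}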
struct rq *rq = this_rq_lock();
schedstat_inc(rq, yld_cnt);
- current->sched_class->yield_task(rq, current);
+ current->sched_class->yield_task(rq);
/*
 * Since we are going to call schedule() anyway, there's
 * no point to preempt us on
 * sys_sched_yield() boundary.
 */
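For context, the path above is what runs when userspace calls sched_yield(2); with CFS it reaches yield_task_fair() through current->sched_class->yield_task(rq). A minimal caller, plain POSIX and not part of the patch:

#include <sched.h>
#include <stdio.h>

int main(void)
{
	/* On success sched_yield() returns 0: the caller is requeued and
	 * schedule() picks the next task, which may be the caller again. */
	if (sched_yield() != 0) {
		perror("sched_yield");
		return 1;
	}
	printf("yielded and ran again\n");
	return 0;
}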
/*
 * sched_yield() support is very simple - we dequeue and enqueue.
 *
 * If compat_yield is turned on then we requeue to the end of the tree.
 */
-static void yield_task_fair(struct rq *rq, struct task_struct *p)
+static void yield_task_fair(struct rq *rq)
{
- struct cfs_rq *cfs_rq = task_cfs_rq(p);
+ struct cfs_rq *cfs_rq = &rq->cfs;
struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
- struct sched_entity *rightmost, *se = &p->se;
+ struct sched_entity *rightmost, *se = &rq->curr->se;
struct rb_node *parent;
/*
* Dequeue and enqueue the task to update its
* position within the tree:
*/
- dequeue_entity(cfs_rq, &p->se, 0);
- enqueue_entity(cfs_rq, &p->se, 0);
+ dequeue_entity(cfs_rq, se, 0);
+ enqueue_entity(cfs_rq, se, 0);
return;
}
}
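The plain path above repositions the yielding entity simply by dequeueing and re-enqueueing it, so it is re-sorted by its updated vruntime; the compat_yield path instead moves it past the rightmost entry in the tree. A toy illustration of that requeue-to-reposition mechanism, with a sorted singly linked list standing in for the CFS rbtree; toy_entity, toy_cfs_rq and the helpers are illustrative only:

#include <stdio.h>

struct toy_entity {
	unsigned long vruntime;		/* sort key, like se->vruntime */
	struct toy_entity *next;
};

struct toy_cfs_rq {
	struct toy_entity *head;	/* kept sorted ascending by vruntime */
};

static void toy_dequeue(struct toy_cfs_rq *q, struct toy_entity *se)
{
	struct toy_entity **pp = &q->head;

	while (*pp && *pp != se)
		pp = &(*pp)->next;
	if (*pp)
		*pp = se->next;	/* unlink from the sorted list */
}

static void toy_enqueue(struct toy_cfs_rq *q, struct toy_entity *se)
{
	struct toy_entity **pp = &q->head;

	while (*pp && (*pp)->vruntime <= se->vruntime)
		pp = &(*pp)->next;
	se->next = *pp;		/* insert at the position its key dictates */
	*pp = se;
}

int main(void)
{
	struct toy_entity a = { 10, NULL }, b = { 20, NULL }, c = { 30, NULL };
	struct toy_cfs_rq q = { NULL };

	toy_enqueue(&q, &a);
	toy_enqueue(&q, &b);
	toy_enqueue(&q, &c);

	/* "yield": bump the key past the rightmost entry, then requeue
	 * (the compat_yield flavor; the plain path just re-sorts by the
	 * entity's updated vruntime) */
	toy_dequeue(&q, &a);
	a.vruntime = c.vruntime + 1;
	toy_enqueue(&q, &a);

	for (struct toy_entity *e = q.head; e; e = e->next)
		printf("%lu ", e->vruntime);
	printf("\n");	/* prints: 20 30 31 */
	return 0;
}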
static void
-yield_task_rt(struct rq *rq, struct task_struct *p)
+yield_task_rt(struct rq *rq)
{
- requeue_task_rt(rq, p);
+ requeue_task_rt(rq, rq->curr);
}
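yield_task_rt() delegates to requeue_task_rt(), which moves rq->curr to the tail of its priority queue, i.e. classic round-robin behavior. A toy version of that rotation, with names and layout chosen purely for illustration:

#include <stdio.h>

#define NTASKS 3

int main(void)
{
	/* queue[0] is "current"; a yield rotates it to the back so the
	 * other tasks at the same priority run first */
	const char *queue[NTASKS] = { "A", "B", "C" };
	const char *curr = queue[0];

	for (int i = 0; i < NTASKS - 1; i++)
		queue[i] = queue[i + 1];
	queue[NTASKS - 1] = curr;

	for (int i = 0; i < NTASKS; i++)
		printf("%s ", queue[i]);	/* prints: B C A */
	printf("\n");
	return 0;
}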
/*