From 5522d5d5f70005faeffff3ffc0cfa8eec0155de4 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 15 Oct 2007 17:00:12 +0200 Subject: [PATCH] sched: mark scheduling classes as const mark scheduling classes as const. This speeds up the code a bit and shrinks it: text data bss dec hex filename 40027 4018 292 44337 ad31 sched.o.before 40190 3842 292 44324 ad24 sched.o.after Signed-off-by: Ingo Molnar Reviewed-by: Thomas Gleixner --- include/linux/sched.h | 4 ++-- kernel/sched.c | 17 +++++------------ kernel/sched_fair.c | 5 ++--- kernel/sched_idletask.c | 3 ++- kernel/sched_rt.c | 3 ++- 5 files changed, 13 insertions(+), 19 deletions(-) diff --git a/include/linux/sched.h b/include/linux/sched.h index 97f736b749c2..47e3717a0356 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -863,7 +863,7 @@ struct rq; struct sched_domain; struct sched_class { - struct sched_class *next; + const struct sched_class *next; void (*enqueue_task) (struct rq *rq, struct task_struct *p, int wakeup); void (*dequeue_task) (struct rq *rq, struct task_struct *p, int sleep); @@ -949,7 +949,7 @@ struct task_struct { int prio, static_prio, normal_prio; struct list_head run_list; - struct sched_class *sched_class; + const struct sched_class *sched_class; struct sched_entity se; #ifdef CONFIG_PREEMPT_NOTIFIERS diff --git a/kernel/sched.c b/kernel/sched.c index e1657e0c86d0..f582e2cedb09 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -847,9 +847,9 @@ static int balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest, int *this_best_prio, struct rq_iterator *iterator); #include "sched_stats.h" -#include "sched_rt.c" -#include "sched_fair.c" #include "sched_idletask.c" +#include "sched_fair.c" +#include "sched_rt.c" #ifdef CONFIG_SCHED_DEBUG # include "sched_debug.c" #endif @@ -2251,7 +2251,7 @@ static int move_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest, struct sched_domain *sd, enum cpu_idle_type idle, int *all_pinned) { - struct sched_class *class = 
sched_class_highest; + const struct sched_class *class = sched_class_highest; unsigned long total_load_moved = 0; int this_best_prio = this_rq->curr->prio; @@ -2276,7 +2276,7 @@ static int move_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest, static int move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest, struct sched_domain *sd, enum cpu_idle_type idle) { - struct sched_class *class; + const struct sched_class *class; int this_best_prio = MAX_PRIO; for (class = sched_class_highest; class; class = class->next) @@ -3432,7 +3432,7 @@ static inline void schedule_debug(struct task_struct *prev) static inline struct task_struct * pick_next_task(struct rq *rq, struct task_struct *prev) { - struct sched_class *class; + const struct sched_class *class; struct task_struct *p; /* @@ -6504,13 +6504,6 @@ void __init sched_init(void) int highest_cpu = 0; int i, j; - /* - * Link up the scheduling class hierarchy: - */ - rt_sched_class.next = &fair_sched_class; - fair_sched_class.next = &idle_sched_class; - idle_sched_class.next = NULL; - for_each_possible_cpu(i) { struct rt_prio_array *array; struct rq *rq; diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c index de13a6f5b977..32fd976f8566 100644 --- a/kernel/sched_fair.c +++ b/kernel/sched_fair.c @@ -76,8 +76,6 @@ const_debug unsigned int sysctl_sched_batch_wakeup_granularity = 25000000UL; */ const_debug unsigned int sysctl_sched_wakeup_granularity = 2000000UL; -extern struct sched_class fair_sched_class; - /************************************************************** * CFS operations on generic schedulable entities: */ @@ -1031,7 +1029,8 @@ static void set_curr_task_fair(struct rq *rq) /* * All the scheduling class methods: */ -struct sched_class fair_sched_class __read_mostly = { +static const struct sched_class fair_sched_class = { + .next = &idle_sched_class, .enqueue_task = enqueue_task_fair, .dequeue_task = dequeue_task_fair, .yield_task = yield_task_fair, diff --git 
a/kernel/sched_idletask.c b/kernel/sched_idletask.c index 5ebf829cdd73..6e2ead41516e 100644 --- a/kernel/sched_idletask.c +++ b/kernel/sched_idletask.c @@ -57,7 +57,8 @@ static void set_curr_task_idle(struct rq *rq) /* * Simple, special scheduling class for the per-CPU idle tasks: */ -static struct sched_class idle_sched_class __read_mostly = { +const struct sched_class idle_sched_class = { + /* .next is NULL */ /* no enqueue/yield_task for idle tasks */ /* dequeue is not valid, we print a debug message there: */ diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c index e1d5f1c8b532..dbe4d8cf80d6 100644 --- a/kernel/sched_rt.c +++ b/kernel/sched_rt.c @@ -225,7 +225,8 @@ static void set_curr_task_rt(struct rq *rq) p->se.exec_start = rq->clock; } -static struct sched_class rt_sched_class __read_mostly = { +const struct sched_class rt_sched_class = { + .next = &fair_sched_class, .enqueue_task = enqueue_task_rt, .dequeue_task = dequeue_task_rt, .yield_task = yield_task_rt, -- 2.20.1