X-Git-Url: https://git.stricted.de/?a=blobdiff_plain;f=kernel%2Fsched%2Fsched.h;h=323eec3924d8e306657e9de37893e3a5ab8d53f8;hb=6fa3eb70c07b7ce2061fd6602159ac2d45a7dc3d;hp=ce39224d615599c40a1d6e7f8f4a78d728e2acad;hpb=6fad8d02ef9ab12d5e178014a4c5c297e1707f23;p=GitHub%2Fmt8127%2Fandroid_kernel_alcatel_ttab.git

diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index ce39224d6155..323eec3924d8 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -21,6 +21,7 @@ extern __read_mostly int scheduler_running;
 #define PRIO_TO_NICE(prio)	((prio) - MAX_RT_PRIO - 20)
 #define TASK_NICE(p)		PRIO_TO_NICE((p)->static_prio)
 
+extern unsigned long get_cpu_load(int cpu);
 /*
  * 'User priority' is the nice value converted to something we
  * can work with better when scaling various scheduler parameters,
@@ -140,9 +141,10 @@ struct task_group {
 	struct cfs_rq **cfs_rq;
 	unsigned long shares;
 
-	atomic_t load_weight;
-	atomic64_t load_avg;
-	atomic_t runnable_avg;
+#ifdef CONFIG_SMP
+	atomic_long_t load_avg;
+	atomic_t runnable_avg, usage_avg;
+#endif
 #endif
 
 #ifdef CONFIG_RT_GROUP_SCHED
@@ -261,28 +263,25 @@ struct cfs_rq {
 #endif
 
 #ifdef CONFIG_SMP
-/*
- * Load-tracking only depends on SMP, FAIR_GROUP_SCHED dependency below may be
- * removed when useful for applications beyond shares distribution (e.g.
- * load-balance).
- */
-#ifdef CONFIG_FAIR_GROUP_SCHED
 	/*
 	 * CFS Load tracking
 	 * Under CFS, load is tracked on a per-entity basis and aggregated up.
 	 * This allows for the description of both thread and group usage (in
 	 * the FAIR_GROUP_SCHED case).
 	 */
-	u64 runnable_load_avg, blocked_load_avg;
-	atomic64_t decay_counter, removed_load;
+	unsigned long runnable_load_avg, blocked_load_avg;
+	atomic64_t decay_counter;
 	u64 last_decay;
-#endif /* CONFIG_FAIR_GROUP_SCHED */
-/* These always depend on CONFIG_FAIR_GROUP_SCHED */
+	atomic_long_t removed_load;
+
 #ifdef CONFIG_FAIR_GROUP_SCHED
-	u32 tg_runnable_contrib;
-	u64 tg_load_contrib;
+	/* Required to track per-cpu representation of a task_group */
+	u32 tg_runnable_contrib, tg_usage_contrib;
+	unsigned long tg_load_contrib;
 #endif /* CONFIG_FAIR_GROUP_SCHED */
 
+	struct sched_avg avg;
+
 	/*
 	 * h_load = weight * f(tg)
 	 *
@@ -344,6 +343,7 @@ struct rt_rq {
 	struct plist_head pushable_tasks;
 #endif
 	int rt_throttled;
+	int rt_disable_borrow;
 	u64 rt_time;
 	u64 rt_runtime;
 	/* Nests inside the rq lock: */
@@ -464,6 +464,9 @@ struct rq {
 	int active_balance;
 	int push_cpu;
 	struct cpu_stop_work active_balance_work;
+#ifdef CONFIG_SCHED_HMP
+	struct task_struct *migrate_task;
+#endif
 	/* cpu of this runqueue: */
 	int cpu;
 	int online;
@@ -642,6 +645,12 @@ static inline unsigned int group_first_cpu(struct sched_group *group)
 
 extern int group_balance_cpu(struct sched_group *sg);
 
+#ifdef CONFIG_SCHED_HMP
+static LIST_HEAD(hmp_domains);
+DECLARE_PER_CPU(struct hmp_domain *, hmp_cpu_domain);
+#define hmp_cpu_domain(cpu)	(per_cpu(hmp_cpu_domain, (cpu)))
+#endif /* CONFIG_SCHED_HMP */
+
 #endif /* CONFIG_SMP */
 
 #include "stats.h"
@@ -706,6 +715,11 @@ static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
 	 */
 	smp_wmb();
 	task_thread_info(p)->cpu = cpu;
+
+#ifdef CONFIG_FAIR_GROUP_SCHED
+	BUG_ON(p->se.cfs_rq->rq->cpu != cpu);
+#endif
+
 #endif
 }
 
@@ -810,6 +824,34 @@ static inline int task_running(struct rq *rq, struct task_struct *p)
 # define finish_arch_post_lock_switch()	do { } while (0)
 #endif
 
+#ifdef CONFIG_MT_RT_SCHED
+extern void mt_check_rt_policy(struct rq *this_rq);
+extern int push_need_released_rt_task(struct rq *rq, struct task_struct *p);
+extern int pull_rt_task(struct rq *this_rq);
+extern int mt_post_schedule(struct rq *rq);
+#endif
+
+#ifdef CONFIG_MT_RT_SCHED_LOG
+ #ifdef CONFIG_MT_RT_SCHED_DEBUG
+#define mt_rt_printf(x...) \
+ do{ \
+	char strings[128]=""; \
+	snprintf(strings, 128, x); \
+	printk(KERN_NOTICE x); \
+	trace_sched_rt_log(strings); \
+	}while (0)
+ #else
+#define mt_rt_printf(x...) \
+ do{ \
+	char strings[128]=""; \
+	snprintf(strings, 128, x); \
+	trace_sched_rt_log(strings); \
+	}while (0)
+ #endif
+#else
+#define mt_rt_printf do {} while (0)
+#endif
+
 #ifndef __ARCH_WANT_UNLOCKED_CTXSW
 static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
 {
@@ -844,7 +886,12 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
 	 * prev into current:
 	 */
 	spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
-
+#ifdef CONFIG_MT_RT_SCHED
+	if(test_tsk_need_released(prev)){
+		clear_tsk_need_released(prev);
+		push_need_released_rt_task(rq, prev);
+	}
+#endif
 	raw_spin_unlock_irq(&rq->lock);
 }
 
@@ -1028,17 +1075,19 @@ extern void update_group_power(struct sched_domain *sd, int cpu);
 extern void trigger_load_balance(struct rq *rq, int cpu);
 extern void idle_balance(int this_cpu, struct rq *this_rq);
 
-/*
- * Only depends on SMP, FAIR_GROUP_SCHED may be removed when runnable_avg
- * becomes useful in lb
- */
-#if defined(CONFIG_FAIR_GROUP_SCHED)
 extern void idle_enter_fair(struct rq *this_rq);
 extern void idle_exit_fair(struct rq *this_rq);
-#else
-static inline void idle_enter_fair(struct rq *this_rq) {}
-static inline void idle_exit_fair(struct rq *this_rq) {}
-#endif
+
+
+# ifdef CONFIG_MTK_SCHED_CMP_TGS
+extern int group_leader_is_empty(struct task_struct *p);
+# endif /* CONFIG_MTK_SCHED_CMP_TGS */
+
+# ifdef CONFIG_MTK_SCHED_CMP
+extern void get_cluster_cpus(struct cpumask *cpus, int cluster_id,
+			     bool exclusive_offline);
+extern int get_cluster_id(unsigned int cpu);
+# endif /* CONFIG_MTK_SCHED_CMP */
 
 #else	/* CONFIG_SMP */
 
@@ -1050,6 +1099,9 @@ static inline void idle_balance(int cpu, struct rq *rq)
 
 extern void sysrq_sched_debug_show(void);
 extern void sched_init_granularity(void);
+#if defined (CONFIG_MTK_SCHED_CMP_PACK_SMALL_TASK) || defined (CONFIG_HMP_PACK_SMALL_TASK)
+extern void update_packing_domain(int cpu);
+#endif /* CONFIG_HMP_PACK_SMALL_TASK */
 extern void update_max_interval(void);
 extern int update_runtime(struct notifier_block *nfb, unsigned long action, void *hcpu);
 extern void init_sched_rt_class(void);
@@ -1063,6 +1115,8 @@ extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime
 
 extern void update_idle_cpu_load(struct rq *this_rq);
 
+extern void init_task_runnable_average(struct task_struct *p);
+
 #ifdef CONFIG_PARAVIRT
 static inline u64 steal_ticks(u64 steal)
 {
@@ -1075,6 +1129,9 @@ static inline u64 steal_ticks(u64 steal)
 
 static inline void inc_nr_running(struct rq *rq)
 {
+#ifdef CONFIG_MTK_SCHED_RQAVG_KS
+	sched_update_nr_prod(cpu_of(rq), rq->nr_running, true);
+#endif /* CONFIG_MTK_SCHED_RQAVG_KS */
 	rq->nr_running++;
 
 #ifdef CONFIG_NO_HZ_FULL
@@ -1090,6 +1147,9 @@ static inline void inc_nr_running(struct rq *rq)
 
 static inline void dec_nr_running(struct rq *rq)
 {
+#ifdef CONFIG_MTK_SCHED_RQAVG_KS
+	sched_update_nr_prod(cpu_of(rq), rq->nr_running, false);
+#endif /* CONFIG_MTK_SCHED_RQAVG_KS */
 	rq->nr_running--;
 }
 
@@ -1317,8 +1377,10 @@ extern void print_rt_stats(struct seq_file *m, int cpu);
 
 extern void init_cfs_rq(struct cfs_rq *cfs_rq);
 extern void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq);
+extern void unthrottle_offline_rt_rqs(struct rq *rq);
 
-extern void account_cfs_bandwidth_used(int enabled, int was_enabled);
+extern void cfs_bandwidth_usage_inc(void);
+extern void cfs_bandwidth_usage_dec(void);
 
 #ifdef CONFIG_NO_HZ_COMMON
 enum rq_nohz_flag_bits {
@@ -1377,3 +1439,10 @@ static inline u64 irq_time_read(int cpu)
 }
 #endif /* CONFIG_64BIT */
 #endif /* CONFIG_IRQ_TIME_ACCOUNTING */
+
+#ifdef CONFIG_SMP
+static inline int rq_cpu(const struct rq *rq) { return rq->cpu; }
+#else
+static inline int rq_cpu(const struct rq *rq) { return 0; }
+#endif
+
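
The CONFIG_MT_RT_SCHED_LOG hunk above defines mt_rt_printf() to render its arguments into a 128-byte stack buffer with snprintf() and hand the rendered string to the trace hook; the CONFIG_MT_RT_SCHED_DEBUG variant additionally echoes the message to printk(). Below is a minimal user-space sketch of that pattern, not the kernel code itself: my_trace_log() is a stand-in for trace_sched_rt_log(), whose real definition lives elsewhere in the MTK tree, and the named variadic form "x..." is the GNU C extension the kernel relies on.

#include <stdio.h>

/* Stand-in for trace_sched_rt_log(); the real tracepoint is outside this diff. */
static void my_trace_log(const char *msg)
{
	fprintf(stderr, "trace: %s", msg);
}

/*
 * Same shape as the patch's mt_rt_printf(): format the arguments into a
 * fixed 128-byte buffer, print them, then pass the rendered string to the
 * trace hook.  Anything longer than the buffer is truncated by snprintf().
 */
#define mt_rt_printf(x...)				\
	do {						\
		char strings[128] = "";			\
		snprintf(strings, sizeof(strings), x);	\
		printf(x);				\
		my_trace_log(strings);			\
	} while (0)

int main(void)
{
	mt_rt_printf("pull_rt_task: cpu=%d prio=%d\n", 2, 99);
	return 0;
}

Note that the patch's fallback branch defines mt_rt_printf as an object-like macro (no parameter list), so a call site that passes arguments compiles only when CONFIG_MT_RT_SCHED_LOG is enabled or the call is itself guarded by that option.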