#include <linux/gfp.h>
#include <asm/processor.h>
+#include <linux/rtpm_prio.h>
struct exec_domain;
struct futex_pi_state;
extern unsigned long nr_iowait(void);
extern unsigned long nr_iowait_cpu(int cpu);
extern unsigned long this_cpu_load(void);
-
-
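+/*
+ * MTK accounting helpers (semantics inferred from the declarations): per-cpu
+ * load, per-thread cputime, per-cpu idle time, and a scheduler-clock
+ * timestamp.
+ */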
+extern unsigned long get_cpu_load(int cpu);
+extern unsigned long long mt_get_thread_cputime(pid_t pid);
+extern unsigned long long mt_get_cpu_idle(int cpu);
+extern unsigned long long mt_sched_clock(void);
extern void calc_global_load(unsigned long ticks);
extern void update_cpu_load_nohz(void);
atomic_t sigcnt;
atomic_t live;
int nr_threads;
+ struct list_head thread_head;
wait_queue_head_t wait_chldexit; /* for wait4() */
unsigned long mq_bytes; /* How many bytes can be allocated to mqueue? */
#endif
unsigned long locked_shm; /* How many pages of mlocked shm ? */
+ unsigned long unix_inflight; /* How many files in flight in unix sockets */
+ atomic_long_t pipe_bufs; /* how many pages are allocated in pipe buffers */
#ifdef CONFIG_KEYS
struct key *uid_keyring; /* UID specific keyring */
#define SD_BALANCE_WAKE 0x0010 /* Balance on wakeup */
#define SD_WAKE_AFFINE 0x0020 /* Wake task to waking CPU */
#define SD_SHARE_CPUPOWER 0x0080 /* Domain members share cpu power */
+
+#ifdef CONFIG_HMP_PACK_SMALL_TASK
+#define SD_SHARE_POWERLINE 0x0100 /* Domain members share power domain */
+#endif /* CONFIG_HMP_PACK_SMALL_TASK */
+
#define SD_SHARE_PKG_RESOURCES 0x0200 /* Domain members share cpu pkg resources */
#define SD_SERIALIZE 0x0400 /* Only a single load balancing instance */
#define SD_ASYM_PACKING 0x0800 /* Place busy groups earlier in the domain */
#define SD_PREFER_SIBLING 0x1000 /* Prefer to place tasks in a sibling domain */
#define SD_OVERLAP 0x2000 /* sched_domains of this level overlap */
+#ifdef CONFIG_MTK_SCHED_CMP_TGS
+#define SD_BALANCE_TG 0x4000 /* Balance for thread group */
+#endif
+#ifdef CONFIG_MTK_SCHED_CMP_PACK_SMALL_TASK
+#define SD_SHARE_POWERLINE 0x8000 /* Domain members share power domain */
+#endif
extern int __weak arch_sd_sibiling_asym_packing(void);
unsigned long last_balance; /* init to jiffies. units in jiffies */
unsigned int balance_interval; /* initialise to 1. units in ms. */
unsigned int nr_balance_failed; /* initialise to 0 */
+#ifdef CONFIG_MT_LOAD_BALANCE_PROFILER
+ unsigned int mt_lbprof_nr_balance_failed; /* initialise to 0 */
+#endif
u64 last_update;
bool cpus_share_cache(int this_cpu, int that_cpu);
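+/*
+ * Aggregated load-balance statistics: CPU and task counts, average load
+ * ratio, capacity figures and a dynamic balancing threshold.
+ */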
+struct clb_stats {
+	int ncpu;			/* The number of CPUs */
+ int ntask; /* The number of tasks */
+ int load_avg; /* Arithmetic average of task load ratio */
+ int cpu_capacity; /* Current CPU capacity */
+ int cpu_power; /* Max CPU capacity */
+ int acap; /* Available CPU capacity */
+ int scaled_acap; /* Scaled available CPU capacity */
+	int scaled_atask;		/* Scaled number of available tasks */
+ int threshold; /* Dynamic threshold */
+#ifdef CONFIG_SCHED_HMP_PRIO_FILTER
+ int nr_normal_prio_task; /* The number of normal-prio tasks */
+ int nr_dequeuing_low_prio; /* The number of dequeuing low-prio tasks */
+#endif
+};
+
+#ifdef CONFIG_SCHED_HMP
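+/*
+ * One HMP domain: the CPUs it currently spans, the CPUs it may ever contain,
+ * and its node in the global list of HMP domains.
+ */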
+struct hmp_domain {
+ struct cpumask cpus;
+ struct cpumask possible_cpus;
+ struct list_head hmp_domains;
+};
+
+#ifdef CONFIG_SCHED_HMP_ENHANCEMENT
+#ifdef CONFIG_HMP_TRACER
+struct hmp_statisic {
+	unsigned int nr_force_up;	/* The number of forced task up-migrations */
+	unsigned int nr_force_down;	/* The number of forced task down-migrations */
+};
+#endif /* CONFIG_HMP_TRACER */
+#endif /* CONFIG_SCHED_HMP_ENHANCEMENT */
+#endif /* CONFIG_SCHED_HMP */
#else /* CONFIG_SMP */
struct sched_domain_attr;
u64 last_runnable_update;
s64 decay_count;
unsigned long load_avg_contrib;
+ unsigned long load_avg_ratio;
+#ifdef CONFIG_SCHED_HMP
+#ifdef CONFIG_SCHED_HMP_ENHANCEMENT
+ unsigned long pending_load;
+ u32 nr_pending;
+#ifdef CONFIG_SCHED_HMP_PRIO_FILTER
+ u32 nr_dequeuing_low_prio;
+ u32 nr_normal_prio;
+#endif
+#endif
+ u64 hmp_last_up_migration;
+ u64 hmp_last_down_migration;
+#endif /* CONFIG_SCHED_HMP */
+ u32 usage_avg_sum;
};
#ifdef CONFIG_SCHEDSTATS
};
#endif
+#ifdef CONFIG_MTPROF_CPUTIME
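+/*
+ * Per-ISR accounting record kept per task: interrupt number, invocation
+ * count, accumulated time, handler name, and a link to the next record.
+ */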
+struct mtk_isr_info {
+	int isr_num;
+	int isr_count;
+	u64 isr_time;
+	char *isr_name;
+	struct mtk_isr_info *next;
+};
+#endif
struct sched_entity {
struct load_weight load; /* for load-balancing */
struct rb_node run_node;
struct cfs_rq *my_q;
#endif
-/*
- * Load-tracking only depends on SMP, FAIR_GROUP_SCHED dependency below may be
- * removed when useful for applications beyond shares distribution (e.g.
- * load-balance).
- */
-#if defined(CONFIG_SMP) && defined(CONFIG_FAIR_GROUP_SCHED)
+#ifdef CONFIG_SMP
/* Per-entity load-tracking */
struct sched_avg avg;
#endif
+#ifdef CONFIG_MTPROF_CPUTIME
+ u64 mtk_isr_time;
+ int mtk_isr_count;
+ struct mtk_isr_info *mtk_isr;
+#endif
};
struct sched_rt_entity {
perf_nr_task_contexts,
};
+#ifdef CONFIG_MTK_SCHED_CMP_TGS
+#define NUM_CLUSTER 2
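+/*
+ * Per-cluster bookkeeping for a thread group; NUM_CLUSTER assumes a
+ * two-cluster topology (e.g. big.LITTLE).  task_struct carries an array of
+ * NUM_CLUSTER of these.
+ */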
+struct thread_group_info_t {
+	/* # of cfs threads in the thread group per cluster */
+ unsigned long cfs_nr_running;
+ /* # of threads in the thread group per cluster */
+ unsigned long nr_running;
+ /* runnable load of the thread group per cluster */
+ unsigned long load_avg_ratio;
+};
+
+#endif
+
+#ifdef CONFIG_MT_SCHED_NOTICE
+#ifdef CONFIG_MT_SCHED_DEBUG
+#define mt_sched_printf(x...) \
+	do { \
+		char strings[128] = ""; \
+		snprintf(strings, 128, x); \
+		printk(KERN_NOTICE x); \
+		trace_sched_log(strings); \
+	} while (0)
+#else
+#define mt_sched_printf(x...) \
+	do { \
+		char strings[128] = ""; \
+		snprintf(strings, 128, x); \
+		trace_sched_log(strings); \
+	} while (0)
+#endif
+#else
+#define mt_sched_printf(x...) do {} while (0)
+#endif
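+
+/*
+ * Usage sketch (hypothetical call site):
+ *
+ *	mt_sched_printf("balance cpu=%d nr=%d\n", cpu, nr);
+ *
+ * With CONFIG_MT_SCHED_DEBUG the message is both printed via printk and
+ * emitted through the sched_log trace event; otherwise it is only traced,
+ * and without CONFIG_MT_SCHED_NOTICE it compiles to nothing.
+ */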
+
struct task_struct {
volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
void *stack;
* execve */
unsigned in_iowait:1;
- /* task may not gain privileges */
- unsigned no_new_privs:1;
-
/* Revert to default priority/policy when forking */
unsigned sched_reset_on_fork:1;
unsigned sched_contributes_to_load:1;
+ unsigned long atomic_flags; /* Flags needing atomic access. */
+
pid_t pid;
pid_t tgid;
struct list_head sibling; /* linkage in my parent's children list */
struct task_struct *group_leader; /* threadgroup leader */
+#ifdef CONFIG_MTK_SCHED_CMP_TGS
+ raw_spinlock_t thread_group_info_lock;
+ struct thread_group_info_t thread_group_info[NUM_CLUSTER];
+#endif
+
/*
* ptraced is the list of tasks this task is using ptrace on.
* This includes both natural children and PTRACE_ATTACH targets.
/* PID/PID hash table linkage. */
struct pid_link pids[PIDTYPE_MAX];
struct list_head thread_group;
+ struct list_head thread_node;
struct completion *vfork_done; /* for vfork() */
int __user *set_child_tid; /* CLONE_CHILD_SETTID */
struct timespec real_start_time; /* boot based time */
/* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
unsigned long min_flt, maj_flt;
+/* for thrashing accounting */
+#ifdef CONFIG_ZRAM
+ unsigned long fm_flt, swap_in, swap_out;
+#endif
struct task_cputime cputime_expires;
struct list_head cpu_timers[3];
unsigned long memsw_nr_pages; /* uncharged mem+swap usage */
} memcg_batch;
unsigned int memcg_kmem_skip_account;
+ struct memcg_oom_info {
+ struct mem_cgroup *memcg;
+ gfp_t gfp_mask;
+ int order;
+ unsigned int may_oom:1;
+ } memcg_oom;
#endif
#ifdef CONFIG_HAVE_HW_BREAKPOINT
atomic_t ptrace_bp_refcnt;
extern void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
+extern int task_free_register(struct notifier_block *n);
+extern int task_free_unregister(struct notifier_block *n);
+
/*
* Per process flags
*/
#define PF_MEMPOLICY 0x10000000 /* Non-default NUMA mempolicy */
#define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */
#define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezable */
+#define PF_MTKPASR	0x80000000	/* I am in the MTKPASR process */
+
+#define task_in_mtkpasr(task)	unlikely((task)->flags & PF_MTKPASR)
/*
* Only the _current_ task can read/write to tsk->flags, but other
#define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
#define used_math() tsk_used_math(current)
-/* __GFP_IO isn't allowed if PF_MEMALLOC_NOIO is set in current->flags */
+/* __GFP_IO isn't allowed if PF_MEMALLOC_NOIO is set in current->flags
+ * __GFP_FS is also cleared as it implies __GFP_IO.
+ */
static inline gfp_t memalloc_noio_flags(gfp_t flags)
{
if (unlikely(current->flags & PF_MEMALLOC_NOIO))
- flags &= ~__GFP_IO;
+ flags &= ~(__GFP_IO | __GFP_FS);
return flags;
}
current->flags = (current->flags & ~PF_MEMALLOC_NOIO) | flags;
}
+/* Per-process atomic flags. */
+#define PFA_NO_NEW_PRIVS 0x00000001 /* May not gain new privileges. */
+
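+/*
+ * These accessors replace the former tsk->no_new_privs bitfield: the flag
+ * now lives in tsk->atomic_flags so it can be read and set atomically
+ * (e.g. task_set_no_new_privs(current) instead of current->no_new_privs = 1).
+ */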
+static inline bool task_no_new_privs(struct task_struct *p)
+{
+ return test_bit(PFA_NO_NEW_PRIVS, &p->atomic_flags);
+}
+
+static inline void task_set_no_new_privs(struct task_struct *p)
+{
+ set_bit(PFA_NO_NEW_PRIVS, &p->atomic_flags);
+}
+
/*
* task->jobctl flags
*/
const struct sched_param *);
extern int sched_setscheduler_nocheck(struct task_struct *, int,
const struct sched_param *);
+
+#ifdef CONFIG_MT_PRIO_TRACER
+extern void set_user_nice_core(struct task_struct *p, long nice);
+extern int sched_setscheduler_core(struct task_struct *, int,
+ const struct sched_param *);
+extern int sched_setscheduler_nocheck_core(struct task_struct *, int,
+ const struct sched_param *);
+#endif
+
extern struct task_struct *idle_task(int cpu);
/**
* is_idle_task - is the specified task an idle task?
#define while_each_thread(g, t) \
while ((t = next_thread(t)) != g)
+#define __for_each_thread(signal, t) \
+ list_for_each_entry_rcu(t, &(signal)->thread_head, thread_node)
+
+#define for_each_thread(p, t) \
+ __for_each_thread((p)->signal, t)
+
+/* Careful: this is a double loop, 'break' won't work as expected. */
+#define for_each_process_thread(p, t) \
+ for_each_process(p) for_each_thread(p, t)
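+
+/*
+ * Usage sketch, where inspect() stands in for a hypothetical per-thread
+ * hook; the iterators above use list_for_each_entry_rcu(), so hold the RCU
+ * read lock:
+ *
+ *	rcu_read_lock();
+ *	for_each_process_thread(p, t)
+ *		inspect(t);
+ *	rcu_read_unlock();
+ */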
+
static inline int get_nr_threads(struct task_struct *tsk)
{
return tsk->signal->nr_threads;
* all we care about is that we have a task with the appropriate
* pid, we don't actually care if we have the right task.
*/
-static inline int has_group_leader_pid(struct task_struct *p)
+static inline bool has_group_leader_pid(struct task_struct *p)
{
- return p->pid == p->tgid;
+ return task_pid(p) == p->signal->leader_pid;
}
static inline
-int same_thread_group(struct task_struct *p1, struct task_struct *p2)
+bool same_thread_group(struct task_struct *p1, struct task_struct *p2)
{
- return p1->tgid == p2->tgid;
+ return p1->signal == p2->signal;
}
static inline struct task_struct *next_thread(const struct task_struct *p)
return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
}
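+
+/*
+ * MTK RT scheduler support: mirror the need_resched helpers above for the
+ * MTK-specific TIF_NEED_RELEASED thread flag.
+ */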
+#if defined(CONFIG_MT_RT_SCHED) || defined(CONFIG_MT_RT_SCHED_LOG)
+static inline void set_tsk_need_released(struct task_struct *tsk)
+{
+ set_tsk_thread_flag(tsk, TIF_NEED_RELEASED);
+}
+
+static inline void clear_tsk_need_released(struct task_struct *tsk)
+{
+ clear_tsk_thread_flag(tsk,TIF_NEED_RELEASED);
+}
+
+static inline int test_tsk_need_released(struct task_struct *tsk)
+{
+ return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RELEASED));
+}
+#endif
+
static inline int restart_syscall(void)
{
set_tsk_thread_flag(current, TIF_SIGPENDING);
return task_rlimit_max(current, limit);
}
+#ifdef CONFIG_MTK_SCHED_RQAVG_US
+/*
+ * @cpu: cpu id
+ * @reset: reset the statistics window after this query
+ * @use_maxfreq: calculate cpu loading against the max cpu frequency
+ * return: cpu loading as a percentage (0~100)
+ */
+extern unsigned int sched_get_percpu_load(int cpu, bool reset, bool use_maxfreq);
+
+/*
+ * return: the number of heavy tasks (loading > 90%) in the system
+ */
+extern unsigned int sched_get_nr_heavy_task(void);
+
+/*
+ * @threshold: heavy task loading threshold (0~1023)
+ * return: the number of heavy tasks (loading > threshold) in the system
+ */
+extern unsigned int sched_get_nr_heavy_task_by_threshold(unsigned int threshold);
+#endif /* CONFIG_MTK_SCHED_RQAVG_US */
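+
+/*
+ * Example (sketch): read CPU0's loading as a percentage, without resetting
+ * the statistics window and without normalising to the max frequency:
+ *
+ *	unsigned int load = sched_get_percpu_load(0, false, false);
+ */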
+
+#ifdef CONFIG_MTK_SCHED_RQAVG_KS
+extern void sched_update_nr_prod(int cpu, unsigned long nr, bool inc);
+extern void sched_get_nr_running_avg(int *avg, int *iowait_avg);
+#endif /* CONFIG_MTK_SCHED_RQAVG_KS */
+
+extern void sched_get_big_little_cpus(struct cpumask *big, struct cpumask *little);
+
#endif