struct util_est util_est;
};
+/*
+ * Per-entity load-tracking state for the EMS "ontime" task migration
+ * scheme. Field layout parallels the sum/period_contrib/avg trio of
+ * struct sched_avg (PELT); NOTE(review): exact decay/update semantics
+ * are implemented elsewhere (kernel/sched/ems) — confirm there.
+ */
+struct ontime_avg {
+ u64 ontime_migration_time; /* presumably timestamp of the last ontime migration — verify against updater */
+ u64 load_sum; /* running load sum accumulator (PELT-style, by analogy with sched_avg — confirm) */
+ u32 period_contrib; /* partial-period contribution carried between updates, as in sched_avg */
+ unsigned long load_avg; /* load average derived from load_sum */
+};
+
+/*
+ * Ontime-migration bookkeeping embedded in each scheduling entity
+ * (added to struct sched_statistics below in this patch).
+ */
+struct ontime_entity {
+ struct ontime_avg avg; /* ontime load tracking for this entity */
+ int migrating; /* NOTE(review): presumably nonzero while an ontime migration is in flight — confirm */
+ int cpu; /* CPU associated with this entity — presumably the migration target/home CPU, verify */
+};
+
struct sched_statistics {
#ifdef CONFIG_SCHEDSTATS
u64 wait_start;
*/
struct sched_avg avg ____cacheline_aligned_in_smp;
#endif
+ struct ontime_entity ontime;
};
#ifdef CONFIG_SCHED_WALT
/* rq "owned" by this entity/group: */
struct rt_rq *my_q;
#endif
+
+#ifdef CONFIG_SMP
+#ifdef CONFIG_SCHED_USE_FLUID_RT
+ int sync_flag;
+#endif
+ /*
+ * Per entity load average tracking.
+ *
+ * Put into separate cache line so it does not
+ * collide with read-mostly values above.
+ */
+ struct sched_avg avg;// ____cacheline_aligned_in_smp;
+#endif
} __randomize_layout;
struct sched_dl_entity {
u32 init_load_pct;
u64 last_sleep_ts;
#endif
+#ifdef CONFIG_SCHED_USE_FLUID_RT
+ int victim_flag;
+#endif
+
+#ifdef CONFIG_SCHED_EMS
+ struct task_band *band;
+ struct list_head band_members;
+#endif
#ifdef CONFIG_CGROUP_SCHED
struct task_group *sched_task_group;
unsigned sched_contributes_to_load:1;
unsigned sched_migrated:1;
unsigned sched_remote_wakeup:1;
+#ifdef CONFIG_PSI
+ unsigned sched_psi_wake_requeue:1;
+#endif
+
/* Force alignment to the next boundary: */
unsigned :0;
siginfo_t *last_siginfo;
struct task_io_accounting ioac;
+#ifdef CONFIG_PSI
+ /* Pressure stall state */
+ unsigned int psi_flags;
+#endif
#ifdef CONFIG_TASK_XACCT
/* Accumulated RSS usage: */
u64 acct_rss_mem1;
#define PF_KTHREAD 0x00200000 /* I am a kernel thread */
#define PF_RANDOMIZE 0x00400000 /* Randomize virtual address space */
#define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */
+#define PF_MEMSTALL 0x01000000 /* Stalled due to lack of memory */
#define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_allowed */
#define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */
#define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */
#define PFA_SPEC_SSB_FORCE_DISABLE 4 /* Speculative Store Bypass force disabled*/
#define PFA_SPEC_IB_DISABLE 5 /* Indirect branch speculation restricted */
#define PFA_SPEC_IB_FORCE_DISABLE 6 /* Indirect branch speculation permanently restricted */
+#define PFA_LMK_WAITING 7 /* Lowmemorykiller is waiting */
#define TASK_PFA_TEST(name, func) \
static inline bool task_##func(struct task_struct *p) \
TASK_PFA_TEST(SPEC_IB_FORCE_DISABLE, spec_ib_force_disable)
TASK_PFA_SET(SPEC_IB_FORCE_DISABLE, spec_ib_force_disable)
+TASK_PFA_TEST(LMK_WAITING, lmk_waiting)
+TASK_PFA_SET(LMK_WAITING, lmk_waiting)
+
static inline void
current_restore_flags(unsigned long orig_flags, unsigned long flags)
{