From: Johnlay Park Date: Fri, 23 Feb 2018 11:01:29 +0000 (+0900) Subject: [COMMON] sched/rt: Sharing the primitives in common X-Git-Url: https://git.stricted.de/?a=commitdiff_plain;h=be280c5164bfdebff2948dda3ea569b4b2fad77e;p=GitHub%2FLineageOS%2Fandroid_kernel_motorola_exynos9610.git [COMMON] sched/rt: Sharing the primitives in common Share primitives such as add_positive/sub_positive, which were previously used exclusively in the fair class. Change-Id: I18fdc114e702013adbb4cd2cb2b3dde3805fa693 Signed-off-by: Johnlay Park --- diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index d7a44420aa6e..5a1ea8b00488 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -3100,26 +3100,6 @@ __update_load_avg_cfs_rq(u64 now, int cpu, struct cfs_rq *cfs_rq) cfs_rq->curr != NULL, cfs_rq, NULL); } -/* - * Signed add and clamp on underflow. - * - * Explicitly do a load-store to ensure the intermediate value never hits - * memory. This allows lockless observations without ever seeing the negative - * values. - */ -#define add_positive(_ptr, _val) do { \ - typeof(_ptr) ptr = (_ptr); \ - typeof(_val) val = (_val); \ - typeof(*ptr) res, var = READ_ONCE(*ptr); \ - \ - res = var + val; \ - \ - if (val < 0 && res > var) \ - res = 0; \ - \ - WRITE_ONCE(*ptr, res); \ -} while (0) - #ifdef CONFIG_FAIR_GROUP_SCHED /** * update_tg_load_avg - update the tg's load avg @@ -3369,23 +3349,6 @@ static inline void set_tg_cfs_propagate(struct cfs_rq *cfs_rq) {} #endif /* CONFIG_FAIR_GROUP_SCHED */ -/* - * Unsigned subtract and clamp on underflow. - * - * Explicitly do a load-store to ensure the intermediate value never hits - * memory. This allows lockless observations without ever seeing the negative - * values. 
- */ -#define sub_positive(_ptr, _val) do { \ - typeof(_ptr) ptr = (_ptr); \ - typeof(*ptr) val = (_val); \ - typeof(*ptr) res, var = READ_ONCE(*ptr); \ - res = var - val; \ - if (res > var) \ - res = 0; \ - WRITE_ONCE(*ptr, res); \ -} while (0) - /** * update_cfs_rq_load_avg - update the cfs_rq's load/util averages * @now: current time, as per cfs_rq_clock_task() diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 0aae6a73c4c0..c9447be011f9 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -106,6 +106,43 @@ static inline void cpu_load_update_active(struct rq *this_rq) { } */ #define NICE_0_LOAD (1L << NICE_0_LOAD_SHIFT) +/* + * Signed add and clamp on underflow. + * + * Explicitly do a load-store to ensure the intermediate value never hits + * memory. This allows lockless observations without ever seeing the negative + * values. + */ +#define add_positive(_ptr, _val) do { \ + typeof(_ptr) ptr = (_ptr); \ + typeof(_val) val = (_val); \ + typeof(*ptr) res, var = READ_ONCE(*ptr); \ + \ + res = var + val; \ + \ + if (val < 0 && res > var) \ + res = 0; \ + \ + WRITE_ONCE(*ptr, res); \ +} while (0) + +/* + * Unsigned subtract and clamp on underflow. + * + * Explicitly do a load-store to ensure the intermediate value never hits + * memory. This allows lockless observations without ever seeing the negative + * values. + */ +#define sub_positive(_ptr, _val) do { \ + typeof(_ptr) ptr = (_ptr); \ + typeof(*ptr) val = (_val); \ + typeof(*ptr) res, var = READ_ONCE(*ptr); \ + res = var - val; \ + if (res > var) \ + res = 0; \ + WRITE_ONCE(*ptr, res); \ +} while (0) + /* * Single value that decides SCHED_DEADLINE internal math precision. * 10 -> just above 1us