[COMMON] sched/rt: Share the primitives in common
author Johnlay Park <jonglae.park@samsung.com>
Fri, 23 Feb 2018 11:01:29 +0000 (20:01 +0900)
committer Chungwoo Park <cww.park@samsung.com>
Mon, 21 May 2018 08:33:08 +0000 (17:33 +0900)
Move primitives such as add_positive()/sub_positive(), which were
previously used exclusively by the fair class, into sched.h so that
other scheduler classes (e.g. rt) can share them.
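
For reference, a minimal usage sketch of the clamp-on-underflow
semantics these helpers provide once they are visible from sched.h; the
variable and values below are illustrative stand-ins, not code from
this patch:

	unsigned long sum = 10;

	/* 10 - 25 would wrap; the helper clamps the result to 0 instead */
	sub_positive(&sum, 25);

	/* plain addition: sum is now 7 */
	add_positive(&sum, 7);

	/* a negative delta larger than the current value also clamps to 0 */
	add_positive(&sum, -20);

	/*
	 * Both macros use a READ_ONCE()/WRITE_ONCE() load-store pair, so a
	 * lockless reader never observes the wrapped intermediate value.
	 */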

Change-Id: I18fdc114e702013adbb4cd2cb2b3dde3805fa693
Signed-off-by: Johnlay Park <jonglae.park@samsung.com>
kernel/sched/fair.c
kernel/sched/sched.h

index d7a44420aa6e400e74f8f2f4280bd9f998845adc..5a1ea8b0048884df0383959a0c3d1af9a28e5357 100644 (file)
@@ -3100,26 +3100,6 @@ __update_load_avg_cfs_rq(u64 now, int cpu, struct cfs_rq *cfs_rq)
                        cfs_rq->curr != NULL, cfs_rq, NULL);
 }
 
-/*
- * Signed add and clamp on underflow.
- *
- * Explicitly do a load-store to ensure the intermediate value never hits
- * memory. This allows lockless observations without ever seeing the negative
- * values.
- */
-#define add_positive(_ptr, _val) do {                           \
-       typeof(_ptr) ptr = (_ptr);                              \
-       typeof(_val) val = (_val);                              \
-       typeof(*ptr) res, var = READ_ONCE(*ptr);                \
-                                                               \
-       res = var + val;                                        \
-                                                               \
-       if (val < 0 && res > var)                               \
-               res = 0;                                        \
-                                                               \
-       WRITE_ONCE(*ptr, res);                                  \
-} while (0)
-
 #ifdef CONFIG_FAIR_GROUP_SCHED
 /**
  * update_tg_load_avg - update the tg's load avg
@@ -3369,23 +3349,6 @@ static inline void set_tg_cfs_propagate(struct cfs_rq *cfs_rq) {}
 
 #endif /* CONFIG_FAIR_GROUP_SCHED */
 
-/*
- * Unsigned subtract and clamp on underflow.
- *
- * Explicitly do a load-store to ensure the intermediate value never hits
- * memory. This allows lockless observations without ever seeing the negative
- * values.
- */
-#define sub_positive(_ptr, _val) do {                          \
-       typeof(_ptr) ptr = (_ptr);                              \
-       typeof(*ptr) val = (_val);                              \
-       typeof(*ptr) res, var = READ_ONCE(*ptr);                \
-       res = var - val;                                        \
-       if (res > var)                                          \
-               res = 0;                                        \
-       WRITE_ONCE(*ptr, res);                                  \
-} while (0)
-
 /**
  * update_cfs_rq_load_avg - update the cfs_rq's load/util averages
  * @now: current time, as per cfs_rq_clock_task()
index 0aae6a73c4c0a460e8a332f183e14c033f5bf2ac..c9447be011f9a4effbadfb1b39e65328feb209be 100644 (file)
@@ -106,6 +106,43 @@ static inline void cpu_load_update_active(struct rq *this_rq) { }
  */
 #define NICE_0_LOAD            (1L << NICE_0_LOAD_SHIFT)
 
+/*
+ * Signed add and clamp on underflow.
+ *
+ * Explicitly do a load-store to ensure the intermediate value never hits
+ * memory. This allows lockless observations without ever seeing the negative
+ * values.
+ */
+#define add_positive(_ptr, _val) do {                           \
+       typeof(_ptr) ptr = (_ptr);                              \
+       typeof(_val) val = (_val);                              \
+       typeof(*ptr) res, var = READ_ONCE(*ptr);                \
+                                                               \
+       res = var + val;                                        \
+                                                               \
+       if (val < 0 && res > var)                               \
+               res = 0;                                        \
+                                                               \
+       WRITE_ONCE(*ptr, res);                                  \
+} while (0)
+
+/*
+ * Unsigned subtract and clamp on underflow.
+ *
+ * Explicitly do a load-store to ensure the intermediate value never hits
+ * memory. This allows lockless observations without ever seeing the negative
+ * values.
+ */
+#define sub_positive(_ptr, _val) do {                          \
+       typeof(_ptr) ptr = (_ptr);                              \
+       typeof(*ptr) val = (_val);                              \
+       typeof(*ptr) res, var = READ_ONCE(*ptr);                \
+       res = var - val;                                        \
+       if (res > var)                                          \
+               res = 0;                                        \
+       WRITE_ONCE(*ptr, res);                                  \
+} while (0)
+
 /*
  * Single value that decides SCHED_DEADLINE internal math precision.
  * 10 -> just above 1us