mm: emit tracepoint when rss watermark is hit
author Joel Fernandes <joelaf@google.com>
Sat, 5 May 2018 21:58:08 +0000 (14:58 -0700)
committer PDO SCM Team <hudsoncm@motorola.com>
Fri, 15 Nov 2019 06:58:48 +0000 (00:58 -0600)
Useful to track how rss is changing per tgid. Required for the
memory visibility work being done for Android.

Original patch by Tim Murray:
https://partner-android-review.googlesource.com/c/kernel/private/msm-google/+/1081280

Changes from the original patch:
- don't bloat mm_struct
- add some noise reduction to rss tracking

Mot-CRs-fixed: (CR)

Change-Id: Ief904334235ff4380244e5803d7853579e70d202
Signed-off-by: Joel Fernandes <joelaf@google.com>
Reviewed-on: https://gerrit.mot.com/1453726
SME-Granted: SME Approvals Granted
Tested-by: Jira Key
SLTApproved: Slta Waiver
Reviewed-by: Xiangpo Zhao <zhaoxp3@motorola.com>
Submit-Approved: Jira Key

include/linux/mm.h
include/trace/events/kmem.h
mm/memory.c

index b14ca733a2b9b7929ab139cdcd6153cccdd737f8..131536db63dbdcb046c2fe7da0dd607e408ddf41 100644 (file)
@@ -1504,19 +1504,27 @@ static inline unsigned long get_mm_counter(struct mm_struct *mm, int member)
        return (unsigned long)val;
 }
 
+void mm_trace_rss_stat(int member, long count, long value);
+
 static inline void add_mm_counter(struct mm_struct *mm, int member, long value)
 {
-       atomic_long_add(value, &mm->rss_stat.count[member]);
+       long count = atomic_long_add_return(value, &mm->rss_stat.count[member]);
+
+       mm_trace_rss_stat(member, count, value);
 }
 
 static inline void inc_mm_counter(struct mm_struct *mm, int member)
 {
-       atomic_long_inc(&mm->rss_stat.count[member]);
+       long count = atomic_long_inc_return(&mm->rss_stat.count[member]);
+
+       mm_trace_rss_stat(member, count, 1);
 }
 
 static inline void dec_mm_counter(struct mm_struct *mm, int member)
 {
-       atomic_long_dec(&mm->rss_stat.count[member]);
+       long count = atomic_long_dec_return(&mm->rss_stat.count[member]);
+
+       mm_trace_rss_stat(member, count, -1);
 }
 
 /* Optimized variant when page is already known not to be PageAnon */
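
The mm.h hunk above swaps the fire-and-forget atomic helpers for their *_return variants so the updated counter value is in hand when the trace hook runs. A minimal userspace sketch of that pattern, not kernel code: C11 stdatomic stands in for the kernel's atomic_long_t, and trace_hook() is a hypothetical placeholder for mm_trace_rss_stat().

/*
 * Sketch only: models add_mm_counter()'s switch to an add-that-returns,
 * so the new counter value can be handed to a trace hook.
 */
#include <stdatomic.h>
#include <stdio.h>

static atomic_long counter;                     /* stands in for mm->rss_stat.count[member] */

static void trace_hook(long count, long value)  /* hypothetical stand-in for mm_trace_rss_stat() */
{
        printf("count=%ld delta=%ld\n", count, value);
}

static void add_counter(long value)
{
        /* atomic_fetch_add() returns the old value; add 'value' to get the new one */
        long count = atomic_fetch_add(&counter, value) + value;

        trace_hook(count, value);
}

int main(void)
{
        add_counter(5);
        add_counter(-2);
        return 0;
}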
index 285feeadac39fb7729d33c6118421d1a340e71ab..b5529c0d4e019bff24348fbf9e4d57ffacf43d9e 100644 (file)
@@ -318,6 +318,27 @@ TRACE_EVENT(mm_page_alloc_extfrag,
                __entry->change_ownership)
 );
 
+TRACE_EVENT(rss_stat,
+
+       TP_PROTO(int member,
+               long count),
+
+       TP_ARGS(member, count),
+
+       TP_STRUCT__entry(
+               __field(int, member)
+               __field(long, size)
+       ),
+
+       TP_fast_assign(
+               __entry->member = member;
+               __entry->size = (count << PAGE_SHIFT);
+       ),
+
+       TP_printk("member=%d size=%ldB",
+               __entry->member,
+               __entry->size)
+       );
 #endif /* _TRACE_KMEM_H */
 
 /* This part must be outside protection */
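
In TP_fast_assign() above, the page count is shifted left by PAGE_SHIFT before it is stored, so the size the tracepoint reports is in bytes rather than pages. A quick standalone check of that arithmetic, assuming 4 KiB pages (PAGE_SHIFT == 12), which is an assumption about the target and not part of the patch:

/* Sketch only: verifies the pages-to-bytes conversion used in TP_fast_assign(). */
#include <assert.h>

#define PAGE_SHIFT 12                    /* assumed 4 KiB pages */

int main(void)
{
        long count = 300;                /* pages */
        long size = count << PAGE_SHIFT; /* bytes, as emitted in the trace */

        assert(size == 300L * 4096);     /* 1228800 B */
        return 0;
}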
index 16956844e5535450ea80926db381c4b98c34ff52..a92eba1f28217e866fae36ec18b0597b1511900f 100644 (file)
@@ -71,6 +71,8 @@
 #include <linux/dax.h>
 #include <linux/oom.h>
 
+#include <trace/events/kmem.h>
+
 #include <asm/io.h>
 #include <asm/mmu_context.h>
 #include <asm/pgalloc.h>
@@ -139,6 +141,21 @@ static int __init init_zero_pfn(void)
 }
 core_initcall(init_zero_pfn);
 
+/*
+ * This threshold is the boundary in the value space that the counter has to
+ * cross before we trace it, which keeps unwanted trace overhead down. It
+ * should be a power of 2. The counter is in units of pages.
+ */
+#define TRACE_MM_COUNTER_THRESHOLD 128
+
+void mm_trace_rss_stat(int member, long count, long value)
+{
+       long thresh_mask = ~(TRACE_MM_COUNTER_THRESHOLD - 1);
+
+       /* Threshold roll-over, trace it */
+       if ((count & thresh_mask) != ((count - value) & thresh_mask))
+               trace_rss_stat(member, count);
+}
 
 #if defined(SPLIT_RSS_COUNTING)
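
The roll-over test in mm_trace_rss_stat() masks off the low bits of the old and new counter values and only fires the tracepoint when the two fall into different TRACE_MM_COUNTER_THRESHOLD-sized buckets; this is the noise reduction mentioned in the commit message. A standalone userspace sketch of the same check (crosses_threshold() is a hypothetical helper, not part of the patch):

/* Sketch only: reproduces the threshold roll-over check from mm_trace_rss_stat(). */
#include <assert.h>
#include <stdbool.h>

#define TRACE_MM_COUNTER_THRESHOLD 128   /* pages, must be a power of 2 */

static bool crosses_threshold(long count, long value)
{
        long thresh_mask = ~(TRACE_MM_COUNTER_THRESHOLD - 1);

        /* count is the updated value, count - value is the previous value */
        return (count & thresh_mask) != ((count - value) & thresh_mask);
}

int main(void)
{
        assert(!crosses_threshold(101, 1));  /* 100 -> 101: same 128-page bucket, not traced */
        assert(crosses_threshold(128, 1));   /* 127 -> 128: crosses a boundary, traced */
        assert(crosses_threshold(127, -1));  /* 128 -> 127: crosses back, traced */
        return 0;
}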