perf: Optimize throttling code
author     Peter Zijlstra <a.p.zijlstra@chello.nl>
           Wed, 16 Feb 2011 10:22:34 +0000 (11:22 +0100)
committer  Ingo Molnar <mingo@elte.hu>
           Wed, 16 Feb 2011 12:30:55 +0000 (13:30 +0100)
By pre-computing the maximum number of samples per tick we can avoid a
multiplication and a conditional: since MAX_INTERRUPTS >
max_samples_per_tick, a single comparison against max_samples_per_tick
also covers the already-throttled case.
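
As an illustration, a minimal userspace sketch of the throttle decision
before and after the change, looking only at the decision itself (the real
code also updates hwc->interrupts and logs the throttle). HZ=1000 and the
default 100000 samples/sec are assumed; MAX_INTERRUPTS is modeled as a
large sentinel:

/*
 * Userspace model of the old and new throttle checks; not kernel code.
 */
#include <stdio.h>

#define HZ                      1000            /* assumed tick rate */
#define MAX_SAMPLE_RATE         100000          /* DEFAULT_MAX_SAMPLE_RATE */
#define MAX_INTERRUPTS          (~0U)           /* large sentinel */
#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

static const unsigned int max_samples_per_tick =
        DIV_ROUND_UP(MAX_SAMPLE_RATE, HZ);

/* Old check: a multiplication plus a separate MAX_INTERRUPTS test. */
static int throttle_old(unsigned int interrupts)
{
        if (interrupts == MAX_INTERRUPTS)
                return 1;
        return (unsigned long long)HZ * (interrupts + 1) > MAX_SAMPLE_RATE;
}

/* New check: one compare; MAX_INTERRUPTS > max_samples_per_tick. */
static int throttle_new(unsigned int interrupts)
{
        return interrupts >= max_samples_per_tick;
}

int main(void)
{
        unsigned int i;

        for (i = 98; i <= 102; i++)
                printf("interrupts=%u old=%d new=%d\n",
                       i, throttle_old(i), throttle_new(i));
        return 0;
}

Both decisions flip at the same point (100 interrupts per tick here), but
the new form needs only the single pre-computed comparison.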

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
include/linux/perf_event.h
kernel/perf_event.c
kernel/sysctl.c

diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 38c8b25548428e961cfa2cf1002f36f239beabb0..8ceb5a6fd9c91ca2fbbcbacecdddd04b406bd69b 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -1110,6 +1110,10 @@ extern int sysctl_perf_event_paranoid;
 extern int sysctl_perf_event_mlock;
 extern int sysctl_perf_event_sample_rate;
 
+extern int perf_proc_update_handler(struct ctl_table *table, int write,
+               void __user *buffer, size_t *lenp,
+               loff_t *ppos);
+
 static inline bool perf_paranoid_tracepoint_raw(void)
 {
        return sysctl_perf_event_paranoid > -1;
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 65dcdc76d709efff7f5596e149a24e2c6fd551aa..e03be08d0ddff5c2a55d6af7b8de53c683961804 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -150,7 +150,24 @@ int sysctl_perf_event_mlock __read_mostly = 512; /* 'free' kb per user */
 /*
  * max perf event sample rate
  */
-int sysctl_perf_event_sample_rate __read_mostly = 100000;
+#define DEFAULT_MAX_SAMPLE_RATE 100000
+int sysctl_perf_event_sample_rate __read_mostly = DEFAULT_MAX_SAMPLE_RATE;
+static int max_samples_per_tick __read_mostly =
+       DIV_ROUND_UP(DEFAULT_MAX_SAMPLE_RATE, HZ);
+
+int perf_proc_update_handler(struct ctl_table *table, int write,
+               void __user *buffer, size_t *lenp,
+               loff_t *ppos)
+{
+       int ret = proc_dointvec(table, write, buffer, lenp, ppos);
+
+       if (ret || !write)
+               return ret;
+
+       max_samples_per_tick = DIV_ROUND_UP(sysctl_perf_event_sample_rate, HZ);
+
+       return 0;
+}
 
 static atomic64_t perf_event_id;
 
@@ -4941,26 +4958,14 @@ static int __perf_event_overflow(struct perf_event *event, int nmi,
        if (unlikely(!is_sampling_event(event)))
                return 0;
 
-       if (!throttle) {
-               hwc->interrupts++;
-       } else {
-               if (hwc->interrupts != MAX_INTERRUPTS) {
-                       hwc->interrupts++;
-                       if (HZ * hwc->interrupts >
-                                       (u64)sysctl_perf_event_sample_rate) {
-                               hwc->interrupts = MAX_INTERRUPTS;
-                               perf_log_throttle(event, 0);
-                               ret = 1;
-                       }
-               } else {
-                       /*
-                        * Keep re-disabling events even though on the previous
-                        * pass we disabled it - just in case we raced with a
-                        * sched-in and the event got enabled again:
-                        */
+       if (unlikely(hwc->interrupts >= max_samples_per_tick)) {
+               if (throttle) {
+                       hwc->interrupts = MAX_INTERRUPTS;
+                       perf_log_throttle(event, 0);
                        ret = 1;
                }
-       }
+       } else
+               hwc->interrupts++;
 
        if (event->attr.freq) {
                u64 now = perf_clock();
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 0f1bd83db98523333b9fabde37d200512b20b77e..daef911cbadb81e9c96cc4689a72e84f56e10736 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -948,7 +948,7 @@ static struct ctl_table kern_table[] = {
                .data           = &sysctl_perf_event_sample_rate,
                .maxlen         = sizeof(sysctl_perf_event_sample_rate),
                .mode           = 0644,
-               .proc_handler   = proc_dointvec,
+               .proc_handler   = perf_proc_update_handler,
        },
 #endif
 #ifdef CONFIG_KMEMCHECK
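
With the handler wired up, writes to the sample-rate sysctl (exposed in
mainline as /proc/sys/kernel/perf_event_max_sample_rate; the .procname
entry sits just above the context shown in this hunk) immediately go
through perf_proc_update_handler() and recompute max_samples_per_tick.
A small sketch of such a write, assuming that path and root privileges:

#include <stdio.h>

int main(void)
{
        /*
         * Assumed sysctl path. Lowering the rate to 50000 samples/sec makes
         * perf_proc_update_handler() recompute
         * max_samples_per_tick = DIV_ROUND_UP(50000, HZ).
         */
        FILE *f = fopen("/proc/sys/kernel/perf_event_max_sample_rate", "w");

        if (!f) {
                perror("fopen");
                return 1;
        }
        fprintf(f, "%d\n", 50000);
        return fclose(f) ? 1 : 0;
}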