diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index fd4b13b131f8..03c39c893f67 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -47,11 +47,15 @@
 #include <linux/debugobjects.h>
 #include <linux/sched.h>
 #include <linux/timer.h>
+#include <linux/freezer.h>
 
 #include <asm/uaccess.h>
 
 #include <trace/events/timer.h>
+#include <linux/mt_sched_mon.h>
+
+//#define MTK_HRTIME_DEBUG	/* MTK debug func */
 
 /*
  * The timer bases:
  *
@@ -245,6 +249,11 @@ again:
                        goto again;
                }
                timer->base = new_base;
+       } else {
+               if (cpu != this_cpu && hrtimer_check_target(timer, new_base)) {
+                       cpu = this_cpu;
+                       goto again;
+               }
        }
        return new_base;
 }
@@ -580,6 +589,23 @@ hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
 
        cpu_base->expires_next.tv64 = expires_next.tv64;
 
+       /*
+        * If a hang was detected in the last timer interrupt then we
+        * leave the hang delay active in the hardware. We want the
+        * system to make progress. That also prevents the following
+        * scenario:
+        * T1 expires 50ms from now
+        * T2 expires 5s from now
+        *
+        * T1 is removed, so this code is called and would reprogram
+        * the hardware to 5s from now. Any hrtimer_start after that
+        * will not reprogram the hardware due to hang_detected being
+        * set. So we'd effectively block all timers until the T2 event
+        * fires.
+        */
+       if (cpu_base->hang_detected)
+               return;
+
        if (cpu_base->expires_next.tv64 != KTIME_MAX)
                tick_program_event(cpu_base->expires_next, 1);
 }
@@ -721,17 +747,20 @@ static int hrtimer_switch_to_hres(void)
        return 1;
 }
 
+static void clock_was_set_work(struct work_struct *work)
+{
+       clock_was_set();
+}
+
+static DECLARE_WORK(hrtimer_work, clock_was_set_work);
+
 /*
- * Called from timekeeping code to reprogramm the hrtimer interrupt
- * device. If called from the timer interrupt context we defer it to
- * softirq context.
+ * Called from timekeeping and resume code to reprogram the hrtimer
+ * interrupt device on all cpus.
  */
 void clock_was_set_delayed(void)
 {
-       struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
-
-       cpu_base->clock_was_set = 1;
-       __raise_softirq_irqoff(HRTIMER_SOFTIRQ);
+       schedule_work(&hrtimer_work);
 }
 
 #else
@@ -780,8 +809,10 @@ void hrtimers_resume(void)
        WARN_ONCE(!irqs_disabled(),
                  KERN_INFO "hrtimers_resume() called with IRQs enabled!");
 
+       /* Retrigger on the local CPU */
        retrigger_next_event(NULL);
-       timerfd_clock_was_set();
+       /* And schedule a retrigger for all others */
+       clock_was_set_delayed();
 }
 
 static inline void timer_stats_hrtimer_set_start_info(struct hrtimer *timer)
@@ -966,17 +997,19 @@ int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
        struct hrtimer_clock_base *base, *new_base;
        unsigned long flags;
        int ret, leftmost;
-
+       /* add MTK debug log for ALPS01804694 */
+       if (timer->function == NULL) {
+               pr_alert("add hrtimer but do nothing\n");
+               dump_stack();
+       }
+
        base = lock_hrtimer_base(timer, &flags);
 
        /* Remove an active timer from the queue: */
        ret = remove_hrtimer(timer, base);
 
-       /* Switch the timer base, if necessary: */
-       new_base = switch_hrtimer_base(timer, base, mode & HRTIMER_MODE_PINNED);
-
        if (mode & HRTIMER_MODE_REL) {
-               tim = ktime_add_safe(tim, new_base->get_time());
+               tim = ktime_add_safe(tim, base->get_time());
                /*
                 * CONFIG_TIME_LOW_RES is a temporary way for architectures
                 * to signal that they simply return xtime in
@@ -991,6 +1024,9 @@ int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
 
        hrtimer_set_expires_range_ns(timer, tim, delta_ns);
 
+       /* Switch the timer base, if necessary: */
+       new_base = switch_hrtimer_base(timer, base, mode & HRTIMER_MODE_PINNED);
+
        timer_stats_hrtimer_set_start_info(timer);
 
        leftmost = enqueue_hrtimer(timer, new_base);
@@ -1225,6 +1261,31 @@ int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp)
 }
 EXPORT_SYMBOL_GPL(hrtimer_get_res);
 
+#ifdef MTK_HRTIME_DEBUG
+static void dump_hrtimer_callinfo(struct hrtimer *timer)
+{
+       char symname[KSYM_NAME_LEN];
+
+       if (lookup_symbol_name((unsigned long)(timer->function), symname) < 0) {
+               pr_err("timer info1: state/%lx, func/%pK\n",
+                       timer->state, timer->function);
+       } else {
+               pr_err("timer info2: state/%lx, func/%s\n",
+                       timer->state, symname);
+       }
+
+#ifdef CONFIG_TIMER_STATS
+       if (lookup_symbol_name((unsigned long)(timer->start_site),
+                              symname) < 0) {
+               pr_err("timer stats1: pid/%d(%s), site/%pK\n",
+                       timer->start_pid, timer->start_comm, timer->start_site);
+       } else {
+               pr_err("timer stats2: pid/%d(%s), site/%s\n",
+                       timer->start_pid, timer->start_comm, symname);
+       }
+#endif
+}
+#endif
 static void __run_hrtimer(struct hrtimer *timer, ktime_t *now)
 {
        struct hrtimer_clock_base *base = timer->base;
@@ -1246,7 +1307,10 @@ static void __run_hrtimer(struct hrtimer *timer, ktime_t *now)
         */
        raw_spin_unlock(&cpu_base->lock);
        trace_hrtimer_expire_entry(timer, now);
+
+       mt_trace_hrt_start(fn);
        restart = fn(timer);
+       mt_trace_hrt_end(fn);
        trace_hrtimer_expire_exit(timer);
        raw_spin_lock(&cpu_base->lock);
 
@@ -1432,13 +1496,6 @@ void hrtimer_peek_ahead_timers(void)
 
 static void run_hrtimer_softirq(struct softirq_action *h)
 {
-       struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
-
-       if (cpu_base->clock_was_set) {
-               cpu_base->clock_was_set = 0;
-               clock_was_set();
-       }
-
        hrtimer_peek_ahead_timers();
 }
 
@@ -1545,7 +1602,7 @@ static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mod
                        t->task = NULL;
 
                if (likely(t->task))
-                       schedule();
+                       freezable_schedule();
 
                hrtimer_cancel(&t->timer);
                mode = HRTIMER_MODE_ABS;
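
For context on the MTK sanity check added to __hrtimer_start_range_ns() (it dumps a stack when a timer is enqueued with timer->function == NULL): the expected usage order is init, set the callback, then start. Below is a minimal sketch of that order as a kernel module, written against the 3.x-era hrtimer API this tree uses; all hrt_demo_* names are hypothetical and not part of the patch.

#include <linux/module.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>

static struct hrtimer hrt_demo_timer;

/* Callback runs in hard interrupt context; keep it short. */
static enum hrtimer_restart hrt_demo_cb(struct hrtimer *t)
{
        pr_info("hrt_demo: timer fired\n");
        return HRTIMER_NORESTART;       /* one-shot, do not re-arm */
}

static int __init hrt_demo_init(void)
{
        hrtimer_init(&hrt_demo_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        /* Assign the callback BEFORE hrtimer_start(); the MTK debug
         * hunk above fires exactly when this step is forgotten. */
        hrt_demo_timer.function = hrt_demo_cb;
        hrtimer_start(&hrt_demo_timer, ktime_set(1, 0), HRTIMER_MODE_REL);
        return 0;
}

static void __exit hrt_demo_exit(void)
{
        hrtimer_cancel(&hrt_demo_timer);        /* waits for a running callback */
}

module_init(hrt_demo_init);
module_exit(hrt_demo_exit);
MODULE_LICENSE("GPL");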
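The clock_was_set_delayed() rework replaces the per-cpu clock_was_set flag and HRTIMER_SOFTIRQ round-trip with the stock "defer from atomic context to a workqueue" pattern: the atomic caller only queues work, and the heavier clock_was_set() runs later in process context. A self-contained sketch of the same pattern, with hypothetical demo_* names:

#include <linux/module.h>
#include <linux/workqueue.h>

/* Runs later in process context, where sleeping and locking are allowed. */
static void demo_deferred_fn(struct work_struct *work)
{
        pr_info("demo: heavy work running in process context\n");
}

static DECLARE_WORK(demo_work, demo_deferred_fn);

/*
 * Safe to call from atomic context (timer interrupt, IRQs off, ...):
 * schedule_work() just queues the item and never sleeps.
 */
static void demo_from_atomic_context(void)
{
        schedule_work(&demo_work);
}

static int __init demo_init(void)
{
        demo_from_atomic_context();
        return 0;
}

static void __exit demo_exit(void)
{
        cancel_work_sync(&demo_work);   /* ensure the handler is not running */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");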