#include <linux/tick.h>
#include <linux/irq.h>
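+/*
+ * When CONFIG_AMLOGIC_MODIFY is set, KSOFTIRQD_HIGH_RATE stays undefined:
+ * the ksoftirqd_running() fairness checks below are then compiled out, so
+ * pending softirqs are processed immediately instead of being deferred
+ * to ksoftirqd.
+ */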
+#ifndef CONFIG_AMLOGIC_MODIFY
+#define KSOFTIRQD_HIGH_RATE
+#endif
+
#define CREATE_TRACE_POINTS
#include <trace/events/irq.h>
 * If ksoftirqd is scheduled, we do not want to process pending softirqs
 * right now. Let ksoftirqd handle this at its own rate, to get fairness.
 */
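+/*
+ * ksoftirqd_running() is only referenced by the KSOFTIRQD_HIGH_RATE paths
+ * below, so it is guarded by the same conditional; this also avoids a
+ * defined-but-unused warning when CONFIG_AMLOGIC_MODIFY is set.
+ */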
+#ifdef KSOFTIRQD_HIGH_RATE
static bool ksoftirqd_running(void)
{
	struct task_struct *tsk = __this_cpu_read(ksoftirqd);

	return tsk && (tsk->state == TASK_RUNNING);
}
+#endif
/*
 * preempt_count and SOFTIRQ_OFFSET usage:
	pending = local_softirq_pending();
-	if (pending && !ksoftirqd_running())
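+	/*
+	 * With CONFIG_AMLOGIC_MODIFY (KSOFTIRQD_HIGH_RATE undefined), pending
+	 * softirqs are run here even while ksoftirqd is scheduled on this CPU.
+	 */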
+#ifdef KSOFTIRQD_HIGH_RATE
+	if (pending && !ksoftirqd_running()) {
+#else
+	if (pending) {
+#endif
		do_softirq_own_stack();
+	}
	local_irq_restore(flags);
}
static inline void invoke_softirq(void)
{
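+	/*
+	 * With CONFIG_AMLOGIC_MODIFY (KSOFTIRQD_HIGH_RATE undefined), softirqs
+	 * are invoked at IRQ exit even when ksoftirqd is already scheduled.
+	 */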
+#ifdef KSOFTIRQD_HIGH_RATE
	if (ksoftirqd_running())
		return;
-
+#endif
	if (!force_irqthreads) {
#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
		/*