1 /* SPDX-License-Identifier: GPL-2.0 */
/*
 * IRQ subsystem internal functions and variables:
 *
 * Do not ever include this file from anything else than
 * kernel/irq/. Do not even think about using any information outside
 * of this file for your non core code.
 */
9 #include <linux/irqdesc.h>
10 #include <linux/kernel_stat.h>
11 #include <linux/pm_runtime.h>
12 #include <linux/sched/clock.h>
/*
 * Size of the bitmap tracking allocated irq descriptors; sparse
 * configurations reserve extra headroom above NR_IRQS.
 */
#ifdef CONFIG_SPARSE_IRQ
# define IRQ_BITMAP_BITS	(NR_IRQS + 8196)
#else
# define IRQ_BITMAP_BITS	NR_IRQS
#endif
/* Shorthand for the deliberately unwieldy internal-state field name */
#define istate core_internal_state__do_not_mess_with_it

extern bool noirqdebug;

extern struct irqaction chained_action;
/*
 * Bits used by threaded handlers:
 * IRQTF_RUNTHREAD - signals that the interrupt handler thread should run
 * IRQTF_WARNED    - warning "IRQ_WAKE_THREAD w/o thread_fn" has been printed
 * IRQTF_AFFINITY  - irq thread is requested to adjust affinity
 * IRQTF_FORCED_THREAD  - irq action is force threaded
 */
/*
 * Bit masks for desc->core_internal_state__do_not_mess_with_it
 *
 * IRQS_AUTODETECT		- autodetection in progress
 * IRQS_SPURIOUS_DISABLED	- was disabled due to spurious interrupt
 *				  detection
 * IRQS_POLL_INPROGRESS		- polling in progress
 * IRQS_ONESHOT			- irq is not unmasked in primary handler
 * IRQS_REPLAY			- irq is replayed
 * IRQS_WAITING			- irq is waiting
 * IRQS_PENDING			- irq is pending and replayed later
 * IRQS_SUSPENDED		- irq is suspended
 * IRQS_TIMINGS			- irq timings accounting is enabled
 */
enum {
	IRQS_AUTODETECT		= 0x00000001,
	IRQS_SPURIOUS_DISABLED	= 0x00000002,
	IRQS_POLL_INPROGRESS	= 0x00000008,
	IRQS_ONESHOT		= 0x00000020,
	IRQS_REPLAY		= 0x00000040,
	IRQS_WAITING		= 0x00000080,
	IRQS_PENDING		= 0x00000200,
	IRQS_SUSPENDED		= 0x00000800,
	IRQS_TIMINGS		= 0x00001000,
};
extern int __irq_set_trigger(struct irq_desc *desc, unsigned long flags);
extern void __disable_irq(struct irq_desc *desc);
extern void __enable_irq(struct irq_desc *desc);

/* Arguments for irq_startup()'s resend/force parameters */
#define IRQ_RESEND	true
#define IRQ_NORESEND	false

#define IRQ_START_FORCE	true
#define IRQ_START_COND	false
extern int irq_startup(struct irq_desc *desc, bool resend, bool force);

extern void irq_shutdown(struct irq_desc *desc);
extern void irq_enable(struct irq_desc *desc);
extern void irq_disable(struct irq_desc *desc);
extern void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu);
extern void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu);
extern void mask_irq(struct irq_desc *desc);
extern void unmask_irq(struct irq_desc *desc);
extern void unmask_threaded_irq(struct irq_desc *desc);
/* Sparse irqs track allocation in the bitmap, so marking is a no-op there */
#ifdef CONFIG_SPARSE_IRQ
static inline void irq_mark_irq(unsigned int irq) { }
#else
extern void irq_mark_irq(unsigned int irq);
#endif
95 extern void init_kstat_irqs(struct irq_desc
*desc
, int node
, int nr
);
97 irqreturn_t
__handle_irq_event_percpu(struct irq_desc
*desc
, unsigned int *flags
);
98 irqreturn_t
handle_irq_event_percpu(struct irq_desc
*desc
);
99 irqreturn_t
handle_irq_event(struct irq_desc
*desc
);
101 /* Resending of interrupts :*/
102 void check_irq_resend(struct irq_desc
*desc
);
103 bool irq_wait_for_poll(struct irq_desc
*desc
);
104 void __irq_wake_thread(struct irq_desc
*desc
, struct irqaction
*action
);
/* /proc/irq/ registration; stubbed out when procfs is not configured */
#ifdef CONFIG_PROC_FS
extern void register_irq_proc(unsigned int irq, struct irq_desc *desc);
extern void unregister_irq_proc(unsigned int irq, struct irq_desc *desc);
extern void register_handler_proc(unsigned int irq, struct irqaction *action);
extern void unregister_handler_proc(unsigned int irq, struct irqaction *action);
#else
static inline void register_irq_proc(unsigned int irq, struct irq_desc *desc) { }
static inline void unregister_irq_proc(unsigned int irq, struct irq_desc *desc) { }
static inline void register_handler_proc(unsigned int irq,
					 struct irqaction *action) { }
static inline void unregister_handler_proc(unsigned int irq,
					   struct irqaction *action) { }
#endif
extern bool irq_can_set_affinity_usr(unsigned int irq);

extern int irq_select_affinity_usr(unsigned int irq);

extern void irq_set_thread_affinity(struct irq_desc *desc);

extern int irq_do_set_affinity(struct irq_data *data,
			       const struct cpumask *dest, bool force);

/* On UP there is nothing to set up; the stub always succeeds */
#ifdef CONFIG_SMP
extern int irq_setup_affinity(struct irq_desc *desc);
#else
static inline int irq_setup_affinity(struct irq_desc *desc) { return 0; }
#endif
135 /* Inline functions for support of irq chips on slow busses */
136 static inline void chip_bus_lock(struct irq_desc
*desc
)
138 if (unlikely(desc
->irq_data
.chip
->irq_bus_lock
))
139 desc
->irq_data
.chip
->irq_bus_lock(&desc
->irq_data
);
142 static inline void chip_bus_sync_unlock(struct irq_desc
*desc
)
144 if (unlikely(desc
->irq_data
.chip
->irq_bus_sync_unlock
))
145 desc
->irq_data
.chip
->irq_bus_sync_unlock(&desc
->irq_data
);
/* Validation flags for __irq_get_desc_lock() */
#define _IRQ_DESC_CHECK		(1 << 0)
#define _IRQ_DESC_PERCPU	(1 << 1)

#define IRQ_GET_DESC_CHECK_GLOBAL	(_IRQ_DESC_CHECK)
#define IRQ_GET_DESC_CHECK_PERCPU	(_IRQ_DESC_CHECK | _IRQ_DESC_PERCPU)

/* Walk the singly linked list of actions hanging off a descriptor */
#define for_each_action_of_desc(desc, act)			\
	for (act = desc->action; act; act = act->next)
/*
 * Descriptor lookup + lock helpers; @bus selects whether the slow-bus
 * chip lock is taken as well (see the irq_get_desc_buslock() wrappers).
 */
struct irq_desc *
__irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus,
		    unsigned int check);
void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus);
/* Lock a descriptor including the chip's slow-bus lock (bus == true) */
static inline struct irq_desc *
irq_get_desc_buslock(unsigned int irq, unsigned long *flags, unsigned int check)
{
	return __irq_get_desc_lock(irq, flags, true, check);
}

static inline void
irq_put_desc_busunlock(struct irq_desc *desc, unsigned long flags)
{
	__irq_put_desc_unlock(desc, flags, true);
}
/* Lock a descriptor without touching the chip's slow-bus lock */
static inline struct irq_desc *
irq_get_desc_lock(unsigned int irq, unsigned long *flags, unsigned int check)
{
	return __irq_get_desc_lock(irq, flags, false, check);
}

static inline void
irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags)
{
	__irq_put_desc_unlock(desc, flags, false);
}
186 #define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors)
188 static inline unsigned int irqd_get(struct irq_data
*d
)
190 return __irqd_to_state(d
);
194 * Manipulation functions for irq_data.state
196 static inline void irqd_set_move_pending(struct irq_data
*d
)
198 __irqd_to_state(d
) |= IRQD_SETAFFINITY_PENDING
;
201 static inline void irqd_clr_move_pending(struct irq_data
*d
)
203 __irqd_to_state(d
) &= ~IRQD_SETAFFINITY_PENDING
;
206 static inline void irqd_set_managed_shutdown(struct irq_data
*d
)
208 __irqd_to_state(d
) |= IRQD_MANAGED_SHUTDOWN
;
211 static inline void irqd_clr_managed_shutdown(struct irq_data
*d
)
213 __irqd_to_state(d
) &= ~IRQD_MANAGED_SHUTDOWN
;
216 static inline void irqd_clear(struct irq_data
*d
, unsigned int mask
)
218 __irqd_to_state(d
) &= ~mask
;
221 static inline void irqd_set(struct irq_data
*d
, unsigned int mask
)
223 __irqd_to_state(d
) |= mask
;
226 static inline bool irqd_has_set(struct irq_data
*d
, unsigned int mask
)
228 return __irqd_to_state(d
) & mask
;
231 static inline void irq_state_set_disabled(struct irq_desc
*desc
)
233 irqd_set(&desc
->irq_data
, IRQD_IRQ_DISABLED
);
236 static inline void irq_state_set_masked(struct irq_desc
*desc
)
238 irqd_set(&desc
->irq_data
, IRQD_IRQ_MASKED
);
241 #undef __irqd_to_state
243 static inline void __kstat_incr_irqs_this_cpu(struct irq_desc
*desc
)
245 __this_cpu_inc(*desc
->kstat_irqs
);
246 __this_cpu_inc(kstat
.irqs_sum
);
249 static inline void kstat_incr_irqs_this_cpu(struct irq_desc
*desc
)
251 __kstat_incr_irqs_this_cpu(desc
);
255 static inline int irq_desc_get_node(struct irq_desc
*desc
)
257 return irq_common_data_get_node(&desc
->irq_common_data
);
260 static inline int irq_desc_is_chained(struct irq_desc
*desc
)
262 return (desc
->action
&& desc
->action
== &chained_action
);
/* PM wakeup integration; no-op stubs when CONFIG_PM_SLEEP is off */
#ifdef CONFIG_PM_SLEEP
bool irq_pm_check_wakeup(struct irq_desc *desc);
void irq_pm_install_action(struct irq_desc *desc, struct irqaction *action);
void irq_pm_remove_action(struct irq_desc *desc, struct irqaction *action);
#else
static inline bool irq_pm_check_wakeup(struct irq_desc *desc) { return false; }
static inline void
irq_pm_install_action(struct irq_desc *desc, struct irqaction *action) { }
static inline void
irq_pm_remove_action(struct irq_desc *desc, struct irqaction *action) { }
#endif
277 #ifdef CONFIG_IRQ_TIMINGS
279 #define IRQ_TIMINGS_SHIFT 5
280 #define IRQ_TIMINGS_SIZE (1 << IRQ_TIMINGS_SHIFT)
281 #define IRQ_TIMINGS_MASK (IRQ_TIMINGS_SIZE - 1)
284 * struct irq_timings - irq timings storing structure
285 * @values: a circular buffer of u64 encoded <timestamp,irq> values
286 * @count: the number of elements in the array
289 u64 values
[IRQ_TIMINGS_SIZE
];
293 DECLARE_PER_CPU(struct irq_timings
, irq_timings
);
extern void irq_timings_free(int irq);
extern int irq_timings_alloc(int irq);
298 static inline void irq_remove_timings(struct irq_desc
*desc
)
300 desc
->istate
&= ~IRQS_TIMINGS
;
302 irq_timings_free(irq_desc_get_irq(desc
));
305 static inline void irq_setup_timings(struct irq_desc
*desc
, struct irqaction
*act
)
307 int irq
= irq_desc_get_irq(desc
);
311 * We don't need the measurement because the idle code already
312 * knows the next expiry event.
314 if (act
->flags
& __IRQF_TIMER
)
318 * In case the timing allocation fails, we just want to warn,
319 * not fail, so letting the system boot anyway.
321 ret
= irq_timings_alloc(irq
);
323 pr_warn("Failed to allocate irq timing stats for irq%d (%d)",
328 desc
->istate
|= IRQS_TIMINGS
;
extern void irq_timings_enable(void);
extern void irq_timings_disable(void);

DECLARE_STATIC_KEY_FALSE(irq_timing_enabled);
337 * The interrupt number and the timestamp are encoded into a single
338 * u64 variable to optimize the size.
339 * 48 bit time stamp and 16 bit IRQ number is way sufficient.
340 * Who cares an IRQ after 78 hours of idle time?
342 static inline u64
irq_timing_encode(u64 timestamp
, int irq
)
344 return (timestamp
<< 16) | irq
;
347 static inline int irq_timing_decode(u64 value
, u64
*timestamp
)
349 *timestamp
= value
>> 16;
350 return value
& U16_MAX
;
354 * The function record_irq_time is only called in one place in the
355 * interrupts handler. We want this function always inline so the code
356 * inside is embedded in the function and the static key branching
357 * code can act at the higher level. Without the explicit
358 * __always_inline we can end up with a function call and a small
359 * overhead in the hotpath for nothing.
361 static __always_inline
void record_irq_time(struct irq_desc
*desc
)
363 if (!static_branch_likely(&irq_timing_enabled
))
366 if (desc
->istate
& IRQS_TIMINGS
) {
367 struct irq_timings
*timings
= this_cpu_ptr(&irq_timings
);
369 timings
->values
[timings
->count
& IRQ_TIMINGS_MASK
] =
370 irq_timing_encode(local_clock(),
371 irq_desc_get_irq(desc
));
377 static inline void irq_remove_timings(struct irq_desc
*desc
) {}
378 static inline void irq_setup_timings(struct irq_desc
*desc
,
379 struct irqaction
*act
) {};
380 static inline void record_irq_time(struct irq_desc
*desc
) {}
381 #endif /* CONFIG_IRQ_TIMINGS */
384 #ifdef CONFIG_GENERIC_IRQ_CHIP
385 void irq_init_generic_chip(struct irq_chip_generic
*gc
, const char *name
,
386 int num_ct
, unsigned int irq_base
,
387 void __iomem
*reg_base
, irq_flow_handler_t handler
);
390 irq_init_generic_chip(struct irq_chip_generic
*gc
, const char *name
,
391 int num_ct
, unsigned int irq_base
,
392 void __iomem
*reg_base
, irq_flow_handler_t handler
) { }
393 #endif /* CONFIG_GENERIC_IRQ_CHIP */
/* Deferred affinity updates; trivial stubs when pending-irq is disabled */
#ifdef CONFIG_GENERIC_PENDING_IRQ
static inline bool irq_can_move_pcntxt(struct irq_data *data)
{
	return irqd_can_move_in_process_context(data);
}
static inline bool irq_move_pending(struct irq_data *data)
{
	return irqd_is_setaffinity_pending(data);
}
static inline void
irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask)
{
	cpumask_copy(desc->pending_mask, mask);
}
static inline void
irq_get_pending(struct cpumask *mask, struct irq_desc *desc)
{
	cpumask_copy(mask, desc->pending_mask);
}
static inline struct cpumask *irq_desc_get_pending_mask(struct irq_desc *desc)
{
	return desc->pending_mask;
}
bool irq_fixup_move_pending(struct irq_desc *desc, bool force_clear);
#else /* CONFIG_GENERIC_PENDING_IRQ */
static inline bool irq_can_move_pcntxt(struct irq_data *data)
{
	return true;
}
static inline bool irq_move_pending(struct irq_data *data)
{
	return false;
}
static inline void
irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask)
{
}
static inline void
irq_get_pending(struct cpumask *mask, struct irq_desc *desc)
{
}
static inline struct cpumask *irq_desc_get_pending_mask(struct irq_desc *desc)
{
	return NULL;
}
static inline bool irq_fixup_move_pending(struct irq_desc *desc, bool fclear)
{
	return false;
}
#endif /* !CONFIG_GENERIC_PENDING_IRQ */
#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
#include <linux/debugfs.h>

void irq_add_debugfs_entry(unsigned int irq, struct irq_desc *desc);
static inline void irq_remove_debugfs_entry(struct irq_desc *desc)
{
	debugfs_remove(desc->debugfs_file);
}
# ifdef CONFIG_IRQ_DOMAIN
void irq_domain_debugfs_init(struct dentry *root);
# else
static inline void irq_domain_debugfs_init(struct dentry *root)
{
}
# endif
#else /* CONFIG_GENERIC_IRQ_DEBUGFS */
static inline void irq_add_debugfs_entry(unsigned int irq, struct irq_desc *d)
{
}
static inline void irq_remove_debugfs_entry(struct irq_desc *d)
{
}
#endif /* CONFIG_GENERIC_IRQ_DEBUGFS */