/*
 * Derived from arch/i386/kernel/irq.c
 * Copyright (C) 1992 Linus Torvalds
 * Adapted from arch/i386 by Gary Thomas
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Updated and modified by Cort Dougan <cort@fsmlabs.com>
 * Copyright (C) 1996-2001 Cort Dougan
 * Adapted for Power Macintosh by Paul Mackerras
 * Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * This file contains the code used by various IRQ handling routines:
 * asking for different IRQs should be done through these routines
 * instead of just grabbing them. Thus setups with different IRQ numbers
 * shouldn't result in any weird surprises, and installing new handlers
 * should be easier.
 *
 * The MPC8xx has an interrupt mask in the SIU. If a bit is set, the
 * interrupt is _enabled_. As expected, IRQ0 is bit 0 in the 32-bit
 * mask register (of which only 16 are defined), hence the weird shifting
 * and complement of the cached_irq_mask. I want to be able to stuff
 * this right into the SIU SMASK register.
 * Many of the prep/chrp functions are conditionally compiled on CONFIG_8xx
 * to reduce code space and undefined function references.
 */

#undef DEBUG

#include <linux/export.h>
#include <linux/threads.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/profile.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/radix-tree.h>
#include <linux/mutex.h>
#include <linux/bootmem.h>
#include <linux/pci.h>
#include <linux/debugfs.h>
#include <linux/of.h>
#include <linux/of_irq.h>

#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/cache.h>
#include <asm/prom.h>
#include <asm/ptrace.h>
#include <asm/machdep.h>
#include <asm/udbg.h>
#include <asm/smp.h>
#include <asm/debug.h>

#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/firmware.h>
#include <asm/lv1call.h>
#endif
#define CREATE_TRACE_POINTS
#include <asm/trace.h>

DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);

int __irq_offset_value;

#ifdef CONFIG_PPC32
EXPORT_SYMBOL(__irq_offset_value);
atomic_t ppc_n_lost_interrupts;

#ifdef CONFIG_TAU_INT
extern int tau_initialized;
extern int tau_interrupts(int);
#endif
#endif /* CONFIG_PPC32 */

#ifdef CONFIG_PPC64

int distribute_irqs = 1;

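/*
 * Read the irq_happened byte straight out of the PACA (addressed via r13)
 * without any tracing or smp_processor_id() overhead.
 */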
static inline notrace unsigned long get_irq_happened(void)
{
	unsigned long happened;

	__asm__ __volatile__("lbz %0,%1(13)"
	: "=r" (happened) : "i" (offsetof(struct paca_struct, irq_happened)));

	return happened;
}

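/*
 * Write the soft-enabled byte in the PACA (addressed via r13) without
 * any tracing overhead.
 */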
static inline notrace void set_soft_enabled(unsigned long enable)
{
	__asm__ __volatile__("stb %0,%1(13)"
	: : "r" (enable), "i" (offsetof(struct paca_struct, soft_enabled)));
}

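/*
 * Check whether the decrementer has already passed the next scheduled
 * timer event; if so, arm it to fire again right away so the interrupt
 * is not lost.
 */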
static inline notrace int decrementer_check_overflow(void)
{
	u64 now = get_tb_or_rtc();
	u64 *next_tb = &__get_cpu_var(decrementers_next_tb);

	if (now >= *next_tb)
		set_dec(1);
	return now >= *next_tb;
}

/* This is called whenever we are re-enabling interrupts
 * and returns either 0 (nothing to do) or 500/900/280/a00/e80 if
 * there's an EE, DEC or DBELL to generate.
 *
 * This is called in two contexts: from arch_local_irq_restore()
 * before soft-enabling interrupts, and from the exception exit
 * path when returning from an interrupt from a soft-disabled to
 * a soft-enabled context. In both cases we have interrupts hard
 * disabled.
 *
 * We take care of only clearing the bits we handled in the
 * PACA irq_happened field since we can only re-emit one at a
 * time and we don't want to "lose" one.
 */
notrace unsigned int __check_irq_replay(void)
{
	/*
	 * We use local_paca rather than get_paca() to avoid all
	 * the debug_smp_processor_id() business in this low-level
	 * function.
	 */
	unsigned char happened = local_paca->irq_happened;

	/* Clear bit 0 which we wouldn't clear otherwise */
	local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;

	/*
	 * Force the delivery of pending soft-disabled interrupts on PS3.
	 * Any HV call will have this side effect.
	 */
	if (firmware_has_feature(FW_FEATURE_PS3_LV1)) {
		u64 tmp, tmp2;
		lv1_get_version_info(&tmp, &tmp2);
	}

	/*
	 * We may have missed a decrementer interrupt. We check the
	 * decrementer itself rather than the paca irq_happened field
	 * in case we also had a rollover while hard disabled.
	 */
	local_paca->irq_happened &= ~PACA_IRQ_DEC;
	if ((happened & PACA_IRQ_DEC) || decrementer_check_overflow())
		return 0x900;

	/* Then check if an external interrupt happened */
	local_paca->irq_happened &= ~PACA_IRQ_EE;
	if (happened & PACA_IRQ_EE)
		return 0x500;

#ifdef CONFIG_PPC_BOOK3E
	/* Next, check if an EPR external interrupt happened.
	 * This bit is typically set if we need to handle another
	 * "edge" interrupt from within the MPIC "EPR" handler.
	 */
	local_paca->irq_happened &= ~PACA_IRQ_EE_EDGE;
	if (happened & PACA_IRQ_EE_EDGE)
		return 0x500;

	local_paca->irq_happened &= ~PACA_IRQ_DBELL;
	if (happened & PACA_IRQ_DBELL)
		return 0x280;
#else
	local_paca->irq_happened &= ~PACA_IRQ_DBELL;
	if (happened & PACA_IRQ_DBELL) {
		if (cpu_has_feature(CPU_FTR_HVMODE))
			return 0xe80;
		return 0xa00;
	}
#endif /* CONFIG_PPC_BOOK3E */

	/* There should be nothing left! */
	BUG_ON(local_paca->irq_happened != 0);

	return 0;
}

notrace void arch_local_irq_restore(unsigned long en)
{
	unsigned char irq_happened;
	unsigned int replay;

	/* Write the new soft-enabled value */
	set_soft_enabled(en);
	if (!en)
		return;
	/*
	 * From this point onward, we can take interrupts, preempt,
	 * etc... unless we got hard-disabled. We check if an event
	 * happened. If none happened, we know we can just return.
	 *
	 * We may have been preempted before the check below, in which
	 * case we are checking the "new" CPU instead of the old one.
	 * This is only a problem if an event happened on the "old" CPU.
	 *
	 * External interrupt events will have caused interrupts to
	 * be hard-disabled, so there is no problem; we cannot have
	 * been preempted.
	 */
	irq_happened = get_irq_happened();
	if (!irq_happened)
		return;

	/*
	 * We need to hard disable to get a trusted value from
	 * __check_irq_replay(). We also need to soft-disable
	 * again to avoid warnings in there due to the use of
	 * per-cpu variables.
	 *
	 * We know that if the value in irq_happened is exactly 0x01
	 * then we are already hard disabled (there are other less
	 * common cases that we'll ignore for now), so we skip the
	 * (expensive) mtmsrd.
	 */
	if (unlikely(irq_happened != PACA_IRQ_HARD_DIS))
		__hard_irq_disable();
#ifdef CONFIG_TRACE_IRQFLAGS
	else {
		/*
		 * We should already be hard disabled here. We had bugs
		 * where that wasn't the case so let's double-check it
		 * and warn if we are wrong. Only do that when IRQ tracing
		 * is enabled as mfmsr() can be costly.
		 */
		if (WARN_ON(mfmsr() & MSR_EE))
			__hard_irq_disable();
	}
#endif /* CONFIG_TRACE_IRQFLAGS */

	set_soft_enabled(0);

	/*
	 * Check if anything needs to be re-emitted. We haven't
	 * soft-enabled yet to avoid warnings in decrementer_check_overflow
	 * accessing per-cpu variables
	 */
	replay = __check_irq_replay();

	/* We can soft-enable now */
	set_soft_enabled(1);

	/*
	 * And replay if we have to. This will return with interrupts
	 * hard-enabled.
	 */
	if (replay) {
		__replay_interrupt(replay);
		return;
	}

	/* Finally, let's ensure we are hard enabled */
	__hard_irq_enable();
}
EXPORT_SYMBOL(arch_local_irq_restore);

/*
 * This is specifically called by assembly code to re-enable interrupts
 * if they are currently disabled. This is typically called before
 * schedule() or do_signal() when returning to userspace. We do it
 * in C to avoid the burden of dealing with lockdep etc...
 *
 * NOTE: This is called with interrupts hard disabled but not marked
 * as such in paca->irq_happened, so we need to resync this.
 */
void notrace restore_interrupts(void)
{
	if (irqs_disabled()) {
		local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
		local_irq_enable();
	} else
		__hard_irq_enable();
}

/*
 * This is a helper to use when about to enter a low-power idle state,
 * when doing so has the side effect of re-enabling interrupts
 * (such as calling H_CEDE under pHyp).
 *
 * You call this function with interrupts soft-disabled (this is
 * already the case when ppc_md.power_save is called). The function
 * will return whether to enter power save or just return.
 *
 * In the former case, it will have notified lockdep of interrupts
 * being re-enabled and generally sanitized the lazy irq state,
 * and in the latter case it will leave with interrupts hard
 * disabled and marked as such, so the local_irq_enable() call
 * in cpu_idle() will properly re-enable everything.
 */
bool prep_irq_for_idle(void)
{
	/*
	 * First we need to hard disable to ensure no interrupt
	 * occurs before we effectively enter the low power state
	 */
	hard_irq_disable();

	/*
	 * If anything happened while we were soft-disabled,
	 * we return now and do not enter the low power state.
	 */
	if (lazy_irq_pending())
		return false;

	/* Tell lockdep we are about to re-enable */
	trace_hardirqs_on();

	/*
	 * Mark interrupts as soft-enabled and clear the
	 * PACA_IRQ_HARD_DIS from the pending mask since we
	 * are about to hard enable as well as a side effect
	 * of entering the low power state.
	 */
	local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
	local_paca->soft_enabled = 1;

	/* Tell the caller to enter the low power state */
	return true;
}

#endif /* CONFIG_PPC64 */

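/*
 * Show the architecture-specific interrupt counters (thermal assist,
 * local timer, spurious, performance monitor, machine check and
 * doorbell events) in /proc/interrupts, one column per online CPU.
 */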
int arch_show_interrupts(struct seq_file *p, int prec)
{
	int j;

#if defined(CONFIG_PPC32) && defined(CONFIG_TAU_INT)
	if (tau_initialized) {
		seq_printf(p, "%*s: ", prec, "TAU");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", tau_interrupts(j));
		seq_puts(p, " PowerPC Thermal Assist (cpu temp)\n");
	}
#endif /* CONFIG_PPC32 && CONFIG_TAU_INT */

	seq_printf(p, "%*s: ", prec, "LOC");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs);
	seq_printf(p, " Local timer interrupts\n");

	seq_printf(p, "%*s: ", prec, "SPU");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).spurious_irqs);
	seq_printf(p, " Spurious interrupts\n");

	seq_printf(p, "%*s: ", prec, "CNT");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).pmu_irqs);
	seq_printf(p, " Performance monitoring interrupts\n");

	seq_printf(p, "%*s: ", prec, "MCE");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).mce_exceptions);
	seq_printf(p, " Machine check exceptions\n");

#ifdef CONFIG_PPC_DOORBELL
	if (cpu_has_feature(CPU_FTR_DBELL)) {
		seq_printf(p, "%*s: ", prec, "DBL");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", per_cpu(irq_stat, j).doorbell_irqs);
		seq_printf(p, " Doorbell interrupts\n");
	}
#endif

	return 0;
}

/*
 * /proc/stat helpers
 */
u64 arch_irq_stat_cpu(unsigned int cpu)
{
	u64 sum = per_cpu(irq_stat, cpu).timer_irqs;

	sum += per_cpu(irq_stat, cpu).pmu_irqs;
	sum += per_cpu(irq_stat, cpu).mce_exceptions;
	sum += per_cpu(irq_stat, cpu).spurious_irqs;
#ifdef CONFIG_PPC_DOORBELL
	sum += per_cpu(irq_stat, cpu).doorbell_irqs;
#endif

	return sum;
}

#ifdef CONFIG_HOTPLUG_CPU
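/*
 * Used on the CPU hotplug path: re-target every non-per-CPU interrupt,
 * falling back to all online CPUs when an interrupt's affinity mask no
 * longer intersects the online map, then briefly enable interrupts so
 * anything already pending can be delivered.
 */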
void migrate_irqs(void)
{
	struct irq_desc *desc;
	unsigned int irq;
	static int warned;
	cpumask_var_t mask;
	const struct cpumask *map = cpu_online_mask;

	alloc_cpumask_var(&mask, GFP_KERNEL);

	for_each_irq_desc(irq, desc) {
		struct irq_data *data;
		struct irq_chip *chip;

		data = irq_desc_get_irq_data(desc);
		if (irqd_is_per_cpu(data))
			continue;

		chip = irq_data_get_irq_chip(data);

		cpumask_and(mask, data->affinity, map);
		if (cpumask_any(mask) >= nr_cpu_ids) {
			printk("Breaking affinity for irq %i\n", irq);
			cpumask_copy(mask, map);
		}
		if (chip->irq_set_affinity)
			chip->irq_set_affinity(data, mask, true);
		else if (desc->action && !(warned++))
			printk("Cannot set affinity for irq %i\n", irq);
	}

	free_cpumask_var(mask);

	local_irq_enable();
	mdelay(1);
	local_irq_disable();
}
#endif

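/*
 * Run the handler for a single interrupt, switching to this CPU's
 * dedicated hardirq stack (and adjusting the stack limit used for
 * overflow checking) unless we are already running on it.
 */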
static inline void handle_one_irq(unsigned int irq)
{
	struct thread_info *curtp, *irqtp;
	unsigned long saved_sp_limit;
	struct irq_desc *desc;

	desc = irq_to_desc(irq);
	if (!desc)
		return;

	/* Switch to the irq stack to handle this */
	curtp = current_thread_info();
	irqtp = hardirq_ctx[smp_processor_id()];

	if (curtp == irqtp) {
		/* We're already on the irq stack, just handle it */
		desc->handle_irq(irq, desc);
		return;
	}

	saved_sp_limit = current->thread.ksp_limit;

	irqtp->task = curtp->task;
	irqtp->flags = 0;

	/* Copy the softirq bits in preempt_count so that the
	 * softirq checks work in the hardirq context. */
	irqtp->preempt_count = (irqtp->preempt_count & ~SOFTIRQ_MASK) |
			       (curtp->preempt_count & SOFTIRQ_MASK);

	current->thread.ksp_limit = (unsigned long)irqtp +
		_ALIGN_UP(sizeof(struct thread_info), 16);

	call_handle_irq(irq, desc, irqtp, desc->handle_irq);
	current->thread.ksp_limit = saved_sp_limit;
	irqtp->task = NULL;

	/* Set any flag that may have been set on the
	 * alternate stack
	 */
	if (irqtp->flags)
		set_bits(irqtp->flags, &curtp->flags);
}

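/*
 * Warn (with a backtrace) if the kernel stack is getting close to
 * overflowing. Only active when CONFIG_DEBUG_STACKOVERFLOW is enabled.
 */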
static inline void check_stack_overflow(void)
{
#ifdef CONFIG_DEBUG_STACKOVERFLOW
	long sp;

	sp = __get_SP() & (THREAD_SIZE-1);

	/* check for stack overflow: is there less than 2KB free? */
	if (unlikely(sp < (sizeof(struct thread_info) + 2048))) {
		printk("do_IRQ: stack overflow: %ld\n",
			sp - sizeof(struct thread_info));
		dump_stack();
	}
#endif
}

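/*
 * Main external interrupt entry point: ask the platform interrupt
 * controller which interrupt fired, re-enable hard interrupts where
 * that is safe, then dispatch the handler (or count the event as
 * spurious if no interrupt was pending).
 */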
void do_IRQ(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	unsigned int irq;

	irq_enter();

	trace_irq_entry(regs);

	check_stack_overflow();

	/*
	 * Query the platform PIC for the interrupt & ack it.
	 *
	 * This will typically lower the interrupt line to the CPU
	 */
	irq = ppc_md.get_irq();

	/* We can hard enable interrupts now */
	may_hard_irq_enable();

	/* And finally process it */
	if (irq != NO_IRQ)
		handle_one_irq(irq);
	else
		__get_cpu_var(irq_stat).spurious_irqs++;

	trace_irq_exit(regs);

	irq_exit();
	set_irq_regs(old_regs);
}

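/*
 * Boot-time interrupt setup: let the platform install its interrupt
 * controller, then initialize the exception-level and hardirq/softirq
 * stacks.
 */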
void __init init_IRQ(void)
{
	if (ppc_md.init_IRQ)
		ppc_md.init_IRQ();

	exc_lvl_ctx_init();

	irq_ctx_init();
}

#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
struct thread_info *critirq_ctx[NR_CPUS] __read_mostly;
struct thread_info *dbgirq_ctx[NR_CPUS] __read_mostly;
struct thread_info *mcheckirq_ctx[NR_CPUS] __read_mostly;

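/*
 * Initialize the per-CPU thread_info areas used as stacks for the
 * critical (and, on BookE, debug and machine check) exception levels.
 */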
void exc_lvl_ctx_init(void)
{
	struct thread_info *tp;
	int i, cpu_nr;

	for_each_possible_cpu(i) {
#ifdef CONFIG_PPC64
		cpu_nr = i;
#else
		cpu_nr = get_hard_smp_processor_id(i);
#endif
		memset((void *)critirq_ctx[cpu_nr], 0, THREAD_SIZE);
		tp = critirq_ctx[cpu_nr];
		tp->cpu = cpu_nr;
		tp->preempt_count = 0;

#ifdef CONFIG_BOOKE
		memset((void *)dbgirq_ctx[cpu_nr], 0, THREAD_SIZE);
		tp = dbgirq_ctx[cpu_nr];
		tp->cpu = cpu_nr;
		tp->preempt_count = 0;

		memset((void *)mcheckirq_ctx[cpu_nr], 0, THREAD_SIZE);
		tp = mcheckirq_ctx[cpu_nr];
		tp->cpu = cpu_nr;
		tp->preempt_count = HARDIRQ_OFFSET;
#endif
	}
}
#endif

struct thread_info *softirq_ctx[NR_CPUS] __read_mostly;
struct thread_info *hardirq_ctx[NR_CPUS] __read_mostly;

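/*
 * Initialize the per-CPU softirq and hardirq stacks: clear each
 * thread_info and record its CPU number and base preempt_count.
 */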
void irq_ctx_init(void)
{
	struct thread_info *tp;
	int i;

	for_each_possible_cpu(i) {
		memset((void *)softirq_ctx[i], 0, THREAD_SIZE);
		tp = softirq_ctx[i];
		tp->cpu = i;
		tp->preempt_count = 0;

		memset((void *)hardirq_ctx[i], 0, THREAD_SIZE);
		tp = hardirq_ctx[i];
		tp->cpu = i;
		tp->preempt_count = HARDIRQ_OFFSET;
	}
}

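/*
 * Run pending softirqs on this CPU's dedicated softirq stack, saving
 * and restoring the stack limit used for overflow checking and
 * propagating any thread_info flags set while on the other stack.
 */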
static inline void do_softirq_onstack(void)
{
	struct thread_info *curtp, *irqtp;
	unsigned long saved_sp_limit = current->thread.ksp_limit;

	curtp = current_thread_info();
	irqtp = softirq_ctx[smp_processor_id()];
	irqtp->task = curtp->task;
	irqtp->flags = 0;
	current->thread.ksp_limit = (unsigned long)irqtp +
				    _ALIGN_UP(sizeof(struct thread_info), 16);
	call_do_softirq(irqtp);
	current->thread.ksp_limit = saved_sp_limit;
	irqtp->task = NULL;

	/* Set any flag that may have been set on the
	 * alternate stack
	 */
	if (irqtp->flags)
		set_bits(irqtp->flags, &curtp->flags);
}

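/*
 * Entry point for softirq processing: if we are not already in
 * interrupt context and softirqs are pending, run them on the softirq
 * stack with local interrupts saved/restored around the call.
 */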
void do_softirq(void)
{
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	if (local_softirq_pending())
		do_softirq_onstack();

	local_irq_restore(flags);
}

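/*
 * Translate a Linux virtual interrupt number back to the underlying
 * hardware interrupt number, returning 0 (and warning) if the virq
 * has no irq_data attached.
 */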
irq_hw_number_t virq_to_hw(unsigned int virq)
{
	struct irq_data *irq_data = irq_get_irq_data(virq);
	return WARN_ON(!irq_data) ? 0 : irq_data->hwirq;
}
EXPORT_SYMBOL_GPL(virq_to_hw);

#ifdef CONFIG_SMP
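/*
 * Pick the hard CPU id that should receive an interrupt with the given
 * affinity mask: round-robin over all online CPUs when the mask covers
 * them all (or intersects none of them), otherwise the first online
 * CPU in the mask.
 */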
int irq_choose_cpu(const struct cpumask *mask)
{
	int cpuid;

	if (cpumask_equal(mask, cpu_online_mask)) {
		static int irq_rover;
		static DEFINE_RAW_SPINLOCK(irq_rover_lock);
		unsigned long flags;

		/* Round-robin distribution... */
do_round_robin:
		raw_spin_lock_irqsave(&irq_rover_lock, flags);

		irq_rover = cpumask_next(irq_rover, cpu_online_mask);
		if (irq_rover >= nr_cpu_ids)
			irq_rover = cpumask_first(cpu_online_mask);

		cpuid = irq_rover;

		raw_spin_unlock_irqrestore(&irq_rover_lock, flags);
	} else {
		cpuid = cpumask_first_and(mask, cpu_online_mask);
		if (cpuid >= nr_cpu_ids)
			goto do_round_robin;
	}

	return get_hard_smp_processor_id(cpuid);
}
#else
int irq_choose_cpu(const struct cpumask *mask)
{
	return hard_smp_processor_id();
}
#endif

int arch_early_irq_init(void)
{
	return 0;
}

#ifdef CONFIG_PPC64
static int __init setup_noirqdistrib(char *str)
{
	distribute_irqs = 0;
	return 1;
}

__setup("noirqdistrib", setup_noirqdistrib);
#endif /* CONFIG_PPC64 */