1/* $Id: irq.c,v 1.114 2002/01/11 08:45:38 davem Exp $
2 * irq.c: UltraSparc IRQ handling/init/registry.
3 *
4 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
5 * Copyright (C) 1998 Eddie C. Dost (ecd@skynet.be)
6 * Copyright (C) 1998 Jakub Jelinek (jj@ultra.linux.cz)
7 */
8
9#include <linux/config.h>
10#include <linux/module.h>
11#include <linux/sched.h>
12#include <linux/ptrace.h>
13#include <linux/errno.h>
14#include <linux/kernel_stat.h>
15#include <linux/signal.h>
16#include <linux/mm.h>
17#include <linux/interrupt.h>
18#include <linux/slab.h>
19#include <linux/random.h>
20#include <linux/init.h>
21#include <linux/delay.h>
22#include <linux/proc_fs.h>
23#include <linux/seq_file.h>
24
25#include <asm/ptrace.h>
26#include <asm/processor.h>
27#include <asm/atomic.h>
28#include <asm/system.h>
29#include <asm/irq.h>
30#include <asm/sbus.h>
31#include <asm/iommu.h>
32#include <asm/upa.h>
33#include <asm/oplib.h>
34#include <asm/timer.h>
35#include <asm/smp.h>
36#include <asm/starfire.h>
37#include <asm/uaccess.h>
38#include <asm/cache.h>
39#include <asm/cpudata.h>
 40#include <asm/auxio.h>
41
42#ifdef CONFIG_SMP
43static void distribute_irqs(void);
44#endif
45
 46/* UPA nodes send an interrupt packet to the UltraSparc with the first data reg
 47 * value's low 5 (7 on Starfire) bits holding the IRQ identifier being
 48 * delivered. We must translate this into a non-vector IRQ so we can
49 * set the softint on this cpu.
50 *
51 * To make processing these packets efficient and race free we use
52 * an array of irq buckets below. The interrupt vector handler in
53 * entry.S feeds incoming packets into per-cpu pil-indexed lists.
54 * The IVEC handler does not need to act atomically, the PIL dispatch
55 * code uses CAS to get an atomic snapshot of the list and clear it
56 * at the same time.
57 */
58
59struct ino_bucket ivector_table[NUM_IVECS] __attribute__ ((aligned (SMP_CACHE_BYTES)));
60
61/* This has to be in the main kernel image, it cannot be
62 * turned into per-cpu data. The reason is that the main
63 * kernel image is locked into the TLB and this structure
64 * is accessed from the vectored interrupt trap handler. If
65 * access to this structure takes a TLB miss it could cause
66 * the 5-level sparc v9 trap stack to overflow.
67 */
68struct irq_work_struct {
69 unsigned int irq_worklists[16];
70};
71struct irq_work_struct __irq_work[NR_CPUS];
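/* irq_work(cpu, pil) below yields a pointer to that cpu's worklist head for
 * the given PIL; entry.S chains pending ino_buckets onto these lists and
 * handler_irq() later snapshots and clears a list atomically with xchg32().
 */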
72#define irq_work(__cpu, __pil) &(__irq_work[(__cpu)].irq_worklists[(__pil)])
73
74#ifdef CONFIG_PCI
75/* This is a table of physical addresses used to deal with IBF_DMA_SYNC.
76 * It is used for PCI only to synchronize DMA transfers with IRQ delivery
77 * for devices behind busses other than APB on Sabre systems.
78 *
79 * Currently these physical addresses are just config space accesses
80 * to the command register for that device.
81 */
82unsigned long pci_dma_wsync;
83unsigned long dma_sync_reg_table[256];
84unsigned char dma_sync_reg_table_entry = 0;
85#endif
86
87/* This is based upon code in the 32-bit Sparc kernel written mostly by
88 * David Redman (djhr@tadpole.co.uk).
89 */
90#define MAX_STATIC_ALLOC 4
91static struct irqaction static_irqaction[MAX_STATIC_ALLOC];
92static int static_irq_count;
93
94/* This is exported so that fast IRQ handlers can get at it... -DaveM */
95struct irqaction *irq_action[NR_IRQS+1] = {
 96 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
 97 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL
98};
99
100/* This only synchronizes entities which modify IRQ handler
101 * state and some selected user-level spots that want to
102 * read things in the table. IRQ handler processing orders
 103 * its accesses such that no locking is needed.
104 */
105static DEFINE_SPINLOCK(irq_action_lock);
106
107static void register_irq_proc (unsigned int irq);
108
109/*
 110 * The upper 2 bytes (bits 48-63) of irqaction->flags hold the ino.
111 * irqaction->mask holds the smp affinity information.
112 */
113#define put_ino_in_irqaction(action, irq) \
114 action->flags &= 0xffffffffffffUL; \
115 if (__bucket(irq) == &pil0_dummy_bucket) \
116 action->flags |= 0xdeadUL << 48; \
117 else \
118 action->flags |= __irq_ino(irq) << 48;
119#define get_ino_in_irqaction(action) (action->flags >> 48)
120
121#define put_smpaff_in_irqaction(action, smpaff) (action)->mask = (smpaff)
122#define get_smpaff_in_irqaction(action) ((action)->mask)
123
124int show_interrupts(struct seq_file *p, void *v)
125{
126 unsigned long flags;
127 int i = *(loff_t *) v;
128 struct irqaction *action;
129#ifdef CONFIG_SMP
130 int j;
131#endif
132
133 spin_lock_irqsave(&irq_action_lock, flags);
134 if (i <= NR_IRQS) {
135 if (!(action = *(i + irq_action)))
136 goto out_unlock;
137 seq_printf(p, "%3d: ", i);
138#ifndef CONFIG_SMP
139 seq_printf(p, "%10u ", kstat_irqs(i));
140#else
141 for (j = 0; j < NR_CPUS; j++) {
142 if (!cpu_online(j))
143 continue;
144 seq_printf(p, "%10u ",
145 kstat_cpu(j).irqs[i]);
146 }
147#endif
148 seq_printf(p, " %s:%lx", action->name,
149 get_ino_in_irqaction(action));
150 for (action = action->next; action; action = action->next) {
151 seq_printf(p, ", %s:%lx", action->name,
152 get_ino_in_irqaction(action));
153 }
154 seq_putc(p, '\n');
155 }
156out_unlock:
157 spin_unlock_irqrestore(&irq_action_lock, flags);
158
159 return 0;
160}
161
162/* Now these are always passed a true fully specified sun4u INO. */
163void enable_irq(unsigned int irq)
164{
165 struct ino_bucket *bucket = __bucket(irq);
166 unsigned long imap;
167 unsigned long tid;
168
169 imap = bucket->imap;
170 if (imap == 0UL)
171 return;
172
173 preempt_disable();
174
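 /* Work out this cpu's interrupt target ID (TID) in IMAP format: the
 * JBUS or Safari agent ID on Cheetah-class cpus, the UPA MID otherwise,
 * and a starfire_translate() result on Starfire.
 */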
175 if (tlb_type == cheetah || tlb_type == cheetah_plus) {
176 unsigned long ver;
177
178 __asm__ ("rdpr %%ver, %0" : "=r" (ver));
179 if ((ver >> 32) == 0x003e0016) {
180 /* We set it to our JBUS ID. */
181 __asm__ __volatile__("ldxa [%%g0] %1, %0"
182 : "=r" (tid)
183 : "i" (ASI_JBUS_CONFIG));
184 tid = ((tid & (0x1fUL<<17)) << 9);
185 tid &= IMAP_TID_JBUS;
186 } else {
187 /* We set it to our Safari AID. */
188 __asm__ __volatile__("ldxa [%%g0] %1, %0"
189 : "=r" (tid)
190 : "i" (ASI_SAFARI_CONFIG));
191 tid = ((tid & (0x3ffUL<<17)) << 9);
192 tid &= IMAP_AID_SAFARI;
193 }
194 } else if (this_is_starfire == 0) {
195 /* We set it to our UPA MID. */
196 __asm__ __volatile__("ldxa [%%g0] %1, %0"
197 : "=r" (tid)
198 : "i" (ASI_UPA_CONFIG));
199 tid = ((tid & UPA_CONFIG_MID) << 9);
200 tid &= IMAP_TID_UPA;
201 } else {
202 tid = (starfire_translate(imap, smp_processor_id()) << 26);
203 tid &= IMAP_TID_UPA;
204 }
205
206 /* NOTE NOTE NOTE, IGN and INO are read-only, IGN is a product
207 * of this SYSIO's preconfigured IGN in the SYSIO Control
208 * Register, the hardware just mirrors that value here.
209 * However for Graphics and UPA Slave devices the full
210 * IMAP_INR field can be set by the programmer here.
211 *
212 * Things like FFB can now be handled via the new IRQ mechanism.
213 */
214 upa_writel(tid | IMAP_VALID, imap);
215
216 preempt_enable();
217}
218
219/* This now gets passed true ino's as well. */
220void disable_irq(unsigned int irq)
221{
222 struct ino_bucket *bucket = __bucket(irq);
223 unsigned long imap;
224
225 imap = bucket->imap;
226 if (imap != 0UL) {
227 u32 tmp;
228
229 /* NOTE: We do not want to futz with the IRQ clear registers
230 * and move the state to IDLE, the SCSI code does call
231 * disable_irq() to assure atomicity in the queue cmd
232 * SCSI adapter driver code. Thus we'd lose interrupts.
233 */
234 tmp = upa_readl(imap);
235 tmp &= ~IMAP_VALID;
236 upa_writel(tmp, imap);
237 }
238}
239
240/* The timer is the one "weird" interrupt which is generated by
241 * the CPU %tick register and not by some normal vectored interrupt
242 * source. To handle this special case, we use this dummy INO bucket.
243 */
244static struct ino_bucket pil0_dummy_bucket = {
245 0, /* irq_chain */
246 0, /* pil */
247 0, /* pending */
248 0, /* flags */
249 0, /* __unused */
250 NULL, /* irq_info */
251 0UL, /* iclr */
252 0UL, /* imap */
253};
254
255unsigned int build_irq(int pil, int inofixup, unsigned long iclr, unsigned long imap)
256{
257 struct ino_bucket *bucket;
258 int ino;
259
260 if (pil == 0) {
261 if (iclr != 0UL || imap != 0UL) {
262 prom_printf("Invalid dummy bucket for PIL0 (%lx:%lx)\n",
263 iclr, imap);
264 prom_halt();
265 }
266 return __irq(&pil0_dummy_bucket);
267 }
268
269 /* RULE: Both must be specified in all other cases. */
270 if (iclr == 0UL || imap == 0UL) {
271 prom_printf("Invalid build_irq %d %d %016lx %016lx\n",
272 pil, inofixup, iclr, imap);
273 prom_halt();
274 }
275
276 ino = (upa_readl(imap) & (IMAP_IGN | IMAP_INO)) + inofixup;
 277 if (ino >= NUM_IVECS) {
278 prom_printf("Invalid INO %04x (%d:%d:%016lx:%016lx)\n",
279 ino, pil, inofixup, iclr, imap);
280 prom_halt();
281 }
282
283 /* Ok, looks good, set it up. Don't touch the irq_chain or
284 * the pending flag.
285 */
286 bucket = &ivector_table[ino];
287 if ((bucket->flags & IBF_ACTIVE) ||
288 (bucket->irq_info != NULL)) {
289 /* This is a gross fatal error if it happens here. */
290 prom_printf("IRQ: Trying to reinit INO bucket, fatal error.\n");
291 prom_printf("IRQ: Request INO %04x (%d:%d:%016lx:%016lx)\n",
292 ino, pil, inofixup, iclr, imap);
293 prom_printf("IRQ: Existing (%d:%016lx:%016lx)\n",
294 bucket->pil, bucket->iclr, bucket->imap);
295 prom_printf("IRQ: Cannot continue, halting...\n");
296 prom_halt();
297 }
298 bucket->imap = imap;
299 bucket->iclr = iclr;
300 bucket->pil = pil;
301 bucket->flags = 0;
302
303 bucket->irq_info = NULL;
304
305 return __irq(bucket);
306}
307
308static void atomic_bucket_insert(struct ino_bucket *bucket)
309{
310 unsigned long pstate;
311 unsigned int *ent;
312
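 /* Disable interrupts (clear PSTATE_IE) around the list push so a vector
 * interrupt on this cpu cannot observe a half-updated worklist, then
 * restore the previously saved %pstate.
 */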
313 __asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
314 __asm__ __volatile__("wrpr %0, %1, %%pstate"
315 : : "r" (pstate), "i" (PSTATE_IE));
316 ent = irq_work(smp_processor_id(), bucket->pil);
317 bucket->irq_chain = *ent;
318 *ent = __irq(bucket);
319 __asm__ __volatile__("wrpr %0, 0x0, %%pstate" : : "r" (pstate));
320}
321
322int request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_regs *),
323 unsigned long irqflags, const char *name, void *dev_id)
324{
325 struct irqaction *action, *tmp = NULL;
326 struct ino_bucket *bucket = __bucket(irq);
327 unsigned long flags;
328 int pending = 0;
329
330 if ((bucket != &pil0_dummy_bucket) &&
331 (bucket < &ivector_table[0] ||
332 bucket >= &ivector_table[NUM_IVECS])) {
333 unsigned int *caller;
334
335 __asm__ __volatile__("mov %%i7, %0" : "=r" (caller));
336 printk(KERN_CRIT "request_irq: Old style IRQ registry attempt "
337 "from %p, irq %08x.\n", caller, irq);
338 return -EINVAL;
339 }
340 if (!handler)
341 return -EINVAL;
342
343 if ((bucket != &pil0_dummy_bucket) && (irqflags & SA_SAMPLE_RANDOM)) {
344 /*
345 * This function might sleep, we want to call it first,
 346 * outside of the atomic block. In the SA_STATIC_ALLOC case, the
 347 * random driver's kmalloc will fail, but that is safe.
 348 * If already initialized, the random driver will not reinit.
 349 * Yes, this might clear the entropy pool if the wrong driver is
 350 * attempted to be loaded without actually installing a new handler,
 351 * but is this really a problem? Only the sysadmin is able to do
 352 * this.
353 */
354 rand_initialize_irq(irq);
355 }
356
357 spin_lock_irqsave(&irq_action_lock, flags);
358
359 action = *(bucket->pil + irq_action);
360 if (action) {
361 if ((action->flags & SA_SHIRQ) && (irqflags & SA_SHIRQ))
362 for (tmp = action; tmp->next; tmp = tmp->next)
363 ;
364 else {
365 spin_unlock_irqrestore(&irq_action_lock, flags);
366 return -EBUSY;
367 }
368 action = NULL; /* Or else! */
369 }
370
371 /* If this is flagged as statically allocated then we use our
372 * private struct which is never freed.
373 */
374 if (irqflags & SA_STATIC_ALLOC) {
375 if (static_irq_count < MAX_STATIC_ALLOC)
376 action = &static_irqaction[static_irq_count++];
377 else
378 printk("Request for IRQ%d (%s) SA_STATIC_ALLOC failed "
379 "using kmalloc\n", irq, name);
380 }
381 if (action == NULL)
382 action = (struct irqaction *)kmalloc(sizeof(struct irqaction),
383 GFP_ATOMIC);
384
385 if (!action) {
386 spin_unlock_irqrestore(&irq_action_lock, flags);
387 return -ENOMEM;
388 }
389
390 if (bucket == &pil0_dummy_bucket) {
391 bucket->irq_info = action;
392 bucket->flags |= IBF_ACTIVE;
393 } else {
394 if ((bucket->flags & IBF_ACTIVE) != 0) {
395 void *orig = bucket->irq_info;
396 void **vector = NULL;
397
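 /* Sharing model: bucket->irq_info holds a single irqaction pointer, or,
 * once IBF_MULTI is set, a kmalloc'd array of up to four irqaction
 * pointers; only PCI buckets may be shared this way.
 */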
398 if ((bucket->flags & IBF_PCI) == 0) {
399 printk("IRQ: Trying to share non-PCI bucket.\n");
400 goto free_and_ebusy;
401 }
402 if ((bucket->flags & IBF_MULTI) == 0) {
403 vector = kmalloc(sizeof(void *) * 4, GFP_ATOMIC);
404 if (vector == NULL)
405 goto free_and_enomem;
406
407 /* We might have slept. */
408 if ((bucket->flags & IBF_MULTI) != 0) {
409 int ent;
410
411 kfree(vector);
412 vector = (void **)bucket->irq_info;
413 for(ent = 0; ent < 4; ent++) {
414 if (vector[ent] == NULL) {
415 vector[ent] = action;
416 break;
417 }
418 }
419 if (ent == 4)
420 goto free_and_ebusy;
421 } else {
422 vector[0] = orig;
423 vector[1] = action;
424 vector[2] = NULL;
425 vector[3] = NULL;
426 bucket->irq_info = vector;
427 bucket->flags |= IBF_MULTI;
428 }
429 } else {
430 int ent;
431
432 vector = (void **)orig;
433 for (ent = 0; ent < 4; ent++) {
434 if (vector[ent] == NULL) {
435 vector[ent] = action;
436 break;
437 }
438 }
439 if (ent == 4)
440 goto free_and_ebusy;
441 }
442 } else {
443 bucket->irq_info = action;
444 bucket->flags |= IBF_ACTIVE;
445 }
446 pending = bucket->pending;
447 if (pending)
448 bucket->pending = 0;
449 }
450
451 action->handler = handler;
452 action->flags = irqflags;
453 action->name = name;
454 action->next = NULL;
455 action->dev_id = dev_id;
456 put_ino_in_irqaction(action, irq);
457 put_smpaff_in_irqaction(action, CPU_MASK_NONE);
458
459 if (tmp)
460 tmp->next = action;
461 else
462 *(bucket->pil + irq_action) = action;
463
464 enable_irq(irq);
465
 466 /* We ate the IVEC already; this makes sure it does not get lost. */
467 if (pending) {
468 atomic_bucket_insert(bucket);
469 set_softint(1 << bucket->pil);
470 }
471 spin_unlock_irqrestore(&irq_action_lock, flags);
472 if ((bucket != &pil0_dummy_bucket) && (!(irqflags & SA_STATIC_ALLOC)))
473 register_irq_proc(__irq_ino(irq));
474
475#ifdef CONFIG_SMP
476 distribute_irqs();
477#endif
478 return 0;
479
480free_and_ebusy:
481 kfree(action);
482 spin_unlock_irqrestore(&irq_action_lock, flags);
483 return -EBUSY;
484
485free_and_enomem:
486 kfree(action);
487 spin_unlock_irqrestore(&irq_action_lock, flags);
488 return -ENOMEM;
489}
490
491EXPORT_SYMBOL(request_irq);
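/* Illustrative usage sketch (hypothetical driver code, not part of this
 * file): on sparc64 the "irq" handed to request_irq()/free_irq() is the
 * opaque cookie returned by build_irq(), not a small integer, e.g.:
 *
 *	unsigned int cookie = build_irq(pil, 0, iclr, imap);
 *	if (request_irq(cookie, my_handler, SA_SHIRQ, "mydev", my_dev))
 *		goto fail;
 *	...
 *	free_irq(cookie, my_dev);
 *
 * my_handler, "mydev" and my_dev above are made-up names.
 */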
492
493void free_irq(unsigned int irq, void *dev_id)
494{
495 struct irqaction *action;
496 struct irqaction *tmp = NULL;
497 unsigned long flags;
498 struct ino_bucket *bucket = __bucket(irq), *bp;
499
500 if ((bucket != &pil0_dummy_bucket) &&
501 (bucket < &ivector_table[0] ||
502 bucket >= &ivector_table[NUM_IVECS])) {
503 unsigned int *caller;
504
505 __asm__ __volatile__("mov %%i7, %0" : "=r" (caller));
506 printk(KERN_CRIT "free_irq: Old style IRQ removal attempt "
507 "from %p, irq %08x.\n", caller, irq);
508 return;
509 }
510
511 spin_lock_irqsave(&irq_action_lock, flags);
512
513 action = *(bucket->pil + irq_action);
 514 if (!action->handler) {
 515 printk("Freeing free IRQ %d\n", bucket->pil);
 spin_unlock_irqrestore(&irq_action_lock, flags);
 516 return;
 517 }
518 if (dev_id) {
519 for ( ; action; action = action->next) {
520 if (action->dev_id == dev_id)
521 break;
522 tmp = action;
523 }
524 if (!action) {
525 printk("Trying to free free shared IRQ %d\n", bucket->pil);
526 spin_unlock_irqrestore(&irq_action_lock, flags);
527 return;
528 }
529 } else if (action->flags & SA_SHIRQ) {
530 printk("Trying to free shared IRQ %d with NULL device ID\n", bucket->pil);
531 spin_unlock_irqrestore(&irq_action_lock, flags);
532 return;
533 }
534
535 if (action->flags & SA_STATIC_ALLOC) {
536 printk("Attempt to free statically allocated IRQ %d (%s)\n",
537 bucket->pil, action->name);
538 spin_unlock_irqrestore(&irq_action_lock, flags);
539 return;
540 }
541
542 if (action && tmp)
543 tmp->next = action->next;
544 else
545 *(bucket->pil + irq_action) = action->next;
546
547 spin_unlock_irqrestore(&irq_action_lock, flags);
548
549 synchronize_irq(irq);
550
551 spin_lock_irqsave(&irq_action_lock, flags);
552
553 if (bucket != &pil0_dummy_bucket) {
554 unsigned long imap = bucket->imap;
555 void **vector, *orig;
556 int ent;
557
558 orig = bucket->irq_info;
559 vector = (void **)orig;
560
561 if ((bucket->flags & IBF_MULTI) != 0) {
562 int other = 0;
563 void *orphan = NULL;
564 for (ent = 0; ent < 4; ent++) {
565 if (vector[ent] == action)
566 vector[ent] = NULL;
567 else if (vector[ent] != NULL) {
568 orphan = vector[ent];
569 other++;
570 }
571 }
572
573 /* Only free when no other shared irq
574 * uses this bucket.
575 */
576 if (other) {
577 if (other == 1) {
578 /* Convert back to non-shared bucket. */
579 bucket->irq_info = orphan;
580 bucket->flags &= ~(IBF_MULTI);
581 kfree(vector);
582 }
583 goto out;
584 }
585 } else {
586 bucket->irq_info = NULL;
587 }
588
589 /* This unique interrupt source is now inactive. */
590 bucket->flags &= ~IBF_ACTIVE;
591
592 /* See if any other buckets share this bucket's IMAP
593 * and are still active.
594 */
595 for (ent = 0; ent < NUM_IVECS; ent++) {
596 bp = &ivector_table[ent];
597 if (bp != bucket &&
598 bp->imap == imap &&
599 (bp->flags & IBF_ACTIVE) != 0)
600 break;
601 }
602
603 /* Only disable when no other sub-irq levels of
604 * the same IMAP are active.
605 */
606 if (ent == NUM_IVECS)
607 disable_irq(irq);
608 }
609
610out:
611 kfree(action);
612 spin_unlock_irqrestore(&irq_action_lock, flags);
613}
614
615EXPORT_SYMBOL(free_irq);
616
617#ifdef CONFIG_SMP
618void synchronize_irq(unsigned int irq)
619{
620 struct ino_bucket *bucket = __bucket(irq);
621
622#if 0
623 /* The following is how I wish I could implement this.
624 * Unfortunately the ICLR registers are read-only, you can
625 * only write ICLR_foo values to them. To get the current
626 * IRQ status you would need to get at the IRQ diag registers
627 * in the PCI/SBUS controller and the layout of those vary
628 * from one controller to the next, sigh... -DaveM
629 */
630 unsigned long iclr = bucket->iclr;
631
632 while (1) {
633 u32 tmp = upa_readl(iclr);
634
635 if (tmp == ICLR_TRANSMIT ||
636 tmp == ICLR_PENDING) {
637 cpu_relax();
638 continue;
639 }
640 break;
641 }
642#else
 643 /* So we have to do this with an INPROGRESS bit just like x86. */
644 while (bucket->flags & IBF_INPROGRESS)
645 cpu_relax();
646#endif
647}
648#endif /* CONFIG_SMP */
649
650void catch_disabled_ivec(struct pt_regs *regs)
651{
652 int cpu = smp_processor_id();
653 struct ino_bucket *bucket = __bucket(*irq_work(cpu, 0));
654
655 /* We can actually see this on Ultra/PCI PCI cards, which are bridges
 656 * to other devices. Here a single IMAP enables potentially multiple
 657 * unique interrupt sources (which each do have a unique ICLR register).
 658 *
 659 * So what we do is just register that the IVEC arrived; when registered
 660 * for real, the request_irq() code will check the bit and signal
661 * a local CPU interrupt for it.
662 */
663#if 0
664 printk("IVEC: Spurious interrupt vector (%x) received at (%016lx)\n",
665 bucket - &ivector_table[0], regs->tpc);
666#endif
667 *irq_work(cpu, 0) = 0;
668 bucket->pending = 1;
669}
670
671/* Tune this... */
672#define FORWARD_VOLUME 12
673
674#ifdef CONFIG_SMP
675
676static inline void redirect_intr(int cpu, struct ino_bucket *bp)
677{
678 /* Ok, here is what is going on:
679 * 1) Retargeting IRQs on Starfire is very
680 * expensive so just forget about it on them.
681 * 2) Moving around very high priority interrupts
682 * is a losing game.
683 * 3) If the current cpu is idle, interrupts are
684 * useful work, so keep them here. But do not
685 * pass to our neighbour if he is not very idle.
686 * 4) If sysadmin explicitly asks for directed intrs,
687 * Just Do It.
688 */
689 struct irqaction *ap = bp->irq_info;
690 cpumask_t cpu_mask;
691 unsigned int buddy, ticks;
692
693 cpu_mask = get_smpaff_in_irqaction(ap);
694 cpus_and(cpu_mask, cpu_mask, cpu_online_map);
695 if (cpus_empty(cpu_mask))
696 cpu_mask = cpu_online_map;
697
698 if (this_is_starfire != 0 ||
699 bp->pil >= 10 || current->pid == 0)
700 goto out;
701
702 /* 'cpu' is the MID (ie. UPAID), calculate the MID
703 * of our buddy.
704 */
705 buddy = cpu + 1;
706 if (buddy >= NR_CPUS)
707 buddy = 0;
708
709 ticks = 0;
710 while (!cpu_isset(buddy, cpu_mask)) {
711 if (++buddy >= NR_CPUS)
712 buddy = 0;
713 if (++ticks > NR_CPUS) {
714 put_smpaff_in_irqaction(ap, CPU_MASK_NONE);
715 goto out;
716 }
717 }
718
719 if (buddy == cpu)
720 goto out;
721
722 /* Voo-doo programming. */
723 if (cpu_data(buddy).idle_volume < FORWARD_VOLUME)
724 goto out;
725
726 /* This just so happens to be correct on Cheetah
727 * at the moment.
728 */
729 buddy <<= 26;
730
731 /* Push it to our buddy. */
732 upa_writel(buddy | IMAP_VALID, bp->imap);
733
734out:
735 return;
736}
737
738#endif
739
740void handler_irq(int irq, struct pt_regs *regs)
741{
742 struct ino_bucket *bp, *nbp;
743 int cpu = smp_processor_id();
744
745#ifndef CONFIG_SMP
746 /*
747 * Check for TICK_INT on level 14 softint.
748 */
749 {
750 unsigned long clr_mask = 1 << irq;
751 unsigned long tick_mask = tick_ops->softint_mask;
752
753 if ((irq == 14) && (get_softint() & tick_mask)) {
754 irq = 0;
755 clr_mask = tick_mask;
756 }
757 clear_softint(clr_mask);
758 }
759#else
 760 int should_forward = 0;
761
762 clear_softint(1 << irq);
763#endif
764
765 irq_enter();
766 kstat_this_cpu.irqs[irq]++;
767
768 /* Sliiiick... */
769#ifndef CONFIG_SMP
770 bp = ((irq != 0) ?
771 __bucket(xchg32(irq_work(cpu, irq), 0)) :
772 &pil0_dummy_bucket);
773#else
774 bp = __bucket(xchg32(irq_work(cpu, irq), 0));
775#endif
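 /* Walk the worklist snapshot we just grabbed: dispatch every active
 * bucket to its handler(s), and mark not-yet-active buckets pending so
 * a later request_irq() can replay them.
 */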
776 for ( ; bp != NULL; bp = nbp) {
777 unsigned char flags = bp->flags;
778 unsigned char random = 0;
779
780 nbp = __bucket(bp->irq_chain);
781 bp->irq_chain = 0;
782
783 bp->flags |= IBF_INPROGRESS;
784
785 if ((flags & IBF_ACTIVE) != 0) {
786#ifdef CONFIG_PCI
787 if ((flags & IBF_DMA_SYNC) != 0) {
788 upa_readl(dma_sync_reg_table[bp->synctab_ent]);
789 upa_readq(pci_dma_wsync);
790 }
791#endif
792 if ((flags & IBF_MULTI) == 0) {
793 struct irqaction *ap = bp->irq_info;
794 int ret;
795
796 ret = ap->handler(__irq(bp), ap->dev_id, regs);
797 if (ret == IRQ_HANDLED)
798 random |= ap->flags;
799 } else {
800 void **vector = (void **)bp->irq_info;
801 int ent;
802 for (ent = 0; ent < 4; ent++) {
803 struct irqaction *ap = vector[ent];
804 if (ap != NULL) {
805 int ret;
806
807 ret = ap->handler(__irq(bp),
808 ap->dev_id,
809 regs);
810 if (ret == IRQ_HANDLED)
811 random |= ap->flags;
812 }
813 }
814 }
815 /* Only the dummy bucket lacks IMAP/ICLR. */
816 if (bp->pil != 0) {
817#ifdef CONFIG_SMP
818 if (should_forward) {
819 redirect_intr(cpu, bp);
820 should_forward = 0;
821 }
822#endif
823 upa_writel(ICLR_IDLE, bp->iclr);
824
825 /* Test and add entropy */
826 if (random & SA_SAMPLE_RANDOM)
827 add_interrupt_randomness(irq);
828 }
829 } else
830 bp->pending = 1;
831
832 bp->flags &= ~IBF_INPROGRESS;
833 }
834 irq_exit();
835}
836
837#ifdef CONFIG_BLK_DEV_FD
 838extern irqreturn_t floppy_interrupt(int, void *, struct pt_regs *);
 839
840/* XXX No easy way to include asm/floppy.h XXX */
841extern unsigned char *pdma_vaddr;
842extern unsigned long pdma_size;
843extern volatile int doing_pdma;
844extern unsigned long fdc_status;
 845
 846irqreturn_t sparc_floppy_irq(int irq, void *dev_cookie, struct pt_regs *regs)
 847{
848 if (likely(doing_pdma)) {
849 void __iomem *stat = (void __iomem *) fdc_status;
850 unsigned char *vaddr = pdma_vaddr;
851 unsigned long size = pdma_size;
852 u8 val;
853
854 while (size) {
855 val = readb(stat);
856 if (unlikely(!(val & 0x80))) {
857 pdma_vaddr = vaddr;
858 pdma_size = size;
859 return IRQ_HANDLED;
860 }
861 if (unlikely(!(val & 0x20))) {
862 pdma_vaddr = vaddr;
863 pdma_size = size;
864 doing_pdma = 0;
865 goto main_interrupt;
866 }
867 if (val & 0x40) {
868 /* read */
869 *vaddr++ = readb(stat + 1);
870 } else {
871 unsigned char data = *vaddr++;
 872
873 /* write */
874 writeb(data, stat + 1);
875 }
876 size--;
877 }
 878
879 pdma_vaddr = vaddr;
880 pdma_size = size;
 881
882 /* Send Terminal Count pulse to floppy controller. */
883 val = readb(auxio_register);
884 val |= AUXIO_AUX1_FTCNT;
885 writeb(val, auxio_register);
 886 val &= ~AUXIO_AUX1_FTCNT;
887 writeb(val, auxio_register);
 888
 889 doing_pdma = 0;
 890 }
 891
892main_interrupt:
893 return floppy_interrupt(irq, dev_cookie, regs);
 894}
895EXPORT_SYMBOL(sparc_floppy_irq);
896#endif
897
898/* We really don't need these at all on the Sparc. We only have
899 * stubs here because they are exported to modules.
900 */
901unsigned long probe_irq_on(void)
902{
903 return 0;
904}
905
906EXPORT_SYMBOL(probe_irq_on);
907
908int probe_irq_off(unsigned long mask)
909{
910 return 0;
911}
912
913EXPORT_SYMBOL(probe_irq_off);
914
915#ifdef CONFIG_SMP
916static int retarget_one_irq(struct irqaction *p, int goal_cpu)
917{
918 struct ino_bucket *bucket = get_ino_in_irqaction(p) + ivector_table;
919 unsigned long imap = bucket->imap;
920 unsigned int tid;
921
922 while (!cpu_online(goal_cpu)) {
923 if (++goal_cpu >= NR_CPUS)
924 goal_cpu = 0;
925 }
926
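 /* Encode goal_cpu into IMAP TID format: the agent ID goes into bits 26
 * and up, masked to the Safari or UPA TID field; Starfire again needs
 * its own starfire_translate() step.
 */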
927 if (tlb_type == cheetah || tlb_type == cheetah_plus) {
928 tid = goal_cpu << 26;
929 tid &= IMAP_AID_SAFARI;
930 } else if (this_is_starfire == 0) {
931 tid = goal_cpu << 26;
932 tid &= IMAP_TID_UPA;
933 } else {
934 tid = (starfire_translate(imap, goal_cpu) << 26);
935 tid &= IMAP_TID_UPA;
936 }
937 upa_writel(tid | IMAP_VALID, imap);
938
 939 do {
940 if (++goal_cpu >= NR_CPUS)
941 goal_cpu = 0;
 942 } while (!cpu_online(goal_cpu));
943
944 return goal_cpu;
945}
946
947/* Called from request_irq. */
948static void distribute_irqs(void)
949{
950 unsigned long flags;
951 int cpu, level;
952
953 spin_lock_irqsave(&irq_action_lock, flags);
954 cpu = 0;
955
956 /*
957 * Skip the timer at [0], and very rare error/power intrs at [15].
958 * Also level [12], it causes problems on Ex000 systems.
959 */
960 for (level = 1; level < NR_IRQS; level++) {
961 struct irqaction *p = irq_action[level];
962 if (level == 12) continue;
963 while(p) {
964 cpu = retarget_one_irq(p, cpu);
965 p = p->next;
966 }
967 }
968 spin_unlock_irqrestore(&irq_action_lock, flags);
969}
970#endif
971
972
973struct sun5_timer *prom_timers;
974static u64 prom_limit0, prom_limit1;
975
976static void map_prom_timers(void)
977{
978 unsigned int addr[3];
979 int tnode, err;
980
981 /* PROM timer node hangs out in the top level of device siblings... */
982 tnode = prom_finddevice("/counter-timer");
983
 984 /* Assume that if the node is not present, the PROM uses a different tick
 985 * mechanism which we need not care about.
986 */
987 if (tnode == 0 || tnode == -1) {
988 prom_timers = (struct sun5_timer *) 0;
989 return;
990 }
991
 992 /* If the PROM is really using this, it must have mapped it itself. */
993 err = prom_getproperty(tnode, "address", (char *)addr, sizeof(addr));
994 if (err == -1) {
995 prom_printf("PROM does not have timer mapped, trying to continue.\n");
996 prom_timers = (struct sun5_timer *) 0;
997 return;
998 }
999 prom_timers = (struct sun5_timer *) ((unsigned long)addr[0]);
1000}
1001
1002static void kill_prom_timer(void)
1003{
1004 if (!prom_timers)
1005 return;
1006
1007 /* Save them away for later. */
1008 prom_limit0 = prom_timers->limit0;
1009 prom_limit1 = prom_timers->limit1;
1010
 1011 /* Just as on sun4c/sun4m, the PROM uses a timer which ticks at IRQ 14.
1012 * We turn both off here just to be paranoid.
1013 */
1014 prom_timers->limit0 = 0;
1015 prom_timers->limit1 = 0;
1016
1017 /* Wheee, eat the interrupt packet too... */
1018 __asm__ __volatile__(
1019" mov 0x40, %%g2\n"
1020" ldxa [%%g0] %0, %%g1\n"
1021" ldxa [%%g2] %1, %%g1\n"
1022" stxa %%g0, [%%g0] %0\n"
1023" membar #Sync\n"
1024 : /* no outputs */
1025 : "i" (ASI_INTR_RECEIVE), "i" (ASI_INTR_R)
1026 : "g1", "g2");
1027}
1028
1029void enable_prom_timer(void)
1030{
1031 if (!prom_timers)
1032 return;
1033
1034 /* Set it to whatever was there before. */
1035 prom_timers->limit1 = prom_limit1;
1036 prom_timers->count1 = 0;
1037 prom_timers->limit0 = prom_limit0;
1038 prom_timers->count0 = 0;
1039}
1040
1041void init_irqwork_curcpu(void)
1042{
1043 register struct irq_work_struct *workp asm("o2");
1044 register unsigned long tmp asm("o3");
1045 int cpu = hard_smp_processor_id();
1046
1047 memset(__irq_work + cpu, 0, sizeof(*workp));
1048
1049 /* Make sure we are called with PSTATE_IE disabled. */
1050 __asm__ __volatile__("rdpr %%pstate, %0\n\t"
1051 : "=r" (tmp));
1052 if (tmp & PSTATE_IE) {
1053 prom_printf("BUG: init_irqwork_curcpu() called with "
1054 "PSTATE_IE enabled, bailing.\n");
1055 __asm__ __volatile__("mov %%i7, %0\n\t"
1056 : "=r" (tmp));
1057 prom_printf("BUG: Called from %lx\n", tmp);
1058 prom_halt();
1059 }
1060
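 /* Writing PSTATE_IG switches to the interrupt global register set, so
 * the %g6 loaded below is the one the vectored interrupt trap handler
 * sees; it points at this cpu's irq_work_struct.
 */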
1061 /* Set interrupt globals. */
1062 workp = &__irq_work[cpu];
1063 __asm__ __volatile__(
1064 "rdpr %%pstate, %0\n\t"
1065 "wrpr %0, %1, %%pstate\n\t"
1066 "mov %2, %%g6\n\t"
1067 "wrpr %0, 0x0, %%pstate\n\t"
1068 : "=&r" (tmp)
1069 : "i" (PSTATE_IG), "r" (workp));
1070}
1071
1072/* Only invoked on boot processor. */
1073void __init init_IRQ(void)
1074{
1075 map_prom_timers();
1076 kill_prom_timer();
1077 memset(&ivector_table[0], 0, sizeof(ivector_table));
1078
1079 /* We need to clear any IRQ's pending in the soft interrupt
1080 * registers, a spurious one could be left around from the
1081 * PROM timer which we just disabled.
1082 */
1083 clear_softint(get_softint());
1084
1085 /* Now that ivector table is initialized, it is safe
1086 * to receive IRQ vector traps. We will normally take
1087 * one or two right now, in case some device PROM used
1088 * to boot us wants to speak to us. We just ignore them.
1089 */
1090 __asm__ __volatile__("rdpr %%pstate, %%g1\n\t"
1091 "or %%g1, %0, %%g1\n\t"
1092 "wrpr %%g1, 0x0, %%pstate"
1093 : /* No outputs */
1094 : "i" (PSTATE_IE)
1095 : "g1");
1096}
1097
1098static struct proc_dir_entry * root_irq_dir;
1099static struct proc_dir_entry * irq_dir [NUM_IVECS];
1100
1101#ifdef CONFIG_SMP
1102
1103static int irq_affinity_read_proc (char *page, char **start, off_t off,
1104 int count, int *eof, void *data)
1105{
1106 struct ino_bucket *bp = ivector_table + (long)data;
1107 struct irqaction *ap = bp->irq_info;
1108 cpumask_t mask;
1109 int len;
1110
1111 mask = get_smpaff_in_irqaction(ap);
1112 if (cpus_empty(mask))
1113 mask = cpu_online_map;
1114
1115 len = cpumask_scnprintf(page, count, mask);
1116 if (count - len < 2)
1117 return -EINVAL;
1118 len += sprintf(page + len, "\n");
1119 return len;
1120}
1121
1122static inline void set_intr_affinity(int irq, cpumask_t hw_aff)
1123{
1124 struct ino_bucket *bp = ivector_table + irq;
1125
1126 /* Users specify affinity in terms of hw cpu ids.
1127 * As soon as we do this, handler_irq() might see and take action.
1128 */
1129 put_smpaff_in_irqaction((struct irqaction *)bp->irq_info, hw_aff);
1130
1131 /* Migration is simply done by the next cpu to service this
1132 * interrupt.
1133 */
1134}
1135
1136static int irq_affinity_write_proc (struct file *file, const char __user *buffer,
1137 unsigned long count, void *data)
1138{
1139 int irq = (long) data, full_count = count, err;
1140 cpumask_t new_value;
1141
 1142 err = cpumask_parse(buffer, count, new_value);
 if (err)
 return err;
 1143
1144 /*
1145 * Do not allow disabling IRQs completely - it's a too easy
1146 * way to make the system unusable accidentally :-) At least
1147 * one online CPU still has to be targeted.
1148 */
1149 cpus_and(new_value, new_value, cpu_online_map);
1150 if (cpus_empty(new_value))
1151 return -EINVAL;
1152
1153 set_intr_affinity(irq, new_value);
1154
1155 return full_count;
1156}
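/* For example, an interrupt can be pinned to cpus 0-3 by writing a hex
 * cpu mask to the per-INO file created by register_irq_proc() below:
 *
 *	echo f > /proc/irq/<ino>/smp_affinity
 */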
1157
1158#endif
1159
1160#define MAX_NAMELEN 10
1161
1162static void register_irq_proc (unsigned int irq)
1163{
1164 char name [MAX_NAMELEN];
1165
1166 if (!root_irq_dir || irq_dir[irq])
1167 return;
1168
1169 memset(name, 0, MAX_NAMELEN);
1170 sprintf(name, "%x", irq);
1171
1172 /* create /proc/irq/1234 */
1173 irq_dir[irq] = proc_mkdir(name, root_irq_dir);
1174
1175#ifdef CONFIG_SMP
1176 /* XXX SMP affinity not supported on starfire yet. */
1177 if (this_is_starfire == 0) {
1178 struct proc_dir_entry *entry;
1179
1180 /* create /proc/irq/1234/smp_affinity */
1181 entry = create_proc_entry("smp_affinity", 0600, irq_dir[irq]);
1182
1183 if (entry) {
1184 entry->nlink = 1;
1185 entry->data = (void *)(long)irq;
1186 entry->read_proc = irq_affinity_read_proc;
1187 entry->write_proc = irq_affinity_write_proc;
1188 }
1189 }
1190#endif
1191}
1192
1193void init_irq_proc (void)
1194{
1195 /* create /proc/irq */
1196 root_irq_dir = proc_mkdir("irq", NULL);
1197}
1198