/* arch/i386/kernel/i8259_32.c */
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/kernel_stat.h>
#include <linux/sysdev.h>
#include <linux/bitops.h>

#include <asm/8253pit.h>
#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/timer.h>
#include <asm/pgtable.h>
#include <asm/delay.h>
#include <asm/desc.h>
#include <asm/apic.h>
#include <asm/arch_hooks.h>
#include <asm/i8259.h>

#include <io_ports.h>

/*
 * This is the 'legacy' 8259A Programmable Interrupt Controller,
 * present in the majority of PC/AT boxes, plus some generic x86
 * specific things, if generic specifics make any sense at all.
 * This file should become arch/i386/kernel/irq.c once the old irq.c
 * moves to arch-independent land.
 */

static int i8259A_auto_eoi;
DEFINE_SPINLOCK(i8259A_lock);
static void mask_and_ack_8259A(unsigned int);

static struct irq_chip i8259A_chip = {
	.name		= "XT-PIC",
	.mask		= disable_8259A_irq,
	.disable	= disable_8259A_irq,
	.unmask		= enable_8259A_irq,
	.mask_ack	= mask_and_ack_8259A,
};

/*
 * 8259A PIC functions to handle ISA devices:
 */

/*
 * This contains the irq mask for both 8259A irq controllers.
 */
unsigned int cached_irq_mask = 0xffff;

/*
 * Not all IRQs can be routed through the IO-APIC, eg. on certain (older)
 * boards the timer interrupt is not really connected to any IO-APIC pin,
 * it's fed to the master 8259A's IR0 line only.
 *
 * Any '1' bit in this mask means the IRQ is routed through the IO-APIC.
 * This 'mixed mode' IRQ handling costs nothing because it's only used
 * at IRQ setup time.
 */
unsigned long io_apic_irqs;

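/*
 * Illustrative sketch: a '1' bit in io_apic_irqs means the IRQ is
 * delivered by the IO-APIC rather than by this 8259A pair, so a
 * routing test reduces to a bit check on the mask above.  The helper
 * name below is hypothetical (nothing in this file uses it; the
 * kernel's own test is the IO_APIC_IRQ() macro elsewhere) and only
 * shows the intended bit semantics.
 */
static inline int irq_handled_by_8259A(unsigned int irq)
{
	/* legacy IRQs 0-15 not claimed by the IO-APIC stay on the PIC */
	return irq < 16 && !(io_apic_irqs & (1UL << irq));
}
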
void disable_8259A_irq(unsigned int irq)
{
	unsigned int mask = 1 << irq;
	unsigned long flags;

	spin_lock_irqsave(&i8259A_lock, flags);
	cached_irq_mask |= mask;
	if (irq & 8)
		outb(cached_slave_mask, PIC_SLAVE_IMR);
	else
		outb(cached_master_mask, PIC_MASTER_IMR);
	spin_unlock_irqrestore(&i8259A_lock, flags);
}

void enable_8259A_irq(unsigned int irq)
{
	unsigned int mask = ~(1 << irq);
	unsigned long flags;

	spin_lock_irqsave(&i8259A_lock, flags);
	cached_irq_mask &= mask;
	if (irq & 8)
		outb(cached_slave_mask, PIC_SLAVE_IMR);
	else
		outb(cached_master_mask, PIC_MASTER_IMR);
	spin_unlock_irqrestore(&i8259A_lock, flags);
}

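/*
 * Note: cached_master_mask and cached_slave_mask come from
 * <asm/i8259.h> and, in this kernel, resolve to the low and high byte
 * of cached_irq_mask, so updating the 16-bit cache above and writing
 * the matching byte to the IMR keeps the hardware mask and the cached
 * mask in sync.
 */
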
int i8259A_irq_pending(unsigned int irq)
{
	unsigned int mask = 1<<irq;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&i8259A_lock, flags);
	if (irq < 8)
		ret = inb(PIC_MASTER_CMD) & mask;
	else
		ret = inb(PIC_SLAVE_CMD) & (mask >> 8);
	spin_unlock_irqrestore(&i8259A_lock, flags);

	return ret;
}

void make_8259A_irq(unsigned int irq)
{
	disable_irq_nosync(irq);
	io_apic_irqs &= ~(1<<irq);
	set_irq_chip_and_handler_name(irq, &i8259A_chip, handle_level_irq,
				      "XT");
	enable_irq(irq);
}

/*
 * This function is expected to be called rarely: switching between
 * 8259A registers is slow.
 * This has to be protected by the irq controller spinlock
 * before being called.
 */
static inline int i8259A_irq_real(unsigned int irq)
{
	int value;
	int irqmask = 1<<irq;

	if (irq < 8) {
		outb(0x0B,PIC_MASTER_CMD);	/* ISR register */
		value = inb(PIC_MASTER_CMD) & irqmask;
		outb(0x0A,PIC_MASTER_CMD);	/* back to the IRR register */
		return value;
	}
	outb(0x0B,PIC_SLAVE_CMD);	/* ISR register */
	value = inb(PIC_SLAVE_CMD) & (irqmask >> 8);
	outb(0x0A,PIC_SLAVE_CMD);	/* back to the IRR register */
	return value;
}

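/*
 * Sketch of the same OCW3 trick applied to both PICs at once: writing
 * 0x0b to a command port selects the In-Service Register for the next
 * read, and 0x0a switches back to the Interrupt Request Register.
 * The helper below is hypothetical (nothing in this file uses it) and
 * assumes the caller already holds i8259A_lock.
 */
static inline unsigned int i8259A_read_isr(void)
{
	unsigned int isr;

	outb(0x0B, PIC_MASTER_CMD);	/* OCW3: read ISR */
	outb(0x0B, PIC_SLAVE_CMD);
	isr = inb(PIC_MASTER_CMD) | (inb(PIC_SLAVE_CMD) << 8);
	outb(0x0A, PIC_MASTER_CMD);	/* OCW3: back to IRR */
	outb(0x0A, PIC_SLAVE_CMD);

	return isr;
}
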
/*
 * Careful! The 8259A is a fragile beast, it pretty
 * much _has_ to be done exactly like this (mask it
 * first, _then_ send the EOI, and the order of EOI
 * to the two 8259s is important!)
 */
static void mask_and_ack_8259A(unsigned int irq)
{
	unsigned int irqmask = 1 << irq;
	unsigned long flags;

	spin_lock_irqsave(&i8259A_lock, flags);
	/*
	 * Lightweight spurious IRQ detection. We do not want
	 * to overdo spurious IRQ handling - it's usually a sign
	 * of hardware problems, so we only do the checks we can
	 * do without slowing down good hardware unnecessarily.
	 *
	 * Note that IRQ7 and IRQ15 (the two spurious IRQs
	 * usually resulting from the 8259A-1|2 PICs) occur
	 * even if the IRQ is masked in the 8259A. Thus we
	 * can check spurious 8259A IRQs without doing the
	 * quite slow i8259A_irq_real() call for every IRQ.
	 * This does not cover 100% of spurious interrupts,
	 * but should be enough to warn the user that there
	 * is something bad going on ...
	 */
	if (cached_irq_mask & irqmask)
		goto spurious_8259A_irq;
	cached_irq_mask |= irqmask;

handle_real_irq:
	if (irq & 8) {
		inb(PIC_SLAVE_IMR);	/* DUMMY - (do we need this?) */
		outb(cached_slave_mask, PIC_SLAVE_IMR);
		outb(0x60+(irq&7),PIC_SLAVE_CMD);/* 'Specific EOI' to slave */
		outb(0x60+PIC_CASCADE_IR,PIC_MASTER_CMD); /* 'Specific EOI' to master-IRQ2 */
	} else {
		inb(PIC_MASTER_IMR);	/* DUMMY - (do we need this?) */
		outb(cached_master_mask, PIC_MASTER_IMR);
		outb(0x60+irq,PIC_MASTER_CMD);	/* 'Specific EOI' to master */
	}
	spin_unlock_irqrestore(&i8259A_lock, flags);
	return;

spurious_8259A_irq:
	/*
	 * this is the slow path - should happen rarely.
	 */
	if (i8259A_irq_real(irq))
		/*
		 * oops, the IRQ _is_ in service according to the
		 * 8259A - not spurious, go handle it.
		 */
		goto handle_real_irq;

	{
		static int spurious_irq_mask;
		/*
		 * At this point we can be sure the IRQ is spurious,
		 * let's ACK and report it. [once per IRQ]
		 */
		if (!(spurious_irq_mask & irqmask)) {
			printk(KERN_DEBUG "spurious 8259A interrupt: IRQ%d.\n", irq);
			spurious_irq_mask |= irqmask;
		}
		atomic_inc(&irq_err_count);
		/*
		 * Theoretically we do not have to handle this IRQ,
		 * but in Linux this does not cause problems and is
		 * simpler for us.
		 */
		goto handle_real_irq;
	}
}

static char irq_trigger[2];
/*
 * ELCR registers (0x4d0, 0x4d1) control edge/level of IRQ
 */
static void restore_ELCR(char *trigger)
{
	outb(trigger[0], 0x4d0);
	outb(trigger[1], 0x4d1);
}

static void save_ELCR(char *trigger)
{
	/* IRQ 0,1,2,8,13 are marked as reserved */
	trigger[0] = inb(0x4d0) & 0xF8;
	trigger[1] = inb(0x4d1) & 0xDE;
}

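/*
 * Sketch of the ELCR bit layout the two helpers above rely on: each
 * ISA IRQ owns one bit (port 0x4d0 covers IRQ 0-7, port 0x4d1 covers
 * IRQ 8-15), and a set bit means the line is level triggered.  The
 * per-IRQ query below is hypothetical and only illustrates the
 * mapping; this file only saves and restores the raw bytes.
 */
static inline int elcr_irq_is_level(unsigned int irq)
{
	unsigned int port = 0x4d0 + (irq >> 3);	/* 0x4d0 or 0x4d1 */

	return (inb(port) >> (irq & 7)) & 1;
}
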
static int i8259A_resume(struct sys_device *dev)
{
	init_8259A(i8259A_auto_eoi);
	restore_ELCR(irq_trigger);
	return 0;
}

static int i8259A_suspend(struct sys_device *dev, pm_message_t state)
{
	save_ELCR(irq_trigger);
	return 0;
}

static int i8259A_shutdown(struct sys_device *dev)
{
	/* Put the i8259A into a quiescent state that
	 * the kernel initialization code can get it
	 * out of.
	 */
	outb(0xff, PIC_MASTER_IMR);	/* mask all of 8259A-1 */
	outb(0xff, PIC_SLAVE_IMR);	/* mask all of 8259A-2 */
	return 0;
}

static struct sysdev_class i8259_sysdev_class = {
	set_kset_name("i8259"),
	.suspend = i8259A_suspend,
	.resume = i8259A_resume,
	.shutdown = i8259A_shutdown,
};

static struct sys_device device_i8259A = {
	.id	= 0,
	.cls	= &i8259_sysdev_class,
};

static int __init i8259A_init_sysfs(void)
{
	int error = sysdev_class_register(&i8259_sysdev_class);
	if (!error)
		error = sysdev_register(&device_i8259A);
	return error;
}

device_initcall(i8259A_init_sysfs);

void init_8259A(int auto_eoi)
{
	unsigned long flags;

	i8259A_auto_eoi = auto_eoi;

	spin_lock_irqsave(&i8259A_lock, flags);

	outb(0xff, PIC_MASTER_IMR);	/* mask all of 8259A-1 */
	outb(0xff, PIC_SLAVE_IMR);	/* mask all of 8259A-2 */

	/*
	 * outb_p - this has to work on a wide range of PC hardware.
	 */
	outb_p(0x11, PIC_MASTER_CMD);	/* ICW1: select 8259A-1 init */
	outb_p(0x20 + 0, PIC_MASTER_IMR);	/* ICW2: 8259A-1 IR0-7 mapped to 0x20-0x27 */
	outb_p(1U << PIC_CASCADE_IR, PIC_MASTER_IMR);	/* 8259A-1 (the master) has a slave on IR2 */
	if (auto_eoi)	/* master does Auto EOI */
		outb_p(MASTER_ICW4_DEFAULT | PIC_ICW4_AEOI, PIC_MASTER_IMR);
	else		/* master expects normal EOI */
		outb_p(MASTER_ICW4_DEFAULT, PIC_MASTER_IMR);

	outb_p(0x11, PIC_SLAVE_CMD);	/* ICW1: select 8259A-2 init */
	outb_p(0x20 + 8, PIC_SLAVE_IMR);	/* ICW2: 8259A-2 IR0-7 mapped to 0x28-0x2f */
	outb_p(PIC_CASCADE_IR, PIC_SLAVE_IMR);	/* 8259A-2 is a slave on master's IR2 */
	outb_p(SLAVE_ICW4_DEFAULT, PIC_SLAVE_IMR); /* (slave's support for AEOI in flat mode is to be investigated) */
	if (auto_eoi)
		/*
		 * In AEOI mode we just have to mask the interrupt
		 * when acking.
		 */
		i8259A_chip.mask_ack = disable_8259A_irq;
	else
		i8259A_chip.mask_ack = mask_and_ack_8259A;

	udelay(100);		/* wait for 8259A to initialize */

	outb(cached_master_mask, PIC_MASTER_IMR); /* restore master IRQ mask */
	outb(cached_slave_mask, PIC_SLAVE_IMR);	  /* restore slave IRQ mask */

	spin_unlock_irqrestore(&i8259A_lock, flags);
}

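/*
 * For reference, the sequence above follows the usual ICW1..ICW4
 * initialization protocol of the 8259A:
 *   ICW1 (command port): 0x11 - edge triggered, cascade mode, ICW4 needed
 *   ICW2 (data port):    vector base (0x20 for the master, 0x28 for the slave)
 *   ICW3 (data port):    cascade wiring (slave-present bit on the master,
 *                        cascade line number on the slave)
 *   ICW4 (data port):    8086 mode, optionally with Auto-EOI
 */
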
/*
 * Note that on a 486, we don't want to do a SIGFPE on an irq13
 * as the irq is unreliable, and exception 16 works correctly
 * (ie as explained in the intel literature). On a 386, you
 * can't use exception 16 due to bad IBM design, so we have to
 * rely on the less exact irq13.
 *
 * Careful.. Not only is IRQ13 unreliable, it also leads to
 * races. IBM designers who came up with it should be shot.
 */

static irqreturn_t math_error_irq(int cpl, void *dev_id)
{
	extern void math_error(void __user *);
	outb(0,0xF0);
	if (ignore_fpu_irq || !boot_cpu_data.hard_math)
		return IRQ_NONE;
	math_error((void __user *)get_irq_regs()->eip);
	return IRQ_HANDLED;
}

/*
 * New motherboards sometimes make IRQ 13 be a PCI interrupt,
 * so allow interrupt sharing.
 */
static struct irqaction fpu_irq = { math_error_irq, 0, CPU_MASK_NONE, "fpu", NULL, NULL };

void __init init_ISA_irqs (void)
{
	int i;

#ifdef CONFIG_X86_LOCAL_APIC
	init_bsp_APIC();
#endif
	init_8259A(0);

	for (i = 0; i < NR_IRQS; i++) {
		irq_desc[i].status = IRQ_DISABLED;
		irq_desc[i].action = NULL;
		irq_desc[i].depth = 1;

		if (i < 16) {
			/*
			 * 16 old-style INTA-cycle interrupts:
			 */
			set_irq_chip_and_handler_name(i, &i8259A_chip,
						      handle_level_irq, "XT");
		} else {
			/*
			 * 'high' PCI IRQs filled in on demand
			 */
			irq_desc[i].chip = &no_irq_chip;
		}
	}
}

/* Overridden in paravirt.c */
void init_IRQ(void) __attribute__((weak, alias("native_init_IRQ")));

void __init native_init_IRQ(void)
{
	int i;

	/* all the set up before the call gates are initialised */
	pre_intr_init_hook();

	/*
	 * Cover the whole vector space, no vector can escape
	 * us. (some of these will be overridden and become
	 * 'special' SMP interrupts)
	 */
	for (i = 0; i < (NR_VECTORS - FIRST_EXTERNAL_VECTOR); i++) {
		int vector = FIRST_EXTERNAL_VECTOR + i;
		if (i >= NR_IRQS)
			break;
		if (vector != SYSCALL_VECTOR)
			set_intr_gate(vector, interrupt[i]);
	}

	/* setup after call gates are initialised (usually add in
	 * the architecture specific gates)
	 */
	intr_init_hook();

	/*
	 * External FPU? Set up irq13 if so, for
	 * original braindamaged IBM FERR coupling.
	 */
	if (boot_cpu_data.hard_math && !cpu_has_fpu)
		setup_irq(FPU_IRQ, &fpu_irq);

	irq_ctx_init(smp_processor_id());
}
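
/*
 * Sketch of the resulting vector layout, assuming the usual i386
 * values (FIRST_EXTERNAL_VECTOR == 0x20, SYSCALL_VECTOR == 0x80):
 * external interrupt i gets IDT vector 0x20 + i, so the legacy PIC
 * IRQs land on vectors 0x20-0x2f, matching the ICW2 bases programmed
 * in init_8259A(), and vector 0x80 is skipped so the int 0x80 system
 * call gate installed elsewhere is not overwritten.
 */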