#include <linux/interrupt.h>
#include <linux/dmar.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>
#include <linux/hpet.h>
#include <linux/pci.h>
#include <linux/irq.h>
#include <asm/io_apic.h>
#include <asm/smp.h>
#include <asm/cpu.h>
#include <linux/intel-iommu.h>
#include "intr_remapping.h"
#include <acpi/acpi.h>
#include <asm/pci-direct.h>
#include "pci.h"

static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
static struct hpet_scope ir_hpet[MAX_HPET_TBS];
static int ir_ioapic_num, ir_hpet_num;
int intr_remapping_enabled;

static int disable_intremap;
static __init int setup_nointremap(char *str)
{
        disable_intremap = 1;
        return 0;
}
early_param("nointremap", setup_nointremap);

struct irq_2_iommu {
        struct intel_iommu *iommu;
        u16 irte_index;
        u16 sub_handle;
        u8  irte_mask;
};

#ifdef CONFIG_GENERIC_HARDIRQS
static struct irq_2_iommu *get_one_free_irq_2_iommu(int node)
{
        struct irq_2_iommu *iommu;

        iommu = kzalloc_node(sizeof(*iommu), GFP_ATOMIC, node);
        printk(KERN_DEBUG "alloc irq_2_iommu on node %d\n", node);

        return iommu;
}

static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
{
        struct irq_desc *desc;

        desc = irq_to_desc(irq);

        if (WARN_ON_ONCE(!desc))
                return NULL;

        return desc->irq_2_iommu;
}

static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
{
        struct irq_desc *desc;
        struct irq_2_iommu *irq_iommu;

        desc = irq_to_desc(irq);
        if (!desc) {
                printk(KERN_INFO "can not get irq_desc for %d\n", irq);
                return NULL;
        }

        irq_iommu = desc->irq_2_iommu;

        if (!irq_iommu)
                desc->irq_2_iommu = get_one_free_irq_2_iommu(irq_node(irq));

        return desc->irq_2_iommu;
}

#else /* !CONFIG_GENERIC_HARDIRQS */

static struct irq_2_iommu irq_2_iommuX[NR_IRQS];

static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
{
        if (irq < nr_irqs)
                return &irq_2_iommuX[irq];

        return NULL;
}

static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
{
        return irq_2_iommu(irq);
}
#endif

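/* Serializes IRTE allocation and the per-IRQ irq_2_iommu mappings below. */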
static DEFINE_SPINLOCK(irq_2_ir_lock);

static struct irq_2_iommu *valid_irq_2_iommu(unsigned int irq)
{
        struct irq_2_iommu *irq_iommu;

        irq_iommu = irq_2_iommu(irq);

        if (!irq_iommu)
                return NULL;

        if (!irq_iommu->iommu)
                return NULL;

        return irq_iommu;
}

int irq_remapped(int irq)
{
        return valid_irq_2_iommu(irq) != NULL;
}

int get_irte(int irq, struct irte *entry)
{
        int index;
        struct irq_2_iommu *irq_iommu;
        unsigned long flags;

        if (!entry)
                return -1;

        spin_lock_irqsave(&irq_2_ir_lock, flags);
        irq_iommu = valid_irq_2_iommu(irq);
        if (!irq_iommu) {
                spin_unlock_irqrestore(&irq_2_ir_lock, flags);
                return -1;
        }

        index = irq_iommu->irte_index + irq_iommu->sub_handle;
        *entry = *(irq_iommu->iommu->ir_table->base + index);

        spin_unlock_irqrestore(&irq_2_ir_lock, flags);
        return 0;
}

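/*
 * Allocate 'count' consecutive IRTEs (rounded up to a power of two) in the
 * interrupt remapping table of @iommu, record the mapping for @irq and
 * return the index of the first entry, or -1 on failure.
 */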
int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
{
        struct ir_table *table = iommu->ir_table;
        struct irq_2_iommu *irq_iommu;
        u16 index, start_index;
        unsigned int mask = 0;
        unsigned long flags;
        int i;

        if (!count)
                return -1;

#ifndef CONFIG_SPARSE_IRQ
        /* protect irq_2_iommu_alloc later */
        if (irq >= nr_irqs)
                return -1;
#endif

        /*
         * start the IRTE search from index 0.
         */
        index = start_index = 0;

        if (count > 1) {
                count = __roundup_pow_of_two(count);
                mask = ilog2(count);
        }

        if (mask > ecap_max_handle_mask(iommu->ecap)) {
                printk(KERN_ERR
                       "Requested mask %x exceeds the max invalidation handle"
                       " mask value %Lx\n", mask,
                       ecap_max_handle_mask(iommu->ecap));
                return -1;
        }

        spin_lock_irqsave(&irq_2_ir_lock, flags);
        do {
                for (i = index; i < index + count; i++)
                        if (table->base[i].present)
                                break;
                /* empty index found */
                if (i == index + count)
                        break;

                index = (index + count) % INTR_REMAP_TABLE_ENTRIES;

                if (index == start_index) {
                        spin_unlock_irqrestore(&irq_2_ir_lock, flags);
                        printk(KERN_ERR "can't allocate an IRTE\n");
                        return -1;
                }
        } while (1);

        for (i = index; i < index + count; i++)
                table->base[i].present = 1;

        irq_iommu = irq_2_iommu_alloc(irq);
        if (!irq_iommu) {
                spin_unlock_irqrestore(&irq_2_ir_lock, flags);
                printk(KERN_ERR "can't allocate irq_2_iommu\n");
                return -1;
        }

        irq_iommu->iommu = iommu;
        irq_iommu->irte_index = index;
        irq_iommu->sub_handle = 0;
        irq_iommu->irte_mask = mask;

        spin_unlock_irqrestore(&irq_2_ir_lock, flags);

        return index;
}

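/*
 * Queue a selective interrupt entry cache invalidation for the IRTEs at
 * @index (2^@mask entries) and wait for it to complete.
 */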
static int qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
{
        struct qi_desc desc;

        desc.low = QI_IEC_IIDEX(index) | QI_IEC_TYPE | QI_IEC_IM(mask)
                   | QI_IEC_SELECTIVE;
        desc.high = 0;

        return qi_submit_sync(&desc, iommu);
}

int map_irq_to_irte_handle(int irq, u16 *sub_handle)
{
        int index;
        struct irq_2_iommu *irq_iommu;
        unsigned long flags;

        spin_lock_irqsave(&irq_2_ir_lock, flags);
        irq_iommu = valid_irq_2_iommu(irq);
        if (!irq_iommu) {
                spin_unlock_irqrestore(&irq_2_ir_lock, flags);
                return -1;
        }

        *sub_handle = irq_iommu->sub_handle;
        index = irq_iommu->irte_index;
        spin_unlock_irqrestore(&irq_2_ir_lock, flags);
        return index;
}

int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
{
        struct irq_2_iommu *irq_iommu;
        unsigned long flags;

        spin_lock_irqsave(&irq_2_ir_lock, flags);

        irq_iommu = irq_2_iommu_alloc(irq);

        if (!irq_iommu) {
                spin_unlock_irqrestore(&irq_2_ir_lock, flags);
                printk(KERN_ERR "can't allocate irq_2_iommu\n");
                return -1;
        }

        irq_iommu->iommu = iommu;
        irq_iommu->irte_index = index;
        irq_iommu->sub_handle = subhandle;
        irq_iommu->irte_mask = 0;

        spin_unlock_irqrestore(&irq_2_ir_lock, flags);

        return 0;
}

int clear_irte_irq(int irq, struct intel_iommu *iommu, u16 index)
{
        struct irq_2_iommu *irq_iommu;
        unsigned long flags;

        spin_lock_irqsave(&irq_2_ir_lock, flags);
        irq_iommu = valid_irq_2_iommu(irq);
        if (!irq_iommu) {
                spin_unlock_irqrestore(&irq_2_ir_lock, flags);
                return -1;
        }

        irq_iommu->iommu = NULL;
        irq_iommu->irte_index = 0;
        irq_iommu->sub_handle = 0;
        irq_iommu->irte_mask = 0;

        spin_unlock_irqrestore(&irq_2_ir_lock, flags);

        return 0;
}

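/*
 * Overwrite the IRTE backing @irq with @irte_modified and invalidate the
 * interrupt entry cache for that entry.
 */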
int modify_irte(int irq, struct irte *irte_modified)
{
        int rc;
        int index;
        struct irte *irte;
        struct intel_iommu *iommu;
        struct irq_2_iommu *irq_iommu;
        unsigned long flags;

        spin_lock_irqsave(&irq_2_ir_lock, flags);
        irq_iommu = valid_irq_2_iommu(irq);
        if (!irq_iommu) {
                spin_unlock_irqrestore(&irq_2_ir_lock, flags);
                return -1;
        }

        iommu = irq_iommu->iommu;

        index = irq_iommu->irte_index + irq_iommu->sub_handle;
        irte = &iommu->ir_table->base[index];

        set_64bit((unsigned long *)&irte->low, irte_modified->low);
        set_64bit((unsigned long *)&irte->high, irte_modified->high);
        __iommu_flush_cache(iommu, irte, sizeof(*irte));

        rc = qi_flush_iec(iommu, index, 0);
        spin_unlock_irqrestore(&irq_2_ir_lock, flags);

        return rc;
}

int flush_irte(int irq)
{
        int rc;
        int index;
        struct intel_iommu *iommu;
        struct irq_2_iommu *irq_iommu;
        unsigned long flags;

        spin_lock_irqsave(&irq_2_ir_lock, flags);
        irq_iommu = valid_irq_2_iommu(irq);
        if (!irq_iommu) {
                spin_unlock_irqrestore(&irq_2_ir_lock, flags);
                return -1;
        }

        iommu = irq_iommu->iommu;

        index = irq_iommu->irte_index + irq_iommu->sub_handle;

        rc = qi_flush_iec(iommu, index, irq_iommu->irte_mask);
        spin_unlock_irqrestore(&irq_2_ir_lock, flags);

        return rc;
}

struct intel_iommu *map_hpet_to_ir(u8 hpet_id)
{
        int i;

        for (i = 0; i < MAX_HPET_TBS; i++)
                if (ir_hpet[i].id == hpet_id)
                        return ir_hpet[i].iommu;
        return NULL;
}

struct intel_iommu *map_ioapic_to_ir(int apic)
{
        int i;

        for (i = 0; i < MAX_IO_APICS; i++)
                if (ir_ioapic[i].id == apic)
                        return ir_ioapic[i].iommu;
        return NULL;
}

struct intel_iommu *map_dev_to_ir(struct pci_dev *dev)
{
        struct dmar_drhd_unit *drhd;

        drhd = dmar_find_matched_drhd_unit(dev);
        if (!drhd)
                return NULL;

        return drhd->iommu;
}

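/*
 * Zero all IRTEs of an allocation and invalidate them in the interrupt
 * entry cache. Only the owner of the allocation (sub_handle == 0) does
 * the actual clearing.
 */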
static int clear_entries(struct irq_2_iommu *irq_iommu)
{
        struct irte *start, *entry, *end;
        struct intel_iommu *iommu;
        int index;

        if (irq_iommu->sub_handle)
                return 0;

        iommu = irq_iommu->iommu;
        index = irq_iommu->irte_index + irq_iommu->sub_handle;

        start = iommu->ir_table->base + index;
        end = start + (1 << irq_iommu->irte_mask);

        for (entry = start; entry < end; entry++) {
                set_64bit((unsigned long *)&entry->low, 0);
                set_64bit((unsigned long *)&entry->high, 0);
        }

        return qi_flush_iec(iommu, index, irq_iommu->irte_mask);
}

int free_irte(int irq)
{
        int rc = 0;
        struct irq_2_iommu *irq_iommu;
        unsigned long flags;

        spin_lock_irqsave(&irq_2_ir_lock, flags);
        irq_iommu = valid_irq_2_iommu(irq);
        if (!irq_iommu) {
                spin_unlock_irqrestore(&irq_2_ir_lock, flags);
                return -1;
        }

        rc = clear_entries(irq_iommu);

        irq_iommu->iommu = NULL;
        irq_iommu->irte_index = 0;
        irq_iommu->sub_handle = 0;
        irq_iommu->irte_mask = 0;

        spin_unlock_irqrestore(&irq_2_ir_lock, flags);

        return rc;
}

/*
 * source validation type
 */
#define SVT_NO_VERIFY		0x0  /* no verification is required */
#define SVT_VERIFY_SID_SQ	0x1  /* verify using SID and SQ fields */
#define SVT_VERIFY_BUS		0x2  /* verify bus of request-id */

/*
 * source-id qualifier
 */
#define SQ_ALL_16	0x0  /* verify all 16 bits of request-id */
#define SQ_13_IGNORE_1	0x1  /* verify most significant 13 bits, ignore
			      * the third least significant bit
			      */
#define SQ_13_IGNORE_2	0x2  /* verify most significant 13 bits, ignore
			      * the second and third least significant bits
			      */
#define SQ_13_IGNORE_3	0x3  /* verify most significant 13 bits, ignore
			      * the three least significant bits
			      */

/*
 * set SVT, SQ and SID fields of irte to verify
 * source ids of interrupt requests
 */
static void set_irte_sid(struct irte *irte, unsigned int svt,
                         unsigned int sq, unsigned int sid)
{
        irte->svt = svt;
        irte->sq = sq;
        irte->sid = sid;
}

int set_ioapic_sid(struct irte *irte, int apic)
{
        int i;
        u16 sid = 0;

        if (!irte)
                return -1;

        for (i = 0; i < MAX_IO_APICS; i++) {
                if (ir_ioapic[i].id == apic) {
                        sid = (ir_ioapic[i].bus << 8) | ir_ioapic[i].devfn;
                        break;
                }
        }

        if (sid == 0) {
                pr_warning("Failed to set source-id of IOAPIC (%d)\n", apic);
                return -1;
        }

        set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16, sid);

        return 0;
}

int set_hpet_sid(struct irte *irte, u8 id)
{
        int i;
        u16 sid = 0;

        if (!irte)
                return -1;

        for (i = 0; i < MAX_HPET_TBS; i++) {
                if (ir_hpet[i].id == id) {
                        sid = (ir_hpet[i].bus << 8) | ir_hpet[i].devfn;
                        break;
                }
        }

        if (sid == 0) {
                pr_warning("Failed to set source-id of HPET block (%d)\n", id);
                return -1;
        }

        /*
         * Should really use SQ_ALL_16. Some platforms are broken.
         * While we figure out the right quirks for these broken platforms, use
         * SQ_13_IGNORE_3 for now.
         */
        set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_13_IGNORE_3, sid);

        return 0;
}

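/*
 * Program the source-id for an MSI-capable device: PCIe and Root Complex
 * integrated devices are verified against their full requester id, devices
 * behind a PCIe-to-PCI(-X) bridge are verified by bus number only, and
 * devices behind a legacy PCI bridge use the bridge's requester id.
 */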
int set_msi_sid(struct irte *irte, struct pci_dev *dev)
{
        struct pci_dev *bridge;

        if (!irte || !dev)
                return -1;

        /* PCIe device or Root Complex integrated PCI device */
        if (pci_is_pcie(dev) || !dev->bus->parent) {
                set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16,
                             (dev->bus->number << 8) | dev->devfn);
                return 0;
        }

        bridge = pci_find_upstream_pcie_bridge(dev);
        if (bridge) {
                if (pci_is_pcie(bridge)) /* this is a PCIe-to-PCI/PCI-X bridge */
                        set_irte_sid(irte, SVT_VERIFY_BUS, SQ_ALL_16,
                                     (bridge->bus->number << 8) | dev->bus->number);
                else /* this is a legacy PCI bridge */
                        set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16,
                                     (bridge->bus->number << 8) | bridge->devfn);
        }

        return 0;
}

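/*
 * Point the hardware at the interrupt remapping table, flush the interrupt
 * entry cache and then enable interrupt remapping on @iommu.
 */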
static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
{
        u64 addr;
        u32 sts;
        unsigned long flags;

        addr = virt_to_phys((void *)iommu->ir_table->base);

        spin_lock_irqsave(&iommu->register_lock, flags);

        dmar_writeq(iommu->reg + DMAR_IRTA_REG,
                    (addr) | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE);

        /* Set interrupt-remapping table pointer */
        iommu->gcmd |= DMA_GCMD_SIRTP;
        writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
                      readl, (sts & DMA_GSTS_IRTPS), sts);
        spin_unlock_irqrestore(&iommu->register_lock, flags);

        /*
         * global invalidation of interrupt entry cache before enabling
         * interrupt-remapping.
         */
        qi_global_iec(iommu);

        spin_lock_irqsave(&iommu->register_lock, flags);

        /* Enable interrupt-remapping */
        iommu->gcmd |= DMA_GCMD_IRE;
        writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
                      readl, (sts & DMA_GSTS_IRES), sts);

        spin_unlock_irqrestore(&iommu->register_lock, flags);
}

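/*
 * Allocate the interrupt remapping table for @iommu and enable remapping
 * via iommu_set_intr_remapping().
 */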
static int setup_intr_remapping(struct intel_iommu *iommu, int mode)
{
        struct ir_table *ir_table;
        struct page *pages;

        ir_table = iommu->ir_table = kzalloc(sizeof(struct ir_table),
                                             GFP_ATOMIC);

        if (!iommu->ir_table)
                return -ENOMEM;

        pages = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO,
                                 INTR_REMAP_PAGE_ORDER);

        if (!pages) {
                printk(KERN_ERR "failed to allocate pages of order %d\n",
                       INTR_REMAP_PAGE_ORDER);
                kfree(iommu->ir_table);
                return -ENOMEM;
        }

        ir_table->base = page_address(pages);

        iommu_set_intr_remapping(iommu, mode);
        return 0;
}

/*
 * Disable Interrupt Remapping.
 */
static void iommu_disable_intr_remapping(struct intel_iommu *iommu)
{
        unsigned long flags;
        u32 sts;

        if (!ecap_ir_support(iommu->ecap))
                return;

        /*
         * global invalidation of interrupt entry cache before disabling
         * interrupt-remapping.
         */
        qi_global_iec(iommu);

        spin_lock_irqsave(&iommu->register_lock, flags);

        sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
        if (!(sts & DMA_GSTS_IRES))
                goto end;

        iommu->gcmd &= ~DMA_GCMD_IRE;
        writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
                      readl, !(sts & DMA_GSTS_IRES), sts);

end:
        spin_unlock_irqrestore(&iommu->register_lock, flags);
}

int __init intr_remapping_supported(void)
{
        struct dmar_drhd_unit *drhd;

        if (disable_intremap)
                return 0;

        if (!dmar_ir_support())
                return 0;

        for_each_drhd_unit(drhd) {
                struct intel_iommu *iommu = drhd->iommu;

                if (!ecap_ir_support(iommu->ecap))
                        return 0;
        }

        return 1;
}

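/*
 * Enable interrupt remapping on all DRHD units: disable any remapping and
 * queued invalidation left enabled by the BIOS, check EIM support, enable
 * queued invalidation, then set up and switch on remapping for each unit.
 */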
int __init enable_intr_remapping(int eim)
{
        struct dmar_drhd_unit *drhd;
        int setup = 0;

        if (parse_ioapics_under_ir() != 1) {
                printk(KERN_INFO "Not enabling interrupt remapping\n");
                return -1;
        }

        for_each_drhd_unit(drhd) {
                struct intel_iommu *iommu = drhd->iommu;

                /*
                 * If the queued invalidation is already initialized,
                 * we shouldn't disable it.
                 */
                if (iommu->qi)
                        continue;

                /*
                 * Clear previous faults.
                 */
                dmar_fault(-1, iommu);

                /*
                 * Disable intr remapping and queued invalidation, if already
                 * enabled prior to OS handover.
                 */
                iommu_disable_intr_remapping(iommu);

                dmar_disable_qi(iommu);
        }

        /*
         * check for the Interrupt-remapping support
         */
        for_each_drhd_unit(drhd) {
                struct intel_iommu *iommu = drhd->iommu;

                if (!ecap_ir_support(iommu->ecap))
                        continue;

                if (eim && !ecap_eim_support(iommu->ecap)) {
                        printk(KERN_INFO "DRHD %Lx: EIM not supported,"
                               " ecap %Lx\n", drhd->reg_base_addr, iommu->ecap);
                        return -1;
                }
        }

        /*
         * Enable queued invalidation for all the DRHD's.
         */
        for_each_drhd_unit(drhd) {
                int ret;
                struct intel_iommu *iommu = drhd->iommu;
                ret = dmar_enable_qi(iommu);

                if (ret) {
                        printk(KERN_ERR "DRHD %Lx: failed to enable queued"
                               " invalidation, ecap %Lx, ret %d\n",
                               drhd->reg_base_addr, iommu->ecap, ret);
                        return -1;
                }
        }

        /*
         * Setup Interrupt-remapping for all the DRHD's now.
         */
        for_each_drhd_unit(drhd) {
                struct intel_iommu *iommu = drhd->iommu;

                if (!ecap_ir_support(iommu->ecap))
                        continue;

                if (setup_intr_remapping(iommu, eim))
                        goto error;

                setup = 1;
        }

        if (!setup)
                goto error;

        intr_remapping_enabled = 1;

        return 0;

error:
        /*
         * handle error condition gracefully here!
         */
        return -1;
}

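/*
 * Walk the PCI path of an HPET device-scope entry to find the bus/devfn
 * its interrupt requests originate from, using direct config space
 * accesses since the PCI core is not initialized yet.
 */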
static void ir_parse_one_hpet_scope(struct acpi_dmar_device_scope *scope,
                                    struct intel_iommu *iommu)
{
        struct acpi_dmar_pci_path *path;
        u8 bus;
        int count;

        bus = scope->bus;
        path = (struct acpi_dmar_pci_path *)(scope + 1);
        count = (scope->length - sizeof(struct acpi_dmar_device_scope))
                / sizeof(struct acpi_dmar_pci_path);

        while (--count > 0) {
                /*
                 * Access PCI directly because the PCI
                 * subsystem isn't initialized yet.
                 */
                bus = read_pci_config_byte(bus, path->dev, path->fn,
                                           PCI_SECONDARY_BUS);
                path++;
        }
        ir_hpet[ir_hpet_num].bus = bus;
        ir_hpet[ir_hpet_num].devfn = PCI_DEVFN(path->dev, path->fn);
        ir_hpet[ir_hpet_num].iommu = iommu;
        ir_hpet[ir_hpet_num].id = scope->enumeration_id;
        ir_hpet_num++;
}

static void ir_parse_one_ioapic_scope(struct acpi_dmar_device_scope *scope,
                                      struct intel_iommu *iommu)
{
        struct acpi_dmar_pci_path *path;
        u8 bus;
        int count;

        bus = scope->bus;
        path = (struct acpi_dmar_pci_path *)(scope + 1);
        count = (scope->length - sizeof(struct acpi_dmar_device_scope))
                / sizeof(struct acpi_dmar_pci_path);

        while (--count > 0) {
                /*
                 * Access PCI directly because the PCI
                 * subsystem isn't initialized yet.
                 */
                bus = read_pci_config_byte(bus, path->dev, path->fn,
                                           PCI_SECONDARY_BUS);
                path++;
        }

        ir_ioapic[ir_ioapic_num].bus = bus;
        ir_ioapic[ir_ioapic_num].devfn = PCI_DEVFN(path->dev, path->fn);
        ir_ioapic[ir_ioapic_num].iommu = iommu;
        ir_ioapic[ir_ioapic_num].id = scope->enumeration_id;
        ir_ioapic_num++;
}

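/*
 * Record which IO-APICs and HPET blocks are listed in the device scope of
 * a DRHD, so their interrupts can later be mapped to this remapping unit.
 */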
static int ir_parse_ioapic_hpet_scope(struct acpi_dmar_header *header,
                                      struct intel_iommu *iommu)
{
        struct acpi_dmar_hardware_unit *drhd;
        struct acpi_dmar_device_scope *scope;
        void *start, *end;

        drhd = (struct acpi_dmar_hardware_unit *)header;

        start = (void *)(drhd + 1);
        end = ((void *)drhd) + header->length;

        while (start < end) {
                scope = start;
                if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC) {
                        if (ir_ioapic_num == MAX_IO_APICS) {
                                printk(KERN_WARNING "Exceeded Max IO APICS\n");
                                return -1;
                        }

                        printk(KERN_INFO "IOAPIC id %d under DRHD base"
                               " 0x%Lx\n", scope->enumeration_id,
                               drhd->address);

                        ir_parse_one_ioapic_scope(scope, iommu);
                } else if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_HPET) {
                        if (ir_hpet_num == MAX_HPET_TBS) {
                                printk(KERN_WARNING "Exceeded Max HPET blocks\n");
                                return -1;
                        }

                        printk(KERN_INFO "HPET id %d under DRHD base"
                               " 0x%Lx\n", scope->enumeration_id,
                               drhd->address);

                        ir_parse_one_hpet_scope(scope, iommu);
                }
                start += scope->length;
        }

        return 0;
}

/*
 * Finds the association between IO-APICs and their interrupt-remapping
 * hardware units.
 */
int __init parse_ioapics_under_ir(void)
{
        struct dmar_drhd_unit *drhd;
        int ir_supported = 0;

        for_each_drhd_unit(drhd) {
                struct intel_iommu *iommu = drhd->iommu;

                if (ecap_ir_support(iommu->ecap)) {
                        if (ir_parse_ioapic_hpet_scope(drhd->hdr, iommu))
                                return -1;

                        ir_supported = 1;
                }
        }

        if (ir_supported && ir_ioapic_num != nr_ioapics) {
                printk(KERN_WARNING
                       "Not all IO-APICs listed under remapping hardware\n");
                return -1;
        }

        return ir_supported;
}

void disable_intr_remapping(void)
{
        struct dmar_drhd_unit *drhd;
        struct intel_iommu *iommu = NULL;

        /*
         * Disable Interrupt-remapping for all the DRHD's now.
         */
        for_each_iommu(iommu, drhd) {
                if (!ecap_ir_support(iommu->ecap))
                        continue;

                iommu_disable_intr_remapping(iommu);
        }
}

int reenable_intr_remapping(int eim)
{
        struct dmar_drhd_unit *drhd;
        int setup = 0;
        struct intel_iommu *iommu = NULL;

        for_each_iommu(iommu, drhd)
                if (iommu->qi)
                        dmar_reenable_qi(iommu);

        /*
         * Setup Interrupt-remapping for all the DRHD's now.
         */
        for_each_iommu(iommu, drhd) {
                if (!ecap_ir_support(iommu->ecap))
                        continue;

                /* Set up interrupt remapping for iommu. */
                iommu_set_intr_remapping(iommu, eim);
                setup = 1;
        }

        if (!setup)
                goto error;

        return 0;

error:
        /*
         * handle error condition gracefully here!
         */
        return -1;
}