pci: intr-remap: Free irte memory if SPARSE_IRQ=y
drivers/pci/intr_remapping.c
#include <linux/interrupt.h>
#include <linux/dmar.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/hpet.h>
#include <linux/pci.h>
#include <linux/irq.h>
#include <asm/io_apic.h>
#include <asm/smp.h>
#include <asm/cpu.h>
#include <linux/intel-iommu.h>
#include "intr_remapping.h"
#include <acpi/acpi.h>
#include <asm/pci-direct.h>
#include "pci.h"

static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
static struct hpet_scope ir_hpet[MAX_HPET_TBS];
static int ir_ioapic_num, ir_hpet_num;
int intr_remapping_enabled;

static int disable_intremap;
static int disable_sourceid_checking;

static __init int setup_nointremap(char *str)
{
	disable_intremap = 1;
	return 0;
}
early_param("nointremap", setup_nointremap);

static __init int setup_intremap(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strncmp(str, "on", 2))
		disable_intremap = 0;
	else if (!strncmp(str, "off", 3))
		disable_intremap = 1;
	else if (!strncmp(str, "nosid", 5))
		disable_sourceid_checking = 1;

	return 0;
}
early_param("intremap", setup_intremap);

struct irq_2_iommu {
	struct intel_iommu *iommu;
	u16 irte_index;
	u16 sub_handle;
	u8 irte_mask;
};

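/*
 * Bookkeeping for the irq -> IRTE mapping.  In the first variant below
 * the irq_2_iommu structure is allocated on demand in irq_2_iommu_alloc()
 * and attached to the interrupt, and it is released again from free_irte()
 * via irq_2_iommu_free().  The fallback variant uses a static NR_IRQS
 * sized array instead, so there is nothing to free in that case.
 */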
#ifdef CONFIG_GENERIC_HARDIRQS
static struct irq_2_iommu *get_one_free_irq_2_iommu(int node)
{
	struct irq_2_iommu *iommu;

	iommu = kzalloc_node(sizeof(*iommu), GFP_ATOMIC, node);
	printk(KERN_DEBUG "alloc irq_2_iommu on node %d\n", node);

	return iommu;
}

static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
{
	struct irq_desc *desc;

	desc = irq_to_desc(irq);

	if (WARN_ON_ONCE(!desc))
		return NULL;

	return desc->irq_2_iommu;
}

static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
{
	struct irq_desc *desc;
	struct irq_2_iommu *irq_iommu;

	desc = irq_to_desc(irq);
	if (!desc) {
		printk(KERN_INFO "can not get irq_desc for %d\n", irq);
		return NULL;
	}

	irq_iommu = desc->irq_2_iommu;

	if (!irq_iommu)
		desc->irq_2_iommu = get_one_free_irq_2_iommu(irq_node(irq));

	return desc->irq_2_iommu;
}

static void irq_2_iommu_free(unsigned int irq)
{
	struct irq_data *d = irq_get_irq_data(irq);
	struct irq_2_iommu *p = d->irq_2_iommu;

	d->irq_2_iommu = NULL;
	kfree(p);
}

#else /* !CONFIG_SPARSE_IRQ */

static struct irq_2_iommu irq_2_iommuX[NR_IRQS];

static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
{
	if (irq < nr_irqs)
		return &irq_2_iommuX[irq];

	return NULL;
}
static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
{
	return irq_2_iommu(irq);
}

static void irq_2_iommu_free(unsigned int irq) { }

#endif

static DEFINE_SPINLOCK(irq_2_ir_lock);

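/*
 * valid_irq_2_iommu() returns the mapping only when an IOMMU is attached
 * to it, i.e. when the interrupt is actually remapped; this is what
 * irq_remapped() tests.
 */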
static struct irq_2_iommu *valid_irq_2_iommu(unsigned int irq)
{
	struct irq_2_iommu *irq_iommu;

	irq_iommu = irq_2_iommu(irq);

	if (!irq_iommu)
		return NULL;

	if (!irq_iommu->iommu)
		return NULL;

	return irq_iommu;
}

int irq_remapped(int irq)
{
	return valid_irq_2_iommu(irq) != NULL;
}

int get_irte(int irq, struct irte *entry)
{
	int index;
	struct irq_2_iommu *irq_iommu;
	unsigned long flags;

	if (!entry)
		return -1;

	spin_lock_irqsave(&irq_2_ir_lock, flags);
	irq_iommu = valid_irq_2_iommu(irq);
	if (!irq_iommu) {
		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
		return -1;
	}

	index = irq_iommu->irte_index + irq_iommu->sub_handle;
	*entry = *(irq_iommu->iommu->ir_table->base + index);

	spin_unlock_irqrestore(&irq_2_ir_lock, flags);
	return 0;
}

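/*
 * alloc_irte() reserves 'count' consecutive interrupt remap table entries
 * for @irq on @iommu.  Multi-entry requests are rounded up to a power of
 * two and the resulting mask is checked against the maximum handle mask
 * the hardware advertises.  On success the base index is recorded in the
 * irq_2_iommu mapping and returned; -1 is returned on failure.
 *
 * Rough usage by the MSI/IO-APIC setup paths (a sketch, not verbatim):
 *
 *	index = alloc_irte(iommu, irq, nvec);
 *	if (index >= 0)
 *		modify_irte(irq, &irte);	// program the entry
 *	...
 *	free_irte(irq);				// on teardown
 */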
int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
{
	struct ir_table *table = iommu->ir_table;
	struct irq_2_iommu *irq_iommu;
	u16 index, start_index;
	unsigned int mask = 0;
	unsigned long flags;
	int i;

	if (!count)
		return -1;

#ifndef CONFIG_SPARSE_IRQ
	/* protect irq_2_iommu_alloc later */
	if (irq >= nr_irqs)
		return -1;
#endif

	/*
	 * start the IRTE search from index 0.
	 */
	index = start_index = 0;

	if (count > 1) {
		count = __roundup_pow_of_two(count);
		mask = ilog2(count);
	}

	if (mask > ecap_max_handle_mask(iommu->ecap)) {
		printk(KERN_ERR
		       "Requested mask %x exceeds the max invalidation handle"
		       " mask value %Lx\n", mask,
		       ecap_max_handle_mask(iommu->ecap));
		return -1;
	}

	spin_lock_irqsave(&irq_2_ir_lock, flags);
	do {
		for (i = index; i < index + count; i++)
			if (table->base[i].present)
				break;
		/* empty index found */
		if (i == index + count)
			break;

		index = (index + count) % INTR_REMAP_TABLE_ENTRIES;

		if (index == start_index) {
			spin_unlock_irqrestore(&irq_2_ir_lock, flags);
			printk(KERN_ERR "can't allocate an IRTE\n");
			return -1;
		}
	} while (1);

	for (i = index; i < index + count; i++)
		table->base[i].present = 1;

	irq_iommu = irq_2_iommu_alloc(irq);
	if (!irq_iommu) {
		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
		printk(KERN_ERR "can't allocate irq_2_iommu\n");
		return -1;
	}

	irq_iommu->iommu = iommu;
	irq_iommu->irte_index = index;
	irq_iommu->sub_handle = 0;
	irq_iommu->irte_mask = mask;

	spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return index;
}

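/*
 * Flush the interrupt entry cache for the given index/mask by submitting
 * a queued invalidation descriptor and waiting for it to complete.
 */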
static int qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
{
	struct qi_desc desc;

	desc.low = QI_IEC_IIDEX(index) | QI_IEC_TYPE | QI_IEC_IM(mask)
		   | QI_IEC_SELECTIVE;
	desc.high = 0;

	return qi_submit_sync(&desc, iommu);
}

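/*
 * The helpers below let callers share one IRTE allocation between several
 * interrupts: map_irq_to_irte_handle() looks up the base index and
 * sub-handle of an irq, set_irte_irq() binds an irq to an already
 * allocated index + sub-handle, and clear_irte_irq() drops that binding
 * again without touching the table entries themselves.
 */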
int map_irq_to_irte_handle(int irq, u16 *sub_handle)
{
	int index;
	struct irq_2_iommu *irq_iommu;
	unsigned long flags;

	spin_lock_irqsave(&irq_2_ir_lock, flags);
	irq_iommu = valid_irq_2_iommu(irq);
	if (!irq_iommu) {
		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
		return -1;
	}

	*sub_handle = irq_iommu->sub_handle;
	index = irq_iommu->irte_index;
	spin_unlock_irqrestore(&irq_2_ir_lock, flags);
	return index;
}

int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
{
	struct irq_2_iommu *irq_iommu;
	unsigned long flags;

	spin_lock_irqsave(&irq_2_ir_lock, flags);

	irq_iommu = irq_2_iommu_alloc(irq);

	if (!irq_iommu) {
		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
		printk(KERN_ERR "can't allocate irq_2_iommu\n");
		return -1;
	}

	irq_iommu->iommu = iommu;
	irq_iommu->irte_index = index;
	irq_iommu->sub_handle = subhandle;
	irq_iommu->irte_mask = 0;

	spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return 0;
}

int clear_irte_irq(int irq, struct intel_iommu *iommu, u16 index)
{
	struct irq_2_iommu *irq_iommu;
	unsigned long flags;

	spin_lock_irqsave(&irq_2_ir_lock, flags);
	irq_iommu = valid_irq_2_iommu(irq);
	if (!irq_iommu) {
		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
		return -1;
	}

	irq_iommu->iommu = NULL;
	irq_iommu->irte_index = 0;
	irq_iommu->sub_handle = 0;
	irq_2_iommu(irq)->irte_mask = 0;

	spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return 0;
}

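/*
 * modify_irte() writes a new IRTE for @irq, flushes the CPU cache for
 * that entry and then invalidates the IOMMU's interrupt entry cache so
 * the hardware sees the update.  flush_irte() below only performs the
 * interrupt entry cache invalidation for the irq's allocation.
 */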
int modify_irte(int irq, struct irte *irte_modified)
{
	int rc;
	int index;
	struct irte *irte;
	struct intel_iommu *iommu;
	struct irq_2_iommu *irq_iommu;
	unsigned long flags;

	spin_lock_irqsave(&irq_2_ir_lock, flags);
	irq_iommu = valid_irq_2_iommu(irq);
	if (!irq_iommu) {
		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
		return -1;
	}

	iommu = irq_iommu->iommu;

	index = irq_iommu->irte_index + irq_iommu->sub_handle;
	irte = &iommu->ir_table->base[index];

	set_64bit(&irte->low, irte_modified->low);
	set_64bit(&irte->high, irte_modified->high);
	__iommu_flush_cache(iommu, irte, sizeof(*irte));

	rc = qi_flush_iec(iommu, index, 0);
	spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return rc;
}

int flush_irte(int irq)
{
	int rc;
	int index;
	struct intel_iommu *iommu;
	struct irq_2_iommu *irq_iommu;
	unsigned long flags;

	spin_lock_irqsave(&irq_2_ir_lock, flags);
	irq_iommu = valid_irq_2_iommu(irq);
	if (!irq_iommu) {
		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
		return -1;
	}

	iommu = irq_iommu->iommu;

	index = irq_iommu->irte_index + irq_iommu->sub_handle;

	rc = qi_flush_iec(iommu, index, irq_iommu->irte_mask);
	spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return rc;
}

struct intel_iommu *map_hpet_to_ir(u8 hpet_id)
{
	int i;

	for (i = 0; i < MAX_HPET_TBS; i++)
		if (ir_hpet[i].id == hpet_id)
			return ir_hpet[i].iommu;
	return NULL;
}

struct intel_iommu *map_ioapic_to_ir(int apic)
{
	int i;

	for (i = 0; i < MAX_IO_APICS; i++)
		if (ir_ioapic[i].id == apic)
			return ir_ioapic[i].iommu;
	return NULL;
}

struct intel_iommu *map_dev_to_ir(struct pci_dev *dev)
{
	struct dmar_drhd_unit *drhd;

	drhd = dmar_find_matched_drhd_unit(dev);
	if (!drhd)
		return NULL;

	return drhd->iommu;
}

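/*
 * Zero all IRTEs that belong to the allocation (callers that only hold a
 * sub-handle skip this) and invalidate the interrupt entry cache for them.
 */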
static int clear_entries(struct irq_2_iommu *irq_iommu)
{
	struct irte *start, *entry, *end;
	struct intel_iommu *iommu;
	int index;

	if (irq_iommu->sub_handle)
		return 0;

	iommu = irq_iommu->iommu;
	index = irq_iommu->irte_index + irq_iommu->sub_handle;

	start = iommu->ir_table->base + index;
	end = start + (1 << irq_iommu->irte_mask);

	for (entry = start; entry < end; entry++) {
		set_64bit(&entry->low, 0);
		set_64bit(&entry->high, 0);
	}

	return qi_flush_iec(iommu, index, irq_iommu->irte_mask);
}

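/*
 * free_irte() tears the mapping down: the table entries are cleared, the
 * irq_2_iommu bookkeeping is reset and, with the per-interrupt allocation
 * above, the irq_2_iommu memory itself is released via irq_2_iommu_free()
 * - the freeing referred to in the commit subject.
 */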
int free_irte(int irq)
{
	int rc = 0;
	struct irq_2_iommu *irq_iommu;
	unsigned long flags;

	spin_lock_irqsave(&irq_2_ir_lock, flags);
	irq_iommu = valid_irq_2_iommu(irq);
	if (!irq_iommu) {
		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
		return -1;
	}

	rc = clear_entries(irq_iommu);

	irq_iommu->iommu = NULL;
	irq_iommu->irte_index = 0;
	irq_iommu->sub_handle = 0;
	irq_iommu->irte_mask = 0;

	spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	irq_2_iommu_free(irq);

	return rc;
}

/*
 * source validation type
 */
#define SVT_NO_VERIFY		0x0	/* no verification is required */
#define SVT_VERIFY_SID_SQ	0x1	/* verify using SID and SQ fields */
#define SVT_VERIFY_BUS		0x2	/* verify bus of request-id */

/*
 * source-id qualifier
 */
#define SQ_ALL_16	0x0	/* verify all 16 bits of request-id */
#define SQ_13_IGNORE_1	0x1	/* verify most significant 13 bits, ignore
				 * the third least significant bit
				 */
#define SQ_13_IGNORE_2	0x2	/* verify most significant 13 bits, ignore
				 * the second and third least significant bits
				 */
#define SQ_13_IGNORE_3	0x3	/* verify most significant 13 bits, ignore
				 * the least three significant bits
				 */

/*
 * set SVT, SQ and SID fields of irte to verify
 * source ids of interrupt requests
 */
static void set_irte_sid(struct irte *irte, unsigned int svt,
			 unsigned int sq, unsigned int sid)
{
	if (disable_sourceid_checking)
		svt = SVT_NO_VERIFY;
	irte->svt = svt;
	irte->sq = sq;
	irte->sid = sid;
}

int set_ioapic_sid(struct irte *irte, int apic)
{
	int i;
	u16 sid = 0;

	if (!irte)
		return -1;

	for (i = 0; i < MAX_IO_APICS; i++) {
		if (ir_ioapic[i].id == apic) {
			sid = (ir_ioapic[i].bus << 8) | ir_ioapic[i].devfn;
			break;
		}
	}

	if (sid == 0) {
		pr_warning("Failed to set source-id of IOAPIC (%d)\n", apic);
		return -1;
	}

	set_irte_sid(irte, 1, 0, sid);

	return 0;
}

int set_hpet_sid(struct irte *irte, u8 id)
{
	int i;
	u16 sid = 0;

	if (!irte)
		return -1;

	for (i = 0; i < MAX_HPET_TBS; i++) {
		if (ir_hpet[i].id == id) {
			sid = (ir_hpet[i].bus << 8) | ir_hpet[i].devfn;
			break;
		}
	}

	if (sid == 0) {
		pr_warning("Failed to set source-id of HPET block (%d)\n", id);
		return -1;
	}

	/*
	 * Should really use SQ_ALL_16. Some platforms are broken.
	 * While we figure out the right quirks for these broken platforms, use
	 * SQ_13_IGNORE_3 for now.
	 */
	set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_13_IGNORE_3, sid);

	return 0;
}

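/*
 * Source-id for MSIs: PCIe devices and Root Complex integrated PCI
 * devices are verified against their own requester-id.  Devices behind
 * a PCIe-to-PCI/PCI-X bridge are verified by bus number, and devices
 * behind a legacy PCI bridge against the bridge's requester-id, since
 * such bridges forward requests on behalf of the device.
 */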
int set_msi_sid(struct irte *irte, struct pci_dev *dev)
{
	struct pci_dev *bridge;

	if (!irte || !dev)
		return -1;

	/* PCIe device or Root Complex integrated PCI device */
	if (pci_is_pcie(dev) || !dev->bus->parent) {
		set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16,
			     (dev->bus->number << 8) | dev->devfn);
		return 0;
	}

	bridge = pci_find_upstream_pcie_bridge(dev);
	if (bridge) {
		if (pci_is_pcie(bridge))/* this is a PCIe-to-PCI/PCIX bridge */
			set_irte_sid(irte, SVT_VERIFY_BUS, SQ_ALL_16,
				(bridge->bus->number << 8) | dev->bus->number);
		else /* this is a legacy PCI bridge */
			set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16,
				(bridge->bus->number << 8) | bridge->devfn);
	}

	return 0;
}

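/*
 * Program the interrupt remapping table address (IRTA) into the IOMMU,
 * flush its interrupt entry cache and then turn interrupt remapping on,
 * waiting for the hardware to acknowledge each step in the global status
 * register.
 */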
static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
{
	u64 addr;
	u32 sts;
	unsigned long flags;

	addr = virt_to_phys((void *)iommu->ir_table->base);

	spin_lock_irqsave(&iommu->register_lock, flags);

	dmar_writeq(iommu->reg + DMAR_IRTA_REG,
		    (addr) | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE);

	/* Set interrupt-remapping table pointer */
	iommu->gcmd |= DMA_GCMD_SIRTP;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_IRTPS), sts);
	spin_unlock_irqrestore(&iommu->register_lock, flags);

	/*
	 * global invalidation of interrupt entry cache before enabling
	 * interrupt-remapping.
	 */
	qi_global_iec(iommu);

	spin_lock_irqsave(&iommu->register_lock, flags);

	/* Enable interrupt-remapping */
	iommu->gcmd |= DMA_GCMD_IRE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_IRES), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flags);
}


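/*
 * Allocate the ir_table descriptor and the interrupt remap table pages
 * for this IOMMU, then hand them to iommu_set_intr_remapping() above.
 */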
static int setup_intr_remapping(struct intel_iommu *iommu, int mode)
{
	struct ir_table *ir_table;
	struct page *pages;

	ir_table = iommu->ir_table = kzalloc(sizeof(struct ir_table),
					     GFP_ATOMIC);

	if (!iommu->ir_table)
		return -ENOMEM;

	pages = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO,
				 INTR_REMAP_PAGE_ORDER);

	if (!pages) {
		printk(KERN_ERR "failed to allocate pages of order %d\n",
		       INTR_REMAP_PAGE_ORDER);
		kfree(iommu->ir_table);
		return -ENOMEM;
	}

	ir_table->base = page_address(pages);

	iommu_set_intr_remapping(iommu, mode);
	return 0;
}

/*
 * Disable Interrupt Remapping.
 */
static void iommu_disable_intr_remapping(struct intel_iommu *iommu)
{
	unsigned long flags;
	u32 sts;

	if (!ecap_ir_support(iommu->ecap))
		return;

	/*
	 * global invalidation of interrupt entry cache before disabling
	 * interrupt-remapping.
	 */
	qi_global_iec(iommu);

	spin_lock_irqsave(&iommu->register_lock, flags);

	sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
	if (!(sts & DMA_GSTS_IRES))
		goto end;

	iommu->gcmd &= ~DMA_GCMD_IRE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, !(sts & DMA_GSTS_IRES), sts);

end:
	spin_unlock_irqrestore(&iommu->register_lock, flags);
}

int __init intr_remapping_supported(void)
{
	struct dmar_drhd_unit *drhd;

	if (disable_intremap)
		return 0;

	if (!dmar_ir_support())
		return 0;

	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		if (!ecap_ir_support(iommu->ecap))
			return 0;
	}

	return 1;
}

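/*
 * enable_intr_remapping() is the main setup path: it verifies that the
 * IO-APICs are covered by remapping hardware, resets any remapping and
 * queued-invalidation state left over from the firmware, checks EIM
 * (extended interrupt mode, used for x2APIC) support when requested,
 * enables queued invalidation on every DRHD and finally sets up the
 * remap tables.
 */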
int __init enable_intr_remapping(int eim)
{
	struct dmar_drhd_unit *drhd;
	int setup = 0;

	if (parse_ioapics_under_ir() != 1) {
		printk(KERN_INFO "Not enabling interrupt remapping\n");
		return -1;
	}

	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		/*
		 * If the queued invalidation is already initialized,
		 * shouldn't disable it.
		 */
		if (iommu->qi)
			continue;

		/*
		 * Clear previous faults.
		 */
		dmar_fault(-1, iommu);

		/*
		 * Disable intr remapping and queued invalidation, if already
		 * enabled prior to OS handover.
		 */
		iommu_disable_intr_remapping(iommu);

		dmar_disable_qi(iommu);
	}

	/*
	 * check for the Interrupt-remapping support
	 */
	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		if (!ecap_ir_support(iommu->ecap))
			continue;

		if (eim && !ecap_eim_support(iommu->ecap)) {
			printk(KERN_INFO "DRHD %Lx: EIM not supported by DRHD, "
			       " ecap %Lx\n", drhd->reg_base_addr, iommu->ecap);
			return -1;
		}
	}

	/*
	 * Enable queued invalidation for all the DRHD's.
	 */
	for_each_drhd_unit(drhd) {
		int ret;
		struct intel_iommu *iommu = drhd->iommu;
		ret = dmar_enable_qi(iommu);

		if (ret) {
			printk(KERN_ERR "DRHD %Lx: failed to enable queued "
			       "invalidation, ecap %Lx, ret %d\n",
			       drhd->reg_base_addr, iommu->ecap, ret);
			return -1;
		}
	}

	/*
	 * Setup Interrupt-remapping for all the DRHD's now.
	 */
	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		if (!ecap_ir_support(iommu->ecap))
			continue;

		if (setup_intr_remapping(iommu, eim))
			goto error;

		setup = 1;
	}

	if (!setup)
		goto error;

	intr_remapping_enabled = 1;

	return 0;

error:
	/*
	 * handle error condition gracefully here!
	 */
	return -1;
}

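/*
 * The two scope parsers below walk the ACPI DMAR device-scope path of an
 * HPET or IO-APIC entry down to the bus the device actually sits on
 * (using direct config-space accesses, since the PCI core is not up yet)
 * and record its bus/devfn together with the owning IOMMU.
 */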
static void ir_parse_one_hpet_scope(struct acpi_dmar_device_scope *scope,
				    struct intel_iommu *iommu)
{
	struct acpi_dmar_pci_path *path;
	u8 bus;
	int count;

	bus = scope->bus;
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

	while (--count > 0) {
		/*
		 * Access PCI directly because the PCI
		 * subsystem isn't initialized yet.
		 */
		bus = read_pci_config_byte(bus, path->dev, path->fn,
					   PCI_SECONDARY_BUS);
		path++;
	}
	ir_hpet[ir_hpet_num].bus = bus;
	ir_hpet[ir_hpet_num].devfn = PCI_DEVFN(path->dev, path->fn);
	ir_hpet[ir_hpet_num].iommu = iommu;
	ir_hpet[ir_hpet_num].id = scope->enumeration_id;
	ir_hpet_num++;
}

static void ir_parse_one_ioapic_scope(struct acpi_dmar_device_scope *scope,
				      struct intel_iommu *iommu)
{
	struct acpi_dmar_pci_path *path;
	u8 bus;
	int count;

	bus = scope->bus;
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

	while (--count > 0) {
		/*
		 * Access PCI directly because the PCI
		 * subsystem isn't initialized yet.
		 */
		bus = read_pci_config_byte(bus, path->dev, path->fn,
					   PCI_SECONDARY_BUS);
		path++;
	}

	ir_ioapic[ir_ioapic_num].bus = bus;
	ir_ioapic[ir_ioapic_num].devfn = PCI_DEVFN(path->dev, path->fn);
	ir_ioapic[ir_ioapic_num].iommu = iommu;
	ir_ioapic[ir_ioapic_num].id = scope->enumeration_id;
	ir_ioapic_num++;
}

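/*
 * Walk the device scopes of one DRHD and register every IO-APIC and HPET
 * block it covers, bailing out if the static tables overflow.
 */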
static int ir_parse_ioapic_hpet_scope(struct acpi_dmar_header *header,
				      struct intel_iommu *iommu)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_device_scope *scope;
	void *start, *end;

	drhd = (struct acpi_dmar_hardware_unit *)header;

	start = (void *)(drhd + 1);
	end = ((void *)drhd) + header->length;

	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC) {
			if (ir_ioapic_num == MAX_IO_APICS) {
				printk(KERN_WARNING "Exceeded Max IO APICS\n");
				return -1;
			}

			printk(KERN_INFO "IOAPIC id %d under DRHD base "
			       " 0x%Lx IOMMU %d\n", scope->enumeration_id,
			       drhd->address, iommu->seq_id);

			ir_parse_one_ioapic_scope(scope, iommu);
		} else if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_HPET) {
			if (ir_hpet_num == MAX_HPET_TBS) {
				printk(KERN_WARNING "Exceeded Max HPET blocks\n");
				return -1;
			}

			printk(KERN_INFO "HPET id %d under DRHD base"
			       " 0x%Lx\n", scope->enumeration_id,
			       drhd->address);

			ir_parse_one_hpet_scope(scope, iommu);
		}
		start += scope->length;
	}

	return 0;
}

/*
 * Find the association between IOAPICs and their Interrupt-remapping
 * hardware units.
 */
int __init parse_ioapics_under_ir(void)
{
	struct dmar_drhd_unit *drhd;
	int ir_supported = 0;

	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		if (ecap_ir_support(iommu->ecap)) {
			if (ir_parse_ioapic_hpet_scope(drhd->hdr, iommu))
				return -1;

			ir_supported = 1;
		}
	}

	if (ir_supported && ir_ioapic_num != nr_ioapics) {
		printk(KERN_WARNING
		       "Not all IO-APIC's listed under remapping hardware\n");
		return -1;
	}

	return ir_supported;
}

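/*
 * disable_intr_remapping() turns remapping off on every capable DRHD;
 * reenable_intr_remapping() restores queued invalidation and re-programs
 * the previously allocated remap tables, e.g. on resume.
 */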
void disable_intr_remapping(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;

	/*
	 * Disable Interrupt-remapping for all the DRHD's now.
	 */
	for_each_iommu(iommu, drhd) {
		if (!ecap_ir_support(iommu->ecap))
			continue;

		iommu_disable_intr_remapping(iommu);
	}
}

int reenable_intr_remapping(int eim)
{
	struct dmar_drhd_unit *drhd;
	int setup = 0;
	struct intel_iommu *iommu = NULL;

	for_each_iommu(iommu, drhd)
		if (iommu->qi)
			dmar_reenable_qi(iommu);

	/*
	 * Setup Interrupt-remapping for all the DRHD's now.
	 */
	for_each_iommu(iommu, drhd) {
		if (!ecap_ir_support(iommu->ecap))
			continue;

		/* Set up interrupt remapping for iommu. */
		iommu_set_intr_remapping(iommu, eim);
		setup = 1;
	}

	if (!setup)
		goto error;

	return 0;

error:
	/*
	 * handle error condition gracefully here!
	 */
	return -1;
}