#include <linux/interrupt.h>
#include <linux/dmar.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/hpet.h>
#include <linux/pci.h>
#include <linux/irq.h>
#include <asm/io_apic.h>
#include <linux/intel-iommu.h>
#include "intr_remapping.h"
#include <acpi/acpi.h>
#include <asm/pci-direct.h>
static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
static struct hpet_scope ir_hpet[MAX_HPET_TBS];
static int ir_ioapic_num, ir_hpet_num;
int intr_remapping_enabled;

static int disable_intremap;
static int disable_sourceid_checking;
static __init int setup_nointremap(char *str)
{
	disable_intremap = 1;
	return 0;
}
early_param("nointremap", setup_nointremap);
static __init int setup_intremap(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strncmp(str, "on", 2))
		disable_intremap = 0;
	else if (!strncmp(str, "off", 3))
		disable_intremap = 1;
	else if (!strncmp(str, "nosid", 5))
		disable_sourceid_checking = 1;

	return 0;
}
early_param("intremap", setup_intremap);
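
/*
 * Per-IRQ bookkeeping: which remapping-capable IOMMU owns this IRQ's
 * interrupt remap table entries (IRTEs), where its block of entries
 * starts, and which sub-handle within that block it uses.
 */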
struct irq_2_iommu {
	struct intel_iommu *iommu;
	u16 irte_index;
	u16 sub_handle;
	u8  irte_mask;
};
#ifdef CONFIG_GENERIC_HARDIRQS
static struct irq_2_iommu *get_one_free_irq_2_iommu(int node)
{
	struct irq_2_iommu *iommu;

	iommu = kzalloc_node(sizeof(*iommu), GFP_ATOMIC, node);
	printk(KERN_DEBUG "alloc irq_2_iommu on node %d\n", node);

	return iommu;
}
static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
{
	struct irq_desc *desc;

	desc = irq_to_desc(irq);

	if (WARN_ON_ONCE(!desc))
		return NULL;

	return desc->irq_2_iommu;
}
static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
{
	struct irq_desc *desc;
	struct irq_2_iommu *irq_iommu;

	desc = irq_to_desc(irq);
	if (!desc) {
		printk(KERN_INFO "cannot get irq_desc for %d\n", irq);
		return NULL;
	}

	irq_iommu = desc->irq_2_iommu;

	if (!irq_iommu)
		desc->irq_2_iommu = get_one_free_irq_2_iommu(irq_node(irq));

	return desc->irq_2_iommu;
}
static void irq_2_iommu_free(unsigned int irq)
{
	struct irq_data *d = irq_get_irq_data(irq);
	struct irq_2_iommu *p = d->irq_2_iommu;

	d->irq_2_iommu = NULL;
	kfree(p);
}
#else /* !CONFIG_GENERIC_HARDIRQS */

static struct irq_2_iommu irq_2_iommuX[NR_IRQS];

static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
{
	if (irq < nr_irqs)
		return &irq_2_iommuX[irq];

	return NULL;
}

static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
{
	return irq_2_iommu(irq);
}

static void irq_2_iommu_free(unsigned int irq) { }

#endif
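
/* Serializes IRTE allocation and all irq_2_iommu lookups/updates below. */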
static DEFINE_SPINLOCK(irq_2_ir_lock);
static struct irq_2_iommu *valid_irq_2_iommu(unsigned int irq)
{
	struct irq_2_iommu *irq_iommu;

	irq_iommu = irq_2_iommu(irq);

	if (!irq_iommu)
		return NULL;

	if (!irq_iommu->iommu)
		return NULL;

	return irq_iommu;
}
int irq_remapped(int irq)
{
	return valid_irq_2_iommu(irq) != NULL;
}
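
/*
 * get_irte() copies the live IRTE for @irq into @entry under
 * irq_2_ir_lock; returns -1 if the IRQ is not remapped.
 */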
int get_irte(int irq, struct irte *entry)
{
	struct irq_2_iommu *irq_iommu;
	unsigned long flags;
	int index;

	if (!entry)
		return -1;

	spin_lock_irqsave(&irq_2_ir_lock, flags);
	irq_iommu = valid_irq_2_iommu(irq);
	if (!irq_iommu) {
		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
		return -1;
	}

	index = irq_iommu->irte_index + irq_iommu->sub_handle;
	*entry = *(irq_iommu->iommu->ir_table->base + index);

	spin_unlock_irqrestore(&irq_2_ir_lock, flags);
	return 0;
}
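
/*
 * Reserve @count consecutive IRTEs for @irq and return the index of the
 * first one. @count is rounded up to a power of two so the whole block
 * can later be invalidated with a single mask-based IEC flush.
 *
 * Illustrative caller sequence for multi-vector MSI (a sketch, not a
 * verbatim caller):
 *	index = alloc_irte(iommu, irq, nvec);
 *	for (i = 1; i < nvec; i++)
 *		set_irte_irq(irq + i, iommu, index, i);
 */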
int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
{
	struct ir_table *table = iommu->ir_table;
	struct irq_2_iommu *irq_iommu;
	u16 index, start_index;
	unsigned int mask = 0;
	unsigned long flags;
	int i;

	if (!count)
		return -1;

#ifndef CONFIG_SPARSE_IRQ
	/* protect irq_2_iommu_alloc later */
	if (irq >= nr_irqs)
		return -1;
#endif

	/*
	 * start the IRTE search from index 0.
	 */
	index = start_index = 0;

	if (count > 1) {
		count = __roundup_pow_of_two(count);
		mask = ilog2(count);
	}

	if (mask > ecap_max_handle_mask(iommu->ecap)) {
		printk(KERN_ERR
		       "Requested mask %x exceeds the max invalidation handle"
		       " mask value %Lx\n", mask,
		       ecap_max_handle_mask(iommu->ecap));
		return -1;
	}

	spin_lock_irqsave(&irq_2_ir_lock, flags);
	do {
		for (i = index; i < index + count; i++)
			if (table->base[i].present)
				break;
		/* empty index found */
		if (i == index + count)
			break;

		index = (index + count) % INTR_REMAP_TABLE_ENTRIES;

		if (index == start_index) {
			spin_unlock_irqrestore(&irq_2_ir_lock, flags);
			printk(KERN_ERR "can't allocate an IRTE\n");
			return -1;
		}
	} while (1);

	for (i = index; i < index + count; i++)
		table->base[i].present = 1;

	irq_iommu = irq_2_iommu_alloc(irq);
	if (!irq_iommu) {
		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
		printk(KERN_ERR "can't allocate irq_2_iommu\n");
		return -1;
	}

	irq_iommu->iommu = iommu;
	irq_iommu->irte_index = index;
	irq_iommu->sub_handle = 0;
	irq_iommu->irte_mask = mask;

	spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return index;
}
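
/*
 * Invalidate the hardware's interrupt entry cache for the 2^@mask
 * entries starting at @index, by submitting a synchronous IEC
 * queued-invalidation descriptor.
 */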
static int qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
{
	struct qi_desc desc;

	desc.low = QI_IEC_IIDEX(index) | QI_IEC_TYPE | QI_IEC_IM(mask)
		   | QI_IEC_SELECTIVE;
	desc.high = 0;

	return qi_submit_sync(&desc, iommu);
}
int map_irq_to_irte_handle(int irq, u16 *sub_handle)
{
	struct irq_2_iommu *irq_iommu;
	unsigned long flags;
	int index;

	spin_lock_irqsave(&irq_2_ir_lock, flags);
	irq_iommu = valid_irq_2_iommu(irq);
	if (!irq_iommu) {
		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
		return -1;
	}

	*sub_handle = irq_iommu->sub_handle;
	index = irq_iommu->irte_index;
	spin_unlock_irqrestore(&irq_2_ir_lock, flags);
	return index;
}
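
/*
 * Multiple MSI vectors share one IRTE block: set_irte_irq() points an
 * additional @irq at @index within @iommu's table, under @subhandle.
 */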
int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
{
	struct irq_2_iommu *irq_iommu;
	unsigned long flags;

	spin_lock_irqsave(&irq_2_ir_lock, flags);

	irq_iommu = irq_2_iommu_alloc(irq);
	if (!irq_iommu) {
		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
		printk(KERN_ERR "can't allocate irq_2_iommu\n");
		return -1;
	}

	irq_iommu->iommu = iommu;
	irq_iommu->irte_index = index;
	irq_iommu->sub_handle = subhandle;
	irq_iommu->irte_mask = 0;

	spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return 0;
}
int clear_irte_irq(int irq, struct intel_iommu *iommu, u16 index)
{
	struct irq_2_iommu *irq_iommu;
	unsigned long flags;

	spin_lock_irqsave(&irq_2_ir_lock, flags);
	irq_iommu = valid_irq_2_iommu(irq);
	if (!irq_iommu) {
		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
		return -1;
	}

	irq_iommu->iommu = NULL;
	irq_iommu->irte_index = 0;
	irq_iommu->sub_handle = 0;
	irq_iommu->irte_mask = 0;

	spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return 0;
}
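
/*
 * Rewrite the IRTE for @irq with @irte_modified, write the updated entry
 * back out of the CPU cache if the IOMMU is not coherent, and invalidate
 * the IOMMU's cached copy via the IEC.
 */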
int modify_irte(int irq, struct irte *irte_modified)
{
	struct intel_iommu *iommu;
	struct irq_2_iommu *irq_iommu;
	struct irte *irte;
	unsigned long flags;
	int index;
	int rc;

	spin_lock_irqsave(&irq_2_ir_lock, flags);
	irq_iommu = valid_irq_2_iommu(irq);
	if (!irq_iommu) {
		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
		return -1;
	}

	iommu = irq_iommu->iommu;

	index = irq_iommu->irte_index + irq_iommu->sub_handle;
	irte = &iommu->ir_table->base[index];

	set_64bit(&irte->low, irte_modified->low);
	set_64bit(&irte->high, irte_modified->high);
	__iommu_flush_cache(iommu, irte, sizeof(*irte));

	rc = qi_flush_iec(iommu, index, 0);
	spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return rc;
}
int flush_irte(int irq)
{
	struct intel_iommu *iommu;
	struct irq_2_iommu *irq_iommu;
	unsigned long flags;
	int index;
	int rc;

	spin_lock_irqsave(&irq_2_ir_lock, flags);
	irq_iommu = valid_irq_2_iommu(irq);
	if (!irq_iommu) {
		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
		return -1;
	}

	iommu = irq_iommu->iommu;

	index = irq_iommu->irte_index + irq_iommu->sub_handle;

	rc = qi_flush_iec(iommu, index, irq_iommu->irte_mask);
	spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return rc;
}
struct intel_iommu *map_hpet_to_ir(u8 hpet_id)
{
	int i;

	for (i = 0; i < MAX_HPET_TBS; i++)
		if (ir_hpet[i].id == hpet_id)
			return ir_hpet[i].iommu;
	return NULL;
}
struct intel_iommu *map_ioapic_to_ir(int apic)
{
	int i;

	for (i = 0; i < MAX_IO_APICS; i++)
		if (ir_ioapic[i].id == apic)
			return ir_ioapic[i].iommu;
	return NULL;
}
struct intel_iommu *map_dev_to_ir(struct pci_dev *dev)
{
	struct dmar_drhd_unit *drhd;

	drhd = dmar_find_matched_drhd_unit(dev);
	if (!drhd)
		return NULL;

	return drhd->iommu;
}
static int clear_entries(struct irq_2_iommu *irq_iommu)
{
	struct irte *start, *entry, *end;
	struct intel_iommu *iommu;
	int index;

	if (irq_iommu->sub_handle)
		return 0;

	iommu = irq_iommu->iommu;
	index = irq_iommu->irte_index + irq_iommu->sub_handle;

	start = iommu->ir_table->base + index;
	end = start + (1 << irq_iommu->irte_mask);

	for (entry = start; entry < end; entry++) {
		set_64bit(&entry->low, 0);
		set_64bit(&entry->high, 0);
	}

	return qi_flush_iec(iommu, index, irq_iommu->irte_mask);
}
int free_irte(int irq)
{
	struct irq_2_iommu *irq_iommu;
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&irq_2_ir_lock, flags);
	irq_iommu = valid_irq_2_iommu(irq);
	if (!irq_iommu) {
		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
		return -1;
	}

	rc = clear_entries(irq_iommu);

	irq_iommu->iommu = NULL;
	irq_iommu->irte_index = 0;
	irq_iommu->sub_handle = 0;
	irq_iommu->irte_mask = 0;

	spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	irq_2_iommu_free(irq);

	return rc;
}
/*
 * source validation type
 */
#define SVT_NO_VERIFY		0x0  /* no verification is required */
#define SVT_VERIFY_SID_SQ	0x1  /* verify using SID and SQ fields */
#define SVT_VERIFY_BUS		0x2  /* verify bus of request-id */

/*
 * source-id qualifier
 */
#define SQ_ALL_16	0x0  /* verify all 16 bits of request-id */
#define SQ_13_IGNORE_1	0x1  /* verify most significant 13 bits, ignore
			      * the third least significant bit
			      */
#define SQ_13_IGNORE_2	0x2  /* verify most significant 13 bits, ignore
			      * the second and third least significant bits
			      */
#define SQ_13_IGNORE_3	0x3  /* verify most significant 13 bits, ignore
			      * the three least significant bits
			      */
/*
 * set SVT, SQ and SID fields of irte to verify
 * source ids of interrupt requests
 */
static void set_irte_sid(struct irte *irte, unsigned int svt,
			 unsigned int sq, unsigned int sid)
{
	if (disable_sourceid_checking)
		svt = SVT_NO_VERIFY;

	irte->svt = svt;
	irte->sq = sq;
	irte->sid = sid;
}
int set_ioapic_sid(struct irte *irte, int apic)
{
	int i;
	u16 sid = 0;

	if (!irte)
		return -1;

	for (i = 0; i < MAX_IO_APICS; i++) {
		if (ir_ioapic[i].id == apic) {
			sid = (ir_ioapic[i].bus << 8) | ir_ioapic[i].devfn;
			break;
		}
	}

	if (sid == 0) {
		pr_warning("Failed to set source-id of IOAPIC (%d)\n", apic);
		return -1;
	}

	set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16, sid);

	return 0;
}
int set_hpet_sid(struct irte *irte, u8 id)
{
	int i;
	u16 sid = 0;

	if (!irte)
		return -1;

	for (i = 0; i < MAX_HPET_TBS; i++) {
		if (ir_hpet[i].id == id) {
			sid = (ir_hpet[i].bus << 8) | ir_hpet[i].devfn;
			break;
		}
	}

	if (sid == 0) {
		pr_warning("Failed to set source-id of HPET block (%d)\n", id);
		return -1;
	}

	/*
	 * Should really use SQ_ALL_16. Some platforms are broken.
	 * While we figure out the right quirks for these broken platforms, use
	 * SQ_13_IGNORE_3 for now.
	 */
	set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_13_IGNORE_3, sid);

	return 0;
}
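
/*
 * For MSI, the source-id seen by the IOMMU depends on topology: a native
 * PCIe function uses its own requester-id; a device behind a
 * PCIe-to-PCI(-X) bridge is aliased, so only the bus number can be
 * verified; behind a legacy PCI bridge the bridge's requester-id is used.
 */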
int set_msi_sid(struct irte *irte, struct pci_dev *dev)
{
	struct pci_dev *bridge;

	if (!irte || !dev)
		return -1;

	/* PCIe device or Root Complex integrated PCI device */
	if (pci_is_pcie(dev) || !dev->bus->parent) {
		set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16,
			     (dev->bus->number << 8) | dev->devfn);
		return 0;
	}

	bridge = pci_find_upstream_pcie_bridge(dev);
	if (bridge) {
		if (pci_is_pcie(bridge)) /* this is a PCIe-to-PCI/PCIX bridge */
			set_irte_sid(irte, SVT_VERIFY_BUS, SQ_ALL_16,
				     (bridge->bus->number << 8) | dev->bus->number);
		else /* this is a legacy PCI bridge */
			set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16,
				     (bridge->bus->number << 8) | bridge->devfn);
	}

	return 0;
}
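
/*
 * Program one IOMMU with the interrupt remap table: write the table
 * address into DMAR_IRTA_REG, latch it with the SIRTP command, globally
 * invalidate the interrupt entry cache, and only then set the IRE bit.
 */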
static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
{
	unsigned long flags;
	u64 addr;
	u32 sts;

	addr = virt_to_phys((void *)iommu->ir_table->base);

	spin_lock_irqsave(&iommu->register_lock, flags);

	dmar_writeq(iommu->reg + DMAR_IRTA_REG,
		    (addr) | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE);

	/* Set interrupt-remapping table pointer */
	iommu->gcmd |= DMA_GCMD_SIRTP;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_IRTPS), sts);
	spin_unlock_irqrestore(&iommu->register_lock, flags);

	/*
	 * Global invalidation of the interrupt entry cache before enabling
	 * interrupt-remapping.
	 */
	qi_global_iec(iommu);

	spin_lock_irqsave(&iommu->register_lock, flags);

	/* Enable interrupt-remapping */
	iommu->gcmd |= DMA_GCMD_IRE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_IRES), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flags);
}
static int setup_intr_remapping(struct intel_iommu *iommu, int mode)
{
	struct ir_table *ir_table;
	struct page *pages;

	ir_table = iommu->ir_table = kzalloc(sizeof(struct ir_table),
					     GFP_ATOMIC);
	if (!iommu->ir_table)
		return -ENOMEM;

	pages = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO,
				 INTR_REMAP_PAGE_ORDER);
	if (!pages) {
		printk(KERN_ERR "failed to allocate pages of order %d\n",
		       INTR_REMAP_PAGE_ORDER);
		kfree(iommu->ir_table);
		return -ENOMEM;
	}

	ir_table->base = page_address(pages);

	iommu_set_intr_remapping(iommu, mode);

	return 0;
}
/*
 * Disable Interrupt Remapping.
 */
static void iommu_disable_intr_remapping(struct intel_iommu *iommu)
{
	unsigned long flags;
	u32 sts;

	if (!ecap_ir_support(iommu->ecap))
		return;

	/*
	 * Global invalidation of the interrupt entry cache before disabling
	 * interrupt-remapping.
	 */
	qi_global_iec(iommu);

	spin_lock_irqsave(&iommu->register_lock, flags);

	sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
	if (!(sts & DMA_GSTS_IRES))
		goto end;

	iommu->gcmd &= ~DMA_GCMD_IRE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, !(sts & DMA_GSTS_IRES), sts);

end:
	spin_unlock_irqrestore(&iommu->register_lock, flags);
}
int __init intr_remapping_supported(void)
{
	struct dmar_drhd_unit *drhd;

	if (disable_intremap)
		return 0;

	if (!dmar_ir_support())
		return 0;

	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		if (!ecap_ir_support(iommu->ecap))
			return 0;
	}

	return 1;
}
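
/*
 * Bring up interrupt remapping on every DRHD: quiesce state left over
 * from the BIOS or a previous kernel, verify EIM support when x2APIC
 * mode is requested, enable queued invalidation everywhere, then install
 * and enable the remap tables.
 */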
int __init enable_intr_remapping(int eim)
{
	struct dmar_drhd_unit *drhd;
	int setup = 0;

	if (parse_ioapics_under_ir() != 1) {
		printk(KERN_INFO "Not enabling interrupt remapping\n");
		return -1;
	}

	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		/*
		 * If queued invalidation is already initialized,
		 * we shouldn't disable it.
		 */
		if (iommu->qi)
			continue;

		/*
		 * Clear previous faults.
		 */
		dmar_fault(-1, iommu);

		/*
		 * Disable interrupt remapping and queued invalidation, if
		 * already enabled prior to OS handover.
		 */
		iommu_disable_intr_remapping(iommu);

		dmar_disable_qi(iommu);
	}

	/*
	 * Check for Interrupt-remapping support.
	 */
	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		if (!ecap_ir_support(iommu->ecap))
			continue;

		if (eim && !ecap_eim_support(iommu->ecap)) {
			printk(KERN_INFO "DRHD %Lx: EIM not supported, "
			       "ecap %Lx\n", drhd->reg_base_addr, iommu->ecap);
			return -1;
		}
	}

	/*
	 * Enable queued invalidation for all the DRHDs.
	 */
	for_each_drhd_unit(drhd) {
		int ret;
		struct intel_iommu *iommu = drhd->iommu;

		ret = dmar_enable_qi(iommu);
		if (ret) {
			printk(KERN_ERR "DRHD %Lx: failed to enable queued "
			       "invalidation, ecap %Lx, ret %d\n",
			       drhd->reg_base_addr, iommu->ecap, ret);
			return -1;
		}
	}

	/*
	 * Set up Interrupt-remapping for all the DRHDs now.
	 */
	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		if (!ecap_ir_support(iommu->ecap))
			continue;

		if (setup_intr_remapping(iommu, eim))
			goto error;

		setup = 1;
	}

	if (!setup)
		goto error;

	intr_remapping_enabled = 1;

	return 0;

error:
	/*
	 * Handle the error condition gracefully here!
	 */
	return -1;
}
static void ir_parse_one_hpet_scope(struct acpi_dmar_device_scope *scope,
				    struct intel_iommu *iommu)
{
	struct acpi_dmar_pci_path *path;
	u8 bus;
	int count;

	bus = scope->bus;
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

	while (--count > 0) {
		/*
		 * Access PCI directly because the PCI
		 * subsystem isn't initialized yet.
		 */
		bus = read_pci_config_byte(bus, path->dev, path->fn,
					   PCI_SECONDARY_BUS);
		path++;
	}

	ir_hpet[ir_hpet_num].bus   = bus;
	ir_hpet[ir_hpet_num].devfn = PCI_DEVFN(path->dev, path->fn);
	ir_hpet[ir_hpet_num].iommu = iommu;
	ir_hpet[ir_hpet_num].id    = scope->enumeration_id;
	ir_hpet_num++;
}
static void ir_parse_one_ioapic_scope(struct acpi_dmar_device_scope *scope,
				      struct intel_iommu *iommu)
{
	struct acpi_dmar_pci_path *path;
	u8 bus;
	int count;

	bus = scope->bus;
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

	while (--count > 0) {
		/*
		 * Access PCI directly because the PCI
		 * subsystem isn't initialized yet.
		 */
		bus = read_pci_config_byte(bus, path->dev, path->fn,
					   PCI_SECONDARY_BUS);
		path++;
	}

	ir_ioapic[ir_ioapic_num].bus   = bus;
	ir_ioapic[ir_ioapic_num].devfn = PCI_DEVFN(path->dev, path->fn);
	ir_ioapic[ir_ioapic_num].iommu = iommu;
	ir_ioapic[ir_ioapic_num].id    = scope->enumeration_id;
	ir_ioapic_num++;
}
static int ir_parse_ioapic_hpet_scope(struct acpi_dmar_header *header,
				      struct intel_iommu *iommu)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_device_scope *scope;
	void *start, *end;

	drhd = (struct acpi_dmar_hardware_unit *)header;

	start = (void *)(drhd + 1);
	end = ((void *)drhd) + header->length;

	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC) {
			if (ir_ioapic_num == MAX_IO_APICS) {
				printk(KERN_WARNING "Exceeded Max IO APICS\n");
				return -1;
			}

			printk(KERN_INFO "IOAPIC id %d under DRHD base "
			       "0x%Lx IOMMU %d\n", scope->enumeration_id,
			       drhd->address, iommu->seq_id);

			ir_parse_one_ioapic_scope(scope, iommu);
		} else if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_HPET) {
			if (ir_hpet_num == MAX_HPET_TBS) {
				printk(KERN_WARNING "Exceeded Max HPET blocks\n");
				return -1;
			}

			printk(KERN_INFO "HPET id %d under DRHD base "
			       "0x%Lx\n", scope->enumeration_id,
			       drhd->address);

			ir_parse_one_hpet_scope(scope, iommu);
		}
		start += scope->length;
	}

	return 0;
}
/*
 * Finds the association between IOAPICs and their Interrupt-remapping
 * hardware units.
 */
int __init parse_ioapics_under_ir(void)
{
	struct dmar_drhd_unit *drhd;
	int ir_supported = 0;

	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		if (ecap_ir_support(iommu->ecap)) {
			if (ir_parse_ioapic_hpet_scope(drhd->hdr, iommu))
				return -1;

			ir_supported = 1;
		}
	}

	if (ir_supported && ir_ioapic_num != nr_ioapics) {
		printk(KERN_WARNING
		       "Not all IO-APICs listed under remapping hardware\n");
		return -1;
	}

	return ir_supported;
}
void disable_intr_remapping(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;

	/*
	 * Disable Interrupt-remapping for all the DRHDs now.
	 */
	for_each_iommu(iommu, drhd) {
		if (!ecap_ir_support(iommu->ecap))
			continue;

		iommu_disable_intr_remapping(iommu);
	}
}
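
/*
 * Re-enable interrupt remapping after suspend/resume: restart queued
 * invalidation where it was in use, then reprogram each IOMMU's remap
 * table registers.
 */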
int reenable_intr_remapping(int eim)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;
	int setup = 0;

	for_each_iommu(iommu, drhd)
		if (iommu->qi)
			dmar_reenable_qi(iommu);

	/*
	 * Set up Interrupt-remapping for all the DRHDs now.
	 */
	for_each_iommu(iommu, drhd) {
		if (!ecap_ir_support(iommu->ecap))
			continue;

		/* Set up interrupt remapping for this iommu. */
		iommu_set_intr_remapping(iommu, eim);
		setup = 1;
	}

	if (!setup)
		goto error;

	return 0;

error:
	/*
	 * Handle the error condition gracefully here!
	 */
	return -1;
}