#include <linux/interrupt.h>
#include <linux/dmar.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/hpet.h>
#include <linux/pci.h>
#include <linux/irq.h>
#include <asm/io_apic.h>
#include <linux/intel-iommu.h>
#include "intr_remapping.h"
#include <acpi/acpi.h>
#include <asm/pci-direct.h>
static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
static struct hpet_scope ir_hpet[MAX_HPET_TBS];
static int ir_ioapic_num, ir_hpet_num;
int intr_remapping_enabled;

static int disable_intremap;
static int disable_sourceid_checking;
static __init int setup_nointremap(char *str)
{
	disable_intremap = 1;
	return 0;
}
early_param("nointremap", setup_nointremap);
static __init int setup_intremap(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strncmp(str, "on", 2))
		disable_intremap = 0;
	else if (!strncmp(str, "off", 3))
		disable_intremap = 1;
	else if (!strncmp(str, "nosid", 5))
		disable_sourceid_checking = 1;

	return 0;
}
early_param("intremap", setup_intremap);
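/*
 * Usage (illustrative, derived from the parsers above): interrupt
 * remapping can be controlled from the kernel command line, e.g.
 *
 *	intremap=on	- enable interrupt remapping
 *	intremap=off	- disable interrupt remapping
 *	intremap=nosid	- enable, but skip source-id checking of requests
 *	nointremap	- legacy way of disabling interrupt remapping
 */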
/* per-IRQ bookkeeping: which IOMMU and which IRTE(s) an IRQ is bound to */
struct irq_2_iommu {
	struct intel_iommu *iommu;
	u16 irte_index;
	u16 sub_handle;
	u8  irte_mask;
};
#ifdef CONFIG_GENERIC_HARDIRQS
static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
{
	return get_irq_iommu(irq);
}
static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
{
	struct irq_data *data = irq_get_irq_data(irq);

	if (WARN_ONCE(data->irq_2_iommu,
		      KERN_DEBUG "irq_2_iommu!=NULL irq %u\n", irq))
		return data->irq_2_iommu;

	data->irq_2_iommu = kzalloc_node(sizeof(*data->irq_2_iommu),
					 GFP_ATOMIC, data->node);
	return data->irq_2_iommu;
}
static void irq_2_iommu_free(unsigned int irq)
{
	struct irq_data *d = irq_get_irq_data(irq);
	struct irq_2_iommu *p = d->irq_2_iommu;

	d->irq_2_iommu = NULL;
	kfree(p);
}
#else /* !CONFIG_GENERIC_HARDIRQS */

static struct irq_2_iommu irq_2_iommuX[NR_IRQS];

static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
{
	if (irq < nr_irqs)
		return &irq_2_iommuX[irq];

	return NULL;
}

static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
{
	return irq_2_iommu(irq);
}

static void irq_2_iommu_free(unsigned int irq) { }

#endif
static DEFINE_SPINLOCK(irq_2_ir_lock);
static struct irq_2_iommu *valid_irq_2_iommu(unsigned int irq)
{
	struct irq_2_iommu *irq_iommu;

	irq_iommu = irq_2_iommu(irq);
	if (!irq_iommu)
		return NULL;

	if (!irq_iommu->iommu)
		return NULL;

	return irq_iommu;
}
int irq_remapped(int irq)
{
	return valid_irq_2_iommu(irq) != NULL;
}
int get_irte(int irq, struct irte *entry)
{
	int index;
	struct irq_2_iommu *irq_iommu;
	unsigned long flags;

	if (!entry)
		return -1;

	spin_lock_irqsave(&irq_2_ir_lock, flags);
	irq_iommu = valid_irq_2_iommu(irq);
	if (!irq_iommu) {
		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
		return -1;
	}

	index = irq_iommu->irte_index + irq_iommu->sub_handle;
	*entry = *(irq_iommu->iommu->ir_table->base + index);

	spin_unlock_irqrestore(&irq_2_ir_lock, flags);
	return 0;
}
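/*
 * Note: an IRTE is addressed as irte_index + sub_handle throughout this
 * file. For a device that allocated a block of IRTEs (e.g. multi-MSI),
 * irte_index is the base of the block and sub_handle selects an entry
 * within it, so a 4-vector block at base 8 uses indices 8..11.
 */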
int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
{
	struct ir_table *table = iommu->ir_table;
	struct irq_2_iommu *irq_iommu;
	u16 index, start_index;
	unsigned int mask = 0;
	unsigned long flags;
	int i;

	if (!count)
		return -1;

#ifndef CONFIG_SPARSE_IRQ
	/* protect irq_2_iommu_alloc later */
	if (irq >= nr_irqs)
		return -1;
#endif

	/*
	 * start the IRTE search from index 0.
	 */
	index = start_index = 0;

	if (count > 1) {
		count = __roundup_pow_of_two(count);
		mask = ilog2(count);
	}

	if (mask > ecap_max_handle_mask(iommu->ecap)) {
		printk(KERN_ERR
		       "Requested mask %x exceeds the max invalidation handle"
		       " mask value %Lx\n", mask,
		       ecap_max_handle_mask(iommu->ecap));
		return -1;
	}

	spin_lock_irqsave(&irq_2_ir_lock, flags);
	do {
		for (i = index; i < index + count; i++)
			if (table->base[i].present)
				break;
		/* empty index found */
		if (i == index + count)
			break;

		index = (index + count) % INTR_REMAP_TABLE_ENTRIES;

		if (index == start_index) {
			spin_unlock_irqrestore(&irq_2_ir_lock, flags);
			printk(KERN_ERR "can't allocate an IRTE\n");
			return -1;
		}
	} while (1);

	for (i = index; i < index + count; i++)
		table->base[i].present = 1;

	irq_iommu = irq_2_iommu_alloc(irq);
	if (!irq_iommu) {
		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
		printk(KERN_ERR "can't allocate irq_2_iommu\n");
		return -1;
	}

	irq_iommu->iommu = iommu;
	irq_iommu->irte_index = index;
	irq_iommu->sub_handle = 0;
	irq_iommu->irte_mask = mask;

	spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return index;
}
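/*
 * Typical caller flow (illustrative sketch only; the real callers live in
 * the arch MSI/IO-APIC setup code, and prepare_irte() below is a
 * hypothetical helper standing in for the entry-filling step):
 *
 *	index = alloc_irte(iommu, irq, 1);	// reserve one IRTE
 *	if (index < 0)
 *		return index;
 *	prepare_irte(&irte, vector, dest);	// fill vector/destination
 *	modify_irte(irq, &irte);		// program + flush the entry
 */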
static int qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
{
	struct qi_desc desc;

	desc.low = QI_IEC_IIDEX(index) | QI_IEC_TYPE | QI_IEC_IM(mask)
		   | QI_IEC_SELECTIVE;
	desc.high = 0;

	return qi_submit_sync(&desc, iommu);
}
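/*
 * qi_flush_iec() submits an Interrupt Entry Cache invalidation descriptor
 * through the queued-invalidation interface and waits for completion, so
 * hardware never keeps using a stale cached copy of an IRTE that software
 * has just rewritten or cleared.
 */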
int map_irq_to_irte_handle(int irq, u16 *sub_handle)
{
	int index;
	struct irq_2_iommu *irq_iommu;
	unsigned long flags;

	spin_lock_irqsave(&irq_2_ir_lock, flags);
	irq_iommu = valid_irq_2_iommu(irq);
	if (!irq_iommu) {
		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
		return -1;
	}

	*sub_handle = irq_iommu->sub_handle;
	index = irq_iommu->irte_index;
	spin_unlock_irqrestore(&irq_2_ir_lock, flags);
	return index;
}
int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
{
	struct irq_2_iommu *irq_iommu;
	unsigned long flags;

	spin_lock_irqsave(&irq_2_ir_lock, flags);

	irq_iommu = irq_2_iommu_alloc(irq);
	if (!irq_iommu) {
		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
		printk(KERN_ERR "can't allocate irq_2_iommu\n");
		return -1;
	}

	irq_iommu->iommu = iommu;
	irq_iommu->irte_index = index;
	irq_iommu->sub_handle = subhandle;
	irq_iommu->irte_mask = 0;

	spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return 0;
}
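/*
 * set_irte_irq() lets several IRQs share one IRTE block: the first IRQ
 * reserves the block with alloc_irte(), and the remaining IRQs are bound
 * to base index + subhandle here without allocating new entries.
 */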
int clear_irte_irq(int irq, struct intel_iommu *iommu, u16 index)
{
	struct irq_2_iommu *irq_iommu;
	unsigned long flags;

	spin_lock_irqsave(&irq_2_ir_lock, flags);
	irq_iommu = valid_irq_2_iommu(irq);
	if (!irq_iommu) {
		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
		return -1;
	}

	irq_iommu->iommu = NULL;
	irq_iommu->irte_index = 0;
	irq_iommu->sub_handle = 0;
	irq_2_iommu(irq)->irte_mask = 0;

	spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return 0;
}
int modify_irte(int irq, struct irte *irte_modified)
{
	int rc;
	int index;
	struct irte *irte;
	struct intel_iommu *iommu;
	struct irq_2_iommu *irq_iommu;
	unsigned long flags;

	spin_lock_irqsave(&irq_2_ir_lock, flags);
	irq_iommu = valid_irq_2_iommu(irq);
	if (!irq_iommu) {
		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
		return -1;
	}

	iommu = irq_iommu->iommu;

	index = irq_iommu->irte_index + irq_iommu->sub_handle;
	irte = &iommu->ir_table->base[index];

	set_64bit(&irte->low, irte_modified->low);
	set_64bit(&irte->high, irte_modified->high);
	__iommu_flush_cache(iommu, irte, sizeof(*irte));

	rc = qi_flush_iec(iommu, index, 0);
	spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return rc;
}
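/*
 * Update protocol used above: each 64-bit half of the IRTE is written
 * atomically with set_64bit(), the cache line is flushed for IOMMUs that
 * are not coherent with the CPU caches, and finally the Interrupt Entry
 * Cache is invalidated so hardware observes the new entry.
 */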
int flush_irte(int irq)
{
	int rc;
	int index;
	struct intel_iommu *iommu;
	struct irq_2_iommu *irq_iommu;
	unsigned long flags;

	spin_lock_irqsave(&irq_2_ir_lock, flags);
	irq_iommu = valid_irq_2_iommu(irq);
	if (!irq_iommu) {
		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
		return -1;
	}

	iommu = irq_iommu->iommu;

	index = irq_iommu->irte_index + irq_iommu->sub_handle;

	rc = qi_flush_iec(iommu, index, irq_iommu->irte_mask);
	spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return rc;
}
struct intel_iommu *map_hpet_to_ir(u8 hpet_id)
{
	int i;

	for (i = 0; i < MAX_HPET_TBS; i++)
		if (ir_hpet[i].id == hpet_id)
			return ir_hpet[i].iommu;

	return NULL;
}
struct intel_iommu *map_ioapic_to_ir(int apic)
{
	int i;

	for (i = 0; i < MAX_IO_APICS; i++)
		if (ir_ioapic[i].id == apic)
			return ir_ioapic[i].iommu;

	return NULL;
}
struct intel_iommu *map_dev_to_ir(struct pci_dev *dev)
{
	struct dmar_drhd_unit *drhd;

	drhd = dmar_find_matched_drhd_unit(dev);
	if (!drhd)
		return NULL;

	return drhd->iommu;
}
static int clear_entries(struct irq_2_iommu *irq_iommu)
{
	struct irte *start, *entry, *end;
	struct intel_iommu *iommu;
	int index;

	if (irq_iommu->sub_handle)
		return 0;

	iommu = irq_iommu->iommu;
	index = irq_iommu->irte_index + irq_iommu->sub_handle;

	start = iommu->ir_table->base + index;
	end = start + (1 << irq_iommu->irte_mask);

	for (entry = start; entry < end; entry++) {
		set_64bit(&entry->low, 0);
		set_64bit(&entry->high, 0);
	}

	return qi_flush_iec(iommu, index, irq_iommu->irte_mask);
}
int free_irte(int irq)
{
	int rc = 0;
	struct irq_2_iommu *irq_iommu;
	unsigned long flags;

	spin_lock_irqsave(&irq_2_ir_lock, flags);
	irq_iommu = valid_irq_2_iommu(irq);
	if (!irq_iommu) {
		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
		return -1;
	}

	rc = clear_entries(irq_iommu);

	irq_iommu->iommu = NULL;
	irq_iommu->irte_index = 0;
	irq_iommu->sub_handle = 0;
	irq_iommu->irte_mask = 0;

	spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	irq_2_iommu_free(irq);

	return rc;
}
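/*
 * Lifecycle summary: alloc_irte() marks entries present and binds the IRQ;
 * free_irte() is its inverse: it zeroes the entries (via clear_entries()),
 * flushes the Interrupt Entry Cache, unbinds the IRQ and releases the
 * per-IRQ bookkeeping.
 */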
/*
 * source validation type
 */
#define SVT_NO_VERIFY		0x0  /* no verification is required */
#define SVT_VERIFY_SID_SQ	0x1  /* verify using SID and SQ fields */
#define SVT_VERIFY_BUS		0x2  /* verify bus of request-id */

/*
 * source-id qualifier
 */
#define SQ_ALL_16	0x0  /* verify all 16 bits of request-id */
#define SQ_13_IGNORE_1	0x1  /* verify most significant 13 bits, ignore
			      * the third least significant bit
			      */
#define SQ_13_IGNORE_2	0x2  /* verify most significant 13 bits, ignore
			      * the second and third least significant bits
			      */
#define SQ_13_IGNORE_3	0x3  /* verify most significant 13 bits, ignore
			      * the three least significant bits
			      */
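/*
 * Worked example: a 16-bit source-id is bus[15:8] | dev[7:3] | fn[2:0].
 * With SQ_13_IGNORE_3 the three least significant bits (the function
 * number) are ignored, so requests from 02:1f.0 through 02:1f.7 all
 * match the same IRTE.
 */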
/*
 * set SVT, SQ and SID fields of irte to verify
 * source ids of interrupt requests
 */
static void set_irte_sid(struct irte *irte, unsigned int svt,
			 unsigned int sq, unsigned int sid)
{
	if (disable_sourceid_checking)
		svt = SVT_NO_VERIFY;

	irte->svt = svt;
	irte->sq = sq;
	irte->sid = sid;
}
int set_ioapic_sid(struct irte *irte, int apic)
{
	int i;
	u16 sid = 0;

	if (!irte)
		return -1;

	for (i = 0; i < MAX_IO_APICS; i++) {
		if (ir_ioapic[i].id == apic) {
			sid = (ir_ioapic[i].bus << 8) | ir_ioapic[i].devfn;
			break;
		}
	}

	if (sid == 0) {
		pr_warning("Failed to set source-id of IOAPIC (%d)\n", apic);
		return -1;
	}

	set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16, sid);

	return 0;
}
int set_hpet_sid(struct irte *irte, u8 id)
{
	int i;
	u16 sid = 0;

	if (!irte)
		return -1;

	for (i = 0; i < MAX_HPET_TBS; i++) {
		if (ir_hpet[i].id == id) {
			sid = (ir_hpet[i].bus << 8) | ir_hpet[i].devfn;
			break;
		}
	}

	if (sid == 0) {
		pr_warning("Failed to set source-id of HPET block (%d)\n", id);
		return -1;
	}

	/*
	 * Should really use SQ_ALL_16. Some platforms are broken.
	 * While we figure out the right quirks for these broken platforms, use
	 * SQ_13_IGNORE_3 for now.
	 */
	set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_13_IGNORE_3, sid);

	return 0;
}
int set_msi_sid(struct irte *irte, struct pci_dev *dev)
{
	struct pci_dev *bridge;

	if (!irte || !dev)
		return -1;

	/* PCIe device or Root Complex integrated PCI device */
	if (pci_is_pcie(dev) || !dev->bus->parent) {
		set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16,
			     (dev->bus->number << 8) | dev->devfn);
		return 0;
	}

	bridge = pci_find_upstream_pcie_bridge(dev);
	if (bridge) {
		if (pci_is_pcie(bridge)) /* this is a PCIe-to-PCI/PCIX bridge */
			set_irte_sid(irte, SVT_VERIFY_BUS, SQ_ALL_16,
				     (bridge->bus->number << 8) | dev->bus->number);
		else /* this is a legacy PCI bridge */
			set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16,
				     (bridge->bus->number << 8) | bridge->devfn);
	}

	return 0;
}
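/*
 * Rationale (sketch): interrupt requests from a device behind a
 * PCIe-to-PCI/PCI-X bridge carry the bridge's secondary bus number rather
 * than the device's own requester-id, so only the bus can be verified
 * (SVT_VERIFY_BUS). Behind a legacy PCI bridge the bridge itself issues
 * the requests, so the bridge's bus/devfn is used as the source-id.
 */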
static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
{
	u64 addr;
	u32 sts;
	unsigned long flags;

	addr = virt_to_phys((void *)iommu->ir_table->base);

	spin_lock_irqsave(&iommu->register_lock, flags);

	dmar_writeq(iommu->reg + DMAR_IRTA_REG,
		    (addr) | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE);

	/* Set interrupt-remapping table pointer */
	iommu->gcmd |= DMA_GCMD_SIRTP;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_IRTPS), sts);
	spin_unlock_irqrestore(&iommu->register_lock, flags);

	/*
	 * global invalidation of interrupt entry cache before enabling
	 * interrupt-remapping.
	 */
	qi_global_iec(iommu);

	spin_lock_irqsave(&iommu->register_lock, flags);

	/* Enable interrupt-remapping */
	iommu->gcmd |= DMA_GCMD_IRE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_IRES), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flags);
}
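/*
 * Hardware enable sequence used above: program DMAR_IRTA_REG with the
 * table's physical address, size and x2apic mode; latch it with
 * DMA_GCMD_SIRTP and wait for DMA_GSTS_IRTPS; globally invalidate the
 * Interrupt Entry Cache; then set DMA_GCMD_IRE and wait for DMA_GSTS_IRES.
 */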
static int setup_intr_remapping(struct intel_iommu *iommu, int mode)
{
	struct ir_table *ir_table;
	struct page *pages;

	ir_table = iommu->ir_table = kzalloc(sizeof(struct ir_table),
					     GFP_ATOMIC);
	if (!iommu->ir_table)
		return -ENOMEM;

	pages = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO,
				 INTR_REMAP_PAGE_ORDER);
	if (!pages) {
		printk(KERN_ERR "failed to allocate pages of order %d\n",
		       INTR_REMAP_PAGE_ORDER);
		kfree(iommu->ir_table);
		return -ENOMEM;
	}

	ir_table->base = page_address(pages);

	iommu_set_intr_remapping(iommu, mode);

	return 0;
}
/*
 * Disable Interrupt Remapping.
 */
static void iommu_disable_intr_remapping(struct intel_iommu *iommu)
{
	unsigned long flags;
	u32 sts;

	if (!ecap_ir_support(iommu->ecap))
		return;

	/*
	 * global invalidation of interrupt entry cache before disabling
	 * interrupt-remapping.
	 */
	qi_global_iec(iommu);

	spin_lock_irqsave(&iommu->register_lock, flags);

	/* DMAR_GSTS_REG is a 32-bit register; use a 32-bit read */
	sts = readl(iommu->reg + DMAR_GSTS_REG);
	if (!(sts & DMA_GSTS_IRES))
		goto end;

	iommu->gcmd &= ~DMA_GCMD_IRE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, !(sts & DMA_GSTS_IRES), sts);

end:
	spin_unlock_irqrestore(&iommu->register_lock, flags);
}
int __init intr_remapping_supported(void)
{
	struct dmar_drhd_unit *drhd;

	if (disable_intremap)
		return 0;

	if (!dmar_ir_support())
		return 0;

	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		if (!ecap_ir_support(iommu->ecap))
			return 0;
	}

	return 1;
}
int __init enable_intr_remapping(int eim)
{
	struct dmar_drhd_unit *drhd;
	int setup = 0;

	if (parse_ioapics_under_ir() != 1) {
		printk(KERN_INFO "Not enabling interrupt remapping\n");
		return -1;
	}

	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		/*
		 * If the queued invalidation is already initialized,
		 * shouldn't disable it.
		 */
		if (iommu->qi)
			continue;

		/*
		 * Clear previous faults.
		 */
		dmar_fault(-1, iommu);

		/*
		 * Disable intr remapping and queued invalidation, if already
		 * enabled prior to OS handover.
		 */
		iommu_disable_intr_remapping(iommu);

		dmar_disable_qi(iommu);
	}

	/*
	 * check for the Interrupt-remapping support
	 */
	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		if (!ecap_ir_support(iommu->ecap))
			continue;

		if (eim && !ecap_eim_support(iommu->ecap)) {
			printk(KERN_INFO "DRHD %Lx: EIM not supported by DRHD, "
			       "ecap %Lx\n", drhd->reg_base_addr, iommu->ecap);
			return -1;
		}
	}

	/*
	 * Enable queued invalidation for all the DRHD's.
	 */
	for_each_drhd_unit(drhd) {
		int ret;
		struct intel_iommu *iommu = drhd->iommu;

		ret = dmar_enable_qi(iommu);
		if (ret) {
			printk(KERN_ERR "DRHD %Lx: failed to enable queued "
			       "invalidation, ecap %Lx, ret %d\n",
			       drhd->reg_base_addr, iommu->ecap, ret);
			return -1;
		}
	}

	/*
	 * Setup Interrupt-remapping for all the DRHD's now.
	 */
	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		if (!ecap_ir_support(iommu->ecap))
			continue;

		if (setup_intr_remapping(iommu, eim))
			goto error;

		setup = 1;
	}

	if (!setup)
		goto error;

	intr_remapping_enabled = 1;

	return 0;

error:
	/*
	 * handle error condition gracefully here!
	 */
	return -1;
}
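/*
 * enable_intr_remapping() thus makes four passes over the DRHD units:
 * 1) quiesce any remapping/invalidation state left enabled by firmware or
 * a previous kernel, 2) verify IR (and, if requested, EIM) support,
 * 3) enable queued invalidation, and only then 4) allocate and switch on
 * the remapping tables.
 */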
static void ir_parse_one_hpet_scope(struct acpi_dmar_device_scope *scope,
				    struct intel_iommu *iommu)
{
	struct acpi_dmar_pci_path *path;
	u8 bus;
	int count;

	bus = scope->bus;
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

	while (--count > 0) {
		/*
		 * Access PCI config space directly because the PCI
		 * subsystem isn't initialized yet.
		 */
		bus = read_pci_config_byte(bus, path->dev, path->fn,
					   PCI_SECONDARY_BUS);
		path++;
	}

	ir_hpet[ir_hpet_num].bus   = bus;
	ir_hpet[ir_hpet_num].devfn = PCI_DEVFN(path->dev, path->fn);
	ir_hpet[ir_hpet_num].iommu = iommu;
	ir_hpet[ir_hpet_num].id    = scope->enumeration_id;
	ir_hpet_num++;
}
static void ir_parse_one_ioapic_scope(struct acpi_dmar_device_scope *scope,
				      struct intel_iommu *iommu)
{
	struct acpi_dmar_pci_path *path;
	u8 bus;
	int count;

	bus = scope->bus;
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

	while (--count > 0) {
		/*
		 * Access PCI config space directly because the PCI
		 * subsystem isn't initialized yet.
		 */
		bus = read_pci_config_byte(bus, path->dev, path->fn,
					   PCI_SECONDARY_BUS);
		path++;
	}

	ir_ioapic[ir_ioapic_num].bus   = bus;
	ir_ioapic[ir_ioapic_num].devfn = PCI_DEVFN(path->dev, path->fn);
	ir_ioapic[ir_ioapic_num].iommu = iommu;
	ir_ioapic[ir_ioapic_num].id    = scope->enumeration_id;
	ir_ioapic_num++;
}
static int ir_parse_ioapic_hpet_scope(struct acpi_dmar_header *header,
				      struct intel_iommu *iommu)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_device_scope *scope;
	void *start, *end;

	drhd = (struct acpi_dmar_hardware_unit *)header;

	start = (void *)(drhd + 1);
	end = ((void *)drhd) + header->length;

	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC) {
			if (ir_ioapic_num == MAX_IO_APICS) {
				printk(KERN_WARNING "Exceeded Max IO APICS\n");
				return -1;
			}

			printk(KERN_INFO "IOAPIC id %d under DRHD base "
			       "0x%Lx IOMMU %d\n", scope->enumeration_id,
			       drhd->address, iommu->seq_id);

			ir_parse_one_ioapic_scope(scope, iommu);
		} else if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_HPET) {
			if (ir_hpet_num == MAX_HPET_TBS) {
				printk(KERN_WARNING "Exceeded Max HPET blocks\n");
				return -1;
			}

			printk(KERN_INFO "HPET id %d under DRHD base "
			       "0x%Lx\n", scope->enumeration_id,
			       drhd->address);

			ir_parse_one_hpet_scope(scope, iommu);
		}
		start += scope->length;
	}

	return 0;
}
/*
 * Finds the association between IO-APICs and their Interrupt-remapping
 * hardware units.
 */
int __init parse_ioapics_under_ir(void)
{
	struct dmar_drhd_unit *drhd;
	int ir_supported = 0;

	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		if (ecap_ir_support(iommu->ecap)) {
			if (ir_parse_ioapic_hpet_scope(drhd->hdr, iommu))
				return -1;

			ir_supported = 1;
		}
	}

	if (ir_supported && ir_ioapic_num != nr_ioapics) {
		printk(KERN_WARNING
		       "Not all IO-APICs listed under remapping hardware\n");
		return -1;
	}

	return ir_supported;
}
void disable_intr_remapping(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;

	/*
	 * Disable Interrupt-remapping for all the DRHD's now.
	 */
	for_each_iommu(iommu, drhd) {
		if (!ecap_ir_support(iommu->ecap))
			continue;

		iommu_disable_intr_remapping(iommu);
	}
}
int reenable_intr_remapping(int eim)
{
	struct dmar_drhd_unit *drhd;
	int setup = 0;
	struct intel_iommu *iommu = NULL;

	for_each_iommu(iommu, drhd)
		if (iommu->qi)
			dmar_reenable_qi(iommu);

	/*
	 * Setup Interrupt-remapping for all the DRHD's now.
	 */
	for_each_iommu(iommu, drhd) {
		if (!ecap_ir_support(iommu->ecap))
			continue;

		/* Set up interrupt remapping for iommu. */
		iommu_set_intr_remapping(iommu, eim);
		setup = 1;
	}

	if (!setup)
		goto error;

	return 0;

error:
	/*
	 * handle error condition gracefully here!
	 */
	return -1;
}