#include <linux/interrupt.h>
#include <linux/dmar.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/hpet.h>
#include <linux/pci.h>
#include <linux/irq.h>
#include <asm/io_apic.h>
#include <linux/intel-iommu.h>
#include "intr_remapping.h"
#include <acpi/acpi.h>
#include <asm/pci-direct.h>

static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
static struct hpet_scope ir_hpet[MAX_HPET_TBS];
static int ir_ioapic_num, ir_hpet_num;
int intr_remapping_enabled;

static int disable_intremap;
static int disable_sourceid_checking;

static __init int setup_nointremap(char *str)
{
	disable_intremap = 1;
	return 0;
}
early_param("nointremap", setup_nointremap);

static __init int setup_intremap(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strncmp(str, "on", 2))
		disable_intremap = 0;
	else if (!strncmp(str, "off", 3))
		disable_intremap = 1;
	else if (!strncmp(str, "nosid", 5))
		disable_sourceid_checking = 1;

	return 0;
}
early_param("intremap", setup_intremap);
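
/*
 * Per-IRQ bookkeeping: each remapped IRQ carries an irq_2_iommu that
 * records which IOMMU owns it and where its IRTE(s) live (irte_index,
 * sub_handle, irte_mask).  With CONFIG_GENERIC_HARDIRQS the structure
 * hangs off the irq_data and is allocated on demand; otherwise a static
 * NR_IRQS-sized table is used.
 */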

#ifdef CONFIG_GENERIC_HARDIRQS
static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
{
	return get_irq_iommu(irq);
}

static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
{
	struct irq_data *data = irq_get_irq_data(irq);

	if (WARN_ONCE(data->irq_2_iommu,
		      KERN_DEBUG "irq_2_iommu!=NULL irq %u\n", irq))
		return data->irq_2_iommu;

	data->irq_2_iommu = kzalloc_node(sizeof(*data->irq_2_iommu),
					 GFP_ATOMIC, data->node);
	return data->irq_2_iommu;
}

static void irq_2_iommu_free(unsigned int irq)
{
	struct irq_data *d = irq_get_irq_data(irq);
	struct irq_2_iommu *p = d->irq_2_iommu;

	d->irq_2_iommu = NULL;
	kfree(p);
}

#else /* !CONFIG_GENERIC_HARDIRQS */

static struct irq_2_iommu irq_2_iommuX[NR_IRQS];

static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
{
	if (irq < nr_irqs)
		return &irq_2_iommuX[irq];

	return NULL;
}

static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
{
	return irq_2_iommu(irq);
}

static void irq_2_iommu_free(unsigned int irq) { }

#endif

static DEFINE_SPINLOCK(irq_2_ir_lock);

static struct irq_2_iommu *valid_irq_2_iommu(unsigned int irq)
{
	struct irq_2_iommu *irq_iommu;

	irq_iommu = irq_2_iommu(irq);
	if (!irq_iommu)
		return NULL;

	if (!irq_iommu->iommu)
		return NULL;

	return irq_iommu;
}

int irq_remapped(int irq)
{
	return valid_irq_2_iommu(irq) != NULL;
}

int get_irte(int irq, struct irte *entry)
{
	int index;
	unsigned long flags;
	struct irq_2_iommu *irq_iommu;

	if (!entry)
		return -1;

	spin_lock_irqsave(&irq_2_ir_lock, flags);
	irq_iommu = valid_irq_2_iommu(irq);
	if (!irq_iommu) {
		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
		return -1;
	}

	index = irq_iommu->irte_index + irq_iommu->sub_handle;
	*entry = *(irq_iommu->iommu->ir_table->base + index);

	spin_unlock_irqrestore(&irq_2_ir_lock, flags);
	return 0;
}
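
/*
 * Allocate @count contiguous IRTEs for @irq and return the base index,
 * or -1 on failure.  A request for more than one entry (multi-vector
 * MSI, for instance) is rounded up to a power of two and recorded as a
 * mask: count = 3 becomes count = 4 with mask = ilog2(4) = 2, and
 * free_irte() later releases 1 << mask entries.
 */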
int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
{
	struct ir_table *table = iommu->ir_table;
	struct irq_2_iommu *irq_iommu;
	u16 index, start_index;
	unsigned int mask = 0;
	unsigned long flags;
	int i;

	if (!count)
		return -1;

#ifndef CONFIG_SPARSE_IRQ
	/* protect irq_2_iommu_alloc later */
	if (irq >= nr_irqs)
		return -1;
#endif

	/*
	 * start the IRTE search from index 0.
	 */
	index = start_index = 0;

	if (count > 1) {
		count = __roundup_pow_of_two(count);
		mask = ilog2(count);
	}

	if (mask > ecap_max_handle_mask(iommu->ecap)) {
		printk(KERN_ERR
		       "Requested mask %x exceeds the max invalidation handle"
		       " mask value %Lx\n", mask,
		       ecap_max_handle_mask(iommu->ecap));
		return -1;
	}

	spin_lock_irqsave(&irq_2_ir_lock, flags);
	do {
		for (i = index; i < index + count; i++)
			if (table->base[i].present)
				break;
		/* empty index found */
		if (i == index + count)
			break;

		index = (index + count) % INTR_REMAP_TABLE_ENTRIES;

		if (index == start_index) {
			spin_unlock_irqrestore(&irq_2_ir_lock, flags);
			printk(KERN_ERR "can't allocate an IRTE\n");
			return -1;
		}
	} while (1);

	for (i = index; i < index + count; i++)
		table->base[i].present = 1;

	irq_iommu = irq_2_iommu_alloc(irq);
	if (!irq_iommu) {
		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
		printk(KERN_ERR "can't allocate irq_2_iommu\n");
		return -1;
	}

	irq_iommu->iommu = iommu;
	irq_iommu->irte_index = index;
	irq_iommu->sub_handle = 0;
	irq_iommu->irte_mask = mask;

	spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return index;
}
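
/*
 * Queue a selective Interrupt Entry Cache invalidation for the IRTE
 * block at @index (block size 2^mask) and wait synchronously for it to
 * complete via the queued-invalidation interface.
 */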
static int qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
{
	struct qi_desc desc;

	desc.low = QI_IEC_IIDEX(index) | QI_IEC_TYPE | QI_IEC_IM(mask)
		   | QI_IEC_SELECTIVE;
	desc.high = 0;

	return qi_submit_sync(&desc, iommu);
}

int map_irq_to_irte_handle(int irq, u16 *sub_handle)
{
	int index;
	struct irq_2_iommu *irq_iommu;
	unsigned long flags;

	spin_lock_irqsave(&irq_2_ir_lock, flags);
	irq_iommu = valid_irq_2_iommu(irq);
	if (!irq_iommu) {
		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
		return -1;
	}

	*sub_handle = irq_iommu->sub_handle;
	index = irq_iommu->irte_index;
	spin_unlock_irqrestore(&irq_2_ir_lock, flags);
	return index;
}

int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
{
	struct irq_2_iommu *irq_iommu;
	unsigned long flags;

	spin_lock_irqsave(&irq_2_ir_lock, flags);

	irq_iommu = irq_2_iommu_alloc(irq);
	if (!irq_iommu) {
		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
		printk(KERN_ERR "can't allocate irq_2_iommu\n");
		return -1;
	}

	irq_iommu->iommu = iommu;
	irq_iommu->irte_index = index;
	irq_iommu->sub_handle = subhandle;
	irq_iommu->irte_mask = 0;

	spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return 0;
}
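
/*
 * Rewrite the IRTE for @irq in place: both 64-bit halves are updated
 * with set_64bit(), the cache line is flushed for non-coherent
 * hardware, and the interrupt entry cache is invalidated so the IOMMU
 * picks up the new entry.
 */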
int modify_irte(int irq, struct irte *irte_modified)
{
	int rc;
	int index;
	struct irte *irte;
	struct intel_iommu *iommu;
	struct irq_2_iommu *irq_iommu;
	unsigned long flags;

	spin_lock_irqsave(&irq_2_ir_lock, flags);
	irq_iommu = valid_irq_2_iommu(irq);
	if (!irq_iommu) {
		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
		return -1;
	}

	iommu = irq_iommu->iommu;

	index = irq_iommu->irte_index + irq_iommu->sub_handle;
	irte = &iommu->ir_table->base[index];

	set_64bit(&irte->low, irte_modified->low);
	set_64bit(&irte->high, irte_modified->high);
	__iommu_flush_cache(iommu, irte, sizeof(*irte));

	rc = qi_flush_iec(iommu, index, 0);
	spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return rc;
}

struct intel_iommu *map_hpet_to_ir(u8 hpet_id)
{
	int i;

	for (i = 0; i < MAX_HPET_TBS; i++)
		if (ir_hpet[i].id == hpet_id)
			return ir_hpet[i].iommu;
	return NULL;
}

struct intel_iommu *map_ioapic_to_ir(int apic)
{
	int i;

	for (i = 0; i < MAX_IO_APICS; i++)
		if (ir_ioapic[i].id == apic)
			return ir_ioapic[i].iommu;
	return NULL;
}

struct intel_iommu *map_dev_to_ir(struct pci_dev *dev)
{
	struct dmar_drhd_unit *drhd;

	drhd = dmar_find_matched_drhd_unit(dev);
	if (!drhd)
		return NULL;

	return drhd->iommu;
}
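
/*
 * Zero the whole IRTE block owned by this mapping and invalidate the
 * entry cache.  Mappings that merely hold a sub_handle into another
 * mapping's block (sub_handle != 0) must not clear the shared entries.
 */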
static int clear_entries(struct irq_2_iommu *irq_iommu)
{
	struct irte *start, *entry, *end;
	struct intel_iommu *iommu;
	int index;

	if (irq_iommu->sub_handle)
		return 0;

	iommu = irq_iommu->iommu;
	index = irq_iommu->irte_index + irq_iommu->sub_handle;

	start = iommu->ir_table->base + index;
	end = start + (1 << irq_iommu->irte_mask);

	for (entry = start; entry < end; entry++) {
		set_64bit(&entry->low, 0);
		set_64bit(&entry->high, 0);
	}

	return qi_flush_iec(iommu, index, irq_iommu->irte_mask);
}

int free_irte(int irq)
{
	int rc = 0;
	struct irq_2_iommu *irq_iommu;
	unsigned long flags;

	spin_lock_irqsave(&irq_2_ir_lock, flags);
	irq_iommu = valid_irq_2_iommu(irq);
	if (!irq_iommu) {
		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
		return -1;
	}

	rc = clear_entries(irq_iommu);

	irq_iommu->iommu = NULL;
	irq_iommu->irte_index = 0;
	irq_iommu->sub_handle = 0;
	irq_iommu->irte_mask = 0;

	spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	irq_2_iommu_free(irq);

	return rc;
}

/*
 * source validation type
 */
#define SVT_NO_VERIFY		0x0  /* no verification is required */
#define SVT_VERIFY_SID_SQ	0x1  /* verify using SID and SQ fields */
#define SVT_VERIFY_BUS		0x2  /* verify bus of request-id */

/*
 * source-id qualifier
 */
#define SQ_ALL_16	0x0  /* verify all 16 bits of request-id */
#define SQ_13_IGNORE_1	0x1  /* verify most significant 13 bits, ignore
			      * the third least significant bit
			      */
#define SQ_13_IGNORE_2	0x2  /* verify most significant 13 bits, ignore
			      * the second and third least significant bits
			      */
#define SQ_13_IGNORE_3	0x3  /* verify most significant 13 bits, ignore
			      * the three least significant bits
			      */
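
/*
 * Example: with SVT_VERIFY_SID_SQ and SQ_13_IGNORE_3, only bits 15:3 of
 * the 16-bit request-id are compared against the programmed SID, so all
 * eight functions of a device slot pass the check; SQ_ALL_16 requires
 * an exact bus/devfn match.
 */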

/*
 * set SVT, SQ and SID fields of irte to verify
 * source ids of interrupt requests
 */
static void set_irte_sid(struct irte *irte, unsigned int svt,
			 unsigned int sq, unsigned int sid)
{
	if (disable_sourceid_checking)
		svt = SVT_NO_VERIFY;

	irte->svt = svt;
	irte->sq = sq;
	irte->sid = sid;
}

int set_ioapic_sid(struct irte *irte, int apic)
{
	int i;
	u16 sid = 0;

	if (!irte)
		return -1;

	for (i = 0; i < MAX_IO_APICS; i++) {
		if (ir_ioapic[i].id == apic) {
			sid = (ir_ioapic[i].bus << 8) | ir_ioapic[i].devfn;
			break;
		}
	}

	if (sid == 0) {
		pr_warning("Failed to set source-id of IOAPIC (%d)\n", apic);
		return -1;
	}

	set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16, sid);

	return 0;
}

int set_hpet_sid(struct irte *irte, u8 id)
{
	int i;
	u16 sid = 0;

	if (!irte)
		return -1;

	for (i = 0; i < MAX_HPET_TBS; i++) {
		if (ir_hpet[i].id == id) {
			sid = (ir_hpet[i].bus << 8) | ir_hpet[i].devfn;
			break;
		}
	}

	if (sid == 0) {
		pr_warning("Failed to set source-id of HPET block (%d)\n", id);
		return -1;
	}

	/*
	 * Should really use SQ_ALL_16. Some platforms are broken.
	 * While we figure out the right quirks for these broken platforms, use
	 * SQ_13_IGNORE_3 for now.
	 */
	set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_13_IGNORE_3, sid);

	return 0;
}

int set_msi_sid(struct irte *irte, struct pci_dev *dev)
{
	struct pci_dev *bridge;

	if (!irte || !dev)
		return -1;

	/* PCIe device or Root Complex integrated PCI device */
	if (pci_is_pcie(dev) || !dev->bus->parent) {
		set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16,
			     (dev->bus->number << 8) | dev->devfn);
		return 0;
	}

	bridge = pci_find_upstream_pcie_bridge(dev);
	if (bridge) {
		if (pci_is_pcie(bridge))/* this is a PCIe-to-PCI/PCIX bridge */
			set_irte_sid(irte, SVT_VERIFY_BUS, SQ_ALL_16,
				(bridge->bus->number << 8) | dev->bus->number);
		else /* this is a legacy PCI bridge */
			set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16,
				(bridge->bus->number << 8) | bridge->devfn);
	}

	return 0;
}
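
/*
 * Program the remapping table address and enable remapping, in order:
 * DMAR_IRTA_REG is written and latched with GCMD.SIRTP, the interrupt
 * entry cache is globally invalidated, and only then is GCMD.IRE set.
 * Each GCMD write is confirmed by polling the corresponding status bit
 * in DMAR_GSTS_REG.
 */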
static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
{
	u64 addr;
	u32 sts;
	unsigned long flags;

	addr = virt_to_phys((void *)iommu->ir_table->base);

	spin_lock_irqsave(&iommu->register_lock, flags);

	dmar_writeq(iommu->reg + DMAR_IRTA_REG,
		    (addr) | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE);

	/* Set interrupt-remapping table pointer */
	iommu->gcmd |= DMA_GCMD_SIRTP;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_IRTPS), sts);
	spin_unlock_irqrestore(&iommu->register_lock, flags);

	/*
	 * global invalidation of interrupt entry cache before enabling
	 * interrupt-remapping.
	 */
	qi_global_iec(iommu);

	spin_lock_irqsave(&iommu->register_lock, flags);

	/* Enable interrupt-remapping */
	iommu->gcmd |= DMA_GCMD_IRE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_IRES), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flags);
}
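
/*
 * Allocate the software ir_table descriptor plus INTR_REMAP_PAGE_ORDER
 * pages of zeroed, node-local memory for the hardware table, then hand
 * the table to the IOMMU.
 */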
static int setup_intr_remapping(struct intel_iommu *iommu, int mode)
{
	struct ir_table *ir_table;
	struct page *pages;

	ir_table = iommu->ir_table = kzalloc(sizeof(struct ir_table),
					     GFP_ATOMIC);
	if (!iommu->ir_table)
		return -ENOMEM;

	pages = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO,
				 INTR_REMAP_PAGE_ORDER);
	if (!pages) {
		printk(KERN_ERR "failed to allocate pages of order %d\n",
		       INTR_REMAP_PAGE_ORDER);
		kfree(iommu->ir_table);
		return -ENOMEM;
	}

	ir_table->base = page_address(pages);

	iommu_set_intr_remapping(iommu, mode);

	return 0;
}

/*
 * Disable Interrupt Remapping.
 */
static void iommu_disable_intr_remapping(struct intel_iommu *iommu)
{
	unsigned long flags;
	u32 sts;

	if (!ecap_ir_support(iommu->ecap))
		return;

	/*
	 * global invalidation of interrupt entry cache before disabling
	 * interrupt-remapping.
	 */
	qi_global_iec(iommu);

	spin_lock_irqsave(&iommu->register_lock, flags);

	sts = readl(iommu->reg + DMAR_GSTS_REG);
	if (!(sts & DMA_GSTS_IRES))
		goto end;

	iommu->gcmd &= ~DMA_GCMD_IRE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, !(sts & DMA_GSTS_IRES), sts);

end:
	spin_unlock_irqrestore(&iommu->register_lock, flags);
}

int __init intr_remapping_supported(void)
{
	struct dmar_drhd_unit *drhd;

	if (disable_intremap)
		return 0;

	if (!dmar_ir_support())
		return 0;

	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		if (!ecap_ir_support(iommu->ecap))
			return 0;
	}

	return 1;
}

int __init enable_intr_remapping(int eim)
{
	struct dmar_drhd_unit *drhd;
	int setup = 0;

	if (parse_ioapics_under_ir() != 1) {
		printk(KERN_INFO "Not enabling interrupt remapping\n");
		return -1;
	}

	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		/*
		 * If the queued invalidation is already initialized,
		 * shouldn't disable it.
		 */
		if (iommu->qi)
			continue;

		/*
		 * Clear previous faults.
		 */
		dmar_fault(-1, iommu);

		/*
		 * Disable intr remapping and queued invalidation, if already
		 * enabled prior to OS handover.
		 */
		iommu_disable_intr_remapping(iommu);

		dmar_disable_qi(iommu);
	}

	/*
	 * check for the Interrupt-remapping support
	 */
	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		if (!ecap_ir_support(iommu->ecap))
			continue;

		if (eim && !ecap_eim_support(iommu->ecap)) {
			printk(KERN_INFO "DRHD %Lx: EIM not supported by DRHD,"
			       " ecap %Lx\n", drhd->reg_base_addr, iommu->ecap);
			return -1;
		}
	}

	/*
	 * Enable queued invalidation for all the DRHD's.
	 */
	for_each_drhd_unit(drhd) {
		int ret;
		struct intel_iommu *iommu = drhd->iommu;

		ret = dmar_enable_qi(iommu);
		if (ret) {
			printk(KERN_ERR "DRHD %Lx: failed to enable queued"
			       " invalidation, ecap %Lx, ret %d\n",
			       drhd->reg_base_addr, iommu->ecap, ret);
			return -1;
		}
	}

	/*
	 * Setup Interrupt-remapping for all the DRHD's now.
	 */
	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		if (!ecap_ir_support(iommu->ecap))
			continue;

		if (setup_intr_remapping(iommu, eim))
			goto error;

		setup = 1;
	}

	if (!setup)
		goto error;

	intr_remapping_enabled = 1;

	return 0;

error:
	/*
	 * handle error condition gracefully here!
	 */
	return -1;
}
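
/*
 * A DMAR device-scope entry for an HPET or IOAPIC ends with a PCI path:
 * one (device, function) pair per bridge hop.  Since the PCI core isn't
 * up yet, each hop's secondary bus number is read with raw config-space
 * accesses to compute the final bus/devfn the device will present as
 * its request-id.
 */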
static void ir_parse_one_hpet_scope(struct acpi_dmar_device_scope *scope,
				    struct intel_iommu *iommu)
{
	struct acpi_dmar_pci_path *path;
	u8 bus;
	int count;

	bus = scope->bus;
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

	while (--count > 0) {
		/*
		 * Access PCI config space directly since the PCI
		 * subsystem isn't initialized yet.
		 */
		bus = read_pci_config_byte(bus, path->dev, path->fn,
					   PCI_SECONDARY_BUS);
		path++;
	}

	ir_hpet[ir_hpet_num].bus = bus;
	ir_hpet[ir_hpet_num].devfn = PCI_DEVFN(path->dev, path->fn);
	ir_hpet[ir_hpet_num].iommu = iommu;
	ir_hpet[ir_hpet_num].id = scope->enumeration_id;
	ir_hpet_num++;
}

static void ir_parse_one_ioapic_scope(struct acpi_dmar_device_scope *scope,
				      struct intel_iommu *iommu)
{
	struct acpi_dmar_pci_path *path;
	u8 bus;
	int count;

	bus = scope->bus;
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

	while (--count > 0) {
		/*
		 * Access PCI config space directly since the PCI
		 * subsystem isn't initialized yet.
		 */
		bus = read_pci_config_byte(bus, path->dev, path->fn,
					   PCI_SECONDARY_BUS);
		path++;
	}

	ir_ioapic[ir_ioapic_num].bus = bus;
	ir_ioapic[ir_ioapic_num].devfn = PCI_DEVFN(path->dev, path->fn);
	ir_ioapic[ir_ioapic_num].iommu = iommu;
	ir_ioapic[ir_ioapic_num].id = scope->enumeration_id;
	ir_ioapic_num++;
}

static int ir_parse_ioapic_hpet_scope(struct acpi_dmar_header *header,
				      struct intel_iommu *iommu)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_device_scope *scope;
	void *start, *end;

	drhd = (struct acpi_dmar_hardware_unit *)header;

	start = (void *)(drhd + 1);
	end = ((void *)drhd) + header->length;

	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC) {
			if (ir_ioapic_num == MAX_IO_APICS) {
				printk(KERN_WARNING "Exceeded Max IO APICS\n");
				return -1;
			}

			printk(KERN_INFO "IOAPIC id %d under DRHD base"
			       " 0x%Lx IOMMU %d\n", scope->enumeration_id,
			       drhd->address, iommu->seq_id);

			ir_parse_one_ioapic_scope(scope, iommu);
		} else if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_HPET) {
			if (ir_hpet_num == MAX_HPET_TBS) {
				printk(KERN_WARNING "Exceeded Max HPET blocks\n");
				return -1;
			}

			printk(KERN_INFO "HPET id %d under DRHD base"
			       " 0x%Lx\n", scope->enumeration_id,
			       drhd->address);

			ir_parse_one_hpet_scope(scope, iommu);
		}
		start += scope->length;
	}

	return 0;
}

/*
 * Finds the association between IOAPICs and their Interrupt-remapping
 * hardware unit.
 */
int __init parse_ioapics_under_ir(void)
{
	struct dmar_drhd_unit *drhd;
	int ir_supported = 0;

	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		if (ecap_ir_support(iommu->ecap)) {
			if (ir_parse_ioapic_hpet_scope(drhd->hdr, iommu))
				return -1;

			ir_supported = 1;
		}
	}

	if (ir_supported && ir_ioapic_num != nr_ioapics) {
		printk(KERN_WARNING
		       "Not all IO-APIC's listed under remapping hardware\n");
		return -1;
	}

	return ir_supported;
}
void disable_intr_remapping(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;

	/*
	 * Disable Interrupt-remapping for all the DRHD's now.
	 */
	for_each_iommu(iommu, drhd) {
		if (!ecap_ir_support(iommu->ecap))
			continue;

		iommu_disable_intr_remapping(iommu);
	}
}

int reenable_intr_remapping(int eim)
{
	struct dmar_drhd_unit *drhd;
	int setup = 0;
	struct intel_iommu *iommu = NULL;

	for_each_iommu(iommu, drhd)
		if (iommu->qi)
			dmar_reenable_qi(iommu);

	/*
	 * Setup Interrupt-remapping for all the DRHD's now.
	 */
	for_each_iommu(iommu, drhd) {
		if (!ecap_ir_support(iommu->ecap))
			continue;

		/* Set up interrupt remapping for iommu.*/
		iommu_set_intr_remapping(iommu, eim);
		setup = 1;
	}

	if (!setup)
		goto error;

	return 0;

error:
	/*
	 * handle error condition gracefully here!
	 */
	return -1;
}