#include <linux/interrupt.h>
#include <linux/dmar.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/hpet.h>
#include <linux/pci.h>
#include <linux/irq.h>
#include <asm/io_apic.h>
#include <asm/smp.h>
#include <asm/cpu.h>
#include <linux/intel-iommu.h>
#include <acpi/acpi.h>
#include <asm/irq_remapping.h>
#include <asm/pci-direct.h>
#include <asm/msidef.h>

#include "irq_remapping.h"

struct ioapic_scope {
	struct intel_iommu *iommu;
	unsigned int id;
	unsigned int bus;	/* PCI bus number */
	unsigned int devfn;	/* PCI devfn number */
};

struct hpet_scope {
	struct intel_iommu *iommu;
	u8 id;
	unsigned int bus;
	unsigned int devfn;
};

#define IR_X2APIC_MODE(mode) (mode ? (1 << 11) : 0)
#define IRTE_DEST(dest) ((x2apic_mode) ? dest : dest << 8)

static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
static struct hpet_scope ir_hpet[MAX_HPET_TBS];
static int ir_ioapic_num, ir_hpet_num;

static DEFINE_RAW_SPINLOCK(irq_2_ir_lock);

static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
{
	struct irq_cfg *cfg = irq_get_chip_data(irq);
	return cfg ? &cfg->irq_2_iommu : NULL;
}

int get_irte(int irq, struct irte *entry)
{
	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
	unsigned long flags;
	int index;

	if (!entry || !irq_iommu)
		return -1;

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);

	index = irq_iommu->irte_index + irq_iommu->sub_handle;
	*entry = *(irq_iommu->iommu->ir_table->base + index);

	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
	return 0;
}

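/*
 * Allocate 'count' consecutive IRTEs for an irq.  Multi-entry requests are
 * rounded up to a power of two; the base index and mask are recorded in the
 * irq's irq_2_iommu bookkeeping so later lookups can find the entries.
 */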
static int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
{
	struct ir_table *table = iommu->ir_table;
	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
	struct irq_cfg *cfg = irq_get_chip_data(irq);
	u16 index, start_index;
	unsigned int mask = 0;
	unsigned long flags;
	int i;

	if (!count || !irq_iommu)
		return -1;

	/*
	 * start the IRTE search from index 0.
	 */
	index = start_index = 0;

	if (count > 1) {
		count = __roundup_pow_of_two(count);
		mask = ilog2(count);
	}

	if (mask > ecap_max_handle_mask(iommu->ecap)) {
		printk(KERN_ERR
		       "Requested mask %x exceeds the max invalidation handle"
		       " mask value %Lx\n", mask,
		       ecap_max_handle_mask(iommu->ecap));
		return -1;
	}

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
	do {
		for (i = index; i < index + count; i++)
			if (table->base[i].present)
				break;
		/* empty index found */
		if (i == index + count)
			break;

		index = (index + count) % INTR_REMAP_TABLE_ENTRIES;

		if (index == start_index) {
			raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
			printk(KERN_ERR "can't allocate an IRTE\n");
			return -1;
		}
	} while (1);

	for (i = index; i < index + count; i++)
		table->base[i].present = 1;

	cfg->remapped = 1;
	irq_iommu->iommu = iommu;
	irq_iommu->irte_index = index;
	irq_iommu->sub_handle = 0;
	irq_iommu->irte_mask = mask;

	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return index;
}

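/* Queue an IEC (interrupt entry cache) invalidation and wait for it to complete. */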
static int qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
{
	struct qi_desc desc;

	desc.low = QI_IEC_IIDEX(index) | QI_IEC_TYPE | QI_IEC_IM(mask)
		   | QI_IEC_SELECTIVE;
	desc.high = 0;

	return qi_submit_sync(&desc, iommu);
}

static int map_irq_to_irte_handle(int irq, u16 *sub_handle)
{
	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
	unsigned long flags;
	int index;

	if (!irq_iommu)
		return -1;

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
	*sub_handle = irq_iommu->sub_handle;
	index = irq_iommu->irte_index;
	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
	return index;
}

static int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
{
	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
	struct irq_cfg *cfg = irq_get_chip_data(irq);
	unsigned long flags;

	if (!irq_iommu)
		return -1;

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);

	cfg->remapped = 1;
	irq_iommu->iommu = iommu;
	irq_iommu->irte_index = index;
	irq_iommu->sub_handle = subhandle;
	irq_iommu->irte_mask = 0;

	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return 0;
}

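/*
 * Write a new IRTE for an irq and flush the old entry from the
 * interrupt entry cache.
 */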
static int modify_irte(int irq, struct irte *irte_modified)
{
	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
	struct intel_iommu *iommu;
	unsigned long flags;
	struct irte *irte;
	int rc, index;

	if (!irq_iommu)
		return -1;

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);

	iommu = irq_iommu->iommu;

	index = irq_iommu->irte_index + irq_iommu->sub_handle;
	irte = &iommu->ir_table->base[index];

	set_64bit(&irte->low, irte_modified->low);
	set_64bit(&irte->high, irte_modified->high);
	__iommu_flush_cache(iommu, irte, sizeof(*irte));

	rc = qi_flush_iec(iommu, index, 0);
	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return rc;
}

static struct intel_iommu *map_hpet_to_ir(u8 hpet_id)
{
	int i;

	for (i = 0; i < MAX_HPET_TBS; i++)
		if (ir_hpet[i].id == hpet_id)
			return ir_hpet[i].iommu;
	return NULL;
}

static struct intel_iommu *map_ioapic_to_ir(int apic)
{
	int i;

	for (i = 0; i < MAX_IO_APICS; i++)
		if (ir_ioapic[i].id == apic)
			return ir_ioapic[i].iommu;
	return NULL;
}

static struct intel_iommu *map_dev_to_ir(struct pci_dev *dev)
{
	struct dmar_drhd_unit *drhd;

	drhd = dmar_find_matched_drhd_unit(dev);
	if (!drhd)
		return NULL;

	return drhd->iommu;
}

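/* Clear every IRTE in the block allocated to this irq and flush the IEC. */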
static int clear_entries(struct irq_2_iommu *irq_iommu)
{
	struct irte *start, *entry, *end;
	struct intel_iommu *iommu;
	int index;

	if (irq_iommu->sub_handle)
		return 0;

	iommu = irq_iommu->iommu;
	index = irq_iommu->irte_index + irq_iommu->sub_handle;

	start = iommu->ir_table->base + index;
	end = start + (1 << irq_iommu->irte_mask);

	for (entry = start; entry < end; entry++) {
		set_64bit(&entry->low, 0);
		set_64bit(&entry->high, 0);
	}

	return qi_flush_iec(iommu, index, irq_iommu->irte_mask);
}

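/* Release the IRTEs associated with an irq and reset its irq_2_iommu state. */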
static int free_irte(int irq)
{
	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
	unsigned long flags;
	int rc;

	if (!irq_iommu)
		return -1;

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);

	rc = clear_entries(irq_iommu);

	irq_iommu->iommu = NULL;
	irq_iommu->irte_index = 0;
	irq_iommu->sub_handle = 0;
	irq_iommu->irte_mask = 0;

	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return rc;
}

/*
 * source validation type
 */
#define SVT_NO_VERIFY		0x0  /* no verification is required */
#define SVT_VERIFY_SID_SQ	0x1  /* verify using SID and SQ fields */
#define SVT_VERIFY_BUS		0x2  /* verify bus of request-id */

/*
 * source-id qualifier
 */
#define SQ_ALL_16	0x0  /* verify all 16 bits of request-id */
#define SQ_13_IGNORE_1	0x1  /* verify most significant 13 bits, ignore
			      * the third least significant bit
			      */
#define SQ_13_IGNORE_2	0x2  /* verify most significant 13 bits, ignore
			      * the second and third least significant bits
			      */
#define SQ_13_IGNORE_3	0x3  /* verify most significant 13 bits, ignore
			      * the three least significant bits
			      */

/*
 * set SVT, SQ and SID fields of irte to verify
 * source ids of interrupt requests
 */
static void set_irte_sid(struct irte *irte, unsigned int svt,
			 unsigned int sq, unsigned int sid)
{
	if (disable_sourceid_checking)
		svt = SVT_NO_VERIFY;
	irte->svt = svt;
	irte->sq = sq;
	irte->sid = sid;
}

static int set_ioapic_sid(struct irte *irte, int apic)
{
	int i;
	u16 sid = 0;

	if (!irte)
		return -1;

	for (i = 0; i < MAX_IO_APICS; i++) {
		if (ir_ioapic[i].id == apic) {
			sid = (ir_ioapic[i].bus << 8) | ir_ioapic[i].devfn;
			break;
		}
	}

	if (sid == 0) {
		pr_warning("Failed to set source-id of IOAPIC (%d)\n", apic);
		return -1;
	}

	set_irte_sid(irte, 1, 0, sid);

	return 0;
}

static int set_hpet_sid(struct irte *irte, u8 id)
{
	int i;
	u16 sid = 0;

	if (!irte)
		return -1;

	for (i = 0; i < MAX_HPET_TBS; i++) {
		if (ir_hpet[i].id == id) {
			sid = (ir_hpet[i].bus << 8) | ir_hpet[i].devfn;
			break;
		}
	}

	if (sid == 0) {
		pr_warning("Failed to set source-id of HPET block (%d)\n", id);
		return -1;
	}

	/*
	 * Should really use SQ_ALL_16. Some platforms are broken.
	 * While we figure out the right quirks for these broken platforms, use
	 * SQ_13_IGNORE_3 for now.
	 */
	set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_13_IGNORE_3, sid);

	return 0;
}

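/*
 * Derive the source-id for an MSI-capable device: PCIe and Root Complex
 * integrated devices are verified against their own requester-id, while
 * devices behind a bridge are verified against the bridge instead.
 */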
static int set_msi_sid(struct irte *irte, struct pci_dev *dev)
{
	struct pci_dev *bridge;

	if (!irte || !dev)
		return -1;

	/* PCIe device or Root Complex integrated PCI device */
	if (pci_is_pcie(dev) || !dev->bus->parent) {
		set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16,
			     (dev->bus->number << 8) | dev->devfn);
		return 0;
	}

	bridge = pci_find_upstream_pcie_bridge(dev);
	if (bridge) {
		if (pci_is_pcie(bridge)) /* this is a PCIe-to-PCI/PCIX bridge */
			set_irte_sid(irte, SVT_VERIFY_BUS, SQ_ALL_16,
				     (bridge->bus->number << 8) | dev->bus->number);
		else /* this is a legacy PCI bridge */
			set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16,
				     (bridge->bus->number << 8) | bridge->devfn);
	}

	return 0;
}

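/*
 * Program the interrupt-remapping table address into the IOMMU, flush the
 * interrupt entry cache and then enable remapping, with compatibility-format
 * interrupts blocked.
 */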
static void iommu_set_irq_remapping(struct intel_iommu *iommu, int mode)
{
	u64 addr;
	u32 sts;
	unsigned long flags;

	addr = virt_to_phys((void *)iommu->ir_table->base);

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	dmar_writeq(iommu->reg + DMAR_IRTA_REG,
		    (addr) | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE);

	/* Set interrupt-remapping table pointer */
	iommu->gcmd |= DMA_GCMD_SIRTP;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_IRTPS), sts);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);

	/*
	 * global invalidation of interrupt entry cache before enabling
	 * interrupt-remapping.
	 */
	qi_global_iec(iommu);

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	/* Enable interrupt-remapping */
	iommu->gcmd |= DMA_GCMD_IRE;
	iommu->gcmd &= ~DMA_GCMD_CFI;  /* Block compatibility-format MSIs */
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_IRES), sts);

	/*
	 * With CFI clear in the Global Command register, we should be
	 * protected from dangerous (i.e. compatibility) interrupts
	 * regardless of x2apic status.  Check just to be sure.
	 */
	if (sts & DMA_GSTS_CFIS)
		WARN(1, KERN_WARNING
			"Compatibility-format IRQs enabled despite intr remapping;\n"
			"you are vulnerable to IRQ injection.\n");

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

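/* Allocate the interrupt-remapping table for an IOMMU and switch remapping on. */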
static int intel_setup_irq_remapping(struct intel_iommu *iommu, int mode)
{
	struct ir_table *ir_table;
	struct page *pages;

	ir_table = iommu->ir_table = kzalloc(sizeof(struct ir_table),
					     GFP_ATOMIC);

	if (!iommu->ir_table)
		return -ENOMEM;

	pages = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO,
				 INTR_REMAP_PAGE_ORDER);

	if (!pages) {
		printk(KERN_ERR "failed to allocate pages of order %d\n",
		       INTR_REMAP_PAGE_ORDER);
		kfree(iommu->ir_table);
		return -ENOMEM;
	}

	ir_table->base = page_address(pages);

	iommu_set_irq_remapping(iommu, mode);
	return 0;
}

/*
 * Disable Interrupt Remapping.
 */
static void iommu_disable_irq_remapping(struct intel_iommu *iommu)
{
	unsigned long flags;
	u32 sts;

	if (!ecap_ir_support(iommu->ecap))
		return;

	/*
	 * global invalidation of interrupt entry cache before disabling
	 * interrupt-remapping.
	 */
	qi_global_iec(iommu);

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	sts = readl(iommu->reg + DMAR_GSTS_REG);
	if (!(sts & DMA_GSTS_IRES))
		goto end;

	iommu->gcmd &= ~DMA_GCMD_IRE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, !(sts & DMA_GSTS_IRES), sts);

end:
	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

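/* Returns non-zero if the DMAR table asks the OS to opt out of x2apic. */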
static int __init dmar_x2apic_optout(void)
{
	struct acpi_table_dmar *dmar;
	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar || no_x2apic_optout)
		return 0;
	return dmar->flags & DMAR_X2APIC_OPT_OUT;
}

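/*
 * Report whether interrupt remapping can be used: not administratively
 * disabled, not broken by a known chipset erratum, and supported by every
 * DRHD unit.
 */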
static int __init intel_irq_remapping_supported(void)
{
	struct dmar_drhd_unit *drhd;

	if (disable_irq_remap)
		return 0;
	if (irq_remap_broken) {
		printk(KERN_WARNING
			"This system BIOS has enabled interrupt remapping\n"
			"on a chipset that contains an erratum making that\n"
			"feature unstable. To maintain system stability\n"
			"interrupt remapping is being disabled. Please\n"
			"contact your BIOS vendor for an update\n");
		add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
		disable_irq_remap = 1;
		return 0;
	}

	if (!dmar_ir_support())
		return 0;

	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		if (!ecap_ir_support(iommu->ecap))
			return 0;
	}

	return 1;
}

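/*
 * Bring up interrupt remapping on all DRHD units: reset any pre-OS state,
 * enable queued invalidation, install the remapping tables and report the
 * resulting mode (x2apic or xapic).
 */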
static int __init intel_enable_irq_remapping(void)
{
	struct dmar_drhd_unit *drhd;
	bool x2apic_present;
	int setup = 0;
	int eim = 0;

	x2apic_present = x2apic_supported();

	if (parse_ioapics_under_ir() != 1) {
		printk(KERN_INFO "Not enabling interrupt remapping\n");
		goto error;
	}

	if (x2apic_present) {
		eim = !dmar_x2apic_optout();
		if (!eim)
			printk(KERN_WARNING
				"Your BIOS is broken and requested that x2apic be disabled.\n"
				"This will slightly decrease performance.\n"
				"Use 'intremap=no_x2apic_optout' to override BIOS request.\n");
	}

	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		/*
		 * If the queued invalidation is already initialized,
		 * shouldn't disable it.
		 */
		if (iommu->qi)
			continue;

		/*
		 * Clear previous faults.
		 */
		dmar_fault(-1, iommu);

		/*
		 * Disable intr remapping and queued invalidation, if already
		 * enabled prior to OS handover.
		 */
		iommu_disable_irq_remapping(iommu);

		dmar_disable_qi(iommu);
	}

	/*
	 * check for the Interrupt-remapping support
	 */
	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		if (!ecap_ir_support(iommu->ecap))
			continue;

		if (eim && !ecap_eim_support(iommu->ecap)) {
			printk(KERN_INFO "DRHD %Lx: EIM not supported by DRHD,"
			       " ecap %Lx\n", drhd->reg_base_addr, iommu->ecap);
			goto error;
		}
	}

	/*
	 * Enable queued invalidation for all the DRHD's.
	 */
	for_each_drhd_unit(drhd) {
		int ret;
		struct intel_iommu *iommu = drhd->iommu;
		ret = dmar_enable_qi(iommu);

		if (ret) {
			printk(KERN_ERR "DRHD %Lx: failed to enable queued"
			       " invalidation, ecap %Lx, ret %d\n",
			       drhd->reg_base_addr, iommu->ecap, ret);
			goto error;
		}
	}

	/*
	 * Setup Interrupt-remapping for all the DRHD's now.
	 */
	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		if (!ecap_ir_support(iommu->ecap))
			continue;

		if (intel_setup_irq_remapping(iommu, eim))
			goto error;

		setup = 1;
	}

	if (!setup)
		goto error;

	irq_remapping_enabled = 1;

	/*
	 * VT-d has a different layout for IO-APIC entries when
	 * interrupt remapping is enabled. So it needs a special routine
	 * to print IO-APIC entries for debugging purposes too.
	 */
	x86_io_apic_ops.print_entries = intel_ir_io_apic_print_entries;

	pr_info("Enabled IRQ remapping in %s mode\n", eim ? "x2apic" : "xapic");

	return eim ? IRQ_REMAP_X2APIC_MODE : IRQ_REMAP_XAPIC_MODE;

error:
	/*
	 * handle error condition gracefully here!
	 */

	if (x2apic_present)
		WARN(1, KERN_WARNING
			"Failed to enable irq remapping. You are vulnerable to irq-injection attacks.\n");

	return -1;
}

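/*
 * Record which IOMMU serves an HPET block, walking the ACPI device-scope
 * PCI path to find the bus/devfn used as the HPET's source-id.
 */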
static void ir_parse_one_hpet_scope(struct acpi_dmar_device_scope *scope,
				    struct intel_iommu *iommu)
{
	struct acpi_dmar_pci_path *path;
	u8 bus;
	int count;

	bus = scope->bus;
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

	while (--count > 0) {
		/*
		 * Access PCI config space directly because the PCI
		 * subsystem isn't initialized yet.
		 */
		bus = read_pci_config_byte(bus, path->dev, path->fn,
					   PCI_SECONDARY_BUS);
		path++;
	}
	ir_hpet[ir_hpet_num].bus = bus;
	ir_hpet[ir_hpet_num].devfn = PCI_DEVFN(path->dev, path->fn);
	ir_hpet[ir_hpet_num].iommu = iommu;
	ir_hpet[ir_hpet_num].id = scope->enumeration_id;
	ir_hpet_num++;
}

static void ir_parse_one_ioapic_scope(struct acpi_dmar_device_scope *scope,
				      struct intel_iommu *iommu)
{
	struct acpi_dmar_pci_path *path;
	u8 bus;
	int count;

	bus = scope->bus;
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

	while (--count > 0) {
		/*
		 * Access PCI config space directly because the PCI
		 * subsystem isn't initialized yet.
		 */
		bus = read_pci_config_byte(bus, path->dev, path->fn,
					   PCI_SECONDARY_BUS);
		path++;
	}

	ir_ioapic[ir_ioapic_num].bus = bus;
	ir_ioapic[ir_ioapic_num].devfn = PCI_DEVFN(path->dev, path->fn);
	ir_ioapic[ir_ioapic_num].iommu = iommu;
	ir_ioapic[ir_ioapic_num].id = scope->enumeration_id;
	ir_ioapic_num++;
}

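/* Walk a DRHD's device scope and record every IO-APIC and HPET under it. */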
static int ir_parse_ioapic_hpet_scope(struct acpi_dmar_header *header,
				      struct intel_iommu *iommu)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_device_scope *scope;
	void *start, *end;

	drhd = (struct acpi_dmar_hardware_unit *)header;

	start = (void *)(drhd + 1);
	end = ((void *)drhd) + header->length;

	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC) {
			if (ir_ioapic_num == MAX_IO_APICS) {
				printk(KERN_WARNING "Exceeded Max IO APICS\n");
				return -1;
			}

			printk(KERN_INFO "IOAPIC id %d under DRHD base"
			       " 0x%Lx IOMMU %d\n", scope->enumeration_id,
			       drhd->address, iommu->seq_id);

			ir_parse_one_ioapic_scope(scope, iommu);
		} else if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_HPET) {
			if (ir_hpet_num == MAX_HPET_TBS) {
				printk(KERN_WARNING "Exceeded Max HPET blocks\n");
				return -1;
			}

			printk(KERN_INFO "HPET id %d under DRHD base"
			       " 0x%Lx\n", scope->enumeration_id,
			       drhd->address);

			ir_parse_one_hpet_scope(scope, iommu);
		}
		start += scope->length;
	}

	return 0;
}

/*
 * Finds the association between IOAPICs and their interrupt-remapping
 * hardware units.
 */
int __init parse_ioapics_under_ir(void)
{
	struct dmar_drhd_unit *drhd;
	int ir_supported = 0;
	int ioapic_idx;

	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		if (ecap_ir_support(iommu->ecap)) {
			if (ir_parse_ioapic_hpet_scope(drhd->hdr, iommu))
				return -1;

			ir_supported = 1;
		}
	}

	if (!ir_supported)
		return 0;

	for (ioapic_idx = 0; ioapic_idx < nr_ioapics; ioapic_idx++) {
		int ioapic_id = mpc_ioapic_id(ioapic_idx);
		if (!map_ioapic_to_ir(ioapic_id)) {
			pr_err(FW_BUG "ioapic %d has no mapping iommu, "
			       "interrupt remapping will be disabled\n",
			       ioapic_id);
			return -1;
		}
	}

	return 1;
}

int __init ir_dev_scope_init(void)
{
	if (!irq_remapping_enabled)
		return 0;

	return dmar_dev_scope_init();
}
rootfs_initcall(ir_dev_scope_init);

static void disable_irq_remapping(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;

	/*
	 * Disable Interrupt-remapping for all the DRHD's now.
	 */
	for_each_iommu(iommu, drhd) {
		if (!ecap_ir_support(iommu->ecap))
			continue;

		iommu_disable_irq_remapping(iommu);
	}
}

static int reenable_irq_remapping(int eim)
{
	struct dmar_drhd_unit *drhd;
	int setup = 0;
	struct intel_iommu *iommu = NULL;

	for_each_iommu(iommu, drhd)
		if (iommu->qi)
			dmar_reenable_qi(iommu);

	/*
	 * Setup Interrupt-remapping for all the DRHD's now.
	 */
	for_each_iommu(iommu, drhd) {
		if (!ecap_ir_support(iommu->ecap))
			continue;

		/* Set up interrupt remapping for iommu.*/
		iommu_set_irq_remapping(iommu, eim);
		setup = 1;
	}

	if (!setup)
		goto error;

	return 0;

error:
	/*
	 * handle error condition gracefully here!
	 */
	return -1;
}

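/* Fill in an IRTE for the given vector and destination APIC id. */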
static void prepare_irte(struct irte *irte, int vector,
			 unsigned int dest)
{
	memset(irte, 0, sizeof(*irte));

	irte->present = 1;
	irte->dst_mode = apic->irq_dest_mode;
	/*
	 * Trigger mode in the IRTE will always be edge, and for IO-APIC, the
	 * actual level or edge trigger will be setup in the IO-APIC
	 * RTE. This will help simplify level triggered irq migration.
	 * For more details, see the comments (in io_apic.c) explaining IO-APIC
	 * irq migration in the presence of interrupt-remapping.
	 */
	irte->trigger_mode = 0;
	irte->dlvry_mode = apic->irq_delivery_mode;
	irte->vector = vector;
	irte->dest_id = IRTE_DEST(dest);
	irte->redir_hint = 1;
}

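/*
 * Allocate an IRTE for an IO-APIC pin and program the pin's RTE in
 * remappable format so its interrupts are delivered through the IRTE.
 */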
static int intel_setup_ioapic_entry(int irq,
				    struct IO_APIC_route_entry *route_entry,
				    unsigned int destination, int vector,
				    struct io_apic_irq_attr *attr)
{
	int ioapic_id = mpc_ioapic_id(attr->ioapic);
	struct intel_iommu *iommu = map_ioapic_to_ir(ioapic_id);
	struct IR_IO_APIC_route_entry *entry;
	struct irte irte;
	int index;

	if (!iommu) {
		pr_warn("No mapping iommu for ioapic %d\n", ioapic_id);
		return -ENODEV;
	}

	entry = (struct IR_IO_APIC_route_entry *)route_entry;

	index = alloc_irte(iommu, irq, 1);
	if (index < 0) {
		pr_warn("Failed to allocate IRTE for ioapic %d\n", ioapic_id);
		return -ENOMEM;
	}

	prepare_irte(&irte, vector, destination);

	/* Set source-id of interrupt request */
	set_ioapic_sid(&irte, ioapic_id);

	modify_irte(irq, &irte);

	apic_printk(APIC_VERBOSE, KERN_DEBUG "IOAPIC[%d]: "
		"Set IRTE entry (P:%d FPD:%d Dst_Mode:%d "
		"Redir_hint:%d Trig_Mode:%d Dlvry_Mode:%X "
		"Avail:%X Vector:%02X Dest:%08X "
		"SID:%04X SQ:%X SVT:%X)\n",
		attr->ioapic, irte.present, irte.fpd, irte.dst_mode,
		irte.redir_hint, irte.trigger_mode, irte.dlvry_mode,
		irte.avail, irte.vector, irte.dest_id,
		irte.sid, irte.sq, irte.svt);

	memset(entry, 0, sizeof(*entry));

	entry->index2	= (index >> 15) & 0x1;
	entry->zero	= 0;
	entry->format	= 1;
	entry->index	= (index & 0x7fff);
	/*
	 * IO-APIC RTE will be configured with virtual vector.
	 * irq handler will do the explicit EOI to the io-apic.
	 */
	entry->vector	= attr->ioapic_pin;
	entry->mask	= 0;			/* enable IRQ */
	entry->trigger	= attr->trigger;
	entry->polarity	= attr->polarity;

	/* Mask level triggered irqs.
	 * Use IRQ_DELAYED_DISABLE for edge triggered irqs.
	 */
	if (attr->trigger)
		entry->mask = 1;

	return 0;
}

/*
 * Migrate the IO-APIC irq in the presence of intr-remapping.
 *
 * For both level and edge triggered, irq migration is a simple atomic
 * update (of the vector and CPU destination) of the IRTE, followed by a
 * flush of the hardware cache.
 *
 * For level triggered, we eliminate the io-apic RTE modification (with the
 * updated vector information), by using a virtual vector (io-apic pin number).
 * The real vector used to interrupt the CPU comes from the
 * interrupt-remapping table entry.
 *
 * As the migration is a simple atomic update of the IRTE, the same mechanism
 * is used to migrate MSI irqs in the presence of interrupt-remapping.
 */
static int
intel_ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
			  bool force)
{
	struct irq_cfg *cfg = data->chip_data;
	unsigned int dest, irq = data->irq;
	struct irte irte;
	int err;

	if (!config_enabled(CONFIG_SMP))
		return -EINVAL;

	if (!cpumask_intersects(mask, cpu_online_mask))
		return -EINVAL;

	if (get_irte(irq, &irte))
		return -EBUSY;

	err = assign_irq_vector(irq, cfg, mask);
	if (err)
		return err;

	err = apic->cpu_mask_to_apicid_and(cfg->domain, mask, &dest);
	if (err) {
		if (assign_irq_vector(irq, cfg, data->affinity))
			pr_err("Failed to recover vector for irq %d\n", irq);
		return err;
	}

	irte.vector = cfg->vector;
	irte.dest_id = IRTE_DEST(dest);

	/*
	 * Atomically updates the IRTE with the new destination, vector
	 * and flushes the interrupt entry cache.
	 */
	modify_irte(irq, &irte);

	/*
	 * After this point, all the interrupts will start arriving
	 * at the new destination. So, time to cleanup the previous
	 * vector allocation.
	 */
	if (cfg->move_in_progress)
		send_cleanup_vector(cfg);

	cpumask_copy(data->affinity, mask);
	return 0;
}

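/*
 * Compose an MSI message in remappable format: the address/data encode the
 * IRTE index and sub-handle rather than a destination and vector.
 */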
static void intel_compose_msi_msg(struct pci_dev *pdev,
				  unsigned int irq, unsigned int dest,
				  struct msi_msg *msg, u8 hpet_id)
{
	struct irq_cfg *cfg;
	struct irte irte;
	u16 sub_handle = 0;
	int ir_index;

	cfg = irq_get_chip_data(irq);

	ir_index = map_irq_to_irte_handle(irq, &sub_handle);
	BUG_ON(ir_index == -1);

	prepare_irte(&irte, cfg->vector, dest);

	/* Set source-id of interrupt request */
	if (pdev)
		set_msi_sid(&irte, pdev);
	else
		set_hpet_sid(&irte, hpet_id);

	modify_irte(irq, &irte);

	msg->address_hi = MSI_ADDR_BASE_HI;
	msg->data = sub_handle;
	msg->address_lo = MSI_ADDR_BASE_LO | MSI_ADDR_IR_EXT_INT |
			  MSI_ADDR_IR_SHV |
			  MSI_ADDR_IR_INDEX1(ir_index) |
			  MSI_ADDR_IR_INDEX2(ir_index);
}

/*
 * Map the PCI dev to the corresponding remapping hardware unit
 * and allocate 'nvec' consecutive interrupt-remapping table entries
 * in it.
 */
static int intel_msi_alloc_irq(struct pci_dev *dev, int irq, int nvec)
{
	struct intel_iommu *iommu;
	int index;

	iommu = map_dev_to_ir(dev);
	if (!iommu) {
		printk(KERN_ERR
		       "Unable to map PCI %s to iommu\n", pci_name(dev));
		return -ENOENT;
	}

	index = alloc_irte(iommu, irq, nvec);
	if (index < 0) {
		printk(KERN_ERR
		       "Unable to allocate %d IRTE for PCI %s\n", nvec,
		       pci_name(dev));
		return -ENOSPC;
	}
	return index;
}

static int intel_msi_setup_irq(struct pci_dev *pdev, unsigned int irq,
			       int index, int sub_handle)
{
	struct intel_iommu *iommu;

	iommu = map_dev_to_ir(pdev);
	if (!iommu)
		return -ENOENT;
	/*
	 * setup the mapping between the irq and the IRTE
	 * base index, the sub_handle pointing to the
	 * appropriate interrupt remap table entry.
	 */
	set_irte_irq(irq, iommu, index, sub_handle);

	return 0;
}

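/* Allocate an IRTE for an HPET MSI channel on its associated IOMMU. */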
static int intel_setup_hpet_msi(unsigned int irq, unsigned int id)
{
	struct intel_iommu *iommu = map_hpet_to_ir(id);
	int index;

	if (!iommu)
		return -1;

	index = alloc_irte(iommu, irq, 1);
	if (index < 0)
		return -1;

	return 0;
}

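/* Callbacks exported to the common irq_remapping layer. */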
struct irq_remap_ops intel_irq_remap_ops = {
	.supported		= intel_irq_remapping_supported,
	.prepare		= dmar_table_init,
	.enable			= intel_enable_irq_remapping,
	.disable		= disable_irq_remapping,
	.reenable		= reenable_irq_remapping,
	.enable_faulting	= enable_drhd_fault_handling,
	.setup_ioapic_entry	= intel_setup_ioapic_entry,
	.set_affinity		= intel_ioapic_set_affinity,
	.free_irq		= free_irte,
	.compose_msi_msg	= intel_compose_msi_msg,
	.msi_alloc_irq		= intel_msi_alloc_irq,
	.msi_setup_irq		= intel_msi_setup_irq,
	.setup_hpet_msi		= intel_setup_hpet_msi,
};