/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 * This file implements early detection/parsing of Remapping Devices
 * reported to OS through BIOS via DMA remapping reporting (DMAR) ACPI
 * tables.
 *
 * These routines are used by both DMA-remapping and Interrupt-remapping
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt /* has to precede printk.h */

#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/iova.h>
#include <linux/intel-iommu.h>
#include <linux/timer.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/slab.h>
#include <asm/irq_remapping.h>
#include <asm/iommu_table.h>

#include "irq_remapping.h"

/* No locks are needed as the DMA remapping hardware unit
 * list is constructed at boot time and hotplug of
 * these units is not supported by the architecture.
 */
LIST_HEAD(dmar_drhd_units);

struct acpi_table_header * __initdata dmar_tbl;
static acpi_size dmar_tbl_size;

static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
{
	/*
	 * add INCLUDE_ALL at the tail, so a scan of the list will find it
	 * at the very end.
	 */
	if (drhd->include_all)
		list_add_tail(&drhd->list, &dmar_drhd_units);
	else
		list_add(&drhd->list, &dmar_drhd_units);
}

static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
					   struct pci_dev **dev, u16 segment)
{
	struct pci_bus *bus;
	struct pci_dev *pdev = NULL;
	struct acpi_dmar_pci_path *path;
	int count;

	bus = pci_find_bus(segment, scope->bus);
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

	while (count) {
		if (pdev)
			pci_dev_put(pdev);
		/*
		 * Some BIOSes list non-existent devices in the DMAR table;
		 * just ignore them.
		 */
		if (!bus) {
			pr_warn("Device scope bus [%d] not found\n", scope->bus);
			break;
		}
		pdev = pci_get_slot(bus, PCI_DEVFN(path->dev, path->fn));
		if (!pdev) {
			/* warning will be printed below */
			break;
		}
		path++;
		count--;
		bus = pdev->subordinate;
	}
	if (!pdev) {
		pr_warn("Device scope device [%04x:%02x:%02x.%02x] not found\n",
			segment, scope->bus, path->dev, path->fn);
		*dev = NULL;
		return 0;
	}
	if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
	     pdev->subordinate) ||
	    (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE &&
	     !pdev->subordinate)) {
		pr_warn("Device scope type does not match for %s\n",
			pci_name(pdev));
		pci_dev_put(pdev);
		return -EINVAL;
	}
	*dev = pdev;
	return 0;
}
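
/*
 * Illustrative example (an assumed typical table layout, not taken from
 * this file): a scope entry with length 10 is the 6-byte
 * acpi_dmar_device_scope header followed by two 2-byte acpi_dmar_pci_path
 * hops, i.e. an endpoint reached through one bridge:
 *
 *	scope->bus            starting bus number
 *	path[0] = {dev, fn}   bridge on that bus
 *	path[1] = {dev, fn}   endpoint behind the bridge
 *
 * The loop above walks exactly this chain, descending through
 * pdev->subordinate at each hop.
 */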

int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
				struct pci_dev ***devices, u16 segment)
{
	struct acpi_dmar_device_scope *scope;
	void *tmp = start;
	int index;
	int ret;

	*cnt = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
			(*cnt)++;
		else if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_IOAPIC &&
			 scope->entry_type != ACPI_DMAR_SCOPE_TYPE_HPET) {
			pr_warn("Unsupported device scope\n");
		}
		start += scope->length;
	}
	if (*cnt == 0)
		return 0;

	*devices = kcalloc(*cnt, sizeof(struct pci_dev *), GFP_KERNEL);
	if (!*devices)
		return -ENOMEM;

	start = tmp;
	index = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE) {
			ret = dmar_parse_one_dev_scope(scope,
				&(*devices)[index], segment);
			if (ret) {
				kfree(*devices);
				return ret;
			}
			index++;
		}
		start += scope->length;
	}

	return 0;
}
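
/*
 * Note on the shape of dmar_parse_dev_scope(): it is a deliberate two-pass
 * walk. The first loop only counts ENDPOINT/BRIDGE entries so that a single
 * kcalloc() of the right size can be issued up front; the second loop
 * re-walks the same range and fills the array via dmar_parse_one_dev_scope().
 */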

/**
 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
 * structure which uniquely represents one DMA remapping hardware unit
 * present in the platform
 */
static int __init
dmar_parse_one_drhd(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct dmar_drhd_unit *dmaru;
	int ret = 0;

	drhd = (struct acpi_dmar_hardware_unit *)header;
	dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
	if (!dmaru)
		return -ENOMEM;

	dmaru->hdr = header;
	dmaru->reg_base_addr = drhd->address;
	dmaru->segment = drhd->segment;
	dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */

	ret = alloc_iommu(dmaru);
	if (ret) {
		kfree(dmaru);
		return ret;
	}
	dmar_register_drhd_unit(dmaru);
	return 0;
}

static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru)
{
	struct acpi_dmar_hardware_unit *drhd;
	int ret = 0;

	drhd = (struct acpi_dmar_hardware_unit *)dmaru->hdr;

	if (dmaru->include_all)
		return 0;

	ret = dmar_parse_dev_scope((void *)(drhd + 1),
				   ((void *)drhd) + drhd->header.length,
				   &dmaru->devices_cnt, &dmaru->devices,
				   drhd->segment);
	if (ret) {
		list_del(&dmaru->list);
		kfree(dmaru);
	}
	return ret;
}

#ifdef CONFIG_ACPI_NUMA
static int __init
dmar_parse_one_rhsa(struct acpi_dmar_header *header)
{
	struct acpi_dmar_rhsa *rhsa;
	struct dmar_drhd_unit *drhd;

	rhsa = (struct acpi_dmar_rhsa *)header;
	for_each_drhd_unit(drhd) {
		if (drhd->reg_base_addr == rhsa->base_address) {
			int node = acpi_map_pxm_to_node(rhsa->proximity_domain);

			if (!node_online(node))
				node = -1;
			drhd->iommu->node = node;
			return 0;
		}
	}
	WARN_TAINT(
		1, TAINT_FIRMWARE_WORKAROUND,
		"Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
		"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		drhd->reg_base_addr,
		dmi_get_system_info(DMI_BIOS_VENDOR),
		dmi_get_system_info(DMI_BIOS_VERSION),
		dmi_get_system_info(DMI_PRODUCT_VERSION));

	return 0;
}
#endif
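
/*
 * Worked example (hypothetical values): an RHSA entry with
 * base_address == 0xfed90000 and proximity_domain == 1 binds the DRHD
 * registered at 0xfed90000 to the NUMA node that PXM 1 maps to, so later
 * allocations for that IOMMU (e.g. the QI descriptor page in
 * dmar_enable_qi()) can be made node-local.
 */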

static void __init
dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_reserved_memory *rmrr;
	struct acpi_dmar_atsr *atsr;
	struct acpi_dmar_rhsa *rhsa;

	switch (header->type) {
	case ACPI_DMAR_TYPE_HARDWARE_UNIT:
		drhd = container_of(header, struct acpi_dmar_hardware_unit,
				    header);
		pr_info("DRHD base: %#016Lx flags: %#x\n",
			(unsigned long long)drhd->address, drhd->flags);
		break;
	case ACPI_DMAR_TYPE_RESERVED_MEMORY:
		rmrr = container_of(header, struct acpi_dmar_reserved_memory,
				    header);
		pr_info("RMRR base: %#016Lx end: %#016Lx\n",
			(unsigned long long)rmrr->base_address,
			(unsigned long long)rmrr->end_address);
		break;
	case ACPI_DMAR_TYPE_ATSR:
		atsr = container_of(header, struct acpi_dmar_atsr, header);
		pr_info("ATSR flags: %#x\n", atsr->flags);
		break;
	case ACPI_DMAR_HARDWARE_AFFINITY:
		rhsa = container_of(header, struct acpi_dmar_rhsa, header);
		pr_info("RHSA base: %#016Lx proximity domain: %#x\n",
			(unsigned long long)rhsa->base_address,
			rhsa->proximity_domain);
		break;
	}
}

/**
 * dmar_table_detect - checks to see if the platform supports DMAR devices
 */
static int __init dmar_table_detect(void)
{
	acpi_status status = AE_OK;

	/* if we can find the DMAR table, then there are DMAR devices */
	status = acpi_get_table_with_size(ACPI_SIG_DMAR, 0,
				(struct acpi_table_header **)&dmar_tbl,
				&dmar_tbl_size);

	if (ACPI_SUCCESS(status) && !dmar_tbl) {
		pr_warn("Unable to map DMAR\n");
		status = AE_NOT_FOUND;
	}

	return (ACPI_SUCCESS(status) ? 1 : 0);
}

/**
 * parse_dmar_table - parses the DMA reporting table
 */
static int __init
parse_dmar_table(void)
{
	struct acpi_table_dmar *dmar;
	struct acpi_dmar_header *entry_header;
	int ret = 0;

	/*
	 * Do it again; the earlier dmar_tbl mapping could have been done
	 * with the fixed map.
	 */
	dmar_table_detect();

	/*
	 * ACPI tables may not be DMA protected by tboot, so use the DMAR copy
	 * SINIT saved in SinitMleData in the TXT heap (which is DMA protected)
	 */
	dmar_tbl = tboot_get_dmar_table(dmar_tbl);

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return -ENODEV;

	if (dmar->width < PAGE_SHIFT - 1) {
		pr_warn("Invalid DMAR haw\n");
		return -EINVAL;
	}

	pr_info("Host address width %d\n", dmar->width + 1);

	entry_header = (struct acpi_dmar_header *)(dmar + 1);
	while (((unsigned long)entry_header) <
			(((unsigned long)dmar) + dmar_tbl->length)) {
		/* Avoid looping forever on bad ACPI tables */
		if (entry_header->length == 0) {
			pr_warn("Invalid 0-length structure\n");
			ret = -EINVAL;
			break;
		}

		dmar_table_print_dmar_entry(entry_header);

		switch (entry_header->type) {
		case ACPI_DMAR_TYPE_HARDWARE_UNIT:
			ret = dmar_parse_one_drhd(entry_header);
			break;
		case ACPI_DMAR_TYPE_RESERVED_MEMORY:
			ret = dmar_parse_one_rmrr(entry_header);
			break;
		case ACPI_DMAR_TYPE_ATSR:
			ret = dmar_parse_one_atsr(entry_header);
			break;
		case ACPI_DMAR_HARDWARE_AFFINITY:
#ifdef CONFIG_ACPI_NUMA
			ret = dmar_parse_one_rhsa(entry_header);
#endif
			break;
		default:
			pr_warn("Unknown DMAR structure type %d\n",
				entry_header->type);
			ret = 0; /* for forward compatibility */
			break;
		}
		if (ret)
			break;

		entry_header = ((void *)entry_header + entry_header->length);
	}
	return ret;
}

static int dmar_pci_device_match(struct pci_dev *devices[], int cnt,
				 struct pci_dev *dev)
{
	int index;

	while (dev) {
		for (index = 0; index < cnt; index++)
			if (dev == devices[index])
				return 1;

		/* Check our parent */
		dev = dev->bus->self;
	}

	return 0;
}

struct dmar_drhd_unit *
dmar_find_matched_drhd_unit(struct pci_dev *dev)
{
	struct dmar_drhd_unit *dmaru = NULL;
	struct acpi_dmar_hardware_unit *drhd;

	dev = pci_physfn(dev);

	list_for_each_entry(dmaru, &dmar_drhd_units, list) {
		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit,
				    header);

		if (dmaru->include_all &&
		    drhd->segment == pci_domain_nr(dev->bus))
			return dmaru;

		if (dmar_pci_device_match(dmaru->devices,
					  dmaru->devices_cnt, dev))
			return dmaru;
	}

	return NULL;
}
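
/*
 * Usage sketch (illustrative, mirroring what a DMA-remapping caller does):
 *
 *	struct dmar_drhd_unit *drhd = dmar_find_matched_drhd_unit(pdev);
 *	if (drhd)
 *		... use drhd->iommu for this device ...
 *
 * Note the pci_physfn() above: for an SR-IOV virtual function the lookup
 * is done with its physical function, since device scopes describe PFs.
 */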

int __init dmar_dev_scope_init(void)
{
	static int dmar_dev_scope_initialized;
	struct dmar_drhd_unit *drhd, *drhd_n;
	int ret = -ENODEV;

	if (dmar_dev_scope_initialized)
		return dmar_dev_scope_initialized;

	if (list_empty(&dmar_drhd_units))
		goto fail;

	list_for_each_entry_safe(drhd, drhd_n, &dmar_drhd_units, list) {
		ret = dmar_parse_dev(drhd);
		if (ret)
			goto fail;
	}

	ret = dmar_parse_rmrr_atsr_dev();
	if (ret)
		goto fail;

	dmar_dev_scope_initialized = 1;
	return 0;

fail:
	dmar_dev_scope_initialized = ret;
	return ret;
}

int __init dmar_table_init(void)
{
	static int dmar_table_initialized;
	int ret;

	if (dmar_table_initialized)
		return 0;

	dmar_table_initialized = 1;

	ret = parse_dmar_table();
	if (ret) {
		if (ret != -ENODEV)
			pr_info("parse DMAR table failure.\n");
		return ret;
	}

	if (list_empty(&dmar_drhd_units)) {
		pr_info("No DMAR devices found\n");
		return -ENODEV;
	}

	return 0;
}

static void warn_invalid_dmar(u64 addr, const char *message)
{
	WARN_TAINT_ONCE(
		1, TAINT_FIRMWARE_WORKAROUND,
		"Your BIOS is broken; DMAR reported at address %llx%s!\n"
		"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		addr, message,
		dmi_get_system_info(DMI_BIOS_VENDOR),
		dmi_get_system_info(DMI_BIOS_VERSION),
		dmi_get_system_info(DMI_PRODUCT_VERSION));
}

int __init check_zero_address(void)
{
	struct acpi_table_dmar *dmar;
	struct acpi_dmar_header *entry_header;
	struct acpi_dmar_hardware_unit *drhd;

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	entry_header = (struct acpi_dmar_header *)(dmar + 1);

	while (((unsigned long)entry_header) <
			(((unsigned long)dmar) + dmar_tbl->length)) {
		/* Avoid looping forever on bad ACPI tables */
		if (entry_header->length == 0) {
			pr_warn("Invalid 0-length structure\n");
			return 0;
		}

		if (entry_header->type == ACPI_DMAR_TYPE_HARDWARE_UNIT) {
			void __iomem *addr;
			u64 cap, ecap;

			drhd = (void *)entry_header;
			if (!drhd->address) {
				warn_invalid_dmar(0, "");
				goto failed;
			}

			addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
			if (!addr) {
				pr_warn("IOMMU: can't validate: %llx\n",
					drhd->address);
				goto failed;
			}
			cap = dmar_readq(addr + DMAR_CAP_REG);
			ecap = dmar_readq(addr + DMAR_ECAP_REG);
			early_iounmap(addr, VTD_PAGE_SIZE);
			if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
				warn_invalid_dmar(drhd->address,
						  " returns all ones");
				goto failed;
			}
		}

		entry_header = ((void *)entry_header + entry_header->length);
	}
	return 1;

failed:
	return 0;
}

int __init detect_intel_iommu(void)
{
	int ret;

	ret = dmar_table_detect();
	if (ret)
		ret = check_zero_address();
	{
		struct acpi_table_dmar *dmar;

		dmar = (struct acpi_table_dmar *)dmar_tbl;

		if (ret && irq_remapping_enabled && cpu_has_x2apic &&
		    dmar->flags & 0x1)
			pr_info("Queued invalidation will be enabled to support x2apic and Intr-remapping.\n");

		if (ret && !no_iommu && !iommu_detected && !dmar_disabled) {
			iommu_detected = 1;
			/* Make sure ACS will be enabled */
			pci_request_acs();
		}

#ifdef CONFIG_X86
		if (ret)
			x86_init.iommu.iommu_init = intel_iommu_init;
#endif
	}
	early_acpi_os_unmap_memory(dmar_tbl, dmar_tbl_size);
	dmar_tbl = NULL;

	return ret ? 1 : -ENODEV;
}

static void unmap_iommu(struct intel_iommu *iommu)
{
	iounmap(iommu->reg);
	release_mem_region(iommu->reg_phys, iommu->reg_size);
}

/**
 * map_iommu: map the iommu's registers
 * @iommu: the iommu to map
 * @phys_addr: the physical address of the base register
 *
 * Memory map the iommu's registers. Start w/ a single page, and
 * possibly expand if that turns out to be insufficient.
 */
static int map_iommu(struct intel_iommu *iommu, u64 phys_addr)
{
	int map_size, err = 0;

	iommu->reg_phys = phys_addr;
	iommu->reg_size = VTD_PAGE_SIZE;

	if (!request_mem_region(iommu->reg_phys, iommu->reg_size, iommu->name)) {
		pr_err("IOMMU: can't reserve memory\n");
		err = -EBUSY;
		goto out;
	}

	iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
	if (!iommu->reg) {
		pr_err("IOMMU: can't map the region\n");
		err = -ENOMEM;
		goto release;
	}

	iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
	iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);

	if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
		err = -EINVAL;
		warn_invalid_dmar(phys_addr, " returns all ones");
		goto unmap;
	}

	/* the registers might be more than one page */
	map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
			 cap_max_fault_reg_offset(iommu->cap));
	map_size = VTD_PAGE_ALIGN(map_size);
	if (map_size > iommu->reg_size) {
		iounmap(iommu->reg);
		release_mem_region(iommu->reg_phys, iommu->reg_size);
		iommu->reg_size = map_size;
		if (!request_mem_region(iommu->reg_phys, iommu->reg_size,
					iommu->name)) {
			pr_err("IOMMU: can't reserve memory\n");
			err = -EBUSY;
			goto out;
		}
		iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
		if (!iommu->reg) {
			pr_err("IOMMU: can't map the region\n");
			err = -ENOMEM;
			goto release;
		}
	}
	err = 0;
	goto out;

unmap:
	iounmap(iommu->reg);
release:
	release_mem_region(iommu->reg_phys, iommu->reg_size);
out:
	return err;
}
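
/*
 * Worked example for the remap logic above (hypothetical capability
 * values): if ecap_max_iotlb_offset() reports 0x1008, VTD_PAGE_ALIGN()
 * rounds map_size up to 0x2000; that exceeds the initial one-page
 * VTD_PAGE_SIZE mapping, so the region is released and remapped at two
 * pages.
 */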

int alloc_iommu(struct dmar_drhd_unit *drhd)
{
	struct intel_iommu *iommu;
	u32 ver, sts;
	static int iommu_allocated = 0;
	int agaw = 0;
	int msagaw = 0;
	int err;

	if (!drhd->reg_base_addr) {
		warn_invalid_dmar(0, "");
		return -EINVAL;
	}

	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return -ENOMEM;

	iommu->seq_id = iommu_allocated++;
	sprintf(iommu->name, "dmar%d", iommu->seq_id);

	err = map_iommu(iommu, drhd->reg_base_addr);
	if (err) {
		pr_err("IOMMU: failed to map %s\n", iommu->name);
		goto error;
	}

	err = -EINVAL;
	agaw = iommu_calculate_agaw(iommu);
	if (agaw < 0) {
		pr_err("Cannot get a valid agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto err_unmap;
	}
	msagaw = iommu_calculate_max_sagaw(iommu);
	if (msagaw < 0) {
		pr_err("Cannot get a valid max agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto err_unmap;
	}
	iommu->agaw = agaw;
	iommu->msagaw = msagaw;

	iommu->node = -1;

	ver = readl(iommu->reg + DMAR_VER_REG);
	pr_info("IOMMU %d: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
		iommu->seq_id,
		(unsigned long long)drhd->reg_base_addr,
		DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
		(unsigned long long)iommu->cap,
		(unsigned long long)iommu->ecap);

	/* Reflect status in gcmd */
	sts = readl(iommu->reg + DMAR_GSTS_REG);
	if (sts & DMA_GSTS_IRES)
		iommu->gcmd |= DMA_GCMD_IRE;
	if (sts & DMA_GSTS_TES)
		iommu->gcmd |= DMA_GCMD_TE;
	if (sts & DMA_GSTS_QIES)
		iommu->gcmd |= DMA_GCMD_QIE;

	raw_spin_lock_init(&iommu->register_lock);

	drhd->iommu = iommu;
	return 0;

err_unmap:
	unmap_iommu(iommu);
error:
	kfree(iommu);
	return err;
}

void free_iommu(struct intel_iommu *iommu)
{
	if (!iommu)
		return;

	free_dmar_iommu(iommu);

	if (iommu->reg)
		unmap_iommu(iommu);

	kfree(iommu);
}

/*
 * Reclaim all the submitted descriptors which have completed their work.
 */
static inline void reclaim_free_desc(struct q_inval *qi)
{
	while (qi->desc_status[qi->free_tail] == QI_DONE ||
	       qi->desc_status[qi->free_tail] == QI_ABORT) {
		qi->desc_status[qi->free_tail] = QI_FREE;
		qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
		qi->free_cnt++;
	}
}
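
/*
 * free_tail chases the submission point around the QI_LENGTH-entry ring:
 * every completed (QI_DONE) or aborted (QI_ABORT) slot it passes is
 * returned to QI_FREE and free_cnt grows accordingly. It stops at the
 * first slot still QI_IN_USE, so reclamation is strictly in order.
 */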

static int qi_check_fault(struct intel_iommu *iommu, int index)
{
	u32 fault;
	int head, tail;
	struct q_inval *qi = iommu->qi;
	int wait_index = (index + 1) % QI_LENGTH;

	if (qi->desc_status[wait_index] == QI_ABORT)
		return -EAGAIN;

	fault = readl(iommu->reg + DMAR_FSTS_REG);

	/*
	 * If IQE happens, the head points to the descriptor associated
	 * with the error. No new descriptors are fetched until the IQE
	 * is cleared.
	 */
	if (fault & DMA_FSTS_IQE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		if ((head >> DMAR_IQ_SHIFT) == index) {
			pr_err("VT-d detected invalid descriptor: "
			       "low=%llx, high=%llx\n",
			       (unsigned long long)qi->desc[index].low,
			       (unsigned long long)qi->desc[index].high);
			memcpy(&qi->desc[index], &qi->desc[wait_index],
			       sizeof(struct qi_desc));
			__iommu_flush_cache(iommu, &qi->desc[index],
					    sizeof(struct qi_desc));
			writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
			return -EINVAL;
		}
	}

	/*
	 * If ITE happens, all pending wait_desc commands are aborted.
	 * No new descriptors are fetched until the ITE is cleared.
	 */
	if (fault & DMA_FSTS_ITE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		head = ((head >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
		head |= 1;
		tail = readl(iommu->reg + DMAR_IQT_REG);
		tail = ((tail >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;

		writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);

		do {
			if (qi->desc_status[head] == QI_IN_USE)
				qi->desc_status[head] = QI_ABORT;
			head = (head - 2 + QI_LENGTH) % QI_LENGTH;
		} while (head != tail);

		if (qi->desc_status[wait_index] == QI_ABORT)
			return -EAGAIN;
	}

	if (fault & DMA_FSTS_ICE)
		writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);

	return 0;
}

/*
 * Submit the queued invalidation descriptor to the remapping
 * hardware unit and wait for its completion.
 */
int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
{
	int rc;
	struct q_inval *qi = iommu->qi;
	struct qi_desc *hw, wait_desc;
	int wait_index, index;
	unsigned long flags;

	if (!qi)
		return 0;

	hw = qi->desc;

restart:
	rc = 0;

	raw_spin_lock_irqsave(&qi->q_lock, flags);
	while (qi->free_cnt < 3) {
		raw_spin_unlock_irqrestore(&qi->q_lock, flags);
		cpu_relax();
		raw_spin_lock_irqsave(&qi->q_lock, flags);
	}

	index = qi->free_head;
	wait_index = (index + 1) % QI_LENGTH;

	qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;

	hw[index] = *desc;

	wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
			QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
	wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);

	hw[wait_index] = wait_desc;

	__iommu_flush_cache(iommu, &hw[index], sizeof(struct qi_desc));
	__iommu_flush_cache(iommu, &hw[wait_index], sizeof(struct qi_desc));

	qi->free_head = (qi->free_head + 2) % QI_LENGTH;
	qi->free_cnt -= 2;

	/*
	 * update the HW tail register indicating the presence of
	 * new descriptors.
	 */
	writel(qi->free_head << DMAR_IQ_SHIFT, iommu->reg + DMAR_IQT_REG);

	while (qi->desc_status[wait_index] != QI_DONE) {
		/*
		 * We will leave the interrupts disabled, to prevent the
		 * interrupt context from queueing another cmd while a cmd
		 * is already submitted and waiting for completion on this
		 * cpu. This is to avoid a deadlock where the interrupt
		 * context can wait indefinitely for free slots in the queue.
		 */
		rc = qi_check_fault(iommu, index);
		if (rc)
			break;

		raw_spin_unlock(&qi->q_lock);
		cpu_relax();
		raw_spin_lock(&qi->q_lock);
	}

	qi->desc_status[index] = QI_DONE;

	reclaim_free_desc(qi);
	raw_spin_unlock_irqrestore(&qi->q_lock, flags);

	if (rc == -EAGAIN)
		goto restart;

	return rc;
}
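
/*
 * A minimal caller sketch: build a descriptor on the stack and submit it
 * synchronously. qi_global_iec() below is exactly this pattern:
 *
 *	struct qi_desc desc;
 *
 *	desc.low = QI_IEC_TYPE;
 *	desc.high = 0;
 *	qi_submit_sync(&desc, iommu);
 *
 * Each submission consumes two ring slots: the caller's descriptor plus a
 * wait descriptor whose status write-back is what the busy-loop above polls.
 */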

/*
 * Flush the global interrupt entry cache.
 */
void qi_global_iec(struct intel_iommu *iommu)
{
	struct qi_desc desc;

	desc.low = QI_IEC_TYPE;
	desc.high = 0;

	/* should never fail */
	qi_submit_sync(&desc, iommu);
}

void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
		      u64 type)
{
	struct qi_desc desc;

	desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
			| QI_CC_GRAN(type) | QI_CC_TYPE;
	desc.high = 0;

	qi_submit_sync(&desc, iommu);
}

void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
		    unsigned int size_order, u64 type)
{
	u8 dw = 0, dr = 0;

	struct qi_desc desc;
	int ih = 0;

	if (cap_write_drain(iommu->cap))
		dw = 1;

	if (cap_read_drain(iommu->cap))
		dr = 1;

	desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
		| QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
	desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
		| QI_IOTLB_AM(size_order);

	qi_submit_sync(&desc, iommu);
}
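
/*
 * The address-mask (AM) field is a power-of-two page count: size_order 0
 * invalidates a single 4KiB page, size_order 2 a 16KiB (4-page) aligned
 * region, and so on. Call sketch (hypothetical did/addr values):
 *
 *	qi_flush_iotlb(iommu, did, addr, 2, DMA_TLB_PSI_FLUSH);
 *
 * where DMA_TLB_PSI_FLUSH is the page-selective granularity used by the
 * DMA-remapping code.
 */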

void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
			u64 addr, unsigned mask)
{
	struct qi_desc desc;

	if (mask) {
		BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1));
		addr |= (1 << (VTD_PAGE_SHIFT + mask - 1)) - 1;
		desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
	} else
		desc.high = QI_DEV_IOTLB_ADDR(addr);

	if (qdep >= QI_DEV_IOTLB_MAX_INVS)
		qdep = 0;

	desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
		   QI_DIOTLB_TYPE;

	qi_submit_sync(&desc, iommu);
}
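
/*
 * Worked example for the mask handling above (hypothetical values): with
 * VTD_PAGE_SHIFT == 12 and mask == 2, the BUG_ON() enforces 16KiB
 * alignment, and addr |= 0x1fff sets the low address bits so that, after
 * QI_DEV_IOTLB_ADDR() masks off the page offset, the lowest zero address
 * bit (bit 13 here) together with QI_DEV_IOTLB_SIZE tells the device that
 * a 4-page range is being invalidated.
 */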

/*
 * Disable Queued Invalidation interface.
 */
void dmar_disable_qi(struct intel_iommu *iommu)
{
	unsigned long flags;
	u32 sts;
	cycles_t start_time = get_cycles();

	if (!ecap_qis(iommu->ecap))
		return;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	sts = readl(iommu->reg + DMAR_GSTS_REG);
	if (!(sts & DMA_GSTS_QIES))
		goto end;

	/*
	 * Give a chance to HW to complete the pending invalidation requests.
	 */
	while ((readl(iommu->reg + DMAR_IQT_REG) !=
		readl(iommu->reg + DMAR_IQH_REG)) &&
	       (DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
		cpu_relax();

	iommu->gcmd &= ~DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
		      !(sts & DMA_GSTS_QIES), sts);
end:
	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

/*
 * Enable queued invalidation.
 */
static void __dmar_enable_qi(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;
	struct q_inval *qi = iommu->qi;

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	/* write zero to the tail reg */
	writel(0, iommu->reg + DMAR_IQT_REG);

	dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));

	iommu->gcmd |= DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

/*
 * Enable Queued Invalidation interface. This is a must to support
 * interrupt-remapping. Also used by DMA-remapping, which replaces
 * register based IOTLB invalidation.
 */
int dmar_enable_qi(struct intel_iommu *iommu)
{
	struct q_inval *qi;
	struct page *desc_page;

	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	/*
	 * queued invalidation is already set up and enabled.
	 */
	if (iommu->qi)
		return 0;

	iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
	if (!iommu->qi)
		return -ENOMEM;

	qi = iommu->qi;

	desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0);
	if (!desc_page) {
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	qi->desc = page_address(desc_page);

	qi->desc_status = kzalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
	if (!qi->desc_status) {
		free_page((unsigned long) qi->desc);
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	raw_spin_lock_init(&qi->q_lock);

	__dmar_enable_qi(iommu);

	return 0;
}
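
/*
 * Sizing note: with the usual definitions (QI_LENGTH == 256 slots of
 * 16-byte struct qi_desc) the descriptor ring is exactly one 4KiB page,
 * which is why a single order-0 page from the IOMMU's home node suffices
 * above.
 */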

/* iommu interrupt handling. Most of this is MSI-like. */

enum faulttype {
	DMA_REMAP,
	INTR_REMAP,
	UNKNOWN,
};

static const char *dma_remap_fault_reasons[] =
{
	"Software",
	"Present bit in root entry is clear",
	"Present bit in context entry is clear",
	"Invalid context entry",
	"Access beyond MGAW",
	"PTE Write access is not set",
	"PTE Read access is not set",
	"Next page table ptr is invalid",
	"Root table address invalid",
	"Context table ptr is invalid",
	"non-zero reserved fields in RTP",
	"non-zero reserved fields in CTP",
	"non-zero reserved fields in PTE",
	"PCE for translation request specifies blocking",
};

static const char *irq_remap_fault_reasons[] =
{
	"Detected reserved fields in the decoded interrupt-remapped request",
	"Interrupt index exceeded the interrupt-remapping table size",
	"Present field in the IRTE entry is clear",
	"Error accessing interrupt-remapping table pointed by IRTA_REG",
	"Detected reserved fields in the IRTE entry",
	"Blocked a compatibility format interrupt request",
	"Blocked an interrupt request due to source-id verification failure",
};
0ac2491f
SS
1110#define MAX_FAULT_REASON_IDX (ARRAY_SIZE(fault_reason_strings) - 1)
1111
9d783ba0 1112const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
0ac2491f 1113{
fefe1ed1
DC
1114 if (fault_reason >= 0x20 && (fault_reason - 0x20 <
1115 ARRAY_SIZE(irq_remap_fault_reasons))) {
9d783ba0 1116 *fault_type = INTR_REMAP;
95a02e97 1117 return irq_remap_fault_reasons[fault_reason - 0x20];
9d783ba0
SS
1118 } else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
1119 *fault_type = DMA_REMAP;
1120 return dma_remap_fault_reasons[fault_reason];
1121 } else {
1122 *fault_type = UNKNOWN;
0ac2491f 1123 return "Unknown";
9d783ba0 1124 }
0ac2491f
SS
1125}
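
/*
 * Example: a hardware fault_reason of 0x21 is an interrupt-remapping
 * fault and maps to irq_remap_fault_reasons[1] ("Interrupt index exceeded
 * the interrupt-remapping table size"), with *fault_type set to
 * INTR_REMAP; a fault_reason of 5 stays in the DMA-remapping table
 * ("PTE Write access is not set").
 */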

void dmar_msi_unmask(struct irq_data *data)
{
	struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
	unsigned long flag;

	/* unmask it */
	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(0, iommu->reg + DMAR_FECTL_REG);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + DMAR_FECTL_REG);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_mask(struct irq_data *data)
{
	unsigned long flag;
	struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);

	/* mask it */
	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + DMAR_FECTL_REG);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_write(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = irq_get_handler_data(irq);
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
	writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
	writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_read(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = irq_get_handler_data(irq);
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
	msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
	msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
		u8 fault_reason, u16 source_id, unsigned long long addr)
{
	const char *reason;
	int fault_type;

	reason = dmar_get_fault_reason(fault_reason, &fault_type);

	if (fault_type == INTR_REMAP)
		pr_err("INTR-REMAP: Request device [%02x:%02x.%d] "
		       "fault index %llx\n"
		       "INTR-REMAP:[fault reason %02d] %s\n",
		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr >> 48,
		       fault_reason, reason);
	else
		pr_err("DMAR:[%s] Request device [%02x:%02x.%d] "
		       "fault addr %llx\n"
		       "DMAR:[fault reason %02d] %s\n",
		       (type ? "DMA Read" : "DMA Write"),
		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
	return 0;
}

#define PRIMARY_FAULT_REG_LEN (16)
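
/*
 * Each primary fault record is 16 bytes wide, matching the reads below:
 * bytes 0-7 hold the faulting page address, the dword at offset 8 carries
 * the source-id, and the dword at offset 12 carries the fault reason and
 * type plus the DMA_FRCD_F valid bit that both terminates the scan and,
 * when written back, clears the record.
 */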
irqreturn_t dmar_fault(int irq, void *dev_id)
{
	struct intel_iommu *iommu = dev_id;
	int reg, fault_index;
	u32 fault_status;
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
	if (fault_status)
		pr_err("DRHD: handling fault status reg %x\n", fault_status);

	/* TBD: ignore advanced fault log currently */
	if (!(fault_status & DMA_FSTS_PPF))
		goto unlock_exit;

	fault_index = dma_fsts_fault_record_index(fault_status);
	reg = cap_fault_reg_offset(iommu->cap);
	while (1) {
		u8 fault_reason;
		u16 source_id;
		u64 guest_addr;
		int type;
		u32 data;

		/* highest 32 bits */
		data = readl(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN + 12);
		if (!(data & DMA_FRCD_F))
			break;

		fault_reason = dma_frcd_fault_reason(data);
		type = dma_frcd_type(data);

		data = readl(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN + 8);
		source_id = dma_frcd_source_id(data);

		guest_addr = dmar_readq(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN);
		guest_addr = dma_frcd_page_addr(guest_addr);
		/* clear the fault */
		writel(DMA_FRCD_F, iommu->reg + reg +
			fault_index * PRIMARY_FAULT_REG_LEN + 12);

		raw_spin_unlock_irqrestore(&iommu->register_lock, flag);

		dmar_fault_do_one(iommu, type, fault_reason,
				  source_id, guest_addr);

		fault_index++;
		if (fault_index >= cap_num_fault_regs(iommu->cap))
			fault_index = 0;
		raw_spin_lock_irqsave(&iommu->register_lock, flag);
	}

	writel(DMA_FSTS_PFO | DMA_FSTS_PPF, iommu->reg + DMAR_FSTS_REG);

unlock_exit:
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
	return IRQ_HANDLED;
}

int dmar_set_interrupt(struct intel_iommu *iommu)
{
	int irq, ret;

	/*
	 * Check if the fault interrupt is already initialized.
	 */
	if (iommu->irq)
		return 0;

	irq = create_irq();
	if (!irq) {
		pr_err("IOMMU: no free vectors\n");
		return -EINVAL;
	}

	irq_set_handler_data(irq, iommu);
	iommu->irq = irq;

	ret = arch_setup_dmar_msi(irq);
	if (ret) {
		irq_set_handler_data(irq, NULL);
		iommu->irq = 0;
		destroy_irq(irq);
		return ret;
	}

	ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD, iommu->name, iommu);
	if (ret)
		pr_err("IOMMU: can't request irq\n");
	return ret;
}

int __init enable_drhd_fault_handling(void)
{
	struct dmar_drhd_unit *drhd;

	/*
	 * Enable fault control interrupt.
	 */
	for_each_drhd_unit(drhd) {
		int ret;
		struct intel_iommu *iommu = drhd->iommu;
		u32 fault_status;
		ret = dmar_set_interrupt(iommu);

		if (ret) {
			pr_err("DRHD %Lx: failed to enable fault interrupt, ret %d\n",
			       (unsigned long long)drhd->reg_base_addr, ret);
			return -1;
		}

		/*
		 * Clear any previous faults.
		 */
		dmar_fault(iommu->irq, iommu);
		fault_status = readl(iommu->reg + DMAR_FSTS_REG);
		writel(fault_status, iommu->reg + DMAR_FSTS_REG);
	}

	return 0;
}

/*
 * Re-enable Queued Invalidation interface.
 */
int dmar_reenable_qi(struct intel_iommu *iommu)
{
	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	if (!iommu->qi)
		return -ENOENT;

	/*
	 * First disable queued invalidation.
	 */
	dmar_disable_qi(iommu);
	/*
	 * Then enable queued invalidation again. Since there are no pending
	 * invalidation requests now, it's safe to re-enable queued
	 * invalidation.
	 */
	__dmar_enable_qi(iommu);

	return 0;
}

/*
 * Check interrupt remapping support in DMAR table description.
 */
int __init dmar_ir_support(void)
{
	struct acpi_table_dmar *dmar;

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return 0;
	return dmar->flags & 0x1;
}

IOMMU_INIT_POST(detect_intel_iommu);