/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 * Author: Fenghua Yu <fenghua.yu@intel.com>
 */

#include <linux/init.h>
#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/dma-mapping.h>
#include <linux/mempool.h>
#include <linux/timer.h>
#include <linux/iova.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/syscore_ops.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/pci-ats.h>
#include <linux/memblock.h>
#include <asm/irq_remapping.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>

#define ROOT_SIZE		VTD_PAGE_SIZE
#define CONTEXT_SIZE		VTD_PAGE_SIZE

#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
#define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)

#define IOAPIC_RANGE_START	(0xfee00000)
#define IOAPIC_RANGE_END	(0xfeefffff)
#define IOVA_START_ADDR		(0x1000)

#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48

#define MAX_AGAW_WIDTH 64

#define __DOMAIN_MAX_PFN(gaw)  ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
#define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)

/* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
   to match. That way, we can use 'unsigned long' for PFNs with impunity. */
#define DOMAIN_MAX_PFN(gaw)	((unsigned long) min_t(uint64_t, \
				__DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
#define DOMAIN_MAX_ADDR(gaw)	(((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)

#define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)
#define DMA_32BIT_PFN		IOVA_PFN(DMA_BIT_MASK(32))
#define DMA_64BIT_PFN		IOVA_PFN(DMA_BIT_MASK(64))

/* page table handling */
#define LEVEL_STRIDE		(9)
#define LEVEL_MASK		(((u64)1 << LEVEL_STRIDE) - 1)

/*
 * This bitmap is used to advertise the page sizes our hardware supports
 * to the IOMMU core, which will then use this information to split
 * physically contiguous memory regions it is mapping into page sizes
 * that we support.
 *
 * Traditionally the IOMMU core just handed us the mappings directly,
 * after making sure the size is an order of a 4KiB page and that the
 * mapping has natural alignment.
 *
 * To retain this behavior, we currently advertise that we support
 * all page sizes that are an order of 4KiB.
 *
 * If at some point we'd like to utilize the IOMMU core's new behavior,
 * we could change this to advertise the real page sizes we support.
 */
#define INTEL_IOMMU_PGSIZES	(~0xFFFUL)

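/*
 * Reader's note (added for illustration, not part of the original source):
 * with ~0xFFFUL every bit from 12 upwards is set, so the IOMMU core sees
 * 4KiB, 8KiB, ..., 2MiB, 1GiB, etc. as supported and only hands us
 * naturally aligned, power-of-two sized chunks.
 */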

static inline int agaw_to_level(int agaw)
{
	return agaw + 2;
}

static inline int agaw_to_width(int agaw)
{
	return 30 + agaw * LEVEL_STRIDE;
}

static inline int width_to_agaw(int width)
{
	return (width - 30) / LEVEL_STRIDE;
}
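
/*
 * Worked example (derived from the helpers above, not from the spec text):
 * agaw 1 corresponds to a 39-bit address width and a 3-level table,
 * agaw 2 to 48 bits and 4 levels, so DEFAULT_DOMAIN_ADDRESS_WIDTH (48)
 * maps to agaw 2.
 */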

static inline unsigned int level_to_offset_bits(int level)
{
	return (level - 1) * LEVEL_STRIDE;
}

static inline int pfn_level_offset(unsigned long pfn, int level)
{
	return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
}

static inline unsigned long level_mask(int level)
{
	return -1UL << level_to_offset_bits(level);
}

static inline unsigned long level_size(int level)
{
	return 1UL << level_to_offset_bits(level);
}

static inline unsigned long align_to_level(unsigned long pfn, int level)
{
	return (pfn + level_size(level) - 1) & level_mask(level);
}

static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
{
	return 1 << ((lvl - 1) * LEVEL_STRIDE);
}
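
/*
 * Illustrative values (added): level 1 covers one VT-d page (4KiB),
 * level 2 covers 512 pages (2MiB), level 3 covers 512^2 pages (1GiB).
 */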

/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
   are never going to work. */
static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
{
	return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
{
	return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
}
static inline unsigned long page_to_dma_pfn(struct page *pg)
{
	return mm_to_dma_pfn(page_to_pfn(pg));
}
static inline unsigned long virt_to_dma_pfn(void *p)
{
	return page_to_dma_pfn(virt_to_page(p));
}
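
/*
 * Note (added, illustrative): on x86 with 4KiB MM pages PAGE_SHIFT equals
 * VTD_PAGE_SHIFT, the shift above is zero and the two PFN spaces coincide;
 * the conversions only matter when MM pages are larger than VT-d pages.
 */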

/* global iommu list, set NULL for ignored DMAR units */
static struct intel_iommu **g_iommus;

static void __init check_tylersburg_isoch(void);
static int rwbf_quirk;

/*
 * set to 1 to panic kernel if can't successfully enable VT-d
 * (used when kernel is launched w/ TXT)
 */
static int force_on = 0;

/*
 * 0: Present
 * 1-11: Reserved
 * 12-63: Context Ptr (12 - (haw-1))
 * 64-127: Reserved
 */
struct root_entry {
	u64	val;
	u64	rsvd1;
};
#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
static inline bool root_present(struct root_entry *root)
{
	return (root->val & 1);
}
static inline void set_root_present(struct root_entry *root)
{
	root->val |= 1;
}
static inline void set_root_value(struct root_entry *root, unsigned long value)
{
	root->val |= value & VTD_PAGE_MASK;
}

static inline struct context_entry *
get_context_addr_from_root(struct root_entry *root)
{
	return (struct context_entry *)
		(root_present(root)?phys_to_virt(
		root->val & VTD_PAGE_MASK) :
		NULL);
}

/*
 * low 64 bits:
 * 0: present
 * 1: fault processing disable
 * 2-3: translation type
 * 12-63: address space root
 * high 64 bits:
 * 0-2: address width
 * 3-6: aval
 * 8-23: domain id
 */
struct context_entry {
	u64 lo;
	u64 hi;
};

static inline bool context_present(struct context_entry *context)
{
	return (context->lo & 1);
}
static inline void context_set_present(struct context_entry *context)
{
	context->lo |= 1;
}

static inline void context_set_fault_enable(struct context_entry *context)
{
	context->lo &= (((u64)-1) << 2) | 1;
}

static inline void context_set_translation_type(struct context_entry *context,
						unsigned long value)
{
	context->lo &= (((u64)-1) << 4) | 3;
	context->lo |= (value & 3) << 2;
}

static inline void context_set_address_root(struct context_entry *context,
					    unsigned long value)
{
	context->lo |= value & VTD_PAGE_MASK;
}

static inline void context_set_address_width(struct context_entry *context,
					     unsigned long value)
{
	context->hi |= value & 7;
}

static inline void context_set_domain_id(struct context_entry *context,
					 unsigned long value)
{
	context->hi |= (value & ((1 << 16) - 1)) << 8;
}

static inline void context_clear_entry(struct context_entry *context)
{
	context->lo = 0;
	context->hi = 0;
}

/*
 * 0: readable
 * 1: writable
 * 2-6: reserved
 * 7: super page
 * 8-10: available
 * 11: snoop behavior
 * 12-63: Host physical address
 */
struct dma_pte {
	u64 val;
};

static inline void dma_clear_pte(struct dma_pte *pte)
{
	pte->val = 0;
}

static inline void dma_set_pte_readable(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_READ;
}

static inline void dma_set_pte_writable(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_WRITE;
}

static inline void dma_set_pte_snp(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_SNP;
}

static inline void dma_set_pte_prot(struct dma_pte *pte, unsigned long prot)
{
	pte->val = (pte->val & ~3) | (prot & 3);
}

static inline u64 dma_pte_addr(struct dma_pte *pte)
{
#ifdef CONFIG_64BIT
	return pte->val & VTD_PAGE_MASK;
#else
	/* Must have a full atomic 64-bit read */
	return __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
#endif
}

static inline void dma_set_pte_pfn(struct dma_pte *pte, unsigned long pfn)
{
	pte->val |= (uint64_t)pfn << VTD_PAGE_SHIFT;
}

static inline bool dma_pte_present(struct dma_pte *pte)
{
	return (pte->val & 3) != 0;
}

static inline bool dma_pte_superpage(struct dma_pte *pte)
{
	return (pte->val & (1 << 7));
}

static inline int first_pte_in_page(struct dma_pte *pte)
{
	return !((unsigned long)pte & ~VTD_PAGE_MASK);
}

/*
 * This domain is a statically identity mapping domain.
 *	1. This domain creates a static 1:1 mapping to all usable memory.
 *	2. It maps to each iommu if successful.
 *	3. Each iommu maps to this domain if successful.
 */
static struct dmar_domain *si_domain;
static int hw_pass_through = 1;

/* devices under the same p2p bridge are owned in one domain */
#define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0)

/* domain represents a virtual machine, more than one devices
 * across iommus may be owned in one domain, e.g. kvm guest.
 */
#define DOMAIN_FLAG_VIRTUAL_MACHINE	(1 << 1)

/* si_domain contains multiple devices */
#define DOMAIN_FLAG_STATIC_IDENTITY	(1 << 2)

/* define the limit of IOMMUs supported in each domain */
#ifdef	CONFIG_X86
# define	IOMMU_UNITS_SUPPORTED	MAX_IO_APICS
#else
# define	IOMMU_UNITS_SUPPORTED	64
#endif

struct dmar_domain {
	int	id;			/* domain id */
	int	nid;			/* node id */
	DECLARE_BITMAP(iommu_bmp, IOMMU_UNITS_SUPPORTED);
					/* bitmap of iommus this domain uses */

	struct list_head devices;	/* all devices' list */
	struct iova_domain iovad;	/* iova's that belong to this domain */

	struct dma_pte	*pgd;		/* virtual address */
	int		gaw;		/* max guest address width */

	/* adjusted guest address width, 0 is level 2 30-bit */
	int		agaw;

	int		flags;		/* flags to find out type of domain */

	int		iommu_coherency;/* indicate coherency of iommu access */
	int		iommu_snooping; /* indicate snooping control feature */
	int		iommu_count;	/* reference count of iommu */
	int		iommu_superpage;/* Level of superpages supported:
					   0 == 4KiB (no superpages), 1 == 2MiB,
					   2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
	spinlock_t	iommu_lock;	/* protect iommu set in domain */
	u64		max_addr;	/* maximum mapped address */
};

/* PCI domain-device relationship */
struct device_domain_info {
	struct list_head link;	/* link to domain siblings */
	struct list_head global; /* link to global list */
	int segment;		/* PCI domain */
	u8 bus;			/* PCI bus number */
	u8 devfn;		/* PCI devfn number */
	struct pci_dev *dev;	/* it's NULL for PCIe-to-PCI bridge */
	struct intel_iommu *iommu; /* IOMMU used by this device */
	struct dmar_domain *domain; /* pointer to domain */
};

static void flush_unmaps_timeout(unsigned long data);

DEFINE_TIMER(unmap_timer,  flush_unmaps_timeout, 0, 0);

#define HIGH_WATER_MARK 250
struct deferred_flush_tables {
	int next;
	struct iova *iova[HIGH_WATER_MARK];
	struct dmar_domain *domain[HIGH_WATER_MARK];
};

static struct deferred_flush_tables *deferred_flush;

/* bitmap for indexing intel_iommus */
static int g_num_of_iommus;

static DEFINE_SPINLOCK(async_umap_flush_lock);
static LIST_HEAD(unmaps_to_do);

static int timer_on;
static long list_size;

static void domain_remove_dev_info(struct dmar_domain *domain);

#ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
int dmar_disabled = 0;
#else
int dmar_disabled = 1;
#endif /*CONFIG_INTEL_IOMMU_DEFAULT_ON*/

int intel_iommu_enabled = 0;
EXPORT_SYMBOL_GPL(intel_iommu_enabled);

static int dmar_map_gfx = 1;
static int dmar_forcedac;
static int intel_iommu_strict;
static int intel_iommu_superpage = 1;

int intel_iommu_gfx_mapped;
EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);

#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
static DEFINE_SPINLOCK(device_domain_lock);
static LIST_HEAD(device_domain_list);

static struct iommu_ops intel_iommu_ops;

static int __init intel_iommu_setup(char *str)
{
	if (!str)
		return -EINVAL;
	while (*str) {
		if (!strncmp(str, "on", 2)) {
			dmar_disabled = 0;
			printk(KERN_INFO "Intel-IOMMU: enabled\n");
		} else if (!strncmp(str, "off", 3)) {
			dmar_disabled = 1;
			printk(KERN_INFO "Intel-IOMMU: disabled\n");
		} else if (!strncmp(str, "igfx_off", 8)) {
			dmar_map_gfx = 0;
			printk(KERN_INFO
				"Intel-IOMMU: disable GFX device mapping\n");
		} else if (!strncmp(str, "forcedac", 8)) {
			printk(KERN_INFO
				"Intel-IOMMU: Forcing DAC for PCI devices\n");
			dmar_forcedac = 1;
		} else if (!strncmp(str, "strict", 6)) {
			printk(KERN_INFO
				"Intel-IOMMU: disable batched IOTLB flush\n");
			intel_iommu_strict = 1;
		} else if (!strncmp(str, "sp_off", 6)) {
			printk(KERN_INFO
				"Intel-IOMMU: disable supported super page\n");
			intel_iommu_superpage = 0;
		}

		str += strcspn(str, ",");
		while (*str == ',')
			str++;
	}
	return 0;
}
__setup("intel_iommu=", intel_iommu_setup);

static struct kmem_cache *iommu_domain_cache;
static struct kmem_cache *iommu_devinfo_cache;
static struct kmem_cache *iommu_iova_cache;

static inline void *alloc_pgtable_page(int node)
{
	struct page *page;
	void *vaddr = NULL;

	page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
	if (page)
		vaddr = page_address(page);
	return vaddr;
}

static inline void free_pgtable_page(void *vaddr)
{
	free_page((unsigned long)vaddr);
}

static inline void *alloc_domain_mem(void)
{
	return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
}

static void free_domain_mem(void *vaddr)
{
	kmem_cache_free(iommu_domain_cache, vaddr);
}

static inline void * alloc_devinfo_mem(void)
{
	return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
}

static inline void free_devinfo_mem(void *vaddr)
{
	kmem_cache_free(iommu_devinfo_cache, vaddr);
}

struct iova *alloc_iova_mem(void)
{
	return kmem_cache_alloc(iommu_iova_cache, GFP_ATOMIC);
}

void free_iova_mem(struct iova *iova)
{
	kmem_cache_free(iommu_iova_cache, iova);
}


static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
{
	unsigned long sagaw;
	int agaw = -1;

	sagaw = cap_sagaw(iommu->cap);
	for (agaw = width_to_agaw(max_gaw);
	     agaw >= 0; agaw--) {
		if (test_bit(agaw, &sagaw))
			break;
	}

	return agaw;
}

/*
 * Calculate max SAGAW for each iommu.
 */
int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
}

/*
 * calculate agaw for each iommu.
 * "SAGAW" may be different across iommus, use a default agaw, and
 * get a supported less agaw for iommus that don't support the default agaw.
 */
int iommu_calculate_agaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
}

/* This function only returns a single iommu in a domain */
static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
{
	int iommu_id;

	/* si_domain and vm domain should not get here. */
	BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE);
	BUG_ON(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY);

	iommu_id = find_first_bit(domain->iommu_bmp, g_num_of_iommus);
	if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
		return NULL;

	return g_iommus[iommu_id];
}

static void domain_update_iommu_coherency(struct dmar_domain *domain)
{
	int i;

	domain->iommu_coherency = 1;

	for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) {
		if (!ecap_coherent(g_iommus[i]->ecap)) {
			domain->iommu_coherency = 0;
			break;
		}
	}
}

static void domain_update_iommu_snooping(struct dmar_domain *domain)
{
	int i;

	domain->iommu_snooping = 1;

	for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) {
		if (!ecap_sc_support(g_iommus[i]->ecap)) {
			domain->iommu_snooping = 0;
			break;
		}
	}
}

static void domain_update_iommu_superpage(struct dmar_domain *domain)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;
	int mask = 0xf;

	if (!intel_iommu_superpage) {
		domain->iommu_superpage = 0;
		return;
	}

	/* set iommu_superpage to the smallest common denominator */
	for_each_active_iommu(iommu, drhd) {
		mask &= cap_super_page_val(iommu->cap);
		if (!mask) {
			break;
		}
	}
	domain->iommu_superpage = fls(mask);
}

/* Some capabilities may be different across iommus */
static void domain_update_iommu_cap(struct dmar_domain *domain)
{
	domain_update_iommu_coherency(domain);
	domain_update_iommu_snooping(domain);
	domain_update_iommu_superpage(domain);
}

static struct intel_iommu *device_to_iommu(int segment, u8 bus, u8 devfn)
{
	struct dmar_drhd_unit *drhd = NULL;
	int i;

	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;
		if (segment != drhd->segment)
			continue;

		for (i = 0; i < drhd->devices_cnt; i++) {
			if (drhd->devices[i] &&
			    drhd->devices[i]->bus->number == bus &&
			    drhd->devices[i]->devfn == devfn)
				return drhd->iommu;
			if (drhd->devices[i] &&
			    drhd->devices[i]->subordinate &&
			    drhd->devices[i]->subordinate->number <= bus &&
			    drhd->devices[i]->subordinate->subordinate >= bus)
				return drhd->iommu;
		}

		if (drhd->include_all)
			return drhd->iommu;
	}

	return NULL;
}

static void domain_flush_cache(struct dmar_domain *domain,
			       void *addr, int size)
{
	if (!domain->iommu_coherency)
		clflush_cache_range(addr, size);
}

/* Gets context entry for a given bus and devfn */
static struct context_entry * device_to_context_entry(struct intel_iommu *iommu,
		u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long phy_addr;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (!context) {
		context = (struct context_entry *)
				alloc_pgtable_page(iommu->node);
		if (!context) {
			spin_unlock_irqrestore(&iommu->lock, flags);
			return NULL;
		}
		__iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
		phy_addr = virt_to_phys((void *)context);
		set_root_value(root, phy_addr);
		set_root_present(root);
		__iommu_flush_cache(iommu, root, sizeof(*root));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
	return &context[devfn];
}

static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (!context) {
		ret = 0;
		goto out;
	}
	ret = context_present(&context[devfn]);
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
	return ret;
}

static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (context) {
		context_clear_entry(&context[devfn]);
		__iommu_flush_cache(iommu, &context[devfn], \
			sizeof(*context));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static void free_context_table(struct intel_iommu *iommu)
{
	struct root_entry *root;
	int i;
	unsigned long flags;
	struct context_entry *context;

	spin_lock_irqsave(&iommu->lock, flags);
	if (!iommu->root_entry) {
		goto out;
	}
	for (i = 0; i < ROOT_ENTRY_NR; i++) {
		root = &iommu->root_entry[i];
		context = get_context_addr_from_root(root);
		if (context)
			free_pgtable_page(context);
	}
	free_pgtable_page(iommu->root_entry);
	iommu->root_entry = NULL;
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
				      unsigned long pfn, int target_level)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	struct dma_pte *parent, *pte = NULL;
	int level = agaw_to_level(domain->agaw);
	int offset;

	BUG_ON(!domain->pgd);
	BUG_ON(addr_width < BITS_PER_LONG && pfn >> addr_width);
	parent = domain->pgd;

	while (level > 0) {
		void *tmp_page;

		offset = pfn_level_offset(pfn, level);
		pte = &parent[offset];
		if (!target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
			break;
		if (level == target_level)
			break;

		if (!dma_pte_present(pte)) {
			uint64_t pteval;

			tmp_page = alloc_pgtable_page(domain->nid);

			if (!tmp_page)
				return NULL;

			domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
			pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
			if (cmpxchg64(&pte->val, 0ULL, pteval)) {
				/* Someone else set it while we were thinking; use theirs. */
				free_pgtable_page(tmp_page);
			} else {
				dma_pte_addr(pte);
				domain_flush_cache(domain, pte, sizeof(*pte));
			}
		}
		parent = phys_to_virt(dma_pte_addr(pte));
		level--;
	}

	return pte;
}
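
/*
 * Reader's note (added, not in the original source): with target_level == 0
 * the walk descends until it hits a present superpage or a leaf, allocating
 * missing intermediate tables on the way down; the cmpxchg64() above means
 * two walkers racing to populate the same slot keep one table and free the
 * other instead of leaking it.
 */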

/* return address's pte at specific level */
static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
					 unsigned long pfn,
					 int level, int *large_page)
{
	struct dma_pte *parent, *pte = NULL;
	int total = agaw_to_level(domain->agaw);
	int offset;

	parent = domain->pgd;
	while (level <= total) {
		offset = pfn_level_offset(pfn, total);
		pte = &parent[offset];
		if (level == total)
			return pte;

		if (!dma_pte_present(pte)) {
			*large_page = total;
			break;
		}

		if (pte->val & DMA_PTE_LARGE_PAGE) {
			*large_page = total;
			return pte;
		}

		parent = phys_to_virt(dma_pte_addr(pte));
		total--;
	}
	return NULL;
}

/* clear last level pte, a tlb flush should follow */
static int dma_pte_clear_range(struct dmar_domain *domain,
				unsigned long start_pfn,
				unsigned long last_pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	unsigned int large_page = 1;
	struct dma_pte *first_pte, *pte;
	int order;

	BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
	BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
	BUG_ON(start_pfn > last_pfn);

	/* we don't need lock here; nobody else touches the iova range */
	do {
		large_page = 1;
		first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page);
		if (!pte) {
			start_pfn = align_to_level(start_pfn + 1, large_page + 1);
			continue;
		}
		do {
			dma_clear_pte(pte);
			start_pfn += lvl_to_nr_pages(large_page);
			pte++;
		} while (start_pfn <= last_pfn && !first_pte_in_page(pte));

		domain_flush_cache(domain, first_pte,
				   (void *)pte - (void *)first_pte);

	} while (start_pfn && start_pfn <= last_pfn);

	order = (large_page - 1) * 9;
	return order;
}

/* free page table pages. last level pte should already be cleared */
static void dma_pte_free_pagetable(struct dmar_domain *domain,
				   unsigned long start_pfn,
				   unsigned long last_pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	struct dma_pte *first_pte, *pte;
	int total = agaw_to_level(domain->agaw);
	int level;
	unsigned long tmp;
	int large_page = 2;

	BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
	BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
	BUG_ON(start_pfn > last_pfn);

	/* We don't need lock here; nobody else touches the iova range */
	level = 2;
	while (level <= total) {
		tmp = align_to_level(start_pfn, level);

		/* If we can't even clear one PTE at this level, we're done */
		if (tmp + level_size(level) - 1 > last_pfn)
			return;

		do {
			large_page = level;
			first_pte = pte = dma_pfn_level_pte(domain, tmp, level, &large_page);
			if (large_page > level)
				level = large_page + 1;
			if (!pte) {
				tmp = align_to_level(tmp + 1, level + 1);
				continue;
			}
			do {
				if (dma_pte_present(pte)) {
					free_pgtable_page(phys_to_virt(dma_pte_addr(pte)));
					dma_clear_pte(pte);
				}
				pte++;
				tmp += level_size(level);
			} while (!first_pte_in_page(pte) &&
				 tmp + level_size(level) - 1 <= last_pfn);

			domain_flush_cache(domain, first_pte,
					   (void *)pte - (void *)first_pte);

		} while (tmp && tmp + level_size(level) - 1 <= last_pfn);
		level++;
	}
	/* free pgd */
	if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
		free_pgtable_page(domain->pgd);
		domain->pgd = NULL;
	}
}

/* iommu handling */
static int iommu_alloc_root_entry(struct intel_iommu *iommu)
{
	struct root_entry *root;
	unsigned long flags;

	root = (struct root_entry *)alloc_pgtable_page(iommu->node);
	if (!root)
		return -ENOMEM;

	__iommu_flush_cache(iommu, root, ROOT_SIZE);

	spin_lock_irqsave(&iommu->lock, flags);
	iommu->root_entry = root;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

static void iommu_set_root_entry(struct intel_iommu *iommu)
{
	void *addr;
	u32 sts;
	unsigned long flag;

	addr = iommu->root_entry;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));

	writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_RTPS), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static void iommu_flush_write_buffer(struct intel_iommu *iommu)
{
	u32 val;
	unsigned long flag;

	if (!rwbf_quirk && !cap_rwbf(iommu->cap))
		return;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(val & DMA_GSTS_WBFS)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

/* return value determine if we need a write buffer flush */
static void __iommu_flush_context(struct intel_iommu *iommu,
				  u16 did, u16 source_id, u8 function_mask,
				  u64 type)
{
	u64 val = 0;
	unsigned long flag;

	switch (type) {
	case DMA_CCMD_GLOBAL_INVL:
		val = DMA_CCMD_GLOBAL_INVL;
		break;
	case DMA_CCMD_DOMAIN_INVL:
		val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
		break;
	case DMA_CCMD_DEVICE_INVL:
		val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
			| DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
		break;
	default:
		BUG();
	}
	val |= DMA_CCMD_ICC;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
		dmar_readq, (!(val & DMA_CCMD_ICC)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

/* return value determine if we need a write buffer flush */
static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
				u64 addr, unsigned int size_order, u64 type)
{
	int tlb_offset = ecap_iotlb_offset(iommu->ecap);
	u64 val = 0, val_iva = 0;
	unsigned long flag;

	switch (type) {
	case DMA_TLB_GLOBAL_FLUSH:
		/* global flush doesn't need set IVA_REG */
		val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
		break;
	case DMA_TLB_DSI_FLUSH:
		val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		break;
	case DMA_TLB_PSI_FLUSH:
		val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		/* Note: always flush non-leaf currently */
		val_iva = size_order | addr;
		break;
	default:
		BUG();
	}
	/* Note: set drain read/write */
#if 0
	/*
	 * This is probably to be super secure.. Looks like we can
	 * ignore it without any impact.
	 */
	if (cap_read_drain(iommu->cap))
		val |= DMA_TLB_READ_DRAIN;
#endif
	if (cap_write_drain(iommu->cap))
		val |= DMA_TLB_WRITE_DRAIN;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	/* Note: Only uses first TLB reg currently */
	if (val_iva)
		dmar_writeq(iommu->reg + tlb_offset, val_iva);
	dmar_writeq(iommu->reg + tlb_offset + 8, val);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, tlb_offset + 8,
		dmar_readq, (!(val & DMA_TLB_IVT)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);

	/* check IOTLB invalidation granularity */
	if (DMA_TLB_IAIG(val) == 0)
		printk(KERN_ERR"IOMMU: flush IOTLB failed\n");
	if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
		pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
			(unsigned long long)DMA_TLB_IIRG(type),
			(unsigned long long)DMA_TLB_IAIG(val));
}

static struct device_domain_info *iommu_support_dev_iotlb(
	struct dmar_domain *domain, int segment, u8 bus, u8 devfn)
{
	int found = 0;
	unsigned long flags;
	struct device_domain_info *info;
	struct intel_iommu *iommu = device_to_iommu(segment, bus, devfn);

	if (!ecap_dev_iotlb_support(iommu->ecap))
		return NULL;

	if (!iommu->qi)
		return NULL;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry(info, &domain->devices, link)
		if (info->bus == bus && info->devfn == devfn) {
			found = 1;
			break;
		}
	spin_unlock_irqrestore(&device_domain_lock, flags);

	if (!found || !info->dev)
		return NULL;

	if (!pci_find_ext_capability(info->dev, PCI_EXT_CAP_ID_ATS))
		return NULL;

	if (!dmar_find_matched_atsr_unit(info->dev))
		return NULL;

	info->iommu = iommu;

	return info;
}

static void iommu_enable_dev_iotlb(struct device_domain_info *info)
{
	if (!info)
		return;

	pci_enable_ats(info->dev, VTD_PAGE_SHIFT);
}

static void iommu_disable_dev_iotlb(struct device_domain_info *info)
{
	if (!info->dev || !pci_ats_enabled(info->dev))
		return;

	pci_disable_ats(info->dev);
}

static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
				  u64 addr, unsigned mask)
{
	u16 sid, qdep;
	unsigned long flags;
	struct device_domain_info *info;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry(info, &domain->devices, link) {
		if (!info->dev || !pci_ats_enabled(info->dev))
			continue;

		sid = info->bus << 8 | info->devfn;
		qdep = pci_ats_queue_depth(info->dev);
		qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags);
}

static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
				  unsigned long pfn, unsigned int pages, int map)
{
	unsigned int mask = ilog2(__roundup_pow_of_two(pages));
	uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;

	BUG_ON(pages == 0);

	/*
	 * Fallback to domain selective flush if no PSI support or the size is
	 * too big.
	 * PSI requires page size to be 2 ^ x, and the base address is naturally
	 * aligned to the size
	 */
	if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
		iommu->flush.flush_iotlb(iommu, did, 0, 0,
						DMA_TLB_DSI_FLUSH);
	else
		iommu->flush.flush_iotlb(iommu, did, addr, mask,
						DMA_TLB_PSI_FLUSH);

	/*
	 * In caching mode, changes of pages from non-present to present require
	 * flush. However, device IOTLB doesn't need to be flushed in this case.
	 */
	if (!cap_caching_mode(iommu->cap) || !map)
		iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
}

static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
{
	u32 pmen;
	unsigned long flags;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);
	pmen = readl(iommu->reg + DMAR_PMEN_REG);
	pmen &= ~DMA_PMEN_EPM;
	writel(pmen, iommu->reg + DMAR_PMEN_REG);

	/* wait for the protected region status bit to clear */
	IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
		readl, !(pmen & DMA_PMEN_PRS), pmen);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

static int iommu_enable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);
	iommu->gcmd |= DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_TES), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
	return 0;
}

static int iommu_disable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	iommu->gcmd &= ~DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(sts & DMA_GSTS_TES)), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
	return 0;
}


static int iommu_init_domains(struct intel_iommu *iommu)
{
	unsigned long ndomains;
	unsigned long nlongs;

	ndomains = cap_ndoms(iommu->cap);
	pr_debug("IOMMU %d: Number of Domains supported <%ld>\n", iommu->seq_id,
			ndomains);
	nlongs = BITS_TO_LONGS(ndomains);

	spin_lock_init(&iommu->lock);

	/* TBD: there might be 64K domains,
	 * consider other allocation for future chip
	 */
	iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
	if (!iommu->domain_ids) {
		printk(KERN_ERR "Allocating domain id array failed\n");
		return -ENOMEM;
	}
	iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
			GFP_KERNEL);
	if (!iommu->domains) {
		printk(KERN_ERR "Allocating domain array failed\n");
		return -ENOMEM;
	}

	/*
	 * if Caching mode is set, then invalid translations are tagged
	 * with domainid 0. Hence we need to pre-allocate it.
	 */
	if (cap_caching_mode(iommu->cap))
		set_bit(0, iommu->domain_ids);
	return 0;
}


static void domain_exit(struct dmar_domain *domain);
static void vm_domain_exit(struct dmar_domain *domain);

void free_dmar_iommu(struct intel_iommu *iommu)
{
	struct dmar_domain *domain;
	int i;
	unsigned long flags;

	if ((iommu->domains) && (iommu->domain_ids)) {
		for_each_set_bit(i, iommu->domain_ids, cap_ndoms(iommu->cap)) {
			domain = iommu->domains[i];
			clear_bit(i, iommu->domain_ids);

			spin_lock_irqsave(&domain->iommu_lock, flags);
			if (--domain->iommu_count == 0) {
				if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
					vm_domain_exit(domain);
				else
					domain_exit(domain);
			}
			spin_unlock_irqrestore(&domain->iommu_lock, flags);
		}
	}

	if (iommu->gcmd & DMA_GCMD_TE)
		iommu_disable_translation(iommu);

	if (iommu->irq) {
		irq_set_handler_data(iommu->irq, NULL);
		/* This will mask the irq */
		free_irq(iommu->irq, iommu);
		destroy_irq(iommu->irq);
	}

	kfree(iommu->domains);
	kfree(iommu->domain_ids);

	g_iommus[iommu->seq_id] = NULL;

	/* if all iommus are freed, free g_iommus */
	for (i = 0; i < g_num_of_iommus; i++) {
		if (g_iommus[i])
			break;
	}

	if (i == g_num_of_iommus)
		kfree(g_iommus);

	/* free context mapping */
	free_context_table(iommu);
}

static struct dmar_domain *alloc_domain(void)
{
	struct dmar_domain *domain;

	domain = alloc_domain_mem();
	if (!domain)
		return NULL;

	domain->nid = -1;
	memset(domain->iommu_bmp, 0, sizeof(domain->iommu_bmp));
	domain->flags = 0;

	return domain;
}

static int iommu_attach_domain(struct dmar_domain *domain,
			       struct intel_iommu *iommu)
{
	int num;
	unsigned long ndomains;
	unsigned long flags;

	ndomains = cap_ndoms(iommu->cap);

	spin_lock_irqsave(&iommu->lock, flags);

	num = find_first_zero_bit(iommu->domain_ids, ndomains);
	if (num >= ndomains) {
		spin_unlock_irqrestore(&iommu->lock, flags);
		printk(KERN_ERR "IOMMU: no free domain ids\n");
		return -ENOMEM;
	}

	domain->id = num;
	set_bit(num, iommu->domain_ids);
	set_bit(iommu->seq_id, domain->iommu_bmp);
	iommu->domains[num] = domain;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

static void iommu_detach_domain(struct dmar_domain *domain,
				struct intel_iommu *iommu)
{
	unsigned long flags;
	int num, ndomains;
	int found = 0;

	spin_lock_irqsave(&iommu->lock, flags);
	ndomains = cap_ndoms(iommu->cap);
	for_each_set_bit(num, iommu->domain_ids, ndomains) {
		if (iommu->domains[num] == domain) {
			found = 1;
			break;
		}
	}

	if (found) {
		clear_bit(num, iommu->domain_ids);
		clear_bit(iommu->seq_id, domain->iommu_bmp);
		iommu->domains[num] = NULL;
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static struct iova_domain reserved_iova_list;
static struct lock_class_key reserved_rbtree_key;

static int dmar_init_reserved_ranges(void)
{
	struct pci_dev *pdev = NULL;
	struct iova *iova;
	int i;

	init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);

	lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
		&reserved_rbtree_key);

	/* IOAPIC ranges shouldn't be accessed by DMA */
	iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
		IOVA_PFN(IOAPIC_RANGE_END));
	if (!iova) {
		printk(KERN_ERR "Reserve IOAPIC range failed\n");
		return -ENODEV;
	}

	/* Reserve all PCI MMIO to avoid peer-to-peer access */
	for_each_pci_dev(pdev) {
		struct resource *r;

		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
			r = &pdev->resource[i];
			if (!r->flags || !(r->flags & IORESOURCE_MEM))
				continue;
			iova = reserve_iova(&reserved_iova_list,
					    IOVA_PFN(r->start),
					    IOVA_PFN(r->end));
			if (!iova) {
				printk(KERN_ERR "Reserve iova failed\n");
				return -ENODEV;
			}
		}
	}
	return 0;
}

static void domain_reserve_special_ranges(struct dmar_domain *domain)
{
	copy_reserved_iova(&reserved_iova_list, &domain->iovad);
}

static inline int guestwidth_to_adjustwidth(int gaw)
{
	int agaw;
	int r = (gaw - 12) % 9;

	if (r == 0)
		agaw = gaw;
	else
		agaw = gaw + 9 - r;
	if (agaw > 64)
		agaw = 64;
	return agaw;
}
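
/*
 * Worked example (added for illustration): gaw 48 already satisfies
 * (48 - 12) % 9 == 0 and is returned unchanged; gaw 36 gives r == 6 and
 * is rounded up to the next page-table boundary, 39 bits.
 */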

static int domain_init(struct dmar_domain *domain, int guest_width)
{
	struct intel_iommu *iommu;
	int adjust_width, agaw;
	unsigned long sagaw;

	init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
	spin_lock_init(&domain->iommu_lock);

	domain_reserve_special_ranges(domain);

	/* calculate AGAW */
	iommu = domain_get_iommu(domain);
	if (guest_width > cap_mgaw(iommu->cap))
		guest_width = cap_mgaw(iommu->cap);
	domain->gaw = guest_width;
	adjust_width = guestwidth_to_adjustwidth(guest_width);
	agaw = width_to_agaw(adjust_width);
	sagaw = cap_sagaw(iommu->cap);
	if (!test_bit(agaw, &sagaw)) {
		/* hardware doesn't support it, choose a bigger one */
		pr_debug("IOMMU: hardware doesn't support agaw %d\n", agaw);
		agaw = find_next_bit(&sagaw, 5, agaw);
		if (agaw >= 5)
			return -ENODEV;
	}
	domain->agaw = agaw;
	INIT_LIST_HEAD(&domain->devices);

	if (ecap_coherent(iommu->ecap))
		domain->iommu_coherency = 1;
	else
		domain->iommu_coherency = 0;

	if (ecap_sc_support(iommu->ecap))
		domain->iommu_snooping = 1;
	else
		domain->iommu_snooping = 0;

	domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
	domain->iommu_count = 1;
	domain->nid = iommu->node;

	/* always allocate the top pgd */
	domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
	if (!domain->pgd)
		return -ENOMEM;
	__iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
	return 0;
}

static void domain_exit(struct dmar_domain *domain)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	/* Domain 0 is reserved, so don't process it */
	if (!domain)
		return;

	/* Flush any lazy unmaps that may reference this domain */
	if (!intel_iommu_strict)
		flush_unmaps_timeout(0);

	domain_remove_dev_info(domain);
	/* destroy iovas */
	put_iova_domain(&domain->iovad);

	/* clear ptes */
	dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

	/* free page tables */
	dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

	for_each_active_iommu(iommu, drhd)
		if (test_bit(iommu->seq_id, domain->iommu_bmp))
			iommu_detach_domain(domain, iommu);

	free_domain_mem(domain);
}

static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
				      u8 bus, u8 devfn, int translation)
{
	struct context_entry *context;
	unsigned long flags;
	struct intel_iommu *iommu;
	struct dma_pte *pgd;
	unsigned long num;
	unsigned long ndomains;
	int id;
	int agaw;
	struct device_domain_info *info = NULL;

	pr_debug("Set context mapping for %02x:%02x.%d\n",
		bus, PCI_SLOT(devfn), PCI_FUNC(devfn));

	BUG_ON(!domain->pgd);
	BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
	       translation != CONTEXT_TT_MULTI_LEVEL);

	iommu = device_to_iommu(segment, bus, devfn);
	if (!iommu)
		return -ENODEV;

	context = device_to_context_entry(iommu, bus, devfn);
	if (!context)
		return -ENOMEM;
	spin_lock_irqsave(&iommu->lock, flags);
	if (context_present(context)) {
		spin_unlock_irqrestore(&iommu->lock, flags);
		return 0;
	}

	id = domain->id;
	pgd = domain->pgd;

	if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
	    domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) {
		int found = 0;

		/* find an available domain id for this device in iommu */
		ndomains = cap_ndoms(iommu->cap);
		for_each_set_bit(num, iommu->domain_ids, ndomains) {
			if (iommu->domains[num] == domain) {
				id = num;
				found = 1;
				break;
			}
		}

		if (found == 0) {
			num = find_first_zero_bit(iommu->domain_ids, ndomains);
			if (num >= ndomains) {
				spin_unlock_irqrestore(&iommu->lock, flags);
				printk(KERN_ERR "IOMMU: no free domain ids\n");
				return -EFAULT;
			}

			set_bit(num, iommu->domain_ids);
			iommu->domains[num] = domain;
			id = num;
		}

		/* Skip top levels of page tables for
		 * iommu which has less agaw than default.
		 * Unnecessary for PT mode.
		 */
		if (translation != CONTEXT_TT_PASS_THROUGH) {
			for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
				pgd = phys_to_virt(dma_pte_addr(pgd));
				if (!dma_pte_present(pgd)) {
					spin_unlock_irqrestore(&iommu->lock, flags);
					return -ENOMEM;
				}
			}
		}
	}

	context_set_domain_id(context, id);

	if (translation != CONTEXT_TT_PASS_THROUGH) {
		info = iommu_support_dev_iotlb(domain, segment, bus, devfn);
		translation = info ? CONTEXT_TT_DEV_IOTLB :
				     CONTEXT_TT_MULTI_LEVEL;
	}
	/*
	 * In pass through mode, AW must be programmed to indicate the largest
	 * AGAW value supported by hardware. And ASR is ignored by hardware.
	 */
	if (unlikely(translation == CONTEXT_TT_PASS_THROUGH))
		context_set_address_width(context, iommu->msagaw);
	else {
		context_set_address_root(context, virt_to_phys(pgd));
		context_set_address_width(context, iommu->agaw);
	}

	context_set_translation_type(context, translation);
	context_set_fault_enable(context);
	context_set_present(context);
	domain_flush_cache(domain, context, sizeof(*context));

	/*
	 * It's a non-present to present mapping. If hardware doesn't cache
	 * non-present entry we only need to flush the write-buffer. If it
	 * _does_ cache non-present entries, then it does so in the special
	 * domain #0, which we have to flush:
	 */
	if (cap_caching_mode(iommu->cap)) {
		iommu->flush.flush_context(iommu, 0,
					   (((u16)bus) << 8) | devfn,
					   DMA_CCMD_MASK_NOBIT,
					   DMA_CCMD_DEVICE_INVL);
		iommu->flush.flush_iotlb(iommu, domain->id, 0, 0, DMA_TLB_DSI_FLUSH);
	} else {
		iommu_flush_write_buffer(iommu);
	}
	iommu_enable_dev_iotlb(info);
	spin_unlock_irqrestore(&iommu->lock, flags);

	spin_lock_irqsave(&domain->iommu_lock, flags);
	if (!test_and_set_bit(iommu->seq_id, domain->iommu_bmp)) {
		domain->iommu_count++;
		if (domain->iommu_count == 1)
			domain->nid = iommu->node;
		domain_update_iommu_cap(domain);
	}
	spin_unlock_irqrestore(&domain->iommu_lock, flags);
	return 0;
}

static int
domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev,
			int translation)
{
	int ret;
	struct pci_dev *tmp, *parent;

	ret = domain_context_mapping_one(domain, pci_domain_nr(pdev->bus),
					 pdev->bus->number, pdev->devfn,
					 translation);
	if (ret)
		return ret;

	/* dependent device mapping */
	tmp = pci_find_upstream_pcie_bridge(pdev);
	if (!tmp)
		return 0;
	/* Secondary interface's bus number and devfn 0 */
	parent = pdev->bus->self;
	while (parent != tmp) {
		ret = domain_context_mapping_one(domain,
						 pci_domain_nr(parent->bus),
						 parent->bus->number,
						 parent->devfn, translation);
		if (ret)
			return ret;
		parent = parent->bus->self;
	}
	if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */
		return domain_context_mapping_one(domain,
					pci_domain_nr(tmp->subordinate),
					tmp->subordinate->number, 0,
					translation);
	else /* this is a legacy PCI bridge */
		return domain_context_mapping_one(domain,
						  pci_domain_nr(tmp->bus),
						  tmp->bus->number,
						  tmp->devfn,
						  translation);
}

static int domain_context_mapped(struct pci_dev *pdev)
{
	int ret;
	struct pci_dev *tmp, *parent;
	struct intel_iommu *iommu;

	iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
				pdev->devfn);
	if (!iommu)
		return -ENODEV;

	ret = device_context_mapped(iommu, pdev->bus->number, pdev->devfn);
	if (!ret)
		return ret;
	/* dependent device mapping */
	tmp = pci_find_upstream_pcie_bridge(pdev);
	if (!tmp)
		return ret;
	/* Secondary interface's bus number and devfn 0 */
	parent = pdev->bus->self;
	while (parent != tmp) {
		ret = device_context_mapped(iommu, parent->bus->number,
					    parent->devfn);
		if (!ret)
			return ret;
		parent = parent->bus->self;
	}
	if (pci_is_pcie(tmp))
		return device_context_mapped(iommu, tmp->subordinate->number,
					     0);
	else
		return device_context_mapped(iommu, tmp->bus->number,
					     tmp->devfn);
}

/* Returns a number of VTD pages, but aligned to MM page size */
static inline unsigned long aligned_nrpages(unsigned long host_addr,
					    size_t size)
{
	host_addr &= ~PAGE_MASK;
	return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
}
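
/*
 * Example (added, illustrative): host_addr 0x1100 with size 0x2000 touches
 * three 4KiB VT-d pages (0x1000-0x3fff), and 3 is what this returns.
 */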

/* Return largest possible superpage level for a given mapping */
static inline int hardware_largepage_caps(struct dmar_domain *domain,
					  unsigned long iov_pfn,
					  unsigned long phy_pfn,
					  unsigned long pages)
{
	int support, level = 1;
	unsigned long pfnmerge;

	support = domain->iommu_superpage;

	/* To use a large page, the virtual *and* physical addresses
	   must be aligned to 2MiB/1GiB/etc. Lower bits set in either
	   of them will mean we have to use smaller pages. So just
	   merge them and check both at once. */
	pfnmerge = iov_pfn | phy_pfn;

	while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) {
		pages >>= VTD_STRIDE_SHIFT;
		if (!pages)
			break;
		pfnmerge >>= VTD_STRIDE_SHIFT;
		level++;
		support--;
	}
	return level;
}
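
/*
 * Reader's note (added): because iov_pfn and phy_pfn are OR'ed together,
 * a single low bit set in either one forces level 1 (4KiB); both the IOVA
 * and the physical range must be superpage-aligned, and the remaining
 * length long enough, before a higher level is reported.
 */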
1784
9051aa02
DW
1785static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1786 struct scatterlist *sg, unsigned long phys_pfn,
1787 unsigned long nr_pages, int prot)
e1605495
DW
1788{
1789 struct dma_pte *first_pte = NULL, *pte = NULL;
9051aa02 1790 phys_addr_t uninitialized_var(pteval);
e1605495 1791 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
9051aa02 1792 unsigned long sg_res;
6dd9a7c7
YS
1793 unsigned int largepage_lvl = 0;
1794 unsigned long lvl_pages = 0;
e1605495
DW
1795
1796 BUG_ON(addr_width < BITS_PER_LONG && (iov_pfn + nr_pages - 1) >> addr_width);
1797
1798 if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
1799 return -EINVAL;
1800
1801 prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;
1802
9051aa02
DW
1803 if (sg)
1804 sg_res = 0;
1805 else {
1806 sg_res = nr_pages + 1;
1807 pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
1808 }
1809
6dd9a7c7 1810 while (nr_pages > 0) {
c85994e4
DW
1811 uint64_t tmp;
1812
e1605495 1813 if (!sg_res) {
f532959b 1814 sg_res = aligned_nrpages(sg->offset, sg->length);
e1605495
DW
1815 sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
1816 sg->dma_length = sg->length;
1817 pteval = page_to_phys(sg_page(sg)) | prot;
6dd9a7c7 1818 phys_pfn = pteval >> VTD_PAGE_SHIFT;
e1605495 1819 }
6dd9a7c7 1820
e1605495 1821 if (!pte) {
6dd9a7c7
YS
1822 largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res);
1823
1824 first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, largepage_lvl);
e1605495
DW
1825 if (!pte)
1826 return -ENOMEM;
6dd9a7c7
YS
1827 		/* It is a large page */
1828 if (largepage_lvl > 1)
1829 pteval |= DMA_PTE_LARGE_PAGE;
1830 else
1831 pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
1832
e1605495
DW
1833 }
1834 		/* We don't need a lock here, nobody else
1835 * touches the iova range
1836 */
7766a3fb 1837 tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
c85994e4 1838 if (tmp) {
1bf20f0d 1839 static int dumps = 5;
c85994e4
DW
1840 printk(KERN_CRIT "ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
1841 iov_pfn, tmp, (unsigned long long)pteval);
1bf20f0d
DW
1842 if (dumps) {
1843 dumps--;
1844 debug_dma_dump_mappings(NULL);
1845 }
1846 WARN_ON(1);
1847 }
6dd9a7c7
YS
1848
1849 lvl_pages = lvl_to_nr_pages(largepage_lvl);
1850
1851 BUG_ON(nr_pages < lvl_pages);
1852 BUG_ON(sg_res < lvl_pages);
1853
1854 nr_pages -= lvl_pages;
1855 iov_pfn += lvl_pages;
1856 phys_pfn += lvl_pages;
1857 pteval += lvl_pages * VTD_PAGE_SIZE;
1858 sg_res -= lvl_pages;
1859
1860 /* If the next PTE would be the first in a new page, then we
1861 need to flush the cache on the entries we've just written.
1862 And then we'll need to recalculate 'pte', so clear it and
1863 let it get set again in the if (!pte) block above.
1864
1865 If we're done (!nr_pages) we need to flush the cache too.
1866
1867 Also if we've been setting superpages, we may need to
1868 recalculate 'pte' and switch back to smaller pages for the
1869 end of the mapping, if the trailing size is not enough to
1870 use another superpage (i.e. sg_res < lvl_pages). */
e1605495 1871 pte++;
6dd9a7c7
YS
1872 if (!nr_pages || first_pte_in_page(pte) ||
1873 (largepage_lvl > 1 && sg_res < lvl_pages)) {
e1605495
DW
1874 domain_flush_cache(domain, first_pte,
1875 (void *)pte - (void *)first_pte);
1876 pte = NULL;
1877 }
6dd9a7c7
YS
1878
1879 if (!sg_res && nr_pages)
e1605495
DW
1880 sg = sg_next(sg);
1881 }
1882 return 0;
1883}
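/*
 * For instance, a physically contiguous, 2MiB-aligned run of 512
 * pages fed through the loop above ends up as a single level-2 PTE
 * with DMA_PTE_LARGE_PAGE set (when the hardware supports
 * superpages), instead of 512 level-1 PTEs; a smaller trailing
 * remainder falls back to 4KiB PTEs once sg_res drops below
 * lvl_pages.
 */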
1884
9051aa02
DW
1885static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1886 struct scatterlist *sg, unsigned long nr_pages,
1887 int prot)
ba395927 1888{
9051aa02
DW
1889 return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
1890}
6f6a00e4 1891
9051aa02
DW
1892static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1893 unsigned long phys_pfn, unsigned long nr_pages,
1894 int prot)
1895{
1896 return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
ba395927
KA
1897}
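/*
 * Example (illustrative): identity-mapping a single 4KiB page at
 * pfn 0x1000 with read/write permission would be
 *
 *	domain_pfn_mapping(domain, 0x1000, 0x1000, 1,
 *			   DMA_PTE_READ | DMA_PTE_WRITE);
 *
 * which is essentially what iommu_domain_identity_map() below does
 * for a whole pfn range.
 */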
1898
c7151a8d 1899static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
ba395927 1900{
c7151a8d
WH
1901 if (!iommu)
1902 return;
8c11e798
WH
1903
1904 clear_context_table(iommu, bus, devfn);
1905 iommu->flush.flush_context(iommu, 0, 0, 0,
4c25a2c1 1906 DMA_CCMD_GLOBAL_INVL);
1f0ef2aa 1907 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
ba395927
KA
1908}
1909
109b9b04
DW
1910static inline void unlink_domain_info(struct device_domain_info *info)
1911{
1912 assert_spin_locked(&device_domain_lock);
1913 list_del(&info->link);
1914 list_del(&info->global);
1915 if (info->dev)
1916 info->dev->dev.archdata.iommu = NULL;
1917}
1918
ba395927
KA
1919static void domain_remove_dev_info(struct dmar_domain *domain)
1920{
1921 struct device_domain_info *info;
1922 unsigned long flags;
c7151a8d 1923 struct intel_iommu *iommu;
ba395927
KA
1924
1925 spin_lock_irqsave(&device_domain_lock, flags);
1926 while (!list_empty(&domain->devices)) {
1927 info = list_entry(domain->devices.next,
1928 struct device_domain_info, link);
109b9b04 1929 unlink_domain_info(info);
ba395927
KA
1930 spin_unlock_irqrestore(&device_domain_lock, flags);
1931
93a23a72 1932 iommu_disable_dev_iotlb(info);
276dbf99 1933 iommu = device_to_iommu(info->segment, info->bus, info->devfn);
c7151a8d 1934 iommu_detach_dev(iommu, info->bus, info->devfn);
ba395927
KA
1935 free_devinfo_mem(info);
1936
1937 spin_lock_irqsave(&device_domain_lock, flags);
1938 }
1939 spin_unlock_irqrestore(&device_domain_lock, flags);
1940}
1941
1942/*
1943 * find_domain
358dd8ac 1944 * Note: we use struct pci_dev->dev.archdata.iommu to store the info
ba395927 1945 */
38717946 1946static struct dmar_domain *
ba395927
KA
1947find_domain(struct pci_dev *pdev)
1948{
1949 struct device_domain_info *info;
1950
1951 /* No lock here, assumes no domain exit in normal case */
358dd8ac 1952 info = pdev->dev.archdata.iommu;
ba395927
KA
1953 if (info)
1954 return info->domain;
1955 return NULL;
1956}
1957
ba395927
KA
1958/* domain is initialized */
1959static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
1960{
1961 struct dmar_domain *domain, *found = NULL;
1962 struct intel_iommu *iommu;
1963 struct dmar_drhd_unit *drhd;
1964 struct device_domain_info *info, *tmp;
1965 struct pci_dev *dev_tmp;
1966 unsigned long flags;
1967 int bus = 0, devfn = 0;
276dbf99 1968 int segment;
2c2e2c38 1969 int ret;
ba395927
KA
1970
1971 domain = find_domain(pdev);
1972 if (domain)
1973 return domain;
1974
276dbf99
DW
1975 segment = pci_domain_nr(pdev->bus);
1976
ba395927
KA
1977 dev_tmp = pci_find_upstream_pcie_bridge(pdev);
1978 if (dev_tmp) {
5f4d91a1 1979 if (pci_is_pcie(dev_tmp)) {
ba395927
KA
1980 bus = dev_tmp->subordinate->number;
1981 devfn = 0;
1982 } else {
1983 bus = dev_tmp->bus->number;
1984 devfn = dev_tmp->devfn;
1985 }
1986 spin_lock_irqsave(&device_domain_lock, flags);
1987 list_for_each_entry(info, &device_domain_list, global) {
276dbf99
DW
1988 if (info->segment == segment &&
1989 info->bus == bus && info->devfn == devfn) {
ba395927
KA
1990 found = info->domain;
1991 break;
1992 }
1993 }
1994 spin_unlock_irqrestore(&device_domain_lock, flags);
1995 		/* the pcie-pci bridge already has a domain, use it */
1996 if (found) {
1997 domain = found;
1998 goto found_domain;
1999 }
2000 }
2001
2c2e2c38
FY
2002 domain = alloc_domain();
2003 if (!domain)
2004 goto error;
2005
ba395927
KA
2006 /* Allocate new domain for the device */
2007 drhd = dmar_find_matched_drhd_unit(pdev);
2008 if (!drhd) {
2009 printk(KERN_ERR "IOMMU: can't find DMAR for device %s\n",
2010 pci_name(pdev));
2011 return NULL;
2012 }
2013 iommu = drhd->iommu;
2014
2c2e2c38
FY
2015 ret = iommu_attach_domain(domain, iommu);
2016 if (ret) {
2fe9723d 2017 free_domain_mem(domain);
ba395927 2018 goto error;
2c2e2c38 2019 }
ba395927
KA
2020
2021 if (domain_init(domain, gaw)) {
2022 domain_exit(domain);
2023 goto error;
2024 }
2025
2026 /* register pcie-to-pci device */
2027 if (dev_tmp) {
2028 info = alloc_devinfo_mem();
2029 if (!info) {
2030 domain_exit(domain);
2031 goto error;
2032 }
276dbf99 2033 info->segment = segment;
ba395927
KA
2034 info->bus = bus;
2035 info->devfn = devfn;
2036 info->dev = NULL;
2037 info->domain = domain;
2038 /* This domain is shared by devices under p2p bridge */
3b5410e7 2039 domain->flags |= DOMAIN_FLAG_P2P_MULTIPLE_DEVICES;
ba395927
KA
2040
2041 		/* the pcie-to-pci bridge already has a domain, use it */
2042 found = NULL;
2043 spin_lock_irqsave(&device_domain_lock, flags);
2044 list_for_each_entry(tmp, &device_domain_list, global) {
276dbf99
DW
2045 if (tmp->segment == segment &&
2046 tmp->bus == bus && tmp->devfn == devfn) {
ba395927
KA
2047 found = tmp->domain;
2048 break;
2049 }
2050 }
2051 if (found) {
00dfff77 2052 spin_unlock_irqrestore(&device_domain_lock, flags);
ba395927
KA
2053 free_devinfo_mem(info);
2054 domain_exit(domain);
2055 domain = found;
2056 } else {
2057 list_add(&info->link, &domain->devices);
2058 list_add(&info->global, &device_domain_list);
00dfff77 2059 spin_unlock_irqrestore(&device_domain_lock, flags);
ba395927 2060 }
ba395927
KA
2061 }
2062
2063found_domain:
2064 info = alloc_devinfo_mem();
2065 if (!info)
2066 goto error;
276dbf99 2067 info->segment = segment;
ba395927
KA
2068 info->bus = pdev->bus->number;
2069 info->devfn = pdev->devfn;
2070 info->dev = pdev;
2071 info->domain = domain;
2072 spin_lock_irqsave(&device_domain_lock, flags);
2073 	/* somebody else beat us to it */
2074 found = find_domain(pdev);
2075 if (found != NULL) {
2076 spin_unlock_irqrestore(&device_domain_lock, flags);
2077 if (found != domain) {
2078 domain_exit(domain);
2079 domain = found;
2080 }
2081 free_devinfo_mem(info);
2082 return domain;
2083 }
2084 list_add(&info->link, &domain->devices);
2085 list_add(&info->global, &device_domain_list);
358dd8ac 2086 pdev->dev.archdata.iommu = info;
ba395927
KA
2087 spin_unlock_irqrestore(&device_domain_lock, flags);
2088 return domain;
2089error:
2090 /* recheck it here, maybe others set it */
2091 return find_domain(pdev);
2092}
2093
2c2e2c38 2094static int iommu_identity_mapping;
e0fc7e0b
DW
2095#define IDENTMAP_ALL 1
2096#define IDENTMAP_GFX 2
2097#define IDENTMAP_AZALIA 4
2c2e2c38 2098
b213203e
DW
2099static int iommu_domain_identity_map(struct dmar_domain *domain,
2100 unsigned long long start,
2101 unsigned long long end)
ba395927 2102{
c5395d5c
DW
2103 unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
2104 unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;
2105
2106 if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
2107 dma_to_mm_pfn(last_vpfn))) {
ba395927 2108 printk(KERN_ERR "IOMMU: reserve iova failed\n");
b213203e 2109 return -ENOMEM;
ba395927
KA
2110 }
2111
c5395d5c
DW
2112 pr_debug("Mapping reserved region %llx-%llx for domain %d\n",
2113 start, end, domain->id);
ba395927
KA
2114 /*
2115 	 * The RMRR range might overlap with the physical memory range;
2116 	 * clear it first
2117 */
c5395d5c 2118 dma_pte_clear_range(domain, first_vpfn, last_vpfn);
ba395927 2119
c5395d5c
DW
2120 return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
2121 last_vpfn - first_vpfn + 1,
61df7443 2122 DMA_PTE_READ|DMA_PTE_WRITE);
b213203e
DW
2123}
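/*
 * Worked example: for an RMRR covering, say, 0xdd000000-0xddffffff
 * the pfn range is 0xdd000-0xddfff, so 0x1000 IOVAs are reserved,
 * any stale PTEs in that range are cleared, and 4096 pages are
 * identity-mapped read/write.
 */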
2124
2125static int iommu_prepare_identity_map(struct pci_dev *pdev,
2126 unsigned long long start,
2127 unsigned long long end)
2128{
2129 struct dmar_domain *domain;
2130 int ret;
2131
c7ab48d2 2132 domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
b213203e
DW
2133 if (!domain)
2134 return -ENOMEM;
2135
19943b0e
DW
2136 /* For _hardware_ passthrough, don't bother. But for software
2137 passthrough, we do it anyway -- it may indicate a memory
2138 	   range which is reserved in E820 and so didn't get set up
2139 	   in si_domain to start with */
2140 if (domain == si_domain && hw_pass_through) {
2141 printk("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
2142 pci_name(pdev), start, end);
2143 return 0;
2144 }
2145
2146 printk(KERN_INFO
2147 "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
2148 pci_name(pdev), start, end);
2ff729f5 2149
5595b528
DW
2150 if (end < start) {
2151 WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
2152 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2153 dmi_get_system_info(DMI_BIOS_VENDOR),
2154 dmi_get_system_info(DMI_BIOS_VERSION),
2155 dmi_get_system_info(DMI_PRODUCT_VERSION));
2156 ret = -EIO;
2157 goto error;
2158 }
2159
2ff729f5
DW
2160 if (end >> agaw_to_width(domain->agaw)) {
2161 WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
2162 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2163 agaw_to_width(domain->agaw),
2164 dmi_get_system_info(DMI_BIOS_VENDOR),
2165 dmi_get_system_info(DMI_BIOS_VERSION),
2166 dmi_get_system_info(DMI_PRODUCT_VERSION));
2167 ret = -EIO;
2168 goto error;
2169 }
19943b0e 2170
b213203e 2171 ret = iommu_domain_identity_map(domain, start, end);
ba395927
KA
2172 if (ret)
2173 goto error;
2174
2175 /* context entry init */
4ed0d3e6 2176 ret = domain_context_mapping(domain, pdev, CONTEXT_TT_MULTI_LEVEL);
b213203e
DW
2177 if (ret)
2178 goto error;
2179
2180 return 0;
2181
2182 error:
ba395927
KA
2183 domain_exit(domain);
2184 return ret;
ba395927
KA
2185}
2186
2187static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
2188 struct pci_dev *pdev)
2189{
358dd8ac 2190 if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
ba395927
KA
2191 return 0;
2192 return iommu_prepare_identity_map(pdev, rmrr->base_address,
70e535d1 2193 rmrr->end_address);
ba395927
KA
2194}
2195
d3f13810 2196#ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
49a0429e
KA
2197static inline void iommu_prepare_isa(void)
2198{
2199 struct pci_dev *pdev;
2200 int ret;
2201
2202 pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
2203 if (!pdev)
2204 return;
2205
c7ab48d2 2206 printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n");
70e535d1 2207 ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024 - 1);
49a0429e
KA
2208
2209 if (ret)
c7ab48d2
DW
2210 printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; "
2211 "floppy might not work\n");
49a0429e
KA
2212
2213}
2214#else
2215static inline void iommu_prepare_isa(void)
2216{
2217 return;
2218}
d3f13810 2219#endif /* !CONFIG_INTEL_IOMMU_FLOPPY_WA */
49a0429e 2220
2c2e2c38 2221static int md_domain_init(struct dmar_domain *domain, int guest_width);
c7ab48d2 2222
071e1374 2223static int __init si_domain_init(int hw)
2c2e2c38
FY
2224{
2225 struct dmar_drhd_unit *drhd;
2226 struct intel_iommu *iommu;
c7ab48d2 2227 int nid, ret = 0;
2c2e2c38
FY
2228
2229 si_domain = alloc_domain();
2230 if (!si_domain)
2231 return -EFAULT;
2232
c7ab48d2 2233 pr_debug("Identity mapping domain is domain %d\n", si_domain->id);
2c2e2c38
FY
2234
2235 for_each_active_iommu(iommu, drhd) {
2236 ret = iommu_attach_domain(si_domain, iommu);
2237 if (ret) {
2238 domain_exit(si_domain);
2239 return -EFAULT;
2240 }
2241 }
2242
2243 if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
2244 domain_exit(si_domain);
2245 return -EFAULT;
2246 }
2247
2248 si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY;
2249
19943b0e
DW
2250 if (hw)
2251 return 0;
2252
c7ab48d2 2253 for_each_online_node(nid) {
5dfe8660
TH
2254 unsigned long start_pfn, end_pfn;
2255 int i;
2256
2257 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
2258 ret = iommu_domain_identity_map(si_domain,
2259 PFN_PHYS(start_pfn), PFN_PHYS(end_pfn));
2260 if (ret)
2261 return ret;
2262 }
c7ab48d2
DW
2263 }
2264
2c2e2c38
FY
2265 return 0;
2266}
2267
2268static void domain_remove_one_dev_info(struct dmar_domain *domain,
2269 struct pci_dev *pdev);
2270static int identity_mapping(struct pci_dev *pdev)
2271{
2272 struct device_domain_info *info;
2273
2274 if (likely(!iommu_identity_mapping))
2275 return 0;
2276
cb452a40
MT
2277 info = pdev->dev.archdata.iommu;
2278 if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
2279 return (info->domain == si_domain);
2c2e2c38 2280
2c2e2c38
FY
2281 return 0;
2282}
2283
2284static int domain_add_dev_info(struct dmar_domain *domain,
5fe60f4e
DW
2285 struct pci_dev *pdev,
2286 int translation)
2c2e2c38
FY
2287{
2288 struct device_domain_info *info;
2289 unsigned long flags;
5fe60f4e 2290 int ret;
2c2e2c38
FY
2291
2292 info = alloc_devinfo_mem();
2293 if (!info)
2294 return -ENOMEM;
2295
2296 info->segment = pci_domain_nr(pdev->bus);
2297 info->bus = pdev->bus->number;
2298 info->devfn = pdev->devfn;
2299 info->dev = pdev;
2300 info->domain = domain;
2301
2302 spin_lock_irqsave(&device_domain_lock, flags);
2303 list_add(&info->link, &domain->devices);
2304 list_add(&info->global, &device_domain_list);
2305 pdev->dev.archdata.iommu = info;
2306 spin_unlock_irqrestore(&device_domain_lock, flags);
2307
e2ad23d0
DW
2308 ret = domain_context_mapping(domain, pdev, translation);
2309 if (ret) {
2310 spin_lock_irqsave(&device_domain_lock, flags);
109b9b04 2311 unlink_domain_info(info);
e2ad23d0
DW
2312 spin_unlock_irqrestore(&device_domain_lock, flags);
2313 free_devinfo_mem(info);
2314 return ret;
2315 }
2316
2c2e2c38
FY
2317 return 0;
2318}
2319
6941af28
DW
2320static int iommu_should_identity_map(struct pci_dev *pdev, int startup)
2321{
e0fc7e0b
DW
2322 if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
2323 return 1;
2324
2325 if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
2326 return 1;
2327
2328 if (!(iommu_identity_mapping & IDENTMAP_ALL))
2329 return 0;
6941af28 2330
3dfc813d
DW
2331 /*
2332 * We want to start off with all devices in the 1:1 domain, and
2333 * take them out later if we find they can't access all of memory.
2334 *
2335 * However, we can't do this for PCI devices behind bridges,
2336 * because all PCI devices behind the same bridge will end up
2337 * with the same source-id on their transactions.
2338 *
2339 * Practically speaking, we can't change things around for these
2340 * devices at run-time, because we can't be sure there'll be no
2341 * DMA transactions in flight for any of their siblings.
2342 *
2343 * So PCI devices (unless they're on the root bus) as well as
2344 * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
2345 * the 1:1 domain, just in _case_ one of their siblings turns out
2346 * not to be able to map all of memory.
2347 */
5f4d91a1 2348 if (!pci_is_pcie(pdev)) {
3dfc813d
DW
2349 if (!pci_is_root_bus(pdev->bus))
2350 return 0;
2351 if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
2352 return 0;
2353 } else if (pdev->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
2354 return 0;
2355
2356 /*
2357 * At boot time, we don't yet know if devices will be 64-bit capable.
2358 * Assume that they will -- if they turn out not to be, then we can
2359 * take them out of the 1:1 domain later.
2360 */
8fcc5372
CW
2361 if (!startup) {
2362 /*
2363 * If the device's dma_mask is less than the system's memory
2364 * size then this is not a candidate for identity mapping.
2365 */
2366 u64 dma_mask = pdev->dma_mask;
2367
2368 if (pdev->dev.coherent_dma_mask &&
2369 pdev->dev.coherent_dma_mask < dma_mask)
2370 dma_mask = pdev->dev.coherent_dma_mask;
2371
2372 return dma_mask >= dma_get_required_mask(&pdev->dev);
2373 }
6941af28
DW
2374
2375 return 1;
2376}
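/*
 * In practice, assuming IDENTMAP_ALL is set, this means for example
 * that a conventional PCI device behind a PCIe-to-PCI bridge is
 * never identity-mapped (every device behind that bridge shares one
 * source-id), while a PCIe endpoint whose dma_mask covers all of
 * system memory is; a device with only a 32-bit dma_mask on a
 * machine with more RAM than that fails the
 * dma_get_required_mask() check at run time and is kept out of the
 * 1:1 domain.
 */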
2377
071e1374 2378static int __init iommu_prepare_static_identity_mapping(int hw)
2c2e2c38 2379{
2c2e2c38
FY
2380 struct pci_dev *pdev = NULL;
2381 int ret;
2382
19943b0e 2383 ret = si_domain_init(hw);
2c2e2c38
FY
2384 if (ret)
2385 return -EFAULT;
2386
2c2e2c38 2387 for_each_pci_dev(pdev) {
6941af28 2388 if (iommu_should_identity_map(pdev, 1)) {
5fe60f4e 2389 ret = domain_add_dev_info(si_domain, pdev,
eae460b6
MT
2390 hw ? CONTEXT_TT_PASS_THROUGH :
2391 CONTEXT_TT_MULTI_LEVEL);
2392 if (ret) {
2393 /* device not associated with an iommu */
2394 if (ret == -ENODEV)
2395 continue;
62edf5dc 2396 return ret;
eae460b6
MT
2397 }
2398 pr_info("IOMMU: %s identity mapping for device %s\n",
2399 hw ? "hardware" : "software", pci_name(pdev));
62edf5dc 2400 }
2c2e2c38
FY
2401 }
2402
2403 return 0;
2404}
2405
b779260b 2406static int __init init_dmars(void)
ba395927
KA
2407{
2408 struct dmar_drhd_unit *drhd;
2409 struct dmar_rmrr_unit *rmrr;
2410 struct pci_dev *pdev;
2411 struct intel_iommu *iommu;
9d783ba0 2412 int i, ret;
2c2e2c38 2413
ba395927
KA
2414 /*
2415 * for each drhd
2416 * allocate root
2417 * initialize and program root entry to not present
2418 * endfor
2419 */
2420 for_each_drhd_unit(drhd) {
5e0d2a6f 2421 /*
2422 * lock not needed as this is only incremented in the single
2423 		 * threaded kernel __init code path; all other accesses are
2424 		 * read-only
2425 */
1b198bb0
MT
2426 if (g_num_of_iommus < IOMMU_UNITS_SUPPORTED) {
2427 g_num_of_iommus++;
2428 continue;
2429 }
2430 printk_once(KERN_ERR "intel-iommu: exceeded %d IOMMUs\n",
2431 IOMMU_UNITS_SUPPORTED);
5e0d2a6f 2432 }
2433
d9630fe9
WH
2434 g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
2435 GFP_KERNEL);
2436 if (!g_iommus) {
2437 printk(KERN_ERR "Allocating global iommu array failed\n");
2438 ret = -ENOMEM;
2439 goto error;
2440 }
2441
80b20dd8 2442 deferred_flush = kzalloc(g_num_of_iommus *
2443 sizeof(struct deferred_flush_tables), GFP_KERNEL);
2444 if (!deferred_flush) {
5e0d2a6f 2445 ret = -ENOMEM;
2446 goto error;
2447 }
2448
5e0d2a6f 2449 for_each_drhd_unit(drhd) {
2450 if (drhd->ignored)
2451 continue;
1886e8a9
SS
2452
2453 iommu = drhd->iommu;
d9630fe9 2454 g_iommus[iommu->seq_id] = iommu;
ba395927 2455
e61d98d8
SS
2456 ret = iommu_init_domains(iommu);
2457 if (ret)
2458 goto error;
2459
ba395927
KA
2460 /*
2461 * TBD:
2462 * we could share the same root & context tables
25985edc 2463 		 * among all IOMMUs. Need to split it later.
ba395927
KA
2464 */
2465 ret = iommu_alloc_root_entry(iommu);
2466 if (ret) {
2467 printk(KERN_ERR "IOMMU: allocate root entry failed\n");
2468 goto error;
2469 }
4ed0d3e6 2470 if (!ecap_pass_through(iommu->ecap))
19943b0e 2471 hw_pass_through = 0;
ba395927
KA
2472 }
2473
1531a6a6
SS
2474 /*
2475 * Start from the sane iommu hardware state.
2476 */
a77b67d4
YS
2477 for_each_drhd_unit(drhd) {
2478 if (drhd->ignored)
2479 continue;
2480
2481 iommu = drhd->iommu;
1531a6a6
SS
2482
2483 /*
2484 * If the queued invalidation is already initialized by us
2485 * (for example, while enabling interrupt-remapping) then
2486 * we got the things already rolling from a sane state.
2487 */
2488 if (iommu->qi)
2489 continue;
2490
2491 /*
2492 * Clear any previous faults.
2493 */
2494 dmar_fault(-1, iommu);
2495 /*
2496 * Disable queued invalidation if supported and already enabled
2497 * before OS handover.
2498 */
2499 dmar_disable_qi(iommu);
2500 }
2501
2502 for_each_drhd_unit(drhd) {
2503 if (drhd->ignored)
2504 continue;
2505
2506 iommu = drhd->iommu;
2507
a77b67d4
YS
2508 if (dmar_enable_qi(iommu)) {
2509 /*
2510 * Queued Invalidate not enabled, use Register Based
2511 * Invalidate
2512 */
2513 iommu->flush.flush_context = __iommu_flush_context;
2514 iommu->flush.flush_iotlb = __iommu_flush_iotlb;
680a7524 2515 printk(KERN_INFO "IOMMU %d 0x%Lx: using Register based "
b4e0f9eb 2516 "invalidation\n",
680a7524 2517 iommu->seq_id,
b4e0f9eb 2518 (unsigned long long)drhd->reg_base_addr);
a77b67d4
YS
2519 } else {
2520 iommu->flush.flush_context = qi_flush_context;
2521 iommu->flush.flush_iotlb = qi_flush_iotlb;
680a7524 2522 printk(KERN_INFO "IOMMU %d 0x%Lx: using Queued "
b4e0f9eb 2523 "invalidation\n",
680a7524 2524 iommu->seq_id,
b4e0f9eb 2525 (unsigned long long)drhd->reg_base_addr);
a77b67d4
YS
2526 }
2527 }
2528
19943b0e 2529 if (iommu_pass_through)
e0fc7e0b
DW
2530 iommu_identity_mapping |= IDENTMAP_ALL;
2531
d3f13810 2532#ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
e0fc7e0b 2533 iommu_identity_mapping |= IDENTMAP_GFX;
19943b0e 2534#endif
e0fc7e0b
DW
2535
2536 check_tylersburg_isoch();
2537
ba395927 2538 /*
19943b0e
DW
2539 * If pass through is not set or not enabled, setup context entries for
2540 * identity mappings for rmrr, gfx, and isa and may fall back to static
2541 * identity mapping if iommu_identity_mapping is set.
ba395927 2542 */
19943b0e
DW
2543 if (iommu_identity_mapping) {
2544 ret = iommu_prepare_static_identity_mapping(hw_pass_through);
4ed0d3e6 2545 if (ret) {
19943b0e
DW
2546 printk(KERN_CRIT "Failed to setup IOMMU pass-through\n");
2547 goto error;
ba395927
KA
2548 }
2549 }
ba395927 2550 /*
19943b0e
DW
2551 * For each rmrr
2552 * for each dev attached to rmrr
2553 * do
2554 * locate drhd for dev, alloc domain for dev
2555 * allocate free domain
2556 * allocate page table entries for rmrr
2557 * if context not allocated for bus
2558 * allocate and init context
2559 * set present in root table for this bus
2560 * init context with domain, translation etc
2561 * endfor
2562 * endfor
ba395927 2563 */
19943b0e
DW
2564 printk(KERN_INFO "IOMMU: Setting RMRR:\n");
2565 for_each_rmrr_units(rmrr) {
2566 for (i = 0; i < rmrr->devices_cnt; i++) {
2567 pdev = rmrr->devices[i];
2568 /*
2569 			 * some BIOSes list non-existent devices in the
2570 			 * DMAR table.
2571 */
2572 if (!pdev)
2573 continue;
2574 ret = iommu_prepare_rmrr_dev(rmrr, pdev);
2575 if (ret)
2576 printk(KERN_ERR
2577 "IOMMU: mapping reserved region failed\n");
ba395927 2578 }
4ed0d3e6 2579 }
49a0429e 2580
19943b0e
DW
2581 iommu_prepare_isa();
2582
ba395927
KA
2583 /*
2584 * for each drhd
2585 * enable fault log
2586 * global invalidate context cache
2587 * global invalidate iotlb
2588 * enable translation
2589 */
2590 for_each_drhd_unit(drhd) {
51a63e67
JC
2591 if (drhd->ignored) {
2592 /*
2593 * we always have to disable PMRs or DMA may fail on
2594 * this device
2595 */
2596 if (force_on)
2597 iommu_disable_protect_mem_regions(drhd->iommu);
ba395927 2598 continue;
51a63e67 2599 }
ba395927 2600 iommu = drhd->iommu;
ba395927
KA
2601
2602 iommu_flush_write_buffer(iommu);
2603
3460a6d9
KA
2604 ret = dmar_set_interrupt(iommu);
2605 if (ret)
2606 goto error;
2607
ba395927
KA
2608 iommu_set_root_entry(iommu);
2609
4c25a2c1 2610 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
1f0ef2aa 2611 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
f8bab735 2612
ba395927
KA
2613 ret = iommu_enable_translation(iommu);
2614 if (ret)
2615 goto error;
b94996c9
DW
2616
2617 iommu_disable_protect_mem_regions(iommu);
ba395927
KA
2618 }
2619
2620 return 0;
2621error:
2622 for_each_drhd_unit(drhd) {
2623 if (drhd->ignored)
2624 continue;
2625 iommu = drhd->iommu;
2626 free_iommu(iommu);
2627 }
d9630fe9 2628 kfree(g_iommus);
ba395927
KA
2629 return ret;
2630}
2631
5a5e02a6 2632/* This takes a number of _MM_ pages, not VTD pages */
875764de
DW
2633static struct iova *intel_alloc_iova(struct device *dev,
2634 struct dmar_domain *domain,
2635 unsigned long nrpages, uint64_t dma_mask)
ba395927 2636{
ba395927 2637 struct pci_dev *pdev = to_pci_dev(dev);
ba395927 2638 struct iova *iova = NULL;
ba395927 2639
875764de
DW
2640 /* Restrict dma_mask to the width that the iommu can handle */
2641 dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
2642
2643 if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
ba395927
KA
2644 /*
2645 * First try to allocate an io virtual address in
284901a9 2646 * DMA_BIT_MASK(32) and if that fails then try allocating
3609801e 2647 * from higher range
ba395927 2648 */
875764de
DW
2649 iova = alloc_iova(&domain->iovad, nrpages,
2650 IOVA_PFN(DMA_BIT_MASK(32)), 1);
2651 if (iova)
2652 return iova;
2653 }
2654 iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);
2655 if (unlikely(!iova)) {
2656 		printk(KERN_ERR "Allocating %ld-page iova for %s failed\n",
2657 nrpages, pci_name(pdev));
f76aec76
KA
2658 return NULL;
2659 }
2660
2661 return iova;
2662}
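/*
 * For example, a device advertising a 64-bit dma_mask (with
 * dmar_forcedac off) first has its IOVAs carved out of the 32-bit
 * range below 4GiB; only when that space is exhausted does the
 * allocator fall back to the full range permitted by the domain's
 * address width.
 */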
2663
147202aa 2664static struct dmar_domain *__get_valid_domain_for_dev(struct pci_dev *pdev)
f76aec76
KA
2665{
2666 struct dmar_domain *domain;
2667 int ret;
2668
2669 domain = get_domain_for_dev(pdev,
2670 DEFAULT_DOMAIN_ADDRESS_WIDTH);
2671 if (!domain) {
2672 printk(KERN_ERR
2673 			"Allocating domain for %s failed\n", pci_name(pdev));
4fe05bbc 2674 return NULL;
ba395927
KA
2675 }
2676
2677 /* make sure context mapping is ok */
5331fe6f 2678 if (unlikely(!domain_context_mapped(pdev))) {
4ed0d3e6
FY
2679 ret = domain_context_mapping(domain, pdev,
2680 CONTEXT_TT_MULTI_LEVEL);
f76aec76
KA
2681 if (ret) {
2682 printk(KERN_ERR
2683 			       "Domain context map for %s failed\n",
2684 pci_name(pdev));
4fe05bbc 2685 return NULL;
f76aec76 2686 }
ba395927
KA
2687 }
2688
f76aec76
KA
2689 return domain;
2690}
2691
147202aa
DW
2692static inline struct dmar_domain *get_valid_domain_for_dev(struct pci_dev *dev)
2693{
2694 struct device_domain_info *info;
2695
2696 /* No lock here, assumes no domain exit in normal case */
2697 info = dev->dev.archdata.iommu;
2698 if (likely(info))
2699 return info->domain;
2700
2701 return __get_valid_domain_for_dev(dev);
2702}
2703
2c2e2c38
FY
2704static int iommu_dummy(struct pci_dev *pdev)
2705{
2706 return pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
2707}
2708
2709/* Check if the pdev needs to go through non-identity map and unmap process.*/
73676832 2710static int iommu_no_mapping(struct device *dev)
2c2e2c38 2711{
73676832 2712 struct pci_dev *pdev;
2c2e2c38
FY
2713 int found;
2714
73676832
DW
2715 if (unlikely(dev->bus != &pci_bus_type))
2716 return 1;
2717
2718 pdev = to_pci_dev(dev);
1e4c64c4
DW
2719 if (iommu_dummy(pdev))
2720 return 1;
2721
2c2e2c38 2722 if (!iommu_identity_mapping)
1e4c64c4 2723 return 0;
2c2e2c38
FY
2724
2725 found = identity_mapping(pdev);
2726 if (found) {
6941af28 2727 if (iommu_should_identity_map(pdev, 0))
2c2e2c38
FY
2728 return 1;
2729 else {
2730 /*
2731 			 * The 32-bit DMA device is removed from si_domain and falls back
2732 * to non-identity mapping.
2733 */
2734 domain_remove_one_dev_info(si_domain, pdev);
2735 printk(KERN_INFO "32bit %s uses non-identity mapping\n",
2736 pci_name(pdev));
2737 return 0;
2738 }
2739 } else {
2740 /*
2741 		 * In the case of a 64-bit DMA device detached from a VM, the device
2742 * is put into si_domain for identity mapping.
2743 */
6941af28 2744 if (iommu_should_identity_map(pdev, 0)) {
2c2e2c38 2745 int ret;
5fe60f4e
DW
2746 ret = domain_add_dev_info(si_domain, pdev,
2747 hw_pass_through ?
2748 CONTEXT_TT_PASS_THROUGH :
2749 CONTEXT_TT_MULTI_LEVEL);
2c2e2c38
FY
2750 if (!ret) {
2751 printk(KERN_INFO "64bit %s uses identity mapping\n",
2752 pci_name(pdev));
2753 return 1;
2754 }
2755 }
2756 }
2757
1e4c64c4 2758 return 0;
2c2e2c38
FY
2759}
2760
bb9e6d65
FT
2761static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
2762 size_t size, int dir, u64 dma_mask)
f76aec76
KA
2763{
2764 struct pci_dev *pdev = to_pci_dev(hwdev);
f76aec76 2765 struct dmar_domain *domain;
5b6985ce 2766 phys_addr_t start_paddr;
f76aec76
KA
2767 struct iova *iova;
2768 int prot = 0;
6865f0d1 2769 int ret;
8c11e798 2770 struct intel_iommu *iommu;
33041ec0 2771 unsigned long paddr_pfn = paddr >> PAGE_SHIFT;
f76aec76
KA
2772
2773 BUG_ON(dir == DMA_NONE);
2c2e2c38 2774
73676832 2775 if (iommu_no_mapping(hwdev))
6865f0d1 2776 return paddr;
f76aec76
KA
2777
2778 domain = get_valid_domain_for_dev(pdev);
2779 if (!domain)
2780 return 0;
2781
8c11e798 2782 iommu = domain_get_iommu(domain);
88cb6a74 2783 size = aligned_nrpages(paddr, size);
f76aec76 2784
c681d0ba 2785 iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size), dma_mask);
f76aec76
KA
2786 if (!iova)
2787 goto error;
2788
ba395927
KA
2789 /*
2790 * Check if DMAR supports zero-length reads on write only
2791 * mappings..
2792 */
2793 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
8c11e798 2794 !cap_zlr(iommu->cap))
ba395927
KA
2795 prot |= DMA_PTE_READ;
2796 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
2797 prot |= DMA_PTE_WRITE;
2798 /*
6865f0d1 2799 	 * paddr - (paddr + size) might span a partial page, so we map the whole
ba395927 2800 	 * page. Note: if two parts of one page are mapped separately, we
6865f0d1 2801 	 * might have two guest_addrs mapping to the same host paddr, but this
ba395927
KA
2802 * is not a big problem
2803 */
0ab36de2 2804 ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
33041ec0 2805 mm_to_dma_pfn(paddr_pfn), size, prot);
ba395927
KA
2806 if (ret)
2807 goto error;
2808
1f0ef2aa
DW
2809 /* it's a non-present to present mapping. Only flush if caching mode */
2810 if (cap_caching_mode(iommu->cap))
82653633 2811 iommu_flush_iotlb_psi(iommu, domain->id, mm_to_dma_pfn(iova->pfn_lo), size, 1);
1f0ef2aa 2812 else
8c11e798 2813 iommu_flush_write_buffer(iommu);
f76aec76 2814
03d6a246
DW
2815 start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
2816 start_paddr += paddr & ~PAGE_MASK;
2817 return start_paddr;
ba395927 2818
ba395927 2819error:
f76aec76
KA
2820 if (iova)
2821 __free_iova(&domain->iovad, iova);
4cf2e75d 2822 printk(KERN_ERR"Device %s request: %zx@%llx dir %d --- failed\n",
5b6985ce 2823 pci_name(pdev), size, (unsigned long long)paddr, dir);
ba395927
KA
2824 return 0;
2825}
2826
ffbbef5c
FT
2827static dma_addr_t intel_map_page(struct device *dev, struct page *page,
2828 unsigned long offset, size_t size,
2829 enum dma_data_direction dir,
2830 struct dma_attrs *attrs)
bb9e6d65 2831{
ffbbef5c
FT
2832 return __intel_map_single(dev, page_to_phys(page) + offset, size,
2833 dir, to_pci_dev(dev)->dma_mask);
bb9e6d65
FT
2834}
2835
5e0d2a6f 2836static void flush_unmaps(void)
2837{
80b20dd8 2838 int i, j;
5e0d2a6f 2839
5e0d2a6f 2840 timer_on = 0;
2841
2842 /* just flush them all */
2843 for (i = 0; i < g_num_of_iommus; i++) {
a2bb8459
WH
2844 struct intel_iommu *iommu = g_iommus[i];
2845 if (!iommu)
2846 continue;
c42d9f32 2847
9dd2fe89
YZ
2848 if (!deferred_flush[i].next)
2849 continue;
2850
78d5f0f5
NA
2851 		/* In caching mode, global flushes make emulation expensive */
2852 if (!cap_caching_mode(iommu->cap))
2853 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
93a23a72 2854 DMA_TLB_GLOBAL_FLUSH);
9dd2fe89 2855 for (j = 0; j < deferred_flush[i].next; j++) {
93a23a72
YZ
2856 unsigned long mask;
2857 struct iova *iova = deferred_flush[i].iova[j];
78d5f0f5
NA
2858 struct dmar_domain *domain = deferred_flush[i].domain[j];
2859
2860 /* On real hardware multiple invalidations are expensive */
2861 if (cap_caching_mode(iommu->cap))
2862 iommu_flush_iotlb_psi(iommu, domain->id,
2863 iova->pfn_lo, iova->pfn_hi - iova->pfn_lo + 1, 0);
2864 else {
2865 mask = ilog2(mm_to_dma_pfn(iova->pfn_hi - iova->pfn_lo + 1));
2866 iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
2867 (uint64_t)iova->pfn_lo << PAGE_SHIFT, mask);
2868 }
93a23a72 2869 __free_iova(&deferred_flush[i].domain[j]->iovad, iova);
80b20dd8 2870 }
9dd2fe89 2871 deferred_flush[i].next = 0;
5e0d2a6f 2872 }
2873
5e0d2a6f 2874 list_size = 0;
5e0d2a6f 2875}
2876
2877static void flush_unmaps_timeout(unsigned long data)
2878{
80b20dd8 2879 unsigned long flags;
2880
2881 spin_lock_irqsave(&async_umap_flush_lock, flags);
5e0d2a6f 2882 flush_unmaps();
80b20dd8 2883 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
5e0d2a6f 2884}
2885
2886static void add_unmap(struct dmar_domain *dom, struct iova *iova)
2887{
2888 unsigned long flags;
80b20dd8 2889 int next, iommu_id;
8c11e798 2890 struct intel_iommu *iommu;
5e0d2a6f 2891
2892 spin_lock_irqsave(&async_umap_flush_lock, flags);
80b20dd8 2893 if (list_size == HIGH_WATER_MARK)
2894 flush_unmaps();
2895
8c11e798
WH
2896 iommu = domain_get_iommu(dom);
2897 iommu_id = iommu->seq_id;
c42d9f32 2898
80b20dd8 2899 next = deferred_flush[iommu_id].next;
2900 deferred_flush[iommu_id].domain[next] = dom;
2901 deferred_flush[iommu_id].iova[next] = iova;
2902 deferred_flush[iommu_id].next++;
5e0d2a6f 2903
2904 if (!timer_on) {
2905 mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
2906 timer_on = 1;
2907 }
2908 list_size++;
2909 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
2910}
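/*
 * Illustration: in the default (non-strict) mode, unmaps arriving
 * within the 10ms timer window are queued per IOMMU and retired by
 * flush_unmaps() with one global IOTLB flush per IOMMU (or per-IOVA
 * PSI flushes in caching mode), rather than one flush per unmap;
 * the queue is also drained early once the total number of pending
 * entries reaches HIGH_WATER_MARK.
 */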
2911
ffbbef5c
FT
2912static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
2913 size_t size, enum dma_data_direction dir,
2914 struct dma_attrs *attrs)
ba395927 2915{
ba395927 2916 struct pci_dev *pdev = to_pci_dev(dev);
f76aec76 2917 struct dmar_domain *domain;
d794dc9b 2918 unsigned long start_pfn, last_pfn;
ba395927 2919 struct iova *iova;
8c11e798 2920 struct intel_iommu *iommu;
ba395927 2921
73676832 2922 if (iommu_no_mapping(dev))
f76aec76 2923 return;
2c2e2c38 2924
ba395927
KA
2925 domain = find_domain(pdev);
2926 BUG_ON(!domain);
2927
8c11e798
WH
2928 iommu = domain_get_iommu(domain);
2929
ba395927 2930 iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
85b98276
DW
2931 if (WARN_ONCE(!iova, "Driver unmaps unmatched page at PFN %llx\n",
2932 (unsigned long long)dev_addr))
ba395927 2933 return;
ba395927 2934
d794dc9b
DW
2935 start_pfn = mm_to_dma_pfn(iova->pfn_lo);
2936 last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
ba395927 2937
d794dc9b
DW
2938 pr_debug("Device %s unmapping: pfn %lx-%lx\n",
2939 pci_name(pdev), start_pfn, last_pfn);
ba395927 2940
f76aec76 2941 /* clear the whole page */
d794dc9b
DW
2942 dma_pte_clear_range(domain, start_pfn, last_pfn);
2943
f76aec76 2944 /* free page tables */
d794dc9b
DW
2945 dma_pte_free_pagetable(domain, start_pfn, last_pfn);
2946
5e0d2a6f 2947 if (intel_iommu_strict) {
03d6a246 2948 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
82653633 2949 last_pfn - start_pfn + 1, 0);
5e0d2a6f 2950 /* free iova */
2951 __free_iova(&domain->iovad, iova);
2952 } else {
2953 add_unmap(domain, iova);
2954 /*
2955 		 * queue up the release of the unmap to save the ~1/6th of the
2956 		 * CPU time used up by the iotlb flush operation...
2957 */
5e0d2a6f 2958 }
ba395927
KA
2959}
2960
d7ab5c46 2961static void *intel_alloc_coherent(struct device *hwdev, size_t size,
baa676fc
AP
2962 dma_addr_t *dma_handle, gfp_t flags,
2963 struct dma_attrs *attrs)
ba395927
KA
2964{
2965 void *vaddr;
2966 int order;
2967
5b6985ce 2968 size = PAGE_ALIGN(size);
ba395927 2969 order = get_order(size);
e8bb910d
AW
2970
2971 if (!iommu_no_mapping(hwdev))
2972 flags &= ~(GFP_DMA | GFP_DMA32);
2973 else if (hwdev->coherent_dma_mask < dma_get_required_mask(hwdev)) {
2974 if (hwdev->coherent_dma_mask < DMA_BIT_MASK(32))
2975 flags |= GFP_DMA;
2976 else
2977 flags |= GFP_DMA32;
2978 }
ba395927
KA
2979
2980 vaddr = (void *)__get_free_pages(flags, order);
2981 if (!vaddr)
2982 return NULL;
2983 memset(vaddr, 0, size);
2984
bb9e6d65
FT
2985 *dma_handle = __intel_map_single(hwdev, virt_to_bus(vaddr), size,
2986 DMA_BIDIRECTIONAL,
2987 hwdev->coherent_dma_mask);
ba395927
KA
2988 if (*dma_handle)
2989 return vaddr;
2990 free_pages((unsigned long)vaddr, order);
2991 return NULL;
2992}
2993
d7ab5c46 2994static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
baa676fc 2995 dma_addr_t dma_handle, struct dma_attrs *attrs)
ba395927
KA
2996{
2997 int order;
2998
5b6985ce 2999 size = PAGE_ALIGN(size);
ba395927
KA
3000 order = get_order(size);
3001
0db9b7ae 3002 intel_unmap_page(hwdev, dma_handle, size, DMA_BIDIRECTIONAL, NULL);
ba395927
KA
3003 free_pages((unsigned long)vaddr, order);
3004}
3005
d7ab5c46
FT
3006static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
3007 int nelems, enum dma_data_direction dir,
3008 struct dma_attrs *attrs)
ba395927 3009{
ba395927
KA
3010 struct pci_dev *pdev = to_pci_dev(hwdev);
3011 struct dmar_domain *domain;
d794dc9b 3012 unsigned long start_pfn, last_pfn;
f76aec76 3013 struct iova *iova;
8c11e798 3014 struct intel_iommu *iommu;
ba395927 3015
73676832 3016 if (iommu_no_mapping(hwdev))
ba395927
KA
3017 return;
3018
3019 domain = find_domain(pdev);
8c11e798
WH
3020 BUG_ON(!domain);
3021
3022 iommu = domain_get_iommu(domain);
ba395927 3023
c03ab37c 3024 iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address));
85b98276
DW
3025 if (WARN_ONCE(!iova, "Driver unmaps unmatched sglist at PFN %llx\n",
3026 (unsigned long long)sglist[0].dma_address))
f76aec76 3027 return;
f76aec76 3028
d794dc9b
DW
3029 start_pfn = mm_to_dma_pfn(iova->pfn_lo);
3030 last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
f76aec76
KA
3031
3032 /* clear the whole page */
d794dc9b
DW
3033 dma_pte_clear_range(domain, start_pfn, last_pfn);
3034
f76aec76 3035 /* free page tables */
d794dc9b 3036 dma_pte_free_pagetable(domain, start_pfn, last_pfn);
f76aec76 3037
acea0018
DW
3038 if (intel_iommu_strict) {
3039 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
82653633 3040 last_pfn - start_pfn + 1, 0);
acea0018
DW
3041 /* free iova */
3042 __free_iova(&domain->iovad, iova);
3043 } else {
3044 add_unmap(domain, iova);
3045 /*
3046 		 * queue up the release of the unmap to save the ~1/6th of the
3047 		 * CPU time used up by the iotlb flush operation...
3048 */
3049 }
ba395927
KA
3050}
3051
ba395927 3052static int intel_nontranslate_map_sg(struct device *hddev,
c03ab37c 3053 struct scatterlist *sglist, int nelems, int dir)
ba395927
KA
3054{
3055 int i;
c03ab37c 3056 struct scatterlist *sg;
ba395927 3057
c03ab37c 3058 for_each_sg(sglist, sg, nelems, i) {
12d4d40e 3059 BUG_ON(!sg_page(sg));
4cf2e75d 3060 sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
c03ab37c 3061 sg->dma_length = sg->length;
ba395927
KA
3062 }
3063 return nelems;
3064}
3065
d7ab5c46
FT
3066static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
3067 enum dma_data_direction dir, struct dma_attrs *attrs)
ba395927 3068{
ba395927 3069 int i;
ba395927
KA
3070 struct pci_dev *pdev = to_pci_dev(hwdev);
3071 struct dmar_domain *domain;
f76aec76
KA
3072 size_t size = 0;
3073 int prot = 0;
f76aec76
KA
3074 struct iova *iova = NULL;
3075 int ret;
c03ab37c 3076 struct scatterlist *sg;
b536d24d 3077 unsigned long start_vpfn;
8c11e798 3078 struct intel_iommu *iommu;
ba395927
KA
3079
3080 BUG_ON(dir == DMA_NONE);
73676832 3081 if (iommu_no_mapping(hwdev))
c03ab37c 3082 return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir);
ba395927 3083
f76aec76
KA
3084 domain = get_valid_domain_for_dev(pdev);
3085 if (!domain)
3086 return 0;
3087
8c11e798
WH
3088 iommu = domain_get_iommu(domain);
3089
b536d24d 3090 for_each_sg(sglist, sg, nelems, i)
88cb6a74 3091 size += aligned_nrpages(sg->offset, sg->length);
f76aec76 3092
5a5e02a6
DW
3093 iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size),
3094 pdev->dma_mask);
f76aec76 3095 if (!iova) {
c03ab37c 3096 sglist->dma_length = 0;
f76aec76
KA
3097 return 0;
3098 }
3099
3100 /*
3101 * Check if DMAR supports zero-length reads on write only
3102 * mappings..
3103 */
3104 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
8c11e798 3105 !cap_zlr(iommu->cap))
f76aec76
KA
3106 prot |= DMA_PTE_READ;
3107 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3108 prot |= DMA_PTE_WRITE;
3109
b536d24d 3110 start_vpfn = mm_to_dma_pfn(iova->pfn_lo);
e1605495 3111
f532959b 3112 ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
e1605495
DW
3113 if (unlikely(ret)) {
3114 /* clear the page */
3115 dma_pte_clear_range(domain, start_vpfn,
3116 start_vpfn + size - 1);
3117 /* free page tables */
3118 dma_pte_free_pagetable(domain, start_vpfn,
3119 start_vpfn + size - 1);
3120 /* free iova */
3121 __free_iova(&domain->iovad, iova);
3122 return 0;
ba395927
KA
3123 }
3124
1f0ef2aa
DW
3125 /* it's a non-present to present mapping. Only flush if caching mode */
3126 if (cap_caching_mode(iommu->cap))
82653633 3127 iommu_flush_iotlb_psi(iommu, domain->id, start_vpfn, size, 1);
1f0ef2aa 3128 else
8c11e798 3129 iommu_flush_write_buffer(iommu);
1f0ef2aa 3130
ba395927
KA
3131 return nelems;
3132}
3133
dfb805e8
FT
3134static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
3135{
3136 return !dma_addr;
3137}
3138
160c1d8e 3139struct dma_map_ops intel_dma_ops = {
baa676fc
AP
3140 .alloc = intel_alloc_coherent,
3141 .free = intel_free_coherent,
ba395927
KA
3142 .map_sg = intel_map_sg,
3143 .unmap_sg = intel_unmap_sg,
ffbbef5c
FT
3144 .map_page = intel_map_page,
3145 .unmap_page = intel_unmap_page,
dfb805e8 3146 .mapping_error = intel_mapping_error,
ba395927
KA
3147};
3148
3149static inline int iommu_domain_cache_init(void)
3150{
3151 int ret = 0;
3152
3153 iommu_domain_cache = kmem_cache_create("iommu_domain",
3154 sizeof(struct dmar_domain),
3155 0,
3156 SLAB_HWCACHE_ALIGN,
3157
3158 NULL);
3159 if (!iommu_domain_cache) {
3160 printk(KERN_ERR "Couldn't create iommu_domain cache\n");
3161 ret = -ENOMEM;
3162 }
3163
3164 return ret;
3165}
3166
3167static inline int iommu_devinfo_cache_init(void)
3168{
3169 int ret = 0;
3170
3171 iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
3172 sizeof(struct device_domain_info),
3173 0,
3174 SLAB_HWCACHE_ALIGN,
ba395927
KA
3175 NULL);
3176 if (!iommu_devinfo_cache) {
3177 printk(KERN_ERR "Couldn't create devinfo cache\n");
3178 ret = -ENOMEM;
3179 }
3180
3181 return ret;
3182}
3183
3184static inline int iommu_iova_cache_init(void)
3185{
3186 int ret = 0;
3187
3188 iommu_iova_cache = kmem_cache_create("iommu_iova",
3189 sizeof(struct iova),
3190 0,
3191 SLAB_HWCACHE_ALIGN,
ba395927
KA
3192 NULL);
3193 if (!iommu_iova_cache) {
3194 printk(KERN_ERR "Couldn't create iova cache\n");
3195 ret = -ENOMEM;
3196 }
3197
3198 return ret;
3199}
3200
3201static int __init iommu_init_mempool(void)
3202{
3203 int ret;
3204 ret = iommu_iova_cache_init();
3205 if (ret)
3206 return ret;
3207
3208 ret = iommu_domain_cache_init();
3209 if (ret)
3210 goto domain_error;
3211
3212 ret = iommu_devinfo_cache_init();
3213 if (!ret)
3214 return ret;
3215
3216 kmem_cache_destroy(iommu_domain_cache);
3217domain_error:
3218 kmem_cache_destroy(iommu_iova_cache);
3219
3220 return -ENOMEM;
3221}
3222
3223static void __init iommu_exit_mempool(void)
3224{
3225 kmem_cache_destroy(iommu_devinfo_cache);
3226 kmem_cache_destroy(iommu_domain_cache);
3227 kmem_cache_destroy(iommu_iova_cache);
3228
3229}
3230
556ab45f
DW
3231static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
3232{
3233 struct dmar_drhd_unit *drhd;
3234 u32 vtbar;
3235 int rc;
3236
3237 /* We know that this device on this chipset has its own IOMMU.
3238 * If we find it under a different IOMMU, then the BIOS is lying
3239 * to us. Hope that the IOMMU for this device is actually
3240 * disabled, and it needs no translation...
3241 */
3242 rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
3243 if (rc) {
3244 /* "can't" happen */
3245 dev_info(&pdev->dev, "failed to run vt-d quirk\n");
3246 return;
3247 }
3248 vtbar &= 0xffff0000;
3249
3250 	/* we know that this iommu should be at offset 0xa000 from vtbar */
3251 drhd = dmar_find_matched_drhd_unit(pdev);
3252 if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000,
3253 TAINT_FIRMWARE_WORKAROUND,
3254 "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
3255 pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
3256}
3257DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);
3258
ba395927
KA
3259static void __init init_no_remapping_devices(void)
3260{
3261 struct dmar_drhd_unit *drhd;
3262
3263 for_each_drhd_unit(drhd) {
3264 if (!drhd->include_all) {
3265 int i;
3266 for (i = 0; i < drhd->devices_cnt; i++)
3267 if (drhd->devices[i] != NULL)
3268 break;
3269 /* ignore DMAR unit if no pci devices exist */
3270 if (i == drhd->devices_cnt)
3271 drhd->ignored = 1;
3272 }
3273 }
3274
ba395927
KA
3275 for_each_drhd_unit(drhd) {
3276 int i;
3277 if (drhd->ignored || drhd->include_all)
3278 continue;
3279
3280 for (i = 0; i < drhd->devices_cnt; i++)
3281 if (drhd->devices[i] &&
c0771df8 3282 !IS_GFX_DEVICE(drhd->devices[i]))
ba395927
KA
3283 break;
3284
3285 if (i < drhd->devices_cnt)
3286 continue;
3287
c0771df8
DW
3288 /* This IOMMU has *only* gfx devices. Either bypass it or
3289 set the gfx_mapped flag, as appropriate */
3290 if (dmar_map_gfx) {
3291 intel_iommu_gfx_mapped = 1;
3292 } else {
3293 drhd->ignored = 1;
3294 for (i = 0; i < drhd->devices_cnt; i++) {
3295 if (!drhd->devices[i])
3296 continue;
3297 drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
3298 }
ba395927
KA
3299 }
3300 }
3301}
3302
f59c7b69
FY
3303#ifdef CONFIG_SUSPEND
3304static int init_iommu_hw(void)
3305{
3306 struct dmar_drhd_unit *drhd;
3307 struct intel_iommu *iommu = NULL;
3308
3309 for_each_active_iommu(iommu, drhd)
3310 if (iommu->qi)
3311 dmar_reenable_qi(iommu);
3312
b779260b
JC
3313 for_each_iommu(iommu, drhd) {
3314 if (drhd->ignored) {
3315 /*
3316 * we always have to disable PMRs or DMA may fail on
3317 * this device
3318 */
3319 if (force_on)
3320 iommu_disable_protect_mem_regions(iommu);
3321 continue;
3322 }
3323
f59c7b69
FY
3324 iommu_flush_write_buffer(iommu);
3325
3326 iommu_set_root_entry(iommu);
3327
3328 iommu->flush.flush_context(iommu, 0, 0, 0,
1f0ef2aa 3329 DMA_CCMD_GLOBAL_INVL);
f59c7b69 3330 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
1f0ef2aa 3331 DMA_TLB_GLOBAL_FLUSH);
b779260b
JC
3332 if (iommu_enable_translation(iommu))
3333 return 1;
b94996c9 3334 iommu_disable_protect_mem_regions(iommu);
f59c7b69
FY
3335 }
3336
3337 return 0;
3338}
3339
3340static void iommu_flush_all(void)
3341{
3342 struct dmar_drhd_unit *drhd;
3343 struct intel_iommu *iommu;
3344
3345 for_each_active_iommu(iommu, drhd) {
3346 iommu->flush.flush_context(iommu, 0, 0, 0,
1f0ef2aa 3347 DMA_CCMD_GLOBAL_INVL);
f59c7b69 3348 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
1f0ef2aa 3349 DMA_TLB_GLOBAL_FLUSH);
f59c7b69
FY
3350 }
3351}
3352
134fac3f 3353static int iommu_suspend(void)
f59c7b69
FY
3354{
3355 struct dmar_drhd_unit *drhd;
3356 struct intel_iommu *iommu = NULL;
3357 unsigned long flag;
3358
3359 for_each_active_iommu(iommu, drhd) {
3360 iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
3361 GFP_ATOMIC);
3362 if (!iommu->iommu_state)
3363 goto nomem;
3364 }
3365
3366 iommu_flush_all();
3367
3368 for_each_active_iommu(iommu, drhd) {
3369 iommu_disable_translation(iommu);
3370
1f5b3c3f 3371 raw_spin_lock_irqsave(&iommu->register_lock, flag);
f59c7b69
FY
3372
3373 iommu->iommu_state[SR_DMAR_FECTL_REG] =
3374 readl(iommu->reg + DMAR_FECTL_REG);
3375 iommu->iommu_state[SR_DMAR_FEDATA_REG] =
3376 readl(iommu->reg + DMAR_FEDATA_REG);
3377 iommu->iommu_state[SR_DMAR_FEADDR_REG] =
3378 readl(iommu->reg + DMAR_FEADDR_REG);
3379 iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
3380 readl(iommu->reg + DMAR_FEUADDR_REG);
3381
1f5b3c3f 3382 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
f59c7b69
FY
3383 }
3384 return 0;
3385
3386nomem:
3387 for_each_active_iommu(iommu, drhd)
3388 kfree(iommu->iommu_state);
3389
3390 return -ENOMEM;
3391}
3392
134fac3f 3393static void iommu_resume(void)
f59c7b69
FY
3394{
3395 struct dmar_drhd_unit *drhd;
3396 struct intel_iommu *iommu = NULL;
3397 unsigned long flag;
3398
3399 if (init_iommu_hw()) {
b779260b
JC
3400 if (force_on)
3401 panic("tboot: IOMMU setup failed, DMAR can not resume!\n");
3402 else
3403 WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
134fac3f 3404 return;
f59c7b69
FY
3405 }
3406
3407 for_each_active_iommu(iommu, drhd) {
3408
1f5b3c3f 3409 raw_spin_lock_irqsave(&iommu->register_lock, flag);
f59c7b69
FY
3410
3411 writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
3412 iommu->reg + DMAR_FECTL_REG);
3413 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
3414 iommu->reg + DMAR_FEDATA_REG);
3415 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
3416 iommu->reg + DMAR_FEADDR_REG);
3417 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
3418 iommu->reg + DMAR_FEUADDR_REG);
3419
1f5b3c3f 3420 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
f59c7b69
FY
3421 }
3422
3423 for_each_active_iommu(iommu, drhd)
3424 kfree(iommu->iommu_state);
f59c7b69
FY
3425}
3426
134fac3f 3427static struct syscore_ops iommu_syscore_ops = {
f59c7b69
FY
3428 .resume = iommu_resume,
3429 .suspend = iommu_suspend,
3430};
3431
134fac3f 3432static void __init init_iommu_pm_ops(void)
f59c7b69 3433{
134fac3f 3434 register_syscore_ops(&iommu_syscore_ops);
f59c7b69
FY
3435}
3436
3437#else
99592ba4 3438static inline void init_iommu_pm_ops(void) {}
f59c7b69
FY
3439#endif /* CONFIG_PM */
3440
318fe7df
SS
3441LIST_HEAD(dmar_rmrr_units);
3442
3443static void __init dmar_register_rmrr_unit(struct dmar_rmrr_unit *rmrr)
3444{
3445 list_add(&rmrr->list, &dmar_rmrr_units);
3446}
3447
3448
3449int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header)
3450{
3451 struct acpi_dmar_reserved_memory *rmrr;
3452 struct dmar_rmrr_unit *rmrru;
3453
3454 rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
3455 if (!rmrru)
3456 return -ENOMEM;
3457
3458 rmrru->hdr = header;
3459 rmrr = (struct acpi_dmar_reserved_memory *)header;
3460 rmrru->base_address = rmrr->base_address;
3461 rmrru->end_address = rmrr->end_address;
3462
3463 dmar_register_rmrr_unit(rmrru);
3464 return 0;
3465}
3466
3467static int __init
3468rmrr_parse_dev(struct dmar_rmrr_unit *rmrru)
3469{
3470 struct acpi_dmar_reserved_memory *rmrr;
3471 int ret;
3472
3473 rmrr = (struct acpi_dmar_reserved_memory *) rmrru->hdr;
3474 ret = dmar_parse_dev_scope((void *)(rmrr + 1),
3475 ((void *)rmrr) + rmrr->header.length,
3476 &rmrru->devices_cnt, &rmrru->devices, rmrr->segment);
3477
3478 if (ret || (rmrru->devices_cnt == 0)) {
3479 list_del(&rmrru->list);
3480 kfree(rmrru);
3481 }
3482 return ret;
3483}
3484
3485static LIST_HEAD(dmar_atsr_units);
3486
3487int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr)
3488{
3489 struct acpi_dmar_atsr *atsr;
3490 struct dmar_atsr_unit *atsru;
3491
3492 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
3493 atsru = kzalloc(sizeof(*atsru), GFP_KERNEL);
3494 if (!atsru)
3495 return -ENOMEM;
3496
3497 atsru->hdr = hdr;
3498 atsru->include_all = atsr->flags & 0x1;
3499
3500 list_add(&atsru->list, &dmar_atsr_units);
3501
3502 return 0;
3503}
3504
3505static int __init atsr_parse_dev(struct dmar_atsr_unit *atsru)
3506{
3507 int rc;
3508 struct acpi_dmar_atsr *atsr;
3509
3510 if (atsru->include_all)
3511 return 0;
3512
3513 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
3514 rc = dmar_parse_dev_scope((void *)(atsr + 1),
3515 (void *)atsr + atsr->header.length,
3516 &atsru->devices_cnt, &atsru->devices,
3517 atsr->segment);
3518 if (rc || !atsru->devices_cnt) {
3519 list_del(&atsru->list);
3520 kfree(atsru);
3521 }
3522
3523 return rc;
3524}
3525
3526int dmar_find_matched_atsr_unit(struct pci_dev *dev)
3527{
3528 int i;
3529 struct pci_bus *bus;
3530 struct acpi_dmar_atsr *atsr;
3531 struct dmar_atsr_unit *atsru;
3532
3533 dev = pci_physfn(dev);
3534
3535 list_for_each_entry(atsru, &dmar_atsr_units, list) {
3536 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
3537 if (atsr->segment == pci_domain_nr(dev->bus))
3538 goto found;
3539 }
3540
3541 return 0;
3542
3543found:
3544 for (bus = dev->bus; bus; bus = bus->parent) {
3545 struct pci_dev *bridge = bus->self;
3546
3547 if (!bridge || !pci_is_pcie(bridge) ||
3548 bridge->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
3549 return 0;
3550
3551 if (bridge->pcie_type == PCI_EXP_TYPE_ROOT_PORT) {
3552 for (i = 0; i < atsru->devices_cnt; i++)
3553 if (atsru->devices[i] == bridge)
3554 return 1;
3555 break;
3556 }
3557 }
3558
3559 if (atsru->include_all)
3560 return 1;
3561
3562 return 0;
3563}
3564
c8f369ab 3565int __init dmar_parse_rmrr_atsr_dev(void)
318fe7df
SS
3566{
3567 struct dmar_rmrr_unit *rmrr, *rmrr_n;
3568 struct dmar_atsr_unit *atsr, *atsr_n;
3569 int ret = 0;
3570
3571 list_for_each_entry_safe(rmrr, rmrr_n, &dmar_rmrr_units, list) {
3572 ret = rmrr_parse_dev(rmrr);
3573 if (ret)
3574 return ret;
3575 }
3576
3577 list_for_each_entry_safe(atsr, atsr_n, &dmar_atsr_units, list) {
3578 ret = atsr_parse_dev(atsr);
3579 if (ret)
3580 return ret;
3581 }
3582
3583 return ret;
3584}
3585
99dcaded
FY
3586/*
3587  * Here we only respond to the action of a device being unbound from its driver.
3588 *
3589  * An added device is not attached to its DMAR domain here yet. That will happen
3590 * when mapping the device to iova.
3591 */
3592static int device_notifier(struct notifier_block *nb,
3593 unsigned long action, void *data)
3594{
3595 struct device *dev = data;
3596 struct pci_dev *pdev = to_pci_dev(dev);
3597 struct dmar_domain *domain;
3598
44cd613c
DW
3599 if (iommu_no_mapping(dev))
3600 return 0;
3601
99dcaded
FY
3602 domain = find_domain(pdev);
3603 if (!domain)
3604 return 0;
3605
a97590e5 3606 if (action == BUS_NOTIFY_UNBOUND_DRIVER && !iommu_pass_through) {
99dcaded
FY
3607 domain_remove_one_dev_info(domain, pdev);
3608
a97590e5
AW
3609 if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) &&
3610 !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) &&
3611 list_empty(&domain->devices))
3612 domain_exit(domain);
3613 }
3614
99dcaded
FY
3615 return 0;
3616}
3617
3618static struct notifier_block device_nb = {
3619 .notifier_call = device_notifier,
3620};

int __init intel_iommu_init(void)
{
	int ret = 0;

	/* VT-d is required for a TXT/tboot launch, so enforce that */
	force_on = tboot_force_iommu();

	if (dmar_table_init()) {
		if (force_on)
			panic("tboot: Failed to initialize DMAR table\n");
		return -ENODEV;
	}

	if (dmar_dev_scope_init() < 0) {
		if (force_on)
			panic("tboot: Failed to initialize DMAR device scope\n");
		return -ENODEV;
	}

	if (no_iommu || dmar_disabled)
		return -ENODEV;

	if (iommu_init_mempool()) {
		if (force_on)
			panic("tboot: Failed to initialize iommu memory\n");
		return -ENODEV;
	}

	if (list_empty(&dmar_rmrr_units))
		printk(KERN_INFO "DMAR: No RMRR found\n");

	if (list_empty(&dmar_atsr_units))
		printk(KERN_INFO "DMAR: No ATSR found\n");

	if (dmar_init_reserved_ranges()) {
		if (force_on)
			panic("tboot: Failed to reserve iommu ranges\n");
		return -ENODEV;
	}

	init_no_remapping_devices();

	ret = init_dmars();
	if (ret) {
		if (force_on)
			panic("tboot: Failed to initialize DMARs\n");
		printk(KERN_ERR "IOMMU: dmar init failed\n");
		put_iova_domain(&reserved_iova_list);
		iommu_exit_mempool();
		return ret;
	}
	printk(KERN_INFO
	"PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");

	init_timer(&unmap_timer);
#ifdef CONFIG_SWIOTLB
	swiotlb = 0;
#endif
	dma_ops = &intel_dma_ops;

	init_iommu_pm_ops();

	bus_set_iommu(&pci_bus_type, &intel_iommu_ops);

	bus_register_notifier(&pci_bus_type, &device_nb);

	intel_iommu_enabled = 1;

	return 0;
}

static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
					   struct pci_dev *pdev)
{
	struct pci_dev *tmp, *parent;

	if (!iommu || !pdev)
		return;

	/* dependent device detach */
	tmp = pci_find_upstream_pcie_bridge(pdev);
	/* Secondary interface's bus number and devfn 0 */
	if (tmp) {
		parent = pdev->bus->self;
		while (parent != tmp) {
			iommu_detach_dev(iommu, parent->bus->number,
					 parent->devfn);
			parent = parent->bus->self;
		}
		if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */
			iommu_detach_dev(iommu,
					 tmp->subordinate->number, 0);
		else /* this is a legacy PCI bridge */
			iommu_detach_dev(iommu, tmp->bus->number,
					 tmp->devfn);
	}
}

static void domain_remove_one_dev_info(struct dmar_domain *domain,
					struct pci_dev *pdev)
{
	struct device_domain_info *info;
	struct intel_iommu *iommu;
	unsigned long flags;
	int found = 0;
	struct list_head *entry, *tmp;

	iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
				pdev->devfn);
	if (!iommu)
		return;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_safe(entry, tmp, &domain->devices) {
		info = list_entry(entry, struct device_domain_info, link);
		if (info->segment == pci_domain_nr(pdev->bus) &&
		    info->bus == pdev->bus->number &&
		    info->devfn == pdev->devfn) {
			unlink_domain_info(info);
			spin_unlock_irqrestore(&device_domain_lock, flags);

			iommu_disable_dev_iotlb(info);
			iommu_detach_dev(iommu, info->bus, info->devfn);
			iommu_detach_dependent_devices(iommu, pdev);
			free_devinfo_mem(info);

			spin_lock_irqsave(&device_domain_lock, flags);

			if (found)
				break;
			else
				continue;
		}

		/* If there are no other devices under the same iommu
		 * owned by this domain, clear this iommu in iommu_bmp
		 * and update the iommu count and coherency.
		 */
		if (iommu == device_to_iommu(info->segment, info->bus,
					     info->devfn))
			found = 1;
	}

	spin_unlock_irqrestore(&device_domain_lock, flags);

	if (found == 0) {
		unsigned long tmp_flags;
		spin_lock_irqsave(&domain->iommu_lock, tmp_flags);
		clear_bit(iommu->seq_id, domain->iommu_bmp);
		domain->iommu_count--;
		domain_update_iommu_cap(domain);
		spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags);

		if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) &&
		    !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)) {
			spin_lock_irqsave(&iommu->lock, tmp_flags);
			clear_bit(domain->id, iommu->domain_ids);
			iommu->domains[domain->id] = NULL;
			spin_unlock_irqrestore(&iommu->lock, tmp_flags);
		}
	}
}

static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
{
	struct device_domain_info *info;
	struct intel_iommu *iommu;
	unsigned long flags1, flags2;

	spin_lock_irqsave(&device_domain_lock, flags1);
	while (!list_empty(&domain->devices)) {
		info = list_entry(domain->devices.next,
				  struct device_domain_info, link);
		unlink_domain_info(info);
		spin_unlock_irqrestore(&device_domain_lock, flags1);

		iommu_disable_dev_iotlb(info);
		iommu = device_to_iommu(info->segment, info->bus, info->devfn);
		iommu_detach_dev(iommu, info->bus, info->devfn);
		iommu_detach_dependent_devices(iommu, info->dev);

		/* clear this iommu in iommu_bmp, update iommu count
		 * and capabilities
		 */
		spin_lock_irqsave(&domain->iommu_lock, flags2);
		if (test_and_clear_bit(iommu->seq_id,
				       domain->iommu_bmp)) {
			domain->iommu_count--;
			domain_update_iommu_cap(domain);
		}
		spin_unlock_irqrestore(&domain->iommu_lock, flags2);

		free_devinfo_mem(info);
		spin_lock_irqsave(&device_domain_lock, flags1);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags1);
}

/* domain id for virtual machine, it won't be set in context */
static unsigned long vm_domid;

static struct dmar_domain *iommu_alloc_vm_domain(void)
{
	struct dmar_domain *domain;

	domain = alloc_domain_mem();
	if (!domain)
		return NULL;

	domain->id = vm_domid++;
	domain->nid = -1;
	memset(domain->iommu_bmp, 0, sizeof(domain->iommu_bmp));
	domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;

	return domain;
}

static int md_domain_init(struct dmar_domain *domain, int guest_width)
{
	int adjust_width;

	init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
	spin_lock_init(&domain->iommu_lock);

	domain_reserve_special_ranges(domain);

	/* calculate AGAW */
	domain->gaw = guest_width;
	adjust_width = guestwidth_to_adjustwidth(guest_width);
	domain->agaw = width_to_agaw(adjust_width);

	INIT_LIST_HEAD(&domain->devices);

	domain->iommu_count = 0;
	domain->iommu_coherency = 0;
	domain->iommu_snooping = 0;
	domain->iommu_superpage = 0;
	domain->max_addr = 0;
	domain->nid = -1;

	/* always allocate the top pgd */
	domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
	if (!domain->pgd)
		return -ENOMEM;
	domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
	return 0;
}

static void iommu_free_vm_domain(struct dmar_domain *domain)
{
	unsigned long flags;
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	unsigned long i;
	unsigned long ndomains;

	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;
		iommu = drhd->iommu;

		ndomains = cap_ndoms(iommu->cap);
		for_each_set_bit(i, iommu->domain_ids, ndomains) {
			if (iommu->domains[i] == domain) {
				spin_lock_irqsave(&iommu->lock, flags);
				clear_bit(i, iommu->domain_ids);
				iommu->domains[i] = NULL;
				spin_unlock_irqrestore(&iommu->lock, flags);
				break;
			}
		}
	}
}

static void vm_domain_exit(struct dmar_domain *domain)
{
	/* Domain 0 is reserved, so don't process it */
	if (!domain)
		return;

	vm_domain_remove_all_dev_info(domain);
	/* destroy iovas */
	put_iova_domain(&domain->iovad);

	/* clear ptes */
	dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

	/* free page tables */
	dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

	iommu_free_vm_domain(domain);
	free_domain_mem(domain);
}

static int intel_iommu_domain_init(struct iommu_domain *domain)
{
	struct dmar_domain *dmar_domain;

	dmar_domain = iommu_alloc_vm_domain();
	if (!dmar_domain) {
		printk(KERN_ERR
			"intel_iommu_domain_init: dmar_domain == NULL\n");
		return -ENOMEM;
	}
	if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
		printk(KERN_ERR
			"intel_iommu_domain_init() failed\n");
		vm_domain_exit(dmar_domain);
		return -ENOMEM;
	}
	domain_update_iommu_cap(dmar_domain);
	domain->priv = dmar_domain;

	return 0;
}

static void intel_iommu_domain_destroy(struct iommu_domain *domain)
{
	struct dmar_domain *dmar_domain = domain->priv;

	domain->priv = NULL;
	vm_domain_exit(dmar_domain);
}

static int intel_iommu_attach_device(struct iommu_domain *domain,
				     struct device *dev)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct pci_dev *pdev = to_pci_dev(dev);
	struct intel_iommu *iommu;
	int addr_width;

	/* normally pdev is not mapped */
	if (unlikely(domain_context_mapped(pdev))) {
		struct dmar_domain *old_domain;

		old_domain = find_domain(pdev);
		if (old_domain) {
			if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
			    dmar_domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)
				domain_remove_one_dev_info(old_domain, pdev);
			else
				domain_remove_dev_info(old_domain);
		}
	}

	iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
				pdev->devfn);
	if (!iommu)
		return -ENODEV;

	/* check if this iommu agaw is sufficient for max mapped address */
	addr_width = agaw_to_width(iommu->agaw);
	if (addr_width > cap_mgaw(iommu->cap))
		addr_width = cap_mgaw(iommu->cap);

	if (dmar_domain->max_addr > (1LL << addr_width)) {
		printk(KERN_ERR "%s: iommu width (%d) is not "
		       "sufficient for the mapped address (%llx)\n",
		       __func__, addr_width, dmar_domain->max_addr);
		return -EFAULT;
	}
	dmar_domain->gaw = addr_width;

	/*
	 * Knock out extra levels of page tables if necessary
	 */
	while (iommu->agaw < dmar_domain->agaw) {
		struct dma_pte *pte;

		pte = dmar_domain->pgd;
		if (dma_pte_present(pte)) {
			dmar_domain->pgd = (struct dma_pte *)
				phys_to_virt(dma_pte_addr(pte));
			free_pgtable_page(pte);
		}
		dmar_domain->agaw--;
	}

	return domain_add_dev_info(dmar_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
}

static void intel_iommu_detach_device(struct iommu_domain *domain,
				      struct device *dev)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct pci_dev *pdev = to_pci_dev(dev);

	domain_remove_one_dev_info(dmar_domain, pdev);
}

static int intel_iommu_map(struct iommu_domain *domain,
			   unsigned long iova, phys_addr_t hpa,
			   size_t size, int iommu_prot)
{
	struct dmar_domain *dmar_domain = domain->priv;
	u64 max_addr;
	int prot = 0;
	int ret;

	if (iommu_prot & IOMMU_READ)
		prot |= DMA_PTE_READ;
	if (iommu_prot & IOMMU_WRITE)
		prot |= DMA_PTE_WRITE;
	if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
		prot |= DMA_PTE_SNP;

	max_addr = iova + size;
	if (dmar_domain->max_addr < max_addr) {
		u64 end;

		/* check if minimum agaw is sufficient for mapped address */
		end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
		if (end < max_addr) {
			printk(KERN_ERR "%s: iommu width (%d) is not "
			       "sufficient for the mapped address (%llx)\n",
			       __func__, dmar_domain->gaw, max_addr);
			return -EFAULT;
		}
		dmar_domain->max_addr = max_addr;
	}
	/* Round up size to next multiple of PAGE_SIZE, if it and
	   the low bits of hpa would take us onto the next page */
	size = aligned_nrpages(hpa, size);
	ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
				 hpa >> VTD_PAGE_SHIFT, size, prot);
	return ret;
}
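
/*
 * Editorial worked example for the rounding above (illustrative only,
 * assuming 4 KiB VT-d pages): with hpa = 0x1800 and size = 0x1000 the
 * mapping straddles a page boundary, so aligned_nrpages(0x1800, 0x1000)
 * computes PAGE_ALIGN(0x800 + 0x1000) >> VTD_PAGE_SHIFT = 2 pages, even
 * though the requested size alone covers only one page.
 */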

static size_t intel_iommu_unmap(struct iommu_domain *domain,
				unsigned long iova, size_t size)
{
	struct dmar_domain *dmar_domain = domain->priv;
	int order;

	order = dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
				    (iova + size - 1) >> VTD_PAGE_SHIFT);

	if (dmar_domain->max_addr == iova + size)
		dmar_domain->max_addr = iova;

	return PAGE_SIZE << order;
}

static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
					    unsigned long iova)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct dma_pte *pte;
	u64 phys = 0;

	pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, 0);
	if (pte)
		phys = dma_pte_addr(pte);

	return phys;
}

static int intel_iommu_domain_has_cap(struct iommu_domain *domain,
				      unsigned long cap)
{
	struct dmar_domain *dmar_domain = domain->priv;

	if (cap == IOMMU_CAP_CACHE_COHERENCY)
		return dmar_domain->iommu_snooping;
	if (cap == IOMMU_CAP_INTR_REMAP)
		return irq_remapping_enabled;

	return 0;
}
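
/*
 * Editorial sketch: a consumer such as KVM or VFIO reaches this callback
 * through the generic wrapper, for example (not part of this file):
 *
 *	if (iommu_domain_has_cap(domain, IOMMU_CAP_CACHE_COHERENCY))
 *		prot |= IOMMU_CACHE;
 *
 * so that DMA_PTE_SNP is only requested when the hardware advertises
 * snoop control.
 */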

/*
 * Group numbers are arbitrary.  Devices with the same group number
 * indicate that the iommu cannot differentiate between them.  To avoid
 * tracking used groups we just use the seg|bus|devfn of the lowest
 * level at which we are able to differentiate devices.
 */
static int intel_iommu_device_group(struct device *dev, unsigned int *groupid)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct pci_dev *bridge;
	union {
		struct {
			u8 devfn;
			u8 bus;
			u16 segment;
		} pci;
		u32 group;
	} id;

	if (iommu_no_mapping(dev))
		return -ENODEV;

	id.pci.segment = pci_domain_nr(pdev->bus);
	id.pci.bus = pdev->bus->number;
	id.pci.devfn = pdev->devfn;

	if (!device_to_iommu(id.pci.segment, id.pci.bus, id.pci.devfn))
		return -ENODEV;

	bridge = pci_find_upstream_pcie_bridge(pdev);
	if (bridge) {
		if (pci_is_pcie(bridge)) {
			id.pci.bus = bridge->subordinate->number;
			id.pci.devfn = 0;
		} else {
			id.pci.bus = bridge->bus->number;
			id.pci.devfn = bridge->devfn;
		}
	}

	if (!pdev->is_virtfn && iommu_group_mf)
		id.pci.devfn = PCI_DEVFN(PCI_SLOT(id.pci.devfn), 0);

	*groupid = id.group;

	return 0;
}
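
/*
 * Editorial example: on a little-endian system the union above packs
 * segment 0x0000, bus 0x03, devfn 0x10 into the group id 0x00000310,
 * i.e. (segment << 16) | (bus << 8) | devfn.
 */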

static struct iommu_ops intel_iommu_ops = {
	.domain_init	= intel_iommu_domain_init,
	.domain_destroy	= intel_iommu_domain_destroy,
	.attach_dev	= intel_iommu_attach_device,
	.detach_dev	= intel_iommu_detach_device,
	.map		= intel_iommu_map,
	.unmap		= intel_iommu_unmap,
	.iova_to_phys	= intel_iommu_iova_to_phys,
	.domain_has_cap	= intel_iommu_domain_has_cap,
	.device_group	= intel_iommu_device_group,
	.pgsize_bitmap	= INTEL_IOMMU_PGSIZES,
};
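
/*
 * Editorial sketch of how these callbacks are reached once the ops are
 * registered with bus_set_iommu() in intel_iommu_init().  A caller such
 * as VFIO or KVM uses only the generic IOMMU API; the iova, phys and
 * size values below are illustrative, not taken from this file:
 *
 *	struct iommu_domain *domain = iommu_domain_alloc(&pci_bus_type);
 *
 *	if (!domain)
 *		return -ENOMEM;
 *	if (iommu_attach_device(domain, &pdev->dev))	    (->attach_dev)
 *		goto out_free;
 *	iommu_map(domain, iova, phys, SZ_4K,		    (->map)
 *		  IOMMU_READ | IOMMU_WRITE);
 *	...
 *	iommu_unmap(domain, iova, SZ_4K);		    (->unmap)
 *	iommu_detach_device(domain, &pdev->dev);	    (->detach_dev)
 *	iommu_domain_free(domain);			    (->domain_destroy)
 */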

static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
{
	/*
	 * Mobile 4 Series Chipset neglects to set RWBF capability,
	 * but needs it:
	 */
	printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
	rwbf_quirk = 1;

	/* https://bugzilla.redhat.com/show_bug.cgi?id=538163 */
	if (dev->revision == 0x07) {
		printk(KERN_INFO "DMAR: Disabling IOMMU for graphics on this chipset\n");
		dmar_map_gfx = 0;
	}
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);

#define GGC 0x52
#define GGC_MEMORY_SIZE_MASK	(0xf << 8)
#define GGC_MEMORY_SIZE_NONE	(0x0 << 8)
#define GGC_MEMORY_SIZE_1M	(0x1 << 8)
#define GGC_MEMORY_SIZE_2M	(0x3 << 8)
#define GGC_MEMORY_VT_ENABLED	(0x8 << 8)
#define GGC_MEMORY_SIZE_2M_VT	(0x9 << 8)
#define GGC_MEMORY_SIZE_3M_VT	(0xa << 8)
#define GGC_MEMORY_SIZE_4M_VT	(0xb << 8)
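
/*
 * Editorial example (a hedged reading of the field, based only on the
 * macro names above): a GGC value whose bits 11:8 decode as
 * GGC_MEMORY_SIZE_1M means the BIOS allocated GTT space but none of it
 * for VT-d, so (ggc & GGC_MEMORY_VT_ENABLED) is false and the quirk
 * below disables the IOMMU for graphics; a value decoding as
 * GGC_MEMORY_SIZE_2M_VT keeps it enabled.
 */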

static void __devinit quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
{
	unsigned short ggc;

	if (pci_read_config_word(dev, GGC, &ggc))
		return;

	if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
		printk(KERN_INFO "DMAR: BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
		dmar_map_gfx = 0;
	} else if (dmar_map_gfx) {
		/* we have to ensure the gfx device is idle before we flush */
		printk(KERN_INFO "DMAR: Disabling batched IOTLB flush on Ironlake\n");
		intel_iommu_strict = 1;
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);

/* On Tylersburg chipsets, some BIOSes have been known to enable the
   ISOCH DMAR unit for the Azalia sound device, but not give it any
   TLB entries, which causes it to deadlock. Check for that. We do
   this in a function called from init_dmars(), instead of in a PCI
   quirk, because we don't want to print the obnoxious "BIOS broken"
   message if VT-d is actually disabled.
*/
static void __init check_tylersburg_isoch(void)
{
	struct pci_dev *pdev;
	uint32_t vtisochctrl;

	/* If there's no Azalia in the system anyway, forget it. */
	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
	if (!pdev)
		return;
	pci_dev_put(pdev);

	/* System Management Registers. Might be hidden, in which case
	   we can't do the sanity check. But that's OK, because the
	   known-broken BIOSes _don't_ actually hide it, so far. */
	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
	if (!pdev)
		return;

	if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
		pci_dev_put(pdev);
		return;
	}

	pci_dev_put(pdev);

	/* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
	if (vtisochctrl & 1)
		return;

	/* Drop all bits other than the number of TLB entries */
	vtisochctrl &= 0x1c;

	/* If we have the recommended number of TLB entries (16), fine. */
	if (vtisochctrl == 0x10)
		return;

	/* Zero TLB entries? You get to ride the short bus to school. */
	if (!vtisochctrl) {
		WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
		     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		     dmi_get_system_info(DMI_BIOS_VENDOR),
		     dmi_get_system_info(DMI_BIOS_VERSION),
		     dmi_get_system_info(DMI_PRODUCT_VERSION));
		iommu_identity_mapping |= IDENTMAP_AZALIA;
		return;
	}

	printk(KERN_WARNING "DMAR: Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n",
	       vtisochctrl);
}