#define PD_DMA_OPS_MASK (1UL << 0) /* domain used for dma_ops */
#define PD_DEFAULT_MASK (1UL << 1) /* domain is a default dma_ops
domain for an IOMMU */
+ ++extern bool amd_iommu_dump;
+ ++#define DUMP_printk(format, arg...) \
+ ++ do { \
+ ++ if (amd_iommu_dump) \
+ ++ printk(KERN_INFO "AMD IOMMU: " format, ## arg); \
+ ++ } while(0)
+ +
++ +/*
++ + * Make iterating over all IOMMUs easier
++ + */
++ +#define for_each_iommu(iommu) \
++ + list_for_each_entry((iommu), &amd_iommu_list, list)
++ +#define for_each_iommu_safe(iommu, next) \
++ + list_for_each_entry_safe((iommu), (next), &amd_iommu_list, list)
++
+++ #define APERTURE_RANGE_SHIFT 27 /* 128 MB */
+++ #define APERTURE_RANGE_SIZE (1ULL << APERTURE_RANGE_SHIFT)
+++ #define APERTURE_RANGE_PAGES (APERTURE_RANGE_SIZE >> PAGE_SHIFT)
+++ #define APERTURE_MAX_RANGES 32 /* allows 4GB of DMA address space */
+++ #define APERTURE_RANGE_INDEX(a) ((a) >> APERTURE_RANGE_SHIFT)
+++ #define APERTURE_PAGE_INDEX(a) (((a) >> 21) & 0x3fULL)
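+++ /*
+++  * Example: a dma address of 0x08300000 (~131 MB) falls into
+++  * 128 MB range APERTURE_RANGE_INDEX() == 1 and into 2 MB
+++  * page-table page APERTURE_PAGE_INDEX() == 1 within that range.
+++  */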
++
/*
* This structure contains generic data for IOMMU protection domains
* independent of their use.
static int dma_ops_unity_map(struct dma_ops_domain *dma_dom,
struct unity_map_entry *e);
static struct dma_ops_domain *find_protection_domain(u16 devid);
-
+++ static u64 *alloc_pte(struct protection_domain *dom,
+++ unsigned long address,
+++ u64 **pte_page, gfp_t gfp);
+++ static void dma_ops_reserve_addresses(struct dma_ops_domain *dom,
+++ unsigned long start_page,
+++ unsigned int pages);
+++#ifndef BUS_NOTIFY_UNBOUND_DRIVER
+++#define BUS_NOTIFY_UNBOUND_DRIVER 0x0005
+++#endif
+
#ifdef CONFIG_AMD_IOMMU_STATS
/*
dma_dom->need_flush = false;
dma_dom->target_dev = 0xffff;
--- /* Intialize the exclusion range if necessary */
--- if (iommu->exclusion_start &&
--- iommu->exclusion_start < dma_dom->aperture_size) {
--- unsigned long startpage = iommu->exclusion_start >> PAGE_SHIFT;
--- int pages = iommu_num_pages(iommu->exclusion_start,
--- iommu->exclusion_length,
--- PAGE_SIZE);
--- dma_ops_reserve_addresses(dma_dom, startpage, pages);
--- }
+++ if (alloc_new_range(iommu, dma_dom, true, GFP_KERNEL))
+++ goto free_dma_dom;
/*
--- * At the last step, build the page tables so we don't need to
--- * allocate page table pages in the dma_ops mapping/unmapping
--- * path.
+++ * Mark the first page as allocated so we never return 0 as
+++ * a valid DMA address; this lets us use 0 as an error value
*/
--- num_pte_pages = dma_dom->aperture_size / (PAGE_SIZE * 512);
--- dma_dom->pte_pages = kzalloc(num_pte_pages * sizeof(void *),
--- GFP_KERNEL);
--- if (!dma_dom->pte_pages)
-- goto free_dma_dom;
--
-- l2_pde = (u64 *)get_zeroed_page(GFP_KERNEL);
-- if (l2_pde == NULL)
--- goto free_dma_dom;
+++ dma_dom->aperture[0]->bitmap[0] = 1;
+++ dma_dom->next_address = 0;
- l2_pde = (u64 *)get_zeroed_page(GFP_KERNEL);
- if (l2_pde == NULL)
- goto free_dma_dom;
-
--- dma_dom->domain.pt_root[0] = IOMMU_L2_PDE(virt_to_phys(l2_pde));
---
--- for (i = 0; i < num_pte_pages; ++i) {
--- dma_dom->pte_pages[i] = (u64 *)get_zeroed_page(GFP_KERNEL);
--- if (!dma_dom->pte_pages[i])
--- goto free_dma_dom;
--- address = virt_to_phys(dma_dom->pte_pages[i]);
--- l2_pde[i] = IOMMU_L1_PDE(address);
--- }
return dma_dom;
if (!dma_domain)
dma_domain = iommu->default_dom;
attach_device(iommu, &dma_domain->domain, devid);
- -- printk(KERN_INFO "AMD IOMMU: Using protection domain %d for "
- -- "device %s\n", dma_domain->domain.id, dev_name(dev));
+ ++ DUMP_printk("Using protection domain %d for device %s\n",
+ ++ dma_domain->domain.id, dev_name(dev));
break;
--- case BUS_NOTIFY_UNBIND_DRIVER:
+++ case BUS_NOTIFY_UNBOUND_DRIVER:
if (!domain)
goto out;
detach_device(domain, devid);
* found in the system. Devices not assigned to any other
* protection domain will be assigned to the default one.
*/
-- - list_for_each_entry(iommu, &amd_iommu_list, list) {
-- iommu->default_dom = dma_ops_domain_alloc(iommu, order);
++ + for_each_iommu(iommu) {
- iommu->default_dom = dma_ops_domain_alloc(iommu, order);
+++ iommu->default_dom = dma_ops_domain_alloc(iommu);
if (iommu->default_dom == NULL)
return -ENOMEM;
iommu->default_dom->domain.flags |= PD_DEFAULT_MASK;
to handle */
LIST_HEAD(amd_iommu_unity_map); /* a list of required unity mappings
we find in ACPI */
--- unsigned amd_iommu_aperture_order = 26; /* size of aperture in power of 2 */
-
+ ++#ifdef CONFIG_IOMMU_STRESS
+ ++bool amd_iommu_isolate = false;
+ ++#else
bool amd_iommu_isolate = true; /* if true, device isolation is
enabled */
+ ++#endif
+ ++
bool amd_iommu_unmap_flush; /* if true, flush on every unmap */
LIST_HEAD(amd_iommu_list); /* list of all AMD IOMMUs in the
alias = false;
break;
case IVHD_DEV_ALIAS:
+ ++
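+ ++ /*
+ ++  * devid packs the PCI bus number in its high byte and the PCI
+ ++  * devfn in its low byte; PCI_BUS/PCI_SLOT/PCI_FUNC below unpack
+ ++  * it, e.g. devid 0x0a10 prints as 0a:02.0.
+ ++  */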
+ ++ DUMP_printk(" DEV_ALIAS\t\t\t devid: %02x:%02x.%x "
+ ++ "flags: %02x devid_to: %02x:%02x.%x\n",
+ ++ PCI_BUS(e->devid),
+ ++ PCI_SLOT(e->devid),
+ ++ PCI_FUNC(e->devid),
+ ++ e->flags,
+ ++ PCI_BUS(e->ext >> 8),
+ ++ PCI_SLOT(e->ext >> 8),
+ ++ PCI_FUNC(e->ext >> 8));
+ ++
devid = e->devid;
devid_to = e->ext >> 8;
--- set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
+++ set_dev_entry_from_acpi(iommu, devid_to, e->flags, 0);
amd_iommu_alias_table[devid] = devid_to;
break;
case IVHD_DEV_ALIAS_RANGE:
switch (m->type) {
default:
+++ kfree(e);
+++ return 0;
case ACPI_IVMD_TYPE:
+ ++ s = "IVMD_TYPE\t\t\t";
e->devid_start = e->devid_end = m->devid;
break;
case ACPI_IVMD_TYPE_ALL:
return 1;
}
--- static int __init parse_amd_iommu_size_options(char *str)
--- {
--- unsigned order = PAGE_SHIFT + get_order(memparse(str, &str));
---
--- if ((order > 24) && (order < 31))
--- amd_iommu_aperture_order = order;
---
--- return 1;
--- }
---
+ ++__setup("amd_iommu_dump", parse_amd_iommu_dump);
__setup("amd_iommu=", parse_amd_iommu_options);
--- __setup("amd_iommu_size=", parse_amd_iommu_size_options);
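The parse_amd_iommu_dump handler registered above falls outside the hunks shown
here; a minimal sketch of such a handler, assuming it only needs to set the
amd_iommu_dump flag introduced above:

static int __init parse_amd_iommu_dump(char *str)
{
	amd_iommu_dump = true; /* enable DUMP_printk() output */

	return 1;
}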