static int timer_on;
static long list_size;
+static void domain_exit(struct dmar_domain *domain);
static void domain_remove_dev_info(struct dmar_domain *domain);
static void domain_remove_one_dev_info(struct dmar_domain *domain,
struct pci_dev *pdev);
+static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
+ struct pci_dev *pdev);
#ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
int dmar_disabled = 0;
return 0;
}
-
-static void domain_exit(struct dmar_domain *domain);
-static void vm_domain_exit(struct dmar_domain *domain);
-
static void free_dmar_iommu(struct intel_iommu *iommu)
{
struct dmar_domain *domain;
spin_lock_irqsave(&domain->iommu_lock, flags);
count = --domain->iommu_count;
spin_unlock_irqrestore(&domain->iommu_lock, flags);
- if (count == 0) {
- if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
- vm_domain_exit(domain);
- else
- domain_exit(domain);
- }
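+ /* domain_exit() handles both VM and native domains */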
+ if (count == 0)
+ domain_exit(domain);
}
}
free_context_table(iommu);
}
-static struct dmar_domain *alloc_domain(void)
+static struct dmar_domain *alloc_domain(bool vm)
{
+ /* domain id for a virtual machine; it is never set in a context entry */
+ static atomic_t vm_domid = ATOMIC_INIT(0);
struct dmar_domain *domain;
domain = alloc_domain_mem();
return NULL;
domain->nid = -1;
+ domain->iommu_count = 0;
memset(domain->iommu_bmp, 0, sizeof(domain->iommu_bmp));
domain->flags = 0;
+ spin_lock_init(&domain->iommu_lock);
+ INIT_LIST_HEAD(&domain->devices);
+ if (vm) {
+ domain->id = atomic_inc_return(&vm_domid);
+ domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;
+ }
return domain;
}
{
unsigned long flags;
int num, ndomains;
- int found = 0;
spin_lock_irqsave(&iommu->lock, flags);
ndomains = cap_ndoms(iommu->cap);
for_each_set_bit(num, iommu->domain_ids, ndomains) {
if (iommu->domains[num] == domain) {
- found = 1;
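+ /* release the domain number allocated on this iommu */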
+ clear_bit(num, iommu->domain_ids);
+ iommu->domains[num] = NULL;
break;
}
}
-
- if (found) {
- clear_bit(num, iommu->domain_ids);
- clear_bit(iommu->seq_id, domain->iommu_bmp);
- iommu->domains[num] = NULL;
- }
spin_unlock_irqrestore(&iommu->lock, flags);
}
unsigned long sagaw;
init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
- spin_lock_init(&domain->iommu_lock);
-
domain_reserve_special_ranges(domain);
/* calculate AGAW */
return -ENODEV;
}
domain->agaw = agaw;
- INIT_LIST_HEAD(&domain->devices);
if (ecap_coherent(iommu->ecap))
domain->iommu_coherency = 1;
if (!intel_iommu_strict)
flush_unmaps_timeout(0);
+ /* remove associated devices */
domain_remove_dev_info(domain);
+
/* destroy iovas */
put_iova_domain(&domain->iovad);
/* free page tables */
dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
+ /* clear attached or cached domains */
for_each_active_iommu(iommu, drhd)
- if (test_bit(iommu->seq_id, domain->iommu_bmp))
+ if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
+ test_bit(iommu->seq_id, domain->iommu_bmp))
iommu_detach_domain(domain, iommu);
free_domain_mem(domain);
static void domain_remove_dev_info(struct dmar_domain *domain)
{
struct device_domain_info *info;
- unsigned long flags;
+ unsigned long flags, flags2;
struct intel_iommu *iommu;
spin_lock_irqsave(&device_domain_lock, flags);
iommu_disable_dev_iotlb(info);
iommu = device_to_iommu(info->segment, info->bus, info->devfn);
iommu_detach_dev(iommu, info->bus, info->devfn);
- free_devinfo_mem(info);
+ if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) {
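+ /* detach devices this one depends on, e.g. upstream bridges */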
+ iommu_detach_dependent_devices(iommu, info->dev);
+ /* clear this iommu in iommu_bmp, update iommu count
+ * and capabilities
+ */
+ spin_lock_irqsave(&domain->iommu_lock, flags2);
+ if (test_and_clear_bit(iommu->seq_id,
+ domain->iommu_bmp)) {
+ domain->iommu_count--;
+ domain_update_iommu_cap(domain);
+ }
+ spin_unlock_irqrestore(&domain->iommu_lock, flags2);
+ }
+
+ free_devinfo_mem(info);
spin_lock_irqsave(&device_domain_lock, flags);
}
spin_unlock_irqrestore(&device_domain_lock, flags);
iommu = drhd->iommu;
/* Allocate and initialize a new domain for the device */
- domain = alloc_domain();
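+ /* not a VM domain; an id is assigned when it is attached to an iommu */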
+ domain = alloc_domain(false);
if (!domain)
goto error;
if (iommu_attach_domain(domain, iommu)) {
struct intel_iommu *iommu;
int nid, ret = 0;
- si_domain = alloc_domain();
+ si_domain = alloc_domain(false);
if (!si_domain)
return -EFAULT;
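+ /* mark it as the static identity domain before attaching it to the iommus */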
+ si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY;
+
for_each_active_iommu(iommu, drhd) {
ret = iommu_attach_domain(si_domain, iommu);
if (ret) {
return -EFAULT;
}
- si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY;
pr_debug("IOMMU: identity mapping domain is domain %d\n",
si_domain->id);
}
}
-static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
-{
- struct device_domain_info *info;
- struct intel_iommu *iommu;
- unsigned long flags1, flags2;
-
- spin_lock_irqsave(&device_domain_lock, flags1);
- while (!list_empty(&domain->devices)) {
- info = list_entry(domain->devices.next,
- struct device_domain_info, link);
- unlink_domain_info(info);
- spin_unlock_irqrestore(&device_domain_lock, flags1);
-
- iommu_disable_dev_iotlb(info);
- iommu = device_to_iommu(info->segment, info->bus, info->devfn);
- iommu_detach_dev(iommu, info->bus, info->devfn);
- iommu_detach_dependent_devices(iommu, info->dev);
-
- /* clear this iommu in iommu_bmp, update iommu count
- * and capabilities
- */
- spin_lock_irqsave(&domain->iommu_lock, flags2);
- if (test_and_clear_bit(iommu->seq_id,
- domain->iommu_bmp)) {
- domain->iommu_count--;
- domain_update_iommu_cap(domain);
- }
- spin_unlock_irqrestore(&domain->iommu_lock, flags2);
-
- free_devinfo_mem(info);
- spin_lock_irqsave(&device_domain_lock, flags1);
- }
- spin_unlock_irqrestore(&device_domain_lock, flags1);
-}
-
-/* domain id for virtual machine, it won't be set in context */
-static atomic_t vm_domid = ATOMIC_INIT(0);
-
-static struct dmar_domain *iommu_alloc_vm_domain(void)
-{
- struct dmar_domain *domain;
-
- domain = alloc_domain_mem();
- if (!domain)
- return NULL;
-
- domain->id = atomic_inc_return(&vm_domid);
- domain->nid = -1;
- memset(domain->iommu_bmp, 0, sizeof(domain->iommu_bmp));
- domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;
-
- return domain;
-}
-
static int md_domain_init(struct dmar_domain *domain, int guest_width)
{
int adjust_width;
init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
- spin_lock_init(&domain->iommu_lock);
-
domain_reserve_special_ranges(domain);
/* calculate AGAW */
adjust_width = guestwidth_to_adjustwidth(guest_width);
domain->agaw = width_to_agaw(adjust_width);
- INIT_LIST_HEAD(&domain->devices);
-
- domain->iommu_count = 0;
domain->iommu_coherency = 0;
domain->iommu_snooping = 0;
domain->iommu_superpage = 0;
return 0;
}
-static void iommu_free_vm_domain(struct dmar_domain *domain)
-{
- unsigned long flags;
- struct dmar_drhd_unit *drhd;
- struct intel_iommu *iommu;
- unsigned long i;
- unsigned long ndomains;
-
- for_each_active_iommu(iommu, drhd) {
- ndomains = cap_ndoms(iommu->cap);
- for_each_set_bit(i, iommu->domain_ids, ndomains) {
- if (iommu->domains[i] == domain) {
- spin_lock_irqsave(&iommu->lock, flags);
- clear_bit(i, iommu->domain_ids);
- iommu->domains[i] = NULL;
- spin_unlock_irqrestore(&iommu->lock, flags);
- break;
- }
- }
- }
-}
-
-static void vm_domain_exit(struct dmar_domain *domain)
-{
- /* Domain 0 is reserved, so dont process it */
- if (!domain)
- return;
-
- vm_domain_remove_all_dev_info(domain);
- /* destroy iovas */
- put_iova_domain(&domain->iovad);
-
- /* clear ptes */
- dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
-
- /* free page tables */
- dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
-
- iommu_free_vm_domain(domain);
- free_domain_mem(domain);
-}
-
static int intel_iommu_domain_init(struct iommu_domain *domain)
{
struct dmar_domain *dmar_domain;
- dmar_domain = iommu_alloc_vm_domain();
+ dmar_domain = alloc_domain(true);
if (!dmar_domain) {
printk(KERN_ERR
"intel_iommu_domain_init: dmar_domain == NULL\n");
if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
printk(KERN_ERR
"intel_iommu_domain_init() failed\n");
- vm_domain_exit(dmar_domain);
+ domain_exit(dmar_domain);
return -ENOMEM;
}
domain_update_iommu_cap(dmar_domain);
struct dmar_domain *dmar_domain = domain->priv;
domain->priv = NULL;
- vm_domain_exit(dmar_domain);
+ domain_exit(dmar_domain);
}
static int intel_iommu_attach_device(struct iommu_domain *domain,