/* si_domain contains multiple devices */
#define DOMAIN_FLAG_STATIC_IDENTITY (1 << 2)
+/* define the limit of IOMMUs supported in each domain */
+#ifdef CONFIG_X86
+# define IOMMU_UNITS_SUPPORTED MAX_IO_APICS
+#else
+# define IOMMU_UNITS_SUPPORTED 64
+#endif
+
struct dmar_domain {
int id; /* domain id */
int nid; /* node id */
- unsigned long iommu_bmp; /* bitmap of iommus this domain uses*/
+ DECLARE_BITMAP(iommu_bmp, IOMMU_UNITS_SUPPORTED);
+ /* bitmap of iommus this domain uses */
struct list_head devices; /* all devices' list */
struct iova_domain iovad; /* iova's that belong to this domain */
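Note: DECLARE_BITMAP is the standard helper from <linux/types.h>; it expands to an unsigned long array sized for the requested bit count, roughly:

    #define BITS_TO_LONGS(nr)           DIV_ROUND_UP(nr, BITS_PER_LONG)
    #define DECLARE_BITMAP(name, bits)  unsigned long name[BITS_TO_LONGS(bits)]

So iommu_bmp grows from a single unsigned long (at most BITS_PER_LONG IOMMUs) to enough longs for IOMMU_UNITS_SUPPORTED bits, and every &domain->iommu_bmp below drops its & because the array name already decays to the unsigned long * that the bitmap helpers expect.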
BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE);
BUG_ON(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY);
- iommu_id = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
+ iommu_id = find_first_bit(domain->iommu_bmp, g_num_of_iommus);
if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
return NULL;
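For reference, find_first_bit() is declared via <linux/bitops.h> as

    unsigned long find_first_bit(const unsigned long *addr, unsigned long size);

It takes a pointer to the first word of the bitmap, so passing the bare array name is now correct, whereas the old &domain->iommu_bmp would yield an unsigned long (*)[] and draw an incompatible-pointer-type warning.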
domain->iommu_coherency = 1;
- for_each_set_bit(i, &domain->iommu_bmp, g_num_of_iommus) {
+ for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) {
if (!ecap_coherent(g_iommus[i]->ecap)) {
domain->iommu_coherency = 0;
break;
domain->iommu_snooping = 1;
- for_each_set_bit(i, &domain->iommu_bmp, g_num_of_iommus) {
+ for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) {
if (!ecap_sc_support(g_iommus[i]->ecap)) {
domain->iommu_snooping = 0;
break;
return NULL;
domain->nid = -1;
- memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
+ memset(domain->iommu_bmp, 0, sizeof(domain->iommu_bmp));
domain->flags = 0;
return domain;
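The memset is sized with sizeof(domain->iommu_bmp), so it clears the whole array rather than a single long. An equivalent spelling using the bitmap API, shown purely as an illustration, would be:

    bitmap_zero(domain->iommu_bmp, IOMMU_UNITS_SUPPORTED);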
domain->id = num;
set_bit(num, iommu->domain_ids);
- set_bit(iommu->seq_id, &domain->iommu_bmp);
+ set_bit(iommu->seq_id, domain->iommu_bmp);
iommu->domains[num] = domain;
spin_unlock_irqrestore(&iommu->lock, flags);
if (found) {
clear_bit(num, iommu->domain_ids);
- clear_bit(iommu->seq_id, &domain->iommu_bmp);
+ clear_bit(iommu->seq_id, domain->iommu_bmp);
iommu->domains[num] = NULL;
}
spin_unlock_irqrestore(&iommu->lock, flags);
dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
for_each_active_iommu(iommu, drhd)
- if (test_bit(iommu->seq_id, &domain->iommu_bmp))
+ if (test_bit(iommu->seq_id, domain->iommu_bmp))
iommu_detach_domain(domain, iommu);
free_domain_mem(domain);
spin_unlock_irqrestore(&iommu->lock, flags);
spin_lock_irqsave(&domain->iommu_lock, flags);
- if (!test_and_set_bit(iommu->seq_id, &domain->iommu_bmp)) {
+ if (!test_and_set_bit(iommu->seq_id, domain->iommu_bmp)) {
domain->iommu_count++;
if (domain->iommu_count == 1)
domain->nid = iommu->node;
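test_and_set_bit() atomically sets the bit and returns its previous value, so iommu_count is bumped only on the first attach of a given IOMMU; its prototype is roughly:

    int test_and_set_bit(int nr, volatile unsigned long *addr);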
* endfor
*/
for_each_drhd_unit(drhd) {
- g_num_of_iommus++;
/*
* lock not needed as this is only incremented in the single
* threaded kernel __init code path all other access are read
* only
*/
+ if (g_num_of_iommus < IOMMU_UNITS_SUPPORTED) {
+ g_num_of_iommus++;
+ continue;
+ }
+ printk_once(KERN_ERR "intel-iommu: exceeded %d IOMMUs\n",
+ IOMMU_UNITS_SUPPORTED);
}
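This guard also caps the count: DRHD units beyond the limit are reported once (printk_once) and not counted, i.e. after the loop (nr_drhd_units being a hypothetical name for the number of units found):

    /* effectively: */ g_num_of_iommus == min(nr_drhd_units, IOMMU_UNITS_SUPPORTED)

so bitmap walks bounded by g_num_of_iommus never read past the array.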
g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
if (found == 0) {
unsigned long tmp_flags;
spin_lock_irqsave(&domain->iommu_lock, tmp_flags);
- clear_bit(iommu->seq_id, &domain->iommu_bmp);
+ clear_bit(iommu->seq_id, domain->iommu_bmp);
domain->iommu_count--;
domain_update_iommu_cap(domain);
spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags);
*/
spin_lock_irqsave(&domain->iommu_lock, flags2);
if (test_and_clear_bit(iommu->seq_id,
- &domain->iommu_bmp)) {
+ domain->iommu_bmp)) {
domain->iommu_count--;
domain_update_iommu_cap(domain);
}
domain->id = vm_domid++;
domain->nid = -1;
- memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
+ memset(domain->iommu_bmp, 0, sizeof(domain->iommu_bmp));
domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;
return domain;
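For reference, the attach/detach logic above reduces to the standard bitmap membership pattern; a minimal standalone sketch with hypothetical names:

    DECLARE_BITMAP(bmp, IOMMU_UNITS_SUPPORTED);
    int count = 0;

    bitmap_zero(bmp, IOMMU_UNITS_SUPPORTED);   /* domain creation */
    if (!test_and_set_bit(seq_id, bmp))        /* attach: count first set only */
            count++;
    if (test_and_clear_bit(seq_id, bmp))       /* detach: count first clear only */
            count--;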