#include "amd_iommu_proto.h"
#include "amd_iommu_types.h"
#include "irq_remapping.h"
+#include "pci.h"
#define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28))
return true;
}
-static void swap_pci_ref(struct pci_dev **from, struct pci_dev *to)
-{
- pci_dev_put(*from);
- *from = to;
-}
-
static struct pci_bus *find_hosted_bus(struct pci_bus *bus)
{
while (!bus->self) {
static void iommu_poll_events(struct amd_iommu *iommu)
{
u32 head, tail;
- unsigned long flags;
-
- spin_lock_irqsave(&iommu->lock, flags);
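+ /* head and tail index the event log ring buffer in MMIO space */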
head = readl(iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
tail = readl(iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);
}
writel(head, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
-
- spin_unlock_irqrestore(&iommu->lock, flags);
}
static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u64 *raw)
static void iommu_poll_ppr_log(struct amd_iommu *iommu)
{
- unsigned long flags;
u32 head, tail;
if (iommu->ppr_log == NULL)
return;
- /* enable ppr interrupts again */
- writel(MMIO_STATUS_PPR_INT_MASK, iommu->mmio_base + MMIO_STATUS_OFFSET);
-
- spin_lock_irqsave(&iommu->lock, flags);
-
head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
head = (head + PPR_ENTRY_SIZE) % PPR_LOG_SIZE;
writel(head, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
- /*
- * Release iommu->lock because ppr-handling might need to
- * re-acquire it
- */
- spin_unlock_irqrestore(&iommu->lock, flags);
-
/* Handle PPR entry */
iommu_handle_ppr_entry(iommu, entry);
- spin_lock_irqsave(&iommu->lock, flags);
-
/* Refresh ring-buffer information */
head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
}
-
- spin_unlock_irqrestore(&iommu->lock, flags);
}
irqreturn_t amd_iommu_int_thread(int irq, void *data)
{
- struct amd_iommu *iommu;
+ struct amd_iommu *iommu = (struct amd_iommu *) data;
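+ /* Read the pending interrupt status once before entering the handler loop */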
+ u32 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
- for_each_iommu(iommu) {
- iommu_poll_events(iommu);
- iommu_poll_ppr_log(iommu);
- }
+ while (status & (MMIO_STATUS_EVT_INT_MASK | MMIO_STATUS_PPR_INT_MASK)) {
+ /* Enable EVT and PPR interrupts again */
+ writel((MMIO_STATUS_EVT_INT_MASK | MMIO_STATUS_PPR_INT_MASK),
+ iommu->mmio_base + MMIO_STATUS_OFFSET);
+
+ if (status & MMIO_STATUS_EVT_INT_MASK) {
+ pr_devel("AMD-Vi: Processing IOMMU Event Log\n");
+ iommu_poll_events(iommu);
+ }
+ if (status & MMIO_STATUS_PPR_INT_MASK) {
+ pr_devel("AMD-Vi: Processing IOMMU PPR Log\n");
+ iommu_poll_ppr_log(iommu);
+ }
+
+ /*
+ * Hardware bug: ERBT1312
+ * When re-enabling the interrupt (by writing 1
+ * to clear the bit), the hardware might also try to set
+ * the interrupt bit in the event status register.
+ * In this scenario, the bit will be set and will
+ * disable subsequent interrupts.
+ *
+ * Workaround: The IOMMU driver should read back the
+ * status register and check if the interrupt bits are cleared.
+ * If not, the driver will need to go through the interrupt
+ * handler again and re-clear the bits.
+ */
+ status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
+ }
return IRQ_HANDLED;
}
/* Large PTE found which maps this address */
unmap_size = PTE_PAGE_SIZE(*pte);
+
+ /*
+ * Only unmap from the first pte in the large page: if
+ * bus_addr is not aligned to unmap_size, it points into
+ * the middle of the mapping.
+ */
+ if ((unmap_size - 1) & bus_addr)
+ break;
count = PAGE_SIZE_PTE_COUNT(unmap_size);
for (i = 0; i < count; i++)
pte[i] = 0ULL;
unmapped += unmap_size;
}
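+ /* The early break above can leave unmapped == 0 */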
- BUG_ON(!is_power_of_2(unmapped));
+ BUG_ON(unmapped && !is_power_of_2(unmapped));
return unmapped;
}
spin_unlock_irqrestore(&domain->lock, flags);
}
-/*
- * This is a special map_sg function which is used if we should map a
- * device which is not handled by an AMD IOMMU in the system.
- */
-static int map_sg_no_iommu(struct device *dev, struct scatterlist *sglist,
- int nelems, int dir)
-{
- struct scatterlist *s;
- int i;
-
- for_each_sg(sglist, s, nelems, i) {
- s->dma_address = (dma_addr_t)sg_phys(s);
- s->dma_length = s->length;
- }
-
- return nelems;
-}
-
/*
* The exported map_sg function for dma_ops (handles scatter-gather
* lists).
INC_STATS_COUNTER(cnt_map_sg);
domain = get_domain(dev);
- if (PTR_ERR(domain) == -EINVAL)
- return map_sg_no_iommu(dev, sglist, nelems, dir);
- else if (IS_ERR(domain))
+ if (IS_ERR(domain))
return 0;
dma_mask = *dev->dma_mask;
static void cleanup_domain(struct protection_domain *domain)
{
- struct iommu_dev_data *dev_data, *next;
+ struct iommu_dev_data *entry;
unsigned long flags;
write_lock_irqsave(&amd_iommu_devtable_lock, flags);
- list_for_each_entry_safe(dev_data, next, &domain->dev_list, list) {
- __detach_device(dev_data);
- atomic_set(&dev_data->bind, 0);
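+ /*
+ * __detach_device() can remove more than one entry from the
+ * device list, so list_for_each_entry_safe() is not safe here;
+ * pull entries off the head until the list is empty.
+ */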
+ while (!list_empty(&domain->dev_list)) {
+ entry = list_first_entry(&domain->dev_list,
+ struct iommu_dev_data, list);
+ __detach_device(entry);
+ atomic_set(&entry->bind, 0);
}
write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
}
static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
- unsigned long iova)
+ dma_addr_t iova)
{
struct protection_domain *domain = dom->priv;
unsigned long offset_mask;
if (!table)
goto out;
+ /* Initialize table spin-lock */
+ spin_lock_init(&table->lock);
+
if (ioapic)
/* Keep the first 32 indexes free for IOAPIC interrupts */
table->min_index = 32;
iommu_flush_dte(iommu, devid);
if (devid != alias) {
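+ /* Keep the alias device's lookup entry and DTE in sync with devid */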
irq_lookup_table[alias] = table;
- set_dte_irq_entry(devid, table);
+ set_dte_irq_entry(alias, table);
iommu_flush_dte(iommu, alias);
}
c = 0;
if (c == count) {
- struct irq_2_iommu *irte_info;
+ struct irq_2_irte *irte_info;
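+ /* Mark all count entries of the found range as allocated */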
for (; c != 0; --c)
table->table[index - c + 1] = IRTE_ALLOCATED;
index -= count - 1;
cfg->remapped = 1;
- irte_info = &cfg->irq_2_iommu;
- irte_info->sub_handle = devid;
- irte_info->irte_index = index;
+ irte_info = &cfg->irq_2_irte;
+ irte_info->devid = devid;
+ irte_info->index = index;
goto out;
}
struct io_apic_irq_attr *attr)
{
struct irq_remap_table *table;
- struct irq_2_iommu *irte_info;
+ struct irq_2_irte *irte_info;
struct irq_cfg *cfg;
union irte irte;
int ioapic_id;
if (!cfg)
return -EINVAL;
- irte_info = &cfg->irq_2_iommu;
+ irte_info = &cfg->irq_2_irte;
ioapic_id = mpc_ioapic_id(attr->ioapic);
devid = get_ioapic_devid(ioapic_id);
/* Setup IRQ remapping info */
cfg->remapped = 1;
- irte_info->sub_handle = devid;
- irte_info->irte_index = index;
+ irte_info->devid = devid;
+ irte_info->index = index;
/* Setup IRTE for IOMMU */
irte.val = 0;
static int set_affinity(struct irq_data *data, const struct cpumask *mask,
bool force)
{
- struct irq_2_iommu *irte_info;
+ struct irq_2_irte *irte_info;
unsigned int dest, irq;
struct irq_cfg *cfg;
union irte irte;
cfg = data->chip_data;
irq = data->irq;
- irte_info = &cfg->irq_2_iommu;
+ irte_info = &cfg->irq_2_irte;
if (!cpumask_intersects(mask, cpu_online_mask))
return -EINVAL;
- if (get_irte(irte_info->sub_handle, irte_info->irte_index, &irte))
+ if (get_irte(irte_info->devid, irte_info->index, &irte))
return -EBUSY;
if (assign_irq_vector(irq, cfg, mask))
irte.fields.vector = cfg->vector;
irte.fields.destination = dest;
- modify_irte(irte_info->sub_handle, irte_info->irte_index, irte);
+ modify_irte(irte_info->devid, irte_info->index, irte);
if (cfg->move_in_progress)
send_cleanup_vector(cfg);
static int free_irq(int irq)
{
- struct irq_2_iommu *irte_info;
+ struct irq_2_irte *irte_info;
struct irq_cfg *cfg;
cfg = irq_get_chip_data(irq);
if (!cfg)
return -EINVAL;
- irte_info = &cfg->irq_2_iommu;
+ irte_info = &cfg->irq_2_irte;
- free_irte(irte_info->sub_handle, irte_info->irte_index);
+ free_irte(irte_info->devid, irte_info->index);
return 0;
}
unsigned int irq, unsigned int dest,
struct msi_msg *msg, u8 hpet_id)
{
- struct irq_2_iommu *irte_info;
+ struct irq_2_irte *irte_info;
struct irq_cfg *cfg;
union irte irte;
if (!cfg)
return;
- irte_info = &cfg->irq_2_iommu;
+ irte_info = &cfg->irq_2_irte;
irte.val = 0;
irte.fields.vector = cfg->vector;
irte.fields.dm = apic->irq_dest_mode;
irte.fields.valid = 1;
- modify_irte(irte_info->sub_handle, irte_info->irte_index, irte);
+ modify_irte(irte_info->devid, irte_info->index, irte);
msg->address_hi = MSI_ADDR_BASE_HI;
msg->address_lo = MSI_ADDR_BASE_LO;
- msg->data = irte_info->irte_index;
+ msg->data = irte_info->index;
}
static int msi_alloc_irq(struct pci_dev *pdev, int irq, int nvec)
static int msi_setup_irq(struct pci_dev *pdev, unsigned int irq,
int index, int offset)
{
- struct irq_2_iommu *irte_info;
+ struct irq_2_irte *irte_info;
struct irq_cfg *cfg;
u16 devid;
return 0;
devid = get_device_id(&pdev->dev);
- irte_info = &cfg->irq_2_iommu;
+ irte_info = &cfg->irq_2_irte;
cfg->remapped = 1;
- irte_info->sub_handle = devid;
- irte_info->irte_index = index + offset;
+ irte_info->devid = devid;
+ irte_info->index = index + offset;
return 0;
}
static int setup_hpet_msi(unsigned int irq, unsigned int id)
{
- struct irq_2_iommu *irte_info;
+ struct irq_2_irte *irte_info;
struct irq_cfg *cfg;
int index, devid;
if (!cfg)
return -EINVAL;
- irte_info = &cfg->irq_2_iommu;
+ irte_info = &cfg->irq_2_irte;
devid = get_hpet_devid(id);
if (devid < 0)
return devid;
return index;
cfg->remapped = 1;
- irte_info->sub_handle = devid;
- irte_info->irte_index = index;
+ irte_info->devid = devid;
+ irte_info->index = index;
return 0;
}