/*
 * Bit 0: readable, bit 1: writable,
 * bits 12-63: host physical address.
 */
struct dma_pte {
	u64 val;
};
-#define dma_clear_pte(p) do {(p).val = 0;} while (0)
-#define dma_set_pte_readable(p) do {(p).val |= DMA_PTE_READ;} while (0)
-#define dma_set_pte_writable(p) do {(p).val |= DMA_PTE_WRITE;} while (0)
-#define dma_set_pte_prot(p, prot) \
- do {(p).val = ((p).val & ~3) | ((prot) & 3); } while (0)
-#define dma_pte_addr(p) ((p).val & VTD_PAGE_MASK)
-#define dma_set_pte_addr(p, addr) do {\
- (p).val |= ((addr) & VTD_PAGE_MASK); } while (0)
-#define dma_pte_present(p) (((p).val & 3) != 0)
+static inline void dma_clear_pte(struct dma_pte *pte)
+{
+ pte->val = 0;
+}
+
+static inline void dma_set_pte_readable(struct dma_pte *pte)
+{
+ pte->val |= DMA_PTE_READ;
+}
+
+static inline void dma_set_pte_writable(struct dma_pte *pte)
+{
+ pte->val |= DMA_PTE_WRITE;
+}
+
+static inline void dma_set_pte_prot(struct dma_pte *pte, unsigned long prot)
+{
+ pte->val = (pte->val & ~3) | (prot & 3);
+}
+
+static inline u64 dma_pte_addr(struct dma_pte *pte)
+{
+ return (pte->val & VTD_PAGE_MASK);
+}
+
+static inline void dma_set_pte_addr(struct dma_pte *pte, u64 addr)
+{
+ pte->val |= (addr & VTD_PAGE_MASK);
+}
+
+static inline bool dma_pte_present(struct dma_pte *pte)
+{
+ return (pte->val & 3) != 0;
+}
struct dmar_domain {
int id; /* domain id */
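
As an aside, a minimal standalone sketch of how the new helpers compose when a leaf PTE is built (as domain_page_mapping() does further down). The constants mirror the driver's definitions (DMA_PTE_READ == 1, DMA_PTE_WRITE == 2, 4 KiB pages) and are repeated here only so the fragment compiles on its own:

#include <stdio.h>
#include <stdbool.h>

typedef unsigned long long u64;

/* Mirrors the driver's header (assumed: bits 0/1 = R/W, 4 KiB pages) */
#define DMA_PTE_READ	(1)
#define DMA_PTE_WRITE	(2)
#define VTD_PAGE_SHIFT	(12)
#define VTD_PAGE_MASK	(((u64)-1) << VTD_PAGE_SHIFT)

struct dma_pte {
	u64 val;
};

static inline void dma_set_pte_addr(struct dma_pte *pte, u64 addr)
{
	pte->val |= (addr & VTD_PAGE_MASK);
}

static inline void dma_set_pte_prot(struct dma_pte *pte, unsigned long prot)
{
	pte->val = (pte->val & ~3) | (prot & 3);
}

static inline bool dma_pte_present(struct dma_pte *pte)
{
	return (pte->val & 3) != 0;
}

int main(void)
{
	struct dma_pte pte = { 0 };

	/* Map page frame 0x1234 read-only, as domain_page_mapping() would */
	dma_set_pte_addr(&pte, (u64)0x1234 << VTD_PAGE_SHIFT);
	dma_set_pte_prot(&pte, DMA_PTE_READ);

	printf("val=%#llx present=%d\n", pte.val, dma_pte_present(&pte));
	/* prints: val=0x1234001 present=1 */
	return 0;
}

Unlike the macros they replace, the inline functions type-check their argument and evaluate it exactly once, at no runtime cost.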
if (level == 1)
break;
- if (!dma_pte_present(*pte)) {
+ if (!dma_pte_present(pte)) {
tmp_page = alloc_pgtable_page();
if (!tmp_page) {
}
__iommu_flush_cache(domain->iommu, tmp_page,
PAGE_SIZE);
- dma_set_pte_addr(*pte, virt_to_phys(tmp_page));
+ dma_set_pte_addr(pte, virt_to_phys(tmp_page));
		/*
		 * Upper-level tables always set r/w; the last-level
		 * page table controls the actual read/write permission.
		 */
- dma_set_pte_readable(*pte);
- dma_set_pte_writable(*pte);
+ dma_set_pte_readable(pte);
+ dma_set_pte_writable(pte);
__iommu_flush_cache(domain->iommu, pte, sizeof(*pte));
}
- parent = phys_to_virt(dma_pte_addr(*pte));
+ parent = phys_to_virt(dma_pte_addr(pte));
level--;
}
if (level == total)
return pte;
- if (!dma_pte_present(*pte))
+ if (!dma_pte_present(pte))
break;
- parent = phys_to_virt(dma_pte_addr(*pte));
+ parent = phys_to_virt(dma_pte_addr(pte));
total--;
}
return NULL;
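
Both walks above peel one level per loop iteration, indexing into the current table with 9 bits of the DMA address. A hedged sketch of that index arithmetic (the 9-bit stride matches the driver's 512-entry tables; the helper names here are illustrative):

/* Sketch, assumed layout: 4 KiB pages and 512-entry tables, so a DMA
 * address contributes 9 index bits per level above the 12-bit
 * page offset.
 */
#define VTD_PAGE_SHIFT	(12)
#define LEVEL_STRIDE	(9)
#define LEVEL_MASK	((1ULL << LEVEL_STRIDE) - 1)

static inline int level_to_offset_bits(int level)
{
	return VTD_PAGE_SHIFT + (level - 1) * LEVEL_STRIDE;
}

static inline int address_level_offset(unsigned long long addr, int level)
{
	return (addr >> level_to_offset_bits(level)) & LEVEL_MASK;
}
/* e.g. addr 0x40201000 yields index 1 at levels 3, 2 and 1 */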
pte = dma_addr_level_pte(domain, addr, 1);
if (pte) {
- dma_clear_pte(*pte);
+ dma_clear_pte(pte);
__iommu_flush_cache(domain->iommu, pte, sizeof(*pte));
}
}
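
Note the pattern repeated in every hunk: each PTE store is paired with an __iommu_flush_cache() on the modified entry, so that hardware whose page-table walks do not snoop the CPU caches still observes the update before the mapping (or unmapping) is relied upon.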
pte = dma_addr_level_pte(domain, tmp, level);
if (pte) {
free_pgtable_page(
- phys_to_virt(dma_pte_addr(*pte)));
- dma_clear_pte(*pte);
+ phys_to_virt(dma_pte_addr(pte)));
+ dma_clear_pte(pte);
__iommu_flush_cache(domain->iommu,
pte, sizeof(*pte));
}
	/* We don't need a lock here; nobody else
	 * touches this iova range.
	 */
- BUG_ON(dma_pte_addr(*pte));
- dma_set_pte_addr(*pte, start_pfn << VTD_PAGE_SHIFT);
- dma_set_pte_prot(*pte, prot);
+ BUG_ON(dma_pte_addr(pte));
+ dma_set_pte_addr(pte, start_pfn << VTD_PAGE_SHIFT);
+ dma_set_pte_prot(pte, prot);
__iommu_flush_cache(domain->iommu, pte, sizeof(*pte));
start_pfn++;
index++;
pte = addr_to_dma_pte(domain, iova);
if (pte)
- pfn = dma_pte_addr(*pte);
+ pfn = dma_pte_addr(pte);
return pfn >> VTD_PAGE_SHIFT;
}
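
In this last hunk, dma_pte_addr() masks off the low permission bits and the final shift turns the page-aligned physical address into a frame number. A caller can rebuild the full physical address by restoring the in-page offset; a small illustrative sketch (the helper name is not the driver's):

typedef unsigned long long u64;

#define VTD_PAGE_SHIFT	(12)
#define VTD_PAGE_MASK	(((u64)-1) << VTD_PAGE_SHIFT)

/* Illustrative: combine the frame number returned above with the
 * original iova's in-page offset.
 */
static inline u64 pfn_to_phys(u64 pfn, u64 iova)
{
	return (pfn << VTD_PAGE_SHIFT) | (iova & ~VTD_PAGE_MASK);
}
/* e.g. pfn 0x1234, iova ending in 0xffb -> phys 0x1234ffb */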