	struct tegra_smmu *smmu;
	unsigned int use_count;
	struct page *count;
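+	/* one page table page pointer per PDE */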
+	struct page **pts;
	struct page *pd;
	unsigned id;
	u32 attr;
		return NULL;
	}
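+	/*
+	 * Keep a struct page pointer for each page table so that it can be
+	 * looked up in software rather than decoded from the hardware PDE.
+	 */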
+	as->pts = kcalloc(SMMU_NUM_PDE, sizeof(*as->pts), GFP_KERNEL);
+	if (!as->pts) {
+		__free_page(as->count);
+		__free_page(as->pd);
+		kfree(as);
+		return NULL;
+	}
+
	/* clear PDEs */
	pd = page_address(as->pd);
	SetPageReserved(as->pd);
{
	unsigned int pd_index = iova_pd_index(iova);
	struct page *pt_page;
-	u32 *pd;
-	pd = page_address(as->pd);
-
-	if (!pd[pd_index])
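+	/* A NULL entry means no page table was allocated for this PDE. */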
+	pt_page = as->pts[pd_index];
+	if (!pt_page)
		return NULL;
-	pt_page = pfn_to_page(pd[pd_index] & as->smmu->pfn_mask);
	*pagep = pt_page;
	return tegra_smmu_pte_offset(pt_page, iova);
	struct page *page;
	unsigned int i;
-	if (pd[pde] == 0) {
+	if (!as->pts[pde]) {
		page = alloc_page(GFP_KERNEL | __GFP_DMA);
		if (!page)
			return NULL;
		for (i = 0; i < SMMU_NUM_PTE; i++)
			pt[i] = 0;
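+		/*
+		 * Remember the new page table page so that later lookups and
+		 * teardown need not read the PDE back from memory.
+		 */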
+		as->pts[pde] = page;
+
		smmu->soc->ops->flush_dcache(page, 0, SMMU_SIZE_PT);
		pd[pde] = SMMU_MK_PDE(page, SMMU_PDE_ATTR | SMMU_PDE_NEXT);
		smmu_flush_tlb_section(smmu, as->id, iova);
		smmu_flush(smmu);
	} else {
-		page = pfn_to_page(pd[pde] & smmu->pfn_mask);
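+		/* The page table already exists; reuse the tracked page. */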
+		page = as->pts[pde];
	}
	*pagep = page;
	unsigned int pde = iova_pd_index(iova);
	u32 *count = page_address(as->count);
	u32 *pd = page_address(as->pd);
-	struct page *page;
-
-	page = pfn_to_page(pd[pde] & smmu->pfn_mask);
+	struct page *page = as->pts[pde];
	/*
	 * When no entries in this page table are used anymore, return the
	 * memory page to the system.
	 */
		/* Finally, free the page */
		ClearPageReserved(page);
		__free_page(page);
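+		/*
+		 * Clear the tracking pointer so that subsequent lookups see
+		 * this PDE as unallocated.
+		 */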
+		as->pts[pde] = NULL;
	}
}
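
The core idea of the patch can be seen in isolation: track each page table page in a software array indexed by PDE number, so that lookup, reuse, and teardown never have to decode a hardware PDE value. Below is a minimal user-space sketch of that scheme, not driver code; the names as_model, pt_get, pt_put, and NUM_PDE are illustrative stand-ins, and plain calloc/free stand in for page allocation.

/*
 * Sketch of the software page-table tracking scheme: one pointer slot
 * per PDE, NULL meaning "no page table allocated yet". Hypothetical
 * names; the real driver stores struct page pointers in as->pts[].
 */
#include <stdio.h>
#include <stdlib.h>

#define NUM_PDE 1024

struct as_model {
	void *pts[NUM_PDE];	/* one page table pointer per PDE */
};

/* Return the tracked page table, allocating a zeroed one on first use. */
static void *pt_get(struct as_model *as, unsigned int pde)
{
	if (!as->pts[pde]) {
		as->pts[pde] = calloc(1, 4096);	/* zeroed "page" */
		if (!as->pts[pde])
			return NULL;
	}
	return as->pts[pde];
}

/* Release a page table and clear the tracking slot. */
static void pt_put(struct as_model *as, unsigned int pde)
{
	free(as->pts[pde]);
	as->pts[pde] = NULL;	/* subsequent lookups see this PDE as unused */
}

int main(void)
{
	struct as_model as = { 0 };

	printf("first:  %p\n", pt_get(&as, 3));	/* allocates */
	printf("second: %p\n", pt_get(&as, 3));	/* reuses the tracked page */
	pt_put(&as, 3);
	return 0;
}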