/*
 * lv1set_section() - install a 1 MiB "section" mapping in the level-1
 * page table and flush the updated descriptor to memory.
 *
 * NOTE(review): this chunk is a unified-diff hunk, not plain C source.
 * Lines beginning with '-' are removed by the patch and lines beginning
 * with '+' are added; the remaining lines are unchanged context.  The
 * patch adds a 'prot' parameter so the shareable attribute can be set on
 * the entry when the caller requests a cache-coherent (IOMMU_CACHE)
 * mapping.
 */
static int lv1set_section(struct exynos_iommu_domain *domain,
sysmmu_pte_t *sent, sysmmu_iova_t iova,
- phys_addr_t paddr, atomic_t *pgcnt)
+ phys_addr_t paddr, int prot, atomic_t *pgcnt)
{
+ /* IOMMU_CACHE in 'prot' requests a shareable (coherent) mapping. */
+ bool shareable = !!(prot & IOMMU_CACHE);
+
/* Warn (but do not bail out) if a section is already mapped here. */
if (lv1ent_section(sent)) {
WARN(1, "Trying mapping on 1MiB@%#08x that is mapped",
iova);
}
*sent = mk_lv1ent_sect(paddr);
+ if (shareable)
+ set_lv1ent_shareable(sent);
/* Push the updated level-1 entry out so the SysMMU hardware sees it. */
pgtable_flush(sent, sent + 1);
return 0;
}
/*
 * lv2set_page() - install a level-2 mapping: either one small page
 * (size == SPAGE_SIZE) or a large page spanning SPAGES_PER_LPAGE
 * consecutive entries, then flush the touched descriptors and adjust the
 * per-table free-entry counter (*pgcnt).
 *
 * NOTE(review): unified-diff hunk ('-' = removed, '+' = added), not
 * plain C.  The patch adds a 'prot' parameter so IOMMU_CACHE can mark
 * the new entries shareable.
 *
 * NOTE(review): this hunk appears truncated/garbled in the LPAGE branch:
 * the 'else' block opens and is immediately followed by a stray '}', and
 * the mk_lv2ent_lpage() store below presumably belongs inside a loop
 * that advances 'pent' across all SPAGES_PER_LPAGE slots (the flush over
 * 'pent - SPAGES_PER_LPAGE .. pent' implies 'pent' was advanced) —
 * confirm against the upstream driver before applying this patch.
 */
static int lv2set_page(sysmmu_pte_t *pent, phys_addr_t paddr, size_t size,
- atomic_t *pgcnt)
+ int prot, atomic_t *pgcnt)
{
+ /* IOMMU_CACHE in 'prot' requests a shareable (coherent) mapping. */
+ bool shareable = !!(prot & IOMMU_CACHE);
+
if (size == SPAGE_SIZE) {
/* Refuse to overwrite an entry that is not in the fault state. */
if (WARN_ON(!lv2ent_fault(pent)))
return -EADDRINUSE;
*pent = mk_lv2ent_spage(paddr);
+ if (shareable)
+ set_lv2ent_shareable(pent);
pgtable_flush(pent, pent + 1);
/* One level-2 slot consumed. */
atomic_dec(pgcnt);
} else { /* size == LPAGE_SIZE */
}
*pent = mk_lv2ent_lpage(paddr);
+ if (shareable)
+ set_lv2ent_shareable(pent);
}
/* Flush every descriptor written for the large page in one go. */
pgtable_flush(pent - SPAGES_PER_LPAGE, pent);
atomic_sub(SPAGES_PER_LPAGE, pgcnt);
entry = section_entry(domain->pgtable, iova);
if (size == SECT_SIZE) {
- ret = lv1set_section(domain, entry, iova, paddr,
+ ret = lv1set_section(domain, entry, iova, paddr, prot,
&domain->lv2entcnt[lv1ent_offset(iova)]);
} else {
sysmmu_pte_t *pent;
if (IS_ERR(pent))
ret = PTR_ERR(pent);
else
- ret = lv2set_page(pent, paddr, size,
+ ret = lv2set_page(pent, paddr, size, prot,
&domain->lv2entcnt[lv1ent_offset(iova)]);
}