 static void sysmmu_get_interrupt_info(struct sysmmu_drvdata *data,
 				int *flags, unsigned long *addr, bool is_secure)
 {
-	unsigned int itype;
+	unsigned long itype;
 	u32 info;
 	itype = __ffs(__sysmmu_get_intr_status(data, is_secure));
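Why the itype change: the kernel's __ffs() takes and returns unsigned long, so storing its result in an unsigned int is an implicit narrowing on 64-bit builds. A minimal userspace sketch of the same issue, using __builtin_ctzl as a stand-in for __ffs() and a made-up status value:

#include <stdio.h>

int main(void)
{
	unsigned long intr_status = 0x100;	/* hypothetical IRQ status word */
	unsigned long itype;

	/* __builtin_ctzl, like __ffs(), reports the lowest set bit;
	 * keeping itype unsigned long avoids the narrowing warning.
	 */
	itype = (unsigned long)__builtin_ctzl(intr_status);

	printf("itype = %lu\n", itype);		/* prints 8 */
	return 0;
}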
 	if (owner && owner->fault_handler)
 		owner->fault_handler(owner->domain, owner->master,
-			fault_addr, (unsigned long)data, owner->token);
+			fault_addr, *(int *)data, owner->token);
 	return 0;
 }
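Why the cast change: on 64-bit, (unsigned long)data passes the pointer's numeric value, truncated to fit the int parameter, rather than the payload the caller stored behind it. A sketch assuming, as the hunk suggests, that data points at an int flags word; fault_handler() and notifier_cb() are illustrative names, not the driver's:

#include <stdio.h>

static void fault_handler(int flags)
{
	printf("flags = %d\n", flags);
}

static int notifier_cb(void *data)
{
	/* Wrong on 64-bit: (unsigned long)data is the pointer's address
	 * value, truncated into the int parameter.
	 */
	/* fault_handler((unsigned long)data); */

	/* Right: dereference to recover the int the caller stored. */
	fault_handler(*(int *)data);
	return 0;
}

int main(void)
{
	int flags = 42;

	return notifier_cb(&flags);
}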
 	set_lv2ent_shareable(ent);
 	ent++;
-	iova += PAGE_SIZE;
+	iova += (sysmmu_iova_t)PAGE_SIZE;
 	if ((iova & SECT_MASK) != ((iova - 1) & SECT_MASK)) {
 		pgtable_flush(ent_beg, ent);
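Why the PAGE_SIZE cast: PAGE_SIZE is an unsigned long, so iova += PAGE_SIZE promotes the arithmetic to unsigned long and narrows it back on assignment. The explicit cast keeps the expression in the IOVA type and marks the narrowing as intentional; the same pattern covers the (next - addr) increments in the walker hunks below. A sketch assuming sysmmu_iova_t is a 32-bit type, as in the upstream exynos-iommu driver:

#include <stdint.h>
#include <stdio.h>

typedef uint32_t sysmmu_iova_t;	/* assumed 32-bit IOVA type */

#define PAGE_SIZE 4096UL	/* unsigned long, like the kernel macro */

int main(void)
{
	sysmmu_iova_t iova = 0x1000;

	/* Without the cast the addition happens in unsigned long and is
	 * narrowed back on assignment; the cast documents the narrowing.
	 */
	iova += (sysmmu_iova_t)PAGE_SIZE;

	printf("iova = 0x%x\n", iova);	/* 0x2000 */
	return 0;
}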
 		next = pmd_addr_end(addr, end);
 		if (sysmmu_map_pte(mm, pmd, addr, next, domain, iova, prot))
 			return -ENOMEM;
-		iova += (next - addr);
+		iova += (sysmmu_iova_t)(next - addr);
 	} while (pmd++, addr = next, addr != end);
 	return 0;
 }
 		next = pud_addr_end(addr, end);
 		if (sysmmu_map_pmd(mm, pud, addr, next, domain, iova, prot))
 			return -ENOMEM;
-		iova += (next - addr);
+		iova += (sysmmu_iova_t)(next - addr);
 	} while (pud++, addr = next, addr != end);
 	return 0;
 }
 		ret = sysmmu_map_pud(mm, pgd, addr, next, domain, iova, prot);
 		if (ret)
 			goto err;
-		iova += (next - addr);
+		iova += (sysmmu_iova_t)(next - addr);
 	} while (pgd++, addr = next, addr != end);
 	return 0;
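The three walker hunks above share one idiom: addr and next are unsigned long CPU addresses, so (next - addr) is unsigned long and is cast before being folded into the IOVA. A condensed sketch of that loop shape; step() and its 1 MiB stride are illustrative stand-ins, not the driver's pgd_addr_end():

#include <stdint.h>
#include <stdio.h>

typedef uint32_t sysmmu_iova_t;	/* assumed 32-bit IOVA type */

/* Stand-in for pgd_addr_end(): advance to the next 1 MiB boundary,
 * clamped to end.
 */
static unsigned long step(unsigned long addr, unsigned long end)
{
	unsigned long next = (addr + 0x100000UL) & ~0xFFFFFUL;

	return (next < end) ? next : end;
}

int main(void)
{
	unsigned long addr = 0x40000000UL, end = 0x40280000UL;
	sysmmu_iova_t iova = 0x10000000;
	unsigned long next;

	do {
		next = step(addr, end);
		/* map [addr, next) here ... */
		iova += (sysmmu_iova_t)(next - addr);
	} while (addr = next, addr != end);

	printf("final iova = 0x%x\n", iova);	/* 0x10280000 */
	return 0;
}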
 	struct exynos_iommu_domain *domain = to_exynos_domain(dom);
 	sysmmu_iova_t iova = (sysmmu_iova_t)d_iova;
 	sysmmu_pte_t *sent = section_entry(domain->pgtable, iova);
-	unsigned int entries = size >> SPAGE_ORDER;
+	unsigned int entries = (unsigned int)(size >> SPAGE_ORDER);
 	dma_addr_t start = d_iova;
 	while (entries > 0) {
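Why the entries cast: size is a size_t byte count, and a size_t shifted right is still size_t, so assigning the result to unsigned int narrows on 64-bit. A sketch assuming SPAGE_ORDER is 12 (4 KiB small pages), as in the upstream driver:

#include <stddef.h>
#include <stdio.h>

#define SPAGE_ORDER 12	/* 4 KiB small pages */

int main(void)
{
	size_t size = 2 * 1024 * 1024;	/* hypothetical 2 MiB mapping */
	unsigned int entries;

	/* size_t >> stays size_t; the cast makes the narrowing explicit. */
	entries = (unsigned int)(size >> SPAGE_ORDER);

	printf("entries = %u\n", entries);	/* 512 small pages */
	return 0;
}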
 	region->start = vstart;
 	region->size = vsize << PAGE_SHIFT;
 	region->dummy_size = region->size - size;
-	region->section_off = section_offset << PAGE_SHIFT;
+	region->section_off = (unsigned int)(section_offset << PAGE_SHIFT);
 	spin_lock(&vmm->vmlist_lock);
 	list_add_tail(&region->node, &vmm->regions_list);
INIT_LIST_HEAD(®ion->node);
region->start = start;
- region->size = size;
+ region->size = (u32)size;
spin_lock(&vmm->vmlist_lock);
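The last two hunks make the same kind of narrowing explicit when filling 32-bit region fields from wider values. A sketch with an illustrative struct layout, assuming the vmm region keeps 32-bit size/offset fields while callers work in size_t:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

/* Illustrative layout only; the driver's region struct differs. */
struct region {
	uint32_t size;
	unsigned int section_off;
};

int main(void)
{
	size_t size = 1UL << 20;	/* hypothetical 1 MiB allocation */
	size_t section_offset = 3;	/* pages into the section */
	struct region r;

	/* Explicit casts mark the size_t -> 32-bit narrowing as intended. */
	r.size = (uint32_t)size;
	r.section_off = (unsigned int)(section_offset << PAGE_SHIFT);

	printf("size=%u off=%u\n", r.size, r.section_off);
	return 0;
}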