--- /dev/null
+/* linux/drivers/iommu/exynos_iovmm.c
+ *
+ * Copyright (c) 2011-2012 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifdef CONFIG_EXYNOS_IOMMU_DEBUG
+#define DEBUG
+#endif
+
+#include <linux/kernel.h>
+#include <linux/hardirq.h>
+#include <linux/slab.h>
+#include <linux/scatterlist.h>
+#include <linux/err.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+
+#include <linux/exynos_iovmm.h>
+
+#include "exynos-iommu.h"
+
+/* IOVM region: [0x10000000, 0xD0000000) */
+#define IOVA_START 0x10000000
+#define IOVM_SIZE (0xD0000000 - IOVA_START)
+#define sg_physically_continuous(sg) (sg_next(sg) == NULL)
+
+/* alloc_iovm_region - Allocate an IO virtual memory region
+ * vmm: virtual memory allocator
+ * size: total size of the vm region to allocate from @vmm.
+ * section_offset: page size-aligned offset of the iova start address within a
+ *          1 MiB boundary. The caller of alloc_iovm_region() will obtain the
+ *          allocated iova + section_offset. This is provided only for
+ *          physically contiguous memory.
+ * page_offset: must be smaller than PAGE_SIZE. Just a value to be added to the
+ *          allocated virtual address. It affects neither the allocated size
+ *          nor the allocated address.
+ *
+ * The allocated region always starts on a 1 MiB (SZ_1M) boundary; no other
+ * alignment constraint can be requested by the caller.
+ *
+ * This function returns an allocated IO virtual address that satisfies the
+ * given constraints: the caller will get the allocated virtual address plus
+ * (section_offset + page_offset). Returns 0 if this function is not able
+ * to allocate IO virtual memory.
+ */
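+/*
+ * Illustrative arithmetic (made-up numbers, not taken from this patch):
+ * assume a physically contiguous buffer whose first page sits at physical
+ * address 0x20280000 and whose payload starts 0x200 bytes into that page,
+ * so section_offset = 0x20280000 & ~SECT_MASK = 0x80000 and
+ * page_offset = 0x200. If the bitmap search settles on the 1 MiB-aligned
+ * base 0x10100000, then region->start = 0x10100000 + 0x200 and the value
+ * returned to the caller is 0x10100200 + 0x80000 = 0x10180200. The low
+ * 20 bits of that iova (0x80200) match the low 20 bits of the physical
+ * payload address (0x20280200), which is what lets the System MMU map such
+ * a buffer with 1 MiB section descriptors.
+ */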
+static dma_addr_t alloc_iovm_region(struct exynos_iovmm *vmm, size_t size,
+ size_t section_offset,
+ off_t page_offset)
+{
+ u32 index = 0;
+ u32 vstart;
+ u32 vsize;
+ unsigned long end, i;
+ struct exynos_vm_region *region;
+ size_t align = SZ_1M;
+
+ BUG_ON(page_offset >= PAGE_SIZE);
+
+ /* To avoid allocating prefetched iovm region */
+ vsize = (ALIGN(size + SZ_128K, SZ_128K) + section_offset) >> PAGE_SHIFT;
+ align >>= PAGE_SHIFT;
+ section_offset >>= PAGE_SHIFT;
+
+ spin_lock(&vmm->bitmap_lock);
+again:
+ index = find_next_zero_bit(vmm->vm_map,
+ IOVM_NUM_PAGES(vmm->iovm_size), index);
+
+ if (align) {
+ index = ALIGN(index, align);
+ if (index >= IOVM_NUM_PAGES(vmm->iovm_size)) {
+ spin_unlock(&vmm->bitmap_lock);
+ return 0;
+ }
+
+ if (test_bit(index, vmm->vm_map))
+ goto again;
+ }
+
+ end = index + vsize;
+
+ if (end >= IOVM_NUM_PAGES(vmm->iovm_size)) {
+ spin_unlock(&vmm->bitmap_lock);
+ return 0;
+ }
+
+ i = find_next_bit(vmm->vm_map, end, index);
+ if (i < end) {
+ index = i + 1;
+ goto again;
+ }
+
+ bitmap_set(vmm->vm_map, index, vsize);
+
+ spin_unlock(&vmm->bitmap_lock);
+
+ vstart = (index << PAGE_SHIFT) + vmm->iova_start + page_offset;
+
+ region = kmalloc(sizeof(*region), GFP_KERNEL);
+ if (unlikely(!region)) {
+ spin_lock(&vmm->bitmap_lock);
+ bitmap_clear(vmm->vm_map, index, vsize);
+ spin_unlock(&vmm->bitmap_lock);
+ return 0;
+ }
+
+	INIT_LIST_HEAD(&region->node);
+ region->start = vstart;
+ region->size = vsize << PAGE_SHIFT;
+ region->dummy_size = region->size - size;
+ region->section_off = section_offset << PAGE_SHIFT;
+
+ spin_lock(&vmm->vmlist_lock);
+	list_add_tail(&region->node, &vmm->regions_list);
+ vmm->allocated_size += region->size;
+ vmm->num_areas++;
+ vmm->num_map++;
+ spin_unlock(&vmm->vmlist_lock);
+
+ return region->start + region->section_off;
+}
+
+struct exynos_vm_region *find_iovm_region(struct exynos_iovmm *vmm,
+ dma_addr_t iova)
+{
+ struct exynos_vm_region *region;
+
+ spin_lock(&vmm->vmlist_lock);
+
+ list_for_each_entry(region, &vmm->regions_list, node) {
+ if (region->start <= iova &&
+ (region->start + region->size) > iova) {
+ spin_unlock(&vmm->vmlist_lock);
+ return region;
+ }
+ }
+
+ spin_unlock(&vmm->vmlist_lock);
+
+ return NULL;
+}
+
+static struct exynos_vm_region *remove_iovm_region(struct exynos_iovmm *vmm,
+ dma_addr_t iova)
+{
+ struct exynos_vm_region *region;
+
+ spin_lock(&vmm->vmlist_lock);
+
+ list_for_each_entry(region, &vmm->regions_list, node) {
+ if (region->start + region->section_off == iova) {
+			list_del(&region->node);
+ vmm->allocated_size -= region->size;
+ vmm->num_areas--;
+ vmm->num_unmap++;
+ spin_unlock(&vmm->vmlist_lock);
+ return region;
+ }
+ }
+
+ spin_unlock(&vmm->vmlist_lock);
+
+ return NULL;
+}
+
+static void free_iovm_region(struct exynos_iovmm *vmm,
+ struct exynos_vm_region *region)
+{
+ if (!region)
+ return;
+
+ spin_lock(&vmm->bitmap_lock);
+ bitmap_clear(vmm->vm_map,
+ (region->start - vmm->iova_start) >> PAGE_SHIFT,
+ region->size >> PAGE_SHIFT);
+ spin_unlock(&vmm->bitmap_lock);
+
+ kfree(region);
+}
+
+static dma_addr_t add_iovm_region(struct exynos_iovmm *vmm,
+ dma_addr_t start, size_t size)
+{
+ struct exynos_vm_region *region, *pos;
+
+ region = kmalloc(sizeof(*region), GFP_KERNEL);
+ if (!region)
+ return 0;
+
+	INIT_LIST_HEAD(&region->node);
+ region->start = start;
+ region->size = size;
+
+ spin_lock(&vmm->vmlist_lock);
+
+ list_for_each_entry(pos, &vmm->regions_list, node) {
+ if ((start < (pos->start + pos->size)) &&
+ ((start + size) > pos->start)) {
+ spin_unlock(&vmm->vmlist_lock);
+ kfree(region);
+ return 0;
+ }
+ }
+
+	list_add(&region->node, &vmm->regions_list);
+
+ spin_unlock(&vmm->vmlist_lock);
+
+ return start;
+}
+
+static void show_iovm_regions(struct exynos_iovmm *vmm)
+{
+ struct exynos_vm_region *pos;
+
+ pr_err("LISTING IOVMM REGIONS...\n");
+ spin_lock(&vmm->vmlist_lock);
+ list_for_each_entry(pos, &vmm->regions_list, node) {
+ pr_err("REGION: %#x (SIZE: %#x, +[%#x, %#x])\n",
+ pos->start, pos->size,
+ pos->section_off, pos->dummy_size);
+ }
+ spin_unlock(&vmm->vmlist_lock);
+ pr_err("END OF LISTING IOVMM REGIONS...\n");
+}
+
+int iovmm_activate(struct device *dev)
+{
+ struct exynos_iovmm *vmm = exynos_get_iovmm(dev);
+
+ if (!vmm) {
+ dev_err(dev, "%s: IOVMM not found\n", __func__);
+ return -EINVAL;
+ }
+
+ return iommu_attach_device(vmm->domain, dev);
+}
+
+void iovmm_deactivate(struct device *dev)
+{
+ struct exynos_iovmm *vmm = exynos_get_iovmm(dev);
+
+ if (!vmm) {
+ dev_err(dev, "%s: IOVMM not found\n", __func__);
+ return;
+ }
+
+ iommu_detach_device(vmm->domain, dev);
+}
+
+struct iommu_domain *get_domain_from_dev(struct device *dev)
+{
+ struct exynos_iovmm *vmm = exynos_get_iovmm(dev);
+
+ if (!vmm) {
+ dev_err(dev, "%s: IOVMM not found\n", __func__);
+ return NULL;
+ }
+
+ return vmm->domain;
+}
+
+/* iovmm_map - allocate and map IO virtual memory for the given device
+ * dev: device that has IO virtual address space managed by IOVMM
+ * sg: list of physically contiguous memory chunks. Each preceding chunk should
+ *     be larger than the chunks that follow it for efficient mapping and good
+ *     performance. If sg has more than one element, the physical address of
+ *     each chunk should be aligned to its size for efficient mapping and TLB
+ *     utilization.
+ * offset: offset in bytes to be mapped and accessed by dev.
+ * size: size in bytes to be mapped and accessed by dev.
+ *
+ * This function allocates IO virtual memory for the given device and maps the
+ * given physical memory conveyed by sg into the allocated IO memory region.
+ * Returns the allocated IO virtual address if allocation and mapping succeed;
+ * otherwise, a negative error number. The caller must check the return value
+ * of this function with IS_ERR_VALUE().
+ */
+dma_addr_t iovmm_map(struct device *dev, struct scatterlist *sg, off_t offset,
+ size_t size, enum dma_data_direction direction, int prot)
+{
+ off_t start_off;
+ dma_addr_t addr, start = 0;
+ size_t mapped_size = 0;
+ struct exynos_iovmm *vmm = exynos_get_iovmm(dev);
+ size_t section_offset = 0; /* section offset of contig. mem */
+ int ret = 0;
+ int idx;
+ struct scatterlist *tsg;
+ struct exynos_vm_region *region;
+
+ if (vmm == NULL) {
+ dev_err(dev, "%s: IOVMM not found\n", __func__);
+ return -EINVAL;
+ }
+
+ for (; (sg != NULL) && (sg->length < offset); sg = sg_next(sg))
+ offset -= sg->length;
+
+ if (sg == NULL) {
+ dev_err(dev, "IOVMM: invalid offset to %s.\n", __func__);
+ return -EINVAL;
+ }
+
+ tsg = sg;
+
+ start_off = offset_in_page(sg_phys(sg) + offset);
+ size = PAGE_ALIGN(size + start_off);
+
+ if (sg_physically_continuous(sg)) {
+ size_t aligned_pad_size;
+ phys_addr_t phys = page_to_phys(sg_page(sg));
+ section_offset = phys & (~SECT_MASK);
+ aligned_pad_size = ALIGN(phys, SECT_SIZE) - phys;
+ if ((sg->length - aligned_pad_size) < SECT_SIZE) {
+ aligned_pad_size = ALIGN(phys, LPAGE_SIZE) - phys;
+ if ((sg->length - aligned_pad_size) >= LPAGE_SIZE)
+ section_offset = phys & (~LPAGE_MASK);
+ else
+ section_offset = 0;
+ }
+ }
+ start = alloc_iovm_region(vmm, size, section_offset, start_off);
+ if (!start) {
+ spin_lock(&vmm->vmlist_lock);
+ dev_err(dev, "%s: Not enough IOVM space to allocate %#zx\n",
+ __func__, size);
+ dev_err(dev, "%s: Total %#zx, Allocated %#zx , Chunks %d\n",
+ __func__, vmm->iovm_size,
+ vmm->allocated_size, vmm->num_areas);
+ spin_unlock(&vmm->vmlist_lock);
+ ret = -ENOMEM;
+ goto err_map_nomem;
+ }
+
+ addr = start - start_off;
+
+ do {
+ phys_addr_t phys;
+ size_t len;
+
+ phys = sg_phys(sg);
+ len = sg->length;
+
+ /* if back to back sg entries are contiguous consolidate them */
+ while (sg_next(sg) &&
+ sg_phys(sg) + sg->length == sg_phys(sg_next(sg))) {
+ len += sg_next(sg)->length;
+ sg = sg_next(sg);
+ }
+
+ if (offset > 0) {
+ len -= offset;
+ phys += offset;
+ offset = 0;
+ }
+
+ if (offset_in_page(phys)) {
+ len += offset_in_page(phys);
+ phys = round_down(phys, PAGE_SIZE);
+ }
+
+ len = PAGE_ALIGN(len);
+
+ if (len > (size - mapped_size))
+ len = size - mapped_size;
+
+ ret = iommu_map(vmm->domain, addr, phys, len, prot);
+ if (ret) {
+ dev_err(dev, "iommu_map failed w/ err: %d\n", ret);
+ break;
+ }
+
+ addr += len;
+ mapped_size += len;
+ } while ((sg = sg_next(sg)) && (mapped_size < size));
+
+ BUG_ON(mapped_size > size);
+
+ if (mapped_size < size) {
+ dev_err(dev, "mapped_size(%#zx) is smaller than size(%#zx)\n",
+ mapped_size, size);
+ if (!ret) {
+ dev_err(dev, "ret: %d\n", ret);
+ ret = -EINVAL;
+ }
+ goto err_map_map;
+ }
+
+ region = find_iovm_region(vmm, start);
+ BUG_ON(!region);
+
+	/*
+	 * If the prefetched SLPD is a fault SLPD in zero_l2_table, the FLPD
+	 * cache or prefetch buffer caches the address of zero_l2_table.
+	 * This function replaces the zero_l2_table with a new L2 page
+	 * table to write valid mappings.
+	 * Accessing the valid area may cause a page fault since the FLPD
+	 * cache may still cache zero_l2_table for the valid area
+	 * instead of the new L2 page table that has the mapping
+	 * information of the valid area.
+	 * Thus any replacement of zero_l2_table with another valid L2
+	 * page table must involve FLPD cache invalidation if the System
+	 * MMU has the prefetch feature and an FLPD cache (version 3.3).
+	 * FLPD cache invalidation is performed with TLB invalidation
+	 * by VPN without blocking. It is safe to invalidate the TLB without
+	 * blocking because the target address of the TLB invalidation is
+	 * not currently mapped.
+	 */
+
+ /* TODO: for sysmmu v6, remove it later */
+ exynos_sysmmu_tlb_invalidate(vmm->domain, region->start, region->size);
+
+ dev_dbg(dev, "IOVMM: Allocated VM region @ %#x/%#x bytes.\n",
+ (unsigned int)start, (unsigned int)size);
+
+ return start;
+
+err_map_map:
+ iommu_unmap(vmm->domain, start - start_off, mapped_size);
+ free_iovm_region(vmm, remove_iovm_region(vmm, start));
+
+ dev_err(dev,
+ "Failed(%d) to map IOVMM REGION %pa (SIZE: %#zx, mapped: %#zx)\n",
+ ret, &start, size, mapped_size);
+ idx = 0;
+ do {
+ pr_err("SGLIST[%d].size = %#x\n", idx++, tsg->length);
+ } while ((tsg = sg_next(tsg)));
+
+ show_iovm_regions(vmm);
+
+err_map_nomem:
+ dev_dbg(dev,
+		"IOVMM: Failed to allocate a VM region for %#x bytes.\n",
+ (unsigned int)size);
+ return (dma_addr_t)ret;
+}
+
+void iovmm_unmap(struct device *dev, dma_addr_t iova)
+{
+ struct exynos_iovmm *vmm = exynos_get_iovmm(dev);
+ struct exynos_vm_region *region;
+ size_t unmap_size;
+
+ /* This function must not be called in IRQ handlers */
+ BUG_ON(in_irq());
+
+ if (vmm == NULL) {
+ dev_err(dev, "%s: IOVMM not found\n", __func__);
+ return;
+ }
+
+ region = remove_iovm_region(vmm, iova);
+ if (region) {
+ u32 start = region->start + region->section_off;
+ u32 size = region->size - region->dummy_size;
+
+ /* clear page offset */
+ if (WARN_ON(start != iova)) {
+ dev_err(dev, "IOVMM: "
+ "iova %pa and region %#x(+%#x)@%#x(-%#x) mismatch\n",
+ &iova, region->size, region->dummy_size,
+ region->start, region->section_off);
+ show_iovm_regions(vmm);
+ /* reinsert iovm region */
+ add_iovm_region(vmm, region->start, region->size);
+ kfree(region);
+ return;
+ }
+ unmap_size = iommu_unmap(vmm->domain, start & SPAGE_MASK, size);
+ if (unlikely(unmap_size != size)) {
+ dev_err(dev,
+ "Failed to unmap REGION of %#x:\n", start);
+ dev_err(dev, "(SIZE: %#x, iova: %pa, unmapped: %#zx)\n",
+ size, &iova, unmap_size);
+ show_iovm_regions(vmm);
+ kfree(region);
+ BUG();
+ return;
+ }
+
+ exynos_sysmmu_tlb_invalidate(vmm->domain, region->start, region->size);
+
+ /* TODO: for sysmmu v6, remove it later */
+ /* 60us is required to guarantee that PTW ends itself */
+ udelay(60);
+
+ free_iovm_region(vmm, region);
+
+ dev_dbg(dev, "IOVMM: Unmapped %#x bytes from %#x.\n",
+ (unsigned int)unmap_size, (unsigned int)iova);
+ } else {
+ dev_err(dev, "IOVMM: No IOVM region %pa to free.\n", &iova);
+ }
+}
+
+/*
+ * NOTE:
+ * exynos_iovmm_map_userptr() should be called with current->mm->mmap_sem held.
+ */
+dma_addr_t exynos_iovmm_map_userptr(struct device *dev, unsigned long vaddr,
+ size_t size, int prot)
+{
+ struct exynos_iovmm *vmm = exynos_get_iovmm(dev);
+ unsigned long eaddr = vaddr + size;
+ off_t offset = offset_in_page(vaddr);
+ int ret = -EINVAL;
+ struct vm_area_struct *vma;
+ dma_addr_t start;
+ struct exynos_vm_region *region;
+
+	vma = find_vma(current->mm, vaddr);
+	if (!vma || (vaddr < vma->vm_start)) {
+ dev_err(dev, "%s: invalid address %#lx\n", __func__, vaddr);
+ goto err;
+ }
+
+ if (!!(vma->vm_flags & VM_PFNMAP))
+ prot |= IOMMU_PFNMAP;
+
+ while (eaddr > vma->vm_end) {
+ if (!!(vma->vm_flags & VM_PFNMAP)) {
+ dev_err(dev, "%s: non-linear pfnmap is not supported\n",
+ __func__);
+ goto err;
+ }
+
+ if ((vma->vm_next == NULL) ||
+ (vma->vm_end != vma->vm_next->vm_start)) {
+ dev_err(dev, "%s: invalid size %zu\n", __func__, size);
+ goto err;
+ }
+
+ vma = vma->vm_next;
+ }
+
+ size = PAGE_ALIGN(size + offset);
+ start = alloc_iovm_region(vmm, size, 0, offset);
+ if (!start) {
+ spin_lock(&vmm->vmlist_lock);
+ dev_err(dev, "%s: Not enough IOVM space to allocate %#zx\n",
+ __func__, size);
+ dev_err(dev, "%s: Total %#zx, Allocated %#zx , Chunks %d\n",
+ __func__, vmm->iovm_size,
+ vmm->allocated_size, vmm->num_areas);
+ spin_unlock(&vmm->vmlist_lock);
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ ret = exynos_iommu_map_userptr(vmm->domain, vaddr - offset,
+ start - offset, size, prot);
+ if (ret < 0)
+ goto err_map;
+
+ region = find_iovm_region(vmm, start);
+ BUG_ON(!region);
+
+ return start;
+err_map:
+ free_iovm_region(vmm, remove_iovm_region(vmm, start));
+err:
+ return (dma_addr_t)ret;
+}
+
+void exynos_iovmm_unmap_userptr(struct device *dev, dma_addr_t iova)
+{
+ struct exynos_iovmm *vmm = exynos_get_iovmm(dev);
+ struct exynos_vm_region *region;
+
+ region = remove_iovm_region(vmm, iova);
+ if (region) {
+ u32 start = region->start + region->section_off;
+ u32 size = region->size - region->dummy_size;
+
+ /* clear page offset */
+ if (WARN_ON(start != iova)) {
+ dev_err(dev, "IOVMM: "
+ "iova %pa and region %#x(+%#x)@%#x(-%#x) mismatch\n",
+ &iova, region->size, region->dummy_size,
+ region->start, region->section_off);
+ show_iovm_regions(vmm);
+ /* reinsert iovm region */
+ add_iovm_region(vmm, region->start, region->size);
+ kfree(region);
+ return;
+ }
+
+ exynos_iommu_unmap_userptr(vmm->domain,
+ start & SPAGE_MASK, size);
+
+ free_iovm_region(vmm, region);
+ } else {
+ dev_err(dev, "IOVMM: No IOVM region %pa to free.\n", &iova);
+ }
+}
+
+static struct dentry *exynos_iovmm_debugfs_root;
+static struct dentry *exynos_iommu_debugfs_root;
+
+static int exynos_iovmm_create_debugfs(void)
+{
+ exynos_iovmm_debugfs_root = debugfs_create_dir("iovmm", NULL);
+ if (!exynos_iovmm_debugfs_root)
+ pr_err("IOVMM: Failed to create debugfs entry\n");
+ else
+ pr_info("IOVMM: Created debugfs entry at debugfs/iovmm\n");
+
+ exynos_iommu_debugfs_root = debugfs_create_dir("iommu", NULL);
+ if (!exynos_iommu_debugfs_root)
+ pr_err("IOMMU: Failed to create debugfs entry\n");
+ else
+ pr_info("IOMMU: Created debugfs entry at debugfs/iommu\n");
+
+ return 0;
+}
+arch_initcall(exynos_iovmm_create_debugfs);
+
+static int iovmm_debug_show(struct seq_file *s, void *unused)
+{
+ struct exynos_iovmm *vmm = s->private;
+
+	seq_printf(s, "%10s %10s %10s %6s\n",
+ "VASTART", "SIZE", "FREE", "CHUNKS");
+ seq_puts(s, "---------------------------------------------\n");
+
+ spin_lock(&vmm->vmlist_lock);
+ seq_printf(s, " %#x %#10zx %#10zx %d\n",
+ vmm->iova_start, vmm->iovm_size,
+ vmm->iovm_size - vmm->allocated_size,
+ vmm->num_areas);
+ seq_puts(s, "---------------------------------------------\n");
+ seq_printf(s, "Total number of mappings : %d\n", vmm->num_map);
+ seq_printf(s, "Total number of unmappings: %d\n", vmm->num_unmap);
+ spin_unlock(&vmm->vmlist_lock);
+
+ return 0;
+}
+
+static int iovmm_debug_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, iovmm_debug_show, inode->i_private);
+}
+
+static ssize_t iovmm_debug_write(struct file *filp, const char __user *p,
+ size_t len, loff_t *off)
+{
+ struct seq_file *s = filp->private_data;
+ struct exynos_iovmm *vmm = s->private;
+ /* clears the map count in IOVMM */
+ spin_lock(&vmm->vmlist_lock);
+ vmm->num_map = 0;
+ vmm->num_unmap = 0;
+ spin_unlock(&vmm->vmlist_lock);
+ return len;
+}
+
+static const struct file_operations iovmm_debug_fops = {
+ .open = iovmm_debug_open,
+ .read = seq_read,
+ .write = iovmm_debug_write,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static void iovmm_register_debugfs(struct exynos_iovmm *vmm)
+{
+ if (!exynos_iovmm_debugfs_root)
+ return;
+
+ debugfs_create_file(vmm->domain_name, 0664,
+ exynos_iovmm_debugfs_root, vmm, &iovmm_debug_fops);
+}
+
+struct exynos_iovmm *exynos_create_single_iovmm(const char *name)
+{
+ struct exynos_iovmm *vmm;
+ int ret = 0;
+
+ vmm = kzalloc(sizeof(*vmm), GFP_KERNEL);
+ if (!vmm) {
+ ret = -ENOMEM;
+ goto err_alloc_vmm;
+ }
+
+ vmm->iovm_size = IOVM_SIZE;
+ vmm->iova_start = IOVA_START;
+ vmm->vm_map = kzalloc(IOVM_BITMAP_SIZE(IOVM_SIZE), GFP_KERNEL);
+ if (!vmm->vm_map) {
+ ret = -ENOMEM;
+ goto err_setup_domain;
+ }
+
+ vmm->domain = iommu_domain_alloc(&platform_bus_type);
+ if (!vmm->domain) {
+ ret = -ENOMEM;
+ goto err_setup_domain;
+ }
+
+ spin_lock_init(&vmm->vmlist_lock);
+ spin_lock_init(&vmm->bitmap_lock);
+
+ INIT_LIST_HEAD(&vmm->regions_list);
+
+ vmm->domain_name = name;
+
+ iovmm_register_debugfs(vmm);
+
+ pr_debug("%s IOVMM: Created %#x B IOVMM from %#x.\n",
+ name, IOVM_SIZE, IOVA_START);
+ return vmm;
+
+err_setup_domain:
+ kfree(vmm);
+err_alloc_vmm:
+ pr_err("%s IOVMM: Failed to create IOVMM (%d)\n", name, ret);
+
+ return ERR_PTR(ret);
+}
--- /dev/null
+/* include/linux/exynos_iovmm.h
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_PLAT_IOVMM_H
+#define __ASM_PLAT_IOVMM_H
+
+#include <linux/dma-direction.h>
+#include <linux/iommu.h>
+
+#include <asm/page.h>
+#include <linux/mm_types.h>
+
+#define IOMMU_PFNMAP (1 << 5) /* VM_PFNMAP is set */
+
+struct scatterlist;
+struct device;
+
+typedef u32 exynos_iova_t;
+
+#define SYSMMU_FAULT_BITS 4
+#define SYSMMU_FAULT_SHIFT 16
+#define SYSMMU_FAULT_MASK ((1 << SYSMMU_FAULT_BITS) - 1)
+#define SYSMMU_FAULT_FLAG(id) (((id) & SYSMMU_FAULT_MASK) << SYSMMU_FAULT_SHIFT)
+#define SYSMMU_FAULT_ID(fg) (((fg) >> SYSMMU_FAULT_SHIFT) & SYSMMU_FAULT_MASK)
+
+#define SYSMMU_FAULT_PTW_ACCESS 0
+#define SYSMMU_FAULT_PAGE_FAULT 1
+#define SYSMMU_FAULT_TLB_MULTIHIT 2
+#define SYSMMU_FAULT_ACCESS 3
+#define SYSMMU_FAULT_SECURITY 4
+#define SYSMMU_FAULT_UNKNOWN 5
+
+#define IOMMU_FAULT_EXYNOS_PTW_ACCESS SYSMMU_FAULT_FLAG(SYSMMU_FAULT_PTW_ACCESS)
+#define IOMMU_FAULT_EXYNOS_PAGE_FAULT SYSMMU_FAULT_FLAG(SYSMMU_FAULT_PAGE_FAULT)
+#define IOMMU_FAULT_EXYNOS_TLB_MULTIHIT \
+ SYSMMU_FAULT_FLAG(SYSMMU_FAULT_TLB_MULTIHIT)
+#define IOMMU_FAULT_EXYNOS_ACCESS SYSMMU_FAULT_FLAG(SYSMMU_FAULT_ACCESS)
+#define IOMMU_FAULT_EXYNOS_SECURITY SYSMMU_FAULT_FLAG(SYSMMU_FAULT_SECURITY)
+#define IOMMU_FAULT_EXYNOS_UNKNOWN	SYSMMU_FAULT_FLAG(SYSMMU_FAULT_UNKNOWN)
+
+/* TODO: PB related for sysmmu v6, remove it later */
+#define SYSMMU_PBUFCFG_TLB_UPDATE (1 << 16)
+#define SYSMMU_PBUFCFG_ASCENDING (1 << 12)
+#define SYSMMU_PBUFCFG_DESCENDING (0 << 12)
+#define SYSMMU_PBUFCFG_PREFETCH (1 << 8)
+#define SYSMMU_PBUFCFG_WRITE (1 << 4)
+#define SYSMMU_PBUFCFG_READ (0 << 4)
+
+#define SYSMMU_PBUFCFG_DEFAULT_INPUT (SYSMMU_PBUFCFG_TLB_UPDATE | \
+ SYSMMU_PBUFCFG_ASCENDING | \
+ SYSMMU_PBUFCFG_PREFETCH | \
+ SYSMMU_PBUFCFG_READ)
+#define SYSMMU_PBUFCFG_DEFAULT_OUTPUT (SYSMMU_PBUFCFG_TLB_UPDATE | \
+ SYSMMU_PBUFCFG_ASCENDING | \
+ SYSMMU_PBUFCFG_PREFETCH | \
+ SYSMMU_PBUFCFG_WRITE)
+
+#define SYSMMU_PBUFCFG_ASCENDING_INPUT (SYSMMU_PBUFCFG_TLB_UPDATE | \
+ SYSMMU_PBUFCFG_ASCENDING | \
+ SYSMMU_PBUFCFG_PREFETCH | \
+ SYSMMU_PBUFCFG_READ)
+
+#define SYSMMU_PBUFCFG_DESCENDING_INPUT (SYSMMU_PBUFCFG_TLB_UPDATE | \
+ SYSMMU_PBUFCFG_DESCENDING | \
+ SYSMMU_PBUFCFG_PREFETCH | \
+ SYSMMU_PBUFCFG_READ)
+/* SYSMMU PPC Event ID */
+enum sysmmu_ppc_event {
+ READ_TOTAL,
+ READ_L1TLB_MISS,
+ READ_L2TLB_MISS,
+ READ_FLPD_MISS,
+ READ_PB_LOOKUP,
+ READ_PB_MISS,
+ READ_BLOCK_NUM_BY_PREFETCH,
+	READ_BLOCK_CYCLE_BY_PREFETCH,
+ READ_TLB_MISS,
+ READ_FLPD_MISS_PREFETCH,
+ WRITE_TOTAL = 0x10,
+ WRITE_L1TLB_MISS,
+ WRITE_L2TLB_MISS,
+ WRITE_FLPD_MISS,
+ WRITE_PB_LOOKUP,
+ WRITE_PB_MISS,
+ WRITE_BLOCK_NUM_BY_PREFETCH,
+ WRITE_BLOCK_CYCLE_BY_PREFETCH,
+ WRITE_TLB_MISS,
+ WRITE_FLPD_MISS_PREFETCH,
+ TOTAL_ID_NUM,
+};
+
+struct sysmmu_prefbuf {
+ unsigned long base;
+ unsigned long size;
+ unsigned long config;
+};
+
+#if defined(CONFIG_EXYNOS_IOVMM)
+int iovmm_activate(struct device *dev);
+void iovmm_deactivate(struct device *dev);
+struct iommu_domain *get_domain_from_dev(struct device *dev);
+
+/* iovmm_map() - Maps a list of physical memory chunks
+ * @dev: the owner of the IO address space where the mapping is created
+ * @sg: list of physical memory chunks to map
+ * @offset: offset in bytes from the beginning of @sg where the mapping starts
+ * @size: how much memory to map in bytes. @offset + @size must not exceed
+ * total size of @sg
+ * @direction: dma data direction for iova
+ * @prot: iommu mapping property
+ *
+ * This function returns the mapped IO address in the address space of @dev,
+ * or a negative error number if mapping fails. The caller must check the
+ * return value with IS_ERR_VALUE() to determine whether the function
+ * succeeded.
+ *
+ * The caller of this function must ensure that iovmm_cleanup() is not called
+ * while this function is running. A short usage sketch follows the
+ * iovmm_unmap() declaration below.
+ */
+dma_addr_t iovmm_map(struct device *dev, struct scatterlist *sg, off_t offset,
+ size_t size, enum dma_data_direction direction, int prot);
+
+/* iovmm_unmap() - unmaps the given IO address
+ * @dev: the owner of the IO address space where @iova belongs
+ * @iova: IO address that needs to be unmapped and freed.
+ *
+ * The caller of this function must ensure that iovmm_cleanup() is not called
+ * while this function is running.
+ */
+void iovmm_unmap(struct device *dev, dma_addr_t iova);
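+
+/*
+ * Illustrative usage sketch (not part of this patch): a driver whose device
+ * has been assigned an IOVMM instance attaches to the IOMMU, maps a
+ * scatterlist for the device and later tears the mapping down. The names
+ * example_dev, sgl and bytes are hypothetical; error handling is reduced to
+ * the essentials.
+ *
+ *	dma_addr_t iova;
+ *	int ret;
+ *
+ *	ret = iovmm_activate(example_dev);
+ *	if (ret)
+ *		return ret;
+ *
+ *	iova = iovmm_map(example_dev, sgl, 0, bytes, DMA_TO_DEVICE,
+ *			 IOMMU_READ);
+ *	if (IS_ERR_VALUE(iova)) {
+ *		iovmm_deactivate(example_dev);
+ *		return (int)iova;
+ *	}
+ *
+ *	... program iova into the device and run the DMA ...
+ *
+ *	iovmm_unmap(example_dev, iova);
+ *	iovmm_deactivate(example_dev);
+ */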
+
+/*
+ * Flags for the option_iplanes and option_oplanes arguments.
+ * 'inplanes' and 'onplanes' stand for 'input planes' and 'output planes',
+ * respectively.
+ *
+ * default value for option_iplanes:
+ *	(TLB_UPDATE | ASCENDING | PREFETCH)
+ * default value for option_oplanes:
+ *	(TLB_UPDATE | ASCENDING | PREFETCH | WRITE)
+ *
+ * SYSMMU_PBUFCFG_READ and SYSMMU_PBUFCFG_WRITE are ignored because they are
+ * implicitly set from the 'inplanes' and 'onplanes' arguments to
+ * iovmm_set_prefetch_buffer().
+ *
+ * Guide to setting the flags (see the illustrative sketch below):
+ * - Clear SYSMMU_PBUFCFG_TLB_UPDATE if a buffer is accessed by the device
+ *   for rotation.
+ * - Set SYSMMU_PBUFCFG_DESCENDING if the device accesses a buffer in reverse
+ *   order.
+ * - Clear SYSMMU_PBUFCFG_PREFETCH if access to a buffer has poor locality.
+ * - Otherwise, keep the flags at their default values.
+ */
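+
+/*
+ * Illustrative sketch of the guide above (hypothetical master device
+ * example_dev with two input planes and one output plane; the option values
+ * are built only from the defaults and flags documented in this header):
+ *
+ *	unsigned int ipoption[2] = {
+ *		SYSMMU_PBUFCFG_DEFAULT_INPUT,
+ *		SYSMMU_PBUFCFG_DEFAULT_INPUT & ~SYSMMU_PBUFCFG_TLB_UPDATE,
+ *	};
+ *	unsigned int opoption[1] = { SYSMMU_PBUFCFG_DEFAULT_OUTPUT };
+ *	int ret;
+ *
+ *	ret = sysmmu_set_prefetch_buffer_property(example_dev, 2, 1,
+ *						  ipoption, opoption);
+ *
+ * The second input plane clears SYSMMU_PBUFCFG_TLB_UPDATE, following the
+ * advice above for buffers that the device accesses for rotation.
+ */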
+#else
+#define iovmm_activate(dev) (-ENOSYS)
+#define iovmm_deactivate(dev) do { } while (0)
+#define iovmm_map(dev, sg, offset, size, direction, prot) (-ENOSYS)
+#define iovmm_unmap(dev, iova) do { } while (0)
+#define get_domain_from_dev(dev) NULL
+static inline dma_addr_t exynos_iovmm_map_userptr(struct device *dev,
+					unsigned long vaddr, size_t size,
+					int prot)
+{
+	return (dma_addr_t)-ENOSYS;
+}
+#define exynos_iovmm_unmap_userptr(dev, iova) do { } while (0)
+
+#endif /* CONFIG_EXYNOS_IOVMM */
+
+#if defined(CONFIG_EXYNOS_IOMMU)
+/**
+ * exynos_sysmmu_map_user_pages() - maps all pages by fetching from
+ * user page table entries.
+ * @dev:	The device whose System MMU performs the mapping.
+ * @mm: mm struct of user requested to map
+ * @vaddr: start vaddr in valid vma
+ * @iova: start io vaddr to be mapped
+ * @size: size to map
+ * @write: set if buffer may be written
+ * @shareable: set shareable bit if true
+ *
+ * This function maps all user pages into sysmmu page table.
+ */
+int exynos_sysmmu_map_user_pages(struct device *dev,
+ struct mm_struct *mm,
+ unsigned long vaddr,
+ exynos_iova_t iova,
+ size_t size, bool write,
+ bool shareable);
+
+/**
+ * exynos_sysmmu_unmap_user_pages() - unmaps all mapped pages
+ * @dev:	The device whose System MMU performed the mapping.
+ * @mm: mm struct of user requested to map
+ * @vaddr: start vaddr in valid vma
+ * @iova: start io vaddr to be unmapped
+ * @size:	size to unmap
+ *
+ * This function unmaps all user pages mapped in sysmmu page table.
+ */
+int exynos_sysmmu_unmap_user_pages(struct device *dev,
+ struct mm_struct *mm,
+ unsigned long vaddr,
+ exynos_iova_t iova,
+ size_t size);
+/**
+ * exynos_iommu_sync_for_device()
+ * - maintain cache lines on the given area before DMA
+ * @dev: The device that is about to see the area
+ * @iova: The start DMA address of @dev to maintain
+ * @len: The length of the area
+ * @dir:	Indicates whether @dev reads from or writes to the area
+ */
+void exynos_iommu_sync_for_device(struct device *dev, dma_addr_t iova,
+ size_t len, enum dma_data_direction dir);
+
+/**
+ * exynos_iommu_sync_for_cpu()
+ * - maintain cache lines on the given area after DMA
+ * @dev: The device that is about to see the area
+ * @iova: The start DMA address of @dev to maintain
+ * @len: The length of the area
+ * @dir:	Indicates whether @dev reads from or writes to the area
+ */
+void exynos_iommu_sync_for_cpu(struct device *dev, dma_addr_t iova, size_t len,
+ enum dma_data_direction dir);
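+
+/*
+ * Illustrative ordering sketch (hypothetical example_dev, iova and len):
+ * call the _for_device variant before the hardware touches the buffer and
+ * the _for_cpu variant after the transfer completes, analogous to the
+ * generic dma_sync_single_for_device()/dma_sync_single_for_cpu() pattern.
+ *
+ *	exynos_iommu_sync_for_device(example_dev, iova, len, DMA_FROM_DEVICE);
+ *	... start the DMA transfer and wait for its completion ...
+ *	exynos_iommu_sync_for_cpu(example_dev, iova, len, DMA_FROM_DEVICE);
+ *	... the CPU may now read the data written by the device ...
+ */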
+
+/**
+ * exynos_iovmm_map_userptr() - map a range of user virtual memory into the
+ *				IO address space of @dev
+ * @dev: owner of the IO address space
+ * @vaddr: start of the user virtual address range to map
+ * @size: size of the range in bytes
+ * @prot: iommu mapping property
+ *
+ * Must be called with current->mm->mmap_sem held. Returns the mapped IO
+ * address, or a negative error number that must be checked with
+ * IS_ERR_VALUE().
+ */
+dma_addr_t exynos_iovmm_map_userptr(struct device *dev, unsigned long vaddr,
+ size_t size, int prot);
+/**
+ * exynos_iovmm_unmap_userptr() - unmap an IO address that was mapped by
+ *				  exynos_iovmm_map_userptr()
+ * @dev: owner of the IO address space
+ * @iova: IO address returned by exynos_iovmm_map_userptr()
+ */
+void exynos_iovmm_unmap_userptr(struct device *dev, dma_addr_t iova);
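+
+/*
+ * Illustrative sketch (hypothetical example_dev, uaddr and len): the note in
+ * exynos_iovmm.c states that exynos_iovmm_map_userptr() must be called with
+ * current->mm->mmap_sem held; taking it for reading is shown here as an
+ * assumption.
+ *
+ *	dma_addr_t iova;
+ *
+ *	down_read(&current->mm->mmap_sem);
+ *	iova = exynos_iovmm_map_userptr(example_dev, uaddr, len, IOMMU_READ);
+ *	up_read(&current->mm->mmap_sem);
+ *	if (IS_ERR_VALUE(iova))
+ *		return (int)iova;
+ *
+ *	... let the device access the user buffer through iova ...
+ *
+ *	exynos_iovmm_unmap_userptr(example_dev, iova);
+ */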
+
+/*
+ * The handle_pte_fault() is called by exynos_sysmmu_map_user_pages().
+ * Driver cannot include include/linux/huge_mm.h because
+ * CONFIG_TRANSPARENT_HUGEPAGE is disabled.
+ */
+extern int handle_pte_fault(struct mm_struct *mm,
+ struct vm_area_struct *vma, unsigned long address,
+ pte_t *pte, pmd_t *pmd, unsigned int flags);
+
+/*
+ * sysmmu_set_prefetch_buffer_by_region() - set prefetch buffer configuration
+ *
+ * @dev: device descriptor of master device
+ * @pb_reg: array of regions that the prefetch buffers should cover.
+ * @num_reg: number of entries in @pb_reg.
+ *
+ * If @dev is NULL or @pb_reg is 0, the prefetch buffers are disabled.
+ *
+ */
+void sysmmu_set_prefetch_buffer_by_region(struct device *dev,
+ struct sysmmu_prefbuf pb_reg[], unsigned int num_reg);
+
+int sysmmu_set_prefetch_buffer_property(struct device *dev,
+ unsigned int inplanes, unsigned int onplanes,
+ unsigned int ipoption[], unsigned int opoption[]);
+void exynos_sysmmu_show_status(struct device *dev);
+void exynos_sysmmu_dump_pgtable(struct device *dev);
+
+/*
+ * exynos_sysmmu_set/clear/show_ppc_event() -
+ * set/clear/show system mmu ppc event
+ *
+ * @dev: device descriptor of master device.
+ * @event: system mmu ppc event id.
+ * Returns 0 if setting is successful, or -EINVAL if the argument is invalid.
+ *
+ */
+int exynos_sysmmu_set_ppc_event(struct device *dev, int event);
+void exynos_sysmmu_clear_ppc_event(struct device *dev);
+void exynos_sysmmu_show_ppc_event(struct device *dev);
+
+/*
+ * iovmm_set_fault_handler - register fault handler of dev to iommu controller
+ * @dev: the device that wants to register fault handler
+ * @handler: fault handler
+ * @token: any data the device driver needs to receive when a fault occurs
+ */
+void iovmm_set_fault_handler(struct device *dev,
+ iommu_fault_handler_t handler, void *token);
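+
+/*
+ * Illustrative sketch of a fault handler (hypothetical driver context). The
+ * handler uses the generic iommu_fault_handler_t signature from
+ * <linux/iommu.h>; that @flags carries one of the IOMMU_FAULT_EXYNOS_*
+ * values encoded with SYSMMU_FAULT_FLAG() is an assumption based on the
+ * macros above.
+ *
+ *	static int example_fault_handler(struct iommu_domain *domain,
+ *					 struct device *dev,
+ *					 unsigned long iova, int flags,
+ *					 void *token)
+ *	{
+ *		dev_err(dev, "System MMU fault id %d at %#lx\n",
+ *			SYSMMU_FAULT_ID(flags), iova);
+ *		return 0;
+ *	}
+ *
+ *	iovmm_set_fault_handler(example_dev, example_fault_handler, NULL);
+ */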
+
+#else
+#define sysmmu_set_prefetch_buffer_property(dev, inplanes, onplanes, ipoption, opoption) \
+ (0)
+#define sysmmu_set_prefetch_buffer_by_region(dev, pb_reg, num_reg) \
+ do { } while (0)
+#define exynos_sysmmu_map_user_pages(dev, mm, vaddr, iova, size, write, shareable) \
+ (-ENOSYS)
+#define exynos_sysmmu_unmap_user_pages(dev, mm, vaddr, iova, size) \
+ do { } while (0)
+#define exynos_sysmmu_show_status(dev) do { } while (0)
+#define exynos_sysmmu_dump_pgtable(dev) do { } while (0)
+
+#define exynos_sysmmu_clear_ppc_event(dev) do { } while (0)
+#define exynos_sysmmu_show_ppc_event(dev) do { } while (0)
+#define exynos_sysmmu_set_ppc_event(dev, event) do { } while (0)
+#define iovmm_set_fault_handler(dev, handler, token) do { } while (0)
+
+#define exynos_iommu_sync_for_device(dev, iova, len, dir) do { } while (0)
+#define exynos_iommu_sync_for_cpu(dev, iova, len, dir) do { } while (0)
+
+#endif
+#endif /*__ASM_PLAT_IOVMM_H*/