vfio/platform: support MMAP of MMIO regions
author	Antonios Motakis <a.motakis@virtualopensystems.com>
Mon, 16 Mar 2015 20:08:48 +0000 (14:08 -0600)
committer	Alex Williamson <alex.williamson@redhat.com>
Mon, 16 Mar 2015 20:08:48 +0000 (14:08 -0600)
Allow userspace to memory map the MMIO regions of the device so it can
access them directly. PIO regions are not handled at this point.
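
For example, a user could map such a region along these lines (a minimal
sketch only; map_region() is a hypothetical helper, device_fd is assumed to
be an already opened vfio platform device fd, and group/container setup and
error handling are omitted):

	#include <sys/ioctl.h>
	#include <sys/mman.h>
	#include <linux/vfio.h>

	static void *map_region(int device_fd, unsigned int index)
	{
		struct vfio_region_info info = {
			.argsz = sizeof(info),
			.index = index,
		};

		if (ioctl(device_fd, VFIO_DEVICE_GET_REGION_INFO, &info))
			return MAP_FAILED;

		/* Only page-aligned MMIO regions advertise the MMAP flag. */
		if (!(info.flags & VFIO_REGION_INFO_FLAG_MMAP))
			return MAP_FAILED;

		/* The requested protection must not exceed the region flags. */
		return mmap(NULL, info.size, PROT_READ | PROT_WRITE,
			    MAP_SHARED, device_fd, info.offset);
	}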

Signed-off-by: Antonios Motakis <a.motakis@virtualopensystems.com>
Signed-off-by: Baptiste Reynal <b.reynal@virtualopensystems.com>
Reviewed-by: Eric Auger <eric.auger@linaro.org>
Tested-by: Eric Auger <eric.auger@linaro.org>
Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
drivers/vfio/platform/vfio_platform_common.c

index 4df66f5fb313f9116a735cfc61b7923ca24cfd36..d7fe2c71e8bc75e60afbac453882df5a8120d313 100644
@@ -55,6 +55,16 @@ static int vfio_platform_regions_init(struct vfio_platform_device *vdev)
                        if (!(res->flags & IORESOURCE_READONLY))
                                vdev->regions[i].flags |=
                                        VFIO_REGION_INFO_FLAG_WRITE;
+
+                       /*
+                        * Only regions addressed with PAGE granularity may be
+                        * MMAPed securely.
+                        */
+                       if (!(vdev->regions[i].addr & ~PAGE_MASK) &&
+                                       !(vdev->regions[i].size & ~PAGE_MASK))
+                               vdev->regions[i].flags |=
+                                       VFIO_REGION_INFO_FLAG_MMAP;
+
                        break;
                case IORESOURCE_IO:
                        vdev->regions[i].type = VFIO_PLATFORM_REGION_TYPE_PIO;
@@ -334,8 +344,63 @@ static ssize_t vfio_platform_write(void *device_data, const char __user *buf,
        return -EINVAL;
 }
 
+static int vfio_platform_mmap_mmio(struct vfio_platform_region region,
+                                  struct vm_area_struct *vma)
+{
+       u64 req_len, pgoff, req_start;
+
+       req_len = vma->vm_end - vma->vm_start;
+       pgoff = vma->vm_pgoff &
+               ((1U << (VFIO_PLATFORM_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
+       req_start = pgoff << PAGE_SHIFT;
+
+       if (region.size < PAGE_SIZE || req_start + req_len > region.size)
+               return -EINVAL;
+
+       vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+       vma->vm_pgoff = (region.addr >> PAGE_SHIFT) + pgoff;
+
+       return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
+                              req_len, vma->vm_page_prot);
+}
+
 static int vfio_platform_mmap(void *device_data, struct vm_area_struct *vma)
 {
+       struct vfio_platform_device *vdev = device_data;
+       unsigned int index;
+
+       index = vma->vm_pgoff >> (VFIO_PLATFORM_OFFSET_SHIFT - PAGE_SHIFT);
+
+       if (vma->vm_end < vma->vm_start)
+               return -EINVAL;
+       if (!(vma->vm_flags & VM_SHARED))
+               return -EINVAL;
+       if (index >= vdev->num_regions)
+               return -EINVAL;
+       if (vma->vm_start & ~PAGE_MASK)
+               return -EINVAL;
+       if (vma->vm_end & ~PAGE_MASK)
+               return -EINVAL;
+
+       if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_MMAP))
+               return -EINVAL;
+
+       if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_READ)
+                       && (vma->vm_flags & VM_READ))
+               return -EINVAL;
+
+       if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_WRITE)
+                       && (vma->vm_flags & VM_WRITE))
+               return -EINVAL;
+
+       vma->vm_private_data = vdev;
+
+       if (vdev->regions[index].type & VFIO_PLATFORM_REGION_TYPE_MMIO)
+               return vfio_platform_mmap_mmio(vdev->regions[index], vma);
+
+       else if (vdev->regions[index].type & VFIO_PLATFORM_REGION_TYPE_PIO)
+               return -EINVAL; /* not implemented */
+
        return -EINVAL;
 }
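
The index/offset split used in the mmap path above relies on the vfio
platform offset encoding from vfio_platform_private.h; for reference, a
sketch of the assumed definitions (mirroring vfio-pci's conventional
40-bit shift):

	/* Assumed offset encoding backing the mmap path above. */
	#define VFIO_PLATFORM_OFFSET_SHIFT	40
	#define VFIO_PLATFORM_OFFSET_MASK \
		(((u64)(1) << VFIO_PLATFORM_OFFSET_SHIFT) - 1)
	#define VFIO_PLATFORM_OFFSET_TO_INDEX(off) \
		((off) >> VFIO_PLATFORM_OFFSET_SHIFT)
	#define VFIO_PLATFORM_INDEX_TO_OFFSET(index) \
		((u64)(index) << VFIO_PLATFORM_OFFSET_SHIFT)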