* @region - parent region
* @dev - device backing the character device
* @kref - pin this data while it is referenced via filp->private_data
+ * @alive - !alive + rcu grace period == no new mappings can be established
* @id - child id in the region
* @num_resources - number of physical address extents in this device
* @res - array of physical address ranges
struct dax_region *region;
struct device *dev;
struct kref kref;
+ bool alive;
int id;
int num_resources;
struct resource res[0];
dev_dbg(dev, "%s\n", __func__);
+ /*
+ * Note, rcu is not protecting the liveness of dax_dev; it is
+ * ensuring that any fault handlers that might have seen
+ * dax_dev->alive == true have completed. Any fault handlers
+ * that start after synchronize_rcu() has started will abort
+ * upon seeing dax_dev->alive == false.
+ */
+ dax_dev->alive = false;
+ synchronize_rcu();
+
get_device(dev);
device_unregister(dev);
ida_simple_remove(&dax_region->ida, dax_dev->id);
memcpy(dax_dev->res, res, sizeof(*res) * count);
dax_dev->num_resources = count;
kref_init(&dax_dev->kref);
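+ /*
+ * Once alive is set fault handlers may establish mappings; it
+ * is cleared, followed by an rcu grace period, at unregister
+ * time.
+ */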
+ dax_dev->alive = true;
dax_dev->region = dax_region;
kref_get(&dax_region->kref);
}
EXPORT_SYMBOL_GPL(devm_create_dax_dev);
+/* return an unmapped area aligned to the dax region's alignment */
+static unsigned long dax_dev_get_unmapped_area(struct file *filp,
+ unsigned long addr, unsigned long len, unsigned long pgoff,
+ unsigned long flags)
+{
+ unsigned long off, off_end, off_align, len_align, addr_align, align;
+ struct dax_dev *dax_dev = filp ? filp->private_data : NULL;
+ struct dax_region *dax_region;
+
+ if (!dax_dev || addr)
+ goto out;
+
+ dax_region = dax_dev->region;
+ align = dax_region->align;
+ off = pgoff << PAGE_SHIFT;
+ off_end = off + len;
+ off_align = round_up(off, align);
+
+ if ((off_end <= off_align) || ((off_end - off_align) < align))
+ goto out;
+
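+ /*
+ * Over-ask for an area of len + align so that, wherever the
+ * default handler places it, there is room to shift the start
+ * up to the next region-alignment boundary.
+ */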
+ len_align = len + align;
+ if ((off + len_align) < off)
+ goto out;
+
+ addr_align = current->mm->get_unmapped_area(filp, addr, len_align,
+ pgoff, flags);
+ if (!IS_ERR_VALUE(addr_align)) {
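+ /* align the address with the file offset modulo 'align' */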
+ addr_align += (off - addr_align) & (align - 1);
+ return addr_align;
+ }
+ out:
+ return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);
+}
+
+static int __match_devt(struct device *dev, const void *data)
+{
+ const dev_t *devt = data;
+
+ return dev->devt == *devt;
+}
+
+static struct device *dax_dev_find(dev_t devt)
+{
+ return class_find_device(dax_class, NULL, &devt, __match_devt);
+}
+
+static int dax_dev_open(struct inode *inode, struct file *filp)
+{
+ struct dax_dev *dax_dev = NULL;
+ struct device *dev;
+
+ dev = dax_dev_find(inode->i_rdev);
+ if (!dev)
+ return -ENXIO;
+
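+ /*
+ * Take the reference under the device lock to serialize
+ * against device teardown clearing drvdata.
+ */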
+ device_lock(dev);
+ dax_dev = dev_get_drvdata(dev);
+ if (dax_dev) {
+ dev_dbg(dev, "%s\n", __func__);
+ filp->private_data = dax_dev;
+ kref_get(&dax_dev->kref);
+ inode->i_flags = S_DAX;
+ }
+ device_unlock(dev);
+
+ if (!dax_dev) {
+ put_device(dev);
+ return -ENXIO;
+ }
+ return 0;
+}
+
+static int dax_dev_release(struct inode *inode, struct file *filp)
+{
+ struct dax_dev *dax_dev = filp->private_data;
+ struct device *dev = dax_dev->dev;
+
+ dev_dbg(dev, "%s\n", __func__);
+ dax_dev_put(dax_dev);
+ put_device(dev);
+
+ return 0;
+}
+
+static int check_vma(struct dax_dev *dax_dev, struct vm_area_struct *vma,
+ const char *func)
+{
+ struct dax_region *dax_region = dax_dev->region;
+ struct device *dev = dax_dev->dev;
+ unsigned long mask;
+
+ if (!dax_dev->alive)
+ return -ENXIO;
+
+ /* prevent private / writable mappings from being established */
+ if ((vma->vm_flags & (VM_NORESERVE|VM_SHARED|VM_WRITE)) == VM_WRITE) {
+ dev_info(dev, "%s: %s: fail, attempted private mapping\n",
+ current->comm, func);
+ return -EINVAL;
+ }
+
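+ /* mappings must be aligned to the region's fault granularity */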
+ mask = dax_region->align - 1;
+ if (vma->vm_start & mask || vma->vm_end & mask) {
+ dev_info(dev, "%s: %s: fail, unaligned vma (%#lx - %#lx, %#lx)\n",
+ current->comm, func, vma->vm_start, vma->vm_end,
+ mask);
+ return -EINVAL;
+ }
+
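+ /*
+ * A PFN_DEV-only range has no struct pages, so such mappings
+ * cannot be safely inherited by a child process; require the
+ * task to opt out of fork inheritance (MADV_DONTFORK sets
+ * VM_DONTCOPY).
+ */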
+ if ((dax_region->pfn_flags & (PFN_DEV|PFN_MAP)) == PFN_DEV
+ && (vma->vm_flags & VM_DONTCOPY) == 0) {
+ dev_info(dev, "%s: %s: fail, dax range requires MADV_DONTFORK\n",
+ current->comm, func);
+ return -EINVAL;
+ }
+
+ if (!vma_is_dax(vma)) {
+ dev_info(dev, "%s: %s: fail, vma is not DAX capable\n",
+ current->comm, func);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static phys_addr_t pgoff_to_phys(struct dax_dev *dax_dev, pgoff_t pgoff,
+ unsigned long size)
+{
+ struct resource *res;
+ phys_addr_t phys;
+ int i;
+
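+ /*
+ * Treat the device's extents as one contiguous logical space:
+ * walk them, consuming pgoff until the extent containing the
+ * offset is found.
+ */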
+ for (i = 0; i < dax_dev->num_resources; i++) {
+ res = &dax_dev->res[i];
+ phys = pgoff * PAGE_SIZE + res->start;
+ if (phys >= res->start && phys <= res->end)
+ break;
+ pgoff -= PHYS_PFN(resource_size(res));
+ }
+
+ if (i < dax_dev->num_resources) {
+ res = &dax_dev->res[i];
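+ /* fail if the requested size extends past the extent */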
+ if (phys + size - 1 <= res->end)
+ return phys;
+ }
+
+ return -1;
+}
+
+static int __dax_dev_fault(struct dax_dev *dax_dev, struct vm_area_struct *vma,
+ struct vm_fault *vmf)
+{
+ unsigned long vaddr = (unsigned long) vmf->virtual_address;
+ struct device *dev = dax_dev->dev;
+ struct dax_region *dax_region;
+ int rc = VM_FAULT_SIGBUS;
+ phys_addr_t phys;
+ pfn_t pfn;
+
+ if (check_vma(dax_dev, vma, __func__))
+ return VM_FAULT_SIGBUS;
+
+ dax_region = dax_dev->region;
+ if (dax_region->align > PAGE_SIZE) {
+ dev_dbg(dev, "%s: alignment > fault size\n", __func__);
+ return VM_FAULT_SIGBUS;
+ }
+
+ phys = pgoff_to_phys(dax_dev, vmf->pgoff, PAGE_SIZE);
+ if (phys == -1) {
+ dev_dbg(dev, "%s: pgoff_to_phys(%#lx) failed\n", __func__,
+ vmf->pgoff);
+ return VM_FAULT_SIGBUS;
+ }
+
+ pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);
+
+ rc = vm_insert_mixed(vma, vaddr, pfn);
+
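+ /* -EBUSY means a racing fault already installed the mapping */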
+ if (rc == -ENOMEM)
+ return VM_FAULT_OOM;
+ if (rc < 0 && rc != -EBUSY)
+ return VM_FAULT_SIGBUS;
+
+ return VM_FAULT_NOPAGE;
+}
+
+static int dax_dev_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ int rc;
+ struct file *filp = vma->vm_file;
+ struct dax_dev *dax_dev = filp->private_data;
+
+ dev_dbg(dax_dev->dev, "%s: %s: %s (%#lx - %#lx)\n", __func__,
+ current->comm, (vmf->flags & FAULT_FLAG_WRITE)
+ ? "write" : "read", vma->vm_start, vma->vm_end);
+ rcu_read_lock();
+ rc = __dax_dev_fault(dax_dev, vma, vmf);
+ rcu_read_unlock();
+
+ return rc;
+}
+
+static int __dax_dev_pmd_fault(struct dax_dev *dax_dev,
+ struct vm_area_struct *vma, unsigned long addr, pmd_t *pmd,
+ unsigned int flags)
+{
+ unsigned long pmd_addr = addr & PMD_MASK;
+ struct device *dev = dax_dev->dev;
+ struct dax_region *dax_region;
+ phys_addr_t phys;
+ pgoff_t pgoff;
+ pfn_t pfn;
+
+ if (check_vma(dax_dev, vma, __func__))
+ return VM_FAULT_SIGBUS;
+
+ dax_region = dax_dev->region;
+ if (dax_region->align > PMD_SIZE) {
+ dev_dbg(dev, "%s: alignment > fault size\n", __func__);
+ return VM_FAULT_SIGBUS;
+ }
+
+ /* dax pmd mappings require pfn_t_devmap() */
+ if ((dax_region->pfn_flags & (PFN_DEV|PFN_MAP)) != (PFN_DEV|PFN_MAP)) {
+ dev_dbg(dev, "%s: region lacks devmap flags\n", __func__);
+ return VM_FAULT_SIGBUS;
+ }
+
+ pgoff = linear_page_index(vma, pmd_addr);
+ phys = pgoff_to_phys(dax_dev, pgoff, PMD_SIZE);
+ if (phys == -1) {
+ dev_dbg(dev, "%s: pgoff_to_phys(%#lx) failed\n", __func__,
+ pgoff);
+ return VM_FAULT_SIGBUS;
+ }
+
+ pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);
+
+ return vmf_insert_pfn_pmd(vma, addr, pmd, pfn,
+ flags & FAULT_FLAG_WRITE);
+}
+
+static int dax_dev_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
+ pmd_t *pmd, unsigned int flags)
+{
+ int rc;
+ struct file *filp = vma->vm_file;
+ struct dax_dev *dax_dev = filp->private_data;
+
+ dev_dbg(dax_dev->dev, "%s: %s: %s (%#lx - %#lx)\n", __func__,
+ current->comm, (flags & FAULT_FLAG_WRITE)
+ ? "write" : "read", vma->vm_start, vma->vm_end);
+
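+ /* see dax_dev_fault(): pairs with unregister's synchronize_rcu() */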
+ rcu_read_lock();
+ rc = __dax_dev_pmd_fault(dax_dev, vma, addr, pmd, flags);
+ rcu_read_unlock();
+
+ return rc;
+}
+
+static void dax_dev_vm_open(struct vm_area_struct *vma)
+{
+ struct file *filp = vma->vm_file;
+ struct dax_dev *dax_dev = filp->private_data;
+
+ dev_dbg(dax_dev->dev, "%s\n", __func__);
+ kref_get(&dax_dev->kref);
+}
+
+static void dax_dev_vm_close(struct vm_area_struct *vma)
+{
+ struct file *filp = vma->vm_file;
+ struct dax_dev *dax_dev = filp->private_data;
+
+ dev_dbg(dax_dev->dev, "%s\n", __func__);
+ dax_dev_put(dax_dev);
+}
+
+static const struct vm_operations_struct dax_dev_vm_ops = {
+ .fault = dax_dev_fault,
+ .pmd_fault = dax_dev_pmd_fault,
+ .open = dax_dev_vm_open,
+ .close = dax_dev_vm_close,
+};
+
+static int dax_dev_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct dax_dev *dax_dev = filp->private_data;
+ int rc;
+
+ dev_dbg(dax_dev->dev, "%s\n", __func__);
+
+ rc = check_vma(dax_dev, vma, __func__);
+ if (rc)
+ return rc;
+
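+ /* reference held by the vma, released via dax_dev_vm_close() */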
+ kref_get(&dax_dev->kref);
+ vma->vm_ops = &dax_dev_vm_ops;
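+ /* VM_MIXEDMAP enables pfn-based mappings, VM_HUGEPAGE hints
+ * that pmd-sized faults are preferred
+ */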
+ vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE;
+ return 0;
+}
+
static const struct file_operations dax_fops = {
.llseek = noop_llseek,
.owner = THIS_MODULE,
+ .open = dax_dev_open,
+ .release = dax_dev_release,
+ .get_unmapped_area = dax_dev_get_unmapped_area,
+ .mmap = dax_dev_mmap,
};
static int __init dax_init(void)