set_pmd_at(vma->vm_mm, address, pmdp, pmd);
mmu_notifier_invalidate_range(vma->vm_mm, start, end);
unlock_pmd:
- spin_unlock(ptl);
#endif
+ spin_unlock(ptl);
} else {
if (pfn != pte_pfn(*ptep))
goto unlock_pte;
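The hunk above moves spin_unlock(ptl) from inside the #ifdef CONFIG_FS_DAX_PMD block to after the #endif, so the page-table lock taken earlier in this function is released even on kernels built without PMD support. Below is a minimal userspace sketch of the pattern, using pthread_mutex_t as a stand-in for the page-table lock; the names and the toy main() are illustrative only, not the kernel code itself.

/* Minimal sketch of the locking fix above: if the unlock sits inside the
 * #ifdef block, a build without CONFIG_FS_DAX_PMD returns with the lock
 * still held.  pthread_mutex_t stands in for the page-table lock. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t ptl = PTHREAD_MUTEX_INITIALIZER;

static void mkclean_one_entry(void)
{
	pthread_mutex_lock(&ptl);	/* lock is taken unconditionally */
#ifdef CONFIG_FS_DAX_PMD
	/* PMD write-protect / clean work would happen here */
#endif
	pthread_mutex_unlock(&ptl);	/* after the fix: outside the #ifdef,
					   so it runs on every configuration */
}

int main(void)
{
	mkclean_one_entry();
	/* trylock succeeds only if the lock was properly released above */
	if (pthread_mutex_trylock(&ptl) == 0) {
		printf("lock was released correctly\n");
		pthread_mutex_unlock(&ptl);
	} else {
		printf("lock leaked\n");
	}
	return 0;
}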
}
dax_mapping_entry_mkclean(mapping, index, pfn_t_to_pfn(pfn));
- dax_flush(dax_dev, pgoff, kaddr, size);
+ dax_flush(dax_dev, kaddr, size);
/*
* After we have flushed the cache, we can clear the dirty tag. There
* cannot be new dirty data in the pfn after the flush has completed as
return rc;
}
memset(kaddr + offset, 0, size);
- dax_flush(dax_dev, pgoff, kaddr + offset, size);
+ dax_flush(dax_dev, kaddr + offset, size);
dax_read_unlock(id);
}
return 0;
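Both dax_flush() call sites changed above drop the pgoff argument: the flush acts on the kernel address (kaddr) already obtained for the range, so the page offset is not needed in the call. The sketch below shows the updated calling convention with a stub standing in for the real driver-provided flush; the stub and the caller are illustrative assumptions, not kernel code.

/* Sketch of the new calling convention: dax_flush() takes only the kernel
 * address and length. */
#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct dax_device;	/* opaque, as in the kernel headers */

static void dax_flush(struct dax_device *dax_dev, void *addr, size_t size)
{
	/* a real implementation writes back the CPU cache lines covering
	 * [addr, addr + size); here we just report the range */
	(void)dax_dev;
	printf("flush %zu bytes at %p\n", size, addr);
}

int main(void)
{
	static char kaddr[4096];	/* stands in for the direct-mapped pmem address */
	size_t offset = 512, size = 1024;

	memset(kaddr + offset, 0, size);	/* zero part of the page... */
	dax_flush(NULL, kaddr + offset, size);	/* ...then flush it, no pgoff */
	return 0;
}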
* this is a reliable test.
*/
pgoff = linear_page_index(vma, pmd_addr);
- max_pgoff = (i_size_read(inode) - 1) >> PAGE_SHIFT;
+ max_pgoff = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
trace_dax_pmd_fault(inode, vmf, max_pgoff, 0);
if ((pmd_addr + PMD_SIZE) > vma->vm_end)
goto fallback;
- if (pgoff > max_pgoff) {
+ if (pgoff >= max_pgoff) {
result = VM_FAULT_SIGBUS;
goto out;
}
/* If the PMD would extend beyond the file size */
- if ((pgoff | PG_PMD_COLOUR) > max_pgoff)
+ if ((pgoff | PG_PMD_COLOUR) >= max_pgoff)
goto fallback;
/*
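The last hunk changes how the PMD fault path computes max_pgoff and tightens the bounds checks from > to >=. With the old formula, a zero-length file gives i_size_read() == 0, so (0 - 1) >> PAGE_SHIFT wraps to a huge value once stored in max_pgoff and the "pgoff > max_pgoff" check never fires; with DIV_ROUND_UP(), max_pgoff is the number of pages in the file, i.e. the first page offset beyond EOF, which is why the comparisons become >=. The arithmetic can be checked with a small standalone program; PAGE_SHIFT and PMD_SIZE below assume x86-64 defaults, and unsigned arithmetic is used as a simplification of the kernel's loff_t/pgoff_t types.

/* Standalone sketch of the boundary arithmetic behind the max_pgoff change. */
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PMD_SIZE	(1UL << 21)			/* 2 MiB huge page */
#define PG_PMD_COLOUR	((PMD_SIZE >> PAGE_SHIFT) - 1)	/* 511 */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long i_size = 0;	/* zero-length file, the case the fix targets */
	unsigned long pgoff = 0;	/* faulting page offset */

	/* old formula: wraps to a huge value for an empty file */
	unsigned long old_max = (i_size - 1) >> PAGE_SHIFT;
	/* new formula: number of pages in the file = first offset past EOF */
	unsigned long new_max = DIV_ROUND_UP(i_size, PAGE_SIZE);

	printf("old: max_pgoff=%lu, SIGBUS check (pgoff > max_pgoff): %s\n",
	       old_max, pgoff > old_max ? "fires" : "does not fire (bug)");
	printf("new: max_pgoff=%lu, SIGBUS check (pgoff >= max_pgoff): %s\n",
	       new_max, pgoff >= new_max ? "fires" : "does not fire");

	/* for a short non-empty file, a 2 MiB mapping would cross EOF, so the
	 * PMD path must fall back to PTEs */
	new_max = DIV_ROUND_UP(3 * PAGE_SIZE, PAGE_SIZE);	/* 3 pages */
	printf("PMD beyond EOF ((pgoff | PG_PMD_COLOUR) >= max_pgoff): %s\n",
	       ((pgoff | PG_PMD_COLOUR) >= new_max) ? "fallback" : "use PMD");
	return 0;
}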