#include "internal.h"
+#ifdef CONFIG_MTK_EXTMEM
+extern bool extmem_in_mspace(struct vm_area_struct *vma);
+extern unsigned long get_virt_from_mspace(unsigned long pa);
+#endif
+
#ifdef LAST_NID_NOT_IN_PAGE_FLAGS
#warning Unfortunate NUMA and NUMA Balancing config, growing page-frame for last_nid.
#endif
}
EXPORT_SYMBOL_GPL(zap_vma_ptes);
+/*
+ * FOLL_FORCE can write to even unwritable pte's, but only
+ * after we've gone through a COW cycle and they are dirty.
+ */
+static inline bool can_follow_write_pte(pte_t pte, unsigned int flags)
+{
+	return pte_write(pte) ||
+		((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte));
+}
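+/*
+ * Note: FOLL_COW is not a caller-supplied flag. __get_user_pages() sets
+ * it further down in this patch, once a forced write fault has actually
+ * broken COW (VM_FAULT_WRITE on a read-only VMA), so a FOLL_FORCE write
+ * is honoured here only after the PTE has really been COW'ed and
+ * dirtied. This mirrors the upstream fix for the "Dirty COW" race
+ * (CVE-2016-5195).
+ */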
+
/**
* follow_page_mask - look up a page descriptor from a user-virtual address
* @vma: vm_area_struct mapping @address
}
if ((flags & FOLL_NUMA) && pte_numa(pte))
goto no_page;
- if ((flags & FOLL_WRITE) && !pte_write(pte))
+ if ((flags & FOLL_WRITE) && !can_follow_write_pte(pte, flags))
goto unlock;
page = vm_normal_page(vma, address, pte);
return ERR_PTR(-EFAULT);
return page;
}
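+/*
+ * Exporting follow_page_mask() is a vendor addition, presumably so that
+ * out-of-tree modules such as the MTK extmem driver declared above can
+ * resolve user mappings; mainline does not export this symbol.
+ */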
+EXPORT_SYMBOL_GPL(follow_page_mask);
static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
{
page_mask = 0;
goto next_page;
}
-
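+	/*
+	 * MTK extmem (vendor change): unlike mainline, which refuses
+	 * get_user_pages() on any VM_IO/VM_PFNMAP VMA, mappings carrying
+	 * all of VM_IO | VM_RESERVED | VM_PFNMAP (reserved physical
+	 * memory) are allowed through.
+	 */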
+#ifdef CONFIG_MTK_EXTMEM
+		if (!vma || !(vm_flags & vma->vm_flags))
+			return i ? : -EFAULT;
+
+		if (vma->vm_flags & (VM_IO | VM_PFNMAP)) {
+			/*
+			 * Let reserved physical memory PFN mappings pass,
+			 * i.e. VMAs with all of VM_IO | VM_RESERVED |
+			 * VM_PFNMAP set.
+			 */
+			if (!((vma->vm_flags & VM_IO) &&
+			      (vma->vm_flags & VM_RESERVED) &&
+			      (vma->vm_flags & VM_PFNMAP)))
+				return i ? : -EFAULT;
+		}
+#else
		if (!vma ||
		    (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
		    !(vm_flags & vma->vm_flags))
			return i ? : -EFAULT;
+#endif
if (is_vm_hugetlb_page(vma)) {
i = follow_hugetlb_page(mm, vma, pages, vmas,
*/
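+		/*
+		 * Setting FOLL_COW here (rather than clearing FOLL_WRITE,
+		 * as the removed line did) keeps the write intent visible,
+		 * so can_follow_write_pte() accepts the unwritable PTE only
+		 * once COW has genuinely happened and dirtied it. Clearing
+		 * FOLL_WRITE is what allowed the Dirty COW race
+		 * (CVE-2016-5195).
+		 */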
if ((ret & VM_FAULT_WRITE) &&
!(vma->vm_flags & VM_WRITE))
- foll_flags &= ~FOLL_WRITE;
+ foll_flags |= FOLL_COW;
cond_resched();
}
* un-COW'ed pages by matching them up with "vma->vm_pgoff".
* See vm_normal_page() for details.
*/
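+	/*
+	 * MTK extmem (vendor change): record vm_pgoff for every full-VMA
+	 * remap, not just for COW mappings; the extmem access path added
+	 * further down in this patch recovers the physical base address
+	 * from vma->vm_pgoff << PAGE_SHIFT. Partial remaps of COW mappings
+	 * are still rejected, as upstream does.
+	 */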
+#ifdef CONFIG_MTK_EXTMEM
+	if (addr == vma->vm_start && end == vma->vm_end) {
+		vma->vm_pgoff = pfn;
+	} else if (is_cow_mapping(vma->vm_flags)) {
+		return -EINVAL;
+	}
+#else
if (is_cow_mapping(vma->vm_flags)) {
if (addr != vma->vm_start || end != vma->vm_end)
return -EINVAL;
vma->vm_pgoff = pfn;
}
-
+#endif
err = track_pfn_remap(vma, &prot, pfn, addr, PAGE_ALIGN(size));
if (err)
return -EINVAL;
pgoff_t pgoff = (((address & PAGE_MASK)
- vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
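+	/*
+	 * __do_fault() invokes vma->vm_ops->fault() unconditionally, so a
+	 * VMA whose vm_ops lacks a ->fault handler must be rejected here
+	 * with SIGBUS rather than oopsing on a NULL call.
+	 */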
+	/* The VMA was not fully populated on mmap() or missing VM_DONTEXPAND */
+	if (!vma->vm_ops->fault)
+		return VM_FAULT_SIGBUS;
+
pte_unmap(page_table);
/* The VMA was not fully populated on mmap() or missing VM_DONTEXPAND */
if (!vma->vm_ops->fault)
ret = get_user_pages(tsk, mm, addr, 1,
write, 1, &page, &vma);
if (ret <= 0) {
+#ifdef CONFIG_MTK_EXTMEM
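+		/*
+		 * Fallback for extmem regions: a failed get_user_pages()
+		 * read is served by copying from the mspace kernel mapping
+		 * instead; writes still fall through to the VM_IO/VM_PFNMAP
+		 * path below.
+		 */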
+		if (!write) {
+			vma = find_vma(mm, addr);
+			if (!vma || vma->vm_start > addr)
+				break;
+			if (vma->vm_end < addr + len)
+				len = vma->vm_end - addr;
+			if (extmem_in_mspace(vma)) {
+				void *extmem_va = (void *)get_virt_from_mspace(
+					vma->vm_pgoff << PAGE_SHIFT) +
+					(addr - vma->vm_start);
+
+				memcpy(buf, extmem_va, len);
+				buf += len;
+				break;
+			}
+		}
+#endif
/*
* Check if this is a VM_IO | VM_PFNMAP VMA, which
* we can access using slightly different code.