diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c
index 53afbb919a1c858734513a43360ec8ef7d7bb5a8..e00d985a51c56d09df22247d342a1d03aa044427 100644
--- a/mm/page_vma_mapped.c
+++ b/mm/page_vma_mapped.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 #include <linux/mm.h>
 #include <linux/rmap.h>
 #include <linux/hugetlb.h>
@@ -20,7 +21,29 @@ static bool map_pte(struct page_vma_mapped_walk *pvmw)
                        if (!is_swap_pte(*pvmw->pte))
                                return false;
                } else {
-                       if (!pte_present(*pvmw->pte))
+                       /*
+                        * We get here when we are trying to unmap a private
+                        * device page from the process address space. Such a
+                        * page is not CPU accessible and thus is mapped as a
+                        * special swap entry; nonetheless it still counts as
+                        * a valid regular mapping of the page (and is
+                        * accounted for as such in the page's map count).
+                        *
+                        * So handle this special case as if it were a normal
+                        * page mapping, i.e. lock the CPU page table and
+                        * return true.
+                        *
+                        * For more details on device private memory see HMM
+                        * (include/linux/hmm.h or mm/hmm.c).
+                        */
+                       if (is_swap_pte(*pvmw->pte)) {
+                               swp_entry_t entry;
+
+                               /* Handle un-addressable ZONE_DEVICE memory */
+                               entry = pte_to_swp_entry(*pvmw->pte);
+                               if (!is_device_private_entry(entry))
+                                       return false;
+                       } else if (!pte_present(*pvmw->pte))
                                return false;
                }
        }
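
The comment added above describes device private pages being mapped as special, non-present swap entries. As an aside (not part of this patch), here is a minimal sketch of how such a PTE is typically produced, assuming the HMM-era helpers from include/linux/swapops.h in this kernel generation; the wrapper name encode_device_private_pte() is made up for illustration. The walk code in these hunks decodes exactly this encoding.

#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/swapops.h>

/* Illustration only: build the special swap PTE for a device private page. */
static pte_t encode_device_private_pte(struct page *dpage, bool writable)
{
	swp_entry_t entry;

	/* Encode the device page's pfn into a non-present swap entry. */
	entry = make_device_private_entry(dpage, writable);

	/*
	 * The resulting PTE is !pte_present() yet is_swap_pte(), and
	 * device_private_entry_to_pfn(entry) == page_to_pfn(dpage),
	 * which is what map_pte() and check_pte() rely on.
	 */
	return swp_entry_to_pte(entry);
}
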
@@ -29,10 +52,29 @@ static bool map_pte(struct page_vma_mapped_walk *pvmw)
        return true;
 }
 
+/**
+ * check_pte - check if @pvmw->page is mapped at the @pvmw->pte
+ *
+ * page_vma_mapped_walk() found a place where @pvmw->page is *potentially*
+ * mapped. check_pte() has to validate this.
+ *
+ * @pvmw->pte may point to empty PTE, swap PTE or PTE pointing to arbitrary
+ * page.
+ *
+ * If PVMW_MIGRATION flag is set, returns true if @pvmw->pte contains migration
+ * entry that points to @pvmw->page or any subpage in case of THP.
+ *
+ * If PVMW_MIGRATION flag is not set, returns true if @pvmw->pte points to
+ * @pvmw->page or any subpage in case of THP.
+ *
+ * Otherwise, returns false.
+ *
+ */
 static bool check_pte(struct page_vma_mapped_walk *pvmw)
 {
+       unsigned long pfn;
+
        if (pvmw->flags & PVMW_MIGRATION) {
-#ifdef CONFIG_MIGRATION
                swp_entry_t entry;
                if (!is_swap_pte(*pvmw->pte))
                        return false;
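
The kerneldoc above states check_pte()'s contract from the caller's point of view. For context only, the sketch below shows how a typical caller drives the walk; it loosely mirrors page_mapped_in_vma() in this same file, and the wrapper name is hypothetical.

#include <linux/rmap.h>

/* Illustration only: does @page have at least one mapping inside @vma? */
static bool page_has_mapping_in_vma(struct page *page,
				    struct vm_area_struct *vma,
				    unsigned long address)
{
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
		.address = address,
	};

	/*
	 * Each successful iteration returns with the entry mapped and
	 * locked; for PTE-level mappings, check_pte() is what accepted
	 * that PTE as a mapping of @page (or of a subpage for THP).
	 */
	while (page_vma_mapped_walk(&pvmw)) {
		page_vma_mapped_walk_done(&pvmw);
		return true;
	}
	return false;
}
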
@@ -40,37 +82,31 @@ static bool check_pte(struct page_vma_mapped_walk *pvmw)
 
                if (!is_migration_entry(entry))
                        return false;
-               if (migration_entry_to_page(entry) - pvmw->page >=
-                               hpage_nr_pages(pvmw->page)) {
-                       return false;
-               }
-               if (migration_entry_to_page(entry) < pvmw->page)
-                       return false;
-#else
-               WARN_ON_ONCE(1);
-#endif
-       } else {
-               if (is_swap_pte(*pvmw->pte)) {
-                       swp_entry_t entry;
 
-                       entry = pte_to_swp_entry(*pvmw->pte);
-                       if (is_device_private_entry(entry) &&
-                           device_private_entry_to_page(entry) == pvmw->page)
-                               return true;
-               }
+               pfn = migration_entry_to_pfn(entry);
+       } else if (is_swap_pte(*pvmw->pte)) {
+               swp_entry_t entry;
 
-               if (!pte_present(*pvmw->pte))
+               /* Handle un-addressable ZONE_DEVICE memory */
+               entry = pte_to_swp_entry(*pvmw->pte);
+               if (!is_device_private_entry(entry))
                        return false;
 
-               /* THP can be referenced by any subpage */
-               if (pte_page(*pvmw->pte) - pvmw->page >=
-                               hpage_nr_pages(pvmw->page)) {
-                       return false;
-               }
-               if (pte_page(*pvmw->pte) < pvmw->page)
+               pfn = device_private_entry_to_pfn(entry);
+       } else {
+               if (!pte_present(*pvmw->pte))
                        return false;
+
+               pfn = pte_pfn(*pvmw->pte);
        }
 
+       if (pfn < page_to_pfn(pvmw->page))
+               return false;
+
+       /* THP can be referenced by any subpage */
+       if (pfn - page_to_pfn(pvmw->page) >= hpage_nr_pages(pvmw->page))
+               return false;
+
        return true;
 }
 
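
After this change, every case (present PTE, migration entry, device private entry) funnels into the same pfn range test against the potentially huge @pvmw->page. Below is a hedged restatement of that final check, with a hypothetical helper name.

/*
 * Illustration only: the two comparisons at the end of check_pte()
 * amount to this range test.  For a THP head page with pfn P and
 * hpage_nr_pages() == N, any pfn in [P, P + N) matches; for a base
 * page N == 1, so only an exact pfn match is accepted.
 */
static bool pfn_matches_page(struct page *page, unsigned long pfn)
{
	unsigned long page_pfn = page_to_pfn(page);

	/* THP can be referenced by any subpage */
	return pfn >= page_pfn && pfn - page_pfn < hpage_nr_pages(page);
}

check_pte() could then end with return pfn_matches_page(pvmw->page, pfn); the open-coded form in the hunk keeps the two early returns instead.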