[RAMEN9610-12171] mm: hpa: change allocate logic from buddy
LineageOS/android_kernel_motorola_exynos9610.git
diff --git a/mm/vmscan.c b/mm/vmscan.c
index eb2f0315b8c0e9549b98e51c2d5e5cd45b9920e1..be56e2e1931e3ef0a499ce309eb9f8c276d72f14 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -297,10 +297,13 @@ EXPORT_SYMBOL(register_shrinker);
  */
 void unregister_shrinker(struct shrinker *shrinker)
 {
+       if (!shrinker->nr_deferred)
+               return;
        down_write(&shrinker_rwsem);
        list_del(&shrinker->list);
        up_write(&shrinker_rwsem);
        kfree(shrinker->nr_deferred);
+       shrinker->nr_deferred = NULL;
 }
 EXPORT_SYMBOL(unregister_shrinker);
 
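Note on the hunk above: with the early return on a NULL nr_deferred and the clearing of nr_deferred after the kfree(), unregister_shrinker() becomes tolerant of being called on a shrinker that was never successfully registered, or of being called twice. A hedged sketch of the caller pattern this guards against (example_shrinker and example_teardown are illustrative names, not part of the patch):

static struct shrinker example_shrinker;        /* hypothetical shrinker */

static void example_teardown(void)
{
        /*
         * Safe even if register_shrinker() failed (nr_deferred was never
         * allocated) or if this teardown path runs more than once: the
         * !shrinker->nr_deferred check turns the call into a no-op, and
         * clearing nr_deferred after the kfree() prevents a repeat call
         * from double-freeing or touching the already-unlinked list entry.
         */
        unregister_shrinker(&example_shrinker);
        unregister_shrinker(&example_shrinker); /* second call is a no-op */
}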
@@ -1433,14 +1436,24 @@ int __isolate_lru_page(struct page *page, isolate_mode_t mode)
 
                if (PageDirty(page)) {
                        struct address_space *mapping;
+                       bool migrate_dirty;
 
                        /*
                         * Only pages without mappings or that have a
                         * ->migratepage callback are possible to migrate
-                        * without blocking
+                        * without blocking. However, we can be racing with
+                        * truncation so it's necessary to lock the page
+                        * to stabilise the mapping as truncation holds
+                        * the page lock until after the page is removed
+                        * from the page cache.
                         */
+                       if (!trylock_page(page))
+                               return ret;
+
                        mapping = page_mapping(page);
-                       if (mapping && !mapping->a_ops->migratepage)
+                       migrate_dirty = !mapping || mapping->a_ops->migratepage;
+                       unlock_page(page);
+                       if (!migrate_dirty)
                                return ret;
                }
        }
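The hunk above uses a trylock-to-stabilise pattern: truncation holds the page lock until the page has been removed from the page cache, so holding the lock guarantees page_mapping() cannot change while it is inspected, and trylock keeps the isolation path non-blocking. A minimal stand-alone sketch of the same pattern (can_migrate_dirty_page is an illustrative helper, not part of the patch):

static bool can_migrate_dirty_page(struct page *page)
{
        struct address_space *mapping;
        bool migrate_dirty;

        /* Non-blocking: if someone else holds the lock, give up now. */
        if (!trylock_page(page))
                return false;

        /* Stable while we hold the page lock; truncation cannot race us. */
        mapping = page_mapping(page);
        migrate_dirty = !mapping || mapping->a_ops->migratepage;

        unlock_page(page);
        return migrate_dirty;
}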
@@ -1843,6 +1856,20 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
        if (stat.nr_writeback && stat.nr_writeback == nr_taken)
                set_bit(PGDAT_WRITEBACK, &pgdat->flags);
 
+       /*
+        * If dirty pages are scanned that are not queued for IO, it
+        * implies that flushers are not doing their job. This can
+        * happen when memory pressure pushes dirty pages to the end of
+        * the LRU before the dirty limits are breached and the dirty
+        * data has expired. It can also happen when the proportion of
+        * dirty pages grows not through writes but through memory
+        * pressure reclaiming all the clean cache. And in some cases,
+        * the flushers simply cannot keep up with the allocation
+        * rate. Nudge the flusher threads in case they are asleep.
+        */
+       if (stat.nr_unqueued_dirty == nr_taken)
+               wakeup_flusher_threads(0, WB_REASON_VMSCAN);
+
        /*
         * Legacy memcg will stall in page writeback so avoid forcibly
         * stalling here.
@@ -1855,22 +1882,9 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
                if (stat.nr_dirty && stat.nr_dirty == stat.nr_congested)
                        set_bit(PGDAT_CONGESTED, &pgdat->flags);
 
-               /*
-                * If dirty pages are scanned that are not queued for IO, it
-                * implies that flushers are not doing their job. This can
-                * happen when memory pressure pushes dirty pages to the end of
-                * the LRU before the dirty limits are breached and the dirty
-                * data has expired. It can also happen when the proportion of
-                * dirty pages grows not through writes but through memory
-                * pressure reclaiming all the clean cache. And in some cases,
-                * the flushers simply cannot keep up with the allocation
-                * rate. Nudge the flusher threads in case they are asleep, but
-                * also allow kswapd to start writing pages during reclaim.
-                */
-               if (stat.nr_unqueued_dirty == nr_taken) {
-                       wakeup_flusher_threads(0, WB_REASON_VMSCAN);
+               /* Allow kswapd to start writing pages during reclaim. */
+               if (stat.nr_unqueued_dirty == nr_taken)
                        set_bit(PGDAT_DIRTY, &pgdat->flags);
-               }
 
                /*
                 * If kswapd scans pages marked for immediate
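Taken together, the two hunks above hoist the flusher wakeup out of the cgroup-gated branch so that legacy memcg reclaim also nudges the flusher threads, while the PGDAT_DIRTY hint (which lets kswapd write pages itself) stays behind that gate. A simplified sketch of the resulting control flow in shrink_inactive_list(); the sane_reclaim(sc) check is assumed from this kernel version, and context not shown in the diff is elided:

        /* Nudge the flusher threads for every reclaim context. */
        if (stat.nr_unqueued_dirty == nr_taken)
                wakeup_flusher_threads(0, WB_REASON_VMSCAN);

        if (sane_reclaim(sc)) {
                if (stat.nr_dirty && stat.nr_dirty == stat.nr_congested)
                        set_bit(PGDAT_CONGESTED, &pgdat->flags);

                /* Allow kswapd to start writing pages during reclaim. */
                if (stat.nr_unqueued_dirty == nr_taken)
                        set_bit(PGDAT_DIRTY, &pgdat->flags);
        }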
@@ -3947,7 +3961,13 @@ int node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order)
  */
 int page_evictable(struct page *page)
 {
-       return !mapping_unevictable(page_mapping(page)) && !PageMlocked(page);
+       int ret;
+
+       /* Prevent address_space of inode and swap cache from being freed */
+       rcu_read_lock();
+       ret = !mapping_unevictable(page_mapping(page)) && !PageMlocked(page);
+       rcu_read_unlock();
+       return ret;
 }
 
 #ifdef CONFIG_SHMEM
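The rcu_read_lock() added to page_evictable() relies on the structure backing the mapping (the inode's address_space, or the swap cache's) being freed only after an RCU grace period, which is the assumption the in-line comment encodes; the mapping returned by page_mapping() then stays valid for the duration of the read-side critical section. A hedged sketch of the same pattern (mapping_is_unevictable is an illustrative helper, not part of the patch):

static bool mapping_is_unevictable(struct page *page)
{
        bool ret;

        rcu_read_lock();
        /*
         * page_mapping(page) may point at memory that is only guaranteed
         * to stay around for as long as this RCU read-side section lasts.
         */
        ret = mapping_unevictable(page_mapping(page));
        rcu_read_unlock();

        return ret;
}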