UNEVICTABLE_PGMUNLOCKED,
UNEVICTABLE_PGCLEARED, /* on COW, page truncate */
UNEVICTABLE_PGSTRANDED, /* unable to isolate on unlock */
+ UNEVICTABLE_MLOCKFREED,
#endif
NR_VM_EVENT_ITEMS
};
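For orientation: each vm_event_item indexes a per-CPU counter array, and /proc/vmstat sums the per-CPU values on read. A simplified sketch of the counting machinery (paraphrasing include/linux/vmstat.h of this era; not part of the patch):

struct vm_event_state {
	unsigned long event[NR_VM_EVENT_ITEMS];
};

DECLARE_PER_CPU(struct vm_event_state, vm_event_states);

/* Caller must already have interrupts (or at least preemption)
 * disabled; free_page_mlock() below holds interrupts off around it. */
static inline void __count_vm_event(enum vm_event_item item)
{
	__get_cpu_var(vm_event_states).event[item]++;
}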
+/*
+ * free_page_mlock() -- clean up attempts to free an mlocked() page.
+ * Page should not be on lru, so no need to fix that up.
+ * free_pages_check() will verify...
+ */
+static inline void free_page_mlock(struct page *page)
+{
+ if (unlikely(TestClearPageMlocked(page))) {
+ unsigned long flags;
+
+ local_irq_save(flags);
+ __dec_zone_page_state(page, NR_MLOCK);
+ __count_vm_event(UNEVICTABLE_MLOCKFREED);
+ local_irq_restore(flags);
+ }
+}
#else /* CONFIG_UNEVICTABLE_LRU */
static inline int is_mlocked_vma(struct vm_area_struct *v, struct page *p) { return 0; }
static inline void clear_page_mlock(struct page *page) { }
static inline void mlock_vma_page(struct page *page) { }
static inline void mlock_migrate_page(struct page *new, struct page *old) { }
+static inline void free_page_mlock(struct page *page) { }
#endif /* CONFIG_UNEVICTABLE_LRU */
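A note on the locking in free_page_mlock() above: __dec_zone_page_state() and __count_vm_event() are the variants that assume interrupts are already off, so the patch takes a single local_irq_save()/local_irq_restore() pair around both updates. A hypothetical equivalent (not the patch's code) using the self-protecting helpers would look like this, at the cost of each helper doing its own protection internally:

static inline void free_page_mlock_alt(struct page *page)
{
	if (unlikely(TestClearPageMlocked(page))) {
		/* dec_zone_page_state() and count_vm_event() handle
		 * their own interrupt/preemption protection. */
		dec_zone_page_state(page, NR_MLOCK);
		count_vm_event(UNEVICTABLE_MLOCKFREED);
	}
}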
static inline int free_pages_check(struct page *page)
{
+ free_page_mlock(page);
if (unlikely(page_mapcount(page) |
(page->mapping != NULL) |
		(page_get_page_cgroup(page) != NULL) |
		(page_count(page) != 0) |
		(page->flags & PAGE_FLAGS_CHECK_AT_FREE)))
		bad_page(page);
"unevictable_pgs_munlocked",
"unevictable_pgs_cleared",
"unevictable_pgs_stranded",
+ "unevictable_pgs_mlockfreed",
#endif
#endif
};
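With the patch applied, the new counter appears in /proc/vmstat under the "unevictable_pgs_mlockfreed" name added above. A minimal userspace check (illustrative only):

#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[128];
	FILE *f = fopen("/proc/vmstat", "r");

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))
		if (!strncmp(line, "unevictable_pgs_mlockfreed", 26))
			fputs(line, stdout);	/* e.g. "unevictable_pgs_mlockfreed 0" */
	fclose(f);
	return 0;
}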