From: Konstantin Khlebnikov
Date: Wed, 21 Mar 2012 23:33:59 +0000 (-0700)
Subject: mm: replace PAGE_MIGRATION with IS_ENABLED(CONFIG_MIGRATION)
X-Git-Url: https://git.stricted.de/?a=commitdiff_plain;h=ce1744f4ed20ca873360e54502f8a71564ef7cc6;p=GitHub%2FLineageOS%2Fandroid_kernel_motorola_exynos9610.git

mm: replace PAGE_MIGRATION with IS_ENABLED(CONFIG_MIGRATION)

Since commit 2a11c8ea20bf ("kconfig: Introduce IS_ENABLED(), IS_BUILTIN()
and IS_MODULE()") there is a generic grep-friendly method for checking
config options in C expressions.

Signed-off-by: Konstantin Khlebnikov
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---

diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index 05ed2828a553..855c337b20c3 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -8,7 +8,6 @@ typedef struct page *new_page_t(struct page *, unsigned long private, int **);
 
 #ifdef CONFIG_MIGRATION
-#define PAGE_MIGRATION 1
 
 extern void putback_lru_pages(struct list_head *l);
 extern int migrate_page(struct address_space *,
@@ -32,7 +31,6 @@ extern void migrate_page_copy(struct page *newpage, struct page *page);
 extern int migrate_huge_page_move_mapping(struct address_space *mapping,
 				  struct page *newpage, struct page *page);
 #else
-#define PAGE_MIGRATION 0
 
 static inline void putback_lru_pages(struct list_head *l) {}
 static inline int migrate_pages(struct list_head *l, new_page_t x,
diff --git a/mm/mprotect.c b/mm/mprotect.c
index f437d054c3bf..c621e999cbf7 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -60,7 +60,7 @@ static void change_pte_range(struct mm_struct *mm, pmd_t *pmd,
 				ptent = pte_mkwrite(ptent);
 
 			ptep_modify_prot_commit(mm, addr, pte, ptent);
-		} else if (PAGE_MIGRATION && !pte_file(oldpte)) {
+		} else if (IS_ENABLED(CONFIG_MIGRATION) && !pte_file(oldpte)) {
 			swp_entry_t entry = pte_to_swp_entry(oldpte);
 
 			if (is_write_migration_entry(entry)) {
diff --git a/mm/rmap.c b/mm/rmap.c
index c8454e06b6c8..78cc46b876e6 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1282,7 +1282,7 @@ int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 			}
 			dec_mm_counter(mm, MM_ANONPAGES);
 			inc_mm_counter(mm, MM_SWAPENTS);
-		} else if (PAGE_MIGRATION) {
+		} else if (IS_ENABLED(CONFIG_MIGRATION)) {
 			/*
 			 * Store the pfn of the page in a special migration
 			 * pte. do_swap_page() will wait until the migration
@@ -1293,7 +1293,8 @@ int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 		}
 		set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
 		BUG_ON(pte_file(*pte));
-	} else if (PAGE_MIGRATION && (TTU_ACTION(flags) == TTU_MIGRATION)) {
+	} else if (IS_ENABLED(CONFIG_MIGRATION) &&
+		   (TTU_ACTION(flags) == TTU_MIGRATION)) {
 		/* Establish migration entry for a file page */
 		swp_entry_t entry;
 		entry = make_migration_entry(page, pte_write(pteval));
@@ -1499,7 +1500,7 @@ static int try_to_unmap_anon(struct page *page, enum ttu_flags flags)
 		 * locking requirements of exec(), migration skips
 		 * temporary VMAs until after exec() completes.
 		 */
-		if (PAGE_MIGRATION && (flags & TTU_MIGRATION) &&
+		if (IS_ENABLED(CONFIG_MIGRATION) && (flags & TTU_MIGRATION) &&
 				is_vma_temporary_stack(vma))
 			continue;
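
Aside, an illustrative sketch that is not part of the commit: the point of the conversion is that IS_ENABLED(CONFIG_MIGRATION) expands to a compile-time constant 1 or 0, so it can sit directly inside a C if-condition, the compiler still parses both branches and then discards the dead one, and the full CONFIG_ option name stays greppable at each call site. The standalone userspace program below reproduces a simplified version of the preprocessor trick from include/linux/kconfig.h; CONFIG_FOO is a made-up option name, and the real kernel macro additionally reports 1 when the option is built as a module (CONFIG_FOO_MODULE).

/*
 * Simplified userspace model of the IS_ENABLED() trick; the in-kernel
 * version in include/linux/kconfig.h also checks option##_MODULE.
 * CONFIG_FOO is a stand-in for a real Kconfig symbol.
 */
#include <stdio.h>

#define CONFIG_FOO 1	/* pretend the option was set to =y; remove to test =n */

#define __ARG_PLACEHOLDER_1		0,
#define __take_second_arg(__ignored, val, ...) val

/*
 * If "option" is defined to 1, token pasting yields __ARG_PLACEHOLDER_1,
 * which expands to "0," and shifts the literal 1 into the second argument.
 * If "option" is undefined, the paste yields junk in the first argument and
 * the fallback 0 is picked instead.
 */
#define __is_defined(x)			___is_defined(x)
#define ___is_defined(val)		____is_defined(__ARG_PLACEHOLDER_##val)
#define ____is_defined(arg1_or_junk)	__take_second_arg(arg1_or_junk 1, 0)

#define IS_ENABLED(option)		__is_defined(option)

int main(void)
{
	/* The condition is an integer constant expression, so the dead branch is dropped. */
	if (IS_ENABLED(CONFIG_FOO))
		printf("CONFIG_FOO is enabled\n");
	else
		printf("CONFIG_FOO is disabled\n");

	return 0;
}

Because the result is an ordinary constant expression, code written as "if (IS_ENABLED(CONFIG_MIGRATION))", like the rmap.c and mprotect.c hunks above, keeps compile coverage of both branches even when the option is disabled, unlike an #ifdef block or the old per-option PAGE_MIGRATION macro.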