From 64363aad5ff1b878230e91223038c26a2205bff3 Mon Sep 17 00:00:00 2001
From: Joe Perches
Date: Mon, 8 Jul 2013 16:00:18 -0700
Subject: [PATCH] mm: remove unused VM_ macros and expand others in place

These VM_ macros aren't used very often and three of them aren't used at
all. Expand the ones that are used in place, and remove all the now
unused #define VM_ macros.

VM_READHINTMASK, VM_NormalReadHint and VM_ClearReadHint were added just
before 2.4 and appear to have never been used.

Signed-off-by: Joe Perches
Acked-by: KOSAKI Motohiro
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 include/linux/mm.h | 6 ------
 mm/filemap.c       | 6 +++---
 mm/memory.c        | 2 +-
 mm/rmap.c          | 2 +-
 4 files changed, 5 insertions(+), 11 deletions(-)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index b87681adf0ba..f0224608d15e 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -151,12 +151,6 @@ extern unsigned int kobjsize(const void *objp);
 #define VM_STACK_FLAGS	(VM_GROWSDOWN | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
 #endif
 
-#define VM_READHINTMASK			(VM_SEQ_READ | VM_RAND_READ)
-#define VM_ClearReadHint(v)		(v)->vm_flags &= ~VM_READHINTMASK
-#define VM_NormalReadHint(v)		(!((v)->vm_flags & VM_READHINTMASK))
-#define VM_SequentialReadHint(v)	((v)->vm_flags & VM_SEQ_READ)
-#define VM_RandomReadHint(v)		((v)->vm_flags & VM_RAND_READ)
-
 /*
  * Special vmas that are non-mergable, non-mlock()able.
  * Note: mm/huge_memory.c VM_NO_THP depends on this definition.
diff --git a/mm/filemap.c b/mm/filemap.c
index 7905fe721aa8..4b51ac1acae7 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1539,12 +1539,12 @@ static void do_sync_mmap_readahead(struct vm_area_struct *vma,
 	struct address_space *mapping = file->f_mapping;
 
 	/* If we don't want any read-ahead, don't bother */
-	if (VM_RandomReadHint(vma))
+	if (vma->vm_flags & VM_RAND_READ)
 		return;
 	if (!ra->ra_pages)
 		return;
 
-	if (VM_SequentialReadHint(vma)) {
+	if (vma->vm_flags & VM_SEQ_READ) {
 		page_cache_sync_readahead(mapping, ra, file, offset,
 					  ra->ra_pages);
 		return;
@@ -1584,7 +1584,7 @@ static void do_async_mmap_readahead(struct vm_area_struct *vma,
 	struct address_space *mapping = file->f_mapping;
 
 	/* If we don't want any read-ahead, don't bother */
-	if (VM_RandomReadHint(vma))
+	if (vma->vm_flags & VM_RAND_READ)
 		return;
 	if (ra->mmap_miss > 0)
 		ra->mmap_miss--;
diff --git a/mm/memory.c b/mm/memory.c
index b68812d682b6..1ce2e2a734fc 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1150,7 +1150,7 @@ again:
 				if (pte_dirty(ptent))
 					set_page_dirty(page);
 				if (pte_young(ptent) &&
-				    likely(!VM_SequentialReadHint(vma)))
+				    likely(!(vma->vm_flags & VM_SEQ_READ)))
 					mark_page_accessed(page);
 				rss[MM_FILEPAGES]--;
 			}
diff --git a/mm/rmap.c b/mm/rmap.c
index e22ceeb6e5ec..cd356df4f71a 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -720,7 +720,7 @@ int page_referenced_one(struct page *page, struct vm_area_struct *vma,
 		 * mapping is already gone, the unmap path will have
 		 * set PG_referenced or activated the page.
 		 */
-		if (likely(!VM_SequentialReadHint(vma)))
+		if (likely(!(vma->vm_flags & VM_SEQ_READ)))
			referenced++;
 	}
 	pte_unmap_unlock(pte, ptl);
-- 
2.20.1
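
For readers outside the kernel tree, a minimal userspace sketch of what the
expansion amounts to. The flag values and the one-field vm_area_struct below
are illustrative stand-ins, not the kernel's definitions; only the
VM_SequentialReadHint() macro is copied from the lines this patch removes:

	#include <stdio.h>

	/* Stand-in flag bits for illustration only; the real values
	 * live in include/linux/mm.h and may differ. */
	#define VM_SEQ_READ	0x00008000UL
	#define VM_RAND_READ	0x00010000UL

	/* Stripped-down stand-in for the kernel's struct vm_area_struct. */
	struct vm_area_struct {
		unsigned long vm_flags;
	};

	/* The removed helper, reproduced from the patch for comparison. */
	#define VM_SequentialReadHint(v)	((v)->vm_flags & VM_SEQ_READ)

	int main(void)
	{
		struct vm_area_struct vma = { .vm_flags = VM_SEQ_READ };

		/* Old style: the flag test hidden behind the helper macro. */
		if (VM_SequentialReadHint(&vma))
			printf("sequential hint (via macro)\n");

		/* New style after this patch: the same test expanded in
		 * place, dropping the extra layer of naming. */
		if (vma.vm_flags & VM_SEQ_READ)
			printf("sequential hint (open-coded)\n");

		return 0;
	}

The open-coded test reads as clearly at each call site as the wrapper did,
which is why the rarely used helpers could be dropped without hurting
readability.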