mm: remove unused VM_<READfoo> macros and expand others in-place
author		Joe Perches <joe@perches.com>
		Mon, 8 Jul 2013 23:00:18 +0000 (16:00 -0700)
committer	Linus Torvalds <torvalds@linux-foundation.org>
		Tue, 9 Jul 2013 17:33:23 +0000 (10:33 -0700)
These VM_<READfoo> macros aren't used very often and three of them
aren't used at all.

Expand the ones that are used in-place, and remove all the now unused
#define VM_<foo> macros.

VM_READHINTMASK, VM_NormalReadHint and VM_ClearReadHint were added just
before 2.4 and appear to have never been used.
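
For reference, a minimal userspace sketch of the before/after call-site
forms is below.  It is illustrative only: the flag values and the
trimmed struct vm_area_struct are stand-ins, not the real <linux/mm.h>
definitions.

#include <stdio.h>

#define VM_SEQ_READ	0x00008000UL	/* stand-in value, not the real bit */
#define VM_RAND_READ	0x00010000UL	/* stand-in value, not the real bit */

/* Trimmed-down stand-in for the kernel's struct vm_area_struct. */
struct vm_area_struct {
	unsigned long vm_flags;
};

/* Old style: the wrapper macros this patch removes. */
#define VM_SequentialReadHint(v)	((v)->vm_flags & VM_SEQ_READ)
#define VM_RandomReadHint(v)		((v)->vm_flags & VM_RAND_READ)

int main(void)
{
	struct vm_area_struct vma = { .vm_flags = VM_SEQ_READ };

	/* Before: call sites tested the hints through the macros. */
	if (VM_SequentialReadHint(&vma))
		printf("old form: VM_SEQ_READ is set\n");
	if (!VM_RandomReadHint(&vma))
		printf("old form: VM_RAND_READ is not set\n");

	/* After: the same tests are open-coded against vm_flags. */
	if (vma.vm_flags & VM_SEQ_READ)
		printf("new form: VM_SEQ_READ is set\n");
	if (!(vma.vm_flags & VM_RAND_READ))
		printf("new form: VM_RAND_READ is not set\n");

	return 0;
}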

Signed-off-by: Joe Perches <joe@perches.com>
Acked-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/linux/mm.h
mm/filemap.c
mm/memory.c
mm/rmap.c

index b87681adf0bada78cec851382c7719a1c5e66945..f0224608d15ed9f226e52dd89f30971c5da2bb56 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -151,12 +151,6 @@ extern unsigned int kobjsize(const void *objp);
 #define VM_STACK_FLAGS (VM_GROWSDOWN | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
 #endif
 
-#define VM_READHINTMASK                        (VM_SEQ_READ | VM_RAND_READ)
-#define VM_ClearReadHint(v)            (v)->vm_flags &= ~VM_READHINTMASK
-#define VM_NormalReadHint(v)           (!((v)->vm_flags & VM_READHINTMASK))
-#define VM_SequentialReadHint(v)       ((v)->vm_flags & VM_SEQ_READ)
-#define VM_RandomReadHint(v)           ((v)->vm_flags & VM_RAND_READ)
-
 /*
  * Special vmas that are non-mergable, non-mlock()able.
  * Note: mm/huge_memory.c VM_NO_THP depends on this definition.
index 7905fe721aa8ab3db06c957c9f2cc63cea1fee5f..4b51ac1acae7c6ae5233afeb8018dd31cadf909b 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1539,12 +1539,12 @@ static void do_sync_mmap_readahead(struct vm_area_struct *vma,
        struct address_space *mapping = file->f_mapping;
 
        /* If we don't want any read-ahead, don't bother */
-       if (VM_RandomReadHint(vma))
+       if (vma->vm_flags & VM_RAND_READ)
                return;
        if (!ra->ra_pages)
                return;
 
-       if (VM_SequentialReadHint(vma)) {
+       if (vma->vm_flags & VM_SEQ_READ) {
                page_cache_sync_readahead(mapping, ra, file, offset,
                                          ra->ra_pages);
                return;
@@ -1584,7 +1584,7 @@ static void do_async_mmap_readahead(struct vm_area_struct *vma,
        struct address_space *mapping = file->f_mapping;
 
        /* If we don't want any read-ahead, don't bother */
-       if (VM_RandomReadHint(vma))
+       if (vma->vm_flags & VM_RAND_READ)
                return;
        if (ra->mmap_miss > 0)
                ra->mmap_miss--;
index b68812d682b66c24e306d955ddbcb125e658e708..1ce2e2a734fc2b0812845279a69cb87f48588d3e 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1150,7 +1150,7 @@ again:
                                if (pte_dirty(ptent))
                                        set_page_dirty(page);
                                if (pte_young(ptent) &&
-                                   likely(!VM_SequentialReadHint(vma)))
+                                   likely(!(vma->vm_flags & VM_SEQ_READ)))
                                        mark_page_accessed(page);
                                rss[MM_FILEPAGES]--;
                        }
index e22ceeb6e5ec8a07dafec758a55e26f8a590fe26..cd356df4f71ab5aa0514e7685b4cce50eafceb6e 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -720,7 +720,7 @@ int page_referenced_one(struct page *page, struct vm_area_struct *vma,
                         * mapping is already gone, the unmap path will have
                         * set PG_referenced or activated the page.
                         */
-                       if (likely(!VM_SequentialReadHint(vma)))
+                       if (likely(!(vma->vm_flags & VM_SEQ_READ)))
                                referenced++;
                }
                pte_unmap_unlock(pte, ptl);