#define VM_STACK_FLAGS (VM_GROWSDOWN | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
#endif
-#define VM_READHINTMASK (VM_SEQ_READ | VM_RAND_READ)
-#define VM_ClearReadHint(v) (v)->vm_flags &= ~VM_READHINTMASK
-#define VM_NormalReadHint(v) (!((v)->vm_flags & VM_READHINTMASK))
-#define VM_SequentialReadHint(v) ((v)->vm_flags & VM_SEQ_READ)
-#define VM_RandomReadHint(v) ((v)->vm_flags & VM_RAND_READ)
-
/*
* Special vmas that are non-mergable, non-mlock()able.
* Note: mm/huge_memory.c VM_NO_THP depends on this definition.
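For context, and not part of the patch itself: VM_SEQ_READ and VM_RAND_READ are set on a vma by madvise(), following the pattern of madvise_behavior() in mm/madvise.c, where MADV_SEQUENTIAL and MADV_RANDOM are mutually exclusive and MADV_NORMAL clears both hints. Below is a minimal stand-alone sketch of those flag transitions; the flag values, the enum names, and the apply_read_hint() helper are illustrative only and do not appear in the patch.

/*
 * Hedged sketch of the vm_flags read-hint transitions performed by
 * madvise(); values and names are illustrative, not taken from the patch.
 */
#include <stdio.h>

#define VM_SEQ_READ  0x00008000UL	/* illustrative flag values */
#define VM_RAND_READ 0x00010000UL

enum read_hint { HINT_NORMAL, HINT_SEQUENTIAL, HINT_RANDOM };

/* Mirror of the flag updates madvise() applies for the read hints. */
static unsigned long apply_read_hint(unsigned long flags, enum read_hint hint)
{
	switch (hint) {
	case HINT_NORMAL:		/* MADV_NORMAL: drop both hints */
		return flags & ~(VM_SEQ_READ | VM_RAND_READ);
	case HINT_SEQUENTIAL:		/* MADV_SEQUENTIAL: seq, not rand */
		return (flags & ~VM_RAND_READ) | VM_SEQ_READ;
	case HINT_RANDOM:		/* MADV_RANDOM: rand, not seq */
		return (flags & ~VM_SEQ_READ) | VM_RAND_READ;
	}
	return flags;
}

int main(void)
{
	unsigned long flags = 0;

	flags = apply_read_hint(flags, HINT_SEQUENTIAL);
	printf("after MADV_SEQUENTIAL: seq=%d rand=%d\n",
	       !!(flags & VM_SEQ_READ), !!(flags & VM_RAND_READ));

	flags = apply_read_hint(flags, HINT_RANDOM);
	printf("after MADV_RANDOM:     seq=%d rand=%d\n",
	       !!(flags & VM_SEQ_READ), !!(flags & VM_RAND_READ));
	return 0;
}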
struct address_space *mapping = file->f_mapping;
/* If we don't want any read-ahead, don't bother */
- if (VM_RandomReadHint(vma))
+ if (vma->vm_flags & VM_RAND_READ)
return;
if (!ra->ra_pages)
return;
- if (VM_SequentialReadHint(vma)) {
+ if (vma->vm_flags & VM_SEQ_READ) {
page_cache_sync_readahead(mapping, ra, file, offset,
ra->ra_pages);
return;
struct address_space *mapping = file->f_mapping;
/* If we don't want any read-ahead, don't bother */
- if (VM_RandomReadHint(vma))
+ if (vma->vm_flags & VM_RAND_READ)
return;
if (ra->mmap_miss > 0)
ra->mmap_miss--;
if (pte_dirty(ptent))
set_page_dirty(page);
if (pte_young(ptent) &&
- likely(!VM_SequentialReadHint(vma)))
+ likely(!(vma->vm_flags & VM_SEQ_READ)))
mark_page_accessed(page);
rss[MM_FILEPAGES]--;
}
* mapping is already gone, the unmap path will have
* set PG_referenced or activated the page.
*/
- if (likely(!VM_SequentialReadHint(vma)))
+ if (likely(!(vma->vm_flags & VM_SEQ_READ)))
referenced++;
}
pte_unmap_unlock(pte, ptl);
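For completeness, a user-space view of the flags tested in the hunks above (again not part of the patch): madvise(MADV_SEQUENTIAL) sets VM_SEQ_READ on the mapping and madvise(MADV_RANDOM) sets VM_RAND_READ, which the fault-time readahead and the accessed-bit handling shown above then honour. A minimal sketch, assuming a non-empty regular file is passed on the command line:

/* Hedged user-space sketch; any file name passed in is just an example. */
#define _DEFAULT_SOURCE		/* for madvise() on glibc */
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	struct stat st;
	char *p;
	int fd;

	if (argc < 2) {
		fprintf(stderr, "usage: %s <file>\n", argv[0]);
		return 1;
	}

	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (fstat(fd, &st) < 0 || st.st_size == 0) {
		fprintf(stderr, "fstat failed or file is empty\n");
		return 1;
	}

	p = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/* Sets VM_SEQ_READ: fault-time readahead is requested eagerly. */
	if (madvise(p, st.st_size, MADV_SEQUENTIAL) < 0)
		perror("madvise(MADV_SEQUENTIAL)");

	/* ... sequential pass over p[0 .. st.st_size) ... */

	/* Sets VM_RAND_READ (clearing VM_SEQ_READ): readahead is skipped. */
	if (madvise(p, st.st_size, MADV_RANDOM) < 0)
		perror("madvise(MADV_RANDOM)");

	munmap(p, st.st_size);
	close(fd);
	return 0;
}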