#ifndef LINUX_MM_INLINE_H
#define LINUX_MM_INLINE_H

#include <linux/huge_mm.h>
#include <linux/swap.h>

/**
 * page_is_file_cache - should the page be on a file LRU or anon LRU?
 * @page: the page to test
 *
 * Returns 1 if @page is a page cache page backed by a regular filesystem,
 * or 0 if @page is anonymous, tmpfs or otherwise ram or swap backed.
 * Used by functions that manipulate the LRU lists, to sort a page
 * onto the right LRU list.
 *
 * We would like to get this info without a page flag, but the state
 * needs to survive until the page is last deleted from the LRU, which
 * could be as far down as __page_cache_release.
 */
static inline int page_is_file_cache(struct page *page)
{
	return !PageSwapBacked(page);
}
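
/*
 * Illustrative sketch (not part of the original header): a caller that
 * wants the matching inactive list can combine this test with the LRU
 * index arithmetic used throughout mm/, e.g.
 *
 *	enum lru_list lru = page_is_file_cache(page) ?
 *				LRU_INACTIVE_FILE : LRU_INACTIVE_ANON;
 *
 * which is exactly what page_lru_base_type() further down does.
 */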

static __always_inline void __update_lru_size(struct lruvec *lruvec,
				enum lru_list lru, enum zone_type zid,
				int nr_pages)
{
	struct pglist_data *pgdat = lruvec_pgdat(lruvec);

	__mod_node_page_state(pgdat, NR_LRU_BASE + lru, nr_pages);
	__mod_zone_page_state(&pgdat->node_zones[zid],
				NR_ZONE_LRU_BASE + lru, nr_pages);
}

static __always_inline void update_lru_size(struct lruvec *lruvec,
				enum lru_list lru, enum zone_type zid,
				int nr_pages)
{
	__update_lru_size(lruvec, lru, zid, nr_pages);
#ifdef CONFIG_MEMCG
	mem_cgroup_update_lru_size(lruvec, lru, zid, nr_pages);
#endif
}
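
/*
 * Illustrative example (not part of the original header): accounting a
 * single base page added to the inactive file list of its zone would be
 *
 *	update_lru_size(lruvec, LRU_INACTIVE_FILE, page_zonenum(page), 1);
 *
 * which bumps the node-wide NR_LRU_BASE + LRU_INACTIVE_FILE counter, the
 * per-zone NR_ZONE_LRU_BASE + LRU_INACTIVE_FILE counter and, with
 * CONFIG_MEMCG, the per-memcg list size via mem_cgroup_update_lru_size().
 */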

static __always_inline void add_page_to_lru_list(struct page *page,
				struct lruvec *lruvec, enum lru_list lru)
{
	update_lru_size(lruvec, lru, page_zonenum(page), hpage_nr_pages(page));
	list_add(&page->lru, &lruvec->lists[lru]);
}

static __always_inline void add_page_to_lru_list_tail(struct page *page,
				struct lruvec *lruvec, enum lru_list lru)
{
	update_lru_size(lruvec, lru, page_zonenum(page), hpage_nr_pages(page));
	list_add_tail(&page->lru, &lruvec->lists[lru]);
}

static __always_inline void del_page_from_lru_list(struct page *page,
				struct lruvec *lruvec, enum lru_list lru)
{
	list_del(&page->lru);
	update_lru_size(lruvec, lru, page_zonenum(page), -hpage_nr_pages(page));
}
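
/*
 * Illustrative sketch (hypothetical caller, not in this header): moving a
 * page from the inactive to the active file list under the appropriate
 * lru_lock pairs the helpers above:
 *
 *	del_page_from_lru_list(page, lruvec, LRU_INACTIVE_FILE);
 *	SetPageActive(page);
 *	add_page_to_lru_list(page, lruvec, LRU_ACTIVE_FILE);
 *
 * Real callers such as mm/swap.c derive the list indices with
 * page_lru_base_type()/page_lru() instead of hard-coding them.
 */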

/**
 * page_lru_base_type - which LRU list type should a page be on?
 * @page: the page to test
 *
 * Used for LRU list index arithmetic.
 *
 * Returns the base LRU type - file or anon - @page should be on.
 */
static inline enum lru_list page_lru_base_type(struct page *page)
{
	if (page_is_file_cache(page))
		return LRU_INACTIVE_FILE;
	return LRU_INACTIVE_ANON;
}

/**
 * page_off_lru - which LRU list was page on? clearing its lru flags.
 * @page: the page to test
 *
 * Returns the LRU list a page was on, as an index into the array of LRU
 * lists; and clears its Unevictable or Active flags, ready for freeing.
 */
static __always_inline enum lru_list page_off_lru(struct page *page)
{
	enum lru_list lru;

	if (PageUnevictable(page)) {
		__ClearPageUnevictable(page);
		lru = LRU_UNEVICTABLE;
	} else {
		lru = page_lru_base_type(page);
		if (PageActive(page)) {
			__ClearPageActive(page);
			lru += LRU_ACTIVE;
		}
	}
	return lru;
}
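
/*
 * Illustrative sketch (not part of the original header): the final
 * release path uses this helper to undo LRU accounting for a page that
 * is about to be freed, roughly
 *
 *	__ClearPageLRU(page);
 *	del_page_from_lru_list(page, lruvec, page_off_lru(page));
 *
 * as __page_cache_release() does under the node's lru_lock.
 */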

/**
 * page_lru - which LRU list should a page be on?
 * @page: the page to test
 *
 * Returns the LRU list a page should be on, as an index
 * into the array of LRU lists.
 */
static __always_inline enum lru_list page_lru(struct page *page)
{
	enum lru_list lru;

	if (PageUnevictable(page))
		lru = LRU_UNEVICTABLE;
	else {
		lru = page_lru_base_type(page);
		if (PageActive(page))
			lru += LRU_ACTIVE;
	}
	return lru;
}
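
/*
 * Illustrative sketch (hypothetical, not part of this header): when a
 * page is first linked onto the LRU, callers typically let page_lru()
 * pick the list from the page flags:
 *
 *	SetPageLRU(page);
 *	add_page_to_lru_list(page, lruvec, page_lru(page));
 *
 * which is, in essence, what the pagevec drain code in mm/swap.c does.
 */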

#define lru_to_page(head) (list_entry((head)->prev, struct page, lru))
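
/*
 * Illustrative example (not part of the original header): reclaim scans
 * an LRU from its tail, so fetching the oldest entry on a list looks like
 *
 *	struct page *page = lru_to_page(&lruvec->lists[LRU_INACTIVE_FILE]);
 *
 * i.e. list_entry() on head->prev.
 */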

#ifdef arch_unmap_kpfn
extern void arch_unmap_kpfn(unsigned long pfn);
#else
static __always_inline void arch_unmap_kpfn(unsigned long pfn) { }
#endif

#endif