#include <linux/bootmem.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/ksm.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/hugetlb.h>
#include <linux/kernel-page-flags.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <asm/uaccess.h>
#include "internal.h"

#define KPMSIZE sizeof(u64)
#define KPMMASK (KPMSIZE - 1)
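/*
 * Every /proc/kpage* file is an array of u64 entries, one entry per pfn,
 * so both the file offset and the requested length must be multiples of
 * KPMSIZE.
 */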

/* /proc/kpagecount - an array exposing page counts
 *
 * Each entry is a u64 representing the corresponding
 * physical page count.
 */
static ssize_t kpagecount_read(struct file *file, char __user *buf,
			       size_t count, loff_t *ppos)
{
	u64 __user *out = (u64 __user *)buf;
	struct page *ppage;
	unsigned long src = *ppos;
	unsigned long pfn;
	unsigned long max_pfn_kpmsize = max_pfn * KPMSIZE;
	ssize_t ret = 0;
	u64 pcount;

	pfn = src / KPMSIZE;
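	/*
	 * Clamp the request so it does not run past max_pfn; the clamp is
	 * skipped when the offset already sits exactly at the end of the
	 * pfn array.
	 */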
	if (src != max_pfn_kpmsize)
		count = min_t(size_t, count, max_pfn_kpmsize - src);

	if (src & KPMMASK || count & KPMMASK)
		return -EINVAL;

	while (count > 0) {
		if (pfn_valid(pfn))
			ppage = pfn_to_page(pfn);
		else
			ppage = NULL;
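		/*
		 * Slab pages overload page->_mapcount, so their map count is
		 * meaningless here; report 0 for them and for memory holes.
		 */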
		if (!ppage || PageSlab(ppage))
			pcount = 0;
		else
			pcount = page_mapcount(ppage);

		if (put_user(pcount, out)) {
			ret = -EFAULT;
			break;
		}

		pfn++;
		out++;
		count -= KPMSIZE;
	}

	*ppos += (char __user *)out - buf;
	if (!ret)
		ret = (char __user *)out - buf;
	return ret;
}

static const struct file_operations proc_kpagecount_operations = {
	.llseek = mem_lseek,
	.read = kpagecount_read,
};

#ifdef CONFIG_SWAP
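/*
 * Swap entry lookup/unlock helpers provided by the swap core; declared
 * locally here because they are not exposed through a shared header.
 */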
extern struct swap_info_struct *swap_info_get(swp_entry_t entry);
extern void swap_info_unlock(struct swap_info_struct *si);

static inline unsigned char swap_count(unsigned char ent)
{
	return ent & ~SWAP_HAS_CACHE;	/* may include SWAP_HAS_CONT flag */
}

/* /proc/kpageswapn - an array exposing page swap counts
 *
 * Each entry is a u64 representing the corresponding
 * physical page swap count.
 */
static ssize_t kpageswapn_read(struct file *file, char __user *buf,
			       size_t count, loff_t *ppos)
{
	u64 __user *out = (u64 __user *)buf;
	unsigned long src = *ppos;
	swp_entry_t swap_entry;
	ssize_t ret = 0;
	struct swap_info_struct *p;

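	/* the file offset, in KPMSIZE units, is interpreted as a swp_entry_t value */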
	swap_entry.val = src / KPMSIZE;
	//printk(KERN_INFO "kpageswapn_read src: %lx\n", src);
	//printk(KERN_INFO "kpageswapn_read swap entry: %lx\n", swap_entry.val);

	if (src & KPMMASK || count & KPMMASK) {
		printk(KERN_INFO "kpageswapn_read return EINVAL\n");
		return -EINVAL;
	}

	p = swap_info_get(swap_entry);
	if (p) {
		u64 swapcount = swap_count(p->swap_map[swp_offset(swap_entry)]);
		if (put_user(swapcount, out)) {
			printk(KERN_INFO "kpageswapn_read put user failed\n");
			ret = -EFAULT;
		}
		swap_info_unlock(p);
	} else {
		printk(KERN_INFO "kpageswapn_read swap_info_get failed\n");
		ret = -EFAULT;
	}

	if (!ret) {
		*ppos += KPMSIZE;
		ret = KPMSIZE;
	}
	return ret;
}

static const struct file_operations proc_kpageswapn_operations = {
	.llseek = mem_lseek,
	.read = kpageswapn_read,
};
#endif /* CONFIG_SWAP */

/* /proc/kpageflags - an array exposing page flags
 *
 * Each entry is a u64 representing the corresponding
 * physical page flags.
 */

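/* copy bit @kbit of @kflags into bit @ubit of the user-visible flags word */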
static inline u64 kpf_copy_bit(u64 kflags, int ubit, int kbit)
{
	return ((kflags >> kbit) & 1) << ubit;
}

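/*
 * Translate the kernel-internal page->flags (plus a few derived states)
 * into the stable KPF_* bits exported to user space.
 */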
u64 stable_page_flags(struct page *page)
{
	u64 k;
	u64 u;

	/*
	 * pseudo flag: KPF_NOPAGE
	 * it differentiates a memory hole from a page with no flags
	 */
	if (!page)
		return 1 << KPF_NOPAGE;

	k = page->flags;
	u = 0;

	/*
	 * pseudo flags for the well known (anonymous) memory mapped pages
	 *
	 * Note that page->_mapcount is overloaded in SLOB/SLUB/SLQB, so the
	 * simple test in page_mapped() is not enough.
	 */
	if (!PageSlab(page) && page_mapped(page))
		u |= 1 << KPF_MMAP;
	if (PageAnon(page))
		u |= 1 << KPF_ANON;
	if (PageKsm(page))
		u |= 1 << KPF_KSM;

	/*
	 * compound pages: export both head/tail info
	 * they together define a compound page's start/end pos and order
	 */
	if (PageHead(page))
		u |= 1 << KPF_COMPOUND_HEAD;
	if (PageTail(page))
		u |= 1 << KPF_COMPOUND_TAIL;
	if (PageHuge(page))
		u |= 1 << KPF_HUGE;
	/*
	 * PageTransCompound can be true for non-huge compound pages (slab
	 * pages or pages allocated by drivers with __GFP_COMP) because it
	 * just checks PG_head/PG_tail, so we need to check PageLRU to make
	 * sure a given page is a thp, not a non-huge compound page.
	 */
	else if (PageTransCompound(page) && PageLRU(compound_head(page)))
		u |= 1 << KPF_THP;

	/*
	 * Caveats on high order pages: page->_count will only be set
	 * -1 on the head page; SLUB/SLQB do the same for PG_slab;
	 * SLOB won't set PG_slab at all on compound pages.
	 */
	if (PageBuddy(page))
		u |= 1 << KPF_BUDDY;

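	/* the remaining bits are copied verbatim from page->flags */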
	u |= kpf_copy_bit(k, KPF_LOCKED, PG_locked);

	u |= kpf_copy_bit(k, KPF_SLAB, PG_slab);

	u |= kpf_copy_bit(k, KPF_ERROR, PG_error);
	u |= kpf_copy_bit(k, KPF_DIRTY, PG_dirty);
	u |= kpf_copy_bit(k, KPF_UPTODATE, PG_uptodate);
	u |= kpf_copy_bit(k, KPF_WRITEBACK, PG_writeback);

	u |= kpf_copy_bit(k, KPF_LRU, PG_lru);
	u |= kpf_copy_bit(k, KPF_REFERENCED, PG_referenced);
	u |= kpf_copy_bit(k, KPF_ACTIVE, PG_active);
	u |= kpf_copy_bit(k, KPF_RECLAIM, PG_reclaim);

	u |= kpf_copy_bit(k, KPF_SWAPCACHE, PG_swapcache);
	u |= kpf_copy_bit(k, KPF_SWAPBACKED, PG_swapbacked);

	u |= kpf_copy_bit(k, KPF_UNEVICTABLE, PG_unevictable);
	u |= kpf_copy_bit(k, KPF_MLOCKED, PG_mlocked);

#ifdef CONFIG_MEMORY_FAILURE
	u |= kpf_copy_bit(k, KPF_HWPOISON, PG_hwpoison);
#endif

#ifdef CONFIG_ARCH_USES_PG_UNCACHED
	u |= kpf_copy_bit(k, KPF_UNCACHED, PG_uncached);
#endif

	u |= kpf_copy_bit(k, KPF_RESERVED, PG_reserved);
	u |= kpf_copy_bit(k, KPF_MAPPEDTODISK, PG_mappedtodisk);
	u |= kpf_copy_bit(k, KPF_PRIVATE, PG_private);
	u |= kpf_copy_bit(k, KPF_PRIVATE_2, PG_private_2);
	u |= kpf_copy_bit(k, KPF_OWNER_PRIVATE, PG_owner_priv_1);
	u |= kpf_copy_bit(k, KPF_ARCH, PG_arch_1);

	return u;
}

static ssize_t kpageflags_read(struct file *file, char __user *buf,
			       size_t count, loff_t *ppos)
{
	u64 __user *out = (u64 __user *)buf;
	struct page *ppage;
	unsigned long src = *ppos;
	unsigned long pfn;
	unsigned long max_pfn_kpmsize = max_pfn * KPMSIZE;
	ssize_t ret = 0;

	pfn = src / KPMSIZE;
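	/* same end-of-array clamp as in kpagecount_read() */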
	if (src != max_pfn_kpmsize)
		count = min_t(unsigned long, count, max_pfn_kpmsize - src);

	if (src & KPMMASK || count & KPMMASK)
		return -EINVAL;

	while (count > 0) {
		if (pfn_valid(pfn))
			ppage = pfn_to_page(pfn);
		else
			ppage = NULL;

		if (put_user(stable_page_flags(ppage), out)) {
			ret = -EFAULT;
			break;
		}

		pfn++;
		out++;
		count -= KPMSIZE;
	}

	*ppos += (char __user *)out - buf;
	if (!ret)
		ret = (char __user *)out - buf;
	return ret;
}

static const struct file_operations proc_kpageflags_operations = {
	.llseek = mem_lseek,
	.read = kpageflags_read,
};

static int __init proc_page_init(void)
{
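	/* the /proc entries are read-only and restricted to root (S_IRUSR) */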
	proc_create("kpagecount", S_IRUSR, NULL, &proc_kpagecount_operations);
#ifdef CONFIG_SWAP
	proc_create("kpageswapn", S_IRUSR, NULL, &proc_kpageswapn_operations);
#endif /* CONFIG_SWAP */
	proc_create("kpageflags", S_IRUSR, NULL, &proc_kpageflags_operations);
	return 0;
}
module_init(proc_page_init);