1 #include <linux/bootmem.h>
2 #include <linux/compiler.h>
4 #include <linux/init.h>
7 #include <linux/mmzone.h>
8 #include <linux/proc_fs.h>
9 #include <linux/seq_file.h>
10 #include <linux/hugetlb.h>
11 #include <linux/kernel-page-flags.h>
12 #include <linux/swap.h>
13 #include <linux/swapops.h>
14 #include <asm/uaccess.h>
/* Each /proc/kpage* file is an array of u64 entries, one per pfn/slot. */
#define KPMSIZE sizeof(u64)
#define KPMMASK (KPMSIZE - 1)
/* /proc/kpagecount - an array exposing page counts
 *
 * Each entry is a u64 representing the corresponding
 * physical page count.
 */
25 static ssize_t
kpagecount_read(struct file
*file
, char __user
*buf
,
26 size_t count
, loff_t
*ppos
)
28 u64 __user
*out
= (u64 __user
*)buf
;
30 unsigned long src
= *ppos
;
32 unsigned long max_pfn_kpmsize
= max_pfn
* KPMSIZE
;
37 if(src
!= max_pfn_kpmsize
){
38 count
= min_t(size_t, count
, max_pfn_kpmsize
- src
);
41 if (src
& KPMMASK
|| count
& KPMMASK
)
46 ppage
= pfn_to_page(pfn
);
49 if (!ppage
|| PageSlab(ppage
))
52 pcount
= page_mapcount(ppage
);
54 if (put_user(pcount
, out
)) {
64 *ppos
+= (char __user
*)out
- buf
;
66 ret
= (char __user
*)out
- buf
;
70 static const struct file_operations proc_kpagecount_operations
= {
72 .read
= kpagecount_read
,
76 extern struct swap_info_struct
*swap_info_get(swp_entry_t entry
);
77 extern void swap_info_unlock(struct swap_info_struct
*si
);
79 static inline unsigned char swap_count(unsigned char ent
)
81 return ent
& ~SWAP_HAS_CACHE
; /* may include SWAP_HAS_CONT flag */
/* /proc/kpageswapn - an array exposing page swap counts
 *
 * Each entry is a u64 representing the corresponding
 * physical page swap count.
 */
89 static ssize_t
kpageswapn_read(struct file
*file
, char __user
*buf
,
90 size_t count
, loff_t
*ppos
)
92 u64 __user
*out
= (u64 __user
*)buf
;
93 unsigned long src
= *ppos
;
94 swp_entry_t swap_entry
;
96 struct swap_info_struct
*p
;
98 swap_entry
.val
= src
/ KPMSIZE
;
99 //printk(KERN_INFO "kpageswapn_read src: %lx\n", src);
100 //printk(KERN_INFO "kpageswapn_read swap entry: %lx\n", swap_entry.val);
102 if (src
& KPMMASK
|| count
& KPMMASK
) {
103 printk(KERN_INFO
"kpageswapn_read return EINVAL\n");
107 p
= swap_info_get(swap_entry
);
109 u64 swapcount
= swap_count(p
->swap_map
[swp_offset(swap_entry
)]);
110 if (put_user(swapcount
, out
)) {
111 printk(KERN_INFO
"kpageswapn_read put user failed\n");
116 printk(KERN_INFO
"kpageswapn_read swap_info_get failed\n");
127 static const struct file_operations proc_kpageswapn_operations
= {
129 .read
= kpageswapn_read
,
131 #endif // CONFIG_SWAP
/* /proc/kpageflags - an array exposing page flags
 *
 * Each entry is a u64 representing the corresponding
 * physical page flags.
 */
139 static inline u64
kpf_copy_bit(u64 kflags
, int ubit
, int kbit
)
141 return ((kflags
>> kbit
) & 1) << ubit
;
144 u64
stable_page_flags(struct page
*page
)
150 * pseudo flag: KPF_NOPAGE
151 * it differentiates a memory hole from a page with no flags
154 return 1 << KPF_NOPAGE
;
160 * pseudo flags for the well known (anonymous) memory mapped pages
162 * Note that page->_mapcount is overloaded in SLOB/SLUB/SLQB, so the
163 * simple test in page_mapped() is not enough.
165 if (!PageSlab(page
) && page_mapped(page
))
173 * compound pages: export both head/tail info
174 * they together define a compound page's start/end pos and order
177 u
|= 1 << KPF_COMPOUND_HEAD
;
179 u
|= 1 << KPF_COMPOUND_TAIL
;
183 * PageTransCompound can be true for non-huge compound pages (slab
184 * pages or pages allocated by drivers with __GFP_COMP) because it
185 * just checks PG_head/PG_tail, so we need to check PageLRU to make
186 * sure a given page is a thp, not a non-huge compound page.
188 else if (PageTransCompound(page
) && PageLRU(compound_head(page
)))
192 * Caveats on high order pages: page->_count will only be set
193 * -1 on the head page; SLUB/SLQB do the same for PG_slab;
194 * SLOB won't set PG_slab at all on compound pages.
199 u
|= kpf_copy_bit(k
, KPF_LOCKED
, PG_locked
);
201 u
|= kpf_copy_bit(k
, KPF_SLAB
, PG_slab
);
203 u
|= kpf_copy_bit(k
, KPF_ERROR
, PG_error
);
204 u
|= kpf_copy_bit(k
, KPF_DIRTY
, PG_dirty
);
205 u
|= kpf_copy_bit(k
, KPF_UPTODATE
, PG_uptodate
);
206 u
|= kpf_copy_bit(k
, KPF_WRITEBACK
, PG_writeback
);
208 u
|= kpf_copy_bit(k
, KPF_LRU
, PG_lru
);
209 u
|= kpf_copy_bit(k
, KPF_REFERENCED
, PG_referenced
);
210 u
|= kpf_copy_bit(k
, KPF_ACTIVE
, PG_active
);
211 u
|= kpf_copy_bit(k
, KPF_RECLAIM
, PG_reclaim
);
213 u
|= kpf_copy_bit(k
, KPF_SWAPCACHE
, PG_swapcache
);
214 u
|= kpf_copy_bit(k
, KPF_SWAPBACKED
, PG_swapbacked
);
216 u
|= kpf_copy_bit(k
, KPF_UNEVICTABLE
, PG_unevictable
);
217 u
|= kpf_copy_bit(k
, KPF_MLOCKED
, PG_mlocked
);
219 #ifdef CONFIG_MEMORY_FAILURE
220 u
|= kpf_copy_bit(k
, KPF_HWPOISON
, PG_hwpoison
);
223 #ifdef CONFIG_ARCH_USES_PG_UNCACHED
224 u
|= kpf_copy_bit(k
, KPF_UNCACHED
, PG_uncached
);
227 u
|= kpf_copy_bit(k
, KPF_RESERVED
, PG_reserved
);
228 u
|= kpf_copy_bit(k
, KPF_MAPPEDTODISK
, PG_mappedtodisk
);
229 u
|= kpf_copy_bit(k
, KPF_PRIVATE
, PG_private
);
230 u
|= kpf_copy_bit(k
, KPF_PRIVATE_2
, PG_private_2
);
231 u
|= kpf_copy_bit(k
, KPF_OWNER_PRIVATE
, PG_owner_priv_1
);
232 u
|= kpf_copy_bit(k
, KPF_ARCH
, PG_arch_1
);
237 static ssize_t
kpageflags_read(struct file
*file
, char __user
*buf
,
238 size_t count
, loff_t
*ppos
)
240 u64 __user
*out
= (u64 __user
*)buf
;
242 unsigned long src
= *ppos
;
244 unsigned long max_pfn_kpmsize
= max_pfn
* KPMSIZE
;
248 if(src
!= max_pfn_kpmsize
){
249 count
= min_t(unsigned long, count
, max_pfn_kpmsize
- src
);
252 if (src
& KPMMASK
|| count
& KPMMASK
)
257 ppage
= pfn_to_page(pfn
);
261 if (put_user(stable_page_flags(ppage
), out
)) {
271 *ppos
+= (char __user
*)out
- buf
;
273 ret
= (char __user
*)out
- buf
;
277 static const struct file_operations proc_kpageflags_operations
= {
279 .read
= kpageflags_read
,
282 static int __init
proc_page_init(void)
284 proc_create("kpagecount", S_IRUSR
, NULL
, &proc_kpagecount_operations
);
286 proc_create("kpageswapn", S_IRUSR
, NULL
, &proc_kpageswapn_operations
);
287 #endif // CONFIG_SWAP
288 proc_create("kpageflags", S_IRUSR
, NULL
, &proc_kpageflags_operations
);
291 module_init(proc_page_init
);