#include <linux/hugetlb.h>
#include <linux/huge_mm.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include <asm/uaccess.h>
#include <asm/tlbflush.h>
void task_mem(struct seq_file *m, struct mm_struct *mm)
{
	unsigned long data, text, lib, swap;
	unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss;

	/*
	 * Note: to minimize their overhead, mm maintains hiwater_vm and
	 * hiwater_rss only when about to *lower* total_vm or rss.  Any
	 * collector of these hiwater stats must therefore get total_vm
	 * and rss too, which will usually be the higher.  Barriers? not
	 * worth the effort, such snapshots can always be inconsistent.
	 */
	hiwater_vm = total_vm = mm->total_vm;
	if (hiwater_vm < mm->hiwater_vm)
		hiwater_vm = mm->hiwater_vm;
	hiwater_rss = total_rss = get_mm_rss(mm);
	if (hiwater_rss < mm->hiwater_rss)
		hiwater_rss = mm->hiwater_rss;

	data = mm->total_vm - mm->shared_vm - mm->stack_vm;
	text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) >> 10;
	lib = (mm->exec_vm << (PAGE_SHIFT-10)) - text;
	swap = get_mm_counter(mm, MM_SWAPENTS);
	seq_printf(m,
		"VmPeak:\t%8lu kB\n"
		"VmSize:\t%8lu kB\n"
		"VmLck:\t%8lu kB\n"
		"VmPin:\t%8lu kB\n"
		"VmHWM:\t%8lu kB\n"
		"VmRSS:\t%8lu kB\n"
		"VmData:\t%8lu kB\n"
		"VmStk:\t%8lu kB\n"
		"VmExe:\t%8lu kB\n"
		"VmLib:\t%8lu kB\n"
		"VmPTE:\t%8lu kB\n"
		"VmSwap:\t%8lu kB\n",
		hiwater_vm << (PAGE_SHIFT-10),
		total_vm << (PAGE_SHIFT-10),
		mm->locked_vm << (PAGE_SHIFT-10),
		mm->pinned_vm << (PAGE_SHIFT-10),
		hiwater_rss << (PAGE_SHIFT-10),
		total_rss << (PAGE_SHIFT-10),
		data << (PAGE_SHIFT-10),
		mm->stack_vm << (PAGE_SHIFT-10), text, lib,
		(PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
		swap << (PAGE_SHIFT-10));
}
unsigned long task_vsize(struct mm_struct *mm)
{
	return PAGE_SIZE * mm->total_vm;
}
unsigned long task_statm(struct mm_struct *mm,
			 unsigned long *shared, unsigned long *text,
			 unsigned long *data, unsigned long *resident)
{
	*shared = get_mm_counter(mm, MM_FILEPAGES);
	*text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
								>> PAGE_SHIFT;
	*data = mm->total_vm - mm->shared_vm;
	*resident = *shared + get_mm_counter(mm, MM_ANONPAGES);
	return mm->total_vm;
}
static void pad_len_spaces(struct seq_file *m, int len)
{
	len = 25 + sizeof(void*) * 6 - len;
	if (len < 1)
		len = 1;
	seq_printf(m, "%*c", len, ' ');
}
/*
 * These functions are for numa_maps but called in generic **maps seq_file
 * ->start(), ->stop() ops.
 *
 * numa_maps scans all vmas under mmap_sem and checks their mempolicy.
 * Each mempolicy object is controlled by reference counting. The problem here
 * is how to avoid accessing a dead mempolicy object.
 *
 * Because we're holding mmap_sem while reading the seq_file, it's safe to
 * access each vma's mempolicy: no vma will drop its reference to a mempolicy
 * while we hold the lock.
 *
 * A task's mempolicy (task->mempolicy) has different behavior. task->mempolicy
 * is set and replaced under mmap_sem but unrefed and cleared under task_lock().
 * So, without task_lock(), we cannot trust get_vma_policy() because we cannot
 * guarantee the task never exits under us. But taking task_lock() around
 * get_vma_policy() causes a lock order problem.
 *
 * To access task->mempolicy without a lock, we hold a reference count on the
 * object pointed to by task->mempolicy and remember it. This guarantees
 * that task->mempolicy points to an alive object or NULL in numa_maps accesses.
 */
#ifdef CONFIG_NUMA
static void hold_task_mempolicy(struct proc_maps_private *priv)
{
	struct task_struct *task = priv->task;

	task_lock(task);
	priv->task_mempolicy = task->mempolicy;
	mpol_get(priv->task_mempolicy);
	task_unlock(task);
}
static void release_task_mempolicy(struct proc_maps_private *priv)
{
	mpol_put(priv->task_mempolicy);
}
#else
static void hold_task_mempolicy(struct proc_maps_private *priv)
{
}
static void release_task_mempolicy(struct proc_maps_private *priv)
{
}
#endif
static void seq_print_vma_name(struct seq_file *m, struct vm_area_struct *vma)
{
	const char __user *name = vma_get_anon_name(vma);
	struct mm_struct *mm = vma->vm_mm;

	unsigned long page_start_vaddr;
	unsigned long page_offset;
	unsigned long num_pages;
	unsigned long max_len = NAME_MAX;
	int i;

	page_start_vaddr = (unsigned long)name & PAGE_MASK;
	page_offset = (unsigned long)name - page_start_vaddr;
	num_pages = DIV_ROUND_UP(page_offset + max_len, PAGE_SIZE);

	seq_puts(m, "[anon:");

	for (i = 0; i < num_pages; i++) {
		int len;
		int write_len;
		const char *kaddr;
		long pages_pinned;
		struct page *page;

		pages_pinned = get_user_pages(current, mm, page_start_vaddr,
				1, 0, 0, &page, NULL);
		if (pages_pinned < 1) {
			seq_puts(m, "<fault>]");
			return;
		}

		kaddr = (const char *)kmap(page);
		len = min(max_len, PAGE_SIZE - page_offset);
		write_len = strnlen(kaddr + page_offset, len);
		seq_write(m, kaddr + page_offset, write_len);
		kunmap(page);
		put_page(page);

		/* if strnlen hit a null terminator then we're done */
		if (write_len != len)
			break;

		max_len -= len;
		page_offset = 0;
		page_start_vaddr += PAGE_SIZE;
	}

	seq_puts(m, "]");
}
static void vma_stop(struct proc_maps_private *priv, struct vm_area_struct *vma)
{
	if (vma && vma != priv->tail_vma) {
		struct mm_struct *mm = vma->vm_mm;
		release_task_mempolicy(priv);
		up_read(&mm->mmap_sem);
		mmput(mm);
	}
}
static void *m_start(struct seq_file *m, loff_t *pos)
{
	struct proc_maps_private *priv = m->private;
	unsigned long last_addr = m->version;
	struct mm_struct *mm;
	struct vm_area_struct *vma, *tail_vma = NULL;
	loff_t l = *pos;

	/* Clear the per syscall fields in priv */
	priv->task = NULL;
	priv->tail_vma = NULL;

	/*
	 * We remember last_addr rather than next_addr to hit with
	 * mmap_cache most of the time. We have zero last_addr at
	 * the beginning and also after lseek. We will have -1 last_addr
	 * after the end of the vmas.
	 */

	if (last_addr == -1UL)
		return NULL;

	priv->task = get_pid_task(priv->pid, PIDTYPE_PID);
	if (!priv->task)
		return ERR_PTR(-ESRCH);

	mm = mm_access(priv->task, PTRACE_MODE_READ_FSCREDS);
	if (!mm || IS_ERR(mm))
		return mm;
	down_read(&mm->mmap_sem);

	tail_vma = get_gate_vma(priv->task->mm);
	priv->tail_vma = tail_vma;
	hold_task_mempolicy(priv);
	/* Start with last addr hint */
	vma = find_vma(mm, last_addr);
	if (last_addr && vma) {
		last_addr = vma->vm_end;
		goto out;
	}

	/*
	 * Check the vma index is within the range and do
	 * sequential scan until m_index.
	 */
	vma = NULL;
	if ((unsigned long)l < mm->map_count) {
		vma = mm->mmap;
		while (l-- && vma)
			vma = vma->vm_next;
		goto out;
	}

	if (l != mm->map_count)
		tail_vma = NULL; /* After gate vma */

out:
	if (vma)
		return vma;

	release_task_mempolicy(priv);
	/* End of vmas has been reached */
	m->version = (tail_vma != NULL)? 0: -1UL;
	up_read(&mm->mmap_sem);
	mmput(mm);
	return tail_vma;
}
static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct proc_maps_private *priv = m->private;
	struct vm_area_struct *vma = v;
	struct vm_area_struct *tail_vma = priv->tail_vma;

	(*pos)++;
	if (vma && (vma != tail_vma) && vma->vm_next)
		return vma->vm_next;
	vma_stop(priv, vma);
	return (vma != tail_vma)? tail_vma: NULL;
}
static void m_stop(struct seq_file *m, void *v)
{
	struct proc_maps_private *priv = m->private;
	struct vm_area_struct *vma = v;

	if (!IS_ERR(vma))
		vma_stop(priv, vma);
	if (priv->task)
		put_task_struct(priv->task);
}
static int do_maps_open(struct inode *inode, struct file *file,
			const struct seq_operations *ops)
{
	struct proc_maps_private *priv;
	int ret = -ENOMEM;
	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (priv) {
		priv->pid = proc_pid(inode);
		ret = seq_open(file, ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = priv;
		} else {
			kfree(priv);
		}
	}
	return ret;
}
static void
show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
{
	struct mm_struct *mm = vma->vm_mm;
	struct file *file = vma->vm_file;
	struct proc_maps_private *priv = m->private;
	struct task_struct *task = priv->task;
	vm_flags_t flags = vma->vm_flags;
	unsigned long ino = 0;
	unsigned long long pgoff = 0;
	unsigned long start, end;
	dev_t dev = 0;
	int len;
	const char *name = NULL;

	if (file) {
		struct inode *inode = file_inode(vma->vm_file);
		dev = inode->i_sb->s_dev;
		ino = inode->i_ino;
		pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
	}

	/* We don't show the stack guard page in /proc/maps */
	start = vma->vm_start;
	end = vma->vm_end;

	seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
			start,
			end,
			flags & VM_READ ? 'r' : '-',
			flags & VM_WRITE ? 'w' : '-',
			flags & VM_EXEC ? 'x' : '-',
			flags & VM_MAYSHARE ? 's' : 'p',
			pgoff,
			MAJOR(dev), MINOR(dev), ino, &len);

	/*
	 * Print the dentry name for named mappings, and a
	 * special [heap] marker for the heap:
	 */
	if (file) {
		pad_len_spaces(m, len);
		seq_path(m, &file->f_path, "\n");
		goto done;
	}

	name = arch_vma_name(vma);
	if (!name) {
		pid_t tid;

		if (!mm) {
			name = "[vdso]";
			goto done;
		}

		if (vma->vm_start <= mm->brk &&
		    vma->vm_end >= mm->start_brk) {
			name = "[heap]";
			goto done;
		}

		tid = vm_is_stack(task, vma, is_pid);
		if (tid != 0) {
			/*
			 * Thread stack in /proc/PID/task/TID/maps or
			 * the main process stack.
			 */
			if (!is_pid || (vma->vm_start <= mm->start_stack &&
			    vma->vm_end >= mm->start_stack)) {
				name = "[stack]";
			} else {
				/* Thread stack in /proc/PID/maps */
				pad_len_spaces(m, len);
				seq_printf(m, "[stack:%d]", tid);
			}
			goto done;
		}

		if (vma_get_anon_name(vma)) {
			pad_len_spaces(m, len);
			seq_print_vma_name(m, vma);
		}
	}

done:
	if (name) {
		pad_len_spaces(m, len);
		seq_puts(m, name);
	}
	seq_putc(m, '\n');
}
static int show_map(struct seq_file *m, void *v, int is_pid)
{
	struct vm_area_struct *vma = v;
	struct proc_maps_private *priv = m->private;
	struct task_struct *task = priv->task;

	show_map_vma(m, vma, is_pid);

	if (m->count < m->size)  /* vma is copied successfully */
		m->version = (vma != get_gate_vma(task->mm))
			? vma->vm_start : 0;
	return 0;
}
static int show_pid_map(struct seq_file *m, void *v)
{
	return show_map(m, v, 1);
}

static int show_tid_map(struct seq_file *m, void *v)
{
	return show_map(m, v, 0);
}

static const struct seq_operations proc_pid_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_pid_map
};

static const struct seq_operations proc_tid_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_tid_map
};

static int pid_maps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_maps_op);
}

static int tid_maps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_tid_maps_op);
}

const struct file_operations proc_pid_maps_operations = {
	.open		= pid_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};

const struct file_operations proc_tid_maps_operations = {
	.open		= tid_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};
/*
 * Proportional Set Size(PSS): my share of RSS.
 *
 * PSS of a process is the count of pages it has in memory, where each
 * page is divided by the number of processes sharing it.  So if a
 * process has 1000 pages all to itself, and 1000 shared with one other
 * process, its PSS will be 1500.
 *
 * To keep (accumulated) division errors low, we adopt a 64bit
 * fixed-point pss counter to minimize division errors. So (pss >>
 * PSS_SHIFT) would be the real byte count.
 *
 * A shift of 12 before division means (assuming 4K page size):
 * 	- 1M 3-user-pages add up to 8KB errors;
 * 	- supports mapcount up to 2^24, or 16M;
 * 	- supports PSS up to 2^52 bytes, or 4PB.
 */
#define PSS_SHIFT 12
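/*
 * Illustrative sketch (not part of the original code): with the fixed-point
 * accounting above, each mapped page contributes (PAGE_SIZE << PSS_SHIFT)
 * divided by its mapcount, and the byte total is recovered with a final
 * shift.  The values below are made up and only show the arithmetic:
 *
 *	u64 pss = 0;
 *	pss += (4096ULL << PSS_SHIFT) / 1;	// private page: 4096 bytes
 *	pss += (4096ULL << PSS_SHIFT) / 2;	// shared by two: 2048 bytes
 *	// pss >> PSS_SHIFT == 6144 bytes; the rounding error per mapping
 *	// stays below 1/(1 << PSS_SHIFT) of a page.
 */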
#ifdef CONFIG_PROC_PAGE_MONITOR
struct mem_size_stats {
	struct vm_area_struct *vma;
	unsigned long resident;
	unsigned long shared_clean;
	unsigned long shared_dirty;
	unsigned long private_clean;
	unsigned long private_dirty;
	unsigned long referenced;
	unsigned long anonymous;
	unsigned long anonymous_thp;
	unsigned long swap;
	unsigned long nonlinear;
	u64 pss;
	u64 pswap;
};
#ifdef CONFIG_SWAP
extern struct swap_info_struct *swap_info_get(swp_entry_t entry);
extern void swap_info_unlock(struct swap_info_struct *si);
#endif // CONFIG_SWAP

static inline unsigned char swap_count(unsigned char ent)
{
	return ent & ~SWAP_HAS_CACHE;	/* may include SWAP_HAS_CONT flag */
}
static void smaps_pte_entry(pte_t ptent, unsigned long addr,
		unsigned long ptent_size, struct mm_walk *walk)
{
	struct mem_size_stats *mss = walk->private;
	struct vm_area_struct *vma = mss->vma;
	pgoff_t pgoff = linear_page_index(vma, addr);
	struct page *page = NULL;
	int mapcount;

	if (pte_present(ptent)) {
		page = vm_normal_page(vma, addr, ptent);
	} else if (is_swap_pte(ptent)) {
		swp_entry_t swpent = pte_to_swp_entry(ptent);

		if (!non_swap_entry(swpent)) {
#ifdef CONFIG_SWAP
			swp_entry_t entry;
			struct swap_info_struct *p;
#endif // CONFIG_SWAP

			mss->swap += ptent_size;
#ifdef CONFIG_SWAP
			entry = pte_to_swp_entry(ptent);
			if (non_swap_entry(entry))
				return;
			p = swap_info_get(entry);
			if (p) {
				int swapcount = swap_count(p->swap_map[swp_offset(entry)]);
				if (swapcount == 0)
					swapcount = 1;
				mss->pswap += (ptent_size << PSS_SHIFT) / swapcount;
				swap_info_unlock(p);
			}
#endif // CONFIG_SWAP
		} else if (is_migration_entry(swpent))
			page = migration_entry_to_page(swpent);
	} else if (pte_file(ptent)) {
		if (pte_to_pgoff(ptent) != pgoff)
			mss->nonlinear += ptent_size;
	}

	if (!page)
		return;

	if (PageAnon(page))
		mss->anonymous += ptent_size;

	if (page->index != pgoff)
		mss->nonlinear += ptent_size;

	mss->resident += ptent_size;
	/* Accumulate the size in pages that have been accessed. */
	if (pte_young(ptent) || PageReferenced(page))
		mss->referenced += ptent_size;
	mapcount = page_mapcount(page);
	if (mapcount >= 2) {
		if (pte_dirty(ptent) || PageDirty(page))
			mss->shared_dirty += ptent_size;
		else
			mss->shared_clean += ptent_size;
		mss->pss += (ptent_size << PSS_SHIFT) / mapcount;
	} else {
		if (pte_dirty(ptent) || PageDirty(page))
			mss->private_dirty += ptent_size;
		else
			mss->private_clean += ptent_size;
		mss->pss += (ptent_size << PSS_SHIFT);
	}
}
static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			   struct mm_walk *walk)
{
	struct mem_size_stats *mss = walk->private;
	struct vm_area_struct *vma = mss->vma;
	pte_t *pte;
	spinlock_t *ptl;

	if (pmd_trans_huge_lock(pmd, vma) == 1) {
		smaps_pte_entry(*(pte_t *)pmd, addr, HPAGE_PMD_SIZE, walk);
		spin_unlock(&walk->mm->page_table_lock);
		mss->anonymous_thp += HPAGE_PMD_SIZE;
		return 0;
	}

	if (pmd_trans_unstable(pmd))
		return 0;
	/*
	 * The mmap_sem held all the way back in m_start() is what
	 * keeps khugepaged out of here and from collapsing things
	 * in here.
	 */
	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE)
		smaps_pte_entry(*pte, addr, PAGE_SIZE, walk);
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();
	return 0;
}
static void show_smap_vma_flags(struct seq_file *m, struct vm_area_struct *vma)
{
	/*
	 * Don't forget to update Documentation/ on changes.
	 */
	static const char mnemonics[BITS_PER_LONG][2] = {
		/*
		 * In case we meet a flag we don't know about.
		 */
		[0 ... (BITS_PER_LONG-1)] = "??",

		[ilog2(VM_READ)]	= "rd",
		[ilog2(VM_WRITE)]	= "wr",
		[ilog2(VM_EXEC)]	= "ex",
		[ilog2(VM_SHARED)]	= "sh",
		[ilog2(VM_MAYREAD)]	= "mr",
		[ilog2(VM_MAYWRITE)]	= "mw",
		[ilog2(VM_MAYEXEC)]	= "me",
		[ilog2(VM_MAYSHARE)]	= "ms",
		[ilog2(VM_GROWSDOWN)]	= "gd",
		[ilog2(VM_PFNMAP)]	= "pf",
		[ilog2(VM_DENYWRITE)]	= "dw",
		[ilog2(VM_LOCKED)]	= "lo",
		[ilog2(VM_IO)]		= "io",
		[ilog2(VM_SEQ_READ)]	= "sr",
		[ilog2(VM_RAND_READ)]	= "rr",
		[ilog2(VM_DONTCOPY)]	= "dc",
		[ilog2(VM_DONTEXPAND)]	= "de",
		[ilog2(VM_ACCOUNT)]	= "ac",
		[ilog2(VM_NORESERVE)]	= "nr",
		[ilog2(VM_HUGETLB)]	= "ht",
		[ilog2(VM_NONLINEAR)]	= "nl",
		[ilog2(VM_ARCH_1)]	= "ar",
		[ilog2(VM_DONTDUMP)]	= "dd",
		[ilog2(VM_MIXEDMAP)]	= "mm",
		[ilog2(VM_HUGEPAGE)]	= "hg",
		[ilog2(VM_NOHUGEPAGE)]	= "nh",
		[ilog2(VM_MERGEABLE)]	= "mg",
	};
	size_t i;

	seq_puts(m, "VmFlags: ");
	for (i = 0; i < BITS_PER_LONG; i++) {
		if (vma->vm_flags & (1UL << i)) {
			seq_printf(m, "%c%c ",
				   mnemonics[i][0], mnemonics[i][1]);
		}
	}
	seq_putc(m, '\n');
}
static int show_smap(struct seq_file *m, void *v, int is_pid)
{
	struct proc_maps_private *priv = m->private;
	struct task_struct *task = priv->task;
	struct vm_area_struct *vma = v;
	struct mem_size_stats mss;
	struct mm_walk smaps_walk = {
		.pmd_entry = smaps_pte_range,
		.mm = vma->vm_mm,
		.private = &mss,
	};

	memset(&mss, 0, sizeof mss);
	mss.vma = vma;
	/* mmap_sem is held in m_start */
	if (vma->vm_mm && !is_vm_hugetlb_page(vma))
		walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);

	show_map_vma(m, vma, is_pid);

	seq_printf(m,
		   "Size:           %8lu kB\n"
		   "Rss:            %8lu kB\n"
		   "Pss:            %8lu kB\n"
		   "Shared_Clean:   %8lu kB\n"
		   "Shared_Dirty:   %8lu kB\n"
		   "Private_Clean:  %8lu kB\n"
		   "Private_Dirty:  %8lu kB\n"
		   "Referenced:     %8lu kB\n"
		   "Anonymous:      %8lu kB\n"
		   "AnonHugePages:  %8lu kB\n"
		   "Swap:           %8lu kB\n"
		   "PSwap:          %8lu kB\n"
		   "KernelPageSize: %8lu kB\n"
		   "MMUPageSize:    %8lu kB\n"
		   "Locked:         %8lu kB\n",
		   (vma->vm_end - vma->vm_start) >> 10,
		   mss.resident >> 10,
		   (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
		   mss.shared_clean  >> 10,
		   mss.shared_dirty  >> 10,
		   mss.private_clean >> 10,
		   mss.private_dirty >> 10,
		   mss.referenced >> 10,
		   mss.anonymous >> 10,
		   mss.anonymous_thp >> 10,
		   mss.swap >> 10,
		   (unsigned long)(mss.pswap >> (10 + PSS_SHIFT)),
		   vma_kernel_pagesize(vma) >> 10,
		   vma_mmu_pagesize(vma) >> 10,
		   (vma->vm_flags & VM_LOCKED) ?
			(unsigned long)(mss.pss >> (10 + PSS_SHIFT)) : 0);

	if (vma->vm_flags & VM_NONLINEAR)
		seq_printf(m, "Nonlinear:      %8lu kB\n",
				mss.nonlinear >> 10);

	show_smap_vma_flags(m, vma);

	if (vma_get_anon_name(vma)) {
		seq_puts(m, "Name:           ");
		seq_print_vma_name(m, vma);
		seq_putc(m, '\n');
	}

	if (m->count < m->size)  /* vma is copied successfully */
		m->version = (vma != get_gate_vma(task->mm))
			? vma->vm_start : 0;
	return 0;
}
static int show_pid_smap(struct seq_file *m, void *v)
{
	return show_smap(m, v, 1);
}

static int show_tid_smap(struct seq_file *m, void *v)
{
	return show_smap(m, v, 0);
}

static const struct seq_operations proc_pid_smaps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_pid_smap
};

static const struct seq_operations proc_tid_smaps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_tid_smap
};

static int pid_smaps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_smaps_op);
}

static int tid_smaps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_tid_smaps_op);
}

const struct file_operations proc_pid_smaps_operations = {
	.open		= pid_smaps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};

const struct file_operations proc_tid_smaps_operations = {
	.open		= tid_smaps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};
static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
				unsigned long end, struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->private;
	pte_t *pte, ptent;
	spinlock_t *ptl;
	struct page *page;

	split_huge_page_pmd(vma, addr, pmd);
	if (pmd_trans_unstable(pmd))
		return 0;

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		ptent = *pte;
		if (!pte_present(ptent))
			continue;

		page = vm_normal_page(vma, addr, ptent);
		if (!page)
			continue;

		/* Clear accessed and referenced bits. */
		ptep_test_and_clear_young(vma, addr, pte);
		ClearPageReferenced(page);
	}
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();
	return 0;
}
#define CLEAR_REFS_ALL 1
#define CLEAR_REFS_ANON 2
#define CLEAR_REFS_MAPPED 3

static ssize_t clear_refs_write(struct file *file, const char __user *buf,
				size_t count, loff_t *ppos)
{
	struct task_struct *task;
	char buffer[PROC_NUMBUF];
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	int type;
	int rv;

	memset(buffer, 0, sizeof(buffer));
	if (count > sizeof(buffer) - 1)
		count = sizeof(buffer) - 1;
	if (copy_from_user(buffer, buf, count))
		return -EFAULT;
	rv = kstrtoint(strstrip(buffer), 10, &type);
	if (rv < 0)
		return rv;
	if (type < CLEAR_REFS_ALL || type > CLEAR_REFS_MAPPED)
		return -EINVAL;
	task = get_proc_task(file_inode(file));
	if (!task)
		return -ESRCH;
	mm = get_task_mm(task);
	if (mm) {
		struct mm_walk clear_refs_walk = {
			.pmd_entry = clear_refs_pte_range,
			.mm = mm,
		};
		down_read(&mm->mmap_sem);
		for (vma = mm->mmap; vma; vma = vma->vm_next) {
			clear_refs_walk.private = vma;
			if (is_vm_hugetlb_page(vma))
				continue;
			/*
			 * Writing 1 to /proc/pid/clear_refs affects all pages.
			 *
			 * Writing 2 to /proc/pid/clear_refs only affects
			 * anonymous pages.
			 *
			 * Writing 3 to /proc/pid/clear_refs only affects file
			 * mapped pages.
			 */
			if (type == CLEAR_REFS_ANON && vma->vm_file)
				continue;
			if (type == CLEAR_REFS_MAPPED && !vma->vm_file)
				continue;
			walk_page_range(vma->vm_start, vma->vm_end,
					&clear_refs_walk);
		}
		flush_tlb_mm(mm);
		up_read(&mm->mmap_sem);
		mmput(mm);
	}
	put_task_struct(task);

	return count;
}

const struct file_operations proc_clear_refs_operations = {
	.write		= clear_refs_write,
	.llseek		= noop_llseek,
};
struct pagemapread {
	int pos, len;		/* units: PM_ENTRY_BYTES, not bytes */
	pagemap_entry_t *buffer;
};

#define PAGEMAP_WALK_SIZE	(PMD_SIZE)
#define PAGEMAP_WALK_MASK	(PMD_MASK)

#define PM_ENTRY_BYTES      sizeof(pagemap_entry_t)
#define PM_STATUS_BITS      3
#define PM_STATUS_OFFSET    (64 - PM_STATUS_BITS)
#define PM_STATUS_MASK      (((1LL << PM_STATUS_BITS) - 1) << PM_STATUS_OFFSET)
#define PM_STATUS(nr)       (((nr) << PM_STATUS_OFFSET) & PM_STATUS_MASK)
#define PM_PSHIFT_BITS      6
#define PM_PSHIFT_OFFSET    (PM_STATUS_OFFSET - PM_PSHIFT_BITS)
#define PM_PSHIFT_MASK      (((1LL << PM_PSHIFT_BITS) - 1) << PM_PSHIFT_OFFSET)
#define PM_PSHIFT(x)        (((u64) (x) << PM_PSHIFT_OFFSET) & PM_PSHIFT_MASK)
#define PM_PFRAME_MASK      ((1LL << PM_PSHIFT_OFFSET) - 1)
#define PM_PFRAME(x)        ((x) & PM_PFRAME_MASK)

#define PM_PRESENT          PM_STATUS(4LL)
#define PM_SWAP             PM_STATUS(2LL)
#define PM_FILE             PM_STATUS(1LL)
#define PM_NOT_PRESENT      PM_PSHIFT(PAGE_SHIFT)
#define PM_END_OF_BUFFER    1
static inline pagemap_entry_t make_pme(u64 val)
{
	return (pagemap_entry_t) { .pme = val };
}

static int add_to_pagemap(unsigned long addr, pagemap_entry_t *pme,
			  struct pagemapread *pm)
{
	pm->buffer[pm->pos++] = *pme;
	if (pm->pos >= pm->len)
		return PM_END_OF_BUFFER;
	return 0;
}
static int pagemap_pte_hole(unsigned long start, unsigned long end,
			    struct mm_walk *walk)
{
	struct pagemapread *pm = walk->private;
	unsigned long addr;
	int err = 0;
	pagemap_entry_t pme = make_pme(PM_NOT_PRESENT);

	for (addr = start; addr < end; addr += PAGE_SIZE) {
		err = add_to_pagemap(addr, &pme, pm);
		if (err)
			break;
	}
	return err;
}
static void pte_to_pagemap_entry(pagemap_entry_t *pme,
		struct vm_area_struct *vma, unsigned long addr, pte_t pte)
{
	u64 frame, flags;
	struct page *page = NULL;

	if (pte_present(pte)) {
		frame = pte_pfn(pte);
		flags = PM_PRESENT;
		page = vm_normal_page(vma, addr, pte);
	} else if (is_swap_pte(pte)) {
		swp_entry_t entry = pte_to_swp_entry(pte);

		frame = swp_type(entry) |
			(swp_offset(entry) << MAX_SWAPFILES_SHIFT);
		flags = PM_SWAP;
		if (is_migration_entry(entry))
			page = migration_entry_to_page(entry);
	} else {
		*pme = make_pme(PM_NOT_PRESENT);
		return;
	}

	if (page && !PageAnon(page))
		flags |= PM_FILE;

	*pme = make_pme(PM_PFRAME(frame) | PM_PSHIFT(PAGE_SHIFT) | flags);
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void thp_pmd_to_pagemap_entry(pagemap_entry_t *pme,
		pmd_t pmd, int offset)
{
	/*
	 * Currently pmd for thp is always present because thp can not be
	 * swapped-out, migrated, or HWPOISONed (split in such cases instead.)
	 * This if-check is just to prepare for future implementation.
	 */
	if (pmd_present(pmd))
		*pme = make_pme(PM_PFRAME(pmd_pfn(pmd) + offset)
				| PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT);
	else
		*pme = make_pme(PM_NOT_PRESENT);
}
#else
static inline void thp_pmd_to_pagemap_entry(pagemap_entry_t *pme,
		pmd_t pmd, int offset)
{
}
#endif
static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			     struct mm_walk *walk)
{
	struct vm_area_struct *vma;
	struct pagemapread *pm = walk->private;
	pte_t *pte;
	int err = 0;
	pagemap_entry_t pme = make_pme(PM_NOT_PRESENT);

	/* find the first VMA at or above 'addr' */
	vma = find_vma(walk->mm, addr);
	if (vma && pmd_trans_huge_lock(pmd, vma) == 1) {
		for (; addr != end; addr += PAGE_SIZE) {
			unsigned long offset;

			offset = (addr & ~PAGEMAP_WALK_MASK) >>
					PAGE_SHIFT;
			thp_pmd_to_pagemap_entry(&pme, *pmd, offset);
			err = add_to_pagemap(addr, &pme, pm);
			if (err)
				break;
		}
		spin_unlock(&walk->mm->page_table_lock);
		return err;
	}

	if (pmd_trans_unstable(pmd))
		return 0;
	for (; addr != end; addr += PAGE_SIZE) {

		/* check to see if we've left 'vma' behind
		 * and need a new, higher one */
		if (vma && (addr >= vma->vm_end)) {
			vma = find_vma(walk->mm, addr);
			pme = make_pme(PM_NOT_PRESENT);
		}

		/* check that 'vma' actually covers this address,
		 * and that it isn't a huge page vma */
		if (vma && (vma->vm_start <= addr) &&
		    !is_vm_hugetlb_page(vma)) {
			pte = pte_offset_map(pmd, addr);
			pte_to_pagemap_entry(&pme, vma, addr, *pte);
			/* unmap before userspace copy */
			pte_unmap(pte);
		}
		err = add_to_pagemap(addr, &pme, pm);
		if (err)
			return err;
	}

	cond_resched();

	return err;
}
#ifdef CONFIG_HUGETLB_PAGE
static void huge_pte_to_pagemap_entry(pagemap_entry_t *pme,
					pte_t pte, int offset)
{
	if (pte_present(pte))
		*pme = make_pme(PM_PFRAME(pte_pfn(pte) + offset)
				| PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT);
	else
		*pme = make_pme(PM_NOT_PRESENT);
}

/* This function walks within one hugetlb entry in the single call */
static int pagemap_hugetlb_range(pte_t *pte, unsigned long hmask,
				 unsigned long addr, unsigned long end,
				 struct mm_walk *walk)
{
	struct pagemapread *pm = walk->private;
	int err = 0;
	pagemap_entry_t pme;

	for (; addr != end; addr += PAGE_SIZE) {
		int offset = (addr & ~hmask) >> PAGE_SHIFT;
		huge_pte_to_pagemap_entry(&pme, *pte, offset);
		err = add_to_pagemap(addr, &pme, pm);
		if (err)
			return err;
	}

	cond_resched();

	return err;
}
#endif /* HUGETLB_PAGE */
/*
 * /proc/pid/pagemap - an array mapping virtual pages to pfns
 *
 * For each page in the address space, this file contains one 64-bit entry
 * consisting of the following:
 *
 * Bits 0-54  page frame number (PFN) if present
 * Bits 0-4   swap type if swapped
 * Bits 5-54  swap offset if swapped
 * Bits 55-60 page shift (page size = 1<<page shift)
 * Bit  61    page is file-page or shared-anon
 * Bit  62    page swapped
 * Bit  63    page present
 *
 * If the page is not present but in swap, then the PFN contains an
 * encoding of the swap file number and the page's offset into the
 * swap. Unmapped pages return a null PFN. This allows determining
 * precisely which pages are mapped (or in swap) and comparing mapped
 * pages between processes.
 *
 * Efficient users of this interface will use /proc/pid/maps to
 * determine which areas of memory are actually mapped and llseek to
 * skip over unmapped regions.
 */
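/*
 * Illustrative sketch (not part of the original code): a userspace reader
 * could decode one 64-bit entry along these lines, following the bit layout
 * documented above.  The variable names are made up:
 *
 *	uint64_t ent;				// one 8-byte entry read from the file
 *	int present  = (ent >> 63) & 1;
 *	int swapped  = (ent >> 62) & 1;
 *	uint64_t pfn = ent & ((1ULL << 55) - 1);		// bits 0-54
 *	int swap_type = ent & 0x1f;				// bits 0-4, if swapped
 *	uint64_t swap_off = (ent >> 5) & ((1ULL << 50) - 1);	// bits 5-54, if swapped
 */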
static ssize_t pagemap_read(struct file *file, char __user *buf,
			    size_t count, loff_t *ppos)
{
	struct task_struct *task = get_proc_task(file_inode(file));
	struct mm_struct *mm;
	struct pagemapread pm;
	int ret = -ESRCH;
	struct mm_walk pagemap_walk = {};
	unsigned long src;
	unsigned long svpfn;
	unsigned long start_vaddr;
	unsigned long end_vaddr;
	int copied = 0;

	if (!task)
		goto out;

	ret = -EINVAL;
	/* file position must be aligned */
	if ((*ppos % PM_ENTRY_BYTES) || (count % PM_ENTRY_BYTES))
		goto out_task;

	ret = 0;
	if (!count)
		goto out_task;

	pm.len = (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
	pm.buffer = kmalloc(pm.len * PM_ENTRY_BYTES, GFP_TEMPORARY);
	ret = -ENOMEM;
	if (!pm.buffer)
		goto out_task;

	mm = mm_access(task, PTRACE_MODE_READ_FSCREDS);
	ret = PTR_ERR(mm);
	if (!mm || IS_ERR(mm))
		goto out_free;

	pagemap_walk.pmd_entry = pagemap_pte_range;
	pagemap_walk.pte_hole = pagemap_pte_hole;
#ifdef CONFIG_HUGETLB_PAGE
	pagemap_walk.hugetlb_entry = pagemap_hugetlb_range;
#endif
	pagemap_walk.mm = mm;
	pagemap_walk.private = &pm;

	src = *ppos;
	svpfn = src / PM_ENTRY_BYTES;
	start_vaddr = svpfn << PAGE_SHIFT;
	end_vaddr = TASK_SIZE_OF(task);

	/* watch out for wraparound */
	if (svpfn > TASK_SIZE_OF(task) >> PAGE_SHIFT)
		start_vaddr = end_vaddr;

	/*
	 * The odds are that this will stop walking way
	 * before end_vaddr, because the length of the
	 * user buffer is tracked in "pm", and the walk
	 * will stop when we hit the end of the buffer.
	 */
	ret = 0;
	while (count && (start_vaddr < end_vaddr)) {
		int len;
		unsigned long end;

		pm.pos = 0;
		end = (start_vaddr + PAGEMAP_WALK_SIZE) & PAGEMAP_WALK_MASK;
		/* overflow ? */
		if (end < start_vaddr || end > end_vaddr)
			end = end_vaddr;
		down_read(&mm->mmap_sem);
		ret = walk_page_range(start_vaddr, end, &pagemap_walk);
		up_read(&mm->mmap_sem);
		start_vaddr = end;

		len = min(count, PM_ENTRY_BYTES * pm.pos);
		if (copy_to_user(buf, pm.buffer, len)) {
			ret = -EFAULT;
			goto out_mm;
		}
		copied += len;
		buf += len;
		count -= len;
	}
	*ppos += copied;
	if (!ret || ret == PM_END_OF_BUFFER)
		ret = copied;

out_mm:
	mmput(mm);
out_free:
	kfree(pm.buffer);
out_task:
	put_task_struct(task);
out:
	return ret;
}
static int pagemap_open(struct inode *inode, struct file *file)
{
	/* do not disclose physical addresses to unprivileged
	   userspace (closes a rowhammer attack vector) */
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	return 0;
}

const struct file_operations proc_pagemap_operations = {
	.llseek		= mem_lseek, /* borrow this */
	.read		= pagemap_read,
	.open		= pagemap_open,
};
#endif /* CONFIG_PROC_PAGE_MONITOR */
#ifdef CONFIG_NUMA

struct numa_maps {
	struct vm_area_struct *vma;
	unsigned long pages;
	unsigned long anon;
	unsigned long active;
	unsigned long writeback;
	unsigned long mapcount_max;
	unsigned long dirty;
	unsigned long swapcache;
	unsigned long node[MAX_NUMNODES];
};

struct numa_maps_private {
	struct proc_maps_private proc_maps;
	struct numa_maps md;
};
*page
, struct numa_maps
*md
, int pte_dirty
,
1239 unsigned long nr_pages
)
1241 int count
= page_mapcount(page
);
1243 md
->pages
+= nr_pages
;
1244 if (pte_dirty
|| PageDirty(page
))
1245 md
->dirty
+= nr_pages
;
1247 if (PageSwapCache(page
))
1248 md
->swapcache
+= nr_pages
;
1250 if (PageActive(page
) || PageUnevictable(page
))
1251 md
->active
+= nr_pages
;
1253 if (PageWriteback(page
))
1254 md
->writeback
+= nr_pages
;
1257 md
->anon
+= nr_pages
;
1259 if (count
> md
->mapcount_max
)
1260 md
->mapcount_max
= count
;
1262 md
->node
[page_to_nid(page
)] += nr_pages
;
static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma,
		unsigned long addr)
{
	struct page *page;
	int nid;

	if (!pte_present(pte))
		return NULL;

	page = vm_normal_page(vma, addr, pte);
	if (!page)
		return NULL;

	if (PageReserved(page))
		return NULL;

	nid = page_to_nid(page);
	if (!node_isset(nid, node_states[N_MEMORY]))
		return NULL;

	return page;
}
*pmd
, unsigned long addr
,
1289 unsigned long end
, struct mm_walk
*walk
)
1291 struct numa_maps
*md
;
1298 if (pmd_trans_huge_lock(pmd
, md
->vma
) == 1) {
1299 pte_t huge_pte
= *(pte_t
*)pmd
;
1302 page
= can_gather_numa_stats(huge_pte
, md
->vma
, addr
);
1304 gather_stats(page
, md
, pte_dirty(huge_pte
),
1305 HPAGE_PMD_SIZE
/PAGE_SIZE
);
1306 spin_unlock(&walk
->mm
->page_table_lock
);
1310 if (pmd_trans_unstable(pmd
))
1312 orig_pte
= pte
= pte_offset_map_lock(walk
->mm
, pmd
, addr
, &ptl
);
1314 struct page
*page
= can_gather_numa_stats(*pte
, md
->vma
, addr
);
1317 gather_stats(page
, md
, pte_dirty(*pte
), 1);
1319 } while (pte
++, addr
+= PAGE_SIZE
, addr
!= end
);
1320 pte_unmap_unlock(orig_pte
, ptl
);
#ifdef CONFIG_HUGETLB_PAGE
static int gather_hugetbl_stats(pte_t *pte, unsigned long hmask,
		unsigned long addr, unsigned long end, struct mm_walk *walk)
{
	struct numa_maps *md;
	struct page *page;

	if (pte_none(*pte))
		return 0;

	page = pte_page(*pte);
	if (!page)
		return 0;

	md = walk->private;
	gather_stats(page, md, pte_dirty(*pte), 1);
	return 0;
}

#else

static int gather_hugetbl_stats(pte_t *pte, unsigned long hmask,
		unsigned long addr, unsigned long end, struct mm_walk *walk)
{
	return 0;
}
#endif
/*
 * Display pages allocated per node and memory policy via /proc.
 */
static int show_numa_map(struct seq_file *m, void *v, int is_pid)
{
	struct numa_maps_private *numa_priv = m->private;
	struct proc_maps_private *proc_priv = &numa_priv->proc_maps;
	struct vm_area_struct *vma = v;
	struct numa_maps *md = &numa_priv->md;
	struct file *file = vma->vm_file;
	struct task_struct *task = proc_priv->task;
	struct mm_struct *mm = vma->vm_mm;
	struct mm_walk walk = {};
	struct mempolicy *pol;
	char buffer[50];
	int n;

	if (!mm)
		return 0;

	/* Ensure we start with an empty set of numa_maps statistics. */
	memset(md, 0, sizeof(*md));

	md->vma = vma;

	walk.hugetlb_entry = gather_hugetbl_stats;
	walk.pmd_entry = gather_pte_stats;
	walk.private = md;
	walk.mm = mm;

	pol = get_vma_policy(task, vma, vma->vm_start);
	mpol_to_str(buffer, sizeof(buffer), pol);
	mpol_cond_put(pol);

	seq_printf(m, "%08lx %s", vma->vm_start, buffer);

	if (file) {
		seq_printf(m, " file=");
		seq_path(m, &file->f_path, "\n\t= ");
	} else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
		seq_printf(m, " heap");
	} else {
		pid_t tid = vm_is_stack(task, vma, is_pid);
		if (tid != 0) {
			/*
			 * Thread stack in /proc/PID/task/TID/maps or
			 * the main process stack.
			 */
			if (!is_pid || (vma->vm_start <= mm->start_stack &&
			    vma->vm_end >= mm->start_stack))
				seq_printf(m, " stack");
			else
				seq_printf(m, " stack:%d", tid);
		}
	}

	if (is_vm_hugetlb_page(vma))
		seq_printf(m, " huge");

	walk_page_range(vma->vm_start, vma->vm_end, &walk);

	if (!md->pages)
		goto out;

	if (md->anon)
		seq_printf(m, " anon=%lu", md->anon);

	if (md->dirty)
		seq_printf(m, " dirty=%lu", md->dirty);

	if (md->pages != md->anon && md->pages != md->dirty)
		seq_printf(m, " mapped=%lu", md->pages);

	if (md->mapcount_max > 1)
		seq_printf(m, " mapmax=%lu", md->mapcount_max);

	if (md->swapcache)
		seq_printf(m, " swapcache=%lu", md->swapcache);

	if (md->active < md->pages && !is_vm_hugetlb_page(vma))
		seq_printf(m, " active=%lu", md->active);

	if (md->writeback)
		seq_printf(m, " writeback=%lu", md->writeback);

	for_each_node_state(n, N_MEMORY)
		if (md->node[n])
			seq_printf(m, " N%d=%lu", n, md->node[n]);
out:
	seq_putc(m, '\n');

	if (m->count < m->size)
		m->version = (vma != proc_priv->tail_vma) ? vma->vm_start : 0;
	return 0;
}
static int show_pid_numa_map(struct seq_file *m, void *v)
{
	return show_numa_map(m, v, 1);
}

static int show_tid_numa_map(struct seq_file *m, void *v)
{
	return show_numa_map(m, v, 0);
}

static const struct seq_operations proc_pid_numa_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_pid_numa_map,
};

static const struct seq_operations proc_tid_numa_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_tid_numa_map,
};

static int numa_maps_open(struct inode *inode, struct file *file,
			  const struct seq_operations *ops)
{
	struct numa_maps_private *priv;
	int ret = -ENOMEM;
	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (priv) {
		priv->proc_maps.pid = proc_pid(inode);
		ret = seq_open(file, ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = priv;
		} else {
			kfree(priv);
		}
	}
	return ret;
}

static int pid_numa_maps_open(struct inode *inode, struct file *file)
{
	return numa_maps_open(inode, file, &proc_pid_numa_maps_op);
}

static int tid_numa_maps_open(struct inode *inode, struct file *file)
{
	return numa_maps_open(inode, file, &proc_tid_numa_maps_op);
}

const struct file_operations proc_pid_numa_maps_operations = {
	.open		= pid_numa_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};

const struct file_operations proc_tid_numa_maps_operations = {
	.open		= tid_numa_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};
#endif /* CONFIG_NUMA */