Commit | Line | Data |
---|---|---|
f8af4da3 HD |
1 | #ifndef __LINUX_KSM_H |
2 | #define __LINUX_KSM_H | |
3 | /* | |
4 | * Memory merging support. | |
5 | * | |
6 | * This code enables dynamic sharing of identical pages found in different | |
7 | * memory areas, even if they are not shared by fork(). | |
8 | */ | |
9 | ||
10 | #include <linux/bitops.h> | |
11 | #include <linux/mm.h> | |
12 | #include <linux/sched.h> | |
9a840895 | 13 | #include <linux/vmstat.h> |
f8af4da3 HD |
14 | |
15 | #ifdef CONFIG_KSM | |
16 | int ksm_madvise(struct vm_area_struct *vma, unsigned long start, | |
17 | unsigned long end, int advice, unsigned long *vm_flags); | |
18 | int __ksm_enter(struct mm_struct *mm); | |
1c2fb7a4 | 19 | void __ksm_exit(struct mm_struct *mm); |
f8af4da3 HD |
20 | |
21 | static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm) | |
22 | { | |
23 | if (test_bit(MMF_VM_MERGEABLE, &oldmm->flags)) | |
24 | return __ksm_enter(mm); | |
25 | return 0; | |
26 | } | |
27 | ||
1c2fb7a4 | 28 | static inline void ksm_exit(struct mm_struct *mm) |
f8af4da3 HD |
29 | { |
30 | if (test_bit(MMF_VM_MERGEABLE, &mm->flags)) | |
1c2fb7a4 | 31 | __ksm_exit(mm); |
f8af4da3 | 32 | } |
9a840895 HD |
33 | |
34 | /* | |
35 | * A KSM page is one of those write-protected "shared pages" or "merged pages" | |
36 | * which KSM maps into multiple mms, wherever identical anonymous page content | |
37 | * is found in VM_MERGEABLE vmas. It's a PageAnon page, with NULL anon_vma. | |
38 | */ | |
39 | static inline int PageKsm(struct page *page) | |
40 | { | |
3ca7b3c5 HD |
41 | return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) == |
42 | (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM); | |
9a840895 HD |
43 | } |
44 | ||
45 | /* | |
46 | * But we have to avoid the checking which page_add_anon_rmap() performs. | |
47 | */ | |
48 | static inline void page_add_ksm_rmap(struct page *page) | |
49 | { | |
50 | if (atomic_inc_and_test(&page->_mapcount)) { | |
3ca7b3c5 | 51 | page->mapping = (void *) (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM); |
9a840895 HD |
52 | __inc_zone_page_state(page, NR_ANON_PAGES); |
53 | } | |
54 | } | |
f8af4da3 HD |
55 | #else /* !CONFIG_KSM */ |
56 | ||
/* !CONFIG_KSM stub: madvise(MADV_MERGEABLE/UNMERGEABLE) is a no-op;
 * vm_flags is left untouched and success (0) is returned. */
static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
		unsigned long end, int advice, unsigned long *vm_flags)
{
	return 0;
}
62 | ||
/* !CONFIG_KSM stub: nothing to inherit at fork; always succeeds. */
static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
	return 0;
}
67 | ||
/* !CONFIG_KSM stub: no KSM state to tear down on mm exit. */
static inline void ksm_exit(struct mm_struct *mm)
{
}
9a840895 HD |
71 | |
/* !CONFIG_KSM stub: without KSM no page can be a KSM page. */
static inline int PageKsm(struct page *page)
{
	return 0;
}
76 | ||
77 | /* No stub required for page_add_ksm_rmap(page) */ | |
f8af4da3 HD |
78 | #endif /* !CONFIG_KSM */ |
79 | ||
80 | #endif |