/*
 * mm/pgtable-generic.c
 *
 * Generic pgtable methods declared in asm-generic/pgtable.h
 *
 * Copyright (C) 2010 Linus Torvalds
 */

#include <linux/pagemap.h>
#include <asm/tlb.h>
#include <asm-generic/pgtable.h>

#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
/*
 * Only sets the access flags (dirty, accessed), as well as write
 * permission. Furthermore, we know it always gets set to a "more
 * permissive" setting, which allows most architectures to optimize
 * this. We return whether the PTE actually changed, which in turn
 * instructs the caller to do things like update_mmu_cache. This
 * used to be done in the caller, but sparc needs minor faults to
 * force that call on sun4c so we changed this macro slightly
 */
int ptep_set_access_flags(struct vm_area_struct *vma,
                          unsigned long address, pte_t *ptep,
                          pte_t entry, int dirty)
{
        int changed = !pte_same(*ptep, entry);
        if (changed) {
                set_pte_at(vma->vm_mm, address, ptep, entry);
                flush_tlb_fix_spurious_fault(vma, address);
        }
        return changed;
}
#endif

#ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
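/*
 * PMD counterpart of ptep_set_access_flags(): update the access/dirty
 * bits of a huge pmd. A pmd maps HPAGE_PMD_SIZE of address space, so a
 * change must flush the whole range rather than a single page. Returns
 * whether the pmd actually changed; without CONFIG_TRANSPARENT_HUGEPAGE
 * no caller should ever get here.
 */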
int pmdp_set_access_flags(struct vm_area_struct *vma,
                          unsigned long address, pmd_t *pmdp,
                          pmd_t entry, int dirty)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
        int changed = !pmd_same(*pmdp, entry);
        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
        if (changed) {
                set_pmd_at(vma->vm_mm, address, pmdp, entry);
                flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
        }
        return changed;
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
        BUG();
        return 0;
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
}
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
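/*
 * Test-and-clear the "young" (accessed) bit of a pte. When the bit was
 * set, the TLB entry is flushed so that the next hardware access has to
 * walk the page tables and mark the page young again.
 */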
int ptep_clear_flush_young(struct vm_area_struct *vma,
                           unsigned long address, pte_t *ptep)
{
        int young;
        young = ptep_test_and_clear_young(vma, address, ptep);
        if (young)
                flush_tlb_page(vma, address);
        return young;
}
#endif

#ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
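/*
 * Huge-pmd variant of ptep_clear_flush_young(). Only meaningful with
 * transparent hugepages: the address must be pmd-aligned and the flush
 * covers the full HPAGE_PMD_SIZE range.
 */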
int pmdp_clear_flush_young(struct vm_area_struct *vma,
                           unsigned long address, pmd_t *pmdp)
{
        int young;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
#else
        BUG();
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
        young = pmdp_test_and_clear_young(vma, address, pmdp);
        if (young)
                flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
        return young;
}
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
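/*
 * Atomically clear a pte and return the old value, flushing the TLB
 * entry only when the old pte was accessible, i.e. when a TLB may
 * actually hold a cached copy of it.
 */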
pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address,
                       pte_t *ptep)
{
        struct mm_struct *mm = vma->vm_mm;
        pte_t pte;
        pte = ptep_get_and_clear(mm, address, ptep);
        if (pte_accessible(mm, pte))
                flush_tlb_page(vma, address);
        return pte;
}
#endif

#ifndef __HAVE_ARCH_PMDP_CLEAR_FLUSH
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
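/*
 * Clear a huge pmd and return the old value, unconditionally flushing
 * the TLB for the whole huge-page range.
 */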
pmd_t pmdp_clear_flush(struct vm_area_struct *vma, unsigned long address,
                       pmd_t *pmdp)
{
        pmd_t pmd;
        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
        pmd = pmdp_get_and_clear(vma->vm_mm, address, pmdp);
        flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
        return pmd;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif

#ifndef __HAVE_ARCH_PMDP_SPLITTING_FLUSH
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
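/*
 * Mark a huge pmd as splitting so that new faults back off until the
 * split completes. As the inline comment below notes, the TLB flush is
 * not needed for invalidation; it only serializes against concurrent
 * gup-fast walkers.
 */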
void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
                          pmd_t *pmdp)
{
        pmd_t pmd = pmd_mksplitting(*pmdp);
        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
        set_pmd_at(vma->vm_mm, address, pmdp, pmd);
        /* tlb flush only to serialize against gup-fast */
        flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif

#ifndef __HAVE_ARCH_PGTABLE_DEPOSIT
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
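/*
 * Deposit a preallocated pte page table under mm->pmd_huge_pte so it
 * can be reused when a huge pmd is split. Tables are chained on a list
 * and taken back in FIFO order by pgtable_trans_huge_withdraw().
 */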
void pgtable_trans_huge_deposit(struct mm_struct *mm, pgtable_t pgtable)
{
        assert_spin_locked(&mm->page_table_lock);

        /* FIFO */
        if (!mm->pmd_huge_pte)
                INIT_LIST_HEAD(&pgtable->lru);
        else
                list_add(&pgtable->lru, &mm->pmd_huge_pte->lru);
        mm->pmd_huge_pte = pgtable;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif

#ifndef __HAVE_ARCH_PGTABLE_WITHDRAW
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* no "address" argument, so this destroys the page coloring of some archs */
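/*
 * Withdraw the oldest deposited page table (FIFO pairing with
 * pgtable_trans_huge_deposit()). The caller must hold page_table_lock
 * and must not withdraw more tables than were deposited.
 */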
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm)
{
        pgtable_t pgtable;

        assert_spin_locked(&mm->page_table_lock);

        /* FIFO */
        pgtable = mm->pmd_huge_pte;
        if (list_empty(&pgtable->lru))
                mm->pmd_huge_pte = NULL;
        else {
                mm->pmd_huge_pte = list_entry(pgtable->lru.next,
                                              struct page, lru);
                list_del(&pgtable->lru);
        }
        return pgtable;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif

#ifndef __HAVE_ARCH_PMDP_INVALIDATE
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
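/*
 * Make an existing huge pmd non-present so that hardware walks cannot
 * use it, then flush the range. The NUMA hinting bit, if set, is
 * cleared first so the resulting non-present pmd cannot be mistaken
 * for a pmd_numa() hinting entry.
 */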
void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
                     pmd_t *pmdp)
{
        pmd_t entry = *pmdp;
        if (pmd_numa(entry))
                entry = pmd_mknonnuma(entry);
        set_pmd_at(vma->vm_mm, address, pmdp, pmd_mknotpresent(entry));
        flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif