/*
 * Hibernation support specific for i386 - temporary page tables
 *
 * Distribute under GPLv2
 *
 * Copyright (c) 2006 Rafael J. Wysocki <rjw@sisk.pl>
 */

#include <linux/gfp.h>
#include <linux/suspend.h>
#include <linux/bootmem.h>

#include <asm/system.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/mmzone.h>

/* Defined in hibernate_asm_32.S */
extern int restore_image(void);

/* References to section boundaries */
extern const void __nosave_begin, __nosave_end;
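
/*
 * These symbols are emitted by the kernel linker script around the nosave
 * data section; they delimit the data that the hibernation core must skip
 * when saving an image (see pfn_is_nosave() below).
 */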

/* Pointer to the temporary resume page tables */
pgd_t *resume_pg_dir;

/* The following three functions are based on the analogous code in
 * arch/x86/mm/init_32.c
 */
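
/*
 * All page-table pages used here come from get_safe_page() (declared in
 * <linux/suspend.h>), which returns the address of a free page that is
 * guaranteed not to collide with the image pages being restored, or 0 on
 * failure - hence the NULL checks after each allocation.
 */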

/*
 * Create a middle page table on a resume-safe page and put a pointer to it in
 * the given global directory entry.  This simply returns the pgd entry itself
 * in non-PAE compilation mode, since the middle layer is folded there.
 */
static pmd_t *resume_one_md_table_init(pgd_t *pgd)
{
        pud_t *pud;
        pmd_t *pmd_table;

#ifdef CONFIG_X86_PAE
        pmd_table = (pmd_t *)get_safe_page(GFP_ATOMIC);
        if (!pmd_table)
                return NULL;

        set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
        pud = pud_offset(pgd, 0);

        BUG_ON(pmd_table != pmd_offset(pud, 0));
#else
        pud = pud_offset(pgd, 0);
        pmd_table = pmd_offset(pud, 0);
#endif

        return pmd_table;
}
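
/*
 * Note on geometry: with CONFIG_X86_PAE the top level holds four entries
 * covering 1 GB each and every pmd entry maps 2 MB; without PAE the pmd
 * level is folded and each of the 1024 pgd entries covers 4 MB.
 */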

/*
 * Create a page table on a resume-safe page and place a pointer to it in
 * a middle page directory entry.
 */
static pte_t *resume_one_page_table_init(pmd_t *pmd)
{
        if (pmd_none(*pmd)) {
                pte_t *page_table = (pte_t *)get_safe_page(GFP_ATOMIC);
                if (!page_table)
                        return NULL;

                set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));

                BUG_ON(page_table != pte_offset_kernel(pmd, 0));

                return page_table;
        }

        return pte_offset_kernel(pmd, 0);
}
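
/*
 * _PAGE_TABLE is the usual x86 flag combination for an entry pointing to
 * a lower-level table (present and writable, among other bits), as
 * defined in the x86 pgtable headers.
 */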

/*
 * This maps the physical memory to kernel virtual address space, a total
 * of max_low_pfn pages, by creating page tables starting from address
 * PAGE_OFFSET.  The page tables are allocated out of resume-safe pages.
 */
static int resume_physical_mapping_init(pgd_t *pgd_base)
{
        unsigned long pfn;
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte;
        int pgd_idx, pmd_idx;

        pgd_idx = pgd_index(PAGE_OFFSET);
        pgd = pgd_base + pgd_idx;
        pfn = 0;

        for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
                pmd = resume_one_md_table_init(pgd);
                if (!pmd)
                        return -ENOMEM;

                if (pfn >= max_low_pfn)
                        continue;

                for (pmd_idx = 0; pmd_idx < PTRS_PER_PMD; pmd++, pmd_idx++) {
                        if (pfn >= max_low_pfn)
                                break;

                        /* Map with big pages if possible, otherwise create
                         * normal page tables.
                         * NOTE: We can mark everything as executable here.
                         */
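                        /*
                         * With PSE a single pmd entry maps a large page
                         * directly (4 MB without PAE, 2 MB with PAE), so
                         * pfn advances by a full PTRS_PER_PTE at a time.
                         */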
                        if (cpu_has_pse) {
                                set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE_EXEC));
                                pfn += PTRS_PER_PTE;
                        } else {
                                pte_t *max_pte;

                                pte = resume_one_page_table_init(pmd);
                                if (!pte)
                                        return -ENOMEM;

                                max_pte = pte + PTRS_PER_PTE;
                                for (; pte < max_pte; pte++, pfn++) {
                                        if (pfn >= max_low_pfn)
                                                break;

                                        set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
                                }
                        }
                }
        }

        resume_map_numa_kva(pgd_base);

        return 0;
}

static inline void resume_init_first_level_page_table(pgd_t *pg_dir)
{
#ifdef CONFIG_X86_PAE
        int i;

        /* Init entries of the first-level page table to the zero page */
        for (i = 0; i < PTRS_PER_PGD; i++)
                set_pgd(pg_dir + i,
                        __pgd(__pa(empty_zero_page) | _PAGE_PRESENT));
#endif
}
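
/*
 * Presumably this mirrors the native PAE setup in arch/x86/mm/init_32.c:
 * pointing every top-level slot at the all-zero page up front ensures the
 * PDPT entries reference a valid table even before the kernel range is
 * filled in by resume_physical_mapping_init().
 */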

int swsusp_arch_resume(void)
{
        int error;

        resume_pg_dir = (pgd_t *)get_safe_page(GFP_ATOMIC);
        if (!resume_pg_dir)
                return -ENOMEM;

        resume_init_first_level_page_table(resume_pg_dir);
        error = resume_physical_mapping_init(resume_pg_dir);
        if (error)
                return error;

        /* We have got enough memory and from now on we cannot recover */
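        /*
         * restore_image() (in hibernate_asm_32.S) switches to
         * resume_pg_dir, copies the image pages back into place and jumps
         * into the restored kernel, so on success it does not return here.
         */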
        restore_image();
        return 0;
}

/*
 * pfn_is_nosave - check if given pfn is in the 'nosave' section
 */

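/*
 * The end address is rounded up with PAGE_ALIGN while the start is
 * truncated down by the shift, so any page that overlaps the nosave
 * region at all is reported as 'nosave'.
 */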
int pfn_is_nosave(unsigned long pfn)
{
        unsigned long nosave_begin_pfn = __pa_symbol(&__nosave_begin) >> PAGE_SHIFT;
        unsigned long nosave_end_pfn = PAGE_ALIGN(__pa_symbol(&__nosave_end)) >> PAGE_SHIFT;

        return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn);
}