/*
 * kernel/power/tuxonice_pagedir.c
 *
 * Copyright (C) 1998-2001 Gabor Kuti <seasons@fornax.hu>
 * Copyright (C) 1998,2001,2002 Pavel Machek <pavel@suse.cz>
 * Copyright (C) 2002-2003 Florent Chabaud <fchabaud@free.fr>
 * Copyright (C) 2006-2010 Nigel Cunningham (nigel at tuxonice net)
 *
 * This file is released under the GPLv2.
 *
 * Routines for handling pagesets.
 * Note that pbes aren't actually stored as such. They're stored as
 * bitmaps and extents.
 */

#include <linux/suspend.h>
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/hardirq.h>
#include <linux/sched.h>
#include <linux/cpu.h>
#include <asm/tlbflush.h>

#include "tuxonice_pageflags.h"
#include "tuxonice_ui.h"
#include "tuxonice_pagedir.h"
#include "tuxonice_prepare_image.h"
#include "tuxonice.h"
#include "tuxonice_builtin.h"
#include "tuxonice_alloc.h"

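/*
 * ptoi_pfn tracks our position in pageset2_map when reusing alternate
 * image pages; this_low_pbe and last_low_pbe_ptr record the next free
 * lowmem pbe slot and the link through which it should be chained.
 */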
static int ptoi_pfn;
static struct pbe *this_low_pbe;
static struct pbe **last_low_pbe_ptr;

void toi_reset_alt_image_pageset2_pfn(void)
{
        memory_bm_position_reset(pageset2_map);
}

static struct page *first_conflicting_page;

/*
 * free_conflicting_pages
 *
 * Free the singly-linked list of pages that were allocated but turned
 * out to conflict with pageset1. Each page stores the pointer to the
 * next entry at its start.
 */

static void free_conflicting_pages(void)
{
        while (first_conflicting_page) {
                struct page *next =
                        *((struct page **)kmap(first_conflicting_page));
                kunmap(first_conflicting_page);
                toi__free_page(29, first_conflicting_page);
                first_conflicting_page = next;
        }
}

/* ___toi_get_nonconflicting_page
 *
 * Description: Gets order zero pages that won't be overwritten
 * while copying the original pages.
 */

struct page *___toi_get_nonconflicting_page(int can_be_highmem)
{
        struct page *page;
        gfp_t flags = TOI_ATOMIC_GFP;

        if (can_be_highmem)
                flags |= __GFP_HIGHMEM;

        /*
         * When loading an alternate image, pages in the current
         * pageset2 aren't part of the image being restored, so they
         * can be reused without conflict.
         */
        if (test_toi_state(TOI_LOADING_ALT_IMAGE) && pageset2_map &&
            (ptoi_pfn != BM_END_OF_MAP)) {
                do {
                        ptoi_pfn = memory_bm_next_pfn(pageset2_map);
                        if (ptoi_pfn != BM_END_OF_MAP) {
                                page = pfn_to_page(ptoi_pfn);
                                if (!PagePageset1(page) &&
                                    (can_be_highmem || !PageHighMem(page)))
                                        return page;
                        }
                } while (ptoi_pfn != BM_END_OF_MAP);
        }

        /*
         * Otherwise allocate fresh pages, setting aside any that turn
         * out to overlap pageset1 so they can be freed later.
         */
        do {
                page = toi_alloc_page(29, flags);
                if (!page) {
                        printk(KERN_INFO "Failed to get nonconflicting page.\n");
                        return NULL;
                }
                if (PagePageset1(page)) {
                        struct page **next = (struct page **)kmap(page);
                        *next = first_conflicting_page;
                        first_conflicting_page = page;
                        kunmap(page);
                }
        } while (PagePageset1(page));

        return page;
}

unsigned long __toi_get_nonconflicting_page(void)
{
        struct page *page = ___toi_get_nonconflicting_page(0);

        return page ? (unsigned long)page_address(page) : 0;
}

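/*
 * get_next_pbe - return the next free pbe slot, moving onto a fresh
 * nonconflicting page (mapped and zeroed) when another entry wouldn't
 * fit on the current one.
 */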
static struct pbe *get_next_pbe(struct page **page_ptr, struct pbe *this_pbe,
                int highmem)
{
        if (((((unsigned long)this_pbe) & (PAGE_SIZE - 1))
             + 2 * sizeof(struct pbe)) > PAGE_SIZE) {
                struct page *new_page =
                        ___toi_get_nonconflicting_page(highmem);

                if (!new_page)
                        return ERR_PTR(-ENOMEM);
                this_pbe = (struct pbe *)kmap(new_page);
                memset(this_pbe, 0, PAGE_SIZE);
                *page_ptr = new_page;
        } else
                this_pbe++;

        return this_pbe;
}

/**
 * toi_get_pageset1_load_addresses - generate pbes for conflicting pages
 *
 * Check that the pagedir and the pages it points to won't collide
 * with the pages where we're going to restore the loaded image
 * later.
 *
 * Returns:
 * Zero on success, -ENOMEM if we couldn't allocate enough
 * nonconflicting pages (which shouldn't happen).
 **/
int toi_get_pageset1_load_addresses(void)
{
        int pfn, highallocd = 0, lowallocd = 0;
        int low_needed = pagedir1.size - get_highmem_size(pagedir1);
        int high_needed = get_highmem_size(pagedir1);
        int low_pages_for_highmem = 0;
        gfp_t flags = GFP_ATOMIC | __GFP_NOWARN | __GFP_HIGHMEM;
        struct page *page, *high_pbe_page = NULL, *last_high_pbe_page = NULL,
                    *low_pbe_page, *last_low_pbe_page = NULL;
        struct pbe **last_high_pbe_ptr = &restore_highmem_pblist,
                   *this_high_pbe = NULL;
        unsigned long orig_low_pfn, orig_high_pfn;
        int high_pbes_done = 0, low_pbes_done = 0;
        int low_direct = 0, high_direct = 0, result = 0, i;
        int high_page = 1, high_offset = 0, low_page = 1, low_offset = 0;

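        /*
         * pageset1_map gets three iterators: the default one plus one
         * each (indices 1 and 2) for locating original highmem and
         * lowmem pages below. pageset1_copy_map gets a second iterator
         * for walking the allocated copy pages.
         */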
        memory_bm_set_iterators(pageset1_map, 3);
        memory_bm_position_reset(pageset1_map);

        memory_bm_set_iterators(pageset1_copy_map, 2);
        memory_bm_position_reset(pageset1_copy_map);

        last_low_pbe_ptr = &restore_pblist;

        /* First, allocate pages for the start of our pbe lists. */
        if (high_needed) {
                high_pbe_page = ___toi_get_nonconflicting_page(1);
                if (!high_pbe_page) {
                        result = -ENOMEM;
                        goto out;
                }
                this_high_pbe = (struct pbe *)kmap(high_pbe_page);
                memset(this_high_pbe, 0, PAGE_SIZE);
        }

        low_pbe_page = ___toi_get_nonconflicting_page(0);
        if (!low_pbe_page) {
                result = -ENOMEM;
                goto out;
        }
        this_low_pbe = (struct pbe *)page_address(low_pbe_page);

        /*
         * Next, allocate the number of pages we need.
         */

        i = low_needed + high_needed;

        do {
                int is_high;

                /* Once only lowmem pages remain to be allocated, stop
                 * asking for highmem. */
                if (i == low_needed)
                        flags &= ~__GFP_HIGHMEM;

                page = toi_alloc_page(30, flags);
                BUG_ON(!page);

                SetPagePageset1Copy(page);
                is_high = PageHighMem(page);

                if (PagePageset1(page)) {
                        if (is_high)
                                high_direct++;
                        else
                                low_direct++;
                } else {
                        if (is_high)
                                highallocd++;
                        else
                                lowallocd++;
                }
        } while (--i);

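        /*
         * Copy pages that landed within pageset1 itself ("direct"
         * pages) will receive the image data for their own pfn, so
         * they need no pbe and reduce what we still have to cover.
         */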
        high_needed -= high_direct;
        low_needed -= low_direct;

        /*
         * Do we need to use some lowmem pages for the copies of highmem
         * pages?
         */
        if (high_needed > highallocd) {
                low_pages_for_highmem = high_needed - highallocd;
                high_needed -= low_pages_for_highmem;
                low_needed += low_pages_for_highmem;
        }

        /*
         * Now generate our pbes (which will be used for the atomic
         * restore), and free unneeded pages.
         */
        memory_bm_position_reset(pageset1_copy_map);
        for (pfn = memory_bm_next_pfn_index(pageset1_copy_map, 1);
             pfn != BM_END_OF_MAP;
             pfn = memory_bm_next_pfn_index(pageset1_copy_map, 1)) {
                int is_high;

                page = pfn_to_page(pfn);
                is_high = PageHighMem(page);

                /* Direct pages (copies within pageset1) need no pbe. */
                if (PagePageset1(page))
                        continue;

                /* We're going to use this page. Add a pbe. */
                if (is_high || low_pages_for_highmem) {
                        struct page *orig_page;

                        high_pbes_done++;
                        if (!is_high)
                                low_pages_for_highmem--;

                        /* Find the next original highmem page that
                         * isn't itself a copy page. */
                        do {
                                orig_high_pfn = memory_bm_next_pfn_index(pageset1_map, 1);
                                BUG_ON(orig_high_pfn == BM_END_OF_MAP);
                                orig_page = pfn_to_page(orig_high_pfn);
                        } while (!PageHighMem(orig_page) ||
                                 PagePageset1Copy(orig_page));

                        /* Highmem pbes store the pfn and the struct
                         * page rather than virtual addresses. */
                        this_high_pbe->orig_address = (void *)orig_high_pfn;
                        this_high_pbe->address = page;
                        this_high_pbe->next = NULL;
                        toi_message(TOI_PAGEDIR, TOI_VERBOSE, 0,
                                    "High pbe %d/%d: %p(%lu)=>%p",
                                    high_page, high_offset, page,
                                    orig_high_pfn, orig_page);

                        /* On crossing onto a new pbe page, link the
                         * chain to the page itself and unmap the
                         * previous one. */
                        if (last_high_pbe_page != high_pbe_page) {
                                *last_high_pbe_ptr =
                                        (struct pbe *)high_pbe_page;
                                if (last_high_pbe_page) {
                                        kunmap(last_high_pbe_page);
                                        high_page++;
                                        high_offset = 0;
                                } else
                                        high_offset++;
                                last_high_pbe_page = high_pbe_page;
                        } else {
                                *last_high_pbe_ptr = this_high_pbe;
                                high_offset++;
                        }
                        last_high_pbe_ptr = &this_high_pbe->next;
                        this_high_pbe = get_next_pbe(&high_pbe_page,
                                        this_high_pbe, 1);
                        if (IS_ERR(this_high_pbe)) {
                                printk(KERN_INFO "This high pbe is an error.\n");
                                return -ENOMEM;
                        }
                } else {
                        struct page *orig_page;

                        low_pbes_done++;

                        /* Find the next original lowmem page that
                         * isn't itself a copy page. */
                        do {
                                orig_low_pfn = memory_bm_next_pfn_index(pageset1_map, 2);
                                BUG_ON(orig_low_pfn == BM_END_OF_MAP);
                                orig_page = pfn_to_page(orig_low_pfn);
                        } while (PageHighMem(orig_page) ||
                                 PagePageset1Copy(orig_page));

                        this_low_pbe->orig_address = page_address(orig_page);
                        this_low_pbe->address = page_address(page);
                        this_low_pbe->next = NULL;
                        toi_message(TOI_PAGEDIR, TOI_VERBOSE, 0,
                                    "Low pbe %d/%d: %p(%lu)=>%p",
                                    low_page, low_offset,
                                    this_low_pbe->orig_address,
                                    orig_low_pfn, this_low_pbe->address);
                        *last_low_pbe_ptr = this_low_pbe;
                        last_low_pbe_ptr = &this_low_pbe->next;
                        this_low_pbe = get_next_pbe(&low_pbe_page,
                                        this_low_pbe, 0);
                        if (low_pbe_page != last_low_pbe_page) {
                                if (last_low_pbe_page) {
                                        low_page++;
                                        low_offset = 0;
                                }
                                last_low_pbe_page = low_pbe_page;
                        } else
                                low_offset++;
                        if (IS_ERR(this_low_pbe)) {
                                printk(KERN_INFO "this_low_pbe is an error.\n");
                                return -ENOMEM;
                        }
                }
        }
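        /* Unmap the current high pbe page; if it was never linked into
         * the chain, free it as well. */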
        if (high_pbe_page)
                kunmap(high_pbe_page);

        if (last_high_pbe_page != high_pbe_page) {
                if (last_high_pbe_page)
                        kunmap(last_high_pbe_page);
                toi__free_page(29, high_pbe_page);
        }

        free_conflicting_pages();

out:
        memory_bm_set_iterators(pageset1_map, 1);
        memory_bm_set_iterators(pageset1_copy_map, 1);
        return result;
}
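/*
 * add_boot_kernel_data_pbe - chain a pbe for the boot kernel data
 *
 * Copy toi_bkd into a fresh nonconflicting buffer and append a final
 * pbe so that the atomic restore copies it back over
 * boot_kernel_data_buffer.
 */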
int add_boot_kernel_data_pbe(void)
{
        this_low_pbe->address = (char *)__toi_get_nonconflicting_page();
        if (!this_low_pbe->address) {
                printk(KERN_INFO "Failed to get bkd atomic restore buffer.\n");
                return -ENOMEM;
        }

        toi_bkd.size = sizeof(toi_bkd);
        memcpy(this_low_pbe->address, &toi_bkd, sizeof(toi_bkd));

        *last_low_pbe_ptr = this_low_pbe;
        this_low_pbe->orig_address = (char *)boot_kernel_data_buffer;
        this_low_pbe->next = NULL;

        return 0;
}