[GitHub/mt8127/android_kernel_alcatel_ttab.git] kernel/power/tuxonice_atomic_copy.c
/*
 * kernel/power/tuxonice_atomic_copy.c
 *
 * Copyright 2004-2010 Nigel Cunningham (nigel at tuxonice net)
 *
 * Distributed under GPLv2.
 *
 * Routines for doing the atomic save/restore.
 */

#include <linux/suspend.h>
#include <linux/highmem.h>
#include <linux/cpu.h>
#include <linux/freezer.h>
#include <linux/console.h>
#include <linux/syscore_ops.h>
#include <linux/ftrace.h>
#include <asm/suspend.h>
#include "tuxonice.h"
#include "tuxonice_storage.h"
#include "tuxonice_power_off.h"
#include "tuxonice_ui.h"
#include "tuxonice_io.h"
#include "tuxonice_prepare_image.h"
#include "tuxonice_pageflags.h"
#include "tuxonice_checksum.h"
#include "tuxonice_builtin.h"
#include "tuxonice_atomic_copy.h"
#include "tuxonice_alloc.h"
#include "tuxonice_modules.h"

unsigned long extra_pd1_pages_used;

/**
 * free_pbe_list - free page backup entries used by the atomic copy code.
 * @list: List to free.
 * @highmem: Whether the list is in highmem.
 *
 * Normally, this function isn't used. If, however, we need to abort before
 * doing the atomic copy, we use this to free the pbes previously allocated.
 **/
static void free_pbe_list(struct pbe **list, int highmem)
{
        while (*list) {
                int i;
                struct pbe *free_pbe, *next_page = NULL;
                struct page *page;

                if (highmem) {
                        page = (struct page *)*list;
                        free_pbe = (struct pbe *)kmap(page);
                } else {
                        page = virt_to_page(*list);
                        free_pbe = *list;
                }

                for (i = 0; i < PBES_PER_PAGE; i++) {
                        if (!free_pbe)
                                break;
                        if (highmem)
                                toi__free_page(29, free_pbe->address);
                        else
                                toi_free_page(29, (unsigned long)free_pbe->address);
                        free_pbe = free_pbe->next;
                }

                if (highmem) {
                        if (free_pbe)
                                next_page = free_pbe;
                        kunmap(page);
                } else {
                        if (free_pbe)
                                next_page = free_pbe;
                }

                toi__free_page(29, page);
                *list = (struct pbe *)next_page;
        }
}
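
/*
 * Layout assumed by the walk above, inferred from the code rather than
 * documented elsewhere in this file: each page holds up to PBES_PER_PAGE
 * pbes, and the ->next of the last pbe on a page is reused as the link to
 * the next page of pbes, which is why any leftover free_pbe value becomes
 * next_page.
 */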

/**
 * copyback_post - post atomic-restore actions
 *
 * After doing the atomic restore, we have a few more things to do:
 * 1) We want to retain some values across the restore, so we now copy
 * these from the nosave variables to the normal ones.
 * 2) Set the status flags.
 * 3) Resume devices.
 * 4) Tell userui so it can redraw & restore settings.
 * 5) Reread the page cache.
 **/
void copyback_post(void)
{
        struct toi_boot_kernel_data *bkd =
                (struct toi_boot_kernel_data *)boot_kernel_data_buffer;

        if (toi_activate_storage(1))
                panic("Failed to reactivate our storage.");

        toi_post_atomic_restore_modules(bkd);

        toi_cond_pause(1, "About to reload secondary pagedir.");

        if (read_pageset2(0))
                panic("Unable to successfully reread the page cache.");

        /*
         * If the user wants to sleep again after resuming from full-off,
         * it's most likely to be in order to suspend to ram, so we'll
         * do this check after loading pageset2, to give them the fastest
         * wakeup when they are ready to use the computer again.
         */
        toi_check_resleep();
}

/**
 * toi_copy_pageset1 - do the atomic copy of pageset1
 *
 * Make the atomic copy of pageset1. We can't use copy_page (as we once did)
 * because we can't be sure what side effects it has. On my old Duron, with
 * 3DNOW, kernel_fpu_begin increments preempt count, making our preempt
 * count at resume time 4 instead of 3.
 *
 * We don't want to call kmap_atomic unconditionally because it has the side
 * effect of incrementing the preempt count, which will leave it one too high
 * post resume (the page containing the preempt count will be copied after
 * it's incremented). This is essentially the same problem.
 **/
void toi_copy_pageset1(void)
{
        int i;
        unsigned long source_index, dest_index;

        memory_bm_position_reset(pageset1_map);
        memory_bm_position_reset(pageset1_copy_map);

        source_index = memory_bm_next_pfn(pageset1_map);
        dest_index = memory_bm_next_pfn(pageset1_copy_map);

        for (i = 0; i < pagedir1.size; i++) {
                unsigned long *origvirt, *copyvirt;
                struct page *origpage, *copypage;
                int loop = (PAGE_SIZE / sizeof(unsigned long)) - 1,
                    was_present1, was_present2;

#ifdef CONFIG_TOI_ENHANCE
                if (!pfn_valid(source_index) || !pfn_valid(dest_index)) {
                        pr_emerg("[%s] (%d) dest_index:%lu, source_index:%lu\n",
                                 __func__, i, dest_index, source_index);
                        set_abort_result(TOI_ARCH_PREPARE_FAILED);
                        return;
                }
#endif

                origpage = pfn_to_page(source_index);
                copypage = pfn_to_page(dest_index);

                origvirt = PageHighMem(origpage) ?
                        kmap_atomic(origpage) : page_address(origpage);

                copyvirt = PageHighMem(copypage) ?
                        kmap_atomic(copypage) : page_address(copypage);

                was_present1 = kernel_page_present(origpage);
                if (!was_present1)
                        kernel_map_pages(origpage, 1, 1);

                was_present2 = kernel_page_present(copypage);
                if (!was_present2)
                        kernel_map_pages(copypage, 1, 1);

                while (loop >= 0) {
                        *(copyvirt + loop) = *(origvirt + loop);
                        loop--;
                }

                if (!was_present1)
                        kernel_map_pages(origpage, 1, 0);

                if (!was_present2)
                        kernel_map_pages(copypage, 1, 0);

                if (PageHighMem(origpage))
                        kunmap_atomic(origvirt);

                if (PageHighMem(copypage))
                        kunmap_atomic(copyvirt);

                source_index = memory_bm_next_pfn(pageset1_map);
                dest_index = memory_bm_next_pfn(pageset1_copy_map);
        }
}
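
/*
 * Note, inferred from the loops above rather than stated in the original
 * comments: pageset1_map and pageset1_copy_map are walked in lockstep, so
 * the page at the i-th set pfn of pageset1_map is copied into the page at
 * the i-th set pfn of pageset1_copy_map, one unsigned long at a time.
 */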

/**
 * __toi_post_context_save - steps after saving the cpu context
 *
 * Steps taken after saving the CPU state to make the actual
 * atomic copy.
 *
 * Called from swsusp_save in snapshot.c via toi_post_context_save.
 **/
int __toi_post_context_save(void)
{
        unsigned long old_ps1_size = pagedir1.size;

        check_checksums();

        free_checksum_pages();

        toi_recalculate_image_contents(1);

        extra_pd1_pages_used = pagedir1.size > old_ps1_size ?
                pagedir1.size - old_ps1_size : 0;

        if (extra_pd1_pages_used > extra_pd1_pages_allowance) {
                printk(KERN_INFO "Pageset1 has grown by %lu pages. "
                       "extra_pages_allowance is currently only %lu.\n",
                       pagedir1.size - old_ps1_size, extra_pd1_pages_allowance);

                /*
                 * Highlevel code will see this, clear the state and
                 * retry if we haven't already done so twice.
                 */
                if (any_to_free(1)) {
                        set_abort_result(TOI_EXTRA_PAGES_ALLOW_TOO_SMALL);
                        return 1;
                }
                if (try_allocate_extra_memory()) {
                        printk(KERN_INFO "Failed to allocate the extra memory"
                               " needed. Restarting the process.\n");
                        set_abort_result(TOI_EXTRA_PAGES_ALLOW_TOO_SMALL);
                        return 1;
                }
                printk(KERN_INFO "However, it looks like there's enough"
                       " free ram and storage to handle this, so"
                       " continuing anyway.\n");
                /*
                 * What if try_allocate_extra_memory above calls
                 * toi_allocate_extra_pagedir_memory and it allocs a new
                 * slab page via toi_kzalloc which should be in ps1? So...
                 */
                toi_recalculate_image_contents(1);
        }

        if (!test_action_state(TOI_TEST_FILTER_SPEED) &&
            !test_action_state(TOI_TEST_BIO))
                toi_copy_pageset1();

        return 0;
}

/**
 * toi_hibernate - high level code for doing the atomic copy
 *
 * High-level code which prepares to do the atomic copy. Loosely based
 * on the swsusp version, but with the following twists:
 * - We set toi_running so the swsusp code uses our code paths.
 * - We give better feedback regarding what goes wrong if there is a
 *   problem.
 * - We use an extra function to call the assembly, just in case this code
 *   is in a module (return address).
 **/
int toi_hibernate(void)
{
        int error;

        toi_running = 1;        /* For the swsusp code we use :< */

        error = toi_lowlevel_builtin();

        if (!error) {
                struct toi_boot_kernel_data *bkd =
                        (struct toi_boot_kernel_data *)boot_kernel_data_buffer;

                /*
                 * The boot kernel's data may be larger (newer version) or
                 * smaller (older version) than ours. Copy the minimum
                 * of the two sizes, so that we don't overwrite valid values
                 * from pre-atomic copy.
                 */

                memcpy(&toi_bkd, (char *)boot_kernel_data_buffer,
                       min_t(int, sizeof(struct toi_boot_kernel_data),
                             bkd->size));
        }

        toi_running = 0;
        return error;
}
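
/*
 * Worked example of the min_t() sizing above (sizes are hypothetical): if
 * the boot kernel recorded bkd->size == 96 while this kernel's
 * sizeof(struct toi_boot_kernel_data) == 128, only the first 96 bytes are
 * copied back and the newer trailing fields of toi_bkd keep their
 * pre-atomic-copy values; in the opposite case only our smaller size is
 * copied.
 */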

/**
 * toi_atomic_restore - prepare to do the atomic restore
 *
 * Get ready to do the atomic restore. This part gets us into the same
 * state we were in prior to calling do_toi_lowlevel when hibernating:
 * hot-unplugging secondary cpus and freezing processes, before starting
 * the thread that will do the restore.
 **/
int toi_atomic_restore(void)
{
        int error;

        toi_running = 1;

        toi_prepare_status(DONT_CLEAR_BAR, "Atomic restore.");

        memcpy(&toi_bkd.toi_nosave_commandline, saved_command_line,
               strlen(saved_command_line));

        toi_pre_atomic_restore_modules(&toi_bkd);

        if (add_boot_kernel_data_pbe())
                goto Failed;

        toi_prepare_status(DONT_CLEAR_BAR, "Doing atomic copy/restore.");

        if (toi_go_atomic(PMSG_QUIESCE, 0))
                goto Failed;

        /* We'll ignore saved state, but this gets preempt count (etc) right */
        save_processor_state();

        error = swsusp_arch_resume();
        /*
         * Code below is only ever reached in case of failure. Otherwise
         * execution continues at the place where swsusp_arch_suspend was
         * called.
         *
         * We don't know whether it's safe to continue (this shouldn't
         * happen), so let's err on the side of caution.
         */
        BUG();

Failed:
        free_pbe_list(&restore_pblist, 0);
#ifdef CONFIG_HIGHMEM
        pr_warn("[%s] 0x%p 0x%p 0x%p\n", __func__,
                restore_highmem_pblist->address,
                restore_highmem_pblist->orig_address,
                restore_highmem_pblist->next);
        if (restore_highmem_pblist->next != NULL)
                free_pbe_list(&restore_highmem_pblist, 1);
#endif
        toi_running = 0;
        return 1;
}

/**
 * toi_go_atomic - do the actual atomic copy/restore
 * @state: The state to use for dpm_suspend_start & power_down calls.
 * @suspend_time: Whether we're suspending or resuming.
 **/
int toi_go_atomic(pm_message_t state, int suspend_time)
{
        if (suspend_time) {
                if (platform_begin(1)) {
                        set_abort_result(TOI_PLATFORM_PREP_FAILED);
                        toi_end_atomic(ATOMIC_STEP_PLATFORM_END, suspend_time, 3);
                        hib_log("FAILED @line:%d suspend(%d) pm_state(%d)\n",
                                __LINE__, suspend_time, state.event);
                        return 1;
                }

                if (dpm_prepare(PMSG_FREEZE)) {
                        set_abort_result(TOI_DPM_PREPARE_FAILED);
                        dpm_complete(PMSG_RECOVER);
                        toi_end_atomic(ATOMIC_STEP_PLATFORM_END, suspend_time, 3);
                        hib_log("FAILED @line:%d suspend(%d) pm_state(%d)\n",
                                __LINE__, suspend_time, state.event);
                        return 1;
                }
        }

        suspend_console();
        ftrace_stop();
        pm_restrict_gfp_mask();

        if (suspend_time) {
#if 0 /* FIXME: jonathan.jmchen: trick code here to let dpm_suspend succeed; NEED to find out the root cause!! */
                if (events_check_enabled) {
                        hib_log("play trick here set events_check_enabled(%d) = false!!\n",
                                events_check_enabled);
                        events_check_enabled = false;
                }
#endif
                if (dpm_suspend(state)) {
                        set_abort_result(TOI_DPM_SUSPEND_FAILED);
                        toi_end_atomic(ATOMIC_STEP_DEVICE_RESUME, suspend_time, 3);
                        hib_log("FAILED @line:%d suspend(%d) pm_state(%d) toi_result(%#lx)\n",
                                __LINE__, suspend_time, state.event, toi_result);
                        return 1;
                }
        } else {
                if (dpm_suspend_start(state)) {
                        set_abort_result(TOI_DPM_SUSPEND_FAILED);
                        toi_end_atomic(ATOMIC_STEP_DEVICE_RESUME, suspend_time, 3);
                        hib_log("FAILED @line:%d suspend(%d) pm_state(%d) toi_result(%#lx)\n",
                                __LINE__, suspend_time, state.event, toi_result);
                        return 1;
                }
        }

        /* At this point, dpm_suspend_start() has been called, but *not*
         * dpm_suspend_noirq(). We *must* dpm_suspend_noirq() now.
         * Otherwise, drivers for some devices (e.g. interrupt controllers)
         * become desynchronized with the actual state of the hardware
         * at resume time, and evil weirdness ensues.
         */

        if (dpm_suspend_end(state)) {
                set_abort_result(TOI_DEVICE_REFUSED);
                toi_end_atomic(ATOMIC_STEP_DEVICE_RESUME, suspend_time, 1);
                hib_log("FAILED @line:%d suspend(%d) pm_state(%d) toi_result(%#lx)\n",
                        __LINE__, suspend_time, state.event, toi_result);
                return 1;
        }

        if (suspend_time) {
                if (platform_pre_snapshot(1))
                        set_abort_result(TOI_PRE_SNAPSHOT_FAILED);
        } else {
                if (platform_pre_restore(1))
                        set_abort_result(TOI_PRE_RESTORE_FAILED);
        }

        if (test_result_state(TOI_ABORTED)) {
                toi_end_atomic(ATOMIC_STEP_PLATFORM_FINISH, suspend_time, 1);
                hib_log("FAILED @line:%d suspend(%d) pm_state(%d) toi_result(%#lx)\n",
                        __LINE__, suspend_time, state.event, toi_result);
                return 1;
        }

        if (test_action_state(TOI_LATE_CPU_HOTPLUG)) {
                if (disable_nonboot_cpus()) {
                        set_abort_result(TOI_CPU_HOTPLUG_FAILED);
                        toi_end_atomic(ATOMIC_STEP_CPU_HOTPLUG, suspend_time, 1);
                        hib_log("FAILED @line:%d suspend(%d) pm_state(%d) toi_result(%#lx)\n",
                                __LINE__, suspend_time, state.event, toi_result);
                        return 1;
                }
        }

        local_irq_disable();

        if (syscore_suspend()) {
                set_abort_result(TOI_SYSCORE_REFUSED);
                toi_end_atomic(ATOMIC_STEP_IRQS, suspend_time, 1);
                hib_log("FAILED @line:%d suspend(%d) pm_state(%d) toi_result(%#lx)\n",
                        __LINE__, suspend_time, state.event, toi_result);
                return 1;
        }

        if (suspend_time && pm_wakeup_pending()) {
                set_abort_result(TOI_WAKEUP_EVENT);
                toi_end_atomic(ATOMIC_STEP_SYSCORE_RESUME, suspend_time, 1);
                hib_log("FAILED @line:%d suspend(%d) pm_state(%d) toi_result(%#lx)\n",
                        __LINE__, suspend_time, state.event, toi_result);
                return 1;
        }
        hib_log("SUCCEEDED @line:%d suspend(%d) pm_state(%d)\n", __LINE__,
                suspend_time, state.event);
        return 0;
}
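
/*
 * Each failure path in toi_go_atomic() above hands toi_end_atomic() the
 * first step that still needs unwinding (ATOMIC_STEP_PLATFORM_END,
 * ATOMIC_STEP_DEVICE_RESUME, ATOMIC_STEP_CPU_HOTPLUG, ...), so the
 * fall-through switch below resumes cleanup from exactly the point that was
 * reached; this pairing is inferred from the call sites rather than
 * documented explicitly.
 */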

/**
 * toi_end_atomic - post atomic copy/restore routines
 * @stage: What step to start at.
 * @suspend_time: Whether we're suspending or resuming.
 * @error: Whether we're recovering from an error.
 **/
void toi_end_atomic(int stage, int suspend_time, int error)
{
        pm_message_t msg = suspend_time ?
                (error ? PMSG_RECOVER : PMSG_THAW) : PMSG_RESTORE;

        switch (stage) {
        case ATOMIC_ALL_STEPS:
                if (!suspend_time) {
                        events_check_enabled = false;
                        platform_leave(1);
                }
                /* Fall through */
        case ATOMIC_STEP_SYSCORE_RESUME:
                syscore_resume();
                /* Fall through */
        case ATOMIC_STEP_IRQS:
                local_irq_enable();
                /* Fall through */
        case ATOMIC_STEP_CPU_HOTPLUG:
                if (test_action_state(TOI_LATE_CPU_HOTPLUG))
                        enable_nonboot_cpus();
                /* Fall through */
        case ATOMIC_STEP_PLATFORM_FINISH:
                if (!suspend_time && (error & 2))
                        platform_restore_cleanup(1);
                else
                        platform_finish(1);
                dpm_resume_start(msg);
                /* Fall through */
        case ATOMIC_STEP_DEVICE_RESUME:
                if (suspend_time && (error & 2))
                        platform_recover(1);
                dpm_resume(msg);
                if (error || !toi_in_suspend())
                        pm_restore_gfp_mask();
                ftrace_start();
                resume_console();
                /* Fall through */
        case ATOMIC_STEP_DPM_COMPLETE:
                dpm_complete(msg);
                /* Fall through */
        case ATOMIC_STEP_PLATFORM_END:
                platform_end(1);

                toi_prepare_status(DONT_CLEAR_BAR, "Post atomic.");
        }
}