/*
 * kernel/power/suspend.c - Suspend to RAM and standby functionality.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
 *
 * This file is released under the GPLv2.
 */

#include <linux/string.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>
#include <linux/gfp.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/suspend.h>
#include <linux/syscore_ops.h>
#include <linux/ftrace.h>
#include <linux/rtc.h>
#include <trace/events/power.h>

#include "power.h"

struct pm_sleep_state pm_states[PM_SUSPEND_MAX] = {
#ifdef CONFIG_EARLYSUSPEND
	[PM_SUSPEND_ON] = { .label = "on", },
#endif
	[PM_SUSPEND_FREEZE] = { .label = "freeze", .state = PM_SUSPEND_FREEZE },
	[PM_SUSPEND_STANDBY] = { .label = "standby", },
	[PM_SUSPEND_MEM] = { .label = "mem", },
};

static const struct platform_suspend_ops *suspend_ops;

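/*
 * States deeper than PM_SUSPEND_FREEZE (standby and mem) need the platform
 * suspend_ops; suspend-to-idle ("freeze") is handled without them.
 */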
static bool need_suspend_ops(suspend_state_t state)
{
	return !!(state > PM_SUSPEND_FREEZE);
}

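/*
 * Suspend-to-idle ("freeze") support: freeze_enter() parks the suspending
 * task on this waitqueue, with processes frozen and devices suspended, until
 * freeze_wake() is called (typically once a wakeup event is reported).
 */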
static DECLARE_WAIT_QUEUE_HEAD(suspend_freeze_wait_head);
static bool suspend_freeze_wake;

static void freeze_begin(void)
{
	suspend_freeze_wake = false;
}

static void freeze_enter(void)
{
	wait_event(suspend_freeze_wait_head, suspend_freeze_wake);
}

void freeze_wake(void)
{
	suspend_freeze_wake = true;
	wake_up(&suspend_freeze_wait_head);
}
EXPORT_SYMBOL_GPL(freeze_wake);

bool valid_state(suspend_state_t state)
{
	/*
	 * PM_SUSPEND_STANDBY and PM_SUSPEND_MEM states need low level
	 * support and need to be valid to the low level
	 * implementation, no valid callback implies that none are valid.
	 */
	return suspend_ops && suspend_ops->valid && suspend_ops->valid(state);
}

/**
 * suspend_set_ops - Set the global suspend method table.
 * @ops: Suspend operations to use.
 */
void suspend_set_ops(const struct platform_suspend_ops *ops)
{
	suspend_state_t i;

	lock_system_sleep();

	suspend_ops = ops;
	for (i = PM_SUSPEND_STANDBY; i <= PM_SUSPEND_MEM; i++)
		pm_states[i].state = valid_state(i) ? i : 0;

	unlock_system_sleep();
}
EXPORT_SYMBOL_GPL(suspend_set_ops);

/**
 * suspend_valid_only_mem - Generic memory-only valid callback.
 *
 * Platform drivers that implement mem suspend only and only need to check for
 * that in their .valid() callback can use this instead of rolling their own
 * .valid() callback.
 */
int suspend_valid_only_mem(suspend_state_t state)
{
	return state == PM_SUSPEND_MEM;
}
EXPORT_SYMBOL_GPL(suspend_valid_only_mem);
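
/*
 * Illustrative sketch (not part of this file): a platform driver that only
 * supports suspend-to-RAM would typically use suspend_valid_only_mem() as its
 * .valid callback, implement .enter to program the SoC-specific sleep entry,
 * and register the table once at init time. The names foo_pm_enter,
 * foo_pm_ops and foo_pm_init below are hypothetical.
 *
 *	static int foo_pm_enter(suspend_state_t state)
 *	{
 *		return 0;
 *	}
 *
 *	static const struct platform_suspend_ops foo_pm_ops = {
 *		.valid	= suspend_valid_only_mem,
 *		.enter	= foo_pm_enter,
 *	};
 *
 *	static int __init foo_pm_init(void)
 *	{
 *		suspend_set_ops(&foo_pm_ops);
 *		return 0;
 *	}
 */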

static int suspend_test(int level)
{
#ifdef CONFIG_PM_DEBUG
	if (pm_test_level == level) {
		printk(KERN_INFO "suspend debug: Waiting for 5 seconds.\n");
		mdelay(5000);
		return 1;
	}
#endif /* !CONFIG_PM_DEBUG */
	return 0;
}
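
/*
 * Note: with CONFIG_PM_DEBUG set, pm_test_level is normally chosen by writing
 * one of none/core/processors/platform/devices/freezer to /sys/power/pm_test;
 * suspend_test() then aborts the suspend sequence at the matching point after
 * the 5 second delay above, so the corresponding resume path gets exercised.
 */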

/**
 * suspend_prepare - Prepare for entering system sleep state.
 *
 * Common code run for every system sleep state that can be entered (except for
 * hibernation). Run suspend notifiers, allocate the "suspend" console and
 * freeze processes.
 */
static int suspend_prepare(suspend_state_t state)
{
	int error;

	if (need_suspend_ops(state) && (!suspend_ops || !suspend_ops->enter))
		return -EPERM;

	pm_prepare_console();

	error = pm_notifier_call_chain(PM_SUSPEND_PREPARE);
	if (error)
		goto Finish;

	error = suspend_freeze_processes();
	if (!error)
		return 0;

	suspend_stats.failed_freeze++;
	dpm_save_failed_step(SUSPEND_FREEZE);
 Finish:
	pm_notifier_call_chain(PM_POST_SUSPEND);
	pm_restore_console();
	return error;
}

/* default implementation */
void __attribute__ ((weak)) arch_suspend_disable_irqs(void)
{
	local_irq_disable();
}

/* default implementation */
void __attribute__ ((weak)) arch_suspend_enable_irqs(void)
{
	local_irq_enable();
}

/**
 * suspend_enter - Make the system enter the given sleep state.
 * @state: System sleep state to enter.
 * @wakeup: Returns information that the sleep state should not be re-entered.
 *
 * This function should be called after devices have been suspended.
 */
static int suspend_enter(suspend_state_t state, bool *wakeup)
{
	int error;

	if (need_suspend_ops(state) && suspend_ops->prepare) {
		error = suspend_ops->prepare();
		if (error)
			goto Platform_finish;
	}

	error = dpm_suspend_end(PMSG_SUSPEND);
	if (error) {
		printk(KERN_ERR "PM: Some devices failed to power down\n");
		goto Platform_finish;
	}

	if (need_suspend_ops(state) && suspend_ops->prepare_late) {
		error = suspend_ops->prepare_late();
		if (error)
			goto Platform_wake;
	}

	if (suspend_test(TEST_PLATFORM))
		goto Platform_wake;

	/*
	 * PM_SUSPEND_FREEZE equals
	 * frozen processes + suspended devices + idle processors.
	 * Thus we should invoke freeze_enter() soon after
	 * all the devices are suspended.
	 */
	if (state == PM_SUSPEND_FREEZE) {
		freeze_enter();
		goto Platform_wake;
	}

	error = disable_nonboot_cpus();
	if (error || suspend_test(TEST_CPUS))
		goto Enable_cpus;

	arch_suspend_disable_irqs();
	BUG_ON(!irqs_disabled());

	error = syscore_suspend();
	if (!error) {
		*wakeup = pm_wakeup_pending();
		if (!(suspend_test(TEST_CORE) || *wakeup)) {
			error = suspend_ops->enter(state);
			events_check_enabled = false;
		}
		syscore_resume();
	}

	arch_suspend_enable_irqs();
	BUG_ON(irqs_disabled());

 Enable_cpus:
	enable_nonboot_cpus();

 Platform_wake:
	if (need_suspend_ops(state) && suspend_ops->wake)
		suspend_ops->wake();

	dpm_resume_start(PMSG_RESUME);

 Platform_finish:
	if (need_suspend_ops(state) && suspend_ops->finish)
		suspend_ops->finish();

	return error;
}

/**
 * suspend_devices_and_enter - Suspend devices and enter system sleep state.
 * @state: System sleep state to enter.
 */
int suspend_devices_and_enter(suspend_state_t state)
{
	int error;
	bool wakeup = false;

	if (need_suspend_ops(state) && !suspend_ops)
		return -ENOSYS;

#ifdef CONFIG_TOI
	drop_pagecache();
#endif

	trace_machine_suspend(state);
	if (need_suspend_ops(state) && suspend_ops->begin) {
		error = suspend_ops->begin(state);
		if (error)
			goto Close;
	}
	suspend_console();
	ftrace_stop();
	suspend_test_start();
	error = dpm_suspend_start(PMSG_SUSPEND);
	if (error) {
		printk(KERN_ERR "PM: Some devices failed to suspend\n");
		goto Recover_platform;
	}
	suspend_test_finish("suspend devices");
	if (suspend_test(TEST_DEVICES))
		goto Recover_platform;

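	/*
	 * Re-enter the sleep state as long as suspend_enter() succeeds, no
	 * wakeup event is pending and the platform's optional suspend_again()
	 * callback asks for another cycle (e.g. to poll hardware periodically
	 * without fully resuming).
	 */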
	do {
		error = suspend_enter(state, &wakeup);
	} while (!error && !wakeup && need_suspend_ops(state)
		&& suspend_ops->suspend_again && suspend_ops->suspend_again());

 Resume_devices:
	suspend_test_start();
	dpm_resume_end(PMSG_RESUME);
	suspend_test_finish("resume devices");
	ftrace_start();
	resume_console();
 Close:
	if (need_suspend_ops(state) && suspend_ops->end)
		suspend_ops->end();
	trace_machine_suspend(PWR_EVENT_EXIT);
	return error;

 Recover_platform:
	if (need_suspend_ops(state) && suspend_ops->recover)
		suspend_ops->recover();
	goto Resume_devices;
}
EXPORT_SYMBOL_GPL(suspend_devices_and_enter);

/**
 * suspend_finish - Clean up before finishing the suspend sequence.
 *
 * Call platform code to clean up, restart processes, and free the console that
 * we've allocated. This routine is not called for hibernation.
 */
static void suspend_finish(void)
{
	suspend_thaw_processes();
	pm_notifier_call_chain(PM_POST_SUSPEND);
	pm_restore_console();
}

/**
 * enter_state - Do common work needed to enter system sleep state.
 * @state: System sleep state to enter.
 *
 * Make sure that no one else is trying to put the system into a sleep state.
 * Fail if that's not the case. Otherwise, prepare for system suspend, make the
 * system enter the given sleep state and clean up after wakeup.
 */
//<20130327> <marc.huang> merge from android kernel 3.0 - modify enter_state function to non-static
int enter_state(suspend_state_t state)
{
	int error;

	if (state == PM_SUSPEND_FREEZE) {
#ifdef CONFIG_PM_DEBUG
		if (pm_test_level != TEST_NONE && pm_test_level <= TEST_CPUS) {
332 pr_warning("PM: Unsupported test mode for freeze state,"
333 "please choose none/freezer/devices/platform.\n");
			return -EAGAIN;
		}
#endif
	} else if (!valid_state(state)) {
		return -EINVAL;
	}
	if (!mutex_trylock(&pm_mutex))
		return -EBUSY;

	if (state == PM_SUSPEND_FREEZE)
		freeze_begin();

	printk(KERN_INFO "PM: Syncing filesystems ... ");
#if 1
	sys_sync();
#else /* sys_sync WQ ver2.0 use */
	//[MTK]
	suspend_syssync_enqueue();
	suspend_check_sys_sync_done();
#endif
	printk("done.\n");

	pr_debug("PM: Preparing system for %s sleep\n", pm_states[state].label);
	error = suspend_prepare(state);
	if (error)
		goto Unlock;

	if (suspend_test(TEST_FREEZER))
		goto Finish;

	pr_debug("PM: Entering %s sleep\n", pm_states[state].label);
	pm_restrict_gfp_mask();
	error = suspend_devices_and_enter(state);
	pm_restore_gfp_mask();

 Finish:
	pr_debug("PM: Finishing wakeup.\n");
	suspend_finish();
 Unlock:
	mutex_unlock(&pm_mutex);
	return error;
}

static void pm_suspend_marker(char *annotation)
{
	struct timespec ts;
	struct rtc_time tm;

	getnstimeofday(&ts);
	rtc_time_to_tm(ts.tv_sec, &tm);
	pr_info("PM: suspend %s %d-%02d-%02d %02d:%02d:%02d.%09lu UTC\n",
		annotation, tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
		tm.tm_hour, tm.tm_min, tm.tm_sec, ts.tv_nsec);
}

/**
 * pm_suspend - Externally visible function for suspending the system.
 * @state: System sleep state to enter.
 *
 * Check if the value of @state represents one of the supported states,
 * execute enter_state() and update system suspend statistics.
 */
int pm_suspend(suspend_state_t state)
{
	int error;

	if (state <= PM_SUSPEND_ON || state >= PM_SUSPEND_MAX)
		return -EINVAL;

	pm_suspend_marker("entry");
	error = enter_state(state);
	if (error) {
		suspend_stats.fail++;
		dpm_save_failed_errno(error);
	} else {
		suspend_stats.success++;
	}
	pm_suspend_marker("exit");
	return error;
}
EXPORT_SYMBOL(pm_suspend);
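
/*
 * Note: pm_suspend() is the externally visible entry point; in this kernel
 * writing "mem", "standby" or "freeze" to /sys/power/state is expected to
 * reach it via state_store() in kernel/power/main.c. A minimal in-kernel
 * caller would look roughly like the sketch below (illustrative only):
 *
 *	int error = pm_suspend(PM_SUSPEND_MEM);
 *	if (error)
 *		pr_err("suspend failed: %d\n", error);
 */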