/*
 * include/linux/suspend.h
 *
 * System suspend and hibernation (power management) interfaces.
 *
 * NOTE(review): recovered from a gitweb blame view of the mt8127 kernel
 * tree (last commit shown: "Remove 'recurse into child resources' logic
 * from 'reserve_region_with_split()'"); blame artifacts stripped.
 */
1#ifndef _LINUX_SUSPEND_H
2#define _LINUX_SUSPEND_H
1da177e4 3
1da177e4
LT
4#include <linux/swap.h>
5#include <linux/notifier.h>
1da177e4
LT
6#include <linux/init.h>
7#include <linux/pm.h>
7be98234 8#include <linux/mm.h>
95d9ffbe
RW
9#include <asm/errno.h>
10
11#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_VT) && defined(CONFIG_VT_CONSOLE)
b6f448e9 12extern void pm_set_vt_switch(int);
95d9ffbe
RW
13extern int pm_prepare_console(void);
14extern void pm_restore_console(void);
15#else
b6f448e9
AS
/* No-op stubs used when VT console switching is not configured. */
static inline void pm_set_vt_switch(int do_switch)
{
}

/* Returns 0 (success) so callers need no special-casing. */
static inline int pm_prepare_console(void)
{
	return 0;
}

static inline void pm_restore_console(void)
{
}
95d9ffbe
RW
28#endif
29
30typedef int __bitwise suspend_state_t;
31
32#define PM_SUSPEND_ON ((__force suspend_state_t) 0)
33#define PM_SUSPEND_STANDBY ((__force suspend_state_t) 1)
34#define PM_SUSPEND_MEM ((__force suspend_state_t) 3)
35#define PM_SUSPEND_MAX ((__force suspend_state_t) 4)
36
37/**
26398a70
RW
38 * struct platform_suspend_ops - Callbacks for managing platform dependent
39 * system sleep states.
95d9ffbe
RW
40 *
41 * @valid: Callback to determine if given system sleep state is supported by
42 * the platform.
43 * Valid (ie. supported) states are advertised in /sys/power/state. Note
44 * that it still may be impossible to enter given system sleep state if the
45 * conditions aren't right.
26398a70
RW
46 * There is the %suspend_valid_only_mem function available that can be
47 * assigned to this if the platform only supports mem sleep.
95d9ffbe 48 *
c697eece
RW
49 * @begin: Initialise a transition to given system sleep state.
50 * @begin() is executed right prior to suspending devices. The information
51 * conveyed to the platform code by @begin() should be disregarded by it as
52 * soon as @end() is executed. If @begin() fails (ie. returns nonzero),
95d9ffbe
RW
53 * @prepare(), @enter() and @finish() will not be called by the PM core.
54 * This callback is optional. However, if it is implemented, the argument
c697eece 55 * passed to @enter() is redundant and should be ignored.
95d9ffbe
RW
56 *
57 * @prepare: Prepare the platform for entering the system sleep state indicated
c697eece 58 * by @begin().
95d9ffbe
RW
59 * @prepare() is called right after devices have been suspended (ie. the
60 * appropriate .suspend() method has been executed for each device) and
61 * before the nonboot CPUs are disabled (it is executed with IRQs enabled).
62 * This callback is optional. It returns 0 on success or a negative
63 * error code otherwise, in which case the system cannot enter the desired
64 * sleep state (@enter() and @finish() will not be called in that case).
65 *
c697eece
RW
66 * @enter: Enter the system sleep state indicated by @begin() or represented by
67 * the argument if @begin() is not implemented.
95d9ffbe
RW
68 * This callback is mandatory. It returns 0 on success or a negative
69 * error code otherwise, in which case the system cannot enter the desired
70 * sleep state.
71 *
72 * @finish: Called when the system has just left a sleep state, right after
73 * the nonboot CPUs have been enabled and before devices are resumed (it is
e6c5eb95 74 * executed with IRQs enabled).
95d9ffbe
RW
75 * This callback is optional, but should be implemented by the platforms
76 * that implement @prepare(). If implemented, it is always called after
77 * @enter() (even if @enter() fails).
c697eece
RW
78 *
79 * @end: Called by the PM core right after resuming devices, to indicate to
80 * the platform that the system has returned to the working state or
81 * the transition to the sleep state has been aborted.
82 * This callback is optional, but should be implemented by the platforms
83 * that implement @begin(), but platforms implementing @begin() should
84 * also provide a @end() which cleans up transitions aborted before
85 * @enter().
d8f3de0d
RW
86 *
87 * @recover: Recover the platform from a suspend failure.
88 * Called by the PM core if the suspending of devices fails.
89 * This callback is optional and should only be implemented by platforms
90 * which require special recovery actions in that situation.
95d9ffbe 91 */
26398a70 92struct platform_suspend_ops {
95d9ffbe 93 int (*valid)(suspend_state_t state);
c697eece 94 int (*begin)(suspend_state_t state);
e6c5eb95 95 int (*prepare)(void);
95d9ffbe 96 int (*enter)(suspend_state_t state);
e6c5eb95 97 void (*finish)(void);
c697eece 98 void (*end)(void);
d8f3de0d 99 void (*recover)(void);
95d9ffbe
RW
100};
101
102#ifdef CONFIG_SUSPEND
95d9ffbe 103/**
26398a70
RW
104 * suspend_set_ops - set platform dependent suspend operations
105 * @ops: The new suspend operations to set.
95d9ffbe 106 */
26398a70
RW
107extern void suspend_set_ops(struct platform_suspend_ops *ops);
108extern int suspend_valid_only_mem(suspend_state_t state);
95d9ffbe
RW
109
110/**
111 * arch_suspend_disable_irqs - disable IRQs for suspend
112 *
113 * Disables IRQs (in the default case). This is a weak symbol in the common
114 * code and thus allows architectures to override it if more needs to be
115 * done. Not called for suspend to disk.
116 */
117extern void arch_suspend_disable_irqs(void);
118
119/**
120 * arch_suspend_enable_irqs - enable IRQs after suspend
121 *
122 * Enables IRQs (in the default case). This is a weak symbol in the common
123 * code and thus allows architectures to override it if more needs to be
124 * done. Not called for suspend to disk.
125 */
126extern void arch_suspend_enable_irqs(void);
127
128extern int pm_suspend(suspend_state_t state);
129#else /* !CONFIG_SUSPEND */
130#define suspend_valid_only_mem NULL
131
26398a70 132static inline void suspend_set_ops(struct platform_suspend_ops *ops) {}
95d9ffbe
RW
133static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; }
134#endif /* !CONFIG_SUSPEND */

/*
 * struct pbe ("page backup entry") is used for creating lists of pages that
 * should be restored atomically during the resume from disk, because the
 * page frames they have occupied before the suspend are in use.
 */
struct pbe {
	void *address;		/* address of the copy */
	void *orig_address;	/* original address of a page */
	struct pbe *next;	/* next entry in the restore list */
};

/* mm/page_alloc.c */
extern void mark_free_pages(struct zone *zone);

/**
 * struct platform_hibernation_ops - hibernation platform support
 *
 * The methods in this structure allow a platform to carry out special
 * operations required by it during a hibernation transition.
 *
 * All the methods below, except for @recover(), must be implemented.
 *
 * @begin: Tell the platform driver that we're starting hibernation.
 *	Called right after shrinking memory and before freezing devices.
 *
 * @end: Called by the PM core right after resuming devices, to indicate to
 *	the platform that the system has returned to the working state.
 *
 * @pre_snapshot: Prepare the platform for creating the hibernation image.
 *	Called right after devices have been frozen and before the nonboot
 *	CPUs are disabled (runs with IRQs on).
 *
 * @finish: Restore the previous state of the platform after the hibernation
 *	image has been created *or* put the platform into the normal operation
 *	mode after the hibernation (the same method is executed in both cases).
 *	Called right after the nonboot CPUs have been enabled and before
 *	thawing devices (runs with IRQs on).
 *
 * @prepare: Prepare the platform for entering the low power state.
 *	Called right after the hibernation image has been saved and before
 *	devices are prepared for entering the low power state.
 *
 * @enter: Put the system into the low power state after the hibernation image
 *	has been saved to disk.
 *	Called after the nonboot CPUs have been disabled and all of the low
 *	level devices have been shut down (runs with IRQs off).
 *
 * @leave: Perform the first stage of the cleanup after the hibernation sleep
 *	state has been left (the original text referenced a @set_target()
 *	member that does not exist in this structure).
 *	Called right after the control has been passed from the boot kernel to
 *	the image kernel, before the nonboot CPUs are enabled and before devices
 *	are resumed.  Executed with interrupts disabled.
 *
 * @pre_restore: Prepare system for the restoration from a hibernation image.
 *	Called right after devices have been frozen and before the nonboot
 *	CPUs are disabled (runs with IRQs on).
 *
 * @restore_cleanup: Clean up after a failing image restoration.
 *	Called right after the nonboot CPUs have been enabled and before
 *	thawing devices (runs with IRQs on).
 *
 * @recover: Recover the platform from a failure to suspend devices.
 *	Called by the PM core if the suspending of devices during hibernation
 *	fails.  This callback is optional and should only be implemented by
 *	platforms which require special recovery actions in that situation.
 */
struct platform_hibernation_ops {
	int (*begin)(void);
	void (*end)(void);
	int (*pre_snapshot)(void);
	void (*finish)(void);
	int (*prepare)(void);
	int (*enter)(void);
	void (*leave)(void);
	int (*pre_restore)(void);
	void (*restore_cleanup)(void);
	void (*recover)(void);
};

#ifdef CONFIG_HIBERNATION
/* kernel/power/snapshot.c */
extern void __register_nosave_region(unsigned long b, unsigned long e, int km);
/* Register [b, e) as a region whose contents need not be saved in the image. */
static inline void __init register_nosave_region(unsigned long b, unsigned long e)
{
	__register_nosave_region(b, e, 0);
}
/* Same, but km=1: the region is registered after mem_init() (kmalloc usable). */
static inline void __init register_nosave_region_late(unsigned long b, unsigned long e)
{
	__register_nosave_region(b, e, 1);
}
extern int swsusp_page_is_forbidden(struct page *);
extern void swsusp_set_page_free(struct page *);
extern void swsusp_unset_page_free(struct page *);
extern unsigned long get_safe_page(gfp_t gfp_mask);

extern void hibernation_set_ops(struct platform_hibernation_ops *ops);
extern int hibernate(void);
extern int hibernate_nvs_register(unsigned long start, unsigned long size);
extern int hibernate_nvs_alloc(void);
extern void hibernate_nvs_free(void);
extern void hibernate_nvs_save(void);
extern void hibernate_nvs_restore(void);
extern bool system_entering_hibernation(void);
#else /* CONFIG_HIBERNATION */
/* Hibernation is not configured: stubs below report "nothing to do". */
static inline int swsusp_page_is_forbidden(struct page *p) { return 0; }
static inline void swsusp_set_page_free(struct page *p) {}
static inline void swsusp_unset_page_free(struct page *p) {}

static inline void hibernation_set_ops(struct platform_hibernation_ops *ops) {}
static inline int hibernate(void) { return -ENOSYS; }
static inline int hibernate_nvs_register(unsigned long a, unsigned long b)
{
	return 0;
}
static inline int hibernate_nvs_alloc(void) { return 0; }
static inline void hibernate_nvs_free(void) {}
static inline void hibernate_nvs_save(void) {}
static inline void hibernate_nvs_restore(void) {}
static inline bool system_entering_hibernation(void) { return false; }
#endif /* CONFIG_HIBERNATION */

#ifdef CONFIG_PM_SLEEP
void save_processor_state(void);
void restore_processor_state(void);

/* kernel/power/main.c */
extern int register_pm_notifier(struct notifier_block *nb);
extern int unregister_pm_notifier(struct notifier_block *nb);

/* Declare a static notifier block for @fn and register it at priority @pri. */
#define pm_notifier(fn, pri) {				\
	static struct notifier_block fn##_nb =			\
		{ .notifier_call = fn, .priority = pri };	\
	register_pm_notifier(&fn##_nb);				\
}
#else /* !CONFIG_PM_SLEEP */

/* PM sleep not configured: registration is a successful no-op. */
static inline int register_pm_notifier(struct notifier_block *nb)
{
	return 0;
}

static inline int unregister_pm_notifier(struct notifier_block *nb)
{
	return 0;
}

/* Evaluate @fn (to avoid "unused" warnings) but register nothing. */
#define pm_notifier(fn, pri) do { (void)(fn); } while (0)
#endif /* !CONFIG_PM_SLEEP */

#ifndef CONFIG_HIBERNATION
/* Without hibernation there is no image, so nosave regions are meaningless. */
static inline void register_nosave_region(unsigned long b, unsigned long e)
{
}
static inline void register_nosave_region_late(unsigned long b, unsigned long e)
{
}
#endif
292
89081d17
HY
293extern struct mutex pm_mutex;
294
95d9ffbe 295#endif /* _LINUX_SUSPEND_H */