/* Extracted from commit 6fa3eb70 (blame/table markup removed) */
1 | /* kernel/power/earlysuspend.c |
2 | * | |
3 | * Copyright (C) 2005-2008 Google, Inc. | |
4 | * | |
5 | * This software is licensed under the terms of the GNU General Public | |
6 | * License version 2, as published by the Free Software Foundation, and | |
7 | * may be copied, distributed, and modified under those terms. | |
8 | * | |
9 | * This program is distributed in the hope that it will be useful, | |
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
12 | * GNU General Public License for more details. | |
13 | * | |
14 | */ | |
15 | ||
16 | #include <linux/earlysuspend.h> | |
17 | #include <linux/module.h> | |
18 | #include <linux/mutex.h> | |
19 | #include <linux/rtc.h> | |
20 | #include <linux/syscalls.h> /* sys_sync */ | |
21 | #include <linux/wakelock.h> | |
22 | #include <linux/workqueue.h> | |
23 | ||
24 | #include "power.h" | |
25 | ||
/*
 * Debug flag bits shared by the two debug masks below.
 * NOTE(review): bit 1 (1U << 1) is skipped here — presumably reserved by
 * the original Android wakelock debug flags; confirm before reusing it.
 */
enum {
	DEBUG_USER_STATE = 1U << 0,
	DEBUG_SUSPEND = 1U << 2,
	DEBUG_VERBOSE = 1U << 3,
};
/* Runtime-tunable via /sys/module/.../parameters/debug_mask; all classes on by default. */
static int debug_mask = DEBUG_USER_STATE | DEBUG_SUSPEND | DEBUG_VERBOSE;
/* static int debug_mask = DEBUG_USER_STATE; */
module_param_named(debug_mask, debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP);
34 | ||
/* int earlysuspend_debug_mask = 0; */
/* Gates pm_warn() output and the DEBUG_* verbose paths below. */
int earlysuspend_debug_mask = DEBUG_USER_STATE;
/* Number of handlers on early_suspend_handlers; updated under early_suspend_lock. */
int early_suspend_count = 0;
/* Bitmask of handler indices whose suspend/resume callbacks are skipped. */
int forbid_id = 0x0;
int g_suspend_sys_sync_count = 0;	/* NOTE(review): not referenced in this file — confirm external users before removing */
/* Set while suspend_check_sys_sync_done() is waiting; tested by suspend_sys_sync(). */
int wait_sys_sync_flag = 0;
41 | ||
#define _TAG_PM_M "Ker_PM"
/*
 * Conditional warning printk tagged with the PM module tag and the calling
 * function name. Wrapped in do { } while (0) so pm_warn(...) behaves as a
 * single statement: the previous bare `if (...) pr_warn(...);` expansion
 * broke `if (c) pm_warn(...); else ...` (dangling else) and emitted a
 * stray empty statement at every call site.
 */
#define pm_warn(fmt, ...) \
	do { \
		if (earlysuspend_debug_mask) \
			pr_warn("[%s][%s]" fmt, _TAG_PM_M, __func__, ##__VA_ARGS__); \
	} while (0)
/* Serializes handler-list mutation and handler callback invocation. */
static DEFINE_MUTEX(early_suspend_lock);
/* Handlers kept sorted by ascending ->level; see register_early_suspend(). */
static LIST_HEAD(early_suspend_handlers);
static void early_sys_sync(struct work_struct *work);
/* void suspend_sys_sync(struct work_struct *work); */
static void early_suspend(struct work_struct *work);
static void late_resume(struct work_struct *work);
static void suspend_sys_sync(struct work_struct *work);

static DECLARE_WORK(early_sys_sync_work, early_sys_sync);
/* DECLARE_WORK(suspend_sys_sync_work, suspend_sys_sync); */
static DECLARE_WORK(early_suspend_work, early_suspend);
static DECLARE_WORK(late_resume_work, late_resume);
static DECLARE_WORK(suspend_sys_sync_work, suspend_sys_sync);

/* Protects the suspend-request `state` bits below. */
static DEFINE_SPINLOCK(state_lock);
61 | ||
/* */
/* Held across sys_sync() in early_sys_sync() to keep the system awake. */
struct wake_lock sys_sync_wake_lock;
/* Single-threaded workqueues created in org_wakelocks_init(). */
struct workqueue_struct *suspend_work_queue;
struct workqueue_struct *sys_sync_work_queue;
/* Last state passed to request_suspend_state(); defaults to mem sleep. */
suspend_state_t requested_suspend_state = PM_SUSPEND_MEM;

/* Bits of `state`, guarded by state_lock. */
enum {
	SUSPEND_REQUESTED = 0x1,
	SUSPENDED = 0x2,
	SUSPEND_REQUESTED_AND_SUSPENDED = SUSPEND_REQUESTED | SUSPENDED,
};
static int state;
/* Completed by late_resume() once the fb driver's resume callback ran. */
static DECLARE_COMPLETION(fb_drv_ready);
/* Completed by suspend_sys_sync() when a waiter set wait_sys_sync_flag. */
static DECLARE_COMPLETION(sys_sync_done);
76 | ||
77 | void register_early_suspend(struct early_suspend *handler) | |
78 | { | |
79 | struct list_head *pos; | |
80 | ||
81 | mutex_lock(&early_suspend_lock); | |
82 | list_for_each(pos, &early_suspend_handlers) { | |
83 | struct early_suspend *e; | |
84 | e = list_entry(pos, struct early_suspend, link); | |
85 | if (e->level > handler->level) | |
86 | break; | |
87 | } | |
88 | list_add_tail(&handler->link, pos); | |
89 | early_suspend_count++; | |
90 | if ((state & SUSPENDED) && handler->suspend) | |
91 | handler->suspend(handler); | |
92 | mutex_unlock(&early_suspend_lock); | |
93 | } | |
94 | EXPORT_SYMBOL(register_early_suspend); | |
95 | ||
/*
 * Remove @handler from the early-suspend handler list. Serialized against
 * handler invocation via early_suspend_lock.
 */
void unregister_early_suspend(struct early_suspend *handler)
{
	mutex_lock(&early_suspend_lock);
	list_del(&handler->link);
	early_suspend_count--;
	mutex_unlock(&early_suspend_lock);
}
EXPORT_SYMBOL(unregister_early_suspend);
104 | ||
/*
 * Work item queued at the start of a suspend request: flush dirty file
 * system data while holding a suspend wakelock so the sync finishes
 * before the system is allowed to sleep.
 */
static void early_sys_sync(struct work_struct *work)
{
	wake_lock(&sys_sync_wake_lock);
	pm_warn("++\n");
	sys_sync();
	pm_warn("--\n");
	wake_unlock(&sys_sync_wake_lock);
}
113 | ||
/*
 * Work item queued by suspend_syssync_enqueue(): run sys_sync() and, if a
 * caller is blocked in suspend_check_sys_sync_done(), signal completion.
 *
 * NOTE(review): wait_sys_sync_flag is a plain int read without locking;
 * if this work tests it before the waiter sets it, sys_sync_done is never
 * completed — verify the ordering guarantees at the call sites.
 */
static void suspend_sys_sync(struct work_struct *work)
{
	pm_warn("++\n");
	sys_sync();
	if (wait_sys_sync_flag)
		complete(&sys_sync_done);
	pm_warn("--\n");
}
122 | ||
123 | ||
124 | ||
/*
 * Work item for the "early suspend" phase: mark the state SUSPENDED and
 * invoke every registered handler's ->suspend() callback in ascending
 * level order, skipping handlers whose index bit is set in forbid_id.
 * Finally hand the requested state to autosleep.
 */
static void early_suspend(struct work_struct *work)
{
	struct early_suspend *pos;
	unsigned long irqflags;
	int abort = 0, count = 0;

	pr_warn("@@@@@@@@@@@@@@@@@@@@@@@\n@@@__early_suspend__@@@\n@@@@@@@@@@@@@@@@@@@@@@@\n");

	mutex_lock(&early_suspend_lock);
	spin_lock_irqsave(&state_lock, irqflags);
	/* Proceed only if a suspend is still requested and not yet entered. */
	if (state == SUSPEND_REQUESTED)
		state |= SUSPENDED;
	else
		abort = 1;	/* request withdrawn: a resume raced in */
	spin_unlock_irqrestore(&state_lock, irqflags);

	if (abort) {
		if (earlysuspend_debug_mask & DEBUG_SUSPEND)
			pm_warn("abort, state %d\n", state);
		mutex_unlock(&early_suspend_lock);
		goto abort;
	}

	pr_warn("early_suspend_count = %d, forbid_id = 0x%x\n", early_suspend_count, forbid_id);
	if (earlysuspend_debug_mask & DEBUG_SUSPEND)
		pm_warn("call handlers\n");
	list_for_each_entry(pos, &early_suspend_handlers, link) {
		if (pos->suspend != NULL) {
			/* count indexes only handlers that have a ->suspend hook. */
			if (!(forbid_id & (0x1 << count))) {
				/* if (earlysuspend_debug_mask & DEBUG_VERBOSE) */
				pr_warn("ES handlers %d: [%pf], level: %d\n", count, pos->suspend,
					pos->level);
				pos->suspend(pos);
			}
			count++;
		}
	}
	mutex_unlock(&early_suspend_lock);

	/* Remove sys_sync from early_suspend, and use work queue to complete sys_sync */

abort:
	/* NOTE(review): `state` is read here without state_lock — racy, but
	 * preserved from the original code; confirm before relying on it. */
	if (state == SUSPEND_REQUESTED_AND_SUSPENDED) {
		/* wake_unlock(&main_wake_lock); */
#ifdef CONFIG_MTK_HIBERNATION
		suspend_state_t susp_state = get_suspend_state();
		pm_warn("calling pm_autosleep_set_state() with parameter: %d\n", susp_state);
		pm_autosleep_set_state(susp_state);
#else
		pm_autosleep_set_state(PM_SUSPEND_MEM);
#endif
	}
}
178 | ||
/*
 * Work item for the "late resume" phase: clear SUSPENDED and invoke every
 * handler's ->resume() callback in descending level order (reverse of
 * suspend). fb_drv_ready is completed as soon as the frame buffer driver
 * levels have resumed so request_suspend_state() can unblock; the abort
 * path also completes it so the waiter is never left hanging.
 */
static void late_resume(struct work_struct *work)
{
	struct early_suspend *pos;
	unsigned long irqflags;
	int abort = 0;
	int completed = 0, count = 0;

	pr_warn("@@@@@@@@@@@@@@@@@@@@@@@\n@@@__late_resume__@@@\n@@@@@@@@@@@@@@@@@@@@@@@\n");

#if 0 /* sys_sync WQ ver2.0 use */
	if (wait_sys_sync_flag)
	{
		complete(&sys_sync_done);
	}
#endif

	pm_autosleep_set_state(PM_SUSPEND_ON);

	mutex_lock(&early_suspend_lock);
	spin_lock_irqsave(&state_lock, irqflags);
	if (state == SUSPENDED)
		state &= ~SUSPENDED;
	else
		abort = 1;	/* never fully suspended: nothing to resume */
	spin_unlock_irqrestore(&state_lock, irqflags);

	if (abort) {
		if (earlysuspend_debug_mask & DEBUG_SUSPEND)
			pm_warn("abort, state %d\n", state);
		goto abort;
	}
	pr_warn("early_suspend_count = %d, forbid_id = 0x%x\n", early_suspend_count, forbid_id);
	if (earlysuspend_debug_mask & DEBUG_SUSPEND)
		pm_warn("call handlers\n");
	list_for_each_entry_reverse(pos, &early_suspend_handlers, link) {
		/* Unblock the fb waiter once all post-drawing levels resumed. */
		if (!completed && pos->level < EARLY_SUSPEND_LEVEL_STOP_DRAWING) {
			complete(&fb_drv_ready);
			completed = 1;
		}
		if (pos->resume != NULL) {
			/* Mirror early_suspend()'s indexing: the forbid_id bit
			 * for this handler is counted from the list head. */
			if (!(forbid_id & (0x1 << (early_suspend_count - count - 1)))) {
				/* if (earlysuspend_debug_mask & DEBUG_VERBOSE) */
				pr_warn("LR handlers %d: [%pf], level: %d\n", count, pos->resume,
					pos->level);
				pos->resume(pos);
			}
			count++;
		}
	}
	if (earlysuspend_debug_mask & DEBUG_SUSPEND)
		pm_warn("done\n");
abort:
	/* Never leave request_suspend_state() blocked on fb_drv_ready. */
	if (!completed)
		complete(&fb_drv_ready);
	mutex_unlock(&early_suspend_lock);
}
235 | ||
/*
 * Transition between PM_SUSPEND_ON (wakeup) and a sleep state. On a new
 * sleep request this queues the fs-sync work and the early-suspend work;
 * on wakeup it queues late-resume work and then blocks the caller until
 * the frame buffer driver's resume callback completes (to synchronize
 * backlight turn-on timing).
 */
void request_suspend_state(suspend_state_t new_state)
{
	unsigned long irqflags;
	int old_sleep;
	int wait_flag = 0;

	spin_lock_irqsave(&state_lock, irqflags);
	old_sleep = state & SUSPEND_REQUESTED;
	if (earlysuspend_debug_mask & DEBUG_USER_STATE) {
		/* NOTE(review): RTC conversion + printk under a spinlock with
		 * IRQs disabled — slow, but preserved from the original. */
		struct timespec ts;
		struct rtc_time tm;
		getnstimeofday(&ts);
		rtc_time_to_tm(ts.tv_sec, &tm);
		pm_warn("%s (%d->%d) at %lld "
			"(%d-%02d-%02d %02d:%02d:%02d.%09lu UTC)\n",
			new_state != PM_SUSPEND_ON ? "sleep" : "wakeup",
			requested_suspend_state, new_state,
			ktime_to_ns(ktime_get()),
			tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
			tm.tm_hour, tm.tm_min, tm.tm_sec, ts.tv_nsec);
	}
	if (!old_sleep && new_state != PM_SUSPEND_ON) {
		/* ON -> sleep: sync file systems, then run suspend handlers. */
		state |= SUSPEND_REQUESTED;
		pm_warn("sys_sync_work_queue early_sys_sync_work\n");
		queue_work(sys_sync_work_queue, &early_sys_sync_work);
		pm_warn("suspend_work_queue early_suspend_work\n");
		queue_work(suspend_work_queue, &early_suspend_work);
	} else if (old_sleep && new_state == PM_SUSPEND_ON) {
		/* sleep -> ON: run resume handlers. */
		state &= ~SUSPEND_REQUESTED;
		/* wake_lock(&main_wake_lock); */
		/* /cun */
		if (queue_work(suspend_work_queue, &late_resume_work)) {
			/*
			 * In order to synchronize the backlight turn on timing,
			 * block the thread and wait for fb driver late_resume()
			 * callback function is completed
			 */
			wait_flag = 1;
		}
	}
	requested_suspend_state = new_state;
	spin_unlock_irqrestore(&state_lock, irqflags);
	/* Wait outside the spinlock; fb_drv_ready is completed by late_resume(). */
	if (wait_flag == 1) {
		wait_for_completion(&fb_drv_ready);
		pr_warn("wait done\n");
	}
}
283 | ||
/* Return the last state passed to request_suspend_state() (unlocked read). */
suspend_state_t get_suspend_state(void)
{
	return requested_suspend_state;
}
288 | ||
289 | /* cun */ | |
290 | static int __init org_wakelocks_init(void) | |
291 | { | |
292 | int ret; | |
293 | ||
294 | wake_lock_init(&sys_sync_wake_lock, WAKE_LOCK_SUSPEND, "sys_sync"); | |
295 | ||
296 | sys_sync_work_queue = create_singlethread_workqueue("fs_sync"); | |
297 | if (sys_sync_work_queue == NULL) { | |
298 | pr_err("[wakelocks_init] fs_sync workqueue create failed\n"); | |
299 | } | |
300 | ||
301 | suspend_work_queue = create_singlethread_workqueue("suspend"); | |
302 | if (suspend_work_queue == NULL) { | |
303 | ret = -ENOMEM; | |
304 | goto err_suspend_work_queue; | |
305 | } | |
306 | return 0; | |
307 | ||
308 | err_suspend_work_queue: | |
309 | ||
310 | return ret; | |
311 | } | |
312 | ||
313 | static void __exit org_wakelocks_exit(void) | |
314 | { | |
315 | destroy_workqueue(suspend_work_queue); | |
316 | } | |
/* [MTK] */
/*
 * Queue a sys_sync work item on the fs_sync workqueue from the suspend path.
 *
 * NOTE(review): takes state_lock with plain spin_lock() while every other
 * user takes it with spin_lock_irqsave(); this is safe only if it is never
 * called from IRQ context — confirm against callers.
 */
void suspend_syssync_enqueue(void)
{
	spin_lock(&state_lock);
	queue_work(sys_sync_work_queue, &suspend_sys_sync_work);
	spin_unlock(&state_lock);
	return;
}
325 | ||
326 | ||
/*
 * Block until the queued suspend_sys_sync() work completes sys_sync_done.
 *
 * NOTE(review): wait_sys_sync_flag is set/cleared without locking; if the
 * work item tests the flag before it is set here, this wait never returns —
 * verify the call ordering with suspend_syssync_enqueue().
 */
void suspend_check_sys_sync_done(void)
{
	wait_sys_sync_flag = 1;
	wait_for_completion(&sys_sync_done);
	wait_sys_sync_flag = 0;
	return;
}
334 | ||
/* Register early (core_initcall) so the workqueues exist before any
 * suspend request can be issued; pair with module_exit for teardown. */
core_initcall(org_wakelocks_init);
module_exit(org_wakelocks_exit);