/*
 * Source: kernel/power/wakelock.c
 * From: GitHub mt8127/android_kernel_alcatel_ttab.git (merge tag 'v3.10.92' into update)
 */
1 /*
2 * kernel/power/wakelock.c
3 *
4 * User space wakeup sources support.
5 *
6 * Copyright (C) 2012 Rafael J. Wysocki <rjw@sisk.pl>
7 *
8 * This code is based on the analogous interface allowing user space to
9 * manipulate wakelocks on Android.
10 */
11
12 #include <linux/capability.h>
13 #include <linux/ctype.h>
14 #include <linux/device.h>
15 #include <linux/err.h>
16 #include <linux/hrtimer.h>
17 #include <linux/list.h>
18 #include <linux/rbtree.h>
19 #include <linux/slab.h>
20
//<20130327> <marc.huang> add wakelock debug log
int wakelock_debug = 1;	/* non-zero enables wakelock_log()/wakelock_warn() output */
#define _TAG_WAKELOCK "WAKELOCK"
/* Tagged debug/warn printers: "[WAKELOCK][<function>] <message>". */
#define wakelock_log(fmt, ...) do { if (wakelock_debug) pr_debug("[%s][%s]" fmt, _TAG_WAKELOCK, __func__, ##__VA_ARGS__); } while (0)
#define wakelock_warn(fmt, ...) do { if (wakelock_debug) pr_warn("[%s][%s]" fmt, _TAG_WAKELOCK, __func__, ##__VA_ARGS__); } while (0)
26
/* Serializes all access to wakelocks_tree, the LRU list and the counters. */
static DEFINE_MUTEX(wakelocks_lock);

/*
 * One user-space wakelock: a named wakeup source, kept in an rb-tree
 * ordered by name (see wakelock_lookup_add()).
 */
struct wakelock {
	char *name;			/* owned copy of the name; also used as ws.name */
	struct rb_node node;		/* link in wakelocks_tree */
	struct wakeup_source ws;	/* backing wakeup source */
#ifdef CONFIG_PM_WAKELOCKS_GC
	struct list_head lru;		/* link in wakelocks_lru_list, most recent first */
#endif
};

/* All currently known wakelocks, keyed by name. */
static struct rb_root wakelocks_tree = RB_ROOT;
39
40 ssize_t pm_show_wakelocks(char *buf, bool show_active)
41 {
42 struct rb_node *node;
43 struct wakelock *wl;
44 char *str = buf;
45 char *end = buf + PAGE_SIZE;
46
47 mutex_lock(&wakelocks_lock);
48
49 for (node = rb_first(&wakelocks_tree); node; node = rb_next(node)) {
50 wl = rb_entry(node, struct wakelock, node);
51 if (wl->ws.active == show_active)
52 str += scnprintf(str, end - str, "%s ", wl->name);
53 }
54 if (str > buf)
55 str--;
56
57 str += scnprintf(str, end - str, "\n");
58
59 mutex_unlock(&wakelocks_lock);
60 return (str - buf);
61 }
62
#if CONFIG_PM_WAKELOCKS_LIMIT > 0
/* Current number of wakelocks in the tree; protected by wakelocks_lock. */
static unsigned int number_of_wakelocks;

/* True when the tree already holds more than CONFIG_PM_WAKELOCKS_LIMIT entries. */
static inline bool wakelocks_limit_exceeded(void)
{
	return number_of_wakelocks > CONFIG_PM_WAKELOCKS_LIMIT;
}

/* Account a newly created wakelock (caller holds wakelocks_lock). */
static inline void increment_wakelocks_number(void)
{
	number_of_wakelocks++;
}

/* Account a freed wakelock (caller holds wakelocks_lock). */
static inline void decrement_wakelocks_number(void)
{
	number_of_wakelocks--;
}
#else /* CONFIG_PM_WAKELOCKS_LIMIT = 0 */
/* No limit configured: counting is pointless, so these are no-ops. */
static inline bool wakelocks_limit_exceeded(void) { return false; }
static inline void increment_wakelocks_number(void) {}
static inline void decrement_wakelocks_number(void) {}
#endif /* CONFIG_PM_WAKELOCKS_LIMIT */
85
#ifdef CONFIG_PM_WAKELOCKS_GC
#define WL_GC_COUNT_MAX	100	/* run a GC pass every this many unlocks */
#define WL_GC_TIME_SEC	300	/* minimum idle time before a wakelock may be freed */

/* Wakelocks ordered by last use, most recently used at the head. */
static LIST_HEAD(wakelocks_lru_list);
/* Unlock operations seen since the last GC pass. */
static unsigned int wakelocks_gc_count;

/* Put a freshly created wakelock at the head (most recent) of the LRU list. */
static inline void wakelocks_lru_add(struct wakelock *wl)
{
	list_add(&wl->lru, &wakelocks_lru_list);
}

/* Move a just-used wakelock back to the head of the LRU list. */
static inline void wakelocks_lru_most_recent(struct wakelock *wl)
{
	list_move(&wl->lru, &wakelocks_lru_list);
}

/*
 * wakelocks_gc - free wakelocks that have been idle for WL_GC_TIME_SEC.
 *
 * Called with wakelocks_lock held.  Only every WL_GC_COUNT_MAX-th call does
 * real work.  The LRU list is walked from its least-recently-used tail; the
 * walk stops at the first entry that has not been idle long enough, since
 * every entry closer to the head was used more recently.
 */
static void wakelocks_gc(void)
{
	struct wakelock *wl, *aux;
	ktime_t now;

	if (++wakelocks_gc_count <= WL_GC_COUNT_MAX)
		return;

	now = ktime_get();
	list_for_each_entry_safe_reverse(wl, aux, &wakelocks_lru_list, lru) {
		u64 idle_time_ns;
		bool active;

		/* Sample idle time and activity under the wakeup source's lock. */
		spin_lock_irq(&wl->ws.lock);
		idle_time_ns = ktime_to_ns(ktime_sub(now, wl->ws.last_time));
		active = wl->ws.active;
		spin_unlock_irq(&wl->ws.lock);

		if (idle_time_ns < ((u64)WL_GC_TIME_SEC * NSEC_PER_SEC))
			break;

		/* Only inactive wakelocks may be torn down. */
		if (!active) {
			wakeup_source_remove(&wl->ws);
			rb_erase(&wl->node, &wakelocks_tree);
			list_del(&wl->lru);
			kfree(wl->name);
			kfree(wl);
			decrement_wakelocks_number();
		}
	}
	wakelocks_gc_count = 0;
}
#else /* !CONFIG_PM_WAKELOCKS_GC */
static inline void wakelocks_lru_add(struct wakelock *wl) {}
static inline void wakelocks_lru_most_recent(struct wakelock *wl) {}
static inline void wakelocks_gc(void) {}
#endif /* !CONFIG_PM_WAKELOCKS_GC */
140
/*
 * wakelock_lookup_add - look up a wakelock by name, optionally creating it.
 * @name: wakelock name (need not be NUL-terminated at @len).
 * @len: number of significant characters in @name.
 * @add_if_not_found: allocate and insert a new wakelock when no match exists.
 *
 * Caller must hold wakelocks_lock.  Returns the wakelock on success, or an
 * ERR_PTR: -EINVAL when not found and @add_if_not_found is false, -ENOSPC
 * when the configured wakelock limit is exceeded, -ENOMEM on allocation
 * failure.
 */
static struct wakelock *wakelock_lookup_add(const char *name, size_t len,
					    bool add_if_not_found)
{
	struct rb_node **node = &wakelocks_tree.rb_node;
	struct rb_node *parent = *node;
	struct wakelock *wl;

	while (*node) {
		int diff;

		parent = *node;
		wl = rb_entry(*node, struct wakelock, node);
		diff = strncmp(name, wl->name, len);
		if (diff == 0) {
			/*
			 * The first @len chars match; it is only a real match
			 * if the stored name also ends at @len.  Otherwise
			 * @name is a strict prefix and sorts before it.
			 */
			if (wl->name[len])
				diff = -1;
			else
				return wl;
		}
		if (diff < 0)
			node = &(*node)->rb_left;
		else
			node = &(*node)->rb_right;
	}
	if (!add_if_not_found)
		return ERR_PTR(-EINVAL);

	if (wakelocks_limit_exceeded())
		return ERR_PTR(-ENOSPC);

	/* Not found, we have to add a new one. */
	wl = kzalloc(sizeof(*wl), GFP_KERNEL);
	if (!wl)
		return ERR_PTR(-ENOMEM);

	wl->name = kstrndup(name, len, GFP_KERNEL);
	if (!wl->name) {
		kfree(wl);
		return ERR_PTR(-ENOMEM);
	}
	/* The wakeup source shares the wakelock's name string. */
	wl->ws.name = wl->name;
	wakeup_source_add(&wl->ws);
	/* @parent/@node still point at the insertion slot found above. */
	rb_link_node(&wl->node, parent, node);
	rb_insert_color(&wl->node, &wakelocks_tree);
	wakelocks_lru_add(wl);
	increment_wakelocks_number();
	return wl;
}
189
190 int pm_wake_lock(const char *buf)
191 {
192 const char *str = buf;
193 struct wakelock *wl;
194 u64 timeout_ns = 0;
195 size_t len;
196 int ret = 0;
197
198 /*
199 * 20130429 marc.huang
200 * remove CAP_BLOCK_SUSPEND capability check (rollback to android kernel 3.4)
201 */
202 //if (!capable(CAP_BLOCK_SUSPEND))
203 // return -EPERM;
204
205 while (*str && !isspace(*str))
206 str++;
207
208 len = str - buf;
209 if (!len)
210 return -EINVAL;
211
212 if (*str && *str != '\n') {
213 /* Find out if there's a valid timeout string appended. */
214 ret = kstrtou64(skip_spaces(str), 10, &timeout_ns);
215 if (ret)
216 return -EINVAL;
217 }
218
219 //<20130327> <marc.huang> add wakelock dubug log
220 wakelock_log("%s\n", buf);
221
222 mutex_lock(&wakelocks_lock);
223
224 wl = wakelock_lookup_add(buf, len, true);
225 if (IS_ERR(wl)) {
226 ret = PTR_ERR(wl);
227 goto out;
228 }
229 if (timeout_ns) {
230 u64 timeout_ms = timeout_ns + NSEC_PER_MSEC - 1;
231
232 do_div(timeout_ms, NSEC_PER_MSEC);
233 __pm_wakeup_event(&wl->ws, timeout_ms);
234 } else {
235 __pm_stay_awake(&wl->ws);
236 }
237
238 wakelocks_lru_most_recent(wl);
239
240 out:
241 mutex_unlock(&wakelocks_lock);
242 return ret;
243 }
244
245 int pm_wake_unlock(const char *buf)
246 {
247 struct wakelock *wl;
248 size_t len;
249 int ret = 0;
250
251 /*
252 * 20130429 marc.huang
253 * remove CAP_BLOCK_SUSPEND capability check (rollback to android kernel 3.4)
254 */
255 //if (!capable(CAP_BLOCK_SUSPEND))
256 // return -EPERM;
257
258 len = strlen(buf);
259 if (!len)
260 return -EINVAL;
261
262 if (buf[len-1] == '\n')
263 len--;
264
265 if (!len)
266 return -EINVAL;
267
268 //<20130327> <marc.huang> add wakelock dubug log
269 wakelock_log("%s\n", buf);
270
271 mutex_lock(&wakelocks_lock);
272
273 wl = wakelock_lookup_add(buf, len, false);
274 if (IS_ERR(wl)) {
275 ret = PTR_ERR(wl);
276 goto out;
277 }
278 __pm_relax(&wl->ws);
279
280 wakelocks_lru_most_recent(wl);
281 wakelocks_gc();
282
283 out:
284 mutex_unlock(&wakelocks_lock);
285 return ret;
286 }