Merge git://oak/home/sfr/kernels/iseries/work
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / kernel / power / pm.c
CommitLineData
1da177e4
LT
1/*
2 * pm.c - Power management interface
3 *
4 * Copyright (C) 2000 Andrew Henroid
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20#include <linux/init.h>
21#include <linux/module.h>
22#include <linux/spinlock.h>
23#include <linux/mm.h>
24#include <linux/slab.h>
25#include <linux/pm.h>
bca73e4b 26#include <linux/pm_legacy.h>
1da177e4
LT
27#include <linux/interrupt.h>
28
/* Non-zero while the legacy pm interface is active; exported for drivers. */
int pm_active;

/*
 * Locking notes:
 *	pm_devs_lock can be a semaphore providing pm ops are not called
 *	from an interrupt handler (already a bad idea so no change here). Each
 *	change must be protected so that an unlink of an entry doesn't clash
 *	with a pm send - which is permitted to sleep in the current architecture
 *
 *	Module unloads clashing with pm events now work out safely, the module
 *	unload path will block until the event has been sent. It may well block
 *	until a resume but that will be fine.
 */

/* Serialises every addition, removal and walk of the pm_devs list. */
static DECLARE_MUTEX(pm_devs_lock);
/* All pm_dev entries handed out by pm_register() live on this list. */
static LIST_HEAD(pm_devs);
45
46/**
47 * pm_register - register a device with power management
48 * @type: device type
49 * @id: device ID
50 * @callback: callback function
51 *
52 * Add a device to the list of devices that wish to be notified about
53 * power management events. A &pm_dev structure is returned on success,
54 * on failure the return is %NULL.
55 *
56 * The callback function will be called in process context and
57 * it may sleep.
58 */
59
60struct pm_dev *pm_register(pm_dev_t type,
61 unsigned long id,
62 pm_callback callback)
63{
dd392710 64 struct pm_dev *dev = kzalloc(sizeof(struct pm_dev), GFP_KERNEL);
1da177e4 65 if (dev) {
1da177e4
LT
66 dev->type = type;
67 dev->id = id;
68 dev->callback = callback;
69
70 down(&pm_devs_lock);
71 list_add(&dev->entry, &pm_devs);
72 up(&pm_devs_lock);
73 }
74 return dev;
75}
76
77/**
78 * pm_unregister - unregister a device with power management
79 * @dev: device to unregister
80 *
81 * Remove a device from the power management notification lists. The
82 * dev passed must be a handle previously returned by pm_register.
83 */
84
85void pm_unregister(struct pm_dev *dev)
86{
87 if (dev) {
88 down(&pm_devs_lock);
89 list_del(&dev->entry);
90 up(&pm_devs_lock);
91
92 kfree(dev);
93 }
94}
95
96static void __pm_unregister(struct pm_dev *dev)
97{
98 if (dev) {
99 list_del(&dev->entry);
100 kfree(dev);
101 }
102}
103
104/**
105 * pm_unregister_all - unregister all devices with matching callback
106 * @callback: callback function pointer
107 *
108 * Unregister every device that would call the callback passed. This
109 * is primarily meant as a helper function for loadable modules. It
110 * enables a module to give up all its managed devices without keeping
111 * its own private list.
112 */
113
114void pm_unregister_all(pm_callback callback)
115{
116 struct list_head *entry;
117
118 if (!callback)
119 return;
120
121 down(&pm_devs_lock);
122 entry = pm_devs.next;
123 while (entry != &pm_devs) {
124 struct pm_dev *dev = list_entry(entry, struct pm_dev, entry);
125 entry = entry->next;
126 if (dev->callback == callback)
127 __pm_unregister(dev);
128 }
129 up(&pm_devs_lock);
130}
131
132/**
133 * pm_send - send request to a single device
134 * @dev: device to send to
135 * @rqst: power management request
136 * @data: data for the callback
137 *
138 * Issue a power management request to a given device. The
139 * %PM_SUSPEND and %PM_RESUME events are handled specially. The
140 * data field must hold the intended next state. No call is made
141 * if the state matches.
142 *
143 * BUGS: what stops two power management requests occurring in parallel
144 * and conflicting.
145 *
146 * WARNING: Calling pm_send directly is not generally recommended, in
147 * particular there is no locking against the pm_dev going away. The
148 * caller must maintain all needed locking or have 'inside knowledge'
149 * on the safety. Also remember that this function is not locked against
150 * pm_unregister. This means that you must handle SMP races on callback
151 * execution and unload yourself.
152 */
153
154static int pm_send(struct pm_dev *dev, pm_request_t rqst, void *data)
155{
156 int status = 0;
157 unsigned long prev_state, next_state;
158
159 if (in_interrupt())
160 BUG();
161
162 switch (rqst) {
163 case PM_SUSPEND:
164 case PM_RESUME:
165 prev_state = dev->state;
166 next_state = (unsigned long) data;
167 if (prev_state != next_state) {
168 if (dev->callback)
169 status = (*dev->callback)(dev, rqst, data);
170 if (!status) {
171 dev->state = next_state;
172 dev->prev_state = prev_state;
173 }
174 }
175 else {
176 dev->prev_state = prev_state;
177 }
178 break;
179 default:
180 if (dev->callback)
181 status = (*dev->callback)(dev, rqst, data);
182 break;
183 }
184 return status;
185}
186
187/*
188 * Undo incomplete request
189 */
190static void pm_undo_all(struct pm_dev *last)
191{
192 struct list_head *entry = last->entry.prev;
193 while (entry != &pm_devs) {
194 struct pm_dev *dev = list_entry(entry, struct pm_dev, entry);
195 if (dev->state != dev->prev_state) {
196 /* previous state was zero (running) resume or
197 * previous state was non-zero (suspended) suspend
198 */
199 pm_request_t undo = (dev->prev_state
200 ? PM_SUSPEND:PM_RESUME);
201 pm_send(dev, undo, (void*) dev->prev_state);
202 }
203 entry = entry->prev;
204 }
205}
206
207/**
208 * pm_send_all - send request to all managed devices
209 * @rqst: power management request
210 * @data: data for the callback
211 *
212 * Issue a power management request to a all devices. The
213 * %PM_SUSPEND events are handled specially. Any device is
214 * permitted to fail a suspend by returning a non zero (error)
215 * value from its callback function. If any device vetoes a
216 * suspend request then all other devices that have suspended
217 * during the processing of this request are restored to their
218 * previous state.
219 *
220 * WARNING: This function takes the pm_devs_lock. The lock is not dropped until
221 * the callbacks have completed. This prevents races against pm locking
222 * functions, races against module unload pm_unregister code. It does
223 * mean however that you must not issue pm_ functions within the callback
224 * or you will deadlock and users will hate you.
225 *
226 * Zero is returned on success. If a suspend fails then the status
227 * from the device that vetoes the suspend is returned.
228 *
229 * BUGS: what stops two power management requests occurring in parallel
230 * and conflicting.
231 */
232
233int pm_send_all(pm_request_t rqst, void *data)
234{
235 struct list_head *entry;
236
237 down(&pm_devs_lock);
238 entry = pm_devs.next;
239 while (entry != &pm_devs) {
240 struct pm_dev *dev = list_entry(entry, struct pm_dev, entry);
241 if (dev->callback) {
242 int status = pm_send(dev, rqst, data);
243 if (status) {
244 /* return devices to previous state on
245 * failed suspend request
246 */
247 if (rqst == PM_SUSPEND)
248 pm_undo_all(dev);
249 up(&pm_devs_lock);
250 return status;
251 }
252 }
253 entry = entry->next;
254 }
255 up(&pm_devs_lock);
256 return 0;
257}
258
259EXPORT_SYMBOL(pm_register);
260EXPORT_SYMBOL(pm_unregister);
261EXPORT_SYMBOL(pm_unregister_all);
262EXPORT_SYMBOL(pm_send_all);
263EXPORT_SYMBOL(pm_active);
264
265