/* The industrial I/O core in kernel channel mapping
 *
 * Copyright (c) 2011 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 */
#include <linux/err.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/mutex.h>

#include <linux/iio/iio.h>
#include "iio_core.h"
#include <linux/iio/machine.h>
#include <linux/iio/driver.h>
#include <linux/iio/consumer.h>

struct iio_map_internal {
        struct iio_dev *indio_dev;
        struct iio_map *map;
        struct list_head l;
};

static LIST_HEAD(iio_map_list);
static DEFINE_MUTEX(iio_map_list_lock);

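/**
 * iio_map_array_register() - register consumer mappings for an IIO device
 * @indio_dev:	provider device the mappings refer to
 * @maps:	array of mappings, terminated by an entry with a NULL
 *		consumer_dev_name
 *
 * Each entry is linked to @indio_dev and added to the global map list so
 * consumers can later look channels up by consumer device and channel name.
 * Returns 0 on success or -ENOMEM on allocation failure.
 */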
int iio_map_array_register(struct iio_dev *indio_dev, struct iio_map *maps)
{
        int i = 0, ret = 0;
        struct iio_map_internal *mapi;

        if (maps == NULL)
                return 0;

        mutex_lock(&iio_map_list_lock);
        while (maps[i].consumer_dev_name != NULL) {
                mapi = kzalloc(sizeof(*mapi), GFP_KERNEL);
                if (mapi == NULL) {
                        ret = -ENOMEM;
                        goto error_ret;
                }
                mapi->map = &maps[i];
                mapi->indio_dev = indio_dev;
                list_add(&mapi->l, &iio_map_list);
                i++;
        }
error_ret:
        mutex_unlock(&iio_map_list_lock);

        return ret;
}
EXPORT_SYMBOL_GPL(iio_map_array_register);

/*
 * Remove all map entries associated with the given iio device
 */
int iio_map_array_unregister(struct iio_dev *indio_dev)
{
        int ret = -ENODEV;
        struct iio_map_internal *mapi;
        struct list_head *pos, *tmp;

        mutex_lock(&iio_map_list_lock);
        list_for_each_safe(pos, tmp, &iio_map_list) {
                mapi = list_entry(pos, struct iio_map_internal, l);
                if (indio_dev == mapi->indio_dev) {
                        list_del(&mapi->l);
                        kfree(mapi);
                        ret = 0;
                }
        }
        mutex_unlock(&iio_map_list_lock);
        return ret;
}
EXPORT_SYMBOL_GPL(iio_map_array_unregister);

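/*
 * Look up a channel on @indio_dev by its datasheet_name.  Returns NULL if
 * no channel with a matching datasheet_name exists.
 */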
static const struct iio_chan_spec
*iio_chan_spec_from_name(const struct iio_dev *indio_dev, const char *name)
{
        int i;
        const struct iio_chan_spec *chan = NULL;

        for (i = 0; i < indio_dev->num_channels; i++)
                if (indio_dev->channels[i].datasheet_name &&
                    strcmp(name, indio_dev->channels[i].datasheet_name) == 0) {
                        chan = &indio_dev->channels[i];
                        break;
                }
        return chan;
}

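/*
 * Resolve a consumer device name / channel name pair to an iio_channel.
 * The matching map entry supplies the provider device; a reference on it is
 * taken here and must be dropped with iio_device_put() when the channel is
 * released.
 */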
static struct iio_channel *iio_channel_get_sys(const char *name,
                                               const char *channel_name)
{
        struct iio_map_internal *c_i = NULL, *c = NULL;
        struct iio_channel *channel;
        int err;

        if (name == NULL && channel_name == NULL)
                return ERR_PTR(-ENODEV);

        /* first find the matching entry in the channel map */
        mutex_lock(&iio_map_list_lock);
        list_for_each_entry(c_i, &iio_map_list, l) {
                if ((name && strcmp(name, c_i->map->consumer_dev_name) != 0) ||
                    (channel_name &&
                     strcmp(channel_name, c_i->map->consumer_channel) != 0))
                        continue;
                c = c_i;
                iio_device_get(c->indio_dev);
                break;
        }
        mutex_unlock(&iio_map_list_lock);
        if (c == NULL)
                return ERR_PTR(-ENODEV);

        channel = kzalloc(sizeof(*channel), GFP_KERNEL);
        if (channel == NULL) {
                err = -ENOMEM;
                goto error_no_mem;
        }

        channel->indio_dev = c->indio_dev;

        if (c->map->adc_channel_label) {
                channel->channel =
                        iio_chan_spec_from_name(channel->indio_dev,
                                                c->map->adc_channel_label);

                if (channel->channel == NULL) {
                        err = -EINVAL;
                        goto error_no_chan;
                }
        }

        return channel;

error_no_chan:
        kfree(channel);
error_no_mem:
        iio_device_put(c->indio_dev);
        return ERR_PTR(err);
}

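/**
 * iio_channel_get() - get an IIO channel for a consumer device
 * @dev:		consumer device, or NULL to match on the channel name only
 * @channel_name:	consumer-side channel name, may be NULL
 *
 * Returns the requested channel or an ERR_PTR() on failure.  Release it
 * with iio_channel_release().
 */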
struct iio_channel *iio_channel_get(struct device *dev,
                                    const char *channel_name)
{
        const char *name = dev ? dev_name(dev) : NULL;

        return iio_channel_get_sys(name, channel_name);
}
EXPORT_SYMBOL_GPL(iio_channel_get);

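/**
 * iio_channel_release() - release a channel obtained via iio_channel_get()
 * @channel:	channel to free
 *
 * Drops the reference held on the providing IIO device and frees the channel.
 */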
void iio_channel_release(struct iio_channel *channel)
{
        iio_device_put(channel->indio_dev);
        kfree(channel);
}
EXPORT_SYMBOL_GPL(iio_channel_release);

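/**
 * iio_channel_get_all() - get all channels mapped to a consumer device
 * @dev:	consumer device
 *
 * Returns an array of struct iio_channel, one entry per map whose
 * consumer_dev_name matches @dev and terminated by a zeroed entry, or an
 * ERR_PTR() on failure.  Free the array with iio_channel_release_all().
 */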
struct iio_channel *iio_channel_get_all(struct device *dev)
{
        const char *name;
        struct iio_channel *chans;
        struct iio_map_internal *c = NULL;
        int nummaps = 0;
        int mapind = 0;
        int i, ret;

        if (dev == NULL)
                return ERR_PTR(-EINVAL);
        name = dev_name(dev);

        mutex_lock(&iio_map_list_lock);
        /* first count the matching maps */
        list_for_each_entry(c, &iio_map_list, l) {
                if (name && strcmp(name, c->map->consumer_dev_name) != 0)
                        continue;
                nummaps++;
        }

        if (nummaps == 0) {
                ret = -ENODEV;
                goto error_ret;
        }

        /* NULL terminated array to save passing size */
        chans = kzalloc(sizeof(*chans) * (nummaps + 1), GFP_KERNEL);
        if (chans == NULL) {
                ret = -ENOMEM;
                goto error_ret;
        }

        /* for each map fill in the chans element */
        list_for_each_entry(c, &iio_map_list, l) {
                if (name && strcmp(name, c->map->consumer_dev_name) != 0)
                        continue;
                chans[mapind].indio_dev = c->indio_dev;
                chans[mapind].data = c->map->consumer_data;
                chans[mapind].channel =
                        iio_chan_spec_from_name(chans[mapind].indio_dev,
                                                c->map->adc_channel_label);
                if (chans[mapind].channel == NULL) {
                        ret = -EINVAL;
                        goto error_free_chans;
                }
                iio_device_get(chans[mapind].indio_dev);
                mapind++;
        }
        if (mapind == 0) {
                ret = -ENODEV;
                goto error_free_chans;
        }
        mutex_unlock(&iio_map_list_lock);

        return chans;

error_free_chans:
        /* only drop the references that were actually taken above */
        for (i = 0; i < mapind; i++)
                iio_device_put(chans[i].indio_dev);
        kfree(chans);
error_ret:
        mutex_unlock(&iio_map_list_lock);

        return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(iio_channel_get_all);

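/**
 * iio_channel_release_all() - release channels from iio_channel_get_all()
 * @channels:	array of channels to release
 *
 * Drops the reference taken on each providing device and frees the array.
 */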
void iio_channel_release_all(struct iio_channel *channels)
{
        struct iio_channel *chan = &channels[0];

        while (chan->indio_dev) {
                iio_device_put(chan->indio_dev);
                chan++;
        }
        kfree(channels);
}
EXPORT_SYMBOL_GPL(iio_channel_release_all);

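/*
 * Thin wrapper around the provider's read_raw() callback.  A scratch
 * variable is substituted when the caller does not care about val2, so
 * drivers can always dereference both pointers.
 */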
static int iio_channel_read(struct iio_channel *chan, int *val, int *val2,
                            enum iio_chan_info_enum info)
{
        int unused;

        if (val2 == NULL)
                val2 = &unused;

        return chan->indio_dev->info->read_raw(chan->indio_dev, chan->channel,
                                               val, val2, info);
}

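/**
 * iio_read_channel_raw() - read a raw value from a channel
 * @chan:	channel to read
 * @val:	result in device-specific raw units
 *
 * Returns the IIO_VAL_* type reported by the driver on success, or a
 * negative errno, including -ENODEV if the providing driver has gone away.
 */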
int iio_read_channel_raw(struct iio_channel *chan, int *val)
{
        int ret;

        mutex_lock(&chan->indio_dev->info_exist_lock);
        if (chan->indio_dev->info == NULL) {
                ret = -ENODEV;
                goto err_unlock;
        }

        ret = iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_RAW);
err_unlock:
        mutex_unlock(&chan->indio_dev->info_exist_lock);

        return ret;
}
EXPORT_SYMBOL_GPL(iio_read_channel_raw);

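/*
 * Convert a raw reading into a processed value:
 * processed = (raw + offset) * channel scale * @scale, where the extra
 * multiplier @scale lets the caller pick the output unit.  Worked example
 * with hypothetical numbers: an ADC reporting raw = 100, no offset and a
 * fractional scale of 2500/4096 gives (100 * 2500 * 1) / 4096 = 61 with
 * scale = 1, or 61035 with scale = 1000.
 */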
static int iio_convert_raw_to_processed_unlocked(struct iio_channel *chan,
        int raw, int *processed, unsigned int scale)
{
        int scale_type, scale_val, scale_val2, offset;
        s64 raw64 = raw;
        int ret;

        /* apply the channel offset, if the driver provides one */
        ret = iio_channel_read(chan, &offset, NULL, IIO_CHAN_INFO_OFFSET);
        if (ret >= 0)
                raw64 += offset;

        scale_type = iio_channel_read(chan, &scale_val, &scale_val2,
                                      IIO_CHAN_INFO_SCALE);
        if (scale_type < 0)
                return scale_type;

        switch (scale_type) {
        case IIO_VAL_INT:
                *processed = raw64 * scale_val;
                break;
        case IIO_VAL_INT_PLUS_MICRO:
                if (scale_val2 < 0)
                        *processed = -raw64 * scale_val;
                else
                        *processed = raw64 * scale_val;
                *processed += div_s64(raw64 * (s64)scale_val2 * scale,
                                      1000000LL);
                break;
        case IIO_VAL_INT_PLUS_NANO:
                if (scale_val2 < 0)
                        *processed = -raw64 * scale_val;
                else
                        *processed = raw64 * scale_val;
                *processed += div_s64(raw64 * (s64)scale_val2 * scale,
                                      1000000000LL);
                break;
        case IIO_VAL_FRACTIONAL:
                *processed = div_s64(raw64 * (s64)scale_val * scale,
                                     scale_val2);
                break;
        case IIO_VAL_FRACTIONAL_LOG2:
                *processed = (raw64 * (s64)scale_val * scale) >> scale_val2;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

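/**
 * iio_convert_raw_to_processed() - convert a raw reading to a processed value
 * @chan:	channel the raw value was read from
 * @raw:	raw value to convert
 * @processed:	result after applying the channel's offset and scale
 * @scale:	additional multiplier applied on top of the channel scale
 *
 * Returns 0 on success or a negative errno.
 */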
int iio_convert_raw_to_processed(struct iio_channel *chan, int raw,
        int *processed, unsigned int scale)
{
        int ret;

        mutex_lock(&chan->indio_dev->info_exist_lock);
        if (chan->indio_dev->info == NULL) {
                ret = -ENODEV;
                goto err_unlock;
        }

        ret = iio_convert_raw_to_processed_unlocked(chan, raw, processed,
                                                    scale);
err_unlock:
        mutex_unlock(&chan->indio_dev->info_exist_lock);

        return ret;
}
EXPORT_SYMBOL_GPL(iio_convert_raw_to_processed);

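/**
 * iio_read_channel_processed() - read a processed value from a channel
 * @chan:	channel to read
 * @val:	result in the channel type's standard unit
 *
 * Uses IIO_CHAN_INFO_PROCESSED when the driver provides it, otherwise reads
 * the raw value and converts it with the channel's offset and scale.
 * Returns a non-negative value on success or a negative errno.
 */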
int iio_read_channel_processed(struct iio_channel *chan, int *val)
{
        int ret;

        mutex_lock(&chan->indio_dev->info_exist_lock);
        if (chan->indio_dev->info == NULL) {
                ret = -ENODEV;
                goto err_unlock;
        }

        if (iio_channel_has_info(chan->channel, IIO_CHAN_INFO_PROCESSED)) {
                ret = iio_channel_read(chan, val, NULL,
                                       IIO_CHAN_INFO_PROCESSED);
        } else {
                ret = iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_RAW);
                if (ret < 0)
                        goto err_unlock;
                ret = iio_convert_raw_to_processed_unlocked(chan, *val, val, 1);
        }

err_unlock:
        mutex_unlock(&chan->indio_dev->info_exist_lock);

        return ret;
}
EXPORT_SYMBOL_GPL(iio_read_channel_processed);

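/**
 * iio_read_channel_scale() - read the scale factor for a channel
 * @chan:	channel to query
 * @val:	integer part of the scale
 * @val2:	fractional part; its interpretation depends on the returned type
 *
 * Returns the IIO_VAL_* type describing how @val and @val2 combine, or a
 * negative errno.
 */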
int iio_read_channel_scale(struct iio_channel *chan, int *val, int *val2)
{
        int ret;

        mutex_lock(&chan->indio_dev->info_exist_lock);
        if (chan->indio_dev->info == NULL) {
                ret = -ENODEV;
                goto err_unlock;
        }

        ret = iio_channel_read(chan, val, val2, IIO_CHAN_INFO_SCALE);
err_unlock:
        mutex_unlock(&chan->indio_dev->info_exist_lock);

        return ret;
}
EXPORT_SYMBOL_GPL(iio_read_channel_scale);

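/**
 * iio_get_channel_type() - get the type of a channel
 * @chan:	channel to query
 * @type:	set to the channel's iio_chan_type (IIO_VOLTAGE, IIO_TEMP, ...)
 *
 * Returns 0 on success or -ENODEV if the providing driver has gone away.
 */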
int iio_get_channel_type(struct iio_channel *chan, enum iio_chan_type *type)
{
        int ret = 0;

        /* Need to verify underlying driver has not gone away */
        mutex_lock(&chan->indio_dev->info_exist_lock);
        if (chan->indio_dev->info == NULL) {
                ret = -ENODEV;
                goto err_unlock;
        }

        *type = chan->channel->type;
err_unlock:
        mutex_unlock(&chan->indio_dev->info_exist_lock);

        return ret;
}
EXPORT_SYMBOL_GPL(iio_get_channel_type);
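
/*
 * Usage sketch (not part of this file): how a board file and a consumer
 * driver might use this API.  The names "some-consumer", "vbat" and
 * "channel0" below are hypothetical.
 *
 *	static struct iio_map board_map[] = {
 *		{
 *			.consumer_dev_name = "some-consumer",
 *			.consumer_channel = "vbat",
 *			.adc_channel_label = "channel0",
 *		},
 *		{ },
 *	};
 *
 * The ADC driver calls iio_map_array_register(indio_dev, board_map) after
 * registering its iio_dev; "channel0" must match a channel's
 * datasheet_name.  The consumer, whose device name is "some-consumer",
 * then does:
 *
 *	struct iio_channel *chan = iio_channel_get(dev, "vbat");
 *	int value;
 *
 *	if (!IS_ERR(chan) && iio_read_channel_processed(chan, &value) >= 0)
 *		... use value ...
 *	iio_channel_release(chan);
 */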