llseek: automatically add .llseek fop
/* The industrial I/O core
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * Handling of ring allocation / resizing.
 *
 *
 * Things to look at here.
 * - Better memory allocation techniques?
 * - Alternative access techniques?
 */
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/module.h>
#include <linux/cdev.h>
#include <linux/slab.h>

#include "iio.h"
#include "ring_generic.h"
int iio_push_ring_event(struct iio_ring_buffer *ring_buf,
                        int event_code,
                        s64 timestamp)
{
        return __iio_push_event(&ring_buf->ev_int,
                                event_code,
                                timestamp,
                                &ring_buf->shared_ev_pointer);
}
EXPORT_SYMBOL(iio_push_ring_event);

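/*
 * If an event is already queued on the shared event pointer, rewrite it
 * in place with the new code (escalate) instead of queueing a second
 * event; otherwise push a fresh one.
 */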
int iio_push_or_escallate_ring_event(struct iio_ring_buffer *ring_buf,
                                     int event_code,
                                     s64 timestamp)
{
        if (ring_buf->shared_ev_pointer.ev_p)
                __iio_change_event(ring_buf->shared_ev_pointer.ev_p,
                                   event_code,
                                   timestamp);
        else
                return iio_push_ring_event(ring_buf,
                                           event_code,
                                           timestamp);
        return 0;
}
EXPORT_SYMBOL(iio_push_or_escallate_ring_event);

/**
 * iio_ring_open() - chrdev file open for ring buffer access
 *
 * This function relies on all ring buffer implementations having an
 * iio_ring_buffer as their first element.
 **/
static int iio_ring_open(struct inode *inode, struct file *filp)
{
        struct iio_handler *hand
                = container_of(inode->i_cdev, struct iio_handler, chrdev);
        struct iio_ring_buffer *rb = hand->private;

        filp->private_data = hand->private;
        if (rb->access.mark_in_use)
                rb->access.mark_in_use(rb);

        return 0;
}

/**
 * iio_ring_release() - chrdev file close ring buffer access
 *
 * This function relies on all ring buffer implementations having an
 * iio_ring_buffer as their first element.
 **/
static int iio_ring_release(struct inode *inode, struct file *filp)
{
        struct cdev *cd = inode->i_cdev;
        struct iio_handler *hand = iio_cdev_to_handler(cd);
        struct iio_ring_buffer *rb = hand->private;

        clear_bit(IIO_BUSY_BIT_POS, &rb->access_handler.flags);
        if (rb->access.unmark_in_use)
                rb->access.unmark_in_use(rb);

        return 0;
}

/**
 * iio_ring_rip_outer() - chrdev read for ring buffer access
 *
 * This function relies on all ring buffer implementations having an
 * iio_ring_buffer as their first element.
 **/
static ssize_t iio_ring_rip_outer(struct file *filp, char __user *buf,
                                  size_t count, loff_t *f_ps)
{
        struct iio_ring_buffer *rb = filp->private_data;
        int ret, dead_offset, copied;
        u8 *data;
        /* The ring must supply a bulk read (rip_lots) implementation. */
        if (!rb->access.rip_lots)
                return -EINVAL;
        copied = rb->access.rip_lots(rb, count, &data, &dead_offset);

        if (copied < 0) {
                ret = copied;
                goto error_ret;
        }
        /* The first dead_offset bytes of the returned block are stale
         * and skipped; copied is the number of valid bytes after them. */
        if (copy_to_user(buf, data + dead_offset, copied)) {
                ret = -EFAULT;
                goto error_free_data_cpy;
        }
        /* In clever ring buffer designs this may not need to be freed.
         * When such a design exists I'll add this to ring access funcs.
         */
        kfree(data);

        return copied;

error_free_data_cpy:
        kfree(data);
error_ret:
        return ret;
}

static const struct file_operations iio_ring_fileops = {
        .read = iio_ring_rip_outer,
        .release = iio_ring_release,
        .open = iio_ring_open,
        .owner = THIS_MODULE,
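        /* Reads are destructive and the chrdev is not seekable; the
         * tree-wide llseek sweep this file was touched by uses
         * noop_llseek, which succeeds without moving the (unused)
         * file position. */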
        .llseek = noop_llseek,
};

/**
 * __iio_request_ring_buffer_event_chrdev() - allocate ring event chrdev
 * @buf: ring buffer whose event chrdev we are allocating
 * @id: id of this event chrdev (used in its name)
 * @owner: the module who owns the ring buffer (for ref counting)
 * @dev: device with which the chrdev is associated
 **/
static inline int
__iio_request_ring_buffer_event_chrdev(struct iio_ring_buffer *buf,
                                       int id,
                                       struct module *owner,
                                       struct device *dev)
{
        int ret;

        snprintf(buf->ev_int._name, sizeof(buf->ev_int._name),
                 "%s:event%d",
                 dev_name(&buf->dev),
                 id);
        ret = iio_setup_ev_int(&(buf->ev_int),
                               buf->ev_int._name,
                               owner,
                               dev);
        if (ret)
                goto error_ret;
        return 0;

error_ret:
        return ret;
}

static inline void
__iio_free_ring_buffer_event_chrdev(struct iio_ring_buffer *buf)
{
        iio_free_ev_int(&(buf->ev_int));
}

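/*
 * Release callback for the access device: runs once the last reference
 * to access_dev is dropped, so the cdev removal and minor-number release
 * are driven by device refcounting rather than invoked directly.
 */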
static void iio_ring_access_release(struct device *dev)
{
        struct iio_ring_buffer *buf
                = access_dev_to_iio_ring_buffer(dev);
        cdev_del(&buf->access_handler.chrdev);
        iio_device_free_chrdev_minor(MINOR(dev->devt));
}

static struct device_type iio_ring_access_type = {
        .release = iio_ring_access_release,
};

static inline int
__iio_request_ring_buffer_access_chrdev(struct iio_ring_buffer *buf,
                                        int id,
                                        struct module *owner)
{
        int ret, minor;

        buf->access_handler.flags = 0;

        buf->access_dev.parent = &buf->dev;
        buf->access_dev.bus = &iio_bus_type;
        buf->access_dev.type = &iio_ring_access_type;
        device_initialize(&buf->access_dev);

        minor = iio_device_get_chrdev_minor();
        if (minor < 0) {
                ret = minor;
                goto error_device_put;
        }
        buf->access_dev.devt = MKDEV(MAJOR(iio_devt), minor);

        buf->access_id = id;

        dev_set_name(&buf->access_dev, "%s:access%d",
                     dev_name(&buf->dev),
                     buf->access_id);
        ret = device_add(&buf->access_dev);
        if (ret < 0) {
                printk(KERN_ERR "failed to add the ring access dev\n");
                goto error_device_put;
        }

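        /* Register the cdev only once the device itself has been added;
         * its removal happens from the release callback above. */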
        cdev_init(&buf->access_handler.chrdev, &iio_ring_fileops);
        buf->access_handler.chrdev.owner = owner;

        ret = cdev_add(&buf->access_handler.chrdev, buf->access_dev.devt, 1);
        if (ret) {
                printk(KERN_ERR "failed to allocate ring access chrdev\n");
                goto error_device_unregister;
        }
        return 0;

error_device_unregister:
        device_unregister(&buf->access_dev);
error_device_put:
        put_device(&buf->access_dev);

        return ret;
}

static void __iio_free_ring_buffer_access_chrdev(struct iio_ring_buffer *buf)
{
        device_unregister(&buf->access_dev);
}

void iio_ring_buffer_init(struct iio_ring_buffer *ring,
                          struct iio_dev *dev_info)
{
        if (ring->access.mark_param_change)
                ring->access.mark_param_change(ring);
        ring->indio_dev = dev_info;
        ring->ev_int.private = ring;
        ring->access_handler.private = ring;
        ring->shared_ev_pointer.ev_p = NULL;
        spin_lock_init(&ring->shared_ev_pointer.lock);
}
EXPORT_SYMBOL(iio_ring_buffer_init);

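/*
 * Typical driver-side flow (a minimal sketch, not taken from this file):
 * a ring implementation (e.g. ring_sw) allocates the buffer and calls
 * iio_ring_buffer_init() itself; the driver then exposes the ring with
 * iio_ring_buffer_register() and tears it down on remove (the error
 * label below is illustrative):
 *
 *      ret = iio_ring_buffer_register(indio_dev->ring, 0);
 *      if (ret)
 *              goto error_free_ring;
 *      ...
 *      iio_ring_buffer_unregister(indio_dev->ring);
 */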
int iio_ring_buffer_register(struct iio_ring_buffer *ring, int id)
{
        int ret;

        ring->id = id;

        dev_set_name(&ring->dev, "%s:buffer%d",
                     dev_name(ring->dev.parent),
                     ring->id);
        ret = device_add(&ring->dev);
        if (ret)
                goto error_ret;

        ret = __iio_request_ring_buffer_event_chrdev(ring,
                                                     0,
                                                     ring->owner,
                                                     &ring->dev);
        if (ret)
                goto error_remove_device;

        ret = __iio_request_ring_buffer_access_chrdev(ring,
                                                      0,
                                                      ring->owner);

        if (ret)
                goto error_free_ring_buffer_event_chrdev;

        return ret;
error_free_ring_buffer_event_chrdev:
        __iio_free_ring_buffer_event_chrdev(ring);
error_remove_device:
        device_del(&ring->dev);
error_ret:
        return ret;
}
EXPORT_SYMBOL(iio_ring_buffer_register);

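/* Tear everything down in the reverse order of iio_ring_buffer_register(). */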
void iio_ring_buffer_unregister(struct iio_ring_buffer *ring)
{
        __iio_free_ring_buffer_access_chrdev(ring);
        __iio_free_ring_buffer_event_chrdev(ring);
        device_del(&ring->dev);
}
EXPORT_SYMBOL(iio_ring_buffer_unregister);

ssize_t iio_read_ring_length(struct device *dev,
                             struct device_attribute *attr,
                             char *buf)
{
        int len = 0;
        struct iio_ring_buffer *ring = dev_get_drvdata(dev);

        if (ring->access.get_length)
                len = sprintf(buf, "%d\n",
                              ring->access.get_length(ring));

        return len;
}
EXPORT_SYMBOL(iio_read_ring_length);

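/*
 * The length handlers back a sysfs attribute on the ring's device; the
 * attribute name is chosen where drivers declare it (commonly "length",
 * though that is a convention, not set here).  Assuming such a name:
 *
 *      cat /sys/.../<parent>:buffer0/length
 *      echo 128 > /sys/.../<parent>:buffer0/length
 *
 * Writes are parsed as a base-10 unsigned long; rewriting the current
 * length returns early without touching the ring.
 */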
ssize_t iio_write_ring_length(struct device *dev,
                              struct device_attribute *attr,
                              const char *buf,
                              size_t len)
{
        int ret;
        ulong val;
        struct iio_ring_buffer *ring = dev_get_drvdata(dev);

        ret = strict_strtoul(buf, 10, &val);
        if (ret)
                return ret;

        if (ring->access.get_length)
                if (val == ring->access.get_length(ring))
                        return len;

        if (ring->access.set_length) {
                ring->access.set_length(ring, val);
                if (ring->access.mark_param_change)
                        ring->access.mark_param_change(ring);
        }

        return len;
}
EXPORT_SYMBOL(iio_write_ring_length);

ssize_t iio_read_ring_bps(struct device *dev,
                          struct device_attribute *attr,
                          char *buf)
{
        int len = 0;
        struct iio_ring_buffer *ring = dev_get_drvdata(dev);

        if (ring->access.get_bpd)
                len = sprintf(buf, "%d\n",
                              ring->access.get_bpd(ring));

        return len;
}
EXPORT_SYMBOL(iio_read_ring_bps);

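/*
 * Enabling walks: preenable -> request_update -> mark_in_use -> pick a
 * ring mode (triggered mode requires a trigger to be set) -> postenable,
 * undoing the earlier steps on any failure.  Disabling mirrors it:
 * predisable -> unmark_in_use -> back to INDIO_DIRECT_MODE -> postdisable.
 */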
ssize_t iio_store_ring_enable(struct device *dev,
                              struct device_attribute *attr,
                              const char *buf,
                              size_t len)
{
        int ret;
        bool requested_state, current_state;
        int previous_mode;
        struct iio_ring_buffer *ring = dev_get_drvdata(dev);
        struct iio_dev *dev_info = ring->indio_dev;

        mutex_lock(&dev_info->mlock);
        previous_mode = dev_info->currentmode;
        requested_state = !(buf[0] == '0');
        current_state = !!(previous_mode & INDIO_ALL_RING_MODES);
        if (current_state == requested_state) {
                printk(KERN_INFO "iio-ring, current state requested again\n");
                goto done;
        }
        if (requested_state) {
                if (ring->preenable) {
                        ret = ring->preenable(dev_info);
                        if (ret) {
                                printk(KERN_ERR
                                       "Buffer not started: "
                                       "ring preenable failed\n");
                                goto error_ret;
                        }
                }
                if (ring->access.request_update) {
                        ret = ring->access.request_update(ring);
                        if (ret) {
                                printk(KERN_INFO
                                       "Buffer not started: "
                                       "ring parameter update failed\n");
                                goto error_ret;
                        }
                }
                if (ring->access.mark_in_use)
                        ring->access.mark_in_use(ring);
                /* Definitely possible for devices to support both of these. */
                if (dev_info->modes & INDIO_RING_TRIGGERED) {
                        if (!dev_info->trig) {
                                printk(KERN_INFO
                                       "Buffer not started: no trigger\n");
                                ret = -EINVAL;
                                if (ring->access.unmark_in_use)
                                        ring->access.unmark_in_use(ring);
                                goto error_ret;
                        }
                        dev_info->currentmode = INDIO_RING_TRIGGERED;
                } else if (dev_info->modes & INDIO_RING_HARDWARE_BUFFER) {
                        dev_info->currentmode = INDIO_RING_HARDWARE_BUFFER;
                } else { /* should never be reached */
                        ret = -EINVAL;
                        goto error_ret;
                }

                if (ring->postenable) {
                        ret = ring->postenable(dev_info);
                        if (ret) {
                                printk(KERN_INFO
                                       "Buffer not started: "
                                       "postenable failed\n");
                                if (ring->access.unmark_in_use)
                                        ring->access.unmark_in_use(ring);
                                dev_info->currentmode = previous_mode;
                                if (ring->postdisable)
                                        ring->postdisable(dev_info);
                                goto error_ret;
                        }
                }
        } else {
                if (ring->predisable) {
                        ret = ring->predisable(dev_info);
                        if (ret)
                                goto error_ret;
                }
                if (ring->access.unmark_in_use)
                        ring->access.unmark_in_use(ring);
                dev_info->currentmode = INDIO_DIRECT_MODE;
                if (ring->postdisable) {
                        ret = ring->postdisable(dev_info);
                        if (ret)
                                goto error_ret;
                }
        }
done:
        mutex_unlock(&dev_info->mlock);
        return len;

error_ret:
        mutex_unlock(&dev_info->mlock);
        return ret;
}
EXPORT_SYMBOL(iio_store_ring_enable);

ssize_t iio_show_ring_enable(struct device *dev,
                             struct device_attribute *attr,
                             char *buf)
{
        struct iio_ring_buffer *ring = dev_get_drvdata(dev);

        return sprintf(buf, "%d\n", !!(ring->indio_dev->currentmode
                                       & INDIO_ALL_RING_MODES));
}
EXPORT_SYMBOL(iio_show_ring_enable);

ssize_t iio_scan_el_show(struct device *dev,
                         struct device_attribute *attr,
                         char *buf)
{
        int ret;
        struct iio_dev *indio_dev = dev_get_drvdata(dev);
        struct iio_scan_el *this_el = to_iio_scan_el(attr);

        ret = iio_scan_mask_query(indio_dev, this_el->number);
        if (ret < 0)
                return ret;
        return sprintf(buf, "%d\n", ret);
}
EXPORT_SYMBOL(iio_scan_el_show);

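/*
 * Toggle one element in the device scan mask.  Refused with -EBUSY while
 * a triggered ring is running; scan_count tracks how many elements are
 * currently enabled.
 */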
ssize_t iio_scan_el_store(struct device *dev,
                          struct device_attribute *attr,
                          const char *buf,
                          size_t len)
{
        int ret = 0;
        bool state;
        struct iio_dev *indio_dev = dev_get_drvdata(dev);
        struct iio_scan_el *this_el = to_iio_scan_el(attr);

        state = !(buf[0] == '0');
        mutex_lock(&indio_dev->mlock);
        if (indio_dev->currentmode == INDIO_RING_TRIGGERED) {
                ret = -EBUSY;
                goto error_ret;
        }
        ret = iio_scan_mask_query(indio_dev, this_el->number);
        if (ret < 0)
                goto error_ret;
        if (!state && ret) {
                ret = iio_scan_mask_clear(indio_dev, this_el->number);
                if (ret)
                        goto error_ret;
                indio_dev->scan_count--;
        } else if (state && !ret) {
                ret = iio_scan_mask_set(indio_dev, this_el->number);
                if (ret)
                        goto error_ret;
                indio_dev->scan_count++;
        }
        if (this_el->set_state)
                ret = this_el->set_state(this_el, indio_dev, state);
error_ret:
        mutex_unlock(&indio_dev->mlock);

        return ret ? ret : len;
}
EXPORT_SYMBOL(iio_scan_el_store);

ssize_t iio_scan_el_ts_show(struct device *dev,
                            struct device_attribute *attr,
                            char *buf)
{
        struct iio_dev *indio_dev = dev_get_drvdata(dev);

        return sprintf(buf, "%d\n", indio_dev->scan_timestamp);
}
EXPORT_SYMBOL(iio_scan_el_ts_show);

ssize_t iio_scan_el_ts_store(struct device *dev,
                             struct device_attribute *attr,
                             const char *buf,
                             size_t len)
{
        int ret = 0;
        struct iio_dev *indio_dev = dev_get_drvdata(dev);
        bool state;

        state = !(buf[0] == '0');
        mutex_lock(&indio_dev->mlock);
        if (indio_dev->currentmode == INDIO_RING_TRIGGERED) {
                ret = -EBUSY;
                goto error_ret;
        }
        indio_dev->scan_timestamp = state;
error_ret:
        mutex_unlock(&indio_dev->mlock);

        return ret ? ret : len;
}
EXPORT_SYMBOL(iio_scan_el_ts_store);