/* The industrial I/O core
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * Handling of ring allocation / resizing.
 *
 *
 * Things to look at here.
 * - Better memory allocation techniques?
 * - Alternative access techniques?
 */
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/module.h>
#include <linux/cdev.h>
#include <linux/slab.h>

#include "iio.h"
#include "ring_generic.h"

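/**
 * iio_push_ring_event() - push an event on a ring buffer's event interface
 * @ring_buf:   ring buffer on whose event chrdev the event is pushed
 * @event_code: code identifying the event
 * @timestamp:  timestamp of the event
 *
 * Passes the event through to the ring buffer's event interface together
 * with the ring's shared event pointer.
 **/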
int iio_push_ring_event(struct iio_ring_buffer *ring_buf,
                        int event_code,
                        s64 timestamp)
{
        return __iio_push_event(&ring_buf->ev_int,
                                event_code,
                                timestamp,
                                &ring_buf->shared_ev_pointer);
}
EXPORT_SYMBOL(iio_push_ring_event);

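/**
 * iio_push_or_escallate_ring_event() - escalate or push an event
 * @ring_buf:   ring buffer on whose event chrdev the event is pushed
 * @event_code: code identifying the event
 * @timestamp:  timestamp of the event
 *
 * If a shared event is already pending its code is changed in place,
 * otherwise a new event is pushed via iio_push_ring_event().
 **/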
int iio_push_or_escallate_ring_event(struct iio_ring_buffer *ring_buf,
                                     int event_code,
                                     s64 timestamp)
{
        if (ring_buf->shared_ev_pointer.ev_p)
                __iio_change_event(ring_buf->shared_ev_pointer.ev_p,
                                   event_code,
                                   timestamp);
        else
                return iio_push_ring_event(ring_buf,
                                           event_code,
                                           timestamp);
        return 0;
}
EXPORT_SYMBOL(iio_push_or_escallate_ring_event);

/**
 * iio_ring_open() - chrdev file open for ring buffer access
 *
 * This function relies on all ring buffer implementations having an
 * iio_ring_buffer as their first element.
 **/
static int iio_ring_open(struct inode *inode, struct file *filp)
{
        struct iio_handler *hand
                = container_of(inode->i_cdev, struct iio_handler, chrdev);
        struct iio_ring_buffer *rb = hand->private;

        filp->private_data = hand->private;
        if (rb->access.mark_in_use)
                rb->access.mark_in_use(rb);

        return 0;
}

/**
 * iio_ring_release() - chrdev file close for ring buffer access
 *
 * This function relies on all ring buffer implementations having an
 * iio_ring_buffer as their first element.
 **/
static int iio_ring_release(struct inode *inode, struct file *filp)
{
        struct cdev *cd = inode->i_cdev;
        struct iio_handler *hand = iio_cdev_to_handler(cd);
        struct iio_ring_buffer *rb = hand->private;

        clear_bit(IIO_BUSY_BIT_POS, &rb->access_handler.flags);
        if (rb->access.unmark_in_use)
                rb->access.unmark_in_use(rb);

        return 0;
}

/**
 * iio_ring_rip_outer() - chrdev read for ring buffer access
 *
 * This function relies on all ring buffer implementations having an
 * iio_ring_buffer as their first element.
 **/
static ssize_t iio_ring_rip_outer(struct file *filp, char __user *buf,
                                  size_t count, loff_t *f_ps)
{
        struct iio_ring_buffer *rb = filp->private_data;
        int ret, dead_offset, copied;
        u8 *data;
        /* rip lots must exist. */
        if (!rb->access.rip_lots)
                return -EINVAL;
        copied = rb->access.rip_lots(rb, count, &data, &dead_offset);

        if (copied < 0) {
                ret = copied;
                goto error_ret;
        }
        if (copy_to_user(buf, data + dead_offset, copied)) {
                ret = -EFAULT;
                goto error_free_data_cpy;
        }
        /* In clever ring buffer designs this may not need to be freed.
         * When such a design exists I'll add this to ring access funcs.
         */
        kfree(data);

        return copied;

error_free_data_cpy:
        kfree(data);
error_ret:
        return ret;
}

static const struct file_operations iio_ring_fileops = {
        .read = iio_ring_rip_outer,
        .release = iio_ring_release,
        .open = iio_ring_open,
        .owner = THIS_MODULE,
};

/**
 * __iio_request_ring_buffer_event_chrdev() - allocate ring event chrdev
 * @buf:        ring buffer whose event chrdev we are allocating
 * @id:         id used to name the event chrdev
 * @owner:      the module who owns the ring buffer (for ref counting)
 * @dev:        device with which the chrdev is associated
 **/
static inline int
__iio_request_ring_buffer_event_chrdev(struct iio_ring_buffer *buf,
                                       int id,
                                       struct module *owner,
                                       struct device *dev)
{
        int ret;

        snprintf(buf->ev_int._name, sizeof(buf->ev_int._name),
                 "%s:event%d",
                 dev_name(&buf->dev),
                 id);
        ret = iio_setup_ev_int(&(buf->ev_int),
                               buf->ev_int._name,
                               owner,
                               dev);
        if (ret)
                goto error_ret;
        return 0;

error_ret:
        return ret;
}

static inline void
__iio_free_ring_buffer_event_chrdev(struct iio_ring_buffer *buf)
{
        iio_free_ev_int(&(buf->ev_int));
}

static void iio_ring_access_release(struct device *dev)
{
        struct iio_ring_buffer *buf
                = access_dev_to_iio_ring_buffer(dev);
        cdev_del(&buf->access_handler.chrdev);
        iio_device_free_chrdev_minor(MINOR(dev->devt));
}

static struct device_type iio_ring_access_type = {
        .release = iio_ring_access_release,
};

static inline int
__iio_request_ring_buffer_access_chrdev(struct iio_ring_buffer *buf,
                                        int id,
                                        struct module *owner)
{
        int ret, minor;

        buf->access_handler.flags = 0;

        buf->access_dev.parent = &buf->dev;
        buf->access_dev.bus = &iio_bus_type;
        buf->access_dev.type = &iio_ring_access_type;
        device_initialize(&buf->access_dev);

        minor = iio_device_get_chrdev_minor();
        if (minor < 0) {
                ret = minor;
                goto error_device_put;
        }
        buf->access_dev.devt = MKDEV(MAJOR(iio_devt), minor);

        buf->access_id = id;

        dev_set_name(&buf->access_dev, "%s:access%d",
                     dev_name(&buf->dev),
                     buf->access_id);
        ret = device_add(&buf->access_dev);
        if (ret < 0) {
                printk(KERN_ERR "failed to add the ring access dev\n");
                goto error_device_put;
        }

        cdev_init(&buf->access_handler.chrdev, &iio_ring_fileops);
        buf->access_handler.chrdev.owner = owner;

        ret = cdev_add(&buf->access_handler.chrdev, buf->access_dev.devt, 1);
        if (ret) {
                printk(KERN_ERR "failed to allocate ring access chrdev\n");
                goto error_device_del;
        }
        return 0;

error_device_del:
        /* Drop the registration from device_add(); the reference taken by
         * device_initialize() is released by the put_device() below. */
        device_del(&buf->access_dev);
error_device_put:
        put_device(&buf->access_dev);

        return ret;
}

static void __iio_free_ring_buffer_access_chrdev(struct iio_ring_buffer *buf)
{
        device_unregister(&buf->access_dev);
}

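/**
 * iio_ring_buffer_init() - minimal state initialization for a ring buffer
 * @ring:       ring buffer being initialized
 * @dev_info:   IIO device the ring buffer is associated with
 *
 * Associates the ring buffer with its parent IIO device, points the event
 * and access handlers back at the ring, clears the shared event pointer
 * and marks a parameter change if the implementation provides that hook.
 **/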
void iio_ring_buffer_init(struct iio_ring_buffer *ring,
                          struct iio_dev *dev_info)
{
        if (ring->access.mark_param_change)
                ring->access.mark_param_change(ring);
        ring->indio_dev = dev_info;
        ring->ev_int.private = ring;
        ring->access_handler.private = ring;
        ring->shared_ev_pointer.ev_p = NULL;
        spin_lock_init(&ring->shared_ev_pointer.lock);
}
EXPORT_SYMBOL(iio_ring_buffer_init);

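/**
 * iio_ring_buffer_register() - register the ring buffer with the IIO core
 * @ring:       ring buffer to register
 * @id:         index of the ring buffer within its parent device
 *
 * Adds the ring buffer device itself, then its event and access chrdevs.
 * Anything set up so far is torn down again on failure.
 * Returns 0 on success or a negative error code.
 **/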
int iio_ring_buffer_register(struct iio_ring_buffer *ring, int id)
{
        int ret;

        ring->id = id;

        dev_set_name(&ring->dev, "%s:buffer%d",
                     dev_name(ring->dev.parent),
                     ring->id);
        ret = device_add(&ring->dev);
        if (ret)
                goto error_ret;

        ret = __iio_request_ring_buffer_event_chrdev(ring,
                                                     0,
                                                     ring->owner,
                                                     &ring->dev);
        if (ret)
                goto error_remove_device;

        ret = __iio_request_ring_buffer_access_chrdev(ring,
                                                      0,
                                                      ring->owner);

        if (ret)
                goto error_free_ring_buffer_event_chrdev;

        return ret;
error_free_ring_buffer_event_chrdev:
        __iio_free_ring_buffer_event_chrdev(ring);
error_remove_device:
        device_del(&ring->dev);
error_ret:
        return ret;
}
EXPORT_SYMBOL(iio_ring_buffer_register);

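/**
 * iio_ring_buffer_unregister() - reverse the effects of iio_ring_buffer_register()
 * @ring:       ring buffer to unregister
 **/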
void iio_ring_buffer_unregister(struct iio_ring_buffer *ring)
{
        __iio_free_ring_buffer_access_chrdev(ring);
        __iio_free_ring_buffer_event_chrdev(ring);
        device_del(&ring->dev);
}
EXPORT_SYMBOL(iio_ring_buffer_unregister);

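/**
 * iio_read_ring_length() - sysfs read of the current ring buffer length
 * @dev:        device associated with the attribute
 * @attr:       the device attribute being read
 * @buf:        output buffer for the attribute value
 *
 * Reports the length as provided by the implementation's get_length()
 * callback, or an empty string if no such callback is supplied.
 **/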
ssize_t iio_read_ring_length(struct device *dev,
                             struct device_attribute *attr,
                             char *buf)
{
        int len = 0;
        struct iio_ring_buffer *ring = dev_get_drvdata(dev);

        if (ring->access.get_length)
                len = sprintf(buf, "%d\n",
                              ring->access.get_length(ring));

        return len;
}
EXPORT_SYMBOL(iio_read_ring_length);

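/**
 * iio_write_ring_length() - sysfs write of the ring buffer length
 * @dev:        device associated with the attribute
 * @attr:       the device attribute being written
 * @buf:        input buffer containing the requested length
 * @len:        length of the input buffer
 *
 * If the requested length differs from the current one it is passed to the
 * implementation's set_length() callback and a parameter change is flagged
 * if that hook is provided.
 **/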
ssize_t iio_write_ring_length(struct device *dev,
                              struct device_attribute *attr,
                              const char *buf,
                              size_t len)
{
        int ret;
        ulong val;
        struct iio_ring_buffer *ring = dev_get_drvdata(dev);

        ret = strict_strtoul(buf, 10, &val);
        if (ret)
                return ret;

        if (ring->access.get_length)
                if (val == ring->access.get_length(ring))
                        return len;

        if (ring->access.set_length) {
                ring->access.set_length(ring, val);
                if (ring->access.mark_param_change)
                        ring->access.mark_param_change(ring);
        }

        return len;
}
EXPORT_SYMBOL(iio_write_ring_length);

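/**
 * iio_read_ring_bps() - sysfs read of the bytes per datum
 * @dev:        device associated with the attribute
 * @attr:       the device attribute being read
 * @buf:        output buffer for the attribute value
 *
 * Reports the value returned by the implementation's get_bpd() callback,
 * or an empty string if no such callback is supplied.
 **/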
ssize_t iio_read_ring_bps(struct device *dev,
                          struct device_attribute *attr,
                          char *buf)
{
        int len = 0;
        struct iio_ring_buffer *ring = dev_get_drvdata(dev);

        if (ring->access.get_bpd)
                len = sprintf(buf, "%d\n",
                              ring->access.get_bpd(ring));

        return len;
}
EXPORT_SYMBOL(iio_read_ring_bps);

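/**
 * iio_store_ring_enable() - sysfs write enabling or disabling ring capture
 * @dev:        device associated with the attribute
 * @attr:       the device attribute being written
 * @buf:        input buffer, "0" to disable, anything else to enable
 * @len:        length of the input buffer
 *
 * When enabling, runs the ring's preenable and request_update hooks,
 * switches the device into a ring mode and calls postenable. When
 * disabling, runs predisable, drops back to direct mode and calls
 * postdisable. Partial setup is unwound on failure.
 **/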
ssize_t iio_store_ring_enable(struct device *dev,
                              struct device_attribute *attr,
                              const char *buf,
                              size_t len)
{
        int ret;
        bool requested_state, current_state;
        int previous_mode;
        struct iio_ring_buffer *ring = dev_get_drvdata(dev);
        struct iio_dev *dev_info = ring->indio_dev;

        mutex_lock(&dev_info->mlock);
        previous_mode = dev_info->currentmode;
        requested_state = !(buf[0] == '0');
        current_state = !!(previous_mode & INDIO_ALL_RING_MODES);
        if (current_state == requested_state) {
                printk(KERN_INFO "iio-ring, current state requested again\n");
                goto done;
        }
        if (requested_state) {
                if (ring->preenable) {
                        ret = ring->preenable(dev_info);
                        if (ret) {
                                printk(KERN_ERR
                                       "Buffer not started: "
                                       "ring preenable failed\n");
                                goto error_ret;
                        }
                }
                if (ring->access.request_update) {
                        ret = ring->access.request_update(ring);
                        if (ret) {
                                printk(KERN_INFO
                                       "Buffer not started: "
                                       "ring parameter update failed\n");
                                goto error_ret;
                        }
                }
                if (ring->access.mark_in_use)
                        ring->access.mark_in_use(ring);
                /* Definitely possible for devices to support both of these. */
                if (dev_info->modes & INDIO_RING_TRIGGERED) {
                        if (!dev_info->trig) {
                                printk(KERN_INFO
                                       "Buffer not started: no trigger\n");
                                ret = -EINVAL;
                                if (ring->access.unmark_in_use)
                                        ring->access.unmark_in_use(ring);
                                goto error_ret;
                        }
                        dev_info->currentmode = INDIO_RING_TRIGGERED;
                } else if (dev_info->modes & INDIO_RING_HARDWARE_BUFFER) {
                        dev_info->currentmode = INDIO_RING_HARDWARE_BUFFER;
                } else { /* should never be reached */
                        ret = -EINVAL;
                        goto error_ret;
                }

                if (ring->postenable) {
                        ret = ring->postenable(dev_info);
                        if (ret) {
                                printk(KERN_INFO
                                       "Buffer not started: "
                                       "postenable failed\n");
                                if (ring->access.unmark_in_use)
                                        ring->access.unmark_in_use(ring);
                                dev_info->currentmode = previous_mode;
                                if (ring->postdisable)
                                        ring->postdisable(dev_info);
                                goto error_ret;
                        }
                }
        } else {
                if (ring->predisable) {
                        ret = ring->predisable(dev_info);
                        if (ret)
                                goto error_ret;
                }
                if (ring->access.unmark_in_use)
                        ring->access.unmark_in_use(ring);
                dev_info->currentmode = INDIO_DIRECT_MODE;
                if (ring->postdisable) {
                        ret = ring->postdisable(dev_info);
                        if (ret)
                                goto error_ret;
                }
        }
done:
        mutex_unlock(&dev_info->mlock);
        return len;

error_ret:
        mutex_unlock(&dev_info->mlock);
        return ret;
}
EXPORT_SYMBOL(iio_store_ring_enable);

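/**
 * iio_show_ring_enable() - sysfs read reporting whether capture is enabled
 * @dev:        device associated with the attribute
 * @attr:       the device attribute being read
 * @buf:        output buffer for the attribute value
 *
 * Prints 1 if the device is currently in one of the ring modes, 0 otherwise.
 **/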
ssize_t iio_show_ring_enable(struct device *dev,
                             struct device_attribute *attr,
                             char *buf)
{
        struct iio_ring_buffer *ring = dev_get_drvdata(dev);

        return sprintf(buf, "%d\n", !!(ring->indio_dev->currentmode
                                       & INDIO_ALL_RING_MODES));
}
EXPORT_SYMBOL(iio_show_ring_enable);

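/**
 * iio_scan_el_show() - sysfs read reporting whether a scan element is enabled
 * @dev:        device associated with the attribute
 * @attr:       attribute embedded in the relevant iio_scan_el
 * @buf:        output buffer for the attribute value
 *
 * Queries the device scan mask for this element and prints 0 or 1.
 **/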
ssize_t iio_scan_el_show(struct device *dev,
                         struct device_attribute *attr,
                         char *buf)
{
        int ret;
        struct iio_dev *indio_dev = dev_get_drvdata(dev);
        struct iio_scan_el *this_el = to_iio_scan_el(attr);

        ret = iio_scan_mask_query(indio_dev, this_el->number);
        if (ret < 0)
                return ret;
        return sprintf(buf, "%d\n", ret);
}
EXPORT_SYMBOL(iio_scan_el_show);

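/**
 * iio_scan_el_store() - sysfs write enabling or disabling a scan element
 * @dev:        device associated with the attribute
 * @attr:       attribute embedded in the relevant iio_scan_el
 * @buf:        input buffer, "0" to disable, anything else to enable
 * @len:        length of the input buffer
 *
 * Updates the device scan mask and scan element count, then lets the scan
 * element's set_state() callback apply any further side effects. Rejected
 * with -EBUSY while the device is running in triggered ring mode.
 **/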
ssize_t iio_scan_el_store(struct device *dev,
                          struct device_attribute *attr,
                          const char *buf,
                          size_t len)
{
        int ret = 0;
        bool state;
        struct iio_dev *indio_dev = dev_get_drvdata(dev);
        struct iio_scan_el *this_el = to_iio_scan_el(attr);

        state = !(buf[0] == '0');
        mutex_lock(&indio_dev->mlock);
        if (indio_dev->currentmode == INDIO_RING_TRIGGERED) {
                ret = -EBUSY;
                goto error_ret;
        }
        ret = iio_scan_mask_query(indio_dev, this_el->number);
        if (ret < 0)
                goto error_ret;
        if (!state && ret) {
                ret = iio_scan_mask_clear(indio_dev, this_el->number);
                if (ret)
                        goto error_ret;
                indio_dev->scan_count--;
        } else if (state && !ret) {
                ret = iio_scan_mask_set(indio_dev, this_el->number);
                if (ret)
                        goto error_ret;
                indio_dev->scan_count++;
        }
        if (this_el->set_state)
                ret = this_el->set_state(this_el, indio_dev, state);
error_ret:
        mutex_unlock(&indio_dev->mlock);

        return ret ? ret : len;
}
EXPORT_SYMBOL(iio_scan_el_store);

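/**
 * iio_scan_el_ts_show() - sysfs read of whether the timestamp is captured
 * @dev:        device associated with the attribute
 * @attr:       the device attribute being read
 * @buf:        output buffer for the attribute value
 **/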
ssize_t iio_scan_el_ts_show(struct device *dev,
                            struct device_attribute *attr,
                            char *buf)
{
        struct iio_dev *indio_dev = dev_get_drvdata(dev);

        return sprintf(buf, "%d\n", indio_dev->scan_timestamp);
}
EXPORT_SYMBOL(iio_scan_el_ts_show);

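/**
 * iio_scan_el_ts_store() - sysfs write enabling or disabling timestamp capture
 * @dev:        device associated with the attribute
 * @attr:       the device attribute being written
 * @buf:        input buffer, "0" to disable, anything else to enable
 * @len:        length of the input buffer
 *
 * Rejected with -EBUSY while the device is running in triggered ring mode.
 **/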
ssize_t iio_scan_el_ts_store(struct device *dev,
                             struct device_attribute *attr,
                             const char *buf,
                             size_t len)
{
        int ret = 0;
        struct iio_dev *indio_dev = dev_get_drvdata(dev);
        bool state;

        state = !(buf[0] == '0');
        mutex_lock(&indio_dev->mlock);
        if (indio_dev->currentmode == INDIO_RING_TRIGGERED) {
                ret = -EBUSY;
                goto error_ret;
        }
        indio_dev->scan_timestamp = state;
error_ret:
        mutex_unlock(&indio_dev->mlock);

        return ret ? ret : len;
}
EXPORT_SYMBOL(iio_scan_el_ts_store);