/*
 * Industrial I/O in-kernel buffer callback consumer helpers.
 * (drivers/iio/buffer_cb.c)
 */
1 #include <linux/kernel.h>
2 #include <linux/slab.h>
3 #include <linux/err.h>
4 #include <linux/export.h>
5 #include <linux/iio/buffer.h>
6 #include <linux/iio/consumer.h>
7
/*
 * struct iio_cb_buffer - state for an in-kernel buffer callback consumer.
 * @buffer:   embedded IIO buffer; the ops recover this struct via
 *            container_of(), so it must stay the first member's home here.
 * @cb:       consumer callback invoked with each raw scan delivered to
 *            the buffer.
 * @private:  opaque context handed back to @cb unchanged.
 * @channels: channel array obtained from iio_channel_get_all();
 *            terminated by an entry whose indio_dev is NULL.
 */
struct iio_cb_buffer {
	struct iio_buffer buffer;
	int (*cb)(u8 *data, void *private);
	void *private;
	struct iio_channel *channels;
};
14
15 static int iio_buffer_cb_store_to(struct iio_buffer *buffer, u8 *data)
16 {
17         struct iio_cb_buffer *cb_buff = container_of(buffer,
18                                                      struct iio_cb_buffer,
19                                                      buffer);
20
21         return cb_buff->cb(data, cb_buff->private);
22 }
23
/*
 * Only .store_to is needed: incoming scans are forwarded directly to the
 * consumer callback, so no read-side access functions are provided.
 */
static struct iio_buffer_access_funcs iio_cb_access = {
	.store_to = &iio_buffer_cb_store_to,
};
27
28 struct iio_cb_buffer *iio_channel_get_all_cb(struct device *dev,
29                                              int (*cb)(u8 *data,
30                                                        void *private),
31                                              void *private)
32 {
33         int ret;
34         struct iio_cb_buffer *cb_buff;
35         struct iio_dev *indio_dev;
36         struct iio_channel *chan;
37
38         cb_buff = kzalloc(sizeof(*cb_buff), GFP_KERNEL);
39         if (cb_buff == NULL) {
40                 ret = -ENOMEM;
41                 goto error_ret;
42         }
43
44         cb_buff->private = private;
45         cb_buff->cb = cb;
46         cb_buff->buffer.access = &iio_cb_access;
47         INIT_LIST_HEAD(&cb_buff->buffer.demux_list);
48
49         cb_buff->channels = iio_channel_get_all(dev);
50         if (IS_ERR(cb_buff->channels)) {
51                 ret = PTR_ERR(cb_buff->channels);
52                 goto error_free_cb_buff;
53         }
54
55         indio_dev = cb_buff->channels[0].indio_dev;
56         cb_buff->buffer.scan_mask
57                 = kcalloc(BITS_TO_LONGS(indio_dev->masklength), sizeof(long),
58                           GFP_KERNEL);
59         if (cb_buff->buffer.scan_mask == NULL) {
60                 ret = -ENOMEM;
61                 goto error_release_channels;
62         }
63         chan = &cb_buff->channels[0];
64         while (chan->indio_dev) {
65                 if (chan->indio_dev != indio_dev) {
66                         ret = -EINVAL;
67                         goto error_release_channels;
68                 }
69                 set_bit(chan->channel->scan_index,
70                         cb_buff->buffer.scan_mask);
71                 chan++;
72         }
73
74         return cb_buff;
75
76 error_release_channels:
77         iio_channel_release_all(cb_buff->channels);
78 error_free_cb_buff:
79         kfree(cb_buff);
80 error_ret:
81         return ERR_PTR(ret);
82 }
83 EXPORT_SYMBOL_GPL(iio_channel_get_all_cb);
84
85 int iio_channel_start_all_cb(struct iio_cb_buffer *cb_buff)
86 {
87         return iio_update_buffers(cb_buff->channels[0].indio_dev,
88                                   &cb_buff->buffer,
89                                   NULL);
90 }
91 EXPORT_SYMBOL_GPL(iio_channel_start_all_cb);
92
93 void iio_channel_stop_all_cb(struct iio_cb_buffer *cb_buff)
94 {
95         iio_update_buffers(cb_buff->channels[0].indio_dev,
96                            NULL,
97                            &cb_buff->buffer);
98 }
99 EXPORT_SYMBOL_GPL(iio_channel_stop_all_cb);
100
101 void iio_channel_release_all_cb(struct iio_cb_buffer *cb_buff)
102 {
103         iio_channel_release_all(cb_buff->channels);
104         kfree(cb_buff);
105 }
106 EXPORT_SYMBOL_GPL(iio_channel_release_all_cb);
107
108 struct iio_channel
109 *iio_channel_cb_get_channels(const struct iio_cb_buffer *cb_buffer)
110 {
111         return cb_buffer->channels;
112 }
113 EXPORT_SYMBOL_GPL(iio_channel_cb_get_channels);