/*
 * libiio - Library for interfacing industrial I/O (IIO) devices
 *
 * Copyright (C) 2014-2015 Analog Devices, Inc.
 * Author: Paul Cercueil <paul.cercueil@analog.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 */

#include "iio-config.h"
#include "iio-private.h"

#include <errno.h>
#include <string.h>

static bool device_is_high_speed(const struct iio_device *dev)
{
	/* Little trick: we call the backend's get_buffer() function, which
	 * is currently only implemented in the local backend, with a NULL
	 * pointer. It returns -ENOSYS if the device is not high speed, and
	 * either -EBADF or -EINVAL otherwise. */
	const struct iio_backend_ops *ops = dev->ctx->ops;
	return !!ops->get_buffer &&
		(ops->get_buffer(dev, NULL, 0, NULL, 0) != -ENOSYS);
}

struct iio_buffer * iio_device_create_buffer(const struct iio_device *dev,
		size_t samples_count, bool cyclic)
{
	ssize_t ret = -EINVAL;
	struct iio_buffer *buf;
	ssize_t sample_size = iio_device_get_sample_size(dev);

	if (!sample_size || !samples_count)
		goto err_set_errno;

	if (sample_size < 0) {
		ret = sample_size;
		goto err_set_errno;
	}

	buf = malloc(sizeof(*buf));
	if (!buf) {
		ret = -ENOMEM;
		goto err_set_errno;
	}

	buf->dev_sample_size = (unsigned int) sample_size;
	buf->length = sample_size * samples_count;
	buf->dev = dev;
	buf->mask = calloc(dev->words, sizeof(*buf->mask));
	if (!buf->mask) {
		ret = -ENOMEM;
		goto err_free_buf;
	}

	/* Set the default channel mask to the one used by the device.
	 * While input buffers will erase this as soon as the refill function
	 * is used, it is useful for output buffers, as it permits
	 * iio_buffer_foreach_sample to be used. */
	memcpy(buf->mask, dev->mask, dev->words * sizeof(*buf->mask));

	ret = iio_device_open(dev, samples_count, cyclic);
	if (ret < 0)
		goto err_free_mask;

	buf->dev_is_high_speed = device_is_high_speed(dev);
	if (buf->dev_is_high_speed) {
		/* Dequeue the first buffer, so that buf->buffer is correctly
		 * initialized */
		buf->buffer = NULL;
		if (iio_device_is_tx(dev)) {
			ret = dev->ctx->ops->get_buffer(dev, &buf->buffer,
					buf->length, buf->mask, dev->words);
			if (ret < 0)
				goto err_close_device;
		}
	} else {
		buf->buffer = malloc(buf->length);
		if (!buf->buffer) {
			ret = -ENOMEM;
			goto err_close_device;
		}
	}

	ret = iio_device_get_sample_size_mask(dev, buf->mask, dev->words);
	if (ret < 0)
		goto err_close_device;

	buf->sample_size = (unsigned int) ret;
	buf->data_length = buf->length;
	return buf;

err_close_device:
	iio_device_close(dev);
err_free_mask:
	free(buf->mask);
err_free_buf:
	free(buf);
err_set_errno:
	errno = -(int)ret;
	return NULL;
}
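
/* Illustrative usage sketch (hypothetical client code, not part of this
 * file): create a buffer of 1024 samples on a device that already has at
 * least one channel enabled. On failure the function returns NULL and
 * stores the error code in errno, as set above.
 *
 *	struct iio_buffer *buf = iio_device_create_buffer(dev, 1024, false);
 *	if (!buf) {
 *		fprintf(stderr, "Unable to create buffer: %s\n",
 *				strerror(errno));
 *		return EXIT_FAILURE;
 *	}
 */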

void iio_buffer_destroy(struct iio_buffer *buffer)
{
	iio_device_close(buffer->dev);
	if (!buffer->dev_is_high_speed)
		free(buffer->buffer);
	free(buffer->mask);
	free(buffer);
}

int iio_buffer_get_poll_fd(struct iio_buffer *buffer)
{
	return iio_device_get_poll_fd(buffer->dev);
}

int iio_buffer_set_blocking_mode(struct iio_buffer *buffer, bool blocking)
{
	return iio_device_set_blocking_mode(buffer->dev, blocking);
}
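
/* Illustrative sketch (hypothetical client code): the poll fd exposed
 * above can be combined with non-blocking mode to wait for data with a
 * timeout. Error checking on the setup calls is elided; "buf" is a
 * buffer created earlier with iio_device_create_buffer().
 *
 *	struct pollfd pfd = {
 *		.fd = iio_buffer_get_poll_fd(buf),
 *		.events = POLLIN,
 *	};
 *	iio_buffer_set_blocking_mode(buf, false);
 *	if (poll(&pfd, 1, 1000) > 0 && (pfd.revents & POLLIN))
 *		iio_buffer_refill(buf);
 */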

ssize_t iio_buffer_refill(struct iio_buffer *buffer)
{
	ssize_t read;
	const struct iio_device *dev = buffer->dev;
	ssize_t ret;

	if (buffer->dev_is_high_speed) {
		read = dev->ctx->ops->get_buffer(dev, &buffer->buffer,
				buffer->length, buffer->mask, dev->words);
	} else {
		read = iio_device_read_raw(dev, buffer->buffer, buffer->length,
				buffer->mask, dev->words);
	}

	if (read >= 0) {
		buffer->data_length = read;
		ret = iio_device_get_sample_size_mask(dev, buffer->mask,
				dev->words);
		if (ret < 0)
			return ret;
		buffer->sample_size = (unsigned int) ret;
	}
	return read;
}
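
/* Illustrative sketch (hypothetical client code): a typical capture loop
 * refills the buffer and then walks the data. On success the return value
 * is the number of bytes read, which may be smaller than the buffer
 * length; on failure it is a negative error code.
 *
 *	ssize_t nbytes = iio_buffer_refill(buf);
 *	if (nbytes < 0)
 *		fprintf(stderr, "Refill failed: %s\n",
 *				strerror((int) -nbytes));
 */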

ssize_t iio_buffer_push(struct iio_buffer *buffer)
{
	const struct iio_device *dev = buffer->dev;
	ssize_t ret;

	if (buffer->dev_is_high_speed) {
		void *buf;
		ret = dev->ctx->ops->get_buffer(dev, &buf,
				buffer->data_length, buffer->mask, dev->words);
		if (ret >= 0) {
			buffer->buffer = buf;
			ret = (ssize_t) buffer->data_length;
		}
	} else {
		void *ptr = buffer->buffer;
		size_t tmp_len;

		/* iio_device_write_raw doesn't guarantee that all bytes are
		 * written */
		for (tmp_len = buffer->data_length; tmp_len; ) {
			ret = iio_device_write_raw(dev, ptr, tmp_len);
			if (ret < 0)
				goto out_reset_data_length;

			tmp_len -= ret;
			ptr = (void *) ((uintptr_t) ptr + ret);
		}

		ret = (ssize_t) buffer->data_length;
	}

out_reset_data_length:
	buffer->data_length = buffer->length;
	return ret;
}
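
/* Illustrative sketch (hypothetical client code): an output workflow
 * fills the buffer between pushes, e.g. through iio_buffer_start() and
 * iio_buffer_end(), then submits it. The int16_t sample type is an
 * assumption about the device's sample format.
 *
 *	int16_t *p;
 *	for (p = iio_buffer_start(buf); (void *) p < iio_buffer_end(buf); p++)
 *		*p = 0;
 *	ssize_t nbytes = iio_buffer_push(buf);
 *	if (nbytes < 0)
 *		fprintf(stderr, "Push failed: %s\n", strerror((int) -nbytes));
 */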

ssize_t iio_buffer_push_partial(struct iio_buffer *buffer, size_t samples_count)
{
	size_t new_len = samples_count * buffer->dev_sample_size;

	if (new_len == 0 || new_len > buffer->length)
		return -EINVAL;

	buffer->data_length = new_len;
	return iio_buffer_push(buffer);
}
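
/* Illustrative sketch (hypothetical client code): submit only the first
 * 256 samples of a larger buffer, e.g. for the short final block of a
 * stream. Note the count is in samples, not bytes.
 *
 *	ssize_t nbytes = iio_buffer_push_partial(buf, 256);
 *	if (nbytes < 0)
 *		fprintf(stderr, "Partial push failed: %s\n",
 *				strerror((int) -nbytes));
 */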

ssize_t iio_buffer_foreach_sample(struct iio_buffer *buffer,
		ssize_t (*callback)(const struct iio_channel *,
			void *, size_t, void *), void *d)
{
	uintptr_t ptr = (uintptr_t) buffer->buffer,
		  start = ptr,
		  end = ptr + buffer->data_length;
	const struct iio_device *dev = buffer->dev;
	ssize_t processed = 0;

	if (buffer->sample_size == 0)
		return -EINVAL;

	if (buffer->data_length < buffer->dev_sample_size)
		return 0;

	while (end - ptr >= (size_t) buffer->sample_size) {
		unsigned int i;

		for (i = 0; i < dev->nb_channels; i++) {
			const struct iio_channel *chn = dev->channels[i];
			unsigned int length = chn->format.length / 8;

			if (chn->index < 0)
				break;

			/* Test if the buffer has samples for this channel */
			if (!TEST_BIT(buffer->mask, chn->number))
				continue;

			if ((ptr - start) % length)
				ptr += length - ((ptr - start) % length);

			/* Test if the client wants samples from this channel */
			if (TEST_BIT(dev->mask, chn->number)) {
				ssize_t ret = callback(chn,
						(void *) ptr, length, d);
				if (ret < 0)
					return ret;
				else
					processed += ret;
			}

			if (i == dev->nb_channels - 1 ||
					dev->channels[i + 1]->index != chn->index)
				ptr += length * chn->format.repeat;
		}
	}
	return processed;
}
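
/* Illustrative sketch (hypothetical client code): a callback suitable for
 * the iterator above. It is invoked once per sample of each enabled
 * channel, and non-negative return values are accumulated into the total
 * that iio_buffer_foreach_sample() returns.
 *
 *	static ssize_t count_bytes(const struct iio_channel *chn,
 *			void *sample, size_t len, void *d)
 *	{
 *		(void) chn; (void) sample; (void) d;
 *		return (ssize_t) len;
 *	}
 *
 *	ssize_t total = iio_buffer_foreach_sample(buf, count_bytes, NULL);
 */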

void * iio_buffer_start(const struct iio_buffer *buffer)
{
	return buffer->buffer;
}

void * iio_buffer_first(const struct iio_buffer *buffer,
		const struct iio_channel *chn)
{
	size_t len;
	unsigned int i;
	uintptr_t ptr = (uintptr_t) buffer->buffer,
		  start = ptr;

	if (!iio_channel_is_enabled(chn))
		return iio_buffer_end(buffer);

	for (i = 0; i < buffer->dev->nb_channels; i++) {
		struct iio_channel *cur = buffer->dev->channels[i];
		len = cur->format.length / 8 * cur->format.repeat;

		/* NOTE: dev->channels are ordered by index */
		if (cur->index < 0 || cur->index == chn->index)
			break;

		/* Test if the buffer has samples for this channel */
		if (!TEST_BIT(buffer->mask, cur->number))
			continue;

		/* Two channels with the same index use the same samples */
		if (i > 0 && cur->index == buffer->dev->channels[i - 1]->index)
			continue;

		if ((ptr - start) % len)
			ptr += len - ((ptr - start) % len);
		ptr += len;
	}

	len = chn->format.length / 8;
	if ((ptr - start) % len)
		ptr += len - ((ptr - start) % len);
	return (void *) ptr;
}

ptrdiff_t iio_buffer_step(const struct iio_buffer *buffer)
{
	return (ptrdiff_t) buffer->sample_size;
}

void * iio_buffer_end(const struct iio_buffer *buffer)
{
	return (void *) ((uintptr_t) buffer->buffer + buffer->data_length);
}
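
/* Illustrative sketch (hypothetical client code): manual iteration over
 * one channel's samples using iio_buffer_first(), iio_buffer_step() and
 * iio_buffer_end(). The int16_t sample type is an assumption about the
 * channel's format; "buf" and "chn" are hypothetical.
 *
 *	ptrdiff_t step = iio_buffer_step(buf);
 *	uintptr_t end = (uintptr_t) iio_buffer_end(buf);
 *	uintptr_t p;
 *
 *	for (p = (uintptr_t) iio_buffer_first(buf, chn); p < end; p += step) {
 *		int16_t sample = *(int16_t *) p;
 *		// process "sample" here
 *	}
 */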

void iio_buffer_set_data(struct iio_buffer *buf, void *data)
{
	buf->userdata = data;
}

void * iio_buffer_get_data(const struct iio_buffer *buf)
{
	return buf->userdata;
}

const struct iio_device * iio_buffer_get_device(const struct iio_buffer *buf)
{
	return buf->dev;
}

void iio_buffer_cancel(struct iio_buffer *buf)
{
	const struct iio_backend_ops *ops = buf->dev->ctx->ops;

	if (ops->cancel)
		ops->cancel(buf->dev);
}
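
/* Illustrative sketch (hypothetical client code): iio_buffer_cancel() can
 * be called from another thread to make a blocked iio_buffer_refill() or
 * iio_buffer_push() on the same buffer return early. "stop", "rx_buf" and
 * "rx_thread" are hypothetical shutdown state.
 *
 *	stop = true;
 *	iio_buffer_cancel(rx_buf);
 *	pthread_join(rx_thread, NULL);
 */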