1 /*
2 * libiio - Library for interfacing industrial I/O (IIO) devices
3 *
4 * Copyright (C) 2014-2020 Analog Devices, Inc.
5 * Author: Paul Cercueil <paul.cercueil@analog.com>
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * */
18
19 #include "iio-config.h"
20 #include "iio-private.h"
21 #include "network.h"
22 #include "iio-lock.h"
23 #include "iiod-client.h"
24 #include "debug.h"
25
26 #define _STRINGIFY(x) #x
27 #define STRINGIFY(x) _STRINGIFY(x)
28
29 #define IIOD_PORT_STR STRINGIFY(IIOD_PORT)
30
/* Per-connection IO state.  The context owns one for the control link;
 * each device gets its own for buffer transfers. */
struct iio_network_io_context {
	int fd;

	/* Only buffer IO contexts can be cancelled. */
	bool cancellable;
	bool cancelled;
#if defined(_WIN32)
	WSAEVENT events[2]; /* [0]: socket event, [1]: cancellation event */
#elif defined(WITH_NETWORK_EVENTFD)
	int cancel_fd[1]; /* eventfd */
#else
	int cancel_fd[2]; /* pipe */
#endif
	unsigned int timeout_ms; /* 0 = no poll() timeout (see wait_cancellable) */
};
46
/* Private data attached to a network iio_context. */
struct iio_context_pdata {
	struct iio_network_io_context io_ctx; /* control connection */
	struct addrinfo *addrinfo;            /* resolved server address (owned) */
	struct iio_mutex *lock;               /* serializes the control connection */
	struct iiod_client *iiod_client;
	bool msg_trunc_supported; /* NOTE(review): not referenced in this chunk */
};
54
/* Private data attached to each iio_device of a network context. */
struct iio_device_pdata {
	struct iio_network_io_context io_ctx; /* per-device buffer connection */
#ifdef WITH_NETWORK_GET_BUFFER
	int memfd;       /* temp file backing the zero-copy buffer */
	void *mmap_addr; /* current mapping of memfd, NULL when none */
	size_t mmap_len;
#endif
	bool wait_for_err_code, is_cyclic, is_tx;
	struct iio_mutex *lock; /* serializes the buffer connection */
};
65
66 #ifdef _WIN32
67
/* Toggle FIONBIO on a Winsock socket.
 * Returns 0 on success or a negative WSA error code. */
static int set_blocking_mode(int s, bool blocking)
{
	unsigned long nonblock = blocking ? 0 : 1;

	if (ioctlsocket(s, FIONBIO, &nonblock) == SOCKET_ERROR)
		return -WSAGetLastError();

	return 0;
}
83
setup_cancel(struct iio_network_io_context * io_ctx)84 static int setup_cancel(struct iio_network_io_context *io_ctx)
85 {
86 io_ctx->events[0] = WSACreateEvent();
87 if (io_ctx->events[0] == WSA_INVALID_EVENT)
88 return -ENOMEM; /* Pretty much the only error that can happen */
89
90 io_ctx->events[1] = WSACreateEvent();
91 if (io_ctx->events[1] == WSA_INVALID_EVENT) {
92 WSACloseEvent(io_ctx->events[0]);
93 return -ENOMEM;
94 }
95
96 return 0;
97 }
98
cleanup_cancel(struct iio_network_io_context * io_ctx)99 static void cleanup_cancel(struct iio_network_io_context *io_ctx)
100 {
101 WSACloseEvent(io_ctx->events[0]);
102 WSACloseEvent(io_ctx->events[1]);
103 }
104
do_cancel(struct iio_network_io_context * io_ctx)105 static void do_cancel(struct iio_network_io_context *io_ctx)
106 {
107 WSASetEvent(io_ctx->events[1]);
108 }
109
/* Block until the socket is ready for the requested direction, or until
 * the cancellation event is set.  Non-cancellable contexts return
 * immediately and rely on the socket timeouts instead.
 * Returns 0 when ready, -EBADF when cancelled. */
static int wait_cancellable(struct iio_network_io_context *io_ctx, bool read)
{
	long wsa_events = FD_CLOSE;
	DWORD ret;

	if (!io_ctx->cancellable)
		return 0;

	if (read)
		wsa_events |= FD_READ;
	else
		wsa_events |= FD_WRITE;

	/* Re-arm the socket event: deselect, reset, re-select, so that a
	 * condition raised between two waits is not lost. */
	WSAEventSelect(io_ctx->fd, NULL, 0);
	WSAResetEvent(io_ctx->events[0]);
	WSAEventSelect(io_ctx->fd, io_ctx->events[0], wsa_events);

	ret = WSAWaitForMultipleEvents(2, io_ctx->events, FALSE,
		WSA_INFINITE, FALSE);

	/* events[1] is the cancellation event set by do_cancel() */
	if (ret == WSA_WAIT_EVENT_0 + 1)
		return -EBADF;

	return 0;
}
135
/* Map the last Winsock error to a negative error code. */
static int network_get_error(void)
{
	return -WSAGetLastError();
}
140
network_should_retry(int err)141 static bool network_should_retry(int err)
142 {
143 return err == -WSAEWOULDBLOCK || err == -WSAETIMEDOUT;
144 }
145
network_is_interrupted(int err)146 static bool network_is_interrupted(int err)
147 {
148 return false;
149 }
150
/* A non-blocking connect() on Winsock reports WSAEWOULDBLOCK while the
 * connection is still being established. */
static bool network_connect_in_progress(int err)
{
	return err == -WSAEWOULDBLOCK;
}
155
156 #define NETWORK_ERR_TIMEOUT WSAETIMEDOUT
157
158 #else
159
/* Set or clear O_NONBLOCK on a descriptor.
 * Returns 0 on success or a negative errno code. */
static int set_blocking_mode(int fd, bool blocking)
{
	int flags = fcntl(fd, F_GETFL, 0);

	if (flags < 0)
		return -errno;

	flags = blocking ? (flags & ~O_NONBLOCK) : (flags | O_NONBLOCK);

	if (fcntl(fd, F_SETFL, flags) < 0)
		return -errno;

	return 0;
}
174
175 #include <poll.h>
176
177 #if defined(WITH_NETWORK_EVENTFD)
178
179 #include <sys/eventfd.h>
180
create_cancel_fd(struct iio_network_io_context * io_ctx)181 static int create_cancel_fd(struct iio_network_io_context *io_ctx)
182 {
183 io_ctx->cancel_fd[0] = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
184 if (io_ctx->cancel_fd[0] < 0)
185 return -errno;
186 return 0;
187 }
188
cleanup_cancel(struct iio_network_io_context * io_ctx)189 static void cleanup_cancel(struct iio_network_io_context *io_ctx)
190 {
191 close(io_ctx->cancel_fd[0]);
192 }
193
194 #define CANCEL_WR_FD 0
195
196 #else
197
create_cancel_fd(struct iio_network_io_context * io_ctx)198 static int create_cancel_fd(struct iio_network_io_context *io_ctx)
199 {
200 int ret;
201
202 #ifdef HAS_PIPE2
203 ret = pipe2(io_ctx->cancel_fd, O_CLOEXEC | O_NONBLOCK);
204 if (ret < 0 && errno != ENOSYS) /* If ENOSYS try pipe() */
205 return -errno;
206 #endif
207 ret = pipe(io_ctx->cancel_fd);
208 if (ret < 0)
209 return -errno;
210 ret = set_blocking_mode(io_ctx->cancel_fd[0], false);
211 if (ret < 0)
212 goto err_close;
213 ret = set_blocking_mode(io_ctx->cancel_fd[1], false);
214 if (ret < 0)
215 goto err_close;
216
217 return 0;
218 err_close:
219 close(io_ctx->cancel_fd[0]);
220 close(io_ctx->cancel_fd[1]);
221 return ret;
222 }
223
cleanup_cancel(struct iio_network_io_context * io_ctx)224 static void cleanup_cancel(struct iio_network_io_context *io_ctx)
225 {
226 close(io_ctx->cancel_fd[0]);
227 close(io_ctx->cancel_fd[1]);
228 }
229
230 #define CANCEL_WR_FD 1
231
232 #endif
233
setup_cancel(struct iio_network_io_context * io_ctx)234 static int setup_cancel(struct iio_network_io_context *io_ctx)
235 {
236 int ret;
237
238 ret = set_blocking_mode(io_ctx->fd, false);
239 if (ret)
240 return ret;
241
242 return create_cancel_fd(io_ctx);
243 }
244
do_cancel(struct iio_network_io_context * io_ctx)245 static void do_cancel(struct iio_network_io_context *io_ctx)
246 {
247 uint64_t event = 1;
248 int ret;
249
250 ret = write(io_ctx->cancel_fd[CANCEL_WR_FD], &event, sizeof(event));
251 if (ret == -1) {
252 /* If this happens something went very seriously wrong */
253 char err_str[1024];
254 iio_strerror(errno, err_str, sizeof(err_str));
255 IIO_ERROR("Unable to signal cancellation event: %s\n", err_str);
256 }
257 }
258
/* Poll the socket for the requested direction together with the
 * cancellation descriptor.  Returns 0 when the socket is ready, -EBADF
 * when cancelled, -EPIPE on timeout, or a negative errno code. */
static int wait_cancellable(struct iio_network_io_context *io_ctx, bool read)
{
	struct pollfd pfd[2];
	int ret;

	if (!io_ctx->cancellable)
		return 0;

	memset(pfd, 0, sizeof(pfd));

	pfd[0].fd = io_ctx->fd;
	if (read)
		pfd[0].events = POLLIN;
	else
		pfd[0].events = POLLOUT;
	pfd[1].fd = io_ctx->cancel_fd[0]; /* read end of the eventfd/pipe */
	pfd[1].events = POLLIN;

	do {
		int timeout_ms;

		/* timeout_ms == 0 means "no timeout" for this backend */
		if (io_ctx->timeout_ms > 0)
			timeout_ms = (int) io_ctx->timeout_ms;
		else
			timeout_ms = -1;

		do {
			ret = poll(pfd, 2, timeout_ms);
		} while (ret == -1 && errno == EINTR);

		if (ret == -1)
			return -errno;
		if (!ret)
			return -EPIPE; /* poll() timed out */

		/* Cancellation requested */
		if (pfd[1].revents & POLLIN)
			return -EBADF;
	} while (!(pfd[0].revents & (pfd[0].events | POLLERR | POLLHUP)));

	return 0;
}
300
/* Map the last socket error to a negative errno code (POSIX flavour). */
static int network_get_error(void)
{
	return -errno;
}
305
/* Transient conditions worth retrying.  POSIX allows EWOULDBLOCK to be a
 * distinct value from EAGAIN, and recv()/send() may report either when a
 * socket timeout expires, so test both. */
static bool network_should_retry(int err)
{
	return err == -EAGAIN || err == -EWOULDBLOCK;
}
310
/* A signal interrupted the call; the caller should simply retry. */
static bool network_is_interrupted(int err)
{
	return -EINTR == err;
}
315
/* A non-blocking connect() reports EINPROGRESS while the three-way
 * handshake is still underway. */
static bool network_connect_in_progress(int err)
{
	return -EINPROGRESS == err;
}
320
321 #define NETWORK_ERR_TIMEOUT ETIMEDOUT
322
323 #endif
324
/* recv() wrapper that honours cancellation and retries transient errors.
 * Returns the number of bytes received, or a negative error code:
 * -EPIPE on orderly peer shutdown, or when a non-cancellable (blocking)
 * socket hits its timeout. */
static ssize_t network_recv(struct iio_network_io_context *io_ctx,
		void *data, size_t len, int flags)
{
	ssize_t ret;
	int err;

	while (1) {
		ret = wait_cancellable(io_ctx, true);
		if (ret < 0)
			return ret;

		ret = recv(io_ctx->fd, data, (int) len, flags);
		if (ret == 0)
			return -EPIPE; /* peer closed the connection */
		else if (ret > 0)
			break;

		err = network_get_error();
		if (network_should_retry(err)) {
			if (io_ctx->cancellable)
				continue; /* loop back through the poll */
			else
				return -EPIPE; /* blocking socket timed out */
		} else if (!network_is_interrupted(err)) {
			return (ssize_t) err;
		}
		/* interrupted by a signal: retry */
	}
	return ret;
}
354
/* send() wrapper that honours cancellation and retries transient errors.
 * Mirror image of network_recv(); returns bytes sent or a negative error
 * code (-EPIPE on shutdown or non-cancellable timeout). */
static ssize_t network_send(struct iio_network_io_context *io_ctx,
		const void *data, size_t len, int flags)
{
	ssize_t ret;
	int err;

	while (1) {
		ret = wait_cancellable(io_ctx, false);
		if (ret < 0)
			return ret;

		ret = send(io_ctx->fd, data, (int) len, flags);
		if (ret == 0)
			return -EPIPE; /* peer closed the connection */
		else if (ret > 0)
			break;

		err = network_get_error();
		if (network_should_retry(err)) {
			if (io_ctx->cancellable)
				continue; /* loop back through the poll */
			else
				return -EPIPE; /* blocking socket timed out */
		} else if (!network_is_interrupted(err)) {
			return (ssize_t) err;
		}
		/* interrupted by a signal: retry */
	}

	return ret;
}
385
/* Keep sending until all 'len' bytes went out (network_send() may write
 * short).  Returns len, or the first negative error encountered. */
static ssize_t write_all(struct iio_network_io_context *io_ctx,
		const void *src, size_t len)
{
	uintptr_t cursor = (uintptr_t) src;
	size_t remaining = len;

	while (remaining) {
		ssize_t sent = network_send(io_ctx, (const void *) cursor,
				remaining, 0);

		if (sent < 0)
			return sent;

		cursor += sent;
		remaining -= sent;
	}

	return (ssize_t)(cursor - (uintptr_t) src);
}
399
/* Send a complete command string, logging a readable error on failure. */
static ssize_t write_command(struct iio_network_io_context *io_ctx,
		const char *cmd)
{
	ssize_t status;

	IIO_DEBUG("Writing command: %s\n", cmd);

	status = write_all(io_ctx, cmd, strlen(cmd));
	if (status < 0) {
		char err_str[1024];

		iio_strerror(-(int) status, err_str, sizeof(err_str));
		IIO_ERROR("Unable to send command: %s\n", err_str);
	}

	return status;
}
414
network_cancel(const struct iio_device * dev)415 static void network_cancel(const struct iio_device *dev)
416 {
417 struct iio_device_pdata *ppdata = dev->pdata;
418
419 do_cancel(&ppdata->io_ctx);
420
421 ppdata->io_ctx.cancelled = true;
422 }
423
424 #ifndef _WIN32
425
426 /* Use it if available */
427 #ifndef SOCK_CLOEXEC
428 #define SOCK_CLOEXEC 0
429 #endif
430
do_create_socket(const struct addrinfo * addrinfo)431 static int do_create_socket(const struct addrinfo *addrinfo)
432 {
433 int fd;
434
435 fd = socket(addrinfo->ai_family, addrinfo->ai_socktype | SOCK_CLOEXEC, 0);
436 if (fd < 0)
437 return -errno;
438
439 return fd;
440 }
441
/* Apply the same send and receive timeout (milliseconds) to the socket.
 * Returns 0 on success or a negative errno code. */
static int set_socket_timeout(int fd, unsigned int timeout)
{
	struct timeval tv = {
		.tv_sec = timeout / 1000,
		.tv_usec = (timeout % 1000) * 1000,
	};

	if (setsockopt(fd, SOL_SOCKET, SO_SNDTIMEO, &tv, sizeof(tv)) < 0)
		return -errno;
	if (setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv)) < 0)
		return -errno;

	return 0;
}
455 #else
456
457 /* Use it if available */
458 #ifndef WSA_FLAG_NO_HANDLE_INHERIT
459 #define WSA_FLAG_NO_HANDLE_INHERIT 0
460 #endif
461
do_create_socket(const struct addrinfo * addrinfo)462 static int do_create_socket(const struct addrinfo *addrinfo)
463 {
464 SOCKET s;
465
466 s = WSASocketW(addrinfo->ai_family, addrinfo->ai_socktype, 0, NULL, 0,
467 WSA_FLAG_NO_HANDLE_INHERIT | WSA_FLAG_OVERLAPPED);
468 if (s == INVALID_SOCKET)
469 return -WSAGetLastError();
470
471 return (int) s;
472 }
473
set_socket_timeout(int fd,unsigned int timeout)474 static int set_socket_timeout(int fd, unsigned int timeout)
475 {
476 if (setsockopt(fd, SOL_SOCKET, SO_SNDTIMEO,
477 (const char *) &timeout, sizeof(timeout)) < 0 ||
478 setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO,
479 (const char *) &timeout, sizeof(timeout)) < 0)
480 return -WSAGetLastError();
481 else
482 return 0;
483 }
484 #endif /* !_WIN32 */
485
/* The purpose of this function is to provide a version of connect()
 * that does not ignore timeouts...
 * The socket is switched to non-blocking mode, connect() is initiated,
 * and select()/poll() then enforces the caller's timeout in milliseconds
 * (on Windows, 0 means wait forever).  On success the socket is put back
 * in blocking mode.  Returns 0 or a negative error code. */
static int do_connect(int fd, const struct addrinfo *addrinfo,
	unsigned int timeout)
{
	int ret, error;
	socklen_t len;
#ifdef _WIN32
	struct timeval tv;
	struct timeval *ptv;
	fd_set set;
#else
	struct pollfd pfd;
#endif

	ret = set_blocking_mode(fd, false);
	if (ret < 0)
		return ret;

	ret = connect(fd, addrinfo->ai_addr, (int) addrinfo->ai_addrlen);
	if (ret < 0) {
		ret = network_get_error();
		/* EINPROGRESS / WSAEWOULDBLOCK: handshake underway, wait below */
		if (!network_connect_in_progress(ret))
			return ret;
	}

#ifdef _WIN32
#ifdef _MSC_BUILD
	/* This is so stupid, but studio emits a signed/unsigned mismatch
	 * on their own FD_ZERO macro, so turn the warning off/on
	 */
#pragma warning(disable : 4389)
#endif
	FD_ZERO(&set);
	FD_SET(fd, &set);
#ifdef _MSC_BUILD
#pragma warning(default: 4389)
#endif

	if (timeout != 0) {
		tv.tv_sec = timeout / 1000;
		tv.tv_usec = (timeout % 1000) * 1000;
		ptv = &tv;
	} else {
		ptv = NULL; /* block indefinitely */
	}

	/* Ready for write = connected; exception set = connection failed */
	ret = select(fd + 1, NULL, &set, &set, ptv);
#else
	pfd.fd = fd;
	pfd.events = POLLOUT | POLLERR;
	pfd.revents = 0;

	do {
		ret = poll(&pfd, 1, timeout);
	} while (ret == -1 && errno == EINTR);
#endif

	if (ret < 0)
		return network_get_error();

	if (ret == 0)
		return -NETWORK_ERR_TIMEOUT;

	/* Verify that we don't have an error */
	len = sizeof(error);
	ret = getsockopt(fd, SOL_SOCKET, SO_ERROR, (char *)&error, &len);
	if (ret < 0)
		return network_get_error();

	if (error)
		return -error;

	ret = set_blocking_mode(fd, true);
	if (ret < 0)
		return ret;

	return 0;
}
565
/* Create a TCP socket connected to the resolved address, with both socket
 * timeouts initialized and TCP_NODELAY set (iiod traffic is small,
 * latency-sensitive commands).  Returns the descriptor or a negative
 * error code. */
int create_socket(const struct addrinfo *addrinfo, unsigned int timeout)
{
	int ret, fd, yes = 1;

	fd = do_create_socket(addrinfo);
	if (fd < 0)
		return fd;

	ret = do_connect(fd, addrinfo, timeout);
	if (ret < 0) {
		close(fd);
		return ret;
	}

	set_socket_timeout(fd, timeout);
	if (setsockopt(fd, IPPROTO_TCP, TCP_NODELAY,
			(const char *) &yes, sizeof(yes)) < 0) {
		/* Use network_get_error() instead of -errno: on Windows the
		 * failure is reported through WSAGetLastError(). */
		ret = network_get_error();
		close(fd);
		return ret;
	}

	return fd;
}
590
/* Open a buffer connection for the device: a dedicated socket is created,
 * the iiod OPEN command is issued on it, and the IO context is made
 * cancellable.  Returns 0, -EBUSY if already open, or a negative error. */
static int network_open(const struct iio_device *dev,
		size_t samples_count, bool cyclic)
{
	struct iio_context_pdata *pdata = dev->ctx->pdata;
	struct iio_device_pdata *ppdata = dev->pdata;
	int ret = -EBUSY;

	iio_mutex_lock(ppdata->lock);
	if (ppdata->io_ctx.fd >= 0)
		goto out_mutex_unlock; /* a buffer connection is already open */

	ret = create_socket(pdata->addrinfo, DEFAULT_TIMEOUT_MS);
	if (ret < 0) {
		IIO_ERROR("Create socket: %d\n", ret);
		goto out_mutex_unlock;
	}

	/* Not cancellable yet: the OPEN exchange below uses plain blocking IO */
	ppdata->io_ctx.fd = ret;
	ppdata->io_ctx.cancelled = false;
	ppdata->io_ctx.cancellable = false;
	ppdata->io_ctx.timeout_ms = DEFAULT_TIMEOUT_MS;

	ret = iiod_client_open_unlocked(pdata->iiod_client,
			&ppdata->io_ctx, dev, samples_count, cyclic);
	if (ret < 0) {
		IIO_ERROR("Open unlocked: %d\n", ret);
		goto err_close_socket;
	}

	ret = setup_cancel(&ppdata->io_ctx);
	if (ret < 0)
		goto err_close_socket;

	/* Inherit the timeout currently set on the control connection */
	set_socket_timeout(ppdata->io_ctx.fd, pdata->io_ctx.timeout_ms);

	ppdata->io_ctx.timeout_ms = pdata->io_ctx.timeout_ms;
	ppdata->io_ctx.cancellable = true;
	ppdata->is_tx = iio_device_is_tx(dev);
	ppdata->is_cyclic = cyclic;
	ppdata->wait_for_err_code = false;
#ifdef WITH_NETWORK_GET_BUFFER
	ppdata->mmap_len = samples_count * iio_device_get_sample_size(dev);
#endif

	iio_mutex_unlock(ppdata->lock);

	return 0;

err_close_socket:
	close(ppdata->io_ctx.fd);
	ppdata->io_ctx.fd = -1;
out_mutex_unlock:
	iio_mutex_unlock(ppdata->lock);
	return ret;
}
646
/* Close the device's buffer connection.  When the transfer was cancelled
 * the iiod CLOSE/EXIT handshake is skipped (the stream is desynced) and
 * the socket is simply torn down.  Returns the CLOSE status, 0 when
 * cancelled, or -EBADF when nothing was open. */
static int network_close(const struct iio_device *dev)
{
	struct iio_device_pdata *pdata = dev->pdata;
	int ret = -EBADF;

	iio_mutex_lock(pdata->lock);

	if (pdata->io_ctx.fd >= 0) {
		if (!pdata->io_ctx.cancelled) {
			ret = iiod_client_close_unlocked(
					dev->ctx->pdata->iiod_client,
					&pdata->io_ctx, dev);

			write_command(&pdata->io_ctx, "\r\nEXIT\r\n");
		} else {
			ret = 0;
		}

		cleanup_cancel(&pdata->io_ctx);
		close(pdata->io_ctx.fd);
		pdata->io_ctx.fd = -1;
	}

#ifdef WITH_NETWORK_GET_BUFFER
	/* Release the zero-copy buffer state, if any */
	if (pdata->memfd >= 0)
		close(pdata->memfd);
	pdata->memfd = -1;

	if (pdata->mmap_addr) {
		munmap(pdata->mmap_addr, pdata->mmap_len);
		pdata->mmap_addr = NULL;
	}
#endif

	iio_mutex_unlock(pdata->lock);
	return ret;
}
684
network_read(const struct iio_device * dev,void * dst,size_t len,uint32_t * mask,size_t words)685 static ssize_t network_read(const struct iio_device *dev, void *dst, size_t len,
686 uint32_t *mask, size_t words)
687 {
688 struct iio_device_pdata *pdata = dev->pdata;
689 ssize_t ret;
690
691 iio_mutex_lock(pdata->lock);
692 ret = iiod_client_read_unlocked(dev->ctx->pdata->iiod_client,
693 &pdata->io_ctx, dev, dst, len, mask, words);
694 iio_mutex_unlock(pdata->lock);
695
696 return ret;
697 }
698
network_write(const struct iio_device * dev,const void * src,size_t len)699 static ssize_t network_write(const struct iio_device *dev,
700 const void *src, size_t len)
701 {
702 struct iio_device_pdata *pdata = dev->pdata;
703 ssize_t ret;
704
705 iio_mutex_lock(pdata->lock);
706 ret = iiod_client_write_unlocked(dev->ctx->pdata->iiod_client,
707 &pdata->io_ctx, dev, src, len);
708 iio_mutex_unlock(pdata->lock);
709
710 return ret;
711 }
712
713 #ifdef WITH_NETWORK_GET_BUFFER
714
/* Keep receiving until all 'len' bytes arrived (network_recv() may read
 * short).  Returns len, or the first negative error encountered.
 * Fix: log negative ssize_t with %zd — %zu printed it as a huge unsigned
 * value (mismatched conversion specifier is undefined behavior). */
static ssize_t read_all(struct iio_network_io_context *io_ctx,
		void *dst, size_t len)
{
	uintptr_t ptr = (uintptr_t) dst;

	while (len) {
		ssize_t ret = network_recv(io_ctx, (void *) ptr, len, 0);

		if (ret < 0) {
			IIO_ERROR("NETWORK RECV: %zd\n", ret);
			return ret;
		}

		ptr += ret;
		len -= ret;
	}

	return (ssize_t)(ptr - (uintptr_t) dst);
}
730
/* Read an ASCII integer from the socket one byte at a time, stopping at
 * the first '\n' (or '.') that follows at least one other character.
 * On success stores the value in *val and returns 0; otherwise returns a
 * negative error code (-EINVAL when no digits were parsed or the value is
 * out of range). */
static int read_integer(struct iio_network_io_context *io_ctx, long *val)
{
	unsigned int i;
	char buf[1024], *ptr;
	ssize_t ret;
	bool found = false;

	for (i = 0; i < sizeof(buf) - 1; i++) {
		ret = read_all(io_ctx, buf + i, 1);
		if (ret < 0)
			return (int) ret;

		/* Skip the eventual first few carriage returns.
		 * Also stop when a dot is found (for parsing floats) */
		if (buf[i] != '\n' && buf[i] != '.')
			found = true;
		else if (found)
			break;
	}

	buf[i] = '\0';

	/* Clear errno first: strtol only sets it on ERANGE */
	errno = 0;
	ret = (ssize_t) strtol(buf, &ptr, 10);
	if (ptr == buf || errno == ERANGE)
		return -EINVAL;

	*val = (long) ret;
	return 0;
}
759
network_read_mask(struct iio_network_io_context * io_ctx,uint32_t * mask,size_t words)760 static ssize_t network_read_mask(struct iio_network_io_context *io_ctx,
761 uint32_t *mask, size_t words)
762 {
763 long read_len;
764 ssize_t ret;
765
766 ret = read_integer(io_ctx, &read_len);
767 if (ret < 0) {
768 IIO_ERROR("READ INTEGER: %zu\n", ret);
769 return ret;
770 }
771
772 if (read_len > 0 && mask) {
773 size_t i;
774 char buf[9];
775
776 buf[8] = '\0';
777 IIO_DEBUG("Reading mask\n");
778
779 for (i = words; i > 0; i--) {
780 ret = read_all(io_ctx, buf, 8);
781 if (ret < 0)
782 return ret;
783
784 iio_sscanf(buf, "%08x", &mask[i - 1]);
785 IIO_DEBUG("mask[%lu] = 0x%x\n",
786 (unsigned long)(i - 1), mask[i - 1]);
787 }
788 }
789
790 if (read_len > 0) {
791 char c;
792 ssize_t nb = read_all(io_ctx, &c, 1);
793 if (nb > 0 && c != '\n')
794 read_len = -EIO;
795 }
796
797 return (ssize_t) read_len;
798 }
799
/*
 * The server returns two integer codes.
 * The first one is returned right after the WRITEBUF command is issued,
 * and corresponds to the error code returned when the server attempted
 * to open the device.
 * If zero, a second error code is returned, that corresponds (if positive)
 * to the number of bytes written.
 *
 * To speed up things, we delay error reporting. We just send out the
 * data without reading the error code that the server gives us, because
 * the answer will take too much time. If an error occurred, it will be
 * reported by the next call to iio_buffer_push().
 */
static ssize_t read_error_code(struct iio_network_io_context *io_ctx)
{
	long resp = 0;
	ssize_t ret;

	/* First code: status of the server-side open */
	ret = read_integer(io_ctx, &resp);
	if (ret < 0)
		return ret;
	if (resp < 0)
		return (ssize_t) resp;

	/* Second code: number of bytes written (or an error) */
	ret = read_integer(io_ctx, &resp);
	if (ret < 0)
		return ret;

	return (ssize_t) resp;
}
829
write_rwbuf_command(const struct iio_device * dev,const char * cmd)830 static ssize_t write_rwbuf_command(const struct iio_device *dev,
831 const char *cmd)
832 {
833 struct iio_device_pdata *pdata = dev->pdata;
834
835 if (pdata->wait_for_err_code) {
836 ssize_t ret = read_error_code(&pdata->io_ctx);
837
838 pdata->wait_for_err_code = false;
839 if (ret < 0)
840 return ret;
841 }
842
843 return write_command(&pdata->io_ctx, cmd);
844 }
845
/* Move 'len' bytes between the device socket and the memfd with splice(),
 * avoiding a copy through user space.  A temporary pipe is the
 * intermediate, since splice() requires one end to be a pipe.
 * Returns len on success or a negative error code. */
static ssize_t network_do_splice(struct iio_device_pdata *pdata, size_t len,
		bool read)
{
	int pipefd[2];
	int fd_in, fd_out;
	ssize_t ret, read_len = len, write_len = 0;

	ret = (ssize_t) pipe2(pipefd, O_CLOEXEC);
	if (ret < 0)
		return -errno;

	if (read) {
		/* socket -> pipe -> memfd */
		fd_in = pdata->io_ctx.fd;
		fd_out = pdata->memfd;
	} else {
		/* memfd -> pipe -> socket */
		fd_in = pdata->memfd;
		fd_out = pdata->io_ctx.fd;
	}

	do {
		ret = wait_cancellable(&pdata->io_ctx, read);
		if (ret < 0)
			goto err_close_pipe;

		if (read_len) {
			/*
			 * SPLICE_F_NONBLOCK is just here to avoid a deadlock when
			 * splicing from a socket. As the socket is not in
			 * non-blocking mode, it should never return -EAGAIN.
			 * TODO(pcercuei): Find why it locks...
			 * */
			ret = splice(fd_in, NULL, pipefd[1], NULL, read_len,
					SPLICE_F_MOVE | SPLICE_F_NONBLOCK);
			if (!ret)
				ret = -EIO;
			if (ret < 0 && errno != EAGAIN) {
				ret = -errno;
				goto err_close_pipe;
			} else if (ret > 0) {
				/* These bytes now sit in the pipe */
				write_len += ret;
				read_len -= ret;
			}
		}

		if (write_len) {
			ret = splice(pipefd[0], NULL, fd_out, NULL, write_len,
					SPLICE_F_MOVE | SPLICE_F_NONBLOCK);
			if (!ret)
				ret = -EIO;
			if (ret < 0 && errno != EAGAIN) {
				ret = -errno;
				goto err_close_pipe;
			} else if (ret > 0) {
				write_len -= ret;
			}
		}

	} while (write_len || read_len);

err_close_pipe:
	close(pipefd[0]);
	close(pipefd[1]);
	return ret < 0 ? ret : len;
}
910
/* Zero-copy buffer exchange: samples are staged in a mmap()ed temporary
 * file and moved to/from the socket with splice().
 * TX: the PREVIOUS buffer (bytes_used bytes) is pushed with WRITEBUF
 * before a fresh mapping is handed out.  RX: mmap_len bytes are fetched
 * with READBUF into the new mapping.
 * Returns the bytes read (RX) or bytes_used (TX), or a negative error
 * code (-ENOSYS when the high-speed interface is unavailable). */
static ssize_t network_get_buffer(const struct iio_device *dev,
		void **addr_ptr, size_t bytes_used,
		uint32_t *mask, size_t words)
{
	struct iio_device_pdata *pdata = dev->pdata;
	ssize_t ret, read = 0;
	int memfd;

	if (pdata->is_cyclic)
		return -ENOSYS;

	/* We check early that the temporary file can be created, so that we can
	 * return -ENOSYS in case it fails, which will indicate that the
	 * high-speed interface is not available.
	 *
	 * O_TMPFILE -> Linux 3.11.
	 * TODO: use memfd_create (Linux 3.17) */
	memfd = open(P_tmpdir, O_RDWR | O_TMPFILE | O_EXCL | O_CLOEXEC, S_IRWXU);
	if (memfd < 0)
		return -ENOSYS;

	if (!addr_ptr || words != (dev->nb_channels + 31) / 32) {
		close(memfd);
		return -EINVAL;
	}

	if (pdata->mmap_addr)
		munmap(pdata->mmap_addr, pdata->mmap_len);

	/* NOTE(review): mmap_addr is used here only as an "a previous buffer
	 * exists" flag — the mapping was just released above, but the
	 * backing memfd still holds the data to push. */
	if (pdata->mmap_addr && pdata->is_tx) {
		char buf[1024];

		iio_snprintf(buf, sizeof(buf), "WRITEBUF %s %lu\r\n",
				dev->id, (unsigned long) bytes_used);

		iio_mutex_lock(pdata->lock);

		ret = write_rwbuf_command(dev, buf);
		if (ret < 0)
			goto err_close_memfd;

		ret = network_do_splice(pdata, bytes_used, false);
		if (ret < 0)
			goto err_close_memfd;

		/* Server status is collected lazily by the next command */
		pdata->wait_for_err_code = true;
		iio_mutex_unlock(pdata->lock);
	}

	/* Swap in the fresh temporary file */
	if (pdata->memfd >= 0)
		close(pdata->memfd);

	pdata->memfd = memfd;

	ret = (ssize_t) ftruncate(pdata->memfd, pdata->mmap_len);
	if (ret < 0) {
		ret = -errno;
		IIO_ERROR("Unable to truncate temp file: %zi\n", -ret);
		return ret;
	}

	if (!pdata->is_tx) {
		char buf[1024];
		size_t len = pdata->mmap_len;

		iio_snprintf(buf, sizeof(buf), "READBUF %s %lu\r\n",
				dev->id, (unsigned long) len);

		iio_mutex_lock(pdata->lock);
		ret = write_rwbuf_command(dev, buf);
		if (ret < 0)
			goto err_unlock;

		/* The server answers in chunks, each preceded by its length */
		do {
			ret = network_read_mask(&pdata->io_ctx, mask, words);
			if (!ret)
				break;
			if (ret < 0)
				goto err_unlock;

			mask = NULL; /* We read the mask only once */

			ret = network_do_splice(pdata, ret, true);
			if (ret < 0)
				goto err_unlock;

			read += ret;
			len -= ret;
		} while (len);

		iio_mutex_unlock(pdata->lock);
	}

	pdata->mmap_addr = mmap(NULL, pdata->mmap_len,
			PROT_READ | PROT_WRITE, MAP_SHARED, pdata->memfd, 0);
	if (pdata->mmap_addr == MAP_FAILED) {
		pdata->mmap_addr = NULL;
		ret = -errno;
		IIO_ERROR("Unable to mmap: %zi\n", -ret);
		return ret;
	}

	*addr_ptr = pdata->mmap_addr;
	return read ? read : (ssize_t) bytes_used;

err_close_memfd:
	close(memfd);
err_unlock:
	iio_mutex_unlock(pdata->lock);
	return ret;
}
1022 #endif
1023
network_read_dev_attr(const struct iio_device * dev,const char * attr,char * dst,size_t len,enum iio_attr_type type)1024 static ssize_t network_read_dev_attr(const struct iio_device *dev,
1025 const char *attr, char *dst, size_t len, enum iio_attr_type type)
1026 {
1027 struct iio_context_pdata *pdata = dev->ctx->pdata;
1028
1029 return iiod_client_read_attr(pdata->iiod_client,
1030 &pdata->io_ctx, dev, NULL, attr, dst, len, type);
1031 }
1032
network_write_dev_attr(const struct iio_device * dev,const char * attr,const char * src,size_t len,enum iio_attr_type type)1033 static ssize_t network_write_dev_attr(const struct iio_device *dev,
1034 const char *attr, const char *src, size_t len, enum iio_attr_type type)
1035 {
1036 struct iio_context_pdata *pdata = dev->ctx->pdata;
1037
1038 return iiod_client_write_attr(pdata->iiod_client,
1039 &pdata->io_ctx, dev, NULL, attr, src, len, type);
1040 }
1041
network_read_chn_attr(const struct iio_channel * chn,const char * attr,char * dst,size_t len)1042 static ssize_t network_read_chn_attr(const struct iio_channel *chn,
1043 const char *attr, char *dst, size_t len)
1044 {
1045 struct iio_context_pdata *pdata = chn->dev->ctx->pdata;
1046
1047 return iiod_client_read_attr(pdata->iiod_client,
1048 &pdata->io_ctx, chn->dev, chn, attr, dst, len, false);
1049 }
1050
network_write_chn_attr(const struct iio_channel * chn,const char * attr,const char * src,size_t len)1051 static ssize_t network_write_chn_attr(const struct iio_channel *chn,
1052 const char *attr, const char *src, size_t len)
1053 {
1054 struct iio_context_pdata *pdata = chn->dev->ctx->pdata;
1055
1056 return iiod_client_write_attr(pdata->iiod_client,
1057 &pdata->io_ctx, chn->dev, chn, attr, src, len, false);
1058 }
1059
network_get_trigger(const struct iio_device * dev,const struct iio_device ** trigger)1060 static int network_get_trigger(const struct iio_device *dev,
1061 const struct iio_device **trigger)
1062 {
1063 struct iio_context_pdata *pdata = dev->ctx->pdata;
1064
1065 return iiod_client_get_trigger(pdata->iiod_client,
1066 &pdata->io_ctx, dev, trigger);
1067 }
1068
network_set_trigger(const struct iio_device * dev,const struct iio_device * trigger)1069 static int network_set_trigger(const struct iio_device *dev,
1070 const struct iio_device *trigger)
1071 {
1072 struct iio_context_pdata *pdata = dev->ctx->pdata;
1073
1074 return iiod_client_set_trigger(pdata->iiod_client,
1075 &pdata->io_ctx, dev, trigger);
1076 }
1077
/* Tear down the context: close the control connection, close and free
 * each device's private state, then destroy the iiod client, the lock,
 * the resolved address and the private data itself. */
static void network_shutdown(struct iio_context *ctx)
{
	struct iio_context_pdata *pdata = ctx->pdata;
	unsigned int i;

	iio_mutex_lock(pdata->lock);
	write_command(&pdata->io_ctx, "\r\nEXIT\r\n");
	close(pdata->io_ctx.fd);
	iio_mutex_unlock(pdata->lock);

	for (i = 0; i < ctx->nb_devices; i++) {
		struct iio_device *dev = ctx->devices[i];
		struct iio_device_pdata *dpdata = dev->pdata;

		if (dpdata) {
			/* Closes any open buffer connection for the device */
			network_close(dev);
			iio_mutex_destroy(dpdata->lock);
			free(dpdata);
		}
	}

	iiod_client_destroy(pdata->iiod_client);
	iio_mutex_destroy(pdata->lock);
	freeaddrinfo(pdata->addrinfo);
	free(pdata);
}
1104
network_get_version(const struct iio_context * ctx,unsigned int * major,unsigned int * minor,char git_tag[8])1105 static int network_get_version(const struct iio_context *ctx,
1106 unsigned int *major, unsigned int *minor, char git_tag[8])
1107 {
1108 return iiod_client_get_version(ctx->pdata->iiod_client,
1109 &ctx->pdata->io_ctx, major, minor, git_tag);
1110 }
1111
/* Timeout forwarded to the remote iiod backend. */
static unsigned int calculate_remote_timeout(unsigned int timeout)
{
	/* XXX(pcercuei): We currently hardcode timeout / 2 for the backend used
	 * by the remote. Is there something better to do here? */
	return timeout >> 1;
}
1118
/* Apply a new R/W timeout to the control socket and propagate the halved
 * value to the remote iiod (see calculate_remote_timeout()).  The cached
 * timeout is only updated when both steps succeed; a failure is logged
 * as a warning and the error code returned. */
static int network_set_timeout(struct iio_context *ctx, unsigned int timeout)
{
	struct iio_context_pdata *pdata = ctx->pdata;
	int ret, fd = pdata->io_ctx.fd;

	ret = set_socket_timeout(fd, timeout);
	if (!ret) {
		unsigned int remote_timeout = calculate_remote_timeout(timeout);

		ret = iiod_client_set_timeout(pdata->iiod_client,
				&pdata->io_ctx, remote_timeout);
		if (!ret)
			pdata->io_ctx.timeout_ms = timeout;
	}
	if (ret < 0) {
		char buf[1024];

		iio_strerror(-ret, buf, sizeof(buf));
		IIO_WARNING("Unable to set R/W timeout: %s\n", buf);
	}
	return ret;
}
1140
network_set_kernel_buffers_count(const struct iio_device * dev,unsigned int nb_blocks)1141 static int network_set_kernel_buffers_count(const struct iio_device *dev,
1142 unsigned int nb_blocks)
1143 {
1144 struct iio_context_pdata *pdata = dev->ctx->pdata;
1145
1146 return iiod_client_set_kernel_buffers_count(pdata->iiod_client,
1147 &pdata->io_ctx, dev, nb_blocks);
1148 }
1149
/* Clone the context by opening a fresh connection to the same host. */
static struct iio_context * network_clone(const struct iio_context *ctx)
{
	return iio_create_network_context(
			iio_context_get_attr_value(ctx, "ip,ip-addr"));
}
1156
/* Operations table of the network (iiod) backend. */
static const struct iio_backend_ops network_ops = {
	.clone = network_clone,
	.open = network_open,
	.close = network_close,
	.read = network_read,
	.write = network_write,
#ifdef WITH_NETWORK_GET_BUFFER
	.get_buffer = network_get_buffer,
#endif
	.read_device_attr = network_read_dev_attr,
	.write_device_attr = network_write_dev_attr,
	.read_channel_attr = network_read_chn_attr,
	.write_channel_attr = network_write_chn_attr,
	.get_trigger = network_get_trigger,
	.set_trigger = network_set_trigger,
	.shutdown = network_shutdown,
	.get_version = network_get_version,
	.set_timeout = network_set_timeout,
	.set_kernel_buffers_count = network_set_kernel_buffers_count,

	.cancel = network_cancel,
};
1179
network_write_data(struct iio_context_pdata * pdata,void * io_data,const char * src,size_t len)1180 static ssize_t network_write_data(struct iio_context_pdata *pdata,
1181 void *io_data, const char *src, size_t len)
1182 {
1183 struct iio_network_io_context *io_ctx = io_data;
1184
1185 return network_send(io_ctx, src, len, 0);
1186 }
1187
network_read_data(struct iio_context_pdata * pdata,void * io_data,char * dst,size_t len)1188 static ssize_t network_read_data(struct iio_context_pdata *pdata,
1189 void *io_data, char *dst, size_t len)
1190 {
1191 struct iio_network_io_context *io_ctx = io_data;
1192
1193 return network_recv(io_ctx, dst, len, 0);
1194 }
1195
network_read_line(struct iio_context_pdata * pdata,void * io_data,char * dst,size_t len)1196 static ssize_t network_read_line(struct iio_context_pdata *pdata,
1197 void *io_data, char *dst, size_t len)
1198 {
1199 bool found = false;
1200 size_t i;
1201 #ifdef __linux__
1202 struct iio_network_io_context *io_ctx = io_data;
1203 ssize_t ret;
1204 size_t bytes_read = 0;
1205
1206 do {
1207 size_t to_trunc;
1208
1209 ret = network_recv(io_ctx, dst, len, MSG_PEEK);
1210 if (ret < 0)
1211 return ret;
1212
1213 /* Lookup for the trailing \n */
1214 for (i = 0; i < (size_t) ret && dst[i] != '\n'; i++);
1215 found = i < (size_t) ret;
1216
1217 len -= ret;
1218 dst += ret;
1219
1220 if (found)
1221 to_trunc = i + 1;
1222 else
1223 to_trunc = (size_t) ret;
1224
1225 /* Advance the read offset to the byte following the \n if
1226 * found, or after the last character read otherwise */
1227 if (pdata->msg_trunc_supported)
1228 ret = network_recv(io_ctx, NULL, to_trunc, MSG_TRUNC);
1229 else
1230 ret = network_recv(io_ctx, dst - ret, to_trunc, 0);
1231 if (ret < 0) {
1232 IIO_ERROR("NETWORK RECV: %zu\n", ret);
1233 return ret;
1234 }
1235
1236 bytes_read += to_trunc;
1237 } while (!found && len);
1238
1239 if (!found) {
1240 IIO_ERROR("EIO: %zu\n", ret);
1241 return -EIO;
1242 } else
1243 return bytes_read;
1244 #else
1245 for (i = 0; i < len - 1; i++) {
1246 ssize_t ret = network_read_data(pdata, io_data, dst + i, 1);
1247
1248 if (ret < 0)
1249 return ret;
1250
1251 if (dst[i] != '\n')
1252 found = true;
1253 else if (found)
1254 break;
1255 }
1256
1257 if (!found || i == len - 1)
1258 return -EIO;
1259
1260 return (ssize_t) i + 1;
1261 #endif
1262 }
1263
/* Low-level transport callbacks the iiod_client uses to exchange
 * commands with the remote IIOD server over the network socket. */
static const struct iiod_client_ops network_iiod_client_ops = {
	.write = network_write_data,
	.read = network_read_data,
	.read_line = network_read_line,
};
1269
#ifdef __linux__
/*
 * As of build 16299, Windows Subsystem for Linux presents a Linux API but
 * without support for MSG_TRUNC. Since WSL allows running native Linux
 * applications this is not something that can be detected at compile time. If
 * we want to support WSL we have to have a runtime workaround.
 */
static bool msg_trunc_supported(struct iio_network_io_context *io_ctx)
{
	int ret;

	/* Zero-length probe: a kernel that rejects MSG_TRUNC returns
	 * -EFAULT or -EINVAL; any other result (including 0 or -EAGAIN
	 * thanks to MSG_DONTWAIT) means the flag is usable. */
	ret = network_recv(io_ctx, NULL, 0, MSG_TRUNC | MSG_DONTWAIT);

	return ret != -EFAULT && ret != -EINVAL;
}
#else
/* MSG_TRUNC is Linux-specific; other platforms always read and discard
 * data explicitly (see network_read_line()). */
static bool msg_trunc_supported(struct iio_network_io_context *io_ctx)
{
	return false;
}
#endif
1291
/* Create a network context, connecting to the given host name or IP
 * address. If "host" is NULL or empty and DNS-SD support is compiled
 * in, connect to the first IIOD host discovered on the network.
 * Returns the new context, or NULL with errno set on error. */
struct iio_context * network_create_context(const char *host)
{
	struct addrinfo hints, *res;
	struct iio_context *ctx;
	struct iio_context_pdata *pdata;
	size_t i, len, uri_len;
	int fd, ret;
	char *description, *uri;
#ifdef _WIN32
	WSADATA wsaData;

	ret = WSAStartup(MAKEWORD(2, 0), &wsaData);
	if (ret) {
		/* WSAStartup() returns 0 on success or a *positive* WSA
		 * error code on failure; it never returns a negative value,
		 * so a "ret < 0" test could never detect a failure. */
		IIO_ERROR("WSAStartup failed with error %i\n", ret);
		errno = ret;
		return NULL;
	}
#endif

	memset(&hints, 0, sizeof(hints));
	hints.ai_family = AF_UNSPEC;
	hints.ai_socktype = SOCK_STREAM;

#ifdef HAVE_DNS_SD
	if (!host || !host[0]) {
		char addr_str[DNS_SD_ADDRESS_STR_MAX];
		char port_str[6]; /* "65535" + NUL */
		uint16_t port = IIOD_PORT;

		ret = dnssd_discover_host(addr_str, sizeof(addr_str), &port);
		if (ret < 0) {
			char buf[1024];
			iio_strerror(-ret, buf, sizeof(buf));
			IIO_DEBUG("Unable to find host: %s\n", buf);
			errno = -ret;
			return NULL;
		}
		if (!strlen(addr_str)) {
			IIO_DEBUG("No DNS Service Discovery hosts on network\n");
			errno = ENOENT;
			return NULL;
		}

		iio_snprintf(port_str, sizeof(port_str), "%hu", port);
		ret = getaddrinfo(addr_str, port_str, &hints, &res);
	} else
#endif
	{
		ret = getaddrinfo(host, IIOD_PORT_STR, &hints, &res);
	}

	if (ret) {
		IIO_ERROR("Unable to find host: %s\n", gai_strerror(ret));
#ifndef _WIN32
		if (ret != EAI_SYSTEM)
			errno = -ret;
#endif
		return NULL;
	}

	fd = create_socket(res, DEFAULT_TIMEOUT_MS);
	if (fd < 0) {
		errno = -fd;
		goto err_free_addrinfo;
	}

	pdata = zalloc(sizeof(*pdata));
	if (!pdata) {
		errno = ENOMEM;
		goto err_close_socket;
	}

	pdata->io_ctx.fd = fd;
	pdata->addrinfo = res;
	pdata->io_ctx.timeout_ms = DEFAULT_TIMEOUT_MS;

	pdata->lock = iio_mutex_create();
	if (!pdata->lock) {
		errno = ENOMEM;
		goto err_free_pdata;
	}

	pdata->iiod_client = iiod_client_new(pdata, pdata->lock,
			&network_iiod_client_ops);

	/* Probe for MSG_TRUNC before any command is exchanged;
	 * network_read_line() relies on this flag. */
	pdata->msg_trunc_supported = msg_trunc_supported(&pdata->io_ctx);
	if (pdata->msg_trunc_supported)
		IIO_DEBUG("MSG_TRUNC is supported\n");
	else
		IIO_DEBUG("MSG_TRUNC is NOT supported\n");

	if (!pdata->iiod_client)
		goto err_destroy_mutex;

	IIO_DEBUG("Creating context...\n");
	ctx = iiod_client_create_context(pdata->iiod_client, &pdata->io_ctx);
	if (!ctx)
		goto err_destroy_iiod_client;

	/* Override the name and low-level functions of the XML context
	 * with those corresponding to the network context */
	ctx->name = "network";
	ctx->ops = &network_ops;
	ctx->pdata = pdata;

	/* Worst-case length of the textual peer address, including the
	 * "%interface" scope suffix for IPv6 link-local addresses. */
#ifdef HAVE_IPV6
	len = INET6_ADDRSTRLEN + IF_NAMESIZE + 2;
#else
	len = INET_ADDRSTRLEN + 1;
#endif

	uri_len = len;
	if (host && host[0])
		uri_len = strnlen(host, MAXHOSTNAMELEN);
	uri_len += sizeof ("ip:"); /* sizeof includes the NUL terminator */

	uri = malloc(uri_len);
	if (!uri) {
		ret = -ENOMEM;
		goto err_network_shutdown;
	}

	description = malloc(len);
	if (!description) {
		ret = -ENOMEM;
		goto err_free_uri;
	}

	description[0] = '\0';

#ifdef HAVE_IPV6
	if (res->ai_family == AF_INET6) {
		struct sockaddr_in6 *in = (struct sockaddr_in6 *) res->ai_addr;
		char *ptr;
		inet_ntop(AF_INET6, &in->sin6_addr,
				description, INET6_ADDRSTRLEN);

		if (IN6_IS_ADDR_LINKLOCAL(&in->sin6_addr)) {
			/* Write the interface name right after the address'
			 * NUL terminator, then replace that terminator with
			 * '%' to obtain "address%interface". */
			ptr = if_indextoname(in->sin6_scope_id, description +
					strlen(description) + 1);
			if (!ptr) {
				ret = -errno;
				IIO_ERROR("Unable to lookup interface of IPv6 address\n");
				goto err_free_description;
			}

			*(ptr - 1) = '%';
		}
	}
#endif
	if (res->ai_family == AF_INET) {
		struct sockaddr_in *in = (struct sockaddr_in *) res->ai_addr;
#if (!_WIN32 || _WIN32_WINNT >= 0x600)
		inet_ntop(AF_INET, &in->sin_addr, description, INET_ADDRSTRLEN);
#else
		/* inet_ntop() for AF_INET is only available since Vista */
		char *tmp = inet_ntoa(in->sin_addr);
		iio_strlcpy(description, tmp, len);
#endif
	}

	ret = iio_context_add_attr(ctx, "ip,ip-addr", description);
	if (ret < 0)
		goto err_free_description;

	/* Note: no trailing newline in either branch - the "uri"
	 * attribute must round-trip through the URI parser. */
	if (host && host[0])
		iio_snprintf(uri, uri_len, "ip:%s", host);
	else
		iio_snprintf(uri, uri_len, "ip:%s", description);

	ret = iio_context_add_attr(ctx, "uri", uri);
	if (ret < 0)
		goto err_free_description;

	for (i = 0; i < ctx->nb_devices; i++) {
		struct iio_device *dev = ctx->devices[i];

		dev->pdata = zalloc(sizeof(*dev->pdata));
		if (!dev->pdata) {
			ret = -ENOMEM;
			goto err_free_description;
		}

		/* Each device gets its own (not yet connected) socket
		 * context for buffer I/O. */
		dev->pdata->io_ctx.fd = -1;
		dev->pdata->io_ctx.timeout_ms = DEFAULT_TIMEOUT_MS;
#ifdef WITH_NETWORK_GET_BUFFER
		dev->pdata->memfd = -1;
#endif

		dev->pdata->lock = iio_mutex_create();
		if (!dev->pdata->lock) {
			ret = -ENOMEM;
			goto err_free_description;
		}
	}

	/* Prepend the peer address to the context description inherited
	 * from the XML backend, if any. */
	if (ctx->description) {
		size_t desc_len = strlen(description);
		size_t new_size = desc_len + strlen(ctx->description) + 2;
		char *ptr, *new_description = realloc(description, new_size);
		if (!new_description) {
			ret = -ENOMEM;
			goto err_free_description;
		}

		ptr = strrchr(new_description, '\0');
		iio_snprintf(ptr, new_size - desc_len, " %s", ctx->description);
		free(ctx->description);

		ctx->description = new_description;
	} else {
		ctx->description = description;
	}

	free(uri);
	iiod_client_set_timeout(pdata->iiod_client, &pdata->io_ctx,
			calculate_remote_timeout(DEFAULT_TIMEOUT_MS));
	return ctx;

err_free_description:
	free(description);
err_free_uri:
	free(uri);
err_network_shutdown:
	/* Destroying the context tears down the iiod_client, the mutex,
	 * the socket and the addrinfo through network_shutdown(). */
	iio_context_destroy(ctx);
	errno = -ret;
	return NULL;

	/* Labels below are only reachable before the context exists, so
	 * each resource must be released individually. */
err_destroy_iiod_client:
	iiod_client_destroy(pdata->iiod_client);
err_destroy_mutex:
	iio_mutex_destroy(pdata->lock);
err_free_pdata:
	free(pdata);
err_close_socket:
	close(fd);
err_free_addrinfo:
	freeaddrinfo(res);
	return NULL;
}
1531