1 /**
2 * @file
3 * Sockets BSD-Like API module
4 */
5
6 /*
7 * Copyright (c) 2001-2004 Swedish Institute of Computer Science.
8 * All rights reserved.
9 *
10 * Redistribution and use in source and binary forms, with or without modification,
11 * are permitted provided that the following conditions are met:
12 *
13 * 1. Redistributions of source code must retain the above copyright notice,
14 * this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright notice,
16 * this list of conditions and the following disclaimer in the documentation
17 * and/or other materials provided with the distribution.
18 * 3. The name of the author may not be used to endorse or promote products
19 * derived from this software without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
22 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
23 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
24 * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
25 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
26 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
29 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
30 * OF SUCH DAMAGE.
31 *
32 * This file is part of the lwIP TCP/IP stack.
33 *
34 * Author: Adam Dunkels <adam@sics.se>
35 *
36 * Improved by Marc Boucher <marc@mbsi.ca> and David Haas <dhaas@alum.rpi.edu>
37 *
38 */
39
40 #include "lwip/opt.h"
41
42 #if LWIP_SOCKET /* don't build if not configured for use in lwipopts.h */
43
44 #include "lwip/sockets.h"
45 #include "lwip/priv/sockets_priv.h"
46 #include "lwip/api.h"
47 #include "lwip/igmp.h"
48 #include "lwip/inet.h"
49 #include "lwip/tcp.h"
50 #include "lwip/raw.h"
51 #include "lwip/udp.h"
52 #include "lwip/memp.h"
53 #include "lwip/pbuf.h"
54 #include "lwip/netif.h"
55 #include "lwip/priv/tcpip_priv.h"
56 #include "lwip/mld6.h"
57 #if LWIP_CHECKSUM_ON_COPY
58 #include "lwip/inet_chksum.h"
59 #endif
60
61 #if LWIP_COMPAT_SOCKETS == 2 && LWIP_POSIX_SOCKETS_IO_NAMES
62 #include <stdarg.h>
63 #endif
64
65 #include <string.h>
66
67 #ifdef LWIP_HOOK_FILENAME
68 #include LWIP_HOOK_FILENAME
69 #endif
70
71 /* If the netconn API is not required publicly, then we include the necessary
72 files here to get the implementation */
73 #if !LWIP_NETCONN
74 #undef LWIP_NETCONN
75 #define LWIP_NETCONN 1
76 #include "api_msg.c"
77 #include "api_lib.c"
78 #include "netbuf.c"
79 #undef LWIP_NETCONN
80 #define LWIP_NETCONN 0
81 #endif
82
83 #define API_SELECT_CB_VAR_REF(name) API_VAR_REF(name)
84 #define API_SELECT_CB_VAR_DECLARE(name) API_VAR_DECLARE(struct lwip_select_cb, name)
85 #define API_SELECT_CB_VAR_ALLOC(name, retblock) API_VAR_ALLOC_EXT(struct lwip_select_cb, MEMP_SELECT_CB, name, retblock)
86 #define API_SELECT_CB_VAR_FREE(name) API_VAR_FREE(MEMP_SELECT_CB, name)
87
88 #ifndef LWIP_SOCKET_HAVE_SA_LEN
89 #define LWIP_SOCKET_HAVE_SA_LEN 0
90 #endif /* LWIP_SOCKET_HAVE_SA_LEN */
91
92 /* Address length safe read and write */
93 #if LWIP_SOCKET_HAVE_SA_LEN
94
95 #if LWIP_IPV4
96 #define IP4ADDR_SOCKADDR_SET_LEN(sin) \
97 (sin)->sin_len = sizeof(struct sockaddr_in)
98 #endif /* LWIP_IPV4 */
99
100 #if LWIP_IPV6
101 #define IP6ADDR_SOCKADDR_SET_LEN(sin6) \
102 (sin6)->sin6_len = sizeof(struct sockaddr_in6)
103 #endif /* LWIP_IPV6 */
104
105 #define IPADDR_SOCKADDR_GET_LEN(addr) \
106 (addr)->sa.sa_len
107
108 #else
109
110 #if LWIP_IPV4
111 #define IP4ADDR_SOCKADDR_SET_LEN(addr)
112 #endif /* LWIP_IPV4 */
113
114 #if LWIP_IPV6
115 #define IP6ADDR_SOCKADDR_SET_LEN(addr)
116 #endif /* LWIP_IPV6 */
117
118 #if LWIP_IPV4 && LWIP_IPV6
119 #define IPADDR_SOCKADDR_GET_LEN(addr) \
120 ((addr)->sa.sa_family == AF_INET ? sizeof(struct sockaddr_in) \
121 : ((addr)->sa.sa_family == AF_INET6 ? sizeof(struct sockaddr_in6) : 0))
122 #elif LWIP_IPV4
123 #define IPADDR_SOCKADDR_GET_LEN(addr) sizeof(struct sockaddr_in)
124 #elif LWIP_IPV6
125 #define IPADDR_SOCKADDR_GET_LEN(addr) sizeof(struct sockaddr_in6)
126 #else
127 #define IPADDR_SOCKADDR_GET_LEN(addr) sizeof(struct sockaddr)
128 #endif /* LWIP_IPV4 && LWIP_IPV6 */
129
130 #endif /* LWIP_SOCKET_HAVE_SA_LEN */
131
132 #if LWIP_IPV4
133 #define IP4ADDR_PORT_TO_SOCKADDR(sin, ipaddr, port) do { \
134 IP4ADDR_SOCKADDR_SET_LEN(sin); \
135 (sin)->sin_family = AF_INET; \
136 (sin)->sin_port = lwip_htons((port)); \
137 inet_addr_from_ip4addr(&(sin)->sin_addr, ipaddr); \
138 memset((sin)->sin_zero, 0, SIN_ZERO_LEN); }while(0)
139 #define SOCKADDR4_TO_IP4ADDR_PORT(sin, ipaddr, port) do { \
140 inet_addr_to_ip4addr(ip_2_ip4(ipaddr), &((sin)->sin_addr)); \
141 (port) = lwip_ntohs((sin)->sin_port); }while(0)
142 #endif /* LWIP_IPV4 */
143
144 #if LWIP_IPV6
145 #define IP6ADDR_PORT_TO_SOCKADDR(sin6, ipaddr, port) do { \
146 IP6ADDR_SOCKADDR_SET_LEN(sin6); \
147 (sin6)->sin6_family = AF_INET6; \
148 (sin6)->sin6_port = lwip_htons((port)); \
149 (sin6)->sin6_flowinfo = 0; \
150 inet6_addr_from_ip6addr(&(sin6)->sin6_addr, ipaddr); \
151 (sin6)->sin6_scope_id = ip6_addr_zone(ipaddr); }while(0)
152 #define SOCKADDR6_TO_IP6ADDR_PORT(sin6, ipaddr, port) do { \
153 inet6_addr_to_ip6addr(ip_2_ip6(ipaddr), &((sin6)->sin6_addr)); \
154 if (ip6_addr_has_scope(ip_2_ip6(ipaddr), IP6_UNKNOWN)) { \
155 ip6_addr_set_zone(ip_2_ip6(ipaddr), (u8_t)((sin6)->sin6_scope_id)); \
156 } \
157 (port) = lwip_ntohs((sin6)->sin6_port); }while(0)
158 #endif /* LWIP_IPV6 */
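
/* Illustrative sketch (excluded from the build): how the conversion macros
 * above are typically used to fill a BSD-style address from an lwIP
 * address/port pair. Assumes an IPv4 build; 'example_fill_sockaddr' is a
 * hypothetical helper, not part of lwIP. */
#if 0 /* example only */
static void example_fill_sockaddr(void)
{
  struct sockaddr_in sin;
  ip4_addr_t ip4;
  IP4_ADDR(&ip4, 192, 168, 1, 10);
  /* sets sa_len (if enabled), sin_family=AF_INET, sin_port in network byte
     order, sin_addr from 'ip4' and zeroes sin_zero */
  IP4ADDR_PORT_TO_SOCKADDR(&sin, &ip4, 80);
  LWIP_UNUSED_ARG(sin);
}
#endif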
159
160 #if LWIP_IPV4 && LWIP_IPV6
161 static void sockaddr_to_ipaddr_port(const struct sockaddr *sockaddr, ip_addr_t *ipaddr, u16_t *port);
162
163 #define IS_SOCK_ADDR_LEN_VALID(namelen) (((namelen) == sizeof(struct sockaddr_in)) || \
164 ((namelen) == sizeof(struct sockaddr_in6)))
165 #define IS_SOCK_ADDR_TYPE_VALID(name) (((name)->sa_family == AF_INET) || \
166 ((name)->sa_family == AF_INET6))
167 #define SOCK_ADDR_TYPE_MATCH(name, sock) \
168 ((((name)->sa_family == AF_INET) && !(NETCONNTYPE_ISIPV6((sock)->conn->type))) || \
169 (((name)->sa_family == AF_INET6) && (NETCONNTYPE_ISIPV6((sock)->conn->type))))
170 #define IPADDR_PORT_TO_SOCKADDR(sockaddr, ipaddr, port) do { \
171 if (IP_IS_ANY_TYPE_VAL(*ipaddr) || IP_IS_V6_VAL(*ipaddr)) { \
172 IP6ADDR_PORT_TO_SOCKADDR((struct sockaddr_in6*)(void*)(sockaddr), ip_2_ip6(ipaddr), port); \
173 } else { \
174 IP4ADDR_PORT_TO_SOCKADDR((struct sockaddr_in*)(void*)(sockaddr), ip_2_ip4(ipaddr), port); \
175 } } while(0)
176 #define SOCKADDR_TO_IPADDR_PORT(sockaddr, ipaddr, port) sockaddr_to_ipaddr_port(sockaddr, ipaddr, &(port))
177 #define DOMAIN_TO_NETCONN_TYPE(domain, type) (((domain) == AF_INET) ? \
178 (type) : (enum netconn_type)((type) | NETCONN_TYPE_IPV6))
179 #elif LWIP_IPV6 /* LWIP_IPV4 && LWIP_IPV6 */
180 #define IS_SOCK_ADDR_LEN_VALID(namelen) ((namelen) == sizeof(struct sockaddr_in6))
181 #define IS_SOCK_ADDR_TYPE_VALID(name) ((name)->sa_family == AF_INET6)
182 #define SOCK_ADDR_TYPE_MATCH(name, sock) 1
183 #define IPADDR_PORT_TO_SOCKADDR(sockaddr, ipaddr, port) \
184 IP6ADDR_PORT_TO_SOCKADDR((struct sockaddr_in6*)(void*)(sockaddr), ip_2_ip6(ipaddr), port)
185 #define SOCKADDR_TO_IPADDR_PORT(sockaddr, ipaddr, port) \
186 SOCKADDR6_TO_IP6ADDR_PORT((const struct sockaddr_in6*)(const void*)(sockaddr), ipaddr, port)
187 #define DOMAIN_TO_NETCONN_TYPE(domain, netconn_type) (netconn_type)
188 #else /*-> LWIP_IPV4: LWIP_IPV4 && LWIP_IPV6 */
189 #define IS_SOCK_ADDR_LEN_VALID(namelen) ((namelen) == sizeof(struct sockaddr_in))
190 #define IS_SOCK_ADDR_TYPE_VALID(name) ((name)->sa_family == AF_INET)
191 #define SOCK_ADDR_TYPE_MATCH(name, sock) 1
192 #define IPADDR_PORT_TO_SOCKADDR(sockaddr, ipaddr, port) \
193 IP4ADDR_PORT_TO_SOCKADDR((struct sockaddr_in*)(void*)(sockaddr), ip_2_ip4(ipaddr), port)
194 #define SOCKADDR_TO_IPADDR_PORT(sockaddr, ipaddr, port) \
195 SOCKADDR4_TO_IP4ADDR_PORT((const struct sockaddr_in*)(const void*)(sockaddr), ipaddr, port)
196 #define DOMAIN_TO_NETCONN_TYPE(domain, netconn_type) (netconn_type)
197 #endif /* LWIP_IPV6 */
198
199 #define IS_SOCK_ADDR_TYPE_VALID_OR_UNSPEC(name) (((name)->sa_family == AF_UNSPEC) || \
200 IS_SOCK_ADDR_TYPE_VALID(name))
201 #define SOCK_ADDR_TYPE_MATCH_OR_UNSPEC(name, sock) (((name)->sa_family == AF_UNSPEC) || \
202 SOCK_ADDR_TYPE_MATCH(name, sock))
203 #define IS_SOCK_ADDR_ALIGNED(name) ((((mem_ptr_t)(name)) % LWIP_MIN(4, MEM_ALIGNMENT)) == 0)
204
205
206 #define LWIP_SOCKOPT_CHECK_OPTLEN(sock, optlen, opttype) do { if ((optlen) < sizeof(opttype)) { done_socket(sock); return EINVAL; }}while(0)
207 #define LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, optlen, opttype) do { \
208 LWIP_SOCKOPT_CHECK_OPTLEN(sock, optlen, opttype); \
209 if ((sock)->conn == NULL) { done_socket(sock); return EINVAL; } }while(0)
210 #define LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, optlen, opttype) do { \
211 LWIP_SOCKOPT_CHECK_OPTLEN(sock, optlen, opttype); \
212 if (((sock)->conn == NULL) || ((sock)->conn->pcb.tcp == NULL)) { done_socket(sock); return EINVAL; } }while(0)
213 #define LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, opttype, netconntype) do { \
214 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, optlen, opttype); \
215 if (NETCONNTYPE_GROUP(netconn_type((sock)->conn)) != netconntype) { done_socket(sock); return ENOPROTOOPT; } }while(0)
216
217
218 #define LWIP_SETGETSOCKOPT_DATA_VAR_REF(name) API_VAR_REF(name)
219 #define LWIP_SETGETSOCKOPT_DATA_VAR_DECLARE(name) API_VAR_DECLARE(struct lwip_setgetsockopt_data, name)
220 #define LWIP_SETGETSOCKOPT_DATA_VAR_FREE(name) API_VAR_FREE(MEMP_SOCKET_SETGETSOCKOPT_DATA, name)
221 #if LWIP_MPU_COMPATIBLE
222 #define LWIP_SETGETSOCKOPT_DATA_VAR_ALLOC(name, sock) do { \
223 name = (struct lwip_setgetsockopt_data *)memp_malloc(MEMP_SOCKET_SETGETSOCKOPT_DATA); \
224 if (name == NULL) { \
225 set_errno(ENOMEM); \
226 done_socket(sock); \
227 return -1; \
228 } }while(0)
229 #else /* LWIP_MPU_COMPATIBLE */
230 #define LWIP_SETGETSOCKOPT_DATA_VAR_ALLOC(name, sock)
231 #endif /* LWIP_MPU_COMPATIBLE */
232
233 #if LWIP_SO_SNDRCVTIMEO_NONSTANDARD
234 #define LWIP_SO_SNDRCVTIMEO_OPTTYPE int
235 #define LWIP_SO_SNDRCVTIMEO_SET(optval, val) (*(int *)(optval) = (val))
236 #define LWIP_SO_SNDRCVTIMEO_GET_MS(optval) ((long)*(const int*)(optval))
237 #else
238 #define LWIP_SO_SNDRCVTIMEO_OPTTYPE struct timeval
239 #define LWIP_SO_SNDRCVTIMEO_SET(optval, val) do { \
240 u32_t loc = (val); \
241 ((struct timeval *)(optval))->tv_sec = (long)((loc) / 1000U); \
242 ((struct timeval *)(optval))->tv_usec = (long)(((loc) % 1000U) * 1000U); }while(0)
243 #define LWIP_SO_SNDRCVTIMEO_GET_MS(optval) ((((const struct timeval *)(optval))->tv_sec * 1000) + (((const struct timeval *)(optval))->tv_usec / 1000))
244 #endif
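
/* Illustrative sketch (excluded from the build): with the default
 * LWIP_SO_SNDRCVTIMEO_NONSTANDARD==0 setting, send/receive timeouts are passed
 * as a struct timeval and converted to milliseconds by the macros above.
 * Assumes LWIP_SO_RCVTIMEO is enabled in lwipopts.h; the helper name is
 * hypothetical. */
#if 0 /* example only */
static void example_set_recv_timeout(int s)
{
  struct timeval tv;
  tv.tv_sec = 2;        /* LWIP_SO_SNDRCVTIMEO_GET_MS() yields 2500 ms */
  tv.tv_usec = 500000;
  lwip_setsockopt(s, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv));
}
#endif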
245
246
247 /** A struct sockaddr replacement that has the same alignment as sockaddr_in/
248 * sockaddr_in6 if instantiated.
249 */
250 union sockaddr_aligned {
251 struct sockaddr sa;
252 #if LWIP_IPV6
253 struct sockaddr_in6 sin6;
254 #endif /* LWIP_IPV6 */
255 #if LWIP_IPV4
256 struct sockaddr_in sin;
257 #endif /* LWIP_IPV4 */
258 };
259
260 /* Define the number of IPv4 multicast memberships, default is one per socket */
261 #ifndef LWIP_SOCKET_MAX_MEMBERSHIPS
262 #define LWIP_SOCKET_MAX_MEMBERSHIPS NUM_SOCKETS
263 #endif
264
265 #if LWIP_IGMP
266 /* This is to keep track of IP_ADD_MEMBERSHIP calls to drop the membership when
267 a socket is closed */
268 struct lwip_socket_multicast_pair {
269 /** the socket */
270 struct lwip_sock *sock;
271 /** the interface address */
272 ip4_addr_t if_addr;
273 /** the group address */
274 ip4_addr_t multi_addr;
275 };
276
277 static struct lwip_socket_multicast_pair socket_ipv4_multicast_memberships[LWIP_SOCKET_MAX_MEMBERSHIPS];
278
279 static int lwip_socket_register_membership(int s, const ip4_addr_t *if_addr, const ip4_addr_t *multi_addr);
280 static void lwip_socket_unregister_membership(int s, const ip4_addr_t *if_addr, const ip4_addr_t *multi_addr);
281 static void lwip_socket_drop_registered_memberships(int s);
282 #endif /* LWIP_IGMP */
283
284 #if LWIP_IPV6_MLD
285 /* This is to keep track of IPV6_JOIN_GROUP calls to drop the membership when
286 a socket is closed */
287 struct lwip_socket_multicast_mld6_pair {
288 /** the socket */
289 struct lwip_sock *sock;
290 /** the interface index */
291 u8_t if_idx;
292 /** the group address */
293 ip6_addr_t multi_addr;
294 };
295
296 static struct lwip_socket_multicast_mld6_pair socket_ipv6_multicast_memberships[LWIP_SOCKET_MAX_MEMBERSHIPS];
297
298 static int lwip_socket_register_mld6_membership(int s, unsigned int if_idx, const ip6_addr_t *multi_addr);
299 static void lwip_socket_unregister_mld6_membership(int s, unsigned int if_idx, const ip6_addr_t *multi_addr);
300 static void lwip_socket_drop_registered_mld6_memberships(int s);
301 #endif /* LWIP_IPV6_MLD */
302
303 /** The global array of available sockets */
304 static struct lwip_sock sockets[NUM_SOCKETS];
305
306 #if LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL
307 #if LWIP_TCPIP_CORE_LOCKING
308 /* protect the select_cb_list using core lock */
309 #define LWIP_SOCKET_SELECT_DECL_PROTECT(lev)
310 #define LWIP_SOCKET_SELECT_PROTECT(lev) LOCK_TCPIP_CORE()
311 #define LWIP_SOCKET_SELECT_UNPROTECT(lev) UNLOCK_TCPIP_CORE()
312 #else /* LWIP_TCPIP_CORE_LOCKING */
313 /* protect the select_cb_list using SYS_LIGHTWEIGHT_PROT */
314 #define LWIP_SOCKET_SELECT_DECL_PROTECT(lev) SYS_ARCH_DECL_PROTECT(lev)
315 #define LWIP_SOCKET_SELECT_PROTECT(lev) SYS_ARCH_PROTECT(lev)
316 #define LWIP_SOCKET_SELECT_UNPROTECT(lev) SYS_ARCH_UNPROTECT(lev)
317 /** This counter is increased from lwip_select when the list is changed
318 and checked in select_check_waiters to see if it has changed. */
319 static volatile int select_cb_ctr;
320 #endif /* LWIP_TCPIP_CORE_LOCKING */
321 /** The global list of tasks waiting for select */
322 static struct lwip_select_cb *select_cb_list;
323 #endif /* LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL */
324
325 /* Forward declaration of some functions */
326 #if LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL
327 static void event_callback(struct netconn *conn, enum netconn_evt evt, u16_t len);
328 #define DEFAULT_SOCKET_EVENTCB event_callback
329 static void select_check_waiters(int s, int has_recvevent, int has_sendevent, int has_errevent);
330 #else
331 #define DEFAULT_SOCKET_EVENTCB NULL
332 #endif
333 #if !LWIP_TCPIP_CORE_LOCKING
334 static void lwip_getsockopt_callback(void *arg);
335 static void lwip_setsockopt_callback(void *arg);
336 #endif
337 static int lwip_getsockopt_impl(int s, int level, int optname, void *optval, socklen_t *optlen);
338 static int lwip_setsockopt_impl(int s, int level, int optname, const void *optval, socklen_t optlen);
339 static int free_socket_locked(struct lwip_sock *sock, int is_tcp, struct netconn **conn,
340 union lwip_sock_lastdata *lastdata);
341 static void free_socket_free_elements(int is_tcp, struct netconn *conn, union lwip_sock_lastdata *lastdata);
342
343 #if LWIP_IPV4 && LWIP_IPV6
344 static void
345 sockaddr_to_ipaddr_port(const struct sockaddr *sockaddr, ip_addr_t *ipaddr, u16_t *port)
346 {
347 if ((sockaddr->sa_family) == AF_INET6) {
348 SOCKADDR6_TO_IP6ADDR_PORT((const struct sockaddr_in6 *)(const void *)(sockaddr), ipaddr, *port);
349 ipaddr->type = IPADDR_TYPE_V6;
350 } else {
351 SOCKADDR4_TO_IP4ADDR_PORT((const struct sockaddr_in *)(const void *)(sockaddr), ipaddr, *port);
352 ipaddr->type = IPADDR_TYPE_V4;
353 }
354 }
355 #endif /* LWIP_IPV4 && LWIP_IPV6 */
356
357 /** LWIP_NETCONN_SEM_PER_THREAD==1: initialize thread-local semaphore */
358 void
359 lwip_socket_thread_init(void)
360 {
361 netconn_thread_init();
362 }
363
364 /** LWIP_NETCONN_SEM_PER_THREAD==1: destroy thread-local semaphore */
365 void
366 lwip_socket_thread_cleanup(void)
367 {
368 netconn_thread_cleanup();
369 }
370
371 #if LWIP_NETCONN_FULLDUPLEX
372 /* Thread-safe increment of sock->fd_used, with overflow check */
373 static int
374 sock_inc_used(struct lwip_sock *sock)
375 {
376 int ret;
377 SYS_ARCH_DECL_PROTECT(lev);
378
379 LWIP_ASSERT("sock != NULL", sock != NULL);
380
381 SYS_ARCH_PROTECT(lev);
382 if (sock->fd_free_pending) {
383 /* prevent new usage of this socket if free is pending */
384 ret = 0;
385 } else {
386 ++sock->fd_used;
387 ret = 1;
388 LWIP_ASSERT("sock->fd_used != 0", sock->fd_used != 0);
389 }
390 SYS_ARCH_UNPROTECT(lev);
391 return ret;
392 }
393
394 /* Like sock_inc_used(), but called under SYS_ARCH_PROTECT lock. */
395 static int
396 sock_inc_used_locked(struct lwip_sock *sock)
397 {
398 LWIP_ASSERT("sock != NULL", sock != NULL);
399
400 if (sock->fd_free_pending) {
401 LWIP_ASSERT("sock->fd_used != 0", sock->fd_used != 0);
402 return 0;
403 }
404
405 ++sock->fd_used;
406 LWIP_ASSERT("sock->fd_used != 0", sock->fd_used != 0);
407 return 1;
408 }
409
410 /* In full-duplex mode, sock->fd_used != 0 prevents a socket descriptor from being
411 * released (and possibly reused) when used from more than one thread
412 * (e.g. read-while-write or close-while-write, etc.)
413 * This function is called at the end of functions using (try)get_socket*().
414 */
415 static void
416 done_socket(struct lwip_sock *sock)
417 {
418 int freed = 0;
419 int is_tcp = 0;
420 struct netconn *conn = NULL;
421 union lwip_sock_lastdata lastdata;
422 SYS_ARCH_DECL_PROTECT(lev);
423 LWIP_ASSERT("sock != NULL", sock != NULL);
424
425 SYS_ARCH_PROTECT(lev);
426 LWIP_ASSERT("sock->fd_used > 0", sock->fd_used > 0);
427 if (--sock->fd_used == 0) {
428 if (sock->fd_free_pending) {
429 /* free the socket */
430 sock->fd_used = 1;
431 is_tcp = sock->fd_free_pending & LWIP_SOCK_FD_FREE_TCP;
432 freed = free_socket_locked(sock, is_tcp, &conn, &lastdata);
433 }
434 }
435 SYS_ARCH_UNPROTECT(lev);
436
437 if (freed) {
438 free_socket_free_elements(is_tcp, conn, &lastdata);
439 }
440 }
441
442 #else /* LWIP_NETCONN_FULLDUPLEX */
443 #define sock_inc_used(sock) 1
444 #define sock_inc_used_locked(sock) 1
445 #define done_socket(sock)
446 #endif /* LWIP_NETCONN_FULLDUPLEX */
447
448 /* Translate a socket 'int' into a pointer (only fails if the index is invalid) */
449 static struct lwip_sock *
450 tryget_socket_unconn_nouse(int fd)
451 {
452 int s = fd - LWIP_SOCKET_OFFSET;
453 if ((s < 0) || (s >= NUM_SOCKETS)) {
454 LWIP_DEBUGF(SOCKETS_DEBUG, ("tryget_socket_unconn(%d): invalid\n", fd));
455 return NULL;
456 }
457 return &sockets[s];
458 }
459
460 struct lwip_sock *
461 lwip_socket_dbg_get_socket(int fd)
462 {
463 return tryget_socket_unconn_nouse(fd);
464 }
465
466 /* Translate a socket 'int' into a pointer (only fails if the index is invalid) */
467 static struct lwip_sock *
468 tryget_socket_unconn(int fd)
469 {
470 struct lwip_sock *ret = tryget_socket_unconn_nouse(fd);
471 if (ret != NULL) {
472 if (!sock_inc_used(ret)) {
473 return NULL;
474 }
475 }
476 return ret;
477 }
478
479 /* Like tryget_socket_unconn(), but called under SYS_ARCH_PROTECT lock. */
480 static struct lwip_sock *
481 tryget_socket_unconn_locked(int fd)
482 {
483 struct lwip_sock *ret = tryget_socket_unconn_nouse(fd);
484 if (ret != NULL) {
485 if (!sock_inc_used_locked(ret)) {
486 return NULL;
487 }
488 }
489 return ret;
490 }
491
492 /**
493 * Same as get_socket but doesn't set errno
494 *
495 * @param fd externally used socket index
496 * @return struct lwip_sock for the socket or NULL if not found
497 */
498 static struct lwip_sock *
499 tryget_socket(int fd)
500 {
501 struct lwip_sock *sock = tryget_socket_unconn(fd);
502 if (sock != NULL) {
503 if (sock->conn) {
504 return sock;
505 }
506 done_socket(sock);
507 }
508 return NULL;
509 }
510
511 /**
512 * Map an externally used socket index to the internal socket representation.
513 *
514 * @param fd externally used socket index
515 * @return struct lwip_sock for the socket or NULL if not found
516 */
517 static struct lwip_sock *
518 get_socket(int fd)
519 {
520 struct lwip_sock *sock = tryget_socket(fd);
521 if (!sock) {
522 if ((fd < LWIP_SOCKET_OFFSET) || (fd >= (LWIP_SOCKET_OFFSET + NUM_SOCKETS))) {
523 LWIP_DEBUGF(SOCKETS_DEBUG, ("get_socket(%d): invalid\n", fd));
524 }
525 set_errno(EBADF);
526 return NULL;
527 }
528 return sock;
529 }
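
/* Illustrative sketch (excluded from the build): every public function below
 * brackets its work between get_socket() and done_socket() so that, with
 * LWIP_NETCONN_FULLDUPLEX enabled, the descriptor cannot be freed and reused
 * while another thread is still using it. 'example_sock_op' is hypothetical. */
#if 0 /* example only */
static int example_sock_op(int s)
{
  struct lwip_sock *sock = get_socket(s);  /* increments fd_used */
  if (!sock) {
    return -1;                             /* errno already set to EBADF */
  }
  /* ... operate on sock->conn ... */
  set_errno(0);
  done_socket(sock);                       /* decrements fd_used, may free */
  return 0;
}
#endif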
530
531 /**
532 * Allocate a new socket for a given netconn.
533 *
534 * @param newconn the netconn for which to allocate a socket
535 * @param accepted 1 if socket has been created by accept(),
536 * 0 if socket has been created by socket()
537 * @return the index of the new socket; -1 on error
538 */
539 static int
540 alloc_socket(struct netconn *newconn, int accepted)
541 {
542 int i;
543 SYS_ARCH_DECL_PROTECT(lev);
544 LWIP_UNUSED_ARG(accepted);
545
546 /* allocate a new socket identifier */
547 for (i = 0; i < NUM_SOCKETS; ++i) {
548 /* Protect socket array */
549 SYS_ARCH_PROTECT(lev);
550 if (!sockets[i].conn) {
551 #if LWIP_NETCONN_FULLDUPLEX
552 if (sockets[i].fd_used) {
553 SYS_ARCH_UNPROTECT(lev);
554 continue;
555 }
556 sockets[i].fd_used = 1;
557 sockets[i].fd_free_pending = 0;
558 #endif
559 sockets[i].conn = newconn;
560 /* The socket is not yet known to anyone, so no need to protect
561 after having marked it as used. */
562 SYS_ARCH_UNPROTECT(lev);
563 sockets[i].lastdata.pbuf = NULL;
564 #if LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL
565 LWIP_ASSERT("sockets[i].select_waiting == 0", sockets[i].select_waiting == 0);
566 sockets[i].rcvevent = 0;
567 /* TCP sendbuf is empty, but the socket is not yet writable until connected
568 * (unless it has been created by accept()). */
569 sockets[i].sendevent = (NETCONNTYPE_GROUP(newconn->type) == NETCONN_TCP ? (accepted != 0) : 1);
570 sockets[i].errevent = 0;
571 #endif /* LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL */
572 return i + LWIP_SOCKET_OFFSET;
573 }
574 SYS_ARCH_UNPROTECT(lev);
575 }
576 return -1;
577 }
578
579 /** Free a socket (under lock)
580 *
581 * @param sock the socket to free
582 * @param is_tcp != 0 for TCP sockets, used to free lastdata
583 * @param conn the socket's netconn is stored here, must be freed externally
584 * @param lastdata lastdata is stored here, must be freed externally
585 */
586 static int
587 free_socket_locked(struct lwip_sock *sock, int is_tcp, struct netconn **conn,
588 union lwip_sock_lastdata *lastdata)
589 {
590 #if LWIP_NETCONN_FULLDUPLEX
591 LWIP_ASSERT("sock->fd_used > 0", sock->fd_used > 0);
592 sock->fd_used--;
593 if (sock->fd_used > 0) {
594 sock->fd_free_pending = LWIP_SOCK_FD_FREE_FREE | (is_tcp ? LWIP_SOCK_FD_FREE_TCP : 0);
595 return 0;
596 }
597 #else /* LWIP_NETCONN_FULLDUPLEX */
598 LWIP_UNUSED_ARG(is_tcp);
599 #endif /* LWIP_NETCONN_FULLDUPLEX */
600
601 *lastdata = sock->lastdata;
602 sock->lastdata.pbuf = NULL;
603 *conn = sock->conn;
604 sock->conn = NULL;
605 return 1;
606 }
607
608 /** Free a socket's leftover members.
609 */
610 static void
611 free_socket_free_elements(int is_tcp, struct netconn *conn, union lwip_sock_lastdata *lastdata)
612 {
613 if (lastdata->pbuf != NULL) {
614 if (is_tcp) {
615 pbuf_free(lastdata->pbuf);
616 } else {
617 netbuf_delete(lastdata->netbuf);
618 }
619 }
620 if (conn != NULL) {
621 /* netconn_prepare_delete() has already been called, here we only free the conn */
622 netconn_delete(conn);
623 }
624 }
625
626 /** Free a socket. The socket's netconn must have been
627 * deleted before!
628 *
629 * @param sock the socket to free
630 * @param is_tcp != 0 for TCP sockets, used to free lastdata
631 */
632 static void
633 free_socket(struct lwip_sock *sock, int is_tcp)
634 {
635 int freed;
636 struct netconn *conn;
637 union lwip_sock_lastdata lastdata;
638 SYS_ARCH_DECL_PROTECT(lev);
639
640 /* Protect socket array */
641 SYS_ARCH_PROTECT(lev);
642
643 freed = free_socket_locked(sock, is_tcp, &conn, &lastdata);
644 SYS_ARCH_UNPROTECT(lev);
645 /* don't use 'sock' after this line, as another task might have allocated it */
646
647 if (freed) {
648 free_socket_free_elements(is_tcp, conn, &lastdata);
649 }
650 }
651
652 /* Below this, the well-known socket functions are implemented.
653 * Use google.com or opengroup.org to get a good description :-)
654 *
655 * Exceptions are documented!
656 */
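
/* Illustrative sketch (excluded from the build): a minimal blocking TCP
 * echo-style server using the functions implemented below. Error handling is
 * reduced to early returns; 'example_tcp_server' and the port number are
 * hypothetical. Assumes an IPv4 build. */
#if 0 /* example only */
static void example_tcp_server(void)
{
  char buf[64];
  struct sockaddr_in local;
  int c, s = lwip_socket(AF_INET, SOCK_STREAM, 0);
  if (s < 0) {
    return;
  }
  memset(&local, 0, sizeof(local));
  local.sin_family = AF_INET;
  local.sin_port = lwip_htons(7);      /* hypothetical port */
  local.sin_addr.s_addr = INADDR_ANY;
  if ((lwip_bind(s, (struct sockaddr *)&local, sizeof(local)) < 0) ||
      (lwip_listen(s, 1) < 0)) {
    lwip_close(s);
    return;
  }
  c = lwip_accept(s, NULL, NULL);      /* addr/addrlen may be NULL */
  if (c >= 0) {
    ssize_t got = lwip_recv(c, buf, sizeof(buf), 0);
    if (got > 0) {
      lwip_send(c, buf, (size_t)got, 0);
    }
    lwip_close(c);
  }
  lwip_close(s);
}
#endif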
657
658 int
659 lwip_accept(int s, struct sockaddr *addr, socklen_t *addrlen)
660 {
661 struct lwip_sock *sock, *nsock;
662 struct netconn *newconn;
663 ip_addr_t naddr;
664 u16_t port = 0;
665 int newsock;
666 err_t err;
667 int recvevent;
668 SYS_ARCH_DECL_PROTECT(lev);
669
670 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_accept(%d)...\n", s));
671 sock = get_socket(s);
672 if (!sock) {
673 return -1;
674 }
675
676 /* wait for a new connection */
677 err = netconn_accept(sock->conn, &newconn);
678 if (err != ERR_OK) {
679 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_accept(%d): netconn_acept failed, err=%d\n", s, err));
680 if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_TCP) {
681 set_errno(EOPNOTSUPP);
682 } else if (err == ERR_CLSD) {
683 set_errno(EINVAL);
684 } else {
685 set_errno(err_to_errno(err));
686 }
687 done_socket(sock);
688 return -1;
689 }
690 LWIP_ASSERT("newconn != NULL", newconn != NULL);
691
692 newsock = alloc_socket(newconn, 1);
693 if (newsock == -1) {
694 netconn_delete(newconn);
695 set_errno(ENFILE);
696 done_socket(sock);
697 return -1;
698 }
699 LWIP_ASSERT("invalid socket index", (newsock >= LWIP_SOCKET_OFFSET) && (newsock < NUM_SOCKETS + LWIP_SOCKET_OFFSET));
700 nsock = &sockets[newsock - LWIP_SOCKET_OFFSET];
701
702 /* See event_callback: data may come in right away after an accept, even
703 * though the server task might not have created a new socket yet.
704 * In that case, newconn->socket is counted down (newconn->socket--),
705 * so nsock->rcvevent is >= 1 here!
706 */
707 SYS_ARCH_PROTECT(lev);
708 recvevent = (s16_t)(-1 - newconn->callback_arg.socket);
709 newconn->callback_arg.socket = newsock;
710 SYS_ARCH_UNPROTECT(lev);
711
712 if (newconn->callback) {
713 LOCK_TCPIP_CORE();
714 while (recvevent > 0) {
715 recvevent--;
716 newconn->callback(newconn, NETCONN_EVT_RCVPLUS, 0);
717 }
718 UNLOCK_TCPIP_CORE();
719 }
720
721 /* Note that POSIX only requires us to check addr is non-NULL. addrlen must
722 * not be NULL if addr is valid.
723 */
724 if ((addr != NULL) && (addrlen != NULL)) {
725 union sockaddr_aligned tempaddr;
726 /* get the IP address and port of the remote host */
727 err = netconn_peer(newconn, &naddr, &port);
728 if (err != ERR_OK) {
729 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_accept(%d): netconn_peer failed, err=%d\n", s, err));
730 free_socket(nsock, 1);
731 set_errno(err_to_errno(err));
732 done_socket(sock);
733 return -1;
734 }
735
736 IPADDR_PORT_TO_SOCKADDR(&tempaddr, &naddr, port);
737 if (*addrlen > IPADDR_SOCKADDR_GET_LEN(&tempaddr)) {
738 *addrlen = IPADDR_SOCKADDR_GET_LEN(&tempaddr);
739 }
740 MEMCPY(addr, &tempaddr, *addrlen);
741
742 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_accept(%d) returning new sock=%d addr=", s, newsock));
743 ip_addr_debug_print_val(SOCKETS_DEBUG, naddr);
744 LWIP_DEBUGF(SOCKETS_DEBUG, (" port=%"U16_F"\n", port));
745 } else {
746 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_accept(%d) returning new sock=%d\n", s, newsock));
747 }
748
749 set_errno(0);
750 done_socket(sock);
751 done_socket(nsock);
752 return newsock;
753 }
754
755 int
756 lwip_bind(int s, const struct sockaddr *name, socklen_t namelen)
757 {
758 struct lwip_sock *sock;
759 ip_addr_t local_addr;
760 u16_t local_port;
761 err_t err;
762
763 sock = get_socket(s);
764 if (!sock) {
765 return -1;
766 }
767
768 if (!SOCK_ADDR_TYPE_MATCH(name, sock)) {
769 /* sockaddr does not match socket type (IPv4/IPv6) */
770 set_errno(err_to_errno(ERR_VAL));
771 done_socket(sock);
772 return -1;
773 }
774
775 /* check size, family and alignment of 'name' */
776 LWIP_ERROR("lwip_bind: invalid address", (IS_SOCK_ADDR_LEN_VALID(namelen) &&
777 IS_SOCK_ADDR_TYPE_VALID(name) && IS_SOCK_ADDR_ALIGNED(name)),
778 set_errno(err_to_errno(ERR_ARG)); done_socket(sock); return -1;);
779 LWIP_UNUSED_ARG(namelen);
780
781 SOCKADDR_TO_IPADDR_PORT(name, &local_addr, local_port);
782 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_bind(%d, addr=", s));
783 ip_addr_debug_print_val(SOCKETS_DEBUG, local_addr);
784 LWIP_DEBUGF(SOCKETS_DEBUG, (" port=%"U16_F")\n", local_port));
785
786 #if LWIP_IPV4 && LWIP_IPV6
787 /* Dual-stack: Unmap IPv4 mapped IPv6 addresses */
788 if (IP_IS_V6_VAL(local_addr) && ip6_addr_isipv4mappedipv6(ip_2_ip6(&local_addr))) {
789 unmap_ipv4_mapped_ipv6(ip_2_ip4(&local_addr), ip_2_ip6(&local_addr));
790 IP_SET_TYPE_VAL(local_addr, IPADDR_TYPE_V4);
791 }
792 #endif /* LWIP_IPV4 && LWIP_IPV6 */
793
794 err = netconn_bind(sock->conn, &local_addr, local_port);
795
796 if (err != ERR_OK) {
797 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_bind(%d) failed, err=%d\n", s, err));
798 set_errno(err_to_errno(err));
799 done_socket(sock);
800 return -1;
801 }
802
803 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_bind(%d) succeeded\n", s));
804 set_errno(0);
805 done_socket(sock);
806 return 0;
807 }
808
809 int
810 lwip_close(int s)
811 {
812 struct lwip_sock *sock;
813 int is_tcp = 0;
814 err_t err;
815
816 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_close(%d)\n", s));
817
818 sock = get_socket(s);
819 if (!sock) {
820 return -1;
821 }
822
823 if (sock->conn != NULL) {
824 is_tcp = NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP;
825 } else {
826 LWIP_ASSERT("sock->lastdata == NULL", sock->lastdata.pbuf == NULL);
827 }
828
829 #if LWIP_IGMP
830 /* drop all possibly joined IGMP memberships */
831 lwip_socket_drop_registered_memberships(s);
832 #endif /* LWIP_IGMP */
833 #if LWIP_IPV6_MLD
834 /* drop all possibly joined MLD6 memberships */
835 lwip_socket_drop_registered_mld6_memberships(s);
836 #endif /* LWIP_IPV6_MLD */
837
838 err = netconn_prepare_delete(sock->conn);
839 if (err != ERR_OK) {
840 set_errno(err_to_errno(err));
841 done_socket(sock);
842 return -1;
843 }
844
845 free_socket(sock, is_tcp);
846 set_errno(0);
847 return 0;
848 }
849
850 int
851 lwip_connect(int s, const struct sockaddr *name, socklen_t namelen)
852 {
853 struct lwip_sock *sock;
854 err_t err;
855
856 sock = get_socket(s);
857 if (!sock) {
858 return -1;
859 }
860
861 if (!SOCK_ADDR_TYPE_MATCH_OR_UNSPEC(name, sock)) {
862 /* sockaddr does not match socket type (IPv4/IPv6) */
863 set_errno(err_to_errno(ERR_VAL));
864 done_socket(sock);
865 return -1;
866 }
867
868 LWIP_UNUSED_ARG(namelen);
869 if (name->sa_family == AF_UNSPEC) {
870 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_connect(%d, AF_UNSPEC)\n", s));
871 err = netconn_disconnect(sock->conn);
872 } else {
873 ip_addr_t remote_addr;
874 u16_t remote_port;
875
876 /* check size, family and alignment of 'name' */
877 LWIP_ERROR("lwip_connect: invalid address", IS_SOCK_ADDR_LEN_VALID(namelen) &&
878 IS_SOCK_ADDR_TYPE_VALID_OR_UNSPEC(name) && IS_SOCK_ADDR_ALIGNED(name),
879 set_errno(err_to_errno(ERR_ARG)); done_socket(sock); return -1;);
880
881 SOCKADDR_TO_IPADDR_PORT(name, &remote_addr, remote_port);
882 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_connect(%d, addr=", s));
883 ip_addr_debug_print_val(SOCKETS_DEBUG, remote_addr);
884 LWIP_DEBUGF(SOCKETS_DEBUG, (" port=%"U16_F")\n", remote_port));
885
886 #if LWIP_IPV4 && LWIP_IPV6
887 /* Dual-stack: Unmap IPv4 mapped IPv6 addresses */
888 if (IP_IS_V6_VAL(remote_addr) && ip6_addr_isipv4mappedipv6(ip_2_ip6(&remote_addr))) {
889 unmap_ipv4_mapped_ipv6(ip_2_ip4(&remote_addr), ip_2_ip6(&remote_addr));
890 IP_SET_TYPE_VAL(remote_addr, IPADDR_TYPE_V4);
891 }
892 #endif /* LWIP_IPV4 && LWIP_IPV6 */
893
894 err = netconn_connect(sock->conn, &remote_addr, remote_port);
895 }
896
897 if (err != ERR_OK) {
898 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_connect(%d) failed, err=%d\n", s, err));
899 set_errno(err_to_errno(err));
900 done_socket(sock);
901 return -1;
902 }
903
904 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_connect(%d) succeeded\n", s));
905 set_errno(0);
906 done_socket(sock);
907 return 0;
908 }
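
/* Illustrative sketch (excluded from the build): as handled above, passing a
 * sockaddr with sa_family == AF_UNSPEC to lwip_connect() dissolves the remote
 * association of a (UDP) socket via netconn_disconnect() instead of
 * connecting it. The helper name is hypothetical. */
#if 0 /* example only */
static void example_udp_disconnect(int udp_sock)
{
  struct sockaddr unspec;
  memset(&unspec, 0, sizeof(unspec));
  unspec.sa_family = AF_UNSPEC;
  lwip_connect(udp_sock, &unspec, sizeof(unspec));
}
#endif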
909
910 /**
911 * Set a socket into listen mode.
912 * The socket may not have been used for another connection previously.
913 *
914 * @param s the socket to set to listening mode
915 * @param backlog (ATTENTION: needs TCP_LISTEN_BACKLOG=1)
916 * @return 0 on success, non-zero on failure
917 */
918 int
919 lwip_listen(int s, int backlog)
920 {
921 struct lwip_sock *sock;
922 err_t err;
923
924 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_listen(%d, backlog=%d)\n", s, backlog));
925
926 sock = get_socket(s);
927 if (!sock) {
928 return -1;
929 }
930
931 /* limit the "backlog" parameter to fit in an u8_t */
932 backlog = LWIP_MIN(LWIP_MAX(backlog, 0), 0xff);
933
934 err = netconn_listen_with_backlog(sock->conn, (u8_t)backlog);
935
936 if (err != ERR_OK) {
937 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_listen(%d) failed, err=%d\n", s, err));
938 if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_TCP) {
939 set_errno(EOPNOTSUPP);
940 } else {
941 set_errno(err_to_errno(err));
942 }
943 done_socket(sock);
944 return -1;
945 }
946
947 set_errno(0);
948 done_socket(sock);
949 return 0;
950 }
951
952 #if LWIP_TCP
953 /* Helper function to loop over receiving pbufs from netconn
954 * until "len" bytes are received or we're otherwise done.
955 * Keeps sock->lastdata for peeking or partly copying.
956 */
957 static ssize_t
958 lwip_recv_tcp(struct lwip_sock *sock, void *mem, size_t len, int flags)
959 {
960 u8_t apiflags = NETCONN_NOAUTORCVD;
961 ssize_t recvd = 0;
962 ssize_t recv_left = (len <= SSIZE_MAX) ? (ssize_t)len : SSIZE_MAX;
963
964 LWIP_ASSERT("no socket given", sock != NULL);
965 LWIP_ASSERT("this should be checked internally", NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP);
966
967 if (flags & MSG_DONTWAIT) {
968 apiflags |= NETCONN_DONTBLOCK;
969 }
970
971 do {
972 struct pbuf *p;
973 err_t err;
974 u16_t copylen;
975
976 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recv_tcp: top while sock->lastdata=%p\n", (void *)sock->lastdata.pbuf));
977 /* Check if there is data left from the last recv operation. */
978 if (sock->lastdata.pbuf) {
979 p = sock->lastdata.pbuf;
980 } else {
981 /* No data was left from the previous operation, so we try to get
982 some from the network. */
983 err = netconn_recv_tcp_pbuf_flags(sock->conn, &p, apiflags);
984 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recv_tcp: netconn_recv err=%d, pbuf=%p\n",
985 err, (void *)p));
986
987 if (err != ERR_OK) {
988 if (recvd > 0) {
989 /* already received data, return that (this trusts in getting the same error from
990 netconn layer again next time netconn_recv is called) */
991 goto lwip_recv_tcp_done;
992 }
993 /* We should really do some error checking here. */
994 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recv_tcp: p == NULL, error is \"%s\"!\n",
995 lwip_strerr(err)));
996 set_errno(err_to_errno(err));
997 if (err == ERR_CLSD) {
998 return 0;
999 } else {
1000 return -1;
1001 }
1002 }
1003 LWIP_ASSERT("p != NULL", p != NULL);
1004 sock->lastdata.pbuf = p;
1005 }
1006
1007 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recv_tcp: buflen=%"U16_F" recv_left=%d off=%d\n",
1008 p->tot_len, (int)recv_left, (int)recvd));
1009
1010 if (recv_left > p->tot_len) {
1011 copylen = p->tot_len;
1012 } else {
1013 copylen = (u16_t)recv_left;
1014 }
1015 if (recvd > SSIZE_MAX - copylen) {
1016 /* overflow */
1017 copylen = (u16_t)(SSIZE_MAX - recvd);
1018 }
1019
1020 /* copy the contents of the received buffer into
1021 the supplied memory pointer mem */
1022 pbuf_copy_partial(p, (u8_t *)mem + recvd, copylen, 0);
1023
1024 recvd += copylen;
1025
1026 /* TCP combines multiple pbufs for one recv */
1027 LWIP_ASSERT("invalid copylen, len would underflow", recv_left >= copylen);
1028 recv_left -= copylen;
1029
1030 /* Unless we peek the incoming message... */
1031 if ((flags & MSG_PEEK) == 0) {
1032 /* ... check if there is data left in the pbuf */
1033 LWIP_ASSERT("invalid copylen", p->tot_len >= copylen);
1034 if (p->tot_len - copylen > 0) {
1035 /* If so, it should be saved in the sock structure for the next recv call.
1036 We store the pbuf but hide/free the consumed data: */
1037 sock->lastdata.pbuf = pbuf_free_header(p, copylen);
1038 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recv_tcp: lastdata now pbuf=%p\n", (void *)sock->lastdata.pbuf));
1039 } else {
1040 sock->lastdata.pbuf = NULL;
1041 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recv_tcp: deleting pbuf=%p\n", (void *)p));
1042 pbuf_free(p);
1043 }
1044 }
1045 /* once we have some data to return, only add more if we don't need to wait */
1046 apiflags |= NETCONN_DONTBLOCK | NETCONN_NOFIN;
1047 /* @todo: do we need to support peeking more than one pbuf? */
1048 } while ((recv_left > 0) && !(flags & MSG_PEEK));
1049 lwip_recv_tcp_done:
1050 if ((recvd > 0) && !(flags & MSG_PEEK)) {
1051 /* ensure window update after copying all data */
1052 netconn_tcp_recvd(sock->conn, (size_t)recvd);
1053 }
1054 set_errno(0);
1055 return recvd;
1056 }
1057 #endif
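
/* Illustrative sketch (excluded from the build): a non-blocking peek on a TCP
 * socket. MSG_DONTWAIT maps to NETCONN_DONTBLOCK and MSG_PEEK keeps the data
 * queued in sock->lastdata, as handled in lwip_recv_tcp() above. The helper
 * name is hypothetical. */
#if 0 /* example only */
static ssize_t example_tcp_peek(int s, void *buf, size_t len)
{
  return lwip_recv(s, buf, len, MSG_PEEK | MSG_DONTWAIT);
}
#endif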
1058
1059 /* Convert a netbuf's address data to struct sockaddr */
1060 static int
1061 lwip_sock_make_addr(struct netconn *conn, ip_addr_t *fromaddr, u16_t port,
1062 struct sockaddr *from, socklen_t *fromlen)
1063 {
1064 int truncated = 0;
1065 union sockaddr_aligned saddr;
1066
1067 LWIP_UNUSED_ARG(conn);
1068
1069 LWIP_ASSERT("fromaddr != NULL", fromaddr != NULL);
1070 LWIP_ASSERT("from != NULL", from != NULL);
1071 LWIP_ASSERT("fromlen != NULL", fromlen != NULL);
1072
1073 #if LWIP_IPV4 && LWIP_IPV6
1074 /* Dual-stack: Map IPv4 addresses to IPv4 mapped IPv6 */
1075 if (NETCONNTYPE_ISIPV6(netconn_type(conn)) && IP_IS_V4(fromaddr)) {
1076 ip4_2_ipv4_mapped_ipv6(ip_2_ip6(fromaddr), ip_2_ip4(fromaddr));
1077 IP_SET_TYPE(fromaddr, IPADDR_TYPE_V6);
1078 }
1079 #endif /* LWIP_IPV4 && LWIP_IPV6 */
1080
1081 IPADDR_PORT_TO_SOCKADDR(&saddr, fromaddr, port);
1082 if (*fromlen < IPADDR_SOCKADDR_GET_LEN(&saddr)) {
1083 truncated = 1;
1084 } else if (*fromlen > IPADDR_SOCKADDR_GET_LEN(&saddr)) {
1085 *fromlen = IPADDR_SOCKADDR_GET_LEN(&saddr);
1086 }
1087 MEMCPY(from, &saddr, *fromlen);
1088 return truncated;
1089 }
1090
1091 #if LWIP_TCP
1092 /* Helper function to get a tcp socket's remote address info */
1093 static int
1094 lwip_recv_tcp_from(struct lwip_sock *sock, struct sockaddr *from, socklen_t *fromlen, const char *dbg_fn, int dbg_s, ssize_t dbg_ret)
1095 {
1096 if (sock == NULL) {
1097 return 0;
1098 }
1099 LWIP_UNUSED_ARG(dbg_fn);
1100 LWIP_UNUSED_ARG(dbg_s);
1101 LWIP_UNUSED_ARG(dbg_ret);
1102
1103 #if !SOCKETS_DEBUG
1104 if (from && fromlen)
1105 #endif /* !SOCKETS_DEBUG */
1106 {
1107 /* get remote addr/port from tcp_pcb */
1108 u16_t port;
1109 ip_addr_t tmpaddr;
1110 netconn_getaddr(sock->conn, &tmpaddr, &port, 0);
1111 LWIP_DEBUGF(SOCKETS_DEBUG, ("%s(%d): addr=", dbg_fn, dbg_s));
1112 ip_addr_debug_print_val(SOCKETS_DEBUG, tmpaddr);
1113 LWIP_DEBUGF(SOCKETS_DEBUG, (" port=%"U16_F" len=%d\n", port, (int)dbg_ret));
1114 if (from && fromlen) {
1115 return lwip_sock_make_addr(sock->conn, &tmpaddr, port, from, fromlen);
1116 }
1117 }
1118 return 0;
1119 }
1120 #endif
1121
1122 /* Helper function to receive a netbuf from a udp or raw netconn.
1123 * Keeps sock->lastdata for peeking.
1124 */
1125 static err_t
1126 lwip_recvfrom_udp_raw(struct lwip_sock *sock, int flags, struct msghdr *msg, u16_t *datagram_len, int dbg_s)
1127 {
1128 struct netbuf *buf;
1129 u8_t apiflags;
1130 err_t err;
1131 u16_t buflen, copylen, copied;
1132 msg_iovlen_t i;
1133
1134 LWIP_UNUSED_ARG(dbg_s);
1135 LWIP_ERROR("lwip_recvfrom_udp_raw: invalid arguments", (msg->msg_iov != NULL) || (msg->msg_iovlen <= 0), return ERR_ARG;);
1136
1137 if (flags & MSG_DONTWAIT) {
1138 apiflags = NETCONN_DONTBLOCK;
1139 } else {
1140 apiflags = 0;
1141 }
1142
1143 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvfrom_udp_raw[UDP/RAW]: top sock->lastdata=%p\n", (void *)sock->lastdata.netbuf));
1144 /* Check if there is data left from the last recv operation. */
1145 buf = sock->lastdata.netbuf;
1146 if (buf == NULL) {
1147 /* No data was left from the previous operation, so we try to get
1148 some from the network. */
1149 err = netconn_recv_udp_raw_netbuf_flags(sock->conn, &buf, apiflags);
1150 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvfrom_udp_raw[UDP/RAW]: netconn_recv err=%d, netbuf=%p\n",
1151 err, (void *)buf));
1152
1153 if (err != ERR_OK) {
1154 return err;
1155 }
1156 LWIP_ASSERT("buf != NULL", buf != NULL);
1157 sock->lastdata.netbuf = buf;
1158 }
1159 buflen = buf->p->tot_len;
1160 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvfrom_udp_raw: buflen=%"U16_F"\n", buflen));
1161
1162 copied = 0;
1163 /* copy the pbuf payload into the iovs */
1164 for (i = 0; (i < msg->msg_iovlen) && (copied < buflen); i++) {
1165 u16_t len_left = (u16_t)(buflen - copied);
1166 if (msg->msg_iov[i].iov_len > len_left) {
1167 copylen = len_left;
1168 } else {
1169 copylen = (u16_t)msg->msg_iov[i].iov_len;
1170 }
1171
1172 /* copy the contents of the received buffer into
1173 the supplied memory buffer */
1174 pbuf_copy_partial(buf->p, (u8_t *)msg->msg_iov[i].iov_base, copylen, copied);
1175 copied = (u16_t)(copied + copylen);
1176 }
1177
1178 /* Check to see where the data came from. */
1179 #if !SOCKETS_DEBUG
1180 if (msg->msg_name && msg->msg_namelen)
1181 #endif /* !SOCKETS_DEBUG */
1182 {
1183 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvfrom_udp_raw(%d): addr=", dbg_s));
1184 ip_addr_debug_print_val(SOCKETS_DEBUG, *netbuf_fromaddr(buf));
1185 LWIP_DEBUGF(SOCKETS_DEBUG, (" port=%"U16_F" len=%d\n", netbuf_fromport(buf), copied));
1186 if (msg->msg_name && msg->msg_namelen) {
1187 lwip_sock_make_addr(sock->conn, netbuf_fromaddr(buf), netbuf_fromport(buf),
1188 (struct sockaddr *)msg->msg_name, &msg->msg_namelen);
1189 }
1190 }
1191
1192 /* Initialize flag output */
1193 msg->msg_flags = 0;
1194
1195 if (msg->msg_control) {
1196 u8_t wrote_msg = 0;
1197 #if LWIP_NETBUF_RECVINFO
1198 /* Check if packet info was recorded */
1199 if (buf->flags & NETBUF_FLAG_DESTADDR) {
1200 if (IP_IS_V4(&buf->toaddr)) {
1201 #if LWIP_IPV4
1202 if (msg->msg_controllen >= CMSG_SPACE(sizeof(struct in_pktinfo))) {
1203 struct cmsghdr *chdr = CMSG_FIRSTHDR(msg); /* This will always return a header!! */
1204 struct in_pktinfo *pkti = (struct in_pktinfo *)CMSG_DATA(chdr);
1205 chdr->cmsg_level = IPPROTO_IP;
1206 chdr->cmsg_type = IP_PKTINFO;
1207 chdr->cmsg_len = CMSG_LEN(sizeof(struct in_pktinfo));
1208 pkti->ipi_ifindex = buf->p->if_idx;
1209 inet_addr_from_ip4addr(&pkti->ipi_addr, ip_2_ip4(netbuf_destaddr(buf)));
1210 msg->msg_controllen = CMSG_SPACE(sizeof(struct in_pktinfo));
1211 wrote_msg = 1;
1212 } else {
1213 msg->msg_flags |= MSG_CTRUNC;
1214 }
1215 #endif /* LWIP_IPV4 */
1216 }
1217 }
1218 #endif /* LWIP_NETBUF_RECVINFO */
1219
1220 if (!wrote_msg) {
1221 msg->msg_controllen = 0;
1222 }
1223 }
1224
1225 /* If we don't peek the incoming message: zero lastdata pointer and free the netbuf */
1226 if ((flags & MSG_PEEK) == 0) {
1227 sock->lastdata.netbuf = NULL;
1228 netbuf_delete(buf);
1229 }
1230 if (datagram_len) {
1231 *datagram_len = buflen;
1232 }
1233 return ERR_OK;
1234 }
1235
1236 ssize_t
1237 lwip_recvfrom(int s, void *mem, size_t len, int flags,
1238 struct sockaddr *from, socklen_t *fromlen)
1239 {
1240 struct lwip_sock *sock;
1241 ssize_t ret;
1242
1243 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvfrom(%d, %p, %"SZT_F", 0x%x, ..)\n", s, mem, len, flags));
1244 sock = get_socket(s);
1245 if (!sock) {
1246 return -1;
1247 }
1248 #if LWIP_TCP
1249 if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP) {
1250 ret = lwip_recv_tcp(sock, mem, len, flags);
1251 lwip_recv_tcp_from(sock, from, fromlen, "lwip_recvfrom", s, ret);
1252 done_socket(sock);
1253 return ret;
1254 } else
1255 #endif
1256 {
1257 u16_t datagram_len = 0;
1258 struct iovec vec;
1259 struct msghdr msg;
1260 err_t err;
1261 vec.iov_base = mem;
1262 vec.iov_len = len;
1263 msg.msg_control = NULL;
1264 msg.msg_controllen = 0;
1265 msg.msg_flags = 0;
1266 msg.msg_iov = &vec;
1267 msg.msg_iovlen = 1;
1268 msg.msg_name = from;
1269 msg.msg_namelen = (fromlen ? *fromlen : 0);
1270 err = lwip_recvfrom_udp_raw(sock, flags, &msg, &datagram_len, s);
1271 if (err != ERR_OK) {
1272 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvfrom[UDP/RAW](%d): buf == NULL, error is \"%s\"!\n",
1273 s, lwip_strerr(err)));
1274 set_errno(err_to_errno(err));
1275 done_socket(sock);
1276 return -1;
1277 }
1278 ret = (ssize_t)LWIP_MIN(LWIP_MIN(len, datagram_len), SSIZE_MAX);
1279 if (fromlen) {
1280 *fromlen = msg.msg_namelen;
1281 }
1282 }
1283
1284 set_errno(0);
1285 done_socket(sock);
1286 return ret;
1287 }
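
/* Illustrative sketch (excluded from the build): classic recvfrom() usage on a
 * UDP socket. *fromlen must be initialized to the size of the address buffer;
 * lwip_sock_make_addr() shrinks it (or truncates the address) as needed. The
 * helper name is hypothetical. */
#if 0 /* example only */
static void example_udp_recvfrom(int udp_sock)
{
  char buf[128];
  struct sockaddr_storage from;
  socklen_t fromlen = sizeof(from);
  ssize_t got = lwip_recvfrom(udp_sock, buf, sizeof(buf), 0,
                              (struct sockaddr *)&from, &fromlen);
  LWIP_UNUSED_ARG(got);
}
#endif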
1288
1289 ssize_t
1290 lwip_read(int s, void *mem, size_t len)
1291 {
1292 return lwip_recvfrom(s, mem, len, 0, NULL, NULL);
1293 }
1294
1295 ssize_t
1296 lwip_readv(int s, const struct iovec *iov, int iovcnt)
1297 {
1298 struct msghdr msg;
1299
1300 msg.msg_name = NULL;
1301 msg.msg_namelen = 0;
1302 /* Hack: we have to cast via number to cast from 'const' pointer to non-const.
1303 Blame the opengroup standard for this inconsistency. */
1304 msg.msg_iov = LWIP_CONST_CAST(struct iovec *, iov);
1305 msg.msg_iovlen = iovcnt;
1306 msg.msg_control = NULL;
1307 msg.msg_controllen = 0;
1308 msg.msg_flags = 0;
1309 return lwip_recvmsg(s, &msg, 0);
1310 }
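
/* Illustrative sketch (excluded from the build): scatter-read into two caller
 * buffers with lwip_readv(); the iovec array becomes msg_iov of the msghdr
 * built above. Names are hypothetical. */
#if 0 /* example only */
static ssize_t example_scatter_read(int s, void *hdr, size_t hdrlen,
                                    void *body, size_t bodylen)
{
  struct iovec iov[2];
  iov[0].iov_base = hdr;
  iov[0].iov_len = hdrlen;
  iov[1].iov_base = body;
  iov[1].iov_len = bodylen;
  return lwip_readv(s, iov, 2);
}
#endif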
1311
1312 ssize_t
1313 lwip_recv(int s, void *mem, size_t len, int flags)
1314 {
1315 return lwip_recvfrom(s, mem, len, flags, NULL, NULL);
1316 }
1317
1318 ssize_t
1319 lwip_recvmsg(int s, struct msghdr *message, int flags)
1320 {
1321 struct lwip_sock *sock;
1322 msg_iovlen_t i;
1323 ssize_t buflen;
1324
1325 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvmsg(%d, message=%p, flags=0x%x)\n", s, (void *)message, flags));
1326 LWIP_ERROR("lwip_recvmsg: invalid message pointer", message != NULL, return ERR_ARG;);
1327 LWIP_ERROR("lwip_recvmsg: unsupported flags", (flags & ~(MSG_PEEK|MSG_DONTWAIT)) == 0,
1328 set_errno(EOPNOTSUPP); return -1;);
1329
1330 if ((message->msg_iovlen <= 0) || (message->msg_iovlen > IOV_MAX)) {
1331 set_errno(EMSGSIZE);
1332 return -1;
1333 }
1334
1335 sock = get_socket(s);
1336 if (!sock) {
1337 return -1;
1338 }
1339
1340 /* check for valid vectors */
1341 buflen = 0;
1342 for (i = 0; i < message->msg_iovlen; i++) {
1343 if ((message->msg_iov[i].iov_base == NULL) || ((ssize_t)message->msg_iov[i].iov_len <= 0) ||
1344 ((size_t)(ssize_t)message->msg_iov[i].iov_len != message->msg_iov[i].iov_len) ||
1345 ((ssize_t)(buflen + (ssize_t)message->msg_iov[i].iov_len) <= 0)) {
1346 set_errno(err_to_errno(ERR_VAL));
1347 done_socket(sock);
1348 return -1;
1349 }
1350 buflen = (ssize_t)(buflen + (ssize_t)message->msg_iov[i].iov_len);
1351 }
1352
1353 if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP) {
1354 #if LWIP_TCP
1355 int recv_flags = flags;
1356 message->msg_flags = 0;
1357 /* recv the data */
1358 buflen = 0;
1359 for (i = 0; i < message->msg_iovlen; i++) {
1360 /* try to receive into this vector's buffer */
1361 ssize_t recvd_local = lwip_recv_tcp(sock, message->msg_iov[i].iov_base, message->msg_iov[i].iov_len, recv_flags);
1362 if (recvd_local > 0) {
1363 /* sum up received bytes */
1364 buflen += recvd_local;
1365 }
1366 if ((recvd_local < 0) || (recvd_local < (int)message->msg_iov[i].iov_len) ||
1367 (flags & MSG_PEEK)) {
1368 /* returned prematurely (or peeking, which might actually be limited to the first iov) */
1369 if (buflen <= 0) {
1370 /* nothing received at all, propagate the error */
1371 buflen = recvd_local;
1372 }
1373 break;
1374 }
1375 /* pass MSG_DONTWAIT to lwip_recv_tcp() to prevent waiting for more data */
1376 recv_flags |= MSG_DONTWAIT;
1377 }
1378 if (buflen > 0) {
1379 /* reset socket error since we have received something */
1380 set_errno(0);
1381 }
1382 /* " If the socket is connected, the msg_name and msg_namelen members shall be ignored." */
1383 done_socket(sock);
1384 return buflen;
1385 #else /* LWIP_TCP */
1386 set_errno(err_to_errno(ERR_ARG));
1387 done_socket(sock);
1388 return -1;
1389 #endif /* LWIP_TCP */
1390 }
1391 /* else, UDP and RAW NETCONNs */
1392 #if LWIP_UDP || LWIP_RAW
1393 {
1394 u16_t datagram_len = 0;
1395 err_t err;
1396 err = lwip_recvfrom_udp_raw(sock, flags, message, &datagram_len, s);
1397 if (err != ERR_OK) {
1398 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvmsg[UDP/RAW](%d): buf == NULL, error is \"%s\"!\n",
1399 s, lwip_strerr(err)));
1400 set_errno(err_to_errno(err));
1401 done_socket(sock);
1402 return -1;
1403 }
1404 if (datagram_len > buflen) {
1405 message->msg_flags |= MSG_TRUNC;
1406 }
1407
1408 set_errno(0);
1409 done_socket(sock);
1410 return (int)datagram_len;
1411 }
1412 #else /* LWIP_UDP || LWIP_RAW */
1413 set_errno(err_to_errno(ERR_ARG));
1414 done_socket(sock);
1415 return -1;
1416 #endif /* LWIP_UDP || LWIP_RAW */
1417 }
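
/* Illustrative sketch (excluded from the build): retrieving the destination
 * address of a UDP datagram via the IP_PKTINFO control message filled in by
 * lwip_recvfrom_udp_raw(). Assumes LWIP_NETBUF_RECVINFO==1 and that IP_PKTINFO
 * has been enabled on the socket; the helper name is hypothetical. */
#if 0 /* example only */
static void example_recv_pktinfo(int s, void *buf, size_t len)
{
  union {
    struct cmsghdr hdr;
    u8_t space[CMSG_SPACE(sizeof(struct in_pktinfo))];
  } cbuf;
  struct iovec iov;
  struct msghdr msg;
  struct cmsghdr *cmsg;

  iov.iov_base = buf;
  iov.iov_len = len;
  memset(&msg, 0, sizeof(msg));
  msg.msg_iov = &iov;
  msg.msg_iovlen = 1;
  msg.msg_control = cbuf.space;
  msg.msg_controllen = sizeof(cbuf.space);

  if (lwip_recvmsg(s, &msg, 0) >= 0) {
    for (cmsg = CMSG_FIRSTHDR(&msg); cmsg != NULL; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
      if ((cmsg->cmsg_level == IPPROTO_IP) && (cmsg->cmsg_type == IP_PKTINFO)) {
        const struct in_pktinfo *pkti = (const struct in_pktinfo *)CMSG_DATA(cmsg);
        LWIP_UNUSED_ARG(pkti);  /* pkti->ipi_addr / pkti->ipi_ifindex */
      }
    }
  }
}
#endif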
1418
1419 ssize_t
1420 lwip_send(int s, const void *data, size_t size, int flags)
1421 {
1422 struct lwip_sock *sock;
1423 err_t err;
1424 u8_t write_flags;
1425 size_t written;
1426
1427 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_send(%d, data=%p, size=%"SZT_F", flags=0x%x)\n",
1428 s, data, size, flags));
1429
1430 sock = get_socket(s);
1431 if (!sock) {
1432 return -1;
1433 }
1434
1435 if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_TCP) {
1436 #if (LWIP_UDP || LWIP_RAW)
1437 done_socket(sock);
1438 return lwip_sendto(s, data, size, flags, NULL, 0);
1439 #else /* (LWIP_UDP || LWIP_RAW) */
1440 set_errno(err_to_errno(ERR_ARG));
1441 done_socket(sock);
1442 return -1;
1443 #endif /* (LWIP_UDP || LWIP_RAW) */
1444 }
1445
1446 write_flags = (u8_t)(NETCONN_COPY |
1447 ((flags & MSG_MORE) ? NETCONN_MORE : 0) |
1448 ((flags & MSG_DONTWAIT) ? NETCONN_DONTBLOCK : 0));
1449 written = 0;
1450 err = netconn_write_partly(sock->conn, data, size, write_flags, &written);
1451
1452 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_send(%d) err=%d written=%"SZT_F"\n", s, err, written));
1453 set_errno(err_to_errno(err));
1454 done_socket(sock);
1455 /* casting 'written' to ssize_t is OK here since the netconn API limits it to SSIZE_MAX */
1456 return (err == ERR_OK ? (ssize_t)written : -1);
1457 }
1458
1459 ssize_t
1460 lwip_sendmsg(int s, const struct msghdr *msg, int flags)
1461 {
1462 struct lwip_sock *sock;
1463 #if LWIP_TCP
1464 u8_t write_flags;
1465 size_t written;
1466 #endif
1467 err_t err = ERR_OK;
1468
1469 sock = get_socket(s);
1470 if (!sock) {
1471 return -1;
1472 }
1473
1474 LWIP_ERROR("lwip_sendmsg: invalid msghdr", msg != NULL,
1475 set_errno(err_to_errno(ERR_ARG)); done_socket(sock); return -1;);
1476 LWIP_ERROR("lwip_sendmsg: invalid msghdr iov", msg->msg_iov != NULL,
1477 set_errno(err_to_errno(ERR_ARG)); done_socket(sock); return -1;);
1478 LWIP_ERROR("lwip_sendmsg: maximum iovs exceeded", (msg->msg_iovlen > 0) && (msg->msg_iovlen <= IOV_MAX),
1479 set_errno(EMSGSIZE); done_socket(sock); return -1;);
1480 LWIP_ERROR("lwip_sendmsg: unsupported flags", (flags & ~(MSG_DONTWAIT | MSG_MORE)) == 0,
1481 set_errno(EOPNOTSUPP); done_socket(sock); return -1;);
1482
1483 LWIP_UNUSED_ARG(msg->msg_control);
1484 LWIP_UNUSED_ARG(msg->msg_controllen);
1485 LWIP_UNUSED_ARG(msg->msg_flags);
1486
1487 if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP) {
1488 #if LWIP_TCP
1489 write_flags = (u8_t)(NETCONN_COPY |
1490 ((flags & MSG_MORE) ? NETCONN_MORE : 0) |
1491 ((flags & MSG_DONTWAIT) ? NETCONN_DONTBLOCK : 0));
1492
1493 written = 0;
1494 err = netconn_write_vectors_partly(sock->conn, (struct netvector *)msg->msg_iov, (u16_t)msg->msg_iovlen, write_flags, &written);
1495 set_errno(err_to_errno(err));
1496 done_socket(sock);
1497 /* casting 'written' to ssize_t is OK here since the netconn API limits it to SSIZE_MAX */
1498 return (err == ERR_OK ? (ssize_t)written : -1);
1499 #else /* LWIP_TCP */
1500 set_errno(err_to_errno(ERR_ARG));
1501 done_socket(sock);
1502 return -1;
1503 #endif /* LWIP_TCP */
1504 }
1505 /* else, UDP and RAW NETCONNs */
1506 #if LWIP_UDP || LWIP_RAW
1507 {
1508 struct netbuf chain_buf;
1509 msg_iovlen_t i;
1510 ssize_t size = 0;
1511
1512 LWIP_UNUSED_ARG(flags);
1513 LWIP_ERROR("lwip_sendmsg: invalid msghdr name", (((msg->msg_name == NULL) && (msg->msg_namelen == 0)) ||
1514 IS_SOCK_ADDR_LEN_VALID(msg->msg_namelen)),
1515 set_errno(err_to_errno(ERR_ARG)); done_socket(sock); return -1;);
1516
1517 /* initialize chain buffer with destination */
1518 memset(&chain_buf, 0, sizeof(struct netbuf));
1519 if (msg->msg_name) {
1520 u16_t remote_port;
1521 SOCKADDR_TO_IPADDR_PORT((const struct sockaddr *)msg->msg_name, &chain_buf.addr, remote_port);
1522 netbuf_fromport(&chain_buf) = remote_port;
1523 }
1524 #if LWIP_NETIF_TX_SINGLE_PBUF
1525 for (i = 0; i < msg->msg_iovlen; i++) {
1526 size += msg->msg_iov[i].iov_len;
1527 if ((msg->msg_iov[i].iov_len > INT_MAX) || (size < (int)msg->msg_iov[i].iov_len)) {
1528 /* overflow */
1529 goto sendmsg_emsgsize;
1530 }
1531 }
1532 if (size > 0xFFFF) {
1533 /* overflow */
1534 goto sendmsg_emsgsize;
1535 }
1536 /* Allocate a new netbuf and copy the data into it. */
1537 if (netbuf_alloc(&chain_buf, (u16_t)size) == NULL) {
1538 err = ERR_MEM;
1539 } else {
1540 /* flatten the IO vectors */
1541 size_t offset = 0;
1542 for (i = 0; i < msg->msg_iovlen; i++) {
1543 MEMCPY(&((u8_t *)chain_buf.p->payload)[offset], msg->msg_iov[i].iov_base, msg->msg_iov[i].iov_len);
1544 offset += msg->msg_iov[i].iov_len;
1545 }
1546 #if LWIP_CHECKSUM_ON_COPY
1547 {
1548 /* This can be improved by using LWIP_CHKSUM_COPY() and aggregating the checksum for each IO vector */
1549 u16_t chksum = ~inet_chksum_pbuf(chain_buf.p);
1550 netbuf_set_chksum(&chain_buf, chksum);
1551 }
1552 #endif /* LWIP_CHECKSUM_ON_COPY */
1553 err = ERR_OK;
1554 }
1555 #else /* LWIP_NETIF_TX_SINGLE_PBUF */
1556 /* create a chained netbuf from the IO vectors. NOTE: we assemble a pbuf chain
1557 manually to avoid having to allocate, chain, and delete a netbuf for each iov */
1558 for (i = 0; i < msg->msg_iovlen; i++) {
1559 struct pbuf *p;
1560 if (msg->msg_iov[i].iov_len > 0xFFFF) {
1561 /* overflow */
1562 goto sendmsg_emsgsize;
1563 }
1564 p = pbuf_alloc(PBUF_TRANSPORT, 0, PBUF_REF);
1565 if (p == NULL) {
1566 err = ERR_MEM; /* let netbuf_delete() cleanup chain_buf */
1567 break;
1568 }
1569 p->payload = msg->msg_iov[i].iov_base;
1570 p->len = p->tot_len = (u16_t)msg->msg_iov[i].iov_len;
1571 /* netbuf empty, add new pbuf */
1572 if (chain_buf.p == NULL) {
1573 chain_buf.p = chain_buf.ptr = p;
1574 /* add pbuf to existing pbuf chain */
1575 } else {
1576 if (chain_buf.p->tot_len + p->len > 0xffff) {
1577 /* overflow */
1578 pbuf_free(p);
1579 goto sendmsg_emsgsize;
1580 }
1581 pbuf_cat(chain_buf.p, p);
1582 }
1583 }
1584 /* save size of total chain */
1585 if (err == ERR_OK) {
1586 size = netbuf_len(&chain_buf);
1587 }
1588 #endif /* LWIP_NETIF_TX_SINGLE_PBUF */
1589
1590 if (err == ERR_OK) {
1591 #if LWIP_IPV4 && LWIP_IPV6
1592 /* Dual-stack: Unmap IPv4 mapped IPv6 addresses */
1593 if (IP_IS_V6_VAL(chain_buf.addr) && ip6_addr_isipv4mappedipv6(ip_2_ip6(&chain_buf.addr))) {
1594 unmap_ipv4_mapped_ipv6(ip_2_ip4(&chain_buf.addr), ip_2_ip6(&chain_buf.addr));
1595 IP_SET_TYPE_VAL(chain_buf.addr, IPADDR_TYPE_V4);
1596 }
1597 #endif /* LWIP_IPV4 && LWIP_IPV6 */
1598
1599 /* send the data */
1600 err = netconn_send(sock->conn, &chain_buf);
1601 }
1602
1603 /* deallocate the buffer */
1604 netbuf_free(&chain_buf);
1605
1606 set_errno(err_to_errno(err));
1607 done_socket(sock);
1608 return (err == ERR_OK ? size : -1);
1609 sendmsg_emsgsize:
1610 set_errno(EMSGSIZE);
1611 netbuf_free(&chain_buf);
1612 done_socket(sock);
1613 return -1;
1614 }
1615 #else /* LWIP_UDP || LWIP_RAW */
1616 set_errno(err_to_errno(ERR_ARG));
1617 done_socket(sock);
1618 return -1;
1619 #endif /* LWIP_UDP || LWIP_RAW */
1620 }
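
/* Usage sketch (illustrative only, not part of lwIP): scatter-gather send of
 * a UDP datagram through lwip_sendmsg(). The socket, destination address and
 * payload fragments are hypothetical; on datagram/raw netconns the iovecs are
 * copied into a single netbuf (LWIP_NETIF_TX_SINGLE_PBUF) or chained as
 * PBUF_REF pbufs, as implemented above.
 *
 *   struct iovec iov[2];
 *   struct msghdr mh;
 *   struct sockaddr_in dst;
 *   int s = lwip_socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP);
 *   memset(&dst, 0, sizeof(dst));
 *   dst.sin_family = AF_INET;
 *   dst.sin_port = lwip_htons(5000);
 *   dst.sin_addr.s_addr = inet_addr("192.168.1.10");
 *   iov[0].iov_base = "hello, ";  iov[0].iov_len = 7;
 *   iov[1].iov_base = "world";    iov[1].iov_len = 5;
 *   memset(&mh, 0, sizeof(mh));
 *   mh.msg_name = &dst;   mh.msg_namelen = sizeof(dst);
 *   mh.msg_iov = iov;     mh.msg_iovlen = 2;
 *   lwip_sendmsg(s, &mh, 0);
 */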
1621
1622 ssize_t
1623 lwip_sendto(int s, const void *data, size_t size, int flags,
1624 const struct sockaddr *to, socklen_t tolen)
1625 {
1626 struct lwip_sock *sock;
1627 err_t err;
1628 u16_t short_size;
1629 u16_t remote_port;
1630 struct netbuf buf;
1631
1632 sock = get_socket(s);
1633 if (!sock) {
1634 return -1;
1635 }
1636
1637 if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP) {
1638 #if LWIP_TCP
1639 done_socket(sock);
1640 return lwip_send(s, data, size, flags);
1641 #else /* LWIP_TCP */
1642 LWIP_UNUSED_ARG(flags);
1643 set_errno(err_to_errno(ERR_ARG));
1644 done_socket(sock);
1645 return -1;
1646 #endif /* LWIP_TCP */
1647 }
1648
1649 if (size > LWIP_MIN(0xFFFF, SSIZE_MAX)) {
1650 /* cannot fit into one datagram (at least for us) */
1651 set_errno(EMSGSIZE);
1652 done_socket(sock);
1653 return -1;
1654 }
1655 short_size = (u16_t)size;
1656 LWIP_ERROR("lwip_sendto: invalid address", (((to == NULL) && (tolen == 0)) ||
1657 (IS_SOCK_ADDR_LEN_VALID(tolen) &&
1658 ((to != NULL) && (IS_SOCK_ADDR_TYPE_VALID(to) && IS_SOCK_ADDR_ALIGNED(to))))),
1659 set_errno(err_to_errno(ERR_ARG)); done_socket(sock); return -1;);
1660 LWIP_UNUSED_ARG(tolen);
1661
1662 /* initialize a buffer */
1663 buf.p = buf.ptr = NULL;
1664 #if LWIP_CHECKSUM_ON_COPY
1665 buf.flags = 0;
1666 #endif /* LWIP_CHECKSUM_ON_COPY */
1667 if (to) {
1668 SOCKADDR_TO_IPADDR_PORT(to, &buf.addr, remote_port);
1669 } else {
1670 remote_port = 0;
1671 ip_addr_set_any(NETCONNTYPE_ISIPV6(netconn_type(sock->conn)), &buf.addr);
1672 }
1673 netbuf_fromport(&buf) = remote_port;
1674
1675
1676 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_sendto(%d, data=%p, short_size=%"U16_F", flags=0x%x to=",
1677 s, data, short_size, flags));
1678 ip_addr_debug_print_val(SOCKETS_DEBUG, buf.addr);
1679 LWIP_DEBUGF(SOCKETS_DEBUG, (" port=%"U16_F"\n", remote_port));
1680
1681 /* make the buffer point to the data that should be sent */
1682 #if LWIP_NETIF_TX_SINGLE_PBUF
1683 /* Allocate a new netbuf and copy the data into it. */
1684 if (netbuf_alloc(&buf, short_size) == NULL) {
1685 err = ERR_MEM;
1686 } else {
1687 #if LWIP_CHECKSUM_ON_COPY
1688 if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_RAW) {
1689 u16_t chksum = LWIP_CHKSUM_COPY(buf.p->payload, data, short_size);
1690 netbuf_set_chksum(&buf, chksum);
1691 } else
1692 #endif /* LWIP_CHECKSUM_ON_COPY */
1693 {
1694 MEMCPY(buf.p->payload, data, short_size);
1695 }
1696 err = ERR_OK;
1697 }
1698 #else /* LWIP_NETIF_TX_SINGLE_PBUF */
1699 err = netbuf_ref(&buf, data, short_size);
1700 #endif /* LWIP_NETIF_TX_SINGLE_PBUF */
1701 if (err == ERR_OK) {
1702 #if LWIP_IPV4 && LWIP_IPV6
1703 /* Dual-stack: Unmap IPv4 mapped IPv6 addresses */
1704 if (IP_IS_V6_VAL(buf.addr) && ip6_addr_isipv4mappedipv6(ip_2_ip6(&buf.addr))) {
1705 unmap_ipv4_mapped_ipv6(ip_2_ip4(&buf.addr), ip_2_ip6(&buf.addr));
1706 IP_SET_TYPE_VAL(buf.addr, IPADDR_TYPE_V4);
1707 }
1708 #endif /* LWIP_IPV4 && LWIP_IPV6 */
1709
1710 /* send the data */
1711 err = netconn_send(sock->conn, &buf);
1712 }
1713
1714   /* deallocate the buffer */
1715 netbuf_free(&buf);
1716
1717 set_errno(err_to_errno(err));
1718 done_socket(sock);
1719 return (err == ERR_OK ? short_size : -1);
1720 }
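
/* Usage sketch (illustrative only, not part of lwIP): sending one UDP
 * datagram with lwip_sendto(). Address and payload are made up. A size above
 * LWIP_MIN(0xFFFF, SSIZE_MAX) is rejected with EMSGSIZE, as checked above.
 *
 *   struct sockaddr_in dst;
 *   int s = lwip_socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP);
 *   memset(&dst, 0, sizeof(dst));
 *   dst.sin_family = AF_INET;
 *   dst.sin_port = lwip_htons(1234);
 *   dst.sin_addr.s_addr = inet_addr("10.0.0.2");
 *   lwip_sendto(s, "ping", 4, 0, (struct sockaddr *)&dst, sizeof(dst));
 *   lwip_close(s);
 */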
1721
1722 int
1723 lwip_socket(int domain, int type, int protocol)
1724 {
1725 struct netconn *conn;
1726 int i;
1727
1728 LWIP_UNUSED_ARG(domain); /* @todo: check this */
1729
1730 /* create a netconn */
1731 switch (type) {
1732 case SOCK_RAW:
1733 conn = netconn_new_with_proto_and_callback(DOMAIN_TO_NETCONN_TYPE(domain, NETCONN_RAW),
1734 (u8_t)protocol, DEFAULT_SOCKET_EVENTCB);
1735 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_socket(%s, SOCK_RAW, %d) = ",
1736 domain == PF_INET ? "PF_INET" : "UNKNOWN", protocol));
1737 break;
1738 case SOCK_DGRAM:
1739 conn = netconn_new_with_callback(DOMAIN_TO_NETCONN_TYPE(domain,
1740 ((protocol == IPPROTO_UDPLITE) ? NETCONN_UDPLITE : NETCONN_UDP)),
1741 DEFAULT_SOCKET_EVENTCB);
1742 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_socket(%s, SOCK_DGRAM, %d) = ",
1743 domain == PF_INET ? "PF_INET" : "UNKNOWN", protocol));
1744 #if LWIP_NETBUF_RECVINFO
1745 if (conn) {
1746 /* netconn layer enables pktinfo by default, sockets default to off */
1747 conn->flags &= ~NETCONN_FLAG_PKTINFO;
1748 }
1749 #endif /* LWIP_NETBUF_RECVINFO */
1750 break;
1751 case SOCK_STREAM:
1752 conn = netconn_new_with_callback(DOMAIN_TO_NETCONN_TYPE(domain, NETCONN_TCP), DEFAULT_SOCKET_EVENTCB);
1753 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_socket(%s, SOCK_STREAM, %d) = ",
1754 domain == PF_INET ? "PF_INET" : "UNKNOWN", protocol));
1755 break;
1756 default:
1757 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_socket(%d, %d/UNKNOWN, %d) = -1\n",
1758 domain, type, protocol));
1759 set_errno(EINVAL);
1760 return -1;
1761 }
1762
1763 if (!conn) {
1764 LWIP_DEBUGF(SOCKETS_DEBUG, ("-1 / ENOBUFS (could not create netconn)\n"));
1765 set_errno(ENOBUFS);
1766 return -1;
1767 }
1768
1769 i = alloc_socket(conn, 0);
1770
1771 if (i == -1) {
1772 netconn_delete(conn);
1773 set_errno(ENFILE);
1774 return -1;
1775 }
1776 conn->callback_arg.socket = i;
1777 done_socket(&sockets[i - LWIP_SOCKET_OFFSET]);
1778 LWIP_DEBUGF(SOCKETS_DEBUG, ("%d\n", i));
1779 set_errno(0);
1780 return i;
1781 }
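
/* Usage sketch (illustrative only, not part of lwIP): the three socket types
 * this function can create. The protocol argument selects the raw protocol
 * for SOCK_RAW and UDP-Lite (IPPROTO_UDPLITE) for SOCK_DGRAM; an unknown type
 * fails with EINVAL, and a full socket table fails with ENFILE.
 *
 *   int tcp_s = lwip_socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
 *   int udp_s = lwip_socket(AF_INET, SOCK_DGRAM,  IPPROTO_UDP);
 *   int raw_s = lwip_socket(AF_INET, SOCK_RAW,    IPPROTO_ICMP);
 */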
1782
1783 ssize_t
1784 lwip_write(int s, const void *data, size_t size)
1785 {
1786 return lwip_send(s, data, size, 0);
1787 }
1788
1789 ssize_t
1790 lwip_writev(int s, const struct iovec *iov, int iovcnt)
1791 {
1792 struct msghdr msg;
1793
1794 msg.msg_name = NULL;
1795 msg.msg_namelen = 0;
1796 /* Hack: we have to cast via number to cast from 'const' pointer to non-const.
1797 Blame the opengroup standard for this inconsistency. */
1798 msg.msg_iov = LWIP_CONST_CAST(struct iovec *, iov);
1799 msg.msg_iovlen = iovcnt;
1800 msg.msg_control = NULL;
1801 msg.msg_controllen = 0;
1802 msg.msg_flags = 0;
1803 return lwip_sendmsg(s, &msg, 0);
1804 }
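
/* Usage sketch (illustrative only, not part of lwIP): lwip_writev() simply
 * wraps the iovec array in a struct msghdr without a destination address and
 * forwards it to lwip_sendmsg(); hdr_buf/body_buf below are hypothetical
 * application buffers.
 *
 *   struct iovec iov[2];
 *   iov[0].iov_base = hdr_buf;   iov[0].iov_len = hdr_len;
 *   iov[1].iov_base = body_buf;  iov[1].iov_len = body_len;
 *   lwip_writev(tcp_s, iov, 2);
 */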
1805
1806 #if LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL
1807 /* Add select_cb to select_cb_list. */
1808 static void
1809 lwip_link_select_cb(struct lwip_select_cb *select_cb)
1810 {
1811 LWIP_SOCKET_SELECT_DECL_PROTECT(lev);
1812
1813 /* Protect the select_cb_list */
1814 LWIP_SOCKET_SELECT_PROTECT(lev);
1815
1816 /* Put this select_cb on top of list */
1817 select_cb->next = select_cb_list;
1818 if (select_cb_list != NULL) {
1819 select_cb_list->prev = select_cb;
1820 }
1821 select_cb_list = select_cb;
1822 #if !LWIP_TCPIP_CORE_LOCKING
1823 /* Increasing this counter tells select_check_waiters that the list has changed. */
1824 select_cb_ctr++;
1825 #endif
1826
1827 /* Now we can safely unprotect */
1828 LWIP_SOCKET_SELECT_UNPROTECT(lev);
1829 }
1830
1831 /* Remove select_cb from select_cb_list. */
1832 static void
1833 lwip_unlink_select_cb(struct lwip_select_cb *select_cb)
1834 {
1835 LWIP_SOCKET_SELECT_DECL_PROTECT(lev);
1836
1837 /* Take us off the list */
1838 LWIP_SOCKET_SELECT_PROTECT(lev);
1839 if (select_cb->next != NULL) {
1840 select_cb->next->prev = select_cb->prev;
1841 }
1842 if (select_cb_list == select_cb) {
1843 LWIP_ASSERT("select_cb->prev == NULL", select_cb->prev == NULL);
1844 select_cb_list = select_cb->next;
1845 } else {
1846 LWIP_ASSERT("select_cb->prev != NULL", select_cb->prev != NULL);
1847 select_cb->prev->next = select_cb->next;
1848 }
1849 #if !LWIP_TCPIP_CORE_LOCKING
1850 /* Increasing this counter tells select_check_waiters that the list has changed. */
1851 select_cb_ctr++;
1852 #endif
1853 LWIP_SOCKET_SELECT_UNPROTECT(lev);
1854 }
1855 #endif /* LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL */
1856
1857 #if LWIP_SOCKET_SELECT
1858 /**
1859  * Go through the readset, writeset and exceptset and check which of the sockets
1860  * set in them have events. On return, the output sets contain exactly those
1861  * sockets that had events.
1862 *
1863 * @param maxfdp1 the highest socket index in the sets
1864 * @param readset_in set of sockets to check for read events
1865 * @param writeset_in set of sockets to check for write events
1866 * @param exceptset_in set of sockets to check for error events
1867 * @param readset_out set of sockets that had read events
1868 * @param writeset_out set of sockets that had write events
1869  * @param exceptset_out set of sockets that had error events
1870 * @return number of sockets that had events (read/write/exception) (>= 0)
1871 */
1872 static int
1873 lwip_selscan(int maxfdp1, fd_set *readset_in, fd_set *writeset_in, fd_set *exceptset_in,
1874 fd_set *readset_out, fd_set *writeset_out, fd_set *exceptset_out)
1875 {
1876 int i, nready = 0;
1877 fd_set lreadset, lwriteset, lexceptset;
1878 struct lwip_sock *sock;
1879 SYS_ARCH_DECL_PROTECT(lev);
1880
1881 FD_ZERO(&lreadset);
1882 FD_ZERO(&lwriteset);
1883 FD_ZERO(&lexceptset);
1884
1885 /* Go through each socket in each list to count number of sockets which
1886 currently match */
1887 for (i = LWIP_SOCKET_OFFSET; i < maxfdp1; i++) {
1888 /* if this FD is not in the set, continue */
1889 if (!(readset_in && FD_ISSET(i, readset_in)) &&
1890 !(writeset_in && FD_ISSET(i, writeset_in)) &&
1891 !(exceptset_in && FD_ISSET(i, exceptset_in))) {
1892 continue;
1893 }
1894 /* First get the socket's status (protected)... */
1895 SYS_ARCH_PROTECT(lev);
1896 sock = tryget_socket_unconn_locked(i);
1897 if (sock != NULL) {
1898 void *lastdata = sock->lastdata.pbuf;
1899 s16_t rcvevent = sock->rcvevent;
1900 u16_t sendevent = sock->sendevent;
1901 u16_t errevent = sock->errevent;
1902 SYS_ARCH_UNPROTECT(lev);
1903
1904 /* ... then examine it: */
1905 /* See if netconn of this socket is ready for read */
1906 if (readset_in && FD_ISSET(i, readset_in) && ((lastdata != NULL) || (rcvevent > 0))) {
1907 FD_SET(i, &lreadset);
1908 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_selscan: fd=%d ready for reading\n", i));
1909 nready++;
1910 }
1911 /* See if netconn of this socket is ready for write */
1912 if (writeset_in && FD_ISSET(i, writeset_in) && (sendevent != 0)) {
1913 FD_SET(i, &lwriteset);
1914 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_selscan: fd=%d ready for writing\n", i));
1915 nready++;
1916 }
1917 /* See if netconn of this socket had an error */
1918 if (exceptset_in && FD_ISSET(i, exceptset_in) && (errevent != 0)) {
1919 FD_SET(i, &lexceptset);
1920 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_selscan: fd=%d ready for exception\n", i));
1921 nready++;
1922 }
1923 done_socket(sock);
1924 } else {
1925 SYS_ARCH_UNPROTECT(lev);
1926       /* not a valid open socket */
1927 return -1;
1928 }
1929 }
1930 /* copy local sets to the ones provided as arguments */
1931 *readset_out = lreadset;
1932 *writeset_out = lwriteset;
1933 *exceptset_out = lexceptset;
1934
1935 LWIP_ASSERT("nready >= 0", nready >= 0);
1936 return nready;
1937 }
1938
1939 #if LWIP_NETCONN_FULLDUPLEX
1940 /* Mark all of the set sockets in one of the three fdsets passed to select as used.
1941 * All sockets are marked (and later unmarked), whether they are open or not.
1942 * This is OK as lwip_selscan aborts select when non-open sockets are found.
1943 */
1944 static void
1945 lwip_select_inc_sockets_used_set(int maxfdp, fd_set *fdset, fd_set *used_sockets)
1946 {
1947 SYS_ARCH_DECL_PROTECT(lev);
1948 if (fdset) {
1949 int i;
1950 for (i = LWIP_SOCKET_OFFSET; i < maxfdp; i++) {
1951 /* if this FD is in the set, lock it (unless already done) */
1952 if (FD_ISSET(i, fdset) && !FD_ISSET(i, used_sockets)) {
1953 struct lwip_sock *sock;
1954 SYS_ARCH_PROTECT(lev);
1955 sock = tryget_socket_unconn_locked(i);
1956 if (sock != NULL) {
1957 /* leave the socket used until released by lwip_select_dec_sockets_used */
1958 FD_SET(i, used_sockets);
1959 }
1960 SYS_ARCH_UNPROTECT(lev);
1961 }
1962 }
1963 }
1964 }
1965
1966 /* Mark all sockets passed to select as used to prevent them from being freed
1967 * from other threads while select is running.
1968  * Marked sockets are added to 'used_sockets' to mark them only once and be able
1969 * to unmark them correctly.
1970 */
1971 static void
1972 lwip_select_inc_sockets_used(int maxfdp, fd_set *fdset1, fd_set *fdset2, fd_set *fdset3, fd_set *used_sockets)
1973 {
1974 FD_ZERO(used_sockets);
1975 lwip_select_inc_sockets_used_set(maxfdp, fdset1, used_sockets);
1976 lwip_select_inc_sockets_used_set(maxfdp, fdset2, used_sockets);
1977 lwip_select_inc_sockets_used_set(maxfdp, fdset3, used_sockets);
1978 }
1979
1980 /* Let go all sockets that were marked as used when starting select */
1981 static void
1982 lwip_select_dec_sockets_used(int maxfdp, fd_set *used_sockets)
1983 {
1984 int i;
1985 for (i = LWIP_SOCKET_OFFSET; i < maxfdp; i++) {
1986 /* if this FD is not in the set, continue */
1987 if (FD_ISSET(i, used_sockets)) {
1988 struct lwip_sock *sock = tryget_socket_unconn_nouse(i);
1989 LWIP_ASSERT("socket gone at the end of select", sock != NULL);
1990 if (sock != NULL) {
1991 done_socket(sock);
1992 }
1993 }
1994 }
1995 }
1996 #else /* LWIP_NETCONN_FULLDUPLEX */
1997 #define lwip_select_inc_sockets_used(maxfdp1, readset, writeset, exceptset, used_sockets)
1998 #define lwip_select_dec_sockets_used(maxfdp1, used_sockets)
1999 #endif /* LWIP_NETCONN_FULLDUPLEX */
2000
2001 int
2002 lwip_select(int maxfdp1, fd_set *readset, fd_set *writeset, fd_set *exceptset,
2003 struct timeval *timeout)
2004 {
2005 u32_t waitres = 0;
2006 int nready;
2007 fd_set lreadset, lwriteset, lexceptset;
2008 u32_t msectimeout;
2009 int i;
2010 int maxfdp2;
2011 #if LWIP_NETCONN_SEM_PER_THREAD
2012 int waited = 0;
2013 #endif
2014 #if LWIP_NETCONN_FULLDUPLEX
2015 fd_set used_sockets;
2016 #endif
2017 SYS_ARCH_DECL_PROTECT(lev);
2018
2019 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_select(%d, %p, %p, %p, tvsec=%"S32_F" tvusec=%"S32_F")\n",
2020 maxfdp1, (void *)readset, (void *) writeset, (void *) exceptset,
2021 timeout ? (s32_t)timeout->tv_sec : (s32_t) - 1,
2022 timeout ? (s32_t)timeout->tv_usec : (s32_t) - 1));
2023
2024 if ((maxfdp1 < 0) || (maxfdp1 > LWIP_SELECT_MAXNFDS)) {
2025 set_errno(EINVAL);
2026 return -1;
2027 }
2028
2029 lwip_select_inc_sockets_used(maxfdp1, readset, writeset, exceptset, &used_sockets);
2030
2031 /* Go through each socket in each list to count number of sockets which
2032 currently match */
2033 nready = lwip_selscan(maxfdp1, readset, writeset, exceptset, &lreadset, &lwriteset, &lexceptset);
2034
2035 if (nready < 0) {
2036 /* one of the sockets in one of the fd_sets was invalid */
2037 set_errno(EBADF);
2038 lwip_select_dec_sockets_used(maxfdp1, &used_sockets);
2039 return -1;
2040 } else if (nready > 0) {
2041 /* one or more sockets are set, no need to wait */
2042 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_select: nready=%d\n", nready));
2043 } else {
2044 /* If we don't have any current events, then suspend if we are supposed to */
2045 if (timeout && timeout->tv_sec == 0 && timeout->tv_usec == 0) {
2046 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_select: no timeout, returning 0\n"));
2047 /* This is OK as the local fdsets are empty and nready is zero,
2048 or we would have returned earlier. */
2049 } else {
2050 /* None ready: add our semaphore to list:
2051 We don't actually need any dynamic memory. Our entry on the
2052 list is only valid while we are in this function, so it's ok
2053 to use local variables (unless we're running in MPU compatible
2054 mode). */
2055 API_SELECT_CB_VAR_DECLARE(select_cb);
2056 API_SELECT_CB_VAR_ALLOC(select_cb, set_errno(ENOMEM); lwip_select_dec_sockets_used(maxfdp1, &used_sockets); return -1);
2057 memset(&API_SELECT_CB_VAR_REF(select_cb), 0, sizeof(struct lwip_select_cb));
2058
2059 API_SELECT_CB_VAR_REF(select_cb).readset = readset;
2060 API_SELECT_CB_VAR_REF(select_cb).writeset = writeset;
2061 API_SELECT_CB_VAR_REF(select_cb).exceptset = exceptset;
2062 #if LWIP_NETCONN_SEM_PER_THREAD
2063 API_SELECT_CB_VAR_REF(select_cb).sem = LWIP_NETCONN_THREAD_SEM_GET();
2064 #else /* LWIP_NETCONN_SEM_PER_THREAD */
2065 if (sys_sem_new(&API_SELECT_CB_VAR_REF(select_cb).sem, 0) != ERR_OK) {
2066 /* failed to create semaphore */
2067 set_errno(ENOMEM);
2068 lwip_select_dec_sockets_used(maxfdp1, &used_sockets);
2069 API_SELECT_CB_VAR_FREE(select_cb);
2070 return -1;
2071 }
2072 #endif /* LWIP_NETCONN_SEM_PER_THREAD */
2073
2074 lwip_link_select_cb(&API_SELECT_CB_VAR_REF(select_cb));
2075
2076 /* Increase select_waiting for each socket we are interested in */
2077 maxfdp2 = maxfdp1;
2078 for (i = LWIP_SOCKET_OFFSET; i < maxfdp1; i++) {
2079 if ((readset && FD_ISSET(i, readset)) ||
2080 (writeset && FD_ISSET(i, writeset)) ||
2081 (exceptset && FD_ISSET(i, exceptset))) {
2082 struct lwip_sock *sock;
2083 SYS_ARCH_PROTECT(lev);
2084 sock = tryget_socket_unconn_locked(i);
2085 if (sock != NULL) {
2086 sock->select_waiting++;
2087 if (sock->select_waiting == 0) {
2088 /* overflow - too many threads waiting */
2089 sock->select_waiting--;
2090 nready = -1;
2091 maxfdp2 = i;
2092 SYS_ARCH_UNPROTECT(lev);
2093 done_socket(sock);
2094 set_errno(EBUSY);
2095 break;
2096 }
2097 SYS_ARCH_UNPROTECT(lev);
2098 done_socket(sock);
2099 } else {
2100 /* Not a valid socket */
2101 nready = -1;
2102 maxfdp2 = i;
2103 SYS_ARCH_UNPROTECT(lev);
2104 set_errno(EBADF);
2105 break;
2106 }
2107 }
2108 }
2109
2110 if (nready >= 0) {
2111 /* Call lwip_selscan again: there could have been events between
2112 the last scan (without us on the list) and putting us on the list! */
2113 nready = lwip_selscan(maxfdp1, readset, writeset, exceptset, &lreadset, &lwriteset, &lexceptset);
2114 if (nready < 0) {
2115 set_errno(EBADF);
2116 } else if (!nready) {
2117 /* Still none ready, just wait to be woken */
2118 if (timeout == NULL) {
2119 /* Wait forever */
2120 msectimeout = 0;
2121 } else {
2122 long msecs_long = ((timeout->tv_sec * 1000) + ((timeout->tv_usec + 500) / 1000));
2123 if (msecs_long <= 0) {
2124 /* Wait 1ms at least (0 means wait forever) */
2125 msectimeout = 1;
2126 } else {
2127 msectimeout = (u32_t)msecs_long;
2128 }
2129 }
2130
2131 waitres = sys_arch_sem_wait(SELECT_SEM_PTR(API_SELECT_CB_VAR_REF(select_cb).sem), msectimeout);
2132 #if LWIP_NETCONN_SEM_PER_THREAD
2133 waited = 1;
2134 #endif
2135 }
2136 }
2137
2138 /* Decrease select_waiting for each socket we are interested in */
2139 for (i = LWIP_SOCKET_OFFSET; i < maxfdp2; i++) {
2140 if ((readset && FD_ISSET(i, readset)) ||
2141 (writeset && FD_ISSET(i, writeset)) ||
2142 (exceptset && FD_ISSET(i, exceptset))) {
2143 struct lwip_sock *sock;
2144 SYS_ARCH_PROTECT(lev);
2145 sock = tryget_socket_unconn_nouse(i);
2146 LWIP_ASSERT("socket gone at the end of select", sock != NULL);
2147 if (sock != NULL) {
2148 /* for now, handle select_waiting==0... */
2149 LWIP_ASSERT("sock->select_waiting > 0", sock->select_waiting > 0);
2150 if (sock->select_waiting > 0) {
2151 sock->select_waiting--;
2152 }
2153 SYS_ARCH_UNPROTECT(lev);
2154 } else {
2155 SYS_ARCH_UNPROTECT(lev);
2156 /* Not a valid socket */
2157 nready = -1;
2158 set_errno(EBADF);
2159 }
2160 }
2161 }
2162
2163 lwip_unlink_select_cb(&API_SELECT_CB_VAR_REF(select_cb));
2164
2165 #if LWIP_NETCONN_SEM_PER_THREAD
2166 if (API_SELECT_CB_VAR_REF(select_cb).sem_signalled && (!waited || (waitres == SYS_ARCH_TIMEOUT))) {
2167 /* don't leave the thread-local semaphore signalled */
2168 sys_arch_sem_wait(API_SELECT_CB_VAR_REF(select_cb).sem, 1);
2169 }
2170 #else /* LWIP_NETCONN_SEM_PER_THREAD */
2171 sys_sem_free(&API_SELECT_CB_VAR_REF(select_cb).sem);
2172 #endif /* LWIP_NETCONN_SEM_PER_THREAD */
2173 API_SELECT_CB_VAR_FREE(select_cb);
2174
2175 if (nready < 0) {
2176 /* This happens when a socket got closed while waiting */
2177 lwip_select_dec_sockets_used(maxfdp1, &used_sockets);
2178 return -1;
2179 }
2180
2181 if (waitres == SYS_ARCH_TIMEOUT) {
2182 /* Timeout */
2183 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_select: timeout expired\n"));
2184 /* This is OK as the local fdsets are empty and nready is zero,
2185 or we would have returned earlier. */
2186 } else {
2187 /* See what's set now after waiting */
2188 nready = lwip_selscan(maxfdp1, readset, writeset, exceptset, &lreadset, &lwriteset, &lexceptset);
2189 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_select: nready=%d\n", nready));
2190 if (nready < 0) {
2191 set_errno(EBADF);
2192 lwip_select_dec_sockets_used(maxfdp1, &used_sockets);
2193 return -1;
2194 }
2195 }
2196 }
2197 }
2198
2199 lwip_select_dec_sockets_used(maxfdp1, &used_sockets);
2200 set_errno(0);
2201 if (readset) {
2202 *readset = lreadset;
2203 }
2204 if (writeset) {
2205 *writeset = lwriteset;
2206 }
2207 if (exceptset) {
2208 *exceptset = lexceptset;
2209 }
2210 return nready;
2211 }
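
/* Usage sketch (illustrative only, not part of lwIP): waiting up to one
 * second for socket s to become readable. A NULL timeout blocks forever; a
 * zero timeval polls once without waiting, matching the code above.
 *
 *   fd_set rset;
 *   struct timeval tv;
 *   FD_ZERO(&rset);
 *   FD_SET(s, &rset);
 *   tv.tv_sec = 1;
 *   tv.tv_usec = 0;
 *   if (lwip_select(s + 1, &rset, NULL, NULL, &tv) > 0 && FD_ISSET(s, &rset)) {
 *     (s is readable: a following lwip_recv() will not block)
 *   }
 */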
2212 #endif /* LWIP_SOCKET_SELECT */
2213
2214 #if LWIP_SOCKET_POLL
2215 /** Options for the lwip_pollscan function. */
2216 enum lwip_pollscan_opts
2217 {
2218 /** Clear revents in each struct pollfd. */
2219 LWIP_POLLSCAN_CLEAR = 1,
2220
2221 /** Increment select_waiting in each struct lwip_sock. */
2222 LWIP_POLLSCAN_INC_WAIT = 2,
2223
2224 /** Decrement select_waiting in each struct lwip_sock. */
2225 LWIP_POLLSCAN_DEC_WAIT = 4
2226 };
2227
2228 /**
2229 * Update revents in each struct pollfd.
2230 * Optionally update select_waiting in struct lwip_sock.
2231 *
2232 * @param fds array of structures to update
2233 * @param nfds number of structures in fds
2234 * @param opts what to update and how
2235 * @return number of structures that have revents != 0
2236 */
2237 static int
2238 lwip_pollscan(struct pollfd *fds, nfds_t nfds, enum lwip_pollscan_opts opts)
2239 {
2240 int nready = 0;
2241 nfds_t fdi;
2242 struct lwip_sock *sock;
2243 SYS_ARCH_DECL_PROTECT(lev);
2244
2245 /* Go through each struct pollfd in the array. */
2246 for (fdi = 0; fdi < nfds; fdi++) {
2247 if ((opts & LWIP_POLLSCAN_CLEAR) != 0) {
2248 fds[fdi].revents = 0;
2249 }
2250
2251 /* Negative fd means the caller wants us to ignore this struct.
2252 POLLNVAL means we already detected that the fd is invalid;
2253 if another thread has since opened a new socket with that fd,
2254 we must not use that socket. */
2255 if (fds[fdi].fd >= 0 && (fds[fdi].revents & POLLNVAL) == 0) {
2256 /* First get the socket's status (protected)... */
2257 SYS_ARCH_PROTECT(lev);
2258 sock = tryget_socket_unconn_locked(fds[fdi].fd);
2259 if (sock != NULL) {
2260 void* lastdata = sock->lastdata.pbuf;
2261 s16_t rcvevent = sock->rcvevent;
2262 u16_t sendevent = sock->sendevent;
2263 u16_t errevent = sock->errevent;
2264
2265 if ((opts & LWIP_POLLSCAN_INC_WAIT) != 0) {
2266 sock->select_waiting++;
2267 if (sock->select_waiting == 0) {
2268 /* overflow - too many threads waiting */
2269 sock->select_waiting--;
2270 nready = -1;
2271 SYS_ARCH_UNPROTECT(lev);
2272 done_socket(sock);
2273 break;
2274 }
2275 } else if ((opts & LWIP_POLLSCAN_DEC_WAIT) != 0) {
2276 /* for now, handle select_waiting==0... */
2277 LWIP_ASSERT("sock->select_waiting > 0", sock->select_waiting > 0);
2278 if (sock->select_waiting > 0) {
2279 sock->select_waiting--;
2280 }
2281 }
2282 SYS_ARCH_UNPROTECT(lev);
2283 done_socket(sock);
2284
2285 /* ... then examine it: */
2286 /* See if netconn of this socket is ready for read */
2287 if ((fds[fdi].events & POLLIN) != 0 && ((lastdata != NULL) || (rcvevent > 0))) {
2288 fds[fdi].revents |= POLLIN;
2289 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_pollscan: fd=%d ready for reading\n", fds[fdi].fd));
2290 }
2291 /* See if netconn of this socket is ready for write */
2292 if ((fds[fdi].events & POLLOUT) != 0 && (sendevent != 0)) {
2293 fds[fdi].revents |= POLLOUT;
2294 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_pollscan: fd=%d ready for writing\n", fds[fdi].fd));
2295 }
2296 /* See if netconn of this socket had an error */
2297 if (errevent != 0) {
2298 /* POLLERR is output only. */
2299 fds[fdi].revents |= POLLERR;
2300 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_pollscan: fd=%d ready for exception\n", fds[fdi].fd));
2301 }
2302 } else {
2303 /* Not a valid socket */
2304 SYS_ARCH_UNPROTECT(lev);
2305 /* POLLNVAL is output only. */
2306 fds[fdi].revents |= POLLNVAL;
2307 return -1;
2308 }
2309 }
2310
2311 /* Will return the number of structures that have events,
2312 not the number of events. */
2313 if (fds[fdi].revents != 0) {
2314 nready++;
2315 }
2316 }
2317
2318 LWIP_ASSERT("nready >= 0", nready >= 0);
2319 return nready;
2320 }
2321
2322 #if LWIP_NETCONN_FULLDUPLEX
2323 /* Mark all sockets as used.
2324 *
2325 * All sockets are marked (and later unmarked), whether they are open or not.
2326  * This is OK as lwip_pollscan aborts the poll when non-open sockets are found.
2327 */
2328 static void
2329 lwip_poll_inc_sockets_used(struct pollfd *fds, nfds_t nfds)
2330 {
2331 nfds_t fdi;
2332
2333 if(fds) {
2334 /* Go through each struct pollfd in the array. */
2335 for (fdi = 0; fdi < nfds; fdi++) {
2336 /* Increase the reference counter */
2337 tryget_socket_unconn(fds[fdi].fd);
2338 }
2339 }
2340 }
2341
2342 /* Let go all sockets that were marked as used when starting poll */
2343 static void
2344 lwip_poll_dec_sockets_used(struct pollfd *fds, nfds_t nfds)
2345 {
2346 nfds_t fdi;
2347
2348 if(fds) {
2349 /* Go through each struct pollfd in the array. */
2350 for (fdi = 0; fdi < nfds; fdi++) {
2351 struct lwip_sock *sock = tryget_socket_unconn_nouse(fds[fdi].fd);
2352 if (sock != NULL) {
2353 done_socket(sock);
2354 }
2355 }
2356 }
2357 }
2358 #else /* LWIP_NETCONN_FULLDUPLEX */
2359 #define lwip_poll_inc_sockets_used(fds, nfds)
2360 #define lwip_poll_dec_sockets_used(fds, nfds)
2361 #endif /* LWIP_NETCONN_FULLDUPLEX */
2362
2363 int
2364 lwip_poll(struct pollfd *fds, nfds_t nfds, int timeout)
2365 {
2366 u32_t waitres = 0;
2367 int nready;
2368 u32_t msectimeout;
2369 #if LWIP_NETCONN_SEM_PER_THREAD
2370 int waited = 0;
2371 #endif
2372
2373 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_poll(%p, %d, %d)\n",
2374 (void*)fds, (int)nfds, timeout));
2375 LWIP_ERROR("lwip_poll: invalid fds", ((fds != NULL && nfds > 0) || (fds == NULL && nfds == 0)),
2376 set_errno(EINVAL); return -1;);
2377
2378 lwip_poll_inc_sockets_used(fds, nfds);
2379
2380 /* Go through each struct pollfd to count number of structures
2381 which currently match */
2382 nready = lwip_pollscan(fds, nfds, LWIP_POLLSCAN_CLEAR);
2383
2384 if (nready < 0) {
2385 lwip_poll_dec_sockets_used(fds, nfds);
2386 return -1;
2387 }
2388
2389 /* If we don't have any current events, then suspend if we are supposed to */
2390 if (!nready) {
2391 API_SELECT_CB_VAR_DECLARE(select_cb);
2392
2393 if (timeout == 0) {
2394 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_poll: no timeout, returning 0\n"));
2395 goto return_success;
2396 }
2397 API_SELECT_CB_VAR_ALLOC(select_cb, set_errno(EAGAIN); lwip_poll_dec_sockets_used(fds, nfds); return -1);
2398 memset(&API_SELECT_CB_VAR_REF(select_cb), 0, sizeof(struct lwip_select_cb));
2399
2400 /* None ready: add our semaphore to list:
2401 We don't actually need any dynamic memory. Our entry on the
2402 list is only valid while we are in this function, so it's ok
2403 to use local variables. */
2404
2405 API_SELECT_CB_VAR_REF(select_cb).poll_fds = fds;
2406 API_SELECT_CB_VAR_REF(select_cb).poll_nfds = nfds;
2407 #if LWIP_NETCONN_SEM_PER_THREAD
2408 API_SELECT_CB_VAR_REF(select_cb).sem = LWIP_NETCONN_THREAD_SEM_GET();
2409 #else /* LWIP_NETCONN_SEM_PER_THREAD */
2410 if (sys_sem_new(&API_SELECT_CB_VAR_REF(select_cb).sem, 0) != ERR_OK) {
2411 /* failed to create semaphore */
2412 set_errno(EAGAIN);
2413 lwip_poll_dec_sockets_used(fds, nfds);
2414 API_SELECT_CB_VAR_FREE(select_cb);
2415 return -1;
2416 }
2417 #endif /* LWIP_NETCONN_SEM_PER_THREAD */
2418
2419 lwip_link_select_cb(&API_SELECT_CB_VAR_REF(select_cb));
2420
2421 /* Increase select_waiting for each socket we are interested in.
2422 Also, check for events again: there could have been events between
2423 the last scan (without us on the list) and putting us on the list! */
2424 nready = lwip_pollscan(fds, nfds, LWIP_POLLSCAN_INC_WAIT);
2425
2426 if (!nready) {
2427 /* Still none ready, just wait to be woken */
2428 if (timeout < 0) {
2429 /* Wait forever */
2430 msectimeout = 0;
2431 } else {
2432 /* timeout == 0 would have been handled earlier. */
2433 LWIP_ASSERT("timeout > 0", timeout > 0);
2434 msectimeout = timeout;
2435 }
2436 waitres = sys_arch_sem_wait(SELECT_SEM_PTR(API_SELECT_CB_VAR_REF(select_cb).sem), msectimeout);
2437 #if LWIP_NETCONN_SEM_PER_THREAD
2438 waited = 1;
2439 #endif
2440 }
2441
2442 /* Decrease select_waiting for each socket we are interested in,
2443 and check which events occurred while we waited. */
2444 nready = lwip_pollscan(fds, nfds, LWIP_POLLSCAN_DEC_WAIT);
2445
2446 lwip_unlink_select_cb(&API_SELECT_CB_VAR_REF(select_cb));
2447
2448 #if LWIP_NETCONN_SEM_PER_THREAD
2449     if (API_SELECT_CB_VAR_REF(select_cb).sem_signalled && (!waited || (waitres == SYS_ARCH_TIMEOUT))) {
2450 /* don't leave the thread-local semaphore signalled */
2451 sys_arch_sem_wait(API_SELECT_CB_VAR_REF(select_cb).sem, 1);
2452 }
2453 #else /* LWIP_NETCONN_SEM_PER_THREAD */
2454 sys_sem_free(&API_SELECT_CB_VAR_REF(select_cb).sem);
2455 #endif /* LWIP_NETCONN_SEM_PER_THREAD */
2456 API_SELECT_CB_VAR_FREE(select_cb);
2457
2458 if (nready < 0) {
2459 /* This happens when a socket got closed while waiting */
2460 lwip_poll_dec_sockets_used(fds, nfds);
2461 return -1;
2462 }
2463
2464 if (waitres == SYS_ARCH_TIMEOUT) {
2465 /* Timeout */
2466 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_poll: timeout expired\n"));
2467 goto return_success;
2468 }
2469 }
2470
2471 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_poll: nready=%d\n", nready));
2472 return_success:
2473 lwip_poll_dec_sockets_used(fds, nfds);
2474 set_errno(0);
2475 return nready;
2476 }
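
/* Usage sketch (illustrative only, not part of lwIP): polling two sockets
 * with a 500 ms timeout. A negative timeout blocks forever, 0 returns
 * immediately; POLLERR and POLLNVAL can be reported even if not requested in
 * events, as implemented in lwip_pollscan() above.
 *
 *   struct pollfd pfds[2];
 *   pfds[0].fd = s1;  pfds[0].events = POLLIN;
 *   pfds[1].fd = s2;  pfds[1].events = POLLIN | POLLOUT;
 *   if (lwip_poll(pfds, 2, 500) > 0) {
 *     if (pfds[0].revents & POLLIN) {
 *       (data available on s1)
 *     }
 *   }
 */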
2477
2478 /**
2479 * Check whether event_callback should wake up a thread waiting in
2480 * lwip_poll.
2481 */
2482 static int
2483 lwip_poll_should_wake(const struct lwip_select_cb *scb, int fd, int has_recvevent, int has_sendevent, int has_errevent)
2484 {
2485 nfds_t fdi;
2486 for (fdi = 0; fdi < scb->poll_nfds; fdi++) {
2487 const struct pollfd *pollfd = &scb->poll_fds[fdi];
2488 if (pollfd->fd == fd) {
2489 /* Do not update pollfd->revents right here;
2490 that would be a data race because lwip_pollscan
2491 accesses revents without protecting. */
2492 if (has_recvevent && (pollfd->events & POLLIN) != 0) {
2493 return 1;
2494 }
2495 if (has_sendevent && (pollfd->events & POLLOUT) != 0) {
2496 return 1;
2497 }
2498 if (has_errevent) {
2499 /* POLLERR is output only. */
2500 return 1;
2501 }
2502 }
2503 }
2504 return 0;
2505 }
2506 #endif /* LWIP_SOCKET_POLL */
2507
2508 #if LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL
2509 /**
2510 * Callback registered in the netconn layer for each socket-netconn.
2511 * Processes recvevent (data available) and wakes up tasks waiting for select.
2512 *
2513 * @note for LWIP_TCPIP_CORE_LOCKING any caller of this function
2514 * must have the core lock held when signaling the following events
2515 * as they might cause select_list_cb to be checked:
2516 * NETCONN_EVT_RCVPLUS
2517 * NETCONN_EVT_SENDPLUS
2518 * NETCONN_EVT_ERROR
2519 * This requirement will be asserted in select_check_waiters()
2520 */
2521 static void
2522 event_callback(struct netconn *conn, enum netconn_evt evt, u16_t len)
2523 {
2524 int s, check_waiters;
2525 struct lwip_sock *sock;
2526 SYS_ARCH_DECL_PROTECT(lev);
2527
2528 LWIP_UNUSED_ARG(len);
2529
2530 /* Get socket */
2531 if (conn) {
2532 s = conn->callback_arg.socket;
2533 if (s < 0) {
2534 /* Data comes in right away after an accept, even though
2535 * the server task might not have created a new socket yet.
2536 * Just count down (or up) if that's the case and we
2537 * will use the data later. Note that only receive events
2538 * can happen before the new socket is set up. */
2539 SYS_ARCH_PROTECT(lev);
2540 if (conn->callback_arg.socket < 0) {
2541 if (evt == NETCONN_EVT_RCVPLUS) {
2542 /* conn->socket is -1 on initialization
2543 lwip_accept adjusts sock->recvevent if conn->socket < -1 */
2544 conn->callback_arg.socket--;
2545 }
2546 SYS_ARCH_UNPROTECT(lev);
2547 return;
2548 }
2549 s = conn->callback_arg.socket;
2550 SYS_ARCH_UNPROTECT(lev);
2551 }
2552
2553 sock = get_socket(s);
2554 if (!sock) {
2555 return;
2556 }
2557 } else {
2558 return;
2559 }
2560
2561 check_waiters = 1;
2562 SYS_ARCH_PROTECT(lev);
2563 /* Set event as required */
2564 switch (evt) {
2565 case NETCONN_EVT_RCVPLUS:
2566 sock->rcvevent++;
2567 if (sock->rcvevent > 1) {
2568 check_waiters = 0;
2569 }
2570 break;
2571 case NETCONN_EVT_RCVMINUS:
2572 sock->rcvevent--;
2573 check_waiters = 0;
2574 break;
2575 case NETCONN_EVT_SENDPLUS:
2576 if (sock->sendevent) {
2577 check_waiters = 0;
2578 }
2579 sock->sendevent = 1;
2580 break;
2581 case NETCONN_EVT_SENDMINUS:
2582 sock->sendevent = 0;
2583 check_waiters = 0;
2584 break;
2585 case NETCONN_EVT_ERROR:
2586 sock->errevent = 1;
2587 break;
2588 default:
2589 LWIP_ASSERT("unknown event", 0);
2590 break;
2591 }
2592
2593 if (sock->select_waiting && check_waiters) {
2594 /* Save which events are active */
2595 int has_recvevent, has_sendevent, has_errevent;
2596 has_recvevent = sock->rcvevent > 0;
2597 has_sendevent = sock->sendevent != 0;
2598 has_errevent = sock->errevent != 0;
2599 SYS_ARCH_UNPROTECT(lev);
2600 /* Check any select calls waiting on this socket */
2601 select_check_waiters(s, has_recvevent, has_sendevent, has_errevent);
2602 } else {
2603 SYS_ARCH_UNPROTECT(lev);
2604 }
2605 done_socket(sock);
2606 }
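
/* Summary of the event bookkeeping above: rcvevent counts pending receive
 * events (incremented on NETCONN_EVT_RCVPLUS, decremented on RCVMINUS),
 * sendevent is a flag meaning "send buffer space available", and errevent is
 * set on NETCONN_EVT_ERROR. select_check_waiters() is only called on
 * transitions that could newly satisfy a waiter (check_waiters != 0), which
 * avoids waking waiting threads redundantly. */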
2607
2608 /**
2609 * Check if any select waiters are waiting on this socket and its events
2610 *
2611 * @note on synchronization of select_cb_list:
2612 * LWIP_TCPIP_CORE_LOCKING: the select_cb_list must only be accessed while holding
2613 * the core lock. We do a single pass through the list and signal any waiters.
2614 * Core lock should already be held when calling here!!!!
2615  *
2616 * !LWIP_TCPIP_CORE_LOCKING: we use SYS_ARCH_PROTECT but unlock on each iteration
2617 * of the loop, thus creating a possibility where a thread could modify the
2618 * select_cb_list during our UNPROTECT/PROTECT. We use a generational counter to
2619 * detect this change and restart the list walk. The list is expected to be small
2620 */
2621 static void select_check_waiters(int s, int has_recvevent, int has_sendevent, int has_errevent)
2622 {
2623 struct lwip_select_cb *scb;
2624 #if !LWIP_TCPIP_CORE_LOCKING
2625 int last_select_cb_ctr;
2626 SYS_ARCH_DECL_PROTECT(lev);
2627 #endif /* !LWIP_TCPIP_CORE_LOCKING */
2628
2629 LWIP_ASSERT_CORE_LOCKED();
2630
2631 #if !LWIP_TCPIP_CORE_LOCKING
2632 SYS_ARCH_PROTECT(lev);
2633 again:
2634 /* remember the state of select_cb_list to detect changes */
2635 last_select_cb_ctr = select_cb_ctr;
2636 #endif /* !LWIP_TCPIP_CORE_LOCKING */
2637 for (scb = select_cb_list; scb != NULL; scb = scb->next) {
2638 if (scb->sem_signalled == 0) {
2639 /* semaphore not signalled yet */
2640 int do_signal = 0;
2641 #if LWIP_SOCKET_POLL
2642 if (scb->poll_fds != NULL) {
2643 do_signal = lwip_poll_should_wake(scb, s, has_recvevent, has_sendevent, has_errevent);
2644 }
2645 #endif /* LWIP_SOCKET_POLL */
2646 #if LWIP_SOCKET_SELECT && LWIP_SOCKET_POLL
2647 else
2648 #endif /* LWIP_SOCKET_SELECT && LWIP_SOCKET_POLL */
2649 #if LWIP_SOCKET_SELECT
2650 {
2651 /* Test this select call for our socket */
2652 if (has_recvevent) {
2653 if (scb->readset && FD_ISSET(s, scb->readset)) {
2654 do_signal = 1;
2655 }
2656 }
2657 if (has_sendevent) {
2658 if (!do_signal && scb->writeset && FD_ISSET(s, scb->writeset)) {
2659 do_signal = 1;
2660 }
2661 }
2662 if (has_errevent) {
2663 if (!do_signal && scb->exceptset && FD_ISSET(s, scb->exceptset)) {
2664 do_signal = 1;
2665 }
2666 }
2667 }
2668 #endif /* LWIP_SOCKET_SELECT */
2669 if (do_signal) {
2670 scb->sem_signalled = 1;
2671 /* For !LWIP_TCPIP_CORE_LOCKING, we don't call SYS_ARCH_UNPROTECT() before signaling
2672 the semaphore, as this might lead to the select thread taking itself off the list,
2673 invalidating the semaphore. */
2674 sys_sem_signal(SELECT_SEM_PTR(scb->sem));
2675 }
2676 }
2677 #if LWIP_TCPIP_CORE_LOCKING
2678 }
2679 #else
2680 /* unlock interrupts with each step */
2681 SYS_ARCH_UNPROTECT(lev);
2682 /* this makes sure interrupt protection time is short */
2683 SYS_ARCH_PROTECT(lev);
2684 if (last_select_cb_ctr != select_cb_ctr) {
2685 /* someone has changed select_cb_list, restart at the beginning */
2686 goto again;
2687 }
2688 /* remember the state of select_cb_list to detect changes */
2689 last_select_cb_ctr = select_cb_ctr;
2690 }
2691 SYS_ARCH_UNPROTECT(lev);
2692 #endif
2693 }
2694 #endif /* LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL */
2695
2696 /**
2697 * Close one end of a full-duplex connection.
2698 */
2699 int
2700 lwip_shutdown(int s, int how)
2701 {
2702 struct lwip_sock *sock;
2703 err_t err;
2704 u8_t shut_rx = 0, shut_tx = 0;
2705
2706 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_shutdown(%d, how=%d)\n", s, how));
2707
2708 sock = get_socket(s);
2709 if (!sock) {
2710 return -1;
2711 }
2712
2713 if (sock->conn != NULL) {
2714 if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_TCP) {
2715 set_errno(EOPNOTSUPP);
2716 done_socket(sock);
2717 return -1;
2718 }
2719 } else {
2720 set_errno(ENOTCONN);
2721 done_socket(sock);
2722 return -1;
2723 }
2724
2725 if (how == SHUT_RD) {
2726 shut_rx = 1;
2727 } else if (how == SHUT_WR) {
2728 shut_tx = 1;
2729 } else if (how == SHUT_RDWR) {
2730 shut_rx = 1;
2731 shut_tx = 1;
2732 } else {
2733 set_errno(EINVAL);
2734 done_socket(sock);
2735 return -1;
2736 }
2737 err = netconn_shutdown(sock->conn, shut_rx, shut_tx);
2738
2739 set_errno(err_to_errno(err));
2740 done_socket(sock);
2741 return (err == ERR_OK ? 0 : -1);
2742 }
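
/* Usage sketch (illustrative only, not part of lwIP): half-closing a TCP
 * connection. Only TCP netconns are supported here; other connection types
 * fail with EOPNOTSUPP and an invalid 'how' fails with EINVAL.
 *
 *   lwip_shutdown(tcp_s, SHUT_WR);    (send a FIN, keep receiving)
 *   lwip_shutdown(tcp_s, SHUT_RDWR);  (stop both directions)
 */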
2743
2744 static int
2745 lwip_getaddrname(int s, struct sockaddr *name, socklen_t *namelen, u8_t local)
2746 {
2747 struct lwip_sock *sock;
2748 union sockaddr_aligned saddr;
2749 ip_addr_t naddr;
2750 u16_t port;
2751 err_t err;
2752
2753 sock = get_socket(s);
2754 if (!sock) {
2755 return -1;
2756 }
2757
2758 /* get the IP address and port */
2759 err = netconn_getaddr(sock->conn, &naddr, &port, local);
2760 if (err != ERR_OK) {
2761 set_errno(err_to_errno(err));
2762 done_socket(sock);
2763 return -1;
2764 }
2765
2766 #if LWIP_IPV4 && LWIP_IPV6
2767 /* Dual-stack: Map IPv4 addresses to IPv4 mapped IPv6 */
2768 if (NETCONNTYPE_ISIPV6(netconn_type(sock->conn)) &&
2769 IP_IS_V4_VAL(naddr)) {
2770 ip4_2_ipv4_mapped_ipv6(ip_2_ip6(&naddr), ip_2_ip4(&naddr));
2771 IP_SET_TYPE_VAL(naddr, IPADDR_TYPE_V6);
2772 }
2773 #endif /* LWIP_IPV4 && LWIP_IPV6 */
2774
2775 IPADDR_PORT_TO_SOCKADDR(&saddr, &naddr, port);
2776
2777 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getaddrname(%d, addr=", s));
2778 ip_addr_debug_print_val(SOCKETS_DEBUG, naddr);
2779 LWIP_DEBUGF(SOCKETS_DEBUG, (" port=%"U16_F")\n", port));
2780
2781 if (*namelen > IPADDR_SOCKADDR_GET_LEN(&saddr)) {
2782 *namelen = IPADDR_SOCKADDR_GET_LEN(&saddr);
2783 }
2784 MEMCPY(name, &saddr, *namelen);
2785
2786 set_errno(0);
2787 done_socket(sock);
2788 return 0;
2789 }
2790
2791 int
2792 lwip_getpeername(int s, struct sockaddr *name, socklen_t *namelen)
2793 {
2794 return lwip_getaddrname(s, name, namelen, 0);
2795 }
2796
2797 int
2798 lwip_getsockname(int s, struct sockaddr *name, socklen_t *namelen)
2799 {
2800 return lwip_getaddrname(s, name, namelen, 1);
2801 }
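
/* Usage sketch (illustrative only, not part of lwIP): reading back the local
 * address/port assigned to a socket. namelen is value/result and is clamped
 * to the size of the stored sockaddr by lwip_getaddrname() above.
 *
 *   struct sockaddr_storage sa;
 *   socklen_t salen = sizeof(sa);
 *   if (lwip_getsockname(s, (struct sockaddr *)&sa, &salen) == 0) {
 *     (for an AF_INET socket, cast to struct sockaddr_in to read sin_port)
 *   }
 */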
2802
2803 int
2804 lwip_getsockopt(int s, int level, int optname, void *optval, socklen_t *optlen)
2805 {
2806 int err;
2807 struct lwip_sock *sock = get_socket(s);
2808 #if !LWIP_TCPIP_CORE_LOCKING
2809 err_t cberr;
2810 LWIP_SETGETSOCKOPT_DATA_VAR_DECLARE(data);
2811 #endif /* !LWIP_TCPIP_CORE_LOCKING */
2812
2813 if (!sock) {
2814 return -1;
2815 }
2816
2817 if ((NULL == optval) || (NULL == optlen)) {
2818 set_errno(EFAULT);
2819 done_socket(sock);
2820 return -1;
2821 }
2822
2823 #if LWIP_TCPIP_CORE_LOCKING
2824 /* core-locking can just call the -impl function */
2825 LOCK_TCPIP_CORE();
2826 err = lwip_getsockopt_impl(s, level, optname, optval, optlen);
2827 UNLOCK_TCPIP_CORE();
2828
2829 #else /* LWIP_TCPIP_CORE_LOCKING */
2830
2831 #if LWIP_MPU_COMPATIBLE
2832 /* MPU_COMPATIBLE copies the optval data, so check for max size here */
2833 if (*optlen > LWIP_SETGETSOCKOPT_MAXOPTLEN) {
2834 set_errno(ENOBUFS);
2835 done_socket(sock);
2836 return -1;
2837 }
2838 #endif /* LWIP_MPU_COMPATIBLE */
2839
2840 LWIP_SETGETSOCKOPT_DATA_VAR_ALLOC(data, sock);
2841 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).s = s;
2842 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).level = level;
2843 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optname = optname;
2844 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optlen = *optlen;
2845 #if !LWIP_MPU_COMPATIBLE
2846 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optval.p = optval;
2847 #endif /* !LWIP_MPU_COMPATIBLE */
2848 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).err = 0;
2849 #if LWIP_NETCONN_SEM_PER_THREAD
2850 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).completed_sem = LWIP_NETCONN_THREAD_SEM_GET();
2851 #else
2852 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).completed_sem = &sock->conn->op_completed;
2853 #endif
2854 cberr = tcpip_callback(lwip_getsockopt_callback, &LWIP_SETGETSOCKOPT_DATA_VAR_REF(data));
2855 if (cberr != ERR_OK) {
2856 LWIP_SETGETSOCKOPT_DATA_VAR_FREE(data);
2857 set_errno(err_to_errno(cberr));
2858 done_socket(sock);
2859 return -1;
2860 }
2861 sys_arch_sem_wait((sys_sem_t *)(LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).completed_sem), 0);
2862
2863 /* write back optlen and optval */
2864 *optlen = LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optlen;
2865 #if LWIP_MPU_COMPATIBLE
2866 MEMCPY(optval, LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optval,
2867 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optlen);
2868 #endif /* LWIP_MPU_COMPATIBLE */
2869
2870 /* maybe lwip_getsockopt_impl has changed err */
2871 err = LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).err;
2872 LWIP_SETGETSOCKOPT_DATA_VAR_FREE(data);
2873 #endif /* LWIP_TCPIP_CORE_LOCKING */
2874
2875 set_errno(err);
2876 done_socket(sock);
2877 return err ? -1 : 0;
2878 }
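
/* Usage sketch (illustrative only, not part of lwIP): fetching the pending
 * socket error, e.g. after a non-blocking connect. SO_ERROR is answered from
 * err_to_errno(netconn_err(...)) in lwip_getsockopt_impl() below.
 *
 *   int so_err = 0;
 *   socklen_t len = sizeof(so_err);
 *   if (lwip_getsockopt(s, SOL_SOCKET, SO_ERROR, &so_err, &len) == 0) {
 *     (so_err == 0 means the pending operation succeeded)
 *   }
 */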
2879
2880 #if !LWIP_TCPIP_CORE_LOCKING
2881 /** lwip_getsockopt_callback: only used without CORE_LOCKING
2882 * to get into the tcpip_thread
2883 */
2884 static void
2885 lwip_getsockopt_callback(void *arg)
2886 {
2887 struct lwip_setgetsockopt_data *data;
2888 LWIP_ASSERT("arg != NULL", arg != NULL);
2889 data = (struct lwip_setgetsockopt_data *)arg;
2890
2891 data->err = lwip_getsockopt_impl(data->s, data->level, data->optname,
2892 #if LWIP_MPU_COMPATIBLE
2893 data->optval,
2894 #else /* LWIP_MPU_COMPATIBLE */
2895 data->optval.p,
2896 #endif /* LWIP_MPU_COMPATIBLE */
2897 &data->optlen);
2898
2899 sys_sem_signal((sys_sem_t *)(data->completed_sem));
2900 }
2901 #endif /* LWIP_TCPIP_CORE_LOCKING */
2902
2903 static int
2904 lwip_sockopt_to_ipopt(int optname)
2905 {
2906 /* Map SO_* values to our internal SOF_* values
2907 * We should not rely on #defines in socket.h
2908 * being in sync with ip.h.
2909 */
2910 switch (optname) {
2911 case SO_BROADCAST:
2912 return SOF_BROADCAST;
2913 case SO_KEEPALIVE:
2914 return SOF_KEEPALIVE;
2915 case SO_REUSEADDR:
2916 return SOF_REUSEADDR;
2917 default:
2918 LWIP_ASSERT("Unknown socket option", 0);
2919 return 0;
2920 }
2921 }
2922
2923 /** lwip_getsockopt_impl: the actual implementation of getsockopt:
2924 * same argument as lwip_getsockopt, either called directly or through callback
2925 */
2926 static int
2927 lwip_getsockopt_impl(int s, int level, int optname, void *optval, socklen_t *optlen)
2928 {
2929 int err = 0;
2930 struct lwip_sock *sock = tryget_socket(s);
2931 if (!sock) {
2932 return EBADF;
2933 }
2934
2935 #ifdef LWIP_HOOK_SOCKETS_GETSOCKOPT
2936 if (LWIP_HOOK_SOCKETS_GETSOCKOPT(s, sock, level, optname, optval, optlen, &err)) {
2937 return err;
2938 }
2939 #endif
2940
2941 switch (level) {
2942
2943 /* Level: SOL_SOCKET */
2944 case SOL_SOCKET:
2945 switch (optname) {
2946
2947 #if LWIP_TCP
2948 case SO_ACCEPTCONN:
2949 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, int);
2950 if (NETCONNTYPE_GROUP(sock->conn->type) != NETCONN_TCP) {
2951 done_socket(sock);
2952 return ENOPROTOOPT;
2953 }
2954 if ((sock->conn->pcb.tcp != NULL) && (sock->conn->pcb.tcp->state == LISTEN)) {
2955 *(int *)optval = 1;
2956 } else {
2957 *(int *)optval = 0;
2958 }
2959 break;
2960 #endif /* LWIP_TCP */
2961
2962 /* The option flags */
2963 case SO_BROADCAST:
2964 case SO_KEEPALIVE:
2965 #if SO_REUSE
2966 case SO_REUSEADDR:
2967 #endif /* SO_REUSE */
2968 if ((optname == SO_BROADCAST) &&
2969 (NETCONNTYPE_GROUP(sock->conn->type) != NETCONN_UDP)) {
2970 done_socket(sock);
2971 return ENOPROTOOPT;
2972 }
2973
2974 optname = lwip_sockopt_to_ipopt(optname);
2975
2976 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, int);
2977 *(int *)optval = ip_get_option(sock->conn->pcb.ip, optname);
2978 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, SOL_SOCKET, optname=0x%x, ..) = %s\n",
2979 s, optname, (*(int *)optval ? "on" : "off")));
2980 break;
2981
2982 case SO_TYPE:
2983 LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, *optlen, int);
2984 switch (NETCONNTYPE_GROUP(netconn_type(sock->conn))) {
2985 case NETCONN_RAW:
2986 *(int *)optval = SOCK_RAW;
2987 break;
2988 case NETCONN_TCP:
2989 *(int *)optval = SOCK_STREAM;
2990 break;
2991 case NETCONN_UDP:
2992 *(int *)optval = SOCK_DGRAM;
2993 break;
2994 default: /* unrecognized socket type */
2995 *(int *)optval = netconn_type(sock->conn);
2996 LWIP_DEBUGF(SOCKETS_DEBUG,
2997 ("lwip_getsockopt(%d, SOL_SOCKET, SO_TYPE): unrecognized socket type %d\n",
2998 s, *(int *)optval));
2999 } /* switch (netconn_type(sock->conn)) */
3000 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, SOL_SOCKET, SO_TYPE) = %d\n",
3001 s, *(int *)optval));
3002 break;
3003
3004 case SO_ERROR:
3005 LWIP_SOCKOPT_CHECK_OPTLEN(sock, *optlen, int);
3006 *(int *)optval = err_to_errno(netconn_err(sock->conn));
3007 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, SOL_SOCKET, SO_ERROR) = %d\n",
3008 s, *(int *)optval));
3009 break;
3010
3011 #if LWIP_SO_SNDTIMEO
3012 case SO_SNDTIMEO:
3013 LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, *optlen, LWIP_SO_SNDRCVTIMEO_OPTTYPE);
3014 LWIP_SO_SNDRCVTIMEO_SET(optval, netconn_get_sendtimeout(sock->conn));
3015 break;
3016 #endif /* LWIP_SO_SNDTIMEO */
3017 #if LWIP_SO_RCVTIMEO
3018 case SO_RCVTIMEO:
3019 LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, *optlen, LWIP_SO_SNDRCVTIMEO_OPTTYPE);
3020 LWIP_SO_SNDRCVTIMEO_SET(optval, netconn_get_recvtimeout(sock->conn));
3021 break;
3022 #endif /* LWIP_SO_RCVTIMEO */
3023 #if LWIP_SO_RCVBUF
3024 case SO_RCVBUF:
3025 LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, *optlen, int);
3026 *(int *)optval = netconn_get_recvbufsize(sock->conn);
3027 break;
3028 #endif /* LWIP_SO_RCVBUF */
3029 #if LWIP_SO_LINGER
3030 case SO_LINGER: {
3031 s16_t conn_linger;
3032 struct linger *linger = (struct linger *)optval;
3033 LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, *optlen, struct linger);
3034 conn_linger = sock->conn->linger;
3035 if (conn_linger >= 0) {
3036 linger->l_onoff = 1;
3037 linger->l_linger = (int)conn_linger;
3038 } else {
3039 linger->l_onoff = 0;
3040 linger->l_linger = 0;
3041 }
3042 }
3043 break;
3044 #endif /* LWIP_SO_LINGER */
3045 #if LWIP_UDP
3046 case SO_NO_CHECK:
3047 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, *optlen, int, NETCONN_UDP);
3048 #if LWIP_UDPLITE
3049 if (udp_is_flag_set(sock->conn->pcb.udp, UDP_FLAGS_UDPLITE)) {
3050 /* this flag is only available for UDP, not for UDP lite */
3051 done_socket(sock);
3052 return EAFNOSUPPORT;
3053 }
3054 #endif /* LWIP_UDPLITE */
3055 *(int *)optval = udp_is_flag_set(sock->conn->pcb.udp, UDP_FLAGS_NOCHKSUM) ? 1 : 0;
3056 break;
3057 #endif /* LWIP_UDP */
3058 default:
3059 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, SOL_SOCKET, UNIMPL: optname=0x%x, ..)\n",
3060 s, optname));
3061 err = ENOPROTOOPT;
3062 break;
3063 } /* switch (optname) */
3064 break;
3065
3066 /* Level: IPPROTO_IP */
3067 case IPPROTO_IP:
3068 switch (optname) {
3069 case IP_TTL:
3070 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, int);
3071 *(int *)optval = sock->conn->pcb.ip->ttl;
3072 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IP, IP_TTL) = %d\n",
3073 s, *(int *)optval));
3074 break;
3075 case IP_TOS:
3076 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, int);
3077 *(int *)optval = sock->conn->pcb.ip->tos;
3078 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IP, IP_TOS) = %d\n",
3079 s, *(int *)optval));
3080 break;
3081 #if LWIP_IPV4 && LWIP_MULTICAST_TX_OPTIONS && LWIP_UDP
3082 case IP_MULTICAST_TTL:
3083 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, u8_t);
3084 if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_UDP) {
3085 done_socket(sock);
3086 return ENOPROTOOPT;
3087 }
3088 *(u8_t *)optval = udp_get_multicast_ttl(sock->conn->pcb.udp);
3089 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IP, IP_MULTICAST_TTL) = %d\n",
3090 s, *(int *)optval));
3091 break;
3092 case IP_MULTICAST_IF:
3093 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, struct in_addr);
3094 if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_UDP) {
3095 done_socket(sock);
3096 return ENOPROTOOPT;
3097 }
3098 inet_addr_from_ip4addr((struct in_addr *)optval, udp_get_multicast_netif_addr(sock->conn->pcb.udp));
3099 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IP, IP_MULTICAST_IF) = 0x%"X32_F"\n",
3100 s, *(u32_t *)optval));
3101 break;
3102 case IP_MULTICAST_LOOP:
3103 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, u8_t);
3104 if ((sock->conn->pcb.udp->flags & UDP_FLAGS_MULTICAST_LOOP) != 0) {
3105 *(u8_t *)optval = 1;
3106 } else {
3107 *(u8_t *)optval = 0;
3108 }
3109 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IP, IP_MULTICAST_LOOP) = %d\n",
3110 s, *(int *)optval));
3111 break;
3112 #endif /* LWIP_IPV4 && LWIP_MULTICAST_TX_OPTIONS && LWIP_UDP */
3113 default:
3114 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IP, UNIMPL: optname=0x%x, ..)\n",
3115 s, optname));
3116 err = ENOPROTOOPT;
3117 break;
3118 } /* switch (optname) */
3119 break;
3120
3121 #if LWIP_TCP
3122 /* Level: IPPROTO_TCP */
3123 case IPPROTO_TCP:
3124       /* Special case: all IPPROTO_TCP options take an int */
3125 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, *optlen, int, NETCONN_TCP);
3126 if (sock->conn->pcb.tcp->state == LISTEN) {
3127 done_socket(sock);
3128 return EINVAL;
3129 }
3130 switch (optname) {
3131 case TCP_NODELAY:
3132 *(int *)optval = tcp_nagle_disabled(sock->conn->pcb.tcp);
3133 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_TCP, TCP_NODELAY) = %s\n",
3134 s, (*(int *)optval) ? "on" : "off") );
3135 break;
3136 case TCP_KEEPALIVE:
3137 *(int *)optval = (int)sock->conn->pcb.tcp->keep_idle;
3138 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_TCP, TCP_KEEPALIVE) = %d\n",
3139 s, *(int *)optval));
3140 break;
3141
3142 #if LWIP_TCP_KEEPALIVE
3143 case TCP_KEEPIDLE:
3144 *(int *)optval = (int)(sock->conn->pcb.tcp->keep_idle / 1000);
3145 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_TCP, TCP_KEEPIDLE) = %d\n",
3146 s, *(int *)optval));
3147 break;
3148 case TCP_KEEPINTVL:
3149 *(int *)optval = (int)(sock->conn->pcb.tcp->keep_intvl / 1000);
3150 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_TCP, TCP_KEEPINTVL) = %d\n",
3151 s, *(int *)optval));
3152 break;
3153 case TCP_KEEPCNT:
3154 *(int *)optval = (int)sock->conn->pcb.tcp->keep_cnt;
3155 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_TCP, TCP_KEEPCNT) = %d\n",
3156 s, *(int *)optval));
3157 break;
3158 #endif /* LWIP_TCP_KEEPALIVE */
3159 default:
3160 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_TCP, UNIMPL: optname=0x%x, ..)\n",
3161 s, optname));
3162 err = ENOPROTOOPT;
3163 break;
3164 } /* switch (optname) */
3165 break;
3166 #endif /* LWIP_TCP */
3167
3168 #if LWIP_IPV6
3169 /* Level: IPPROTO_IPV6 */
3170 case IPPROTO_IPV6:
3171 switch (optname) {
3172 case IPV6_V6ONLY:
3173 LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, *optlen, int);
3174 *(int *)optval = (netconn_get_ipv6only(sock->conn) ? 1 : 0);
3175 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IPV6, IPV6_V6ONLY) = %d\n",
3176 s, *(int *)optval));
3177 break;
3178 default:
3179 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IPV6, UNIMPL: optname=0x%x, ..)\n",
3180 s, optname));
3181 err = ENOPROTOOPT;
3182 break;
3183 } /* switch (optname) */
3184 break;
3185 #endif /* LWIP_IPV6 */
3186
3187 #if LWIP_UDP && LWIP_UDPLITE
3188 /* Level: IPPROTO_UDPLITE */
3189 case IPPROTO_UDPLITE:
3190       /* Special case: all IPPROTO_UDPLITE options take an int */
3191 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, int);
3192       /* If this is not a UDP-Lite socket, ignore any options. */
3193 if (!NETCONNTYPE_ISUDPLITE(netconn_type(sock->conn))) {
3194 done_socket(sock);
3195 return ENOPROTOOPT;
3196 }
3197 switch (optname) {
3198 case UDPLITE_SEND_CSCOV:
3199 *(int *)optval = sock->conn->pcb.udp->chksum_len_tx;
3200 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_UDPLITE, UDPLITE_SEND_CSCOV) = %d\n",
3201 s, (*(int *)optval)) );
3202 break;
3203 case UDPLITE_RECV_CSCOV:
3204 *(int *)optval = sock->conn->pcb.udp->chksum_len_rx;
3205 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_UDPLITE, UDPLITE_RECV_CSCOV) = %d\n",
3206 s, (*(int *)optval)) );
3207 break;
3208 default:
3209 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_UDPLITE, UNIMPL: optname=0x%x, ..)\n",
3210 s, optname));
3211 err = ENOPROTOOPT;
3212 break;
3213 } /* switch (optname) */
3214 break;
3215 #endif /* LWIP_UDP */
3216 /* Level: IPPROTO_RAW */
3217 case IPPROTO_RAW:
3218 switch (optname) {
3219 #if LWIP_IPV6 && LWIP_RAW
3220 case IPV6_CHECKSUM:
3221 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, *optlen, int, NETCONN_RAW);
3222 if (sock->conn->pcb.raw->chksum_reqd == 0) {
3223 *(int *)optval = -1;
3224 } else {
3225 *(int *)optval = sock->conn->pcb.raw->chksum_offset;
3226 }
3227 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_RAW, IPV6_CHECKSUM) = %d\n",
3228 s, (*(int *)optval)) );
3229 break;
3230 #endif /* LWIP_IPV6 && LWIP_RAW */
3231 default:
3232 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_RAW, UNIMPL: optname=0x%x, ..)\n",
3233 s, optname));
3234 err = ENOPROTOOPT;
3235 break;
3236 } /* switch (optname) */
3237 break;
3238 default:
3239 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, level=0x%x, UNIMPL: optname=0x%x, ..)\n",
3240 s, level, optname));
3241 err = ENOPROTOOPT;
3242 break;
3243 } /* switch (level) */
3244
3245 done_socket(sock);
3246 return err;
3247 }
3248
3249 int
3250 lwip_setsockopt(int s, int level, int optname, const void *optval, socklen_t optlen)
3251 {
3252 int err = 0;
3253 struct lwip_sock *sock = get_socket(s);
3254 #if !LWIP_TCPIP_CORE_LOCKING
3255 err_t cberr;
3256 LWIP_SETGETSOCKOPT_DATA_VAR_DECLARE(data);
3257 #endif /* !LWIP_TCPIP_CORE_LOCKING */
3258
3259 if (!sock) {
3260 return -1;
3261 }
3262
3263 if (NULL == optval) {
3264 set_errno(EFAULT);
3265 done_socket(sock);
3266 return -1;
3267 }
3268
3269 #if LWIP_TCPIP_CORE_LOCKING
3270 /* core-locking can just call the -impl function */
3271 LOCK_TCPIP_CORE();
3272 err = lwip_setsockopt_impl(s, level, optname, optval, optlen);
3273 UNLOCK_TCPIP_CORE();
3274
3275 #else /* LWIP_TCPIP_CORE_LOCKING */
3276
3277 #if LWIP_MPU_COMPATIBLE
3278 /* MPU_COMPATIBLE copies the optval data, so check for max size here */
3279 if (optlen > LWIP_SETGETSOCKOPT_MAXOPTLEN) {
3280 set_errno(ENOBUFS);
3281 done_socket(sock);
3282 return -1;
3283 }
3284 #endif /* LWIP_MPU_COMPATIBLE */
3285
3286 LWIP_SETGETSOCKOPT_DATA_VAR_ALLOC(data, sock);
3287 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).s = s;
3288 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).level = level;
3289 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optname = optname;
3290 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optlen = optlen;
3291 #if LWIP_MPU_COMPATIBLE
3292 MEMCPY(LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optval, optval, optlen);
3293 #else /* LWIP_MPU_COMPATIBLE */
3294 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optval.pc = (const void *)optval;
3295 #endif /* LWIP_MPU_COMPATIBLE */
3296 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).err = 0;
3297 #if LWIP_NETCONN_SEM_PER_THREAD
3298 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).completed_sem = LWIP_NETCONN_THREAD_SEM_GET();
3299 #else
3300 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).completed_sem = &sock->conn->op_completed;
3301 #endif
3302 cberr = tcpip_callback(lwip_setsockopt_callback, &LWIP_SETGETSOCKOPT_DATA_VAR_REF(data));
3303 if (cberr != ERR_OK) {
3304 LWIP_SETGETSOCKOPT_DATA_VAR_FREE(data);
3305 set_errno(err_to_errno(cberr));
3306 done_socket(sock);
3307 return -1;
3308 }
3309 sys_arch_sem_wait((sys_sem_t *)(LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).completed_sem), 0);
3310
3311 /* maybe lwip_setsockopt_impl has changed err */
3312 err = LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).err;
3313 LWIP_SETGETSOCKOPT_DATA_VAR_FREE(data);
3314 #endif /* LWIP_TCPIP_CORE_LOCKING */
3315
3316 set_errno(err);
3317 done_socket(sock);
3318 return err ? -1 : 0;
3319 }
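
/* Illustrative usage sketch (editor's addition, not part of lwIP): a typical
 * lwip_setsockopt() call as dispatched above, either directly under the core
 * lock or via tcpip_callback(). Assumes LWIP_SO_RCVTIMEO is enabled and the
 * default option type (struct timeval, LWIP_SO_SNDRCVTIMEO_NONSTANDARD == 0).
 */
#if 0 /* example only */
static int example_set_recv_timeout(int s)
{
  struct timeval tv;
  tv.tv_sec = 5;   /* give up blocking reads after 5 seconds */
  tv.tv_usec = 0;
  return lwip_setsockopt(s, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv));
}
#endif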
3320
3321 #if !LWIP_TCPIP_CORE_LOCKING
3322 /** lwip_setsockopt_callback: only used without CORE_LOCKING
3323 * to get into the tcpip_thread
3324 */
3325 static void
3326 lwip_setsockopt_callback(void *arg)
3327 {
3328 struct lwip_setgetsockopt_data *data;
3329 LWIP_ASSERT("arg != NULL", arg != NULL);
3330 data = (struct lwip_setgetsockopt_data *)arg;
3331
3332 data->err = lwip_setsockopt_impl(data->s, data->level, data->optname,
3333 #if LWIP_MPU_COMPATIBLE
3334 data->optval,
3335 #else /* LWIP_MPU_COMPATIBLE */
3336 data->optval.pc,
3337 #endif /* LWIP_MPU_COMPATIBLE */
3338 data->optlen);
3339
3340 sys_sem_signal((sys_sem_t *)(data->completed_sem));
3341 }
3342 #endif /* LWIP_TCPIP_CORE_LOCKING */
3343
3344 /** lwip_setsockopt_impl: the actual implementation of setsockopt:
3345  * same arguments as lwip_setsockopt; called either directly or through the tcpip_thread callback
3346 */
3347 static int
3348 lwip_setsockopt_impl(int s, int level, int optname, const void *optval, socklen_t optlen)
3349 {
3350 int err = 0;
3351 struct lwip_sock *sock = tryget_socket(s);
3352 if (!sock) {
3353 return EBADF;
3354 }
3355
3356 #ifdef LWIP_HOOK_SOCKETS_SETSOCKOPT
3357 if (LWIP_HOOK_SOCKETS_SETSOCKOPT(s, sock, level, optname, optval, optlen, &err)) {
3358 return err;
3359 }
3360 #endif
3361
3362 switch (level) {
3363
3364 /* Level: SOL_SOCKET */
3365 case SOL_SOCKET:
3366 switch (optname) {
3367
3368 /* SO_ACCEPTCONN is get-only */
3369
3370 /* The option flags */
3371 case SO_BROADCAST:
3372 case SO_KEEPALIVE:
3373 #if SO_REUSE
3374 case SO_REUSEADDR:
3375 #endif /* SO_REUSE */
3376 if ((optname == SO_BROADCAST) &&
3377 (NETCONNTYPE_GROUP(sock->conn->type) != NETCONN_UDP)) {
3378 done_socket(sock);
3379 return ENOPROTOOPT;
3380 }
3381
3382 optname = lwip_sockopt_to_ipopt(optname);
3383
3384 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, optlen, int);
3385 if (*(const int *)optval) {
3386 ip_set_option(sock->conn->pcb.ip, optname);
3387 } else {
3388 ip_reset_option(sock->conn->pcb.ip, optname);
3389 }
3390 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, SOL_SOCKET, optname=0x%x, ..) -> %s\n",
3391 s, optname, (*(const int *)optval ? "on" : "off")));
3392 break;
3393
3394 /* SO_TYPE is get-only */
3395 /* SO_ERROR is get-only */
3396
3397 #if LWIP_SO_SNDTIMEO
3398 case SO_SNDTIMEO: {
3399 long ms_long;
3400 LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, optlen, LWIP_SO_SNDRCVTIMEO_OPTTYPE);
3401 ms_long = LWIP_SO_SNDRCVTIMEO_GET_MS(optval);
3402 if (ms_long < 0) {
3403 done_socket(sock);
3404 return EINVAL;
3405 }
3406 netconn_set_sendtimeout(sock->conn, ms_long);
3407 break;
3408 }
3409 #endif /* LWIP_SO_SNDTIMEO */
3410 #if LWIP_SO_RCVTIMEO
3411 case SO_RCVTIMEO: {
3412 long ms_long;
3413 LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, optlen, LWIP_SO_SNDRCVTIMEO_OPTTYPE);
3414 ms_long = LWIP_SO_SNDRCVTIMEO_GET_MS(optval);
3415 if (ms_long < 0) {
3416 done_socket(sock);
3417 return EINVAL;
3418 }
3419 netconn_set_recvtimeout(sock->conn, (u32_t)ms_long);
3420 break;
3421 }
3422 #endif /* LWIP_SO_RCVTIMEO */
3423 #if LWIP_SO_RCVBUF
3424 case SO_RCVBUF:
3425 LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, optlen, int);
3426 netconn_set_recvbufsize(sock->conn, *(const int *)optval);
3427 break;
3428 #endif /* LWIP_SO_RCVBUF */
3429 #if LWIP_SO_LINGER
3430 case SO_LINGER: {
3431 const struct linger *linger = (const struct linger *)optval;
3432 LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, optlen, struct linger);
3433 if (linger->l_onoff) {
3434 int lingersec = linger->l_linger;
3435 if (lingersec < 0) {
3436 done_socket(sock);
3437 return EINVAL;
3438 }
3439 if (lingersec > 0xFFFF) {
3440 lingersec = 0xFFFF;
3441 }
3442 sock->conn->linger = (s16_t)lingersec;
3443 } else {
3444 sock->conn->linger = -1;
3445 }
3446 }
3447 break;
3448 #endif /* LWIP_SO_LINGER */
3449 #if LWIP_UDP
3450 case SO_NO_CHECK:
3451 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, int, NETCONN_UDP);
3452 #if LWIP_UDPLITE
3453 if (udp_is_flag_set(sock->conn->pcb.udp, UDP_FLAGS_UDPLITE)) {
3454 /* this flag is only available for UDP, not for UDP lite */
3455 done_socket(sock);
3456 return EAFNOSUPPORT;
3457 }
3458 #endif /* LWIP_UDPLITE */
3459 if (*(const int *)optval) {
3460 udp_set_flags(sock->conn->pcb.udp, UDP_FLAGS_NOCHKSUM);
3461 } else {
3462 udp_clear_flags(sock->conn->pcb.udp, UDP_FLAGS_NOCHKSUM);
3463 }
3464 break;
3465 #endif /* LWIP_UDP */
3466 case SO_BINDTODEVICE: {
3467 const struct ifreq *iface;
3468 struct netif *n = NULL;
3469
3470 LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, optlen, struct ifreq);
3471
3472 iface = (const struct ifreq *)optval;
3473 if (iface->ifr_name[0] != 0) {
3474 n = netif_find(iface->ifr_name);
3475 if (n == NULL) {
3476 done_socket(sock);
3477 return ENODEV;
3478 }
3479 }
3480
3481 switch (NETCONNTYPE_GROUP(netconn_type(sock->conn))) {
3482 #if LWIP_TCP
3483 case NETCONN_TCP:
3484 tcp_bind_netif(sock->conn->pcb.tcp, n);
3485 break;
3486 #endif
3487 #if LWIP_UDP
3488 case NETCONN_UDP:
3489 udp_bind_netif(sock->conn->pcb.udp, n);
3490 break;
3491 #endif
3492 #if LWIP_RAW
3493 case NETCONN_RAW:
3494 raw_bind_netif(sock->conn->pcb.raw, n);
3495 break;
3496 #endif
3497 default:
3498 LWIP_ASSERT("Unhandled netconn type in SO_BINDTODEVICE", 0);
3499 break;
3500 }
3501 }
3502 break;
3503 default:
3504 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, SOL_SOCKET, UNIMPL: optname=0x%x, ..)\n",
3505 s, optname));
3506 err = ENOPROTOOPT;
3507 break;
3508 } /* switch (optname) */
3509 break;
3510
3511 /* Level: IPPROTO_IP */
3512 case IPPROTO_IP:
3513 switch (optname) {
3514 case IP_TTL:
3515 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, optlen, int);
3516 sock->conn->pcb.ip->ttl = (u8_t)(*(const int *)optval);
3517 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_IP, IP_TTL, ..) -> %d\n",
3518 s, sock->conn->pcb.ip->ttl));
3519 break;
3520 case IP_TOS:
3521 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, optlen, int);
3522 sock->conn->pcb.ip->tos = (u8_t)(*(const int *)optval);
3523 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_IP, IP_TOS, ..)-> %d\n",
3524 s, sock->conn->pcb.ip->tos));
3525 break;
3526 #if LWIP_NETBUF_RECVINFO
3527 case IP_PKTINFO:
3528 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, int, NETCONN_UDP);
3529 if (*(const int *)optval) {
3530 sock->conn->flags |= NETCONN_FLAG_PKTINFO;
3531 } else {
3532 sock->conn->flags &= ~NETCONN_FLAG_PKTINFO;
3533 }
3534 break;
3535 #endif /* LWIP_NETBUF_RECVINFO */
3536 #if LWIP_IPV4 && LWIP_MULTICAST_TX_OPTIONS && LWIP_UDP
3537 case IP_MULTICAST_TTL:
3538 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, u8_t, NETCONN_UDP);
3539 udp_set_multicast_ttl(sock->conn->pcb.udp, (u8_t)(*(const u8_t *)optval));
3540 break;
3541 case IP_MULTICAST_IF: {
3542 ip4_addr_t if_addr;
3543 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, struct in_addr, NETCONN_UDP);
3544 inet_addr_to_ip4addr(&if_addr, (const struct in_addr *)optval);
3545 udp_set_multicast_netif_addr(sock->conn->pcb.udp, &if_addr);
3546 }
3547 break;
3548 case IP_MULTICAST_LOOP:
3549 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, u8_t, NETCONN_UDP);
3550 if (*(const u8_t *)optval) {
3551 udp_set_flags(sock->conn->pcb.udp, UDP_FLAGS_MULTICAST_LOOP);
3552 } else {
3553 udp_clear_flags(sock->conn->pcb.udp, UDP_FLAGS_MULTICAST_LOOP);
3554 }
3555 break;
3556 #endif /* LWIP_IPV4 && LWIP_MULTICAST_TX_OPTIONS && LWIP_UDP */
3557 #if LWIP_IGMP
3558 case IP_ADD_MEMBERSHIP:
3559 case IP_DROP_MEMBERSHIP: {
3560 /* If this is a TCP or a RAW socket, ignore these options. */
3561 err_t igmp_err;
3562 const struct ip_mreq *imr = (const struct ip_mreq *)optval;
3563 ip4_addr_t if_addr;
3564 ip4_addr_t multi_addr;
3565 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, struct ip_mreq, NETCONN_UDP);
3566 inet_addr_to_ip4addr(&if_addr, &imr->imr_interface);
3567 inet_addr_to_ip4addr(&multi_addr, &imr->imr_multiaddr);
3568 if (optname == IP_ADD_MEMBERSHIP) {
3569 if (!lwip_socket_register_membership(s, &if_addr, &multi_addr)) {
3570 /* cannot track membership (out of memory) */
3571 err = ENOMEM;
3572 igmp_err = ERR_OK;
3573 } else {
3574 igmp_err = igmp_joingroup(&if_addr, &multi_addr);
3575 }
3576 } else {
3577 igmp_err = igmp_leavegroup(&if_addr, &multi_addr);
3578 lwip_socket_unregister_membership(s, &if_addr, &multi_addr);
3579 }
3580 if (igmp_err != ERR_OK) {
3581 err = EADDRNOTAVAIL;
3582 }
3583 }
3584 break;
3585 #endif /* LWIP_IGMP */
3586 default:
3587 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_IP, UNIMPL: optname=0x%x, ..)\n",
3588 s, optname));
3589 err = ENOPROTOOPT;
3590 break;
3591 } /* switch (optname) */
3592 break;
3593
3594 #if LWIP_TCP
3595 /* Level: IPPROTO_TCP */
3596 case IPPROTO_TCP:
3597     /* Special case: all IPPROTO_TCP options take an int */
3598 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, int, NETCONN_TCP);
3599 if (sock->conn->pcb.tcp->state == LISTEN) {
3600 done_socket(sock);
3601 return EINVAL;
3602 }
3603 switch (optname) {
3604 case TCP_NODELAY:
3605 if (*(const int *)optval) {
3606 tcp_nagle_disable(sock->conn->pcb.tcp);
3607 } else {
3608 tcp_nagle_enable(sock->conn->pcb.tcp);
3609 }
3610 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_TCP, TCP_NODELAY) -> %s\n",
3611 s, (*(const int *)optval) ? "on" : "off") );
3612 break;
3613 case TCP_KEEPALIVE:
3614 sock->conn->pcb.tcp->keep_idle = (u32_t)(*(const int *)optval);
3615 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_TCP, TCP_KEEPALIVE) -> %"U32_F"\n",
3616 s, sock->conn->pcb.tcp->keep_idle));
3617 break;
3618
3619 #if LWIP_TCP_KEEPALIVE
3620 case TCP_KEEPIDLE:
3621 sock->conn->pcb.tcp->keep_idle = 1000 * (u32_t)(*(const int *)optval);
3622 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_TCP, TCP_KEEPIDLE) -> %"U32_F"\n",
3623 s, sock->conn->pcb.tcp->keep_idle));
3624 break;
3625 case TCP_KEEPINTVL:
3626 sock->conn->pcb.tcp->keep_intvl = 1000 * (u32_t)(*(const int *)optval);
3627 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_TCP, TCP_KEEPINTVL) -> %"U32_F"\n",
3628 s, sock->conn->pcb.tcp->keep_intvl));
3629 break;
3630 case TCP_KEEPCNT:
3631 sock->conn->pcb.tcp->keep_cnt = (u32_t)(*(const int *)optval);
3632 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_TCP, TCP_KEEPCNT) -> %"U32_F"\n",
3633 s, sock->conn->pcb.tcp->keep_cnt));
3634 break;
3635 #endif /* LWIP_TCP_KEEPALIVE */
3636 default:
3637 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_TCP, UNIMPL: optname=0x%x, ..)\n",
3638 s, optname));
3639 err = ENOPROTOOPT;
3640 break;
3641 } /* switch (optname) */
3642 break;
3643 #endif /* LWIP_TCP */
3644
3645 #if LWIP_IPV6
3646 /* Level: IPPROTO_IPV6 */
3647 case IPPROTO_IPV6:
3648 switch (optname) {
3649 case IPV6_V6ONLY:
3650 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, optlen, int);
3651 if (*(const int *)optval) {
3652 netconn_set_ipv6only(sock->conn, 1);
3653 } else {
3654 netconn_set_ipv6only(sock->conn, 0);
3655 }
3656 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_IPV6, IPV6_V6ONLY, ..) -> %d\n",
3657 s, (netconn_get_ipv6only(sock->conn) ? 1 : 0)));
3658 break;
3659 #if LWIP_IPV6_MLD
3660 case IPV6_JOIN_GROUP:
3661 case IPV6_LEAVE_GROUP: {
3662 /* If this is a TCP or a RAW socket, ignore these options. */
3663 err_t mld6_err;
3664 struct netif *netif;
3665 ip6_addr_t multi_addr;
3666 const struct ipv6_mreq *imr = (const struct ipv6_mreq *)optval;
3667 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, struct ipv6_mreq, NETCONN_UDP);
3668 inet6_addr_to_ip6addr(&multi_addr, &imr->ipv6mr_multiaddr);
3669 LWIP_ASSERT("Invalid netif index", imr->ipv6mr_interface <= 0xFFu);
3670 netif = netif_get_by_index((u8_t)imr->ipv6mr_interface);
3671 if (netif == NULL) {
3672 err = EADDRNOTAVAIL;
3673 break;
3674 }
3675
3676 if (optname == IPV6_JOIN_GROUP) {
3677 if (!lwip_socket_register_mld6_membership(s, imr->ipv6mr_interface, &multi_addr)) {
3678 /* cannot track membership (out of memory) */
3679 err = ENOMEM;
3680 mld6_err = ERR_OK;
3681 } else {
3682 mld6_err = mld6_joingroup_netif(netif, &multi_addr);
3683 }
3684 } else {
3685 mld6_err = mld6_leavegroup_netif(netif, &multi_addr);
3686 lwip_socket_unregister_mld6_membership(s, imr->ipv6mr_interface, &multi_addr);
3687 }
3688 if (mld6_err != ERR_OK) {
3689 err = EADDRNOTAVAIL;
3690 }
3691 }
3692 break;
3693 #endif /* LWIP_IPV6_MLD */
3694 default:
3695 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_IPV6, UNIMPL: optname=0x%x, ..)\n",
3696 s, optname));
3697 err = ENOPROTOOPT;
3698 break;
3699 } /* switch (optname) */
3700 break;
3701 #endif /* LWIP_IPV6 */
3702
3703 #if LWIP_UDP && LWIP_UDPLITE
3704 /* Level: IPPROTO_UDPLITE */
3705 case IPPROTO_UDPLITE:
3706     /* Special case: all IPPROTO_UDPLITE options take an int */
3707 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, optlen, int);
3708     /* If this is not a UDP-Lite socket, ignore any options. */
3709 if (!NETCONNTYPE_ISUDPLITE(netconn_type(sock->conn))) {
3710 done_socket(sock);
3711 return ENOPROTOOPT;
3712 }
3713 switch (optname) {
3714 case UDPLITE_SEND_CSCOV:
3715 if ((*(const int *)optval != 0) && ((*(const int *)optval < 8) || (*(const int *)optval > 0xffff))) {
3716 /* don't allow illegal values! */
3717 sock->conn->pcb.udp->chksum_len_tx = 8;
3718 } else {
3719 sock->conn->pcb.udp->chksum_len_tx = (u16_t) * (const int *)optval;
3720 }
3721 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_UDPLITE, UDPLITE_SEND_CSCOV) -> %d\n",
3722 s, (*(const int *)optval)) );
3723 break;
3724 case UDPLITE_RECV_CSCOV:
3725 if ((*(const int *)optval != 0) && ((*(const int *)optval < 8) || (*(const int *)optval > 0xffff))) {
3726 /* don't allow illegal values! */
3727 sock->conn->pcb.udp->chksum_len_rx = 8;
3728 } else {
3729 sock->conn->pcb.udp->chksum_len_rx = (u16_t) * (const int *)optval;
3730 }
3731 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_UDPLITE, UDPLITE_RECV_CSCOV) -> %d\n",
3732 s, (*(const int *)optval)) );
3733 break;
3734 default:
3735 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_UDPLITE, UNIMPL: optname=0x%x, ..)\n",
3736 s, optname));
3737 err = ENOPROTOOPT;
3738 break;
3739 } /* switch (optname) */
3740 break;
3741 #endif /* LWIP_UDP && LWIP_UDPLITE */
3742 /* Level: IPPROTO_RAW */
3743 case IPPROTO_RAW:
3744 switch (optname) {
3745 #if LWIP_IPV6 && LWIP_RAW
3746 case IPV6_CHECKSUM:
3747 /* It should not be possible to disable the checksum generation with ICMPv6
3748 * as per RFC 3542 chapter 3.1 */
3749 if (sock->conn->pcb.raw->protocol == IPPROTO_ICMPV6) {
3750 done_socket(sock);
3751 return EINVAL;
3752 }
3753
3754 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, int, NETCONN_RAW);
3755 if (*(const int *)optval < 0) {
3756 sock->conn->pcb.raw->chksum_reqd = 0;
3757 } else if (*(const int *)optval & 1) {
3758 /* Per RFC3542, odd offsets are not allowed */
3759 done_socket(sock);
3760 return EINVAL;
3761 } else {
3762 sock->conn->pcb.raw->chksum_reqd = 1;
3763 sock->conn->pcb.raw->chksum_offset = (u16_t) * (const int *)optval;
3764 }
3765 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_RAW, IPV6_CHECKSUM, ..) -> %d\n",
3766 s, sock->conn->pcb.raw->chksum_reqd));
3767 break;
3768 #endif /* LWIP_IPV6 && LWIP_RAW */
3769 default:
3770 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_RAW, UNIMPL: optname=0x%x, ..)\n",
3771 s, optname));
3772 err = ENOPROTOOPT;
3773 break;
3774 } /* switch (optname) */
3775 break;
3776 default:
3777 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, level=0x%x, UNIMPL: optname=0x%x, ..)\n",
3778 s, level, optname));
3779 err = ENOPROTOOPT;
3780 break;
3781 } /* switch (level) */
3782
3783 done_socket(sock);
3784 return err;
3785 }
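
/* Illustrative usage sketch (editor's addition, not part of lwIP): enabling TCP
 * keepalive through the IPPROTO_TCP options handled above. Assumes a connected
 * TCP socket and LWIP_TCP_KEEPALIVE enabled; the idle/interval values are given
 * in seconds, since the implementation multiplies them by 1000 internally.
 */
#if 0 /* example only */
static int example_enable_tcp_keepalive(int s)
{
  int on = 1;
  int idle_s = 60;   /* seconds of idle time before the first probe */
  int intvl_s = 10;  /* seconds between unanswered probes */
  int cnt = 4;       /* unanswered probes before the connection is dropped */

  if (lwip_setsockopt(s, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on)) < 0) {
    return -1;
  }
  if ((lwip_setsockopt(s, IPPROTO_TCP, TCP_KEEPIDLE, &idle_s, sizeof(idle_s)) < 0) ||
      (lwip_setsockopt(s, IPPROTO_TCP, TCP_KEEPINTVL, &intvl_s, sizeof(intvl_s)) < 0) ||
      (lwip_setsockopt(s, IPPROTO_TCP, TCP_KEEPCNT, &cnt, sizeof(cnt)) < 0)) {
    return -1;
  }
  return 0;
}
#endif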
3786
3787 int
3788 lwip_ioctl(int s, long cmd, void *argp)
3789 {
3790 struct lwip_sock *sock = get_socket(s);
3791 u8_t val;
3792 #if LWIP_SO_RCVBUF
3793 int recv_avail;
3794 #endif /* LWIP_SO_RCVBUF */
3795
3796 if (!sock) {
3797 return -1;
3798 }
3799
3800 switch (cmd) {
3801 #if LWIP_SO_RCVBUF || LWIP_FIONREAD_LINUXMODE
3802 case FIONREAD:
3803 if (!argp) {
3804 set_errno(EINVAL);
3805 done_socket(sock);
3806 return -1;
3807 }
3808 #if LWIP_FIONREAD_LINUXMODE
3809 if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_TCP) {
3810 struct netbuf *nb;
3811 if (sock->lastdata.netbuf) {
3812 nb = sock->lastdata.netbuf;
3813 *((int *)argp) = nb->p->tot_len;
3814 } else {
3815 struct netbuf *rxbuf;
3816 err_t err = netconn_recv_udp_raw_netbuf_flags(sock->conn, &rxbuf, NETCONN_DONTBLOCK);
3817 if (err != ERR_OK) {
3818 *((int *)argp) = 0;
3819 } else {
3820 sock->lastdata.netbuf = rxbuf;
3821 *((int *)argp) = rxbuf->p->tot_len;
3822 }
3823 }
3824 done_socket(sock);
3825 return 0;
3826 }
3827 #endif /* LWIP_FIONREAD_LINUXMODE */
3828
3829 #if LWIP_SO_RCVBUF
3830 /* we come here if either LWIP_FIONREAD_LINUXMODE==0 or this is a TCP socket */
3831 SYS_ARCH_GET(sock->conn->recv_avail, recv_avail);
3832 if (recv_avail < 0) {
3833 recv_avail = 0;
3834 }
3835
3836 /* Check if there is data left from the last recv operation. /maq 041215 */
3837 if (sock->lastdata.netbuf) {
3838 if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP) {
3839 recv_avail += sock->lastdata.pbuf->tot_len;
3840 } else {
3841 recv_avail += sock->lastdata.netbuf->p->tot_len;
3842 }
3843 }
3844 *((int *)argp) = recv_avail;
3845
3846 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_ioctl(%d, FIONREAD, %p) = %"U16_F"\n", s, argp, *((u16_t *)argp)));
3847 set_errno(0);
3848 done_socket(sock);
3849 return 0;
3850 #else /* LWIP_SO_RCVBUF */
3851 break;
3852 #endif /* LWIP_SO_RCVBUF */
3853 #endif /* LWIP_SO_RCVBUF || LWIP_FIONREAD_LINUXMODE */
3854
3855 case (long)FIONBIO:
3856 val = 0;
3857 if (argp && *(int *)argp) {
3858 val = 1;
3859 }
3860 netconn_set_nonblocking(sock->conn, val);
3861 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_ioctl(%d, FIONBIO, %d)\n", s, val));
3862 set_errno(0);
3863 done_socket(sock);
3864 return 0;
3865
3866 default:
3867 break;
3868 } /* switch (cmd) */
3869 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_ioctl(%d, UNIMPL: 0x%lx, %p)\n", s, cmd, argp));
3870 set_errno(ENOSYS); /* not yet implemented */
3871 done_socket(sock);
3872 return -1;
3873 }
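
/* Illustrative usage sketch (editor's addition, not part of lwIP): the two
 * ioctl requests implemented above. FIONBIO toggles non-blocking mode;
 * FIONREAD requires LWIP_SO_RCVBUF or LWIP_FIONREAD_LINUXMODE to be enabled.
 */
#if 0 /* example only */
static int example_ioctl_usage(int s)
{
  int on = 1;
  int pending = 0;

  /* put the socket into non-blocking mode */
  if (lwip_ioctl(s, FIONBIO, &on) < 0) {
    return -1;
  }
  /* query the number of bytes that can be read without blocking */
  if (lwip_ioctl(s, FIONREAD, &pending) < 0) {
    return -1;
  }
  return pending;
}
#endif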
3874
3875 /** A minimal implementation of fcntl.
3876 * Currently only the commands F_GETFL and F_SETFL are implemented.
3877  * For F_GETFL, the flag O_NONBLOCK and the access modes are supported;
3878  * for F_SETFL, only the flag O_NONBLOCK is implemented.
3879 */
3880 int
3881 lwip_fcntl(int s, int cmd, int val)
3882 {
3883 struct lwip_sock *sock = get_socket(s);
3884 int ret = -1;
3885 int op_mode = 0;
3886
3887 if (!sock) {
3888 return -1;
3889 }
3890
3891 switch (cmd) {
3892 case F_GETFL:
3893 ret = netconn_is_nonblocking(sock->conn) ? O_NONBLOCK : 0;
3894 set_errno(0);
3895
3896 if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP) {
3897 #if LWIP_TCPIP_CORE_LOCKING
3898 LOCK_TCPIP_CORE();
3899 #else
3900 SYS_ARCH_DECL_PROTECT(lev);
3901 /* the proper thing to do here would be to get into the tcpip_thread,
3902 but locking should be OK as well since we only *read* some flags */
3903 SYS_ARCH_PROTECT(lev);
3904 #endif
3905 #if LWIP_TCP
3906 if (sock->conn->pcb.tcp) {
3907 if (!(sock->conn->pcb.tcp->flags & TF_RXCLOSED)) {
3908 op_mode |= O_RDONLY;
3909 }
3910 if (!(sock->conn->pcb.tcp->flags & TF_FIN)) {
3911 op_mode |= O_WRONLY;
3912 }
3913 }
3914 #endif
3915 #if LWIP_TCPIP_CORE_LOCKING
3916 UNLOCK_TCPIP_CORE();
3917 #else
3918 SYS_ARCH_UNPROTECT(lev);
3919 #endif
3920 } else {
3921 op_mode |= O_RDWR;
3922 }
3923
3924 /* ensure O_RDWR for (O_RDONLY|O_WRONLY) != O_RDWR cases */
3925 ret |= (op_mode == (O_RDONLY | O_WRONLY)) ? O_RDWR : op_mode;
3926
3927 break;
3928 case F_SETFL:
3929 /* Bits corresponding to the file access mode and the file creation flags [..] that are set in arg shall be ignored */
3930 val &= ~(O_RDONLY | O_WRONLY | O_RDWR);
3931 if ((val & ~O_NONBLOCK) == 0) {
3932 /* only O_NONBLOCK, all other bits are zero */
3933 netconn_set_nonblocking(sock->conn, val & O_NONBLOCK);
3934 ret = 0;
3935 set_errno(0);
3936 } else {
3937 set_errno(ENOSYS); /* not yet implemented */
3938 }
3939 break;
3940 default:
3941 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_fcntl(%d, UNIMPL: %d, %d)\n", s, cmd, val));
3942 set_errno(ENOSYS); /* not yet implemented */
3943 break;
3944 }
3945 done_socket(sock);
3946 return ret;
3947 }
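
/* Illustrative usage sketch (editor's addition, not part of lwIP): the usual
 * POSIX get-modify-set pattern with lwip_fcntl(). Only O_NONBLOCK can actually
 * be changed; any other status flag makes F_SETFL fail with ENOSYS, as above.
 */
#if 0 /* example only */
static int example_fcntl_nonblocking(int s)
{
  int flags = lwip_fcntl(s, F_GETFL, 0);
  if (flags < 0) {
    return -1;
  }
  /* access mode bits returned by F_GETFL are ignored by F_SETFL */
  return lwip_fcntl(s, F_SETFL, flags | O_NONBLOCK);
}
#endif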
3948
3949 #if LWIP_COMPAT_SOCKETS == 2 && LWIP_POSIX_SOCKETS_IO_NAMES
3950 int
3951 fcntl(int s, int cmd, ...)
3952 {
3953 va_list ap;
3954 int val;
3955
3956 va_start(ap, cmd);
3957 val = va_arg(ap, int);
3958 va_end(ap);
3959 return lwip_fcntl(s, cmd, val);
3960 }
3961 #endif
3962
3963 const char *
3964 lwip_inet_ntop(int af, const void *src, char *dst, socklen_t size)
3965 {
3966 const char *ret = NULL;
3967 int size_int = (int)size;
3968 if (size_int < 0) {
3969 set_errno(ENOSPC);
3970 return NULL;
3971 }
3972 switch (af) {
3973 #if LWIP_IPV4
3974 case AF_INET:
3975 ret = ip4addr_ntoa_r((const ip4_addr_t *)src, dst, size_int);
3976 if (ret == NULL) {
3977 set_errno(ENOSPC);
3978 }
3979 break;
3980 #endif
3981 #if LWIP_IPV6
3982 case AF_INET6:
3983 ret = ip6addr_ntoa_r((const ip6_addr_t *)src, dst, size_int);
3984 if (ret == NULL) {
3985 set_errno(ENOSPC);
3986 }
3987 break;
3988 #endif
3989 default:
3990 set_errno(EAFNOSUPPORT);
3991 break;
3992 }
3993 return ret;
3994 }
3995
3996 int
3997 lwip_inet_pton(int af, const char *src, void *dst)
3998 {
3999 int err;
4000 switch (af) {
4001 #if LWIP_IPV4
4002 case AF_INET:
4003 err = ip4addr_aton(src, (ip4_addr_t *)dst);
4004 break;
4005 #endif
4006 #if LWIP_IPV6
4007 case AF_INET6: {
4008 /* convert into temporary variable since ip6_addr_t might be larger
4009 than in6_addr when scopes are enabled */
4010 ip6_addr_t addr;
4011 err = ip6addr_aton(src, &addr);
4012 if (err) {
4013 memcpy(dst, &addr.addr, sizeof(addr.addr));
4014 }
4015 break;
4016 }
4017 #endif
4018 default:
4019 err = -1;
4020 set_errno(EAFNOSUPPORT);
4021 break;
4022 }
4023 return err;
4024 }
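
/* Illustrative usage sketch (editor's addition, not part of lwIP): round-trip
 * conversion of an IPv4 address with the two helpers above. Assumes LWIP_IPV4;
 * lwip_inet_pton() returns 1 on success, lwip_inet_ntop() returns NULL on error.
 */
#if 0 /* example only */
static void example_addr_conversion(void)
{
  struct in_addr ia;
  char buf[16]; /* large enough for a dotted-quad string */

  if (lwip_inet_pton(AF_INET, "192.0.2.1", &ia) == 1) {
    if (lwip_inet_ntop(AF_INET, &ia, buf, sizeof(buf)) != NULL) {
      /* buf now contains "192.0.2.1" again */
    }
  }
}
#endif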
4025
4026 #if LWIP_IGMP
4027 /** Register a new IGMP membership. On socket close, the membership is dropped automatically.
4028 *
4029 * ATTENTION: this function is called from tcpip_thread (or under CORE_LOCK).
4030 *
4031 * @return 1 on success, 0 on failure
4032 */
4033 static int
4034 lwip_socket_register_membership(int s, const ip4_addr_t *if_addr, const ip4_addr_t *multi_addr)
4035 {
4036 struct lwip_sock *sock = get_socket(s);
4037 int i;
4038
4039 if (!sock) {
4040 return 0;
4041 }
4042
4043 for (i = 0; i < LWIP_SOCKET_MAX_MEMBERSHIPS; i++) {
4044 if (socket_ipv4_multicast_memberships[i].sock == NULL) {
4045 socket_ipv4_multicast_memberships[i].sock = sock;
4046 ip4_addr_copy(socket_ipv4_multicast_memberships[i].if_addr, *if_addr);
4047 ip4_addr_copy(socket_ipv4_multicast_memberships[i].multi_addr, *multi_addr);
4048 done_socket(sock);
4049 return 1;
4050 }
4051 }
4052 done_socket(sock);
4053 return 0;
4054 }
4055
4056 /** Unregister a previously registered membership. This prevents dropping the membership
4057 * on socket close.
4058 *
4059 * ATTENTION: this function is called from tcpip_thread (or under CORE_LOCK).
4060 */
4061 static void
4062 lwip_socket_unregister_membership(int s, const ip4_addr_t *if_addr, const ip4_addr_t *multi_addr)
4063 {
4064 struct lwip_sock *sock = get_socket(s);
4065 int i;
4066
4067 if (!sock) {
4068 return;
4069 }
4070
4071 for (i = 0; i < LWIP_SOCKET_MAX_MEMBERSHIPS; i++) {
4072 if ((socket_ipv4_multicast_memberships[i].sock == sock) &&
4073 ip4_addr_eq(&socket_ipv4_multicast_memberships[i].if_addr, if_addr) &&
4074 ip4_addr_eq(&socket_ipv4_multicast_memberships[i].multi_addr, multi_addr)) {
4075 socket_ipv4_multicast_memberships[i].sock = NULL;
4076 ip4_addr_set_zero(&socket_ipv4_multicast_memberships[i].if_addr);
4077 ip4_addr_set_zero(&socket_ipv4_multicast_memberships[i].multi_addr);
4078 break;
4079 }
4080 }
4081 done_socket(sock);
4082 }
4083
4084 /** Drop all memberships of a socket that were not dropped explicitly via setsockopt.
4085 *
4086 * ATTENTION: this function is NOT called from tcpip_thread (or under CORE_LOCK).
4087 */
4088 static void
4089 lwip_socket_drop_registered_memberships(int s)
4090 {
4091 struct lwip_sock *sock = get_socket(s);
4092 int i;
4093
4094 if (!sock) {
4095 return;
4096 }
4097
4098 for (i = 0; i < LWIP_SOCKET_MAX_MEMBERSHIPS; i++) {
4099 if (socket_ipv4_multicast_memberships[i].sock == sock) {
4100 ip_addr_t multi_addr, if_addr;
4101 ip_addr_copy_from_ip4(multi_addr, socket_ipv4_multicast_memberships[i].multi_addr);
4102 ip_addr_copy_from_ip4(if_addr, socket_ipv4_multicast_memberships[i].if_addr);
4103 socket_ipv4_multicast_memberships[i].sock = NULL;
4104 ip4_addr_set_zero(&socket_ipv4_multicast_memberships[i].if_addr);
4105 ip4_addr_set_zero(&socket_ipv4_multicast_memberships[i].multi_addr);
4106
4107 netconn_join_leave_group(sock->conn, &multi_addr, &if_addr, NETCONN_LEAVE);
4108 }
4109 }
4110 done_socket(sock);
4111 }
4112 #endif /* LWIP_IGMP */
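
/* Illustrative usage sketch (editor's addition, not part of lwIP): joining an
 * IPv4 multicast group from application code. The membership is tracked by
 * lwip_socket_register_membership() above and dropped automatically when the
 * socket is closed. Assumes a UDP socket, LWIP_IGMP enabled, and the group
 * address 239.0.0.1 chosen purely for illustration.
 */
#if 0 /* example only */
static int example_join_ipv4_group(int s)
{
  struct ip_mreq mreq;
  mreq.imr_multiaddr.s_addr = PP_HTONL(0xEF000001UL); /* 239.0.0.1 */
  mreq.imr_interface.s_addr = PP_HTONL(INADDR_ANY);   /* let the stack pick the netif */
  return lwip_setsockopt(s, IPPROTO_IP, IP_ADD_MEMBERSHIP, &mreq, sizeof(mreq));
}
#endif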
4113
4114 #if LWIP_IPV6_MLD
4115 /** Register a new MLD6 membership. On socket close, the membership is dropped automatically.
4116 *
4117 * ATTENTION: this function is called from tcpip_thread (or under CORE_LOCK).
4118 *
4119 * @return 1 on success, 0 on failure
4120 */
4121 static int
4122 lwip_socket_register_mld6_membership(int s, unsigned int if_idx, const ip6_addr_t *multi_addr)
4123 {
4124 struct lwip_sock *sock = get_socket(s);
4125 int i;
4126
4127 if (!sock) {
4128 return 0;
4129 }
4130
4131 for (i = 0; i < LWIP_SOCKET_MAX_MEMBERSHIPS; i++) {
4132 if (socket_ipv6_multicast_memberships[i].sock == NULL) {
4133 socket_ipv6_multicast_memberships[i].sock = sock;
4134 socket_ipv6_multicast_memberships[i].if_idx = (u8_t)if_idx;
4135 ip6_addr_copy(socket_ipv6_multicast_memberships[i].multi_addr, *multi_addr);
4136 done_socket(sock);
4137 return 1;
4138 }
4139 }
4140 done_socket(sock);
4141 return 0;
4142 }
4143
4144 /** Unregister a previously registered MLD6 membership. This prevents dropping the membership
4145 * on socket close.
4146 *
4147 * ATTENTION: this function is called from tcpip_thread (or under CORE_LOCK).
4148 */
4149 static void
4150 lwip_socket_unregister_mld6_membership(int s, unsigned int if_idx, const ip6_addr_t *multi_addr)
4151 {
4152 struct lwip_sock *sock = get_socket(s);
4153 int i;
4154
4155 if (!sock) {
4156 return;
4157 }
4158
4159 for (i = 0; i < LWIP_SOCKET_MAX_MEMBERSHIPS; i++) {
4160 if ((socket_ipv6_multicast_memberships[i].sock == sock) &&
4161 (socket_ipv6_multicast_memberships[i].if_idx == if_idx) &&
4162 ip6_addr_eq(&socket_ipv6_multicast_memberships[i].multi_addr, multi_addr)) {
4163 socket_ipv6_multicast_memberships[i].sock = NULL;
4164 socket_ipv6_multicast_memberships[i].if_idx = NETIF_NO_INDEX;
4165 ip6_addr_set_zero(&socket_ipv6_multicast_memberships[i].multi_addr);
4166 break;
4167 }
4168 }
4169 done_socket(sock);
4170 }
4171
4172 /** Drop all MLD6 memberships of a socket that were not dropped explicitly via setsockopt.
4173 *
4174 * ATTENTION: this function is NOT called from tcpip_thread (or under CORE_LOCK).
4175 */
4176 static void
4177 lwip_socket_drop_registered_mld6_memberships(int s)
4178 {
4179 struct lwip_sock *sock = get_socket(s);
4180 int i;
4181
4182 if (!sock) {
4183 return;
4184 }
4185
4186 for (i = 0; i < LWIP_SOCKET_MAX_MEMBERSHIPS; i++) {
4187 if (socket_ipv6_multicast_memberships[i].sock == sock) {
4188 ip_addr_t multi_addr;
4189 u8_t if_idx;
4190
4191 ip_addr_copy_from_ip6(multi_addr, socket_ipv6_multicast_memberships[i].multi_addr);
4192 if_idx = socket_ipv6_multicast_memberships[i].if_idx;
4193
4194 socket_ipv6_multicast_memberships[i].sock = NULL;
4195 socket_ipv6_multicast_memberships[i].if_idx = NETIF_NO_INDEX;
4196 ip6_addr_set_zero(&socket_ipv6_multicast_memberships[i].multi_addr);
4197
4198 netconn_join_leave_group_netif(sock->conn, &multi_addr, if_idx, NETCONN_LEAVE);
4199 }
4200 }
4201 done_socket(sock);
4202 }
4203 #endif /* LWIP_IPV6_MLD */
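
/* Illustrative usage sketch (editor's addition, not part of lwIP): the IPv6
 * counterpart, joining a multicast group via IPV6_JOIN_GROUP. The membership is
 * tracked by lwip_socket_register_mld6_membership() above and dropped when the
 * socket is closed. Assumes a UDP socket, LWIP_IPV6_MLD enabled and a valid
 * netif index; the group ff02::1:2 is chosen purely for illustration.
 */
#if 0 /* example only */
static int example_join_ipv6_group(int s6, unsigned int netif_idx)
{
  struct ipv6_mreq mreq;
  memset(&mreq, 0, sizeof(mreq));
  if (lwip_inet_pton(AF_INET6, "ff02::1:2", &mreq.ipv6mr_multiaddr) != 1) {
    return -1;
  }
  mreq.ipv6mr_interface = netif_idx;
  return lwip_setsockopt(s6, IPPROTO_IPV6, IPV6_JOIN_GROUP, &mreq, sizeof(mreq));
}
#endif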
4204
4205 #endif /* LWIP_SOCKET */
4206