1 //
2 // Copyright 2020 Staysail Systems, Inc. <info@staysail.tech>
3 // Copyright 2018 Capitar IT Group BV <info@capitar.com>
4 //
5 // This software is supplied under the terms of the MIT License, a
6 // copy of which should be located in the distribution where this
7 // file was obtained (LICENSE.txt). A copy of the license may also be
8 // found online at https://opensource.org/licenses/MIT.
9 //
10
11 #include <string.h>
12
13 #include "core/nng_impl.h"
14 #include "nng/protocol/pubsub0/pub.h"
15
16 // Publish protocol. The PUB protocol simply sends messages out, as
17 // a broadcast. It has nothing more sophisticated because it does not
// perform sender-side filtering. It's best-effort delivery, so anything
19 // that can't receive the message won't get one.
20
21 #ifndef NNI_PROTO_SUB_V0
22 #define NNI_PROTO_SUB_V0 NNI_PROTO(2, 1)
23 #endif
24
25 #ifndef NNI_PROTO_PUB_V0
26 #define NNI_PROTO_PUB_V0 NNI_PROTO(2, 0)
27 #endif
28
29 typedef struct pub0_pipe pub0_pipe;
30 typedef struct pub0_sock pub0_sock;
31
32 static void pub0_pipe_recv_cb(void *);
33 static void pub0_pipe_send_cb(void *);
34 static void pub0_sock_fini(void *);
35 static void pub0_pipe_fini(void *);
36
// pub0_sock is our per-socket protocol private structure.
struct pub0_sock {
	nni_list pipes;         // connected SUB pipes; guarded by mtx
	nni_mtx mtx;            // guards pipes, sendbuf, and per-pipe queue state
	bool closed;            // NOTE(review): declared but never set in this file
	size_t sendbuf;         // per-pipe send queue depth (NNG_OPT_SENDBUF)
	nni_pollable *sendable; // backs NNG_OPT_SENDFD; always raised (PUB never blocks)
};
45
// pub0_pipe is our per-pipe protocol private structure.
struct pub0_pipe {
	nni_pipe * pipe;     // underlying transport pipe
	pub0_sock * pub;     // back pointer to owning socket
	nni_lmq sendq;       // backlog of messages waiting to be sent
	bool closed;         // set in pub0_pipe_close, under pub->mtx
	bool busy;           // true while a send is in flight on aio_send
	nni_aio * aio_send;  // asynchronous send operation
	nni_aio * aio_recv;  // asynchronous recv; used only to detect pipe close
	nni_list_node node;  // linkage on pub->pipes list
};
57
58 static void
pub0_sock_fini(void * arg)59 pub0_sock_fini(void *arg)
60 {
61 pub0_sock *s = arg;
62
63 nni_pollable_free(s->sendable);
64 nni_mtx_fini(&s->mtx);
65 }
66
67 static int
pub0_sock_init(void * arg,nni_sock * nsock)68 pub0_sock_init(void *arg, nni_sock *nsock)
69 {
70 pub0_sock *sock = arg;
71 int rv;
72 NNI_ARG_UNUSED(nsock);
73
74 if ((rv = nni_pollable_alloc(&sock->sendable)) != 0) {
75 return (rv);
76 }
77 nni_mtx_init(&sock->mtx);
78 NNI_LIST_INIT(&sock->pipes, pub0_pipe, node);
79 sock->sendbuf = 16; // fairly arbitrary
80 return (0);
81 }
82
static void
pub0_sock_open(void *arg)
{
	// No work is needed when the socket opens.
	NNI_ARG_UNUSED(arg);
}
88
static void
pub0_sock_close(void *arg)
{
	// No socket-level work on close; pipes are closed individually.
	NNI_ARG_UNUSED(arg);
}
94
95 static void
pub0_pipe_stop(void * arg)96 pub0_pipe_stop(void *arg)
97 {
98 pub0_pipe *p = arg;
99
100 nni_aio_stop(p->aio_send);
101 nni_aio_stop(p->aio_recv);
102 }
103
104 static void
pub0_pipe_fini(void * arg)105 pub0_pipe_fini(void *arg)
106 {
107 pub0_pipe *p = arg;
108
109 nni_aio_free(p->aio_send);
110 nni_aio_free(p->aio_recv);
111 nni_lmq_fini(&p->sendq);
112 }
113
114 static int
pub0_pipe_init(void * arg,nni_pipe * pipe,void * s)115 pub0_pipe_init(void *arg, nni_pipe *pipe, void *s)
116 {
117 pub0_pipe *p = arg;
118 pub0_sock *sock = s;
119 int rv;
120 size_t len;
121
122 nni_mtx_lock(&sock->mtx);
123 len = sock->sendbuf;
124 nni_mtx_unlock(&sock->mtx);
125
126 // XXX: consider making this depth tunable
127 if (((rv = nni_lmq_init(&p->sendq, len)) != 0) ||
128 ((rv = nni_aio_alloc(&p->aio_send, pub0_pipe_send_cb, p)) != 0) ||
129 ((rv = nni_aio_alloc(&p->aio_recv, pub0_pipe_recv_cb, p)) != 0)) {
130
131 pub0_pipe_fini(p);
132 return (rv);
133 }
134
135 p->busy = false;
136 p->pipe = pipe;
137 p->pub = s;
138 return (0);
139 }
140
141 static int
pub0_pipe_start(void * arg)142 pub0_pipe_start(void *arg)
143 {
144 pub0_pipe *p = arg;
145 pub0_sock *sock = p->pub;
146
147 if (nni_pipe_peer(p->pipe) != NNI_PROTO_SUB_V0) {
148 return (NNG_EPROTO);
149 }
150 nni_mtx_lock(&sock->mtx);
151 nni_list_append(&sock->pipes, p);
152 nni_mtx_unlock(&sock->mtx);
153
154 // Start the receiver.
155 nni_pipe_recv(p->pipe, p->aio_recv);
156
157 return (0);
158 }
159
160 static void
pub0_pipe_close(void * arg)161 pub0_pipe_close(void *arg)
162 {
163 pub0_pipe *p = arg;
164 pub0_sock *sock = p->pub;
165
166 nni_aio_close(p->aio_send);
167 nni_aio_close(p->aio_recv);
168
169 nni_mtx_lock(&sock->mtx);
170 p->closed = true;
171 nni_lmq_flush(&p->sendq);
172
173 if (nni_list_active(&sock->pipes, p)) {
174 nni_list_remove(&sock->pipes, p);
175 }
176 nni_mtx_unlock(&sock->mtx);
177 }
178
179 static void
pub0_pipe_recv_cb(void * arg)180 pub0_pipe_recv_cb(void *arg)
181 {
182 pub0_pipe *p = arg;
183
184 // We should never receive a message -- the only valid reason for us to
185 // be here is on pipe close.
186 if (nni_aio_result(p->aio_recv) == 0) {
187 nni_msg_free(nni_aio_get_msg(p->aio_recv));
188 }
189 nni_pipe_close(p->pipe);
190 }
191
192 static void
pub0_pipe_send_cb(void * arg)193 pub0_pipe_send_cb(void *arg)
194 {
195 pub0_pipe *p = arg;
196 pub0_sock *sock = p->pub;
197 nni_msg * msg;
198
199 if (nni_aio_result(p->aio_send) != 0) {
200 nni_msg_free(nni_aio_get_msg(p->aio_send));
201 nni_aio_set_msg(p->aio_send, NULL);
202 nni_pipe_close(p->pipe);
203 return;
204 }
205
206 nni_mtx_lock(&sock->mtx);
207 if (p->closed) {
208 nni_mtx_unlock(&sock->mtx);
209 return;
210 }
211 if (nni_lmq_getq(&p->sendq, &msg) == 0) {
212 nni_aio_set_msg(p->aio_send, msg);
213 nni_pipe_send(p->pipe, p->aio_send);
214 } else {
215 p->busy = false;
216 }
217 nni_mtx_unlock(&sock->mtx);
218 }
219
220 static void
pub0_sock_recv(void * arg,nni_aio * aio)221 pub0_sock_recv(void *arg, nni_aio *aio)
222 {
223 NNI_ARG_UNUSED(arg);
224 if (nni_aio_begin(aio) == 0) {
225 nni_aio_finish_error(aio, NNG_ENOTSUP);
226 }
227 }
228
229 static void
pub0_sock_send(void * arg,nni_aio * aio)230 pub0_sock_send(void *arg, nni_aio *aio)
231 {
232 pub0_sock *sock = arg;
233 pub0_pipe *p;
234 nng_msg * msg;
235 size_t len;
236
237 msg = nni_aio_get_msg(aio);
238 len = nni_msg_len(msg);
239 nni_mtx_lock(&sock->mtx);
240 NNI_LIST_FOREACH (&sock->pipes, p) {
241
242 nni_msg_clone(msg);
243 if (p->busy) {
244 if (nni_lmq_full(&p->sendq)) {
245 // Make space for the new message.
246 nni_msg *old;
247 (void) nni_lmq_getq(&p->sendq, &old);
248 nni_msg_free(old);
249 }
250 nni_lmq_putq(&p->sendq, msg);
251 } else {
252 p->busy = true;
253 nni_aio_set_msg(p->aio_send, msg);
254 nni_pipe_send(p->pipe, p->aio_send);
255 }
256 }
257 nni_mtx_unlock(&sock->mtx);
258 nng_msg_free(msg);
259 nni_aio_finish(aio, 0, len);
260 }
261
262 static int
pub0_sock_get_sendfd(void * arg,void * buf,size_t * szp,nni_type t)263 pub0_sock_get_sendfd(void *arg, void *buf, size_t *szp, nni_type t)
264 {
265 pub0_sock *sock = arg;
266 int fd;
267 int rv;
268 nni_mtx_lock(&sock->mtx);
269 // PUB sockets are *always* writable.
270 nni_pollable_raise(sock->sendable);
271 rv = nni_pollable_getfd(sock->sendable, &fd);
272 nni_mtx_unlock(&sock->mtx);
273
274 if (rv == 0) {
275 rv = nni_copyout_int(fd, buf, szp, t);
276 }
277 return (rv);
278 }
279
280 static int
pub0_sock_set_sendbuf(void * arg,const void * buf,size_t sz,nni_type t)281 pub0_sock_set_sendbuf(void *arg, const void *buf, size_t sz, nni_type t)
282 {
283 pub0_sock *sock = arg;
284 pub0_pipe *p;
285 int val;
286 int rv;
287
288 if ((rv = nni_copyin_int(&val, buf, sz, 1, 8192, t)) != 0) {
289 return (rv);
290 }
291
292 nni_mtx_lock(&sock->mtx);
293 sock->sendbuf = (size_t) val;
294 NNI_LIST_FOREACH (&sock->pipes, p) {
295 // If we fail part way thru (should only be ENOMEM), we
296 // stop short. The others would likely fail for ENOMEM as
297 // well anyway. There is a weird effect here where the
298 // buffers may have been set for *some* of the pipes, but
299 // we have no way to correct partial failure.
300 if ((rv = nni_lmq_resize(&p->sendq, (size_t) val)) != 0) {
301 break;
302 }
303 }
304 nni_mtx_unlock(&sock->mtx);
305 return (rv);
306 }
307
308 static int
pub0_sock_get_sendbuf(void * arg,void * buf,size_t * szp,nni_type t)309 pub0_sock_get_sendbuf(void *arg, void *buf, size_t *szp, nni_type t)
310 {
311 pub0_sock *sock = arg;
312 int val;
313 nni_mtx_lock(&sock->mtx);
314 val = (int) sock->sendbuf;
315 nni_mtx_unlock(&sock->mtx);
316 return (nni_copyout_int(val, buf, szp, t));
317 }
318
// Per-pipe operations vector registered with the protocol core.
static nni_proto_pipe_ops pub0_pipe_ops = {
	.pipe_size  = sizeof(pub0_pipe),
	.pipe_init  = pub0_pipe_init,
	.pipe_fini  = pub0_pipe_fini,
	.pipe_start = pub0_pipe_start,
	.pipe_close = pub0_pipe_close,
	.pipe_stop  = pub0_pipe_stop,
};
327
// Socket options.  NNG_OPT_SENDFD is read-only; NNG_OPT_SENDBUF is
// both readable and writable.
static nni_option pub0_sock_options[] = {
	{
	    .o_name = NNG_OPT_SENDFD,
	    .o_get  = pub0_sock_get_sendfd,
	},
	{
	    .o_name = NNG_OPT_SENDBUF,
	    .o_get  = pub0_sock_get_sendbuf,
	    .o_set  = pub0_sock_set_sendbuf,
	},
	// terminate list
	{
	    .o_name = NULL,
	},
};
343
// Per-socket operations vector registered with the protocol core.
static nni_proto_sock_ops pub0_sock_ops = {
	.sock_size    = sizeof(pub0_sock),
	.sock_init    = pub0_sock_init,
	.sock_fini    = pub0_sock_fini,
	.sock_open    = pub0_sock_open,
	.sock_close   = pub0_sock_close,
	.sock_send    = pub0_sock_send,
	.sock_recv    = pub0_sock_recv,
	.sock_options = pub0_sock_options,
};
354
// Protocol descriptor for the normal (cooked-mode) PUB socket.
static nni_proto pub0_proto = {
	.proto_version  = NNI_PROTOCOL_VERSION,
	.proto_self     = { NNI_PROTO_PUB_V0, "pub" },
	.proto_peer     = { NNI_PROTO_SUB_V0, "sub" },
	.proto_flags    = NNI_PROTO_FLAG_SND, // send-only protocol
	.proto_sock_ops = &pub0_sock_ops,
	.proto_pipe_ops = &pub0_pipe_ops,
};
363
// Protocol descriptor for the raw-mode PUB socket; identical to the
// cooked version except for the NNI_PROTO_FLAG_RAW flag.
static nni_proto pub0_proto_raw = {
	.proto_version  = NNI_PROTOCOL_VERSION,
	.proto_self     = { NNI_PROTO_PUB_V0, "pub" },
	.proto_peer     = { NNI_PROTO_SUB_V0, "sub" },
	.proto_flags    = NNI_PROTO_FLAG_SND | NNI_PROTO_FLAG_RAW,
	.proto_sock_ops = &pub0_sock_ops,
	.proto_pipe_ops = &pub0_pipe_ops,
};
372
373 int
nng_pub0_open(nng_socket * sidp)374 nng_pub0_open(nng_socket *sidp)
375 {
376 return (nni_proto_open(sidp, &pub0_proto));
377 }
378
379 int
nng_pub0_open_raw(nng_socket * sidp)380 nng_pub0_open_raw(nng_socket *sidp)
381 {
382 return (nni_proto_open(sidp, &pub0_proto_raw));
383 }
384