1 /* -*- Mode: C; tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- */
2 /*
3 * Copyright 2014 Couchbase, Inc.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
#include <errno.h>

#include "ssl_iot_common.h"
#include "sllist.h"
#include "sllist-inl.h"
21
/* throw-away write buffer structure (for encoded data) */
typedef struct {
    void *parent; /* back-pointer to the owning lcbio_CSSL (read in the I/O callbacks) */
    char buf[1];  /* start of payload; over-allocated as malloc(sizeof(*wb) + n).
                   * NOTE(review): a C99 flexible array member would be the modern
                   * idiom, but the [1] form is relied on by the sizeof math at the
                   * allocation sites — leave as-is. */
} my_WBUF;
27
/* throw-away write buffer structure (for application data) */
typedef struct {
    sllist_node slnode;         /* linkage within lcbio_CSSL::writes */
    lcb_ioC_write2_callback cb; /* user completion callback */
    void *uarg;                 /* user argument handed back to cb */
    void *iovroot_;             /* original malloc'd IOV array (what gets free()d) */
    lcb_IOV *iov;               /* cursor into iovroot_; advanced as data is encoded */
    lcb_size_t niov;            /* IOVs remaining; 0 means fully flushed */
} my_WCTX;
37
/* Per-socket state for the completion-style ("C") SSL wrapper. */
typedef struct {
    IOTSSL_COMMON_FIELDS
    lcb_sockdata_t *sd; /**< Socket pointer */
    lcbio_pTIMER as_read; /**< For callbacks when SSL_pending > 0 */
    lcbio_pTIMER as_write; /**< For callbacks when SSL_writes succeeds */
    lcb_IOV urd_iov; /**< User-defined buffer to read in application data */
    void *urd_arg; /**< User-defined argument for read callback */
    my_WCTX *wctx_cached; /**< Single-entry cache so each write needn't malloc a my_WCTX */
    lcb_ioC_read2_callback urd_cb; /**< User defined read callback */
    sllist_root writes; /**< List of pending user writes */

    /**
     * Whether a current read request is active. This read request refers to
     * this module reading raw data from the actual underlying socket. The
     * presence of a user-level (i.e. lcbio-invoked) read request is determined
     * by the presence of a non-NULL urd_cb value
     */
    int rdactive;

    int closed; /**< Pending delivery of close */
    int entered; /**< Non-zero while inside read_callback; gates SCHEDULE_WANT_SAFE */
} lcbio_CSSL;
60
/* Recover the lcbio_CSSL wrapper from the user-visible iops handle */
#define CS_FROM_IOPS(iops) (lcbio_CSSL *)IOTSSL_FROM_IOPS(iops)
/* Invoke schedule_wants() only when not re-entrantly inside read_callback.
 * NOTE(review): deliberately NOT do{...}while(0)-wrapped — one call site
 * (in appdata_encode) uses it without a trailing semicolon, so changing the
 * expansion would require touching the callers as well. */
#define SCHEDULE_WANT_SAFE(cs) if (!(cs)->entered) { schedule_wants(cs); }

static void appdata_encode(lcbio_CSSL *);
static void appdata_free_flushed(lcbio_CSSL *);
static void appdata_read(lcbio_CSSL *);
static void schedule_wants(lcbio_CSSL *cs);
/* Map an SSL_* return code into the shared lcbio_XSSL error state.
 * Returns 0 when the condition is a benign WANT_READ/WANT_WRITE, non-zero
 * when a hard error was recorded. */
static int
maybe_set_error(lcbio_CSSL *cs, int rv)
{
    lcbio_XSSL *xs = (lcbio_XSSL *)cs;
    return iotssl_maybe_error(xs, rv);
}
72
73 /* This function goes through all the pending copies of data that was scheduled
74 * for write and where the current IOV position is at the end (or niov==0).
75 * For each of those routines this function will invoke its write callback
76 */
77 static void
appdata_free_flushed(lcbio_CSSL * cs)78 appdata_free_flushed(lcbio_CSSL *cs)
79 {
80 sllist_iterator iter;
81 SLLIST_ITERFOR(&cs->writes, &iter) {
82 my_WCTX *cur = SLLIST_ITEM(iter.cur, my_WCTX, slnode);
83 if (cur->niov && cs->error == 0) {
84 break;
85 }
86 /* invoke the callback */
87 cur->cb(cs->sd, cs->error?-1:0, cur->uarg);
88 sllist_iter_remove(&cs->writes, &iter);
89 free(cur->iovroot_);
90 if (cs->wctx_cached) {
91 free(cur);
92 } else {
93 cs->wctx_cached = cur;
94 }
95 }
96 }
97
/* This function will attempt to encode pending user data into SSL data. This
 * will be output to the wbio. */
static void
appdata_encode(lcbio_CSSL *cs)
{
    sllist_node *cur;

    /* each element here represents a user-defined write buffer */
    SLLIST_FOREACH(&cs->writes, cur) {
        my_WCTX *ctx = SLLIST_ITEM(cur, my_WCTX, slnode);

        /* iov/niov are advanced in place as buffers are consumed so that
         * appdata_free_flushed() can later recognize a fully-written entry
         * by niov == 0 */
        for (; ctx->niov && cs->error == 0; ctx->niov--, ctx->iov++) {
            int rv;

            lcb_assert(ctx->iov->iov_len);
            rv = SSL_write(cs->ssl, ctx->iov->iov_base, ctx->iov->iov_len);
            if (rv > 0) {
                continue;
            } else if (maybe_set_error(cs, rv) == 0) {
                /* SSL_ERROR_WANT_READ. Should schedule a read here.
                 * XXX: Note that this buffer will not be returned to the user
                 * until the _next_ time the appdata_free_flushed function is
                 * invoked; the call chain for appdata_free_flushed is like this:
                 *
                 * start_write2 => async_schedule(async_write) => appdata_free_flushed.
                 * OR
                 * start_write2 => write_callback => appdata_free_flushed
                 */
                SCHEDULE_WANT_SAFE(cs)
                return;
            } else {
                /* hard SSL failure: maybe_set_error() set cs->error, so the
                 * loop condition terminates on the next check */
                IOTSSL_ERRNO(cs) = EINVAL;
            }
        }
    }
}
134
135 static void
async_write(void * arg)136 async_write(void *arg)
137 {
138 lcbio_CSSL *cs = arg;
139 appdata_encode(cs);
140 schedule_wants(cs);
141 appdata_free_flushed(cs);
142 }
143
144 /* Called when SSL data has been written to the socket */
145 static void
write_callback(lcb_sockdata_t * sd,int status,void * arg)146 write_callback(lcb_sockdata_t *sd, int status, void *arg)
147 {
148 my_WBUF *wb = arg;
149 lcbio_CSSL *cs = wb->parent;
150
151 if (status) {
152 IOTSSL_ERRNO(cs) = IOT_ERRNO(cs->orig);
153 cs->error = 1;
154 }
155
156 free(wb);
157
158 appdata_free_flushed(cs);
159 lcbio_table_unref(&cs->base_);
160 (void) sd;
161 }
162
163 /* Read application data from SSL's rbio buffer. Invokes the user callback
164 * for the current read operation if there is data */
165 static void
appdata_read(lcbio_CSSL * cs)166 appdata_read(lcbio_CSSL *cs)
167 {
168 /* either an error or an actual read event */
169 int nr;
170 lcb_ioC_read2_callback cb = cs->urd_cb;
171 if (!cb) {
172 return;
173 }
174 lcb_assert(!cs->rdactive);
175 nr = SSL_read(cs->ssl, cs->urd_iov.iov_base, cs->urd_iov.iov_len);
176 if (nr > 0) {
177 /* nothing */
178 } else if (cs->closed || nr == 0) {
179 nr = 0;
180 } else if (maybe_set_error(cs, nr) == 0) {
181 return;
182 }
183
184 cs->urd_cb = NULL;
185 cb(cs->sd, nr, cs->urd_arg);
186 }
187
/* Invoked when SSL data has been read from the socket */
static void
read_callback(lcb_sockdata_t *sd, lcb_ssize_t nr, void *arg)
{
#if LCB_CAN_OPTIMIZE_SSL_BIO
    lcbio_CSSL *cs = arg;
#else
    my_WBUF *rb = arg;
    lcbio_CSSL *cs = rb->parent;
#endif

    cs->rdactive = 0;
    cs->entered++; /* suppress re-entrant schedule_wants() via SCHEDULE_WANT_SAFE */

    if (nr > 0) {
#if LCB_CAN_OPTIMIZE_SSL_BIO
        BUF_MEM *mb;

        /* Data was read directly into the memory BIO's backing buffer (see
         * schedule_wants); just extend its length to cover the new bytes */
        BIO_clear_retry_flags(cs->rbio);
        BIO_get_mem_ptr(cs->rbio, &mb);
        mb->length += nr;
#else
        /* copy the temporary buffer into the read BIO */
        BIO_write(cs->rbio, rb->buf, nr);
#endif

    } else if (nr == 0) {
        /* orderly shutdown by the peer */
        cs->closed = 1;
        cs->error = 1;

    } else {
        /* transport-level error */
        cs->error = 1;
        IOTSSL_ERRNO(cs) = IOT_ERRNO(cs->orig);
    }
#if !LCB_CAN_OPTIMIZE_SSL_BIO
    free(rb);
#endif

    /* freshly arrived ciphertext may unblock both pending writes and reads */
    appdata_encode(cs);
    appdata_read(cs);

    cs->entered--;
    schedule_wants(cs);
    /* balances the lcbio_table_ref() taken when this read was scheduled */
    lcbio_table_unref(&cs->base_);
    (void) sd;
}
233
234
235 /* This function schedules any I/O on the actual socket. It writes encoded
236 * data and requests to read decoded data */
237 static void
schedule_wants(lcbio_CSSL * cs)238 schedule_wants(lcbio_CSSL *cs)
239 {
240 size_t npend = BIO_ctrl_pending(cs->wbio);
241 char dummy;
242
243 int has_appdata = 0;
244
245 if (SSL_peek(cs->ssl, &dummy, 1) == 1) {
246 has_appdata = 1;
247 }
248
249 if (npend) {
250 /* Have pending data to write. The buffer is copied here because the
251 * BIO structure doesn't support "lockdown" semantics like netbuf/rdb
252 * do. We might transplant this with a different sort of BIO eventually..
253 */
254 my_WBUF *wb = malloc(sizeof(*wb) + npend);
255 lcb_IOV iov;
256 BIO_read(cs->wbio, wb->buf, npend);
257 iov.iov_base = wb->buf;
258 iov.iov_len = npend;
259 wb->parent = cs;
260
261 /* Increment the reference count. This is decremented when we get back
262 * the callback. The goal is that a pending internal SSL_write() should
263 * keep the object alive despite the user having called lcbio_table_unref()
264 * on us.
265 */
266 lcbio_table_ref(&cs->base_);
267 IOT_V1(cs->orig).write2(
268 IOT_ARG(cs->orig), cs->sd, &iov, 1, wb, write_callback);
269 }
270
271 /* Only schedule additional reads if we're not already in the process of a
272 * read */
273
274 if (cs->rdactive == 0) {
275 if (cs->error) {
276 /* This can happen if we got an SSL error in performing something
277 * within this callback.
278 *
279 * In this case, just signal "as-if" a read happened. appdata_read
280 * will do the right thing if there is no read callback, and will
281 * return an error if SSL_read() fails (which it should).
282 */
283 lcbio_async_signal(cs->as_read);
284
285 } else if (SSL_want_read(cs->ssl) || (cs->urd_cb && has_appdata == 0)) {
286 /* request more data from the socket */
287 lcb_IOV iov;
288 #if LCB_CAN_OPTIMIZE_SSL_BIO
289 BUF_MEM *mb;
290 #else
291 #define BUFSZ 4096
292 my_WBUF *rb = malloc(sizeof(*rb) + BUFSZ);
293 rb->parent = cs;
294 #endif
295
296 cs->rdactive = 1;
297 lcbio_table_ref(&cs->base_);
298 #if LCB_CAN_OPTIMIZE_SSL_BIO
299 BIO_get_mem_ptr(cs->rbio, &mb);
300 iotssl_bm_reserve(mb);
301 iov.iov_base = mb->data + mb->length;
302 iov.iov_len = mb->max - mb->length;
303 IOT_V1(cs->orig).read2(
304 IOT_ARG(cs->orig), cs->sd, &iov, 1, cs, read_callback);
305 #else
306 iov.iov_base = rb->buf;
307 iov.iov_len = BUFSZ;
308 IOT_V1(cs->orig).read2(
309 IOT_ARG(cs->orig), cs->sd, &iov, 1, rb, read_callback);
310 #endif
311 }
312
313 }
314 }
315
316 static int
Cssl_read2(lcb_io_opt_t iops,lcb_sockdata_t * sd,lcb_IOV * iov,lcb_size_t niov,void * uarg,lcb_ioC_read2_callback callback)317 Cssl_read2(lcb_io_opt_t iops, lcb_sockdata_t *sd, lcb_IOV *iov, lcb_size_t niov,
318 void *uarg, lcb_ioC_read2_callback callback)
319 {
320 lcbio_CSSL *cs = CS_FROM_IOPS(iops);
321 cs->urd_iov = *iov;
322 cs->urd_arg = uarg;
323 cs->urd_cb = callback;
324
325 IOTSSL_PENDING_PRECHECK(cs->ssl);
326 if (IOTSSL_IS_PENDING(cs->ssl)) {
327 /* have data to be read. Fast path here */
328 lcbio_async_signal(cs->as_read);
329 } else {
330 SCHEDULE_WANT_SAFE(cs);
331 }
332
333 (void) niov; (void) sd;
334 return 0;
335 }
336
337 static int
Cssl_write2(lcb_io_opt_t io,lcb_sockdata_t * sd,lcb_IOV * iov,lcb_size_t niov,void * uarg,lcb_ioC_write2_callback callback)338 Cssl_write2(lcb_io_opt_t io, lcb_sockdata_t *sd, lcb_IOV *iov, lcb_size_t niov,
339 void *uarg, lcb_ioC_write2_callback callback)
340 {
341 lcbio_CSSL *cs = CS_FROM_IOPS(io);
342 my_WCTX *wc;
343
344 /* We keep one of these cached inside the cs structure so we don't have
345 * to make a new malloc for each write */
346 if (cs->wctx_cached) {
347 wc = cs->wctx_cached;
348 cs->wctx_cached = NULL;
349 memset(wc, 0, sizeof *wc);
350 } else {
351 wc = calloc(1, sizeof(*wc));
352 }
353
354 /* assign the common parameters */
355 wc->uarg = uarg;
356 wc->cb = callback;
357
358 /* If the socket does not have a pending error and there are no other
359 * writes before this, then try to write the current buffer immediately. */
360 if (cs->error == 0 && SLLIST_IS_EMPTY(&cs->writes)) {
361 unsigned ii;
362 for (ii = 0; ii < niov; ++ii) {
363 int rv = SSL_write(cs->ssl, iov->iov_base, iov->iov_len);
364 if (rv > 0) {
365 iov++;
366 niov--;
367 } else {
368 maybe_set_error(cs, rv);
369 break;
370 }
371 }
372 }
373
374 /* We add this now in order for the SLLIST_IS_EMPTY to be false before, if
375 * no other items were pending */
376 sllist_append(&cs->writes, &wc->slnode);
377
378 /* If we have some IOVs remaining then it means we couldn't write all the
379 * data. If so, reschedule and place in the queue for later */
380 if (niov && cs->error == 0) {
381 wc->niov = niov;
382 wc->iov = malloc(sizeof (*iov) * wc->niov);
383 wc->iovroot_ = wc->iov;
384 memcpy(wc->iov, iov, sizeof (*iov) * niov);
385 /* This function will try to schedule the proper events. We need at least
386 * one SSL_write() in order to advance the state machine. In the future
387 * we could determine if we performed a previous SSL_write above */
388 appdata_encode(cs);
389 }
390
391 /* In most cases we will want to deliver the "flushed" notification */
392 lcbio_async_signal(cs->as_write);
393 (void) sd;
394 return 0;
395 }
396
397 static unsigned
Cssl_close(lcb_io_opt_t io,lcb_sockdata_t * sd)398 Cssl_close(lcb_io_opt_t io, lcb_sockdata_t *sd)
399 {
400 lcbio_CSSL *cs = CS_FROM_IOPS(io);
401 IOT_V1(cs->orig).close(IOT_ARG(cs->orig), sd);
402 cs->error = 1;
403 if (!SLLIST_IS_EMPTY(&cs->writes)) {
404 /* It is possible that a prior call to SSL_write returned an SSL_want_read
405 * and the next subsequent call to the underlying read API returned an
406 * error. For this reason we signal to the as_write function (which
407 * then calls the appdata_free_flushed function) in case we have such
408 * leftover data.
409 */
410 lcbio_async_signal(cs->as_write);
411 }
412 return 0;
413 }
414
415 static void
Cssl_dtor(void * arg)416 Cssl_dtor(void *arg)
417 {
418 lcbio_CSSL *cs = arg;
419 lcb_assert(SLLIST_IS_EMPTY(&cs->writes));
420 lcbio_timer_destroy(cs->as_read);
421 lcbio_timer_destroy(cs->as_write);
422 iotssl_destroy_common((lcbio_XSSL *)cs);
423 free(cs->wctx_cached);
424 free(arg);
425 }
426
427 lcbio_pTABLE
lcbio_Cssl_new(lcbio_pTABLE orig,lcb_sockdata_t * sd,SSL_CTX * sctx)428 lcbio_Cssl_new(lcbio_pTABLE orig, lcb_sockdata_t *sd, SSL_CTX *sctx)
429 {
430 lcbio_CSSL *ret = calloc(1, sizeof(*ret));
431 lcbio_pTABLE iot = &ret->base_;
432 ret->sd = sd;
433 ret->as_read = lcbio_timer_new(orig, ret, (void (*)(void*))appdata_read);
434 ret->as_write = lcbio_timer_new(orig, ret, async_write);
435 ret->base_.dtor = Cssl_dtor;
436
437 iot->u_io.completion.read2 = Cssl_read2;
438 iot->u_io.completion.write2 = Cssl_write2;
439 iot->u_io.completion.close = Cssl_close;
440 iotssl_init_common((lcbio_XSSL *)ret, orig, sctx);
441 return iot;
442 }
443