/* hiwrite.c  -  Hiquu I/O Engine Write Operation.
 * Copyright (c) 2006,2012 Sampo Kellomaki (sampo@iki.fi), All Rights Reserved.
 * This is confidential unpublished proprietary source code of the author.
 * NO WARRANTY, not even implied warranties. Contains trade secrets.
 * Distribution prohibited unless authorized in writing. See file COPYING.
 * Special grant: hiwrite.c may be used with zxid open source project under
 * same licensing terms as zxid itself.
 * $Id$
 *
 * 15.4.2006, created over Easter holiday --Sampo
 * 22.4.2006, refined multi iov sends over the weekend --Sampo
 * 16.8.2012, modified license grant to allow use with ZXID.org --Sampo
 * 6.9.2012,  added support for TLS and SSL --Sampo
 *
 * Idea: Consider a separate lock for maintenance of the to_write queue and a separate
 * one for in_write, iov, and the actual writev().
 */

#include <pthread.h>
#include <memory.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <errno.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "platform.h"
#include "errmac.h"
#include "akbox.h"
#include "hiios.h"
#include <zx/zx.h>   /* for zx_report_openssl_err() */

extern int errmac_debug;

/* Alias some struct fields for headers that cannot be seen together. */
/* *** this is really STOMP 1.1 specific */
#define receipt   host
#define rcpt_id   host
#define acpt_vers vers
#define tx_id     vers
#define session   login
#define subs_id   login
#define subsc     login
#define server    pw
#define ack       pw
#define msg_id    pw
#define heart_bt  dest
#define zx_rcpt_sig dest
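
/* Illustrative consequence of the aliasing above (a sketch): because receipt
 * is #defined to host, the two expressions below name the same storage, which
 * is safe only because no single STOMP 1.1 frame carries both headers:
 *   pdu->ad.stomp.receipt   ==   pdu->ad.stomp.host
 */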

/*() Schedule a response to be sent.
 * If req is supplied, the response is taken to be a response to that.
 * Otherwise resp is treated as a stand-alone PDU, an unsolicited response if you like.
 * locking:: will take io->qel.mut */

/* Called by:  hi_send1, hi_send2, hi_send3 */
void hi_send0(struct hi_thr* hit, struct hi_io* io, struct hi_pdu* parent, struct hi_pdu* req, struct hi_pdu* resp)
{
  struct hi_io* read_io;
  int write_now = 0;
  HI_SANITY(hit->shf, hit);
  if (req) {
    resp->req = req;
    resp->n = req->reals;
    req->reals = resp;
    req->parent = parent;
  } else {
    resp->req = resp->n = 0;
  }
  resp->parent = parent;

  LOCK(io->qel.mut, "send0");
  if (!resp->req) {
    /* resp is really a request sent by the server to the client */
    /* *** this is really STOMP 1.1 specific. Ideally msg_id
     * and dest would already be set by the STOMP layer before
     * calling this - or there should be a dispatch to a protocol
     * specific method to recover them. */
    resp->ad.stomp.msg_id = strstr(resp->m, "\nmessage-id:");
    if (resp->ad.stomp.msg_id) {
      resp->ad.stomp.msg_id += sizeof("\nmessage-id:")-1;
      resp->n = io->pending;  /* add to io->pending, protected by io->qel.mut */
      io->pending = resp;
      resp->ad.stomp.dest = strstr(resp->m, "\ndestination:");
      if (resp->ad.stomp.dest)
        resp->ad.stomp.dest += sizeof("\ndestination:")-1;
      resp->ad.stomp.body = strstr(resp->m, "\n\n");
      if (resp->ad.stomp.body) {
        resp->ad.stomp.body += sizeof("\n\n")-1;
        resp->ad.stomp.len = resp->ap - resp->ad.stomp.body - 1 /* nul at end of frame */;
      } else
        resp->ad.stomp.len = 0;
      D("pending resp_%p msgid(%.*s)", resp, (int)(strchr(resp->ad.stomp.msg_id,'\n')-resp->ad.stomp.msg_id), resp->ad.stomp.msg_id);
    } else {
      ERR("request from server to client lacks message-id header and thus cannot expect an ACK. Not scheduling as pending. %p", resp);
    }
  }

  if (ONE_OF_2(io->n_thr, HI_IO_N_THR_END_GAME, HI_IO_N_THR_END_POLL)) {
    D("LK&UNLK end-game io(%x)->qel.thr=%lx n_c/t=%d/%d", io->fd, (long)io->qel.mut.thr, io->n_close, io->n_thr);
    UNLOCK(io->qel.mut, "send0-end");
    return; /* Ignore write attempt. hi_todo_consume() will eventually call hi_close() one last time. */
  }
  D("LOCK io(%x)->qel.thr=%lx n_c/t=%d/%d", io->fd, (long)io->qel.mut.thr, io->n_close, io->n_thr);

  ASSERT(io->n_thr >= 0);
  if (!io->to_write_produce)
    io->to_write_consume = resp;
  else
    io->to_write_produce->qel.n = &resp->qel;
  io->to_write_produce = resp;
  resp->qel.n = 0;
  ++io->n_to_write;
  ++io->n_pdu_out;
  ++io->n_thr;           /* Account for anticipated call to hi_write() or hi_todo_produce() */
  if (!io->writing) {
    io->writing = write_now = 1;
    D("stash cur_io(%x)->n_close=%d, io(%x) n_close=%d", hit->cur_io?hit->cur_io->fd:-1, hit->cur_n_close, io->fd, io->n_close);
    read_io = hit->cur_io;
    hit->cur_io = io;
    hit->cur_n_close = io->n_close;
  }
  io->events |= EPOLLOUT;  /* Set write event in case there is no poll before the write opportunity. */
  D("UNLOCK io(%x)->qel.thr=%lx", io->fd, (long)io->qel.mut.thr);
  UNLOCK(io->qel.mut, "send0");

  HI_SANITY(hit->shf, hit);
  ASSERT(req != resp);
  D("send fd(%x) parent_%p req_%p resp_%p n_iov=%d iov0(%.*s)", io->fd, parent, req, resp, resp->n_iov, (int)MIN(resp->iov->iov_len,3), (char*)resp->iov->iov_base);

  if (write_now) {
    /* Try cranking the write machine right away! *** should we fish out any todo queue item that may stomp on us? How to deal with a thread that has already consumed from the todo queue? */
    hi_write(hit, io);   /* Will decrement io->n_thr for the write */
    hit->cur_io = read_io;
    if (read_io) {
      hit->cur_n_close = read_io->n_close;
      D("restored cur_io(%x)->n_close=%d", hit->cur_io?hit->cur_io->fd:-1, hit->cur_n_close);
    }
  } else {
    hi_todo_produce(hit, &io->qel, "send0", 0);
  }
}
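
/* Sketch of the links hi_send0() builds when req is supplied (resp_new and
 * resp_old are hypothetical names; fields as in the code above):
 *
 *   req->reals --> resp_new --n--> resp_old --n--> 0   (newest response first)
 *   resp_new->req --> req
 *   resp_new->parent == req->parent == parent
 */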

/*() Frontend to hi_send1(), which uses hi_send0() to send a one segment message. */

/* Called by:  http_send_err, test_ping_reply */
void hi_send(struct hi_thr* hit, struct hi_io* io, struct hi_pdu* parent, struct hi_pdu* req, struct hi_pdu* resp)
{
  hi_send1(hit, io, parent, req, resp, resp->need, resp->m);
}

/*() Uses hi_send0() to send a one segment message. */

/* Called by:  hi_send, hi_sendf, smtp_resp_wait_250_from_ehlo, smtp_resp_wait_354_from_data, smtp_send */
void hi_send1(struct hi_thr* hit, struct hi_io* io, struct hi_pdu* parent, struct hi_pdu* req, struct hi_pdu* resp, int len0, char* d0)
{
  resp->n_iov = 1;
  resp->iov[0].iov_len = len0;
  resp->iov[0].iov_base = d0;
  //HEXDUMP("iov0: ", d0, d0+len0, 800);
  hi_send0(hit, io, parent, req, resp);
}

/*() Uses hi_send0() to send a two segment message. */

/* Called by:  hmtp_send */
void hi_send2(struct hi_thr* hit, struct hi_io* io, struct hi_pdu* parent, struct hi_pdu* req, struct hi_pdu* resp, int len0, char* d0, int len1, char* d1)
{
  resp->n_iov = 2;
  resp->iov[0].iov_len  = len0;
  resp->iov[0].iov_base = d0;
  resp->iov[1].iov_len  = len1;
  resp->iov[1].iov_base = d1;
  //HEXDUMP("iov0: ", d0, d0+len0, 800);
  //HEXDUMP("iov1: ", d1, d1+len1, 800);
  hi_send0(hit, io, parent, req, resp);
}

/*() Uses hi_send0() to send a three segment message. */

/* Called by:  hmtp_send */
void hi_send3(struct hi_thr* hit, struct hi_io* io, struct hi_pdu* parent, struct hi_pdu* req, struct hi_pdu* resp, int len0, char* d0, int len1, char* d1, int len2, char* d2)
{
  resp->n_iov = 3;
  resp->iov[0].iov_len  = len0;
  resp->iov[0].iov_base = d0;
  resp->iov[1].iov_len  = len1;
  resp->iov[1].iov_base = d1;
  resp->iov[2].iov_len  = len2;
  resp->iov[2].iov_base = d2;
  //HEXDUMP("iov0: ", d0, d0+len0, 800);
  //HEXDUMP("iov1: ", d1, d1+len1, 800);
  //HEXDUMP("iov2: ", d2, d2+len2, 800);
  hi_send0(hit, io, parent, req, resp);
}

/*() Send a formatted response.
 * Uses the underlying machinery of hi_send0().
 * *** As the req argument is entirely lacking, this must be for sending unsolicited responses. */

/* Called by:  hi_accept_book, smtp_data, smtp_ehlo, smtp_mail_from x2, smtp_rcpt_to x3, smtp_resp_wait_220_greet, smtp_resp_wait_250_msg_sent, stomp_cmd_ni, stomp_err, stomp_got_login, stomp_got_send, stomp_msg_deliver, stomp_send_receipt */
void hi_sendf(struct hi_thr* hit, struct hi_io* io, struct hi_pdu* parent, struct hi_pdu* req, char* fmt, ...)
{
  va_list pv;
  struct hi_pdu* pdu = hi_pdu_alloc(hit, "send");
  if (!pdu) { hi_dump(hit->shf); NEVERNEVER("Out of PDUs in bad place. fmt(%s)", fmt); }

  va_start(pv, fmt);
  pdu->need = vsnprintf(pdu->m, pdu->lim - pdu->m, fmt, pv);
  va_end(pv);

  pdu->ap += pdu->need;
  hi_send1(hit, io, parent, req, pdu, pdu->need, pdu->m);
}
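
/* Usage sketch (illustrative only; hit, io, len, and receipt are assumed to
 * come from the caller, cf. stomp_send_receipt() in the caller list above).
 * A STOMP RECEIPT frame, NUL terminated as the protocol requires, might be
 * scheduled as an unsolicited response like this:
 *
 *   hi_sendf(hit, io, 0, 0, "RECEIPT\nreceipt-id:%.*s\n\n%c", len, receipt, 0);
 *
 * hi_sendf() formats the frame into a fresh PDU with vsnprintf() and hands
 * it to hi_send1(). */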

/*() Process io->to_write_consume to produce an iov and move the PDUs to io->in_write.
 * This is the main (only?) way writes end up in the hiios poll machinery to be written.
 * The only consumer of the io->to_write_consume queue.
 * Must only be called with io->qel.mut held. */

/* Called by:  hi_in_out, hi_make_iov */
void hi_make_iov_nolock(struct hi_io* io)
{
  struct hi_pdu* pdu;
  struct iovec* lim = io->iov+HI_N_IOV;
  struct iovec* cur = io->iov_cur = io->iov;
  D("make_iov(%x) n_to_write=%d", io->fd, io->n_to_write);
  while ((pdu = io->to_write_consume) && (cur + pdu->n_iov) <= lim) {
    memcpy(cur, pdu->iov, pdu->n_iov * sizeof(struct iovec));
    cur += pdu->n_iov;

    /* The to_write queue is linked via qel.n (see hi_send0()), so consume via
     * qel.n here; pdu->wn is then reused to link the in_write list. */
    if (!(io->to_write_consume = (struct hi_pdu*)pdu->qel.n)) /* consume from to_write */
      io->to_write_produce = 0;
    --io->n_to_write;
    pdu->wn = io->in_write;                /* produce to in_write so pdu can eventually be freed */
    io->in_write = pdu;

    ASSERT(io->n_to_write >= 0);
    ASSERT(pdu->n_iov && pdu->iov[0].iov_len);   /* Empty writes can lead to infinite loops */
    D("make_iov(%x) added pdu(%p) n_iov=%d", io->fd, pdu, (int)(cur - io->iov_cur));
  }
  io->n_iov = cur - io->iov_cur;
}
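
/* Worked example (a sketch, assuming HI_N_IOV leaves enough room): if the
 * to_write queue holds three PDUs with 2, 3, and 1 iovs respectively, one
 * call batches all six entries into io->iov, sets io->n_iov = 6, and moves
 * all three PDUs to io->in_write. A PDU whose n_iov would overflow the
 * remaining io->iov slots stays queued for a later call. */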

/*() The locked (and usual) way of calling hi_make_iov_nolock() */

/* Called by:  hi_write */
static void hi_make_iov(struct hi_io* io)
{
  LOCK(io->qel.mut, "make_iov");
  D("LOCK io(%x)->qel.thr=%lx", io->fd, (long)io->qel.mut.thr);
  hi_make_iov_nolock(io);
  D("UNLOCK io(%x)->qel.thr=%lx", io->fd, (long)io->qel.mut.thr);
  UNLOCK(io->qel.mut, "make_iov");
}

#define HIT_FREE_HIWATER 10  /* Maximum number of per thread free PDUs */
#define HIT_FREE_LOWATER 5   /* How many PDUs to move from hit to shf if HIWATER is exceeded. */

/*() Low level call to free a PDU. Usually you would call hi_free_resp() instead.
 * Usually frees to hit->free_pdus, but if that grows
 * too long, then to shf->free_pdus, to avoid over accumulation
 * of PDUs in a single thread (i.e. allocated in one, but freed in another).
 * locking:: will use shf->pdu_mut
 * see also:: hi_pdu_alloc() */

/* Called by:  hi_free_req x2, hi_free_resp */
static void hi_pdu_free(struct hi_thr* hit, struct hi_pdu* pdu, const char* lk1, const char* lk2)
{
  int i;

  ASSERT(!ONE_OF_2(pdu->qel.intodo, HI_INTODO_SHF_FREE, HI_INTODO_HIT_FREE));
  pdu->qel.n = &hit->free_pdus->qel;         /* move to hit free list */
  hit->free_pdus = pdu;
  ++hit->n_free_pdus;
  pdu->qel.intodo = HI_INTODO_HIT_FREE;
  D("%s%s: pdu_%p freed (%.*s) n_free=%d", lk1, lk2, pdu, (int)MIN(pdu->ap - pdu->m, 3), pdu->m, hit->n_free_pdus);

  if (hit->n_free_pdus <= HIT_FREE_HIWATER)  /* high water mark */
    return;

  D("%s%s: pdu_%p mv some hit->free_pdus to shf", lk1, lk2, pdu);
  LOCK(hit->shf->pdu_mut, "pdu_free");
  for (i = HIT_FREE_LOWATER; i; --i) {
    pdu = hit->free_pdus;
    hit->free_pdus = (struct hi_pdu*)pdu->qel.n;

    D("%s%s: mv hit free pdu_%p to shf", lk1, lk2, pdu);

    pdu->qel.n = &hit->shf->free_pdus->qel;         /* move to free list */
    hit->shf->free_pdus = pdu;
    ASSERTOPI(pdu->qel.intodo, ==, HI_INTODO_HIT_FREE);
    pdu->qel.intodo = HI_INTODO_SHF_FREE;
  }
  UNLOCK(hit->shf->pdu_mut, "pdu_free");
  hit->n_free_pdus -= HIT_FREE_LOWATER;
}
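
/* Walk-through of the watermark scheme (numbers from the defines above):
 * freeing an 11th PDU to hit->free_pdus exceeds HIT_FREE_HIWATER (10), so
 * HIT_FREE_LOWATER (5) PDUs are moved to the shared shf->free_pdus list
 * under shf->pdu_mut, leaving 6 on the thread local list. In the steady
 * state this costs one lock acquisition per 5 frees. */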

/*() Free a response PDU.
 * *** Here a complex determination about the freeability of a PDU needs to be made.
 * For now we "fake" it by assuming that a response suffices to free the request.
 * In real life you would have to consider
 * a. multiple responses
 * b. subrequests and their responses
 * c. possibility of sending a response before processing of the request itself has ended
 * locking:: Called outside io->qel.mut */

/* Called by:  hi_free_in_write, stomp_got_ack x2, stomp_got_nack */
void hi_free_resp(struct hi_thr* hit, struct hi_pdu* resp, const char* lk1)
{
  struct hi_pdu* pdu = resp->req->reals;

  HI_SANITY(hit->shf, hit);

  /* Remove resp from the request's real response list. resp MUST be in this list: if it
   * is not, pdu->n (next) pointer chasing will lead to a NULL dereference (by design). */

  if (resp == pdu)
    resp->req->reals = pdu->n;
  else
    for (; pdu; pdu = pdu->n)
      if (pdu->n == resp) {
        pdu->n = resp->n;
        break;
      }

  hi_pdu_free(hit, resp, lk1, "free_resp");
  HI_SANITY(hit->shf, hit);
}

/*() Free a request, and transitively its real consequences (responses, subrequests, etc.).
 * May be called either because an individual resp was done, or because of connection close.
 * locking:: Called outside io->qel.mut */

/* Called by:  hi_close x3, hi_free_req_fe, stomp_got_ack, stomp_got_nack, stomp_msg_deliver */
void hi_free_req(struct hi_thr* hit, struct hi_pdu* req, const char* lk1)
{
  struct hi_pdu* pdu;

  HI_SANITY(hit->shf, hit);

  for (pdu = req->reals; pdu; pdu = pdu->n)  /* free dependent resps */
    hi_pdu_free(hit, pdu, lk1, "free_req-real");

  hi_pdu_free(hit, req, lk1, "free_req");
  HI_SANITY(hit->shf, hit);
}

/*() Remove a PDU from the reqs list of an io object.
 * Also looks in the pending list.
 * locking:: takes io->qel.mut
 * see also:: hi_add_to_reqs() */

/* Called by:  hi_free_req_fe, stomp_got_ack */
void hi_del_from_reqs(struct hi_io* io, struct hi_pdu* req)
{
  struct hi_pdu* pdu;
  LOCK(io->qel.mut, "del-from-reqs");
  pdu = io->reqs;
  if (pdu == req) {
    io->reqs = req->n;
  } else {
    for (; pdu; pdu = pdu->n) {
      if (pdu->n == req) {
        pdu->n = req->n;
        goto out;
      }
    }
    pdu = io->pending;
    if (pdu == req) {
      io->pending = req->n;
    } else {
      for (; pdu; pdu = pdu->n) {
        if (pdu->n == req) {
          pdu->n = req->n;
          goto out;
        }
      }
      ERR("req(%p) not found in fe(%x)->reqs or pending", req, io->fd);
      /*NEVERNEVER("req not found in fe(%x)->reqs or pending", io->fd); can happen for cur_pdu */
    }
  out: ;
  }
  UNLOCK(io->qel.mut, "del-from-reqs");
}

/*() Free a request, assuming it is associated with a frontend.
 * Will also remove the PDU from the frontend reqs queue.
 * locking:: called outside io->qel.mut, takes it indirectly */

/* Called by:  hi_free_in_write */
static void hi_free_req_fe(struct hi_thr* hit, struct hi_pdu* req)
{
  ASSERT(req->fe);
  if (!req->fe)
    return;
  HI_SANITY(hit->shf, hit);

  /* Scan the frontend to find the reference. The theory is that
   * hi_free_req_fe() only gets called when it is known that the request is in the queue.
   * If it is not, the loop will run off the end and crash with a NULL pointer dereference. */
  hi_del_from_reqs(req->fe, req);
  HI_SANITY(hit->shf, hit);
  hi_free_req(hit, req, "req_fe ");
}

/*() Free the contents of the io->in_write list and anything that depends on it.
 * This is called either after a successful write, by hi_clear_iov(), or after a failed
 * write, when the impending close means that no further writes will be attempted.
 * locking:: called outside io->qel.mut */

/* Called by:  hi_clear_iov, hi_write */
static void hi_free_in_write(struct hi_thr* hit, struct hi_io* io)
{
  struct hi_pdu* req;
  struct hi_pdu* resp;
  D("freeing resps&reqs io(%x)->in_write=%p", io->fd, io->in_write);

  while ((resp = io->in_write)) {
    io->in_write = resp->wn;
    resp->wn = 0;

    if (!(req = resp->req)) continue;  /* It is a request */

    /* Only a response can cause anything to be freed, and every response is freeable upon write. */

    hi_free_resp(hit, resp, "in_write ");
    if (!req->reals)                   /* last response, free the request */
      hi_free_req_fe(hit, req);
  }
}

/*() Post process the iov after a write.
 * Determine if any (resp) PDUs got completely written and
 * warrant deletion of the entire chain of req and responses,
 * including subreqs and their responses.
 * locking:: called outside io->qel.mut */

/* Called by:  hi_write x2 */
static void hi_clear_iov(struct hi_thr* hit, struct hi_io* io, int n)
{
  io->n_written += n;
  while (io->n_iov && n) {
    if (n >= io->iov_cur->iov_len) {
      n -= io->iov_cur->iov_len;
      ++io->iov_cur;
      --io->n_iov;
      ASSERTOPP(io->iov_cur, >=, 0);
    } else {
      /* partial write: need to adjust iov_cur->iov_base */
      io->iov_cur->iov_base = ((char*)(io->iov_cur->iov_base)) + n;
      io->iov_cur->iov_len -= n;
      return;  /* we are not at the end, so nothing to free */
    }
  }
  ASSERTOPI(n, ==, 0);
  if (io->n_iov)
    return;

  /* Everything has now been written. Time to free the in_write list. */

  hi_free_in_write(hit, io);
}
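
/* Worked example of the adjustment above (a sketch): with two iovs of 10
 * and 20 bytes and a write of n=15, the loop consumes the first iov
 * (n becomes 5, io->n_iov drops to 1) and then adjusts the second to
 * iov_base+5, iov_len=15, ready for the next writev(). Only when io->n_iov
 * reaches 0 is the in_write list freed. */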

/*() Attempt to write pending iovs.
 * This function can only be called by one thread at a time because the todo_queue
 * only admits an io object once and only one thread can consume it. Thus locking
 * is really needed only to protect the to_write queue, see hi_make_iov().
 * Return:: 1 if the connection got closed (and n_thr decremented),
 *     0 if the connection remains open (permitting, e.g., a read(2)). */

/* Called by:  hi_in_out, hi_send0 */
int hi_write(struct hi_thr* hit, struct hi_io* io)
{
  int ret, err;
  ASSERT(io->writing);
  while (1) {   /* Write until exhausted! */
    if (!io->in_write)  /* Need to prepare a new iov? */
      hi_make_iov(io);
    if (!io->in_write)
      goto out;         /* Nothing further to write */
  retry:
    ASSERT(io->writing);
#ifdef USE_OPENSSL
    if (io->ssl) {
      D("SSL_write(%x) n_iov=%d n_thr=%d r/w=%d/%d ev=%x", io->fd, io->n_iov, io->n_thr, io->reading, io->writing, io->events);
      HEXDUMP("iov0:", io->iov_cur->iov_base, io->iov_cur->iov_base + io->iov_cur->iov_len, /*16*/ 256);
      /* N.B. As SSL_write() does not support a vector of iovs, we just write the
       * first iov here. Eventually the loop will iterate and the others get written. */
      ret = SSL_write(io->ssl, io->iov_cur->iov_base, io->iov_cur->iov_len);
      ASSERT(io->writing);
      switch (err = SSL_get_error(io->ssl, ret)) {
      case SSL_ERROR_NONE:  /* Something was written */
        D("SSL_wrote(%x) %d bytes n_thr=%d r/w=%d/%d ev=%x", io->fd, ret, io->n_thr, io->reading, io->writing, io->events);
        hi_clear_iov(hit, io, ret);
        break; /* iterate the write loop again */
      case SSL_ERROR_WANT_READ:
        D("SSL EAGAIN READ fd(%x)", io->fd);
        zx_report_openssl_err("SSL again read"); /* *** do we need this to clear the error stack? */
        goto out; /* Comparable to EAGAIN. Should we remember which? */
      case SSL_ERROR_WANT_WRITE:
        D("SSL EAGAIN WRITE fd(%x)", io->fd);
        zx_report_openssl_err("SSL again write"); /* *** do we need this to clear the error stack? */
        goto out; /* Comparable to EAGAIN. Should we remember which? */
      case SSL_ERROR_ZERO_RETURN: /* Probably a close from the other end */
      default:
        ERR("SSL_write ret=%d err=%d", ret, err);
        zx_report_openssl_err("SSL_write");
        goto clear_writing_err;
      }
    } else
#endif
    {
      D("writev(%x) n_iov=%d n_thr=%d r/w=%d/%d ev=%x", io->fd, io->n_iov, io->n_thr, io->reading, io->writing, io->events);
      HEXDUMP("iov0:", io->iov_cur->iov_base, io->iov_cur->iov_base + io->iov_cur->iov_len, /*16*/ 256);
      ret = writev(io->fd&0x7fffffff /* in case of write after close */, io->iov_cur, io->n_iov);
      ASSERT(io->writing);
      switch (ret) {
      case 0: NEVERNEVER("writev on %x returned 0", io->fd);
      case -1:
        switch (errno) {
        case EINTR:  D("EINTR fd(%x)", io->fd); goto retry;
        case EAGAIN: D("EAGAIN WRITE fd(%x)", io->fd); goto out;   /* writev(2) exhausted (c.f. edge triggered epoll) */
        default:
          ERR("writev(%x) failed: %d %s (closing connection)", io->fd, errno, STRERROR(errno));
          goto clear_writing_err;
        }
      default:  /* something was written, deduce it from the iov */
        D("wrote(%x) %d bytes n_thr=%d r/w=%d/%d ev=%x", io->fd, ret, io->n_thr, io->reading, io->writing, io->events);
        hi_clear_iov(hit, io, ret);
      }
    }
  }
 out:
  LOCK(io->qel.mut, "clear-writing");   /* The io->writing was set in hi_in_out() or hi_send0() */
  D("WR-OUT: LOCK & UNLOCK io(%x)->qel.thr=%lx", io->fd, (long)io->qel.mut.thr);
  ASSERT(io->writing);
  io->writing = 0;
  --io->n_thr;
  ASSERT(io->n_thr >= 0);
  UNLOCK(io->qel.mut, "clear-writing");
  return 0;

 clear_writing_err:
  hi_free_in_write(hit, io);
  LOCK(io->qel.mut, "clear-writing-err");   /* The io->writing was set in hi_in_out() */
  D("WR-FAIL: LK&UNLK io(%x)->qel.thr=%lx", io->fd, (long)io->qel.mut.thr);
  ASSERT(io->writing);
  io->writing = 0;
  --io->n_thr;
  ASSERT(io->n_thr >= 0);
  UNLOCK(io->qel.mut, "clear-writing-err");
  hi_close(hit, io, "hi_write");
  return 1;
}
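
/* Caller sketch (illustrative, per the Return:: contract above):
 *
 *   if (hi_write(hit, io))
 *     return;     // connection closed, io->n_thr already decremented
 *   // connection still open: safe to proceed, e.g. to a read(2)
 */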

/* EOF  --  hiwrite.c */