/***************************************************************************
 *                                  _   _ ____  _
 *  Project                     ___| | | |  _ \| |
 *                             / __| | | | |_) | |
 *                            | (__| |_| |  _ <| |___
 *                             \___|\___/|_| \_\_____|
 *
 * Copyright (C) 1998 - 2019, Daniel Stenberg, <daniel@haxx.se>, et al.
 *
 * This software is licensed as described in the file COPYING, which
 * you should have received as part of this distribution. The terms
 * are also available at https://curl.haxx.se/docs/copyright.html.
 *
 * You may opt to use, copy, modify, merge, publish, distribute and/or sell
 * copies of the Software, and permit persons to whom the Software is
 * furnished to do so, under the terms of the COPYING file.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ***************************************************************************/

#include "curl_setup.h"
#include "strtoofft.h"

#ifdef HAVE_NETINET_IN_H
#include <netinet/in.h>
#endif
#ifdef HAVE_NETDB_H
#include <netdb.h>
#endif
#ifdef HAVE_ARPA_INET_H
#include <arpa/inet.h>
#endif
#ifdef HAVE_NET_IF_H
#include <net/if.h>
#endif
#ifdef HAVE_SYS_IOCTL_H
#include <sys/ioctl.h>
#endif
#ifdef HAVE_SIGNAL_H
#include <signal.h>
#endif

#ifdef HAVE_SYS_PARAM_H
#include <sys/param.h>
#endif

#ifdef HAVE_SYS_SELECT_H
#include <sys/select.h>
#endif

#ifndef HAVE_SOCKET
#error "We can't compile without socket() support!"
#endif

#include "urldata.h"
#include <curl/curl.h>
#include "netrc.h"

#include "content_encoding.h"
#include "hostip.h"
#include "transfer.h"
#include "sendf.h"
#include "speedcheck.h"
#include "progress.h"
#include "http.h"
#include "url.h"
#include "getinfo.h"
#include "vtls/vtls.h"
#include "select.h"
#include "multiif.h"
#include "connect.h"
#include "non-ascii.h"
#include "http2.h"
#include "mime.h"
#include "strcase.h"
#include "urlapi-int.h"

/* The last 3 #include files should be in this order */
#include "curl_printf.h"
#include "curl_memory.h"
#include "memdebug.h"

#if !defined(CURL_DISABLE_HTTP) || !defined(CURL_DISABLE_SMTP) || \
    !defined(CURL_DISABLE_IMAP)
/*
 * checkheaders() checks the linked list of custom headers for a
 * particular header (prefix). Provide the prefix without colon!
 *
 * Returns a pointer to the first matching header or NULL if none matched.
 */
char *Curl_checkheaders(const struct connectdata *conn,
                        const char *thisheader)
{
  struct curl_slist *head;
  size_t thislen = strlen(thisheader);
  struct Curl_easy *data = conn->data;

  for(head = data->set.headers; head; head = head->next) {
    if(strncasecompare(head->data, thisheader, thislen) &&
       Curl_headersep(head->data[thislen]) )
      return head->data;
  }

  return NULL;
}
#endif
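
/* Illustrative sketch (added comment, not part of the original file): callers
   pass the header name without the trailing colon, for example to check
   whether the application already supplied its own header before libcurl
   generates one. The surrounding variables are hypothetical. */
#if 0
  if(!Curl_checkheaders(conn, "Content-Length")) {
    /* no custom Content-Length header set by the application,
       so libcurl may add one itself */
  }
#endif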

CURLcode Curl_get_upload_buffer(struct Curl_easy *data)
{
  if(!data->state.ulbuf) {
    data->state.ulbuf = malloc(data->set.upload_buffer_size);
    if(!data->state.ulbuf)
      return CURLE_OUT_OF_MEMORY;
  }
  return CURLE_OK;
}
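
/* Illustrative sketch (not part of the build, hence #if 0): the size used by
   the lazy allocation above comes from CURLOPT_UPLOAD_BUFFERSIZE, which an
   application can raise for faster uploads; "easy" is a hypothetical handle. */
#if 0
  CURL *easy = curl_easy_init();
  /* ask for a 512 kB upload buffer; libcurl clamps the value to the range it
     supports */
  curl_easy_setopt(easy, CURLOPT_UPLOAD_BUFFERSIZE, 512 * 1024L);
#endif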

#ifndef CURL_DISABLE_HTTP
/*
 * This function will be called to loop through the trailers buffer
 * until no more data is available for sending.
 */
static size_t Curl_trailers_read(char *buffer, size_t size, size_t nitems,
                                 void *raw)
{
  struct Curl_easy *data = (struct Curl_easy *)raw;
  Curl_send_buffer *trailers_buf = data->state.trailers_buf;
  size_t bytes_left = trailers_buf->size_used-data->state.trailers_bytes_sent;
  size_t to_copy = (size*nitems < bytes_left) ? size*nitems : bytes_left;
  if(to_copy) {
    memcpy(buffer,
           &trailers_buf->buffer[data->state.trailers_bytes_sent],
           to_copy);
    data->state.trailers_bytes_sent += to_copy;
  }
  return to_copy;
}

static size_t Curl_trailers_left(void *raw)
{
  struct Curl_easy *data = (struct Curl_easy *)raw;
  Curl_send_buffer *trailers_buf = data->state.trailers_buf;
  return trailers_buf->size_used - data->state.trailers_bytes_sent;
}
#endif
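
/* Illustrative sketch (not part of the build, hence #if 0): the trailers that
   Curl_trailers_read() feeds into the chunked upload come from an application
   callback installed with CURLOPT_TRAILERFUNCTION. The callback name and the
   header below are hypothetical; libcurl frees the list itself (see the
   curl_slist_free_all() calls in Curl_fillreadbuffer() below). */
#if 0
static int app_trailer_cb(struct curl_slist **list, void *userdata)
{
  (void)userdata;
  /* append one trailing header, without CRLF */
  *list = curl_slist_append(*list, "My-Checksum: abc123");
  return CURL_TRAILERFUNC_OK; /* or CURL_TRAILERFUNC_ABORT to fail the send */
}

/* during setup of a chunked upload:
   curl_easy_setopt(easy, CURLOPT_TRAILERFUNCTION, app_trailer_cb);
   curl_easy_setopt(easy, CURLOPT_TRAILERDATA, NULL); */
#endif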

/*
 * This function will call the read callback to fill our buffer with data
 * to upload.
 */
CURLcode Curl_fillreadbuffer(struct connectdata *conn, size_t bytes,
                             size_t *nreadp)
{
  struct Curl_easy *data = conn->data;
  size_t buffersize = bytes;
  size_t nread;

  curl_read_callback readfunc = NULL;
  void *extra_data = NULL;

#ifdef CURL_DOES_CONVERSIONS
  bool sending_http_headers = FALSE;

  if(conn->handler->protocol&(PROTO_FAMILY_HTTP|CURLPROTO_RTSP)) {
    const struct HTTP *http = data->req.protop;

    if(http->sending == HTTPSEND_REQUEST)
      /* We're sending the HTTP request headers, not the data.
         Remember that so we don't re-translate them into garbage. */
      sending_http_headers = TRUE;
  }
#endif

#ifndef CURL_DISABLE_HTTP
  if(data->state.trailers_state == TRAILERS_INITIALIZED) {
    struct curl_slist *trailers = NULL;
    CURLcode result;
    int trailers_ret_code;

    /* at this point we already verified that the callback exists
       so we compile and store the trailers buffer, then proceed */
    infof(data,
          "Moving trailers state machine from initialized to sending.\n");
    data->state.trailers_state = TRAILERS_SENDING;
    data->state.trailers_buf = Curl_add_buffer_init();
    if(!data->state.trailers_buf) {
      failf(data, "Unable to allocate trailing headers buffer !");
      return CURLE_OUT_OF_MEMORY;
    }
    data->state.trailers_bytes_sent = 0;
    Curl_set_in_callback(data, true);
    trailers_ret_code = data->set.trailer_callback(&trailers,
                                                   data->set.trailer_data);
    Curl_set_in_callback(data, false);
    if(trailers_ret_code == CURL_TRAILERFUNC_OK) {
      result = Curl_http_compile_trailers(trailers, &data->state.trailers_buf,
                                          data);
    }
    else {
      failf(data, "operation aborted by trailing headers callback");
      *nreadp = 0;
      result = CURLE_ABORTED_BY_CALLBACK;
    }
    if(result) {
      Curl_add_buffer_free(&data->state.trailers_buf);
      curl_slist_free_all(trailers);
      return result;
    }
    infof(data, "Successfully compiled trailers.\r\n");
    curl_slist_free_all(trailers);
  }
#endif

  /* if we are transmitting trailing data, we don't need to write
     a chunk size so we skip this */
  if(data->req.upload_chunky &&
     data->state.trailers_state == TRAILERS_NONE) {
    /* if chunked Transfer-Encoding */
    buffersize -= (8 + 2 + 2);   /* 32bit hex + CRLF + CRLF */
    data->req.upload_fromhere += (8 + 2); /* 32bit hex + CRLF */
  }

#ifndef CURL_DISABLE_HTTP
  if(data->state.trailers_state == TRAILERS_SENDING) {
    /* if we're here then that means that we already sent the last empty chunk
       but we didn't send a final CR LF, so we sent 0 CR LF. We then start
       pulling trailing data until we have no more at which point we
       simply return to the previous point in the state machine as if
       nothing happened.
       */
    readfunc = Curl_trailers_read;
    extra_data = (void *)data;
  }
  else
#endif
  {
    readfunc = data->state.fread_func;
    extra_data = data->state.in;
  }

  Curl_set_in_callback(data, true);
  nread = readfunc(data->req.upload_fromhere, 1,
                   buffersize, extra_data);
  Curl_set_in_callback(data, false);

  if(nread == CURL_READFUNC_ABORT) {
    failf(data, "operation aborted by callback");
    *nreadp = 0;
    return CURLE_ABORTED_BY_CALLBACK;
  }
  if(nread == CURL_READFUNC_PAUSE) {
    struct SingleRequest *k = &data->req;

    if(conn->handler->flags & PROTOPT_NONETWORK) {
      /* protocols that work without network cannot be paused. This is
         actually only FILE:// just now, and it can't pause since the transfer
         isn't done using the "normal" procedure. */
      failf(data, "Read callback asked for PAUSE when not supported!");
      return CURLE_READ_ERROR;
    }

    /* CURL_READFUNC_PAUSE pauses read callbacks that feed socket writes */
    k->keepon |= KEEP_SEND_PAUSE; /* mark socket send as paused */
    if(data->req.upload_chunky) {
        /* Back out the preallocation done above */
      data->req.upload_fromhere -= (8 + 2);
    }
    *nreadp = 0;

    return CURLE_OK; /* nothing was read */
  }
  else if(nread > buffersize) {
    /* the read function returned a too large value */
    *nreadp = 0;
    failf(data, "read function returned funny value");
    return CURLE_READ_ERROR;
  }

  if(!data->req.forbidchunk && data->req.upload_chunky) {
    /* if chunked Transfer-Encoding
     *    build chunk:
     *
     *        <HEX SIZE> CRLF
     *        <DATA> CRLF
     */
    /* On non-ASCII platforms the <DATA> may or may not be
       translated based on set.prefer_ascii while the protocol
       portion must always be translated to the network encoding.
       To further complicate matters, line end conversion might be
       done later on, so we need to prevent CRLFs from becoming
       CRCRLFs if that's the case.  To do this we use bare LFs
       here, knowing they'll become CRLFs later on.
     */
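    /* Illustrative note (added comment): for a 5-byte payload "hello" the
       code below emits the chunk on the wire as
           5\r\nhello\r\n
       and the upload ends with the terminating chunk
           0\r\n\r\n
       (or "0\r\n" followed by the trailers and a final CRLF when a trailer
       callback is set). */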

    bool added_crlf = FALSE;
    int hexlen = 0;
    const char *endofline_native;
    const char *endofline_network;

    if(
#ifdef CURL_DO_LINEEND_CONV
       (data->set.prefer_ascii) ||
#endif
       (data->set.crlf)) {
      /* \n will become \r\n later on */
      endofline_native  = "\n";
      endofline_network = "\x0a";
    }
    else {
      endofline_native  = "\r\n";
      endofline_network = "\x0d\x0a";
    }

    /* if we're not handling trailing data, proceed as usual */
    if(data->state.trailers_state != TRAILERS_SENDING) {
      char hexbuffer[11] = "";
      hexlen = msnprintf(hexbuffer, sizeof(hexbuffer),
                         "%zx%s", nread, endofline_native);

      /* move buffer pointer */
      data->req.upload_fromhere -= hexlen;
      nread += hexlen;

      /* copy the prefix to the buffer, leaving out the NUL */
      memcpy(data->req.upload_fromhere, hexbuffer, hexlen);

      /* always append ASCII CRLF to the data unless
         we have a valid trailer callback */
#ifndef CURL_DISABLE_HTTP
      if((nread-hexlen) == 0 &&
          data->set.trailer_callback != NULL &&
          data->state.trailers_state == TRAILERS_NONE) {
        data->state.trailers_state = TRAILERS_INITIALIZED;
      }
      else
#endif
      {
        memcpy(data->req.upload_fromhere + nread,
               endofline_network,
               strlen(endofline_network));
        added_crlf = TRUE;
      }
    }

#ifdef CURL_DOES_CONVERSIONS
    {
      CURLcode result;
      size_t length;
      if(data->set.prefer_ascii)
        /* translate the protocol and data */
        length = nread;
      else
        /* just translate the protocol portion */
        length = hexlen;
      if(length) {
        result = Curl_convert_to_network(data, data->req.upload_fromhere,
                                         length);
        /* Curl_convert_to_network calls failf if unsuccessful */
        if(result)
          return result;
      }
    }
#endif /* CURL_DOES_CONVERSIONS */

#ifndef CURL_DISABLE_HTTP
    if(data->state.trailers_state == TRAILERS_SENDING &&
       !Curl_trailers_left(data)) {
      Curl_add_buffer_free(&data->state.trailers_buf);
      data->state.trailers_state = TRAILERS_DONE;
      data->set.trailer_data = NULL;
      data->set.trailer_callback = NULL;
      /* mark the transfer as done */
      data->req.upload_done = TRUE;
      infof(data, "Signaling end of chunked upload after trailers.\n");
    }
    else
#endif
      if((nread - hexlen) == 0 &&
         data->state.trailers_state != TRAILERS_INITIALIZED) {
        /* mark this as done once this chunk is transferred */
        data->req.upload_done = TRUE;
        infof(data,
              "Signaling end of chunked upload via terminating chunk.\n");
      }

    if(added_crlf)
      nread += strlen(endofline_network); /* for the added end of line */
  }
#ifdef CURL_DOES_CONVERSIONS
  else if((data->set.prefer_ascii) && (!sending_http_headers)) {
    CURLcode result;
    result = Curl_convert_to_network(data, data->req.upload_fromhere, nread);
    /* Curl_convert_to_network calls failf if unsuccessful */
    if(result)
      return result;
  }
#endif /* CURL_DOES_CONVERSIONS */

  *nreadp = nread;

  return CURLE_OK;
}

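/* Illustrative sketch (not part of the build, hence #if 0): the readfunc
   invoked by Curl_fillreadbuffer() above is, in the normal case, the
   application's CURLOPT_READFUNCTION. A minimal file-backed callback could
   look like this; the names are hypothetical. */
#if 0
static size_t app_read_cb(char *buffer, size_t size, size_t nitems,
                          void *userp)
{
  FILE *in = (FILE *)userp;
  /* return the number of bytes placed in the buffer; returning
     CURL_READFUNC_ABORT or CURL_READFUNC_PAUSE triggers the corresponding
     branches handled in Curl_fillreadbuffer() */
  return fread(buffer, 1, size * nitems, in);
}

/* curl_easy_setopt(easy, CURLOPT_READFUNCTION, app_read_cb);
   curl_easy_setopt(easy, CURLOPT_READDATA, infile); */
#endif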

/*
 * Curl_readrewind() rewinds the read stream. This is typically used for HTTP
 * POST/PUT with multi-pass authentication when a sending was denied and a
 * resend is necessary.
 */
CURLcode Curl_readrewind(struct connectdata *conn)
{
  struct Curl_easy *data = conn->data;
  curl_mimepart *mimepart = &data->set.mimepost;

  conn->bits.rewindaftersend = FALSE; /* we rewind now */

  /* explicitly switch off sending data on this connection now since we are
     about to restart a new transfer and thus we want to avoid inadvertently
     sending more data on the existing connection until the next transfer
     starts */
  data->req.keepon &= ~KEEP_SEND;

  /* We have sent away data. If not using CURLOPT_POSTFIELDS or
     CURLOPT_HTTPPOST, call app to rewind
  */
  if(conn->handler->protocol & PROTO_FAMILY_HTTP) {
    struct HTTP *http = data->req.protop;

    if(http->sendit)
      mimepart = http->sendit;
  }
  if(data->set.postfields)
    ; /* do nothing */
  else if(data->set.httpreq == HTTPREQ_POST_MIME ||
          data->set.httpreq == HTTPREQ_POST_FORM) {
    if(Curl_mime_rewind(mimepart)) {
      failf(data, "Cannot rewind mime/post data");
      return CURLE_SEND_FAIL_REWIND;
    }
  }
  else {
    if(data->set.seek_func) {
      int err;

      Curl_set_in_callback(data, true);
      err = (data->set.seek_func)(data->set.seek_client, 0, SEEK_SET);
      Curl_set_in_callback(data, false);
      if(err) {
        failf(data, "seek callback returned error %d", (int)err);
        return CURLE_SEND_FAIL_REWIND;
      }
    }
    else if(data->set.ioctl_func) {
      curlioerr err;

      Curl_set_in_callback(data, true);
      err = (data->set.ioctl_func)(data, CURLIOCMD_RESTARTREAD,
                                   data->set.ioctl_client);
      Curl_set_in_callback(data, false);
      infof(data, "the ioctl callback returned %d\n", (int)err);

      if(err) {
        failf(data, "ioctl callback returned error %d", (int)err);
        return CURLE_SEND_FAIL_REWIND;
      }
    }
    else {
      /* If no CURLOPT_READFUNCTION is used, we know that we operate on a
         given FILE * stream and we can actually attempt to rewind that
         ourselves with fseek() */
      if(data->state.fread_func == (curl_read_callback)fread) {
        if(-1 != fseek(data->state.in, 0, SEEK_SET))
          /* successful rewind */
          return CURLE_OK;
      }

      /* no callback set or failure above, makes us fail at once */
      failf(data, "necessary data rewind wasn't possible");
      return CURLE_SEND_FAIL_REWIND;
    }
  }
  return CURLE_OK;
}
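
/* Illustrative sketch (not part of the build, hence #if 0): the seek_func
   consulted by Curl_readrewind() above is installed by the application with
   CURLOPT_SEEKFUNCTION/CURLOPT_SEEKDATA. A file-backed callback might look
   like this; the names are hypothetical. */
#if 0
static int app_seek_cb(void *userp, curl_off_t offset, int origin)
{
  FILE *in = (FILE *)userp;
  if(fseek(in, (long)offset, origin))
    return CURL_SEEKFUNC_FAIL;
  return CURL_SEEKFUNC_OK;
}

/* curl_easy_setopt(easy, CURLOPT_SEEKFUNCTION, app_seek_cb);
   curl_easy_setopt(easy, CURLOPT_SEEKDATA, infile); */
#endif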

static int data_pending(const struct Curl_easy *data)
{
  struct connectdata *conn = data->conn;
  /* in the case of libssh2, we can never be really sure that we have emptied
     its internal buffers so we MUST always try until we get EAGAIN back */
  return conn->handler->protocol&(CURLPROTO_SCP|CURLPROTO_SFTP) ||
#if defined(USE_NGHTTP2)
    Curl_ssl_data_pending(conn, FIRSTSOCKET) ||
    /* For HTTP/2, we may read up everything including response body
       with header fields in Curl_http_readwrite_headers. If no
       content-length is provided, curl waits for the connection
       close, which we emulate using conn->proto.httpc.closed =
       TRUE. The thing is if we read everything, then http2_recv won't
       be called and we cannot signal the HTTP/2 stream has closed. As
       a workaround, we return nonzero here to call http2_recv. */
    ((conn->handler->protocol&PROTO_FAMILY_HTTP) && conn->httpversion >= 20);
#elif defined(ENABLE_QUIC)
    Curl_ssl_data_pending(conn, FIRSTSOCKET) || Curl_quic_data_pending(data);
#else
    Curl_ssl_data_pending(conn, FIRSTSOCKET);
#endif
}

/*
 * Check to see if CURLOPT_TIMECONDITION was met by comparing the time of the
 * remote document with the time provided by CURLOPT_TIMEVALUE
 */
bool Curl_meets_timecondition(struct Curl_easy *data, time_t timeofdoc)
{
  if((timeofdoc == 0) || (data->set.timevalue == 0))
    return TRUE;

  switch(data->set.timecondition) {
  case CURL_TIMECOND_IFMODSINCE:
  default:
    if(timeofdoc <= data->set.timevalue) {
      infof(data,
            "The requested document is not new enough\n");
      data->info.timecond = TRUE;
      return FALSE;
    }
    break;
  case CURL_TIMECOND_IFUNMODSINCE:
    if(timeofdoc >= data->set.timevalue) {
      infof(data,
            "The requested document is not old enough\n");
      data->info.timecond = TRUE;
      return FALSE;
    }
    break;
  }

  return TRUE;
}
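
/* Illustrative sketch (not part of the build, hence #if 0): the values
   compared above come from CURLOPT_TIMECONDITION and CURLOPT_TIMEVALUE, and
   an application can detect an unmet condition afterwards with
   CURLINFO_CONDITION_UNMET; "easy" is a hypothetical handle. */
#if 0
  long unmet = 0;
  /* only fetch the document if it changed since Jan 1 2019 00:00:00 GMT */
  curl_easy_setopt(easy, CURLOPT_TIMECONDITION, CURL_TIMECOND_IFMODSINCE);
  curl_easy_setopt(easy, CURLOPT_TIMEVALUE, 1546300800L);
  curl_easy_perform(easy);
  curl_easy_getinfo(easy, CURLINFO_CONDITION_UNMET, &unmet);
#endif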

/*
 * Go ahead and do a read if we have a readable socket or if
 * the stream was rewound (in which case we have data in a
 * buffer)
 *
 * return '*comeback' TRUE if we didn't properly drain the socket so this
 * function should get called again without select() or similar in between!
 */
static CURLcode readwrite_data(struct Curl_easy *data,
                               struct connectdata *conn,
                               struct SingleRequest *k,
                               int *didwhat, bool *done,
                               bool *comeback)
{
  CURLcode result = CURLE_OK;
  ssize_t nread; /* number of bytes read */
  size_t excess = 0; /* excess bytes read */
  bool readmore = FALSE; /* used by RTP to signal for more data */
  int maxloops = 100;

  *done = FALSE;
  *comeback = FALSE;

  /* This is where we loop until we have read everything there is to
     read or we get a CURLE_AGAIN */
  do {
    bool is_empty_data = FALSE;
    size_t buffersize = data->set.buffer_size;
    size_t bytestoread = buffersize;

    if(
#if defined(USE_NGHTTP2)
       /* For HTTP/2, read data without caring about the content
          length. This is safe because body in HTTP/2 is always
          segmented thanks to its framing layer. Meanwhile, we have to
          call Curl_read to ensure that http2_handle_stream_close is
          called when we read all incoming bytes for a particular
          stream. */
       !((conn->handler->protocol & PROTO_FAMILY_HTTP) &&
         conn->httpversion == 20) &&
#endif
       k->size != -1 && !k->header) {
      /* make sure we don't read too much */
      curl_off_t totalleft = k->size - k->bytecount;
      if(totalleft < (curl_off_t)bytestoread)
        bytestoread = (size_t)totalleft;
    }

    if(bytestoread) {
      /* receive data from the network! */
      result = Curl_read(conn, conn->sockfd, k->buf, bytestoread, &nread);

      /* read would've blocked */
      if(CURLE_AGAIN == result)
        break; /* get out of loop */

      if(result > 0)
        return result;
    }
    else {
      /* read nothing but since we wanted nothing we consider this an OK
         situation to proceed from */
      DEBUGF(infof(data, "readwrite_data: we're done!\n"));
      nread = 0;
    }

    if(!k->bytecount) {
      Curl_pgrsTime(data, TIMER_STARTTRANSFER);
      if(k->exp100 > EXP100_SEND_DATA)
        /* set time stamp to compare with when waiting for the 100 */
        k->start100 = Curl_now();
    }

    *didwhat |= KEEP_RECV;
    /* indicates data of zero size, i.e. empty file */
    is_empty_data = ((nread == 0) && (k->bodywrites == 0)) ? TRUE : FALSE;

    /* NUL terminate, allowing string ops to be used */
    if(0 < nread || is_empty_data) {
      k->buf[nread] = 0;
    }
    else {
      /* if we receive 0 or less here, the server closed the connection
         and we bail out from this! */
      DEBUGF(infof(data, "nread <= 0, server closed connection, bailing\n"));
      k->keepon &= ~KEEP_RECV;
      break;
    }

    /* Default buffer to use when we write the buffer, it may be changed
       in the flow below before the actual storing is done. */
    k->str = k->buf;

    if(conn->handler->readwrite) {
      result = conn->handler->readwrite(data, conn, &nread, &readmore);
      if(result)
        return result;
      if(readmore)
        break;
    }

#ifndef CURL_DISABLE_HTTP
    /* Since this is a two-state thing, we check if we are parsing
       headers at the moment or not. */
    if(k->header) {
      /* we are in parse-the-header-mode */
      bool stop_reading = FALSE;
      result = Curl_http_readwrite_headers(data, conn, &nread, &stop_reading);
      if(result)
        return result;

      if(conn->handler->readwrite &&
         (k->maxdownload <= 0 && nread > 0)) {
        result = conn->handler->readwrite(data, conn, &nread, &readmore);
        if(result)
          return result;
        if(readmore)
          break;
      }

      if(stop_reading) {
        /* We've stopped dealing with input, get out of the do-while loop */

        if(nread > 0) {
          infof(data,
                "Excess found:"
                " excess = %zd"
                " url = %s (zero-length body)\n",
                nread, data->state.up.path);
        }

        break;
      }
    }
#endif /* CURL_DISABLE_HTTP */


    /* This is not an 'else if' since it may be a rest from the header
       parsing, where the beginning of the buffer is headers and the end
       is non-headers. */
    if(k->str && !k->header && (nread > 0 || is_empty_data)) {

      if(data->set.opt_no_body) {
        /* data arrives although we want none, bail out */
        streamclose(conn, "ignoring body");
        *done = TRUE;
        return CURLE_WEIRD_SERVER_REPLY;
      }

#ifndef CURL_DISABLE_HTTP
      if(0 == k->bodywrites && !is_empty_data) {
        /* These checks are only made the first time we are about to
           write a piece of the body */
        if(conn->handler->protocol&(PROTO_FAMILY_HTTP|CURLPROTO_RTSP)) {
          /* HTTP-only checks */

          if(data->req.newurl) {
            if(conn->bits.close) {
              /* Abort after the headers if "follow Location" is set
                 and we're set to close anyway. */
              k->keepon &= ~KEEP_RECV;
              *done = TRUE;
              return CURLE_OK;
            }
            /* We have a new url to load, but since we want to be able
               to re-use this connection properly, we read the full
               response in "ignorebody" mode */
            k->ignorebody = TRUE;
            infof(data, "Ignoring the response-body\n");
          }
          if(data->state.resume_from && !k->content_range &&
             (data->set.httpreq == HTTPREQ_GET) &&
             !k->ignorebody) {

            if(k->size == data->state.resume_from) {
              /* The resume point is at the end of file, consider this fine
                 even if it doesn't allow resume from here. */
              infof(data, "The entire document is already downloaded");
              connclose(conn, "already downloaded");
              /* Abort download */
              k->keepon &= ~KEEP_RECV;
              *done = TRUE;
              return CURLE_OK;
            }

            /* we wanted to resume a download, although the server doesn't
             * seem to support this and we did this with a GET (if it
             * wasn't a GET we did a POST or PUT resume) */
            failf(data, "HTTP server doesn't seem to support "
                  "byte ranges. Cannot resume.");
            return CURLE_RANGE_ERROR;
          }

          if(data->set.timecondition && !data->state.range) {
            /* A time condition has been set AND no ranges have been
               requested. This seems to be what chapter 13.3.4 of
               RFC 2616 defines to be the correct action for an
               HTTP/1.1 client */

            if(!Curl_meets_timecondition(data, k->timeofdoc)) {
              *done = TRUE;
              /* We're simulating an HTTP 304 from the server so we return
                 what should have been returned from the server */
              data->info.httpcode = 304;
              infof(data, "Simulate a HTTP 304 response!\n");
              /* we abort the transfer before it is completed == we ruin the
                 re-use ability. Close the connection */
              connclose(conn, "Simulated 304 handling");
              return CURLE_OK;
            }
          } /* we have a time condition */

        } /* this is HTTP or RTSP */
      } /* this is the first time we write a body part */
#endif /* CURL_DISABLE_HTTP */

      k->bodywrites++;

      /* pass data to the debug function before it gets "dechunked" */
      if(data->set.verbose) {
        if(k->badheader) {
          Curl_debug(data, CURLINFO_DATA_IN, data->state.headerbuff,
                     (size_t)k->hbuflen);
          if(k->badheader == HEADER_PARTHEADER)
            Curl_debug(data, CURLINFO_DATA_IN,
                       k->str, (size_t)nread);
        }
        else
          Curl_debug(data, CURLINFO_DATA_IN,
                     k->str, (size_t)nread);
      }

#ifndef CURL_DISABLE_HTTP
      if(k->chunk) {
        /*
         * Here comes a chunked transfer flying and we need to decode this
         * properly.  While the name says read, this function both reads
         * and writes away the data. The returned 'nread' holds the number
         * of actual data it wrote to the client.
         */
        CURLcode extra;
        CHUNKcode res =
          Curl_httpchunk_read(conn, k->str, nread, &nread, &extra);

        if(CHUNKE_OK < res) {
          if(CHUNKE_PASSTHRU_ERROR == res) {
            failf(data, "Failed reading the chunked-encoded stream");
            return extra;
          }
          failf(data, "%s in chunked-encoding", Curl_chunked_strerror(res));
          return CURLE_RECV_ERROR;
        }
        if(CHUNKE_STOP == res) {
          size_t dataleft;
          /* we're done reading chunks! */
          k->keepon &= ~KEEP_RECV; /* read no more */

          /* There are now possibly N number of bytes at the end of the
             str buffer that weren't written to the client.
             Push it back to be read on the next pass. */

          dataleft = conn->chunk.dataleft;
          if(dataleft != 0) {
            infof(conn->data, "Leftovers after chunking: %zu bytes\n",
                  dataleft);
          }
        }
        /* If it returned OK, we just keep going */
      }
#endif   /* CURL_DISABLE_HTTP */

      /* Account for body content stored in the header buffer */
      if((k->badheader == HEADER_PARTHEADER) && !k->ignorebody) {
        DEBUGF(infof(data, "Increasing bytecount by %zu from hbuflen\n",
                     k->hbuflen));
        k->bytecount += k->hbuflen;
      }

      if((-1 != k->maxdownload) &&
         (k->bytecount + nread >= k->maxdownload)) {

        excess = (size_t)(k->bytecount + nread - k->maxdownload);
        if(excess > 0 && !k->ignorebody) {
          infof(data,
                "Excess found in a read:"
                " excess = %zu"
                ", size = %" CURL_FORMAT_CURL_OFF_T
                ", maxdownload = %" CURL_FORMAT_CURL_OFF_T
                ", bytecount = %" CURL_FORMAT_CURL_OFF_T "\n",
                excess, k->size, k->maxdownload, k->bytecount);
        }

        nread = (ssize_t) (k->maxdownload - k->bytecount);
        if(nread < 0) /* this should be unusual */
          nread = 0;

        k->keepon &= ~KEEP_RECV; /* we're done reading */
      }

      k->bytecount += nread;

      Curl_pgrsSetDownloadCounter(data, k->bytecount);

      if(!k->chunk && (nread || k->badheader || is_empty_data)) {
        /* If this is chunky transfer, it was already written */

        if(k->badheader && !k->ignorebody) {
          /* we parsed a piece of data wrongly assuming it was a header
             and now we output it as body instead */

          /* Don't let excess data pollute body writes */
          if(k->maxdownload == -1 || (curl_off_t)k->hbuflen <= k->maxdownload)
            result = Curl_client_write(conn, CLIENTWRITE_BODY,
                                       data->state.headerbuff,
                                       k->hbuflen);
          else
            result = Curl_client_write(conn, CLIENTWRITE_BODY,
                                       data->state.headerbuff,
                                       (size_t)k->maxdownload);

          if(result)
            return result;
        }
        if(k->badheader < HEADER_ALLBAD) {
          /* This switch handles various content encodings. If there's an
             error here, be sure to check over the almost identical code
             in http_chunks.c.
             Make sure that ALL_CONTENT_ENCODINGS contains all the
             encodings handled here. */
          if(conn->data->set.http_ce_skip || !k->writer_stack) {
            if(!k->ignorebody) {
#ifndef CURL_DISABLE_POP3
              if(conn->handler->protocol & PROTO_FAMILY_POP3)
                result = Curl_pop3_write(conn, k->str, nread);
              else
#endif /* CURL_DISABLE_POP3 */
                result = Curl_client_write(conn, CLIENTWRITE_BODY, k->str,
                                           nread);
            }
          }
          else if(!k->ignorebody)
            result = Curl_unencode_write(conn, k->writer_stack, k->str, nread);
        }
        k->badheader = HEADER_NORMAL; /* taken care of now */

        if(result)
          return result;
      }

    } /* if(!header and data to read) */

    if(conn->handler->readwrite && excess && !conn->bits.stream_was_rewound) {
      /* Parse the excess data */
      k->str += nread;

      if(&k->str[excess] > &k->buf[data->set.buffer_size]) {
        /* the excess amount was too excessive(!), make sure
           it doesn't read out of buffer */
        excess = &k->buf[data->set.buffer_size] - k->str;
      }
      nread = (ssize_t)excess;

      result = conn->handler->readwrite(data, conn, &nread, &readmore);
      if(result)
        return result;

      if(readmore)
        k->keepon |= KEEP_RECV; /* we're not done reading */
      break;
    }

    if(is_empty_data) {
      /* if we received nothing, the server closed the connection and we
         are done */
      k->keepon &= ~KEEP_RECV;
    }

    if(k->keepon & KEEP_RECV_PAUSE) {
      /* this is a paused transfer */
      break;
    }

  } while(data_pending(data) && maxloops--);

  if(maxloops <= 0) {
    /* we mark it as read-again-please */
    conn->cselect_bits = CURL_CSELECT_IN;
    *comeback = TRUE;
  }

  if(((k->keepon & (KEEP_RECV|KEEP_SEND)) == KEEP_SEND) &&
     conn->bits.close) {
    /* When we've read the entire thing and the close bit is set, the server
       may now close the connection. If there's now any kind of sending going
       on from our side, we need to stop that immediately. */
    infof(data, "we are done reading and this is set to close, stop send\n");
    k->keepon &= ~KEEP_SEND; /* no writing anymore either */
  }

  return CURLE_OK;
}

CURLcode Curl_done_sending(struct connectdata *conn,
                           struct SingleRequest *k)
{
  k->keepon &= ~KEEP_SEND; /* we're done writing */

  /* These functions should be moved into the handler struct! */
  Curl_http2_done_sending(conn);
  Curl_quic_done_sending(conn);

  if(conn->bits.rewindaftersend) {
    CURLcode result = Curl_readrewind(conn);
    if(result)
      return result;
  }
  return CURLE_OK;
}

#if defined(WIN32) && !defined(USE_LWIPSOCK)
#ifndef SIO_IDEAL_SEND_BACKLOG_QUERY
#define SIO_IDEAL_SEND_BACKLOG_QUERY 0x4004747B
#endif

static void win_update_buffer_size(curl_socket_t sockfd)
{
  int result;
  ULONG ideal;
  DWORD ideallen;
  result = WSAIoctl(sockfd, SIO_IDEAL_SEND_BACKLOG_QUERY, 0, 0,
                    &ideal, sizeof(ideal), &ideallen, 0, 0);
  if(result == 0) {
    setsockopt(sockfd, SOL_SOCKET, SO_SNDBUF,
               (const char *)&ideal, sizeof(ideal));
  }
}
#else
#define win_update_buffer_size(x)
#endif

/*
 * Send data to upload to the server, when the socket is writable.
 */
static CURLcode readwrite_upload(struct Curl_easy *data,
                                 struct connectdata *conn,
                                 int *didwhat)
{
  ssize_t i, si;
  ssize_t bytes_written;
  CURLcode result;
  ssize_t nread; /* number of bytes read */
  bool sending_http_headers = FALSE;
  struct SingleRequest *k = &data->req;

  if((k->bytecount == 0) && (k->writebytecount == 0))
    Curl_pgrsTime(data, TIMER_STARTTRANSFER);

  *didwhat |= KEEP_SEND;

  do {
    /* only read more data if there's no upload data already
       present in the upload buffer */
    if(0 == k->upload_present) {
      result = Curl_get_upload_buffer(data);
      if(result)
        return result;
      /* init the "upload from here" pointer */
      k->upload_fromhere = data->state.ulbuf;

      if(!k->upload_done) {
        /* HTTP pollution, this should be written nicer to become more
           protocol agnostic. */
        size_t fillcount;
        struct HTTP *http = k->protop;

        if((k->exp100 == EXP100_SENDING_REQUEST) &&
           (http->sending == HTTPSEND_BODY)) {
          /* If this call is to send body data, we must take some action:
             We have sent off the full HTTP 1.1 request, and we shall now
             go into the Expect: 100 state and await such a header */
          k->exp100 = EXP100_AWAITING_CONTINUE; /* wait for the header */
          k->keepon &= ~KEEP_SEND;         /* disable writing */
          k->start100 = Curl_now();       /* timeout count starts now */
          *didwhat &= ~KEEP_SEND;  /* we didn't write anything actually */
          /* set a timeout for the multi interface */
          Curl_expire(data, data->set.expect_100_timeout, EXPIRE_100_TIMEOUT);
          break;
        }

        if(conn->handler->protocol&(PROTO_FAMILY_HTTP|CURLPROTO_RTSP)) {
          if(http->sending == HTTPSEND_REQUEST)
            /* We're sending the HTTP request headers, not the data.
               Remember that so we don't change the line endings. */
            sending_http_headers = TRUE;
          else
            sending_http_headers = FALSE;
        }

        result = Curl_fillreadbuffer(conn, data->set.upload_buffer_size,
                                     &fillcount);
        if(result)
          return result;

        nread = fillcount;
      }
      else
        nread = 0; /* we're done uploading/reading */

      if(!nread && (k->keepon & KEEP_SEND_PAUSE)) {
        /* this is a paused transfer */
        break;
      }
      if(nread <= 0) {
        result = Curl_done_sending(conn, k);
        if(result)
          return result;
        break;
      }

      /* store number of bytes available for upload */
      k->upload_present = nread;

      /* convert LF to CRLF if so asked */
      if((!sending_http_headers) && (
#ifdef CURL_DO_LINEEND_CONV
         /* always convert if we're FTPing in ASCII mode */
         (data->set.prefer_ascii) ||
#endif
         (data->set.crlf))) {
        /* Do we need to allocate a scratch buffer? */
        if(!data->state.scratch) {
          data->state.scratch = malloc(2 * data->set.upload_buffer_size);
          if(!data->state.scratch) {
            failf(data, "Failed to alloc scratch buffer!");

            return CURLE_OUT_OF_MEMORY;
          }
        }

        /*
         * ASCII/EBCDIC Note: This is presumably a text (not binary)
         * transfer so the data should already be in ASCII.
         * That means the hex values for ASCII CR (0x0d) & LF (0x0a)
         * must be used instead of the escape sequences \r & \n.
         */
        for(i = 0, si = 0; i < nread; i++, si++) {
          if(k->upload_fromhere[i] == 0x0a) {
            data->state.scratch[si++] = 0x0d;
            data->state.scratch[si] = 0x0a;
            if(!data->set.crlf) {
              /* we're here only because FTP is in ASCII mode...
                 bump infilesize for the LF we just added */
              if(data->state.infilesize != -1)
                data->state.infilesize++;
            }
          }
          else
            data->state.scratch[si] = k->upload_fromhere[i];
        }

        if(si != nread) {
          /* only perform the special operation if we really did replace
             anything */
          nread = si;

          /* upload from the new (replaced) buffer instead */
          k->upload_fromhere = data->state.scratch;

          /* set the new amount too */
          k->upload_present = nread;
        }
      }

#ifndef CURL_DISABLE_SMTP
      if(conn->handler->protocol & PROTO_FAMILY_SMTP) {
        result = Curl_smtp_escape_eob(conn, nread);
        if(result)
          return result;
      }
#endif /* CURL_DISABLE_SMTP */
    } /* if 0 == k->upload_present */
    else {
      /* We have a partial buffer left from a previous "round". Use
         that instead of reading more data */
    }

    /* write to socket (send away data) */
    result = Curl_write(conn,
                        conn->writesockfd,  /* socket to send to */
                        k->upload_fromhere, /* buffer pointer */
                        k->upload_present,  /* buffer size */
                        &bytes_written);    /* actually sent */
    if(result)
      return result;

    win_update_buffer_size(conn->writesockfd);

    if(data->set.verbose)
      /* show the data before we change the pointer upload_fromhere */
      Curl_debug(data, CURLINFO_DATA_OUT, k->upload_fromhere,
                 (size_t)bytes_written);

    k->writebytecount += bytes_written;
    Curl_pgrsSetUploadCounter(data, k->writebytecount);

    if((!k->upload_chunky || k->forbidchunk) &&
       (k->writebytecount == data->state.infilesize)) {
      /* we have sent all data we were supposed to */
      k->upload_done = TRUE;
      infof(data, "We are completely uploaded and fine\n");
    }

    if(k->upload_present != bytes_written) {
      /* we only wrote a part of the buffer (if anything), deal with it! */

      /* store the amount of bytes left in the buffer to write */
      k->upload_present -= bytes_written;

      /* advance the pointer where to find the buffer when the next send
         is to happen */
      k->upload_fromhere += bytes_written;
    }
    else {
      /* we've uploaded that buffer now */
      result = Curl_get_upload_buffer(data);
      if(result)
        return result;
      k->upload_fromhere = data->state.ulbuf;
      k->upload_present = 0; /* no more bytes left */

      if(k->upload_done) {
        result = Curl_done_sending(conn, k);
        if(result)
          return result;
      }
    }


  } while(0); /* just to break out from! */

  return CURLE_OK;
}

/*
 * Curl_readwrite() is the low-level function to be called when data is to
 * be read and written to/from the connection.
 *
 * return '*comeback' TRUE if we didn't properly drain the socket so this
 * function should get called again without select() or similar in between!
 */
CURLcode Curl_readwrite(struct connectdata *conn,
                        struct Curl_easy *data,
                        bool *done,
                        bool *comeback)
{
  struct SingleRequest *k = &data->req;
  CURLcode result;
  int didwhat = 0;

  curl_socket_t fd_read;
  curl_socket_t fd_write;
  int select_res = conn->cselect_bits;

  conn->cselect_bits = 0;

  /* only use the proper socket if the *_HOLD bit is not set simultaneously as
     then we are in rate limiting state in that transfer direction */

  if((k->keepon & KEEP_RECVBITS) == KEEP_RECV)
    fd_read = conn->sockfd;
  else
    fd_read = CURL_SOCKET_BAD;

  if((k->keepon & KEEP_SENDBITS) == KEEP_SEND)
    fd_write = conn->writesockfd;
  else
    fd_write = CURL_SOCKET_BAD;

  if(conn->data->state.drain) {
    select_res |= CURL_CSELECT_IN;
    DEBUGF(infof(data, "Curl_readwrite: forcibly told to drain data\n"));
  }

  if(!select_res) /* Call for select()/poll() only, if read/write/error
                     status is not known. */
    select_res = Curl_socket_check(fd_read, CURL_SOCKET_BAD, fd_write, 0);

  if(select_res == CURL_CSELECT_ERR) {
    failf(data, "select/poll returned error");
    return CURLE_SEND_ERROR;
  }

  /* We go ahead and do a read if we have a readable socket or if
     the stream was rewound (in which case we have data in a
     buffer) */
  if((k->keepon & KEEP_RECV) &&
     ((select_res & CURL_CSELECT_IN) || conn->bits.stream_was_rewound)) {

    result = readwrite_data(data, conn, k, &didwhat, done, comeback);
    if(result || *done)
      return result;
  }

  /* If we still have writing to do, we check if we have a writable socket. */
  if((k->keepon & KEEP_SEND) && (select_res & CURL_CSELECT_OUT)) {
    /* write */

    result = readwrite_upload(data, conn, &didwhat);
    if(result)
      return result;
  }

  k->now = Curl_now();
  if(didwhat) {
    ;
  }
  else {
    /* no read no write, this is a timeout? */
    if(k->exp100 == EXP100_AWAITING_CONTINUE) {
      /* This should allow some time for the header to arrive, but only a
         very short time as otherwise it'll be too much wasted time too
         often. */

      /* Quoting RFC2616, section "8.2.3 Use of the 100 (Continue) Status":

         Therefore, when a client sends this header field to an origin server
         (possibly via a proxy) from which it has never seen a 100 (Continue)
         status, the client SHOULD NOT wait for an indefinite period before
         sending the request body.

      */

      timediff_t ms = Curl_timediff(k->now, k->start100);
      if(ms >= data->set.expect_100_timeout) {
        /* we've waited long enough, continue anyway */
        k->exp100 = EXP100_SEND_DATA;
        k->keepon |= KEEP_SEND;
        Curl_expire_done(data, EXPIRE_100_TIMEOUT);
        infof(data, "Done waiting for 100-continue\n");
      }
    }
  }

  if(Curl_pgrsUpdate(conn))
    result = CURLE_ABORTED_BY_CALLBACK;
  else
    result = Curl_speedcheck(data, k->now);
  if(result)
    return result;

  if(k->keepon) {
    if(0 > Curl_timeleft(data, &k->now, FALSE)) {
      if(k->size != -1) {
        failf(data, "Operation timed out after %" CURL_FORMAT_TIMEDIFF_T
              " milliseconds with %" CURL_FORMAT_CURL_OFF_T " out of %"
              CURL_FORMAT_CURL_OFF_T " bytes received",
              Curl_timediff(k->now, data->progress.t_startsingle),
              k->bytecount, k->size);
      }
      else {
        failf(data, "Operation timed out after %" CURL_FORMAT_TIMEDIFF_T
              " milliseconds with %" CURL_FORMAT_CURL_OFF_T " bytes received",
              Curl_timediff(k->now, data->progress.t_startsingle),
              k->bytecount);
      }
      return CURLE_OPERATION_TIMEDOUT;
    }
  }
  else {
    /*
     * The transfer has been performed. Just make some general checks before
     * returning.
     */

    if(!(data->set.opt_no_body) && (k->size != -1) &&
       (k->bytecount != k->size) &&
#ifdef CURL_DO_LINEEND_CONV
       /* Most FTP servers don't adjust their file SIZE response for CRLFs,
          so we'll check to see if the discrepancy can be explained
          by the number of CRLFs we've changed to LFs.
       */
       (k->bytecount != (k->size + data->state.crlf_conversions)) &&
#endif /* CURL_DO_LINEEND_CONV */
       !k->newurl) {
      failf(data, "transfer closed with %" CURL_FORMAT_CURL_OFF_T
            " bytes remaining to read", k->size - k->bytecount);
      return CURLE_PARTIAL_FILE;
    }
    if(!(data->set.opt_no_body) && k->chunk &&
       (conn->chunk.state != CHUNK_STOP)) {
      /*
       * In chunked mode, return an error if the connection is closed before
       * the empty (terminating) chunk has been read.
       *
       * The condition above used to check for
       * conn->proto.http->chunk.datasize != 0 which is true after reading
       * *any* chunk, not just the empty chunk.
       *
       */
      failf(data, "transfer closed with outstanding read data remaining");
      return CURLE_PARTIAL_FILE;
    }
    if(Curl_pgrsUpdate(conn))
      return CURLE_ABORTED_BY_CALLBACK;
  }

  /* Now update the "done" boolean we return */
  *done = (0 == (k->keepon&(KEEP_RECV|KEEP_SEND|
                            KEEP_RECV_PAUSE|KEEP_SEND_PAUSE))) ? TRUE : FALSE;

  return CURLE_OK;
}

/*
 * Curl_single_getsock() gets called by the multi interface code when the app
 * has requested to get the sockets for the current connection. This function
 * will then be called once for every connection that the multi interface
 * keeps track of. This function will only be called for connections that are
 * in the proper state to have this information available.
 */
int Curl_single_getsock(const struct connectdata *conn,
                        curl_socket_t *sock)
{
  const struct Curl_easy *data = conn->data;
  int bitmap = GETSOCK_BLANK;
  unsigned sockindex = 0;

  if(conn->handler->perform_getsock)
    return conn->handler->perform_getsock(conn, sock);

  /* don't include HOLD and PAUSE connections */
  if((data->req.keepon & KEEP_RECVBITS) == KEEP_RECV) {

    DEBUGASSERT(conn->sockfd != CURL_SOCKET_BAD);

    bitmap |= GETSOCK_READSOCK(sockindex);
    sock[sockindex] = conn->sockfd;
  }

  /* don't include HOLD and PAUSE connections */
  if((data->req.keepon & KEEP_SENDBITS) == KEEP_SEND) {

    if((conn->sockfd != conn->writesockfd) ||
       bitmap == GETSOCK_BLANK) {
      /* only if they are not the same socket and we have a readable
         one, we increase index */
      if(bitmap != GETSOCK_BLANK)
        sockindex++; /* increase index if we need two entries */

      DEBUGASSERT(conn->writesockfd != CURL_SOCKET_BAD);

      sock[sockindex] = conn->writesockfd;
    }

    bitmap |= GETSOCK_WRITESOCK(sockindex);
  }

  return bitmap;
}
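
/* Illustrative note (added comment, not in the original file): the multi code
   interprets the returned bitmap together with the sock[] array along these
   lines (sketch only):

     curl_socket_t sock[MAX_SOCKSPEREASYHANDLE];
     int bitmap = Curl_single_getsock(conn, sock);
     if(bitmap & GETSOCK_READSOCK(0))
       ... wait for readability on sock[0] ...
     if(bitmap & GETSOCK_WRITESOCK(0))
       ... wait for writability on sock[0] ...

   The exact caller-side loop lives in multi.c. */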
1401 
1402 /* Curl_init_CONNECT() gets called each time the handle switches to CONNECT
1403    which means this gets called once for each subsequent redirect etc */
Curl_init_CONNECT(struct Curl_easy * data)1404 void Curl_init_CONNECT(struct Curl_easy *data)
1405 {
1406   data->state.fread_func = data->set.fread_func_set;
1407   data->state.in = data->set.in_set;
1408 }
1409 
1410 /*
1411  * Curl_pretransfer() is called immediately before a transfer starts, and only
1412  * once for one transfer no matter if it has redirects or do multi-pass
1413  * authentication etc.
1414  */
Curl_pretransfer(struct Curl_easy * data)1415 CURLcode Curl_pretransfer(struct Curl_easy *data)
1416 {
1417   CURLcode result;
1418 
1419   if(!data->change.url && !data->set.uh) {
1420     /* we can't do anything without URL */
1421     failf(data, "No URL set!");
1422     return CURLE_URL_MALFORMAT;
1423   }
1424 
1425   /* since the URL may have been redirected in a previous use of this handle */
1426   if(data->change.url_alloc) {
1427     /* the already set URL is allocated, free it first! */
1428     Curl_safefree(data->change.url);
1429     data->change.url_alloc = FALSE;
1430   }
1431 
1432   if(!data->change.url && data->set.uh) {
1433     CURLUcode uc;
1434     uc = curl_url_get(data->set.uh,
1435                         CURLUPART_URL, &data->set.str[STRING_SET_URL], 0);
1436     if(uc) {
1437       failf(data, "No URL set!");
1438       return CURLE_URL_MALFORMAT;
1439     }
1440   }
1441 
1442   data->change.url = data->set.str[STRING_SET_URL];
1443 
1444   /* Init the SSL session ID cache here. We do it here since we want to do it
1445      after the *_setopt() calls (that could specify the size of the cache) but
1446      before any transfer takes place. */
1447   result = Curl_ssl_initsessions(data, data->set.general_ssl.max_ssl_sessions);
1448   if(result)
1449     return result;
1450 
1451   data->state.wildcardmatch = data->set.wildcard_enabled;
1452   data->set.followlocation = 0; /* reset the location-follow counter */
1453   data->state.this_is_a_follow = FALSE; /* reset this */
1454   data->state.errorbuf = FALSE; /* no error has occurred */
1455   data->state.httpversion = 0; /* don't assume any particular server version */
1456 
1457   data->state.authproblem = FALSE;
1458   data->state.authhost.want = data->set.httpauth;
1459   data->state.authproxy.want = data->set.proxyauth;
1460   Curl_safefree(data->info.wouldredirect);
1461   data->info.wouldredirect = NULL;
1462 
1463   if(data->set.httpreq == HTTPREQ_PUT)
1464     data->state.infilesize = data->set.filesize;
1465   else if((data->set.httpreq != HTTPREQ_GET) &&
1466           (data->set.httpreq != HTTPREQ_HEAD)) {
1467     data->state.infilesize = data->set.postfieldsize;
1468     if(data->set.postfields && (data->state.infilesize == -1))
1469       data->state.infilesize = (curl_off_t)strlen(data->set.postfields);
1470   }
1471   else
1472     data->state.infilesize = 0;
1473 
1474   /* If there is a list of cookie files to read, do it now! */
1475   if(data->change.cookielist)
1476     Curl_cookie_loadfiles(data);
1477 
1478   /* If there is a list of host pairs to deal with */
1479   if(data->change.resolve)
1480     result = Curl_loadhostpairs(data);
1481 
1482   if(!result) {
1483     /* Allow data->set.use_port to set which port to use. This needs to be
1484      * disabled for example when we follow Location: headers to URLs using
1485      * different ports! */
1486     data->state.allow_port = TRUE;
1487 
1488 #if defined(HAVE_SIGNAL) && defined(SIGPIPE) && !defined(HAVE_MSG_NOSIGNAL)
1489     /*************************************************************
1490      * Tell signal handler to ignore SIGPIPE
1491      *************************************************************/
1492     if(!data->set.no_signal)
1493       data->state.prev_signal = signal(SIGPIPE, SIG_IGN);
1494 #endif

    Curl_initinfo(data); /* reset session-specific information "variables" */
    Curl_pgrsResetTransferSizes(data);
    Curl_pgrsStartNow(data);

    /* In case the handle is re-used and an authentication method was picked
       in the session we need to make sure we only use the one(s) we now
       consider to be fine */
    data->state.authhost.picked &= data->state.authhost.want;
    data->state.authproxy.picked &= data->state.authproxy.want;

#ifndef CURL_DISABLE_FTP
    if(data->state.wildcardmatch) {
      struct WildcardData *wc = &data->wildcard;
      if(wc->state < CURLWC_INIT) {
        result = Curl_wildcard_init(wc); /* init wildcard structures */
        if(result)
          return CURLE_OUT_OF_MEMORY;
      }
    }
#endif
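
    /* For illustration: wildcard matching is the FTP feature enabled with
         curl_easy_setopt(easy, CURLOPT_WILDCARDMATCH, 1L);
       combined with a URL such as "ftp://example.com/files/*.txt"
       ('easy' and the URL are hypothetical). */
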
    Curl_http2_init_state(&data->state);
  }

  return result;
}

/*
 * Curl_posttransfer() is called immediately after a transfer ends
 */
CURLcode Curl_posttransfer(struct Curl_easy *data)
{
#if defined(HAVE_SIGNAL) && defined(SIGPIPE) && !defined(HAVE_MSG_NOSIGNAL)
  /* restore the signal handler for SIGPIPE before we get back */
  if(!data->set.no_signal)
    signal(SIGPIPE, data->state.prev_signal);
#else
  (void)data; /* unused parameter */
#endif

  return CURLE_OK;
}

/*
 * Curl_follow() handles the URL redirect magic. Pass in the 'newurl' string
 * as given by the remote server and set up the new URL to request.
 *
 * This function DOES NOT FREE the given url.
 */
CURLcode Curl_follow(struct Curl_easy *data,
                     char *newurl,    /* the Location: string */
                     followtype type) /* see transfer.h */
{
#ifdef CURL_DISABLE_HTTP
  (void)data;
  (void)newurl;
  (void)type;
  /* Location: following will not happen when HTTP is disabled */
  return CURLE_TOO_MANY_REDIRECTS;
#else

  /* Location: redirect */
  bool disallowport = FALSE;
  bool reachedmax = FALSE;
  CURLUcode uc;

  if(type == FOLLOW_REDIR) {
    if((data->set.maxredirs != -1) &&
       (data->set.followlocation >= data->set.maxredirs)) {
      reachedmax = TRUE;
      type = FOLLOW_FAKE; /* switch to fake to store the would-be-redirected
                             to URL */
    }
    else {
      /* mark the next request as a followed location: */
      data->state.this_is_a_follow = TRUE;

      data->set.followlocation++; /* count location-followers */

      if(data->set.http_auto_referer) {
        /* We are asked to automatically set the previous URL as the referer
           when we get the next URL. We pick the ->url field, which may or may
           not be 100% correct */

        if(data->change.referer_alloc) {
          Curl_safefree(data->change.referer);
          data->change.referer_alloc = FALSE;
        }

        data->change.referer = strdup(data->change.url);
        if(!data->change.referer)
          return CURLE_OUT_OF_MEMORY;
        data->change.referer_alloc = TRUE; /* yes, free this later */
      }
    }
  }
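
  /* For illustration: the redirect limit and the automatic Referer: handling
     tested above correspond to caller-side options such as
       curl_easy_setopt(easy, CURLOPT_MAXREDIRS, 10L);
       curl_easy_setopt(easy, CURLOPT_AUTOREFERER, 1L);
     where -1 for CURLOPT_MAXREDIRS means no limit ('easy' and the values are
     hypothetical). */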

  if(Curl_is_absolute_url(newurl, NULL, MAX_SCHEME_LEN))
    /* This is an absolute URL, don't allow the custom port number */
    disallowport = TRUE;

  DEBUGASSERT(data->state.uh);
  uc = curl_url_set(data->state.uh, CURLUPART_URL, newurl,
                    (type == FOLLOW_FAKE) ? CURLU_NON_SUPPORT_SCHEME :
                    ((type == FOLLOW_REDIR) ? CURLU_URLENCODE : 0) );
  if(uc) {
    if(type != FOLLOW_FAKE)
      return Curl_uc_to_curlcode(uc);

    /* the URL could not be parsed for some reason, but since this is FAKE
       mode, just duplicate the field as-is */
    newurl = strdup(newurl);
    if(!newurl)
      return CURLE_OUT_OF_MEMORY;
  }
  else {

    uc = curl_url_get(data->state.uh, CURLUPART_URL, &newurl, 0);
    if(uc)
      return Curl_uc_to_curlcode(uc);
  }

  if(type == FOLLOW_FAKE) {
    /* we were only figuring out the new URL, not actually following it, so
       store it and get out */
    data->info.wouldredirect = newurl;

    if(reachedmax) {
      failf(data, "Maximum (%ld) redirects followed", data->set.maxredirs);
      return CURLE_TOO_MANY_REDIRECTS;
    }
    return CURLE_OK;
  }
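
  /* For illustration: the 'wouldredirect' URL stored above is what an
     application reads back with
       char *url = NULL;
       curl_easy_getinfo(easy, CURLINFO_REDIRECT_URL, &url);
     when it handles redirects itself instead of enabling
     CURLOPT_FOLLOWLOCATION ('easy' and 'url' are hypothetical). */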

  if(disallowport)
    data->state.allow_port = FALSE;

  if(data->change.url_alloc)
    Curl_safefree(data->change.url);

  data->change.url = newurl;
  data->change.url_alloc = TRUE;

  infof(data, "Issue another request to this URL: '%s'\n", data->change.url);

  /*
   * We get here when the HTTP code is 300-399 (and 401). We need to act
   * differently based on exactly which response code was received.
   *
   * News from 7.10.6: we can also get here on a 401 or 407, in case we act on
   * an HTTP (proxy-) authentication scheme other than Basic.
   */
  switch(data->info.httpcode) {
    /* 401 - Act on a WWW-Authenticate, we keep on moving and do the
       Authorization: XXXX header in the HTTP request code snippet */
    /* 407 - Act on a Proxy-Authenticate, we keep on moving and do the
       Proxy-Authorization: XXXX header in the HTTP request code snippet */
    /* 300 - Multiple Choices */
    /* 306 - Not used */
    /* 307 - Temporary Redirect */
  default:  /* for all above (and the unknown ones) */
    /* Some codes are explicitly mentioned since I've checked RFC2616 and they
     * seem to be OK to POST to.
     */
    break;
  case 301: /* Moved Permanently */
    /* (quote from RFC7231, section 6.4.2)
     *
     * Note: For historical reasons, a user agent MAY change the request
     * method from POST to GET for the subsequent request.  If this
     * behavior is undesired, the 307 (Temporary Redirect) status code
     * can be used instead.
     *
     * ----
     *
     * Many webservers expect this, so these servers often answer a POST
     * request with an error page. To be sure that libcurl gets the page that
     * most user agents would get, libcurl has to force GET.
     *
     * This behaviour is forbidden by RFC1945 and the obsolete RFC2616, and
     * can be overridden with CURLOPT_POSTREDIR.
     */
    if((data->set.httpreq == HTTPREQ_POST
        || data->set.httpreq == HTTPREQ_POST_FORM
        || data->set.httpreq == HTTPREQ_POST_MIME)
       && !(data->set.keep_post & CURL_REDIR_POST_301)) {
      infof(data, "Switch from POST to GET\n");
      data->set.httpreq = HTTPREQ_GET;
    }
    break;
  case 302: /* Found */
    /* (quote from RFC7231, section 6.4.3)
     *
     * Note: For historical reasons, a user agent MAY change the request
     * method from POST to GET for the subsequent request.  If this
     * behavior is undesired, the 307 (Temporary Redirect) status code
     * can be used instead.
     *
     * ----
     *
     * Many webservers expect this, so these servers often answer a POST
     * request with an error page. To be sure that libcurl gets the page that
     * most user agents would get, libcurl has to force GET.
     *
     * This behaviour is forbidden by RFC1945 and the obsolete RFC2616, and
     * can be overridden with CURLOPT_POSTREDIR.
     */
    if((data->set.httpreq == HTTPREQ_POST
        || data->set.httpreq == HTTPREQ_POST_FORM
        || data->set.httpreq == HTTPREQ_POST_MIME)
       && !(data->set.keep_post & CURL_REDIR_POST_302)) {
      infof(data, "Switch from POST to GET\n");
      data->set.httpreq = HTTPREQ_GET;
    }
    break;

  case 303: /* See Other */
    /* Disable all types of POST requests, unless the user explicitly
       asks for POST after POST */
    if(data->set.httpreq != HTTPREQ_GET
      && !(data->set.keep_post & CURL_REDIR_POST_303)) {
      data->set.httpreq = HTTPREQ_GET; /* enforce GET request */
      infof(data, "Disables POST, goes with %s\n",
            data->set.opt_no_body?"HEAD":"GET");
    }
    break;
  case 304: /* Not Modified */
    /* 304 means we did a conditional request and it was "Not modified".
     * We shouldn't get any Location: header in this response!
     */
    break;
  case 305: /* Use Proxy */
    /* (quote from RFC2616, section 10.3.6):
     * "The requested resource MUST be accessed through the proxy given
     * by the Location field. The Location field gives the URI of the
     * proxy.  The recipient is expected to repeat this single request
     * via the proxy. 305 responses MUST only be generated by origin
     * servers."
     */
    break;
  }
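
  /* For illustration: an application keeps POST across the 301/302/303
     conversions above with e.g.
       curl_easy_setopt(easy, CURLOPT_POSTREDIR, (long)CURL_REDIR_POST_ALL);
     or with a bitmask of CURL_REDIR_POST_301/302/303 ('easy' is
     hypothetical). */
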
  Curl_pgrsTime(data, TIMER_REDIRECT);
  Curl_pgrsResetTransferSizes(data);

  return CURLE_OK;
#endif /* CURL_DISABLE_HTTP */
}

/* Returns CURLE_OK *and* sets '*url' if a request retry is wanted.

   NOTE: the *url is malloc()ed. */
CURLcode Curl_retry_request(struct connectdata *conn,
                            char **url)
{
  struct Curl_easy *data = conn->data;
  bool retry = FALSE;
  *url = NULL;

  /* if we're uploading, we can't do the checks below, unless the protocol is
     HTTP, as when uploading over HTTP we will still get a response */
  if(data->set.upload &&
     !(conn->handler->protocol&(PROTO_FAMILY_HTTP|CURLPROTO_RTSP)))
    return CURLE_OK;

  if((data->req.bytecount + data->req.headerbytecount == 0) &&
      conn->bits.reuse &&
      (!data->set.opt_no_body
        || (conn->handler->protocol & PROTO_FAMILY_HTTP)) &&
      (data->set.rtspreq != RTSPREQ_RECEIVE))
    /* We got no data and we attempted to re-use a connection. For HTTP this
       can be a retry so we try again regardless of whether we expected a
       body. For other protocols we only try again if we expected a body.

       This might happen if the connection was left alive when we were
       done using it before, but that was closed when we wanted to read from
       it again. Bad luck. Retry the same request on a fresh connect! */
    retry = TRUE;
  else if(data->state.refused_stream &&
          (data->req.bytecount + data->req.headerbytecount == 0)) {
    /* This was sent on a refused stream, safe to rerun. A refused stream
       error can typically only happen on HTTP/2 level if the stream is safe
       to issue again, but the nghttp2 API can deliver the message to other
       streams as well, which is why this also checks the data counters. */
    infof(conn->data, "REFUSED_STREAM, retrying a fresh connect\n");
    data->state.refused_stream = FALSE; /* clear again */
    retry = TRUE;
  }
  if(retry) {
    infof(conn->data, "Connection died, retrying a fresh connect\n");
    *url = strdup(conn->data->change.url);
    if(!*url)
      return CURLE_OUT_OF_MEMORY;

    connclose(conn, "retry"); /* close this connection */
    conn->bits.retry = TRUE; /* mark this as a connection we're about
                                to retry. Marking it this way should
                                prevent e.g. HTTP transfers from returning
                                an error just because nothing has been
                                transferred! */

    if(conn->handler->protocol&PROTO_FAMILY_HTTP) {
      if(data->req.writebytecount) {
        CURLcode result = Curl_readrewind(conn);
        if(result) {
          Curl_safefree(*url);
          return result;
        }
      }
    }
  }
  return CURLE_OK;
}
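
/* For illustration, assuming the usual caller-side pattern: a caller that
   gets a non-NULL *url back would typically pass it to Curl_follow() with
   FOLLOW_RETRY to re-issue the request on a fresh connection, and free the
   string afterwards since Curl_follow() does not take ownership of it. */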

/*
 * Curl_setup_transfer() is called to setup some basic properties for the
 * upcoming transfer.
 */
void
Curl_setup_transfer(
  struct Curl_easy *data,   /* transfer */
  int sockindex,            /* socket index to read from or -1 */
  curl_off_t size,          /* -1 if unknown at this point */
  bool getheader,           /* TRUE if header parsing is wanted */
  int writesockindex        /* socket index to write to, it may very well be
                               the same we read from. -1 disables */
  )
{
  struct SingleRequest *k = &data->req;
  struct connectdata *conn = data->conn;
  DEBUGASSERT(conn != NULL);
  DEBUGASSERT((sockindex <= 1) && (sockindex >= -1));

  if(conn->bits.multiplex || conn->httpversion == 20) {
    /* when multiplexing, the read/write sockets need to be the same! */
    conn->sockfd = sockindex == -1 ?
      ((writesockindex == -1 ? CURL_SOCKET_BAD : conn->sock[writesockindex])) :
      conn->sock[sockindex];
    conn->writesockfd = conn->sockfd;
  }
  else {
    conn->sockfd = sockindex == -1 ?
      CURL_SOCKET_BAD : conn->sock[sockindex];
    conn->writesockfd = writesockindex == -1 ?
      CURL_SOCKET_BAD:conn->sock[writesockindex];
  }
  k->getheader = getheader;

  k->size = size;

  /* The code sequence below is placed in this function just because all
     necessary input is not always known in do_complete() as this function may
     be called after that */

  if(!k->getheader) {
    k->header = FALSE;
    if(size > 0)
      Curl_pgrsSetDownloadSize(data, size);
  }
  /* we want header and/or body, if neither then don't do this! */
  if(k->getheader || !data->set.opt_no_body) {

    if(sockindex != -1)
      k->keepon |= KEEP_RECV;

    if(writesockindex != -1) {
      struct HTTP *http = data->req.protop;
      /* HTTP 1.1 magic:

         Even if we require a 100-return code before uploading data, we might
         need to write data before that since the REQUEST may not have been
         completely sent off just yet.

         Thus, we must check if the request has been sent before we set the
         state info where we wait for the 100-return code
      */
      if((data->state.expect100header) &&
         (conn->handler->protocol&PROTO_FAMILY_HTTP) &&
         (http->sending == HTTPSEND_BODY)) {
        /* wait with write until we either got 100-continue or a timeout */
        k->exp100 = EXP100_AWAITING_CONTINUE;
        k->start100 = Curl_now();

        /* Set a timeout for the multi interface. Add the inaccuracy margin so
           that we don't fire slightly too early and get denied the chance to
           run. */
        Curl_expire(data, data->set.expect_100_timeout, EXPIRE_100_TIMEOUT);
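
        /* For illustration: the timeout used above corresponds to the
           caller-side option
             curl_easy_setopt(easy, CURLOPT_EXPECT_100_TIMEOUT_MS, 1000L);
           ('easy' and the value are hypothetical). */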
      }
      else {
        if(data->state.expect100header)
          /* when we've sent off the rest of the headers, we must await a
             100-continue but first finish sending the request */
          k->exp100 = EXP100_SENDING_REQUEST;

        /* enable the write bit when we're not waiting for continue */
        k->keepon |= KEEP_SEND;
      }
    } /* if(writesockindex != -1) */
  } /* if(k->getheader || !data->set.opt_no_body) */

}