1 /***************************************************************************
2 * _ _ ____ _
3 * Project ___| | | | _ \| |
4 * / __| | | | |_) | |
5 * | (__| |_| | _ <| |___
6 * \___|\___/|_| \_\_____|
7 *
8 * Copyright (C) 1998 - 2021, Daniel Stenberg, <daniel@haxx.se>, et al.
9 *
10 * This software is licensed as described in the file COPYING, which
11 * you should have received as part of this distribution. The terms
12 * are also available at https://curl.se/docs/copyright.html.
13 *
14 * You may opt to use, copy, modify, merge, publish, distribute and/or sell
15 * copies of the Software, and permit persons to whom the Software is
16 * furnished to do so, under the terms of the COPYING file.
17 *
18 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
19 * KIND, either express or implied.
20 *
21 ***************************************************************************/
22
23 #include "curl_setup.h"
24 #include "strtoofft.h"
25
26 #ifdef HAVE_NETINET_IN_H
27 #include <netinet/in.h>
28 #endif
29 #ifdef HAVE_NETDB_H
30 #include <netdb.h>
31 #endif
32 #ifdef HAVE_ARPA_INET_H
33 #include <arpa/inet.h>
34 #endif
35 #ifdef HAVE_NET_IF_H
36 #include <net/if.h>
37 #endif
38 #ifdef HAVE_SYS_IOCTL_H
39 #include <sys/ioctl.h>
40 #endif
41 #ifdef HAVE_SIGNAL_H
42 #include <signal.h>
43 #endif
44
45 #ifdef HAVE_SYS_PARAM_H
46 #include <sys/param.h>
47 #endif
48
49 #ifdef HAVE_SYS_SELECT_H
50 #include <sys/select.h>
51 #elif defined(HAVE_UNISTD_H)
52 #include <unistd.h>
53 #endif
54
55 #ifndef HAVE_SOCKET
56 #error "We can't compile without socket() support!"
57 #endif
58
59 #include "urldata.h"
60 #include <curl/curl.h>
61 #include "netrc.h"
62
63 #include "content_encoding.h"
64 #include "hostip.h"
65 #include "transfer.h"
66 #include "sendf.h"
67 #include "speedcheck.h"
68 #include "progress.h"
69 #include "http.h"
70 #include "url.h"
71 #include "getinfo.h"
72 #include "vtls/vtls.h"
73 #include "select.h"
74 #include "multiif.h"
75 #include "connect.h"
76 #include "non-ascii.h"
77 #include "http2.h"
78 #include "mime.h"
79 #include "strcase.h"
80 #include "urlapi-int.h"
81 #include "hsts.h"
82
83 /* The last 3 #include files should be in this order */
84 #include "curl_printf.h"
85 #include "curl_memory.h"
86 #include "memdebug.h"
87
88 #if !defined(CURL_DISABLE_HTTP) || !defined(CURL_DISABLE_SMTP) || \
89 !defined(CURL_DISABLE_IMAP)
90 /*
91 * checkheaders() checks the linked list of custom headers for a
92 * particular header (prefix). Provide the prefix without colon!
93 *
94 * Returns a pointer to the first matching header or NULL if none matched.
95 */
Curl_checkheaders(const struct Curl_easy * data,const char * thisheader)96 char *Curl_checkheaders(const struct Curl_easy *data,
97 const char *thisheader)
98 {
99 struct curl_slist *head;
100 size_t thislen = strlen(thisheader);
101
102 for(head = data->set.headers; head; head = head->next) {
103 if(strncasecompare(head->data, thisheader, thislen) &&
104 Curl_headersep(head->data[thislen]) )
105 return head->data;
106 }
107
108 return NULL;
109 }
110 #endif
111
Curl_get_upload_buffer(struct Curl_easy * data)112 CURLcode Curl_get_upload_buffer(struct Curl_easy *data)
113 {
114 if(!data->state.ulbuf) {
115 data->state.ulbuf = malloc(data->set.upload_buffer_size);
116 if(!data->state.ulbuf)
117 return CURLE_OUT_OF_MEMORY;
118 }
119 return CURLE_OK;
120 }
121
122 #ifndef CURL_DISABLE_HTTP
123 /*
124 * This function will be called to loop through the trailers buffer
125 * until no more data is available for sending.
126 */
trailers_read(char * buffer,size_t size,size_t nitems,void * raw)127 static size_t trailers_read(char *buffer, size_t size, size_t nitems,
128 void *raw)
129 {
130 struct Curl_easy *data = (struct Curl_easy *)raw;
131 struct dynbuf *trailers_buf = &data->state.trailers_buf;
132 size_t bytes_left = Curl_dyn_len(trailers_buf) -
133 data->state.trailers_bytes_sent;
134 size_t to_copy = (size*nitems < bytes_left) ? size*nitems : bytes_left;
135 if(to_copy) {
136 memcpy(buffer,
137 Curl_dyn_ptr(trailers_buf) + data->state.trailers_bytes_sent,
138 to_copy);
139 data->state.trailers_bytes_sent += to_copy;
140 }
141 return to_copy;
142 }
143
trailers_left(void * raw)144 static size_t trailers_left(void *raw)
145 {
146 struct Curl_easy *data = (struct Curl_easy *)raw;
147 struct dynbuf *trailers_buf = &data->state.trailers_buf;
148 return Curl_dyn_len(trailers_buf) - data->state.trailers_bytes_sent;
149 }
150 #endif
151
152 /*
153 * This function will call the read callback to fill our buffer with data
154 * to upload.
155 */
/*
 * Curl_fillreadbuffer() fills the upload buffer (data->req.upload_fromhere)
 * with data to send, by calling the application's read callback - or, when
 * trailing headers are being sent, the internal trailers_read() helper.
 *
 * When chunked Transfer-Encoding is in use, this function also adds the
 * chunk framing (hex length + CRLF prefix, trailing CRLF) around the data
 * in place, which is why upload_fromhere is shifted forward before the
 * read and back afterwards.
 *
 * 'bytes' is the total buffer size available; on success *nreadp holds the
 * number of bytes made ready to send, including any chunk framing.
 */
CURLcode Curl_fillreadbuffer(struct Curl_easy *data, size_t bytes,
                             size_t *nreadp)
{
  size_t buffersize = bytes;
  size_t nread;

  curl_read_callback readfunc = NULL;
  void *extra_data = NULL;

#ifdef CURL_DOES_CONVERSIONS
  bool sending_http_headers = FALSE;
  struct connectdata *conn = data->conn;

  if(conn->handler->protocol&(PROTO_FAMILY_HTTP|CURLPROTO_RTSP)) {
    const struct HTTP *http = data->req.p.http;

    if(http->sending == HTTPSEND_REQUEST)
      /* We're sending the HTTP request headers, not the data.
         Remember that so we don't re-translate them into garbage. */
      sending_http_headers = TRUE;
  }
#endif

#ifndef CURL_DISABLE_HTTP
  /* First transition of the trailers state machine: compile the trailer
     headers from the user callback into data->state.trailers_buf. */
  if(data->state.trailers_state == TRAILERS_INITIALIZED) {
    struct curl_slist *trailers = NULL;
    CURLcode result;
    int trailers_ret_code;

    /* at this point we already verified that the callback exists
       so we compile and store the trailers buffer, then proceed */
    infof(data,
          "Moving trailers state machine from initialized to sending.\n");
    data->state.trailers_state = TRAILERS_SENDING;
    Curl_dyn_init(&data->state.trailers_buf, DYN_TRAILERS);

    data->state.trailers_bytes_sent = 0;
    Curl_set_in_callback(data, true);
    trailers_ret_code = data->set.trailer_callback(&trailers,
                                                   data->set.trailer_data);
    Curl_set_in_callback(data, false);
    if(trailers_ret_code == CURL_TRAILERFUNC_OK) {
      result = Curl_http_compile_trailers(trailers, &data->state.trailers_buf,
                                          data);
    }
    else {
      failf(data, "operation aborted by trailing headers callback");
      *nreadp = 0;
      result = CURLE_ABORTED_BY_CALLBACK;
    }
    if(result) {
      /* on failure, release both the half-built buffer and the user's
         trailer list before bailing out */
      Curl_dyn_free(&data->state.trailers_buf);
      curl_slist_free_all(trailers);
      return result;
    }
    infof(data, "Successfully compiled trailers.\r\n");
    curl_slist_free_all(trailers);
  }
#endif

  /* if we are transmitting trailing data, we don't need to write
     a chunk size so we skip this */
  if(data->req.upload_chunky &&
     data->state.trailers_state == TRAILERS_NONE) {
    /* if chunked Transfer-Encoding: reserve room for the framing and shift
       the write position past where the chunk-size prefix will go */
    buffersize -= (8 + 2 + 2); /* 32bit hex + CRLF + CRLF */
    data->req.upload_fromhere += (8 + 2); /* 32bit hex + CRLF */
  }

#ifndef CURL_DISABLE_HTTP
  if(data->state.trailers_state == TRAILERS_SENDING) {
    /* if we're here then that means that we already sent the last empty chunk
       but we didn't send a final CR LF, so we sent 0 CR LF. We then start
       pulling trailing data until we have no more at which point we
       simply return to the previous point in the state machine as if
       nothing happened.
       */
    readfunc = trailers_read;
    extra_data = (void *)data;
  }
  else
#endif
  {
    /* normal case: use the application's read callback */
    readfunc = data->state.fread_func;
    extra_data = data->state.in;
  }

  Curl_set_in_callback(data, true);
  nread = readfunc(data->req.upload_fromhere, 1,
                   buffersize, extra_data);
  Curl_set_in_callback(data, false);

  if(nread == CURL_READFUNC_ABORT) {
    failf(data, "operation aborted by callback");
    *nreadp = 0;
    return CURLE_ABORTED_BY_CALLBACK;
  }
  if(nread == CURL_READFUNC_PAUSE) {
    struct SingleRequest *k = &data->req;

    if(data->conn->handler->flags & PROTOPT_NONETWORK) {
      /* protocols that work without network cannot be paused. This is
         actually only FILE:// just now, and it can't pause since the transfer
         isn't done using the "normal" procedure. */
      failf(data, "Read callback asked for PAUSE when not supported!");
      return CURLE_READ_ERROR;
    }

    /* CURL_READFUNC_PAUSE pauses read callbacks that feed socket writes */
    k->keepon |= KEEP_SEND_PAUSE; /* mark socket send as paused */
    if(data->req.upload_chunky) {
      /* Back out the preallocation done above */
      data->req.upload_fromhere -= (8 + 2);
    }
    *nreadp = 0;

    return CURLE_OK; /* nothing was read */
  }
  else if(nread > buffersize) {
    /* the read function returned a too large value */
    *nreadp = 0;
    failf(data, "read function returned funny value");
    return CURLE_READ_ERROR;
  }

  if(!data->req.forbidchunk && data->req.upload_chunky) {
    /* if chunked Transfer-Encoding
     * build chunk:
     *
     *        <HEX SIZE> CRLF
     *        <DATA> CRLF
     */
    /* On non-ASCII platforms the <DATA> may or may not be
       translated based on set.prefer_ascii while the protocol
       portion must always be translated to the network encoding.
       To further complicate matters, line end conversion might be
       done later on, so we need to prevent CRLFs from becoming
       CRCRLFs if that's the case. To do this we use bare LFs
       here, knowing they'll become CRLFs later on.
     */

    bool added_crlf = FALSE;
    int hexlen = 0;
    const char *endofline_native;
    const char *endofline_network;

    if(
#ifdef CURL_DO_LINEEND_CONV
       (data->set.prefer_ascii) ||
#endif
       (data->set.crlf)) {
      /* \n will become \r\n later on */
      endofline_native = "\n";
      endofline_network = "\x0a";
    }
    else {
      endofline_native = "\r\n";
      endofline_network = "\x0d\x0a";
    }

    /* if we're not handling trailing data, proceed as usual */
    if(data->state.trailers_state != TRAILERS_SENDING) {
      char hexbuffer[11] = "";
      hexlen = msnprintf(hexbuffer, sizeof(hexbuffer),
                         "%zx%s", nread, endofline_native);

      /* move buffer pointer back over the reserved prefix space */
      data->req.upload_fromhere -= hexlen;
      nread += hexlen;

      /* copy the prefix to the buffer, leaving out the NUL */
      memcpy(data->req.upload_fromhere, hexbuffer, hexlen);

      /* always append ASCII CRLF to the data unless
         we have a valid trailer callback */
#ifndef CURL_DISABLE_HTTP
      if((nread-hexlen) == 0 &&
         data->set.trailer_callback != NULL &&
         data->state.trailers_state == TRAILERS_NONE) {
        /* the terminating zero-length chunk: instead of finishing here,
           kick the trailers state machine off */
        data->state.trailers_state = TRAILERS_INITIALIZED;
      }
      else
#endif
      {
        memcpy(data->req.upload_fromhere + nread,
               endofline_network,
               strlen(endofline_network));
        added_crlf = TRUE;
      }
    }

#ifdef CURL_DOES_CONVERSIONS
    {
      CURLcode result;
      size_t length;
      if(data->set.prefer_ascii)
        /* translate the protocol and data */
        length = nread;
      else
        /* just translate the protocol portion */
        length = hexlen;
      if(length) {
        result = Curl_convert_to_network(data, data->req.upload_fromhere,
                                         length);
        /* Curl_convert_to_network calls failf if unsuccessful */
        if(result)
          return result;
      }
    }
#endif /* CURL_DOES_CONVERSIONS */

#ifndef CURL_DISABLE_HTTP
    if(data->state.trailers_state == TRAILERS_SENDING &&
       !trailers_left(data)) {
      /* all trailer data delivered: reset the trailer state and close out
         the chunked upload */
      Curl_dyn_free(&data->state.trailers_buf);
      data->state.trailers_state = TRAILERS_DONE;
      data->set.trailer_data = NULL;
      data->set.trailer_callback = NULL;
      /* mark the transfer as done */
      data->req.upload_done = TRUE;
      infof(data, "Signaling end of chunked upload after trailers.\n");
    }
    else
#endif
      if((nread - hexlen) == 0 &&
         data->state.trailers_state != TRAILERS_INITIALIZED) {
        /* mark this as done once this chunk is transferred */
        data->req.upload_done = TRUE;
        infof(data,
              "Signaling end of chunked upload via terminating chunk.\n");
      }

    if(added_crlf)
      nread += strlen(endofline_network); /* for the added end of line */
  }
#ifdef CURL_DOES_CONVERSIONS
  else if((data->set.prefer_ascii) && (!sending_http_headers)) {
    CURLcode result;
    result = Curl_convert_to_network(data, data->req.upload_fromhere, nread);
    /* Curl_convert_to_network calls failf if unsuccessful */
    if(result)
      return result;
  }
#endif /* CURL_DOES_CONVERSIONS */

  *nreadp = nread;

  return CURLE_OK;
}
405
406
407 /*
408 * Curl_readrewind() rewinds the read stream. This is typically used for HTTP
409 * POST/PUT with multi-pass authentication when a sending was denied and a
410 * resend is necessary.
411 */
/*
 * Curl_readrewind() rewinds the read stream. This is typically used for HTTP
 * POST/PUT with multi-pass authentication when a sending was denied and a
 * resend is necessary.
 *
 * The rewind strategy is picked in priority order: CURLOPT_POSTFIELDS data
 * needs no rewind; mime/form posts use Curl_mime_rewind(); otherwise the
 * seek callback, then the ioctl callback, then a direct fseek() when the
 * read callback is plain fread(). If none applies, the rewind fails with
 * CURLE_SEND_FAIL_REWIND.
 */
CURLcode Curl_readrewind(struct Curl_easy *data)
{
  struct connectdata *conn = data->conn;
  curl_mimepart *mimepart = &data->set.mimepost;

  conn->bits.rewindaftersend = FALSE; /* we rewind now */

  /* explicitly switch off sending data on this connection now since we are
     about to restart a new transfer and thus we want to avoid inadvertently
     sending more data on the existing connection until the next transfer
     starts */
  data->req.keepon &= ~KEEP_SEND;

  /* We have sent away data. If not using CURLOPT_POSTFIELDS or
     CURLOPT_HTTPPOST, call app to rewind
  */
  if(conn->handler->protocol & PROTO_FAMILY_HTTP) {
    struct HTTP *http = data->req.p.http;

    /* prefer the request's own mime part over the handle-wide one */
    if(http->sendit)
      mimepart = http->sendit;
  }
  if(data->set.postfields)
    ; /* do nothing: POSTFIELDS data lives in memory and needs no rewind */
  else if(data->state.httpreq == HTTPREQ_POST_MIME ||
          data->state.httpreq == HTTPREQ_POST_FORM) {
    CURLcode result = Curl_mime_rewind(mimepart);
    if(result) {
      failf(data, "Cannot rewind mime/post data");
      return result;
    }
  }
  else {
    if(data->set.seek_func) {
      int err;

      Curl_set_in_callback(data, true);
      err = (data->set.seek_func)(data->set.seek_client, 0, SEEK_SET);
      Curl_set_in_callback(data, false);
      if(err) {
        failf(data, "seek callback returned error %d", (int)err);
        return CURLE_SEND_FAIL_REWIND;
      }
    }
    else if(data->set.ioctl_func) {
      curlioerr err;

      Curl_set_in_callback(data, true);
      err = (data->set.ioctl_func)(data, CURLIOCMD_RESTARTREAD,
                                   data->set.ioctl_client);
      Curl_set_in_callback(data, false);
      infof(data, "the ioctl callback returned %d\n", (int)err);

      if(err) {
        failf(data, "ioctl callback returned error %d", (int)err);
        return CURLE_SEND_FAIL_REWIND;
      }
    }
    else {
      /* If no CURLOPT_READFUNCTION is used, we know that we operate on a
         given FILE * stream and we can actually attempt to rewind that
         ourselves with fseek() */
      if(data->state.fread_func == (curl_read_callback)fread) {
        if(-1 != fseek(data->state.in, 0, SEEK_SET))
          /* successful rewind */
          return CURLE_OK;
      }

      /* no callback set or failure above, makes us fail at once */
      failf(data, "necessary data rewind wasn't possible");
      return CURLE_SEND_FAIL_REWIND;
    }
  }
  return CURLE_OK;
}
487
/*
 * data_pending() returns non-zero when there may be more received data
 * buffered below the socket layer (QUIC, TLS, libssh2, HTTP/2) that a
 * plain poll()/select() on the socket would not report, so the read loop
 * must iterate again instead of waiting.
 *
 * Note: the return expression deliberately continues across the
 * preprocessor conditional - the '||' chain is completed differently
 * depending on USE_NGHTTP2.
 */
static int data_pending(const struct Curl_easy *data)
{
  struct connectdata *conn = data->conn;

#ifdef ENABLE_QUIC
  if(conn->transport == TRNSPRT_QUIC)
    return Curl_quic_data_pending(data);
#endif

  /* in the case of libssh2, we can never be really sure that we have emptied
     its internal buffers so we MUST always try until we get EAGAIN back */
  return conn->handler->protocol&(CURLPROTO_SCP|CURLPROTO_SFTP) ||
#if defined(USE_NGHTTP2)
    Curl_ssl_data_pending(conn, FIRSTSOCKET) ||
    /* For HTTP/2, we may read up everything including response body
       with header fields in Curl_http_readwrite_headers. If no
       content-length is provided, curl waits for the connection
       close, which we emulate it using conn->proto.httpc.closed =
       TRUE. The thing is if we read everything, then http2_recv won't
       be called and we cannot signal the HTTP/2 stream has closed. As
       a workaround, we return nonzero here to call http2_recv. */
    ((conn->handler->protocol&PROTO_FAMILY_HTTP) && conn->httpversion >= 20);
#else
    Curl_ssl_data_pending(conn, FIRSTSOCKET);
#endif
}
514
515 /*
516 * Check to see if CURLOPT_TIMECONDITION was met by comparing the time of the
517 * remote document with the time provided by CURLOPT_TIMEVAL
518 */
Curl_meets_timecondition(struct Curl_easy * data,time_t timeofdoc)519 bool Curl_meets_timecondition(struct Curl_easy *data, time_t timeofdoc)
520 {
521 if((timeofdoc == 0) || (data->set.timevalue == 0))
522 return TRUE;
523
524 switch(data->set.timecondition) {
525 case CURL_TIMECOND_IFMODSINCE:
526 default:
527 if(timeofdoc <= data->set.timevalue) {
528 infof(data,
529 "The requested document is not new enough\n");
530 data->info.timecond = TRUE;
531 return FALSE;
532 }
533 break;
534 case CURL_TIMECOND_IFUNMODSINCE:
535 if(timeofdoc >= data->set.timevalue) {
536 infof(data,
537 "The requested document is not old enough\n");
538 data->info.timecond = TRUE;
539 return FALSE;
540 }
541 break;
542 }
543
544 return TRUE;
545 }
546
547 /*
548 * Go ahead and do a read if we have a readable socket or if
549 * the stream was rewound (in which case we have data in a
550 * buffer)
551 *
552 * return '*comeback' TRUE if we didn't properly drain the socket so this
553 * function should get called again without select() or similar in between!
554 */
/*
 * readwrite_data() performs the receive side of a transfer: it reads from
 * the socket, parses headers (HTTP), de-chunks if needed, and hands body
 * data to the client write callbacks. It loops while lower layers report
 * buffered data pending, up to 'maxloops' iterations.
 *
 * Out-parameters:
 *   didwhat  - gets KEEP_RECV OR'ed in once any read activity happened
 *   done     - set TRUE when the transfer should be considered finished
 *   comeback - set TRUE if the socket was not fully drained, meaning the
 *              caller must call again without waiting in select()/poll()
 */
static CURLcode readwrite_data(struct Curl_easy *data,
                               struct connectdata *conn,
                               struct SingleRequest *k,
                               int *didwhat, bool *done,
                               bool *comeback)
{
  CURLcode result = CURLE_OK;
  ssize_t nread; /* number of bytes read */
  size_t excess = 0; /* excess bytes read */
  bool readmore = FALSE; /* used by RTP to signal for more data */
  int maxloops = 100;
  char *buf = data->state.buffer;
  DEBUGASSERT(buf);

  *done = FALSE;
  *comeback = FALSE;

  /* This is where we loop until we have read everything there is to
     read or we get a CURLE_AGAIN */
  do {
    bool is_empty_data = FALSE;
    size_t buffersize = data->set.buffer_size;
    size_t bytestoread = buffersize;
#ifdef USE_NGHTTP2
    bool is_http2 = ((conn->handler->protocol & PROTO_FAMILY_HTTP) &&
                     (conn->httpversion == 20));
#endif

    if(
#ifdef USE_NGHTTP2
      /* For HTTP/2, read data without caring about the content
         length. This is safe because body in HTTP/2 is always
         segmented thanks to its framing layer. Meanwhile, we have to
         call Curl_read to ensure that http2_handle_stream_close is
         called when we read all incoming bytes for a particular
         stream. */
      !is_http2 &&
#endif
      k->size != -1 && !k->header) {
      /* make sure we don't read too much */
      curl_off_t totalleft = k->size - k->bytecount;
      if(totalleft < (curl_off_t)bytestoread)
        bytestoread = (size_t)totalleft;
    }

    if(bytestoread) {
      /* receive data from the network! */
      result = Curl_read(data, conn->sockfd, buf, bytestoread, &nread);

      /* read would've blocked */
      if(CURLE_AGAIN == result)
        break; /* get out of loop */

      if(result>0)
        return result;
    }
    else {
      /* read nothing but since we wanted nothing we consider this an OK
         situation to proceed from */
      DEBUGF(infof(data, "readwrite_data: we're done!\n"));
      nread = 0;
    }

    if(!k->bytecount) {
      /* the very first received byte: the transfer timer starts now */
      Curl_pgrsTime(data, TIMER_STARTTRANSFER);
      if(k->exp100 > EXP100_SEND_DATA)
        /* set time stamp to compare with when waiting for the 100 */
        k->start100 = Curl_now();
    }

    *didwhat |= KEEP_RECV;
    /* indicates data of zero size, i.e. empty file */
    is_empty_data = ((nread == 0) && (k->bodywrites == 0)) ? TRUE : FALSE;

    if(0 < nread || is_empty_data) {
      /* NUL-terminate so the buffer can be treated as a string below */
      buf[nread] = 0;
    }
    else {
      /* if we receive 0 or less here, either the http2 stream is closed or the
         server closed the connection and we bail out from this! */
#ifdef USE_NGHTTP2
      if(is_http2 && !nread)
        DEBUGF(infof(data, "nread == 0, stream closed, bailing\n"));
      else
#endif
        DEBUGF(infof(data, "nread <= 0, server closed connection, bailing\n"));
      k->keepon &= ~KEEP_RECV;
      break;
    }

    /* Default buffer to use when we write the buffer, it may be changed
       in the flow below before the actual storing is done. */
    k->str = buf;

    if(conn->handler->readwrite) {
      /* give protocol handlers (e.g. RTSP) first dibs on the raw data */
      result = conn->handler->readwrite(data, conn, &nread, &readmore);
      if(result)
        return result;
      if(readmore)
        break;
    }

#ifndef CURL_DISABLE_HTTP
    /* Since this is a two-state thing, we check if we are parsing
       headers at the moment or not. */
    if(k->header) {
      /* we are in parse-the-header-mode */
      bool stop_reading = FALSE;
      result = Curl_http_readwrite_headers(data, conn, &nread, &stop_reading);
      if(result)
        return result;

      if(conn->handler->readwrite &&
         (k->maxdownload <= 0 && nread > 0)) {
        result = conn->handler->readwrite(data, conn, &nread, &readmore);
        if(result)
          return result;
        if(readmore)
          break;
      }

      if(stop_reading) {
        /* We've stopped dealing with input, get out of the do-while loop */

        if(nread > 0) {
          infof(data,
                "Excess found:"
                " excess = %zd"
                " url = %s (zero-length body)\n",
                nread, data->state.up.path);
        }

        break;
      }
    }
#endif /* CURL_DISABLE_HTTP */


    /* This is not an 'else if' since it may be a rest from the header
       parsing, where the beginning of the buffer is headers and the end
       is non-headers. */
    if(!k->header && (nread > 0 || is_empty_data)) {

      if(data->set.opt_no_body) {
        /* data arrives although we want none, bail out */
        streamclose(conn, "ignoring body");
        *done = TRUE;
        return CURLE_WEIRD_SERVER_REPLY;
      }

#ifndef CURL_DISABLE_HTTP
      if(0 == k->bodywrites && !is_empty_data) {
        /* These checks are only made the first time we are about to
           write a piece of the body */
        if(conn->handler->protocol&(PROTO_FAMILY_HTTP|CURLPROTO_RTSP)) {
          /* HTTP-only checks */
          result = Curl_http_firstwrite(data, conn, done);
          if(result || *done)
            return result;
        }
      } /* this is the first time we write a body part */
#endif /* CURL_DISABLE_HTTP */

      k->bodywrites++;

      /* pass data to the debug function before it gets "dechunked" */
      if(data->set.verbose) {
        if(k->badheader) {
          /* body bytes were stashed in the header buffer; report those too */
          Curl_debug(data, CURLINFO_DATA_IN,
                     Curl_dyn_ptr(&data->state.headerb),
                     Curl_dyn_len(&data->state.headerb));
          if(k->badheader == HEADER_PARTHEADER)
            Curl_debug(data, CURLINFO_DATA_IN,
                       k->str, (size_t)nread);
        }
        else
          Curl_debug(data, CURLINFO_DATA_IN,
                     k->str, (size_t)nread);
      }

#ifndef CURL_DISABLE_HTTP
      if(k->chunk) {
        /*
         * Here comes a chunked transfer flying and we need to decode this
         * properly. While the name says read, this function both reads
         * and writes away the data. The returned 'nread' holds the number
         * of actual data it wrote to the client.
         */
        CURLcode extra;
        CHUNKcode res =
          Curl_httpchunk_read(data, k->str, nread, &nread, &extra);

        if(CHUNKE_OK < res) {
          if(CHUNKE_PASSTHRU_ERROR == res) {
            failf(data, "Failed reading the chunked-encoded stream");
            return extra;
          }
          failf(data, "%s in chunked-encoding", Curl_chunked_strerror(res));
          return CURLE_RECV_ERROR;
        }
        if(CHUNKE_STOP == res) {
          /* we're done reading chunks! */
          k->keepon &= ~KEEP_RECV; /* read no more */

          /* N number of bytes at the end of the str buffer that weren't
             written to the client. */
          if(conn->chunk.datasize) {
            infof(data, "Leftovers after chunking: % "
                  CURL_FORMAT_CURL_OFF_T "u bytes\n",
                  conn->chunk.datasize);
          }
        }
        /* If it returned OK, we just keep going */
      }
#endif /* CURL_DISABLE_HTTP */

      /* Account for body content stored in the header buffer */
      if((k->badheader == HEADER_PARTHEADER) && !k->ignorebody) {
        size_t headlen = Curl_dyn_len(&data->state.headerb);
        DEBUGF(infof(data, "Increasing bytecount by %zu\n", headlen));
        k->bytecount += headlen;
      }

      if((-1 != k->maxdownload) &&
         (k->bytecount + nread >= k->maxdownload)) {

        /* the server sent more than we asked for; trim and note it */
        excess = (size_t)(k->bytecount + nread - k->maxdownload);
        if(excess > 0 && !k->ignorebody) {
          infof(data,
                "Excess found in a read:"
                " excess = %zu"
                ", size = %" CURL_FORMAT_CURL_OFF_T
                ", maxdownload = %" CURL_FORMAT_CURL_OFF_T
                ", bytecount = %" CURL_FORMAT_CURL_OFF_T "\n",
                excess, k->size, k->maxdownload, k->bytecount);
          connclose(conn, "excess found in a read");
        }

        nread = (ssize_t) (k->maxdownload - k->bytecount);
        if(nread < 0) /* this should be unusual */
          nread = 0;

        k->keepon &= ~KEEP_RECV; /* we're done reading */
      }

      k->bytecount += nread;

      Curl_pgrsSetDownloadCounter(data, k->bytecount);

      if(!k->chunk && (nread || k->badheader || is_empty_data)) {
        /* If this is chunky transfer, it was already written */

        if(k->badheader && !k->ignorebody) {
          /* we parsed a piece of data wrongly assuming it was a header
             and now we output it as body instead */
          size_t headlen = Curl_dyn_len(&data->state.headerb);

          /* Don't let excess data pollute body writes */
          if(k->maxdownload == -1 || (curl_off_t)headlen <= k->maxdownload)
            result = Curl_client_write(data, CLIENTWRITE_BODY,
                                       Curl_dyn_ptr(&data->state.headerb),
                                       headlen);
          else
            result = Curl_client_write(data, CLIENTWRITE_BODY,
                                       Curl_dyn_ptr(&data->state.headerb),
                                       (size_t)k->maxdownload);

          if(result)
            return result;
        }
        if(k->badheader < HEADER_ALLBAD) {
          /* This switch handles various content encodings. If there's an
             error here, be sure to check over the almost identical code
             in http_chunks.c.
             Make sure that ALL_CONTENT_ENCODINGS contains all the
             encodings handled here. */
          if(data->set.http_ce_skip || !k->writer_stack) {
            if(!k->ignorebody) {
#ifndef CURL_DISABLE_POP3
              if(conn->handler->protocol & PROTO_FAMILY_POP3)
                result = Curl_pop3_write(data, k->str, nread);
              else
#endif /* CURL_DISABLE_POP3 */
                result = Curl_client_write(data, CLIENTWRITE_BODY, k->str,
                                           nread);
            }
          }
          else if(!k->ignorebody)
            result = Curl_unencode_write(data, k->writer_stack, k->str, nread);
        }
        k->badheader = HEADER_NORMAL; /* taken care of now */

        if(result)
          return result;
      }

    } /* if(!header and data to read) */

    if(conn->handler->readwrite && excess) {
      /* Parse the excess data */
      k->str += nread;

      if(&k->str[excess] > &buf[data->set.buffer_size]) {
        /* the excess amount was too excessive(!), make sure
           it doesn't read out of buffer */
        excess = &buf[data->set.buffer_size] - k->str;
      }
      nread = (ssize_t)excess;

      result = conn->handler->readwrite(data, conn, &nread, &readmore);
      if(result)
        return result;

      if(readmore)
        k->keepon |= KEEP_RECV; /* we're not done reading */
      break;
    }

    if(is_empty_data) {
      /* if we received nothing, the server closed the connection and we
         are done */
      k->keepon &= ~KEEP_RECV;
    }

    if(k->keepon & KEEP_RECV_PAUSE) {
      /* this is a paused transfer */
      break;
    }

  } while(data_pending(data) && maxloops--);

  if(maxloops <= 0) {
    /* we mark it as read-again-please */
    conn->cselect_bits = CURL_CSELECT_IN;
    *comeback = TRUE;
  }

  if(((k->keepon & (KEEP_RECV|KEEP_SEND)) == KEEP_SEND) &&
     conn->bits.close) {
    /* When we've read the entire thing and the close bit is set, the server
       may now close the connection. If there's now any kind of sending going
       on from our side, we need to stop that immediately. */
    infof(data, "we are done reading and this is set to close, stop send\n");
    k->keepon &= ~KEEP_SEND; /* no writing anymore either */
  }

  return CURLE_OK;
}
903
Curl_done_sending(struct Curl_easy * data,struct SingleRequest * k)904 CURLcode Curl_done_sending(struct Curl_easy *data,
905 struct SingleRequest *k)
906 {
907 struct connectdata *conn = data->conn;
908 k->keepon &= ~KEEP_SEND; /* we're done writing */
909
910 /* These functions should be moved into the handler struct! */
911 Curl_http2_done_sending(data, conn);
912 Curl_quic_done_sending(data);
913
914 if(conn->bits.rewindaftersend) {
915 CURLcode result = Curl_readrewind(data);
916 if(result)
917 return result;
918 }
919 return CURLE_OK;
920 }
921
922 #if defined(WIN32) && !defined(USE_LWIPSOCK)
923 #ifndef SIO_IDEAL_SEND_BACKLOG_QUERY
924 #define SIO_IDEAL_SEND_BACKLOG_QUERY 0x4004747B
925 #endif
926
/* Query winsock for the "ideal send backlog" size and grow the socket's
   SO_SNDBUF to match, so large uploads are not throttled by a too-small
   send buffer. Best-effort: failures of either call are deliberately
   ignored. (Windows-only; a no-op macro elsewhere.) */
static void win_update_buffer_size(curl_socket_t sockfd)
{
  int result;
  ULONG ideal;
  DWORD ideallen;
  result = WSAIoctl(sockfd, SIO_IDEAL_SEND_BACKLOG_QUERY, 0, 0,
                    &ideal, sizeof(ideal), &ideallen, 0, 0);
  if(result == 0) {
    setsockopt(sockfd, SOL_SOCKET, SO_SNDBUF,
               (const char *)&ideal, sizeof(ideal));
  }
}
939 #else
940 #define win_update_buffer_size(x)
941 #endif
942
943 /*
944 * Send data to upload to the server, when the socket is writable.
945 */
readwrite_upload(struct Curl_easy * data,struct connectdata * conn,int * didwhat)946 static CURLcode readwrite_upload(struct Curl_easy *data,
947 struct connectdata *conn,
948 int *didwhat)
949 {
950 ssize_t i, si;
951 ssize_t bytes_written;
952 CURLcode result;
953 ssize_t nread; /* number of bytes read */
954 bool sending_http_headers = FALSE;
955 struct SingleRequest *k = &data->req;
956
957 if((k->bytecount == 0) && (k->writebytecount == 0))
958 Curl_pgrsTime(data, TIMER_STARTTRANSFER);
959
960 *didwhat |= KEEP_SEND;
961
962 do {
963 curl_off_t nbody;
964
965 /* only read more data if there's no upload data already
966 present in the upload buffer */
967 if(0 == k->upload_present) {
968 result = Curl_get_upload_buffer(data);
969 if(result)
970 return result;
971 /* init the "upload from here" pointer */
972 k->upload_fromhere = data->state.ulbuf;
973
974 if(!k->upload_done) {
975 /* HTTP pollution, this should be written nicer to become more
976 protocol agnostic. */
977 size_t fillcount;
978 struct HTTP *http = k->p.http;
979
980 if((k->exp100 == EXP100_SENDING_REQUEST) &&
981 (http->sending == HTTPSEND_BODY)) {
982 /* If this call is to send body data, we must take some action:
983 We have sent off the full HTTP 1.1 request, and we shall now
984 go into the Expect: 100 state and await such a header */
985 k->exp100 = EXP100_AWAITING_CONTINUE; /* wait for the header */
986 k->keepon &= ~KEEP_SEND; /* disable writing */
987 k->start100 = Curl_now(); /* timeout count starts now */
988 *didwhat &= ~KEEP_SEND; /* we didn't write anything actually */
989 /* set a timeout for the multi interface */
990 Curl_expire(data, data->set.expect_100_timeout, EXPIRE_100_TIMEOUT);
991 break;
992 }
993
994 if(conn->handler->protocol&(PROTO_FAMILY_HTTP|CURLPROTO_RTSP)) {
995 if(http->sending == HTTPSEND_REQUEST)
996 /* We're sending the HTTP request headers, not the data.
997 Remember that so we don't change the line endings. */
998 sending_http_headers = TRUE;
999 else
1000 sending_http_headers = FALSE;
1001 }
1002
1003 result = Curl_fillreadbuffer(data, data->set.upload_buffer_size,
1004 &fillcount);
1005 if(result)
1006 return result;
1007
1008 nread = fillcount;
1009 }
1010 else
1011 nread = 0; /* we're done uploading/reading */
1012
1013 if(!nread && (k->keepon & KEEP_SEND_PAUSE)) {
1014 /* this is a paused transfer */
1015 break;
1016 }
1017 if(nread <= 0) {
1018 result = Curl_done_sending(data, k);
1019 if(result)
1020 return result;
1021 break;
1022 }
1023
1024 /* store number of bytes available for upload */
1025 k->upload_present = nread;
1026
1027 /* convert LF to CRLF if so asked */
1028 if((!sending_http_headers) && (
1029 #ifdef CURL_DO_LINEEND_CONV
1030 /* always convert if we're FTPing in ASCII mode */
1031 (data->set.prefer_ascii) ||
1032 #endif
1033 (data->set.crlf))) {
1034 /* Do we need to allocate a scratch buffer? */
1035 if(!data->state.scratch) {
1036 data->state.scratch = malloc(2 * data->set.upload_buffer_size);
1037 if(!data->state.scratch) {
1038 failf(data, "Failed to alloc scratch buffer!");
1039
1040 return CURLE_OUT_OF_MEMORY;
1041 }
1042 }
1043
1044 /*
1045 * ASCII/EBCDIC Note: This is presumably a text (not binary)
1046 * transfer so the data should already be in ASCII.
1047 * That means the hex values for ASCII CR (0x0d) & LF (0x0a)
1048 * must be used instead of the escape sequences \r & \n.
1049 */
1050 for(i = 0, si = 0; i < nread; i++, si++) {
1051 if(k->upload_fromhere[i] == 0x0a) {
1052 data->state.scratch[si++] = 0x0d;
1053 data->state.scratch[si] = 0x0a;
1054 if(!data->set.crlf) {
1055 /* we're here only because FTP is in ASCII mode...
1056 bump infilesize for the LF we just added */
1057 if(data->state.infilesize != -1)
1058 data->state.infilesize++;
1059 }
1060 }
1061 else
1062 data->state.scratch[si] = k->upload_fromhere[i];
1063 }
1064
1065 if(si != nread) {
1066 /* only perform the special operation if we really did replace
1067 anything */
1068 nread = si;
1069
1070 /* upload from the new (replaced) buffer instead */
1071 k->upload_fromhere = data->state.scratch;
1072
1073 /* set the new amount too */
1074 k->upload_present = nread;
1075 }
1076 }
1077
1078 #ifndef CURL_DISABLE_SMTP
1079 if(conn->handler->protocol & PROTO_FAMILY_SMTP) {
1080 result = Curl_smtp_escape_eob(data, nread);
1081 if(result)
1082 return result;
1083 }
1084 #endif /* CURL_DISABLE_SMTP */
1085 } /* if 0 == k->upload_present */
1086 else {
1087 /* We have a partial buffer left from a previous "round". Use
1088 that instead of reading more data */
1089 }
1090
1091 /* write to socket (send away data) */
1092 result = Curl_write(data,
1093 conn->writesockfd, /* socket to send to */
1094 k->upload_fromhere, /* buffer pointer */
1095 k->upload_present, /* buffer size */
1096 &bytes_written); /* actually sent */
1097 if(result)
1098 return result;
1099
1100 win_update_buffer_size(conn->writesockfd);
1101
1102 if(k->pendingheader) {
1103 /* parts of what was sent was header */
1104 curl_off_t n = CURLMIN(k->pendingheader, bytes_written);
1105 /* show the data before we change the pointer upload_fromhere */
1106 Curl_debug(data, CURLINFO_HEADER_OUT, k->upload_fromhere, (size_t)n);
1107 k->pendingheader -= n;
1108 nbody = bytes_written - n; /* size of the written body part */
1109 }
1110 else
1111 nbody = bytes_written;
1112
1113 if(nbody) {
1114 /* show the data before we change the pointer upload_fromhere */
1115 Curl_debug(data, CURLINFO_DATA_OUT,
1116 &k->upload_fromhere[bytes_written - nbody],
1117 (size_t)nbody);
1118
1119 k->writebytecount += nbody;
1120 Curl_pgrsSetUploadCounter(data, k->writebytecount);
1121 }
1122
1123 if((!k->upload_chunky || k->forbidchunk) &&
1124 (k->writebytecount == data->state.infilesize)) {
1125 /* we have sent all data we were supposed to */
1126 k->upload_done = TRUE;
1127 infof(data, "We are completely uploaded and fine\n");
1128 }
1129
1130 if(k->upload_present != bytes_written) {
1131 /* we only wrote a part of the buffer (if anything), deal with it! */
1132
1133 /* store the amount of bytes left in the buffer to write */
1134 k->upload_present -= bytes_written;
1135
1136 /* advance the pointer where to find the buffer when the next send
1137 is to happen */
1138 k->upload_fromhere += bytes_written;
1139 }
1140 else {
1141 /* we've uploaded that buffer now */
1142 result = Curl_get_upload_buffer(data);
1143 if(result)
1144 return result;
1145 k->upload_fromhere = data->state.ulbuf;
1146 k->upload_present = 0; /* no more bytes left */
1147
1148 if(k->upload_done) {
1149 result = Curl_done_sending(data, k);
1150 if(result)
1151 return result;
1152 }
1153 }
1154
1155
1156 } while(0); /* just to break out from! */
1157
1158 return CURLE_OK;
1159 }
1160
1161 /*
1162 * Curl_readwrite() is the low-level function to be called when data is to
1163 * be read and written to/from the connection.
1164 *
1165 * return '*comeback' TRUE if we didn't properly drain the socket so this
1166 * function should get called again without select() or similar in between!
1167 */
Curl_readwrite(struct connectdata * conn,struct Curl_easy * data,bool * done,bool * comeback)1168 CURLcode Curl_readwrite(struct connectdata *conn,
1169 struct Curl_easy *data,
1170 bool *done,
1171 bool *comeback)
1172 {
1173 struct SingleRequest *k = &data->req;
1174 CURLcode result;
1175 int didwhat = 0;
1176
1177 curl_socket_t fd_read;
1178 curl_socket_t fd_write;
1179 int select_res = conn->cselect_bits;
1180
1181 conn->cselect_bits = 0;
1182
1183 /* only use the proper socket if the *_HOLD bit is not set simultaneously as
1184 then we are in rate limiting state in that transfer direction */
1185
1186 if((k->keepon & KEEP_RECVBITS) == KEEP_RECV)
1187 fd_read = conn->sockfd;
1188 else
1189 fd_read = CURL_SOCKET_BAD;
1190
1191 if((k->keepon & KEEP_SENDBITS) == KEEP_SEND)
1192 fd_write = conn->writesockfd;
1193 else
1194 fd_write = CURL_SOCKET_BAD;
1195
1196 if(data->state.drain) {
1197 select_res |= CURL_CSELECT_IN;
1198 DEBUGF(infof(data, "Curl_readwrite: forcibly told to drain data\n"));
1199 }
1200
1201 if(!select_res) /* Call for select()/poll() only, if read/write/error
1202 status is not known. */
1203 select_res = Curl_socket_check(fd_read, CURL_SOCKET_BAD, fd_write, 0);
1204
1205 if(select_res == CURL_CSELECT_ERR) {
1206 failf(data, "select/poll returned error");
1207 return CURLE_SEND_ERROR;
1208 }
1209
1210 #ifdef USE_HYPER
1211 if(conn->datastream)
1212 return conn->datastream(data, conn, &didwhat, done, select_res);
1213 #endif
1214 /* We go ahead and do a read if we have a readable socket or if
1215 the stream was rewound (in which case we have data in a
1216 buffer) */
1217 if((k->keepon & KEEP_RECV) && (select_res & CURL_CSELECT_IN)) {
1218 result = readwrite_data(data, conn, k, &didwhat, done, comeback);
1219 if(result || *done)
1220 return result;
1221 }
1222
1223 /* If we still have writing to do, we check if we have a writable socket. */
1224 if((k->keepon & KEEP_SEND) && (select_res & CURL_CSELECT_OUT)) {
1225 /* write */
1226
1227 result = readwrite_upload(data, conn, &didwhat);
1228 if(result)
1229 return result;
1230 }
1231
1232 k->now = Curl_now();
1233 if(!didwhat) {
1234 /* no read no write, this is a timeout? */
1235 if(k->exp100 == EXP100_AWAITING_CONTINUE) {
1236 /* This should allow some time for the header to arrive, but only a
1237 very short time as otherwise it'll be too much wasted time too
1238 often. */
1239
1240 /* Quoting RFC2616, section "8.2.3 Use of the 100 (Continue) Status":
1241
1242 Therefore, when a client sends this header field to an origin server
1243 (possibly via a proxy) from which it has never seen a 100 (Continue)
1244 status, the client SHOULD NOT wait for an indefinite period before
1245 sending the request body.
1246
1247 */
1248
1249 timediff_t ms = Curl_timediff(k->now, k->start100);
1250 if(ms >= data->set.expect_100_timeout) {
1251 /* we've waited long enough, continue anyway */
1252 k->exp100 = EXP100_SEND_DATA;
1253 k->keepon |= KEEP_SEND;
1254 Curl_expire_done(data, EXPIRE_100_TIMEOUT);
1255 infof(data, "Done waiting for 100-continue\n");
1256 }
1257 }
1258 }
1259
1260 if(Curl_pgrsUpdate(data))
1261 result = CURLE_ABORTED_BY_CALLBACK;
1262 else
1263 result = Curl_speedcheck(data, k->now);
1264 if(result)
1265 return result;
1266
1267 if(k->keepon) {
1268 if(0 > Curl_timeleft(data, &k->now, FALSE)) {
1269 if(k->size != -1) {
1270 failf(data, "Operation timed out after %" CURL_FORMAT_TIMEDIFF_T
1271 " milliseconds with %" CURL_FORMAT_CURL_OFF_T " out of %"
1272 CURL_FORMAT_CURL_OFF_T " bytes received",
1273 Curl_timediff(k->now, data->progress.t_startsingle),
1274 k->bytecount, k->size);
1275 }
1276 else {
1277 failf(data, "Operation timed out after %" CURL_FORMAT_TIMEDIFF_T
1278 " milliseconds with %" CURL_FORMAT_CURL_OFF_T " bytes received",
1279 Curl_timediff(k->now, data->progress.t_startsingle),
1280 k->bytecount);
1281 }
1282 return CURLE_OPERATION_TIMEDOUT;
1283 }
1284 }
1285 else {
1286 /*
1287 * The transfer has been performed. Just make some general checks before
1288 * returning.
1289 */
1290
1291 if(!(data->set.opt_no_body) && (k->size != -1) &&
1292 (k->bytecount != k->size) &&
1293 #ifdef CURL_DO_LINEEND_CONV
1294 /* Most FTP servers don't adjust their file SIZE response for CRLFs,
1295 so we'll check to see if the discrepancy can be explained
1296 by the number of CRLFs we've changed to LFs.
1297 */
1298 (k->bytecount != (k->size + data->state.crlf_conversions)) &&
1299 #endif /* CURL_DO_LINEEND_CONV */
1300 !k->newurl) {
1301 failf(data, "transfer closed with %" CURL_FORMAT_CURL_OFF_T
1302 " bytes remaining to read", k->size - k->bytecount);
1303 return CURLE_PARTIAL_FILE;
1304 }
1305 if(!(data->set.opt_no_body) && k->chunk &&
1306 (conn->chunk.state != CHUNK_STOP)) {
1307 /*
1308 * In chunked mode, return an error if the connection is closed prior to
1309 * the empty (terminating) chunk is read.
1310 *
1311 * The condition above used to check for
1312 * conn->proto.http->chunk.datasize != 0 which is true after reading
1313 * *any* chunk, not just the empty chunk.
1314 *
1315 */
1316 failf(data, "transfer closed with outstanding read data remaining");
1317 return CURLE_PARTIAL_FILE;
1318 }
1319 if(Curl_pgrsUpdate(data))
1320 return CURLE_ABORTED_BY_CALLBACK;
1321 }
1322
1323 /* Now update the "done" boolean we return */
1324 *done = (0 == (k->keepon&(KEEP_RECV|KEEP_SEND|
1325 KEEP_RECV_PAUSE|KEEP_SEND_PAUSE))) ? TRUE : FALSE;
1326
1327 return CURLE_OK;
1328 }
1329
1330 /*
1331 * Curl_single_getsock() gets called by the multi interface code when the app
1332 * has requested to get the sockets for the current connection. This function
1333 * will then be called once for every connection that the multi interface
1334 * keeps track of. This function will only be called for connections that are
1335 * in the proper state to have this information available.
1336 */
Curl_single_getsock(struct Curl_easy * data,struct connectdata * conn,curl_socket_t * sock)1337 int Curl_single_getsock(struct Curl_easy *data,
1338 struct connectdata *conn,
1339 curl_socket_t *sock)
1340 {
1341 int bitmap = GETSOCK_BLANK;
1342 unsigned sockindex = 0;
1343
1344 if(conn->handler->perform_getsock)
1345 return conn->handler->perform_getsock(data, conn, sock);
1346
1347 /* don't include HOLD and PAUSE connections */
1348 if((data->req.keepon & KEEP_RECVBITS) == KEEP_RECV) {
1349
1350 DEBUGASSERT(conn->sockfd != CURL_SOCKET_BAD);
1351
1352 bitmap |= GETSOCK_READSOCK(sockindex);
1353 sock[sockindex] = conn->sockfd;
1354 }
1355
1356 /* don't include HOLD and PAUSE connections */
1357 if((data->req.keepon & KEEP_SENDBITS) == KEEP_SEND) {
1358
1359 if((conn->sockfd != conn->writesockfd) ||
1360 bitmap == GETSOCK_BLANK) {
1361 /* only if they are not the same socket and we have a readable
1362 one, we increase index */
1363 if(bitmap != GETSOCK_BLANK)
1364 sockindex++; /* increase index if we need two entries */
1365
1366 DEBUGASSERT(conn->writesockfd != CURL_SOCKET_BAD);
1367
1368 sock[sockindex] = conn->writesockfd;
1369 }
1370
1371 bitmap |= GETSOCK_WRITESOCK(sockindex);
1372 }
1373
1374 return bitmap;
1375 }
1376
1377 /* Curl_init_CONNECT() gets called each time the handle switches to CONNECT
1378 which means this gets called once for each subsequent redirect etc */
Curl_init_CONNECT(struct Curl_easy * data)1379 void Curl_init_CONNECT(struct Curl_easy *data)
1380 {
1381 data->state.fread_func = data->set.fread_func_set;
1382 data->state.in = data->set.in_set;
1383 }
1384
1385 /*
1386 * Curl_pretransfer() is called immediately before a transfer starts, and only
1387 * once for one transfer no matter if it has redirects or do multi-pass
1388 * authentication etc.
1389 */
Curl_pretransfer(struct Curl_easy * data)1390 CURLcode Curl_pretransfer(struct Curl_easy *data)
1391 {
1392 CURLcode result;
1393
1394 if(!data->change.url && !data->set.uh) {
1395 /* we can't do anything without URL */
1396 failf(data, "No URL set!");
1397 return CURLE_URL_MALFORMAT;
1398 }
1399
1400 /* since the URL may have been redirected in a previous use of this handle */
1401 if(data->change.url_alloc) {
1402 /* the already set URL is allocated, free it first! */
1403 Curl_safefree(data->change.url);
1404 data->change.url_alloc = FALSE;
1405 }
1406
1407 if(!data->change.url && data->set.uh) {
1408 CURLUcode uc;
1409 free(data->set.str[STRING_SET_URL]);
1410 uc = curl_url_get(data->set.uh,
1411 CURLUPART_URL, &data->set.str[STRING_SET_URL], 0);
1412 if(uc) {
1413 failf(data, "No URL set!");
1414 return CURLE_URL_MALFORMAT;
1415 }
1416 }
1417
1418 data->state.httpreq = data->set.method;
1419 data->change.url = data->set.str[STRING_SET_URL];
1420
1421 /* Init the SSL session ID cache here. We do it here since we want to do it
1422 after the *_setopt() calls (that could specify the size of the cache) but
1423 before any transfer takes place. */
1424 result = Curl_ssl_initsessions(data, data->set.general_ssl.max_ssl_sessions);
1425 if(result)
1426 return result;
1427
1428 data->state.wildcardmatch = data->set.wildcard_enabled;
1429 data->set.followlocation = 0; /* reset the location-follow counter */
1430 data->state.this_is_a_follow = FALSE; /* reset this */
1431 data->state.errorbuf = FALSE; /* no error has occurred */
1432 data->state.httpversion = 0; /* don't assume any particular server version */
1433
1434 data->state.authproblem = FALSE;
1435 data->state.authhost.want = data->set.httpauth;
1436 data->state.authproxy.want = data->set.proxyauth;
1437 Curl_safefree(data->info.wouldredirect);
1438
1439 if(data->state.httpreq == HTTPREQ_PUT)
1440 data->state.infilesize = data->set.filesize;
1441 else if((data->state.httpreq != HTTPREQ_GET) &&
1442 (data->state.httpreq != HTTPREQ_HEAD)) {
1443 data->state.infilesize = data->set.postfieldsize;
1444 if(data->set.postfields && (data->state.infilesize == -1))
1445 data->state.infilesize = (curl_off_t)strlen(data->set.postfields);
1446 }
1447 else
1448 data->state.infilesize = 0;
1449
1450 /* If there is a list of cookie files to read, do it now! */
1451 if(data->change.cookielist)
1452 Curl_cookie_loadfiles(data);
1453
1454 /* If there is a list of host pairs to deal with */
1455 if(data->change.resolve)
1456 result = Curl_loadhostpairs(data);
1457
1458 if(!result) {
1459 /* Allow data->set.use_port to set which port to use. This needs to be
1460 * disabled for example when we follow Location: headers to URLs using
1461 * different ports! */
1462 data->state.allow_port = TRUE;
1463
1464 #if defined(HAVE_SIGNAL) && defined(SIGPIPE) && !defined(HAVE_MSG_NOSIGNAL)
1465 /*************************************************************
1466 * Tell signal handler to ignore SIGPIPE
1467 *************************************************************/
1468 if(!data->set.no_signal)
1469 data->state.prev_signal = signal(SIGPIPE, SIG_IGN);
1470 #endif
1471
1472 Curl_initinfo(data); /* reset session-specific information "variables" */
1473 Curl_pgrsResetTransferSizes(data);
1474 Curl_pgrsStartNow(data);
1475
1476 /* In case the handle is re-used and an authentication method was picked
1477 in the session we need to make sure we only use the one(s) we now
1478 consider to be fine */
1479 data->state.authhost.picked &= data->state.authhost.want;
1480 data->state.authproxy.picked &= data->state.authproxy.want;
1481
1482 #ifndef CURL_DISABLE_FTP
1483 if(data->state.wildcardmatch) {
1484 struct WildcardData *wc = &data->wildcard;
1485 if(wc->state < CURLWC_INIT) {
1486 result = Curl_wildcard_init(wc); /* init wildcard structures */
1487 if(result)
1488 return CURLE_OUT_OF_MEMORY;
1489 }
1490 }
1491 #endif
1492 Curl_http2_init_state(&data->state);
1493 Curl_hsts_loadcb(data, data->hsts);
1494 }
1495
1496 /*
1497 * Set user-agent. Used for HTTP, but since we can attempt to tunnel
1498 * basically anything through a http proxy we can't limit this based on
1499 * protocol.
1500 */
1501 if(data->set.str[STRING_USERAGENT]) {
1502 Curl_safefree(data->state.aptr.uagent);
1503 data->state.aptr.uagent =
1504 aprintf("User-Agent: %s\r\n", data->set.str[STRING_USERAGENT]);
1505 if(!data->state.aptr.uagent)
1506 return CURLE_OUT_OF_MEMORY;
1507 }
1508
1509 data->req.headerbytecount = 0;
1510 return result;
1511 }
1512
1513 /*
1514 * Curl_posttransfer() is called immediately after a transfer ends
1515 */
Curl_posttransfer(struct Curl_easy * data)1516 CURLcode Curl_posttransfer(struct Curl_easy *data)
1517 {
1518 #if defined(HAVE_SIGNAL) && defined(SIGPIPE) && !defined(HAVE_MSG_NOSIGNAL)
1519 /* restore the signal handler for SIGPIPE before we get back */
1520 if(!data->set.no_signal)
1521 signal(SIGPIPE, data->state.prev_signal);
1522 #else
1523 (void)data; /* unused parameter */
1524 #endif
1525
1526 return CURLE_OK;
1527 }
1528
1529 /*
1530 * Curl_follow() handles the URL redirect magic. Pass in the 'newurl' string
1531 * as given by the remote server and set up the new URL to request.
1532 *
1533 * This function DOES NOT FREE the given url.
1534 */
Curl_follow(struct Curl_easy * data,char * newurl,followtype type)1535 CURLcode Curl_follow(struct Curl_easy *data,
1536 char *newurl, /* the Location: string */
1537 followtype type) /* see transfer.h */
1538 {
1539 #ifdef CURL_DISABLE_HTTP
1540 (void)data;
1541 (void)newurl;
1542 (void)type;
1543 /* Location: following will not happen when HTTP is disabled */
1544 return CURLE_TOO_MANY_REDIRECTS;
1545 #else
1546
1547 /* Location: redirect */
1548 bool disallowport = FALSE;
1549 bool reachedmax = FALSE;
1550 CURLUcode uc;
1551
1552 DEBUGASSERT(type != FOLLOW_NONE);
1553
1554 if(type == FOLLOW_REDIR) {
1555 if((data->set.maxredirs != -1) &&
1556 (data->set.followlocation >= data->set.maxredirs)) {
1557 reachedmax = TRUE;
1558 type = FOLLOW_FAKE; /* switch to fake to store the would-be-redirected
1559 to URL */
1560 }
1561 else {
1562 /* mark the next request as a followed location: */
1563 data->state.this_is_a_follow = TRUE;
1564
1565 data->set.followlocation++; /* count location-followers */
1566
1567 if(data->set.http_auto_referer) {
1568 /* We are asked to automatically set the previous URL as the referer
1569 when we get the next URL. We pick the ->url field, which may or may
1570 not be 100% correct */
1571
1572 if(data->change.referer_alloc) {
1573 Curl_safefree(data->change.referer);
1574 data->change.referer_alloc = FALSE;
1575 }
1576
1577 data->change.referer = strdup(data->change.url);
1578 if(!data->change.referer)
1579 return CURLE_OUT_OF_MEMORY;
1580 data->change.referer_alloc = TRUE; /* yes, free this later */
1581 }
1582 }
1583 }
1584
1585 if((type != FOLLOW_RETRY) &&
1586 (data->req.httpcode != 401) && (data->req.httpcode != 407) &&
1587 Curl_is_absolute_url(newurl, NULL, MAX_SCHEME_LEN))
1588 /* If this is not redirect due to a 401 or 407 response and an absolute
1589 URL: don't allow a custom port number */
1590 disallowport = TRUE;
1591
1592 DEBUGASSERT(data->state.uh);
1593 uc = curl_url_set(data->state.uh, CURLUPART_URL, newurl,
1594 (type == FOLLOW_FAKE) ? CURLU_NON_SUPPORT_SCHEME :
1595 ((type == FOLLOW_REDIR) ? CURLU_URLENCODE : 0) );
1596 if(uc) {
1597 if(type != FOLLOW_FAKE)
1598 return Curl_uc_to_curlcode(uc);
1599
1600 /* the URL could not be parsed for some reason, but since this is FAKE
1601 mode, just duplicate the field as-is */
1602 newurl = strdup(newurl);
1603 if(!newurl)
1604 return CURLE_OUT_OF_MEMORY;
1605 }
1606 else {
1607
1608 uc = curl_url_get(data->state.uh, CURLUPART_URL, &newurl, 0);
1609 if(uc)
1610 return Curl_uc_to_curlcode(uc);
1611 }
1612
1613 if(type == FOLLOW_FAKE) {
1614 /* we're only figuring out the new url if we would've followed locations
1615 but now we're done so we can get out! */
1616 data->info.wouldredirect = newurl;
1617
1618 if(reachedmax) {
1619 failf(data, "Maximum (%ld) redirects followed", data->set.maxredirs);
1620 return CURLE_TOO_MANY_REDIRECTS;
1621 }
1622 return CURLE_OK;
1623 }
1624
1625 if(disallowport)
1626 data->state.allow_port = FALSE;
1627
1628 if(data->change.url_alloc)
1629 Curl_safefree(data->change.url);
1630
1631 data->change.url = newurl;
1632 data->change.url_alloc = TRUE;
1633
1634 infof(data, "Issue another request to this URL: '%s'\n", data->change.url);
1635
1636 /*
1637 * We get here when the HTTP code is 300-399 (and 401). We need to perform
1638 * differently based on exactly what return code there was.
1639 *
1640 * News from 7.10.6: we can also get here on a 401 or 407, in case we act on
1641 * a HTTP (proxy-) authentication scheme other than Basic.
1642 */
1643 switch(data->info.httpcode) {
1644 /* 401 - Act on a WWW-Authenticate, we keep on moving and do the
1645 Authorization: XXXX header in the HTTP request code snippet */
1646 /* 407 - Act on a Proxy-Authenticate, we keep on moving and do the
1647 Proxy-Authorization: XXXX header in the HTTP request code snippet */
1648 /* 300 - Multiple Choices */
1649 /* 306 - Not used */
1650 /* 307 - Temporary Redirect */
1651 default: /* for all above (and the unknown ones) */
1652 /* Some codes are explicitly mentioned since I've checked RFC2616 and they
1653 * seem to be OK to POST to.
1654 */
1655 break;
1656 case 301: /* Moved Permanently */
1657 /* (quote from RFC7231, section 6.4.2)
1658 *
1659 * Note: For historical reasons, a user agent MAY change the request
1660 * method from POST to GET for the subsequent request. If this
1661 * behavior is undesired, the 307 (Temporary Redirect) status code
1662 * can be used instead.
1663 *
1664 * ----
1665 *
1666 * Many webservers expect this, so these servers often answers to a POST
1667 * request with an error page. To be sure that libcurl gets the page that
1668 * most user agents would get, libcurl has to force GET.
1669 *
1670 * This behavior is forbidden by RFC1945 and the obsolete RFC2616, and
1671 * can be overridden with CURLOPT_POSTREDIR.
1672 */
1673 if((data->state.httpreq == HTTPREQ_POST
1674 || data->state.httpreq == HTTPREQ_POST_FORM
1675 || data->state.httpreq == HTTPREQ_POST_MIME)
1676 && !(data->set.keep_post & CURL_REDIR_POST_301)) {
1677 infof(data, "Switch from POST to GET\n");
1678 data->state.httpreq = HTTPREQ_GET;
1679 }
1680 break;
1681 case 302: /* Found */
1682 /* (quote from RFC7231, section 6.4.3)
1683 *
1684 * Note: For historical reasons, a user agent MAY change the request
1685 * method from POST to GET for the subsequent request. If this
1686 * behavior is undesired, the 307 (Temporary Redirect) status code
1687 * can be used instead.
1688 *
1689 * ----
1690 *
1691 * Many webservers expect this, so these servers often answers to a POST
1692 * request with an error page. To be sure that libcurl gets the page that
1693 * most user agents would get, libcurl has to force GET.
1694 *
1695 * This behavior is forbidden by RFC1945 and the obsolete RFC2616, and
1696 * can be overridden with CURLOPT_POSTREDIR.
1697 */
1698 if((data->state.httpreq == HTTPREQ_POST
1699 || data->state.httpreq == HTTPREQ_POST_FORM
1700 || data->state.httpreq == HTTPREQ_POST_MIME)
1701 && !(data->set.keep_post & CURL_REDIR_POST_302)) {
1702 infof(data, "Switch from POST to GET\n");
1703 data->state.httpreq = HTTPREQ_GET;
1704 }
1705 break;
1706
1707 case 303: /* See Other */
1708 /* 'See Other' location is not the resource but a substitute for the
1709 * resource. In this case we switch the method to GET/HEAD, unless the
1710 * method is POST and the user specified to keep it as POST.
1711 * https://github.com/curl/curl/issues/5237#issuecomment-614641049
1712 */
1713 if(data->state.httpreq != HTTPREQ_GET &&
1714 ((data->state.httpreq != HTTPREQ_POST &&
1715 data->state.httpreq != HTTPREQ_POST_FORM &&
1716 data->state.httpreq != HTTPREQ_POST_MIME) ||
1717 !(data->set.keep_post & CURL_REDIR_POST_303))) {
1718 data->state.httpreq = HTTPREQ_GET;
1719 data->set.upload = false;
1720 infof(data, "Switch to %s\n",
1721 data->set.opt_no_body?"HEAD":"GET");
1722 }
1723 break;
1724 case 304: /* Not Modified */
1725 /* 304 means we did a conditional request and it was "Not modified".
1726 * We shouldn't get any Location: header in this response!
1727 */
1728 break;
1729 case 305: /* Use Proxy */
1730 /* (quote from RFC2616, section 10.3.6):
1731 * "The requested resource MUST be accessed through the proxy given
1732 * by the Location field. The Location field gives the URI of the
1733 * proxy. The recipient is expected to repeat this single request
1734 * via the proxy. 305 responses MUST only be generated by origin
1735 * servers."
1736 */
1737 break;
1738 }
1739 Curl_pgrsTime(data, TIMER_REDIRECT);
1740 Curl_pgrsResetTransferSizes(data);
1741
1742 return CURLE_OK;
1743 #endif /* CURL_DISABLE_HTTP */
1744 }
1745
1746 /* Returns CURLE_OK *and* sets '*url' if a request retry is wanted.
1747
1748 NOTE: that the *url is malloc()ed. */
Curl_retry_request(struct Curl_easy * data,char ** url)1749 CURLcode Curl_retry_request(struct Curl_easy *data, char **url)
1750 {
1751 struct connectdata *conn = data->conn;
1752 bool retry = FALSE;
1753 *url = NULL;
1754
1755 /* if we're talking upload, we can't do the checks below, unless the protocol
1756 is HTTP as when uploading over HTTP we will still get a response */
1757 if(data->set.upload &&
1758 !(conn->handler->protocol&(PROTO_FAMILY_HTTP|CURLPROTO_RTSP)))
1759 return CURLE_OK;
1760
1761 if((data->req.bytecount + data->req.headerbytecount == 0) &&
1762 conn->bits.reuse &&
1763 (!data->set.opt_no_body
1764 || (conn->handler->protocol & PROTO_FAMILY_HTTP)) &&
1765 (data->set.rtspreq != RTSPREQ_RECEIVE))
1766 /* We got no data, we attempted to re-use a connection. For HTTP this
1767 can be a retry so we try again regardless if we expected a body.
1768 For other protocols we only try again only if we expected a body.
1769
1770 This might happen if the connection was left alive when we were
1771 done using it before, but that was closed when we wanted to read from
1772 it again. Bad luck. Retry the same request on a fresh connect! */
1773 retry = TRUE;
1774 else if(data->state.refused_stream &&
1775 (data->req.bytecount + data->req.headerbytecount == 0) ) {
1776 /* This was sent on a refused stream, safe to rerun. A refused stream
1777 error can typically only happen on HTTP/2 level if the stream is safe
1778 to issue again, but the nghttp2 API can deliver the message to other
1779 streams as well, which is why this adds the check the data counters
1780 too. */
1781 infof(data, "REFUSED_STREAM, retrying a fresh connect\n");
1782 data->state.refused_stream = FALSE; /* clear again */
1783 retry = TRUE;
1784 }
1785 if(retry) {
1786 #define CONN_MAX_RETRIES 5
1787 if(data->state.retrycount++ >= CONN_MAX_RETRIES) {
1788 failf(data, "Connection died, tried %d times before giving up",
1789 CONN_MAX_RETRIES);
1790 data->state.retrycount = 0;
1791 return CURLE_SEND_ERROR;
1792 }
1793 infof(data, "Connection died, retrying a fresh connect\
1794 (retry count: %d)\n", data->state.retrycount);
1795 *url = strdup(data->change.url);
1796 if(!*url)
1797 return CURLE_OUT_OF_MEMORY;
1798
1799 connclose(conn, "retry"); /* close this connection */
1800 conn->bits.retry = TRUE; /* mark this as a connection we're about
1801 to retry. Marking it this way should
1802 prevent i.e HTTP transfers to return
1803 error just because nothing has been
1804 transferred! */
1805
1806
1807 if(conn->handler->protocol&PROTO_FAMILY_HTTP) {
1808 if(data->req.writebytecount) {
1809 CURLcode result = Curl_readrewind(data);
1810 if(result) {
1811 Curl_safefree(*url);
1812 return result;
1813 }
1814 }
1815 }
1816 }
1817 return CURLE_OK;
1818 }
1819
1820 /*
1821 * Curl_setup_transfer() is called to setup some basic properties for the
1822 * upcoming transfer.
1823 */
1824 void
Curl_setup_transfer(struct Curl_easy * data,int sockindex,curl_off_t size,bool getheader,int writesockindex)1825 Curl_setup_transfer(
1826 struct Curl_easy *data, /* transfer */
1827 int sockindex, /* socket index to read from or -1 */
1828 curl_off_t size, /* -1 if unknown at this point */
1829 bool getheader, /* TRUE if header parsing is wanted */
1830 int writesockindex /* socket index to write to, it may very well be
1831 the same we read from. -1 disables */
1832 )
1833 {
1834 struct SingleRequest *k = &data->req;
1835 struct connectdata *conn = data->conn;
1836 struct HTTP *http = data->req.p.http;
1837 bool httpsending = ((conn->handler->protocol&PROTO_FAMILY_HTTP) &&
1838 (http->sending == HTTPSEND_REQUEST));
1839 DEBUGASSERT(conn != NULL);
1840 DEBUGASSERT((sockindex <= 1) && (sockindex >= -1));
1841
1842 if(conn->bits.multiplex || conn->httpversion == 20 || httpsending) {
1843 /* when multiplexing, the read/write sockets need to be the same! */
1844 conn->sockfd = sockindex == -1 ?
1845 ((writesockindex == -1 ? CURL_SOCKET_BAD : conn->sock[writesockindex])) :
1846 conn->sock[sockindex];
1847 conn->writesockfd = conn->sockfd;
1848 if(httpsending)
1849 /* special and very HTTP-specific */
1850 writesockindex = FIRSTSOCKET;
1851 }
1852 else {
1853 conn->sockfd = sockindex == -1 ?
1854 CURL_SOCKET_BAD : conn->sock[sockindex];
1855 conn->writesockfd = writesockindex == -1 ?
1856 CURL_SOCKET_BAD:conn->sock[writesockindex];
1857 }
1858 k->getheader = getheader;
1859
1860 k->size = size;
1861
1862 /* The code sequence below is placed in this function just because all
1863 necessary input is not always known in do_complete() as this function may
1864 be called after that */
1865
1866 if(!k->getheader) {
1867 k->header = FALSE;
1868 if(size > 0)
1869 Curl_pgrsSetDownloadSize(data, size);
1870 }
1871 /* we want header and/or body, if neither then don't do this! */
1872 if(k->getheader || !data->set.opt_no_body) {
1873
1874 if(sockindex != -1)
1875 k->keepon |= KEEP_RECV;
1876
1877 if(writesockindex != -1) {
1878 /* HTTP 1.1 magic:
1879
1880 Even if we require a 100-return code before uploading data, we might
1881 need to write data before that since the REQUEST may not have been
1882 finished sent off just yet.
1883
1884 Thus, we must check if the request has been sent before we set the
1885 state info where we wait for the 100-return code
1886 */
1887 if((data->state.expect100header) &&
1888 (conn->handler->protocol&PROTO_FAMILY_HTTP) &&
1889 (http->sending == HTTPSEND_BODY)) {
1890 /* wait with write until we either got 100-continue or a timeout */
1891 k->exp100 = EXP100_AWAITING_CONTINUE;
1892 k->start100 = Curl_now();
1893
1894 /* Set a timeout for the multi interface. Add the inaccuracy margin so
1895 that we don't fire slightly too early and get denied to run. */
1896 Curl_expire(data, data->set.expect_100_timeout, EXPIRE_100_TIMEOUT);
1897 }
1898 else {
1899 if(data->state.expect100header)
1900 /* when we've sent off the rest of the headers, we must await a
1901 100-continue but first finish sending the request */
1902 k->exp100 = EXP100_SENDING_REQUEST;
1903
1904 /* enable the write bit when we're not waiting for continue */
1905 k->keepon |= KEEP_SEND;
1906 }
1907 } /* if(writesockindex != -1) */
1908 } /* if(k->getheader || !data->set.opt_no_body) */
1909
1910 }
1911