1 /***************************************************************************
2  *                                  _   _ ____  _
3  *  Project                     ___| | | |  _ \| |
4  *                             / __| | | | |_) | |
5  *                            | (__| |_| |  _ <| |___
6  *                             \___|\___/|_| \_\_____|
7  *
8  * Copyright (C) 1998 - 2009, Daniel Stenberg, <daniel@haxx.se>, et al.
9  *
10  * This software is licensed as described in the file COPYING, which
11  * you should have received as part of this distribution. The terms
12  * are also available at http://curl.haxx.se/docs/copyright.html.
13  *
14  * You may opt to use, copy, modify, merge, publish, distribute and/or sell
15  * copies of the Software, and permit persons to whom the Software is
16  * furnished to do so, under the terms of the COPYING file.
17  *
18  * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
19  * KIND, either express or implied.
20  *
21  * $Id: transfer.c,v 1.427 2009-02-27 08:53:10 bagder Exp $
22  ***************************************************************************/
23 
24 #include "setup.h"
25 
26 /* -- WIN32 approved -- */
27 #include <stdio.h>
28 #include <string.h>
29 #include <stdarg.h>
30 #include <stdlib.h>
31 #include <ctype.h>
32 #include <errno.h>
33 
34 #include "strtoofft.h"
35 #include "strequal.h"
36 #include "rawstr.h"
37 
38 #ifdef WIN32
39 #include <time.h>
40 #include <io.h>
41 #else
42 #ifdef HAVE_SYS_SOCKET_H
43 #include <sys/socket.h>
44 #endif
45 #ifdef HAVE_NETINET_IN_H
46 #include <netinet/in.h>
47 #endif
48 #ifdef HAVE_SYS_TIME_H
49 #include <sys/time.h>
50 #endif
51 #ifdef HAVE_UNISTD_H
52 #include <unistd.h>
53 #endif
54 #ifdef HAVE_NETDB_H
55 #include <netdb.h>
56 #endif
57 #ifdef HAVE_ARPA_INET_H
58 #include <arpa/inet.h>
59 #endif
60 #ifdef HAVE_NET_IF_H
61 #include <net/if.h>
62 #endif
63 #ifdef HAVE_SYS_IOCTL_H
64 #include <sys/ioctl.h>
65 #endif
66 #ifdef HAVE_SIGNAL_H
67 #include <signal.h>
68 #endif
69 
70 #ifdef HAVE_SYS_PARAM_H
71 #include <sys/param.h>
72 #endif
73 
74 #ifdef HAVE_SYS_SELECT_H
75 #include <sys/select.h>
76 #endif
77 
78 #ifndef HAVE_SOCKET
79 #error "We can't compile without socket() support!"
80 #endif
81 
82 #endif  /* WIN32 */
83 
84 #include "urldata.h"
85 #include <curl/curl.h>
86 #include "netrc.h"
87 
88 #include "content_encoding.h"
89 #include "hostip.h"
90 #include "transfer.h"
91 #include "sendf.h"
92 #include "speedcheck.h"
93 #include "progress.h"
94 #include "http.h"
95 #include "url.h"
96 #include "getinfo.h"
97 #include "sslgen.h"
98 #include "http_digest.h"
99 #include "http_ntlm.h"
100 #include "http_negotiate.h"
101 #include "share.h"
102 #include "memory.h"
103 #include "select.h"
104 #include "multiif.h"
105 #include "easyif.h" /* for Curl_convert_to_network prototype */
106 
107 #define _MPRINTF_REPLACE /* use our functions only */
108 #include <curl/mprintf.h>
109 
110 /* The last #include file should be: */
111 #include "memdebug.h"
112 
113 #define CURL_TIMEOUT_EXPECT_100 1000 /* counting ms here */
114 
115 
116 #ifndef CURL_DISABLE_HTTP
117 static CURLcode readwrite_http_headers(struct SessionHandle *data,
118                                        struct connectdata *conn,
119                                        struct SingleRequest *k,
120                                        ssize_t *nread,
121                                        bool *stop_reading);
122 #endif /* CURL_DISABLE_HTTP */
123 
/*
 * This function will call the read callback to fill our buffer with data
 * to upload.
 *
 * conn   - connection the upload runs on
 * bytes  - total size of the buffer at data->req.upload_fromhere
 * nreadp - [out] number of bytes now ready to send, including any chunked
 *          transfer-encoding framing added by this function
 *
 * Returns CURLE_OK, CURLE_ABORTED_BY_CALLBACK, CURLE_READ_ERROR or a
 * conversion error code.
 */
CURLcode Curl_fillreadbuffer(struct connectdata *conn, int bytes, int *nreadp)
{
  struct SessionHandle *data = conn->data;
  size_t buffersize = (size_t)bytes;
  int nread;

  if(data->req.upload_chunky) {
    /* if chunked Transfer-Encoding: reserve room for the chunk framing.
       The read callback is handed a pointer 10 bytes into the buffer so
       that the hex length prefix + CRLF can later be written in front of
       the payload without moving it; 4 more bytes are kept free at the
       end for the CRLF after the data and the final CRLF. */
    buffersize -= (8 + 2 + 2);   /* 32bit hex + CRLF + CRLF */
    data->req.upload_fromhere += (8 + 2); /* 32bit hex + CRLF */
  }

  /* this function returns a size_t, so we typecast to int to prevent warnings
     with picky compilers */
  nread = (int)conn->fread_func(data->req.upload_fromhere, 1,
                                buffersize, conn->fread_in);

  if(nread == CURL_READFUNC_ABORT) {
    failf(data, "operation aborted by callback");
    *nreadp = 0;
    return CURLE_ABORTED_BY_CALLBACK;
  }
  else if(nread == CURL_READFUNC_PAUSE) {
    struct SingleRequest *k = &data->req;
    /* CURL_READFUNC_PAUSE pauses read callbacks that feed socket writes */
    k->keepon |= KEEP_WRITE_PAUSE; /* mark socket send as paused */
    if(data->req.upload_chunky) {
      /* Back out the preallocation done above */
      data->req.upload_fromhere -= (8 + 2);
    }
    *nreadp = 0;
    return CURLE_OK; /* nothing was read */
  }
  else if((size_t)nread > buffersize) {
    /* the read function returned a too large value */
    *nreadp = 0;
    failf(data, "read function returned funny value");
    return CURLE_READ_ERROR;
  }

  if(!data->req.forbidchunk && data->req.upload_chunky) {
    /* if chunked Transfer-Encoding: write the "<hex>\r\n" chunk header in
       front of the data just read. hexbuffer holds at most 8 hex digits +
       CRLF + NUL = 11 bytes, matching the space reserved above. */
    char hexbuffer[11];
    int hexlen = snprintf(hexbuffer, sizeof(hexbuffer),
                          "%x\r\n", nread);
    /* move buffer pointer back over the space the prefix needs */
    data->req.upload_fromhere -= hexlen;
    nread += hexlen;

    /* copy the prefix to the buffer, leaving out the NUL */
    memcpy(data->req.upload_fromhere, hexbuffer, hexlen);

    /* always append CRLF to the data */
    memcpy(data->req.upload_fromhere + nread, "\r\n", 2);

    if((nread - hexlen) == 0) {
      /* a zero-length chunk ("0\r\n\r\n") terminates the chunked body;
         mark this as done once this chunk is transfered */
      data->req.upload_done = TRUE;
    }

    nread+=2; /* for the added CRLF */
  }

  *nreadp = nread;

#ifdef CURL_DOES_CONVERSIONS
  if(data->set.prefer_ascii) {
    CURLcode res;
    res = Curl_convert_to_network(data, data->req.upload_fromhere, nread);
    /* Curl_convert_to_network calls failf if unsuccessful */
    if(res != CURLE_OK) {
      return(res);
    }
  }
#endif /* CURL_DOES_CONVERSIONS */

  return CURLE_OK;
}
206 
207 #ifndef CURL_DISABLE_HTTP
208 /*
209  * checkhttpprefix()
210  *
211  * Returns TRUE if member of the list matches prefix of string
212  */
213 static bool
checkhttpprefix(struct SessionHandle * data,const char * s)214 checkhttpprefix(struct SessionHandle *data,
215                 const char *s)
216 {
217   struct curl_slist *head = data->set.http200aliases;
218   bool rc = FALSE;
219 #ifdef CURL_DOES_CONVERSIONS
220   /* convert from the network encoding using a scratch area */
221   char *scratch = calloc(1, strlen(s)+1);
222   if(NULL == scratch) {
223      failf (data, "Failed to calloc memory for conversion!");
224      return FALSE; /* can't return CURLE_OUT_OF_MEMORY so return FALSE */
225   }
226   strcpy(scratch, s);
227   if(CURLE_OK != Curl_convert_from_network(data, scratch, strlen(s)+1)) {
228     /* Curl_convert_from_network calls failf if unsuccessful */
229      free(scratch);
230      return FALSE; /* can't return CURLE_foobar so return FALSE */
231   }
232   s = scratch;
233 #endif /* CURL_DOES_CONVERSIONS */
234 
235   while(head) {
236     if(checkprefix(head->data, s)) {
237       rc = TRUE;
238       break;
239     }
240     head = head->next;
241   }
242 
243   if((rc != TRUE) && (checkprefix("HTTP/", s))) {
244     rc = TRUE;
245   }
246 
247 #ifdef CURL_DOES_CONVERSIONS
248   free(scratch);
249 #endif /* CURL_DOES_CONVERSIONS */
250   return rc;
251 }
252 #endif   /* CURL_DISABLE_HTTP */
253 
254 /*
255  * Curl_readrewind() rewinds the read stream. This is typically used for HTTP
256  * POST/PUT with multi-pass authentication when a sending was denied and a
257  * resend is necessary.
258  */
Curl_readrewind(struct connectdata * conn)259 CURLcode Curl_readrewind(struct connectdata *conn)
260 {
261   struct SessionHandle *data = conn->data;
262 
263   conn->bits.rewindaftersend = FALSE; /* we rewind now */
264 
265   /* explicitly switch off sending data on this connection now since we are
266      about to restart a new transfer and thus we want to avoid inadvertently
267      sending more data on the existing connection until the next transfer
268      starts */
269   data->req.keepon &= ~KEEP_WRITE;
270 
271   /* We have sent away data. If not using CURLOPT_POSTFIELDS or
272      CURLOPT_HTTPPOST, call app to rewind
273   */
274   if(data->set.postfields ||
275      (data->set.httpreq == HTTPREQ_POST_FORM))
276     ; /* do nothing */
277   else {
278     if(data->set.seek_func) {
279       int err;
280 
281       err = (data->set.seek_func)(data->set.seek_client, 0, SEEK_SET);
282       if(err) {
283         failf(data, "seek callback returned error %d", (int)err);
284         return CURLE_SEND_FAIL_REWIND;
285       }
286     }
287     else if(data->set.ioctl_func) {
288       curlioerr err;
289 
290       err = (data->set.ioctl_func)(data, CURLIOCMD_RESTARTREAD,
291                                    data->set.ioctl_client);
292       infof(data, "the ioctl callback returned %d\n", (int)err);
293 
294       if(err) {
295         /* FIXME: convert to a human readable error message */
296         failf(data, "ioctl callback returned error %d", (int)err);
297         return CURLE_SEND_FAIL_REWIND;
298       }
299     }
300     else {
301       /* If no CURLOPT_READFUNCTION is used, we know that we operate on a
302          given FILE * stream and we can actually attempt to rewind that
303          ourself with fseek() */
304       if(data->set.fread_func == (curl_read_callback)fread) {
305         if(-1 != fseek(data->set.in, 0, SEEK_SET))
306           /* successful rewind */
307           return CURLE_OK;
308       }
309 
310       /* no callback set or failure above, makes us fail at once */
311       failf(data, "necessary data rewind wasn't possible");
312       return CURLE_SEND_FAIL_REWIND;
313     }
314   }
315   return CURLE_OK;
316 }
317 
data_pending(const struct connectdata * conn)318 static int data_pending(const struct connectdata *conn)
319 {
320   /* in the case of libssh2, we can never be really sure that we have emptied
321      its internal buffers so we MUST always try until we get EAGAIN back */
322   return conn->protocol&(PROT_SCP|PROT_SFTP) ||
323     Curl_ssl_data_pending(conn, FIRSTSOCKET);
324 }
325 
read_rewind(struct connectdata * conn,size_t thismuch)326 static void read_rewind(struct connectdata *conn,
327                         size_t thismuch)
328 {
329   conn->read_pos -= thismuch;
330   conn->bits.stream_was_rewound = TRUE;
331 
332 #ifdef CURLDEBUG
333   {
334     char buf[512 + 1];
335     size_t show;
336 
337     show = CURLMIN(conn->buf_len - conn->read_pos, sizeof(buf)-1);
338     if(conn->master_buffer) {
339         memcpy(buf, conn->master_buffer + conn->read_pos, show);
340         buf[show] = '\0';
341     }
342     else {
343         buf[0] = '\0';
344     }
345 
346     DEBUGF(infof(conn->data,
347                  "Buffer after stream rewind (read_pos = %d): [%s]",
348                  conn->read_pos, buf));
349   }
350 #endif
351 }
352 
353 
/*
 * Go ahead and do a read if we have a readable socket or if
 * the stream was rewound (in which case we have data in a
 * buffer)
 *
 * On return, *didwhat has KEEP_READ OR'ed in when a read was attempted and
 * *done is TRUE when the transfer is complete. Returns CURLE_OK or a
 * transfer-terminating error code.
 */
static CURLcode readwrite_data(struct SessionHandle *data,
                               struct connectdata *conn,
                               struct SingleRequest *k,
                               int *didwhat, bool *done)
{
  CURLcode result = CURLE_OK;
  ssize_t nread; /* number of bytes read */
  bool is_empty_data = FALSE;

  *done = FALSE;

  /* This is where we loop until we have read everything there is to
     read or we get a EWOULDBLOCK */
  do {
    size_t buffersize = data->set.buffer_size?
      data->set.buffer_size : BUFSIZE;
    size_t bytestoread = buffersize;
    int readrc;

    if(k->size != -1 && !k->header) {
      /* make sure we don't read "too much" if we can help it since we
         might be pipelining and then someone else might want to read what
         follows! */
      curl_off_t totalleft = k->size - k->bytecount;
      if(totalleft < (curl_off_t)bytestoread)
        bytestoread = (size_t)totalleft;
    }

    if(bytestoread) {
      /* receive data from the network! */
      readrc = Curl_read(conn, conn->sockfd, k->buf, bytestoread, &nread);

      /* subzero, this would've blocked */
      if(0 > readrc)
        break; /* get out of loop */

      /* get the CURLcode from the int */
      result = (CURLcode)readrc;

      if(result>0)
        return result;
    }
    else {
      /* read nothing but since we wanted nothing we consider this an OK
         situation to proceed from */
      nread = 0;
    }

    if((k->bytecount == 0) && (k->writebytecount == 0)) {
      /* nothing transferred yet in either direction: this marks the
         official start-of-transfer time */
      Curl_pgrsTime(data, TIMER_STARTTRANSFER);
      if(k->exp100 > EXP100_SEND_DATA)
        /* set time stamp to compare with when waiting for the 100 */
        k->start100 = Curl_tvnow();
    }

    *didwhat |= KEEP_READ;
    /* indicates data of zero size, i.e. empty file */
    is_empty_data = (bool)((nread == 0) && (k->bodywrites == 0));

    /* NUL terminate, allowing string ops to be used */
    if(0 < nread || is_empty_data) {
      k->buf[nread] = 0;
    }
    else if(0 >= nread) {
      /* if we receive 0 or less here, the server closed the connection
         and we bail out from this! */
      DEBUGF(infof(data, "nread <= 0, server closed connection, bailing\n"));
      k->keepon &= ~KEEP_READ;
      break;
    }

    /* Default buffer to use when we write the buffer, it may be changed
       in the flow below before the actual storing is done. */
    k->str = k->buf;

#ifndef CURL_DISABLE_HTTP
    /* Since this is a two-state thing, we check if we are parsing
       headers at the moment or not. */
    if(k->header) {
      /* we are in parse-the-header-mode; this may consume only part of the
         buffer and leave body data behind (see k->str below) */
      bool stop_reading = FALSE;
      result = readwrite_http_headers(data, conn, k, &nread, &stop_reading);
      if(result)
        return result;
      if(stop_reading)
        /* We've stopped dealing with input, get out of the do-while loop */
        break;
    }
#endif /* CURL_DISABLE_HTTP */


    /* This is not an 'else if' since it may be a rest from the header
       parsing, where the beginning of the buffer is headers and the end
       is non-headers. */
    if(k->str && !k->header && (nread > 0 || is_empty_data)) {

#ifndef CURL_DISABLE_HTTP
      if(0 == k->bodywrites && !is_empty_data) {
        /* These checks are only made the first time we are about to
           write a piece of the body */
        if(conn->protocol&PROT_HTTP) {
          /* HTTP-only checks */

          if(data->req.newurl) {
            if(conn->bits.close) {
              /* Abort after the headers if "follow Location" is set
                 and we're set to close anyway. */
              k->keepon &= ~KEEP_READ;
              *done = TRUE;
              return CURLE_OK;
            }
            /* We have a new url to load, but since we want to be able
               to re-use this connection properly, we read the full
               response in "ignore more" */
            k->ignorebody = TRUE;
            infof(data, "Ignoring the response-body\n");
          }
          if(data->state.resume_from && !k->content_range &&
             (data->set.httpreq==HTTPREQ_GET) &&
             !k->ignorebody) {
            /* we wanted to resume a download, although the server doesn't
             * seem to support this and we did this with a GET (if it
             * wasn't a GET we did a POST or PUT resume) */
            failf(data, "HTTP server doesn't seem to support "
                  "byte ranges. Cannot resume.");
            return CURLE_RANGE_ERROR;
          }

          if(data->set.timecondition && !data->state.range) {
            /* A time condition has been set AND no ranges have been
               requested. This seems to be what chapter 13.3.4 of
               RFC 2616 defines to be the correct action for a
               HTTP/1.1 client */
            if((k->timeofdoc > 0) && (data->set.timevalue > 0)) {
              switch(data->set.timecondition) {
              case CURL_TIMECOND_IFMODSINCE:
              default:
                if(k->timeofdoc < data->set.timevalue) {
                  infof(data,
                        "The requested document is not new enough\n");
                  *done = TRUE;
                  data->info.timecond = TRUE;
                  return CURLE_OK;
                }
                break;
              case CURL_TIMECOND_IFUNMODSINCE:
                if(k->timeofdoc > data->set.timevalue) {
                  infof(data,
                        "The requested document is not old enough\n");
                  *done = TRUE;
                  data->info.timecond = TRUE;
                  return CURLE_OK;
                }
                break;
              } /* switch */
            } /* two valid time strings */
          } /* we have a time condition */

        } /* this is HTTP */
      } /* this is the first time we write a body part */
#endif /* CURL_DISABLE_HTTP */
      k->bodywrites++;

      /* pass data to the debug function before it gets "dechunked" */
      if(data->set.verbose) {
        if(k->badheader) {
          Curl_debug(data, CURLINFO_DATA_IN, data->state.headerbuff,
                     (size_t)k->hbuflen, conn);
          if(k->badheader == HEADER_PARTHEADER)
            Curl_debug(data, CURLINFO_DATA_IN,
                       k->str, (size_t)nread, conn);
        }
        else
          Curl_debug(data, CURLINFO_DATA_IN,
                     k->str, (size_t)nread, conn);
      }

#ifndef CURL_DISABLE_HTTP
      if(k->chunk) {
        /*
         * Here comes a chunked transfer flying and we need to decode this
         * properly.  While the name says read, this function both reads
         * and writes away the data. The returned 'nread' holds the number
         * of actual data it wrote to the client.
         */

        CHUNKcode res =
          Curl_httpchunk_read(conn, k->str, nread, &nread);

        if(CHUNKE_OK < res) {
          if(CHUNKE_WRITE_ERROR == res) {
            failf(data, "Failed writing data");
            return CURLE_WRITE_ERROR;
          }
          failf(data, "Received problem %d in the chunky parser", res);
          return CURLE_RECV_ERROR;
        }
        else if(CHUNKE_STOP == res) {
          size_t dataleft;
          /* we're done reading chunks! */
          k->keepon &= ~KEEP_READ; /* read no more */

          /* There are now possibly N number of bytes at the end of the
             str buffer that weren't written to the client.

             We DO care about this data if we are pipelining.
             Push it back to be read on the next pass. */

          dataleft = conn->chunk.dataleft;
          if(dataleft != 0) {
            infof(conn->data, "Leftovers after chunking. "
                  " Rewinding %d bytes\n",dataleft);
            read_rewind(conn, dataleft);
          }
        }
        /* If it returned OK, we just keep going */
      }
#endif   /* CURL_DISABLE_HTTP */

      /* clamp to maxdownload: anything past it is rewound for a possible
         pipelined follow-up request and the read side is shut down */
      if((-1 != k->maxdownload) &&
         (k->bytecount + nread >= k->maxdownload)) {
        /* The 'excess' amount below can't be more than BUFSIZE which
           always will fit in a size_t */
        size_t excess = (size_t)(k->bytecount + nread - k->maxdownload);
        if(excess > 0 && !k->ignorebody) {
          infof(data,
                "Rewinding stream by : %d"
                " bytes on url %s (size = %" FORMAT_OFF_T
                ", maxdownload = %" FORMAT_OFF_T
                ", bytecount = %" FORMAT_OFF_T ", nread = %d)\n",
                excess, data->state.path,
                k->size, k->maxdownload, k->bytecount, nread);
          read_rewind(conn, excess);
        }

        nread = (ssize_t) (k->maxdownload - k->bytecount);
        if(nread < 0 ) /* this should be unusual */
          nread = 0;

        k->keepon &= ~KEEP_READ; /* we're done reading */
      }

      k->bytecount += nread;

      Curl_pgrsSetDownloadCounter(data, k->bytecount);

      if(!k->chunk && (nread || k->badheader || is_empty_data)) {
        /* If this is chunky transfer, it was already written */

        if(k->badheader && !k->ignorebody) {
          /* we parsed a piece of data wrongly assuming it was a header
             and now we output it as body instead */
          result = Curl_client_write(conn, CLIENTWRITE_BODY,
                                     data->state.headerbuff,
                                     k->hbuflen);
          if(result)
            return result;
        }
        if(k->badheader < HEADER_ALLBAD) {
          /* This switch handles various content encodings. If there's an
             error here, be sure to check over the almost identical code
             in http_chunks.c.
             Make sure that ALL_CONTENT_ENCODINGS contains all the
             encodings handled here. */
#ifdef HAVE_LIBZ
          switch (conn->data->set.http_ce_skip ?
                  IDENTITY : k->content_encoding) {
          case IDENTITY:
#endif
            /* This is the default when the server sends no
               Content-Encoding header. See Curl_readwrite_init; the
               memset() call initializes k->content_encoding to zero. */
            if(!k->ignorebody)
              result = Curl_client_write(conn, CLIENTWRITE_BODY, k->str,
                                         nread);
#ifdef HAVE_LIBZ
            break;

          case DEFLATE:
            /* Assume CLIENTWRITE_BODY; headers are not encoded. */
            if(!k->ignorebody)
              result = Curl_unencode_deflate_write(conn, k, nread);
            break;

          case GZIP:
            /* Assume CLIENTWRITE_BODY; headers are not encoded. */
            if(!k->ignorebody)
              result = Curl_unencode_gzip_write(conn, k, nread);
            break;

          case COMPRESS:
          default:
            failf (data, "Unrecognized content encoding type. "
                   "libcurl understands `identity', `deflate' and `gzip' "
                   "content encodings.");
            result = CURLE_BAD_CONTENT_ENCODING;
            break;
          }
#endif
        }
        k->badheader = HEADER_NORMAL; /* taken care of now */

        if(result)
          return result;
      }

    } /* if(! header and data to read ) */

    if(is_empty_data) {
      /* if we received nothing, the server closed the connection and we
         are done */
      k->keepon &= ~KEEP_READ;
    }

  } while(data_pending(conn));

  if(((k->keepon & (KEEP_READ|KEEP_WRITE)) == KEEP_WRITE) &&
     conn->bits.close ) {
    /* When we've read the entire thing and the close bit is set, the server
       may now close the connection. If there's now any kind of sending going
       on from our side, we need to stop that immediately. */
    infof(data, "we are done reading and this is set to close, stop send\n");
    k->keepon &= ~KEEP_WRITE; /* no writing anymore either */
  }

  return CURLE_OK;
}
686 
687 #ifndef CURL_DISABLE_HTTP
688 /*
689  * Read any HTTP header lines from the server and pass them to the client app.
690  */
readwrite_http_headers(struct SessionHandle * data,struct connectdata * conn,struct SingleRequest * k,ssize_t * nread,bool * stop_reading)691 static CURLcode readwrite_http_headers(struct SessionHandle *data,
692                                        struct connectdata *conn,
693                                        struct SingleRequest *k,
694                                        ssize_t *nread,
695                                        bool *stop_reading)
696 {
697   CURLcode result;
698 
699   /* header line within buffer loop */
700   do {
701     size_t hbufp_index;
702     size_t rest_length;
703     size_t full_length;
704     int writetype;
705 
706     /* str_start is start of line within buf */
707     k->str_start = k->str;
708 
709     /* data is in network encoding so use 0x0a instead of '\n' */
710     k->end_ptr = memchr(k->str_start, 0x0a, *nread);
711 
712     if(!k->end_ptr) {
713       /* Not a complete header line within buffer, append the data to
714          the end of the headerbuff. */
715 
716       if(k->hbuflen + *nread >= data->state.headersize) {
717         /* We enlarge the header buffer as it is too small */
718         char *newbuff;
719         size_t newsize=CURLMAX((k->hbuflen+*nread)*3/2,
720                                data->state.headersize*2);
721         hbufp_index = k->hbufp - data->state.headerbuff;
722         newbuff = realloc(data->state.headerbuff, newsize);
723         if(!newbuff) {
724           failf (data, "Failed to alloc memory for big header!");
725           return CURLE_OUT_OF_MEMORY;
726         }
727         data->state.headersize=newsize;
728         data->state.headerbuff = newbuff;
729         k->hbufp = data->state.headerbuff + hbufp_index;
730       }
731       memcpy(k->hbufp, k->str, *nread);
732       k->hbufp += *nread;
733       k->hbuflen += *nread;
734       if(!k->headerline && (k->hbuflen>5)) {
735         /* make a first check that this looks like a HTTP header */
736         if(!checkhttpprefix(data, data->state.headerbuff)) {
737           /* this is not the beginning of a HTTP first header line */
738           k->header = FALSE;
739           k->badheader = HEADER_ALLBAD;
740           break;
741         }
742       }
743 
744       break; /* read more and try again */
745     }
746 
747     /* decrease the size of the remaining (supposed) header line */
748     rest_length = (k->end_ptr - k->str)+1;
749     *nread -= (ssize_t)rest_length;
750 
751     k->str = k->end_ptr + 1; /* move past new line */
752 
753     full_length = k->str - k->str_start;
754 
755     /*
756      * We're about to copy a chunk of data to the end of the
757      * already received header. We make sure that the full string
758      * fit in the allocated header buffer, or else we enlarge
759      * it.
760      */
761     if(k->hbuflen + full_length >=
762        data->state.headersize) {
763       char *newbuff;
764       size_t newsize=CURLMAX((k->hbuflen+full_length)*3/2,
765                              data->state.headersize*2);
766       hbufp_index = k->hbufp - data->state.headerbuff;
767       newbuff = realloc(data->state.headerbuff, newsize);
768       if(!newbuff) {
769         failf (data, "Failed to alloc memory for big header!");
770         return CURLE_OUT_OF_MEMORY;
771       }
772       data->state.headersize= newsize;
773       data->state.headerbuff = newbuff;
774       k->hbufp = data->state.headerbuff + hbufp_index;
775     }
776 
777     /* copy to end of line */
778     memcpy(k->hbufp, k->str_start, full_length);
779     k->hbufp += full_length;
780     k->hbuflen += full_length;
781     *k->hbufp = 0;
782     k->end_ptr = k->hbufp;
783 
784     k->p = data->state.headerbuff;
785 
786     /****
787      * We now have a FULL header line that p points to
788      *****/
789 
790     if(!k->headerline) {
791       /* the first read header */
792       if((k->hbuflen>5) &&
793          !checkhttpprefix(data, data->state.headerbuff)) {
794         /* this is not the beginning of a HTTP first header line */
795         k->header = FALSE;
796         if(*nread)
797           /* since there's more, this is a partial bad header */
798           k->badheader = HEADER_PARTHEADER;
799         else {
800           /* this was all we read so it's all a bad header */
801           k->badheader = HEADER_ALLBAD;
802           *nread = (ssize_t)rest_length;
803         }
804         break;
805       }
806     }
807 
808     /* headers are in network encoding so
809        use 0x0a and 0x0d instead of '\n' and '\r' */
810     if((0x0a == *k->p) || (0x0d == *k->p)) {
811       size_t headerlen;
812       /* Zero-length header line means end of headers! */
813 
814 #ifdef CURL_DOES_CONVERSIONS
815       if(0x0d == *k->p) {
816         *k->p = '\r'; /* replace with CR in host encoding */
817         k->p++;       /* pass the CR byte */
818       }
819       if(0x0a == *k->p) {
820         *k->p = '\n'; /* replace with LF in host encoding */
821         k->p++;       /* pass the LF byte */
822       }
823 #else
824       if('\r' == *k->p)
825         k->p++; /* pass the \r byte */
826       if('\n' == *k->p)
827         k->p++; /* pass the \n byte */
828 #endif /* CURL_DOES_CONVERSIONS */
829 
830       if(100 <= k->httpcode && 199 >= k->httpcode) {
831         /*
832          * We have made a HTTP PUT or POST and this is 1.1-lingo
833          * that tells us that the server is OK with this and ready
834          * to receive the data.
835          * However, we'll get more headers now so we must get
836          * back into the header-parsing state!
837          */
838         k->header = TRUE;
839         k->headerline = 0; /* restart the header line counter */
840 
841         /* if we did wait for this do enable write now! */
842         if(k->exp100) {
843           k->exp100 = EXP100_SEND_DATA;
844           k->keepon |= KEEP_WRITE;
845         }
846       }
847       else {
848         k->header = FALSE; /* no more header to parse! */
849 
850         if((k->size == -1) && !k->chunk && !conn->bits.close &&
851            (conn->httpversion >= 11) ) {
852           /* On HTTP 1.1, when connection is not to get closed, but no
853              Content-Length nor Content-Encoding chunked have been
854              received, according to RFC2616 section 4.4 point 5, we
855              assume that the server will close the connection to
856              signal the end of the document. */
857           infof(data, "no chunk, no close, no size. Assume close to "
858                 "signal end\n");
859           conn->bits.close = TRUE;
860         }
861       }
862 
863       if(417 == k->httpcode) {
864         /*
865          * we got: "417 Expectation Failed" this means:
866          * we have made a HTTP call and our Expect Header
867          * seems to cause a problem => abort the write operations
868          * (or prevent them from starting).
869          */
870         k->exp100 = EXP100_FAILED;
871         k->keepon &= ~KEEP_WRITE;
872       }
873 
874       /*
875        * When all the headers have been parsed, see if we should give
876        * up and return an error.
877        */
878       if(Curl_http_should_fail(conn)) {
879         failf (data, "The requested URL returned error: %d",
880                k->httpcode);
881         return CURLE_HTTP_RETURNED_ERROR;
882       }
883 
884       /* now, only output this if the header AND body are requested:
885        */
886       writetype = CLIENTWRITE_HEADER;
887       if(data->set.include_header)
888         writetype |= CLIENTWRITE_BODY;
889 
890       headerlen = k->p - data->state.headerbuff;
891 
892       result = Curl_client_write(conn, writetype,
893                                  data->state.headerbuff,
894                                  headerlen);
895       if(result)
896         return result;
897 
898       data->info.header_size += (long)headerlen;
899       data->req.headerbytecount += (long)headerlen;
900 
901       data->req.deductheadercount =
902         (100 <= k->httpcode && 199 >= k->httpcode)?data->req.headerbytecount:0;
903 
904       if(data->state.resume_from &&
905          (data->set.httpreq==HTTPREQ_GET) &&
906          (k->httpcode == 416)) {
907         /* "Requested Range Not Satisfiable" */
908         *stop_reading = TRUE;
909       }
910 
911       if(!*stop_reading) {
912         /* Curl_http_auth_act() checks what authentication methods
913          * that are available and decides which one (if any) to
914          * use. It will set 'newurl' if an auth method was picked. */
915         result = Curl_http_auth_act(conn);
916 
917         if(result)
918           return result;
919 
920         if(conn->bits.rewindaftersend) {
921           /* We rewind after a complete send, so thus we continue
922              sending now */
923           infof(data, "Keep sending data to get tossed away!\n");
924           k->keepon |= KEEP_WRITE;
925         }
926       }
927 
928       if(!k->header) {
929         /*
930          * really end-of-headers.
931          *
932          * If we requested a "no body", this is a good time to get
933          * out and return home.
934          */
935         if(data->set.opt_no_body)
936           *stop_reading = TRUE;
937         else {
938           /* If we know the expected size of this document, we set the
939              maximum download size to the size of the expected
940              document or else, we won't know when to stop reading!
941 
942              Note that we set the download maximum even if we read a
943              "Connection: close" header, to make sure that
944              "Content-Length: 0" still prevents us from attempting to
945              read the (missing) response-body.
946           */
947           /* According to RFC2616 section 4.4, we MUST ignore
948              Content-Length: headers if we are now receiving data
949              using chunked Transfer-Encoding.
950           */
951           if(k->chunk)
952             k->size=-1;
953 
954         }
955         if(-1 != k->size) {
956           /* We do this operation even if no_body is true, since this
957              data might be retrieved later with curl_easy_getinfo()
958              and its CURLINFO_CONTENT_LENGTH_DOWNLOAD option. */
959 
960           Curl_pgrsSetDownloadSize(data, k->size);
961           k->maxdownload = k->size;
962         }
963         /* If max download size is *zero* (nothing) we already
964            have nothing and can safely return ok now! */
965         if(0 == k->maxdownload)
966           *stop_reading = TRUE;
967 
968         if(*stop_reading) {
969           /* we make sure that this socket isn't read more now */
970           k->keepon &= ~KEEP_READ;
971         }
972 
973         if(data->set.verbose)
974           Curl_debug(data, CURLINFO_HEADER_IN,
975                      k->str_start, headerlen, conn);
976         break;          /* exit header line loop */
977       }
978 
979       /* We continue reading headers, so reset the line-based
980          header parsing variables hbufp && hbuflen */
981       k->hbufp = data->state.headerbuff;
982       k->hbuflen = 0;
983       continue;
984     }
985 
986     /*
987      * Checks for special headers coming up.
988      */
989 
990     if(!k->headerline++) {
991       /* This is the first header, it MUST be the error code line
992          or else we consider this to be the body right away! */
993       int httpversion_major;
994       int nc;
995 #ifdef CURL_DOES_CONVERSIONS
996 #define HEADER1 scratch
997 #define SCRATCHSIZE 21
998       CURLcode res;
999       char scratch[SCRATCHSIZE+1]; /* "HTTP/major.minor 123" */
1000       /* We can't really convert this yet because we
1001          don't know if it's the 1st header line or the body.
1002          So we do a partial conversion into a scratch area,
1003          leaving the data at k->p as-is.
1004       */
1005       strncpy(&scratch[0], k->p, SCRATCHSIZE);
1006       scratch[SCRATCHSIZE] = 0; /* null terminate */
1007       res = Curl_convert_from_network(data,
1008                                       &scratch[0],
1009                                       SCRATCHSIZE);
1010       if(CURLE_OK != res) {
1011         /* Curl_convert_from_network calls failf if unsuccessful */
1012         return res;
1013       }
1014 #else
1015 #define HEADER1 k->p /* no conversion needed, just use k->p */
1016 #endif /* CURL_DOES_CONVERSIONS */
1017 
1018       nc = sscanf(HEADER1,
1019                   " HTTP/%d.%d %3d",
1020                   &httpversion_major,
1021                   &conn->httpversion,
1022                   &k->httpcode);
1023       if(nc==3) {
1024         conn->httpversion += 10 * httpversion_major;
1025       }
1026       else {
1027         /* this is the real world, not a Nirvana
1028            NCSA 1.5.x returns this crap when asked for HTTP/1.1
1029         */
1030         nc=sscanf(HEADER1, " HTTP %3d", &k->httpcode);
1031         conn->httpversion = 10;
1032 
1033         /* If user has set option HTTP200ALIASES,
1034            compare header line against list of aliases
1035         */
1036         if(!nc) {
1037           if(checkhttpprefix(data, k->p)) {
1038             nc = 1;
1039             k->httpcode = 200;
1040             conn->httpversion = 10;
1041           }
1042         }
1043       }
1044 
1045       if(nc) {
1046         data->info.httpcode = k->httpcode;
1047         data->info.httpversion = conn->httpversion;
1048         if (!data->state.httpversion ||
1049             data->state.httpversion > conn->httpversion)
1050           /* store the lowest server version we encounter */
1051           data->state.httpversion = conn->httpversion;
1052 
1053         /*
1054          * This code executes as part of processing the header.  As a
1055          * result, it's not totally clear how to interpret the
1056          * response code yet as that depends on what other headers may
1057          * be present.  401 and 407 may be errors, but may be OK
1058          * depending on how authentication is working.  Other codes
1059          * are definitely errors, so give up here.
1060          */
1061         if(data->set.http_fail_on_error && (k->httpcode >= 400) &&
1062            ((k->httpcode != 401) || !conn->bits.user_passwd) &&
1063            ((k->httpcode != 407) || !conn->bits.proxy_user_passwd) ) {
1064 
1065           if(data->state.resume_from &&
1066              (data->set.httpreq==HTTPREQ_GET) &&
1067              (k->httpcode == 416)) {
1068             /* "Requested Range Not Satisfiable", just proceed and
1069                pretend this is no error */
1070           }
1071           else {
1072             /* serious error, go home! */
1073             failf (data, "The requested URL returned error: %d",
1074                    k->httpcode);
1075             return CURLE_HTTP_RETURNED_ERROR;
1076           }
1077         }
1078 
1079         if(conn->httpversion == 10) {
1080           /* Default action for HTTP/1.0 must be to close, unless
1081              we get one of those fancy headers that tell us the
1082              server keeps it open for us! */
1083           infof(data, "HTTP 1.0, assume close after body\n");
1084           conn->bits.close = TRUE;
1085         }
1086         else if(conn->httpversion >= 11 &&
1087                 !conn->bits.close) {
1088           /* If HTTP version is >= 1.1 and connection is persistent
1089              server supports pipelining. */
1090           DEBUGF(infof(data,
1091                        "HTTP 1.1 or later with persistent connection, "
1092                        "pipelining supported\n"));
1093           conn->server_supports_pipelining = TRUE;
1094         }
1095 
1096         switch(k->httpcode) {
1097         case 204:
1098           /* (quote from RFC2616, section 10.2.5): The server has
1099            * fulfilled the request but does not need to return an
1100            * entity-body ... The 204 response MUST NOT include a
1101            * message-body, and thus is always terminated by the first
1102            * empty line after the header fields. */
1103           /* FALLTHROUGH */
1104         case 416: /* Requested Range Not Satisfiable, it has the
1105                      Content-Length: set as the "real" document but no
1106                      actual response is sent. */
1107         case 304:
1108           /* (quote from RFC2616, section 10.3.5): The 304 response
1109            * MUST NOT contain a message-body, and thus is always
1110            * terminated by the first empty line after the header
1111            * fields.  */
1112           if(data->set.timecondition)
1113             data->info.timecond = TRUE;
1114           k->size=0;
1115           k->maxdownload=0;
1116           k->ignorecl = TRUE; /* ignore Content-Length headers */
1117           break;
1118         default:
1119           /* nothing */
1120           break;
1121         }
1122       }
1123       else {
1124         k->header = FALSE;   /* this is not a header line */
1125         break;
1126       }
1127     }
1128 
1129 #ifdef CURL_DOES_CONVERSIONS
1130     /* convert from the network encoding */
1131     result = Curl_convert_from_network(data, k->p, strlen(k->p));
1132     if(CURLE_OK != result) {
1133       return(result);
1134     }
1135     /* Curl_convert_from_network calls failf if unsuccessful */
1136 #endif /* CURL_DOES_CONVERSIONS */
1137 
1138     /* Check for Content-Length: header lines to get size. Ignore
1139        the header completely if we get a 416 response as then we're
1140        resuming a document that we don't get, and this header contains
1141        info about the true size of the document we didn't get now. */
1142     if(!k->ignorecl && !data->set.ignorecl &&
1143        checkprefix("Content-Length:", k->p)) {
1144       curl_off_t contentlength = curlx_strtoofft(k->p+15, NULL, 10);
1145       if(data->set.max_filesize &&
1146          contentlength > data->set.max_filesize) {
1147         failf(data, "Maximum file size exceeded");
1148         return CURLE_FILESIZE_EXCEEDED;
1149       }
1150       if(contentlength >= 0) {
1151         k->size = contentlength;
1152         k->maxdownload = k->size;
1153         /* we set the progress download size already at this point
1154            just to make it easier for apps/callbacks to extract this
1155            info as soon as possible */
1156         Curl_pgrsSetDownloadSize(data, k->size);
1157       }
1158       else {
1159         /* Negative Content-Length is really odd, and we know it
1160            happens for example when older Apache servers send large
1161            files */
1162         conn->bits.close = TRUE;
1163         infof(data, "Negative content-length: %" FORMAT_OFF_T
1164               ", closing after transfer\n", contentlength);
1165       }
1166     }
1167     /* check for Content-Type: header lines to get the MIME-type */
1168     else if(checkprefix("Content-Type:", k->p)) {
1169       char *contenttype = Curl_copy_header_value(k->p);
1170       if (!contenttype)
1171         return CURLE_OUT_OF_MEMORY;
1172       if (!*contenttype)
1173         /* ignore empty data */
1174         free(contenttype);
1175       else {
1176         Curl_safefree(data->info.contenttype);
1177         data->info.contenttype = contenttype;
1178       }
1179     }
1180     else if((conn->httpversion == 10) &&
1181             conn->bits.httpproxy &&
1182             Curl_compareheader(k->p,
1183                                "Proxy-Connection:", "keep-alive")) {
1184       /*
1185        * When a HTTP/1.0 reply comes when using a proxy, the
1186        * 'Proxy-Connection: keep-alive' line tells us the
1187        * connection will be kept alive for our pleasure.
1188        * Default action for 1.0 is to close.
1189        */
1190       conn->bits.close = FALSE; /* don't close when done */
1191       infof(data, "HTTP/1.0 proxy connection set to keep alive!\n");
1192     }
1193     else if((conn->httpversion == 11) &&
1194             conn->bits.httpproxy &&
1195             Curl_compareheader(k->p,
1196                                "Proxy-Connection:", "close")) {
1197       /*
1198        * We get a HTTP/1.1 response from a proxy and it says it'll
1199        * close down after this transfer.
1200        */
1201       conn->bits.close = TRUE; /* close when done */
1202       infof(data, "HTTP/1.1 proxy connection set close!\n");
1203     }
1204     else if((conn->httpversion == 10) &&
1205             Curl_compareheader(k->p, "Connection:", "keep-alive")) {
1206       /*
1207        * A HTTP/1.0 reply with the 'Connection: keep-alive' line
1208        * tells us the connection will be kept alive for our
1209        * pleasure.  Default action for 1.0 is to close.
1210        *
1211        * [RFC2068, section 19.7.1] */
1212       conn->bits.close = FALSE; /* don't close when done */
1213       infof(data, "HTTP/1.0 connection set to keep alive!\n");
1214     }
1215     else if(Curl_compareheader(k->p, "Connection:", "close")) {
1216       /*
1217        * [RFC 2616, section 8.1.2.1]
1218        * "Connection: close" is HTTP/1.1 language and means that
1219        * the connection will close when this request has been
1220        * served.
1221        */
1222       conn->bits.close = TRUE; /* close when done */
1223     }
1224     else if(Curl_compareheader(k->p,
1225                                "Transfer-Encoding:", "chunked")) {
1226       /*
1227        * [RFC 2616, section 3.6.1] A 'chunked' transfer encoding
1228        * means that the server will send a series of "chunks". Each
1229        * chunk starts with line with info (including size of the
1230        * coming block) (terminated with CRLF), then a block of data
1231        * with the previously mentioned size. There can be any amount
1232        * of chunks, and a chunk-data set to zero signals the
1233        * end-of-chunks. */
1234       k->chunk = TRUE; /* chunks coming our way */
1235 
1236       /* init our chunky engine */
1237       Curl_httpchunk_init(conn);
1238     }
1239 
1240     else if(checkprefix("Trailer:", k->p) ||
1241             checkprefix("Trailers:", k->p)) {
1242       /*
1243        * This test helps Curl_httpchunk_read() to determine to look
1244        * for well formed trailers after the zero chunksize record. In
1245        * this case a CRLF is required after the zero chunksize record
1246        * when no trailers are sent, or after the last trailer record.
1247        *
1248        * It seems both Trailer: and Trailers: occur in the wild.
1249        */
1250       k->trailerhdrpresent = TRUE;
1251     }
1252 
1253     else if(checkprefix("Content-Encoding:", k->p) &&
1254             data->set.str[STRING_ENCODING]) {
1255       /*
1256        * Process Content-Encoding. Look for the values: identity,
1257        * gzip, deflate, compress, x-gzip and x-compress. x-gzip and
1258        * x-compress are the same as gzip and compress. (Sec 3.5 RFC
1259        * 2616). zlib cannot handle compress.  However, errors are
1260        * handled further down when the response body is processed
1261        */
1262       char *start;
1263 
1264       /* Find the first non-space letter */
1265       start = k->p + 17;
1266       while(*start && ISSPACE(*start))
1267         start++;
1268 
1269       /* Record the content-encoding for later use */
1270       if(checkprefix("identity", start))
1271         k->content_encoding = IDENTITY;
1272       else if(checkprefix("deflate", start))
1273         k->content_encoding = DEFLATE;
1274       else if(checkprefix("gzip", start)
1275               || checkprefix("x-gzip", start))
1276         k->content_encoding = GZIP;
1277       else if(checkprefix("compress", start)
1278               || checkprefix("x-compress", start))
1279         k->content_encoding = COMPRESS;
1280     }
1281     else if(checkprefix("Content-Range:", k->p)) {
1282       /* Content-Range: bytes [num]-
1283          Content-Range: bytes: [num]-
1284          Content-Range: [num]-
1285 
1286          The second format was added since Sun's webserver
1287          JavaWebServer/1.1.1 obviously sends the header this way!
1288          The third added since some servers use that!
1289       */
1290 
1291       char *ptr = k->p + 14;
1292 
1293       /* Move forward until first digit */
1294       while(*ptr && !ISDIGIT(*ptr))
1295         ptr++;
1296 
1297       k->offset = curlx_strtoofft(ptr, NULL, 10);
1298 
1299       if(data->state.resume_from == k->offset)
1300         /* we asked for a resume and we got it */
1301         k->content_range = TRUE;
1302     }
1303 #if !defined(CURL_DISABLE_COOKIES)
1304     else if(data->cookies &&
1305             checkprefix("Set-Cookie:", k->p)) {
1306       Curl_share_lock(data, CURL_LOCK_DATA_COOKIE,
1307                       CURL_LOCK_ACCESS_SINGLE);
1308       Curl_cookie_add(data,
1309                       data->cookies, TRUE, k->p+11,
1310                       /* If there is a custom-set Host: name, use it
1311                          here, or else use real peer host name. */
1312                       conn->allocptr.cookiehost?
1313                       conn->allocptr.cookiehost:conn->host.name,
1314                       data->state.path);
1315       Curl_share_unlock(data, CURL_LOCK_DATA_COOKIE);
1316     }
1317 #endif
1318     else if(checkprefix("Last-Modified:", k->p) &&
1319             (data->set.timecondition || data->set.get_filetime) ) {
1320       time_t secs=time(NULL);
1321       k->timeofdoc = curl_getdate(k->p+strlen("Last-Modified:"),
1322                                   &secs);
1323       if(data->set.get_filetime)
1324         data->info.filetime = (long)k->timeofdoc;
1325     }
1326     else if((checkprefix("WWW-Authenticate:", k->p) &&
1327              (401 == k->httpcode)) ||
1328             (checkprefix("Proxy-authenticate:", k->p) &&
1329              (407 == k->httpcode))) {
1330       result = Curl_http_input_auth(conn, k->httpcode, k->p);
1331       if(result)
1332         return result;
1333     }
1334     else if((k->httpcode >= 300 && k->httpcode < 400) &&
1335             checkprefix("Location:", k->p)) {
1336       /* this is the URL that the server advises us to use instead */
1337       char *location = Curl_copy_header_value(k->p);
1338       if (!location)
1339         return CURLE_OUT_OF_MEMORY;
1340       if (!*location)
1341         /* ignore empty data */
1342         free(location);
1343       else {
1344         DEBUGASSERT(!data->req.location);
1345         data->req.location = location;
1346 
1347         if(data->set.http_follow_location) {
1348           DEBUGASSERT(!data->req.newurl);
1349           data->req.newurl = strdup(data->req.location); /* clone */
1350           if(!data->req.newurl)
1351             return CURLE_OUT_OF_MEMORY;
1352 
1353           /* some cases of POST and PUT etc needs to rewind the data
1354              stream at this point */
1355           result = Curl_http_perhapsrewind(conn);
1356           if(result)
1357             return result;
1358         }
1359       }
1360     }
1361 
1362     /*
1363      * End of header-checks. Write them to the client.
1364      */
1365 
1366     writetype = CLIENTWRITE_HEADER;
1367     if(data->set.include_header)
1368       writetype |= CLIENTWRITE_BODY;
1369 
1370     if(data->set.verbose)
1371       Curl_debug(data, CURLINFO_HEADER_IN,
1372                  k->p, (size_t)k->hbuflen, conn);
1373 
1374     result = Curl_client_write(conn, writetype, k->p, k->hbuflen);
1375     if(result)
1376       return result;
1377 
1378     data->info.header_size += (long)k->hbuflen;
1379     data->req.headerbytecount += (long)k->hbuflen;
1380 
1381     /* reset hbufp pointer && hbuflen */
1382     k->hbufp = data->state.headerbuff;
1383     k->hbuflen = 0;
1384   }
1385   while(!*stop_reading && *k->str); /* header line within buffer */
1386 
1387   /* We might have reached the end of the header part here, but
1388      there might be a non-header part left in the end of the read
1389      buffer. */
1390 
1391   return CURLE_OK;
1392 }
1393 #endif   /* CURL_DISABLE_HTTP */
1394 
/*
 * Send data to upload to the server, when the socket is writable.
 *
 * When the upload buffer is empty, more data is fetched with
 * Curl_fillreadbuffer() (handling the Expect: 100-continue state and
 * paused transfers first), optionally converted LF => CRLF into
 * data->state.scratch, and then sent on conn->writesockfd. On a partial
 * send the unsent remainder is kept in data->req.upload_present /
 * data->req.upload_fromhere for the next invocation.
 *
 * 'didwhat' gets the KEEP_WRITE bit set when a write was actually
 * attempted. Returns CURLE_OK or an error code.
 */
static CURLcode readwrite_upload(struct SessionHandle *data,
                                 struct connectdata *conn,
                                 struct SingleRequest *k,
                                 int *didwhat)
{
  ssize_t i, si; /* source and destination indexes for the CRLF loop */
  ssize_t bytes_written;
  CURLcode result;
  ssize_t nread; /* number of bytes read */

  /* nothing sent or received yet: this marks the start of the transfer */
  if((k->bytecount == 0) && (k->writebytecount == 0))
    Curl_pgrsTime(data, TIMER_STARTTRANSFER);

  *didwhat |= KEEP_WRITE;

  /*
   * We loop here to do the READ and SEND loop until we run out of
   * data to send or until we get EWOULDBLOCK back
   *
   * (NOTE(review): the loop is a do { } while(0), so each call makes a
   * single read+send pass; 'break' below exits the pass.)
   */
  do {

    /* only read more data if there's no upload data already
       present in the upload buffer */
    if(0 == data->req.upload_present) {
      /* init the "upload from here" pointer */
      data->req.upload_fromhere = k->uploadbuf;

      if(!k->upload_done) {
        /* HTTP pollution, this should be written nicer to become more
           protocol agnostic. */
        int fillcount;

        if((k->exp100 == EXP100_SENDING_REQUEST) &&
           (data->state.proto.http->sending == HTTPSEND_BODY)) {
          /* If this call is to send body data, we must take some action:
             We have sent off the full HTTP 1.1 request, and we shall now
             go into the Expect: 100 state and await such a header */
          k->exp100 = EXP100_AWAITING_CONTINUE; /* wait for the header */
          k->keepon &= ~KEEP_WRITE;         /* disable writing */
          k->start100 = Curl_tvnow();       /* timeout count starts now */
          *didwhat &= ~KEEP_WRITE;  /* we didn't write anything actually */
          break;
        }

        /* get up to BUFSIZE more bytes to send */
        result = Curl_fillreadbuffer(conn, BUFSIZE, &fillcount);
        if(result)
          return result;

        nread = (ssize_t)fillcount;
      }
      else
        nread = 0; /* we're done uploading/reading */

      if(!nread && (k->keepon & KEEP_WRITE_PAUSE)) {
        /* this is a paused transfer */
        break;
      }
      else if(nread<=0) {
        /* done */
        k->keepon &= ~KEEP_WRITE; /* we're done writing */

        /* if the sent data stream must be rewound now, do it before we
           leave the write state */
        if(conn->bits.rewindaftersend) {
          result = Curl_readrewind(conn);
          if(result)
            return result;
        }
        break;
      }

      /* store number of bytes available for upload */
      data->req.upload_present = nread;

      /* convert LF to CRLF if so asked */
#ifdef CURL_DO_LINEEND_CONV
      /* always convert if we're FTPing in ASCII mode */
      if((data->set.crlf) || (data->set.prefer_ascii))
#else
        if(data->set.crlf)
#endif /* CURL_DO_LINEEND_CONV */
        {
          /* 2*BUFSIZE covers the worst case where every input byte is an
             LF that expands into two bytes (CRLF) */
          if(data->state.scratch == NULL)
            data->state.scratch = malloc(2*BUFSIZE);
          if(data->state.scratch == NULL) {
            failf (data, "Failed to alloc scratch buffer!");
            return CURLE_OUT_OF_MEMORY;
          }
          /*
           * ASCII/EBCDIC Note: This is presumably a text (not binary)
           * transfer so the data should already be in ASCII.
           * That means the hex values for ASCII CR (0x0d) & LF (0x0a)
           * must be used instead of the escape sequences \r & \n.
           */
          for(i = 0, si = 0; i < nread; i++, si++) {
            if(data->req.upload_fromhere[i] == 0x0a) {
              data->state.scratch[si++] = 0x0d;
              data->state.scratch[si] = 0x0a;
              if(!data->set.crlf) {
                /* we're here only because FTP is in ASCII mode...
                   bump infilesize for the LF we just added */
                data->set.infilesize++;
              }
            }
            else
              data->state.scratch[si] = data->req.upload_fromhere[i];
          }
          if(si != nread) {
            /* only perform the special operation if we really did replace
               anything */
            nread = si;

            /* upload from the new (replaced) buffer instead */
            data->req.upload_fromhere = data->state.scratch;

            /* set the new amount too */
            data->req.upload_present = nread;
          }
        }
    } /* if 0 == data->req.upload_present */
    else {
      /* We have a partial buffer left from a previous "round". Use
         that instead of reading more data */
    }

    /* write to socket (send away data) */
    result = Curl_write(conn,
                        conn->writesockfd,     /* socket to send to */
                        data->req.upload_fromhere, /* buffer pointer */
                        data->req.upload_present,  /* buffer size */
                        &bytes_written);       /* actually send away */

    if(result)
      return result;

    if(data->set.verbose)
      /* show the data before we change the pointer upload_fromhere */
      Curl_debug(data, CURLINFO_DATA_OUT, data->req.upload_fromhere,
                 (size_t)bytes_written, conn);

    if(data->req.upload_present != bytes_written) {
      /* we only wrote a part of the buffer (if anything), deal with it! */

      /* store the amount of bytes left in the buffer to write */
      data->req.upload_present -= bytes_written;

      /* advance the pointer where to find the buffer when the next send
         is to happen */
      data->req.upload_fromhere += bytes_written;
    }
    else {
      /* we've uploaded that buffer now */
      data->req.upload_fromhere = k->uploadbuf;
      data->req.upload_present = 0; /* no more bytes left */

      if(k->upload_done) {
        /* switch off writing, we're done! */
        k->keepon &= ~KEEP_WRITE; /* we're done writing */
      }
    }

    /* account for what we sent and update the progress meter */
    k->writebytecount += bytes_written;
    Curl_pgrsSetUploadCounter(data, k->writebytecount);

  } while(0); /* just to break out from! */

  return CURLE_OK;
}
1564 
1565 /*
1566  * Curl_readwrite() is the low-level function to be called when data is to
1567  * be read and written to/from the connection.
1568  */
Curl_readwrite(struct connectdata * conn,bool * done)1569 CURLcode Curl_readwrite(struct connectdata *conn,
1570                         bool *done)
1571 {
1572   struct SessionHandle *data = conn->data;
1573   struct SingleRequest *k = &data->req;
1574   CURLcode result;
1575   int didwhat=0;
1576 
1577   curl_socket_t fd_read;
1578   curl_socket_t fd_write;
1579   int select_res = conn->cselect_bits;
1580 
1581   conn->cselect_bits = 0;
1582 
1583   /* only use the proper socket if the *_HOLD bit is not set simultaneously as
1584      then we are in rate limiting state in that transfer direction */
1585 
1586   if((k->keepon & KEEP_READBITS) == KEEP_READ) {
1587     fd_read = conn->sockfd;
1588 #if defined(USE_LIBSSH2)
1589     if(conn->protocol & (PROT_SCP|PROT_SFTP))
1590       select_res |= CURL_CSELECT_IN;
1591 #endif /* USE_LIBSSH2 */
1592   } else
1593     fd_read = CURL_SOCKET_BAD;
1594 
1595   if((k->keepon & KEEP_WRITEBITS) == KEEP_WRITE)
1596     fd_write = conn->writesockfd;
1597   else
1598     fd_write = CURL_SOCKET_BAD;
1599 
1600    if(!select_res) { /* Call for select()/poll() only, if read/write/error
1601                          status is not known. */
1602        select_res = Curl_socket_ready(fd_read, fd_write, 0);
1603    }
1604 
1605   if(select_res == CURL_CSELECT_ERR) {
1606     failf(data, "select/poll returned error");
1607     return CURLE_SEND_ERROR;
1608   }
1609 
1610   /* We go ahead and do a read if we have a readable socket or if
1611      the stream was rewound (in which case we have data in a
1612      buffer) */
1613   if((k->keepon & KEEP_READ) &&
1614      ((select_res & CURL_CSELECT_IN) || conn->bits.stream_was_rewound)) {
1615 
1616     result = readwrite_data(data, conn, k, &didwhat, done);
1617     if(result || *done)
1618       return result;
1619   }
1620 
1621   /* If we still have writing to do, we check if we have a writable socket. */
1622   if((k->keepon & KEEP_WRITE) && (select_res & CURL_CSELECT_OUT)) {
1623     /* write */
1624 
1625     result = readwrite_upload(data, conn, k, &didwhat);
1626     if(result)
1627       return result;
1628   }
1629 
1630   k->now = Curl_tvnow();
1631   if(didwhat) {
1632     /* Update read/write counters */
1633     if(k->bytecountp)
1634       *k->bytecountp = k->bytecount; /* read count */
1635     if(k->writebytecountp)
1636       *k->writebytecountp = k->writebytecount; /* write count */
1637   }
1638   else {
1639     /* no read no write, this is a timeout? */
1640     if(k->exp100 == EXP100_AWAITING_CONTINUE) {
1641       /* This should allow some time for the header to arrive, but only a
1642          very short time as otherwise it'll be too much wasted time too
1643          often. */
1644 
1645       /* Quoting RFC2616, section "8.2.3 Use of the 100 (Continue) Status":
1646 
1647          Therefore, when a client sends this header field to an origin server
1648          (possibly via a proxy) from which it has never seen a 100 (Continue)
1649          status, the client SHOULD NOT wait for an indefinite period before
1650          sending the request body.
1651 
1652       */
1653 
1654       long ms = Curl_tvdiff(k->now, k->start100);
1655       if(ms > CURL_TIMEOUT_EXPECT_100) {
1656         /* we've waited long enough, continue anyway */
1657         k->exp100 = EXP100_SEND_DATA;
1658         k->keepon |= KEEP_WRITE;
1659         infof(data, "Done waiting for 100-continue\n");
1660       }
1661     }
1662   }
1663 
1664   if(Curl_pgrsUpdate(conn))
1665     result = CURLE_ABORTED_BY_CALLBACK;
1666   else
1667     result = Curl_speedcheck(data, k->now);
1668   if(result)
1669     return result;
1670 
1671   if(data->set.timeout &&
1672      (Curl_tvdiff(k->now, k->start) >= data->set.timeout)) {
1673     if(k->size != -1) {
1674       failf(data, "Operation timed out after %ld milliseconds with %"
1675             FORMAT_OFF_T " out of %" FORMAT_OFF_T " bytes received",
1676             data->set.timeout, k->bytecount, k->size);
1677     } else {
1678       failf(data, "Operation timed out after %ld milliseconds with %"
1679             FORMAT_OFF_T " bytes received",
1680             data->set.timeout, k->bytecount);
1681     }
1682     return CURLE_OPERATION_TIMEDOUT;
1683   }
1684 
1685   if(!k->keepon) {
1686     /*
1687      * The transfer has been performed. Just make some general checks before
1688      * returning.
1689      */
1690 
1691     if(!(data->set.opt_no_body) && (k->size != -1) &&
1692        (k->bytecount != k->size) &&
1693 #ifdef CURL_DO_LINEEND_CONV
1694        /* Most FTP servers don't adjust their file SIZE response for CRLFs,
1695           so we'll check to see if the discrepancy can be explained
1696           by the number of CRLFs we've changed to LFs.
1697        */
1698        (k->bytecount != (k->size + data->state.crlf_conversions)) &&
1699 #endif /* CURL_DO_LINEEND_CONV */
1700        !data->req.newurl) {
1701       failf(data, "transfer closed with %" FORMAT_OFF_T
1702             " bytes remaining to read",
1703             k->size - k->bytecount);
1704       return CURLE_PARTIAL_FILE;
1705     }
1706     else if(!(data->set.opt_no_body) &&
1707             k->chunk &&
1708             (conn->chunk.state != CHUNK_STOP)) {
1709       /*
1710        * In chunked mode, return an error if the connection is closed prior to
1711        * the empty (terminiating) chunk is read.
1712        *
1713        * The condition above used to check for
1714        * conn->proto.http->chunk.datasize != 0 which is true after reading
1715        * *any* chunk, not just the empty chunk.
1716        *
1717        */
1718       failf(data, "transfer closed with outstanding read data remaining");
1719       return CURLE_PARTIAL_FILE;
1720     }
1721     if(Curl_pgrsUpdate(conn))
1722       return CURLE_ABORTED_BY_CALLBACK;
1723   }
1724 
1725   /* Now update the "done" boolean we return */
1726   *done = (bool)(0 == (k->keepon&(KEEP_READ|KEEP_WRITE|
1727                                   KEEP_READ_PAUSE|KEEP_WRITE_PAUSE)));
1728 
1729   return CURLE_OK;
1730 }
1731 
1732 /*
1733  * Curl_single_getsock() gets called by the multi interface code when the app
1734  * has requested to get the sockets for the current connection. This function
1735  * will then be called once for every connection that the multi interface
1736  * keeps track of. This function will only be called for connections that are
1737  * in the proper state to have this information available.
1738  */
int Curl_single_getsock(const struct connectdata *conn,
                        curl_socket_t *sock, /* points to numsocks number
                                                of sockets */
                        int numsocks)
{
  const struct SessionHandle *data = conn->data;
  int bitmap = GETSOCK_BLANK;
  unsigned sockindex = 0;

  /* protocols with their own notion of interesting sockets answer this
     themselves */
  if(conn->handler->perform_getsock)
    return conn->handler->perform_getsock(conn, sock, numsocks);

  /* we may store up to two sockets below, so bail out early if the caller
     cannot hold that many */
  if(numsocks < 2)
    return GETSOCK_BLANK;

  /* report a readable socket only when reading is wanted and neither HOLD
     nor PAUSE is in effect */
  if((data->req.keepon & KEEP_READBITS) == KEEP_READ) {
    DEBUGASSERT(conn->sockfd != CURL_SOCKET_BAD);
    sock[sockindex] = conn->sockfd;
    bitmap |= GETSOCK_READSOCK(sockindex);
  }

  /* same principle for the writable socket: skip HOLD and PAUSE states */
  if((data->req.keepon & KEEP_WRITEBITS) == KEEP_WRITE) {

    if((conn->sockfd != conn->writesockfd) ||
       !(data->req.keepon & KEEP_READ)) {
      /* store the write socket in its own slot, bumping the index only when
         reading claimed slot zero above */
      if(data->req.keepon & KEEP_READ)
        sockindex++;

      DEBUGASSERT(conn->writesockfd != CURL_SOCKET_BAD);

      sock[sockindex] = conn->writesockfd;
    }

    bitmap |= GETSOCK_WRITESOCK(sockindex);
  }

  return bitmap;
}
1784 
1785 
1786 /*
1787  * Transfer()
1788  *
1789  * This function is what performs the actual transfer. It is capable of
1790  * doing both ways simultaneously.
1791  * The transfer must already have been setup by a call to Curl_setup_transfer().
1792  *
1793  * Note that headers are created in a preallocated buffer of a default size.
1794  * That buffer can be enlarged on demand, but it is never shrunken again.
1795  *
 * Parts of this function were once written by the friendly Mark Butler
 * <butlerm@xmission.com>.
1798  */
1799 
static CURLcode
Transfer(struct connectdata *conn)
{
  CURLcode result;
  struct SessionHandle *data = conn->data;
  struct SingleRequest *k = &data->req;
  bool done=FALSE;      /* set TRUE by Curl_readwrite() when transfer ends */
  bool first=TRUE;      /* TRUE only during the first loop lap */
  int timeout_ms;       /* wait time handed to Curl_socket_ready() */

  if((conn->sockfd == CURL_SOCKET_BAD) &&
     (conn->writesockfd == CURL_SOCKET_BAD))
    /* nothing to read, nothing to write, we're already OK! */
    return CURLE_OK;

  /* we want header and/or body, if neither then don't do this! */
  if(!k->getheader && data->set.opt_no_body)
    return CURLE_OK;

  while(!done) {
    curl_socket_t fd_read;
    curl_socket_t fd_write;

    /* limit-rate logic: if speed exceeds threshold, then do not include fd in
       select set. The current speed is recalculated in each Curl_readwrite()
       call */
    if((k->keepon & KEEP_WRITE) &&
        (!data->set.max_send_speed ||
         (data->progress.ulspeed < data->set.max_send_speed) )) {
      /* below the send-speed cap (or no cap set): allow writing again */
      fd_write = conn->writesockfd;
      k->keepon &= ~KEEP_WRITE_HOLD;
    }
    else {
      fd_write = CURL_SOCKET_BAD;
      if(k->keepon & KEEP_WRITE)
        k->keepon |= KEEP_WRITE_HOLD; /* hold it */
    }

    if((k->keepon & KEEP_READ) &&
        (!data->set.max_recv_speed ||
         (data->progress.dlspeed < data->set.max_recv_speed)) ) {
      /* below the receive-speed cap (or no cap set): allow reading again */
      fd_read = conn->sockfd;
      k->keepon &= ~KEEP_READ_HOLD;
    }
    else {
      fd_read = CURL_SOCKET_BAD;
      if(k->keepon & KEEP_READ)
        k->keepon |= KEEP_READ_HOLD; /* hold it */
    }

    /* pause logic. Don't check descriptors for paused connections */
    if(k->keepon & KEEP_READ_PAUSE)
      fd_read = CURL_SOCKET_BAD;
    if(k->keepon & KEEP_WRITE_PAUSE)
      fd_write = CURL_SOCKET_BAD;

    /* The *_HOLD and *_PAUSE logic is necessary since even though there might
       be no traffic during the select interval, we still call
       Curl_readwrite() for the timeout case and if we limit transfer speed we
       must make sure that this function doesn't transfer anything while in
       HOLD status.

       The no timeout for the first round is for the protocols for which data
       has already been slurped off the socket and thus waiting for action
       won't work since it'll wait even though there is already data present
       to work with. */
    if(first &&
       ((fd_read != CURL_SOCKET_BAD) || (fd_write != CURL_SOCKET_BAD)))
      /* if this is the first lap and one of the file descriptors is fine
         to work with, skip the timeout */
      timeout_ms = 0;
    else
      timeout_ms = 1000;

    switch (Curl_socket_ready(fd_read, fd_write, timeout_ms)) {
    case -1: /* select() error, stop reading */
#ifdef EINTR
      /* The EINTR is not serious, and it seems you might get this more
         often when using the lib in a multi-threaded environment! */
      if(SOCKERRNO == EINTR)
        continue;
#endif
      return CURLE_RECV_ERROR;  /* indicate a network problem */
    case 0:  /* timeout */
    default: /* readable descriptors */
      /* both the timeout and the traffic case are handled by
         Curl_readwrite(); it distinguishes them internally */
      result = Curl_readwrite(conn, &done);
      /* "done" signals to us if the transfer(s) are ready */
      break;
    }
    if(result)
      return result;

    first = FALSE; /* not the first lap anymore */
  }

  return CURLE_OK;
}
1898 
1899 /*
1900  * Curl_pretransfer() is called immediately before a transfer starts.
1901  */
Curl_pretransfer(struct SessionHandle * data)1902 CURLcode Curl_pretransfer(struct SessionHandle *data)
1903 {
1904   CURLcode res;
1905   if(!data->change.url) {
1906     /* we can't do anything without URL */
1907     failf(data, "No URL set!");
1908     return CURLE_URL_MALFORMAT;
1909   }
1910 
1911   /* Init the SSL session ID cache here. We do it here since we want to do it
1912      after the *_setopt() calls (that could change the size of the cache) but
1913      before any transfer takes place. */
1914   res = Curl_ssl_initsessions(data, data->set.ssl.numsessions);
1915   if(res)
1916     return res;
1917 
1918   data->set.followlocation=0; /* reset the location-follow counter */
1919   data->state.this_is_a_follow = FALSE; /* reset this */
1920   data->state.errorbuf = FALSE; /* no error has occurred */
1921   data->state.httpversion = 0; /* don't assume any particular server version */
1922 
1923   data->state.authproblem = FALSE;
1924   data->state.authhost.want = data->set.httpauth;
1925   data->state.authproxy.want = data->set.proxyauth;
1926   Curl_safefree(data->info.wouldredirect);
1927   data->info.wouldredirect = NULL;
1928 
1929   /* If there is a list of cookie files to read, do it now! */
1930   if(data->change.cookielist) {
1931     Curl_cookie_loadfiles(data);
1932   }
1933 
1934  /* Allow data->set.use_port to set which port to use. This needs to be
1935   * disabled for example when we follow Location: headers to URLs using
1936   * different ports! */
1937   data->state.allow_port = TRUE;
1938 
1939 #if defined(HAVE_SIGNAL) && defined(SIGPIPE) && !defined(HAVE_MSG_NOSIGNAL)
1940   /*************************************************************
1941    * Tell signal handler to ignore SIGPIPE
1942    *************************************************************/
1943   if(!data->set.no_signal)
1944     data->state.prev_signal = signal(SIGPIPE, SIG_IGN);
1945 #endif
1946 
1947   Curl_initinfo(data); /* reset session-specific information "variables" */
1948   Curl_pgrsStartNow(data);
1949 
1950   return CURLE_OK;
1951 }
1952 
1953 /*
1954  * Curl_posttransfer() is called immediately after a transfer ends
1955  */
Curl_posttransfer(struct SessionHandle * data)1956 CURLcode Curl_posttransfer(struct SessionHandle *data)
1957 {
1958 #if defined(HAVE_SIGNAL) && defined(SIGPIPE) && !defined(HAVE_MSG_NOSIGNAL)
1959   /* restore the signal handler for SIGPIPE before we get back */
1960   if(!data->set.no_signal)
1961     signal(SIGPIPE, data->state.prev_signal);
1962 #else
1963   (void)data; /* unused parameter */
1964 #endif
1965 
1966   if(!(data->progress.flags & PGRS_HIDE) &&
1967      !data->progress.callback)
1968     /* only output if we don't use a progress callback and we're not hidden */
1969     fprintf(data->set.err, "\n");
1970 
1971   return CURLE_OK;
1972 }
1973 
1974 #ifndef CURL_DISABLE_HTTP
1975 /*
1976  * strlen_url() returns the length of the given URL if the spaces within the
1977  * URL were properly URL encoded.
1978  */
/*
 * strlen_url() returns the length the given URL would have after URL
 * encoding its spaces: %20 (3 bytes) left of the '?', '+' (1 byte) right
 * of it. All other bytes count as themselves.
 */
static size_t strlen_url(const char *url)
{
  size_t len = 0;
  int in_query = 0; /* set once we have passed a '?' */
  const char *p;

  for(p = url; *p; p++) {
    if(*p == ' ')
      len += in_query ? 1 : 3; /* '+' vs "%20" */
    else {
      if(*p == '?')
        in_query = 1; /* everything after this is query string */
      len++;
    }
  }
  return len;
}
2003 
2004 /* strcpy_url() copies a url to a output buffer and URL-encodes the spaces in
2005  * the source URL accordingly.
2006  */
/* strcpy_url() copies a url to an output buffer while URL-encoding spaces:
 * "%20" before the '?' separator, '+' after it. The output buffer must be
 * at least strlen_url(url)+1 bytes (see strlen_url()).
 */
static void strcpy_url(char *output, const char *url)
{
  int in_query = 0; /* set once we have passed a '?' */
  const char *in = url;
  char *out = output;

  while(*in) {
    if(*in == ' ') {
      if(in_query)
        *out++ = '+';
      else {
        memcpy(out, "%20", 3);
        out += 3;
      }
    }
    else {
      if(*in == '?')
        in_query = 1; /* everything after this is query string */
      *out++ = *in;
    }
    in++;
  }
  *out = '\0'; /* zero terminate output buffer */
}
2037 
2038 /*
2039  * Returns true if the given URL is absolute (as opposed to relative)
2040  */
/*
 * Returns true if the given URL is absolute (as opposed to relative), i.e.
 * it starts with "<scheme>://" where the scheme contains none of '?', '&',
 * '/' or ':' and is at most 15 characters long.
 */
static bool is_absolute_url(const char *url)
{
  char scheme[16]; /* room for a 15-char scheme plus terminating zero */
  char anychar;    /* one character must follow the "://" */
  int fields = sscanf(url, "%15[^?&/:]://%c", scheme, &anychar);

  return (bool)(fields == 2);
}
2048 
2049 /*
2050  * Concatenate a relative URL to a base URL making it absolute.
2051  * URL-encodes any spaces.
2052  * The returned pointer must be freed by the caller unless NULL
2053  * (returns NULL on out of memory).
2054  */
static char *concat_url(const char *base, const char *relurl)
{
  /***
   TRY to append this new path to the old URL
   to the right of the host part. Oh crap, this is doomed to cause
   problems in the future...
  */
  char *newest;        /* the allocated result */
  char *protsep;       /* start of host name (or path, see below) */
  char *pathsep;       /* scratch pointer used to cut the clone */
  size_t newlen;       /* length of the URL-encoded relative part */

  const char *useurl = relurl;
  size_t urllen;       /* length of the (possibly truncated) base clone */

  /* we must make our own copy of the URL to play with, as it may
     point to read-only data */
  char *url_clone=strdup(base);

  if(!url_clone)
    return NULL; /* skip out of this NOW */

  /* protsep points to the start of the host name */
  protsep=strstr(url_clone, "//");
  if(!protsep)
    protsep=url_clone;
  else
    protsep+=2; /* pass the slashes */

  if('/' != relurl[0]) {
    /* relative path (or pure query string): graft it onto the base */
    int level=0; /* number of "../" levels in the relative URL */

    /* First we need to find out if there's a ?-letter in the URL,
       and cut it and the right-side of that off */
    pathsep = strchr(protsep, '?');
    if(pathsep)
      *pathsep=0;

    /* we have a relative path to append to the last slash if there's one
       available, or if the new URL is just a query string (starts with a
       '?')  we append the new one at the end of the entire currently worked
       out URL */
    if(useurl[0] != '?') {
      pathsep = strrchr(protsep, '/');
      if(pathsep)
        *pathsep=0;
    }

    /* Check if there's any slash after the host name, and if so, remember
       that position instead. From here on, protsep points at the first path
       segment (or is NULL when the base has no path at all). */
    pathsep = strchr(protsep, '/');
    if(pathsep)
      protsep = pathsep+1;
    else
      protsep = NULL;

    /* now deal with one "./" or any amount of "../" in the newurl
       and act accordingly */

    if((useurl[0] == '.') && (useurl[1] == '/'))
      useurl+=2; /* just skip the "./" */

    while((useurl[0] == '.') &&
          (useurl[1] == '.') &&
          (useurl[2] == '/')) {
      level++;
      useurl+=3; /* pass the "../" */
    }

    if(protsep) {
      /* strip one trailing path segment from the base per "../" seen */
      while(level--) {
        /* cut off one more level from the right of the original URL */
        pathsep = strrchr(protsep, '/');
        if(pathsep)
          *pathsep=0;
        else {
          /* no more slashes: empty the whole path and stop climbing */
          *protsep=0;
          break;
        }
      }
    }
  }
  else {
    /* We got a new absolute path for this server, cut off from the
       first slash */
    pathsep = strchr(protsep, '/');
    if(pathsep) {
      /* When people use badly formatted URLs, such as
         "http://www.url.com?dir=/home/daniel" we must not use the first
         slash, if there's a ?-letter before it! */
      char *sep = strchr(protsep, '?');
      if(sep && (sep < pathsep))
        pathsep = sep;
      *pathsep=0;
    }
    else {
      /* There was no slash. Now, since we might be operating on a badly
         formatted URL, such as "http://www.url.com?id=2380" which doesn't
         use a slash separator as it is supposed to, we need to check for a
         ?-letter as well! */
      pathsep = strchr(protsep, '?');
      if(pathsep)
        *pathsep=0;
    }
  }

  /* If the new part contains a space, this is a mighty stupid redirect
     but we still make an effort to do "right". To the left of a '?'
     letter we replace each space with %20 while it is replaced with '+'
     on the right side of the '?' letter.
  */
  newlen = strlen_url(useurl);

  urllen = strlen(url_clone);

  newest = malloc( urllen + 1 + /* possible slash */
                         newlen + 1 /* zero byte */);

  if(!newest) {
    free(url_clone); /* don't leak this */
    return NULL;
  }

  /* copy over the root url part */
  memcpy(newest, url_clone, urllen);

  /* check if we need to append a slash: not when the new part is absolute
     or a query string, nor when the base already ends right at the path
     start (protsep points at an empty string then) */
  if(('/' == useurl[0]) || (protsep && !*protsep) || ('?' == useurl[0]))
    ;
  else
    newest[urllen++]='/';

  /* then append the new piece on the right side, space-encoded */
  strcpy_url(&newest[urllen], useurl);

  free(url_clone);

  return newest;
}
2194 #endif /* CURL_DISABLE_HTTP */
2195 
2196 /*
2197  * Curl_follow() handles the URL redirect magic. Pass in the 'newurl' string
2198  * as given by the remote server and set up the new URL to request.
2199  */
CURLcode Curl_follow(struct SessionHandle *data,
                     char *newurl, /* this 'newurl' is the Location: string,
                                      and it must be malloc()ed before passed
                                      here */
                     followtype type) /* see transfer.h */
{
#ifdef CURL_DISABLE_HTTP
  (void)data;
  (void)newurl;
  (void)type;
  /* Location: following will not happen when HTTP is disabled */
  return CURLE_TOO_MANY_REDIRECTS;
#else

  /* Location: redirect */
  bool disallowport = FALSE; /* set TRUE when a custom port must be dropped */

  if(type == FOLLOW_REDIR) {
    /* enforce the maximum number of redirects, -1 meaning unlimited */
    if((data->set.maxredirs != -1) &&
        (data->set.followlocation >= data->set.maxredirs)) {
      failf(data,"Maximum (%d) redirects followed", data->set.maxredirs);
      return CURLE_TOO_MANY_REDIRECTS;
    }

    /* mark the next request as a followed location: */
    data->state.this_is_a_follow = TRUE;

    data->set.followlocation++; /* count location-followers */

    if(data->set.http_auto_referer) {
      /* We are asked to automatically set the previous URL as the referer
         when we get the next URL. We pick the ->url field, which may or may
         not be 100% correct */

      if(data->change.referer_alloc)
        /* If we already have an allocated referer, free this first */
        free(data->change.referer);

      data->change.referer = strdup(data->change.url);
      if (!data->change.referer) {
        data->change.referer_alloc = FALSE;
        return CURLE_OUT_OF_MEMORY;
      }
      data->change.referer_alloc = TRUE; /* yes, free this later */
    }
  }

  if(!is_absolute_url(newurl))  {
    /***
     *DANG* this is an RFC 2068 violation. The URL is supposed
     to be absolute and this doesn't seem to be that!
     */
    /* make it absolute against the current URL; concat_url() allocates */
    char *absolute = concat_url(data->change.url, newurl);
    if (!absolute)
      return CURLE_OUT_OF_MEMORY;
    free(newurl);
    newurl = absolute;
  }
  else {
    /* This is an absolute URL, don't allow the custom port number */
    disallowport = TRUE;

    if(strchr(newurl, ' ')) {
      /* This new URL contains at least one space, this is a mighty stupid
         redirect but we still make an effort to do "right". */
      char *newest;
      size_t newlen = strlen_url(newurl);

      newest = malloc(newlen+1); /* get memory for this */
      if (!newest)
        return CURLE_OUT_OF_MEMORY;
      strcpy_url(newest, newurl); /* create a space-free URL */

      free(newurl); /* that was no good */
      newurl = newest; /* use this instead now */
    }

  }

  if(type == FOLLOW_FAKE) {
    /* we're only figuring out the new url if we would've followed locations
       but now we're done so we can get out! NOTE: ownership of 'newurl'
       moves to data->info.wouldredirect here. */
    data->info.wouldredirect = newurl;
    return CURLE_OK;
  }

  if(disallowport)
    data->state.allow_port = FALSE;

  if(data->change.url_alloc)
    free(data->change.url);
  else
    data->change.url_alloc = TRUE; /* the URL is allocated */

  /* ownership of 'newurl' moves to data->change.url here */
  data->change.url = newurl;
  newurl = NULL; /* don't free! */

  infof(data, "Issue another request to this URL: '%s'\n", data->change.url);

  /*
   * We get here when the HTTP code is 300-399 (and 401). We need to perform
   * differently based on exactly what return code there was.
   *
   * News from 7.10.6: we can also get here on a 401 or 407, in case we act on
   * a HTTP (proxy-) authentication scheme other than Basic.
   */
  switch(data->info.httpcode) {
    /* 401 - Act on a WWW-Authenticate, we keep on moving and do the
       Authorization: XXXX header in the HTTP request code snippet */
    /* 407 - Act on a Proxy-Authenticate, we keep on moving and do the
       Proxy-Authorization: XXXX header in the HTTP request code snippet */
    /* 300 - Multiple Choices */
    /* 306 - Not used */
    /* 307 - Temporary Redirect */
  default:  /* for all above (and the unknown ones) */
    /* Some codes are explicitly mentioned since I've checked RFC2616 and they
     * seem to be OK to POST to.
     */
    break;
  case 301: /* Moved Permanently */
    /* (quote from RFC2616, section 10.3.2):
     *
     * Note: When automatically redirecting a POST request after receiving a
     * 301 status code, some existing HTTP/1.0 user agents will erroneously
     * change it into a GET request.
     *
     * ----
     *
     * Warning: Because most important user agents do this obvious RFC2616
     * violation, many web servers expect this misbehavior. So these servers
     * often answer a POST request with an error page. To be sure that
     * libcurl gets the page that most user agents would get, libcurl has to
     * force GET.
     *
     * This behaviour can be overridden with CURLOPT_POSTREDIR.
     */
    if( (data->set.httpreq == HTTPREQ_POST
         || data->set.httpreq == HTTPREQ_POST_FORM)
        && !data->set.post301) {
      infof(data,
            "Violate RFC 2616/10.3.2 and switch from POST to GET\n");
      data->set.httpreq = HTTPREQ_GET;
    }
    break;
  case 302: /* Found */
    /* (From 10.3.3)

    Note: RFC 1945 and RFC 2068 specify that the client is not allowed
    to change the method on the redirected request.  However, most
    existing user agent implementations treat 302 as if it were a 303
    response, performing a GET on the Location field-value regardless
    of the original request method. The status codes 303 and 307 have
    been added for servers that wish to make unambiguously clear which
    kind of reaction is expected of the client.

    (From 10.3.4)

    Note: Many pre-HTTP/1.1 user agents do not understand the 303
    status. When interoperability with such clients is a concern, the
    302 status code may be used instead, since most user agents react
    to a 302 response as described here for 303.

    This behaviour can be overridden with CURLOPT_POSTREDIR
    */
    if( (data->set.httpreq == HTTPREQ_POST
         || data->set.httpreq == HTTPREQ_POST_FORM)
        && !data->set.post302) {
      infof(data,
            "Violate RFC 2616/10.3.3 and switch from POST to GET\n");
      data->set.httpreq = HTTPREQ_GET;
    }
    break;

  case 303: /* See Other */
    /* Disable both types of POSTs, since doing a second POST when
     * following isn't what anyone would want! */
    if(data->set.httpreq != HTTPREQ_GET) {
      data->set.httpreq = HTTPREQ_GET; /* enforce GET request */
      infof(data, "Disables POST, goes with %s\n",
            data->set.opt_no_body?"HEAD":"GET");
    }
    break;
  case 304: /* Not Modified */
    /* 304 means we did a conditional request and it was "Not modified".
     * We shouldn't get any Location: header in this response!
     */
    break;
  case 305: /* Use Proxy */
    /* (quote from RFC2616, section 10.3.6):
     * "The requested resource MUST be accessed through the proxy given
     * by the Location field. The Location field gives the URI of the
     * proxy.  The recipient is expected to repeat this single request
     * via the proxy. 305 responses MUST only be generated by origin
     * servers."
     */
    break;
  }
  /* restart the per-request timers for the upcoming request */
  Curl_pgrsTime(data, TIMER_REDIRECT);
  Curl_pgrsResetTimes(data);

  return CURLE_OK;
#endif /* CURL_DISABLE_HTTP */
}
2403 
2404 static CURLcode
connect_host(struct SessionHandle * data,struct connectdata ** conn)2405 connect_host(struct SessionHandle *data,
2406              struct connectdata **conn)
2407 {
2408   CURLcode res = CURLE_OK;
2409 
2410   bool async;
2411   bool protocol_done=TRUE; /* will be TRUE always since this is only used
2412                                 within the easy interface */
2413   Curl_pgrsTime(data, TIMER_STARTSINGLE);
2414   res = Curl_connect(data, conn, &async, &protocol_done);
2415 
2416   if((CURLE_OK == res) && async) {
2417     /* Now, if async is TRUE here, we need to wait for the name
2418        to resolve */
2419     res = Curl_wait_for_resolv(*conn, NULL);
2420     if(CURLE_OK == res)
2421       /* Resolved, continue with the connection */
2422       res = Curl_async_resolved(*conn, &protocol_done);
2423     else
2424       /* if we can't resolve, we kill this "connection" now */
2425       (void)Curl_disconnect(*conn);
2426   }
2427 
2428   return res;
2429 }
2430 
2431 /* Returns TRUE and sets '*url' if a request retry is wanted.
2432 
2433    NOTE: that the *url is malloc()ed. */
Curl_retry_request(struct connectdata * conn,char ** url)2434 bool Curl_retry_request(struct connectdata *conn,
2435                         char **url)
2436 {
2437   bool retry = FALSE;
2438   struct SessionHandle *data = conn->data;
2439 
2440   /* if we're talking upload, we can't do the checks below, unless the protocol
2441      is HTTP as when uploading over HTTP we will still get a response */
2442   if(data->set.upload && !(conn->protocol&PROT_HTTP))
2443     return retry;
2444 
2445   if((data->req.bytecount +
2446       data->req.headerbytecount == 0) &&
2447      conn->bits.reuse &&
2448      !data->set.opt_no_body) {
2449     /* We got no data, we attempted to re-use a connection and yet we want a
2450        "body". This might happen if the connection was left alive when we were
2451        done using it before, but that was closed when we wanted to read from
2452        it again. Bad luck. Retry the same request on a fresh connect! */
2453     infof(conn->data, "Connection died, retrying a fresh connect\n");
2454     *url = strdup(conn->data->change.url);
2455 
2456     conn->bits.close = TRUE; /* close this connection */
2457     conn->bits.retry = TRUE; /* mark this as a connection we're about
2458                                 to retry. Marking it this way should
2459                                 prevent i.e HTTP transfers to return
2460                                 error just because nothing has been
2461                                 transfered! */
2462     retry = TRUE;
2463   }
2464 
2465   return retry;
2466 }
2467 
2468 /*
2469  * Curl_perform() is the internal high-level function that gets called by the
2470  * external curl_easy_perform() function. It inits, performs and cleans up a
2471  * single file transfer.
2472  */
Curl_perform(struct SessionHandle * data)2473 CURLcode Curl_perform(struct SessionHandle *data)
2474 {
2475   CURLcode res;
2476   CURLcode res2;
2477   struct connectdata *conn=NULL;
2478   char *newurl = NULL; /* possibly a new URL to follow to! */
2479   followtype follow = FOLLOW_NONE;
2480 
2481   data->state.used_interface = Curl_if_easy;
2482 
2483   res = Curl_pretransfer(data);
2484   if(res)
2485     return res;
2486 
2487   /*
2488    * It is important that there is NO 'return' from this function at any other
2489    * place than falling down to the end of the function! This is because we
2490    * have cleanup stuff that must be done before we get back, and that is only
2491    * performed after this do-while loop.
2492    */
2493 
2494   do {
2495     res = connect_host(data, &conn);   /* primary connection */
2496 
2497     if(res == CURLE_OK) {
2498       bool do_done;
2499       if(data->set.connect_only) {
2500         /* keep connection open for application to use the socket */
2501         conn->bits.close = FALSE;
2502         res = Curl_done(&conn, CURLE_OK, FALSE);
2503         break;
2504       }
2505       res = Curl_do(&conn, &do_done);
2506 
2507       if(res == CURLE_OK) {
2508         res = Transfer(conn); /* now fetch that URL please */
2509         if((res == CURLE_OK) || (res == CURLE_RECV_ERROR)) {
2510           bool retry = Curl_retry_request(conn, &newurl);
2511 
2512           if(retry) {
2513             res = CURLE_OK;
2514             follow = FOLLOW_RETRY;
2515             if (!newurl)
2516               res = CURLE_OUT_OF_MEMORY;
2517           }
2518           else if (res == CURLE_OK) {
2519             /*
2520              * We must duplicate the new URL here as the connection data may
2521              * be free()ed in the Curl_done() function. We prefer the newurl
2522              * one since that's used for redirects or just further requests
2523              * for retries or multi-stage HTTP auth methods etc.
2524              */
2525             if(data->req.newurl) {
2526               follow = FOLLOW_REDIR;
2527               newurl = strdup(data->req.newurl);
2528               if (!newurl)
2529                 res = CURLE_OUT_OF_MEMORY;
2530             }
2531             else if(data->req.location) {
2532               follow = FOLLOW_FAKE;
2533               newurl = strdup(data->req.location);
2534               if (!newurl)
2535                 res = CURLE_OUT_OF_MEMORY;
2536             }
2537           }
2538 
2539           /* in the above cases where 'newurl' gets assigned, we have a fresh
2540            * allocated memory pointed to */
2541         }
2542         if(res != CURLE_OK) {
2543           /* The transfer phase returned error, we mark the connection to get
2544            * closed to prevent being re-used. This is because we can't
2545            * possibly know if the connection is in a good shape or not now. */
2546           conn->bits.close = TRUE;
2547 
2548           if(CURL_SOCKET_BAD != conn->sock[SECONDARYSOCKET]) {
2549             /* if we failed anywhere, we must clean up the secondary socket if
2550                it was used */
2551             sclose(conn->sock[SECONDARYSOCKET]);
2552             conn->sock[SECONDARYSOCKET] = CURL_SOCKET_BAD;
2553           }
2554         }
2555 
2556         /* Always run Curl_done(), even if some of the previous calls
2557            failed, but return the previous (original) error code */
2558         res2 = Curl_done(&conn, res, FALSE);
2559 
2560         if(CURLE_OK == res)
2561           res = res2;
2562       }
2563       else if(conn)
2564         /* Curl_do() failed, clean up left-overs in the done-call, but note
2565            that at some cases the conn pointer is NULL when Curl_do() failed
2566            and the connection cache is very small so only call Curl_done() if
2567            conn is still "alive".
2568         */
2569         res2 = Curl_done(&conn, res, FALSE);
2570 
2571       /*
2572        * Important: 'conn' cannot be used here, since it may have been closed
2573        * in 'Curl_done' or other functions.
2574        */
2575 
2576       if((res == CURLE_OK) && follow) {
2577         res = Curl_follow(data, newurl, follow);
2578         if(CURLE_OK == res) {
2579           /* if things went fine, Curl_follow() freed or otherwise took
2580              responsibility for the newurl pointer */
2581           newurl = NULL;
2582           if(follow >= FOLLOW_RETRY) {
2583             follow = FOLLOW_NONE;
2584             continue;
2585           }
2586           /* else we break out of the loop below */
2587         }
2588       }
2589     }
2590     break; /* it only reaches here when this shouldn't loop */
2591 
2592   } while(1); /* loop if Location: */
2593 
2594   if(newurl)
2595     free(newurl);
2596 
2597   if(res && !data->state.errorbuf) {
2598     /*
2599      * As an extra precaution: if no error string has been set and there was
2600      * an error, use the strerror() string or if things are so bad that not
2601      * even that is good, set a bad string that mentions the error code.
2602      */
2603     const char *str = curl_easy_strerror(res);
2604     if(!str)
2605       failf(data, "unspecified error %d", (int)res);
2606     else
2607       failf(data, "%s", str);
2608   }
2609 
  /* run post-transfer unconditionally, but don't clobber the return code if
     we already have an error code recorded */
2612   res2 = Curl_posttransfer(data);
2613   if(!res && res2)
2614     res = res2;
2615 
2616   return res;
2617 }
2618 
2619 /*
2620  * Curl_setup_transfer() is called to setup some basic properties for the
2621  * upcoming transfer.
2622  */
2623 CURLcode
Curl_setup_transfer(struct connectdata * conn,int sockindex,curl_off_t size,bool getheader,curl_off_t * bytecountp,int writesockindex,curl_off_t * writecountp)2624 Curl_setup_transfer(
2625   struct connectdata *conn, /* connection data */
2626   int sockindex,            /* socket index to read from or -1 */
2627   curl_off_t size,          /* -1 if unknown at this point */
2628   bool getheader,           /* TRUE if header parsing is wanted */
2629   curl_off_t *bytecountp,   /* return number of bytes read or NULL */
2630   int writesockindex,       /* socket index to write to, it may very well be
2631                                the same we read from. -1 disables */
2632   curl_off_t *writecountp   /* return number of bytes written or NULL */
2633   )
2634 {
2635   struct SessionHandle *data;
2636   struct SingleRequest *k;
2637 
2638   DEBUGASSERT(conn != NULL);
2639 
2640   data = conn->data;
2641   k = &data->req;
2642 
2643   DEBUGASSERT((sockindex <= 1) && (sockindex >= -1));
2644 
2645   /* now copy all input parameters */
2646   conn->sockfd = sockindex == -1 ?
2647       CURL_SOCKET_BAD : conn->sock[sockindex];
2648   conn->writesockfd = writesockindex == -1 ?
2649       CURL_SOCKET_BAD:conn->sock[writesockindex];
2650   k->getheader = getheader;
2651 
2652   k->size = size;
2653   k->bytecountp = bytecountp;
2654   k->writebytecountp = writecountp;
2655 
2656   /* The code sequence below is placed in this function just because all
2657      necessary input is not always known in do_complete() as this function may
2658      be called after that */
2659 
2660   if(!k->getheader) {
2661     k->header = FALSE;
2662     if(size > 0)
2663       Curl_pgrsSetDownloadSize(data, size);
2664   }
2665   /* we want header and/or body, if neither then don't do this! */
2666   if(k->getheader || !data->set.opt_no_body) {
2667 
2668     if(conn->sockfd != CURL_SOCKET_BAD) {
2669       k->keepon |= KEEP_READ;
2670     }
2671 
2672     if(conn->writesockfd != CURL_SOCKET_BAD) {
2673       /* HTTP 1.1 magic:
2674 
2675          Even if we require a 100-return code before uploading data, we might
2676          need to write data before that since the REQUEST may not have been
2677          finished sent off just yet.
2678 
2679          Thus, we must check if the request has been sent before we set the
2680          state info where we wait for the 100-return code
2681       */
2682       if((data->state.expect100header) &&
2683          (data->state.proto.http->sending == HTTPSEND_BODY)) {
2684         /* wait with write until we either got 100-continue or a timeout */
2685         k->exp100 = EXP100_AWAITING_CONTINUE;
2686         k->start100 = k->start;
2687       }
2688       else {
2689         if(data->state.expect100header)
2690           /* when we've sent off the rest of the headers, we must await a
2691              100-continue but first finish sending the request */
2692           k->exp100 = EXP100_SENDING_REQUEST;
2693 
2694         /* enable the write bit when we're not waiting for continue */
2695         k->keepon |= KEEP_WRITE;
2696       }
2697     } /* if(conn->writesockfd != CURL_SOCKET_BAD) */
2698   } /* if(k->getheader || !data->set.opt_no_body) */
2699 
2700   return CURLE_OK;
2701 }
2702