1from __future__ import absolute_import
2import errno
3import logging
4import sys
5import warnings
6
7from socket import error as SocketError, timeout as SocketTimeout
8import socket
9
10
11from .exceptions import (
12    ClosedPoolError,
13    ProtocolError,
14    EmptyPoolError,
15    HeaderParsingError,
16    HostChangedError,
17    LocationValueError,
18    MaxRetryError,
19    ProxyError,
20    ReadTimeoutError,
21    SSLError,
22    TimeoutError,
23    InsecureRequestWarning,
24    NewConnectionError,
25)
26from .packages.ssl_match_hostname import CertificateError
27from .packages import six
28from .packages.six.moves import queue
29from .connection import (
30    port_by_scheme,
31    DummyConnection,
32    HTTPConnection,
33    HTTPSConnection,
34    VerifiedHTTPSConnection,
35    HTTPException,
36    BaseSSLError,
37)
38from .request import RequestMethods
39from .response import HTTPResponse
40
41from .util.connection import is_connection_dropped
42from .util.request import set_file_position
43from .util.response import assert_header_parsing
44from .util.retry import Retry
45from .util.timeout import Timeout
46from .util.url import (
47    get_host,
48    parse_url,
49    Url,
50    _normalize_host as normalize_host,
51    _encode_target,
52)
53from .util.queue import LifoQueue
54
55
# Py2/Py3 compatibility alias (maps to the builtin ``range`` on Python 3).
xrange = six.moves.xrange

# Module-level logger; handler configuration is left to the application.
log = logging.getLogger(__name__)

# Sentinel used to detect "argument not supplied" in places where ``None``
# is itself a meaningful value (e.g. ``timeout=None`` means "no timeout").
_Default = object()
61
62
63# Pool objects
# Pool objects
class ConnectionPool(object):
    """
    Base class for all connection pools, such as
    :class:`.HTTPConnectionPool` and :class:`.HTTPSConnectionPool`.

    .. note::
       ConnectionPool.urlopen() does not normalize or percent-encode target URIs
       which is useful if your target server doesn't support percent-encoded
       target URIs.
    """

    scheme = None
    QueueCls = LifoQueue

    def __init__(self, host, port=None):
        if not host:
            raise LocationValueError("No host specified.")

        # Keep the raw (lowercased) host for proxy comparisons; the public
        # ``host`` attribute is normalized for this pool's scheme.
        self._proxy_host = host.lower()
        self.host = _normalize_host(host, scheme=self.scheme)
        self.port = port

    def __str__(self):
        return "{0}(host={1!r}, port={2!r})".format(
            type(self).__name__, self.host, self.port
        )

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()
        # Returning False ensures any in-flight exception propagates.
        return False

    def close(self):
        """
        Close all pooled connections and disable the pool.

        The base implementation is a no-op; subclasses override it.
        """
        pass
102
103
# This is taken from http://hg.python.org/cpython/file/7aaba721ebc0/Lib/socket.py#l252
# errno values signalling "no data available yet" on a non-blocking socket;
# _raise_timeout() treats these as read timeouts.
_blocking_errnos = {errno.EAGAIN, errno.EWOULDBLOCK}
106
107
108class HTTPConnectionPool(ConnectionPool, RequestMethods):
109    """
110    Thread-safe connection pool for one host.
111
112    :param host:
113        Host used for this HTTP Connection (e.g. "localhost"), passed into
114        :class:`httplib.HTTPConnection`.
115
116    :param port:
117        Port used for this HTTP Connection (None is equivalent to 80), passed
118        into :class:`httplib.HTTPConnection`.
119
120    :param strict:
121        Causes BadStatusLine to be raised if the status line can't be parsed
122        as a valid HTTP/1.0 or 1.1 status line, passed into
123        :class:`httplib.HTTPConnection`.
124
125        .. note::
126           Only works in Python 2. This parameter is ignored in Python 3.
127
128    :param timeout:
129        Socket timeout in seconds for each individual connection. This can
130        be a float or integer, which sets the timeout for the HTTP request,
131        or an instance of :class:`urllib3.util.Timeout` which gives you more
132        fine-grained control over request timeouts. After the constructor has
133        been parsed, this is always a `urllib3.util.Timeout` object.
134
135    :param maxsize:
136        Number of connections to save that can be reused. More than 1 is useful
137        in multithreaded situations. If ``block`` is set to False, more
138        connections will be created but they will not be saved once they've
139        been used.
140
141    :param block:
142        If set to True, no more than ``maxsize`` connections will be used at
143        a time. When no free connections are available, the call will block
144        until a connection has been released. This is a useful side effect for
145        particular multithreaded situations where one does not want to use more
146        than maxsize connections per host to prevent flooding.
147
148    :param headers:
149        Headers to include with all requests, unless other headers are given
150        explicitly.
151
152    :param retries:
153        Retry configuration to use by default with requests in this pool.
154
155    :param _proxy:
156        Parsed proxy URL, should not be used directly, instead, see
157        :class:`urllib3.connectionpool.ProxyManager`"
158
159    :param _proxy_headers:
160        A dictionary with proxy headers, should not be used directly,
161        instead, see :class:`urllib3.connectionpool.ProxyManager`"
162
163    :param \\**conn_kw:
164        Additional parameters are used to create fresh :class:`urllib3.connection.HTTPConnection`,
165        :class:`urllib3.connection.HTTPSConnection` instances.
166    """
167
168    scheme = "http"
169    ConnectionCls = HTTPConnection
170    ResponseCls = HTTPResponse
171
172    def __init__(
173        self,
174        host,
175        port=None,
176        strict=False,
177        timeout=Timeout.DEFAULT_TIMEOUT,
178        maxsize=1,
179        block=False,
180        headers=None,
181        retries=None,
182        _proxy=None,
183        _proxy_headers=None,
184        **conn_kw
185    ):
186        ConnectionPool.__init__(self, host, port)
187        RequestMethods.__init__(self, headers)
188
189        self.strict = strict
190
191        if not isinstance(timeout, Timeout):
192            timeout = Timeout.from_float(timeout)
193
194        if retries is None:
195            retries = Retry.DEFAULT
196
197        self.timeout = timeout
198        self.retries = retries
199
200        self.pool = self.QueueCls(maxsize)
201        self.block = block
202
203        self.proxy = _proxy
204        self.proxy_headers = _proxy_headers or {}
205
206        # Fill the queue up so that doing get() on it will block properly
207        for _ in xrange(maxsize):
208            self.pool.put(None)
209
210        # These are mostly for testing and debugging purposes.
211        self.num_connections = 0
212        self.num_requests = 0
213        self.conn_kw = conn_kw
214
215        if self.proxy:
216            # Enable Nagle's algorithm for proxies, to avoid packet fragmentation.
217            # We cannot know if the user has added default socket options, so we cannot replace the
218            # list.
219            self.conn_kw.setdefault("socket_options", [])
220
221    def _new_conn(self):
222        """
223        Return a fresh :class:`HTTPConnection`.
224        """
225        self.num_connections += 1
226        log.debug(
227            "Starting new HTTP connection (%d): %s:%s",
228            self.num_connections,
229            self.host,
230            self.port or "80",
231        )
232
233        conn = self.ConnectionCls(
234            host=self.host,
235            port=self.port,
236            timeout=self.timeout.connect_timeout,
237            strict=self.strict,
238            **self.conn_kw
239        )
240        return conn
241
242    def _get_conn(self, timeout=None):
243        """
244        Get a connection. Will return a pooled connection if one is available.
245
246        If no connections are available and :prop:`.block` is ``False``, then a
247        fresh connection is returned.
248
249        :param timeout:
250            Seconds to wait before giving up and raising
251            :class:`urllib3.exceptions.EmptyPoolError` if the pool is empty and
252            :prop:`.block` is ``True``.
253        """
254        conn = None
255        try:
256            conn = self.pool.get(block=self.block, timeout=timeout)
257
258        except AttributeError:  # self.pool is None
259            raise ClosedPoolError(self, "Pool is closed.")
260
261        except queue.Empty:
262            if self.block:
263                raise EmptyPoolError(
264                    self,
265                    "Pool reached maximum size and no more connections are allowed.",
266                )
267            pass  # Oh well, we'll create a new connection then
268
269        # If this is a persistent connection, check if it got disconnected
270        if conn and is_connection_dropped(conn):
271            log.debug("Resetting dropped connection: %s", self.host)
272            conn.close()
273            if getattr(conn, "auto_open", 1) == 0:
274                # This is a proxied connection that has been mutated by
275                # httplib._tunnel() and cannot be reused (since it would
276                # attempt to bypass the proxy)
277                conn = None
278
279        return conn or self._new_conn()
280
281    def _put_conn(self, conn):
282        """
283        Put a connection back into the pool.
284
285        :param conn:
286            Connection object for the current host and port as returned by
287            :meth:`._new_conn` or :meth:`._get_conn`.
288
289        If the pool is already full, the connection is closed and discarded
290        because we exceeded maxsize. If connections are discarded frequently,
291        then maxsize should be increased.
292
293        If the pool is closed, then the connection will be closed and discarded.
294        """
295        try:
296            self.pool.put(conn, block=False)
297            return  # Everything is dandy, done.
298        except AttributeError:
299            # self.pool is None.
300            pass
301        except queue.Full:
302            # This should never happen if self.block == True
303            log.warning("Connection pool is full, discarding connection: %s", self.host)
304
305        # Connection never got put back into the pool, close it.
306        if conn:
307            conn.close()
308
    def _validate_conn(self, conn):
        """
        Called right before a request is made, after the socket is created.

        Hook for subclasses (e.g. to establish/verify a TLS connection);
        the base implementation does nothing.
        """
        pass
314
    def _prepare_proxy(self, conn):
        """Hook to set up a proxied connection before its first request."""
        # Nothing to do for HTTP connections.
        pass
318
319    def _get_timeout(self, timeout):
320        """ Helper that always returns a :class:`urllib3.util.Timeout` """
321        if timeout is _Default:
322            return self.timeout.clone()
323
324        if isinstance(timeout, Timeout):
325            return timeout.clone()
326        else:
327            # User passed us an int/float. This is for backwards compatibility,
328            # can be removed later
329            return Timeout.from_float(timeout)
330
331    def _raise_timeout(self, err, url, timeout_value):
332        """Is the error actually a timeout? Will raise a ReadTimeout or pass"""
333
334        if isinstance(err, SocketTimeout):
335            raise ReadTimeoutError(
336                self, url, "Read timed out. (read timeout=%s)" % timeout_value
337            )
338
339        # See the above comment about EAGAIN in Python 3. In Python 2 we have
340        # to specifically catch it and throw the timeout error
341        if hasattr(err, "errno") and err.errno in _blocking_errnos:
342            raise ReadTimeoutError(
343                self, url, "Read timed out. (read timeout=%s)" % timeout_value
344            )
345
346        # Catch possible read timeouts thrown as SSL errors. If not the
347        # case, rethrow the original. We need to do this because of:
348        # http://bugs.python.org/issue10272
349        if "timed out" in str(err) or "did not complete (read)" in str(
350            err
351        ):  # Python < 2.7.4
352            raise ReadTimeoutError(
353                self, url, "Read timed out. (read timeout=%s)" % timeout_value
354            )
355
    def _make_request(
        self, conn, method, url, timeout=_Default, chunked=False, **httplib_request_kw
    ):
        """
        Perform a request on a given urllib connection object taken from our
        pool.

        :param conn:
            a connection from one of our connection pools

        :param timeout:
            Socket timeout in seconds for the request. This can be a
            float or integer, which will set the same timeout value for
            the socket connect and the socket read, or an instance of
            :class:`urllib3.util.Timeout`, which gives you more fine-grained
            control over your timeouts.

        :param chunked:
            If True, the body is sent via ``conn.request_chunked`` (chunked
            transfer encoding) instead of ``conn.request``.

        :returns: the raw httplib/http.client response object.
        """
        self.num_requests += 1

        timeout_obj = self._get_timeout(timeout)
        timeout_obj.start_connect()
        conn.timeout = timeout_obj.connect_timeout

        # Trigger any extra validation we need to do.
        try:
            self._validate_conn(conn)
        except (SocketTimeout, BaseSSLError) as e:
            # Py2 raises this as a BaseSSLError, Py3 raises it as socket timeout.
            self._raise_timeout(err=e, url=url, timeout_value=conn.timeout)
            raise

        # conn.request() calls httplib.*.request, not the method in
        # urllib3.request. It also calls makefile (recv) on the socket.
        if chunked:
            conn.request_chunked(method, url, **httplib_request_kw)
        else:
            conn.request(method, url, **httplib_request_kw)

        # Reset the timeout for the recv() on the socket
        read_timeout = timeout_obj.read_timeout

        # App Engine doesn't have a sock attr
        if getattr(conn, "sock", None):
            # In Python 3 socket.py will catch EAGAIN and return None when you
            # try and read into the file pointer created by http.client, which
            # instead raises a BadStatusLine exception. Instead of catching
            # the exception and assuming all BadStatusLine exceptions are read
            # timeouts, check for a zero timeout before making the request.
            if read_timeout == 0:
                raise ReadTimeoutError(
                    self, url, "Read timed out. (read timeout=%s)" % read_timeout
                )
            if read_timeout is Timeout.DEFAULT_TIMEOUT:
                conn.sock.settimeout(socket.getdefaulttimeout())
            else:  # None or a value
                conn.sock.settimeout(read_timeout)

        # Receive the response from the server
        try:
            try:
                # Python 2.7, use buffering of HTTP responses
                httplib_response = conn.getresponse(buffering=True)
            except TypeError:
                # Python 3
                try:
                    httplib_response = conn.getresponse()
                except BaseException as e:
                    # Remove the TypeError from the exception chain in
                    # Python 3 (including for exceptions like SystemExit).
                    # Otherwise it looks like a bug in the code.
                    six.raise_from(e, None)
        except (SocketTimeout, BaseSSLError, SocketError) as e:
            self._raise_timeout(err=e, url=url, timeout_value=read_timeout)
            raise

        # AppEngine doesn't have a version attr.
        http_version = getattr(conn, "_http_vsn_str", "HTTP/?")
        log.debug(
            '%s://%s:%s "%s %s %s" %s %s',
            self.scheme,
            self.host,
            self.port,
            method,
            url,
            http_version,
            httplib_response.status,
            httplib_response.length,
        )

        # Malformed response headers are logged but do not fail the request.
        try:
            assert_header_parsing(httplib_response.msg)
        except (HeaderParsingError, TypeError) as hpe:  # Platform-specific: Python 3
            log.warning(
                "Failed to parse headers (url=%s): %s",
                self._absolute_url(url),
                hpe,
                exc_info=True,
            )

        return httplib_response
456
457    def _absolute_url(self, path):
458        return Url(scheme=self.scheme, host=self.host, port=self.port, path=path).url
459
460    def close(self):
461        """
462        Close all pooled connections and disable the pool.
463        """
464        if self.pool is None:
465            return
466        # Disable access to the pool
467        old_pool, self.pool = self.pool, None
468
469        try:
470            while True:
471                conn = old_pool.get(block=False)
472                if conn:
473                    conn.close()
474
475        except queue.Empty:
476            pass  # Done.
477
478    def is_same_host(self, url):
479        """
480        Check if the given ``url`` is a member of the same host as this
481        connection pool.
482        """
483        if url.startswith("/"):
484            return True
485
486        # TODO: Add optional support for socket.gethostbyname checking.
487        scheme, host, port = get_host(url)
488        if host is not None:
489            host = _normalize_host(host, scheme=scheme)
490
491        # Use explicit default port for comparison when none is given
492        if self.port and not port:
493            port = port_by_scheme.get(scheme)
494        elif not self.port and port == port_by_scheme.get(scheme):
495            port = None
496
497        return (scheme, host, port) == (self.scheme, self.host, self.port)
498
    def urlopen(
        self,
        method,
        url,
        body=None,
        headers=None,
        retries=None,
        redirect=True,
        assert_same_host=True,
        timeout=_Default,
        pool_timeout=None,
        release_conn=None,
        chunked=False,
        body_pos=None,
        **response_kw
    ):
        """
        Get a connection from the pool and perform an HTTP request. This is the
        lowest level call for making a request, so you'll need to specify all
        the raw details.

        .. note::

           More commonly, it's appropriate to use a convenience method provided
           by :class:`.RequestMethods`, such as :meth:`request`.

        .. note::

           `release_conn` will only behave as expected if
           `preload_content=False` because we want to make
           `preload_content=False` the default behaviour someday soon without
           breaking backwards compatibility.

        :param method:
            HTTP request method (such as GET, POST, PUT, etc.)

        :param body:
            Data to send in the request body (useful for creating
            POST requests, see HTTPConnectionPool.post_url for
            more convenience).

        :param headers:
            Dictionary of custom headers to send, such as User-Agent,
            If-None-Match, etc. If None, pool headers are used. If provided,
            these headers completely replace any pool-specific headers.

        :param retries:
            Configure the number of retries to allow before raising a
            :class:`~urllib3.exceptions.MaxRetryError` exception.

            Pass ``None`` to retry until you receive a response. Pass a
            :class:`~urllib3.util.retry.Retry` object for fine-grained control
            over different types of retries.
            Pass an integer number to retry connection errors that many times,
            but no other types of errors. Pass zero to never retry.

            If ``False``, then retries are disabled and any exception is raised
            immediately. Also, instead of raising a MaxRetryError on redirects,
            the redirect response will be returned.

        :type retries: :class:`~urllib3.util.retry.Retry`, False, or an int.

        :param redirect:
            If True, automatically handle redirects (status codes 301, 302,
            303, 307, 308). Each redirect counts as a retry. Disabling retries
            will disable redirect, too.

        :param assert_same_host:
            If ``True``, will make sure that the host of the pool requests is
            consistent else will raise HostChangedError. When False, you can
            use the pool on an HTTP proxy and request foreign hosts.

        :param timeout:
            If specified, overrides the default timeout for this one
            request. It may be a float (in seconds) or an instance of
            :class:`urllib3.util.Timeout`.

        :param pool_timeout:
            If set and the pool is set to block=True, then this method will
            block for ``pool_timeout`` seconds and raise EmptyPoolError if no
            connection is available within the time period.

        :param release_conn:
            If False, then the urlopen call will not release the connection
            back into the pool once a response is received (but will release if
            you read the entire contents of the response such as when
            `preload_content=True`). This is useful if you're not preloading
            the response's content immediately. You will need to call
            ``r.release_conn()`` on the response ``r`` to return the connection
            back into the pool. If None, it takes the value of
            ``response_kw.get('preload_content', True)``.

        :param chunked:
            If True, urllib3 will send the body using chunked transfer
            encoding. Otherwise, urllib3 will send the body using the standard
            content-length form. Defaults to False.

        :param int body_pos:
            Position to seek to in file-like body in the event of a retry or
            redirect. Typically this won't need to be set because urllib3 will
            auto-populate the value when needed.

        :param \\**response_kw:
            Additional parameters are passed to
            :meth:`urllib3.response.HTTPResponse.from_httplib`
        """
        # Fall back to the pool-wide default headers when none are supplied.
        if headers is None:
            headers = self.headers

        if not isinstance(retries, Retry):
            retries = Retry.from_int(retries, redirect=redirect, default=self.retries)

        if release_conn is None:
            release_conn = response_kw.get("preload_content", True)

        # Check host
        if assert_same_host and not self.is_same_host(url):
            raise HostChangedError(self, url, retries)

        # Ensure that the URL we're connecting to is properly encoded
        if url.startswith("/"):
            url = six.ensure_str(_encode_target(url))
        else:
            url = six.ensure_str(parse_url(url).url)

        conn = None

        # Track whether `conn` needs to be released before
        # returning/raising/recursing. Update this variable if necessary, and
        # leave `release_conn` constant throughout the function. That way, if
        # the function recurses, the original value of `release_conn` will be
        # passed down into the recursive call, and its value will be respected.
        #
        # See issue #651 [1] for details.
        #
        # [1] <https://github.com/urllib3/urllib3/issues/651>
        release_this_conn = release_conn

        # Merge the proxy headers. Only do this in HTTP. We have to copy the
        # headers dict so we can safely change it without those changes being
        # reflected in anyone else's copy.
        if self.scheme == "http":
            headers = headers.copy()
            headers.update(self.proxy_headers)

        # Must keep the exception bound to a separate variable or else Python 3
        # complains about UnboundLocalError.
        err = None

        # Keep track of whether we cleanly exited the except block. This
        # ensures we do proper cleanup in finally.
        clean_exit = False

        # Rewind body position, if needed. Record current position
        # for future rewinds in the event of a redirect/retry.
        body_pos = set_file_position(body, body_pos)

        try:
            # Request a connection from the queue.
            timeout_obj = self._get_timeout(timeout)
            conn = self._get_conn(timeout=pool_timeout)

            conn.timeout = timeout_obj.connect_timeout

            # A proxied connection with no socket yet still needs its tunnel
            # established before the first request is sent.
            is_new_proxy_conn = self.proxy is not None and not getattr(
                conn, "sock", None
            )
            if is_new_proxy_conn:
                self._prepare_proxy(conn)

            # Make the request on the httplib connection object.
            httplib_response = self._make_request(
                conn,
                method,
                url,
                timeout=timeout_obj,
                body=body,
                headers=headers,
                chunked=chunked,
            )

            # If we're going to release the connection in ``finally:``, then
            # the response doesn't need to know about the connection. Otherwise
            # it will also try to release it and we'll have a double-release
            # mess.
            response_conn = conn if not release_conn else None

            # Pass method to Response for length checking
            response_kw["request_method"] = method

            # Import httplib's response into our own wrapper object
            response = self.ResponseCls.from_httplib(
                httplib_response,
                pool=self,
                connection=response_conn,
                retries=retries,
                **response_kw
            )

            # Everything went great!
            clean_exit = True

        except queue.Empty:
            # Timed out by queue.
            raise EmptyPoolError(self, "No pool connections are available.")

        except (
            TimeoutError,
            HTTPException,
            SocketError,
            ProtocolError,
            BaseSSLError,
            SSLError,
            CertificateError,
        ) as e:
            # Discard the connection for these exceptions. It will be
            # replaced during the next _get_conn() call.
            clean_exit = False
            # Translate low-level errors into the urllib3 exception hierarchy.
            if isinstance(e, (BaseSSLError, CertificateError)):
                e = SSLError(e)
            elif isinstance(e, (SocketError, NewConnectionError)) and self.proxy:
                e = ProxyError("Cannot connect to proxy.", e)
            elif isinstance(e, (SocketError, HTTPException)):
                e = ProtocolError("Connection aborted.", e)

            # increment() raises MaxRetryError once retries are exhausted.
            retries = retries.increment(
                method, url, error=e, _pool=self, _stacktrace=sys.exc_info()[2]
            )
            retries.sleep()

            # Keep track of the error for the retry warning.
            err = e

        finally:
            if not clean_exit:
                # We hit some kind of exception, handled or otherwise. We need
                # to throw the connection away unless explicitly told not to.
                # Close the connection, set the variable to None, and make sure
                # we put the None back in the pool to avoid leaking it.
                conn = conn and conn.close()
                release_this_conn = True

            if release_this_conn:
                # Put the connection back to be reused. If the connection is
                # expired then it will be None, which will get replaced with a
                # fresh connection during _get_conn.
                self._put_conn(conn)

        if not conn:
            # Try again
            log.warning(
                "Retrying (%r) after connection broken by '%r': %s", retries, err, url
            )
            return self.urlopen(
                method,
                url,
                body,
                headers,
                retries,
                redirect,
                assert_same_host,
                timeout=timeout,
                pool_timeout=pool_timeout,
                release_conn=release_conn,
                chunked=chunked,
                body_pos=body_pos,
                **response_kw
            )

        # Handle redirect?
        redirect_location = redirect and response.get_redirect_location()
        if redirect_location:
            # A 303 response always switches the follow-up request to GET.
            if response.status == 303:
                method = "GET"

            try:
                retries = retries.increment(method, url, response=response, _pool=self)
            except MaxRetryError:
                if retries.raise_on_redirect:
                    response.drain_conn()
                    raise
                return response

            response.drain_conn()
            retries.sleep_for_retry(response)
            log.debug("Redirecting %s -> %s", url, redirect_location)
            return self.urlopen(
                method,
                redirect_location,
                body,
                headers,
                retries=retries,
                redirect=redirect,
                assert_same_host=assert_same_host,
                timeout=timeout,
                pool_timeout=pool_timeout,
                release_conn=release_conn,
                chunked=chunked,
                body_pos=body_pos,
                **response_kw
            )

        # Check if we should retry the HTTP response.
        has_retry_after = bool(response.getheader("Retry-After"))
        if retries.is_retry(method, response.status, has_retry_after):
            try:
                retries = retries.increment(method, url, response=response, _pool=self)
            except MaxRetryError:
                if retries.raise_on_status:
                    response.drain_conn()
                    raise
                return response

            response.drain_conn()
            retries.sleep(response)
            log.debug("Retry: %s", url)
            return self.urlopen(
                method,
                url,
                body,
                headers,
                retries=retries,
                redirect=redirect,
                assert_same_host=assert_same_host,
                timeout=timeout,
                pool_timeout=pool_timeout,
                release_conn=release_conn,
                chunked=chunked,
                body_pos=body_pos,
                **response_kw
            )

        return response
832
833
class HTTPSConnectionPool(HTTPConnectionPool):
    """
    HTTPS variant of :class:`.HTTPConnectionPool`.

    When the :mod:`ssl` module is available, connections are created as
    :class:`.VerifiedHTTPSConnection` instances, which *can* verify server
    certificates; otherwise plain :class:`.HTTPSConnection` is used.

    Verification in :class:`.VerifiedHTTPSConnection` consults
    ``assert_fingerprint``, ``assert_hostname`` and ``host``, in that order.
    Passing ``assert_hostname=False`` disables hostname verification.

    The TLS-related arguments (``key_file``, ``cert_file``, ``cert_reqs``,
    ``ca_certs``, ``ca_cert_dir``, ``ssl_version``, ``key_password``) only
    take effect when :mod:`ssl` is importable; they are forwarded to
    :meth:`urllib3.util.ssl_wrap_socket` when the socket is upgraded to TLS.
    """

    scheme = "https"
    ConnectionCls = HTTPSConnection

    def __init__(
        self,
        host,
        port=None,
        strict=False,
        timeout=Timeout.DEFAULT_TIMEOUT,
        maxsize=1,
        block=False,
        headers=None,
        retries=None,
        _proxy=None,
        _proxy_headers=None,
        key_file=None,
        cert_file=None,
        cert_reqs=None,
        key_password=None,
        ca_certs=None,
        ssl_version=None,
        assert_hostname=None,
        assert_fingerprint=None,
        ca_cert_dir=None,
        **conn_kw
    ):
        # Generic pooling behavior lives in the HTTP base class.
        super(HTTPSConnectionPool, self).__init__(
            host,
            port,
            strict,
            timeout,
            maxsize,
            block,
            headers,
            retries,
            _proxy,
            _proxy_headers,
            **conn_kw
        )

        # TLS configuration, applied to every connection this pool creates.
        self.key_file = key_file
        self.key_password = key_password
        self.cert_file = cert_file
        self.cert_reqs = cert_reqs
        self.ca_certs = ca_certs
        self.ca_cert_dir = ca_cert_dir
        self.ssl_version = ssl_version
        self.assert_hostname = assert_hostname
        self.assert_fingerprint = assert_fingerprint

    def _prepare_conn(self, conn):
        """
        Attach this pool's certificate settings to ``conn`` so that
        :meth:`urllib3.util.ssl_wrap_socket` can use them later.

        Connections that are not :class:`.VerifiedHTTPSConnection` (e.g. the
        unverified fallback) are returned untouched.
        """
        if not isinstance(conn, VerifiedHTTPSConnection):
            return conn

        conn.set_cert(
            key_file=self.key_file,
            key_password=self.key_password,
            cert_file=self.cert_file,
            cert_reqs=self.cert_reqs,
            ca_certs=self.ca_certs,
            ca_cert_dir=self.ca_cert_dir,
            assert_hostname=self.assert_hostname,
            assert_fingerprint=self.assert_fingerprint,
        )
        conn.ssl_version = self.ssl_version
        return conn

    def _prepare_proxy(self, conn):
        """
        Open the CONNECT tunnel immediately.  Deferring it would let httplib
        populate the Host: header with the proxy's IP:port instead of the
        target host's.
        """
        conn.set_tunnel(self._proxy_host, self.port, self.proxy_headers)
        conn.connect()

    def _new_conn(self):
        """
        Create and return a fresh :class:`httplib.HTTPSConnection`.

        :raises SSLError: when the SSL module is unavailable and no usable
            connection class exists.
        """
        self.num_connections += 1
        log.debug(
            "Starting new HTTPS connection (%d): %s:%s",
            self.num_connections,
            self.host,
            self.port or "443",
        )

        # DummyConnection is the placeholder used when ssl import failed.
        if self.ConnectionCls is DummyConnection or not self.ConnectionCls:
            raise SSLError(
                "Can't connect to HTTPS URL because the SSL module is not available."
            )

        # When going through a proxy, the TCP connection targets the proxy;
        # the real host is reached via the tunnel set up in _prepare_proxy.
        if self.proxy is None:
            target_host, target_port = self.host, self.port
        else:
            target_host, target_port = self.proxy.host, self.proxy.port

        conn = self.ConnectionCls(
            host=target_host,
            port=target_port,
            timeout=self.timeout.connect_timeout,
            strict=self.strict,
            cert_file=self.cert_file,
            key_file=self.key_file,
            key_password=self.key_password,
            **self.conn_kw
        )

        return self._prepare_conn(conn)

    def _validate_conn(self, conn):
        """
        Hook invoked right before a request is issued on ``conn``.

        Forces an early connect so certificate problems surface here, and
        emits :class:`~urllib3.exceptions.InsecureRequestWarning` when the
        connection could not be verified.
        """
        super(HTTPSConnectionPool, self)._validate_conn(conn)

        # AppEngine connections may lack a `.sock` attribute entirely.
        sock = getattr(conn, "sock", None)
        if not sock:
            conn.connect()

        if not conn.is_verified:
            message = (
                "Unverified HTTPS request is being made to host '%s'. "
                "Adding certificate verification is strongly advised. See: "
                "https://urllib3.readthedocs.io/en/latest/advanced-usage.html"
                "#ssl-warnings" % conn.host
            )
            warnings.warn(message, InsecureRequestWarning)
988
989
def connection_from_url(url, **kw):
    """
    Build a :class:`.ConnectionPool` for the host of the given URL.

    Saves the caller from manually extracting scheme, host, and port before
    instantiating a pool.

    :param url:
        Absolute URL string that must include the scheme. Port is optional.

    :param \\**kw:
        Extra keyword arguments forwarded to the pool constructor — e.g.
        timeout, maxsize, headers.

    Example::

        >>> conn = connection_from_url('http://google.com/')
        >>> r = conn.request('GET', '/')
    """
    scheme, host, port = get_host(url)
    if not port:
        # Fall back to the scheme's default port (80 for unknown schemes).
        port = port_by_scheme.get(scheme, 80)
    pool_cls = HTTPSConnectionPool if scheme == "https" else HTTPConnectionPool
    return pool_cls(host, port=port, **kw)
1016
1017
def _normalize_host(host, scheme):
    """
    Normalize a hostname for comparisons and for use with sockets.
    """
    host = normalize_host(host, scheme)

    # Strip the surrounding brackets from IPv6 literals: when brackets are
    # kept AND a port is also supplied, httplib doubles the brackets in the
    # Host header.  The flip side is that the port must then never be passed
    # as ``None`` — but for backward compatibility we cannot *assert* that
    # here.  See http://bugs.python.org/issue28539
    if host[:1] == "[" and host[-1:] == "]":
        return host[1:-1]
    return host
1034