1import queue
2import requests.adapters
3
4from docker.transport.basehttpadapter import BaseHTTPAdapter
5from .. import constants
6from .npipesocket import NpipeSocket
7
8import http.client as httplib
9
10try:
11    import requests.packages.urllib3 as urllib3
12except ImportError:
13    import urllib3
14
15RecentlyUsedContainer = urllib3._collections.RecentlyUsedContainer
16
17
class NpipeHTTPConnection(httplib.HTTPConnection):
    """HTTP connection whose transport is a Windows named pipe.

    The hostname is fixed to ``'localhost'`` purely to satisfy
    ``http.client``; the real endpoint is the pipe at ``npipe_path``.
    """

    def __init__(self, npipe_path, timeout=60):
        super().__init__('localhost', timeout=timeout)
        self.npipe_path = npipe_path
        self.timeout = timeout

    def connect(self):
        """Open the named pipe and install it as this connection's socket."""
        pipe_sock = NpipeSocket()
        pipe_sock.settimeout(self.timeout)
        pipe_sock.connect(self.npipe_path)
        self.sock = pipe_sock
31
32
class NpipeHTTPConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
    """Connection pool that hands out named-pipe-backed HTTP connections."""

    def __init__(self, npipe_path, timeout=60, maxsize=10):
        super().__init__('localhost', timeout=timeout, maxsize=maxsize)
        self.npipe_path = npipe_path
        self.timeout = timeout

    def _new_conn(self):
        """Build a fresh connection bound to this pool's pipe path."""
        return NpipeHTTPConnection(self.npipe_path, self.timeout)

    # urllib3's stock _get_conn calls select() on the pooled socket to
    # detect staleness when reusing connections; NpipeSocket cannot be
    # select()ed and that check crashes, so we override the method and
    # skip it entirely.
    def _get_conn(self, timeout):
        """Return a pooled connection, or a brand new one if none is idle."""
        try:
            conn = self.pool.get(block=self.block, timeout=timeout)
        except AttributeError:
            # self.pool is set to None once the pool has been closed.
            raise urllib3.exceptions.ClosedPoolError(self, "Pool is closed.")
        except queue.Empty:
            if self.block:
                raise urllib3.exceptions.EmptyPoolError(
                    self,
                    "Pool reached maximum size and no more "
                    "connections are allowed."
                )
            # Non-blocking pool with nothing idle: fall through and
            # create a new connection below.
            conn = None
        return conn or self._new_conn()
67
68
class NpipeHTTPAdapter(BaseHTTPAdapter):
    """requests transport adapter that routes HTTP over a Windows named pipe."""

    __attrs__ = requests.adapters.HTTPAdapter.__attrs__ + ['npipe_path',
                                                           'pools',
                                                           'timeout',
                                                           'max_pool_size']

    def __init__(self, base_url, timeout=60,
                 pool_connections=constants.DEFAULT_NUM_POOLS,
                 max_pool_size=constants.DEFAULT_MAX_POOL_SIZE):
        # Strip the scheme; what remains is the raw pipe path.
        self.npipe_path = base_url.replace('npipe://', '')
        self.timeout = timeout
        self.max_pool_size = max_pool_size
        # LRU of per-URL pools; evicted pools are closed on disposal.
        self.pools = RecentlyUsedContainer(
            pool_connections, dispose_func=lambda p: p.close()
        )
        super().__init__()

    def get_connection(self, url, proxies=None):
        """Return the pool for ``url``, creating and caching it if needed."""
        with self.pools.lock:
            existing = self.pools.get(url)
            if existing:
                return existing

            created = NpipeHTTPConnectionPool(
                self.npipe_path, self.timeout,
                maxsize=self.max_pool_size
            )
            self.pools[url] = created

        return created

    def request_url(self, request, proxies):
        # The select_proxy utility in requests errors out when the provided URL
        # doesn't have a hostname, like is the case when using a UNIX socket.
        # Since proxies are an irrelevant notion in the case of UNIX sockets
        # anyway, we simply return the path URL directly.
        # See also: https://github.com/docker/docker-sdk-python/issues/811
        return request.path_url
108