2018-09-17 00:27:00 +00:00
|
|
|
from __future__ import absolute_import
|
2022-01-24 04:07:52 +00:00
|
|
|
|
2018-09-17 00:27:00 +00:00
|
|
|
import errno
|
|
|
|
import logging
|
2022-01-24 04:07:52 +00:00
|
|
|
import re
|
|
|
|
import socket
|
2018-09-17 00:27:00 +00:00
|
|
|
import sys
|
|
|
|
import warnings
|
2022-01-24 04:07:52 +00:00
|
|
|
from socket import error as SocketError
|
|
|
|
from socket import timeout as SocketTimeout
|
2018-09-17 00:27:00 +00:00
|
|
|
|
2022-01-24 04:07:52 +00:00
|
|
|
from .connection import (
|
|
|
|
BaseSSLError,
|
|
|
|
BrokenPipeError,
|
|
|
|
DummyConnection,
|
|
|
|
HTTPConnection,
|
|
|
|
HTTPException,
|
|
|
|
HTTPSConnection,
|
|
|
|
VerifiedHTTPSConnection,
|
|
|
|
port_by_scheme,
|
|
|
|
)
|
2018-09-17 00:27:00 +00:00
|
|
|
from .exceptions import (
|
|
|
|
ClosedPoolError,
|
|
|
|
EmptyPoolError,
|
|
|
|
HeaderParsingError,
|
|
|
|
HostChangedError,
|
2022-01-24 04:07:52 +00:00
|
|
|
InsecureRequestWarning,
|
2018-09-17 00:27:00 +00:00
|
|
|
LocationValueError,
|
|
|
|
MaxRetryError,
|
2022-01-24 04:07:52 +00:00
|
|
|
NewConnectionError,
|
|
|
|
ProtocolError,
|
2018-09-17 00:27:00 +00:00
|
|
|
ProxyError,
|
|
|
|
ReadTimeoutError,
|
|
|
|
SSLError,
|
|
|
|
TimeoutError,
|
|
|
|
)
|
|
|
|
from .packages import six
|
|
|
|
from .packages.six.moves import queue
|
|
|
|
from .request import RequestMethods
|
|
|
|
from .response import HTTPResponse
|
|
|
|
from .util.connection import is_connection_dropped
|
2022-01-24 04:07:52 +00:00
|
|
|
from .util.proxy import connection_requires_http_tunnel
|
|
|
|
from .util.queue import LifoQueue
|
2018-09-17 00:27:00 +00:00
|
|
|
from .util.request import set_file_position
|
|
|
|
from .util.response import assert_header_parsing
|
|
|
|
from .util.retry import Retry
|
2022-01-24 04:07:52 +00:00
|
|
|
from .util.ssl_match_hostname import CertificateError
|
2018-09-17 00:27:00 +00:00
|
|
|
from .util.timeout import Timeout
|
2022-01-24 04:07:52 +00:00
|
|
|
from .util.url import Url, _encode_target
|
|
|
|
from .util.url import _normalize_host as normalize_host
|
|
|
|
from .util.url import get_host, parse_url
|
2018-09-17 00:27:00 +00:00
|
|
|
|
|
|
|
# Py2/Py3-compatible lazy range, courtesy of six.
xrange = six.moves.xrange

# Module-level logger named after this module.
log = logging.getLogger(__name__)

# Unique sentinel object used as a parameter default wherever ``None`` is a
# meaningful value the caller might pass explicitly (e.g. ``timeout=_Default``
# vs an intentional ``timeout=None``).
_Default = object()
|
|
|
|
|
|
|
|
|
|
|
|
# Pool objects
class ConnectionPool(object):
    """
    Shared base for the concrete pool implementations
    (:class:`.HTTPConnectionPool` and :class:`.HTTPSConnectionPool`).

    .. note::
       Target URIs handed to ``ConnectionPool.urlopen()`` are forwarded
       without normalization or percent-encoding, which matters when the
       target server cannot handle percent-encoded target URIs.
    """

    # Concrete subclasses set their URL scheme ("http"/"https").
    scheme = None
    # Queue class used to store idle connections (LIFO favors warm sockets).
    QueueCls = LifoQueue

    def __init__(self, host, port=None):
        if not host:
            raise LocationValueError("No host specified.")

        # Normalized form is used for connecting; the raw lowercased host is
        # kept separately for proxy bookkeeping.
        self.host = _normalize_host(host, scheme=self.scheme)
        self._proxy_host = host.lower()
        self.port = port

    def __str__(self):
        return "%s(host=%r, port=%r)" % (type(self).__name__, self.host, self.port)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()
        # Returning False lets any in-flight exception propagate.
        return False

    def close(self):
        """
        Close all pooled connections and disable the pool.

        No-op here; concrete pools override this.
        """
        pass
|
|
|
|
|
|
|
|
|
|
|
|
# This is taken from http://hg.python.org/cpython/file/7aaba721ebc0/Lib/socket.py#l252
# errno values that mean "no data available yet" on a non-blocking socket;
# _raise_timeout() translates errors carrying these into ReadTimeoutError.
_blocking_errnos = {errno.EAGAIN, errno.EWOULDBLOCK}
|
2018-09-17 00:27:00 +00:00
|
|
|
|
|
|
|
|
|
|
|
class HTTPConnectionPool(ConnectionPool, RequestMethods):
|
|
|
|
"""
|
|
|
|
Thread-safe connection pool for one host.
|
|
|
|
|
|
|
|
:param host:
|
|
|
|
Host used for this HTTP Connection (e.g. "localhost"), passed into
|
2022-01-24 04:07:52 +00:00
|
|
|
:class:`http.client.HTTPConnection`.
|
2018-09-17 00:27:00 +00:00
|
|
|
|
|
|
|
:param port:
|
|
|
|
Port used for this HTTP Connection (None is equivalent to 80), passed
|
2022-01-24 04:07:52 +00:00
|
|
|
into :class:`http.client.HTTPConnection`.
|
2018-09-17 00:27:00 +00:00
|
|
|
|
|
|
|
:param strict:
|
|
|
|
Causes BadStatusLine to be raised if the status line can't be parsed
|
|
|
|
as a valid HTTP/1.0 or 1.1 status line, passed into
|
2022-01-24 04:07:52 +00:00
|
|
|
:class:`http.client.HTTPConnection`.
|
2018-09-17 00:27:00 +00:00
|
|
|
|
|
|
|
.. note::
|
|
|
|
Only works in Python 2. This parameter is ignored in Python 3.
|
|
|
|
|
|
|
|
:param timeout:
|
|
|
|
Socket timeout in seconds for each individual connection. This can
|
|
|
|
be a float or integer, which sets the timeout for the HTTP request,
|
|
|
|
or an instance of :class:`urllib3.util.Timeout` which gives you more
|
|
|
|
fine-grained control over request timeouts. After the constructor has
|
|
|
|
been parsed, this is always a `urllib3.util.Timeout` object.
|
|
|
|
|
|
|
|
:param maxsize:
|
|
|
|
Number of connections to save that can be reused. More than 1 is useful
|
|
|
|
in multithreaded situations. If ``block`` is set to False, more
|
|
|
|
connections will be created but they will not be saved once they've
|
|
|
|
been used.
|
|
|
|
|
|
|
|
:param block:
|
|
|
|
If set to True, no more than ``maxsize`` connections will be used at
|
|
|
|
a time. When no free connections are available, the call will block
|
|
|
|
until a connection has been released. This is a useful side effect for
|
|
|
|
particular multithreaded situations where one does not want to use more
|
|
|
|
than maxsize connections per host to prevent flooding.
|
|
|
|
|
|
|
|
:param headers:
|
|
|
|
Headers to include with all requests, unless other headers are given
|
|
|
|
explicitly.
|
|
|
|
|
|
|
|
:param retries:
|
|
|
|
Retry configuration to use by default with requests in this pool.
|
|
|
|
|
|
|
|
:param _proxy:
|
|
|
|
Parsed proxy URL, should not be used directly, instead, see
|
2022-01-24 04:07:52 +00:00
|
|
|
:class:`urllib3.ProxyManager`
|
2018-09-17 00:27:00 +00:00
|
|
|
|
|
|
|
:param _proxy_headers:
|
|
|
|
A dictionary with proxy headers, should not be used directly,
|
2022-01-24 04:07:52 +00:00
|
|
|
instead, see :class:`urllib3.ProxyManager`
|
2018-09-17 00:27:00 +00:00
|
|
|
|
|
|
|
:param \\**conn_kw:
|
|
|
|
Additional parameters are used to create fresh :class:`urllib3.connection.HTTPConnection`,
|
|
|
|
:class:`urllib3.connection.HTTPSConnection` instances.
|
|
|
|
"""
|
|
|
|
|
2022-01-24 04:07:52 +00:00
|
|
|
scheme = "http"
|
2018-09-17 00:27:00 +00:00
|
|
|
ConnectionCls = HTTPConnection
|
|
|
|
ResponseCls = HTTPResponse
|
|
|
|
|
2022-01-24 04:07:52 +00:00
|
|
|
def __init__(
    self,
    host,
    port=None,
    strict=False,
    timeout=Timeout.DEFAULT_TIMEOUT,
    maxsize=1,
    block=False,
    headers=None,
    retries=None,
    _proxy=None,
    _proxy_headers=None,
    _proxy_config=None,
    **conn_kw
):
    """
    Set up a thread-safe pool of at most ``maxsize`` connections to a
    single host. See the class docstring for parameter details.
    """
    ConnectionPool.__init__(self, host, port)
    RequestMethods.__init__(self, headers)

    self.strict = strict

    # Coerce raw numbers into a Timeout so the rest of the pool only ever
    # deals with Timeout instances.
    self.timeout = (
        timeout if isinstance(timeout, Timeout) else Timeout.from_float(timeout)
    )
    self.retries = Retry.DEFAULT if retries is None else retries

    self.block = block
    self.pool = self.QueueCls(maxsize)

    self.proxy = _proxy
    self.proxy_headers = _proxy_headers or {}
    self.proxy_config = _proxy_config

    # Seed the queue with ``maxsize`` placeholders so that a blocking
    # get() behaves correctly from the start.
    for _ in xrange(maxsize):
        self.pool.put(None)

    # Counters kept mostly for testing and debugging purposes.
    self.num_connections = 0
    self.num_requests = 0
    self.conn_kw = conn_kw

    if self.proxy:
        # Enable Nagle's algorithm for proxies, to avoid packet
        # fragmentation. The user may have supplied their own default
        # socket options, so only provide a default -- never replace.
        self.conn_kw.setdefault("socket_options", [])

        self.conn_kw["proxy"] = self.proxy
        self.conn_kw["proxy_config"] = self.proxy_config
|
2018-09-17 00:27:00 +00:00
|
|
|
|
|
|
|
def _new_conn(self):
    """
    Create and return a brand-new :class:`HTTPConnection` for this pool's
    host and port, applying the pool's stored connection kwargs.
    """
    self.num_connections += 1
    log.debug(
        "Starting new HTTP connection (%d): %s:%s",
        self.num_connections,
        self.host,
        self.port or "80",
    )

    return self.ConnectionCls(
        host=self.host,
        port=self.port,
        timeout=self.timeout.connect_timeout,
        strict=self.strict,
        **self.conn_kw
    )
|
|
|
|
|
|
|
|
def _get_conn(self, timeout=None):
    """
    Check a connection out of the pool, preferring an idle pooled one.

    When nothing is pooled and :prop:`.block` is ``False``, a fresh
    connection is created instead.

    :param timeout:
        Seconds to wait before giving up and raising
        :class:`urllib3.exceptions.EmptyPoolError` if the pool is empty and
        :prop:`.block` is ``True``.
    """
    pooled = None
    try:
        pooled = self.pool.get(block=self.block, timeout=timeout)
    except AttributeError:
        # self.pool was set to None by close().
        raise ClosedPoolError(self, "Pool is closed.")
    except queue.Empty:
        if self.block:
            raise EmptyPoolError(
                self,
                "Pool reached maximum size and no more connections are allowed.",
            )
        # Non-blocking pool: fall through and build a new connection below.

    # A persistent connection may have been dropped by the remote end
    # while it sat idle in the pool.
    if pooled and is_connection_dropped(pooled):
        log.debug("Resetting dropped connection: %s", self.host)
        pooled.close()
        if getattr(pooled, "auto_open", 1) == 0:
            # This is a proxied connection that has been mutated by
            # http.client._tunnel() and cannot be reused (since it would
            # attempt to bypass the proxy).
            pooled = None

    return pooled or self._new_conn()
|
|
|
|
|
|
|
|
def _put_conn(self, conn):
    """
    Return ``conn`` to the pool for later reuse.

    :param conn:
        Connection object for the current host and port as returned by
        :meth:`._new_conn` or :meth:`._get_conn`.

    A full pool (maxsize exceeded) means the connection is closed and
    thrown away instead of stored; frequent discards suggest ``maxsize``
    should be raised. If the pool has been closed, the connection is
    likewise closed and discarded.
    """
    try:
        self.pool.put(conn, block=False)
        return  # Stored successfully; nothing else to do.
    except AttributeError:
        # self.pool is None -- the pool has been closed.
        pass
    except queue.Full:
        # This should never happen if self.block == True
        log.warning(
            "Connection pool is full, discarding connection: %s. Connection pool size: %s",
            self.host,
            self.pool.qsize(),
        )

    # The connection never made it back into the pool; release its socket.
    if conn:
        conn.close()
|
|
|
|
|
|
|
|
def _validate_conn(self, conn):
    """
    Hook invoked right before each request, once the socket is created.

    No-op for plain HTTP; exists as an extension point for subclasses.
    """
    pass
|
|
|
|
|
|
|
|
def _prepare_proxy(self, conn):
    # Plain HTTP connections require no proxy preparation; this is an
    # extension point for subclasses.
    pass
|
|
|
|
|
|
|
|
def _get_timeout(self, timeout):
    """Coerce *timeout* into a fresh :class:`urllib3.util.Timeout` instance."""
    if timeout is _Default:
        # No override supplied: hand back a copy of the pool default.
        return self.timeout.clone()
    if isinstance(timeout, Timeout):
        return timeout.clone()
    # Legacy path: the caller passed a raw int/float. Kept for backwards
    # compatibility; may be removed later.
    return Timeout.from_float(timeout)
|
|
|
|
|
|
|
|
def _raise_timeout(self, err, url, timeout_value):
    """Re-raise *err* as a ReadTimeoutError if it looks like a timeout; otherwise return."""
    # The same message is used for every timeout translation below.
    message = "Read timed out. (read timeout=%s)" % timeout_value

    if isinstance(err, SocketTimeout):
        raise ReadTimeoutError(self, url, message)

    # See the above comment about EAGAIN in Python 3. In Python 2 we have
    # to specifically catch it and throw the timeout error
    if hasattr(err, "errno") and err.errno in _blocking_errnos:
        raise ReadTimeoutError(self, url, message)

    # Catch possible read timeouts thrown as SSL errors. If not the
    # case, rethrow the original. We need to do this because of:
    # http://bugs.python.org/issue10272
    err_text = str(err)
    if "timed out" in err_text or "did not complete (read)" in err_text:  # Python < 2.7.4
        raise ReadTimeoutError(self, url, message)
|
|
|
|
|
|
|
|
def _make_request(
    self, conn, method, url, timeout=_Default, chunked=False, **httplib_request_kw
):
    """
    Perform a request on a given urllib connection object taken from our
    pool.

    :param conn:
        a connection from one of our connection pools

    :param method:
        HTTP method (e.g. ``'GET'``) forwarded to the connection.

    :param url:
        Target URL/path forwarded to the connection.

    :param timeout:
        Socket timeout in seconds for the request. This can be a
        float or integer, which will set the same timeout value for
        the socket connect and the socket read, or an instance of
        :class:`urllib3.util.Timeout`, which gives you more fine-grained
        control over your timeouts.

    :param chunked:
        If True, send the body via ``conn.request_chunked`` (chunked
        transfer encoding); otherwise via ``conn.request``.

    :param \\**httplib_request_kw:
        Extra keyword arguments (e.g. ``body``, ``headers``) passed
        through to the connection's request method.

    :returns: The raw httplib/http.client response object.
    """
    self.num_requests += 1

    timeout_obj = self._get_timeout(timeout)
    timeout_obj.start_connect()
    conn.timeout = timeout_obj.connect_timeout

    # Trigger any extra validation we need to do.
    try:
        self._validate_conn(conn)
    except (SocketTimeout, BaseSSLError) as e:
        # Py2 raises this as a BaseSSLError, Py3 raises it as socket timeout.
        # Translate to ReadTimeoutError when applicable, else re-raise as-is.
        self._raise_timeout(err=e, url=url, timeout_value=conn.timeout)
        raise

    # conn.request() calls http.client.*.request, not the method in
    # urllib3.request. It also calls makefile (recv) on the socket.
    try:
        if chunked:
            conn.request_chunked(method, url, **httplib_request_kw)
        else:
            conn.request(method, url, **httplib_request_kw)

    # We are swallowing BrokenPipeError (errno.EPIPE) since the server is
    # legitimately able to close the connection after sending a valid response.
    # With this behaviour, the received response is still readable.
    except BrokenPipeError:
        # Python 3
        pass
    except IOError as e:
        # Python 2 and macOS/Linux
        # EPIPE and ESHUTDOWN are BrokenPipeError on Python 2, and EPROTOTYPE is needed on macOS
        # https://erickt.github.io/blog/2014/11/19/adventures-in-debugging-a-potential-osx-kernel-bug/
        if e.errno not in {
            errno.EPIPE,
            errno.ESHUTDOWN,
            errno.EPROTOTYPE,
        }:
            raise

    # Reset the timeout for the recv() on the socket
    read_timeout = timeout_obj.read_timeout

    # App Engine doesn't have a sock attr
    if getattr(conn, "sock", None):
        # In Python 3 socket.py will catch EAGAIN and return None when you
        # try and read into the file pointer created by http.client, which
        # instead raises a BadStatusLine exception. Instead of catching
        # the exception and assuming all BadStatusLine exceptions are read
        # timeouts, check for a zero timeout before making the request.
        if read_timeout == 0:
            raise ReadTimeoutError(
                self, url, "Read timed out. (read timeout=%s)" % read_timeout
            )
        if read_timeout is Timeout.DEFAULT_TIMEOUT:
            conn.sock.settimeout(socket.getdefaulttimeout())
        else:  # None or a value
            conn.sock.settimeout(read_timeout)

    # Receive the response from the server
    try:
        try:
            # Python 2.7, use buffering of HTTP responses
            httplib_response = conn.getresponse(buffering=True)
        except TypeError:
            # Python 3 getresponse() takes no ``buffering`` kwarg.
            try:
                httplib_response = conn.getresponse()
            except BaseException as e:
                # Remove the TypeError from the exception chain in
                # Python 3 (including for exceptions like SystemExit).
                # Otherwise it looks like a bug in the code.
                six.raise_from(e, None)
    except (SocketTimeout, BaseSSLError, SocketError) as e:
        self._raise_timeout(err=e, url=url, timeout_value=read_timeout)
        raise

    # AppEngine doesn't have a version attr.
    http_version = getattr(conn, "_http_vsn_str", "HTTP/?")
    log.debug(
        '%s://%s:%s "%s %s %s" %s %s',
        self.scheme,
        self.host,
        self.port,
        method,
        url,
        http_version,
        httplib_response.status,
        httplib_response.length,
    )

    try:
        # Warn (but don't fail) on malformed response headers.
        assert_header_parsing(httplib_response.msg)
    except (HeaderParsingError, TypeError) as hpe:  # Platform-specific: Python 3
        log.warning(
            "Failed to parse headers (url=%s): %s",
            self._absolute_url(url),
            hpe,
            exc_info=True,
        )

    return httplib_response
|
|
|
|
|
|
|
|
def _absolute_url(self, path):
    """Build the absolute URL (scheme://host:port/path) for *path* on this pool."""
    absolute = Url(scheme=self.scheme, host=self.host, port=self.port, path=path)
    return absolute.url
|
|
|
|
|
|
|
|
def close(self):
    """
    Close all pooled connections and disable the pool.

    Idempotent: a second call is a no-op.
    """
    if self.pool is None:
        return

    # Swap the queue out for None first so concurrent checkouts fail fast
    # (ClosedPoolError via _get_conn) instead of receiving a dying socket.
    old_pool, self.pool = self.pool, None

    try:
        while True:
            entry = old_pool.get(block=False)
            if entry:
                entry.close()
    except queue.Empty:
        pass  # Every stored connection has been drained.
|
|
|
|
|
|
|
|
def is_same_host(self, url):
    """
    Report whether ``url`` belongs to the same (scheme, host, port) that
    this connection pool serves.
    """
    # Relative URLs are always on this host by definition.
    if url.startswith("/"):
        return True

    # TODO: Add optional support for socket.gethostbyname checking.
    scheme, host, port = get_host(url)
    if host is not None:
        host = _normalize_host(host, scheme=scheme)

    # Compare with the scheme's default port filled in on whichever side
    # left it implicit, so "http://h" matches "http://h:80".
    default_port = port_by_scheme.get(scheme)
    if self.port and not port:
        port = default_port
    elif not self.port and port == default_port:
        port = None

    return (scheme, host, port) == (self.scheme, self.host, self.port)
|
|
|
|
|
2022-01-24 04:07:52 +00:00
|
|
|
def urlopen(
|
|
|
|
self,
|
|
|
|
method,
|
|
|
|
url,
|
|
|
|
body=None,
|
|
|
|
headers=None,
|
|
|
|
retries=None,
|
|
|
|
redirect=True,
|
|
|
|
assert_same_host=True,
|
|
|
|
timeout=_Default,
|
|
|
|
pool_timeout=None,
|
|
|
|
release_conn=None,
|
|
|
|
chunked=False,
|
|
|
|
body_pos=None,
|
|
|
|
**response_kw
|
|
|
|
):
|
2018-09-17 00:27:00 +00:00
|
|
|
"""
|
|
|
|
Get a connection from the pool and perform an HTTP request. This is the
|
|
|
|
lowest level call for making a request, so you'll need to specify all
|
|
|
|
the raw details.
|
|
|
|
|
|
|
|
.. note::
|
|
|
|
|
|
|
|
More commonly, it's appropriate to use a convenience method provided
|
|
|
|
by :class:`.RequestMethods`, such as :meth:`request`.
|
|
|
|
|
|
|
|
.. note::
|
|
|
|
|
|
|
|
`release_conn` will only behave as expected if
|
|
|
|
`preload_content=False` because we want to make
|
|
|
|
`preload_content=False` the default behaviour someday soon without
|
|
|
|
breaking backwards compatibility.
|
|
|
|
|
|
|
|
:param method:
|
|
|
|
HTTP request method (such as GET, POST, PUT, etc.)
|
|
|
|
|
2022-01-24 04:07:52 +00:00
|
|
|
:param url:
|
|
|
|
The URL to perform the request on.
|
|
|
|
|
2018-09-17 00:27:00 +00:00
|
|
|
:param body:
|
2022-01-24 04:07:52 +00:00
|
|
|
Data to send in the request body, either :class:`str`, :class:`bytes`,
|
|
|
|
an iterable of :class:`str`/:class:`bytes`, or a file-like object.
|
2018-09-17 00:27:00 +00:00
|
|
|
|
|
|
|
:param headers:
|
|
|
|
Dictionary of custom headers to send, such as User-Agent,
|
|
|
|
If-None-Match, etc. If None, pool headers are used. If provided,
|
|
|
|
these headers completely replace any pool-specific headers.
|
|
|
|
|
|
|
|
:param retries:
|
|
|
|
Configure the number of retries to allow before raising a
|
|
|
|
:class:`~urllib3.exceptions.MaxRetryError` exception.
|
|
|
|
|
|
|
|
Pass ``None`` to retry until you receive a response. Pass a
|
|
|
|
:class:`~urllib3.util.retry.Retry` object for fine-grained control
|
|
|
|
over different types of retries.
|
|
|
|
Pass an integer number to retry connection errors that many times,
|
|
|
|
but no other types of errors. Pass zero to never retry.
|
|
|
|
|
|
|
|
If ``False``, then retries are disabled and any exception is raised
|
|
|
|
immediately. Also, instead of raising a MaxRetryError on redirects,
|
|
|
|
the redirect response will be returned.
|
|
|
|
|
|
|
|
:type retries: :class:`~urllib3.util.retry.Retry`, False, or an int.
|
|
|
|
|
|
|
|
:param redirect:
|
|
|
|
If True, automatically handle redirects (status codes 301, 302,
|
|
|
|
303, 307, 308). Each redirect counts as a retry. Disabling retries
|
|
|
|
will disable redirect, too.
|
|
|
|
|
|
|
|
:param assert_same_host:
|
|
|
|
If ``True``, will make sure that the host of the pool requests is
|
2022-01-24 04:07:52 +00:00
|
|
|
consistent else will raise HostChangedError. When ``False``, you can
|
2018-09-17 00:27:00 +00:00
|
|
|
use the pool on an HTTP proxy and request foreign hosts.
|
|
|
|
|
|
|
|
:param timeout:
|
|
|
|
If specified, overrides the default timeout for this one
|
|
|
|
request. It may be a float (in seconds) or an instance of
|
|
|
|
:class:`urllib3.util.Timeout`.
|
|
|
|
|
|
|
|
:param pool_timeout:
|
|
|
|
If set and the pool is set to block=True, then this method will
|
|
|
|
block for ``pool_timeout`` seconds and raise EmptyPoolError if no
|
|
|
|
connection is available within the time period.
|
|
|
|
|
|
|
|
:param release_conn:
|
|
|
|
If False, then the urlopen call will not release the connection
|
|
|
|
back into the pool once a response is received (but will release if
|
|
|
|
you read the entire contents of the response such as when
|
|
|
|
`preload_content=True`). This is useful if you're not preloading
|
|
|
|
the response's content immediately. You will need to call
|
|
|
|
``r.release_conn()`` on the response ``r`` to return the connection
|
|
|
|
back into the pool. If None, it takes the value of
|
|
|
|
``response_kw.get('preload_content', True)``.
|
|
|
|
|
|
|
|
:param chunked:
|
|
|
|
If True, urllib3 will send the body using chunked transfer
|
|
|
|
encoding. Otherwise, urllib3 will send the body using the standard
|
|
|
|
content-length form. Defaults to False.
|
|
|
|
|
|
|
|
:param int body_pos:
|
|
|
|
Position to seek to in file-like body in the event of a retry or
|
|
|
|
redirect. Typically this won't need to be set because urllib3 will
|
|
|
|
auto-populate the value when needed.
|
|
|
|
|
|
|
|
:param \\**response_kw:
|
|
|
|
Additional parameters are passed to
|
|
|
|
:meth:`urllib3.response.HTTPResponse.from_httplib`
|
|
|
|
"""
|
2022-01-24 04:07:52 +00:00
|
|
|
|
|
|
|
parsed_url = parse_url(url)
|
|
|
|
destination_scheme = parsed_url.scheme
|
|
|
|
|
2018-09-17 00:27:00 +00:00
|
|
|
if headers is None:
|
|
|
|
headers = self.headers
|
|
|
|
|
|
|
|
if not isinstance(retries, Retry):
|
|
|
|
retries = Retry.from_int(retries, redirect=redirect, default=self.retries)
|
|
|
|
|
|
|
|
if release_conn is None:
|
2022-01-24 04:07:52 +00:00
|
|
|
release_conn = response_kw.get("preload_content", True)
|
2018-09-17 00:27:00 +00:00
|
|
|
|
|
|
|
# Check host
|
|
|
|
if assert_same_host and not self.is_same_host(url):
|
|
|
|
raise HostChangedError(self, url, retries)
|
|
|
|
|
2022-01-24 04:07:52 +00:00
|
|
|
# Ensure that the URL we're connecting to is properly encoded
|
|
|
|
if url.startswith("/"):
|
|
|
|
url = six.ensure_str(_encode_target(url))
|
|
|
|
else:
|
|
|
|
url = six.ensure_str(parsed_url.url)
|
|
|
|
|
2018-09-17 00:27:00 +00:00
|
|
|
conn = None
|
|
|
|
|
|
|
|
# Track whether `conn` needs to be released before
|
|
|
|
# returning/raising/recursing. Update this variable if necessary, and
|
|
|
|
# leave `release_conn` constant throughout the function. That way, if
|
|
|
|
# the function recurses, the original value of `release_conn` will be
|
|
|
|
# passed down into the recursive call, and its value will be respected.
|
|
|
|
#
|
|
|
|
# See issue #651 [1] for details.
|
|
|
|
#
|
2022-01-24 04:07:52 +00:00
|
|
|
# [1] <https://github.com/urllib3/urllib3/issues/651>
|
2018-09-17 00:27:00 +00:00
|
|
|
release_this_conn = release_conn
|
|
|
|
|
2022-01-24 04:07:52 +00:00
|
|
|
http_tunnel_required = connection_requires_http_tunnel(
|
|
|
|
self.proxy, self.proxy_config, destination_scheme
|
|
|
|
)
|
|
|
|
|
|
|
|
# Merge the proxy headers. Only done when not using HTTP CONNECT. We
|
|
|
|
# have to copy the headers dict so we can safely change it without those
|
|
|
|
# changes being reflected in anyone else's copy.
|
|
|
|
if not http_tunnel_required:
|
2018-09-17 00:27:00 +00:00
|
|
|
headers = headers.copy()
|
|
|
|
headers.update(self.proxy_headers)
|
|
|
|
|
|
|
|
# Must keep the exception bound to a separate variable or else Python 3
|
|
|
|
# complains about UnboundLocalError.
|
|
|
|
err = None
|
|
|
|
|
|
|
|
# Keep track of whether we cleanly exited the except block. This
|
|
|
|
# ensures we do proper cleanup in finally.
|
|
|
|
clean_exit = False
|
|
|
|
|
|
|
|
# Rewind body position, if needed. Record current position
|
|
|
|
# for future rewinds in the event of a redirect/retry.
|
|
|
|
body_pos = set_file_position(body, body_pos)
|
|
|
|
|
|
|
|
try:
|
|
|
|
# Request a connection from the queue.
|
|
|
|
timeout_obj = self._get_timeout(timeout)
|
|
|
|
conn = self._get_conn(timeout=pool_timeout)
|
|
|
|
|
|
|
|
conn.timeout = timeout_obj.connect_timeout
|
|
|
|
|
2022-01-24 04:07:52 +00:00
|
|
|
is_new_proxy_conn = self.proxy is not None and not getattr(
|
|
|
|
conn, "sock", None
|
|
|
|
)
|
|
|
|
if is_new_proxy_conn and http_tunnel_required:
|
2018-09-17 00:27:00 +00:00
|
|
|
self._prepare_proxy(conn)
|
|
|
|
|
|
|
|
# Make the request on the httplib connection object.
|
2022-01-24 04:07:52 +00:00
|
|
|
httplib_response = self._make_request(
|
|
|
|
conn,
|
|
|
|
method,
|
|
|
|
url,
|
|
|
|
timeout=timeout_obj,
|
|
|
|
body=body,
|
|
|
|
headers=headers,
|
|
|
|
chunked=chunked,
|
|
|
|
)
|
2018-09-17 00:27:00 +00:00
|
|
|
|
|
|
|
# If we're going to release the connection in ``finally:``, then
|
|
|
|
# the response doesn't need to know about the connection. Otherwise
|
|
|
|
# it will also try to release it and we'll have a double-release
|
|
|
|
# mess.
|
|
|
|
response_conn = conn if not release_conn else None
|
|
|
|
|
|
|
|
# Pass method to Response for length checking
|
2022-01-24 04:07:52 +00:00
|
|
|
response_kw["request_method"] = method
|
2018-09-17 00:27:00 +00:00
|
|
|
|
|
|
|
# Import httplib's response into our own wrapper object
|
2022-01-24 04:07:52 +00:00
|
|
|
response = self.ResponseCls.from_httplib(
|
|
|
|
httplib_response,
|
|
|
|
pool=self,
|
|
|
|
connection=response_conn,
|
|
|
|
retries=retries,
|
|
|
|
**response_kw
|
|
|
|
)
|
2018-09-17 00:27:00 +00:00
|
|
|
|
|
|
|
# Everything went great!
|
|
|
|
clean_exit = True
|
|
|
|
|
2022-01-24 04:07:52 +00:00
|
|
|
except EmptyPoolError:
|
|
|
|
# Didn't get a connection from the pool, no need to clean up
|
|
|
|
clean_exit = True
|
|
|
|
release_this_conn = False
|
|
|
|
raise
|
2018-09-17 00:27:00 +00:00
|
|
|
|
2022-01-24 04:07:52 +00:00
|
|
|
except (
|
|
|
|
TimeoutError,
|
|
|
|
HTTPException,
|
|
|
|
SocketError,
|
|
|
|
ProtocolError,
|
|
|
|
BaseSSLError,
|
|
|
|
SSLError,
|
|
|
|
CertificateError,
|
|
|
|
) as e:
|
2018-09-17 00:27:00 +00:00
|
|
|
# Discard the connection for these exceptions. It will be
|
|
|
|
# replaced during the next _get_conn() call.
|
|
|
|
clean_exit = False
|
2022-01-24 04:07:52 +00:00
|
|
|
|
|
|
|
def _is_ssl_error_message_from_http_proxy(ssl_error):
|
|
|
|
# We're trying to detect the message 'WRONG_VERSION_NUMBER' but
|
|
|
|
# SSLErrors are kinda all over the place when it comes to the message,
|
|
|
|
# so we try to cover our bases here!
|
|
|
|
message = " ".join(re.split("[^a-z]", str(ssl_error).lower()))
|
|
|
|
return (
|
|
|
|
"wrong version number" in message or "unknown protocol" in message
|
|
|
|
)
|
|
|
|
|
|
|
|
# Try to detect a common user error with proxies which is to
|
|
|
|
# set an HTTP proxy to be HTTPS when it should be 'http://'
|
|
|
|
# (ie {'http': 'http://proxy', 'https': 'https://proxy'})
|
|
|
|
# Instead we add a nice error message and point to a URL.
|
|
|
|
if (
|
|
|
|
isinstance(e, BaseSSLError)
|
|
|
|
and self.proxy
|
|
|
|
and _is_ssl_error_message_from_http_proxy(e)
|
2022-11-07 18:06:49 +00:00
|
|
|
and conn.proxy
|
|
|
|
and conn.proxy.scheme == "https"
|
2022-01-24 04:07:52 +00:00
|
|
|
):
|
|
|
|
e = ProxyError(
|
|
|
|
"Your proxy appears to only use HTTP and not HTTPS, "
|
|
|
|
"try changing your proxy URL to be HTTP. See: "
|
|
|
|
"https://urllib3.readthedocs.io/en/1.26.x/advanced-usage.html"
|
|
|
|
"#https-proxy-error-http-proxy",
|
|
|
|
SSLError(e),
|
|
|
|
)
|
|
|
|
elif isinstance(e, (BaseSSLError, CertificateError)):
|
2018-09-17 00:27:00 +00:00
|
|
|
e = SSLError(e)
|
|
|
|
elif isinstance(e, (SocketError, NewConnectionError)) and self.proxy:
|
2022-01-24 04:07:52 +00:00
|
|
|
e = ProxyError("Cannot connect to proxy.", e)
|
2018-09-17 00:27:00 +00:00
|
|
|
elif isinstance(e, (SocketError, HTTPException)):
|
2022-01-24 04:07:52 +00:00
|
|
|
e = ProtocolError("Connection aborted.", e)
|
2018-09-17 00:27:00 +00:00
|
|
|
|
2022-01-24 04:07:52 +00:00
|
|
|
retries = retries.increment(
|
|
|
|
method, url, error=e, _pool=self, _stacktrace=sys.exc_info()[2]
|
|
|
|
)
|
2018-09-17 00:27:00 +00:00
|
|
|
retries.sleep()
|
|
|
|
|
|
|
|
# Keep track of the error for the retry warning.
|
|
|
|
err = e
|
|
|
|
|
|
|
|
finally:
|
|
|
|
if not clean_exit:
|
|
|
|
# We hit some kind of exception, handled or otherwise. We need
|
|
|
|
# to throw the connection away unless explicitly told not to.
|
|
|
|
# Close the connection, set the variable to None, and make sure
|
|
|
|
# we put the None back in the pool to avoid leaking it.
|
|
|
|
conn = conn and conn.close()
|
|
|
|
release_this_conn = True
|
|
|
|
|
|
|
|
if release_this_conn:
|
|
|
|
# Put the connection back to be reused. If the connection is
|
|
|
|
# expired then it will be None, which will get replaced with a
|
|
|
|
# fresh connection during _get_conn.
|
|
|
|
self._put_conn(conn)
|
|
|
|
|
|
|
|
if not conn:
|
|
|
|
# Try again
|
2022-01-24 04:07:52 +00:00
|
|
|
log.warning(
|
|
|
|
"Retrying (%r) after connection broken by '%r': %s", retries, err, url
|
|
|
|
)
|
|
|
|
return self.urlopen(
|
|
|
|
method,
|
|
|
|
url,
|
|
|
|
body,
|
|
|
|
headers,
|
|
|
|
retries,
|
|
|
|
redirect,
|
|
|
|
assert_same_host,
|
|
|
|
timeout=timeout,
|
|
|
|
pool_timeout=pool_timeout,
|
|
|
|
release_conn=release_conn,
|
|
|
|
chunked=chunked,
|
|
|
|
body_pos=body_pos,
|
|
|
|
**response_kw
|
|
|
|
)
|
2018-09-17 00:27:00 +00:00
|
|
|
|
|
|
|
# Handle redirect?
|
|
|
|
redirect_location = redirect and response.get_redirect_location()
|
|
|
|
if redirect_location:
|
|
|
|
if response.status == 303:
|
2022-01-24 04:07:52 +00:00
|
|
|
method = "GET"
|
2018-09-17 00:27:00 +00:00
|
|
|
|
|
|
|
try:
|
|
|
|
retries = retries.increment(method, url, response=response, _pool=self)
|
|
|
|
except MaxRetryError:
|
|
|
|
if retries.raise_on_redirect:
|
2022-01-24 04:07:52 +00:00
|
|
|
response.drain_conn()
|
2018-09-17 00:27:00 +00:00
|
|
|
raise
|
|
|
|
return response
|
|
|
|
|
2022-01-24 04:07:52 +00:00
|
|
|
response.drain_conn()
|
2018-09-17 00:27:00 +00:00
|
|
|
retries.sleep_for_retry(response)
|
|
|
|
log.debug("Redirecting %s -> %s", url, redirect_location)
|
|
|
|
return self.urlopen(
|
2022-01-24 04:07:52 +00:00
|
|
|
method,
|
|
|
|
redirect_location,
|
|
|
|
body,
|
|
|
|
headers,
|
|
|
|
retries=retries,
|
|
|
|
redirect=redirect,
|
2018-09-17 00:27:00 +00:00
|
|
|
assert_same_host=assert_same_host,
|
2022-01-24 04:07:52 +00:00
|
|
|
timeout=timeout,
|
|
|
|
pool_timeout=pool_timeout,
|
|
|
|
release_conn=release_conn,
|
|
|
|
chunked=chunked,
|
|
|
|
body_pos=body_pos,
|
|
|
|
**response_kw
|
|
|
|
)
|
2018-09-17 00:27:00 +00:00
|
|
|
|
|
|
|
# Check if we should retry the HTTP response.
|
2022-01-24 04:07:52 +00:00
|
|
|
has_retry_after = bool(response.getheader("Retry-After"))
|
2018-09-17 00:27:00 +00:00
|
|
|
if retries.is_retry(method, response.status, has_retry_after):
|
|
|
|
try:
|
|
|
|
retries = retries.increment(method, url, response=response, _pool=self)
|
|
|
|
except MaxRetryError:
|
|
|
|
if retries.raise_on_status:
|
2022-01-24 04:07:52 +00:00
|
|
|
response.drain_conn()
|
2018-09-17 00:27:00 +00:00
|
|
|
raise
|
|
|
|
return response
|
|
|
|
|
2022-01-24 04:07:52 +00:00
|
|
|
response.drain_conn()
|
2018-09-17 00:27:00 +00:00
|
|
|
retries.sleep(response)
|
|
|
|
log.debug("Retry: %s", url)
|
|
|
|
return self.urlopen(
|
2022-01-24 04:07:52 +00:00
|
|
|
method,
|
|
|
|
url,
|
|
|
|
body,
|
|
|
|
headers,
|
|
|
|
retries=retries,
|
|
|
|
redirect=redirect,
|
2018-09-17 00:27:00 +00:00
|
|
|
assert_same_host=assert_same_host,
|
2022-01-24 04:07:52 +00:00
|
|
|
timeout=timeout,
|
|
|
|
pool_timeout=pool_timeout,
|
2018-09-17 00:27:00 +00:00
|
|
|
release_conn=release_conn,
|
2022-01-24 04:07:52 +00:00
|
|
|
chunked=chunked,
|
|
|
|
body_pos=body_pos,
|
|
|
|
**response_kw
|
|
|
|
)
|
2018-09-17 00:27:00 +00:00
|
|
|
|
|
|
|
return response
|
|
|
|
|
|
|
|
|
|
|
|
class HTTPSConnectionPool(HTTPConnectionPool):
    """
    Same as :class:`.HTTPConnectionPool`, but HTTPS.

    :class:`.HTTPSConnection` uses one of ``assert_fingerprint``,
    ``assert_hostname`` and ``host`` in this order to verify connections.
    If ``assert_hostname`` is False, no verification is done.

    The ``key_file``, ``cert_file``, ``cert_reqs``, ``ca_certs``,
    ``ca_cert_dir``, ``ssl_version``, ``key_password`` are only used if :mod:`ssl`
    is available and are fed into :meth:`urllib3.util.ssl_wrap_socket` to upgrade
    the connection socket into an SSL socket.
    """

    scheme = "https"
    ConnectionCls = HTTPSConnection

    def __init__(
        self,
        host,
        port=None,
        strict=False,
        timeout=Timeout.DEFAULT_TIMEOUT,
        maxsize=1,
        block=False,
        headers=None,
        retries=None,
        _proxy=None,
        _proxy_headers=None,
        key_file=None,
        cert_file=None,
        cert_reqs=None,
        key_password=None,
        ca_certs=None,
        ssl_version=None,
        assert_hostname=None,
        assert_fingerprint=None,
        ca_cert_dir=None,
        **conn_kw
    ):
        # Generic pool bookkeeping (host/port/timeout/sizing/proxy) is handled
        # by the base class; the TLS-specific options are stored below and
        # applied to each connection via _prepare_conn().
        HTTPConnectionPool.__init__(
            self,
            host,
            port,
            strict,
            timeout,
            maxsize,
            block,
            headers,
            retries,
            _proxy,
            _proxy_headers,
            **conn_kw
        )

        # TLS configuration handed to every connection this pool creates.
        self.key_file = key_file
        self.cert_file = cert_file
        self.cert_reqs = cert_reqs
        self.key_password = key_password
        self.ca_certs = ca_certs
        self.ca_cert_dir = ca_cert_dir
        self.ssl_version = ssl_version
        self.assert_hostname = assert_hostname
        self.assert_fingerprint = assert_fingerprint

    def _prepare_conn(self, conn):
        """
        Prepare the ``connection`` for :meth:`urllib3.util.ssl_wrap_socket`
        and establish the tunnel if proxy is used.
        """
        # Only VerifiedHTTPSConnection understands the certificate options;
        # other connection classes are returned untouched.
        if isinstance(conn, VerifiedHTTPSConnection):
            conn.set_cert(
                key_file=self.key_file,
                key_password=self.key_password,
                cert_file=self.cert_file,
                cert_reqs=self.cert_reqs,
                ca_certs=self.ca_certs,
                ca_cert_dir=self.ca_cert_dir,
                assert_hostname=self.assert_hostname,
                assert_fingerprint=self.assert_fingerprint,
            )
            conn.ssl_version = self.ssl_version
        return conn

    def _prepare_proxy(self, conn):
        """
        Establishes a tunnel connection through HTTP CONNECT.

        Tunnel connection is established early because otherwise httplib would
        improperly set Host: header to proxy's IP:port.
        """
        conn.set_tunnel(self._proxy_host, self.port, self.proxy_headers)

        # An HTTPS proxy means the request TLS session must be wrapped inside
        # the proxy TLS session ("TLS in TLS").
        if self.proxy.scheme == "https":
            conn.tls_in_tls_required = True

        conn.connect()

    def _new_conn(self):
        """
        Return a fresh :class:`http.client.HTTPSConnection`.
        """
        self.num_connections += 1
        log.debug(
            "Starting new HTTPS connection (%d): %s:%s",
            self.num_connections,
            self.host,
            self.port or "443",
        )

        # DummyConnection is substituted for the connection class when the
        # ssl module could not be imported.
        if not self.ConnectionCls or self.ConnectionCls is DummyConnection:
            raise SSLError(
                "Can't connect to HTTPS URL because the SSL module is not available."
            )

        # When proxying, the TCP connection goes to the proxy endpoint, not
        # to the target host.
        actual_host = self.host
        actual_port = self.port
        if self.proxy is not None:
            actual_host = self.proxy.host
            actual_port = self.proxy.port

        conn = self.ConnectionCls(
            host=actual_host,
            port=actual_port,
            timeout=self.timeout.connect_timeout,
            strict=self.strict,
            cert_file=self.cert_file,
            key_file=self.key_file,
            key_password=self.key_password,
            **self.conn_kw
        )

        return self._prepare_conn(conn)

    def _validate_conn(self, conn):
        """
        Called right before a request is made, after the socket is created.
        """
        super(HTTPSConnectionPool, self)._validate_conn(conn)

        # Force connect early to allow us to validate the connection.
        if not getattr(conn, "sock", None):  # AppEngine might not have `.sock`
            conn.connect()

        # Warn (but do not fail) when certificate verification is disabled.
        if not conn.is_verified:
            warnings.warn(
                (
                    "Unverified HTTPS request is being made to host '%s'. "
                    "Adding certificate verification is strongly advised. See: "
                    "https://urllib3.readthedocs.io/en/1.26.x/advanced-usage.html"
                    "#ssl-warnings" % conn.host
                ),
                InsecureRequestWarning,
            )

        # Same warning for the proxy leg of an HTTPS-in-HTTPS tunnel.
        if getattr(conn, "proxy_is_verified", None) is False:
            warnings.warn(
                (
                    "Unverified HTTPS connection done to an HTTPS proxy. "
                    "Adding certificate verification is strongly advised. See: "
                    "https://urllib3.readthedocs.io/en/1.26.x/advanced-usage.html"
                    "#ssl-warnings"
                ),
                InsecureRequestWarning,
            )
|
2018-09-17 00:27:00 +00:00
|
|
|
|
|
|
|
|
|
|
|
def connection_from_url(url, **kw):
    """
    Given a url, return an :class:`.ConnectionPool` instance of its host.

    This is a shortcut for not having to parse out the scheme, host, and port
    of the url before creating an :class:`.ConnectionPool` instance.

    :param url:
        Absolute URL string that must include the scheme. Port is optional.

    :param \\**kw:
        Passes additional parameters to the constructor of the appropriate
        :class:`.ConnectionPool`. Useful for specifying things like
        timeout, maxsize, headers, etc.

    Example::

        >>> conn = connection_from_url('http://google.com/')
        >>> r = conn.request('GET', '/')
    """
    scheme, host, port = get_host(url)
    # Default the port from the scheme when the URL doesn't carry one.
    port = port or port_by_scheme.get(scheme, 80)
    pool_cls = HTTPSConnectionPool if scheme == "https" else HTTPConnectionPool
    return pool_cls(host, port=port, **kw)
|
|
|
|
|
|
|
|
|
2019-09-18 15:30:46 +00:00
|
|
|
def _normalize_host(host, scheme):
    """
    Normalize hosts for comparisons and use with sockets.
    """
    normalized = normalize_host(host, scheme)

    # httplib doesn't like it when we include brackets in IPv6 addresses:
    # if we pass a bracketed host together with a port, it doubles up the
    # square brackets on the Host header, so strip them here. For backward
    # compatibility we can't assert the port is non-None.
    # See http://bugs.python.org/issue28539
    is_bracketed_ipv6 = normalized.startswith("[") and normalized.endswith("]")
    if is_bracketed_ipv6:
        normalized = normalized[1:-1]
    return normalized
|