mirror of
https://github.com/ansible-collections/community.docker.git
synced 2025-12-15 11:32:05 +00:00
* Bump version to 5.0.0-a1. * Drop support for ansible-core 2.15 and 2.16. * Remove Python 2 and early Python 3 compatibility.
113 lines
3.9 KiB
Python
# -*- coding: utf-8 -*-
|
|
# This code is part of the Ansible collection community.docker, but is an independent component.
|
|
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
|
|
#
|
|
# Copyright (c) 2016-2022 Docker, Inc.
|
|
#
|
|
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
|
|
# SPDX-License-Identifier: Apache-2.0
|
|
|
|
from __future__ import annotations
|
|
|
|
from queue import Empty
|
|
|
|
from .. import constants
|
|
from .._import_helper import HTTPAdapter, urllib3, urllib3_connection
|
|
|
|
from .basehttpadapter import BaseHTTPAdapter
|
|
from .npipesocket import NpipeSocket
|
|
|
|
# LRU mapping used to cache connection pools per URL; evicts (and disposes of)
# the least-recently-used entry when full.
# NOTE(review): this reaches into urllib3's private `_collections` module, so
# it may break on a urllib3 upgrade — presumably _import_helper pins/shields
# the version; verify there.
RecentlyUsedContainer = urllib3._collections.RecentlyUsedContainer
|
|
|
|
|
|
class NpipeHTTPConnection(urllib3_connection.HTTPConnection):
    """HTTP connection whose transport is a Windows named pipe.

    urllib3 is told the host is ``'localhost'`` purely as a placeholder;
    the real endpoint is the named pipe at *npipe_path*, reached through a
    ``NpipeSocket`` installed by :meth:`connect`.
    """

    def __init__(self, npipe_path, timeout=60):
        # Python 2 compatibility was dropped from this collection, so the
        # explicit `object` base and two-argument super() are gone.
        super().__init__('localhost', timeout=timeout)
        self.npipe_path = npipe_path
        self.timeout = timeout

    def connect(self):
        """Open the named pipe and use it as this connection's socket."""
        sock = NpipeSocket()
        sock.settimeout(self.timeout)
        sock.connect(self.npipe_path)
        self.sock = sock
|
|
|
|
|
|
class NpipeHTTPConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
    """Connection pool that hands out :class:`NpipeHTTPConnection` objects.

    The nominal host ``'localhost'`` is a placeholder; every connection the
    pool creates targets the named pipe at *npipe_path*.
    """

    def __init__(self, npipe_path, timeout=60, maxsize=10):
        # Python 2 compatibility was dropped, so plain zero-argument super().
        super().__init__('localhost', timeout=timeout, maxsize=maxsize)
        self.npipe_path = npipe_path
        self.timeout = timeout

    def _new_conn(self):
        # Fresh named-pipe connection for this pool.
        return NpipeHTTPConnection(
            self.npipe_path, self.timeout
        )

    # When re-using connections, urllib3 tries to call select() on our
    # NpipeSocket instance, causing a crash. To circumvent this, we override
    # _get_conn, where that check happens.
    def _get_conn(self, timeout):
        """Return a pooled connection, or a new one if the pool is empty.

        Raises ``ClosedPoolError`` if the pool has been closed and
        ``EmptyPoolError`` if it is exhausted while blocking is enabled.
        """
        conn = None
        try:
            conn = self.pool.get(block=self.block, timeout=timeout)

        except AttributeError:  # self.pool is None
            raise urllib3.exceptions.ClosedPoolError(self, "Pool is closed.")

        except Empty:
            if self.block:
                raise urllib3.exceptions.EmptyPoolError(
                    self,
                    "Pool reached maximum size and no more "
                    "connections are allowed."
                )
            # Oh well, we'll create a new connection then

        return conn or self._new_conn()
|
|
|
|
|
|
class NpipeHTTPAdapter(BaseHTTPAdapter):
    """Requests transport adapter that sends HTTP over Windows named pipes.

    The adapter strips the ``npipe://`` scheme from *base_url* and keeps an
    LRU cache of up to *pool_connections* connection pools, each capped at
    *max_pool_size* connections.
    """

    __attrs__ = HTTPAdapter.__attrs__ + ['npipe_path',
                                         'pools',
                                         'timeout',
                                         'max_pool_size']

    def __init__(self, base_url, timeout=60,
                 pool_connections=constants.DEFAULT_NUM_POOLS,
                 max_pool_size=constants.DEFAULT_MAX_POOL_SIZE):
        self.npipe_path = base_url.replace('npipe://', '')
        self.timeout = timeout
        self.max_pool_size = max_pool_size
        # Evicted pools are closed via dispose_func so their connections are
        # not leaked.
        self.pools = RecentlyUsedContainer(
            pool_connections, dispose_func=lambda p: p.close()
        )
        # Python 2 compatibility was dropped, so plain zero-argument super().
        super().__init__()

    def get_connection(self, url, proxies=None):
        """Return the pool serving *url*, creating and caching it if needed.

        *proxies* is accepted for interface compatibility but ignored —
        proxying is meaningless for a named-pipe transport.
        """
        with self.pools.lock:
            pool = self.pools.get(url)
            if pool:
                return pool

            pool = NpipeHTTPConnectionPool(
                self.npipe_path, self.timeout,
                maxsize=self.max_pool_size
            )
            self.pools[url] = pool

        return pool

    def request_url(self, request, proxies):
        # The select_proxy utility in requests errors out when the provided URL
        # does not have a hostname, like is the case when using a UNIX socket.
        # Since proxies are an irrelevant notion in the case of UNIX sockets
        # anyway, we simply return the path URL directly.
        # See also: https://github.com/docker/docker-sdk-python/issues/811
        return request.path_url
|