mirror of
https://github.com/ansible-collections/community.docker.git
synced 2025-12-15 11:32:05 +00:00
* Move licenses to LICENSES/, use SPDX-License-Identifier, mention all licenses in galaxy.yml. * ignore.txt lines cannot be empty or contain only a comment. * Cleanup. * This particular __init__.py seems to be crucial. * Try extra newline. * Markdown comments are a real mess. I hope this won't break Galaxy... * More licenses. * Add sanity test. * Skip some files, lint. * Make sure there is a copyright line everywhere. * Also check for copyright line in sanity tests. * Remove colon after 'Copyright'. * Normalize lint script. * Avoid colon after 'Copyright' in lint script. * Improve license checker. * Update README.md Co-authored-by: Maxwell G <9920591+gotmax23@users.noreply.github.com> * Remove superfluous space. * Referencing target instead of symlink Co-authored-by: Maxwell G <9920591+gotmax23@users.noreply.github.com>
120 lines
4.0 KiB
Python
120 lines
4.0 KiB
Python
# -*- coding: utf-8 -*-
|
|
# This code is part of the Ansible collection community.docker, but is an independent component.
|
|
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
|
|
#
|
|
# Copyright (c) 2016-2022 Docker, Inc.
|
|
#
|
|
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
|
|
# SPDX-License-Identifier: Apache-2.0
|
|
|
|
from __future__ import (absolute_import, division, print_function)
|
|
__metaclass__ = type
|
|
|
|
from ansible.module_utils.six import PY3
|
|
from ansible.module_utils.six.moves.queue import Empty
|
|
|
|
from .. import constants
|
|
from .._import_helper import HTTPAdapter, urllib3
|
|
|
|
from .basehttpadapter import BaseHTTPAdapter
|
|
from .npipesocket import NpipeSocket
|
|
|
|
if PY3:
|
|
import http.client as httplib
|
|
else:
|
|
import httplib
|
|
|
|
RecentlyUsedContainer = urllib3._collections.RecentlyUsedContainer
|
|
|
|
|
|
class NpipeHTTPConnection(httplib.HTTPConnection, object):
    """An ``HTTPConnection`` that transports HTTP over a Windows named pipe.

    :param npipe_path: filesystem path of the named pipe to connect to.
    :param timeout: socket timeout in seconds (default 60).
    """

    def __init__(self, npipe_path, timeout=60):
        # The hostname is never used for a named pipe; 'localhost' is only
        # a placeholder to satisfy the HTTPConnection constructor.
        super(NpipeHTTPConnection, self).__init__('localhost', timeout=timeout)
        self.npipe_path = npipe_path
        self.timeout = timeout

    def connect(self):
        """Open the named pipe and install it as this connection's socket."""
        pipe_sock = NpipeSocket()
        pipe_sock.settimeout(self.timeout)
        pipe_sock.connect(self.npipe_path)
        self.sock = pipe_sock
|
|
|
|
|
|
class NpipeHTTPConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
    """A urllib3 connection pool that hands out named-pipe backed connections.

    :param npipe_path: filesystem path of the named pipe.
    :param timeout: socket timeout in seconds (default 60).
    :param maxsize: maximum number of pooled connections (default 10).
    """

    def __init__(self, npipe_path, timeout=60, maxsize=10):
        # 'localhost' is a placeholder host; the pipe path does the real work.
        super(NpipeHTTPConnectionPool, self).__init__(
            'localhost', timeout=timeout, maxsize=maxsize
        )
        self.npipe_path = npipe_path
        self.timeout = timeout

    def _new_conn(self):
        """Create a fresh :class:`NpipeHTTPConnection` for this pool."""
        return NpipeHTTPConnection(self.npipe_path, self.timeout)

    # When re-using connections, urllib3 tries to call select() on our
    # NpipeSocket instance, causing a crash. To circumvent this, we override
    # _get_conn, where that check happens.
    def _get_conn(self, timeout):
        """Fetch a pooled connection, or build a new one if none is idle."""
        connection = None
        try:
            connection = self.pool.get(block=self.block, timeout=timeout)
        except AttributeError:
            # self.pool becomes None after the pool is closed.
            raise urllib3.exceptions.ClosedPoolError(self, "Pool is closed.")
        except Empty:
            if self.block:
                raise urllib3.exceptions.EmptyPoolError(
                    self,
                    "Pool reached maximum size and no more "
                    "connections are allowed."
                )
            # Otherwise fall through and open a brand-new connection.

        return connection or self._new_conn()
|
|
|
|
|
|
class NpipeHTTPAdapter(BaseHTTPAdapter):
    """A requests transport adapter that routes ``npipe://`` URLs through
    Windows named pipes, keeping one connection pool per request URL.

    :param base_url: the ``npipe://``-prefixed daemon URL.
    :param timeout: socket timeout in seconds (default 60).
    :param pool_connections: maximum number of cached pools.
    :param max_pool_size: maximum connections per pool.
    """

    __attrs__ = HTTPAdapter.__attrs__ + ['npipe_path',
                                         'pools',
                                         'timeout',
                                         'max_pool_size']

    def __init__(self, base_url, timeout=60,
                 pool_connections=constants.DEFAULT_NUM_POOLS,
                 max_pool_size=constants.DEFAULT_MAX_POOL_SIZE):
        self.npipe_path = base_url.replace('npipe://', '')
        self.timeout = timeout
        self.max_pool_size = max_pool_size
        # LRU container: pools evicted from the cache are closed by
        # dispose_func so their connections are not leaked.
        self.pools = RecentlyUsedContainer(
            pool_connections, dispose_func=lambda p: p.close()
        )
        super(NpipeHTTPAdapter, self).__init__()

    def get_connection(self, url, proxies=None):
        """Return the connection pool for *url*, creating and caching one
        on first use. *proxies* is accepted for interface compatibility
        and ignored (proxies are meaningless for named pipes)."""
        with self.pools.lock:
            cached = self.pools.get(url)
            if cached:
                return cached

            fresh = NpipeHTTPConnectionPool(
                self.npipe_path, self.timeout,
                maxsize=self.max_pool_size
            )
            self.pools[url] = fresh

        return fresh

    def request_url(self, request, proxies):
        # The select_proxy utility in requests errors out when the provided URL
        # doesn't have a hostname, like is the case when using a UNIX socket.
        # Since proxies are an irrelevant notion in the case of UNIX sockets
        # anyway, we simply return the path URL directly.
        # See also: https://github.com/docker/docker-sdk-python/issues/811
        return request.path_url
|