Mirror of https://github.com/ansible-collections/community.docker.git (synced 2025-12-16 20:08:41 +00:00)
Compare commits
No commits in common. "main" and "5.0.0-a1" have entirely different histories.
@@ -30,6 +30,7 @@ schedules:
    branches:
      include:
        - stable-4
        - stable-3

variables:
  - name: checkoutPath
@@ -95,6 +96,17 @@ stages:
              test: '2.18/sanity/1'
            - name: Units
              test: '2.18/units/1'
  - stage: Ansible_2_17
    displayName: Sanity & Units 2.17
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          targets:
            - name: Sanity
              test: '2.17/sanity/1'
            - name: Units
              test: '2.17/units/1'

### Docker
  - stage: Docker_devel
@@ -163,6 +175,23 @@ stages:
          groups:
            - 4
            - 5
  - stage: Docker_2_17
    displayName: Docker 2.17
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          testFormat: 2.17/linux/{0}
          targets:
            - name: Fedora 39
              test: fedora39
            - name: Ubuntu 20.04
              test: ubuntu2004
            - name: Alpine 3.19
              test: alpine319
          groups:
            - 4
            - 5

### Community Docker
  - stage: Docker_community_devel
@@ -257,6 +286,22 @@ stages:
            - 3
            - 4
            - 5
  - stage: Remote_2_17
    displayName: Remote 2.17
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          testFormat: 2.17/{0}
          targets:
            - name: RHEL 9.3
              test: rhel/9.3
          groups:
            - 1
            - 2
            - 3
            - 4
            - 5

## Finally

@@ -267,14 +312,17 @@ stages:
      - Ansible_2_20
      - Ansible_2_19
      - Ansible_2_18
      - Ansible_2_17
      - Remote_devel
      - Remote_2_20
      - Remote_2_19
      - Remote_2_18
      - Remote_2_17
      - Docker_devel
      - Docker_2_20
      - Docker_2_19
      - Docker_2_18
      - Docker_2_17
      - Docker_community_devel
    jobs:
      - template: templates/coverage.yml
.github/workflows/docker-images.yml (vendored, 2 changes)
@@ -45,7 +45,7 @@ jobs:

    steps:
      - name: Check out repository
        uses: actions/checkout@v6
        uses: actions/checkout@v5
        with:
          persist-credentials: false
.github/workflows/nox.yml (vendored, 2 changes)
@@ -30,6 +30,6 @@ jobs:
      upload-codecov-pr: false
      upload-codecov-push: false
      upload-codecov-schedule: true
      max-ansible-core: "2.17"
      max-ansible-core: "2.16"
    secrets:
      CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
@@ -22,6 +22,10 @@ follow_untyped_imports = True
# Docker SDK for Python has partial typing information
follow_untyped_imports = True

[mypy-ansible_collections.community.internal_test_tools.*]
# community.internal_test_tools has no typing information
ignore_missing_imports = True

[mypy-jsondiff.*]
# jsondiff has no typing information
ignore_missing_imports = True

@@ -388,8 +388,6 @@ disable=raw-checker-failed,
    unused-argument,
    # Cannot remove yet due to inadequacy of rules
    inconsistent-return-statements,  # doesn't notice that fail_json() does not return
    # Buggy implementation in pylint:
    relative-beyond-top-level,  # TODO

# Enable the message, report, category or checker with the given id(s). You can
# either give multiple identifier separated by comma (,) or put this option
CHANGELOG.md (725 changes; file diff suppressed because it is too large)
@@ -4,86 +4,20 @@ Docker Community Collection Release Notes

.. contents:: Topics

v5.0.4
======
v5.0.0-a1
=========

Release Summary
---------------

Bugfix release.
First alpha release of community.docker 5.0.0.

Bugfixes
--------

- CLI-based modules - when parsing JSON output fails, also provide standard error output. Also provide information on the command and its result in a machine-readable way (https://github.com/ansible-collections/community.docker/issues/1216, https://github.com/ansible-collections/community.docker/pull/1221). A sketch of this error reporting follows after this list.
- docker_compose_v2, docker_compose_v2_pull - adjust parsing from image pull events to changes in Docker Compose 5.0.0 (https://github.com/ansible-collections/community.docker/pull/1219).

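As context for the first bugfix above: the corresponding code change (visible in the _common_cli.py hunks further down) passes the command, return code, and both output streams to the module's fail handler. A minimal standalone sketch of the idea, with plain subprocess and RuntimeError standing in for the collection's internals:

import json
import subprocess


def run_cli_json(args: list[str]) -> dict:
    # Run the CLI command and capture both output streams.
    proc = subprocess.run(args, capture_output=True, text=True, check=False)
    try:
        return json.loads(proc.stdout)
    except ValueError as exc:
        # On parse failure, report the error together with the command and its
        # result (cmd, rc, stdout, stderr) so callers get machine-readable details.
        raise RuntimeError(
            f"Error while parsing JSON output of {' '.join(args)}: {exc}\n"
            f"JSON output: {proc.stdout}\n\nError output:\n{proc.stderr}"
        ) from exc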
v5.0.3
======

Release Summary
---------------

Bugfix release.

Bugfixes
--------

- docker_container - when the same port is mapped more than once for the same protocol without specifying an interface, a bug caused an invalid value to be passed for the interface (https://github.com/ansible-collections/community.docker/issues/1213, https://github.com/ansible-collections/community.docker/pull/1214).

v5.0.2
======

Release Summary
---------------

Bugfix release for Docker 29.

Bugfixes
--------

- Docker CLI based modules - work around a bug in Docker 29.0.0 that caused a breaking change in ``docker version --format json`` output (https://github.com/ansible-collections/community.docker/issues/1185, https://github.com/ansible-collections/community.docker/pull/1187).
- docker_container - fix ``pull`` idempotency with Docker 29.0.0 (https://github.com/ansible-collections/community.docker/pull/1192).
- docker_container - fix handling of exposed port ranges. So far, the module used an undocumented feature of Docker that was removed in Docker 29.0.0, which allowed passing the range to the daemon and letting it handle it. Now the module explodes ranges into a list of all contained ports, the same as the Docker CLI does (see the sketch after this list). For backwards compatibility with Docker < 29.0.0, it also explodes ranges returned by the API for existing containers, so that comparison should only indicate a difference if the ranges actually change (https://github.com/ansible-collections/community.docker/pull/1192).
- docker_container - fix idempotency for IPv6 addresses with Docker 29.0.0 (https://github.com/ansible-collections/community.docker/pull/1192).
- docker_image - fix ``source=pull`` idempotency with Docker 29.0.0 (https://github.com/ansible-collections/community.docker/pull/1192).
- docker_image, docker_image_push - adjust image push detection to Docker 29 (https://github.com/ansible-collections/community.docker/pull/1199).
- docker_image_pull - fix idempotency with Docker 29.0.0 (https://github.com/ansible-collections/community.docker/pull/1192).
- docker_network - fix idempotency for IPv6 addresses and networks with Docker 29.0.0 (https://github.com/ansible-collections/community.docker/pull/1201).

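A minimal sketch of the range explosion described above, mirroring the logic in the _preprocess_ports hunk later in this diff (the function name is illustrative):

def explode_port_range(port: str) -> list[tuple[int, str]]:
    # "8000-8002/udp" -> [(8000, "udp"), (8001, "udp"), (8002, "udp")]
    protocol = "tcp"
    if "/" in port:
        port, protocol = port.split("/", 1)
    if "-" in port:
        start, end = port.split("-", 1)
        start_port, end_port = int(start), int(end)
        if start_port > end_port:
            raise ValueError("start port must be smaller or equal to end port.")
        return [(p, protocol) for p in range(start_port, end_port + 1)]
    return [(int(port), protocol)]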
Known Issues
------------

- docker_image, docker_image_export - idempotency for archiving images depends on whether the image IDs used by the image storage backend correspond to the IDs used in the tarball's ``manifest.json`` files. The new default backend in Docker 29 apparently uses image IDs that no longer correspond, whence idempotency no longer works (https://github.com/ansible-collections/community.docker/pull/1199). A sketch of the archive-side check follows below.

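To illustrate the known issue: the idempotency check compares the daemon's image ID against what the archive records. A rough sketch, assuming a tarball produced by ``docker save`` (the helper name is made up):

import json
import tarfile


def archived_config_refs(archive_path: str) -> list[str]:
    # The tarball's manifest.json lists one entry per image; its "Config"
    # field references the image configuration, which encodes the image ID.
    # With Docker 29's new default storage backend those IDs apparently no
    # longer match the daemon's image IDs, breaking this comparison.
    with tarfile.open(archive_path) as tar:
        member = tar.extractfile("manifest.json")
        if member is None:
            raise ValueError("archive has no manifest.json")
        manifest = json.load(member)
    return [entry["Config"] for entry in manifest]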
v5.0.1
======

Release Summary
---------------

Bugfix release.

Bugfixes
--------

- docker_compose_v2_run - when ``detach=true``, ensure that the returned container ID is not a bytes string (https://github.com/ansible-collections/community.docker/pull/1183).
- docker_image - fix 'Cannot locate specified Dockerfile' error (https://github.com/ansible-collections/community.docker/pull/1184).

v5.0.0
======

Release Summary
---------------

New major release.

The main changes are that the collection dropped support for some ansible-core
versions that are End of Life, and thus dropped support for Python 2.7.
The main changes are that the collection dropped support for some ansible-core versions that are End of Life, and thus dropped support for Python 2.7.
This allowed us to modernize the Python code, in particular with type hints.
Also all module and plugin utils are now private to the collection, which
makes it easier to refactor code. All these changes should have no effect on
end-users.
Also all module and plugin utils are now private to the collection, which makes it easier to refactor code.
All these changes should have no effect on end-users.

The current plan is to release 5.0.0 in time for Ansible 13's feature freeze, so in roughly one week.

Minor Changes
-------------
@@ -19,8 +19,6 @@ stable_branches = [ "stable-*" ]
run_isort = true
isort_config = ".isort.cfg"
run_black = true
run_ruff_check = true
ruff_check_config = "ruff.toml"
run_flake8 = true
flake8_config = ".flake8"
run_pylint = true
@@ -2250,98 +2250,3 @@ releases:
    - 5.0.0-a1.yml
    - 5.0.0.yml
    release_date: '2025-10-25'
  5.0.0:
    changes:
      release_summary: 'New major release.


        The main changes are that the collection dropped support for some ansible-core

        versions that are End of Life, and thus dropped support for Python 2.7.

        This allowed us to modernize the Python code, in particular with type hints.

        Also all module and plugin utils are now private to the collection, which

        makes it easier to refactor code. All these changes should have no effect
        on

        end-users.'
    fragments:
    - 5.0.0.yml
    release_date: '2025-11-02'
  5.0.1:
    changes:
      bugfixes:
      - docker_compose_v2_run - when ``detach=true``, ensure that the returned container
        ID is not a bytes string (https://github.com/ansible-collections/community.docker/pull/1183).
      - docker_image - fix 'Cannot locate specified Dockerfile' error (https://github.com/ansible-collections/community.docker/pull/1184).
      release_summary: Bugfix release.
    fragments:
    - 1185-fix.yml
    - 5.0.1.yml
    - typing.yml
    release_date: '2025-11-09'
  5.0.2:
    changes:
      bugfixes:
      - Docker CLI based modules - work around a bug in Docker 29.0.0 that caused
        a breaking change in ``docker version --format json`` output (https://github.com/ansible-collections/community.docker/issues/1185,
        https://github.com/ansible-collections/community.docker/pull/1187).
      - docker_container - fix ``pull`` idempotency with Docker 29.0.0 (https://github.com/ansible-collections/community.docker/pull/1192).
      - docker_container - fix handling of exposed port ranges. So far, the module
        used an undocumented feature of Docker that was removed in Docker 29.0.0,
        which allowed passing the range to the daemon and letting it handle it. Now the
        module explodes ranges into a list of all contained ports, the same as the Docker
        CLI does. For backwards compatibility with Docker < 29.0.0, it also explodes
        ranges returned by the API for existing containers so that comparison should
        only indicate a difference if the ranges actually change (https://github.com/ansible-collections/community.docker/pull/1192).
      - docker_container - fix idempotency for IPv6 addresses with Docker 29.0.0
        (https://github.com/ansible-collections/community.docker/pull/1192).
      - docker_image - fix ``source=pull`` idempotency with Docker 29.0.0 (https://github.com/ansible-collections/community.docker/pull/1192).
      - docker_image, docker_image_push - adjust image push detection to Docker
        29 (https://github.com/ansible-collections/community.docker/pull/1199).
      - docker_image_pull - fix idempotency with Docker 29.0.0 (https://github.com/ansible-collections/community.docker/pull/1192).
      - docker_network - fix idempotency for IPv6 addresses and networks with Docker
        29.0.0 (https://github.com/ansible-collections/community.docker/pull/1201).
      known_issues:
      - docker_image, docker_image_export - idempotency for archiving images depends
        on whether the image IDs used by the image storage backend correspond to
        the IDs used in the tarball's ``manifest.json`` files. The new default backend
        in Docker 29 apparently uses image IDs that no longer correspond, whence
        idempotency no longer works (https://github.com/ansible-collections/community.docker/pull/1199).
      release_summary: Bugfix release for Docker 29.
    fragments:
    - 1187-docker.yml
    - 1192-docker_container.yml
    - 1199-docker_image-push.yml
    - 1201-docker_network.yml
    - 5.0.2.yml
    release_date: '2025-11-16'
  5.0.3:
    changes:
      bugfixes:
      - docker_container - when the same port is mapped more than once for the same
        protocol without specifying an interface, a bug caused an invalid value
        to be passed for the interface (https://github.com/ansible-collections/community.docker/issues/1213,
        https://github.com/ansible-collections/community.docker/pull/1214).
      release_summary: Bugfix release.
    fragments:
    - 1214-docker_container-ports.yml
    - 5.0.3.yml
    release_date: '2025-11-29'
  5.0.4:
    changes:
      bugfixes:
      - CLI-based modules - when parsing JSON output fails, also provide standard
        error output. Also provide information on the command and its result in
        a machine-readable way (https://github.com/ansible-collections/community.docker/issues/1216,
        https://github.com/ansible-collections/community.docker/pull/1221).
      - docker_compose_v2, docker_compose_v2_pull - adjust parsing from image pull
        events to changes in Docker Compose 5.0.0 (https://github.com/ansible-collections/community.docker/pull/1219).
      release_summary: Bugfix release.
    fragments:
    - 1219-compose-v2-pull.yml
    - 1221-cli-json-errors.yml
    - 5.0.4.yml
    release_date: '2025-12-06'
@@ -7,7 +7,7 @@

namespace: community
name: docker
version: 5.1.0
version: 5.0.0-a1
readme: README.md
authors:
  - Ansible Docker Working Group
@@ -228,12 +228,12 @@ class Connection(ConnectionBase):
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
            ) as p:
                out_b, err_b = p.communicate()
                out = to_text(out_b, errors="surrogate_or_strict")
                out, err = p.communicate()
                out = to_text(out, errors="surrogate_or_strict")

            if p.returncode != 0:
                display.warning(
                    f"unable to retrieve default user from docker container: {out} {to_text(err_b)}"
                    f"unable to retrieve default user from docker container: {out} {to_text(err)}"
                )
                self._container_user_cache[container] = None
                return None
@@ -266,9 +266,7 @@ class Connection(ConnectionBase):
            if not isinstance(val, str):
                raise AnsibleConnectionFailure(
                    f"Non-string {what.lower()} found for extra_env option. Ambiguous env options must be "
                    "wrapped in quotes to avoid them being interpreted when directly specified "
                    "in YAML, or explicitly converted to strings when the option is templated. "
                    f"{what}: {val!r}"
                    f"wrapped in quotes to avoid them being interpreted. {what}: {val!r}"
                )
            local_cmd += [
                b"-e",

@@ -282,11 +282,11 @@ class Connection(ConnectionBase):
            if not isinstance(val, str):
                raise AnsibleConnectionFailure(
                    f"Non-string {what.lower()} found for extra_env option. Ambiguous env options must be "
                    "wrapped in quotes to avoid them being interpreted when directly specified "
                    "in YAML, or explicitly converted to strings when the option is templated. "
                    f"{what}: {val!r}"
                    f"wrapped in quotes to avoid them being interpreted. {what}: {val!r}"
                )
            data["Env"].append(f"{k}={v}")
            kk = to_text(k, errors="surrogate_or_strict")
            vv = to_text(v, errors="surrogate_or_strict")
            data["Env"].append(f"{kk}={vv}")

        if self.get_option("working_dir") is not None:
            data["WorkingDir"] = self.get_option("working_dir")
@@ -116,9 +116,9 @@ class Connection(ConnectionBase):
        ]

        cmd_parts = nsenter_cmd_parts + [cmd]
        cmd_b = to_bytes(" ".join(cmd_parts))
        cmd = to_bytes(" ".join(cmd_parts))

        display.vvv(f"EXEC {to_text(cmd_b)}", host=self._play_context.remote_addr)
        display.vvv(f"EXEC {to_text(cmd)}", host=self._play_context.remote_addr)
        display.debug("opening command with Popen()")

        master = None

@@ -137,9 +137,9 @@ class Connection(ConnectionBase):
            display.debug(f"Unable to open pty: {e}")

        with subprocess.Popen(
            cmd_b,
            shell=True,
            executable=executable,
            cmd,
            shell=isinstance(cmd, (str, bytes)),
            executable=executable if isinstance(cmd, (str, bytes)) else None,
            cwd=self.cwd,
            stdin=stdin,
            stdout=subprocess.PIPE,
@@ -698,7 +698,9 @@ class APIClient(_Session):
        if auth.INDEX_URL not in auth_data and auth.INDEX_NAME in auth_data:
            auth_data[auth.INDEX_URL] = auth_data.get(auth.INDEX_NAME, {})

        log.debug("Sending auth config (%s)", ", ".join(repr(k) for k in auth_data))
        log.debug(
            "Sending auth config (%s)", ", ".join(repr(k) for k in auth_data.keys())
        )

        if auth_data:
            headers["X-Registry-Config"] = auth.encode_header(auth_data)
@@ -292,7 +292,7 @@ class AuthConfig(dict):
                log.debug("No entry found")
                return None
            except StoreError as e:
                raise errors.DockerException(f"Credentials store error: {e}") from e
                raise errors.DockerException(f"Credentials store error: {e}")

    def _get_store_instance(self, name: str) -> Store:
        if name not in self._stores:

@@ -310,7 +310,7 @@ class AuthConfig(dict):
        if self.creds_store:
            # Retrieve all credentials from the default store
            store = self._get_store_instance(self.creds_store)
            for k in store.list():
            for k in store.list().keys():
                auth_data[k] = self._resolve_authconfig_credstore(k, self.creds_store)
                auth_data[convert_to_hostname(k)] = auth_data[k]
@@ -102,7 +102,8 @@ def get_tls_dir(name: str | None = None, endpoint: str = "") -> str:

def get_context_host(path: str | None = None, tls: bool = False) -> str:
    host = parse_host(path, IS_WINDOWS_PLATFORM, tls)
    if host == DEFAULT_UNIX_SOCKET and host.startswith("http+"):
    if host == DEFAULT_UNIX_SOCKET:
        # remove http+ from default docker socket url
        host = host[5:]
        if host.startswith("http+"):
            host = host[5:]
    return host
@@ -90,13 +90,13 @@ class Store:
                env=env,
            )
        except subprocess.CalledProcessError as e:
            raise errors.process_store_error(e, self.program) from e
            raise errors.process_store_error(e, self.program)
        except OSError as e:
            if e.errno == errno.ENOENT:
                raise errors.StoreError(
                    f"{self.program} not installed or not available in PATH"
                ) from e
                )
            raise errors.StoreError(
                f'Unexpected OS error "{e.strerror}", errno={e.errno}'
            ) from e
            )
        return output
@@ -98,7 +98,7 @@ def create_archive(
    extra_files = extra_files or []
    if not fileobj:
        # pylint: disable-next=consider-using-with
        fileobj = tempfile.NamedTemporaryFile()  # noqa: SIM115
        fileobj = tempfile.NamedTemporaryFile()

    with tarfile.open(mode="w:gz" if gzip else "w", fileobj=fileobj) as tarf:
        if files is None:
@@ -146,8 +146,7 @@ def create_archive(


def mkbuildcontext(dockerfile: io.BytesIO | t.IO[bytes]) -> t.IO[bytes]:
    # pylint: disable-next=consider-using-with
    f = tempfile.NamedTemporaryFile()  # noqa: SIM115
    f = tempfile.NamedTemporaryFile()  # pylint: disable=consider-using-with
    try:
        with tarfile.open(mode="w", fileobj=f) as tarf:
            if isinstance(dockerfile, io.StringIO):  # type: ignore
@@ -196,14 +195,11 @@ class PatternMatcher:
        for pattern in self.patterns:
            negative = pattern.exclusion
            match = pattern.match(filepath)
            if (
                not match
                and parent_path != ""
                and len(pattern.dirs) <= len(parent_path_dirs)
            ):
                match = pattern.match(
                    os.path.sep.join(parent_path_dirs[: len(pattern.dirs)])
                )
            if not match and parent_path != "":
                if len(pattern.dirs) <= len(parent_path_dirs):
                    match = pattern.match(
                        os.path.sep.join(parent_path_dirs[: len(pattern.dirs)])
                    )

            if match:
                matched = not negative
@@ -22,7 +22,7 @@ from ..transport.npipesocket import NpipeSocket


if t.TYPE_CHECKING:
    from collections.abc import Sequence
    from collections.abc import Iterable, Sequence

    from ..._socket_helper import SocketLike
@@ -59,8 +59,8 @@ def read(socket: SocketLike, n: int = 4096) -> bytes | None:
    try:
        if hasattr(socket, "recv"):
            return socket.recv(n)
        if isinstance(socket, pysocket.SocketIO):  # type: ignore
            return socket.read(n)  # type: ignore[unreachable]
        if isinstance(socket, getattr(pysocket, "SocketIO")):
            return socket.read(n)
        return os.read(socket.fileno(), n)
    except EnvironmentError as e:
        if e.errno not in recoverable_errors:
@@ -36,6 +36,7 @@ from ..tls import TLSConfig


if t.TYPE_CHECKING:
    import ssl
    from collections.abc import Mapping, Sequence
@@ -297,7 +298,7 @@ def parse_host(addr: str | None, is_win32: bool = False, tls: bool = False) -> s
    if proto == "unix" and parsed_url.hostname is not None:
        # For legacy reasons, we consider unix://path
        # to be valid and equivalent to unix:///path
        path = f"{parsed_url.hostname}/{path}"
        path = "/".join((parsed_url.hostname, path))

    netloc = parsed_url.netloc
    if proto in ("tcp", "ssh"):
@@ -428,8 +429,9 @@ def parse_bytes(s: int | float | str) -> int | float:
    if len(s) == 0:
        return 0

    if s[-2:-1].isalpha() and s[-1].isalpha() and (s[-1] == "b" or s[-1] == "B"):
        s = s[:-1]
    if s[-2:-1].isalpha() and s[-1].isalpha():
        if s[-1] == "b" or s[-1] == "B":
            s = s[:-1]
    units = BYTE_UNITS
    suffix = s[-1].lower()
@@ -43,8 +43,10 @@ docker_version: str | None  # pylint: disable=invalid-name

try:
    from docker import __version__ as docker_version
    from docker.errors import APIError, TLSParameterError
    from docker import auth
    from docker.errors import APIError, NotFound, TLSParameterError
    from docker.tls import TLSConfig
    from requests.exceptions import SSLError

    if LooseVersion(docker_version) >= LooseVersion("3.0.0"):
        HAS_DOCKER_PY_3 = True  # pylint: disable=invalid-name
@@ -389,6 +391,242 @@ class AnsibleDockerClientBase(Client):
            )
            self.fail(f"SSL Exception: {error}")

    def get_container_by_id(self, container_id: str) -> dict[str, t.Any] | None:
        try:
            self.log(f"Inspecting container Id {container_id}")
            result = self.inspect_container(container=container_id)
            self.log("Completed container inspection")
            return result
        except NotFound:
            return None
        except Exception as exc:  # pylint: disable=broad-exception-caught
            self.fail(f"Error inspecting container: {exc}")

    def get_container(self, name: str | None) -> dict[str, t.Any] | None:
        """
        Lookup a container and return the inspection results.
        """
        if name is None:
            return None

        search_name = name
        if not name.startswith("/"):
            search_name = "/" + name

        result = None
        try:
            for container in self.containers(all=True):
                self.log(f"testing container: {container['Names']}")
                if (
                    isinstance(container["Names"], list)
                    and search_name in container["Names"]
                ):
                    result = container
                    break
                if container["Id"].startswith(name):
                    result = container
                    break
                if container["Id"] == name:
                    result = container
                    break
        except SSLError as exc:
            self._handle_ssl_error(exc)
        except Exception as exc:  # pylint: disable=broad-exception-caught
            self.fail(f"Error retrieving container list: {exc}")

        if result is None:
            return None

        return self.get_container_by_id(result["Id"])

    def get_network(
        self, name: str | None = None, network_id: str | None = None
    ) -> dict[str, t.Any] | None:
        """
        Lookup a network and return the inspection results.
        """
        if name is None and network_id is None:
            return None

        result = None

        if network_id is None:
            try:
                for network in self.networks():
                    self.log(f"testing network: {network['Name']}")
                    if name == network["Name"]:
                        result = network
                        break
                    if network["Id"].startswith(name):
                        result = network
                        break
            except SSLError as exc:
                self._handle_ssl_error(exc)
            except Exception as exc:  # pylint: disable=broad-exception-caught
                self.fail(f"Error retrieving network list: {exc}")

        if result is not None:
            network_id = result["Id"]

        if network_id is not None:
            try:
                self.log(f"Inspecting network Id {network_id}")
                result = self.inspect_network(network_id)
                self.log("Completed network inspection")
            except NotFound:
                return None
            except Exception as exc:  # pylint: disable=broad-exception-caught
                self.fail(f"Error inspecting network: {exc}")

        return result

    def find_image(self, name: str, tag: str) -> dict[str, t.Any] | None:
        """
        Lookup an image (by name and tag) and return the inspection results.
        """
        if not name:
            return None

        self.log(f"Find image {name}:{tag}")
        images = self._image_lookup(name, tag)
        if not images:
            # In API <= 1.20 seeing 'docker.io/<name>' as the name of images pulled from docker hub
            registry, repo_name = auth.resolve_repository_name(name)
            if registry == "docker.io":
                # If docker.io is explicitly there in name, the image
                # is not found in some cases (#41509)
                self.log(f"Check for docker.io image: {repo_name}")
                images = self._image_lookup(repo_name, tag)
                if not images and repo_name.startswith("library/"):
                    # Sometimes library/xxx images are not found
                    lookup = repo_name[len("library/") :]
                    self.log(f"Check for docker.io image: {lookup}")
                    images = self._image_lookup(lookup, tag)
                if not images:
                    # Last case for some Docker versions: if docker.io was not there,
                    # it can be that the image was not found either
                    # (https://github.com/ansible/ansible/pull/15586)
                    lookup = f"{registry}/{repo_name}"
                    self.log(f"Check for docker.io image: {lookup}")
                    images = self._image_lookup(lookup, tag)
                if not images and "/" not in repo_name:
                    # This seems to be happening with podman-docker
                    # (https://github.com/ansible-collections/community.docker/issues/291)
                    lookup = f"{registry}/library/{repo_name}"
                    self.log(f"Check for docker.io image: {lookup}")
                    images = self._image_lookup(lookup, tag)

        if len(images) > 1:
            self.fail(f"Daemon returned more than one result for {name}:{tag}")

        if len(images) == 1:
            try:
                inspection = self.inspect_image(images[0]["Id"])
            except NotFound:
                self.log(f"Image {name}:{tag} not found.")
                return None
            except Exception as exc:  # pylint: disable=broad-exception-caught
                self.fail(f"Error inspecting image {name}:{tag} - {exc}")
            return inspection

        self.log(f"Image {name}:{tag} not found.")
        return None

    def find_image_by_id(
        self, image_id: str, accept_missing_image: bool = False
    ) -> dict[str, t.Any] | None:
        """
        Lookup an image (by ID) and return the inspection results.
        """
        if not image_id:
            return None

        self.log(f"Find image {image_id} (by ID)")
        try:
            inspection = self.inspect_image(image_id)
        except NotFound as exc:
            if not accept_missing_image:
                self.fail(f"Error inspecting image ID {image_id} - {exc}")
            self.log(f"Image {image_id} not found.")
            return None
        except Exception as exc:  # pylint: disable=broad-exception-caught
            self.fail(f"Error inspecting image ID {image_id} - {exc}")
        return inspection

    def _image_lookup(self, name: str, tag: str) -> list[dict[str, t.Any]]:
        """
        Including a tag in the name parameter sent to the Docker SDK for Python images method
        does not work consistently. Instead, get the result set for name and manually check
        if the tag exists.
        """
        try:
            response = self.images(name=name)
        except Exception as exc:  # pylint: disable=broad-exception-caught
            self.fail(f"Error searching for image {name} - {exc}")
        images = response
        if tag:
            lookup = f"{name}:{tag}"
            lookup_digest = f"{name}@{tag}"
            images = []
            for image in response:
                tags = image.get("RepoTags")
                digests = image.get("RepoDigests")
                if (tags and lookup in tags) or (digests and lookup_digest in digests):
                    images = [image]
                    break
        return images

    def pull_image(
        self, name: str, tag: str = "latest", image_platform: str | None = None
    ) -> tuple[dict[str, t.Any] | None, bool]:
        """
        Pull an image
        """
        kwargs = {
            "tag": tag,
            "stream": True,
            "decode": True,
        }
        if image_platform is not None:
            kwargs["platform"] = image_platform
        self.log(f"Pulling image {name}:{tag}")
        old_tag = self.find_image(name, tag)
        try:
            for line in self.pull(name, **kwargs):
                self.log(line, pretty_print=True)
                if line.get("error"):
                    if line.get("errorDetail"):
                        error_detail = line.get("errorDetail")
                        self.fail(
                            f"Error pulling {name} - code: {error_detail.get('code')} message: {error_detail.get('message')}"
                        )
                    else:
                        self.fail(f"Error pulling {name} - {line.get('error')}")
        except Exception as exc:  # pylint: disable=broad-exception-caught
            self.fail(f"Error pulling image {name}:{tag} - {exc}")

        new_tag = self.find_image(name, tag)

        return new_tag, old_tag == new_tag

    def inspect_distribution(self, image: str, **kwargs: t.Any) -> dict[str, t.Any]:
        """
        Get image digest by directly calling the Docker API when running Docker SDK < 4.0.0
        since prior versions did not support accessing private repositories.
        """
        if self.docker_py_version < LooseVersion("4.0.0"):
            registry = auth.resolve_repository_name(image)[0]
            header = auth.get_config_header(self, registry)
            if header:
                return self._result(
                    self._get(
                        self._url("/distribution/{0}/json", image),
                        headers={"X-Registry-Auth": header},
                    ),
                    json=True,
                )
        return super().inspect_distribution(image, **kwargs)


class AnsibleDockerClient(AnsibleDockerClientBase):
    def __init__(

@@ -480,8 +718,9 @@ class AnsibleDockerClient(AnsibleDockerClientBase):
    ) -> None:
        self.option_minimal_versions: dict[str, dict[str, t.Any]] = {}
        for option in self.module.argument_spec:
            if ignore_params is not None and option in ignore_params:
                continue
            if ignore_params is not None:
                if option in ignore_params:
                    continue
            self.option_minimal_versions[option] = {}
        self.option_minimal_versions.update(option_minimal_versions)
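A usage sketch for the pull_image contract above (client is assumed to be a connected AnsibleDockerClient; the variable names are illustrative):

# pull_image returns the post-pull inspection result plus a flag that is True
# when the image did not change - this is what the modules use for idempotency.
image, unchanged = client.pull_image("alpine", tag="3.19")
if not unchanged:
    print("pull changed the local image")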
@@ -519,17 +519,6 @@ class AnsibleDockerClientBase(Client):
        except Exception as exc:  # pylint: disable=broad-exception-caught
            self.fail(f"Error inspecting image ID {image_id} - {exc}")

    @staticmethod
    def _compare_images(
        img1: dict[str, t.Any] | None, img2: dict[str, t.Any] | None
    ) -> bool:
        if img1 is None or img2 is None:
            return img1 == img2
        filter_keys = {"Metadata"}
        img1_filtered = {k: v for k, v in img1.items() if k not in filter_keys}
        img2_filtered = {k: v for k, v in img2.items() if k not in filter_keys}
        return img1_filtered == img2_filtered

    def pull_image(
        self, name: str, tag: str = "latest", image_platform: str | None = None
    ) -> tuple[dict[str, t.Any] | None, bool]:

@@ -537,7 +526,7 @@ class AnsibleDockerClientBase(Client):
        Pull an image
        """
        self.log(f"Pulling image {name}:{tag}")
        old_image = self.find_image(name, tag)
        old_tag = self.find_image(name, tag)
        try:
            repository, image_tag = parse_repository_tag(name)
            registry, dummy_repo_name = auth.resolve_repository_name(repository)

@@ -574,9 +563,9 @@ class AnsibleDockerClientBase(Client):
        except Exception as exc:  # pylint: disable=broad-exception-caught
            self.fail(f"Error pulling image {name}:{tag} - {exc}")

        new_image = self.find_image(name, tag)
        new_tag = self.find_image(name, tag)

        return new_image, self._compare_images(old_image, new_image)
        return new_tag, old_tag == new_tag


class AnsibleDockerClient(AnsibleDockerClientBase):

@@ -665,8 +654,9 @@ class AnsibleDockerClient(AnsibleDockerClientBase):
    ) -> None:
        self.option_minimal_versions: dict[str, dict[str, t.Any]] = {}
        for option in self.module.argument_spec:
            if ignore_params is not None and option in ignore_params:
                continue
            if ignore_params is not None:
                if option in ignore_params:
                    continue
            self.option_minimal_versions[option] = {}
        self.option_minimal_versions.update(option_minimal_versions)
@@ -126,16 +126,13 @@ class AnsibleDockerClientBase:
        self._info: dict[str, t.Any] | None = None

        if needs_api_version:
            api_version_string = self._version["Server"].get(
                "ApiVersion"
            ) or self._version["Server"].get("APIVersion")
            if not isinstance(self._version.get("Server"), dict) or not isinstance(
                api_version_string, str
                self._version["Server"].get("ApiVersion"), str
            ):
                self.fail(
                    "Cannot determine Docker Daemon information. Are you maybe using podman instead of docker?"
                )
            self.docker_api_version_str = to_text(api_version_string)
            self.docker_api_version_str = to_text(self._version["Server"]["ApiVersion"])
            self.docker_api_version = LooseVersion(self.docker_api_version_str)
            min_docker_api_version = min_docker_api_version or "1.25"
            if self.docker_api_version < LooseVersion(min_docker_api_version):

@@ -197,11 +194,7 @@ class AnsibleDockerClientBase:
            data = json.loads(stdout)
        except Exception as exc:  # pylint: disable=broad-exception-caught
            self.fail(
                f"Error while parsing JSON output of {self._compose_cmd_str(args)}: {exc}\nJSON output: {to_text(stdout)}\n\nError output:\n{to_text(stderr)}",
                cmd=self._compose_cmd_str(args),
                rc=rc,
                stdout=stdout,
                stderr=stderr,
                f"Error while parsing JSON output of {self._compose_cmd_str(args)}: {exc}\nJSON output: {to_text(stdout)}"
            )
        return rc, data, stderr

@@ -227,11 +220,7 @@ class AnsibleDockerClientBase:
                result.append(json.loads(line))
        except Exception as exc:  # pylint: disable=broad-exception-caught
            self.fail(
                f"Error while parsing JSON output of {self._compose_cmd_str(args)}: {exc}\nJSON output: {to_text(stdout)}\n\nError output:\n{to_text(stderr)}",
                cmd=self._compose_cmd_str(args),
                rc=rc,
                stdout=stdout,
                stderr=stderr,
                f"Error while parsing JSON output of {self._compose_cmd_str(args)}: {exc}\nJSON output: {to_text(stdout)}"
            )
        return rc, result, stderr
@@ -132,7 +132,7 @@ DOCKER_PULL_PROGRESS_DONE = frozenset(
        "Pull complete",
    )
)
DOCKER_PULL_PROGRESS_WORKING_OLD = frozenset(
DOCKER_PULL_PROGRESS_WORKING = frozenset(
    (
        "Pulling fs layer",
        "Waiting",

@@ -141,7 +141,6 @@ DOCKER_PULL_PROGRESS_WORKING_OLD = frozenset(
        "Extracting",
    )
)
DOCKER_PULL_PROGRESS_WORKING = frozenset(DOCKER_PULL_PROGRESS_WORKING_OLD | {"Working"})


class ResourceType:

@@ -192,7 +191,7 @@ _RE_PULL_EVENT = re.compile(
)

_DOCKER_PULL_PROGRESS_WD = sorted(
    DOCKER_PULL_PROGRESS_DONE | DOCKER_PULL_PROGRESS_WORKING_OLD
    DOCKER_PULL_PROGRESS_DONE | DOCKER_PULL_PROGRESS_WORKING
)

_RE_PULL_PROGRESS = re.compile(
@@ -495,17 +494,7 @@ def parse_json_events(
            # {"dry-run":true,"id":"ansible-docker-test-dc713f1f-container ==> ==>","text":"naming to ansible-docker-test-dc713f1f-image"}
            # (The longer form happens since Docker Compose 2.39.0)
            continue
        if (
            status in ("Working", "Done")
            and isinstance(line_data.get("parent_id"), str)
            and line_data["parent_id"].startswith("Image ")
        ):
            # Compose 5.0.0+:
            # {"id":"63a26ae4e8a8","parent_id":"Image ghcr.io/ansible-collections/simple-1:tag","status":"Working"}
            # {"id":"63a26ae4e8a8","parent_id":"Image ghcr.io/ansible-collections/simple-1:tag","status":"Done","percent":100}
            resource_type = ResourceType.IMAGE_LAYER
            resource_id = line_data["parent_id"][len("Image ") :]
        elif isinstance(resource_id, str) and " " in resource_id:
        if isinstance(resource_id, str) and " " in resource_id:
            resource_type_str, resource_id = resource_id.split(" ", 1)
            try:
                resource_type = ResourceType.from_docker_compose_event(

@@ -524,7 +513,7 @@ def parse_json_events(
                status, text = text, status
            elif (
                text in DOCKER_PULL_PROGRESS_DONE
                or line_data.get("text") in DOCKER_PULL_PROGRESS_WORKING_OLD
                or line_data.get("text") in DOCKER_PULL_PROGRESS_WORKING
            ):
                resource_type = ResourceType.IMAGE_LAYER
                status, text = text, status

@@ -567,8 +556,8 @@ def parse_events(
    stderr_lines = stderr.splitlines()
    if stderr_lines and stderr_lines[-1] == b"":
        del stderr_lines[-1]
    for index, line_b in enumerate(stderr_lines):
        line = to_text(line_b.strip())
    for index, line in enumerate(stderr_lines):
        line = to_text(line.strip())
        if not line:
            continue
        warn_missing_dry_run_prefix = False

@@ -701,7 +690,9 @@ def emit_warnings(


def is_failed(events: Sequence[Event], rc: int) -> bool:
    return bool(rc)
    if rc:
        return True
    return False


def update_failed(
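For illustration, the Compose 5.0.0 pull-event shape handled above can be classified in isolation like this (a sketch; the function name and the "image-layer" tag are simplified stand-ins for the collection's ResourceType handling):

import json


def classify_pull_event(line: str) -> tuple[str, str] | None:
    # Compose 5.0.0+ pull progress lines look like:
    # {"id":"63a26ae4e8a8","parent_id":"Image ghcr.io/...:tag","status":"Working"}
    data = json.loads(line)
    parent_id = data.get("parent_id")
    if (
        data.get("status") in ("Working", "Done")
        and isinstance(parent_id, str)
        and parent_id.startswith("Image ")
    ):
        # The layer belongs to the image named after the "Image " prefix.
        return ("image-layer", parent_id[len("Image ") :])
    return None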
@@ -479,8 +479,9 @@ def fetch_file(

    reader = tar.extractfile(member)
    if reader:
        with reader as in_f, open(b_out_path, "wb") as out_f:
            shutil.copyfileobj(in_f, out_f)
        with reader as in_f:
            with open(b_out_path, "wb") as out_f:
                shutil.copyfileobj(in_f, out_f)
    return in_path

def process_symlink(in_path: str, member: tarfile.TarInfo) -> str:
@@ -659,9 +659,7 @@ def _preprocess_env(
        if not isinstance(value, str):
            module.fail_json(
                msg="Non-string value found for env option. Ambiguous env options must be "
                "wrapped in quotes to avoid them being interpreted when directly specified "
                "in YAML, or explicitly converted to strings when the option is templated. "
                f"Key: {name}"
                f"wrapped in quotes to avoid them being interpreted. Key: {name}"
            )
        final_env[name] = to_text(value, errors="surrogate_or_strict")
    formatted_env = []

@@ -892,15 +890,14 @@ def _preprocess_mounts(
            check_collision(container, "volumes")
            new_vols.append(f"{host}:{container}:{mode}")
            continue
        if (
            len(parts) == 2
            and not _is_volume_permissions(parts[1])
            and re.match(r"[.~]", parts[0])
        ):
            host = os.path.abspath(os.path.expanduser(parts[0]))
            check_collision(parts[1], "volumes")
            new_vols.append(f"{host}:{parts[1]}:rw")
            continue
        if len(parts) == 2:
            if not _is_volume_permissions(parts[1]) and re.match(
                r"[.~]", parts[0]
            ):
                host = os.path.abspath(os.path.expanduser(parts[0]))
                check_collision(parts[1], "volumes")
                new_vols.append(f"{host}:{parts[1]}:rw")
                continue
        check_collision(parts[min(1, len(parts) - 1)], "volumes")
        new_vols.append(vol)
    values["volumes"] = new_vols

@@ -949,8 +946,7 @@ def _preprocess_log(
            value = to_text(v, errors="surrogate_or_strict")
            module.warn(
                f"Non-string value found for log_options option '{k}'. The value is automatically converted to {value!r}. "
                "If this is not correct, or you want to avoid such warnings, please quote the value,"
                " or explicitly convert the values to strings when templating them."
                "If this is not correct, or you want to avoid such warnings, please quote the value."
            )
            v = value
        options[k] = v

@@ -1019,7 +1015,7 @@ def _preprocess_ports(
        else:
            port_binds = len(container_ports) * [(ipaddr,)]
    else:
        module.fail_json(
        return module.fail_json(
            msg=f'Invalid port description "{port}" - expected 1 to 3 colon-separated parts, but got {p_len}. '
            "Maybe you forgot to use square brackets ([...]) around an IPv6 address?"
        )

@@ -1040,43 +1036,38 @@ def _preprocess_ports(
        binds[idx] = bind
    values["published_ports"] = binds

    exposed: set[tuple[int, str]] = set()
    exposed = []
    if "exposed_ports" in values:
        for port in values["exposed_ports"]:
            port = to_text(port, errors="surrogate_or_strict").strip()
            protocol = "tcp"
            parts = port.split("/", maxsplit=1)
            if len(parts) == 2:
                port, protocol = parts
            parts = port.split("-", maxsplit=1)
            if len(parts) < 2:
                try:
                    exposed.add((int(port), protocol))
                except ValueError as e:
                    module.fail_json(msg=f"Cannot parse port {port!r}: {e}")
            else:
                try:
                    start_port = int(parts[0])
                    end_port = int(parts[1])
                    if start_port > end_port:
                        raise ValueError(
                            "start port must be smaller or equal to end port."
                        )
                except ValueError as e:
                    module.fail_json(msg=f"Cannot parse port range {port!r}: {e}")
                for port in range(start_port, end_port + 1):
                    exposed.add((port, protocol))
            matcher = re.search(r"(/.+$)", port)
            if matcher:
                protocol = matcher.group(1).replace("/", "")
                port = re.sub(r"/.+$", "", port)
            exposed.append((port, protocol))
    if "published_ports" in values:
        # Any published port should also be exposed
        for publish_port in values["published_ports"]:
            match = False
            if isinstance(publish_port, str) and "/" in publish_port:
                port, protocol = publish_port.split("/")
                port = int(port)
            else:
                protocol = "tcp"
                port = int(publish_port)
            exposed.add((port, protocol))
    values["ports"] = sorted(exposed)
            for exposed_port in exposed:
                if exposed_port[1] != protocol:
                    continue
                if isinstance(exposed_port[0], str) and "-" in exposed_port[0]:
                    start_port, end_port = exposed_port[0].split("-")
                    if int(start_port) <= port <= int(end_port):
                        match = True
                elif exposed_port[0] == port:
                    match = True
            if not match:
                exposed.append((port, protocol))
    values["ports"] = exposed
    return values
@@ -29,7 +29,6 @@ from ansible_collections.community.docker.plugins.module_utils._common_api impor
    RequestException,
)
from ansible_collections.community.docker.plugins.module_utils._module_container.base import (
    _DEFAULT_IP_REPLACEMENT_STRING,
    OPTION_AUTO_REMOVE,
    OPTION_BLKIO_WEIGHT,
    OPTION_CAP_DROP,

@@ -128,6 +127,11 @@ if t.TYPE_CHECKING:
    Sentry = object


_DEFAULT_IP_REPLACEMENT_STRING = (
    "[[DEFAULT_IP:iewahhaeB4Sae6Aen8IeShairoh4zeph7xaekoh8Geingunaesaeweiy3ooleiwi]]"
)


_SENTRY: Sentry = object()
@@ -215,11 +219,12 @@ class DockerAPIEngineDriver(EngineDriver[AnsibleDockerClient]):
        return False

    def is_container_running(self, container: dict[str, t.Any]) -> bool:
        return bool(
            container.get("State")
            and container["State"].get("Running")
            and not container["State"].get("Ghost", False)
        )
        if container.get("State"):
            if container["State"].get("Running") and not container["State"].get(
                "Ghost", False
            ):
                return True
        return False

    def is_container_paused(self, container: dict[str, t.Any]) -> bool:
        if container.get("State"):
@@ -1701,8 +1706,9 @@ def _get_expected_values_mounts(
        parts = vol.split(":")
        if len(parts) == 3:
            continue
        if len(parts) == 2 and not _is_volume_permissions(parts[1]):
            continue
        if len(parts) == 2:
            if not _is_volume_permissions(parts[1]):
                continue
        expected_vols[vol] = {}
    if expected_vols:
        expected_values["volumes"] = expected_vols

@@ -1799,8 +1805,9 @@ def _set_values_mounts(
        parts = volume.split(":")
        if len(parts) == 3:
            continue
        if len(parts) == 2 and not _is_volume_permissions(parts[1]):
            continue
        if len(parts) == 2:
            if not _is_volume_permissions(parts[1]):
                continue
        volumes[volume] = {}
    data["Volumes"] = volumes
    if "volume_binds" in values:
@@ -1966,20 +1973,10 @@ def _get_values_ports(
    config = container["Config"]

    # "ExposedPorts": null returns None type & causes AttributeError - PR #5517
    expected_exposed: list[str] = []
    if config.get("ExposedPorts") is not None:
        for port_and_protocol in config.get("ExposedPorts", {}):
            port, protocol = _normalize_port(port_and_protocol).rsplit("/")
            try:
                start, end = port.split("-", 1)
                start_port = int(start)
                end_port = int(end)
                for port_no in range(start_port, end_port + 1):
                    expected_exposed.append(f"{port_no}/{protocol}")
                continue
            except ValueError:
                # Either it is not a range, or a broken one - in both cases, simply add the original form
                expected_exposed.append(f"{port}/{protocol}")
        expected_exposed = [_normalize_port(p) for p in config.get("ExposedPorts", {})]
    else:
        expected_exposed = []

    return {
        "published_ports": host_config.get("PortBindings"),
@@ -2033,14 +2030,17 @@ def _get_expected_values_ports(
    ]
    expected_values["published_ports"] = expected_bound_ports

    image_ports: set[str] = set()
    image_ports = []
    if image:
        image_exposed_ports = image["Config"].get("ExposedPorts") or {}
        image_ports = {_normalize_port(p) for p in image_exposed_ports}
    param_ports: set[str] = set()
        image_ports = [_normalize_port(p) for p in image_exposed_ports]
    param_ports = []
    if "ports" in values:
        param_ports = {f"{p[0]}/{p[1]}" for p in values["ports"]}
    result = sorted(image_ports | param_ports)
        param_ports = [
            to_text(p[0], errors="surrogate_or_strict") + "/" + p[1]
            for p in values["ports"]
        ]
    result = list(set(image_ports + param_ports))
    expected_values["exposed_ports"] = result

    if "publish_all_ports" in values:
@@ -2089,26 +2089,16 @@ def _preprocess_value_ports(
    if "published_ports" not in values:
        return values
    found = False
    for port_specs in values["published_ports"].values():
        if not isinstance(port_specs, list):
            port_specs = [port_specs]
        for port_spec in port_specs:
            if port_spec[0] == _DEFAULT_IP_REPLACEMENT_STRING:
                found = True
                break
    for port_spec in values["published_ports"].values():
        if port_spec[0] == _DEFAULT_IP_REPLACEMENT_STRING:
            found = True
            break
    if not found:
        return values
    default_ip = _get_default_host_ip(module, client)
    for port, port_specs in values["published_ports"].items():
        if isinstance(port_specs, list):
            for index, port_spec in enumerate(port_specs):
                if port_spec[0] == _DEFAULT_IP_REPLACEMENT_STRING:
                    port_specs[index] = tuple([default_ip] + list(port_spec[1:]))
        else:
            if port_specs[0] == _DEFAULT_IP_REPLACEMENT_STRING:
                values["published_ports"][port] = tuple(
                    [default_ip] + list(port_specs[1:])
                )
    for port, port_spec in values["published_ports"].items():
        if port_spec[0] == _DEFAULT_IP_REPLACEMENT_STRING:
            values["published_ports"][port] = tuple([default_ip] + list(port_spec[1:]))
    return values
@@ -25,7 +25,6 @@ from ansible_collections.community.docker.plugins.module_utils._util import (
    DockerBaseClass,
    compare_generic,
    is_image_name_id,
    normalize_ip_address,
    sanitize_result,
)

@@ -218,13 +217,11 @@ class ContainerManager(DockerBaseClass, t.Generic[Client]):
                    "The wildcard can only be used with comparison modes 'strict' and 'ignore'!"
                )
            for option in self.all_options.values():
                # `networks` is special: only update if
                # some value is actually specified
                if (
                    option.name == "networks"
                    and self.module.params["networks"] is None
                ):
                    continue
                if option.name == "networks":
                    # `networks` is special: only update if
                    # some value is actually specified
                    if self.module.params["networks"] is None:
                        continue
                option.comparison = value
        # Now process all other comparisons.
        comp_aliases_used: dict[str, str] = {}
@@ -682,17 +679,13 @@ class ContainerManager(DockerBaseClass, t.Generic[Client]):
    def _image_is_different(
        self, image: dict[str, t.Any] | None, container: Container
    ) -> bool:
        if (
            image
            and image.get("Id")
            and container
            and container.image
            and image.get("Id") != container.image
        ):
            self.diff_tracker.add(
                "image", parameter=image.get("Id"), active=container.image
            )
            return True
        if image and image.get("Id"):
            if container and container.image:
                if image.get("Id") != container.image:
                    self.diff_tracker.add(
                        "image", parameter=image.get("Id"), active=container.image
                    )
                    return True
        return False

    def _compose_create_parameters(self, image: str) -> dict[str, t.Any]:
@@ -926,21 +919,22 @@ class ContainerManager(DockerBaseClass, t.Generic[Client]):
            else:
                diff = False
                network_info_ipam = network_info.get("IPAMConfig") or {}
                if network.get("ipv4_address") and normalize_ip_address(
                    network["ipv4_address"]
                ) != normalize_ip_address(network_info_ipam.get("IPv4Address")):
                if network.get("ipv4_address") and network[
                    "ipv4_address"
                ] != network_info_ipam.get("IPv4Address"):
                    diff = True
                if network.get("ipv6_address") and normalize_ip_address(
                    network["ipv6_address"]
                ) != normalize_ip_address(network_info_ipam.get("IPv6Address")):
                    diff = True
                if network.get("aliases") and not compare_generic(
                    network["aliases"],
                    network_info.get("Aliases"),
                    "allow_more_present",
                    "set",
                ):
                if network.get("ipv6_address") and network[
                    "ipv6_address"
                ] != network_info_ipam.get("IPv6Address"):
                    diff = True
                if network.get("aliases"):
                    if not compare_generic(
                        network["aliases"],
                        network_info.get("Aliases"),
                        "allow_more_present",
                        "set",
                    ):
                        diff = True
                if network.get("links"):
                    expected_links = []
                    for link, alias in network["links"]:
@@ -73,7 +73,7 @@ class DockerSocketHandlerBase:

    def __exit__(
        self,
        type_: type[BaseException] | None,
        type_: t.Type[BaseException] | None,
        value: BaseException | None,
        tb: TracebackType | None,
    ) -> None:

@@ -199,9 +199,10 @@ class DockerSocketHandlerBase:
            if event & selectors.EVENT_WRITE != 0:
                self._write()
        result = len(events)
        if self._paramiko_read_workaround and len(self._write_buffer) > 0 and self._sock.send_ready():  # type: ignore
            self._write()
            result += 1
        if self._paramiko_read_workaround and len(self._write_buffer) > 0:
            if self._sock.send_ready():  # type: ignore
                self._write()
                result += 1
        return result > 0

    def is_eof(self) -> bool:
@@ -64,8 +64,8 @@ def shutdown_writing(
        # probably: "TypeError: shutdown() takes 1 positional argument but 2 were given"
        log(f"Shutting down for writing not possible; trying shutdown instead: {e}")
        sock.shutdown()  # type: ignore
    elif isinstance(sock, pysocket.SocketIO):  # type: ignore
        sock._sock.shutdown(pysocket.SHUT_WR)  # type: ignore[unreachable]
    elif isinstance(sock, getattr(pysocket, "SocketIO")):
        sock._sock.shutdown(pysocket.SHUT_WR)
    else:
        log("No idea how to signal end of writing")
@ -115,7 +115,9 @@ class AnsibleDockerSwarmClient(AnsibleDockerClient):
        :return: True if node is Swarm Worker, False otherwise
        """

        return bool(self.check_if_swarm_node() and not self.check_if_swarm_manager())
        if self.check_if_swarm_node() and not self.check_if_swarm_manager():
            return True
        return False

    def check_if_swarm_node_is_down(
        self, node_id: str | None = None, repeat_check: int = 1

@ -179,8 +181,9 @@ class AnsibleDockerSwarmClient(AnsibleDockerClient):
            self.fail(
                "Cannot inspect node: To inspect node execute module on Swarm Manager"
            )
            if exc.status_code == 404 and skip_missing:
                return None
            if exc.status_code == 404:
                if skip_missing:
                    return None
            self.fail(f"Error while reading from Swarm manager: {exc}")
        except Exception as exc:  # pylint: disable=broad-exception-caught
            self.fail(f"Error inspecting swarm node: {exc}")

@ -188,18 +191,19 @@ class AnsibleDockerSwarmClient(AnsibleDockerClient):
        json_str = json.dumps(node_info, ensure_ascii=False)
        node_info = json.loads(json_str)

        if "ManagerStatus" in node_info and node_info["ManagerStatus"].get("Leader"):
            # This is workaround of bug in Docker when in some cases the Leader IP is 0.0.0.0
            # Check moby/moby#35437 for details
            count_colons = node_info["ManagerStatus"]["Addr"].count(":")
            if count_colons == 1:
                swarm_leader_ip = (
                    node_info["ManagerStatus"]["Addr"].split(":", 1)[0]
                    or node_info["Status"]["Addr"]
                )
            else:
                swarm_leader_ip = node_info["Status"]["Addr"]
            node_info["Status"]["Addr"] = swarm_leader_ip
        if "ManagerStatus" in node_info:
            if node_info["ManagerStatus"].get("Leader"):
                # This is workaround of bug in Docker when in some cases the Leader IP is 0.0.0.0
                # Check moby/moby#35437 for details
                count_colons = node_info["ManagerStatus"]["Addr"].count(":")
                if count_colons == 1:
                    swarm_leader_ip = (
                        node_info["ManagerStatus"]["Addr"].split(":", 1)[0]
                        or node_info["Status"]["Addr"]
                    )
                else:
                    swarm_leader_ip = node_info["Status"]["Addr"]
                node_info["Status"]["Addr"] = swarm_leader_ip
        return node_info

    def get_all_nodes_inspect(self) -> list[dict[str, t.Any]]:
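A note on the leader-address workaround above (moby/moby#35437): the colon count distinguishes an IPv4 "host:port" pair (exactly one colon) from an IPv6 address (several colons), and the `or` fallback kicks in when the host part is empty. A minimal standalone sketch of that logic, with a made-up node_info:

# Hypothetical node inspection snippet; real data comes from the Swarm API.
node_info = {
    "ManagerStatus": {"Leader": True, "Addr": ":2377"},  # empty host part
    "Status": {"Addr": "192.0.2.10"},
}

addr = node_info["ManagerStatus"]["Addr"]
if addr.count(":") == 1:
    # IPv4-style "host:port": take the host, falling back to the status address.
    swarm_leader_ip = addr.split(":", 1)[0] or node_info["Status"]["Addr"]
else:
    # More than one colon means an IPv6 address; trust the status address.
    swarm_leader_ip = node_info["Status"]["Addr"]
print(swarm_leader_ip)  # -> 192.0.2.10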
@ -7,7 +7,6 @@

from __future__ import annotations

import ipaddress
import json
import re
import typing as t

@ -28,7 +27,7 @@ if t.TYPE_CHECKING:
    from ._common_api import AnsibleDockerClientBase as CAPIADCB
    from ._common_cli import AnsibleDockerClientBase as CCLIADCB

    Client = t.Union[CADCB, CAPIADCB, CCLIADCB]  # noqa: UP007
    Client = t.Union[CADCB, CAPIADCB, CCLIADCB]


DEFAULT_DOCKER_HOST = "unix:///var/run/docker.sock"

@ -95,7 +94,9 @@ BYTE_SUFFIXES = ["B", "KB", "MB", "GB", "TB", "PB"]

def is_image_name_id(name: str) -> bool:
    """Check whether the given image name is in fact an image ID (hash)."""
    return bool(re.match("^sha256:[0-9a-fA-F]{64}$", name))
    if re.match("^sha256:[0-9a-fA-F]{64}$", name):
        return True
    return False


def is_valid_tag(tag: str, allow_empty: bool = False) -> bool:

@ -506,47 +507,3 @@ def omit_none_from_dict(d: dict[str, t.Any]) -> dict[str, t.Any]:
    Return a copy of the dictionary with all keys with value None omitted.
    """
    return {k: v for (k, v) in d.items() if v is not None}


@t.overload
def normalize_ip_address(ip_address: str) -> str: ...


@t.overload
def normalize_ip_address(ip_address: str | None) -> str | None: ...


def normalize_ip_address(ip_address: str | None) -> str | None:
    """
    Given an IP address as a string, normalize it so that it can be
    used to compare IP addresses as strings.
    """
    if ip_address is None:
        return None
    try:
        return ipaddress.ip_address(ip_address).compressed
    except ValueError:
        # Fallback for invalid addresses: simply return the input
        return ip_address


@t.overload
def normalize_ip_network(network: str) -> str: ...


@t.overload
def normalize_ip_network(network: str | None) -> str | None: ...


def normalize_ip_network(network: str | None) -> str | None:
    """
    Given a network in CIDR notation as a string, normalize it so that it can be
    used to compare networks as strings.
    """
    if network is None:
        return None
    try:
        return ipaddress.ip_network(network).compressed
    except ValueError:
        # Fallback for invalid networks: simply return the input
        return network
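These normalizers exist because the same address or network can be spelled several ways, and the modules compare values as strings. A quick standard-library illustration of what .compressed buys:

import ipaddress

# Two spellings of the same IPv6 address differ as raw strings ...
print("2001:db8:0:0::1" == "2001:db8::1")  # -> False
# ... but normalize to the same canonical form.
print(ipaddress.ip_address("2001:db8:0:0::1").compressed)  # -> 2001:db8::1
# Networks given with a netmask are canonicalized to prefix notation.
print(ipaddress.ip_network("192.0.2.0/255.255.255.0").compressed)  # -> 192.0.2.0/24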
@ -585,10 +585,10 @@ class ServicesManager(BaseComposeManager):
        return args

    def _are_containers_stopped(self) -> bool:
        return all(
            container["State"] in ("created", "exited", "stopped", "killed")
            for container in self.list_containers_raw()
        )
        for container in self.list_containers_raw():
            if container["State"] not in ("created", "exited", "stopped", "killed"):
                return False
        return True

    def cmd_stop(self) -> dict[str, t.Any]:
        # Since 'docker compose stop' **always** claims it is stopping containers, even if they are already
@ -210,14 +210,13 @@ class ExecManager(BaseComposeManager):
            self.stdin += "\n"

        if self.env is not None:
            for name, value in self.env.items():
            for name, value in list(self.env.items()):
                if not isinstance(value, str):
                    self.fail(
                        "Non-string value found for env option. Ambiguous env options must be "
                        "wrapped in quotes to avoid them being interpreted when directly specified "
                        "in YAML, or explicitly converted to strings when the option is templated. "
                        f"Key: {name}"
                        f"wrapped in quotes to avoid them being interpreted. Key: {name}"
                    )
                self.env[name] = to_text(value, errors="surrogate_or_strict")

    def get_exec_cmd(self, dry_run: bool) -> list[str]:
        args = self.get_base_args(plain_progress=True) + ["exec"]

@ -251,11 +250,11 @@ class ExecManager(BaseComposeManager):
            kwargs["data"] = self.stdin.encode("utf-8")
        if self.detach:
            kwargs["check_rc"] = True
        rc, stdout_b, stderr_b = self.client.call_cli(*args, **kwargs)
        rc, stdout, stderr = self.client.call_cli(*args, **kwargs)
        if self.detach:
            return {}
        stdout = to_text(stdout_b)
        stderr = to_text(stderr_b)
        stdout = to_text(stdout)
        stderr = to_text(stderr)
        if self.strip_empty_ends:
            stdout = stdout.rstrip("\r\n")
            stderr = stderr.rstrip("\r\n")

@ -296,14 +296,13 @@ class ExecManager(BaseComposeManager):
            self.stdin += "\n"

        if self.env is not None:
            for name, value in self.env.items():
            for name, value in list(self.env.items()):
                if not isinstance(value, str):
                    self.fail(
                        "Non-string value found for env option. Ambiguous env options must be "
                        "wrapped in quotes to avoid them being interpreted when directly specified "
                        "in YAML, or explicitly converted to strings when the option is templated. "
                        f"Key: {name}"
                        f"wrapped in quotes to avoid them being interpreted. Key: {name}"
                    )
                self.env[name] = to_text(value, errors="surrogate_or_strict")

    def get_run_cmd(self, dry_run: bool) -> list[str]:
        args = self.get_base_args(plain_progress=True) + ["run"]

@ -369,13 +368,13 @@ class ExecManager(BaseComposeManager):
            kwargs["data"] = self.stdin.encode("utf-8")
        if self.detach:
            kwargs["check_rc"] = True
        rc, stdout_b, stderr_b = self.client.call_cli(*args, **kwargs)
        rc, stdout, stderr = self.client.call_cli(*args, **kwargs)
        if self.detach:
            return {
                "container_id": to_text(stdout_b.strip()),
                "container_id": stdout.strip(),
            }
        stdout = to_text(stdout_b)
        stderr = to_text(stderr_b)
        stdout = to_text(stdout)
        stderr = to_text(stderr)
        if self.strip_empty_ends:
            stdout = stdout.rstrip("\r\n")
            stderr = stderr.rstrip("\r\n")
@ -287,20 +287,20 @@ def are_fileobjs_equal_read_first(


def is_container_file_not_regular_file(container_stat: dict[str, t.Any]) -> bool:
    return any(
        container_stat["mode"] & 1 << bit != 0
        for bit in (
            # https://pkg.go.dev/io/fs#FileMode
            32 - 1,  # ModeDir
            32 - 4,  # ModeTemporary
            32 - 5,  # ModeSymlink
            32 - 6,  # ModeDevice
            32 - 7,  # ModeNamedPipe
            32 - 8,  # ModeSocket
            32 - 11,  # ModeCharDevice
            32 - 13,  # ModeIrregular
        )
    )
    for bit in (
        # https://pkg.go.dev/io/fs#FileMode
        32 - 1,  # ModeDir
        32 - 4,  # ModeTemporary
        32 - 5,  # ModeSymlink
        32 - 6,  # ModeDevice
        32 - 7,  # ModeNamedPipe
        32 - 8,  # ModeSocket
        32 - 11,  # ModeCharDevice
        32 - 13,  # ModeIrregular
    ):
        if container_stat["mode"] & (1 << bit) != 0:
            return True
    return False


def get_container_file_mode(container_stat: dict[str, t.Any]) -> int:
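The bit positions above mirror Go's io/fs.FileMode, which keeps file-type flags in the topmost bits of a 32-bit mode, so "32 - 1" is bit 31 (ModeDir), "32 - 5" is bit 27 (ModeSymlink), and so on. A small self-contained sketch of the same test:

# Flags reconstructed from https://pkg.go.dev/io/fs#FileMode (1 << (32 - n)).
MODE_DIR = 1 << (32 - 1)
MODE_SYMLINK = 1 << (32 - 5)

def is_special(mode: int) -> bool:
    # True when either type flag is set; a plain regular file has neither.
    return bool(mode & (MODE_DIR | MODE_SYMLINK))

print(is_special(MODE_DIR))  # -> True
print(is_special(0o644))     # -> False (permission bits only)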
@ -420,7 +420,7 @@ def retrieve_diff(


def is_binary(content: bytes) -> bool:
    if b"\x00" in content:  # noqa: SIM103
    if b"\x00" in content:
        return True
    # TODO: better detection
    # (ansible-core also just checks for 0x00, and even just sticks to the first 8k, so this is not too bad...)

@ -695,10 +695,11 @@ def is_file_idempotent(
        mf = tar.extractfile(member)
        if mf is None:
            raise AssertionError("Member should be present for regular file")
        with mf as tar_f, open(managed_path, "rb") as local_f:
            is_equal = are_fileobjs_equal_with_diff_of_first(
                tar_f, local_f, member.size, diff, max_file_size_for_diff, in_path
            )
        with mf as tar_f:
            with open(managed_path, "rb") as local_f:
                is_equal = are_fileobjs_equal_with_diff_of_first(
                    tar_f, local_f, member.size, diff, max_file_size_for_diff, in_path
                )
        return container_path, mode, is_equal

    def process_symlink(in_path: str, member: tarfile.TarInfo) -> tuple[str, int, bool]:
@ -221,17 +221,16 @@ def main() -> None:
    stdin: str | None = client.module.params["stdin"]
    strip_empty_ends: bool = client.module.params["strip_empty_ends"]
    tty: bool = client.module.params["tty"]
    env: dict[str, t.Any] | None = client.module.params["env"]
    env: dict[str, t.Any] = client.module.params["env"]

    if env is not None:
        for name, value in env.items():
        for name, value in list(env.items()):
            if not isinstance(value, str):
                client.module.fail_json(
                    msg="Non-string value found for env option. Ambiguous env options must be "
                    "wrapped in quotes to avoid them being interpreted when directly specified "
                    "in YAML, or explicitly converted to strings when the option is templated. "
                    f"Key: {name}"
                    f"wrapped in quotes to avoid them being interpreted. Key: {name}"
                )
            env[name] = to_text(value, errors="surrogate_or_strict")

    if command is not None:
        argv = shlex.split(command)
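For context on the env check above: YAML happily turns unquoted scalars into integers or booleans, which is exactly what the module rejects. A hedged illustration (this assumes PyYAML, which is not part of the module itself):

import yaml  # PyYAML; an assumption for illustration only

parsed = yaml.safe_load("env:\n  RETRIES: 3\n  DEBUG: yes\n")
print(type(parsed["env"]["RETRIES"]))  # -> <class 'int'>
print(type(parsed["env"]["DEBUG"]))    # -> <class 'bool'>
# Quoting the values ("3", "yes") keeps them as strings and avoids the failure.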
@ -21,8 +21,6 @@ description:
notes:
  - Building images is done using Docker daemon's API. It is not possible to use BuildKit / buildx this way. Use M(community.docker.docker_image_build)
    to build images with BuildKit.
  - Exporting images is generally not idempotent. It depends on whether the image ID equals the IDs found in the generated tarball's C(manifest.json).
    This was the case with the default storage backend up to Docker 28, but seems to have changed in Docker 29.
extends_documentation_fragment:
  - community.docker._docker.api_documentation
  - community.docker._attributes
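The idempotency note above hinges on the IDs recorded in the archive produced by `docker save`. A minimal sketch of how such a check could inspect the tarball's manifest.json (the archive path is a placeholder):

import json
import tarfile

# Hypothetical archive created with `docker save -o image.tar <image>`.
with tarfile.open("image.tar") as tar:
    manifest_file = tar.extractfile("manifest.json")
    assert manifest_file is not None
    manifest = json.load(manifest_file)

# Each entry's "Config" names the image config blob, which encodes the image ID.
for entry in manifest:
    print(entry["Config"])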
@ -805,7 +803,7 @@ class ImageManager(DockerBaseClass):
                if line.get("errorDetail"):
                    raise RuntimeError(line["errorDetail"]["message"])
                status = line.get("status")
                if status in ("Pushing", "Pushed"):
                if status == "Pushing":
                    changed = True
            self.results["changed"] = changed
        except Exception as exc:  # pylint: disable=broad-exception-caught

@ -904,13 +902,11 @@ class ImageManager(DockerBaseClass):
            buildargs[key] = to_text(value)

        container_limits = self.container_limits or {}
        for key in container_limits:
        for key in container_limits.keys():
            if key not in CONTAINER_LIMITS_KEYS:
                raise DockerException(f"Invalid container_limits key {key}")

        dockerfile: tuple[str, str | None] | tuple[None, None] | str | None = (
            self.dockerfile
        )
        dockerfile = self.dockerfile
        if self.build_path.startswith(
            ("http://", "https://", "git://", "github.com/", "git@")
        ):

@ -928,8 +924,7 @@ class ImageManager(DockerBaseClass):
                    [line.strip() for line in f.read().splitlines()],
                )
            )
            dockerfile_data = process_dockerfile(self.dockerfile, self.build_path)
            dockerfile = dockerfile_data
            dockerfile_data = process_dockerfile(dockerfile, self.build_path)
        context = tar(
            self.build_path, exclude=exclude, dockerfile=dockerfile_data, gzip=False
        )

@ -1212,13 +1207,13 @@ def main() -> None:
    if not is_valid_tag(client.module.params["tag"], allow_empty=True):
        client.fail(f'"{client.module.params["tag"]}" is not a valid docker tag!')

    if client.module.params["source"] == "build" and (
        not client.module.params["build"]
        or not client.module.params["build"].get("path")
    ):
        client.fail(
            'If "source" is set to "build", the "build.path" option must be specified.'
        )
    if client.module.params["source"] == "build":
        if not client.module.params["build"] or not client.module.params["build"].get(
            "path"
        ):
            client.fail(
                'If "source" is set to "build", the "build.path" option must be specified.'
            )

    try:
        results = {"changed": False, "actions": [], "image": {}}
@ -368,20 +368,16 @@ class ImageBuilder(DockerBaseClass):

        if self.secrets:
            for secret in self.secrets:
                if secret["type"] in ("env", "value") and LooseVersion(
                    buildx_version
                ) < LooseVersion("0.6.0"):
                    self.fail(
                        f"The Docker buildx plugin has version {buildx_version}, but 0.6.0 is needed for secrets of type=env and type=value"
                    )
        if (
            self.outputs
            and len(self.outputs) > 1
            and LooseVersion(buildx_version) < LooseVersion("0.13.0")
        ):
            self.fail(
                f"The Docker buildx plugin has version {buildx_version}, but 0.13.0 is needed to specify more than one output"
            )
                if secret["type"] in ("env", "value"):
                    if LooseVersion(buildx_version) < LooseVersion("0.6.0"):
                        self.fail(
                            f"The Docker buildx plugin has version {buildx_version}, but 0.6.0 is needed for secrets of type=env and type=value"
                        )
        if self.outputs and len(self.outputs) > 1:
            if LooseVersion(buildx_version) < LooseVersion("0.13.0"):
                self.fail(
                    f"The Docker buildx plugin has version {buildx_version}, but 0.13.0 is needed to specify more than one output"
                )

        self.path = parameters["path"]
        if not os.path.isdir(self.path):

@ -534,8 +530,9 @@ class ImageBuilder(DockerBaseClass):
            "image": image or {},
        }

        if image and self.rebuild == "never":
            return results
        if image:
            if self.rebuild == "never":
                return results

        results["changed"] = True
        if not self.check_mode:
@ -28,13 +28,7 @@ attributes:
  diff_mode:
    support: none
  idempotent:
    support: partial
    details:
      - Whether the module is idempotent depends on the storage API used for images,
        which determines how the image ID is computed. The idempotency check needs
        that the image ID equals the ID stored in archive's C(manifest.json).
        This seemed to have worked fine with the default storage backend up to Docker 28,
        but seems to have changed in Docker 29.
    support: full

options:
  names:
@ -159,7 +159,7 @@ class ImagePusher(DockerBaseClass):
                if line.get("errorDetail"):
                    raise RuntimeError(line["errorDetail"]["message"])
                status = line.get("status")
                if status in ("Pushing", "Pushed"):
                if status == "Pushing":
                    results["changed"] = True
        except Exception as exc:  # pylint: disable=broad-exception-caught
            if "unauthorized" in str(exc):

@ -219,7 +219,6 @@ class ImageRemover(DockerBaseClass):

        elif is_image_name_id(name):
            deleted.append(image["Id"])
            # TODO: the following is no longer correct with Docker 29+...
            untagged[:] = sorted(
                (image.get("RepoTags") or []) + (image.get("RepoDigests") or [])
            )

@ -299,8 +299,6 @@ from ansible_collections.community.docker.plugins.module_utils._util import (
    DifferenceTracker,
    DockerBaseClass,
    clean_dict_booleans_for_docker_api,
    normalize_ip_address,
    normalize_ip_network,
    sanitize_labels,
)
@ -362,7 +360,6 @@ def validate_cidr(cidr: str) -> t.Literal["ipv4", "ipv6"]:
    :rtype: str
    :raises ValueError: If ``cidr`` is not a valid CIDR
    """
    # TODO: Use ipaddress for this instead of rolling your own...
    if CIDR_IPV4.match(cidr):
        return "ipv4"
    if CIDR_IPV6.match(cidr):

@ -392,19 +389,6 @@ def dicts_are_essentially_equal(a: dict[str, t.Any], b: dict[str, t.Any]) -> boo
    return True


def normalize_ipam_values(ipam_config: dict[str, t.Any]) -> dict[str, t.Any]:
    result = {}
    for key, value in ipam_config.items():
        if key in ("subnet", "iprange"):
            value = normalize_ip_network(value)
        elif key in ("gateway",):
            value = normalize_ip_address(value)
        elif key in ("aux_addresses",) and value is not None:
            value = {k: normalize_ip_address(v) for k, v in value.items()}
        result[key] = value
    return result


class DockerNetworkManager:
    def __init__(self, client: AnsibleDockerClient) -> None:
        self.client = client
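A usage sketch for normalize_ipam_values above; the input values are made up, and the point is only that equivalent spellings collapse to one canonical form before comparison:

ipam = {
    "subnet": "2001:db8:0:0::/64",  # uncompressed spelling
    "gateway": "2001:db8:0:0::1",
    "aux_addresses": None,
}
print(normalize_ipam_values(ipam))
# -> {'subnet': '2001:db8::/64', 'gateway': '2001:db8::1', 'aux_addresses': None}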
@ -494,21 +478,23 @@ class DockerNetworkManager:
                )
            else:
                for key, value in self.parameters.driver_options.items():
                    if key not in net["Options"] or value != net["Options"][key]:
                    if not (key in net["Options"]) or value != net["Options"][key]:
                        differences.add(
                            f"driver_options.{key}",
                            parameter=value,
                            active=net["Options"].get(key),
                        )

        if self.parameters.ipam_driver and (
            not net.get("IPAM") or net["IPAM"]["Driver"] != self.parameters.ipam_driver
        ):
            differences.add(
                "ipam_driver",
                parameter=self.parameters.ipam_driver,
                active=net.get("IPAM"),
            )
        if self.parameters.ipam_driver:
            if (
                not net.get("IPAM")
                or net["IPAM"]["Driver"] != self.parameters.ipam_driver
            ):
                differences.add(
                    "ipam_driver",
                    parameter=self.parameters.ipam_driver,
                    active=net.get("IPAM"),
                )

        if self.parameters.ipam_driver_options is not None:
            ipam_driver_options = net["IPAM"].get("Options") or {}

@ -529,35 +515,24 @@ class DockerNetworkManager:
        else:
            # Put network's IPAM config into the same format as module's IPAM config
            net_ipam_configs = []
            net_ipam_configs_normalized = []
            for net_ipam_config in net["IPAM"]["Config"]:
                config = {}
                for k, v in net_ipam_config.items():
                    config[normalize_ipam_config_key(k)] = v
                net_ipam_configs.append(config)
                net_ipam_configs_normalized.append(normalize_ipam_values(config))
            # Compare lists of dicts as sets of dicts
            for idx, ipam_config in enumerate(self.parameters.ipam_config):
                ipam_config_normalized = normalize_ipam_values(ipam_config)
                net_config = {}
                net_config_normalized = {}
                for net_ipam_config, net_ipam_config_normalized in zip(
                    net_ipam_configs, net_ipam_configs_normalized
                ):
                    if dicts_are_essentially_equal(
                        ipam_config_normalized, net_ipam_config_normalized
                    ):
                for net_ipam_config in net_ipam_configs:
                    if dicts_are_essentially_equal(ipam_config, net_ipam_config):
                        net_config = net_ipam_config
                        net_config_normalized = net_ipam_config_normalized
                        break
                for key, value in ipam_config.items():
                    if value is None:
                        # due to recursive argument_spec, all keys are always present
                        # (but have default value None if not specified)
                        continue
                    if ipam_config_normalized[key] != net_config_normalized.get(
                        key
                    ):
                    if value != net_config.get(key):
                        differences.add(
                            f"ipam_config[{idx}].{key}",
                            parameter=value,

@ -622,7 +597,7 @@ class DockerNetworkManager:
                )
            else:
                for key, value in self.parameters.labels.items():
                    if key not in net["Labels"] or value != net["Labels"][key]:
                    if not (key in net["Labels"]) or value != net["Labels"][key]:
                        differences.add(
                            f"labels.{key}",
                            parameter=value,
@ -216,14 +216,14 @@ class SwarmNodeManager(DockerBaseClass):
        if self.parameters.role is None:
            node_spec["Role"] = node_info["Spec"]["Role"]
        else:
            if node_info["Spec"]["Role"] != self.parameters.role:
            if not node_info["Spec"]["Role"] == self.parameters.role:
                node_spec["Role"] = self.parameters.role
                changed = True

        if self.parameters.availability is None:
            node_spec["Availability"] = node_info["Spec"]["Availability"]
        else:
            if node_info["Spec"]["Availability"] != self.parameters.availability:
            if not node_info["Spec"]["Availability"] == self.parameters.availability:
                node_info["Spec"]["Availability"] = self.parameters.availability
                changed = True
@ -1,4 +1,5 @@
#!/usr/bin/python
# coding: utf-8
#
# Copyright (c) 2021 Red Hat | Ansible Sakar Mehra<@sakarmehra100@gmail.com | @sakar97>
# Copyright (c) 2019, Vladimir Porshkevich (@porshkevich) <neosonic@mail.ru>

@ -280,7 +281,7 @@ class DockerPluginManager:
                stream=True,
            )
            self.client._raise_for_status(response)
            for dummy in self.client._stream_helper(response, decode=True):
            for data in self.client._stream_helper(response, decode=True):
                pass
            # Inspect and configure plugin
            self.existing_plugin = self.client.get_json(
@ -322,7 +322,7 @@ def main() -> None:
            before_after_differences = json_diff(
                before_stack_services, after_stack_services
            )
            for k in before_after_differences:
            for k in before_after_differences.keys():
                if isinstance(before_after_differences[k], dict):
                    before_after_differences[k].pop("UpdatedAt", None)
                    before_after_differences[k].pop("Version", None)

@ -554,8 +554,9 @@ class SwarmManager(DockerBaseClass):
        except APIError as exc:
            self.client.fail(f"Can not create a new Swarm Cluster: {exc}")

        if not self.client.check_if_swarm_manager() and not self.check_mode:
            self.client.fail("Swarm not created or other error!")
        if not self.client.check_if_swarm_manager():
            if not self.check_mode:
                self.client.fail("Swarm not created or other error!")

        self.created = True
        self.inspect_swarm()
@ -914,10 +914,8 @@ def get_docker_environment(
        for name, value in env.items():
            if not isinstance(value, str):
                raise ValueError(
                    "Non-string value found for env option. Ambiguous env options must be "
                    "wrapped in quotes to avoid them being interpreted when directly specified "
                    "in YAML, or explicitly converted to strings when the option is templated. "
                    f"Key: {name}"
                    "Non-string value found for env option. "
                    f"Ambiguous env options must be wrapped in quotes to avoid YAML parsing. Key: {name}"
                )
            env_dict[name] = str(value)
    elif env is not None and isinstance(env, list):

@ -2382,13 +2380,13 @@ class DockerServiceManager:
        ds.container_labels = task_template_data["ContainerSpec"].get("Labels")

        mode = raw_data["Spec"]["Mode"]
        if "Replicated" in mode:
            ds.mode = to_text("replicated", encoding="utf-8")  # type: ignore
        if "Replicated" in mode.keys():
            ds.mode = to_text("replicated", encoding="utf-8")
            ds.replicas = mode["Replicated"]["Replicas"]
        elif "Global" in mode:
        elif "Global" in mode.keys():
            ds.mode = "global"
        elif "ReplicatedJob" in mode:
            ds.mode = to_text("replicated-job", encoding="utf-8")  # type: ignore
        elif "ReplicatedJob" in mode.keys():
            ds.mode = to_text("replicated-job", encoding="utf-8")
            ds.replicas = mode["ReplicatedJob"]["TotalCompletions"]
        else:
            raise ValueError(f"Unknown service mode: {mode}")

@ -2651,9 +2649,10 @@ class DockerServiceManager:


def _detect_publish_mode_usage(client: AnsibleDockerClient) -> bool:
    return any(
        publish_def.get("mode") for publish_def in client.module.params["publish"] or []
    )
    for publish_def in client.module.params["publish"] or []:
        if publish_def.get("mode"):
            return True
    return False


def _detect_healthcheck_start_period(client: AnsibleDockerClient) -> bool:
@ -1,4 +1,5 @@
#!/usr/bin/python
# coding: utf-8
#
# Copyright 2017 Red Hat | Ansible, Alex Grönholm <alex.gronholm@nextday.fi>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)

@ -1,4 +1,5 @@
#!/usr/bin/python
# coding: utf-8
#
# Copyright 2017 Red Hat | Ansible, Alex Grönholm <alex.gronholm@nextday.fi>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
31
ruff.toml
@ -1,31 +0,0 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
# SPDX-FileCopyrightText: 2025 Felix Fontein <felix@fontein.de>

line-length = 160

[lint]
# https://docs.astral.sh/ruff/rules/

select = ["A", "B", "E", "F", "FA", "FLY", "UP", "SIM"]
ignore = [
    # Better keep ignored (for now)
    "F811",  # Redefinition of unused `xxx` (happens a lot for fixtures in unit tests)
    "E402",  # Module level import not at top of file
    "E741",  # Ambiguous variable name
    "UP012",  # unnecessary-encode-utf8
    "UP015",  # Unnecessary mode argument
    "SIM105",  # suppressible-exception
    "SIM108",  # if-else-block-instead-of-if-exp
    # To fix later:
    "B905",  # zip-without-explicit-strict - needs Python 3.10+
    # To fix:
    "UP024",  # Replace aliased errors with `OSError`
]

# Allow fix for all enabled rules (when `--fix`) is provided.
fixable = ["ALL"]
unfixable = []

# Allow unused variables when underscore-prefixed or starting with dummy
dummy-variable-rgx = "^(_|dummy).*$"
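For context, the file above is the collection-level ruff configuration removed in this compare. With such a file at the repository root, the linter would typically be invoked along these lines (paths are illustrative):

# ruff check --config ruff.toml plugins/ tests/
# ruff check --fix --config ruff.toml plugins/   (applies autofixes for the rules marked fixable)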
@ -124,16 +124,13 @@
      # - present_3_check is changed -- whether this is true depends on a combination of Docker CLI and Docker Compose version...
      #   Compose 2.37.3 with Docker 28.2.x results in 'changed', while Compose 2.37.3 with Docker 28.3.0 results in 'not changed'.
      #   It seems that Docker is now clever enough to notice that nothing is rebuilt...
      #   With Docker 29.0.0, the behavior seems to change again... I'm currently tending to simply ignore this check, for that
      #   reason the next three lines are commented out:
      # - present_3_check.warnings | default([]) | select('regex', ' please report this at ') | length == 0
      # - ((present_3 is changed) if docker_compose_version is version('2.31.0', '>=') and docker_compose_version is version('2.32.2', '<') else (present_3 is not changed))
      # - present_3.warnings | default([]) | select('regex', ' please report this at ') | length == 0
      # Same as above:
      - present_3_check.warnings | default([]) | select('regex', ' please report this at ') | length == 0
      - ((present_3 is changed) if docker_compose_version is version('2.31.0', '>=') and docker_compose_version is version('2.32.2', '<') else (present_3 is not changed))
      - present_3.warnings | default([]) | select('regex', ' please report this at ') | length == 0
      # - present_4_check is changed
      # Same as above...
      - present_4_check.warnings | default([]) | select('regex', ' please report this at ') | length == 0
      # Also seems like a hopeless case with Docker 29:
      # - present_4 is not changed
      - present_4 is not changed
      - present_4.warnings | default([]) | select('regex', ' please report this at ') | length == 0

  always:
@ -81,19 +81,16 @@
- ansible.builtin.assert:
    that:
      - present_1_check is failed or present_1_check is changed
      - present_1_check is changed or 'General error:' in present_1_check.msg
      - present_1_check is changed or present_1_check.msg.startswith('General error:')
      - present_1_check.warnings | default([]) | select('regex', ' please report this at ') | length == 0
      - present_1 is failed
      - >-
        'General error:' in present_1.msg
      - present_1.msg.startswith('General error:')
      - present_1.warnings | default([]) | select('regex', ' please report this at ') | length == 0
      - present_2_check is failed
      - present_2_check.msg.startswith('Error when processing ' ~ cname ~ ':') or
        present_2_check.msg.startswith('Error when processing image ' ~ non_existing_image ~ ':')
      - present_2_check.msg.startswith('Error when processing ' ~ cname ~ ':')
      - present_2_check.warnings | default([]) | select('regex', ' please report this at ') | length == 0
      - present_2 is failed
      - present_2.msg.startswith('Error when processing ' ~ cname ~ ':') or
        present_2.msg.startswith('Error when processing image ' ~ non_existing_image ~ ':')
      - present_2.msg.startswith('Error when processing ' ~ cname ~ ':')
      - present_2.warnings | default([]) | select('regex', ' please report this at ') | length == 0

####################################################################
@ -9,10 +9,12 @@
    non_existing_image: does-not-exist:latest
    project_src: "{{ remote_tmp_dir }}/{{ pname }}"
    test_service_non_existing: |
      version: '3'
      services:
        {{ cname }}:
          image: {{ non_existing_image }}
    test_service_simple: |
      version: '3'
      services:
        {{ cname }}:
          image: {{ docker_test_image_simple_1 }}
@ -77,8 +77,7 @@
- ansible.builtin.assert:
    that:
      - result_1.rc == 0
      # Since Compose 5, unrelated output shows up in stderr...
      - result_1.stderr == "" or ("Creating" in result_1.stderr and "Created" in result_1.stderr)
      - result_1.stderr == ""
      - >-
        "usr" in result_1.stdout_lines
        and
@ -37,10 +37,7 @@
  register: docker_host_info

# Run the tests
- module_defaults:
    community.general.docker_container:
      debug: true
  block:
- block:
    - ansible.builtin.include_tasks: run-test.yml
      with_fileglob:
        - "tests/*.yml"
@ -128,7 +128,6 @@
    image: "{{ docker_test_image_digest_base }}@sha256:{{ docker_test_image_digest_v1 }}"
    name: "{{ cname }}"
    pull: true
    debug: true
    state: present
    force_kill: true
  register: digest_3
@ -3077,14 +3077,10 @@
    that:
      - log_options_1 is changed
      - log_options_2 is not changed
      - message in (log_options_2.warnings | default([]))
      - "'Non-string value found for log_options option \\'max-file\\'. The value is automatically converted to \\'5\\'. If this is not correct, or you want to
        avoid such warnings, please quote the value.' in (log_options_2.warnings | default([]))"
      - log_options_3 is not changed
      - log_options_4 is changed
  vars:
    message: >-
      Non-string value found for log_options option 'max-file'. The value is automatically converted to '5'.
      If this is not correct, or you want to avoid such warnings, please quote the value,
      or explicitly convert the values to strings when templating them.
####################################################################
## mac_address #####################################################

@ -3690,6 +3686,18 @@
  register: platform_5
  ignore_errors: true

- name: platform (idempotency)
  community.docker.docker_container:
    image: "{{ docker_test_image_simple_1 }}"
    name: "{{ cname }}"
    state: present
    pull: true
    platform: 386
    force_kill: true
    debug: true
  register: platform_6
  ignore_errors: true

- name: cleanup
  community.docker.docker_container:
    name: "{{ cname }}"

@ -3704,6 +3712,7 @@
      - platform_3 is not changed and platform_3 is not failed
      - platform_4 is not changed and platform_4 is not failed
      - platform_5 is changed
      - platform_6 is not changed and platform_6 is not failed
  when: docker_api_version is version('1.41', '>=')
- ansible.builtin.assert:
    that:
@ -106,101 +106,6 @@
    force_kill: true
  register: published_ports_3

- name: published_ports -- port range (same range, but listed explicitly)
  community.docker.docker_container:
    image: "{{ docker_test_image_alpine }}"
    command: '/bin/sh -c "sleep 10m"'
    name: "{{ cname }}"
    state: started
    exposed_ports:
      - "9001"
      - "9010"
      - "9011"
      - "9012"
      - "9013"
      - "9014"
      - "9015"
      - "9016"
      - "9017"
      - "9018"
      - "9019"
      - "9020"
      - "9021"
      - "9022"
      - "9023"
      - "9024"
      - "9025"
      - "9026"
      - "9027"
      - "9028"
      - "9029"
      - "9030"
      - "9031"
      - "9032"
      - "9033"
      - "9034"
      - "9035"
      - "9036"
      - "9037"
      - "9038"
      - "9039"
      - "9040"
      - "9041"
      - "9042"
      - "9043"
      - "9044"
      - "9045"
      - "9046"
      - "9047"
      - "9048"
      - "9049"
      - "9050"
    published_ports:
      - "9001:9001"
      - "9020:9020"
      - "9021:9021"
      - "9022:9022"
      - "9023:9023"
      - "9024:9024"
      - "9025:9025"
      - "9026:9026"
      - "9027:9027"
      - "9028:9028"
      - "9029:9029"
      - "9030:9030"
      - "9031:9031"
      - "9032:9032"
      - "9033:9033"
      - "9034:9034"
      - "9035:9035"
      - "9036:9036"
      - "9037:9037"
      - "9038:9038"
      - "9039:9039"
      - "9040:9040"
      - "9041:9041"
      - "9042:9042"
      - "9043:9043"
      - "9044:9044"
      - "9045:9045"
      - "9046:9046"
      - "9047:9047"
      - "9048:9048"
      - "9049:9049"
      - "9050:9050"
      - "9051:9051"
      - "9052:9052"
      - "9053:9053"
      - "9054:9054"
      - "9055:9055"
      - "9056:9056"
      - "9057:9057"
      - "9058:9058"
      - "9059:9059"
      - "9060:9060"
    force_kill: true
  register: published_ports_4

- name: cleanup
  community.docker.docker_container:
    name: "{{ cname }}"

@ -213,7 +118,6 @@
      - published_ports_1 is changed
      - published_ports_2 is not changed
      - published_ports_3 is changed
      - published_ports_4 is not changed

####################################################################
## published_ports: one-element container port range ###############

@ -277,58 +181,6 @@
      - published_ports_2 is not changed
      - published_ports_3 is changed

####################################################################
## published_ports: duplicate ports ################################
####################################################################

- name: published_ports -- duplicate ports
  community.docker.docker_container:
    image: "{{ docker_test_image_alpine }}"
    command: '/bin/sh -c "sleep 10m"'
    name: "{{ cname }}"
    state: started
    published_ports:
      - 8000:80
      - 10000:80
  register: published_ports_1

- name: published_ports -- duplicate ports (idempotency)
  community.docker.docker_container:
    image: "{{ docker_test_image_alpine }}"
    command: '/bin/sh -c "sleep 10m"'
    name: "{{ cname }}"
    state: started
    published_ports:
      - 8000:80
      - 10000:80
    force_kill: true
  register: published_ports_2

- name: published_ports -- duplicate ports (idempotency w/ protocol)
  community.docker.docker_container:
    image: "{{ docker_test_image_alpine }}"
    command: '/bin/sh -c "sleep 10m"'
    name: "{{ cname }}"
    state: started
    published_ports:
      - 8000:80/tcp
      - 10000:80/tcp
    force_kill: true
  register: published_ports_3

- name: cleanup
  community.docker.docker_container:
    name: "{{ cname }}"
    state: absent
    force_kill: true
    diff: false

- ansible.builtin.assert:
    that:
      - published_ports_1 is changed
      - published_ports_2 is not changed
      - published_ports_3 is not changed

####################################################################
## published_ports: IPv6 addresses #################################
####################################################################
@ -256,10 +256,6 @@
- ansible.builtin.assert:
    that:
      - archive_image_2 is not changed
  when: docker_cli_version is version("29.0.0", "<")
  # Apparently idempotency no longer works with the default storage backend
  # in Docker 29.0.0.
  # https://github.com/ansible-collections/community.docker/pull/1199

- name: Archive image 3rd time, should overwrite due to different id
  community.docker.docker_image:
@ -67,7 +67,3 @@
    manifests_json: "{{ manifests.results | map(attribute='stdout') | map('from_json') }}"
    manifest_json_images: "{{ item.2 | map(attribute='Config') | map('regex_replace', '.json$', '') | map('regex_replace', '^blobs/sha256/', '') | sort }}"
    export_image_ids: "{{ item.1 | map('regex_replace', '^sha256:', '') | unique | sort }}"
  when: docker_cli_version is version("29.0.0", "<")
  # Apparently idempotency no longer works with the default storage backend
  # in Docker 29.0.0.
  # https://github.com/ansible-collections/community.docker/pull/1199
@ -73,17 +73,11 @@
  loop: "{{ all_images }}"
  when: remove_all_images is failed

- name: Show all images
  ansible.builtin.command: docker image ls

- name: Load all images (IDs)
  community.docker.docker_image_load:
    path: "{{ remote_tmp_dir }}/archive-2.tar"
  register: result

- name: Show all images
  ansible.builtin.command: docker image ls

- name: Print loaded image names
  ansible.builtin.debug:
    var: result.image_names

@ -116,17 +110,11 @@
    name: "{{ item }}"
  loop: "{{ all_images }}"

- name: Show all images
  ansible.builtin.command: docker image ls

- name: Load all images (mixed images and IDs)
  community.docker.docker_image_load:
    path: "{{ remote_tmp_dir }}/archive-3.tar"
  register: result

- name: Show all images
  ansible.builtin.command: docker image ls

- name: Print loading log
  ansible.builtin.debug:
    var: result.stdout_lines

@ -139,14 +127,10 @@
    that:
      - result is changed
      # For some reason, *sometimes* only the named image is found; in fact, in that case, the log only mentions that image and nothing else
      # With Docker 29, a third possibility appears: just two entries.
      - >-
        result.images | length == 3
        or ('Loaded image: ' ~ docker_test_image_hello_world) == result.stdout
        or result.images | length == 2
      - (result.image_names | sort) in [[image_names[0], image_ids[0], image_ids[1]] | sort, [image_names[0], image_ids[1]] | sort, [image_names[0]]]
      - result.images | length in [1, 2, 3]
      - (result.images | map(attribute='Id') | sort) in [[image_ids[0], image_ids[0], image_ids[1]] | sort, [image_ids[0], image_ids[1]] | sort, [image_ids[0]]]
      - "result.images | length == 3 or ('Loaded image: ' ~ docker_test_image_hello_world) == result.stdout"
      - (result.image_names | sort) in [[image_names[0], image_ids[0], image_ids[1]] | sort, [image_names[0]]]
      - result.images | length in [1, 3]
      - (result.images | map(attribute='Id') | sort) in [[image_ids[0], image_ids[0], image_ids[1]] | sort, [image_ids[0]]]

# Same image twice

@ -155,17 +139,11 @@
    name: "{{ item }}"
  loop: "{{ all_images }}"

- name: Show all images
  ansible.builtin.command: docker image ls

- name: Load all images (same image twice)
  community.docker.docker_image_load:
    path: "{{ remote_tmp_dir }}/archive-4.tar"
  register: result

- name: Show all images
  ansible.builtin.command: docker image ls

- name: Print loaded image names
  ansible.builtin.debug:
    var: result.image_names

@ -173,11 +151,10 @@
- ansible.builtin.assert:
    that:
      - result is changed
      - result.image_names | length in [1, 2]
      - (result.image_names | sort) in [[image_names[0]], [image_names[0], image_ids[0]] | sort]
      - result.images | length in [1, 2]
      - result.image_names | length == 1
      - result.image_names[0] == image_names[0]
      - result.images | length == 1
      - result.images[0].Id == image_ids[0]
      - result.images[1].Id | default(image_ids[0]) == image_ids[0]

# Single image by ID

@ -186,17 +163,11 @@
    name: "{{ item }}"
  loop: "{{ all_images }}"

- name: Show all images
  ansible.builtin.command: docker image ls

- name: Load all images (single image by ID)
  community.docker.docker_image_load:
    path: "{{ remote_tmp_dir }}/archive-5.tar"
  register: result

- name: Show all images
  ansible.builtin.command: docker image ls

- name: Print loaded image names
  ansible.builtin.debug:
    var: result.image_names

@ -226,17 +197,11 @@
    name: "{{ item }}"
  loop: "{{ all_images }}"

- name: Show all images
  ansible.builtin.command: docker image ls

- name: Load all images (names)
  community.docker.docker_image_load:
    path: "{{ remote_tmp_dir }}/archive-1.tar"
  register: result

- name: Show all images
  ansible.builtin.command: docker image ls

- name: Print loaded image names
  ansible.builtin.debug:
    var: result.image_names

@ -142,8 +142,6 @@
      - present_3_check.actions[0] == ('Pulled image ' ~ image_name)
      - present_3_check.diff.before.id == present_1.diff.after.id
      - present_3_check.diff.after.id == 'unknown'
- ansible.builtin.assert:
    that:
      - present_3 is changed
      - present_3.actions | length == 1
      - present_3.actions[0] == ('Pulled image ' ~ image_name)

@ -168,11 +166,6 @@
      - present_5.actions[0] == ('Pulled image ' ~ image_name)
      - present_5.diff.before.id == present_3.diff.after.id
      - present_5.diff.after.id == present_1.diff.after.id
  when: docker_cli_version is version("29.0.0", "<")
  # From Docker 29 on, Docker won't pull images for other architectures
  # if there are better matching ones. The above tests assume it will
  # just do what it is told, and thus fail from 29.0.0 on.
  # https://github.com/ansible-collections/community.docker/pull/1199

  always:
    - name: cleanup
@ -7,9 +7,11 @@
  block:
    - name: Make sure images are not there
      community.docker.docker_image_remove:
        name: "sha256:{{ item }}"
        name: "{{ item }}"
        force: true
      loop: "{{ docker_test_image_digest_v1_image_ids + docker_test_image_digest_v2_image_ids }}"
      loop:
        - "sha256:{{ docker_test_image_digest_v1_image_id }}"
        - "sha256:{{ docker_test_image_digest_v2_image_id }}"

    - name: Pull image 1
      community.docker.docker_image_pull:

@ -80,6 +82,8 @@
  always:
    - name: cleanup
      community.docker.docker_image_remove:
        name: "sha256:{{ item }}"
        name: "{{ item }}"
        force: true
      loop: "{{ docker_test_image_digest_v1_image_ids + docker_test_image_digest_v2_image_ids }}"
      loop:
        - "sha256:{{ docker_test_image_digest_v1_image_id }}"
        - "sha256:{{ docker_test_image_digest_v2_image_id }}"
@ -18,7 +18,7 @@

- name: Push image ID (must fail)
  community.docker.docker_image_push:
    name: "sha256:{{ docker_test_image_digest_v1_image_ids[0] }}"
    name: "sha256:{{ docker_test_image_digest_v1_image_id }}"
  register: fail_2
  ignore_errors: true


@ -80,6 +80,4 @@
    that:
      - push_4 is failed
      - >-
        push_4.msg.startswith('Error pushing image ' ~ image_name_base2 ~ ':' ~ image_tag ~ ': ')
      - >-
        push_4.msg.endswith(': no basic auth credentials')
      - push_4.msg == ('Error pushing image ' ~ image_name_base2 ~ ':' ~ image_tag ~ ': no basic auth credentials')
@ -8,16 +8,15 @@
# and should not be used as examples of how to write Ansible roles #
####################################################################

- vars:
    image: "{{ docker_test_image_hello_world }}"
    image_ids: "{{ docker_test_image_hello_world_image_ids }}"
  block:
- block:
    - name: Pick image prefix
      ansible.builtin.set_fact:
        iname_prefix: "{{ 'ansible-docker-test-%0x' % ((2**32) | random) }}"

    - name: Define image names
      ansible.builtin.set_fact:
        image: "{{ docker_test_image_hello_world }}"
        image_id: "{{ docker_test_image_hello_world_image_id }}"
        image_names:
          - "{{ iname_prefix }}-tagged-1:latest"
          - "{{ iname_prefix }}-tagged-1:foo"

@ -25,9 +24,8 @@

    - name: Remove image complete
      community.docker.docker_image_remove:
        name: "{{ item }}"
        name: "{{ image_id }}"
        force: true
      loop: "{{ image_ids }}"

    - name: Remove tagged images
      community.docker.docker_image_remove:
@ -104,11 +102,10 @@
      - remove_2 is changed
      - remove_2.diff.before.id == pulled_image.image.Id
      - remove_2.diff.before.tags | length == 4
      # With Docker 29, there are now two digests in before and after:
      - remove_2.diff.before.digests | length in [1, 2]
      - remove_2.diff.before.digests | length == 1
      - remove_2.diff.after.id == pulled_image.image.Id
      - remove_2.diff.after.tags | length == 3
      - remove_2.diff.after.digests | length in [1, 2]
      - remove_2.diff.after.digests | length == 1
      - remove_2.deleted | length == 0
      - remove_2.untagged | length == 1
      - remove_2.untagged[0] == (iname_prefix ~ '-tagged-1:latest')

@ -177,11 +174,10 @@
      - remove_4 is changed
      - remove_4.diff.before.id == pulled_image.image.Id
      - remove_4.diff.before.tags | length == 3
      # With Docker 29, there are now two digests in before and after:
      - remove_4.diff.before.digests | length in [1, 2]
      - remove_4.diff.before.digests | length == 1
      - remove_4.diff.after.id == pulled_image.image.Id
      - remove_4.diff.after.tags | length == 2
      - remove_4.diff.after.digests | length in [1, 2]
      - remove_4.diff.after.digests | length == 1
      - remove_4.deleted | length == 0
      - remove_4.untagged | length == 1
      - remove_4.untagged[0] == (iname_prefix ~ '-tagged-1:foo')

@ -249,22 +245,16 @@
      - remove_6 is changed
      - remove_6.diff.before.id == pulled_image.image.Id
      - remove_6.diff.before.tags | length == 2
      # With Docker 29, there are now two digests in before and after:
      - remove_6.diff.before.digests | length in [1, 2]
      - remove_6.diff.before.digests | length == 1
      - remove_6.diff.after.exists is false
      - remove_6.deleted | length >= 1
      - remove_6.deleted | length > 1
      - pulled_image.image.Id in remove_6.deleted
      - remove_6.untagged | length in [2, 3]
      - remove_6.untagged | length == 3
      - (iname_prefix ~ '-tagged-1:bar') in remove_6.untagged
      - image in remove_6.untagged
      - remove_6_check.deleted | length == 1
      - remove_6_check.deleted[0] == pulled_image.image.Id
      # The following is only true for Docker < 29...
      # We use the CLI version as a proxy...
      - >-
        remove_6_check.untagged == remove_6.untagged
        or
        docker_cli_version is version("29.0.0", ">=")
      - remove_6_check.untagged == remove_6.untagged
      - info_5.images | length == 0

- name: Remove image ID (force, idempotent, check mode)
@ -133,24 +133,8 @@

- name: Get proxied daemon URLs
  ansible.builtin.set_fact:
    # Since Docker 29, nginx_container.container.NetworkSettings.IPAddress no longer exists.
    # Use the bridge network's IP address instead...
    docker_daemon_frontend_https: >-
      https://{{
        nginx_container.container.NetworkSettings.Networks[current_container_network_ip].IPAddress
        if current_container_network_ip else (
          nginx_container.container.NetworkSettings.IPAddress
          | default(nginx_container.container.NetworkSettings.Networks['bridge'].IPAddress)
        )
      }}:5000
    docker_daemon_frontend_http: >-
      http://{{
        nginx_container.container.NetworkSettings.Networks[current_container_network_ip].IPAddress
        if current_container_network_ip else (
          nginx_container.container.NetworkSettings.IPAddress
          | default(nginx_container.container.NetworkSettings.Networks['bridge'].IPAddress)
        )
      }}:6000
    docker_daemon_frontend_https: "https://{{ nginx_container.container.NetworkSettings.Networks[current_container_network_ip].IPAddress if current_container_network_ip else nginx_container.container.NetworkSettings.IPAddress }}:5000"
    docker_daemon_frontend_http: "http://{{ nginx_container.container.NetworkSettings.Networks[current_container_network_ip].IPAddress if current_container_network_ip else nginx_container.container.NetworkSettings.IPAddress }}:6000"

- name: Wait for registry frontend
  ansible.builtin.uri:
@ -4,18 +4,12 @@
# SPDX-License-Identifier: GPL-3.0-or-later

docker_test_image_digest_v1: e004c2cc521c95383aebb1fb5893719aa7a8eae2e7a71f316a4410784edb00a9
docker_test_image_digest_v1_image_ids:
  - 758ec7f3a1ee85f8f08399b55641bfb13e8c1109287ddc5e22b68c3d653152ee  # Docker 28 and before
  - e004c2cc521c95383aebb1fb5893719aa7a8eae2e7a71f316a4410784edb00a9  # Docker 29
docker_test_image_digest_v1_image_id: 758ec7f3a1ee85f8f08399b55641bfb13e8c1109287ddc5e22b68c3d653152ee
docker_test_image_digest_v2: ee44b399df993016003bf5466bd3eeb221305e9d0fa831606bc7902d149c775b
docker_test_image_digest_v2_image_ids:
  - dc3bacd8b5ea796cea5d6070c8f145df9076f26a6bc1c8981fd5b176d37de843  # Docker 28 and before
  - ee44b399df993016003bf5466bd3eeb221305e9d0fa831606bc7902d149c775b  # Docker 29
docker_test_image_digest_v2_image_id: dc3bacd8b5ea796cea5d6070c8f145df9076f26a6bc1c8981fd5b176d37de843
docker_test_image_digest_base: quay.io/ansible/docker-test-containers
docker_test_image_hello_world: quay.io/ansible/docker-test-containers:hello-world
docker_test_image_hello_world_image_ids:
  - sha256:bf756fb1ae65adf866bd8c456593cd24beb6a0a061dedf42b26a993176745f6b  # Docker 28 and before
  - sha256:90659bf80b44ce6be8234e6ff90a1ac34acbeb826903b02cfa0da11c82cbc042  # Docker 29
docker_test_image_hello_world_image_id: sha256:bf756fb1ae65adf866bd8c456593cd24beb6a0a061dedf42b26a993176745f6b
docker_test_image_hello_world_base: quay.io/ansible/docker-test-containers
docker_test_image_busybox: quay.io/ansible/docker-test-containers:busybox
docker_test_image_alpine: quay.io/ansible/docker-test-containers:alpine3.8
@ -102,17 +102,7 @@
    # This host/port combination cannot be used if the tests are running inside a docker container.
    docker_registry_frontend_address: localhost:{{ nginx_container.container.NetworkSettings.Ports['5000/tcp'].0.HostPort }}
    # The following host/port combination can be used from inside the docker container.
    docker_registry_frontend_address_internal: >-
      {{
        nginx_container.container.NetworkSettings.Networks[current_container_network_ip].IPAddress
        if current_container_network_ip else
        (
          nginx_container.container.NetworkSettings.IPAddress
          | default(nginx_container.container.NetworkSettings.Networks['bridge'].IPAddress)
        )
      }}:5000
    # Since Docker 29, nginx_container.container.NetworkSettings.IPAddress no longer exists.
    # Use the bridge network's IP address instead...
    docker_registry_frontend_address_internal: "{{ nginx_container.container.NetworkSettings.Networks[current_container_network_ip].IPAddress if current_container_network_ip else nginx_container.container.NetworkSettings.IPAddress }}:5000"

- name: Wait for registry frontend
  ansible.builtin.uri:
@ -27,7 +27,7 @@
- name: Install cryptography (Darwin, and potentially upgrade for other OSes)
  become: true
  ansible.builtin.pip:
    name: cryptography>=3.3.0
    name: cryptography>=1.3.0
    extra_args: "-c {{ remote_constraints }}"

- name: Register cryptography version
@ -226,7 +226,7 @@ class DockerApiTest(BaseAPIClientTest):
|
||||
def test_retrieve_server_version(self) -> None:
|
||||
client = APIClient(version="auto")
|
||||
assert isinstance(client._version, str)
|
||||
assert client._version != "auto"
|
||||
assert not (client._version == "auto")
|
||||
client.close()
|
||||
|
||||
def test_auto_retrieve_server_version(self) -> None:
|
||||
@ -323,8 +323,8 @@ class DockerApiTest(BaseAPIClientTest):
|
||||
|
||||
# mock a stream interface
|
||||
raw_resp = urllib3.HTTPResponse(body=body)
|
||||
raw_resp._fp.chunked = True
|
||||
raw_resp._fp.chunk_left = len(body.getvalue()) - 1
|
||||
setattr(raw_resp._fp, "chunked", True)
|
||||
setattr(raw_resp._fp, "chunk_left", len(body.getvalue()) - 1)
|
||||
|
||||
# pass `decode=False` to the helper
|
||||
raw_resp._fp.seek(0)
|
||||
@ -339,7 +339,7 @@ class DockerApiTest(BaseAPIClientTest):
|
||||
assert result == content
|
||||
|
||||
# non-chunked response, pass `decode=False` to the helper
|
||||
raw_resp._fp.chunked = False
|
||||
setattr(raw_resp._fp, "chunked", False)
|
||||
raw_resp._fp.seek(0)
|
||||
resp = create_response(status_code=status_code, content=content, raw=raw_resp)
|
||||
result = next(self.client._stream_helper(resp))
|
||||
@ -503,7 +503,7 @@ class TCPSocketStreamTest(unittest.TestCase):
|
||||
cls.thread.join()
|
||||
|
||||
@classmethod
|
||||
def get_handler_class(cls) -> type[BaseHTTPRequestHandler]:
|
||||
def get_handler_class(cls) -> t.Type[BaseHTTPRequestHandler]:
|
||||
stdout_data = cls.stdout_data
|
||||
stderr_data = cls.stderr_data
|
||||
|
||||
|
||||
@@ -581,7 +581,7 @@ fake_responses: dict[str | tuple[str, str], Callable] = {
    f"{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/restart": post_fake_restart_container,
    f"{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b": delete_fake_remove_container,
    # TODO: the following is a duplicate of the import endpoint further above!
    f"{prefix}/{CURRENT_VERSION}/images/create": post_fake_image_create,  # noqa: F601
    f"{prefix}/{CURRENT_VERSION}/images/create": post_fake_image_create,
    f"{prefix}/{CURRENT_VERSION}/images/e9aa60c60128": delete_fake_remove_image,
    f"{prefix}/{CURRENT_VERSION}/images/e9aa60c60128/get": get_fake_get_image,
    f"{prefix}/{CURRENT_VERSION}/images/load": post_fake_load_image,

@@ -256,7 +256,7 @@ class ResolveAuthTest(unittest.TestCase):
        m.return_value = None
        ac = auth.resolve_authconfig(auth_config, None)
        assert ac is not None
        assert ac["username"] == "indexuser"
        assert "indexuser" == ac["username"]


class LoadConfigTest(unittest.TestCase):

@@ -421,18 +421,18 @@ class TarTest(unittest.TestCase):
        base = make_tree(dirs, files)
        self.addCleanup(shutil.rmtree, base)

        with tar(base, exclude=exclude) as archive, tarfile.open(
            fileobj=archive
        ) as tar_data:
            assert sorted(tar_data.getnames()) == sorted(expected_names)
        with tar(base, exclude=exclude) as archive:
            with tarfile.open(fileobj=archive) as tar_data:
                assert sorted(tar_data.getnames()) == sorted(expected_names)
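This hunk and the following ones trade nested with blocks for the single comma-separated form. The two are exactly equivalent: contexts are entered left to right and exited in reverse order. A self-contained sketch, using only the standard library:

from contextlib import contextmanager

@contextmanager
def label(name: str):
    print(f"enter {name}")
    try:
        yield name
    finally:
        print(f"exit {name}")

# Prints: enter outer, enter inner, outer inner, exit inner, exit outer,
# the same order the nested form produces.
with label("outer") as outer, label("inner") as inner:
    print(outer, inner)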

    def test_tar_with_empty_directory(self) -> None:
        base = tempfile.mkdtemp()
        self.addCleanup(shutil.rmtree, base)
        for d in ["foo", "bar"]:
            os.makedirs(os.path.join(base, d))
        with tar(base) as archive, tarfile.open(fileobj=archive) as tar_data:
            assert sorted(tar_data.getnames()) == ["bar", "foo"]
        with tar(base) as archive:
            with tarfile.open(fileobj=archive) as tar_data:
                assert sorted(tar_data.getnames()) == ["bar", "foo"]

    @pytest.mark.skipif(
        IS_WINDOWS_PLATFORM or os.geteuid() == 0,
@@ -458,8 +458,9 @@ class TarTest(unittest.TestCase):
            f.write("content")
        os.makedirs(os.path.join(base, "bar"))
        os.symlink("../foo", os.path.join(base, "bar/foo"))
        with tar(base) as archive, tarfile.open(fileobj=archive) as tar_data:
            assert sorted(tar_data.getnames()) == ["bar", "bar/foo", "foo"]
        with tar(base) as archive:
            with tarfile.open(fileobj=archive) as tar_data:
                assert sorted(tar_data.getnames()) == ["bar", "bar/foo", "foo"]

    @pytest.mark.skipif(IS_WINDOWS_PLATFORM, reason="No symlinks on Windows")
    def test_tar_with_directory_symlinks(self) -> None:
@@ -468,8 +469,9 @@ class TarTest(unittest.TestCase):
        for d in ["foo", "bar"]:
            os.makedirs(os.path.join(base, d))
        os.symlink("../foo", os.path.join(base, "bar/foo"))
        with tar(base) as archive, tarfile.open(fileobj=archive) as tar_data:
            assert sorted(tar_data.getnames()) == ["bar", "bar/foo", "foo"]
        with tar(base) as archive:
            with tarfile.open(fileobj=archive) as tar_data:
                assert sorted(tar_data.getnames()) == ["bar", "bar/foo", "foo"]

    @pytest.mark.skipif(IS_WINDOWS_PLATFORM, reason="No symlinks on Windows")
    def test_tar_with_broken_symlinks(self) -> None:
@@ -479,8 +481,9 @@ class TarTest(unittest.TestCase):
            os.makedirs(os.path.join(base, d))

        os.symlink("../baz", os.path.join(base, "bar/foo"))
        with tar(base) as archive, tarfile.open(fileobj=archive) as tar_data:
            assert sorted(tar_data.getnames()) == ["bar", "bar/foo", "foo"]
        with tar(base) as archive:
            with tarfile.open(fileobj=archive) as tar_data:
                assert sorted(tar_data.getnames()) == ["bar", "bar/foo", "foo"]

    @pytest.mark.skipif(IS_WINDOWS_PLATFORM, reason="No UNIX sockets on Win32")
    def test_tar_socket_file(self) -> None:
@@ -491,8 +494,9 @@ class TarTest(unittest.TestCase):
        sock = socket.socket(socket.AF_UNIX)
        self.addCleanup(sock.close)
        sock.bind(os.path.join(base, "test.sock"))
        with tar(base) as archive, tarfile.open(fileobj=archive) as tar_data:
            assert sorted(tar_data.getnames()) == ["bar", "foo"]
        with tar(base) as archive:
            with tarfile.open(fileobj=archive) as tar_data:
                assert sorted(tar_data.getnames()) == ["bar", "foo"]

    def tar_test_negative_mtime_bug(self) -> None:
        base = tempfile.mkdtemp()
@@ -501,9 +505,10 @@ class TarTest(unittest.TestCase):
        with open(filename, "wt", encoding="utf-8") as f:
            f.write("Invisible Full Moon")
        os.utime(filename, (12345, -3600.0))
        with tar(base) as archive, tarfile.open(fileobj=archive) as tar_data:
            assert tar_data.getnames() == ["th.txt"]
            assert tar_data.getmember("th.txt").mtime == -3600
        with tar(base) as archive:
            with tarfile.open(fileobj=archive) as tar_data:
                assert tar_data.getnames() == ["th.txt"]
                assert tar_data.getmember("th.txt").mtime == -3600

    @pytest.mark.skipif(IS_WINDOWS_PLATFORM, reason="No symlinks on Windows")
    def test_tar_directory_link(self) -> None:

@@ -58,12 +58,7 @@ class KwargsFromEnvTest(unittest.TestCase):
        self.os_environ = os.environ.copy()

    def tearDown(self) -> None:
        for k, v in self.os_environ.items():
            if os.environ.get(k) != v:
                os.environ[k] = v
        for k in os.environ:
            if k not in self.os_environ:
                os.environ.pop(k)
        os.environ = self.os_environ  # type: ignore
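The rewritten tearDown matters because rebinding the name (os.environ = ...) only swaps out the Python-level mapping; it does not write through to the underlying process environment, and any other code holding a reference keeps seeing the stale object. A self-contained sketch of the in-place restore, with the second loop iterating over a list copy so keys can be removed safely while looping (an adjustment made here for illustration, not taken from the hunk):

import os

def restore_environ(snapshot: dict[str, str]) -> None:
    # Put changed keys back to their snapshotted values.
    for key, value in snapshot.items():
        if os.environ.get(key) != value:
            os.environ[key] = value
    # Drop keys added after the snapshot; list() avoids mutating the
    # mapping while iterating over it.
    for key in list(os.environ):
        if key not in snapshot:
            os.environ.pop(key)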

    def test_kwargs_from_env_empty(self) -> None:
        os.environ.update(DOCKER_HOST="", DOCKER_CERT_PATH="")
@@ -80,7 +75,7 @@ class KwargsFromEnvTest(unittest.TestCase):
            DOCKER_TLS_VERIFY="1",
        )
        kwargs = kwargs_from_env(assert_hostname=False)
        assert kwargs["base_url"] == "tcp://192.168.59.103:2376"
        assert "tcp://192.168.59.103:2376" == kwargs["base_url"]
        assert "ca.pem" in kwargs["tls"].ca_cert
        assert "cert.pem" in kwargs["tls"].cert[0]
        assert "key.pem" in kwargs["tls"].cert[1]
@@ -104,7 +99,7 @@ class KwargsFromEnvTest(unittest.TestCase):
            DOCKER_TLS_VERIFY="",
        )
        kwargs = kwargs_from_env(assert_hostname=True)
        assert kwargs["base_url"] == "tcp://192.168.59.103:2376"
        assert "tcp://192.168.59.103:2376" == kwargs["base_url"]
        assert "ca.pem" in kwargs["tls"].ca_cert
        assert "cert.pem" in kwargs["tls"].cert[0]
        assert "key.pem" in kwargs["tls"].cert[1]
@@ -130,7 +125,7 @@ class KwargsFromEnvTest(unittest.TestCase):
        )
        os.environ.pop("DOCKER_CERT_PATH", None)
        kwargs = kwargs_from_env(assert_hostname=True)
        assert kwargs["base_url"] == "tcp://192.168.59.103:2376"
        assert "tcp://192.168.59.103:2376" == kwargs["base_url"]

    def test_kwargs_from_env_no_cert_path(self) -> None:
        try:
@@ -162,7 +157,7 @@ class KwargsFromEnvTest(unittest.TestCase):
                "DOCKER_HOST": "http://docker.gensokyo.jp:2581",
            }
        )
        assert kwargs["base_url"] == "http://docker.gensokyo.jp:2581"
        assert "http://docker.gensokyo.jp:2581" == kwargs["base_url"]
        assert "tls" not in kwargs



@@ -9,7 +9,6 @@ import pytest
from ansible_collections.community.docker.plugins.module_utils._compose_v2 import (
    Event,
    parse_events,
    parse_json_events,
)

from .compose_v2_test_cases import EVENT_TEST_CASES
@@ -385,208 +384,3 @@ def test_parse_events(

    assert collected_events == events
    assert collected_warnings == warnings


JSON_TEST_CASES: list[tuple[str, str, str, list[Event], list[str]]] = [
    (
        "pull-compose-2",
        "2.40.3",
        '{"level":"warning","msg":"/tmp/ansible.f9pcm_i3.test/ansible-docker-test-3c46cd06-pull/docker-compose.yml: the attribute `version`'
        ' is obsolete, it will be ignored, please remove it to avoid potential confusion","time":"2025-12-06T13:16:30Z"}\n'
        '{"id":"ansible-docker-test-3c46cd06-cont","text":"Pulling"}\n'
        '{"id":"63a26ae4e8a8","parent_id":"ansible-docker-test-3c46cd06-cont","text":"Pulling fs layer"}\n'
        '{"id":"63a26ae4e8a8","parent_id":"ansible-docker-test-3c46cd06-cont","text":"Downloading","status":"[\\u003e '
        ' ] 6.89kB/599.9kB","current":6890,"total":599883,"percent":1}\n'
        '{"id":"63a26ae4e8a8","parent_id":"ansible-docker-test-3c46cd06-cont","text":"Download complete","percent":100}\n'
        '{"id":"63a26ae4e8a8","parent_id":"ansible-docker-test-3c46cd06-cont","text":"Extracting","status":"[==\\u003e '
        ' ] 32.77kB/599.9kB","current":32768,"total":599883,"percent":5}\n'
        '{"id":"63a26ae4e8a8","parent_id":"ansible-docker-test-3c46cd06-cont","text":"Extracting","status":"[============'
        '======================================\\u003e] 599.9kB/599.9kB","current":599883,"total":599883,"percent":100}\n'
        '{"id":"63a26ae4e8a8","parent_id":"ansible-docker-test-3c46cd06-cont","text":"Extracting","status":"[============'
        '======================================\\u003e] 599.9kB/599.9kB","current":599883,"total":599883,"percent":100}\n'
        '{"id":"63a26ae4e8a8","parent_id":"ansible-docker-test-3c46cd06-cont","text":"Pull complete","percent":100}\n'
        '{"id":"ansible-docker-test-3c46cd06-cont","text":"Pulled"}\n',
        [
            Event(
                "unknown",
                None,
                "Warning",
                "/tmp/ansible.f9pcm_i3.test/ansible-docker-test-3c46cd06-pull/docker-compose.yml: the attribute `version` is obsolete,"
                " it will be ignored, please remove it to avoid potential confusion",
            ),
            Event(
                "image",
                "ansible-docker-test-3c46cd06-cont",
                "Pulling",
                None,
            ),
            Event(
                "image-layer",
                "63a26ae4e8a8",
                "Pulling fs layer",
                None,
            ),
            Event(
                "image-layer",
                "63a26ae4e8a8",
                "Downloading",
                "[> ] 6.89kB/599.9kB",
            ),
            Event(
                "image-layer",
                "63a26ae4e8a8",
                "Download complete",
                None,
            ),
            Event(
                "image-layer",
                "63a26ae4e8a8",
                "Extracting",
                "[==> ] 32.77kB/599.9kB",
            ),
            Event(
                "image-layer",
                "63a26ae4e8a8",
                "Extracting",
                "[==================================================>] 599.9kB/599.9kB",
            ),
            Event(
                "image-layer",
                "63a26ae4e8a8",
                "Extracting",
                "[==================================================>] 599.9kB/599.9kB",
            ),
            Event(
                "image-layer",
                "63a26ae4e8a8",
                "Pull complete",
                None,
            ),
            Event(
                "image",
                "ansible-docker-test-3c46cd06-cont",
                "Pulled",
                None,
            ),
        ],
        [],
    ),
    (
        "pull-compose-5",
        "5.0.0",
        '{"level":"warning","msg":"/tmp/ansible.1n0q46aj.test/ansible-docker-test-b2fa9191-pull/docker-compose.yml: the attribute'
        ' `version` is obsolete, it will be ignored, please remove it to avoid potential confusion","time":"2025-12-06T13:08:22Z"}\n'
        '{"id":"Image ghcr.io/ansible-collections/simple-1:tag","status":"Working","text":"Pulling"}\n'
        '{"id":"63a26ae4e8a8","parent_id":"Image ghcr.io/ansible-collections/simple-1:tag","status":"Working"}\n'
        '{"id":"63a26ae4e8a8","parent_id":"Image ghcr.io/ansible-collections/simple-1:tag","status":"Working","text":"[\\u003e '
        ' ] 6.89kB/599.9kB","current":6890,"total":599883,"percent":1}\n'
        '{"id":"63a26ae4e8a8","parent_id":"Image ghcr.io/ansible-collections/simple-1:tag","status":"Working","text":"[=============='
        '====================================\\u003e] 599.9kB/599.9kB","current":599883,"total":599883,"percent":100}\n'
        '{"id":"63a26ae4e8a8","parent_id":"Image ghcr.io/ansible-collections/simple-1:tag","status":"Working"}\n'
        '{"id":"63a26ae4e8a8","parent_id":"Image ghcr.io/ansible-collections/simple-1:tag","status":"Done","percent":100}\n'
        '{"id":"63a26ae4e8a8","parent_id":"Image ghcr.io/ansible-collections/simple-1:tag","status":"Working","text":"[==\\u003e '
        ' ] 32.77kB/599.9kB","current":32768,"total":599883,"percent":5}\n'
        '{"id":"63a26ae4e8a8","parent_id":"Image ghcr.io/ansible-collections/simple-1:tag","status":"Working","text":"[=============='
        '====================================\\u003e] 599.9kB/599.9kB","current":599883,"total":599883,"percent":100}\n'
        '{"id":"63a26ae4e8a8","parent_id":"Image ghcr.io/ansible-collections/simple-1:tag","status":"Working","text":"[=============='
        '====================================\\u003e] 599.9kB/599.9kB","current":599883,"total":599883,"percent":100}\n'
        '{"id":"63a26ae4e8a8","parent_id":"Image ghcr.io/ansible-collections/simple-1:tag","status":"Done","percent":100}\n'
        '{"id":"Image ghcr.io/ansible-collections/simple-1:tag","status":"Done","text":"Pulled"}\n',
        [
            Event(
                "unknown",
                None,
                "Warning",
                "/tmp/ansible.1n0q46aj.test/ansible-docker-test-b2fa9191-pull/docker-compose.yml: the attribute `version`"
                " is obsolete, it will be ignored, please remove it to avoid potential confusion",
            ),
            Event(
                "image",
                "ghcr.io/ansible-collections/simple-1:tag",
                "Pulling",
                "Working",
            ),
            Event(
                "image-layer",
                "ghcr.io/ansible-collections/simple-1:tag",
                "Working",
                None,
            ),
            Event(
                "image-layer",
                "ghcr.io/ansible-collections/simple-1:tag",
                "Working",
                "[> ] 6.89kB/599.9kB",
            ),
            Event(
                "image-layer",
                "ghcr.io/ansible-collections/simple-1:tag",
                "Working",
                "[==================================================>] 599.9kB/599.9kB",
            ),
            Event(
                "image-layer",
                "ghcr.io/ansible-collections/simple-1:tag",
                "Working",
                None,
            ),
            Event(
                "image-layer", "ghcr.io/ansible-collections/simple-1:tag", "Done", None
            ),
            Event(
                "image-layer",
                "ghcr.io/ansible-collections/simple-1:tag",
                "Working",
                "[==> ] 32.77kB/599.9kB",
            ),
            Event(
                "image-layer",
                "ghcr.io/ansible-collections/simple-1:tag",
                "Working",
                "[==================================================>] 599.9kB/599.9kB",
            ),
            Event(
                "image-layer",
                "ghcr.io/ansible-collections/simple-1:tag",
                "Working",
                "[==================================================>] 599.9kB/599.9kB",
            ),
            Event(
                "image-layer", "ghcr.io/ansible-collections/simple-1:tag", "Done", None
            ),
            Event(
                "image", "ghcr.io/ansible-collections/simple-1:tag", "Pulled", "Done"
            ),
        ],
        [],
    ),
]


@pytest.mark.parametrize(
    "test_id, compose_version, stderr, events, warnings",
    JSON_TEST_CASES,
    ids=[tc[0] for tc in JSON_TEST_CASES],
)
def test_parse_json_events(
    test_id: str,
    compose_version: str,
    stderr: str,
    events: list[Event],
    warnings: list[str],
) -> None:
    collected_warnings = []

    def collect_warning(msg: str) -> None:
        collected_warnings.append(msg)

    collected_events = parse_json_events(
        stderr.encode("utf-8"),
        warn_function=collect_warning,
    )

    print(collected_events)
    print(collected_warnings)

    assert collected_events == events
    assert collected_warnings == warnings
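Each stderr line in these fixtures is a self-contained JSON object: warnings carry a level field, per-layer progress carries parent_id, and everything else is a top-level image event. A toy classifier along those lines, for illustration only; the real parse_json_events in _compose_v2 does considerably more, such as stripping the "Image " prefix from ids and mapping text/status per compose version:

import json

def classify(line: str) -> str:
    # Hypothetical helper mirroring the shape of the fixture messages above.
    msg = json.loads(line)
    if msg.get("level") == "warning":
        return "unknown"      # surfaced as a warning event
    if "parent_id" in msg:
        return "image-layer"  # progress for one layer of a pull
    return "image"            # top-level per-image event

assert classify('{"level":"warning","msg":"x"}') == "unknown"
assert classify('{"id":"63a26ae4e8a8","parent_id":"x","text":"Downloading"}') == "image-layer"
assert classify('{"id":"x","text":"Pulled"}') == "image"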

@@ -23,6 +23,7 @@ from ..test_support.docker_image_archive_stubbing import (

if t.TYPE_CHECKING:
    from collections.abc import Callable
    from pathlib import Path


def assert_no_logging(msg: str) -> t.NoReturn:

@@ -156,7 +156,7 @@ def test_has_list_changed() -> None:
        [{"a": 1}, {"a": 2}], [{"a": 1}, {"a": 2}], sort_key="a"
    )

    with pytest.raises(ValueError):
    with pytest.raises(Exception):
        docker_swarm_service.has_list_changed(
            [{"a": 1}, {"a": 2}], [{"a": 1}, {"a": 2}]
        )
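Narrowing the expected exception from Exception to ValueError makes the test stricter: with Exception, any bug that raises (a KeyError, say) would still pass. A hypothetical demonstration of the pattern, unrelated to has_list_changed itself:

import pytest

def parse_port(value: str) -> int:
    port = int(value)  # raises ValueError for non-numeric input
    if not 0 < port < 65536:
        raise ValueError(f"out of range: {port}")
    return port

def test_parse_port_rejects_garbage() -> None:
    with pytest.raises(ValueError):  # not pytest.raises(Exception)
        parse_port("docker")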

@@ -36,14 +36,15 @@ def write_imitation_archive(
def write_imitation_archive_with_manifest(
    file_name: str, manifest: list[dict[str, t.Any]]
) -> None:
    with tarfile.open(file_name, "w") as tf, TemporaryFile() as f:
        f.write(json.dumps(manifest).encode("utf-8"))
    with tarfile.open(file_name, "w") as tf:
        with TemporaryFile() as f:
            f.write(json.dumps(manifest).encode("utf-8"))

        ti = tarfile.TarInfo("manifest.json")
        ti.size = f.tell()
            ti = tarfile.TarInfo("manifest.json")
            ti.size = f.tell()

        f.seek(0)
        tf.addfile(ti, f)
            f.seek(0)
            tf.addfile(ti, f)
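The write/tell/seek dance above exists because TarFile.addfile needs the member size up front: the helper writes the payload to a temporary file, records f.tell() as the size, rewinds, and lets addfile stream the content. A minimal sketch of the same idea with an in-memory buffer, where the helper name is made up for illustration:

import io
import json
import tarfile

def add_json_member(tf: tarfile.TarFile, name: str, payload: object) -> None:
    # Serialize first so the member size is known before addfile runs.
    data = json.dumps(payload).encode("utf-8")
    ti = tarfile.TarInfo(name)
    ti.size = len(data)
    tf.addfile(ti, io.BytesIO(data))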


def write_irrelevant_tar(file_name: str) -> None:
@@ -54,11 +55,12 @@ def write_irrelevant_tar(file_name: str) -> None:
    :type file_name: str
    """

    with tarfile.open(file_name, "w") as tf, TemporaryFile() as f:
        f.write("Hello, world.".encode("utf-8"))
    with tarfile.open(file_name, "w") as tf:
        with TemporaryFile() as f:
            f.write("Hello, world.".encode("utf-8"))

        ti = tarfile.TarInfo("hi.txt")
        ti.size = f.tell()
            ti = tarfile.TarInfo("hi.txt")
            ti.size = f.tell()

        f.seek(0)
        tf.addfile(ti, f)
            f.seek(0)
            tf.addfile(ti, f)
