Replace % with f-strings, and do some cleanup.

Felix Fontein 2025-10-06 07:48:36 +02:00
parent e4d37af9ca
commit f68f80722b
48 changed files with 414 additions and 468 deletions
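A minimal before/after sketch of the conversion pattern this commit applies throughout the collection (illustrative names only, not code from the repository): `%`-interpolation is replaced by an f-string that produces the same output.

# Hypothetical helper names; both return identical text.
def describe_old(name, tag):
    return "Pulling image %s:%s" % (name, tag)

def describe_new(name, tag):
    return f"Pulling image {name}:{tag}"

assert describe_old("busybox", "latest") == describe_new("busybox", "latest")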

View File

@ -202,7 +202,7 @@ class Connection(ConnectionBase):
cmd, cmd_output, err, returncode = self._new_docker_version()
if returncode:
raise AnsibleError('Docker version check (%s) failed: %s' % (to_native(cmd), to_native(err)))
raise AnsibleError(f'Docker version check ({to_native(cmd)}) failed: {to_native(err)}')
return self._sanitize_version(to_text(cmd_output, errors='surrogate_or_strict'))
@ -218,7 +218,7 @@ class Connection(ConnectionBase):
out = to_text(out, errors='surrogate_or_strict')
if p.returncode != 0:
display.warning('unable to retrieve default user from docker container: %s %s' % (out, to_text(err)))
display.warning(f'unable to retrieve default user from docker container: {out} {to_text(err)}')
self._container_user_cache[container] = None
return None
@ -252,7 +252,7 @@ class Connection(ConnectionBase):
f'Non-string {what.lower()} found for extra_env option. Ambiguous env options must be '
f'wrapped in quotes to avoid them being interpreted. {what}: {val!r}'
)
local_cmd += [b'-e', b'%s=%s' % (to_bytes(k, errors='surrogate_or_strict'), to_bytes(v, errors='surrogate_or_strict'))]
local_cmd += [b'-e', b"%s=%s" % (to_bytes(k, errors='surrogate_or_strict'), to_bytes(v, errors='surrogate_or_strict'))]
if self.get_option('working_dir') is not None:
local_cmd += [b'-w', to_bytes(self.get_option('working_dir'), errors='surrogate_or_strict')]
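Note on the hunk above: the `-e` argument keeps `%` formatting because f-strings always produce `str`, never `bytes`. A small sketch of the difference (hypothetical values):

key, value = b"HTTP_PROXY", b"http://proxy:3128"
assert b"%s=%s" % (key, value) == b"HTTP_PROXY=http://proxy:3128"  # %-formatting works on bytes
# An f-string would instead build the str "b'HTTP_PROXY'=b'http://proxy:3128'".
assert f"{key}={value}" == "b'HTTP_PROXY'=b'http://proxy:3128'"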
@ -420,12 +420,12 @@ class Connection(ConnectionBase):
""" Transfer a file from local to docker container """
self._set_conn_data()
super(Connection, self).put_file(in_path, out_path)
display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.get_option('remote_addr'))
display.vvv(f"PUT {in_path} TO {out_path}", host=self.get_option('remote_addr'))
out_path = self._prefix_login_path(out_path)
if not os.path.exists(to_bytes(in_path, errors='surrogate_or_strict')):
raise AnsibleFileNotFound(
"file or module does not exist: %s" % to_native(in_path))
f"file or module does not exist: {to_native(in_path)}")
out_path = quote(out_path)
# Older docker does not have native support for copying files into
@ -437,7 +437,7 @@ class Connection(ConnectionBase):
count = ' count=0'
else:
count = ''
args = self._build_exec_cmd([self._play_context.executable, "-c", "dd of=%s bs=%s%s" % (out_path, BUFSIZE, count)])
args = self._build_exec_cmd([self._play_context.executable, "-c", f"dd of={out_path} bs={BUFSIZE}{count}"])
args = [to_bytes(i, errors='surrogate_or_strict') for i in args]
try:
p = subprocess.Popen(args, stdin=in_file, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
@ -446,21 +446,20 @@ class Connection(ConnectionBase):
stdout, stderr = p.communicate()
if p.returncode != 0:
raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" %
(to_native(in_path), to_native(out_path), to_native(stdout), to_native(stderr)))
raise AnsibleError(f"failed to transfer file {to_native(in_path)} to {to_native(out_path)}:\n{to_native(stdout)}\n{to_native(stderr)}")
def fetch_file(self, in_path, out_path):
""" Fetch a file from container to local. """
self._set_conn_data()
super(Connection, self).fetch_file(in_path, out_path)
display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.get_option('remote_addr'))
display.vvv(f"FETCH {in_path} TO {out_path}", host=self.get_option('remote_addr'))
in_path = self._prefix_login_path(in_path)
# out_path is the final file path, but docker takes a directory, not a
# file path
out_dir = os.path.dirname(out_path)
args = [self.docker_cmd, "cp", "%s:%s" % (self.get_option('remote_addr'), in_path), out_dir]
args = [self.docker_cmd, "cp", f"{self.get_option('remote_addr')}:{in_path}", out_dir]
args = [to_bytes(i, errors='surrogate_or_strict') for i in args]
p = subprocess.Popen(args, stdin=subprocess.PIPE,
@ -476,7 +475,7 @@ class Connection(ConnectionBase):
if p.returncode != 0:
# Older docker does not have native support for fetching files command `cp`
# If `cp` fails, try to use `dd` instead
args = self._build_exec_cmd([self._play_context.executable, "-c", "dd if=%s bs=%s" % (in_path, BUFSIZE)])
args = self._build_exec_cmd([self._play_context.executable, "-c", f"dd if={in_path} bs={BUFSIZE}"])
args = [to_bytes(i, errors='surrogate_or_strict') for i in args]
with open(to_bytes(actual_out_path, errors='surrogate_or_strict'), 'wb') as out_file:
try:
@ -487,7 +486,7 @@ class Connection(ConnectionBase):
stdout, stderr = p.communicate()
if p.returncode != 0:
raise AnsibleError("failed to fetch file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr))
raise AnsibleError(f"failed to fetch file {in_path} to {out_path}:\n{stdout}\n{stderr}")
# Rename if needed
if actual_out_path != out_path:

View File

@ -321,7 +321,7 @@ class Connection(ConnectionBase):
def put_file(self, in_path, out_path):
""" Transfer a file from local to docker container """
super(Connection, self).put_file(in_path, out_path)
display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.get_option('remote_addr'))
display.vvv(f"PUT {in_path} TO {out_path}", host=self.get_option('remote_addr'))
out_path = self._prefix_login_path(out_path)
@ -363,7 +363,7 @@ class Connection(ConnectionBase):
def fetch_file(self, in_path, out_path):
""" Fetch a file from container to local. """
super(Connection, self).fetch_file(in_path, out_path)
display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.get_option('remote_addr'))
display.vvv(f"FETCH {in_path} TO {out_path}", host=self.get_option('remote_addr'))
in_path = self._prefix_login_path(in_path)

View File

@ -90,8 +90,8 @@ class Connection(ConnectionBase):
executable = C.DEFAULT_EXECUTABLE.split()[0] if C.DEFAULT_EXECUTABLE else None
if not os.path.exists(to_bytes(executable, errors='surrogate_or_strict')):
raise AnsibleError("failed to find the executable specified %s."
" Please verify if the executable exists and re-try." % executable)
raise AnsibleError(f"failed to find the executable specified {executable}."
" Please verify if the executable exists and re-try.")
# Rewrite the provided command to prefix it with nsenter
nsenter_cmd_parts = [
@ -129,7 +129,7 @@ class Connection(ConnectionBase):
try:
master, stdin = pty.openpty()
except (IOError, OSError) as e:
display.debug("Unable to open pty: %s" % to_native(e))
display.debug(f"Unable to open pty: {e}")
p = subprocess.Popen(
cmd,

View File

@ -172,7 +172,6 @@ filters:
import re
from ansible.errors import AnsibleError
from ansible.module_utils.common.text.converters import to_native
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable
from ansible_collections.community.docker.plugins.module_utils.common_api import (
@ -198,7 +197,8 @@ class InventoryModule(BaseInventoryPlugin, Constructable):
NAME = 'community.docker.docker_containers'
def _slugify(self, value):
return 'docker_%s' % (re.sub(r'[^\w-]', '_', value).lower().lstrip('_'))
slug = re.sub(r'[^\w-]', '_', value).lower().lstrip('_')
return f'docker_{slug}'
def _populate(self, client):
strict = self.get_option('strict')
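For reference, a hedged example of what the rewritten `_slugify` above now produces (the input value is made up):

import re

def _slugify(value):
    slug = re.sub(r'[^\w-]', '_', value).lower().lstrip('_')
    return f'docker_{slug}'

assert _slugify('Web/App 1') == 'docker_web_app_1'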
@ -221,7 +221,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable):
}
containers = client.get_json('/containers/json', params=params)
except APIError as exc:
raise AnsibleError("Error listing containers: %s" % to_native(exc))
raise AnsibleError(f"Error listing containers: {exc}")
if add_legacy_groups:
self.inventory.add_group('running')
@ -255,7 +255,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable):
try:
inspect = client.get_json('/containers/{0}/json', id)
except APIError as exc:
raise AnsibleError("Error inspecting container %s - %s" % (name, str(exc)))
raise AnsibleError(f"Error inspecting container {name} - {exc!s}")
state = inspect.get('State') or dict()
config = inspect.get('Config') or dict()
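Several rewrites in this commit use the `!s` conversion flag where the old code called `str(exc)`; for exceptions the two are equivalent, and plain `{exc}` formats via `str()` as well. A minimal sketch:

exc = ValueError("boom")
assert f"{exc!s}" == str(exc) == "boom"
assert f"{exc}" == "boom"  # format() falls back to str() for exceptions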
@ -289,7 +289,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable):
# Lookup the public facing port Nat'ed to ssh port.
network_settings = inspect.get('NetworkSettings') or {}
port_settings = network_settings.get('Ports') or {}
port = port_settings.get('%d/tcp' % (ssh_port, ))[0]
port = port_settings.get(f'{ssh_port}/tcp')[0]
except (IndexError, AttributeError, TypeError):
port = dict()
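One behavioural nuance in the hunk above: the old `'%d/tcp' % ssh_port` required an integer, while the f-string formats whatever value `ssh_port` holds. A hedged illustration:

ssh_port = 22
assert f'{ssh_port}/tcp' == '%d/tcp' % ssh_port == '22/tcp'
ssh_port = '2222'  # a string here would have made %d raise TypeError
assert f'{ssh_port}/tcp' == '2222/tcp'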

View File

@ -279,8 +279,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
self._add_host_to_keyed_groups(self.get_option('keyed_groups'), unsafe_node_attrs, machine_name, strict=strict)
except Exception as e:
raise AnsibleError('Unable to fetch hosts from Docker Machine, this was the original exception: %s' %
to_native(e), orig_exc=e)
raise AnsibleError(f'Unable to fetch hosts from Docker Machine, this was the original exception: {e}') from e
def verify_file(self, path):
"""Return the possibility of a file being consumable by this plugin."""

View File

@ -149,7 +149,6 @@ keyed_groups:
'''
from ansible.errors import AnsibleError
from ansible.module_utils.common.text.converters import to_native
from ansible_collections.community.docker.plugins.module_utils.common import get_connect_params
from ansible_collections.community.docker.plugins.module_utils.util import update_tls_hostname
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable
@ -255,8 +254,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable):
unsafe_node_attrs['ID'],
strict=strict)
except Exception as e:
raise AnsibleError('Unable to fetch hosts from Docker swarm API, this was the original exception: %s' %
to_native(e))
raise AnsibleError(f'Unable to fetch hosts from Docker swarm API, this was the original exception: {e}')
def verify_file(self, path):
"""Return the possibly of a file being consumable by this plugin."""

View File

@ -119,7 +119,7 @@ def translate(pat):
stuff = '^' + stuff[1:]
elif stuff[0] == '^':
stuff = '\\' + stuff
res = '%s[%s]' % (res, stuff)
res = f'{res}[{stuff}]'
else:
res = res + re.escape(c)

View File

@ -49,9 +49,9 @@ def build_port_bindings(ports):
def _raise_invalid_port(port):
raise ValueError('Invalid port "%s", should be '
raise ValueError(f'Invalid port "{port}", should be '
'[[remote_ip:]remote_port[-remote_port]:]'
'port[/protocol]' % port)
'port[/protocol]')
def port_range(start, end, proto, randomly_available_port=False):

View File

@ -169,7 +169,7 @@ def consume_socket_output(frames, demux=False):
# It is guaranteed that for each frame, one and only one stream
# is not None.
if frame == (None, None):
raise AssertionError('frame must be (None, None), but got %s' % (frame, ))
raise AssertionError(f'frame must not be (None, None), but got {frame}')
if frame[0] is not None:
if out[0] is None:
out[0] = frame[0]

View File

@ -135,7 +135,7 @@ def _get_tls_config(fail_function, **kwargs):
tls_config = TLSConfig(**kwargs)
return tls_config
except TLSParameterError as exc:
fail_function("TLS config error: %s" % exc)
fail_function(f"TLS config error: {exc}")
def is_using_tls(auth):
@ -200,17 +200,20 @@ class AnsibleDockerClientBase(Client):
self.fail("Cannot have both the docker-py and docker python modules (old and new version of Docker "
"SDK for Python) installed together as they use the same namespace and cause a corrupt "
"installation. Please uninstall both packages, and re-install only the docker-py or docker "
"python module (for %s's Python %s). It is recommended to install the docker module. Please "
f"python module (for {platform.node()}'s Python {sys.executable}). It is recommended to install the docker module. Please "
"note that simply uninstalling one of the modules can leave the other module in a broken "
"state." % (platform.node(), sys.executable))
"state.")
if not HAS_DOCKER_PY:
msg = missing_required_lib("Docker SDK for Python: docker>=5.0.0")
msg = msg + ", for example via `pip install docker`. The error was: %s"
self.fail(msg % HAS_DOCKER_ERROR, exception=HAS_DOCKER_TRACEBACK)
msg = f"{msg}, for example via `pip install docker`. The error was: {HAS_DOCKER_ERROR}"
self.fail(msg, exception=HAS_DOCKER_TRACEBACK)
if self.docker_py_version < LooseVersion(min_docker_version):
msg = "Error: Docker SDK for Python version is %s (%s's Python %s). Minimum version required is %s."
msg = (
f"Error: Docker SDK for Python version is {docker_version} ({platform.node()}'s Python {sys.executable})."
f" Minimum version required is {min_docker_version}."
)
if not NEEDS_DOCKER_PY2:
# The minimal required version is < 2.0 (and the current version as well).
# Advertise docker (instead of docker-py).
@ -219,7 +222,7 @@ class AnsibleDockerClientBase(Client):
msg += DOCKERPYUPGRADE_SWITCH_TO_DOCKER
else:
msg += DOCKERPYUPGRADE_UPGRADE_DOCKER
self.fail(msg % (docker_version, platform.node(), sys.executable, min_docker_version))
self.fail(msg)
self._connect_params = get_connect_params(self.auth_params, fail_function=self.fail)
@ -227,14 +230,14 @@ class AnsibleDockerClientBase(Client):
super(AnsibleDockerClientBase, self).__init__(**self._connect_params)
self.docker_api_version_str = self.api_version
except APIError as exc:
self.fail("Docker API error: %s" % exc)
self.fail(f"Docker API error: {exc}")
except Exception as exc:
self.fail("Error connecting: %s" % exc)
self.fail(f"Error connecting: {exc}")
self.docker_api_version = LooseVersion(self.docker_api_version_str)
min_docker_api_version = min_docker_api_version or '1.25'
if self.docker_api_version < LooseVersion(min_docker_api_version):
self.fail('Docker API version is %s. Minimum version required is %s.' % (self.docker_api_version_str, min_docker_api_version))
self.fail(f'Docker API version is {self.docker_api_version_str}. Minimum version required is {min_docker_api_version}.')
def log(self, msg, pretty_print=False):
pass
@ -328,23 +331,23 @@ class AnsibleDockerClientBase(Client):
def _handle_ssl_error(self, error):
match = re.match(r"hostname.*doesn\'t match (\'.*\')", str(error))
if match:
self.fail("You asked for verification that Docker daemons certificate's hostname matches %s. "
"The actual certificate's hostname is %s. Most likely you need to set DOCKER_TLS_HOSTNAME "
"or pass `tls_hostname` with a value of %s. You may also use TLS without verification by "
"setting the `tls` parameter to true."
% (self.auth_params['tls_hostname'], match.group(1), match.group(1)))
self.fail("SSL Exception: %s" % (error))
hostname = self.auth_params['tls_hostname']
self.fail(f"You asked for verification that Docker daemons certificate's hostname matches {hostname}. "
f"The actual certificate's hostname is {match.group(1)}. Most likely you need to set DOCKER_TLS_HOSTNAME "
f"or pass `tls_hostname` with a value of {match.group(1)}. You may also use TLS without verification by "
"setting the `tls` parameter to true.")
self.fail(f"SSL Exception: {error}")
def get_container_by_id(self, container_id):
try:
self.log("Inspecting container Id %s" % container_id)
self.log(f"Inspecting container Id {container_id}")
result = self.inspect_container(container=container_id)
self.log("Completed container inspection")
return result
except NotFound as dummy:
return None
except Exception as exc:
self.fail("Error inspecting container: %s" % exc)
self.fail(f"Error inspecting container: {exc}")
def get_container(self, name=None):
'''
@ -360,7 +363,7 @@ class AnsibleDockerClientBase(Client):
result = None
try:
for container in self.containers(all=True):
self.log("testing container: %s" % (container['Names']))
self.log(f"testing container: {container['Names']}")
if isinstance(container['Names'], list) and search_name in container['Names']:
result = container
break
@ -373,7 +376,7 @@ class AnsibleDockerClientBase(Client):
except SSLError as exc:
self._handle_ssl_error(exc)
except Exception as exc:
self.fail("Error retrieving container list: %s" % exc)
self.fail(f"Error retrieving container list: {exc}")
if result is None:
return None
@ -392,7 +395,7 @@ class AnsibleDockerClientBase(Client):
if network_id is None:
try:
for network in self.networks():
self.log("testing network: %s" % (network['Name']))
self.log(f"testing network: {network['Name']}")
if name == network['Name']:
result = network
break
@ -402,20 +405,20 @@ class AnsibleDockerClientBase(Client):
except SSLError as exc:
self._handle_ssl_error(exc)
except Exception as exc:
self.fail("Error retrieving network list: %s" % exc)
self.fail(f"Error retrieving network list: {exc}")
if result is not None:
network_id = result['Id']
if network_id is not None:
try:
self.log("Inspecting network Id %s" % network_id)
self.log(f"Inspecting network Id {network_id}")
result = self.inspect_network(network_id)
self.log("Completed network inspection")
except NotFound as dummy:
return None
except Exception as exc:
self.fail("Error inspecting network: %s" % exc)
self.fail(f"Error inspecting network: {exc}")
return result
@ -426,7 +429,7 @@ class AnsibleDockerClientBase(Client):
if not name:
return None
self.log("Find image %s:%s" % (name, tag))
self.log(f"Find image {name}:{tag}")
images = self._image_lookup(name, tag)
if not images:
# In API <= 1.20 seeing 'docker.io/<name>' as the name of images pulled from docker hub
@ -434,41 +437,41 @@ class AnsibleDockerClientBase(Client):
if registry == 'docker.io':
# If docker.io is explicitly there in name, the image
# is not found in some cases (#41509)
self.log("Check for docker.io image: %s" % repo_name)
self.log(f"Check for docker.io image: {repo_name}")
images = self._image_lookup(repo_name, tag)
if not images and repo_name.startswith('library/'):
# Sometimes library/xxx images are not found
lookup = repo_name[len('library/'):]
self.log("Check for docker.io image: %s" % lookup)
self.log(f"Check for docker.io image: {lookup}")
images = self._image_lookup(lookup, tag)
if not images:
# Last case for some Docker versions: if docker.io was not there,
# it can be that the image was not found either
# (https://github.com/ansible/ansible/pull/15586)
lookup = "%s/%s" % (registry, repo_name)
self.log("Check for docker.io image: %s" % lookup)
lookup = f"{registry}/{repo_name}"
self.log(f"Check for docker.io image: {lookup}")
images = self._image_lookup(lookup, tag)
if not images and '/' not in repo_name:
# This seems to be happening with podman-docker
# (https://github.com/ansible-collections/community.docker/issues/291)
lookup = "%s/library/%s" % (registry, repo_name)
self.log("Check for docker.io image: %s" % lookup)
lookup = f"{registry}/library/{repo_name}"
self.log(f"Check for docker.io image: {lookup}")
images = self._image_lookup(lookup, tag)
if len(images) > 1:
self.fail("Daemon returned more than one result for %s:%s" % (name, tag))
self.fail(f"Daemon returned more than one result for {name}:{tag}")
if len(images) == 1:
try:
inspection = self.inspect_image(images[0]['Id'])
except NotFound:
self.log("Image %s:%s not found." % (name, tag))
self.log(f"Image {name}:{tag} not found.")
return None
except Exception as exc:
self.fail("Error inspecting image %s:%s - %s" % (name, tag, str(exc)))
self.fail(f"Error inspecting image {name}:{tag} - {exc!s}")
return inspection
self.log("Image %s:%s not found." % (name, tag))
self.log(f"Image {name}:{tag} not found.")
return None
def find_image_by_id(self, image_id, accept_missing_image=False):
@ -478,16 +481,16 @@ class AnsibleDockerClientBase(Client):
if not image_id:
return None
self.log("Find image %s (by ID)" % image_id)
self.log(f"Find image {image_id} (by ID)")
try:
inspection = self.inspect_image(image_id)
except NotFound as exc:
if not accept_missing_image:
self.fail("Error inspecting image ID %s - %s" % (image_id, str(exc)))
self.log("Image %s not found." % image_id)
self.fail(f"Error inspecting image ID {image_id} - {exc!s}")
self.log(f"Image {image_id} not found.")
return None
except Exception as exc:
self.fail("Error inspecting image ID %s - %s" % (image_id, str(exc)))
self.fail(f"Error inspecting image ID {image_id} - {exc!s}")
return inspection
def _image_lookup(self, name, tag):
@ -499,11 +502,11 @@ class AnsibleDockerClientBase(Client):
try:
response = self.images(name=name)
except Exception as exc:
self.fail("Error searching for image %s - %s" % (name, str(exc)))
self.fail(f"Error searching for image {name} - {exc!s}")
images = response
if tag:
lookup = "%s:%s" % (name, tag)
lookup_digest = "%s@%s" % (name, tag)
lookup = f"{name}:{tag}"
lookup_digest = f"{name}@{tag}"
images = []
for image in response:
tags = image.get('RepoTags')
@ -524,7 +527,7 @@ class AnsibleDockerClientBase(Client):
)
if platform is not None:
kwargs['platform'] = platform
self.log("Pulling image %s:%s" % (name, tag))
self.log(f"Pulling image {name}:{tag}")
old_tag = self.find_image(name, tag)
try:
for line in self.pull(name, **kwargs):
@ -532,13 +535,11 @@ class AnsibleDockerClientBase(Client):
if line.get('error'):
if line.get('errorDetail'):
error_detail = line.get('errorDetail')
self.fail("Error pulling %s - code: %s message: %s" % (name,
error_detail.get('code'),
error_detail.get('message')))
self.fail(f"Error pulling {name} - code: {error_detail.get('code')} message: {error_detail.get('message')}")
else:
self.fail("Error pulling %s - %s" % (name, line.get('error')))
self.fail(f"Error pulling {name} - {line.get('error')}")
except Exception as exc:
self.fail("Error pulling image %s:%s - %s" % (name, tag, str(exc)))
self.fail(f"Error pulling image {name}:{tag} - {exc!s}")
new_tag = self.find_image(name, tag)
@ -649,22 +650,23 @@ class AnsibleDockerClient(AnsibleDockerClientBase):
if 'usage_msg' in data:
usg = data['usage_msg']
else:
usg = 'set %s option' % (option, )
usg = f'set {option} option'
if not support_docker_api:
msg = 'Docker API version is %s. Minimum version required is %s to %s.'
msg = msg % (self.docker_api_version_str, data['docker_api_version'], usg)
msg = f"Docker API version is {self.docker_api_version_str}. Minimum version required is {data['docker_api_version']} to {usg}."
elif not support_docker_py:
msg = "Docker SDK for Python version is %s (%s's Python %s). Minimum version required is %s to %s. "
msg = (
f"Docker SDK for Python version is {docker_version} ({platform.node()}'s Python {sys.executable})."
f" Minimum version required is {data['docker_py_version']} to {usg}. "
)
if LooseVersion(data['docker_py_version']) < LooseVersion('2.0.0'):
msg += DOCKERPYUPGRADE_RECOMMEND_DOCKER
elif self.docker_py_version < LooseVersion('2.0.0'):
msg += DOCKERPYUPGRADE_SWITCH_TO_DOCKER
else:
msg += DOCKERPYUPGRADE_UPGRADE_DOCKER
msg = msg % (docker_version, platform.node(), sys.executable, data['docker_py_version'], usg)
else:
# should not happen
msg = 'Cannot %s with your configuration.' % (usg, )
msg = f'Cannot {usg} with your configuration.'
self.fail(msg)
def report_warnings(self, result, warnings_key=None):

View File

@ -59,7 +59,7 @@ def _get_tls_config(fail_function, **kwargs):
tls_config = TLSConfig(**kwargs)
return tls_config
except TLSParameterError as exc:
fail_function("TLS config error: %s" % exc)
fail_function(f"TLS config error: {exc}")
def is_using_tls(auth_data):
@ -115,14 +115,14 @@ class AnsibleDockerClientBase(Client):
except MissingRequirementException as exc:
self.fail(missing_required_lib(exc.requirement), exception=exc.import_exception)
except APIError as exc:
self.fail("Docker API error: %s" % exc)
self.fail(f"Docker API error: {exc}")
except Exception as exc:
self.fail("Error connecting: %s" % exc)
self.fail(f"Error connecting: {exc}")
self.docker_api_version = LooseVersion(self.docker_api_version_str)
min_docker_api_version = min_docker_api_version or '1.25'
if self.docker_api_version < LooseVersion(min_docker_api_version):
self.fail('Docker API version is %s. Minimum version required is %s.' % (self.docker_api_version_str, min_docker_api_version))
self.fail(f'Docker API version is {self.docker_api_version_str}. Minimum version required is {min_docker_api_version}.')
def log(self, msg, pretty_print=False):
pass
@ -219,23 +219,23 @@ class AnsibleDockerClientBase(Client):
def _handle_ssl_error(self, error):
match = re.match(r"hostname.*doesn\'t match (\'.*\')", str(error))
if match:
self.fail("You asked for verification that Docker daemons certificate's hostname matches %s. "
"The actual certificate's hostname is %s. Most likely you need to set DOCKER_TLS_HOSTNAME "
"or pass `tls_hostname` with a value of %s. You may also use TLS without verification by "
"setting the `tls` parameter to true."
% (self.auth_params['tls_hostname'], match.group(1), match.group(1)))
self.fail("SSL Exception: %s" % (error))
hostname = self.auth_params['tls_hostname']
self.fail(f"You asked for verification that Docker daemons certificate's hostname matches {hostname}. "
f"The actual certificate's hostname is {match.group(1)}. Most likely you need to set DOCKER_TLS_HOSTNAME "
f"or pass `tls_hostname` with a value of {match.group(1)}. You may also use TLS without verification by "
"setting the `tls` parameter to true.")
self.fail(f"SSL Exception: {error}")
def get_container_by_id(self, container_id):
try:
self.log("Inspecting container Id %s" % container_id)
self.log(f"Inspecting container Id {container_id}")
result = self.get_json('/containers/{0}/json', container_id)
self.log("Completed container inspection")
return result
except NotFound as dummy:
return None
except Exception as exc:
self.fail("Error inspecting container: %s" % exc)
self.fail(f"Error inspecting container: {exc}")
def get_container(self, name=None):
'''
@ -258,7 +258,7 @@ class AnsibleDockerClientBase(Client):
}
containers = self.get_json("/containers/json", params=params)
for container in containers:
self.log("testing container: %s" % (container['Names']))
self.log(f"testing container: {container['Names']}")
if isinstance(container['Names'], list) and search_name in container['Names']:
result = container
break
@ -271,7 +271,7 @@ class AnsibleDockerClientBase(Client):
except SSLError as exc:
self._handle_ssl_error(exc)
except Exception as exc:
self.fail("Error retrieving container list: %s" % exc)
self.fail(f"Error retrieving container list: {exc}")
if result is None:
return None
@ -291,7 +291,7 @@ class AnsibleDockerClientBase(Client):
try:
networks = self.get_json("/networks")
for network in networks:
self.log("testing network: %s" % (network['Name']))
self.log(f"testing network: {network['Name']}")
if name == network['Name']:
result = network
break
@ -301,20 +301,20 @@ class AnsibleDockerClientBase(Client):
except SSLError as exc:
self._handle_ssl_error(exc)
except Exception as exc:
self.fail("Error retrieving network list: %s" % exc)
self.fail(f"Error retrieving network list: {exc}")
if result is not None:
network_id = result['Id']
if network_id is not None:
try:
self.log("Inspecting network Id %s" % network_id)
self.log(f"Inspecting network Id {network_id}")
result = self.get_json('/networks/{0}', network_id)
self.log("Completed network inspection")
except NotFound as dummy:
return None
except Exception as exc:
self.fail("Error inspecting network: %s" % exc)
self.fail(f"Error inspecting network: {exc}")
return result
@ -336,10 +336,10 @@ class AnsibleDockerClientBase(Client):
params['filters'] = convert_filters({'reference': name})
images = self.get_json("/images/json", params=params)
except Exception as exc:
self.fail("Error searching for image %s - %s" % (name, str(exc)))
self.fail(f"Error searching for image {name} - {exc!s}")
if tag:
lookup = "%s:%s" % (name, tag)
lookup_digest = "%s@%s" % (name, tag)
lookup = f"{name}:{tag}"
lookup_digest = f"{name}@{tag}"
response = images
images = []
for image in response:
@ -357,7 +357,7 @@ class AnsibleDockerClientBase(Client):
if not name:
return None
self.log("Find image %s:%s" % (name, tag))
self.log(f"Find image {name}:{tag}")
images = self._image_lookup(name, tag)
if not images:
# In API <= 1.20 seeing 'docker.io/<name>' as the name of images pulled from docker hub
@ -365,40 +365,40 @@ class AnsibleDockerClientBase(Client):
if registry == 'docker.io':
# If docker.io is explicitly there in name, the image
# is not found in some cases (#41509)
self.log("Check for docker.io image: %s" % repo_name)
self.log(f"Check for docker.io image: {repo_name}")
images = self._image_lookup(repo_name, tag)
if not images and repo_name.startswith('library/'):
# Sometimes library/xxx images are not found
lookup = repo_name[len('library/'):]
self.log("Check for docker.io image: %s" % lookup)
self.log(f"Check for docker.io image: {lookup}")
images = self._image_lookup(lookup, tag)
if not images:
# Last case for some Docker versions: if docker.io was not there,
# it can be that the image was not found either
# (https://github.com/ansible/ansible/pull/15586)
lookup = "%s/%s" % (registry, repo_name)
self.log("Check for docker.io image: %s" % lookup)
lookup = f"{registry}/{repo_name}"
self.log(f"Check for docker.io image: {lookup}")
images = self._image_lookup(lookup, tag)
if not images and '/' not in repo_name:
# This seems to be happening with podman-docker
# (https://github.com/ansible-collections/community.docker/issues/291)
lookup = "%s/library/%s" % (registry, repo_name)
self.log("Check for docker.io image: %s" % lookup)
lookup = f"{registry}/library/{repo_name}"
self.log(f"Check for docker.io image: {lookup}")
images = self._image_lookup(lookup, tag)
if len(images) > 1:
self.fail("Daemon returned more than one result for %s:%s" % (name, tag))
self.fail(f"Daemon returned more than one result for {name}:{tag}")
if len(images) == 1:
try:
return self.get_json('/images/{0}/json', images[0]['Id'])
except NotFound:
self.log("Image %s:%s not found." % (name, tag))
self.log(f"Image {name}:{tag} not found.")
return None
except Exception as exc:
self.fail("Error inspecting image %s:%s - %s" % (name, tag, str(exc)))
self.fail(f"Error inspecting image {name}:{tag} - {exc!s}")
self.log("Image %s:%s not found." % (name, tag))
self.log(f"Image {name}:{tag} not found.")
return None
def find_image_by_id(self, image_id, accept_missing_image=False):
@ -408,22 +408,22 @@ class AnsibleDockerClientBase(Client):
if not image_id:
return None
self.log("Find image %s (by ID)" % image_id)
self.log(f"Find image {image_id} (by ID)")
try:
return self.get_json('/images/{0}/json', image_id)
except NotFound as exc:
if not accept_missing_image:
self.fail("Error inspecting image ID %s - %s" % (image_id, str(exc)))
self.log("Image %s not found." % image_id)
self.fail(f"Error inspecting image ID {image_id} - {exc!s}")
self.log(f"Image {image_id} not found.")
return None
except Exception as exc:
self.fail("Error inspecting image ID %s - %s" % (image_id, str(exc)))
self.fail(f"Error inspecting image ID {image_id} - {exc!s}")
def pull_image(self, name, tag="latest", platform=None):
'''
Pull an image
'''
self.log("Pulling image %s:%s" % (name, tag))
self.log(f"Pulling image {name}:{tag}")
old_tag = self.find_image(name, tag)
try:
repository, image_tag = parse_repository_tag(name)
@ -450,13 +450,11 @@ class AnsibleDockerClientBase(Client):
if line.get('error'):
if line.get('errorDetail'):
error_detail = line.get('errorDetail')
self.fail("Error pulling %s - code: %s message: %s" % (name,
error_detail.get('code'),
error_detail.get('message')))
self.fail(f"Error pulling {name} - code: {error_detail.get('code')} message: {error_detail.get('message')}")
else:
self.fail("Error pulling %s - %s" % (name, line.get('error')))
self.fail(f"Error pulling {name} - {line.get('error')}")
except Exception as exc:
self.fail("Error pulling image %s:%s - %s" % (name, tag, str(exc)))
self.fail(f"Error pulling image {name}:{tag} - {exc!s}")
new_tag = self.find_image(name, tag)
@ -547,13 +545,12 @@ class AnsibleDockerClient(AnsibleDockerClientBase):
if 'usage_msg' in data:
usg = data['usage_msg']
else:
usg = 'set %s option' % (option, )
usg = f'set {option} option'
if not support_docker_api:
msg = 'Docker API version is %s. Minimum version required is %s to %s.'
msg = msg % (self.docker_api_version_str, data['docker_api_version'], usg)
msg = f"Docker API version is {self.docker_api_version_str}. Minimum version required is {data['docker_api_version']} to {usg}."
else:
# should not happen
msg = 'Cannot %s with your configuration.' % (usg, )
msg = f'Cannot {usg} with your configuration.'
self.fail(msg)
def report_warnings(self, result, warnings_key=None):

View File

@ -90,7 +90,7 @@ class AnsibleDockerClientBase(object):
self.docker_api_version = LooseVersion(self.docker_api_version_str)
min_docker_api_version = min_docker_api_version or '1.25'
if self.docker_api_version < LooseVersion(min_docker_api_version):
self.fail('Docker API version is %s. Minimum version required is %s.' % (self.docker_api_version_str, min_docker_api_version))
self.fail(f'Docker API version is {self.docker_api_version_str}. Minimum version required is {min_docker_api_version}.')
else:
self.docker_api_version_str = None
self.docker_api_version = None
@ -184,8 +184,8 @@ class AnsibleDockerClientBase(object):
check_rc=True,
)
if tag:
lookup = "%s:%s" % (name, tag)
lookup_digest = "%s@%s" % (name, tag)
lookup = f"{name}:{tag}"
lookup_digest = f"{name}@{tag}"
response = images
images = []
for image in response:
@ -201,7 +201,7 @@ class AnsibleDockerClientBase(object):
if not name:
return None
self.log("Find image %s:%s" % (name, tag))
self.log(f"Find image {name}:{tag}")
images = self._image_lookup(name, tag)
if not images:
# In API <= 1.20 seeing 'docker.io/<name>' as the name of images pulled from docker hub
@ -209,40 +209,40 @@ class AnsibleDockerClientBase(object):
if registry == 'docker.io':
# If docker.io is explicitly there in name, the image
# is not found in some cases (#41509)
self.log("Check for docker.io image: %s" % repo_name)
self.log(f"Check for docker.io image: {repo_name}")
images = self._image_lookup(repo_name, tag)
if not images and repo_name.startswith('library/'):
# Sometimes library/xxx images are not found
lookup = repo_name[len('library/'):]
self.log("Check for docker.io image: %s" % lookup)
self.log(f"Check for docker.io image: {lookup}")
images = self._image_lookup(lookup, tag)
if not images:
# Last case for some Docker versions: if docker.io was not there,
# it can be that the image was not found either
# (https://github.com/ansible/ansible/pull/15586)
lookup = "%s/%s" % (registry, repo_name)
self.log("Check for docker.io image: %s" % lookup)
lookup = f"{registry}/{repo_name}"
self.log(f"Check for docker.io image: {lookup}")
images = self._image_lookup(lookup, tag)
if not images and '/' not in repo_name:
# This seems to be happening with podman-docker
# (https://github.com/ansible-collections/community.docker/issues/291)
lookup = "%s/library/%s" % (registry, repo_name)
self.log("Check for docker.io image: %s" % lookup)
lookup = f"{registry}/library/{repo_name}"
self.log(f"Check for docker.io image: {lookup}")
images = self._image_lookup(lookup, tag)
if len(images) > 1:
self.fail("Daemon returned more than one result for %s:%s" % (name, tag))
self.fail(f"Daemon returned more than one result for {name}:{tag}")
if len(images) == 1:
rc, image, stderr = self.call_cli_json('image', 'inspect', images[0]['ID'])
if not image:
self.log("Image %s:%s not found." % (name, tag))
self.log(f"Image {name}:{tag} not found.")
return None
if rc != 0:
self.fail("Error inspecting image %s:%s - %s" % (name, tag, to_native(stderr)))
self.fail(f"Error inspecting image {name}:{tag} - {to_native(stderr)}")
return image[0]
self.log("Image %s:%s not found." % (name, tag))
self.log(f"Image {name}:{tag} not found.")
return None
def find_image_by_id(self, image_id, accept_missing_image=False):
@ -252,15 +252,15 @@ class AnsibleDockerClientBase(object):
if not image_id:
return None
self.log("Find image %s (by ID)" % image_id)
self.log(f"Find image {image_id} (by ID)")
rc, image, stderr = self.call_cli_json('image', 'inspect', image_id)
if not image:
if not accept_missing_image:
self.fail("Error inspecting image ID %s - %s" % (image_id, to_native(stderr)))
self.log("Image %s not found." % image_id)
self.fail(f"Error inspecting image ID {image_id} - {to_native(stderr)}")
self.log(f"Image {image_id} not found.")
return None
if rc != 0:
self.fail("Error inspecting image ID %s - %s" % (image_id, to_native(stderr)))
self.fail(f"Error inspecting image ID {image_id} - {to_native(stderr)}")
return image[0]

View File

@ -681,7 +681,7 @@ class BaseComposeManager(DockerBaseClass):
with open(compose_file, 'wb') as f:
yaml.dump(parameters['definition'], f, encoding="utf-8", Dumper=_SafeDumper)
except Exception as exc:
self.fail("Error writing to %s - %s" % (compose_file, to_native(exc)))
self.fail(f"Error writing to {compose_file} - {exc}")
else:
self.project_src = os.path.abspath(parameters['project_src'])

View File

@ -156,7 +156,7 @@ def put_file(client, container, in_path, out_path, user_id, group_id, mode=None,
"""Transfer a file from local to Docker container."""
if not os.path.exists(to_bytes(in_path, errors='surrogate_or_strict')):
raise DockerFileNotFound(
"file or module does not exist: %s" % to_native(in_path))
f"file or module does not exist: {to_native(in_path)}")
b_in_path = to_bytes(in_path, errors='surrogate_or_strict')
@ -212,7 +212,7 @@ def stat_file(client, container, in_path, follow_links=False, log=None):
considered_in_paths.add(in_path)
if log:
log('FETCH: Stating "%s"' % in_path)
log(f'FETCH: Stating "{in_path}"')
response = client._head(
client._url('/containers/{0}/archive', container),
@ -288,7 +288,7 @@ def fetch_file_ex(client, container, in_path, process_none, process_regular, pro
considered_in_paths.add(in_path)
if log:
log('FETCH: Fetching "%s"' % in_path)
log(f'FETCH: Fetching "{in_path}"')
try:
stream = client.get_raw_stream(
'/containers/{0}/archive', container,
@ -318,7 +318,7 @@ def fetch_file_ex(client, container, in_path, process_none, process_regular, pro
return process_symlink(in_path, symlink_member)
in_path = os.path.join(os.path.split(in_path)[0], symlink_member.linkname)
if log:
log('FETCH: Following symbolic link to "%s"' % in_path)
log(f'FETCH: Following symbolic link to "{in_path}"')
continue
if found:
return result
@ -350,7 +350,7 @@ def fetch_file(client, container, in_path, out_path, follow_links=False, log=Non
return in_path
def process_other(in_path, member):
raise DockerFileCopyError('Remote file "%s" is not a regular file or a symbolic link' % in_path)
raise DockerFileCopyError(f'Remote file "{in_path}" is not a regular file or a symbolic link')
return fetch_file_ex(client, container, in_path, process_none, process_regular, process_symlink, process_other, follow_links=follow_links, log=log)

View File

@ -8,8 +8,6 @@ import json
import os
import tarfile
from ansible.module_utils.common.text.converters import to_native
class ImageArchiveManifestSummary(object):
'''
@ -45,7 +43,7 @@ def api_image_id(archive_image_id):
:rtype: str
'''
return 'sha256:%s' % archive_image_id
return f'sha256:{archive_image_id}'
def load_archived_image_manifest(archive_path):
@ -79,7 +77,7 @@ def load_archived_image_manifest(archive_path):
manifest = json.load(ef)
except Exception as exc:
raise ImageArchiveInvalidException(
"Failed to decode and deserialize manifest.json: %s" % to_native(exc)
f"Failed to decode and deserialize manifest.json: {exc}"
) from exc
if len(manifest) == 0:
@ -128,13 +126,13 @@ def load_archived_image_manifest(archive_path):
raise
except Exception as exc:
raise ImageArchiveInvalidException(
"Failed to extract manifest.json from tar file %s: %s" % (archive_path, to_native(exc))
f"Failed to extract manifest.json from tar file {archive_path}: {exc}"
) from exc
except ImageArchiveInvalidException:
raise
except Exception as exc:
raise ImageArchiveInvalidException("Failed to open tar file %s: %s" % (archive_path, to_native(exc))) from exc
raise ImageArchiveInvalidException(f"Failed to open tar file {archive_path}: {exc}") from exc
def archived_image_manifest(archive_path):
@ -162,5 +160,5 @@ def archived_image_manifest(archive_path):
if len(results) == 1:
return results[0]
raise ImageArchiveInvalidException(
"Expected to have one entry in manifest.json but found %s" % len(results)
f"Expected to have one entry in manifest.json but found {len(results)}"
)

View File

@ -12,7 +12,7 @@ import shlex
from functools import partial
from ansible.module_utils.common.text.converters import to_native, to_text
from ansible.module_utils.common.text.converters import to_text
from ansible.module_utils.common.text.formatters import human_to_bytes
from ansible_collections.community.docker.plugins.module_utils.util import (
@ -56,7 +56,7 @@ def _get_ansible_type(type):
if type == 'set':
return 'list'
if type not in ('list', 'dict', 'bool', 'int', 'float', 'str'):
raise Exception('Invalid type "%s"' % (type, ))
raise Exception(f'Invalid type "{type}"')
return type
@ -461,11 +461,11 @@ def _preprocess_env(module, values):
for name, value in values['env'].items():
if not isinstance(value, str):
module.fail_json(msg='Non-string value found for env option. Ambiguous env options must be '
'wrapped in quotes to avoid them being interpreted. Key: %s' % (name, ))
f'wrapped in quotes to avoid them being interpreted. Key: {name}')
final_env[name] = to_text(value, errors='surrogate_or_strict')
formatted_env = []
for key, value in final_env.items():
formatted_env.append('%s=%s' % (key, value))
formatted_env.append(f'{key}={value}')
return {
'env': formatted_env,
}
@ -491,7 +491,7 @@ def _preprocess_convert_to_bytes(module, values, name, unlimited_value=None):
values[name] = value
return values
except ValueError as exc:
module.fail_json(msg='Failed to convert %s to bytes: %s' % (name, to_native(exc)))
module.fail_json(msg=f'Failed to convert {name} to bytes: {exc}')
def _preprocess_mac_address(module, values):
@ -641,13 +641,13 @@ def _preprocess_mounts(module, values):
if re.match(r'[.~]', host):
host = os.path.abspath(os.path.expanduser(host))
check_collision(container, 'volumes')
new_vols.append("%s:%s:%s" % (host, container, mode))
new_vols.append(f"{host}:{container}:{mode}")
continue
elif len(parts) == 2:
if not _is_volume_permissions(parts[1]) and re.match(r'[.~]', parts[0]):
host = os.path.abspath(os.path.expanduser(parts[0]))
check_collision(parts[1], 'volumes')
new_vols.append("%s:%s:rw" % (host, parts[1]))
new_vols.append(f"{host}:{parts[1]}:rw")
continue
check_collision(parts[min(1, len(parts) - 1)], 'volumes')
new_vols.append(vol)
@ -665,7 +665,7 @@ def _preprocess_mounts(module, values):
if not _is_volume_permissions(parts[1]):
host, container, mode = (parts + ['rw'])
if host is not None:
new_binds.append('%s:%s:%s' % (host, container, mode))
new_binds.append(f'{host}:{container}:{mode}')
values['volume_binds'] = new_binds
return values
@ -690,12 +690,12 @@ def _preprocess_log(module, values):
options = {}
for k, v in values['log_options'].items():
if not isinstance(v, str):
value = to_text(v, errors='surrogate_or_strict')
module.warn(
"Non-string value found for log_options option '%s'. The value is automatically converted to '%s'. "
"If this is not correct, or you want to avoid such warnings, please quote the value." % (
k, to_text(v, errors='surrogate_or_strict'))
f"Non-string value found for log_options option '{k}'. The value is automatically converted to {value!r}. "
"If this is not correct, or you want to avoid such warnings, please quote the value."
)
v = to_text(v, errors='surrogate_or_strict')
v = value
options[k] = v
result['log_options'] = options
return result
@ -744,8 +744,8 @@ def _preprocess_ports(module, values):
port_binds = len(container_ports) * [(ipaddr,)]
else:
module.fail_json(
msg='Invalid port description "%s" - expected 1 to 3 colon-separated parts, but got %d. '
'Maybe you forgot to use square brackets ([...]) around an IPv6 address?' % (port, p_len)
msg=f'Invalid port description "{port}" - expected 1 to 3 colon-separated parts, but got {p_len}. '
'Maybe you forgot to use square brackets ([...]) around an IPv6 address?'
)
for bind, container_port in zip(port_binds, container_ports):

View File

@ -8,7 +8,7 @@ from __future__ import annotations
import json
import traceback
from ansible.module_utils.common.text.converters import to_native, to_text
from ansible.module_utils.common.text.converters import to_text
from ansible.module_utils.common.text.formatters import human_to_bytes
from ansible_collections.community.docker.plugins.module_utils.common_api import (
@ -123,7 +123,7 @@ def _get_ansible_type(type):
if type == 'set':
return 'list'
if type not in ('list', 'dict', 'bool', 'int', 'float', 'str'):
raise Exception('Invalid type "%s"' % (type, ))
raise Exception(f'Invalid type "{type}"')
return type
@ -248,8 +248,9 @@ class DockerAPIEngineDriver(EngineDriver):
value = normalize_links(value)
params[dest_para] = value
if parameters:
ups = ', '.join([f'"{p}"' for p in sorted(parameters)])
raise Exception(
'Unknown parameter(s) for connect_container_to_network for Docker API driver: %s' % (', '.join(['"%s"' % p for p in sorted(parameters)])))
f'Unknown parameter(s) for connect_container_to_network for Docker API driver: {ups}')
ipam_config = {}
for param in ('IPv4Address', 'IPv6Address'):
if param in params:
@ -307,7 +308,7 @@ class DockerAPIEngineDriver(EngineDriver):
output = client._get_result_tty(False, res, config['Config']['Tty'])
return output, True
else:
return "Result logged using `%s` driver" % logging_driver, False
return f"Result logged using `{logging_driver}` driver", False
def update_container(self, client, container_id, update_parameters):
result = client.post_json_to_json('/containers/{0}/update', container_id, data=update_parameters)
@ -343,13 +344,13 @@ class DockerAPIEngineDriver(EngineDriver):
# New docker daemon versions do not allow containers to be removed
# if they are paused. Make sure we do not end up in an infinite loop.
if count == 3:
raise Exception('%s [tried to unpause three times]' % to_native(exc))
raise Exception(f'{exc} [tried to unpause three times]')
count += 1
# Unpause
try:
self.unpause_container(client, container_id)
except Exception as exc2:
raise Exception('%s [while unpausing]' % to_native(exc2))
raise Exception(f'{exc2} [while unpausing]')
# Now try again
continue
raise
@ -369,13 +370,13 @@ class DockerAPIEngineDriver(EngineDriver):
# New docker daemon versions do not allow containers to be removed
# if they are paused. Make sure we do not end up in an infinite loop.
if count == 3:
raise Exception('%s [tried to unpause three times]' % to_native(exc))
raise Exception(f'{exc} [tried to unpause three times]')
count += 1
# Unpause
try:
self.unpause_container(client, container_id)
except Exception as exc2:
raise Exception('%s [while unpausing]' % to_native(exc2))
raise Exception(f'{exc2} [while unpausing]')
# Now try again
continue
if 'removal of container ' in exc.explanation and ' is already in progress' in exc.explanation:
@ -658,7 +659,7 @@ def _get_expected_env_value(module, client, api_version, image, value, sentry):
expected_env[parts[0]] = parts[1]
param_env = []
for key, env_value in expected_env.items():
param_env.append("%s=%s" % (key, env_value))
param_env.append(f"{key}={env_value}")
return param_env
@ -744,7 +745,7 @@ def _preprocess_etc_hosts(module, client, api_version, value):
return value
results = []
for key, value in value.items():
results.append('%s%s%s' % (key, ':', value))
results.append(f'{key}:{value}')
return results
@ -783,7 +784,7 @@ def _preprocess_convert_to_bytes(module, values, name, unlimited_value=None):
values[name] = value
return values
except ValueError as exc:
module.fail_json(msg='Failed to convert %s to bytes: %s' % (name, to_native(exc)))
module.fail_json(msg=f'Failed to convert {name} to bytes: {exc}')
def _get_image_labels(image):
@ -815,7 +816,7 @@ def _preprocess_links(module, client, api_version, value):
link, alias = parsed_link
else:
link, alias = parsed_link[0], parsed_link[0]
result.append('/%s:/%s/%s' % (link, module.params['name'], alias))
result.append(f"/{link}:/{module.params['name']}/{alias}")
return result
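The `module.params['name']` lookup above works because the replacement field uses a different quote character than the surrounding f-string; before Python 3.12 reusing the outer quote inside the field is a syntax error. A small sketch with made-up values:

params = {'name': 'web'}
link, alias = 'db', 'database'
assert f"/{link}:/{params['name']}/{alias}" == "/db:/web/database"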
@ -830,7 +831,7 @@ def _ignore_mismatching_label_result(module, client, api_version, option, image,
for label in image_labels:
if label not in labels_param:
# Format label for error message
would_remove_labels.append('"%s"' % (label, ))
would_remove_labels.append(f'"{label}"')
if would_remove_labels:
labels = ', '.join(would_remove_labels)
msg = ("Some labels should be removed but are present in the base image. You can set image_label_mismatch to 'ignore' to ignore"
@ -861,7 +862,7 @@ def _preprocess_network_values(module, client, api_version, options, values):
for network in values['networks']:
network['id'] = _get_network_id(module, client, network['name'])
if not network['id']:
client.fail("Parameter error: network named %s could not be found. Does it exist?" % (network['name'], ))
client.fail(f"Parameter error: network named {network['name']} could not be found. Does it exist?")
if 'network_mode' in values:
values['network_mode'] = _preprocess_container_names(module, client, api_version, values['network_mode'])
@ -879,7 +880,7 @@ def _get_network_id(module, client, network_name):
break
return network_id
except Exception as exc:
client.fail("Error getting network id for %s - %s" % (network_name, to_native(exc)))
client.fail(f"Error getting network id for {network_name} - {exc}")
def _get_values_network(module, container, api_version, options, image, host_info):
@ -948,7 +949,7 @@ def _get_bind_from_dict(volume_dict):
if isinstance(config, dict) and config.get('bind'):
container_path = config.get('bind')
mode = config.get('mode', 'rw')
results.append("%s:%s:%s" % (host_path, container_path, mode))
results.append(f"{host_path}:{container_path}:{mode}")
return results
@ -1134,7 +1135,7 @@ def _get_expected_values_platform(module, client, api_version, options, image, v
daemon_arch=host_info.get('Architecture') if host_info else None,
)
except ValueError as exc:
module.fail_json(msg='Error while parsing platform parameer: %s' % (to_native(exc), ))
module.fail_json(msg=f'Error while parsing platform parameter: {exc}')
return expected_values
@ -1204,7 +1205,7 @@ def _get_expected_values_ports(module, client, api_version, options, image, valu
expected_bound_ports = {}
for container_port, config in values['published_ports'].items():
if isinstance(container_port, int):
container_port = "%s/tcp" % container_port
container_port = f"{container_port}/tcp"
if len(config) == 1:
if isinstance(config[0], int):
expected_bound_ports[container_port] = [{'HostIp': "0.0.0.0", 'HostPort': config[0]}]
@ -1244,7 +1245,7 @@ def _set_values_ports(module, data, api_version, options, values):
if len(port_definition) == 2:
proto = port_definition[1]
port = port_definition[0]
exposed_ports['%s/%s' % (port, proto)] = {}
exposed_ports[f'{port}/{proto}'] = {}
data['ExposedPorts'] = exposed_ports
if 'published_ports' in values:
if 'HostConfig' not in data:

View File

@ -8,7 +8,7 @@ from __future__ import annotations
import re
from time import sleep
from ansible.module_utils.common.text.converters import to_native, to_text
from ansible.module_utils.common.text.converters import to_text
from ansible_collections.community.docker.plugins.module_utils.util import (
DifferenceTracker,
@ -157,23 +157,23 @@ class ContainerManager(DockerBaseClass):
key_main = comp_aliases.get(key)
if key_main is None:
if key_main in all_module_options:
self.fail("The module option '%s' cannot be specified in the comparisons dict, "
"since it does not correspond to container's state!" % key)
self.fail(f"The module option '{key}' cannot be specified in the comparisons dict, "
"since it does not correspond to container's state!")
if key not in self.all_options or self.all_options[key].not_an_ansible_option:
self.fail("Unknown module option '%s' in comparisons dict!" % key)
self.fail(f"Unknown module option '{key}' in comparisons dict!")
key_main = key
if key_main in comp_aliases_used:
self.fail("Both '%s' and '%s' (aliases of %s) are specified in comparisons dict!" % (key, comp_aliases_used[key_main], key_main))
self.fail(f"Both '{key}' and '{comp_aliases_used[key_main]}' (aliases of {key_main}) are specified in comparisons dict!")
comp_aliases_used[key_main] = key
# Check value and update accordingly
if value in ('strict', 'ignore'):
self.all_options[key_main].comparison = value
elif value == 'allow_more_present':
if self.all_options[key_main].comparison_type == 'value':
self.fail("Option '%s' is a value and not a set/list/dict, so its comparison cannot be %s" % (key, value))
self.fail(f"Option '{key}' is a value and not a set/list/dict, so its comparison cannot be {value}")
self.all_options[key_main].comparison = value
else:
self.fail("Unknown comparison mode '%s'!" % value)
self.fail(f"Unknown comparison mode '{value}'!")
# Copy values
for option in self.all_options.values():
if option.copy_comparison_from is not None:
@ -373,9 +373,7 @@ class ContainerManager(DockerBaseClass):
else:
self.engine_driver.unpause_container(self.client, container.id)
except Exception as exc:
self.fail("Error %s container %s: %s" % (
"pausing" if self.param_paused else "unpausing", container.id, to_native(exc)
))
self.fail(f"Error {'pausing' if self.param_paused else 'unpausing'} container {container.id}: {exc}")
container = self._get_container(container.id)
self.results['changed'] = True
self.results['actions'].append(dict(set_paused=self.param_paused))
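The pausing/unpausing message above folds the old two-element `%` tuple into a conditional expression inside the f-string; the same pattern in isolation (hypothetical function name):

def action_word(paused):
    return f"Error {'pausing' if paused else 'unpausing'} container"

assert action_word(True) == "Error pausing container"
assert action_word(False) == "Error unpausing container"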
@ -440,14 +438,14 @@ class ContainerManager(DockerBaseClass):
if is_image_name_id(image_parameter):
image = self.engine_driver.inspect_image_by_id(self.client, image_parameter)
if image is None:
self.client.fail("Cannot find image with ID %s" % (image_parameter, ))
self.client.fail(f"Cannot find image with ID {image_parameter}")
else:
repository, tag = parse_repository_tag(image_parameter)
if not tag:
tag = "latest"
image = self.engine_driver.inspect_image_by_name(self.client, repository, tag)
if not image and self.param_pull == "never":
self.client.fail("Cannot find image with name %s:%s, and pull=never" % (repository, tag))
self.client.fail(f"Cannot find image with name {repository}:{tag}, and pull=never")
if not image or self.param_pull == "always":
if not self.check_mode:
self.log("Pull the image.")
@ -455,16 +453,16 @@ class ContainerManager(DockerBaseClass):
self.client, repository, tag, platform=self.module.params['platform'])
if alreadyToLatest:
self.results['changed'] = False
self.results['actions'].append(dict(pulled_image="%s:%s" % (repository, tag), changed=False))
self.results['actions'].append(dict(pulled_image=f"{repository}:{tag}", changed=False))
else:
self.results['changed'] = True
self.results['actions'].append(dict(pulled_image="%s:%s" % (repository, tag), changed=True))
self.results['actions'].append(dict(pulled_image=f"{repository}:{tag}", changed=True))
elif not image or self.param_pull_check_mode_behavior == 'always':
# If the image is not there, or pull_check_mode_behavior == 'always', claim we'll
# pull. (Implicitly: if the image is there, claim it already was latest unless
# pull_check_mode_behavior == 'always'.)
self.results['changed'] = True
action = dict(pulled_image="%s:%s" % (repository, tag))
action = dict(pulled_image=f"{repository}:{tag}")
if not image:
action['changed'] = True
self.results['actions'].append(action)
@ -620,7 +618,7 @@ class ContainerManager(DockerBaseClass):
if network.get('links'):
expected_links = []
for link, alias in network['links']:
expected_links.append("%s:%s" % (link, alias))
expected_links.append(f"{link}:{alias}")
if not compare_generic(expected_links, network_info.get('Links'), 'allow_more_present', 'set'):
diff = True
if network.get('mac_address') and network['mac_address'] != network_info.get('MacAddress'):
@ -707,18 +705,17 @@ class ContainerManager(DockerBaseClass):
try:
self.engine_driver.disconnect_container_from_network(self.client, container.id, diff['parameter']['id'])
except Exception as exc:
self.fail("Error disconnecting container from network %s - %s" % (diff['parameter']['name'],
to_native(exc)))
self.fail(f"Error disconnecting container from network {diff['parameter']['name']} - {exc}")
# connect to the network
self.results['actions'].append(dict(added_to_network=diff['parameter']['name'], network_parameters=diff['parameter']))
if not self.check_mode:
params = {key: value for key, value in diff['parameter'].items() if key not in ('id', 'name')}
try:
self.log("Connecting container to network %s" % diff['parameter']['id'])
self.log(f"Connecting container to network {diff['parameter']['id']}")
self.log(params, pretty_print=True)
self.engine_driver.connect_container_to_network(self.client, container.id, diff['parameter']['id'], params)
except Exception as exc:
self.fail("Error connecting container to network %s - %s" % (diff['parameter']['name'], to_native(exc)))
self.fail(f"Error connecting container to network {diff['parameter']['name']} - {exc}")
return self._get_container(container.id)
def _purge_networks(self, container, networks):
@ -728,14 +725,13 @@ class ContainerManager(DockerBaseClass):
try:
self.engine_driver.disconnect_container_from_network(self.client, container.id, network['name'])
except Exception as exc:
self.fail("Error disconnecting container from network %s - %s" % (network['name'],
to_native(exc)))
self.fail(f"Error disconnecting container from network {network['name']} - {exc}")
return self._get_container(container.id)
def container_create(self, image):
create_parameters = self._compose_create_parameters(image)
self.log("create container")
self.log("image: %s parameters:" % image)
self.log(f"image: {image} parameters:")
self.log(create_parameters, pretty_print=True)
networks = {}
if self.param_networks_cli_compatible and self.module.params['networks']:
@@ -754,19 +750,19 @@ class ContainerManager(DockerBaseClass):
try:
container_id = self.engine_driver.create_container(self.client, self.param_name, create_parameters, networks=networks)
except Exception as exc:
self.fail("Error creating container: %s" % to_native(exc))
self.fail(f"Error creating container: {exc}")
return self._get_container(container_id)
return new_container
def container_start(self, container_id):
self.log("start container %s" % (container_id))
self.log(f"start container {container_id}")
self.results['actions'].append(dict(started=container_id))
self.results['changed'] = True
if not self.check_mode:
try:
self.engine_driver.start_container(self.client, container_id)
except Exception as exc:
self.fail("Error starting container %s: %s" % (container_id, to_native(exc)))
self.fail(f"Error starting container {container_id}: {exc}")
if self.module.params['detach'] is False:
status = self.engine_driver.wait_for_container(self.client, container_id)
@@ -798,18 +794,18 @@ class ContainerManager(DockerBaseClass):
def container_remove(self, container_id, link=False, force=False):
volume_state = (not self.param_keep_volumes)
self.log("remove container container:%s v:%s link:%s force%s" % (container_id, volume_state, link, force))
self.log(f"remove container container:{container_id} v:{volume_state} link:{link} force{force}")
self.results['actions'].append(dict(removed=container_id, volume_state=volume_state, link=link, force=force))
self.results['changed'] = True
if not self.check_mode:
try:
self.engine_driver.remove_container(self.client, container_id, remove_volumes=volume_state, link=link, force=force)
except Exception as exc:
self.client.fail("Error removing container %s: %s" % (container_id, to_native(exc)))
self.client.fail(f"Error removing container {container_id}: {exc}")
def container_update(self, container_id, update_parameters):
if update_parameters:
self.log("update container %s" % (container_id))
self.log(f"update container {container_id}")
self.log(update_parameters, pretty_print=True)
self.results['actions'].append(dict(updated=container_id, update_parameters=update_parameters))
self.results['changed'] = True
@@ -817,7 +813,7 @@ class ContainerManager(DockerBaseClass):
try:
self.engine_driver.update_container(self.client, container_id, update_parameters)
except Exception as exc:
self.fail("Error updating container %s: %s" % (container_id, to_native(exc)))
self.fail(f"Error updating container {container_id}: {exc}")
return self._get_container(container_id)
def container_kill(self, container_id):
@@ -827,7 +823,7 @@ class ContainerManager(DockerBaseClass):
try:
self.engine_driver.kill_container(self.client, container_id, kill_signal=self.param_kill_signal)
except Exception as exc:
self.fail("Error killing container %s: %s" % (container_id, to_native(exc)))
self.fail(f"Error killing container {container_id}: {exc}")
def container_restart(self, container_id):
self.results['actions'].append(dict(restarted=container_id, timeout=self.module.params['stop_timeout']))
@@ -836,7 +832,7 @@ class ContainerManager(DockerBaseClass):
try:
self.engine_driver.restart_container(self.client, container_id, self.module.params['stop_timeout'] or 10)
except Exception as exc:
self.fail("Error restarting container %s: %s" % (container_id, to_native(exc)))
self.fail(f"Error restarting container {container_id}: {exc}")
return self._get_container(container_id)
def container_stop(self, container_id):
@@ -849,7 +845,7 @@ class ContainerManager(DockerBaseClass):
try:
self.engine_driver.stop_container(self.client, container_id, self.module.params['stop_timeout'])
except Exception as exc:
self.fail("Error stopping container %s: %s" % (container_id, to_native(exc)))
self.fail(f"Error stopping container {container_id}: {exc}")
def run_module(engine_driver):

View File

@@ -15,8 +15,6 @@ except ImportError:
# missing Docker SDK for Python handled in ansible.module_utils.docker.common
pass
from ansible.module_utils.common.text.converters import to_native
from ansible_collections.community.docker.plugins.module_utils.version import LooseVersion
from ansible_collections.community.docker.plugins.module_utils.common import AnsibleDockerClient
@@ -38,7 +36,7 @@ class AnsibleDockerSwarmClient(AnsibleDockerClient):
try:
info = self.info()
except APIError as exc:
self.fail("Failed to get node information for %s" % to_native(exc))
self.fail(f"Failed to get node information for {exc}")
if info:
json_str = json.dumps(info, ensure_ascii=False)
@@ -166,9 +164,9 @@ class AnsibleDockerSwarmClient(AnsibleDockerClient):
if exc.status_code == 404:
if skip_missing:
return None
self.fail("Error while reading from Swarm manager: %s" % to_native(exc))
self.fail(f"Error while reading from Swarm manager: {exc}")
except Exception as exc:
self.fail("Error inspecting swarm node: %s" % exc)
self.fail(f"Error inspecting swarm node: {exc}")
json_str = json.dumps(node_info, ensure_ascii=False)
node_info = json.loads(json_str)
@@ -197,9 +195,9 @@ class AnsibleDockerSwarmClient(AnsibleDockerClient):
except APIError as exc:
if exc.status_code == 503:
self.fail("Cannot inspect node: To inspect node execute module on Swarm Manager")
self.fail("Error while reading from Swarm manager: %s" % to_native(exc))
self.fail(f"Error while reading from Swarm manager: {exc}")
except Exception as exc:
self.fail("Error inspecting swarm node: %s" % exc)
self.fail(f"Error inspecting swarm node: {exc}")
json_str = json.dumps(node_info, ensure_ascii=False)
node_info = json.loads(json_str)
@@ -265,15 +263,15 @@ class AnsibleDockerSwarmClient(AnsibleDockerClient):
service_info = self.inspect_service(service_id)
except NotFound as exc:
if skip_missing is False:
self.fail("Error while reading from Swarm manager: %s" % to_native(exc))
self.fail(f"Error while reading from Swarm manager: {exc}")
else:
return None
except APIError as exc:
if exc.status_code == 503:
self.fail("Cannot inspect service: To inspect service execute module on Swarm Manager")
self.fail("Error inspecting swarm service: %s" % exc)
self.fail(f"Error inspecting swarm service: {exc}")
except Exception as exc:
self.fail("Error inspecting swarm service: %s" % exc)
self.fail(f"Error inspecting swarm service: {exc}")
json_str = json.dumps(service_info, ensure_ascii=False)
service_info = json.loads(json_str)

View File

@@ -36,7 +36,7 @@ DOCKER_COMMON_ARGS = dict(
)
DOCKER_COMMON_ARGS_VARS = dict([
[option_name, 'ansible_docker_%s' % option_name]
[option_name, f'ansible_docker_{option_name}']
for option_name in DOCKER_COMMON_ARGS
if option_name != 'debug'
])
@@ -324,7 +324,7 @@ def convert_duration_to_nanosecond(time_str):
Return time duration in nanosecond.
"""
if not isinstance(time_str, str):
raise ValueError('Missing unit in duration - %s' % time_str)
raise ValueError(f'Missing unit in duration - {time_str}')
regex = re.compile(
r'^(((?P<hours>\d+)h)?'
@@ -336,7 +336,7 @@ def convert_duration_to_nanosecond(time_str):
parts = regex.match(time_str)
if not parts:
raise ValueError('Invalid time duration - %s' % time_str)
raise ValueError(f'Invalid time duration - {time_str}')
parts = parts.groupdict()
time_params = {}
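Both ValueError branches above are reachable from module options; a rough sketch of when each fires (option values illustrative):

    # An unquoted YAML duration such as `90` arrives as an int and trips the type check:
    convert_duration_to_nanosecond(90)       # ValueError: Missing unit in duration - 90
    # A quoted duration with a unit is matched by the regex that follows:
    convert_duration_to_nanosecond('90s')    # 90 seconds expressed in nanoseconds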

View File

@@ -476,17 +476,17 @@ class ServicesManager(BaseComposeManager):
self.wait_timeout = parameters['wait_timeout']
self.yes = parameters['assume_yes']
if self.compose_version < LooseVersion('2.32.0') and self.yes:
self.fail('assume_yes=true needs Docker Compose 2.32.0 or newer, not version %s' % (self.compose_version, ))
self.fail(f'assume_yes=true needs Docker Compose 2.32.0 or newer, not version {self.compose_version}')
for key, value in self.scale.items():
if not isinstance(key, str):
self.fail('The key %s for `scale` is not a string' % repr(key))
self.fail(f'The key {key!r} for `scale` is not a string')
try:
value = check_type_int(value)
except TypeError as exc:
self.fail('The value %s for `scale[%s]` is not an integer' % (repr(value), repr(key)))
self.fail(f'The value {value!r} for `scale[{key!r}]` is not an integer')
if value < 0:
self.fail('The value %s for `scale[%s]` is negative' % (repr(value), repr(key)))
self.fail(f'The value {value!r} for `scale[{key!r}]` is negative')
self.scale[key] = value
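The `repr(...)` interpolations above turn into `!r` conversions, which produce the same text; a standalone comparison (plain Python, not module code):

    key, value = 'web', 'two'
    assert f'The key {key!r} for `scale` is not a string' == 'The key %s for `scale` is not a string' % repr(key)
    # !r applies repr(), so string values keep their quotes in the message:
    # "The value 'two' for `scale['web']` is not an integer"
    print(f'The value {value!r} for `scale[{key!r}]` is not an integer')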
def run(self):
@@ -519,13 +519,13 @@ class ServicesManager(BaseComposeManager):
if not self.dependencies:
args.append('--no-deps')
if self.timeout is not None:
args.extend(['--timeout', '%d' % self.timeout])
args.extend(['--timeout', f'{self.timeout}'])
if self.build == 'always':
args.append('--build')
elif self.build == 'never':
args.append('--no-build')
for key, value in sorted(self.scale.items()):
args.extend(['--scale', '%s=%d' % (key, value)])
args.extend(['--scale', f'{key}={value}'])
if self.wait:
args.append('--wait')
if self.wait_timeout is not None:
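One behavioural nuance in this hunk: `'%d' % value` formats (and would reject) non-integers, while `f'{value}'` simply applies str(). Since the timeout and the scale counts are integers here (the scale values pass through `check_type_int` above), the rendered CLI arguments are unchanged; a quick check:

    timeout, service, replicas = 30, 'web', 3
    assert f'{timeout}' == '%d' % timeout == '30'
    assert f'{service}={replicas}' == '%s=%d' % (service, replicas) == 'web=3'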
@@ -556,7 +556,7 @@ class ServicesManager(BaseComposeManager):
def get_stop_cmd(self, dry_run):
args = self.get_base_args() + ['stop']
if self.timeout is not None:
args.extend(['--timeout', '%d' % self.timeout])
args.extend(['--timeout', f'{self.timeout}'])
if dry_run:
args.append('--dry-run')
args.append('--')
@@ -609,7 +609,7 @@ class ServicesManager(BaseComposeManager):
if not self.dependencies:
args.append('--no-deps')
if self.timeout is not None:
args.extend(['--timeout', '%d' % self.timeout])
args.extend(['--timeout', f'{self.timeout}'])
if dry_run:
args.append('--dry-run')
args.append('--')
@@ -636,7 +636,7 @@ class ServicesManager(BaseComposeManager):
if self.remove_volumes:
args.append('--volumes')
if self.timeout is not None:
args.extend(['--timeout', '%d' % self.timeout])
args.extend(['--timeout', f'{self.timeout}'])
if dry_run:
args.append('--dry-run')
args.append('--')

View File

@@ -211,7 +211,7 @@ class ExecManager(BaseComposeManager):
if not isinstance(value, str):
self.fail(
"Non-string value found for env option. Ambiguous env options must be "
"wrapped in quotes to avoid them being interpreted. Key: %s" % (name, )
f"wrapped in quotes to avoid them being interpreted. Key: {name}"
)
self.env[name] = to_text(value, errors='surrogate_or_strict')
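For context on the error message above: an unquoted value in the playbook is typed by the YAML parser before it ever reaches this check. A small, self-contained illustration of the two cases (PyYAML used only to mimic what Ansible's parser does):

    import yaml

    yaml.safe_load('RETRIES: 3')      # {'RETRIES': 3}   -> int, rejected with the message above
    yaml.safe_load('RETRIES: "3"')    # {'RETRIES': '3'} -> str, accepted and passed through to_text()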

View File

@@ -297,7 +297,7 @@ class ExecManager(BaseComposeManager):
if not isinstance(value, str):
self.fail(
"Non-string value found for env option. Ambiguous env options must be "
"wrapped in quotes to avoid them being interpreted. Key: %s" % (name, )
f"wrapped in quotes to avoid them being interpreted. Key: {name}"
)
self.env[name] = to_text(value, errors='surrogate_or_strict')

View File

@@ -214,7 +214,7 @@ from ansible_collections.community.docker.plugins.module_utils.util import (
compare_generic,
sanitize_labels,
)
from ansible.module_utils.common.text.converters import to_native, to_bytes
from ansible.module_utils.common.text.converters import to_bytes
class ConfigManager(DockerBaseClass):
@@ -281,7 +281,7 @@ class ConfigManager(DockerBaseClass):
try:
configs = self.client.configs(filters={'name': self.name})
except APIError as exc:
self.client.fail("Error accessing config %s: %s" % (self.name, to_native(exc)))
self.client.fail(f"Error accessing config {self.name}: {exc}")
if self.rolling_versions:
self.configs = [
@@ -320,7 +320,7 @@ class ConfigManager(DockerBaseClass):
config_id = self.client.create_config(self.name, self.data, labels=labels, **kwargs)
self.configs += self.client.configs(filters={'id': config_id})
except APIError as exc:
self.client.fail("Error creating config: %s" % to_native(exc))
self.client.fail(f"Error creating config: {exc}")
if isinstance(config_id, dict):
config_id = config_id['ID']
@@ -332,7 +332,7 @@ class ConfigManager(DockerBaseClass):
if not self.check_mode:
self.client.remove_config(config['ID'])
except APIError as exc:
self.client.fail("Error removing config %s: %s" % (config['Spec']['Name'], to_native(exc)))
self.client.fail(f"Error removing config {config['Spec']['Name']}: {exc}")
def present(self):
''' Handles state == 'present', creating or updating the config '''

View File

@@ -228,7 +228,7 @@ def main():
if not isinstance(value, str):
client.module.fail_json(
msg="Non-string value found for env option. Ambiguous env options must be "
"wrapped in quotes to avoid them being interpreted. Key: %s" % (name, ))
f"wrapped in quotes to avoid them being interpreted. Key: {name}")
env[name] = to_text(value, errors='surrogate_or_strict')
if command is not None:

View File

@@ -213,8 +213,6 @@ disk_usage:
import traceback
from ansible.module_utils.common.text.converters import to_native
from ansible_collections.community.docker.plugins.module_utils.common_api import (
AnsibleDockerClient,
RequestException,
@@ -259,7 +257,7 @@ class DockerHostManager(DockerBaseClass):
try:
return self.client.info()
except APIError as exc:
self.client.fail("Error inspecting docker host: %s" % to_native(exc))
self.client.fail(f"Error inspecting docker host: {exc}")
def get_docker_disk_usage_facts(self):
try:
@@ -268,7 +266,7 @@ class DockerHostManager(DockerBaseClass):
else:
return dict(LayersSize=self.client.df()['LayersSize'])
except APIError as exc:
self.client.fail("Error inspecting docker host: %s" % to_native(exc))
self.client.fail(f"Error inspecting docker host: {exc}")
def get_docker_items_list(self, docker_object=None, filters=None, verbose=False):
items = None
@@ -311,7 +309,7 @@ class DockerHostManager(DockerBaseClass):
items = self.client.get_json('/volumes', params=params)
items = items['Volumes']
except APIError as exc:
self.client.fail("Error inspecting docker host for object '%s': %s" % (docker_object, to_native(exc)))
self.client.fail(f"Error inspecting docker host for object '{docker_object}': {exc}")
if self.verbose_output:
return items

View File

@@ -417,7 +417,7 @@ def convert_to_bytes(value, module, name, unlimited_value=None):
return unlimited_value
return human_to_bytes(value)
except ValueError as exc:
module.fail_json(msg='Failed to convert %s to bytes: %s' % (name, to_native(exc)))
module.fail_json(msg=f'Failed to convert {name} to bytes: {exc}')
class ImageManager(DockerBaseClass):
@@ -485,9 +485,9 @@ class ImageManager(DockerBaseClass):
# Sanity check: fail early when we know that something will fail later
if self.repository and is_image_name_id(self.repository):
self.fail("`repository` must not be an image ID; got: %s" % self.repository)
self.fail(f"`repository` must not be an image ID; got: {self.repository}")
if not self.repository and self.push and is_image_name_id(self.name):
self.fail("Cannot push an image by ID; specify `repository` to tag and push the image with ID %s instead" % self.name)
self.fail(f"Cannot push an image by ID; specify `repository` to tag and push the image with ID {self.name} instead")
if self.state == 'present':
self.present()
@@ -512,16 +512,16 @@ class ImageManager(DockerBaseClass):
if not image or self.force_source:
if self.source == 'build':
if is_image_name_id(self.name):
self.fail("Image name must not be an image ID for source=build; got: %s" % self.name)
self.fail(f"Image name must not be an image ID for source=build; got: {self.name}")
# Build the image
if not os.path.isdir(self.build_path):
self.fail("Requested build path %s could not be found or you do not have access." % self.build_path)
self.fail(f"Requested build path {self.build_path} could not be found or you do not have access.")
image_name = self.name
if self.tag:
image_name = "%s:%s" % (self.name, self.tag)
self.log("Building image %s" % image_name)
self.results['actions'].append("Built image %s from %s" % (image_name, self.build_path))
image_name = f"{self.name}:{self.tag}"
self.log(f"Building image {image_name}")
self.results['actions'].append(f"Built image {image_name} from {self.build_path}")
self.results['changed'] = True
if not self.check_mode:
self.results.update(self.build_image())
@@ -529,21 +529,20 @@ class ImageManager(DockerBaseClass):
elif self.source == 'load':
# Load the image from an archive
if not os.path.isfile(self.load_path):
self.fail("Error loading image %s. Specified path %s does not exist." % (self.name,
self.load_path))
self.fail(f"Error loading image {self.name}. Specified path {self.load_path} does not exist.")
image_name = self.name
if self.tag and not is_image_name_id(image_name):
image_name = "%s:%s" % (self.name, self.tag)
self.results['actions'].append("Loaded image %s from %s" % (image_name, self.load_path))
image_name = f"{self.name}:{self.tag}"
self.results['actions'].append(f"Loaded image {image_name} from {self.load_path}")
self.results['changed'] = True
if not self.check_mode:
self.results['image'] = self.load_image()
elif self.source == 'pull':
if is_image_name_id(self.name):
self.fail("Image name must not be an image ID for source=pull; got: %s" % self.name)
self.fail(f"Image name must not be an image ID for source=pull; got: {self.name}")
# pull the image
self.results['actions'].append('Pulled image %s:%s' % (self.name, self.tag))
self.results['actions'].append(f'Pulled image {self.name}:{self.tag}')
self.results['changed'] = True
if not self.check_mode:
self.results['image'], dummy = self.client.pull_image(self.name, tag=self.tag, platform=self.pull_platform)
@@ -551,8 +550,8 @@ class ImageManager(DockerBaseClass):
if image is None:
name = self.name
if self.tag and not is_image_name_id(name):
name = "%s:%s" % (self.name, self.tag)
self.client.fail('Cannot find the image %s locally.' % name)
name = f"{self.name}:{self.tag}"
self.client.fail(f'Cannot find the image {name} locally.')
if not self.check_mode and image and image['Id'] == self.results['image']['Id']:
self.results['changed'] = False
else:
@@ -578,7 +577,7 @@ class ImageManager(DockerBaseClass):
else:
image = self.client.find_image(name, self.tag)
if self.tag:
name = "%s:%s" % (self.name, self.tag)
name = f"{self.name}:{self.tag}"
if image:
if not self.check_mode:
try:
@@ -587,10 +586,10 @@ class ImageManager(DockerBaseClass):
# If the image vanished while we were trying to remove it, do not fail
pass
except Exception as exc:
self.fail("Error removing image %s - %s" % (name, to_native(exc)))
self.fail(f"Error removing image {name} - {exc}")
self.results['changed'] = True
self.results['actions'].append("Removed image %s" % (name))
self.results['actions'].append(f"Removed image {name}")
self.results['image']['state'] = 'Deleted'
@staticmethod
@@ -612,12 +611,12 @@ class ImageManager(DockerBaseClass):
'''
def build_msg(reason):
return 'Archived image %s to %s, %s' % (current_image_name, archive_path, reason)
return f'Archived image {current_image_name} to {archive_path}, {reason}'
try:
archived = archived_image_manifest(archive_path)
except ImageArchiveInvalidException as exc:
failure_logger('Unable to extract manifest summary from archive: %s' % to_native(exc))
failure_logger(f'Unable to extract manifest summary from archive: {exc}')
return build_msg('overwriting an unreadable archive file')
if archived is None:
@@ -627,7 +626,7 @@ class ImageManager(DockerBaseClass):
else:
name = ', '.join(archived.repo_tags)
return build_msg('overwriting archive with image %s named %s' % (archived.image_id, name))
return build_msg(f'overwriting archive with image {archived.image_id} named {name}')
def archive_image(self, name, tag):
'''
@@ -647,10 +646,10 @@ class ImageManager(DockerBaseClass):
image_name = name
else:
image = self.client.find_image(name=name, tag=tag)
image_name = "%s:%s" % (name, tag)
image_name = f"{name}:{tag}"
if not image:
self.log("archive image: image %s not found" % image_name)
self.log(f"archive image: image {image_name} not found")
return
# Will have a 'sha256:' prefix
@@ -664,7 +663,7 @@ class ImageManager(DockerBaseClass):
self.results['changed'] = action is not None
if (not self.check_mode) and self.results['changed']:
self.log("Getting archive of image %s" % image_name)
self.log(f"Getting archive of image {image_name}")
try:
saved_image = self.client._stream_raw_result(
self.client._get(self.client._url('/images/{0}/get', image_name), stream=True),
@@ -672,14 +671,14 @@ class ImageManager(DockerBaseClass):
False,
)
except Exception as exc:
self.fail("Error getting image %s - %s" % (image_name, to_native(exc)))
self.fail(f"Error getting image {image_name} - {exc}")
try:
with open(self.archive_path, 'wb') as fd:
for chunk in saved_image:
fd.write(chunk)
except Exception as exc:
self.fail("Error writing image archive %s - %s" % (self.archive_path, to_native(exc)))
self.fail(f"Error writing image archive {self.archive_path} - {exc}")
self.results['image'] = image
@@ -693,17 +692,17 @@ class ImageManager(DockerBaseClass):
'''
if is_image_name_id(name):
self.fail("Cannot push an image ID: %s" % name)
self.fail(f"Cannot push an image ID: {name}")
repository = name
if not tag:
repository, tag = parse_repository_tag(name)
registry, repo_name = resolve_repository_name(repository)
self.log("push %s to %s/%s:%s" % (self.name, registry, repo_name, tag))
self.log(f"push {self.name} to {registry}/{repo_name}:{tag}")
if registry:
self.results['actions'].append("Pushed image %s to %s/%s:%s" % (self.name, registry, repo_name, tag))
self.results['actions'].append(f"Pushed image {self.name} to {registry}/{repo_name}:{tag}")
self.results['changed'] = True
if not self.check_mode:
status = None
@@ -740,12 +739,10 @@ class ImageManager(DockerBaseClass):
except Exception as exc:
if 'unauthorized' in str(exc):
if 'authentication required' in str(exc):
self.fail("Error pushing image %s/%s:%s - %s. Try logging into %s first." %
(registry, repo_name, tag, to_native(exc), registry))
self.fail(f"Error pushing image {registry}/{repo_name}:{tag} - {exc}. Try logging into {registry} first.")
else:
self.fail("Error pushing image %s/%s:%s - %s. Does the repository exist?" %
(registry, repo_name, tag, str(exc)))
self.fail("Error pushing image %s: %s" % (repository, to_native(exc)))
self.fail(f"Error pushing image {registry}/{repo_name}:{tag} - {exc}. Does the repository exist?")
self.fail(f"Error pushing image {repository}: {exc}")
self.results['image'] = self.client.find_image(name=repository, tag=tag)
if not self.results['image']:
self.results['image'] = dict()
@@ -768,15 +765,15 @@ class ImageManager(DockerBaseClass):
repo_tag = tag
image = self.client.find_image(name=repo, tag=repo_tag)
found = 'found' if image else 'not found'
self.log("image %s was %s" % (repo, found))
self.log(f"image {repo} was {found}")
if not image or self.force_tag:
image_name = name
if not is_image_name_id(name) and tag and not name.endswith(':' + tag):
image_name = "%s:%s" % (name, tag)
self.log("tagging %s to %s:%s" % (image_name, repo, repo_tag))
image_name = f"{name}:{tag}"
self.log(f"tagging {image_name} to {repo}:{repo_tag}")
self.results['changed'] = True
self.results['actions'].append("Tagged image %s to %s:%s" % (image_name, repo, repo_tag))
self.results['actions'].append(f"Tagged image {image_name} to {repo}:{repo_tag}")
if not self.check_mode:
try:
# Finding the image does not always work, especially running a localhost registry. In those
@@ -791,7 +788,7 @@ class ImageManager(DockerBaseClass):
if res.status_code != 201:
raise Exception("Tag operation failed.")
except Exception as exc:
self.fail("Error: failed to tag image - %s" % to_native(exc))
self.fail(f"Error: failed to tag image - {exc}")
self.results['image'] = self.client.find_image(name=repo, tag=repo_tag)
if image and image['Id'] == self.results['image']['Id']:
self.results['changed'] = False
@@ -846,7 +843,7 @@ class ImageManager(DockerBaseClass):
context = tar(self.build_path, exclude=exclude, dockerfile=dockerfile, gzip=False)
params = {
't': "%s:%s" % (self.name, self.tag) if self.tag else self.name,
't': f"{self.name}:{self.tag}" if self.tag else self.name,
'remote': remote,
'q': False,
'nocache': self.nocache,
@@ -912,14 +909,9 @@ class ImageManager(DockerBaseClass):
if line.get('errorDetail'):
errorDetail = line.get('errorDetail')
self.fail(
"Error building %s - code: %s, message: %s, logs: %s" % (
self.name,
errorDetail.get('code'),
errorDetail.get('message'),
build_output))
f"Error building {self.name} - code: {errorDetail.get('code')}, message: {errorDetail.get('message')}, logs: {build_output}")
else:
self.fail("Error building %s - message: %s, logs: %s" % (
self.name, line.get('error'), build_output))
self.fail(f"Error building {self.name} - message: {line.get('error')}, logs: {build_output}")
return {
"stdout": "\n".join(build_output),
@@ -936,9 +928,9 @@ class ImageManager(DockerBaseClass):
load_output = []
has_output = False
try:
self.log("Opening image %s" % self.load_path)
self.log(f"Opening image {self.load_path}")
with open(self.load_path, 'rb') as image_tar:
self.log("Loading image from %s" % self.load_path)
self.log(f"Loading image from {self.load_path}")
res = self.client._post(self.client._url("/images/load"), data=image_tar, stream=True)
if LooseVersion(self.client.api_version) >= LooseVersion('1.23'):
has_output = True
@@ -955,10 +947,10 @@ class ImageManager(DockerBaseClass):
)
except EnvironmentError as exc:
if exc.errno == errno.ENOENT:
self.client.fail("Error opening image %s - %s" % (self.load_path, to_native(exc)))
self.client.fail("Error loading image %s - %s" % (self.name, to_native(exc)), stdout='\n'.join(load_output))
self.client.fail(f"Error opening image {self.load_path} - {exc}")
self.client.fail(f"Error loading image {self.name} - {exc}", stdout='\n'.join(load_output))
except Exception as exc:
self.client.fail("Error loading image %s - %s" % (self.name, to_native(exc)), stdout='\n'.join(load_output))
self.client.fail(f"Error loading image {self.name} - {exc}", stdout='\n'.join(load_output))
# Collect loaded images
if has_output:
@@ -978,20 +970,19 @@ class ImageManager(DockerBaseClass):
expected_image = self.name.lower()
found_image = expected_image not in loaded_image_ids
else:
expected_image = '%s:%s' % (self.name, self.tag)
expected_image = f'{self.name}:{self.tag}'
found_image = expected_image not in loaded_images
if found_image:
found_instead = ', '.join(sorted([f"'{image}'" for image in loaded_images] + list(loaded_image_ids)))
self.client.fail(
"The archive did not contain image '%s'. Instead, found %s." % (
expected_image,
', '.join(sorted(["'%s'" % image for image in loaded_images] + list(loaded_image_ids)))),
f"The archive did not contain image '{expected_image}'. Instead, found {found_instead}.",
stdout='\n'.join(load_output))
loaded_images.remove(expected_image)
if loaded_images:
found_more = ', '.join(sorted([f"'{image}'" for image in loaded_images] + list(loaded_image_ids)))
self.client.module.warn(
"The archive contained more images than specified: %s" % (
', '.join(sorted(["'%s'" % image for image in loaded_images] + list(loaded_image_ids))), ))
f"The archive contained more images than specified: {found_more}")
if is_image_name_id(self.name):
return self.client.find_image_by_id(self.name, accept_missing_image=True)

View File

@@ -313,11 +313,11 @@ def convert_to_bytes(value, module, name, unlimited_value=None):
return unlimited_value
return human_to_bytes(value)
except ValueError as exc:
module.fail_json(msg='Failed to convert %s to bytes: %s' % (name, to_native(exc)))
module.fail_json(msg=f'Failed to convert {name} to bytes: {exc}')
def dict_to_list(dictionary, concat='='):
return ['%s%s%s' % (k, concat, v) for k, v in sorted(dictionary.items())]
return [f'{k}{concat}{v}' for k, v in sorted(dictionary.items())]
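dict_to_list keeps its behaviour under the rewrite; repeating the new one-liner with a short usage sketch:

    def dict_to_list(dictionary, concat='='):
        return [f'{k}{concat}{v}' for k, v in sorted(dictionary.items())]

    dict_to_list({'b': 2, 'a': 1})             # ['a=1', 'b=2'] - keys are sorted first
    dict_to_list({'label': 'x'}, concat=':')   # ['label:x']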
def _quote_csv(input):
@@ -387,7 +387,7 @@ class ImageBuilder(DockerBaseClass):
if self.outputs:
found = False
name_tag = '%s:%s' % (self.name, self.tag)
name_tag = f'{self.name}:{self.tag}'
for output in self.outputs:
if output['type'] == 'image':
if not output['name']:
@@ -417,7 +417,7 @@ class ImageBuilder(DockerBaseClass):
def add_args(self, args):
environ_update = {}
if not self.outputs:
args.extend(['--tag', '%s:%s' % (self.name, self.tag)])
args.extend(['--tag', f'{self.name}:{self.tag}'])
if self.dockerfile:
args.extend(['--file', os.path.join(self.path, self.dockerfile)])
if self.cache_from:
@@ -510,7 +510,7 @@ class ImageBuilder(DockerBaseClass):
args.extend(['--', self.path])
rc, stdout, stderr = self.client.call_cli(*args, environ_update=environ_update)
if rc != 0:
self.fail('Building %s:%s failed' % (self.name, self.tag), stdout=to_native(stdout), stderr=to_native(stderr), command=args)
self.fail(f'Building {self.name}:{self.tag} failed', stdout=to_native(stdout), stderr=to_native(stderr), command=args)
results['stdout'] = to_native(stdout)
results['stderr'] = to_native(stderr)
results['image'] = self.client.find_image(self.name, self.tag) or {}

View File

@@ -95,8 +95,6 @@ images:
import traceback
from ansible.module_utils.common.text.converters import to_native
from ansible_collections.community.docker.plugins.module_utils.common_api import (
AnsibleDockerClient,
RequestException,
@@ -146,7 +144,7 @@ class ImageExportManager(DockerBaseClass):
repo, repo_tag = parse_repository_tag(name)
if not repo_tag:
repo_tag = self.tag
self.names.append({'name': repo, 'tag': repo_tag, 'joined': '%s:%s' % (repo, repo_tag)})
self.names.append({'name': repo, 'tag': repo_tag, 'joined': f'{repo}:{repo_tag}'})
if not self.names:
self.fail('At least one image name must be specified')
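parse_repository_tag splits an optional tag off the reference, so the fallback to `self.tag` above only applies when none was given; expected behaviour sketched below (return values assumed from the usual Docker reference rules):

    parse_repository_tag('nginx')                           # ('nginx', None) -> 'joined' becomes nginx:<default tag>
    parse_repository_tag('nginx:1.27')                      # ('nginx', '1.27')
    parse_repository_tag('registry.example.com:5000/app')   # ('registry.example.com:5000/app', None) - port, not a tag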
@@ -163,7 +161,7 @@ class ImageExportManager(DockerBaseClass):
if archived_images is None:
return 'Overwriting since no image is present in archive'
except ImageArchiveInvalidException as exc:
self.log('Unable to extract manifest summary from archive: %s' % to_native(exc))
self.log(f'Unable to extract manifest summary from archive: {exc}')
return 'Overwriting an unreadable archive file'
left_names = list(self.names)
@@ -175,11 +173,9 @@ class ImageExportManager(DockerBaseClass):
found = True
break
if not found:
return 'Overwriting archive since it contains unexpected image %s named %s' % (
archived_image.image_id, ', '.join(archived_image.repo_tags)
)
return f'Overwriting archive since it contains unexpected image {archived_image.image_id} named {", ".join(archived_image.repo_tags)}'
if left_names:
return 'Overwriting archive since it is missing image(s) %s' % (', '.join([name['joined'] for name in left_names]))
return f"Overwriting archive since it is missing image(s) {', '.join([name['joined'] for name in left_names])}"
return None
@@ -189,13 +185,13 @@ class ImageExportManager(DockerBaseClass):
for chunk in chunks:
fd.write(chunk)
except Exception as exc:
self.fail("Error writing image archive %s - %s" % (self.path, to_native(exc)))
self.fail(f"Error writing image archive {self.path} - {exc}")
def export_images(self):
image_names = [name['joined'] for name in self.names]
image_names_str = ', '.join(image_names)
if len(image_names) == 1:
self.log("Getting archive of image %s" % image_names[0])
self.log(f"Getting archive of image {image_names[0]}")
try:
chunks = self.client._stream_raw_result(
self.client._get(self.client._url('/images/{0}/get', image_names[0]), stream=True),
@@ -203,9 +199,9 @@ class ImageExportManager(DockerBaseClass):
False,
)
except Exception as exc:
self.fail("Error getting image %s - %s" % (image_names[0], to_native(exc)))
self.fail(f"Error getting image {image_names[0]} - {exc}")
else:
self.log("Getting archive of images %s" % image_names_str)
self.log(f"Getting archive of images {image_names_str}")
try:
chunks = self.client._stream_raw_result(
self.client._get(
@@ -217,7 +213,7 @@ class ImageExportManager(DockerBaseClass):
False,
)
except Exception as exc:
self.fail("Error getting images %s - %s" % (image_names_str, to_native(exc)))
self.fail(f"Error getting images {image_names_str} - {exc}")
self.write_chunks(chunks)
@@ -233,7 +229,7 @@ class ImageExportManager(DockerBaseClass):
else:
image = self.client.find_image(name=name['name'], tag=name['tag'])
if not image:
self.fail("Image %s not found" % name['joined'])
self.fail(f"Image {name['joined']} not found")
images.append(image)
# Will have a 'sha256:' prefix

View File

@@ -137,8 +137,6 @@ images:
import traceback
from ansible.module_utils.common.text.converters import to_native
from ansible_collections.community.docker.plugins.module_utils.common_api import (
AnsibleDockerClient,
RequestException,
@@ -160,7 +158,7 @@ class ImageManager(DockerBaseClass):
self.client = client
self.results = results
self.name = self.client.module.params.get('name')
self.log("Gathering facts for images: %s" % (str(self.name)))
self.log(f"Gathering facts for images: {self.name!s}")
if self.name:
self.results['images'] = self.get_facts()
@@ -185,13 +183,13 @@ class ImageManager(DockerBaseClass):
for name in names:
if is_image_name_id(name):
self.log('Fetching image %s (ID)' % (name))
self.log(f'Fetching image {name} (ID)')
image = self.client.find_image_by_id(name, accept_missing_image=True)
else:
repository, tag = parse_repository_tag(name)
if not tag:
tag = 'latest'
self.log('Fetching image %s:%s' % (repository, tag))
self.log(f'Fetching image {repository}:{tag}')
image = self.client.find_image(name=repository, tag=tag)
if image:
results.append(image)
@@ -210,7 +208,7 @@ class ImageManager(DockerBaseClass):
except NotFound:
inspection = None
except Exception as exc:
self.fail("Error inspecting image %s - %s" % (image['Id'], to_native(exc)))
self.fail(f"Error inspecting image {image['Id']} - {exc}")
results.append(inspection)
return results

View File

@@ -176,7 +176,7 @@ class ImagePuller(DockerBaseClass):
if compare_platform_strings(wanted_platform, image_platform):
return results
results['actions'].append('Pulled image %s:%s' % (self.name, self.tag))
results['actions'].append(f'Pulled image {self.name}:{self.tag}')
if self.check_mode:
results['changed'] = True
results['diff']['after'] = image_info(dict(Id='unknown'))

View File

@@ -74,8 +74,6 @@ image:
import base64
import traceback
from ansible.module_utils.common.text.converters import to_native
from ansible_collections.community.docker.plugins.module_utils.common_api import (
AnsibleDockerClient,
RequestException,
@@ -128,7 +126,7 @@ class ImagePusher(DockerBaseClass):
def push(self):
image = self.client.find_image(name=self.name, tag=self.tag)
if not image:
self.client.fail('Cannot find image %s:%s' % (self.name, self.tag))
self.client.fail(f'Cannot find image {self.name}:{self.tag}')
results = dict(
changed=False,
@@ -138,7 +136,7 @@ class ImagePusher(DockerBaseClass):
push_registry, push_repo = resolve_repository_name(self.name)
try:
results['actions'].append('Pushed image %s:%s' % (self.name, self.tag))
results['actions'].append(f'Pushed image {self.name}:{self.tag}')
headers = {}
header = get_config_header(self.client, push_registry)
@@ -165,12 +163,10 @@ class ImagePusher(DockerBaseClass):
except Exception as exc:
if 'unauthorized' in str(exc):
if 'authentication required' in str(exc):
self.client.fail("Error pushing image %s/%s:%s - %s. Try logging into %s first." %
(push_registry, push_repo, self.tag, to_native(exc), push_registry))
self.client.fail(f"Error pushing image {push_registry}/{push_repo}:{self.tag} - {exc}. Try logging into {push_registry} first.")
else:
self.client.fail("Error pushing image %s/%s:%s - %s. Does the repository exist?" %
(push_registry, push_repo, self.tag, str(exc)))
self.client.fail("Error pushing image %s:%s: %s" % (self.name, self.tag, to_native(exc)))
self.client.fail(f"Error pushing image {push_registry}/{push_repo}:{self.tag} - {exc}. Does the repository exist?")
self.client.fail(f"Error pushing image {self.name}:{self.tag}: {exc}")
return results

View File

@@ -99,8 +99,6 @@ untagged:
import traceback
from ansible.module_utils.common.text.converters import to_native
from ansible_collections.community.docker.plugins.module_utils.common_api import (
AnsibleDockerClient,
RequestException,
@@ -171,7 +169,7 @@ class ImageRemover(DockerBaseClass):
else:
image = self.client.find_image(name, self.tag)
if self.tag:
name = "%s:%s" % (self.name, self.tag)
name = f"{self.name}:{self.tag}"
if self.diff:
results['diff'] = dict(before=self.get_diff_state(image))
@@ -182,7 +180,7 @@ class ImageRemover(DockerBaseClass):
return results
results['changed'] = True
results['actions'].append("Removed image %s" % (name))
results['actions'].append(f"Removed image {name}")
results['image'] = image
if not self.check_mode:
@@ -192,7 +190,7 @@ class ImageRemover(DockerBaseClass):
# If the image vanished while we were trying to remove it, do not fail
res = []
except Exception as exc:
self.fail("Error removing image %s - %s" % (name, to_native(exc)))
self.fail(f"Error removing image {name} - {exc}")
for entry in res:
if entry.get('Untagged'):

View File

@@ -102,7 +102,6 @@ tagged_images:
import traceback
from ansible.module_utils.common.text.converters import to_native
from ansible.module_utils.common.text.formatters import human_to_bytes
from ansible_collections.community.docker.plugins.module_utils.common_api import (
@@ -130,7 +129,7 @@ def convert_to_bytes(value, module, name, unlimited_value=None):
return unlimited_value
return human_to_bytes(value)
except ValueError as exc:
module.fail_json(msg='Failed to convert %s to bytes: %s' % (name, to_native(exc)))
module.fail_json(msg=f'Failed to convert {name} to bytes: {exc}')
def image_info(name, tag, image):
@@ -168,12 +167,12 @@ class ImageTagger(DockerBaseClass):
self.repositories = []
for i, repository in enumerate(parameters['repository']):
if is_image_name_id(repository):
self.fail("repository[%d] must not be an image ID; got: %s" % (i + 1, repository))
self.fail(f"repository[{i + 1}] must not be an image ID; got: {repository}")
repo, repo_tag = parse_repository_tag(repository)
if not repo_tag:
repo_tag = parameters['tag']
elif not is_valid_tag(repo_tag, allow_empty=False):
self.fail("repository[%d] must not have a digest; got: %s" % (i + 1, repository))
self.fail(f"repository[{i + 1}] must not have a digest; got: {repository}")
self.repositories.append((repo, repo_tag))
def fail(self, msg):
@@ -186,16 +185,16 @@ class ImageTagger(DockerBaseClass):
if tagged_image['Id'] == image['Id']:
return (
False,
"target image already exists (%s) and is as expected" % tagged_image['Id'],
f"target image already exists ({tagged_image['Id']}) and is as expected",
tagged_image,
)
if self.keep_existing_images:
return (
False,
"target image already exists (%s) and is not as expected, but kept" % tagged_image['Id'],
f"target image already exists ({tagged_image['Id']}) and is not as expected, but kept",
tagged_image,
)
msg = "target image existed (%s) and was not as expected" % tagged_image['Id']
msg = f"target image existed ({tagged_image['Id']}) and was not as expected"
else:
msg = "target image did not exist"
@@ -211,7 +210,7 @@ class ImageTagger(DockerBaseClass):
if res.status_code != 201:
raise Exception("Tag operation failed.")
except Exception as exc:
self.fail("Error: failed to tag image as %s:%s - %s" % (name, tag, to_native(exc)))
self.fail(f"Error: failed to tag image as {name}:{tag} - {exc}")
return True, msg, tagged_image
@@ -221,7 +220,7 @@ class ImageTagger(DockerBaseClass):
else:
image = self.client.find_image(name=self.name, tag=self.tag)
if not image:
self.fail("Cannot find image %s:%s" % (self.name, self.tag))
self.fail(f"Cannot find image {self.name}:{self.tag}")
before = []
after = []
@@ -239,10 +238,10 @@ class ImageTagger(DockerBaseClass):
after.append(image_info(repository, tag, image if tagged else old_image))
if tagged:
results['changed'] = True
results['actions'].append('Tagged image %s as %s:%s: %s' % (image['Id'], repository, tag, msg))
tagged_images.append('%s:%s' % (repository, tag))
results['actions'].append(f"Tagged image {image['Id']} as {repository}:{tag}: {msg}")
tagged_images.append(f'{repository}:{tag}')
else:
results['actions'].append('Not tagged image %s as %s:%s: %s' % (image['Id'], repository, tag, msg))
results['actions'].append(f"Not tagged image {image['Id']} as {repository}:{tag}: {msg}")
return results

View File

@@ -121,7 +121,7 @@ import json
import os
import traceback
from ansible.module_utils.common.text.converters import to_bytes, to_text, to_native
from ansible.module_utils.common.text.converters import to_bytes, to_text
from ansible_collections.community.docker.plugins.module_utils.common_api import (
AnsibleDockerClient,
@@ -305,12 +305,12 @@ class LoginManager(DockerBaseClass):
:return: None
'''
self.results['actions'].append("Logged into %s" % (self.registry_url))
self.log("Log into %s with username %s" % (self.registry_url, self.username))
self.results['actions'].append(f"Logged into {self.registry_url}")
self.log(f"Log into {self.registry_url} with username {self.username}")
try:
response = self._login(self.reauthorize)
except Exception as exc:
self.fail("Logging into %s for user %s failed - %s" % (self.registry_url, self.username, to_native(exc)))
self.fail(f"Logging into {self.registry_url} for user {self.username} failed - {exc}")
# If user is already logged in, then response contains password for user
if 'password' in response:
@@ -321,7 +321,7 @@ class LoginManager(DockerBaseClass):
try:
response = self._login(True)
except Exception as exc:
self.fail("Logging into %s for user %s failed - %s" % (self.registry_url, self.username, to_native(exc)))
self.fail(f"Logging into {self.registry_url} for user {self.username} failed - {exc}")
response.pop('password', None)
self.results['login_result'] = response
@@ -341,7 +341,7 @@ class LoginManager(DockerBaseClass):
store.get(self.registry_url)
except CredentialsNotFound:
# get raises an exception on not found.
self.log("Credentials for %s not present, doing nothing." % (self.registry_url))
self.log(f"Credentials for {self.registry_url} not present, doing nothing.")
self.results['changed'] = False
return
@@ -372,9 +372,8 @@ class LoginManager(DockerBaseClass):
if current['Username'] != self.username or current['Secret'] != self.password or self.reauthorize:
if not self.check_mode:
store.store(self.registry_url, self.username, self.password)
self.log("Writing credentials to configured helper %s for %s" % (store.program, self.registry_url))
self.results['actions'].append("Wrote credentials to configured helper %s for %s" % (
store.program, self.registry_url))
self.log(f"Writing credentials to configured helper {store.program} for {self.registry_url}")
self.results['actions'].append(f"Wrote credentials to configured helper {store.program} for {self.registry_url}")
self.results['changed'] = True
def get_credential_store_instance(self, registry, dockercfg_path):
@@ -394,7 +393,7 @@ class LoginManager(DockerBaseClass):
# Make sure that there is a credential helper before trying to instantiate a
# Store object.
if store_name:
self.log("Found credential store %s" % store_name)
self.log(f"Found credential store {store_name}")
return Store(store_name, environment=credstore_env)
return DockerFileStore(dockercfg_path)

View File

@@ -454,7 +454,7 @@ class DockerNetworkManager(object):
else:
for key, value in self.parameters.driver_options.items():
if not (key in net['Options']) or value != net['Options'][key]:
differences.add('driver_options.%s' % key,
differences.add(f'driver_options.{key}',
parameter=value,
active=net['Options'].get(key))
@@ -497,7 +497,7 @@ class DockerNetworkManager(object):
# (but have default value None if not specified)
continue
if value != net_config.get(key):
differences.add('ipam_config[%s].%s' % (idx, key),
differences.add(f'ipam_config[{idx}].{key}',
parameter=value,
active=net_config.get(key))
@@ -536,7 +536,7 @@ class DockerNetworkManager(object):
else:
for key, value in self.parameters.labels.items():
if not (key in net['Labels']) or value != net['Labels'][key]:
differences.add('labels.%s' % key,
differences.add(f'labels.{key}',
parameter=value,
active=net['Labels'].get(key))
@@ -596,7 +596,7 @@ class DockerNetworkManager(object):
resp = self.client.post_json_to_json('/networks/create', data=data)
self.client.report_warnings(resp, ['Warning'])
self.existing_network = self.client.get_network(network_id=resp['Id'])
self.results['actions'].append("Created network %s with driver %s" % (self.parameters.name, self.parameters.driver))
self.results['actions'].append(f"Created network {self.parameters.name} with driver {self.parameters.driver}")
self.results['changed'] = True
def remove_network(self):
@@ -607,7 +607,7 @@ class DockerNetworkManager(object):
if self.existing_network.get('Scope', 'local') == 'swarm':
while self.get_existing_network():
time.sleep(0.1)
self.results['actions'].append("Removed network %s" % (self.parameters.name,))
self.results['actions'].append(f"Removed network {self.parameters.name}")
self.results['changed'] = True
def is_container_connected(self, container_name):
@@ -636,7 +636,7 @@ class DockerNetworkManager(object):
"EndpointConfig": None,
}
self.client.post_json('/networks/{0}/connect', self.parameters.name, data=data)
self.results['actions'].append("Connected container %s" % (name,))
self.results['actions'].append(f"Connected container {name}")
self.results['changed'] = True
self.diff_tracker.add(f'connected.{name}', parameter=True, active=False)
@@ -662,7 +662,7 @@ class DockerNetworkManager(object):
if not self.check_mode:
data = {"Container": container_name, "Force": True}
self.client.post_json('/networks/{0}/disconnect', self.parameters.name, data=data)
self.results['actions'].append("Disconnected container %s" % (container_name,))
self.results['actions'].append(f"Disconnected container {container_name}")
self.results['changed'] = True
self.diff_tracker.add(f'connected.{container_name}',
parameter=False,

View File

@@ -198,7 +198,7 @@ class SwarmNodeManager(DockerBaseClass):
try:
node_info = self.client.inspect_node(node_id=self.parameters.hostname)
except APIError as exc:
self.client.fail("Failed to get node information for %s" % to_native(exc))
self.client.fail(f"Failed to get node information for {exc}")
changed = False
node_spec = dict(
@@ -247,9 +247,8 @@ class SwarmNodeManager(DockerBaseClass):
changed = True
else:
self.client.module.warn(
"Label '%s' listed both in 'labels' and 'labels_to_remove'. "
"Keeping the assigned label value."
% to_native(key))
f"Label '{to_native(key)}' listed both in 'labels' and 'labels_to_remove'. "
"Keeping the assigned label value.")
else:
if node_spec['Labels'].get(key):
node_spec['Labels'].pop(key)
@@ -261,7 +260,7 @@ class SwarmNodeManager(DockerBaseClass):
self.client.update_node(node_id=node_info['ID'], version=node_info['Version']['Index'],
node_spec=node_spec)
except APIError as exc:
self.client.fail("Failed to update node : %s" % to_native(exc))
self.client.fail(f"Failed to update node : {exc}")
self.results['node'] = self.client.get_node_inspect(node_id=node_info['ID'])
self.results['changed'] = changed
else:

View File

@@ -162,7 +162,7 @@ class TaskParameters(DockerBaseClass):
def prepare_options(options):
return ['%s=%s' % (k, v if v is not None else "") for k, v in options.items()] if options else []
return [f'{k}={v if v is not None else ""}' for k, v in options.items()] if options else []
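The rewritten comprehension embeds a conditional expression inside the replacement field; note the inner double quotes differ from the outer single quotes, which keeps the f-string valid on Python versions older than 3.12. A standalone illustration:

    def prepare_options(options):
        return [f'{k}={v if v is not None else ""}' for k, v in options.items()] if options else []

    prepare_options({'debug': None, 'dir': '/data'})   # ['debug=', 'dir=/data']
    prepare_options(None)                              # []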
def parse_options(options_list):
@@ -227,7 +227,7 @@ class DockerPluginManager(object):
if ((not existing_options.get(key) and value) or
not value or
value != existing_options[key]):
differences.add('plugin_options.%s' % key,
differences.add(f'plugin_options.{key}',
parameter=value,
active=existing_options.get(key))
@@ -262,7 +262,7 @@ class DockerPluginManager(object):
except APIError as e:
self.client.fail(to_native(e))
self.actions.append("Installed plugin %s" % self.preferred_name)
self.actions.append(f"Installed plugin {self.preferred_name}")
self.changed = True
def remove_plugin(self):
@@ -274,7 +274,7 @@ class DockerPluginManager(object):
except APIError as e:
self.client.fail(to_native(e))
self.actions.append("Removed plugin %s" % self.preferred_name)
self.actions.append(f"Removed plugin {self.preferred_name}")
self.changed = True
def update_plugin(self):
@@ -287,7 +287,7 @@ class DockerPluginManager(object):
self.client.post_json('/plugins/{0}/set', self.preferred_name, data=data)
except APIError as e:
self.client.fail(to_native(e))
self.actions.append("Updated plugin %s settings" % self.preferred_name)
self.actions.append(f"Updated plugin {self.preferred_name} settings")
self.changed = True
else:
self.client.fail("Cannot update the plugin: Plugin does not exist")
@@ -322,7 +322,7 @@ class DockerPluginManager(object):
self.client.post_json('/plugins/{0}/enable', self.preferred_name, params={'timeout': timeout})
except APIError as e:
self.client.fail(to_native(e))
self.actions.append("Enabled plugin %s" % self.preferred_name)
self.actions.append(f"Enabled plugin {self.preferred_name}")
self.changed = True
else:
self.install_plugin()
@@ -331,7 +331,7 @@ class DockerPluginManager(object):
self.client.post_json('/plugins/{0}/enable', self.preferred_name, params={'timeout': timeout})
except APIError as e:
self.client.fail(to_native(e))
self.actions.append("Enabled plugin %s" % self.preferred_name)
self.actions.append(f"Enabled plugin {self.preferred_name}")
self.changed = True
def disable(self):
@@ -342,7 +342,7 @@ class DockerPluginManager(object):
self.client.post_json('/plugins/{0}/disable', self.preferred_name)
except APIError as e:
self.client.fail(to_native(e))
self.actions.append("Disable plugin %s" % self.preferred_name)
self.actions.append(f"Disable plugin {self.preferred_name}")
self.changed = True
else:
self.client.fail("Plugin not found: Plugin does not exist.")

View File

@@ -206,7 +206,7 @@ from ansible_collections.community.docker.plugins.module_utils.util import (
compare_generic,
sanitize_labels,
)
from ansible.module_utils.common.text.converters import to_native, to_bytes
from ansible.module_utils.common.text.converters import to_bytes
class SecretManager(DockerBaseClass):
@@ -272,7 +272,7 @@ class SecretManager(DockerBaseClass):
try:
secrets = self.client.secrets(filters={'name': self.name})
except APIError as exc:
self.client.fail("Error accessing secret %s: %s" % (self.name, to_native(exc)))
self.client.fail(f"Error accessing secret {self.name}: {exc}")
if self.rolling_versions:
self.secrets = [
@@ -305,7 +305,7 @@ class SecretManager(DockerBaseClass):
secret_id = self.client.create_secret(self.name, self.data, labels=labels)
self.secrets += self.client.secrets(filters={'id': secret_id})
except APIError as exc:
self.client.fail("Error creating secret: %s" % to_native(exc))
self.client.fail(f"Error creating secret: {exc}")
if isinstance(secret_id, dict):
secret_id = secret_id['ID']
@@ -317,7 +317,7 @@ class SecretManager(DockerBaseClass):
if not self.check_mode:
self.client.remove_secret(secret['ID'])
except APIError as exc:
self.client.fail("Error removing secret %s: %s" % (secret['Spec']['Name'], to_native(exc)))
self.client.fail(f"Error removing secret {secret['Spec']['Name']}: {exc}")
def present(self):
''' Handles state == 'present', creating or updating the secret '''

View File

@@ -184,7 +184,7 @@ except ImportError:
def docker_stack_services(client, stack_name):
rc, out, err = client.call_cli("stack", "services", stack_name, "--format", "{{.Name}}")
if to_native(err) == "Nothing found in stack: %s\n" % stack_name:
if to_native(err) == f"Nothing found in stack: {stack_name}\n":
return []
return to_native(out).strip().split('\n')
@@ -230,7 +230,7 @@ def docker_stack_rm(client, stack_name, retries, interval):
command += ["--detach=false"]
rc, out, err = client.call_cli(*command)
while to_native(err) != "Nothing found in stack: %s\n" % stack_name and retries > 0:
while to_native(err) != f"Nothing found in stack: {stack_name}\n" and retries > 0:
sleep(interval)
retries = retries - 1
rc, out, err = client.call_cli(*command)
@@ -281,7 +281,7 @@ def main():
elif isinstance(compose_def, str):
compose_files.append(compose_def)
else:
client.fail("compose element '%s' must be a string or a dictionary" % compose_def)
client.fail(f"compose element '{compose_def}' must be a string or a dictionary")
before_stack_services = docker_stack_inspect(client, name)

View File

@@ -309,8 +309,6 @@ from ansible_collections.community.docker.plugins.module_utils.util import (
from ansible_collections.community.docker.plugins.module_utils.swarm import AnsibleDockerSwarmClient
from ansible.module_utils.common.text.converters import to_native
class TaskParameters(DockerBaseClass):
def __init__(self):
@@ -531,7 +529,7 @@ class SwarmManager(DockerBaseClass):
try:
self.client.init_swarm(**init_arguments)
except APIError as exc:
self.client.fail("Can not create a new Swarm Cluster: %s" % to_native(exc))
self.client.fail(f"Can not create a new Swarm Cluster: {exc}")
if not self.client.check_if_swarm_manager():
if not self.check_mode:
@@ -539,7 +537,7 @@ class SwarmManager(DockerBaseClass):
self.created = True
self.inspect_swarm()
self.results['actions'].append("New Swarm cluster created: %s" % (self.swarm_info.get('ID')))
self.results['actions'].append(f"New Swarm cluster created: {self.swarm_info.get('ID')}")
self.differences.add('state', parameter='present', active='absent')
self.results['changed'] = True
self.results['swarm_facts'] = {
@@ -567,7 +565,7 @@ class SwarmManager(DockerBaseClass):
rotate_worker_token=self.parameters.rotate_worker_token,
rotate_manager_token=self.parameters.rotate_manager_token)
except APIError as exc:
self.client.fail("Can not update a Swarm Cluster: %s" % to_native(exc))
self.client.fail(f"Can not update a Swarm Cluster: {exc}")
return
self.inspect_swarm()
@@ -590,7 +588,7 @@ class SwarmManager(DockerBaseClass):
try:
self.client.join_swarm(**join_arguments)
except APIError as exc:
self.client.fail("Can not join the Swarm Cluster: %s" % to_native(exc))
self.client.fail(f"Can not join the Swarm Cluster: {exc}")
self.results['actions'].append("New node is added to swarm cluster")
self.differences.add('joined', parameter=True, active=False)
self.results['changed'] = True
@@ -603,7 +601,7 @@ class SwarmManager(DockerBaseClass):
try:
self.client.leave_swarm(force=self.force)
except APIError as exc:
self.client.fail("This node can not leave the Swarm Cluster: %s" % to_native(exc))
self.client.fail(f"This node can not leave the Swarm Cluster: {exc}")
self.results['actions'].append("Node has left the swarm cluster")
self.differences.add('joined', parameter='absent', active='present')
self.results['changed'] = True
@@ -624,7 +622,7 @@ class SwarmManager(DockerBaseClass):
try:
self.client.remove_node(node_id=self.node_id, force=self.force)
except APIError as exc:
self.client.fail("Can not remove the node from the Swarm Cluster: %s" % to_native(exc))
self.client.fail(f"Can not remove the node from the Swarm Cluster: {exc}")
self.results['actions'].append("Node is removed from swarm cluster.")
self.differences.add('joined', parameter=False, active=True)
self.results['changed'] = True

View File

@@ -192,8 +192,6 @@ except ImportError:
# missing Docker SDK for Python handled in ansible.module_utils.docker_common
pass
from ansible.module_utils.common.text.converters import to_native
from ansible_collections.community.docker.plugins.module_utils.swarm import AnsibleDockerSwarmClient
from ansible_collections.community.docker.plugins.module_utils.common import RequestException
from ansible_collections.community.docker.plugins.module_utils.util import (
@@ -231,7 +229,7 @@ class DockerSwarmManager(DockerBaseClass):
try:
return self.client.inspect_swarm()
except APIError as exc:
self.client.fail("Error inspecting docker swarm: %s" % to_native(exc))
self.client.fail(f"Error inspecting docker swarm: {exc}")
def get_docker_items_list(self, docker_object=None, filters=None):
items = None
@@ -245,8 +243,7 @@ class DockerSwarmManager(DockerBaseClass):
elif docker_object == 'services':
items = self.client.services(filters=filters)
except APIError as exc:
self.client.fail("Error inspecting docker swarm for object '%s': %s" %
(docker_object, to_native(exc)))
self.client.fail(f"Error inspecting docker swarm for object '{docker_object}': {exc}")
if self.verbose_output:
return items

View File

@@ -869,7 +869,7 @@ from ansible_collections.community.docker.plugins.module_utils.util import (
)
from ansible.module_utils.basic import human_to_bytes
from ansible.module_utils.common.text.converters import to_text, to_native
from ansible.module_utils.common.text.converters import to_text
try:
from docker import types
@@ -909,7 +909,7 @@ def get_docker_environment(env, env_files):
if not isinstance(value, str):
raise ValueError(
'Non-string value found for env option. '
'Ambiguous env options must be wrapped in quotes to avoid YAML parsing. Key: %s' % name
f'Ambiguous env options must be wrapped in quotes to avoid YAML parsing. Key: {name}'
)
env_dict[name] = str(value)
elif env is not None and isinstance(env, list):
@@ -921,7 +921,7 @@ def get_docker_environment(env, env_files):
env_dict[name] = value
elif env is not None:
raise ValueError(
'Invalid type for env %s (%s). Only list or dict allowed.' % (env, type(env))
f'Invalid type for env {env} ({type(env)}). Only list or dict allowed.'
)
env_list = format_environment(env_dict)
if not env_list:
@@ -968,7 +968,7 @@ def get_docker_networks(networks, network_ids):
if network:
invalid_keys = ', '.join(network.keys())
raise TypeError(
'%s are not valid keys for the networks option' % invalid_keys
f'{invalid_keys} are not valid keys for the networks option'
)
else:
@@ -979,7 +979,7 @@ def get_docker_networks(networks, network_ids):
try:
parsed_network['id'] = network_ids[network_name]
except KeyError as e:
raise ValueError('Could not find a network named: %s.' % e)
raise ValueError(f'Could not find a network named: {e}.')
parsed_networks.append(parsed_network)
return parsed_networks or []
@@ -996,8 +996,7 @@ def get_nanoseconds_from_raw_option(name, value):
return convert_duration_to_nanosecond(value)
else:
raise ValueError(
'Invalid type for %s %s (%s). Only string or int allowed.'
% (name, value, type(value))
f'Invalid type for {name} {value} ({type(value)}). Only string or int allowed.'
)
@@ -1385,7 +1384,7 @@ class DockerService(DockerBaseClass):
try:
memory = human_to_bytes(memory)
except ValueError as exc:
raise Exception('Failed to convert limit_memory to bytes: %s' % exc)
raise Exception(f'Failed to convert limit_memory to bytes: {exc}')
return {
'limit_cpu': cpus,
'limit_memory': memory,
@@ -1407,7 +1406,7 @@ class DockerService(DockerBaseClass):
try:
memory = human_to_bytes(memory)
except ValueError as exc:
raise Exception('Failed to convert reserve_memory to bytes: %s' % exc)
raise Exception(f'Failed to convert reserve_memory to bytes: {exc}')
return {
'reserve_cpu': cpus,
'reserve_memory': memory,
@@ -1483,21 +1482,19 @@ class DockerService(DockerBaseClass):
if invalid_items:
errors = ', '.join(
[
'%s (%s) at index %s' % (item, type(item), index)
f'{item} ({type(item)}) at index {index}'
for index, item in invalid_items
]
)
raise Exception(
'All items in a command list need to be strings. '
'Check quoting. Invalid items: %s.'
% errors
f'Check quoting. Invalid items: {errors}.'
)
s.command = ap['command']
elif s.command is not None:
raise ValueError(
'Invalid type for command %s (%s). '
f'Invalid type for command {s.command} ({type(s.command)}). '
'Only string or list allowed. Check quoting.'
% (s.command, type(s.command))
)
s.env = get_docker_environment(ap['env'], ap['env_files'])
@@ -1577,7 +1574,7 @@ class DockerService(DockerBaseClass):
tmpfs_size = human_to_bytes(tmpfs_size)
except ValueError as exc:
raise ValueError(
'Failed to convert tmpfs_size to bytes: %s' % exc
f'Failed to convert tmpfs_size to bytes: {exc}'
)
service_m['tmpfs_size'] = tmpfs_size
@@ -2214,7 +2211,7 @@ class DockerServiceManager(object):
ds.mode = to_text('replicated-job', encoding='utf-8')
ds.replicas = mode['ReplicatedJob']['TotalCompletions']
else:
raise Exception('Unknown service mode: %s' % mode)
raise Exception(f'Unknown service mode: {mode}')
raw_data_mounts = task_template_data['ContainerSpec'].get('Mounts')
if raw_data_mounts:
@@ -2314,7 +2311,7 @@ class DockerServiceManager(object):
name = repo + ':' + tag
distribution_data = self.client.inspect_distribution(name)
digest = distribution_data['Descriptor']['digest']
return '%s@%s' % (name, digest)
return f'{name}@{digest}'
def get_networks_names_ids(self):
return dict(
@@ -2341,7 +2338,7 @@ class DockerServiceManager(object):
for secret_name in secret_names:
if secret_name not in secrets:
self.client.fail(
'Could not find a secret named "%s"' % secret_name
f'Could not find a secret named "{secret_name}"'
)
return secrets
@@ -2365,7 +2362,7 @@ class DockerServiceManager(object):
for config_name in config_names:
if config_name not in configs:
self.client.fail(
'Could not find a config named "%s"' % config_name
f'Could not find a config named "{config_name}"'
)
return configs
@@ -2381,16 +2378,14 @@ class DockerServiceManager(object):
)
except DockerException as e:
self.client.fail(
'Error looking for an image named %s: %s'
% (image, to_native(e))
f'Error looking for an image named {image}: {e}'
)
try:
current_service = self.get_service(module.params['name'])
except Exception as e:
self.client.fail(
'Error looking for service named %s: %s'
% (module.params['name'], to_native(e))
f"Error looking for service named {module.params['name']}: {e}"
)
try:
secret_ids = self.get_missing_secret_ids()
@@ -2407,7 +2402,7 @@ class DockerServiceManager(object):
)
except Exception as e:
return self.client.fail(
'Error parsing module parameters: %s' % to_native(e)
f'Error parsing module parameters: {e}'
)
changed = False

View File

@@ -212,14 +212,14 @@ class DockerVolumeManager(object):
for key, value in self.parameters.driver_options.items():
if (not self.existing_volume['Options'].get(key) or
value != self.existing_volume['Options'][key]):
differences.add('driver_options.%s' % key,
differences.add(f'driver_options.{key}',
parameter=value,
active=self.existing_volume['Options'].get(key))
if self.parameters.labels:
existing_labels = self.existing_volume.get('Labels') or {}
for label in self.parameters.labels:
if existing_labels.get(label) != self.parameters.labels.get(label):
differences.add('labels.%s' % label,
differences.add(f'labels.{label}',
parameter=self.parameters.labels.get(label),
active=existing_labels.get(label))
@@ -241,7 +241,7 @@ class DockerVolumeManager(object):
except APIError as e:
self.client.fail(to_native(e))
self.results['actions'].append("Created volume %s with driver %s" % (self.parameters.volume_name, self.parameters.driver))
self.results['actions'].append(f"Created volume {self.parameters.volume_name} with driver {self.parameters.driver}")
self.results['changed'] = True
def remove_volume(self):
@@ -252,7 +252,7 @@ class DockerVolumeManager(object):
except APIError as e:
self.client.fail(to_native(e))
self.results['actions'].append("Removed volume %s" % self.parameters.volume_name)
self.results['actions'].append(f"Removed volume {self.parameters.volume_name}")
self.results['changed'] = True
def present(self):

View File

@@ -72,8 +72,6 @@ volume:
import traceback
from ansible.module_utils.common.text.converters import to_native
from ansible_collections.community.docker.plugins.module_utils.common_api import (
AnsibleDockerClient,
RequestException,
@@ -87,7 +85,7 @@ def get_existing_volume(client, volume_name):
except NotFound as dummy:
return None
except Exception as exc:
client.fail("Error inspecting volume: %s" % to_native(exc))
client.fail(f"Error inspecting volume: {exc}")
def main():

View File

@@ -395,7 +395,7 @@ class UnixSocketStreamTest(unittest.TestCase):
lines = []
for i in range(0, 50):
line = str(i).encode()
lines += [('%x' % len(line)).encode(), line]
lines += [f'{len(line):x}'.encode(), line]
lines.append(b'0')
lines.append(b'')
@@ -566,7 +566,7 @@ class UserAgentTest(unittest.TestCase):
self.patcher = mock.patch.object(
APIClient,
'send',
return_value=fake_resp("GET", "%s/version" % fake_api.prefix)
return_value=fake_resp("GET", f"{fake_api.prefix}/version")
)
self.mock_send = self.patcher.start()

View File

@@ -18,7 +18,7 @@ from ..test_support.docker_image_archive_stubbing import (
def assert_no_logging(msg):
raise AssertionError('Should not have logged anything but logged %s' % msg)
raise AssertionError(f'Should not have logged anything but logged {msg}')
def capture_logging(messages):
@@ -41,7 +41,7 @@ def test_archived_image_action_when_missing(tar_file_name):
fake_name = 'a:latest'
fake_id = 'a1'
expected = 'Archived image %s to %s, since none present' % (fake_name, tar_file_name)
expected = f'Archived image {fake_name} to {tar_file_name}, since none present'
actual = ImageManager.archived_image_action(assert_no_logging, tar_file_name, fake_name, api_image_id(fake_id))
@@ -65,7 +65,7 @@ def test_archived_image_action_when_invalid(tar_file_name):
write_irrelevant_tar(tar_file_name)
expected = 'Archived image %s to %s, overwriting an unreadable archive file' % (fake_name, tar_file_name)
expected = f'Archived image {fake_name} to {tar_file_name}, overwriting an unreadable archive file'
actual_log = []
actual = ImageManager.archived_image_action(
@@ -88,9 +88,7 @@ def test_archived_image_action_when_obsolete_by_id(tar_file_name):
write_imitation_archive(tar_file_name, old_id, [fake_name])
expected = 'Archived image %s to %s, overwriting archive with image %s named %s' % (
fake_name, tar_file_name, old_id, fake_name
)
expected = f'Archived image {fake_name} to {tar_file_name}, overwriting archive with image {old_id} named {fake_name}'
actual = ImageManager.archived_image_action(assert_no_logging, tar_file_name, fake_name, api_image_id(new_id))
assert actual == expected
@@ -103,11 +101,9 @@ def test_archived_image_action_when_obsolete_by_name(tar_file_name):
write_imitation_archive(tar_file_name, fake_id, [old_name])
expected = 'Archived image %s to %s, overwriting archive with image %s named %s' % (
new_name, tar_file_name, fake_id, old_name
)
expected = f'Archived image {new_name} to {tar_file_name}, overwriting archive with image {fake_id} named {old_name}'
actual = ImageManager.archived_image_action(assert_no_logging, tar_file_name, new_name, api_image_id(fake_id))
print('actual : %s', actual)
print('expected : %s', expected)
print(f'actual : {actual}')
print(f'expected : {expected}')
assert actual == expected

View File

@@ -28,7 +28,7 @@ def write_imitation_archive(file_name, image_id, repo_tags):
manifest = [
{
'Config': '%s.json' % image_id,
'Config': f'{image_id}.json',
'RepoTags': repo_tags
}
]