diff --git a/plugins/action/docker_container_copy_into.py b/plugins/action/docker_container_copy_into.py index ffdcfe9f..19b2ed0d 100644 --- a/plugins/action/docker_container_copy_into.py +++ b/plugins/action/docker_container_copy_into.py @@ -28,11 +28,11 @@ class ActionModule(ActionBase): result = merge_hash(result, self._execute_module(task_vars=task_vars, wrap_async=self._task.async_val)) - if u'diff' in result and result[u'diff'].get(u'scrambled_diff'): + if 'diff' in result and result['diff'].get('scrambled_diff'): # Scrambling is not done for security, but to avoid no_log screwing up the diff - diff = result[u'diff'] - key = base64.b64decode(diff.pop(u'scrambled_diff')) - for k in (u'before', u'after'): + diff = result['diff'] + key = base64.b64decode(diff.pop('scrambled_diff')) + for k in ('before', 'after'): if k in diff: diff[k] = unscramble(diff[k], key) diff --git a/plugins/connection/docker.py b/plugins/connection/docker.py index b27b08b8..bf455fb2 100644 --- a/plugins/connection/docker.py +++ b/plugins/connection/docker.py @@ -166,8 +166,8 @@ class Connection(ConnectionBase): @staticmethod def _sanitize_version(version): - version = re.sub(u'[^0-9a-zA-Z.]', u'', version) - version = re.sub(u'^v', u'', version) + version = re.sub('[^0-9a-zA-Z.]', '', version) + version = re.sub('^v', '', version) return version def _old_docker_version(self): @@ -196,13 +196,13 @@ class Connection(ConnectionBase): cmd, cmd_output, err, returncode = self._old_docker_version() if returncode == 0: - for line in to_text(cmd_output, errors='surrogate_or_strict').split(u'\n'): - if line.startswith(u'Server version:'): # old docker versions + for line in to_text(cmd_output, errors='surrogate_or_strict').split('\n'): + if line.startswith('Server version:'): # old docker versions return self._sanitize_version(line.split()[2]) cmd, cmd_output, err, returncode = self._new_docker_version() if returncode: - raise AnsibleError('Docker version check (%s) failed: %s' % (to_native(cmd), 
to_native(err))) + raise AnsibleError(f'Docker version check ({to_native(cmd)}) failed: {to_native(err)}') return self._sanitize_version(to_text(cmd_output, errors='surrogate_or_strict')) @@ -218,12 +218,12 @@ class Connection(ConnectionBase): out = to_text(out, errors='surrogate_or_strict') if p.returncode != 0: - display.warning(u'unable to retrieve default user from docker container: %s %s' % (out, to_text(err))) + display.warning(f'unable to retrieve default user from docker container: {out} {to_text(err)}') self._container_user_cache[container] = None return None # The default exec user is root, unless it was changed in the Dockerfile with USER - user = out.strip() or u'root' + user = out.strip() or 'root' self._container_user_cache[container] = user return user @@ -249,19 +249,17 @@ class Connection(ConnectionBase): for val, what in ((k, 'Key'), (v, 'Value')): if not isinstance(val, str): raise AnsibleConnectionFailure( - 'Non-string {0} found for extra_env option. Ambiguous env options must be ' - 'wrapped in quotes to avoid them being interpreted. {1}: {2!r}' - .format(what.lower(), what, val) + f'Non-string {what.lower()} found for extra_env option. Ambiguous env options must be ' + f'wrapped in quotes to avoid them being interpreted. 
{what}: {val!r}' ) - local_cmd += [b'-e', b'%s=%s' % (to_bytes(k, errors='surrogate_or_strict'), to_bytes(v, errors='surrogate_or_strict'))] + local_cmd += [b'-e', b"%s=%s" % (to_bytes(k, errors='surrogate_or_strict'), to_bytes(v, errors='surrogate_or_strict'))] if self.get_option('working_dir') is not None: local_cmd += [b'-w', to_bytes(self.get_option('working_dir'), errors='surrogate_or_strict')] - if self.docker_version != u'dev' and LooseVersion(self.docker_version) < LooseVersion(u'18.06'): + if self.docker_version != 'dev' and LooseVersion(self.docker_version) < LooseVersion('18.06'): # https://github.com/docker/cli/pull/732, first appeared in release 18.06.0 raise AnsibleConnectionFailure( - 'Providing the working directory requires Docker CLI version 18.06 or newer. You have Docker CLI version {0}.' - .format(self.docker_version) + f'Providing the working directory requires Docker CLI version 18.06 or newer. You have Docker CLI version {self.docker_version}.' ) if self.get_option('privileged'): @@ -302,24 +300,23 @@ class Connection(ConnectionBase): self._set_docker_args() self._version = self._get_docker_version() - if self._version == u'dev': - display.warning(u'Docker version number is "dev". Will assume latest version.') - if self._version != u'dev' and LooseVersion(self._version) < LooseVersion(u'1.3'): + if self._version == 'dev': + display.warning('Docker version number is "dev". 
Will assume latest version.') + if self._version != 'dev' and LooseVersion(self._version) < LooseVersion('1.3'): raise AnsibleError('docker connection type requires docker 1.3 or higher') return self._version def _get_actual_user(self): if self.remote_user is not None: # An explicit user is provided - if self.docker_version == u'dev' or LooseVersion(self.docker_version) >= LooseVersion(u'1.7'): + if self.docker_version == 'dev' or LooseVersion(self.docker_version) >= LooseVersion('1.7'): # Support for specifying the exec user was added in docker 1.7 return self.remote_user else: self.remote_user = None actual_user = self._get_docker_remote_user() if actual_user != self.get_option('remote_user'): - display.warning(u'docker {0} does not support remote_user, using container default: {1}' - .format(self.docker_version, self.actual_user or u'?')) + display.warning(f'docker {self.docker_version} does not support remote_user, using container default: {self.actual_user or "?"}') return actual_user elif self._display.verbosity > 2: # Since we are not setting the actual_user, look it up so we have it for logging later @@ -335,9 +332,7 @@ class Connection(ConnectionBase): if not self._connected: self._set_conn_data() actual_user = self._get_actual_user() - display.vvv(u"ESTABLISH DOCKER CONNECTION FOR USER: {0}".format( - actual_user or u'?'), host=self.get_option('remote_addr') - ) + display.vvv(f"ESTABLISH DOCKER CONNECTION FOR USER: {actual_user or '?'}", host=self.get_option('remote_addr')) self._connected = True def exec_command(self, cmd, in_data=None, sudoable=False): @@ -349,7 +344,7 @@ class Connection(ConnectionBase): local_cmd = self._build_exec_cmd([self._play_context.executable, '-c', cmd]) - display.vvv(u"EXEC {0}".format(to_text(local_cmd)), host=self.get_option('remote_addr')) + display.vvv(f"EXEC {to_text(local_cmd)}", host=self.get_option('remote_addr')) display.debug("opening command with Popen()") local_cmd = [to_bytes(i, errors='surrogate_or_strict') for 
i in local_cmd] @@ -425,12 +420,12 @@ class Connection(ConnectionBase): """ Transfer a file from local to docker container """ self._set_conn_data() super(Connection, self).put_file(in_path, out_path) - display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.get_option('remote_addr')) + display.vvv(f"PUT {in_path} TO {out_path}", host=self.get_option('remote_addr')) out_path = self._prefix_login_path(out_path) if not os.path.exists(to_bytes(in_path, errors='surrogate_or_strict')): raise AnsibleFileNotFound( - "file or module does not exist: %s" % to_native(in_path)) + f"file or module does not exist: {to_native(in_path)}") out_path = quote(out_path) # Older docker does not have native support for copying files into @@ -442,7 +437,7 @@ class Connection(ConnectionBase): count = ' count=0' else: count = '' - args = self._build_exec_cmd([self._play_context.executable, "-c", "dd of=%s bs=%s%s" % (out_path, BUFSIZE, count)]) + args = self._build_exec_cmd([self._play_context.executable, "-c", f"dd of={out_path} bs={BUFSIZE}{count}"]) args = [to_bytes(i, errors='surrogate_or_strict') for i in args] try: p = subprocess.Popen(args, stdin=in_file, stdout=subprocess.PIPE, stderr=subprocess.PIPE) @@ -451,21 +446,20 @@ class Connection(ConnectionBase): stdout, stderr = p.communicate() if p.returncode != 0: - raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % - (to_native(in_path), to_native(out_path), to_native(stdout), to_native(stderr))) + raise AnsibleError(f"failed to transfer file {to_native(in_path)} to {to_native(out_path)}:\n{to_native(stdout)}\n{to_native(stderr)}") def fetch_file(self, in_path, out_path): """ Fetch a file from container to local. 
""" self._set_conn_data() super(Connection, self).fetch_file(in_path, out_path) - display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.get_option('remote_addr')) + display.vvv(f"FETCH {in_path} TO {out_path}", host=self.get_option('remote_addr')) in_path = self._prefix_login_path(in_path) # out_path is the final file path, but docker takes a directory, not a # file path out_dir = os.path.dirname(out_path) - args = [self.docker_cmd, "cp", "%s:%s" % (self.get_option('remote_addr'), in_path), out_dir] + args = [self.docker_cmd, "cp", f"{self.get_option('remote_addr')}:{in_path}", out_dir] args = [to_bytes(i, errors='surrogate_or_strict') for i in args] p = subprocess.Popen(args, stdin=subprocess.PIPE, @@ -481,7 +475,7 @@ class Connection(ConnectionBase): if p.returncode != 0: # Older docker does not have native support for fetching files command `cp` # If `cp` fails, try to use `dd` instead - args = self._build_exec_cmd([self._play_context.executable, "-c", "dd if=%s bs=%s" % (in_path, BUFSIZE)]) + args = self._build_exec_cmd([self._play_context.executable, "-c", f"dd if={in_path} bs={BUFSIZE}"]) args = [to_bytes(i, errors='surrogate_or_strict') for i in args] with open(to_bytes(actual_out_path, errors='surrogate_or_strict'), 'wb') as out_file: try: @@ -492,7 +486,7 @@ class Connection(ConnectionBase): stdout, stderr = p.communicate() if p.returncode != 0: - raise AnsibleError("failed to fetch file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr)) + raise AnsibleError(f"failed to fetch file {in_path} to {out_path}:\n{stdout}\n{stderr}") # Rename if needed if actual_out_path != out_path: diff --git a/plugins/connection/docker_api.py b/plugins/connection/docker_api.py index 248bac97..df7f8158 100644 --- a/plugins/connection/docker_api.py +++ b/plugins/connection/docker_api.py @@ -146,27 +146,27 @@ class Connection(ConnectionBase): has_pipelining = True def _call_client(self, callable, not_found_can_be_resource=False): + remote_addr = 
self.get_option('remote_addr') try: return callable() except NotFound as e: if not_found_can_be_resource: - raise AnsibleConnectionFailure('Could not find container "{1}" or resource in it ({0})'.format(e, self.get_option('remote_addr'))) + raise AnsibleConnectionFailure(f'Could not find container "{remote_addr}" or resource in it ({e})') else: - raise AnsibleConnectionFailure('Could not find container "{1}" ({0})'.format(e, self.get_option('remote_addr'))) + raise AnsibleConnectionFailure(f'Could not find container "{remote_addr}" ({e})') except APIError as e: if e.response is not None and e.response.status_code == 409: - raise AnsibleConnectionFailure('The container "{1}" has been paused ({0})'.format(e, self.get_option('remote_addr'))) + raise AnsibleConnectionFailure(f'The container "{remote_addr}" has been paused ({e})') self.client.fail( - 'An unexpected Docker error occurred for container "{1}": {0}'.format(e, self.get_option('remote_addr')) + f'An unexpected Docker error occurred for container "{remote_addr}": {e}' ) except DockerException as e: self.client.fail( - 'An unexpected Docker error occurred for container "{1}": {0}'.format(e, self.get_option('remote_addr')) + f'An unexpected Docker error occurred for container "{remote_addr}": {e}' ) except RequestException as e: self.client.fail( - 'An unexpected requests error occurred for container "{1}" when trying to talk to the Docker daemon: {0}' - .format(e, self.get_option('remote_addr')) + f'An unexpected requests error occurred for container "{remote_addr}" when trying to talk to the Docker daemon: {e}' ) def __init__(self, play_context, new_stdin, *args, **kwargs): @@ -186,9 +186,7 @@ class Connection(ConnectionBase): super(Connection, self)._connect() if not self._connected: self.actual_user = self.get_option('remote_user') - display.vvv(u"ESTABLISH DOCKER CONNECTION FOR USER: {0}".format( - self.actual_user or u'?'), host=self.get_option('remote_addr') - ) + display.vvv(f"ESTABLISH DOCKER CONNECTION 
FOR USER: {self.actual_user or '?'}", host=self.get_option('remote_addr')) if self.client is None: self.client = AnsibleDockerClient(self, min_docker_api_version=MIN_DOCKER_API) self._connected = True @@ -197,12 +195,12 @@ class Connection(ConnectionBase): # Since we are not setting the actual_user, look it up so we have it for logging later # Only do this if display verbosity is high enough that we'll need the value # This saves overhead from calling into docker when we do not need to - display.vvv(u"Trying to determine actual user") + display.vvv("Trying to determine actual user") result = self._call_client(lambda: self.client.get_json('/containers/{0}/json', self.get_option('remote_addr'))) if result.get('Config'): self.actual_user = result['Config'].get('User') if self.actual_user is not None: - display.vvv(u"Actual user is '{0}'".format(self.actual_user)) + display.vvv(f"Actual user is '{self.actual_user}'") def exec_command(self, cmd, in_data=None, sudoable=False): """ Run a command on the docker host """ @@ -213,12 +211,10 @@ class Connection(ConnectionBase): do_become = self.become and self.become.expect_prompt() and sudoable + stdin_part = f', with stdin ({len(in_data)} bytes)' if in_data is not None else '' + become_part = ', with become prompt' if do_become else '' display.vvv( - u"EXEC {0}{1}{2}".format( - to_text(command), - ', with stdin ({0} bytes)'.format(len(in_data)) if in_data is not None else '', - ', with become prompt' if do_become else '', - ), + f"EXEC {to_text(command)}{stdin_part}{become_part}", host=self.get_option('remote_addr') ) @@ -244,19 +240,19 @@ class Connection(ConnectionBase): for val, what in ((k, 'Key'), (v, 'Value')): if not isinstance(val, str): raise AnsibleConnectionFailure( - 'Non-string {0} found for extra_env option. Ambiguous env options must be ' - 'wrapped in quotes to avoid them being interpreted. {1}: {2!r}' - .format(what.lower(), what, val) + f'Non-string {what.lower()} found for extra_env option. 
Ambiguous env options must be ' + f'wrapped in quotes to avoid them being interpreted. {what}: {val!r}' ) - data['Env'].append(u'{0}={1}'.format(to_text(k, errors='surrogate_or_strict'), to_text(v, errors='surrogate_or_strict'))) + kk = to_text(k, errors='surrogate_or_strict') + vv = to_text(v, errors='surrogate_or_strict') + data['Env'].append(f'{kk}={vv}') if self.get_option('working_dir') is not None: data['WorkingDir'] = self.get_option('working_dir') if self.client.docker_api_version < LooseVersion('1.35'): raise AnsibleConnectionFailure( 'Providing the working directory requires Docker API version 1.35 or newer.' - ' The Docker daemon the connection is using has API version {0}.' - .format(self.client.docker_api_version_str) + f' The Docker daemon the connection is using has API version {self.client.docker_api_version_str}.' ) exec_data = self._call_client(lambda: self.client.post_json_to_json('/containers/{0}/exec', self.get_option('remote_addr'), data=data)) @@ -325,23 +321,23 @@ class Connection(ConnectionBase): def put_file(self, in_path, out_path): """ Transfer a file from local to docker container """ super(Connection, self).put_file(in_path, out_path) - display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.get_option('remote_addr')) + display.vvv(f"PUT {in_path} TO {out_path}", host=self.get_option('remote_addr')) out_path = self._prefix_login_path(out_path) if self.actual_user not in self.ids: dummy, ids, dummy = self.exec_command(b'id -u && id -g') + remote_addr = self.get_option('remote_addr') try: user_id, group_id = ids.splitlines() self.ids[self.actual_user] = int(user_id), int(group_id) display.vvvv( - 'PUT: Determined uid={0} and gid={1} for user "{2}"'.format(user_id, group_id, self.actual_user), - host=self.get_option('remote_addr') + f'PUT: Determined uid={user_id} and gid={group_id} for user "{self.actual_user}"', + host=remote_addr ) except Exception as e: raise AnsibleConnectionFailure( - 'Error while determining user and group ID 
of current user in container "{1}": {0}\nGot value: {2!r}' - .format(e, self.get_option('remote_addr'), ids) + f'Error while determining user and group ID of current user in container "{remote_addr}": {e}\nGot value: {ids!r}' ) user_id, group_id = self.ids[self.actual_user] @@ -367,7 +363,7 @@ class Connection(ConnectionBase): def fetch_file(self, in_path, out_path): """ Fetch a file from container to local. """ super(Connection, self).fetch_file(in_path, out_path) - display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.get_option('remote_addr')) + display.vvv(f"FETCH {in_path} TO {out_path}", host=self.get_option('remote_addr')) in_path = self._prefix_login_path(in_path) diff --git a/plugins/connection/nsenter.py b/plugins/connection/nsenter.py index 2a858104..a18f4dfd 100644 --- a/plugins/connection/nsenter.py +++ b/plugins/connection/nsenter.py @@ -76,9 +76,7 @@ class Connection(ConnectionBase): if not self._connected: display.vvv( - u"ESTABLISH NSENTER CONNECTION FOR USER: {0}".format( - self._play_context.remote_user - ), + f"ESTABLISH NSENTER CONNECTION FOR USER: {self._play_context.remote_user}", host=self._play_context.remote_addr, ) self._connected = True @@ -92,8 +90,8 @@ class Connection(ConnectionBase): executable = C.DEFAULT_EXECUTABLE.split()[0] if C.DEFAULT_EXECUTABLE else None if not os.path.exists(to_bytes(executable, errors='surrogate_or_strict')): - raise AnsibleError("failed to find the executable specified %s." - " Please verify if the executable exists and re-try." % executable) + raise AnsibleError(f"failed to find the executable specified {executable}." 
+ " Please verify if the executable exists and re-try.") # Rewrite the provided command to prefix it with nsenter nsenter_cmd_parts = [ @@ -104,7 +102,7 @@ class Connection(ConnectionBase): "--pid", "--uts", "--preserve-credentials", - "--target={0}".format(self._nsenter_pid), + f"--target={self._nsenter_pid}", "--", ] @@ -115,7 +113,7 @@ class Connection(ConnectionBase): cmd_parts = nsenter_cmd_parts + cmd cmd = [to_bytes(arg) for arg in cmd_parts] - display.vvv(u"EXEC {0}".format(to_text(cmd)), host=self._play_context.remote_addr) + display.vvv(f"EXEC {to_text(cmd)}", host=self._play_context.remote_addr) display.debug("opening command with Popen()") master = None @@ -131,7 +129,7 @@ class Connection(ConnectionBase): try: master, stdin = pty.openpty() except (IOError, OSError) as e: - display.debug("Unable to open pty: %s" % to_native(e)) + display.debug(f"Unable to open pty: {e}") p = subprocess.Popen( cmd, @@ -204,15 +202,15 @@ class Connection(ConnectionBase): in_path = unfrackpath(in_path, basedir=self.cwd) out_path = unfrackpath(out_path, basedir=self.cwd) - display.vvv(u"PUT {0} to {1}".format(in_path, out_path), host=self._play_context.remote_addr) + display.vvv(f"PUT {in_path} to {out_path}", host=self._play_context.remote_addr) try: with open(to_bytes(in_path, errors="surrogate_or_strict"), "rb") as in_file: in_data = in_file.read() rc, out, err = self.exec_command(cmd=["tee", out_path], in_data=in_data) if rc != 0: - raise AnsibleError("failed to transfer file to {0}: {1}".format(out_path, err)) + raise AnsibleError(f"failed to transfer file to {out_path}: {err}") except IOError as e: - raise AnsibleError("failed to transfer file to {0}: {1}".format(out_path, to_native(e))) + raise AnsibleError(f"failed to transfer file to {out_path}: {e}") def fetch_file(self, in_path, out_path): super(Connection, self).fetch_file(in_path, out_path) @@ -222,13 +220,13 @@ class Connection(ConnectionBase): try: rc, out, err = self.exec_command(cmd=["cat", in_path]) - 
display.vvv(u"FETCH {0} TO {1}".format(in_path, out_path), host=self._play_context.remote_addr) + display.vvv(f"FETCH {in_path} TO {out_path}", host=self._play_context.remote_addr) if rc != 0: - raise AnsibleError("failed to transfer file to {0}: {1}".format(in_path, err)) + raise AnsibleError(f"failed to transfer file to {in_path}: {err}") with open(to_bytes(out_path, errors='surrogate_or_strict'), 'wb') as out_file: out_file.write(out) except IOError as e: - raise AnsibleError("failed to transfer file to {0}: {1}".format(to_native(out_path), to_native(e))) + raise AnsibleError(f"failed to transfer file to {to_native(out_path)}: {e}") def close(self): ''' terminate the connection; nothing to do here ''' diff --git a/plugins/inventory/docker_containers.py b/plugins/inventory/docker_containers.py index d8b29c14..df2c6c40 100644 --- a/plugins/inventory/docker_containers.py +++ b/plugins/inventory/docker_containers.py @@ -172,7 +172,6 @@ filters: import re from ansible.errors import AnsibleError -from ansible.module_utils.common.text.converters import to_native from ansible.plugins.inventory import BaseInventoryPlugin, Constructable from ansible_collections.community.docker.plugins.module_utils.common_api import ( @@ -198,7 +197,8 @@ class InventoryModule(BaseInventoryPlugin, Constructable): NAME = 'community.docker.docker_containers' def _slugify(self, value): - return 'docker_%s' % (re.sub(r'[^\w-]', '_', value).lower().lstrip('_')) + slug = re.sub(r'[^\w-]', '_', value).lower().lstrip('_') + return f'docker_{slug}' def _populate(self, client): strict = self.get_option('strict') @@ -221,7 +221,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable): } containers = client.get_json('/containers/json', params=params) except APIError as exc: - raise AnsibleError("Error listing containers: %s" % to_native(exc)) + raise AnsibleError(f"Error listing containers: {exc}") if add_legacy_groups: self.inventory.add_group('running') @@ -255,7 +255,7 @@ class 
InventoryModule(BaseInventoryPlugin, Constructable): try: inspect = client.get_json('/containers/{0}/json', id) except APIError as exc: - raise AnsibleError("Error inspecting container %s - %s" % (name, str(exc))) + raise AnsibleError(f"Error inspecting container {name} - {exc}") state = inspect.get('State') or dict() config = inspect.get('Config') or dict() @@ -268,19 +268,19 @@ class InventoryModule(BaseInventoryPlugin, Constructable): # Add container to groups image_name = config.get('Image') if image_name and add_legacy_groups: - groups.append('image_{0}'.format(image_name)) + groups.append(f'image_{image_name}') stack_name = labels.get('com.docker.stack.namespace') if stack_name: full_facts['docker_stack'] = stack_name if add_legacy_groups: - groups.append('stack_{0}'.format(stack_name)) + groups.append(f'stack_{stack_name}') service_name = labels.get('com.docker.swarm.service.name') if service_name: full_facts['docker_service'] = service_name if add_legacy_groups: - groups.append('service_{0}'.format(service_name)) + groups.append(f'service_{service_name}') ansible_connection = None if connection_type == 'ssh': @@ -289,7 +289,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable): # Lookup the public facing port Nat'ed to ssh port. 
network_settings = inspect.get('NetworkSettings') or {} port_settings = network_settings.get('Ports') or {} - port = port_settings.get('%d/tcp' % (ssh_port, ))[0] + port = port_settings.get(f'{ssh_port}/tcp')[0] except (IndexError, AttributeError, TypeError): port = dict() @@ -383,9 +383,9 @@ class InventoryModule(BaseInventoryPlugin, Constructable): self._populate(client) except DockerException as e: raise AnsibleError( - 'An unexpected Docker error occurred: {0}'.format(e) + f'An unexpected Docker error occurred: {e}' ) except RequestException as e: raise AnsibleError( - 'An unexpected requests error occurred when trying to talk to the Docker daemon: {0}'.format(e) + f'An unexpected requests error occurred when trying to talk to the Docker daemon: {e}' ) diff --git a/plugins/inventory/docker_machine.py b/plugins/inventory/docker_machine.py index e284c505..f1b1cb5d 100644 --- a/plugins/inventory/docker_machine.py +++ b/plugins/inventory/docker_machine.py @@ -131,11 +131,11 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): command = [self.DOCKER_MACHINE_PATH] command.extend(args) - display.debug('Executing command {0}'.format(command)) + display.debug(f'Executing command {command}') try: result = subprocess.check_output(command) except subprocess.CalledProcessError as e: - display.warning('Exception {0} caught while executing command {1}, this was the original exception: {2}'.format(type(e).__name__, command, e)) + display.warning(f'Exception {type(e).__name__} caught while executing command {command}, this was the original exception: {e}') raise e return to_text(result).strip() @@ -203,14 +203,14 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): def _should_skip_host(self, machine_name, env_var_tuples, daemon_env): if not env_var_tuples: - warning_prefix = 'Unable to fetch Docker daemon env vars from Docker Machine for host {0}'.format(machine_name) + warning_prefix = f'Unable to fetch Docker daemon env vars from 
Docker Machine for host {machine_name}' if daemon_env in ('require', 'require-silently'): if daemon_env == 'require': - display.warning('{0}: host will be skipped'.format(warning_prefix)) + display.warning(f'{warning_prefix}: host will be skipped') return True else: # 'optional', 'optional-silently' if daemon_env == 'optional': - display.warning('{0}: host will lack dm_DOCKER_xxx variables'.format(warning_prefix)) + display.warning(f'{warning_prefix}: host will lack dm_DOCKER_xxx variables') return False def _populate(self): @@ -261,7 +261,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): # set variables based on Docker Machine env variables for kv in env_var_tuples: - self.inventory.set_variable(machine_name, 'dm_{0}'.format(kv[0]), make_unsafe(kv[1])) + self.inventory.set_variable(machine_name, f'dm_{kv[0]}', make_unsafe(kv[1])) if self.get_option('verbose_output'): self.inventory.set_variable(machine_name, 'docker_machine_node_attributes', unsafe_node_attrs) @@ -279,8 +279,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): self._add_host_to_keyed_groups(self.get_option('keyed_groups'), unsafe_node_attrs, machine_name, strict=strict) except Exception as e: - raise AnsibleError('Unable to fetch hosts from Docker Machine, this was the original exception: %s' % - to_native(e), orig_exc=e) + raise AnsibleError(f'Unable to fetch hosts from Docker Machine, this was the original exception: {e}', orig_exc=e) from e def verify_file(self, path): """Return the possibility of a file being consumable by this plugin.""" diff --git a/plugins/inventory/docker_swarm.py b/plugins/inventory/docker_swarm.py index 9fa8ed12..012a5bc3 100644 --- a/plugins/inventory/docker_swarm.py +++ b/plugins/inventory/docker_swarm.py @@ -149,7 +149,6 @@ keyed_groups: ''' from ansible.errors import AnsibleError -from ansible.module_utils.common.text.converters import to_native from ansible_collections.community.docker.plugins.module_utils.common import 
get_connect_params from ansible_collections.community.docker.plugins.module_utils.util import update_tls_hostname from ansible.plugins.inventory import BaseInventoryPlugin, Constructable @@ -255,8 +254,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable): unsafe_node_attrs['ID'], strict=strict) except Exception as e: - raise AnsibleError('Unable to fetch hosts from Docker swarm API, this was the original exception: %s' % - to_native(e)) + raise AnsibleError(f'Unable to fetch hosts from Docker swarm API, this was the original exception: {e}') def verify_file(self, path): """Return the possibly of a file being consumable by this plugin.""" diff --git a/plugins/module_utils/_api/api/client.py b/plugins/module_utils/_api/api/client.py index b539671f..4d959239 100644 --- a/plugins/module_utils/_api/api/client.py +++ b/plugins/module_utils/_api/api/client.py @@ -55,15 +55,15 @@ class APIClient( >>> import docker >>> client = docker.APIClient(base_url='unix://var/run/docker.sock') >>> client.version() - {u'ApiVersion': u'1.33', - u'Arch': u'amd64', - u'BuildTime': u'2017-11-19T18:46:37.000000000+00:00', - u'GitCommit': u'f4ffd2511c', - u'GoVersion': u'go1.9.2', - u'KernelVersion': u'4.14.3-1-ARCH', - u'MinAPIVersion': u'1.12', - u'Os': u'linux', - u'Version': u'17.10.0-ce'} + {'ApiVersion': '1.33', + 'Arch': 'amd64', + 'BuildTime': '2017-11-19T18:46:37.000000000+00:00', + 'GitCommit': 'f4ffd2511c', + 'GoVersion': 'go1.9.2', + 'KernelVersion': '4.14.3-1-ARCH', + 'MinAPIVersion': '1.12', + 'Os': 'linux', + 'Version': '17.10.0-ce'} Args: base_url (str): URL to the Docker server. For example, @@ -187,14 +187,11 @@ class APIClient( self._version = version if not isinstance(self._version, str): raise DockerException( - 'Version parameter must be a string or None. Found {0}'.format( - type(version).__name__ - ) + f'Version parameter must be a string or None. 
Found {type(version).__name__}' ) if utils.version_lt(self._version, MINIMUM_DOCKER_API_VERSION): raise InvalidVersion( - 'API versions below {0} are no longer supported by this ' - 'library.'.format(MINIMUM_DOCKER_API_VERSION) + f'API versions below {MINIMUM_DOCKER_API_VERSION} are no longer supported by this library.' ) def _retrieve_server_version(self): @@ -202,7 +199,7 @@ class APIClient( version_result = self.version(api_version=False) except Exception as e: raise DockerException( - 'Error while fetching server API version: {0}'.format(e) + f'Error while fetching server API version: {e}' ) try: @@ -214,7 +211,7 @@ class APIClient( ) except Exception as e: raise DockerException( - 'Error while fetching server API version: {0}. Response seems to be broken.'.format(e) + f'Error while fetching server API version: {e}. Response seems to be broken.' ) def _set_request_timeout(self, kwargs): @@ -247,19 +244,16 @@ class APIClient( for arg in args: if not isinstance(arg, str): raise ValueError( - 'Expected a string but found {0} ({1}) ' - 'instead'.format(arg, type(arg)) + f'Expected a string but found {arg} ({type(arg)}) instead' ) quote_f = partial(quote, safe="/:") args = map(quote_f, args) if kwargs.get('versioned_api', True): - return '{0}/v{1}{2}'.format( - self.base_url, self._version, pathfmt.format(*args) - ) + return f'{self.base_url}/v{self._version}{pathfmt.format(*args)}' else: - return '{0}{1}'.format(self.base_url, pathfmt.format(*args)) + return f'{self.base_url}{pathfmt.format(*args)}' def _raise_for_status(self, response): """Raises stored :class:`APIError`, if one occurred.""" diff --git a/plugins/module_utils/_api/auth.py b/plugins/module_utils/_api/auth.py index e23825fd..abbaafc4 100644 --- a/plugins/module_utils/_api/auth.py +++ b/plugins/module_utils/_api/auth.py @@ -19,7 +19,7 @@ from .credentials.errors import StoreError, CredentialsNotFound from .utils import config INDEX_NAME = 'docker.io' -INDEX_URL = 
'https://index.{0}/v1/'.format(INDEX_NAME) +INDEX_URL = f'https://index.{INDEX_NAME}/v1/' TOKEN_USERNAME = '' log = logging.getLogger(__name__) @@ -28,14 +28,13 @@ log = logging.getLogger(__name__) def resolve_repository_name(repo_name): if '://' in repo_name: raise errors.InvalidRepository( - 'Repository name cannot contain a scheme ({0})'.format(repo_name) + f'Repository name cannot contain a scheme ({repo_name})' ) index_name, remote_name = split_repo_name(repo_name) if index_name[0] == '-' or index_name[-1] == '-': raise errors.InvalidRepository( - 'Invalid index name ({0}). Cannot begin or end with a' - ' hyphen.'.format(index_name) + f'Invalid index name ({index_name}). Cannot begin or end with a hyphen.' ) return resolve_index_name(index_name), remote_name @@ -117,9 +116,7 @@ class AuthConfig(dict): # keys is not formatted properly. if raise_on_error: raise errors.InvalidConfigFile( - 'Invalid configuration for registry {0}'.format( - registry - ) + f'Invalid configuration for registry {registry}' ) return {} if 'identitytoken' in entry: @@ -272,7 +269,7 @@ class AuthConfig(dict): return None except StoreError as e: raise errors.DockerException( - 'Credentials store error: {0}'.format(repr(e)) + f'Credentials store error: {e!r}' ) def _get_store_instance(self, name): diff --git a/plugins/module_utils/_api/context/api.py b/plugins/module_utils/_api/context/api.py index 879a8613..0dc45042 100644 --- a/plugins/module_utils/_api/context/api.py +++ b/plugins/module_utils/_api/context/api.py @@ -146,14 +146,14 @@ class ContextAPI(object): names.append(name) except Exception as e: raise errors.ContextException( - "Failed to load metafile {filepath}: {e}".format(filepath=filepath, e=e), + f"Failed to load metafile {filepath}: {e}" ) from e contexts = [cls.get_default_context()] for name in names: context = Context.load_context(name) if not context: - raise 
errors.ContextException(f"Context {name} cannot be found") contexts.append(context) return contexts @@ -174,7 +174,7 @@ class ContextAPI(object): err = write_context_name_to_docker_config(name) if err: raise errors.ContextException( - 'Failed to set current context: {err}'.format(err=err)) + f'Failed to set current context: {err}') @classmethod def remove_context(cls, name): diff --git a/plugins/module_utils/_api/context/config.py b/plugins/module_utils/_api/context/config.py index 3ac4e833..9bcd020d 100644 --- a/plugins/module_utils/_api/context/config.py +++ b/plugins/module_utils/_api/context/config.py @@ -29,7 +29,7 @@ def get_current_context_name_with_source(): if docker_cfg_path: try: with open(docker_cfg_path) as f: - return json.load(f).get("currentContext", "default"), "configuration file {file}".format(file=docker_cfg_path) + return json.load(f).get("currentContext", "default"), f"configuration file {docker_cfg_path}" except Exception: pass return "default", "fallback value" diff --git a/plugins/module_utils/_api/context/context.py b/plugins/module_utils/_api/context/context.py index dde4fc56..798d36e6 100644 --- a/plugins/module_utils/_api/context/context.py +++ b/plugins/module_utils/_api/context/context.py @@ -62,7 +62,7 @@ class Context(object): if not isinstance(v, dict): # unknown format raise ContextException( - "Unknown endpoint format for context {name}: {v}".format(name=name, v=v), + f"Unknown endpoint format for context {name}: {v}", ) self.endpoints[k] = v @@ -118,7 +118,7 @@ class Context(object): except (OSError, KeyError, ValueError) as e: # unknown format raise Exception( - "Detected corrupted meta file for context {name} : {e}".format(name=name, e=e) + f"Detected corrupted meta file for context {name} : {e}" ) from e # for docker endpoints, set defaults for @@ -193,7 +193,7 @@ class Context(object): rmtree(self.tls_path) def __repr__(self): - return "<{classname}: '{name}'>".format(classname=self.__class__.__name__, name=self.name) + 
return f"<{self.__class__.__name__}: '{self.name}'>" def __str__(self): return json.dumps(self.__call__(), indent=2) diff --git a/plugins/module_utils/_api/credentials/errors.py b/plugins/module_utils/_api/credentials/errors.py index 905b4121..3d74324a 100644 --- a/plugins/module_utils/_api/credentials/errors.py +++ b/plugins/module_utils/_api/credentials/errors.py @@ -26,12 +26,8 @@ def process_store_error(cpe, program): message = cpe.output.decode('utf-8') if 'credentials not found in native keychain' in message: return CredentialsNotFound( - 'No matching credentials in {0}'.format( - program - ) + f'No matching credentials in {program}' ) return StoreError( - 'Credentials store {0} exited with "{1}".'.format( - program, cpe.output.decode('utf-8').strip() - ) + f'Credentials store {program} exited with "{cpe.output.decode("utf-8").strip()}".' ) diff --git a/plugins/module_utils/_api/credentials/store.py b/plugins/module_utils/_api/credentials/store.py index 1cbc112e..f152f736 100644 --- a/plugins/module_utils/_api/credentials/store.py +++ b/plugins/module_utils/_api/credentials/store.py @@ -30,9 +30,7 @@ class Store(object): self.environment = environment if self.exe is None: raise errors.InitializationError( - '{0} not installed or not available in PATH'.format( - self.program - ) + f'{self.program} not installed or not available in PATH' ) def get(self, server): @@ -50,7 +48,7 @@ class Store(object): # raise CredentialsNotFound if result['Username'] == '' and result['Secret'] == '': raise errors.CredentialsNotFound( - 'No matching credentials in {0}'.format(self.program) + f'No matching credentials in {self.program}' ) return result @@ -92,14 +90,10 @@ class Store(object): except OSError as e: if e.errno == errno.ENOENT: raise errors.StoreError( - '{0} not installed or not available in PATH'.format( - self.program - ) + f'{self.program} not installed or not available in PATH' ) else: raise errors.StoreError( - 'Unexpected OS error "{0}", errno={1}'.format( - 
e.strerror, e.errno - ) + f'Unexpected OS error "{e.strerror}", errno={e.errno}' ) return output diff --git a/plugins/module_utils/_api/errors.py b/plugins/module_utils/_api/errors.py index a81bef3d..8854f612 100644 --- a/plugins/module_utils/_api/errors.py +++ b/plugins/module_utils/_api/errors.py @@ -59,17 +59,13 @@ class APIError(_HTTPError, DockerException): message = super(APIError, self).__str__() if self.is_client_error(): - message = '{0} Client Error for {1}: {2}'.format( - self.response.status_code, self.response.url, - self.response.reason) + message = f'{self.response.status_code} Client Error for {self.response.url}: {self.response.reason}' elif self.is_server_error(): - message = '{0} Server Error for {1}: {2}'.format( - self.response.status_code, self.response.url, - self.response.reason) + message = f'{self.response.status_code} Server Error for {self.response.url}: {self.response.reason}' if self.explanation: - message = '{0} ("{1}")'.format(message, self.explanation) + message = f'{message} ("{self.explanation}")' return message @@ -146,9 +142,8 @@ class ContainerError(DockerException): self.image = image self.stderr = stderr - err = ": {0}".format(stderr) if stderr is not None else "" - msg = ("Command '{0}' in image '{1}' returned non-zero exit " - "status {2}{3}").format(command, image, exit_status, err) + err = f": {stderr}" if stderr is not None else "" + msg = f"Command '{command}' in image '{image}' returned non-zero exit status {exit_status}{err}" super(ContainerError, self).__init__(msg) @@ -170,8 +165,8 @@ class ImageLoadError(DockerException): def create_unexpected_kwargs_error(name, kwargs): - quoted_kwargs = ["'{0}'".format(k) for k in sorted(kwargs)] - text = ["{0}() ".format(name)] + quoted_kwargs = [f"'{k}'" for k in sorted(kwargs)] + text = [f"{name}() "] if len(quoted_kwargs) == 1: text.append("got an unexpected keyword argument ") else: @@ -185,7 +180,7 @@ class MissingContextParameter(DockerException): self.param = param def 
__str__(self): - return ("missing parameter: {0}".format(self.param)) + return f"missing parameter: {self.param}" class ContextAlreadyExists(DockerException): @@ -193,7 +188,7 @@ class ContextAlreadyExists(DockerException): self.name = name def __str__(self): - return ("context {0} already exists".format(self.name)) + return f"context {self.name} already exists" class ContextException(DockerException): @@ -209,7 +204,7 @@ class ContextNotFound(DockerException): self.name = name def __str__(self): - return ("context '{0}' not found".format(self.name)) + return f"context '{self.name}' not found" class MissingRequirementException(DockerException): diff --git a/plugins/module_utils/_api/utils/build.py b/plugins/module_utils/_api/utils/build.py index bec03f86..c75119da 100644 --- a/plugins/module_utils/_api/utils/build.py +++ b/plugins/module_utils/_api/utils/build.py @@ -107,7 +107,7 @@ def create_archive(root, files=None, fileobj=None, gzip=False, t.addfile(i, f) except IOError: raise IOError( - 'Can not read file in context: {0}'.format(full_path) + f'Can not read file in context: {full_path}' ) else: # Directories, FIFOs, symlinks... do not need to be read. 
@@ -271,18 +271,13 @@ def process_dockerfile(dockerfile, path): abs_dockerfile = os.path.join(path, dockerfile) if IS_WINDOWS_PLATFORM and path.startswith( WINDOWS_LONGPATH_PREFIX): - abs_dockerfile = '{0}{1}'.format( - WINDOWS_LONGPATH_PREFIX, - os.path.normpath( - abs_dockerfile[len(WINDOWS_LONGPATH_PREFIX):] - ) - ) + abs_dockerfile = f'{WINDOWS_LONGPATH_PREFIX}{os.path.normpath(abs_dockerfile[len(WINDOWS_LONGPATH_PREFIX):])}' if (os.path.splitdrive(path)[0] != os.path.splitdrive(abs_dockerfile)[0] or os.path.relpath(abs_dockerfile, path).startswith('..')): # Dockerfile not in context - read data to insert into tar later with open(abs_dockerfile) as df: return ( - '.dockerfile.{random:x}'.format(random=random.getrandbits(160)), + f'.dockerfile.{random.getrandbits(160):x}', df.read() ) diff --git a/plugins/module_utils/_api/utils/decorators.py b/plugins/module_utils/_api/utils/decorators.py index 59abd483..5a0cd091 100644 --- a/plugins/module_utils/_api/utils/decorators.py +++ b/plugins/module_utils/_api/utils/decorators.py @@ -38,9 +38,7 @@ def minimum_version(version): def wrapper(self, *args, **kwargs): if utils.version_lt(self._version, version): raise errors.InvalidVersion( - '{0} is not available for version < {1}'.format( - f.__name__, version - ) + f'{f.__name__} is not available for version < {version}' ) return f(self, *args, **kwargs) return wrapper diff --git a/plugins/module_utils/_api/utils/fnmatch.py b/plugins/module_utils/_api/utils/fnmatch.py index 9181d62c..84677e1e 100644 --- a/plugins/module_utils/_api/utils/fnmatch.py +++ b/plugins/module_utils/_api/utils/fnmatch.py @@ -119,7 +119,7 @@ def translate(pat): stuff = '^' + stuff[1:] elif stuff[0] == '^': stuff = '\\' + stuff - res = '%s[%s]' % (res, stuff) + res = f'{res}[{stuff}]' else: res = res + re.escape(c) diff --git a/plugins/module_utils/_api/utils/json_stream.py b/plugins/module_utils/_api/utils/json_stream.py index e2bd8e92..caf400b1 100644 --- 
a/plugins/module_utils/_api/utils/json_stream.py +++ b/plugins/module_utils/_api/utils/json_stream.py @@ -52,7 +52,7 @@ def json_stream(stream): return split_buffer(stream, json_splitter, json_decoder.decode) -def line_splitter(buffer, separator=u'\n'): +def line_splitter(buffer, separator='\n'): index = buffer.find(str(separator)) if index == -1: return None diff --git a/plugins/module_utils/_api/utils/ports.py b/plugins/module_utils/_api/utils/ports.py index 0fb85eed..a0e3a04c 100644 --- a/plugins/module_utils/_api/utils/ports.py +++ b/plugins/module_utils/_api/utils/ports.py @@ -49,19 +49,19 @@ def build_port_bindings(ports): def _raise_invalid_port(port): - raise ValueError('Invalid port "%s", should be ' + raise ValueError(f'Invalid port "{port}", should be ' '[[remote_ip:]remote_port[-remote_port]:]' - 'port[/protocol]' % port) + 'port[/protocol]') def port_range(start, end, proto, randomly_available_port=False): if not start: return start if not end: - return [start + proto] + return [f'{start}{proto}'] if randomly_available_port: - return ['{0}-{1}'.format(start, end) + proto] - return [str(port) + proto for port in range(int(start), int(end) + 1)] + return [f'{start}-{end}{proto}'] + return [f'{port}{proto}' for port in range(int(start), int(end) + 1)] def split_port(port): diff --git a/plugins/module_utils/_api/utils/proxy.py b/plugins/module_utils/_api/utils/proxy.py index 9181ae18..9389eae9 100644 --- a/plugins/module_utils/_api/utils/proxy.py +++ b/plugins/module_utils/_api/utils/proxy.py @@ -80,5 +80,4 @@ class ProxyConfig(dict): return proxy_env + environment def __str__(self): - return 'ProxyConfig(http={0}, https={1}, ftp={2}, no_proxy={3})'.format( - self.http, self.https, self.ftp, self.no_proxy) + return f'ProxyConfig(http={self.http}, https={self.https}, ftp={self.ftp}, no_proxy={self.no_proxy})' diff --git a/plugins/module_utils/_api/utils/socket.py b/plugins/module_utils/_api/utils/socket.py index 963ed98c..44b81f2e 100644 --- 
a/plugins/module_utils/_api/utils/socket.py +++ b/plugins/module_utils/_api/utils/socket.py @@ -169,7 +169,7 @@ def consume_socket_output(frames, demux=False): # It is guaranteed that for each frame, one and only one stream # is not None. if frame == (None, None): - raise AssertionError('frame must be (None, None), but got %s' % (frame, )) + raise AssertionError(f'frame must be (None, None), but got {frame}') if frame[0] is not None: if out[0] is None: out[0] = frame[0] @@ -193,4 +193,4 @@ def demux_adaptor(stream_id, data): elif stream_id == STDERR: return (None, data) else: - raise ValueError('{0} is not a valid stream'.format(stream_id)) + raise ValueError(f'{stream_id} is not a valid stream') diff --git a/plugins/module_utils/_api/utils/utils.py b/plugins/module_utils/_api/utils/utils.py index 0d7db9f1..8bcf8546 100644 --- a/plugins/module_utils/_api/utils/utils.py +++ b/plugins/module_utils/_api/utils/utils.py @@ -136,8 +136,7 @@ def convert_volume_binds(binds): if isinstance(v, dict): if 'ro' in v and 'mode' in v: raise ValueError( - 'Binding cannot contain both "ro" and "mode": {0}' - .format(repr(v)) + f'Binding cannot contain both "ro" and "mode": {v!r}' ) bind = v['bind'] @@ -167,11 +166,11 @@ def convert_volume_binds(binds): else: mode = v['propagation'] - result.append('{0}:{1}:{2}'.format(k, bind, mode)) + result.append(f'{k}:{bind}:{mode}') else: if isinstance(v, bytes): v = v.decode('utf-8') - result.append('{0}:{1}:rw'.format(k, v)) + result.append(f'{k}:{v}:rw') return result @@ -181,8 +180,7 @@ def convert_tmpfs_mounts(tmpfs): if not isinstance(tmpfs, list): raise ValueError( - 'Expected tmpfs value to be either a list or a dict, found: {0}' - .format(type(tmpfs).__name__) + f'Expected tmpfs value to be either a list or a dict, found: {type(tmpfs).__name__}' ) result = {} @@ -196,8 +194,7 @@ def convert_tmpfs_mounts(tmpfs): else: raise ValueError( - "Expected item in tmpfs list to be a string, found: {0}" - .format(type(mount).__name__) + 
f"Expected item in tmpfs list to be a string, found: {type(mount).__name__}" ) result[name] = options @@ -257,14 +254,14 @@ def parse_host(addr, is_win32=False, tls=False): if proto not in ('tcp', 'unix', 'npipe', 'ssh'): raise errors.DockerException( - "Invalid bind address protocol: {0}".format(addr) + f"Invalid bind address protocol: {addr}" ) if proto == 'tcp' and not parsed_url.netloc: # "tcp://" is exceptionally disallowed by convention; # omitting a hostname for other protocols is fine raise errors.DockerException( - 'Invalid bind address format: {0}'.format(addr) + f'Invalid bind address format: {addr}' ) if any([ @@ -272,13 +269,12 @@ def parse_host(addr, is_win32=False, tls=False): parsed_url.password ]): raise errors.DockerException( - 'Invalid bind address format: {0}'.format(addr) + f'Invalid bind address format: {addr}' ) if parsed_url.path and proto == 'ssh': raise errors.DockerException( - 'Invalid bind address format: no path allowed for this protocol:' - ' {0}'.format(addr) + f'Invalid bind address format: no path allowed for this protocol: {addr}' ) else: path = parsed_url.path @@ -292,19 +288,19 @@ def parse_host(addr, is_win32=False, tls=False): port = parsed_url.port or 0 if port <= 0: port = 22 if proto == 'ssh' else (2375 if tls else 2376) - netloc = '{0}:{1}'.format(parsed_url.netloc, port) + netloc = f'{parsed_url.netloc}:{port}' if not parsed_url.hostname: - netloc = '{0}:{1}'.format(DEFAULT_HTTP_HOST, port) + netloc = f'{DEFAULT_HTTP_HOST}:{port}' # Rewrite schemes to fit library internals (requests adapters) if proto == 'tcp': - proto = 'http{0}'.format('s' if tls else '') + proto = f"http{'s' if tls else ''}" elif proto == 'unix': proto = 'http+unix' if proto in ('http+unix', 'npipe'): - return "{0}://{1}".format(proto, path).rstrip('/') + return f"{proto}://{path}".rstrip('/') return urlunparse(URLComponents( scheme=proto, netloc=netloc, @@ -323,7 +319,7 @@ def parse_devices(devices): continue if not isinstance(device, str): raise 
errors.DockerException( - 'Invalid device type {0}'.format(type(device)) + f'Invalid device type {type(device)}' ) device_mapping = device.split(':') if device_mapping: @@ -428,17 +424,14 @@ def parse_bytes(s): digits = float(digits_part) except ValueError: raise errors.DockerException( - 'Failed converting the string value for memory ({0}) to' - ' an integer.'.format(digits_part) + f'Failed converting the string value for memory ({digits_part}) to an integer.' ) # Reconvert to long for the final result s = int(digits * units[suffix]) else: raise errors.DockerException( - 'The specified value for memory ({0}) should specify the' - ' units. The postfix should be one of the `b` `k` `m` `g`' - ' characters'.format(s) + f'The specified value for memory ({s}) should specify the units. The postfix should be one of the `b` `k` `m` `g` characters' ) return s @@ -448,7 +441,7 @@ def normalize_links(links): if isinstance(links, dict): links = links.items() - return ['{0}:{1}'.format(k, v) if v else k for k, v in sorted(links)] + return [f'{k}:{v}' if v else k for k, v in sorted(links)] def parse_env_file(env_file): @@ -473,9 +466,7 @@ def parse_env_file(env_file): k, v = parse_line environment[k] = v else: - raise errors.DockerException( - 'Invalid line in environment file {0}:\n{1}'.format( - env_file, line)) + raise errors.DockerException(f'Invalid line in environment file {env_file}:\n{line}') return environment @@ -491,7 +482,7 @@ def format_environment(environment): if isinstance(value, bytes): value = value.decode('utf-8') - return u'{key}={value}'.format(key=key, value=value) + return f'{key}={value}' return [format_env(*var) for var in environment.items()] @@ -499,11 +490,11 @@ def format_extra_hosts(extra_hosts, task=False): # Use format dictated by Swarm API if container is part of a task if task: return [ - '{0} {1}'.format(v, k) for k, v in sorted(extra_hosts.items()) + f'{v} {k}' for k, v in sorted(extra_hosts.items()) ] return [ - '{0}:{1}'.format(k, v) for k, 
v in sorted(extra_hosts.items()) + f'{k}:{v}' for k, v in sorted(extra_hosts.items()) ] diff --git a/plugins/module_utils/_logfmt.py b/plugins/module_utils/_logfmt.py index 73a2732d..1c306742 100644 --- a/plugins/module_utils/_logfmt.py +++ b/plugins/module_utils/_logfmt.py @@ -98,7 +98,7 @@ class _Parser(object): try: v += _HEX_DICT[self.line[self.index]] except KeyError: - raise InvalidLogFmt('Invalid unicode escape digit {digit!r}'.format(digit=self.line[self.index])) + raise InvalidLogFmt(f'Invalid unicode escape digit {self.line[self.index]!r}') self.index += 6 return chr(v) @@ -170,7 +170,8 @@ def parse_line(line, logrus_mode=False): if cur in _ESCAPE_DICT: value.append(_ESCAPE_DICT[cur]) elif cur != 'u': - raise InvalidLogFmt('Unknown escape sequence {seq!r}'.format(seq='\\' + cur)) + es = f"\\{cur}" + raise InvalidLogFmt(f'Unknown escape sequence {es!r}') else: parser.prev() value.append(parser.parse_unicode_sequence()) diff --git a/plugins/module_utils/_platform.py b/plugins/module_utils/_platform.py index 3460bdb5..5d993ff2 100644 --- a/plugins/module_utils/_platform.py +++ b/plugins/module_utils/_platform.py @@ -18,9 +18,9 @@ _VALID_STR = re.compile('^[A-Za-z0-9_-]+$') def _validate_part(string, part, part_name): if not part: - raise ValueError('Invalid platform string "{string}": {part} is empty'.format(string=string, part=part_name)) + raise ValueError(f'Invalid platform string "{string}": {part_name} is empty') if not _VALID_STR.match(part): - raise ValueError('Invalid platform string "{string}": {part} has invalid characters'.format(string=string, part=part_name)) + raise ValueError(f'Invalid platform string "{string}": {part_name} has invalid characters') return part @@ -123,16 +123,16 @@ class _Platform(object): arch=arch or None, variant=variant or None, ) - raise ValueError('Invalid platform string "{0}": unknown OS or architecture'.format(string)) + raise ValueError(f'Invalid platform string "{string}": unknown OS or architecture') os = 
_validate_part(string, parts[0], 'OS') if not os: - raise ValueError('Invalid platform string "{0}": OS is empty'.format(string)) + raise ValueError(f'Invalid platform string "{string}": OS is empty') arch = _validate_part(string, parts[1], 'architecture') if len(parts) > 1 else None if arch is not None and not arch: - raise ValueError('Invalid platform string "{0}": architecture is empty'.format(string)) + raise ValueError(f'Invalid platform string "{string}": architecture is empty') variant = _validate_part(string, parts[2], 'variant') if len(parts) > 2 else None if variant is not None and not variant: - raise ValueError('Invalid platform string "{0}": variant is empty'.format(string)) + raise ValueError(f'Invalid platform string "{string}": variant is empty') arch, variant = _normalize_arch(arch, variant or '') if len(parts) == 2 and arch == 'arm' and variant == 'v7': variant = None @@ -155,7 +155,7 @@ class _Platform(object): return '/'.join(parts) def __repr__(self): - return '_Platform(os={os!r}, arch={arch!r}, variant={variant!r})'.format(os=self.os, arch=self.arch, variant=self.variant) + return f'_Platform(os={self.os!r}, arch={self.arch!r}, variant={self.variant!r})' def __eq__(self, other): return self.os == other.os and self.arch == other.arch and self.variant == other.variant diff --git a/plugins/module_utils/_scramble.py b/plugins/module_utils/_scramble.py index 3b82ab58..17cfe28a 100644 --- a/plugins/module_utils/_scramble.py +++ b/plugins/module_utils/_scramble.py @@ -35,7 +35,7 @@ def unscramble(value, key): '''Do NOT use this for cryptographic purposes!''' if len(key) < 1: raise ValueError('Key must be at least one byte') - if not value.startswith(u'=S='): + if not value.startswith('=S='): raise ValueError('Value does not start with indicator') value = base64.b64decode(value[3:]) k = key[0] diff --git a/plugins/module_utils/common.py b/plugins/module_utils/common.py index 7289e1c5..2954d0d2 100644 --- a/plugins/module_utils/common.py +++ 
b/plugins/module_utils/common.py @@ -125,12 +125,9 @@ def _get_tls_config(fail_function, **kwargs): if assert_hostname is not None: fail_function( "tls_hostname is not compatible with Docker SDK for Python 7.0.0+. You are using" - " Docker SDK for Python {docker_py_version}. The tls_hostname option (value: {tls_hostname})" + f" Docker SDK for Python {docker_version}. The tls_hostname option (value: {assert_hostname})" " has either been set directly or with the environment variable DOCKER_TLS_HOSTNAME." - " Make sure it is not set, or switch to an older version of Docker SDK for Python.".format( - docker_py_version=docker_version, - tls_hostname=assert_hostname, - ) + " Make sure it is not set, or switch to an older version of Docker SDK for Python." ) # Filter out all None parameters kwargs = dict((k, v) for k, v in kwargs.items() if v is not None) @@ -138,7 +135,7 @@ def _get_tls_config(fail_function, **kwargs): tls_config = TLSConfig(**kwargs) return tls_config except TLSParameterError as exc: - fail_function("TLS config error: %s" % exc) + fail_function(f"TLS config error: {exc}") def is_using_tls(auth): @@ -203,17 +200,20 @@ class AnsibleDockerClientBase(Client): self.fail("Cannot have both the docker-py and docker python modules (old and new version of Docker " "SDK for Python) installed together as they use the same namespace and cause a corrupt " "installation. Please uninstall both packages, and re-install only the docker-py or docker " - "python module (for %s's Python %s). It is recommended to install the docker module. Please " + f"python module (for {platform.node()}'s Python {sys.executable}). It is recommended to install the docker module. Please " "note that simply uninstalling one of the modules can leave the other module in a broken " - "state." % (platform.node(), sys.executable)) + "state.") if not HAS_DOCKER_PY: msg = missing_required_lib("Docker SDK for Python: docker>=5.0.0") - msg = msg + ", for example via `pip install docker`. 
The error was: %s" - self.fail(msg % HAS_DOCKER_ERROR, exception=HAS_DOCKER_TRACEBACK) + msg = f"{msg}, for example via `pip install docker`. The error was: {HAS_DOCKER_ERROR}" + self.fail(msg, exception=HAS_DOCKER_TRACEBACK) if self.docker_py_version < LooseVersion(min_docker_version): - msg = "Error: Docker SDK for Python version is %s (%s's Python %s). Minimum version required is %s." + msg = ( + f"Error: Docker SDK for Python version is {docker_version} ({platform.node()}'s Python {sys.executable})." + f" Minimum version required is {min_docker_version}." + ) if not NEEDS_DOCKER_PY2: # The minimal required version is < 2.0 (and the current version as well). # Advertise docker (instead of docker-py). @@ -222,7 +222,7 @@ class AnsibleDockerClientBase(Client): msg += DOCKERPYUPGRADE_SWITCH_TO_DOCKER else: msg += DOCKERPYUPGRADE_UPGRADE_DOCKER - self.fail(msg % (docker_version, platform.node(), sys.executable, min_docker_version)) + self.fail(msg) self._connect_params = get_connect_params(self.auth_params, fail_function=self.fail) @@ -230,14 +230,14 @@ class AnsibleDockerClientBase(Client): super(AnsibleDockerClientBase, self).__init__(**self._connect_params) self.docker_api_version_str = self.api_version except APIError as exc: - self.fail("Docker API error: %s" % exc) + self.fail(f"Docker API error: {exc}") except Exception as exc: - self.fail("Error connecting: %s" % exc) + self.fail(f"Error connecting: {exc}") self.docker_api_version = LooseVersion(self.docker_api_version_str) min_docker_api_version = min_docker_api_version or '1.25' if self.docker_api_version < LooseVersion(min_docker_api_version): - self.fail('Docker API version is %s. Minimum version required is %s.' % (self.docker_api_version_str, min_docker_api_version)) + self.fail(f'Docker API version is {self.docker_api_version_str}. 
Minimum version required is {min_docker_api_version}.') def log(self, msg, pretty_print=False): pass @@ -331,23 +331,23 @@ class AnsibleDockerClientBase(Client): def _handle_ssl_error(self, error): match = re.match(r"hostname.*doesn\'t match (\'.*\')", str(error)) if match: - self.fail("You asked for verification that Docker daemons certificate's hostname matches %s. " - "The actual certificate's hostname is %s. Most likely you need to set DOCKER_TLS_HOSTNAME " - "or pass `tls_hostname` with a value of %s. You may also use TLS without verification by " - "setting the `tls` parameter to true." - % (self.auth_params['tls_hostname'], match.group(1), match.group(1))) - self.fail("SSL Exception: %s" % (error)) + hostname = self.auth_params['tls_hostname'] + self.fail(f"You asked for verification that Docker daemons certificate's hostname matches {hostname}. " + f"The actual certificate's hostname is {match.group(1)}. Most likely you need to set DOCKER_TLS_HOSTNAME " + f"or pass `tls_hostname` with a value of {match.group(1)}. 
You may also use TLS without verification by " + "setting the `tls` parameter to true.") + self.fail(f"SSL Exception: {error}") def get_container_by_id(self, container_id): try: - self.log("Inspecting container Id %s" % container_id) + self.log(f"Inspecting container Id {container_id}") result = self.inspect_container(container=container_id) self.log("Completed container inspection") return result except NotFound as dummy: return None except Exception as exc: - self.fail("Error inspecting container: %s" % exc) + self.fail(f"Error inspecting container: {exc}") def get_container(self, name=None): ''' @@ -363,7 +363,7 @@ class AnsibleDockerClientBase(Client): result = None try: for container in self.containers(all=True): - self.log("testing container: %s" % (container['Names'])) + self.log(f"testing container: {container['Names']}") if isinstance(container['Names'], list) and search_name in container['Names']: result = container break @@ -376,7 +376,7 @@ class AnsibleDockerClientBase(Client): except SSLError as exc: self._handle_ssl_error(exc) except Exception as exc: - self.fail("Error retrieving container list: %s" % exc) + self.fail(f"Error retrieving container list: {exc}") if result is None: return None @@ -395,7 +395,7 @@ class AnsibleDockerClientBase(Client): if network_id is None: try: for network in self.networks(): - self.log("testing network: %s" % (network['Name'])) + self.log(f"testing network: {network['Name']}") if name == network['Name']: result = network break @@ -405,20 +405,20 @@ class AnsibleDockerClientBase(Client): except SSLError as exc: self._handle_ssl_error(exc) except Exception as exc: - self.fail("Error retrieving network list: %s" % exc) + self.fail(f"Error retrieving network list: {exc}") if result is not None: network_id = result['Id'] if network_id is not None: try: - self.log("Inspecting network Id %s" % network_id) + self.log(f"Inspecting network Id {network_id}") result = self.inspect_network(network_id) self.log("Completed network 
inspection") except NotFound as dummy: return None except Exception as exc: - self.fail("Error inspecting network: %s" % exc) + self.fail(f"Error inspecting network: {exc}") return result @@ -429,7 +429,7 @@ class AnsibleDockerClientBase(Client): if not name: return None - self.log("Find image %s:%s" % (name, tag)) + self.log(f"Find image {name}:{tag}") images = self._image_lookup(name, tag) if not images: # In API <= 1.20 seeing 'docker.io/' as the name of images pulled from docker hub @@ -437,41 +437,41 @@ class AnsibleDockerClientBase(Client): if registry == 'docker.io': # If docker.io is explicitly there in name, the image # is not found in some cases (#41509) - self.log("Check for docker.io image: %s" % repo_name) + self.log(f"Check for docker.io image: {repo_name}") images = self._image_lookup(repo_name, tag) if not images and repo_name.startswith('library/'): # Sometimes library/xxx images are not found lookup = repo_name[len('library/'):] - self.log("Check for docker.io image: %s" % lookup) + self.log(f"Check for docker.io image: {lookup}") images = self._image_lookup(lookup, tag) if not images: # Last case for some Docker versions: if docker.io was not there, # it can be that the image was not found either # (https://github.com/ansible/ansible/pull/15586) - lookup = "%s/%s" % (registry, repo_name) - self.log("Check for docker.io image: %s" % lookup) + lookup = f"{registry}/{repo_name}" + self.log(f"Check for docker.io image: {lookup}") images = self._image_lookup(lookup, tag) if not images and '/' not in repo_name: # This seems to be happening with podman-docker # (https://github.com/ansible-collections/community.docker/issues/291) - lookup = "%s/library/%s" % (registry, repo_name) - self.log("Check for docker.io image: %s" % lookup) + lookup = f"{registry}/library/{repo_name}" + self.log(f"Check for docker.io image: {lookup}") images = self._image_lookup(lookup, tag) if len(images) > 1: - self.fail("Daemon returned more than one result for %s:%s" % (name, 
tag)) + self.fail(f"Daemon returned more than one result for {name}:{tag}") if len(images) == 1: try: inspection = self.inspect_image(images[0]['Id']) except NotFound: - self.log("Image %s:%s not found." % (name, tag)) + self.log(f"Image {name}:{tag} not found.") return None except Exception as exc: - self.fail("Error inspecting image %s:%s - %s" % (name, tag, str(exc))) + self.fail(f"Error inspecting image {name}:{tag} - {exc}") return inspection - self.log("Image %s:%s not found." % (name, tag)) + self.log(f"Image {name}:{tag} not found.") return None def find_image_by_id(self, image_id, accept_missing_image=False): @@ -481,16 +481,16 @@ class AnsibleDockerClientBase(Client): if not image_id: return None - self.log("Find image %s (by ID)" % image_id) + self.log(f"Find image {image_id} (by ID)") try: inspection = self.inspect_image(image_id) except NotFound as exc: if not accept_missing_image: - self.fail("Error inspecting image ID %s - %s" % (image_id, str(exc))) - self.log("Image %s not found." 
% image_id) + self.fail(f"Error inspecting image ID {image_id} - {exc}") + self.log(f"Image {image_id} not found.") return None except Exception as exc: - self.fail("Error inspecting image ID %s - %s" % (image_id, str(exc))) + self.fail(f"Error inspecting image ID {image_id} - {exc}") return inspection def _image_lookup(self, name, tag): @@ -502,11 +502,11 @@ class AnsibleDockerClientBase(Client): try: response = self.images(name=name) except Exception as exc: - self.fail("Error searching for image %s - %s" % (name, str(exc))) + self.fail(f"Error searching for image {name} - {exc}") images = response if tag: - lookup = "%s:%s" % (name, tag) - lookup_digest = "%s@%s" % (name, tag) + lookup = f"{name}:{tag}" + lookup_digest = f"{name}@{tag}" images = [] for image in response: tags = image.get('RepoTags') @@ -527,7 +527,7 @@ class AnsibleDockerClientBase(Client): ) if platform is not None: kwargs['platform'] = platform - self.log("Pulling image %s:%s" % (name, tag)) + self.log(f"Pulling image {name}:{tag}") old_tag = self.find_image(name, tag) try: for line in self.pull(name, **kwargs): @@ -535,13 +535,11 @@ class AnsibleDockerClientBase(Client): if line.get('error'): if line.get('errorDetail'): error_detail = line.get('errorDetail') - self.fail("Error pulling %s - code: %s message: %s" % (name, - error_detail.get('code'), - error_detail.get('message'))) + self.fail(f"Error pulling {name} - code: {error_detail.get('code')} message: {error_detail.get('message')}") else: - self.fail("Error pulling %s - %s" % (name, line.get('error'))) + self.fail(f"Error pulling {name} - {line.get('error')}") except Exception as exc: - self.fail("Error pulling image %s:%s - %s" % (name, tag, str(exc))) + self.fail(f"Error pulling image {name}:{tag} - {exc}") new_tag = self.find_image(name, tag) @@ -652,22 +650,23 @@ class AnsibleDockerClient(AnsibleDockerClientBase): if 'usage_msg' in data: usg = data['usage_msg'] else: - usg = 'set %s option' % (option, ) + usg = f'set {option} option' 
if not support_docker_api: - msg = 'Docker API version is %s. Minimum version required is %s to %s.' - msg = msg % (self.docker_api_version_str, data['docker_api_version'], usg) + msg = f"Docker API version is {self.docker_api_version_str}. Minimum version required is {data['docker_api_version']} to {usg}." elif not support_docker_py: - msg = "Docker SDK for Python version is %s (%s's Python %s). Minimum version required is %s to %s. " + msg = ( + f"Docker SDK for Python version is {docker_version} ({platform.node()}'s Python {sys.executable})." + f" Minimum version required is {data['docker_py_version']} to {usg}. " + ) if LooseVersion(data['docker_py_version']) < LooseVersion('2.0.0'): msg += DOCKERPYUPGRADE_RECOMMEND_DOCKER elif self.docker_py_version < LooseVersion('2.0.0'): msg += DOCKERPYUPGRADE_SWITCH_TO_DOCKER else: msg += DOCKERPYUPGRADE_UPGRADE_DOCKER - msg = msg % (docker_version, platform.node(), sys.executable, data['docker_py_version'], usg) else: # should not happen - msg = 'Cannot %s with your configuration.' % (usg, ) + msg = f'Cannot {usg} with your configuration.' 
self.fail(msg) def report_warnings(self, result, warnings_key=None): @@ -691,6 +690,6 @@ class AnsibleDockerClient(AnsibleDockerClientBase): result = result.get(key) if isinstance(result, Sequence): for warning in result: - self.module.warn('Docker warning: {0}'.format(warning)) + self.module.warn(f'Docker warning: {warning}') elif isinstance(result, str) and result: - self.module.warn('Docker warning: {0}'.format(result)) + self.module.warn(f'Docker warning: {result}') diff --git a/plugins/module_utils/common_api.py b/plugins/module_utils/common_api.py index 1da8ad19..19421255 100644 --- a/plugins/module_utils/common_api.py +++ b/plugins/module_utils/common_api.py @@ -59,7 +59,7 @@ def _get_tls_config(fail_function, **kwargs): tls_config = TLSConfig(**kwargs) return tls_config except TLSParameterError as exc: - fail_function("TLS config error: %s" % exc) + fail_function(f"TLS config error: {exc}") def is_using_tls(auth_data): @@ -115,14 +115,14 @@ class AnsibleDockerClientBase(Client): except MissingRequirementException as exc: self.fail(missing_required_lib(exc.requirement), exception=exc.import_exception) except APIError as exc: - self.fail("Docker API error: %s" % exc) + self.fail(f"Docker API error: {exc}") except Exception as exc: - self.fail("Error connecting: %s" % exc) + self.fail(f"Error connecting: {exc}") self.docker_api_version = LooseVersion(self.docker_api_version_str) min_docker_api_version = min_docker_api_version or '1.25' if self.docker_api_version < LooseVersion(min_docker_api_version): - self.fail('Docker API version is %s. Minimum version required is %s.' % (self.docker_api_version_str, min_docker_api_version)) + self.fail(f'Docker API version is {self.docker_api_version_str}. 
Minimum version required is {min_docker_api_version}.') def log(self, msg, pretty_print=False): pass @@ -219,23 +219,23 @@ class AnsibleDockerClientBase(Client): def _handle_ssl_error(self, error): match = re.match(r"hostname.*doesn\'t match (\'.*\')", str(error)) if match: - self.fail("You asked for verification that Docker daemons certificate's hostname matches %s. " - "The actual certificate's hostname is %s. Most likely you need to set DOCKER_TLS_HOSTNAME " - "or pass `tls_hostname` with a value of %s. You may also use TLS without verification by " - "setting the `tls` parameter to true." - % (self.auth_params['tls_hostname'], match.group(1), match.group(1))) - self.fail("SSL Exception: %s" % (error)) + hostname = self.auth_params['tls_hostname'] + self.fail(f"You asked for verification that Docker daemons certificate's hostname matches {hostname}. " + f"The actual certificate's hostname is {match.group(1)}. Most likely you need to set DOCKER_TLS_HOSTNAME " + f"or pass `tls_hostname` with a value of {match.group(1)}. 
You may also use TLS without verification by " + "setting the `tls` parameter to true.") + self.fail(f"SSL Exception: {error}") def get_container_by_id(self, container_id): try: - self.log("Inspecting container Id %s" % container_id) + self.log(f"Inspecting container Id {container_id}") result = self.get_json('/containers/{0}/json', container_id) self.log("Completed container inspection") return result except NotFound as dummy: return None except Exception as exc: - self.fail("Error inspecting container: %s" % exc) + self.fail(f"Error inspecting container: {exc}") def get_container(self, name=None): ''' @@ -258,7 +258,7 @@ class AnsibleDockerClientBase(Client): } containers = self.get_json("/containers/json", params=params) for container in containers: - self.log("testing container: %s" % (container['Names'])) + self.log(f"testing container: {container['Names']}") if isinstance(container['Names'], list) and search_name in container['Names']: result = container break @@ -271,7 +271,7 @@ class AnsibleDockerClientBase(Client): except SSLError as exc: self._handle_ssl_error(exc) except Exception as exc: - self.fail("Error retrieving container list: %s" % exc) + self.fail(f"Error retrieving container list: {exc}") if result is None: return None @@ -291,7 +291,7 @@ class AnsibleDockerClientBase(Client): try: networks = self.get_json("/networks") for network in networks: - self.log("testing network: %s" % (network['Name'])) + self.log(f"testing network: {network['Name']}") if name == network['Name']: result = network break @@ -301,20 +301,20 @@ class AnsibleDockerClientBase(Client): except SSLError as exc: self._handle_ssl_error(exc) except Exception as exc: - self.fail("Error retrieving network list: %s" % exc) + self.fail(f"Error retrieving network list: {exc}") if result is not None: network_id = result['Id'] if network_id is not None: try: - self.log("Inspecting network Id %s" % network_id) + self.log(f"Inspecting network Id {network_id}") result = 
self.get_json('/networks/{0}', network_id) self.log("Completed network inspection") except NotFound as dummy: return None except Exception as exc: - self.fail("Error inspecting network: %s" % exc) + self.fail(f"Error inspecting network: {exc}") return result @@ -336,10 +336,10 @@ class AnsibleDockerClientBase(Client): params['filters'] = convert_filters({'reference': name}) images = self.get_json("/images/json", params=params) except Exception as exc: - self.fail("Error searching for image %s - %s" % (name, str(exc))) + self.fail(f"Error searching for image {name} - {exc}") if tag: - lookup = "%s:%s" % (name, tag) - lookup_digest = "%s@%s" % (name, tag) + lookup = f"{name}:{tag}" + lookup_digest = f"{name}@{tag}" response = images images = [] for image in response: @@ -357,7 +357,7 @@ class AnsibleDockerClientBase(Client): if not name: return None - self.log("Find image %s:%s" % (name, tag)) + self.log(f"Find image {name}:{tag}") images = self._image_lookup(name, tag) if not images: # In API <= 1.20 seeing 'docker.io/' as the name of images pulled from docker hub @@ -365,40 +365,40 @@ class AnsibleDockerClientBase(Client): if registry == 'docker.io': # If docker.io is explicitly there in name, the image # is not found in some cases (#41509) - self.log("Check for docker.io image: %s" % repo_name) + self.log(f"Check for docker.io image: {repo_name}") images = self._image_lookup(repo_name, tag) if not images and repo_name.startswith('library/'): # Sometimes library/xxx images are not found lookup = repo_name[len('library/'):] - self.log("Check for docker.io image: %s" % lookup) + self.log(f"Check for docker.io image: {lookup}") images = self._image_lookup(lookup, tag) if not images: # Last case for some Docker versions: if docker.io was not there, # it can be that the image was not found either # (https://github.com/ansible/ansible/pull/15586) - lookup = "%s/%s" % (registry, repo_name) - self.log("Check for docker.io image: %s" % lookup) + lookup = 
f"{registry}/{repo_name}" + self.log(f"Check for docker.io image: {lookup}") images = self._image_lookup(lookup, tag) if not images and '/' not in repo_name: # This seems to be happening with podman-docker # (https://github.com/ansible-collections/community.docker/issues/291) - lookup = "%s/library/%s" % (registry, repo_name) - self.log("Check for docker.io image: %s" % lookup) + lookup = f"{registry}/library/{repo_name}" + self.log(f"Check for docker.io image: {lookup}") images = self._image_lookup(lookup, tag) if len(images) > 1: - self.fail("Daemon returned more than one result for %s:%s" % (name, tag)) + self.fail(f"Daemon returned more than one result for {name}:{tag}") if len(images) == 1: try: return self.get_json('/images/{0}/json', images[0]['Id']) except NotFound: - self.log("Image %s:%s not found." % (name, tag)) + self.log(f"Image {name}:{tag} not found.") return None except Exception as exc: - self.fail("Error inspecting image %s:%s - %s" % (name, tag, str(exc))) + self.fail(f"Error inspecting image {name}:{tag} - {exc}") - self.log("Image %s:%s not found." % (name, tag)) + self.log(f"Image {name}:{tag} not found.") return None def find_image_by_id(self, image_id, accept_missing_image=False): @@ -408,22 +408,22 @@ class AnsibleDockerClientBase(Client): if not image_id: return None - self.log("Find image %s (by ID)" % image_id) + self.log(f"Find image {image_id} (by ID)") try: return self.get_json('/images/{0}/json', image_id) except NotFound as exc: if not accept_missing_image: - self.fail("Error inspecting image ID %s - %s" % (image_id, str(exc))) - self.log("Image %s not found." 
% image_id) + self.fail(f"Error inspecting image ID {image_id} - {exc}") + self.log(f"Image {image_id} not found.") return None except Exception as exc: - self.fail("Error inspecting image ID %s - %s" % (image_id, str(exc))) + self.fail(f"Error inspecting image ID {image_id} - {exc}") def pull_image(self, name, tag="latest", platform=None): ''' Pull an image ''' - self.log("Pulling image %s:%s" % (name, tag)) + self.log(f"Pulling image {name}:{tag}") old_tag = self.find_image(name, tag) try: repository, image_tag = parse_repository_tag(name) @@ -450,13 +450,11 @@ class AnsibleDockerClientBase(Client): if line.get('error'): if line.get('errorDetail'): error_detail = line.get('errorDetail') - self.fail("Error pulling %s - code: %s message: %s" % (name, - error_detail.get('code'), - error_detail.get('message'))) + self.fail(f"Error pulling {name} - code: {error_detail.get('code')} message: {error_detail.get('message')}") else: - self.fail("Error pulling %s - %s" % (name, line.get('error'))) + self.fail(f"Error pulling {name} - {line.get('error')}") except Exception as exc: - self.fail("Error pulling image %s:%s - %s" % (name, tag, str(exc))) + self.fail(f"Error pulling image {name}:{tag} - {exc}") new_tag = self.find_image(name, tag) @@ -547,13 +545,12 @@ class AnsibleDockerClient(AnsibleDockerClientBase): if 'usage_msg' in data: usg = data['usage_msg'] else: - usg = 'set %s option' % (option, ) + usg = f'set {option} option' if not support_docker_api: - msg = 'Docker API version is %s. Minimum version required is %s to %s.' - msg = msg % (self.docker_api_version_str, data['docker_api_version'], usg) + msg = f"Docker API version is {self.docker_api_version_str}. Minimum version required is {data['docker_api_version']} to {usg}." else: # should not happen - msg = 'Cannot %s with your configuration.' % (usg, ) + msg = f'Cannot {usg} with your configuration.' 
self.fail(msg) def report_warnings(self, result, warnings_key=None): @@ -577,6 +574,6 @@ class AnsibleDockerClient(AnsibleDockerClientBase): result = result.get(key) if isinstance(result, Sequence): for warning in result: - self.module.warn('Docker warning: {0}'.format(warning)) + self.module.warn(f'Docker warning: {warning}') elif isinstance(result, str) and result: - self.module.warn('Docker warning: {0}'.format(result)) + self.module.warn(f'Docker warning: {result}') diff --git a/plugins/module_utils/common_cli.py b/plugins/module_utils/common_cli.py index 1cdc788c..7ea5d124 100644 --- a/plugins/module_utils/common_cli.py +++ b/plugins/module_utils/common_cli.py @@ -90,7 +90,7 @@ class AnsibleDockerClientBase(object): self.docker_api_version = LooseVersion(self.docker_api_version_str) min_docker_api_version = min_docker_api_version or '1.25' if self.docker_api_version < LooseVersion(min_docker_api_version): - self.fail('Docker API version is %s. Minimum version required is %s.' % (self.docker_api_version_str, min_docker_api_version)) + self.fail(f'Docker API version is {self.docker_api_version_str}. 
Minimum version required is {min_docker_api_version}.') else: self.docker_api_version_str = None self.docker_api_version = None @@ -128,11 +128,7 @@ class AnsibleDockerClientBase(object): try: data = json.loads(stdout) except Exception as exc: - self.fail('Error while parsing JSON output of {cmd}: {exc}\nJSON output: {stdout}'.format( - cmd=self._compose_cmd_str(args), - exc=to_native(exc), - stdout=to_native(stdout), - )) + self.fail(f'Error while parsing JSON output of {self._compose_cmd_str(args)}: {exc}\nJSON output: {to_native(stdout)}') return rc, data, stderr # def call_cli_json_stream(self, *args, check_rc=False, data=None, cwd=None, environ_update=None, warn_on_stderr=False): @@ -148,11 +144,7 @@ class AnsibleDockerClientBase(object): if line.startswith(b'{'): result.append(json.loads(line)) except Exception as exc: - self.fail('Error while parsing JSON output of {cmd}: {exc}\nJSON output: {stdout}'.format( - cmd=self._compose_cmd_str(args), - exc=to_native(exc), - stdout=to_native(stdout), - )) + self.fail(f'Error while parsing JSON output of {self._compose_cmd_str(args)}: {exc}\nJSON output: {to_native(stdout)}') return rc, result, stderr @abc.abstractmethod @@ -188,12 +180,12 @@ class AnsibleDockerClientBase(object): if the tag exists. ''' dummy, images, dummy = self.call_cli_json_stream( - 'image', 'ls', '--format', '{{ json . }}', '--no-trunc', '--filter', 'reference={0}'.format(name), + 'image', 'ls', '--format', '{{ json . 
}}', '--no-trunc', '--filter', f'reference={name}', check_rc=True, ) if tag: - lookup = "%s:%s" % (name, tag) - lookup_digest = "%s@%s" % (name, tag) + lookup = f"{name}:{tag}" + lookup_digest = f"{name}@{tag}" response = images images = [] for image in response: @@ -209,7 +201,7 @@ class AnsibleDockerClientBase(object): if not name: return None - self.log("Find image %s:%s" % (name, tag)) + self.log(f"Find image {name}:{tag}") images = self._image_lookup(name, tag) if not images: # In API <= 1.20 seeing 'docker.io/' as the name of images pulled from docker hub @@ -217,40 +209,40 @@ class AnsibleDockerClientBase(object): if registry == 'docker.io': # If docker.io is explicitly there in name, the image # is not found in some cases (#41509) - self.log("Check for docker.io image: %s" % repo_name) + self.log(f"Check for docker.io image: {repo_name}") images = self._image_lookup(repo_name, tag) if not images and repo_name.startswith('library/'): # Sometimes library/xxx images are not found lookup = repo_name[len('library/'):] - self.log("Check for docker.io image: %s" % lookup) + self.log(f"Check for docker.io image: {lookup}") images = self._image_lookup(lookup, tag) if not images: # Last case for some Docker versions: if docker.io was not there, # it can be that the image was not found either # (https://github.com/ansible/ansible/pull/15586) - lookup = "%s/%s" % (registry, repo_name) - self.log("Check for docker.io image: %s" % lookup) + lookup = f"{registry}/{repo_name}" + self.log(f"Check for docker.io image: {lookup}") images = self._image_lookup(lookup, tag) if not images and '/' not in repo_name: # This seems to be happening with podman-docker # (https://github.com/ansible-collections/community.docker/issues/291) - lookup = "%s/library/%s" % (registry, repo_name) - self.log("Check for docker.io image: %s" % lookup) + lookup = f"{registry}/library/{repo_name}" + self.log(f"Check for docker.io image: {lookup}") images = self._image_lookup(lookup, tag) if 
len(images) > 1: - self.fail("Daemon returned more than one result for %s:%s" % (name, tag)) + self.fail(f"Daemon returned more than one result for {name}:{tag}") if len(images) == 1: rc, image, stderr = self.call_cli_json('image', 'inspect', images[0]['ID']) if not image: - self.log("Image %s:%s not found." % (name, tag)) + self.log(f"Image {name}:{tag} not found.") return None if rc != 0: - self.fail("Error inspecting image %s:%s - %s" % (name, tag, to_native(stderr))) + self.fail(f"Error inspecting image {name}:{tag} - {to_native(stderr)}") return image[0] - self.log("Image %s:%s not found." % (name, tag)) + self.log(f"Image {name}:{tag} not found.") return None def find_image_by_id(self, image_id, accept_missing_image=False): @@ -260,15 +252,15 @@ class AnsibleDockerClientBase(object): if not image_id: return None - self.log("Find image %s (by ID)" % image_id) + self.log(f"Find image {image_id} (by ID)") rc, image, stderr = self.call_cli_json('image', 'inspect', image_id) if not image: if not accept_missing_image: - self.fail("Error inspecting image ID %s - %s" % (image_id, to_native(stderr))) - self.log("Image %s not found." 
% image_id) + self.fail(f"Error inspecting image ID {image_id} - {to_native(stderr)}") + self.log(f"Image {image_id} not found.") return None if rc != 0: - self.fail("Error inspecting image ID %s - %s" % (image_id, to_native(stderr))) + self.fail(f"Error inspecting image ID {image_id} - {to_native(stderr)}") return image[0] diff --git a/plugins/module_utils/compose_v2.py b/plugins/module_utils/compose_v2.py index 75d0ce80..26295f3b 100644 --- a/plugins/module_utils/compose_v2.py +++ b/plugins/module_utils/compose_v2.py @@ -271,10 +271,10 @@ def _extract_event(line, warn_function=None): if match: if warn_function: if match.group('msg'): - msg = '{rid}: {msg}' + msg = f"{match.group('resource_id')}: {match.group('msg')}" else: - msg = 'Unspecified warning for {rid}' - warn_function(msg.format(rid=match.group('resource_id'), msg=match.group('msg'))) + msg = f"Unspecified warning for {match.group('resource_id')}" + warn_function(msg) return None, True match = _RE_PULL_PROGRESS.match(line) if match: @@ -323,9 +323,8 @@ def _warn_missing_dry_run_prefix(line, warn_missing_dry_run_prefix, warn_functio # This could be a bug, a change of docker compose's output format, ... # Tell the user to report it to us :-) warn_function( - 'Event line is missing dry-run mode marker: {0!r}. Please report this at ' + f'Event line is missing dry-run mode marker: {line!r}. Please report this at ' 'https://github.com/ansible-collections/community.docker/issues/new?assignees=&labels=&projects=&template=bug_report.md' - .format(line) ) @@ -334,9 +333,8 @@ def _warn_unparsable_line(line, warn_function): # Tell the user to report it to us :-) if warn_function: warn_function( - 'Cannot parse event from line: {0!r}. Please report this at ' + f'Cannot parse event from line: {line!r}. 
Please report this at ' 'https://github.com/ansible-collections/community.docker/issues/new?assignees=&labels=&projects=&template=bug_report.md' - .format(line) ) @@ -382,9 +380,8 @@ def parse_json_events(stderr, warn_function=None): continue if warn_function: warn_function( - 'Cannot parse event from non-JSON line: {0!r}. Please report this at ' + f'Cannot parse event from non-JSON line: {line!r}. Please report this at ' 'https://github.com/ansible-collections/community.docker/issues/new?assignees=&labels=&projects=&template=bug_report.md' - .format(line) ) continue try: @@ -392,9 +389,8 @@ def parse_json_events(stderr, warn_function=None): except Exception as exc: if warn_function: warn_function( - 'Cannot parse event from line: {0!r}: {1}. Please report this at ' + f'Cannot parse event from line: {line!r}: {exc}. Please report this at ' 'https://github.com/ansible-collections/community.docker/issues/new?assignees=&labels=&projects=&template=bug_report.md' - .format(line, exc) ) continue if line_data.get('tail'): @@ -449,9 +445,8 @@ def parse_json_events(stderr, warn_function=None): except KeyError: if warn_function: warn_function( - 'Unknown resource type {0!r} in line {1!r}. Please report this at ' + f'Unknown resource type {resource_type_str!r} in line {line!r}. 
Please report this at ' 'https://github.com/ansible-collections/community.docker/issues/new?assignees=&labels=&projects=&template=bug_report.md' - .format(resource_type_str, line) ) resource_type = ResourceType.UNKNOWN elif text in DOCKER_STATUS_PULL: @@ -589,11 +584,7 @@ def emit_warnings(events, warn_function): for event in events: # If a message is present, assume it is a warning if (event.status is None and event.msg is not None) or event.status in DOCKER_STATUS_WARNING: - warn_function('Docker compose: {resource_type} {resource_id}: {msg}'.format( - resource_type=event.resource_type, - resource_id=event.resource_id, - msg=event.msg, - )) + warn_function(f'Docker compose: {event.resource_type} {event.resource_id}: {event.msg}') def is_failed(events, rc): @@ -610,22 +601,17 @@ def update_failed(result, events, args, stdout, stderr, rc, cli): if event.status in DOCKER_STATUS_ERROR: if event.resource_id is None: if event.resource_type == 'unknown': - msg = 'General error: ' if event.resource_type == 'unknown' else 'Error when processing {resource_type}: ' + msg = 'General error: ' if event.resource_type == 'unknown' else f'Error when processing {event.resource_type}: ' else: - msg = 'Error when processing {resource_type} {resource_id}: ' + msg = f'Error when processing {event.resource_type} {event.resource_id}: ' if event.resource_type == 'unknown': - msg = 'Error when processing {resource_id}: ' + msg = f'Error when processing {event.resource_id}: ' if event.resource_id == '': msg = 'General error: ' - msg += '{status}' if event.msg is None else '{msg}' - errors.append(msg.format( - resource_type=event.resource_type, - resource_id=event.resource_id, - status=event.status, - msg=event.msg, - )) + msg += f'{event.status}' if event.msg is None else f'{event.msg}' + errors.append(msg) if not errors: - errors.append('Return code {code} is non-zero'.format(code=rc)) + errors.append(f'Return code {rc} is non-zero') result['failed'] = True result['msg'] = 
'\n'.join(errors) result['cmd'] = ' '.join(quote(arg) for arg in [cli] + args) @@ -695,7 +681,7 @@ class BaseComposeManager(DockerBaseClass): with open(compose_file, 'wb') as f: yaml.dump(parameters['definition'], f, encoding="utf-8", Dumper=_SafeDumper) except Exception as exc: - self.fail("Error writing to %s - %s" % (compose_file, to_native(exc))) + self.fail(f"Error writing to {compose_file} - {exc}") else: self.project_src = os.path.abspath(parameters['project_src']) @@ -706,24 +692,20 @@ class BaseComposeManager(DockerBaseClass): compose_version = self.get_compose_version() self.compose_version = LooseVersion(compose_version) if self.compose_version < LooseVersion(min_version): - self.fail('Docker CLI {cli} has the compose plugin with version {version}; need version {min_version} or later'.format( - cli=self.client.get_cli(), - version=compose_version, - min_version=min_version, - )) + self.fail(f'Docker CLI {self.client.get_cli()} has the compose plugin with version {compose_version}; need version {min_version} or later') if not os.path.isdir(self.project_src): - self.fail('"{0}" is not a directory'.format(self.project_src)) + self.fail(f'"{self.project_src}" is not a directory') self.check_files_existing = parameters['check_files_existing'] if self.files: for file in self.files: path = os.path.join(self.project_src, file) if not os.path.exists(path): - self.fail('Cannot find Compose file "{0}" relative to project directory "{1}"'.format(file, self.project_src)) + self.fail(f'Cannot find Compose file "{file}" relative to project directory "{self.project_src}"') elif self.check_files_existing and all(not os.path.exists(os.path.join(self.project_src, f)) for f in DOCKER_COMPOSE_FILES): filenames = ', '.join(DOCKER_COMPOSE_FILES[:-1]) - self.fail('"{0}" does not contain {1}, or {2}'.format(self.project_src, filenames, DOCKER_COMPOSE_FILES[-1])) + self.fail(f'"{self.project_src}" does not contain {filenames}, or {DOCKER_COMPOSE_FILES[-1]}') # Support for JSON 
output was added in Compose 2.29.0 (https://github.com/docker/compose/releases/tag/v2.29.0); # more precisely in https://github.com/docker/compose/pull/11478 @@ -747,12 +729,11 @@ class BaseComposeManager(DockerBaseClass): def get_compose_version_from_api(self): compose = self.client.get_client_plugin_info('compose') if compose is None: - self.fail('Docker CLI {0} does not have the compose plugin installed'.format(self.client.get_cli())) + self.fail(f'Docker CLI {self.client.get_cli()} does not have the compose plugin installed') if compose['Version'] == 'dev': self.fail( - 'Docker CLI {0} has a compose plugin installed, but it reports version "dev".' + f'Docker CLI {self.client.get_cli()} has a compose plugin installed, but it reports version "dev".' ' Please use a version of the plugin that returns a proper version.' - .format(self.client.get_cli()) ) return compose['Version'].lstrip('v') diff --git a/plugins/module_utils/copy.py b/plugins/module_utils/copy.py index fd1fa20d..92fb52d9 100644 --- a/plugins/module_utils/copy.py +++ b/plugins/module_utils/copy.py @@ -156,7 +156,7 @@ def put_file(client, container, in_path, out_path, user_id, group_id, mode=None, """Transfer a file from local to Docker container.""" if not os.path.exists(to_bytes(in_path, errors='surrogate_or_strict')): raise DockerFileNotFound( - "file or module does not exist: %s" % to_native(in_path)) + f"file or module does not exist: {to_native(in_path)}") b_in_path = to_bytes(in_path, errors='surrogate_or_strict') @@ -172,13 +172,13 @@ def put_file(client, container, in_path, out_path, user_id, group_id, mode=None, elif stat.S_ISLNK(file_stat.st_mode): stream = _symlink_tar_generator(b_in_path, file_stat, out_file, user_id, group_id, mode=mode, user_name=user_name) else: + file_part = ' referenced by' if follow_links else '' raise DockerFileCopyError( - 'File{0} {1} is neither a regular file nor a symlink (stat mode {2}).'.format( - ' referenced by' if follow_links else '', in_path, 
oct(file_stat.st_mode))) + f'File{file_part} {in_path} is neither a regular file nor a symlink (stat mode {oct(file_stat.st_mode)}).') ok = _put_archive(client, container, out_dir, stream) if not ok: - raise DockerUnexpectedError('Unknown error while creating file "{0}" in container "{1}".'.format(out_path, container)) + raise DockerUnexpectedError(f'Unknown error while creating file "{out_path}" in container "{container}".') def put_file_content(client, container, content, out_path, user_id, group_id, mode, user_name=None): @@ -189,7 +189,7 @@ def put_file_content(client, container, content, out_path, user_id, group_id, mo ok = _put_archive(client, container, out_dir, stream) if not ok: - raise DockerUnexpectedError('Unknown error while creating file "{0}" in container "{1}".'.format(out_path, container)) + raise DockerUnexpectedError(f'Unknown error while creating file "{out_path}" in container "{container}".') def stat_file(client, container, in_path, follow_links=False, log=None): @@ -208,11 +208,11 @@ def stat_file(client, container, in_path, follow_links=False, log=None): while True: if in_path in considered_in_paths: - raise DockerFileCopyError('Found infinite symbolic link loop when trying to stating "{0}"'.format(in_path)) + raise DockerFileCopyError(f'Found infinite symbolic link loop when trying to stating "{in_path}"') considered_in_paths.add(in_path) if log: - log('FETCH: Stating "%s"' % in_path) + log(f'FETCH: Stating "{in_path}"') response = client._head( client._url('/containers/{0}/archive', container), @@ -226,8 +226,7 @@ def stat_file(client, container, in_path, follow_links=False, log=None): stat_data = json.loads(base64.b64decode(header)) except Exception as exc: raise DockerUnexpectedError( - 'When retrieving information for {in_path} from {container}, obtained header {header!r} that cannot be loaded as JSON: {exc}' - .format(in_path=in_path, container=container, header=header, exc=exc) + f'When retrieving information for {in_path} from 
{container}, obtained header {header!r} that cannot be loaded as JSON: {exc}' ) # https://pkg.go.dev/io/fs#FileMode: bit 32 - 5 means ModeSymlink @@ -285,11 +284,11 @@ def fetch_file_ex(client, container, in_path, process_none, process_regular, pro while True: if in_path in considered_in_paths: - raise DockerFileCopyError('Found infinite symbolic link loop when trying to fetch "{0}"'.format(in_path)) + raise DockerFileCopyError(f'Found infinite symbolic link loop when trying to fetch "{in_path}"') considered_in_paths.add(in_path) if log: - log('FETCH: Fetching "%s"' % in_path) + log(f'FETCH: Fetching "{in_path}"') try: stream = client.get_raw_stream( '/containers/{0}/archive', container, @@ -319,7 +318,7 @@ def fetch_file_ex(client, container, in_path, process_none, process_regular, pro return process_symlink(in_path, symlink_member) in_path = os.path.join(os.path.split(in_path)[0], symlink_member.linkname) if log: - log('FETCH: Following symbolic link to "%s"' % in_path) + log(f'FETCH: Following symbolic link to "{in_path}"') continue if found: return result @@ -331,8 +330,7 @@ def fetch_file(client, container, in_path, out_path, follow_links=False, log=Non def process_none(in_path): raise DockerFileNotFound( - 'File {in_path} does not exist in container {container}' - .format(in_path=in_path, container=container) + f'File {in_path} does not exist in container {container}' ) def process_regular(in_path, tar, member): @@ -352,14 +350,14 @@ def fetch_file(client, container, in_path, out_path, follow_links=False, log=Non return in_path def process_other(in_path, member): - raise DockerFileCopyError('Remote file "%s" is not a regular file or a symbolic link' % in_path) + raise DockerFileCopyError(f'Remote file "{in_path}" is not a regular file or a symbolic link') return fetch_file_ex(client, container, in_path, process_none, process_regular, process_symlink, process_other, follow_links=follow_links, log=log) def _execute_command(client, container, command, log=None, 
check_rc=False): if log: - log('Executing {command} in {container}'.format(command=command, container=container)) + log(f'Executing {command} in {container}') data = { 'Container': container, @@ -378,10 +376,10 @@ def _execute_command(client, container, command, log=None, check_rc=False): try: exec_data = client.post_json_to_json('/containers/{0}/exec', container, data=data) except NotFound as e: - raise DockerFileCopyError('Could not find container "{container}"'.format(container=container)) from e + raise DockerFileCopyError(f'Could not find container "{container}"') from e except APIError as e: if e.response is not None and e.response.status_code == 409: - raise DockerFileCopyError('Cannot execute command in paused container "{container}"'.format(container=container)) from e + raise DockerFileCopyError(f'Cannot execute command in paused container "{container}"') from e raise exec_id = exec_data['Id'] @@ -398,12 +396,12 @@ def _execute_command(client, container, command, log=None, check_rc=False): stderr = stderr or b'' if log: - log('Exit code {rc}, stdout {stdout!r}, stderr {stderr!r}'.format(rc=rc, stdout=stdout, stderr=stderr)) + log(f'Exit code {rc}, stdout {stdout!r}, stderr {stderr!r}') if check_rc and rc != 0: + command_str = ' '.join(command) raise DockerUnexpectedError( - 'Obtained unexpected exit code {rc} when running "{command}" in {container}.\nSTDOUT: {stdout}\nSTDERR: {stderr}' - .format(command=' '.join(command), container=container, rc=rc, stdout=stdout, stderr=stderr) + f'Obtained unexpected exit code {rc} when running "{command_str}" in {container}.\nSTDOUT: {stdout}\nSTDERR: {stderr}' ) return rc, stdout, stderr @@ -415,8 +413,7 @@ def determine_user_group(client, container, log=None): stdout_lines = stdout.splitlines() if len(stdout_lines) != 2: raise DockerUnexpectedError( - 'Expected two-line output to obtain user and group ID for container {container}, but got {lc} lines:\n{stdout}' - .format(container=container, lc=len(stdout_lines), 
stdout=stdout) + f'Expected two-line output to obtain user and group ID for container {container}, but got {len(stdout_lines)} lines:\n{stdout}' ) user_id, group_id = stdout_lines @@ -424,6 +421,5 @@ def determine_user_group(client, container, log=None): return int(user_id), int(group_id) except ValueError: raise DockerUnexpectedError( - 'Expected two-line output with numeric IDs to obtain user and group ID for container {container}, but got "{l1}" and "{l2}" instead' - .format(container=container, l1=user_id, l2=group_id) + f'Expected two-line output with numeric IDs to obtain user and group ID for container {container}, but got "{user_id}" and "{group_id}" instead' ) diff --git a/plugins/module_utils/image_archive.py b/plugins/module_utils/image_archive.py index 014e0f87..1e700261 100644 --- a/plugins/module_utils/image_archive.py +++ b/plugins/module_utils/image_archive.py @@ -8,8 +8,6 @@ import json import os import tarfile -from ansible.module_utils.common.text.converters import to_native - class ImageArchiveManifestSummary(object): ''' @@ -45,7 +43,7 @@ def api_image_id(archive_image_id): :rtype: str ''' - return 'sha256:%s' % archive_image_id + return f'sha256:{archive_image_id}' def load_archived_image_manifest(archive_path): @@ -79,7 +77,7 @@ def load_archived_image_manifest(archive_path): manifest = json.load(ef) except Exception as exc: raise ImageArchiveInvalidException( - "Failed to decode and deserialize manifest.json: %s" % to_native(exc) + f"Failed to decode and deserialize manifest.json: {exc}" ) from exc if len(manifest) == 0: @@ -93,7 +91,7 @@ def load_archived_image_manifest(archive_path): config_file = meta['Config'] except KeyError as exc: raise ImageArchiveInvalidException( - "Failed to get Config entry from {0}th manifest in manifest.json: {1}".format(index + 1, to_native(exc)) + f"Failed to get Config entry from {index + 1}th manifest in manifest.json: {exc}" ) from exc # Extracts hash without 'sha256:' prefix @@ -102,7 +100,7 @@ def 
load_archived_image_manifest(archive_path): image_id = os.path.splitext(config_file)[0] except Exception as exc: raise ImageArchiveInvalidException( - "Failed to extract image id from config file name %s: %s" % (config_file, to_native(exc)) + f"Failed to extract image id from config file name {config_file}: {exc}" ) from exc for prefix in ( @@ -115,7 +113,7 @@ def load_archived_image_manifest(archive_path): repo_tags = meta['RepoTags'] except KeyError as exc: raise ImageArchiveInvalidException( - "Failed to get RepoTags entry from {0}th manifest in manifest.json: {1}".format(index + 1, to_native(exc)) + f"Failed to get RepoTags entry from {index + 1}th manifest in manifest.json: {exc}" ) from exc result.append(ImageArchiveManifestSummary( @@ -128,13 +126,13 @@ def load_archived_image_manifest(archive_path): raise except Exception as exc: raise ImageArchiveInvalidException( - "Failed to extract manifest.json from tar file %s: %s" % (archive_path, to_native(exc)) + f"Failed to extract manifest.json from tar file {archive_path}: {exc}" ) from exc except ImageArchiveInvalidException: raise except Exception as exc: - raise ImageArchiveInvalidException("Failed to open tar file %s: %s" % (archive_path, to_native(exc))) from exc + raise ImageArchiveInvalidException(f"Failed to open tar file {archive_path}: {exc}") from exc def archived_image_manifest(archive_path): @@ -162,5 +160,5 @@ def archived_image_manifest(archive_path): if len(results) == 1: return results[0] raise ImageArchiveInvalidException( - "Expected to have one entry in manifest.json but found %s" % len(results) + f"Expected to have one entry in manifest.json but found {len(results)}" ) diff --git a/plugins/module_utils/module_container/base.py b/plugins/module_utils/module_container/base.py index f4dc5af1..c550c034 100644 --- a/plugins/module_utils/module_container/base.py +++ b/plugins/module_utils/module_container/base.py @@ -12,7 +12,7 @@ import shlex from functools import partial -from 
ansible.module_utils.common.text.converters import to_native, to_text +from ansible.module_utils.common.text.converters import to_text from ansible.module_utils.common.text.formatters import human_to_bytes from ansible_collections.community.docker.plugins.module_utils.util import ( @@ -56,7 +56,7 @@ def _get_ansible_type(type): if type == 'set': return 'list' if type not in ('list', 'dict', 'bool', 'int', 'float', 'str'): - raise Exception('Invalid type "%s"' % (type, )) + raise Exception(f'Invalid type "{type}"') return type @@ -365,15 +365,15 @@ def _parse_port_range(range_or_port, module): try: start, end = [int(port) for port in range_or_port.split('-')] except Exception: - module.fail_json(msg='Invalid port range: "{0}"'.format(range_or_port)) + module.fail_json(msg=f'Invalid port range: "{range_or_port}"') if end < start: - module.fail_json(msg='Invalid port range: "{0}"'.format(range_or_port)) + module.fail_json(msg=f'Invalid port range: "{range_or_port}"') return list(range(start, end + 1)) else: try: return [int(range_or_port)] except Exception: - module.fail_json(msg='Invalid port: "{0}"'.format(range_or_port)) + module.fail_json(msg=f'Invalid port: "{range_or_port}"') def _split_colon_ipv6(text, module): @@ -391,7 +391,7 @@ def _split_colon_ipv6(text, module): break j = text.find(']', i) if j < 0: - module.fail_json(msg='Cannot find closing "]" in input "{0}" for opening "[" at index {1}!'.format(text, i + 1)) + module.fail_json(msg=f'Cannot find closing "]" in input "{text}" for opening "[" at index {i + 1}!') result.extend(text[start:i].split(':')) k = text.find(':', j) if k < 0: @@ -461,11 +461,11 @@ def _preprocess_env(module, values): for name, value in values['env'].items(): if not isinstance(value, str): module.fail_json(msg='Non-string value found for env option. Ambiguous env options must be ' - 'wrapped in quotes to avoid them being interpreted. Key: %s' % (name, )) + f'wrapped in quotes to avoid them being interpreted. 
Key: {name}') final_env[name] = to_text(value, errors='surrogate_or_strict') formatted_env = [] for key, value in final_env.items(): - formatted_env.append('%s=%s' % (key, value)) + formatted_env.append(f'{key}={value}') return { 'env': formatted_env, } @@ -491,7 +491,7 @@ def _preprocess_convert_to_bytes(module, values, name, unlimited_value=None): values[name] = value return values except ValueError as exc: - module.fail_json(msg='Failed to convert %s to bytes: %s' % (name, to_native(exc))) + module.fail_json(msg=f'Failed to convert {name} to bytes: {exc}') def _preprocess_mac_address(module, values): @@ -571,9 +571,9 @@ def _preprocess_mounts(module, values): def check_collision(t, name): if t in last: if name == last[t]: - module.fail_json(msg='The mount point "{0}" appears twice in the {1} option'.format(t, name)) + module.fail_json(msg=f'The mount point "{t}" appears twice in the {name} option') else: - module.fail_json(msg='The mount point "{0}" appears both in the {1} and {2} option'.format(t, name, last[t])) + module.fail_json(msg=f'The mount point "{t}" appears both in the {name} and {last[t]} option') last[t] = name if 'mounts' in values: @@ -588,17 +588,13 @@ def _preprocess_mounts(module, values): # Sanity checks if mount['source'] is None and mount_type not in ('tmpfs', 'volume', 'image', 'cluster'): - module.fail_json(msg='source must be specified for mount "{0}" of type "{1}"'.format(target, mount_type)) + module.fail_json(msg=f'source must be specified for mount "{target}" of type "{mount_type}"') for option, req_mount_types in _MOUNT_OPTION_TYPES.items(): if mount[option] is not None and mount_type not in req_mount_types: + type_plural = "" if len(req_mount_types) == 1 else "s" + type_list = '", "'.join(req_mount_types) module.fail_json( - msg='{0} cannot be specified for mount "{1}" of type "{2}" (needs type{3} "{4}")'.format( - option, - target, - mount_type, - "" if len(req_mount_types) == 1 else "s", - '", "'.join(req_mount_types), - ) + 
msg=f'{option} cannot be specified for mount "{target}" of type "{mount_type}" (needs type{type_plural} "{type_list}")' ) # Streamline options @@ -611,22 +607,22 @@ def _preprocess_mounts(module, values): try: mount_dict['tmpfs_size'] = human_to_bytes(mount_dict['tmpfs_size']) except ValueError as exc: - module.fail_json(msg='Failed to convert tmpfs_size of mount "{0}" to bytes: {1}'.format(target, to_native(exc))) + module.fail_json(msg=f'Failed to convert tmpfs_size of mount "{target}" to bytes: {exc}') if mount_dict['tmpfs_mode'] is not None: try: mount_dict['tmpfs_mode'] = int(mount_dict['tmpfs_mode'], 8) except Exception as dummy: - module.fail_json(msg='tmp_fs mode of mount "{0}" is not an octal string!'.format(target)) + module.fail_json(msg=f'tmp_fs mode of mount "{target}" is not an octal string!') if mount_dict['tmpfs_options']: opts = [] for idx, opt in enumerate(mount_dict['tmpfs_options']): if len(opt) != 1: - module.fail_json(msg='tmpfs_options[{1}] of mount "{0}" must be a one-element dictionary!'.format(target, idx + 1)) + module.fail_json(msg=f'tmpfs_options[{idx + 1}] of mount "{target}" must be a one-element dictionary!') k, v = list(opt.items())[0] if not isinstance(k, str): - module.fail_json(msg='key {2!r} in tmpfs_options[{1}] of mount "{0}" must be a string!'.format(target, idx + 1, k)) + module.fail_json(msg=f'key {k!r} in tmpfs_options[{idx + 1}] of mount "{target}" must be a string!') if v is not None and not isinstance(v, str): - module.fail_json(msg='value {2!r} in tmpfs_options[{1}] of mount "{0}" must be a string or null/none!'.format(target, idx + 1, v)) + module.fail_json(msg=f'value {v!r} in tmpfs_options[{idx + 1}] of mount "{target}" must be a string or null/none!') opts.append([k, v] if v is not None else [k]) mount_dict['tmpfs_options'] = opts @@ -641,17 +637,17 @@ def _preprocess_mounts(module, values): if len(parts) == 3: host, container, mode = parts if not _is_volume_permissions(mode): - module.fail_json(msg='Found invalid 
volumes mode: {0}'.format(mode)) + module.fail_json(msg=f'Found invalid volumes mode: {mode}') if re.match(r'[.~]', host): host = os.path.abspath(os.path.expanduser(host)) check_collision(container, 'volumes') - new_vols.append("%s:%s:%s" % (host, container, mode)) + new_vols.append(f"{host}:{container}:{mode}") continue elif len(parts) == 2: if not _is_volume_permissions(parts[1]) and re.match(r'[.~]', parts[0]): host = os.path.abspath(os.path.expanduser(parts[0])) check_collision(parts[1], 'volumes') - new_vols.append("%s:%s:rw" % (host, parts[1])) + new_vols.append(f"{host}:{parts[1]}:rw") continue check_collision(parts[min(1, len(parts) - 1)], 'volumes') new_vols.append(vol) @@ -664,12 +660,12 @@ def _preprocess_mounts(module, values): if len(parts) == 3: host, container, mode = parts if not _is_volume_permissions(mode): - module.fail_json(msg='Found invalid volumes mode: {0}'.format(mode)) + module.fail_json(msg=f'Found invalid volumes mode: {mode}') elif len(parts) == 2: if not _is_volume_permissions(parts[1]): host, container, mode = (parts + ['rw']) if host is not None: - new_binds.append('%s:%s:%s' % (host, container, mode)) + new_binds.append(f'{host}:{container}:{mode}') values['volume_binds'] = new_binds return values @@ -694,12 +690,12 @@ def _preprocess_log(module, values): options = {} for k, v in values['log_options'].items(): if not isinstance(v, str): + value = to_text(v, errors='surrogate_or_strict') module.warn( - "Non-string value found for log_options option '%s'. The value is automatically converted to '%s'. " - "If this is not correct, or you want to avoid such warnings, please quote the value." % ( - k, to_text(v, errors='surrogate_or_strict')) + f"Non-string value found for log_options option '{k}'. The value is automatically converted to {value!r}. " + "If this is not correct, or you want to avoid such warnings, please quote the value." 
) - v = to_text(v, errors='surrogate_or_strict') + v = value options[k] = v result['log_options'] = options return result @@ -735,7 +731,7 @@ def _preprocess_ports(module, values): if not re.match(r'^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$', parts[0]) and not re.match(r'^\[[0-9a-fA-F:]+(?:|%[^\]/]+)\]$', ipaddr): module.fail_json( msg='Bind addresses for published ports must be IPv4 or IPv6 addresses, not hostnames. ' - 'Use the dig lookup to resolve hostnames. (Found hostname: {0})'.format(ipaddr) + f'Use the dig lookup to resolve hostnames. (Found hostname: {ipaddr})' ) if re.match(r'^\[[0-9a-fA-F:]+\]$', ipaddr): ipaddr = ipaddr[1:-1] @@ -748,12 +744,12 @@ def _preprocess_ports(module, values): port_binds = len(container_ports) * [(ipaddr,)] else: module.fail_json( - msg='Invalid port description "%s" - expected 1 to 3 colon-separated parts, but got %d. ' - 'Maybe you forgot to use square brackets ([...]) around an IPv6 address?' % (port, p_len) + msg=f'Invalid port description "{port}" - expected 1 to 3 colon-separated parts, but got {p_len}. ' + 'Maybe you forgot to use square brackets ([...]) around an IPv6 address?' 
) for bind, container_port in zip(port_binds, container_ports): - idx = '{0}/{1}'.format(container_port, protocol) if protocol else container_port + idx = f'{container_port}/{protocol}' if protocol else container_port if idx in binds: old_bind = binds[idx] if isinstance(old_bind, list): diff --git a/plugins/module_utils/module_container/docker_api.py b/plugins/module_utils/module_container/docker_api.py index bc2129a4..68734b00 100644 --- a/plugins/module_utils/module_container/docker_api.py +++ b/plugins/module_utils/module_container/docker_api.py @@ -8,7 +8,7 @@ from __future__ import annotations import json import traceback -from ansible.module_utils.common.text.converters import to_native, to_text +from ansible.module_utils.common.text.converters import to_text from ansible.module_utils.common.text.formatters import human_to_bytes from ansible_collections.community.docker.plugins.module_utils.common_api import ( @@ -123,7 +123,7 @@ def _get_ansible_type(type): if type == 'set': return 'list' if type not in ('list', 'dict', 'bool', 'int', 'float', 'str'): - raise Exception('Invalid type "%s"' % (type, )) + raise Exception(f'Invalid type "{type}"') return type @@ -248,8 +248,9 @@ class DockerAPIEngineDriver(EngineDriver): value = normalize_links(value) params[dest_para] = value if parameters: + ups = ', '.join([f'"{p}"' for p in sorted(parameters)]) raise Exception( - 'Unknown parameter(s) for connect_container_to_network for Docker API driver: %s' % (', '.join(['"%s"' % p for p in sorted(parameters)]))) + f'Unknown parameter(s) for connect_container_to_network for Docker API driver: {ups}') ipam_config = {} for param in ('IPv4Address', 'IPv6Address'): if param in params: @@ -307,7 +308,7 @@ class DockerAPIEngineDriver(EngineDriver): output = client._get_result_tty(False, res, config['Config']['Tty']) return output, True else: - return "Result logged using `%s` driver" % logging_driver, False + return f"Result logged using `{logging_driver}` driver", False def 
update_container(self, client, container_id, update_parameters): result = client.post_json_to_json('/containers/{0}/update', container_id, data=update_parameters) @@ -343,13 +344,13 @@ class DockerAPIEngineDriver(EngineDriver): # New docker daemon versions do not allow containers to be removed # if they are paused. Make sure we do not end up in an infinite loop. if count == 3: - raise Exception('%s [tried to unpause three times]' % to_native(exc)) + raise Exception(f'{exc} [tried to unpause three times]') count += 1 # Unpause try: self.unpause_container(client, container_id) except Exception as exc2: - raise Exception('%s [while unpausing]' % to_native(exc2)) + raise Exception(f'{exc2} [while unpausing]') # Now try again continue raise @@ -369,13 +370,13 @@ class DockerAPIEngineDriver(EngineDriver): # New docker daemon versions do not allow containers to be removed # if they are paused. Make sure we do not end up in an infinite loop. if count == 3: - raise Exception('%s [tried to unpause three times]' % to_native(exc)) + raise Exception(f'{exc} [tried to unpause three times]') count += 1 # Unpause try: self.unpause_container(client, container_id) except Exception as exc2: - raise Exception('%s [while unpausing]' % to_native(exc2)) + raise Exception(f'{exc2} [while unpausing]') # Now try again continue if 'removal of container ' in exc.explanation and ' is already in progress' in exc.explanation: @@ -389,10 +390,10 @@ class DockerAPIEngineDriver(EngineDriver): try: runner() except DockerException as e: - client.fail('An unexpected Docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc()) + client.fail(f'An unexpected Docker error occurred: {e}', exception=traceback.format_exc()) except RequestException as e: client.fail( - 'An unexpected requests error occurred when trying to talk to the Docker daemon: {0}'.format(to_native(e)), + f'An unexpected requests error occurred when trying to talk to the Docker daemon: {e}', 
exception=traceback.format_exc()) @@ -611,7 +612,7 @@ def _get_default_host_ip(module, client): network = client.get_network(network_data['name']) if network is None: client.fail( - "Cannot inspect the network '{0}' to determine the default IP".format(network_data['name']), + f"Cannot inspect the network '{network_data['name']}' to determine the default IP", ) if network.get('Driver') == 'bridge' and network.get('Options', {}).get('com.docker.network.bridge.host_binding_ipv4'): ip = network['Options']['com.docker.network.bridge.host_binding_ipv4'] @@ -658,7 +659,7 @@ def _get_expected_env_value(module, client, api_version, image, value, sentry): expected_env[parts[0]] = parts[1] param_env = [] for key, env_value in expected_env.items(): - param_env.append("%s=%s" % (key, env_value)) + param_env.append(f"{key}={env_value}") return param_env @@ -744,7 +745,7 @@ def _preprocess_etc_hosts(module, client, api_version, value): return value results = [] for key, value in value.items(): - results.append('%s%s%s' % (key, ':', value)) + results.append(f'{key}:{value}') return results @@ -783,7 +784,7 @@ def _preprocess_convert_to_bytes(module, values, name, unlimited_value=None): values[name] = value return values except ValueError as exc: - module.fail_json(msg='Failed to convert %s to bytes: %s' % (name, to_native(exc))) + module.fail_json(msg=f'Failed to convert {name} to bytes: {exc}') def _get_image_labels(image): @@ -815,7 +816,7 @@ def _preprocess_links(module, client, api_version, value): link, alias = parsed_link else: link, alias = parsed_link[0], parsed_link[0] - result.append('/%s:/%s/%s' % (link, module.params['name'], alias)) + result.append(f"/{link}:/{module.params['name']}/{alias}") return result @@ -830,11 +831,12 @@ def _ignore_mismatching_label_result(module, client, api_version, option, image, for label in image_labels: if label not in labels_param: # Format label for error message - would_remove_labels.append('"%s"' % (label, )) + 
would_remove_labels.append(f'"{label}"') if would_remove_labels: + labels = ', '.join(would_remove_labels) msg = ("Some labels should be removed but are present in the base image. You can set image_label_mismatch to 'ignore' to ignore" - " this error. Labels: {0}") - client.fail(msg.format(', '.join(would_remove_labels))) + f" this error. Labels: {labels}") + client.fail(msg) return False @@ -860,7 +862,7 @@ def _preprocess_network_values(module, client, api_version, options, values): for network in values['networks']: network['id'] = _get_network_id(module, client, network['name']) if not network['id']: - client.fail("Parameter error: network named %s could not be found. Does it exist?" % (network['name'], )) + client.fail(f"Parameter error: network named {network['name']} could not be found. Does it exist?") if 'network_mode' in values: values['network_mode'] = _preprocess_container_names(module, client, api_version, values['network_mode']) @@ -878,7 +880,7 @@ def _get_network_id(module, client, network_name): break return network_id except Exception as exc: - client.fail("Error getting network id for %s - %s" % (network_name, to_native(exc))) + client.fail(f"Error getting network id for {network_name} - {exc}") def _get_values_network(module, container, api_version, options, image, host_info): @@ -947,7 +949,7 @@ def _get_bind_from_dict(volume_dict): if isinstance(config, dict) and config.get('bind'): container_path = config.get('bind') mode = config.get('mode', 'rw') - results.append("%s:%s:%s" % (host_path, container_path, mode)) + results.append(f"{host_path}:{container_path}:{mode}") return results @@ -1133,7 +1135,7 @@ def _get_expected_values_platform(module, client, api_version, options, image, v daemon_arch=host_info.get('Architecture') if host_info else None, ) except ValueError as exc: - module.fail_json(msg='Error while parsing platform parameer: %s' % (to_native(exc), )) + module.fail_json(msg=f'Error while parsing platform parameer: {exc}') return 
expected_values @@ -1203,7 +1205,7 @@ def _get_expected_values_ports(module, client, api_version, options, image, valu expected_bound_ports = {} for container_port, config in values['published_ports'].items(): if isinstance(container_port, int): - container_port = "%s/tcp" % container_port + container_port = f"{container_port}/tcp" if len(config) == 1: if isinstance(config[0], int): expected_bound_ports[container_port] = [{'HostIp': "0.0.0.0", 'HostPort': config[0]}] @@ -1243,7 +1245,7 @@ def _set_values_ports(module, data, api_version, options, values): if len(port_definition) == 2: proto = port_definition[1] port = port_definition[0] - exposed_ports['%s/%s' % (port, proto)] = {} + exposed_ports[f'{port}/{proto}'] = {} data['ExposedPorts'] = exposed_ports if 'published_ports' in values: if 'HostConfig' not in data: @@ -1282,9 +1284,9 @@ def _preprocess_container_names(module, client, api_version, value): if container is None: # If we cannot find the container, issue a warning and continue with # what the user specified. 
- module.warn('Cannot find a container with name or ID "{0}"'.format(container_name)) + module.warn(f'Cannot find a container with name or ID "{container_name}"') return value - return 'container:{0}'.format(container['Id']) + return f"container:{container['Id']}" def _get_value_command(module, container, api_version, options, image, host_info): diff --git a/plugins/module_utils/module_container/module.py b/plugins/module_utils/module_container/module.py index e68a8df4..8cf333d9 100644 --- a/plugins/module_utils/module_container/module.py +++ b/plugins/module_utils/module_container/module.py @@ -8,7 +8,7 @@ from __future__ import annotations import re from time import sleep -from ansible.module_utils.common.text.converters import to_native, to_text +from ansible.module_utils.common.text.converters import to_text from ansible_collections.community.docker.plugins.module_utils.util import ( DifferenceTracker, @@ -102,11 +102,11 @@ class ContainerManager(DockerBaseClass): if re.match(r'^\[[0-9a-fA-F:]+\]$', self.param_default_host_ip): valid_ip = True if re.match(r'^[0-9a-fA-F:]+$', self.param_default_host_ip): - self.param_default_host_ip = '[{0}]'.format(self.param_default_host_ip) + self.param_default_host_ip = f'[{self.param_default_host_ip}]' valid_ip = True if not valid_ip: self.fail('The value of default_host_ip must be an empty string, an IPv4 address, ' - 'or an IPv6 address. Got "{0}" instead.'.format(self.param_default_host_ip)) + f'or an IPv6 address. Got "{self.param_default_host_ip}" instead.') def _collect_all_options(self, active_options): all_options = {} @@ -157,23 +157,23 @@ class ContainerManager(DockerBaseClass): key_main = comp_aliases.get(key) if key_main is None: if key_main in all_module_options: - self.fail("The module option '%s' cannot be specified in the comparisons dict, " - "since it does not correspond to container's state!" 
% key) + self.fail(f"The module option '{key}' cannot be specified in the comparisons dict, " + "since it does not correspond to container's state!") if key not in self.all_options or self.all_options[key].not_an_ansible_option: - self.fail("Unknown module option '%s' in comparisons dict!" % key) + self.fail(f"Unknown module option '{key}' in comparisons dict!") key_main = key if key_main in comp_aliases_used: - self.fail("Both '%s' and '%s' (aliases of %s) are specified in comparisons dict!" % (key, comp_aliases_used[key_main], key_main)) + self.fail(f"Both '{key}' and '{comp_aliases_used[key_main]}' (aliases of {key_main}) are specified in comparisons dict!") comp_aliases_used[key_main] = key # Check value and update accordingly if value in ('strict', 'ignore'): self.all_options[key_main].comparison = value elif value == 'allow_more_present': if self.all_options[key_main].comparison_type == 'value': - self.fail("Option '%s' is a value and not a set/list/dict, so its comparison cannot be %s" % (key, value)) + self.fail(f"Option '{key}' is a value and not a set/list/dict, so its comparison cannot be {value}") self.all_options[key_main].comparison = value else: - self.fail("Unknown comparison mode '%s'!" 
% value) + self.fail(f"Unknown comparison mode '{value}'!") # Copy values for option in self.all_options.values(): if option.copy_comparison_from is not None: @@ -228,8 +228,8 @@ class ContainerManager(DockerBaseClass): if result is None: if accept_removal: return result - msg = 'Encontered vanished container while waiting for container "{0}"' - self.fail(msg.format(container_id)) + msg = f'Encontered vanished container while waiting for container "{container_id}"' + self.fail(msg) # Check container state state_info = result.get('State') or {} if health_state: @@ -238,13 +238,13 @@ class ContainerManager(DockerBaseClass): if complete_states is not None and state in complete_states: return result if wait_states is not None and state not in wait_states: - msg = 'Encontered unexpected state "{1}" while waiting for container "{0}"' - self.fail(msg.format(container_id, state), container=result) + msg = f'Encontered unexpected state "{state}" while waiting for container "{container_id}"' + self.fail(msg, container=result) # Wait if max_wait is not None: if total_wait > max_wait or delay < 1E-4: - msg = 'Timeout of {1} seconds exceeded while waiting for container "{0}"' - self.fail(msg.format(container_id, max_wait), container=result) + msg = f'Timeout of {max_wait} seconds exceeded while waiting for container "{container_id}"' + self.fail(msg, container=result) if total_wait + delay > max_wait: delay = max_wait - total_wait sleep(delay) @@ -373,9 +373,7 @@ class ContainerManager(DockerBaseClass): else: self.engine_driver.unpause_container(self.client, container.id) except Exception as exc: - self.fail("Error %s container %s: %s" % ( - "pausing" if self.param_paused else "unpausing", container.id, to_native(exc) - )) + self.fail(f"Error {'pausing' if self.param_paused else 'unpausing'} container {container.id}: {exc}") container = self._get_container(container.id) self.results['changed'] = True self.results['actions'].append(dict(set_paused=self.param_paused)) @@ -440,14 
+438,14 @@ class ContainerManager(DockerBaseClass): if is_image_name_id(image_parameter): image = self.engine_driver.inspect_image_by_id(self.client, image_parameter) if image is None: - self.client.fail("Cannot find image with ID %s" % (image_parameter, )) + self.client.fail(f"Cannot find image with ID {image_parameter}") else: repository, tag = parse_repository_tag(image_parameter) if not tag: tag = "latest" image = self.engine_driver.inspect_image_by_name(self.client, repository, tag) if not image and self.param_pull == "never": - self.client.fail("Cannot find image with name %s:%s, and pull=never" % (repository, tag)) + self.client.fail(f"Cannot find image with name {repository}:{tag}, and pull=never") if not image or self.param_pull == "always": if not self.check_mode: self.log("Pull the image.") @@ -455,16 +453,16 @@ class ContainerManager(DockerBaseClass): self.client, repository, tag, platform=self.module.params['platform']) if alreadyToLatest: self.results['changed'] = False - self.results['actions'].append(dict(pulled_image="%s:%s" % (repository, tag), changed=False)) + self.results['actions'].append(dict(pulled_image=f"{repository}:{tag}", changed=False)) else: self.results['changed'] = True - self.results['actions'].append(dict(pulled_image="%s:%s" % (repository, tag), changed=True)) + self.results['actions'].append(dict(pulled_image=f"{repository}:{tag}", changed=True)) elif not image or self.param_pull_check_mode_behavior == 'always': # If the image is not there, or pull_check_mode_behavior == 'always', claim we'll # pull. (Implicitly: if the image is there, claim it already was latest unless # pull_check_mode_behavior == 'always'.) 
self.results['changed'] = True - action = dict(pulled_image="%s:%s" % (repository, tag)) + action = dict(pulled_image=f"{repository}:{tag}") if not image: action['changed'] = True self.results['actions'].append(action) @@ -620,7 +618,7 @@ class ContainerManager(DockerBaseClass): if network.get('links'): expected_links = [] for link, alias in network['links']: - expected_links.append("%s:%s" % (link, alias)) + expected_links.append(f"{link}:{alias}") if not compare_generic(expected_links, network_info.get('Links'), 'allow_more_present', 'set'): diff = True if network.get('mac_address') and network['mac_address'] != network_info.get('MacAddress'): @@ -674,7 +672,7 @@ class ContainerManager(DockerBaseClass): self.diff['differences'] = [dict(network_differences=network_differences)] for netdiff in network_differences: self.diff_tracker.add( - 'network.{0}'.format(netdiff['parameter']['name']), + f"network.{netdiff['parameter']['name']}", parameter=netdiff['parameter'], active=netdiff['container'] ) @@ -691,7 +689,7 @@ class ContainerManager(DockerBaseClass): self.diff['differences'] = [dict(purge_networks=extra_networks)] for extra_network in extra_networks: self.diff_tracker.add( - 'network.{0}'.format(extra_network['name']), + f"network.{extra_network['name']}", active=extra_network ) self.results['changed'] = True @@ -707,18 +705,17 @@ class ContainerManager(DockerBaseClass): try: self.engine_driver.disconnect_container_from_network(self.client, container.id, diff['parameter']['id']) except Exception as exc: - self.fail("Error disconnecting container from network %s - %s" % (diff['parameter']['name'], - to_native(exc))) + self.fail(f"Error disconnecting container from network {diff['parameter']['name']} - {exc}") # connect to the network self.results['actions'].append(dict(added_to_network=diff['parameter']['name'], network_parameters=diff['parameter'])) if not self.check_mode: params = {key: value for key, value in diff['parameter'].items() if key not in ('id', 
'name')} try: - self.log("Connecting container to network %s" % diff['parameter']['id']) + self.log(f"Connecting container to network {diff['parameter']['id']}") self.log(params, pretty_print=True) self.engine_driver.connect_container_to_network(self.client, container.id, diff['parameter']['id'], params) except Exception as exc: - self.fail("Error connecting container to network %s - %s" % (diff['parameter']['name'], to_native(exc))) + self.fail(f"Error connecting container to network {diff['parameter']['name']} - {exc}") return self._get_container(container.id) def _purge_networks(self, container, networks): @@ -728,14 +725,13 @@ class ContainerManager(DockerBaseClass): try: self.engine_driver.disconnect_container_from_network(self.client, container.id, network['name']) except Exception as exc: - self.fail("Error disconnecting container from network %s - %s" % (network['name'], - to_native(exc))) + self.fail(f"Error disconnecting container from network {network['name']} - {exc}") return self._get_container(container.id) def container_create(self, image): create_parameters = self._compose_create_parameters(image) self.log("create container") - self.log("image: %s parameters:" % image) + self.log(f"image: {image} parameters:") self.log(create_parameters, pretty_print=True) networks = {} if self.param_networks_cli_compatible and self.module.params['networks']: @@ -754,19 +750,19 @@ class ContainerManager(DockerBaseClass): try: container_id = self.engine_driver.create_container(self.client, self.param_name, create_parameters, networks=networks) except Exception as exc: - self.fail("Error creating container: %s" % to_native(exc)) + self.fail(f"Error creating container: {exc}") return self._get_container(container_id) return new_container def container_start(self, container_id): - self.log("start container %s" % (container_id)) + self.log(f"start container {container_id}") self.results['actions'].append(dict(started=container_id)) self.results['changed'] = True if not 
self.check_mode: try: self.engine_driver.start_container(self.client, container_id) except Exception as exc: - self.fail("Error starting container %s: %s" % (container_id, to_native(exc))) + self.fail(f"Error starting container {container_id}: {exc}") if self.module.params['detach'] is False: status = self.engine_driver.wait_for_container(self.client, container_id) @@ -798,18 +794,18 @@ class ContainerManager(DockerBaseClass): def container_remove(self, container_id, link=False, force=False): volume_state = (not self.param_keep_volumes) - self.log("remove container container:%s v:%s link:%s force%s" % (container_id, volume_state, link, force)) + self.log(f"remove container container:{container_id} v:{volume_state} link:{link} force{force}") self.results['actions'].append(dict(removed=container_id, volume_state=volume_state, link=link, force=force)) self.results['changed'] = True if not self.check_mode: try: self.engine_driver.remove_container(self.client, container_id, remove_volumes=volume_state, link=link, force=force) except Exception as exc: - self.client.fail("Error removing container %s: %s" % (container_id, to_native(exc))) + self.client.fail(f"Error removing container {container_id}: {exc}") def container_update(self, container_id, update_parameters): if update_parameters: - self.log("update container %s" % (container_id)) + self.log(f"update container {container_id}") self.log(update_parameters, pretty_print=True) self.results['actions'].append(dict(updated=container_id, update_parameters=update_parameters)) self.results['changed'] = True @@ -817,7 +813,7 @@ class ContainerManager(DockerBaseClass): try: self.engine_driver.update_container(self.client, container_id, update_parameters) except Exception as exc: - self.fail("Error updating container %s: %s" % (container_id, to_native(exc))) + self.fail(f"Error updating container {container_id}: {exc}") return self._get_container(container_id) def container_kill(self, container_id): @@ -827,7 +823,7 @@ class 
ContainerManager(DockerBaseClass): try: self.engine_driver.kill_container(self.client, container_id, kill_signal=self.param_kill_signal) except Exception as exc: - self.fail("Error killing container %s: %s" % (container_id, to_native(exc))) + self.fail(f"Error killing container {container_id}: {exc}") def container_restart(self, container_id): self.results['actions'].append(dict(restarted=container_id, timeout=self.module.params['stop_timeout'])) @@ -836,7 +832,7 @@ class ContainerManager(DockerBaseClass): try: self.engine_driver.restart_container(self.client, container_id, self.module.params['stop_timeout'] or 10) except Exception as exc: - self.fail("Error restarting container %s: %s" % (container_id, to_native(exc))) + self.fail(f"Error restarting container {container_id}: {exc}") return self._get_container(container_id) def container_stop(self, container_id): @@ -849,7 +845,7 @@ class ContainerManager(DockerBaseClass): try: self.engine_driver.stop_container(self.client, container_id, self.module.params['stop_timeout']) except Exception as exc: - self.fail("Error stopping container %s: %s" % (container_id, to_native(exc))) + self.fail(f"Error stopping container {container_id}: {exc}") def run_module(engine_driver): diff --git a/plugins/module_utils/socket_handler.py b/plugins/module_utils/socket_handler.py index 27d8a3eb..67ed518a 100644 --- a/plugins/module_utils/socket_handler.py +++ b/plugins/module_utils/socket_handler.py @@ -89,7 +89,7 @@ class DockerSocketHandlerBase(object): if data is None: # no data available return - self._log('read {0} bytes'.format(len(data))) + self._log(f'read {len(data)} bytes') if len(data) == 0: # Stream EOF self._eof = True @@ -123,7 +123,7 @@ class DockerSocketHandlerBase(object): if len(self._write_buffer) > 0: written = write_to_socket(self._sock, self._write_buffer) self._write_buffer = self._write_buffer[written:] - self._log('wrote {0} bytes, {1} are left'.format(written, len(self._write_buffer))) + self._log(f'wrote 
{written} bytes, {len(self._write_buffer)} are left') if len(self._write_buffer) > 0: self._selector.modify(self._sock, self._selectors.EVENT_READ | self._selectors.EVENT_WRITE) else: @@ -147,14 +147,13 @@ class DockerSocketHandlerBase(object): return True if timeout is not None: timeout -= PARAMIKO_POLL_TIMEOUT - self._log('select... ({0})'.format(timeout)) + self._log(f'select... ({timeout})') events = self._selector.select(timeout) for key, event in events: if key.fileobj == self._sock: - self._log( - 'select event read:{0} write:{1}'.format( - event & self._selectors.EVENT_READ != 0, - event & self._selectors.EVENT_WRITE != 0)) + ev_read = event & self._selectors.EVENT_READ != 0 + ev_write = event & self._selectors.EVENT_WRITE != 0 + self._log(f'select event read:{ev_read} write:{ev_write}') if event & self._selectors.EVENT_READ != 0: self._read() if event & self._selectors.EVENT_WRITE != 0: @@ -183,7 +182,7 @@ class DockerSocketHandlerBase(object): elif stream_id == docker_socket.STDERR: stderr.append(data) else: - raise ValueError('{0} is not a valid stream ID'.format(stream_id)) + raise ValueError(f'{stream_id} is not a valid stream ID') self.end_of_writing() diff --git a/plugins/module_utils/socket_helper.py b/plugins/module_utils/socket_helper.py index c0a54807..5300404e 100644 --- a/plugins/module_utils/socket_helper.py +++ b/plugins/module_utils/socket_helper.py @@ -44,7 +44,7 @@ def shutdown_writing(sock, log=_empty_writer): sock.shutdown(pysocket.SHUT_WR) except TypeError as e: # probably: "TypeError: shutdown() takes 1 positional argument but 2 were given" - log('Shutting down for writing not possible; trying shutdown instead: {0}'.format(e)) + log(f'Shutting down for writing not possible; trying shutdown instead: {e}') sock.shutdown() elif isinstance(sock, getattr(pysocket, 'SocketIO')): sock._sock.shutdown(pysocket.SHUT_WR) diff --git a/plugins/module_utils/swarm.py b/plugins/module_utils/swarm.py index 23b90ce8..b995adc7 100644 --- 
a/plugins/module_utils/swarm.py +++ b/plugins/module_utils/swarm.py @@ -15,8 +15,6 @@ except ImportError: # missing Docker SDK for Python handled in ansible.module_utils.docker.common pass -from ansible.module_utils.common.text.converters import to_native - from ansible_collections.community.docker.plugins.module_utils.version import LooseVersion from ansible_collections.community.docker.plugins.module_utils.common import AnsibleDockerClient @@ -38,7 +36,7 @@ class AnsibleDockerSwarmClient(AnsibleDockerClient): try: info = self.info() except APIError as exc: - self.fail("Failed to get node information for %s" % to_native(exc)) + self.fail(f"Failed to get node information for {exc}") if info: json_str = json.dumps(info, ensure_ascii=False) @@ -166,9 +164,9 @@ class AnsibleDockerSwarmClient(AnsibleDockerClient): if exc.status_code == 404: if skip_missing: return None - self.fail("Error while reading from Swarm manager: %s" % to_native(exc)) + self.fail(f"Error while reading from Swarm manager: {exc}") except Exception as exc: - self.fail("Error inspecting swarm node: %s" % exc) + self.fail(f"Error inspecting swarm node: {exc}") json_str = json.dumps(node_info, ensure_ascii=False) node_info = json.loads(json_str) @@ -197,9 +195,9 @@ class AnsibleDockerSwarmClient(AnsibleDockerClient): except APIError as exc: if exc.status_code == 503: self.fail("Cannot inspect node: To inspect node execute module on Swarm Manager") - self.fail("Error while reading from Swarm manager: %s" % to_native(exc)) + self.fail(f"Error while reading from Swarm manager: {exc}") except Exception as exc: - self.fail("Error inspecting swarm node: %s" % exc) + self.fail(f"Error inspecting swarm node: {exc}") json_str = json.dumps(node_info, ensure_ascii=False) node_info = json.loads(json_str) @@ -265,15 +263,15 @@ class AnsibleDockerSwarmClient(AnsibleDockerClient): service_info = self.inspect_service(service_id) except NotFound as exc: if skip_missing is False: - self.fail("Error while reading from 
Swarm manager: %s" % to_native(exc)) + self.fail(f"Error while reading from Swarm manager: {exc}") else: return None except APIError as exc: if exc.status_code == 503: self.fail("Cannot inspect service: To inspect service execute module on Swarm Manager") - self.fail("Error inspecting swarm service: %s" % exc) + self.fail(f"Error inspecting swarm service: {exc}") except Exception as exc: - self.fail("Error inspecting swarm service: %s" % exc) + self.fail(f"Error inspecting swarm service: {exc}") json_str = json.dumps(service_info, ensure_ascii=False) service_info = json.loads(json_str) diff --git a/plugins/module_utils/util.py b/plugins/module_utils/util.py index 2eca037d..7952e84c 100644 --- a/plugins/module_utils/util.py +++ b/plugins/module_utils/util.py @@ -36,7 +36,7 @@ DOCKER_COMMON_ARGS = dict( ) DOCKER_COMMON_ARGS_VARS = dict([ - [option_name, 'ansible_docker_%s' % option_name] + [option_name, f'ansible_docker_{option_name}'] for option_name in DOCKER_COMMON_ARGS if option_name != 'debug' ]) @@ -93,9 +93,9 @@ def log_debug(msg, pretty_print=False): with open('docker.log', 'a') as log_file: if pretty_print: log_file.write(json.dumps(msg, sort_keys=True, indent=4, separators=(',', ': '))) - log_file.write(u'\n') + log_file.write('\n') else: - log_file.write(msg + u'\n') + log_file.write(f"{msg}\n") class DockerBaseClass(object): @@ -289,13 +289,9 @@ def sanitize_labels(labels, labels_field, client=None, module=None): return for k, v in list(labels.items()): if not isinstance(k, str): - fail( - "The key {key!r} of {field} is not a string!".format( - field=labels_field, key=k)) + fail(f"The key {k!r} of {labels_field} is not a string!") if isinstance(v, (bool, float)): - fail( - "The value {value!r} for {key!r} of {field} is not a string or something than can be safely converted to a string!".format( - field=labels_field, key=k, value=v)) + fail(f"The value {v!r} for {k!r} of {labels_field} is not a string or something than can be safely converted to a 
string!") labels[k] = to_text(v) @@ -328,7 +324,7 @@ def convert_duration_to_nanosecond(time_str): Return time duration in nanosecond. """ if not isinstance(time_str, str): - raise ValueError('Missing unit in duration - %s' % time_str) + raise ValueError(f'Missing unit in duration - {time_str}') regex = re.compile( r'^(((?P\d+)h)?' @@ -340,7 +336,7 @@ def convert_duration_to_nanosecond(time_str): parts = regex.match(time_str) if not parts: - raise ValueError('Invalid time duration - %s' % time_str) + raise ValueError(f'Invalid time duration - {time_str}') parts = parts.groupdict() time_params = {} @@ -389,8 +385,7 @@ def normalize_healthcheck(healthcheck, normalize_test=False): value = int(value) except ValueError: raise ValueError( - 'Cannot parse number of retries for healthcheck. ' - 'Expected an integer, got "{0}".'.format(value) + f'Cannot parse number of retries for healthcheck. Expected an integer, got "{value}".' ) if key == 'test' and value and normalize_test: value = normalize_healthcheck_test(value) diff --git a/plugins/modules/docker_compose_v2.py b/plugins/modules/docker_compose_v2.py index b27fec08..4e8d02c9 100644 --- a/plugins/modules/docker_compose_v2.py +++ b/plugins/modules/docker_compose_v2.py @@ -439,7 +439,6 @@ actions: import traceback from ansible.module_utils.common.validation import check_type_int -from ansible.module_utils.common.text.converters import to_native from ansible_collections.community.docker.plugins.module_utils.common_cli import ( AnsibleModuleDockerClient, @@ -477,17 +476,17 @@ class ServicesManager(BaseComposeManager): self.wait_timeout = parameters['wait_timeout'] self.yes = parameters['assume_yes'] if self.compose_version < LooseVersion('2.32.0') and self.yes: - self.fail('assume_yes=true needs Docker Compose 2.32.0 or newer, not version %s' % (self.compose_version, )) + self.fail(f'assume_yes=true needs Docker Compose 2.32.0 or newer, not version {self.compose_version}') for key, value in self.scale.items(): if not 
isinstance(key, str): - self.fail('The key %s for `scale` is not a string' % repr(key)) + self.fail(f'The key {key!r} for `scale` is not a string') try: value = check_type_int(value) except TypeError as exc: - self.fail('The value %s for `scale[%s]` is not an integer' % (repr(value), repr(key))) + self.fail(f'The value {value!r} for `scale[{key!r}]` is not an integer') if value < 0: - self.fail('The value %s for `scale[%s]` is negative' % (repr(value), repr(key))) + self.fail(f'The value {value!r} for `scale[{key!r}]` is negative') self.scale[key] = value def run(self): @@ -520,13 +519,13 @@ class ServicesManager(BaseComposeManager): if not self.dependencies: args.append('--no-deps') if self.timeout is not None: - args.extend(['--timeout', '%d' % self.timeout]) + args.extend(['--timeout', f'{self.timeout}']) if self.build == 'always': args.append('--build') elif self.build == 'never': args.append('--no-build') for key, value in sorted(self.scale.items()): - args.extend(['--scale', '%s=%d' % (key, value)]) + args.extend(['--scale', f'{key}={value}']) if self.wait: args.append('--wait') if self.wait_timeout is not None: @@ -557,7 +556,7 @@ class ServicesManager(BaseComposeManager): def get_stop_cmd(self, dry_run): args = self.get_base_args() + ['stop'] if self.timeout is not None: - args.extend(['--timeout', '%d' % self.timeout]) + args.extend(['--timeout', f'{self.timeout}']) if dry_run: args.append('--dry-run') args.append('--') @@ -610,7 +609,7 @@ class ServicesManager(BaseComposeManager): if not self.dependencies: args.append('--no-deps') if self.timeout is not None: - args.extend(['--timeout', '%d' % self.timeout]) + args.extend(['--timeout', f'{self.timeout}']) if dry_run: args.append('--dry-run') args.append('--') @@ -637,7 +636,7 @@ class ServicesManager(BaseComposeManager): if self.remove_volumes: args.append('--volumes') if self.timeout is not None: - args.extend(['--timeout', '%d' % self.timeout]) + args.extend(['--timeout', f'{self.timeout}']) if dry_run: 
args.append('--dry-run') args.append('--') @@ -691,7 +690,7 @@ def main(): manager.cleanup() client.module.exit_json(**result) except DockerException as e: - client.fail('An unexpected docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc()) + client.fail(f'An unexpected Docker error occurred: {e}', exception=traceback.format_exc()) if __name__ == '__main__': diff --git a/plugins/modules/docker_compose_v2_exec.py b/plugins/modules/docker_compose_v2_exec.py index 8f88e798..24b29304 100644 --- a/plugins/modules/docker_compose_v2_exec.py +++ b/plugins/modules/docker_compose_v2_exec.py @@ -167,7 +167,7 @@ rc: import shlex import traceback -from ansible.module_utils.common.text.converters import to_text, to_native +from ansible.module_utils.common.text.converters import to_text from ansible_collections.community.docker.plugins.module_utils.common_cli import ( AnsibleModuleDockerClient, @@ -211,7 +211,7 @@ class ExecManager(BaseComposeManager): if not isinstance(value, str): self.fail( "Non-string value found for env option. Ambiguous env options must be " - "wrapped in quotes to avoid them being interpreted. Key: %s" % (name, ) + f"wrapped in quotes to avoid them being interpreted. 
Key: {name}" ) self.env[name] = to_text(value, errors='surrogate_or_strict') @@ -232,7 +232,7 @@ class ExecManager(BaseComposeManager): if self.env: for name, value in list(self.env.items()): args.append('--env') - args.append('{0}={1}'.format(name, value)) + args.append(f'{name}={value}') args.append('--') args.append(self.service) args.extend(self.argv) @@ -295,7 +295,7 @@ def main(): manager.cleanup() client.module.exit_json(**result) except DockerException as e: - client.fail('An unexpected docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc()) + client.fail(f'An unexpected Docker error occurred: {e}', exception=traceback.format_exc()) if __name__ == '__main__': diff --git a/plugins/modules/docker_compose_v2_pull.py b/plugins/modules/docker_compose_v2_pull.py index d5dbbd7c..d2b4f68f 100644 --- a/plugins/modules/docker_compose_v2_pull.py +++ b/plugins/modules/docker_compose_v2_pull.py @@ -112,8 +112,6 @@ actions: import traceback -from ansible.module_utils.common.text.converters import to_native - from ansible_collections.community.docker.plugins.module_utils.common_cli import ( AnsibleModuleDockerClient, DockerException, @@ -139,12 +137,12 @@ class PullManager(BaseComposeManager): if self.policy != 'always' and self.compose_version < LooseVersion('2.22.0'): # https://github.com/docker/compose/pull/10981 - 2.22.0 - self.fail('A pull policy other than always is only supported since Docker Compose 2.22.0. {0} has version {1}'.format( - self.client.get_cli(), self.compose_version)) + self.fail( + f'A pull policy other than always is only supported since Docker Compose 2.22.0. {self.client.get_cli()} has version {self.compose_version}') if self.ignore_buildable and self.compose_version < LooseVersion('2.15.0'): # https://github.com/docker/compose/pull/10134 - 2.15.0 - self.fail('--ignore-buildable is only supported since Docker Compose 2.15.0. 
{0} has version {1}'.format( - self.client.get_cli(), self.compose_version)) + self.fail( + f'--ignore-buildable is only supported since Docker Compose 2.15.0. {self.client.get_cli()} has version {self.compose_version}') def get_pull_cmd(self, dry_run, no_start=False): args = self.get_base_args() + ['pull'] @@ -196,7 +194,7 @@ def main(): manager.cleanup() client.module.exit_json(**result) except DockerException as e: - client.fail('An unexpected docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc()) + client.fail(f'An unexpected Docker error occurred: {e}', exception=traceback.format_exc()) if __name__ == '__main__': diff --git a/plugins/modules/docker_compose_v2_run.py b/plugins/modules/docker_compose_v2_run.py index efc0f907..afec8608 100644 --- a/plugins/modules/docker_compose_v2_run.py +++ b/plugins/modules/docker_compose_v2_run.py @@ -240,7 +240,7 @@ rc: import shlex import traceback -from ansible.module_utils.common.text.converters import to_text, to_native +from ansible.module_utils.common.text.converters import to_text from ansible_collections.community.docker.plugins.module_utils.common_cli import ( AnsibleModuleDockerClient, @@ -297,7 +297,7 @@ class ExecManager(BaseComposeManager): if not isinstance(value, str): self.fail( "Non-string value found for env option. Ambiguous env options must be " - "wrapped in quotes to avoid them being interpreted. Key: %s" % (name, ) + f"wrapped in quotes to avoid them being interpreted. 
Key: {name}" ) self.env[name] = to_text(value, errors='surrogate_or_strict') @@ -349,7 +349,7 @@ class ExecManager(BaseComposeManager): if self.env: for name, value in list(self.env.items()): args.append('--env') - args.append('{0}={1}'.format(name, value)) + args.append(f'{name}={value}') args.append('--') args.append(self.service) if self.argv: @@ -428,7 +428,7 @@ def main(): manager.cleanup() client.module.exit_json(**result) except DockerException as e: - client.fail('An unexpected docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc()) + client.fail(f'An unexpected Docker error occurred: {e}', exception=traceback.format_exc()) if __name__ == '__main__': diff --git a/plugins/modules/docker_config.py b/plugins/modules/docker_config.py index db1e8b64..5ca8d0c7 100644 --- a/plugins/modules/docker_config.py +++ b/plugins/modules/docker_config.py @@ -214,7 +214,7 @@ from ansible_collections.community.docker.plugins.module_utils.util import ( compare_generic, sanitize_labels, ) -from ansible.module_utils.common.text.converters import to_native, to_bytes +from ansible.module_utils.common.text.converters import to_bytes class ConfigManager(DockerBaseClass): @@ -242,7 +242,7 @@ class ConfigManager(DockerBaseClass): with open(data_src, 'rb') as f: self.data = f.read() except Exception as exc: - self.client.fail('Error while reading {src}: {error}'.format(src=data_src, error=to_native(exc))) + self.client.fail(f'Error while reading {data_src}: {exc}') self.labels = parameters.get('labels') self.force = parameters.get('force') self.rolling_versions = parameters.get('rolling_versions') @@ -281,13 +281,13 @@ class ConfigManager(DockerBaseClass): try: configs = self.client.configs(filters={'name': self.name}) except APIError as exc: - self.client.fail("Error accessing config %s: %s" % (self.name, to_native(exc))) + self.client.fail(f"Error accessing config {self.name}: {exc}") if self.rolling_versions: self.configs = [ config for config in configs 
- if config['Spec']['Name'].startswith('{name}_v'.format(name=self.name)) + if config['Spec']['Name'].startswith(f'{self.name}_v') ] self.configs.sort(key=self.get_version) else: @@ -305,7 +305,7 @@ class ConfigManager(DockerBaseClass): if self.rolling_versions: self.version += 1 labels['ansible_version'] = str(self.version) - self.name = '{name}_v{version}'.format(name=self.name, version=self.version) + self.name = f'{self.name}_v{self.version}' if self.labels: labels.update(self.labels) @@ -320,7 +320,7 @@ class ConfigManager(DockerBaseClass): config_id = self.client.create_config(self.name, self.data, labels=labels, **kwargs) self.configs += self.client.configs(filters={'id': config_id}) except APIError as exc: - self.client.fail("Error creating config: %s" % to_native(exc)) + self.client.fail(f"Error creating config: {exc}") if isinstance(config_id, dict): config_id = config_id['ID'] @@ -332,7 +332,7 @@ class ConfigManager(DockerBaseClass): if not self.check_mode: self.client.remove_config(config['ID']) except APIError as exc: - self.client.fail("Error removing config %s: %s" % (config['Spec']['Name'], to_native(exc))) + self.client.fail(f"Error removing config {config['Spec']['Name']}: {exc}") def present(self): ''' Handles state == 'present', creating or updating the config ''' @@ -425,10 +425,10 @@ def main(): ConfigManager(client, results)() client.module.exit_json(**results) except DockerException as e: - client.fail('An unexpected docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc()) + client.fail(f'An unexpected Docker error occurred: {e}', exception=traceback.format_exc()) except RequestException as e: client.fail( - 'An unexpected requests error occurred when Docker SDK for Python tried to talk to the docker daemon: {0}'.format(to_native(e)), + f'An unexpected requests error occurred when Docker SDK for Python tried to talk to the docker daemon: {e}', exception=traceback.format_exc()) diff --git 
a/plugins/modules/docker_container_copy_into.py b/plugins/modules/docker_container_copy_into.py index 5060cdb3..4896d62c 100644 --- a/plugins/modules/docker_container_copy_into.py +++ b/plugins/modules/docker_container_copy_into.py @@ -451,7 +451,7 @@ def is_file_idempotent(client, container, managed_path, container_path, follow_l file_stat = os.stat(managed_path) if local_follow_links else os.lstat(managed_path) except OSError as exc: if exc.errno == 2: - raise DockerFileNotFound('Cannot find local file {managed_path}'.format(managed_path=managed_path)) + raise DockerFileNotFound(f'Cannot find local file {managed_path}') raise if mode is None: mode = stat.S_IMODE(file_stat.st_mode) @@ -786,13 +786,13 @@ def parse_modern(mode): return int(to_native(mode), 8) if isinstance(mode, int): return mode - raise TypeError('must be an octal string or an integer, got {mode!r}'.format(mode=mode)) + raise TypeError(f'must be an octal string or an integer, got {mode!r}') def parse_octal_string_only(mode): if isinstance(mode, str): return int(to_native(mode), 8) - raise TypeError('must be an octal string, got {mode!r}'.format(mode=mode)) + raise TypeError(f'must be an octal string, got {mode!r}') def main(): @@ -847,16 +847,16 @@ def main(): elif mode_parse == 'octal_string_only': mode = parse_octal_string_only(mode) except (TypeError, ValueError) as e: - client.fail("Error while parsing 'mode': {error}".format(error=e)) + client.fail(f"Error while parsing 'mode': {e}") if mode < 0: - client.fail("'mode' must not be negative; got {mode}".format(mode=mode)) + client.fail(f"'mode' must not be negative; got {mode}") if content is not None: if client.module.params['content_is_b64']: try: content = base64.b64decode(content) except Exception as e: # depending on Python version and error, multiple different exceptions can be raised - client.fail('Cannot Base64 decode the content option: {0}'.format(e)) + client.fail(f'Cannot Base64 decode the content option: {e}') else: content = 
to_bytes(content) @@ -901,21 +901,21 @@ def main(): # Can happen if a user explicitly passes `content: null` or `path: null`... client.fail('One of path and content must be supplied') except NotFound as exc: - client.fail('Could not find container "{1}" or resource in it ({0})'.format(exc, container)) + client.fail(f'Could not find container "{container}" or resource in it ({exc})') except APIError as exc: - client.fail('An unexpected Docker error occurred for container "{1}": {0}'.format(exc, container), exception=traceback.format_exc()) + client.fail(f'An unexpected Docker error occurred for container "{container}": {exc}', exception=traceback.format_exc()) except DockerException as exc: - client.fail('An unexpected Docker error occurred for container "{1}": {0}'.format(exc, container), exception=traceback.format_exc()) + client.fail(f'An unexpected Docker error occurred for container "{container}": {exc}', exception=traceback.format_exc()) except RequestException as exc: client.fail( - 'An unexpected requests error occurred for container "{1}" when trying to talk to the Docker daemon: {0}'.format(exc, container), + f'An unexpected requests error occurred for container "{container}" when trying to talk to the Docker daemon: {exc}', exception=traceback.format_exc()) except DockerUnexpectedError as exc: - client.fail('Unexpected error: {exc}'.format(exc=to_native(exc)), exception=traceback.format_exc()) + client.fail(f'Unexpected error: {exc}', exception=traceback.format_exc()) except DockerFileCopyError as exc: client.fail(to_native(exc)) except OSError as exc: - client.fail('Unexpected error: {exc}'.format(exc=to_native(exc)), exception=traceback.format_exc()) + client.fail(f'Unexpected error: {exc}', exception=traceback.format_exc()) if __name__ == '__main__': diff --git a/plugins/modules/docker_container_exec.py b/plugins/modules/docker_container_exec.py index 1e9e2779..ec4edd36 100644 --- a/plugins/modules/docker_container_exec.py +++ 
b/plugins/modules/docker_container_exec.py @@ -167,7 +167,7 @@ import selectors import shlex import traceback -from ansible.module_utils.common.text.converters import to_text, to_bytes, to_native +from ansible.module_utils.common.text.converters import to_text, to_bytes from ansible_collections.community.docker.plugins.module_utils.common_api import ( AnsibleDockerClient, @@ -228,7 +228,7 @@ def main(): if not isinstance(value, str): client.module.fail_json( msg="Non-string value found for env option. Ambiguous env options must be " - "wrapped in quotes to avoid them being interpreted. Key: %s" % (name, )) + f"wrapped in quotes to avoid them being interpreted. Key: {name}") env[name] = to_text(value, errors='surrogate_or_strict') if command is not None: @@ -295,16 +295,16 @@ def main(): rc=result.get('ExitCode') or 0, ) except NotFound: - client.fail('Could not find container "{0}"'.format(container)) + client.fail(f'Could not find container "{container}"') except APIError as e: if e.response is not None and e.response.status_code == 409: - client.fail('The container "{0}" has been paused ({1})'.format(container, to_native(e))) - client.fail('An unexpected Docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc()) + client.fail(f'The container "{container}" has been paused ({e})') + client.fail(f'An unexpected Docker error occurred: {e}', exception=traceback.format_exc()) except DockerException as e: - client.fail('An unexpected Docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc()) + client.fail(f'An unexpected Docker error occurred: {e}', exception=traceback.format_exc()) except RequestException as e: client.fail( - 'An unexpected requests error occurred when trying to talk to the Docker daemon: {0}'.format(to_native(e)), + f'An unexpected requests error occurred when trying to talk to the Docker daemon: {e}', exception=traceback.format_exc()) diff --git a/plugins/modules/docker_container_info.py 
b/plugins/modules/docker_container_info.py index 0028fcc7..8b47b63d 100644 --- a/plugins/modules/docker_container_info.py +++ b/plugins/modules/docker_container_info.py @@ -77,8 +77,6 @@ container: import traceback -from ansible.module_utils.common.text.converters import to_native - from ansible_collections.community.docker.plugins.module_utils.common_api import ( AnsibleDockerClient, RequestException, @@ -105,10 +103,10 @@ def main(): container=container, ) except DockerException as e: - client.fail('An unexpected Docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc()) + client.fail(f'An unexpected Docker error occurred: {e}', exception=traceback.format_exc()) except RequestException as e: client.fail( - 'An unexpected requests error occurred when trying to talk to the Docker daemon: {0}'.format(to_native(e)), + f'An unexpected requests error occurred when trying to talk to the Docker daemon: {e}', exception=traceback.format_exc()) diff --git a/plugins/modules/docker_context_info.py b/plugins/modules/docker_context_info.py index 30b466e8..5c504167 100644 --- a/plugins/modules/docker_context_info.py +++ b/plugins/modules/docker_context_info.py @@ -175,7 +175,7 @@ current_context_name: import traceback from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.common.text.converters import to_native, to_text +from ansible.module_utils.common.text.converters import to_text from ansible_collections.community.docker.plugins.module_utils._api.context.api import ( ContextAPI, @@ -226,7 +226,7 @@ def context_to_json(context, current): if proto == 'http+unix': proto = 'unix' if proto: - host_str = "{0}://{1}".format(proto, host_str) + host_str = f"{proto}://{host_str}" # Create config for the modules module_config['docker_host'] = host_str @@ -274,15 +274,12 @@ def main(): if module.params['name']: contexts = [ContextAPI.get_context(module.params['name'])] if not contexts[0]: - module.fail_json(msg="There is no context of 
name {name!r}".format(name=module.params['name'])) + module.fail_json(msg=f"There is no context of name {module.params['name']!r}") elif module.params['only_current']: contexts = [ContextAPI.get_context(current_context_name)] if not contexts[0]: module.fail_json( - msg="There is no context of name {name!r}, which is configured as the default context ({source})".format( - name=current_context_name, - source=current_context_source, - ), + msg=f"There is no context of name {current_context_name!r}, which is configured as the default context ({current_context_source})", ) else: contexts = ContextAPI.contexts() @@ -298,9 +295,9 @@ def main(): current_context_name=current_context_name, ) except ContextException as e: - module.fail_json(msg='Error when handling Docker contexts: {0}'.format(to_native(e)), exception=traceback.format_exc()) + module.fail_json(msg=f'Error when handling Docker contexts: {e}', exception=traceback.format_exc()) except DockerException as e: - module.fail_json(msg='An unexpected Docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc()) + module.fail_json(msg=f'An unexpected Docker error occurred: {e}', exception=traceback.format_exc()) if __name__ == '__main__': diff --git a/plugins/modules/docker_host_info.py b/plugins/modules/docker_host_info.py index 76f612d1..335d0f79 100644 --- a/plugins/modules/docker_host_info.py +++ b/plugins/modules/docker_host_info.py @@ -213,8 +213,6 @@ disk_usage: import traceback -from ansible.module_utils.common.text.converters import to_native - from ansible_collections.community.docker.plugins.module_utils.common_api import ( AnsibleDockerClient, RequestException, @@ -259,7 +257,7 @@ class DockerHostManager(DockerBaseClass): try: return self.client.info() except APIError as exc: - self.client.fail("Error inspecting docker host: %s" % to_native(exc)) + self.client.fail(f"Error inspecting docker host: {exc}") def get_docker_disk_usage_facts(self): try: @@ -268,7 +266,7 @@ class 
DockerHostManager(DockerBaseClass): else: return dict(LayersSize=self.client.df()['LayersSize']) except APIError as exc: - self.client.fail("Error inspecting docker host: %s" % to_native(exc)) + self.client.fail(f"Error inspecting docker host: {exc}") def get_docker_items_list(self, docker_object=None, filters=None, verbose=False): items = None @@ -311,7 +309,7 @@ class DockerHostManager(DockerBaseClass): items = self.client.get_json('/volumes', params=params) items = items['Volumes'] except APIError as exc: - self.client.fail("Error inspecting docker host for object '%s': %s" % (docker_object, to_native(exc))) + self.client.fail(f"Error inspecting docker host for object '{docker_object}': {exc}") if self.verbose_output: return items @@ -370,10 +368,10 @@ def main(): DockerHostManager(client, results) client.module.exit_json(**results) except DockerException as e: - client.fail('An unexpected Docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc()) + client.fail(f'An unexpected Docker error occurred: {e}', exception=traceback.format_exc()) except RequestException as e: client.fail( - 'An unexpected requests error occurred when trying to talk to the Docker daemon: {0}'.format(to_native(e)), + f'An unexpected requests error occurred when trying to talk to the Docker daemon: {e}', exception=traceback.format_exc()) diff --git a/plugins/modules/docker_image.py b/plugins/modules/docker_image.py index 57a7c090..2ff36dd7 100644 --- a/plugins/modules/docker_image.py +++ b/plugins/modules/docker_image.py @@ -417,7 +417,7 @@ def convert_to_bytes(value, module, name, unlimited_value=None): return unlimited_value return human_to_bytes(value) except ValueError as exc: - module.fail_json(msg='Failed to convert %s to bytes: %s' % (name, to_native(exc))) + module.fail_json(msg=f'Failed to convert {name} to bytes: {exc}') class ImageManager(DockerBaseClass): @@ -485,9 +485,9 @@ class ImageManager(DockerBaseClass): # Sanity check: fail early when we know 
that something will fail later if self.repository and is_image_name_id(self.repository): - self.fail("`repository` must not be an image ID; got: %s" % self.repository) + self.fail(f"`repository` must not be an image ID; got: {self.repository}") if not self.repository and self.push and is_image_name_id(self.name): - self.fail("Cannot push an image by ID; specify `repository` to tag and push the image with ID %s instead" % self.name) + self.fail(f"Cannot push an image by ID; specify `repository` to tag and push the image with ID {self.name} instead") if self.state == 'present': self.present() @@ -512,16 +512,16 @@ class ImageManager(DockerBaseClass): if not image or self.force_source: if self.source == 'build': if is_image_name_id(self.name): - self.fail("Image name must not be an image ID for source=build; got: %s" % self.name) + self.fail(f"Image name must not be an image ID for source=build; got: {self.name}") # Build the image if not os.path.isdir(self.build_path): - self.fail("Requested build path %s could not be found or you do not have access." % self.build_path) + self.fail(f"Requested build path {self.build_path} could not be found or you do not have access.") image_name = self.name if self.tag: - image_name = "%s:%s" % (self.name, self.tag) - self.log("Building image %s" % image_name) - self.results['actions'].append("Built image %s from %s" % (image_name, self.build_path)) + image_name = f"{self.name}:{self.tag}" + self.log(f"Building image {image_name}") + self.results['actions'].append(f"Built image {image_name} from {self.build_path}") self.results['changed'] = True if not self.check_mode: self.results.update(self.build_image()) @@ -529,21 +529,20 @@ class ImageManager(DockerBaseClass): elif self.source == 'load': # Load the image from an archive if not os.path.isfile(self.load_path): - self.fail("Error loading image %s. Specified path %s does not exist." % (self.name, - self.load_path)) + self.fail(f"Error loading image {self.name}. 
Specified path {self.load_path} does not exist.") image_name = self.name if self.tag and not is_image_name_id(image_name): - image_name = "%s:%s" % (self.name, self.tag) - self.results['actions'].append("Loaded image %s from %s" % (image_name, self.load_path)) + image_name = f"{self.name}:{self.tag}" + self.results['actions'].append(f"Loaded image {image_name} from {self.load_path}") self.results['changed'] = True if not self.check_mode: self.results['image'] = self.load_image() elif self.source == 'pull': if is_image_name_id(self.name): - self.fail("Image name must not be an image ID for source=pull; got: %s" % self.name) + self.fail(f"Image name must not be an image ID for source=pull; got: {self.name}") # pull the image - self.results['actions'].append('Pulled image %s:%s' % (self.name, self.tag)) + self.results['actions'].append(f'Pulled image {self.name}:{self.tag}') self.results['changed'] = True if not self.check_mode: self.results['image'], dummy = self.client.pull_image(self.name, tag=self.tag, platform=self.pull_platform) @@ -551,8 +550,8 @@ class ImageManager(DockerBaseClass): if image is None: name = self.name if self.tag and not is_image_name_id(name): - name = "%s:%s" % (self.name, self.tag) - self.client.fail('Cannot find the image %s locally.' 
% name) + name = f"{self.name}:{self.tag}" + self.client.fail(f'Cannot find the image {name} locally.') if not self.check_mode and image and image['Id'] == self.results['image']['Id']: self.results['changed'] = False else: @@ -578,7 +577,7 @@ class ImageManager(DockerBaseClass): else: image = self.client.find_image(name, self.tag) if self.tag: - name = "%s:%s" % (self.name, self.tag) + name = f"{self.name}:{self.tag}" if image: if not self.check_mode: try: @@ -587,10 +586,10 @@ class ImageManager(DockerBaseClass): # If the image vanished while we were trying to remove it, do not fail pass except Exception as exc: - self.fail("Error removing image %s - %s" % (name, to_native(exc))) + self.fail(f"Error removing image {name} - {exc}") self.results['changed'] = True - self.results['actions'].append("Removed image %s" % (name)) + self.results['actions'].append(f"Removed image {name}") self.results['image']['state'] = 'Deleted' @staticmethod @@ -612,12 +611,12 @@ class ImageManager(DockerBaseClass): ''' def build_msg(reason): - return 'Archived image %s to %s, %s' % (current_image_name, archive_path, reason) + return f'Archived image {current_image_name} to {archive_path}, {reason}' try: archived = archived_image_manifest(archive_path) except ImageArchiveInvalidException as exc: - failure_logger('Unable to extract manifest summary from archive: %s' % to_native(exc)) + failure_logger(f'Unable to extract manifest summary from archive: {exc}') return build_msg('overwriting an unreadable archive file') if archived is None: @@ -627,7 +626,7 @@ class ImageManager(DockerBaseClass): else: name = ', '.join(archived.repo_tags) - return build_msg('overwriting archive with image %s named %s' % (archived.image_id, name)) + return build_msg(f'overwriting archive with image {archived.image_id} named {name}') def archive_image(self, name, tag): ''' @@ -647,10 +646,10 @@ class ImageManager(DockerBaseClass): image_name = name else: image = self.client.find_image(name=name, tag=tag) - 
image_name = "%s:%s" % (name, tag) + image_name = f"{name}:{tag}" if not image: - self.log("archive image: image %s not found" % image_name) + self.log(f"archive image: image {image_name} not found") return # Will have a 'sha256:' prefix @@ -664,7 +663,7 @@ class ImageManager(DockerBaseClass): self.results['changed'] = action is not None if (not self.check_mode) and self.results['changed']: - self.log("Getting archive of image %s" % image_name) + self.log(f"Getting archive of image {image_name}") try: saved_image = self.client._stream_raw_result( self.client._get(self.client._url('/images/{0}/get', image_name), stream=True), @@ -672,14 +671,14 @@ class ImageManager(DockerBaseClass): False, ) except Exception as exc: - self.fail("Error getting image %s - %s" % (image_name, to_native(exc))) + self.fail(f"Error getting image {image_name} - {exc}") try: with open(self.archive_path, 'wb') as fd: for chunk in saved_image: fd.write(chunk) except Exception as exc: - self.fail("Error writing image archive %s - %s" % (self.archive_path, to_native(exc))) + self.fail(f"Error writing image archive {self.archive_path} - {exc}") self.results['image'] = image @@ -693,17 +692,17 @@ class ImageManager(DockerBaseClass): ''' if is_image_name_id(name): - self.fail("Cannot push an image ID: %s" % name) + self.fail(f"Cannot push an image ID: {name}") repository = name if not tag: repository, tag = parse_repository_tag(name) registry, repo_name = resolve_repository_name(repository) - self.log("push %s to %s/%s:%s" % (self.name, registry, repo_name, tag)) + self.log(f"push {self.name} to {registry}/{repo_name}:{tag}") if registry: - self.results['actions'].append("Pushed image %s to %s/%s:%s" % (self.name, registry, repo_name, tag)) + self.results['actions'].append(f"Pushed image {self.name} to {registry}/{repo_name}:{tag}") self.results['changed'] = True if not self.check_mode: status = None @@ -740,12 +739,10 @@ class ImageManager(DockerBaseClass): except Exception as exc: if 
'unauthorized' in str(exc): if 'authentication required' in str(exc): - self.fail("Error pushing image %s/%s:%s - %s. Try logging into %s first." % - (registry, repo_name, tag, to_native(exc), registry)) + self.fail(f"Error pushing image {registry}/{repo_name}:{tag} - {exc}. Try logging into {registry} first.") else: - self.fail("Error pushing image %s/%s:%s - %s. Does the repository exist?" % - (registry, repo_name, tag, str(exc))) - self.fail("Error pushing image %s: %s" % (repository, to_native(exc))) + self.fail(f"Error pushing image {registry}/{repo_name}:{tag} - {exc}. Does the repository exist?") + self.fail(f"Error pushing image {repository}: {exc}") self.results['image'] = self.client.find_image(name=repository, tag=tag) if not self.results['image']: self.results['image'] = dict() @@ -768,15 +765,15 @@ class ImageManager(DockerBaseClass): repo_tag = tag image = self.client.find_image(name=repo, tag=repo_tag) found = 'found' if image else 'not found' - self.log("image %s was %s" % (repo, found)) + self.log(f"image {repo} was {found}") if not image or self.force_tag: image_name = name if not is_image_name_id(name) and tag and not name.endswith(':' + tag): - image_name = "%s:%s" % (name, tag) - self.log("tagging %s to %s:%s" % (image_name, repo, repo_tag)) + image_name = f"{name}:{tag}" + self.log(f"tagging {image_name} to {repo}:{repo_tag}") self.results['changed'] = True - self.results['actions'].append("Tagged image %s to %s:%s" % (image_name, repo, repo_tag)) + self.results['actions'].append(f"Tagged image {image_name} to {repo}:{repo_tag}") if not self.check_mode: try: # Finding the image does not always work, especially running a localhost registry. 
In those @@ -791,7 +788,7 @@ class ImageManager(DockerBaseClass): if res.status_code != 201: raise Exception("Tag operation failed.") except Exception as exc: - self.fail("Error: failed to tag image - %s" % to_native(exc)) + self.fail(f"Error: failed to tag image - {exc}") self.results['image'] = self.client.find_image(name=repo, tag=repo_tag) if image and image['Id'] == self.results['image']['Id']: self.results['changed'] = False @@ -826,7 +823,7 @@ class ImageManager(DockerBaseClass): container_limits = self.container_limits or {} for key in container_limits.keys(): if key not in CONTAINER_LIMITS_KEYS: - raise DockerException('Invalid container_limits key {key}'.format(key=key)) + raise DockerException(f'Invalid container_limits key {key}') dockerfile = self.dockerfile if self.build_path.startswith(('http://', 'https://', 'git://', 'github.com/', 'git@')): @@ -846,7 +843,7 @@ class ImageManager(DockerBaseClass): context = tar(self.build_path, exclude=exclude, dockerfile=dockerfile, gzip=False) params = { - 't': "%s:%s" % (self.name, self.tag) if self.tag else self.name, + 't': f"{self.name}:{self.tag}" if self.tag else self.name, 'remote': remote, 'q': False, 'nocache': self.nocache, @@ -912,14 +909,9 @@ class ImageManager(DockerBaseClass): if line.get('errorDetail'): errorDetail = line.get('errorDetail') self.fail( - "Error building %s - code: %s, message: %s, logs: %s" % ( - self.name, - errorDetail.get('code'), - errorDetail.get('message'), - build_output)) + f"Error building {self.name} - code: {errorDetail.get('code')}, message: {errorDetail.get('message')}, logs: {build_output}") else: - self.fail("Error building %s - message: %s, logs: %s" % ( - self.name, line.get('error'), build_output)) + self.fail(f"Error building {self.name} - message: {line.get('error')}, logs: {build_output}") return { "stdout": "\n".join(build_output), @@ -936,9 +928,9 @@ class ImageManager(DockerBaseClass): load_output = [] has_output = False try: - self.log("Opening image %s" % 
self.load_path) + self.log(f"Opening image {self.load_path}") with open(self.load_path, 'rb') as image_tar: - self.log("Loading image from %s" % self.load_path) + self.log(f"Loading image from {self.load_path}") res = self.client._post(self.client._url("/images/load"), data=image_tar, stream=True) if LooseVersion(self.client.api_version) >= LooseVersion('1.23'): has_output = True @@ -955,10 +947,10 @@ class ImageManager(DockerBaseClass): ) except EnvironmentError as exc: if exc.errno == errno.ENOENT: - self.client.fail("Error opening image %s - %s" % (self.load_path, to_native(exc))) - self.client.fail("Error loading image %s - %s" % (self.name, to_native(exc)), stdout='\n'.join(load_output)) + self.client.fail(f"Error opening image {self.load_path} - {exc}") + self.client.fail(f"Error loading image {self.name} - {exc}", stdout='\n'.join(load_output)) except Exception as exc: - self.client.fail("Error loading image %s - %s" % (self.name, to_native(exc)), stdout='\n'.join(load_output)) + self.client.fail(f"Error loading image {self.name} - {exc}", stdout='\n'.join(load_output)) # Collect loaded images if has_output: @@ -978,20 +970,19 @@ class ImageManager(DockerBaseClass): expected_image = self.name.lower() found_image = expected_image not in loaded_image_ids else: - expected_image = '%s:%s' % (self.name, self.tag) + expected_image = f'{self.name}:{self.tag}' found_image = expected_image not in loaded_images if found_image: + found_instead = ', '.join(sorted([f"'{image}'" for image in loaded_images] + list(loaded_image_ids))) self.client.fail( - "The archive did not contain image '%s'. Instead, found %s." % ( - expected_image, - ', '.join(sorted(["'%s'" % image for image in loaded_images] + list(loaded_image_ids)))), + f"The archive did not contain image '{expected_image}'. 
Instead, found {found_instead}.", stdout='\n'.join(load_output)) loaded_images.remove(expected_image) if loaded_images: + found_more = ', '.join(sorted([f"'{image}'" for image in loaded_images] + list(loaded_image_ids))) self.client.module.warn( - "The archive contained more images than specified: %s" % ( - ', '.join(sorted(["'%s'" % image for image in loaded_images] + list(loaded_image_ids))), )) + f"The archive contained more images than specified: {found_more}") if is_image_name_id(self.name): return self.client.find_image_by_id(self.name, accept_missing_image=True) @@ -1068,7 +1059,7 @@ def main(): ) if not is_valid_tag(client.module.params['tag'], allow_empty=True): - client.fail('"{0}" is not a valid docker tag!'.format(client.module.params['tag'])) + client.fail(f'"{client.module.params["tag"]}" is not a valid docker tag!') if client.module.params['source'] == 'build': if not client.module.params['build'] or not client.module.params['build'].get('path'): @@ -1084,10 +1075,10 @@ def main(): ImageManager(client, results) client.module.exit_json(**results) except DockerException as e: - client.fail('An unexpected Docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc()) + client.fail(f'An unexpected Docker error occurred: {e}', exception=traceback.format_exc()) except RequestException as e: client.fail( - 'An unexpected requests error occurred when trying to talk to the Docker daemon: {0}'.format(to_native(e)), + f'An unexpected requests error occurred when trying to talk to the Docker daemon: {e}', exception=traceback.format_exc()) diff --git a/plugins/modules/docker_image_build.py b/plugins/modules/docker_image_build.py index d83549df..b54aaa1d 100644 --- a/plugins/modules/docker_image_build.py +++ b/plugins/modules/docker_image_build.py @@ -313,17 +313,18 @@ def convert_to_bytes(value, module, name, unlimited_value=None): return unlimited_value return human_to_bytes(value) except ValueError as exc: - module.fail_json(msg='Failed to 
convert %s to bytes: %s' % (name, to_native(exc))) + module.fail_json(msg=f'Failed to convert {name} to bytes: {exc}') def dict_to_list(dictionary, concat='='): - return ['%s%s%s' % (k, concat, v) for k, v in sorted(dictionary.items())] + return [f'{k}{concat}{v}' for k, v in sorted(dictionary.items())] def _quote_csv(input): if input.strip() == input and all(i not in input for i in '",\r\n'): return input - return '"{0}"'.format(input.replace('"', '""')) + input = input.replace('"', '""') + return f'"{input}"' class ImageBuilder(DockerBaseClass): @@ -349,33 +350,29 @@ class ImageBuilder(DockerBaseClass): buildx = self.client.get_client_plugin_info('buildx') if buildx is None: - self.fail('Docker CLI {0} does not have the buildx plugin installed'.format(self.client.get_cli())) + self.fail(f'Docker CLI {self.client.get_cli()} does not have the buildx plugin installed') buildx_version = buildx['Version'].lstrip('v') if self.secrets: for secret in self.secrets: if secret['type'] in ('env', 'value'): if LooseVersion(buildx_version) < LooseVersion('0.6.0'): - self.fail('The Docker buildx plugin has version {version}, but 0.6.0 is needed for secrets of type=env and type=value'.format( - version=buildx_version, - )) + self.fail(f'The Docker buildx plugin has version {buildx_version}, but 0.6.0 is needed for secrets of type=env and type=value') if self.outputs and len(self.outputs) > 1: if LooseVersion(buildx_version) < LooseVersion('0.13.0'): - self.fail('The Docker buildx plugin has version {version}, but 0.13.0 is needed to specify more than one output'.format( - version=buildx_version, - )) + self.fail(f'The Docker buildx plugin has version {buildx_version}, but 0.13.0 is needed to specify more than one output') self.path = parameters['path'] if not os.path.isdir(self.path): - self.fail('"{0}" is not an existing directory'.format(self.path)) + self.fail(f'"{self.path}" is not an existing directory') self.dockerfile = parameters['dockerfile'] if self.dockerfile and not 
os.path.isfile(os.path.join(self.path, self.dockerfile)): - self.fail('"{0}" is not an existing file'.format(os.path.join(self.path, self.dockerfile))) + self.fail(f'"{os.path.join(self.path, self.dockerfile)}" is not an existing file') self.name = parameters['name'] self.tag = parameters['tag'] if not is_valid_tag(self.tag, allow_empty=True): - self.fail('"{0}" is not a valid docker tag'.format(self.tag)) + self.fail(f'"{self.tag}" is not a valid docker tag') if is_image_name_id(self.name): self.fail('Image name must not be a digest') @@ -390,7 +387,7 @@ class ImageBuilder(DockerBaseClass): if self.outputs: found = False - name_tag = '%s:%s' % (self.name, self.tag) + name_tag = f'{self.name}:{self.tag}' for output in self.outputs: if output['type'] == 'image': if not output['name']: @@ -406,11 +403,8 @@ class ImageBuilder(DockerBaseClass): }) if LooseVersion(buildx_version) < LooseVersion('0.13.0'): self.fail( - "The output does not include an image with name {name_tag}, and the Docker" - " buildx plugin has version {version} which only supports one output.".format( - name_tag=name_tag, - version=buildx_version, - ), + f"The output does not include an image with name {name_tag}, and the Docker" + f" buildx plugin has version {buildx_version} which only supports one output." 
) def fail(self, msg, **kwargs): @@ -423,7 +417,7 @@ class ImageBuilder(DockerBaseClass): def add_args(self, args): environ_update = {} if not self.outputs: - args.extend(['--tag', '%s:%s' % (self.name, self.tag)]) + args.extend(['--tag', f'{self.name}:{self.tag}']) if self.dockerfile: args.extend(['--file', os.path.join(self.path, self.dockerfile)]) if self.cache_from: @@ -450,41 +444,47 @@ class ImageBuilder(DockerBaseClass): if self.secrets: random_prefix = None for index, secret in enumerate(self.secrets): + sid = secret['id'] if secret['type'] == 'file': - args.extend(['--secret', 'id={id},type=file,src={src}'.format(id=secret['id'], src=secret['src'])]) + src = secret['src'] + args.extend(['--secret', f'id={sid},type=file,src={src}']) if secret['type'] == 'env': - args.extend(['--secret', 'id={id},type=env,env={env}'.format(id=secret['id'], env=secret['src'])]) + env = secret['src'] + args.extend(['--secret', f'id={sid},type=env,env={env}']) if secret['type'] == 'value': # We pass values on using environment variables. The user has been warned in the documentation # that they should only use this mechanism when being comfortable with it. 
if random_prefix is None: # Use /dev/urandom to generate some entropy to make the environment variable's name unguessable random_prefix = base64.b64encode(os.urandom(16)).decode('utf-8').replace('=', '') - env_name = 'ANSIBLE_DOCKER_COMPOSE_ENV_SECRET_{random}_{id}'.format( - random=random_prefix, - id=index, - ) + env_name = f'ANSIBLE_DOCKER_COMPOSE_ENV_SECRET_{random_prefix}_{index}' environ_update[env_name] = secret['value'] - args.extend(['--secret', 'id={id},type=env,env={env}'.format(id=secret['id'], env=env_name)]) + args.extend(['--secret', f'id={sid},type=env,env={env_name}']) if self.outputs: for output in self.outputs: subargs = [] if output['type'] == 'local': - subargs.extend(['type=local', 'dest={dest}'.format(dest=output['dest'])]) + dest = output['dest'] + subargs.extend(['type=local', f'dest={dest}']) if output['type'] == 'tar': - subargs.extend(['type=tar', 'dest={dest}'.format(dest=output['dest'])]) + dest = output['dest'] + subargs.extend(['type=tar', f'dest={dest}']) if output['type'] == 'oci': - subargs.extend(['type=oci', 'dest={dest}'.format(dest=output['dest'])]) + dest = output['dest'] + subargs.extend(['type=oci', f'dest={dest}']) if output['type'] == 'docker': subargs.append('type=docker') + dest = output['dest'] if output['dest'] is not None: - subargs.append('dest={dest}'.format(dest=output['dest'])) + subargs.append(f'dest={dest}') if output['context'] is not None: - subargs.append('context={context}'.format(context=output['context'])) + context = output['context'] + subargs.append(f'context={context}') if output['type'] == 'image': subargs.append('type=image') if output['name'] is not None: - subargs.append('name={name}'.format(name=','.join(output['name']))) + name = ','.join(output['name']) + subargs.append(f'name={name}') if output['push']: subargs.append('push=true') if subargs: @@ -510,7 +510,7 @@ class ImageBuilder(DockerBaseClass): args.extend(['--', self.path]) rc, stdout, stderr = self.client.call_cli(*args, 
environ_update=environ_update) if rc != 0: - self.fail('Building %s:%s failed' % (self.name, self.tag), stdout=to_native(stdout), stderr=to_native(stderr), command=args) + self.fail(f'Building {self.name}:{self.tag} failed', stdout=to_native(stdout), stderr=to_native(stderr), command=args) results['stdout'] = to_native(stdout) results['stderr'] = to_native(stderr) results['image'] = self.client.find_image(self.name, self.tag) or {} @@ -590,7 +590,7 @@ def main(): results = ImageBuilder(client).build_image() client.module.exit_json(**results) except DockerException as e: - client.fail('An unexpected Docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc()) + client.fail(f'An unexpected Docker error occurred: {e}', exception=traceback.format_exc()) if __name__ == '__main__': diff --git a/plugins/modules/docker_image_export.py b/plugins/modules/docker_image_export.py index 85161d29..41e349ec 100644 --- a/plugins/modules/docker_image_export.py +++ b/plugins/modules/docker_image_export.py @@ -95,8 +95,6 @@ images: import traceback -from ansible.module_utils.common.text.converters import to_native - from ansible_collections.community.docker.plugins.module_utils.common_api import ( AnsibleDockerClient, RequestException, @@ -135,7 +133,7 @@ class ImageExportManager(DockerBaseClass): self.tag = parameters['tag'] if not is_valid_tag(self.tag, allow_empty=True): - self.fail('"{0}" is not a valid docker tag'.format(self.tag)) + self.fail(f'"{self.tag}" is not a valid docker tag') # If name contains a tag, it takes precedence over tag parameter. 
self.names = [] @@ -146,7 +144,7 @@ class ImageExportManager(DockerBaseClass): repo, repo_tag = parse_repository_tag(name) if not repo_tag: repo_tag = self.tag - self.names.append({'name': repo, 'tag': repo_tag, 'joined': '%s:%s' % (repo, repo_tag)}) + self.names.append({'name': repo, 'tag': repo_tag, 'joined': f'{repo}:{repo_tag}'}) if not self.names: self.fail('At least one image name must be specified') @@ -163,7 +161,7 @@ class ImageExportManager(DockerBaseClass): if archived_images is None: return 'Overwriting since no image is present in archive' except ImageArchiveInvalidException as exc: - self.log('Unable to extract manifest summary from archive: %s' % to_native(exc)) + self.log(f'Unable to extract manifest summary from archive: {exc}') return 'Overwriting an unreadable archive file' left_names = list(self.names) @@ -175,11 +173,9 @@ class ImageExportManager(DockerBaseClass): found = True break if not found: - return 'Overwriting archive since it contains unexpected image %s named %s' % ( - archived_image.image_id, ', '.join(archived_image.repo_tags) - ) + return f'Overwriting archive since it contains unexpected image {archived_image.image_id} named {", ".join(archived_image.repo_tags)}' if left_names: - return 'Overwriting archive since it is missing image(s) %s' % (', '.join([name['joined'] for name in left_names])) + return f"Overwriting archive since it is missing image(s) {', '.join([name['joined'] for name in left_names])}" return None @@ -189,13 +185,13 @@ class ImageExportManager(DockerBaseClass): for chunk in chunks: fd.write(chunk) except Exception as exc: - self.fail("Error writing image archive %s - %s" % (self.path, to_native(exc))) + self.fail(f"Error writing image archive {self.path} - {exc}") def export_images(self): image_names = [name['joined'] for name in self.names] image_names_str = ', '.join(image_names) if len(image_names) == 1: - self.log("Getting archive of image %s" % image_names[0]) + self.log(f"Getting archive of image 
{image_names[0]}") try: chunks = self.client._stream_raw_result( self.client._get(self.client._url('/images/{0}/get', image_names[0]), stream=True), @@ -203,9 +199,9 @@ class ImageExportManager(DockerBaseClass): False, ) except Exception as exc: - self.fail("Error getting image %s - %s" % (image_names[0], to_native(exc))) + self.fail(f"Error getting image {image_names[0]} - {exc}") else: - self.log("Getting archive of images %s" % image_names_str) + self.log(f"Getting archive of images {image_names_str}") try: chunks = self.client._stream_raw_result( self.client._get( @@ -217,7 +213,7 @@ class ImageExportManager(DockerBaseClass): False, ) except Exception as exc: - self.fail("Error getting images %s - %s" % (image_names_str, to_native(exc))) + self.fail(f"Error getting images {image_names_str} - {exc}") self.write_chunks(chunks) @@ -233,7 +229,7 @@ class ImageExportManager(DockerBaseClass): else: image = self.client.find_image(name=name['name'], tag=name['tag']) if not image: - self.fail("Image %s not found" % name['joined']) + self.fail(f"Image {name['joined']} not found") images.append(image) # Will have a 'sha256:' prefix @@ -272,10 +268,10 @@ def main(): results = ImageExportManager(client).run() client.module.exit_json(**results) except DockerException as e: - client.fail('An unexpected Docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc()) + client.fail(f'An unexpected Docker error occurred: {e}', exception=traceback.format_exc()) except RequestException as e: client.fail( - 'An unexpected requests error occurred when trying to talk to the Docker daemon: {0}'.format(to_native(e)), + f'An unexpected requests error occurred when trying to talk to the Docker daemon: {e}', exception=traceback.format_exc()) diff --git a/plugins/modules/docker_image_info.py b/plugins/modules/docker_image_info.py index 6671d5f1..3fa8d72d 100644 --- a/plugins/modules/docker_image_info.py +++ b/plugins/modules/docker_image_info.py @@ -137,8 +137,6 @@ 
images: import traceback -from ansible.module_utils.common.text.converters import to_native - from ansible_collections.community.docker.plugins.module_utils.common_api import ( AnsibleDockerClient, RequestException, @@ -160,7 +158,7 @@ class ImageManager(DockerBaseClass): self.client = client self.results = results self.name = self.client.module.params.get('name') - self.log("Gathering facts for images: %s" % (str(self.name))) + self.log(f"Gathering facts for images: {self.name}") if self.name: self.results['images'] = self.get_facts() @@ -185,13 +183,13 @@ class ImageManager(DockerBaseClass): for name in names: if is_image_name_id(name): - self.log('Fetching image %s (ID)' % (name)) + self.log(f'Fetching image {name} (ID)') image = self.client.find_image_by_id(name, accept_missing_image=True) else: repository, tag = parse_repository_tag(name) if not tag: tag = 'latest' - self.log('Fetching image %s:%s' % (repository, tag)) + self.log(f'Fetching image {repository}:{tag}') image = self.client.find_image(name=repository, tag=tag) if image: results.append(image) @@ -210,7 +208,7 @@ class ImageManager(DockerBaseClass): except NotFound: inspection = None except Exception as exc: - self.fail("Error inspecting image %s - %s" % (image['Id'], to_native(exc))) + self.fail(f"Error inspecting image {image['Id']} - {exc}") results.append(inspection) return results @@ -234,10 +232,10 @@ def main(): ImageManager(client, results) client.module.exit_json(**results) except DockerException as e: - client.fail('An unexpected Docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc()) + client.fail(f'An unexpected Docker error occurred: {e}', exception=traceback.format_exc()) except RequestException as e: client.fail( - 'An unexpected requests error occurred when trying to talk to the Docker daemon: {0}'.format(to_native(e)), + f'An unexpected requests error occurred when trying to talk to the Docker daemon: {e}', exception=traceback.format_exc()) diff --git 
a/plugins/modules/docker_image_load.py b/plugins/modules/docker_image_load.py index 5d6376e1..0ca6ea3d 100644 --- a/plugins/modules/docker_image_load.py +++ b/plugins/modules/docker_image_load.py @@ -82,8 +82,6 @@ images: import errno import traceback -from ansible.module_utils.common.text.converters import to_native - from ansible_collections.community.docker.plugins.module_utils.common_api import ( AnsibleDockerClient, RequestException, @@ -127,19 +125,19 @@ class ImageManager(DockerBaseClass): # Load image(s) from file load_output = [] try: - self.log("Opening image {0}".format(self.path)) + self.log(f"Opening image {self.path}") with open(self.path, 'rb') as image_tar: - self.log("Loading images from {0}".format(self.path)) + self.log(f"Loading images from {self.path}") res = self.client._post(self.client._url("/images/load"), data=image_tar, stream=True) for line in self.client._stream_helper(res, decode=True): self.log(line, pretty_print=True) self._extract_output_line(line, load_output) except EnvironmentError as exc: if exc.errno == errno.ENOENT: - self.client.fail("Error opening archive {0} - {1}".format(self.path, to_native(exc))) - self.client.fail("Error loading archive {0} - {1}".format(self.path, to_native(exc)), stdout='\n'.join(load_output)) + self.client.fail(f"Error opening archive {self.path} - {exc}") + self.client.fail(f"Error loading archive {self.path} - {exc}", stdout='\n'.join(load_output)) except Exception as exc: - self.client.fail("Error loading archive {0} - {1}".format(self.path, to_native(exc)), stdout='\n'.join(load_output)) + self.client.fail(f"Error loading archive {self.path} - {exc}", stdout='\n'.join(load_output)) # Collect loaded images loaded_images = [] @@ -160,7 +158,7 @@ class ImageManager(DockerBaseClass): image_name, tag = image_name.rsplit(':', 1) images.append(self.client.find_image(image_name, tag)) else: - self.client.module.warn('Image name "{0}" is neither ID nor has a tag'.format(image_name)) + 
self.client.module.warn(f'Image name "{image_name}" is neither ID nor has a tag') self.results['image_names'] = loaded_images self.results['images'] = images @@ -185,10 +183,10 @@ def main(): ImageManager(client, results) client.module.exit_json(**results) except DockerException as e: - client.fail('An unexpected Docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc()) + client.fail(f'An unexpected Docker error occurred: {e}', exception=traceback.format_exc()) except RequestException as e: client.fail( - 'An unexpected requests error occurred when trying to talk to the Docker daemon: {0}'.format(to_native(e)), + f'An unexpected requests error occurred when trying to talk to the Docker daemon: {e}', exception=traceback.format_exc()) diff --git a/plugins/modules/docker_image_pull.py b/plugins/modules/docker_image_pull.py index d62c428f..0df7a41f 100644 --- a/plugins/modules/docker_image_pull.py +++ b/plugins/modules/docker_image_pull.py @@ -92,8 +92,6 @@ image: import traceback -from ansible.module_utils.common.text.converters import to_native - from ansible_collections.community.docker.plugins.module_utils.common_api import ( AnsibleDockerClient, RequestException, @@ -142,7 +140,7 @@ class ImagePuller(DockerBaseClass): if is_image_name_id(self.name): self.client.fail("Cannot pull an image by ID") if not is_valid_tag(self.tag, allow_empty=True): - self.client.fail('"{0}" is not a valid docker tag!'.format(self.tag)) + self.client.fail(f'"{self.tag}" is not a valid docker tag!') # If name contains a tag, it takes precedence over tag parameter. 
repo, repo_tag = parse_repository_tag(self.name) @@ -178,7 +176,7 @@ class ImagePuller(DockerBaseClass): if compare_platform_strings(wanted_platform, image_platform): return results - results['actions'].append('Pulled image %s:%s' % (self.name, self.tag)) + results['actions'].append(f'Pulled image {self.name}:{self.tag}') if self.check_mode: results['changed'] = True results['diff']['after'] = image_info(dict(Id='unknown')) @@ -212,10 +210,10 @@ def main(): results = ImagePuller(client).pull() client.module.exit_json(**results) except DockerException as e: - client.fail('An unexpected Docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc()) + client.fail(f'An unexpected Docker error occurred: {e}', exception=traceback.format_exc()) except RequestException as e: client.fail( - 'An unexpected requests error occurred when trying to talk to the Docker daemon: {0}'.format(to_native(e)), + f'An unexpected requests error occurred when trying to talk to the Docker daemon: {e}', exception=traceback.format_exc()) diff --git a/plugins/modules/docker_image_push.py b/plugins/modules/docker_image_push.py index ee75f270..883e4c91 100644 --- a/plugins/modules/docker_image_push.py +++ b/plugins/modules/docker_image_push.py @@ -74,8 +74,6 @@ image: import base64 import traceback -from ansible.module_utils.common.text.converters import to_native - from ansible_collections.community.docker.plugins.module_utils.common_api import ( AnsibleDockerClient, RequestException, @@ -112,7 +110,7 @@ class ImagePusher(DockerBaseClass): if is_image_name_id(self.name): self.client.fail("Cannot push an image by ID") if not is_valid_tag(self.tag, allow_empty=True): - self.client.fail('"{0}" is not a valid docker tag!'.format(self.tag)) + self.client.fail(f'"{self.tag}" is not a valid docker tag!') # If name contains a tag, it takes precedence over tag parameter. 
repo, repo_tag = parse_repository_tag(self.name) @@ -123,12 +121,12 @@ class ImagePusher(DockerBaseClass): if is_image_name_id(self.tag): self.client.fail("Cannot push an image by digest") if not is_valid_tag(self.tag, allow_empty=False): - self.client.fail('"{0}" is not a valid docker tag!'.format(self.tag)) + self.client.fail(f'"{self.tag}" is not a valid docker tag!') def push(self): image = self.client.find_image(name=self.name, tag=self.tag) if not image: - self.client.fail('Cannot find image %s:%s' % (self.name, self.tag)) + self.client.fail(f'Cannot find image {self.name}:{self.tag}') results = dict( changed=False, @@ -138,7 +136,7 @@ class ImagePusher(DockerBaseClass): push_registry, push_repo = resolve_repository_name(self.name) try: - results['actions'].append('Pushed image %s:%s' % (self.name, self.tag)) + results['actions'].append(f'Pushed image {self.name}:{self.tag}') headers = {} header = get_config_header(self.client, push_registry) @@ -165,12 +163,10 @@ class ImagePusher(DockerBaseClass): except Exception as exc: if 'unauthorized' in str(exc): if 'authentication required' in str(exc): - self.client.fail("Error pushing image %s/%s:%s - %s. Try logging into %s first." % - (push_registry, push_repo, self.tag, to_native(exc), push_registry)) + self.client.fail(f"Error pushing image {push_registry}/{push_repo}:{self.tag} - {exc}. Try logging into {push_registry} first.") else: - self.client.fail("Error pushing image %s/%s:%s - %s. Does the repository exist?" % - (push_registry, push_repo, self.tag, str(exc))) - self.client.fail("Error pushing image %s:%s: %s" % (self.name, self.tag, to_native(exc))) + self.client.fail(f"Error pushing image {push_registry}/{push_repo}:{self.tag} - {exc}. 
Does the repository exist?") + self.client.fail(f"Error pushing image {self.name}:{self.tag}: {exc}") return results @@ -190,10 +186,10 @@ def main(): results = ImagePusher(client).push() client.module.exit_json(**results) except DockerException as e: - client.fail('An unexpected Docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc()) + client.fail(f'An unexpected Docker error occurred: {e}', exception=traceback.format_exc()) except RequestException as e: client.fail( - 'An unexpected requests error occurred when trying to talk to the Docker daemon: {0}'.format(to_native(e)), + f'An unexpected requests error occurred when trying to talk to the Docker daemon: {e}', exception=traceback.format_exc()) diff --git a/plugins/modules/docker_image_remove.py b/plugins/modules/docker_image_remove.py index 23d3f795..294cfc4f 100644 --- a/plugins/modules/docker_image_remove.py +++ b/plugins/modules/docker_image_remove.py @@ -99,8 +99,6 @@ untagged: import traceback -from ansible.module_utils.common.text.converters import to_native - from ansible_collections.community.docker.plugins.module_utils.common_api import ( AnsibleDockerClient, RequestException, @@ -134,7 +132,7 @@ class ImageRemover(DockerBaseClass): self.prune = parameters['prune'] if not is_valid_tag(self.tag, allow_empty=True): - self.fail('"{0}" is not a valid docker tag'.format(self.tag)) + self.fail(f'"{self.tag}" is not a valid docker tag') # If name contains a tag, it takes precedence over tag parameter. 
if not is_image_name_id(self.name): @@ -171,7 +169,7 @@ class ImageRemover(DockerBaseClass): else: image = self.client.find_image(name, self.tag) if self.tag: - name = "%s:%s" % (self.name, self.tag) + name = f"{self.name}:{self.tag}" if self.diff: results['diff'] = dict(before=self.get_diff_state(image)) @@ -182,7 +180,7 @@ class ImageRemover(DockerBaseClass): return results results['changed'] = True - results['actions'].append("Removed image %s" % (name)) + results['actions'].append(f"Removed image {name}") results['image'] = image if not self.check_mode: @@ -192,7 +190,7 @@ class ImageRemover(DockerBaseClass): # If the image vanished while we were trying to remove it, do not fail res = [] except Exception as exc: - self.fail("Error removing image %s - %s" % (name, to_native(exc))) + self.fail(f"Error removing image {name} - {exc}") for entry in res: if entry.get('Untagged'): @@ -257,10 +255,10 @@ def main(): results = ImageRemover(client).absent() client.module.exit_json(**results) except DockerException as e: - client.fail('An unexpected Docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc()) + client.fail(f'An unexpected Docker error occurred: {e}', exception=traceback.format_exc()) except RequestException as e: client.fail( - 'An unexpected requests error occurred when trying to talk to the Docker daemon: {0}'.format(to_native(e)), + f'An unexpected requests error occurred when trying to talk to the Docker daemon: {e}', exception=traceback.format_exc()) diff --git a/plugins/modules/docker_image_tag.py b/plugins/modules/docker_image_tag.py index 3001bbc8..fcd23619 100644 --- a/plugins/modules/docker_image_tag.py +++ b/plugins/modules/docker_image_tag.py @@ -102,7 +102,6 @@ tagged_images: import traceback -from ansible.module_utils.common.text.converters import to_native from ansible.module_utils.common.text.formatters import human_to_bytes from ansible_collections.community.docker.plugins.module_utils.common_api import ( @@ -130,7 
+129,7 @@ def convert_to_bytes(value, module, name, unlimited_value=None): return unlimited_value return human_to_bytes(value) except ValueError as exc: - module.fail_json(msg='Failed to convert %s to bytes: %s' % (name, to_native(exc))) + module.fail_json(msg=f'Failed to convert {name} to bytes: {exc}') def image_info(name, tag, image): @@ -153,7 +152,7 @@ class ImageTagger(DockerBaseClass): self.name = parameters['name'] self.tag = parameters['tag'] if not is_valid_tag(self.tag, allow_empty=True): - self.fail('"{0}" is not a valid docker tag'.format(self.tag)) + self.fail(f'"{self.tag}" is not a valid docker tag') # If name contains a tag, it takes precedence over tag parameter. if not is_image_name_id(self.name): @@ -168,12 +167,12 @@ class ImageTagger(DockerBaseClass): self.repositories = [] for i, repository in enumerate(parameters['repository']): if is_image_name_id(repository): - self.fail("repository[%d] must not be an image ID; got: %s" % (i + 1, repository)) + self.fail(f"repository[{i + 1}] must not be an image ID; got: {repository}") repo, repo_tag = parse_repository_tag(repository) if not repo_tag: repo_tag = parameters['tag'] elif not is_valid_tag(repo_tag, allow_empty=False): - self.fail("repository[%d] must not have a digest; got: %s" % (i + 1, repository)) + self.fail(f"repository[{i + 1}] must not have a digest; got: {repository}") self.repositories.append((repo, repo_tag)) def fail(self, msg): @@ -186,16 +185,16 @@ class ImageTagger(DockerBaseClass): if tagged_image['Id'] == image['Id']: return ( False, - "target image already exists (%s) and is as expected" % tagged_image['Id'], + f"target image already exists ({tagged_image['Id']}) and is as expected", tagged_image, ) if self.keep_existing_images: return ( False, - "target image already exists (%s) and is not as expected, but kept" % tagged_image['Id'], + f"target image already exists ({tagged_image['Id']}) and is not as expected, but kept", tagged_image, ) - msg = "target image existed (%s) 
and was not as expected" % tagged_image['Id'] + msg = f"target image existed ({tagged_image['Id']}) and was not as expected" else: msg = "target image did not exist" @@ -211,7 +210,7 @@ class ImageTagger(DockerBaseClass): if res.status_code != 201: raise Exception("Tag operation failed.") except Exception as exc: - self.fail("Error: failed to tag image as %s:%s - %s" % (name, tag, to_native(exc))) + self.fail(f"Error: failed to tag image as {name}:{tag} - {exc}") return True, msg, tagged_image @@ -221,7 +220,7 @@ class ImageTagger(DockerBaseClass): else: image = self.client.find_image(name=self.name, tag=self.tag) if not image: - self.fail("Cannot find image %s:%s" % (self.name, self.tag)) + self.fail(f"Cannot find image {self.name}:{self.tag}") before = [] after = [] @@ -239,10 +238,10 @@ class ImageTagger(DockerBaseClass): after.append(image_info(repository, tag, image if tagged else old_image)) if tagged: results['changed'] = True - results['actions'].append('Tagged image %s as %s:%s: %s' % (image['Id'], repository, tag, msg)) - tagged_images.append('%s:%s' % (repository, tag)) + results['actions'].append(f"Tagged image {image['Id']} as {repository}:{tag}: {msg}") + tagged_images.append(f'{repository}:{tag}') else: - results['actions'].append('Not tagged image %s as %s:%s: %s' % (image['Id'], repository, tag, msg)) + results['actions'].append(f"Not tagged image {image['Id']} as {repository}:{tag}: {msg}") return results @@ -264,10 +263,10 @@ def main(): results = ImageTagger(client).tag_images() client.module.exit_json(**results) except DockerException as e: - client.fail('An unexpected Docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc()) + client.fail(f'An unexpected Docker error occurred: {e}', exception=traceback.format_exc()) except RequestException as e: client.fail( - 'An unexpected requests error occurred when trying to talk to the Docker daemon: {0}'.format(to_native(e)), + f'An unexpected requests error occurred when 
trying to talk to the Docker daemon: {e}', exception=traceback.format_exc()) diff --git a/plugins/modules/docker_login.py b/plugins/modules/docker_login.py index 22d5add8..225e653c 100644 --- a/plugins/modules/docker_login.py +++ b/plugins/modules/docker_login.py @@ -121,7 +121,7 @@ import json import os import traceback -from ansible.module_utils.common.text.converters import to_bytes, to_text, to_native +from ansible.module_utils.common.text.converters import to_bytes, to_text from ansible_collections.community.docker.plugins.module_utils.common_api import ( AnsibleDockerClient, @@ -305,12 +305,12 @@ class LoginManager(DockerBaseClass): :return: None ''' - self.results['actions'].append("Logged into %s" % (self.registry_url)) - self.log("Log into %s with username %s" % (self.registry_url, self.username)) + self.results['actions'].append(f"Logged into {self.registry_url}") + self.log(f"Log into {self.registry_url} with username {self.username}") try: response = self._login(self.reauthorize) except Exception as exc: - self.fail("Logging into %s for user %s failed - %s" % (self.registry_url, self.username, to_native(exc))) + self.fail(f"Logging into {self.registry_url} for user {self.username} failed - {exc}") # If user is already logged in, then response contains password for user if 'password' in response: @@ -321,7 +321,7 @@ class LoginManager(DockerBaseClass): try: response = self._login(True) except Exception as exc: - self.fail("Logging into %s for user %s failed - %s" % (self.registry_url, self.username, to_native(exc))) + self.fail(f"Logging into {self.registry_url} for user {self.username} failed - {exc}") response.pop('password', None) self.results['login_result'] = response @@ -341,7 +341,7 @@ class LoginManager(DockerBaseClass): store.get(self.registry_url) except CredentialsNotFound: # get raises an exception on not found. - self.log("Credentials for %s not present, doing nothing." 
% (self.registry_url)) + self.log(f"Credentials for {self.registry_url} not present, doing nothing.") self.results['changed'] = False return @@ -372,9 +372,8 @@ class LoginManager(DockerBaseClass): if current['Username'] != self.username or current['Secret'] != self.password or self.reauthorize: if not self.check_mode: store.store(self.registry_url, self.username, self.password) - self.log("Writing credentials to configured helper %s for %s" % (store.program, self.registry_url)) - self.results['actions'].append("Wrote credentials to configured helper %s for %s" % ( - store.program, self.registry_url)) + self.log(f"Writing credentials to configured helper {store.program} for {self.registry_url}") + self.results['actions'].append(f"Wrote credentials to configured helper {store.program} for {self.registry_url}") self.results['changed'] = True def get_credential_store_instance(self, registry, dockercfg_path): @@ -394,7 +393,7 @@ class LoginManager(DockerBaseClass): # Make sure that there is a credential helper before trying to instantiate a # Store object. 
if store_name: - self.log("Found credential store %s" % store_name) + self.log(f"Found credential store {store_name}") return Store(store_name, environment=credstore_env) return DockerFileStore(dockercfg_path) @@ -435,10 +434,10 @@ def main(): del results['actions'] client.module.exit_json(**results) except DockerException as e: - client.fail('An unexpected Docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc()) + client.fail(f'An unexpected Docker error occurred: {e}', exception=traceback.format_exc()) except RequestException as e: client.fail( - 'An unexpected requests error occurred when trying to talk to the Docker daemon: {0}'.format(to_native(e)), + f'An unexpected requests error occurred when trying to talk to the Docker daemon: {e}', exception=traceback.format_exc()) diff --git a/plugins/modules/docker_network.py b/plugins/modules/docker_network.py index a9bd04fd..149eb250 100644 --- a/plugins/modules/docker_network.py +++ b/plugins/modules/docker_network.py @@ -355,7 +355,7 @@ def validate_cidr(cidr): return 'ipv4' elif CIDR_IPV6.match(cidr): return 'ipv6' - raise ValueError('"{0}" is not a valid CIDR'.format(cidr)) + raise ValueError(f'"{cidr}" is not a valid CIDR') def normalize_ipam_config_key(key): @@ -389,8 +389,8 @@ class DockerNetworkManager(object): self.parameters = TaskParameters(client) self.check_mode = self.client.check_mode self.results = { - u'changed': False, - u'actions': [] + 'changed': False, + 'actions': [] } self.diff = self.client.module._diff self.diff_tracker = DifferenceTracker() @@ -454,7 +454,7 @@ class DockerNetworkManager(object): else: for key, value in self.parameters.driver_options.items(): if not (key in net['Options']) or value != net['Options'][key]: - differences.add('driver_options.%s' % key, + differences.add(f'driver_options.{key}', parameter=value, active=net['Options'].get(key)) @@ -497,7 +497,7 @@ class DockerNetworkManager(object): # (but have default value None if not specified) 
continue if value != net_config.get(key): - differences.add('ipam_config[%s].%s' % (idx, key), + differences.add(f'ipam_config[{idx}].{key}', parameter=value, active=net_config.get(key)) @@ -536,7 +536,7 @@ class DockerNetworkManager(object): else: for key, value in self.parameters.labels.items(): if not (key in net['Labels']) or value != net['Labels'][key]: - differences.add('labels.%s' % key, + differences.add(f'labels.{key}', parameter=value, active=net['Labels'].get(key)) @@ -596,7 +596,7 @@ class DockerNetworkManager(object): resp = self.client.post_json_to_json('/networks/create', data=data) self.client.report_warnings(resp, ['Warning']) self.existing_network = self.client.get_network(network_id=resp['Id']) - self.results['actions'].append("Created network %s with driver %s" % (self.parameters.name, self.parameters.driver)) + self.results['actions'].append(f"Created network {self.parameters.name} with driver {self.parameters.driver}") self.results['changed'] = True def remove_network(self): @@ -607,7 +607,7 @@ class DockerNetworkManager(object): if self.existing_network.get('Scope', 'local') == 'swarm': while self.get_existing_network(): time.sleep(0.1) - self.results['actions'].append("Removed network %s" % (self.parameters.name,)) + self.results['actions'].append(f"Removed network {self.parameters.name}") self.results['changed'] = True def is_container_connected(self, container_name): @@ -621,10 +621,10 @@ class DockerNetworkManager(object): return bool(container) except DockerException as e: - self.client.fail('An unexpected Docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc()) + self.client.fail(f'An unexpected Docker error occurred: {e}', exception=traceback.format_exc()) except RequestException as e: self.client.fail( - 'An unexpected requests error occurred when trying to talk to the Docker daemon: {0}'.format(to_native(e)), + f'An unexpected requests error occurred when trying to talk to the Docker daemon: {e}', 
exception=traceback.format_exc()) def connect_containers(self): @@ -636,9 +636,9 @@ class DockerNetworkManager(object): "EndpointConfig": None, } self.client.post_json('/networks/{0}/connect', self.parameters.name, data=data) - self.results['actions'].append("Connected container %s" % (name,)) + self.results['actions'].append(f"Connected container {name}") self.results['changed'] = True - self.diff_tracker.add('connected.{0}'.format(name), parameter=True, active=False) + self.diff_tracker.add(f'connected.{name}', parameter=True, active=False) def disconnect_missing(self): if not self.existing_network: @@ -662,9 +662,9 @@ class DockerNetworkManager(object): if not self.check_mode: data = {"Container": container_name, "Force": True} self.client.post_json('/networks/{0}/disconnect', self.parameters.name, data=data) - self.results['actions'].append("Disconnected container %s" % (container_name,)) + self.results['actions'].append(f"Disconnected container {container_name}") self.results['changed'] = True - self.diff_tracker.add('connected.{0}'.format(container_name), + self.diff_tracker.add(f'connected.{container_name}', parameter=False, active=True) @@ -747,10 +747,10 @@ def main(): cm = DockerNetworkManager(client) client.module.exit_json(**cm.results) except DockerException as e: - client.fail('An unexpected Docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc()) + client.fail(f'An unexpected Docker error occurred: {e}', exception=traceback.format_exc()) except RequestException as e: client.fail( - 'An unexpected requests error occurred when trying to talk to the Docker daemon: {0}'.format(to_native(e)), + f'An unexpected requests error occurred when trying to talk to the Docker daemon: {e}', exception=traceback.format_exc()) diff --git a/plugins/modules/docker_network_info.py b/plugins/modules/docker_network_info.py index b07a4133..96745223 100644 --- a/plugins/modules/docker_network_info.py +++ b/plugins/modules/docker_network_info.py @@ 
-98,8 +98,6 @@ network: import traceback -from ansible.module_utils.common.text.converters import to_native - from ansible_collections.community.docker.plugins.module_utils.common_api import ( AnsibleDockerClient, RequestException, @@ -126,10 +124,10 @@ def main(): network=network, ) except DockerException as e: - client.fail('An unexpected Docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc()) + client.fail(f'An unexpected Docker error occurred: {e}', exception=traceback.format_exc()) except RequestException as e: client.fail( - 'An unexpected requests error occurred when trying to talk to the Docker daemon: {0}'.format(to_native(e)), + f'An unexpected requests error occurred when trying to talk to the Docker daemon: {e}', exception=traceback.format_exc()) diff --git a/plugins/modules/docker_node.py b/plugins/modules/docker_node.py index bf5c8f89..177a492e 100644 --- a/plugins/modules/docker_node.py +++ b/plugins/modules/docker_node.py @@ -198,7 +198,7 @@ class SwarmNodeManager(DockerBaseClass): try: node_info = self.client.inspect_node(node_id=self.parameters.hostname) except APIError as exc: - self.client.fail("Failed to get node information for %s" % to_native(exc)) + self.client.fail(f"Failed to get node information for {exc}") changed = False node_spec = dict( @@ -247,9 +247,8 @@ class SwarmNodeManager(DockerBaseClass): changed = True else: self.client.module.warn( - "Label '%s' listed both in 'labels' and 'labels_to_remove'. " - "Keeping the assigned label value." - % to_native(key)) + f"Label '{to_native(key)}' listed both in 'labels' and 'labels_to_remove'. 
" + "Keeping the assigned label value.") else: if node_spec['Labels'].get(key): node_spec['Labels'].pop(key) @@ -261,7 +260,7 @@ class SwarmNodeManager(DockerBaseClass): self.client.update_node(node_id=node_info['ID'], version=node_info['Version']['Index'], node_spec=node_spec) except APIError as exc: - self.client.fail("Failed to update node : %s" % to_native(exc)) + self.client.fail(f"Failed to update node : {exc}") self.results['node'] = self.client.get_node_inspect(node_id=node_info['ID']) self.results['changed'] = changed else: @@ -293,10 +292,10 @@ def main(): SwarmNodeManager(client, results) client.module.exit_json(**results) except DockerException as e: - client.fail('An unexpected docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc()) + client.fail(f'An unexpected Docker error occurred: {e}', exception=traceback.format_exc()) except RequestException as e: client.fail( - 'An unexpected requests error occurred when Docker SDK for Python tried to talk to the docker daemon: {0}'.format(to_native(e)), + f'An unexpected requests error occurred when Docker SDK for Python tried to talk to the docker daemon: {e}', exception=traceback.format_exc()) diff --git a/plugins/modules/docker_node_info.py b/plugins/modules/docker_node_info.py index 9f6c7bf3..f548e3d1 100644 --- a/plugins/modules/docker_node_info.py +++ b/plugins/modules/docker_node_info.py @@ -88,8 +88,6 @@ nodes: import traceback -from ansible.module_utils.common.text.converters import to_native - from ansible_collections.community.docker.plugins.module_utils.common import ( RequestException, ) @@ -149,10 +147,10 @@ def main(): nodes=nodes, ) except DockerException as e: - client.fail('An unexpected docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc()) + client.fail(f'An unexpected docker error occurred: {e}', exception=traceback.format_exc()) except RequestException as e: client.fail( - 'An unexpected requests error occurred when Docker SDK for 
Python tried to talk to the docker daemon: {0}'.format(to_native(e)), + f'An unexpected requests error occurred when Docker SDK for Python tried to talk to the docker daemon: {e}', exception=traceback.format_exc()) diff --git a/plugins/modules/docker_plugin.py b/plugins/modules/docker_plugin.py index 3b07d208..878fdd92 100644 --- a/plugins/modules/docker_plugin.py +++ b/plugins/modules/docker_plugin.py @@ -162,7 +162,7 @@ class TaskParameters(DockerBaseClass): def prepare_options(options): - return ['%s=%s' % (k, v if v is not None else "") for k, v in options.items()] if options else [] + return [f'{k}={v if v is not None else ""}' for k, v in options.items()] if options else [] def parse_options(options_list): @@ -227,7 +227,7 @@ class DockerPluginManager(object): if ((not existing_options.get(key) and value) or not value or value != existing_options[key]): - differences.add('plugin_options.%s' % key, + differences.add(f'plugin_options.{key}', parameter=value, active=existing_options.get(key)) @@ -262,7 +262,7 @@ class DockerPluginManager(object): except APIError as e: self.client.fail(to_native(e)) - self.actions.append("Installed plugin %s" % self.preferred_name) + self.actions.append(f"Installed plugin {self.preferred_name}") self.changed = True def remove_plugin(self): @@ -274,7 +274,7 @@ class DockerPluginManager(object): except APIError as e: self.client.fail(to_native(e)) - self.actions.append("Removed plugin %s" % self.preferred_name) + self.actions.append(f"Removed plugin {self.preferred_name}") self.changed = True def update_plugin(self): @@ -287,7 +287,7 @@ class DockerPluginManager(object): self.client.post_json('/plugins/{0}/set', self.preferred_name, data=data) except APIError as e: self.client.fail(to_native(e)) - self.actions.append("Updated plugin %s settings" % self.preferred_name) + self.actions.append(f"Updated plugin {self.preferred_name} settings") self.changed = True else: self.client.fail("Cannot update the plugin: Plugin does not exist") 
@@ -322,7 +322,7 @@ class DockerPluginManager(object): self.client.post_json('/plugins/{0}/enable', self.preferred_name, params={'timeout': timeout}) except APIError as e: self.client.fail(to_native(e)) - self.actions.append("Enabled plugin %s" % self.preferred_name) + self.actions.append(f"Enabled plugin {self.preferred_name}") self.changed = True else: self.install_plugin() @@ -331,7 +331,7 @@ class DockerPluginManager(object): self.client.post_json('/plugins/{0}/enable', self.preferred_name, params={'timeout': timeout}) except APIError as e: self.client.fail(to_native(e)) - self.actions.append("Enabled plugin %s" % self.preferred_name) + self.actions.append(f"Enabled plugin {self.preferred_name}") self.changed = True def disable(self): @@ -342,7 +342,7 @@ class DockerPluginManager(object): self.client.post_json('/plugins/{0}/disable', self.preferred_name) except APIError as e: self.client.fail(to_native(e)) - self.actions.append("Disable plugin %s" % self.preferred_name) + self.actions.append(f"Disable plugin {self.preferred_name}") self.changed = True else: self.client.fail("Plugin not found: Plugin does not exist.") @@ -384,10 +384,10 @@ def main(): cm = DockerPluginManager(client) client.module.exit_json(**cm.result) except DockerException as e: - client.fail('An unexpected docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc()) + client.fail(f'An unexpected docker error occurred: {e}', exception=traceback.format_exc()) except RequestException as e: client.fail( - 'An unexpected requests error occurred when trying to talk to the Docker daemon: {0}'.format(to_native(e)), + f'An unexpected requests error occurred when trying to talk to the Docker daemon: {e}', exception=traceback.format_exc()) diff --git a/plugins/modules/docker_prune.py b/plugins/modules/docker_prune.py index 645f50e4..0095709f 100644 --- a/plugins/modules/docker_prune.py +++ b/plugins/modules/docker_prune.py @@ -230,7 +230,6 @@ builder_cache_caches_deleted: import 
traceback -from ansible.module_utils.common.text.converters import to_native from ansible.module_utils.common.text.formatters import human_to_bytes from ansible_collections.community.docker.plugins.module_utils.common_api import ( @@ -276,7 +275,7 @@ def main(): try: builder_cache_keep_storage = human_to_bytes(client.module.params.get('builder_cache_keep_storage')) except ValueError as exc: - client.module.fail_json(msg='Error while parsing value of builder_cache_keep_storage: {0}'.format(exc)) + client.module.fail_json(msg=f'Error while parsing value of builder_cache_keep_storage: {exc}') try: result = dict() @@ -337,10 +336,10 @@ def main(): result['changed'] = changed client.module.exit_json(**result) except DockerException as e: - client.fail('An unexpected Docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc()) + client.fail(f'An unexpected Docker error occurred: {e}', exception=traceback.format_exc()) except RequestException as e: client.fail( - 'An unexpected requests error occurred when trying to talk to the Docker daemon: {0}'.format(to_native(e)), + f'An unexpected requests error occurred when trying to talk to the Docker daemon: {e}', exception=traceback.format_exc()) diff --git a/plugins/modules/docker_secret.py b/plugins/modules/docker_secret.py index 86a38e64..a4481937 100644 --- a/plugins/modules/docker_secret.py +++ b/plugins/modules/docker_secret.py @@ -206,7 +206,7 @@ from ansible_collections.community.docker.plugins.module_utils.util import ( compare_generic, sanitize_labels, ) -from ansible.module_utils.common.text.converters import to_native, to_bytes +from ansible.module_utils.common.text.converters import to_bytes class SecretManager(DockerBaseClass): @@ -234,7 +234,7 @@ class SecretManager(DockerBaseClass): with open(data_src, 'rb') as f: self.data = f.read() except Exception as exc: - self.client.fail('Error while reading {src}: {error}'.format(src=data_src, error=to_native(exc))) + self.client.fail(f'Error while 
reading {data_src}: {exc}') self.labels = parameters.get('labels') self.force = parameters.get('force') self.rolling_versions = parameters.get('rolling_versions') @@ -272,13 +272,13 @@ class SecretManager(DockerBaseClass): try: secrets = self.client.secrets(filters={'name': self.name}) except APIError as exc: - self.client.fail("Error accessing secret %s: %s" % (self.name, to_native(exc))) + self.client.fail(f"Error accessing secret {self.name}: {exc}") if self.rolling_versions: self.secrets = [ secret for secret in secrets - if secret['Spec']['Name'].startswith('{name}_v'.format(name=self.name)) + if secret['Spec']['Name'].startswith(f'{self.name}_v') ] self.secrets.sort(key=self.get_version) else: @@ -296,7 +296,7 @@ class SecretManager(DockerBaseClass): if self.rolling_versions: self.version += 1 labels['ansible_version'] = str(self.version) - self.name = '{name}_v{version}'.format(name=self.name, version=self.version) + self.name = f'{self.name}_v{self.version}' if self.labels: labels.update(self.labels) @@ -305,7 +305,7 @@ class SecretManager(DockerBaseClass): secret_id = self.client.create_secret(self.name, self.data, labels=labels) self.secrets += self.client.secrets(filters={'id': secret_id}) except APIError as exc: - self.client.fail("Error creating secret: %s" % to_native(exc)) + self.client.fail(f"Error creating secret: {exc}") if isinstance(secret_id, dict): secret_id = secret_id['ID'] @@ -317,7 +317,7 @@ class SecretManager(DockerBaseClass): if not self.check_mode: self.client.remove_secret(secret['ID']) except APIError as exc: - self.client.fail("Error removing secret %s: %s" % (secret['Spec']['Name'], to_native(exc))) + self.client.fail(f"Error removing secret {secret['Spec']['Name']}: {exc}") def present(self): ''' Handles state == 'present', creating or updating the secret ''' @@ -397,10 +397,10 @@ def main(): SecretManager(client, results)() client.module.exit_json(**results) except DockerException as e: - client.fail('An unexpected docker error 
occurred: {0}'.format(to_native(e)), exception=traceback.format_exc()) + client.fail(f'An unexpected Docker error occurred: {e}', exception=traceback.format_exc()) except RequestException as e: client.fail( - 'An unexpected requests error occurred when Docker SDK for Python tried to talk to the docker daemon: {0}'.format(to_native(e)), + f'An unexpected requests error occurred when Docker SDK for Python tried to talk to the docker daemon: {e}', exception=traceback.format_exc()) diff --git a/plugins/modules/docker_stack.py b/plugins/modules/docker_stack.py index b6b98621..64765e47 100644 --- a/plugins/modules/docker_stack.py +++ b/plugins/modules/docker_stack.py @@ -121,7 +121,7 @@ stack_spec_diff: definition. sample: > "stack_spec_diff": - {'test_stack_test_service': {u'TaskTemplate': {u'ContainerSpec': {delete: [u'Env']}}}} + {'test_stack_test_service': {'TaskTemplate': {'ContainerSpec': {delete: ['Env']}}}} returned: on change type: dict """ @@ -184,7 +184,7 @@ except ImportError: def docker_stack_services(client, stack_name): rc, out, err = client.call_cli("stack", "services", stack_name, "--format", "{{.Name}}") - if to_native(err) == "Nothing found in stack: %s\n" % stack_name: + if to_native(err) == f"Nothing found in stack: {stack_name}\n": return [] return to_native(out).strip().split('\n') @@ -230,7 +230,7 @@ def docker_stack_rm(client, stack_name, retries, interval): command += ["--detach=false"] rc, out, err = client.call_cli(*command) - while to_native(err) != "Nothing found in stack: %s\n" % stack_name and retries > 0: + while to_native(err) != f"Nothing found in stack: {stack_name}\n" and retries > 0: sleep(interval) retries = retries - 1 rc, out, err = client.call_cli(*command) @@ -281,7 +281,7 @@ def main(): elif isinstance(compose_def, str): compose_files.append(compose_def) else: - client.fail("compose element '%s' must be a string or a dictionary" % compose_def) + client.fail(f"compose element '{compose_def}' must be a string or a dictionary") 
before_stack_services = docker_stack_inspect(client, name) @@ -340,7 +340,7 @@ def main(): ) client.module.exit_json(changed=False) except DockerException as e: - client.fail('An unexpected Docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc()) + client.fail(f'An unexpected Docker error occurred: {e}', exception=traceback.format_exc()) if __name__ == "__main__": diff --git a/plugins/modules/docker_stack_info.py b/plugins/modules/docker_stack_info.py index a5f7fc82..72dbd9c0 100644 --- a/plugins/modules/docker_stack_info.py +++ b/plugins/modules/docker_stack_info.py @@ -110,7 +110,7 @@ def main(): results=ret, ) except DockerException as e: - client.fail('An unexpected Docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc()) + client.fail(f'An unexpected Docker error occurred: {e}', exception=traceback.format_exc()) if __name__ == "__main__": diff --git a/plugins/modules/docker_stack_task_info.py b/plugins/modules/docker_stack_task_info.py index 6d9a7fb9..1269c235 100644 --- a/plugins/modules/docker_stack_task_info.py +++ b/plugins/modules/docker_stack_task_info.py @@ -120,7 +120,7 @@ def main(): results=ret, ) except DockerException as e: - client.fail('An unexpected Docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc()) + client.fail(f'An unexpected Docker error occurred: {e}', exception=traceback.format_exc()) if __name__ == "__main__": diff --git a/plugins/modules/docker_swarm.py b/plugins/modules/docker_swarm.py index ab2cae8e..a20987a1 100644 --- a/plugins/modules/docker_swarm.py +++ b/plugins/modules/docker_swarm.py @@ -309,8 +309,6 @@ from ansible_collections.community.docker.plugins.module_utils.util import ( from ansible_collections.community.docker.plugins.module_utils.swarm import AnsibleDockerSwarmClient -from ansible.module_utils.common.text.converters import to_native - class TaskParameters(DockerBaseClass): def __init__(self): @@ -531,7 +529,7 @@ class 
SwarmManager(DockerBaseClass): try: self.client.init_swarm(**init_arguments) except APIError as exc: - self.client.fail("Can not create a new Swarm Cluster: %s" % to_native(exc)) + self.client.fail(f"Can not create a new Swarm Cluster: {exc}") if not self.client.check_if_swarm_manager(): if not self.check_mode: @@ -539,7 +537,7 @@ class SwarmManager(DockerBaseClass): self.created = True self.inspect_swarm() - self.results['actions'].append("New Swarm cluster created: %s" % (self.swarm_info.get('ID'))) + self.results['actions'].append(f"New Swarm cluster created: {self.swarm_info.get('ID')}") self.differences.add('state', parameter='present', active='absent') self.results['changed'] = True self.results['swarm_facts'] = { @@ -567,7 +565,7 @@ class SwarmManager(DockerBaseClass): rotate_worker_token=self.parameters.rotate_worker_token, rotate_manager_token=self.parameters.rotate_manager_token) except APIError as exc: - self.client.fail("Can not update a Swarm Cluster: %s" % to_native(exc)) + self.client.fail(f"Can not update a Swarm Cluster: {exc}") return self.inspect_swarm() @@ -590,7 +588,7 @@ class SwarmManager(DockerBaseClass): try: self.client.join_swarm(**join_arguments) except APIError as exc: - self.client.fail("Can not join the Swarm Cluster: %s" % to_native(exc)) + self.client.fail(f"Can not join the Swarm Cluster: {exc}") self.results['actions'].append("New node is added to swarm cluster") self.differences.add('joined', parameter=True, active=False) self.results['changed'] = True @@ -603,7 +601,7 @@ class SwarmManager(DockerBaseClass): try: self.client.leave_swarm(force=self.force) except APIError as exc: - self.client.fail("This node can not leave the Swarm Cluster: %s" % to_native(exc)) + self.client.fail(f"This node can not leave the Swarm Cluster: {exc}") self.results['actions'].append("Node has left the swarm cluster") self.differences.add('joined', parameter='absent', active='present') self.results['changed'] = True @@ -624,7 +622,7 @@ class 
SwarmManager(DockerBaseClass): try: self.client.remove_node(node_id=self.node_id, force=self.force) except APIError as exc: - self.client.fail("Can not remove the node from the Swarm Cluster: %s" % to_native(exc)) + self.client.fail(f"Can not remove the node from the Swarm Cluster: {exc}") self.results['actions'].append("Node is removed from swarm cluster.") self.differences.add('joined', parameter=False, active=True) self.results['changed'] = True @@ -707,10 +705,10 @@ def main(): SwarmManager(client, results)() client.module.exit_json(**results) except DockerException as e: - client.fail('An unexpected docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc()) + client.fail(f'An unexpected Docker error occurred: {e}', exception=traceback.format_exc()) except RequestException as e: client.fail( - 'An unexpected requests error occurred when Docker SDK for Python tried to talk to the docker daemon: {0}'.format(to_native(e)), + f'An unexpected requests error occurred when Docker SDK for Python tried to talk to the docker daemon: {e}', exception=traceback.format_exc()) diff --git a/plugins/modules/docker_swarm_info.py b/plugins/modules/docker_swarm_info.py index 5d2acd4d..c57aa3d7 100644 --- a/plugins/modules/docker_swarm_info.py +++ b/plugins/modules/docker_swarm_info.py @@ -192,8 +192,6 @@ except ImportError: # missing Docker SDK for Python handled in ansible.module_utils.docker_common pass -from ansible.module_utils.common.text.converters import to_native - from ansible_collections.community.docker.plugins.module_utils.swarm import AnsibleDockerSwarmClient from ansible_collections.community.docker.plugins.module_utils.common import RequestException from ansible_collections.community.docker.plugins.module_utils.util import ( @@ -231,7 +229,7 @@ class DockerSwarmManager(DockerBaseClass): try: return self.client.inspect_swarm() except APIError as exc: - self.client.fail("Error inspecting docker swarm: %s" % to_native(exc)) + 
self.client.fail(f"Error inspecting docker swarm: {exc}") def get_docker_items_list(self, docker_object=None, filters=None): items = None @@ -245,8 +243,7 @@ class DockerSwarmManager(DockerBaseClass): elif docker_object == 'services': items = self.client.services(filters=filters) except APIError as exc: - self.client.fail("Error inspecting docker swarm for object '%s': %s" % - (docker_object, to_native(exc))) + self.client.fail(f"Error inspecting docker swarm for object '{docker_object}': {exc}") if self.verbose_output: return items @@ -367,10 +364,10 @@ def main(): results.update(client.fail_results) client.module.exit_json(**results) except DockerException as e: - client.fail('An unexpected docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc()) + client.fail(f'An unexpected Docker error occurred: {e}', exception=traceback.format_exc()) except RequestException as e: client.fail( - 'An unexpected requests error occurred when Docker SDK for Python tried to talk to the docker daemon: {0}'.format(to_native(e)), + f'An unexpected requests error occurred when Docker SDK for Python tried to talk to the docker daemon: {e}', exception=traceback.format_exc()) diff --git a/plugins/modules/docker_swarm_service.py b/plugins/modules/docker_swarm_service.py index 22947b6c..c9b443fb 100644 --- a/plugins/modules/docker_swarm_service.py +++ b/plugins/modules/docker_swarm_service.py @@ -869,7 +869,7 @@ from ansible_collections.community.docker.plugins.module_utils.util import ( ) from ansible.module_utils.basic import human_to_bytes -from ansible.module_utils.common.text.converters import to_text, to_native +from ansible.module_utils.common.text.converters import to_text try: from docker import types @@ -909,7 +909,7 @@ def get_docker_environment(env, env_files): if not isinstance(value, str): raise ValueError( 'Non-string value found for env option. ' - 'Ambiguous env options must be wrapped in quotes to avoid YAML parsing. 
Key: %s' % name + f'Ambiguous env options must be wrapped in quotes to avoid YAML parsing. Key: {name}' ) env_dict[name] = str(value) elif env is not None and isinstance(env, list): @@ -921,7 +921,7 @@ def get_docker_environment(env, env_files): env_dict[name] = value elif env is not None: raise ValueError( - 'Invalid type for env %s (%s). Only list or dict allowed.' % (env, type(env)) + f'Invalid type for env {env} ({type(env)}). Only list or dict allowed.' ) env_list = format_environment(env_dict) if not env_list: @@ -968,7 +968,7 @@ def get_docker_networks(networks, network_ids): if network: invalid_keys = ', '.join(network.keys()) raise TypeError( - '%s are not valid keys for the networks option' % invalid_keys + f'{invalid_keys} are not valid keys for the networks option' ) else: @@ -979,7 +979,7 @@ def get_docker_networks(networks, network_ids): try: parsed_network['id'] = network_ids[network_name] except KeyError as e: - raise ValueError('Could not find a network named: %s.' % e) + raise ValueError(f'Could not find a network named: {e}.') parsed_networks.append(parsed_network) return parsed_networks or [] @@ -996,8 +996,7 @@ def get_nanoseconds_from_raw_option(name, value): return convert_duration_to_nanosecond(value) else: raise ValueError( - 'Invalid type for %s %s (%s). Only string or int allowed.' - % (name, value, type(value)) + f'Invalid type for {name} {value} ({type(value)}). Only string or int allowed.' 
) @@ -1385,7 +1384,7 @@ class DockerService(DockerBaseClass): try: memory = human_to_bytes(memory) except ValueError as exc: - raise Exception('Failed to convert limit_memory to bytes: %s' % exc) + raise Exception(f'Failed to convert limit_memory to bytes: {exc}') return { 'limit_cpu': cpus, 'limit_memory': memory, @@ -1407,7 +1406,7 @@ class DockerService(DockerBaseClass): try: memory = human_to_bytes(memory) except ValueError as exc: - raise Exception('Failed to convert reserve_memory to bytes: %s' % exc) + raise Exception(f'Failed to convert reserve_memory to bytes: {exc}') return { 'reserve_cpu': cpus, 'reserve_memory': memory, @@ -1483,21 +1482,19 @@ class DockerService(DockerBaseClass): if invalid_items: errors = ', '.join( [ - '%s (%s) at index %s' % (item, type(item), index) + f'{item} ({type(item)}) at index {index}' for index, item in invalid_items ] ) raise Exception( 'All items in a command list need to be strings. ' - 'Check quoting. Invalid items: %s.' - % errors + f'Check quoting. Invalid items: {errors}.' ) s.command = ap['command'] elif s.command is not None: raise ValueError( - 'Invalid type for command %s (%s). ' + f'Invalid type for command {s.command} ({type(s.command)}). ' 'Only string or list allowed. Check quoting.' 
- % (s.command, type(s.command)) ) s.env = get_docker_environment(ap['env'], ap['env_files']) @@ -1577,7 +1574,7 @@ class DockerService(DockerBaseClass): tmpfs_size = human_to_bytes(tmpfs_size) except ValueError as exc: raise ValueError( - 'Failed to convert tmpfs_size to bytes: %s' % exc + f'Failed to convert tmpfs_size to bytes: {exc}' ) service_m['tmpfs_size'] = tmpfs_size @@ -2214,7 +2211,7 @@ class DockerServiceManager(object): ds.mode = to_text('replicated-job', encoding='utf-8') ds.replicas = mode['ReplicatedJob']['TotalCompletions'] else: - raise Exception('Unknown service mode: %s' % mode) + raise Exception(f'Unknown service mode: {mode}') raw_data_mounts = task_template_data['ContainerSpec'].get('Mounts') if raw_data_mounts: @@ -2314,7 +2311,7 @@ class DockerServiceManager(object): name = repo + ':' + tag distribution_data = self.client.inspect_distribution(name) digest = distribution_data['Descriptor']['digest'] - return '%s@%s' % (name, digest) + return f'{name}@{digest}' def get_networks_names_ids(self): return dict( @@ -2341,7 +2338,7 @@ class DockerServiceManager(object): for secret_name in secret_names: if secret_name not in secrets: self.client.fail( - 'Could not find a secret named "%s"' % secret_name + f'Could not find a secret named "{secret_name}"' ) return secrets @@ -2365,7 +2362,7 @@ class DockerServiceManager(object): for config_name in config_names: if config_name not in configs: self.client.fail( - 'Could not find a config named "%s"' % config_name + f'Could not find a config named "{config_name}"' ) return configs @@ -2381,16 +2378,14 @@ class DockerServiceManager(object): ) except DockerException as e: self.client.fail( - 'Error looking for an image named %s: %s' - % (image, to_native(e)) + f'Error looking for an image named {image}: {e}' ) try: current_service = self.get_service(module.params['name']) except Exception as e: self.client.fail( - 'Error looking for service named %s: %s' - % (module.params['name'], to_native(e)) + f"Error 
looking for service named {module.params['name']}: {e}" ) try: secret_ids = self.get_missing_secret_ids() @@ -2407,7 +2402,7 @@ class DockerServiceManager(object): ) except Exception as e: return self.client.fail( - 'Error parsing module parameters: %s' % to_native(e) + f'Error parsing module parameters: {e}' ) changed = False @@ -2792,10 +2787,10 @@ def main(): client.module.exit_json(**results) except DockerException as e: - client.fail('An unexpected docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc()) + client.fail(f'An unexpected Docker error occurred: {e}', exception=traceback.format_exc()) except RequestException as e: client.fail( - 'An unexpected requests error occurred when Docker SDK for Python tried to talk to the docker daemon: {0}'.format(to_native(e)), + f'An unexpected requests error occurred when Docker SDK for Python tried to talk to the docker daemon: {e}', exception=traceback.format_exc()) diff --git a/plugins/modules/docker_swarm_service_info.py b/plugins/modules/docker_swarm_service_info.py index eba7a29c..3fa084eb 100644 --- a/plugins/modules/docker_swarm_service_info.py +++ b/plugins/modules/docker_swarm_service_info.py @@ -64,8 +64,6 @@ service: import traceback -from ansible.module_utils.common.text.converters import to_native - try: from docker.errors import DockerException except ImportError: @@ -109,10 +107,10 @@ def main(): exists=bool(service) ) except DockerException as e: - client.fail('An unexpected docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc()) + client.fail(f'An unexpected Docker error occurred: {e}', exception=traceback.format_exc()) except RequestException as e: client.fail( - 'An unexpected requests error occurred when Docker SDK for Python tried to talk to the docker daemon: {0}'.format(to_native(e)), + f'An unexpected requests error occurred when Docker SDK for Python tried to talk to the docker daemon: {e}', exception=traceback.format_exc()) diff --git 
a/plugins/modules/docker_volume.py b/plugins/modules/docker_volume.py index 35d8e43f..16d43627 100644 --- a/plugins/modules/docker_volume.py +++ b/plugins/modules/docker_volume.py @@ -159,8 +159,8 @@ class DockerVolumeManager(object): self.parameters = TaskParameters(client) self.check_mode = self.client.check_mode self.results = { - u'changed': False, - u'actions': [] + 'changed': False, + 'actions': [] } self.diff = self.client.module._diff self.diff_tracker = DifferenceTracker() @@ -185,10 +185,10 @@ class DockerVolumeManager(object): except APIError as e: self.client.fail(to_native(e)) - if volumes[u'Volumes'] is None: + if volumes['Volumes'] is None: return None - for volume in volumes[u'Volumes']: + for volume in volumes['Volumes']: if volume['Name'] == self.parameters.volume_name: return volume @@ -212,14 +212,14 @@ class DockerVolumeManager(object): for key, value in self.parameters.driver_options.items(): if (not self.existing_volume['Options'].get(key) or value != self.existing_volume['Options'][key]): - differences.add('driver_options.%s' % key, + differences.add(f'driver_options.{key}', parameter=value, active=self.existing_volume['Options'].get(key)) if self.parameters.labels: existing_labels = self.existing_volume.get('Labels') or {} for label in self.parameters.labels: if existing_labels.get(label) != self.parameters.labels.get(label): - differences.add('labels.%s' % label, + differences.add(f'labels.{label}', parameter=self.parameters.labels.get(label), active=existing_labels.get(label)) @@ -241,7 +241,7 @@ class DockerVolumeManager(object): except APIError as e: self.client.fail(to_native(e)) - self.results['actions'].append("Created volume %s with driver %s" % (self.parameters.volume_name, self.parameters.driver)) + self.results['actions'].append(f"Created volume {self.parameters.volume_name} with driver {self.parameters.driver}") self.results['changed'] = True def remove_volume(self): @@ -252,7 +252,7 @@ class DockerVolumeManager(object): except 
APIError as e: self.client.fail(to_native(e)) - self.results['actions'].append("Removed volume %s" % self.parameters.volume_name) + self.results['actions'].append(f"Removed volume {self.parameters.volume_name}") self.results['changed'] = True def present(self): @@ -304,10 +304,10 @@ def main(): cm = DockerVolumeManager(client) client.module.exit_json(**cm.results) except DockerException as e: - client.fail('An unexpected Docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc()) + client.fail(f'An unexpected Docker error occurred: {e}', exception=traceback.format_exc()) except RequestException as e: client.fail( - 'An unexpected requests error occurred when trying to talk to the Docker daemon: {0}'.format(to_native(e)), + f'An unexpected requests error occurred when trying to talk to the Docker daemon: {e}', exception=traceback.format_exc()) diff --git a/plugins/modules/docker_volume_info.py b/plugins/modules/docker_volume_info.py index ad38432c..986fe797 100644 --- a/plugins/modules/docker_volume_info.py +++ b/plugins/modules/docker_volume_info.py @@ -72,8 +72,6 @@ volume: import traceback -from ansible.module_utils.common.text.converters import to_native - from ansible_collections.community.docker.plugins.module_utils.common_api import ( AnsibleDockerClient, RequestException, @@ -87,7 +85,7 @@ def get_existing_volume(client, volume_name): except NotFound as dummy: return None except Exception as exc: - client.fail("Error inspecting volume: %s" % to_native(exc)) + client.fail(f"Error inspecting volume: {exc}") def main(): @@ -109,10 +107,10 @@ def main(): volume=volume, ) except DockerException as e: - client.fail('An unexpected Docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc()) + client.fail(f'An unexpected Docker error occurred: {e}', exception=traceback.format_exc()) except RequestException as e: client.fail( - 'An unexpected requests error occurred when trying to talk to the Docker daemon: 
{0}'.format(to_native(e)), + f'An unexpected requests error occurred when trying to talk to the Docker daemon: {e}', exception=traceback.format_exc()) diff --git a/plugins/plugin_utils/common.py b/plugins/plugin_utils/common.py index f3c7c05c..65941f84 100644 --- a/plugins/plugin_utils/common.py +++ b/plugins/plugin_utils/common.py @@ -28,7 +28,7 @@ class AnsibleDockerClient(AnsibleDockerClientBase): def fail(self, msg, **kwargs): if kwargs: - msg += '\nContext:\n' + '\n'.join(' {0} = {1!r}'.format(k, v) for (k, v) in kwargs.items()) + msg += '\nContext:\n' + '\n'.join(f' {k} = {v!r}' for (k, v) in kwargs.items()) raise AnsibleConnectionFailure(msg) def deprecate(self, msg, version=None, date=None, collection_name=None): diff --git a/plugins/plugin_utils/common_api.py b/plugins/plugin_utils/common_api.py index 97d2ebdb..f096a9a3 100644 --- a/plugins/plugin_utils/common_api.py +++ b/plugins/plugin_utils/common_api.py @@ -26,7 +26,7 @@ class AnsibleDockerClient(AnsibleDockerClientBase): def fail(self, msg, **kwargs): if kwargs: - msg += '\nContext:\n' + '\n'.join(' {0} = {1!r}'.format(k, v) for (k, v) in kwargs.items()) + msg += '\nContext:\n' + '\n'.join(f' {k} = {v!r}' for (k, v) in kwargs.items()) raise AnsibleConnectionFailure(msg) def deprecate(self, msg, version=None, date=None, collection_name=None): diff --git a/plugins/plugin_utils/unsafe.py b/plugins/plugin_utils/unsafe.py index a03d1e34..a6ad363f 100644 --- a/plugins/plugin_utils/unsafe.py +++ b/plugins/plugin_utils/unsafe.py @@ -13,7 +13,7 @@ from ansible.utils.unsafe_proxy import ( wrap_var as _make_unsafe, ) -_RE_TEMPLATE_CHARS = re.compile(u'[{}]') +_RE_TEMPLATE_CHARS = re.compile('[{}]') _RE_TEMPLATE_CHARS_BYTES = re.compile(b'[{}]') diff --git a/tests/unit/plugins/inventory/test_docker_containers.py b/tests/unit/plugins/inventory/test_docker_containers.py index 86771577..7bdb7c3f 100644 --- a/tests/unit/plugins/inventory/test_docker_containers.py +++ 
b/tests/unit/plugins/inventory/test_docker_containers.py @@ -107,8 +107,8 @@ class FakeClient(object): 'Image': host['Config']['Image'], 'ImageId': host['Image'], }) - self.get_results['/containers/{0}/json'.format(host['Name'])] = host - self.get_results['/containers/{0}/json'.format(host['Id'])] = host + self.get_results[f"/containers/{host['Name']}/json"] = host + self.get_results[f"/containers/{host['Id']}/json"] = host self.get_results['/containers/json'] = list_reply def get_json(self, url, *param, **kwargs): diff --git a/tests/unit/plugins/module_utils/_api/api/test_client.py b/tests/unit/plugins/module_utils/_api/api/test_client.py index 8f4e9a6a..483e1944 100644 --- a/tests/unit/plugins/module_utils/_api/api/test_client.py +++ b/tests/unit/plugins/module_utils/_api/api/test_client.py @@ -74,7 +74,7 @@ def fake_resp(method, url, *args, **kwargs): elif (url, method) in fake_api.fake_responses: key = (url, method) if not key: - raise Exception('{method} {url}'.format(method=method, url=url)) + raise Exception(f'{method} {url}') status_code, content = fake_api.fake_responses[key]() return response(status_code=status_code, content=content) @@ -102,10 +102,8 @@ def fake_read_from_socket(self, response, stream, tty=False, demux=False): return b"" -url_base = '{prefix}/'.format(prefix=fake_api.prefix) -url_prefix = '{0}v{1}/'.format( - url_base, - DEFAULT_DOCKER_API_VERSION) +url_base = f'{fake_api.prefix}/' +url_prefix = f'{url_base}v{DEFAULT_DOCKER_API_VERSION}/' class BaseAPIClientTest(unittest.TestCase): @@ -147,22 +145,18 @@ class DockerApiTest(BaseAPIClientTest): def test_url_valid_resource(self): url = self.client._url('/hello/{0}/world', 'somename') - assert url == '{0}{1}'.format(url_prefix, 'hello/somename/world') + assert url == f'{url_prefix}hello/somename/world' url = self.client._url( '/hello/{0}/world/{1}', 'somename', 'someothername' ) - assert url == '{0}{1}'.format( - url_prefix, 'hello/somename/world/someothername' - ) + assert url == 
f'{url_prefix}hello/somename/world/someothername' url = self.client._url('/hello/{0}/world', 'some?name') - assert url == '{0}{1}'.format(url_prefix, 'hello/some%3Fname/world') + assert url == f'{url_prefix}hello/some%3Fname/world' url = self.client._url("/images/{0}/push", "localhost:5000/image") - assert url == '{0}{1}'.format( - url_prefix, 'images/localhost:5000/image/push' - ) + assert url == f'{url_prefix}images/localhost:5000/image/push' def test_url_invalid_resource(self): with pytest.raises(ValueError): @@ -170,13 +164,13 @@ class DockerApiTest(BaseAPIClientTest): def test_url_no_resource(self): url = self.client._url('/simple') - assert url == '{0}{1}'.format(url_prefix, 'simple') + assert url == f'{url_prefix}simple' def test_url_unversioned_api(self): url = self.client._url( '/hello/{0}/world', 'somename', versioned_api=False ) - assert url == '{0}{1}'.format(url_base, 'hello/somename/world') + assert url == f'{url_base}hello/somename/world' def test_version(self): self.client.version() @@ -401,7 +395,7 @@ class UnixSocketStreamTest(unittest.TestCase): lines = [] for i in range(0, 50): line = str(i).encode() - lines += [('%x' % len(line)).encode(), line] + lines += [f'{len(line):x}'.encode(), line] lines.append(b'0') lines.append(b'') @@ -463,8 +457,7 @@ class TCPSocketStreamTest(unittest.TestCase): cls.thread = threading.Thread(target=cls.server.serve_forever) cls.thread.daemon = True cls.thread.start() - cls.address = 'http://{0}:{1}'.format( - socket.gethostname(), cls.server.server_address[1]) + cls.address = f'http://{socket.gethostname()}:{cls.server.server_address[1]}' @classmethod def teardown_class(cls): @@ -503,7 +496,7 @@ class TCPSocketStreamTest(unittest.TestCase): data += stderr_data return data else: - raise Exception('Unknown path {path}'.format(path=path)) + raise Exception(f'Unknown path {path}') @staticmethod def frame_header(stream, data): @@ -573,7 +566,7 @@ class UserAgentTest(unittest.TestCase): self.patcher = mock.patch.object( 
APIClient, 'send', - return_value=fake_resp("GET", "%s/version" % fake_api.prefix) + return_value=fake_resp("GET", f"{fake_api.prefix}/version") ) self.mock_send = self.patcher.start() diff --git a/tests/unit/plugins/module_utils/_api/fake_api.py b/tests/unit/plugins/module_utils/_api/fake_api.py index 9a312f9b..9f099ec0 100644 --- a/tests/unit/plugins/module_utils/_api/fake_api.py +++ b/tests/unit/plugins/module_utils/_api/fake_api.py @@ -15,7 +15,7 @@ from ansible_collections.community.docker.tests.unit.plugins.module_utils._api.c from . import fake_stat -CURRENT_VERSION = 'v{api_version}'.format(api_version=DEFAULT_DOCKER_API_VERSION) +CURRENT_VERSION = f'v{DEFAULT_DOCKER_API_VERSION}' FAKE_CONTAINER_ID = '3cc2351ab11b' FAKE_IMAGE_ID = 'e9aa60c60128' @@ -539,131 +539,117 @@ if constants.IS_WINDOWS_PLATFORM: prefix = 'http+docker://localnpipe' fake_responses = { - '{prefix}/version'.format(prefix=prefix): + f'{prefix}/version': get_fake_version, - '{prefix}/{CURRENT_VERSION}/version'.format(prefix=prefix, CURRENT_VERSION=CURRENT_VERSION): + f'{prefix}/{CURRENT_VERSION}/version': get_fake_version, - '{prefix}/{CURRENT_VERSION}/info'.format(prefix=prefix, CURRENT_VERSION=CURRENT_VERSION): + f'{prefix}/{CURRENT_VERSION}/info': get_fake_info, - '{prefix}/{CURRENT_VERSION}/auth'.format(prefix=prefix, CURRENT_VERSION=CURRENT_VERSION): + f'{prefix}/{CURRENT_VERSION}/auth': post_fake_auth, - '{prefix}/{CURRENT_VERSION}/_ping'.format(prefix=prefix, CURRENT_VERSION=CURRENT_VERSION): + f'{prefix}/{CURRENT_VERSION}/_ping': get_fake_ping, - '{prefix}/{CURRENT_VERSION}/images/search'.format(prefix=prefix, CURRENT_VERSION=CURRENT_VERSION): + f'{prefix}/{CURRENT_VERSION}/images/search': get_fake_search, - '{prefix}/{CURRENT_VERSION}/images/json'.format(prefix=prefix, CURRENT_VERSION=CURRENT_VERSION): + f'{prefix}/{CURRENT_VERSION}/images/json': get_fake_images, - '{prefix}/{CURRENT_VERSION}/images/test_image/history'.format(prefix=prefix, CURRENT_VERSION=CURRENT_VERSION): + 
f'{prefix}/{CURRENT_VERSION}/images/test_image/history': get_fake_image_history, - '{prefix}/{CURRENT_VERSION}/images/create'.format(prefix=prefix, CURRENT_VERSION=CURRENT_VERSION): + f'{prefix}/{CURRENT_VERSION}/images/create': post_fake_import_image, - '{prefix}/{CURRENT_VERSION}/containers/json'.format(prefix=prefix, CURRENT_VERSION=CURRENT_VERSION): + f'{prefix}/{CURRENT_VERSION}/containers/json': get_fake_containers, - '{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/start'.format(prefix=prefix, CURRENT_VERSION=CURRENT_VERSION): + f'{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/start': post_fake_start_container, - '{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/resize'.format(prefix=prefix, CURRENT_VERSION=CURRENT_VERSION): + f'{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/resize': post_fake_resize_container, - '{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/json'.format(prefix=prefix, CURRENT_VERSION=CURRENT_VERSION): + f'{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/json': get_fake_inspect_container, - '{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/rename'.format(prefix=prefix, CURRENT_VERSION=CURRENT_VERSION): + f'{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/rename': post_fake_rename_container, - '{prefix}/{CURRENT_VERSION}/images/e9aa60c60128/tag'.format(prefix=prefix, CURRENT_VERSION=CURRENT_VERSION): + f'{prefix}/{CURRENT_VERSION}/images/e9aa60c60128/tag': post_fake_tag_image, - '{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/wait'.format(prefix=prefix, CURRENT_VERSION=CURRENT_VERSION): + f'{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/wait': get_fake_wait, - '{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/logs'.format(prefix=prefix, CURRENT_VERSION=CURRENT_VERSION): + f'{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/logs': get_fake_logs, - '{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/changes'.format(prefix=prefix, CURRENT_VERSION=CURRENT_VERSION): + 
f'{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/changes': get_fake_diff, - '{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/export'.format(prefix=prefix, CURRENT_VERSION=CURRENT_VERSION): + f'{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/export': get_fake_export, - '{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/update'.format(prefix=prefix, CURRENT_VERSION=CURRENT_VERSION): + f'{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/update': post_fake_update_container, - '{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/exec'.format(prefix=prefix, CURRENT_VERSION=CURRENT_VERSION): + f'{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/exec': post_fake_exec_create, - '{prefix}/{CURRENT_VERSION}/exec/d5d177f121dc/start'.format(prefix=prefix, CURRENT_VERSION=CURRENT_VERSION): + f'{prefix}/{CURRENT_VERSION}/exec/d5d177f121dc/start': post_fake_exec_start, - '{prefix}/{CURRENT_VERSION}/exec/d5d177f121dc/json'.format(prefix=prefix, CURRENT_VERSION=CURRENT_VERSION): + f'{prefix}/{CURRENT_VERSION}/exec/d5d177f121dc/json': get_fake_exec_inspect, - '{prefix}/{CURRENT_VERSION}/exec/d5d177f121dc/resize'.format(prefix=prefix, CURRENT_VERSION=CURRENT_VERSION): + f'{prefix}/{CURRENT_VERSION}/exec/d5d177f121dc/resize': post_fake_exec_resize, - '{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/stats'.format(prefix=prefix, CURRENT_VERSION=CURRENT_VERSION): + f'{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/stats': get_fake_stats, - '{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/top'.format(prefix=prefix, CURRENT_VERSION=CURRENT_VERSION): + f'{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/top': get_fake_top, - '{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/stop'.format(prefix=prefix, CURRENT_VERSION=CURRENT_VERSION): + f'{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/stop': post_fake_stop_container, - '{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/kill'.format(prefix=prefix, CURRENT_VERSION=CURRENT_VERSION): + 
f'{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/kill': post_fake_kill_container, - '{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/pause'.format(prefix=prefix, CURRENT_VERSION=CURRENT_VERSION): + f'{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/pause': post_fake_pause_container, - '{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/unpause'.format(prefix=prefix, CURRENT_VERSION=CURRENT_VERSION): + f'{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/unpause': post_fake_unpause_container, - '{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/restart'.format(prefix=prefix, CURRENT_VERSION=CURRENT_VERSION): + f'{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/restart': post_fake_restart_container, - '{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b'.format(prefix=prefix, CURRENT_VERSION=CURRENT_VERSION): + f'{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b': delete_fake_remove_container, - '{prefix}/{CURRENT_VERSION}/images/create'.format(prefix=prefix, CURRENT_VERSION=CURRENT_VERSION): + f'{prefix}/{CURRENT_VERSION}/images/create': post_fake_image_create, - '{prefix}/{CURRENT_VERSION}/images/e9aa60c60128'.format(prefix=prefix, CURRENT_VERSION=CURRENT_VERSION): + f'{prefix}/{CURRENT_VERSION}/images/e9aa60c60128': delete_fake_remove_image, - '{prefix}/{CURRENT_VERSION}/images/e9aa60c60128/get'.format(prefix=prefix, CURRENT_VERSION=CURRENT_VERSION): + f'{prefix}/{CURRENT_VERSION}/images/e9aa60c60128/get': get_fake_get_image, - '{prefix}/{CURRENT_VERSION}/images/load'.format(prefix=prefix, CURRENT_VERSION=CURRENT_VERSION): + f'{prefix}/{CURRENT_VERSION}/images/load': post_fake_load_image, - '{prefix}/{CURRENT_VERSION}/images/test_image/json'.format(prefix=prefix, CURRENT_VERSION=CURRENT_VERSION): + f'{prefix}/{CURRENT_VERSION}/images/test_image/json': get_fake_inspect_image, - '{prefix}/{CURRENT_VERSION}/images/test_image/insert'.format(prefix=prefix, CURRENT_VERSION=CURRENT_VERSION): + f'{prefix}/{CURRENT_VERSION}/images/test_image/insert': 
get_fake_insert_image, - '{prefix}/{CURRENT_VERSION}/images/test_image/push'.format(prefix=prefix, CURRENT_VERSION=CURRENT_VERSION): + f'{prefix}/{CURRENT_VERSION}/images/test_image/push': post_fake_push, - '{prefix}/{CURRENT_VERSION}/commit'.format(prefix=prefix, CURRENT_VERSION=CURRENT_VERSION): + f'{prefix}/{CURRENT_VERSION}/commit': post_fake_commit, - '{prefix}/{CURRENT_VERSION}/containers/create'.format(prefix=prefix, CURRENT_VERSION=CURRENT_VERSION): + f'{prefix}/{CURRENT_VERSION}/containers/create': post_fake_create_container, - '{prefix}/{CURRENT_VERSION}/build'.format(prefix=prefix, CURRENT_VERSION=CURRENT_VERSION): + f'{prefix}/{CURRENT_VERSION}/build': post_fake_build_container, - '{prefix}/{CURRENT_VERSION}/events'.format(prefix=prefix, CURRENT_VERSION=CURRENT_VERSION): + f'{prefix}/{CURRENT_VERSION}/events': get_fake_events, - ('{prefix}/{CURRENT_VERSION}/volumes'.format(prefix=prefix, CURRENT_VERSION=CURRENT_VERSION), 'GET'): + (f'{prefix}/{CURRENT_VERSION}/volumes', 'GET'): get_fake_volume_list, - ('{prefix}/{CURRENT_VERSION}/volumes/create'.format(prefix=prefix, CURRENT_VERSION=CURRENT_VERSION), 'POST'): + (f'{prefix}/{CURRENT_VERSION}/volumes/create', 'POST'): get_fake_volume, - ('{1}/{0}/volumes/{2}'.format( - CURRENT_VERSION, prefix, FAKE_VOLUME_NAME - ), 'GET'): + (f'{prefix}/{CURRENT_VERSION}/volumes/{FAKE_VOLUME_NAME}', 'GET'): get_fake_volume, - ('{1}/{0}/volumes/{2}'.format( - CURRENT_VERSION, prefix, FAKE_VOLUME_NAME - ), 'DELETE'): + (f'{prefix}/{CURRENT_VERSION}/volumes/{FAKE_VOLUME_NAME}', 'DELETE'): fake_remove_volume, - ('{1}/{0}/nodes/{2}/update?version=1'.format( - CURRENT_VERSION, prefix, FAKE_NODE_ID - ), 'POST'): + (f'{prefix}/{CURRENT_VERSION}/nodes/{FAKE_NODE_ID}/update?version=1', 'POST'): post_fake_update_node, - ('{prefix}/{CURRENT_VERSION}/swarm/join'.format(prefix=prefix, CURRENT_VERSION=CURRENT_VERSION), 'POST'): + (f'{prefix}/{CURRENT_VERSION}/swarm/join', 'POST'): post_fake_join_swarm, - 
('{prefix}/{CURRENT_VERSION}/networks'.format(prefix=prefix, CURRENT_VERSION=CURRENT_VERSION), 'GET'): + (f'{prefix}/{CURRENT_VERSION}/networks', 'GET'): get_fake_network_list, - ('{prefix}/{CURRENT_VERSION}/networks/create'.format(prefix=prefix, CURRENT_VERSION=CURRENT_VERSION), 'POST'): + (f'{prefix}/{CURRENT_VERSION}/networks/create', 'POST'): post_fake_network, - ('{1}/{0}/networks/{2}'.format( - CURRENT_VERSION, prefix, FAKE_NETWORK_ID - ), 'GET'): + (f'{prefix}/{CURRENT_VERSION}/networks/{FAKE_NETWORK_ID}', 'GET'): get_fake_network, - ('{1}/{0}/networks/{2}'.format( - CURRENT_VERSION, prefix, FAKE_NETWORK_ID - ), 'DELETE'): + (f'{prefix}/{CURRENT_VERSION}/networks/{FAKE_NETWORK_ID}', 'DELETE'): delete_fake_network, - ('{1}/{0}/networks/{2}/connect'.format( - CURRENT_VERSION, prefix, FAKE_NETWORK_ID - ), 'POST'): + (f'{prefix}/{CURRENT_VERSION}/networks/{FAKE_NETWORK_ID}/connect', 'POST'): post_fake_network_connect, - ('{1}/{0}/networks/{2}/disconnect'.format( - CURRENT_VERSION, prefix, FAKE_NETWORK_ID - ), 'POST'): + (f'{prefix}/{CURRENT_VERSION}/networks/{FAKE_NETWORK_ID}/disconnect', 'POST'): post_fake_network_disconnect, - '{prefix}/{CURRENT_VERSION}/secrets/create'.format(prefix=prefix, CURRENT_VERSION=CURRENT_VERSION): + f'{prefix}/{CURRENT_VERSION}/secrets/create': post_fake_secret, } diff --git a/tests/unit/plugins/module_utils/_api/test_auth.py b/tests/unit/plugins/module_utils/_api/test_auth.py index ce3a2c70..4fed09d7 100644 --- a/tests/unit/plugins/module_utils/_api/test_auth.py +++ b/tests/unit/plugins/module_utils/_api/test_auth.py @@ -252,7 +252,7 @@ class LoadConfigTest(unittest.TestCase): cfg_path = os.path.join(folder, '.dockercfg') auth_ = base64.b64encode(b'sakuya:izayoi').decode('ascii') with open(cfg_path, 'w') as f: - f.write('auth = {auth}\n'.format(auth=auth_)) + f.write(f'auth = {auth_}\n') f.write('email = sakuya@scarlet.net') cfg = auth.load_config(cfg_path) @@ -309,14 +309,12 @@ class LoadConfigTest(unittest.TestCase): folder = 
tempfile.mkdtemp() self.addCleanup(shutil.rmtree, folder) - dockercfg_path = os.path.join(folder, - '.{0}.dockercfg'.format( - random.randrange(100000))) + dockercfg_path = os.path.join(folder, f'.{random.randrange(100000)}.dockercfg') registry = 'https://your.private.registry.io' auth_ = base64.b64encode(b'sakuya:izayoi').decode('ascii') config = { registry: { - 'auth': '{auth}'.format(auth=auth_), + 'auth': f'{auth_}', 'email': 'sakuya@scarlet.net' } } @@ -342,7 +340,7 @@ class LoadConfigTest(unittest.TestCase): auth_ = base64.b64encode(b'sakuya:izayoi').decode('ascii') config = { registry: { - 'auth': '{auth}'.format(auth=auth_), + 'auth': f'{auth_}', 'email': 'sakuya@scarlet.net' } } @@ -370,7 +368,7 @@ class LoadConfigTest(unittest.TestCase): config = { 'auths': { registry: { - 'auth': '{auth}'.format(auth=auth_), + 'auth': f'{auth_}', 'email': 'sakuya@scarlet.net' } } @@ -399,7 +397,7 @@ class LoadConfigTest(unittest.TestCase): config = { 'auths': { registry: { - 'auth': '{auth}'.format(auth=auth_), + 'auth': f'{auth_}', 'email': 'sakuya@scarlet.net' } } diff --git a/tests/unit/plugins/module_utils/_api/utils/test_build.py b/tests/unit/plugins/module_utils/_api/utils/test_build.py index 0686e4c2..a7fd74d6 100644 --- a/tests/unit/plugins/module_utils/_api/utils/test_build.py +++ b/tests/unit/plugins/module_utils/_api/utils/test_build.py @@ -431,9 +431,7 @@ class TarTest(unittest.TestCase): with pytest.raises(IOError) as ei: tar(base) - assert 'Can not read file in context: {full_path}'.format(full_path=full_path) in ( - ei.exconly() - ) + assert f'Can not read file in context: {full_path}' in ei.exconly() @pytest.mark.skipif(IS_WINDOWS_PLATFORM, reason='No symlinks on Windows') def test_tar_with_file_symlinks(self): diff --git a/tests/unit/plugins/module_utils/_api/utils/test_json_stream.py b/tests/unit/plugins/module_utils/_api/utils/test_json_stream.py index c578ffaf..d6aae484 100644 --- a/tests/unit/plugins/module_utils/_api/utils/test_json_stream.py +++ 
b/tests/unit/plugins/module_utils/_api/utils/test_json_stream.py @@ -33,12 +33,12 @@ class TestStreamAsText: def test_stream_with_non_utf_unicode_character(self): stream = [b'\xed\xf3\xf3'] output, = stream_as_text(stream) - assert output == u'���' + assert output == '���' def test_stream_with_utf_character(self): - stream = [u'ěĝ'.encode('utf-8')] + stream = ['ěĝ'.encode('utf-8')] output, = stream_as_text(stream) - assert output == u'ěĝ' + assert output == 'ěĝ' class TestJsonStream: diff --git a/tests/unit/plugins/module_utils/_api/utils/test_proxy.py b/tests/unit/plugins/module_utils/_api/utils/test_proxy.py index 5104c870..b25674c8 100644 --- a/tests/unit/plugins/module_utils/_api/utils/test_proxy.py +++ b/tests/unit/plugins/module_utils/_api/utils/test_proxy.py @@ -75,7 +75,7 @@ class ProxyConfigTest(unittest.TestCase): # Proxy config is non null, env is None. self.assertSetEqual( set(CONFIG.inject_proxy_environment(None)), - set('{k}={v}'.format(k=k, v=v) for k, v in ENV.items())) + set(f'{k}={v}' for k, v in ENV.items())) # Proxy config is null, env is None. 
self.assertIsNone(ProxyConfig().inject_proxy_environment(None), None) @@ -84,7 +84,7 @@ class ProxyConfigTest(unittest.TestCase): # Proxy config is non null, env is non null actual = CONFIG.inject_proxy_environment(env) - expected = ['{k}={v}'.format(k=k, v=v) for k, v in ENV.items()] + env + expected = [f'{k}={v}' for k, v in ENV.items()] + env # It's important that the first 8 variables are the ones from the proxy # config, and the last 2 are the ones from the input environment self.assertSetEqual(set(actual[:8]), set(expected[:8])) diff --git a/tests/unit/plugins/module_utils/_api/utils/test_utils.py b/tests/unit/plugins/module_utils/_api/utils/test_utils.py index a556a128..905bf52c 100644 --- a/tests/unit/plugins/module_utils/_api/utils/test_utils.py +++ b/tests/unit/plugins/module_utils/_api/utils/test_utils.py @@ -176,23 +176,23 @@ class ConverVolumeBindsTest(unittest.TestCase): assert convert_volume_binds(data) == ['/mnt/vol1:/data:rw'] def test_convert_volume_binds_unicode_bytes_input(self): - expected = [u'/mnt/지연:/unicode/박:rw'] + expected = ['/mnt/지연:/unicode/박:rw'] data = { - u'/mnt/지연'.encode('utf-8'): { - 'bind': u'/unicode/박'.encode('utf-8'), - 'mode': u'rw' + '/mnt/지연'.encode('utf-8'): { + 'bind': '/unicode/박'.encode('utf-8'), + 'mode': 'rw' } } assert convert_volume_binds(data) == expected def test_convert_volume_binds_unicode_unicode_input(self): - expected = [u'/mnt/지연:/unicode/박:rw'] + expected = ['/mnt/지연:/unicode/박:rw'] data = { - u'/mnt/지연': { - 'bind': u'/unicode/박', - 'mode': u'rw' + '/mnt/지연': { + 'bind': '/unicode/박', + 'mode': 'rw' } } assert convert_volume_binds(data) == expected @@ -288,7 +288,7 @@ class ParseHostTest(unittest.TestCase): } for host in invalid_hosts: - msg = 'Should have failed to parse invalid host: {0}'.format(host) + msg = f'Should have failed to parse invalid host: {host}' with self.assertRaises(DockerException, msg=msg): parse_host(host, None) @@ -296,7 +296,7 @@ class ParseHostTest(unittest.TestCase): 
self.assertEqual( parse_host(host, None), expected, - msg='Failed to parse valid host: {0}'.format(host), + msg=f'Failed to parse valid host: {host}', ) def test_parse_host_empty_value(self): @@ -347,14 +347,14 @@ class ParseRepositoryTagTest(unittest.TestCase): ) def test_index_image_sha(self): - assert parse_repository_tag("root@sha256:{sha}".format(sha=self.sha)) == ( - "root", "sha256:{sha}".format(sha=self.sha) + assert parse_repository_tag(f"root@sha256:{self.sha}") == ( + "root", f"sha256:{self.sha}" ) def test_private_reg_image_sha(self): assert parse_repository_tag( - "url:5000/repo@sha256:{sha}".format(sha=self.sha) - ) == ("url:5000/repo", "sha256:{sha}".format(sha=self.sha)) + f"url:5000/repo@sha256:{self.sha}" + ) == ("url:5000/repo", f"sha256:{self.sha}") class ParseDeviceTest(unittest.TestCase): @@ -457,7 +457,7 @@ class UtilsTest(unittest.TestCase): class SplitCommandTest(unittest.TestCase): def test_split_command_with_unicode(self): - assert split_command(u'echo μμ') == ['echo', 'μμ'] + assert split_command('echo μμ') == ['echo', 'μμ'] class FormatEnvironmentTest(unittest.TestCase): @@ -465,7 +465,7 @@ class FormatEnvironmentTest(unittest.TestCase): env_dict = { 'ARTIST_NAME': b'\xec\x86\xa1\xec\xa7\x80\xec\x9d\x80' } - assert format_environment(env_dict) == [u'ARTIST_NAME=송지은'] + assert format_environment(env_dict) == ['ARTIST_NAME=송지은'] def test_format_env_no_value(self): env_dict = { diff --git a/tests/unit/plugins/module_utils/test__scramble.py b/tests/unit/plugins/module_utils/test__scramble.py index ff004306..f4a31a8c 100644 --- a/tests/unit/plugins/module_utils/test__scramble.py +++ b/tests/unit/plugins/module_utils/test__scramble.py @@ -14,15 +14,15 @@ from ansible_collections.community.docker.plugins.module_utils._scramble import @pytest.mark.parametrize('plaintext, key, scrambled', [ - (u'', b'0', '=S='), - (u'hello', b'\x00', '=S=aGVsbG8='), - (u'hello', b'\x01', '=S=aWRtbW4='), + ('', b'0', '=S='), + ('hello', b'\x00', '=S=aGVsbG8='), + 
('hello', b'\x01', '=S=aWRtbW4='), ]) def test_scramble_unscramble(plaintext, key, scrambled): scrambled_ = scramble(plaintext, key) - print('{0!r} == {1!r}'.format(scrambled_, scrambled)) + print(f'{scrambled_!r} == {scrambled!r}') assert scrambled_ == scrambled plaintext_ = unscramble(scrambled, key) - print('{0!r} == {1!r}'.format(plaintext_, plaintext)) + print(f'{plaintext_!r} == {plaintext!r}') assert plaintext_ == plaintext diff --git a/tests/unit/plugins/modules/test_docker_container_copy_into.py b/tests/unit/plugins/modules/test_docker_container_copy_into.py index dfbf5983..4a8292fd 100644 --- a/tests/unit/plugins/modules/test_docker_container_copy_into.py +++ b/tests/unit/plugins/modules/test_docker_container_copy_into.py @@ -39,7 +39,7 @@ def test_parse_string(input, expected): ]) def test_parse_int(input): assert parse_modern(input) == input - with pytest.raises(TypeError, match="^must be an octal string, got {value}L?$".format(value=input)): + with pytest.raises(TypeError, match=f"^must be an octal string, got {input}L?$"): parse_octal_string_only(input) diff --git a/tests/unit/plugins/modules/test_docker_image.py b/tests/unit/plugins/modules/test_docker_image.py index b650aaa8..b408420a 100644 --- a/tests/unit/plugins/modules/test_docker_image.py +++ b/tests/unit/plugins/modules/test_docker_image.py @@ -18,7 +18,7 @@ from ..test_support.docker_image_archive_stubbing import ( def assert_no_logging(msg): - raise AssertionError('Should not have logged anything but logged %s' % msg) + raise AssertionError(f'Should not have logged anything but logged {msg}') def capture_logging(messages): @@ -41,7 +41,7 @@ def test_archived_image_action_when_missing(tar_file_name): fake_name = 'a:latest' fake_id = 'a1' - expected = 'Archived image %s to %s, since none present' % (fake_name, tar_file_name) + expected = f'Archived image {fake_name} to {tar_file_name}, since none present' actual = ImageManager.archived_image_action(assert_no_logging, tar_file_name, fake_name, 
api_image_id(fake_id)) @@ -65,7 +65,7 @@ def test_archived_image_action_when_invalid(tar_file_name): write_irrelevant_tar(tar_file_name) - expected = 'Archived image %s to %s, overwriting an unreadable archive file' % (fake_name, tar_file_name) + expected = f'Archived image {fake_name} to {tar_file_name}, overwriting an unreadable archive file' actual_log = [] actual = ImageManager.archived_image_action( @@ -88,9 +88,7 @@ def test_archived_image_action_when_obsolete_by_id(tar_file_name): write_imitation_archive(tar_file_name, old_id, [fake_name]) - expected = 'Archived image %s to %s, overwriting archive with image %s named %s' % ( - fake_name, tar_file_name, old_id, fake_name - ) + expected = f'Archived image {fake_name} to {tar_file_name}, overwriting archive with image {old_id} named {fake_name}' actual = ImageManager.archived_image_action(assert_no_logging, tar_file_name, fake_name, api_image_id(new_id)) assert actual == expected @@ -103,11 +101,9 @@ def test_archived_image_action_when_obsolete_by_name(tar_file_name): write_imitation_archive(tar_file_name, fake_id, [old_name]) - expected = 'Archived image %s to %s, overwriting archive with image %s named %s' % ( - new_name, tar_file_name, fake_id, old_name - ) + expected = f'Archived image {new_name} to {tar_file_name}, overwriting archive with image {fake_id} named {old_name}' actual = ImageManager.archived_image_action(assert_no_logging, tar_file_name, new_name, api_image_id(fake_id)) - print('actual : %s', actual) - print('expected : %s', expected) + print(f'actual : {actual}') + print(f'expected : {expected}') assert actual == expected diff --git a/tests/unit/plugins/modules/test_docker_network.py b/tests/unit/plugins/modules/test_docker_network.py index a937c6db..62da8313 100644 --- a/tests/unit/plugins/modules/test_docker_network.py +++ b/tests/unit/plugins/modules/test_docker_network.py @@ -32,4 +32,4 @@ def test_validate_cidr_positives(cidr, expected): def test_validate_cidr_negatives(cidr): with 
pytest.raises(ValueError) as e: validate_cidr(cidr) - assert '"{0}" is not a valid CIDR'.format(cidr) == str(e.value) + assert f'"{cidr}" is not a valid CIDR' == str(e.value) diff --git a/tests/unit/plugins/modules/test_docker_swarm_service.py b/tests/unit/plugins/modules/test_docker_swarm_service.py index 1cef623b..9f2b27f4 100644 --- a/tests/unit/plugins/modules/test_docker_swarm_service.py +++ b/tests/unit/plugins/modules/test_docker_swarm_service.py @@ -75,7 +75,7 @@ def test_get_docker_environment(mocker, docker_swarm_service): mocker.patch.object( docker_swarm_service, 'format_environment', - side_effect=lambda d: ['{0}={1}'.format(key, value) for key, value in d.items()], + side_effect=lambda d: [f'{key}={value}' for key, value in d.items()], ) # Test with env dict and file result = docker_swarm_service.get_docker_environment( @@ -207,7 +207,7 @@ def test_has_list_changed(docker_swarm_service): ) assert docker_swarm_service.has_list_changed( ['sleep', '3400'], - [u'sleep', u'3600'], + ['sleep', '3600'], sort_lists=False ) diff --git a/tests/unit/plugins/plugin_utils/test_unsafe.py b/tests/unit/plugins/plugin_utils/test_unsafe.py index c13237c8..d9021ece 100644 --- a/tests/unit/plugins/plugin_utils/test_unsafe.py +++ b/tests/unit/plugins/plugin_utils/test_unsafe.py @@ -22,14 +22,14 @@ from ansible_collections.community.docker.plugins.plugin_utils.unsafe import ( TEST_MAKE_UNSAFE = [ ( - _make_trusted(u'text'), + _make_trusted('text'), [], [ (), ], ), ( - _make_trusted(u'{{text}}'), + _make_trusted('{{text}}'), [ (), ], @@ -117,7 +117,7 @@ def test_make_unsafe_idempotence(): def test_make_unsafe_dict_key(): value = { - _make_trusted(u'test'): 2, + _make_trusted('test'): 2, } if not SUPPORTS_DATA_TAGGING: value[_make_trusted(b"test")] = 1 @@ -127,7 +127,7 @@ def test_make_unsafe_dict_key(): assert _is_trusted(obj) value = { - _make_trusted(u'{{test}}'): 2, + _make_trusted('{{test}}'): 2, } if not SUPPORTS_DATA_TAGGING: value[_make_trusted(b"{{test}}")] = 1 @@ 
-138,7 +138,7 @@ def test_make_unsafe_dict_key(): def test_make_unsafe_set(): - value = set([_make_trusted(u'test')]) + value = set([_make_trusted('test')]) if not SUPPORTS_DATA_TAGGING: value.add(_make_trusted(b"test")) unsafe_value = make_unsafe(value) @@ -146,7 +146,7 @@ def test_make_unsafe_set(): for obj in unsafe_value: assert _is_trusted(obj) - value = set([_make_trusted(u'{{test}}')]) + value = set([_make_trusted('{{test}}')]) if not SUPPORTS_DATA_TAGGING: value.add(_make_trusted(b"{{test}}")) unsafe_value = make_unsafe(value) diff --git a/tests/unit/plugins/test_support/docker_image_archive_stubbing.py b/tests/unit/plugins/test_support/docker_image_archive_stubbing.py index 3a45d0f8..cbfee043 100644 --- a/tests/unit/plugins/test_support/docker_image_archive_stubbing.py +++ b/tests/unit/plugins/test_support/docker_image_archive_stubbing.py @@ -28,7 +28,7 @@ def write_imitation_archive(file_name, image_id, repo_tags): manifest = [ { - 'Config': '%s.json' % image_id, + 'Config': f'{image_id}.json', 'RepoTags': repo_tags } ]