diff --git a/.pylintrc b/.pylintrc index 3a1f0480..2a1fc6c8 100644 --- a/.pylintrc +++ b/.pylintrc @@ -388,18 +388,11 @@ disable=raw-checker-failed, no-name-in-module, # TODO figure out why pylint cannot find the module not-an-iterable, # TODO: needs better typing info protected-access, - raise-missing-from, redefined-outer-name, # needed for test fixtures - simplifiable-if-expression, subprocess-popen-preexec-fn, unexpected-keyword-arg, - unnecessary-dunder-call, - unnecessary-pass, unsupported-assignment-operation, # TODO: needs better typing info unused-argument, - unused-variable, - use-dict-literal, - use-list-literal, # Cannot remove yet due to inadequacy of rules inconsistent-return-statements, # doesn't notice that fail_json() does not return diff --git a/plugins/connection/docker.py b/plugins/connection/docker.py index 7d1aba90..1f264e6c 100644 --- a/plugins/connection/docker.py +++ b/plugins/connection/docker.py @@ -167,8 +167,8 @@ class Connection(ConnectionBase): else: try: self.docker_cmd = get_bin_path("docker") - except ValueError: - raise AnsibleError("docker command not found in PATH") + except ValueError as exc: + raise AnsibleError("docker command not found in PATH") from exc @staticmethod def _sanitize_version(version): @@ -426,7 +426,7 @@ class Connection(ConnectionBase): ) chunks = b"" - for key, event in events: + for key, dummy_event in events: if key.fileobj == p.stdout: chunk = p.stdout.read() if chunk: @@ -523,10 +523,10 @@ class Connection(ConnectionBase): p = subprocess.Popen( args, stdin=in_file, stdout=subprocess.PIPE, stderr=subprocess.PIPE ) - except OSError: + except OSError as exc: raise AnsibleError( "docker connection requires dd command in the container to put files" - ) + ) from exc stdout, stderr = p.communicate() if p.returncode != 0: @@ -588,10 +588,10 @@ class Connection(ConnectionBase): stdout=out_file, stderr=subprocess.PIPE, ) - except OSError: + except OSError as exc: raise AnsibleError( "docker connection requires dd 
command in the container to put files" - ) + ) from exc stdout, stderr = pp.communicate() if pp.returncode != 0: diff --git a/plugins/connection/docker_api.py b/plugins/connection/docker_api.py index cd9da076..f871549e 100644 --- a/plugins/connection/docker_api.py +++ b/plugins/connection/docker_api.py @@ -158,15 +158,15 @@ class Connection(ConnectionBase): if not_found_can_be_resource: raise AnsibleConnectionFailure( f'Could not find container "{remote_addr}" or resource in it ({e})' - ) + ) from e raise AnsibleConnectionFailure( f'Could not find container "{remote_addr}" ({e})' - ) + ) from e except APIError as e: if e.response is not None and e.response.status_code == 409: raise AnsibleConnectionFailure( f'The container "{remote_addr}" has been paused ({e})' - ) + ) from e self.client.fail( f'An unexpected Docker error occurred for container "{remote_addr}": {e}' ) @@ -183,7 +183,7 @@ class Connection(ConnectionBase): super().__init__(play_context, new_stdin, *args, **kwargs) self.client = None - self.ids = dict() + self.ids = {} # Windows uses Powershell modules if getattr(self._shell, "_IS_WINDOWS", False): @@ -239,7 +239,7 @@ class Connection(ConnectionBase): host=self.get_option("remote_addr"), ) - need_stdin = True if (in_data is not None) or do_become else False + need_stdin = bool((in_data is not None) or do_become) data = { "Container": self.get_option("remote_addr"), @@ -393,7 +393,7 @@ class Connection(ConnectionBase): except Exception as e: raise AnsibleConnectionFailure( f'Error while determining user and group ID of current user in container "{remote_addr}": {e}\nGot value: {ids!r}' - ) + ) from e user_id, group_id = self.ids[self.actual_user] try: @@ -411,9 +411,9 @@ class Connection(ConnectionBase): not_found_can_be_resource=True, ) except DockerFileNotFound as exc: - raise AnsibleFileNotFound(to_native(exc)) + raise AnsibleFileNotFound(to_native(exc)) from exc except DockerFileCopyError as exc: - raise AnsibleConnectionFailure(to_native(exc)) + 
raise AnsibleConnectionFailure(to_native(exc)) from exc def fetch_file(self, in_path, out_path): """Fetch a file from container to local.""" @@ -439,9 +439,9 @@ class Connection(ConnectionBase): not_found_can_be_resource=True, ) except DockerFileNotFound as exc: - raise AnsibleFileNotFound(to_native(exc)) + raise AnsibleFileNotFound(to_native(exc)) from exc except DockerFileCopyError as exc: - raise AnsibleConnectionFailure(to_native(exc)) + raise AnsibleConnectionFailure(to_native(exc)) from exc def close(self): """Terminate the connection. Nothing to do for Docker""" diff --git a/plugins/connection/nsenter.py b/plugins/connection/nsenter.py index bdf321f6..695ccdc9 100644 --- a/plugins/connection/nsenter.py +++ b/plugins/connection/nsenter.py @@ -178,7 +178,7 @@ class Connection(ConnectionBase): ) chunks = b"" - for key, event in events: + for key, dummy_event in events: if key.fileobj == p.stdout: chunk = p.stdout.read() if chunk: @@ -244,11 +244,13 @@ class Connection(ConnectionBase): try: with open(to_bytes(in_path, errors="surrogate_or_strict"), "rb") as in_file: in_data = in_file.read() - rc, out, err = self.exec_command(cmd=["tee", out_path], in_data=in_data) + rc, dummy_out, err = self.exec_command( + cmd=["tee", out_path], in_data=in_data + ) if rc != 0: raise AnsibleError(f"failed to transfer file to {out_path}: {err}") except IOError as e: - raise AnsibleError(f"failed to transfer file to {out_path}: {e}") + raise AnsibleError(f"failed to transfer file to {out_path}: {e}") from e def fetch_file(self, in_path, out_path): super().fetch_file(in_path, out_path) @@ -268,7 +270,9 @@ class Connection(ConnectionBase): ) as out_file: out_file.write(out) except IOError as e: - raise AnsibleError(f"failed to transfer file to {to_native(out_path)}: {e}") + raise AnsibleError( + f"failed to transfer file to {to_native(out_path)}: {e}" + ) from e def close(self): """terminate the connection; nothing to do here""" diff --git a/plugins/inventory/docker_containers.py 
b/plugins/inventory/docker_containers.py index 1aead1de..b5e2979c 100644 --- a/plugins/inventory/docker_containers.py +++ b/plugins/inventory/docker_containers.py @@ -228,7 +228,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable): } containers = client.get_json("/containers/json", params=params) except APIError as exc: - raise AnsibleError(f"Error listing containers: {exc}") + raise AnsibleError(f"Error listing containers: {exc}") from exc if add_legacy_groups: self.inventory.add_group("running") @@ -247,26 +247,28 @@ class InventoryModule(BaseInventoryPlugin, Constructable): short_container_id = container_id[:13] try: - name = container.get("Names", list())[0].lstrip("/") + name = container.get("Names", [])[0].lstrip("/") full_name = name except IndexError: name = short_container_id full_name = container_id - facts = dict( - docker_name=make_unsafe(name), - docker_short_id=make_unsafe(short_container_id), - ) - full_facts = dict() + facts = { + "docker_name": make_unsafe(name), + "docker_short_id": make_unsafe(short_container_id), + } + full_facts = {} try: inspect = client.get_json("/containers/{0}/json", container_id) except APIError as exc: - raise AnsibleError(f"Error inspecting container {name} - {exc}") + raise AnsibleError( + f"Error inspecting container {name} - {exc}" + ) from exc - state = inspect.get("State") or dict() - config = inspect.get("Config") or dict() - labels = config.get("Labels") or dict() + state = inspect.get("State") or {} + config = inspect.get("Config") or {} + labels = config.get("Labels") or {} running = state.get("Running") @@ -298,7 +300,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable): port_settings = network_settings.get("Ports") or {} port = port_settings.get(f"{ssh_port}/tcp")[0] except (IndexError, AttributeError, TypeError): - port = dict() + port = {} try: ip = default_ip if port["HostIp"] == "0.0.0.0" else port["HostIp"] @@ -306,23 +308,23 @@ class InventoryModule(BaseInventoryPlugin, 
Constructable): ip = "" facts.update( - dict( - ansible_ssh_host=ip, - ansible_ssh_port=port.get("HostPort", 0), - ) + { + "ansible_ssh_host": ip, + "ansible_ssh_port": port.get("HostPort", 0), + } ) elif connection_type == "docker-cli": facts.update( - dict( - ansible_host=full_name, - ) + { + "ansible_host": full_name, + } ) ansible_connection = "community.docker.docker" elif connection_type == "docker-api": facts.update( - dict( - ansible_host=full_name, - ) + { + "ansible_host": full_name, + } ) facts.update(extra_facts) ansible_connection = "community.docker.docker_api" @@ -401,8 +403,8 @@ class InventoryModule(BaseInventoryPlugin, Constructable): try: self._populate(client) except DockerException as e: - raise AnsibleError(f"An unexpected Docker error occurred: {e}") + raise AnsibleError(f"An unexpected Docker error occurred: {e}") from e except RequestException as e: raise AnsibleError( f"An unexpected requests error occurred when trying to talk to the Docker daemon: {e}" - ) + ) from e diff --git a/plugins/inventory/docker_machine.py b/plugins/inventory/docker_machine.py index d4eae1dd..87f44101 100644 --- a/plugins/inventory/docker_machine.py +++ b/plugins/inventory/docker_machine.py @@ -132,7 +132,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): try: self.docker_machine_path = get_bin_path("docker-machine") except ValueError as e: - raise AnsibleError(to_native(e)) + raise AnsibleError(to_native(e)) from e command = [self.docker_machine_path] command.extend(args) diff --git a/plugins/inventory/docker_swarm.py b/plugins/inventory/docker_swarm.py index 46faf6e0..a47ed9b1 100644 --- a/plugins/inventory/docker_swarm.py +++ b/plugins/inventory/docker_swarm.py @@ -184,19 +184,19 @@ class InventoryModule(BaseInventoryPlugin, Constructable): raise AnsibleError(msg) def _populate(self): - raw_params = dict( - docker_host=self.get_option("docker_host"), - tls=self.get_option("tls"), - tls_verify=self.get_option("validate_certs"), - 
key_path=self.get_option("client_key"), - cacert_path=self.get_option("ca_path"), - cert_path=self.get_option("client_cert"), - tls_hostname=self.get_option("tls_hostname"), - api_version=self.get_option("api_version"), - timeout=self.get_option("timeout"), - use_ssh_client=self.get_option("use_ssh_client"), - debug=None, - ) + raw_params = { + "docker_host": self.get_option("docker_host"), + "tls": self.get_option("tls"), + "tls_verify": self.get_option("validate_certs"), + "key_path": self.get_option("client_key"), + "cacert_path": self.get_option("ca_path"), + "cert_path": self.get_option("client_cert"), + "tls_hostname": self.get_option("tls_hostname"), + "api_version": self.get_option("api_version"), + "timeout": self.get_option("timeout"), + "use_ssh_client": self.get_option("use_ssh_client"), + "debug": None, + } update_tls_hostname(raw_params) connect_params = get_connect_params(raw_params, fail_function=self._fail) client = docker.DockerClient(**connect_params) @@ -305,7 +305,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable): except Exception as e: raise AnsibleError( f"Unable to fetch hosts from Docker swarm API, this was the original exception: {e}" - ) + ) from e def verify_file(self, path): """Return the possibly of a file being consumable by this plugin.""" diff --git a/plugins/module_utils/_api/api/client.py b/plugins/module_utils/_api/api/client.py index 3c7b765e..af5d1db0 100644 --- a/plugins/module_utils/_api/api/client.py +++ b/plugins/module_utils/_api/api/client.py @@ -227,18 +227,20 @@ class APIClient(_Session, DaemonApiMixin): try: version_result = self.version(api_version=False) except Exception as e: - raise DockerException(f"Error while fetching server API version: {e}") + raise DockerException( + f"Error while fetching server API version: {e}" + ) from e try: return version_result["ApiVersion"] except KeyError: raise DockerException( 'Invalid response from docker daemon: key "ApiVersion" is missing.' 
- ) + ) from None except Exception as e: raise DockerException( f"Error while fetching server API version: {e}. Response seems to be broken." - ) + ) from e def _set_request_timeout(self, kwargs): """Prepare the kwargs for an HTTP request by inserting the timeout diff --git a/plugins/module_utils/_api/auth.py b/plugins/module_utils/_api/auth.py index e6d44085..317e6c77 100644 --- a/plugins/module_utils/_api/auth.py +++ b/plugins/module_utils/_api/auth.py @@ -370,7 +370,6 @@ def _load_legacy_config(config_file): } except Exception as e: # pylint: disable=broad-exception-caught log.debug(e) - pass log.debug("All parsing attempts failed - returning empty config") return {} diff --git a/plugins/module_utils/_api/context/context.py b/plugins/module_utils/_api/context/context.py index cc27885a..b8a43fb0 100644 --- a/plugins/module_utils/_api/context/context.py +++ b/plugins/module_utils/_api/context/context.py @@ -106,7 +106,7 @@ class Context: self.tls_cfg[name] = tls_cfg def inspect(self): - return self.__call__() + return self() @classmethod def load_context(cls, name): diff --git a/plugins/module_utils/_api/tls.py b/plugins/module_utils/_api/tls.py index 38ce5ba0..1b81c193 100644 --- a/plugins/module_utils/_api/tls.py +++ b/plugins/module_utils/_api/tls.py @@ -71,7 +71,7 @@ class TLSConfig: except ValueError: raise errors.TLSParameterError( "client_cert must be a tuple of (client certificate, key file)" - ) + ) from None if not (tls_cert and tls_key) or ( not os.path.isfile(tls_cert) or not os.path.isfile(tls_key) diff --git a/plugins/module_utils/_api/transport/npipeconn.py b/plugins/module_utils/_api/transport/npipeconn.py index 2c056d44..3f618b38 100644 --- a/plugins/module_utils/_api/transport/npipeconn.py +++ b/plugins/module_utils/_api/transport/npipeconn.py @@ -52,16 +52,16 @@ class NpipeHTTPConnectionPool(urllib3.connectionpool.HTTPConnectionPool): try: conn = self.pool.get(block=self.block, timeout=timeout) - except AttributeError: # self.pool is None - 
raise urllib3.exceptions.ClosedPoolError(self, "Pool is closed.") + except AttributeError as exc: # self.pool is None + raise urllib3.exceptions.ClosedPoolError(self, "Pool is closed.") from exc - except Empty: + except Empty as exc: if self.block: raise urllib3.exceptions.EmptyPoolError( self, "Pool reached maximum size and no more connections are allowed.", - ) - pass # Oh well, we'll create a new connection then + ) from exc + # Oh well, we'll create a new connection then return conn or self._new_conn() diff --git a/plugins/module_utils/_api/transport/npipesocket.py b/plugins/module_utils/_api/transport/npipesocket.py index f178c8fd..a6c649bc 100644 --- a/plugins/module_utils/_api/transport/npipesocket.py +++ b/plugins/module_utils/_api/transport/npipesocket.py @@ -141,7 +141,7 @@ class NpipeSocket: @check_closed def recv(self, bufsize, flags=0): - err, data = win32file.ReadFile(self._handle, bufsize) + dummy_err, data = win32file.ReadFile(self._handle, bufsize) return data @check_closed @@ -163,7 +163,7 @@ class NpipeSocket: try: overlapped = pywintypes.OVERLAPPED() overlapped.hEvent = event - err, data = win32file.ReadFile( + dummy_err, dummy_data = win32file.ReadFile( self._handle, readbuf[:nbytes] if nbytes else readbuf, overlapped ) wait_result = win32event.WaitForSingleObject(event, self._timeout) diff --git a/plugins/module_utils/_api/transport/sshconn.py b/plugins/module_utils/_api/transport/sshconn.py index b888b9c9..ba0a3909 100644 --- a/plugins/module_utils/_api/transport/sshconn.py +++ b/plugins/module_utils/_api/transport/sshconn.py @@ -159,16 +159,16 @@ class SSHConnectionPool(urllib3.connectionpool.HTTPConnectionPool): try: conn = self.pool.get(block=self.block, timeout=timeout) - except AttributeError: # self.pool is None - raise urllib3.exceptions.ClosedPoolError(self, "Pool is closed.") + except AttributeError as exc: # self.pool is None + raise urllib3.exceptions.ClosedPoolError(self, "Pool is closed.") from exc - except Empty: + except Empty 
as exc: if self.block: raise urllib3.exceptions.EmptyPoolError( self, "Pool reached maximum size and no more connections are allowed.", - ) - pass # Oh well, we'll create a new connection then + ) from exc + # Oh well, we'll create a new connection then return conn or self._new_conn() diff --git a/plugins/module_utils/_api/types/daemon.py b/plugins/module_utils/_api/types/daemon.py index c22ad9b5..6defe9b2 100644 --- a/plugins/module_utils/_api/types/daemon.py +++ b/plugins/module_utils/_api/types/daemon.py @@ -39,10 +39,10 @@ class CancellableStream: def __next__(self): try: return next(self._stream) - except urllib3.exceptions.ProtocolError: - raise StopIteration - except socket.error: - raise StopIteration + except urllib3.exceptions.ProtocolError as exc: + raise StopIteration from exc + except socket.error as exc: + raise StopIteration from exc next = __next__ diff --git a/plugins/module_utils/_api/utils/build.py b/plugins/module_utils/_api/utils/build.py index 5a06772b..d15774be 100644 --- a/plugins/module_utils/_api/utils/build.py +++ b/plugins/module_utils/_api/utils/build.py @@ -107,8 +107,8 @@ def create_archive(root, files=None, fileobj=None, gzip=False, extra_files=None) try: with open(full_path, "rb") as f: t.addfile(i, f) - except IOError: - raise IOError(f"Can not read file in context: {full_path}") + except IOError as exc: + raise IOError(f"Can not read file in context: {full_path}") from exc else: # Directories, FIFOs, symlinks... do not need to be read. 
t.addfile(i, None) diff --git a/plugins/module_utils/_api/utils/json_stream.py b/plugins/module_utils/_api/utils/json_stream.py index 164c5c9e..dac3d0ca 100644 --- a/plugins/module_utils/_api/utils/json_stream.py +++ b/plugins/module_utils/_api/utils/json_stream.py @@ -85,4 +85,4 @@ def split_buffer(stream, splitter=None, decoder=lambda a: a): try: yield decoder(buffered) except Exception as e: - raise StreamParseError(e) + raise StreamParseError(e) from e diff --git a/plugins/module_utils/_api/utils/utils.py b/plugins/module_utils/_api/utils/utils.py index e54f6eef..31c9b39f 100644 --- a/plugins/module_utils/_api/utils/utils.py +++ b/plugins/module_utils/_api/utils/utils.py @@ -420,10 +420,10 @@ def parse_bytes(s): if suffix in units or suffix.isdigit(): try: digits = float(digits_part) - except ValueError: + except ValueError as exc: raise errors.DockerException( f"Failed converting the string value for memory ({digits_part}) to an integer." - ) + ) from exc # Reconvert to long for the final result s = int(digits * units[suffix]) diff --git a/plugins/module_utils/_common.py b/plugins/module_utils/_common.py index a3e96fd0..6e9c2800 100644 --- a/plugins/module_utils/_common.py +++ b/plugins/module_utils/_common.py @@ -144,19 +144,19 @@ def get_connect_params(auth_data, fail_function): "tcp://", "https://" ) - result = dict( - base_url=auth_data["docker_host"], - version=auth_data["api_version"], - timeout=auth_data["timeout"], - ) + result = { + "base_url": auth_data["docker_host"], + "version": auth_data["api_version"], + "timeout": auth_data["timeout"], + } if auth_data["tls_verify"]: # TLS with verification - tls_config = dict( - verify=True, - assert_hostname=auth_data["tls_hostname"], - fail_function=fail_function, - ) + tls_config = { + "verify": True, + "assert_hostname": auth_data["tls_hostname"], + "fail_function": fail_function, + } if auth_data["cert_path"] and auth_data["key_path"]: tls_config["client_cert"] = (auth_data["cert_path"], 
auth_data["key_path"]) if auth_data["cacert_path"]: @@ -164,10 +164,10 @@ def get_connect_params(auth_data, fail_function): result["tls"] = _get_tls_config(**tls_config) elif auth_data["tls"]: # TLS without verification - tls_config = dict( - verify=False, - fail_function=fail_function, - ) + tls_config = { + "verify": False, + "fail_function": fail_function, + } if auth_data["cert_path"] and auth_data["key_path"]: tls_config["client_cert"] = (auth_data["cert_path"], auth_data["key_path"]) result["tls"] = _get_tls_config(**tls_config) @@ -312,78 +312,78 @@ class AnsibleDockerClientBase(Client): client_params = self._get_params() - params = dict() + params = {} for key in DOCKER_COMMON_ARGS: params[key] = client_params.get(key) - result = dict( - docker_host=self._get_value( + result = { + "docker_host": self._get_value( "docker_host", params["docker_host"], "DOCKER_HOST", DEFAULT_DOCKER_HOST, value_type="str", ), - tls_hostname=self._get_value( + "tls_hostname": self._get_value( "tls_hostname", params["tls_hostname"], "DOCKER_TLS_HOSTNAME", None, value_type="str", ), - api_version=self._get_value( + "api_version": self._get_value( "api_version", params["api_version"], "DOCKER_API_VERSION", "auto", value_type="str", ), - cacert_path=self._get_value( + "cacert_path": self._get_value( "cacert_path", params["ca_path"], "DOCKER_CERT_PATH", None, value_type="str", ), - cert_path=self._get_value( + "cert_path": self._get_value( "cert_path", params["client_cert"], "DOCKER_CERT_PATH", None, value_type="str", ), - key_path=self._get_value( + "key_path": self._get_value( "key_path", params["client_key"], "DOCKER_CERT_PATH", None, value_type="str", ), - tls=self._get_value( + "tls": self._get_value( "tls", params["tls"], "DOCKER_TLS", DEFAULT_TLS, value_type="bool" ), - tls_verify=self._get_value( + "tls_verify": self._get_value( "validate_certs", params["validate_certs"], "DOCKER_TLS_VERIFY", DEFAULT_TLS_VERIFY, value_type="bool", ), - timeout=self._get_value( + "timeout": 
self._get_value( "timeout", params["timeout"], "DOCKER_TIMEOUT", DEFAULT_TIMEOUT_SECONDS, value_type="int", ), - use_ssh_client=self._get_value( + "use_ssh_client": self._get_value( "use_ssh_client", params["use_ssh_client"], None, False, value_type="bool", ), - ) + } update_tls_hostname(result) @@ -586,11 +586,11 @@ class AnsibleDockerClientBase(Client): """ Pull an image """ - kwargs = dict( - tag=tag, - stream=True, - decode=True, - ) + kwargs = { + "tag": tag, + "stream": True, + "decode": True, + } if image_platform is not None: kwargs["platform"] = image_platform self.log(f"Pulling image {name}:{tag}") @@ -654,7 +654,7 @@ class AnsibleDockerClient(AnsibleDockerClientBase): # in case client.fail() is called. self.fail_results = fail_results or {} - merged_arg_spec = dict() + merged_arg_spec = {} merged_arg_spec.update(DOCKER_COMMON_ARGS) if argument_spec: merged_arg_spec.update(argument_spec) @@ -706,12 +706,12 @@ class AnsibleDockerClient(AnsibleDockerClientBase): return self.module.params def _get_minimal_versions(self, option_minimal_versions, ignore_params=None): - self.option_minimal_versions = dict() + self.option_minimal_versions = {} for option in self.module.argument_spec: if ignore_params is not None: if option in ignore_params: continue - self.option_minimal_versions[option] = dict() + self.option_minimal_versions[option] = {} self.option_minimal_versions.update(option_minimal_versions) for option, data in self.option_minimal_versions.items(): diff --git a/plugins/module_utils/_common_api.py b/plugins/module_utils/_common_api.py index 6e390e69..067f1c84 100644 --- a/plugins/module_utils/_common_api.py +++ b/plugins/module_utils/_common_api.py @@ -78,19 +78,19 @@ def get_connect_params(auth_data, fail_function): "tcp://", "https://" ) - result = dict( - base_url=auth_data["docker_host"], - version=auth_data["api_version"], - timeout=auth_data["timeout"], - ) + result = { + "base_url": auth_data["docker_host"], + "version": auth_data["api_version"], + 
"timeout": auth_data["timeout"], + } if auth_data["tls_verify"]: # TLS with verification - tls_config = dict( - verify=True, - assert_hostname=auth_data["tls_hostname"], - fail_function=fail_function, - ) + tls_config = { + "verify": True, + "assert_hostname": auth_data["tls_hostname"], + "fail_function": fail_function, + } if auth_data["cert_path"] and auth_data["key_path"]: tls_config["client_cert"] = (auth_data["cert_path"], auth_data["key_path"]) if auth_data["cacert_path"]: @@ -98,10 +98,10 @@ def get_connect_params(auth_data, fail_function): result["tls"] = _get_tls_config(**tls_config) elif auth_data["tls"]: # TLS without verification - tls_config = dict( - verify=False, - fail_function=fail_function, - ) + tls_config = { + "verify": False, + "fail_function": fail_function, + } if auth_data["cert_path"] and auth_data["key_path"]: tls_config["client_cert"] = (auth_data["cert_path"], auth_data["key_path"]) result["tls"] = _get_tls_config(**tls_config) @@ -203,78 +203,78 @@ class AnsibleDockerClientBase(Client): client_params = self._get_params() - params = dict() + params = {} for key in DOCKER_COMMON_ARGS: params[key] = client_params.get(key) - result = dict( - docker_host=self._get_value( + result = { + "docker_host": self._get_value( "docker_host", params["docker_host"], "DOCKER_HOST", DEFAULT_DOCKER_HOST, value_type="str", ), - tls_hostname=self._get_value( + "tls_hostname": self._get_value( "tls_hostname", params["tls_hostname"], "DOCKER_TLS_HOSTNAME", None, value_type="str", ), - api_version=self._get_value( + "api_version": self._get_value( "api_version", params["api_version"], "DOCKER_API_VERSION", "auto", value_type="str", ), - cacert_path=self._get_value( + "cacert_path": self._get_value( "cacert_path", params["ca_path"], "DOCKER_CERT_PATH", None, value_type="str", ), - cert_path=self._get_value( + "cert_path": self._get_value( "cert_path", params["client_cert"], "DOCKER_CERT_PATH", None, value_type="str", ), - key_path=self._get_value( + "key_path": 
self._get_value( "key_path", params["client_key"], "DOCKER_CERT_PATH", None, value_type="str", ), - tls=self._get_value( + "tls": self._get_value( "tls", params["tls"], "DOCKER_TLS", DEFAULT_TLS, value_type="bool" ), - tls_verify=self._get_value( + "tls_verify": self._get_value( "validate_certs", params["validate_certs"], "DOCKER_TLS_VERIFY", DEFAULT_TLS_VERIFY, value_type="bool", ), - timeout=self._get_value( + "timeout": self._get_value( "timeout", params["timeout"], "DOCKER_TIMEOUT", DEFAULT_TIMEOUT_SECONDS, value_type="int", ), - use_ssh_client=self._get_value( + "use_ssh_client": self._get_value( "use_ssh_client", params["use_ssh_client"], None, False, value_type="bool", ), - ) + } def depr(*args, **kwargs): self.deprecate(*args, **kwargs) @@ -504,7 +504,7 @@ class AnsibleDockerClientBase(Client): old_tag = self.find_image(name, tag) try: repository, image_tag = parse_repository_tag(name) - registry, repo_name = auth.resolve_repository_name(repository) + registry, dummy_repo_name = auth.resolve_repository_name(repository) params = { "tag": tag or image_tag or "latest", "fromImage": repository, @@ -564,7 +564,7 @@ class AnsibleDockerClient(AnsibleDockerClientBase): # in case client.fail() is called. 
self.fail_results = fail_results or {} - merged_arg_spec = dict() + merged_arg_spec = {} merged_arg_spec.update(DOCKER_COMMON_ARGS) if argument_spec: merged_arg_spec.update(argument_spec) @@ -613,12 +613,12 @@ class AnsibleDockerClient(AnsibleDockerClientBase): return self.module.params def _get_minimal_versions(self, option_minimal_versions, ignore_params=None): - self.option_minimal_versions = dict() + self.option_minimal_versions = {} for option in self.module.argument_spec: if ignore_params is not None: if option in ignore_params: continue - self.option_minimal_versions[option] = dict() + self.option_minimal_versions[option] = {} self.option_minimal_versions.update(option_minimal_versions) for option, data in self.option_minimal_versions.items(): diff --git a/plugins/module_utils/_common_cli.py b/plugins/module_utils/_common_cli.py index 81006d8e..fb9a316b 100644 --- a/plugins/module_utils/_common_cli.py +++ b/plugins/module_utils/_common_cli.py @@ -31,31 +31,40 @@ from ansible_collections.community.docker.plugins.module_utils._version import ( ) -DOCKER_COMMON_ARGS = dict( - docker_cli=dict(type="path"), - docker_host=dict( - type="str", fallback=(env_fallback, ["DOCKER_HOST"]), aliases=["docker_url"] - ), - tls_hostname=dict(type="str", fallback=(env_fallback, ["DOCKER_TLS_HOSTNAME"])), - api_version=dict( - type="str", - default="auto", - fallback=(env_fallback, ["DOCKER_API_VERSION"]), - aliases=["docker_api_version"], - ), - ca_path=dict(type="path", aliases=["ca_cert", "tls_ca_cert", "cacert_path"]), - client_cert=dict(type="path", aliases=["tls_client_cert", "cert_path"]), - client_key=dict(type="path", aliases=["tls_client_key", "key_path"]), - tls=dict(type="bool", default=DEFAULT_TLS, fallback=(env_fallback, ["DOCKER_TLS"])), - validate_certs=dict( - type="bool", - default=DEFAULT_TLS_VERIFY, - fallback=(env_fallback, ["DOCKER_TLS_VERIFY"]), - aliases=["tls_verify"], - ), - # debug=dict(type='bool', default=False), - cli_context=dict(type="str"), -) 
+DOCKER_COMMON_ARGS = { + "docker_cli": {"type": "path"}, + "docker_host": { + "type": "str", + "fallback": (env_fallback, ["DOCKER_HOST"]), + "aliases": ["docker_url"], + }, + "tls_hostname": { + "type": "str", + "fallback": (env_fallback, ["DOCKER_TLS_HOSTNAME"]), + }, + "api_version": { + "type": "str", + "default": "auto", + "fallback": (env_fallback, ["DOCKER_API_VERSION"]), + "aliases": ["docker_api_version"], + }, + "ca_path": {"type": "path", "aliases": ["ca_cert", "tls_ca_cert", "cacert_path"]}, + "client_cert": {"type": "path", "aliases": ["tls_client_cert", "cert_path"]}, + "client_key": {"type": "path", "aliases": ["tls_client_key", "key_path"]}, + "tls": { + "type": "bool", + "default": DEFAULT_TLS, + "fallback": (env_fallback, ["DOCKER_TLS"]), + }, + "validate_certs": { + "type": "bool", + "default": DEFAULT_TLS_VERIFY, + "fallback": (env_fallback, ["DOCKER_TLS_VERIFY"]), + "aliases": ["tls_verify"], + }, + # "debug": {"type": "bool", "default": False}, + "cli_context": {"type": "str"}, +} class DockerException(Exception): @@ -327,7 +336,7 @@ class AnsibleModuleDockerClient(AnsibleDockerClientBase): # in case client.fail() is called. 
self.fail_results = fail_results or {} - merged_arg_spec = dict() + merged_arg_spec = {} merged_arg_spec.update(DOCKER_COMMON_ARGS) if argument_spec: merged_arg_spec.update(argument_spec) diff --git a/plugins/module_utils/_compose_v2.py b/plugins/module_utils/_compose_v2.py index 3631a5c4..de78796a 100644 --- a/plugins/module_utils/_compose_v2.py +++ b/plugins/module_utils/_compose_v2.py @@ -691,28 +691,28 @@ def update_failed(result, events, args, stdout, stderr, rc, cli): def common_compose_argspec(): - return dict( - project_src=dict(type="path"), - project_name=dict(type="str"), - files=dict(type="list", elements="path"), - definition=dict(type="dict"), - env_files=dict(type="list", elements="path"), - profiles=dict(type="list", elements="str"), - check_files_existing=dict(type="bool", default=True), - ) + return { + "project_src": {"type": "path"}, + "project_name": {"type": "str"}, + "files": {"type": "list", "elements": "path"}, + "definition": {"type": "dict"}, + "env_files": {"type": "list", "elements": "path"}, + "profiles": {"type": "list", "elements": "str"}, + "check_files_existing": {"type": "bool", "default": True}, + } def common_compose_argspec_ex(): - return dict( - argspec=common_compose_argspec(), - mutually_exclusive=[("definition", "project_src"), ("definition", "files")], - required_one_of=[ + return { + "argspec": common_compose_argspec(), + "mutually_exclusive": [("definition", "project_src"), ("definition", "files")], + "required_one_of": [ ("definition", "project_src"), ], - required_by={ + "required_by": { "definition": ("project_name",), }, - ) + } def combine_binary_output(*outputs): @@ -794,7 +794,7 @@ class BaseComposeManager(DockerBaseClass): ) def get_compose_version_from_cli(self): - rc, version_info, stderr = self.client.call_cli( + rc, version_info, dummy_stderr = self.client.call_cli( "compose", "version", "--format", "json" ) if rc: @@ -853,7 +853,7 @@ class BaseComposeManager(DockerBaseClass): if self.compose_version >= 
LooseVersion("2.23.0"): # https://github.com/docker/compose/pull/11038 args.append("--no-trunc") - kwargs = dict(cwd=self.project_src, check_rc=not self.use_json_events) + kwargs = {"cwd": self.project_src, "check_rc": not self.use_json_events} if self.compose_version >= LooseVersion("2.21.0"): # Breaking change in 2.21.0: https://github.com/docker/compose/pull/10918 rc, containers, stderr = self.client.call_cli_json_stream(*args, **kwargs) @@ -882,7 +882,7 @@ class BaseComposeManager(DockerBaseClass): def list_images(self): args = self.get_base_args() + ["images", "--format", "json"] - kwargs = dict(cwd=self.project_src, check_rc=not self.use_json_events) + kwargs = {"cwd": self.project_src, "check_rc": not self.use_json_events} rc, images, stderr = self.client.call_cli_json(*args, **kwargs) if self.use_json_events and rc != 0: self._handle_failed_cli_call(args, rc, images, stderr) diff --git a/plugins/module_utils/_copy.py b/plugins/module_utils/_copy.py index 2641b63f..6ee8479a 100644 --- a/plugins/module_utils/_copy.py +++ b/plugins/module_utils/_copy.py @@ -285,7 +285,7 @@ def stat_file(client, container, in_path, follow_links=False, log=None): except Exception as exc: raise DockerUnexpectedError( f"When retrieving information for {in_path} from {container}, obtained header {header!r} that cannot be loaded as JSON: {exc}" - ) + ) from exc # https://pkg.go.dev/io/fs#FileMode: bit 32 - 5 means ModeSymlink if stat_data["mode"] & (1 << (32 - 5)) != 0: @@ -500,7 +500,7 @@ def _execute_command(client, container, command, log=None, check_rc=False): def determine_user_group(client, container, log=None): - dummy, stdout, stderr = _execute_command( + dummy_rc, stdout, dummy_stderr = _execute_command( client, container, ["/bin/sh", "-c", "id -u && id -g"], check_rc=True, log=log ) @@ -513,7 +513,7 @@ def determine_user_group(client, container, log=None): user_id, group_id = stdout_lines try: return int(user_id), int(group_id) - except ValueError: + except ValueError as 
exc: raise DockerUnexpectedError( f'Expected two-line output with numeric IDs to obtain user and group ID for container {container}, but got "{user_id}" and "{group_id}" instead' - ) + ) from exc diff --git a/plugins/module_utils/_logfmt.py b/plugins/module_utils/_logfmt.py index 362ae4ff..c048966b 100644 --- a/plugins/module_utils/_logfmt.py +++ b/plugins/module_utils/_logfmt.py @@ -96,14 +96,14 @@ class _Parser: if self.line[self.index : self.index + 2] != "\\u": raise InvalidLogFmt("Invalid unicode escape start") v = 0 - for i in range(self.index + 2, self.index + 6): + for dummy_index in range(self.index + 2, self.index + 6): v <<= 4 try: v += _HEX_DICT[self.line[self.index]] except KeyError: raise InvalidLogFmt( f"Invalid unicode escape digit {self.line[self.index]!r}" - ) + ) from None self.index += 6 return chr(v) diff --git a/plugins/module_utils/_module_container/base.py b/plugins/module_utils/_module_container/base.py index ca4c80ef..50e57b23 100644 --- a/plugins/module_utils/_module_container/base.py +++ b/plugins/module_utils/_module_container/base.py @@ -37,21 +37,21 @@ _DEFAULT_IP_REPLACEMENT_STRING = ( ) -_MOUNT_OPTION_TYPES = dict( - create_mountpoint=("bind",), - labels=("volume",), - no_copy=("volume",), - non_recursive=("bind",), - propagation=("bind",), - read_only_force_recursive=("bind",), - read_only_non_recursive=("bind",), - subpath=("volume", "image"), - tmpfs_size=("tmpfs",), - tmpfs_mode=("tmpfs",), - tmpfs_options=("tmpfs",), - volume_driver=("volume",), - volume_options=("volume",), -) +_MOUNT_OPTION_TYPES = { + "create_mountpoint": ("bind",), + "labels": ("volume",), + "no_copy": ("volume",), + "non_recursive": ("bind",), + "propagation": ("bind",), + "read_only_force_recursive": ("bind",), + "read_only_non_recursive": ("bind",), + "subpath": ("volume", "image"), + "tmpfs_size": ("tmpfs",), + "tmpfs_mode": ("tmpfs",), + "tmpfs_options": ("tmpfs",), + "volume_driver": ("volume",), + "volume_options": ("volume",), +} def 
_get_ansible_type(value_type): @@ -629,7 +629,7 @@ def _preprocess_ulimits(module, values): return values result = [] for limit in values["ulimits"]: - limits = dict() + limits = {} pieces = limit.split(":") if len(pieces) >= 2: limits["Name"] = pieces[0] @@ -644,7 +644,7 @@ def _preprocess_ulimits(module, values): def _preprocess_mounts(module, values): - last = dict() + last = {} def check_collision(t, name): if t in last: @@ -970,53 +970,53 @@ OPTION_DEVICE_READ_BPS = OptionGroup().add_option( "device_read_bps", value_type="set", elements="dict", - ansible_suboptions=dict( - path=dict(required=True, type="str"), - rate=dict(required=True, type="str"), - ), + ansible_suboptions={ + "path": {"type": "str", "required": True}, + "rate": {"type": "str", "required": True}, + }, ) OPTION_DEVICE_WRITE_BPS = OptionGroup().add_option( "device_write_bps", value_type="set", elements="dict", - ansible_suboptions=dict( - path=dict(required=True, type="str"), - rate=dict(required=True, type="str"), - ), + ansible_suboptions={ + "path": {"type": "str", "required": True}, + "rate": {"type": "str", "required": True}, + }, ) OPTION_DEVICE_READ_IOPS = OptionGroup().add_option( "device_read_iops", value_type="set", elements="dict", - ansible_suboptions=dict( - path=dict(required=True, type="str"), - rate=dict(required=True, type="int"), - ), + ansible_suboptions={ + "path": {"type": "str", "required": True}, + "rate": {"type": "int", "required": True}, + }, ) OPTION_DEVICE_WRITE_IOPS = OptionGroup().add_option( "device_write_iops", value_type="set", elements="dict", - ansible_suboptions=dict( - path=dict(required=True, type="str"), - rate=dict(required=True, type="int"), - ), + ansible_suboptions={ + "path": {"type": "str", "required": True}, + "rate": {"type": "int", "required": True}, + }, ) OPTION_DEVICE_REQUESTS = OptionGroup().add_option( "device_requests", value_type="set", elements="dict", - ansible_suboptions=dict( - capabilities=dict(type="list", elements="list"), - 
count=dict(type="int"), - device_ids=dict(type="list", elements="str"), - driver=dict(type="str"), - options=dict(type="dict"), - ), + ansible_suboptions={ + "capabilities": {"type": "list", "elements": "list"}, + "count": {"type": "int"}, + "device_ids": {"type": "list", "elements": "str"}, + "driver": {"type": "str"}, + "options": {"type": "dict"}, + }, ) OPTION_DEVICE_CGROUP_RULES = OptionGroup().add_option( @@ -1066,15 +1066,15 @@ OPTION_GROUPS = OptionGroup().add_option("groups", value_type="set", elements="s OPTION_HEALTHCHECK = OptionGroup(preprocess=_preprocess_healthcheck).add_option( "healthcheck", value_type="dict", - ansible_suboptions=dict( - test=dict(type="raw"), - test_cli_compatible=dict(type="bool", default=False), - interval=dict(type="str"), - timeout=dict(type="str"), - start_period=dict(type="str"), - start_interval=dict(type="str"), - retries=dict(type="int"), - ), + ansible_suboptions={ + "test": {"type": "raw"}, + "test_cli_compatible": {"type": "bool", "default": False}, + "interval": {"type": "str"}, + "timeout": {"type": "str"}, + "start_period": {"type": "str"}, + "start_interval": {"type": "str"}, + "retries": {"type": "int"}, + }, ) OPTION_HOSTNAME = OptionGroup().add_option("hostname", value_type="str") @@ -1143,16 +1143,16 @@ OPTION_NETWORK = ( "networks", value_type="set", elements="dict", - ansible_suboptions=dict( - name=dict(type="str", required=True), - ipv4_address=dict(type="str"), - ipv6_address=dict(type="str"), - aliases=dict(type="list", elements="str"), - links=dict(type="list", elements="str"), - mac_address=dict(type="str"), - driver_opts=dict(type="dict"), - gw_priority=dict(type="int"), - ), + ansible_suboptions={ + "name": {"type": "str", "required": True}, + "ipv4_address": {"type": "str"}, + "ipv6_address": {"type": "str"}, + "aliases": {"type": "list", "elements": "str"}, + "links": {"type": "list", "elements": "str"}, + "mac_address": {"type": "str"}, + "driver_opts": {"type": "dict"}, + "gw_priority": {"type": 
"int"}, + }, ) ) @@ -1232,35 +1232,43 @@ OPTION_MOUNTS_VOLUMES = ( "mounts", value_type="set", elements="dict", - ansible_suboptions=dict( - target=dict(type="str", required=True), - source=dict(type="str"), - type=dict( - type="str", - choices=["bind", "volume", "tmpfs", "npipe", "cluster", "image"], - default="volume", - ), - read_only=dict(type="bool"), - consistency=dict( - type="str", choices=["default", "consistent", "cached", "delegated"] - ), - propagation=dict( - type="str", - choices=["private", "rprivate", "shared", "rshared", "slave", "rslave"], - ), - no_copy=dict(type="bool"), - labels=dict(type="dict"), - volume_driver=dict(type="str"), - volume_options=dict(type="dict"), - tmpfs_size=dict(type="str"), - tmpfs_mode=dict(type="str"), - non_recursive=dict(type="bool"), - create_mountpoint=dict(type="bool"), - read_only_non_recursive=dict(type="bool"), - read_only_force_recursive=dict(type="bool"), - subpath=dict(type="str"), - tmpfs_options=dict(type="list", elements="dict"), - ), + ansible_suboptions={ + "target": {"type": "str", "required": True}, + "source": {"type": "str"}, + "type": { + "type": "str", + "choices": ["bind", "volume", "tmpfs", "npipe", "cluster", "image"], + "default": "volume", + }, + "read_only": {"type": "bool"}, + "consistency": { + "type": "str", + "choices": ["default", "consistent", "cached", "delegated"], + }, + "propagation": { + "type": "str", + "choices": [ + "private", + "rprivate", + "shared", + "rshared", + "slave", + "rslave", + ], + }, + "no_copy": {"type": "bool"}, + "labels": {"type": "dict"}, + "volume_driver": {"type": "str"}, + "volume_options": {"type": "dict"}, + "tmpfs_size": {"type": "str"}, + "tmpfs_mode": {"type": "str"}, + "non_recursive": {"type": "bool"}, + "create_mountpoint": {"type": "bool"}, + "read_only_non_recursive": {"type": "bool"}, + "read_only_force_recursive": {"type": "bool"}, + "subpath": {"type": "str"}, + "tmpfs_options": {"type": "list", "elements": "dict"}, + }, ) 
.add_option("volumes", value_type="set", elements="str") .add_option( diff --git a/plugins/module_utils/_module_container/docker_api.py b/plugins/module_utils/_module_container/docker_api.py index 95b286c8..607e4f7a 100644 --- a/plugins/module_utils/_module_container/docker_api.py +++ b/plugins/module_utils/_module_container/docker_api.py @@ -398,13 +398,15 @@ class DockerAPIEngineDriver(EngineDriver): # New docker daemon versions do not allow containers to be removed # if they are paused. Make sure we do not end up in an infinite loop. if count == 3: - raise RuntimeError(f"{exc} [tried to unpause three times]") + raise RuntimeError( + f"{exc} [tried to unpause three times]" + ) from exc count += 1 # Unpause try: self.unpause_container(client, container_id) except Exception as exc2: - raise RuntimeError(f"{exc2} [while unpausing]") + raise RuntimeError(f"{exc2} [while unpausing]") from exc2 # Now try again continue raise @@ -429,13 +431,15 @@ class DockerAPIEngineDriver(EngineDriver): # New docker daemon versions do not allow containers to be removed # if they are paused. Make sure we do not end up in an infinite loop. 
if count == 3: - raise RuntimeError(f"{exc} [tried to unpause three times]") + raise RuntimeError( + f"{exc} [tried to unpause three times]" + ) from exc count += 1 # Unpause try: self.unpause_container(client, container_id) except Exception as exc2: - raise RuntimeError(f"{exc2} [while unpausing]") + raise RuntimeError(f"{exc2} [while unpausing]") from exc2 # Now try again continue if ( @@ -817,28 +821,28 @@ def _preprocess_devices(module, client, api_version, value): parts = device.split(":") if len(parts) == 1: expected_devices.append( - dict( - CgroupPermissions="rwm", - PathInContainer=parts[0], - PathOnHost=parts[0], - ) + { + "CgroupPermissions": "rwm", + "PathInContainer": parts[0], + "PathOnHost": parts[0], + } ) elif len(parts) == 2: parts = device.split(":") expected_devices.append( - dict( - CgroupPermissions="rwm", - PathInContainer=parts[1], - PathOnHost=parts[0], - ) + { + "CgroupPermissions": "rwm", + "PathInContainer": parts[1], + "PathOnHost": parts[0], + } ) else: expected_devices.append( - dict( - CgroupPermissions=parts[2], - PathInContainer=parts[1], - PathOnHost=parts[0], - ) + { + "CgroupPermissions": parts[2], + "PathInContainer": parts[1], + "PathOnHost": parts[0], + } ) return expected_devices @@ -1186,7 +1190,7 @@ def _get_expected_values_mounts( expected_values["mounts"] = values["mounts"] # volumes - expected_vols = dict() + expected_vols = {} if image and image["Config"].get("Volumes"): expected_vols.update(image["Config"].get("Volumes")) if "volumes" in values: @@ -1400,9 +1404,7 @@ def _get_values_ports(module, container, api_version, options, image, host_info) # "ExposedPorts": null returns None type & causes AttributeError - PR #5517 if config.get("ExposedPorts") is not None: - expected_exposed = [ - _normalize_port(p) for p in config.get("ExposedPorts", dict()).keys() - ] + expected_exposed = [_normalize_port(p) for p in config.get("ExposedPorts", {})] else: expected_exposed = [] diff --git 
a/plugins/module_utils/_module_container/module.py b/plugins/module_utils/_module_container/module.py index 535ed875..486dfdc9 100644 --- a/plugins/module_utils/_module_container/module.py +++ b/plugins/module_utils/_module_container/module.py @@ -42,7 +42,7 @@ class Container(DockerBaseClass): @property def exists(self): - return True if self.container else False + return bool(self.container) @property def removing(self): @@ -232,17 +232,17 @@ class ContainerManager(DockerBaseClass): "name" ] if self.param_container_default_behavior == "compatibility": - old_default_values = dict( - auto_remove=False, - detach=True, - init=False, - interactive=False, - memory="0", - paused=False, - privileged=False, - read_only=False, - tty=False, - ) + old_default_values = { + "auto_remove": False, + "detach": True, + "init": False, + "interactive": False, + "memory": "0", + "paused": False, + "privileged": False, + "read_only": False, + "tty": False, + } for param, value in old_default_values.items(): if self.module.params[param] is None: self.module.params[param] = value @@ -487,7 +487,7 @@ class ContainerManager(DockerBaseClass): ) container = self._get_container(container.id) self.results["changed"] = True - self.results["actions"].append(dict(set_paused=self.param_paused)) + self.results["actions"].append({"set_paused": self.param_paused}) self.facts = container.raw @@ -577,19 +577,19 @@ class ContainerManager(DockerBaseClass): if already_to_latest: self.results["changed"] = False self.results["actions"].append( - dict(pulled_image=f"{repository}:{tag}", changed=False) + {"pulled_image": f"{repository}:{tag}", "changed": False} ) else: self.results["changed"] = True self.results["actions"].append( - dict(pulled_image=f"{repository}:{tag}", changed=True) + {"pulled_image": f"{repository}:{tag}", "changed": True} ) elif not image or self.param_pull_check_mode_behavior == "always": # If the image is not there, or pull_check_mode_behavior == 'always', claim we'll # pull. 
(Implicitly: if the image is there, claim it already was latest unless # pull_check_mode_behavior == 'always'.) self.results["changed"] = True - action = dict(pulled_image=f"{repository}:{tag}") + action = {"pulled_image": f"{repository}:{tag}"} if not image: action["changed"] = True self.results["actions"].append(action) @@ -817,7 +817,7 @@ class ContainerManager(DockerBaseClass): network_info = connected_networks.get(network["name"]) if network_info is None: different = True - differences.append(dict(parameter=network, container=None)) + differences.append({"parameter": network, "container": None}) else: diff = False network_info_ipam = network_info.get("IPAMConfig") or {} @@ -855,17 +855,17 @@ class ContainerManager(DockerBaseClass): if diff: different = True differences.append( - dict( - parameter=network, - container=dict( - name=network["name"], - ipv4_address=network_info_ipam.get("IPv4Address"), - ipv6_address=network_info_ipam.get("IPv6Address"), - aliases=network_info.get("Aliases"), - links=network_info.get("Links"), - mac_address=network_info.get("MacAddress"), - ), - ) + { + "parameter": network, + "container": { + "name": network["name"], + "ipv4_address": network_info_ipam.get("IPv4Address"), + "ipv6_address": network_info_ipam.get("IPv6Address"), + "aliases": network_info.get("Aliases"), + "links": network_info.get("Links"), + "mac_address": network_info.get("MacAddress"), + }, + } ) return different, differences @@ -892,7 +892,7 @@ class ContainerManager(DockerBaseClass): if not keep: extra = True extra_networks.append( - dict(name=network, id=network_config["NetworkID"]) + {"name": network, "id": network_config["NetworkID"]} ) return extra, extra_networks @@ -905,11 +905,11 @@ class ContainerManager(DockerBaseClass): if has_network_differences: if self.diff.get("differences"): self.diff["differences"].append( - dict(network_differences=network_differences) + {"network_differences": network_differences} ) else: self.diff["differences"] = [ - 
dict(network_differences=network_differences) + {"network_differences": network_differences} ] for netdiff in network_differences: self.diff_tracker.add( @@ -928,9 +928,9 @@ class ContainerManager(DockerBaseClass): has_extra_networks, extra_networks = self.has_extra_networks(container) if has_extra_networks: if self.diff.get("differences"): - self.diff["differences"].append(dict(purge_networks=extra_networks)) + self.diff["differences"].append({"purge_networks": extra_networks}) else: - self.diff["differences"] = [dict(purge_networks=extra_networks)] + self.diff["differences"] = [{"purge_networks": extra_networks}] for extra_network in extra_networks: self.diff_tracker.add( f"network.{extra_network['name']}", active=extra_network @@ -944,7 +944,7 @@ class ContainerManager(DockerBaseClass): # remove the container from the network, if connected if diff.get("container"): self.results["actions"].append( - dict(removed_from_network=diff["parameter"]["name"]) + {"removed_from_network": diff["parameter"]["name"]} ) if not self.check_mode: try: @@ -957,10 +957,10 @@ class ContainerManager(DockerBaseClass): ) # connect to the network self.results["actions"].append( - dict( - added_to_network=diff["parameter"]["name"], - network_parameters=diff["parameter"], - ) + { + "added_to_network": diff["parameter"]["name"], + "network_parameters": diff["parameter"], + } ) if not self.check_mode: params = { @@ -984,7 +984,7 @@ class ContainerManager(DockerBaseClass): def _purge_networks(self, container, networks): for network in networks: - self.results["actions"].append(dict(removed_from_network=network["name"])) + self.results["actions"].append({"removed_from_network": network["name"]}) if not self.check_mode: try: self.engine_driver.disconnect_container_from_network( @@ -1015,11 +1015,11 @@ class ContainerManager(DockerBaseClass): if key not in ("name", "id") } self.results["actions"].append( - dict( - created="Created container", - create_parameters=create_parameters, - 
networks=networks, - ) + { + "created": "Created container", + "create_parameters": create_parameters, + "networks": networks, + } ) self.results["changed"] = True new_container = None @@ -1035,7 +1035,7 @@ class ContainerManager(DockerBaseClass): def container_start(self, container_id): self.log(f"start container {container_id}") - self.results["actions"].append(dict(started=container_id)) + self.results["actions"].append({"started": container_id}) self.results["changed"] = True if not self.check_mode: try: @@ -1069,7 +1069,7 @@ class ContainerManager(DockerBaseClass): if insp.raw: insp.raw["Output"] = output else: - insp.raw = dict(Output=output) + insp.raw = {"Output": output} if status != 0: # Set `failed` to True and return output as msg self.results["failed"] = True @@ -1083,9 +1083,12 @@ class ContainerManager(DockerBaseClass): f"remove container container:{container_id} v:{volume_state} link:{link} force{force}" ) self.results["actions"].append( - dict( - removed=container_id, volume_state=volume_state, link=link, force=force - ) + { + "removed": container_id, + "volume_state": volume_state, + "link": link, + "force": force, + } ) self.results["changed"] = True if not self.check_mode: @@ -1105,7 +1108,7 @@ class ContainerManager(DockerBaseClass): self.log(f"update container {container_id}") self.log(update_parameters, pretty_print=True) self.results["actions"].append( - dict(updated=container_id, update_parameters=update_parameters) + {"updated": container_id, "update_parameters": update_parameters} ) self.results["changed"] = True if not self.check_mode: @@ -1119,7 +1122,7 @@ class ContainerManager(DockerBaseClass): def container_kill(self, container_id): self.results["actions"].append( - dict(killed=container_id, signal=self.param_kill_signal) + {"killed": container_id, "signal": self.param_kill_signal} ) self.results["changed"] = True if not self.check_mode: @@ -1132,7 +1135,7 @@ class ContainerManager(DockerBaseClass): def container_restart(self, 
container_id): self.results["actions"].append( - dict(restarted=container_id, timeout=self.module.params["stop_timeout"]) + {"restarted": container_id, "timeout": self.module.params["stop_timeout"]} ) self.results["changed"] = True if not self.check_mode: @@ -1149,7 +1152,7 @@ class ContainerManager(DockerBaseClass): self.container_kill(container_id) return self.results["actions"].append( - dict(stopped=container_id, timeout=self.module.params["stop_timeout"]) + {"stopped": container_id, "timeout": self.module.params["stop_timeout"]} ) self.results["changed"] = True if not self.check_mode: @@ -1163,57 +1166,63 @@ class ContainerManager(DockerBaseClass): def run_module(engine_driver): module, active_options, client = engine_driver.setup( - argument_spec=dict( - cleanup=dict(type="bool", default=False), - comparisons=dict(type="dict"), - container_default_behavior=dict( - type="str", - default="no_defaults", - choices=["compatibility", "no_defaults"], - ), - command_handling=dict( - type="str", choices=["compatibility", "correct"], default="correct" - ), - default_host_ip=dict(type="str"), - force_kill=dict(type="bool", default=False, aliases=["forcekill"]), - image=dict(type="str"), - image_comparison=dict( - type="str", - choices=["desired-image", "current-image"], - default="desired-image", - ), - image_label_mismatch=dict( - type="str", choices=["ignore", "fail"], default="ignore" - ), - image_name_mismatch=dict( - type="str", choices=["ignore", "recreate"], default="recreate" - ), - keep_volumes=dict(type="bool", default=True), - kill_signal=dict(type="str"), - name=dict(type="str", required=True), - networks_cli_compatible=dict(type="bool", default=True), - output_logs=dict(type="bool", default=False), - paused=dict(type="bool"), - pull=dict( - type="raw", - choices=["never", "missing", "always", True, False], - default="missing", - ), - pull_check_mode_behavior=dict( - type="str", - choices=["image_not_present", "always"], - default="image_not_present", - ), - 
recreate=dict(type="bool", default=False), - removal_wait_timeout=dict(type="float"), - restart=dict(type="bool", default=False), - state=dict( - type="str", - default="started", - choices=["absent", "present", "healthy", "started", "stopped"], - ), - healthy_wait_timeout=dict(type="float", default=300), - ), + argument_spec={ + "cleanup": {"type": "bool", "default": False}, + "comparisons": {"type": "dict"}, + "container_default_behavior": { + "type": "str", + "default": "no_defaults", + "choices": ["compatibility", "no_defaults"], + }, + "command_handling": { + "type": "str", + "choices": ["compatibility", "correct"], + "default": "correct", + }, + "default_host_ip": {"type": "str"}, + "force_kill": {"type": "bool", "default": False, "aliases": ["forcekill"]}, + "image": {"type": "str"}, + "image_comparison": { + "type": "str", + "choices": ["desired-image", "current-image"], + "default": "desired-image", + }, + "image_label_mismatch": { + "type": "str", + "choices": ["ignore", "fail"], + "default": "ignore", + }, + "image_name_mismatch": { + "type": "str", + "choices": ["ignore", "recreate"], + "default": "recreate", + }, + "keep_volumes": {"type": "bool", "default": True}, + "kill_signal": {"type": "str"}, + "name": {"type": "str", "required": True}, + "networks_cli_compatible": {"type": "bool", "default": True}, + "output_logs": {"type": "bool", "default": False}, + "paused": {"type": "bool"}, + "pull": { + "type": "raw", + "choices": ["never", "missing", "always", True, False], + "default": "missing", + }, + "pull_check_mode_behavior": { + "type": "str", + "choices": ["image_not_present", "always"], + "default": "image_not_present", + }, + "recreate": {"type": "bool", "default": False}, + "removal_wait_timeout": {"type": "float"}, + "restart": {"type": "bool", "default": False}, + "state": { + "type": "str", + "default": "started", + "choices": ["absent", "present", "healthy", "started", "stopped"], + }, + "healthy_wait_timeout": {"type": "float", "default": 
300}, + }, required_if=[ ("state", "present", ["image"]), ], diff --git a/plugins/module_utils/_util.py b/plugins/module_utils/_util.py index ec01a299..5022c582 100644 --- a/plugins/module_utils/_util.py +++ b/plugins/module_utils/_util.py @@ -23,38 +23,45 @@ DEFAULT_TLS_VERIFY = False DEFAULT_TLS_HOSTNAME = "localhost" # deprecated DEFAULT_TIMEOUT_SECONDS = 60 -DOCKER_COMMON_ARGS = dict( - docker_host=dict( - type="str", - default=DEFAULT_DOCKER_HOST, - fallback=(env_fallback, ["DOCKER_HOST"]), - aliases=["docker_url"], - ), - tls_hostname=dict(type="str", fallback=(env_fallback, ["DOCKER_TLS_HOSTNAME"])), - api_version=dict( - type="str", - default="auto", - fallback=(env_fallback, ["DOCKER_API_VERSION"]), - aliases=["docker_api_version"], - ), - timeout=dict( - type="int", - default=DEFAULT_TIMEOUT_SECONDS, - fallback=(env_fallback, ["DOCKER_TIMEOUT"]), - ), - ca_path=dict(type="path", aliases=["ca_cert", "tls_ca_cert", "cacert_path"]), - client_cert=dict(type="path", aliases=["tls_client_cert", "cert_path"]), - client_key=dict(type="path", aliases=["tls_client_key", "key_path"]), - tls=dict(type="bool", default=DEFAULT_TLS, fallback=(env_fallback, ["DOCKER_TLS"])), - use_ssh_client=dict(type="bool", default=False), - validate_certs=dict( - type="bool", - default=DEFAULT_TLS_VERIFY, - fallback=(env_fallback, ["DOCKER_TLS_VERIFY"]), - aliases=["tls_verify"], - ), - debug=dict(type="bool", default=False), -) +DOCKER_COMMON_ARGS = { + "docker_host": { + "type": "str", + "default": DEFAULT_DOCKER_HOST, + "fallback": (env_fallback, ["DOCKER_HOST"]), + "aliases": ["docker_url"], + }, + "tls_hostname": { + "type": "str", + "fallback": (env_fallback, ["DOCKER_TLS_HOSTNAME"]), + }, + "api_version": { + "type": "str", + "default": "auto", + "fallback": (env_fallback, ["DOCKER_API_VERSION"]), + "aliases": ["docker_api_version"], + }, + "timeout": { + "type": "int", + "default": DEFAULT_TIMEOUT_SECONDS, + "fallback": (env_fallback, ["DOCKER_TIMEOUT"]), + }, + "ca_path": 
{"type": "path", "aliases": ["ca_cert", "tls_ca_cert", "cacert_path"]}, + "client_cert": {"type": "path", "aliases": ["tls_client_cert", "cert_path"]}, + "client_key": {"type": "path", "aliases": ["tls_client_key", "key_path"]}, + "tls": { + "type": "bool", + "default": DEFAULT_TLS, + "fallback": (env_fallback, ["DOCKER_TLS"]), + }, + "use_ssh_client": {"type": "bool", "default": False}, + "validate_certs": { + "type": "bool", + "default": DEFAULT_TLS_VERIFY, + "fallback": (env_fallback, ["DOCKER_TLS_VERIFY"]), + "aliases": ["tls_verify"], + }, + "debug": {"type": "bool", "default": False}, +} DOCKER_COMMON_ARGS_VARS = { option_name: f"ansible_docker_{option_name}" @@ -245,11 +252,11 @@ class DifferenceTracker: def add(self, name, parameter=None, active=None): self._diff.append( - dict( - name=name, - parameter=parameter, - active=active, - ) + { + "name": name, + "parameter": parameter, + "active": active, + } ) def merge(self, other_tracker): @@ -263,8 +270,8 @@ class DifferenceTracker: """ Return texts ``before`` and ``after``. """ - before = dict() - after = dict() + before = {} + after = {} for item in self._diff: before[item["name"]] = item["active"] after[item["name"]] = item["parameter"] @@ -282,11 +289,11 @@ class DifferenceTracker: """ result = [] for entry in self._diff: - item = dict() - item[entry["name"]] = dict( - parameter=entry["parameter"], - container=entry["active"], - ) + item = {} + item[entry["name"]] = { + "parameter": entry["parameter"], + "container": entry["active"], + } result.append(item) return result @@ -335,7 +342,7 @@ def clean_dict_booleans_for_docker_api(data, allow_sequences=False): return "false" return str(value) - result = dict() + result = {} if data is not None: for k, v in data.items(): result[str(k)] = ( @@ -389,7 +396,7 @@ def normalize_healthcheck(healthcheck, normalize_test=False): """ Return dictionary of healthcheck parameters. 
""" - result = dict() + result = {} # All supported healthcheck parameters options = ( @@ -420,10 +427,10 @@ def normalize_healthcheck(healthcheck, normalize_test=False): if key == "retries": try: value = int(value) - except ValueError: + except ValueError as exc: raise ValueError( f'Cannot parse number of retries for healthcheck. Expected an integer, got "{value}".' - ) + ) from exc if key == "test" and value and normalize_test: value = normalize_healthcheck_test(value) result[key] = value diff --git a/plugins/modules/current_container_facts.py b/plugins/modules/current_container_facts.py index 2a4139da..d6ad8978 100644 --- a/plugins/modules/current_container_facts.py +++ b/plugins/modules/current_container_facts.py @@ -80,7 +80,7 @@ from ansible.module_utils.basic import AnsibleModule def main(): - module = AnsibleModule(dict(), supports_check_mode=True) + module = AnsibleModule({}, supports_check_mode=True) cpuset_path = "/proc/self/cpuset" mountinfo_path = "/proc/self/mountinfo" @@ -136,11 +136,11 @@ def main(): container_type = "podman" module.exit_json( - ansible_facts=dict( - ansible_module_running_in_container=container_id != "", - ansible_module_container_id=container_id, - ansible_module_container_type=container_type, - ) + ansible_facts={ + "ansible_module_running_in_container": container_id != "", + "ansible_module_container_id": container_id, + "ansible_module_container_type": container_type, + } ) diff --git a/plugins/modules/docker_compose_v2.py b/plugins/modules/docker_compose_v2.py index e4d748d9..3cd1e09f 100644 --- a/plugins/modules/docker_compose_v2.py +++ b/plugins/modules/docker_compose_v2.py @@ -550,7 +550,7 @@ class ServicesManager(BaseComposeManager): return args def cmd_up(self): - result = dict() + result = {} args = self.get_up_cmd(self.check_mode) rc, stdout, stderr = self.client.call_cli(*args, cwd=self.project_src) events = self.parse_events(stderr, dry_run=self.check_mode, nonzero_rc=rc != 0) @@ -587,7 +587,7 @@ class 
ServicesManager(BaseComposeManager): # Since 'docker compose stop' **always** claims it is stopping containers, even if they are already # stopped, we have to do this a bit more complicated. - result = dict() + result = {} # Make sure all containers are created args_1 = self.get_up_cmd(self.check_mode, no_start=True) rc_1, stdout_1, stderr_1 = self.client.call_cli(*args_1, cwd=self.project_src) @@ -644,7 +644,7 @@ class ServicesManager(BaseComposeManager): return args def cmd_restart(self): - result = dict() + result = {} args = self.get_restart_cmd(self.check_mode) rc, stdout, stderr = self.client.call_cli(*args, cwd=self.project_src) events = self.parse_events(stderr, dry_run=self.check_mode, nonzero_rc=rc != 0) @@ -671,7 +671,7 @@ class ServicesManager(BaseComposeManager): return args def cmd_down(self): - result = dict() + result = {} args = self.get_down_cmd(self.check_mode) rc, stdout, stderr = self.client.call_cli(*args, cwd=self.project_src) events = self.parse_events(stderr, dry_run=self.check_mode, nonzero_rc=rc != 0) @@ -682,32 +682,40 @@ class ServicesManager(BaseComposeManager): def main(): - argument_spec = dict( - state=dict( - type="str", - default="present", - choices=["absent", "present", "stopped", "restarted"], - ), - dependencies=dict(type="bool", default=True), - pull=dict( - type="str", - choices=["always", "missing", "never", "policy"], - default="policy", - ), - build=dict(type="str", choices=["always", "never", "policy"], default="policy"), - recreate=dict(type="str", default="auto", choices=["always", "never", "auto"]), - renew_anon_volumes=dict(type="bool", default=False), - remove_images=dict(type="str", choices=["all", "local"]), - remove_volumes=dict(type="bool", default=False), - remove_orphans=dict(type="bool", default=False), - timeout=dict(type="int"), - services=dict(type="list", elements="str"), - scale=dict(type="dict"), - wait=dict(type="bool", default=False), - wait_timeout=dict(type="int"), - 
ignore_build_events=dict(type="bool", default=True), - assume_yes=dict(type="bool", default=False), - ) + argument_spec = { + "state": { + "type": "str", + "default": "present", + "choices": ["absent", "present", "stopped", "restarted"], + }, + "dependencies": {"type": "bool", "default": True}, + "pull": { + "type": "str", + "choices": ["always", "missing", "never", "policy"], + "default": "policy", + }, + "build": { + "type": "str", + "choices": ["always", "never", "policy"], + "default": "policy", + }, + "recreate": { + "type": "str", + "default": "auto", + "choices": ["always", "never", "auto"], + }, + "renew_anon_volumes": {"type": "bool", "default": False}, + "remove_images": {"type": "str", "choices": ["all", "local"]}, + "remove_volumes": {"type": "bool", "default": False}, + "remove_orphans": {"type": "bool", "default": False}, + "timeout": {"type": "int"}, + "services": {"type": "list", "elements": "str"}, + "scale": {"type": "dict"}, + "wait": {"type": "bool", "default": False}, + "wait_timeout": {"type": "int"}, + "ignore_build_events": {"type": "bool", "default": True}, + "assume_yes": {"type": "bool", "default": False}, + } argspec_ex = common_compose_argspec_ex() argument_spec.update(argspec_ex.pop("argspec")) diff --git a/plugins/modules/docker_compose_v2_exec.py b/plugins/modules/docker_compose_v2_exec.py index 985637f5..e7836bd0 100644 --- a/plugins/modules/docker_compose_v2_exec.py +++ b/plugins/modules/docker_compose_v2_exec.py @@ -263,21 +263,21 @@ class ExecManager(BaseComposeManager): def main(): - argument_spec = dict( - service=dict(type="str", required=True), - index=dict(type="int"), - argv=dict(type="list", elements="str"), - command=dict(type="str"), - chdir=dict(type="str"), - detach=dict(type="bool", default=False), - user=dict(type="str"), - stdin=dict(type="str"), - stdin_add_newline=dict(type="bool", default=True), - strip_empty_ends=dict(type="bool", default=True), - privileged=dict(type="bool", default=False), - 
tty=dict(type="bool", default=True), - env=dict(type="dict"), - ) + argument_spec = { + "service": {"type": "str", "required": True}, + "index": {"type": "int"}, + "argv": {"type": "list", "elements": "str"}, + "command": {"type": "str"}, + "chdir": {"type": "str"}, + "detach": {"type": "bool", "default": False}, + "user": {"type": "str"}, + "stdin": {"type": "str"}, + "stdin_add_newline": {"type": "bool", "default": True}, + "strip_empty_ends": {"type": "bool", "default": True}, + "privileged": {"type": "bool", "default": False}, + "tty": {"type": "bool", "default": True}, + "env": {"type": "dict"}, + } argspec_ex = common_compose_argspec_ex() argument_spec.update(argspec_ex.pop("argspec")) diff --git a/plugins/modules/docker_compose_v2_pull.py b/plugins/modules/docker_compose_v2_pull.py index 408f2972..1f438464 100644 --- a/plugins/modules/docker_compose_v2_pull.py +++ b/plugins/modules/docker_compose_v2_pull.py @@ -162,7 +162,7 @@ class PullManager(BaseComposeManager): return args def run(self): - result = dict() + result = {} args = self.get_pull_cmd(self.check_mode) rc, stdout, stderr = self.client.call_cli(*args, cwd=self.project_src) events = self.parse_events(stderr, dry_run=self.check_mode, nonzero_rc=rc != 0) @@ -180,12 +180,16 @@ class PullManager(BaseComposeManager): def main(): - argument_spec = dict( - policy=dict(type="str", choices=["always", "missing"], default="always"), - ignore_buildable=dict(type="bool", default=False), - include_deps=dict(type="bool", default=False), - services=dict(type="list", elements="str"), - ) + argument_spec = { + "policy": { + "type": "str", + "choices": ["always", "missing"], + "default": "always", + }, + "ignore_buildable": {"type": "bool", "default": False}, + "include_deps": {"type": "bool", "default": False}, + "services": {"type": "list", "elements": "str"}, + } argspec_ex = common_compose_argspec_ex() argument_spec.update(argspec_ex.pop("argspec")) diff --git a/plugins/modules/docker_compose_v2_run.py 
b/plugins/modules/docker_compose_v2_run.py index 73571a31..8f9040e2 100644 --- a/plugins/modules/docker_compose_v2_run.py +++ b/plugins/modules/docker_compose_v2_run.py @@ -383,34 +383,34 @@ class ExecManager(BaseComposeManager): def main(): - argument_spec = dict( - service=dict(type="str", required=True), - argv=dict(type="list", elements="str"), - command=dict(type="str"), - build=dict(type="bool", default=False), - cap_add=dict(type="list", elements="str"), - cap_drop=dict(type="list", elements="str"), - entrypoint=dict(type="str"), - interactive=dict(type="bool", default=True), - labels=dict(type="list", elements="str"), - name=dict(type="str"), - no_deps=dict(type="bool", default=False), - publish=dict(type="list", elements="str"), - quiet_pull=dict(type="bool", default=False), - remove_orphans=dict(type="bool", default=False), - cleanup=dict(type="bool", default=False), - service_ports=dict(type="bool", default=False), - use_aliases=dict(type="bool", default=False), - volumes=dict(type="list", elements="str"), - chdir=dict(type="str"), - detach=dict(type="bool", default=False), - user=dict(type="str"), - stdin=dict(type="str"), - stdin_add_newline=dict(type="bool", default=True), - strip_empty_ends=dict(type="bool", default=True), - tty=dict(type="bool", default=True), - env=dict(type="dict"), - ) + argument_spec = { + "service": {"type": "str", "required": True}, + "argv": {"type": "list", "elements": "str"}, + "command": {"type": "str"}, + "build": {"type": "bool", "default": False}, + "cap_add": {"type": "list", "elements": "str"}, + "cap_drop": {"type": "list", "elements": "str"}, + "entrypoint": {"type": "str"}, + "interactive": {"type": "bool", "default": True}, + "labels": {"type": "list", "elements": "str"}, + "name": {"type": "str"}, + "no_deps": {"type": "bool", "default": False}, + "publish": {"type": "list", "elements": "str"}, + "quiet_pull": {"type": "bool", "default": False}, + "remove_orphans": {"type": "bool", "default": False}, + "cleanup": 
{"type": "bool", "default": False}, + "service_ports": {"type": "bool", "default": False}, + "use_aliases": {"type": "bool", "default": False}, + "volumes": {"type": "list", "elements": "str"}, + "chdir": {"type": "str"}, + "detach": {"type": "bool", "default": False}, + "user": {"type": "str"}, + "stdin": {"type": "str"}, + "stdin_add_newline": {"type": "bool", "default": True}, + "strip_empty_ends": {"type": "bool", "default": True}, + "tty": {"type": "bool", "default": True}, + "env": {"type": "dict"}, + } argspec_ex = common_compose_argspec_ex() argument_spec.update(argspec_ex.pop("argspec")) diff --git a/plugins/modules/docker_config.py b/plugins/modules/docker_config.py index b494095d..e33a47de 100644 --- a/plugins/modules/docker_config.py +++ b/plugins/modules/docker_config.py @@ -387,18 +387,22 @@ class ConfigManager(DockerBaseClass): def main(): - argument_spec = dict( - name=dict(type="str", required=True), - state=dict(type="str", default="present", choices=["absent", "present"]), - data=dict(type="str"), - data_is_b64=dict(type="bool", default=False), - data_src=dict(type="path"), - labels=dict(type="dict"), - force=dict(type="bool", default=False), - rolling_versions=dict(type="bool", default=False), - versions_to_keep=dict(type="int", default=5), - template_driver=dict(type="str", choices=["golang"]), - ) + argument_spec = { + "name": {"type": "str", "required": True}, + "state": { + "type": "str", + "default": "present", + "choices": ["absent", "present"], + }, + "data": {"type": "str"}, + "data_is_b64": {"type": "bool", "default": False}, + "data_src": {"type": "path"}, + "labels": {"type": "dict"}, + "force": {"type": "bool", "default": False}, + "rolling_versions": {"type": "bool", "default": False}, + "versions_to_keep": {"type": "int", "default": 5}, + "template_driver": {"type": "str", "choices": ["golang"]}, + } required_if = [ ("state", "present", ["data", "data_src"], True), @@ -408,9 +412,9 @@ def main(): ("data", "data_src"), ] - 
option_minimal_versions = dict( - template_driver=dict(docker_py_version="5.0.3", docker_api_version="1.37"), - ) + option_minimal_versions = { + "template_driver": {"docker_py_version": "5.0.3", "docker_api_version": "1.37"}, + } client = AnsibleDockerClient( argument_spec=argument_spec, @@ -424,9 +428,9 @@ def main(): sanitize_labels(client.module.params["labels"], "labels", client) try: - results = dict( - changed=False, - ) + results = { + "changed": False, + } ConfigManager(client, results)() client.module.exit_json(**results) diff --git a/plugins/modules/docker_container_copy_into.py b/plugins/modules/docker_container_copy_into.py index f2f9d49c..9bf6d965 100644 --- a/plugins/modules/docker_container_copy_into.py +++ b/plugins/modules/docker_container_copy_into.py @@ -484,7 +484,7 @@ def is_file_idempotent( ) except OSError as exc: if exc.errno == 2: - raise DockerFileNotFound(f"Cannot find local file {managed_path}") + raise DockerFileNotFound(f"Cannot find local file {managed_path}") from exc raise if mode is None: mode = stat.S_IMODE(file_stat.st_mode) @@ -752,10 +752,10 @@ def copy_file_into_container( follow_links=local_follow_links, ) - result = dict( - container_path=container_path, - changed=changed, - ) + result = { + "container_path": container_path, + "changed": changed, + } if diff: result["diff"] = diff client.module.exit_json(**result) @@ -992,10 +992,10 @@ def copy_content_into_container( mode=mode, ) - result = dict( - container_path=container_path, - changed=changed, - ) + result = { + "container_path": container_path, + "changed": changed, + } if diff: # Since the content is no_log, make sure that the before/after strings look sufficiently different key = generate_insecure_key() @@ -1022,26 +1022,26 @@ def parse_octal_string_only(mode): def main(): - argument_spec = dict( - container=dict(type="str", required=True), - path=dict(type="path"), - container_path=dict(type="str", required=True), - follow=dict(type="bool", default=False), - 
local_follow=dict(type="bool", default=True), - owner_id=dict(type="int"), - group_id=dict(type="int"), - mode=dict(type="raw"), - mode_parse=dict( - type="str", - choices=["legacy", "modern", "octal_string_only"], - default="legacy", - ), - force=dict(type="bool"), - content=dict(type="str", no_log=True), - content_is_b64=dict(type="bool", default=False), + argument_spec = { + "container": {"type": "str", "required": True}, + "path": {"type": "path"}, + "container_path": {"type": "str", "required": True}, + "follow": {"type": "bool", "default": False}, + "local_follow": {"type": "bool", "default": True}, + "owner_id": {"type": "int"}, + "group_id": {"type": "int"}, + "mode": {"type": "raw"}, + "mode_parse": { + "type": "str", + "choices": ["legacy", "modern", "octal_string_only"], + "default": "legacy", + }, + "force": {"type": "bool"}, + "content": {"type": "str", "no_log": True}, + "content_is_b64": {"type": "bool", "default": False}, # Undocumented parameters for use by the action plugin - _max_file_size_for_diff=dict(type="int"), - ) + "_max_file_size_for_diff": {"type": "int"}, + } client = AnsibleDockerClient( argument_spec=argument_spec, diff --git a/plugins/modules/docker_container_exec.py b/plugins/modules/docker_container_exec.py index c697f79f..bcbe4616 100644 --- a/plugins/modules/docker_container_exec.py +++ b/plugins/modules/docker_container_exec.py @@ -187,23 +187,23 @@ from ansible_collections.community.docker.plugins.module_utils._socket_handler i def main(): - argument_spec = dict( - container=dict(type="str", required=True), - argv=dict(type="list", elements="str"), - command=dict(type="str"), - chdir=dict(type="str"), - detach=dict(type="bool", default=False), - user=dict(type="str"), - stdin=dict(type="str"), - stdin_add_newline=dict(type="bool", default=True), - strip_empty_ends=dict(type="bool", default=True), - tty=dict(type="bool", default=False), - env=dict(type="dict"), - ) + argument_spec = { + "container": {"type": "str", "required": 
True}, + "argv": {"type": "list", "elements": "str"}, + "command": {"type": "str"}, + "chdir": {"type": "str"}, + "detach": {"type": "bool", "default": False}, + "user": {"type": "str"}, + "stdin": {"type": "str"}, + "stdin_add_newline": {"type": "bool", "default": True}, + "strip_empty_ends": {"type": "bool", "default": True}, + "tty": {"type": "bool", "default": False}, + "env": {"type": "dict"}, + } - option_minimal_versions = dict( - chdir=dict(docker_api_version="1.35"), - ) + option_minimal_versions = { + "chdir": {"docker_api_version": "1.35"}, + } client = AnsibleDockerClient( argument_spec=argument_spec, diff --git a/plugins/modules/docker_container_info.py b/plugins/modules/docker_container_info.py index 077afe63..6a97fd13 100644 --- a/plugins/modules/docker_container_info.py +++ b/plugins/modules/docker_container_info.py @@ -87,9 +87,9 @@ from ansible_collections.community.docker.plugins.module_utils._common_api impor def main(): - argument_spec = dict( - name=dict(type="str", required=True), - ) + argument_spec = { + "name": {"type": "str", "required": True}, + } client = AnsibleDockerClient( argument_spec=argument_spec, @@ -101,7 +101,7 @@ def main(): client.module.exit_json( changed=False, - exists=(True if container else False), + exists=bool(container), container=container, ) except DockerException as e: diff --git a/plugins/modules/docker_context_info.py b/plugins/modules/docker_context_info.py index 496c8e81..7a5bfd78 100644 --- a/plugins/modules/docker_context_info.py +++ b/plugins/modules/docker_context_info.py @@ -204,10 +204,6 @@ def tls_context_to_json(context): } -def to_bool(value): - return True if value else False - - def context_to_json(context, current): module_config = {} if "docker" in context.endpoints: @@ -240,7 +236,7 @@ def context_to_json(context, current): module_config["validate_certs"] = tls_cfg.verify module_config["tls"] = True else: - module_config["tls"] = to_bool(endpoint.get("SkipTLSVerify")) + module_config["tls"] = 
bool(endpoint.get("SkipTLSVerify")) return { "current": current, "name": context.name, @@ -252,11 +248,11 @@ def context_to_json(context, current): def main(): - argument_spec = dict( - only_current=dict(type="bool", default=False), - name=dict(type="str"), - cli_context=dict(type="str"), - ) + argument_spec = { + "only_current": {"type": "bool", "default": False}, + "name": {"type": "str"}, + "cli_context": {"type": "str"}, + } module = AnsibleModule( argument_spec=argument_spec, diff --git a/plugins/modules/docker_host_info.py b/plugins/modules/docker_host_info.py index 8bd12f30..b71387cf 100644 --- a/plugins/modules/docker_host_info.py +++ b/plugins/modules/docker_host_info.py @@ -271,7 +271,7 @@ class DockerHostManager(DockerBaseClass): try: if self.verbose_output: return self.client.df() - return dict(LayersSize=self.client.df()["LayersSize"]) + return {"LayersSize": self.client.df()["LayersSize"]} except APIError as exc: self.client.fail(f"Error inspecting docker host: {exc}") @@ -292,7 +292,7 @@ class DockerHostManager(DockerBaseClass): header_images = ["Id", "RepoTags", "Created", "Size"] header_networks = ["Id", "Driver", "Name", "Scope"] - filter_arg = dict() + filter_arg = {} if filters: filter_arg["filters"] = filters try: @@ -330,7 +330,7 @@ class DockerHostManager(DockerBaseClass): return items for item in items: - item_record = dict() + item_record = {} if docker_object == "containers": for key in header_containers: @@ -350,26 +350,26 @@ class DockerHostManager(DockerBaseClass): def main(): - argument_spec = dict( - containers=dict(type="bool", default=False), - containers_all=dict(type="bool", default=False), - containers_filters=dict(type="dict"), - images=dict(type="bool", default=False), - images_filters=dict(type="dict"), - networks=dict(type="bool", default=False), - networks_filters=dict(type="dict"), - volumes=dict(type="bool", default=False), - volumes_filters=dict(type="dict"), - disk_usage=dict(type="bool", default=False), - 
verbose_output=dict(type="bool", default=False), - ) + argument_spec = { + "containers": {"type": "bool", "default": False}, + "containers_all": {"type": "bool", "default": False}, + "containers_filters": {"type": "dict"}, + "images": {"type": "bool", "default": False}, + "images_filters": {"type": "dict"}, + "networks": {"type": "bool", "default": False}, + "networks_filters": {"type": "dict"}, + "volumes": {"type": "bool", "default": False}, + "volumes_filters": {"type": "dict"}, + "disk_usage": {"type": "bool", "default": False}, + "verbose_output": {"type": "bool", "default": False}, + } client = AnsibleDockerClient( argument_spec=argument_spec, supports_check_mode=True, - fail_results=dict( - can_talk_to_docker=False, - ), + fail_results={ + "can_talk_to_docker": False, + }, ) if ( client.module.params["api_version"] is None @@ -379,9 +379,9 @@ def main(): client.fail_results["can_talk_to_docker"] = True try: - results = dict( - changed=False, - ) + results = { + "changed": False, + } DockerHostManager(client, results) client.module.exit_json(**results) diff --git a/plugins/modules/docker_image.py b/plugins/modules/docker_image.py index b7ee1131..f1e9e0d1 100644 --- a/plugins/modules/docker_image.py +++ b/plugins/modules/docker_image.py @@ -442,8 +442,8 @@ class ImageManager(DockerBaseClass): self.check_mode = self.client.check_mode self.source = parameters["source"] - build = parameters["build"] or dict() - pull = parameters["pull"] or dict() + build = parameters["build"] or {} + pull = parameters["pull"] or {} self.archive_path = parameters["archive_path"] self.cache_from = build.get("cache_from") self.container_limits = build.get("container_limits") @@ -796,7 +796,7 @@ class ImageManager(DockerBaseClass): self.fail(f"Error pushing image {repository}: {exc}") self.results["image"] = self.client.find_image(name=repository, tag=tag) if not self.results["image"]: - self.results["image"] = dict() + self.results["image"] = {} self.results["image"]["push_status"] 
= status def tag_image(self, name, tag, repository, push=False): @@ -1079,54 +1079,58 @@ class ImageManager(DockerBaseClass): def main(): - argument_spec = dict( - source=dict(type="str", choices=["build", "load", "pull", "local"]), - build=dict( - type="dict", - options=dict( - cache_from=dict(type="list", elements="str"), - container_limits=dict( - type="dict", - options=dict( - memory=dict(type="str"), - memswap=dict(type="str"), - cpushares=dict(type="int"), - cpusetcpus=dict(type="str"), - ), - ), - dockerfile=dict(type="str"), - http_timeout=dict(type="int"), - network=dict(type="str"), - nocache=dict(type="bool", default=False), - path=dict(type="path", required=True), - pull=dict(type="bool", default=False), - rm=dict(type="bool", default=True), - args=dict(type="dict"), - use_config_proxy=dict(type="bool"), - target=dict(type="str"), - etc_hosts=dict(type="dict"), - platform=dict(type="str"), - shm_size=dict(type="str"), - labels=dict(type="dict"), - ), - ), - archive_path=dict(type="path"), - force_source=dict(type="bool", default=False), - force_absent=dict(type="bool", default=False), - force_tag=dict(type="bool", default=False), - load_path=dict(type="path"), - name=dict(type="str", required=True), - pull=dict( - type="dict", - options=dict( - platform=dict(type="str"), - ), - ), - push=dict(type="bool", default=False), - repository=dict(type="str"), - state=dict(type="str", default="present", choices=["absent", "present"]), - tag=dict(type="str", default="latest"), - ) + argument_spec = { + "source": {"type": "str", "choices": ["build", "load", "pull", "local"]}, + "build": { + "type": "dict", + "options": { + "cache_from": {"type": "list", "elements": "str"}, + "container_limits": { + "type": "dict", + "options": { + "memory": {"type": "str"}, + "memswap": {"type": "str"}, + "cpushares": {"type": "int"}, + "cpusetcpus": {"type": "str"}, + }, + }, + "dockerfile": {"type": "str"}, + "http_timeout": {"type": "int"}, + "network": {"type": "str"}, + 
"nocache": {"type": "bool", "default": False}, + "path": {"type": "path", "required": True}, + "pull": {"type": "bool", "default": False}, + "rm": {"type": "bool", "default": True}, + "args": {"type": "dict"}, + "use_config_proxy": {"type": "bool"}, + "target": {"type": "str"}, + "etc_hosts": {"type": "dict"}, + "platform": {"type": "str"}, + "shm_size": {"type": "str"}, + "labels": {"type": "dict"}, + }, + }, + "archive_path": {"type": "path"}, + "force_source": {"type": "bool", "default": False}, + "force_absent": {"type": "bool", "default": False}, + "force_tag": {"type": "bool", "default": False}, + "load_path": {"type": "path"}, + "name": {"type": "str", "required": True}, + "pull": { + "type": "dict", + "options": { + "platform": {"type": "str"}, + }, + }, + "push": {"type": "bool", "default": False}, + "repository": {"type": "str"}, + "state": { + "type": "str", + "default": "present", + "choices": ["absent", "present"], + }, + "tag": {"type": "str", "default": "latest"}, + } required_if = [ ("state", "present", ["source"]), @@ -1151,16 +1155,20 @@ def main(): and client.module.params["pull"].get("platform") is not None ) - option_minimal_versions = dict() - option_minimal_versions["build.etc_hosts"] = dict( - docker_api_version="1.27", detect_usage=detect_etc_hosts - ) - option_minimal_versions["build.platform"] = dict( - docker_api_version="1.32", detect_usage=detect_build_platform - ) - option_minimal_versions["pull.platform"] = dict( - docker_api_version="1.32", detect_usage=detect_pull_platform - ) + option_minimal_versions = { + "build.etc_hosts": { + "docker_api_version": "1.27", + "detect_usage": detect_etc_hosts, + }, + "build.platform": { + "docker_api_version": "1.32", + "detect_usage": detect_build_platform, + }, + "pull.platform": { + "docker_api_version": "1.32", + "detect_usage": detect_pull_platform, + }, + } client = AnsibleDockerClient( argument_spec=argument_spec, @@ -1181,7 +1189,7 @@ def main(): ) try: - results = dict(changed=False, 
actions=[], image={}) + results = {"changed": False, "actions": [], "image": {}} ImageManager(client, results) client.module.exit_json(**results) diff --git a/plugins/modules/docker_image_build.py b/plugins/modules/docker_image_build.py index 9172e82c..b4b765cf 100644 --- a/plugins/modules/docker_image_build.py +++ b/plugins/modules/docker_image_build.py @@ -514,11 +514,11 @@ class ImageBuilder(DockerBaseClass): def build_image(self): image = self.client.find_image(self.name, self.tag) - results = dict( - changed=False, - actions=[], - image=image or {}, - ) + results = { + "changed": False, + "actions": [], + "image": image or {}, + } if image: if self.rebuild == "never": @@ -548,69 +548,73 @@ class ImageBuilder(DockerBaseClass): def main(): - argument_spec = dict( - name=dict(type="str", required=True), - tag=dict(type="str", default="latest"), - path=dict(type="path", required=True), - dockerfile=dict(type="str"), - cache_from=dict(type="list", elements="str"), - pull=dict(type="bool", default=False), - network=dict(type="str"), - nocache=dict(type="bool", default=False), - etc_hosts=dict(type="dict"), - args=dict(type="dict"), - target=dict(type="str"), - platform=dict(type="list", elements="str"), - shm_size=dict(type="str"), - labels=dict(type="dict"), - rebuild=dict(type="str", choices=["never", "always"], default="never"), - secrets=dict( - type="list", - elements="dict", - options=dict( - id=dict(type="str", required=True), - type=dict(type="str", choices=["file", "env", "value"], required=True), - src=dict(type="path"), - env=dict(type="str"), - value=dict(type="str", no_log=True), - ), - required_if=[ + argument_spec = { + "name": {"type": "str", "required": True}, + "tag": {"type": "str", "default": "latest"}, + "path": {"type": "path", "required": True}, + "dockerfile": {"type": "str"}, + "cache_from": {"type": "list", "elements": "str"}, + "pull": {"type": "bool", "default": False}, + "network": {"type": "str"}, + "nocache": {"type": "bool", 
"default": False}, + "etc_hosts": {"type": "dict"}, + "args": {"type": "dict"}, + "target": {"type": "str"}, + "platform": {"type": "list", "elements": "str"}, + "shm_size": {"type": "str"}, + "labels": {"type": "dict"}, + "rebuild": {"type": "str", "choices": ["never", "always"], "default": "never"}, + "secrets": { + "type": "list", + "elements": "dict", + "options": { + "id": {"type": "str", "required": True}, + "type": { + "type": "str", + "choices": ["file", "env", "value"], + "required": True, + }, + "src": {"type": "path"}, + "env": {"type": "str"}, + "value": {"type": "str", "no_log": True}, + }, + "required_if": [ ("type", "file", ["src"]), ("type", "env", ["env"]), ("type", "value", ["value"]), ], - mutually_exclusive=[ + "mutually_exclusive": [ ("src", "env", "value"), ], - no_log=False, - ), - outputs=dict( - type="list", - elements="dict", - options=dict( - type=dict( - type="str", - choices=["local", "tar", "oci", "docker", "image"], - required=True, - ), - dest=dict(type="path"), - context=dict(type="str"), - name=dict(type="list", elements="str"), - push=dict(type="bool", default=False), - ), - required_if=[ + "no_log": False, + }, + "outputs": { + "type": "list", + "elements": "dict", + "options": { + "type": { + "type": "str", + "choices": ["local", "tar", "oci", "docker", "image"], + "required": True, + }, + "dest": {"type": "path"}, + "context": {"type": "str"}, + "name": {"type": "list", "elements": "str"}, + "push": {"type": "bool", "default": False}, + }, + "required_if": [ ("type", "local", ["dest"]), ("type", "tar", ["dest"]), ("type", "oci", ["dest"]), ], - mutually_exclusive=[ + "mutually_exclusive": [ ("dest", "name"), ("dest", "push"), ("context", "name"), ("context", "push"), ], - ), - ) + }, + } client = AnsibleModuleDockerClient( argument_spec=argument_spec, diff --git a/plugins/modules/docker_image_export.py b/plugins/modules/docker_image_export.py index eb32f9a4..0d107631 100644 --- a/plugins/modules/docker_image_export.py +++ 
b/plugins/modules/docker_image_export.py @@ -261,12 +261,17 @@ class ImageExportManager(DockerBaseClass): def main(): - argument_spec = dict( - path=dict(type="path"), - force=dict(type="bool", default=False), - names=dict(type="list", elements="str", required=True, aliases=["name"]), - tag=dict(type="str", default="latest"), - ) + argument_spec = { + "path": {"type": "path"}, + "force": {"type": "bool", "default": False}, + "names": { + "type": "list", + "elements": "str", + "required": True, + "aliases": ["name"], + }, + "tag": {"type": "str", "default": "latest"}, + } client = AnsibleDockerClient( argument_spec=argument_spec, diff --git a/plugins/modules/docker_image_info.py b/plugins/modules/docker_image_info.py index a9ca0405..050f60d7 100644 --- a/plugins/modules/docker_image_info.py +++ b/plugins/modules/docker_image_info.py @@ -219,9 +219,9 @@ class ImageManager(DockerBaseClass): def main(): - argument_spec = dict( - name=dict(type="list", elements="str"), - ) + argument_spec = { + "name": {"type": "list", "elements": "str"}, + } client = AnsibleDockerClient( argument_spec=argument_spec, @@ -229,7 +229,7 @@ def main(): ) try: - results = dict(changed=False, images=[]) + results = {"changed": False, "images": []} ImageManager(client, results) client.module.exit_json(**results) diff --git a/plugins/modules/docker_image_load.py b/plugins/modules/docker_image_load.py index e49a5b4c..e7e10431 100644 --- a/plugins/modules/docker_image_load.py +++ b/plugins/modules/docker_image_load.py @@ -181,17 +181,17 @@ class ImageManager(DockerBaseClass): def main(): client = AnsibleDockerClient( - argument_spec=dict( - path=dict(type="path", required=True), - ), + argument_spec={ + "path": {"type": "path", "required": True}, + }, supports_check_mode=False, ) try: - results = dict( - image_names=[], - images=[], - ) + results = { + "image_names": [], + "images": [], + } ImageManager(client, results) client.module.exit_json(**results) diff --git 
a/plugins/modules/docker_image_pull.py b/plugins/modules/docker_image_pull.py index 880e251f..02042734 100644 --- a/plugins/modules/docker_image_pull.py +++ b/plugins/modules/docker_image_pull.py @@ -149,12 +149,12 @@ class ImagePuller(DockerBaseClass): def pull(self): image = self.client.find_image(name=self.name, tag=self.tag) - results = dict( - changed=False, - actions=[], - image=image or {}, - diff=dict(before=image_info(image), after=image_info(image)), - ) + results = { + "changed": False, + "actions": [], + "image": image or {}, + "diff": {"before": image_info(image), "after": image_info(image)}, + } if image and self.pull_mode == "not_present": if self.platform is None: @@ -178,7 +178,7 @@ class ImagePuller(DockerBaseClass): results["actions"].append(f"Pulled image {self.name}:{self.tag}") if self.check_mode: results["changed"] = True - results["diff"]["after"] = image_info(dict(Id="unknown")) + results["diff"]["after"] = image_info({"Id": "unknown"}) else: results["image"], not_changed = self.client.pull_image( self.name, tag=self.tag, image_platform=self.platform @@ -190,16 +190,20 @@ class ImagePuller(DockerBaseClass): def main(): - argument_spec = dict( - name=dict(type="str", required=True), - tag=dict(type="str", default="latest"), - platform=dict(type="str"), - pull=dict(type="str", choices=["always", "not_present"], default="always"), - ) + argument_spec = { + "name": {"type": "str", "required": True}, + "tag": {"type": "str", "default": "latest"}, + "platform": {"type": "str"}, + "pull": { + "type": "str", + "choices": ["always", "not_present"], + "default": "always", + }, + } - option_minimal_versions = dict( - platform=dict(docker_api_version="1.32"), - ) + option_minimal_versions = { + "platform": {"docker_api_version": "1.32"}, + } client = AnsibleDockerClient( argument_spec=argument_spec, diff --git a/plugins/modules/docker_image_push.py b/plugins/modules/docker_image_push.py index a6d711b2..a350df34 100644 --- 
a/plugins/modules/docker_image_push.py +++ b/plugins/modules/docker_image_push.py @@ -127,11 +127,11 @@ class ImagePusher(DockerBaseClass): if not image: self.client.fail(f"Cannot find image {self.name}:{self.tag}") - results = dict( - changed=False, - actions=[], - image=image, - ) + results = { + "changed": False, + "actions": [], + "image": image, + } push_registry, push_repo = resolve_repository_name(self.name) try: @@ -175,10 +175,10 @@ class ImagePusher(DockerBaseClass): def main(): - argument_spec = dict( - name=dict(type="str", required=True), - tag=dict(type="str", default="latest"), - ) + argument_spec = { + "name": {"type": "str", "required": True}, + "tag": {"type": "str", "default": "latest"}, + } client = AnsibleDockerClient( argument_spec=argument_spec, diff --git a/plugins/modules/docker_image_remove.py b/plugins/modules/docker_image_remove.py index ca2d7569..5bb1276f 100644 --- a/plugins/modules/docker_image_remove.py +++ b/plugins/modules/docker_image_remove.py @@ -147,22 +147,22 @@ class ImageRemover(DockerBaseClass): def get_diff_state(self, image): if not image: - return dict(exists=False) - return dict( - exists=True, - id=image["Id"], - tags=sorted(image.get("RepoTags") or []), - digests=sorted(image.get("RepoDigests") or []), - ) + return {"exists": False} + return { + "exists": True, + "id": image["Id"], + "tags": sorted(image.get("RepoTags") or []), + "digests": sorted(image.get("RepoDigests") or []), + } def absent(self): - results = dict( - changed=False, - actions=[], - image={}, - deleted=[], - untagged=[], - ) + results = { + "changed": False, + "actions": [], + "image": {}, + "deleted": [], + "untagged": [], + } name = self.name if is_image_name_id(name): @@ -173,7 +173,7 @@ class ImageRemover(DockerBaseClass): name = f"{self.name}:{self.tag}" if self.diff: - results["diff"] = dict(before=self.get_diff_state(image)) + results["diff"] = {"before": self.get_diff_state(image)} if not image: if self.diff: @@ -256,12 +256,12 @@ class 
ImageRemover(DockerBaseClass): def main(): - argument_spec = dict( - name=dict(type="str", required=True), - tag=dict(type="str", default="latest"), - force=dict(type="bool", default=False), - prune=dict(type="bool", default=True), - ) + argument_spec = { + "name": {"type": "str", "required": True}, + "tag": {"type": "str", "default": "latest"}, + "force": {"type": "bool", "default": False}, + "prune": {"type": "bool", "default": True}, + } client = AnsibleDockerClient( argument_spec=argument_spec, diff --git a/plugins/modules/docker_image_tag.py b/plugins/modules/docker_image_tag.py index d27553ac..65dc7642 100644 --- a/plugins/modules/docker_image_tag.py +++ b/plugins/modules/docker_image_tag.py @@ -133,7 +133,7 @@ def convert_to_bytes(value, module, name, unlimited_value=None): def image_info(name, tag, image): - result = dict(name=name, tag=tag) + result = {"name": name, "tag": tag} if image: result["id"] = image["Id"] else: @@ -231,13 +231,13 @@ class ImageTagger(DockerBaseClass): before = [] after = [] tagged_images = [] - results = dict( - changed=False, - actions=[], - image=image, - tagged_images=tagged_images, - diff=dict(before=dict(images=before), after=dict(images=after)), - ) + results = { + "changed": False, + "actions": [], + "image": image, + "tagged_images": tagged_images, + "diff": {"before": {"images": before}, "after": {"images": after}}, + } for repository, tag in self.repositories: tagged, msg, old_image = self.tag_image(image, repository, tag) before.append(image_info(repository, tag, old_image)) @@ -257,14 +257,16 @@ class ImageTagger(DockerBaseClass): def main(): - argument_spec = dict( - name=dict(type="str", required=True), - tag=dict(type="str", default="latest"), - repository=dict(type="list", elements="str", required=True), - existing_images=dict( - type="str", choices=["keep", "overwrite"], default="overwrite" - ), - ) + argument_spec = { + "name": {"type": "str", "required": True}, + "tag": {"type": "str", "default": "latest"}, + 
"repository": {"type": "list", "elements": "str", "required": True}, + "existing_images": { + "type": "str", + "choices": ["keep", "overwrite"], + "default": "overwrite", + }, + } client = AnsibleDockerClient( argument_spec=argument_spec, diff --git a/plugins/modules/docker_login.py b/plugins/modules/docker_login.py index 9de88be0..f90ac6a9 100644 --- a/plugins/modules/docker_login.py +++ b/plugins/modules/docker_login.py @@ -158,7 +158,7 @@ class DockerFileStore: self._config_path = config_path # Make sure we have a minimal config if none is available. - self._config = dict(auths=dict()) + self._config = {"auths": {}} try: # Attempt to read the existing config. @@ -166,7 +166,7 @@ class DockerFileStore: config = json.load(f) except (ValueError, IOError): # No config found or an invalid config found so we'll ignore it. - config = dict() + config = {} # Update our internal config with what ever was loaded. self._config.update(config) @@ -191,7 +191,7 @@ class DockerFileStore: (username, password) = decode_auth(server_creds["auth"]) - return dict(Username=username, Secret=password) + return {"Username": username, "Secret": password} def _write(self): """ @@ -219,9 +219,9 @@ class DockerFileStore: # build up the auth structure if "auths" not in self._config: - self._config["auths"] = dict() + self._config["auths"] = {} - self._config["auths"][server] = dict(auth=tauth) + self._config["auths"][server] = {"auth": tauth} self._write() @@ -368,7 +368,7 @@ class LoginManager(DockerBaseClass): current = store.get(self.registry_url) except CredentialsNotFound: # get raises an exception on not found. 
- current = dict(Username="", Secret="") + current = {"Username": "", "Secret": ""} if ( current["Username"] != self.username @@ -410,18 +410,26 @@ class LoginManager(DockerBaseClass): def main(): - argument_spec = dict( - registry_url=dict( - type="str", default=DEFAULT_DOCKER_REGISTRY, aliases=["registry", "url"] - ), - username=dict(type="str"), - password=dict(type="str", no_log=True), - reauthorize=dict(type="bool", default=False, aliases=["reauth"]), - state=dict(type="str", default="present", choices=["present", "absent"]), - config_path=dict( - type="path", default="~/.docker/config.json", aliases=["dockercfg_path"] - ), - ) + argument_spec = { + "registry_url": { + "type": "str", + "default": DEFAULT_DOCKER_REGISTRY, + "aliases": ["registry", "url"], + }, + "username": {"type": "str"}, + "password": {"type": "str", "no_log": True}, + "reauthorize": {"type": "bool", "default": False, "aliases": ["reauth"]}, + "state": { + "type": "str", + "default": "present", + "choices": ["present", "absent"], + }, + "config_path": { + "type": "path", + "default": "~/.docker/config.json", + "aliases": ["dockercfg_path"], + }, + } required_if = [ ("state", "present", ["username", "password"]), @@ -434,7 +442,7 @@ def main(): ) try: - results = dict(changed=False, actions=[], login_result={}) + results = {"changed": False, "actions": [], "login_result": {}} manager = LoginManager(client, results) manager.run() diff --git a/plugins/modules/docker_network.py b/plugins/modules/docker_network.py index 34edb46a..6c539aea 100644 --- a/plugins/modules/docker_network.py +++ b/plugins/modules/docker_network.py @@ -395,7 +395,7 @@ class DockerNetworkManager: self.results = {"changed": False, "actions": []} self.diff = self.client.module._diff self.diff_tracker = DifferenceTracker() - self.diff_result = dict() + self.diff_result = {} self.existing_network = self.get_existing_network() @@ -511,13 +511,13 @@ class DockerNetworkManager: # Put network's IPAM config into the same format as 
module's IPAM config net_ipam_configs = [] for net_ipam_config in net["IPAM"]["Config"]: - config = dict() + config = {} for k, v in net_ipam_config.items(): config[normalize_ipam_config_key(k)] = v net_ipam_configs.append(config) # Compare lists of dicts as sets of dicts for idx, ipam_config in enumerate(self.parameters.ipam_config): - net_config = dict() + net_config = {} for net_ipam_config in net_ipam_configs: if dicts_are_essentially_equal(ipam_config, net_ipam_config): net_config = net_ipam_config @@ -779,45 +779,54 @@ class DockerNetworkManager: def main(): - argument_spec = dict( - name=dict(type="str", required=True, aliases=["network_name"]), - config_from=dict(type="str"), - config_only=dict(type="bool"), - connected=dict(type="list", default=[], elements="str", aliases=["containers"]), - state=dict(type="str", default="present", choices=["present", "absent"]), - driver=dict(type="str", default="bridge"), - driver_options=dict(type="dict", default={}), - force=dict(type="bool", default=False), - appends=dict(type="bool", default=False, aliases=["incremental"]), - ipam_driver=dict(type="str"), - ipam_driver_options=dict(type="dict"), - ipam_config=dict( - type="list", - elements="dict", - options=dict( - subnet=dict(type="str"), - iprange=dict(type="str"), - gateway=dict(type="str"), - aux_addresses=dict(type="dict"), - ), - ), - enable_ipv4=dict(type="bool"), - enable_ipv6=dict(type="bool"), - internal=dict(type="bool"), - labels=dict(type="dict", default={}), - debug=dict(type="bool", default=False), - scope=dict(type="str", choices=["local", "global", "swarm"]), - attachable=dict(type="bool"), - ingress=dict(type="bool"), - ) + argument_spec = { + "name": {"type": "str", "required": True, "aliases": ["network_name"]}, + "config_from": {"type": "str"}, + "config_only": {"type": "bool"}, + "connected": { + "type": "list", + "default": [], + "elements": "str", + "aliases": ["containers"], + }, + "state": { + "type": "str", + "default": "present", + 
"choices": ["present", "absent"], + }, + "driver": {"type": "str", "default": "bridge"}, + "driver_options": {"type": "dict", "default": {}}, + "force": {"type": "bool", "default": False}, + "appends": {"type": "bool", "default": False, "aliases": ["incremental"]}, + "ipam_driver": {"type": "str"}, + "ipam_driver_options": {"type": "dict"}, + "ipam_config": { + "type": "list", + "elements": "dict", + "options": { + "subnet": {"type": "str"}, + "iprange": {"type": "str"}, + "gateway": {"type": "str"}, + "aux_addresses": {"type": "dict"}, + }, + }, + "enable_ipv4": {"type": "bool"}, + "enable_ipv6": {"type": "bool"}, + "internal": {"type": "bool"}, + "labels": {"type": "dict", "default": {}}, + "debug": {"type": "bool", "default": False}, + "scope": {"type": "str", "choices": ["local", "global", "swarm"]}, + "attachable": {"type": "bool"}, + "ingress": {"type": "bool"}, + } - option_minimal_versions = dict( - config_from=dict(docker_api_version="1.30"), - config_only=dict(docker_api_version="1.30"), - scope=dict(docker_api_version="1.30"), - attachable=dict(docker_api_version="1.26"), - enable_ipv4=dict(docker_api_version="1.47"), - ) + option_minimal_versions = { + "config_from": {"docker_api_version": "1.30"}, + "config_only": {"docker_api_version": "1.30"}, + "scope": {"docker_api_version": "1.30"}, + "attachable": {"docker_api_version": "1.26"}, + "enable_ipv4": {"docker_api_version": "1.47"}, + } client = AnsibleDockerClient( argument_spec=argument_spec, diff --git a/plugins/modules/docker_network_info.py b/plugins/modules/docker_network_info.py index 52d4848f..03536119 100644 --- a/plugins/modules/docker_network_info.py +++ b/plugins/modules/docker_network_info.py @@ -108,9 +108,9 @@ from ansible_collections.community.docker.plugins.module_utils._common_api impor def main(): - argument_spec = dict( - name=dict(type="str", required=True), - ) + argument_spec = { + "name": {"type": "str", "required": True}, + } client = AnsibleDockerClient( 
argument_spec=argument_spec, @@ -122,7 +122,7 @@ def main(): client.module.exit_json( changed=False, - exists=(True if network else False), + exists=bool(network), network=network, ) except DockerException as e: diff --git a/plugins/modules/docker_node.py b/plugins/modules/docker_node.py index 456f2cc0..61d907f5 100644 --- a/plugins/modules/docker_node.py +++ b/plugins/modules/docker_node.py @@ -206,11 +206,11 @@ class SwarmNodeManager(DockerBaseClass): self.client.fail(f"Failed to get node information for {exc}") changed = False - node_spec = dict( - Availability=self.parameters.availability, - Role=self.parameters.role, - Labels=self.parameters.labels, - ) + node_spec = { + "Availability": self.parameters.availability, + "Role": self.parameters.role, + "Labels": self.parameters.labels, + } if self.parameters.role is None: node_spec["Role"] = node_info["Spec"]["Role"] @@ -278,14 +278,18 @@ class SwarmNodeManager(DockerBaseClass): def main(): - argument_spec = dict( - hostname=dict(type="str", required=True), - labels=dict(type="dict"), - labels_state=dict(type="str", default="merge", choices=["merge", "replace"]), - labels_to_remove=dict(type="list", elements="str"), - availability=dict(type="str", choices=["active", "pause", "drain"]), - role=dict(type="str", choices=["worker", "manager"]), - ) + argument_spec = { + "hostname": {"type": "str", "required": True}, + "labels": {"type": "dict"}, + "labels_state": { + "type": "str", + "default": "merge", + "choices": ["merge", "replace"], + }, + "labels_to_remove": {"type": "list", "elements": "str"}, + "availability": {"type": "str", "choices": ["active", "pause", "drain"]}, + "role": {"type": "str", "choices": ["worker", "manager"]}, + } client = AnsibleDockerSwarmClient( argument_spec=argument_spec, @@ -294,9 +298,9 @@ def main(): ) try: - results = dict( - changed=False, - ) + results = { + "changed": False, + } SwarmNodeManager(client, results) client.module.exit_json(**results) diff --git 
a/plugins/modules/docker_node_info.py b/plugins/modules/docker_node_info.py index 1e3e678b..3ec75c46 100644 --- a/plugins/modules/docker_node_info.py +++ b/plugins/modules/docker_node_info.py @@ -131,10 +131,10 @@ def get_node_facts(client): def main(): - argument_spec = dict( - name=dict(type="list", elements="str"), - self=dict(type="bool", default=False), - ) + argument_spec = { + "name": {"type": "list", "elements": "str"}, + "self": {"type": "bool", "default": False}, + } client = AnsibleDockerSwarmClient( argument_spec=argument_spec, diff --git a/plugins/modules/docker_plugin.py b/plugins/modules/docker_plugin.py index b7bcea0c..d9953953 100644 --- a/plugins/modules/docker_plugin.py +++ b/plugins/modules/docker_plugin.py @@ -185,7 +185,7 @@ class DockerPluginManager: self.check_mode = self.client.check_mode self.diff = self.client.module._diff self.diff_tracker = DifferenceTracker() - self.diff_result = dict() + self.diff_result = {} self.actions = [] self.changed = False @@ -255,7 +255,7 @@ class DockerPluginManager: try: # Get privileges headers = {} - registry, repo_name = auth.resolve_repository_name( + registry, dummy_repo_name = auth.resolve_repository_name( self.parameters.plugin_name ) header = auth.get_config_header(self.client, registry) @@ -416,19 +416,19 @@ class DockerPluginManager: def main(): - argument_spec = dict( - alias=dict(type="str"), - plugin_name=dict(type="str", required=True), - state=dict( - type="str", - default="present", - choices=["present", "absent", "enable", "disable"], - ), - plugin_options=dict(type="dict", default={}), - debug=dict(type="bool", default=False), - force_remove=dict(type="bool", default=False), - enable_timeout=dict(type="int", default=0), - ) + argument_spec = { + "alias": {"type": "str"}, + "plugin_name": {"type": "str", "required": True}, + "state": { + "type": "str", + "default": "present", + "choices": ["present", "absent", "enable", "disable"], + }, + "plugin_options": {"type": "dict", "default": {}}, + 
"debug": {"type": "bool", "default": False}, + "force_remove": {"type": "bool", "default": False}, + "enable_timeout": {"type": "int", "default": 0}, + } client = AnsibleDockerClient( argument_spec=argument_spec, supports_check_mode=True, diff --git a/plugins/modules/docker_prune.py b/plugins/modules/docker_prune.py index a3ae8818..a2d3f80e 100644 --- a/plugins/modules/docker_prune.py +++ b/plugins/modules/docker_prune.py @@ -248,29 +248,29 @@ from ansible_collections.community.docker.plugins.module_utils._util import ( def main(): - argument_spec = dict( - containers=dict(type="bool", default=False), - containers_filters=dict(type="dict"), - images=dict(type="bool", default=False), - images_filters=dict(type="dict"), - networks=dict(type="bool", default=False), - networks_filters=dict(type="dict"), - volumes=dict(type="bool", default=False), - volumes_filters=dict(type="dict"), - builder_cache=dict(type="bool", default=False), - builder_cache_all=dict(type="bool", default=False), - builder_cache_filters=dict(type="dict"), - builder_cache_keep_storage=dict(type="str"), # convert to bytes - ) + argument_spec = { + "containers": {"type": "bool", "default": False}, + "containers_filters": {"type": "dict"}, + "images": {"type": "bool", "default": False}, + "images_filters": {"type": "dict"}, + "networks": {"type": "bool", "default": False}, + "networks_filters": {"type": "dict"}, + "volumes": {"type": "bool", "default": False}, + "volumes_filters": {"type": "dict"}, + "builder_cache": {"type": "bool", "default": False}, + "builder_cache_all": {"type": "bool", "default": False}, + "builder_cache_filters": {"type": "dict"}, + "builder_cache_keep_storage": {"type": "str"}, # convert to bytes + } client = AnsibleDockerClient( argument_spec=argument_spec, - option_minimal_versions=dict( - builder_cache=dict(docker_py_version="1.31"), - builder_cache_all=dict(docker_py_version="1.39"), - builder_cache_filters=dict(docker_py_version="1.31"), - 
builder_cache_keep_storage=dict(docker_py_version="1.39"), - ), + option_minimal_versions={ + "builder_cache": {"docker_py_version": "1.31"}, + "builder_cache_all": {"docker_py_version": "1.39"}, + "builder_cache_filters": {"docker_py_version": "1.31"}, + "builder_cache_keep_storage": {"docker_py_version": "1.39"}, + }, # supports_check_mode=True, ) @@ -286,7 +286,7 @@ def main(): ) try: - result = dict() + result = {} changed = False if client.module.params["containers"]: diff --git a/plugins/modules/docker_secret.py b/plugins/modules/docker_secret.py index 378876a0..211e83d2 100644 --- a/plugins/modules/docker_secret.py +++ b/plugins/modules/docker_secret.py @@ -366,17 +366,21 @@ class SecretManager(DockerBaseClass): def main(): - argument_spec = dict( - name=dict(type="str", required=True), - state=dict(type="str", default="present", choices=["absent", "present"]), - data=dict(type="str", no_log=True), - data_is_b64=dict(type="bool", default=False), - data_src=dict(type="path"), - labels=dict(type="dict"), - force=dict(type="bool", default=False), - rolling_versions=dict(type="bool", default=False), - versions_to_keep=dict(type="int", default=5), - ) + argument_spec = { + "name": {"type": "str", "required": True}, + "state": { + "type": "str", + "default": "present", + "choices": ["absent", "present"], + }, + "data": {"type": "str", "no_log": True}, + "data_is_b64": {"type": "bool", "default": False}, + "data_src": {"type": "path"}, + "labels": {"type": "dict"}, + "force": {"type": "bool", "default": False}, + "rolling_versions": {"type": "bool", "default": False}, + "versions_to_keep": {"type": "int", "default": 5}, + } required_if = [ ("state", "present", ["data", "data_src"], True), @@ -396,7 +400,7 @@ def main(): sanitize_labels(client.module.params["labels"], "labels", client) try: - results = dict(changed=False, secret_id="", secret_name="") + results = {"changed": False, "secret_id": "", "secret_name": ""} SecretManager(client, results)() 
client.module.exit_json(**results) diff --git a/plugins/modules/docker_stack.py b/plugins/modules/docker_stack.py index 06fdef48..8a9f12ef 100644 --- a/plugins/modules/docker_stack.py +++ b/plugins/modules/docker_stack.py @@ -184,7 +184,7 @@ except ImportError: def docker_stack_services(client, stack_name): - rc, out, err = client.call_cli( + dummy_rc, out, err = client.call_cli( "stack", "services", stack_name, "--format", "{{.Name}}" ) if to_native(err) == f"Nothing found in stack: {stack_name}\n": @@ -193,7 +193,7 @@ def docker_stack_services(client, stack_name): def docker_service_inspect(client, service_name): - rc, out, err = client.call_cli("service", "inspect", service_name) + rc, out, dummy_err = client.call_cli("service", "inspect", service_name) if rc != 0: return None ret = json.loads(out)[0]["Spec"] @@ -240,15 +240,19 @@ def docker_stack_rm(client, stack_name, retries, interval): def main(): client = AnsibleModuleDockerClient( argument_spec={ - "name": dict(type="str", required=True), - "compose": dict(type="list", elements="raw", default=[]), - "prune": dict(type="bool", default=False), - "detach": dict(type="bool", default=True), - "with_registry_auth": dict(type="bool", default=False), - "resolve_image": dict(type="str", choices=["always", "changed", "never"]), - "state": dict(type="str", default="present", choices=["present", "absent"]), - "absent_retries": dict(type="int", default=0), - "absent_retries_interval": dict(type="int", default=1), + "name": {"type": "str", "required": True}, + "compose": {"type": "list", "elements": "raw", "default": []}, + "prune": {"type": "bool", "default": False}, + "detach": {"type": "bool", "default": True}, + "with_registry_auth": {"type": "bool", "default": False}, + "resolve_image": {"type": "str", "choices": ["always", "changed", "never"]}, + "state": { + "type": "str", + "default": "present", + "choices": ["present", "absent"], + }, + "absent_retries": {"type": "int", "default": 0}, + 
"absent_retries_interval": {"type": "int", "default": 1}, }, supports_check_mode=False, ) @@ -273,7 +277,7 @@ def main(): ) compose_files = [] - for i, compose_def in enumerate(compose): + for compose_def in compose: if isinstance(compose_def, dict): compose_file_fd, compose_file = tempfile.mkstemp() client.module.add_cleanup_file(compose_file) diff --git a/plugins/modules/docker_stack_task_info.py b/plugins/modules/docker_stack_task_info.py index e9fb91f1..f67d26b1 100644 --- a/plugins/modules/docker_stack_task_info.py +++ b/plugins/modules/docker_stack_task_info.py @@ -104,7 +104,7 @@ def docker_stack_task(module, stack_name): def main(): client = AnsibleModuleDockerClient( - argument_spec={"name": dict(type="str", required=True)}, + argument_spec={"name": {"type": "str", "required": True}}, supports_check_mode=True, ) diff --git a/plugins/modules/docker_swarm.py b/plugins/modules/docker_swarm.py index d55b52c1..db37c2db 100644 --- a/plugins/modules/docker_swarm.py +++ b/plugins/modules/docker_swarm.py @@ -359,17 +359,17 @@ class TaskParameters(DockerBaseClass): def update_from_swarm_info(self, swarm_info): spec = swarm_info["Spec"] - ca_config = spec.get("CAConfig") or dict() + ca_config = spec.get("CAConfig") or {} if self.node_cert_expiry is None: self.node_cert_expiry = ca_config.get("NodeCertExpiry") if self.ca_force_rotate is None: self.ca_force_rotate = ca_config.get("ForceRotate") - dispatcher = spec.get("Dispatcher") or dict() + dispatcher = spec.get("Dispatcher") or {} if self.dispatcher_heartbeat_period is None: self.dispatcher_heartbeat_period = dispatcher.get("HeartbeatPeriod") - raft = spec.get("Raft") or dict() + raft = spec.get("Raft") or {} if self.snapshot_interval is None: self.snapshot_interval = raft.get("SnapshotInterval") if self.keep_old_snapshots is None: @@ -381,13 +381,13 @@ class TaskParameters(DockerBaseClass): if self.election_tick is None: self.election_tick = raft.get("ElectionTick") - orchestration = spec.get("Orchestration") or 
dict() + orchestration = spec.get("Orchestration") or {} if self.task_history_retention_limit is None: self.task_history_retention_limit = orchestration.get( "TaskHistoryRetentionLimit" ) - encryption_config = spec.get("EncryptionConfig") or dict() + encryption_config = spec.get("EncryptionConfig") or {} if self.autolock_managers is None: self.autolock_managers = encryption_config.get("AutoLockManagers") @@ -401,24 +401,24 @@ class TaskParameters(DockerBaseClass): self.log_driver = spec["TaskDefaults"]["LogDriver"] def update_parameters(self, client): - assign = dict( - snapshot_interval="snapshot_interval", - task_history_retention_limit="task_history_retention_limit", - keep_old_snapshots="keep_old_snapshots", - log_entries_for_slow_followers="log_entries_for_slow_followers", - heartbeat_tick="heartbeat_tick", - election_tick="election_tick", - dispatcher_heartbeat_period="dispatcher_heartbeat_period", - node_cert_expiry="node_cert_expiry", - name="name", - labels="labels", - signing_ca_cert="signing_ca_cert", - signing_ca_key="signing_ca_key", - ca_force_rotate="ca_force_rotate", - autolock_managers="autolock_managers", - log_driver="log_driver", - ) - params = dict() + assign = { + "snapshot_interval": "snapshot_interval", + "task_history_retention_limit": "task_history_retention_limit", + "keep_old_snapshots": "keep_old_snapshots", + "log_entries_for_slow_followers": "log_entries_for_slow_followers", + "heartbeat_tick": "heartbeat_tick", + "election_tick": "election_tick", + "dispatcher_heartbeat_period": "dispatcher_heartbeat_period", + "node_cert_expiry": "node_cert_expiry", + "name": "name", + "labels": "labels", + "signing_ca_cert": "signing_ca_cert", + "signing_ca_key": "signing_ca_key", + "ca_force_rotate": "ca_force_rotate", + "autolock_managers": "autolock_managers", + "log_driver": "log_driver", + } + params = {} for dest, source in assign.items(): if not client.option_minimal_versions[source]["supported"]: continue @@ -489,7 +489,7 @@ class 
SwarmManager(DockerBaseClass): choice_map.get(self.state)() if self.client.module._diff or self.parameters.debug: - diff = dict() + diff = {} diff["before"], diff["after"] = self.differences.get_before_after() self.results["diff"] = diff @@ -660,62 +660,65 @@ def _detect_remove_operation(client): def main(): - argument_spec = dict( - advertise_addr=dict(type="str"), - data_path_addr=dict(type="str"), - data_path_port=dict(type="int"), - state=dict( - type="str", - default="present", - choices=["present", "join", "absent", "remove"], - ), - force=dict(type="bool", default=False), - listen_addr=dict(type="str", default="0.0.0.0:2377"), - remote_addrs=dict(type="list", elements="str"), - join_token=dict(type="str", no_log=True), - snapshot_interval=dict(type="int"), - task_history_retention_limit=dict(type="int"), - keep_old_snapshots=dict(type="int"), - log_entries_for_slow_followers=dict(type="int"), - heartbeat_tick=dict(type="int"), - election_tick=dict(type="int"), - dispatcher_heartbeat_period=dict(type="int"), - node_cert_expiry=dict(type="int"), - name=dict(type="str"), - labels=dict(type="dict"), - signing_ca_cert=dict(type="str"), - signing_ca_key=dict(type="str", no_log=True), - ca_force_rotate=dict(type="int"), - autolock_managers=dict(type="bool"), - node_id=dict(type="str"), - rotate_worker_token=dict(type="bool", default=False), - rotate_manager_token=dict(type="bool", default=False), - default_addr_pool=dict(type="list", elements="str"), - subnet_size=dict(type="int"), - ) + argument_spec = { + "advertise_addr": {"type": "str"}, + "data_path_addr": {"type": "str"}, + "data_path_port": {"type": "int"}, + "state": { + "type": "str", + "default": "present", + "choices": ["present", "join", "absent", "remove"], + }, + "force": {"type": "bool", "default": False}, + "listen_addr": {"type": "str", "default": "0.0.0.0:2377"}, + "remote_addrs": {"type": "list", "elements": "str"}, + "join_token": {"type": "str", "no_log": True}, + "snapshot_interval": {"type": 
"int"}, + "task_history_retention_limit": {"type": "int"}, + "keep_old_snapshots": {"type": "int"}, + "log_entries_for_slow_followers": {"type": "int"}, + "heartbeat_tick": {"type": "int"}, + "election_tick": {"type": "int"}, + "dispatcher_heartbeat_period": {"type": "int"}, + "node_cert_expiry": {"type": "int"}, + "name": {"type": "str"}, + "labels": {"type": "dict"}, + "signing_ca_cert": {"type": "str"}, + "signing_ca_key": {"type": "str", "no_log": True}, + "ca_force_rotate": {"type": "int"}, + "autolock_managers": {"type": "bool"}, + "node_id": {"type": "str"}, + "rotate_worker_token": {"type": "bool", "default": False}, + "rotate_manager_token": {"type": "bool", "default": False}, + "default_addr_pool": {"type": "list", "elements": "str"}, + "subnet_size": {"type": "int"}, + } required_if = [ ("state", "join", ["remote_addrs", "join_token"]), ("state", "remove", ["node_id"]), ] - option_minimal_versions = dict( - labels=dict(docker_py_version="2.6.0", docker_api_version="1.32"), - signing_ca_cert=dict(docker_py_version="2.6.0", docker_api_version="1.30"), - signing_ca_key=dict(docker_py_version="2.6.0", docker_api_version="1.30"), - ca_force_rotate=dict(docker_py_version="2.6.0", docker_api_version="1.30"), - autolock_managers=dict(docker_py_version="2.6.0"), - log_driver=dict(docker_py_version="2.6.0"), - remove_operation=dict( - docker_py_version="2.4.0", - detect_usage=_detect_remove_operation, - usage_msg="remove swarm nodes", - ), - default_addr_pool=dict(docker_py_version="4.0.0", docker_api_version="1.39"), - subnet_size=dict(docker_py_version="4.0.0", docker_api_version="1.39"), - data_path_addr=dict(docker_py_version="4.0.0", docker_api_version="1.30"), - data_path_port=dict(docker_py_version="6.0.0", docker_api_version="1.40"), - ) + option_minimal_versions = { + "labels": {"docker_py_version": "2.6.0", "docker_api_version": "1.32"}, + "signing_ca_cert": {"docker_py_version": "2.6.0", "docker_api_version": "1.30"}, + "signing_ca_key": 
{"docker_py_version": "2.6.0", "docker_api_version": "1.30"}, + "ca_force_rotate": {"docker_py_version": "2.6.0", "docker_api_version": "1.30"}, + "autolock_managers": {"docker_py_version": "2.6.0"}, + "log_driver": {"docker_py_version": "2.6.0"}, + "remove_operation": { + "docker_py_version": "2.4.0", + "detect_usage": _detect_remove_operation, + "usage_msg": "remove swarm nodes", + }, + "default_addr_pool": { + "docker_py_version": "4.0.0", + "docker_api_version": "1.39", + }, + "subnet_size": {"docker_py_version": "4.0.0", "docker_api_version": "1.39"}, + "data_path_addr": {"docker_py_version": "4.0.0", "docker_api_version": "1.30"}, + "data_path_port": {"docker_py_version": "6.0.0", "docker_api_version": "1.40"}, + } client = AnsibleDockerSwarmClient( argument_spec=argument_spec, @@ -727,7 +730,7 @@ def main(): sanitize_labels(client.module.params["labels"], "labels", client) try: - results = dict(changed=False, result="", actions=[]) + results = {"changed": False, "result": "", "actions": []} SwarmManager(client, results)() client.module.exit_json(**results) diff --git a/plugins/modules/docker_swarm_info.py b/plugins/modules/docker_swarm_info.py index abd5bf0e..9b614912 100644 --- a/plugins/modules/docker_swarm_info.py +++ b/plugins/modules/docker_swarm_info.py @@ -261,7 +261,7 @@ class DockerSwarmManager(DockerBaseClass): return items for item in items: - item_record = dict() + item_record = {} if docker_object == "nodes": item_record = self.get_essential_facts_nodes(item) @@ -277,7 +277,7 @@ class DockerSwarmManager(DockerBaseClass): @staticmethod def get_essential_facts_nodes(item): - object_essentials = dict() + object_essentials = {} object_essentials["ID"] = item.get("ID") object_essentials["Hostname"] = item["Description"]["Hostname"] @@ -299,7 +299,7 @@ class DockerSwarmManager(DockerBaseClass): return object_essentials def get_essential_facts_tasks(self, item): - object_essentials = dict() + object_essentials = {} object_essentials["ID"] = item["ID"] 
# Returning container ID to not trigger another connection to host @@ -320,7 +320,7 @@ class DockerSwarmManager(DockerBaseClass): @staticmethod def get_essential_facts_services(item): - object_essentials = dict() + object_essentials = {} object_essentials["ID"] = item["ID"] object_essentials["Name"] = item["Spec"]["Name"] @@ -349,39 +349,39 @@ class DockerSwarmManager(DockerBaseClass): def main(): - argument_spec = dict( - nodes=dict(type="bool", default=False), - nodes_filters=dict(type="dict"), - tasks=dict(type="bool", default=False), - tasks_filters=dict(type="dict"), - services=dict(type="bool", default=False), - services_filters=dict(type="dict"), - unlock_key=dict(type="bool", default=False), - verbose_output=dict(type="bool", default=False), - ) - option_minimal_versions = dict( - unlock_key=dict(docker_py_version="2.7.0"), - ) + argument_spec = { + "nodes": {"type": "bool", "default": False}, + "nodes_filters": {"type": "dict"}, + "tasks": {"type": "bool", "default": False}, + "tasks_filters": {"type": "dict"}, + "services": {"type": "bool", "default": False}, + "services_filters": {"type": "dict"}, + "unlock_key": {"type": "bool", "default": False}, + "verbose_output": {"type": "bool", "default": False}, + } + option_minimal_versions = { + "unlock_key": {"docker_py_version": "2.7.0"}, + } client = AnsibleDockerSwarmClient( argument_spec=argument_spec, supports_check_mode=True, min_docker_version="1.10.0", option_minimal_versions=option_minimal_versions, - fail_results=dict( - can_talk_to_docker=False, - docker_swarm_active=False, - docker_swarm_manager=False, - ), + fail_results={ + "can_talk_to_docker": False, + "docker_swarm_active": False, + "docker_swarm_manager": False, + }, ) client.fail_results["can_talk_to_docker"] = True client.fail_results["docker_swarm_active"] = client.check_if_swarm_node() client.fail_results["docker_swarm_manager"] = client.check_if_swarm_manager() try: - results = dict( - changed=False, - ) + results = { + "changed": False, 
+ } DockerSwarmManager(client, results) results.update(client.fail_results) diff --git a/plugins/modules/docker_swarm_service.py b/plugins/modules/docker_swarm_service.py index 754b750f..eb6cc1cb 100644 --- a/plugins/modules/docker_swarm_service.py +++ b/plugins/modules/docker_swarm_service.py @@ -919,10 +919,10 @@ def get_docker_environment(env, env_files): for item in env: try: name, value = item.split("=", 1) - except ValueError: + except ValueError as exc: raise ValueError( "Invalid environment variable found in list, needs to be in format KEY=VALUE." - ) + ) from exc env_dict[name] = value elif env is not None: raise ValueError( @@ -983,7 +983,7 @@ def get_docker_networks(networks, network_ids): try: parsed_network["id"] = network_ids[network_name] except KeyError as e: - raise ValueError(f"Could not find a network named: {e}.") + raise ValueError(f"Could not find a network named: {e}.") from None parsed_networks.append(parsed_network) return parsed_networks or [] @@ -1019,9 +1019,9 @@ def has_dict_changed(new_dict, old_dict): return True if not old_dict and new_dict: return True - defined_options = dict( - (option, value) for option, value in new_dict.items() if value is not None - ) + defined_options = { + option: value for option, value in new_dict.items() if value is not None + } for option, value in defined_options.items(): old_value = old_dict.get(option) if not value and not old_value: @@ -1370,7 +1370,9 @@ class DockerService(DockerBaseClass): try: memory = human_to_bytes(memory) except ValueError as exc: - raise ValueError(f"Failed to convert limit_memory to bytes: {exc}") + raise ValueError( + f"Failed to convert limit_memory to bytes: {exc}" + ) from exc return { "limit_cpu": cpus, "limit_memory": memory, @@ -1392,7 +1394,9 @@ class DockerService(DockerBaseClass): try: memory = human_to_bytes(memory) except ValueError as exc: - raise ValueError(f"Failed to convert reserve_memory to bytes: {exc}") + raise ValueError( + f"Failed to convert 
reserve_memory to bytes: {exc}" + ) from exc return { "reserve_cpu": cpus, "reserve_memory": memory, @@ -1559,7 +1563,7 @@ class DockerService(DockerBaseClass): except ValueError as exc: raise ValueError( f"Failed to convert tmpfs_size to bytes: {exc}" - ) + ) from exc service_m["tmpfs_size"] = tmpfs_size s.mounts.append(service_m) @@ -1855,12 +1859,12 @@ class DockerService(DockerBaseClass): if not publish_item.get("mode"): ignored_keys.add("mode") # Create copies of publish_item dicts where keys specified in ignored_keys are left out - filtered_old_publish_item = dict( - (k, v) for k, v in old_publish_item.items() if k not in ignored_keys - ) - filtered_publish_item = dict( - (k, v) for k, v in publish_item.items() if k not in ignored_keys - ) + filtered_old_publish_item = { + k: v for k, v in old_publish_item.items() if k not in ignored_keys + } + filtered_publish_item = { + k: v for k, v in publish_item.items() if k not in ignored_keys + } if filtered_publish_item != filtered_old_publish_item: return True return False @@ -2213,11 +2217,11 @@ class DockerServiceManager: "StartPeriod": "start_period", "Retries": "retries", } - healthcheck = dict( - (options[key], value) + healthcheck = { + options[key]: value for key, value in healthcheck_data.items() if value is not None and key in options - ) + } ds.healthcheck = healthcheck update_config_data = raw_data["Spec"].get("UpdateConfig") @@ -2258,7 +2262,7 @@ class DockerServiceManager: ) for host in hosts ] - ds.hosts = dict((hostname, ip) for ip, hostname in hosts) + ds.hosts = {hostname: ip for ip, hostname in hosts} ds.tty = task_template_data["ContainerSpec"].get("TTY") placement = task_template_data.get("Placement") @@ -2268,10 +2272,10 @@ class DockerServiceManager: placement_preferences = [] for preference in placement.get("Preferences", []): placement_preferences.append( - dict( - (key.lower(), value["SpreadDescriptor"]) + { + key.lower(): value["SpreadDescriptor"] for key, value in preference.items() - ) + 
} ) ds.placement_preferences = placement_preferences or None @@ -2345,10 +2349,9 @@ class DockerServiceManager: volume_options = mount_data.get("VolumeOptions", {}) tmpfs_options = mount_data.get("TmpfsOptions", {}) driver_config = volume_options.get("DriverConfig", {}) - driver_config = ( - dict((key.lower(), value) for key, value in driver_config.items()) - or None - ) + driver_config = { + key.lower(): value for key, value in driver_config.items() + } or None ds.mounts.append( { "source": mount_data.get("Source", ""), @@ -2444,9 +2447,7 @@ class DockerServiceManager: return f"{name}@{digest}" def get_networks_names_ids(self): - return dict( - (network["Name"], network["Id"]) for network in self.client.networks() - ) + return {network["Name"]: network["Id"] for network in self.client.networks()} def get_missing_secret_ids(self): """ @@ -2460,11 +2461,11 @@ class DockerServiceManager: if not secret_names: return {} secrets = self.client.secrets(filters={"name": secret_names}) - secrets = dict( - (secret["Spec"]["Name"], secret["ID"]) + secrets = { + secret["Spec"]["Name"]: secret["ID"] for secret in secrets if secret["Spec"]["Name"] in secret_names - ) + } for secret_name in secret_names: if secret_name not in secrets: self.client.fail(f'Could not find a secret named "{secret_name}"') @@ -2482,11 +2483,11 @@ class DockerServiceManager: if not config_names: return {} configs = self.client.configs(filters={"name": config_names}) - configs = dict( - (config["Spec"]["Name"], config["ID"]) + configs = { + config["Spec"]["Name"]: config["ID"] for config in configs if config["Spec"]["Name"] in config_names - ) + } for config_name in config_names: if config_name not in configs: self.client.fail(f'Could not find a config named "{config_name}"') @@ -2627,26 +2628,30 @@ def _detect_update_config_failure_action_rollback(client): def main(): - argument_spec = dict( - name=dict(type="str", required=True), - image=dict(type="str"), - state=dict(type="str", default="present", 
choices=["present", "absent"]), - mounts=dict( - type="list", - elements="dict", - options=dict( - source=dict(type="str"), - target=dict(type="str", required=True), - type=dict( - type="str", - default="bind", - choices=["bind", "volume", "tmpfs", "npipe"], - ), - readonly=dict(type="bool"), - labels=dict(type="dict"), - propagation=dict( - type="str", - choices=[ + argument_spec = { + "name": {"type": "str", "required": True}, + "image": {"type": "str"}, + "state": { + "type": "str", + "default": "present", + "choices": ["present", "absent"], + }, + "mounts": { + "type": "list", + "elements": "dict", + "options": { + "source": {"type": "str"}, + "target": {"type": "str", "required": True}, + "type": { + "type": "str", + "default": "bind", + "choices": ["bind", "volume", "tmpfs", "npipe"], + }, + "readonly": {"type": "bool"}, + "labels": {"type": "dict"}, + "propagation": { + "type": "str", + "choices": [ "shared", "slave", "private", @@ -2654,266 +2659,273 @@ def main(): "rslave", "rprivate", ], - ), - no_copy=dict(type="bool"), - driver_config=dict( - type="dict", - options=dict(name=dict(type="str"), options=dict(type="dict")), - ), - tmpfs_size=dict(type="str"), - tmpfs_mode=dict(type="int"), - ), - ), - configs=dict( - type="list", - elements="dict", - options=dict( - config_id=dict(type="str"), - config_name=dict(type="str", required=True), - filename=dict(type="str"), - uid=dict(type="str"), - gid=dict(type="str"), - mode=dict(type="int"), - ), - ), - secrets=dict( - type="list", - elements="dict", - no_log=False, - options=dict( - secret_id=dict(type="str", no_log=False), - secret_name=dict(type="str", required=True, no_log=False), - filename=dict(type="str"), - uid=dict(type="str"), - gid=dict(type="str"), - mode=dict(type="int"), - ), - ), - networks=dict(type="list", elements="raw"), - command=dict(type="raw"), - args=dict(type="list", elements="str"), - env=dict(type="raw"), - env_files=dict(type="list", elements="path"), - 
force_update=dict(type="bool", default=False), - groups=dict(type="list", elements="str"), - logging=dict( - type="dict", - options=dict( - driver=dict(type="str"), - options=dict(type="dict"), - ), - ), - publish=dict( - type="list", - elements="dict", - options=dict( - published_port=dict(type="int", required=False), - target_port=dict(type="int", required=True), - protocol=dict(type="str", default="tcp", choices=["tcp", "udp"]), - mode=dict(type="str", choices=["ingress", "host"]), - ), - ), - placement=dict( - type="dict", - options=dict( - constraints=dict(type="list", elements="str"), - preferences=dict(type="list", elements="dict"), - replicas_max_per_node=dict(type="int"), - ), - ), - tty=dict(type="bool"), - dns=dict(type="list", elements="str"), - dns_search=dict(type="list", elements="str"), - dns_options=dict(type="list", elements="str"), - healthcheck=dict( - type="dict", - options=dict( - test=dict(type="raw"), - interval=dict(type="str"), - timeout=dict(type="str"), - start_period=dict(type="str"), - retries=dict(type="int"), - ), - ), - hostname=dict(type="str"), - hosts=dict(type="dict"), - labels=dict(type="dict"), - container_labels=dict(type="dict"), - sysctls=dict(type="dict"), - mode=dict( - type="str", - default="replicated", - choices=["replicated", "global", "replicated-job"], - ), - replicas=dict(type="int", default=-1), - endpoint_mode=dict(type="str", choices=["vip", "dnsrr"]), - stop_grace_period=dict(type="str"), - stop_signal=dict(type="str"), - limits=dict( - type="dict", - options=dict( - cpus=dict(type="float"), - memory=dict(type="str"), - ), - ), - read_only=dict(type="bool"), - reservations=dict( - type="dict", - options=dict( - cpus=dict(type="float"), - memory=dict(type="str"), - ), - ), - resolve_image=dict(type="bool", default=False), - restart_config=dict( - type="dict", - options=dict( - condition=dict(type="str", choices=["none", "on-failure", "any"]), - delay=dict(type="str"), - max_attempts=dict(type="int"), - 
window=dict(type="str"), - ), - ), - rollback_config=dict( - type="dict", - options=dict( - parallelism=dict(type="int"), - delay=dict(type="str"), - failure_action=dict(type="str", choices=["continue", "pause"]), - monitor=dict(type="str"), - max_failure_ratio=dict(type="float"), - order=dict(type="str"), - ), - ), - update_config=dict( - type="dict", - options=dict( - parallelism=dict(type="int"), - delay=dict(type="str"), - failure_action=dict( - type="str", choices=["continue", "pause", "rollback"] - ), - monitor=dict(type="str"), - max_failure_ratio=dict(type="float"), - order=dict(type="str"), - ), - ), - user=dict(type="str"), - working_dir=dict(type="str"), - init=dict(type="bool"), - cap_add=dict(type="list", elements="str"), - cap_drop=dict(type="list", elements="str"), - ) + }, + "no_copy": {"type": "bool"}, + "driver_config": { + "type": "dict", + "options": {"name": {"type": "str"}, "options": {"type": "dict"}}, + }, + "tmpfs_size": {"type": "str"}, + "tmpfs_mode": {"type": "int"}, + }, + }, + "configs": { + "type": "list", + "elements": "dict", + "options": { + "config_id": {"type": "str"}, + "config_name": {"type": "str", "required": True}, + "filename": {"type": "str"}, + "uid": {"type": "str"}, + "gid": {"type": "str"}, + "mode": {"type": "int"}, + }, + }, + "secrets": { + "type": "list", + "elements": "dict", + "no_log": False, + "options": { + "secret_id": {"type": "str", "no_log": False}, + "secret_name": {"type": "str", "required": True, "no_log": False}, + "filename": {"type": "str"}, + "uid": {"type": "str"}, + "gid": {"type": "str"}, + "mode": {"type": "int"}, + }, + }, + "networks": {"type": "list", "elements": "raw"}, + "command": {"type": "raw"}, + "args": {"type": "list", "elements": "str"}, + "env": {"type": "raw"}, + "env_files": {"type": "list", "elements": "path"}, + "force_update": {"type": "bool", "default": False}, + "groups": {"type": "list", "elements": "str"}, + "logging": { + "type": "dict", + "options": { + "driver": {"type": 
"str"}, + "options": {"type": "dict"}, + }, + }, + "publish": { + "type": "list", + "elements": "dict", + "options": { + "published_port": {"type": "int", "required": False}, + "target_port": {"type": "int", "required": True}, + "protocol": { + "type": "str", + "default": "tcp", + "choices": ["tcp", "udp"], + }, + "mode": {"type": "str", "choices": ["ingress", "host"]}, + }, + }, + "placement": { + "type": "dict", + "options": { + "constraints": {"type": "list", "elements": "str"}, + "preferences": {"type": "list", "elements": "dict"}, + "replicas_max_per_node": {"type": "int"}, + }, + }, + "tty": {"type": "bool"}, + "dns": {"type": "list", "elements": "str"}, + "dns_search": {"type": "list", "elements": "str"}, + "dns_options": {"type": "list", "elements": "str"}, + "healthcheck": { + "type": "dict", + "options": { + "test": {"type": "raw"}, + "interval": {"type": "str"}, + "timeout": {"type": "str"}, + "start_period": {"type": "str"}, + "retries": {"type": "int"}, + }, + }, + "hostname": {"type": "str"}, + "hosts": {"type": "dict"}, + "labels": {"type": "dict"}, + "container_labels": {"type": "dict"}, + "sysctls": {"type": "dict"}, + "mode": { + "type": "str", + "default": "replicated", + "choices": ["replicated", "global", "replicated-job"], + }, + "replicas": {"type": "int", "default": -1}, + "endpoint_mode": {"type": "str", "choices": ["vip", "dnsrr"]}, + "stop_grace_period": {"type": "str"}, + "stop_signal": {"type": "str"}, + "limits": { + "type": "dict", + "options": { + "cpus": {"type": "float"}, + "memory": {"type": "str"}, + }, + }, + "read_only": {"type": "bool"}, + "reservations": { + "type": "dict", + "options": { + "cpus": {"type": "float"}, + "memory": {"type": "str"}, + }, + }, + "resolve_image": {"type": "bool", "default": False}, + "restart_config": { + "type": "dict", + "options": { + "condition": {"type": "str", "choices": ["none", "on-failure", "any"]}, + "delay": {"type": "str"}, + "max_attempts": {"type": "int"}, + "window": {"type": "str"}, 
+ }, + }, + "rollback_config": { + "type": "dict", + "options": { + "parallelism": {"type": "int"}, + "delay": {"type": "str"}, + "failure_action": {"type": "str", "choices": ["continue", "pause"]}, + "monitor": {"type": "str"}, + "max_failure_ratio": {"type": "float"}, + "order": {"type": "str"}, + }, + }, + "update_config": { + "type": "dict", + "options": { + "parallelism": {"type": "int"}, + "delay": {"type": "str"}, + "failure_action": { + "type": "str", + "choices": ["continue", "pause", "rollback"], + }, + "monitor": {"type": "str"}, + "max_failure_ratio": {"type": "float"}, + "order": {"type": "str"}, + }, + }, + "user": {"type": "str"}, + "working_dir": {"type": "str"}, + "init": {"type": "bool"}, + "cap_add": {"type": "list", "elements": "str"}, + "cap_drop": {"type": "list", "elements": "str"}, + } - option_minimal_versions = dict( - dns=dict(docker_py_version="2.6.0"), - dns_options=dict(docker_py_version="2.6.0"), - dns_search=dict(docker_py_version="2.6.0"), - endpoint_mode=dict(docker_py_version="3.0.0"), - force_update=dict(docker_py_version="2.1.0"), - healthcheck=dict(docker_py_version="2.6.0"), - hostname=dict(docker_py_version="2.2.0"), - hosts=dict(docker_py_version="2.6.0"), - groups=dict(docker_py_version="2.6.0"), - tty=dict(docker_py_version="2.4.0"), - secrets=dict(docker_py_version="2.4.0"), - configs=dict(docker_py_version="2.6.0", docker_api_version="1.30"), - stop_signal=dict(docker_py_version="2.6.0", docker_api_version="1.28"), - publish=dict(docker_py_version="3.0.0"), - read_only=dict(docker_py_version="2.6.0", docker_api_version="1.28"), - resolve_image=dict(docker_api_version="1.30", docker_py_version="3.2.0"), - rollback_config=dict(docker_py_version="3.5.0", docker_api_version="1.28"), - init=dict(docker_py_version="4.0.0", docker_api_version="1.37"), - cap_add=dict(docker_py_version="5.0.3", docker_api_version="1.41"), - cap_drop=dict(docker_py_version="5.0.3", docker_api_version="1.41"), - 
sysctls=dict(docker_py_version="6.0.0", docker_api_version="1.40"), + option_minimal_versions = { + "dns": {"docker_py_version": "2.6.0"}, + "dns_options": {"docker_py_version": "2.6.0"}, + "dns_search": {"docker_py_version": "2.6.0"}, + "endpoint_mode": {"docker_py_version": "3.0.0"}, + "force_update": {"docker_py_version": "2.1.0"}, + "healthcheck": {"docker_py_version": "2.6.0"}, + "hostname": {"docker_py_version": "2.2.0"}, + "hosts": {"docker_py_version": "2.6.0"}, + "groups": {"docker_py_version": "2.6.0"}, + "tty": {"docker_py_version": "2.4.0"}, + "secrets": {"docker_py_version": "2.4.0"}, + "configs": {"docker_py_version": "2.6.0", "docker_api_version": "1.30"}, + "stop_signal": {"docker_py_version": "2.6.0", "docker_api_version": "1.28"}, + "publish": {"docker_py_version": "3.0.0"}, + "read_only": {"docker_py_version": "2.6.0", "docker_api_version": "1.28"}, + "resolve_image": {"docker_api_version": "1.30", "docker_py_version": "3.2.0"}, + "rollback_config": {"docker_py_version": "3.5.0", "docker_api_version": "1.28"}, + "init": {"docker_py_version": "4.0.0", "docker_api_version": "1.37"}, + "cap_add": {"docker_py_version": "5.0.3", "docker_api_version": "1.41"}, + "cap_drop": {"docker_py_version": "5.0.3", "docker_api_version": "1.41"}, + "sysctls": {"docker_py_version": "6.0.0", "docker_api_version": "1.40"}, # specials - publish_mode=dict( - docker_py_version="3.0.0", - detect_usage=_detect_publish_mode_usage, - usage_msg="set publish.mode", - ), - healthcheck_start_period=dict( - docker_py_version="2.6.0", - docker_api_version="1.29", - detect_usage=_detect_healthcheck_start_period, - usage_msg="set healthcheck.start_period", - ), - update_config_max_failure_ratio=dict( - docker_py_version="2.1.0", - detect_usage=lambda c: (c.module.params["update_config"] or {}).get( + "publish_mode": { + "docker_py_version": "3.0.0", + "detect_usage": _detect_publish_mode_usage, + "usage_msg": "set publish.mode", + }, + "healthcheck_start_period": { + 
"docker_py_version": "2.6.0", + "docker_api_version": "1.29", + "detect_usage": _detect_healthcheck_start_period, + "usage_msg": "set healthcheck.start_period", + }, + "update_config_max_failure_ratio": { + "docker_py_version": "2.1.0", + "detect_usage": lambda c: (c.module.params["update_config"] or {}).get( "max_failure_ratio" ) is not None, - usage_msg="set update_config.max_failure_ratio", - ), - update_config_failure_action=dict( - docker_py_version="3.5.0", - docker_api_version="1.28", - detect_usage=_detect_update_config_failure_action_rollback, - usage_msg="set update_config.failure_action.rollback", - ), - update_config_monitor=dict( - docker_py_version="2.1.0", - detect_usage=lambda c: (c.module.params["update_config"] or {}).get( + "usage_msg": "set update_config.max_failure_ratio", + }, + "update_config_failure_action": { + "docker_py_version": "3.5.0", + "docker_api_version": "1.28", + "detect_usage": _detect_update_config_failure_action_rollback, + "usage_msg": "set update_config.failure_action.rollback", + }, + "update_config_monitor": { + "docker_py_version": "2.1.0", + "detect_usage": lambda c: (c.module.params["update_config"] or {}).get( "monitor" ) is not None, - usage_msg="set update_config.monitor", - ), - update_config_order=dict( - docker_py_version="2.7.0", - docker_api_version="1.29", - detect_usage=lambda c: (c.module.params["update_config"] or {}).get("order") - is not None, - usage_msg="set update_config.order", - ), - placement_config_preferences=dict( - docker_py_version="2.4.0", - docker_api_version="1.27", - detect_usage=lambda c: (c.module.params["placement"] or {}).get( - "preferences" - ) - is not None, - usage_msg="set placement.preferences", - ), - placement_config_constraints=dict( - docker_py_version="2.4.0", - detect_usage=lambda c: (c.module.params["placement"] or {}).get( - "constraints" - ) - is not None, - usage_msg="set placement.constraints", - ), - placement_config_replicas_max_per_node=dict( - 
docker_py_version="4.4.3", - docker_api_version="1.40", - detect_usage=lambda c: (c.module.params["placement"] or {}).get( - "replicas_max_per_node" - ) - is not None, - usage_msg="set placement.replicas_max_per_node", - ), - mounts_tmpfs=dict( - docker_py_version="2.6.0", - detect_usage=_detect_mount_tmpfs_usage, - usage_msg="set mounts.tmpfs", - ), - rollback_config_order=dict( - docker_api_version="1.29", - detect_usage=lambda c: (c.module.params["rollback_config"] or {}).get( + "usage_msg": "set update_config.monitor", + }, + "update_config_order": { + "docker_py_version": "2.7.0", + "docker_api_version": "1.29", + "detect_usage": lambda c: (c.module.params["update_config"] or {}).get( "order" ) is not None, - usage_msg="set rollback_config.order", - ), - mode_replicated_job=dict( - docker_py_version="6.0.0", - docker_api_version="1.41", - detect_usage=lambda c: c.module.params.get("mode") == "replicated-job", - usage_msg="set mode", - ), - ) + "usage_msg": "set update_config.order", + }, + "placement_config_preferences": { + "docker_py_version": "2.4.0", + "docker_api_version": "1.27", + "detect_usage": lambda c: (c.module.params["placement"] or {}).get( + "preferences" + ) + is not None, + "usage_msg": "set placement.preferences", + }, + "placement_config_constraints": { + "docker_py_version": "2.4.0", + "detect_usage": lambda c: (c.module.params["placement"] or {}).get( + "constraints" + ) + is not None, + "usage_msg": "set placement.constraints", + }, + "placement_config_replicas_max_per_node": { + "docker_py_version": "4.4.3", + "docker_api_version": "1.40", + "detect_usage": lambda c: (c.module.params["placement"] or {}).get( + "replicas_max_per_node" + ) + is not None, + "usage_msg": "set placement.replicas_max_per_node", + }, + "mounts_tmpfs": { + "docker_py_version": "2.6.0", + "detect_usage": _detect_mount_tmpfs_usage, + "usage_msg": "set mounts.tmpfs", + }, + "rollback_config_order": { + "docker_api_version": "1.29", + "detect_usage": lambda c: 
(c.module.params["rollback_config"] or {}).get( + "order" + ) + is not None, + "usage_msg": "set rollback_config.order", + }, + "mode_replicated_job": { + "docker_py_version": "6.0.0", + "docker_api_version": "1.41", + "detect_usage": lambda c: c.module.params.get("mode") == "replicated-job", + "usage_msg": "set mode", + }, + } required_if = [("state", "present", ["image"])] client = AnsibleDockerClient( @@ -2928,16 +2940,16 @@ def main(): dsm = DockerServiceManager(client) msg, changed, rebuilt, changes, facts = dsm.run_safe() - results = dict( - msg=msg, - changed=changed, - rebuilt=rebuilt, - changes=changes, - swarm_service=facts, - ) + results = { + "msg": msg, + "changed": changed, + "rebuilt": rebuilt, + "changes": changes, + "swarm_service": facts, + } if client.module._diff: before, after = dsm.diff_tracker.get_before_after() - results["diff"] = dict(before=before, after=after) + results["diff"] = {"before": before, "after": after} client.module.exit_json(**results) except DockerException as e: diff --git a/plugins/modules/docker_swarm_service_info.py b/plugins/modules/docker_swarm_service_info.py index 81f1b081..358e90dd 100644 --- a/plugins/modules/docker_swarm_service_info.py +++ b/plugins/modules/docker_swarm_service_info.py @@ -85,9 +85,9 @@ def get_service_info(client): def main(): - argument_spec = dict( - name=dict(type="str", required=True), - ) + argument_spec = { + "name": {"type": "str", "required": True}, + } client = AnsibleDockerSwarmClient( argument_spec=argument_spec, diff --git a/plugins/modules/docker_volume.py b/plugins/modules/docker_volume.py index 4803d061..5c59fed9 100644 --- a/plugins/modules/docker_volume.py +++ b/plugins/modules/docker_volume.py @@ -161,7 +161,7 @@ class DockerVolumeManager: self.results = {"changed": False, "actions": []} self.diff = self.client.module._diff self.diff_tracker = DifferenceTracker() - self.diff_result = dict() + self.diff_result = {} self.existing_volume = self.get_existing_volume() @@ -309,17 
+309,23 @@ class DockerVolumeManager: def main(): - argument_spec = dict( - volume_name=dict(type="str", required=True, aliases=["name"]), - state=dict(type="str", default="present", choices=["present", "absent"]), - driver=dict(type="str", default="local"), - driver_options=dict(type="dict", default={}), - labels=dict(type="dict"), - recreate=dict( - type="str", default="never", choices=["always", "never", "options-changed"] - ), - debug=dict(type="bool", default=False), - ) + argument_spec = { + "volume_name": {"type": "str", "required": True, "aliases": ["name"]}, + "state": { + "type": "str", + "default": "present", + "choices": ["present", "absent"], + }, + "driver": {"type": "str", "default": "local"}, + "driver_options": {"type": "dict", "default": {}}, + "labels": {"type": "dict"}, + "recreate": { + "type": "str", + "default": "never", + "choices": ["always", "never", "options-changed"], + }, + "debug": {"type": "bool", "default": False}, + } client = AnsibleDockerClient( argument_spec=argument_spec, diff --git a/plugins/modules/docker_volume_info.py b/plugins/modules/docker_volume_info.py index e0f99a50..76d29d96 100644 --- a/plugins/modules/docker_volume_info.py +++ b/plugins/modules/docker_volume_info.py @@ -92,9 +92,9 @@ def get_existing_volume(client, volume_name): def main(): - argument_spec = dict( - name=dict(type="str", required=True, aliases=["volume_name"]), - ) + argument_spec = { + "name": {"type": "str", "required": True, "aliases": ["volume_name"]}, + } client = AnsibleDockerClient( argument_spec=argument_spec, @@ -106,7 +106,7 @@ def main(): client.module.exit_json( changed=False, - exists=(True if volume else False), + exists=bool(volume), volume=volume, ) except DockerException as e: diff --git a/tests/unit/plugins/module_utils/_api/api/test_client.py b/tests/unit/plugins/module_utils/_api/api/test_client.py index 918d4686..e5ee57df 100644 --- a/tests/unit/plugins/module_utils/_api/api/test_client.py +++ 
b/tests/unit/plugins/module_utils/_api/api/test_client.py @@ -364,7 +364,7 @@ class UnixSocketStreamTest(unittest.TestCase): try: while not self.stop_server: try: - connection, client_address = self.server_socket.accept() + connection, dummy_client_address = self.server_socket.accept() except socket.error: # Probably no connection to accept yet time.sleep(0.01) diff --git a/tests/unit/plugins/module_utils/_api/test_auth.py b/tests/unit/plugins/module_utils/_api/test_auth.py index 363d51e2..217618c7 100644 --- a/tests/unit/plugins/module_utils/_api/test_auth.py +++ b/tests/unit/plugins/module_utils/_api/test_auth.py @@ -807,7 +807,7 @@ class InMemoryStore(Store): try: return self.__store[server] except KeyError: - raise CredentialsNotFound() + raise CredentialsNotFound() from None def store(self, server, username, secret): self.__store[server] = {