diff --git a/plugins/connection/docker.py b/plugins/connection/docker.py index 33913a8a..252f6451 100644 --- a/plugins/connection/docker.py +++ b/plugins/connection/docker.py @@ -86,7 +86,7 @@ options: # - key: extra_env # section: docker_connection # ansible-core's config manager does NOT support converting JSON strings (or other things) to dictionaries, - # it only accepts actual dictionaries (which don't happen to come from env and ini vars). So there's no way + # it only accepts actual dictionaries (which do not happen to come from env and ini vars). So there's no way # to actually provide this parameter from env and ini sources... :-( vars: - name: ansible_docker_extra_env @@ -153,7 +153,7 @@ class Connection(ConnectionBase): # Note: docker supports running as non-root in some configurations. # (For instance, setting the UNIX socket file to be readable and # writable by a specific UNIX group and then putting users into that - # group). Therefore we don't check that the user is root when using + # group). Therefore we do not check that the user is root when using # this connection. But if the user is getting a permission denied # error it probably means that docker on their system is only # configured to be connected to by root and they are not running as @@ -333,9 +333,9 @@ class Connection(ConnectionBase): .format(self.docker_version, self.actual_user or u'?')) return actual_user elif self._display.verbosity > 2: - # Since we're not setting the actual_user, look it up so we have it for logging later + # Since we are not setting the actual_user, look it up so we have it for logging later # Only do this if display verbosity is high enough that we'll need the value - # This saves overhead from calling into docker when we don't need to. + # This saves overhead from calling into docker when we do not need to. 
return self._get_docker_remote_user() else: return None @@ -418,11 +418,11 @@ class Connection(ConnectionBase): ''' Make sure that we put files into a standard path If a path is relative, then we need to choose where to put it. - ssh chooses $HOME but we aren't guaranteed that a home dir will - exist in any given chroot. So for now we're choosing "/" instead. + ssh chooses $HOME but we are not guaranteed that a home dir will + exist in any given chroot. So for now we are choosing "/" instead. This also happens to be the former default. - Can revisit using $HOME instead if it's a problem + Can revisit using $HOME instead if it is a problem ''' if getattr(self._shell, "_IS_WINDOWS", False): import ntpath @@ -444,7 +444,7 @@ class Connection(ConnectionBase): "file or module does not exist: %s" % to_native(in_path)) out_path = shlex_quote(out_path) - # Older docker doesn't have native support for copying files into + # Older docker does not have native support for copying files into # running containers, so we use docker exec to implement this # Although docker version 1.8 and later provide support, the # owner and group of the files are always set to root @@ -490,7 +490,7 @@ class Connection(ConnectionBase): actual_out_path = os.path.join(out_dir, os.path.basename(in_path)) if p.returncode != 0: - # Older docker doesn't have native support for fetching files command `cp` + # Older docker does not have native support for fetching files command `cp` # If `cp` fails, try to use `dd` instead args = self._build_exec_cmd([self._play_context.executable, "-c", "dd if=%s bs=%s" % (in_path, BUFSIZE)]) args = [to_bytes(i, errors='surrogate_or_strict') for i in args] diff --git a/plugins/connection/docker_api.py b/plugins/connection/docker_api.py index d5bd3b6a..0ec54d4a 100644 --- a/plugins/connection/docker_api.py +++ b/plugins/connection/docker_api.py @@ -80,7 +80,7 @@ options: # - key: extra_env # section: docker_connection # ansible-core's config manager does NOT support 
converting JSON strings (or other things) to dictionaries, - # it only accepts actual dictionaries (which don't happen to come from env and ini vars). So there's no way + # it only accepts actual dictionaries (which do not happen to come from env and ini vars). So there's no way # to actually provide this parameter from env and ini sources... :-( vars: - name: ansible_docker_extra_env @@ -206,9 +206,9 @@ class Connection(ConnectionBase): self._connected = True if self.actual_user is None and display.verbosity > 2: - # Since we're not setting the actual_user, look it up so we have it for logging later + # Since we are not setting the actual_user, look it up so we have it for logging later # Only do this if display verbosity is high enough that we'll need the value - # This saves overhead from calling into docker when we don't need to + # This saves overhead from calling into docker when we do not need to display.vvv(u"Trying to determine actual user") result = self._call_client(lambda: self.client.get_json('/containers/{0}/json', self.get_option('remote_addr'))) if result.get('Config'): @@ -320,11 +320,11 @@ class Connection(ConnectionBase): ''' Make sure that we put files into a standard path If a path is relative, then we need to choose where to put it. - ssh chooses $HOME but we aren't guaranteed that a home dir will - exist in any given chroot. So for now we're choosing "/" instead. + ssh chooses $HOME but we are not guaranteed that a home dir will + exist in any given chroot. So for now we are choosing "/" instead. This also happens to be the former default. 
- Can revisit using $HOME instead if it's a problem + Can revisit using $HOME instead if it is a problem ''' if getattr(self._shell, "_IS_WINDOWS", False): import ntpath diff --git a/plugins/inventory/docker_containers.py b/plugins/inventory/docker_containers.py index fc1977dc..4db75a62 100644 --- a/plugins/inventory/docker_containers.py +++ b/plugins/inventory/docker_containers.py @@ -34,7 +34,7 @@ options: plugin: description: - The name of this plugin, it should always be set to V(community.docker.docker_containers) - for this plugin to recognize it as it's own. + for this plugin to recognize it as its own. type: str required: true choices: [ community.docker.docker_containers ] @@ -163,7 +163,7 @@ filters: # Next accept all containers whose inventory_hostname starts with 'a' - include: >- inventory_hostname.startswith("a") - # Exclude all containers that didn't match any of the above filters + # Exclude all containers that did not match any of the above filters - exclude: true ''' diff --git a/plugins/inventory/docker_machine.py b/plugins/inventory/docker_machine.py index 7144a913..13239078 100644 --- a/plugins/inventory/docker_machine.py +++ b/plugins/inventory/docker_machine.py @@ -167,7 +167,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): return vars def _get_machine_names(self): - # Filter out machines that are not in the Running state as we probably can't do anything useful actions + # Filter out machines that are not in the Running state as we probably cannot do any useful actions # with them. 
ls_command = ['ls', '-q'] if self.get_option('running_required'): diff --git a/plugins/inventory/docker_swarm.py b/plugins/inventory/docker_swarm.py index c0f97d9f..46499493 100644 --- a/plugins/inventory/docker_swarm.py +++ b/plugins/inventory/docker_swarm.py @@ -32,7 +32,7 @@ DOCUMENTATION = r''' options: plugin: description: The name of this plugin, it should always be set to V(community.docker.docker_swarm) - for this plugin to recognize it as it's own. + for this plugin to recognize it as its own. type: str required: true choices: [ docker_swarm, community.docker.docker_swarm ] diff --git a/plugins/module_utils/_api/api/client.py b/plugins/module_utils/_api/api/client.py index 44f17924..8ad7c09e 100644 --- a/plugins/module_utils/_api/api/client.py +++ b/plugins/module_utils/_api/api/client.py @@ -276,7 +276,7 @@ class APIClient( return response.text def _post_json(self, url, data, **kwargs): - # Go <1.1 can't unserialize null to a string + # Go <1.1 cannot unserialize null to a string # so we do this disgusting thing here. data2 = {} if data is not None and isinstance(data, dict): @@ -316,8 +316,8 @@ class APIClient( # close TLS sockets. 
sock._response = response except AttributeError: - # UNIX sockets can't have attributes set on them, but that's - # fine because we won't be doing TLS over them + # UNIX sockets cannot have attributes set on them, but that's + # fine because we will not be doing TLS over them pass return sock @@ -340,7 +340,7 @@ class APIClient( data += reader.read(reader._fp.chunk_left) yield data else: - # Response isn't chunked, meaning we probably + # Response is not chunked, meaning we probably # encountered an error immediately yield self._result(response, json=decode) @@ -419,7 +419,7 @@ class APIClient( response.close() def _disable_socket_timeout(self, socket): - """ Depending on the combination of python version and whether we're + """ Depending on the combination of python version and whether we are connecting over http or https, we might need to access _sock, which may or may not exist; or we may need to just settimeout on socket itself, which also may or may not have settimeout on it. To avoid @@ -440,7 +440,7 @@ class APIClient( if hasattr(s, 'gettimeout'): timeout = s.gettimeout() - # Don't change the timeout if it is already disabled. + # Do not change the timeout if it is already disabled. if timeout is None or timeout == 0.0: continue @@ -456,7 +456,7 @@ class APIClient( def _get_result_tty(self, stream, res, is_tty): # We should also use raw streaming (without keep-alive) - # if we're dealing with a tty-enabled container. + # if we are dealing with a tty-enabled container. if is_tty: return self._stream_raw_result(res) if stream else \ self._result(res, binary=True) @@ -506,7 +506,7 @@ class APIClient( def _set_auth_headers(self, headers): log.debug('Looking for auth config') - # If we don't have any auth data so far, try reloading the config + # If we do not have any auth data so far, try reloading the config # file one more time in case anything showed up in there. 
if not self._auth_configs or self._auth_configs.is_empty: log.debug("No auth config in memory - loading from filesystem") diff --git a/plugins/module_utils/_api/api/daemon.py b/plugins/module_utils/_api/api/daemon.py index 16d1d124..55164c0b 100644 --- a/plugins/module_utils/_api/api/daemon.py +++ b/plugins/module_utils/_api/api/daemon.py @@ -72,7 +72,7 @@ class DaemonApiMixin(object): If the server returns an error. """ - # If we don't have any auth data so far, try reloading the config file + # If we do not have any auth data so far, try reloading the config file # one more time in case anything showed up in there. # If dockercfg_path is passed check to see if the config file exists, # if so load that config. @@ -107,7 +107,7 @@ class DaemonApiMixin(object): def ping(self): """ Checks the server is responsive. An exception will be raised if it - isn't responding. + is not responding. Returns: (bool) The response from the server. diff --git a/plugins/module_utils/_api/auth.py b/plugins/module_utils/_api/auth.py index a172ced5..1bcfd653 100644 --- a/plugins/module_utils/_api/auth.py +++ b/plugins/module_utils/_api/auth.py @@ -170,7 +170,7 @@ class AuthConfig(dict): with open(config_file) as f: config_dict = json.load(f) except (IOError, KeyError, ValueError) as e: - # Likely missing new Docker config file or it's in an + # Likely missing new Docker config file or it is in an # unknown format, continue to attempt to read old location # and format. 
log.debug(e) @@ -194,7 +194,7 @@ class AuthConfig(dict): return cls(res, credstore_env) log.debug( - "Couldn't find auth-related section ; attempting to interpret " + "Could not find auth-related section ; attempting to interpret " "as auth-only file" ) return cls({'auths': cls.parse_auth(config_dict)}, credstore_env) diff --git a/plugins/module_utils/_api/errors.py b/plugins/module_utils/_api/errors.py index 47c284d3..a7b92247 100644 --- a/plugins/module_utils/_api/errors.py +++ b/plugins/module_utils/_api/errors.py @@ -52,7 +52,7 @@ class APIError(_HTTPError, DockerException): """ def __init__(self, message, response=None, explanation=None): # requests 1.2 supports response as a keyword argument, but - # requests 1.1 doesn't + # requests 1.1 does not super(APIError, self).__init__(message) self.response = response self.explanation = explanation diff --git a/plugins/module_utils/_api/transport/npipeconn.py b/plugins/module_utils/_api/transport/npipeconn.py index 912e465f..959d6e40 100644 --- a/plugins/module_utils/_api/transport/npipeconn.py +++ b/plugins/module_utils/_api/transport/npipeconn.py @@ -106,7 +106,7 @@ class NpipeHTTPAdapter(BaseHTTPAdapter): def request_url(self, request, proxies): # The select_proxy utility in requests errors out when the provided URL - # doesn't have a hostname, like is the case when using a UNIX socket. + # does not have a hostname, like is the case when using a UNIX socket. # Since proxies are an irrelevant notion in the case of UNIX sockets # anyway, we simply return the path URL directly. 
# See also: https://github.com/docker/docker-sdk-python/issues/811 diff --git a/plugins/module_utils/_api/transport/unixconn.py b/plugins/module_utils/_api/transport/unixconn.py index 3b24fe46..4dce1f56 100644 --- a/plugins/module_utils/_api/transport/unixconn.py +++ b/plugins/module_utils/_api/transport/unixconn.py @@ -107,7 +107,7 @@ class UnixHTTPAdapter(BaseHTTPAdapter): def request_url(self, request, proxies): # The select_proxy utility in requests errors out when the provided URL - # doesn't have a hostname, like is the case when using a UNIX socket. + # does not have a hostname, like is the case when using a UNIX socket. # Since proxies are an irrelevant notion in the case of UNIX sockets # anyway, we simply return the path URL directly. # See also: https://github.com/docker/docker-py/issues/811 diff --git a/plugins/module_utils/_api/types/daemon.py b/plugins/module_utils/_api/types/daemon.py index 61964428..47decd44 100644 --- a/plugins/module_utils/_api/types/daemon.py +++ b/plugins/module_utils/_api/types/daemon.py @@ -67,7 +67,7 @@ class CancellableStream(object): sock = sock_raw._sock elif hasattr(sock_fp, 'channel'): - # We're working with a paramiko (SSH) channel, which doesn't + # We are working with a paramiko (SSH) channel, which does not # support cancelable streams with the current implementation raise DockerException( 'Cancellable streams not supported for the SSH protocol' diff --git a/plugins/module_utils/_api/utils/build.py b/plugins/module_utils/_api/utils/build.py index 85704f94..67946457 100644 --- a/plugins/module_utils/_api/utils/build.py +++ b/plugins/module_utils/_api/utils/build.py @@ -100,7 +100,7 @@ def create_archive(root, files=None, fileobj=None, gzip=False, i.mtime = int(i.mtime) if IS_WINDOWS_PLATFORM: - # Windows doesn't keep track of the execute bit, so we make files + # Windows does not keep track of the execute bit, so we make files # and directories executable by default. 
i.mode = i.mode & 0o755 | 0o111 @@ -113,7 +113,7 @@ def create_archive(root, files=None, fileobj=None, gzip=False, 'Can not read file in context: {0}'.format(full_path) ) else: - # Directories, FIFOs, symlinks... don't need to be read. + # Directories, FIFOs, symlinks... do not need to be read. t.addfile(i, None) for name, contents in extra_files: @@ -210,10 +210,10 @@ class PatternMatcher(object): continue if match: - # If we want to skip this file and it's a directory + # If we want to skip this file and it is a directory # then we should first check to see if there's an # excludes pattern (e.g. !dir/file) that starts with this - # dir. If so then we can't skip this dir. + # dir. If so then we cannot skip this dir. skip = True for pat in self.patterns: diff --git a/plugins/module_utils/_api/utils/config.py b/plugins/module_utils/_api/utils/config.py index eed538b4..a62ce666 100644 --- a/plugins/module_utils/_api/utils/config.py +++ b/plugins/module_utils/_api/utils/config.py @@ -70,7 +70,7 @@ def load_general_config(config_path=None): with open(config_file) as f: return json.load(f) except (IOError, ValueError) as e: - # In the case of a legacy `.dockercfg` file, we won't + # In the case of a legacy `.dockercfg` file, we will not # be able to load any JSON data. log.debug(e) diff --git a/plugins/module_utils/_api/utils/fnmatch.py b/plugins/module_utils/_api/utils/fnmatch.py index f6e77a5f..30883250 100644 --- a/plugins/module_utils/_api/utils/fnmatch.py +++ b/plugins/module_utils/_api/utils/fnmatch.py @@ -48,7 +48,7 @@ def fnmatch(name, pat): An initial period in FILENAME is not special. Both FILENAME and PATTERN are first case-normalized if the operating system requires it. - If you don't want this, use fnmatchcase(FILENAME, PATTERN). + If you do not want this, use fnmatchcase(FILENAME, PATTERN). """ name = name.lower() @@ -58,7 +58,7 @@ def fnmatch(name, pat): def fnmatchcase(name, pat): """Test whether FILENAME matches PATTERN, including case. 
- This is a version of fnmatch() which doesn't case-normalize + This is a version of fnmatch() which does not case-normalize its arguments. """ diff --git a/plugins/module_utils/_api/utils/socket.py b/plugins/module_utils/_api/utils/socket.py index 792aa0cb..16dc07e8 100644 --- a/plugins/module_utils/_api/utils/socket.py +++ b/plugins/module_utils/_api/utils/socket.py @@ -64,7 +64,7 @@ def read(socket, n=4096): len(e.args) > 0 and e.args[0] == NPIPE_ENDED) if is_pipe_ended: - # npipes don't support duplex sockets, so we interpret + # npipes do not support duplex sockets, so we interpret # a PIPE_ENDED error as a close operation (0-length read). return '' raise @@ -73,7 +73,7 @@ def read(socket, n=4096): def read_exactly(socket, n): """ Reads exactly n bytes from socket - Raises SocketError if there isn't enough data + Raises SocketError if there is not enough data """ data = binary_type() while len(data) < n: diff --git a/plugins/module_utils/_api/utils/utils.py b/plugins/module_utils/_api/utils/utils.py index cdf3eedd..81b28a50 100644 --- a/plugins/module_utils/_api/utils/utils.py +++ b/plugins/module_utils/_api/utils/utils.py @@ -160,7 +160,7 @@ def convert_volume_binds(binds): mode = 'rw' # NOTE: this is only relevant for Linux hosts - # (doesn't apply in Docker Desktop) + # (does not apply in Docker Desktop) propagation_modes = [ 'rshared', 'shared', @@ -391,7 +391,7 @@ def kwargs_from_env(ssl_version=None, assert_hostname=None, environment=None): if not tls_verify and assert_hostname is None: # assert_hostname is a subset of TLS verification, - # so if it's not set already then set it to false. + # so if it is not set already then set it to false. 
assert_hostname = False params['tls'] = TLSConfig( diff --git a/plugins/module_utils/common.py b/plugins/module_utils/common.py index d1dcf3e6..d4171b4d 100644 --- a/plugins/module_utils/common.py +++ b/plugins/module_utils/common.py @@ -69,9 +69,9 @@ except ImportError: try: from requests.exceptions import RequestException # noqa: F401, pylint: disable=unused-import except ImportError: - # Either Docker SDK for Python is no longer using requests, or Docker SDK for Python isn't around either, + # Either Docker SDK for Python is no longer using requests, or Docker SDK for Python is not around either, # or Docker SDK for Python's dependency requests is missing. In any case, define an exception - # class RequestException so that our code doesn't break. + # class RequestException so that our code does not break. class RequestException(Exception): pass @@ -455,7 +455,7 @@ class AnsibleDockerClientBase(Client): registry, repo_name = auth.resolve_repository_name(name) if registry == 'docker.io': # If docker.io is explicitly there in name, the image - # isn't found in some cases (#41509) + # is not found in some cases (#41509) self.log("Check for docker.io image: %s" % repo_name) images = self._image_lookup(repo_name, tag) if not images and repo_name.startswith('library/'): @@ -464,8 +464,8 @@ class AnsibleDockerClientBase(Client): self.log("Check for docker.io image: %s" % lookup) images = self._image_lookup(lookup, tag) if not images: - # Last case for some Docker versions: if docker.io wasn't there, - # it can be that the image wasn't found either + # Last case for some Docker versions: if docker.io was not there, + # it can be that the image was not found either # (https://github.com/ansible/ansible/pull/15586) lookup = "%s/%s" % (registry, repo_name) self.log("Check for docker.io image: %s" % lookup) diff --git a/plugins/module_utils/common_api.py b/plugins/module_utils/common_api.py index b5ea42fa..aa7cc91c 100644 --- a/plugins/module_utils/common_api.py +++ 
b/plugins/module_utils/common_api.py @@ -21,7 +21,7 @@ from ansible_collections.community.docker.plugins.module_utils.version import Lo try: from requests.exceptions import RequestException, SSLError # noqa: F401, pylint: disable=unused-import except ImportError: - # Define an exception class RequestException so that our code doesn't break. + # Define an exception class RequestException so that our code does not break. class RequestException(Exception): pass @@ -369,7 +369,7 @@ class AnsibleDockerClientBase(Client): registry, repo_name = auth.resolve_repository_name(name) if registry == 'docker.io': # If docker.io is explicitly there in name, the image - # isn't found in some cases (#41509) + # is not found in some cases (#41509) self.log("Check for docker.io image: %s" % repo_name) images = self._image_lookup(repo_name, tag) if not images and repo_name.startswith('library/'): @@ -378,8 +378,8 @@ class AnsibleDockerClientBase(Client): self.log("Check for docker.io image: %s" % lookup) images = self._image_lookup(lookup, tag) if not images: - # Last case for some Docker versions: if docker.io wasn't there, - # it can be that the image wasn't found either + # Last case for some Docker versions: if docker.io was not there, + # it can be that the image was not found either # (https://github.com/ansible/ansible/pull/15586) lookup = "%s/%s" % (registry, repo_name) self.log("Check for docker.io image: %s" % lookup) diff --git a/plugins/module_utils/common_cli.py b/plugins/module_utils/common_cli.py index d15babf7..a6f7e55a 100644 --- a/plugins/module_utils/common_cli.py +++ b/plugins/module_utils/common_cli.py @@ -120,7 +120,7 @@ class AnsibleDockerClientBase(object): @abc.abstractmethod # def call_cli(self, *args, check_rc=False, data=None, cwd=None, environ_update=None): def call_cli(self, *args, **kwargs): - # Python 2.7 doesn't like anything than '**kwargs' after '*args', so we have to do this manually... 
+ # Python 2.7 does not like anything other than '**kwargs' after '*args', so we have to do this manually... pass # def call_cli_json(self, *args, check_rc=False, data=None, cwd=None, environ_update=None, warn_on_stderr=False): @@ -220,7 +220,7 @@ class AnsibleDockerClientBase(object): registry, repo_name = resolve_repository_name(name) if registry == 'docker.io': # If docker.io is explicitly there in name, the image - # isn't found in some cases (#41509) + # is not found in some cases (#41509) self.log("Check for docker.io image: %s" % repo_name) images = self._image_lookup(repo_name, tag) if not images and repo_name.startswith('library/'): @@ -229,8 +229,8 @@ class AnsibleDockerClientBase(object): self.log("Check for docker.io image: %s" % lookup) images = self._image_lookup(lookup, tag) if not images: - # Last case for some Docker versions: if docker.io wasn't there, - # it can be that the image wasn't found either + # Last case for some Docker versions: if docker.io was not there, + # it can be that the image was not found either # (https://github.com/ansible/ansible/pull/15586) lookup = "%s/%s" % (registry, repo_name) self.log("Check for docker.io image: %s" % lookup) @@ -322,7 +322,7 @@ class AnsibleModuleDockerClient(AnsibleDockerClientBase): # def call_cli(self, *args, check_rc=False, data=None, cwd=None, environ_update=None): def call_cli(self, *args, **kwargs): - # Python 2.7 doesn't like anything than '**kwargs' after '*args', so we have to do this manually... + # Python 2.7 does not like anything other than '**kwargs' after '*args', so we have to do this manually... 
check_rc = kwargs.pop('check_rc', False) data = kwargs.pop('data', None) cwd = kwargs.pop('cwd', None) diff --git a/plugins/module_utils/compose_v2.py b/plugins/module_utils/compose_v2.py index c25a88c3..22b4e1db 100644 --- a/plugins/module_utils/compose_v2.py +++ b/plugins/module_utils/compose_v2.py @@ -841,7 +841,7 @@ class BaseComposeManager(DockerBaseClass): def cleanup_result(self, result): if not result.get('failed'): - # Only return stdout and stderr if it's not empty + # Only return stdout and stderr if it is not empty for res in ('stdout', 'stderr'): if result.get(res) == '': result.pop(res) @@ -851,5 +851,5 @@ class BaseComposeManager(DockerBaseClass): try: shutil.rmtree(dir, True) except Exception: - # shouldn't happen, but simply ignore to be on the safe side + # should not happen, but simply ignore to be on the safe side pass diff --git a/plugins/module_utils/image_archive.py b/plugins/module_utils/image_archive.py index 46b5abc1..cad8e832 100644 --- a/plugins/module_utils/image_archive.py +++ b/plugins/module_utils/image_archive.py @@ -41,7 +41,7 @@ class ImageArchiveInvalidException(Exception): super(ImageArchiveInvalidException, self).__init__(message) - # Python 2 doesn't support causes + # Python 2 does not support causes self.cause = cause diff --git a/plugins/module_utils/module_container/docker_api.py b/plugins/module_utils/module_container/docker_api.py index 06b7d95d..277ccd22 100644 --- a/plugins/module_utils/module_container/docker_api.py +++ b/plugins/module_utils/module_container/docker_api.py @@ -353,7 +353,7 @@ class DockerAPIEngineDriver(EngineDriver): except APIError as exc: if 'Unpause the container before stopping or killing' in exc.explanation: # New docker daemon versions do not allow containers to be removed - # if they are paused. Make sure we don't end up in an infinite loop. + # if they are paused. Make sure we do not end up in an infinite loop. 
if count == 3: raise Exception('%s [tried to unpause three times]' % to_native(exc)) count += 1 @@ -379,7 +379,7 @@ class DockerAPIEngineDriver(EngineDriver): except APIError as exc: if 'Unpause the container before stopping or killing' in exc.explanation: # New docker daemon versions do not allow containers to be removed - # if they are paused. Make sure we don't end up in an infinite loop. + # if they are paused. Make sure we do not end up in an infinite loop. if count == 3: raise Exception('%s [tried to unpause three times]' % to_native(exc)) count += 1 @@ -802,7 +802,7 @@ def _get_image_labels(image): if not image: return {} - # Can't use get('Labels', {}) because 'Labels' may be present and be None + # Cannot use get('Labels', {}) because 'Labels' may be present and be None return image['Config'].get('Labels') or {} @@ -1267,7 +1267,7 @@ def _preprocess_container_names(module, client, api_version, value): # name (and in the latter case, retrieve its ID) container = client.get_container(container_name) if container is None: - # If we can't find the container, issue a warning and continue with + # If we cannot find the container, issue a warning and continue with # what the user specified. module.warn('Cannot find a container with name or ID "{0}"'.format(container_name)) return value diff --git a/plugins/module_utils/module_container/module.py b/plugins/module_utils/module_container/module.py index c97170a3..28b42327 100644 --- a/plugins/module_utils/module_container/module.py +++ b/plugins/module_utils/module_container/module.py @@ -482,7 +482,7 @@ class ContainerManager(DockerBaseClass): self.results['changed'] = True self.results['actions'].append(dict(pulled_image="%s:%s" % (repository, tag), changed=True)) elif not image or self.param_pull_check_mode_behavior == 'always': - # If the image isn't there, or pull_check_mode_behavior == 'always', claim we'll + # If the image is not there, or pull_check_mode_behavior == 'always', claim we'll # pull. 
(Implicitly: if the image is there, claim it already was latest unless # pull_check_mode_behavior == 'always'.) self.results['changed'] = True diff --git a/plugins/module_utils/socket_helper.py b/plugins/module_utils/socket_helper.py index 2148fe97..257bf61a 100644 --- a/plugins/module_utils/socket_helper.py +++ b/plugins/module_utils/socket_helper.py @@ -57,7 +57,7 @@ def shutdown_writing(sock, log=_empty_writer): def write_to_socket(sock, data): if hasattr(sock, '_send_until_done'): - # WrappedSocket (urllib3/contrib/pyopenssl) doesn't have `send`, but + # WrappedSocket (urllib3/contrib/pyopenssl) does not have `send`, but # only `sendall`, which uses `_send_until_done` under the hood. return sock._send_until_done(data) elif hasattr(sock, 'send'): diff --git a/plugins/module_utils/util.py b/plugins/module_utils/util.py index c0fef25c..9a74cd29 100644 --- a/plugins/module_utils/util.py +++ b/plugins/module_utils/util.py @@ -216,7 +216,7 @@ def compare_generic(a, b, method, datatype): # If we would know that both a and b do not contain duplicates, # we could simply compare len(a) to len(b) to finish this test. # We can assume that b has no duplicates (as it is returned by - # docker), but we don't know for a. + # docker), but we do not know for a. for bv in b: found = False for av in a: @@ -309,7 +309,7 @@ def sanitize_labels(labels, labels_field, client=None, module=None): def clean_dict_booleans_for_docker_api(data, allow_sequences=False): ''' - Go doesn't like Python booleans 'True' or 'False', while Ansible is just + Go does not like Python booleans 'True' or 'False', while Ansible is just fine with them in YAML. As such, they need to be converted in cases where we pass dictionaries to the Docker API (e.g. docker_network's driver_options and docker_prune's filters). 
When `allow_sequences=True` diff --git a/plugins/modules/docker_compose_v2.py b/plugins/modules/docker_compose_v2.py index 89d5e8b5..4f120aa0 100644 --- a/plugins/modules/docker_compose_v2.py +++ b/plugins/modules/docker_compose_v2.py @@ -531,7 +531,7 @@ class ServicesManager(BaseComposeManager): return True def cmd_stop(self): - # Since 'docker compose stop' **always** claims its stopping containers, even if they are already + # Since 'docker compose stop' **always** claims it is stopping containers, even if they are already # stopped, we have to do this a bit more complicated. result = dict() diff --git a/plugins/modules/docker_config.py b/plugins/modules/docker_config.py index 50a1f92f..88a92f2e 100644 --- a/plugins/modules/docker_config.py +++ b/plugins/modules/docker_config.py @@ -298,7 +298,7 @@ class ConfigManager(DockerBaseClass): def create_config(self): ''' Create a new config ''' config_id = None - # We can't see the data after creation, so adding a label we can use for idempotency check + # We cannot see the data after creation, so adding a label we can use for idempotency check labels = { 'ansible_key': self.data_key } @@ -350,7 +350,7 @@ class ConfigManager(DockerBaseClass): if not self.force: self.client.module.warn("'ansible_key' label not found. Config will not be changed unless the force parameter is set to 'true'") # template_driver has changed if it was set in the previous config - # and now it differs, or if it wasn't set but now it is. + # and now it differs, or if it was not set but now it is. 
if attrs.get('Templating', {}).get('Name'): if attrs['Templating']['Name'] != self.template_driver: template_driver_changed = True diff --git a/plugins/modules/docker_container_copy_into.py b/plugins/modules/docker_container_copy_into.py index 67411d87..50906759 100644 --- a/plugins/modules/docker_container_copy_into.py +++ b/plugins/modules/docker_container_copy_into.py @@ -371,7 +371,7 @@ def is_binary(content): if b'\x00' in content: return True # TODO: better detection - # (ansible-core also just checks for 0x00, and even just sticks to the first 8k, so this isn't too bad...) + # (ansible-core also just checks for 0x00, and even just sticks to the first 8k, so this is not too bad...) return False @@ -450,7 +450,7 @@ def is_file_idempotent(client, container, managed_path, container_path, follow_l diff['after_header'] = managed_path diff['after'] = to_text(content) - # When forcing and we're not following links in the container, go! + # When forcing and we are not following links in the container, go! if force and not follow_links: retrieve_diff(client, container, container_path, follow_links, diff, max_file_size_for_diff) return container_path, mode, False @@ -467,7 +467,7 @@ def is_file_idempotent(client, container, managed_path, container_path, follow_l if follow_links: container_path = real_container_path - # If the file wasn't found, continue + # If the file was not found, continue if regular_stat is None: if diff is not None: diff['before_header'] = container_path @@ -617,7 +617,7 @@ def is_content_idempotent(client, container, content, container_path, follow_lin diff['after_header'] = 'dynamically generated' diff['after'] = to_text(content) - # When forcing and we're not following links in the container, go! + # When forcing and we are not following links in the container, go! 
if force and not follow_links: retrieve_diff(client, container, container_path, follow_links, diff, max_file_size_for_diff) return container_path, mode, False @@ -634,7 +634,7 @@ def is_content_idempotent(client, container, content, container_path, follow_lin if follow_links: container_path = real_container_path - # If the file wasn't found, continue + # If the file was not found, continue if regular_stat is None: if diff is not None: diff['before_header'] = container_path diff --git a/plugins/modules/docker_image.py b/plugins/modules/docker_image.py index b5a98c97..6927d301 100644 --- a/plugins/modules/docker_image.py +++ b/plugins/modules/docker_image.py @@ -587,7 +587,7 @@ class ImageManager(DockerBaseClass): try: self.client.delete_json('/images/{0}', name, params={'force': self.force_absent}) except NotFound: - # If the image vanished while we were trying to remove it, don't fail + # If the image vanished while we were trying to remove it, do not fail pass except Exception as exc: self.fail("Error removing image %s - %s" % (name, to_native(exc))) @@ -758,7 +758,7 @@ class ImageManager(DockerBaseClass): :param name: name of the image. required. :param tag: image tag. :param repository: path to the repository. required. - :param push: bool. push the image once it's tagged. + :param push: bool. push the image once it is tagged. :return: None ''' repo, repo_tag = parse_repository_tag(repository) @@ -780,7 +780,7 @@ class ImageManager(DockerBaseClass): if not self.check_mode: try: # Finding the image does not always work, especially running a localhost registry. In those - # cases, if we don't set force=True, it errors. + # cases, if we do not set force=True, it errors. 
params = { 'tag': repo_tag, 'repo': repo, diff --git a/plugins/modules/docker_image_remove.py b/plugins/modules/docker_image_remove.py index c8ea326b..da4ac269 100644 --- a/plugins/modules/docker_image_remove.py +++ b/plugins/modules/docker_image_remove.py @@ -190,7 +190,7 @@ class ImageRemover(DockerBaseClass): try: res = self.client.delete_json('/images/{0}', name, params={'force': self.force, 'noprune': not self.prune}) except NotFound: - # If the image vanished while we were trying to remove it, don't fail + # If the image vanished while we were trying to remove it, do not fail res = [] except Exception as exc: self.fail("Error removing image %s - %s" % (name, to_native(exc))) diff --git a/plugins/modules/docker_login.py b/plugins/modules/docker_login.py index bb4e00b8..f5398a2b 100644 --- a/plugins/modules/docker_login.py +++ b/plugins/modules/docker_login.py @@ -320,7 +320,7 @@ class LoginManager(DockerBaseClass): # If user is already logged in, then response contains password for user if 'password' in response: # This returns correct password if user is logged in and wrong password is given. - # So if it returns another password as we passed, and the user didn't request to + # So if it returns another password as we passed, and the user did not request to # reauthorize, still do it. 
if not self.reauthorize and response['password'] != self.password: try: diff --git a/plugins/modules/docker_secret.py b/plugins/modules/docker_secret.py index cbc5a037..160d957a 100644 --- a/plugins/modules/docker_secret.py +++ b/plugins/modules/docker_secret.py @@ -289,7 +289,7 @@ class SecretManager(DockerBaseClass): def create_secret(self): ''' Create a new secret ''' secret_id = None - # We can't see the data after creation, so adding a label we can use for idempotency check + # We cannot see the data after creation, so adding a label we can use for idempotency check labels = { 'ansible_key': self.data_key } diff --git a/plugins/modules/docker_swarm.py b/plugins/modules/docker_swarm.py index 5e840f8c..87c8f02c 100644 --- a/plugins/modules/docker_swarm.py +++ b/plugins/modules/docker_swarm.py @@ -38,13 +38,13 @@ options: the port number from the listen address is used. - If O(advertise_addr) is not specified, it will be automatically detected when possible. - - Only used when swarm is initialised or joined. Because of this it's not + - Only used when swarm is initialised or joined. Because of this it is not considered for idempotency checking. type: str default_addr_pool: description: - Default address pool in CIDR format. - - Only used when swarm is initialised. Because of this it's not considered + - Only used when swarm is initialised. Because of this it is not considered for idempotency checking. - Requires API version >= 1.39. type: list @@ -52,7 +52,7 @@ options: subnet_size: description: - Default address pool subnet mask length. - - Only used when swarm is initialised. Because of this it's not considered + - Only used when swarm is initialised. Because of this it is not considered for idempotency checking. - Requires API version >= 1.39. type: int @@ -64,7 +64,7 @@ options: like V(eth0:4567). - If the port number is omitted, the default swarm listening port is used. - - Only used when swarm is initialised or joined. 
Because of this it's not + - Only used when swarm is initialised or joined. Because of this it is not considered for idempotency checking. type: str default: 0.0.0.0:2377