Improve language.

This commit is contained in:
Felix Fontein 2024-12-28 14:30:49 +01:00
parent 04c97728dc
commit f69536ef3b
34 changed files with 89 additions and 89 deletions

View File

@ -86,7 +86,7 @@ options:
# - key: extra_env
# section: docker_connection
# ansible-core's config manager does NOT support converting JSON strings (or other things) to dictionaries,
# it only accepts actual dictionaries (which don't happen to come from env and ini vars). So there's no way
# it only accepts actual dictionaries (which do not happen to come from env and ini vars). So there's no way
# to actually provide this parameter from env and ini sources... :-(
vars:
- name: ansible_docker_extra_env
@ -153,7 +153,7 @@ class Connection(ConnectionBase):
# Note: docker supports running as non-root in some configurations.
# (For instance, setting the UNIX socket file to be readable and
# writable by a specific UNIX group and then putting users into that
# group). Therefore we don't check that the user is root when using
# group). Therefore we do not check that the user is root when using
# this connection. But if the user is getting a permission denied
# error it probably means that docker on their system is only
# configured to be connected to by root and they are not running as
@ -333,9 +333,9 @@ class Connection(ConnectionBase):
.format(self.docker_version, self.actual_user or u'?'))
return actual_user
elif self._display.verbosity > 2:
# Since we're not setting the actual_user, look it up so we have it for logging later
# Since we are not setting the actual_user, look it up so we have it for logging later
# Only do this if display verbosity is high enough that we'll need the value
# This saves overhead from calling into docker when we don't need to.
# This saves overhead from calling into docker when we do not need to.
return self._get_docker_remote_user()
else:
return None
@ -418,11 +418,11 @@ class Connection(ConnectionBase):
''' Make sure that we put files into a standard path
If a path is relative, then we need to choose where to put it.
ssh chooses $HOME but we aren't guaranteed that a home dir will
exist in any given chroot. So for now we're choosing "/" instead.
ssh chooses $HOME but we are not guaranteed that a home dir will
exist in any given chroot. So for now we are choosing "/" instead.
This also happens to be the former default.
Can revisit using $HOME instead if it's a problem
Can revisit using $HOME instead if it is a problem
'''
if getattr(self._shell, "_IS_WINDOWS", False):
import ntpath
@ -444,7 +444,7 @@ class Connection(ConnectionBase):
"file or module does not exist: %s" % to_native(in_path))
out_path = shlex_quote(out_path)
# Older docker doesn't have native support for copying files into
# Older docker does not have native support for copying files into
# running containers, so we use docker exec to implement this
# Although docker version 1.8 and later provide support, the
# owner and group of the files are always set to root
@ -490,7 +490,7 @@ class Connection(ConnectionBase):
actual_out_path = os.path.join(out_dir, os.path.basename(in_path))
if p.returncode != 0:
# Older docker doesn't have native support for fetching files command `cp`
# Older docker does not have native support for fetching files command `cp`
# If `cp` fails, try to use `dd` instead
args = self._build_exec_cmd([self._play_context.executable, "-c", "dd if=%s bs=%s" % (in_path, BUFSIZE)])
args = [to_bytes(i, errors='surrogate_or_strict') for i in args]

View File

@ -79,7 +79,7 @@ options:
# - key: extra_env
# section: docker_connection
# ansible-core's config manager does NOT support converting JSON strings (or other things) to dictionaries,
# it only accepts actual dictionaries (which don't happen to come from env and ini vars). So there's no way
# it only accepts actual dictionaries (which do not happen to come from env and ini vars). So there's no way
# to actually provide this parameter from env and ini sources... :-(
vars:
- name: ansible_docker_extra_env
@ -205,9 +205,9 @@ class Connection(ConnectionBase):
self._connected = True
if self.actual_user is None and display.verbosity > 2:
# Since we're not setting the actual_user, look it up so we have it for logging later
# Since we are not setting the actual_user, look it up so we have it for logging later
# Only do this if display verbosity is high enough that we'll need the value
# This saves overhead from calling into docker when we don't need to
# This saves overhead from calling into docker when we do not need to
display.vvv(u"Trying to determine actual user")
result = self._call_client(lambda: self.client.get_json('/containers/{0}/json', self.get_option('remote_addr')))
if result.get('Config'):
@ -319,11 +319,11 @@ class Connection(ConnectionBase):
''' Make sure that we put files into a standard path
If a path is relative, then we need to choose where to put it.
ssh chooses $HOME but we aren't guaranteed that a home dir will
exist in any given chroot. So for now we're choosing "/" instead.
ssh chooses $HOME but we are not guaranteed that a home dir will
exist in any given chroot. So for now we are choosing "/" instead.
This also happens to be the former default.
Can revisit using $HOME instead if it's a problem
Can revisit using $HOME instead if it is a problem
'''
if getattr(self._shell, "_IS_WINDOWS", False):
import ntpath

View File

@ -33,7 +33,7 @@ options:
plugin:
description:
- The name of this plugin, it should always be set to V(community.docker.docker_containers)
for this plugin to recognize it as it's own.
for this plugin to recognize it as its own.
type: str
required: true
choices: [ community.docker.docker_containers ]
@ -162,7 +162,7 @@ filters:
# Next accept all containers whose inventory_hostname starts with 'a'
- include: >-
inventory_hostname.startswith("a")
# Exclude all containers that didn't match any of the above filters
# Exclude all containers that did not match any of the above filters
- exclude: true
'''

View File

@ -167,7 +167,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
return vars
def _get_machine_names(self):
# Filter out machines that are not in the Running state as we probably can't do anything useful actions
# Filter out machines that are not in the Running state as we probably cannot perform any useful actions
# with them.
ls_command = ['ls', '-q']
if self.get_option('running_required'):

View File

@ -32,7 +32,7 @@ DOCUMENTATION = r'''
options:
plugin:
description: The name of this plugin, it should always be set to V(community.docker.docker_swarm)
for this plugin to recognize it as it's own.
for this plugin to recognize it as its own.
type: str
required: true
choices: [ docker_swarm, community.docker.docker_swarm ]

View File

@ -276,7 +276,7 @@ class APIClient(
return response.text
def _post_json(self, url, data, **kwargs):
# Go <1.1 can't unserialize null to a string
# Go <1.1 cannot unserialize null to a string
# so we do this disgusting thing here.
data2 = {}
if data is not None and isinstance(data, dict):
@ -316,8 +316,8 @@ class APIClient(
# close TLS sockets.
sock._response = response
except AttributeError:
# UNIX sockets can't have attributes set on them, but that's
# fine because we won't be doing TLS over them
# UNIX sockets cannot have attributes set on them, but that's
# fine because we will not be doing TLS over them
pass
return sock
@ -340,7 +340,7 @@ class APIClient(
data += reader.read(reader._fp.chunk_left)
yield data
else:
# Response isn't chunked, meaning we probably
# Response is not chunked, meaning we probably
# encountered an error immediately
yield self._result(response, json=decode)
@ -419,7 +419,7 @@ class APIClient(
response.close()
def _disable_socket_timeout(self, socket):
""" Depending on the combination of python version and whether we're
""" Depending on the combination of python version and whether we are
connecting over http or https, we might need to access _sock, which
may or may not exist; or we may need to just settimeout on socket
itself, which also may or may not have settimeout on it. To avoid
@ -440,7 +440,7 @@ class APIClient(
if hasattr(s, 'gettimeout'):
timeout = s.gettimeout()
# Don't change the timeout if it is already disabled.
# Do not change the timeout if it is already disabled.
if timeout is None or timeout == 0.0:
continue
@ -456,7 +456,7 @@ class APIClient(
def _get_result_tty(self, stream, res, is_tty):
# We should also use raw streaming (without keep-alive)
# if we're dealing with a tty-enabled container.
# if we are dealing with a tty-enabled container.
if is_tty:
return self._stream_raw_result(res) if stream else \
self._result(res, binary=True)
@ -506,7 +506,7 @@ class APIClient(
def _set_auth_headers(self, headers):
log.debug('Looking for auth config')
# If we don't have any auth data so far, try reloading the config
# If we do not have any auth data so far, try reloading the config
# file one more time in case anything showed up in there.
if not self._auth_configs or self._auth_configs.is_empty:
log.debug("No auth config in memory - loading from filesystem")

View File

@ -72,7 +72,7 @@ class DaemonApiMixin(object):
If the server returns an error.
"""
# If we don't have any auth data so far, try reloading the config file
# If we do not have any auth data so far, try reloading the config file
# one more time in case anything showed up in there.
# If dockercfg_path is passed check to see if the config file exists,
# if so load that config.
@ -107,7 +107,7 @@ class DaemonApiMixin(object):
def ping(self):
"""
Checks the server is responsive. An exception will be raised if it
isn't responding.
is not responding.
Returns:
(bool) The response from the server.

View File

@ -170,7 +170,7 @@ class AuthConfig(dict):
with open(config_file) as f:
config_dict = json.load(f)
except (IOError, KeyError, ValueError) as e:
# Likely missing new Docker config file or it's in an
# Likely missing new Docker config file or it is in an
# unknown format, continue to attempt to read old location
# and format.
log.debug(e)
@ -194,7 +194,7 @@ class AuthConfig(dict):
return cls(res, credstore_env)
log.debug(
"Couldn't find auth-related section ; attempting to interpret "
"Could not find auth-related section ; attempting to interpret "
"as auth-only file"
)
return cls({'auths': cls.parse_auth(config_dict)}, credstore_env)

View File

@ -52,7 +52,7 @@ class APIError(_HTTPError, DockerException):
"""
def __init__(self, message, response=None, explanation=None):
# requests 1.2 supports response as a keyword argument, but
# requests 1.1 doesn't
# requests 1.1 does not
super(APIError, self).__init__(message)
self.response = response
self.explanation = explanation

View File

@ -106,7 +106,7 @@ class NpipeHTTPAdapter(BaseHTTPAdapter):
def request_url(self, request, proxies):
# The select_proxy utility in requests errors out when the provided URL
# doesn't have a hostname, like is the case when using a UNIX socket.
# does not have a hostname, as is the case when using a UNIX socket.
# Since proxies are an irrelevant notion in the case of UNIX sockets
# anyway, we simply return the path URL directly.
# See also: https://github.com/docker/docker-sdk-python/issues/811

View File

@ -107,7 +107,7 @@ class UnixHTTPAdapter(BaseHTTPAdapter):
def request_url(self, request, proxies):
# The select_proxy utility in requests errors out when the provided URL
# doesn't have a hostname, like is the case when using a UNIX socket.
# does not have a hostname, as is the case when using a UNIX socket.
# Since proxies are an irrelevant notion in the case of UNIX sockets
# anyway, we simply return the path URL directly.
# See also: https://github.com/docker/docker-py/issues/811

View File

@ -67,7 +67,7 @@ class CancellableStream(object):
sock = sock_raw._sock
elif hasattr(sock_fp, 'channel'):
# We're working with a paramiko (SSH) channel, which doesn't
# We are working with a paramiko (SSH) channel, which does not
# support cancelable streams with the current implementation
raise DockerException(
'Cancellable streams not supported for the SSH protocol'

View File

@ -100,7 +100,7 @@ def create_archive(root, files=None, fileobj=None, gzip=False,
i.mtime = int(i.mtime)
if IS_WINDOWS_PLATFORM:
# Windows doesn't keep track of the execute bit, so we make files
# Windows does not keep track of the execute bit, so we make files
# and directories executable by default.
i.mode = i.mode & 0o755 | 0o111
@ -113,7 +113,7 @@ def create_archive(root, files=None, fileobj=None, gzip=False,
'Can not read file in context: {0}'.format(full_path)
)
else:
# Directories, FIFOs, symlinks... don't need to be read.
# Directories, FIFOs, symlinks... do not need to be read.
t.addfile(i, None)
for name, contents in extra_files:
@ -210,10 +210,10 @@ class PatternMatcher(object):
continue
if match:
# If we want to skip this file and it's a directory
# If we want to skip this file and it is a directory
# then we should first check to see if there's an
# excludes pattern (e.g. !dir/file) that starts with this
# dir. If so then we can't skip this dir.
# dir. If so then we cannot skip this dir.
skip = True
for pat in self.patterns:

View File

@ -70,7 +70,7 @@ def load_general_config(config_path=None):
with open(config_file) as f:
return json.load(f)
except (IOError, ValueError) as e:
# In the case of a legacy `.dockercfg` file, we won't
# In the case of a legacy `.dockercfg` file, we will not
# be able to load any JSON data.
log.debug(e)

View File

@ -48,7 +48,7 @@ def fnmatch(name, pat):
An initial period in FILENAME is not special.
Both FILENAME and PATTERN are first case-normalized
if the operating system requires it.
If you don't want this, use fnmatchcase(FILENAME, PATTERN).
If you do not want this, use fnmatchcase(FILENAME, PATTERN).
"""
name = name.lower()
@ -58,7 +58,7 @@ def fnmatch(name, pat):
def fnmatchcase(name, pat):
"""Test whether FILENAME matches PATTERN, including case.
This is a version of fnmatch() which doesn't case-normalize
This is a version of fnmatch() which does not case-normalize
its arguments.
"""

View File

@ -64,7 +64,7 @@ def read(socket, n=4096):
len(e.args) > 0 and
e.args[0] == NPIPE_ENDED)
if is_pipe_ended:
# npipes don't support duplex sockets, so we interpret
# npipes do not support duplex sockets, so we interpret
# a PIPE_ENDED error as a close operation (0-length read).
return ''
raise
@ -73,7 +73,7 @@ def read(socket, n=4096):
def read_exactly(socket, n):
"""
Reads exactly n bytes from socket
Raises SocketError if there isn't enough data
Raises SocketError if there is not enough data
"""
data = binary_type()
while len(data) < n:

View File

@ -160,7 +160,7 @@ def convert_volume_binds(binds):
mode = 'rw'
# NOTE: this is only relevant for Linux hosts
# (doesn't apply in Docker Desktop)
# (does not apply in Docker Desktop)
propagation_modes = [
'rshared',
'shared',
@ -391,7 +391,7 @@ def kwargs_from_env(ssl_version=None, assert_hostname=None, environment=None):
if not tls_verify and assert_hostname is None:
# assert_hostname is a subset of TLS verification,
# so if it's not set already then set it to false.
# so if it is not set already then set it to false.
assert_hostname = False
params['tls'] = TLSConfig(

View File

@ -69,9 +69,9 @@ except ImportError:
try:
from requests.exceptions import RequestException # noqa: F401, pylint: disable=unused-import
except ImportError:
# Either Docker SDK for Python is no longer using requests, or Docker SDK for Python isn't around either,
# Either Docker SDK for Python is no longer using requests, or Docker SDK for Python is not around either,
# or Docker SDK for Python's dependency requests is missing. In any case, define an exception
# class RequestException so that our code doesn't break.
# class RequestException so that our code does not break.
class RequestException(Exception):
pass
@ -440,7 +440,7 @@ class AnsibleDockerClientBase(Client):
registry, repo_name = auth.resolve_repository_name(name)
if registry == 'docker.io':
# If docker.io is explicitly there in name, the image
# isn't found in some cases (#41509)
# is not found in some cases (#41509)
self.log("Check for docker.io image: %s" % repo_name)
images = self._image_lookup(repo_name, tag)
if not images and repo_name.startswith('library/'):
@ -449,8 +449,8 @@ class AnsibleDockerClientBase(Client):
self.log("Check for docker.io image: %s" % lookup)
images = self._image_lookup(lookup, tag)
if not images:
# Last case for some Docker versions: if docker.io wasn't there,
# it can be that the image wasn't found either
# Last case for some Docker versions: if docker.io was not there,
# it can be that the image was not found either
# (https://github.com/ansible/ansible/pull/15586)
lookup = "%s/%s" % (registry, repo_name)
self.log("Check for docker.io image: %s" % lookup)

View File

@ -21,7 +21,7 @@ from ansible_collections.community.docker.plugins.module_utils.version import Lo
try:
from requests.exceptions import RequestException, SSLError # noqa: F401, pylint: disable=unused-import
except ImportError:
# Define an exception class RequestException so that our code doesn't break.
# Define an exception class RequestException so that our code does not break.
class RequestException(Exception):
pass
@ -366,7 +366,7 @@ class AnsibleDockerClientBase(Client):
registry, repo_name = auth.resolve_repository_name(name)
if registry == 'docker.io':
# If docker.io is explicitly there in name, the image
# isn't found in some cases (#41509)
# is not found in some cases (#41509)
self.log("Check for docker.io image: %s" % repo_name)
images = self._image_lookup(repo_name, tag)
if not images and repo_name.startswith('library/'):
@ -375,8 +375,8 @@ class AnsibleDockerClientBase(Client):
self.log("Check for docker.io image: %s" % lookup)
images = self._image_lookup(lookup, tag)
if not images:
# Last case for some Docker versions: if docker.io wasn't there,
# it can be that the image wasn't found either
# Last case for some Docker versions: if docker.io was not there,
# it can be that the image was not found either
# (https://github.com/ansible/ansible/pull/15586)
lookup = "%s/%s" % (registry, repo_name)
self.log("Check for docker.io image: %s" % lookup)

View File

@ -120,7 +120,7 @@ class AnsibleDockerClientBase(object):
@abc.abstractmethod
# def call_cli(self, *args, check_rc=False, data=None, cwd=None, environ_update=None):
def call_cli(self, *args, **kwargs):
# Python 2.7 doesn't like anything than '**kwargs' after '*args', so we have to do this manually...
# Python 2.7 does not like anything other than '**kwargs' after '*args', so we have to do this manually...
pass
# def call_cli_json(self, *args, check_rc=False, data=None, cwd=None, environ_update=None, warn_on_stderr=False):
@ -220,7 +220,7 @@ class AnsibleDockerClientBase(object):
registry, repo_name = resolve_repository_name(name)
if registry == 'docker.io':
# If docker.io is explicitly there in name, the image
# isn't found in some cases (#41509)
# is not found in some cases (#41509)
self.log("Check for docker.io image: %s" % repo_name)
images = self._image_lookup(repo_name, tag)
if not images and repo_name.startswith('library/'):
@ -229,8 +229,8 @@ class AnsibleDockerClientBase(object):
self.log("Check for docker.io image: %s" % lookup)
images = self._image_lookup(lookup, tag)
if not images:
# Last case for some Docker versions: if docker.io wasn't there,
# it can be that the image wasn't found either
# Last case for some Docker versions: if docker.io was not there,
# it can be that the image was not found either
# (https://github.com/ansible/ansible/pull/15586)
lookup = "%s/%s" % (registry, repo_name)
self.log("Check for docker.io image: %s" % lookup)
@ -322,7 +322,7 @@ class AnsibleModuleDockerClient(AnsibleDockerClientBase):
# def call_cli(self, *args, check_rc=False, data=None, cwd=None, environ_update=None):
def call_cli(self, *args, **kwargs):
# Python 2.7 doesn't like anything than '**kwargs' after '*args', so we have to do this manually...
# Python 2.7 does not like anything other than '**kwargs' after '*args', so we have to do this manually...
check_rc = kwargs.pop('check_rc', False)
data = kwargs.pop('data', None)
cwd = kwargs.pop('cwd', None)

View File

@ -841,7 +841,7 @@ class BaseComposeManager(DockerBaseClass):
def cleanup_result(self, result):
if not result.get('failed'):
# Only return stdout and stderr if it's not empty
# Only return stdout and stderr if it is not empty
for res in ('stdout', 'stderr'):
if result.get(res) == '':
result.pop(res)
@ -851,5 +851,5 @@ class BaseComposeManager(DockerBaseClass):
try:
shutil.rmtree(dir, True)
except Exception:
# shouldn't happen, but simply ignore to be on the safe side
# should not happen, but simply ignore to be on the safe side
pass

View File

@ -41,7 +41,7 @@ class ImageArchiveInvalidException(Exception):
super(ImageArchiveInvalidException, self).__init__(message)
# Python 2 doesn't support causes
# Python 2 does not support causes
self.cause = cause

View File

@ -353,7 +353,7 @@ class DockerAPIEngineDriver(EngineDriver):
except APIError as exc:
if 'Unpause the container before stopping or killing' in exc.explanation:
# New docker daemon versions do not allow containers to be removed
# if they are paused. Make sure we don't end up in an infinite loop.
# if they are paused. Make sure we do not end up in an infinite loop.
if count == 3:
raise Exception('%s [tried to unpause three times]' % to_native(exc))
count += 1
@ -379,7 +379,7 @@ class DockerAPIEngineDriver(EngineDriver):
except APIError as exc:
if 'Unpause the container before stopping or killing' in exc.explanation:
# New docker daemon versions do not allow containers to be removed
# if they are paused. Make sure we don't end up in an infinite loop.
# if they are paused. Make sure we do not end up in an infinite loop.
if count == 3:
raise Exception('%s [tried to unpause three times]' % to_native(exc))
count += 1
@ -802,7 +802,7 @@ def _get_image_labels(image):
if not image:
return {}
# Can't use get('Labels', {}) because 'Labels' may be present and be None
# Cannot use get('Labels', {}) because 'Labels' may be present and be None
return image['Config'].get('Labels') or {}
@ -1267,7 +1267,7 @@ def _preprocess_container_names(module, client, api_version, value):
# name (and in the latter case, retrieve its ID)
container = client.get_container(container_name)
if container is None:
# If we can't find the container, issue a warning and continue with
# If we cannot find the container, issue a warning and continue with
# what the user specified.
module.warn('Cannot find a container with name or ID "{0}"'.format(container_name))
return value

View File

@ -461,7 +461,7 @@ class ContainerManager(DockerBaseClass):
self.results['changed'] = True
self.results['actions'].append(dict(pulled_image="%s:%s" % (repository, tag), changed=True))
elif not image or self.param_pull_check_mode_behavior == 'always':
# If the image isn't there, or pull_check_mode_behavior == 'always', claim we'll
# If the image is not there, or pull_check_mode_behavior == 'always', claim we'll
# pull. (Implicitly: if the image is there, claim it already was latest unless
# pull_check_mode_behavior == 'always'.)
self.results['changed'] = True

View File

@ -57,7 +57,7 @@ def shutdown_writing(sock, log=_empty_writer):
def write_to_socket(sock, data):
if hasattr(sock, '_send_until_done'):
# WrappedSocket (urllib3/contrib/pyopenssl) doesn't have `send`, but
# WrappedSocket (urllib3/contrib/pyopenssl) does not have `send`, but
# only `sendall`, which uses `_send_until_done` under the hood.
return sock._send_until_done(data)
elif hasattr(sock, 'send'):

View File

@ -210,7 +210,7 @@ def compare_generic(a, b, method, datatype):
# If we would know that both a and b do not contain duplicates,
# we could simply compare len(a) to len(b) to finish this test.
# We can assume that b has no duplicates (as it is returned by
# docker), but we don't know for a.
# docker), but we do not know for a.
for bv in b:
found = False
for av in a:
@ -303,7 +303,7 @@ def sanitize_labels(labels, labels_field, client=None, module=None):
def clean_dict_booleans_for_docker_api(data, allow_sequences=False):
'''
Go doesn't like Python booleans 'True' or 'False', while Ansible is just
Go does not like Python booleans 'True' or 'False', while Ansible is just
fine with them in YAML. As such, they need to be converted in cases where
we pass dictionaries to the Docker API (e.g. docker_network's
driver_options and docker_prune's filters). When `allow_sequences=True`

View File

@ -550,7 +550,7 @@ class ServicesManager(BaseComposeManager):
return True
def cmd_stop(self):
# Since 'docker compose stop' **always** claims its stopping containers, even if they are already
# Since 'docker compose stop' **always** claims it is stopping containers, even if they are already
# stopped, we have to do this a bit more complicated.
result = dict()

View File

@ -298,7 +298,7 @@ class ConfigManager(DockerBaseClass):
def create_config(self):
''' Create a new config '''
config_id = None
# We can't see the data after creation, so adding a label we can use for idempotency check
# We cannot see the data after creation, so adding a label we can use for idempotency check
labels = {
'ansible_key': self.data_key
}
@ -350,7 +350,7 @@ class ConfigManager(DockerBaseClass):
if not self.force:
self.client.module.warn("'ansible_key' label not found. Config will not be changed unless the force parameter is set to 'true'")
# template_driver has changed if it was set in the previous config
# and now it differs, or if it wasn't set but now it is.
# and now it differs, or if it was not set but now it is.
if attrs.get('Templating', {}).get('Name'):
if attrs['Templating']['Name'] != self.template_driver:
template_driver_changed = True

View File

@ -371,7 +371,7 @@ def is_binary(content):
if b'\x00' in content:
return True
# TODO: better detection
# (ansible-core also just checks for 0x00, and even just sticks to the first 8k, so this isn't too bad...)
# (ansible-core also just checks for 0x00, and even just sticks to the first 8k, so this is not too bad...)
return False
@ -450,7 +450,7 @@ def is_file_idempotent(client, container, managed_path, container_path, follow_l
diff['after_header'] = managed_path
diff['after'] = to_text(content)
# When forcing and we're not following links in the container, go!
# When forcing and we are not following links in the container, go!
if force and not follow_links:
retrieve_diff(client, container, container_path, follow_links, diff, max_file_size_for_diff)
return container_path, mode, False
@ -467,7 +467,7 @@ def is_file_idempotent(client, container, managed_path, container_path, follow_l
if follow_links:
container_path = real_container_path
# If the file wasn't found, continue
# If the file was not found, continue
if regular_stat is None:
if diff is not None:
diff['before_header'] = container_path
@ -617,7 +617,7 @@ def is_content_idempotent(client, container, content, container_path, follow_lin
diff['after_header'] = 'dynamically generated'
diff['after'] = to_text(content)
# When forcing and we're not following links in the container, go!
# When forcing and we are not following links in the container, go!
if force and not follow_links:
retrieve_diff(client, container, container_path, follow_links, diff, max_file_size_for_diff)
return container_path, mode, False
@ -634,7 +634,7 @@ def is_content_idempotent(client, container, content, container_path, follow_lin
if follow_links:
container_path = real_container_path
# If the file wasn't found, continue
# If the file was not found, continue
if regular_stat is None:
if diff is not None:
diff['before_header'] = container_path

View File

@ -587,7 +587,7 @@ class ImageManager(DockerBaseClass):
try:
self.client.delete_json('/images/{0}', name, params={'force': self.force_absent})
except NotFound:
# If the image vanished while we were trying to remove it, don't fail
# If the image vanished while we were trying to remove it, do not fail
pass
except Exception as exc:
self.fail("Error removing image %s - %s" % (name, to_native(exc)))
@ -758,7 +758,7 @@ class ImageManager(DockerBaseClass):
:param name: name of the image. required.
:param tag: image tag.
:param repository: path to the repository. required.
:param push: bool. push the image once it's tagged.
:param push: bool. push the image once it is tagged.
:return: None
'''
repo, repo_tag = parse_repository_tag(repository)
@ -780,7 +780,7 @@ class ImageManager(DockerBaseClass):
if not self.check_mode:
try:
# Finding the image does not always work, especially running a localhost registry. In those
# cases, if we don't set force=True, it errors.
# cases, if we do not set force=True, it errors.
params = {
'tag': repo_tag,
'repo': repo,

View File

@ -190,7 +190,7 @@ class ImageRemover(DockerBaseClass):
try:
res = self.client.delete_json('/images/{0}', name, params={'force': self.force, 'noprune': not self.prune})
except NotFound:
# If the image vanished while we were trying to remove it, don't fail
# If the image vanished while we were trying to remove it, do not fail
res = []
except Exception as exc:
self.fail("Error removing image %s - %s" % (name, to_native(exc)))

View File

@ -320,7 +320,7 @@ class LoginManager(DockerBaseClass):
# If user is already logged in, then response contains password for user
if 'password' in response:
# This returns correct password if user is logged in and wrong password is given.
# So if it returns another password as we passed, and the user didn't request to
# So if it returns another password as we passed, and the user did not request to
# reauthorize, still do it.
if not self.reauthorize and response['password'] != self.password:
try:

View File

@ -289,7 +289,7 @@ class SecretManager(DockerBaseClass):
def create_secret(self):
''' Create a new secret '''
secret_id = None
# We can't see the data after creation, so adding a label we can use for idempotency check
# We cannot see the data after creation, so adding a label we can use for idempotency check
labels = {
'ansible_key': self.data_key
}

View File

@ -38,13 +38,13 @@ options:
the port number from the listen address is used.
- If O(advertise_addr) is not specified, it will be automatically
detected when possible.
- Only used when swarm is initialised or joined. Because of this it's not
- Only used when swarm is initialised or joined. Because of this it is not
considered for idempotency checking.
type: str
default_addr_pool:
description:
- Default address pool in CIDR format.
- Only used when swarm is initialised. Because of this it's not considered
- Only used when swarm is initialised. Because of this it is not considered
for idempotency checking.
- Requires API version >= 1.39.
type: list
@ -52,7 +52,7 @@ options:
subnet_size:
description:
- Default address pool subnet mask length.
- Only used when swarm is initialised. Because of this it's not considered
- Only used when swarm is initialised. Because of this it is not considered
for idempotency checking.
- Requires API version >= 1.39.
type: int
@ -64,7 +64,7 @@ options:
like V(eth0:4567).
- If the port number is omitted, the default swarm listening port
is used.
- Only used when swarm is initialised or joined. Because of this it's not
- Only used when swarm is initialised or joined. Because of this it is not
considered for idempotency checking.
type: str
default: 0.0.0.0:2377